| code (string, 2–1.05M chars) | repo_name (string, 5–104 chars) | path (string, 4–251 chars) | language (1 class) | license (15 classes) | size (int32, 2–1.05M) |
|---|---|---|---|---|---|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2014 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
""" Pre-configured remote application for enabling sign in/up with GitHub.
**Usage:**
1. Ensure you have ``github3.py`` package installed:
.. code-block:: console
cdvirtualenv src/invenio
pip install -e .[github]
2. Edit your configuration and add:
.. code-block:: python
from invenio_oauthclient.contrib import github
OAUTHCLIENT_REMOTE_APPS = dict(
github=github.REMOTE_APP,
)
GITHUB_APP_CREDENTIALS = dict(
consumer_key="changeme",
consumer_secret="changeme",
)
3. Go to GitHub and register a new application:
https://github.com/settings/applications/new. When registering the
application ensure that the *Authorization callback URL* points to:
``CFG_SITE_SECURE_URL/oauth/authorized/github/`` (e.g.
``http://localhost:4000/oauth/authorized/github/`` for development).
4. Grab the *Client ID* and *Client Secret* after registering the application
and add them to your instance configuration (``invenio.cfg``):
.. code-block:: python
GITHUB_APP_CREDENTIALS = dict(
consumer_key="<CLIENT ID>",
consumer_secret="<CLIENT SECRET>",
)
5. Now go to ``CFG_SITE_SECURE_URL/oauth/login/github/`` (e.g.
http://localhost:4000/oauth/login/github/)
6. Also, you should see GitHub listed under Linked accounts:
http://localhost:4000//account/settings/linkedaccounts/
By default the GitHub module will try first look if a link already exists
between a GitHub account and a user. If no link is found, the module tries to
retrieve the user email address from GitHub to match it with a local user. If
this fails, the user is asked to provide an email address to sign-up.
In templates you can add a sign in/up link:
.. code-block:: jinja
<a href="{{url_for('oauthclient.login', remote_app='github')}}">
Sign in with GitHub
</a>
"""
import github3
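# Pre-configured remote application definition. Hook it into
# ``OAUTHCLIENT_REMOTE_APPS`` as shown in the usage notes above; the handler
# strings are dotted import paths that invenio-oauthclient resolves at
# runtime.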
REMOTE_APP = dict(
title='GitHub',
description='Software collaboration platform.',
icon='fa fa-github',
authorized_handler="invenio_oauthclient.handlers"
":authorized_signup_handler",
disconnect_handler="invenio_oauthclient.handlers"
":disconnect_handler",
signup_handler=dict(
info="invenio_oauthclient.contrib.github:account_info",
setup="invenio_oauthclient.contrib.github:account_setup",
view="invenio_oauthclient.handlers:signup_handler",
),
params=dict(
request_token_params={'scope': 'user:email'},
base_url='https://api.github.com/',
request_token_url=None,
access_token_url="https://github.com/login/oauth/access_token",
access_token_method='POST',
authorize_url="https://github.com/login/oauth/authorize",
app_key="GITHUB_APP_CREDENTIALS",
)
)
def account_info(remote, resp):
    """Retrieve remote account information used to find local user."""
    gh = github3.login(token=resp['access_token'])
    ghuser = gh.user()
    return dict(email=ghuser.email, nickname=ghuser.login)
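# Example return value of ``account_info`` (illustrative only):
#   {'email': 'octocat@example.com', 'nickname': 'octocat'}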
def account_setup(remote, token):
    """Perform additional setup after a user has been logged in."""
    pass
| jmartinm/invenio-oauthclient | invenio_oauthclient/contrib/github.py | Python | gpl-2.0 | 3,966 |
# (c) 2019 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from units.compat.mock import patch
from ansible.modules.network.nxos import nxos_bfd_global
from ansible.module_utils.network.nxos.nxos import NxosCmdRef
from .nxos_module import TestNxosModule, load_fixture, set_module_args
# TBD: These imports / import checks are only needed as a workaround for
# shippable, which fails this test due to import yaml & import ordereddict.
import pytest
from ansible.module_utils.network.nxos.nxos import nxosCmdRef_import_check
msg = nxosCmdRef_import_check()
@pytest.mark.skipif(len(msg), reason=msg)
class TestNxosBfdGlobalModule(TestNxosModule):
module = nxos_bfd_global
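    # Each test stubs out device I/O: ``load_config`` is mocked so nothing is
    # sent to a device, while ``execute_show_command`` and
    # ``get_platform_shortname`` return canned values, letting the tests
    # assert on the exact command list the module would generate.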
def setUp(self):
super(TestNxosBfdGlobalModule, self).setUp()
self.mock_load_config = patch('ansible.modules.network.nxos.nxos_bfd_global.load_config')
self.load_config = self.mock_load_config.start()
self.mock_execute_show_command = patch('ansible.module_utils.network.nxos.nxos.NxosCmdRef.execute_show_command')
self.execute_show_command = self.mock_execute_show_command.start()
self.mock_get_platform_shortname = patch('ansible.module_utils.network.nxos.nxos.NxosCmdRef.get_platform_shortname')
self.get_platform_shortname = self.mock_get_platform_shortname.start()
    def tearDown(self):
        super(TestNxosBfdGlobalModule, self).tearDown()
        self.mock_load_config.stop()
        self.mock_execute_show_command.stop()
        self.mock_get_platform_shortname.stop()
def load_fixtures(self, commands=None, device=''):
self.load_config.return_value = None
def test_bfd_defaults_n9k(self):
# feature bfd is enabled, no non-defaults are set.
self.execute_show_command.return_value = "feature bfd"
self.get_platform_shortname.return_value = 'N9K'
set_module_args(dict(
echo_interface='deleted',
echo_rx_interval=50,
interval={'tx': 50, 'min_rx': 50, 'multiplier': 3},
slow_timer=2000,
startup_timer=5,
ipv4_echo_rx_interval=50,
ipv4_interval={'tx': 50, 'min_rx': 50, 'multiplier': 3},
ipv4_slow_timer=2000,
ipv6_echo_rx_interval=50,
ipv6_interval={'tx': 50, 'min_rx': 50, 'multiplier': 3},
ipv6_slow_timer=2000
))
self.execute_module(changed=False)
def test_bfd_defaults_n3k(self):
# feature bfd is enabled, no non-defaults are set.
self.execute_show_command.return_value = "feature bfd"
self.get_platform_shortname.return_value = 'N3K'
set_module_args(dict(
echo_interface='deleted',
echo_rx_interval=250,
interval={'tx': 250, 'min_rx': 250, 'multiplier': 3},
slow_timer=2000,
startup_timer=5,
ipv4_echo_rx_interval=250,
ipv4_interval={'tx': 250, 'min_rx': 250, 'multiplier': 3},
ipv4_slow_timer=2000,
ipv6_echo_rx_interval=250,
ipv6_interval={'tx': 250, 'min_rx': 250, 'multiplier': 3},
ipv6_slow_timer=2000
))
self.execute_module(changed=False)
def test_bfd_defaults_n35(self):
# feature bfd is enabled, no non-defaults are set.
self.execute_show_command.return_value = "feature bfd"
self.get_platform_shortname.return_value = 'N35'
set_module_args(dict(
echo_interface='deleted',
echo_rx_interval=50,
interval={'tx': 50, 'min_rx': 50, 'multiplier': 3},
slow_timer=2000,
startup_timer=5,
ipv4_echo_rx_interval=50,
ipv4_interval={'tx': 50, 'min_rx': 50, 'multiplier': 3},
ipv4_slow_timer=2000,
))
self.execute_module(changed=False)
def test_bfd_defaults_n6k(self):
# feature bfd is enabled, no non-defaults are set.
self.execute_show_command.return_value = "feature bfd"
self.get_platform_shortname.return_value = 'N6K'
set_module_args(dict(
echo_interface='deleted',
interval={'tx': 50, 'min_rx': 50, 'multiplier': 3},
slow_timer=2000,
fabricpath_interval={'tx': 50, 'min_rx': 50, 'multiplier': 3},
fabricpath_slow_timer=2000,
fabricpath_vlan=1
))
self.execute_module(changed=False)
def test_bfd_defaults_n7k(self):
# feature bfd is enabled, no non-defaults are set.
self.execute_show_command.return_value = "feature bfd"
self.get_platform_shortname.return_value = 'N7K'
set_module_args(dict(
echo_interface='deleted',
echo_rx_interval=50,
interval={'tx': 50, 'min_rx': 50, 'multiplier': 3},
slow_timer=2000,
ipv4_echo_rx_interval=50,
ipv4_interval={'tx': 50, 'min_rx': 50, 'multiplier': 3},
ipv4_slow_timer=2000,
ipv6_echo_rx_interval=50,
ipv6_interval={'tx': 50, 'min_rx': 50, 'multiplier': 3},
ipv6_slow_timer=2000,
fabricpath_interval={'tx': 50, 'min_rx': 50, 'multiplier': 3},
fabricpath_slow_timer=2000,
fabricpath_vlan=1
))
self.execute_module(changed=False)
def test_bfd_existing_n9k(self):
module_name = self.module.__name__.rsplit('.', 1)[1]
self.execute_show_command.return_value = load_fixture(module_name, 'N9K.cfg')
self.get_platform_shortname.return_value = 'N9K'
set_module_args(dict(
echo_interface='deleted',
echo_rx_interval=51,
interval={'tx': 51, 'min_rx': 51, 'multiplier': 3},
slow_timer=2000,
startup_timer=5,
ipv4_echo_rx_interval=50,
ipv4_interval={'tx': 51, 'min_rx': 51, 'multiplier': 3},
ipv4_slow_timer=2000,
ipv6_echo_rx_interval=50,
ipv6_interval={'tx': 51, 'min_rx': 51, 'multiplier': 3},
ipv6_slow_timer=2000
))
self.execute_module(changed=True, commands=[
'no bfd echo-interface loopback2',
'bfd echo-rx-interval 51',
'bfd interval 51 min_rx 51 multiplier 3',
'bfd slow-timer 2000',
'bfd startup-timer 5',
'bfd ipv4 echo-rx-interval 50',
'bfd ipv4 interval 51 min_rx 51 multiplier 3',
'bfd ipv4 slow-timer 2000',
'bfd ipv6 echo-rx-interval 50',
'bfd ipv6 interval 51 min_rx 51 multiplier 3',
'bfd ipv6 slow-timer 2000',
])
def test_bfd_idempotence_n9k(self):
module_name = self.module.__name__.rsplit('.', 1)[1]
self.execute_show_command.return_value = load_fixture(module_name, 'N9K.cfg')
self.get_platform_shortname.return_value = 'N9K'
set_module_args(dict(
echo_interface='loopback2',
echo_rx_interval=56,
interval={'tx': 51, 'min_rx': 52, 'multiplier': 4},
slow_timer=2001,
startup_timer=6,
ipv4_echo_rx_interval=54,
ipv4_interval={'tx': 54, 'min_rx': 54, 'multiplier': 4},
ipv4_slow_timer=2004,
ipv6_echo_rx_interval=56,
ipv6_interval={'tx': 56, 'min_rx': 56, 'multiplier': 6},
ipv6_slow_timer=2006
))
self.execute_module(changed=False)
def test_bfd_existing_n7k(self):
module_name = self.module.__name__.rsplit('.', 1)[1]
self.execute_show_command.return_value = load_fixture(module_name, 'N7K.cfg')
self.get_platform_shortname.return_value = 'N7K'
set_module_args(dict(
echo_interface='deleted',
echo_rx_interval=51,
interval={'tx': 51, 'min_rx': 51, 'multiplier': 3},
slow_timer=2002,
ipv4_echo_rx_interval=51,
ipv4_interval={'tx': 51, 'min_rx': 51, 'multiplier': 3},
ipv4_slow_timer=2002,
ipv6_echo_rx_interval=51,
ipv6_interval={'tx': 51, 'min_rx': 51, 'multiplier': 3},
ipv6_slow_timer=2002,
fabricpath_interval={'tx': 51, 'min_rx': 51, 'multiplier': 3},
fabricpath_slow_timer=2003,
fabricpath_vlan=3,
))
self.execute_module(changed=True, commands=[
'no bfd echo-interface loopback2',
'bfd echo-rx-interval 51',
'bfd interval 51 min_rx 51 multiplier 3',
'bfd slow-timer 2002',
'bfd ipv4 echo-rx-interval 51',
'bfd ipv4 interval 51 min_rx 51 multiplier 3',
'bfd ipv4 slow-timer 2002',
'bfd ipv6 echo-rx-interval 51',
'bfd ipv6 interval 51 min_rx 51 multiplier 3',
'bfd ipv6 slow-timer 2002',
'bfd fabricpath interval 51 min_rx 51 multiplier 3',
'bfd fabricpath slow-timer 2003',
'bfd fabricpath vlan 3',
])
def test_bfd_idempotence_n7k(self):
module_name = self.module.__name__.rsplit('.', 1)[1]
self.execute_show_command.return_value = load_fixture(module_name, 'N7K.cfg')
self.get_platform_shortname.return_value = 'N7K'
set_module_args(dict(
echo_interface='loopback2',
echo_rx_interval=56,
interval={'tx': 51, 'min_rx': 52, 'multiplier': 4},
slow_timer=2001,
ipv4_echo_rx_interval=54,
ipv4_interval={'tx': 54, 'min_rx': 54, 'multiplier': 4},
ipv4_slow_timer=2004,
ipv6_echo_rx_interval=56,
ipv6_interval={'tx': 56, 'min_rx': 56, 'multiplier': 6},
ipv6_slow_timer=2006,
fabricpath_interval={'tx': 58, 'min_rx': 58, 'multiplier': 8},
fabricpath_slow_timer=2008,
fabricpath_vlan=2,
))
self.execute_module(changed=False)
| aperigault/ansible | test/units/modules/network/nxos/test_nxos_bfd_global.py | Python | gpl-3.0 | 10,607 |
#!/usr/bin/env python3
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from __future__ import absolute_import
import os
import platform
import sys
import subprocess
DIR_OF_THIS_SCRIPT = os.path.dirname( os.path.abspath( __file__ ) )
DIR_OF_DOCS = os.path.join( DIR_OF_THIS_SCRIPT, 'docs' )
def OnWindows():
return platform.system() == 'Windows'
# On Windows, distutils.spawn.find_executable only works for .exe files
# but .bat and .cmd files are also executables, so we use our own
# implementation.
def FindExecutable( executable ):
# Executable extensions used on Windows
WIN_EXECUTABLE_EXTS = [ '.exe', '.bat', '.cmd' ]
paths = os.environ[ 'PATH' ].split( os.pathsep )
base, extension = os.path.splitext( executable )
if OnWindows() and extension.lower() not in WIN_EXECUTABLE_EXTS:
extensions = WIN_EXECUTABLE_EXTS
else:
extensions = [ '' ]
for extension in extensions:
executable_name = executable + extension
if not os.path.isfile( executable_name ):
for path in paths:
executable_path = os.path.join( path, executable_name )
if os.path.isfile( executable_path ):
return executable_path
else:
return executable_name
return None
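# Install the docs' npm dependencies, then run the locally installed
# bootprint to render docs/openapi.yml into static HTML in the docs
# directory.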
def GenerateApiDocs():
npm = FindExecutable( 'npm' )
if not npm:
sys.exit( 'ERROR: NPM is required to generate API docs.' )
  os.chdir( DIR_OF_DOCS )
subprocess.call( [ npm, 'install', '--production' ] )
bootprint = FindExecutable( os.path.join( DIR_OF_DOCS, 'node_modules',
'.bin', 'bootprint' ) )
api = os.path.join( DIR_OF_DOCS, 'openapi.yml' )
subprocess.call( [ bootprint, 'openapi', api, DIR_OF_DOCS ] )
if __name__ == '__main__':
GenerateApiDocs()
| Valloric/ycmd | update_api_docs.py | Python | gpl-3.0 | 1,822 |
# coding=utf-8
# This file is part of SickRage.
#
# URL: https://sickrage.github.io
# Git: https://github.com/SickRage/SickRage.git
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from datetime import datetime
from feedparser.util import FeedParserDict
from hachoir_parser import createParser
import sickbeard
from sickbeard import logger
from sickbeard.classes import Proper, TorrentSearchResult
from sickbeard.common import Quality
from sickbeard.db import DBConnection
from sickrage.helper.common import try_int
from sickrage.helper.exceptions import ex
from sickrage.providers.GenericProvider import GenericProvider
from sickrage.show.Show import Show
class TorrentProvider(GenericProvider):
def __init__(self, name):
GenericProvider.__init__(self, name)
self.ratio = None
self.provider_type = GenericProvider.TORRENT
def find_propers(self, search_date=None):
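        # Find recently aired episodes that were downloaded/snatched without a
        # proper, then search this provider for PROPER/REPACK releases of each
        # and collect them as Proper results.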
results = []
db = DBConnection()
placeholder = ','.join([str(x) for x in Quality.DOWNLOADED + Quality.SNATCHED + Quality.SNATCHED_BEST])
sql_results = db.select(
'SELECT s.show_name, e.showid, e.season, e.episode, e.status, e.airdate'
' FROM tv_episodes AS e'
' INNER JOIN tv_shows AS s ON (e.showid = s.indexer_id)'
' WHERE e.airdate >= ' + str(search_date.toordinal()) +
' AND e.status IN (' + placeholder + ') and e.is_proper = 0'
)
for result in sql_results or []:
show = Show.find(sickbeard.showList, int(result[b'showid']))
if show:
episode = show.getEpisode(result[b'season'], result[b'episode'])
for term in self.proper_strings:
search_strings = self._get_episode_search_strings(episode, add_string=term)
for item in self.search(search_strings[0]):
title, url = self._get_title_and_url(item)
results.append(Proper(title, url, datetime.today(), show))
return results
def is_active(self):
return bool(sickbeard.USE_TORRENTS) and self.is_enabled()
@property
def _custom_trackers(self):
if not (sickbeard.TRACKERS_LIST and self.public):
return ''
return '&tr=' + '&tr='.join({x.strip() for x in sickbeard.TRACKERS_LIST.split(',') if x.strip()})
def _get_result(self, episodes):
return TorrentSearchResult(episodes)
def _get_size(self, item):
if isinstance(item, dict):
size = item.get('size', -1)
elif isinstance(item, (list, tuple)) and len(item) > 2:
size = item[2]
else:
size = -1
# Make sure we didn't select seeds/leechers by accident
if not size or size < 1024 * 1024:
size = -1
return try_int(size, -1)
def _get_storage_dir(self):
return sickbeard.TORRENT_DIR
def _get_title_and_url(self, item):
if isinstance(item, (dict, FeedParserDict)):
download_url = item.get('url', '')
title = item.get('title', '')
if not download_url:
download_url = item.get('link', '')
elif isinstance(item, (list, tuple)) and len(item) > 1:
download_url = item[1]
title = item[0]
else:
download_url = ''
title = ''
if title.endswith('DIAMOND'):
logger.log('Skipping DIAMOND release for mass fake releases.')
download_url = title = 'FAKERELEASE'
if download_url:
            download_url = download_url.replace('&amp;', '&')
if title:
title = title.replace(' ', '.')
return title, download_url
def _verify_download(self, file_name=None):
try:
parser = createParser(file_name)
if parser:
# pylint: disable=protected-access
# Access to a protected member of a client class
mime_type = parser._getMimeType()
try:
parser.stream._input.close()
except Exception:
pass
if mime_type == 'application/x-bittorrent':
return True
except Exception as e:
logger.log('Failed to validate torrent file: {0}'.format(ex(e)), logger.DEBUG)
logger.log('Result is not a valid torrent file', logger.DEBUG)
return False
def seed_ratio(self):
return self.ratio
| Maximilian-Reuter/SickRage-1 | sickrage/providers/torrent/TorrentProvider.py | Python | gpl-3.0 | 5,136 |
from django.db import migrations, models
from django.conf import settings
from opaque_keys.edx.django.models import CourseKeyField
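# Initial migration: creates the CourseGoal table, allowing at most one goal
# per (user, course_key) pair via the unique_together constraint below.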
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='CourseGoal',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('course_key', CourseKeyField(max_length=255, db_index=True)),
('goal_key', models.CharField(default='unsure', max_length=100, choices=[('certify', 'Earn a certificate.'), ('complete', 'Complete the course.'), ('explore', 'Explore the course.'), ('unsure', 'Not sure yet.')])),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL, on_delete=models.CASCADE)),
],
),
migrations.AlterUniqueTogether(
name='coursegoal',
unique_together={('user', 'course_key')},
),
]
| eduNEXT/edx-platform | lms/djangoapps/course_goals/migrations/0001_initial.py | Python | agpl-3.0 | 1,064 |
# -*- coding: utf-8 -*-
from django.db import migrations, models
import django.db.models.deletion
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='VideoPipelineIntegration',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('change_date', models.DateTimeField(auto_now_add=True, verbose_name='Change date')),
('enabled', models.BooleanField(default=False, verbose_name='Enabled')),
('api_url', models.URLField(help_text='edx-video-pipeline API URL.', verbose_name='Internal API URL')),
('service_username', models.CharField(default=u'video_pipeline_service_user', help_text='Username created for Video Pipeline Integration, e.g. video_pipeline_service_user.', max_length=100)),
('changed_by', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, editable=False, to=settings.AUTH_USER_MODEL, null=True, verbose_name='Changed by')),
],
options={
'ordering': ('-change_date',),
'abstract': False,
},
),
]
| stvstnfrd/edx-platform | openedx/core/djangoapps/video_pipeline/migrations/0001_initial.py | Python | agpl-3.0 | 1,352 |
"""
Data models used for Blockstore API Client
"""
from datetime import datetime
from uuid import UUID
import attr
import six
def _convert_to_uuid(value):
if not isinstance(value, UUID):
return UUID(value)
return value
@attr.s(frozen=True)
class Collection(object):
"""
Metadata about a blockstore collection
"""
uuid = attr.ib(type=UUID, converter=_convert_to_uuid)
title = attr.ib(type=six.text_type)
@attr.s(frozen=True)
class Bundle(object):
"""
Metadata about a blockstore bundle
"""
uuid = attr.ib(type=UUID, converter=_convert_to_uuid)
title = attr.ib(type=six.text_type)
description = attr.ib(type=six.text_type)
slug = attr.ib(type=six.text_type)
drafts = attr.ib(type=dict) # Dict of drafts, where keys are the draft names and values are draft UUIDs
# Note that if latest_version is 0, it means that no versions yet exist
latest_version = attr.ib(type=int, validator=attr.validators.instance_of(int))
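# A minimal construction sketch for ``Bundle`` (all values below are
# illustrative, not taken from a real Blockstore instance):
#
#   bundle = Bundle(
#       uuid='11111111-1111-1111-1111-111111111111',  # strings are converted to UUID
#       title='Example Bundle',
#       description='A bundle used for illustration.',
#       slug='example-bundle',
#       drafts={'studio_draft': '22222222-2222-2222-2222-222222222222'},
#       latest_version=0,  # 0 means no versions exist yet
#   )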
@attr.s(frozen=True)
class Draft(object):
"""
Metadata about a blockstore draft
"""
uuid = attr.ib(type=UUID, converter=_convert_to_uuid)
bundle_uuid = attr.ib(type=UUID, converter=_convert_to_uuid)
name = attr.ib(type=six.text_type)
updated_at = attr.ib(type=datetime, validator=attr.validators.instance_of(datetime))
files = attr.ib(type=dict)
links = attr.ib(type=dict)
@attr.s(frozen=True)
class BundleFile(object):
"""
Metadata about a file in a blockstore bundle or draft.
"""
path = attr.ib(type=six.text_type)
size = attr.ib(type=int)
url = attr.ib(type=six.text_type)
hash_digest = attr.ib(type=six.text_type)
@attr.s(frozen=True)
class DraftFile(BundleFile):
"""
Metadata about a file in a blockstore draft.
"""
modified = attr.ib(type=bool) # Was this file modified in the draft?
@attr.s(frozen=True)
class LinkReference(object):
"""
A pointer to a specific BundleVersion
"""
bundle_uuid = attr.ib(type=UUID, converter=_convert_to_uuid)
version = attr.ib(type=int)
snapshot_digest = attr.ib(type=six.text_type)
@attr.s(frozen=True)
class LinkDetails(object):
"""
Details about a specific link in a BundleVersion or Draft
"""
name = attr.ib(type=str)
direct = attr.ib(type=LinkReference)
indirect = attr.ib(type=list) # List of LinkReference objects
@attr.s(frozen=True)
class DraftLinkDetails(LinkDetails):
"""
Details about a specific link in a Draft
"""
modified = attr.ib(type=bool)
| stvstnfrd/edx-platform | openedx/core/lib/blockstore_api/models.py | Python | agpl-3.0 | 2,551 |
'''
Created on Jun 6, 2013
@author: dmitchell
'''
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from student.tests.factories import AdminFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
import xmodule_modifiers
import datetime
from pytz import UTC
from xmodule.modulestore.tests import factories
class TestXmoduleModfiers(ModuleStoreTestCase):
    # FIXME disabled b/c start date inheritance is not occurring and render_... in get_html is failing due
    # to middleware.lookup['main'] not being defined
def _test_add_histogram(self):
instructor = AdminFactory.create()
self.client.login(username=instructor.username, password='test')
course = CourseFactory.create(org='test',
number='313', display_name='histogram test')
section = ItemFactory.create(
parent_location=course.location, display_name='chapter hist',
category='chapter')
problem = ItemFactory.create(
parent_location=section.location, display_name='problem hist 1',
category='problem')
problem.has_score = False # don't trip trying to retrieve db data
late_problem = ItemFactory.create(
parent_location=section.location, display_name='problem hist 2',
category='problem')
late_problem.start = datetime.datetime.now(UTC) + datetime.timedelta(days=32)
late_problem.has_score = False
problem_module = factories.get_test_xmodule_for_descriptor(problem)
problem_module.get_html = xmodule_modifiers.add_histogram(lambda:'', problem_module, instructor)
self.assertRegexpMatches(
problem_module.get_html(), r'.*<font color=\'green\'>Not yet</font>.*')
problem_module = factories.get_test_xmodule_for_descriptor(late_problem)
problem_module.get_html = xmodule_modifiers.add_histogram(lambda: '', problem_module, instructor)
self.assertRegexpMatches(
problem_module.get_html(), r'.*<font color=\'red\'>Yes!</font>.*')
| PepperPD/edx-pepper-platform | common/djangoapps/tests.py | Python | agpl-3.0 | 2,072 |
# -*- coding: utf-8 -*-
#***************************************************************************
#* *
#* Copyright (c) 2015 Yorik van Havre <yorik@uncreated.net> *
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU Lesser General Public License (LGPL) *
#* as published by the Free Software Foundation; either version 2 of *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* This program is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Library General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#* License along with this program; if not, write to the Free Software *
#* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
#* USA *
#* *
#***************************************************************************
import FreeCAD,FreeCADGui,Path,PathGui
from PySide import QtCore,QtGui
"""Path SimpleCopy command"""
# Qt tanslation handling
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def translate(context, text, disambig=None):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def translate(context, text, disambig=None):
return QtGui.QApplication.translate(context, text, disambig)
class CommandPathSimpleCopy:
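    """Command that creates a non-parametric copy of another Path object."""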
def GetResources(self):
return {'Pixmap' : 'Path-SimpleCopy',
'MenuText': QtCore.QT_TRANSLATE_NOOP("Path_SimpleCopy","Simple Copy"),
'Accel': "P, Y",
'ToolTip': QtCore.QT_TRANSLATE_NOOP("Path_SimpleCopy","Creates a non-parametric copy of another path")}
def IsActive(self):
        return FreeCAD.ActiveDocument is not None
def Activated(self):
# check that the selection contains exactly what we want
selection = FreeCADGui.Selection.getSelection()
if len(selection) != 1:
FreeCAD.Console.PrintError(translate("Path_SimpleCopy","Please select exactly one path object\n"))
return
        if not selection[0].isDerivedFrom("Path::Feature"):
FreeCAD.Console.PrintError(translate("Path_SimpleCopy","Please select exactly one path object\n"))
return
FreeCAD.ActiveDocument.openTransaction(translate("Path_SimpleCopy","Simple Copy"))
FreeCADGui.addModule("PathScripts.PathUtils")
FreeCADGui.doCommand('obj = FreeCAD.ActiveDocument.addObject("Path::Feature","'+selection[0].Name+ '_copy")')
FreeCADGui.doCommand('obj.Path = FreeCAD.ActiveDocument.'+selection[0].Name+'.Path')
FreeCADGui.doCommand('PathScripts.PathUtils.addToProject(obj)')
FreeCAD.ActiveDocument.commitTransaction()
FreeCAD.ActiveDocument.recompute()
if FreeCAD.GuiUp:
# register the FreeCAD command
FreeCADGui.addCommand('Path_SimpleCopy',CommandPathSimpleCopy())
| timthelion/FreeCAD | src/Mod/Path/PathScripts/PathSimpleCopy.py | Python | lgpl-2.1 | 3,675 |
'''
* Copyright (C) 2015 Tripwire, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
'''
import xml.etree.ElementTree as ET
from stix.core import STIXPackage
import argparse, sys, os, shutil, re, socket
import TARDIS
if __name__=="__main__":
#Get options from the command line...
parser = argparse.ArgumentParser(description='TARDIS Threat Parser')
parser.add_argument('-f', help='STIX File', dest='file', required=True)
parser.add_argument('-i', help='Vulnerable IP', dest='ip', required=True)
parser.add_argument('-d', help='Vulnerable DNS Hostname', dest='hostname')
args = parser.parse_args()
file=args.file
sourceIP = args.ip
sourceHost = args.hostname
cve=''
vulnObject=''
if not os.path.exists(file):
        print(file + " does not exist.")
sys.exit()
if (sourceHost is None):
try:
result = socket.gethostbyaddr(sourceIP)
sourceHost = result[0]
        except socket.error:
            sourceHost = ""
if (len(sourceHost) > 0):
print ("File: " + file)
print ("IP: " + sourceIP)
print ("Host: " + sourceHost)
if os.path.exists('Results'):
shutil.rmtree('Results')
directory='Results'
#Create results directory to store the raw output
if not os.path.exists(directory):
os.makedirs(directory)
if not os.path.exists(directory + '/' + sourceIP):
os.makedirs(directory + '/' + sourceIP)
#Get CVE from STIX
stix_package = STIXPackage.from_xml(file)
for target in stix_package.exploit_targets:
for vuln in target.vulnerabilities:
print "CVE: " + vuln.cve_id
print "DESC:" + str(vuln.description)
vulnObject=str(vuln.description)
cve = vuln.cve_id
if len(cve) > 0:
if len(vulnObject) > 0:
if not os.path.exists('VulnXML/' + vuln.cve_id + '.xml'):
shutil.copyfile(file,'VulnXML/' + vuln.cve_id + '.xml')
numResults=TARDIS.main(cve, vulnObject, sourceIP, sourceHost)
else:
print("Description missing from Exploit Target")
else:
print("CVE Missing from STIX File")
else:
print ("Unable to resolve hostname, please provide one with -d option")
| xujun10110/TARDIS | parseSTIX.py | Python | apache-2.0 | 2,526 |
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cPickle
import io
import random
import paddle.utils.image_util as image_util
from paddle.trainer.PyDataProvider2 import *
#
# {'img_size': 32,
# 'settings': a global object,
# 'color': True,
# 'mean_img_size': 32,
# 'meta': './data/cifar-out/batches/batches.meta',
# 'num_classes': 10,
# 'file_list': ('./data/cifar-out/batches/train_batch_000',),
# 'use_jpeg': True}
def hook(settings, img_size, mean_img_size, num_classes, color, meta, use_jpeg,
is_train, **kwargs):
settings.mean_img_size = mean_img_size
settings.img_size = img_size
settings.num_classes = num_classes
settings.color = color
settings.is_train = is_train
if settings.color:
settings.img_raw_size = settings.img_size * settings.img_size * 3
else:
settings.img_raw_size = settings.img_size * settings.img_size
settings.meta_path = meta
settings.use_jpeg = use_jpeg
settings.img_mean = image_util.load_meta(settings.meta_path,
settings.mean_img_size,
settings.img_size, settings.color)
settings.logger.info('Image size: %s', settings.img_size)
settings.logger.info('Meta path: %s', settings.meta_path)
settings.input_types = {
'image': dense_vector(settings.img_raw_size),
'label': integer_value(settings.num_classes)
}
settings.logger.info('DataProvider Initialization finished')
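# ``@provider`` wraps the generator below as a PaddlePaddle data provider;
# ``min_pool_size=0`` appears to disable any minimum sample pool, so samples
# are handed to the trainer as soon as they are yielded.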
@provider(init_hook=hook, min_pool_size=0)
def processData(settings, file_list):
"""
The main function for loading data.
Load the batch, iterate all the images and labels in this batch.
file_list: the batch file list.
"""
with open(file_list, 'r') as fdata:
lines = [line.strip() for line in fdata]
random.shuffle(lines)
for file_name in lines:
with io.open(file_name.strip(), 'rb') as file:
data = cPickle.load(file)
indexes = list(range(len(data['images'])))
if settings.is_train:
random.shuffle(indexes)
for i in indexes:
if settings.use_jpeg == 1:
img = image_util.decode_jpeg(data['images'][i])
else:
img = data['images'][i]
img_feat = image_util.preprocess_img(
img, settings.img_mean, settings.img_size,
settings.is_train, settings.color)
label = data['labels'][i]
yield {
'image': img_feat.astype('float32'),
'label': int(label)
}
| emailweixu/Paddle | demo/image_classification/image_provider.py | Python | apache-2.0 | 3,297 |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test for the internal ops used by tfdbg v2."""
import os
import numpy as np
from tensorflow.core.protobuf import debug_event_pb2
from tensorflow.python.debug.lib import debug_events_reader
from tensorflow.python.debug.lib import debug_events_writer
from tensorflow.python.debug.lib import dumping_callback_test_lib
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_debug_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import googletest
class DebugIdentityV2OpTest(dumping_callback_test_lib.DumpingCallbackTestBase):
"""Tests for DebugIdentityV2Op: when DebugEventsWriter is initialized.
DebugEventsWriter being initialized prior to DebugIdentityV2 ops being invoked
for the first time is the typical case (e.g., tfdbg2 running on a local
machine with only local devices.)
"""
def setUp(self):
super(DebugIdentityV2OpTest, self).setUp()
# Testing using a small circular-buffer size.
self.circular_buffer_size = 4
self.tfdbg_run_id = "test_tfdbg_run"
self.writer = debug_events_writer.DebugEventsWriter(
self.dump_root, self.tfdbg_run_id, self.circular_buffer_size)
def tearDown(self):
self.writer.Close()
super(DebugIdentityV2OpTest, self).tearDown()
@test_util.run_in_graph_and_eager_modes
def testSingleTensorFullTensorDebugModeWithCircularBufferBehavior(self):
@def_function.function
def write_debug_trace(x):
square = math_ops.square(x)
gen_debug_ops.debug_identity_v2(
square,
tfdbg_context_id="deadbeaf",
op_name="Square",
output_slot=0,
tensor_debug_mode=debug_event_pb2.TensorDebugMode.FULL_TENSOR,
debug_urls=["file://%s" % self.dump_root])
sqrt = math_ops.sqrt(x)
gen_debug_ops.debug_identity_v2(
sqrt,
tfdbg_context_id="beafdead",
op_name="Sqrt",
output_slot=0,
tensor_debug_mode=debug_event_pb2.TensorDebugMode.FULL_TENSOR,
debug_urls=["file://%s" % self.dump_root])
return square + sqrt
x = np.array([3.0, 4.0])
# Only the graph-execution trace of the last iteration should be written
# to self.dump_root.
for _ in range(self.circular_buffer_size // 2 + 1):
self.assertAllClose(
write_debug_trace(x), [9.0 + np.sqrt(3.0), 16.0 + 2.0])
with debug_events_reader.DebugEventsReader(self.dump_root) as reader:
# Check that the .metadata DebugEvents data file has been created, even
# before FlushExecutionFiles() is called.
self.assertGreater(reader.starting_wall_time(), 0)
self.assertTrue(reader.tensorflow_version())
self.assertTrue(reader.tfdbg_file_version().startswith("debug.Event"))
graph_trace_iter = reader.graph_execution_traces_iterators()[0]
# Before FlushExecutionFiles() is called, the .graph_execution_traces file
# ought to be empty.
with self.assertRaises(StopIteration):
next(graph_trace_iter)
# Flush the circular buffer.
self.writer.FlushExecutionFiles()
graph_trace_iter = reader.graph_execution_traces_iterators()[0]
# The circular buffer has a size of 4. So only the data from the
# last two iterations should have been written to self.dump_root.
for _ in range(2):
debug_event = next(graph_trace_iter).debug_event
self.assertGreater(debug_event.wall_time, 0)
trace = debug_event.graph_execution_trace
self.assertEqual(trace.tfdbg_context_id, "deadbeaf")
self.assertEqual(trace.op_name, "Square")
self.assertEqual(trace.output_slot, 0)
self.assertEqual(trace.tensor_debug_mode,
debug_event_pb2.TensorDebugMode.FULL_TENSOR)
tensor_value = tensor_util.MakeNdarray(trace.tensor_proto)
self.assertAllClose(tensor_value, [9.0, 16.0])
debug_event = next(graph_trace_iter).debug_event
self.assertGreater(debug_event.wall_time, 0)
trace = debug_event.graph_execution_trace
self.assertEqual(trace.tfdbg_context_id, "beafdead")
self.assertEqual(trace.op_name, "Sqrt")
self.assertEqual(trace.output_slot, 0)
self.assertEqual(trace.tensor_debug_mode,
debug_event_pb2.TensorDebugMode.FULL_TENSOR)
tensor_value = tensor_util.MakeNdarray(trace.tensor_proto)
self.assertAllClose(tensor_value, [np.sqrt(3.0), 2.0])
# Only the graph-execution trace of the last iteration should be written
# to self.dump_root.
with self.assertRaises(StopIteration):
next(graph_trace_iter)
@test_util.run_in_graph_and_eager_modes
def testControlFlow(self):
@def_function.function
def collatz(x):
counter = constant_op.constant(0, dtype=dtypes.int32)
while math_ops.greater(x, 1):
counter = counter + 1
gen_debug_ops.debug_identity_v2(
x,
tfdbg_context_id="deadbeaf",
op_name="x",
output_slot=0,
tensor_debug_mode=debug_event_pb2.TensorDebugMode.FULL_TENSOR,
debug_urls=["file://%s" % self.dump_root])
if math_ops.equal(x % 2, 0):
x = math_ops.div(x, 2)
else:
x = x * 3 + 1
return counter
x = constant_op.constant(10, dtype=dtypes.int32)
self.evaluate(collatz(x))
self.writer.FlushExecutionFiles()
with debug_events_reader.DebugEventsReader(self.dump_root) as reader:
graph_trace_iter = reader.graph_execution_traces_iterators()[0]
try:
x_values = []
timestamp = 0
while True:
debug_event = next(graph_trace_iter).debug_event
self.assertGreater(debug_event.wall_time, timestamp)
timestamp = debug_event.wall_time
trace = debug_event.graph_execution_trace
self.assertEqual(trace.tfdbg_context_id, "deadbeaf")
self.assertEqual(trace.op_name, "x")
self.assertEqual(trace.output_slot, 0)
self.assertEqual(trace.tensor_debug_mode,
debug_event_pb2.TensorDebugMode.FULL_TENSOR)
x_values.append(int(tensor_util.MakeNdarray(trace.tensor_proto)))
except StopIteration:
pass
# Due to the circular buffer, only the last 4 iterations of
# [10, 5, 16, 8, 4, 2] should have been written.
self.assertAllEqual(x_values, [16, 8, 4, 2])
@test_util.run_in_graph_and_eager_modes
def testTwoDumpRoots(self):
another_dump_root = os.path.join(self.dump_root, "another")
another_debug_url = "file://%s" % another_dump_root
another_writer = debug_events_writer.DebugEventsWriter(
another_dump_root, "test_tfdbg_run")
@def_function.function
def write_debug_trace(x):
# DebugIdentityV2 is a stateful op. It ought to be included by auto
# control dependency.
square = math_ops.square(x)
gen_debug_ops.debug_identity_v2(
square,
tfdbg_context_id="deadbeaf",
tensor_debug_mode=debug_event_pb2.TensorDebugMode.FULL_TENSOR,
debug_urls=["file://%s" % self.dump_root, another_debug_url])
return square + 1.0
x = np.array([3.0, 4.0])
self.assertAllClose(write_debug_trace(x), np.array([10.0, 17.0]))
self.writer.FlushExecutionFiles()
another_writer.FlushExecutionFiles()
another_writer.Close()
for debug_root in (self.dump_root, another_dump_root):
with debug_events_reader.DebugEventsReader(debug_root) as reader:
graph_trace_iter = reader.graph_execution_traces_iterators()[0]
debug_event = next(graph_trace_iter).debug_event
trace = debug_event.graph_execution_trace
self.assertEqual(trace.tfdbg_context_id, "deadbeaf")
self.assertEqual(trace.op_name, "")
self.assertEqual(trace.tensor_debug_mode,
debug_event_pb2.TensorDebugMode.FULL_TENSOR)
tensor_value = tensor_util.MakeNdarray(trace.tensor_proto)
self.assertAllClose(tensor_value, [9.0, 16.0])
with self.assertRaises(StopIteration):
next(graph_trace_iter)
class DebugIdentityV2OpUninitializedWriterTest(
dumping_callback_test_lib.DumpingCallbackTestBase):
"""Tests for DebugIdentityV2Op: when DebugEventsWriter is not initialized.
This case can occur when DebugIdentityV2Ops are running on a remote
TensorFlow server (e.g., a TPU worker).
"""
@test_util.run_in_graph_and_eager_modes
def testInvokingDebugIdentityV2OpBeforeCreatingDebugEventsWriterWorks(self):
circular_buffer_size = 3
@def_function.function
def write_debug_trace(x):
# DebugIdentityV2 is a stateful op. It ought to be included by auto
# control dependency.
square = math_ops.square(x)
gen_debug_ops.debug_identity_v2(
square,
tfdbg_context_id="deadbeaf",
op_name="Square",
output_slot=0,
tensor_debug_mode=debug_event_pb2.TensorDebugMode.FULL_TENSOR,
debug_urls=["file://%s" % self.dump_root],
circular_buffer_size=circular_buffer_size)
return square
    # The DebugIdentityV2 ops are invoked *before* a DebugEventsWriter at the
    # same dump root is created.
for i in range(circular_buffer_size * 2):
self.assertAllClose(
write_debug_trace(np.array([i]).astype(np.float32)), [i**2.0])
writer = debug_events_writer.DebugEventsWriter(self.dump_root,
"test_tfdbg_run",
circular_buffer_size)
writer.FlushNonExecutionFiles()
writer.FlushExecutionFiles()
with debug_events_reader.DebugEventsReader(self.dump_root) as reader:
graph_trace_iter = reader.graph_execution_traces_iterators()[0]
graph_execution_traces = []
while True:
try:
graph_execution_traces.append(
next(graph_trace_iter).debug_event.graph_execution_trace)
except StopIteration:
break
self.assertLen(graph_execution_traces, circular_buffer_size)
for i in range(circular_buffer_size):
self.assertAllClose(
tensor_util.MakeNdarray(graph_execution_traces[i].tensor_proto),
[(i + circular_buffer_size)**2.0])
class DebugNumericSummaryV2Test(test_util.TensorFlowTestCase):
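  # Each test below evaluates ``gen_debug_ops.debug_numeric_summary_v2`` in a
  # specific tensor_debug_mode and asserts on the fixed-size summary vector
  # the op returns (e.g. REDUCE_INF_NAN_THREE_SLOTS yields three slots
  # recording -Inf, +Inf and NaN presence).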
@test_util.run_in_graph_and_eager_modes
def testDebugNumericSummaryV2OpReduceInfNanThreeSlots(self):
def debug_summary(x):
return self.evaluate(
gen_debug_ops.debug_numeric_summary_v2(
x,
tensor_debug_mode=(
debug_event_pb2.TensorDebugMode.REDUCE_INF_NAN_THREE_SLOTS)))
self.assertAllEqual(
debug_summary(constant_op.constant([])), [0.0, 0.0, 0.0])
self.assertAllEqual(
debug_summary(constant_op.constant(42.0)), [0.0, 0.0, 0.0])
self.assertAllEqual(
debug_summary(constant_op.constant([3.0, 4.0])), [0.0, 0.0, 0.0])
self.assertAllEqual(
debug_summary(constant_op.constant(np.array([3.0, -np.inf]))),
[-np.inf, 0.0, 0.0])
self.assertAllEqual(
debug_summary(constant_op.constant(np.array([[0, 0], [np.nan, 0]]))),
[0.0, 0.0, np.nan])
self.assertAllEqual(
debug_summary(
constant_op.constant(np.array([[0, 0], [np.nan, np.inf]]))),
[0.0, np.inf, np.nan])
self.assertAllEqual(
debug_summary(
constant_op.constant(np.array([[0, np.inf], [np.nan, -np.inf]]))),
[-np.inf, np.inf, np.nan])
x = np.zeros([100, 100], dtype=np.float16)
x[32, 47] = np.nan
self.assertAllEqual(
debug_summary(constant_op.constant(x)), [0.0, 0.0, np.nan])
x = np.zeros([97, 97], dtype=np.float32)
x[50, 83] = -np.inf
self.assertAllEqual(
debug_summary(constant_op.constant(x)), [-np.inf, 0.0, 0.0])
x[1, 41] = np.nan
self.assertAllEqual(
debug_summary(constant_op.constant(x)), [-np.inf, 0.0, np.nan])
x = np.zeros([9701], dtype=np.float64)
x[9700] = np.nan
self.assertAllEqual(
debug_summary(constant_op.constant(x)), [0.0, 0.0, np.nan])
@test_util.run_in_graph_and_eager_modes
def testDebugNumericSummaryV2OpLargeTensorIDError(self):
modes = [
debug_event_pb2.TensorDebugMode.CURT_HEALTH,
debug_event_pb2.TensorDebugMode.CONCISE_HEALTH,
debug_event_pb2.TensorDebugMode.SHAPE,
]
# Maximum allowed tensor_id
tensor_id = np.power(2, 53, dtype=np.int64)
for mode in modes:
self.evaluate(
gen_debug_ops.debug_numeric_summary_v2(
constant_op.constant(42.0),
tensor_debug_mode=mode,
tensor_id=tensor_id,
output_dtype=dtypes.float64))
# Incrementing by one should error
tensor_id += 1
for mode in modes:
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(
gen_debug_ops.debug_numeric_summary_v2(
constant_op.constant(42.0),
tensor_debug_mode=mode,
tensor_id=tensor_id,
output_dtype=dtypes.float64))
@test_util.run_in_graph_and_eager_modes
def testDebugNumericSummaryV2OpCurtHealthValuesSmall(self):
def debug_summary(x):
return self.evaluate(
gen_debug_ops.debug_numeric_summary_v2(
x,
tensor_debug_mode=(debug_event_pb2.TensorDebugMode.CURT_HEALTH),
tensor_id=x._id,
output_dtype=dtypes.float64)), x._id
tensor, tensor_id = debug_summary(constant_op.constant([]))
self.assertAllEqual(tensor, [tensor_id, 0.0])
tensor, tensor_id = debug_summary(constant_op.constant(42.0))
self.assertAllEqual(tensor, [tensor_id, 0.0])
tensor, tensor_id = debug_summary(constant_op.constant([3.0, 4.0]))
self.assertAllEqual(tensor, [tensor_id, 0.0])
tensor, tensor_id = debug_summary(
constant_op.constant(np.array([3.0, -np.inf])))
self.assertAllEqual(tensor, [tensor_id, 1.0])
tensor, tensor_id = debug_summary(
constant_op.constant(np.array([[0, 0], [np.nan, 0]])))
self.assertAllEqual(tensor, [tensor_id, 1.0])
tensor, tensor_id = debug_summary(
constant_op.constant(np.array([[0, 0], [np.nan, np.inf]])))
self.assertAllEqual(tensor, [tensor_id, 1.0])
tensor, tensor_id = debug_summary(
constant_op.constant(np.array([[0, np.inf], [np.nan, -np.inf]])))
self.assertAllEqual(tensor, [tensor_id, 1.0])
@test_util.run_in_graph_and_eager_modes
def testDebugNumericSummaryV2OpCurtHealthValuesLarge(self):
def debug_summary(x):
return self.evaluate(
gen_debug_ops.debug_numeric_summary_v2(
x,
tensor_debug_mode=(debug_event_pb2.TensorDebugMode.CURT_HEALTH),
tensor_id=x._id,
output_dtype=dtypes.float64)), x._id
x = np.zeros([100, 100], dtype=np.float16)
x[32, 47] = np.nan
tensor, tensor_id = debug_summary(constant_op.constant(x))
self.assertAllEqual(tensor, [tensor_id, 1.0])
x = np.zeros([97, 97], dtype=np.float32)
x[50, 83] = -np.inf
tensor, tensor_id = debug_summary(constant_op.constant(x))
self.assertAllEqual(tensor, [tensor_id, 1.0])
x[1, 41] = np.nan
tensor, tensor_id = debug_summary(constant_op.constant(x))
self.assertAllEqual(tensor, [tensor_id, 1.0])
x = np.zeros([9701], dtype=np.float64)
x[9700] = np.nan
tensor, tensor_id = debug_summary(constant_op.constant(x))
self.assertAllEqual(tensor, [tensor_id, 1.0])
@test_util.run_in_graph_and_eager_modes
def testDebugNumericSummaryV2OpCurtHealthConsistency(self):
def debug_summary(x):
return self.evaluate(
gen_debug_ops.debug_numeric_summary_v2(
x,
tensor_debug_mode=(debug_event_pb2.TensorDebugMode.CURT_HEALTH),
tensor_id=x._id,
output_dtype=dtypes.float64)), x._id
x = np.zeros([100, 100], dtype=np.float16)
x[43, 99] = np.nan
c = constant_op.constant(x)
tensor_1, tensor_id_1 = debug_summary(c)
tensor_2, tensor_id_2 = debug_summary(c)
self.assertAllEqual(tensor_1, tensor_2)
self.assertEqual(tensor_id_1, tensor_id_2)
x = np.zeros([100, 100, 50], dtype=np.float64)
x[0, 0, 1] = np.inf
c = constant_op.constant(x)
tensor_1, tensor_id_1 = debug_summary(c)
tensor_2, tensor_id_2 = debug_summary(c)
self.assertAllEqual(tensor_1, tensor_2)
self.assertEqual(tensor_id_1, tensor_id_2)
c = constant_op.constant(np.ones((100, 200), np.double))
tensor_1, tensor_id_1 = debug_summary(c)
tensor_2, tensor_id_2 = debug_summary(c)
self.assertAllEqual(tensor_1, tensor_2)
self.assertEqual(tensor_id_1, tensor_id_2)
@test_util.run_in_graph_and_eager_modes
def testDebugNumericSummaryV2OpDeterminism(self):
x = np.zeros([100, 100, 50], dtype=np.float64)
x = constant_op.constant(x)
modes = (
debug_event_pb2.TensorDebugMode.CONCISE_HEALTH,
debug_event_pb2.TensorDebugMode.FULL_HEALTH,
)
for mode in modes:
debug_mode = debug_event_pb2.TensorDebugMode.Name(mode)
with test_util.deterministic_ops():
if test_util.is_gpu_available(cuda_only=True):
with self.assertRaisesRegex(
errors_impl.UnimplementedError, "Determinism is not yet "
"supported for DebugNumericSummaryV2 when tensor_debug_mode is "
+ debug_mode + "."):
self.evaluate(
gen_debug_ops.debug_numeric_summary_v2(
x,
tensor_debug_mode=mode,
tensor_id=x._id,
output_dtype=dtypes.float64))
@test_util.run_in_graph_and_eager_modes
def testDebugNumericSummaryV2OpConciseHealthSmall(self):
def debug_summary(x):
return self.evaluate(
gen_debug_ops.debug_numeric_summary_v2(
x,
tensor_debug_mode=(
debug_event_pb2.TensorDebugMode.CONCISE_HEALTH),
tensor_id=x._id,
output_dtype=dtypes.float64)), x._id
tensor, tensor_id = debug_summary(constant_op.constant([]))
self.assertAllEqual(tensor, [tensor_id, 0.0, 0.0, 0.0, 0.0])
tensor, tensor_id = debug_summary(constant_op.constant(42.0))
self.assertAllEqual(tensor, [tensor_id, 1.0, 0.0, 0.0, 0.0])
tensor, tensor_id = debug_summary(constant_op.constant([3.0, 4.0]))
self.assertAllEqual(tensor, [tensor_id, 2.0, 0.0, 0.0, 0.0])
tensor, tensor_id = debug_summary(
constant_op.constant(np.array([3.0, -np.inf])))
self.assertAllEqual(tensor, [tensor_id, 2.0, 1.0, 0.0, 0.0])
tensor, tensor_id = debug_summary(
constant_op.constant(np.array([[0, 0], [np.nan, 0]])))
self.assertAllEqual(tensor, [tensor_id, 4.0, 0.0, 0.0, 1.0])
tensor, tensor_id = debug_summary(
constant_op.constant(np.array([[0, 0], [np.nan, np.inf]])))
self.assertAllEqual(tensor, [tensor_id, 4.0, 0.0, 1.0, 1.0])
tensor, tensor_id = debug_summary(
constant_op.constant(np.array([[0, np.inf], [np.nan, -np.inf]])))
self.assertAllEqual(tensor, [tensor_id, 4.0, 1.0, 1.0, 1.0])
@test_util.run_in_graph_and_eager_modes
def testDebugNumericSummaryV2OpConciseHealthLarge(self):
def debug_summary(x):
return self.evaluate(
gen_debug_ops.debug_numeric_summary_v2(
x,
tensor_debug_mode=(
debug_event_pb2.TensorDebugMode.CONCISE_HEALTH),
tensor_id=x._id,
output_dtype=dtypes.float64)), x._id
x = np.zeros([100, 100], dtype=np.float16)
x[32, :] = np.nan
tensor, tensor_id = debug_summary(constant_op.constant(x))
self.assertAllEqual(tensor, [tensor_id, 10000.0, 0.0, 0.0, 100.0])
x = np.zeros([97, 97], dtype=np.float32)
x[50, 83:85] = -np.inf
tensor, tensor_id = debug_summary(constant_op.constant(x))
self.assertAllEqual(tensor, [tensor_id, 97 * 97, 2.0, 0.0, 0.0])
x[1:9, 41] = np.nan
tensor, tensor_id = debug_summary(constant_op.constant(x))
self.assertAllEqual(tensor, [tensor_id, 97 * 97, 2.0, 0.0, 8.0])
x = np.zeros([9701], dtype=np.float64)
x[9700] = np.nan
tensor, tensor_id = debug_summary(constant_op.constant(x))
self.assertAllEqual(tensor, [tensor_id, 9701, 0.0, 0.0, 1.0])
@test_util.run_in_graph_and_eager_modes
def testDebugNumericSummaryV2OpConciseHealthConsistency(self):
def debug_summary(x):
return self.evaluate(
gen_debug_ops.debug_numeric_summary_v2(
x,
tensor_debug_mode=(
debug_event_pb2.TensorDebugMode.CONCISE_HEALTH),
tensor_id=x._id,
output_dtype=dtypes.float64)), x._id
    # Assert that the same op returns a consistent value.
x = np.zeros([100, 100], dtype=np.float16)
x[3, 4] = -np.inf
c = constant_op.constant(x)
tensor_1, tensor_id_1 = debug_summary(c)
tensor_2, tensor_id_2 = debug_summary(c)
self.assertAllEqual(tensor_1, tensor_2)
self.assertEqual(tensor_id_1, tensor_id_2)
c = constant_op.constant(np.ones((100, 200), np.double))
tensor_1, tensor_id_1 = debug_summary(c)
tensor_2, tensor_id_2 = debug_summary(c)
self.assertAllEqual(tensor_1, tensor_2)
self.assertEqual(tensor_id_1, tensor_id_2)
@test_util.run_in_graph_and_eager_modes
def testDebugNumericSummaryV2OpShapeEmpty(self):
def debug_summary(x):
return self.evaluate(
gen_debug_ops.debug_numeric_summary_v2(
x,
tensor_debug_mode=(debug_event_pb2.TensorDebugMode.SHAPE),
tensor_id=x._id,
output_dtype=dtypes.float64)), x._id
tensor, tensor_id = debug_summary(constant_op.constant(0.0))
self.assertAllEqual(
tensor, [tensor_id, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
@test_util.run_in_graph_and_eager_modes
def testDebugNumericSummaryV2OpShapeSmall(self):
def debug_summary(x):
return self.evaluate(
gen_debug_ops.debug_numeric_summary_v2(
x,
tensor_debug_mode=(debug_event_pb2.TensorDebugMode.SHAPE),
tensor_id=x._id,
output_dtype=dtypes.float64)), x._id
x = np.zeros([3, 4], dtype=np.float32)
tensor, tensor_id = debug_summary(constant_op.constant(x))
self.assertAllEqual(
tensor, [tensor_id, 1.0, 2.0, 12.0, 3.0, 4.0, 0.0, 0.0, 0.0, 0.0])
x = np.ones([1, 2, 3, 4, 5, 6], dtype=np.float16)
x[0, 1, 2, 2, 2, 2] = np.nan
tensor, tensor_id = debug_summary(constant_op.constant(x))
self.assertAllEqual(
tensor,
[tensor_id, 19, 6.0, 2 * 3 * 4 * 5 * 6, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
x = np.zeros([2], dtype=np.float32)
tensor, tensor_id = debug_summary(constant_op.constant(x))
self.assertAllEqual(
tensor, [tensor_id, 1.0, 1.0, 2.0, 2.0, 0.0, 0.0, 0.0, 0.0, 0.0])
tensor, tensor_id = debug_summary(constant_op.constant([]))
self.assertAllEqual(
tensor, [tensor_id, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
@test_util.run_in_graph_and_eager_modes
def testDebugNumericSummaryV2OpShapeLarge(self):
def debug_summary(x):
return self.evaluate(
gen_debug_ops.debug_numeric_summary_v2(
x,
tensor_debug_mode=(debug_event_pb2.TensorDebugMode.SHAPE),
tensor_id=x._id,
output_dtype=dtypes.float64)), x._id
x = np.ones([1, 2, 3, 4, 5, 6, 7], dtype=np.double)
tensor, tensor_id = debug_summary(constant_op.constant(x))
self.assertAllEqual(tensor, [
tensor_id, 2.0, 7.0, 2 * 3 * 4 * 5 * 6 * 7, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0
])
@test_util.run_in_graph_and_eager_modes
def testDebugNumericSummaryV2OpFullHealthSmall(self):
def debug_summary(x):
return self.evaluate(
gen_debug_ops.debug_numeric_summary_v2(
x,
tensor_debug_mode=(debug_event_pb2.TensorDebugMode.FULL_HEALTH),
tensor_id=x._id,
output_dtype=dtypes.float64)), x._id
tensor, tensor_id = debug_summary(constant_op.constant([]))
expected = [tensor_id, -1, 1, 1, 0, 0, 0, 0, 0, 0, 0]
self.assertAllEqual(tensor, expected)
tensor, tensor_id = debug_summary(constant_op.constant(42.0))
expected = [tensor_id, -1, 1, 0, 1, 0, 0, 0, 0, 0, 1]
self.assertAllEqual(tensor, expected)
tensor, tensor_id = debug_summary(constant_op.constant([3.0, 4.0]))
expected = [tensor_id, -1, 1, 1, 2, 0, 0, 0, 0, 0, 2]
self.assertAllEqual(tensor, expected)
tensor, tensor_id = debug_summary(
constant_op.constant(np.array([3, -np.inf], dtype=np.float32)))
expected = [tensor_id, -1, 1, 1, 2, 1, 0, 0, 0, 0, 1]
self.assertAllEqual(tensor, expected)
tensor, tensor_id = debug_summary(
constant_op.constant(np.array([[0, 0], [np.nan, 0]], dtype=np.float64)))
expected = [tensor_id, -1, 2, 2, 4, 0, 0, 1, 0, 3, 0]
self.assertAllEqual(tensor, expected)
tensor, tensor_id = debug_summary(
constant_op.constant(
np.array([[0, 0], [np.nan, np.inf]], dtype=np.float16)))
expected = [tensor_id, -1, 19, 2, 4, 0, 1, 1, 0, 2, 0]
self.assertAllEqual(tensor, expected)
tensor, tensor_id = debug_summary(
constant_op.constant(
np.array([[0, np.inf], [np.nan, -np.inf]], dtype=np.float32)))
expected = [tensor_id, -1, 1, 2, 4, 1, 1, 1, 0, 1, 0]
self.assertAllEqual(tensor, expected)
@test_util.run_in_graph_and_eager_modes
def testDebugNumericSummaryV2OpFullHealthLarge(self):
def debug_summary(x):
return self.evaluate(
gen_debug_ops.debug_numeric_summary_v2(
x,
tensor_debug_mode=(debug_event_pb2.TensorDebugMode.FULL_HEALTH),
tensor_id=x._id,
output_dtype=dtypes.float64)), x._id
def tensor_counts(arr):
counts = [len(np.shape(arr)), np.size(arr), 0, 0, 0, 0, 0, 0]
for n in np.ravel(arr):
if np.isneginf(n):
counts[2] += 1
elif np.isposinf(n):
counts[3] += 1
elif np.isnan(n):
counts[4] += 1
elif n < 0.:
counts[5] += 1
elif n == 0.:
counts[6] += 1
else:
counts[7] += 1
return counts
x = np.zeros([50, 50], dtype=np.float16)
x[32, 47] = np.nan
x[0:4, 3] = np.inf
x[40:50, 40:50] = 10
x[3, 20] = -10
tensor, tensor_id = debug_summary(constant_op.constant(x))
expected = [tensor_id, -1, 19] + tensor_counts(x)
self.assertAllEqual(tensor, expected)
x = np.ones([25, 25, 50], dtype=np.float32) * np.inf
x[:, :, 1] = np.nan
x[:, :, 2] = -np.inf
x[:, :, 3] = -1
x[:, :, 4] = 0
x[:, :, 5] = 1
tensor, tensor_id = debug_summary(constant_op.constant(x))
expected = [tensor_id, -1, 1] + tensor_counts(x)
self.assertAllEqual(tensor, expected)
x[0, 0, 0] = np.nan
tensor, tensor_id = debug_summary(constant_op.constant(x))
expected = [
tensor_id,
-1,
1,
] + tensor_counts(x)
self.assertAllEqual(tensor, expected)
x = np.zeros([9701], dtype=np.float64)
x[9700] = np.nan
tensor, tensor_id = debug_summary(constant_op.constant(x))
expected = [tensor_id, -1, 2] + tensor_counts(x)
self.assertAllEqual(tensor, expected)
@test_util.run_in_graph_and_eager_modes
def testDebugNumericSummaryV2OpFullHealthConsistency(self):
def debug_summary(x):
return self.evaluate(
gen_debug_ops.debug_numeric_summary_v2(
x,
tensor_debug_mode=(debug_event_pb2.TensorDebugMode.FULL_HEALTH),
tensor_id=x._id,
output_dtype=dtypes.float64)), x._id
    # Assert that the same op returns a consistent value.
x = np.zeros([100, 100], dtype=np.float16)
x[32, 47] = np.nan
x[0:4, 3] = np.inf
x[90:100, 90:100] = 10
x[3, 20] = -10
c = constant_op.constant(x)
tensor_1, tensor_id_1 = debug_summary(c)
tensor_2, tensor_id_2 = debug_summary(c)
self.assertAllEqual(tensor_1, tensor_2)
self.assertEqual(tensor_id_1, tensor_id_2)
x = np.ones((100, 200, 3, 10), np.double)
x[1, 30, 2] = 10
x[5, :, 0, 1] = np.nan
x[90:100, 150, :, :] = np.inf
c = constant_op.constant(x)
tensor_1, tensor_id_1 = debug_summary(c)
tensor_2, tensor_id_2 = debug_summary(c)
self.assertAllEqual(tensor_1, tensor_2)
self.assertEqual(tensor_id_1, tensor_id_2)
def testCheckNumericsV2OpNegativeAndPositiveInf(self):
"""Test that CheckNumericsV2 op distinguishes negative and positive infs."""
with self.session(graph=ops.Graph()):
t1 = constant_op.constant([-1.0, 1.0])
t2 = constant_op.constant([0.0, 0.0])
with self.assertRaisesRegex(
errors.InvalidArgumentError,
r"pass through test.*had -Inf and \+Inf values"):
self.evaluate(
array_ops.check_numerics_v2(t1 / t2, message="pass through test"))
def testCheckNumericsV2OpNegativeAndPositiveInfAndNaN(self):
"""CheckNumericsV2 op distinguishes - & + infs when nan is present."""
with self.session(graph=ops.Graph()):
t1 = constant_op.constant([-1.0, 1.0, 0.0])
t2 = constant_op.constant([0.0, 0.0, 0.0])
with self.assertRaisesRegex(
errors.InvalidArgumentError,
r"pass through test.*had -Inf, \+Inf, and NaN values"):
self.evaluate(
array_ops.check_numerics_v2(t1 / t2, message="pass through test"))
def testCheckNumericsV2PositiveInfAndNaN(self):
"""Test that CheckNumericsV2 op shows sign of inf when nan is present."""
with self.session(graph=ops.Graph()):
t1 = constant_op.constant([0.0, 1.0])
t2 = constant_op.constant([0.0, 0.0])
with self.assertRaisesRegex(
errors.InvalidArgumentError,
r"pass through test.*had \+Inf and NaN values"):
self.evaluate(
array_ops.check_numerics_v2(t1 / t2, message="pass through test"))
if __name__ == "__main__":
ops.enable_eager_execution()
googletest.main()
|
tensorflow/tensorflow
|
tensorflow/python/debug/lib/debug_v2_ops_test.py
|
Python
|
apache-2.0
| 31,247
|
# -*- coding: utf-8 -*-
"""
This module contains backports to support older Python versions.
It contains backported code originally developed for Python and is
therefore distributed under the PSF license, as follows:
PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
--------------------------------------------
1. This LICENSE AGREEMENT is between the Python Software Foundation
("PSF"), and the Individual or Organization ("Licensee") accessing and
otherwise using this software ("Python") in source or binary form and
its associated documentation.
2. Subject to the terms and conditions of this License Agreement, PSF hereby
grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce,
analyze, test, perform and/or display publicly, prepare derivative works,
distribute, and otherwise use Python alone or in any derivative version,
provided, however, that PSF's License Agreement and PSF's notice of copyright,
i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
2011, 2012, 2013, 2014 Python Software Foundation; All Rights Reserved" are
retained in Python alone or in any derivative version prepared by Licensee.
3. In the event Licensee prepares a derivative work that is based on
or incorporates Python or any part thereof, and wants to make
the derivative work available to others as provided herein, then
Licensee hereby agrees to include in any such work a brief summary of
the changes made to Python.
4. PSF is making Python available to Licensee on an "AS IS"
basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND
DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT
INFRINGE ANY THIRD PARTY RIGHTS.
5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON,
OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
6. This License Agreement will automatically terminate upon a material
breach of its terms and conditions.
7. Nothing in this License Agreement shall be deemed to create any
relationship of agency, partnership, or joint venture between PSF and
Licensee. This License Agreement does not grant permission to use PSF
trademarks or trade name in a trademark sense to endorse or promote
products or services of Licensee, or any third party.
8. By copying, installing or otherwise using Python, Licensee
agrees to be bound by the terms and conditions of this License
Agreement.
"""
#
# (C) Python Software Foundation, 2001-2014
# (C) with modifications from Pywikibot team, 2015
#
# Distributed under the terms of the PSF license.
#
from __future__ import unicode_literals
import logging
import warnings
def format_range_unified(start, stop):
"""
Convert range to the "ed" format.
Copied from C{difflib._format_range_unified()} which was introduced in
Python 2.7.2.
@see: https://hg.python.org/cpython/file/8527427914a2/Lib/difflib.py#l1147
"""
# Per the diff spec at http://www.unix.org/single_unix_specification/
beginning = start + 1 # lines start numbering with one
length = stop - start
if length == 1:
return '{0}'.format(beginning)
if not length:
beginning -= 1 # empty ranges begin at line just before the range
return '{0},{1}'.format(beginning, length)
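# A minimal doctest-style sketch (added for illustration, not part of the
# original difflib code): unified hunk headers use the '{start},{length}' form.
#
#     >>> format_range_unified(3, 4)   # single line -> just the line number
#     '4'
#     >>> format_range_unified(3, 6)   # starts at line 4, spans three lines
#     '4,3'
#     >>> format_range_unified(3, 3)   # empty range begins just before it
#     '3,0'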
# Logging/Warnings integration
_warnings_showwarning = None
class NullHandler(logging.Handler):
"""
This handler does nothing.
It's intended to be used to avoid the "No handlers could be found for
logger XXX" one-off warning. This is important for library code, which
may contain code to log events. If a user of the library does not configure
logging, the one-off warning might be produced; to avoid this, the library
developer simply needs to instantiate a NullHandler and add it to the
top-level logger of the library module or package.
Copied from C{logging.NullHandler} which was introduced in Python 2.7.
@see: http://bugs.python.org/issue4384
"""
def handle(self, record):
"""Dummy handling."""
pass
def emit(self, record):
"""Dummy handling."""
pass
def createLock(self):
"""Dummy handling."""
self.lock = None
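# Illustrative library-side usage (the logger name is hypothetical): attach
# the handler once so un-configured consumers never see the one-off warning.
#
#     import logging
#     logging.getLogger('mylibrary').addHandler(NullHandler())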
def _showwarning(message, category, filename, lineno, file=None, line=None):
"""
Implementation of showwarnings which redirects to logging.
It will first check to see if the file parameter is None. If a file is
specified, it will delegate to the original warnings implementation of
showwarning. Otherwise, it will call warnings.formatwarning and will log
the resulting string to a warnings logger named "py.warnings" with level
logging.WARNING.
Copied from C{logging._showwarning} which was introduced in Python 2.7.
@see: http://bugs.python.org/issue4384
"""
if file is not None:
if _warnings_showwarning is not None:
_warnings_showwarning(message, category, filename, lineno, file, line)
else:
s = warnings.formatwarning(message, category, filename, lineno, line)
logger = logging.getLogger("py.warnings")
if not logger.handlers:
logger.addHandler(NullHandler())
logger.warning("%s", s)
def captureWarnings(capture):
"""
Capture warnings into logging.
If capture is true, redirect all warnings to the logging package.
If capture is False, ensure that warnings are not redirected to logging
but to their original destinations.
Copied from C{logging.captureWarnings} which was introduced in Python 2.7.
@see: http://bugs.python.org/issue4384
"""
global _warnings_showwarning
if capture:
if _warnings_showwarning is None:
_warnings_showwarning = warnings.showwarning
warnings.showwarning = _showwarning
else:
if _warnings_showwarning is not None:
warnings.showwarning = _warnings_showwarning
_warnings_showwarning = None
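# Illustrative round trip (assumed usage, mirroring logging.captureWarnings):
#
#     captureWarnings(True)                 # warnings now log to "py.warnings"
#     warnings.warn('example deprecation')  # emitted via logger.warning(...)
#     captureWarnings(False)                # restore the original showwarning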
| valhallasw/pywikibot-core | pywikibot/backports.py | Python | mit | 6,204 |
import logging
import six
import warnings
from ..auth import auth
from ..constants import INSECURE_REGISTRY_DEPRECATION_WARNING
from .. import utils
from .. import errors
log = logging.getLogger(__name__)
class ImageApiMixin(object):
@utils.check_resource
def get_image(self, image):
res = self._get(self._url("/images/{0}/get", image), stream=True)
self._raise_for_status(res)
return res.raw
@utils.check_resource
def history(self, image):
res = self._get(self._url("/images/{0}/history", image))
return self._result(res, True)
def images(self, name=None, quiet=False, all=False, viz=False,
filters=None):
if viz:
if utils.compare_version('1.7', self._version) >= 0:
raise Exception('Viz output is not supported in API >= 1.7!')
            return self._result(self._get(self._url("/images/viz")))
params = {
'filter': name,
'only_ids': 1 if quiet else 0,
'all': 1 if all else 0,
}
if filters:
params['filters'] = utils.convert_filters(filters)
res = self._result(self._get(self._url("/images/json"), params=params),
True)
if quiet:
return [x['Id'] for x in res]
return res
def import_image(self, src=None, repository=None, tag=None, image=None):
if src:
if isinstance(src, six.string_types):
try:
result = self.import_image_from_file(
src, repository=repository, tag=tag)
except IOError:
result = self.import_image_from_url(
src, repository=repository, tag=tag)
else:
result = self.import_image_from_data(
src, repository=repository, tag=tag)
elif image:
result = self.import_image_from_image(
image, repository=repository, tag=tag)
else:
raise Exception("Must specify a src or image")
return result
def import_image_from_data(self, data, repository=None, tag=None):
u = self._url("/images/create")
params = {
'fromSrc': '-',
'repo': repository,
'tag': tag
}
headers = {
'Content-Type': 'application/tar',
}
return self._result(
self._post(u, data=data, params=params, headers=headers))
def import_image_from_file(self, filename, repository=None, tag=None):
u = self._url("/images/create")
params = {
'fromSrc': '-',
'repo': repository,
'tag': tag
}
headers = {
'Content-Type': 'application/tar',
}
with open(filename, 'rb') as f:
return self._result(
self._post(u, data=f, params=params, headers=headers,
timeout=None))
def import_image_from_stream(self, stream, repository=None, tag=None):
u = self._url("/images/create")
params = {
'fromSrc': '-',
'repo': repository,
'tag': tag
}
headers = {
'Content-Type': 'application/tar',
'Transfer-Encoding': 'chunked',
}
return self._result(
self._post(u, data=stream, params=params, headers=headers))
def import_image_from_url(self, url, repository=None, tag=None):
u = self._url("/images/create")
params = {
'fromSrc': url,
'repo': repository,
'tag': tag
}
return self._result(
self._post(u, data=None, params=params))
def import_image_from_image(self, image, repository=None, tag=None):
u = self._url("/images/create")
params = {
'fromImage': image,
'repo': repository,
'tag': tag
}
return self._result(
self._post(u, data=None, params=params))
@utils.check_resource
def insert(self, image, url, path):
if utils.compare_version('1.12', self._version) >= 0:
raise errors.DeprecatedMethod(
'insert is not available for API version >=1.12'
)
api_url = self._url("/images/{0}/insert", image)
params = {
'url': url,
'path': path
}
return self._result(self._post(api_url, params=params))
@utils.check_resource
def inspect_image(self, image):
return self._result(
self._get(self._url("/images/{0}/json", image)), True
)
def load_image(self, data):
res = self._post(self._url("/images/load"), data=data)
self._raise_for_status(res)
def pull(self, repository, tag=None, stream=False,
insecure_registry=False, auth_config=None):
if insecure_registry:
warnings.warn(
INSECURE_REGISTRY_DEPRECATION_WARNING.format('pull()'),
DeprecationWarning
)
if not tag:
repository, tag = utils.parse_repository_tag(repository)
registry, repo_name = auth.resolve_repository_name(repository)
params = {
'tag': tag,
'fromImage': repository
}
headers = {}
if utils.compare_version('1.5', self._version) >= 0:
# If we don't have any auth data so far, try reloading the config
# file one more time in case anything showed up in there.
if auth_config is None:
log.debug('Looking for auth config')
if not self._auth_configs:
log.debug(
"No auth config in memory - loading from filesystem"
)
self._auth_configs = auth.load_config()
authcfg = auth.resolve_authconfig(self._auth_configs, registry)
# Do not fail here if no authentication exists for this
# specific registry as we can have a readonly pull. Just
# put the header if we can.
if authcfg:
log.debug('Found auth config')
# auth_config needs to be a dict in the format used by
# auth.py username , password, serveraddress, email
headers['X-Registry-Auth'] = auth.encode_header(
authcfg
)
else:
log.debug('No auth config found')
else:
log.debug('Sending supplied auth config')
headers['X-Registry-Auth'] = auth.encode_header(auth_config)
response = self._post(
self._url('/images/create'), params=params, headers=headers,
stream=stream, timeout=None
)
self._raise_for_status(response)
if stream:
return self._stream_helper(response)
return self._result(response)
def push(self, repository, tag=None, stream=False,
insecure_registry=False):
if insecure_registry:
warnings.warn(
INSECURE_REGISTRY_DEPRECATION_WARNING.format('push()'),
DeprecationWarning
)
if not tag:
repository, tag = utils.parse_repository_tag(repository)
registry, repo_name = auth.resolve_repository_name(repository)
u = self._url("/images/{0}/push", repository)
params = {
'tag': tag
}
headers = {}
if utils.compare_version('1.5', self._version) >= 0:
# If we don't have any auth data so far, try reloading the config
# file one more time in case anything showed up in there.
if not self._auth_configs:
self._auth_configs = auth.load_config()
authcfg = auth.resolve_authconfig(self._auth_configs, registry)
# Do not fail here if no authentication exists for this specific
# registry as we can have a readonly pull. Just put the header if
# we can.
if authcfg:
headers['X-Registry-Auth'] = auth.encode_header(authcfg)
response = self._post_json(
u, None, headers=headers, stream=stream, params=params
)
self._raise_for_status(response)
if stream:
return self._stream_helper(response)
return self._result(response)
@utils.check_resource
def remove_image(self, image, force=False, noprune=False):
params = {'force': force, 'noprune': noprune}
res = self._delete(self._url("/images/{0}", image), params=params)
self._raise_for_status(res)
def search(self, term):
return self._result(
self._get(self._url("/images/search"), params={'term': term}),
True
)
@utils.check_resource
def tag(self, image, repository, tag=None, force=False):
params = {
'tag': tag,
'repo': repository,
'force': 1 if force else 0
}
url = self._url("/images/{0}/tag", image)
res = self._post(url, params=params)
self._raise_for_status(res)
return res.status_code == 201
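# Hypothetical client session (illustrative only; assumes the docker.Client
# class that mixed ImageApiMixin in, as docker-py did at the time):
#
#     from docker import Client
#     cli = Client(base_url='unix://var/run/docker.sock')
#     for chunk in cli.pull('busybox', tag='latest', stream=True):
#         print(chunk)
#     cli.tag('busybox', 'myrepo/busybox', tag='v1')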
| AccelAI/accel.ai | flask-aws/lib/python2.7/site-packages/docker/api/image.py | Python | mit | 9,342 |
try:
# Try using ez_setup to install setuptools if not already installed.
from ez_setup import use_setuptools
use_setuptools()
except ImportError:
# Ignore import error and assume Python 3 which already has setuptools.
pass
from setuptools import setup, find_packages
classifiers = ['Development Status :: 4 - Beta',
'Operating System :: POSIX :: Linux',
'License :: OSI Approved :: MIT License',
'Intended Audience :: Developers',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Topic :: Software Development',
'Topic :: System :: Hardware']
setup(name = 'Adafruit_MPR121',
version = '1.1.2',
author = 'Tony DiCola',
author_email = 'tdicola@adafruit.com',
description = 'Library for MPR121 capacitive touch sensor.',
license = 'MIT',
classifiers = classifiers,
url = 'https://github.com/adafruit/Adafruit_Python_MPR121/',
dependency_links = ['https://github.com/adafruit/Adafruit_Python_GPIO/tarball/master#egg=Adafruit-GPIO-0.7'],
install_requires = ['Adafruit-GPIO>=0.7'],
packages = find_packages())
| cwalk/CapacitiveTouchLamp | setup.py | Python | mit | 1,311 |
import sys
import urllib
import urlparse
import weakref
import datetime
import json
import xbmc
import xbmcaddon
import xbmcplugin
import xbmcvfs
from ..abstract_context import AbstractContext
from .xbmc_plugin_settings import XbmcPluginSettings
from .xbmc_context_ui import XbmcContextUI
from .xbmc_system_version import XbmcSystemVersion
from .xbmc_playlist import XbmcPlaylist
from .xbmc_player import XbmcPlayer
from ... import utils
class XbmcContext(AbstractContext):
def __init__(self, path='/', params=None, plugin_name=u'', plugin_id=u'', override=True):
AbstractContext.__init__(self, path, params, plugin_name, plugin_id)
if plugin_id:
self._addon = xbmcaddon.Addon(id=plugin_id)
else:
self._addon = xbmcaddon.Addon()
pass
self._system_version = None
"""
I don't know what xbmc/kodi is doing with a simple uri, but we have to extract the information from the
sys parameters and re-build our clean uri.
Also we extract the path and parameters - man, that would be so simple with the normal url-parsing routines.
"""
# first the path of the uri
if override:
self._uri = sys.argv[0]
comps = urlparse.urlparse(self._uri)
self._path = urllib.unquote(comps.path).decode('utf-8')
# after that try to get the params
if len(sys.argv) > 2:
params = sys.argv[2][1:]
if len(params) > 0:
self._uri = self._uri + '?' + params
self._params = {}
params = dict(urlparse.parse_qsl(params))
for _param in params:
item = params[_param]
self._params[_param] = item.decode('utf-8')
pass
pass
pass
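        # Illustrative example (hypothetical invocation): with
        #   sys.argv = ['plugin://plugin.video.example/play/', '1', '?id=42']
        # the code above yields _path == u'/play/' and _params == {u'id': u'42'}.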
self._ui = None
self._video_playlist = None
self._audio_playlist = None
self._video_player = None
self._audio_player = None
self._plugin_handle = int(sys.argv[1]) if len(sys.argv) > 1 else None
self._plugin_id = plugin_id or self._addon.getAddonInfo('id')
self._plugin_name = plugin_name or self._addon.getAddonInfo('name')
self._version = self._addon.getAddonInfo('version')
self._native_path = xbmc.translatePath(self._addon.getAddonInfo('path'))
self._settings = XbmcPluginSettings(self._addon)
"""
Set the data path for this addon and create the folder
"""
self._data_path = xbmc.translatePath('special://profile/addon_data/%s' % self._plugin_id)
if isinstance(self._data_path, str):
self._data_path = self._data_path.decode('utf-8')
pass
if not xbmcvfs.exists(self._data_path):
xbmcvfs.mkdir(self._data_path)
pass
pass
def format_date_short(self, date_obj):
date_format = xbmc.getRegion('dateshort')
_date_obj = date_obj
if isinstance(_date_obj, datetime.date):
_date_obj = datetime.datetime(_date_obj.year, _date_obj.month, _date_obj.day)
pass
return _date_obj.strftime(date_format)
def format_time(self, time_obj):
time_format = xbmc.getRegion('time')
_time_obj = time_obj
if isinstance(_time_obj, datetime.time):
_time_obj = datetime.time(_time_obj.hour, _time_obj.minute, _time_obj.second)
pass
return _time_obj.strftime(time_format)
def get_language(self):
"""
        The xbmc.getLanguage() method is broken across Kodi versions, so we
        always return 'en-US' for now.
"""
return 'en-US'
"""
if self.get_system_version().get_release_name() == 'Frodo':
return 'en-US'
try:
language = xbmc.getLanguage(0, region=True)
language = language.split('-')
language = '%s-%s' % (language[0].lower(), language[1].upper())
return language
except Exception, ex:
self.log_error('Failed to get system language (%s)', ex.__str__())
return 'en-US'
pass
"""
def get_system_version(self):
if not self._system_version:
self._system_version = XbmcSystemVersion(version='', releasename='', appname='')
pass
return self._system_version
def get_video_playlist(self):
if not self._video_playlist:
self._video_playlist = XbmcPlaylist('video', weakref.proxy(self))
pass
return self._video_playlist
def get_audio_playlist(self):
if not self._audio_playlist:
self._audio_playlist = XbmcPlaylist('audio', weakref.proxy(self))
pass
return self._audio_playlist
def get_video_player(self):
if not self._video_player:
self._video_player = XbmcPlayer('video', weakref.proxy(self))
pass
return self._video_player
def get_audio_player(self):
if not self._audio_player:
self._audio_player = XbmcPlayer('audio', weakref.proxy(self))
pass
return self._audio_player
def get_ui(self):
if not self._ui:
self._ui = XbmcContextUI(self._addon, weakref.proxy(self))
pass
return self._ui
def get_handle(self):
return self._plugin_handle
def get_data_path(self):
return self._data_path
def get_native_path(self):
return self._native_path
def get_settings(self):
return self._settings
def localize(self, text_id, default_text=u''):
if isinstance(text_id, int):
"""
            We deliberately use the full range of localization strings.
            Add-ons should only use the range 30000 through 30999
            (see: http://kodi.wiki/view/Language_support), but some of the
            localized strings for skin views are useful here as well.
"""
if text_id >= 0 and (text_id < 30000 or text_id > 30999):
result = xbmc.getLocalizedString(text_id)
if result is not None and result:
return utils.to_unicode(result)
pass
pass
result = self._addon.getLocalizedString(int(text_id))
if result is not None and result:
return utils.to_unicode(result)
return utils.to_unicode(default_text)
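    # Illustrative calls (the string IDs are hypothetical):
    #
    #     context.localize(117)                 # core Kodi string via xbmc
    #     context.localize(30500, u'Fallback')  # add-on string catalogue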
def set_content_type(self, content_type):
self.log_debug('Setting content-type: "%s" for "%s"' % (content_type, self.get_path()))
xbmcplugin.setContent(self._plugin_handle, content_type)
pass
def add_sort_method(self, *sort_methods):
for sort_method in sort_methods:
xbmcplugin.addSortMethod(self._plugin_handle, sort_method)
pass
pass
def clone(self, new_path=None, new_params=None):
if not new_path:
new_path = self.get_path()
pass
if not new_params:
new_params = self.get_params()
pass
new_context = XbmcContext(path=new_path, params=new_params, plugin_name=self._plugin_name,
plugin_id=self._plugin_id, override=False)
new_context._function_cache = self._function_cache
new_context._search_history = self._search_history
new_context._favorite_list = self._favorite_list
new_context._watch_later_list = self._watch_later_list
new_context._access_manager = self._access_manager
new_context._ui = self._ui
new_context._video_playlist = self._video_playlist
new_context._video_player = self._video_player
return new_context
def execute(self, command):
xbmc.executebuiltin(command)
pass
def sleep(self, milli_seconds):
xbmc.sleep(milli_seconds)
pass
def addon_enabled(self, addon_id):
rpc_request = json.dumps({"jsonrpc": "2.0",
"method": "Addons.GetAddonDetails",
"id": 1,
"params": {"addonid": "%s" % addon_id,
"properties": ["enabled"]}
})
response = json.loads(xbmc.executeJSONRPC(rpc_request))
try:
return response['result']['addon']['enabled'] is True
except KeyError:
message = response['error']['message']
code = response['error']['code']
error = 'Requested |%s| received error |%s| and code: |%s|' % (rpc_request, message, code)
xbmc.log(error, xbmc.LOGDEBUG)
return False
def set_addon_enabled(self, addon_id, enabled=True):
rpc_request = json.dumps({"jsonrpc": "2.0",
"method": "Addons.SetAddonEnabled",
"id": 1,
"params": {"addonid": "%s" % addon_id,
"enabled": enabled}
})
response = json.loads(xbmc.executeJSONRPC(rpc_request))
try:
return response['result'] == 'OK'
except KeyError:
message = response['error']['message']
code = response['error']['code']
error = 'Requested |%s| received error |%s| and code: |%s|' % (rpc_request, message, code)
xbmc.log(error, xbmc.LOGDEBUG)
return False
| guidosarducci/plugin.video.youtube | resources/lib/kodion/impl/xbmc/xbmc_context.py | Python | gpl-2.0 | 9,558 |
# -*- coding: utf-8 -*-
# Copyright (c) 2017, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe, json
from frappe.model.document import Document
class SetupProgress(Document):
pass
def get_setup_progress():
if not getattr(frappe.local, "setup_progress", None):
frappe.local.setup_progress = frappe.get_doc("Setup Progress", "Setup Progress")
return frappe.local.setup_progress
def get_action_completed_state(action_name):
for d in get_setup_progress().actions:
if d.action_name == action_name:
return d.is_completed
def update_action_completed_state(action_name):
action_table_doc = [d for d in get_setup_progress().actions
if d.action_name == action_name][0]
update_action(action_table_doc)
def update_action(doc):
doctype = doc.action_doctype
docname = doc.action_document
field = doc.action_field
if not doc.is_completed:
if doc.min_doc_count:
if frappe.db.count(doctype) >= doc.min_doc_count:
doc.is_completed = 1
doc.save()
if docname and field:
d = frappe.get_doc(doctype, docname)
if d.get(field):
doc.is_completed = 1
doc.save()
def update_domain_actions(domain):
for d in get_setup_progress().actions:
domains = json.loads(d.domains)
if domains == [] or domain in domains:
update_action(d)
def get_domain_actions_state(domain):
state = {}
for d in get_setup_progress().actions:
domains = json.loads(d.domains)
if domains == [] or domain in domains:
state[d.action_name] = d.is_completed
return state
@frappe.whitelist()
def set_action_completed_state(action_name):
action_table_doc = [d for d in get_setup_progress().actions
if d.action_name == action_name][0]
action_table_doc.is_completed = 1
action_table_doc.save()
| ovresko/erpnext | erpnext/setup/doctype/setup_progress/setup_progress.py | Python | gpl-3.0 | 1,811 |
#!/usr/bin/env python
#
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# TODO(brettw) bug 582594: merge this with build/android/gn/zip.py and update
# callers to use the existing template rather than invoking this directly.
"""Archives a set of files.
"""
import optparse
import os
import sys
import zipfile
sys.path.append(os.path.join(os.path.dirname(__file__),
os.pardir, os.pardir, os.pardir, os.pardir,
"build"))
import gn_helpers
sys.path.append(os.path.join(os.path.dirname(__file__),
os.pardir, os.pardir, os.pardir, os.pardir,
'build', 'android', 'gyp'))
from util import build_utils
def DoZip(inputs, link_inputs, zip_inputs, output, base_dir):
files = []
with zipfile.ZipFile(output, 'w', zipfile.ZIP_DEFLATED) as outfile:
for f in inputs:
file_name = os.path.relpath(f, base_dir)
files.append(file_name)
build_utils.AddToZipHermetic(outfile, file_name, f)
for f in link_inputs:
realf = os.path.realpath(f) # Resolve symlinks.
file_name = os.path.relpath(realf, base_dir)
files.append(file_name)
build_utils.AddToZipHermetic(outfile, file_name, realf)
for zf_name in zip_inputs:
with zipfile.ZipFile(zf_name, 'r') as zf:
for f in zf.namelist():
if f not in files:
files.append(f)
build_utils.AddToZipHermetic(outfile, f, data=zf.read(f))
def main():
parser = optparse.OptionParser()
parser.add_option('--inputs',
                    help='GN-format list of files to archive.')
parser.add_option('--link-inputs',
help='GN-format list of files to archive. Symbolic links are resolved.')
parser.add_option('--zip-inputs',
help='GN-format list of zip files to re-archive.')
parser.add_option('--output', help='Path to output archive.')
parser.add_option('--base-dir',
help='If provided, the paths in the archive will be '
'relative to this directory', default='.')
options, _ = parser.parse_args()
inputs = []
  if options.inputs:
parser = gn_helpers.GNValueParser(options.inputs)
inputs = parser.ParseList()
link_inputs = []
if options.link_inputs:
parser = gn_helpers.GNValueParser(options.link_inputs)
link_inputs = parser.ParseList()
zip_inputs = []
if options.zip_inputs:
parser = gn_helpers.GNValueParser(options.zip_inputs)
zip_inputs = parser.ParseList()
output = options.output
base_dir = options.base_dir
DoZip(inputs, link_inputs, zip_inputs, output, base_dir)
if __name__ == '__main__':
sys.exit(main())
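# Example invocation (hypothetical file names; the list options take
# GN-format lists, as parsed by gn_helpers.GNValueParser above):
#
#     zip.py --inputs='["a.txt", "sub/b.txt"]' \
#            --zip-inputs='["extra.zip"]' \
#            --base-dir=. --output=out.zip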
| geminy/aidear | oss/qt/qt-everywhere-opensource-src-5.9.0/qtwebengine/src/3rdparty/chromium/mojo/public/tools/gn/zip.py | Python | gpl-3.0 | 2,774 |
# -*- coding: utf-8 -*-
# Copyright(C) 2012 Romain Bignon
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from decimal import Decimal
from weboob.deprecated.browser import Page
from weboob.capabilities import NotAvailable
from weboob.capabilities.pricecomparison import Product, Shop, Price
class IndexPage(Page):
def get_token(self):
input = self.parser.select(self.document.getroot(), 'div#localisation input#recherche_recherchertype__token', 1)
return input.attrib['value']
def iter_products(self):
for li in self.parser.select(self.document.getroot(), 'div#choix_carbu ul li'):
input = li.find('input')
label = li.find('label')
product = Product(input.attrib['value'])
product.name = unicode(label.text.strip())
if '&' in product.name:
# "E10 & SP95" produces a non-supported table.
continue
yield product
class ComparisonResultsPage(Page):
def get_product_name(self):
th = self.document.getroot().cssselect('table#tab_resultat tr th')
if th and len(th) == 9:
return u'%s' % th[5].find('a').text
def iter_results(self, product=None):
price = None
product.name = self.get_product_name()
for tr in self.document.getroot().cssselect('table#tab_resultat tr'):
tds = self.parser.select(tr, 'td')
if tds and len(tds) == 9 and product is not None:
price = Price('%s.%s' % (product.id, tr.attrib['id']))
price.product = product
price.cost = Decimal(tds[5].text.replace(',', '.'))
price.currency = u'€'
shop = Shop(price.id)
shop.name = unicode(tds[3].text.strip())
shop.location = unicode(tds[2].text.strip())
price.shop = shop
price.set_empty_fields(NotAvailable)
yield price
class ShopInfoPage(Page):
def get_info(self):
return self.parser.tostring(self.parser.select(self.document.getroot(), 'div.infos', 1))
| laurent-george/weboob | modules/prixcarburants/pages.py | Python | agpl-3.0 | 2,744 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
##
# myapp.urls
##
##
# Copyright (C) $YEAR$, $AUTHOR_NAME$ <$AUTHOR_EMAIL$>
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of version 3 of the GNU Affero General Public License as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this source code; if not, see <http://www.gnu.org/licenses/>,
# or write to
#
# Free Software Foundation, Inc.
# 51 Franklin Street, Fifth Floor
# Boston, MA 02110-1301 USA
##
##
# End of File
##
| maaku/django-reuse | templates/apps/basic/myapp/urls.py | Python | agpl-3.0 | 886 |
# -*- coding: utf-8 -*-
from . import sale
from . import magento_model
| acsone/connector-magento | magentoerpconnect_order_comment/__init__.py | Python | agpl-3.0 | 72 |
'''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.GL import _types as _cs
# End users want this...
from OpenGL.raw.GL._types import *
from OpenGL.raw.GL import _errors
from OpenGL.constant import Constant as _C
import ctypes
_EXTENSION_NAME = 'GL_SGIX_shadow'
def _f( function ):
return _p.createFunction( function,_p.PLATFORM.GL,'GL_SGIX_shadow',error_checker=_errors._error_checker)
GL_TEXTURE_COMPARE_OPERATOR_SGIX=_C('GL_TEXTURE_COMPARE_OPERATOR_SGIX',0x819B)
GL_TEXTURE_COMPARE_SGIX=_C('GL_TEXTURE_COMPARE_SGIX',0x819A)
GL_TEXTURE_GEQUAL_R_SGIX=_C('GL_TEXTURE_GEQUAL_R_SGIX',0x819D)
GL_TEXTURE_LEQUAL_R_SGIX=_C('GL_TEXTURE_LEQUAL_R_SGIX',0x819C)
| stack-of-tasks/rbdlpy | tutorial/lib/python2.7/site-packages/OpenGL/raw/GL/SGIX/shadow.py | Python | lgpl-3.0 | 750 |
"""
SAX driver for the pyexpat C module. This driver works with
pyexpat.__version__ == '2.22'.
"""
version = "0.20"
from xml.sax._exceptions import *
from xml.sax.handler import feature_validation, feature_namespaces
from xml.sax.handler import feature_namespace_prefixes
from xml.sax.handler import feature_external_ges, feature_external_pes
from xml.sax.handler import feature_string_interning
from xml.sax.handler import property_xml_string, property_interning_dict
# xml.parsers.expat does not raise ImportError in Jython
import sys
if sys.platform[:4] == "java":
raise SAXReaderNotAvailable("expat not available in Java", None)
del sys
try:
from xml.parsers import expat
except ImportError:
raise SAXReaderNotAvailable("expat not supported", None)
else:
if not hasattr(expat, "ParserCreate"):
raise SAXReaderNotAvailable("expat not supported", None)
from xml.sax import xmlreader, saxutils, handler
AttributesImpl = xmlreader.AttributesImpl
AttributesNSImpl = xmlreader.AttributesNSImpl
# If we're using a sufficiently recent version of Python, we can use
# weak references to avoid cycles between the parser and content
# handler, otherwise we'll just have to pretend.
try:
import _weakref
except ImportError:
def _mkproxy(o):
return o
else:
import weakref
_mkproxy = weakref.proxy
del weakref, _weakref
class _ClosedParser:
pass
# --- ExpatLocator
class ExpatLocator(xmlreader.Locator):
"""Locator for use with the ExpatParser class.
This uses a weak reference to the parser object to avoid creating
a circular reference between the parser and the content handler.
"""
def __init__(self, parser):
self._ref = _mkproxy(parser)
def getColumnNumber(self):
parser = self._ref
if parser._parser is None:
return None
return parser._parser.ErrorColumnNumber
def getLineNumber(self):
parser = self._ref
if parser._parser is None:
return 1
return parser._parser.ErrorLineNumber
def getPublicId(self):
parser = self._ref
if parser is None:
return None
return parser._source.getPublicId()
def getSystemId(self):
parser = self._ref
if parser is None:
return None
return parser._source.getSystemId()
# --- ExpatParser
class ExpatParser(xmlreader.IncrementalParser, xmlreader.Locator):
"""SAX driver for the pyexpat C module."""
def __init__(self, namespaceHandling=0, bufsize=2**16-20):
xmlreader.IncrementalParser.__init__(self, bufsize)
self._source = xmlreader.InputSource()
self._parser = None
self._namespaces = namespaceHandling
self._lex_handler_prop = None
self._parsing = 0
self._entity_stack = []
self._external_ges = 1
self._interning = None
# XMLReader methods
def parse(self, source):
"Parse an XML document from a URL or an InputSource."
source = saxutils.prepare_input_source(source)
self._source = source
try:
self.reset()
self._cont_handler.setDocumentLocator(ExpatLocator(self))
xmlreader.IncrementalParser.parse(self, source)
except:
# bpo-30264: Close the source on error to not leak resources:
# xml.sax.parse() doesn't give access to the underlying parser
# to the caller
self._close_source()
raise
def prepareParser(self, source):
if source.getSystemId() is not None:
self._parser.SetBase(source.getSystemId())
# Redefined setContentHandler to allow changing handlers during parsing
def setContentHandler(self, handler):
xmlreader.IncrementalParser.setContentHandler(self, handler)
if self._parsing:
self._reset_cont_handler()
def getFeature(self, name):
if name == feature_namespaces:
return self._namespaces
elif name == feature_string_interning:
return self._interning is not None
elif name in (feature_validation, feature_external_pes,
feature_namespace_prefixes):
return 0
elif name == feature_external_ges:
return self._external_ges
raise SAXNotRecognizedException("Feature '%s' not recognized" % name)
def setFeature(self, name, state):
if self._parsing:
raise SAXNotSupportedException("Cannot set features while parsing")
if name == feature_namespaces:
self._namespaces = state
elif name == feature_external_ges:
self._external_ges = state
elif name == feature_string_interning:
if state:
if self._interning is None:
self._interning = {}
else:
self._interning = None
elif name == feature_validation:
if state:
raise SAXNotSupportedException(
"expat does not support validation")
elif name == feature_external_pes:
if state:
raise SAXNotSupportedException(
"expat does not read external parameter entities")
elif name == feature_namespace_prefixes:
if state:
raise SAXNotSupportedException(
"expat does not report namespace prefixes")
else:
raise SAXNotRecognizedException(
"Feature '%s' not recognized" % name)
def getProperty(self, name):
if name == handler.property_lexical_handler:
return self._lex_handler_prop
elif name == property_interning_dict:
return self._interning
elif name == property_xml_string:
if self._parser:
if hasattr(self._parser, "GetInputContext"):
return self._parser.GetInputContext()
else:
raise SAXNotRecognizedException(
"This version of expat does not support getting"
" the XML string")
else:
raise SAXNotSupportedException(
"XML string cannot be returned when not parsing")
raise SAXNotRecognizedException("Property '%s' not recognized" % name)
def setProperty(self, name, value):
if name == handler.property_lexical_handler:
self._lex_handler_prop = value
if self._parsing:
self._reset_lex_handler_prop()
elif name == property_interning_dict:
self._interning = value
elif name == property_xml_string:
raise SAXNotSupportedException("Property '%s' cannot be set" %
name)
else:
raise SAXNotRecognizedException("Property '%s' not recognized" %
name)
# IncrementalParser methods
def feed(self, data, isFinal = 0):
if not self._parsing:
self.reset()
self._parsing = 1
self._cont_handler.startDocument()
try:
# The isFinal parameter is internal to the expat reader.
# If it is set to true, expat will check validity of the entire
# document. When feeding chunks, they are not normally final -
# except when invoked from close.
self._parser.Parse(data, isFinal)
except expat.error as e:
exc = SAXParseException(expat.ErrorString(e.code), e, self)
# FIXME: when to invoke error()?
self._err_handler.fatalError(exc)
def _close_source(self):
source = self._source
try:
file = source.getCharacterStream()
if file is not None:
file.close()
finally:
file = source.getByteStream()
if file is not None:
file.close()
def close(self):
if (self._entity_stack or self._parser is None or
isinstance(self._parser, _ClosedParser)):
# If we are completing an external entity, do nothing here
return
try:
self.feed("", isFinal = 1)
self._cont_handler.endDocument()
self._parsing = 0
# break cycle created by expat handlers pointing to our methods
self._parser = None
finally:
self._parsing = 0
if self._parser is not None:
# Keep ErrorColumnNumber and ErrorLineNumber after closing.
parser = _ClosedParser()
parser.ErrorColumnNumber = self._parser.ErrorColumnNumber
parser.ErrorLineNumber = self._parser.ErrorLineNumber
self._parser = parser
self._close_source()
def _reset_cont_handler(self):
self._parser.ProcessingInstructionHandler = \
self._cont_handler.processingInstruction
self._parser.CharacterDataHandler = self._cont_handler.characters
def _reset_lex_handler_prop(self):
lex = self._lex_handler_prop
parser = self._parser
if lex is None:
parser.CommentHandler = None
parser.StartCdataSectionHandler = None
parser.EndCdataSectionHandler = None
parser.StartDoctypeDeclHandler = None
parser.EndDoctypeDeclHandler = None
else:
parser.CommentHandler = lex.comment
parser.StartCdataSectionHandler = lex.startCDATA
parser.EndCdataSectionHandler = lex.endCDATA
parser.StartDoctypeDeclHandler = self.start_doctype_decl
parser.EndDoctypeDeclHandler = lex.endDTD
def reset(self):
if self._namespaces:
self._parser = expat.ParserCreate(self._source.getEncoding(), " ",
intern=self._interning)
self._parser.namespace_prefixes = 1
self._parser.StartElementHandler = self.start_element_ns
self._parser.EndElementHandler = self.end_element_ns
else:
self._parser = expat.ParserCreate(self._source.getEncoding(),
intern = self._interning)
self._parser.StartElementHandler = self.start_element
self._parser.EndElementHandler = self.end_element
self._reset_cont_handler()
self._parser.UnparsedEntityDeclHandler = self.unparsed_entity_decl
self._parser.NotationDeclHandler = self.notation_decl
self._parser.StartNamespaceDeclHandler = self.start_namespace_decl
self._parser.EndNamespaceDeclHandler = self.end_namespace_decl
self._decl_handler_prop = None
if self._lex_handler_prop:
self._reset_lex_handler_prop()
# self._parser.DefaultHandler =
# self._parser.DefaultHandlerExpand =
# self._parser.NotStandaloneHandler =
self._parser.ExternalEntityRefHandler = self.external_entity_ref
try:
self._parser.SkippedEntityHandler = self.skipped_entity_handler
except AttributeError:
# This pyexpat does not support SkippedEntity
pass
self._parser.SetParamEntityParsing(
expat.XML_PARAM_ENTITY_PARSING_UNLESS_STANDALONE)
self._parsing = 0
self._entity_stack = []
# Locator methods
def getColumnNumber(self):
if self._parser is None:
return None
return self._parser.ErrorColumnNumber
def getLineNumber(self):
if self._parser is None:
return 1
return self._parser.ErrorLineNumber
def getPublicId(self):
return self._source.getPublicId()
def getSystemId(self):
return self._source.getSystemId()
# event handlers
def start_element(self, name, attrs):
self._cont_handler.startElement(name, AttributesImpl(attrs))
def end_element(self, name):
self._cont_handler.endElement(name)
def start_element_ns(self, name, attrs):
pair = name.split()
if len(pair) == 1:
# no namespace
pair = (None, name)
elif len(pair) == 3:
pair = pair[0], pair[1]
else:
# default namespace
pair = tuple(pair)
newattrs = {}
qnames = {}
for (aname, value) in attrs.items():
parts = aname.split()
length = len(parts)
if length == 1:
# no namespace
qname = aname
apair = (None, aname)
elif length == 3:
qname = "%s:%s" % (parts[2], parts[1])
apair = parts[0], parts[1]
else:
# default namespace
qname = parts[1]
apair = tuple(parts)
newattrs[apair] = value
qnames[apair] = qname
self._cont_handler.startElementNS(pair, None,
AttributesNSImpl(newattrs, qnames))
def end_element_ns(self, name):
pair = name.split()
if len(pair) == 1:
pair = (None, name)
elif len(pair) == 3:
pair = pair[0], pair[1]
else:
pair = tuple(pair)
self._cont_handler.endElementNS(pair, None)
# this is not used (call directly to ContentHandler)
def processing_instruction(self, target, data):
self._cont_handler.processingInstruction(target, data)
# this is not used (call directly to ContentHandler)
def character_data(self, data):
self._cont_handler.characters(data)
def start_namespace_decl(self, prefix, uri):
self._cont_handler.startPrefixMapping(prefix, uri)
def end_namespace_decl(self, prefix):
self._cont_handler.endPrefixMapping(prefix)
def start_doctype_decl(self, name, sysid, pubid, has_internal_subset):
self._lex_handler_prop.startDTD(name, pubid, sysid)
def unparsed_entity_decl(self, name, base, sysid, pubid, notation_name):
self._dtd_handler.unparsedEntityDecl(name, pubid, sysid, notation_name)
def notation_decl(self, name, base, sysid, pubid):
self._dtd_handler.notationDecl(name, pubid, sysid)
def external_entity_ref(self, context, base, sysid, pubid):
if not self._external_ges:
return 1
source = self._ent_handler.resolveEntity(pubid, sysid)
source = saxutils.prepare_input_source(source,
self._source.getSystemId() or
"")
self._entity_stack.append((self._parser, self._source))
self._parser = self._parser.ExternalEntityParserCreate(context)
self._source = source
try:
xmlreader.IncrementalParser.parse(self, source)
except:
return 0 # FIXME: save error info here?
(self._parser, self._source) = self._entity_stack[-1]
del self._entity_stack[-1]
return 1
def skipped_entity_handler(self, name, is_pe):
if is_pe:
# The SAX spec requires to report skipped PEs with a '%'
name = '%'+name
self._cont_handler.skippedEntity(name)
# ---
def create_parser(*args, **kwargs):
return ExpatParser(*args, **kwargs)
# ---
if __name__ == "__main__":
import xml.sax.saxutils
p = create_parser()
p.setContentHandler(xml.sax.saxutils.XMLGenerator())
p.setErrorHandler(xml.sax.ErrorHandler())
p.parse("http://www.ibiblio.org/xml/examples/shakespeare/hamlet.xml")
| batermj/algorithm-challenger | code-analysis/programming_anguage/python/source_codes/Python3.5.9/Python-3.5.9/Lib/xml/sax/expatreader.py | Python | apache-2.0 | 15,704 |
# Copyright 2012 Nebula, Inc.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import re
from oslo_serialization import jsonutils
import six
from nova import test
from nova.tests.functional import integrated_helpers
class NoMatch(test.TestingException):
pass
class ApiSampleTestBase(integrated_helpers._IntegratedTestBase):
ctype = 'json'
all_extensions = False
extension_name = None
sample_dir = None
request_api_version = None
_use_common_server_api_samples = False
def _pretty_data(self, data):
data = jsonutils.dumps(jsonutils.loads(data), sort_keys=True,
indent=4)
return '\n'.join(line.rstrip() for line in data.split('\n')).strip()
def _objectify(self, data):
if not data:
return {}
# NOTE(vish): allow non-quoted replacements to survive json
data = re.sub(r'([^"])%\((.+)\)s([^"])', r'\1"%(int:\2)s"\3', data)
return jsonutils.loads(data)
@classmethod
def _get_sample_path(cls, name, dirname, suffix='', api_version=None):
parts = [dirname]
parts.append('api_samples')
if cls.all_extensions:
parts.append('all_extensions')
# Note(gmann): if _use_common_server_api_samples is set to True
# then common server sample files present in 'servers' directory
# will be used. As of now it is being used for server POST request
# to avoid duplicate copy of server req and resp sample files.
# Example - ServersSampleBase's _post_server method.
elif cls._use_common_server_api_samples:
parts.append('servers')
else:
if cls.sample_dir:
parts.append(cls.sample_dir)
elif cls.extension_name:
parts.append(cls.extension_name)
if api_version:
parts.append('v' + api_version)
parts.append(name + "." + cls.ctype + suffix)
return os.path.join(*parts)
@classmethod
def _get_sample(cls, name, api_version=None):
dirname = os.path.dirname(os.path.abspath(__file__))
dirname = os.path.normpath(os.path.join(dirname,
"../../../doc"))
return cls._get_sample_path(name, dirname, api_version=api_version)
@classmethod
def _get_template(cls, name, api_version=None):
dirname = os.path.dirname(os.path.abspath(__file__))
dirname = os.path.normpath(os.path.join(dirname,
"./api_sample_tests"))
return cls._get_sample_path(name, dirname, suffix='.tpl',
api_version=api_version)
def _read_template(self, name):
template = self._get_template(name, self.request_api_version)
with open(template) as inf:
return inf.read().strip()
def _write_template(self, name, data):
with open(self._get_template(name,
self.request_api_version), 'w') as outf:
outf.write(data)
def _write_sample(self, name, data):
with open(self._get_sample(
name, self.request_api_version), 'w') as outf:
outf.write(data)
def _compare_result(self, subs, expected, result, result_str):
matched_value = None
if isinstance(expected, dict):
if not isinstance(result, dict):
raise NoMatch('%(result_str)s: %(result)s is not a dict.'
% {'result_str': result_str, 'result': result})
ex_keys = sorted(expected.keys())
res_keys = sorted(result.keys())
if ex_keys != res_keys:
ex_delta = []
res_delta = []
for key in ex_keys:
if key not in res_keys:
ex_delta.append(key)
for key in res_keys:
if key not in ex_keys:
res_delta.append(key)
raise NoMatch(
'Dictionary key mismatch:\n'
'Extra key(s) in template:\n%(ex_delta)s\n'
'Extra key(s) in %(result_str)s:\n%(res_delta)s\n' %
{'ex_delta': ex_delta, 'result_str': result_str,
'res_delta': res_delta})
for key in ex_keys:
res = self._compare_result(subs, expected[key], result[key],
result_str)
matched_value = res or matched_value
elif isinstance(expected, list):
if not isinstance(result, list):
raise NoMatch(
'%(result_str)s: %(result)s is not a list.' %
{'result_str': result_str, 'result': result})
expected = expected[:]
extra = []
for res_obj in result:
for i, ex_obj in enumerate(expected):
try:
matched_value = self._compare_result(subs, ex_obj,
res_obj,
result_str)
del expected[i]
break
except NoMatch:
pass
else:
extra.append(res_obj)
error = []
if expected:
error.append('Extra list items in template:')
error.extend([repr(o) for o in expected])
if extra:
error.append('Extra list items in %(result_str)s:' %
{'result_str': result_str})
error.extend([repr(o) for o in extra])
if error:
raise NoMatch('\n'.join(error))
elif isinstance(expected, six.string_types) and '%' in expected:
# NOTE(vish): escape stuff for regex
for char in '[]<>?':
expected = expected.replace(char, '\\%s' % char)
# NOTE(vish): special handling of subs that are not quoted. We are
# expecting an int but we had to pass in a string
# so the json would parse properly.
if expected.startswith("%(int:"):
result = str(result)
expected = expected.replace('int:', '')
expected = expected % subs
expected = '^%s$' % expected
match = re.match(expected, result)
if not match:
raise NoMatch(
'Values do not match:\n'
'Template: %(expected)s\n%(result_str)s: %(result)s' %
{'expected': expected, 'result_str': result_str,
'result': result})
try:
matched_value = match.group('id')
except IndexError:
if match.groups():
matched_value = match.groups()[0]
else:
if isinstance(expected, six.string_types):
# NOTE(danms): Ignore whitespace in this comparison
expected = expected.strip()
if isinstance(result, six.string_types):
result = result.strip()
if expected != result:
# NOTE(tdurakov):this attempt to parse string as JSON
# is needed for correct comparison of hypervisor.cpu_info,
# which is stringified JSON object
#
# TODO(tdurakov): remove this check as soon as
# hypervisor.cpu_info become common JSON object in REST API.
try:
expected = self._objectify(expected)
result = self._objectify(result)
return self._compare_result(subs, expected, result,
result_str)
except ValueError:
pass
raise NoMatch(
'Values do not match:\n'
'Template: %(expected)s\n%(result_str)s: '
'%(result)s' % {'expected': expected,
'result_str': result_str,
'result': result})
return matched_value
def generalize_subs(self, subs, vanilla_regexes):
"""Give the test a chance to modify subs after the server response
was verified, and before the on-disk doc/api_samples file is checked.
This may be needed by some tests to convert exact matches expected
from the server into pattern matches to verify what is in the
sample file.
If there are no changes to be made, subs is returned unharmed.
"""
return subs
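    # Hypothetical override sketch (illustrative only; the subclass and the
    # substitution key are invented): swap an exact server value for the
    # generic pattern before the on-disk sample comparison runs.
    #
    #     class MySampleTest(ApiSampleTestBase):
    #         def generalize_subs(self, subs, vanilla_regexes):
    #             subs['hypervisor_hostname'] = vanilla_regexes['host_name']
    #             return subs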
def _verify_response(self, name, subs, response, exp_code):
self.assertEqual(response.status_code, exp_code)
response_data = response.content
response_data = self._pretty_data(response_data)
if not os.path.exists(self._get_template(name,
self.request_api_version)):
self._write_template(name, response_data)
template_data = response_data
else:
template_data = self._read_template(name)
if (self.generate_samples and
not os.path.exists(self._get_sample(
name, self.request_api_version))):
self._write_sample(name, response_data)
sample_data = response_data
else:
            with open(self._get_sample(name,
                                       self.request_api_version)) as sample:
sample_data = sample.read()
try:
template_data = self._objectify(template_data)
response_data = self._objectify(response_data)
response_result = self._compare_result(subs, template_data,
response_data, "Response")
# NOTE(danms): replace some of the subs with patterns for the
# doc/api_samples check, which won't have things like the
# correct compute host name. Also let the test do some of its
# own generalization, if necessary
vanilla_regexes = self._get_regexes()
subs['compute_host'] = vanilla_regexes['host_name']
subs['id'] = vanilla_regexes['id']
subs = self.generalize_subs(subs, vanilla_regexes)
sample_data = self._objectify(sample_data)
self._compare_result(subs, template_data, sample_data, "Sample")
return response_result
except NoMatch:
raise
def _get_host(self):
return 'http://openstack.example.com'
def _get_glance_host(self):
return 'http://glance.openstack.example.com'
def _get_regexes(self):
if self.ctype == 'json':
text = r'(\\"|[^"])*'
else:
text = r'[^<]*'
        isotime_re = r'\d{4}-[0,1]\d-[0-3]\dT\d{2}:\d{2}:\d{2}Z'
        strtime_re = r'\d{4}-[0,1]\d-[0-3]\dT\d{2}:\d{2}:\d{2}\.\d{6}'
        xmltime_re = (r'\d{4}-[0,1]\d-[0-3]\d '
                      r'\d{2}:\d{2}:\d{2}'
                      r'(\.\d{6})?(\+00:00)?')
# NOTE(claudiub): the x509 keypairs are different from the
# ssh keypairs. For example, the x509 fingerprint has 40 bytes.
return {
'isotime': isotime_re,
'strtime': strtime_re,
'strtime_or_none': r'None|%s' % strtime_re,
'xmltime': xmltime_re,
'password': '[0-9a-zA-Z]{1,12}',
'ip': '[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3}',
'ip6': '([0-9a-zA-Z]{1,4}:){1,7}:?[0-9a-zA-Z]{1,4}',
'id': '(?P<id>[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}'
'-[0-9a-f]{4}-[0-9a-f]{12})',
'uuid': '[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}'
'-[0-9a-f]{4}-[0-9a-f]{12}',
'reservation_id': 'r-[0-9a-zA-Z]{8}',
'private_key': '(-----BEGIN RSA PRIVATE KEY-----|)'
'[a-zA-Z0-9\n/+=]*'
'(-----END RSA PRIVATE KEY-----|)',
'public_key': '(ssh-rsa|-----BEGIN CERTIFICATE-----)'
'[ a-zA-Z0-9\n/+=]*'
'(Generated-by-Nova|-----END CERTIFICATE-----)',
'fingerprint': '(([0-9a-f]{2}:){19}|([0-9a-f]{2}:){15})'
'[0-9a-f]{2}',
'keypair_type': 'ssh|x509',
'host': self._get_host(),
'host_name': '[0-9a-z]{32}',
'glance_host': self._get_glance_host(),
'compute_host': self.compute.host,
'text': text,
'int': '[0-9]+',
'user_id': text,
}
def _get_response(self, url, method, body=None, strip_version=False,
api_version=None):
headers = {}
headers['Content-Type'] = 'application/' + self.ctype
headers['Accept'] = 'application/' + self.ctype
if api_version:
headers['X-OpenStack-Nova-API-Version'] = api_version
return self.api.api_request(url, body=body, method=method,
headers=headers, strip_version=strip_version)
def _do_get(self, url, strip_version=False, api_version=None):
return self._get_response(url, 'GET', strip_version=strip_version,
api_version=(api_version or
self.request_api_version))
def _do_post(self, url, name, subs, method='POST', api_version=None):
body = self._read_template(name) % subs
sample = self._get_sample(name, self.request_api_version)
if self.generate_samples and not os.path.exists(sample):
self._write_sample(name, body)
return self._get_response(url, method, body,
api_version=(api_version or
self.request_api_version))
def _do_put(self, url, name, subs, api_version=None):
return self._do_post(url, name, subs, method='PUT',
api_version=(api_version or
self.request_api_version))
def _do_delete(self, url, api_version=None):
return self._get_response(url, 'DELETE',
api_version=(api_version or
self.request_api_version))
| yosshy/nova | nova/tests/functional/api_samples_test_base.py | Python | apache-2.0 | 15,165 |
from nose.tools import * # flake8: noqa
from api.base import settings
from tests.base import ApiTestCase
# The versions below are specifically for testing purposes and do not reflect the actual versioning of the API.
# If changes are made to this list, or to DEFAULT_VERSION, please reflect those changes in:
# api/base/settings/local-travis.py so that travis tests will pass.
TESTING_ALLOWED_VERSIONS = (
'2.0',
'2.0.1',
'2.1',
'2.2',
'3.0',
'3.0.1',
)
DEFAULT_VERSION = '2.0'
class VersioningTestCase(ApiTestCase):
def setUp(self):
super(VersioningTestCase, self).setUp()
self.valid_url_path_version = '2.0'
self.valid_header_version = '2.0.1'
self.valid_query_parameter_version = '2.1'
self.invalid_url_path_version = '1.0'
self.invalid_header_version = '1.0.1'
self.invalid_query_parameter_version = '1.1'
self.valid_url_path_version_url = '/v2/'
self.invalid_url_path_version_url = '/v1/'
self.valid_query_parameter_version_url = '/v2/?version={}'.format(self.valid_query_parameter_version)
self.invalid_query_parameter_version_url = '/v2/?version={}'.format(self.invalid_query_parameter_version)
self._ALLOWED_VERSIONS = settings.REST_FRAMEWORK['ALLOWED_VERSIONS']
self._DEFAULT_VERSION = settings.REST_FRAMEWORK['DEFAULT_VERSION']
settings.REST_FRAMEWORK['ALLOWED_VERSIONS'] = TESTING_ALLOWED_VERSIONS
settings.REST_FRAMEWORK['DEFAULT_VERSION'] = DEFAULT_VERSION
def tearDown(self):
super(VersioningTestCase, self).tearDown()
settings.REST_FRAMEWORK['ALLOWED_VERSIONS'] = self._ALLOWED_VERSIONS
settings.REST_FRAMEWORK['DEFAULT_VERSION'] = self._DEFAULT_VERSION
class TestBaseVersioning(VersioningTestCase):
def setUp(self):
super(TestBaseVersioning, self).setUp()
def test_url_path_version(self):
res = self.app.get(self.valid_url_path_version_url)
assert_equal(res.status_code, 200)
assert_equal(res.json['meta']['version'], self.valid_url_path_version)
def test_header_version(self):
headers = {'accept': 'application/vnd.api+json;version={}'.format(self.valid_header_version)}
res = self.app.get(self.valid_url_path_version_url, headers=headers)
assert_equal(res.status_code, 200)
assert_equal(res.json['meta']['version'], self.valid_header_version)
def test_query_param_version(self):
res = self.app.get(self.valid_query_parameter_version_url)
assert_equal(res.status_code, 200)
assert_equal(res.json['meta']['version'], self.valid_query_parameter_version)
def test_url_path_version_not_in_allowed_versions(self):
res = self.app.get(self.invalid_url_path_version_url, expect_errors=True)
assert_equal(res.status_code, 404)
def test_header_version_not_in_allowed_versions(self):
headers = {'accept': 'application/vnd.api+json;version={}'.format(self.invalid_header_version)}
res = self.app.get(self.valid_url_path_version_url, headers=headers, expect_errors=True)
assert_equal(res.status_code, 406)
assert_equal(res.json['errors'][0]['detail'], 'Invalid version in "Accept" header.')
def test_query_param_version_not_in_allowed_versions(self):
res = self.app.get(self.invalid_query_parameter_version_url, expect_errors=True)
assert_equal(res.status_code, 404)
assert_equal(res.json['errors'][0]['detail'], 'Invalid version in query parameter.')
def test_query_parameter_version_not_within_url_path_major_version(self):
url = '/v2/?version=3.0.1'
res = self.app.get(url, expect_errors=True)
assert_equal(res.status_code, 409)
assert_equal(
res.json['errors'][0]['detail'],
'Version {} specified in query parameter does not fall within URL path version {}'.format(
'3.0.1',
self.valid_url_path_version
)
)
def test_header_version_not_within_url_path_major_version(self):
headers = {'accept': 'application/vnd.api+json;version=3.0.1'}
res = self.app.get(self.valid_url_path_version_url, headers=headers, expect_errors=True)
assert_equal(res.status_code, 409)
assert_equal(
res.json['errors'][0]['detail'],
'Version {} specified in "Accept" header does not fall within URL path version {}'.format(
'3.0.1',
self.valid_url_path_version
)
)
def test_header_version_and_query_parameter_version_match(self):
headers = {'accept': 'application/vnd.api+json;version={}'.format(self.valid_header_version)}
url = '/v2/?version={}'.format(self.valid_header_version)
res = self.app.get(url, headers=headers)
assert_equal(res.status_code, 200)
assert_equal(res.json['meta']['version'], self.valid_header_version)
def test_header_version_and_query_parameter_version_mismatch(self):
headers = {'accept': 'application/vnd.api+json;version={}'.format(self.valid_header_version)}
url = '/v2/?version={}'.format(self.valid_query_parameter_version)
res = self.app.get(url, headers=headers, expect_errors=True)
assert_equal(res.status_code, 409)
assert_equal(
res.json['errors'][0]['detail'],
'Version {} specified in "Accept" header does not match version {} specified in query parameter'.format(
self.valid_header_version,
self.valid_query_parameter_version
)
)
def test_header_version_bad_format(self):
headers = {'accept': 'application/vnd.api+json;version=not_at_all_a_version'}
res = self.app.get(self.valid_url_path_version_url, headers=headers, expect_errors=True)
assert_equal(res.status_code, 406)
assert_equal(res.json['errors'][0]['detail'], 'Invalid version in "Accept" header.')
def test_query_version_bad_format(self):
url = '/v2/?version=not_at_all_a_version'
res = self.app.get(url, expect_errors=True)
assert_equal(res.status_code, 404)
assert_equal(res.json['errors'][0]['detail'], 'Invalid version in query parameter.')
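# --- Editor's note (illustrative, not part of the test module): the three
# version-negotiation channels exercised above, as raw requests:
#   URL path:        GET /v2/
#   query parameter: GET /v2/?version=2.1
#   Accept header:   GET /v2/  with  Accept: application/vnd.api+json;version=2.0.1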
|
acshi/osf.io
|
api_tests/base/test_versioning.py
|
Python
|
apache-2.0
| 6,291
|
# $Id$
#
# Copyright (C) 2005 Gregory P. Smith (greg@krypto.org)
# Licensed to PSF under a Contributor Agreement.
#
__doc__ = """hashlib module - A common interface to many hash functions.
new(name, string='') - returns a new hash object implementing the
given hash function; initializing the hash
using the given string data.
Named constructor functions are also available, these are much faster
than using new():
md5(), sha1(), sha224(), sha256(), sha384(), and sha512()
More algorithms may be available on your platform but the above are guaranteed
to exist. See the algorithms_guaranteed and algorithms_available attributes
to find out what algorithm names can be passed to new().
NOTE: If you want the adler32 or crc32 hash functions, they are available in
the zlib module.
Choose your hash function wisely. Some have known collision weaknesses.
sha384 and sha512 will be slow on 32 bit platforms.
Hash objects have these methods:
- update(arg): Update the hash object with the string arg. Repeated calls
are equivalent to a single call with the concatenation of all
the arguments.
- digest(): Return the digest of the strings passed to the update() method
so far. This may contain non-ASCII characters, including
NUL bytes.
- hexdigest(): Like digest() except the digest is returned as a string of
double length, containing only hexadecimal digits.
- copy(): Return a copy (clone) of the hash object. This can be used to
efficiently compute the digests of strings that share a common
initial substring.
For example, to obtain the digest of the string 'Nobody inspects the
spammish repetition':
>>> import hashlib
>>> m = hashlib.md5()
>>> m.update("Nobody inspects")
>>> m.update(" the spammish repetition")
>>> m.digest()
'\\xbbd\\x9c\\x83\\xdd\\x1e\\xa5\\xc9\\xd9\\xde\\xc9\\xa1\\x8d\\xf0\\xff\\xe9'
More condensed:
>>> hashlib.sha224("Nobody inspects the spammish repetition").hexdigest()
'a4337bc45a8fc544c03f52dc550cd6e1e87021bc896588bd79e901e2'
"""
# This tuple and __get_builtin_constructor() must be modified if a new
# always available algorithm is added.
__always_supported = ('md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512')
algorithms_guaranteed = set(__always_supported)
algorithms_available = set(__always_supported)
algorithms = __always_supported
__all__ = __always_supported + ('new', 'algorithms_guaranteed',
'algorithms_available', 'algorithms',
'pbkdf2_hmac')
def __get_builtin_constructor(name):
try:
if name in ('SHA1', 'sha1'):
import _sha
return _sha.new
elif name in ('MD5', 'md5'):
import _md5
return _md5.new
elif name in ('SHA256', 'sha256', 'SHA224', 'sha224'):
import _sha256
bs = name[3:]
if bs == '256':
return _sha256.sha256
elif bs == '224':
return _sha256.sha224
elif name in ('SHA512', 'sha512', 'SHA384', 'sha384'):
import _sha512
bs = name[3:]
if bs == '512':
return _sha512.sha512
elif bs == '384':
return _sha512.sha384
except ImportError:
pass # no extension module, this hash is unsupported.
raise ValueError('unsupported hash type ' + name)
def __get_openssl_constructor(name):
try:
f = getattr(_hashlib, 'openssl_' + name)
# Allow the C module to raise ValueError. The function will be
# defined but the hash not actually available thanks to OpenSSL.
f()
# Use the C function directly (very fast)
return f
except (AttributeError, ValueError):
return __get_builtin_constructor(name)
def __py_new(name, string=''):
"""new(name, string='') - Return a new hashing object using the named algorithm;
optionally initialized with a string.
"""
return __get_builtin_constructor(name)(string)
def __hash_new(name, string=''):
"""new(name, string='') - Return a new hashing object using the named algorithm;
optionally initialized with a string.
"""
try:
return _hashlib.new(name, string)
except ValueError:
# If the _hashlib module (OpenSSL) doesn't support the named
# hash, try using our builtin implementations.
# This allows for SHA224/256 and SHA384/512 support even though
# the OpenSSL library prior to 0.9.8 doesn't provide them.
return __get_builtin_constructor(name)(string)
try:
import _hashlib
new = __hash_new
__get_hash = __get_openssl_constructor
algorithms_available = algorithms_available.union(
_hashlib.openssl_md_meth_names)
except ImportError:
new = __py_new
__get_hash = __get_builtin_constructor
for __func_name in __always_supported:
# try them all, some may not work due to the OpenSSL
# version not supporting that algorithm.
try:
globals()[__func_name] = __get_hash(__func_name)
except ValueError:
import logging
logging.exception('code for hash %s was not found.', __func_name)
try:
# OpenSSL's PKCS5_PBKDF2_HMAC requires OpenSSL 1.0+ with HMAC and SHA
from _hashlib import pbkdf2_hmac
except ImportError:
import binascii
import struct
_trans_5C = b"".join(chr(x ^ 0x5C) for x in range(256))
_trans_36 = b"".join(chr(x ^ 0x36) for x in range(256))
def pbkdf2_hmac(hash_name, password, salt, iterations, dklen=None):
"""Password based key derivation function 2 (PKCS #5 v2.0)
This Python implementation, based on the hmac module, is about as fast
as OpenSSL's PKCS5_PBKDF2_HMAC for short passwords and much faster
for long passwords.
"""
if not isinstance(hash_name, str):
raise TypeError(hash_name)
if not isinstance(password, (bytes, bytearray)):
password = bytes(buffer(password))
if not isinstance(salt, (bytes, bytearray)):
salt = bytes(buffer(salt))
# Fast inline HMAC implementation
inner = new(hash_name)
outer = new(hash_name)
blocksize = getattr(inner, 'block_size', 64)
if len(password) > blocksize:
password = new(hash_name, password).digest()
password = password + b'\x00' * (blocksize - len(password))
inner.update(password.translate(_trans_36))
outer.update(password.translate(_trans_5C))
def prf(msg, inner=inner, outer=outer):
# PBKDF2_HMAC uses the password as key. We can re-use the same
# digest objects and just update copies to skip initialization.
icpy = inner.copy()
ocpy = outer.copy()
icpy.update(msg)
ocpy.update(icpy.digest())
return ocpy.digest()
if iterations < 1:
raise ValueError(iterations)
if dklen is None:
dklen = outer.digest_size
if dklen < 1:
raise ValueError(dklen)
hex_format_string = "%%0%ix" % (new(hash_name).digest_size * 2)
dkey = b''
loop = 1
while len(dkey) < dklen:
prev = prf(salt + struct.pack(b'>I', loop))
rkey = int(binascii.hexlify(prev), 16)
for i in xrange(iterations - 1):
prev = prf(prev)
rkey ^= int(binascii.hexlify(prev), 16)
loop += 1
dkey += binascii.unhexlify(hex_format_string % rkey)
return dkey[:dklen]
# Cleanup locals()
del __always_supported, __func_name, __get_hash
del __py_new, __hash_new, __get_openssl_constructor
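# --- Editor's sketch (not part of the original module): a minimal usage
# demo for pbkdf2_hmac as defined above. The reference value is the
# published RFC 6070 test vector for PBKDF2-HMAC-SHA1 with
# password='password', salt='salt', 1 iteration, dkLen=20.
if __name__ == '__main__':
    import binascii
    dk = pbkdf2_hmac('sha1', b'password', b'salt', 1, dklen=20)
    # Expected: 0c60c80f961f0e71f3a9b524af6012062fe037a6
    print(binascii.hexlify(dk))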
|
nmercier/linux-cross-gcc
|
win32/bin/Lib/hashlib.py
|
Python
|
bsd-3-clause
| 8,063
|
#import binwalk.core.C
import binwalk.core.plugin
#from binwalk.core.common import *
class CompressdPlugin(binwalk.core.plugin.Plugin):
# '''
# Searches for and validates compress'd data.
# '''
MODULES = ['Signature']
#READ_SIZE = 64
#COMPRESS42 = "compress42"
#COMPRESS42_FUNCTIONS = [
# binwalk.core.C.Function(name="is_compressed", type=bool),
#]
#comp = None
#def init(self):
#self.comp = binwalk.core.C.Library(self.COMPRESS42, self.COMPRESS42_FUNCTIONS)
# This plugin is currently disabled due to the need to move away from supporting C
# libraries and into a pure Python project, for cross-platform support and ease of
# installation / package maintenance. A Python implementation will likely need to
# be custom developed in the future, but for now, since this compression format is
# not very common, especially in firmware, simply disable it.
#self.comp = None
#def scan(self, result):
# if self.comp and result.file and result.description.lower().startswith("compress'd data"):
# fd = self.module.config.open_file(result.file.name, offset=result.offset, length=self.READ_SIZE)
# compressed_data = fd.read(self.READ_SIZE)
# fd.close()
# if not self.comp.is_compressed(compressed_data, len(compressed_data)):
# result.valid = False
|
cristianst85/binwalk
|
src/binwalk/plugins/compressd.py
|
Python
|
mit
| 1,421
|
"""
A component which allows you to send data to Dweet.io.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/dweet/
"""
import logging
from datetime import timedelta
import voluptuous as vol
from homeassistant.const import EVENT_STATE_CHANGED, STATE_UNKNOWN
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers import state as state_helper
from homeassistant.util import Throttle
_LOGGER = logging.getLogger(__name__)
DOMAIN = "dweet"
DEPENDENCIES = []
REQUIREMENTS = ['dweepy==0.2.0']
CONF_NAME = 'name'
CONF_WHITELIST = 'whitelist'
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=1)
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Required(CONF_NAME): cv.string,
vol.Required(CONF_WHITELIST): cv.string,
}),
}, extra=vol.ALLOW_EXTRA)
# pylint: disable=too-many-locals
def setup(hass, config):
"""Setup the Dweet.io component."""
conf = config[DOMAIN]
name = conf[CONF_NAME]
whitelist = conf.get(CONF_WHITELIST, [])
json_body = {}
def dweet_event_listener(event):
"""Listen for new messages on the bus and sends them to Dweet.io."""
state = event.data.get('new_state')
if state is None or state.state in (STATE_UNKNOWN, '') \
or state.entity_id not in whitelist:
return
try:
_state = state_helper.state_as_number(state)
except ValueError:
_state = state.state
json_body[state.attributes.get('friendly_name')] = _state
send_data(name, json_body)
hass.bus.listen(EVENT_STATE_CHANGED, dweet_event_listener)
return True
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def send_data(name, msg):
"""Send the collected data to Dweet.io."""
import dweepy
try:
dweepy.dweet_for(name, msg)
except dweepy.DweepyError:
_LOGGER.error("Error saving data '%s' to Dweet.io", msg)
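# --- Editor's note (not from the original file): a minimal sketch of the
# configuration this CONFIG_SCHEMA expects in configuration.yaml. The
# entity id below is hypothetical; note that CONF_WHITELIST is validated
# as a plain string here, so a single entity id is the safe form.
#
# dweet:
#   name: my-example-thing
#   whitelist: sensor.living_room_temperature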
|
mikaelboman/home-assistant
|
homeassistant/components/dweet.py
|
Python
|
mit
| 1,961
|
class A:
def test(self):
print "I##|nitializing A", "test"##|
attribute = "hello"
def my_method(self):
print self.attribute
a = A()
a.test()
##r Should expand to Full String "Initializing A"
# Invalid selection:
# nitializing A", "test"
|
aptana/Pydev
|
tests/org.python.pydev.refactoring.tests/src/python/visitor/selectionextension/testSelectionExtensionExprFail.py
|
Python
|
epl-1.0
| 286
|
import rtaudio as rt
from math import cos
import struct
class audio_generator:
def __init__(self):
self.idx = -1
self.freq = 440.
def __call__(self):
self.idx += 1
if self.idx%48000 == 0:
self.freq *= 2**(1/12.)
return 0.5*cos(2.*3.1416*self.freq*self.idx/48000.)
class callback:
def __init__(self, gen):
self.gen = gen
self.i = 0
def __call__(self,playback, capture):
[struct.pack_into("f", playback, 4*o, self.gen()) for o in xrange(256)]
self.i = self.i + 256
if self.i > 48000*10:
print '.'
return 1
dac = rt.RtAudio()
n = dac.getDeviceCount()
print 'Number of devices available: ', n
for i in range(n):
try:
print dac.getDeviceInfo(i)
except rt.RtError as e:
print e
print 'Default output device: ', dac.getDefaultOutputDevice()
print 'Default input device: ', dac.getDefaultInputDevice()
print 'is stream open: ', dac.isStreamOpen()
print 'is stream running: ', dac.isStreamRunning()
oParams = {'deviceId': 1, 'nChannels': 1, 'firstChannel': 0}
iParams = {'deviceId': 1, 'nChannels': 1, 'firstChannel': 0}
try:
dac.openStream(oParams, iParams, 48000, 256, callback(audio_generator()))
except rt.RtError as e:
print e
else:
dac.startStream()
import time
print 'latency: ', dac.getStreamLatency()
while (dac.isStreamRunning()):
time.sleep(0.1)
print dac.getStreamTime()
dac.stopStream()
dac.abortStream()
dac.closeStream()
|
naivesound/glitch
|
vendor/github.com/thestk/rtaudio/contrib/python/pyrtaudio/PyRtAudioTest.py
|
Python
|
gpl-3.0
| 1,553
|
#!/bin/false
# This file is part of Espruino, a JavaScript interpreter for Microcontrollers
#
# Copyright (C) 2013 Gordon Williams <gw@pur3.co.uk>
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
# ----------------------------------------------------------------------------------------
# This file contains information for a specific board - the available pins, and where LEDs,
# Buttons, and other in-built peripherals are. It is used to build documentation as well
# as various source and header files for Espruino.
# ----------------------------------------------------------------------------------------
import pinutils;
info = {
'name' : "STM32 F429 Discovery",
'link' : [ "http://www.st.com/web/catalog/tools/FM116/SC959/SS1532/LN1199/PF259090" ],
'default_console' : "EV_SERIAL1",
'variables' : 5450,
'binary_name' : 'espruino_%v_stm32f429idiscovery.bin',
};
chip = {
'part' : "STM32F429ZIT6",
'family' : "STM32F4",
'package' : "LQFP144",
'ram' : 128,  # 256
'flash' : 512,  # 2048
'speed' : 168,
'usart' : 6,
'spi' : 3,
'i2c' : 3,
'adc' : 3,
'dac' : 2,
};
# left-right, or top-bottom order
board = {
'left' : [ ], # fixme
'left2' : [ ],
'right2' : [ ],
'right' : [ ],
};
devices = {
'OSC' : { 'pin_1' : 'H0',
'pin_2' : 'H1' },
'OSC_RTC' : { 'pin_1' : 'C14',
'pin_2' : 'C15' },
'LED1' : { 'pin' : 'G13' }, # green
'LED2' : { 'pin' : 'G14' }, # red
'BTN1' : { 'pin' : 'A0' },
'USB' : { 'pin_dm' : 'B14',
'pin_dp' : 'B15',
'pin_vbus' : 'B13',
'pin_id' : 'B12',
'pin_pso' : 'C4', # Power supply enable
'pin_oc' : 'C5', # Overcurrent
},
'MEMS' : { 'device' : 'L3GD20',
'pin_cs' : 'C1',
'pin_int1' : 'A1',
'pin_int2' : 'A2',
'pin_mosi' : 'F9',
'pin_miso' : 'F8',
'pin_sck' : 'F7' },
'TOUCHSCREEN' : {
'pin_irq' : 'A15',
'pin_cs' : '',
'pin_scl' : 'A8',
'pin_sda' : 'C9',
},
'LCD' : {
'width' : 320, 'height' : 240, 'bpp' : 16, 'controller' : 'fsmc', 'controller2' : 'ili9341',
'pin_d0' : 'D6',
'pin_d1' : 'G11',
'pin_d2' : 'G12',
'pin_d3' : 'A3',
'pin_d4' : 'B8',
'pin_d5' : 'B9',
'pin_d6' : 'A6',
'pin_d7' : 'G10',
'pin_d8' : 'B10',
'pin_d9' : 'B11',
'pin_d10' : 'C7',
'pin_d11' : 'D3',
'pin_d12' : 'C10',
'pin_d13' : 'B0',
'pin_d14' : 'A11',
'pin_d15' : 'A12',
'pin_d16' : 'B1',
'pin_d17' : 'G6',
'pin_rd' : 'D12', # RDX
'pin_wr' : 'D13',# WRQ (or SPI DC - data=1/command=0)
'pin_cs' : 'C2', # SPI CS (enable=0)
'pin_en' : 'F10',
'pin_vsync' : 'A4',
'pin_hsync' : 'C6',
'pin_dotlck' : 'G7',
'pin_dc' : 'F7', # SPI CLK
'pin_sda' : 'F9', # SPI SDI/SDO
'pin_im0' : 'D2', # solder bridge normally open, pulled to 0
'pin_im1' : 'D4', # solder bridge normally open, pulled to 1
'pin_im2' : 'D5', # solder bridge normally open, pulled to 1
'pin_im3' : 'D7', # solder bridge normally open, pulled to 0
},
'SDRAM' : {
'pin_sdcke1' : 'B5',
'pin_sdne1' : 'B6',
'pin_sdnwe' : 'C0',
'pin_d2' : 'D0',
'pin_d3' : 'D1',
'pin_d13' : 'D8',
'pin_d14' : 'D9',
'pin_d15' : 'D10',
'pin_d0' : 'D14',
'pin_d1' : 'D15',
'pin_nbl0' : 'E0',
'pin_nbl1' : 'E1',
'pin_d4' : 'E7',
'pin_d5' : 'E8',
'pin_d6' : 'E9',
'pin_d7' : 'E10',
'pin_d8' : 'E11',
'pin_d9' : 'E12',
'pin_d10' : 'E13',
'pin_d11' : 'E14',
'pin_d12' : 'E15',
'pin_a0' : 'F0',
'pin_a1' : 'F1',
'pin_a2' : 'F2',
'pin_a3' : 'F3',
'pin_a4' : 'F4',
'pin_a5' : 'F5',
'pin_sdnras' : 'F11',
'pin_a6' : 'F12',
'pin_a7' : 'F13',
'pin_a8' : 'F14',
'pin_a9' : 'F15',
'pin_a10' : 'G0',
'pin_a11' : 'G1',
'pin_ba0' : 'G4',
'pin_ba1' : 'G5',
'pin_sdclk' : 'G8',
'pin_sdncas' : 'G15',
},
'JTAG' : {
'pin_MS' : 'A13',
'pin_CK' : 'A14',
'pin_DI' : 'A15'
},
};
board_css = """
#board {
width: 680px;
height: 1020px;
left: 200px;
background-image: url(img/STM32F429IDISCOVERY.jpg);
}
#boardcontainer {
height: 1020px;
}
#left {
top: 375px;
right: 590px;
}
#left2 {
top: 375px;
left: 105px;
}
#right {
top: 375px;
left: 550px;
}
#right2 {
top: 375px;
right: 145px;
}
""";
def get_pins():
pins = pinutils.scan_pin_file([], 'stm32f40x.csv', 6, 9, 10)
return pinutils.only_from_package(pinutils.fill_gaps_in_pin_list(pins), chip["package"])
|
vshymanskyy/Espruino
|
boards/STM32F429IDISCOVERY.py
|
Python
|
mpl-2.0
| 5,336
|
# Copyright 2016 Twitter. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''constants.py: constants for integration test for pyheron'''
INTEGRATION_TEST_MOCK_MESSAGE_ID = "__integration_test_mock_message_id"
INTEGRATION_TEST_TERMINAL = "__integration_test_mock_terminal"
INTEGRATION_TEST_CONTROL_STREAM_ID = "__integration_test_control_stream_id"
# internal config key
MAX_EXECUTIONS = 10
HTTP_POST_URL_KEY = "http.post.url"
# user defined config key
USER_SPOUT_CLASSPATH = "user.spout.classpath"
USER_BOLT_CLASSPATH = "user.bolt.classpath"
# user defined max executions
USER_MAX_EXECUTIONS = "user.max.exec"
|
zhangzhonglai/heron
|
integration-test/src/python/integration_test/core/constants.py
|
Python
|
apache-2.0
| 1,130
|
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ambari Agent
"""
import sys
from resource_management import *
from yarn import yarn
from service import service
class ApplicationTimelineServer(Script):
def install(self, env):
self.install_packages(env)
#self.configure(env)
def configure(self, env):
import params
env.set_params(params)
yarn()
def start(self, env):
import params
env.set_params(params)
self.configure(env) # FOR SECURITY
service('historyserver', action='start')
def stop(self, env):
import params
env.set_params(params)
service('historyserver', action='stop')
def status(self, env):
import status_params
env.set_params(status_params)
check_process_status(status_params.yarn_historyserver_pid_file)
if __name__ == "__main__":
ApplicationTimelineServer().execute()
|
zouzhberk/ambaridemo
|
demo-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/package/scripts/application_timeline_server.py
|
Python
|
apache-2.0
| 1,573
|
# Copyright 2011 VMware, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
v2 Neutron Plug-in API specification.
:class:`NeutronPluginBaseV2` provides the definition of minimum set of
methods that needs to be implemented by a v2 Neutron Plug-in.
"""
import abc
import six
@six.add_metaclass(abc.ABCMeta)
class NeutronPluginBaseV2(object):
@abc.abstractmethod
def create_subnet(self, context, subnet):
"""Create a subnet.
Create a subnet, which represents a range of IP addresses
that can be allocated to devices
:param context: neutron api request context
:param subnet: dictionary describing the subnet, with keys
as listed in the :obj:`RESOURCE_ATTRIBUTE_MAP` object
in :file:`neutron/api/v2/attributes.py`. All keys will
be populated.
"""
pass
@abc.abstractmethod
def update_subnet(self, context, id, subnet):
"""Update values of a subnet.
:param context: neutron api request context
:param id: UUID representing the subnet to update.
:param subnet: dictionary with keys indicating fields to update.
valid keys are those that have a value of True for
'allow_put' as listed in the
:obj:`RESOURCE_ATTRIBUTE_MAP` object in
:file:`neutron/api/v2/attributes.py`.
"""
pass
@abc.abstractmethod
def get_subnet(self, context, id, fields=None):
"""Retrieve a subnet.
:param context: neutron api request context
:param id: UUID representing the subnet to fetch.
:param fields: a list of strings that are valid keys in a
subnet dictionary as listed in the
:obj:`RESOURCE_ATTRIBUTE_MAP` object in
:file:`neutron/api/v2/attributes.py`. Only these fields
will be returned.
"""
pass
@abc.abstractmethod
def get_subnets(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None, page_reverse=False):
"""Retrieve a list of subnets.
The contents of the list depend on
the identity of the user making the request (as indicated by the
context) as well as any filters.
:param context: neutron api request context
:param filters: a dictionary with keys that are valid keys for
a subnet as listed in the :obj:`RESOURCE_ATTRIBUTE_MAP`
object in :file:`neutron/api/v2/attributes.py`.
Values in this dictionary are an iterable containing
values that will be used for an exact match comparison
for that value. Each result returned by this
function will have matched one of the values for each
key in filters.
:param fields: a list of strings that are valid keys in a
subnet dictionary as listed in the
:obj:`RESOURCE_ATTRIBUTE_MAP` object in
:file:`neutron/api/v2/attributes.py`. Only these fields
will be returned.
"""
pass
def get_subnets_count(self, context, filters=None):
"""Return the number of subnets.
The result depends on the identity of
the user making the request (as indicated by the context) as well as
any filters.
:param context: neutron api request context
:param filters: a dictionary with keys that are valid keys for
a network as listed in the
:obj:`RESOURCE_ATTRIBUTE_MAP` object in
:file:`neutron/api/v2/attributes.py`. Values in this
dictionary are an iterable containing values that
will be used for an exact match comparison for that
value. Each result returned by this function will
have matched one of the values for each key in filters.
.. note:: this method is optional, as it was not part of the originally
defined plugin API.
"""
raise NotImplementedError
@abc.abstractmethod
def delete_subnet(self, context, id):
"""Delete a subnet.
:param context: neutron api request context
:param id: UUID representing the subnet to delete.
"""
pass
@abc.abstractmethod
def create_network(self, context, network):
"""Create a network.
Create a network, which represents an L2 network segment which
can have a set of subnets and ports associated with it.
:param context: neutron api request context
:param network: dictionary describing the network, with keys
as listed in the :obj:`RESOURCE_ATTRIBUTE_MAP` object
in :file:`neutron/api/v2/attributes.py`. All keys will
be populated.
"""
pass
@abc.abstractmethod
def update_network(self, context, id, network):
"""Update values of a network.
:param context: neutron api request context
:param id: UUID representing the network to update.
:param network: dictionary with keys indicating fields to update.
valid keys are those that have a value of True for
'allow_put' as listed in the
:obj:`RESOURCE_ATTRIBUTE_MAP` object in
:file:`neutron/api/v2/attributes.py`.
"""
pass
@abc.abstractmethod
def get_network(self, context, id, fields=None):
"""Retrieve a network.
:param context: neutron api request context
:param id: UUID representing the network to fetch.
:param fields: a list of strings that are valid keys in a
network dictionary as listed in the
:obj:`RESOURCE_ATTRIBUTE_MAP` object in
:file:`neutron/api/v2/attributes.py`. Only these fields
will be returned.
"""
pass
@abc.abstractmethod
def get_networks(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None, page_reverse=False):
"""Retrieve a list of networks.
The contents of the list depend on
the identity of the user making the request (as indicated by the
context) as well as any filters.
:param context: neutron api request context
:param filters: a dictionary with keys that are valid keys for
a network as listed in the
:obj:`RESOURCE_ATTRIBUTE_MAP` object in
:file:`neutron/api/v2/attributes.py`. Values in this
dictionary are an iterable containing values that will
be used for an exact match comparison for that value.
Each result returned by this function will have matched
one of the values for each key in filters.
:param fields: a list of strings that are valid keys in a
network dictionary as listed in the
:obj:`RESOURCE_ATTRIBUTE_MAP` object in
:file:`neutron/api/v2/attributes.py`. Only these fields
will be returned.
"""
pass
def get_networks_count(self, context, filters=None):
"""Return the number of networks.
The result depends on the identity
of the user making the request (as indicated by the context) as well
as any filters.
:param context: neutron api request context
:param filters: a dictionary with keys that are valid keys for
a network as listed in the
:obj:`RESOURCE_ATTRIBUTE_MAP` object
in :file:`neutron/api/v2/attributes.py`. Values in
this dictionary are an iterable containing values that
will be used for an exact match comparison for that
value. Each result returned by this function will have
matched one of the values for each key in filters.
.. note:: this method is optional, as it was not part of the originally
defined plugin API.
"""
raise NotImplementedError
@abc.abstractmethod
def delete_network(self, context, id):
"""Delete a network.
:param context: neutron api request context
:param id: UUID representing the network to delete.
"""
pass
@abc.abstractmethod
def create_port(self, context, port):
"""Create a port.
Create a port, which is a connection point of a device (e.g., a VM
NIC) to attach to an L2 neutron network.
:param context: neutron api request context
:param port: dictionary describing the port, with keys as listed in the
:obj:`RESOURCE_ATTRIBUTE_MAP` object in
:file:`neutron/api/v2/attributes.py`. All keys will be
populated.
"""
pass
@abc.abstractmethod
def update_port(self, context, id, port):
"""Update values of a port.
:param context: neutron api request context
:param id: UUID representing the port to update.
:param port: dictionary with keys indicating fields to update.
valid keys are those that have a value of True for
'allow_put' as listed in the :obj:`RESOURCE_ATTRIBUTE_MAP`
object in :file:`neutron/api/v2/attributes.py`.
"""
pass
@abc.abstractmethod
def get_port(self, context, id, fields=None):
"""Retrieve a port.
:param context: neutron api request context
:param id: UUID representing the port to fetch.
:param fields: a list of strings that are valid keys in a port
dictionary as listed in the
:obj:`RESOURCE_ATTRIBUTE_MAP` object in
:file:`neutron/api/v2/attributes.py`. Only these fields
will be returned.
"""
pass
@abc.abstractmethod
def get_ports(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None, page_reverse=False):
"""Retrieve a list of ports.
The contents of the list depend on the identity of the user making
the request (as indicated by the context) as well as any filters.
:param context: neutron api request context
:param filters: a dictionary with keys that are valid keys for
a port as listed in the :obj:`RESOURCE_ATTRIBUTE_MAP`
object in :file:`neutron/api/v2/attributes.py`. Values
in this dictionary are an iterable containing values
that will be used for an exact match comparison for
that value. Each result returned by this function will
have matched one of the values for each key in filters.
:param fields: a list of strings that are valid keys in a
port dictionary as listed in the
:obj:`RESOURCE_ATTRIBUTE_MAP` object in
:file:`neutron/api/v2/attributes.py`. Only these fields
will be returned.
"""
pass
def get_ports_count(self, context, filters=None):
"""Return the number of ports.
The result depends on the identity of the user making the request
(as indicated by the context) as well as any filters.
:param context: neutron api request context
:param filters: a dictionary with keys that are valid keys for
a network as listed in the
:obj:`RESOURCE_ATTRIBUTE_MAP` object in
:file:`neutron/api/v2/attributes.py`. Values in this
dictionary are an iterable containing values that will
be used for an exact match comparison for that value.
Each result returned by this function will have matched
one of the values for each key in filters.
.. note:: this method is optional, as it was not part of the originally
defined plugin API.
"""
raise NotImplementedError
@abc.abstractmethod
def delete_port(self, context, id):
"""Delete a port.
:param context: neutron api request context
:param id: UUID representing the port to delete.
"""
pass
def start_rpc_listeners(self):
"""Start the RPC listeners.
Most plugins start RPC listeners implicitly on initialization. In
order to support multiple process RPC, the plugin needs to expose
control over when this is started.
.. note:: this method is optional, as it was not part of the originally
defined plugin API.
"""
raise NotImplementedError
def rpc_workers_supported(self):
"""Return whether the plugin supports multiple RPC workers.
A plugin that supports multiple RPC workers should override the
start_rpc_listeners method to ensure that this method returns True and
that start_rpc_listeners is called at the appropriate time.
Alternately, a plugin can override this method to customize detection
of support for multiple rpc workers
.. note:: this method is optional, as it was not part of the originally
defined plugin API.
"""
return (self.__class__.start_rpc_listeners !=
NeutronPluginBaseV2.start_rpc_listeners)
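# --- Editor's sketch (hypothetical subclass, not part of the original
# module): rpc_workers_supported() compares class attributes, so a concrete
# plugin advertises multi-worker RPC support simply by overriding
# start_rpc_listeners. The no-op stubs below exist only to satisfy the
# abstract interface for the demo.
if __name__ == '__main__':
    class _DemoPlugin(NeutronPluginBaseV2):
        def create_subnet(self, context, subnet): pass
        def update_subnet(self, context, id, subnet): pass
        def get_subnet(self, context, id, fields=None): pass
        def get_subnets(self, context, filters=None, fields=None,
                        sorts=None, limit=None, marker=None,
                        page_reverse=False): pass
        def delete_subnet(self, context, id): pass
        def create_network(self, context, network): pass
        def update_network(self, context, id, network): pass
        def get_network(self, context, id, fields=None): pass
        def get_networks(self, context, filters=None, fields=None,
                         sorts=None, limit=None, marker=None,
                         page_reverse=False): pass
        def delete_network(self, context, id): pass
        def create_port(self, context, port): pass
        def update_port(self, context, id, port): pass
        def get_port(self, context, id, fields=None): pass
        def get_ports(self, context, filters=None, fields=None,
                      sorts=None, limit=None, marker=None,
                      page_reverse=False): pass
        def delete_port(self, context, id): pass
        def start_rpc_listeners(self):
            return []  # a real plugin would return its RPC consumers here
    print(_DemoPlugin().rpc_workers_supported())  # True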
|
shakamunyi/neutron-vrrp
|
neutron/neutron_plugin_base_v2.py
|
Python
|
apache-2.0
| 14,707
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2019-06-27 20:29
from __future__ import unicode_literals
from django.db import migrations
import osf.utils.fields
class Migration(migrations.Migration):
dependencies = [
('addons_onedrive', '0002_auto_20171121_1426'),
]
operations = [
migrations.RenameField(
model_name='nodesettings',
new_name='is_deleted',
old_name='deleted',
),
migrations.RenameField(
model_name='usersettings',
new_name='is_deleted',
old_name='deleted',
),
migrations.AddField(
model_name='nodesettings',
name='deleted',
field=osf.utils.fields.NonNaiveDateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='usersettings',
name='deleted',
field=osf.utils.fields.NonNaiveDateTimeField(blank=True, null=True),
),
]
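# --- Editor's note (not part of the migration): the pattern above frees up
# the name 'deleted' by renaming the old boolean column to 'is_deleted',
# then reintroduces 'deleted' as a nullable timestamp field.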
|
Johnetordoff/osf.io
|
addons/onedrive/migrations/0003_rename_deleted_field.py
|
Python
|
apache-2.0
| 997
|
#!/usr/bin/env python
"""
@package mi.dataset.param_dict
@file mi/dataset/param_dict.py
@author Emily Hahn
@brief Extend the protocol param dict to handle dataset encoding exceptions
"""
import re
from mi.core.instrument.protocol_param_dict import ProtocolParameterDict, ParameterDescription
from mi.core.instrument.protocol_param_dict import ParameterValue, ParameterDictVisibility
from mi.core.log import get_logger ; log = get_logger()
class DatasetParameterValue(ParameterValue):
def clear_value(self):
"""
Ensure value is cleared to None
"""
self.value = None
class Parameter(object):
"""
A parameter dictionary item.
"""
def __init__(self, name, f_format, value=None, expiration=None):
"""
Parameter value constructor.
@param name The parameter name.
@param f_format The function that formats the parameter value for a set command.
@param value The parameter value (initializes to None).
"""
self.description = ParameterDescription(name,
menu_path_read=None,
submenu_read=None,
menu_path_write=None,
submenu_write=None,
multi_match=False,
visibility=ParameterDictVisibility.READ_WRITE,
direct_access=False,
startup_param=False,
default_value=None,
init_value=None,
get_timeout=10,
set_timeout=10,
display_name=None,
description=None,
type=None,
units=None,
value_description=None)
self.value = DatasetParameterValue(name, f_format, value=value,
expiration=expiration)
self.name = name
def update(self, input):
"""
Attempt to update a parameter value. By default, this assumes the input
will be the new value. In subclasses, this must be updated to handle
a real string of data appropriately.
@param input A string that is the parameter value.
@retval True if an update was successful, False otherwise.
"""
self.value.set_value(input)
return True
def get_value(self, timestamp=None):
"""
Get the value of the parameter that has been stored in the ParameterValue
object.
@param timestamp timestamp to use for expiration calculation
@retval The actual data value if it is valid
@raises InstrumentParameterExpirationException If the value has expired
"""
return self.value.get_value(timestamp)
def clear_value(self):
"""
Clear the value in the parameter by setting it to None
"""
self.value.clear_value()
class RegexParameter(Parameter):
def __init__(self, name, pattern, f_getval, f_format, value=None,
regex_flags=None, expiration=None):
"""
Parameter value constructor.
@param name The parameter name.
@param pattern The regex that matches the parameter in line output.
@param f_getval The function that extracts the value from a regex match.
@param f_format The function that formats the parameter value for a set command.
@param value The parameter value (initializes to None).
@param regex_flags Flags that should be passed to the regex in this
parameter. Should comply with regex compile() interface (XORed flags).
@throws TypeError if regex flags are bad
@see ProtocolParameterDict.add() for details of parameters
"""
Parameter.__init__(self, name, f_format, value=value, expiration=expiration)
self.pattern = pattern
if regex_flags is None:
self.regex = re.compile(pattern)
else:
self.regex = re.compile(pattern, regex_flags)
self.f_getval = f_getval
def update(self, input):
"""
Attempt to update a parameter value. If the input string matches the
value regex, extract and update the dictionary value.
@param input A string possibly containing the parameter value.
@retval True if an update was successful, False otherwise.
"""
if not (isinstance(input, str)):
match = self.regex.search(str(input))
else:
match = self.regex.search(input)
if match:
self.value.set_value(self.f_getval(match))
return True
else:
return False
class DatasetParameterDict(ProtocolParameterDict):
"""
Dataset parameter dictionary. Manages, matches and formats parameters.
"""
def __init__(self):
"""
Constructor.
"""
super(DatasetParameterDict, self).__init__()
self._encoding_errors = []
def add(self, name, pattern, f_getval, f_format, value=None, regex_flags=None):
"""
Add a parameter object to the dictionary using a regex for extraction.
@param name The parameter name.
@param pattern The regex that matches the parameter in line output.
@param f_getval The function that extracts the value from a regex match.
@param f_format The function that formats the parameter value for a set command.
@param regex_flags Flags that should be passed to the regex in this
parameter. Should comply with regex compile() interface (XORed flags).
"""
val = RegexParameter(name, pattern, f_getval, f_format, value=value, regex_flags=regex_flags)
self._param_dict[name] = val
def update(self, in_data):
"""
Update the dictionary with a line input. Iterate through all objects
and attempt to match and update a parameter. Only updates the first
match encountered. If we pass in a target params list then we will
only iterate through those, allowing us to limit updates to specific
parameters.
@param in_data A set of data to match to a dictionary object.
@raise InstrumentParameterException on invalid target params
@raise KeyError on invalid parameter name
"""
params = self._param_dict.keys()
for name in params:
log.trace("update param dict name: %s", name)
try:
val = self._param_dict[name]
val.update(in_data)
except Exception as e:
# set the value to None if we failed
val.clear_value()
log.error("Dataset parameter dict error encoding %s: %s, set to None", name, e)
self._encoding_errors.append({name: None})
def get_encoding_errors(self):
"""
Return the encoding errors list
"""
return self._encoding_errors
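# --- Editor's sketch (hypothetical usage, not part of the original file):
# build a DatasetParameterDict, register a regex-extracted parameter, feed
# it one line of data and read the decoded value back. Accessing
# _param_dict directly is for illustration only.
if __name__ == '__main__':
    pd = DatasetParameterDict()
    pd.add('temperature',                        # parameter name
           r'temp=(\d+\.\d+)',                   # extraction regex
           lambda match: float(match.group(1)),  # f_getval decoder
           str)                                  # f_format formatter
    pd.update('temp=21.5 depth=3.0')
    print(pd._param_dict['temperature'].get_value())  # 21.5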
|
janeen666/mi-instrument
|
mi/dataset/param_dict.py
|
Python
|
bsd-2-clause
| 7,444
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
from document_parser import ParseDocument
from third_party.json_schema_compiler.model import UnixName
class DocumentRenderer(object):
'''Performs document-level rendering such as the title, references,
and table of contents: pulling that data out of the document, then
replacing the $(title), $(ref:...) and $(table_of_contents) tokens with them.
This can be thought of as a parallel to TemplateRenderer; while
TemplateRenderer is responsible for interpreting templates and rendering files
within the template engine, DocumentRenderer is responsible for interpreting
higher-level document concepts like the title and TOC, then performing string
replacement for them. The syntax for this replacement is $(...) where ... is
the concept. Currently title and table_of_contents are supported.
'''
def __init__(self, table_of_contents_renderer, ref_resolver):
self._table_of_contents_renderer = table_of_contents_renderer
self._ref_resolver = ref_resolver
def _RenderLinks(self, document, path):
''' Replaces all $(ref:...) references in |document| with html links.
References have two forms:
$(ref:api.node) - Replaces the reference with a link to node on the
API page. The title is set to the name of the node.
$(ref:api.node Title) - Same as the previous form, but title is set
to "Title".
'''
START_REF = '$(ref:'
END_REF = ')'
MAX_REF_LENGTH = 256
new_document = []
# Keeps track of position within |document|
cursor_index = 0
start_ref_index = document.find(START_REF)
while start_ref_index != -1:
end_ref_index = document.find(END_REF, start_ref_index)
if (end_ref_index == -1 or
end_ref_index - start_ref_index > MAX_REF_LENGTH):
end_ref_index = document.find(' ', start_ref_index)
logging.error('%s:%s has no terminating ) at line %s' % (
path,
document[start_ref_index:end_ref_index],
document.count('\n', 0, end_ref_index)))
new_document.append(document[cursor_index:end_ref_index + 1])
else:
ref = document[start_ref_index:end_ref_index]
ref_parts = ref[len(START_REF):].split(None, 1)
# Guess the api name from the html name, replacing '_' with '.' (e.g.
# if the page is app_window.html, guess the api name is app.window)
api_name = os.path.splitext(os.path.basename(path))[0].replace('_', '.')
title = ref_parts[0] if len(ref_parts) == 1 else ref_parts[1]
ref_dict = self._ref_resolver.SafeGetLink(ref_parts[0],
namespace=api_name,
title=title)
new_document.append(document[cursor_index:start_ref_index])
new_document.append('<a href=%s>%s</a>' % (ref_dict['href'],
ref_dict['text']))
cursor_index = end_ref_index + 1
start_ref_index = document.find(START_REF, cursor_index)
new_document.append(document[cursor_index:])
return ''.join(new_document)
def Render(self, document, path, render_title=False):
# Render links first so that parsing and later replacements aren't
# affected by $(ref...) substitutions
document = self._RenderLinks(document, path)
parsed_document = ParseDocument(document, expect_title=render_title)
toc_text, toc_warnings = self._table_of_contents_renderer.Render(
parsed_document.sections)
# Only 1 title and 1 table of contents substitution allowed; in the common
# case this saves needlessly running over the entire file.
if parsed_document.title:
document = document.replace('$(title)', parsed_document.title, 1)
return (document.replace('$(table_of_contents)', toc_text, 1),
parsed_document.warnings + toc_warnings)
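# --- Editor's sketch (hypothetical stub, not part of the original file):
# demonstrates the $(ref:...) rewriting performed by _RenderLinks, using a
# fake ref resolver in place of the real one.
if __name__ == '__main__':
    class _FakeRefResolver(object):
        def SafeGetLink(self, ref, namespace=None, title=None):
            return {'href': '#' + ref, 'text': title or ref}
    renderer = DocumentRenderer(None, _FakeRefResolver())
    print(renderer._RenderLinks(
        'See $(ref:app.window Create) for details.', 'app_window.html'))
    # -> See <a href=#app.window>Create</a> for details.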
|
TeamEOS/external_chromium_org
|
chrome/common/extensions/docs/server2/document_renderer.py
|
Python
|
bsd-3-clause
| 4,116
|
import re
import ast
from setuptools import setup, find_packages
_version_re = re.compile(r'__version__\s+=\s+(.*)')
with open('mycli/__init__.py', 'rb') as f:
version = str(ast.literal_eval(_version_re.search(
f.read().decode('utf-8')).group(1)))
description = 'CLI for MySQL Database. With auto-completion and syntax highlighting.'
setup(
name='mycli',
author='Amjith Ramanujam',
author_email='amjith[dot]r[at]gmail.com',
version=version,
license='LICENSE.txt',
url='http://mycli.net',
packages=find_packages(),
package_data={'mycli': ['myclirc', '../AUTHORS', '../SPONSORS']},
description=description,
long_description=description,
install_requires=[
'click >= 4.1',
'Pygments >= 2.0', # Pygments has to be Capitalcased. WTF?
'prompt_toolkit==0.46',
'PyMySQL >= 0.6.6',
'sqlparse >= 0.1.16',
'configobj >= 5.0.6',
],
entry_points='''
[console_scripts]
mycli=mycli.main:cli
''',
classifiers=[
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: Unix',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: SQL',
'Topic :: Database',
'Topic :: Database :: Front-Ends',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
|
suzukaze/mycli
|
setup.py
|
Python
|
bsd-3-clause
| 1,852
|
#-----------------------------------------------------------------------------
# Copyright (c) 2008-2012, David P. D. Moss. All rights reserved.
#
# Released under the BSD license. See the LICENSE file for details.
#-----------------------------------------------------------------------------
"""Routines for IPv4 and IPv6 addresses, subnets and ranges."""
import sys as _sys
import re as _re
from netaddr.core import AddrFormatError, AddrConversionError, num_bits, \
DictDotLookup, NOHOST, N, INET_PTON, P, ZEROFILL, Z
from netaddr.strategy import ipv4 as _ipv4, ipv6 as _ipv6
from netaddr.compat import _sys_maxint, _iter_range, _is_str, _int_type, \
_str_type
#-----------------------------------------------------------------------------
# Pre-compiled regexen used by cidr_merge() function.
RE_CIDR_ADJACENT = _re.compile(r'^([01]+)0 \1[1]$')
RE_CIDR_WITHIN = _re.compile(r'^([01]+) \1[10]+$')
RE_VALID_CIDR_BITS = _re.compile('^[01]+$')
#-----------------------------------------------------------------------------
class BaseIP(object):
"""
An abstract base class for common operations shared between various IP
related subclasses.
"""
__slots__ = ('_value', '_module')
def __init__(self):
"""Constructor."""
self._value = None
self._module = None
def _set_value(self, value):
if not isinstance(value, _int_type):
raise TypeError('int argument expected, not %s' % type(value))
if not 0 <= value <= self._module.max_int:
raise AddrFormatError('value out of bounds for an %s address!' \
% self._module.family_name)
self._value = value
value = property(lambda self: self._value, _set_value,
doc='a positive integer representing the value of IP address/subnet.')
def key(self):
"""
:return: a key tuple that uniquely identifies this IP address.
"""
return NotImplemented
def sort_key(self):
"""
:return: A key tuple used to compare and sort this `IPAddress`
correctly.
"""
return NotImplemented
def __hash__(self):
"""
:return: A hash value uniquely identifying this IP object.
"""
return hash(self.key())
def __eq__(self, other):
"""
:param other: an `IPAddress` or `IPNetwork` object.
:return: ``True`` if this `IPAddress` or `IPNetwork` object is
equivalent to ``other``, ``False`` otherwise.
"""
try:
return self.key() == other.key()
except (AttributeError, TypeError):
return NotImplemented
def __ne__(self, other):
"""
:param other: an `IPAddress` or `IPNetwork` object.
:return: ``True`` if this `IPAddress` or `IPNetwork` object is
not equivalent to ``other``, ``False`` otherwise.
"""
try:
return self.key() != other.key()
except (AttributeError, TypeError):
return NotImplemented
def __lt__(self, other):
"""
:param other: an `IPAddress` or `IPNetwork` object.
:return: ``True`` if this `IPAddress` or `IPNetwork` object is
less than ``other``, ``False`` otherwise.
"""
try:
return self.sort_key() < other.sort_key()
except (AttributeError, TypeError):
return NotImplemented
def __le__(self, other):
"""
:param other: an `IPAddress` or `IPNetwork` object.
:return: ``True`` if this `IPAddress` or `IPNetwork` object is
less than or equal to ``other``, ``False`` otherwise.
"""
try:
return self.sort_key() <= other.sort_key()
except (AttributeError, TypeError):
return NotImplemented
def __gt__(self, other):
"""
:param other: an `IPAddress` or `IPNetwork` object.
:return: ``True`` if this `IPAddress` or `IPNetwork` object is
greater than ``other``, ``False`` otherwise.
"""
try:
return self.sort_key() > other.sort_key()
except (AttributeError, TypeError):
return NotImplemented
def __ge__(self, other):
"""
:param other: an `IPAddress` or `IPNetwork` object.
:return: ``True`` if this `IPAddress` or `IPNetwork` object is
greater than or equal to ``other``, ``False`` otherwise.
"""
try:
return self.sort_key() >= other.sort_key()
except (AttributeError, TypeError):
return NotImplemented
def is_unicast(self):
""":return: ``True`` if this IP is unicast, ``False`` otherwise"""
return not self.is_multicast()
def is_multicast(self):
""":return: ``True`` if this IP is multicast, ``False`` otherwise"""
if self._module == _ipv4:
return self in IPV4_MULTICAST
elif self._module == _ipv6:
return self in IPV6_MULTICAST
def is_loopback(self):
"""
:return: ``True`` if this IP is a loopback address (not for network
transmission), ``False`` otherwise.
References: RFC 3330 and 4291.
"""
if self.version == 4:
return self in IPV4_LOOPBACK
elif self.version == 6:
return self == IPV6_LOOPBACK
def is_private(self):
"""
:return: ``True`` if this IP is for internal/private use only
(i.e. non-public), ``False`` otherwise. Reference: RFCs 1918,
3330, 4193, 3879 and 2365.
"""
if self.version == 4:
for cidr in IPV4_PRIVATE:
if self in cidr:
return True
elif self.version == 6:
for cidr in IPV6_PRIVATE:
if self in cidr:
return True
if self.is_link_local():
return True
return False
def is_link_local(self):
"""
:return: ``True`` if this IP is a link-local address, ``False`` otherwise.
Reference: RFCs 3927 and 4291.
"""
if self.version == 4:
return self in IPV4_LINK_LOCAL
elif self.version == 6:
return self in IPV6_LINK_LOCAL
def is_reserved(self):
"""
:return: ``True`` if this IP is in an IANA reserved range, ``False``
otherwise. Reference: RFCs 3330 and 3171.
"""
if self.version == 4:
for cidr in IPV4_RESERVED:
if self in cidr:
return True
elif self.version == 6:
for cidr in IPV6_RESERVED:
if self in cidr:
return True
return False
def is_ipv4_mapped(self):
"""
:return: ``True`` if this IP is an IPv4-mapped IPv6 address, ``False``
otherwise.
"""
return self.version == 6 and (self._value >> 32) == 0xffff
def is_ipv4_compat(self):
"""
:return: ``True`` if this IP is an IPv4-compatible IPv6 address, ``False``
otherwise.
"""
return self.version == 6 and (self._value >> 32) == 0
@property
def info(self):
"""
A record dict containing IANA registration details for this IP address
if available, None otherwise.
"""
# Lazy loading of IANA data structures.
from netaddr.ip.iana import query
return DictDotLookup(query(self))
@property
def version(self):
"""the IP protocol version represented by this IP object."""
return self._module.version
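# --- Editor's note (illustrative, not from the original source): the rich
# comparisons defined on BaseIP give address objects natural ordering, e.g.
#
# >>> from netaddr import IPAddress
# >>> IPAddress('192.0.2.1') < IPAddress('192.0.2.2')
# True
# >>> sorted([IPAddress('::1'), IPAddress('10.0.0.1')])[0].version
# 4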
#-----------------------------------------------------------------------------
class IPAddress(BaseIP):
"""
An individual IPv4 or IPv6 address without a net mask or subnet prefix.
To support these and other network based operations, see `IPNetwork`.
"""
__slots__ = ()
def __init__(self, addr, version=None, flags=0):
"""
Constructor.
:param addr: an IPv4 or IPv6 address which may be represented in an
accepted string format, as an unsigned integer or as another
IPAddress object (copy construction).
:param version: (optional) optimizes version detection if specified
and distinguishes between IPv4 and IPv6 for addresses with an
equivalent integer value.
:param flags: (optional) decides which rules are applied to the
interpretation of the addr value. Supported constants are
INET_PTON and ZEROFILL. See the netaddr.core docs for further
details.
"""
super(IPAddress, self).__init__()
if isinstance(addr, BaseIP):
# Copy constructor.
if version is not None and version != addr._module.version:
raise ValueError('cannot switch IP versions using '
'copy constructor!')
self._value = addr._value
self._module = addr._module
else:
# Explicit IP address version.
if version is not None:
if version == 4:
self._module = _ipv4
elif version == 6:
self._module = _ipv6
else:
raise ValueError('%r is an invalid IP version!' % version)
has_upper = hasattr(addr, 'upper')
if has_upper and '/' in addr:
raise ValueError('%s() does not support netmasks or subnet' \
' prefixes! See documentation for details.'
% self.__class__.__name__)
if self._module is None:
# IP version is implicit, detect it from addr.
if isinstance(addr, _int_type):
try:
if 0 <= int(addr) <= _ipv4.max_int:
self._value = int(addr)
self._module = _ipv4
elif _ipv4.max_int < int(addr) <= _ipv6.max_int:
self._value = int(addr)
self._module = _ipv6
except ValueError:
pass
else:
for module in _ipv4, _ipv6:
try:
self._value = module.str_to_int(addr, flags)
except Exception:
continue
else:
self._module = module
break
if self._module is None:
raise AddrFormatError('failed to detect a valid IP ' \
'address from %r' % addr)
else:
# IP version is explicit.
if has_upper:
try:
self._value = self._module.str_to_int(addr, flags)
except AddrFormatError:
raise AddrFormatError('base address %r is not IPv%d'
% (addr, self._module.version))
else:
if 0 <= int(addr) <= self._module.max_int:
self._value = int(addr)
else:
raise AddrFormatError('bad address format: %r' % addr)
def __getstate__(self):
""":returns: Pickled state of an `IPAddress` object."""
return self._value, self._module.version
def __setstate__(self, state):
"""
:param state: data used to unpickle a pickled `IPAddress` object.
"""
value, version = state
self._value = value
if version == 4:
self._module = _ipv4
elif version == 6:
self._module = _ipv6
else:
raise ValueError('unpickling failed for object state: %s' \
% str(state))
def is_hostmask(self):
"""
:return: ``True`` if this IP address is a host mask, ``False`` otherwise.
"""
int_val = self._value + 1
return (int_val & (int_val - 1) == 0)
def is_netmask(self):
"""
:return: ``True`` if this IP address is a netmask, ``False`` otherwise.
"""
int_val = (self._value ^ self._module.max_int) + 1
return (int_val & (int_val - 1) == 0)
def __iadd__(self, num):
"""
Increases the numerical value of this IPAddress by num.
An IndexError is raised if result exceeds maximum IP address value or
is less than zero.
:param num: size of IP address increment.
"""
new_value = self._value + num
if 0 <= new_value <= self._module.max_int:
self._value = new_value
return self
raise IndexError('result outside valid IP address boundary!')
def __isub__(self, num):
"""
Decreases the numerical value of this IPAddress by num.
An IndexError is raised if result is less than zero or exceeds maximum
IP address value.
:param num: size of IP address decrement.
"""
new_value = self._value - num
if 0 <= new_value <= self._module.max_int:
self._value = new_value
return self
raise IndexError('result outside valid IP address boundary!')
def __add__(self, num):
"""
Add the numerical value of this IP address to num and provide the
result as a new IPAddress object.
:param num: size of IP address increase.
:return: a new IPAddress object with its numerical value increased by num.
"""
new_value = self._value + num
if 0 <= new_value <= self._module.max_int:
return self.__class__(new_value, self.version)
raise IndexError('result outside valid IP address boundary!')
__radd__ = __add__
def __sub__(self, num):
"""
Subtract the numerical value of this IP address from num providing
the result as a new IPAddress object.
:param num: size of IP address decrease.
:return: a new IPAddress object with its numerical value decreased by num.
"""
new_value = self._value - num
if 0 <= new_value <= self._module.max_int:
return self.__class__(new_value, self.version)
raise IndexError('result outside valid IP address boundary!')
def __rsub__(self, num):
"""
Subtract num (lvalue) from the numerical value of this IP address
(rvalue) providing the result as a new IPAddress object.
:param num: size of IP address decrease.
:return: a new IPAddress object with its numerical value decreased by num.
"""
new_value = num - self._value
if 0 <= new_value <= self._module.max_int:
return self.__class__(new_value, self.version)
raise IndexError('result outside valid IP address boundary!')
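    # Illustrative usage (editor's sketch, not part of the original source):
    # integer arithmetic returns new IPAddress objects and range-checks the
    # result against the address family's boundaries.
    #
    #   >>> IPAddress('192.0.2.1') + 1
    #   IPAddress('192.0.2.2')
    #   >>> 1 + IPAddress('192.0.2.1')      # via __radd__
    #   IPAddress('192.0.2.2')
    #   >>> IPAddress('192.0.2.10') - 5
    #   IPAddress('192.0.2.5')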
def key(self):
"""
:return: a key tuple that uniquely identifies this IP address.
"""
# NB - we return the value here twice because this IP Address may
# be sorted with a list of networks and it should still end up
# in the expected order.
return self.version, self._value
def sort_key(self):
""":return: A key tuple used to compare and sort this `IPAddress` correctly."""
return self.version, self._value, self._module.width
def __int__(self):
""":return: the value of this IP address as an unsigned integer"""
return self._value
def __long__(self):
""":return: the value of this IP address as an unsigned integer"""
return self._value
def __oct__(self):
""":return: an octal string representation of this IP address."""
# Python 2.x
if self._value == 0:
return '0'
return '0%o' % self._value
def __hex__(self):
""":return: a hexadecimal string representation of this IP address."""
# Python 2.x
return '0x%x' % self._value
def __index__(self):
"""
:return: return the integer value of this IP address when called by \
hex(), oct() or bin().
"""
# Python 3.x
return self._value
def bits(self, word_sep=None):
"""
:param word_sep: (optional) the separator to insert between words.
Default: None - use default separator for address type.
:return: the value of this IP address as a binary digit string."""
return self._module.int_to_bits(self._value, word_sep)
@property
def packed(self):
"""The value of this IP address as a packed binary string."""
return self._module.int_to_packed(self._value)
@property
def words(self):
"""
A list of unsigned integer words (octets for IPv4, hextets for IPv6)
found in this IP address.
"""
return self._module.int_to_words(self._value)
@property
def bin(self):
"""
        The value of this IP address in standard Python binary
representational form (0bxxx). A back port of the format provided by
the builtin bin() function found in Python 2.6.x and higher.
"""
return self._module.int_to_bin(self._value)
@property
def reverse_dns(self):
"""The reverse DNS lookup record for this IP address"""
return self._module.int_to_arpa(self._value)
def ipv4(self):
"""
Raises an `AddrConversionError` if IPv6 address cannot be converted
to IPv4.
:return: A numerically equivalent version 4 `IPAddress` object.
"""
ip = None
klass = self.__class__
if self.version == 4:
ip = klass(self._value, 4)
elif self.version == 6:
if 0 <= self._value <= _ipv4.max_int:
ip = klass(self._value, 4)
            elif 0xffff00000000 <= self._value <= 0xffffffffffff:
                # IPv4-mapped IPv6 address range (::ffff:0:0/96).
ip = klass(self._value - 0xffff00000000, 4)
else:
raise AddrConversionError('IPv6 address %s unsuitable for ' \
'conversion to IPv4!' % self)
return ip
def ipv6(self, ipv4_compatible=False):
"""
        .. note:: The IPv4-compatible IPv6 address format is now considered \
            deprecated. See RFC 4291 or later for details.
        :param ipv4_compatible: If ``True`` returns an IPv4-compatible address
            (::x.x.x.x), an IPv4-mapped (::ffff:x.x.x.x) address otherwise.
            Default: ``False`` (IPv4-mapped).
:return: A numerically equivalent version 6 `IPAddress` object.
"""
ip = None
klass = self.__class__
if self.version == 6:
if ipv4_compatible and \
(0xffff00000000 <= self._value <= 0xffffffffffff):
ip = klass(self._value - 0xffff00000000, 6)
else:
ip = klass(self._value, 6)
elif self.version == 4:
# IPv4-Compatible IPv6 address
ip = klass(self._value, 6)
if not ipv4_compatible:
# IPv4-Mapped IPv6 address
ip = klass(0xffff00000000 + self._value, 6)
return ip
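    # Illustrative usage (editor's sketch; the string forms assume netaddr's
    # usual compact IPv6 formatting for mapped/compatible addresses):
    #
    #   >>> IPAddress('192.0.2.1').ipv6()
    #   IPAddress('::ffff:192.0.2.1')
    #   >>> IPAddress('192.0.2.1').ipv6(ipv4_compatible=True)
    #   IPAddress('::192.0.2.1')
    #   >>> IPAddress('::ffff:192.0.2.1').ipv4()
    #   IPAddress('192.0.2.1')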
def format(self, dialect=None):
"""
Only relevant for IPv6 addresses. Has no effect for IPv4.
:param dialect: An ipv6_* dialect class.
:return: an alternate string representation for this IP address.
"""
if dialect is not None:
if not hasattr(dialect, 'word_fmt'):
raise TypeError(
'custom dialects should subclass ipv6_verbose!')
return self._module.int_to_str(self._value, dialect=dialect)
def __or__(self, other):
"""
:param other: An `IPAddress` object (or other int-like object).
:return: bitwise OR (x | y) between the integer value of this IP
address and ``other``.
"""
return self.__class__(self._value | int(other), self.version)
def __and__(self, other):
"""
:param other: An `IPAddress` object (or other int-like object).
:return: bitwise AND (x & y) between the integer value of this IP
address and ``other``.
"""
return self.__class__(self._value & int(other), self.version)
def __xor__(self, other):
"""
:param other: An `IPAddress` object (or other int-like object).
:return: bitwise exclusive OR (x ^ y) between the integer value of
this IP address and ``other``.
"""
return self.__class__(self._value ^ int(other), self.version)
def __lshift__(self, numbits):
"""
:param numbits: size of bitwise shift.
:return: an `IPAddress` object based on this one with its integer
value left shifted by ``numbits``.
"""
return self.__class__(self._value << numbits, self.version)
def __rshift__(self, numbits):
"""
:param numbits: size of bitwise shift.
:return: an `IPAddress` object based on this one with its integer
value right shifted by ``numbits``.
"""
return self.__class__(self._value >> numbits, self.version)
def __nonzero__(self):
""":return: ``True`` if the numerical value of this IP address is not \
zero, ``False`` otherwise."""
# Python 2.x.
return bool(self._value)
__bool__ = __nonzero__ # Python 3.x.
def __str__(self):
""":return: IP address in presentational format"""
return self._module.int_to_str(self._value)
def __repr__(self):
""":return: Python statement to create an equivalent object"""
return "%s('%s')" % (self.__class__.__name__, self)
#-----------------------------------------------------------------------------
class IPListMixin(object):
"""
A mixin class providing shared list-like functionality to classes
representing groups of IP addresses.
"""
def __iter__(self):
"""
:return: An iterator providing access to all `IPAddress` objects
within range represented by this ranged IP object.
"""
start_ip = IPAddress(self.first, self.version)
end_ip = IPAddress(self.last, self.version)
return iter_iprange(start_ip, end_ip)
@property
def size(self):
"""
The total number of IP addresses within this ranged IP object.
"""
return int(self.last - self.first + 1)
def __len__(self):
"""
:return: the number of IP addresses in this ranged IP object. Raises
an `IndexError` if size > system max int (a Python 2.x
limitation). Use the .size property for subnets of any size.
"""
size = self.size
if size > _sys_maxint:
raise IndexError(("range contains more than %d (index size max) "
"IP addresses! Use the .size property instead." % _sys_maxint))
return size
def __getitem__(self, index):
"""
        :return: The IP address(es) in this ranged IP object referenced by
index or slice. As slicing can produce large sequences of objects
an iterator is returned instead of the more usual `list`.
"""
item = None
if hasattr(index, 'indices'):
if self._module.version == 6:
raise TypeError('IPv6 slices are not supported!')
(start, stop, step) = index.indices(self.size)
if (start + step < 0) or (step > stop):
# step value exceeds start and stop boundaries.
item = iter([IPAddress(self.first, self.version)])
else:
start_ip = IPAddress(self.first + start, self.version)
end_ip = IPAddress(self.first + stop - step, self.version)
item = iter_iprange(start_ip, end_ip, step)
else:
try:
index = int(index)
if (- self.size) <= index < 0:
# negative index.
item = IPAddress(self.last + index + 1, self.version)
elif 0 <= index <= (self.size - 1):
# Positive index or zero index.
item = IPAddress(self.first + index, self.version)
else:
                    raise IndexError('index out of range for address range size!')
except ValueError:
raise TypeError('unsupported index type %r!' % index)
return item
def __contains__(self, other):
"""
:param other: an `IPAddress` or ranged IP object.
:return: ``True`` if other falls within the boundary of this one,
``False`` otherwise.
"""
if self.version != other.version:
return False
if hasattr(other, '_value') and not hasattr(other, '_prefixlen'):
return other._value >= self.first and other._value <= self.last
return other.first >= self.first and other.last <= self.last
def __nonzero__(self):
"""
Ranged IP objects always represent a sequence of at least one IP
address and are therefore always True in the boolean context.
"""
# Python 2.x.
return True
__bool__ = __nonzero__ # Python 3.x.
#-----------------------------------------------------------------------------
def parse_ip_network(module, addr, implicit_prefix=False, flags=0):
if isinstance(addr, tuple):
# CIDR integer tuple
try:
val1, val2 = addr
except ValueError:
raise AddrFormatError('invalid %s tuple!' % module.family_name)
if 0 <= val1 <= module.max_int:
value = val1
if 0 <= val2 <= module.width:
prefixlen = val2
else:
raise AddrFormatError('invalid prefix for %s tuple!' \
% module.family_name)
else:
raise AddrFormatError('invalid address value for %s tuple!' \
% module.family_name)
elif isinstance(addr, _str_type):
# CIDR-like string subnet
if implicit_prefix:
#TODO: deprecate this option in netaddr 0.8.x
addr = cidr_abbrev_to_verbose(addr)
try:
if '/' in addr:
val1, val2 = addr.split('/', 1)
else:
val1 = addr
val2 = None
except ValueError:
raise AddrFormatError('invalid IPNetwork address %s!' % addr)
try:
ip = IPAddress(val1, module.version, flags=INET_PTON)
except AddrFormatError:
if module.version == 4:
# Try a partial IPv4 network address...
expanded_addr = _ipv4.expand_partial_address(val1)
ip = IPAddress(expanded_addr, module.version, flags=INET_PTON)
else:
raise AddrFormatError('invalid IPNetwork address %s!' % addr)
value = ip._value
try:
# Integer CIDR prefix.
prefixlen = int(val2)
except TypeError:
if val2 is None:
# No prefix was specified.
prefixlen = module.width
except ValueError:
# Not an integer prefix, try a netmask/hostmask prefix.
mask = IPAddress(val2, module.version, flags=INET_PTON)
if mask.is_netmask():
prefixlen = module.netmask_to_prefix[mask._value]
elif mask.is_hostmask():
prefixlen = module.hostmask_to_prefix[mask._value]
else:
raise AddrFormatError('addr %r is not a valid IPNetwork!' \
% addr)
if not 0 <= prefixlen <= module.width:
raise AddrFormatError('invalid prefix for %s address!' \
% module.family_name)
else:
raise TypeError('unexpected type %s for addr arg' % type(addr))
if flags & NOHOST:
# Remove host bits.
netmask = module.prefix_to_netmask[prefixlen]
value = value & netmask
return value, prefixlen
#-----------------------------------------------------------------------------
class IPNetwork(BaseIP, IPListMixin):
"""
An IPv4 or IPv6 network or subnet.
A combination of an IP address and a network mask.
    Accepts CIDR and several related variants:
a) Standard CIDR::
x.x.x.x/y -> 192.0.2.0/24
x::/y -> fe80::/10
b) Hybrid CIDR format (netmask address instead of prefix), where 'y' \
       address represents a valid netmask::
x.x.x.x/y.y.y.y -> 192.0.2.0/255.255.255.0
x::/y:: -> fe80::/ffc0::
c) ACL hybrid CIDR format (hostmask address instead of prefix like \
       Cisco's ACL bitmasks), where 'y' address represents a valid hostmask::
x.x.x.x/y.y.y.y -> 192.0.2.0/0.0.0.255
x::/y:: -> fe80::/3f:ffff:ffff:ffff:ffff:ffff:ffff:ffff
d) Abbreviated CIDR format (as of netaddr 0.7.x this requires the \
optional constructor argument ``implicit_prefix=True``)::
x -> 192
x/y -> 10/8
x.x/y -> 192.168/16
x.x.x/y -> 192.168.0/24
which are equivalent to::
x.0.0.0/y -> 192.0.0.0/24
x.0.0.0/y -> 10.0.0.0/8
x.x.0.0/y -> 192.168.0.0/16
x.x.x.0/y -> 192.168.0.0/24
"""
__slots__ = ('_prefixlen',)
def __init__(self, addr, implicit_prefix=False, version=None, flags=0):
"""
Constructor.
:param addr: an IPv4 or IPv6 address with optional CIDR prefix,
netmask or hostmask. May be an IP address in presentation
            (string) format, a tuple containing an integer address and a
network prefix, or another IPAddress/IPNetwork object (copy
construction).
        :param implicit_prefix: (optional) if True, the constructor uses
            classful IPv4 rules to select a default prefix when one is not
            provided. If False, addresses without an explicit prefix receive
            a full-width prefix (/32 for IPv4, /128 for IPv6). (default: False)
:param version: (optional) optimizes version detection if specified
and distinguishes between IPv4 and IPv6 for addresses with an
equivalent integer value.
:param flags: (optional) decides which rules are applied to the
interpretation of the addr value. Currently only supports the
NOHOST option. See the netaddr.core docs for further details.
"""
super(IPNetwork, self).__init__()
value, prefixlen, module = None, None, None
if hasattr(addr, '_prefixlen'):
# IPNetwork object copy constructor
value = addr._value
module = addr._module
prefixlen = addr._prefixlen
elif hasattr(addr, '_value'):
# IPAddress object copy constructor
value = addr._value
module = addr._module
prefixlen = module.width
elif version == 4:
value, prefixlen = parse_ip_network(_ipv4, addr,
implicit_prefix=implicit_prefix, flags=flags)
module = _ipv4
elif version == 6:
value, prefixlen = parse_ip_network(_ipv6, addr,
implicit_prefix=implicit_prefix, flags=flags)
module = _ipv6
else:
if version is not None:
raise ValueError('%r is an invalid IP version!' % version)
try:
module = _ipv4
value, prefixlen = parse_ip_network(module, addr,
implicit_prefix, flags)
except AddrFormatError:
try:
module = _ipv6
value, prefixlen = parse_ip_network(module, addr,
implicit_prefix, flags)
except AddrFormatError:
pass
if value is None:
raise AddrFormatError('invalid IPNetwork %s' % addr)
self._value = value
self._prefixlen = prefixlen
self._module = module
def __getstate__(self):
""":return: Pickled state of an `IPNetwork` object."""
return self._value, self._prefixlen, self._module.version
def __setstate__(self, state):
"""
:param state: data used to unpickle a pickled `IPNetwork` object.
"""
value, prefixlen, version = state
self._value = value
if version == 4:
self._module = _ipv4
elif version == 6:
self._module = _ipv6
else:
raise ValueError('unpickling failed for object state %s' \
% str(state))
if 0 <= prefixlen <= self._module.width:
self._prefixlen = prefixlen
else:
raise ValueError('unpickling failed for object state %s' \
% str(state))
def _set_prefixlen(self, value):
if not isinstance(value, _int_type):
raise TypeError('int argument expected, not %s' % type(value))
if not 0 <= value <= self._module.width:
raise AddrFormatError('invalid prefix for an %s address!' \
% self._module.family_name)
self._prefixlen = value
prefixlen = property(lambda self: self._prefixlen, _set_prefixlen,
doc='size of the bitmask used to separate the network from the host bits')
@property
def ip(self):
"""
        The IP address of this `IPNetwork` object. This may or may not be
        the same as the network address, depending on the value of the CIDR
        subnet prefix.
"""
return IPAddress(self._value, self.version)
@property
def network(self):
"""The network address of this `IPNetwork` object."""
return IPAddress(self._value & int(self.netmask), self.version)
@property
def broadcast(self):
"""The broadcast address of this `IPNetwork` object"""
return IPAddress(self._value | self.hostmask._value, self.version)
@property
def first(self):
"""
The integer value of first IP address found within this `IPNetwork`
object.
"""
return self._value & (self._module.max_int ^ self.hostmask._value)
@property
def last(self):
"""
The integer value of last IP address found within this `IPNetwork`
object.
"""
hostmask = (1 << (self._module.width - self._prefixlen)) - 1
return self._value | hostmask
@property
def netmask(self):
"""The subnet mask of this `IPNetwork` object."""
netmask = self._module.max_int ^ self.hostmask._value
return IPAddress(netmask, self.version)
@property
def hostmask(self):
"""The host mask of this `IPNetwork` object."""
hostmask = (1 << (self._module.width - self._prefixlen)) - 1
return IPAddress(hostmask, self.version)
@property
def cidr(self):
"""
The true CIDR address for this `IPNetwork` object which omits any
host bits to the right of the CIDR subnet prefix.
"""
ip = IPAddress(self._value & int(self.netmask), self.version)
cidr = IPNetwork("%s/%d" % (ip, self.prefixlen))
return cidr
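    # Illustrative usage (editor's sketch, not part of the original source):
    # the ip, network, broadcast, netmask, hostmask and cidr properties of a
    # single IPNetwork with host bits set.
    #
    #   >>> net = IPNetwork('192.0.2.130/24')
    #   >>> net.ip, net.network, net.broadcast
    #   (IPAddress('192.0.2.130'), IPAddress('192.0.2.0'), IPAddress('192.0.2.255'))
    #   >>> net.netmask, net.hostmask
    #   (IPAddress('255.255.255.0'), IPAddress('0.0.0.255'))
    #   >>> net.cidr
    #   IPNetwork('192.0.2.0/24')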
def __iadd__(self, num):
"""
Increases the value of this `IPNetwork` object by the current size
multiplied by ``num``.
An `IndexError` is raised if result exceeds maximum IP address value
or is less than zero.
        :param num: number of `IPNetwork` blocks to increment \
            this IPNetwork's value by.
"""
new_value = int(self.network) + (self.size * num)
if (new_value + (self.size - 1)) > self._module.max_int:
raise IndexError('increment exceeds address boundary!')
if new_value < 0:
raise IndexError('increment is less than zero!')
self._value = new_value
return self
def __isub__(self, num):
"""
Decreases the value of this `IPNetwork` object by the current size
multiplied by ``num``.
An `IndexError` is raised if result is less than zero or exceeds
maximum IP address value.
        :param num: number of `IPNetwork` blocks to decrement \
            this IPNetwork's value by.
"""
new_value = int(self.network) - (self.size * num)
if new_value < 0:
raise IndexError('decrement is less than zero!')
if (new_value + (self.size - 1)) > self._module.max_int:
raise IndexError('decrement exceeds address boundary!')
self._value = new_value
return self
def key(self):
"""
:return: A key tuple used to uniquely identify this `IPNetwork`.
"""
return self.version, self.first, self.last
def sort_key(self):
"""
:return: A key tuple used to compare and sort this `IPNetwork` correctly.
"""
net_size_bits = self._module.width - num_bits(self.size)
host_bits = self._value - self.first
return self.version, self.first, net_size_bits, host_bits
def ipv4(self):
"""
:return: A numerically equivalent version 4 `IPNetwork` object. \
Raises an `AddrConversionError` if IPv6 address cannot be \
converted to IPv4.
"""
ip = None
klass = self.__class__
if self.version == 4:
ip = klass('%s/%d' % (self.ip, self.prefixlen))
elif self.version == 6:
if 0 <= self._value <= _ipv4.max_int:
addr = _ipv4.int_to_str(self._value)
ip = klass('%s/%d' % (addr, self.prefixlen - 96))
            elif 0xffff00000000 <= self._value <= 0xffffffffffff:
addr = _ipv4.int_to_str(self._value - 0xffff00000000)
ip = klass('%s/%d' % (addr, self.prefixlen - 96))
else:
raise AddrConversionError('IPv6 address %s unsuitable for ' \
'conversion to IPv4!' % self)
return ip
def ipv6(self, ipv4_compatible=False):
"""
        .. note:: the IPv4-compatible IPv6 address format is now considered \
            deprecated. See RFC 4291 or later for details.
        :param ipv4_compatible: If ``True`` returns an IPv4-compatible address
            (::x.x.x.x), an IPv4-mapped (::ffff:x.x.x.x) address otherwise.
            Default: ``False`` (IPv4-mapped).
:return: A numerically equivalent version 6 `IPNetwork` object.
"""
ip = None
klass = self.__class__
if self.version == 6:
if ipv4_compatible and \
(0xffff00000000 <= self._value <= 0xffffffffffff):
ip = klass((self._value - 0xffff00000000, self._prefixlen),
version=6)
else:
ip = klass((self._value, self._prefixlen), version=6)
elif self.version == 4:
if ipv4_compatible:
# IPv4-Compatible IPv6 address
ip = klass((self._value, self._prefixlen + 96), version=6)
else:
# IPv4-Mapped IPv6 address
ip = klass((0xffff00000000 + self._value,
self._prefixlen + 96), version=6)
return ip
def previous(self, step=1):
"""
:param step: the number of IP subnets between this `IPNetwork` object
and the expected subnet. Default: 1 (the previous IP subnet).
:return: The adjacent subnet preceding this `IPNetwork` object.
"""
ip_copy = self.__class__('%s/%d' % (self.network, self.prefixlen),
self.version)
ip_copy -= step
return ip_copy
def next(self, step=1):
"""
:param step: the number of IP subnets between this `IPNetwork` object
and the expected subnet. Default: 1 (the next IP subnet).
:return: The adjacent subnet succeeding this `IPNetwork` object.
"""
ip_copy = self.__class__('%s/%d' % (self.network, self.prefixlen),
self.version)
ip_copy += step
return ip_copy
def supernet(self, prefixlen=0):
"""
Provides a list of supernets for this `IPNetwork` object between the
size of the current prefix and (if specified) an endpoint prefix.
:param prefixlen: (optional) a CIDR prefix for the maximum supernet.
Default: 0 - returns all possible supernets.
:return: a tuple of supernet `IPNetwork` objects.
"""
if not 0 <= prefixlen <= self._module.width:
raise ValueError('CIDR prefix /%d invalid for IPv%d!' \
% (prefixlen, self.version))
# Use a copy of self as we'll be editing it.
supernet = self.cidr
supernets = []
while supernet.prefixlen > prefixlen:
supernet.prefixlen -= 1
supernets.append(supernet.cidr)
return list(reversed(supernets))
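    # Illustrative usage (editor's sketch, not part of the original source):
    # supernets are returned from the widest prefix down to the one just
    # above the current prefix.
    #
    #   >>> IPNetwork('192.0.2.0/26').supernet(24)
    #   [IPNetwork('192.0.2.0/24'), IPNetwork('192.0.2.0/25')]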
def subnet(self, prefixlen, count=None, fmt=None):
"""
A generator that divides up this IPNetwork's subnet into smaller
subnets based on a specified CIDR prefix.
:param prefixlen: a CIDR prefix indicating size of subnets to be
returned.
:param count: (optional) number of consecutive IP subnets to be
returned.
:return: an iterator containing IPNetwork subnet objects.
"""
        if not 0 <= prefixlen <= self._module.width:
raise ValueError('CIDR prefix /%d invalid for IPv%d!' \
% (prefixlen, self.version))
if not self.prefixlen <= prefixlen:
# Don't return anything.
            return
# Calculate number of subnets to be returned.
width = self._module.width
max_subnets = 2 ** (width - self.prefixlen) // 2 ** (width - prefixlen)
if count is None:
count = max_subnets
if not 1 <= count <= max_subnets:
raise ValueError('count outside of current IP subnet boundary!')
base_subnet = self._module.int_to_str(self.first)
i = 0
while(i < count):
subnet = self.__class__('%s/%d' % (base_subnet, prefixlen),
self.version)
subnet.value += (subnet.size * i)
subnet.prefixlen = prefixlen
i += 1
yield subnet
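    # Illustrative usage (editor's sketch, not part of the original source):
    # carving a /24 into /26 blocks, optionally limited by count.
    #
    #   >>> list(IPNetwork('192.0.2.0/24').subnet(26))
    #   [IPNetwork('192.0.2.0/26'), IPNetwork('192.0.2.64/26'),
    #    IPNetwork('192.0.2.128/26'), IPNetwork('192.0.2.192/26')]
    #   >>> list(IPNetwork('192.0.2.0/24').subnet(26, count=2))
    #   [IPNetwork('192.0.2.0/26'), IPNetwork('192.0.2.64/26')]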
def iter_hosts(self):
"""
        A generator that provides all the IP addresses that can be assigned
to hosts within the range of this IP object's subnet.
        - for IPv4, the network and broadcast addresses are always excluded. \
          Any subnet that contains fewer than 4 IP addresses yields nothing.
- for IPv6, only the unspecified address '::' is excluded from any \
yielded IP addresses.
:return: an IPAddress iterator
"""
it_hosts = iter([])
if self.version == 4:
# IPv4 logic.
if self.size >= 4:
it_hosts = iter_iprange(IPAddress(self.first+1, self.version),
IPAddress(self.last-1, self.version))
else:
# IPv6 logic.
if self.first == 0:
if self.size != 1:
# Don't return '::'.
it_hosts = iter_iprange(
IPAddress(self.first+1, self.version),
IPAddress(self.last, self.version))
else:
it_hosts = iter(self)
return it_hosts
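    # Illustrative usage (editor's sketch, not part of the original source):
    # a /30 holds 4 addresses but only 2 assignable hosts once the network
    # and broadcast addresses are excluded.
    #
    #   >>> list(IPNetwork('192.0.2.0/30').iter_hosts())
    #   [IPAddress('192.0.2.1'), IPAddress('192.0.2.2')]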
def __str__(self):
""":return: this IPNetwork in CIDR format"""
addr = self._module.int_to_str(self._value)
return "%s/%s" % (addr, self.prefixlen)
def __repr__(self):
""":return: Python statement to create an equivalent object"""
return "%s('%s')" % (self.__class__.__name__, self)
#-----------------------------------------------------------------------------
class IPRange(BaseIP, IPListMixin):
"""
An arbitrary IPv4 or IPv6 address range.
Formed from a lower and upper bound IP address. The upper bound IP cannot
be numerically smaller than the lower bound and the IP version of both
must match.
"""
__slots__ = ('_start', '_end')
def __init__(self, start, end, flags=0):
"""
Constructor.
:param start: an IPv4 or IPv6 address that forms the lower
boundary of this IP range.
:param end: an IPv4 or IPv6 address that forms the upper
boundary of this IP range.
:param flags: (optional) decides which rules are applied to the
interpretation of the start and end values. Supported constants
are INET_PTON and ZEROFILL. See the netaddr.core docs for further
details.
"""
self._start = IPAddress(start, flags=flags)
self._module = self._start._module
self._end = IPAddress(end, self._module.version, flags=flags)
if int(self._start) > int(self._end):
raise AddrFormatError('lower bound IP greater than upper bound!')
def __getstate__(self):
""":return: Pickled state of an `IPRange` object."""
return self._start.value, self._end.value, self._module.version
def __setstate__(self, state):
"""
:param state: data used to unpickle a pickled `IPRange` object.
"""
start, end, version = state
self._start = IPAddress(start, version)
self._module = self._start._module
self._end = IPAddress(end, version)
@property
def first(self):
"""The integer value of first IP address in this `IPRange` object."""
return int(self._start)
@property
def last(self):
"""The integer value of last IP address in this `IPRange` object."""
return int(self._end)
def key(self):
"""
:return: A key tuple used to uniquely identify this `IPRange`.
"""
return self.version, self.first, self.last
def sort_key(self):
"""
:return: A key tuple used to compare and sort this `IPRange` correctly.
"""
skey = self._module.width - num_bits(self.size)
return self.version, self.first, skey
def cidrs(self):
"""
The list of CIDR addresses found within the lower and upper bound
addresses of this `IPRange`.
"""
return iprange_to_cidrs(self._start, self._end)
def __str__(self):
""":return: this `IPRange` in a common representational format."""
return "%s-%s" % (self._start, self._end)
def __repr__(self):
""":return: Python statement to create an equivalent object"""
return "%s('%s', '%s')" % (self.__class__.__name__,
self._start, self._end)
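# Illustrative usage (editor's sketch, not part of the original source): an
# arbitrary range and its exact CIDR decomposition.
#
#   >>> r = IPRange('192.0.2.1', '192.0.2.15')
#   >>> len(r)
#   15
#   >>> r.cidrs()
#   [IPNetwork('192.0.2.1/32'), IPNetwork('192.0.2.2/31'),
#    IPNetwork('192.0.2.4/30'), IPNetwork('192.0.2.8/29')]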
#-----------------------------------------------------------------------------
def iter_unique_ips(*args):
"""
:param args: A list of IP addresses and subnets passed in as arguments.
:return: A generator that flattens out IP subnets, yielding unique
individual IP addresses (no duplicates).
"""
for cidr in cidr_merge(args):
for ip in cidr:
yield ip
#-----------------------------------------------------------------------------
def cidr_abbrev_to_verbose(abbrev_cidr):
"""
A function that converts abbreviated IPv4 CIDRs to their more verbose
equivalent.
:param abbrev_cidr: an abbreviated CIDR.
Uses the old-style classful IP address rules to decide on a default
subnet prefix if one is not explicitly provided.
Only supports IPv4 addresses.
Examples ::
10 - 10.0.0.0/8
10/16 - 10.0.0.0/16
128 - 128.0.0.0/16
128/8 - 128.0.0.0/8
        192.168 - 192.168.0.0/24
    :return: A verbose CIDR from an abbreviated CIDR or old-style classful \
       network address, or the original value if it was not recognised as a \
       supported abbreviation.
"""
# Internal function that returns a prefix value based on the old IPv4
# classful network scheme that has been superseded (almost) by CIDR.
def classful_prefix(octet):
octet = int(octet)
if not 0 <= octet <= 255:
raise IndexError('Invalid octet: %r!' % octet)
if 0 <= octet <= 127: # Legacy class 'A' classification.
return 8
elif 128 <= octet <= 191: # Legacy class 'B' classification.
return 16
elif 192 <= octet <= 223: # Legacy class 'C' classification.
return 24
elif 224 <= octet <= 239: # Multicast address range.
return 4
return 32 # Default.
start = ''
tokens = []
prefix = None
if _is_str(abbrev_cidr):
if ':' in abbrev_cidr:
return abbrev_cidr
try:
# Single octet partial integer or string address.
i = int(abbrev_cidr)
tokens = [str(i), '0', '0', '0']
return "%s%s/%s" % (start, '.'.join(tokens), classful_prefix(i))
except ValueError:
# Multi octet partial string address with optional prefix.
part_addr = abbrev_cidr
tokens = []
if part_addr == '':
# Not a recognisable format.
return abbrev_cidr
if '/' in part_addr:
(part_addr, prefix) = part_addr.split('/', 1)
# Check prefix for validity.
if prefix is not None:
try:
if not 0 <= int(prefix) <= 32:
raise ValueError('prefixlen in address %r out of range' \
' for IPv4!' % abbrev_cidr)
except ValueError:
return abbrev_cidr
if '.' in part_addr:
tokens = part_addr.split('.')
else:
tokens = [part_addr]
if 1 <= len(tokens) <= 4:
for i in range(4 - len(tokens)):
tokens.append('0')
else:
# Not a recognisable format.
return abbrev_cidr
if prefix is None:
try:
prefix = classful_prefix(tokens[0])
except ValueError:
return abbrev_cidr
return "%s%s/%s" % (start, '.'.join(tokens), prefix)
except TypeError:
pass
except IndexError:
pass
# Not a recognisable format.
return abbrev_cidr
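# Illustrative usage (editor's sketch, not part of the original source):
# classful defaults apply only when no explicit prefix is given.
#
#   >>> cidr_abbrev_to_verbose('10')
#   '10.0.0.0/8'
#   >>> cidr_abbrev_to_verbose('128')
#   '128.0.0.0/16'
#   >>> cidr_abbrev_to_verbose('192.168/16')
#   '192.168.0.0/16'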
#-----------------------------------------------------------------------------
def cidr_merge(ip_addrs):
"""
A function that accepts an iterable sequence of IP addresses and subnets
merging them into the smallest possible list of CIDRs. It merges adjacent
subnets where possible, those contained within others and also removes
any duplicates.
:param ip_addrs: an iterable sequence of IP addresses and subnets.
:return: a summarized list of `IPNetwork` objects.
"""
if not hasattr(ip_addrs, '__iter__') or hasattr(ip_addrs, 'keys'):
raise ValueError('A sequence or iterator is expected!')
# Start off using set as we'll remove any duplicates at the start.
ipv4_bit_cidrs = set()
ipv6_bit_cidrs = set()
# Convert IP addresses and subnets into their CIDR bit strings.
ipv4_match_all_found = False
ipv6_match_all_found = False
for ip in ip_addrs:
cidr = IPNetwork(ip)
bits = cidr.network.bits(word_sep='')[0:cidr.prefixlen]
if cidr.version == 4:
if bits == '':
ipv4_match_all_found = True
ipv4_bit_cidrs = set(['']) # Clear all other IPv4 values.
if not ipv4_match_all_found:
ipv4_bit_cidrs.add(bits)
else:
if bits == '':
ipv6_match_all_found = True
ipv6_bit_cidrs = set(['']) # Clear all other IPv6 values.
if not ipv6_match_all_found:
ipv6_bit_cidrs.add(bits)
# Merge binary CIDR addresses where possible.
def _reduce_bit_cidrs(cidrs):
new_cidrs = []
cidrs.sort()
# Multiple passes are required to obtain precise results.
while 1:
finished = True
while (cidrs):
if not new_cidrs:
new_cidrs.append(cidrs.pop(0))
if not cidrs:
break
# lhs and rhs are same size and adjacent.
(new_cidr, subs) = RE_CIDR_ADJACENT.subn(
r'\1', '%s %s' % (new_cidrs[-1], cidrs[0]))
if subs:
# merge lhs with rhs.
new_cidrs[-1] = new_cidr
cidrs.pop(0)
finished = False
else:
# lhs contains rhs.
(new_cidr, subs) = RE_CIDR_WITHIN.subn(
r'\1', '%s %s' % (new_cidrs[-1], cidrs[0]))
if subs:
# keep lhs, discard rhs.
new_cidrs[-1] = new_cidr
cidrs.pop(0)
finished = False
else:
# no matches - accept rhs.
new_cidrs.append(cidrs.pop(0))
if finished:
break
else:
# still seeing matches, reset.
cidrs = new_cidrs
new_cidrs = []
if new_cidrs == ['0', '1']:
# Special case where summary CIDR result is '0.0.0.0/0' or
# '::/0' i.e. the whole IPv4 or IPv6 address space.
new_cidrs = ['']
return new_cidrs
new_cidrs = []
def _bits_to_cidr(bits, module):
if bits == '':
if module.version == 4:
return IPNetwork('0.0.0.0/0', 4)
else:
return IPNetwork('::/0', 6)
if RE_VALID_CIDR_BITS.match(bits) is None:
raise ValueError('%r is an invalid bit string!' % bits)
num_bits = len(bits)
        # bits == '' was handled above, so pad the remaining bits out and
        # convert the result into an IPNetwork.
        bits = bits + '0' * (module.width - num_bits)
        return IPNetwork((module.bits_to_int(bits), num_bits),
            version=module.version)
# Reduce and format lists of reduced CIDRs.
for bits in _reduce_bit_cidrs(list(ipv4_bit_cidrs)):
new_cidrs.append(_bits_to_cidr(bits, _ipv4))
for bits in _reduce_bit_cidrs(list(ipv6_bit_cidrs)):
new_cidrs.append(_bits_to_cidr(bits, _ipv6))
return new_cidrs
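# Illustrative usage (editor's sketch, not part of the original source):
# adjacent and contained entries collapse into the smallest covering list.
#
#   >>> cidr_merge(['192.0.2.0/25', '192.0.2.128/25', '192.0.2.64'])
#   [IPNetwork('192.0.2.0/24')]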
#-----------------------------------------------------------------------------
def cidr_exclude(target, exclude):
"""
Removes an exclude IP address or subnet from target IP subnet.
:param target: the target IP address or subnet to be divided up.
:param exclude: the IP address or subnet to be removed from target.
:return: list of `IPNetwork` objects remaining after exclusion.
"""
cidrs = []
target = IPNetwork(target)
exclude = IPNetwork(exclude)
if exclude.last < target.first:
# Exclude subnet's upper bound address less than target
# subnet's lower bound.
return [target.cidr]
elif target.last < exclude.first:
# Exclude subnet's lower bound address greater than target
# subnet's upper bound.
return [target.cidr]
new_prefixlen = target.prefixlen + 1
if new_prefixlen <= target._module.width:
i_lower = target.first
i_upper = target.first + (2 ** (target._module.width - new_prefixlen))
lower = IPNetwork((i_lower, new_prefixlen))
upper = IPNetwork((i_upper, new_prefixlen))
while exclude.prefixlen >= new_prefixlen:
if exclude in lower:
matched = i_lower
unmatched = i_upper
elif exclude in upper:
matched = i_upper
unmatched = i_lower
else:
# Exclude subnet not within target subnet.
cidrs.append(target.cidr)
break
ip = IPNetwork((unmatched, new_prefixlen))
cidrs.append(ip)
new_prefixlen += 1
if new_prefixlen > target._module.width:
break
i_lower = matched
i_upper = matched + (2 ** (target._module.width - new_prefixlen))
lower = IPNetwork((i_lower, new_prefixlen))
upper = IPNetwork((i_upper, new_prefixlen))
cidrs.sort()
return cidrs
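# Illustrative usage (editor's sketch, not part of the original source):
# removing a /26 from a /24 leaves the CIDRs either side of the excluded
# block.
#
#   >>> cidr_exclude('192.0.2.0/24', '192.0.2.64/26')
#   [IPNetwork('192.0.2.0/26'), IPNetwork('192.0.2.128/25')]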
#-----------------------------------------------------------------------------
def spanning_cidr(ip_addrs):
"""
Function that accepts a sequence of IP addresses and subnets returning
a single `IPNetwork` subnet that is large enough to span the lower and
upper bound IP addresses with a possible overlap on either end.
:param ip_addrs: sequence of IP addresses and subnets.
:return: a single spanning `IPNetwork` subnet.
"""
sorted_ips = sorted(
[IPNetwork(ip) for ip in ip_addrs])
if not len(sorted_ips) > 1:
raise ValueError('IP sequence must contain at least 2 elements!')
lowest_ip = sorted_ips[0]
highest_ip = sorted_ips[-1]
if lowest_ip.version != highest_ip.version:
raise TypeError('IP sequence cannot contain both IPv4 and IPv6!')
ip = highest_ip.cidr
while ip.prefixlen > 0:
if highest_ip in ip and lowest_ip not in ip:
ip.prefixlen -= 1
else:
break
return ip.cidr
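# Illustrative usage (editor's sketch, not part of the original source): the
# spanning CIDR may overlap beyond the requested bounds (here .0-.127 is
# needed to cover .0-.97).
#
#   >>> spanning_cidr(['192.0.2.0', '192.0.2.97'])
#   IPNetwork('192.0.2.0/25')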
#-----------------------------------------------------------------------------
def iter_iprange(start, end, step=1):
"""
A generator that produces IPAddress objects between an arbitrary start
and stop IP address with intervals of step between them. Sequences
    produced are inclusive of boundary IPs.
:param start: start IP address.
:param end: end IP address.
:param step: (optional) size of step between IP addresses. Default: 1
:return: an iterator of one or more `IPAddress` objects.
"""
start = IPAddress(start)
end = IPAddress(end)
if start.version != end.version:
raise TypeError('start and stop IP versions do not match!')
version = start.version
step = int(step)
if step == 0:
raise ValueError('step argument cannot be zero')
# We don't need objects from here, just integers.
start = int(start)
stop = int(end)
negative_step = False
if step < 0:
negative_step = True
index = start - step
while True:
index += step
if negative_step:
if not index >= stop:
break
else:
if not index <= stop:
break
yield IPAddress(index, version)
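# Illustrative usage (editor's sketch, not part of the original source):
# boundaries are inclusive and step may skip addresses.
#
#   >>> list(iter_iprange('192.0.2.1', '192.0.2.4'))
#   [IPAddress('192.0.2.1'), IPAddress('192.0.2.2'),
#    IPAddress('192.0.2.3'), IPAddress('192.0.2.4')]
#   >>> list(iter_iprange('192.0.2.1', '192.0.2.4', step=2))
#   [IPAddress('192.0.2.1'), IPAddress('192.0.2.3')]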
#-----------------------------------------------------------------------------
def iprange_to_cidrs(start, end):
"""
A function that accepts an arbitrary start and end IP address or subnet
and returns a list of CIDR subnets that fit exactly between the boundaries
of the two with no overlap.
:param start: the start IP address or subnet.
:param end: the end IP address or subnet.
:return: a list of one or more IP addresses and subnets.
"""
cidr_list = []
start = IPNetwork(start)
end = IPNetwork(end)
iprange = [start.first, end.last]
# Get spanning CIDR covering both addresses.
cidr_span = spanning_cidr([start, end])
if cidr_span.first == iprange[0] and cidr_span.last == iprange[-1]:
# Spanning CIDR matches start and end exactly.
cidr_list = [cidr_span]
elif cidr_span.last == iprange[-1]:
# Spanning CIDR matches end exactly.
ip = IPAddress(start)
first_int_val = int(ip)
ip -= 1
cidr_remainder = cidr_exclude(cidr_span, ip)
first_found = False
for cidr in cidr_remainder:
if cidr.first == first_int_val:
first_found = True
if first_found:
cidr_list.append(cidr)
elif cidr_span.first == iprange[0]:
# Spanning CIDR matches start exactly.
ip = IPAddress(end)
last_int_val = int(ip)
ip += 1
cidr_remainder = cidr_exclude(cidr_span, ip)
last_found = False
for cidr in cidr_remainder:
cidr_list.append(cidr)
if cidr.last == last_int_val:
break
elif cidr_span.first <= iprange[0] and cidr_span.last >= iprange[-1]:
# Spanning CIDR overlaps start and end.
ip = IPAddress(start)
first_int_val = int(ip)
ip -= 1
cidr_remainder = cidr_exclude(cidr_span, ip)
# Fix start.
first_found = False
for cidr in cidr_remainder:
if cidr.first == first_int_val:
first_found = True
if first_found:
cidr_list.append(cidr)
# Fix end.
ip = IPAddress(end)
last_int_val = int(ip)
ip += 1
cidr_remainder = cidr_exclude(cidr_list.pop(), ip)
last_found = False
for cidr in cidr_remainder:
cidr_list.append(cidr)
if cidr.last == last_int_val:
break
return cidr_list
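# Illustrative usage (editor's sketch, not part of the original source): an
# exactly-aligned range collapses to a single CIDR.
#
#   >>> iprange_to_cidrs('192.0.2.0', '192.0.2.255')
#   [IPNetwork('192.0.2.0/24')]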
#-----------------------------------------------------------------------------
def smallest_matching_cidr(ip, cidrs):
"""
Matches an IP address or subnet against a given sequence of IP addresses
and subnets.
:param ip: a single IP address or subnet.
:param cidrs: a sequence of IP addresses and/or subnets.
:return: the smallest (most specific) matching IPAddress or IPNetwork
object from the provided sequence, None if there was no match.
"""
match = None
if not hasattr(cidrs, '__iter__'):
raise TypeError('IP address/subnet sequence expected, not %r!'
% cidrs)
ip = IPAddress(ip)
for cidr in sorted([IPNetwork(cidr) for cidr in cidrs]):
if ip in cidr:
match = cidr
else:
if match is not None:
break
return match
#-----------------------------------------------------------------------------
def largest_matching_cidr(ip, cidrs):
"""
Matches an IP address or subnet against a given sequence of IP addresses
and subnets.
:param ip: a single IP address or subnet.
:param cidrs: a sequence of IP addresses and/or subnets.
:return: the largest (least specific) matching IPAddress or IPNetwork
object from the provided sequence, None if there was no match.
"""
match = None
if not hasattr(cidrs, '__iter__'):
raise TypeError('IP address/subnet sequence expected, not %r!'
% cidrs)
ip = IPAddress(ip)
for cidr in sorted([IPNetwork(cidr) for cidr in cidrs]):
if ip in cidr:
match = cidr
break
return match
#-----------------------------------------------------------------------------
def all_matching_cidrs(ip, cidrs):
"""
Matches an IP address or subnet against a given sequence of IP addresses
and subnets.
:param ip: a single IP address.
:param cidrs: a sequence of IP addresses and/or subnets.
:return: all matching IPAddress and/or IPNetwork objects from the provided
sequence, an empty list if there was no match.
"""
matches = []
if not hasattr(cidrs, '__iter__'):
raise TypeError('IP address/subnet sequence expected, not %r!'
% cidrs)
ip = IPAddress(ip)
for cidr in sorted([IPNetwork(cidr) for cidr in cidrs]):
if ip in cidr:
matches.append(cidr)
else:
if matches:
break
return matches
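# Illustrative usage (editor's sketch, not part of the original source) for
# the three matching helpers above:
#
#   >>> nets = ['10.0.0.0/8', '192.0.2.0/24', '192.0.2.0/28']
#   >>> smallest_matching_cidr('192.0.2.5', nets)
#   IPNetwork('192.0.2.0/28')
#   >>> largest_matching_cidr('192.0.2.5', nets)
#   IPNetwork('192.0.2.0/24')
#   >>> all_matching_cidrs('192.0.2.5', nets)
#   [IPNetwork('192.0.2.0/24'), IPNetwork('192.0.2.0/28')]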
#-----------------------------------------------------------------------------
# Cached IPv4 address range lookups.
#-----------------------------------------------------------------------------
IPV4_LOOPBACK = IPNetwork('127.0.0.0/8')
IPV4_PRIVATE = (
IPNetwork('10.0.0.0/8'), # Private-Use Networks
IPNetwork('172.16.0.0/12'), # Private-Use Networks
IPNetwork('192.0.2.0/24'), # Test-Net
IPNetwork('192.168.0.0/16'), # Private-Use Networks
IPRange('239.0.0.0', '239.255.255.255'), # Administrative Multicast
)
IPV4_LINK_LOCAL = IPNetwork('169.254.0.0/16')
IPV4_MULTICAST = IPNetwork('224.0.0.0/4')
IPV4_6TO4 = IPNetwork('192.88.99.0/24') # 6to4 Relay Anycast
IPV4_RESERVED = (
IPNetwork('128.0.0.0/16'), # Reserved but subject to allocation
IPNetwork('191.255.0.0/16'), # Reserved but subject to allocation
IPNetwork('192.0.0.0/24'), # Reserved but subject to allocation
IPNetwork('223.255.255.0/24'), # Reserved but subject to allocation
IPNetwork('240.0.0.0/4'), # Reserved for Future Use
# Reserved multicast
IPRange('234.0.0.0', '238.255.255.255'),
IPRange('225.0.0.0', '231.255.255.255'),
)
#-----------------------------------------------------------------------------
# Cached IPv6 address range lookups.
#-----------------------------------------------------------------------------
IPV6_LOOPBACK = IPAddress('::1')
IPV6_PRIVATE = (
IPNetwork('fc00::/7'), # Unique Local Addresses (ULA)
IPNetwork('fec0::/10'), # Site Local Addresses (deprecated - RFC 3879)
)
IPV6_LINK_LOCAL = IPNetwork('fe80::/10')
IPV6_MULTICAST = IPNetwork('ff00::/8')
IPV6_RESERVED = (
IPNetwork('ff00::/12'), IPNetwork('::/8'),
IPNetwork('0100::/8'), IPNetwork('0200::/7'),
IPNetwork('0400::/6'), IPNetwork('0800::/5'),
IPNetwork('1000::/4'), IPNetwork('4000::/3'),
IPNetwork('6000::/3'), IPNetwork('8000::/3'),
IPNetwork('A000::/3'), IPNetwork('C000::/3'),
IPNetwork('E000::/4'), IPNetwork('F000::/5'),
IPNetwork('F800::/6'), IPNetwork('FE00::/9'),
)
|
ecolitan/fatics
|
venv/lib/python2.7/site-packages/netaddr/ip/__init__.py
|
Python
|
agpl-3.0
| 66411
|
#!/usr/bin/env python
# ___ ___ _ _ ___ ___ _ _____ ___ ___
# / __| __| \| | __| _ \ /_\_ _| __| \
# | (_ | _|| .` | _|| / / _ \| | | _|| |) |
# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|
# | |) | (_) | | .` | (_) || | | _|| |) | | | |
# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|
'''
OpenShiftCLI class that wraps the oc commands in a subprocess
'''
import atexit
import json
import os
import shutil
import subprocess
import re
import yaml
# This is here because of a bug that causes yaml
# to incorrectly handle timezone info on timestamps
def timestamp_constructor(_, node):
'''return timestamps as strings'''
return str(node.value)
yaml.add_constructor(u'tag:yaml.org,2002:timestamp', timestamp_constructor)
# pylint: disable=too-few-public-methods
class OpenShiftCLI(object):
''' Class to wrap the command line tools '''
def __init__(self,
namespace,
kubeconfig='/etc/origin/master/admin.kubeconfig',
verbose=False):
''' Constructor for OpenshiftCLI '''
self.namespace = namespace
self.verbose = verbose
self.kubeconfig = kubeconfig
# Pylint allows only 5 arguments to be passed.
# pylint: disable=too-many-arguments
def _replace_content(self, resource, rname, content, force=False):
''' replace the current object with the content '''
res = self._get(resource, rname)
if not res['results']:
return res
fname = '/tmp/%s' % rname
yed = Yedit(fname, res['results'][0])
changes = []
for key, value in content.items():
changes.append(yed.put(key, value))
if any([not change[0] for change in changes]):
return {'returncode': 0, 'updated': False}
yed.write()
atexit.register(Utils.cleanup, [fname])
return self._replace(fname, force)
def _replace(self, fname, force=False):
        '''replace a resource using the definition in the given file '''
cmd = ['-n', self.namespace, 'replace', '-f', fname]
if force:
cmd.append('--force')
return self.openshift_cmd(cmd)
def _create(self, fname):
        '''create a resource from the given file '''
return self.openshift_cmd(['create', '-f', fname, '-n', self.namespace])
def _delete(self, resource, rname):
        '''delete the named resource '''
return self.openshift_cmd(['delete', resource, rname, '-n', self.namespace])
def _get(self, resource, rname=None):
        '''return a resource by name, or all resources of that kind '''
cmd = ['get', resource, '-o', 'json', '-n', self.namespace]
if rname:
cmd.append(rname)
rval = self.openshift_cmd(cmd, output=True)
        # Ensure results are returned in an array
if rval.has_key('items'):
rval['results'] = rval['items']
elif not isinstance(rval['results'], list):
rval['results'] = [rval['results']]
return rval
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json'):
'''Base command for oc '''
#cmds = ['/usr/bin/oc', '--config', self.kubeconfig]
cmds = []
if oadm:
cmds = ['/usr/bin/oadm']
else:
cmds = ['/usr/bin/oc']
cmds.extend(cmd)
rval = {}
results = ''
err = None
if self.verbose:
print ' '.join(cmds)
proc = subprocess.Popen(cmds,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env={'KUBECONFIG': self.kubeconfig})
proc.wait()
stdout = proc.stdout.read()
stderr = proc.stderr.read()
rval = {"returncode": proc.returncode,
"results": results,
"cmd": ' '.join(cmds),
}
if proc.returncode == 0:
if output:
if output_type == 'json':
try:
rval['results'] = json.loads(stdout)
except ValueError as err:
if "No JSON object could be decoded" in err.message:
err = err.message
elif output_type == 'raw':
rval['results'] = stdout
if self.verbose:
print stdout
print stderr
print
if err:
rval.update({"err": err,
"stderr": stderr,
"stdout": stdout,
"cmd": cmds
})
else:
rval.update({"stderr": stderr,
"stdout": stdout,
"results": {},
})
return rval
class Utils(object):
''' utilities for openshiftcli modules '''
@staticmethod
def create_file(rname, data, ftype=None):
''' create a file in tmp with name and contents'''
path = os.path.join('/tmp', rname)
with open(path, 'w') as fds:
if ftype == 'yaml':
fds.write(yaml.safe_dump(data, default_flow_style=False))
elif ftype == 'json':
fds.write(json.dumps(data))
else:
fds.write(data)
# Register cleanup when module is done
atexit.register(Utils.cleanup, [path])
return path
@staticmethod
def create_files_from_contents(data):
        '''Turn a list of dicts with 'path' and 'content' keys into a list of created file paths'''
files = []
for sfile in data:
path = Utils.create_file(sfile['path'], sfile['content'])
files.append(path)
return files
@staticmethod
def cleanup(files):
'''Clean up on exit '''
for sfile in files:
if os.path.exists(sfile):
if os.path.isdir(sfile):
shutil.rmtree(sfile)
elif os.path.isfile(sfile):
os.remove(sfile)
@staticmethod
def exists(results, _name):
''' Check to see if the results include the name '''
if not results:
return False
if Utils.find_result(results, _name):
return True
return False
@staticmethod
def find_result(results, _name):
''' Find the specified result by name'''
rval = None
for result in results:
if result.has_key('metadata') and result['metadata']['name'] == _name:
rval = result
break
return rval
@staticmethod
def get_resource_file(sfile, sfile_type='yaml'):
        ''' return the parsed contents of a resource file '''
contents = None
with open(sfile) as sfd:
contents = sfd.read()
if sfile_type == 'yaml':
contents = yaml.safe_load(contents)
elif sfile_type == 'json':
contents = json.loads(contents)
return contents
# Disabling too-many-branches. This is a yaml dictionary comparison function
# pylint: disable=too-many-branches,too-many-return-statements
@staticmethod
def check_def_equal(user_def, result_def, skip_keys=None, debug=False):
''' Given a user defined definition, compare it with the results given back by our query. '''
# Currently these values are autogenerated and we do not need to check them
skip = ['metadata', 'status']
if skip_keys:
skip.extend(skip_keys)
for key, value in result_def.items():
if key in skip:
continue
# Both are lists
if isinstance(value, list):
if not isinstance(user_def[key], list):
if debug:
print 'user_def[key] is not a list'
return False
for values in zip(user_def[key], value):
if isinstance(values[0], dict) and isinstance(values[1], dict):
if debug:
print 'sending list - list'
print type(values[0])
print type(values[1])
result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug)
if not result:
print 'list compare returned false'
return False
elif value != user_def[key]:
if debug:
print 'value should be identical'
print value
print user_def[key]
return False
# recurse on a dictionary
elif isinstance(value, dict):
if not isinstance(user_def[key], dict):
if debug:
print "dict returned false not instance of dict"
return False
# before passing ensure keys match
api_values = set(value.keys()) - set(skip)
user_values = set(user_def[key].keys()) - set(skip)
if api_values != user_values:
if debug:
print api_values
print user_values
print "keys are not equal in dict"
return False
result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
if not result:
if debug:
print "dict returned false"
print result
return False
# Verify each key, value pair is the same
else:
if not user_def.has_key(key) or value != user_def[key]:
if debug:
print "value not equal; user_def does not have key"
print value
                        print user_def.get(key)
return False
return True
class YeditException(Exception):
''' Exception class for Yedit '''
pass
class Yedit(object):
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([a-zA-Z-./]+)).?)+$"
re_key = r"(?:\[(-?\d+)\])|([a-zA-Z-./]+)"
def __init__(self, filename=None, content=None, content_type='yaml'):
self.content = content
self.filename = filename
self.__yaml_dict = content
self.content_type = content_type
if self.filename and not self.content:
self.load(content_type=self.content_type)
@property
def yaml_dict(self):
''' getter method for yaml_dict '''
return self.__yaml_dict
@yaml_dict.setter
def yaml_dict(self, value):
''' setter method for yaml_dict '''
self.__yaml_dict = value
@staticmethod
def remove_entry(data, key):
''' remove data at location key '''
if not (key and re.match(Yedit.re_valid_key, key) and isinstance(data, (list, dict))):
return None
key_indexes = re.findall(Yedit.re_key, key)
for arr_ind, dict_key in key_indexes[:-1]:
if dict_key and isinstance(data, dict):
data = data.get(dict_key, None)
elif arr_ind and isinstance(data, list) and int(arr_ind) <= len(data) - 1:
data = data[int(arr_ind)]
else:
return None
# process last index for remove
# expected list entry
if key_indexes[-1][0]:
if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1:
del data[int(key_indexes[-1][0])]
return True
# expected dict entry
elif key_indexes[-1][1]:
if isinstance(data, dict):
del data[key_indexes[-1][1]]
return True
@staticmethod
def add_entry(data, key, item=None):
        ''' Set an item in a dictionary using key notation a.b.c
            d = {'a': {'b': 'c'}}
            key = a.b
            sets the entry at that location to item
'''
if not (key and re.match(Yedit.re_valid_key, key) and isinstance(data, (list, dict))):
return None
curr_data = data
key_indexes = re.findall(Yedit.re_key, key)
for arr_ind, dict_key in key_indexes[:-1]:
if dict_key:
if isinstance(data, dict) and data.has_key(dict_key):
data = data[dict_key]
continue
data[dict_key] = {}
data = data[dict_key]
elif arr_ind and isinstance(data, list) and int(arr_ind) <= len(data) - 1:
data = data[int(arr_ind)]
else:
return None
# process last index for add
# expected list entry
if key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1:
data[int(key_indexes[-1][0])] = item
# expected dict entry
elif key_indexes[-1][1] and isinstance(data, dict):
data[key_indexes[-1][1]] = item
return curr_data
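    # Illustrative usage (editor's sketch, not part of the original module):
    # the static helpers look up and set entries in place.
    #
    #   >>> d = {'a': {'b': 'c'}}
    #   >>> Yedit.get_entry(d, 'a')
    #   {'b': 'c'}
    #   >>> Yedit.add_entry(d, 'e', 'f')   # returns d, with d['e'] == 'f'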
@staticmethod
def get_entry(data, key):
''' Get an item from a dictionary with key notation a.b.c
            d = {'a': {'b': 'c'}}
key = a.b
return c
'''
if not (key and re.match(Yedit.re_valid_key, key) and isinstance(data, (list, dict))):
return None
key_indexes = re.findall(Yedit.re_key, key)
for arr_ind, dict_key in key_indexes:
if dict_key and isinstance(data, dict):
data = data.get(dict_key, None)
elif arr_ind and isinstance(data, list) and int(arr_ind) <= len(data) - 1:
data = data[int(arr_ind)]
else:
return None
return data
def write(self):
''' write to file '''
if not self.filename:
raise YeditException('Please specify a filename.')
with open(self.filename, 'w') as yfd:
yfd.write(yaml.safe_dump(self.yaml_dict, default_flow_style=False))
def read(self):
        ''' read the file contents '''
# check if it exists
if not self.exists():
return None
contents = None
with open(self.filename) as yfd:
contents = yfd.read()
return contents
def exists(self):
''' return whether file exists '''
if os.path.exists(self.filename):
return True
return False
def load(self, content_type='yaml'):
''' return yaml file '''
contents = self.read()
if not contents:
return None
# check if it is yaml
try:
if content_type == 'yaml':
self.yaml_dict = yaml.load(contents)
elif content_type == 'json':
self.yaml_dict = json.loads(contents)
except yaml.YAMLError as _:
# Error loading yaml or json
return None
return self.yaml_dict
def get(self, key):
''' get a specified key'''
try:
entry = Yedit.get_entry(self.yaml_dict, key)
except KeyError as _:
entry = None
return entry
def delete(self, key):
''' remove key from a dict'''
try:
entry = Yedit.get_entry(self.yaml_dict, key)
except KeyError as _:
entry = None
if not entry:
return (False, self.yaml_dict)
result = Yedit.remove_entry(self.yaml_dict, key)
if not result:
return (False, self.yaml_dict)
return (True, self.yaml_dict)
def put(self, key, value):
''' put key, value into a dict '''
try:
entry = Yedit.get_entry(self.yaml_dict, key)
except KeyError as _:
entry = None
if entry == value:
return (False, self.yaml_dict)
result = Yedit.add_entry(self.yaml_dict, key, value)
if not result:
return (False, self.yaml_dict)
return (True, self.yaml_dict)
def create(self, key, value):
''' create a yaml file '''
if not self.exists():
self.yaml_dict = {key: value}
return (True, self.yaml_dict)
return (False, self.yaml_dict)
import time
class RouterConfig(object):
''' RouterConfig is a DTO for the router. '''
def __init__(self, rname, kubeconfig, router_options):
self.name = rname
self.kubeconfig = kubeconfig
self._router_options = router_options
@property
def router_options(self):
''' return router options '''
return self._router_options
def to_option_list(self):
''' return all options as a string'''
return RouterConfig.stringify(self.router_options)
@staticmethod
def stringify(options):
''' return hash as list of key value pairs '''
rval = []
for key, data in options.items():
if data['include'] and data['value']:
rval.append('--%s=%s' % (key.replace('_', '-'), data['value']))
return rval
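# Illustrative usage (editor's sketch, values hypothetical): only options
# flagged with include and holding a truthy value become CLI flags, with
# underscores mapped to dashes.
#
#   >>> RouterConfig.stringify({
#   ...     'service_account': {'value': 'router', 'include': True},
#   ...     'cert_file': {'value': '/tmp/router.crt', 'include': False}})
#   ['--service-account=router']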
class Router(OpenShiftCLI):
''' Class to wrap the oc command line tools '''
def __init__(self,
router_config,
verbose=False):
        ''' Constructor for Router
a router consists of 3 or more parts
- dc/router
- svc/router
- endpoint/router
'''
super(Router, self).__init__('default', router_config.kubeconfig, verbose)
self.rconfig = router_config
self.verbose = verbose
self.router_parts = [{'kind': 'dc', 'name': self.rconfig.name},
{'kind': 'svc', 'name': self.rconfig.name},
#{'kind': 'endpoints', 'name': self.rconfig.name},
]
def get(self, filter_kind=None):
''' return the self.router_parts '''
rparts = self.router_parts
parts = []
if filter_kind:
rparts = [part for part in self.router_parts if filter_kind == part['kind']]
for part in rparts:
parts.append(self._get(part['kind'], rname=part['name']))
return parts
def exists(self):
        '''return whether all the router parts exist '''
parts = self.get()
for part in parts:
if part['returncode'] != 0:
return False
return True
def delete(self):
        '''delete all the router parts '''
parts = []
for part in self.router_parts:
parts.append(self._delete(part['kind'], part['name']))
return parts
def create(self, dryrun=False, output=False, output_type='json'):
'''Create a deploymentconfig '''
# We need to create the pem file
router_pem = '/tmp/router.pem'
with open(router_pem, 'w') as rfd:
rfd.write(open(self.rconfig.router_options['cert_file']['value']).read())
rfd.write(open(self.rconfig.router_options['key_file']['value']).read())
atexit.register(Utils.cleanup, [router_pem])
self.rconfig.router_options['default_cert']['value'] = router_pem
options = self.rconfig.to_option_list()
cmd = ['router']
cmd.extend(options)
if dryrun:
cmd.extend(['--dry-run=True', '-o', 'json'])
results = self.openshift_cmd(cmd, oadm=True, output=output, output_type=output_type)
return results
def update(self):
'''run update for the router. This performs a delete and then create '''
parts = self.delete()
if any([part['returncode'] != 0 for part in parts]):
return parts
# Ugly built in sleep here.
time.sleep(15)
return self.create()
def needs_update(self, verbose=False):
''' check to see if we need to update '''
dc_inmem = self.get(filter_kind='dc')[0]
if dc_inmem['returncode'] != 0:
return dc_inmem
user_dc = self.create(dryrun=True, output=True, output_type='raw')
if user_dc['returncode'] != 0:
return user_dc
# Since the output from oadm_router is returned as raw
# we need to parse it. The first line is the stats_password
user_dc_results = user_dc['results'].split('\n')
# stats_password = user_dc_results[0]
# Load the string back into json and get the newly created dc
user_dc = json.loads('\n'.join(user_dc_results[1:]))['items'][0]
# Router needs some exceptions.
# We do not want to check the autogenerated password for stats admin
if not self.rconfig.router_options['stats_password']['value']:
for idx, env_var in enumerate(user_dc['spec']['template']['spec']['containers'][0]['env']):
if env_var['name'] == 'STATS_PASSWORD':
env_var['value'] = \
dc_inmem['results'][0]['spec']['template']['spec']['containers'][0]['env'][idx]['value']
# dry-run doesn't add the protocol to the ports section. We will manually do that.
for idx, port in enumerate(user_dc['spec']['template']['spec']['containers'][0]['ports']):
if 'protocol' not in port:
port['protocol'] = 'TCP'
# These are different when generating
skip = ['dnsPolicy',
'terminationGracePeriodSeconds',
'restartPolicy', 'timeoutSeconds',
'livenessProbe', 'readinessProbe',
'terminationMessagePath',
'rollingParams',
]
return not Utils.check_def_equal(user_dc, dc_inmem['results'][0], skip_keys=skip, debug=verbose)
def main():
'''
ansible oadm module for router
'''
module = AnsibleModule(
argument_spec=dict(
state=dict(default='present', type='str',
choices=['present', 'absent']),
debug=dict(default=False, type='bool'),
namespace=dict(default='default', type='str'),
name=dict(default='router', type='str'),
kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
credentials=dict(default='/etc/origin/master/openshift-router.kubeconfig', type='str'),
cert_file=dict(default=None, type='str'),
key_file=dict(default=None, type='str'),
image=dict(default=None, type='str'), #'openshift3/ose-${component}:${version}'
latest_image=dict(default=False, type='bool'),
labels=dict(default=None, type='list'),
ports=dict(default=['80:80', '443:443'], type='list'),
replicas=dict(default=1, type='int'),
selector=dict(default=None, type='str'),
service_account=dict(default='router', type='str'),
router_type=dict(default='haproxy-router', type='str'),
host_network=dict(default=True, type='bool'),
# external host options
external_host=dict(default=None, type='str'),
external_host_vserver=dict(default=None, type='str'),
external_host_insecure=dict(default=False, type='bool'),
external_host_partition_path=dict(default=None, type='str'),
external_host_username=dict(default=None, type='str'),
external_host_password=dict(default=None, type='str'),
external_host_private_key=dict(default=None, type='str'),
# Metrics
expose_metrics=dict(default=False, type='bool'),
metrics_image=dict(default=None, type='str'),
# Stats
stats_user=dict(default=None, type='str'),
stats_password=dict(default=None, type='str'),
stats_port=dict(default=1936, type='int'),
),
mutually_exclusive=[["router_type", "image"]],
supports_check_mode=True,
)
rconfig = RouterConfig(module.params['name'],
module.params['kubeconfig'],
{'credentials': {'value': module.params['credentials'], 'include': True},
'default_cert': {'value': None, 'include': True},
'cert_file': {'value': module.params['cert_file'], 'include': False},
'key_file': {'value': module.params['key_file'], 'include': False},
'image': {'value': module.params['image'], 'include': True},
'latest_image': {'value': module.params['latest_image'], 'include': True},
'labels': {'value': module.params['labels'], 'include': True},
'ports': {'value': ','.join(module.params['ports']), 'include': True},
'replicas': {'value': module.params['replicas'], 'include': True},
'selector': {'value': module.params['selector'], 'include': True},
'service_account': {'value': module.params['service_account'], 'include': True},
'router_type': {'value': module.params['router_type'], 'include': False},
'host_network': {'value': module.params['host_network'], 'include': True},
'external_host': {'value': module.params['external_host'], 'include': True},
'external_host_vserver': {'value': module.params['external_host_vserver'],
'include': True},
'external_host_insecure': {'value': module.params['external_host_insecure'],
'include': True},
'external_host_partition_path': {'value': module.params['external_host_partition_path'],
'include': True},
'external_host_username': {'value': module.params['external_host_username'],
'include': True},
'external_host_password': {'value': module.params['external_host_password'],
'include': True},
'external_host_private_key': {'value': module.params['external_host_private_key'],
'include': True},
'expose_metrics': {'value': module.params['expose_metrics'], 'include': True},
'metrics_image': {'value': module.params['metrics_image'], 'include': True},
'stats_user': {'value': module.params['stats_user'], 'include': True},
'stats_password': {'value': module.params['stats_password'], 'include': True},
'stats_port': {'value': module.params['stats_port'], 'include': True},
})
ocrouter = Router(rconfig)
state = module.params['state']
########
# Delete
########
if state == 'absent':
if not ocrouter.exists():
module.exit_json(changed=False, state="absent")
if module.check_mode:
module.exit_json(changed=False, msg='Would have performed a delete.')
api_rval = ocrouter.delete()
module.exit_json(changed=True, results=api_rval, state="absent")
if state == 'present':
########
# Create
########
if not ocrouter.exists():
if module.check_mode:
module.exit_json(changed=False, msg='Would have performed a create.')
api_rval = ocrouter.create()
module.exit_json(changed=True, results=api_rval, state="present")
########
# Update
########
if not ocrouter.needs_update():
module.exit_json(changed=False, state="present")
if module.check_mode:
module.exit_json(changed=False, msg='Would have performed an update.')
api_rval = ocrouter.update()
if api_rval['returncode'] != 0:
module.fail_json(msg=api_rval)
module.exit_json(changed=True, results=api_rval, state="present")
module.exit_json(failed=True,
changed=False,
results='Unknown state passed. %s' % state,
state="unknown")
# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
# import module snippets. These are required.
from ansible.module_utils.basic import *
main()
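# A minimal playbook sketch for this module (hedged: the cert/key paths below
# are placeholders, not verified defaults):
#
#   - name: ensure the default router exists
#     oadm_router:
#       state: present
#       replicas: 2
#       cert_file: /etc/origin/master/named_certificates/router.crt
#       key_file: /etc/origin/master/named_certificates/router.key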
|
twiest/openshift-tools
|
openshift/installer/vendored/openshift-ansible-git-2016-04-18/roles/lib_openshift_api/library/oadm_router.py
|
Python
|
apache-2.0
| 28,872
|
from ..broker import Broker
class FailOverConfigurationBroker(Broker):
controller = "fail_over_configurations"
def get_config(self, **kwargs):
"""Get the failover configuration for the specified unit.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param unit_id: Unit ID. When not set in an OC environment, the API request returns the failover configuration of all units.
:type unit_id: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return failover_progress: The id of the failover action output file.
:rtype failover_progress: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return fail_over_configuration: Text (json, xml or csv) interpretation of the current failover configuration.
:rtype fail_over_configuration: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return sync_ok: Success indicator of sync to neighbour operation.
:rtype sync_ok: Boolean
"""
return self.api_request(self._get_method_fullname("get_config"), kwargs)
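# A minimal usage sketch (hedged: client construction and get_broker() follow
# the infoblox_netmri package conventions; host and credentials are
# placeholders):
#
#   from infoblox_netmri.client import InfobloxNetMRI
#   client = InfobloxNetMRI(host="netmri.example.com",
#                           username="admin", password="secret")
#   broker = client.get_broker("FailOverConfiguration")
#   config = broker.get_config(unit_id=0)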
def action_status(self, **kwargs):
"""Shows failover action progress for specified unit.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param unit_id: Unit ID. Should be specified in OC/Collector environment. Default value is 0.
:type unit_id: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param id: The id of the session output file.
:type id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param read: File offset to show
:type read: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return read: Offset in bytes from the start of the file, to be used in the next get_progress call, in order to retrieve the next lines of the output.
:rtype read: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return output: Result of the failover action.
:rtype output: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return status: Status of the remaining output data to dump: 0 - no data to dump, 1 - more data is available
:rtype status: Integer
"""
return self.api_request(self._get_method_fullname("action_status"), kwargs)
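# Polling sketch for the offset-based protocol described above (hedged:
# progress_id would come from a prior action() call; per the docstring,
# status == 1 means more output is available and read is the next offset):
#
#   offset = 0
#   while True:
#       resp = broker.action_status(unit_id=0, id=progress_id, read=offset)
#       print(resp["output"])
#       if resp["status"] == 0:
#           break
#       offset = resp["read"]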
def action(self, **kwargs):
"""Performs the failover action (enable or disable) for the specified unit.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param unit_id: Unit ID. Should be specified in OC/Collector environment. Default value is 0.
:type unit_id: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param name: Failover action name, possible values: 'enable', 'disable'
:type name: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return failover_progress: The internal id of the failover action progress.
:rtype failover_progress: String
"""
return self.api_request(self._get_method_fullname("action"), kwargs)
def failover(self, **kwargs):
"""Switches the specified unit to the secondary role.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param unit_id: Unit ID. Should be specified in OC/Collector environment. Default value is 0.
:type unit_id: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return status: Text (json, xml or csv) interpretation of the operation result. Contains just unit_id and current status.
:rtype status: String
"""
return self.api_request(self._get_method_fullname("failover"), kwargs)
def set_config(self, **kwargs):
"""Sets the failover configuration for the specified unit.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param unit_id: Unit ID. Should be specified in OC/Collector environment. Default value is 0.
:type unit_id: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param primary_index: Primary index. It indicates who is primary now (1-first, 2-second).
:type primary_index: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param virtual_ip: Virtual IP address.
:type virtual_ip: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param virtual_hostname: Virtual hostname.
:type virtual_hostname: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param replication_direct_connect: Indicates if replication uses a direct connection through HA port. Default value is true.
:type replication_direct_connect: Boolean
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param replication_port: Replication port. Required for non direct connection replication.
:type replication_port: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param first_replication_ip: First replication IP. Required for non direct connection replication.
:type first_replication_ip: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param first_management_ip: First management IP. Required for secondary peer.
:type first_management_ip: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param first_management_hostname: First management hostname. Required for secondary peer.
:type first_management_hostname: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param first_replication_subnet: First replication subnet. Required for non direct connection replication.
:type first_replication_subnet: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param second_replication_ip: Second replication IP. Required for non direct connection replication.
:type second_replication_ip: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param second_management_ip: Second management IP. Required for secondary peer.
:type second_management_ip: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param second_management_hostname: Second management hostname. Required for secondary peer.
:type second_management_hostname: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param second_replication_subnet: Second replication subnet. Required for non direct connection replication.
:type second_replication_subnet: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return fail_over_configuration: Text (json, xml or csv) interpretation of the current failover configuration for the specified unit.
:rtype fail_over_configuration: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return sync_ok: Success indicator of sync to neighbour operation.
:rtype sync_ok: Boolean
"""
return self.api_request(self._get_method_fullname("set_config"), kwargs)
def status(self, **kwargs):
"""Get detailed failover replication and connection status for the specified unit.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param unit_id: Unit ID. Should be specified in OC/Collector environment. Default value is 0.
:type unit_id: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return status: JSON structure with the status information.
:rtype status: String
"""
return self.api_request(self._get_method_fullname("status"), kwargs)
def reset_config_for_collector(self, **kwargs):
"""Drop failover collector on the collector to re-fetch it next time failover preferences are opened
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param unit_id: Unit ID. Should be specified in OC/Collector environment
:type unit_id: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return status: Status of the operation
:rtype status: String
"""
return self.api_request(self._get_method_fullname("reset_config_for_collector"), kwargs)
|
infobloxopen/infoblox-netmri
|
infoblox_netmri/api/broker/v3_8_0/fail_over_configuration_broker.py
|
Python
|
apache-2.0
| 12,204
|
# testyacc.py
import unittest
try:
import StringIO
except ImportError:
import io as StringIO
import sys
import os
sys.path.insert(0,"..")
sys.tracebacklimit = 0
import ply.yacc
def check_expected(result,expected):
resultlines = []
for line in result.splitlines():
if line.startswith("WARNING: "):
line = line[9:]
elif line.startswith("ERROR: "):
line = line[7:]
resultlines.append(line)
expectedlines = expected.splitlines()
if len(resultlines) != len(expectedlines):
return False
for rline,eline in zip(resultlines,expectedlines):
if not rline.endswith(eline):
return False
return True
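# Behavior sketch for check_expected(): severity prefixes are stripped and
# each result line only has to *end with* the expected line, so absolute
# paths prepended by the runner do not break the match. For example (values
# assumed):
#
#   check_expected("WARNING: /tmp/yacc_dup.py:27: Function p_statement redefined\n",
#                  "yacc_dup.py:27: Function p_statement redefined\n")
#   # => True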
def run_import(module):
code = "import "+module
exec(code)
del sys.modules[module]
# Tests related to errors and warnings when building parsers
class YaccErrorWarningTests(unittest.TestCase):
def setUp(self):
sys.stderr = StringIO.StringIO()
sys.stdout = StringIO.StringIO()
try:
os.remove("parsetab.py")
os.remove("parsetab.pyc")
except OSError:
pass
def tearDown(self):
sys.stderr = sys.__stderr__
sys.stdout = sys.__stdout__
def test_yacc_badargs(self):
self.assertRaises(ply.yacc.YaccError,run_import,"yacc_badargs")
result = sys.stderr.getvalue()
self.assert_(check_expected(result,
"yacc_badargs.py:23: Rule 'p_statement_assign' has too many arguments\n"
"yacc_badargs.py:27: Rule 'p_statement_expr' requires an argument\n"
))
def test_yacc_badid(self):
self.assertRaises(ply.yacc.YaccError,run_import,"yacc_badid")
result = sys.stderr.getvalue()
self.assert_(check_expected(result,
"yacc_badid.py:32: Illegal name 'bad&rule' in rule 'statement'\n"
"yacc_badid.py:36: Illegal rule name 'bad&rule'\n"
))
def test_yacc_badprec(self):
try:
run_import("yacc_badprec")
except ply.yacc.YaccError:
result = sys.stderr.getvalue()
self.assert_(check_expected(result,
"precedence must be a list or tuple\n"
))
def test_yacc_badprec2(self):
self.assertRaises(ply.yacc.YaccError,run_import,"yacc_badprec2")
result = sys.stderr.getvalue()
self.assert_(check_expected(result,
"Bad precedence table\n"
))
def test_yacc_badprec3(self):
run_import("yacc_badprec3")
result = sys.stderr.getvalue()
self.assert_(check_expected(result,
"Precedence already specified for terminal 'MINUS'\n"
"Generating LALR tables\n"
))
def test_yacc_badrule(self):
self.assertRaises(ply.yacc.YaccError,run_import,"yacc_badrule")
result = sys.stderr.getvalue()
self.assert_(check_expected(result,
"yacc_badrule.py:24: Syntax error. Expected ':'\n"
"yacc_badrule.py:28: Syntax error in rule 'statement'\n"
"yacc_badrule.py:33: Syntax error. Expected ':'\n"
"yacc_badrule.py:42: Syntax error. Expected ':'\n"
))
def test_yacc_badtok(self):
try:
run_import("yacc_badtok")
except ply.yacc.YaccError:
result = sys.stderr.getvalue()
self.assert_(check_expected(result,
"tokens must be a list or tuple\n"))
def test_yacc_dup(self):
run_import("yacc_dup")
result = sys.stderr.getvalue()
self.assert_(check_expected(result,
"yacc_dup.py:27: Function p_statement redefined. Previously defined on line 23\n"
"Token 'EQUALS' defined, but not used\n"
"There is 1 unused token\n"
"Generating LALR tables\n"
))
def test_yacc_error1(self):
try:
run_import("yacc_error1")
except ply.yacc.YaccError:
result = sys.stderr.getvalue()
self.assert_(check_expected(result,
"yacc_error1.py:61: p_error() requires 1 argument\n"))
def test_yacc_error2(self):
try:
run_import("yacc_error2")
except ply.yacc.YaccError:
result = sys.stderr.getvalue()
self.assert_(check_expected(result,
"yacc_error2.py:61: p_error() requires 1 argument\n"))
def test_yacc_error3(self):
try:
run_import("yacc_error3")
except ply.yacc.YaccError:
e = sys.exc_info()[1]
result = sys.stderr.getvalue()
self.assert_(check_expected(result,
"'p_error' defined, but is not a function or method\n"))
def test_yacc_error4(self):
self.assertRaises(ply.yacc.YaccError,run_import,"yacc_error4")
result = sys.stderr.getvalue()
self.assert_(check_expected(result,
"yacc_error4.py:62: Illegal rule name 'error'. Already defined as a token\n"
))
def test_yacc_inf(self):
self.assertRaises(ply.yacc.YaccError,run_import,"yacc_inf")
result = sys.stderr.getvalue()
self.assert_(check_expected(result,
"Token 'NUMBER' defined, but not used\n"
"There is 1 unused token\n"
"Infinite recursion detected for symbol 'statement'\n"
"Infinite recursion detected for symbol 'expression'\n"
))
def test_yacc_literal(self):
self.assertRaises(ply.yacc.YaccError,run_import,"yacc_literal")
result = sys.stderr.getvalue()
self.assert_(check_expected(result,
"yacc_literal.py:36: Literal token '**' in rule 'expression' may only be a single character\n"
))
def test_yacc_misplaced(self):
self.assertRaises(ply.yacc.YaccError,run_import,"yacc_misplaced")
result = sys.stderr.getvalue()
self.assert_(check_expected(result,
"yacc_misplaced.py:32: Misplaced '|'\n"
))
def test_yacc_missing1(self):
self.assertRaises(ply.yacc.YaccError,run_import,"yacc_missing1")
result = sys.stderr.getvalue()
self.assert_(check_expected(result,
"yacc_missing1.py:24: Symbol 'location' used, but not defined as a token or a rule\n"
))
def test_yacc_nested(self):
run_import("yacc_nested")
result = sys.stdout.getvalue()
self.assert_(check_expected(result,
"A\n"
"A\n"
"A\n",
))
def test_yacc_nodoc(self):
run_import("yacc_nodoc")
result = sys.stderr.getvalue()
self.assert_(check_expected(result,
"yacc_nodoc.py:27: No documentation string specified in function 'p_statement_expr' (ignored)\n"
"Generating LALR tables\n"
))
def test_yacc_noerror(self):
run_import("yacc_noerror")
result = sys.stderr.getvalue()
self.assert_(check_expected(result,
"no p_error() function is defined\n"
"Generating LALR tables\n"
))
def test_yacc_nop(self):
run_import("yacc_nop")
result = sys.stderr.getvalue()
self.assert_(check_expected(result,
"yacc_nop.py:27: Possible grammar rule 'statement_expr' defined without p_ prefix\n"
"Generating LALR tables\n"
))
def test_yacc_notfunc(self):
run_import("yacc_notfunc")
result = sys.stderr.getvalue()
self.assert_(check_expected(result,
"'p_statement_assign' not defined as a function\n"
"Token 'EQUALS' defined, but not used\n"
"There is 1 unused token\n"
"Generating LALR tables\n"
))
def test_yacc_notok(self):
try:
run_import("yacc_notok")
except ply.yacc.YaccError:
result = sys.stderr.getvalue()
self.assert_(check_expected(result,
"No token list is defined\n"))
def test_yacc_rr(self):
run_import("yacc_rr")
result = sys.stderr.getvalue()
self.assert_(check_expected(result,
"Generating LALR tables\n"
"1 reduce/reduce conflict\n"
"reduce/reduce conflict in state 15 resolved using rule (statement -> NAME EQUALS NUMBER)\n"
"rejected rule (expression -> NUMBER)\n"
))
def test_yacc_simple(self):
run_import("yacc_simple")
result = sys.stderr.getvalue()
self.assert_(check_expected(result,
"Generating LALR tables\n"
))
def test_yacc_sr(self):
run_import("yacc_sr")
result = sys.stderr.getvalue()
self.assert_(check_expected(result,
"Generating LALR tables\n"
"20 shift/reduce conflicts\n"
))
def test_yacc_term1(self):
self.assertRaises(ply.yacc.YaccError,run_import,"yacc_term1")
result = sys.stderr.getvalue()
self.assert_(check_expected(result,
"yacc_term1.py:24: Illegal rule name 'NUMBER'. Already defined as a token\n"
))
def test_yacc_unused(self):
self.assertRaises(ply.yacc.YaccError,run_import,"yacc_unused")
result = sys.stderr.getvalue()
self.assert_(check_expected(result,
"yacc_unused.py:62: Symbol 'COMMA' used, but not defined as a token or a rule\n"
"Symbol 'COMMA' is unreachable\n"
"Symbol 'exprlist' is unreachable\n"
))
def test_yacc_unused_rule(self):
run_import("yacc_unused_rule")
result = sys.stderr.getvalue()
self.assert_(check_expected(result,
"yacc_unused_rule.py:62: Rule 'integer' defined, but not used\n"
"There is 1 unused rule\n"
"Symbol 'integer' is unreachable\n"
"Generating LALR tables\n"
))
def test_yacc_uprec(self):
self.assertRaises(ply.yacc.YaccError,run_import,"yacc_uprec")
result = sys.stderr.getvalue()
print(repr(result))
self.assert_(check_expected(result,
"yacc_uprec.py:37: Nothing known about the precedence of 'UMINUS'\n"
))
def test_yacc_uprec2(self):
self.assertRaises(ply.yacc.YaccError,run_import,"yacc_uprec2")
result = sys.stderr.getvalue()
self.assert_(check_expected(result,
"yacc_uprec2.py:37: Syntax error. Nothing follows %prec\n"
))
def test_yacc_prec1(self):
self.assertRaises(ply.yacc.YaccError,run_import,"yacc_prec1")
result = sys.stderr.getvalue()
self.assert_(check_expected(result,
"Precedence rule 'left' defined for unknown symbol '+'\n"
"Precedence rule 'left' defined for unknown symbol '*'\n"
"Precedence rule 'left' defined for unknown symbol '-'\n"
"Precedence rule 'left' defined for unknown symbol '/'\n"
))
unittest.main()
|
anuragiitg/nixysa
|
third_party/ply-3.1/test/testyacc.py
|
Python
|
apache-2.0
| 13,190
|
# Used swedish insurance data from smalldata instead of MASS/insurance due to the license of the MASS R package.
import h2o
from h2o.estimators.glm import H2OGeneralizedLinearEstimator
h2o.init()
h2o_df = h2o.import_file("http://h2o-public-test-data.s3.amazonaws.com/smalldata/glm_test/Motor_insurance_sweden.txt", sep = '\t')
poisson_fit = H2OGeneralizedLinearEstimator(family = "poisson")
poisson_fit.train(y="Claims", x = ["Payment", "Insured", "Kilometres", "Zone", "Bonus", "Make"], training_frame = h2o_df)
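# Follow-up inspection (a sketch; these accessors exist on H2O GLM models,
# though output details vary by h2o version):
# poisson_fit.coef() # fitted coefficients on the log link scale
# poisson_fit.predict(h2o_df) # expected claim counts for the training frame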
|
YzPaul3/h2o-3
|
h2o-docs/src/booklets/v2_2015/source/GLM_Vignette_code_examples/glm_poisson_example.py
|
Python
|
apache-2.0
| 514
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from dateutil import relativedelta
from odoo import fields, models, api
class LeadTest(models.Model):
_name = "base.automation.lead.test"
_description = "Automated Rule Test"
name = fields.Char(string='Subject', required=True, index=True)
user_id = fields.Many2one('res.users', string='Responsible')
state = fields.Selection([('draft', 'New'), ('cancel', 'Cancelled'), ('open', 'In Progress'),
('pending', 'Pending'), ('done', 'Closed')],
string="Status", readonly=True, default='draft')
active = fields.Boolean(default=True)
partner_id = fields.Many2one('res.partner', string='Partner')
date_action_last = fields.Datetime(string='Last Action', readonly=True)
customer = fields.Boolean(related='partner_id.customer', readonly=True, store=True)
line_ids = fields.One2many('base.automation.line.test', 'lead_id')
priority = fields.Boolean()
deadline = fields.Boolean(compute='_compute_deadline', store=True)
is_assigned_to_admin = fields.Boolean(string='Assigned to admin user')
@api.depends('priority')
def _compute_deadline(self):
for record in self:
if not record.priority:
record.deadline = False
else:
record.deadline = fields.Datetime.from_string(record.create_date) + relativedelta.relativedelta(days=3)
class LineTest(models.Model):
_name = "base.automation.line.test"
_description = "Automated Rule Line Test"
name = fields.Char()
lead_id = fields.Many2one('base.automation.lead.test', ondelete='cascade')
user_id = fields.Many2one('res.users')
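# Behavior sketch for the computed field above (hedged; model name taken
# from _name):
#   lead = env['base.automation.lead.test'].create({'name': 'demo'})
#   lead.priority = True
#   # _compute_deadline stores create_date + relativedelta(days=3)
#   lead.priority = False
#   # deadline is recomputed back to False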
|
t3dev/odoo
|
addons/base_automation/tests/test_models.py
|
Python
|
gpl-3.0
| 1,762
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import unittest
from openerp.tools.translate import quote, unquote, xml_translate
class TranslationToolsTestCase(unittest.TestCase):
def test_quote_unquote(self):
def test_string(str):
quoted = quote(str)
#print "\n1:", repr(str)
#print "2:", repr(quoted)
unquoted = unquote("".join(quoted.split('"\n"')))
#print "3:", repr(unquoted)
self.assertEquals(str, unquoted)
test_string("""test \nall kinds\n \n o\r
\\\\ nope\n\n"
""")
# The ones with 1+ backslashes directly followed by
# a newline or literal N can fail... we would need a
# state-machine parser to handle these, but this would
# be much slower so it's better to avoid them at the moment
self.assertRaises(AssertionError, quote, """test \nall kinds\n\no\r
\\\\nope\n\n"
""")
def test_translate_xml_base(self):
""" Test xml_translate() without formatting elements. """
terms = []
source = """<form string="Form stuff">
<h1>Blah blah blah</h1>
Put some more text here
<field name="foo"/>
</form>"""
result = xml_translate(terms.append, source)
self.assertEquals(result, source)
self.assertItemsEqual(terms,
['Form stuff', 'Blah blah blah', 'Put some more text here'])
def test_translate_xml_inline1(self):
""" Test xml_translate() with formatting elements. """
terms = []
source = """<form string="Form stuff">
<h1>Blah <i>blah</i> blah</h1>
Put some <b>more text</b> here
<field name="foo"/>
</form>"""
result = xml_translate(terms.append, source)
self.assertEquals(result, source)
self.assertItemsEqual(terms,
['Form stuff', 'Blah <i>blah</i> blah', 'Put some <b>more text</b> here'])
def test_translate_xml_inline2(self):
""" Test xml_translate() with formatting elements embedding other elements. """
terms = []
source = """<form string="Form stuff">
<b><h1>Blah <i>blah</i> blah</h1></b>
Put <em>some <b>more text</b></em> here
<field name="foo"/>
</form>"""
result = xml_translate(terms.append, source)
self.assertEquals(result, source)
self.assertItemsEqual(terms,
['Form stuff', 'Blah <i>blah</i> blah', 'Put <em>some <b>more text</b></em> here'])
def test_translate_xml_inline3(self):
""" Test xml_translate() with formatting elements without actual text. """
terms = []
source = """<form string="Form stuff">
<div>
<span class="before"/>
<h1>Blah blah blah</h1>
<span class="after">
<i class="hack"/>
</span>
</div>
</form>"""
result = xml_translate(terms.append, source)
self.assertEquals(result, source)
self.assertItemsEqual(terms,
['Form stuff', 'Blah blah blah'])
def test_translate_xml_t(self):
""" Test xml_translate() with t-* attributes. """
terms = []
source = """<t t-name="stuff">
stuff before
<span t-field="o.name"/>
stuff after
</t>"""
result = xml_translate(terms.append, source)
self.assertEquals(result, source)
self.assertItemsEqual(terms,
['stuff before', 'stuff after'])
def test_translate_xml_off(self):
""" Test xml_translate() with attribute translate="off". """
terms = []
source = """<div>
stuff before
<div translation="off">Do not translate this</div>
stuff after
</div>"""
result = xml_translate(terms.append, source)
self.assertEquals(result, source)
self.assertItemsEqual(terms,
['stuff before', 'stuff after'])
def test_translate_xml_attribute(self):
""" Test xml_translate() with <attribute> elements. """
terms = []
source = """<field name="foo" position="attributes">
<attribute name="string">Translate this</attribute>
<attribute name="option">Do not translate this</attribute>
</field>"""
result = xml_translate(terms.append, source)
self.assertEquals(result, source)
self.assertItemsEqual(terms,
['Translate this'])
def test_translate_xml_a(self):
""" Test xml_translate() with <a> elements. """
terms = []
source = """<t t-name="stuff">
<ul class="nav navbar-nav">
<li>
<a class="oe_menu_leaf" href="/web#menu_id=42&action=54">
<span class="oe_menu_text">Blah</span>
</a>
</li>
<li class="dropdown" id="menu_more_container" style="display: none;">
<a class="dropdown-toggle" data-toggle="dropdown" href="#">More <b class="caret"/></a>
<ul class="dropdown-menu" id="menu_more"/>
</li>
</ul>
</t>"""
result = xml_translate(terms.append, source)
self.assertEquals(result, source)
self.assertItemsEqual(terms,
['<span class="oe_menu_text">Blah</span>', 'More <b class="caret"/>'])
|
minhphung171093/GreenERP
|
openerp/addons/base/tests/test_translate.py
|
Python
|
gpl-3.0
| 6,065
|
"""
Map urls to the relevant view handlers
"""
from django.conf.urls import url
from openedx.core.djangoapps.zendesk_proxy.v0.views import ZendeskPassthroughView as v0_view
from openedx.core.djangoapps.zendesk_proxy.v1.views import ZendeskPassthroughView as v1_view
urlpatterns = [
url(r'^v0$', v0_view.as_view(), name='zendesk_proxy_v0'),
url(r'^v1$', v1_view.as_view(), name='zendesk_proxy_v1'),
]
|
eduNEXT/edunext-platform
|
openedx/core/djangoapps/zendesk_proxy/urls.py
|
Python
|
agpl-3.0
| 412
|
# Copyright (c) 2012 Citrix Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The Aggregate admin API extension."""
import datetime
from webob import exc
from nova.api.openstack import extensions
from nova.compute import api as compute_api
from nova import exception
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova import utils
LOG = logging.getLogger(__name__)
authorize = extensions.extension_authorizer('compute', 'aggregates')
def _get_context(req):
return req.environ['nova.context']
def get_host_from_body(fn):
"""Makes sure that the host exists."""
def wrapped(self, req, id, body, *args, **kwargs):
if len(body) == 1 and "host" in body:
host = body['host']
else:
raise exc.HTTPBadRequest()
return fn(self, req, id, host, *args, **kwargs)
return wrapped
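# Request-body sketches for the controller below (action names come from
# AggregateController.action(); host and metadata values are placeholders):
#   {"add_host": {"host": "compute1"}}
#   {"remove_host": {"host": "compute1"}}
#   {"set_metadata": {"metadata": {"ssd": "true"}}}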
class AggregateController(object):
"""The Host Aggregates API controller for the OpenStack API."""
def __init__(self):
self.api = compute_api.AggregateAPI()
def index(self, req):
"""Returns a list a host aggregate's id, name, availability_zone."""
context = _get_context(req)
authorize(context)
aggregates = self.api.get_aggregate_list(context)
return {'aggregates': [self._marshall_aggregate(a)['aggregate']
for a in aggregates]}
def create(self, req, body):
"""Creates an aggregate, given its name and availability_zone."""
context = _get_context(req)
authorize(context)
if len(body) != 1:
raise exc.HTTPBadRequest()
try:
host_aggregate = body["aggregate"]
name = host_aggregate["name"]
avail_zone = host_aggregate["availability_zone"]
except KeyError:
raise exc.HTTPBadRequest()
try:
utils.check_string_length(name, "Aggregate name", 1, 255)
except exception.InvalidInput as e:
raise exc.HTTPBadRequest(explanation=e.format_message())
if len(host_aggregate) != 2:
raise exc.HTTPBadRequest()
try:
aggregate = self.api.create_aggregate(context, name, avail_zone)
except exception.AggregateNameExists as e:
LOG.info(e)
raise exc.HTTPConflict()
except exception.InvalidAggregateAction as e:
LOG.info(e)
raise
return self._marshall_aggregate(aggregate)
def show(self, req, id):
"""Shows the details of an aggregate, hosts and metadata included."""
context = _get_context(req)
authorize(context)
try:
aggregate = self.api.get_aggregate(context, id)
except exception.AggregateNotFound:
LOG.info(_("Cannot show aggregate: %s"), id)
raise exc.HTTPNotFound()
return self._marshall_aggregate(aggregate)
def update(self, req, id, body):
"""Updates the name and/or availability_zone of given aggregate."""
context = _get_context(req)
authorize(context)
if len(body) != 1:
raise exc.HTTPBadRequest()
try:
updates = body["aggregate"]
except KeyError:
raise exc.HTTPBadRequest()
if len(updates) < 1:
raise exc.HTTPBadRequest()
for key in updates.keys():
if key not in ["name", "availability_zone"]:
raise exc.HTTPBadRequest()
if 'name' in updates:
try:
utils.check_string_length(updates['name'], "Aggregate name", 1,
255)
except exception.InvalidInput as e:
raise exc.HTTPBadRequest(explanation=e.format_message())
try:
aggregate = self.api.update_aggregate(context, id, updates)
except exception.AggregateNotFound:
LOG.info(_('Cannot update aggregate: %s'), id)
raise exc.HTTPNotFound()
return self._marshall_aggregate(aggregate)
def delete(self, req, id):
"""Removes an aggregate by id."""
context = _get_context(req)
authorize(context)
try:
self.api.delete_aggregate(context, id)
except exception.AggregateNotFound:
LOG.info(_('Cannot delete aggregate: %s'), id)
raise exc.HTTPNotFound()
def action(self, req, id, body):
_actions = {
'add_host': self._add_host,
'remove_host': self._remove_host,
'set_metadata': self._set_metadata,
}
for action, data in body.iteritems():
if action not in _actions.keys():
msg = _('Aggregates does not have %s action') % action
raise exc.HTTPBadRequest(explanation=msg)
return _actions[action](req, id, data)
raise exc.HTTPBadRequest(explanation=_("Invalid request body"))
@get_host_from_body
def _add_host(self, req, id, host):
"""Adds a host to the specified aggregate."""
context = _get_context(req)
authorize(context)
try:
aggregate = self.api.add_host_to_aggregate(context, id, host)
except (exception.AggregateNotFound, exception.ComputeHostNotFound):
LOG.info(_('Cannot add host %(host)s in aggregate %(id)s'),
{'host': host, 'id': id})
raise exc.HTTPNotFound()
except (exception.AggregateHostExists,
exception.InvalidAggregateAction) as e:
LOG.info(_('Cannot add host %(host)s in aggregate %(id)s'),
{'host': host, 'id': id})
raise exc.HTTPConflict(explanation=e.format_message())
return self._marshall_aggregate(aggregate)
@get_host_from_body
def _remove_host(self, req, id, host):
"""Removes a host from the specified aggregate."""
context = _get_context(req)
authorize(context)
try:
aggregate = self.api.remove_host_from_aggregate(context, id, host)
except (exception.AggregateNotFound, exception.AggregateHostNotFound,
exception.ComputeHostNotFound):
LOG.info(_('Cannot remove host %(host)s in aggregate %(id)s'),
{'host': host, 'id': id})
raise exc.HTTPNotFound()
except exception.InvalidAggregateAction:
LOG.info(_('Cannot remove host %(host)s in aggregate %(id)s'),
{'host': host, 'id': id})
raise exc.HTTPConflict()
return self._marshall_aggregate(aggregate)
def _set_metadata(self, req, id, body):
"""Replaces the aggregate's existing metadata with new metadata."""
context = _get_context(req)
authorize(context)
if len(body) != 1:
raise exc.HTTPBadRequest()
try:
metadata = body["metadata"]
except KeyError:
raise exc.HTTPBadRequest()
try:
aggregate = self.api.update_aggregate_metadata(context,
id, metadata)
except exception.AggregateNotFound:
LOG.info(_('Cannot set metadata %(metadata)s in aggregate %(id)s'),
{'metadata': metadata, 'id': id})
raise exc.HTTPNotFound()
return self._marshall_aggregate(aggregate)
def _marshall_aggregate(self, aggregate):
_aggregate = {}
for key, value in aggregate.items():
# NOTE(danms): The original API specified non-TZ-aware timestamps
if isinstance(value, datetime.datetime):
value = value.replace(tzinfo=None)
_aggregate[key] = value
return {"aggregate": _aggregate}
class Aggregates(extensions.ExtensionDescriptor):
"""Admin-only aggregate administration."""
name = "Aggregates"
alias = "os-aggregates"
namespace = "http://docs.openstack.org/compute/ext/aggregates/api/v1.1"
updated = "2012-01-12T00:00:00+00:00"
def __init__(self, ext_mgr):
ext_mgr.register(self)
def get_resources(self):
resources = []
res = extensions.ResourceExtension('os-aggregates',
AggregateController(),
member_actions={"action": "POST", })
resources.append(res)
return resources
|
ntt-sic/nova
|
nova/api/openstack/compute/contrib/aggregates.py
|
Python
|
apache-2.0
| 8,962
|
# Copyright (C) 2012 Balazs Ankes (bank@inf.u-szeged.hu) University of Szeged
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for png.py."""
import unittest
from png import PNGChecker
from webkitpy.common.system.filesystem_mock import MockFileSystem
from webkitpy.common.system.systemhost_mock import MockSystemHost
class PNGCheckerTest(unittest.TestCase):
"""Tests PNGChecker class."""
def test_init(self):
"""Test __init__() method."""
def mock_handle_style_error(self):
pass
checker = PNGChecker("test/config", mock_handle_style_error, MockSystemHost())
self.assertEqual(checker._file_path, "test/config")
self.assertEqual(checker._handle_style_error, mock_handle_style_error)
def test_check(self):
errors = []
def mock_handle_style_error(line_number, category, confidence, message):
error = (line_number, category, confidence, message)
errors.append(error)
fs = MockFileSystem()
file_path = "foo.png"
fs.write_binary_file(file_path, "Dummy binary data")
errors = []
checker = PNGChecker(file_path, mock_handle_style_error, MockSystemHost(os_name='linux', filesystem=fs))
checker.check()
self.assertEqual(len(errors), 0)
file_path = "foo-expected.png"
fs.write_binary_file(file_path, "Dummy binary data")
errors = []
checker = PNGChecker(file_path, mock_handle_style_error, MockSystemHost(os_name='linux', filesystem=fs))
checker.check()
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0], (0, 'image/png', 5, 'Image lacks a checksum. Generate pngs using run-webkit-tests to ensure they have a checksum.'))
|
hujiajie/chromium-crosswalk
|
third_party/WebKit/Tools/Scripts/webkitpy/style/checkers/png_unittest.py
|
Python
|
bsd-3-clause
| 3,001
|
# Test the Unicode versions of normal file functions
# open, os.open, os.stat, os.listdir, os.rename, os.remove, os.mkdir, os.chdir, os.rmdir
import sys, os, unittest
from unicodedata import normalize
from test import test_support
filenames = [
'1_abc',
u'2_ascii',
u'3_Gr\xfc\xdf-Gott',
u'4_\u0393\u03b5\u03b9\u03ac-\u03c3\u03b1\u03c2',
u'5_\u0417\u0434\u0440\u0430\u0432\u0441\u0442\u0432\u0443\u0439\u0442\u0435',
u'6_\u306b\u307d\u3093',
u'7_\u05d4\u05e9\u05e7\u05e6\u05e5\u05e1',
u'8_\u66e8\u66e9\u66eb',
u'9_\u66e8\u05e9\u3093\u0434\u0393\xdf',
# Specific code points: fn, NFC(fn) and NFKC(fn) all different
u'10_\u1fee\u1ffd',
]
# Mac OS X decomposes Unicode names, using Normal Form D.
# http://developer.apple.com/mac/library/qa/qa2001/qa1173.html
# "However, most volume formats do not follow the exact specification for
# these normal forms. For example, HFS Plus uses a variant of Normal Form D
# in which U+2000 through U+2FFF, U+F900 through U+FAFF, and U+2F800 through
# U+2FAFF are not decomposed."
if sys.platform != 'darwin':
filenames.extend([
# Specific code points: NFC(fn), NFD(fn), NFKC(fn) and NFKD(fn) all different
u'11_\u0385\u03d3\u03d4',
u'12_\u00a8\u0301\u03d2\u0301\u03d2\u0308', # == NFD(u'\u0385\u03d3\u03d4')
u'13_\u0020\u0308\u0301\u038e\u03ab', # == NFKC(u'\u0385\u03d3\u03d4')
u'14_\u1e9b\u1fc1\u1fcd\u1fce\u1fcf\u1fdd\u1fde\u1fdf\u1fed',
# Specific code points: fn, NFC(fn) and NFKC(fn) all different
u'15_\u1fee\u1ffd\ufad1',
u'16_\u2000\u2000\u2000A',
u'17_\u2001\u2001\u2001A',
u'18_\u2003\u2003\u2003A', # == NFC(u'\u2001\u2001\u2001A')
u'19_\u0020\u0020\u0020A', # u'\u0020' == u' ' == NFKC(u'\u2000') ==
# NFKC(u'\u2001') == NFKC(u'\u2003')
])
# Is it Unicode-friendly?
if not os.path.supports_unicode_filenames:
fsencoding = sys.getfilesystemencoding() or sys.getdefaultencoding()
try:
for name in filenames:
name.encode(fsencoding)
except UnicodeEncodeError:
raise unittest.SkipTest("only NT+ and systems with "
"Unicode-friendly filesystem encoding")
# Destroy directory dirname and all files under it, to one level.
def deltree(dirname):
# Don't hide legitimate errors: if one of these suckers exists, it's
# an error if we can't remove it.
if os.path.exists(dirname):
# must pass unicode to os.listdir() so we get back unicode results.
for fname in os.listdir(unicode(dirname)):
os.unlink(os.path.join(dirname, fname))
os.rmdir(dirname)
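# Normalization sketch (standard unicodedata behavior, shown for context):
#   normalize('NFC', u'a\u0301') == u'\xe1' # 'a' + combining acute -> 'á'
#   normalize('NFD', u'\xe1') == u'a\u0301' # and back again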
class UnicodeFileTests(unittest.TestCase):
files = set(filenames)
normal_form = None
def setUp(self):
try:
os.mkdir(test_support.TESTFN)
except OSError:
pass
files = set()
for name in self.files:
name = os.path.join(test_support.TESTFN, self.norm(name))
with open(name, 'w') as f:
f.write((name+'\n').encode("utf-8"))
os.stat(name)
files.add(name)
self.files = files
def tearDown(self):
deltree(test_support.TESTFN)
def norm(self, s):
if self.normal_form and isinstance(s, unicode):
return normalize(self.normal_form, s)
return s
def _apply_failure(self, fn, filename, expected_exception,
check_fn_in_exception = True):
with self.assertRaises(expected_exception) as c:
fn(filename)
exc_filename = c.exception.filename
# the "filename" exception attribute may be encoded
if isinstance(exc_filename, str):
filename = filename.encode(sys.getfilesystemencoding())
if check_fn_in_exception:
self.assertEqual(exc_filename, filename, "Function '%s(%r) failed "
"with bad filename in the exception: %r" %
(fn.__name__, filename, exc_filename))
def test_failures(self):
# Pass non-existing Unicode filenames all over the place.
for name in self.files:
name = "not_" + name
self._apply_failure(open, name, IOError)
self._apply_failure(os.stat, name, OSError)
self._apply_failure(os.chdir, name, OSError)
self._apply_failure(os.rmdir, name, OSError)
self._apply_failure(os.remove, name, OSError)
# listdir may append a wildcard to the filename, so don't check
self._apply_failure(os.listdir, name, OSError, False)
def test_open(self):
for name in self.files:
f = open(name, 'w')
f.write((name+'\n').encode("utf-8"))
f.close()
os.stat(name)
# Skip the test on darwin, because darwin normalizes the filename to
# a variant of Unicode NFD form. Normalizing the filename to NFC, NFKC or
# NFKD in Python is useless, because darwin will normalize it later and so
# open(), os.stat(), etc. don't raise any exception.
@unittest.skipIf(sys.platform == 'darwin', 'irrelevant test on Mac OS X')
def test_normalize(self):
files = set(f for f in self.files if isinstance(f, unicode))
others = set()
for nf in set(['NFC', 'NFD', 'NFKC', 'NFKD']):
others |= set(normalize(nf, file) for file in files)
others -= files
for name in others:
self._apply_failure(open, name, IOError)
self._apply_failure(os.stat, name, OSError)
self._apply_failure(os.chdir, name, OSError)
self._apply_failure(os.rmdir, name, OSError)
self._apply_failure(os.remove, name, OSError)
# listdir may append a wildcard to the filename, so don't check
self._apply_failure(os.listdir, name, OSError, False)
# Skip the test on darwin, because darwin uses a normalization different
# from Python's NFD normalization: filenames differ even if we use
# Python NFD normalization.
@unittest.skipIf(sys.platform == 'darwin', 'irrelevant test on Mac OS X')
def test_listdir(self):
sf0 = set(self.files)
f1 = os.listdir(test_support.TESTFN)
f2 = os.listdir(unicode(test_support.TESTFN,
sys.getfilesystemencoding()))
sf2 = set(os.path.join(unicode(test_support.TESTFN), f) for f in f2)
self.assertEqual(sf0, sf2)
self.assertEqual(len(f1), len(f2))
def test_rename(self):
for name in self.files:
os.rename(name, "tmp")
os.rename("tmp", name)
def test_directory(self):
dirname = os.path.join(test_support.TESTFN,
u'Gr\xfc\xdf-\u66e8\u66e9\u66eb')
filename = u'\xdf-\u66e8\u66e9\u66eb'
oldwd = os.getcwd()
os.mkdir(dirname)
os.chdir(dirname)
try:
with open(filename, 'w') as f:
f.write((filename + '\n').encode("utf-8"))
os.access(filename,os.R_OK)
os.remove(filename)
finally:
os.chdir(oldwd)
os.rmdir(dirname)
class UnicodeNFCFileTests(UnicodeFileTests):
normal_form = 'NFC'
class UnicodeNFDFileTests(UnicodeFileTests):
normal_form = 'NFD'
class UnicodeNFKCFileTests(UnicodeFileTests):
normal_form = 'NFKC'
class UnicodeNFKDFileTests(UnicodeFileTests):
normal_form = 'NFKD'
def test_main():
try:
test_support.run_unittest(
UnicodeFileTests,
UnicodeNFCFileTests,
UnicodeNFDFileTests,
UnicodeNFKCFileTests,
UnicodeNFKDFileTests,
)
finally:
deltree(test_support.TESTFN)
if __name__ == "__main__":
test_main()
|
ktan2020/legacy-automation
|
win/Lib/test/test_pep277.py
|
Python
|
mit
| 8,085
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
inOutSelector.py
---------------------
Date : April 2011
Copyright : (C) 2011 by Giuseppe Sucameli
Email : brush dot tyler at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Giuseppe Sucameli'
__date__ = 'April 2011'
__copyright__ = '(C) 2011, Giuseppe Sucameli'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from PyQt4.QtCore import SIGNAL, Qt, pyqtProperty
from PyQt4.QtGui import QWidget, QComboBox
from qgis.core import QgsMapLayerRegistry, QgsMapLayer
from ui_inOutSelector import Ui_GdalToolsInOutSelector
class GdalToolsInOutSelector(QWidget, Ui_GdalToolsInOutSelector):
FILE = 0x1
LAYER = 0x2
MULTIFILE = 0x4 # NOT IMPLEMENTED YET
FILE_LAYER = 0x1|0x2
FILES = 0x1|0x4 # NOT IMPLEMENTED YET
FILES_LAYER = 0x3|0x4 # NOT IMPLEMENTED YET
__pyqtSignals__ = ("selectClicked()", "filenameChanged()", "layerChanged()")
def __init__(self, parent=None, type=None):
QWidget.__init__(self, parent)
self.setupUi(self)
self.setFocusPolicy(Qt.StrongFocus)
self.combo.setInsertPolicy(QComboBox.NoInsert)
self.clear()
self.typ = None
if type is None:
self.resetType()
else:
self.setType(type)
self.connect(self.selectBtn, SIGNAL("clicked()"), self.selectButtonClicked)
self.connect(self.fileEdit, SIGNAL("textChanged(const QString &)"), self.textChanged)
self.connect(self.combo, SIGNAL("editTextChanged(const QString &)"), self.textChanged)
self.connect(self.combo, SIGNAL("currentIndexChanged(int)"), self.indexChanged)
def clear(self):
self.filenames = []
self.fileEdit.clear()
self.clearComboState()
self.combo.clear()
def textChanged(self):
if self.getType() & self.MULTIFILE:
self.filenames = self.fileEdit.text().split(",")
if self.getType() & self.LAYER:
index = self.combo.currentIndex()
if index >= 0:
text = self.combo.currentText()
if text != self.combo.itemText( index ):
return self.setFilename( text )
self.filenameChanged()
def indexChanged(self):
self.layerChanged()
self.filenameChanged()
def selectButtonClicked(self):
self.emit(SIGNAL("selectClicked()"))
def filenameChanged(self):
self.emit(SIGNAL("filenameChanged()"))
def layerChanged(self):
self.emit(SIGNAL("layerChanged()"))
def setType(self, type):
if type == self.typ:
return
if type & self.MULTIFILE: # MULTITYPE IS NOT IMPLEMENTED YET
type = type & ~self.MULTIFILE
self.typ = type
self.selectBtn.setVisible( self.getType() & self.FILE )
self.combo.setVisible( self.getType() & self.LAYER )
self.fileEdit.setVisible( not (self.getType() & self.LAYER) )
self.combo.setEditable( self.getType() & self.FILE )
if self.getType() & self.FILE:
self.setFocusProxy(self.selectBtn)
else:
self.setFocusProxy(self.combo)
# send signals to refresh connected widgets
self.filenameChanged()
self.layerChanged()
def getType(self):
return self.typ
def resetType(self):
self.setType( self.FILE_LAYER )
selectorType = pyqtProperty("int", getType, setType, resetType)
def setFilename(self, fn=None):
self.blockSignals( True )
prevFn, prevLayer = self.filename(), self.layer()
if isinstance(fn, QgsMapLayer):
fn = fn.source()
elif isinstance(fn, str) or isinstance(fn, unicode):
fn = unicode( fn )
# TODO test
elif isinstance(fn, list):
if len( fn ) > 0:
if self.getType() & self.MULTIFILE:
self.filenames = fn
#fn = "".join( fn, "," )
fn = ",".join( fn )
else:
fn = ''
else:
fn = ''
if not (self.getType() & self.LAYER):
self.fileEdit.setText( fn )
else:
self.combo.setCurrentIndex(-1)
self.combo.setEditText( fn )
self.blockSignals( False )
if self.filename() != prevFn:
self.filenameChanged()
if self.layer() != prevLayer:
self.layerChanged()
def setLayer(self, layer=None):
if not (self.getType() & self.LAYER):
return self.setFilename( layer )
self.blockSignals( True )
prevFn, prevLayer = self.filename(), self.layer()
if isinstance(layer, QgsMapLayer):
if self.combo.findData(layer.id()) >= 0:
index = self.combo.findData( layer.id() )
self.combo.setCurrentIndex( index )
else:
self.combo.setCurrentIndex( -1 )
self.combo.setEditText( layer.source() )
elif isinstance(layer, int) and layer >= 0 and layer < self.combo.count():
self.combo.setCurrentIndex( layer )
else:
self.combo.clearEditText()
self.combo.setCurrentIndex(-1)
self.blockSignals( False )
if self.filename() != prevFn:
self.filenameChanged()
if self.layer() != prevLayer:
self.layerChanged()
def setLayers(self, layers=None):
if layers is None or not hasattr(layers, '__iter__') or len(layers) <= 0:
self.combo.clear()
return
self.blockSignals( True )
prevFn, prevLayer = self.filename(), self.layer()
self.saveComboState()
self.combo.clear()
for l in layers:
self.combo.addItem( l.name(), l.id() )
self.restoreComboState()
self.blockSignals( False )
if self.filename() != prevFn:
self.filenameChanged()
if self.layer() != prevLayer:
self.layerChanged()
def clearComboState(self):
self.prevState = None
def saveComboState(self):
index = self.combo.currentIndex()
text = self.combo.currentText()
layerID = self.combo.itemData(index) if index >= 0 else ""
self.prevState = ( index, text, layerID )
def restoreComboState(self):
if self.prevState is None:
return
index, text, layerID = self.prevState
if index < 0:
if text == '' and self.combo.count() > 0:
index = 0
elif self.combo.findData( layerID ) < 0:
index = -1
text = ""
else:
index = self.combo.findData( layerID )
self.combo.setCurrentIndex( index )
if index >= 0:
text = self.combo.itemText( index )
self.combo.setEditText( text )
def layer(self):
if self.getType() != self.FILE and self.combo.currentIndex() >= 0:
layerID = self.combo.itemData(self.combo.currentIndex())
return QgsMapLayerRegistry.instance().mapLayer( layerID )
return None
def filename(self):
if not (self.getType() & self.LAYER):
if self.getType() & self.MULTIFILE:
return self.filenames
return self.fileEdit.text()
if self.combo.currentIndex() < 0:
if self.getType() & self.MULTIFILE:
return self.filenames
return self.combo.currentText()
layer = self.layer()
if layer is not None:
return layer.source()
return ''
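# A minimal usage sketch (hedged; assumes a QGIS/PyQt4 plugin context with
# raster layers already loaded in the registry):
#   selector = GdalToolsInOutSelector(type=GdalToolsInOutSelector.FILE_LAYER)
#   layers = QgsMapLayerRegistry.instance().mapLayers().values()
#   selector.setLayers([l for l in layers if l.type() == QgsMapLayer.RasterLayer])
#   print selector.filename() # source of the chosen layer or the typed path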
|
dracos/QGIS
|
python/plugins/GdalTools/tools/inOutSelector.py
|
Python
|
gpl-2.0
| 7,841
|
#
# Copyright (C) 2012 Stanislav Bohm
#
# This file is part of Kaira.
#
# Kaira is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License, or
# (at your option) any later version.
#
# Kaira is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Kaira. If not, see <http://www.gnu.org/licenses/>.
#
import os
KAIRA_PTP = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
KAIRA_ROOT = os.path.dirname(KAIRA_PTP)
KAIRA_CONFIG_INI = os.path.join(KAIRA_ROOT, "build", "config.ini")
CAILIE_INCLUDE_DIR = "libs/cailie"
CAILIE_LIB_DIR = "build/libs/cailie"
CASERVER_INCLUDE_DIR = "libs/caserver"
CASERVER_LIB_DIR = "build/libs/caserver"
CACLIENT_INCLUDE_DIR = "libs/caclient"
CACLIENT_LIB_DIR = "build/libs/caclient"
CAVERIF_INCLUDE_DIR = "libs/caverif"
CAVERIF_LIB_DIR = "build/libs/caverif"
CASIMRUN_INCLUDE_DIR = "libs/casimrun"
CASIMRUN_LIB_DIR = "build/libs/casimrun"
CAOCTAVE_INCLUDE_DIR = "libs/caoctave"
|
Kobzol/kaira
|
ptp/base/paths.py
|
Python
|
gpl-3.0
| 1,340
|
# Copyright 2014 Mirantis.inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy as sa
def upgrade(migrate_engine):
if migrate_engine.name == 'mysql':
meta = sa.MetaData(bind=migrate_engine)
endpoint = sa.Table('endpoint', meta, autoload=True)
# NOTE(i159): MySQL requires indexes on referencing columns, and those
# indexes are created automatically. Those indexes will have different
# names depending on the version of MySQL used. We should make this
# naming consistent by reverting the index name to a consistent one.
if any(i for i in endpoint.indexes if
i.columns.keys() == ['service_id'] and i.name != 'service_id'):
# NOTE(i159): this action re-creates the index under the new name,
# which can be considered a rename under the MySQL rules.
sa.Index('service_id', endpoint.c.service_id).create()
user_group_membership = sa.Table('user_group_membership',
meta, autoload=True)
if any(i for i in user_group_membership.indexes if
i.columns.keys() == ['group_id'] and i.name != 'group_id'):
sa.Index('group_id', user_group_membership.c.group_id).create()
def downgrade(migrate_engine):
# NOTE(i159): the index exists only in MySQL schemas, and it only got an
# inconsistent name when MySQL 5.5 renamed it after re-creation
# (during migrations). Since we merely fixed an inconsistency, there is
# no need to revert it.
pass
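
# A hedged sketch (not part of the original migration): the index-name check
# performed in upgrade(), extracted into a helper for illustration. The
# helper name is hypothetical.
def _has_misnamed_index(table, column_name):
    """Return True if `table` has a single-column index on `column_name`
    whose name differs from the column name; MySQL names auto-created
    foreign-key indexes differently across server versions."""
    return any(i for i in table.indexes
               if i.columns.keys() == [column_name] and i.name != column_name)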
|
rushiagr/keystone
|
keystone/common/sql/migrate_repo/versions/050_fk_consistent_indexes.py
|
Python
|
apache-2.0
| 2,112
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'configdialog.ui'
#
# Created by: PyQt4 UI code generator 4.5.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
class Ui_Dialog(object):
def setupUi(self, Dialog):
Dialog.setObjectName("Dialog")
Dialog.resize(993, 455)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(":/icons/kaddressbook.svg"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
Dialog.setWindowIcon(icon)
self.verticalLayout_6 = QtGui.QVBoxLayout(Dialog)
self.verticalLayout_6.setObjectName("verticalLayout_6")
self.splitter = QtGui.QSplitter(Dialog)
self.splitter.setOrientation(QtCore.Qt.Horizontal)
self.splitter.setObjectName("splitter")
self.pagelist = QtGui.QListWidget(self.splitter)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.pagelist.sizePolicy().hasHeightForWidth())
self.pagelist.setSizePolicy(sizePolicy)
self.pagelist.setMaximumSize(QtCore.QSize(180, 16777215))
self.pagelist.setObjectName("pagelist")
self.layoutWidget = QtGui.QWidget(self.splitter)
self.layoutWidget.setObjectName("layoutWidget")
self.verticalLayout = QtGui.QVBoxLayout(self.layoutWidget)
self.verticalLayout.setObjectName("verticalLayout")
self.container = QtGui.QScrollArea(self.layoutWidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(5)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.container.sizePolicy().hasHeightForWidth())
self.container.setSizePolicy(sizePolicy)
self.container.setFrameShape(QtGui.QFrame.NoFrame)
self.container.setWidgetResizable(True)
self.container.setObjectName("container")
self.scrollAreaWidgetContents = QtGui.QWidget(self.container)
self.scrollAreaWidgetContents.setGeometry(QtCore.QRect(0, 0, 241, 399))
self.scrollAreaWidgetContents.setObjectName("scrollAreaWidgetContents")
self.verticalLayout_3 = QtGui.QVBoxLayout(self.scrollAreaWidgetContents)
self.verticalLayout_3.setMargin(0)
self.verticalLayout_3.setObjectName("verticalLayout_3")
self.layout = QtGui.QVBoxLayout()
self.layout.setObjectName("layout")
self.verticalLayout_3.addLayout(self.layout)
self.container.setWidget(self.scrollAreaWidgetContents)
self.verticalLayout.addWidget(self.container)
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
spacerItem = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem)
self.pushButton_2 = QtGui.QPushButton(self.layoutWidget)
self.pushButton_2.setObjectName("pushButton_2")
self.horizontalLayout.addWidget(self.pushButton_2)
self.verticalLayout.addLayout(self.horizontalLayout)
self.tabWidget = QtGui.QTabWidget(self.splitter)
self.tabWidget.setObjectName("tabWidget")
self.tab = QtGui.QWidget()
self.tab.setObjectName("tab")
self.verticalLayout_2 = QtGui.QVBoxLayout(self.tab)
self.verticalLayout_2.setMargin(0)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.scrollArea = QtGui.QScrollArea(self.tab)
self.scrollArea.setWidgetResizable(True)
self.scrollArea.setObjectName("scrollArea")
self.scrollAreaWidgetContents_2 = QtGui.QWidget(self.scrollArea)
self.scrollAreaWidgetContents_2.setGeometry(QtCore.QRect(0, 0, 532, 405))
self.scrollAreaWidgetContents_2.setObjectName("scrollAreaWidgetContents_2")
self.verticalLayout_4 = QtGui.QVBoxLayout(self.scrollAreaWidgetContents_2)
self.verticalLayout_4.setSpacing(3)
self.verticalLayout_4.setContentsMargins(0, 3, 0, -1)
self.verticalLayout_4.setObjectName("verticalLayout_4")
self.horizontalLayout_4 = QtGui.QHBoxLayout()
self.horizontalLayout_4.setObjectName("horizontalLayout_4")
self.zoomin = QtGui.QToolButton(self.scrollAreaWidgetContents_2)
icon1 = QtGui.QIcon()
icon1.addPixmap(QtGui.QPixmap(":/icons/viewmag+.svg"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.zoomin.setIcon(icon1)
self.zoomin.setObjectName("zoomin")
self.horizontalLayout_4.addWidget(self.zoomin)
self.zoomout = QtGui.QToolButton(self.scrollAreaWidgetContents_2)
icon2 = QtGui.QIcon()
icon2.addPixmap(QtGui.QPixmap(":/icons/viewmag-.svg"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.zoomout.setIcon(icon2)
self.zoomout.setObjectName("zoomout")
self.horizontalLayout_4.addWidget(self.zoomout)
spacerItem1 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_4.addItem(spacerItem1)
self.verticalLayout_4.addLayout(self.horizontalLayout_4)
self.preview = QtGui.QLabel(self.scrollAreaWidgetContents_2)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.preview.sizePolicy().hasHeightForWidth())
self.preview.setSizePolicy(sizePolicy)
self.preview.setFrameShape(QtGui.QFrame.NoFrame)
self.preview.setObjectName("preview")
self.verticalLayout_4.addWidget(self.preview)
spacerItem2 = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.verticalLayout_4.addItem(spacerItem2)
self.scrollArea.setWidget(self.scrollAreaWidgetContents_2)
self.verticalLayout_2.addWidget(self.scrollArea)
self.tabWidget.addTab(self.tab, "")
self.tab_2 = QtGui.QWidget()
self.tab_2.setObjectName("tab_2")
self.verticalLayout_5 = QtGui.QVBoxLayout(self.tab_2)
self.verticalLayout_5.setMargin(0)
self.verticalLayout_5.setObjectName("verticalLayout_5")
self.snippet = QtGui.QTextBrowser(self.tab_2)
self.snippet.setObjectName("snippet")
self.verticalLayout_5.addWidget(self.snippet)
self.tabWidget.addTab(self.tab_2, "")
self.verticalLayout_6.addWidget(self.splitter)
self.retranslateUi(Dialog)
self.tabWidget.setCurrentIndex(0)
QtCore.QObject.connect(self.pushButton_2, QtCore.SIGNAL("clicked()"), Dialog.accept)
QtCore.QMetaObject.connectSlotsByName(Dialog)
def retranslateUi(self, Dialog):
Dialog.setWindowTitle(QtGui.QApplication.translate("Dialog", "Bookrest Settings", None, QtGui.QApplication.UnicodeUTF8))
self.pushButton_2.setText(QtGui.QApplication.translate("Dialog", "Close", None, QtGui.QApplication.UnicodeUTF8))
self.zoomin.setText(QtGui.QApplication.translate("Dialog", "...", None, QtGui.QApplication.UnicodeUTF8))
self.zoomout.setText(QtGui.QApplication.translate("Dialog", "...", None, QtGui.QApplication.UnicodeUTF8))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab), QtGui.QApplication.translate("Dialog", "Preview", None, QtGui.QApplication.UnicodeUTF8))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_2), QtGui.QApplication.translate("Dialog", "Output", None, QtGui.QApplication.UnicodeUTF8))
import icons_rc
if __name__ == "__main__":
import sys
app = QtGui.QApplication(sys.argv)
Dialog = QtGui.QDialog()
ui = Ui_Dialog()
ui.setupUi(Dialog)
Dialog.show()
sys.exit(app.exec_())
|
shakna-israel/rst2pdf
|
gui/Ui_configdialog.py
|
Python
|
mit
| 7,900
|
"""
Module for Image annotations using annotator.
"""
from lxml import etree
from pkg_resources import resource_string
from xmodule.x_module import XModule
from xmodule.raw_module import RawDescriptor
from xblock.core import Scope, String
from xmodule.annotator_mixin import get_instructions, html_to_text
from xmodule.annotator_token import retrieve_token
from xblock.fragment import Fragment
import textwrap
# Make '_' a no-op so we can scrape strings
_ = lambda text: text
class AnnotatableFields(object):
""" Fields for `ImageModule` and `ImageDescriptor`. """
data = String(help=_("XML data for the annotation"),
scope=Scope.content,
default=textwrap.dedent("""\
<annotatable>
<instructions>
<p>
Add the instructions to the assignment here.
</p>
</instructions>
<p>
Lorem ipsum dolor sit amet, at amet animal petentium nec. Id augue nemore postulant mea. Ex eam dicant noluisse expetenda, alia admodum abhorreant qui et. An ceteros expetenda mea, tale natum ipsum quo no, ut pro paulo alienum noluisse.
</p>
<json>
navigatorSizeRatio: 0.25,
wrapHorizontal: false,
showNavigator: true,
navigatorPosition: "BOTTOM_LEFT",
showNavigationControl: true,
tileSources: [{"profile": "http://library.stanford.edu/iiif/image-api/1.1/compliance.html#level2", "scale_factors": [1, 2, 4, 8, 16, 32, 64], "tile_height": 1024, "height": 3466, "width": 113793, "tile_width": 1024, "qualities": ["native", "bitonal", "grey", "color"], "formats": ["jpg", "png", "gif"], "@context": "http://library.stanford.edu/iiif/image-api/1.1/context.json", "@id": "http://54.187.32.48/loris/suzhou_orig.jp2"}],
</json>
</annotatable>
"""))
display_name = String(
display_name=_("Display Name"),
help=_("Display name for this module"),
scope=Scope.settings,
default=_('Image Annotation'),
)
instructor_tags = String(
display_name=_("Tags for Assignments"),
help=_("Add tags that automatically highlight in a certain color using the comma-separated form, i.e. imagery:red,parallelism:blue"),
scope=Scope.settings,
default='professor:green,teachingAssistant:blue',
)
annotation_storage_url = String(
help=_("Location of Annotation backend"),
scope=Scope.settings,
default="http://your_annotation_storage.com",
display_name=_("Url for Annotation Storage")
)
annotation_token_secret = String(
help=_("Secret string for annotation storage"),
scope=Scope.settings,
default="xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx",
display_name=_("Secret Token String for Annotation")
)
default_tab = String(
display_name=_("Default Annotations Tab"),
help=_("Select which tab will be the default in the annotations table: myNotes, Instructor, or Public."),
scope=Scope.settings,
default="myNotes",
)
# currently only supports one instructor, will build functionality for multiple later
instructor_email = String(
display_name=_("Email for 'Instructor' Annotations"),
help=_("Email of the user that will be attached to all annotations that will be found in 'Instructor' tab."),
scope=Scope.settings,
default="",
)
annotation_mode = String(
display_name=_("Mode for Annotation Tool"),
help=_("Type in number corresponding to following modes: 'instructor' or 'everyone'"),
scope=Scope.settings,
default="everyone",
)
class ImageAnnotationModule(AnnotatableFields, XModule):
'''Image Annotation Module'''
js = {
'coffee': [
resource_string(__name__, 'js/src/javascript_loader.coffee'),
resource_string(__name__, 'js/src/html/display.coffee'),
resource_string(__name__, 'js/src/annotatable/display.coffee'),
],
'js': [
resource_string(__name__, 'js/src/collapsible.js'),
]
}
css = {'scss': [resource_string(__name__, 'css/annotatable/display.scss')]}
icon_class = 'imageannotation'
def __init__(self, *args, **kwargs):
super(ImageAnnotationModule, self).__init__(*args, **kwargs)
xmltree = etree.fromstring(self.data)
self.instructions = self._extract_instructions(xmltree)
self.openseadragonjson = html_to_text(etree.tostring(xmltree.find('json'), encoding='unicode'))
self.user_email = ""
self.is_course_staff = False
if self.runtime.get_user_role() in ['instructor', 'staff']:
self.is_course_staff = True
if self.runtime.get_real_user is not None:
try:
self.user_email = self.runtime.get_real_user(self.runtime.anonymous_student_id).email
except Exception: # pylint: disable=broad-except
self.user_email = _("No email address found.")
def _extract_instructions(self, xmltree):
""" Removes <instructions> from the xmltree and returns them as a string, otherwise None. """
return get_instructions(xmltree)
def student_view(self, context):
""" Renders parameters to template. """
context = {
'display_name': self.display_name_with_default,
'instructions_html': self.instructions,
'token': retrieve_token(self.user_email, self.annotation_token_secret),
'tag': self.instructor_tags,
'openseadragonjson': self.openseadragonjson,
'annotation_storage': self.annotation_storage_url,
'default_tab': self.default_tab,
'instructor_email': self.instructor_email,
'annotation_mode': self.annotation_mode,
'is_course_staff': self.is_course_staff,
}
fragment = Fragment(self.system.render_template('imageannotation.html', context))
# TinyMCE already exists in Studio so we should not load the files again
        # get_real_user always returns None in Studio since its runtime contains no anonymous student IDs
if self.runtime.get_real_user is not None:
fragment.add_javascript_url(self.runtime.STATIC_URL + "js/vendor/tinymce/js/tinymce/tinymce.full.min.js")
fragment.add_javascript_url(self.runtime.STATIC_URL + "js/vendor/tinymce/js/tinymce/jquery.tinymce.min.js")
return fragment
class ImageAnnotationDescriptor(AnnotatableFields, RawDescriptor): # pylint: disable=abstract-method
''' Image annotation descriptor '''
module_class = ImageAnnotationModule
mako_template = "widgets/raw-edit.html"
@property
def non_editable_metadata_fields(self):
non_editable_fields = super(ImageAnnotationDescriptor, self).non_editable_metadata_fields
non_editable_fields.extend([
ImageAnnotationDescriptor.annotation_storage_url,
ImageAnnotationDescriptor.annotation_token_secret,
])
return non_editable_fields
|
c0710204/edx-platform
|
common/lib/xmodule/xmodule/imageannotation_module.py
|
Python
|
agpl-3.0
| 7,154
|
"""A Mailman newsletter subscription interface.
To use this plugin, enable the newsletter module and set the newsletter module and name settings
in the admin settings page.
"""
from django.utils.translation import ugettext as _
from Mailman import MailList, Errors
from models import Subscription
from livesettings import config_value
import logging
import sys
log = logging.getLogger('newsletter.mailman')
class UserDesc: pass
def is_subscribed(contact):
return Subscription.email_is_subscribed(contact.email)
def update_contact(contact, subscribe, attributes={}):
email = contact.email
current = Subscription.email_is_subscribed(email)
attributesChanged = False
sub = None
if attributes:
sub, created = Subscription.objects.get_or_create(email=email)
if created:
attributesChanged = True
else:
oldAttr = [(a.name,a.value) for a in sub.attributes.all()]
oldAttr.sort()
sub.update_attributes(attributes)
newAttr = [(a.name,a.value) for a in sub.attributes.all()]
newAttr.sort()
if not created:
attributesChanged = oldAttr != newAttr
if current == subscribe:
if subscribe:
if attributesChanged:
result = _("Updated subscription for %(email)s.")
else:
result = _("Already subscribed %(email)s.")
else:
result = _("Already removed %(email)s.")
else:
if not sub:
sub, created = Subscription.objects.get_or_create(email=email)
sub.subscribed = subscribe
sub.save()
if subscribe:
mailman_add(contact)
result = _("Subscribed: %(email)s")
else:
mailman_remove(contact)
result = _("Unsubscribed: %(email)s")
return result % { 'email' : email }
def mailman_add(contact, listname=None, send_welcome_msg=None, admin_notify=None):
"""Add a Satchmo contact to a mailman mailing list.
    Parameters:
    - `contact`: a Satchmo Contact
    - `listname`: the Mailman list name, defaulting to whatever you have set in settings.NEWSLETTER_NAME
    - `send_welcome_msg`: True or False, defaulting to the list default
    - `admin_notify`: True or False, defaulting to the list default
    """
mm, listname = _get_maillist(listname)
print >> sys.stderr, 'mailman adding %s to %s' % (contact.email, listname)
if send_welcome_msg is None:
send_welcome_msg = mm.send_welcome_msg
userdesc = UserDesc()
userdesc.fullname = contact.full_name
userdesc.address = contact.email
userdesc.digest = False
if mm.isMember(contact.email):
print >> sys.stderr, _('Already Subscribed: %s' % contact.email)
else:
try:
try:
mm.Lock()
mm.ApprovedAddMember(userdesc, send_welcome_msg, admin_notify)
mm.Save()
print >> sys.stderr, _('Subscribed: %(email)s') % { 'email' : contact.email }
except Errors.MMAlreadyAMember:
print >> sys.stderr, _('Already a member: %(email)s') % { 'email' : contact.email }
except Errors.MMBadEmailError:
if userdesc.address == '':
print >> sys.stderr, _('Bad/Invalid email address: blank line')
else:
print >> sys.stderr, _('Bad/Invalid email address: %(email)s') % { 'email' : contact.email }
except Errors.MMHostileAddress:
print >> sys.stderr, _('Hostile address (illegal characters): %(email)s') % { 'email' : contact.email }
finally:
mm.Unlock()
def mailman_remove(contact, listname=None, userack=None, admin_notify=None):
"""Remove a Satchmo contact from a Mailman mailing list
Parameters:
- `contact`: A Satchmo contact
- `listname`: the Mailman listname, defaulting to whatever you have set in settings.NEWSLETTER_NAME
- `userack`: True or False, whether to notify the user, defaulting to the list default
- `admin_notify`: True or False, defaulting to the list default
"""
mm, listname = _get_maillist(listname)
print >> sys.stderr, 'mailman removing %s from %s' % (contact.email, listname)
if mm.isMember(contact.email):
try:
mm.Lock()
mm.ApprovedDeleteMember(contact.email, 'satchmo_ext.newsletter', admin_notify, userack)
mm.Save()
finally:
mm.Unlock()
def _get_maillist(listname):
try:
if not listname:
listname = config_value('NEWSLETTER', 'NEWSLETTER_NAME')
if listname == "":
log.warn("NEWSLETTER_NAME not set in store settings")
raise NameError('No NEWSLETTER_NAME in settings')
return MailList.MailList(listname, lock=0), listname
except Errors.MMUnknownListError:
print >> sys.stderr, "Can't find the MailMan newsletter: %s" % listname
raise NameError('No such newsletter, "%s"' % listname)
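
# Hypothetical usage sketch (not part of the original module), assuming
# `contact` is a Satchmo Contact with a valid email address:
#
#     msg = update_contact(contact, True, {'source': 'checkout'})
#     log.info(msg)  # e.g. "Subscribed: user@example.com"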
|
mitchellzen/pops
|
satchmo/apps/satchmo_ext/newsletter/mailman.py
|
Python
|
bsd-3-clause
| 5,119
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2008, 2009, 2010, 2011, 2012, 2013 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
from __future__ import print_function
# pylint: disable=C0103
"""
BibEdit CLI tool.
Usage: bibedit [options]
General options::
-h, --help print this help
-V, --version print version number
Options to inspect record history::
--list-revisions [recid] list all revisions of a record
--list-revisions-details [recid] list detailed revisions of a record
--get-revision [recid.revdate] print MARCXML of given record revision
--diff-revisions [recidA.revdateB] [recidC.revdateD] print MARCXML difference between
record A dated B and record C dated D
--revert-to-revision [recid.revdate] submit given record revision to
become current revision
--check-revisions [recid] check if revisions are not corrupted
(* stands for all records)
--fix-revisions [recid] fix revisions that are corrupted
(* stands for all records)
--clean-revisions [recid] clean duplicate revisions
(* stands for all records)
"""
__revision__ = "$Id$"
import sys
import zlib
from invenio.legacy.dbquery import run_sql
from intbitset import intbitset
from invenio.legacy.bibedit.utils import get_marcxml_of_revision_id, \
get_record_revision_ids, get_xml_comparison, record_locked_by_other_user, \
record_locked_by_queue, revision_format_valid_p, save_xml_record, \
split_revid, get_info_of_revision_id, get_record_revisions
from invenio.legacy.bibrecord import create_record, records_identical
def print_usage():
"""Print help."""
print(__doc__)
def print_version():
"""Print version information."""
print(__revision__)
def cli_clean_revisions(recid, dry_run=True, verbose=True):
"""Clean revisions of the given recid, by removing duplicate revisions
that do not change the content of the record."""
if recid == '*':
recids = intbitset(run_sql("SELECT DISTINCT id_bibrec FROM hstRECORD"))
else:
try:
recids = [int(recid)]
except ValueError:
print('ERROR: record ID must be integer, not %s.' % recid)
sys.exit(1)
for recid in recids:
all_revisions = run_sql("SELECT marcxml, job_id, job_name, job_person, job_date FROM hstRECORD WHERE id_bibrec=%s ORDER BY job_date ASC", (recid,))
previous_rec = {}
deleted_revisions = 0
for marcxml, job_id, job_name, job_person, job_date in all_revisions:
try:
current_rec = create_record(zlib.decompress(marcxml))[0]
except Exception:
print("ERROR: corrupted revisions found. Please run %s --fix-revisions '*'" % sys.argv[0], file=sys.stderr)
sys.exit(1)
if records_identical(current_rec, previous_rec):
deleted_revisions += 1
if not dry_run:
run_sql("DELETE FROM hstRECORD WHERE id_bibrec=%s AND job_id=%s AND job_name=%s AND job_person=%s AND job_date=%s", (recid, job_id, job_name, job_person, job_date))
previous_rec = current_rec
if verbose and deleted_revisions:
print("record %s: deleted %s duplicate revisions out of %s" % (recid, deleted_revisions, len(all_revisions)))
if verbose:
print("DONE")
def cli_list_revisions(recid, details=False):
"""Print list of all known record revisions (=RECID.REVDATE) for record
RECID.
"""
try:
recid = int(recid)
except ValueError:
print('ERROR: record ID must be integer, not %s.' % recid)
sys.exit(1)
record_rev_list = get_record_revision_ids(recid)
if not details:
out = '\n'.join(record_rev_list)
else:
out = "%s %s %s %s\n" % ("# Revision".ljust(22), "# Task ID".ljust(15),
"# Author".ljust(15), "# Job Details")
out += '\n'.join([get_info_of_revision_id(revid) for revid in record_rev_list])
if out:
print(out)
else:
print('ERROR: Record %s not found.' % recid)
def cli_get_revision(revid):
"""Return MARCXML for record revision REVID (=RECID.REVDATE) of a record."""
if not revision_format_valid_p(revid):
print('ERROR: revision %s is invalid; ' \
'must be NNN.YYYYMMDDhhmmss.' % revid)
sys.exit(1)
out = get_marcxml_of_revision_id(revid)
if out:
print(out)
else:
print('ERROR: Revision %s not found.' % revid)
def cli_diff_revisions(revid1, revid2):
"""Return diffs of MARCXML for record revisions REVID1, REVID2."""
for revid in [revid1, revid2]:
if not revision_format_valid_p(revid):
print('ERROR: revision %s is invalid; ' \
'must be NNN.YYYYMMDDhhmmss.' % revid)
sys.exit(1)
xml1 = get_marcxml_of_revision_id(revid1)
if not xml1:
print('ERROR: Revision %s not found. ' % revid1)
sys.exit(1)
xml2 = get_marcxml_of_revision_id(revid2)
if not xml2:
print('ERROR: Revision %s not found. ' % revid2)
sys.exit(1)
print(get_xml_comparison(revid1, revid2, xml1, xml2))
def cli_revert_to_revision(revid):
"""Submit specified record revision REVID upload, to replace current
version.
"""
if not revision_format_valid_p(revid):
print('ERROR: revision %s is invalid; ' \
'must be NNN.YYYYMMDDhhmmss.' % revid)
sys.exit(1)
xml_record = get_marcxml_of_revision_id(revid)
if xml_record == '':
print('ERROR: Revision %s does not exist. ' % revid)
sys.exit(1)
recid = split_revid(revid)[0]
if record_locked_by_other_user(recid, -1):
print('The record is currently being edited. ' \
'Please try again in a few minutes.')
sys.exit(1)
if record_locked_by_queue(recid):
print('The record is locked because of unfinished upload tasks. ' \
'Please try again in a few minutes.')
sys.exit(1)
save_xml_record(recid, 0, xml_record)
print('Your modifications have now been submitted. They will be ' \
'processed as soon as the task queue is empty.')
def check_rev(recid, verbose=True, fix=False):
revisions = get_record_revisions(recid)
for recid, job_date in revisions:
rev = '%s.%s' % (recid, job_date)
try:
get_marcxml_of_revision_id(rev)
if verbose:
print('%s: ok' % rev)
except zlib.error:
print('%s: invalid' % rev)
if fix:
fix_rev(recid, job_date, verbose)
def fix_rev(recid, job_date, verbose=True):
    # Let the database driver quote the parameter; hand-quoting %s would
    # produce a doubly-quoted job_date that never matches.
    sql = 'DELETE FROM hstRECORD WHERE id_bibrec = %s AND job_date = %s'
    run_sql(sql, (recid, job_date))
def cli_check_revisions(recid):
if recid == '*':
print('Checking all records')
recids = intbitset(run_sql("SELECT id FROM bibrec ORDER BY id"))
for index, rec in enumerate(recids):
if index % 1000 == 0 and index:
print(index, 'records processed')
check_rev(rec, verbose=False)
else:
check_rev(recid)
def cli_fix_revisions(recid):
if recid == '*':
print('Fixing all records')
recids = intbitset(run_sql("SELECT id FROM bibrec ORDER BY id"))
for index, rec in enumerate(recids):
if index % 1000 == 0 and index:
print(index, 'records processed')
check_rev(rec, verbose=False, fix=True)
else:
check_rev(recid, fix=True)
def main():
"""Main entry point."""
if '--help' in sys.argv or \
'-h' in sys.argv:
print_usage()
elif '--version' in sys.argv or \
'-V' in sys.argv:
print_version()
else:
try:
cmd = sys.argv[1]
opts = sys.argv[2:]
if not opts:
raise IndexError
except IndexError:
print_usage()
sys.exit(1)
if cmd == '--list-revisions':
try:
recid = opts[0]
except IndexError:
print_usage()
sys.exit(1)
cli_list_revisions(recid, details=False)
elif cmd == '--list-revisions-details':
try:
recid = opts[0]
except IndexError:
print_usage()
sys.exit(1)
cli_list_revisions(recid, details=True)
elif cmd == '--get-revision':
try:
revid = opts[0]
except IndexError:
print_usage()
sys.exit(1)
cli_get_revision(revid)
elif cmd == '--diff-revisions':
try:
revid1 = opts[0]
revid2 = opts[1]
except IndexError:
print_usage()
sys.exit(1)
cli_diff_revisions(revid1, revid2)
elif cmd == '--revert-to-revision':
try:
revid = opts[0]
except IndexError:
print_usage()
sys.exit(1)
cli_revert_to_revision(revid)
elif cmd == '--check-revisions':
try:
recid = opts[0]
except IndexError:
recid = '*'
cli_check_revisions(recid)
elif cmd == '--fix-revisions':
try:
recid = opts[0]
except IndexError:
recid = '*'
cli_fix_revisions(recid)
elif cmd == '--clean-revisions':
try:
recid = opts[0]
except IndexError:
recid = '*'
cli_clean_revisions(recid, dry_run=False)
else:
print("ERROR: Please specify a command. Please see '--help'.")
sys.exit(1)
if __name__ == '__main__':
main()
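
# Illustrative invocations (record IDs and revision dates are hypothetical):
#
#   bibedit --list-revisions 1234
#   bibedit --get-revision 1234.20130902112556
#   bibedit --diff-revisions 1234.20130902112556 1234.20130903120049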
|
Lilykos/invenio
|
invenio/legacy/bibedit/cli.py
|
Python
|
gpl-2.0
| 10,826
|
#!/usr/bin/env python
"""Execute the tests for the samcat program.
The golden test outputs are generated by the script generate_outputs.sh.
You have to give the root paths to the source and the binaries as arguments to
the program. These are the paths to the directory that contains the 'projects'
directory.
Usage: run_tests.py SOURCE_ROOT_PATH BINARY_ROOT_PATH
"""
import logging
import os.path
import sys
# Automagically add util/py_lib to sys.path.
path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..',
'..', '..', 'util', 'py_lib'))
sys.path.insert(0, path)
import seqan.app_tests as app_tests
def main(source_base, binary_base):
"""Main entry point of the script."""
print 'Executing test for samcat'
print '========================='
print
ph = app_tests.TestPathHelper(
source_base, binary_base,
'apps/samcat/tests') # tests dir
# ============================================================
# Auto-detect the binary path.
# ============================================================
path_to_program = app_tests.autolocateBinary(
binary_base, 'apps/samcat', 'samcat')
# ============================================================
    # Build TestConf list.
# ============================================================
    # Build a list of TestConf objects, analogously to how the output
    # was generated in generate_outputs.sh.
conf_list = []
# ============================================================
# Run on DNA (Adenoviruses).
# ============================================================
conf = app_tests.TestConf(
program=path_to_program,
args=[ph.inFile('ex1_a1.sam'),
ph.inFile('ex1_a2.sam'),
ph.inFile('ex1_a3.sam'),
'-o', ph.outFile('ex1_merged.sam')],
to_diff=[(ph.inFile('ex1_merged.sam'),
ph.outFile('ex1_merged.sam'))])
conf_list.append(conf)
conf = app_tests.TestConf(
program=path_to_program,
args=[ph.inFile('ex1_a1.sam'),
ph.inFile('ex1_a2.sam'),
ph.inFile('ex1_a3.sam'),
'-o', ph.outFile('ex1_merged.bam')],
to_diff=[(ph.inFile('ex1_merged.bam'),
ph.outFile('ex1_merged.bam'), "gunzip")])
conf_list.append(conf)
# Execute the tests.
failures = 0
for conf in conf_list:
res = app_tests.runTest(conf)
# Output to the user.
print ' '.join(conf.commandLineArgs())
if res:
print 'OK'
else:
failures += 1
print 'FAILED'
# Cleanup.
ph.deleteTempDir()
print '=============================='
print ' total tests: %d' % len(conf_list)
print ' failed tests: %d' % failures
print 'successful tests: %d' % (len(conf_list) - failures)
print '=============================='
# Compute and return return code.
return failures != 0
if __name__ == '__main__':
sys.exit(app_tests.main(main))
|
rrahn/gdf_tools
|
include/seqan/apps/samcat/tests/run_tests.py
|
Python
|
gpl-3.0
| 3,112
|
# Copyright (c) 2014 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log as logging
from neutron.agent.linux import external_process
from neutron.agent.linux import keepalived
from neutron.agent.linux import utils
from neutron.tests import base
from neutron.tests.unit.agent.linux import test_keepalived
LOG = logging.getLogger(__name__)
class KeepalivedManagerTestCase(base.BaseTestCase,
test_keepalived.KeepalivedConfBaseMixin):
def setUp(self):
super(KeepalivedManagerTestCase, self).setUp()
cfg.CONF.set_override('check_child_processes_interval', 1, 'AGENT')
self.expected_config = self._get_config()
self.process_monitor = external_process.ProcessMonitor(cfg.CONF,
'router')
self.manager = keepalived.KeepalivedManager(
'router1', self.expected_config, self.process_monitor,
conf_path=cfg.CONF.state_path)
self.addCleanup(self.manager.disable)
def test_keepalived_spawn(self):
self.manager.spawn()
process = external_process.ProcessManager(
cfg.CONF,
'router1',
namespace=None,
pids_path=cfg.CONF.state_path)
self.assertTrue(process.active)
self.assertEqual(self.expected_config.get_config_str(),
self.manager.get_conf_on_disk())
def test_keepalived_respawns(self):
self.manager.spawn()
process = self.manager.get_process()
pid = process.pid
utils.wait_until_true(
lambda: process.active,
timeout=5,
sleep=0.01,
exception=RuntimeError(_("Keepalived didn't spawn")))
# force process crash, and see that when it comes back
# it's indeed a different process
utils.execute(['kill', '-9', pid], run_as_root=True)
utils.wait_until_true(
lambda: process.active and pid != process.pid,
timeout=5,
sleep=0.01,
exception=RuntimeError(_("Keepalived didn't respawn")))
|
mattt416/neutron
|
neutron/tests/functional/agent/linux/test_keepalived.py
|
Python
|
apache-2.0
| 2,730
|
"""
A pure python (slow) implementation of rijndael with a decent interface
To include -
from rijndael import rijndael
To do a key setup -
r = rijndael(key, block_size = 16)
key must be a string of length 16, 24, or 32
blocksize must be 16, 24, or 32. Default is 16
To use -
ciphertext = r.encrypt(plaintext)
plaintext = r.decrypt(ciphertext)
If any strings are of the wrong length a ValueError is thrown
"""
# ported from the Java reference code by Bram Cohen, April 2001
# this code is public domain, unless someone makes
# an intellectual property claim against the reference
# code, in which case it can be made public domain by
# deleting all the comments and renaming all the variables
import copy
shifts = [[[0, 0], [1, 3], [2, 2], [3, 1]],
[[0, 0], [1, 5], [2, 4], [3, 3]],
[[0, 0], [1, 7], [3, 5], [4, 4]]]
# [keysize][block_size]
num_rounds = {16: {16: 10, 24: 12, 32: 14}, 24: {16: 12, 24: 12, 32: 14}, 32: {16: 14, 24: 14, 32: 14}}
A = [[1, 1, 1, 1, 1, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 1, 1, 1, 1, 1],
[1, 0, 0, 0, 1, 1, 1, 1],
[1, 1, 0, 0, 0, 1, 1, 1],
[1, 1, 1, 0, 0, 0, 1, 1],
[1, 1, 1, 1, 0, 0, 0, 1]]
# produce log and alog tables, needed for multiplying in the
# field GF(2^m) (generator = 3)
alog = [1]
for i in range(255):
j = (alog[-1] << 1) ^ alog[-1]
if j & 0x100 != 0:
j ^= 0x11B
alog.append(j)
log = [0] * 256
for i in range(1, 255):
log[alog[i]] = i
# multiply two elements of GF(2^m)
def mul(a, b):
if a == 0 or b == 0:
return 0
return alog[(log[a & 0xFF] + log[b & 0xFF]) % 255]
# substitution box based on F^{-1}(x)
box = [[0] * 8 for i in range(256)]
box[1][7] = 1
for i in range(2, 256):
j = alog[255 - log[i]]
for t in range(8):
box[i][t] = (j >> (7 - t)) & 0x01
B = [0, 1, 1, 0, 0, 0, 1, 1]
# affine transform: box[i] <- B + A*box[i]
cox = [[0] * 8 for i in range(256)]
for i in range(256):
for t in range(8):
cox[i][t] = B[t]
for j in range(8):
cox[i][t] ^= A[t][j] * box[i][j]
# S-boxes and inverse S-boxes
S = [0] * 256
Si = [0] * 256
for i in range(256):
S[i] = cox[i][0] << 7
for t in range(1, 8):
S[i] ^= cox[i][t] << (7 - t)
Si[S[i] & 0xFF] = i
# T-boxes
G = [[2, 1, 1, 3],
[3, 2, 1, 1],
[1, 3, 2, 1],
[1, 1, 3, 2]]
AA = [[0] * 8 for i in range(4)]
for i in range(4):
for j in range(4):
AA[i][j] = G[i][j]
AA[i][i + 4] = 1
for i in range(4):
pivot = AA[i][i]
if pivot == 0:
t = i + 1
while AA[t][i] == 0 and t < 4:
t += 1
assert t != 4, 'G matrix must be invertible'
for j in range(8):
AA[i][j], AA[t][j] = AA[t][j], AA[i][j]
pivot = AA[i][i]
for j in range(8):
if AA[i][j] != 0:
AA[i][j] = alog[(255 + log[AA[i][j] & 0xFF] - log[pivot & 0xFF]) % 255]
for t in range(4):
if i != t:
for j in range(i + 1, 8):
AA[t][j] ^= mul(AA[i][j], AA[t][i])
AA[t][i] = 0
iG = [[0] * 4 for i in range(4)]
for i in range(4):
for j in range(4):
iG[i][j] = AA[i][j + 4]
def mul4(a, bs):
if a == 0:
return 0
r = 0
for b in bs:
r <<= 8
if b != 0:
r = r | mul(a, b)
return r
T1 = []
T2 = []
T3 = []
T4 = []
T5 = []
T6 = []
T7 = []
T8 = []
U1 = []
U2 = []
U3 = []
U4 = []
for t in range(256):
s = S[t]
T1.append(mul4(s, G[0]))
T2.append(mul4(s, G[1]))
T3.append(mul4(s, G[2]))
T4.append(mul4(s, G[3]))
s = Si[t]
T5.append(mul4(s, iG[0]))
T6.append(mul4(s, iG[1]))
T7.append(mul4(s, iG[2]))
T8.append(mul4(s, iG[3]))
U1.append(mul4(t, iG[0]))
U2.append(mul4(t, iG[1]))
U3.append(mul4(t, iG[2]))
U4.append(mul4(t, iG[3]))
# round constants
rcon = [1]
r = 1
for t in range(1, 30):
r = mul(2, r)
rcon.append(r)
del A
del AA
del pivot
del B
del G
del box
del log
del alog
del i
del j
del r
del s
del t
del mul
del mul4
del cox
del iG
class rijndael:
def __init__(self, key, block_size=16):
if block_size != 16 and block_size != 24 and block_size != 32:
raise ValueError('Invalid block size: ' + str(block_size))
if len(key) != 16 and len(key) != 24 and len(key) != 32:
raise ValueError('Invalid key size: ' + str(len(key)))
self.block_size = block_size
ROUNDS = num_rounds[len(key)][block_size]
BC = block_size // 4
# encryption round keys
Ke = [[0] * BC for i in range(ROUNDS + 1)]
# decryption round keys
Kd = [[0] * BC for i in range(ROUNDS + 1)]
ROUND_KEY_COUNT = (ROUNDS + 1) * BC
KC = len(key) // 4
# copy user material bytes into temporary ints
tk = []
for i in range(0, KC):
tk.append((key[i * 4] << 24) | (key[i * 4 + 1] << 16) |
(key[i * 4 + 2] << 8) | key[i * 4 + 3])
# copy values into round key arrays
t = 0
j = 0
while j < KC and t < ROUND_KEY_COUNT:
Ke[t // BC][t % BC] = tk[j]
Kd[ROUNDS - (t // BC)][t % BC] = tk[j]
j += 1
t += 1
tt = 0
rconpointer = 0
while t < ROUND_KEY_COUNT:
# extrapolate using phi (the round key evolution function)
tt = tk[KC - 1]
tk[0] ^= (S[(tt >> 16) & 0xFF] & 0xFF) << 24 ^ \
(S[(tt >> 8) & 0xFF] & 0xFF) << 16 ^ \
(S[ tt & 0xFF] & 0xFF) << 8 ^ \
(S[(tt >> 24) & 0xFF] & 0xFF) ^ \
(rcon[rconpointer] & 0xFF) << 24
rconpointer += 1
if KC != 8:
for i in range(1, KC):
tk[i] ^= tk[i - 1]
else:
for i in range(1, KC // 2):
tk[i] ^= tk[i - 1]
tt = tk[KC // 2 - 1]
tk[KC // 2] ^= (S[ tt & 0xFF] & 0xFF) ^ \
(S[(tt >> 8) & 0xFF] & 0xFF) << 8 ^ \
(S[(tt >> 16) & 0xFF] & 0xFF) << 16 ^ \
(S[(tt >> 24) & 0xFF] & 0xFF) << 24
for i in range(KC // 2 + 1, KC):
tk[i] ^= tk[i - 1]
# copy values into round key arrays
j = 0
while j < KC and t < ROUND_KEY_COUNT:
Ke[t // BC][t % BC] = tk[j]
Kd[ROUNDS - (t // BC)][t % BC] = tk[j]
j += 1
t += 1
# inverse MixColumn where needed
for r in range(1, ROUNDS):
for j in range(BC):
tt = Kd[r][j]
Kd[r][j] = U1[(tt >> 24) & 0xFF] ^ \
U2[(tt >> 16) & 0xFF] ^ \
U3[(tt >> 8) & 0xFF] ^ \
U4[tt & 0xFF]
self.Ke = Ke
self.Kd = Kd
def encrypt(self, plaintext):
if len(plaintext) != self.block_size:
raise ValueError('wrong block length, expected ' + str(self.block_size) + ' got ' + str(len(plaintext)))
Ke = self.Ke
BC = self.block_size // 4
ROUNDS = len(Ke) - 1
if BC == 4:
SC = 0
elif BC == 6:
SC = 1
else:
SC = 2
s1 = shifts[SC][1][0]
s2 = shifts[SC][2][0]
s3 = shifts[SC][3][0]
a = [0] * BC
# temporary work array
t = []
# plaintext to ints + key
for i in range(BC):
t.append((ord(plaintext[i * 4]) << 24 |
ord(plaintext[i * 4 + 1]) << 16 |
ord(plaintext[i * 4 + 2]) << 8 |
ord(plaintext[i * 4 + 3])) ^ Ke[0][i])
# apply round transforms
for r in range(1, ROUNDS):
for i in range(BC):
a[i] = (T1[(t[i] >> 24) & 0xFF] ^
T2[(t[(i + s1) % BC] >> 16) & 0xFF] ^
T3[(t[(i + s2) % BC] >> 8) & 0xFF] ^
T4[t[(i + s3) % BC] & 0xFF]) ^ Ke[r][i]
t = copy.copy(a)
# last round is special
result = []
for i in range(BC):
tt = Ke[ROUNDS][i]
result.append((S[(t[i] >> 24) & 0xFF] ^ (tt >> 24)) & 0xFF)
result.append((S[(t[(i + s1) % BC] >> 16) & 0xFF] ^ (tt >> 16)) & 0xFF)
result.append((S[(t[(i + s2) % BC] >> 8) & 0xFF] ^ (tt >> 8)) & 0xFF)
result.append((S[t[(i + s3) % BC] & 0xFF] ^ tt) & 0xFF)
return ''.join(map(chr, result))
def decrypt(self, ciphertext):
if len(ciphertext) != self.block_size:
raise ValueError('wrong block length, expected ' + str(self.block_size) + ' got ' + str(len(ciphertext)))
Kd = self.Kd
BC = self.block_size // 4
ROUNDS = len(Kd) - 1
if BC == 4:
SC = 0
elif BC == 6:
SC = 1
else:
SC = 2
s1 = shifts[SC][1][1]
s2 = shifts[SC][2][1]
s3 = shifts[SC][3][1]
a = [0] * BC
# temporary work array
t = [0] * BC
# ciphertext to ints + key
for i in range(BC):
t[i] = (ciphertext[i * 4] << 24 |
ciphertext[i * 4 + 1] << 16 |
ciphertext[i * 4 + 2] << 8 |
ciphertext[i * 4 + 3]) ^ Kd[0][i]
# apply round transforms
for r in range(1, ROUNDS):
for i in range(BC):
a[i] = (T5[(t[i] >> 24) & 0xFF] ^
T6[(t[(i + s1) % BC] >> 16) & 0xFF] ^
T7[(t[(i + s2) % BC] >> 8) & 0xFF] ^
T8[t[(i + s3) % BC] & 0xFF]) ^ Kd[r][i]
t = copy.copy(a)
# last round is special
result = []
for i in range(BC):
tt = Kd[ROUNDS][i]
result.append((Si[(t[i] >> 24) & 0xFF] ^ (tt >> 24)) & 0xFF)
result.append((Si[(t[(i + s1) % BC] >> 16) & 0xFF] ^ (tt >> 16)) & 0xFF)
result.append((Si[(t[(i + s2) % BC] >> 8) & 0xFF] ^ (tt >> 8)) & 0xFF)
result.append((Si[t[(i + s3) % BC] & 0xFF] ^ tt) & 0xFF)
return ''.join(map(chr, result))
def encrypt(key, block):
return rijndael(key, len(block)).encrypt(block)
def decrypt(key, block):
return rijndael(key, len(block)).decrypt(block)
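
# Usage sketch following the interface advertised in the module docstring
# (illustrative only: this vendored copy mixes str and byte indexing between
# key setup, encrypt() and decrypt(), so treat this as the documented
# contract rather than a verified round trip):
#
#     r = rijndael(key, block_size=16)    # key of length 16, 24 or 32
#     ciphertext = r.encrypt(plaintext)   # plaintext of exactly block_size
#     plaintext = r.decrypt(ciphertext)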
|
azumimuo/family-xbmc-addon
|
script.mrknow.urlresolver/lib/urlresolver9/plugins/lib/rijndael.py
|
Python
|
gpl-2.0
| 10,498
|
def format_date(dt):
return dt.strftime('%Y-%m-%d %H:%M:%S')
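
# e.g. format_date(datetime.datetime(2014, 1, 2, 3, 4, 5))
#      returns '2014-01-02 03:04:05'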
|
aabed/mhn
|
server/mhn/common/templatetags.py
|
Python
|
lgpl-2.1
| 65
|
# ==================================================================================================
# Copyright 2013 Twitter, Inc.
# --------------------------------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==================================================================================================
from twitter.common.rpc.finagle.protocol import (
TFinagleProtocol,
TFinagleProtocolWithClientId)
__all__ = [
'TFinagleProtocol',
'TFinagleProtocolWithClientId'
]
|
WCCCEDU/twitter-commons
|
src/python/twitter/common/rpc/finagle/__init__.py
|
Python
|
apache-2.0
| 1,073
|
from django.contrib.gis.gdal import OGRGeomType
from django.db.backends.postgresql.introspection import DatabaseIntrospection
class GeoIntrospectionError(Exception):
pass
class PostGISIntrospection(DatabaseIntrospection):
# Reverse dictionary for PostGIS geometry types not populated until
# introspection is actually performed.
postgis_types_reverse = {}
ignored_tables = DatabaseIntrospection.ignored_tables + [
'geography_columns',
'geometry_columns',
'raster_columns',
'spatial_ref_sys',
'raster_overviews',
]
def get_postgis_types(self):
"""
Return a dictionary with keys that are the PostgreSQL object
identification integers for the PostGIS geometry and/or
geography types (if supported).
"""
field_types = [
('geometry', 'GeometryField'),
# The value for the geography type is actually a tuple
# to pass in the `geography=True` keyword to the field
# definition.
('geography', ('GeometryField', {'geography': True})),
]
postgis_types = {}
# The OID integers associated with the geometry type may
# be different across versions; hence, this is why we have
# to query the PostgreSQL pg_type table corresponding to the
# PostGIS custom data types.
oid_sql = 'SELECT "oid" FROM "pg_type" WHERE "typname" = %s'
with self.connection.cursor() as cursor:
for field_type in field_types:
cursor.execute(oid_sql, (field_type[0],))
for result in cursor.fetchall():
postgis_types[result[0]] = field_type[1]
return postgis_types
def get_field_type(self, data_type, description):
if not self.postgis_types_reverse:
# If the PostGIS types reverse dictionary is not populated, do so
# now. In order to prevent unnecessary requests upon connection
# initialization, the `data_types_reverse` dictionary is not updated
# with the PostGIS custom types until introspection is actually
# performed -- in other words, when this function is called.
self.postgis_types_reverse = self.get_postgis_types()
self.data_types_reverse.update(self.postgis_types_reverse)
return super().get_field_type(data_type, description)
def get_geometry_type(self, table_name, geo_col):
"""
The geometry type OID used by PostGIS does not indicate the particular
type of field that a geometry column is (e.g., whether it's a
PointField or a PolygonField). Thus, this routine queries the PostGIS
metadata tables to determine the geometry type.
"""
with self.connection.cursor() as cursor:
try:
# First seeing if this geometry column is in the `geometry_columns`
cursor.execute('SELECT "coord_dimension", "srid", "type" '
'FROM "geometry_columns" '
'WHERE "f_table_name"=%s AND "f_geometry_column"=%s',
(table_name, geo_col))
row = cursor.fetchone()
if not row:
raise GeoIntrospectionError
except GeoIntrospectionError:
cursor.execute('SELECT "coord_dimension", "srid", "type" '
'FROM "geography_columns" '
'WHERE "f_table_name"=%s AND "f_geography_column"=%s',
(table_name, geo_col))
row = cursor.fetchone()
if not row:
raise Exception('Could not find a geometry or geography column for "%s"."%s"' %
(table_name, geo_col))
# OGRGeomType does not require GDAL and makes it easy to convert
# from OGC geom type name to Django field.
field_type = OGRGeomType(row[2]).django
# Getting any GeometryField keyword arguments that are not the default.
dim = row[0]
srid = row[1]
field_params = {}
if srid != 4326:
field_params['srid'] = srid
if dim != 2:
field_params['dim'] = dim
return field_type, field_params
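
# Illustrative use (hypothetical table/column names): get_geometry_type()
# returns the Django field name plus any non-default keyword arguments,
# e.g.
#
#     field_type, params = introspection.get_geometry_type('city', 'boundary')
#     # -> ('PolygonField', {'srid': 3857})  for a 2D polygon column in SRID 3857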
|
sametmax/Django--an-app-at-a-time
|
ignore_this_directory/django/contrib/gis/db/backends/postgis/introspection.py
|
Python
|
mit
| 4,388
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Test functionality of coursera module.
"""
import json
import os.path
import pytest
from six import iteritems
from mock import patch, Mock, mock_open
from coursera import coursera_dl
# JSon Handling
@pytest.fixture
def get_page(monkeypatch):
monkeypatch.setattr(coursera_dl, 'get_page', Mock())
@pytest.fixture
def json_path():
return os.path.join(os.path.dirname(__file__), "fixtures", "json")
def test_that_should_not_dl_if_file_exist(get_page, json_path):
coursera_dl.get_page = Mock()
coursera_dl.download_about(object(), "matrix-002", json_path)
assert coursera_dl.get_page.called is False
def test_that_we_parse_and_write_json_correctly(get_page, json_path):
unprocessed_json = os.path.join(os.path.dirname(__file__),
"fixtures", "json", "unprocessed.json")
raw_data = open(unprocessed_json).read()
coursera_dl.get_page = lambda x, y: raw_data
open_mock = mock_open()
with patch('coursera.coursera_dl.open', open_mock, create=True):
coursera_dl.download_about(object(), "networksonline-002", json_path)
about_json = os.path.join(json_path, 'networksonline-002-about.json')
open_mock.assert_called_once_with(about_json, 'w')
data = json.loads(open_mock().write.call_args[0][0])
assert data['id'] == 394
assert data['shortName'] == 'networksonline'
# Test Syllabus Parsing
@pytest.fixture
def get_video(monkeypatch):
"""
    Mock some methods that would otherwise repeatedly issue many web
    requests.
More specifically, we mock:
* the search for hidden videos
* the actual download of videos
"""
# Mock coursera_dl.grab_hidden_video_url
monkeypatch.setattr(coursera_dl, 'grab_hidden_video_url',
lambda session, href: None)
# Mock coursera_dl.get_video
monkeypatch.setattr(coursera_dl, 'get_video',
lambda session, href: None)
@pytest.mark.parametrize(
"filename,num_sections,num_lectures,num_resources,num_videos", [
("regular-syllabus.html", 23, 102, 502, 102),
("links-to-wikipedia.html", 5, 37, 158, 36),
("preview.html", 20, 106, 106, 106),
("sections-not-to-be-missed.html", 9, 61, 224, 61),
("sections-not-to-be-missed-2.html", 20, 121, 397, 121),
("parsing-datasci-001-with-bs4.html", 10, 97, 358, 97), # issue 134
("parsing-startup-001-with-bs4.html", 4, 44, 136, 44), # issue 137
("parsing-wealthofnations-001-with-bs4.html", 8, 74, 296, 74), # issue 131
("parsing-malsoftware-001-with-bs4.html", 3, 18, 56, 16), # issue 148
("multiple-resources-with-the-same-format.html", 18, 97, 478, 97),
]
)
def test_parse(get_video, filename, num_sections, num_lectures, num_resources, num_videos):
filename = os.path.join(os.path.dirname(__file__), "fixtures", "html",
filename)
with open(filename) as syllabus:
syllabus_page = syllabus.read()
sections = coursera_dl.parse_syllabus(None, syllabus_page, None)
# section count
assert len(sections) == num_sections
# lecture count
lectures = [lec for sec in sections for lec in sec[1]]
assert len(lectures) == num_lectures
# resource count
resources = [(res[0], len(res[1]))
for lec in lectures for res in iteritems(lec[1])]
assert sum(r for f, r in resources) == num_resources
# mp4 count
assert sum(r for f, r in resources if f == "mp4") == num_videos
|
rihbyne/coursera-dl
|
coursera/test/test_parsing.py
|
Python
|
lgpl-3.0
| 3,627
|
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
ExtractorError,
int_or_none,
update_url_query,
)
class NaverIE(InfoExtractor):
_VALID_URL = r'https?://(?:m\.)?tv(?:cast)?\.naver\.com/v/(?P<id>\d+)'
_TESTS = [{
'url': 'http://tv.naver.com/v/81652',
'info_dict': {
'id': '81652',
'ext': 'mp4',
'title': '[9월 모의고사 해설강의][수학_김상희] 수학 A형 16~20번',
'description': '합격불변의 법칙 메가스터디 | 메가스터디 수학 김상희 선생님이 9월 모의고사 수학A형 16번에서 20번까지 해설강의를 공개합니다.',
'upload_date': '20130903',
},
}, {
'url': 'http://tv.naver.com/v/395837',
'md5': '638ed4c12012c458fefcddfd01f173cd',
'info_dict': {
'id': '395837',
'ext': 'mp4',
'title': '9년이 지나도 아픈 기억, 전효성의 아버지',
'description': 'md5:5bf200dcbf4b66eb1b350d1eb9c753f7',
'upload_date': '20150519',
},
'skip': 'Georestricted',
}, {
'url': 'http://tvcast.naver.com/v/81652',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
        vid = self._search_regex(
            r'videoId["\']\s*:\s*(["\'])(?P<value>(?:(?!\1).)+)\1', webpage,
            'video id', default=None, group='value')
in_key = self._search_regex(
r'inKey["\']\s*:\s*(["\'])(?P<value>(?:(?!\1).)+)\1', webpage,
'key', default=None, group='value')
if not vid or not in_key:
error = self._html_search_regex(
r'(?s)<div class="(?:nation_error|nation_box|error_box)">\s*(?:<!--.*?-->)?\s*<p class="[^"]+">(?P<msg>.+?)</p>\s*</div>',
webpage, 'error', default=None)
if error:
raise ExtractorError(error, expected=True)
raise ExtractorError('couldn\'t extract vid and key')
video_data = self._download_json(
'http://play.rmcnmv.naver.com/vod/play/v2.0/' + vid,
video_id, query={
'key': in_key,
})
meta = video_data['meta']
title = meta['subject']
formats = []
def extract_formats(streams, stream_type, query={}):
for stream in streams:
stream_url = stream.get('source')
if not stream_url:
continue
stream_url = update_url_query(stream_url, query)
encoding_option = stream.get('encodingOption', {})
bitrate = stream.get('bitrate', {})
formats.append({
'format_id': '%s_%s' % (stream.get('type') or stream_type, encoding_option.get('id') or encoding_option.get('name')),
'url': stream_url,
'width': int_or_none(encoding_option.get('width')),
'height': int_or_none(encoding_option.get('height')),
'vbr': int_or_none(bitrate.get('video')),
'abr': int_or_none(bitrate.get('audio')),
'filesize': int_or_none(stream.get('size')),
'protocol': 'm3u8_native' if stream_type == 'HLS' else None,
})
extract_formats(video_data.get('videos', {}).get('list', []), 'H264')
for stream_set in video_data.get('streams', []):
query = {}
for param in stream_set.get('keys', []):
query[param['name']] = param['value']
stream_type = stream_set.get('type')
videos = stream_set.get('videos')
if videos:
extract_formats(videos, stream_type, query)
elif stream_type == 'HLS':
stream_url = stream_set.get('source')
if not stream_url:
continue
formats.extend(self._extract_m3u8_formats(
update_url_query(stream_url, query), video_id,
'mp4', 'm3u8_native', m3u8_id=stream_type, fatal=False))
self._sort_formats(formats)
subtitles = {}
for caption in video_data.get('captions', {}).get('list', []):
caption_url = caption.get('source')
if not caption_url:
continue
subtitles.setdefault(caption.get('language') or caption.get('locale'), []).append({
'url': caption_url,
})
upload_date = self._search_regex(
r'<span[^>]+class="date".*?(\d{4}\.\d{2}\.\d{2})',
webpage, 'upload date', fatal=False)
if upload_date:
upload_date = upload_date.replace('.', '')
return {
'id': video_id,
'title': title,
'formats': formats,
'subtitles': subtitles,
'description': self._og_search_description(webpage),
'thumbnail': meta.get('cover', {}).get('source') or self._og_search_thumbnail(webpage),
'view_count': int_or_none(meta.get('count')),
'upload_date': upload_date,
}
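
# Illustrative invocation (URL taken from _TESTS above): youtube-dl matches
# the page against _VALID_URL and dispatches to NaverIE._real_extract.
#
#     youtube-dl http://tv.naver.com/v/81652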
|
epitron/youtube-dl
|
youtube_dl/extractor/naver.py
|
Python
|
unlicense
| 5,293
|
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pystachio import Empty, Struct
from pystachio.composite import Structural
__all__ = ('Cluster',)
# TODO(wickman) It seems like some of this Trait/Mixin stuff should be a
# first-class construct in Pystachio. It could be a solution for extensible
# Job/Task definitions.
class Cluster(dict):
"""Cluster encapsulates a set of K/V attributes describing cluster configurations.
Given a cluster, attributes may be accessed directly on them, e.g.
cluster.name
cluster.scheduler_zk_path
In order to enforce particular "traits" of Cluster, use Cluster.Trait to construct
enforceable schemas, e.g.
class ResolverTrait(Cluster.Trait):
scheduler_zk_ensemble = Required(String)
scheduler_zk_path = Default(String, '/twitter/service/mesos/prod/scheduler')
cluster = Cluster(name = 'west', scheduler_zk_ensemble = 'zookeeper.west.twttr.net')
# Ensures that scheduler_zk_ensemble is defined in the cluster or it will raise a TypeError
cluster.with_trait(ResolverTrait).scheduler_zk_ensemble
# Will use the default if none is provided on Cluster.
cluster.with_trait(ResolverTrait).scheduler_zk_path
"""
Trait = Struct # noqa
def __init__(self, **kwargs):
self._traits = ()
super(Cluster, self).__init__(**kwargs)
def get_trait(self, trait):
"""Given a Cluster.Trait, extract that trait."""
if not issubclass(trait, Structural):
raise TypeError('provided trait must be a Cluster.Trait subclass, got %s' % type(trait))
# TODO(wickman) Expose this in pystachio as a non-private or add a load method with strict=
return trait(trait._filter_against_schema(self))
def check_trait(self, trait):
"""Given a Cluster.Trait, typecheck that trait."""
trait_check = self.get_trait(trait).check()
if not trait_check.ok():
raise TypeError(trait_check.message())
def with_traits(self, *traits):
"""Return a cluster annotated with a set of traits."""
new_cluster = self.__class__(**self)
for trait in traits:
new_cluster.check_trait(trait)
new_cluster._traits = traits
return new_cluster
def with_trait(self, trait):
"""Return a cluster annotated with a single trait (helper for self.with_traits)."""
return self.with_traits(trait)
def __setitem__(self, key, value):
raise TypeError('Clusters are immutable.')
def __getattr__(self, attribute):
for trait in self._traits:
expressed_trait = self.get_trait(trait)
if hasattr(expressed_trait, attribute):
value = getattr(expressed_trait, attribute)()
return None if value is Empty else value.get()
try:
return self[attribute]
except KeyError:
return self.__getattribute__(attribute)
def __copy__(self):
return self
def __deepcopy__(self, memo):
return self
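
# A runnable sketch (not part of the original module) of the trait pattern
# from the class docstring; names mirror the docstring example.
if __name__ == '__main__':
    from pystachio import Default, Required, String

    class ResolverTrait(Cluster.Trait):
        scheduler_zk_ensemble = Required(String)
        scheduler_zk_path = Default(String, '/twitter/service/mesos/prod/scheduler')

    cluster = Cluster(name='west', scheduler_zk_ensemble='zookeeper.west.twttr.net')
    # Accessing a Required attribute raises TypeError if it is missing.
    print(cluster.with_trait(ResolverTrait).scheduler_zk_path)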
|
rosmo/aurora
|
src/main/python/apache/aurora/common/cluster.py
|
Python
|
apache-2.0
| 3,365
|
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class sale_order_line(osv.osv):
_inherit = "sale.order.line"
def product_id_change(self, cr, uid, ids, pricelist, product, qty=0,
uom=False, qty_uos=0, uos=False, name='', partner_id=False,
lang=False, update_tax=True, date_order=False, packaging=False, fiscal_position=False, flag=False, context=None):
res = super(sale_order_line, self).product_id_change(cr, uid, ids, pricelist, product, qty=qty,
uom=uom, qty_uos=qty_uos, uos=uos, name=name, partner_id=partner_id,
lang=lang, update_tax=update_tax, date_order=date_order, packaging=packaging, fiscal_position=fiscal_position, flag=flag, context=context)
if not pricelist:
return res
if context is None:
context = {}
frm_cur = self.pool.get('res.users').browse(cr, uid, uid).company_id.currency_id.id
to_cur = self.pool.get('product.pricelist').browse(cr, uid, [pricelist])[0].currency_id.id
if product:
product = self.pool['product.product'].browse(cr, uid, product, context=context)
purchase_price = product.standard_price
to_uom = res.get('product_uom', uom)
if to_uom != product.uom_id.id:
purchase_price = self.pool['product.uom']._compute_price(cr, uid, product.uom_id.id, purchase_price, to_uom)
ctx = context.copy()
ctx['date'] = date_order
price = self.pool.get('res.currency').compute(cr, uid, frm_cur, to_cur, purchase_price, round=False, context=ctx)
res['value'].update({'purchase_price': price})
return res
def _product_margin(self, cr, uid, ids, field_name, arg, context=None):
res = {}
for line in self.browse(cr, uid, ids, context=context):
res[line.id] = 0
if line.product_id:
res[line.id] = round(line.price_subtotal - ((line.purchase_price or line.product_id.standard_price) * line.product_uos_qty), 2)
return res
_columns = {
        'margin': fields.function(_product_margin, string='Margin',
            store=True),
'purchase_price': fields.float('Cost Price', digits=(16,2))
}
sale_order_line()
class sale_order(osv.osv):
_inherit = "sale.order"
def _product_margin(self, cr, uid, ids, field_name, arg, context=None):
result = {}
for sale in self.browse(cr, uid, ids, context=context):
result[sale.id] = 0.0
for line in sale.order_line:
result[sale.id] += line.margin or 0.0
return result
def _get_order(self, cr, uid, ids, context=None):
result = {}
for line in self.pool.get('sale.order.line').browse(cr, uid, ids, context=context):
result[line.order_id.id] = True
return result.keys()
_columns = {
'margin': fields.function(_product_margin, string='Margin', help="It gives profitability by calculating the difference between the Unit Price and the cost price.", store={
'sale.order.line': (_get_order, ['margin'], 20),
'sale.order': (lambda self, cr, uid, ids, c={}: ids, ['order_line'], 20),
}),
}
sale_order()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
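# Illustrative margin arithmetic for a single order line (values made up):
#   price_subtotal = 150.00, purchase_price = 40.00, product_uos_qty = 3
#   margin = round(150.00 - (40.00 * 3), 2) = 30.00
# The order-level margin computed above is simply the sum of its lines.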
|
jaggu303619/asylum
|
openerp/addons/sale_margin/sale_margin.py
|
Python
|
agpl-3.0
| 4,272
|
#!/usr/bin/env python3
"""Utility program to post a comment to a github PR"""
import argparse
import json
import os
import sys
import urllib.parse
from urllib.error import HTTPError, URLError
from urllib.request import urlopen, Request
def _parse_args():
pr_link_var = "ghprbPullLink"
pr_link_option = "--pr-link"
github_oauth_key_var = "GITHUB_OAUTH_KEY"
github_oauth_key_option = "--github-oauth-key"
parser = argparse.ArgumentParser()
parser.add_argument(
"-pr",
pr_link_option,
default=os.environ.get(pr_link_var, ""),
help="Specify pull request link",
)
parser.add_argument(
github_oauth_key_option,
default=os.environ.get(github_oauth_key_var, ""),
help="Specify github oauth key",
)
args = parser.parse_args()
if not args.pr_link:
parser.error(
"Specify either environment variable {} or option {}".format(
pr_link_var, pr_link_option
)
)
if not args.github_oauth_key:
parser.error(
"Specify either environment variable {} or option {}".format(
github_oauth_key_var, github_oauth_key_option
)
)
return args
def post_message_to_github(msg, github_oauth_key, pr_link):
print("Attempting to post to Github...")
ghprb_pull_id = os.environ["ghprbPullId"]
api_url = os.getenv("GITHUB_API_BASE", "https://api.github.com/repos/apache/spark")
url = api_url + "/issues/" + ghprb_pull_id + "/comments"
posted_message = json.dumps({"body": msg})
request = Request(
url,
headers={
"Authorization": "token {}".format(github_oauth_key),
"Content-Type": "application/json",
},
data=posted_message.encode("utf-8"),
)
try:
response = urlopen(request)
if response.getcode() == 201:
print(" > Post successful.")
else:
print_err("Surprising post response.")
print_err(" > http_code: {}".format(response.getcode()))
print_err(" > api_response: {}".format(response.read()))
print_err(" > data: {}".format(posted_message))
except HTTPError as http_e:
print_err("Failed to post message to Github.")
print_err(" > http_code: {}".format(http_e.code))
print_err(" > api_response: {}".format(http_e.read()))
print_err(" > data: {}".format(posted_message))
except URLError as url_e:
print_err("Failed to post message to Github.")
print_err(" > urllib_status: {}".format(url_e.reason[1]))
print_err(" > data: {}".format(posted_message))
def print_err(msg):
print(msg, file=sys.stderr)
def _main():
args = _parse_args()
msg = sys.stdin.read()
post_message_to_github(msg, args.github_oauth_key, args.pr_link)
return 0
if __name__ == "__main__":
sys.exit(_main())
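# Example invocation sketch (values illustrative; ghprbPullId and
# GITHUB_OAUTH_KEY normally come from the Jenkins environment):
#   export ghprbPullId=12345
#   export GITHUB_OAUTH_KEY=<token>
#   echo "Build succeeded." | ./post_github_pr_comment.py \
#       --pr-link https://github.com/apache/spark/pull/12345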
|
zero323/spark
|
dev/ansible-for-test-node/roles/jenkins-worker/files/util_scripts/post_github_pr_comment.py
|
Python
|
apache-2.0
| 2,930
|
#
# configparse.py
#
# an example of using the parsing module to be able to process a .INI configuration file
#
# Copyright (c) 2003, Paul McGuire
#
from pyparsing import \
Literal, Word, ZeroOrMore, Group, Dict, Optional, \
printables, ParseException, restOfLine
import pprint
inibnf = None
def inifile_BNF():
global inibnf
if not inibnf:
# punctuation
lbrack = Literal("[").suppress()
rbrack = Literal("]").suppress()
equals = Literal("=").suppress()
semi = Literal(";")
comment = semi + Optional( restOfLine )
nonrbrack = "".join( [ c for c in printables if c != "]" ] ) + " \t"
nonequals = "".join( [ c for c in printables if c != "=" ] ) + " \t"
sectionDef = lbrack + Word( nonrbrack ) + rbrack
keyDef = ~lbrack + Word( nonequals ) + equals + restOfLine
# using Dict will allow retrieval of named data fields as attributes of the parsed results
inibnf = Dict( ZeroOrMore( Group( sectionDef + Dict( ZeroOrMore( Group( keyDef ) ) ) ) ) )
inibnf.ignore( comment )
return inibnf
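# A small setup.ini this grammar would accept (contents illustrative; the
# test calls below expect a [Startup] section with a modemid key):
#
#   [Startup]
#   modemid=12345
#
#   [Languages]
#   key1=value1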
pp = pprint.PrettyPrinter(2)
def test( strng ):
print strng
    tokens = None
    try:
iniFile = file(strng)
iniData = "".join( iniFile.readlines() )
bnf = inifile_BNF()
tokens = bnf.parseString( iniData )
pp.pprint( tokens.asList() )
except ParseException, err:
print err.line
print " "*(err.column-1) + "^"
print err
iniFile.close()
print
return tokens
ini = test("setup.ini")
print "ini['Startup']['modemid'] =", ini['Startup']['modemid']
print "ini.Startup =", ini.Startup
print "ini.Startup.modemid =", ini.Startup.modemid
|
dbbhattacharya/kitsune
|
vendor/packages/pyparsing/examples/configParse.py
|
Python
|
bsd-3-clause
| 1,856
|
from browser import window
_kids=['Marsha', 'Jan', 'Cindy']
def continue1(event):
_objectStore.get('Jan', onsuccess=exists, onerror=continue2)
def continue2(event):
for _kid in _kids:
_rec={'name': _kid}
_objectStore.put(_rec, _kid, onsuccess=printmsg, onerror=printerr)
_objectStore.get('Jan', onsuccess=continue3, onerror=printerr)
def continue3(event):
print ("Async operations complete..")
def exists(event):
if event.target.pyresult() is None:
        # handle the case where get() returns undefined because the key
        # doesn't exist in the db
continue2(event)
else:
print(event.result)
        # this branch shouldn't be reached; print a warning if it is
        print("this shouldn't get called")
def printrec(event):
_obj=event.target.pyresult()
assert isinstance(_obj, dict)
assert _obj['name']=='Jan'
def printmsg(event):
_obj=event.target.pyresult()
assert _obj in _kids
def printerr(event):
print("Error: %s" % (event.result))
def onsuccess(event):
global db
db = request.result
def onupgradeneeded(e):
print("event: ", e, "target", e.target)
print("event type: ", e.type)
print("e.oldVersion: ", e.oldVersion)
print("e.newVersion: ", e.newVersion)
# todo.. override createObjectStore to take options (ie, like OS.put)
#e.target.result.createObjectStore("BradyKids")
db = request.result
for _kid in _kids:
print(_kid, db)
_rec={'name': _kid}
req = db.put(_rec, _kid)
req.onsuccess=printmsg
req.onerror=printerr
db = None
request = window.indexedDB.open("BradyKids", 3)
request.onsuccess = onsuccess
request.onupgradeneeded=onupgradeneeded
print(db)
print("allowing async operations to complete")
|
jonathanverner/brython
|
www/tests/test_indexedDB.py
|
Python
|
bsd-3-clause
| 1,790
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
def setup_module(module):
from nose import SkipTest
from nltk.parse.malt import MaltParser
try:
depparser = MaltParser('maltparser-1.7.2')
except LookupError:
raise SkipTest("MaltParser is not available")
|
sdoran35/hate-to-hugs
|
venv/lib/python3.6/site-packages/nltk/test/gluesemantics_malt_fixt.py
|
Python
|
mit
| 302
|
#! /usr/bin/env python
# $Id: test_inline_markup.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
Tests for inline markup in PEPs (readers/pep.py).
"""
from __init__ import DocutilsTestSupport
def suite():
s = DocutilsTestSupport.PEPParserTestSuite()
s.generateTests(totest)
return s
totest = {}
totest['standalone_references'] = [
["""\
See PEP 287 (pep-0287.txt),
and RFC 2822 (which obsoletes RFC822 and RFC-733).
""",
"""\
<document source="test data">
<paragraph>
See \n\
<reference refuri="http://www.python.org/dev/peps/pep-0287">
PEP 287
(
<reference refuri="http://www.python.org/dev/peps/pep-0287">
pep-0287.txt
),
and \n\
<reference refuri="http://www.faqs.org/rfcs/rfc2822.html">
RFC 2822
(which obsoletes \n\
<reference refuri="http://www.faqs.org/rfcs/rfc822.html">
RFC822
and \n\
<reference refuri="http://www.faqs.org/rfcs/rfc733.html">
RFC-733
).
"""],
["""\
References split across lines:
PEP
287
RFC
2822
""",
"""\
<document source="test data">
<paragraph>
References split across lines:
<paragraph>
<reference refuri="http://www.python.org/dev/peps/pep-0287">
PEP
287
<paragraph>
<reference refuri="http://www.faqs.org/rfcs/rfc2822.html">
RFC
2822
"""],
["""\
Test PEP-specific implicit references before a URL:
PEP 287 (http://www.python.org/dev/peps/pep-0287), RFC 2822.
""",
"""\
<document source="test data">
<paragraph>
Test PEP-specific implicit references before a URL:
<paragraph>
<reference refuri="http://www.python.org/dev/peps/pep-0287">
PEP 287
(
<reference refuri="http://www.python.org/dev/peps/pep-0287">
http://www.python.org/dev/peps/pep-0287
), \n\
<reference refuri="http://www.faqs.org/rfcs/rfc2822.html">
RFC 2822
.
"""],
]
totest['miscellaneous'] = [
["""\
For *completeness*, _`let's` ``test`` **other** forms_
|of| `inline markup` [*]_.
.. [*] See http://docutils.sf.net/docs/ref/rst/restructuredtext.html.
""",
"""\
<document source="test data">
<paragraph>
For \n\
<emphasis>
completeness
, \n\
<target ids="let-s" names="let's">
let's
\n\
<literal>
test
\n\
<strong>
other
\n\
<reference name="forms" refname="forms">
forms
\n\
<substitution_reference refname="of">
of
\n\
<title_reference>
inline markup
\n\
<footnote_reference auto="*" ids="id1">
.
<footnote auto="*" ids="id2">
<paragraph>
See \n\
<reference refuri="http://docutils.sf.net/docs/ref/rst/restructuredtext.html">
http://docutils.sf.net/docs/ref/rst/restructuredtext.html
.
"""],
]
if __name__ == '__main__':
import unittest
unittest.main(defaultTest='suite')
|
waseem18/oh-mainline
|
vendor/packages/docutils/test/test_readers/test_pep/test_inline_markup.py
|
Python
|
agpl-3.0
| 3,252
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
from frappe import _
class ItemAttribute(Document):
def validate(self):
self.validate_duplication()
self.validate_attribute_values()
def validate_duplication(self):
values, abbrs = [], []
for d in self.item_attribute_values:
d.abbr = d.abbr.upper()
if d.attribute_value in values:
frappe.throw(_("{0} must appear only once").format(d.attribute_value))
values.append(d.attribute_value)
if d.abbr in abbrs:
frappe.throw(_("{0} must appear only once").format(d.abbr))
abbrs.append(d.abbr)
def validate_attribute_values(self):
attribute_values = []
for d in self.item_attribute_values:
attribute_values.append(d.attribute_value)
variant_attributes = frappe.db.sql("select DISTINCT attribute_value from `tabVariant Attribute` where attribute=%s", self.name)
if variant_attributes:
for d in variant_attributes:
if d[0] not in attribute_values:
frappe.throw(_("Attribute Value {0} cannot be removed from {1} as Item Variants exist with this Attribute.").format(d[0], self.name))
|
treejames/erpnext
|
erpnext/stock/doctype/item_attribute/item_attribute.py
|
Python
|
agpl-3.0
| 1,275
|
#!/usr/bin/env python
#
# Copyright (C) 2013 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2013 YAMAMOTO Takashi <yamamoto at valinux co jp>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# a simple command line OF-CONFIG client
#
# a usage example:
# % PYTHONPATH=. ./bin/of_config_cli \
# --peers=sw1=localhost:1830:username:password
# (Cmd) raw_get sw1
import ryu.contrib
ryu.contrib.update_module_path()
from ryu import cfg
import cmd
import sys
import lxml.etree as ET
from ryu.lib import of_config
from ryu.lib.of_config import capable_switch
from ncclient.operations.rpc import RPCError
import ryu.lib.of_config.classes as ofc
CONF = cfg.CONF
CONF.register_cli_opts([
cfg.ListOpt('peers', default=[], help='list of peers')
])
class Peer(capable_switch.OFCapableSwitch):
def __init__(self, name, host, port, username, password):
self._name = name
super(Peer, self).__init__(
host=host, port=port, username=username, password=password,
            unknown_host_cb=lambda host, fingerprint: True)
peers = {}
def add_peer(name, host, port, username, password):
peers[name] = Peer(name, host, port, username, password)
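# Peer specs arrive via --peers and are split apart in main() below as
# name=host:port:username:password, e.g. (illustrative values):
#   --peers=sw1=localhost:1830:linc:linc,sw2=203.0.113.5:1830:user:pass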
def et_tostring_pp(tree):
# pretty_print is an lxml feature, not available in ElementTree
try:
return ET.tostring(tree, pretty_print=True)
except TypeError:
return ET.tostring(tree)
def validate(tree):
schema = ET.XMLSchema(file=of_config.OF_CONFIG_1_1_1_XSD)
if not schema(tree):
print(schema.error_log)
class Cmd(cmd.Cmd):
def __init__(self, *args, **kwargs):
self._in_onecmd = False
cmd.Cmd.__init__(self, *args, **kwargs)
def _request(self, line, f):
args = line.split()
try:
peer = args[0]
        except IndexError:
print("argument error")
return
try:
p = peers[peer]
except KeyError:
print("unknown peer %s" % peer)
return
try:
f(p, args[1:])
except RPCError as e:
print("RPC Error %s" % e)
except EOFError:
print("disconnected")
def _complete_peer(self, text, line, _begidx, _endidx):
if len((line + 'x').split()) >= 3:
return []
return [name for name in peers if name.startswith(text)]
def do_list_cap(self, line):
"""list_cap <peer>
"""
def f(p, args):
for i in p.netconf.server_capabilities:
print(i)
self._request(line, f)
def do_raw_get(self, line):
"""raw_get <peer>
"""
def f(p, args):
result = p.raw_get()
tree = ET.fromstring(result)
validate(tree)
print(et_tostring_pp(tree))
self._request(line, f)
def do_raw_get_config(self, line):
"""raw_get_config <peer> <source>
"""
def f(p, args):
try:
source = args[0]
            except IndexError:
print("argument error")
return
result = p.raw_get_config(source)
tree = ET.fromstring(result)
validate(tree)
print(et_tostring_pp(tree))
self._request(line, f)
def do_get(self, line):
"""get <peer>
eg. get sw1
"""
def f(p, args):
print(p.get())
self._request(line, f)
def do_commit(self, line):
"""commit <peer>
eg. commit sw1
"""
def f(p, args):
print(p.commit())
self._request(line, f)
def do_discard(self, line):
"""discard <peer>
eg. discard sw1
"""
def f(p, args):
print(p.discard_changes())
self._request(line, f)
def do_get_config(self, line):
"""get_config <peer> <source>
eg. get_config sw1 startup
"""
def f(p, args):
try:
source = args[0]
            except IndexError:
print("argument error")
return
print(p.get_config(source))
self._request(line, f)
def do_delete_config(self, line):
"""delete_config <peer> <source>
eg. delete_config sw1 startup
"""
def f(p, args):
try:
source = args[0]
            except IndexError:
print("argument error")
return
print(p.delete_config(source))
self._request(line, f)
def do_copy_config(self, line):
"""copy_config <peer> <source> <target>
eg. copy_config sw1 running startup
"""
def f(p, args):
try:
source, target = args
            except ValueError:
print("argument error")
return
print(p.copy_config(source, target))
self._request(line, f)
def do_list_port(self, line):
"""list_port <peer>
"""
def f(p, args):
o = p.get()
            for port in o.resources.port:
                print('%s %s %s' % (port.resource_id, port.name, port.number))
self._request(line, f)
_port_settings = [
'admin-state',
'no-forward',
'no-packet-in',
'no-receive',
]
def do_get_port_config(self, line):
"""get_config_port <peer> <source> <port>
eg. get_port_config sw1 running LogicalSwitch7-Port2
"""
def f(p, args):
try:
source, port = args
            except ValueError:
print("argument error")
return
o = p.get_config(source)
            for port_obj in o.resources.port:
                if port_obj.resource_id != port:
                    continue
                print(port_obj.resource_id)
                conf = port_obj.configuration
for k in self._port_settings:
try:
v = getattr(conf, k)
except AttributeError:
continue
print('%s %s' % (k, v))
self._request(line, f)
def do_set_port_config(self, line):
"""set_port_config <peer> <target> <port> <key> <value>
eg. set_port_config sw1 running LogicalSwitch7-Port2 admin-state down
eg. set_port_config sw1 running LogicalSwitch7-Port2 no-forward false
"""
def f(p, args):
try:
target, port, key, value = args
            except ValueError:
print("argument error")
print(args)
return
# get switch id
o = p.get()
capable_switch_id = o.id
try:
capable_switch = ofc.OFCapableSwitchType(
id=capable_switch_id,
resources=ofc.OFCapableSwitchResourcesType(
port=[
ofc.OFPortType(
resource_id=port,
configuration=ofc.OFPortConfigurationType(
**{key: value}))
]
)
)
except TypeError:
print("argument error")
return
try:
p.edit_config(target, capable_switch)
except Exception as e:
print(e)
self._request(line, f)
def do_list_queue(self, line):
"""list_queue <peer>
"""
def f(p, args):
o = p.get()
if o.resources.queue:
for q in o.resources.queue:
print('%s %s' % (q.resource_id, q.port))
self._request(line, f)
_queue_settings = [
'max-rate',
'min-rate',
'experimenter',
]
def do_get_queue_config(self, line):
"""get_queue_port <peer> <source> <queue>
eg. get_queue_config sw1 running LogicalSwitch7-Port1-Queue922
"""
def f(p, args):
try:
source, queue = args
            except ValueError:
print("argument error")
return
o = p.get_config(source)
for q in o.resources.queue:
if q.resource_id != queue:
continue
print(q.resource_id)
conf = q.properties
for k in self._queue_settings:
try:
v = getattr(conf, k)
except AttributeError:
continue
print('%s %s' % (k, v))
self._request(line, f)
def do_set_queue_config(self, line):
"""set_queue_config <peer> <target> <queue> <key> <value>
eg. set_queue_config sw1 running LogicalSwitch7-Port1-Queue922 \
max-rate 100
"""
def f(p, args):
try:
target, queue, key, value = args
            except ValueError:
print("argument error")
print(args)
return
# get switch id
o = p.get()
capable_switch_id = o.id
try:
capable_switch = ofc.OFCapableSwitchType(
id=capable_switch_id,
resources=ofc.OFCapableSwitchResourcesType(
queue=[
ofc.OFQueueType(
resource_id=queue,
properties=ofc.OFQueuePropertiesType(
**{key: value})),
]
)
)
except TypeError:
print("argument error")
return
try:
p.edit_config(target, capable_switch)
except Exception as e:
print(e)
self._request(line, f)
def do_add_queue(self, line):
"""add_queue <peer> <target> <logical-switch> <queue>
eg. add_queue sw1 running LogicalSwitch7 NameOfNewQueue
"""
def f(p, args):
try:
target, lsw, queue = args
            except ValueError:
print("argument error")
print(args)
return
# get switch id
o = p.get()
capable_switch_id = o.id
try:
capable_switch = ofc.OFCapableSwitchType(
id=capable_switch_id,
resources=ofc.OFCapableSwitchResourcesType(
queue=[
ofc.OFQueueType(resource_id=queue)
]
),
logical_switches=ofc.OFCapableSwitchLogicalSwitchesType(
switch=[ofc.OFLogicalSwitchType(
id=lsw,
resources=ofc.OFLogicalSwitchResourcesType(
queue=[queue])
)]
)
)
except TypeError:
print("argument error")
return
try:
p.edit_config(target, capable_switch)
except Exception as e:
print(e)
self._request(line, f)
def do_list_logical_switch(self, line):
"""list_logical_switch <peer>
"""
def f(p, args):
o = p.get()
for s in o.logical_switches.switch:
print('%s %s' % (s.id, s.datapath_id))
self._request(line, f)
def do_show_logical_switch(self, line):
"""show_logical_switch <peer> <logical switch>
"""
def f(p, args):
try:
(lsw,) = args
            except ValueError:
print("argument error")
return
o = p.get()
for s in o.logical_switches.switch:
if s.id != lsw:
continue
print(s.id)
print('datapath-id %s' % s.datapath_id)
if s.resources.queue:
print('queues:')
for q in s.resources.queue:
print('\t %s' % q)
if s.resources.port:
print('ports:')
for p in s.resources.port:
print('\t %s' % p)
self._request(line, f)
_lsw_settings = [
'lost-connection-behavior',
]
def do_get_logical_switch_config(self, line):
"""get_logical_switch_config <peer> <source> <logical switch>
"""
def f(p, args):
try:
source, lsw = args
            except ValueError:
print("argument error")
return
o = p.get_config(source)
for l in o.logical_switches.switch:
if l.id != lsw:
continue
print(l.id)
for k in self._lsw_settings:
try:
v = getattr(l, k)
except AttributeError:
continue
print('%s %s' % (k, v))
self._request(line, f)
def do_set_logical_switch_config(self, line):
"""set_logical_switch_config <peer> <logical switch> <key> <value>
eg. set_logical_switch_config sw1 running LogicalSwitch7 \
lost-connection-behavior failStandaloneMode
"""
def f(p, args):
try:
target, lsw, key, value = args
            except ValueError:
print("argument error")
return
# get switch id
o = p.get_config(target)
capable_switch_id = o.id
try:
capable_switch = ofc.OFCapableSwitchType(
id=capable_switch_id,
logical_switches=ofc.OFCapableSwitchLogicalSwitchesType(
switch=[ofc.OFLogicalSwitchType(
id=lsw,
**{key: value}
)]
)
)
except TypeError:
print("argument error")
return
try:
p.edit_config(target, capable_switch)
except Exception as e:
print(e)
self._request(line, f)
completedefault = _complete_peer
def complete_EOF(self, _text, _line, _begidx, _endidx):
return []
def do_EOF(self, _line):
sys.exit(0)
def onecmd(self, string):
self._in_onecmd = True
try:
return cmd.Cmd.onecmd(self, string)
finally:
self._in_onecmd = False
def main(args=None, prog=None):
CONF(args=args, prog=prog,
project='of-config-cli', version='of-config-cli')
for p_str in CONF.peers:
name, addr = p_str.split('=')
host, port, username, password = addr.rsplit(':', 3)
add_peer(name, host, port, username, password)
Cmd().cmdloop()
if __name__ == "__main__":
main()
|
sivaramakrishnansr/ryu
|
ryu/cmd/of_config_cli.py
|
Python
|
apache-2.0
| 15,609
|
import os
import sys
import shutil
from glob import glob
# --------------------------------------------------------------------------
DOC_DIR = 'hr-html'
PYWRAPS_FN = 'idaapi.py'
# --------------------------------------------------------------------------
def add_footer(lines):
S1 = 'Generated by Epydoc'
S2 = '</table>'
p = lines.find(S1)
if p == -1:
return None
p = lines.find(S2, p)
if p == -1:
return None
p += len(S2)
return lines[0:p] + '\n<!--#include virtual="/footer.shtml" -->' + lines[p:]
# --------------------------------------------------------------------------
def define_idaapi_resolver():
"""
Whenever a module named \"idaapi_<something>\" is
spotted, turn it into \"idaapi\".
"""
import epydoc.apidoc
dn = epydoc.apidoc.DottedName.__init__
def resolver(piece):
if piece is not None and isinstance(piece, basestring) and piece.startswith("idaapi_"):
return "idaapi"
else:
return piece
def wrapper(self, *pieces, **options):
        return dn(self, *map(resolver, pieces), **options)
epydoc.apidoc.DottedName.__init__ = wrapper
# --------------------------------------------------------------------------
def gen_docs():
import epydoc.cli
import swigdocs
define_idaapi_resolver()
swigdocs.gen_docs(outfn = 'pywraps.py')
# append obj/x86_win_vc_32/idaapi.py to it
# os.system(r'copy /b idaapi.py+..\obj\x86_win_vc_32\idaapi.py idaapi.py')
# delete all output files
for fn in glob('hr-html/*'):
os.unlink(fn)
epydoc.cli.optparse.sys.argv = [ 'epydoc',
'--config', '../hrdoc.cfg',
'--simple-term'
]
# Generate the documentation
epydoc.cli.cli()
# --------------------------------------------------------------------------
def patch_docs():
shutil.copy('../../hrdoc.css', 'epydoc.css')
os.system('chmod +w epydoc.css')
for fn in glob('*.html'):
f = open(fn, 'r')
lines = f.read()
f.close()
r = add_footer(lines)
if not r:
print "-",
continue
f = open(fn, 'w')
f.write(r)
f.close()
print "+",
print "\nDocumentation patched!"
# --------------------------------------------------------------------------
def main():
# Save old directory and adjust import path
curdir = os.getcwd() + os.sep
sys.path.append(curdir + 'python')
sys.path.append(curdir + 'tools')
sys.path.append(curdir + 'docs')
old_dir = os.getcwd()
try:
print "Generating documentation....."
os.chdir('docs')
gen_docs()
os.chdir(DOC_DIR)
patch_docs()
print "Documentation generated!"
finally:
os.chdir(old_dir)
# --------------------------------------------------------------------------
if __name__ == '__main__':
main()
    sys.exit(0)
|
zachriggle/idapython
|
hrdoc.py
|
Python
|
bsd-3-clause
| 3,025
|
import agents as ag
def HW2Agent() -> object:
"An agent that keeps track of what locations are clean or dirty."
oldPercepts = [('None', 'Clean')]
oldActions = ['NoOp']
actionScores = [{
'Right': 0,
'Left': 0,
'Up': -1,
'Down': -1,
'NoOp': -100,
}]
level = 0
def program(percept):
"Same as ReflexVacuumAgent, except if everything is clean, do NoOp."
level = len(actionScores) - 1
bump, status = percept
lastBump, lastStatus = oldPercepts[-1]
lastAction = oldActions[-1]
if status == 'Dirty':
action = 'Suck'
actionScores[level][lastAction] += 2
else:
if bump == 'Bump':
actionScores[level][lastAction] -= 10
else:
if lastAction == 'Up' or lastAction == 'Down':
actionScores.append({
'Right': 0,
'Left': 0,
'Up': -1,
'Down': -1,
})
highest = -80
for actionType, score in actionScores[level].items():
if score > highest:
highest = score
action = actionType
print(actionScores)
oldPercepts.append(percept)
oldActions.append(action)
return action
return ag.Agent(program)
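# Score bookkeeping sketch (numbers illustrative): a 'Bump' costs the last
# action 10 points and cleaning a dirty square credits it with 2, so e.g.
# {'Right': 2, 'Left': 0, 'Up': -1, 'Down': -1, 'NoOp': -100} makes 'Right'
# win the argmax in program() above until it starts bumping into a wall.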
|
WhittKinley/aima-python
|
submissions/Sery/vacuum2.py
|
Python
|
mit
| 1,432
|
# Example of using Python (via the cython bindings) as a [HAL] HALFILE.
# In the INI file, add it as the last HALFILE:
#[HAL]
#HALFILE = haltest.py
from machinekit.halfile import rt, hal
rt.loadrt("supply")
hal.addf("supply.0.update","servo-thread")
|
ArcEye/machinekit-testing
|
configs/sim/axis/haltest.py
|
Python
|
lgpl-2.1
| 242
|
# Support for the GeoRSS format
# Copyright 2010-2015 Kurt McKee <contactme@kurtmckee.org>
# Copyright 2002-2008 Mark Pilgrim
# All rights reserved.
#
# This file is a part of feedparser.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import absolute_import, unicode_literals
from ..util import FeedParserDict
class Namespace(object):
supported_namespaces = {
'http://www.w3.org/2003/01/geo/wgs84_pos#': 'geo',
'http://www.georss.org/georss': 'georss',
'http://www.opengis.net/gml': 'gml',
}
def __init__(self):
self.ingeometry = 0
super(Namespace, self).__init__()
def _start_georssgeom(self, attrsD):
self.push('geometry', 0)
context = self._getContext()
context['where'] = FeedParserDict()
_start_georss_point = _start_georssgeom
_start_georss_line = _start_georssgeom
_start_georss_polygon = _start_georssgeom
_start_georss_box = _start_georssgeom
def _save_where(self, geometry):
context = self._getContext()
context['where'].update(geometry)
def _end_georss_point(self):
geometry = _parse_georss_point(self.pop('geometry'))
if geometry:
self._save_where(geometry)
def _end_georss_line(self):
geometry = _parse_georss_line(self.pop('geometry'))
if geometry:
self._save_where(geometry)
def _end_georss_polygon(self):
this = self.pop('geometry')
geometry = _parse_georss_polygon(this)
if geometry:
self._save_where(geometry)
def _end_georss_box(self):
geometry = _parse_georss_box(self.pop('geometry'))
if geometry:
self._save_where(geometry)
def _start_where(self, attrsD):
self.push('where', 0)
context = self._getContext()
context['where'] = FeedParserDict()
_start_georss_where = _start_where
def _parse_srs_attrs(self, attrsD):
srsName = attrsD.get('srsname')
try:
srsDimension = int(attrsD.get('srsdimension', '2'))
except ValueError:
srsDimension = 2
context = self._getContext()
context['where']['srsName'] = srsName
context['where']['srsDimension'] = srsDimension
def _start_gml_point(self, attrsD):
self._parse_srs_attrs(attrsD)
self.ingeometry = 1
self.push('geometry', 0)
def _start_gml_linestring(self, attrsD):
self._parse_srs_attrs(attrsD)
self.ingeometry = 'linestring'
self.push('geometry', 0)
def _start_gml_polygon(self, attrsD):
self._parse_srs_attrs(attrsD)
self.push('geometry', 0)
def _start_gml_exterior(self, attrsD):
self.push('geometry', 0)
def _start_gml_linearring(self, attrsD):
self.ingeometry = 'polygon'
self.push('geometry', 0)
def _start_gml_pos(self, attrsD):
self.push('pos', 0)
def _end_gml_pos(self):
this = self.pop('pos')
context = self._getContext()
srsName = context['where'].get('srsName')
srsDimension = context['where'].get('srsDimension', 2)
swap = True
if srsName and "EPSG" in srsName:
epsg = int(srsName.split(":")[-1])
swap = bool(epsg in _geogCS)
geometry = _parse_georss_point(this, swap=swap, dims=srsDimension)
if geometry:
self._save_where(geometry)
def _start_gml_poslist(self, attrsD):
self.push('pos', 0)
def _end_gml_poslist(self):
this = self.pop('pos')
context = self._getContext()
srsName = context['where'].get('srsName')
srsDimension = context['where'].get('srsDimension', 2)
swap = True
if srsName and "EPSG" in srsName:
epsg = int(srsName.split(":")[-1])
swap = bool(epsg in _geogCS)
geometry = _parse_poslist(
this, self.ingeometry, swap=swap, dims=srsDimension)
if geometry:
self._save_where(geometry)
def _end_geom(self):
self.ingeometry = 0
self.pop('geometry')
_end_gml_point = _end_geom
_end_gml_linestring = _end_geom
_end_gml_linearring = _end_geom
_end_gml_exterior = _end_geom
_end_gml_polygon = _end_geom
def _end_where(self):
self.pop('where')
_end_georss_where = _end_where
# GeoRSS geometry parsers. Each return a dict with 'type' and 'coordinates'
# items, or None in the case of a parsing error.
def _parse_poslist(value, geom_type, swap=True, dims=2):
if geom_type == 'linestring':
return _parse_georss_line(value, swap, dims)
elif geom_type == 'polygon':
ring = _parse_georss_line(value, swap, dims)
return {'type': 'Polygon', 'coordinates': (ring['coordinates'],)}
else:
return None
def _gen_georss_coords(value, swap=True, dims=2):
# A generator of (lon, lat) pairs from a string of encoded GeoRSS
# coordinates. Converts to floats and swaps order.
latlons = (float(ll) for ll in value.replace(',', ' ').split())
    while True:
        # Under PEP 479 (Python 3.7+) a StopIteration escaping a generator
        # becomes a RuntimeError, so exhaust the input explicitly.
        try:
            t = [next(latlons), next(latlons)][::swap and -1 or 1]
            if dims == 3:
                t.append(next(latlons))
        except StopIteration:
            return
        yield tuple(t)
def _parse_georss_point(value, swap=True, dims=2):
# A point contains a single latitude-longitude pair, separated by
# whitespace. We'll also handle comma separators.
try:
coords = list(_gen_georss_coords(value, swap, dims))
return {'type': 'Point', 'coordinates': coords[0]}
except (IndexError, ValueError):
return None
def _parse_georss_line(value, swap=True, dims=2):
# A line contains a space separated list of latitude-longitude pairs in
# WGS84 coordinate reference system, with each pair separated by
# whitespace. There must be at least two pairs.
try:
coords = list(_gen_georss_coords(value, swap, dims))
return {'type': 'LineString', 'coordinates': coords}
except (IndexError, ValueError):
return None
def _parse_georss_polygon(value, swap=True, dims=2):
# A polygon contains a space separated list of latitude-longitude pairs,
# with each pair separated by whitespace. There must be at least four
# pairs, with the last being identical to the first (so a polygon has a
# minimum of three actual points).
try:
ring = list(_gen_georss_coords(value, swap, dims))
except (IndexError, ValueError):
return None
if len(ring) < 4:
return None
return {'type': 'Polygon', 'coordinates': (ring,)}
def _parse_georss_box(value, swap=True, dims=2):
# A bounding box is a rectangular region, often used to define the extents
# of a map or a rough area of interest. A box contains two space separate
# latitude-longitude pairs, with each pair separated by whitespace. The
# first pair is the lower corner, the second is the upper corner.
try:
coords = list(_gen_georss_coords(value, swap, dims))
return {'type': 'Box', 'coordinates': tuple(coords)}
except (IndexError, ValueError):
return None
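# Quick illustrative sketch of the parsers above (coordinates made up):
#   _parse_georss_point('45.256 -71.92')
#   -> {'type': 'Point', 'coordinates': (-71.92, 45.256)}
# With the default swap=True, latitude-longitude input comes out (lon, lat).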
# The list of EPSG codes for geographic (latitude/longitude) coordinate
# systems to support decoding of GeoRSS GML profiles.
_geogCS = [
3819, 3821, 3824, 3889, 3906, 4001, 4002, 4003, 4004, 4005, 4006, 4007, 4008,
4009, 4010, 4011, 4012, 4013, 4014, 4015, 4016, 4018, 4019, 4020, 4021, 4022,
4023, 4024, 4025, 4027, 4028, 4029, 4030, 4031, 4032, 4033, 4034, 4035, 4036,
4041, 4042, 4043, 4044, 4045, 4046, 4047, 4052, 4053, 4054, 4055, 4075, 4081,
4120, 4121, 4122, 4123, 4124, 4125, 4126, 4127, 4128, 4129, 4130, 4131, 4132,
4133, 4134, 4135, 4136, 4137, 4138, 4139, 4140, 4141, 4142, 4143, 4144, 4145,
4146, 4147, 4148, 4149, 4150, 4151, 4152, 4153, 4154, 4155, 4156, 4157, 4158,
4159, 4160, 4161, 4162, 4163, 4164, 4165, 4166, 4167, 4168, 4169, 4170, 4171,
4172, 4173, 4174, 4175, 4176, 4178, 4179, 4180, 4181, 4182, 4183, 4184, 4185,
4188, 4189, 4190, 4191, 4192, 4193, 4194, 4195, 4196, 4197, 4198, 4199, 4200,
4201, 4202, 4203, 4204, 4205, 4206, 4207, 4208, 4209, 4210, 4211, 4212, 4213,
4214, 4215, 4216, 4218, 4219, 4220, 4221, 4222, 4223, 4224, 4225, 4226, 4227,
4228, 4229, 4230, 4231, 4232, 4233, 4234, 4235, 4236, 4237, 4238, 4239, 4240,
4241, 4242, 4243, 4244, 4245, 4246, 4247, 4248, 4249, 4250, 4251, 4252, 4253,
4254, 4255, 4256, 4257, 4258, 4259, 4260, 4261, 4262, 4263, 4264, 4265, 4266,
4267, 4268, 4269, 4270, 4271, 4272, 4273, 4274, 4275, 4276, 4277, 4278, 4279,
4280, 4281, 4282, 4283, 4284, 4285, 4286, 4287, 4288, 4289, 4291, 4292, 4293,
4294, 4295, 4296, 4297, 4298, 4299, 4300, 4301, 4302, 4303, 4304, 4306, 4307,
4308, 4309, 4310, 4311, 4312, 4313, 4314, 4315, 4316, 4317, 4318, 4319, 4322,
4324, 4326, 4463, 4470, 4475, 4483, 4490, 4555, 4558, 4600, 4601, 4602, 4603,
4604, 4605, 4606, 4607, 4608, 4609, 4610, 4611, 4612, 4613, 4614, 4615, 4616,
4617, 4618, 4619, 4620, 4621, 4622, 4623, 4624, 4625, 4626, 4627, 4628, 4629,
4630, 4631, 4632, 4633, 4634, 4635, 4636, 4637, 4638, 4639, 4640, 4641, 4642,
4643, 4644, 4645, 4646, 4657, 4658, 4659, 4660, 4661, 4662, 4663, 4664, 4665,
4666, 4667, 4668, 4669, 4670, 4671, 4672, 4673, 4674, 4675, 4676, 4677, 4678,
4679, 4680, 4681, 4682, 4683, 4684, 4685, 4686, 4687, 4688, 4689, 4690, 4691,
4692, 4693, 4694, 4695, 4696, 4697, 4698, 4699, 4700, 4701, 4702, 4703, 4704,
4705, 4706, 4707, 4708, 4709, 4710, 4711, 4712, 4713, 4714, 4715, 4716, 4717,
4718, 4719, 4720, 4721, 4722, 4723, 4724, 4725, 4726, 4727, 4728, 4729, 4730,
4731, 4732, 4733, 4734, 4735, 4736, 4737, 4738, 4739, 4740, 4741, 4742, 4743,
4744, 4745, 4746, 4747, 4748, 4749, 4750, 4751, 4752, 4753, 4754, 4755, 4756,
4757, 4758, 4759, 4760, 4761, 4762, 4763, 4764, 4765, 4801, 4802, 4803, 4804,
4805, 4806, 4807, 4808, 4809, 4810, 4811, 4813, 4814, 4815, 4816, 4817, 4818,
4819, 4820, 4821, 4823, 4824, 4901, 4902, 4903, 4904, 4979 ]
|
terbolous/SickRage
|
lib/feedparser/namespaces/georss.py
|
Python
|
gpl-3.0
| 11,117
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Unit tests for local command-line-interface debug wrapper session."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
import tempfile
from tensorflow.python.client import session
from tensorflow.python.debug.cli import cli_shared
from tensorflow.python.debug.cli import debugger_cli_common
from tensorflow.python.debug.wrappers import local_cli_wrapper
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
class LocalCLIDebuggerWrapperSessionForTest(
local_cli_wrapper.LocalCLIDebugWrapperSession):
"""Subclasses the wrapper class for testing.
Overrides its CLI-related methods for headless testing environments.
Inserts observer variables for assertions.
"""
def __init__(self, command_args_sequence, sess, dump_root=None):
"""Constructor of the for-test subclass.
Args:
command_args_sequence: (list of list of str) A list of arguments for the
"run" command.
sess: See the doc string of LocalCLIDebugWrapperSession.__init__.
dump_root: See the doc string of LocalCLIDebugWrapperSession.__init__.
"""
local_cli_wrapper.LocalCLIDebugWrapperSession.__init__(
self, sess, dump_root=dump_root, log_usage=False)
self._command_args_sequence = command_args_sequence
self._response_pointer = 0
# Observer variables.
self.observers = {
"debug_dumps": [],
"tf_errors": [],
"run_start_cli_run_numbers": [],
"run_end_cli_run_numbers": [],
}
def _prep_cli_for_run_start(self):
pass
def _prep_cli_for_run_end(self, debug_dump, tf_error, passed_filter):
self.observers["debug_dumps"].append(debug_dump)
self.observers["tf_errors"].append(tf_error)
def _launch_cli(self):
if self._is_run_start:
self.observers["run_start_cli_run_numbers"].append(self._run_call_count)
else:
self.observers["run_end_cli_run_numbers"].append(self._run_call_count)
command_args = self._command_args_sequence[self._response_pointer]
self._response_pointer += 1
try:
self._run_handler(command_args)
except debugger_cli_common.CommandLineExit as e:
response = e.exit_token
return response
class LocalCLIDebugWrapperSessionTest(test_util.TensorFlowTestCase):
def setUp(self):
self._tmp_dir = tempfile.mktemp()
self.v = variables.Variable(10.0, name="v")
self.delta = constant_op.constant(1.0, name="delta")
self.inc_v = state_ops.assign_add(self.v, self.delta, name="inc_v")
self.ph = array_ops.placeholder(dtypes.float32, name="ph")
self.xph = array_ops.transpose(self.ph, name="xph")
self.m = constant_op.constant(
[[0.0, 1.0, 2.0], [-4.0, -1.0, 0.0]], dtype=dtypes.float32, name="m")
self.y = math_ops.matmul(self.m, self.xph, name="y")
self.sess = session.Session()
# Initialize variable.
self.sess.run(self.v.initializer)
def tearDown(self):
ops.reset_default_graph()
if os.path.isdir(self._tmp_dir):
shutil.rmtree(self._tmp_dir)
def testConstructWrapper(self):
local_cli_wrapper.LocalCLIDebugWrapperSession(
session.Session(), log_usage=False)
def testConstructWrapperWithExistingEmptyDumpRoot(self):
os.mkdir(self._tmp_dir)
self.assertTrue(os.path.isdir(self._tmp_dir))
local_cli_wrapper.LocalCLIDebugWrapperSession(
session.Session(), dump_root=self._tmp_dir, log_usage=False)
def testConstructWrapperWithExistingNonEmptyDumpRoot(self):
os.mkdir(self._tmp_dir)
dir_path = os.path.join(self._tmp_dir, "foo")
os.mkdir(dir_path)
self.assertTrue(os.path.isdir(dir_path))
with self.assertRaisesRegexp(
ValueError, "dump_root path points to a non-empty directory"):
local_cli_wrapper.LocalCLIDebugWrapperSession(
session.Session(), dump_root=self._tmp_dir, log_usage=False)
def testConstructWrapperWithExistingFileDumpRoot(self):
os.mkdir(self._tmp_dir)
file_path = os.path.join(self._tmp_dir, "foo")
open(file_path, "a").close() # Create the file
self.assertTrue(os.path.isfile(file_path))
with self.assertRaisesRegexp(ValueError, "dump_root path points to a file"):
local_cli_wrapper.LocalCLIDebugWrapperSession(
session.Session(), dump_root=file_path, log_usage=False)
def testRunsUnderDebugMode(self):
# Test command sequence: run; run; run;
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[[], [], []], self.sess, dump_root=self._tmp_dir)
# run under debug mode twice.
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
# Verify that the assign_add op did take effect.
self.assertAllClose(12.0, self.sess.run(self.v))
# Assert correct run call numbers for which the CLI has been launched at
# run-start and run-end.
self.assertEqual([1], wrapped_sess.observers["run_start_cli_run_numbers"])
self.assertEqual([1, 2], wrapped_sess.observers["run_end_cli_run_numbers"])
# Verify that the dumps have been generated and picked up during run-end.
self.assertEqual(2, len(wrapped_sess.observers["debug_dumps"]))
# Verify that the TensorFlow runtime errors are picked up and in this case,
# they should be both None.
self.assertEqual([None, None], wrapped_sess.observers["tf_errors"])
def testRunInfoOutputAtRunEndIsCorrect(self):
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[[], [], []], self.sess, dump_root=self._tmp_dir)
wrapped_sess.run(self.inc_v)
run_info_output = wrapped_sess._run_info_handler([])
tfdbg_logo = cli_shared.get_tfdbg_logo()
# The run_info output in the first run() call should contain the tfdbg logo.
self.assertEqual(tfdbg_logo.lines,
run_info_output.lines[:len(tfdbg_logo.lines)])
menu = run_info_output.annotations[debugger_cli_common.MAIN_MENU_KEY]
self.assertIn("list_tensors", menu.captions())
wrapped_sess.run(self.inc_v)
run_info_output = wrapped_sess._run_info_handler([])
# The run_info output in the second run() call should NOT contain the logo.
self.assertNotEqual(tfdbg_logo.lines,
run_info_output.lines[:len(tfdbg_logo.lines)])
menu = run_info_output.annotations[debugger_cli_common.MAIN_MENU_KEY]
self.assertIn("list_tensors", menu.captions())
def testRunsUnderNonDebugMode(self):
# Test command sequence: run -n; run -n; run -n;
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["-n"], ["-n"], ["-n"]], self.sess, dump_root=self._tmp_dir)
# run three times.
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
self.assertAllClose(13.0, self.sess.run(self.v))
self.assertEqual([1, 2, 3],
wrapped_sess.observers["run_start_cli_run_numbers"])
self.assertEqual([], wrapped_sess.observers["run_end_cli_run_numbers"])
def testRunsUnderNonDebugThenDebugMode(self):
# Test command sequence: run -n; run -n; run; run;
# Do two NON_DEBUG_RUNs, followed by DEBUG_RUNs.
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["-n"], ["-n"], [], []], self.sess, dump_root=self._tmp_dir)
# run three times.
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
self.assertAllClose(13.0, self.sess.run(self.v))
self.assertEqual([1, 2, 3],
wrapped_sess.observers["run_start_cli_run_numbers"])
# Here, the CLI should have been launched only under the third run,
# because the first and second runs are NON_DEBUG.
self.assertEqual([3], wrapped_sess.observers["run_end_cli_run_numbers"])
self.assertEqual(1, len(wrapped_sess.observers["debug_dumps"]))
self.assertEqual([None], wrapped_sess.observers["tf_errors"])
def testRunMultipleTimesWithinLimit(self):
# Test command sequence: run -t 3; run;
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["-t", "3"], []], self.sess, dump_root=self._tmp_dir)
# run three times.
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
self.assertAllClose(13.0, self.sess.run(self.v))
self.assertEqual([1], wrapped_sess.observers["run_start_cli_run_numbers"])
self.assertEqual([3], wrapped_sess.observers["run_end_cli_run_numbers"])
self.assertEqual(1, len(wrapped_sess.observers["debug_dumps"]))
self.assertEqual([None], wrapped_sess.observers["tf_errors"])
def testRunMultipleTimesOverLimit(self):
# Test command sequence: run -t 3;
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["-t", "3"]], self.sess, dump_root=self._tmp_dir)
# run twice, which is less than the number of times specified by the
# command.
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
self.assertAllClose(12.0, self.sess.run(self.v))
self.assertEqual([1], wrapped_sess.observers["run_start_cli_run_numbers"])
self.assertEqual([], wrapped_sess.observers["run_end_cli_run_numbers"])
self.assertEqual(0, len(wrapped_sess.observers["debug_dumps"]))
self.assertEqual([], wrapped_sess.observers["tf_errors"])
  def testRunMixingDebugModeAndMultipleTimes(self):
# Test command sequence: run -n; run -t 2; run; run;
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["-n"], ["-t", "2"], [], []], self.sess, dump_root=self._tmp_dir)
# run four times.
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
self.assertAllClose(14.0, self.sess.run(self.v))
self.assertEqual([1, 2],
wrapped_sess.observers["run_start_cli_run_numbers"])
self.assertEqual([3, 4], wrapped_sess.observers["run_end_cli_run_numbers"])
self.assertEqual(2, len(wrapped_sess.observers["debug_dumps"]))
self.assertEqual([None, None], wrapped_sess.observers["tf_errors"])
def testRuntimeErrorShouldBeCaught(self):
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[[], []], self.sess, dump_root=self._tmp_dir)
    # Do a run that should lead to a TensorFlow runtime error.
wrapped_sess.run(self.y, feed_dict={self.ph: [[0.0], [1.0], [2.0]]})
self.assertEqual([1], wrapped_sess.observers["run_start_cli_run_numbers"])
self.assertEqual([1], wrapped_sess.observers["run_end_cli_run_numbers"])
self.assertEqual(1, len(wrapped_sess.observers["debug_dumps"]))
# Verify that the runtime error is caught by the wrapped session properly.
self.assertEqual(1, len(wrapped_sess.observers["tf_errors"]))
tf_error = wrapped_sess.observers["tf_errors"][0]
self.assertEqual("y", tf_error.op.name)
def testRunTillFilterPassesShouldLaunchCLIAtCorrectRun(self):
# Test command sequence:
# run -f greater_than_twelve; run -f greater_than_twelve; run;
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["-f", "v_greater_than_twelve"], ["-f", "v_greater_than_twelve"], []],
self.sess,
dump_root=self._tmp_dir)
def v_greater_than_twelve(datum, tensor):
return datum.node_name == "v" and tensor > 12.0
wrapped_sess.add_tensor_filter("v_greater_than_twelve",
v_greater_than_twelve)
# run five times.
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
self.assertAllClose(15.0, self.sess.run(self.v))
self.assertEqual([1], wrapped_sess.observers["run_start_cli_run_numbers"])
# run-end CLI should NOT have been launched for run #2 and #3, because only
# starting from run #4 v becomes greater than 12.0.
self.assertEqual([4, 5], wrapped_sess.observers["run_end_cli_run_numbers"])
self.assertEqual(2, len(wrapped_sess.observers["debug_dumps"]))
self.assertEqual([None, None], wrapped_sess.observers["tf_errors"])
if __name__ == "__main__":
googletest.main()
|
odejesush/tensorflow
|
tensorflow/python/debug/wrappers/local_cli_wrapper_test.py
|
Python
|
apache-2.0
| 13,116
|
# -*- coding: utf-8 -*-
# Copyright (c) 2017, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
from frappe.model.document import Document
class ChapterMember(Document):
pass
|
ovresko/erpnext
|
erpnext/non_profit/doctype/chapter_member/chapter_member.py
|
Python
|
gpl-3.0
| 267
|
# coding: utf8
{
'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"Uaktualnij" jest dodatkowym wyra\xc5\xbceniem postaci "pole1=\'nowawarto\xc5\x9b\xc4\x87\'". Nie mo\xc5\xbcesz uaktualni\xc4\x87 lub usun\xc4\x85\xc4\x87 wynik\xc3\xb3w z JOIN:',
'%Y-%m-%d': '%Y-%m-%d',
'%Y-%m-%d %H:%M:%S': '%Y-%m-%d %H:%M:%S',
'%s rows deleted': 'Wierszy usuni\xc4\x99tych: %s',
'%s rows updated': 'Wierszy uaktualnionych: %s',
'Available databases and tables': 'Dost\xc4\x99pne bazy danych i tabele',
'Cannot be empty': 'Nie mo\xc5\xbce by\xc4\x87 puste',
'Change Password': 'Change Password',
'Check to delete': 'Zaznacz aby usun\xc4\x85\xc4\x87',
'Controller': 'Controller',
'Copyright': 'Copyright',
'Current request': 'Aktualne \xc5\xbc\xc4\x85danie',
'Current response': 'Aktualna odpowied\xc5\xba',
'Current session': 'Aktualna sesja',
'DB Model': 'DB Model',
'Database': 'Database',
'Delete:': 'Usu\xc5\x84:',
'Edit': 'Edit',
'Edit Profile': 'Edit Profile',
'Edit This App': 'Edit This App',
'Edit current record': 'Edytuj aktualny rekord',
'Hello World': 'Witaj \xc5\x9awiecie',
'Import/Export': 'Importuj/eksportuj',
'Index': 'Index',
'Internal State': 'Stan wewn\xc4\x99trzny',
'Invalid Query': 'B\xc5\x82\xc4\x99dne zapytanie',
'Layout': 'Layout',
'Login': 'Zaloguj',
'Logout': 'Logout',
'Lost Password': 'Przypomnij has\xc5\x82o',
'Main Menu': 'Main Menu',
'Menu Model': 'Menu Model',
'New Record': 'Nowy rekord',
'No databases in this application': 'Brak baz danych w tej aplikacji',
'Powered by': 'Powered by',
'Query:': 'Zapytanie:',
'Register': 'Zarejestruj',
'Rows in table': 'Wiersze w tabeli',
'Rows selected': 'Wybrane wiersze',
'Stylesheet': 'Stylesheet',
'Sure you want to delete this object?': 'Czy na pewno chcesz usun\xc4\x85\xc4\x87 ten obiekt?',
'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.': '"Zapytanie" jest warunkiem postaci "db.tabela1.pole1==\'warto\xc5\x9b\xc4\x87\'". Takie co\xc5\x9b jak "db.tabela1.pole1==db.tabela2.pole2" oznacza SQL JOIN.',
'Update:': 'Uaktualnij:',
'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': 'U\xc5\xbcyj (...)&(...) jako AND, (...)|(...) jako OR oraz ~(...) jako NOT do tworzenia bardziej skomplikowanych zapyta\xc5\x84.',
'View': 'View',
'Welcome %s': 'Welcome %s',
'Welcome to web2py': 'Witaj w web2py',
'appadmin is disabled because insecure channel': 'appadmin is disabled because insecure channel',
'cache': 'cache',
'change password': 'change password',
'Online examples': 'Kliknij aby przej\xc5\x9b\xc4\x87 do interaktywnych przyk\xc5\x82ad\xc3\xb3w',
'Administrative interface': 'Kliknij aby przej\xc5\x9b\xc4\x87 do panelu administracyjnego',
'customize me!': 'dostosuj mnie!',
'data uploaded': 'dane wys\xc5\x82ane',
'database': 'baza danych',
'database %s select': 'wyb\xc3\xb3r z bazy danych %s',
'db': 'baza danych',
'design': 'projektuj',
'done!': 'zrobione!',
'edit profile': 'edit profile',
'export as csv file': 'eksportuj jako plik csv',
'insert new': 'wstaw nowy rekord tabeli',
'insert new %s': 'wstaw nowy rekord do tabeli %s',
'invalid request': 'B\xc5\x82\xc4\x99dne \xc5\xbc\xc4\x85danie',
'login': 'login',
'logout': 'logout',
'new record inserted': 'nowy rekord zosta\xc5\x82 wstawiony',
'next 100 rows': 'nast\xc4\x99pne 100 wierszy',
'or import from csv file': 'lub zaimportuj z pliku csv',
'previous 100 rows': 'poprzednie 100 wierszy',
'record': 'record',
'record does not exist': 'rekord nie istnieje',
'record id': 'id rekordu',
'register': 'register',
'selected': 'wybranych',
'state': 'stan',
'table': 'tabela',
'unable to parse csv file': 'nie mo\xc5\xbcna sparsowa\xc4\x87 pliku csv',
}
|
montaggroup/montag-token-redeemer
|
web2py/applications/token_redeemer/languages/pl-pl.py
|
Python
|
gpl-3.0
| 3,788
|
# -*- coding: utf-8 -*-
"""
Models for Credit Eligibility for courses.
Credit courses allow students to receive university credit for
successful completion of a course on EdX
"""
import datetime
from collections import defaultdict
import logging
import pytz
from django.conf import settings
from django.core.cache import cache
from django.dispatch import receiver
from django.db import models, transaction, IntegrityError
from django.core.validators import RegexValidator
from simple_history.models import HistoricalRecords
from jsonfield.fields import JSONField
from model_utils.models import TimeStampedModel
from xmodule_django.models import CourseKeyField
from django.utils.translation import ugettext_lazy
log = logging.getLogger(__name__)
class CreditProvider(TimeStampedModel):
"""
    This model represents an institution that can grant credit for a course.
    Each provider is identified by a unique ID (e.g., 'ASU'). CreditProvider
    also includes a `url` to which the student is sent when trying to get
    credit for a course. The eligibility duration is used to set how long
    the credit-eligible message appears on the dashboard.
"""
provider_id = models.CharField(
max_length=255,
unique=True,
validators=[
RegexValidator(
regex=r"^[a-z,A-Z,0-9,\-]+$",
message="Only alphanumeric characters and hyphens (-) are allowed",
code="invalid_provider_id",
)
],
help_text=ugettext_lazy(
"Unique identifier for this credit provider. "
"Only alphanumeric characters and hyphens (-) are allowed. "
"The identifier is case-sensitive."
)
)
active = models.BooleanField(
default=True,
help_text=ugettext_lazy("Whether the credit provider is currently enabled.")
)
display_name = models.CharField(
max_length=255,
help_text=ugettext_lazy("Name of the credit provider displayed to users")
)
enable_integration = models.BooleanField(
default=False,
help_text=ugettext_lazy(
"When true, automatically notify the credit provider "
"when a user requests credit. "
"In order for this to work, a shared secret key MUST be configured "
"for the credit provider in secure auth settings."
)
)
provider_url = models.URLField(
default="",
help_text=ugettext_lazy(
"URL of the credit provider. If automatic integration is "
"enabled, this will the the end-point that we POST to "
"to notify the provider of a credit request. Otherwise, the "
"user will be shown a link to this URL, so the user can "
"request credit from the provider directly."
)
)
provider_status_url = models.URLField(
default="",
help_text=ugettext_lazy(
"URL from the credit provider where the user can check the status "
"of his or her request for credit. This is displayed to students "
"*after* they have requested credit."
)
)
provider_description = models.TextField(
default="",
help_text=ugettext_lazy(
"Description for the credit provider displayed to users."
)
)
fulfillment_instructions = models.TextField(
null=True,
blank=True,
help_text=ugettext_lazy(
"Plain text or html content for displaying further steps on "
"receipt page *after* paying for the credit to get credit for a "
"credit course against a credit provider."
)
)
eligibility_email_message = models.TextField(
default="",
help_text=ugettext_lazy(
"Plain text or html content for displaying custom message inside "
"credit eligibility email content which is sent when user has met "
"all credit eligibility requirements."
)
)
receipt_email_message = models.TextField(
default="",
help_text=ugettext_lazy(
"Plain text or html content for displaying custom message inside "
"credit receipt email content which is sent *after* paying to get "
"credit for a credit course."
)
)
thumbnail_url = models.URLField(
default="",
max_length=255,
help_text=ugettext_lazy(
"Thumbnail image url of the credit provider."
)
)
CREDIT_PROVIDERS_CACHE_KEY = "credit.providers.list"
@classmethod
def get_credit_providers(cls, providers_list=None):
"""
Retrieve a list of all credit providers or filter on providers_list, represented
as dictionaries.
        Arguments:
            providers_list (list of strings or None): list of provider ids to
                filter the results on, or None for all providers.
Returns:
list of providers represented as dictionaries.
"""
# Attempt to retrieve the credit provider list from the cache.
# The cache key is invalidated when the provider list is updated
# (by a post-save signal handler on the CreditProvider model).
# This doesn't happen very often, so we expect a *very* high
# cache hit rate.
credit_providers = cache.get(cls.CREDIT_PROVIDERS_CACHE_KEY)
if credit_providers is None:
# Cache miss: construct the provider list and save it in the cache
credit_providers = CreditProvider.objects.filter(active=True)
credit_providers = [
{
"id": provider.provider_id,
"display_name": provider.display_name,
"url": provider.provider_url,
"status_url": provider.provider_status_url,
"description": provider.provider_description,
"enable_integration": provider.enable_integration,
"fulfillment_instructions": provider.fulfillment_instructions,
"thumbnail_url": provider.thumbnail_url,
}
for provider in credit_providers
]
cache.set(cls.CREDIT_PROVIDERS_CACHE_KEY, credit_providers)
if providers_list:
credit_providers = [provider for provider in credit_providers if provider['id'] in providers_list]
return credit_providers
@classmethod
def get_credit_provider(cls, provider_id):
"""
Retrieve the active credit provider with the given 'provider_id', or None.
"""
try:
return CreditProvider.objects.get(active=True, provider_id=provider_id)
except cls.DoesNotExist:
return None
def __unicode__(self):
"""Unicode representation of the credit provider. """
return self.provider_id
@receiver(models.signals.post_save, sender=CreditProvider)
@receiver(models.signals.post_delete, sender=CreditProvider)
def invalidate_provider_cache(sender, **kwargs): # pylint: disable=unused-argument
"""Invalidate the cache of credit providers. """
cache.delete(CreditProvider.CREDIT_PROVIDERS_CACHE_KEY)
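# Illustrative sketch (not part of the original module) of the cache
# round-trip implemented above; assumes a configured Django cache backend
# and an existing provider with id "ASU".
#
#     CreditProvider.get_credit_providers()   # cache miss: hits the database
#     CreditProvider.get_credit_providers()   # cache hit: no database query
#     provider = CreditProvider.objects.get(provider_id="ASU")
#     provider.display_name = "Arizona State University"
#     provider.save()                         # post_save deletes the cache key
#     CreditProvider.get_credit_providers()   # miss again: list is rebuilt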
class CreditCourse(models.Model):
"""
Model for tracking a credit course.
"""
course_key = CourseKeyField(max_length=255, db_index=True, unique=True)
enabled = models.BooleanField(default=False)
CREDIT_COURSES_CACHE_KEY = "credit.courses.set"
@classmethod
def is_credit_course(cls, course_key):
"""
Check whether the course has been configured for credit.
Args:
course_key (CourseKey): Identifier of the course.
Returns:
bool: True iff this is a credit course.
"""
credit_courses = cache.get(cls.CREDIT_COURSES_CACHE_KEY)
if credit_courses is None:
credit_courses = set(
unicode(course.course_key)
for course in cls.objects.filter(enabled=True)
)
cache.set(cls.CREDIT_COURSES_CACHE_KEY, credit_courses)
return unicode(course_key) in credit_courses
@classmethod
def get_credit_course(cls, course_key):
"""
Get the credit course, if one exists, for the given 'course_key'.
Args:
course_key(CourseKey): The course identifier
Raises:
DoesNotExist if no CreditCourse exists for the given course key.
Returns:
CreditCourse if one exists for the given course key.
"""
return cls.objects.get(course_key=course_key, enabled=True)
def __unicode__(self):
"""Unicode representation of the credit course. """
return unicode(self.course_key)
@receiver(models.signals.post_save, sender=CreditCourse)
@receiver(models.signals.post_delete, sender=CreditCourse)
def invalidate_credit_courses_cache(sender, **kwargs): # pylint: disable=unused-argument
"""Invalidate the cache of credit courses. """
cache.delete(CreditCourse.CREDIT_COURSES_CACHE_KEY)
class CreditRequirement(TimeStampedModel):
"""
This model represents a credit requirement.
Each requirement is uniquely identified by its 'namespace' and
'name' fields.
The 'name' field stores the unique name or location (in the case of
an XBlock) of a requirement, which serves as the unique identifier
for that requirement.
The 'display_name' field stores the display name of the requirement.
The 'criteria' field is a dictionary of additional information that
clients may need in order to determine whether a user has satisfied
the requirement.
"""
course = models.ForeignKey(CreditCourse, related_name="credit_requirements")
namespace = models.CharField(max_length=255)
name = models.CharField(max_length=255)
display_name = models.CharField(max_length=255, default="")
order = models.PositiveIntegerField(default=0)
criteria = JSONField()
active = models.BooleanField(default=True)
class Meta(object):
"""
Model metadata.
"""
unique_together = ('namespace', 'name', 'course')
ordering = ["order"]
@classmethod
def add_or_update_course_requirement(cls, credit_course, requirement, order):
"""
Add a requirement to a given course, or update it if it already exists.
Args:
credit_course(CreditCourse): The credit course to attach the requirement to
requirement(dict): Requirement dict to be added
order(int): Position of the requirement in the courseware
Returns:
(CreditRequirement, created) tuple
"""
credit_requirement, created = cls.objects.get_or_create(
course=credit_course,
namespace=requirement["namespace"],
name=requirement["name"],
defaults={
"display_name": requirement["display_name"],
"criteria": requirement["criteria"],
"order": order,
"active": True
}
)
if not created:
credit_requirement.criteria = requirement["criteria"]
credit_requirement.active = True
credit_requirement.order = order
credit_requirement.display_name = requirement["display_name"]
credit_requirement.save()
return credit_requirement, created
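# Illustrative sketch (hypothetical values): the `requirement` dict is
# expected to carry "namespace", "name", "display_name" and "criteria" keys.
#
#     requirement, created = CreditRequirement.add_or_update_course_requirement(
#         credit_course,
#         {
#             "namespace": "grade",
#             "name": "grade",
#             "display_name": "Minimum Grade",
#             "criteria": {"min_grade": 0.8},
#         },
#         order=0,
#     )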
@classmethod
def get_course_requirements(cls, course_key, namespace=None, name=None):
"""
Get credit requirements of a given course.
Args:
course_key (CourseKey): The identifier for a course
Keyword Arguments:
namespace (str): Optionally filter credit requirements by namespace.
name (str): Optionally filter credit requirements by name.
Returns:
QuerySet of CreditRequirement objects
"""
# order credit requirements according to their appearance in courseware
requirements = CreditRequirement.objects.filter(course__course_key=course_key, active=True)
if namespace is not None:
requirements = requirements.filter(namespace=namespace)
if name is not None:
requirements = requirements.filter(name=name)
return requirements
@classmethod
def disable_credit_requirements(cls, requirement_ids):
"""
Mark the given requirements inactive.
Args:
requirement_ids(list): List of ids
Returns:
None
"""
cls.objects.filter(id__in=requirement_ids).update(active=False)
@classmethod
def get_course_requirement(cls, course_key, namespace, name):
"""
Get credit requirement of a given course.
Args:
course_key(CourseKey): The identifier for a course
namespace(str): Namespace of credit course requirements
name(str): Name of credit course requirement
Returns:
CreditRequirement object if one exists, otherwise None
"""
try:
return cls.objects.get(
course__course_key=course_key, active=True, namespace=namespace, name=name
)
except cls.DoesNotExist:
return None
class CreditRequirementStatus(TimeStampedModel):
"""
This model represents the status of each requirement.
For a particular credit requirement, a user can either:
1) Have satisfied the requirement (example: approved in-course reverification)
2) Have failed the requirement (example: denied in-course reverification)
3) Neither satisfied nor failed (example: the user hasn't yet attempted in-course reverification).
Cases (1) and (2) are represented by having a CreditRequirementStatus with
the status set to "satisfied" or "failed", respectively.
In case (3), no CreditRequirementStatus record will exist for the requirement and user.
"""
REQUIREMENT_STATUS_CHOICES = (
("satisfied", "satisfied"),
("failed", "failed"),
)
username = models.CharField(max_length=255, db_index=True)
requirement = models.ForeignKey(CreditRequirement, related_name="statuses")
status = models.CharField(max_length=32, choices=REQUIREMENT_STATUS_CHOICES)
# Include additional information about why the user satisfied or failed
# the requirement. This is specific to the type of requirement.
# For example, the minimum grade requirement might record the user's
# final grade when the user completes the course. This allows us to display
# the grade to users later and to send the information to credit providers.
reason = JSONField(default={})
# Maintain a history of requirement status updates for auditing purposes
history = HistoricalRecords()
class Meta(object): # pylint: disable=missing-docstring
unique_together = ('username', 'requirement')
@classmethod
def get_statuses(cls, requirements, username):
"""
Get the statuses of the given credit requirements for a user.
Args:
requirements(QuerySet): 'CreditRequirement' objects to look up
username(str): username of the user
Returns:
QuerySet of 'CreditRequirementStatus' objects
"""
return cls.objects.filter(requirement__in=requirements, username=username)
@classmethod
@transaction.commit_on_success
def add_or_update_requirement_status(cls, username, requirement, status="satisfied", reason=None):
"""
Add credit requirement status for given username.
Args:
username(str): Username of the user
requirement(CreditRequirement): 'CreditRequirement' object
status(str): Status of the requirement
reason(dict): Reason for the status
"""
requirement_status, created = cls.objects.get_or_create(
username=username,
requirement=requirement,
defaults={"reason": reason, "status": status}
)
if not created:
requirement_status.status = status
requirement_status.reason = reason if reason else {}
requirement_status.save()
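# Illustrative sketch (hypothetical values): recording that a user failed
# a requirement, with a machine-readable reason kept for auditing/display.
#
#     CreditRequirementStatus.add_or_update_requirement_status(
#         "staff", requirement, status="failed", reason={"final_grade": 0.23}
#     )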
class CreditEligibility(TimeStampedModel):
"""
A record of a user's eligibility for credit from a specific credit
provider for a specific course.
"""
username = models.CharField(max_length=255, db_index=True)
course = models.ForeignKey(CreditCourse, related_name="eligibilities")
# Deadline for when credit eligibility will expire.
# Once eligibility expires, users will no longer be able to purchase
# or request credit.
# We save the deadline as a database field just in case
# we need to override the deadline for particular students.
deadline = models.DateTimeField(
default=lambda: (
datetime.datetime.now(pytz.UTC) + datetime.timedelta(
days=getattr(settings, "CREDIT_ELIGIBILITY_EXPIRATION_DAYS", 365)
)
),
help_text=ugettext_lazy("Deadline for purchasing and requesting credit.")
)
class Meta(object): # pylint: disable=missing-docstring
unique_together = ('username', 'course')
verbose_name_plural = "Credit eligibilities"
@classmethod
def update_eligibility(cls, requirements, username, course_key):
"""
Update the user's credit eligibility for a course.
A user is eligible for credit when the user has satisfied
all requirements for credit in the course.
Arguments:
requirements (Queryset): Queryset of `CreditRequirement`s to check.
username (str): Identifier of the user being updated.
course_key (CourseKey): Identifier of the course.
Returns:
(is_eligible, created) tuple of booleans: whether the user is now
eligible, and whether a new eligibility record was created.
"""
# Check all requirements for the course to determine if the user
# is eligible. We need to check all the *requirements*
# (not just the *statuses*) in case the user doesn't yet have
# a status for a particular requirement.
status_by_req = defaultdict(lambda: False)
for status in CreditRequirementStatus.get_statuses(requirements, username):
status_by_req[status.requirement.id] = status.status
is_eligible = all(status_by_req[req.id] == "satisfied" for req in requirements)
# If we're eligible, then mark the user as being eligible for credit.
if is_eligible:
try:
CreditEligibility.objects.create(
username=username,
course=CreditCourse.objects.get(course_key=course_key),
)
return is_eligible, True
except IntegrityError:
return is_eligible, False
else:
return is_eligible, False
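# Illustrative sketch of consuming the return value: (True, True) means the
# user just became eligible, (True, False) that an eligibility record already
# existed, and (False, False) that some requirement is still unsatisfied.
#
#     is_eligible, created = CreditEligibility.update_eligibility(
#         requirements, "staff", course_key
#     )
#     if created:
#         pass  # e.g. send the eligibility notification exactly once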
@classmethod
def get_user_eligibilities(cls, username):
"""
Return the credit eligibilities of the given user for enabled
courses whose deadlines have not passed.
Args:
username(str): Username of the user
Returns:
CreditEligibility queryset for the user
"""
return cls.objects.filter(
username=username,
course__enabled=True,
deadline__gt=datetime.datetime.now(pytz.UTC)
).select_related('course')
@classmethod
def is_user_eligible_for_credit(cls, course_key, username):
"""
Check if the given user is eligible for the provided credit course
Args:
course_key(CourseKey): The course identifier
username(str): The username of the user
Returns:
bool: True if the user is eligible for the credit course, else False
"""
return cls.objects.filter(
course__course_key=course_key,
course__enabled=True,
username=username,
deadline__gt=datetime.datetime.now(pytz.UTC),
).exists()
def __unicode__(self):
"""Unicode representation of the credit eligibility. """
return u"{user}, {course}".format(
user=self.username,
course=self.course.course_key,
)
class CreditRequest(TimeStampedModel):
"""
A request for credit from a particular credit provider.
When a user initiates a request for credit, a CreditRequest record will be created.
Each CreditRequest is assigned a unique identifier so we can find it when the request
is approved by the provider. The CreditRequest record stores the parameters to be sent
at the time the request is made. If the user re-issues the request
(perhaps because the user did not finish filling in forms on the credit provider's site),
the request record will be updated, but the UUID will remain the same.
"""
uuid = models.CharField(max_length=32, unique=True, db_index=True)
username = models.CharField(max_length=255, db_index=True)
course = models.ForeignKey(CreditCourse, related_name="credit_requests")
provider = models.ForeignKey(CreditProvider, related_name="credit_requests")
parameters = JSONField()
REQUEST_STATUS_PENDING = "pending"
REQUEST_STATUS_APPROVED = "approved"
REQUEST_STATUS_REJECTED = "rejected"
REQUEST_STATUS_CHOICES = (
(REQUEST_STATUS_PENDING, "Pending"),
(REQUEST_STATUS_APPROVED, "Approved"),
(REQUEST_STATUS_REJECTED, "Rejected"),
)
status = models.CharField(
max_length=255,
choices=REQUEST_STATUS_CHOICES,
default=REQUEST_STATUS_PENDING
)
history = HistoricalRecords()
class Meta(object): # pylint: disable=missing-docstring
# Enforce the constraint that each user can have exactly one outstanding
# request to a given provider. Multiple requests use the same UUID.
unique_together = ('username', 'course', 'provider')
get_latest_by = 'created'
@classmethod
def credit_requests_for_user(cls, username):
"""
Retrieve all credit requests for a user.
Arguments:
username (unicode): The username of the user.
Returns: list
Example Usage:
>>> CreditRequest.credit_requests_for_user("bob")
[
{
"uuid": "557168d0f7664fe59097106c67c3f847",
"timestamp": 1434631630,
"course_key": "course-v1:HogwartsX+Potions101+1T2015",
"provider": {
"id": "HogwartsX",
"display_name": "Hogwarts School of Witchcraft and Wizardry",
},
"status": "pending" # or "approved" or "rejected"
}
]
"""
return [
{
"uuid": request.uuid,
"timestamp": request.parameters.get("timestamp"),
"course_key": request.course.course_key,
"provider": {
"id": request.provider.provider_id,
"display_name": request.provider.display_name
},
"status": request.status
}
for request in cls.objects.select_related('course', 'provider').filter(username=username)
]
@classmethod
def get_user_request_status(cls, username, course_key):
"""
Return the user's latest credit request for the given course.
Args:
username(str): The username of the requesting user
course_key(CourseKey): The course identifier
Returns:
CreditRequest if one exists, otherwise None
"""
try:
return cls.objects.filter(
username=username, course__course_key=course_key
).select_related('course', 'provider').latest()
except cls.DoesNotExist:
return None
def __unicode__(self):
"""Unicode representation of a credit request."""
return u"{course}, {provider}, {status}".format(
course=self.course.course_key,
provider=self.provider.provider_id, # pylint: disable=no-member
status=self.status,
)
| tiagochiavericosta/edx-platform | openedx/core/djangoapps/credit/models.py | Python | agpl-3.0 | 24,095 |
# -*- coding: utf-8 -*-
"""
flaskext.babel
~~~~~~~~~~~~~~
Implements i18n/l10n support for Flask applications based on Babel.
:copyright: (c) 2010 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import os
# this is a workaround for a snow leopard bug that babel does not
# work around :)
if os.environ.get('LC_CTYPE', '').lower() == 'utf-8':
os.environ['LC_CTYPE'] = 'en_US.utf-8'
from datetime import datetime
from flask import _request_ctx_stack
from babel import dates, numbers, support, Locale
from werkzeug import ImmutableDict
try:
from pytz.gae import pytz
except ImportError:
from pytz import timezone, UTC
else:
timezone = pytz.timezone
UTC = pytz.UTC
class Babel(object):
"""Central controller class that can be used to configure how
Flask-Babel behaves. Each application that wants to use Flask-Babel
has to create, or run :meth:`init_app` on, an instance of this class
after the configuration was initialized.
"""
default_date_formats = ImmutableDict({
'time': 'medium',
'date': 'medium',
'datetime': 'medium',
'time.short': None,
'time.medium': None,
'time.full': None,
'time.long': None,
'date.short': None,
'date.medium': None,
'date.full': None,
'date.long': None,
'datetime.short': None,
'datetime.medium': None,
'datetime.full': None,
'datetime.long': None,
})
def __init__(self, app=None, default_locale='en', default_timezone='UTC',
date_formats=None, configure_jinja=True):
self._default_locale = default_locale
self._default_timezone = default_timezone
self._date_formats = date_formats
self._configure_jinja = configure_jinja
self.app = app
if app is not None:
self.init_app(app)
def init_app(self, app):
"""Set up this instance for use with *app*, if no app was passed to
the constructor.
"""
self.app = app
app.babel_instance = self
if not hasattr(app, 'extensions'):
app.extensions = {}
app.extensions['babel'] = self
app.config.setdefault('BABEL_DEFAULT_LOCALE', self._default_locale)
app.config.setdefault('BABEL_DEFAULT_TIMEZONE', self._default_timezone)
if self._date_formats is None:
self._date_formats = self.default_date_formats.copy()
#: a mapping of Babel datetime format strings that can be modified
#: to change the defaults. If you invoke :func:`format_datetime`
#: and do not provide any format string Flask-Babel will do the
#: following things:
#:
#: 1. look up ``date_formats['datetime']``. By default ``'medium'``
#: is returned to enforce medium length datetime formats.
#: 2. ``date_formats['datetime.medium']`` (if ``'medium'`` was
#:    returned in step one) is looked up. If the return value
#:    is anything but `None` this is used as the new format string,
#:    otherwise the default for that language is used.
self.date_formats = self._date_formats
self.locale_selector_func = None
self.timezone_selector_func = None
if self._configure_jinja:
app.jinja_env.filters.update(
datetimeformat=format_datetime,
dateformat=format_date,
timeformat=format_time,
timedeltaformat=format_timedelta,
numberformat=format_number,
decimalformat=format_decimal,
currencyformat=format_currency,
percentformat=format_percent,
scientificformat=format_scientific,
)
app.jinja_env.add_extension('jinja2.ext.i18n')
app.jinja_env.install_gettext_callables(
lambda x: get_translations().ugettext(x),
lambda s, p, n: get_translations().ungettext(s, p, n),
newstyle=True
)
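# Illustrative sketch (hypothetical app, assumes `babel = Babel(app)`):
# overriding the two-step date_formats resolution described in the
# comment inside init_app above.
#
#     babel.date_formats['datetime'] = 'full'      # step 1 now yields 'full'
#     babel.date_formats['datetime.full'] = None   # step 2: use locale default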
def localeselector(self, f):
"""Registers a callback function for locale selection. The default
behaves as if a function was registered that returns `None` all the
time. If `None` is returned, the locale falls back to the one from
the configuration.
This has to return the locale as a string (e.g. ``'de_AT'``, ``'en_US'``).
"""
assert self.locale_selector_func is None, \
'a localeselector function is already registered'
self.locale_selector_func = f
return f
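# Illustrative sketch (hypothetical user object, assumes a `babel` instance):
# registering a selector that prefers a stored per-user locale and falls
# back to the request's Accept-Language header.
#
#     from flask import g, request
#
#     @babel.localeselector
#     def get_user_locale():
#         user = getattr(g, 'user', None)
#         if user is not None:
#             return user.locale                      # e.g. 'de_AT'
#         return request.accept_languages.best_match(['de', 'en'])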
def timezoneselector(self, f):
"""Registers a callback function for timezone selection. The default
behaves as if a function was registered that returns `None` all the
time. If `None` is returned, the timezone falls back to the one from
the configuration.
This has to return the timezone as a string (e.g. ``'Europe/Vienna'``).
"""
assert self.timezone_selector_func is None, \
'a timezoneselector function is already registered'
self.timezone_selector_func = f
return f
def list_translations(self):
"""Returns a list of all the locales translations exist for. The
list returned will be filled with actual locale objects and not just
strings.
.. versionadded:: 0.6
"""
dirname = os.path.join(self.app.root_path, 'translations')
if not os.path.isdir(dirname):
return []
result = []
for folder in os.listdir(dirname):
locale_dir = os.path.join(dirname, folder, 'LC_MESSAGES')
if not os.path.isdir(locale_dir):
continue
if filter(lambda x: x.endswith('.mo'), os.listdir(locale_dir)):
result.append(Locale.parse(folder))
if not result:
result.append(Locale.parse(self._default_locale))
return result
@property
def default_locale(self):
"""The default locale from the configuration as instance of a
`babel.Locale` object.
"""
return Locale.parse(self.app.config['BABEL_DEFAULT_LOCALE'])
@property
def default_timezone(self):
"""The default timezone from the configuration as instance of a
`pytz.timezone` object.
"""
return timezone(self.app.config['BABEL_DEFAULT_TIMEZONE'])
def get_translations():
"""Returns the correct gettext translations that should be used for
this request. This will never fail and return a dummy translation
object if used outside of the request or if a translation cannot be
found.
"""
ctx = _request_ctx_stack.top
if ctx is None:
return None
translations = getattr(ctx, 'babel_translations', None)
if translations is None:
dirname = os.path.join(ctx.app.root_path, 'translations')
translations = support.Translations.load(dirname, [get_locale()])
ctx.babel_translations = translations
return translations
def get_locale():
"""Returns the locale that should be used for this request as
`babel.Locale` object. This returns `None` if used outside of
a request.
"""
ctx = _request_ctx_stack.top
if ctx is None:
return None
locale = getattr(ctx, 'babel_locale', None)
if locale is None:
babel = ctx.app.extensions['babel']
if babel.locale_selector_func is None:
locale = babel.default_locale
else:
rv = babel.locale_selector_func()
if rv is None:
locale = babel.default_locale
else:
locale = Locale.parse(rv)
ctx.babel_locale = locale
return locale
def get_timezone():
"""Returns the timezone that should be used for this request as
`pytz.timezone` object. This returns `None` if used outside of
a request.
"""
ctx = _request_ctx_stack.top
tzinfo = getattr(ctx, 'babel_tzinfo', None)
if tzinfo is None:
babel = ctx.app.extensions['babel']
if babel.timezone_selector_func is None:
tzinfo = babel.default_timezone
else:
rv = babel.timezone_selector_func()
if rv is None:
tzinfo = babel.default_timezone
else:
if isinstance(rv, basestring):
tzinfo = timezone(rv)
else:
tzinfo = rv
ctx.babel_tzinfo = tzinfo
return tzinfo
def refresh():
"""Refreshes the cached timezones and locale information. This can
be used to switch a translation between a request and if you want
the changes to take place immediately, not just with the next request::
user.timezone = request.form['timezone']
user.locale = request.form['locale']
refresh()
flash(gettext('Language was changed'))
Without that refresh, the :func:`~flask.flash` message would probably
appear in English on a now-German page.
"""
ctx = _request_ctx_stack.top
for key in 'babel_locale', 'babel_tzinfo', 'babel_translations':
if hasattr(ctx, key):
delattr(ctx, key)
def _get_format(key, format):
"""A small helper for the datetime formatting functions. Looks up
format defaults for different kinds.
"""
babel = _request_ctx_stack.top.app.extensions['babel']
if format is None:
format = babel.date_formats[key]
if format in ('short', 'medium', 'full', 'long'):
rv = babel.date_formats['%s.%s' % (key, format)]
if rv is not None:
format = rv
return format
def to_user_timezone(datetime):
"""Convert a datetime object to the user's timezone. This automatically
happens on all date formatting unless rebasing is disabled. If you need
to convert a :class:`datetime.datetime` object at any time to the user's
timezone (as returned by :func:`get_timezone` this function can be used).
"""
if datetime.tzinfo is None:
datetime = datetime.replace(tzinfo=UTC)
tzinfo = get_timezone()
return tzinfo.normalize(datetime.astimezone(tzinfo))
def to_utc(datetime):
"""Convert a datetime object to UTC and drop tzinfo. This is the
opposite operation to :func:`to_user_timezone`.
"""
if datetime.tzinfo is None:
datetime = get_timezone().localize(datetime)
return datetime.astimezone(UTC).replace(tzinfo=None)
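# Illustrative round-trip (assumes a request context with a configured
# timezone): a naive UTC datetime is rebased for display and converted back.
#
#     naive_utc = datetime(2011, 3, 5, 12, 0)   # stored as naive UTC
#     local = to_user_timezone(naive_utc)       # aware, e.g. Europe/Vienna
#     assert to_utc(local) == naive_utc         # back to the naive UTC value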
def format_datetime(datetime=None, format=None, rebase=True):
"""Return a date formatted according to the given pattern. If no
:class:`~datetime.datetime` object is passed, the current time is
assumed. By default rebasing happens which causes the object to
be converted to the user's timezone (as returned by
:func:`to_user_timezone`). This function formats both date and
time.
The format parameter can either be ``'short'``, ``'medium'``,
``'long'`` or ``'full'`` (in which case the language's default for
that setting is used, or the default from the :attr:`Babel.date_formats`
mapping is used) or a format string as documented by Babel.
This function is also available in the template context as filter
named `datetimeformat`.
"""
format = _get_format('datetime', format)
return _date_format(dates.format_datetime, datetime, format, rebase)
def format_date(date=None, format=None, rebase=True):
"""Return a date formatted according to the given pattern. If no
:class:`~datetime.datetime` or :class:`~datetime.date` object is passed,
the current time is assumed. By default rebasing happens which causes
the object to be converted to the user's timezone (as returned by
:func:`to_user_timezone`). This function only formats the date part
of a :class:`~datetime.datetime` object.
The format parameter can either be ``'short'``, ``'medium'``,
``'long'`` or ``'full'`` (in which case the language's default for
that setting is used, or the default from the :attr:`Babel.date_formats`
mapping is used) or a format string as documented by Babel.
This function is also available in the template context as filter
named `dateformat`.
"""
if rebase and isinstance(date, datetime):
date = to_user_timezone(date)
format = _get_format('date', format)
return _date_format(dates.format_date, date, format, rebase)
def format_time(time=None, format=None, rebase=True):
"""Return a time formatted according to the given pattern. If no
:class:`~datetime.datetime` object is passed, the current time is
assumed. By default rebasing happens which causes the object to
be converted to the user's timezone (as returned by
:func:`to_user_timezone`). This function formats only the time
portion of a :class:`~datetime.datetime` object.
The format parameter can either be ``'short'``, ``'medium'``,
``'long'`` or ``'full'`` (in which case the language's default for
that setting is used, or the default from the :attr:`Babel.date_formats`
mapping is used) or a format string as documented by Babel.
This function is also available in the template context as filter
named `timeformat`.
"""
format = _get_format('time', format)
return _date_format(dates.format_time, time, format, rebase)
def format_timedelta(datetime_or_timedelta, granularity='second'):
"""Format the elapsed time from the given date to now or the given
timedelta. This currently requires an unreleased development
version of Babel.
This function is also available in the template context as filter
named `timedeltaformat`.
"""
if isinstance(datetime_or_timedelta, datetime):
datetime_or_timedelta = datetime.utcnow() - datetime_or_timedelta
return dates.format_timedelta(datetime_or_timedelta, granularity,
locale=get_locale())
def _date_format(formatter, obj, format, rebase, **extra):
"""Internal helper that formats the date."""
locale = get_locale()
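# note: the **extra kwargs accepted in the signature are discarded here;
# only the tzinfo computed below (when rebasing) reaches the formatter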
extra = {}
if formatter is not dates.format_date and rebase:
extra['tzinfo'] = get_timezone()
return formatter(obj, format, locale=locale, **extra)
def format_number(number):
"""Return the given number formatted for the locale in request
:param number: the number to format
:return: the formatted number
:rtype: unicode
"""
locale = get_locale()
return numbers.format_number(number, locale=locale)
def format_decimal(number, format=None):
"""Return the given decimal number formatted for the locale in request
:param number: the number to format
:param format: the format to use
:return: the formatted number
:rtype: unicode
"""
locale = get_locale()
return numbers.format_decimal(number, format=format, locale=locale)
def format_currency(number, currency, format=None):
"""Return the given number formatted for the locale in request
:param number: the number to format
:param currency: the currency code
:param format: the format to use
:return: the formatted number
:rtype: unicode
"""
locale = get_locale()
return numbers.format_currency(
number, currency, format=format, locale=locale
)
def format_percent(number, format=None):
"""Return formatted percent value for the locale in request
:param number: the number to format
:param format: the format to use
:return: the formatted percent number
:rtype: unicode
"""
locale = get_locale()
return numbers.format_percent(number, format=format, locale=locale)
def format_scientific(number, format=None):
"""Return value formatted in scientific notation for the locale in request
:param number: the number to format
:param format: the format to use
:return: the formatted percent number
:rtype: unicode
"""
locale = get_locale()
return numbers.format_scientific(number, format=format, locale=locale)
def gettext(string, **variables):
"""Translates a string with the current locale and passes in the
given keyword arguments as mapping to a string formatting string.
::
gettext(u'Hello World!')
gettext(u'Hello %(name)s!', name='World')
"""
t = get_translations()
if t is None:
return string % variables
return t.ugettext(string) % variables
_ = gettext
def ngettext(singular, plural, num, **variables):
"""Translates a string with the current locale and passes in the
given keyword arguments as mapping to a string formatting string.
The `num` parameter is used to dispatch between singular and various
plural forms of the message. It is available in the format string
as ``%(num)d`` or ``%(num)s``. The source language should be
English or a similar language which only has one plural form.
::
ngettext(u'%(num)d Apple', u'%(num)d Apples', num=len(apples))
"""
variables.setdefault('num', num)
t = get_translations()
if t is None:
return (singular if num == 1 else plural) % variables
return t.ungettext(singular, plural, num) % variables
def pgettext(context, string, **variables):
"""Like :func:`gettext` but with a context.
.. versionadded:: 0.7
"""
t = get_translations()
if t is None:
return string % variables
return t.upgettext(context, string) % variables
def npgettext(context, singular, plural, num, **variables):
"""Like :func:`ngettext` but with a context.
.. versionadded:: 0.7
"""
variables.setdefault('num', num)
t = get_translations()
if t is None:
return (singular if num == 1 else plural) % variables
return t.unpgettext(context, singular, plural, num) % variables
def lazy_gettext(string, **variables):
"""Like :func:`gettext` but the string returned is lazy which means
it will be translated when it is used as an actual string.
Example::
hello = lazy_gettext(u'Hello World')
@app.route('/')
def index():
return unicode(hello)
"""
from speaklater import make_lazy_string
return make_lazy_string(gettext, string, **variables)
def lazy_pgettext(context, string, **variables):
"""Like :func:`pgettext` but the string returned is lazy which means
it will be translated when it is used as an actual string.
.. versionadded:: 0.7
"""
from speaklater import make_lazy_string
return make_lazy_string(pgettext, context, string, **variables)
| SohKai/ChronoLogger | web/flask/lib/python2.7/site-packages/flaskext/babel.py | Python | mit | 18,730 |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import inspect
from pyspark import pandas as ps
from pyspark.pandas.exceptions import PandasNotImplementedError
from pyspark.pandas.missing.window import (
MissingPandasLikeExpanding,
MissingPandasLikeRolling,
MissingPandasLikeExpandingGroupby,
MissingPandasLikeRollingGroupby,
)
from pyspark.testing.pandasutils import PandasOnSparkTestCase, TestUtils
class ExpandingRollingTest(PandasOnSparkTestCase, TestUtils):
def test_missing(self):
psdf = ps.DataFrame({"a": [1, 2, 3, 4, 5, 6, 7, 8, 9]})
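# Illustrative expansion of the loops below for a single missing method
# (hypothetical name "sem"): each iteration just asserts the stub raises.
#
#     with self.assertRaisesRegex(
#         PandasNotImplementedError, "method.*Expanding.*sem.*not implemented"
#     ):
#         psdf.expanding(1).sem()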
# Expanding functions
missing_functions = inspect.getmembers(MissingPandasLikeExpanding, inspect.isfunction)
unsupported_functions = [
name for (name, type_) in missing_functions if type_.__name__ == "unsupported_function"
]
for name in unsupported_functions:
with self.assertRaisesRegex(
PandasNotImplementedError,
"method.*Expanding.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(psdf.expanding(1), name)() # Frame
with self.assertRaisesRegex(
PandasNotImplementedError,
"method.*Expanding.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(psdf.a.expanding(1), name)() # Series
deprecated_functions = [
name for (name, type_) in missing_functions if type_.__name__ == "deprecated_function"
]
for name in deprecated_functions:
with self.assertRaisesRegex(
PandasNotImplementedError, "method.*Expanding.*{}.*is deprecated".format(name)
):
getattr(psdf.expanding(1), name)() # Frame
with self.assertRaisesRegex(
PandasNotImplementedError, "method.*Expanding.*{}.*is deprecated".format(name)
):
getattr(psdf.a.expanding(1), name)() # Series
# Rolling functions
missing_functions = inspect.getmembers(MissingPandasLikeRolling, inspect.isfunction)
unsupported_functions = [
name for (name, type_) in missing_functions if type_.__name__ == "unsupported_function"
]
for name in unsupported_functions:
with self.assertRaisesRegex(
PandasNotImplementedError,
"method.*Rolling.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(psdf.rolling(1), name)() # Frame
with self.assertRaisesRegex(
PandasNotImplementedError,
"method.*Rolling.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(psdf.a.rolling(1), name)() # Series
deprecated_functions = [
name for (name, type_) in missing_functions if type_.__name__ == "deprecated_function"
]
for name in deprecated_functions:
with self.assertRaisesRegex(
PandasNotImplementedError, "method.*Rolling.*{}.*is deprecated".format(name)
):
getattr(psdf.rolling(1), name)() # Frame
with self.assertRaisesRegex(
PandasNotImplementedError, "method.*Rolling.*{}.*is deprecated".format(name)
):
getattr(psdf.a.rolling(1), name)() # Series
# Expanding properties
missing_properties = inspect.getmembers(
MissingPandasLikeExpanding, lambda o: isinstance(o, property)
)
unsupported_properties = [
name
for (name, type_) in missing_properties
if type_.fget.__name__ == "unsupported_property"
]
for name in unsupported_properties:
with self.assertRaisesRegex(
PandasNotImplementedError,
"property.*Expanding.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(psdf.expanding(1), name) # Frame
with self.assertRaisesRegex(
PandasNotImplementedError,
"property.*Expanding.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(psdf.a.expanding(1), name) # Series
deprecated_properties = [
name
for (name, type_) in missing_properties
if type_.fget.__name__ == "deprecated_property"
]
for name in deprecated_properties:
with self.assertRaisesRegex(
PandasNotImplementedError, "property.*Expanding.*{}.*is deprecated".format(name)
):
getattr(psdf.expanding(1), name) # Frame
with self.assertRaisesRegex(
PandasNotImplementedError, "property.*Expanding.*{}.*is deprecated".format(name)
):
getattr(psdf.a.expanding(1), name) # Series
# Rolling properties
missing_properties = inspect.getmembers(
MissingPandasLikeRolling, lambda o: isinstance(o, property)
)
unsupported_properties = [
name
for (name, type_) in missing_properties
if type_.fget.__name__ == "unsupported_property"
]
for name in unsupported_properties:
with self.assertRaisesRegex(
PandasNotImplementedError,
"property.*Rolling.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(psdf.rolling(1), name)() # Frame
with self.assertRaisesRegex(
PandasNotImplementedError,
"property.*Rolling.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(psdf.a.rolling(1), name)() # Series
deprecated_properties = [
name
for (name, type_) in missing_properties
if type_.fget.__name__ == "deprecated_property"
]
for name in deprecated_properties:
with self.assertRaisesRegex(
PandasNotImplementedError, "property.*Rolling.*{}.*is deprecated".format(name)
):
getattr(psdf.rolling(1), name)() # Frame
with self.assertRaisesRegex(
PandasNotImplementedError, "property.*Rolling.*{}.*is deprecated".format(name)
):
getattr(psdf.a.rolling(1), name)() # Series
def test_missing_groupby(self):
psdf = ps.DataFrame({"a": [1, 2, 3, 4, 5, 6, 7, 8, 9]})
# Expanding functions
missing_functions = inspect.getmembers(
MissingPandasLikeExpandingGroupby, inspect.isfunction
)
unsupported_functions = [
name for (name, type_) in missing_functions if type_.__name__ == "unsupported_function"
]
for name in unsupported_functions:
with self.assertRaisesRegex(
PandasNotImplementedError,
"method.*Expanding.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(psdf.groupby("a").expanding(1), name)() # Frame
with self.assertRaisesRegex(
PandasNotImplementedError,
"method.*Expanding.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(psdf.a.groupby(psdf.a).expanding(1), name)() # Series
deprecated_functions = [
name for (name, type_) in missing_functions if type_.__name__ == "deprecated_function"
]
for name in deprecated_functions:
with self.assertRaisesRegex(
PandasNotImplementedError, "method.*Expanding.*{}.*is deprecated".format(name)
):
getattr(psdf.groupby("a").expanding(1), name)() # Frame
with self.assertRaisesRegex(
PandasNotImplementedError, "method.*Expanding.*{}.*is deprecated".format(name)
):
getattr(psdf.a.groupby(psdf.a).expanding(1), name)() # Series
# Rolling functions
missing_functions = inspect.getmembers(MissingPandasLikeRollingGroupby, inspect.isfunction)
unsupported_functions = [
name for (name, type_) in missing_functions if type_.__name__ == "unsupported_function"
]
for name in unsupported_functions:
with self.assertRaisesRegex(
PandasNotImplementedError,
"method.*Rolling.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(psdf.groupby("a").rolling(1), name)() # Frame
with self.assertRaisesRegex(
PandasNotImplementedError,
"method.*Rolling.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(psdf.a.groupby(psdf.a).rolling(1), name)() # Series
deprecated_functions = [
name for (name, type_) in missing_functions if type_.__name__ == "deprecated_function"
]
for name in deprecated_functions:
with self.assertRaisesRegex(
PandasNotImplementedError, "method.*Rolling.*{}.*is deprecated".format(name)
):
getattr(psdf.rolling(1), name)() # Frame
with self.assertRaisesRegex(
PandasNotImplementedError, "method.*Rolling.*{}.*is deprecated".format(name)
):
getattr(psdf.a.rolling(1), name)() # Series
# Expanding properties
missing_properties = inspect.getmembers(
MissingPandasLikeExpandingGroupby, lambda o: isinstance(o, property)
)
unsupported_properties = [
name
for (name, type_) in missing_properties
if type_.fget.__name__ == "unsupported_property"
]
for name in unsupported_properties:
with self.assertRaisesRegex(
PandasNotImplementedError,
"property.*Expanding.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(psdf.groupby("a").expanding(1), name)() # Frame
with self.assertRaisesRegex(
PandasNotImplementedError,
"property.*Expanding.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(psdf.a.groupby(psdf.a).expanding(1), name)() # Series
deprecated_properties = [
name
for (name, type_) in missing_properties
if type_.fget.__name__ == "deprecated_property"
]
for name in deprecated_properties:
with self.assertRaisesRegex(
PandasNotImplementedError, "property.*Expanding.*{}.*is deprecated".format(name)
):
getattr(psdf.expanding(1), name) # Frame
with self.assertRaisesRegex(
PandasNotImplementedError, "property.*Expanding.*{}.*is deprecated".format(name)
):
getattr(psdf.a.expanding(1), name) # Series
# Rolling properties
missing_properties = inspect.getmembers(
MissingPandasLikeRollingGroupby, lambda o: isinstance(o, property)
)
unsupported_properties = [
name
for (name, type_) in missing_properties
if type_.fget.__name__ == "unsupported_property"
]
for name in unsupported_properties:
with self.assertRaisesRegex(
PandasNotImplementedError,
"property.*Rolling.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(psdf.groupby("a").rolling(1), name)() # Frame
with self.assertRaisesRegex(
PandasNotImplementedError,
"property.*Rolling.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(psdf.a.groupby(psdf.a).rolling(1), name)() # Series
deprecated_properties = [
name
for (name, type_) in missing_properties
if type_.fget.__name__ == "deprecated_property"
]
for name in deprecated_properties:
with self.assertRaisesRegex(
PandasNotImplementedError, "property.*Rolling.*{}.*is deprecated".format(name)
):
getattr(psdf.rolling(1), name)() # Frame
with self.assertRaisesRegex(
PandasNotImplementedError, "property.*Rolling.*{}.*is deprecated".format(name)
):
getattr(psdf.a.rolling(1), name)() # Series
if __name__ == "__main__":
import unittest
from pyspark.pandas.tests.test_window import * # noqa: F401
try:
import xmlrunner # type: ignore[import]
testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| ueshin/apache-spark | python/pyspark/pandas/tests/test_window.py | Python | apache-2.0 | 13,671 |
#!/usr/bin/env python
# normalDate.py - version 1.0 - 20000717
#hacked by Robin Becker 10/Apr/2001
#major changes include
# using Types instead of type(0) etc
# BusinessDate class
# __radd__, __rsub__ methods
# formatMS stuff
# derived from an original version created
# by Jeff Bauer of Rubicon Research and used
# with his kind permission
__version__=''' $Id$ '''
__doc__="Jeff Bauer's lightweight date class, extended by us. Predates Python's datetime module."
_bigBangScalar = -4345732 # based on (-9999, 1, 1) BC/BCE minimum
_bigCrunchScalar = 2958463 # based on (9999,12,31) AD/CE maximum
_daysInMonthNormal = [31,28,31,30,31,30,31,31,30,31,30,31]
_daysInMonthLeapYear = [31,29,31,30,31,30,31,31,30,31,30,31]
_dayOfWeekName = ['Monday', 'Tuesday', 'Wednesday', 'Thursday',
'Friday', 'Saturday', 'Sunday']
_monthName = ['January', 'February', 'March', 'April', 'May', 'June',
'July','August','September','October','November','December']
import string, re, time, datetime
if hasattr(time,'struct_time'):
_DateSeqTypes = (list,tuple,time.struct_time)
else:
_DateSeqTypes = (list,tuple)
_fmtPat = re.compile('\\{(m{1,5}|yyyy|yy|d{1,4})\\}',re.MULTILINE|re.IGNORECASE)
_iso_re = re.compile(r'(\d\d\d\d|\d\d)-(\d\d)-(\d\d)')
def getStdMonthNames():
return list(map(string.lower,_monthName))
def getStdShortMonthNames():
return [x[:3] for x in getStdMonthNames()]
def getStdDayNames():
return list(map(string.lower,_dayOfWeekName))
def getStdShortDayNames():
return [x[:3] for x in getStdDayNames()]
def isLeapYear(year):
"""determine if specified year is leap year, returns Python boolean"""
if year < 1600:
if year % 4:
return 0
else:
return 1
elif year % 4 != 0:
return 0
elif year % 100 != 0:
return 1
elif year % 400 != 0:
return 0
else:
return 1
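# Illustrative checks: Gregorian rules apply from 1600 on, the plain Julian
# "every fourth year" rule before that (the Oct 1582 calendar switch itself
# is handled elsewhere in this module).
#
#     assert isLeapYear(2000) == 1   # divisible by 400 -> leap
#     assert isLeapYear(1900) == 0   # divisible by 100 but not 400 -> common
#     assert isLeapYear(1500) == 1   # pre-1600: Julian rule, divisible by 4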
class NormalDateException(Exception):
"""Exception class for NormalDate"""
pass
class NormalDate:
"""
NormalDate is a specialized class to handle dates without
all the excess baggage (time zones, daylight savings, leap
seconds, etc.) of other date structures. The minimalist
strategy greatly simplifies its implementation and use.
Internally, NormalDate is stored as an integer with values
in a discontinuous range of -99990101 to 99991231. The
integer value is used principally for storage and to simplify
the user interface. Internal calculations are performed by
a scalar based on Jan 1, 1900.
Valid NormalDate ranges include (-9999,1,1) B.C.E. through
(9999,12,31) C.E./A.D.
1.0
No changes, except the version number. After 3 years of use by
various parties I think we can consider it stable.
0.8
Added Prof. Stephen Walton's suggestion for a range method
- module author resisted the temptation to use lambda <0.5 wink>
0.7
Added Dan Winkler's suggestions for __add__, __sub__ methods
0.6
Modifications suggested by Kevin Digweed to fix:
- dayOfWeek, dayOfWeekAbbrev, clone methods
- Permit NormalDate to be a better behaved superclass
0.5
Minor tweaking
0.4
- Added methods __cmp__, __hash__
- Added Epoch variable, scoped to the module
- Added setDay, setMonth, setYear methods
0.3
Minor touch-ups
0.2
- Fixed bug for certain B.C.E leap years
- Added Jim Fulton's suggestions for short alias class name =ND
and __getstate__, __setstate__ methods
Special thanks: Roedy Green
"""
def __init__(self, normalDate=None):
"""
Accept 1 of 4 values to initialize a NormalDate:
1. None - creates a NormalDate for the current day
2. integer in yyyymmdd format
3. string in yyyymmdd format
4. tuple in (yyyy, mm, dd) - localtime/gmtime can also be used
"""
if normalDate is None:
self.setNormalDate(time.localtime(time.time()))
else:
self.setNormalDate(normalDate)
def add(self, days):
"""add days to date; use negative integers to subtract"""
if not isinstance(days,int):
raise NormalDateException( \
'add method parameter must be integer type')
self.normalize(self.scalar() + days)
def __add__(self, days):
"""add integer to normalDate and return a new, calculated value"""
if not isinstance(days,int):
raise NormalDateException( \
'__add__ parameter must be integer type')
cloned = self.clone()
cloned.add(days)
return cloned
def __radd__(self,days):
'''for completeness'''
return self.__add__(days)
def clone(self):
"""return a cloned instance of this normalDate"""
return self.__class__(self.normalDate)
def __cmp__(self, target):
if target is None:
return 1
elif not hasattr(target, 'normalDate'):
return 1
else:
return cmp(self.normalDate, target.normalDate)
def day(self):
"""return the day as integer 1-31"""
return int(repr(self.normalDate)[-2:])
def dayOfWeek(self):
"""return integer representing day of week, Mon=0, Tue=1, etc."""
return dayOfWeek(*self.toTuple())
def dayOfWeekAbbrev(self):
"""return day of week abbreviation for current date: Mon, Tue, etc."""
return _dayOfWeekName[self.dayOfWeek()][:3]
def dayOfWeekName(self):
"""return day of week name for current date: Monday, Tuesday, etc."""
return _dayOfWeekName[self.dayOfWeek()]
def dayOfYear(self):
"""day of year"""
if self.isLeapYear():
daysByMonth = _daysInMonthLeapYear
else:
daysByMonth = _daysInMonthNormal
priorMonthDays = 0
for m in range(self.month() - 1):
priorMonthDays = priorMonthDays + daysByMonth[m]
return self.day() + priorMonthDays
def daysBetweenDates(self, normalDate):
"""
return value may be negative, since calculation is
self.scalar() - arg
"""
if isinstance(normalDate,NormalDate):
return self.scalar() - normalDate.scalar()
else:
return self.scalar() - NormalDate(normalDate).scalar()
def equals(self, target):
if isinstance(target,NormalDate):
if target is None:
return self.normalDate is None
else:
return self.normalDate == target.normalDate
else:
return 0
def endOfMonth(self):
"""returns (cloned) last day of month"""
return self.__class__(self.__repr__()[-8:-2]+str(self.lastDayOfMonth()))
def firstDayOfMonth(self):
"""returns (cloned) first day of month"""
return self.__class__(self.__repr__()[-8:-2]+"01")
def formatUS(self):
"""return date as string in common US format: MM/DD/YY"""
d = self.__repr__()
return "%s/%s/%s" % (d[-4:-2], d[-2:], d[-6:-4])
def formatUSCentury(self):
"""return date as string in 4-digit year US format: MM/DD/YYYY"""
d = self.__repr__()
return "%s/%s/%s" % (d[-4:-2], d[-2:], d[-8:-4])
def _fmtM(self):
return str(self.month())
def _fmtMM(self):
return '%02d' % self.month()
def _fmtMMM(self):
return self.monthAbbrev()
def _fmtMMMM(self):
return self.monthName()
def _fmtMMMMM(self):
return self.monthName()[0]
def _fmtD(self):
return str(self.day())
def _fmtDD(self):
return '%02d' % self.day()
def _fmtDDD(self):
return self.dayOfWeekAbbrev()
def _fmtDDDD(self):
return self.dayOfWeekName()
def _fmtYY(self):
return '%02d' % (self.year()%100)
def _fmtYYYY(self):
return str(self.year())
def formatMS(self,fmt):
'''format like MS date using the notation
{YY} --> 2 digit year
{YYYY} --> 4 digit year
{M} --> month as digit
{MM} --> 2 digit month
{MMM} --> abbreviated month name
{MMMM} --> monthname
{MMMMM} --> first character of monthname
{D} --> day of month as digit
{DD} --> 2 digit day of month
{DDD} --> abbreviated weekday name
{DDDD} --> weekday name
'''
r = fmt[:]
f = 0
while 1:
m = _fmtPat.search(r,f)
if m:
y = getattr(self,'_fmt'+string.upper(m.group()[1:-1]))()
i, j = m.span()
r = (r[0:i] + y) + r[j:]
f = i + len(y)
else:
return r
def __getstate__(self):
"""minimize persistent storage requirements"""
return self.normalDate
def __hash__(self):
return hash(self.normalDate)
def __int__(self):
return self.normalDate
def isLeapYear(self):
"""
determine if specified year is leap year, returning true (1) or
false (0)
"""
return isLeapYear(self.year())
def _isValidNormalDate(self, normalDate):
"""checks for date validity in [-]yyyymmdd format"""
if not isinstance(normalDate,int):
return 0
if len(repr(normalDate)) > 9:
return 0
if normalDate < 0:
dateStr = "%09d" % normalDate
else:
dateStr = "%08d" % normalDate
if len(dateStr) < 8:
return 0
elif len(dateStr) == 9:
if (dateStr[0] != '-' and dateStr[0] != '+'):
return 0
year = int(dateStr[:-4])
if year < -9999 or year > 9999 or year == 0:
return 0 # note: zero (0) is not a valid year
month = int(dateStr[-4:-2])
if month < 1 or month > 12:
return 0
if isLeapYear(year):
maxDay = _daysInMonthLeapYear[month - 1]
else:
maxDay = _daysInMonthNormal[month - 1]
day = int(dateStr[-2:])
if day < 1 or day > maxDay:
return 0
if year == 1582 and month == 10 and day > 4 and day < 15:
return 0 # special case of 10 days dropped: Oct 5-14, 1582
return 1
def lastDayOfMonth(self):
"""returns last day of the month as integer 28-31"""
if self.isLeapYear():
return _daysInMonthLeapYear[self.month() - 1]
else:
return _daysInMonthNormal[self.month() - 1]
def localeFormat(self):
"""override this method to use your preferred locale format"""
return self.formatUS()
def month(self):
"""returns month as integer 1-12"""
return int(repr(self.normalDate)[-4:-2])
def monthAbbrev(self):
"""returns month as a 3-character abbreviation, i.e. Jan, Feb, etc."""
return _monthName[self.month() - 1][:3]
def monthName(self):
"""returns month name, i.e. January, February, etc."""
return _monthName[self.month() - 1]
def normalize(self, scalar):
"""convert scalar to normalDate"""
if scalar < _bigBangScalar:
msg = "normalize(%d): scalar below minimum" % \
_bigBangScalar
raise NormalDateException(msg)
if scalar > _bigCrunchScalar:
msg = "normalize(%d): scalar exceeds maximum" % \
_bigCrunchScalar
raise NormalDateException(msg)
from math import floor
if scalar >= -115860:
year = 1600 + int(floor((scalar + 109573) / 365.2425))
elif scalar >= -693597:
year = 4 + int(floor((scalar + 692502) / 365.2425))
else:
year = -4 + int(floor((scalar + 695058) / 365.2425))
days = scalar - firstDayOfYear(year) + 1
if days <= 0:
year = year - 1
days = scalar - firstDayOfYear(year) + 1
daysInYear = 365
if isLeapYear(year):
daysInYear = daysInYear + 1
if days > daysInYear:
year = year + 1
days = scalar - firstDayOfYear(year) + 1
# add 10 days if between Oct 15, 1582 and Dec 31, 1582
if (scalar >= -115860 and scalar <= -115783):
days = days + 10
if isLeapYear(year):
daysByMonth = _daysInMonthLeapYear
else:
daysByMonth = _daysInMonthNormal
dc = 0; month = 12
for m in range(len(daysByMonth)):
dc = dc + daysByMonth[m]
if dc >= days:
month = m + 1
break
# add up the days in prior months
priorMonthDays = 0
for m in range(month - 1):
priorMonthDays = priorMonthDays + daysByMonth[m]
day = days - priorMonthDays
self.setNormalDate((year, month, day))
def range(self, days):
"""Return a range of normalDates as a list. Parameter
may be an int or normalDate."""
if not isinstance(days,int):
days = days - self # if not int, assume arg is normalDate type
r = []
for i in range(days):
r.append(self + i)
return r
def __repr__(self):
"""print format: [-]yyyymmdd"""
# Note: When disassembling a NormalDate string, be sure to
# count from the right, i.e. epochMonth = int(repr(Epoch)[-4:-2]),
# or the slice won't work for dates B.C.
if self.normalDate < 0:
return "%09d" % self.normalDate
else:
return "%08d" % self.normalDate
def scalar(self):
"""days since baseline date: Jan 1, 1900"""
(year, month, day) = self.toTuple()
days = firstDayOfYear(year) + day - 1
if self.isLeapYear():
for m in range(month - 1):
days = days + _daysInMonthLeapYear[m]
else:
for m in range(month - 1):
days = days + _daysInMonthNormal[m]
if year == 1582:
if month > 10 or (month == 10 and day > 4):
days = days - 10
return days
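# Illustrative checks: the scalar baseline is Jan 1, 1900, which maps to 0.
#
#     assert NormalDate(19000101).scalar() == 0
#     assert NormalDate(19000102).scalar() == 1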
def setDay(self, day):
"""set the day of the month"""
maxDay = self.lastDayOfMonth()
if day < 1 or day > maxDay:
msg = "day is outside of range 1 to %d" % maxDay
raise NormalDateException(msg)
(y, m, d) = self.toTuple()
self.setNormalDate((y, m, day))
def setMonth(self, month):
"""set the month [1-12]"""
if month < 1 or month > 12:
raise NormalDateException('month is outside range 1 to 12')
(y, m, d) = self.toTuple()
self.setNormalDate((y, month, d))
def setNormalDate(self, normalDate):
"""
accepts date as scalar string/integer (yyyymmdd) or tuple
(year, month, day, ...)"""
if isinstance(normalDate,int):
self.normalDate = normalDate
elif isinstance(normalDate,str):
try:
self.normalDate = int(normalDate)
except:
m = _iso_re.match(normalDate)
if m:
self.setNormalDate(m.group(1)+m.group(2)+m.group(3))
else:
raise NormalDateException("unable to setNormalDate(%s)" % repr(normalDate))
elif isinstance(normalDate,_DateSeqTypes):
self.normalDate = int("%04d%02d%02d" % normalDate[:3])
elif isinstance(normalDate,NormalDate):
self.normalDate = normalDate.normalDate
elif isinstance(normalDate,(datetime.datetime,datetime.date)):
self.normalDate = (normalDate.year*100+normalDate.month)*100+normalDate.day
if not self._isValidNormalDate(self.normalDate):
raise NormalDateException("unable to setNormalDate(%s)" % repr(normalDate))
def setYear(self, year):
if year == 0:
raise NormalDateException('cannot set year to zero')
elif year < -9999:
raise NormalDateException('year cannot be less than -9999')
elif year > 9999:
raise NormalDateException('year cannot be greater than 9999')
(y, m, d) = self.toTuple()
self.setNormalDate((year, m, d))
__setstate__ = setNormalDate
def __sub__(self, v):
if isinstance(v,int):
return self.__add__(-v)
return self.scalar() - v.scalar()
def __rsub__(self,v):
if isinstance(v,int):
return NormalDate(v) - self
else:
return v.scalar() - self.scalar()
def toTuple(self):
"""return date as (year, month, day) tuple"""
return (self.year(), self.month(), self.day())
def year(self):
"""return year in yyyy format, negative values indicate B.C."""
return int(repr(self.normalDate)[:-4])
################# Utility functions #################
def bigBang():
"""return lower boundary as a NormalDate"""
return NormalDate((-9999, 1, 1))
def bigCrunch():
"""return upper boundary as a NormalDate"""
return NormalDate((9999, 12, 31))
def dayOfWeek(y, m, d):
"""return integer representing day of week, Mon=0, Tue=1, etc."""
if m == 1 or m == 2:
m = m + 12
y = y - 1
return (d + 2*m + 3*(m+1)//5 + y + y//4 - y//100 + y//400) % 7
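# Illustrative check of the congruence above (Mon=0 ... Sun=6):
#
#     assert dayOfWeek(2000, 1, 1) == 5   # 1 Jan 2000 was a Saturday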
def firstDayOfYear(year):
"""number of days to the first of the year, relative to Jan 1, 1900"""
if not isinstance(year,int):
msg = "firstDayOfYear() expected integer, got %s" % type(year)
raise NormalDateException(msg)
if year == 0:
raise NormalDateException('first day of year cannot be zero (0)')
elif year < 0: # BCE calculation
firstDay = (year * 365) + int((year - 1) / 4) - 693596
else: # CE calculation
leapAdjust = int((year + 3) / 4)
if year > 1600:
leapAdjust = leapAdjust - int((year + 99 - 1600) / 100) + \
int((year + 399 - 1600) / 400)
firstDay = year * 365 + leapAdjust - 693963
if year > 1582:
firstDay = firstDay - 10
return firstDay
def FND(d):
'''convert to ND if required'''
return isinstance(d,NormalDate) and d or ND(d)
Epoch=bigBang()
ND=NormalDate
BDEpoch=ND(15821018)
BDEpochScalar = -115857
class BusinessDate(NormalDate):
"""
Specialised NormalDate
"""
def add(self, days):
"""add days to date; use negative integers to subtract"""
if not isinstance(days,int):
raise NormalDateException('add method parameter must be integer')
self.normalize(self.scalar() + days)
def __add__(self, days):
"""add integer to BusinessDate and return a new, calculated value"""
if not isinstance(days,int):
raise NormalDateException('__add__ parameter must be integer')
cloned = self.clone()
cloned.add(days)
return cloned
    def __sub__(self, v):
        if isinstance(v, int):
            return self.__add__(-v)
        return self.scalar() - v.scalar()
def asNormalDate(self):
return ND(self.normalDate)
    def daysBetweenDates(self, normalDate):
        return self.asNormalDate().daysBetweenDates(normalDate)
def _checkDOW(self):
if self.dayOfWeek()>4: raise NormalDateException("%r isn't a business day" % self.normalDate)
def normalize(self, i):
i = int(i)
NormalDate.normalize(self,(i//5)*7+i%5+BDEpochScalar)
def scalar(self):
d = self.asNormalDate()
i = d - BDEpoch #luckily BDEpoch is a Monday so we don't have a problem
#concerning the relative weekday
return 5*(i//7) + i%7
def setNormalDate(self, normalDate):
NormalDate.setNormalDate(self,normalDate)
self._checkDOW()
if __name__ == '__main__':
today = NormalDate()
print("NormalDate test:")
print(" Today (%s) is: %s %s" % (today, today.dayOfWeekAbbrev(), today.localeFormat()))
yesterday = today - 1
print(" Yesterday was: %s %s" % (yesterday.dayOfWeekAbbrev(), yesterday.localeFormat()))
tomorrow = today + 1
print(" Tomorrow will be: %s %s" % (tomorrow.dayOfWeekAbbrev(), tomorrow.localeFormat()))
print(" Days between tomorrow and yesterday: %d" % (tomorrow - yesterday))
print(today.formatMS('{d}/{m}/{yy}'))
print(today.formatMS('{dd}/{m}/{yy}'))
print(today.formatMS('{ddd} {d}/{m}/{yy}'))
print(today.formatMS('{dddd} {d}/{m}/{yy}'))
print(today.formatMS('{d}/{mm}/{yy}'))
print(today.formatMS('{d}/{mmm}/{yy}'))
print(today.formatMS('{d}/{mmmm}/{yy}'))
print(today.formatMS('{d}/{m}/{yyyy}'))
b = BusinessDate('20010116')
print('b=',b,'b.scalar()', b.scalar())
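    # Hedged usage sketch (added for illustration; not part of the original
    # module): NormalDate accepts tuples, yyyymmdd strings/integers and
    # datetime dates interchangeably, and subtraction yields day counts.
    nd = NormalDate((2001, 1, 16))
    assert nd.scalar() == NormalDate('20010116').scalar()
    assert (nd + 1) - nd == 1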
|
Microvellum/Fluid-Designer
|
win64-vc/2.78/python/lib/reportlab/lib/normalDate.py
|
Python
|
gpl-3.0
| 20,867
|
__author__ = 'matjaz'
|
anirudhvenkats/clowdflows
|
workflows/management/commands/__init__.py
|
Python
|
gpl-3.0
| 23
|
PRIORITY_EMAIL_NOW = 0
PRIORITY_HIGH = 1
PRIORITY_NORMAL = 3
PRIORITY_LOW = 5
RESULT_SENT = 0
RESULT_SKIPPED = 1
RESULT_FAILED = 2
PRIORITIES = {
'now': PRIORITY_EMAIL_NOW,
'high': PRIORITY_HIGH,
'normal': PRIORITY_NORMAL,
'low': PRIORITY_LOW,
}
PRIORITY_HEADER = 'X-Mail-Queue-Priority'
try:
from django.core.mail import get_connection
EMAIL_BACKEND_SUPPORT = True
except ImportError:
# Django version < 1.2
EMAIL_BACKEND_SUPPORT = False
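# Hedged helper sketch (illustrative only; not part of django-mailer): map a
# priority name, e.g. from a command-line option, to its numeric constant.
def resolve_priority(name, default=PRIORITY_NORMAL):
    return PRIORITIES.get(str(name).lower(), default)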
|
mfwarren/django-mailer-2
|
django_mailer/constants.py
|
Python
|
mit
| 475
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Joseph Callen <jcallen () csc.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: vmware_vmkernel_ip_config
short_description: Configure the VMkernel IP Address
description:
- Configure the VMkernel IP Address
version_added: 2.0
author: "Joseph Callen (@jcpowermac), Russell Teague (@mtnbikenc)"
notes:
- Tested on vSphere 5.5
requirements:
- "python >= 2.6"
- PyVmomi
options:
vmk_name:
description:
- VMkernel interface name
required: True
ip_address:
description:
- IP address to assign to VMkernel interface
required: True
subnet_mask:
description:
- Subnet Mask to assign to VMkernel interface
required: True
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = '''
# Example command from Ansible Playbook
- name: Configure IP address on ESX host
local_action:
module: vmware_vmkernel_ip_config
hostname: esxi_hostname
username: esxi_username
password: esxi_password
vmk_name: vmk0
ip_address: 10.0.0.10
subnet_mask: 255.255.255.0
'''
try:
from pyVmomi import vim, vmodl
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vmware import HAS_PYVMOMI, connect_to_api, get_all_objs, vmware_argument_spec
def configure_vmkernel_ip_address(host_system, vmk_name, ip_address, subnet_mask):
host_config_manager = host_system.configManager
host_network_system = host_config_manager.networkSystem
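    # Walk the host's configured vnics; update only the matching VMkernel NIC,
    # and only when the address actually differs, keeping the call idempotent.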
for vnic in host_network_system.networkConfig.vnic:
if vnic.device == vmk_name:
spec = vnic.spec
if spec.ip.ipAddress != ip_address:
spec.ip.dhcp = False
spec.ip.ipAddress = ip_address
spec.ip.subnetMask = subnet_mask
host_network_system.UpdateVirtualNic(vmk_name, spec)
return True
return False
def main():
argument_spec = vmware_argument_spec()
argument_spec.update(dict(vmk_name=dict(required=True, type='str'),
ip_address=dict(required=True, type='str'),
subnet_mask=dict(required=True, type='str')))
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)
if not HAS_PYVMOMI:
module.fail_json(msg='pyvmomi is required for this module')
vmk_name = module.params['vmk_name']
ip_address = module.params['ip_address']
subnet_mask = module.params['subnet_mask']
try:
content = connect_to_api(module, False)
host = get_all_objs(content, [vim.HostSystem])
if not host:
module.fail_json(msg="Unable to locate Physical Host.")
        host_system = list(host.keys())[0]
changed = configure_vmkernel_ip_address(host_system, vmk_name, ip_address, subnet_mask)
module.exit_json(changed=changed)
except vmodl.RuntimeFault as runtime_fault:
module.fail_json(msg=runtime_fault.msg)
except vmodl.MethodFault as method_fault:
module.fail_json(msg=method_fault.msg)
except Exception as e:
module.fail_json(msg=str(e))
if __name__ == '__main__':
main()
|
e-gob/plataforma-kioscos-autoatencion
|
scripts/ansible-play/.venv/lib/python2.7/site-packages/ansible/modules/cloud/vmware/vmware_vmkernel_ip_config.py
|
Python
|
bsd-3-clause
| 3,594
|
def main(request, response):
import simplejson as json
    with open('config.json') as f:
        source = f.read()
s = json.JSONDecoder().decode(source)
url1 = "http://" + s['host'] + ":" + str(s['ports']['http'][1])
url2 = "http://" + s['host'] + ":" + str(s['ports']['http'][0])
_CSP = "media-src " + url1 + "; script-src 'self' 'unsafe-inline'"
response.headers.set("Content-Security-Policy", _CSP)
response.headers.set("X-Content-Security-Policy", _CSP)
response.headers.set("X-WebKit-CSP", _CSP)
return """<!DOCTYPE html>
<!--
Copyright (c) 2013 Intel Corporation.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of works must retain the original copyright notice, this list
of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the original copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors
may be used to endorse or promote products derived from this work without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Authors:
Zhang, Zhiqiang <zhiqiang.zhang@intel.com>
-->
<html>
<head>
<title>CSP Test: csp_media-src_cross-origin_audio_allowed_ext</title>
<link rel="author" title="Intel" href="http://www.intel.com"/>
<link rel="help" href="http://www.w3.org/TR/2012/CR-CSP-20121115/#media-src"/>
<meta name="flags" content=""/>
<meta charset="utf-8"/>
<script src="../resources/testharness.js"></script>
<script src="../resources/testharnessreport.js"></script>
</head>
<body>
<div id="log"></div>
<audio id="m"></audio>
<script>
var t = async_test(document.title);
var m = document.getElementById("m");
m.src = '""" + url1 + """/tests/csp/support/khronos/red-green.theora.ogv';
window.setTimeout(function() {
t.step(function() {
assert_false(m.currentSrc == "",
"audio.currentSrc should not be empty after setting src attribute");
});
t.done();
}, 0);
</script>
</body>
</html> """
|
kaixinjxq/web-testing-service
|
wts/tests/csp/csp_media-src_corss-origin_audio_allowed_ext.py
|
Python
|
bsd-3-clause
| 3,027
|
# head.py
# Copyright (C) 2008-2010 Michael Trier (mtrier@gmail.com) and contributors
#
# This module is part of GitPython and is released under
# the BSD License: http://www.opensource.org/licenses/bsd-license.php
import commit
class Head(object):
"""
A Head is a named reference to a Commit. Every Head instance contains a name
and a Commit object.
Examples::
>>> repo = Repo("/path/to/repo")
>>> head = repo.heads[0]
>>> head.name
'master'
>>> head.commit
<git.Commit "1c09f116cbc2cb4100fb6935bb162daa4723f455">
>>> head.commit.id
'1c09f116cbc2cb4100fb6935bb162daa4723f455'
"""
def __init__(self, name, commit):
"""
Initialize a newly instanced Head
`name`
is the name of the head
`commit`
is the Commit object that the head points to
"""
self.name = name
self.commit = commit
@classmethod
def find_all(cls, repo, **kwargs):
"""
Find all Heads in the repository
`repo`
is the Repo
`kwargs`
Additional options given as keyword arguments, will be passed
to git-for-each-ref
Returns
git.Head[]
List is sorted by committerdate
"""
options = {'sort': "committerdate",
'format': "%(refname)%00%(objectname)"}
options.update(kwargs)
output = repo.git.for_each_ref("refs/heads", **options)
return cls.list_from_string(repo, output)
@classmethod
def list_from_string(cls, repo, text):
"""
Parse out head information into a list of head objects
``repo``
is the Repo
``text``
is the text output from the git-for-each-ref command
Returns
git.Head[]
"""
heads = []
for line in text.splitlines():
heads.append(cls.from_string(repo, line))
return heads
@classmethod
def from_string(cls, repo, line):
"""
Create a new Head instance from the given string.
``repo``
is the Repo
``line``
is the formatted head information
Format::
name: [a-zA-Z_/]+
<null byte>
id: [0-9A-Fa-f]{40}
Returns
git.Head
"""
full_name, ids = line.split("\x00")
if full_name.startswith('refs/heads/'):
name = full_name[len('refs/heads/'):]
else:
name = full_name
c = commit.Commit(repo, id=ids)
return Head(name, c)
def __repr__(self):
return '<git.Head "%s">' % self.name
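# Hedged usage sketch (illustrative; not part of GitPython). Assumes the
# package's Repo class; `repo_path` is a placeholder.
def _list_heads(repo_path):
    from git import Repo
    repo = Repo(repo_path)
    for head in Head.find_all(repo):
        print('%s -> %s' % (head.name, head.commit.id))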
|
biswajitsahu/kuma
|
vendor/packages/git/head.py
|
Python
|
mpl-2.0
| 2,739
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ec2_ami_copy
short_description: copies AMI between AWS regions, returns new image id
description:
- Copies AMI from a source region to a destination region. B(Since version 2.3 this module depends on boto3.)
version_added: "2.0"
options:
source_region:
description:
- The source region the AMI should be copied from.
required: true
source_image_id:
description:
- The ID of the AMI in source region that should be copied.
required: true
name:
description:
- The name of the new AMI to copy. (As of 2.3 the default is 'default', in prior versions it was 'null'.)
required: false
default: "default"
description:
description:
- An optional human-readable string describing the contents and purpose of the new AMI.
required: false
default: null
encrypted:
description:
- Whether or not the destination snapshots of the copied AMI should be encrypted.
required: false
default: null
version_added: "2.2"
kms_key_id:
description:
- KMS key id used to encrypt image. If not specified, uses default EBS Customer Master Key (CMK) for your account.
required: false
default: null
version_added: "2.2"
wait:
description:
- Wait for the copied AMI to be in state 'available' before returning.
required: false
default: "no"
choices: [ "yes", "no" ]
wait_timeout:
description:
- How long before wait gives up, in seconds. (As of 2.3 this option is deprecated. See boto3 Waiters)
required: false
default: 1200
tags:
description:
- A hash/dictionary of tags to add to the new copied AMI; '{"key":"value"}' and '{"key":"value","key":"value"}'
required: false
default: null
author: "Amir Moulavi <amir.moulavi@gmail.com>, Tim C <defunct@defunct.io>"
extends_documentation_fragment:
- aws
- ec2
requirements:
- boto3
'''
EXAMPLES = '''
# Basic AMI Copy
- ec2_ami_copy:
source_region: us-east-1
region: eu-west-1
source_image_id: ami-xxxxxxx
# AMI copy wait until available
- ec2_ami_copy:
source_region: us-east-1
region: eu-west-1
source_image_id: ami-xxxxxxx
wait: yes
register: image_id
# Named AMI copy
- ec2_ami_copy:
source_region: us-east-1
region: eu-west-1
source_image_id: ami-xxxxxxx
name: My-Awesome-AMI
description: latest patch
# Tagged AMI copy
- ec2_ami_copy:
source_region: us-east-1
region: eu-west-1
source_image_id: ami-xxxxxxx
tags:
Name: My-Super-AMI
Patch: 1.2.3
# Encrypted AMI copy
- ec2_ami_copy:
source_region: us-east-1
region: eu-west-1
source_image_id: ami-xxxxxxx
encrypted: yes
# Encrypted AMI copy with specified key
- ec2_ami_copy:
source_region: us-east-1
region: eu-west-1
source_image_id: ami-xxxxxxx
encrypted: yes
kms_key_id: arn:aws:kms:us-east-1:XXXXXXXXXXXX:key/746de6ea-50a4-4bcb-8fbc-e3b29f2d367b
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import (boto3_conn, ec2_argument_spec, get_aws_connection_info)
try:
import boto
import boto.ec2
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
try:
import boto3
from botocore.exceptions import ClientError, NoCredentialsError, NoRegionError, WaiterError
HAS_BOTO3 = True
except ImportError:
HAS_BOTO3 = False
def copy_image(module, ec2):
"""
Copies an AMI
module : AnsibleModule object
ec2: ec2 connection object
"""
tags = module.params.get('tags')
params = {'SourceRegion': module.params.get('source_region'),
'SourceImageId': module.params.get('source_image_id'),
'Name': module.params.get('name'),
'Description': module.params.get('description'),
'Encrypted': module.params.get('encrypted'),
}
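    # KmsKeyId is passed through only when the user supplied one, so the
    # boto3 copy_image call omits the parameter otherwise.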
if module.params.get('kms_key_id'):
params['KmsKeyId'] = module.params.get('kms_key_id')
try:
image_id = ec2.copy_image(**params)['ImageId']
if module.params.get('wait'):
ec2.get_waiter('image_available').wait(ImageIds=[image_id])
if module.params.get('tags'):
ec2.create_tags(
Resources=[image_id],
                Tags=[{'Key': k, 'Value': v} for k, v in module.params.get('tags').items()]
)
module.exit_json(changed=True, image_id=image_id)
except WaiterError as we:
        module.fail_json(msg='An error occurred waiting for the image to become available. (%s)' % we.reason)
except ClientError as ce:
module.fail_json(msg=ce.message)
except NoCredentialsError:
module.fail_json(msg='Unable to authenticate, AWS credentials are invalid.')
except Exception as e:
module.fail_json(msg='Unhandled exception. (%s)' % str(e))
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
source_region=dict(required=True),
source_image_id=dict(required=True),
name=dict(default='default'),
description=dict(default=''),
encrypted=dict(type='bool', default=False, required=False),
kms_key_id=dict(type='str', required=False),
wait=dict(type='bool', default=False),
wait_timeout=dict(default=1200),
tags=dict(type='dict')))
module = AnsibleModule(argument_spec=argument_spec)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
# TODO: Check botocore version
region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
if HAS_BOTO3:
try:
ec2 = boto3_conn(module, conn_type='client', resource='ec2', region=region, endpoint=ec2_url,
**aws_connect_params)
except NoRegionError:
module.fail_json(msg='AWS Region is required')
else:
module.fail_json(msg='boto3 required for this module')
copy_image(module, ec2)
if __name__ == '__main__':
main()
|
j00bar/ansible
|
lib/ansible/modules/cloud/amazon/ec2_ami_copy.py
|
Python
|
gpl-3.0
| 6,886
|
from ..models import models
class RasterModel(models.Model):
rast = models.RasterField('A Verbose Raster Name', null=True, srid=4326, spatial_index=True, blank=True)
class Meta:
required_db_features = ['supports_raster']
def __str__(self):
return str(self.id)
|
DONIKAN/django
|
tests/gis_tests/rasterapp/models.py
|
Python
|
bsd-3-clause
| 292
|
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ce_file_copy
version_added: "2.4"
short_description: Copy a file to a remote cloudengine device over SCP on HUAWEI CloudEngine switches.
description:
- Copy a file to a remote cloudengine device over SCP on HUAWEI CloudEngine switches.
author:
- Zhou Zhijin (@CloudEngine-Ansible)
notes:
- The feature must be enabled with feature scp-server.
- If the file is already present, no transfer will take place.
requirements:
- paramiko
options:
local_file:
description:
- Path to local file. Local directory must exist.
The maximum length of I(local_file) is C(4096).
required: true
remote_file:
description:
- Remote file path of the copy. Remote directories must exist.
If omitted, the name of the local file will be used.
The maximum length of I(remote_file) is C(4096).
file_system:
description:
- The remote file system of the device. If omitted,
devices that support a I(file_system) parameter will use
their default values.
File system indicates the storage medium and can be set to as follows,
1) C(flash) is root directory of the flash memory on the master MPU.
2) C(slave#flash) is root directory of the flash memory on the slave MPU.
If no slave MPU exists, this drive is unavailable.
3) C(chassis ID/slot number#flash) is root directory of the flash memory on
a device in a stack. For example, C(1/5#flash) indicates the flash memory
whose chassis ID is 1 and slot number is 5.
default: 'flash:'
'''
EXAMPLES = '''
- name: File copy test
hosts: cloudengine
connection: local
gather_facts: no
vars:
cli:
host: "{{ inventory_hostname }}"
port: "{{ ansible_ssh_port }}"
username: "{{ username }}"
password: "{{ password }}"
transport: cli
tasks:
- name: "Copy a local file to remote device"
ce_file_copy:
local_file: /usr/vrpcfg.cfg
remote_file: /vrpcfg.cfg
file_system: 'flash:'
provider: "{{ cli }}"
'''
RETURN = '''
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
transfer_result:
description: information about transfer result.
returned: always
type: string
sample: 'The local file has been successfully transferred to the device.'
local_file:
description: The path of the local file.
returned: always
type: string
sample: '/usr/work/vrpcfg.zip'
remote_file:
description: The path of the remote file.
returned: always
type: string
sample: '/vrpcfg.zip'
'''
import re
import os
import time
from xml.etree import ElementTree
from ansible.module_utils.basic import get_exception, AnsibleModule
from ansible.module_utils.network.cloudengine.ce import ce_argument_spec, run_commands, get_nc_config
try:
import paramiko
HAS_PARAMIKO = True
except ImportError:
HAS_PARAMIKO = False
try:
from scp import SCPClient
HAS_SCP = True
except ImportError:
HAS_SCP = False
CE_NC_GET_FILE_INFO = """
<filter type="subtree">
<vfm xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<dirs>
<dir>
<fileName>%s</fileName>
<dirName>%s</dirName>
<DirSize></DirSize>
</dir>
</dirs>
</vfm>
</filter>
"""
CE_NC_GET_SCP_ENABLE = """
<filter type="subtree">
<sshs xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<sshServer>
<scpEnable></scpEnable>
</sshServer>
</sshs>
</filter>
"""
def get_cli_exception(exc=None):
"""Get cli exception message"""
msg = list()
if not exc:
exc = get_exception()
if exc:
errs = str(exc).split("\r\n")
for err in errs:
if not err:
continue
if "matched error in response:" in err:
continue
if " at '^' position" in err:
err = err.replace(" at '^' position", "")
if err.replace(" ", "") == "^":
continue
if len(err) > 2 and err[0] in ["<", "["] and err[-1] in [">", "]"]:
continue
if err[-1] == ".":
err = err[:-1]
if err.replace(" ", "") == "":
continue
msg.append(err)
else:
msg = ["Error: Fail to get cli exception message."]
while msg[-1][-1] == ' ':
msg[-1] = msg[-1][:-1]
if msg[-1][-1] != ".":
msg[-1] += "."
return ", ".join(msg).capitalize()
class FileCopy(object):
"""File copy function class"""
def __init__(self, argument_spec):
self.spec = argument_spec
self.module = None
self.init_module()
# file copy parameters
self.local_file = self.module.params['local_file']
self.remote_file = self.module.params['remote_file']
self.file_system = self.module.params['file_system']
# state
self.transfer_result = None
self.changed = False
def init_module(self):
"""Init module"""
self.module = AnsibleModule(
argument_spec=self.spec, supports_check_mode=True)
def remote_file_exists(self, dst, file_system='flash:'):
"""Remote file whether exists"""
full_path = file_system + dst
file_name = os.path.basename(full_path)
file_path = os.path.dirname(full_path)
file_path = file_path + '/'
xml_str = CE_NC_GET_FILE_INFO % (file_name, file_path)
ret_xml = get_nc_config(self.module, xml_str)
if "<data/>" in ret_xml:
return False, 0
xml_str = ret_xml.replace('\r', '').replace('\n', '').\
replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\
replace('xmlns="http://www.huawei.com/netconf/vrp"', "")
# get file info
root = ElementTree.fromstring(xml_str)
topo = root.find("data/vfm/dirs/dir")
if topo is None:
return False, 0
for eles in topo:
if eles.tag in ["DirSize"]:
return True, int(eles.text.replace(',', ''))
return False, 0
def local_file_exists(self):
"""Local file whether exists"""
return os.path.isfile(self.local_file)
def enough_space(self):
"""Whether device has enough space"""
commands = list()
cmd = 'dir %s' % self.file_system
commands.append(cmd)
output = run_commands(self.module, commands)
if not output:
return True
        match = re.search(r'\((.*) KB free\)', output[0])
        if not match:
            return True
        kbytes_free = match.group(1)
kbytes_free = kbytes_free.replace(',', '')
file_size = os.path.getsize(self.local_file)
if int(kbytes_free) * 1024 > file_size:
return True
return False
def transfer_file(self, dest):
"""Begin to transfer file by scp"""
if not self.local_file_exists():
self.module.fail_json(
msg='Could not transfer file. Local file doesn\'t exist.')
if not self.enough_space():
self.module.fail_json(
msg='Could not transfer file. Not enough space on device.')
hostname = self.module.params['provider']['host']
username = self.module.params['provider']['username']
password = self.module.params['provider']['password']
port = self.module.params['provider']['port']
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(hostname=hostname, username=username, password=password, port=port)
full_remote_path = '{}{}'.format(self.file_system, dest)
scp = SCPClient(ssh.get_transport())
try:
scp.put(self.local_file, full_remote_path)
except:
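            # scp.put() can raise on slow links even when the copy completed;
            # wait briefly, then verify success by comparing file sizes below.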
time.sleep(10)
file_exists, temp_size = self.remote_file_exists(
dest, self.file_system)
file_size = os.path.getsize(self.local_file)
if file_exists and int(temp_size) == int(file_size):
pass
else:
scp.close()
self.module.fail_json(msg='Could not transfer file. There was an error '
'during transfer. Please make sure the format of '
'input parameters is right.')
scp.close()
return True
def get_scp_enable(self):
"""Get scp enable state"""
xml_str = CE_NC_GET_SCP_ENABLE
ret_xml = get_nc_config(self.module, xml_str)
if "<data/>" in ret_xml:
return False
xml_str = ret_xml.replace('\r', '').replace('\n', '').\
replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\
replace('xmlns="http://www.huawei.com/netconf/vrp"', "")
# get file info
root = ElementTree.fromstring(xml_str)
topo = root.find("data/sshs/sshServer")
        if topo is None:
            return False, ''
for eles in topo:
if eles.tag in ["scpEnable"]:
return True, eles.text
        return False, ''
def work(self):
"""Excute task """
if not HAS_SCP:
self.module.fail_json(
msg="'Error: No scp package, please install it.'")
if not HAS_PARAMIKO:
self.module.fail_json(
msg="'Error: No paramiko package, please install it.'")
if self.local_file and len(self.local_file) > 4096:
self.module.fail_json(
msg="'Error: The maximum length of local_file is 4096.'")
if self.remote_file and len(self.remote_file) > 4096:
self.module.fail_json(
msg="'Error: The maximum length of remote_file is 4096.'")
retcode, cur_state = self.get_scp_enable()
if retcode and cur_state == 'Disable':
self.module.fail_json(
msg="'Error: Please ensure SCP server is enabled.'")
if not os.path.isfile(self.local_file):
self.module.fail_json(
msg="Local file {} not found".format(self.local_file))
dest = self.remote_file or ('/' + os.path.basename(self.local_file))
remote_exists, file_size = self.remote_file_exists(
dest, file_system=self.file_system)
if remote_exists and (os.path.getsize(self.local_file) != file_size):
remote_exists = False
if not remote_exists:
self.changed = True
file_exists = False
else:
file_exists = True
self.transfer_result = 'The local file already exists on the device.'
if not file_exists:
self.transfer_file(dest)
self.transfer_result = 'The local file has been successfully transferred to the device.'
if self.remote_file is None:
self.remote_file = '/' + os.path.basename(self.local_file)
self.module.exit_json(
changed=self.changed,
transfer_result=self.transfer_result,
local_file=self.local_file,
remote_file=self.remote_file,
file_system=self.file_system)
def main():
"""Main function entry"""
argument_spec = dict(
local_file=dict(required=True),
remote_file=dict(required=False),
file_system=dict(required=False, default='flash:')
)
argument_spec.update(ce_argument_spec)
filecopy_obj = FileCopy(argument_spec)
filecopy_obj.work()
if __name__ == '__main__':
main()
|
hryamzik/ansible
|
lib/ansible/modules/network/cloudengine/ce_file_copy.py
|
Python
|
gpl-3.0
| 12,627
|
import logging
from django.template import Context, Engine, Variable, VariableDoesNotExist
from django.test import SimpleTestCase, ignore_warnings
from django.utils.deprecation import RemovedInDjango21Warning
class TestHandler(logging.Handler):
def __init__(self):
super().__init__()
self.log_record = None
def emit(self, record):
self.log_record = record
class BaseTemplateLoggingTestCase(SimpleTestCase):
def setUp(self):
self.test_handler = TestHandler()
self.logger = logging.getLogger('django.template')
self.original_level = self.logger.level
self.logger.addHandler(self.test_handler)
self.logger.setLevel(self.loglevel)
def tearDown(self):
self.logger.removeHandler(self.test_handler)
self.logger.level = self.original_level
class VariableResolveLoggingTests(BaseTemplateLoggingTestCase):
loglevel = logging.DEBUG
def test_log_on_variable_does_not_exist_silent(self):
class TestObject:
class SilentDoesNotExist(Exception):
silent_variable_failure = True
@property
def template_name(self):
return "template_name"
@property
def template(self):
return Engine().from_string('')
@property
def article(self):
raise TestObject.SilentDoesNotExist("Attribute does not exist.")
def __iter__(self):
return iter(attr for attr in dir(TestObject) if attr[:2] != "__")
def __getitem__(self, item):
return self.__dict__[item]
Variable('article').resolve(TestObject())
self.assertEqual(
self.test_handler.log_record.getMessage(),
"Exception while resolving variable 'article' in template 'template_name'."
)
self.assertIsNotNone(self.test_handler.log_record.exc_info)
raised_exception = self.test_handler.log_record.exc_info[1]
self.assertEqual(str(raised_exception), 'Attribute does not exist.')
def test_log_on_variable_does_not_exist_not_silent(self):
with self.assertRaises(VariableDoesNotExist):
Variable('article.author').resolve({'article': {'section': 'News'}})
self.assertEqual(
self.test_handler.log_record.getMessage(),
"Exception while resolving variable 'author' in template 'unknown'."
)
self.assertIsNotNone(self.test_handler.log_record.exc_info)
raised_exception = self.test_handler.log_record.exc_info[1]
self.assertEqual(
str(raised_exception),
"Failed lookup for key [author] in {'section': 'News'}"
)
def test_no_log_when_variable_exists(self):
Variable('article.section').resolve({'article': {'section': 'News'}})
self.assertIsNone(self.test_handler.log_record)
class IncludeNodeLoggingTests(BaseTemplateLoggingTestCase):
loglevel = logging.WARN
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.engine = Engine(loaders=[
('django.template.loaders.locmem.Loader', {
'child': '{{ raises_exception }}',
}),
], debug=False)
def error_method():
raise IndexError("some generic exception")
cls.ctx = Context({'raises_exception': error_method})
def test_logs_exceptions_during_rendering_with_debug_disabled(self):
template = self.engine.from_string('{% include "child" %}')
template.name = 'template_name'
with ignore_warnings(category=RemovedInDjango21Warning):
self.assertEqual(template.render(self.ctx), '')
self.assertEqual(
self.test_handler.log_record.getMessage(),
"Exception raised while rendering {% include %} for template "
"'template_name'. Empty string rendered instead."
)
self.assertIsNotNone(self.test_handler.log_record.exc_info)
self.assertEqual(self.test_handler.log_record.levelno, logging.WARN)
def test_logs_exceptions_during_rendering_with_no_template_name(self):
template = self.engine.from_string('{% include "child" %}')
with ignore_warnings(category=RemovedInDjango21Warning):
self.assertEqual(template.render(self.ctx), '')
self.assertEqual(
self.test_handler.log_record.getMessage(),
"Exception raised while rendering {% include %} for template "
"'unknown'. Empty string rendered instead."
)
self.assertIsNotNone(self.test_handler.log_record.exc_info)
self.assertEqual(self.test_handler.log_record.levelno, logging.WARN)
|
tysonclugg/django
|
tests/template_tests/test_logging.py
|
Python
|
bsd-3-clause
| 4,731
|
import cbor
with open("/tmp/data.cbor", "rb") as f:
serialized = f.read()
data = cbor.loads(serialized)
print(data)
assert(data["name"] == "python-cbor")
assert(data["versions"] == ["1", "2"])
assert(data["group"]["is_a_package"] is True)
assert(data["group"]["value"] == 42)
|
masahir0y/buildroot-yamada
|
support/testing/tests/package/sample_python_cbor_dec.py
|
Python
|
gpl-2.0
| 281
|
import os as _os
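# Extend the package search path so the generated wrapper modules (gen/...)
# and the native shared libraries (lib/) can be imported from this package.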
__path__.append(_os.path.join(__path__[0], '..', '..', 'gen', 'ortools', 'algorithms'))
__path__.append(_os.path.join(__path__[0], '..', '..', '..', 'lib'))
|
capturePointer/or-tools
|
src/ortools/algorithms/__init__.py
|
Python
|
apache-2.0
| 174
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""`LinearOperator` that wraps a [batch] matrix."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops.linalg import linear_operator
from tensorflow.python.ops.linalg import linear_operator_util
from tensorflow.python.util.tf_export import tf_export
__all__ = ["LinearOperatorFullMatrix"]
@tf_export("linalg.LinearOperatorFullMatrix")
class LinearOperatorFullMatrix(linear_operator.LinearOperator):
"""`LinearOperator` that wraps a [batch] matrix.
This operator wraps a [batch] matrix `A` (which is a `Tensor`) with shape
`[B1,...,Bb, M, N]` for some `b >= 0`. The first `b` indices index a
  batch member. For every batch index `(i1,...,ib)`, `A[i1,...,ib, :, :]` is
an `M x N` matrix.
```python
# Create a 2 x 2 linear operator.
matrix = [[1., 2.], [3., 4.]]
operator = LinearOperatorFullMatrix(matrix)
operator.to_dense()
==> [[1., 2.]
[3., 4.]]
operator.shape
==> [2, 2]
operator.log_abs_determinant()
==> scalar Tensor
x = ... Shape [2, 4] Tensor
operator.matmul(x)
==> Shape [2, 4] Tensor
# Create a [2, 3] batch of 4 x 4 linear operators.
matrix = tf.random_normal(shape=[2, 3, 4, 4])
operator = LinearOperatorFullMatrix(matrix)
```
#### Shape compatibility
This operator acts on [batch] matrix with compatible shape.
`x` is a batch matrix with compatible shape for `matmul` and `solve` if
```
operator.shape = [B1,...,Bb] + [M, N], with b >= 0
x.shape = [B1,...,Bb] + [N, R], with R >= 0.
```
#### Performance
`LinearOperatorFullMatrix` has exactly the same performance as would be
achieved by using standard `TensorFlow` matrix ops. Intelligent choices are
made based on the following initialization hints.
* If `dtype` is real, and `is_self_adjoint` and `is_positive_definite`, a
Cholesky factorization is used for the determinant and solve.
In all cases, suppose `operator` is a `LinearOperatorFullMatrix` of shape
`[M, N]`, and `x.shape = [N, R]`. Then
* `operator.matmul(x)` is `O(M * N * R)`.
* If `M=N`, `operator.solve(x)` is `O(N^3 * R)`.
* If `M=N`, `operator.determinant()` is `O(N^3)`.
If instead `operator` and `x` have shape `[B1,...,Bb, M, N]` and
`[B1,...,Bb, N, R]`, every operation increases in complexity by `B1*...*Bb`.
#### Matrix property hints
This `LinearOperator` is initialized with boolean flags of the form `is_X`,
for `X = non_singular, self_adjoint, positive_definite, square`.
These have the following meaning:
* If `is_X == True`, callers should expect the operator to have the
property `X`. This is a promise that should be fulfilled, but is *not* a
runtime assert. For example, finite floating point precision may result
in these promises being violated.
* If `is_X == False`, callers should expect the operator to not have `X`.
* If `is_X == None` (the default), callers should have no expectation either
way.
"""
def __init__(self,
matrix,
is_non_singular=None,
is_self_adjoint=None,
is_positive_definite=None,
is_square=None,
name="LinearOperatorFullMatrix"):
r"""Initialize a `LinearOperatorFullMatrix`.
Args:
matrix: Shape `[B1,...,Bb, M, N]` with `b >= 0`, `M, N >= 0`.
Allowed dtypes: `float16`, `float32`, `float64`, `complex64`,
`complex128`.
is_non_singular: Expect that this operator is non-singular.
is_self_adjoint: Expect that this operator is equal to its hermitian
transpose.
is_positive_definite: Expect that this operator is positive definite,
meaning the quadratic form `x^H A x` has positive real part for all
nonzero `x`. Note that we do not require the operator to be
self-adjoint to be positive-definite. See:
https://en.wikipedia.org/wiki/Positive-definite_matrix#Extension_for_non-symmetric_matrices
is_square: Expect that this operator acts like square [batch] matrices.
name: A name for this `LinearOperator`.
Raises:
      TypeError: If `matrix.dtype` is not an allowed type.
"""
with ops.name_scope(name, values=[matrix]):
self._matrix = ops.convert_to_tensor(matrix, name="matrix")
self._check_matrix(self._matrix)
super(LinearOperatorFullMatrix, self).__init__(
dtype=self._matrix.dtype,
graph_parents=[self._matrix],
is_non_singular=is_non_singular,
is_self_adjoint=is_self_adjoint,
is_positive_definite=is_positive_definite,
is_square=is_square,
name=name)
def _check_matrix(self, matrix):
"""Static check of the `matrix` argument."""
allowed_dtypes = [
dtypes.float16,
dtypes.float32,
dtypes.float64,
dtypes.complex64,
dtypes.complex128,
]
matrix = ops.convert_to_tensor(matrix, name="matrix")
dtype = matrix.dtype
if dtype not in allowed_dtypes:
raise TypeError(
"Argument matrix must have dtype in %s. Found: %s"
% (allowed_dtypes, dtype))
if matrix.get_shape().ndims is not None and matrix.get_shape().ndims < 2:
raise ValueError(
"Argument matrix must have at least 2 dimensions. Found: %s"
% matrix)
def _shape(self):
return self._matrix.get_shape()
def _shape_tensor(self):
return array_ops.shape(self._matrix)
def _matmul(self, x, adjoint=False, adjoint_arg=False):
return linear_operator_util.matmul_with_broadcast(
self._matrix, x, adjoint_a=adjoint, adjoint_b=adjoint_arg)
def _to_dense(self):
return self._matrix
|
benoitsteiner/tensorflow-xsmm
|
tensorflow/python/ops/linalg/linear_operator_full_matrix.py
|
Python
|
apache-2.0
| 6,537
|
#!/usr/bin/python
# (c) 2018-2019, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = '''
author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
description:
- Create/Delete cluster peer relations on ONTAP
extends_documentation_fragment:
- netapp.na_ontap
module: na_ontap_cluster_peer
options:
state:
choices: ['present', 'absent']
description:
- Whether the specified cluster peer should exist or not.
default: present
source_intercluster_lifs:
description:
- List of intercluster addresses of the source cluster.
- Used as peer-addresses in destination cluster.
- All these intercluster lifs should belong to the source cluster.
version_added: "2.8"
aliases:
- source_intercluster_lif
dest_intercluster_lifs:
description:
- List of intercluster addresses of the destination cluster.
- Used as peer-addresses in source cluster.
- All these intercluster lifs should belong to the destination cluster.
version_added: "2.8"
aliases:
- dest_intercluster_lif
passphrase:
description:
- The arbitrary passphrase that matches the one given to the peer cluster.
source_cluster_name:
description:
- The name of the source cluster name in the peer relation to be deleted.
dest_cluster_name:
description:
- The name of the destination cluster name in the peer relation to be deleted.
- Required for delete
dest_hostname:
description:
- Destination cluster IP or hostname which needs to be peered
- Required to complete the peering process at destination cluster.
required: True
dest_username:
description:
- Destination username.
- Optional if this is same as source username.
dest_password:
description:
- Destination password.
- Optional if this is same as source password.
short_description: NetApp ONTAP Manage Cluster peering
version_added: "2.7"
'''
EXAMPLES = """
- name: Create cluster peer
na_ontap_cluster_peer:
state: present
source_intercluster_lifs: 1.2.3.4,1.2.3.5
dest_intercluster_lifs: 1.2.3.6,1.2.3.7
passphrase: XXXX
hostname: "{{ netapp_hostname }}"
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
dest_hostname: "{{ dest_netapp_hostname }}"
- name: Delete cluster peer
na_ontap_cluster_peer:
state: absent
source_cluster_name: test-source-cluster
dest_cluster_name: test-dest-cluster
hostname: "{{ netapp_hostname }}"
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
dest_hostname: "{{ dest_netapp_hostname }}"
"""
RETURN = """
"""
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
import ansible.module_utils.netapp as netapp_utils
from ansible.module_utils.netapp_module import NetAppModule
HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
class NetAppONTAPClusterPeer(object):
"""
Class with cluster peer methods
"""
def __init__(self):
self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
self.argument_spec.update(dict(
state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
source_intercluster_lifs=dict(required=False, type='list', aliases=['source_intercluster_lif']),
dest_intercluster_lifs=dict(required=False, type='list', aliases=['dest_intercluster_lif']),
passphrase=dict(required=False, type='str', no_log=True),
dest_hostname=dict(required=True, type='str'),
dest_username=dict(required=False, type='str'),
dest_password=dict(required=False, type='str', no_log=True),
source_cluster_name=dict(required=False, type='str'),
dest_cluster_name=dict(required=False, type='str')
))
self.module = AnsibleModule(
argument_spec=self.argument_spec,
required_together=[['source_intercluster_lifs', 'dest_intercluster_lifs']],
required_if=[('state', 'absent', ['source_cluster_name', 'dest_cluster_name'])],
supports_check_mode=True
)
self.na_helper = NetAppModule()
self.parameters = self.na_helper.set_parameters(self.module.params)
if HAS_NETAPP_LIB is False:
self.module.fail_json(msg="the python NetApp-Lib module is required")
else:
self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)
# set destination server connection
self.module.params['hostname'] = self.parameters['dest_hostname']
if self.parameters.get('dest_username'):
self.module.params['username'] = self.parameters['dest_username']
if self.parameters.get('dest_password'):
self.module.params['password'] = self.parameters['dest_password']
self.dest_server = netapp_utils.setup_na_ontap_zapi(module=self.module)
# reset to source host connection for asup logs
self.module.params['hostname'] = self.parameters['hostname']
def cluster_peer_get_iter(self, cluster):
"""
Compose NaElement object to query current source cluster using peer-cluster-name and peer-addresses parameters
:param cluster: type of cluster (source or destination)
:return: NaElement object for cluster-get-iter with query
"""
cluster_peer_get = netapp_utils.zapi.NaElement('cluster-peer-get-iter')
query = netapp_utils.zapi.NaElement('query')
cluster_peer_info = netapp_utils.zapi.NaElement('cluster-peer-info')
if cluster == 'source':
peer_lifs, peer_cluster = 'dest_intercluster_lifs', 'dest_cluster_name'
else:
peer_lifs, peer_cluster = 'source_intercluster_lifs', 'source_cluster_name'
if self.parameters.get(peer_lifs):
peer_addresses = netapp_utils.zapi.NaElement('peer-addresses')
for peer in self.parameters.get(peer_lifs):
peer_addresses.add_new_child('remote-inet-address', peer)
cluster_peer_info.add_child_elem(peer_addresses)
if self.parameters.get(peer_cluster):
cluster_peer_info.add_new_child('cluster-name', self.parameters[peer_cluster])
query.add_child_elem(cluster_peer_info)
cluster_peer_get.add_child_elem(query)
return cluster_peer_get
def cluster_peer_get(self, cluster):
"""
Get current cluster peer info
:param cluster: type of cluster (source or destination)
:return: Dictionary of current cluster peer details if query successful, else return None
"""
cluster_peer_get_iter = self.cluster_peer_get_iter(cluster)
result, cluster_info = None, dict()
if cluster == 'source':
server = self.server
else:
server = self.dest_server
try:
result = server.invoke_successfully(cluster_peer_get_iter, enable_tunneling=True)
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg='Error fetching cluster peer %s: %s'
% (self.parameters['dest_cluster_name'], to_native(error)),
exception=traceback.format_exc())
# return cluster peer details
if result.get_child_by_name('num-records') and \
int(result.get_child_content('num-records')) >= 1:
cluster_peer_info = result.get_child_by_name('attributes-list').get_child_by_name('cluster-peer-info')
cluster_info['cluster_name'] = cluster_peer_info.get_child_content('cluster-name')
peers = cluster_peer_info.get_child_by_name('peer-addresses')
cluster_info['peer-addresses'] = [peer.get_content() for peer in peers.get_children()]
return cluster_info
return None
def cluster_peer_delete(self, cluster):
"""
Delete a cluster peer on source or destination
For source cluster, peer cluster-name = destination cluster name and vice-versa
:param cluster: type of cluster (source or destination)
:return:
"""
if cluster == 'source':
server, peer_cluster_name = self.server, self.parameters['dest_cluster_name']
else:
server, peer_cluster_name = self.dest_server, self.parameters['source_cluster_name']
cluster_peer_delete = netapp_utils.zapi.NaElement.create_node_with_children(
'cluster-peer-delete', **{'cluster-name': peer_cluster_name})
try:
server.invoke_successfully(cluster_peer_delete, enable_tunneling=True)
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg='Error deleting cluster peer %s: %s'
% (peer_cluster_name, to_native(error)),
exception=traceback.format_exc())
def cluster_peer_create(self, cluster):
"""
Create a cluster peer on source or destination
For source cluster, peer addresses = destination inter-cluster LIFs and vice-versa
:param cluster: type of cluster (source or destination)
:return: None
"""
cluster_peer_create = netapp_utils.zapi.NaElement.create_node_with_children('cluster-peer-create')
if self.parameters.get('passphrase') is not None:
cluster_peer_create.add_new_child('passphrase', self.parameters['passphrase'])
peer_addresses = netapp_utils.zapi.NaElement('peer-addresses')
if cluster == 'source':
server, peer_address = self.server, self.parameters['dest_intercluster_lifs']
else:
server, peer_address = self.dest_server, self.parameters['source_intercluster_lifs']
for each in peer_address:
peer_addresses.add_new_child('remote-inet-address', each)
cluster_peer_create.add_child_elem(peer_addresses)
try:
server.invoke_successfully(cluster_peer_create, enable_tunneling=True)
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg='Error creating cluster peer %s: %s'
% (peer_address, to_native(error)),
exception=traceback.format_exc())
def apply(self):
"""
Apply action to cluster peer
:return: None
"""
self.asup_log_for_cserver("na_ontap_cluster_peer")
source = self.cluster_peer_get('source')
destination = self.cluster_peer_get('destination')
source_action = self.na_helper.get_cd_action(source, self.parameters)
destination_action = self.na_helper.get_cd_action(destination, self.parameters)
self.na_helper.changed = False
# create only if expected cluster peer relation is not present on both source and destination clusters
if source_action == 'create' and destination_action == 'create':
self.cluster_peer_create('source')
self.cluster_peer_create('destination')
self.na_helper.changed = True
# delete peer relation in cluster where relation is present
else:
if source_action == 'delete':
self.cluster_peer_delete('source')
self.na_helper.changed = True
if destination_action == 'delete':
self.cluster_peer_delete('destination')
self.na_helper.changed = True
self.module.exit_json(changed=self.na_helper.changed)
def asup_log_for_cserver(self, event_name):
"""
Fetch admin vserver for the given cluster
Create and Autosupport log event with the given module name
:param event_name: Name of the event log
:return: None
"""
results = netapp_utils.get_cserver(self.server)
cserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results)
netapp_utils.ems_log_event(event_name, cserver)
def main():
"""
Execute action
:return: None
"""
community_obj = NetAppONTAPClusterPeer()
community_obj.apply()
if __name__ == '__main__':
main()
|
kustodian/ansible
|
lib/ansible/modules/storage/netapp/na_ontap_cluster_peer.py
|
Python
|
gpl-3.0
| 12,647
|
import sys
import glob
def read_fileb(filename, mode='rb'):
f = open(filename, mode)
try:
return f.read()
finally:
f.close()
def write_fileb(filename, value, mode='wb'):
f = open(filename, mode)
try:
f.write(value)
finally:
f.close()
for filename in glob.glob(sys.argv[1]):
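    # Back up the original, then rewrite it with trailing whitespace stripped
    # and tabs expanded to two spaces; report how many bytes were saved.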
data1 = read_fileb(filename)
write_fileb(filename + '.bak2', data1)
data2lines = read_fileb(filename).strip().split('\n')
    data2 = '\n'.join([line.rstrip().replace('\t', ' ' * 2)
                       for line in data2lines]) + '\n'
write_fileb(filename, data2)
print filename, len(data1) - len(data2)
|
pouyana/teireader
|
webui/scripts/fixws.py
|
Python
|
mit
| 646
|
from __future__ import absolute_import
from django.test import TestCase
from .models import Person
class SaveDeleteHookTests(TestCase):
def test_basic(self):
p = Person(first_name="John", last_name="Smith")
self.assertEqual(p.data, [])
p.save()
self.assertEqual(p.data, [
"Before save",
"After save",
])
self.assertQuerysetEqual(
Person.objects.all(), [
"John Smith",
],
unicode
)
p.delete()
self.assertEqual(p.data, [
"Before save",
"After save",
"Before deletion",
"After deletion",
])
self.assertQuerysetEqual(Person.objects.all(), [])
|
LethusTI/supportcenter
|
vendor/django/tests/modeltests/save_delete_hooks/tests.py
|
Python
|
gpl-3.0
| 761
|
'''
Bubble
======
.. versionadded:: 1.1.0
.. image:: images/bubble.jpg
:align: right
The Bubble widget is a form of menu or a small popup where the menu options
are stacked either vertically or horizontally.
The :class:`Bubble` contains an arrow pointing in the direction you
choose.
Simple example
--------------
.. include:: ../../examples/widgets/bubble_test.py
:literal:
Customize the Bubble
--------------------
You can choose the direction in which the arrow points::
Bubble(arrow_pos='top_mid')
The widgets added to the Bubble are ordered horizontally by default, like a
Boxlayout. You can change that by::
orientation = 'vertical'
To add items to the bubble::
bubble = Bubble(orientation = 'vertical')
bubble.add_widget(your_widget_instance)
To remove items::
bubble.remove_widget(widget)
or
bubble.clear_widgets()
To access the list of children, use content.children::
bubble.content.children
.. warning::
This is important! Do not use bubble.children
To change the appearance of the bubble::
bubble.background_color = (1, 0, 0, .5) #50% translucent red
bubble.border = [0, 0, 0, 0]
background_image = 'path/to/background/image'
arrow_image = 'path/to/arrow/image'
'''
__all__ = ('Bubble', 'BubbleButton', 'BubbleContent')
from kivy.uix.image import Image
from kivy.uix.widget import Widget
from kivy.uix.scatter import Scatter
from kivy.uix.gridlayout import GridLayout
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.button import Button
from kivy.properties import ObjectProperty, StringProperty, OptionProperty, \
ListProperty, BooleanProperty
from kivy.clock import Clock
from kivy.base import EventLoop
from kivy.metrics import dp
class BubbleButton(Button):
'''A button intended for use in a Bubble widget.
You can use a "normal" button class, but it will not look good unless
the background is changed.
Rather use this BubbleButton widget that is already defined and provides a
suitable background for you.
'''
pass
class BubbleContent(GridLayout):
pass
class Bubble(GridLayout):
'''Bubble class. See module documentation for more information.
'''
background_color = ListProperty([1, 1, 1, 1])
'''Background color, in the format (r, g, b, a).
:attr:`background_color` is a :class:`~kivy.properties.ListProperty` and
defaults to [1, 1, 1, 1].
'''
border = ListProperty([16, 16, 16, 16])
'''Border used for :class:`~kivy.graphics.vertex_instructions.BorderImage`
graphics instruction. Used with the :attr:`background_image`.
It should be used when using custom backgrounds.
It must be a list of 4 values: (top, right, bottom, left). Read the
BorderImage instructions for more information about how to use it.
:attr:`border` is a :class:`~kivy.properties.ListProperty` and defaults to
(16, 16, 16, 16)
'''
background_image = StringProperty(
'atlas://data/images/defaulttheme/bubble')
'''Background image of the bubble.
:attr:`background_image` is a :class:`~kivy.properties.StringProperty` and
defaults to 'atlas://data/images/defaulttheme/bubble'.
'''
arrow_image = StringProperty(
'atlas://data/images/defaulttheme/bubble_arrow')
''' Image of the arrow pointing to the bubble.
:attr:`arrow_image` is a :class:`~kivy.properties.StringProperty` and
defaults to 'atlas://data/images/defaulttheme/bubble_arrow'.
'''
show_arrow = BooleanProperty(True)
    ''' Indicates whether to show the arrow.
.. versionadded:: 1.8.0
:attr:`show_arrow` is a :class:`~kivy.properties.BooleanProperty` and
defaults to `True`.
'''
arrow_pos = OptionProperty('bottom_mid', options=(
'left_top', 'left_mid', 'left_bottom', 'top_left', 'top_mid',
'top_right', 'right_top', 'right_mid', 'right_bottom',
'bottom_left', 'bottom_mid', 'bottom_right'))
'''Specifies the position of the arrow relative to the bubble.
    Can be one of: left_top, left_mid, left_bottom, top_left, top_mid,
    top_right, right_top, right_mid, right_bottom, bottom_left, bottom_mid,
    bottom_right.
:attr:`arrow_pos` is a :class:`~kivy.properties.OptionProperty` and
defaults to 'bottom_mid'.
'''
content = ObjectProperty(None)
'''This is the object where the main content of the bubble is held.
:attr:`content` is a :class:`~kivy.properties.ObjectProperty` and
defaults to 'None'.
'''
orientation = OptionProperty('horizontal',
options=('horizontal', 'vertical'))
'''This specifies the manner in which the children inside bubble
are arranged. Can be one of 'vertical' or 'horizontal'.
:attr:`orientation` is a :class:`~kivy.properties.OptionProperty` and
defaults to 'horizontal'.
'''
limit_to = ObjectProperty(None, allownone=True)
    '''Specifies the widget to which the bubble's position is restricted.
.. versionadded:: 1.6.0
:attr:`limit_to` is a :class:`~kivy.properties.ObjectProperty` and
defaults to 'None'.
'''
def __init__(self, **kwargs):
self._prev_arrow_pos = None
self._arrow_layout = BoxLayout()
self._bk_img = Image(
source=self.background_image, allow_stretch=True,
keep_ratio=False, color=self.background_color)
self.background_texture = self._bk_img.texture
self._arrow_img = Image(source=self.arrow_image,
allow_stretch=True,
color=self.background_color)
self.content = content = BubbleContent(parent=self)
super(Bubble, self).__init__(**kwargs)
content.parent = None
self.add_widget(content)
self.on_arrow_pos()
def add_widget(self, *l):
content = self.content
if content is None:
return
if l[0] == content or l[0] == self._arrow_img\
or l[0] == self._arrow_layout:
super(Bubble, self).add_widget(*l)
else:
content.add_widget(*l)
def remove_widget(self, *l):
content = self.content
if not content:
return
if l[0] == content or l[0] == self._arrow_img\
or l[0] == self._arrow_layout:
super(Bubble, self).remove_widget(*l)
else:
content.remove_widget(l[0])
def clear_widgets(self, **kwargs):
content = self.content
if not content:
return
if kwargs.get('do_super', False):
super(Bubble, self).clear_widgets()
else:
content.clear_widgets()
def on_show_arrow(self, instance, value):
self._arrow_img.opacity = int(value)
def on_parent(self, instance, value):
Clock.schedule_once(self._update_arrow)
def on_pos(self, instance, pos):
lt = self.limit_to
if lt:
self.limit_to = None
if lt is EventLoop.window:
x = y = 0
top = lt.height
right = lt.width
else:
x, y = lt.x, lt.y
top, right = lt.top, lt.right
self.x = max(self.x, x)
self.right = min(self.right, right)
self.top = min(self.top, top)
self.y = max(self.y, y)
self.limit_to = lt
def on_background_image(self, *l):
self._bk_img.source = self.background_image
def on_background_color(self, *l):
if self.content is None:
return
self._arrow_img.color = self._bk_img.color = self.background_color
def on_orientation(self, *l):
content = self.content
if not content:
return
if self.orientation[0] == 'v':
content.cols = 1
content.rows = 99
else:
content.cols = 99
content.rows = 1
def on_arrow_image(self, *l):
self._arrow_img.source = self.arrow_image
def on_arrow_pos(self, *l):
self_content = self.content
if not self_content:
Clock.schedule_once(self.on_arrow_pos)
return
if self_content not in self.children:
Clock.schedule_once(self.on_arrow_pos)
return
self_arrow_pos = self.arrow_pos
if self._prev_arrow_pos == self_arrow_pos:
return
self._prev_arrow_pos = self_arrow_pos
self_arrow_layout = self._arrow_layout
self_arrow_layout.clear_widgets()
self_arrow_img = self._arrow_img
self._sctr = self._arrow_img
self.clear_widgets(do_super=True)
self_content.parent = None
self_arrow_img.size_hint = (1, None)
self_arrow_img.height = dp(self_arrow_img.texture_size[1])
self_arrow_img.pos = 0, 0
widget_list = []
arrow_list = []
parent = self_arrow_img.parent
if parent:
parent.remove_widget(self_arrow_img)
if self_arrow_pos[0] == 'b' or self_arrow_pos[0] == 't':
self.cols = 1
self.rows = 3
self_arrow_layout.orientation = 'horizontal'
self_arrow_img.width = self.width / 3
self_arrow_layout.size_hint = (1, None)
self_arrow_layout.height = self_arrow_img.height
if self_arrow_pos[0] == 'b':
if self_arrow_pos == 'bottom_mid':
widget_list = (self_content, self_arrow_img)
else:
if self_arrow_pos == 'bottom_left':
arrow_list = (self_arrow_img, Widget(), Widget())
elif self_arrow_pos == 'bottom_right':
#add two dummy widgets
arrow_list = (Widget(), Widget(), self_arrow_img)
widget_list = (self_content, self_arrow_layout)
else:
sctr = Scatter(do_translation=False,
rotation=180,
do_rotation=False,
do_scale=False,
size_hint=(None, None),
size=self_arrow_img.size)
sctr.add_widget(self_arrow_img)
if self_arrow_pos == 'top_mid':
#add two dummy widgets
arrow_list = (Widget(), sctr, Widget())
elif self_arrow_pos == 'top_left':
arrow_list = (sctr, Widget(), Widget())
elif self_arrow_pos == 'top_right':
arrow_list = (Widget(), Widget(), sctr)
widget_list = (self_arrow_layout, self_content)
elif self_arrow_pos[0] == 'l' or self_arrow_pos[0] == 'r':
self.cols = 3
self.rows = 1
self_arrow_img.width = self.height / 3
self_arrow_layout.orientation = 'vertical'
self_arrow_layout.cols = 1
self_arrow_layout.size_hint = (None, 1)
self_arrow_layout.width = self_arrow_img.height
rotation = -90 if self_arrow_pos[0] == 'l' else 90
self._sctr = sctr = Scatter(do_translation=False,
rotation=rotation,
do_rotation=False,
do_scale=False,
size_hint=(None, None),
size=(self_arrow_img.size))
sctr.add_widget(self_arrow_img)
if self_arrow_pos[-4:] == '_top':
arrow_list = (Widget(size_hint=(1, .07)),
sctr, Widget(size_hint=(1, .3)))
elif self_arrow_pos[-4:] == '_mid':
arrow_list = (Widget(), sctr, Widget())
Clock.schedule_once(self._update_arrow)
elif self_arrow_pos[-7:] == '_bottom':
arrow_list = (Widget(), Widget(), sctr)
if self_arrow_pos[0] == 'l':
widget_list = (self_arrow_layout, self_content)
else:
widget_list = (self_content, self_arrow_layout)
# add widgets to arrow_layout
add = self_arrow_layout.add_widget
for widg in arrow_list:
add(widg)
# add widgets to self
add = self.add_widget
for widg in widget_list:
add(widg)
def _update_arrow(self, *dt):
if self.arrow_pos in ('left_mid', 'right_mid'):
self._sctr.center_y = self._arrow_layout.center_y
|
BillBillBillBill/Tickeys-linux
|
tickeys/kivy_32/kivy/uix/bubble.py
|
Python
|
mit
| 12,590
|