repo_name stringlengths 5 100 | ref stringlengths 12 67 | path stringlengths 4 244 | copies stringlengths 1 8 | content stringlengths 0 1.05M ⌀ |
|---|---|---|---|---|
levibostian/myBlanky | refs/heads/master | googleAppEngine/google/appengine/ext/deferred/__init__.py | 15 | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from deferred import *
if __name__ == "__main__":
main()
|
antoinecarme/pyaf | refs/heads/master | tests/model_control/detailed/transf_BoxCox/model_control_one_enabled_BoxCox_PolyTrend_BestCycle_MLP.py | 1 | import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['BoxCox'] , ['PolyTrend'] , ['BestCycle'] , ['MLP'] ); |
amir-qayyum-khan/lore | refs/heads/master | rest/tests/test_members.py | 2 | """
Unit tests for REST api
"""
from __future__ import unicode_literals
from operator import itemgetter
from django.contrib.auth.models import User
from rest_framework.status import (
HTTP_400_BAD_REQUEST,
HTTP_403_FORBIDDEN,
HTTP_404_NOT_FOUND,
)
from roles.api import (
assign_user_to_repo_group,
remove_user_from_repo_group,
list_users_in_repo,
)
from roles.permissions import GroupTypes, BaseGroupTypes
from rest.tests.base import (
RESTTestCase,
RESTAuthTestCase,
)
class TestMembers(RESTTestCase):
"""
Specific Class for the members because test need different users
"""
def setUp(self):
super(TestMembers, self).setUp()
# Create some new users.
self.author_user = User.objects.create_user(
username="author_user", password=self.PASSWORD
)
self.curator_user = User.objects.create_user(
username="curator_user", password=self.PASSWORD
)
self.admin_user = User.objects.create_user(
username="admin_user", password=self.PASSWORD
)
self.admin_user2 = User.objects.create_user(
username="admin_user2", password=self.PASSWORD
)
# List of all user.
self.all_user_repo = [
self.author_user,
self.curator_user,
self.admin_user
]
self.remove_all_users_from_repo()
# No user is logged in by default.
self.logout()
def remove_all_users_from_repo(self):
"""
Helper method to remove all users from all repo groups.
"""
# Remove all the users from all the groups in the repo.
for user_group in list_users_in_repo(self.repo):
user = User.objects.get(username=user_group.username)
group_type = GroupTypes.get_repo_groupname_by_base(
user_group.group_type
)
remove_user_from_repo_group(user, self.repo, group_type)
def add_users_to_repo(self):
"""
Helper function to add some users to the repository groups.
"""
# add only 3 users to 3 different groups
assign_user_to_repo_group(
self.author_user, self.repo, GroupTypes.REPO_AUTHOR)
assign_user_to_repo_group(
self.curator_user, self.repo, GroupTypes.REPO_CURATOR)
assign_user_to_repo_group(
self.admin_user, self.repo, GroupTypes.REPO_ADMINISTRATOR)
def assert_users_count(self, authors=0, curators=0, admins=0):
"""
Helper function to count users in the different groups
"""
total = authors + curators + admins
self.assertEqual(len(list_users_in_repo(self.repo)), total)
self.assertEqual(
len(list_users_in_repo(self.repo, BaseGroupTypes.AUTHORS)),
authors
)
self.assertEqual(
len(list_users_in_repo(self.repo, BaseGroupTypes.CURATORS)),
curators
)
self.assertEqual(
len(list_users_in_repo(self.repo, BaseGroupTypes.ADMINISTRATORS)),
admins
)
def test_members_get(self):
"""
Members GET
"""
all_members = sorted(
[
{'username': 'admin_user', 'group_type': 'administrators'},
{'username': 'curator_user', 'group_type': 'curators'},
{'username': 'author_user', 'group_type': 'authors'}
],
key=itemgetter('username')
)
author_member = [{'group_type': 'authors'}]
curator_member = [{'group_type': 'curators'}]
admin_member = [{'group_type': 'administrators'}]
author_user_member = [{'username': 'author_user'}]
curator_user_member = [{'username': 'curator_user'}]
admin_user_member = [{'username': 'admin_user'}]
# Populate repo groups.
self.add_users_to_repo()
# all kind of users will get the same results
for repo_user in self.all_user_repo:
self.login(repo_user.username)
# get all the users for all the groups
resp_dict = self.get_members(
urlfor='base',
repo_slug=self.repo.slug
)
self.assertListEqual(
sorted(
resp_dict['results'],
key=itemgetter('username')
),
all_members
)
# get all the groups for specific users
resp_dict = self.get_members(
urlfor='users',
repo_slug=self.repo.slug,
username=self.author_user.username
)
self.assertListEqual(resp_dict['results'], author_member)
resp_dict = self.get_members(
urlfor='users',
repo_slug=self.repo.slug,
username=self.curator_user.username
)
self.assertListEqual(resp_dict['results'], curator_member)
resp_dict = self.get_members(
urlfor='users',
repo_slug=self.repo.slug,
username=self.admin_user.username
)
self.assertListEqual(resp_dict['results'], admin_member)
# get one group for a specific users
# author
resp_dict = self.get_members(
urlfor='users',
repo_slug=self.repo.slug,
username=self.author_user.username,
group_type=BaseGroupTypes.AUTHORS
)
self.assertEqual(resp_dict, author_member[0])
resp_dict = self.get_members(
urlfor='users',
repo_slug=self.repo.slug,
username=self.author_user.username,
group_type=BaseGroupTypes.CURATORS,
expected_status=HTTP_404_NOT_FOUND,
skip_options_head_test=True
)
resp_dict = self.get_members(
urlfor='users',
repo_slug=self.repo.slug,
username=self.author_user.username,
group_type=BaseGroupTypes.ADMINISTRATORS,
expected_status=HTTP_404_NOT_FOUND,
skip_options_head_test=True
)
# curator
resp_dict = self.get_members(
urlfor='users',
repo_slug=self.repo.slug,
username=self.curator_user.username,
group_type=BaseGroupTypes.AUTHORS,
expected_status=HTTP_404_NOT_FOUND,
skip_options_head_test=True
)
resp_dict = self.get_members(
urlfor='users',
repo_slug=self.repo.slug,
username=self.curator_user.username,
group_type=BaseGroupTypes.CURATORS
)
self.assertEqual(resp_dict, curator_member[0])
resp_dict = self.get_members(
urlfor='users',
repo_slug=self.repo.slug,
username=self.curator_user.username,
group_type=BaseGroupTypes.ADMINISTRATORS,
expected_status=HTTP_404_NOT_FOUND,
skip_options_head_test=True
)
# administrator
resp_dict = self.get_members(
urlfor='users',
repo_slug=self.repo.slug,
username=self.admin_user.username,
group_type=BaseGroupTypes.AUTHORS,
expected_status=HTTP_404_NOT_FOUND,
skip_options_head_test=True
)
resp_dict = self.get_members(
urlfor='users',
repo_slug=self.repo.slug,
username=self.admin_user.username,
group_type=BaseGroupTypes.CURATORS,
expected_status=HTTP_404_NOT_FOUND,
skip_options_head_test=True
)
resp_dict = self.get_members(
urlfor='users',
repo_slug=self.repo.slug,
username=self.admin_user.username,
group_type=BaseGroupTypes.ADMINISTRATORS
)
self.assertEqual(resp_dict, admin_member[0])
# get all the users for specific group
resp_dict = self.get_members(
urlfor='groups',
repo_slug=self.repo.slug,
group_type=BaseGroupTypes.AUTHORS
)
self.assertListEqual(resp_dict['results'], author_user_member)
resp_dict = self.get_members(
urlfor='groups',
repo_slug=self.repo.slug,
group_type=BaseGroupTypes.CURATORS
)
self.assertListEqual(resp_dict['results'], curator_user_member)
resp_dict = self.get_members(
urlfor='groups',
repo_slug=self.repo.slug,
group_type=BaseGroupTypes.ADMINISTRATORS
)
self.assertListEqual(resp_dict['results'], admin_user_member)
# get one user for a specific group
# author
resp_dict = self.get_members(
urlfor='groups',
repo_slug=self.repo.slug,
username=self.author_user.username,
group_type=BaseGroupTypes.AUTHORS
)
self.assertEqual(resp_dict, author_user_member[0])
resp_dict = self.get_members(
urlfor='groups',
repo_slug=self.repo.slug,
username=self.author_user.username,
group_type=BaseGroupTypes.CURATORS,
expected_status=HTTP_404_NOT_FOUND,
skip_options_head_test=True
)
resp_dict = self.get_members(
urlfor='groups',
repo_slug=self.repo.slug,
username=self.author_user.username,
group_type=BaseGroupTypes.ADMINISTRATORS,
expected_status=HTTP_404_NOT_FOUND,
skip_options_head_test=True
)
# curator
resp_dict = self.get_members(
urlfor='groups',
repo_slug=self.repo.slug,
username=self.curator_user.username,
group_type=BaseGroupTypes.AUTHORS,
expected_status=HTTP_404_NOT_FOUND,
skip_options_head_test=True
)
resp_dict = self.get_members(
urlfor='groups',
repo_slug=self.repo.slug,
username=self.curator_user.username,
group_type=BaseGroupTypes.CURATORS
)
self.assertEqual(resp_dict, curator_user_member[0])
resp_dict = self.get_members(
urlfor='groups',
repo_slug=self.repo.slug,
username=self.curator_user.username,
group_type=BaseGroupTypes.ADMINISTRATORS,
expected_status=HTTP_404_NOT_FOUND,
skip_options_head_test=True
)
# administrator
resp_dict = self.get_members(
urlfor='groups',
repo_slug=self.repo.slug,
username=self.admin_user.username,
group_type=BaseGroupTypes.AUTHORS,
expected_status=HTTP_404_NOT_FOUND,
skip_options_head_test=True
)
resp_dict = self.get_members(
urlfor='groups',
repo_slug=self.repo.slug,
username=self.admin_user.username,
group_type=BaseGroupTypes.CURATORS,
expected_status=HTTP_404_NOT_FOUND,
skip_options_head_test=True
)
resp_dict = self.get_members(
urlfor='groups',
repo_slug=self.repo.slug,
username=self.admin_user.username,
group_type=BaseGroupTypes.ADMINISTRATORS
)
self.assertEqual(resp_dict, admin_user_member[0])
def test_members_create(self):
"""
Members POST
Testing only using an administrator used logged in.
All the other kind of users don't have permissions to handle users.
(tests for other users in rest_test_authorization.py)
"""
# No users in the repo
self.assert_users_count()
# One user must be admin.
assign_user_to_repo_group(
self.admin_user, self.repo, GroupTypes.REPO_ADMINISTRATOR)
self.assert_users_count(admins=1)
self.login(self.admin_user.username)
# First part: assign group to user
# Assign unexpected group will fail
self.create_member(
urlfor='users',
repo_slug=self.repo.slug,
mem_dict={'group_type': 'foo'},
username=self.author_user.username,
expected_status=HTTP_400_BAD_REQUEST
)
# Assign real group to nonexistent user will fail
self.create_member(
urlfor='users',
repo_slug=self.repo.slug,
mem_dict={'group_type': GroupTypes.REPO_ADMINISTRATOR},
username='foo_username',
expected_status=HTTP_404_NOT_FOUND
)
# Assign authors group
self.create_member(
urlfor='users',
repo_slug=self.repo.slug,
mem_dict={'group_type': BaseGroupTypes.AUTHORS},
username=self.author_user.username,
reverse_str='repo-members-user-group-detail'
)
self.assert_users_count(admins=1, authors=1)
# Repeating the same assignment has no effect
self.create_member(
urlfor='users',
repo_slug=self.repo.slug,
mem_dict={'group_type': BaseGroupTypes.AUTHORS},
username=self.author_user.username,
reverse_str='repo-members-user-group-detail'
)
self.assert_users_count(admins=1, authors=1)
# Assign curators group
self.create_member(
urlfor='users',
repo_slug=self.repo.slug,
mem_dict={'group_type': BaseGroupTypes.CURATORS},
username=self.curator_user.username,
reverse_str='repo-members-user-group-detail'
)
self.assert_users_count(admins=1, curators=1, authors=1)
# Assign admin group
self.create_member(
urlfor='users',
repo_slug=self.repo.slug,
mem_dict={'group_type': BaseGroupTypes.ADMINISTRATORS},
username=self.admin_user2.username,
reverse_str='repo-members-user-group-detail'
)
self.assert_users_count(admins=2, curators=1, authors=1)
# Assign admin group to curator user (user can have multiple groups)
self.create_member(
urlfor='users',
repo_slug=self.repo.slug,
mem_dict={'group_type': BaseGroupTypes.ADMINISTRATORS},
username=self.curator_user.username,
reverse_str='repo-members-user-group-detail'
)
self.assert_users_count(admins=3, curators=1, authors=1)
# Reset users configuration
self.remove_all_users_from_repo()
self.assert_users_count()
assign_user_to_repo_group(
self.admin_user, self.repo, GroupTypes.REPO_ADMINISTRATOR)
self.assert_users_count(admins=1)
# Second part: assign user to group
# Assign unexpected group will fail
self.create_member(
urlfor='groups',
repo_slug=self.repo.slug,
mem_dict={'username': self.author_user.username},
group_type='foo',
expected_status=HTTP_404_NOT_FOUND
)
# Assign real group to nonexistent user will fail
self.create_member(
urlfor='groups',
repo_slug=self.repo.slug,
mem_dict={'username': 'foo_username'},
group_type=BaseGroupTypes.AUTHORS,
expected_status=HTTP_400_BAD_REQUEST
)
# Assign user to authors group
self.create_member(
urlfor='groups',
repo_slug=self.repo.slug,
mem_dict={'username': self.author_user.username},
group_type=BaseGroupTypes.AUTHORS,
reverse_str='repo-members-group-user-detail'
)
self.assert_users_count(admins=1, authors=1)
# Repeating the same assignment has no effect
self.create_member(
urlfor='groups',
repo_slug=self.repo.slug,
mem_dict={'username': self.author_user.username},
group_type=BaseGroupTypes.AUTHORS,
reverse_str='repo-members-group-user-detail'
)
self.assert_users_count(admins=1, authors=1)
# Assign user to curators group
self.create_member(
urlfor='groups',
repo_slug=self.repo.slug,
mem_dict={'username': self.curator_user.username},
group_type=BaseGroupTypes.CURATORS,
reverse_str='repo-members-group-user-detail'
)
self.assert_users_count(admins=1, curators=1, authors=1)
# Assign user to admins group
self.create_member(
urlfor='groups',
repo_slug=self.repo.slug,
mem_dict={'username': self.admin_user2.username},
group_type=BaseGroupTypes.ADMINISTRATORS,
reverse_str='repo-members-group-user-detail'
)
self.assert_users_count(admins=2, curators=1, authors=1)
# Assign curator user to admin group (user can have multiple groups)
self.create_member(
urlfor='groups',
repo_slug=self.repo.slug,
mem_dict={'username': self.curator_user.username},
group_type=BaseGroupTypes.ADMINISTRATORS,
reverse_str='repo-members-group-user-detail'
)
self.assert_users_count(admins=3, curators=1, authors=1)
def test_members_delete(self):
"""
Members DELETE
Testing only using an administrator used logged in.
All the other kind of users don't have permissions to handle users.
(tests for other users in rest_test_authorization.py)
"""
# No users in the repo
self.assert_users_count()
# Populate repo groups.
self.add_users_to_repo()
# Add extra administrator
assign_user_to_repo_group(
self.admin_user2, self.repo, GroupTypes.REPO_ADMINISTRATOR)
self.assert_users_count(admins=2, curators=1, authors=1)
self.login(self.admin_user2)
# First part: delete group from user
# Delete authors group
self.delete_member(
urlfor='users',
repo_slug=self.repo.slug,
username=self.author_user.username,
group_type=BaseGroupTypes.AUTHORS,
)
self.assert_users_count(admins=2, curators=1)
# Delete curators group
self.delete_member(
urlfor='users',
repo_slug=self.repo.slug,
username=self.curator_user.username,
group_type=BaseGroupTypes.CURATORS,
)
self.assert_users_count(admins=2)
# Delete administrators group
self.delete_member(
urlfor='users',
repo_slug=self.repo.slug,
username=self.admin_user.username,
group_type=BaseGroupTypes.ADMINISTRATORS,
)
self.assert_users_count(admins=1)
# Trying to delete the last of administrators group will fail
self.delete_member(
urlfor='users',
repo_slug=self.repo.slug,
username=self.admin_user2.username,
group_type=BaseGroupTypes.ADMINISTRATORS,
expected_status=HTTP_400_BAD_REQUEST
)
self.assert_users_count(admins=1)
# Add back second admin
assign_user_to_repo_group(
self.admin_user, self.repo, GroupTypes.REPO_ADMINISTRATOR)
self.assert_users_count(admins=2)
# Admin is able to delete self if there is another admin
self.delete_member(
urlfor='users',
repo_slug=self.repo.slug,
username=self.admin_user2.username,
group_type=BaseGroupTypes.ADMINISTRATORS
)
self.assert_users_count(admins=1)
# Reset users configuration
# Populate repo groups.
self.add_users_to_repo()
# Add extra administrator
assign_user_to_repo_group(
self.admin_user2, self.repo, GroupTypes.REPO_ADMINISTRATOR)
self.assert_users_count(admins=2, curators=1, authors=1)
# Second part: delete user from group
# Delete user from authors group
self.delete_member(
urlfor='groups',
repo_slug=self.repo.slug,
username=self.author_user.username,
group_type=BaseGroupTypes.AUTHORS,
)
self.assert_users_count(admins=2, curators=1)
# Delete user from curators group
self.delete_member(
urlfor='groups',
repo_slug=self.repo.slug,
username=self.curator_user.username,
group_type=BaseGroupTypes.CURATORS,
)
self.assert_users_count(admins=2)
# Delete user from administrators group
self.delete_member(
urlfor='groups',
repo_slug=self.repo.slug,
username=self.admin_user.username,
group_type=BaseGroupTypes.ADMINISTRATORS,
)
self.assert_users_count(admins=1)
# Trying to delete the last of administrators group will fail
self.delete_member(
urlfor='groups',
repo_slug=self.repo.slug,
username=self.admin_user2.username,
group_type=BaseGroupTypes.ADMINISTRATORS,
expected_status=HTTP_400_BAD_REQUEST
)
self.assert_users_count(admins=1)
# Add back second admin
assign_user_to_repo_group(
self.admin_user, self.repo, GroupTypes.REPO_ADMINISTRATOR)
self.assert_users_count(admins=2)
# Admin is able to delete self if there is another admin
self.delete_member(
urlfor='groups',
repo_slug=self.repo.slug,
username=self.admin_user2.username,
group_type=BaseGroupTypes.ADMINISTRATORS
)
self.assert_users_count(admins=1)
class TestMembersAuthorization(RESTAuthTestCase):
"""Tests for member authorization via REST"""
def test_members_get(self):
"""
Tests for members.
Get requests: an user can see members if has at least basic permissions
"""
# add an user to all groups
for group_type in [GroupTypes.REPO_ADMINISTRATOR,
GroupTypes.REPO_CURATOR, GroupTypes.REPO_AUTHOR]:
assign_user_to_repo_group(
self.user, self.repo, group_type)
self.logout()
# as anonymous
self.get_members(urlfor='base', repo_slug=self.repo.slug,
expected_status=HTTP_403_FORBIDDEN)
# list of all groups for an user
self.get_members(urlfor='users', repo_slug=self.repo.slug,
username=self.user.username,
expected_status=HTTP_403_FORBIDDEN)
for group_type in BaseGroupTypes.all_base_groups():
# specific group for an user
self.get_members(urlfor='users', repo_slug=self.repo.slug,
username=self.user.username,
group_type=group_type,
expected_status=HTTP_403_FORBIDDEN)
# list of all users for a group
self.get_members(urlfor='groups', repo_slug=self.repo.slug,
group_type=group_type,
expected_status=HTTP_403_FORBIDDEN)
# specific user for a group
self.get_members(urlfor='groups', repo_slug=self.repo.slug,
username=self.user.username,
group_type=group_type,
expected_status=HTTP_403_FORBIDDEN)
# any kind of user in the repo groups can retrieve infos
for user in [self.author_user.username, self.curator_user.username,
self.user.username]:
self.logout()
self.login(user)
# list of all groups for an user
self.get_members(urlfor='base', repo_slug=self.repo.slug)
# specific group for an user
self.get_members(urlfor='users', repo_slug=self.repo.slug,
username=self.user.username)
for group_type in BaseGroupTypes.all_base_groups():
self.get_members(urlfor='users', repo_slug=self.repo.slug,
username=self.user.username,
group_type=group_type)
# list of all users for a group
self.get_members(urlfor='groups', repo_slug=self.repo.slug,
group_type=group_type)
# specific user for a group
self.get_members(urlfor='groups', repo_slug=self.repo.slug,
username=self.user.username,
group_type=group_type)
def test_members_create(self):
"""
Tests for members.
Post requests: an user can create members only if s/he is admin
The only URLS where users can be assigned to group or vice versa are
/api/v1/repositories/<repo>/members/groups/<group_type>/users/
/api/v1/repositories/<repo>/members/users/<username>/groups/
"""
self.logout()
mem_dict_user = {'group_type': 'administrators'}
mem_dict_groups = {'username': self.user_norepo.username}
# as anonymous
self.create_member(urlfor='users', repo_slug=self.repo.slug,
mem_dict=mem_dict_user, username=self.user.username,
expected_status=HTTP_403_FORBIDDEN)
for group_type in BaseGroupTypes.all_base_groups():
self.create_member(urlfor='groups', repo_slug=self.repo.slug,
mem_dict=mem_dict_groups,
group_type=group_type,
expected_status=HTTP_403_FORBIDDEN)
# as author
self.login(self.author_user.username)
self.create_member(urlfor='users', repo_slug=self.repo.slug,
mem_dict=mem_dict_user, username=self.user.username,
expected_status=HTTP_403_FORBIDDEN)
for group_type in BaseGroupTypes.all_base_groups():
self.create_member(urlfor='groups', repo_slug=self.repo.slug,
mem_dict=mem_dict_groups,
group_type=group_type,
expected_status=HTTP_403_FORBIDDEN)
# as curator
self.logout()
self.login(self.curator_user.username)
self.create_member(urlfor='users', repo_slug=self.repo.slug,
mem_dict=mem_dict_user, username=self.user.username,
expected_status=HTTP_403_FORBIDDEN)
for group_type in BaseGroupTypes.all_base_groups():
self.create_member(urlfor='groups', repo_slug=self.repo.slug,
mem_dict=mem_dict_groups,
group_type=group_type,
expected_status=HTTP_403_FORBIDDEN)
# as administrator
self.logout()
self.login(self.user.username)
self.create_member(urlfor='users', repo_slug=self.repo.slug,
mem_dict=mem_dict_user, username=self.user.username)
for group_type in BaseGroupTypes.all_base_groups():
self.create_member(urlfor='groups', repo_slug=self.repo.slug,
mem_dict=mem_dict_groups,
group_type=group_type)
def test_members_delete(self):
"""
Tests for members.
Delete requests: an user can delete members only if s/he is admin
The only URLS where users can be deleted from a group or vice versa are
/api/v1/repositories/<repo>/members/groups/<group_type>/users/<username>
/api/v1/repositories/<repo>/members/users/<username>/groups/<group_type>
"""
for group_type in BaseGroupTypes.all_base_groups():
# as anonymous
self.logout()
self.delete_member(urlfor='users', repo_slug=self.repo.slug,
username=self.user.username,
group_type=group_type,
expected_status=HTTP_403_FORBIDDEN)
self.delete_member(urlfor='groups', repo_slug=self.repo.slug,
username=self.user.username,
group_type=group_type,
expected_status=HTTP_403_FORBIDDEN)
# as author
self.login(self.author_user.username)
self.delete_member(urlfor='users', repo_slug=self.repo.slug,
username=self.user.username,
group_type=group_type,
expected_status=HTTP_403_FORBIDDEN)
self.delete_member(urlfor='groups', repo_slug=self.repo.slug,
username=self.user.username,
group_type=group_type,
expected_status=HTTP_403_FORBIDDEN)
# as curator
self.logout()
self.login(self.curator_user.username)
self.delete_member(urlfor='users', repo_slug=self.repo.slug,
username=self.user.username,
group_type=group_type,
expected_status=HTTP_403_FORBIDDEN)
self.delete_member(urlfor='groups', repo_slug=self.repo.slug,
username=self.user.username,
group_type=group_type,
expected_status=HTTP_403_FORBIDDEN)
# different loop because the actual deletion can impact the other tests
for group_type in BaseGroupTypes.all_base_groups():
# as administrator
# deleting a different username because deleting self from admin is
# a special case (handled in different tests)
self.logout()
self.login(self.user.username)
self.delete_member(urlfor='users', repo_slug=self.repo.slug,
username=self.author_user.username,
group_type=group_type)
self.delete_member(urlfor='groups', repo_slug=self.repo.slug,
username=self.author_user.username,
group_type=group_type)
|
carlTLR/gyp | refs/heads/master | test/make_global_settings/full-toolchain/my_nm.py | 203 | #!/usr/bin/env python
# Copyright (c) 2014 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
print sys.argv
with open('RAN_MY_NM', 'w') as f:
f.write('RAN_MY_NM')
|
MattCCS/PyVault | refs/heads/master | site-packages/pycparser/_build_tables.py | 79 | #-----------------------------------------------------------------
# pycparser: _build_tables.py
#
# A dummy for generating the lexing/parsing tables and and
# compiling them into .pyc for faster execution in optimized mode.
# Also generates AST code from the configuration file.
# Should be called from the pycparser directory.
#
# Copyright (C) 2008-2015, Eli Bendersky
# License: BSD
#-----------------------------------------------------------------
# Generate c_ast.py
from _ast_gen import ASTCodeGenerator
ast_gen = ASTCodeGenerator('_c_ast.cfg')
ast_gen.generate(open('c_ast.py', 'w'))
import sys
sys.path[0:0] = ['.', '..']
from pycparser import c_parser
# Generates the tables
#
c_parser.CParser(
lex_optimize=True,
yacc_debug=False,
yacc_optimize=True)
# Load to compile into .pyc
#
import lextab
import yacctab
import c_ast
|
codeforamerica/skillcamp | refs/heads/master | ENV/lib/python2.7/site-packages/sqlalchemy/dialects/mysql/oursql.py | 79 | # mysql/oursql.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: mysql+oursql
:name: OurSQL
:dbapi: oursql
:connectstring: mysql+oursql://<user>:<password>@<host>[:<port>]/<dbname>
:url: http://packages.python.org/oursql/
Unicode
-------
oursql defaults to using ``utf8`` as the connection charset, but other
encodings may be used instead. Like the MySQL-Python driver, unicode support
can be completely disabled::
# oursql sets the connection charset to utf8 automatically; all strings come
# back as utf8 str
create_engine('mysql+oursql:///mydb?use_unicode=0')
To not automatically use ``utf8`` and instead use whatever the connection
defaults to, there is a separate parameter::
# use the default connection charset; all strings come back as unicode
create_engine('mysql+oursql:///mydb?default_charset=1')
# use latin1 as the connection charset; all strings come back as unicode
create_engine('mysql+oursql:///mydb?charset=latin1')
"""
import re
from .base import (BIT, MySQLDialect, MySQLExecutionContext)
from ... import types as sqltypes, util
class _oursqlBIT(BIT):
def result_processor(self, dialect, coltype):
"""oursql already converts mysql bits, so."""
return None
class MySQLExecutionContext_oursql(MySQLExecutionContext):
@property
def plain_query(self):
return self.execution_options.get('_oursql_plain_query', False)
class MySQLDialect_oursql(MySQLDialect):
driver = 'oursql'
if util.py2k:
supports_unicode_binds = True
supports_unicode_statements = True
supports_native_decimal = True
supports_sane_rowcount = True
supports_sane_multi_rowcount = True
execution_ctx_cls = MySQLExecutionContext_oursql
colspecs = util.update_copy(
MySQLDialect.colspecs,
{
sqltypes.Time: sqltypes.Time,
BIT: _oursqlBIT,
}
)
@classmethod
def dbapi(cls):
return __import__('oursql')
def do_execute(self, cursor, statement, parameters, context=None):
"""Provide an implementation of *cursor.execute(statement, parameters)*."""
if context and context.plain_query:
cursor.execute(statement, plain_query=True)
else:
cursor.execute(statement, parameters)
def do_begin(self, connection):
connection.cursor().execute('BEGIN', plain_query=True)
def _xa_query(self, connection, query, xid):
if util.py2k:
arg = connection.connection._escape_string(xid)
else:
charset = self._connection_charset
arg = connection.connection._escape_string(xid.encode(charset)).decode(charset)
arg = "'%s'" % arg
connection.execution_options(_oursql_plain_query=True).execute(query % arg)
# Because mysql is bad, these methods have to be
# reimplemented to use _PlainQuery. Basically, some queries
# refuse to return any data if they're run through
# the parameterized query API, or refuse to be parameterized
# in the first place.
def do_begin_twophase(self, connection, xid):
self._xa_query(connection, 'XA BEGIN %s', xid)
def do_prepare_twophase(self, connection, xid):
self._xa_query(connection, 'XA END %s', xid)
self._xa_query(connection, 'XA PREPARE %s', xid)
def do_rollback_twophase(self, connection, xid, is_prepared=True,
recover=False):
if not is_prepared:
self._xa_query(connection, 'XA END %s', xid)
self._xa_query(connection, 'XA ROLLBACK %s', xid)
def do_commit_twophase(self, connection, xid, is_prepared=True,
recover=False):
if not is_prepared:
self.do_prepare_twophase(connection, xid)
self._xa_query(connection, 'XA COMMIT %s', xid)
# Q: why didn't we need all these "plain_query" overrides earlier ?
# am i on a newer/older version of OurSQL ?
def has_table(self, connection, table_name, schema=None):
return MySQLDialect.has_table(
self,
connection.connect().execution_options(_oursql_plain_query=True),
table_name,
schema
)
def get_table_options(self, connection, table_name, schema=None, **kw):
return MySQLDialect.get_table_options(
self,
connection.connect().execution_options(_oursql_plain_query=True),
table_name,
schema=schema,
**kw
)
def get_columns(self, connection, table_name, schema=None, **kw):
return MySQLDialect.get_columns(
self,
connection.connect().execution_options(_oursql_plain_query=True),
table_name,
schema=schema,
**kw
)
def get_view_names(self, connection, schema=None, **kw):
return MySQLDialect.get_view_names(
self,
connection.connect().execution_options(_oursql_plain_query=True),
schema=schema,
**kw
)
def get_table_names(self, connection, schema=None, **kw):
return MySQLDialect.get_table_names(
self,
connection.connect().execution_options(_oursql_plain_query=True),
schema
)
def get_schema_names(self, connection, **kw):
return MySQLDialect.get_schema_names(
self,
connection.connect().execution_options(_oursql_plain_query=True),
**kw
)
def initialize(self, connection):
return MySQLDialect.initialize(
self,
connection.execution_options(_oursql_plain_query=True)
)
def _show_create_table(self, connection, table, charset=None,
full_name=None):
return MySQLDialect._show_create_table(
self,
connection.contextual_connect(close_with_result=True).
execution_options(_oursql_plain_query=True),
table, charset, full_name
)
def is_disconnect(self, e, connection, cursor):
if isinstance(e, self.dbapi.ProgrammingError):
return e.errno is None and 'cursor' not in e.args[1] and e.args[1].endswith('closed')
else:
return e.errno in (2006, 2013, 2014, 2045, 2055)
def create_connect_args(self, url):
opts = url.translate_connect_args(database='db', username='user',
password='passwd')
opts.update(url.query)
util.coerce_kw_type(opts, 'port', int)
util.coerce_kw_type(opts, 'compress', bool)
util.coerce_kw_type(opts, 'autoping', bool)
util.coerce_kw_type(opts, 'raise_on_warnings', bool)
util.coerce_kw_type(opts, 'default_charset', bool)
if opts.pop('default_charset', False):
opts['charset'] = None
else:
util.coerce_kw_type(opts, 'charset', str)
opts['use_unicode'] = opts.get('use_unicode', True)
util.coerce_kw_type(opts, 'use_unicode', bool)
# FOUND_ROWS must be set in CLIENT_FLAGS to enable
# supports_sane_rowcount.
opts.setdefault('found_rows', True)
ssl = {}
for key in ['ssl_ca', 'ssl_key', 'ssl_cert',
'ssl_capath', 'ssl_cipher']:
if key in opts:
ssl[key[4:]] = opts[key]
util.coerce_kw_type(ssl, key[4:], str)
del opts[key]
if ssl:
opts['ssl'] = ssl
return [[], opts]
def _get_server_version_info(self, connection):
dbapi_con = connection.connection
version = []
r = re.compile('[.\-]')
for n in r.split(dbapi_con.server_info):
try:
version.append(int(n))
except ValueError:
version.append(n)
return tuple(version)
def _extract_error_code(self, exception):
return exception.errno
def _detect_charset(self, connection):
"""Sniff out the character set in use for connection results."""
return connection.connection.charset
def _compat_fetchall(self, rp, charset=None):
"""oursql isn't super-broken like MySQLdb, yaaay."""
return rp.fetchall()
def _compat_fetchone(self, rp, charset=None):
"""oursql isn't super-broken like MySQLdb, yaaay."""
return rp.fetchone()
def _compat_first(self, rp, charset=None):
    # Pass-through; `charset` is accepted only for interface parity.
    return rp.first()
dialect = MySQLDialect_oursql
|
aselle/tensorflow | refs/heads/master | tensorflow/contrib/fused_conv/python/ops/fused_conv2d_bias_activation_op.py | 29 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tensorflow op performing fused conv2d bias_add and relu."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.fused_conv.ops import gen_fused_conv2d_bias_activation_op
from tensorflow.contrib.util import loader
from tensorflow.python.platform import resource_loader
# Load the compiled custom-op shared library bundled with the package; the
# fused op used below is registered as a side effect of loading it.
_fused_conv2d_bias_activation_op_so = loader.load_op_library(
    resource_loader.get_path_to_datafile("_fused_conv2d_bias_activation_op.so"))
# pylint: disable=redefined-builtin
def fused_conv2d_bias_activation(conv_input,
                                 filter,
                                 bias,
                                 strides=None,
                                 padding=None,
                                 conv_input_scale=1.0,
                                 side_input_scale=0.0,
                                 side_input=None,
                                 activation_mode="Relu",
                                 data_format=None,
                                 filter_format=None,
                                 name=None):
  """Fused 2D convolution with bias, activation, and optional side input.

  Evaluates, in a single fused kernel:

      output = ReLU(conv_input_scale * Conv(conv_input, filter) +
                    side_input_scale * side_input + bias)

  Note: in int8 mode the ReLU clips the output to the range [0..127].

  Args:
    conv_input: A `Tensor` laid out according to `data_format`.
    filter: Convolution kernel. "OIHW_VECT_I" layout when `data_format` is
      "NCHW_VECT_C"; otherwise "HWIO".
    bias: A 1-D float32 `Tensor` with one entry per output channel.
    strides: List of 4 ints giving the convolution strides, ordered to match
      `data_format` (NCHW order for "NCHW"/"NCHW_VECT_C", NHWC order for
      "NHWC"). Defaults to [1, 1, 1, 1].
    padding: Either "SAME" or "VALID".
    conv_input_scale: Scalar float32 multiplier for the convolution result.
      Optional, defaults to 1; set it to the quantization scale when
      `data_format` is "NCHW_VECT_C".
    side_input_scale: Scalar float32 multiplier for `side_input`. Optional,
      defaults to 0 (side input disabled).
    side_input: Optional `Tensor` in `data_format` layout — useful for
      implementing ResNet blocks.
    activation_mode: "Relu" (default) or "None". In qint8 mode "None"
      actually clips to [-128, 127] while "Relu" clips to [0, 127].
    data_format: One of:
      "NHWC"        float [batch, height, width, channels] (default)
      "NCHW"        float [batch, channels, height, width]
      "NCHW_VECT_C" qint8 [batch, channels / 4, height, width, channels % 4]
      Performance is worst for "NHWC" and best for "NCHW_VECT_C".
    filter_format: One of:
      "HWIO"        float [kernel_height, kernel_width, input_channels,
                           output_channels] (default)
      "OIHW"        float [output_channels, input_channels, kernel_height,
                           kernel_width]
      "OIHW_VECT_I" qint8 [output_channels, input_channels / 4,
                           kernel_height, kernel_width, input_channels % 4]
    name: Optional name for the operation.

  Returns:
    A `Tensor` in the layout given by `data_format`.
  """
  effective_strides = [1, 1, 1, 1] if strides is None else strides
  # An empty list tells the generated op that no side input is supplied.
  effective_side_input = [] if side_input is None else side_input
  return gen_fused_conv2d_bias_activation_op.fused_conv2d_bias_activation(
      conv_input,
      filter,
      bias,
      effective_side_input,
      conv_input_scale,
      side_input_scale,
      padding=padding,
      strides=effective_strides,
      activation_mode=activation_mode,
      data_format=data_format,
      filter_format=filter_format,
      name=name)
|
def app(environ, start_response):  # pragma: no cover
    """Fixture WSGI app: PATH_INFO selects how many bytes write() emits
    relative to the advertised Content-Length of 9."""
    path_info = environ['PATH_INFO']
    if path_info == '/no_content_length':
        headers = []
    else:
        headers = [('Content-Length', '9')]
    write = start_response('200 OK', headers)
    bodies = {
        '/long_body': b'abcdefghij',   # one byte more than advertised
        '/short_body': b'abcdefgh',    # one byte less than advertised
    }
    write(bodies.get(path_info, b'abcdefghi'))
    return []
|
mcus/SickRage | refs/heads/master | tests/test_searches.py | 3 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
# Author: echel0n <sickrage.tv@gmail.com>
# URL: http://www.github.com/sickragetv/sickrage/
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function
from __future__ import unicode_literals
import os.path
import sys
sys.path.insert(1, os.path.abspath(os.path.join(os.path.dirname(__file__), '../lib')))
sys.path.insert(1, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import unittest
from tests import SiCKRAGETestCase, SiCKRAGETestDBCase
import sickbeard
import sickbeard.common as c
from sickbeard.tv import TVEpisode, TVShow
from sickbeard.providers.generic import GenericProvider
# Single fixture show: TVDB id, season/episodes to search for, and the search
# strings every provider is expected to generate for them.
tests = {"Game of Thrones":
         {"tvdbid": 121361, "s": 5, "e": [10],
          "s_strings": [{"Season": ["Game of Thrones S05"]}],
          "e_strings": [{"Episode": ["Game of Thrones S05E10"]}]}}
class SearchTest(SiCKRAGETestDBCase):
    """Per-provider search tests; the actual test methods are generated and
    attached dynamically below via setattr."""

    def __init__(self, something):
        super(SearchTest, self).__init__(something)
def test_generator(curData, name, provider, forceSearch):
    """Build a test method exercising one provider against one fixture show.

    NOTE(review): statement nesting reconstructed from a whitespace-mangled
    dump; the `continue` statements imply everything below lives inside the
    episode loop — verify against upstream.
    """
    def test(self):
        # Register the fixture show so the provider search-string builders
        # have real DB-backed objects to work from.
        show = TVShow(1, int(curData[b"tvdbid"]))
        show.name = name
        show.quality = c.ANY | c.Quality.UNKNOWN | c.Quality.RAWHDTV
        show.saveToDB()
        sickbeard.showList.append(show)
        for epNumber in curData[b"e"]:
            episode = TVEpisode(show, curData[b"s"], epNumber)
            episode.status = c.WANTED
            # We arent updating scene numbers, so fake it here
            episode.scene_season = curData[b"s"]
            episode.scene_episode = epNumber
            episode.saveToDB()
            provider.show = show
            season_strings = provider._get_season_search_strings(episode)
            episode_strings = provider._get_episode_search_strings(episode)
            # Providers must return a list of dicts; report offenders but
            # keep checking the remaining strings.
            fail = False
            for cur_string in season_strings, episode_strings:
                if not all([isinstance(cur_string, list), isinstance(cur_string[0], dict)]):
                    print(" %s is using a wrong string format!" % provider.name)
                    print(cur_string)
                    fail = True
                    continue
            if fail:
                continue
            try:
                assert (season_strings == curData[b"s_strings"])
                assert (episode_strings == curData[b"e_strings"])
            except AssertionError:
                continue
            search_strings = episode_strings[0]
            # search_strings.update(season_strings[0])
            # search_strings.update({"RSS":['']})
            # print search_strings
            # Only public providers can be queried without credentials.
            if not provider.public:
                continue
            items = provider._doSearch(search_strings)
            if not items:
                print("No results from provider?")
                continue
            title, url = provider._get_title_and_url(items[0])
            for word in show.name.split(" "):
                if not word.lower() in title.lower():
                    print("Show name not in title: %s. URL: %s" % (title, url))
                    continue
            if not url:
                print("url is empty")
                continue
            quality = provider.getQuality(items[0])
            size = provider._get_size(items[0])
            if not show.quality & quality:
                print("Quality not in common.ANY, %r" % quality)
                continue
    return test
# create the test methods
# One test per (manual vs. backlog mode, fixture show, torrent provider);
# each is attached to SearchTest so unittest discovery picks it up.
for forceSearch in (True, False):
    for name, curData in tests.items():
        fname = name.replace(' ', '_')
        for provider in sickbeard.providers.sortedProviderList():
            if provider.providerType == GenericProvider.TORRENT:
                if forceSearch:
                    test_name = 'test_manual_%s_%s_%s' % (fname, curData[b"tvdbid"], provider.name)
                else:
                    test_name = 'test_%s_%s_%s' % (fname, curData[b"tvdbid"], provider.name)
                test = test_generator(curData, name, provider, forceSearch)
                setattr(SearchTest, test_name, test)
if __name__ == '__main__':
    # Allow running this module directly, outside the test harness.
    print("==================")
    print("STARTING - SEARCH TESTS")
    print("==================")
    print("######################################################################")
    unittest.main()
|
jthelin/rainbowstream | refs/heads/master | rainbowstream/c_image.py | 7 | # -*- coding: utf-8 -*-
from PIL import Image
from os.path import join, dirname, getmtime, exists, expanduser
from .config import *
from .py3patch import *
import ctypes
import sys
import os
def call_c():
    """
    Compile (when missing or stale) and load the bundled C helper,
    returning its rgb_to_ansi function.
    """
    library = expanduser('~/.image.so')
    sauce = join(dirname(__file__), 'image.c')
    # Rebuild only when the cached .so is absent or older than the source.
    stale = not exists(library) or getmtime(sauce) > getmtime(library)
    if stale:
        build = "cc -fPIC -shared -o %s %s" % (library, sauce)
        os.system(build + " >/dev/null 2>&1")
    image_c = ctypes.cdll.LoadLibrary(library)
    image_c.init()
    return image_c.rgb_to_ansi
# Resolve the RGB->ANSI converter once at import time; reused per pixel.
rgb2short = call_c()
def pixel_print(pixel):
    """
    Render one pixel as a colored blank cell on stdout.
    """
    red, green, blue = pixel[:3]
    if c['24BIT'] is True:
        escape = '\033[48;2;%d;%d;%dm \033[0m' % (red, green, blue)
    else:
        escape = '\033[48;5;%sm \033[0m' % (rgb2short(red, green, blue))
    sys.stdout.write(escape)
def block_print(higher, lower):
    """
    Pack two vertically adjacent pixels into a single terminal cell by
    drawing a lower-half block ('▄') whose foreground is the lower pixel
    and whose background is the upper pixel.
    """
    r0, g0, b0 = lower[:3]
    r1, g1, b1 = higher[:3]
    if c['24BIT'] is True:
        cell = ('\033[38;2;%d;%d;%dm\033[48;2;%d;%d;%dm▄\033[0m'
                % (r1, g1, b1, r0, g0, b0))
    else:
        i0 = rgb2short(r0, g0, b0)
        i1 = rgb2short(r1, g1, b1)
        cell = '\033[38;5;%sm\033[48;5;%sm▄\033[0m' % (i1, i0)
    sys.stdout.write(cell)
def image_to_display(path, start=None, length=None):
    """
    Render the image at *path* into the terminal with colored half-blocks.

    start: left margin in columns (defaults to c['IMAGE_SHIFT']).
    length: maximum width in columns (defaults to terminal width minus
        both margins).
    """
    # Query the terminal size via stty; assumes stdout is attached to a tty.
    rows, columns = os.popen('stty size', 'r').read().split()
    if not start:
        start = c['IMAGE_SHIFT']
    if not length:
        length = int(columns) - 2 * start
    i = Image.open(path)
    i = i.convert('RGBA')
    w, h = i.size
    i.load()
    # Scale to the available width, preserving aspect ratio.
    width = min(w, length)
    height = int(float(h) * (float(width) / float(w)))
    if c['IMAGE_RESIZE_TO_FIT'] is True:
        # If it image won't fit in the terminal without scrolling shrink it
        # Subtract 3 from rows so the tweet message fits in too.
        # (each character cell holds two image rows, hence the factor 2)
        h = 2 * (int(rows) - 3)
        if height >= h:
            width = int(float(width) * (float(h) / float(height)))
            height = h
    if (height <= 0) or (width <= 0):
        raise ValueError("image has negative dimensions")
    i = i.resize((width, height), Image.ANTIALIAS)
    # Cap displayed rows; the image is only truncated, not re-scaled.
    height = min(height, c['IMAGE_MAX_HEIGHT'])
    for real_y in xrange(height // 2):
        sys.stdout.write(' ' * start)
        for x in xrange(width):
            y = real_y * 2
            p0 = i.getpixel((x, y))
            p1 = i.getpixel((x, y + 1))
            block_print(p1, p0)
        sys.stdout.write('\n')
"""
For direct using purpose
"""
if __name__ == '__main__':
image_to_display(sys.argv[1])
|
darshanthaker/nupic | refs/heads/master | tests/integration/nupic/opf/opf_experiments_test.py | 22 | #!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
from optparse import OptionParser
import os
import sys
import traceback
import unittest2 as unittest
from pkg_resources import resource_filename
from nupic.frameworks.opf.experiment_runner import (
runExperiment, initExperimentPrng)
# Globals
EXCLUDED_EXPERIMENTS = []  # none for now
# Repository root, located relative to this test file.
NUPIC_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                         "..", "..", "..", "..")
# Default directory scanned for example experiments; may be overridden by a
# positional command-line argument in the __main__ block below.
PREDICTION_DIR = os.path.join(NUPIC_DIR, "examples", "opf")
RUN_ALL_ITERATIONS = False  # the -l/--long flag switches this to True
def getAllDirectoriesWithFile(path, filename, excludeDirs):
    """Return directories under *path* (inclusive) that contain *filename*.

    Directories whose basename appears in *excludeDirs*, and directories
    containing an UNDER_DEVELOPMENT marker file, are pruned from the walk
    together with their entire subtrees.

    Args:
      path: root directory for the walk.
      filename: exact file name to look for in each directory.
      excludeDirs: iterable of directory basenames to skip.

    Returns:
      list of directory paths, in os.walk order.
    """
    directoryList = []
    for dirpath, dirnames, filenames in os.walk(path):
        # Iterate a copy so dirnames can be pruned in place, which is how
        # os.walk is told not to descend into a subtree.
        for d in dirnames[:]:
            if d in excludeDirs:
                dirnames.remove(d)
                # print() form works identically on Python 2 and 3
                print("EXCLUDING %s..." % (os.path.join(dirpath, d)))
            # If this directory is UNDER_DEVELOPMENT, exclude it
            elif 'UNDER_DEVELOPMENT' in os.listdir(os.path.join(dirpath, d)):
                dirnames.remove(d)
                print("EXCLUDING %s..." % (os.path.join(dirpath, d)))
        # Membership test instead of scanning filenames one by one.
        if filename in filenames:
            directoryList.append(dirpath)
    return directoryList
def getAllExperimentDirectories(excludedExperiments=()):
    """Return experiment directories (those holding a description.py).

    Args:
      excludedExperiments: extra directory basenames to exclude, in
        addition to the standard non-experiment folders. Defaults to an
        empty tuple (immutable; replaces the mutable-default-argument
        anti-pattern of the original `[]` default).
    """
    excludedDirectories = ['exp', 'inference', 'networks', 'legacy']
    excludedDirectories.extend(excludedExperiments)
    # The walk is rooted at the relative "experiments" directory; callers
    # are expected to chdir into the examples/opf tree first.
    return getAllDirectoriesWithFile(
        path="experiments",
        filename="description.py",
        excludeDirs=excludedDirectories)
def runReducedExperiment(path, reduced=True):
    """
    Run the experiment in the <path>, passing --testMode (reduced
    iteration count) unless *reduced* is False.
    """
    # Seed the experiment PRNG for reproducible runs.
    initExperimentPrng()
    args = [path, '--testMode'] if reduced else [path]
    runExperiment(args)
class OPFExperimentsTest(unittest.TestCase):
    """Smoke-runs every example OPF experiment and fails if any raises."""

    def testExperiments(self):
        os.chdir(PREDICTION_DIR)
        expDirPathList = getAllExperimentDirectories(EXCLUDED_EXPERIMENTS)
        self.assertTrue(len(expDirPathList) > 0)

        failedExperiments = []
        successExperiments = []
        for expDirPath in expDirPathList:
            if os.path.exists(os.path.join(expDirPath, "UNDER_DEVELOPMENT")):
                print("Skipping experiment: %s -- under development" % expDirPath)
                continue
            print("Running experiment: %s" % expDirPath)
            try:
                if RUN_ALL_ITERATIONS:
                    runReducedExperiment(expDirPath, False)
                else:
                    runReducedExperiment(expDirPath)
            except KeyboardInterrupt:
                print("Keyboard interrupt received. Exiting")
                sys.exit(1)
            # Was a bare `except:`; narrowed so SystemExit/GeneratorExit
            # raised by an experiment are no longer swallowed.
            except Exception:
                failedExperiments.append(expDirPath)
                print("")
                print("Unable to run experiment: %s" % expDirPath)
                print("See the trace below-")
                traceback.print_exc()
            else:
                print("Successfully ran experiment: %s" % expDirPath)
                successExperiments.append(expDirPath)

        self.assertEqual(len(failedExperiments), 0)
if __name__ == "__main__":
description = \
"Test all experiments in opf/experiments with reduced iterations.\
Currently excludes %s in the default mode" % str(EXCLUDED_EXPERIMENTS)
parser = OptionParser(description=description)
parser.add_option("-a", "--all", action="store_true",
dest="runAllExperiments", default=False,
help="Don't exclude any experiments.")
parser.add_option("-l", "--long", action="store_true",
dest="runAllIterations", default=False,
help="Don't reduce iterations.")
(options, args) = parser.parse_args()
if len(args) > 0:
PREDICTION_DIR = args[0]
if options.runAllExperiments:
EXCLUDED_EXPERIMENTS=[]
RUN_ALL_ITERATIONS = options.runAllIterations
unittest.main()
|
denny820909/builder | refs/heads/master | lib/python2.7/site-packages/buildbot-0.8.8-py2.7.egg/buildbot/steps/package/__init__.py | 28 | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Portions Copyright Buildbot Team Members
# Portions Copyright Steve 'Ashcrow' Milner <smilner+buildbot@redhat.com>
"""
Steps specific to package formats.
"""
|
agiovann/Constrained_NMF | refs/heads/master | caiman/source_extraction/cnmf/params.py | 1 | import logging
import numpy as np
import os
import pkg_resources
from pprint import pformat
import scipy
from scipy.ndimage.morphology import generate_binary_structure, iterate_structure
import caiman.utils.utils
from ...paths import caiman_datadir
from .utilities import dict_compare, get_file_size
class CNMFParams(object):
"""Class for setting and changing the various parameters."""
def __init__(self, fnames=None, dims=None, dxy=(1, 1),
border_pix=0, del_duplicates=False, low_rank_background=True,
memory_fact=1, n_processes=1, nb_patch=1, p_ssub=2, p_tsub=2,
remove_very_bad_comps=False, rf=None, stride=None,
check_nan=True, n_pixels_per_process=None,
k=30, alpha_snmf=100, center_psf=False, gSig=[5, 5], gSiz=None,
init_iter=2, method_init='greedy_roi', min_corr=.85,
min_pnr=20, gnb=1, normalize_init=True, options_local_NMF=None,
ring_size_factor=1.5, rolling_length=100, rolling_sum=True,
ssub=2, ssub_B=2, tsub=2,
block_size_spat=5000, num_blocks_per_run_spat=20,
block_size_temp=5000, num_blocks_per_run_temp=20,
update_background_components=True,
method_deconvolution='oasis', p=2, s_min=None,
do_merge=True, merge_thresh=0.8,
decay_time=0.4, fr=30, min_SNR=2.5, rval_thr=0.8,
N_samples_exceptionality=None, batch_update_suff_stat=False,
expected_comps=500, iters_shape=5, max_comp_update_shape=np.inf,
max_num_added=5, min_num_trial=5, minibatch_shape=100, minibatch_suff_stat=5,
n_refit=0, num_times_comp_updated=np.inf, simultaneously=False,
sniper_mode=False, test_both=False, thresh_CNN_noisy=0.5,
thresh_fitness_delta=-50, thresh_fitness_raw=None, thresh_overlap=0.5,
update_freq=200, update_num_comps=True, use_dense=True, use_peak_max=True,
only_init_patch=True, var_name_hdf5='mov', max_merge_area=None,
use_corr_img=False, params_dict={},
):
"""Class for setting the processing parameters. All parameters for CNMF, online-CNMF, quality testing,
and motion correction can be set here and then used in the various processing pipeline steps.
The prefered way to set parameters is by using the set function, where a subclass is determined and a
dictionary is passed. The whole dictionary can also be initialized at once by passing a dictionary params_dict
when initializing the CNMFParams object. Direct setting of the positional arguments in CNMFParams is only
present for backwards compatibility reasons and should not be used if possible.
Args:
Any parameter that is not set get a default value specified
by the dictionary default options
DATA PARAMETERS (CNMFParams.data) #####
fnames: list[str]
list of complete paths to files that need to be processed
dims: (int, int), default: computed from fnames
dimensions of the FOV in pixels
fr: float, default: 30
imaging rate in frames per second
decay_time: float, default: 0.4
length of typical transient in seconds
dxy: (float, float)
spatial resolution of FOV in pixels per um
var_name_hdf5: str, default: 'mov'
if loading from hdf5 name of the variable to load
caiman_version: str
version of CaImAn being used
last_commit: str
hash of last commit in the caiman repo
mmap_F: list[str]
paths to F-order memory mapped files after motion correction
mmap_C: str
path to C-order memory mapped file after motion correction
PATCH PARAMS (CNMFParams.patch)######
rf: int or None, default: None
Half-size of patch in pixels. If None, no patches are constructed and the whole FOV is processed jointly
stride: int or None, default: None
Overlap between neighboring patches in pixels.
nb_patch: int, default: 1
Number of (local) background components per patch
border_pix: int, default: 0
Number of pixels to exclude around each border.
low_rank_background: bool, default: True
Whether to update the background using a low rank approximation.
If False all the nonzero elements of the background components are updated using hals
(to be used with one background per patch)
del_duplicates: bool, default: False
Delete duplicate components in the overlaping regions between neighboring patches. If False,
then merging is used.
only_init: bool, default: True
whether to run only the initialization
p_patch: int, default: 0
order of AR dynamics when processing within a patch
skip_refinement: bool, default: False
Whether to skip refinement of components (deprecated?)
remove_very_bad_comps: bool, default: True
Whether to remove (very) bad quality components during patch processing
p_ssub: float, default: 2
Spatial downsampling factor
p_tsub: float, default: 2
Temporal downsampling factor
memory_fact: float, default: 1
unitless number for increasing the amount of available memory
n_processes: int
Number of processes used for processing patches in parallel
in_memory: bool, default: True
Whether to load patches in memory
PRE-PROCESS PARAMS (CNMFParams.preprocess) #############
sn: np.array or None, default: None
noise level for each pixel
noise_range: [float, float], default: [.25, .5]
range of normalized frequencies over which to compute the PSD for noise determination
noise_method: 'mean'|'median'|'logmexp', default: 'mean'
PSD averaging method for computing the noise std
max_num_samples_fft: int, default: 3*1024
Chunk size for computing the PSD of the data (for memory considerations)
n_pixels_per_process: int, default: 1000
Number of pixels to be allocated to each process
compute_g': bool, default: False
whether to estimate global time constant
p: int, default: 2
order of AR indicator dynamics
lags: int, default: 5
number of lags to be considered for time constant estimation
include_noise: bool, default: False
flag for using noise values when estimating g
pixels: list, default: None
pixels to be excluded due to saturation
check_nan: bool, default: True
whether to check for NaNs
INIT PARAMS (CNMFParams.init)###############
K: int, default: 30
number of components to be found (per patch or whole FOV depending on whether rf=None)
SC_kernel: {'heat', 'cos', binary'}, default: 'heat'
kernel for graph affinity matrix
SC_sigma: float, default: 1
variance for SC kernel
SC_thr: float, default: 0,
threshold for affinity matrix
SC_normalize: bool, default: True
standardize entries prior to computing the affinity matrix
SC_use_NN: bool, default: False
sparsify affinity matrix by using only nearest neighbors
SC_nnn: int, default: 20
number of nearest neighbors to use
gSig: [int, int], default: [5, 5]
radius of average neurons (in pixels)
gSiz: [int, int], default: [int(round((x * 2) + 1)) for x in gSig],
half-size of bounding box for each neuron
center_psf: bool, default: False
whether to use 1p data processing mode. Set to true for 1p
ssub: float, default: 2
spatial downsampling factor
tsub: float, default: 2
temporal downsampling factor
nb: int, default: 1
number of background components
lambda_gnmf: float, default: 1.
regularization weight for graph NMF
maxIter: int, default: 5
number of HALS iterations during initialization
method_init: 'greedy_roi'|'corr_pnr'|'sparse_NMF'|'local_NMF' default: 'greedy_roi'
initialization method. use 'corr_pnr' for 1p processing and 'sparse_NMF' for dendritic processing.
min_corr: float, default: 0.85
minimum value of correlation image for determining a candidate component during corr_pnr
min_pnr: float, default: 20
minimum value of psnr image for determining a candidate component during corr_pnr
seed_method: str {'auto', 'manual', 'semi'}
methods for choosing seed pixels during greedy_roi or corr_pnr initialization
'semi' detects nr components automatically and allows to add more manually
if running as notebook 'semi' and 'manual' require a backend that does not
inline figures, e.g. %matplotlib tk
ring_size_factor: float, default: 1.5
radius of ring (*gSig) for computing background during corr_pnr
ssub_B: float, default: 2
downsampling factor for background during corr_pnr
init_iter: int, default: 2
number of iterations during corr_pnr (1p) initialization
nIter: int, default: 5
number of rank-1 refinement iterations during greedy_roi initialization
rolling_sum: bool, default: True
use rolling sum (as opposed to full sum) for determining candidate centroids during greedy_roi
rolling_length: int, default: 100
width of rolling window for rolling sum option
kernel: np.array or None, default: None
user specified template for greedyROI
max_iter_snmf : int, default: 500
maximum number of iterations for sparse NMF initialization
alpha_snmf: float, default: 100
sparse NMF sparsity regularization weight
sigma_smooth_snmf : (float, float, float), default: (.5,.5,.5)
std of Gaussian kernel for smoothing data in sparse_NMF
perc_baseline_snmf: float, default: 20
percentile to be removed from the data in sparse_NMF prior to decomposition
normalize_init: bool, default: True
whether to equalize the movies during initialization
options_local_NMF: dict
dictionary with parameters to pass to local_NMF initializer
SPATIAL PARAMS (CNMFParams.spatial) ##########
method_exp: 'dilate'|'ellipse', default: 'dilate'
method for expanding footprint of spatial components
dist: float, default: 3
expansion factor of ellipse
expandCore: morphological element, default: None(?)
morphological element for expanding footprints under dilate
nb: int, default: 1
number of global background components
n_pixels_per_process: int, default: 1000
number of pixels to be processed by each worker
thr_method: 'nrg'|'max', default: 'nrg'
thresholding method
maxthr: float, default: 0.1
Max threshold
nrgthr: float, default: 0.9999
Energy threshold
extract_cc: bool, default: True
whether to extract connected components during thresholding
(might want to turn to False for dendritic imaging)
medw: (int, int) default: None
window of median filter (set to (3,)*len(dims) in cnmf.fit)
se: np.array or None, default: None
Morphological closing structuring element (set to np.ones((3,)*len(dims), dtype=np.uint8) in cnmf.fit)
ss: np.array or None, default: None
Binary element for determining connectivity (set to np.ones((3,)*len(dims), dtype=np.uint8) in cnmf.fit)
update_background_components: bool, default: True
whether to update the spatial background components
method_ls: 'lasso_lars'|'nnls_L0', default: 'lasso_lars'
'nnls_L0'. Nonnegative least square with L0 penalty
'lasso_lars' lasso lars function from scikit learn
block_size : int, default: 5000
Number of pixels to process at the same time for dot product. Reduce if you face memory problems
num_blocks_per_run: int, default: 20
Parallelization of A'*Y operation
normalize_yyt_one: bool, default: True
Whether to normalize the C and A matrices so that diag(C*C.T) = 1 during update spatial
TEMPORAL PARAMS (CNMFParams.temporal)###########
ITER: int, default: 2
block coordinate descent iterations
method_deconvolution: 'oasis'|'cvx'|'cvxpy', default: 'oasis'
method for solving the constrained deconvolution problem ('oasis','cvx' or 'cvxpy')
if method cvxpy, primary and secondary (if problem unfeasible for approx solution)
solvers: 'ECOS'|'SCS', default: ['ECOS', 'SCS']
solvers to be used with cvxpy, can be 'ECOS','SCS' or 'CVXOPT'
p: 0|1|2, default: 2
order of AR indicator dynamics
memory_efficient: False
bas_nonneg: bool, default: True
whether to set a non-negative baseline (otherwise b >= min(y))
noise_range: [float, float], default: [.25, .5]
range of normalized frequencies over which to compute the PSD for noise determination
noise_method: 'mean'|'median'|'logmexp', default: 'mean'
PSD averaging method for computing the noise std
lags: int, default: 5
number of autocovariance lags to be considered for time constant estimation
optimize_g: bool, default: False
flag for optimizing time constants
fudge_factor: float (close but smaller than 1) default: .96
bias correction factor for discrete time constants
nb: int, default: 1
number of global background components
verbosity: bool, default: False
whether to be verbose
block_size : int, default: 5000
Number of pixels to process at the same time for dot product. Reduce if you face memory problems
num_blocks_per_run: int, default: 20
Parallelization of A'*Y operation
s_min: float or None, default: None
Minimum spike threshold amplitude (computed in the code if used).
MERGE PARAMS (CNMFParams.merge)#####
do_merge: bool, default: True
Whether or not to merge
thr: float, default: 0.8
Trace correlation threshold for merging two components.
merge_parallel: bool, default: False
Perform merging in parallel
max_merge_area: int or None, default: None
maximum area (in pixels) of merged components, used to determine whether to merge components during fitting process
QUALITY EVALUATION PARAMETERS (CNMFParams.quality)###########
min_SNR: float, default: 2.5
trace SNR threshold. Traces with SNR above this will get accepted
SNR_lowest: float, default: 0.5
minimum required trace SNR. Traces with SNR below this will get rejected
rval_thr: float, default: 0.8
space correlation threshold. Components with correlation higher than this will get accepted
rval_lowest: float, default: -1
minimum required space correlation. Components with correlation below this will get rejected
use_cnn: bool, default: True
flag for using the CNN classifier.
min_cnn_thr: float, default: 0.9
CNN classifier threshold. Components with score higher than this will get accepted
cnn_lowest: float, default: 0.1
minimum required CNN threshold. Components with score lower than this will get rejected.
gSig_range: list or integers, default: None
gSig scale values for CNN classifier. In not None, multiple values are tested in the CNN classifier.
ONLINE CNMF (ONACID) PARAMETERS (CNMFParams.online)#####
N_samples_exceptionality: int, default: np.ceil(decay_time*fr),
Number of frames over which trace SNR is computed (usually length of a typical transient)
batch_update_suff_stat: bool, default: False
Whether to update sufficient statistics in batch mode
ds_factor: int, default: 1,
spatial downsampling factor for faster processing (if > 1)
dist_shape_update: bool, default: False,
update shapes in a distributed fashion
epochs: int, default: 1,
number of times to go over data
expected_comps: int, default: 500
number of expected components (for memory allocation purposes)
full_XXt: bool, default: False
save the full residual sufficient statistic matrix for updating W in 1p.
If set to False, a list of submatrices is saved (typically faster).
init_batch: int, default: 200,
length of mini batch used for initialization
init_method: 'bare'|'cnmf'|'seeded', default: 'bare',
initialization method
iters_shape: int, default: 5
Number of block-coordinate decent iterations for each shape update
max_comp_update_shape: int, default: np.inf
Maximum number of spatial components to be updated at each time
max_num_added: int, default: 5
Maximum number of new components to be added in each frame
max_shifts_online: int, default: 10,
Maximum shifts for motion correction during online processing
min_SNR: float, default: 2.5
Trace SNR threshold for accepting a new component
min_num_trial: int, default: 5
Number of new possible components for each frame
minibatch_shape: int, default: 100
Number of frames stored in rolling buffer
minibatch_suff_stat: int, default: 5
mini batch size for updating sufficient statistics
motion_correct: bool, default: True
Whether to perform motion correction during online processing
movie_name_online: str, default: 'online_movie.avi'
Name of saved movie (appended in the data directory)
normalize: bool, default: False
Whether to normalize each frame prior to online processing
n_refit: int, default: 0
Number of additional iterations for computing traces
num_times_comp_updated: int, default: np.inf
opencv_codec: str, default: 'H264'
FourCC video codec for saving movie. Check http://www.fourcc.org/codecs.php
path_to_model: str, default: os.path.join(caiman_datadir(), 'model', 'cnn_model_online.h5')
Path to online CNN classifier
rval_thr: float, default: 0.8
space correlation threshold for accepting a new component
save_online_movie: bool, default: False
Whether to save the results movie
show_movie: bool, default: False
Whether to display movie of online processing
simultaneously: bool, default: False
Whether to demix and deconvolve simultaneously
sniper_mode: bool, default: False
Whether to use the online CNN classifier for screening candidate components (otherwise space
correlation is used)
test_both: bool, default: False
Whether to use both the CNN and space correlation for screening new components
thresh_CNN_noisy: float, default: 0.5,
Threshold for the online CNN classifier
thresh_fitness_delta: float (negative)
Derivative test for detecting traces
thresh_fitness_raw: float (negative), default: computed from min_SNR
Threshold value for testing trace SNR
thresh_overlap: float, default: 0.5
Intersection-over-Union space overlap threshold for screening new components
update_freq: int, default: 200
Update each shape at least once every X frames when in distributed mode
update_num_comps: bool, default: True
Whether to search for new components
use_dense: bool, default: True
Whether to store and represent A and b as a dense matrix
use_peak_max: bool, default: True
Whether to find candidate centroids using skimage's find local peaks function
MOTION CORRECTION PARAMETERS (CNMFParams.motion)####
border_nan: bool or str, default: 'copy'
flag for allowing NaN in the boundaries. True allows NaN, whereas 'copy' copies the value of the
nearest data point.
gSig_filt: int or None, default: None
size of kernel for high pass spatial filtering in 1p data. If None no spatial filtering is performed
is3D: bool, default: False
flag for 3D recordings for motion correction
max_deviation_rigid: int, default: 3
maximum deviation in pixels between rigid shifts and shifts of individual patches
max_shifts: (int, int), default: (6,6)
maximum shifts per dimension in pixels.
min_mov: float or None, default: None
minimum value of movie. If None it get computed.
niter_rig: int, default: 1
number of iterations rigid motion correction.
nonneg_movie: bool, default: True
flag for producing a non-negative movie.
num_frames_split: int, default: 80
split movie every x frames for parallel processing
num_splits_to_process_els, default: [7, None]
num_splits_to_process_rig, default: None
overlaps: (int, int), default: (24, 24)
overlap between patches in pixels in pw-rigid motion correction.
pw_rigid: bool, default: False
flag for performing pw-rigid motion correction.
shifts_opencv: bool, default: True
flag for applying shifts using cubic interpolation (otherwise FFT)
splits_els: int, default: 14
number of splits across time for pw-rigid registration
splits_rig: int, default: 14
number of splits across time for rigid registration
strides: (int, int), default: (96, 96)
how often to start a new patch in pw-rigid registration. Size of each patch will be strides + overlaps
upsample_factor_grid: int, default: 4
motion field upsampling factor during FFT shifts.
use_cuda: bool, default: False
flag for using a GPU.
indices: tuple(slice), default: (slice(None), slice(None))
Use that to apply motion correction only on a part of the FOV
RING CNN PARAMETERS (CNMFParams.ring_CNN)
n_channels: int, default: 2
Number of "ring" kernels
use_bias: bool, default: False
Flag for using bias in the convolutions
use_add: bool, default: False
Flag for using an additive layer
pct: float between 0 and 1, default: 0.01
Quantile used during training with quantile loss function
patience: int, default: 3
Number of epochs to wait before early stopping
max_epochs: int, default: 100
Maximum number of epochs to be used during training
width: int, default: 5
Width of "ring" kernel
loss_fn: str, default: 'pct'
Loss function specification ('pct' for quantile loss function,
'mse' for mean squared error)
lr: float, default: 1e-3
(initial) learning rate
lr_scheduler: function, default: None
Learning rate scheduler function
path_to_model: str, default: None
Path to saved weights (if training then path to saved model weights)
remove_activity: bool, default: False
Flag for removing activity of last frame prior to background extraction
reuse_model: bool, default: False
Flag for reusing an already trained model (saved in path to model)
"""
self.data = {
'fnames': fnames,
'dims': dims,
'fr': fr,
'decay_time': decay_time,
'dxy': dxy,
'var_name_hdf5': var_name_hdf5,
'caiman_version': pkg_resources.get_distribution('caiman').version,
'last_commit': None,
'mmap_F': None,
'mmap_C': None
}
self.patch = {
'border_pix': border_pix,
'del_duplicates': del_duplicates,
'in_memory': True,
'low_rank_background': low_rank_background,
'memory_fact': memory_fact,
'n_processes': n_processes,
'nb_patch': nb_patch,
'only_init': only_init_patch,
'p_patch': 0, # AR order within patch
'remove_very_bad_comps': remove_very_bad_comps,
'rf': rf,
'skip_refinement': False,
'p_ssub': p_ssub, # spatial downsampling factor
'stride': stride,
'p_tsub': p_tsub, # temporal downsampling factor
}
self.preprocess = {
'check_nan': check_nan,
'compute_g': False, # flag for estimating global time constant
'include_noise': False, # flag for using noise values when estimating g
# number of autocovariance lags to be considered for time constant estimation
'lags': 5,
'max_num_samples_fft': 3 * 1024,
'n_pixels_per_process': n_pixels_per_process,
'noise_method': 'mean', # averaging method ('mean','median','logmexp')
'noise_range': [0.25, 0.5], # range of normalized frequencies over which to average
'p': p, # order of AR indicator dynamics
'pixels': None, # pixels to be excluded due to saturation
'sn': None, # noise level for each pixel
}
self.init = {
'K': k, # number of components,
'SC_kernel': 'heat', # kernel for graph affinity matrix
'SC_sigma' : 1, # std for SC kernel
'SC_thr': 0, # threshold for affinity matrix
'SC_normalize': True, # standardize entries prior to
# computing affinity matrix
'SC_use_NN': False, # sparsify affinity matrix by using
# only nearest neighbors
'SC_nnn': 20, # number of nearest neighbors to use
'alpha_snmf': alpha_snmf,
'center_psf': center_psf,
'gSig': gSig,
# size of bounding box
'gSiz': gSiz,
'init_iter': init_iter,
'kernel': None, # user specified template for greedyROI
'lambda_gnmf' :1, # regularization weight for graph NMF
'maxIter': 5, # number of HALS iterations
'max_iter_snmf': 500,
'method_init': method_init, # can be greedy_roi, corr_pnr sparse_nmf, local_NMF
'min_corr': min_corr,
'min_pnr': min_pnr,
'nIter': 5, # number of refinement iterations
'nb': gnb, # number of global background components
# whether to pixelwise equalize the movies during initialization
'normalize_init': normalize_init,
# dictionary with parameters to pass to local_NMF initializaer
'options_local_NMF': options_local_NMF,
'perc_baseline_snmf': 20,
'ring_size_factor': ring_size_factor,
'rolling_length': rolling_length,
'rolling_sum': rolling_sum,
'seed_method': 'auto',
'sigma_smooth_snmf': (.5, .5, .5),
'ssub': ssub, # spatial downsampling factor
'ssub_B': ssub_B,
'tsub': tsub, # temporal downsampling factor
}
self.spatial = {
'block_size_spat': block_size_spat, # number of pixels to parallelize residual computation ** DECREASE IF MEMORY ISSUES
'dist': 3, # expansion factor of ellipse
'expandCore': iterate_structure(generate_binary_structure(2, 1), 2).astype(int),
# Flag to extract connected components (might want to turn to False for dendritic imaging)
'extract_cc': True,
'maxthr': 0.1, # Max threshold
'medw': None, # window of median filter
# method for determining footprint of spatial components ('ellipse' or 'dilate')
'method_exp': 'dilate',
# 'nnls_L0'. Nonnegative least square with L0 penalty
# 'lasso_lars' lasso lars function from scikit learn
'method_ls': 'lasso_lars',
# number of pixels to be processed by each worker
'n_pixels_per_process': n_pixels_per_process,
'nb': gnb, # number of background components
'normalize_yyt_one': True,
'nrgthr': 0.9999, # Energy threshold
'num_blocks_per_run_spat': num_blocks_per_run_spat, # number of process to parallelize residual computation ** DECREASE IF MEMORY ISSUES
'se': np.ones((3, 3), dtype='uint8'), # Morphological closing structuring element
'ss': np.ones((3, 3), dtype='uint8'), # Binary element for determining connectivity
'thr_method': 'nrg', # Method of thresholding ('max' or 'nrg')
# whether to update the background components in the spatial phase
'update_background_components': update_background_components,
}
self.temporal = {
'ITER': 2, # block coordinate descent iterations
# flag for setting non-negative baseline (otherwise b >= min(y))
'bas_nonneg': False,
# number of pixels to process at the same time for dot product. Make it
# smaller if memory problems
'block_size_temp': block_size_temp, # number of pixels to parallelize residual computation ** DECREASE IF MEMORY ISSUES
# bias correction factor (between 0 and 1, close to 1)
'fudge_factor': .96,
# number of autocovariance lags to be considered for time constant estimation
'lags': 5,
'optimize_g': False, # flag for optimizing time constants
'memory_efficient': False,
# method for solving the constrained deconvolution problem ('oasis','cvx' or 'cvxpy')
# if method cvxpy, primary and secondary (if problem unfeasible for approx
# solution) solvers to be used with cvxpy, can be 'ECOS','SCS' or 'CVXOPT'
'method_deconvolution': method_deconvolution, # 'cvxpy', # 'oasis'
'nb': gnb, # number of background components
'noise_method': 'mean', # averaging method ('mean','median','logmexp')
'noise_range': [.25, .5], # range of normalized frequencies over which to average
'num_blocks_per_run_temp': num_blocks_per_run_temp, # number of process to parallelize residual computation ** DECREASE IF MEMORY ISSUES
'p': p, # order of AR indicator dynamics
's_min': s_min, # minimum spike threshold
'solvers': ['ECOS', 'SCS'],
'verbosity': False,
}
self.merging = {
'do_merge': do_merge,
'merge_thr': merge_thresh,
'merge_parallel': False,
'max_merge_area': max_merge_area
}
self.quality = {
'SNR_lowest': 0.5, # minimum accepted SNR value
'cnn_lowest': 0.1, # minimum accepted value for CNN classifier
'gSig_range': None, # range for gSig scale for CNN classifier
'min_SNR': min_SNR, # transient SNR threshold
'min_cnn_thr': 0.9, # threshold for CNN classifier
'rval_lowest': -1, # minimum accepted space correlation
'rval_thr': rval_thr, # space correlation threshold
'use_cnn': True, # use CNN based classifier
'use_ecc': False, # flag for eccentricity based filtering
'max_ecc': 3
}
self.online = {
'N_samples_exceptionality': N_samples_exceptionality, # timesteps to compute SNR
'batch_update_suff_stat': batch_update_suff_stat,
'dist_shape_update': False, # update shapes in a distributed way
'ds_factor': 1, # spatial downsampling for faster processing
'epochs': 1, # number of epochs
'expected_comps': expected_comps, # number of expected components
'full_XXt': False, # store entire XXt matrix (as opposed to a list of sub-matrices)
'init_batch': 200, # length of mini batch for initialization
'init_method': 'bare', # initialization method for first batch,
'iters_shape': iters_shape, # number of block-CD iterations
'max_comp_update_shape': max_comp_update_shape,
'max_num_added': max_num_added, # maximum number of new components for each frame
'max_shifts_online': 10, # maximum shifts during motion correction
'min_SNR': min_SNR, # minimum SNR for accepting a new trace
'min_num_trial': min_num_trial, # number of mew possible components for each frame
'minibatch_shape': minibatch_shape, # number of frames in each minibatch
'minibatch_suff_stat': minibatch_suff_stat,
'motion_correct': True, # flag for motion correction
'movie_name_online': 'online_movie.mp4', # filename of saved movie (appended to directory where data is located)
'normalize': False, # normalize frame
'n_refit': n_refit, # Additional iterations to simultaneously refit
# path to CNN model for testing new comps
'num_times_comp_updated': num_times_comp_updated,
'opencv_codec': 'H264', # FourCC video codec for saving movie. Check http://www.fourcc.org/codecs.php
'path_to_model': os.path.join(caiman_datadir(), 'model',
'cnn_model_online.h5'),
'ring_CNN': False, # flag for using a ring CNN background model
'rval_thr': rval_thr, # space correlation threshold
'save_online_movie': False, # flag for saving online movie
'show_movie': False, # display movie online
'simultaneously': simultaneously, # demix and deconvolve simultaneously
'sniper_mode': sniper_mode, # flag for using CNN
'stop_detection': False, # flag for stop detecting new neurons at the last epoch
'test_both': test_both, # flag for using both CNN and space correlation
'thresh_CNN_noisy': thresh_CNN_noisy, # threshold for online CNN classifier
'thresh_fitness_delta': thresh_fitness_delta,
'thresh_fitness_raw': thresh_fitness_raw, # threshold for trace SNR (computed below)
'thresh_overlap': thresh_overlap,
'update_freq': update_freq, # update every shape at least once every update_freq steps
'update_num_comps': update_num_comps, # flag for searching for new components
'use_corr_img': use_corr_img, # flag for using correlation image to detect new components
'use_dense': use_dense, # flag for representation and storing of A and b
'use_peak_max': use_peak_max, # flag for finding candidate centroids
'W_update_factor': 1, # update W less often than shapes by a given factor
}
self.motion = {
'border_nan': 'copy', # flag for allowing NaN in the boundaries
'gSig_filt': None, # size of kernel for high pass spatial filtering in 1p data
'is3D': False, # flag for 3D recordings for motion correction
'max_deviation_rigid': 3, # maximum deviation between rigid and non-rigid
'max_shifts': (6, 6), # maximum shifts per dimension (in pixels)
'min_mov': None, # minimum value of movie
'niter_rig': 1, # number of iterations rigid motion correction
'nonneg_movie': True, # flag for producing a non-negative movie
'num_frames_split': 80, # split across time every x frames
'num_splits_to_process_els': None, # DO NOT MODIFY
'num_splits_to_process_rig': None, # DO NOT MODIFY
'overlaps': (32, 32), # overlap between patches in pw-rigid motion correction
'pw_rigid': False, # flag for performing pw-rigid motion correction
'shifts_opencv': True, # flag for applying shifts using cubic interpolation (otherwise FFT)
'splits_els': 14, # number of splits across time for pw-rigid registration
'splits_rig': 14, # number of splits across time for rigid registration
'strides': (96, 96), # how often to start a new patch in pw-rigid registration
'upsample_factor_grid': 4, # motion field upsampling factor during FFT shifts
'use_cuda': False, # flag for using a GPU
'indices': (slice(None), slice(None)) # part of FOV to be corrected
}
self.ring_CNN = {
'n_channels' : 2, # number of "ring" kernels
'use_bias' : False, # use bias in the convolutions
'use_add' : False, # use an additive layer
'pct' : 0.01, # quantile loss specification
'patience' : 3, # patience for early stopping
'max_epochs': 100, # maximum number of epochs
'width': 5, # width of "ring" kernel
'loss_fn': 'pct', # loss function
'lr': 1e-3, # (initial) learning rate
'lr_scheduler': None, # learning rate scheduler function
'path_to_model': None, # path to saved weights
'remove_activity': False, # remove activity of last frame prior to background extraction
'reuse_model': False # reuse an already trained model
}
self.change_params(params_dict)
def check_consistency(self):
    """ Populates the params object with some dataset dependent values
    and ensures that certain constraints are satisfied.

    Mutates the .data, .motion, .online, .init, .patch and .spatial group
    dictionaries in place; called after every change_params().
    """
    self.data['last_commit'] = '-'.join(caiman.utils.utils.get_caiman_version())
    # Infer FOV dimensions from the file itself when not supplied explicitly.
    if self.data['dims'] is None and self.data['fnames'] is not None:
        self.data['dims'] = get_file_size(self.data['fnames'], var_name_hdf5=self.data['var_name_hdf5'])[0]
    if self.data['fnames'] is not None:
        # Normalize a single filename string into a one-element list.
        if isinstance(self.data['fnames'], str):
            self.data['fnames'] = [self.data['fnames']]
        # T = number of frames; get_file_size returns (dims, T) for 2D data,
        # so the index used depends on whether the recording is volumetric.
        if self.motion['is3D']:
            T = get_file_size(self.data['fnames'], var_name_hdf5=self.data['var_name_hdf5'])[0][0]
        else:
            T = get_file_size(self.data['fnames'], var_name_hdf5=self.data['var_name_hdf5'])[1]
        # With multiple files T is a list; use the first file's length.
        if len(self.data['fnames']) > 1:
            T = T[0]
        # Derive how many temporal splits motion correction should use so that
        # each chunk has at least ~num_frames_split (min 10) frames.
        num_splits = max(T//max(self.motion['num_frames_split'], 10), 1)
        self.motion['splits_els'] = num_splits
        self.motion['splits_rig'] = num_splits
        # Save the online-processing movie next to the input data.
        if isinstance(self.data['fnames'][0],tuple):
            self.online['movie_name_online'] = os.path.join(os.path.dirname(self.data['fnames'][0][0]), self.online['movie_name_online'])
        else:
            self.online['movie_name_online'] = os.path.join(os.path.dirname(self.data['fnames'][0]), self.online['movie_name_online'])
    # Default the exceptionality window to one typical transient (fr * decay_time).
    if self.online['N_samples_exceptionality'] is None:
        self.online['N_samples_exceptionality'] = np.ceil(self.data['fr'] * self.data['decay_time']).astype('int')
    # Default trace-SNR threshold derived from min_SNR via the log normal CDF.
    if self.online['thresh_fitness_raw'] is None:
        self.online['thresh_fitness_raw'] = scipy.special.log_ndtr(
            -self.online['min_SNR']) * self.online['N_samples_exceptionality']
    # Rescale the allowed online shifts by the spatial downsampling factor.
    self.online['max_shifts_online'] = (np.array(self.online['max_shifts_online']) / self.online['ds_factor']).astype(int)
    if self.init['gSig'] is None:
        self.init['gSig'] = [-1, -1]
    if self.init['gSiz'] is None:
        self.init['gSiz'] = [2*gs + 1 for gs in self.init['gSig']]
    # Force gSiz entries to be odd (bump even values up by one).
    self.init['gSiz'] = tuple([gs + 1 if gs % 2 == 0 else gs for gs in self.init['gSiz']])
    if self.patch['rf'] is not None:
        if self.patch['rf'] <= self.init['gSiz'][0]:
            # NOTE(review): this warning says rf is being changed, but no
            # assignment to self.patch['rf'] follows — the value is left
            # as-is. Confirm whether the assignment was dropped intentionally.
            logging.warning("Changing rf from {0} to {1} ".format(self.patch['rf'], 2*self.init['gSiz'][0]) +
                            "because the constraint rf > gSiz was not satisfied.")
    # if self.motion['gSig_filt'] is None:
    #     self.motion['gSig_filt'] = self.init['gSig']
    # A non-positive number of background components forces patch-level
    # settings to match (no separate low-rank background per patch).
    if self.init['nb'] <= 0 and (self.patch['nb_patch'] != self.init['nb'] or
                                 self.patch['low_rank_background'] is not None):
        logging.warning("gnb={0}, hence setting keys nb_patch ".format(self.init['nb']) +
                        "and low_rank_background in group patch automatically.")
        self.set('patch', {'nb_patch': self.init['nb'], 'low_rank_background': None})
    # gnb == -1 means the background is returned as b0; it cannot be updated.
    if self.init['nb'] == -1 and self.spatial['update_background_components']:
        logging.warning("gnb=-1, hence setting key update_background_components " +
                        "in group spatial automatically to False.")
        self.set('spatial', {'update_background_components': False})
    # CNMF-E's ring background model is incompatible with movie equalization.
    if self.init['method_init'] == 'corr_pnr' and self.init['ring_size_factor'] is not None \
            and self.init['normalize_init']:
        logging.warning("using CNMF-E's ringmodel for background hence setting key " +
                        "normalize_init in group init automatically to False.")
        self.set('init', {'normalize_init': False})
    # Volumetric data needs length-3 tuples for these motion parameters;
    # isotropic 2D values are broadcast, anisotropic ones are rejected.
    if self.motion['is3D']:
        for a in ('indices', 'max_shifts', 'strides', 'overlaps'):
            if len(self.motion[a]) != 3:
                if self.motion[a][0] == self.motion[a][1]:
                    self.motion[a] = (self.motion[a][0],) * 3
                    logging.warning("is3D=True, hence setting key " + a +
                                    " automatically to " + str(self.motion[a]))
                else:
                    raise ValueError(a + ' has to be a tuple of length 3 for volumetric 3D data')
def set(self, group, val_dict, set_if_not_exists=False, verbose=False):
    """ Add key-value pairs to a group. Existing key-value pairs will be overwritten
    if specified in val_dict, but not deleted.

    Args:
        group: The name of the group.
        val_dict: A dictionary with key-value pairs to be set for the group.
        set_if_not_exists: Whether to set a key-value pair in a group if the key does not currently exist in the group.
        verbose: Whether to warn about keys that were skipped because they did not exist.

    Raises:
        KeyError: If no group with that name exists on this object.
    """
    if not hasattr(self, group):
        raise KeyError('No group in CNMFParams named {0}'.format(group))
    d = getattr(self, group)
    for k, v in val_dict.items():
        if k not in d:
            if set_if_not_exists:
                # BUGFIX: previously a brand-new key fell through to the
                # np.any(d[k] != v) comparison below and raised KeyError,
                # making set_if_not_exists=True unusable.
                d[k] = v
            elif verbose:
                logging.warning(
                    "NOT setting value of key {0} in group {1}, because no prior key existed...".format(k, group))
        else:
            if np.any(d[k] != v):
                logging.info(
                    "Changing key {0} in group {1} from {2} to {3}".format(k, group, d[k], v))
            d[k] = v
def get(self, group, key):
    """Return the value stored under ``key`` in parameter group ``group``.

    Args:
        group: Name of the parameter group.
        key: Key of interest inside that group.

    Returns:
        The stored value for the group/key combination.

    Raises:
        KeyError: If either the group or the key does not exist.
    """
    if hasattr(self, group):
        group_dict = getattr(self, group)
        if key in group_dict:
            return group_dict[key]
        raise KeyError('No key {0} in group {1}'.format(key, group))
    raise KeyError('No group in CNMFParams named {0}'.format(group))
def get_group(self, group):
    """Return the full key-value dictionary of one parameter group.

    Args:
        group: Name of the group.

    Raises:
        KeyError: If no group with that name exists.
    """
    if hasattr(self, group):
        return getattr(self, group)
    raise KeyError('No group in CNMFParams named {0}'.format(group))
def __eq__(self, other):
    """Two parameter objects are equal iff every group holds exactly
    the same key-value pairs (checked via dict_compare)."""
    if type(other) != CNMFParams:
        return False
    groups_self = self.to_dict()
    groups_other = other.to_dict()
    # Reject immediately if the set of group names differs.
    if len(np.setdiff1d(groups_self.keys(), groups_other.keys())) > 0:
        return False
    for group_name, group_self in groups_self.items():
        added, removed, modified, same = dict_compare(group_self, groups_other[group_name])
        # Every key must be present, unmodified, and identical on both sides.
        if added or removed or modified or len(same) != len(group_self):
            return False
    return True
def to_dict(self):
    """Return the params object as a dictionary with one sub-dictionary
    per parameter category."""
    name_map = (
        ('data', 'data'),
        ('spatial_params', 'spatial'),
        ('temporal_params', 'temporal'),
        ('init_params', 'init'),
        ('preprocess_params', 'preprocess'),
        ('patch_params', 'patch'),
        ('online', 'online'),
        ('quality', 'quality'),
        ('merging', 'merging'),
        ('motion', 'motion'),
        ('ring_CNN', 'ring_CNN'),
    )
    return {out_key: getattr(self, attr) for out_key, attr in name_map}
def __repr__(self):
    """Pretty-print every parameter group, one pformat()-ed section each."""
    sections = []
    for group_name, group_dict in self.to_dict().items():
        sections.append('{}:\n\n{}'.format(group_name, pformat(group_dict)))
    return 'CNMFParams:\n\n' + '\n\n'.join(sections)
def change_params(self, params_dict, verbose=False):
    """ Update the params object from a single flat dictionary.

    Every group is scanned and any key that matches is updated; keys that
    match no group at all trigger a warning. Consistency checks are re-run
    afterwards.

    Args:
        params_dict: dictionary with parameters to be changed and new values
        verbose: bool (False). Print message for all keys

    Returns:
        self
    """
    for group_name in list(self.__dict__.keys()):
        self.set(group_name, params_dict, verbose=verbose)
    # Warn about keys that did not land in any group.
    for key in params_dict:
        if not any(key in getattr(self, group_name) for group_name in self.__dict__):
            logging.warning('No parameter {0} found!'.format(key))
    self.check_consistency()
    return self
|
joolswills/plugin.video.youtube | refs/heads/master | resources/lib/youtube/youtube_exceptions.py | 27 | from resources.lib import kodion
__author__ = 'bromix'
class LoginException(kodion.KodionException):
    """kodion.KodionException subclass used to signal login/authentication
    failures separately from generic YouTube errors."""
    pass
class YouTubeException(kodion.KodionException):
    """kodion.KodionException subclass for general YouTube plugin errors."""
    pass
|
wfxiang08/django197 | refs/heads/master | django/db/migrations/operations/models.py | 48 | from __future__ import unicode_literals
from django.db import models
from django.db.migrations.operations.base import Operation
from django.db.migrations.state import ModelState
from django.db.models.options import normalize_together
from django.utils import six
from django.utils.functional import cached_property
class CreateModel(Operation):
    """
    Create a model's table.
    """

    # Tells the migration writer which constructor kwargs should be expanded
    # over multiple lines when serializing this operation.
    serialization_expand_args = ['fields', 'options', 'managers']

    def __init__(self, name, fields, options=None, bases=None, managers=None):
        # name: the model class name; fields: list of (field_name, field) pairs.
        self.name = name
        self.fields = fields
        self.options = options or {}
        # Default base class is models.Model when none are given explicitly.
        self.bases = bases or (models.Model,)
        self.managers = managers or []

    @cached_property
    def name_lower(self):
        # Cached lowercase form; migration-state model lookups are
        # case-insensitive.
        return self.name.lower()

    def deconstruct(self):
        """Return (class name, args, kwargs) so the migration writer can
        serialize this operation back into a migration file. Defaulted
        kwargs are omitted to keep generated migrations minimal."""
        kwargs = {
            'name': self.name,
            'fields': self.fields,
        }
        if self.options:
            kwargs['options'] = self.options
        if self.bases and self.bases != (models.Model,):
            kwargs['bases'] = self.bases
        if self.managers and self.managers != [('objects', models.Manager())]:
            kwargs['managers'] = self.managers
        return (
            self.__class__.__name__,
            [],
            kwargs
        )

    def state_forwards(self, app_label, state):
        # Register the new model in the in-memory project state; copies of
        # the mutable arguments are taken so the state owns its data.
        state.add_model(ModelState(
            app_label,
            self.name,
            list(self.fields),
            dict(self.options),
            tuple(self.bases),
            list(self.managers),
        ))

    def database_forwards(self, app_label, schema_editor, from_state, to_state):
        # Create the table, but only on databases the router allows this
        # model to migrate to.
        model = to_state.apps.get_model(app_label, self.name)
        if self.allow_migrate_model(schema_editor.connection.alias, model):
            schema_editor.create_model(model)

    def database_backwards(self, app_label, schema_editor, from_state, to_state):
        # Reverse of forwards: drop the table (router check applies here too).
        model = from_state.apps.get_model(app_label, self.name)
        if self.allow_migrate_model(schema_editor.connection.alias, model):
            schema_editor.delete_model(model)

    def describe(self):
        # Human-readable one-liner, e.g. for `migrate --plan` output.
        return "Create %smodel %s" % ("proxy " if self.options.get("proxy", False) else "", self.name)

    def references_model(self, name, app_label=None):
        """Return True if this operation references the model called *name*:
        as the created model itself, as a string base class, or as the
        string target of an FK/M2M field."""
        strings_to_check = [self.name]
        # Check we didn't inherit from the model
        for base in self.bases:
            if isinstance(base, six.string_types):
                strings_to_check.append(base.split(".")[-1])
        # Check we have no FKs/M2Ms with it
        for fname, field in self.fields:
            if field.remote_field:
                if isinstance(field.remote_field.model, six.string_types):
                    strings_to_check.append(field.remote_field.model.split(".")[-1])
        # Now go over all the strings and compare them
        for string in strings_to_check:
            if string.lower() == name.lower():
                return True
        return False
class DeleteModel(Operation):
    """
    Drops a model's table.
    """

    def __init__(self, name):
        self.name = name

    @cached_property
    def name_lower(self):
        return self.name.lower()

    def deconstruct(self):
        # Serialized form: (class name, positional args, keyword args).
        return (self.__class__.__name__, [], {'name': self.name})

    def state_forwards(self, app_label, state):
        state.remove_model(app_label, self.name_lower)

    def database_forwards(self, app_label, schema_editor, from_state, to_state):
        # Forwards: the model only exists in the *old* state, so drop it
        # from there.
        target = from_state.apps.get_model(app_label, self.name)
        if self.allow_migrate_model(schema_editor.connection.alias, target):
            schema_editor.delete_model(target)

    def database_backwards(self, app_label, schema_editor, from_state, to_state):
        # Backwards: recreate the table from the post-migration state.
        target = to_state.apps.get_model(app_label, self.name)
        if self.allow_migrate_model(schema_editor.connection.alias, target):
            schema_editor.create_model(target)

    def references_model(self, name, app_label=None):
        return self.name_lower == name.lower()

    def describe(self):
        return "Delete model %s" % (self.name, )
class RenameModel(Operation):
    """
    Renames a model.
    """
    def __init__(self, old_name, new_name):
        self.old_name = old_name
        self.new_name = new_name
    @cached_property
    def old_name_lower(self):
        return self.old_name.lower()
    @cached_property
    def new_name_lower(self):
        return self.new_name.lower()
    def deconstruct(self):
        """Return (class name, args, kwargs) for migration serialization."""
        kwargs = {
            'old_name': self.old_name,
            'new_name': self.new_name,
        }
        return (
            self.__class__.__name__,
            [],
            kwargs
        )
    def state_forwards(self, app_label, state):
        """Rename the model inside the in-memory project state and repoint
        every FK/M2M that referenced the old name."""
        apps = state.apps
        model = apps.get_model(app_label, self.old_name)
        model._meta.apps = apps
        # Get all of the related objects we need to repoint
        all_related_objects = (
            f for f in model._meta.get_fields(include_hidden=True)
            if f.auto_created and not f.concrete and (not f.hidden or f.many_to_many)
        )
        # Rename the model
        state.models[app_label, self.new_name_lower] = state.models[app_label, self.old_name_lower]
        state.models[app_label, self.new_name_lower].name = self.new_name
        state.remove_model(app_label, self.old_name_lower)
        # Repoint the FKs and M2Ms pointing to us
        for related_object in all_related_objects:
            if related_object.model is not model:
                # The model being renamed does not participate in this relation
                # directly. Rather, a superclass does.
                continue
            # Use the new related key for self referential related objects.
            if related_object.related_model == model:
                related_key = (app_label, self.new_name_lower)
            else:
                related_key = (
                    related_object.related_model._meta.app_label,
                    related_object.related_model._meta.model_name,
                )
            new_fields = []
            for name, field in state.models[related_key].fields:
                if name == related_object.field.name:
                    # Clone before mutating: field objects may be shared
                    # between states.
                    field = field.clone()
                    field.remote_field.model = "%s.%s" % (app_label, self.new_name)
                new_fields.append((name, field))
            state.models[related_key].fields = new_fields
            state.reload_model(*related_key)
        state.reload_model(app_label, self.new_name_lower)
    def database_forwards(self, app_label, schema_editor, from_state, to_state):
        new_model = to_state.apps.get_model(app_label, self.new_name)
        if self.allow_migrate_model(schema_editor.connection.alias, new_model):
            old_model = from_state.apps.get_model(app_label, self.old_name)
            # Move the main table
            schema_editor.alter_db_table(
                new_model,
                old_model._meta.db_table,
                new_model._meta.db_table,
            )
            # Alter the fields pointing to us
            for related_object in old_model._meta.related_objects:
                if related_object.related_model == old_model:
                    model = new_model
                    related_key = (app_label, self.new_name_lower)
                else:
                    model = related_object.related_model
                    related_key = (
                        related_object.related_model._meta.app_label,
                        related_object.related_model._meta.model_name,
                    )
                to_field = to_state.apps.get_model(
                    *related_key
                )._meta.get_field(related_object.field.name)
                schema_editor.alter_field(
                    model,
                    related_object.field,
                    to_field,
                )
            # Rename M2M fields whose name is based on this model's name.
            fields = zip(old_model._meta.local_many_to_many, new_model._meta.local_many_to_many)
            for (old_field, new_field) in fields:
                # Skip self-referential fields as these are renamed above.
                if new_field.model == new_field.related_model or not new_field.remote_field.through._meta.auto_created:
                    continue
                # Rename the M2M table that's based on this model's name.
                old_m2m_model = old_field.remote_field.through
                new_m2m_model = new_field.remote_field.through
                schema_editor.alter_db_table(
                    new_m2m_model,
                    old_m2m_model._meta.db_table,
                    new_m2m_model._meta.db_table,
                )
                # Rename the column in the M2M table that's based on this
                # model's name.
                schema_editor.alter_field(
                    new_m2m_model,
                    old_m2m_model._meta.get_field(old_model._meta.model_name),
                    new_m2m_model._meta.get_field(new_model._meta.model_name),
                )
    def database_backwards(self, app_label, schema_editor, from_state, to_state):
        # A rename is symmetric: swap old/new, run forwards, swap back.
        self.new_name_lower, self.old_name_lower = self.old_name_lower, self.new_name_lower
        self.new_name, self.old_name = self.old_name, self.new_name
        self.database_forwards(app_label, schema_editor, from_state, to_state)
        self.new_name_lower, self.old_name_lower = self.old_name_lower, self.new_name_lower
        self.new_name, self.old_name = self.old_name, self.new_name
    def references_model(self, name, app_label=None):
        return (
            name.lower() == self.old_name_lower or
            name.lower() == self.new_name_lower
        )
    def describe(self):
        return "Rename model %s to %s" % (self.old_name, self.new_name)
class AlterModelTable(Operation):
    """
    Renames a model's table
    """

    def __init__(self, name, table):
        self.name = name
        self.table = table

    @cached_property
    def name_lower(self):
        return self.name.lower()

    def deconstruct(self):
        # Serialized form: (class name, positional args, keyword args).
        return (
            self.__class__.__name__,
            [],
            {'name': self.name, 'table': self.table},
        )

    def state_forwards(self, app_label, state):
        state.models[app_label, self.name_lower].options["db_table"] = self.table
        state.reload_model(app_label, self.name_lower)

    def database_forwards(self, app_label, schema_editor, from_state, to_state):
        new_model = to_state.apps.get_model(app_label, self.name)
        if not self.allow_migrate_model(schema_editor.connection.alias, new_model):
            return
        old_model = from_state.apps.get_model(app_label, self.name)
        schema_editor.alter_db_table(
            new_model,
            old_model._meta.db_table,
            new_model._meta.db_table,
        )
        # Auto-created M2M "through" tables derive their names from this
        # model's db_table, so rename them alongside it.
        for old_field, new_field in zip(old_model._meta.local_many_to_many,
                                        new_model._meta.local_many_to_many):
            if not new_field.remote_field.through._meta.auto_created:
                continue
            schema_editor.alter_db_table(
                new_field.remote_field.through,
                old_field.remote_field.through._meta.db_table,
                new_field.remote_field.through._meta.db_table,
            )

    def database_backwards(self, app_label, schema_editor, from_state, to_state):
        # Symmetric operation: the caller swaps the states for us.
        return self.database_forwards(app_label, schema_editor, from_state, to_state)

    def references_model(self, name, app_label=None):
        return self.name_lower == name.lower()

    def describe(self):
        return "Rename table for %s to %s" % (self.name, self.table)
class AlterUniqueTogether(Operation):
    """
    Changes the value of unique_together to the target one.
    Input value of unique_together must be a set of tuples.
    """
    option_name = "unique_together"
    def __init__(self, name, unique_together):
        self.name = name
        # normalize_together() also accepts a single tuple of field names
        # and wraps it, so both Meta declaration styles are supported.
        unique_together = normalize_together(unique_together)
        self.unique_together = set(tuple(cons) for cons in unique_together)
    @cached_property
    def name_lower(self):
        return self.name.lower()
    def deconstruct(self):
        """Return (class name, args, kwargs) for migration serialization."""
        kwargs = {
            'name': self.name,
            'unique_together': self.unique_together,
        }
        return (
            self.__class__.__name__,
            [],
            kwargs
        )
    def state_forwards(self, app_label, state):
        model_state = state.models[app_label, self.name_lower]
        model_state.options[self.option_name] = self.unique_together
        state.reload_model(app_label, self.name_lower)
    def database_forwards(self, app_label, schema_editor, from_state, to_state):
        new_model = to_state.apps.get_model(app_label, self.name)
        if self.allow_migrate_model(schema_editor.connection.alias, new_model):
            old_model = from_state.apps.get_model(app_label, self.name)
            # The schema editor diffs old vs new constraint sets itself.
            schema_editor.alter_unique_together(
                new_model,
                getattr(old_model._meta, self.option_name, set()),
                getattr(new_model._meta, self.option_name, set()),
            )
    def database_backwards(self, app_label, schema_editor, from_state, to_state):
        # Symmetric operation: the caller swaps the states for us.
        return self.database_forwards(app_label, schema_editor, from_state, to_state)
    def references_model(self, name, app_label=None):
        return name.lower() == self.name_lower
    def references_field(self, model_name, name, app_label=None):
        # An empty constraint set conservatively "references" every field.
        return (
            self.references_model(model_name, app_label) and
            (
                not self.unique_together or
                any((name in together) for together in self.unique_together)
            )
        )
    def describe(self):
        # len('') == 0, so "or ''" reports 0 constraints for an empty set.
        return "Alter %s for %s (%s constraint(s))" % (self.option_name, self.name, len(self.unique_together or ''))
class AlterIndexTogether(Operation):
    """
    Changes the value of index_together to the target one.
    Input value of index_together must be a set of tuples.
    """
    # Structurally identical to AlterUniqueTogether, but drives
    # alter_index_together instead of alter_unique_together.
    option_name = "index_together"
    def __init__(self, name, index_together):
        self.name = name
        # normalize_together() also accepts a single tuple of field names.
        index_together = normalize_together(index_together)
        self.index_together = set(tuple(cons) for cons in index_together)
    @cached_property
    def name_lower(self):
        return self.name.lower()
    def deconstruct(self):
        """Return (class name, args, kwargs) for migration serialization."""
        kwargs = {
            'name': self.name,
            'index_together': self.index_together,
        }
        return (
            self.__class__.__name__,
            [],
            kwargs
        )
    def state_forwards(self, app_label, state):
        model_state = state.models[app_label, self.name_lower]
        model_state.options[self.option_name] = self.index_together
        state.reload_model(app_label, self.name_lower)
    def database_forwards(self, app_label, schema_editor, from_state, to_state):
        new_model = to_state.apps.get_model(app_label, self.name)
        if self.allow_migrate_model(schema_editor.connection.alias, new_model):
            old_model = from_state.apps.get_model(app_label, self.name)
            schema_editor.alter_index_together(
                new_model,
                getattr(old_model._meta, self.option_name, set()),
                getattr(new_model._meta, self.option_name, set()),
            )
    def database_backwards(self, app_label, schema_editor, from_state, to_state):
        # Symmetric operation: the caller swaps the states for us.
        return self.database_forwards(app_label, schema_editor, from_state, to_state)
    def references_model(self, name, app_label=None):
        return name.lower() == self.name_lower
    def references_field(self, model_name, name, app_label=None):
        # An empty index set conservatively "references" every field.
        return (
            self.references_model(model_name, app_label) and
            (
                not self.index_together or
                any((name in together) for together in self.index_together)
            )
        )
    def describe(self):
        return "Alter %s for %s (%s constraint(s))" % (self.option_name, self.name, len(self.index_together or ''))
class AlterOrderWithRespectTo(Operation):
    """
    Represents a change with the order_with_respect_to option.
    """
    def __init__(self, name, order_with_respect_to):
        self.name = name
        self.order_with_respect_to = order_with_respect_to
    @cached_property
    def name_lower(self):
        return self.name.lower()
    def deconstruct(self):
        """Return (class name, args, kwargs) for migration serialization."""
        kwargs = {
            'name': self.name,
            'order_with_respect_to': self.order_with_respect_to,
        }
        return (
            self.__class__.__name__,
            [],
            kwargs
        )
    def state_forwards(self, app_label, state):
        model_state = state.models[app_label, self.name_lower]
        model_state.options['order_with_respect_to'] = self.order_with_respect_to
        state.reload_model(app_label, self.name_lower)
    def database_forwards(self, app_label, schema_editor, from_state, to_state):
        # Setting order_with_respect_to implies a hidden "_order" column;
        # add or drop it depending on which side of the change has it.
        to_model = to_state.apps.get_model(app_label, self.name)
        if self.allow_migrate_model(schema_editor.connection.alias, to_model):
            from_model = from_state.apps.get_model(app_label, self.name)
            # Remove a field if we need to
            if from_model._meta.order_with_respect_to and not to_model._meta.order_with_respect_to:
                schema_editor.remove_field(from_model, from_model._meta.get_field("_order"))
            # Add a field if we need to (altering the column is untouched as
            # it's likely a rename)
            elif to_model._meta.order_with_respect_to and not from_model._meta.order_with_respect_to:
                field = to_model._meta.get_field("_order")
                if not field.has_default():
                    # Existing rows need some value for the new column.
                    field.default = 0
                schema_editor.add_field(
                    from_model,
                    field,
                )
    def database_backwards(self, app_label, schema_editor, from_state, to_state):
        # Symmetric operation: the caller swaps the states for us.
        self.database_forwards(app_label, schema_editor, from_state, to_state)
    def references_model(self, name, app_label=None):
        return name.lower() == self.name_lower
    def references_field(self, model_name, name, app_label=None):
        return (
            self.references_model(model_name, app_label) and
            (
                self.order_with_respect_to is None or
                name == self.order_with_respect_to
            )
        )
    def describe(self):
        return "Set order_with_respect_to on %s to %s" % (self.name, self.order_with_respect_to)
class AlterModelOptions(Operation):
    """
    Sets new model options that don't directly affect the database schema
    (like verbose_name, permissions, ordering). Python code in migrations
    may still need them.
    """

    # Model options we want to compare and preserve in an AlterModelOptions op
    ALTER_OPTION_KEYS = [
        "get_latest_by",
        "managed",
        "ordering",
        "permissions",
        "default_permissions",
        "select_on_save",
        "verbose_name",
        "verbose_name_plural",
    ]

    def __init__(self, name, options):
        self.name = name
        self.options = options

    @cached_property
    def name_lower(self):
        return self.name.lower()

    def deconstruct(self):
        # Serialized form: (class name, positional args, keyword args).
        return (
            self.__class__.__name__,
            [],
            {'name': self.name, 'options': self.options},
        )

    def state_forwards(self, app_label, state):
        model_state = state.models[app_label, self.name_lower]
        # Work on a copy so shared state objects are not mutated in place.
        merged = dict(model_state.options)
        merged.update(self.options)
        # Any tracked option missing from the new options dict is being unset.
        for key in self.ALTER_OPTION_KEYS:
            if key not in self.options:
                merged.pop(key, None)
        model_state.options = merged
        state.reload_model(app_label, self.name_lower)

    def database_forwards(self, app_label, schema_editor, from_state, to_state):
        # These options never touch the schema: nothing to do at the DB level.
        pass

    def database_backwards(self, app_label, schema_editor, from_state, to_state):
        pass

    def references_model(self, name, app_label=None):
        return self.name_lower == name.lower()

    def describe(self):
        return "Change Meta options on %s" % (self.name, )
class AlterModelManagers(Operation):
    """
    Alters the model's managers
    """

    serialization_expand_args = ['managers']

    def __init__(self, name, managers):
        self.name = name
        self.managers = managers

    @cached_property
    def name_lower(self):
        return self.name.lower()

    def deconstruct(self):
        # Unlike most operations, the arguments are serialized positionally.
        return (self.__class__.__name__, [self.name, self.managers], {})

    def state_forwards(self, app_label, state):
        target = state.models[app_label, self.name_lower]
        target.managers = list(self.managers)
        state.reload_model(app_label, self.name_lower)

    def database_forwards(self, app_label, schema_editor, from_state, to_state):
        # Managers are a pure-Python concern: nothing to do at the DB level.
        pass

    def database_backwards(self, app_label, schema_editor, from_state, to_state):
        pass

    def references_model(self, name, app_label=None):
        return self.name_lower == name.lower()

    def describe(self):
        return "Change managers on %s" % (self.name, )
|
mavit/ansible | refs/heads/devel | lib/ansible/modules/network/system/net_ping.py | 55 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Ansible by Red Hat, inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: net_ping
version_added: "2.4"
author: "Jacob McGill (@jmcgill298)"
short_description: Tests reachability using ping from a network device
description:
- Tests reachability using ping from network device to a remote destination.
- For Windows targets, use the M(win_ping) module instead.
- For targets running Python, use the M(ping) module instead.
options:
count:
description:
- Number of packets to send.
default: 5
dest:
description:
- The IP Address or hostname (resolvable by switch) of the remote node.
required: true
source:
description:
- The source IP Address.
state:
description:
- Determines if the expected result is success or fail.
choices: [ absent, present ]
default: present
vrf:
description:
- The VRF to use for forwarding.
default: default
notes:
- For Windows targets, use the M(win_ping) module instead.
- For targets running Python, use the M(ping) module instead.
'''
EXAMPLES = r'''
- name: Test reachability to 10.10.10.10 using default vrf
net_ping:
dest: 10.10.10.10
- name: Test reachability to 10.20.20.20 using prod vrf
net_ping:
dest: 10.20.20.20
vrf: prod
- name: Test unreachability to 10.30.30.30 using default vrf
net_ping:
dest: 10.30.30.30
state: absent
- name: Test reachability to 10.40.40.40 using prod vrf and setting count and source
net_ping:
dest: 10.40.40.40
source: loopback0
vrf: prod
count: 20
'''
RETURN = r'''
commands:
description: Show the command sent.
returned: always
type: list
sample: ["ping vrf prod 10.40.40.40 count 20 source loopback0"]
packet_loss:
description: Percentage of packets lost.
returned: always
type: str
sample: "0%"
packets_rx:
description: Packets successfully received.
returned: always
type: int
sample: 20
packets_tx:
description: Packets successfully transmitted.
returned: always
type: int
sample: 20
rtt:
description: Show RTT stats.
returned: always
type: dict
sample: {"avg": 2, "max": 8, "min": 1}
'''
|
LinusU/ansible | refs/heads/devel | lib/ansible/plugins/strategy/free.py | 47 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import time
from ansible.errors import *
from ansible.playbook.included_file import IncludedFile
from ansible.plugins.strategy import StrategyBase
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class StrategyModule(StrategyBase):
    # Host-parallel "free" strategy: each host advances through its own task
    # list as fast as it can instead of in lock-step with the other hosts.
    def run(self, iterator, play_context):
        '''
        The "free" strategy is a bit more complex, in that it allows tasks to
        be sent to hosts as quickly as they can be processed. This means that
        some hosts may finish very quickly if run tasks result in little or no
        work being done versus other systems.
        The algorithm used here also tries to be more "fair" when iterating
        through hosts by remembering the last host in the list to be given a task
        and starting the search from there as opposed to the top of the hosts
        list again, which would end up favoring hosts near the beginning of the
        list.
        '''
        # the last host to be given a task
        last_host = 0
        result = True
        work_to_do = True
        while work_to_do and not self._tqm._terminated:
            hosts_left = [host for host in self._inventory.get_hosts(iterator._play.hosts) if host.name not in self._tqm._unreachable_hosts]
            if len(hosts_left) == 0:
                self._tqm.send_callback('v2_playbook_on_no_hosts_remaining')
                result = False
                break
            work_to_do = False  # assume we have no more work to do
            starting_host = last_host  # save current position so we know when we've
            # looped back around and need to break
            # try and find an unblocked host with a task to run
            host_results = []
            while True:
                host = hosts_left[last_host]
                self._display.debug("next free host: %s" % host)
                host_name = host.get_name()
                # peek at the next task for the host, to see if there's
                # anything to do for this host
                (state, task) = iterator.get_next_task_for_host(host, peek=True)
                self._display.debug("free host state: %s" % state)
                self._display.debug("free host task: %s" % task)
                if host_name not in self._tqm._failed_hosts and host_name not in self._tqm._unreachable_hosts and task:
                    # set the flag so the outer loop knows we've still found
                    # some work which needs to be done
                    work_to_do = True
                    self._display.debug("this host has work to do")
                    # check to see if this host is blocked (still executing a previous task)
                    if not host_name in self._blocked_hosts or not self._blocked_hosts[host_name]:
                        # pop the task, mark the host blocked, and queue it
                        self._blocked_hosts[host_name] = True
                        (state, task) = iterator.get_next_task_for_host(host)
                        self._display.debug("getting variables")
                        task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=task)
                        self._display.debug("done getting variables")
                        # check to see if this task should be skipped, due to it being a member of a
                        # role which has already run (and whether that role allows duplicate execution)
                        if task._role and task._role.has_run(host):
                            # If there is no metadata, the default behavior is to not allow duplicates,
                            # if there is metadata, check to see if the allow_duplicates flag was set to true
                            if task._role._metadata is None or task._role._metadata and not task._role._metadata.allow_duplicates:
                                self._display.debug("'%s' skipped because role has already run" % task)
                                continue
                        if not task.evaluate_tags(play_context.only_tags, play_context.skip_tags, task_vars) and task.action != 'setup':
                            self._display.debug("'%s' failed tag evaluation" % task)
                            continue
                        if task.action == 'meta':
                            # meta tasks store their args in the _raw_params field of args,
                            # since they do not use k=v pairs, so get that
                            meta_action = task.args.get('_raw_params')
                            if meta_action == 'noop':
                                # FIXME: issue a callback for the noop here?
                                continue
                            elif meta_action == 'flush_handlers':
                                # FIXME: in the 'free' mode, flushing handlers should result in
                                # only those handlers notified for the host doing the flush
                                self.run_handlers(iterator, play_context)
                            else:
                                raise AnsibleError("invalid meta action requested: %s" % meta_action, obj=task._ds)
                            # meta tasks execute inline, so the host is immediately unblocked
                            self._blocked_hosts[host_name] = False
                        else:
                            # handle step if needed, skip meta actions as they are used internally
                            if not self._step or self._take_step(task, host_name):
                                self._tqm.send_callback('v2_playbook_on_task_start', task, is_conditional=False)
                                self._queue_task(host, task, task_vars, play_context)
                # move on to the next host and make sure we
                # haven't gone past the end of our hosts list
                last_host += 1
                if last_host > len(hosts_left) - 1:
                    last_host = 0
                # if we've looped around back to the start, break out
                if last_host == starting_host:
                    break
            results = self._process_pending_results(iterator)
            host_results.extend(results)
            try:
                included_files = IncludedFile.process_include_results(host_results, self._tqm, iterator=iterator, loader=self._loader, variable_manager=self._variable_manager)
            except AnsibleError as e:
                # NOTE(review): the error is silently discarded here and the
                # whole run is just reported as failed — confirm intended.
                return False
            if len(included_files) > 0:
                for included_file in included_files:
                    # included hosts get the task list while those excluded get an equal-length
                    # list of noop tasks, to make sure that they continue running in lock-step
                    try:
                        new_blocks = self._load_included_file(included_file, iterator=iterator)
                    except AnsibleError as e:
                        for host in included_file._hosts:
                            iterator.mark_host_failed(host)
                        self._display.warning(str(e))
                        continue
                    for host in hosts_left:
                        if host in included_file._hosts:
                            task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=included_file._task)
                            final_blocks = []
                            for new_block in new_blocks:
                                final_blocks.append(new_block.filter_tagged_tasks(play_context, task_vars))
                            iterator.add_tasks(host, final_blocks)
            # pause briefly so we don't spin lock
            time.sleep(0.05)
        try:
            results = self._wait_on_pending_results(iterator)
            host_results.extend(results)
        except Exception as e:
            # FIXME: ctrl+c can cause some failures here, so catch them
            # with the appropriate error type
            pass
        # run the base class run() method, which executes the cleanup function
        # and runs any outstanding handlers which have been triggered
        return super(StrategyModule, self).run(iterator, play_context, result)
|
dexterx17/nodoSocket | refs/heads/master | clients/Python-2.7.6/Lib/test/crashers/borrowed_ref_1.py | 168 | """
_PyType_Lookup() returns a borrowed reference.
This attacks the call in dictobject.c.
"""
# NOTE: this is a deliberate CPython crasher (Python 2) — it must NOT be
# "fixed".  It exploits _PyType_Lookup() returning a *borrowed* reference:
# dict lookup fetches D.__missing__ without incref'ing it, then the lookup
# machinery can trigger a GC collection, B.__del__ deletes D.__missing__,
# and the interpreter proceeds to call through a dangling pointer.
class A(object):
    pass
class B(object):
    def __del__(self):
        print 'hi'
        # Drop the only strong reference to D.__missing__ while a borrowed
        # reference to it may still be in use inside dictobject.c.
        del D.__missing__
class D(dict):
    class __missing__:
        def __init__(self, *args):
            pass
# Build a reference cycle that keeps a B instance alive until the cycle
# collector runs; collecting it fires B.__del__ at an arbitrary point.
d = D()
a = A()
a.cycle = a
a.other = B()
del a
# Allocate continuously so a GC collection eventually happens during the
# d[5] lookup, racing the borrowed __missing__ reference.
prev = None
while 1:
    d[5]
    prev = (prev,)
|
caveman-dick/ansible | refs/heads/devel | lib/ansible/modules/crypto/openssl_privatekey.py | 26 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Yanis Guenane <yanis+ansible@guenane.org>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: openssl_privatekey
author: "Yanis Guenane (@Spredzy)"
version_added: "2.3"
short_description: Generate OpenSSL private keys.
description:
- "This module allows one to (re)generate OpenSSL private keys. It uses
the pyOpenSSL python library to interact with openssl. One can generate
either RSA or DSA private keys. Keys are generated in PEM format.
This module uses file common arguments to specify generated file permissions."
requirements:
- "python-pyOpenSSL"
options:
state:
required: false
default: "present"
choices: [ present, absent ]
description:
- Whether the private key should exist or not, taking action if the state is different from what is stated.
size:
required: false
default: 4096
description:
- Size (in bits) of the TLS/SSL key to generate
type:
required: false
default: "RSA"
choices: [ RSA, DSA ]
description:
- The algorithm used to generate the TLS/SSL private key
force:
required: false
default: False
choices: [ True, False ]
description:
- Should the key be regenerated even it it already exists
path:
required: true
description:
- Name of the file in which the generated TLS/SSL private key will be written. It will have 0600 mode.
passphrase:
required: false
description:
- The passphrase for the private key.
version_added: "2.4"
cipher:
required: false
description:
- The cipher to encrypt the private key. (cipher can be found by running `openssl list-cipher-algorithms`)
version_added: "2.4"
'''
EXAMPLES = '''
# Generate an OpenSSL private key with the default values (4096 bits, RSA)
- openssl_privatekey:
path: /etc/ssl/private/ansible.com.pem
# Generate an OpenSSL private key with the default values (4096 bits, RSA)
# and a passphrase
- openssl_privatekey:
path: /etc/ssl/private/ansible.com.pem
passphrase: ansible
cipher: aes256
# Generate an OpenSSL private key with a different size (2048 bits)
- openssl_privatekey:
path: /etc/ssl/private/ansible.com.pem
size: 2048
# Force regenerate an OpenSSL private key if it already exists
- openssl_privatekey:
path: /etc/ssl/private/ansible.com.pem
force: True
# Generate an OpenSSL private key with a different algorithm (DSA)
- openssl_privatekey:
path: /etc/ssl/private/ansible.com.pem
type: DSA
'''
RETURN = '''
size:
description: Size (in bits) of the TLS/SSL private key
returned: changed or success
type: int
sample: 4096
type:
description: Algorithm used to generate the TLS/SSL private key
returned: changed or success
type: string
sample: RSA
filename:
description: Path to the generated TLS/SSL private key file
returned: changed or success
type: string
sample: /etc/ssl/private/ansible.com.pem
fingerprint:
description: The fingerprint of the public key. Fingerprint will be generated for each hashlib.algorithms available.
Requires PyOpenSSL >= 16.0 for meaningful output.
returned: changed or success
type: dict
sample:
md5: "84:75:71:72:8d:04:b5:6c:4d:37:6d:66:83:f5:4c:29"
sha1: "51:cc:7c:68:5d:eb:41:43:88:7e:1a:ae:c7:f8:24:72:ee:71:f6:10"
sha224: "b1:19:a6:6c:14:ac:33:1d:ed:18:50:d3:06:5c:b2:32:91:f1:f1:52:8c:cb:d5:75:e9:f5:9b:46"
sha256: "41:ab:c7:cb:d5:5f:30:60:46:99:ac:d4:00:70:cf:a1:76:4f:24:5d:10:24:57:5d:51:6e:09:97:df:2f:de:c7"
sha384: "85:39:50:4e:de:d9:19:33:40:70:ae:10:ab:59:24:19:51:c3:a2:e4:0b:1c:b1:6e:dd:b3:0c:d9:9e:6a:46:af:da:18:f8:ef:ae:2e:c0:9a:75:2c:9b:b3:0f:3a:5f:3d"
sha512: "fd:ed:5e:39:48:5f:9f:fe:7f:25:06:3f:79:08:cd:ee:a5:e7:b3:3d:13:82:87:1f:84:e1:f5:c7:28:77:53:94:86:56:38:69:f0:d9:35:22:01:1e:a6:60:...:0f:9b"
'''
import os
try:
from OpenSSL import crypto
except ImportError:
pyopenssl_found = False
else:
pyopenssl_found = True
from ansible.module_utils import crypto as crypto_utils
from ansible.module_utils._text import to_native, to_bytes
from ansible.module_utils.basic import AnsibleModule
class PrivateKeyError(crypto_utils.OpenSSLObjectError):
    """Raised for any failure while generating or writing the private key."""
    pass
class PrivateKey(crypto_utils.OpenSSLObject):
    """Generate, validate and serialize an OpenSSL private key file."""

    def __init__(self, module):
        super(PrivateKey, self).__init__(
            module.params['path'],
            module.params['state'],
            module.params['force'],
            module.check_mode
        )
        self.size = module.params['size']
        self.passphrase = module.params['passphrase']
        self.cipher = module.params['cipher']
        self.privatekey = None
        self.fingerprint = {}
        self.mode = module.params['mode']
        if not self.mode:
            # Default to owner-only access: a private key must never be
            # group/world readable.
            self.mode = int('0600', 8)
        self.type = crypto.TYPE_RSA
        if module.params['type'] == 'DSA':
            self.type = crypto.TYPE_DSA

    def generate(self, module):
        """Generate a keypair and write it to ``self.path``.

        Regenerates only when the existing key does not satisfy check() or
        ``force`` is set.  Raises PrivateKeyError on failure.
        """
        if not self.check(module, perms_required=False) or self.force:
            self.privatekey = crypto.PKey()
            try:
                self.privatekey.generate_key(self.type, self.size)
            except (TypeError, ValueError) as exc:
                raise PrivateKeyError(exc)
            try:
                # os.open() (not open()) so the file is *created* with the
                # restrictive mode rather than chmod'ed after the fact.
                privatekey_file = os.open(self.path,
                                          os.O_WRONLY | os.O_CREAT | os.O_TRUNC,
                                          self.mode)
                try:
                    if self.cipher and self.passphrase:
                        os.write(privatekey_file, crypto.dump_privatekey(crypto.FILETYPE_PEM, self.privatekey,
                                                                         self.cipher, to_bytes(self.passphrase)))
                    else:
                        os.write(privatekey_file, crypto.dump_privatekey(crypto.FILETYPE_PEM, self.privatekey))
                finally:
                    # Close in a finally block so the descriptor is released
                    # even if dump_privatekey()/os.write() raises; previously
                    # the fd leaked on the error path.
                    os.close(privatekey_file)
                self.changed = True
            except IOError as exc:
                self.remove()
                raise PrivateKeyError(exc)
        self.fingerprint = crypto_utils.get_fingerprint(self.path, self.passphrase)
        file_args = module.load_file_common_arguments(module.params)
        if module.set_fs_attributes_if_different(file_args, False):
            self.changed = True

    def check(self, module, perms_required=True):
        """Ensure the resource is in its desired state.

        Returns True only when the file exists with the right permissions,
        decrypts with the configured passphrase, and matches the requested
        size and algorithm.
        """
        state_and_perms = super(PrivateKey, self).check(module, perms_required)

        def _check_size(privatekey):
            return self.size == privatekey.bits()

        def _check_type(privatekey):
            return self.type == privatekey.type()

        def _check_passphrase():
            try:
                crypto_utils.load_privatekey(self.path, self.passphrase)
                return True
            except crypto.Error:
                return False

        if not state_and_perms or not _check_passphrase():
            return False
        privatekey = crypto_utils.load_privatekey(self.path, self.passphrase)
        return _check_size(privatekey) and _check_type(privatekey)

    def dump(self):
        """Serialize the object into a dictionary."""
        result = {
            'size': self.size,
            'filename': self.path,
            'changed': self.changed,
            'fingerprint': self.fingerprint,
        }
        if self.type == crypto.TYPE_RSA:
            result['type'] = 'RSA'
        else:
            result['type'] = 'DSA'
        return result
def main():
    """Ansible module entry point: create or remove an OpenSSL private key."""
    module = AnsibleModule(
        argument_spec=dict(
            state=dict(default='present', choices=['present', 'absent'], type='str'),
            size=dict(default=4096, type='int'),
            type=dict(default='RSA', choices=['RSA', 'DSA'], type='str'),
            force=dict(default=False, type='bool'),
            path=dict(required=True, type='path'),
            passphrase=dict(type='str', no_log=True),
            cipher=dict(type='str'),
        ),
        supports_check_mode=True,
        add_file_common_args=True,
        required_together=[['cipher', 'passphrase']],
    )
    if not pyopenssl_found:
        module.fail_json(msg='the python pyOpenSSL module is required')
    base_dir = os.path.dirname(module.params['path'])
    if not os.path.isdir(base_dir):
        module.fail_json(
            name=base_dir,
            msg='The directory %s does not exist or the file is not a directory' % base_dir
        )
    private_key = PrivateKey(module)
    if private_key.state == 'present':
        if module.check_mode:
            # Check mode: report whether a change *would* occur, touch nothing.
            result = private_key.dump()
            result['changed'] = module.params['force'] or not private_key.check(module)
            module.exit_json(**result)
        try:
            private_key.generate(module)
        except PrivateKeyError as exc:
            module.fail_json(msg=to_native(exc))
    else:
        if module.check_mode:
            result = private_key.dump()
            result['changed'] = os.path.exists(module.params['path'])
            module.exit_json(**result)
        try:
            private_key.remove()
        except PrivateKeyError as exc:
            module.fail_json(msg=to_native(exc))
    result = private_key.dump()
    module.exit_json(**result)
|
2014cdbg4/2015cd_midterm | refs/heads/master | static/Brython3.1.1-20150328-091302/Lib/sre_constants.py | 692 | #
# Secret Labs' Regular Expression Engine
#
# various symbols used by the regular expression engine.
# run this script to update the _sre include files!
#
# Copyright (c) 1998-2001 by Secret Labs AB. All rights reserved.
#
# See the sre.py file for information on usage and redistribution.
#
"""Internal support module for sre"""
# update when constants are added or removed
MAGIC = 20031017
#MAXREPEAT = 2147483648
#from _sre import MAXREPEAT
# SRE standard exception (access as sre.error)
# should this really be here?
class error(Exception):
    """Exception raised by the sre engine (exposed to users as sre.error)."""
    pass
# operators
FAILURE = "failure"
SUCCESS = "success"
ANY = "any"
ANY_ALL = "any_all"
ASSERT = "assert"
ASSERT_NOT = "assert_not"
AT = "at"
BIGCHARSET = "bigcharset"
BRANCH = "branch"
CALL = "call"
CATEGORY = "category"
CHARSET = "charset"
GROUPREF = "groupref"
GROUPREF_IGNORE = "groupref_ignore"
GROUPREF_EXISTS = "groupref_exists"
IN = "in"
IN_IGNORE = "in_ignore"
INFO = "info"
JUMP = "jump"
LITERAL = "literal"
LITERAL_IGNORE = "literal_ignore"
MARK = "mark"
MAX_REPEAT = "max_repeat"
MAX_UNTIL = "max_until"
MIN_REPEAT = "min_repeat"
MIN_UNTIL = "min_until"
NEGATE = "negate"
NOT_LITERAL = "not_literal"
NOT_LITERAL_IGNORE = "not_literal_ignore"
RANGE = "range"
REPEAT = "repeat"
REPEAT_ONE = "repeat_one"
SUBPATTERN = "subpattern"
MIN_REPEAT_ONE = "min_repeat_one"
# positions
AT_BEGINNING = "at_beginning"
AT_BEGINNING_LINE = "at_beginning_line"
AT_BEGINNING_STRING = "at_beginning_string"
AT_BOUNDARY = "at_boundary"
AT_NON_BOUNDARY = "at_non_boundary"
AT_END = "at_end"
AT_END_LINE = "at_end_line"
AT_END_STRING = "at_end_string"
AT_LOC_BOUNDARY = "at_loc_boundary"
AT_LOC_NON_BOUNDARY = "at_loc_non_boundary"
AT_UNI_BOUNDARY = "at_uni_boundary"
AT_UNI_NON_BOUNDARY = "at_uni_non_boundary"
# categories
CATEGORY_DIGIT = "category_digit"
CATEGORY_NOT_DIGIT = "category_not_digit"
CATEGORY_SPACE = "category_space"
CATEGORY_NOT_SPACE = "category_not_space"
CATEGORY_WORD = "category_word"
CATEGORY_NOT_WORD = "category_not_word"
CATEGORY_LINEBREAK = "category_linebreak"
CATEGORY_NOT_LINEBREAK = "category_not_linebreak"
CATEGORY_LOC_WORD = "category_loc_word"
CATEGORY_LOC_NOT_WORD = "category_loc_not_word"
CATEGORY_UNI_DIGIT = "category_uni_digit"
CATEGORY_UNI_NOT_DIGIT = "category_uni_not_digit"
CATEGORY_UNI_SPACE = "category_uni_space"
CATEGORY_UNI_NOT_SPACE = "category_uni_not_space"
CATEGORY_UNI_WORD = "category_uni_word"
CATEGORY_UNI_NOT_WORD = "category_uni_not_word"
CATEGORY_UNI_LINEBREAK = "category_uni_linebreak"
CATEGORY_UNI_NOT_LINEBREAK = "category_uni_not_linebreak"
OPCODES = [
# failure=0 success=1 (just because it looks better that way :-)
FAILURE, SUCCESS,
ANY, ANY_ALL,
ASSERT, ASSERT_NOT,
AT,
BRANCH,
CALL,
CATEGORY,
CHARSET, BIGCHARSET,
GROUPREF, GROUPREF_EXISTS, GROUPREF_IGNORE,
IN, IN_IGNORE,
INFO,
JUMP,
LITERAL, LITERAL_IGNORE,
MARK,
MAX_UNTIL,
MIN_UNTIL,
NOT_LITERAL, NOT_LITERAL_IGNORE,
NEGATE,
RANGE,
REPEAT,
REPEAT_ONE,
SUBPATTERN,
MIN_REPEAT_ONE
]
ATCODES = [
AT_BEGINNING, AT_BEGINNING_LINE, AT_BEGINNING_STRING, AT_BOUNDARY,
AT_NON_BOUNDARY, AT_END, AT_END_LINE, AT_END_STRING,
AT_LOC_BOUNDARY, AT_LOC_NON_BOUNDARY, AT_UNI_BOUNDARY,
AT_UNI_NON_BOUNDARY
]
CHCODES = [
CATEGORY_DIGIT, CATEGORY_NOT_DIGIT, CATEGORY_SPACE,
CATEGORY_NOT_SPACE, CATEGORY_WORD, CATEGORY_NOT_WORD,
CATEGORY_LINEBREAK, CATEGORY_NOT_LINEBREAK, CATEGORY_LOC_WORD,
CATEGORY_LOC_NOT_WORD, CATEGORY_UNI_DIGIT, CATEGORY_UNI_NOT_DIGIT,
CATEGORY_UNI_SPACE, CATEGORY_UNI_NOT_SPACE, CATEGORY_UNI_WORD,
CATEGORY_UNI_NOT_WORD, CATEGORY_UNI_LINEBREAK,
CATEGORY_UNI_NOT_LINEBREAK
]
def makedict(list):
    """Map each item of *list* to its position: {item: index}.

    Used to turn the opcode/atcode/chcode name lists above into
    name -> number tables matching the C engine's numbering.
    """
    # NOTE: the parameter name shadows the builtin ``list``; it is kept
    # unchanged for backward compatibility with existing callers.
    return {item: i for i, item in enumerate(list)}
OPCODES = makedict(OPCODES)
ATCODES = makedict(ATCODES)
CHCODES = makedict(CHCODES)
# replacement operations for "ignore case" mode
OP_IGNORE = {
GROUPREF: GROUPREF_IGNORE,
IN: IN_IGNORE,
LITERAL: LITERAL_IGNORE,
NOT_LITERAL: NOT_LITERAL_IGNORE
}
AT_MULTILINE = {
AT_BEGINNING: AT_BEGINNING_LINE,
AT_END: AT_END_LINE
}
AT_LOCALE = {
AT_BOUNDARY: AT_LOC_BOUNDARY,
AT_NON_BOUNDARY: AT_LOC_NON_BOUNDARY
}
AT_UNICODE = {
AT_BOUNDARY: AT_UNI_BOUNDARY,
AT_NON_BOUNDARY: AT_UNI_NON_BOUNDARY
}
CH_LOCALE = {
CATEGORY_DIGIT: CATEGORY_DIGIT,
CATEGORY_NOT_DIGIT: CATEGORY_NOT_DIGIT,
CATEGORY_SPACE: CATEGORY_SPACE,
CATEGORY_NOT_SPACE: CATEGORY_NOT_SPACE,
CATEGORY_WORD: CATEGORY_LOC_WORD,
CATEGORY_NOT_WORD: CATEGORY_LOC_NOT_WORD,
CATEGORY_LINEBREAK: CATEGORY_LINEBREAK,
CATEGORY_NOT_LINEBREAK: CATEGORY_NOT_LINEBREAK
}
CH_UNICODE = {
CATEGORY_DIGIT: CATEGORY_UNI_DIGIT,
CATEGORY_NOT_DIGIT: CATEGORY_UNI_NOT_DIGIT,
CATEGORY_SPACE: CATEGORY_UNI_SPACE,
CATEGORY_NOT_SPACE: CATEGORY_UNI_NOT_SPACE,
CATEGORY_WORD: CATEGORY_UNI_WORD,
CATEGORY_NOT_WORD: CATEGORY_UNI_NOT_WORD,
CATEGORY_LINEBREAK: CATEGORY_UNI_LINEBREAK,
CATEGORY_NOT_LINEBREAK: CATEGORY_UNI_NOT_LINEBREAK
}
# flags
SRE_FLAG_TEMPLATE = 1 # template mode (disable backtracking)
SRE_FLAG_IGNORECASE = 2 # case insensitive
SRE_FLAG_LOCALE = 4 # honour system locale
SRE_FLAG_MULTILINE = 8 # treat target as multiline string
SRE_FLAG_DOTALL = 16 # treat target as a single string
SRE_FLAG_UNICODE = 32 # use unicode "locale"
SRE_FLAG_VERBOSE = 64 # ignore whitespace and comments
SRE_FLAG_DEBUG = 128 # debugging
SRE_FLAG_ASCII = 256 # use ascii "locale"
# flags for INFO primitive
SRE_INFO_PREFIX = 1 # has prefix
SRE_INFO_LITERAL = 2 # entire pattern is literal (given by prefix)
SRE_INFO_CHARSET = 4 # pattern starts with character from given set
if __name__ == "__main__":
    # Regenerate sre_constants.h so the C engine stays in sync with the
    # Python-level numbering defined above.
    def dump(f, d, prefix):
        # Emit #defines sorted by opcode number.
        items = sorted(d.items(), key=lambda a: a[1])
        for k, v in items:
            f.write("#define %s_%s %s\n" % (prefix, k.upper(), v))
    f = open("sre_constants.h", "w")
    f.write("""\
/*
 * Secret Labs' Regular Expression Engine
 *
 * regular expression matching engine
 *
 * NOTE: This file is generated by sre_constants.py. If you need
 * to change anything in here, edit sre_constants.py and run it.
 *
 * Copyright (c) 1997-2001 by Secret Labs AB. All rights reserved.
 *
 * See the _sre.c file for information on usage and redistribution.
 */
""")
    f.write("#define SRE_MAGIC %d\n" % MAGIC)
    dump(f, OPCODES, "SRE_OP")
    dump(f, ATCODES, "SRE")
    dump(f, CHCODES, "SRE")
    f.write("#define SRE_FLAG_TEMPLATE %d\n" % SRE_FLAG_TEMPLATE)
    f.write("#define SRE_FLAG_IGNORECASE %d\n" % SRE_FLAG_IGNORECASE)
    f.write("#define SRE_FLAG_LOCALE %d\n" % SRE_FLAG_LOCALE)
    f.write("#define SRE_FLAG_MULTILINE %d\n" % SRE_FLAG_MULTILINE)
    f.write("#define SRE_FLAG_DOTALL %d\n" % SRE_FLAG_DOTALL)
    f.write("#define SRE_FLAG_UNICODE %d\n" % SRE_FLAG_UNICODE)
    f.write("#define SRE_FLAG_VERBOSE %d\n" % SRE_FLAG_VERBOSE)
    f.write("#define SRE_INFO_PREFIX %d\n" % SRE_INFO_PREFIX)
    f.write("#define SRE_INFO_LITERAL %d\n" % SRE_INFO_LITERAL)
    f.write("#define SRE_INFO_CHARSET %d\n" % SRE_INFO_CHARSET)
    f.close()
    print("done")
|
seeminglee/pyglet64 | refs/heads/master | tests/window/WINDOW_STYLE_TOOL.py | 30 | #!/usr/bin/env python
'''Test that window style can be tool.
Expected behaviour:
One tool-styled window will be opened.
Close the window to end the test.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: WINDOW_SET_MOUSE_CURSOR.py 717 2007-03-03 07:04:10Z Alex.Holkner $'
import unittest
from pyglet.gl import *
from pyglet import window
class TEST_WINDOW_STYLE_TOOL(unittest.TestCase):
    """Interactive check that a window can be created with the TOOL style."""
    def test_style_tool(self):
        # Open a small tool-styled window and pump events until the user
        # closes it (which sets ``has_exit``).
        self.width, self.height = 200, 200
        self.w = w = window.Window(self.width, self.height,
                                   style=window.Window.WINDOW_STYLE_TOOL)
        glClearColor(1, 1, 1, 1)
        while not w.has_exit:
            glClear(GL_COLOR_BUFFER_BIT)
            w.dispatch_events()
            w.flip()
        w.close()
if __name__ == '__main__':
unittest.main()
|
GGoussar/scikit-image | refs/heads/master | skimage/morphology/tests/test_ccomp.py | 11 | import numpy as np
from numpy.testing import assert_array_equal, run_module_suite
from skimage.measure import label
import skimage.measure._ccomp as ccomp
from skimage._shared._warnings import expected_warnings
# Background value
BG = 0
class TestConnectedComponents:
    """Tests for skimage.measure.label on 2-D integer images."""
    def setup(self):
        # Small labelled test image and its expected connected components
        # (default connectivity, background == 0).
        self.x = np.array([[0, 0, 3, 2, 1, 9],
                           [0, 1, 1, 9, 2, 9],
                           [0, 0, 1, 9, 9, 9],
                           [3, 1, 1, 5, 3, 0]])
        self.labels = np.array([[0, 0, 1, 2, 3, 4],
                                [0, 5, 5, 4, 2, 4],
                                [0, 0, 5, 4, 4, 4],
                                [6, 5, 5, 7, 8, 0]])
    def test_basic(self):
        assert_array_equal(label(self.x), self.labels)
        # Make sure data wasn't modified
        assert self.x[0, 2] == 3
    def test_random(self):
        # Every labelled region must be constant-valued in the input.
        # BUGFIX: ``np.int`` is a deprecated (and later removed) alias of
        # the builtin ``int``; use the builtin directly.
        x = (np.random.rand(20, 30) * 5).astype(int)
        labels = label(x)
        n = labels.max()
        for i in range(n):
            values = x[labels == i]
            assert np.all(values == values[0])
    def test_diag(self):
        # Diagonal neighbours are connected with the default connectivity.
        x = np.array([[0, 0, 1],
                      [0, 1, 0],
                      [1, 0, 0]])
        assert_array_equal(label(x), x)
    def test_4_vs_8(self):
        # 4-connectivity separates the anti-diagonal; 8-connectivity joins it.
        x = np.array([[0, 1],
                      [1, 0]], dtype=int)
        assert_array_equal(label(x, 4),
                           [[0, 1],
                            [2, 0]])
        assert_array_equal(label(x, 8),
                           [[0, 1],
                            [1, 0]])
    def test_background(self):
        x = np.array([[1, 0, 0],
                      [1, 1, 5],
                      [0, 0, 0]])
        assert_array_equal(label(x), [[1, 0, 0],
                                      [1, 1, 2],
                                      [0, 0, 0]])
        assert_array_equal(label(x, background=0),
                           [[1, 0, 0],
                            [1, 1, 2],
                            [0, 0, 0]])
    def test_background_two_regions(self):
        x = np.array([[0, 0, 6],
                      [0, 0, 6],
                      [5, 5, 5]])
        res = label(x, background=0)
        assert_array_equal(res,
                           [[0, 0, 1],
                            [0, 0, 1],
                            [2, 2, 2]])
    def test_background_one_region_center(self):
        x = np.array([[0, 0, 0],
                      [0, 1, 0],
                      [0, 0, 0]])
        assert_array_equal(label(x, neighbors=4, background=0),
                           [[0, 0, 0],
                            [0, 1, 0],
                            [0, 0, 0]])
    def test_return_num(self):
        x = np.array([[1, 0, 6],
                      [0, 0, 6],
                      [5, 5, 5]])
        assert_array_equal(label(x, return_num=True)[1], 3)
        assert_array_equal(label(x, background=-1, return_num=True)[1], 4)
class TestConnectedComponents3d:
    """Tests for skimage.measure.label on 3-D volumes and other ranks."""
    def setup(self):
        # 3-D labelled test volume and its expected connected components.
        self.x = np.zeros((3, 4, 5), int)
        self.x[0] = np.array([[0, 3, 2, 1, 9],
                              [0, 1, 9, 2, 9],
                              [0, 1, 9, 9, 9],
                              [3, 1, 5, 3, 0]])
        self.x[1] = np.array([[3, 3, 2, 1, 9],
                              [0, 3, 9, 2, 1],
                              [0, 3, 3, 1, 1],
                              [3, 1, 3, 3, 0]])
        self.x[2] = np.array([[3, 3, 8, 8, 0],
                              [2, 3, 9, 8, 8],
                              [2, 3, 0, 8, 0],
                              [2, 1, 0, 0, 0]])
        self.labels = np.zeros((3, 4, 5), int)
        self.labels[0] = np.array([[0, 1, 2, 3, 4],
                                   [0, 5, 4, 2, 4],
                                   [0, 5, 4, 4, 4],
                                   [1, 5, 6, 1, 0]])
        self.labels[1] = np.array([[1, 1, 2, 3, 4],
                                   [0, 1, 4, 2, 3],
                                   [0, 1, 1, 3, 3],
                                   [1, 5, 1, 1, 0]])
        self.labels[2] = np.array([[1, 1, 7, 7, 0],
                                   [8, 1, 4, 7, 7],
                                   [8, 1, 0, 7, 0],
                                   [8, 5, 0, 0, 0]])
    def test_basic(self):
        labels = label(self.x)
        assert_array_equal(labels, self.labels)
        assert self.x[0, 0, 2] == 2, \
            "Data was modified!"
    def test_random(self):
        # Every labelled region must be constant-valued in the input.
        # BUGFIX: ``np.int`` is a deprecated (and later removed) alias of
        # the builtin ``int``; use the builtin directly.
        x = (np.random.rand(20, 30) * 5).astype(int)
        labels = label(x)
        n = labels.max()
        for i in range(n):
            values = x[labels == i]
            assert np.all(values == values[0])
    def test_diag(self):
        x = np.zeros((3, 3, 3), int)
        x[0, 2, 2] = 1
        x[1, 1, 1] = 1
        x[2, 0, 0] = 1
        assert_array_equal(label(x), x)
    def test_4_vs_8(self):
        x = np.zeros((2, 2, 2), int)
        x[0, 1, 1] = 1
        x[1, 0, 0] = 1
        label4 = x.copy()
        label4[1, 0, 0] = 2
        assert_array_equal(label(x, 4), label4)
        assert_array_equal(label(x, 8), x)
    def test_background(self):
        x = np.zeros((2, 3, 3), int)
        x[0] = np.array([[1, 0, 0],
                         [1, 0, 0],
                         [0, 0, 0]])
        x[1] = np.array([[0, 0, 0],
                         [0, 1, 5],
                         [0, 0, 0]])
        lnb = x.copy()
        lnb[0] = np.array([[1, 2, 2],
                           [1, 2, 2],
                           [2, 2, 2]])
        lnb[1] = np.array([[2, 2, 2],
                           [2, 1, 3],
                           [2, 2, 2]])
        lb = x.copy()
        lb[0] = np.array([[1, BG, BG],
                          [1, BG, BG],
                          [BG, BG, BG]])
        lb[1] = np.array([[BG, BG, BG],
                          [BG, 1, 2],
                          [BG, BG, BG]])
        assert_array_equal(label(x), lb)
        assert_array_equal(label(x, background=-1), lnb)
    def test_background_two_regions(self):
        x = np.zeros((2, 3, 3), int)
        x[0] = np.array([[0, 0, 6],
                         [0, 0, 6],
                         [5, 5, 5]])
        x[1] = np.array([[6, 6, 0],
                         [5, 0, 0],
                         [0, 0, 0]])
        lb = x.copy()
        lb[0] = np.array([[BG, BG, 1],
                          [BG, BG, 1],
                          [2, 2, 2]])
        lb[1] = np.array([[1, 1, BG],
                          [2, BG, BG],
                          [BG, BG, BG]])
        res = label(x, background=0)
        assert_array_equal(res, lb)
    def test_background_one_region_center(self):
        x = np.zeros((3, 3, 3), int)
        x[1, 1, 1] = 1
        lb = np.ones_like(x) * BG
        lb[1, 1, 1] = 1
        assert_array_equal(label(x, neighbors=4, background=0), lb)
    def test_return_num(self):
        x = np.array([[1, 0, 6],
                      [0, 0, 6],
                      [5, 5, 5]])
        assert_array_equal(label(x, return_num=True)[1], 3)
        assert_array_equal(label(x, background=-1, return_num=True)[1], 4)
    def test_1D(self):
        # 1-D data must work regardless of which axis carries the length.
        x = np.array((0, 1, 2, 2, 1, 1, 0, 0))
        xlen = len(x)
        y = np.array((0, 1, 2, 2, 3, 3, 0, 0))
        reshapes = ((xlen,),
                    (1, xlen), (xlen, 1),
                    (1, xlen, 1), (xlen, 1, 1), (1, 1, xlen))
        for reshape in reshapes:
            x2 = x.reshape(reshape)
            labelled = label(x2)
            assert_array_equal(y, labelled.flatten())
    def test_nd(self):
        # Rank > 3 is not supported.
        x = np.ones((1, 2, 3, 4))
        np.testing.assert_raises(NotImplementedError, label, x)
class TestSupport:
    """Tests for the reshape helpers in skimage.measure._ccomp."""
    def test_reshape(self):
        # reshape_array must move all length-1 axes to the front, and
        # undo_reshape_array must restore the original layout exactly.
        shapes_in = ((3, 1, 2), (1, 4, 5), (3, 1, 1), (2, 1), (1,))
        for shape in shapes_in:
            shape = np.array(shape)
            numones = sum(shape == 1)
            inp = np.random.random(shape)
            fixed, swaps = ccomp.reshape_array(inp)
            shape2 = fixed.shape
            # now check that all ones are at the beginning
            for i in range(numones):
                assert shape2[i] == 1
            back = ccomp.undo_reshape_array(fixed, swaps)
            # check that the undo works as expected
            assert_array_equal(inp, back)
if __name__ == "__main__":
run_module_suite()
|
souravsingh/sympy | refs/heads/master | sympy/holonomic/tests/test_recurrence.py | 33 | from sympy.holonomic.recurrence import RecurrenceOperators, RecurrenceOperator
from sympy import symbols, ZZ, QQ
def test_RecurrenceOperator():
    """Shift-operator arithmetic: Sn * p(n) == p(n + 1) * Sn."""
    n = symbols('n', integer=True)
    R, Sn = RecurrenceOperators(QQ.old_poly_ring(n), 'Sn')
    assert Sn*n == (n + 1)*Sn
    assert Sn*n**2 == (n**2+1+2*n)*Sn
    assert Sn**2*n**2 == (n**2 + 4*n + 4)*Sn**2
    # Powers and products of operators normalise to the same canonical form.
    p = (Sn**3*n**2 + Sn*n)**2
    q = (n**2 + 3*n + 2)*Sn**2 + (2*n**3 + 19*n**2 + 57*n + 52)*Sn**4 + (n**4 + 18*n**3 + \
        117*n**2 + 324*n + 324)*Sn**6
    assert p == q
|
pdellaert/ansible | refs/heads/devel | lib/ansible/plugins/connection/qubes.py | 47 | # Based on the buildah connection plugin
# Copyright (c) 2017 Ansible Project
# 2018 Kushal Das
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
#
# Written by: Kushal Das (https://github.com/kushaldas)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
connection: qubes
short_description: Interact with an existing QubesOS AppVM
description:
- Run commands or put/fetch files to an existing Qubes AppVM using qubes tools.
author: Kushal Das (@kushaldas)
version_added: "2.8"
options:
remote_addr:
description:
- vm name
default: inventory_hostname
vars:
- name: ansible_host
remote_user:
description:
- The user to execute as inside the vm.
default: The *user* account as default in Qubes OS.
vars:
- name: ansible_user
# keyword:
# - name: hosts
"""
import shlex
import shutil
import os
import base64
import subprocess
import ansible.constants as C
from ansible.module_utils._text import to_bytes, to_native
from ansible.plugins.connection import ConnectionBase, ensure_connect
from ansible.errors import AnsibleConnectionFailure
from ansible.utils.display import Display
display = Display()
# this _has to be_ named Connection
class Connection(ConnectionBase):
    """Connection plugin for Qubes OS: drives qvm-run to talk to an AppVM."""

    # String used to identify this Connection class from other classes
    transport = 'qubes'
    has_pipelining = True

    def __init__(self, play_context, new_stdin, *args, **kwargs):
        super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)

        self._remote_vmname = self._play_context.remote_addr
        self._connected = False
        # Default username in Qubes
        self.user = "user"
        if self._play_context.remote_user:
            self.user = self._play_context.remote_user

    def _qubes(self, cmd=None, in_data=None, shell="qubes.VMShell"):
        """run qvm-run executable

        :param cmd: cmd string for remote system
        :param in_data: data passed to qvm-run-vm's stdin
        :param shell: qrexec service used to execute the command
        :return: return code, stdout, stderr
        """
        # BUGFIX: the command was previously passed as Display.vvvv()'s
        # ``host`` argument; format it into the message instead.
        display.vvvv("CMD: %s" % cmd)
        if not cmd.endswith("\n"):
            cmd = cmd + "\n"
        local_cmd = []

        # For dom0
        local_cmd.extend(["qvm-run", "--pass-io", "--service"])
        if self.user != "user":
            # Means we have a remote_user value
            local_cmd.extend(["-u", self.user])

        local_cmd.append(self._remote_vmname)
        local_cmd.append(shell)

        local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]

        display.vvvv("Local cmd: %s" % local_cmd)

        display.vvv("RUN %s" % (local_cmd,), host=self._remote_vmname)
        p = subprocess.Popen(local_cmd, shell=False, stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)

        # Here we are writing the actual command to the remote bash
        p.stdin.write(to_bytes(cmd, errors='surrogate_or_strict'))
        stdout, stderr = p.communicate(input=in_data)
        return p.returncode, stdout, stderr

    def _connect(self):
        """No persistent connection is being maintained."""
        super(Connection, self)._connect()
        self._connected = True

    @ensure_connect
    def exec_command(self, cmd, in_data=None, sudoable=False):
        """Run specified command in a running QubesVM """
        super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)

        display.vvvv("CMD IS: %s" % cmd)

        # NOTE(review): in_data is not forwarded to _qubes() even though
        # has_pipelining is True -- verify pipelining actually works.
        rc, stdout, stderr = self._qubes(cmd)

        # BUGFIX: the log line used to print (stderr, stderr).
        display.vvvvv("STDOUT %r STDERR %r" % (stdout, stderr))
        return rc, stdout, stderr

    def put_file(self, in_path, out_path):
        """ Place a local file located in 'in_path' inside VM at 'out_path' """
        super(Connection, self).put_file(in_path, out_path)
        display.vvv("PUT %s TO %s" % (in_path, out_path), host=self._remote_vmname)

        with open(in_path, "rb") as fobj:
            source_data = fobj.read()

        retcode, dummy, dummy = self._qubes('cat > "{0}"\n'.format(out_path), source_data, "qubes.VMRootShell")
        # if qubes.VMRootShell service not supported, fallback to qubes.VMShell and
        # hope it will have appropriate permissions
        if retcode == 127:
            retcode, dummy, dummy = self._qubes('cat > "{0}"\n'.format(out_path), source_data)
        if retcode != 0:
            raise AnsibleConnectionFailure('Failed to put_file to {0}'.format(out_path))

    def fetch_file(self, in_path, out_path):
        """Obtain file specified via 'in_path' from the container and place it at 'out_path' """
        super(Connection, self).fetch_file(in_path, out_path)
        display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self._remote_vmname)

        # We are running in dom0
        cmd_args_list = ["qvm-run", "--pass-io", self._remote_vmname, "cat {0}".format(in_path)]
        with open(out_path, "wb") as fobj:
            p = subprocess.Popen(cmd_args_list, shell=False, stdout=fobj)
            p.communicate()
            if p.returncode != 0:
                raise AnsibleConnectionFailure('Failed to fetch file to {0}'.format(out_path))

    def close(self):
        """ Closing the connection """
        super(Connection, self).close()
        self._connected = False
|
joeythesaint/yocto-autobuilder | refs/heads/master | lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/python/test/test_versions.py | 7 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
import sys
from cStringIO import StringIO
from twisted.python.versions import getVersionString, IncomparableVersions
from twisted.python.versions import Version, _inf
from twisted.python.filepath import FilePath
from twisted.trial import unittest
VERSION_4_ENTRIES = """\
<?xml version="1.0" encoding="utf-8"?>
<wc-entries
xmlns="svn:">
<entry
committed-rev="18210"
name=""
committed-date="2006-09-21T04:43:09.542953Z"
url="svn+ssh://svn.twistedmatrix.com/svn/Twisted/trunk/twisted"
last-author="exarkun"
kind="dir"
uuid="bbbe8e31-12d6-0310-92fd-ac37d47ddeeb"
repos="svn+ssh://svn.twistedmatrix.com/svn/Twisted"
revision="18211"/>
</wc-entries>
"""
VERSION_8_ENTRIES = """\
8
dir
22715
svn+ssh://svn.twistedmatrix.com/svn/Twisted/trunk
"""
VERSION_9_ENTRIES = """\
9
dir
22715
svn+ssh://svn.twistedmatrix.com/svn/Twisted/trunk
"""
VERSION_10_ENTRIES = """\
10
dir
22715
svn+ssh://svn.twistedmatrix.com/svn/Twisted/trunk
"""
class VersionsTest(unittest.TestCase):
    """Tests for L{Version} comparison, formatting and SVN entry parsing."""

    def test_versionComparison(self):
        """
        Versions can be compared for equality and order.
        """
        va = Version("dummy", 1, 0, 0)
        vb = Version("dummy", 0, 1, 0)
        # Modernized: failUnless/failIf are deprecated aliases of
        # assertTrue/assertFalse (already used elsewhere in this class).
        self.assertTrue(va > vb)
        self.assertTrue(vb < va)
        self.assertTrue(va >= vb)
        self.assertTrue(vb <= va)
        self.assertTrue(va != vb)
        self.assertTrue(vb == Version("dummy", 0, 1, 0))
        self.assertTrue(vb == vb)
        # BREAK IT DOWN@!!
        self.assertFalse(va < vb)
        self.assertFalse(vb > va)
        self.assertFalse(va <= vb)
        self.assertFalse(vb >= va)
        self.assertFalse(va == vb)
        self.assertFalse(vb != Version("dummy", 0, 1, 0))
        self.assertFalse(vb != vb)

    def test_comparingPrereleasesWithReleases(self):
        """
        Prereleases are always less than versions without prereleases.
        """
        va = Version("whatever", 1, 0, 0, prerelease=1)
        vb = Version("whatever", 1, 0, 0)
        self.assertTrue(va < vb)
        self.assertFalse(va > vb)
        self.assertNotEqual(vb, va)

    def test_comparingPrereleases(self):
        """
        The value specified as the prerelease is used in version comparisons.
        """
        va = Version("whatever", 1, 0, 0, prerelease=1)
        vb = Version("whatever", 1, 0, 0, prerelease=2)
        self.assertTrue(va < vb)
        self.assertFalse(va > vb)
        self.assertNotEqual(va, vb)

    def test_infComparison(self):
        """
        L{_inf} is equal to L{_inf}.
        This is a regression test.
        """
        self.assertEqual(_inf, _inf)

    def testDontAllowBuggyComparisons(self):
        """
        Versions of different packages cannot be compared for order.
        """
        self.assertRaises(IncomparableVersions,
                          cmp,
                          Version("dummy", 1, 0, 0),
                          Version("dumym", 1, 0, 0))

    def test_repr(self):
        """
        Calling C{repr} on a version returns a human-readable string
        representation of the version.
        """
        self.assertEqual(repr(Version("dummy", 1, 2, 3)),
                         "Version('dummy', 1, 2, 3)")

    def test_reprWithPrerelease(self):
        """
        Calling C{repr} on a version with a prerelease returns a human-readable
        string representation of the version including the prerelease.
        """
        self.assertEqual(repr(Version("dummy", 1, 2, 3, prerelease=4)),
                         "Version('dummy', 1, 2, 3, prerelease=4)")

    def test_str(self):
        """
        Calling C{str} on a version returns a human-readable string
        representation of the version.
        """
        self.assertEqual(str(Version("dummy", 1, 2, 3)),
                         "[dummy, version 1.2.3]")

    def test_strWithPrerelease(self):
        """
        Calling C{str} on a version with a prerelease includes the prerelease.
        """
        self.assertEqual(str(Version("dummy", 1, 0, 0, prerelease=1)),
                         "[dummy, version 1.0.0pre1]")

    def testShort(self):
        """
        The C{short} method returns the dotted version number only.
        """
        self.assertEqual(Version('dummy', 1, 2, 3).short(), '1.2.3')

    def test_goodSVNEntries_4(self):
        """
        Version should be able to parse an SVN format 4 entries file.
        """
        version = Version("dummy", 1, 0, 0)
        self.assertEqual(
            version._parseSVNEntries_4(StringIO(VERSION_4_ENTRIES)), '18211')

    def test_goodSVNEntries_8(self):
        """
        Version should be able to parse an SVN format 8 entries file.
        """
        version = Version("dummy", 1, 0, 0)
        self.assertEqual(
            version._parseSVNEntries_8(StringIO(VERSION_8_ENTRIES)), '22715')

    def test_goodSVNEntries_9(self):
        """
        Version should be able to parse an SVN format 9 entries file.
        """
        version = Version("dummy", 1, 0, 0)
        self.assertEqual(
            version._parseSVNEntries_9(StringIO(VERSION_9_ENTRIES)), '22715')

    def test_goodSVNEntriesTenPlus(self):
        """
        Version should be able to parse an SVN format 10 entries file.
        """
        version = Version("dummy", 1, 0, 0)
        self.assertEqual(
            version._parseSVNEntriesTenPlus(StringIO(VERSION_10_ENTRIES)), '22715')

    def test_getVersionString(self):
        """
        L{getVersionString} returns a string with the package name and the
        short version number.
        """
        self.assertEqual(
            'Twisted 8.0.0', getVersionString(Version('Twisted', 8, 0, 0)))

    def test_getVersionStringWithPrerelease(self):
        """
        L{getVersionString} includes the prerelease, if any.
        """
        self.assertEqual(
            getVersionString(Version("whatever", 8, 0, 0, prerelease=1)),
            "whatever 8.0.0pre1")

    def test_base(self):
        """
        The L{base} method returns a very simple representation of the version.
        """
        self.assertEqual(Version("foo", 1, 0, 0).base(), "1.0.0")

    def test_baseWithPrerelease(self):
        """
        The base version includes 'preX' for versions with prereleases.
        """
        self.assertEqual(Version("foo", 1, 0, 0, prerelease=8).base(),
                         "1.0.0pre8")
class FormatDiscoveryTests(unittest.TestCase):
    """
    Tests which discover the parsing method based on the imported module name.
    """
    def setUp(self):
        """
        Create a temporary directory with a package structure in it.
        """
        self.entry = FilePath(self.mktemp())
        # Snapshot sys.modules/sys.path so tearDown can restore them.
        self.preTestModules = sys.modules.copy()
        sys.path.append(self.entry.path)
        pkg = self.entry.child("twisted_python_versions_package")
        pkg.makedirs()
        pkg.child("__init__.py").setContent(
            "from twisted.python.versions import Version\n"
            "version = Version('twisted_python_versions_package', 1, 0, 0)\n")
        self.svnEntries = pkg.child(".svn")
        self.svnEntries.makedirs()
    def tearDown(self):
        """
        Remove the imported modules and sys.path modifications.
        """
        sys.modules.clear()
        sys.modules.update(self.preTestModules)
        sys.path.remove(self.entry.path)
    def checkSVNFormat(self, formatVersion, entriesText, expectedRevision):
        """
        Check for the given revision being detected after setting the SVN
        entries text and format version of the test directory structure.
        """
        self.svnEntries.child("format").setContent(formatVersion+"\n")
        self.svnEntries.child("entries").setContent(entriesText)
        self.assertEqual(self.getVersion()._getSVNVersion(), expectedRevision)
    def getVersion(self):
        """
        Import and retrieve the Version object from our dynamically created
        package.
        """
        import twisted_python_versions_package
        return twisted_python_versions_package.version
    def test_detectVersion4(self):
        """
        Verify that version 4 format file will be properly detected and parsed.
        """
        self.checkSVNFormat("4", VERSION_4_ENTRIES, '18211')
    def test_detectVersion8(self):
        """
        Verify that version 8 format files will be properly detected and
        parsed.
        """
        self.checkSVNFormat("8", VERSION_8_ENTRIES, '22715')
    def test_detectVersion9(self):
        """
        Verify that version 9 format files will be properly detected and
        parsed.
        """
        self.checkSVNFormat("9", VERSION_9_ENTRIES, '22715')
    def test_detectVersion10(self):
        """
        Verify that version 10 format files will be properly detected and
        parsed.
        Differing from previous formats, the version 10 format lacks a
        I{format} file and B{only} has the version information on the first
        line of the I{entries} file.
        """
        self.svnEntries.child("entries").setContent(VERSION_10_ENTRIES)
        self.assertEqual(self.getVersion()._getSVNVersion(), '22715')
    def test_detectUnknownVersion(self):
        """
        Verify that a new version of SVN will result in the revision 'Unknown'.
        """
        self.checkSVNFormat("some-random-new-version", "ooga booga!", 'Unknown')
|
gangadharkadam/smrtfrappe | refs/heads/develop | frappe/core/doctype/page_role/__init__.py | 2292 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
|
misdoro/python-ase | refs/heads/master | ase/gui/ag.py | 2 | from __future__ import print_function
# Copyright 2008, 2009
# CAMd (see accompanying license files for details).
from __future__ import print_function
import sys
from optparse import OptionParser
import ase.gui.i18n
from gettext import gettext as _
# Grrr, older versions (pre-python2.7) of optparse have a bug
# which prevents non-ascii descriptions. How do we circumvent this?
# For now, we'll have to use English in the command line options then.
def build_parser():
    """Build and return the OptionParser for the ase-gui command line."""
    parser = OptionParser(usage='%prog [options] [file[, file2, ...]]',
                          version='%prog 0.1',
                          description='See the online manual ' +
                          '(https://wiki.fysik.dtu.dk/ase/ase/gui/gui.html) ' +
                          'for more information.')
    parser.add_option('-n', '--image-number',
                      default=':', metavar='NUMBER',
                      help='Pick image(s) from trajectory. NUMBER can be a '
                      'single number (use a negative number to count from '
                      'the back) or a range: start:stop:step, where the '
                      '":step" part can be left out - default values are '
                      '0:nimages:1.')
    parser.add_option('-u', '--show-unit-cell', type='int',
                      default=1, metavar='I',
                      help="0: Don't show unit cell. 1: Show unit cell. "
                      '2: Show all of unit cell.')
    parser.add_option('-r', '--repeat',
                      default='1',
                      help='Repeat unit cell. Use "-r 2" or "-r 2,3,1".')
    parser.add_option('-R', '--rotations', default='',
                      help='Examples: "-R -90x", "-R 90z,-30x".')
    parser.add_option('-o', '--output', metavar='FILE',
                      help='Write configurations to FILE.')
    parser.add_option('-g', '--graph',
                      # TRANSLATORS: EXPR abbreviates 'expression'
                      metavar='EXPR',
                      help='Plot x,y1,y2,... graph from configurations or '
                      'write data to sdtout in terminal mode. Use the '
                      'symbols: i, s, d, fmax, e, ekin, A, R, E and F. See '
                      'https://wiki.fysik.dtu.dk/ase/ase/gui/gui.html'
                      '#plotting-data for more details.')
    parser.add_option('-t', '--terminal',
                      action='store_true',
                      default=False,
                      help='Run in terminal window - no GUI.')
    parser.add_option('--aneb',
                      action='store_true',
                      default=False,
                      help='Read ANEB data.')
    parser.add_option('--interpolate',
                      type='int', metavar='N',
                      help='Interpolate N images between 2 given images.')
    parser.add_option('-b', '--bonds',
                      action='store_true',
                      default=False,
                      help='Draw bonds between atoms.')
    parser.add_option('-s', '--scale', dest='radii_scale', metavar='FLOAT',
                      default=None, type=float,
                      help='Scale covalent radii.')
    parser.add_option('-v', '--verbose', action='store_true',
                      help='Verbose mode.')
    return parser
def main():
    """ase-gui entry point: parse options, load images, run GUI or terminal mode."""
    parser = build_parser()
    opt, args = parser.parse_args()
    try:
        import ase
    except ImportError:
        # Allow running from a source checkout: add the parent directory
        # of this file to sys.path so 'ase' becomes importable.
        from os.path import dirname, join, pardir
        sys.path.append(join(dirname(__file__), pardir))
    from ase.gui.images import Images
    from ase.atoms import Atoms
    def run(opt, args):
        # Load images from the given files (or start with an empty Atoms),
        # apply the requested transformations, then display or print.
        images = Images()
        if opt.aneb:
            opt.image_number = '-1'
        if len(args) > 0:
            from ase.io import string2index
            try:
                images.read(args, string2index(opt.image_number))
            except IOError as e:
                if len(e.args) == 1:
                    parser.error(e.args[0])
                else:
                    parser.error(e.args[1] + ': ' + e.filename)
        else:
            images.initialize([Atoms()])
        if opt.interpolate:
            images.interpolate(opt.interpolate)
        if opt.aneb:
            images.aneb()
        if opt.repeat != '1':
            r = opt.repeat.split(',')
            if len(r) == 1:
                r = 3 * r
            images.repeat_images([int(c) for c in r])
        if opt.radii_scale:
            images.set_radii(opt.radii_scale)
        if opt.output is not None:
            # Writing an output file implies terminal (non-GUI) mode.
            images.write(opt.output, rotations=opt.rotations,
                         show_unit_cell=opt.show_unit_cell)
            opt.terminal = True
        if opt.terminal:
            if opt.graph is not None:
                data = images.graph(opt.graph)
                for line in data.T:
                    for x in line:
                        print(x, end=' ')
                    print()
        else:
            from ase.gui.gui import GUI
            import ase.gui.gtkexcepthook
            gui = GUI(images, opt.rotations, opt.show_unit_cell, opt.bonds)
            gui.run(opt.graph)
    try:
        run(opt, args)
    except KeyboardInterrupt:
        pass
    except Exception as x:
        # Without --verbose, show a short error instead of a traceback.
        if opt.verbose:
            raise
        else:
            print('{0}: {1}'.format(x.__class__.__name__, x), file=sys.stderr)
            print(_('To get a full traceback, use: ase-gui --verbose'),
                  file=sys.stderr)
|
totallybradical/temp_servo2 | refs/heads/master | tests/wpt/web-platform-tests/tools/wptserve/wptserve/constants.py | 141 | from . import utils
# Content-Type table for serving static files. Written as
# {content_type: [extension, ...]} and passed through utils.invert_dict,
# which presumably produces the {extension: content_type} lookup actually
# used at request time — confirm against wptserve.utils.invert_dict.
content_types = utils.invert_dict({"text/html": ["htm", "html"],
                                   "application/json": ["json"],
                                   "application/xhtml+xml": ["xht", "xhtm", "xhtml"],
                                   "application/xml": ["xml"],
                                   "application/x-xpinstall": ["xpi"],
                                   "text/javascript": ["js"],
                                   "text/css": ["css"],
                                   "text/plain": ["txt", "md"],
                                   "image/svg+xml": ["svg"],
                                   "image/gif": ["gif"],
                                   "image/jpeg": ["jpg", "jpeg"],
                                   "image/png": ["png"],
                                   "image/bmp": ["bmp"],
                                   "text/event-stream": ["event_stream"],
                                   "text/cache-manifest": ["manifest"],
                                   "video/mp4": ["mp4", "m4v"],
                                   "audio/mp4": ["m4a"],
                                   "audio/mpeg": ["mp3"],
                                   "video/webm": ["webm"],
                                   "audio/webm": ["weba"],
                                   "video/ogg": ["ogg", "ogv"],
                                   "audio/ogg": ["oga"],
                                   "audio/x-wav": ["wav"],
                                   "text/vtt": ["vtt"],})
# HTTP status code -> (short reason phrase, long description).
# The reason phrase is what goes on the response status line; the long
# description is informational only.
response_codes = {
    100: ('Continue', 'Request received, please continue'),
    101: ('Switching Protocols',
          'Switching to new protocol; obey Upgrade header'),
    200: ('OK', 'Request fulfilled, document follows'),
    201: ('Created', 'Document created, URL follows'),
    202: ('Accepted',
          'Request accepted, processing continues off-line'),
    203: ('Non-Authoritative Information', 'Request fulfilled from cache'),
    204: ('No Content', 'Request fulfilled, nothing follows'),
    205: ('Reset Content', 'Clear input form for further input.'),
    206: ('Partial Content', 'Partial content follows.'),
    300: ('Multiple Choices',
          'Object has several resources -- see URI list'),
    301: ('Moved Permanently', 'Object moved permanently -- see URI list'),
    302: ('Found', 'Object moved temporarily -- see URI list'),
    303: ('See Other', 'Object moved -- see Method and URL list'),
    304: ('Not Modified',
          'Document has not changed since given time'),
    305: ('Use Proxy',
          'You must use proxy specified in Location to access this '
          'resource.'),
    307: ('Temporary Redirect',
          'Object moved temporarily -- see URI list'),
    400: ('Bad Request',
          'Bad request syntax or unsupported method'),
    401: ('Unauthorized',
          'No permission -- see authorization schemes'),
    402: ('Payment Required',
          'No payment -- see charging schemes'),
    403: ('Forbidden',
          'Request forbidden -- authorization will not help'),
    404: ('Not Found', 'Nothing matches the given URI'),
    405: ('Method Not Allowed',
          'Specified method is invalid for this resource.'),
    406: ('Not Acceptable', 'URI not available in preferred format.'),
    407: ('Proxy Authentication Required', 'You must authenticate with '
          'this proxy before proceeding.'),
    408: ('Request Timeout', 'Request timed out; try again later.'),
    409: ('Conflict', 'Request conflict.'),
    410: ('Gone',
          'URI no longer exists and has been permanently removed.'),
    411: ('Length Required', 'Client must specify Content-Length.'),
    412: ('Precondition Failed', 'Precondition in headers is false.'),
    413: ('Request Entity Too Large', 'Entity is too large.'),
    414: ('Request-URI Too Long', 'URI is too long.'),
    415: ('Unsupported Media Type', 'Entity body in unsupported format.'),
    416: ('Requested Range Not Satisfiable',
          'Cannot satisfy request range.'),
    417: ('Expectation Failed',
          'Expect condition could not be satisfied.'),
    500: ('Internal Server Error', 'Server got itself in trouble'),
    501: ('Not Implemented',
          'Server does not support this operation'),
    502: ('Bad Gateway', 'Invalid responses from another server/proxy.'),
    503: ('Service Unavailable',
          'The server cannot process the request due to a high load'),
    504: ('Gateway Timeout',
          'The gateway server did not receive a timely response'),
    505: ('HTTP Version Not Supported', 'Cannot fulfill request.'),
    }
|
ArneBab/pypyjs | refs/heads/master | website/demo/home/rfk/repos/pypy/lib-python/2.7/plat-mac/lib-scriptpackages/Terminal/Terminal_Suite.py | 82 | """Suite Terminal Suite: Terms and Events for controlling the Terminal application
Level 1, version 1
Generated from /Applications/Utilities/Terminal.app
AETE/AEUT resource version 1/0, language 0, script 0
"""
import aetools
import MacOS
_code = 'trmx'
class Terminal_Suite_Events:
    # NOTE(review): machine-generated Python 2 AppleEvent glue (see module
    # docstring); uses py2-only `raise E, v` and dict.has_key. Do not port
    # or hand-edit — regenerate from the application's AETE/AEUT resources.
    # `send` is provided by the aetools mixin this class is combined with.
    def GetURL(self, _object, _attributes={}, **_arguments):
        """GetURL: Opens a telnet: URL
        Required argument: the object for the command
        Keyword argument _attributes: AppleEvent attribute dictionary
        """
        _code = 'GURL'
        _subcode = 'GURL'
        if _arguments: raise TypeError, 'No optional args expected'
        _arguments['----'] = _object
        _reply, _arguments, _attributes = self.send(_code, _subcode,
                _arguments, _attributes)
        # 'errn' carries the AppleEvent error number, if any.
        if _arguments.get('errn', 0):
            raise aetools.Error, aetools.decodeerror(_arguments)
        # XXXX Optionally decode result
        if _arguments.has_key('----'):
            return _arguments['----']
    # Keyword-argument name -> AppleEvent parameter code for do_script.
    _argmap_do_script = {
        'in_' : 'kfil',
        'with_command' : 'cmnd',
    }
    def do_script(self, _object, _attributes={}, **_arguments):
        """do script: Run a UNIX shell script or command
        Required argument: the object for the command
        Keyword argument in_: the window in which to execute the command
        Keyword argument with_command: data to be passed to the Terminal application as the command line, deprecated, use direct parameter
        Keyword argument _attributes: AppleEvent attribute dictionary
        Returns: the reply for the command
        """
        _code = 'core'
        _subcode = 'dosc'
        aetools.keysubst(_arguments, self._argmap_do_script)
        _arguments['----'] = _object
        _reply, _arguments, _attributes = self.send(_code, _subcode,
                _arguments, _attributes)
        if _arguments.get('errn', 0):
            raise aetools.Error, aetools.decodeerror(_arguments)
        # XXXX Optionally decode result
        if _arguments.has_key('----'):
            return _arguments['----']
# Generated OSA class glue: each AppleScript class becomes a ComponentItem
# subclass, and each of its properties a module-level _Prop_* NProperty
# with its 4-char AppleEvent codes (`which` = property code, `want` = type).
class application(aetools.ComponentItem):
    """application - The Terminal program """
    want = 'capp'
class _Prop__3c_Inheritance_3e_(aetools.NProperty):
    """<Inheritance> - All of the properties of the superclass. """
    which = 'c@#^'
    want = 'capp'
_3c_Inheritance_3e_ = _Prop__3c_Inheritance_3e_()
class _Prop_properties(aetools.NProperty):
    """properties - every property of the Terminal program """
    which = 'pALL'
    want = '****'
properties = _Prop_properties()
#        element 'cwin' as ['name', 'indx', 'rele', 'rang', 'test', 'ID ']
#        element 'docu' as ['name', 'indx', 'rele', 'rang', 'test']
# Plural alias used by aetools lookups.
applications = application
# Generated OSA glue for the Terminal `window` class and its properties.
# Same pattern as `application` above: `which` is the 4-char AppleEvent
# property code, `want` the expected type code ('****' = any, 'bool',
# 'long', 'utxt' = Unicode text).
class window(aetools.ComponentItem):
    """window - A Terminal window """
    want = 'cwin'
class _Prop_background_color(aetools.NProperty):
    """background color - the background color for the window """
    which = 'pbcl'
    want = '****'
class _Prop_bold_text_color(aetools.NProperty):
    """bold text color - the bold text color for the window """
    which = 'pbtc'
    want = '****'
class _Prop_bounds(aetools.NProperty):
    """bounds - the boundary rectangle for the window, relative to the upper left corner of the screen """
    which = 'pbnd'
    want = '****'
class _Prop_busy(aetools.NProperty):
    """busy - Is the window busy running a process? """
    which = 'busy'
    want = 'bool'
class _Prop_contents(aetools.NProperty):
    """contents - the currently visible contents of the window """
    which = 'pcnt'
    want = 'utxt'
class _Prop_cursor_color(aetools.NProperty):
    """cursor color - the cursor color for the window """
    which = 'pcuc'
    want = '****'
class _Prop_custom_title(aetools.NProperty):
    """custom title - the custom title for the window """
    which = 'titl'
    want = 'utxt'
class _Prop_frame(aetools.NProperty):
    """frame - the origin and size of the window """
    which = 'pfra'
    want = '****'
class _Prop_frontmost(aetools.NProperty):
    """frontmost - Is the window in front of the other Terminal windows? """
    which = 'pisf'
    want = 'bool'
class _Prop_history(aetools.NProperty):
    """history - the contents of the entire scrolling buffer of the window """
    which = 'hist'
    want = 'utxt'
class _Prop_normal_text_color(aetools.NProperty):
    """normal text color - the normal text color for the window """
    which = 'ptxc'
    want = '****'
class _Prop_number_of_columns(aetools.NProperty):
    """number of columns - the number of columns in the window """
    which = 'ccol'
    want = 'long'
class _Prop_number_of_rows(aetools.NProperty):
    """number of rows - the number of rows in the window """
    which = 'crow'
    want = 'long'
class _Prop_origin(aetools.NProperty):
    """origin - the lower left coordinates of the window, relative to the lower left corner of the screen """
    which = 'pori'
    want = '****'
class _Prop_position(aetools.NProperty):
    """position - the upper left coordinates of the window, relative to the upper left corner of the screen """
    which = 'ppos'
    want = '****'
class _Prop_processes(aetools.NProperty):
    """processes - a list of the currently running processes """
    which = 'prcs'
    want = 'utxt'
class _Prop_size(aetools.NProperty):
    """size - the width and height of the window """
    which = 'psiz'
    want = '****'
class _Prop_title_displays_custom_title(aetools.NProperty):
    """title displays custom title - Does the title for the window contain a custom title? """
    which = 'tdct'
    want = 'bool'
class _Prop_title_displays_device_name(aetools.NProperty):
    """title displays device name - Does the title for the window contain the device name? """
    which = 'tddn'
    want = 'bool'
class _Prop_title_displays_file_name(aetools.NProperty):
    """title displays file name - Does the title for the window contain the file name? """
    which = 'tdfn'
    want = 'bool'
class _Prop_title_displays_shell_path(aetools.NProperty):
    """title displays shell path - Does the title for the window contain the shell path? """
    which = 'tdsp'
    want = 'bool'
class _Prop_title_displays_window_size(aetools.NProperty):
    """title displays window size - Does the title for the window contain the window size? """
    which = 'tdws'
    want = 'bool'
# Plural alias used by aetools lookups.
windows = window
# Generated wiring: attach the per-class property and element dictionaries
# that aetools uses to resolve AppleScript references at runtime.
application._superclassnames = []
import Standard_Suite
application._privpropdict = {
    '_3c_Inheritance_3e_' : _Prop__3c_Inheritance_3e_,
    'properties' : _Prop_properties,
}
application._privelemdict = {
    'document' : Standard_Suite.document,
    'window' : window,
}
window._superclassnames = []
window._privpropdict = {
    '_3c_Inheritance_3e_' : _Prop__3c_Inheritance_3e_,
    'background_color' : _Prop_background_color,
    'bold_text_color' : _Prop_bold_text_color,
    'bounds' : _Prop_bounds,
    'busy' : _Prop_busy,
    'contents' : _Prop_contents,
    'cursor_color' : _Prop_cursor_color,
    'custom_title' : _Prop_custom_title,
    'frame' : _Prop_frame,
    'frontmost' : _Prop_frontmost,
    'history' : _Prop_history,
    'normal_text_color' : _Prop_normal_text_color,
    'number_of_columns' : _Prop_number_of_columns,
    'number_of_rows' : _Prop_number_of_rows,
    'origin' : _Prop_origin,
    'position' : _Prop_position,
    'processes' : _Prop_processes,
    'properties' : _Prop_properties,
    'size' : _Prop_size,
    'title_displays_custom_title' : _Prop_title_displays_custom_title,
    'title_displays_device_name' : _Prop_title_displays_device_name,
    'title_displays_file_name' : _Prop_title_displays_file_name,
    'title_displays_shell_path' : _Prop_title_displays_shell_path,
    'title_displays_window_size' : _Prop_title_displays_window_size,
}
# window has no elements of its own.
window._privelemdict = {
}
#
# Indices of types declared in this module
#
# 4-char class code -> ComponentItem subclass.
_classdeclarations = {
    'capp' : application,
    'cwin' : window,
}
# 4-char property code -> NProperty subclass.
_propdeclarations = {
    'busy' : _Prop_busy,
    'c@#^' : _Prop__3c_Inheritance_3e_,
    'ccol' : _Prop_number_of_columns,
    'crow' : _Prop_number_of_rows,
    'hist' : _Prop_history,
    'pALL' : _Prop_properties,
    'pbcl' : _Prop_background_color,
    'pbnd' : _Prop_bounds,
    'pbtc' : _Prop_bold_text_color,
    'pcnt' : _Prop_contents,
    'pcuc' : _Prop_cursor_color,
    'pfra' : _Prop_frame,
    'pisf' : _Prop_frontmost,
    'pori' : _Prop_origin,
    'ppos' : _Prop_position,
    'prcs' : _Prop_processes,
    'psiz' : _Prop_size,
    'ptxc' : _Prop_normal_text_color,
    'tdct' : _Prop_title_displays_custom_title,
    'tddn' : _Prop_title_displays_device_name,
    'tdfn' : _Prop_title_displays_file_name,
    'tdsp' : _Prop_title_displays_shell_path,
    'tdws' : _Prop_title_displays_window_size,
    'titl' : _Prop_custom_title,
}
# No comparison or enumeration declarations in this suite.
_compdeclarations = {
}
_enumdeclarations = {
}
|
huiren/ece511 | refs/heads/master | src/arch/arm/kvm/ArmKvmCPU.py | 38 | # Copyright (c) 2012 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Andreas Sandberg
from m5.params import *
from BaseKvmCPU import BaseKvmCPU
class ArmKvmCPU(BaseKvmCPU):
    """Parameter class for the KVM-accelerated ARM CPU model.

    NOTE(review): `type` and `cxx_header` follow the usual gem5 SimObject
    convention of naming the backing C++ class and the header that
    declares it — confirm against src/arch/arm/kvm/arm_cpu.hh.
    """
    type = 'ArmKvmCPU'
    cxx_header = "arch/arm/kvm/arm_cpu.hh"
|
zstackio/zstack-woodpecker | refs/heads/master | integrationtest/vm/mini/multiclusters/paths/multi_path51.py | 1 | import zstackwoodpecker.test_state as ts_header
import os
TestAction = ts_header.TestAction
def path():
    """Return the test-path configuration for this multi-cluster scenario.

    The dict carries the initial VM formation template, the checkpoint to
    verify, a failure point, and the ordered list of test actions
    (each a [TestAction.<op>, *string args] row) to execute.
    NOTE(review): the key 'faild_point' [sic] is part of the schema the
    test framework consumes — do not "fix" the spelling here.
    """
    return dict(initial_formation="template5", checking_point=1, faild_point=100000, path_list=[
        [TestAction.create_mini_vm, 'vm1', 'cluster=cluster1'],
        [TestAction.change_vm_ha, 'vm1'],
        [TestAction.create_vm_backup, 'vm1', 'vm1-backup1'],
        [TestAction.change_vm_ha, 'vm1'],
        [TestAction.stop_vm, 'vm1'],
        [TestAction.create_mini_vm, 'vm2', 'cluster=cluster2'],
        [TestAction.create_image_from_volume, 'vm2', 'vm2-image1'],
        [TestAction.poweroff_only, 'cluster=cluster2'],
        [TestAction.create_volume, 'volume1', 'cluster=cluster2', 'flag=scsi'],
        [TestAction.attach_volume, 'vm2', 'volume1'],
        [TestAction.detach_volume, 'volume1'],
        [TestAction.create_volume, 'volume2', 'cluster=cluster1', 'flag=thin,scsi'],
        [TestAction.add_image, 'image2', 'root', 'http://172.20.1.28/mirror/diskimages/centos_vdbench.qcow2'],
        [TestAction.delete_vm_backup, 'vm1-backup1'],
        [TestAction.delete_image, 'vm2-image1'],
        [TestAction.recover_image, 'vm2-image1'],
        [TestAction.delete_image, 'vm2-image1'],
        [TestAction.expunge_image, 'vm2-image1'],
        [TestAction.start_vm, 'vm2'],
        [TestAction.create_vm_backup, 'vm2', 'vm2-backup2'],
        [TestAction.stop_vm, 'vm2'],
        [TestAction.create_mini_vm, 'vm3', 'cluster=cluster1'],
        [TestAction.poweroff_only, 'cluster=cluster2'],
        [TestAction.create_image_from_volume, 'vm2', 'vm2-image3'],
        [TestAction.create_volume, 'volume3', 'size=random', 'cluster=cluster2', 'flag=scsi'],
        [TestAction.create_volume, 'volume4', 'cluster=cluster1', 'flag=thin,scsi'],
        [TestAction.use_vm_backup, 'vm2-backup2'],
        [TestAction.create_mini_vm, 'vm4', 'memory=random', 'cluster=cluster2'],
        [TestAction.delete_volume, 'volume4'],
        [TestAction.expunge_volume, 'volume4'],
        [TestAction.create_mini_vm, 'vm5', 'data_volume=true', 'cluster=cluster2'],
        [TestAction.attach_volume, 'vm5', 'volume1'],
        [TestAction.create_volume_backup, 'volume1', 'volume1-backup3'],
        [TestAction.resize_volume, 'vm4', 5*1024*1024],
        [TestAction.poweroff_only, 'cluster=cluster1'],
        [TestAction.use_vm_backup, 'vm2-backup2'],
    ])
'''
The final status:
Running:['vm4', 'vm5']
Stopped:['vm2', 'vm3', 'vm1']
Enadbled:['vm2-backup2', 'volume1-backup3', 'image2', 'vm2-image3']
attached:['auto-volume5', 'volume1']
Detached:['volume2', 'volume3']
Deleted:['vm1-backup1']
Expunged:['volume4', 'vm2-image1']
Ha:[]
Group:
vm_backup1:['vm2-backup2']---vm2@
''' |
adrienbrault/home-assistant | refs/heads/dev | homeassistant/components/nmbs/sensor.py | 3 | """Get ride details and liveboard details for NMBS (Belgian railway)."""
import logging
from pyrail import iRail
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA, SensorEntity
from homeassistant.const import (
ATTR_ATTRIBUTION,
ATTR_LATITUDE,
ATTR_LONGITUDE,
CONF_NAME,
CONF_SHOW_ON_MAP,
TIME_MINUTES,
)
import homeassistant.helpers.config_validation as cv
import homeassistant.util.dt as dt_util
# Module logger; handlers use lazy %-formatting per logging best practice.
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = "NMBS"
# Material Design icons for the normal and delayed sensor states.
DEFAULT_ICON = "mdi:train"
DEFAULT_ICON_ALERT = "mdi:alert-octagon"
# YAML configuration keys accepted by this platform.
CONF_STATION_FROM = "station_from"
CONF_STATION_TO = "station_to"
CONF_STATION_LIVE = "station_live"
CONF_EXCLUDE_VIAS = "exclude_vias"
# Voluptuous schema validating the platform configuration block.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Required(CONF_STATION_FROM): cv.string,
        vol.Required(CONF_STATION_TO): cv.string,
        vol.Optional(CONF_STATION_LIVE): cv.string,
        vol.Optional(CONF_EXCLUDE_VIAS, default=False): cv.boolean,
        vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
        vol.Optional(CONF_SHOW_ON_MAP, default=False): cv.boolean,
    }
)
def get_time_until(departure_time=None):
    """Return the whole minutes from now until *departure_time* (epoch s)."""
    if departure_time is None:
        return 0
    departure = dt_util.utc_from_timestamp(int(departure_time))
    remaining = departure - dt_util.now()
    return round(remaining.total_seconds() / 60)
def get_delay_in_minutes(delay=0):
    """Convert a delay expressed in seconds into whole minutes."""
    seconds = int(delay)
    return round(seconds / 60)
def get_ride_duration(departure_time, arrival_time, delay=0):
    """Return the total travel time in minutes, including any delay."""
    depart = dt_util.utc_from_timestamp(int(departure_time))
    arrive = dt_util.utc_from_timestamp(int(arrival_time))
    base_minutes = int(round((arrive - depart).total_seconds() / 60))
    return base_minutes + get_delay_in_minutes(delay)
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the NMBS sensor with iRail API."""
    client = iRail()
    from_station = config[CONF_STATION_FROM]
    to_station = config[CONF_STATION_TO]
    live_station = config.get(CONF_STATION_LIVE)
    entities = [
        NMBSSensor(
            client,
            config[CONF_NAME],
            config[CONF_SHOW_ON_MAP],
            from_station,
            to_station,
            config[CONF_EXCLUDE_VIAS],
        )
    ]
    # A liveboard sensor is only created when its station is configured.
    if live_station is not None:
        entities.append(
            NMBSLiveBoard(client, live_station, from_station, to_station)
        )
    add_entities(entities, True)
class NMBSLiveBoard(SensorEntity):
    """Sensor exposing the next departure on a station's liveboard."""

    def __init__(self, api_client, live_station, station_from, station_to):
        """Initialize the sensor for getting liveboard data."""
        self._api_client = api_client
        self._station = live_station
        self._station_from = station_from
        self._station_to = station_to
        self._attrs = {}
        self._state = None

    @property
    def name(self):
        """Return the sensor default name."""
        return f"NMBS Live ({self._station})"

    @property
    def unique_id(self):
        """Return a unique ID."""
        base_id = f"{self._station}_{self._station_from}_{self._station_to}"
        return f"nmbs_live_{base_id}"

    @property
    def icon(self):
        """Return the default icon or an alert icon if delays."""
        delayed = bool(self._attrs) and int(self._attrs["delay"]) > 0
        return DEFAULT_ICON_ALERT if delayed else DEFAULT_ICON

    @property
    def state(self):
        """Return sensor state."""
        return self._state

    @property
    def extra_state_attributes(self):
        """Return the sensor attributes if data is available."""
        if self._state is None or not self._attrs:
            return None
        delay = get_delay_in_minutes(self._attrs["delay"])
        departure = get_time_until(self._attrs["time"])
        attrs = {
            "departure": f"In {departure} minutes",
            "departure_minutes": departure,
            "extra_train": int(self._attrs["isExtra"]) > 0,
            "vehicle_id": self._attrs["vehicle"],
            "monitored_station": self._station,
            ATTR_ATTRIBUTION: "https://api.irail.be/",
        }
        # Only expose delay attributes when there actually is a delay.
        if delay > 0:
            attrs["delay"] = f"{delay} minutes"
            attrs["delay_minutes"] = delay
        return attrs

    def update(self):
        """Set the state equal to the next departure."""
        liveboard = self._api_client.get_liveboard(self._station)
        if liveboard is None or not liveboard["departures"]:
            return
        departure = liveboard["departures"]["departure"][0]
        self._attrs = departure
        self._state = f"Track {departure['platform']} - {departure['station']}"
class NMBSSensor(SensorEntity):
    """Sensor reporting the total travel time for a given NMBS connection."""

    def __init__(
        self, api_client, name, show_on_map, station_from, station_to, excl_vias
    ):
        """Initialize the NMBS connection sensor."""
        self._name = name
        self._show_on_map = show_on_map
        self._api_client = api_client
        self._station_from = station_from
        self._station_to = station_to
        self._excl_vias = excl_vias
        self._attrs = {}
        self._state = None

    @property
    def name(self):
        """Return the name of the sensor."""
        return self._name

    @property
    def unit_of_measurement(self):
        """Return the unit of measurement."""
        return TIME_MINUTES

    @property
    def icon(self):
        """Return the sensor default icon or an alert icon if any delay."""
        # Use the module-level icon constants for consistency with
        # NMBSLiveBoard (values are identical to the previous literals).
        if self._attrs:
            delay = get_delay_in_minutes(self._attrs["departure"]["delay"])
            if delay > 0:
                return DEFAULT_ICON_ALERT
        return DEFAULT_ICON

    @property
    def extra_state_attributes(self):
        """Return sensor attributes if data is available."""
        if self._state is None or not self._attrs:
            return None
        delay = get_delay_in_minutes(self._attrs["departure"]["delay"])
        departure = get_time_until(self._attrs["departure"]["time"])
        attrs = {
            "departure": f"In {departure} minutes",
            "departure_minutes": departure,
            "destination": self._station_to,
            "direction": self._attrs["departure"]["direction"]["name"],
            "platform_arriving": self._attrs["arrival"]["platform"],
            "platform_departing": self._attrs["departure"]["platform"],
            "vehicle_id": self._attrs["departure"]["vehicle"],
            ATTR_ATTRIBUTION: "https://api.irail.be/",
        }
        if self._show_on_map and self.station_coordinates:
            attrs[ATTR_LATITUDE] = self.station_coordinates[0]
            attrs[ATTR_LONGITUDE] = self.station_coordinates[1]
        # Expose transfer details when the trip goes through another station.
        if self.is_via_connection and not self._excl_vias:
            via = self._attrs["vias"]["via"][0]
            attrs["via"] = via["station"]
            attrs["via_arrival_platform"] = via["arrival"]["platform"]
            attrs["via_transfer_platform"] = via["departure"]["platform"]
            attrs["via_transfer_time"] = get_delay_in_minutes(
                via["timeBetween"]
            ) + get_delay_in_minutes(via["departure"]["delay"])
        if delay > 0:
            attrs["delay"] = f"{delay} minutes"
            attrs["delay_minutes"] = delay
        return attrs

    @property
    def state(self):
        """Return the state of the device."""
        return self._state

    @property
    def station_coordinates(self):
        """Get the lat, long coordinates for station."""
        if self._state is None or not self._attrs:
            return []
        latitude = float(self._attrs["departure"]["stationinfo"]["locationY"])
        longitude = float(self._attrs["departure"]["stationinfo"]["locationX"])
        return [latitude, longitude]

    @property
    def is_via_connection(self):
        """Return whether the connection goes through another station."""
        if not self._attrs:
            return False
        return "vias" in self._attrs and int(self._attrs["vias"]["number"]) > 0

    def update(self):
        """Set the state to the duration of a connection."""
        connections = self._api_client.get_connections(
            self._station_from, self._station_to
        )
        if connections is None or not connections["connection"]:
            return
        # "left" > 0 means the first connection already departed; fall
        # through to the next one.
        if int(connections["connection"][0]["departure"]["left"]) > 0:
            next_connection = connections["connection"][1]
        else:
            next_connection = connections["connection"][0]
        self._attrs = next_connection
        if self._excl_vias and self.is_via_connection:
            # FIX: the previous message used a backslash continuation inside
            # the string literal, which injected a run of indentation spaces
            # into the emitted log line.
            _LOGGER.debug(
                "Skipping update of NMBSSensor because this connection is a via"
            )
            return
        duration = get_ride_duration(
            next_connection["departure"]["time"],
            next_connection["arrival"]["time"],
            next_connection["departure"]["delay"],
        )
        self._state = duration
mhrivnak/pulp | refs/heads/master | server/pulp/server/exceptions.py | 2 | from datetime import timedelta
from gettext import gettext as _
from pprint import pformat
import httplib
from pulp.common import error_codes, auth_utils
class PulpException(Exception):
    """
    Base exception class for Pulp.
    Provides base class __str__ and data_dict implementations
    """
    # NOTE(review): this module targets Python 2 (httplib module,
    # basestring below) — keep py2 semantics when editing.
    http_status_code = httplib.INTERNAL_SERVER_ERROR
    def __init__(self, *args):
        super(PulpException, self).__init__(*args)
        # Generic default code; subclasses override with a specific one.
        self.error_code = error_codes.PLP0000
        self.error_data = {}
        # child exceptions are those that are wrapped within this exception, validation errors
        # for example would have one overall validation error and then a separate sub error
        # for each validation that failed
        self.child_exceptions = []
    def add_child_exception(self, exception):
        # Wrapped sub-errors are rendered recursively by to_dict().
        self.child_exceptions.append(exception)
    def to_dict(self):
        """
        The to_dict method is used to provide a standardized dictionary
        of the exception information for usage storing to the database
        or converting to json to send back via an API call
        """
        result = {
            'code': self.error_code.code,
            'description': str(self),
            'data': self.error_data,
            'sub_errors': []
        }
        for error in self.child_exceptions:
            if isinstance(error, PulpException):
                result['sub_errors'].append(error.to_dict())
            else:
                # Foreign exceptions are reported under the generic code.
                result['sub_errors'].append({'code': 'PLP0000',
                                             'description': str(error),
                                             'data': {},
                                             'sub_errors': []})
        return result
    def __str__(self):
        class_name = self.__class__.__name__
        msg = _('Pulp exception occurred: %(c)s') % {'c': class_name}
        # Prefer an explicit message passed as the first positional arg.
        if self.args and isinstance(self.args[0], basestring):
            msg = self.args[0]
        # Returns UTF-8 encoded bytes (Python 2 str).
        return msg.encode('utf-8')
    def data_dict(self):
        return {'args': self.args}
class PulpExecutionException(PulpException):
    """
    Base class of exceptions raised during the execution of Pulp.
    This class should be used as a graceful server-side error while running
    an operation. It is acceptable to instantiate and use this class directly.
    Subclasses to this exception can be used to further describe any problems
    encountered by the server.
    """
    # NOTE intermediate exception class, no overrides will be provided
    pass
class PulpCodedException(PulpException):
    """
    Base class for exceptions that put the error_code and data as init arguments
    """
    def __init__(self, error_code=error_codes.PLP0001, **kwargs):
        super(PulpCodedException, self).__init__()
        self.error_code = error_code
        if kwargs:
            self.error_data = kwargs
        # Validate that the coded exception was raised with all the error_data fields that
        # are required
        for key in self.error_code.required_fields:
            if key not in self.error_data:
                # Missing required field is itself reported as a coded error.
                raise PulpCodedException(error_codes.PLP0008, code=self.error_code.code,
                                         field=key)
    def __str__(self):
        # Interpolate the code's message template with the supplied data.
        msg = self.error_code.message % self.error_data
        return msg.encode('utf-8')
class PulpCodedValidationException(PulpCodedException):
    """
    Class for wrapping collections of coded validation errors.
    :param error_code: The particular error code that should be used for this validation exception
    :type error_code: pulp.common.error_codes.Error
    :param validation_exceptions: List of coded exceptions for each validation error that occurred
    :type validation_exceptions: list of PulpCodedException
    """
    http_status_code = httplib.BAD_REQUEST
    def __init__(self, validation_exceptions=None, error_code=error_codes.PLP1000, **kwargs):
        super(PulpCodedValidationException, self).__init__(error_code=error_code, **kwargs)
        # Each individual validation failure becomes a child exception.
        if validation_exceptions:
            self.child_exceptions = validation_exceptions
class PulpCodedAuthenticationException(PulpCodedException):
    """
    Class for coded authentication exceptions. Raising this exception results in a
    401 Unauthorized code being returned.
    :param error_code: The particular error code that should be used for this authentication
                       exception
    :type error_code: pulp.common.error_codes.Error
    """
    http_status_code = httplib.UNAUTHORIZED
    def __init__(self, error_code=error_codes.PLP0025, **kwargs):
        super(PulpCodedAuthenticationException, self).__init__(error_code=error_code, **kwargs)
        # For backwards compatibility, get the old error code
        self.old_error_code = auth_utils.generate_failure_response(error_code)
    def data_dict(self):
        # Legacy clients expect the old auth failure payload here.
        return self.old_error_code
class MissingResource(PulpExecutionException):
    """"
    Base class for exceptions raised due to requesting a resource that does not
    exist.
    """
    http_status_code = httplib.NOT_FOUND
    def __init__(self, *args, **resources):
        """
        @param args: backward compatibility for for positional resource_id argument
        @param resources: keyword arguments of resource_type=resource_id
        """
        # backward compatibility for for previous 'resource_id' positional argument
        if args:
            resources['resource_id'] = args[0]
        super(MissingResource, self).__init__(resources)
        self.error_code = error_codes.PLP0009
        self.resources = resources
        self.error_data = {'resources': resources}
    def __str__(self):
        # Render resources as "type=id, type=id" for the message template.
        resources_str = ', '.join('%s=%s' % (k, v) for k, v in self.resources.items())
        msg = self.error_code.message % {'resources': resources_str}
        return msg.encode('utf-8')
    def data_dict(self):
        return {'resources': self.resources}
class ConflictingOperation(PulpExecutionException):
    """
    Base class for exceptions raised when an operation cannot be completed due
    to another operation already in progress.
    """
    http_status_code = httplib.CONFLICT
    def __init__(self, reasons):
        """
        @param reasons: list of dicts describing why the requested operation was denied;
               this is retrieved from the call report instance that indicated the conflict
        @type reasons: list
        """
        super(ConflictingOperation, self).__init__(reasons)
        self.error_code = error_codes.PLP0010
        self.error_data = {'reasons': reasons}
        self.reasons = reasons
    def __str__(self):
        msg = self.error_code.message % self.error_data
        return msg.encode('utf-8')
    def data_dict(self):
        return {'reasons': self.reasons}
class OperationTimedOut(PulpExecutionException):
    """
    Base class for exceptions raised when an operation cannot be completed
    because it failed to start before a predetermined amount of time had passed.
    """
    http_status_code = httplib.SERVICE_UNAVAILABLE
    def __init__(self, timeout):
        """
        @param timeout: the timeout that expired
        @type timeout: datetime.timedelta or str
        """
        # Normalize timedeltas to their string form for message rendering.
        if isinstance(timeout, timedelta):
            timeout = str(timeout)
        super(OperationTimedOut, self).__init__(timeout)
        self.error_code = error_codes.PLP0011
        self.error_data = {'timeout': timeout}
        self.timeout = timeout
    def __str__(self):
        msg = self.error_code.message % self.error_data
        return msg.encode('utf-8')
    def data_dict(self):
        return {'timeout': self.timeout}
class NoWorkers(PulpExecutionException):
    """
    This Exception is raised when there are no Celery workers available to perform asynchronous
    tasks.
    """
    http_status_code = httplib.SERVICE_UNAVAILABLE
    def __init__(self):
        """
        Initialize the NoWorkers Exception by setting its error code and message.
        """
        super(NoWorkers, self).__init__()
        self.error_code = error_codes.PLP0024
    def __str__(self):
        """
        Return a string representation of self.
        :return: str of self
        :rtype: str
        """
        # PLP0024's message takes no interpolation data.
        msg = self.error_code.message
        return msg.encode('utf-8')
    def data_dict(self):
        """
        Return an empty dictionary, as there is no data for this error.
        :return: empty dictionary
        :rtype: dict
        """
        return {}
class OperationPostponed(PulpExecutionException):
    """
    Base class for handling operations postponed by the coordinator.
    """
    # 202 Accepted: the request was queued, not completed.
    http_status_code = httplib.ACCEPTED
    def __init__(self, call_report):
        """
        @param call_report: call report for postponed operation
        @type call_report: CallReport or pulp.server.async.task.TaskResult
        """
        super(OperationPostponed, self).__init__(call_report)
        self.error_code = error_codes.PLP0012
        self.call_report = call_report
        self.error_data = {'call_report': call_report}
    def __str__(self):
        msg = self.error_code.message % self.error_data
        return msg.encode('utf-8')
    def data_dict(self):
        return {'call_report': self.call_report}
# NOTE(review): this class shadows the built-in ``NotImplemented`` singleton
# within this module; renaming would break external callers, so it is kept.
class NotImplemented(PulpExecutionException):
    """
    Base class for exceptions raised in place-holders for future functionality
    or for missing control hooks in asynchronous operations, like 'cancel'.
    """
    # Surfaced by the REST layer as HTTP 501 Not Implemented.
    http_status_code = httplib.NOT_IMPLEMENTED
    def __init__(self, operation_name):
        """
        @param operation_name: the name of the operation that is not implemented
        @type operation_name: str
        """
        super(NotImplemented, self).__init__(operation_name)
        self.operation_name = operation_name
        self.error_code = error_codes.PLP0013
        self.error_data = {'operation_name': operation_name}
    def __str__(self):
        # Render the coded message; encoded to UTF-8 bytes (Python 2 str).
        msg = self.error_code.message % self.error_data
        return msg.encode('utf-8')
    def data_dict(self):
        """Return the serializable data describing this error."""
        return {'operation_name': self.operation_name}
class PulpDataException(PulpException):
    """
    Base class of exceptions raised due to data validation errors.
    """
    # NOTE intermediate exception class, no overrides will be provided
    # All data-validation errors map to HTTP 400 Bad Request.
    http_status_code = httplib.BAD_REQUEST
class InvalidValue(PulpDataException):
    """
    Base class of exceptions raised due invalid data values. The names of all
    properties that were invalid are specified in the constructor.
    """
    def __init__(self, property_names):
        """
        @param property_names: list of all properties that were invalid;
            if a single property_name is passed, it is converted to a list
        @type property_names: list
        """
        # NOTE: super() deliberately receives the raw argument (possibly a
        # bare string) before it is normalized to a list below.
        super(InvalidValue, self).__init__(property_names)
        if not isinstance(property_names, (list, tuple)):
            property_names = [property_names]
        self.error_code = error_codes.PLP0015
        # 'properties' is the pretty-printed form used in the coded message.
        self.error_data = {'property_names': property_names,
                           'properties': pformat(property_names)}
        self.property_names = property_names
    def __str__(self):
        # Render the coded message; encoded to UTF-8 bytes (Python 2 str).
        msg = self.error_code.message % self.error_data
        return msg.encode('utf-8')
    def data_dict(self):
        """Return the serializable data describing this error."""
        return {'property_names': self.property_names}
class MissingValue(PulpDataException):
    """
    Base class of exceptions raised due to missing required data. The names of
    all properties that are missing are specified in the constructor.
    """
    def __init__(self, property_names):
        """
        @param property_names: list of all properties that were missing;
            a single name is converted to a one-element list
        @type property_names: list
        """
        # NOTE: super() deliberately receives the raw argument before
        # normalization, mirroring InvalidValue.
        super(MissingValue, self).__init__(property_names)
        if not isinstance(property_names, (list, tuple)):
            property_names = [property_names]
        self.error_code = error_codes.PLP0016
        self.error_data = {'property_names': property_names,
                           'properties': pformat(property_names)}
        self.property_names = property_names
    def __str__(self):
        # Render the coded message; encoded to UTF-8 bytes (Python 2 str).
        msg = self.error_code.message % self.error_data
        return msg.encode('utf-8')
    def data_dict(self):
        """Return the serializable data describing this error."""
        return {'missing_property_names': self.property_names}
class UnsupportedValue(PulpDataException):
    """
    Base class of exceptions raised due to unsupported data. The names of all
    the properties that are unsupported are specified in the constructor.
    """
    def __init__(self, property_names):
        """
        @param property_names: list of all properties that are unsupported;
            a single name is converted to a one-element list
        @type property_names: list or str
        """
        super(UnsupportedValue, self).__init__(property_names)
        if not isinstance(property_names, (list, tuple)):
            property_names = [property_names]
        self.error_code = error_codes.PLP0017
        self.error_data = {'property_names': property_names,
                           'properties': pformat(property_names)}
        self.property_names = property_names
    def __str__(self):
        # Render the coded message; encoded to UTF-8 bytes (Python 2 str).
        msg = self.error_code.message % self.error_data
        return msg.encode('utf-8')
    def data_dict(self):
        """Return the serializable data describing this error."""
        return {'unsupported_property_names': self.property_names}
class DuplicateResource(PulpDataException):
    """
    Base class of exceptions raised due to duplicate resource ids.
    """
    # Surfaced by the REST layer as HTTP 409 Conflict.
    http_status_code = httplib.CONFLICT
    def __init__(self, resource_id):
        """
        @param resource_id: ID of the resource that was duplicated
        @type resource_id: str
        """
        super(DuplicateResource, self).__init__(resource_id)
        self.error_code = error_codes.PLP0018
        self.error_data = {'resource_id': resource_id}
        self.resource_id = resource_id
    def __str__(self):
        # Render the coded message; encoded to UTF-8 bytes (Python 2 str).
        msg = self.error_code.message % self.error_data
        return msg.encode('utf-8')
    def data_dict(self):
        """Return the serializable data describing this error."""
        return {'resource_id': self.resource_id}
class InputEncodingError(PulpDataException):
    """
    Error raised when input strings are not encoded in utf-8
    """
    def __init__(self, value):
        """
        @param value: the input value that was not utf-8 encoded
        """
        super(InputEncodingError, self).__init__(value)
        self.error_code = error_codes.PLP0019
        self.error_data = {'value': value}
        self.value = value
    def __str__(self):
        # NOTE(review): unlike the sibling exceptions, this returns the
        # interpolated message without .encode('utf-8') — presumably because
        # the offending value may not be encodable; confirm before changing.
        return self.error_code.message % self.error_data
    def data_dict(self):
        """Return the serializable data describing this error."""
        return {'value': self.value}
class PulpCodedTaskException(PulpCodedException):
    """
    Base class for exceptions that put the error_code and data as init arguments
    """
    def __init__(self, error_code=error_codes.PLP1000, **kwargs):
        """
        @param error_code: the coded error for this exception; defaults to the
            generic task error PLP1000
        @param kwargs: arbitrary data interpolated into the coded message
        """
        super(PulpCodedTaskException, self).__init__(error_code=error_code, **kwargs)
class PulpCodedTaskFailedException(PulpCodedException):
    """
    Class for wrapping collections of coded task errors.

    No __init__ is defined here; construction is inherited from
    PulpCodedException.

    :param error_code: The particular error code that should be used for this validation exception
    :type error_code: pulp.common.error_codes.Error
    :param task_exceptions: List of coded exceptions for each validation error that occurred
    :type task_exceptions: list of PulpCodedException
    """
|
prakritish/ansible | refs/heads/devel | lib/ansible/modules/cloud/rackspace/rax_facts.py | 70 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# This is a DOCUMENTATION stub specific to this module, it extends
# a documentation fragment located in ansible.utils.module_docs_fragments
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: rax_facts
short_description: Gather facts for Rackspace Cloud Servers
description:
- Gather facts for Rackspace Cloud Servers.
version_added: "1.4"
options:
address:
description:
- Server IP address to retrieve facts for, will match any IP assigned to
the server
id:
description:
- Server ID to retrieve facts for
name:
description:
- Server name to retrieve facts for
default: null
author: "Matt Martz (@sivel)"
extends_documentation_fragment: rackspace.openstack
'''
EXAMPLES = '''
- name: Gather info about servers
hosts: all
gather_facts: False
tasks:
- name: Get facts about servers
local_action:
module: rax_facts
credentials: ~/.raxpub
name: "{{ inventory_hostname }}"
region: DFW
- name: Map some facts
set_fact:
ansible_ssh_host: "{{ rax_accessipv4 }}"
'''
try:
import pyrax
HAS_PYRAX = True
except ImportError:
HAS_PYRAX = False
def rax_facts(module, address, name, server_id):
    """Look up a single Rackspace Cloud server and exit with its facts.

    Exactly one of ``name``, ``address`` or ``server_id`` is expected (the
    module's argument spec enforces mutual exclusion / required-one-of).
    Exits the module via ``exit_json`` with the server's facts, or via
    ``fail_json`` on API errors or ambiguous matches.
    """
    changed = False
    cs = pyrax.cloudservers
    if cs is None:
        module.fail_json(msg='Failed to instantiate client. This '
                             'typically indicates an invalid region or an '
                             'incorrectly capitalized region name.')
    ansible_facts = {}
    search_opts = {}
    if name:
        # Anchored regex so only exact name matches are returned.
        search_opts = dict(name='^%s$' % name)
        try:
            servers = cs.servers.list(search_opts=search_opts)
        except Exception as e:
            module.fail_json(msg='%s' % e.message)
    elif address:
        servers = []
        try:
            # Match the address against every network the server is attached
            # to; stop scanning a server's networks after the first hit.
            for server in cs.servers.list():
                for addresses in server.networks.values():
                    if address in addresses:
                        servers.append(server)
                        break
        except Exception as e:
            module.fail_json(msg='%s' % e.message)
    elif server_id:
        servers = []
        try:
            servers.append(cs.servers.get(server_id))
        except Exception as e:
            # NOTE(review): errors are swallowed so an unknown id yields
            # empty facts rather than a failure — presumably intentional
            # best-effort behavior; confirm before tightening.
            pass
    # Drop servers pending deletion from consideration.
    servers[:] = [server for server in servers if server.status != "DELETED"]
    if len(servers) > 1:
        module.fail_json(msg='Multiple servers found matching provided '
                             'search parameters')
    elif len(servers) == 1:
        ansible_facts = rax_to_dict(servers[0], 'server')
    # Zero matches falls through with empty facts and changed=False.
    module.exit_json(changed=changed, ansible_facts=ansible_facts)
def main():
    """Module entry point: parse arguments, set up pyrax, gather facts."""
    argument_spec = rax_argument_spec()
    argument_spec.update(
        dict(
            address=dict(),
            id=dict(),
            name=dict(),
        )
    )
    # Exactly one of address/id/name must be supplied, and never more than one.
    module = AnsibleModule(
        argument_spec=argument_spec,
        required_together=rax_required_together(),
        mutually_exclusive=[['address', 'id', 'name']],
        required_one_of=[['address', 'id', 'name']],
    )
    if not HAS_PYRAX:
        module.fail_json(msg='pyrax is required for this module')
    address = module.params.get('address')
    server_id = module.params.get('id')
    name = module.params.get('name')
    # Authenticate/configure the pyrax client before querying.
    setup_rax_module(module, pyrax)
    rax_facts(module, address, name, server_id)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.rax import *
### invoke the module
if __name__ == '__main__':
main()
|
cgstudiomap/cgstudiomap | refs/heads/develop | main/local_modules/frontend_base/tests/test_res_industry.py | 1 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# This module copyright (C) cgstudiomap <cgstudiomap@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.tests import common
class TestTagUrlLinkDetails(common.TransactionCase):
    """Test suite of the static method of tag_url_link_details."""
    def setUp(self):
        super(TestTagUrlLinkDetails, self).setUp()
        # Model under test: res.industry exposes tag_url_link_details.
        self.industry_pool = self.env['res.industry']
    def test_whenListingIsProvided_thenUrlWithDirectoryListIsReturned(self):
        # listing=True prefixes the href with the /directory/list route.
        self.assertEqual(
            (
                '<a itemprop="name" href="/directory/list?company_status=open'
                '&search=vfx"><span class="label label-info">vfx</span></a>'
            ),
            self.industry_pool.tag_url_link_details('vfx', 'open', True)
        )
    def test_whenListingIsFalse_thenNoUrlTokenIsAdded(self):
        # listing=False keeps the href relative to the current page.
        self.assertEqual(
            (
                '<a itemprop="name" href="?company_status=open&search=vfx">'
                '<span class="label label-info">vfx</span></a>'
            ),
            self.industry_pool.tag_url_link_details('vfx', 'open', False)
        )
|
wonder-sk/QGIS | refs/heads/master | scripts/qgis_fixes/fix_absolute_import.py | 77 | from libfuturize.fixes.fix_absolute_import import FixAbsoluteImport as FixAbsoluteImportOrig
class FixAbsoluteImport(FixAbsoluteImportOrig):
    """Fixer that exempts PyQt modules and AlgorithmsTestBase from the
    local-import heuristic, deferring to the base class otherwise."""

    def probably_a_local_import(self, imp_name):
        # PyQt* packages and the AlgorithmsTestBase helper are always
        # treated as non-local imports; everything else uses the default
        # libfuturize heuristic.
        if imp_name.startswith(u"PyQt") or imp_name == "AlgorithmsTestBase":
            return False
        return super(FixAbsoluteImport, self).probably_a_local_import(imp_name)
|
baylee/django | refs/heads/master | django/contrib/postgres/aggregates/general.py | 419 | from django.db.models.aggregates import Aggregate
__all__ = [
'ArrayAgg', 'BitAnd', 'BitOr', 'BoolAnd', 'BoolOr', 'StringAgg',
]
class ArrayAgg(Aggregate):
    """PostgreSQL ARRAY_AGG: collect the aggregated values into an array."""
    function = 'ARRAY_AGG'
    def convert_value(self, value, expression, connection, context):
        # NULL (no rows aggregated) is normalized to an empty list.
        if not value:
            return []
        return value
class BitAnd(Aggregate):
    """PostgreSQL BIT_AND: bitwise AND of all non-null input values."""
    function = 'BIT_AND'
class BitOr(Aggregate):
    """PostgreSQL BIT_OR: bitwise OR of all non-null input values."""
    function = 'BIT_OR'
class BoolAnd(Aggregate):
    """PostgreSQL BOOL_AND: true if every non-null input value is true."""
    function = 'BOOL_AND'
class BoolOr(Aggregate):
    """PostgreSQL BOOL_OR: true if any non-null input value is true."""
    function = 'BOOL_OR'
class StringAgg(Aggregate):
    """PostgreSQL STRING_AGG: concatenate values separated by ``delimiter``."""
    function = 'STRING_AGG'
    # SECURITY NOTE: the delimiter is interpolated directly into the SQL
    # template rather than passed as a bound parameter — only pass trusted,
    # literal delimiters here.
    template = "%(function)s(%(expressions)s, '%(delimiter)s')"
    def __init__(self, expression, delimiter, **extra):
        # The delimiter is stashed in extra so the template can render it.
        super(StringAgg, self).__init__(expression, delimiter=delimiter, **extra)
    def convert_value(self, value, expression, connection, context):
        # NULL (no rows aggregated) is normalized to an empty string.
        if not value:
            return ''
        return value
|
RealImpactAnalytics/airflow | refs/heads/master | airflow/migrations/versions/338e90f54d61_more_logging_into_task_isntance.py | 9 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""More logging into task_isntance
Revision ID: 338e90f54d61
Revises: 13eb55f81627
Create Date: 2015-08-25 06:09:20.460147
"""
# revision identifiers, used by Alembic.
revision = '338e90f54d61'
down_revision = '13eb55f81627'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Add operator and queued_dttm columns to task_instance."""
    ### commands auto generated by Alembic - please adjust! ###
    op.add_column('task_instance', sa.Column('operator', sa.String(length=1000), nullable=True))
    op.add_column('task_instance', sa.Column('queued_dttm', sa.DateTime(), nullable=True))
def downgrade():
    """Drop the operator and queued_dttm columns from task_instance."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('task_instance', 'queued_dttm')
    op.drop_column('task_instance', 'operator')
    ### end Alembic commands ###
|
Blue-Lightning-Rom/external_chromium | refs/heads/jb-mr1 | testing/gtest/scripts/fuse_gtest_files.py | 2577 | #!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""fuse_gtest_files.py v0.2.0
Fuses Google Test source code into a .h file and a .cc file.
SYNOPSIS
fuse_gtest_files.py [GTEST_ROOT_DIR] OUTPUT_DIR
Scans GTEST_ROOT_DIR for Google Test source code, and generates
two files: OUTPUT_DIR/gtest/gtest.h and OUTPUT_DIR/gtest/gtest-all.cc.
Then you can build your tests by adding OUTPUT_DIR to the include
search path and linking with OUTPUT_DIR/gtest/gtest-all.cc. These
two files contain everything you need to use Google Test. Hence
you can "install" Google Test by copying them to wherever you want.
GTEST_ROOT_DIR can be omitted and defaults to the parent
directory of the directory holding this script.
EXAMPLES
./fuse_gtest_files.py fused_gtest
./fuse_gtest_files.py path/to/unpacked/gtest fused_gtest
This tool is experimental. In particular, it assumes that there is no
conditional inclusion of Google Test headers. Please report any
problems to googletestframework@googlegroups.com. You can read
http://code.google.com/p/googletest/wiki/GoogleTestAdvancedGuide for
more information.
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import re
import sets
import sys
# We assume that this file is in the scripts/ directory in the Google
# Test root directory.
DEFAULT_GTEST_ROOT_DIR = os.path.join(os.path.dirname(__file__), '..')
# Regex for matching '#include "gtest/..."'.
INCLUDE_GTEST_FILE_REGEX = re.compile(r'^\s*#\s*include\s*"(gtest/.+)"')
# Regex for matching '#include "src/..."'.
INCLUDE_SRC_FILE_REGEX = re.compile(r'^\s*#\s*include\s*"(src/.+)"')
# Where to find the source seed files.
GTEST_H_SEED = 'include/gtest/gtest.h'
GTEST_SPI_H_SEED = 'include/gtest/gtest-spi.h'
GTEST_ALL_CC_SEED = 'src/gtest-all.cc'
# Where to put the generated files.
GTEST_H_OUTPUT = 'gtest/gtest.h'
GTEST_ALL_CC_OUTPUT = 'gtest/gtest-all.cc'
def VerifyFileExists(directory, relative_path):
  """Verifies that the given file exists; aborts on failure.

  relative_path is the file path relative to the given directory.
  """
  # Python 2 script: uses print statements throughout.
  if not os.path.isfile(os.path.join(directory, relative_path)):
    print 'ERROR: Cannot find %s in directory %s.' % (relative_path,
                                                      directory)
    print ('Please either specify a valid project root directory '
           'or omit it on the command line.')
    sys.exit(1)
def ValidateGTestRootDir(gtest_root):
  """Makes sure gtest_root points to a valid gtest root directory.

  The function aborts the program on failure.
  """
  # The seed header and seed source must both exist under the root.
  VerifyFileExists(gtest_root, GTEST_H_SEED)
  VerifyFileExists(gtest_root, GTEST_ALL_CC_SEED)
def VerifyOutputFile(output_dir, relative_path):
  """Verifies that the given output file path is valid.

  relative_path is relative to the output_dir directory.
  Prompts interactively before overwriting an existing file and creates
  missing parent directories.
  """
  # Makes sure the output file either doesn't exist or can be overwritten.
  output_file = os.path.join(output_dir, relative_path)
  if os.path.exists(output_file):
    # TODO(wan@google.com): The following user-interaction doesn't
    # work with automated processes.  We should provide a way for the
    # Makefile to force overwriting the files.
    print ('%s already exists in directory %s - overwrite it? (y/N) ' %
           (relative_path, output_dir))
    answer = sys.stdin.readline().strip()
    if answer not in ['y', 'Y']:
      print 'ABORTED.'
      sys.exit(1)
  # Makes sure the directory holding the output file exists; creates
  # it and all its ancestors if necessary.
  parent_directory = os.path.dirname(output_file)
  if not os.path.isdir(parent_directory):
    os.makedirs(parent_directory)
def ValidateOutputDir(output_dir):
  """Makes sure output_dir points to a valid output directory.

  The function aborts the program on failure.
  """
  # Check (and possibly confirm overwrite of) both generated files.
  VerifyOutputFile(output_dir, GTEST_H_OUTPUT)
  VerifyOutputFile(output_dir, GTEST_ALL_CC_OUTPUT)
def FuseGTestH(gtest_root, output_dir):
  """Scans folder gtest_root to generate gtest/gtest.h in output_dir.

  Recursively inlines every '#include "gtest/..."' directive so the
  generated header is self-contained; all other lines are copied verbatim.
  """
  output_file = open(os.path.join(output_dir, GTEST_H_OUTPUT), 'w')
  # Use the built-in set type; the 'sets' module is deprecated since
  # Python 2.6 and the 'file' builtin was removed in Python 3.
  processed_files = set()  # Holds all gtest headers we've processed.

  def ProcessFile(gtest_header_path):
    """Processes the given gtest header file."""
    # We don't process the same header twice.
    if gtest_header_path in processed_files:
      return
    processed_files.add(gtest_header_path)
    # Reads each line in the given gtest header; close the handle even if
    # processing fails (the original leaked one handle per header).
    input_file = open(os.path.join(gtest_root, gtest_header_path), 'r')
    try:
      for line in input_file:
        m = INCLUDE_GTEST_FILE_REGEX.match(line)
        if m:
          # It's '#include "gtest/..."' - let's process it recursively.
          ProcessFile('include/' + m.group(1))
        else:
          # Otherwise we copy the line unchanged to the output file.
          output_file.write(line)
    finally:
      input_file.close()

  ProcessFile(GTEST_H_SEED)
  output_file.close()
def FuseGTestAllCcToFile(gtest_root, output_file):
  """Scans folder gtest_root to generate gtest/gtest-all.cc in output_file.

  Recursively inlines '#include "src/..."' directives, rewrites gtest
  header includes to the fused gtest.h, and copies all other lines.
  """
  # Use the built-in set type; the 'sets' module is deprecated since
  # Python 2.6 and the 'file' builtin was removed in Python 3.
  processed_files = set()

  def ProcessFile(gtest_source_file):
    """Processes the given gtest source file."""
    # We don't process the same #included file twice.
    if gtest_source_file in processed_files:
      return
    processed_files.add(gtest_source_file)
    # Reads each line in the given gtest source file; close the handle even
    # if processing fails (the original leaked one handle per file).
    input_file = open(os.path.join(gtest_root, gtest_source_file), 'r')
    try:
      for line in input_file:
        m = INCLUDE_GTEST_FILE_REGEX.match(line)
        if m:
          if 'include/' + m.group(1) == GTEST_SPI_H_SEED:
            # It's '#include "gtest/gtest-spi.h"'.  This file is not
            # #included by "gtest/gtest.h", so we need to process it.
            ProcessFile(GTEST_SPI_H_SEED)
          else:
            # It's '#include "gtest/foo.h"' where foo is not gtest-spi.
            # We treat it as '#include "gtest/gtest.h"', as all other
            # gtest headers are being fused into gtest.h and cannot be
            # #included directly.
            # There is no need to #include "gtest/gtest.h" more than once.
            if not GTEST_H_SEED in processed_files:
              processed_files.add(GTEST_H_SEED)
              output_file.write('#include "%s"\n' % (GTEST_H_OUTPUT,))
        else:
          m = INCLUDE_SRC_FILE_REGEX.match(line)
          if m:
            # It's '#include "src/foo"' - let's process it recursively.
            ProcessFile(m.group(1))
          else:
            output_file.write(line)
    finally:
      input_file.close()

  ProcessFile(GTEST_ALL_CC_SEED)
def FuseGTestAllCc(gtest_root, output_dir):
  """Scans folder gtest_root to generate gtest/gtest-all.cc in output_dir."""
  # open() replaces the 'file' builtin (removed in Python 3); the handle is
  # closed even if fusing fails part-way through.
  output_file = open(os.path.join(output_dir, GTEST_ALL_CC_OUTPUT), 'w')
  try:
    FuseGTestAllCcToFile(gtest_root, output_file)
  finally:
    output_file.close()
def FuseGTest(gtest_root, output_dir):
  """Fuses gtest.h and gtest-all.cc.

  Validates both directories first (aborting on error), then writes the
  fused header and source into output_dir.
  """
  ValidateGTestRootDir(gtest_root)
  ValidateOutputDir(output_dir)
  FuseGTestH(gtest_root, output_dir)
  FuseGTestAllCc(gtest_root, output_dir)
def main():
  """Command-line entry point; see the module docstring for usage."""
  argc = len(sys.argv)
  if argc == 2:
    # fuse_gtest_files.py OUTPUT_DIR
    FuseGTest(DEFAULT_GTEST_ROOT_DIR, sys.argv[1])
  elif argc == 3:
    # fuse_gtest_files.py GTEST_ROOT_DIR OUTPUT_DIR
    FuseGTest(sys.argv[1], sys.argv[2])
  else:
    # Wrong argument count: print usage and exit with an error status.
    print __doc__
    sys.exit(1)
if __name__ == '__main__':
main()
|
nikolay-fedotov/networking-cisco | refs/heads/master | networking_cisco/tests/unit/ml2/drivers/cisco/apic/test_cisco_apic_topology_agent.py | 20 | # Copyright (c) 2014 Cisco Systems
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
import mock
sys.modules["apicapi"] = mock.Mock()
from neutron.plugins.ml2.drivers.cisco.apic import apic_topology
from neutron.tests import base
from neutron.tests.unit.ml2.drivers.cisco.apic import (
test_cisco_apic_common as mocked)
NOTIFIER = ('neutron.plugins.ml2.drivers.cisco.apic.'
'apic_topology.ApicTopologyServiceNotifierApi')
RPC_CONNECTION = 'neutron.common.rpc.Connection'
AGENTS_DB = 'neutron.db.agents_db'
PERIODIC_TASK = 'neutron.openstack.common.periodic_task'
DEV_EXISTS = 'neutron.agent.linux.ip_lib.device_exists'
IP_DEVICE = 'neutron.agent.linux.ip_lib.IPDevice'
EXECUTE = 'neutron.agent.linux.utils.execute'
LLDP_CMD = ['lldpctl', '-f', 'keyvalue']
ETH0 = mocked.SERVICE_HOST_IFACE
LLDPCTL_RES = (
'lldp.' + ETH0 + '.via=LLDP\n'
'lldp.' + ETH0 + '.rid=1\n'
'lldp.' + ETH0 + '.age=0 day, 20:55:54\n'
'lldp.' + ETH0 + '.chassis.mac=' + mocked.SERVICE_HOST_MAC + '\n'
'lldp.' + ETH0 + '.chassis.name=' + mocked.SERVICE_PEER_CHASSIS_NAME + '\n'
'lldp.' + ETH0 + '.chassis.descr=' + mocked.SERVICE_PEER_CHASSIS + '\n'
'lldp.' + ETH0 + '.chassis.Bridge.enabled=on\n'
'lldp.' + ETH0 + '.chassis.Router.enabled=on\n'
'lldp.' + ETH0 + '.port.local=' + mocked.SERVICE_PEER_PORT_LOCAL + '\n'
'lldp.' + ETH0 + '.port.descr=' + mocked.SERVICE_PEER_PORT_DESC)
class TestCiscoApicTopologyService(base.BaseTestCase,
                                   mocked.ControllerMixin,
                                   mocked.ConfigMixin):
    """Tests for the APIC topology service's link update bookkeeping."""
    def setUp(self):
        super(TestCiscoApicTopologyService, self).setUp()
        mocked.ControllerMixin.set_up_mocks(self)
        mocked.ConfigMixin.set_up_mocks(self)
        # Patch notifier
        notifier_c = mock.patch(NOTIFIER).start()
        self.notifier = mock.Mock()
        notifier_c.return_value = self.notifier
        # Patch Connection
        connection_c = mock.patch(RPC_CONNECTION).start()
        self.connection = mock.Mock()
        connection_c.return_value = self.connection
        # Patch agents db
        self.agents_db = mock.patch(AGENTS_DB).start()
        self.service = apic_topology.ApicTopologyService()
        self.service.apic_manager = mock.Mock()
    def test_init_host(self):
        """init_host must create an RPC consumer and start consuming."""
        self.service.init_host()
        # BUG FIX: the original called 'ensure_called_once()', which is not a
        # Mock assertion method — Mock auto-creates it, so nothing was ever
        # verified. Assert the calls actually happened instead.
        self.assertTrue(self.connection.create_consumer.called)
        self.assertTrue(self.connection.consume_in_threads.called)
    def test_update_link_add_nopeers(self):
        """A new link is registered with the APIC manager and cached."""
        self.service.peers = {}
        args = (mocked.SERVICE_HOST, mocked.SERVICE_HOST_IFACE,
                mocked.SERVICE_HOST_MAC, mocked.APIC_EXT_SWITCH,
                mocked.APIC_EXT_MODULE, mocked.APIC_EXT_PORT)
        self.service.update_link(None, *args)
        self.service.apic_manager.add_hostlink.assert_called_once_with(*args)
        self.assertEqual(args,
                         self.service.peers[(mocked.SERVICE_HOST,
                                             mocked.SERVICE_HOST_IFACE)])
    def test_update_link_add_with_peers_diff(self):
        """A changed link replaces the stale cached entry in the APIC."""
        args = (mocked.SERVICE_HOST, mocked.SERVICE_HOST_IFACE,
                mocked.SERVICE_HOST_MAC, mocked.APIC_EXT_SWITCH,
                mocked.APIC_EXT_MODULE, mocked.APIC_EXT_PORT)
        # Same (host, iface) key but different link attributes.
        args_prime = args[:2] + tuple(x + '1' for x in args[2:])
        self.service.peers = {args_prime[:2]: args_prime}
        self.service.update_link(None, *args)
        self.service.apic_manager.remove_hostlink.assert_called_once_with(
            *args_prime)
        self.service.apic_manager.add_hostlink.assert_called_once_with(*args)
        self.assertEqual(
            args, self.service.peers[
                (mocked.SERVICE_HOST, mocked.SERVICE_HOST_IFACE)])
    def test_update_link_add_with_peers_eq(self):
        """An identical link is a no-op: nothing is added or removed."""
        args = (mocked.SERVICE_HOST, mocked.SERVICE_HOST_IFACE,
                mocked.SERVICE_HOST_MAC,
                mocked.APIC_EXT_SWITCH,
                mocked.APIC_EXT_MODULE, mocked.APIC_EXT_PORT)
        self.service.peers = {args[:2]: args}
        self.service.update_link(None, *args)
        # BUG FIX: the original asserted nothing. Verify no APIC calls occur
        # when the cached link is already up to date.
        self.assertFalse(self.service.apic_manager.add_hostlink.called)
        self.assertFalse(self.service.apic_manager.remove_hostlink.called)
    def test_update_link_rem_with_peers(self):
        """A removal (port 0) drops the link from the APIC and the cache."""
        args = (mocked.SERVICE_HOST, mocked.SERVICE_HOST_IFACE,
                mocked.SERVICE_HOST_MAC, 0,
                mocked.APIC_EXT_MODULE, mocked.APIC_EXT_PORT)
        self.service.peers = {args[:2]: args}
        self.service.update_link(None, *args)
        self.service.apic_manager.remove_hostlink.assert_called_once_with(
            *args)
        self.assertFalse(bool(self.service.peers))
    def test_update_link_rem_no_peers(self):
        """Removing an unknown link must not raise."""
        args = (mocked.SERVICE_HOST, mocked.SERVICE_HOST_IFACE,
                mocked.SERVICE_HOST_MAC, 0,
                mocked.APIC_EXT_MODULE, mocked.APIC_EXT_PORT)
        self.service.update_link(None, *args)
class TestCiscoApicTopologyAgent(base.BaseTestCase,
                                 mocked.ControllerMixin,
                                 mocked.ConfigMixin):
    """Tests for the host-side topology agent's LLDP peer discovery."""
    def setUp(self):
        super(TestCiscoApicTopologyAgent, self).setUp()
        mocked.ControllerMixin.set_up_mocks(self)
        mocked.ConfigMixin.set_up_mocks(self)
        # Patch notifier
        notifier_c = mock.patch(NOTIFIER).start()
        self.notifier = mock.Mock()
        notifier_c.return_value = self.notifier
        # Patch device_exists
        self.dev_exists = mock.patch(DEV_EXISTS).start()
        # Patch IPDevice
        ipdev_c = mock.patch(IP_DEVICE).start()
        self.ipdev = mock.Mock()
        ipdev_c.return_value = self.ipdev
        self.ipdev.link.address = mocked.SERVICE_HOST_MAC
        # Patch execute so 'lldpctl' returns the canned LLDPCTL_RES output.
        self.execute = mock.patch(EXECUTE).start()
        self.execute.return_value = LLDPCTL_RES
        # Patch tasks
        self.periodic_task = mock.patch(PERIODIC_TASK).start()
        self.agent = apic_topology.ApicTopologyAgent()
        self.agent.host = mocked.SERVICE_HOST
        self.agent.service_agent = mock.Mock()
        self.agent.lldpcmd = LLDP_CMD
    def test_init_host_device_exists(self):
        # When the uplink device exists, its ports are appended to lldpcmd.
        self.agent.lldpcmd = None
        self.dev_exists.return_value = True
        self.agent.init_host()
        self.assertEqual(LLDP_CMD + mocked.APIC_UPLINK_PORTS,
                         self.agent.lldpcmd)
    def test_init_host_device_not_exist(self):
        # Without the uplink device, the bare lldpctl command is kept.
        self.agent.lldpcmd = None
        self.dev_exists.return_value = False
        self.agent.init_host()
        self.assertEqual(LLDP_CMD, self.agent.lldpcmd)
    def test_get_peers(self):
        # _get_peers parses LLDPCTL_RES into per-interface peer tuples.
        self.agent.peers = {}
        peers = self.agent._get_peers()
        expected = [(mocked.SERVICE_HOST, mocked.SERVICE_HOST_IFACE,
                     mocked.SERVICE_HOST_MAC, mocked.APIC_EXT_SWITCH,
                     mocked.APIC_EXT_MODULE, mocked.APIC_EXT_PORT)]
        self.assertEqual(expected,
                         peers[mocked.SERVICE_HOST_IFACE])
    def test_check_for_new_peers_no_peers(self):
        # A newly discovered peer is cached and pushed to the service.
        self.agent.peers = {}
        expected = (mocked.SERVICE_HOST, mocked.SERVICE_HOST_IFACE,
                    mocked.SERVICE_HOST_MAC, mocked.APIC_EXT_SWITCH,
                    mocked.APIC_EXT_MODULE, mocked.APIC_EXT_PORT)
        peers = {mocked.SERVICE_HOST_IFACE: [expected]}
        context = mock.Mock()
        with mock.patch.object(self.agent, '_get_peers',
                               return_value=peers):
            self.agent._check_for_new_peers(context)
            self.assertEqual(expected,
                             self.agent.peers[mocked.SERVICE_HOST_IFACE])
            self.agent.service_agent.update_link.assert_called_once_with(
                context, *expected)
    def test_check_for_new_peers_with_peers(self):
        # A changed peer triggers an update_link with the new attributes.
        expected = (mocked.SERVICE_HOST, mocked.SERVICE_HOST_IFACE,
                    mocked.SERVICE_HOST_MAC, mocked.APIC_EXT_SWITCH,
                    mocked.APIC_EXT_MODULE, mocked.APIC_EXT_PORT)
        peers = {mocked.SERVICE_HOST_IFACE: [expected]}
        self.agent.peers = {mocked.SERVICE_HOST_IFACE:
                            [tuple(x + '1' for x in expected)]}
        context = mock.Mock()
        with mock.patch.object(self.agent, '_get_peers',
                               return_value=peers):
            self.agent._check_for_new_peers(context)
            self.agent.service_agent.update_link.assert_called_with(
                context, *expected)
|
bastianeicher/0repo | refs/heads/master | repo/registry.py | 3 | # Copyright (C) 2013, Thomas Leonard
# See the README file for details, or visit http://0install.net.
from __future__ import print_function
import json
from zeroinstall import SafeException
from zeroinstall.support import basedir
def lookup(uri, missing_ok = False):
    """Search repositories.json for the repository which hosts 'uri'."""
    # Load the registry file if one exists in the XDG config path;
    # otherwise behave as if the registry were empty.
    path = basedir.load_first_config('0install.net', '0repo', 'repositories.json')
    db = {}
    if path:
        with open(path, 'rb') as stream:
            db = json.load(stream)
    # Scan every registered prefix; exactly one may match the URI.
    found = None
    for prefix, repo in db.items():
        if not uri.startswith(prefix):
            continue
        if found:
            raise SafeException("Multiple matching repositories! {a} and {b}".format(
                a = found, b = repo))
        found = repo
    if found:
        return found
    if missing_ok:
        return None
    raise SafeException("No registered repository for {uri} (hint: use '0repo register')".format(uri = uri))
|
emfcamp/micropython | refs/heads/tilda-master | tests/basics/fun3.py | 119 | # function with large number of arguments
def fun(a, b, c, d, e, f, g):
    """Return a + b + (c * d) + (e * f * g); exercises a 7-argument call."""
    total = a + b
    total += c * d
    total += e * f * g
    return total
|
JohnVinyard/zounds | refs/heads/master | zounds/timeseries/test_constantrate.py | 1 | import unittest2
from .constantrate import ConstantRateTimeSeries
from zounds.core import ArrayWithUnits, IdentityDimension
from zounds.timeseries import TimeDimension, Seconds, Milliseconds, TimeSlice
import numpy as np
class ConstantRateTimeSeriesTests(unittest2.TestCase):
    """Validation and slicing behavior of ConstantRateTimeSeries."""
    def test_raises_when_not_array_with_units_instance(self):
        # A plain ndarray (no unit dimensions) must be rejected.
        arr = np.zeros(10)
        self.assertRaises(ValueError, lambda: ConstantRateTimeSeries(arr))
    def test_raises_when_first_dimension_is_not_time_dimension(self):
        # The first axis must be a TimeDimension, not IdentityDimension.
        raw = np.zeros((10, 3))
        arr = ArrayWithUnits(raw, dimensions=[
            IdentityDimension(), TimeDimension(frequency=Seconds(1))])
        self.assertRaises(ValueError, lambda: ConstantRateTimeSeries(arr))
    def test_iter_slices_yields_evenly_spaced_time_slices(self):
        # 500ms hop with 1s duration: slices overlap and advance uniformly.
        raw = np.random.random_sample((10, 3))
        arr = ArrayWithUnits(raw, dimensions=[
            TimeDimension(frequency=Milliseconds(500), duration=Seconds(1)),
            IdentityDimension()
        ])
        crts = ConstantRateTimeSeries(arr)
        slices = list(crts.iter_slices())
        self.assertEqual(10, len(slices))
        ts1, d1 = slices[0]
        self.assertEqual(
            TimeSlice(start=Seconds(0), duration=Seconds(1)), ts1)
        np.testing.assert_allclose(raw[0], d1)
        ts2, d2 = slices[1]
        self.assertEqual(
            TimeSlice(start=Milliseconds(500), duration=Seconds(1)), ts2)
        np.testing.assert_allclose(raw[1], d2)
|
wwj718/edx-platform | refs/heads/master | lms/djangoapps/courseware/tests/test_view_authentication.py | 89 | import datetime
import pytz
from django.core.urlresolvers import reverse
from mock import patch
from nose.plugins.attrib import attr
from courseware.access import has_access
from courseware.tests.helpers import CourseAccessTestMixin, LoginEnrollmentTestCase
from courseware.tests.factories import (
BetaTesterFactory,
StaffFactory,
GlobalStaffFactory,
InstructorFactory,
OrgStaffFactory,
OrgInstructorFactory,
)
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from student.tests.factories import UserFactory, CourseEnrollmentFactory
@attr('shard_1')
class TestViewAuth(ModuleStoreTestCase, LoginEnrollmentTestCase):
    """
    Check that view authentication works properly.
    """
    # Raw (email, password) pairs for tests needing explicit credentials.
    ACCOUNT_INFO = [('view@test.com', 'foo'), ('view2@test.com', 'foo')]
    @staticmethod
    def _reverse_urls(names, course):
        """
        Reverse a list of course urls.
        `names` is a list of URL names that correspond to sections in a course.
        `course` is the instance of CourseDescriptor whose section URLs are to be returned.
        Returns a list of URLs corresponding to sections in the passed in course.
        """
        return [reverse(name, kwargs={'course_id': course.id.to_deprecated_string()})
                for name in names]
    def _check_non_staff_light(self, course):
        """
        Check that non-staff have access to light urls.
        `course` is an instance of CourseDescriptor.
        """
        # "Light" pages: the about page and course listing are always public.
        urls = [reverse('about_course', kwargs={'course_id': course.id.to_deprecated_string()}),
                reverse('courses')]
        for url in urls:
            self.assert_request_status_code(200, url)
    def _check_non_staff_dark(self, course):
        """
        Check that non-staff don't have access to dark urls.
        """
        names = ['courseware', 'instructor_dashboard', 'progress']
        urls = self._reverse_urls(names, course)
        urls.extend([
            reverse('book', kwargs={'course_id': course.id.to_deprecated_string(),
                                    'book_index': index})
            for index, __ in enumerate(course.textbooks)
        ])
        # Dark pages respond 404 (not 403) to non-staff users.
        for url in urls:
            self.assert_request_status_code(404, url)
    def _check_staff(self, course):
        """
        Check that access is right for staff in course.
        """
        names = ['about_course', 'instructor_dashboard', 'progress']
        urls = self._reverse_urls(names, course)
        urls.extend([
            reverse('book', kwargs={'course_id': course.id.to_deprecated_string(),
                                    'book_index': index})
            for index in xrange(len(course.textbooks))
        ])
        for url in urls:
            self.assert_request_status_code(200, url)
        # The student progress tab is not accessible to a student
        # before launch, so the instructor view-as-student feature
        # should return a 404 as well.
        # TODO (vshnayder): If this is not the behavior we want, will need
        # to make access checking smarter and understand both the effective
        # user (the student), and the requesting user (the prof)
        url = reverse(
            'student_progress',
            kwargs={
                'course_id': course.id.to_deprecated_string(),
                'student_id': self.enrolled_user.id,
            }
        )
        self.assert_request_status_code(404, url)
        # The courseware url should redirect, not 200
        url = self._reverse_urls(['courseware'], course)[0]
        self.assert_request_status_code(302, url)
    def login(self, user):
        # All factory-created users share the password 'test'.
        return super(TestViewAuth, self).login(user.email, 'test')
    def setUp(self):
        super(TestViewAuth, self).setUp()
        # Primary course with a chapter -> section -> unit hierarchy.
        self.course = CourseFactory.create(number='999', display_name='Robot_Super_Course')
        self.courseware_chapter = ItemFactory.create(display_name='courseware')
        self.overview_chapter = ItemFactory.create(
            parent_location=self.course.location,
            display_name='Super Overview'
        )
        self.welcome_section = ItemFactory.create(
            parent_location=self.overview_chapter.location,
            display_name='Super Welcome'
        )
        self.welcome_unit = ItemFactory.create(
            parent_location=self.welcome_section.location,
            display_name='Super Unit'
        )
        self.course = modulestore().get_course(self.course.id)
        # Second course in the same org, plus one in a different org,
        # to exercise org-scoped staff/instructor permissions.
        self.test_course = CourseFactory.create(org=self.course.id.org)
        self.other_org_course = CourseFactory.create(org='Other_Org_Course')
        self.sub_courseware_chapter = ItemFactory.create(
            parent_location=self.test_course.location,
            display_name='courseware'
        )
        self.sub_overview_chapter = ItemFactory.create(
            parent_location=self.sub_courseware_chapter.location,
            display_name='Overview'
        )
        self.sub_welcome_section = ItemFactory.create(
            parent_location=self.sub_overview_chapter.location,
            display_name='Welcome'
        )
        self.sub_welcome_unit = ItemFactory.create(
            parent_location=self.sub_welcome_section.location,
            display_name='New Unit'
        )
        self.test_course = modulestore().get_course(self.test_course.id)
        # One user of each role flavor; enrolled_user is enrolled in both
        # same-org courses.
        self.global_staff_user = GlobalStaffFactory()
        self.unenrolled_user = UserFactory(last_name="Unenrolled")
        self.enrolled_user = UserFactory(last_name="Enrolled")
        CourseEnrollmentFactory(user=self.enrolled_user, course_id=self.course.id)
        CourseEnrollmentFactory(user=self.enrolled_user, course_id=self.test_course.id)
        self.staff_user = StaffFactory(course_key=self.course.id)
        self.instructor_user = InstructorFactory(course_key=self.course.id)
        self.org_staff_user = OrgStaffFactory(course_key=self.course.id)
        self.org_instructor_user = OrgInstructorFactory(course_key=self.course.id)
    def test_redirection_unenrolled(self):
        """
        Verify unenrolled student is redirected to the 'about' section of the chapter
        instead of the 'Welcome' section after clicking on the courseware tab.
        """
        self.login(self.unenrolled_user)
        response = self.client.get(reverse('courseware',
                                           kwargs={'course_id': self.course.id.to_deprecated_string()}))
        self.assertRedirects(
            response,
            reverse(
                'about_course',
                args=[self.course.id.to_deprecated_string()]
            )
        )
    def test_redirection_enrolled(self):
        """
        Verify enrolled student is redirected to the 'Welcome' section of
        the chapter after clicking on the courseware tab.
        """
        self.login(self.enrolled_user)
        response = self.client.get(
            reverse(
                'courseware',
                kwargs={'course_id': self.course.id.to_deprecated_string()}
            )
        )
        self.assertRedirects(
            response,
            reverse(
                'courseware_section',
                kwargs={'course_id': self.course.id.to_deprecated_string(),
                        'chapter': self.overview_chapter.url_name,
                        'section': self.welcome_section.url_name}
            )
        )
    def test_instructor_page_access_nonstaff(self):
        """
        Verify non-staff cannot load the instructor
        dashboard, the grade views, and student profile pages.
        """
        self.login(self.enrolled_user)
        urls = [reverse('instructor_dashboard', kwargs={'course_id': self.course.id.to_deprecated_string()}),
                reverse('instructor_dashboard', kwargs={'course_id': self.test_course.id.to_deprecated_string()})]
        # Shouldn't be able to get to the instructor pages
        for url in urls:
            self.assert_request_status_code(404, url)
    def test_staff_course_access(self):
        """
        Verify staff can load the staff dashboard, the grade views,
        and student profile pages for their course.
        """
        self.login(self.staff_user)
        # Now should be able to get to self.course, but not self.test_course
        url = reverse('instructor_dashboard', kwargs={'course_id': self.course.id.to_deprecated_string()})
        self.assert_request_status_code(200, url)
        url = reverse('instructor_dashboard', kwargs={'course_id': self.test_course.id.to_deprecated_string()})
        self.assert_request_status_code(404, url)
    def test_instructor_course_access(self):
        """
        Verify instructor can load the instructor dashboard, the grade views,
        and student profile pages for their course.
        """
        self.login(self.instructor_user)
        # Now should be able to get to self.course, but not self.test_course
        url = reverse('instructor_dashboard', kwargs={'course_id': self.course.id.to_deprecated_string()})
        self.assert_request_status_code(200, url)
        url = reverse('instructor_dashboard', kwargs={'course_id': self.test_course.id.to_deprecated_string()})
        self.assert_request_status_code(404, url)
    def test_org_staff_access(self):
        """
        Verify org staff can load the instructor dashboard, the grade views,
        and student profile pages for course in their org.
        """
        self.login(self.org_staff_user)
        # Org-wide role grants access to both same-org courses...
        url = reverse('instructor_dashboard', kwargs={'course_id': self.course.id.to_deprecated_string()})
        self.assert_request_status_code(200, url)
        url = reverse('instructor_dashboard', kwargs={'course_id': self.test_course.id.to_deprecated_string()})
        self.assert_request_status_code(200, url)
        # ...but not to a course in a different org.
        url = reverse('instructor_dashboard', kwargs={'course_id': self.other_org_course.id.to_deprecated_string()})
        self.assert_request_status_code(404, url)
    def test_org_instructor_access(self):
        """
        Verify org instructor can load the instructor dashboard, the grade views,
        and student profile pages for course in their org.
        """
        self.login(self.org_instructor_user)
        url = reverse('instructor_dashboard', kwargs={'course_id': self.course.id.to_deprecated_string()})
        self.assert_request_status_code(200, url)
        url = reverse('instructor_dashboard', kwargs={'course_id': self.test_course.id.to_deprecated_string()})
        self.assert_request_status_code(200, url)
        url = reverse('instructor_dashboard', kwargs={'course_id': self.other_org_course.id.to_deprecated_string()})
        self.assert_request_status_code(404, url)
    def test_global_staff_access(self):
        """
        Verify the global staff user can access any course.
        """
        self.login(self.global_staff_user)
        # and now should be able to load both
        urls = [reverse('instructor_dashboard', kwargs={'course_id': self.course.id.to_deprecated_string()}),
                reverse('instructor_dashboard', kwargs={'course_id': self.test_course.id.to_deprecated_string()})]
        for url in urls:
            self.assert_request_status_code(200, url)
    @patch.dict('courseware.access.settings.FEATURES', {'DISABLE_START_DATES': False})
    def test_dark_launch_enrolled_student(self):
        """
        Make sure that before course start, students can't access course
        pages.
        """
        # Make courses start in the future
        now = datetime.datetime.now(pytz.UTC)
        tomorrow = now + datetime.timedelta(days=1)
        self.course.start = tomorrow
        self.test_course.start = tomorrow
        self.course = self.update_course(self.course, self.user.id)
        self.test_course = self.update_course(self.test_course, self.user.id)
        self.assertFalse(self.course.has_started())
        self.assertFalse(self.test_course.has_started())
        # First, try with an enrolled student
        self.login(self.enrolled_user)
        # shouldn't be able to get to anything except the light pages
        self._check_non_staff_light(self.course)
        self._check_non_staff_dark(self.course)
        self._check_non_staff_light(self.test_course)
        self._check_non_staff_dark(self.test_course)
    @patch.dict('courseware.access.settings.FEATURES', {'DISABLE_START_DATES': False})
    def test_dark_launch_instructor(self):
        """
        Make sure that before course start instructors can access the
        page for their course.
        """
        now = datetime.datetime.now(pytz.UTC)
        tomorrow = now + datetime.timedelta(days=1)
        self.course.start = tomorrow
        self.test_course.start = tomorrow
        self.course = self.update_course(self.course, self.user.id)
        self.test_course = self.update_course(self.test_course, self.user.id)
        self.login(self.instructor_user)
        # Enroll in the classes---can't see courseware otherwise.
        self.enroll(self.course, True)
        self.enroll(self.test_course, True)
        # should now be able to get to everything for self.course
        self._check_non_staff_light(self.test_course)
        self._check_non_staff_dark(self.test_course)
        self._check_staff(self.course)
    @patch.dict('courseware.access.settings.FEATURES', {'DISABLE_START_DATES': False})
    def test_dark_launch_global_staff(self):
        """
        Make sure that before course start staff can access
        course pages.
        """
        now = datetime.datetime.now(pytz.UTC)
        tomorrow = now + datetime.timedelta(days=1)
        self.course.start = tomorrow
        self.test_course.start = tomorrow
        self.course = self.update_course(self.course, self.user.id)
        self.test_course = self.update_course(self.test_course, self.user.id)
        self.login(self.global_staff_user)
        self.enroll(self.course, True)
        self.enroll(self.test_course, True)
        # and now should be able to load both
        self._check_staff(self.course)
        self._check_staff(self.test_course)
    @patch.dict('courseware.access.settings.FEATURES', {'DISABLE_START_DATES': False})
    def test_enrollment_period(self):
        """
        Check that enrollment periods work.
        """
        # Make courses start in the future
        now = datetime.datetime.now(pytz.UTC)
        tomorrow = now + datetime.timedelta(days=1)
        nextday = tomorrow + datetime.timedelta(days=1)
        yesterday = now - datetime.timedelta(days=1)
        # self.course's enrollment period hasn't started
        self.course.enrollment_start = tomorrow
        self.course.enrollment_end = nextday
        # test_course course's has
        self.test_course.enrollment_start = yesterday
        self.test_course.enrollment_end = tomorrow
        self.course = self.update_course(self.course, self.user.id)
        self.test_course = self.update_course(self.test_course, self.user.id)
        # First, try with an enrolled student
        self.login(self.unenrolled_user)
        self.assertFalse(self.enroll(self.course))
        self.assertTrue(self.enroll(self.test_course))
        self.logout()
        # Instructors and global staff can enroll outside the window.
        self.login(self.instructor_user)
        self.assertTrue(self.enroll(self.course))
        # unenroll and try again
        self.login(self.global_staff_user)
        self.assertTrue(self.enroll(self.course))
@attr('shard_1')
class TestBetatesterAccess(ModuleStoreTestCase, CourseAccessTestMixin):
    """
    Tests for the beta tester feature
    """
    def setUp(self):
        super(TestBetatesterAccess, self).setUp()
        now = datetime.datetime.now(pytz.UTC)
        tomorrow = now + datetime.timedelta(days=1)
        # Course starts tomorrow; with days_early_for_beta=2 the beta window
        # is already open even though the course itself has not started.
        self.course = CourseFactory(days_early_for_beta=2, start=tomorrow)
        self.content = ItemFactory(parent=self.course)
        self.normal_student = UserFactory()
        self.beta_tester = BetaTesterFactory(course_key=self.course.id)
    @patch.dict('courseware.access.settings.FEATURES', {'DISABLE_START_DATES': False})
    def test_course_beta_period(self):
        """
        Check that beta-test access works for courses.
        """
        # Sanity check: the course itself has not started yet.
        self.assertFalse(self.course.has_started())
        self.assertCannotAccessCourse(self.normal_student, 'load', self.course)
        self.assertCanAccessCourse(self.beta_tester, 'load', self.course)
    @patch.dict('courseware.access.settings.FEATURES', {'DISABLE_START_DATES': False})
    def test_content_beta_period(self):
        """
        Check that beta-test access works for content.
        """
        # student user shouldn't see it
        self.assertFalse(has_access(self.normal_student, 'load', self.content, self.course.id))
        # now the student should see it
        self.assertTrue(has_access(self.beta_tester, 'load', self.content, self.course.id))
|
ewiseblatt/spinnaker | refs/heads/master | dev/buildtool/__main__.py | 3 | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The main program for organizing the buildtool.
This module is reponsible for determining the configuration
then acquiring and dispatching commands.
Commands are introduced into modules, and modules are explicitly
plugged into the command_modules[] list in main() where they
will be initialized and their commands registered into the registry.
From there this module will be able to process arguments and
dispatch commands.
"""
import argparse
import datetime
import logging
import os
import sys
import time
import yaml
from buildtool.metrics import MetricsManager
from buildtool import (
add_parser_argument,
maybe_log_exception,
GitRunner)
STANDARD_LOG_LEVELS = {
'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR
}
# This is so tests can disable it
CHECK_HOME_FOR_CONFIG = True
def add_standard_parser_args(parser, defaults):
  """Init argparser with command-independent options.
  Args:
    parser: [argparse.Parser]
    defaults: [dict] Default value overrides keyed by option name.
  """
  # Positional: optional list restricting which components commands act on.
  parser.add_argument(
      'components', nargs='*', default=defaults.get('components', None),
      help='Restrict commands to these components or repository names')
  add_parser_argument(
      parser, 'default_args_file', defaults, None,
      help='Path to YAML file containing command-line option overrides.'
           ' The default is $HOME/.spinnaker/buildtool.yml if present.'
           ' This parameter will overload the defaults. Embedded'
           ' default_args_file will also be read at a lower precedence than'
           ' the containing file.')
  add_parser_argument(
      parser, 'log_level', defaults, 'info',
      choices=STANDARD_LOG_LEVELS.keys(),
      help='Set the logging level')
  add_parser_argument(
      parser, 'output_dir', defaults, 'output',
      help='Directory to write working files.')
  add_parser_argument(
      parser, 'input_dir', defaults, 'source_code',
      help='Directory to cache input files, such as cloning git repos.')
  add_parser_argument(
      parser, 'one_at_a_time', defaults, False, type=bool,
      help='Do not perform applicable concurrency, for debugging.')
  # Default id is date + pid so concurrent runs produce distinct metrics.
  add_parser_argument(
      parser, 'parent_invocation_id', defaults,
      '{:%y%m%d}.{}'.format(datetime.datetime.utcnow(), os.getpid()),
      help='For identifying the context of the metrics data to be produced.')
def __load_defaults_from_path(path, visited=None):
  """Load option defaults from a YAML file, following default_args_file links.

  Args:
    path: [string] Path to the YAML file to load.
    visited: [list of strings] Paths already on the include chain, used to
      detect circular "default_args_file" references.

  Returns:
    dict of defaults, where values from files deeper in the chain are
    overridden by the files that reference them.

  Raises:
    ValueError: if the chain of default_args_file references is circular.
  """
  visited = visited or []
  if path in visited:
    raise ValueError('Circular "default_args_file" dependency in %s' % path)
  visited.append(path)
  with open(path, 'r') as f:
    defaults = yaml.safe_load(f)

  # Allow these files to be recursive
  # So that there can be some overall default file
  # that is then overridden by another file where
  # the override file references the default one
  # and the CLI argument points to the override file.
  base_defaults_file = defaults.get('default_args_file')
  if base_defaults_file:
    # Propagate |visited| so indirect cycles are detected; previously a
    # fresh list was created per call, so a cycle recursed forever.
    base_defaults = __load_defaults_from_path(base_defaults_file, visited)
    base_defaults.update(defaults)  # base is lower precedence.
    defaults = base_defaults  # defaults is what we want to return.

  return defaults
def preprocess_args(args, default_home_path_filename='buildtool.yml'):
  """Preprocess the args to determine the defaults to use.

  This recognizes the --default_args_file override and, if present, loads it.

  Returns:
    args, defaults
  Where:
    args are the remaining arguments (with --default_args_file removed)
    defaults are overridden defaults from the default_args_file, if present.
  """
  # Only peel off --default_args_file here; everything else is parsed later
  # by the full parser built in init_options_and_registry.
  parser = argparse.ArgumentParser(add_help=False)
  parser.add_argument('--default_args_file', default=None)
  options, args = parser.parse_known_args(args)
  home_path = os.path.join(os.environ['HOME'], '.spinnaker',
                           default_home_path_filename)
  # The $HOME config (if any) forms the base layer of defaults.
  if CHECK_HOME_FOR_CONFIG and os.path.exists(home_path):
    defaults = __load_defaults_from_path(home_path)
    defaults['default_args_file'] = home_path
  else:
    defaults = {}
  # An explicit --default_args_file takes precedence over the home config.
  if options.default_args_file:
    override_defaults = __load_defaults_from_path(options.default_args_file)
    override_defaults['default_args_file'] = options.default_args_file
    defaults.update(override_defaults)
  return args, defaults
def make_registry(command_modules, parser, defaults):
  """Collect command factories from each module into a single registry.

  Args:
    command_modules: [list of modules] Modules exposing a
      register_commands(registry, subparsers, defaults) function that
      registers CommandFactory instances into the registry.
    parser: [ArgumentParser] Receives a 'command' subparser for the
      requested command choice.
    defaults: [dict] Default values to specify when adding arguments.

  Returns:
    dict mapping command name to its CommandFactory.
  """
  command_subparsers = parser.add_subparsers(title='command', dest='command')
  factories = {}
  for command_module in command_modules:
    command_module.register_commands(factories, command_subparsers, defaults)
  return factories
def add_monitoring_context_labels(options):
  """Derive metric context labels (version / official_version) from options.

  Mutates options.monitoring_context_labels in place when a version name can
  be determined from --git_branch or from a bom version/path option.
  """
  opts = vars(options)
  version = opts.get('git_branch', None)
  if not version:
    bom = opts.get('bom_version') or opts.get('bom_path')
    if bom:
      for suffix in ('-unbuilt', '-latest'):
        pos = bom.find(suffix)
        if pos > 0:
          version = bom[:pos]
          break
      else:
        # Otherwise drop everything from the last '-' onward.
        version = bom[:bom.rfind('-')]
  if not version:
    return
  labels = ['version=' + version]
  is_official = (version == 'master'
                 or version.startswith('release-')
                 or version.startswith('master-latest-'))
  if is_official:
    labels.append('official_version=' + version)
  # Preserve any labels the caller already supplied.
  if options.monitoring_context_labels:
    labels.append(options.monitoring_context_labels)
  options.monitoring_context_labels = ','.join(labels)
def init_options_and_registry(args, command_modules):
  """Register command modules and determine options from commandline.
  These are coupled together for implementation simplicity. Conceptually
  they are unrelated but they share implementation details that can be
  encapsulated by combining them this way.
  Args:
    args: [list of command-line arguments]
    command_modules: See make_registry.
  Returns:
    options, registry
  Where:
    options: [Namespace] From parsed args.
    registry: [dict] of (<command-name>: <CommandFactory>)
  """
  # Layered defaults (home config + --default_args_file) come first so the
  # real parser can embed them as argument defaults.
  args, defaults = preprocess_args(args)
  parser = argparse.ArgumentParser(prog='buildtool.sh')
  add_standard_parser_args(parser, defaults)
  MetricsManager.init_argument_parser(parser, defaults)
  registry = make_registry(command_modules, parser, defaults)
  options = parser.parse_args(args)
  options.program = 'buildtool'
  # Determine the version for monitoring purposes.
  # Depending on the options defined, this is either the branch or bom prefix.
  add_monitoring_context_labels(options)
  return options, registry
def main():
  """The main command dispatcher.

  Returns the process exit code: 0 on success, -1 for an unknown command.
  """
  start_time = time.time()
  from importlib import import_module
  # Each '<name>_commands' module contributes command factories.
  command_modules = [
      import_module(name + '_commands') for name in [
          'apidocs',
          'bom',
          'changelog',
          'container',
          'debian',
          'halyard',
          'image',
          'rpm',
          'source',
          'spinnaker',
          'inspection',
          'spin',
      ]]
  GitRunner.stash_and_clear_auth_env_vars()
  options, command_registry = init_options_and_registry(
      sys.argv[1:], command_modules)
  logging.basicConfig(
      format='%(levelname).1s %(asctime)s.%(msecs)03d'
      ' [%(threadName)s.%(process)d] %(message)s',
      datefmt='%H:%M:%S',
      level=STANDARD_LOG_LEVELS[options.log_level])
  logging.debug(
      'Running with options:\n   %s',
      '\n   '.join(yaml.safe_dump(vars(options), default_flow_style=False)
                   .split('\n')))
  factory = command_registry.get(options.command)
  if not factory:
    logging.error('Unknown command "%s"', options.command)
    return -1
  MetricsManager.startup_metrics(options)
  labels = {'command': options.command}
  success = False
  try:
    command = factory.make_command(options)
    command()
    success = True
  finally:
    # Always record the outcome timer, even when the command raised.
    labels['success'] = success
    MetricsManager.singleton().observe_timer(
        'BuildTool_Outcome', labels,
        time.time() - start_time)
    MetricsManager.shutdown_metrics()
  return 0
def dump_threads():
  """Log any threads still alive, to help debug a possible hung shutdown.

  A process once did not exit when the log file suggested it was done --
  perhaps a background thread was being joined. Should that happen again,
  logging the surviving threads here leaves a clue.
  """
  import threading
  descriptions = [
      ' name={name} daemon={d} id={id}'.format(
          name=t.name, d=t.daemon, id=t.ident)
      for t in threading.enumerate()]
  # Only the main thread remaining is the normal case; stay quiet then.
  if len(descriptions) > 1:
    logging.info('The following threads still running:\n%s',
                 '\n'.join(descriptions))
def wrapped_main():
  """Run main and dump outstanding threads when done.

  Returns main's exit code, or -1 if main raised.
  """
  # pylint: disable=broad-except
  try:
    retcode = main()
  except Exception as ex:
    # Flush stdout first so the failure log lands after any command output.
    sys.stdout.flush()
    maybe_log_exception('main()', ex, action_msg='Terminating')
    logging.error("FAILED")
    retcode = -1
  # Diagnostic aid for hangs at exit; see dump_threads.
  dump_threads()
  return retcode
if __name__ == '__main__':
sys.exit(wrapped_main())
|
ncareol/lrose-soloPy | refs/heads/master | lrose_solopy/radial_demo.py | 1 | # *=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
# ** Copyright UCAR (c) 1992 - 2014
# ** University Corporation for Atmospheric Research(UCAR)
# ** National Center for Atmospheric Research(NCAR)
# ** P.O.Box 3000, Boulder, Colorado, 80307-3000, USA
# ** See LICENSE.TXT for license details
# *=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
#!/usr/bin/python
# colors.py
import time
import sys
from PySide import QtGui, QtCore
import math
class Colors(QtGui.QWidget):
    """Widget that paints a 360-degree sweep of colored radial gates.

    Each repaint advances the starting palette index, so successive frames
    cycle the colors around the display.
    """
    def __init__(self, parent=None):
        QtGui.QWidget.__init__(self, parent)
        self.setGeometry(900, 900, 900, 900)
        self.setWindowTitle('Colors')
        # Palette cycled through while painting gates.
        self._colors = [ QtGui.QColor(255, 0, 0),
                         QtGui.QColor(0, 255, 0),
                         QtGui.QColor(0, 0, 255),
                         QtGui.QColor(50, 50, 0),
                         QtGui.QColor(0, 50, 50),
                         QtGui.QColor(50, 0, 50),
                         QtGui.QColor(150, 150, 0),
                         QtGui.QColor(0, 150, 150),
                         QtGui.QColor(150, 0, 150)
                         ]
        # Pixel coordinates of the sweep center.
        self.origin = [350, 350]
        # colorIndex advances per gate; drawIndex marks each frame's start color.
        self.colorIndex = 0
        self.drawIndex = self.colorIndex
    def drawRay(self, painter, angle, beamSpacing, pixPerGate, maxGates):
        """Paint one beam at `angle` (radians): maxGates quadrilateral gates
        extending outward from the origin, one palette color per gate."""
        sin1 = math.sin(angle)
        cos1 = math.cos(angle)
        sin2 = math.sin(angle+beamSpacing)
        cos2 = math.cos(angle+beamSpacing)
        XC = self.origin[0]
        YC = self.origin[1]
        for g in range(maxGates):
            painter.setBrush(self._colors[self.colorIndex])
            # Inner/outer radii of this gate along the beam.
            r0 = g*pixPerGate
            r1 = (g+1)*pixPerGate
            poly = QtGui.QPolygon([
                QtCore.QPoint( XC + cos1 * r0, YC + sin1 * r0),
                QtCore.QPoint( XC + cos1 * r1, YC + sin1 * r1),
                QtCore.QPoint( XC + cos2 * r1, YC + sin2 * r1),
                QtCore.QPoint( XC + cos2 * r0, YC + sin2 * r0),
                ])
            painter.drawConvexPolygon(poly)
            self.colorIndex = (self.colorIndex + 1) % len(self._colors)
    def paintEvent(self, event):
        """Repaint the full sweep (one-degree beams, 199 gates each),
        timing how long the draw takes."""
        paint = QtGui.QPainter()
        paint.begin(self)
        color = QtGui.QColor(0, 0, 0)
        color.setNamedColor('#d4d4d4')
        paint.setPen(color)
        # Rotate the palette one step each frame.
        self.drawIndex = (self.drawIndex + 1) % len(self._colors)
        self.colorIndex = self.drawIndex
        start = time.time()
        a = 0
        beamSpacing = 1.0
        while a < 360:
            print "paintEvent: drawing %f\n" % a
            self.drawRay(paint, a * math.pi/180.0, beamSpacing * math.pi/180.0, 6, 199)
            a = a + beamSpacing
        paint.end()
        elapsed = time.time() - start
        print "draw took %5.1f seconds\n" % elapsed
app = QtGui.QApplication(sys.argv)
dt = Colors()
dt.show()
app.exec_()
|
dvliman/jaikuengine | refs/heads/master | .google_appengine/lib/django-1.4/django/conf/locale/te/formats.py | 433 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j F Y'
TIME_FORMAT = 'g:i:s A'
# DATETIME_FORMAT =
# YEAR_MONTH_FORMAT =
MONTH_DAY_FORMAT = 'j F'
SHORT_DATE_FORMAT = 'j M Y'
# SHORT_DATETIME_FORMAT =
# FIRST_DAY_OF_WEEK =
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# DATE_INPUT_FORMATS =
# TIME_INPUT_FORMATS =
# DATETIME_INPUT_FORMATS =
# DECIMAL_SEPARATOR =
# THOUSAND_SEPARATOR =
# NUMBER_GROUPING =
|
nikhiljan93/sony_yuga_kernel | refs/heads/rebase | tools/perf/python/twatch.py | 7370 | #! /usr/bin/python
# -*- python -*-
# -*- coding: utf-8 -*-
# twatch - Experimental use of the perf python interface
# Copyright (C) 2011 Arnaldo Carvalho de Melo <acme@redhat.com>
#
# This application is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 2.
#
# This application is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
import perf
def main():
    """Open a perf event on every CPU/thread and print samples as they arrive."""
    cpus = perf.cpu_map()
    threads = perf.thread_map()
    # NOTE(review): SAMPLE_TID appears twice in the sample_type mask; one
    # occurrence may have been intended as a different SAMPLE_* flag -- confirm.
    evsel = perf.evsel(task = 1, comm = 1, mmap = 0,
                       wakeup_events = 1, watermark = 1,
                       sample_id_all = 1,
                       sample_type = perf.SAMPLE_PERIOD | perf.SAMPLE_TID | perf.SAMPLE_CPU | perf.SAMPLE_TID)
    evsel.open(cpus = cpus, threads = threads);
    evlist = perf.evlist(cpus, threads)
    evlist.add(evsel)
    evlist.mmap()
    # Block until a buffer has data, then drain each CPU's mmap ring.
    while True:
        evlist.poll(timeout = -1)
        for cpu in cpus:
            event = evlist.read_on_cpu(cpu)
            if not event:
                continue
            print "cpu: %2d, pid: %4d, tid: %4d" % (event.sample_cpu,
                                                    event.sample_pid,
                                                    event.sample_tid),
            print event
main()
|
kerneltask/micropython | refs/heads/master | tests/basics/builtin_hasattr.py | 23 | class A:
var = 132
def __init__(self):
self.var2 = 34
def meth(self, i):
return 42 + i
a = A()
print(hasattr(a, "var"))
print(hasattr(a, "var2"))
print(hasattr(a, "meth"))
print(hasattr(a, "_none_such"))
print(hasattr(list, "foo"))
class C:
    """Fixture exercising hasattr against a custom __getattr__."""
    def __getattr__(self, attr):
        # "exists" resolves normally; "raise" raises a non-AttributeError,
        # which hasattr must propagate; anything else raises AttributeError,
        # which hasattr reports as False.
        if attr == "exists":
            return attr
        elif attr == "raise":
            raise Exception(123)
        raise AttributeError
c = C()
print(hasattr(c, "exists"))
print(hasattr(c, "doesnt_exist"))
# ensure that non-AttributeError exceptions propagate out of hasattr
try:
hasattr(c, "raise")
except Exception as er:
print(er)
try:
hasattr(1, b'123')
except TypeError:
print('TypeError')
try:
hasattr(1, 123)
except TypeError:
print('TypeError')
|
ribag/ganeti-experiments | refs/heads/topic-cli-quote | lib/rapi/client_utils.py | 11 | #
#
# Copyright (C) 2010 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
"""RAPI client utilities.
"""
from ganeti import constants
from ganeti import cli
from ganeti.rapi import client
# Local constant to avoid importing ganeti.http
HTTP_NOT_FOUND = 404
class RapiJobPollCb(cli.JobPollCbBase):
  """Job-poll callback implemented over the RAPI client."""
  def __init__(self, cl):
    """Initializes this class.
    @param cl: RAPI client instance
    """
    cli.JobPollCbBase.__init__(self)
    self.cl = cl
  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job.
    """
    try:
      result = self.cl.WaitForJobChange(job_id, fields,
                                        prev_job_info, prev_log_serial)
    except client.GanetiApiError, err:
      # HTTP 404 means the job is no longer known; report None rather
      # than failing.
      if err.code == HTTP_NOT_FOUND:
        return None
      raise
    if result is None:
      # The wait returned without the job changing.
      return constants.JOB_NOTCHANGED
    return (result["job_info"], result["log_entries"])
  def QueryJobs(self, job_ids, fields):
    """Returns the given fields for the selected job IDs.
    @type job_ids: list of numbers
    @param job_ids: Job IDs
    @type fields: list of strings
    @param fields: Fields
    """
    if len(job_ids) != 1:
      raise NotImplementedError("Only one job supported at this time")
    try:
      result = self.cl.GetJobStatus(job_ids[0])
    except client.GanetiApiError, err:
      # A vanished job is reported as [None] instead of an error.
      if err.code == HTTP_NOT_FOUND:
        return [None]
      raise
    return [[result[name] for name in fields], ]
def PollJob(rapi_client, job_id, reporter):
  """Function to poll for the result of a job.
  @param rapi_client: RAPI client instance
  @type job_id: number
  @param job_id: Job ID
  @type reporter: L{cli.JobPollReportCbBase}
  @param reporter: PollJob reporter instance
  """
  # Delegate to the generic CLI poller using the RAPI-backed callback.
  return cli.GenericPollJob(job_id, RapiJobPollCb(rapi_client), reporter)
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c), Michael DeHaan <michael.dehaan@gmail.com>, 2012-2013
# Copyright (c), Toshio Kuratomi <tkuratomi@ansible.com>, 2015
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The match_hostname function and supporting code is under the terms and
# conditions of the Python Software Foundation License. They were taken from
# the Python3 standard library and adapted for use in Python2. See comments in the
# source for which code precisely is under this License. PSF License text
# follows:
#
# PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
# --------------------------------------------
#
# 1. This LICENSE AGREEMENT is between the Python Software Foundation
# ("PSF"), and the Individual or Organization ("Licensee") accessing and
# otherwise using this software ("Python") in source or binary form and
# its associated documentation.
#
# 2. Subject to the terms and conditions of this License Agreement, PSF hereby
# grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce,
# analyze, test, perform and/or display publicly, prepare derivative works,
# distribute, and otherwise use Python alone or in any derivative version,
# provided, however, that PSF's License Agreement and PSF's notice of copyright,
# i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
# 2011, 2012, 2013, 2014 Python Software Foundation; All Rights Reserved" are
# retained in Python alone or in any derivative version prepared by Licensee.
#
# 3. In the event Licensee prepares a derivative work that is based on
# or incorporates Python or any part thereof, and wants to make
# the derivative work available to others as provided herein, then
# Licensee hereby agrees to include in any such work a brief summary of
# the changes made to Python.
#
# 4. PSF is making Python available to Licensee on an "AS IS"
# basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
# IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND
# DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
# FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT
# INFRINGE ANY THIRD PARTY RIGHTS.
#
# 5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
# FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
# A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON,
# OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
#
# 6. This License Agreement will automatically terminate upon a material
# breach of its terms and conditions.
#
# 7. Nothing in this License Agreement shall be deemed to create any
# relationship of agency, partnership, or joint venture between PSF and
# Licensee. This License Agreement does not grant permission to use PSF
# trademarks or trade name in a trademark sense to endorse or promote
# products or services of Licensee, or any third party.
#
# 8. By copying, installing or otherwise using Python, Licensee
# agrees to be bound by the terms and conditions of this License
# Agreement.
'''
The **urls** utils module offers a replacement for the urllib2 python library.
urllib2 is the python stdlib way to retrieve files from the Internet but it
lacks some security features (around verifying SSL certificates) that users
should care about in most situations. Using the functions in this module corrects
deficiencies in the urllib2 module wherever possible.
There are also third-party libraries (for instance, requests) which can be used
to replace urllib2 with a more secure library. However, all third party libraries
require that the library be installed on the managed machine. That is an extra step
for users making use of a module. If possible, avoid third party libraries by using
this code instead.
'''
import netrc
import os
import re
import sys
import socket
import platform
import tempfile
import base64
try:
import httplib
except ImportError:
# Python 3
import http.client as httplib
import ansible.module_utils.six.moves.urllib.request as urllib_request
import ansible.module_utils.six.moves.urllib.error as urllib_error
from ansible.module_utils.basic import get_distribution, get_exception
from ansible.module_utils.six import b
from ansible.module_utils._text import to_bytes, to_native, to_text
try:
# python3
import urllib.request as urllib_request
from urllib.request import AbstractHTTPHandler
except ImportError:
# python2
import urllib2 as urllib_request
from urllib2 import AbstractHTTPHandler
try:
from ansible.module_utils.six.moves.urllib.parse import urlparse, urlunparse
HAS_URLPARSE = True
except:
HAS_URLPARSE = False
try:
import ssl
HAS_SSL = True
except:
HAS_SSL = False
try:
# SNI Handling needs python2.7.9's SSLContext
from ssl import create_default_context, SSLContext
HAS_SSLCONTEXT = True
except ImportError:
HAS_SSLCONTEXT = False
# SNI Handling for python < 2.7.9 with urllib3 support
try:
# urllib3>=1.15
HAS_URLLIB3_SSL_WRAP_SOCKET = False
try:
from urllib3.contrib.pyopenssl import PyOpenSSLContext
except ImportError:
from requests.packages.urllib3.contrib.pyopenssl import PyOpenSSLContext
HAS_URLLIB3_PYOPENSSLCONTEXT = True
except ImportError:
# urllib3<1.15,>=1.6
HAS_URLLIB3_PYOPENSSLCONTEXT = False
try:
try:
from urllib3.contrib.pyopenssl import ssl_wrap_socket
except ImportError:
from requests.packages.urllib3.contrib.pyopenssl import ssl_wrap_socket
HAS_URLLIB3_SSL_WRAP_SOCKET = True
except ImportError:
pass
# Select a protocol that includes all secure tls protocols
# Exclude insecure ssl protocols if possible
if HAS_SSL:
    # If we can't find extra tls methods, ssl.PROTOCOL_TLSv1 is sufficient
    PROTOCOL = ssl.PROTOCOL_TLSv1

# Without SSLContext we cannot rely on the default negotiation, so probe
# the system's libssl via ctypes to see whether TLS 1.1/1.2 methods exist.
if not HAS_SSLCONTEXT and HAS_SSL:
    try:
        import ctypes
        import ctypes.util
    except ImportError:
        # python 2.4 (likely rhel5 which doesn't have tls1.1 support in its openssl)
        pass
    else:
        libssl_name = ctypes.util.find_library('ssl')
        libssl = ctypes.CDLL(libssl_name)
        for method in ('TLSv1_1_method', 'TLSv1_2_method'):
            try:
                # symbol lookup raises AttributeError when the method is absent
                libssl[method]
                # Found something - we'll let openssl autonegotiate and hope
                # the server has disabled sslv2 and 3. best we can do.
                PROTOCOL = ssl.PROTOCOL_SSLv23
                break
            except AttributeError:
                pass
        del libssl

# CA cert file paths already fed into an SSLContext; consulted by
# SSLValidationHandler.get_ca_certs() so the same file is not re-added.
LOADED_VERIFY_LOCATIONS = set()

# Prefer the stdlib match_hostname (py3 / recent py2.7), then the PyPI
# backport; a vendored fallback is defined later when neither is present.
HAS_MATCH_HOSTNAME = True
try:
    from ssl import match_hostname, CertificateError
except ImportError:
    try:
        from backports.ssl_match_hostname import match_hostname, CertificateError
    except ImportError:
        HAS_MATCH_HOSTNAME = False
if not HAS_MATCH_HOSTNAME:
    ###
    ### The following block of code is under the terms and conditions of the
    ### Python Software Foundation License
    ###

    """The match_hostname() function from Python 3.4, essential when using SSL."""

    class CertificateError(ValueError):
        # Raised when the peer certificate does not match the requested host.
        pass

    def _dnsname_match(dn, hostname, max_wildcards=1):
        """Matching according to RFC 6125, section 6.4.3

        http://tools.ietf.org/html/rfc6125#section-6.4.3
        """
        pats = []
        if not dn:
            return False

        # Ported from python3-syntax:
        # leftmost, *remainder = dn.split(r'.')
        parts = dn.split(r'.')
        leftmost = parts[0]
        remainder = parts[1:]

        wildcards = leftmost.count('*')
        if wildcards > max_wildcards:
            # Issue #17980: avoid denials of service by refusing more
            # than one wildcard per fragment.  A survey of established
            # policy among SSL implementations showed it to be a
            # reasonable choice.
            raise CertificateError(
                "too many wildcards in certificate DNS name: " + repr(dn))

        # speed up common case w/o wildcards
        if not wildcards:
            return dn.lower() == hostname.lower()

        # RFC 6125, section 6.4.3, subitem 1.
        # The client SHOULD NOT attempt to match a presented identifier in which
        # the wildcard character comprises a label other than the left-most label.
        if leftmost == '*':
            # When '*' is a fragment by itself, it matches a non-empty dotless
            # fragment.
            pats.append('[^.]+')
        elif leftmost.startswith('xn--') or hostname.startswith('xn--'):
            # RFC 6125, section 6.4.3, subitem 3.
            # The client SHOULD NOT attempt to match a presented identifier
            # where the wildcard character is embedded within an A-label or
            # U-label of an internationalized domain name.
            pats.append(re.escape(leftmost))
        else:
            # Otherwise, '*' matches any dotless string, e.g. www*
            pats.append(re.escape(leftmost).replace(r'\*', '[^.]*'))

        # add the remaining fragments, ignore any wildcards
        for frag in remainder:
            pats.append(re.escape(frag))

        pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE)
        return pat.match(hostname)

    def match_hostname(cert, hostname):
        """Verify that *cert* (in decoded format as returned by
        SSLSocket.getpeercert()) matches the *hostname*.  RFC 2818 and RFC 6125
        rules are followed, but IP addresses are not accepted for *hostname*.

        CertificateError is raised on failure. On success, the function
        returns nothing.
        """
        if not cert:
            raise ValueError("empty or no certificate")
        dnsnames = []
        # subjectAltName dNSName entries take precedence over the subject CN
        san = cert.get('subjectAltName', ())
        for key, value in san:
            if key == 'DNS':
                if _dnsname_match(value, hostname):
                    return
                dnsnames.append(value)
        if not dnsnames:
            # The subject is only checked when there is no dNSName entry
            # in subjectAltName
            for sub in cert.get('subject', ()):
                for key, value in sub:
                    # XXX according to RFC 2818, the most specific Common Name
                    # must be used.
                    if key == 'commonName':
                        if _dnsname_match(value, hostname):
                            return
                        dnsnames.append(value)
        if len(dnsnames) > 1:
            raise CertificateError("hostname %r "
                "doesn't match either of %s"
                % (hostname, ', '.join(map(repr, dnsnames))))
        elif len(dnsnames) == 1:
            raise CertificateError("hostname %r "
                "doesn't match %r"
                % (hostname, dnsnames[0]))
        else:
            raise CertificateError("no appropriate commonName or "
                "subjectAltName fields were found")

    ###
    ### End of Python Software Foundation Licensed code
    ###

    HAS_MATCH_HOSTNAME = True
# This is a dummy cacert provided for Mac OS since you need at least 1
# ca cert, regardless of validity, for Python on Mac OS to use the
# keychain functionality in OpenSSL for validating SSL certificates.
# See: http://mercurial.selenic.com/wiki/CACertificates#Mac_OS_X_10.6_and_higher
# NOTE: only the *presence* of this certificate in the bundle matters
# ("regardless of validity" above); it is written to the temp CA bundle
# by SSLValidationHandler.get_ca_certs() on Darwin.
b_DUMMY_CA_CERT = b("""-----BEGIN CERTIFICATE-----
MIICvDCCAiWgAwIBAgIJAO8E12S7/qEpMA0GCSqGSIb3DQEBBQUAMEkxCzAJBgNV
BAYTAlVTMRcwFQYDVQQIEw5Ob3J0aCBDYXJvbGluYTEPMA0GA1UEBxMGRHVyaGFt
MRAwDgYDVQQKEwdBbnNpYmxlMB4XDTE0MDMxODIyMDAyMloXDTI0MDMxNTIyMDAy
MlowSTELMAkGA1UEBhMCVVMxFzAVBgNVBAgTDk5vcnRoIENhcm9saW5hMQ8wDQYD
VQQHEwZEdXJoYW0xEDAOBgNVBAoTB0Fuc2libGUwgZ8wDQYJKoZIhvcNAQEBBQAD
gY0AMIGJAoGBANtvpPq3IlNlRbCHhZAcP6WCzhc5RbsDqyh1zrkmLi0GwcQ3z/r9
gaWfQBYhHpobK2Tiq11TfraHeNB3/VfNImjZcGpN8Fl3MWwu7LfVkJy3gNNnxkA1
4Go0/LmIvRFHhbzgfuo9NFgjPmmab9eqXJceqZIlz2C8xA7EeG7ku0+vAgMBAAGj
gaswgagwHQYDVR0OBBYEFPnN1nPRqNDXGlCqCvdZchRNi/FaMHkGA1UdIwRyMHCA
FPnN1nPRqNDXGlCqCvdZchRNi/FaoU2kSzBJMQswCQYDVQQGEwJVUzEXMBUGA1UE
CBMOTm9ydGggQ2Fyb2xpbmExDzANBgNVBAcTBkR1cmhhbTEQMA4GA1UEChMHQW5z
aWJsZYIJAO8E12S7/qEpMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADgYEA
MUB80IR6knq9K/tY+hvPsZer6eFMzO3JGkRFBh2kn6JdMDnhYGX7AXVHGflrwNQH
qFy+aenWXsC0ZvrikFxbQnX8GVtDADtVznxOi7XzFw7JOxdsVrpXgSN0eh0aMzvV
zKPZsZ2miVGclicJHzm5q080b1p/sZtuKIEZk6vZqEg=
-----END CERTIFICATE-----
""")
#
# Exceptions
#

class ConnectionError(Exception):
    """Failed to connect to the server"""
    # Root of this module's exception hierarchy; ProxyError and
    # SSLValidationError derive from it.
    pass
class ProxyError(ConnectionError):
    """Failure to connect because of a proxy"""
    pass
class SSLValidationError(ConnectionError):
    """Failure to connect due to SSL validation failing"""
    pass
class NoSSLError(SSLValidationError):
    """Needed to connect to an HTTPS url but no ssl library available to verify the certificate"""
    pass
# Some environments (Google Compute Engine's CoreOS deploys) do not compile
# against openssl and thus do not have any HTTPS support.
CustomHTTPSConnection = CustomHTTPSHandler = None
if hasattr(httplib, 'HTTPSConnection') and hasattr(urllib_request, 'HTTPSHandler'):
    class CustomHTTPSConnection(httplib.HTTPSConnection):
        # HTTPSConnection variant that wraps the socket with the best
        # available SSL machinery (SSLContext, PyOpenSSL, or plain ssl).
        def __init__(self, *args, **kwargs):
            httplib.HTTPSConnection.__init__(self, *args, **kwargs)
            self.context = None
            if HAS_SSLCONTEXT:
                self.context = create_default_context()
            elif HAS_URLLIB3_PYOPENSSLCONTEXT:
                self.context = PyOpenSSLContext(PROTOCOL)
            # load the client certificate/key pair when one was configured
            if self.context and self.cert_file:
                self.context.load_cert_chain(self.cert_file, self.key_file)

        def connect(self):
            "Connect to a host on a given (SSL) port."

            # source_address only exists on py >= 2.7
            if hasattr(self, 'source_address'):
                sock = socket.create_connection((self.host, self.port), self.timeout, self.source_address)
            else:
                sock = socket.create_connection((self.host, self.port), self.timeout)

            server_hostname = self.host
            # Note: self._tunnel_host is not available on py < 2.6 but this code
            # isn't used on py < 2.6 (lack of create_connection)
            if self._tunnel_host:
                self.sock = sock
                self._tunnel()
                # SNI must name the tunnelled target, not the proxy
                server_hostname = self._tunnel_host

            if HAS_SSLCONTEXT or HAS_URLLIB3_PYOPENSSLCONTEXT:
                self.sock = self.context.wrap_socket(sock, server_hostname=server_hostname)
            elif HAS_URLLIB3_SSL_WRAP_SOCKET:
                self.sock = ssl_wrap_socket(sock, keyfile=self.key_file, cert_reqs=ssl.CERT_NONE, certfile=self.cert_file, ssl_version=PROTOCOL,
                                            server_hostname=server_hostname)
            else:
                # last resort: no SNI support available
                self.sock = ssl.wrap_socket(sock, keyfile=self.key_file, certfile=self.cert_file, ssl_version=PROTOCOL)

    class CustomHTTPSHandler(urllib_request.HTTPSHandler):

        def https_open(self, req):
            return self.do_open(CustomHTTPSConnection, req)

        https_request = AbstractHTTPHandler.do_request_
class HTTPSClientAuthHandler(urllib_request.HTTPSHandler):
    '''Handles client authentication via cert/key

    This is a fairly lightweight extension on HTTPSHandler, and can be used
    in place of HTTPSHandler
    '''

    def __init__(self, client_cert=None, client_key=None, **kwargs):
        # Remember the cert/key file paths; they are injected into every
        # connection this handler builds.
        urllib_request.HTTPSHandler.__init__(self, **kwargs)
        self.client_cert = client_cert
        self.client_key = client_key

    def https_open(self, req):
        # Open the request through a connection factory that carries the
        # client certificate configuration.
        return self.do_open(self._build_https_connection, req)

    def _build_https_connection(self, host, **kwargs):
        kwargs['cert_file'] = self.client_cert
        kwargs['key_file'] = self.client_key
        # Newer HTTPSHandler instances carry an SSLContext in _context;
        # forward it when present so context options are honored.
        if hasattr(self, '_context'):
            kwargs['context'] = self._context
        return httplib.HTTPSConnection(host, **kwargs)
def generic_urlparse(parts):
    '''
    Returns a dictionary of url parts as parsed by urlparse,
    but accounts for the fact that older versions of that
    library do not support named attributes (ie. .netloc)

    :arg parts: result of ``urlparse()`` — either a named tuple or a
        plain 6-tuple of (scheme, netloc, path, params, query, fragment)
    :returns: dict with keys ``scheme``, ``netloc``, ``path``, ``params``,
        ``query``, ``fragment``, ``username``, ``password``, ``hostname``
        and ``port`` (the last four are ``None`` when not present)
    '''
    generic_parts = dict()
    if hasattr(parts, 'netloc'):
        # urlparse is newer, just read the fields straight
        # from the parts object
        generic_parts['scheme'] = parts.scheme
        generic_parts['netloc'] = parts.netloc
        generic_parts['path'] = parts.path
        generic_parts['params'] = parts.params
        generic_parts['query'] = parts.query
        generic_parts['fragment'] = parts.fragment
        generic_parts['username'] = parts.username
        generic_parts['password'] = parts.password
        generic_parts['hostname'] = parts.hostname
        generic_parts['port'] = parts.port
    else:
        # we have to use indexes, and then parse out
        # the other parts not supported by indexing
        generic_parts['scheme'] = parts[0]
        generic_parts['netloc'] = parts[1]
        generic_parts['path'] = parts[2]
        generic_parts['params'] = parts[3]
        generic_parts['query'] = parts[4]
        generic_parts['fragment'] = parts[5]
        # get the username, password, etc.
        try:
            netloc_re = re.compile(r'^((?:\w)+(?::(?:\w)+)?@)?([A-Za-z0-9.-]+)(:\d+)?$')
            match = netloc_re.match(parts[1])
            auth = match.group(1)
            hostname = match.group(2)
            port = match.group(3)
            if port:
                # the capture group for the port will include the ':',
                # so remove it and convert the port to an integer
                port = int(port[1:])
            if auth:
                # the capture group above includes the @, so remove it
                # and then split it up based on the first ':' found
                auth = auth[:-1]
                username, password = auth.split(':', 1)
            else:
                username = password = None
            generic_parts['username'] = username
            generic_parts['password'] = password
            generic_parts['hostname'] = hostname
            generic_parts['port'] = port
        except Exception:
            # best-effort fallback: treat the whole netloc as the hostname.
            # (narrowed from a bare "except:" so SystemExit/KeyboardInterrupt
            # are no longer swallowed)
            generic_parts['username'] = None
            generic_parts['password'] = None
            generic_parts['hostname'] = parts[1]
            generic_parts['port'] = None
    return generic_parts
class RequestWithMethod(urllib_request.Request):
    '''
    Workaround for using DELETE/PUT/etc with urllib2

    Originally contained in library/net_infrastructure/dnsmadeeasy
    '''

    def __init__(self, url, method, data=None, headers=None):
        # Remember the uppercased verb before delegating to the base class.
        self._method = method.upper()
        urllib_request.Request.__init__(
            self, url, data, headers if headers is not None else {})

    def get_method(self):
        # An empty override falls back to urllib's own GET/POST heuristic.
        return self._method or urllib_request.Request.get_method(self)
def RedirectHandlerFactory(follow_redirects=None, validate_certs=True):
    """This is a class factory that closes over the value of
    ``follow_redirects`` so that the RedirectHandler class has access to
    that value without having to use globals, and potentially cause problems
    where ``open_url`` or ``fetch_url`` are used multiple times in a module.
    """

    class RedirectHandler(urllib_request.HTTPRedirectHandler):
        """This is an implementation of a RedirectHandler to match the
        functionality provided by httplib2. It will utilize the value of
        ``follow_redirects`` that is passed into ``RedirectHandlerFactory``
        to determine how redirects should be handled in urllib2.
        """

        def redirect_request(self, req, fp, code, msg, hdrs, newurl):
            # the redirect target may need its own SSL validation handler
            extra_handler = maybe_add_ssl_handler(newurl, validate_certs)
            if extra_handler:
                urllib_request._opener.add_handler(extra_handler)

            # 'urllib2' means: defer entirely to the stock redirect logic
            if follow_redirects == 'urllib2':
                return urllib_request.HTTPRedirectHandler.redirect_request(
                    self, req, fp, code, msg, hdrs, newurl)

            if follow_redirects in ['no', 'none', False]:
                raise urllib_error.HTTPError(newurl, code, msg, hdrs, fp)

            is_redirect_code = 300 <= code < 400
            if follow_redirects in ['all', 'yes', True]:
                should_follow = is_redirect_code
            elif follow_redirects == 'safe':
                # only follow redirects of idempotent request methods
                should_follow = is_redirect_code and req.get_method() in ('GET', 'HEAD')
            else:
                should_follow = False

            if not should_follow:
                raise urllib_error.HTTPError(req.get_full_url(), code, msg, hdrs, fp)

            # be conciliant with URIs containing a space
            target = newurl.replace(' ', '%20')
            # the redirected request must not reuse body-describing headers
            stripped_headers = dict(
                (k, v) for k, v in req.headers.items()
                if k.lower() not in ("content-length", "content-type"))
            if hasattr(req, 'get_origin_req_host'):
                # Python 2 - 3.3
                origin_req_host = req.get_origin_req_host()
            else:
                # Python 3.4+
                origin_req_host = req.origin_req_host
            return urllib_request.Request(target,
                                          headers=stripped_headers,
                                          origin_req_host=origin_req_host,
                                          unverifiable=True)

    return RedirectHandler
def build_ssl_validation_error(hostname, port, paths, exc=None):
    '''Intelligently build out the SSLValidationError based on what support
    you have installed

    :arg hostname: host whose certificate failed validation
    :arg port: port used for the connection
    :arg paths: iterable of filesystem paths that were searched for CA certs
    :kwarg exc: optional original exception; its message is appended
    :raises SSLValidationError: always
    '''
    msg = [
        ('Failed to validate the SSL certificate for %s:%s.'
         ' Make sure your managed systems have a valid CA'
         ' certificate installed.')
    ]
    if not HAS_SSLCONTEXT:
        msg.append('If the website serving the url uses SNI you need'
                   ' python >= 2.7.9 on your managed machine')
        # Only suggest installing the urllib3/pyOpenSSL stack when *neither*
        # SNI fallback is available: either PyOpenSSLContext or
        # ssl_wrap_socket alone is enough for SNI verification.
        # (was "or", which showed this advice even when one mechanism
        # was already installed)
        if not HAS_URLLIB3_PYOPENSSLCONTEXT and not HAS_URLLIB3_SSL_WRAP_SOCKET:
            msg.append('or you can install the `urllib3`, `pyOpenSSL`,'
                       ' `ndg-httpsclient`, and `pyasn1` python modules')

        msg.append('to perform SNI verification in python >= 2.6.')

    msg.append('You can use validate_certs=False if you do'
               ' not need to confirm the servers identity but this is'
               ' unsafe and not recommended.'
               ' Paths checked for this platform: %s.')

    if exc:
        msg.append('The exception msg was: %s.' % to_native(exc))

    raise SSLValidationError(' '.join(msg) % (hostname, port, ", ".join(paths)))
class SSLValidationHandler(urllib_request.BaseHandler):
    '''
    A custom handler class for SSL validation.

    Performs an out-of-band TLS handshake (optionally through an HTTP
    proxy) against the target host using a temp bundle of system CA
    certs, raising SSLValidationError before the real request is sent
    if validation fails.

    Based on:
    http://stackoverflow.com/questions/1087227/validate-ssl-certificates-with-python
    http://techknack.net/python-urllib2-handlers/
    '''
    # HTTP CONNECT preamble sent to a proxy; note only one CRLF — the
    # terminating blank line is sent separately in http_request()
    CONNECT_COMMAND = "CONNECT %s:%s HTTP/1.0\r\nConnection: close\r\n"

    def __init__(self, hostname, port):
        # target endpoint whose certificate should be validated
        self.hostname = hostname
        self.port = port

    def get_ca_certs(self):
        # tries to find a valid CA cert in one of the
        # standard locations for the current distribution
        #
        # returns (tmp_path, to_add_path, paths_checked):
        #   tmp_path     - temp file with every cert found
        #   to_add_path  - temp file with only certs not yet in
        #                  LOADED_VERIFY_LOCATIONS (None if nothing new)
        #   paths_checked - directories that were searched

        ca_certs = []
        paths_checked = []

        system = to_text(platform.system(), errors='surrogate_or_strict')
        # build a list of paths to check for .crt/.pem files
        # based on the platform type
        paths_checked.append('/etc/ssl/certs')
        if system == u'Linux':
            paths_checked.append('/etc/pki/ca-trust/extracted/pem')
            paths_checked.append('/etc/pki/tls/certs')
            paths_checked.append('/usr/share/ca-certificates/cacert.org')
        elif system == u'FreeBSD':
            paths_checked.append('/usr/local/share/certs')
        elif system == u'OpenBSD':
            paths_checked.append('/etc/ssl')
        elif system == u'NetBSD':
            # NOTE(review): appended to ca_certs (unused afterwards) rather
            # than paths_checked like every other branch — looks like an
            # oversight; left unchanged here
            ca_certs.append('/etc/openssl/certs')
        elif system == u'SunOS':
            paths_checked.append('/opt/local/etc/openssl/certs')

        # fall back to a user-deployed cert in a standard
        # location if the OS platform one is not available
        paths_checked.append('/etc/ansible')

        tmp_fd, tmp_path = tempfile.mkstemp()
        to_add_fd, to_add_path = tempfile.mkstemp()
        to_add = False

        # Write the dummy ca cert if we are running on Mac OS X
        if system == u'Darwin':
            os.write(tmp_fd, b_DUMMY_CA_CERT)
            # Default Homebrew path for OpenSSL certs
            paths_checked.append('/usr/local/etc/openssl')

        # for all of the paths, find any .crt or .pem files
        # and compile them into single temp file for use
        # in the ssl check to speed up the test
        for path in paths_checked:
            if os.path.exists(path) and os.path.isdir(path):
                dir_contents = os.listdir(path)
                for f in dir_contents:
                    full_path = os.path.join(path, f)
                    if os.path.isfile(full_path) and os.path.splitext(f)[1] in ('.crt','.pem'):
                        try:
                            cert_file = open(full_path, 'rb')
                            cert = cert_file.read()
                            cert_file.close()
                            os.write(tmp_fd, cert)
                            os.write(tmp_fd, b('\n'))
                            # only certs not previously loaded go into the
                            # "to add" bundle consumed by _make_context()
                            if full_path not in LOADED_VERIFY_LOCATIONS:
                                to_add = True
                                os.write(to_add_fd, cert)
                                os.write(to_add_fd, b('\n'))
                                LOADED_VERIFY_LOCATIONS.add(full_path)
                        except (OSError, IOError):
                            # unreadable cert files are skipped silently
                            pass

        if not to_add:
            to_add_path = None
        return (tmp_path, to_add_path, paths_checked)

    def validate_proxy_response(self, response, valid_codes=[200]):
        '''
        make sure we get back a valid code from the proxy
        '''
        # NOTE: the mutable default valid_codes is never mutated, so it is
        # harmless here.
        # NOTE(review): on Python 3 `response` is bytes (built from b("") in
        # http_request) while the pattern is str; re.match would raise
        # TypeError, which the bare except converts into ProxyError — verify.
        try:
            (http_version, resp_code, msg) = re.match(r'(HTTP/\d\.\d) (\d\d\d) (.*)', response).groups()
            if int(resp_code) not in valid_codes:
                raise Exception
        except:
            raise ProxyError('Connection to proxy failed')

    def detect_no_proxy(self, url):
        '''
        Detect if the 'no_proxy' environment variable is set and honor those locations.
        '''
        env_no_proxy = os.environ.get('no_proxy')
        if env_no_proxy:
            env_no_proxy = env_no_proxy.split(',')
            netloc = urlparse(url).netloc

            for host in env_no_proxy:
                # match with or without the port suffix
                if netloc.endswith(host) or netloc.split(':')[0].endswith(host):
                    # Our requested URL matches something in no_proxy, so don't
                    # use the proxy for this
                    return False

        return True

    def _make_context(self, to_add_ca_cert_path):
        # Build an SSLContext (PyOpenSSL-backed when only that is
        # available) with any newly discovered CA certs loaded.
        if HAS_URLLIB3_PYOPENSSLCONTEXT:
            context = PyOpenSSLContext(PROTOCOL)
        else:
            context = create_default_context()
        if to_add_ca_cert_path:
            context.load_verify_locations(to_add_ca_cert_path)
        return context

    def http_request(self, req):
        # urllib pre-processing hook: perform the validation handshake,
        # then return the request unchanged so the real open proceeds.
        tmp_ca_cert_path, to_add_ca_cert_path, paths_checked = self.get_ca_certs()
        https_proxy = os.environ.get('https_proxy')
        context = None
        if HAS_SSLCONTEXT or HAS_URLLIB3_PYOPENSSLCONTEXT:
            context = self._make_context(to_add_ca_cert_path)

        # Detect if 'no_proxy' environment variable is set and if our URL is included
        use_proxy = self.detect_no_proxy(req.get_full_url())

        if not use_proxy:
            # ignore proxy settings for this host request
            return req

        try:
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            if https_proxy:
                proxy_parts = generic_urlparse(urlparse(https_proxy))
                port = proxy_parts.get('port') or 443
                s.connect((proxy_parts.get('hostname'), port))
                if proxy_parts.get('scheme') == 'http':
                    # tunnel to the real target through the proxy
                    s.sendall(self.CONNECT_COMMAND % (self.hostname, self.port))
                    if proxy_parts.get('username'):
                        credentials = "%s:%s" % (proxy_parts.get('username',''), proxy_parts.get('password',''))
                        s.sendall(b('Proxy-Authorization: Basic %s\r\n') % base64.b64encode(to_bytes(credentials, errors='surrogate_or_strict')).strip())
                    s.sendall(b('\r\n'))
                    connect_result = b("")
                    while connect_result.find(b("\r\n\r\n")) <= 0:
                        connect_result += s.recv(4096)
                        # 128 kilobytes of headers should be enough for everyone.
                        if len(connect_result) > 131072:
                            raise ProxyError('Proxy sent too verbose headers. Only 128KiB allowed.')
                    self.validate_proxy_response(connect_result)
                    if context:
                        ssl_s = context.wrap_socket(s, server_hostname=self.hostname)
                    elif HAS_URLLIB3_SSL_WRAP_SOCKET:
                        ssl_s = ssl_wrap_socket(s, ca_certs=tmp_ca_cert_path, cert_reqs=ssl.CERT_REQUIRED, ssl_version=PROTOCOL, server_hostname=self.hostname)
                    else:
                        # ssl.wrap_socket does not check the hostname itself,
                        # so do it explicitly here
                        ssl_s = ssl.wrap_socket(s, ca_certs=tmp_ca_cert_path, cert_reqs=ssl.CERT_REQUIRED, ssl_version=PROTOCOL)
                        match_hostname(ssl_s.getpeercert(), self.hostname)
                else:
                    raise ProxyError('Unsupported proxy scheme: %s. Currently ansible only supports HTTP proxies.' % proxy_parts.get('scheme'))
            else:
                s.connect((self.hostname, self.port))
                if context:
                    ssl_s = context.wrap_socket(s, server_hostname=self.hostname)
                elif HAS_URLLIB3_SSL_WRAP_SOCKET:
                    ssl_s = ssl_wrap_socket(s, ca_certs=tmp_ca_cert_path, cert_reqs=ssl.CERT_REQUIRED, ssl_version=PROTOCOL, server_hostname=self.hostname)
                else:
                    # ssl.wrap_socket does not check the hostname itself,
                    # so do it explicitly here
                    ssl_s = ssl.wrap_socket(s, ca_certs=tmp_ca_cert_path, cert_reqs=ssl.CERT_REQUIRED, ssl_version=PROTOCOL)
                    match_hostname(ssl_s.getpeercert(), self.hostname)
            # close the ssl connection
            #ssl_s.unwrap()
            s.close()
        except (ssl.SSLError, CertificateError):
            e = get_exception()
            build_ssl_validation_error(self.hostname, self.port, paths_checked, e)
        except socket.error:
            e = get_exception()
            raise ConnectionError('Failed to connect to %s at port %s: %s' % (self.hostname, self.port, to_native(e)))

        try:
            # cleanup the temp file created, don't worry
            # if it fails for some reason
            os.remove(tmp_ca_cert_path)
        except:
            pass

        try:
            # cleanup the temp file created, don't worry
            # if it fails for some reason
            if to_add_ca_cert_path:
                os.remove(to_add_ca_cert_path)
        except:
            pass

        return req

    # HTTPS requests go through the same validation path
    https_request = http_request
def maybe_add_ssl_handler(url, validate_certs):
    """Return an SSLValidationHandler for *url*, or None.

    A handler is only built for https URLs when certificate validation is
    requested; raises NoSSLError when the python ssl module is missing.
    """
    # FIXME: change the following to use the generic_urlparse function
    # to remove the indexed references for 'parsed'
    parsed = urlparse(url)
    if parsed[0] != 'https' or not validate_certs:
        return None

    if not HAS_SSL:
        raise NoSSLError('SSL validation is not available in your version of python. You can use validate_certs=False,'
                         ' however this is unsafe and not recommended')

    # do the cert validation
    host_part = parsed[1]
    if '@' in host_part:
        # drop any embedded credentials
        host_part = host_part.split('@', 1)[1]
    if ':' in host_part:
        host, port_str = host_part.split(':', 1)
        port = int(port_str)
    else:
        host = host_part
        port = 443
    # create the SSL validation handler and
    # add it to the list of handlers
    return SSLValidationHandler(host, port)
def open_url(url, data=None, headers=None, method=None, use_proxy=True,
             force=False, last_mod_time=None, timeout=10, validate_certs=True,
             url_username=None, url_password=None, http_agent=None,
             force_basic_auth=False, follow_redirects='urllib2',
             client_cert=None, client_key=None):
    '''
    Sends a request via HTTP(S) or FTP using urllib2 (Python2) or urllib (Python3)

    Does not require the module environment

    :arg url: URL to request; ``user:pass@`` embedded in the netloc is
        stripped and used as credentials
    :kwarg data: request body (converted to bytes)
    :kwarg headers: dict of extra request headers, applied last so they
        can override the built-in ones
    :kwarg method: explicit HTTP verb; must be one of the verbs listed below
    :kwarg use_proxy: when False, environment proxy settings are ignored
    :kwarg force: when True, send ``cache-control: no-cache``
    :kwarg last_mod_time: datetime for ``If-Modified-Since`` (only when
        ``force`` is False)
    :kwarg timeout: socket timeout in seconds (py >= 2.6 only)
    :kwarg validate_certs: when False and SSLContext exists, certificate
        verification is disabled
    :kwarg url_username: explicit username (takes precedence over the URL)
    :kwarg url_password: password matching ``url_username``
    :kwarg http_agent: value for the ``User-agent`` header
    :kwarg force_basic_auth: send the Authorization header up-front rather
        than waiting for a 401 challenge
    :kwarg follow_redirects: redirect policy passed to RedirectHandlerFactory
    :kwarg client_cert: path to a client TLS certificate
    :kwarg client_key: path to the matching private key
    :returns: the response object returned by ``urlopen``
    '''
    handlers = []
    ssl_handler = maybe_add_ssl_handler(url, validate_certs)
    if ssl_handler:
        handlers.append(ssl_handler)

    # FIXME: change the following to use the generic_urlparse function
    # to remove the indexed references for 'parsed'
    parsed = urlparse(url)
    if parsed[0] != 'ftp':
        username = url_username

        if headers is None:
            headers = {}

        if username:
            password = url_password
            netloc = parsed[1]
        elif '@' in parsed[1]:
            # credentials were embedded in the URL itself
            credentials, netloc = parsed[1].split('@', 1)
            if ':' in credentials:
                username, password = credentials.split(':', 1)
            else:
                username = credentials
                password = ''

            parsed = list(parsed)
            parsed[1] = netloc

            # reconstruct url without credentials
            url = urlunparse(parsed)

        if username and not force_basic_auth:
            passman = urllib_request.HTTPPasswordMgrWithDefaultRealm()

            # this creates a password manager
            passman.add_password(None, netloc, username, password)

            # because we have put None at the start it will always
            # use this username/password combination for urls
            # for which `theurl` is a super-url
            authhandler = urllib_request.HTTPBasicAuthHandler(passman)
            digest_authhandler = urllib_request.HTTPDigestAuthHandler(passman)

            # create the AuthHandler
            handlers.append(authhandler)
            handlers.append(digest_authhandler)

        elif username and force_basic_auth:
            headers["Authorization"] = basic_auth_header(username, password)

        else:
            # no explicit credentials: fall back to ~/.netrc (or $NETRC)
            try:
                rc = netrc.netrc(os.environ.get('NETRC'))
                login = rc.authenticators(parsed[1])
            except IOError:
                login = None

            if login:
                username, _, password = login
                if username and password:
                    headers["Authorization"] = basic_auth_header(username, password)

    if not use_proxy:
        # an empty ProxyHandler disables environment proxy settings
        proxyhandler = urllib_request.ProxyHandler({})
        handlers.append(proxyhandler)

    if HAS_SSLCONTEXT and not validate_certs:
        # In 2.7.9, the default context validates certificates
        context = SSLContext(ssl.PROTOCOL_SSLv23)
        context.options |= ssl.OP_NO_SSLv2
        context.options |= ssl.OP_NO_SSLv3
        context.verify_mode = ssl.CERT_NONE
        context.check_hostname = False
        handlers.append(HTTPSClientAuthHandler(client_cert=client_cert,
                                               client_key=client_key,
                                               context=context))
    elif client_cert:
        handlers.append(HTTPSClientAuthHandler(client_cert=client_cert,
                                               client_key=client_key))

    # pre-2.6 versions of python cannot use the custom https
    # handler, since the socket class is lacking create_connection.
    # Some python builds lack HTTPS support.
    if hasattr(socket, 'create_connection') and CustomHTTPSHandler:
        handlers.append(CustomHTTPSHandler)

    handlers.append(RedirectHandlerFactory(follow_redirects, validate_certs))

    opener = urllib_request.build_opener(*handlers)
    urllib_request.install_opener(opener)

    data = to_bytes(data, nonstring='passthru')
    if method:
        if method.upper() not in ('OPTIONS','GET','HEAD','POST','PUT','DELETE','TRACE','CONNECT','PATCH'):
            raise ConnectionError('invalid HTTP request method; %s' % method.upper())
        request = RequestWithMethod(url, method.upper(), data)
    else:
        request = urllib_request.Request(url, data)

    # add the custom agent header, to help prevent issues
    # with sites that block the default urllib agent string
    if http_agent:
        request.add_header('User-agent', http_agent)

    # Cache control
    # Either we directly force a cache refresh
    if force:
        request.add_header('cache-control', 'no-cache')
    # or we do it if the original is more recent than our copy
    elif last_mod_time:
        tstamp = last_mod_time.strftime('%a, %d %b %Y %H:%M:%S +0000')
        request.add_header('If-Modified-Since', tstamp)

    # user defined headers now, which may override things we've set above
    if headers:
        if not isinstance(headers, dict):
            # NOTE: message mentions fetch_url(), the usual caller of open_url()
            raise ValueError("headers provided to fetch_url() must be a dict")
        for header in headers:
            request.add_header(header, headers[header])

    urlopen_args = [request, None]
    if sys.version_info >= (2,6,0):
        # urlopen in python prior to 2.6.0 did not
        # have a timeout parameter
        urlopen_args.append(timeout)

    r = urllib_request.urlopen(*urlopen_args)
    return r
#
# Module-related functions
#
def basic_auth_header(username, password):
    """Build the value for an HTTP Basic ``Authorization`` header.

    Returns a byte string of the form ``Basic <base64(username:password)>``.
    """
    credentials = to_bytes("%s:%s" % (username, password), errors='surrogate_or_strict')
    return b("Basic %s") % base64.b64encode(credentials)
def url_argument_spec():
    '''
    Creates an argument spec that can be used with any module
    that will be requesting content via urllib/urllib2
    '''
    spec = {
        'url': dict(),
        'force': dict(default='no', aliases=['thirsty'], type='bool'),
        'http_agent': dict(default='ansible-httpget'),
        'use_proxy': dict(default='yes', type='bool'),
        'validate_certs': dict(default='yes', type='bool'),
        'url_username': dict(required=False),
        'url_password': dict(required=False, no_log=True),
        'force_basic_auth': dict(required=False, type='bool', default='no'),
        'client_cert': dict(required=False, type='path', default=None),
        'client_key': dict(required=False, type='path', default=None),
    }
    return spec
def fetch_url(module, url, data=None, headers=None, method=None,
              use_proxy=True, force=False, last_mod_time=None, timeout=10):
    '''Sends a request via HTTP(S) or FTP (needs the module as parameter)

    :arg module: The AnsibleModule (used to get username, password etc. (s.b.).
    :arg url: The url to use.

    :kwarg data: The data to be sent (in case of POST/PUT).
    :kwarg headers: A dict with the request headers.
    :kwarg method: "POST", "PUT", etc.
    :kwarg boolean use_proxy: Default: True
    :kwarg boolean force: If True: Do not get a cached copy (Default: False)
    :kwarg last_mod_time: Default: None
    :kwarg int timeout: Default: 10

    :returns: A tuple of (**response**, **info**). Use ``response.read()`` to read the data.
        The **info** contains the 'status' and other meta data. When a HttpError (status >= 400)
        occurred then ``info['body']`` contains the error response data::

    Example::

        data={...}
        resp, info = fetch_url(module, "http://example.com",
                               data=module.jsonify(data),
                               headers={'Content-type': 'application/json'},
                               method="POST")
        status_code = info["status"]
        body = resp.read()
        if status_code >= 400:
            body = info['body']
    '''

    if not HAS_URLPARSE:
        module.fail_json(msg='urlparse is not installed')

    # Get validate_certs from the module params
    validate_certs = module.params.get('validate_certs', True)

    username = module.params.get('url_username', '')
    password = module.params.get('url_password', '')
    http_agent = module.params.get('http_agent', None)
    force_basic_auth = module.params.get('force_basic_auth', '')

    follow_redirects = module.params.get('follow_redirects', 'urllib2')

    client_cert = module.params.get('client_cert')
    client_key = module.params.get('client_key')

    r = None
    info = dict(url=url)
    try:
        r = open_url(url, data=data, headers=headers, method=method,
                     use_proxy=use_proxy, force=force, last_mod_time=last_mod_time, timeout=timeout,
                     validate_certs=validate_certs, url_username=username,
                     url_password=password, http_agent=http_agent, force_basic_auth=force_basic_auth,
                     follow_redirects=follow_redirects, client_cert=client_cert,
                     client_key=client_key)
        info.update(r.info())
        info.update(dict(msg="OK (%s bytes)" % r.headers.get('Content-Length', 'unknown'), url=r.geturl(), status=r.code))
    except NoSSLError:
        e = get_exception()
        distribution = get_distribution()
        if distribution is not None and distribution.lower() == 'redhat':
            module.fail_json(msg='%s. You can also install python-ssl from EPEL' % str(e))
        else:
            module.fail_json(msg='%s' % str(e))
    except (ConnectionError, ValueError):
        e = get_exception()
        module.fail_json(msg=str(e))
    except urllib_error.HTTPError:
        e = get_exception()
        try:
            body = e.read()
        except AttributeError:
            body = ''

        # Try to add exception info to the output but don't fail if we can't
        # (narrowed from a bare except so SystemExit/KeyboardInterrupt still
        # propagate; the unused local that used to shadow this call is gone).
        try:
            info.update(dict(**e.info()))
        except Exception:
            # Best effort only; 'msg', 'body' and 'status' below are the
            # authoritative parts of the result.
            pass

        info.update({'msg': str(e), 'body': body, 'status': e.code})
    except urllib_error.URLError:
        e = get_exception()
        code = int(getattr(e, 'code', -1))
        info.update(dict(msg="Request failed: %s" % str(e), status=code))
    except socket.error:
        e = get_exception()
        info.update(dict(msg="Connection failure: %s" % str(e), status=-1))
    except Exception:
        e = get_exception()
        info.update(dict(msg="An unknown error occurred: %s" % str(e), status=-1))

    return r, info
|
francisar/rds_manager | refs/heads/master | aliyun/api/rest/Mkvstore20150301DeactivateInstanceRequest.py | 1 | '''
Created by auto_sdk on 2015.06.23
'''
from aliyun.api.base import RestApi
class Mkvstore20150301DeactivateInstanceRequest(RestApi):
	"""Auto-generated request wrapper for the Aliyun m-kvstore
	``DeactivateInstance`` API, version 2015-03-01.
	"""

	def __init__(self,domain='m-kvstore.aliyuncs.com',port=80):
		RestApi.__init__(self,domain, port)
		# Identifier of the KVStore instance to deactivate; must be set
		# by the caller before the request is issued.
		self.InstanceId = None

	def getapiname(self):
		# Fully qualified API name string consumed by the RestApi dispatcher.
		return 'm-kvstore.aliyuncs.com.DeactivateInstance.2015-03-01'
|
hockeybuggy/sudoku_solver | refs/heads/master | python/tests/test_puzzle.py | 1 | import os
from unittest import TestCase
from ..sudoku_solver.puzzle import Puzzle
from ..sudoku_solver.puzzle import GroupFactory
BASE_DIR = os.path.dirname(os.path.realpath(__file__))
class PuzzleTestCase(TestCase):
    """Tests for :class:`Puzzle`."""

    def setUp(self):
        # Bug fix: unittest discards setUp's return value, so the puzzle was
        # previously unreachable from the tests. Store it on the instance.
        # NOTE(review): 64 cells looks like an 8x8 board -- confirm, since a
        # standard sudoku grid has 81 cells.
        self.puzzle = Puzzle([str(x) for x in range(64)])

    def test_puzzle(self):
        pass
class GroupFactoryTestCase(TestCase):
    """Tests for :class:`GroupFactory` built from the fixture file."""

    def setUp(self):
        fixture_file = os.path.join(BASE_DIR, "fixtures/group_factory.txt")
        with open(fixture_file, "r") as r:
            # One factory per fixture line: boxes, columns, rows.
            self.bgf = GroupFactory(list(r.readline()))
            self.cgf = GroupFactory(list(r.readline()))
            self.rgf = GroupFactory(list(r.readline()))

    def assert_group(self, group, value):
        # The group must contain its own expected value...
        self.assertIn(str(value), group)
        # ...and no placeholder zeros.
        # NOTE(review): the loop variable is unused, so the same whole-group
        # assertion runs 9 times -- possibly meant to check individual cells.
        for cell_num in range(9):
            self.assertNotIn("0", group)

    def test_boxs(self):
        for group_num in range(1, 9):
            self.assert_group(self.bgf.box(group_num), group_num)

    # def test_cols(self):
    # assert_group(self.c)

    # def test_rows(self):
    # assert_group(self.r_cells)
|
Azure/azure-sdk-for-python | refs/heads/sync-eng/common-js-nightly-docs-2-1768-ForTestPipeline | sdk/mixedreality/azure-mixedreality-authentication/tests/_constants.py | 1 | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
# Fake account details matching recordings.
# Service endpoint domain used by the recorded test sessions.
MIXEDREALITY_ACCOUNT_DOMAIN="mixedreality.azure.com"
# Fake account GUID (not a real account).
MIXEDREALITY_ACCOUNT_ID="68321d5a-7978-4ceb-b880-0f49751daae9"
# Fake key (not a real secret); appears to be the account id base64-encoded.
MIXEDREALITY_ACCOUNT_KEY="NjgzMjFkNWEtNzk3OC00Y2ViLWI4ODAtMGY0OTc1MWRhYWU5"
doismellburning/edx-platform | refs/heads/master | common/djangoapps/student/management/commands/set_superuser.py | 95 | """Management command to grant or revoke superuser access for one or more users"""
from optparse import make_option
from django.contrib.auth.models import User
from django.core.management.base import BaseCommand, CommandError
class Command(BaseCommand):
    """Management command to grant or revoke superuser access for one or more users"""
    option_list = BaseCommand.option_list + (
        make_option('--unset',
                    action='store_true',
                    dest='unset',
                    default=False,
                    help='Set is_superuser to False instead of True'),
    )
    args = '<user|email> [user|email ...]>'
    help = """
    This command will set is_superuser to true for one or more users.
    Lookup by username or email address, assumes usernames
    do not look like email addresses.
    """

    def handle(self, *args, **options):
        # At least one username/email is required on the command line.
        if len(args) < 1:
            raise CommandError('Usage is set_superuser {0}'.format(self.args))

        for user in args:
            try:
                # Heuristic: anything containing '@' is treated as an email.
                if '@' in user:
                    userobj = User.objects.get(email=user)
                else:
                    userobj = User.objects.get(username=user)

                if options['unset']:
                    userobj.is_superuser = False
                else:
                    userobj.is_superuser = True

                userobj.save()

            # Per-user failures are reported but do not abort the batch.
            # NOTE: Python 2 print statements and err.message below.
            except Exception as err:  # pylint: disable=broad-except
                print "Error modifying user with identifier {}: {}: {}".format(user, type(err).__name__, err.message)

        # NOTE(review): printed even if some users failed above.
        print 'Success!'
|
iniju/ankidroid-triage | refs/heads/master | mapreduce/lib/pipeline/util.py | 43 | #!/usr/bin/env python
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for use with the Google App Engine Pipeline API."""
__all__ = ["for_name", "is_generator_function"]
import inspect
import logging
def for_name(fq_name, recursive=False):
  """Find class/function/method specified by its fully qualified name.

  Fully qualified can be specified as:
    * <module_name>.<class_name>
    * <module_name>.<function_name>
    * <module_name>.<class_name>.<method_name> (an unbound method will be
      returned in this case).

  for_name works by doing __import__ for <module_name>, and looks for
  <class_name>/<function_name> in module's __dict__/attrs. If fully qualified
  name doesn't contain '.', the current module will be used.

  Args:
    fq_name: fully qualified name of something to find
    recursive: internal flag -- True when this call is a nested attempt to
      resolve the parent path of a dotted name (affects error reporting).

  Returns:
    class object.

  Raises:
    ImportError: when specified module could not be loaded or the class
    was not found in the module.
  """
  fq_name = str(fq_name)
  module_name = __name__
  short_name = fq_name

  # Split "a.b.c" into module path "a.b" and leaf name "c".
  if fq_name.rfind(".") >= 0:
    (module_name, short_name) = (fq_name[:fq_name.rfind(".")],
                                 fq_name[fq_name.rfind(".") + 1:])

  try:
    result = __import__(module_name, None, None, [short_name])
    return result.__dict__[short_name]
  except KeyError:
    # If we're recursively inside a for_name() chain, then we want to raise
    # this error as a key error so we can report the actual source of the
    # problem. If we're *not* recursively being called, that means the
    # module was found and the specific item could not be loaded, and thus
    # we want to raise an ImportError directly.
    if recursive:
      raise
    else:
      raise ImportError("Could not find '%s' on path '%s'" % (
          short_name, module_name))
  except ImportError, e:
    # module_name is not actually a module. Try for_name for it to figure
    # out what's this. (Python 2 except syntax; 'e' is intentionally unused.)
    try:
      module = for_name(module_name, recursive=True)
      if hasattr(module, short_name):
        return getattr(module, short_name)
      else:
        # The module was found, but the function component is missing.
        raise KeyError()
    except KeyError:
      raise ImportError("Could not find '%s' on path '%s'" % (
          short_name, module_name))
    except ImportError:
      # This means recursive import attempts failed, thus we will raise the
      # first ImportError we encountered, since it's likely the most accurate.
      pass
    # Raise the original import error that caused all of this, since it is
    # likely the real cause of the overall problem.
    raise
def is_generator_function(obj):
  """Return true if the object is a user-defined generator function.

  Generator function objects provides same attributes as functions.
  See isfunction.__doc__ for attributes listing.

  Adapted from Python 2.6.

  Args:
    obj: an object to test.

  Returns:
    true if the object is generator function.
  """
  # Code-object flag set by the compiler on generator functions; mirrors
  # inspect.isgeneratorfunction (not available before Python 2.6).
  CO_GENERATOR = 0x20
  # func_code is the Python 2 spelling of __code__.
  return bool(((inspect.isfunction(obj) or inspect.ismethod(obj)) and
               obj.func_code.co_flags & CO_GENERATOR))
|
EmanueleCannizzaro/scons | refs/heads/master | test/TEX/PDFTEXCOM.py | 1 | #!/usr/bin/env python
#
# Copyright (c) 2001 - 2016 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "test/TEX/PDFTEXCOM.py rel_2.5.1:3735:9dc6cee5c168 2016/11/03 14:02:02 bdbaddog"
"""
Test the ability to configure the $PDFTEXCOM construction variable.
"""
import TestSCons

# Interpreter path and platform exe suffix used to build command lines below.
_python_ = TestSCons._python_
_exe = TestSCons._exe

test = TestSCons.TestSCons()

# Fake "pdftex": copies input to output, dropping any line that is exactly
# the marker '/*tex*/'. Lets the test run without a real TeX installation.
test.write('mypdftex.py', r"""
import sys
outfile = open(sys.argv[1], 'wb')
infile = open(sys.argv[2], 'rb')
for l in [l for l in infile.readlines() if l != '/*tex*/\n']:
    outfile.write(l)
sys.exit(0)
""")

# Point $PDFTEXCOM at the fake tool to verify the variable is honored.
test.write('SConstruct', """
env = Environment(TOOLS = ['pdftex'],
                  PDFTEXCOM = r'%(_python_)s mypdftex.py $TARGET $SOURCE')
env.PDF('test1')
""" % locals())

test.write('test1.tex', """\
test1.tex
/*tex*/
""")

test.run()

# The marker line must have been filtered out by the fake pdftex.
test.must_match('test1.pdf', "test1.tex\n")

test.pass_test()

# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
nattee/cafe-grader-web | refs/heads/master | lib/assets/Lib/reprlib.py | 19 | """Redo the builtin repr() (representation) but with limits on most sizes."""
__all__ = ["Repr", "repr", "recursive_repr"]
import builtins
from itertools import islice
try:
from _thread import get_ident
except ImportError:
from _dummy_thread import get_ident
def recursive_repr(fillvalue='...'):
    'Decorator to make a repr function return fillvalue for a recursive call'

    def decorating_function(user_function):
        # (object id, thread id) pairs whose repr is currently in progress.
        active = set()

        def wrapper(self):
            key = id(self), get_ident()
            if key in active:
                return fillvalue
            active.add(key)
            try:
                return user_function(self)
            finally:
                active.discard(key)

        # Can't use functools.wraps() here because of bootstrap issues
        wrapper.__module__ = getattr(user_function, '__module__')
        wrapper.__doc__ = getattr(user_function, '__doc__')
        wrapper.__name__ = getattr(user_function, '__name__')
        wrapper.__annotations__ = getattr(user_function, '__annotations__', {})
        return wrapper

    return decorating_function
class Repr:
    """Configurable repr() producer with per-type size limits.

    Each ``max*`` attribute bounds how much of the corresponding type is
    rendered before the output is elided with ``'...'``.
    """

    def __init__(self):
        self.maxlevel = 6
        self.maxtuple = 6
        self.maxlist = 6
        self.maxarray = 5
        self.maxdict = 4
        self.maxset = 6
        self.maxfrozenset = 6
        self.maxdeque = 6
        self.maxstring = 30
        self.maxlong = 40
        self.maxother = 30

    def repr(self, x):
        return self.repr1(x, self.maxlevel)

    def repr1(self, x, level):
        # Dispatch to a type-specific repr_<typename> method when one exists;
        # spaces in type names (e.g. "wrapper descriptor") become underscores.
        typename = type(x).__name__
        if ' ' in typename:
            parts = typename.split()
            typename = '_'.join(parts)
        if hasattr(self, 'repr_' + typename):
            return getattr(self, 'repr_' + typename)(x, level)
        else:
            return self.repr_instance(x, level)

    def _repr_iterable(self, x, level, left, right, maxiter, trail=''):
        # Shared engine for all sequence-like types: at most maxiter elements,
        # recursing with a decremented level; trail is the ',' that makes a
        # 1-tuple render as "(x,)".
        n = len(x)
        if level <= 0 and n:
            s = '...'
        else:
            newlevel = level - 1
            repr1 = self.repr1
            pieces = [repr1(elem, newlevel) for elem in islice(x, maxiter)]
            if n > maxiter: pieces.append('...')
            s = ', '.join(pieces)
            if n == 1 and trail: right = trail + right
        return '%s%s%s' % (left, s, right)

    def repr_tuple(self, x, level):
        return self._repr_iterable(x, level, '(', ')', self.maxtuple, ',')

    def repr_list(self, x, level):
        return self._repr_iterable(x, level, '[', ']', self.maxlist)

    def repr_array(self, x, level):
        header = "array('%s', [" % x.typecode
        return self._repr_iterable(x, level, header, '])', self.maxarray)

    def repr_set(self, x, level):
        x = _possibly_sorted(x)
        return self._repr_iterable(x, level, 'set([', '])', self.maxset)

    def repr_frozenset(self, x, level):
        x = _possibly_sorted(x)
        return self._repr_iterable(x, level, 'frozenset([', '])',
                                   self.maxfrozenset)

    def repr_deque(self, x, level):
        return self._repr_iterable(x, level, 'deque([', '])', self.maxdeque)

    def repr_dict(self, x, level):
        n = len(x)
        if n == 0: return '{}'
        if level <= 0: return '{...}'
        newlevel = level - 1
        repr1 = self.repr1
        pieces = []
        for key in islice(_possibly_sorted(x), self.maxdict):
            keyrepr = repr1(key, newlevel)
            valrepr = repr1(x[key], newlevel)
            pieces.append('%s: %s' % (keyrepr, valrepr))
        if n > self.maxdict: pieces.append('...')
        s = ', '.join(pieces)
        return '{%s}' % (s,)

    def repr_str(self, x, level):
        # Long strings are elided in the middle so both ends stay visible.
        s = builtins.repr(x[:self.maxstring])
        if len(s) > self.maxstring:
            i = max(0, (self.maxstring-3)//2)
            j = max(0, self.maxstring-3-i)
            s = builtins.repr(x[:i] + x[len(x)-j:])
            s = s[:i] + '...' + s[len(s)-j:]
        return s

    def repr_int(self, x, level):
        s = builtins.repr(x) # XXX Hope this isn't too slow...
        if len(s) > self.maxlong:
            i = max(0, (self.maxlong-3)//2)
            j = max(0, self.maxlong-3-i)
            s = s[:i] + '...' + s[len(s)-j:]
        return s

    def repr_instance(self, x, level):
        try:
            s = builtins.repr(x)
        # Bugs in x.__repr__() can cause arbitrary
        # exceptions -- then make up something
        except Exception:
            return '<%s instance at %x>' % (x.__class__.__name__, id(x))
        if len(s) > self.maxother:
            i = max(0, (self.maxother-3)//2)
            j = max(0, self.maxother-3-i)
            s = s[:i] + '...' + s[len(s)-j:]
        return s
def _possibly_sorted(x):
    # Since not all sequences of items can be sorted and comparison
    # functions may raise arbitrary exceptions, fall back to an unsorted
    # copy in that case.
    try:
        result = sorted(x)
    except Exception:
        result = list(x)
    return result
# Module-level convenience instance; ``repr`` mirrors the builtin's
# interface but applies the default size limits.
aRepr = Repr()
repr = aRepr.repr
|
RevelSystems/django | refs/heads/master | docs/_ext/literals_to_xrefs.py | 42 | """
Runs through a reST file looking for old-style literals, and helps replace them
with new-style references.
"""
import re
import shelve
import sys
# Old-style literal: a double-backquoted span with no whitespace inside.
refre = re.compile(r'``([^`\s]+?)``')

# Sphinx roles the interactive prompt accepts as replacements.
ROLES = (
    'attr',
    'class',
    "djadmin",
    'data',
    'exc',
    'file',
    'func',
    'lookup',
    'meth',
    'mod',
    "djadminopt",
    "ref",
    "setting",
    "term",
    "tfilter",
    "ttag",

    # special
    "skip"
)

# Literals never offered for conversion (extended at runtime via "skip").
ALWAYS_SKIP = [
    "NULL",
    "True",
    "False",
]
def fixliterals(fname):
    """Interactively rewrite ``literal`` spans in *fname* as :role:`target`.

    Prompts on stdin for each match (NOTE: uses raw_input -- Python 2 only)
    and remembers previous answers in a shelve cache between runs.
    """
    with open(fname) as fp:
        data = fp.read()

    last = 0
    new = []
    storage = shelve.open("/tmp/literals_to_xref.shelve")
    lastvalues = storage.get("lastvalues", {})

    for m in refre.finditer(data):

        new.append(data[last:m.start()])
        last = m.end()

        # Surrounding-line offsets used to echo context around the match.
        line_start = data.rfind("\n", 0, m.start())
        line_end = data.find("\n", m.end())
        prev_start = data.rfind("\n", 0, line_start)
        next_end = data.find("\n", line_end + 1)

        # Skip always-skip stuff
        if m.group(1) in ALWAYS_SKIP:
            new.append(m.group(0))
            continue

        # skip when the next line is a title
        next_line = data[m.end():next_end].strip()
        if next_line[0] in "!-/:-@[-`{-~" and all(c == next_line[0] for c in next_line):
            new.append(m.group(0))
            continue

        # Show the match highlighted in red, with one line of context.
        sys.stdout.write("\n" + "-" * 80 + "\n")
        sys.stdout.write(data[prev_start + 1:m.start()])
        sys.stdout.write(colorize(m.group(0), fg="red"))
        sys.stdout.write(data[m.end():next_end])
        sys.stdout.write("\n\n")

        # Keep asking until an empty string or a known role is entered.
        replace_type = None
        while replace_type is None:
            replace_type = raw_input(
                colorize("Replace role: ", fg="yellow")
            ).strip().lower()
            if replace_type and replace_type not in ROLES:
                replace_type = None

        if replace_type == "":
            new.append(m.group(0))
            continue

        if replace_type == "skip":
            new.append(m.group(0))
            ALWAYS_SKIP.append(m.group(1))
            continue

        # Offer the previously-used target as the default answer.
        default = lastvalues.get(m.group(1), m.group(1))
        if default.endswith("()") and replace_type in ("class", "func", "meth"):
            default = default[:-2]
        replace_value = raw_input(
            colorize("Text <target> [", fg="yellow") + default + colorize("]: ", fg="yellow")
        ).strip()
        if not replace_value:
            replace_value = default
        new.append(":%s:`%s`" % (replace_type, replace_value))
        lastvalues[m.group(1)] = replace_value

    new.append(data[last:])
    with open(fname, "w") as fp:
        fp.write("".join(new))

    storage["lastvalues"] = lastvalues
    storage.close()
#
# The following is taken from django.utils.termcolors and is copied here to
# avoid the dependency.
#
def colorize(text='', opts=(), **kwargs):
    """
    Returns your text, enclosed in ANSI graphics codes.

    Depends on the keyword arguments 'fg' and 'bg', and the contents of
    the opts tuple/list.

    Returns the RESET code if no parameters are given.

    Valid colors:
        'black', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white'

    Valid options:
        'bold'
        'underscore'
        'blink'
        'reverse'
        'conceal'
        'noreset' - string will not be auto-terminated with the RESET code

    Examples:
        colorize('hello', fg='red', bg='blue', opts=('blink',))
        colorize()
        colorize('goodbye', opts=('underscore',))
        print(colorize('first line', fg='red', opts=('noreset',)))
        print('this should be red too')
        print(colorize('and so should this'))
        print('this should not be red')
    """
    color_names = ('black', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white')
    foreground = {color_names[x]: '3%s' % x for x in range(8)}
    background = {color_names[x]: '4%s' % x for x in range(8)}

    RESET = '0'
    opt_dict = {'bold': '1', 'underscore': '4', 'blink': '5', 'reverse': '7', 'conceal': '8'}

    text = str(text)
    code_list = []
    # Special case: colorize('', opts=('reset',)) emits only the RESET code.
    if text == '' and len(opts) == 1 and opts[0] == 'reset':
        return '\x1b[%sm' % RESET
    # Bug fix: dict.iteritems() is Python-2-only; items() works on both 2 and 3
    # (the rest of this function already uses Python-3-compatible constructs).
    for k, v in kwargs.items():
        if k == 'fg':
            code_list.append(foreground[v])
        elif k == 'bg':
            code_list.append(background[v])
    for o in opts:
        if o in opt_dict:
            code_list.append(opt_dict[o])
    # Unless told otherwise, terminate the string with a RESET sequence.
    if 'noreset' not in opts:
        text = text + '\x1b[%sm' % RESET
    return ('\x1b[%sm' % ';'.join(code_list)) + text
if __name__ == '__main__':
    try:
        fixliterals(sys.argv[1])
    except (KeyboardInterrupt, SystemExit):
        # Exit quietly on Ctrl-C; the bare newline keeps the prompt tidy.
        print('')
|
MalloyPower/parsing-python | refs/heads/master | front-end/testsuite-python-lib/Python-3.2/Lib/ctypes/test/test_repr.py | 170 | from ctypes import *
import unittest
# Build a trivial subclass of every simple numeric ctypes type. Each loop
# iteration rebinds X; the list keeps every generated class alive.
subclasses = []
for base in [c_byte, c_short, c_int, c_long, c_longlong,
             c_ubyte, c_ushort, c_uint, c_ulong, c_ulonglong,
             c_float, c_double, c_longdouble, c_bool]:
    class X(base):
        pass
    subclasses.append(X)

# Final module-level X (the one the tests reference) subclasses c_char.
class X(c_char):
    pass
# This test checks if the __repr__ is correct for subclasses of simple types
class ReprTest(unittest.TestCase):
    def test_numbers(self):
        # Direct instances repr as e.g. "c_int(42)"; subclasses fall back to
        # the default "<X object at 0x...>" form.
        for typ in subclasses:
            base = typ.__bases__[0]
            self.assertTrue(repr(base(42)).startswith(base.__name__))
            self.assertEqual("<X object at", repr(typ(42))[:12])

    def test_char(self):
        # Same split for c_char: value repr for the base, object repr for X.
        self.assertEqual("c_char(b'x')", repr(c_char(b'x')))
        self.assertEqual("<X object at", repr(X(b'x'))[:12])
if __name__ == "__main__":
unittest.main()
|
RDXT/django-guardian | refs/heads/devel | guardian/utils.py | 2 | """
django-guardian helper functions.
Functions defined within this module should be considered as django-guardian's
internal functionality. They are **not** guaranteed to be stable - which means
they actual input parameters/output type may change in future releases.
"""
from __future__ import unicode_literals
import os
import logging
from itertools import chain
from django.conf import settings
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.contrib.auth.models import AnonymousUser, Group
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import PermissionDenied
from django.db.models import Model
from django.http import HttpResponseForbidden, HttpResponseRedirect
from django.shortcuts import render_to_response
from django.template import RequestContext, TemplateDoesNotExist
from django.utils.http import urlquote
from guardian.compat import get_user_model
from guardian.conf import settings as guardian_settings
from guardian.exceptions import NotUserNorGroup
from django.contrib.auth.views import redirect_to_login
import django
logger = logging.getLogger(__name__)
def abspath(*p):
    """Return the absolute, normalized path built by joining *p* components."""
    # PEP 8 (E731): a def is preferred over assigning a lambda to a name.
    return os.path.abspath(os.path.join(*p))
def get_anonymous_user():
    """
    Returns ``User`` instance (not ``AnonymousUser``) depending on
    ``ANONYMOUS_USER_ID`` configuration.
    """
    # The anonymous user is a real database row whose primary key comes from
    # guardian's ANONYMOUS_USER_ID setting.
    return get_user_model().objects.get(pk=guardian_settings.ANONYMOUS_USER_ID)
def get_identity(identity):
    """
    Normalize ``identity`` into a ``(user, group)`` pair.

    Exactly one element of the returned tuple is set: ``(user_obj, None)``
    for users and ``(None, group_obj)`` for groups. ``AnonymousUser``
    instances are swapped for the persistent anonymous ``User`` so that
    authorization backends can handle anonymous users uniformly.

    :param identity: either ``User`` or ``Group`` instance

    :raises NotUserNorGroup: if cannot return proper identity instance

    **Examples**::

       >>> from django.contrib.auth.models import User
       >>> user = User.objects.create(username='joe')
       >>> get_identity(user)
       (<User: joe>, None)

       >>> group = Group.objects.create(name='users')
       >>> get_identity(group)
       (None, <Group: users>)

       >>> anon = AnonymousUser()
       >>> get_identity(anon)
       (<User: AnonymousUser>, None)

    """
    # AnonymousUser is not database-backed; substitute the dedicated
    # anonymous account so permission lookups have a real row to work with.
    if isinstance(identity, AnonymousUser):
        identity = get_anonymous_user()

    if isinstance(identity, get_user_model()):
        return identity, None
    if isinstance(identity, Group):
        return None, identity

    raise NotUserNorGroup("User/AnonymousUser or Group instance is required "
        "(got %s)" % identity)
def get_403_or_None(request, perms, obj=None, login_url=None,
                    redirect_field_name=None, return_403=False, accept_global_perms=False):
    """
    Return a response denying access when ``request.user`` lacks ``perms``,
    or ``None`` when access should be granted.

    Depending on configuration the denial is a rendered 403 template, a
    raised ``PermissionDenied``, a plain ``HttpResponseForbidden`` or a
    redirect to the login page.
    """
    login_url = login_url or settings.LOGIN_URL
    redirect_field_name = redirect_field_name or REDIRECT_FIELD_NAME

    # Handles both original and with object provided permission check
    # as ``obj`` defaults to None

    has_permissions = False
    # global perms check first (if accept_global_perms)
    if accept_global_perms:
        has_permissions = all(request.user.has_perm(perm) for perm in perms)
    # if still no permission granted, try obj perms
    if not has_permissions:
        has_permissions = all(request.user.has_perm(perm, obj) for perm in perms)

    if not has_permissions:
        if return_403:
            if guardian_settings.RENDER_403:
                # Render the configured 403 template; a missing template is
                # only surfaced when DEBUG is on.
                try:
                    response = render_to_response(
                        guardian_settings.TEMPLATE_403, {},
                        RequestContext(request))
                    response.status_code = 403
                    return response
                except TemplateDoesNotExist as e:
                    if settings.DEBUG:
                        raise e
            elif guardian_settings.RAISE_403:
                raise PermissionDenied
            return HttpResponseForbidden()
        else:
            # Not returning 403 directly: send the user to the login page,
            # preserving the originally requested path.
            return redirect_to_login(request.get_full_path(),
                                     login_url,
                                     redirect_field_name)
def clean_orphan_obj_perms():
    """
    Seeks and removes all object permissions entries pointing at non-existing
    targets.

    Returns number of removed objects.
    """
    from guardian.models import UserObjectPermission
    from guardian.models import GroupObjectPermission

    deleted = 0
    # TODO: optimise
    for perm in chain(UserObjectPermission.objects.all(),
                      GroupObjectPermission.objects.all()):
        # content_object is None when the generic FK points at a row that no
        # longer exists -- exactly the orphans this function purges.
        if perm.content_object is None:
            # Lazy %-style logging args: the message is only rendered when
            # the corresponding log level is actually enabled.
            logger.debug("Removing %s (pk=%d)", perm, perm.pk)
            perm.delete()
            deleted += 1
    logger.info("Total removed orphan object permissions instances: %d",
                deleted)
    return deleted
# TODO: should raise error when multiple UserObjectPermission direct relations
# are defined
def get_obj_perms_model(obj, base_cls, generic_cls):
    """
    Return the model class storing object permissions for ``obj``.

    Prefers a direct (FK-based) permission model subclassing ``base_cls``
    whose ``content_object`` points at ``obj``'s class; falls back to the
    generic-FK model ``generic_cls`` when none is declared.
    """
    if isinstance(obj, Model):
        obj = obj.__class__
    ctype = ContentType.objects.get_for_model(obj)
    for attr in obj._meta.get_all_related_objects():
        # Related-object accessor changed name in Django 1.8.
        if django.VERSION < (1, 8):
            model = getattr(attr, 'model', None)
        else:
            model = getattr(attr, 'related_model', None)
        if (model and issubclass(model, base_cls) and
                model is not generic_cls):
            # if model is generic one it would be returned anyway
            if not model.objects.is_generic():
                # make sure that content_object's content_type is same as
                # the one of given obj
                fk = model._meta.get_field_by_name('content_object')[0]
                if ctype == ContentType.objects.get_for_model(fk.rel.to):
                    return model
    return generic_cls
def get_user_obj_perms_model(obj):
    """Return the model linking ``obj``'s type with the User model.

    Falls back to the generic ``UserObjectPermission`` when no direct
    (FK-based) permission model is declared for the object's class.
    """
    from guardian.models import UserObjectPermission, UserObjectPermissionBase
    return get_obj_perms_model(obj, UserObjectPermissionBase, UserObjectPermission)
def get_group_obj_perms_model(obj):
    """Return the model linking ``obj``'s type with the Group model.

    Falls back to the generic ``GroupObjectPermission`` when no direct
    (FK-based) permission model is declared for the object's class.
    """
    from guardian.models import GroupObjectPermission, GroupObjectPermissionBase
    return get_obj_perms_model(obj, GroupObjectPermissionBase, GroupObjectPermission)
|
an7oine/WinVHS | refs/heads/master | Cygwin/lib/python2.7/multifile.py | 313 | """A readline()-style interface to the parts of a multipart message.
The MultiFile class makes each part of a multipart message "feel" like
an ordinary file, as long as you use fp.readline(). Allows recursive
use, for nested multipart messages. Probably best used together
with module mimetools.
Suggested use:
real_fp = open(...)
fp = MultiFile(real_fp)
"read some lines from fp"
fp.push(separator)
while 1:
"read lines from fp until it returns an empty string" (A)
if not fp.next(): break
fp.pop()
"read remaining lines from fp until it returns an empty string"
The latter sequence may be used recursively at (A).
It is also allowed to use multiple push()...pop() sequences.
If seekable is given as 0, the class code will not do the bookkeeping
it normally attempts in order to make seeks relative to the beginning of the
current file part. This may be useful when using MultiFile with a non-
seekable stream object.
"""
from warnings import warn
warn("the multifile module has been deprecated since Python 2.5",
DeprecationWarning, stacklevel=2)
del warn
__all__ = ["MultiFile","Error"]
class Error(Exception):
    """Raised for malformed multipart input or misuse of the MultiFile API."""
    pass
class MultiFile:
    """File-like wrapper exposing one part of a multipart stream at a time.

    ``push(separator)`` descends into a part delimited by ``--separator``
    lines; ``readline()`` then returns '' at each section divider or end
    marker, ``next()`` advances to the following part and ``pop()`` ascends
    back out. See the module docstring for the intended calling pattern.
    """

    # Class default; overridden per-instance in __init__ when the
    # underlying fp supports tell()/seek().
    seekable = 0

    def __init__(self, fp, seekable=1):
        self.fp = fp
        self.stack = []
        self.level = 0
        self.last = 0
        if seekable:
            self.seekable = 1
            self.start = self.fp.tell()
            self.posstack = []

    def tell(self):
        # Positions are reported relative to the start of the current part.
        if self.level > 0:
            return self.lastpos
        return self.fp.tell() - self.start

    def seek(self, pos, whence=0):
        here = self.tell()
        if whence:
            if whence == 1:
                pos = pos + here
            elif whence == 2:
                if self.level > 0:
                    pos = pos + self.lastpos
                else:
                    raise Error, "can't use whence=2 yet"
        # Only seeks within the already-read portion of the current part
        # are legal.
        if not 0 <= pos <= here or \
           self.level > 0 and pos > self.lastpos:
            raise Error, 'bad MultiFile.seek() call'
        self.fp.seek(pos + self.start)
        self.level = 0
        self.last = 0

    def readline(self):
        # Returns '' at a section divider, end marker, or real EOF.
        if self.level > 0:
            return ''
        line = self.fp.readline()
        # Real EOF?
        if not line:
            self.level = len(self.stack)
            self.last = (self.level > 0)
            if self.last:
                raise Error, 'sudden EOF in MultiFile.readline()'
            return ''
        assert self.level == 0
        # Fast check to see if this is just data
        if self.is_data(line):
            return line
        else:
            # Ignore trailing whitespace on marker lines
            marker = line.rstrip()
        # No? OK, try to match a boundary.
        # Return the line (unstripped) if we don't.
        for i, sep in enumerate(reversed(self.stack)):
            if marker == self.section_divider(sep):
                self.last = 0
                break
            elif marker == self.end_marker(sep):
                self.last = 1
                break
        else:
            return line
        # We only get here if we see a section divider or EOM line
        if self.seekable:
            self.lastpos = self.tell() - len(line)
        self.level = i+1
        if self.level > 1:
            raise Error,'Missing endmarker in MultiFile.readline()'
        return ''

    def readlines(self):
        list = []
        while 1:
            line = self.readline()
            if not line: break
            list.append(line)
        return list

    def read(self): # Note: no size argument -- read until EOF only!
        return ''.join(self.readlines())

    def next(self):
        # Skip the rest of the current part, then step past its divider.
        while self.readline(): pass
        if self.level > 1 or self.last:
            return 0
        self.level = 0
        self.last = 0
        if self.seekable:
            self.start = self.fp.tell()
        return 1

    def push(self, sep):
        # Descend into a nested multipart section delimited by --sep lines.
        if self.level > 0:
            raise Error, 'bad MultiFile.push() call'
        self.stack.append(sep)
        if self.seekable:
            self.posstack.append(self.start)
            self.start = self.fp.tell()

    def pop(self):
        # Ascend out of the innermost section, restoring position bookkeeping.
        if self.stack == []:
            raise Error, 'bad MultiFile.pop() call'
        if self.level <= 1:
            self.last = 0
        else:
            abslastpos = self.lastpos + self.start
        self.level = max(0, self.level - 1)
        self.stack.pop()
        if self.seekable:
            self.start = self.posstack.pop()
            if self.level > 0:
                self.lastpos = abslastpos - self.start

    def is_data(self, line):
        return line[:2] != '--'

    def section_divider(self, str):
        return "--" + str

    def end_marker(self, str):
        return "--" + str + "--"
|
majidaldo/ansible | refs/heads/devel | v2/ansible/playbook/role/definition.py | 24 | # (c) 2014 Michael DeHaan, <michael@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from six import iteritems, string_types
import os
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject, AnsibleMapping
from ansible.playbook.attribute import Attribute, FieldAttribute
from ansible.playbook.base import Base
from ansible.playbook.become import Become
from ansible.playbook.conditional import Conditional
from ansible.playbook.taggable import Taggable
from ansible.utils.path import unfrackpath
__all__ = ['RoleDefinition']
class RoleDefinition(Base, Become, Conditional, Taggable):
    """Represents one role entry of a play's `roles:` list.

    The entry may be a bare string (role name or a path to the role) or a
    dict mixing recognized field attributes (role/name, become, tags, ...)
    with free-form role parameters.
    """

    _role = FieldAttribute(isa='string')

    def __init__(self, role_basedir=None):
        # Resolved filesystem path of the role (set by preprocess_data).
        self._role_path = None
        # Optional extra directory to search for dependent roles.
        self._role_basedir = role_basedir
        # Free-form parameters split off from the role spec.
        self._role_params = dict()
        super(RoleDefinition, self).__init__()

    #def __repr__(self):
    #    return 'ROLEDEF: ' + self._attributes.get('role', '<no name set>')

    @staticmethod
    def load(data, variable_manager=None, loader=None):
        # Role definitions are only built indirectly (e.g. via RoleInclude),
        # never loaded straight from a datastructure.
        raise AnsibleError("not implemented")

    def preprocess_data(self, ds):
        """Normalize a role spec (string or dict) into a clean AnsibleMapping.

        Resolves the role name and path, splits role params from field
        attributes, and returns the new datastructure.
        """
        assert isinstance(ds, dict) or isinstance(ds, string_types)

        if isinstance(ds, dict):
            ds = super(RoleDefinition, self).preprocess_data(ds)

        # we create a new data structure here, using the same
        # object used internally by the YAML parsing code so we
        # can preserve file:line:column information if it exists
        new_ds = AnsibleMapping()
        if isinstance(ds, AnsibleBaseYAMLObject):
            new_ds.ansible_pos = ds.ansible_pos

        # first we pull the role name out of the data structure,
        # and then use that to determine the role path (which may
        # result in a new role name, if it was a file path)
        role_name = self._load_role_name(ds)
        (role_name, role_path) = self._load_role_path(role_name)

        # next, we split the role params out from the valid role
        # attributes and update the new datastructure with that
        # result and the role name
        if isinstance(ds, dict):
            (new_role_def, role_params) = self._split_role_params(ds)
            new_ds.update(new_role_def)
            self._role_params = role_params

        # set the role name in the new ds
        new_ds['role'] = role_name

        # we store the role path internally
        self._role_path = role_path

        # save the original ds for use later
        self._ds = ds

        # and return the cleaned-up data structure
        return new_ds

    def _load_role_name(self, ds):
        '''
        Returns the role name (either the role: or name: field) from
        the role definition, or (when the role definition is a simple
        string), just that string
        '''

        if isinstance(ds, string_types):
            return ds

        role_name = ds.get('role', ds.get('name'))
        if not role_name:
            raise AnsibleError('role definitions must contain a role name', obj=ds)

        return role_name

    def _load_role_path(self, role_name):
        '''
        the 'role', as specified in the ds (or as a bare string), can either
        be a simple name or a full path. If it is a full path, we use the
        basename as the role name, otherwise we take the name as-given and
        append it to the default role path
        '''

        role_path = unfrackpath(role_name)

        if self._loader.path_exists(role_path):
            role_name = os.path.basename(role_name)
            return (role_name, role_path)
        else:
            # we always start the search for roles in the base directory of the playbook
            role_search_paths = [os.path.join(self._loader.get_basedir(), 'roles'), './roles', './']

            # also search in the configured roles path
            if C.DEFAULT_ROLES_PATH:
                configured_paths = C.DEFAULT_ROLES_PATH.split(os.pathsep)
                role_search_paths.extend(configured_paths)

            # finally, append the roles basedir, if it was set, so we can
            # search relative to that directory for dependent roles
            if self._role_basedir:
                role_search_paths.append(self._role_basedir)

            # now iterate through the possible paths and return the first one we find
            for path in role_search_paths:
                role_path = unfrackpath(os.path.join(path, role_name))
                if self._loader.path_exists(role_path):
                    return (role_name, role_path)

        # FIXME: make the parser smart about list/string entries in
        # the yaml so the error line/file can be reported here

        raise AnsibleError("the role '%s' was not found" % role_name)

    def _split_role_params(self, ds):
        '''
        Splits any random role params off from the role spec and store
        them in a dictionary of params for parsing later
        '''

        role_def = dict()
        role_params = dict()

        for (key, value) in iteritems(ds):
            # use the set of FieldAttribute names to determine what is and
            # is not an extra parameter for this role (or sub-class).
            # BUG FIX: the original called dict.iteritems() here, which does
            # not exist on Python 3 (the module otherwise uses
            # six.iteritems); dict membership tests the keys directly and
            # works on both.
            if key not in self._get_base_attributes():
                # this key does not match a field attribute, so it must be a role param
                role_params[key] = value
            else:
                # this is a field attribute, so copy it over directly
                role_def[key] = value

        return (role_def, role_params)

    def get_role_params(self):
        """Return a shallow copy of the role's extra parameters."""
        return self._role_params.copy()

    def get_role_path(self):
        """Return the resolved filesystem path of the role (or None)."""
        return self._role_path
|
papados/ordersys | refs/heads/master | Lib/site-packages/django/contrib/sessions/backends/db.py | 101 | import logging
from django.contrib.sessions.backends.base import SessionBase, CreateError
from django.core.exceptions import SuspiciousOperation
from django.db import IntegrityError, transaction, router
from django.utils import timezone
from django.utils.encoding import force_text
class SessionStore(SessionBase):
    """
    Implements database session store.
    """
    def __init__(self, session_key=None):
        super(SessionStore, self).__init__(session_key)
    def load(self):
        # Fetch the non-expired session row and decode its payload; on a
        # missing or tampered session fall back to a fresh empty session.
        try:
            s = Session.objects.get(
                session_key=self.session_key,
                expire_date__gt=timezone.now()
            )
            return self.decode(s.session_data)
        except (Session.DoesNotExist, SuspiciousOperation) as e:
            if isinstance(e, SuspiciousOperation):
                # Likely tampering: log under django.security.<ExceptionName>.
                logger = logging.getLogger('django.security.%s' %
                        e.__class__.__name__)
                logger.warning(force_text(e))
            self.create()
            return {}
    def exists(self, session_key):
        # True if any row (expired or not) uses this key.
        return Session.objects.filter(session_key=session_key).exists()
    def create(self):
        # Generate keys until an unused one is saved; must_create makes a
        # duplicate key raise CreateError instead of overwriting a row.
        while True:
            self._session_key = self._get_new_session_key()
            try:
                # Save immediately to ensure we have a unique entry in the
                # database.
                self.save(must_create=True)
            except CreateError:
                # Key wasn't unique. Try again.
                continue
            self.modified = True
            self._session_cache = {}
            return
    def save(self, must_create=False):
        """
        Saves the current session data to the database. If 'must_create' is
        True, a database error will be raised if the saving operation doesn't
        create a *new* entry (as opposed to possibly updating an existing
        entry).
        """
        obj = Session(
            session_key=self._get_or_create_session_key(),
            session_data=self.encode(self._get_session(no_load=must_create)),
            expire_date=self.get_expiry_date()
        )
        using = router.db_for_write(Session, instance=obj)
        try:
            # Atomic block so a failed INSERT does not poison the caller's
            # surrounding transaction.
            with transaction.atomic(using=using):
                obj.save(force_insert=must_create, using=using)
        except IntegrityError:
            if must_create:
                raise CreateError
            raise
    def delete(self, session_key=None):
        # Default to the current session; a missing row is not an error.
        if session_key is None:
            if self.session_key is None:
                return
            session_key = self.session_key
        try:
            Session.objects.get(session_key=session_key).delete()
        except Session.DoesNotExist:
            pass
    @classmethod
    def clear_expired(cls):
        # Bulk-delete all expired session rows (used by clearsessions).
        Session.objects.filter(expire_date__lt=timezone.now()).delete()
# At bottom to avoid circular import
from django.contrib.sessions.models import Session
|
snowcloud/engine-groups | refs/heads/master | engine_groups/views.py | 1 | from django.conf import settings
from django.contrib.auth.decorators import login_required, user_passes_test
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect, Http404
from django.shortcuts import render_to_response
from django.template import RequestContext
from mongoengine.base import ValidationError
from mongoengine.queryset import OperationError, MultipleObjectsReturned, DoesNotExist
from pymongo.objectid import ObjectId
from engine_groups.models import Account
from forms import AccountForm, NewAccountForm
def get_one_or_404(**kwargs):
    """Return the single Account matching `kwargs`, or raise Http404.

    Http404 is raised when the lookup matches no document, matches more
    than one, or the lookup value fails validation (e.g. a bad ObjectId).
    """
    # Direct return instead of the original's temp, which was bound to a
    # name (`object`) that shadowed the builtin.
    try:
        return Account.objects.get(**kwargs)
    except (MultipleObjectsReturned, ValidationError, DoesNotExist):
        raise Http404
def index(request):
    """Render the list of all accounts."""
    accounts = Account.objects
    context = RequestContext(request, {'objects': accounts})
    return render_to_response('engine_groups/index.html', context)
def detail(request, object_id, template_name='engine_groups/detail.html'):
    """Render a single account, looked up by id (404 when absent)."""
    account = get_one_or_404(id=object_id)
    context = RequestContext(request)
    return render_to_response(template_name, {'object': account}, context)
@user_passes_test(lambda u: u.is_staff)
def edit(request, object_id, template_name='engine_groups/edit.html'):
    """Edit an existing account (staff only).

    GET renders the bound form; a valid POST saves the form and redirects
    to the account's detail page.
    """
    # Renamed from `object` (shadowed the builtin).
    account = get_one_or_404(id=object_id)
    if request.method == 'POST':
        form = AccountForm(request.POST, instance=account)
        if form.is_valid():
            # The original bound save()'s result to an unused variable; the
            # redirect target is the (same) edited account.
            form.save()
            return HttpResponseRedirect(reverse('group', args=[account.id]))
    else:
        form = AccountForm(instance=account)
    template_context = {'form': form, 'new': False}
    return render_to_response(
        template_name,
        template_context,
        RequestContext(request)
    )
@user_passes_test(lambda u: u.is_staff)
def new(request, template_name='engine_groups/edit.html'):
    """Create a new account (staff only)."""
    if request.method == 'POST':
        form = NewAccountForm(request.POST)
        if form.is_valid():
            account = form.save()
            return HttpResponseRedirect(reverse('group', args=[account.id]))
    else:
        form = NewAccountForm()
    return render_to_response(
        template_name,
        {'form': form, 'new': True},
        RequestContext(request)
    )
|
liulion/mayavi | refs/heads/master | docs/source/mayavi/auto/protein.py | 4 | """
Visualize a protein graph structure downloaded from the protein database in
standard pdb format.
We parse the pdb file, but extract only a very small amount of
information: the type of atoms, their positions, and the links between them.
Most of the complexity of this example comes from the code turning the
PDB information into a list of 3D positions, with associated scalar
and connection information.
We assign a scalar value for the atoms to differentiate the different
types of atoms, but it does not correspond to the atomic mass. The size
and the color of the atom on the visualization are therefore not
chemically significant.
The atoms are plotted using mlab.points3d, and connections between atoms
are added to the dataset, and visualized using a surface module.
The graph is created by adding connection information to points. For this, each
point is designated by its number (in the order of the array passed to
mlab.points3d), and the connection array, made of pairs of these numbers, is
constructed. There is some slightly tedious data manipulation to go from the
named-node graph representation as stored in the pdb file, to the index-based
connection pairs. A similar technique to plot the graph is done in the
:ref:`example_flight_graph`. Another example of graph plotting, showing a
different technique to plot the graph, can be seen on
:ref:`example_delaunay_graph`.
To visualize the local atomic density, we use a gaussian splatter filter
that builds a kernel density estimation of the continuous density field:
each point is convoluted by a Gaussian kernel, and the sum of these
Gaussians form the resulting density field. We visualize this field using
volume rendering.
Reference for the pdb file standard:
http://mmcif.pdb.org/dictionaries/pdb-correspondence/pdb2mmcif.html
"""
# Author: Gael Varoquaux <gael.varoquaux@normalesup.org>
# Copyright (c) 2008, Enthought, Inc.
# License: BSD Style.
# The pdb code for the protein.
protein_code = '2q09'
# Retrieve the file from the protein database #################################
import os
if not os.path.exists('pdb%s.ent.gz' % protein_code):
    # Download the data
    import urllib
    print 'Downloading protein data, please wait'
    opener = urllib.urlopen(
        'ftp://ftp.wwpdb.org/pub/pdb/data/structures/divided/pdb/q0/pdb%s.ent.gz'
        % protein_code)
    open('pdb%s.ent.gz' % protein_code, 'wb').write(opener.read())
# Parse the pdb file ##########################################################
import gzip
infile = gzip.GzipFile('pdb%s.ent.gz' % protein_code, 'rb')
# A graph represented by a dictionary associating nodes with keys
# (numbers), and edges (pairs of node keys).
nodes = dict()
edges = list()
atoms = set()
# Build the graph from the PDB information
last_atom_label = None
last_chain_label = None
for line in infile:
    line = line.split()
    if line[0] in ('ATOM', 'HETATM'):
        # Record atom id -> (atom type, x, y, z); consecutive atoms of the
        # same chain get linked by an edge.
        nodes[line[1]] = (line[2], line[6], line[7], line[8])
        atoms.add(line[2])
        chain_label = line[5]
        if chain_label == last_chain_label:
            edges.append((line[1], last_atom_label))
        last_atom_label = line[1]
        last_chain_label = chain_label
    elif line[0] == 'CONECT':
        # Explicit connectivity records: chain consecutive ids pairwise.
        for start, stop in zip(line[1:-1], line[2:]):
            edges.append((start, stop))
# Map each atom type to a small integer used as the point scalar (NOT the
# atomic mass -- see the module docstring).
atoms = list(atoms)
atoms.sort()
atoms = dict(zip(atoms, range(len(atoms))))
# Turn the graph into 3D positions, and a connection list.
labels = dict()
x = list()
y = list()
z = list()
scalars = list()
for index, label in enumerate(nodes):
    labels[label] = index
    this_scalar, this_x, this_y, this_z = nodes[label]
    scalars.append(atoms[this_scalar])
    x.append(float(this_x))
    y.append(float(this_y))
    z.append(float(this_z))
# Translate named-node edges into index pairs for mlab.
connections = list()
for start, stop in edges:
    connections.append((labels[start], labels[stop]))
import numpy as np
x = np.array(x)
y = np.array(y)
z = np.array(z)
scalars = np.array(scalars)
# Visualize the data ##########################################################
from mayavi import mlab
mlab.figure(1, bgcolor=(0, 0, 0))
mlab.clf()
pts = mlab.points3d(x, y, z, 1.5 * scalars.max() - scalars,
                    scale_factor=0.015, resolution=10)
pts.mlab_source.dataset.lines = np.array(connections)
# Use a tube filter to plot tubes on the link, varying the radius with the
# scalar value
tube = mlab.pipeline.tube(pts, tube_radius=0.15)
tube.filter.radius_factor = 1.
tube.filter.vary_radius = 'vary_radius_by_scalar'
mlab.pipeline.surface(tube, color=(0.8, 0.8, 0))
# Visualize the local atomic density
mlab.pipeline.volume(mlab.pipeline.gaussian_splatter(pts))
mlab.view(49, 31.5, 52.8, (4.2, 37.3, 20.6))
mlab.show()
|
dawnpower/nova | refs/heads/master | nova/db/sqlalchemy/migrate_repo/versions/258_placeholder.py | 200 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# This is a placeholder for Juno backports.
# Do not use this number for new Kilo work. New Kilo work starts after
# all the placeholders.
#
# See blueprint backportable-db-migrations-kilo
# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
def upgrade(migrate_engine):
    """Intentional no-op: migration number reserved for Juno backports."""
def downgrade(migration_engine):
    """Intentional no-op: migration number reserved for Juno backports."""
|
tectronics/photivo | refs/heads/master | scons-local-2.2.0/SCons/Options/PackageOption.py | 14 | #
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Options/PackageOption.py issue-2856:2676:d23b7a2f45e8 2012/08/05 15:38:28 garyo"
__doc__ = """Place-holder for the old SCons.Options module hierarchy
This is for backwards compatibility. The new equivalent is the Variables/
class hierarchy. These will have deprecation warnings added (some day),
and will then be removed entirely (some day).
"""
import SCons.Variables
import SCons.Warnings
warned = False
def PackageOption(*args, **kw):
    """Deprecated shim around SCons.Variables.PackageVariable.

    The first call emits a DeprecatedOptionsWarning; every call simply
    delegates its arguments unchanged.
    """
    global warned
    if not warned:
        SCons.Warnings.warn(
            SCons.Warnings.DeprecatedOptionsWarning,
            "The PackageOption() function is deprecated; use the PackageVariable() function instead.")
        warned = True
    return SCons.Variables.PackageVariable(*args, **kw)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
lixiangning888/whole_project | refs/heads/master | modules/signatures_orginal_20151110/martians_ie.py | 1 | # Copyright (C) 2015 Will Metcalf (william.metcalf@gmail.com)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
try:
import re2 as re
except ImportError:
import re
from lib.cuckoo.common.abstracts import Signature
class MartiansIE(Signature):
    """Flags "martian" child processes spawned by Internet Explorer.

    Walks the behavioral process tree and reports any descendant of
    iexplore.exe whose executable path does not match the whitelist of
    expected child programs (Adobe Reader, Java, Silverlight, ntvdm).
    """

    name = "ie_martian_children"
    description = "Martian Subprocess Started By IE"
    severity = 3
    categories = ["martians"]
    authors = ["Will Metcalf"]
    minimum = "0.5"

    def go_deeper(self, pdict, result=None):
        """Collect the lowercased module paths of pdict and all descendants."""
        if result is None:
            result = []
        result.append(pdict["module_path"].lower())
        for e in pdict["children"]:
            self.go_deeper(e, result)
        return result

    def find_martians(self, ptree, pwlist):
        """Return process paths under ptree matching no regex in pwlist."""
        result = []
        if ptree["children"]:
            children = self.go_deeper(ptree)
            for child in children:
                # any() replaces the original manual match_found flag loop.
                if not any(entry.match(child) for entry in pwlist):
                    result.append(child)
        return result

    def run(self):
        # Only meaningful for URL analyses (IE opening a page).
        if self.results["target"]["category"] == "file":
            return False
        self.ie_paths_re = re.compile(r"^c:\\program files(?:\s\(x86\))?\\internet explorer\\iexplore.exe$",re.I)
        #run through re.escape()
        self.white_list_re = ["^C\\:\\\\Program Files(?:\s\\(x86\\))?\\\\Adobe\\\\Reader\\ \\d+\\.\\d+\\\\Reader\\\\AcroRd32\\.exe$",
                              "^C\\:\\\\Program Files(?:\s\\(x86\\))?\\\\Java\\\\jre\\d+\\\\bin\\\\j(?:avaw?|p2launcher)\\.exe$",
                              "^C\\:\\\\Program Files(?:\s\\(x86\\))?\\\\Microsoft SilverLight\\\\(?:\\d+\\.)+\\d\\\\agcp.exe$",
                              "^C\\:\\\\Windows\\\\System32\\\\ntvdm.exe$",
                              ]
        # means we can be evaded but also means we can have relatively tight
        # paths between 32-bit and 64-bit
        self.white_list_re_compiled = []
        for entry in self.white_list_re:
            self.white_list_re_compiled.append(re.compile(entry, re.I))
        # IE itself is always an acceptable (grand)child.
        self.white_list_re_compiled.append(self.ie_paths_re)
        # Sometimes a loaded service makes the tree out of order, so check
        # every top-level process whose path is an IE binary.
        self.initialpath = None
        processes = self.results["behavior"]["processtree"]
        if len(processes):
            for p in processes:
                initialpath = p["module_path"].lower()
                # BUG FIX: `"children" in p` replaces the Python-2-only
                # dict.has_key(), keeping identical behavior on py2.
                if initialpath and self.ie_paths_re.match(initialpath) and "children" in p:
                    self.martians = self.find_martians(p, self.white_list_re_compiled)
                    if len(self.martians) > 0:
                        for martian in self.martians:
                            self.data.append({"ie_martian": martian})
                        return True
        return False
|
nafex/pyload | refs/heads/stable | module/plugins/hoster/BillionuploadsCom.py | 15 | # -*- coding: utf-8 -*-
from module.plugins.internal.DeadHoster import DeadHoster, create_getInfo
class BillionuploadsCom(DeadHoster):
    # DeadHoster stub: the service is gone, so this plugin only reports
    # matching links as offline; the attributes below are pyLoad metadata.
    __name__ = "BillionuploadsCom"
    __type__ = "hoster"
    __version__ = "0.07"
    __status__ = "testing"
    # Matches the 12-character file-id URLs of the defunct service.
    __pattern__ = r'http://(?:www\.)?billionuploads\.com/\w{12}'
    __config__ = [] #@TODO: Remove in 0.4.10
    __description__ = """Billionuploads.com hoster plugin"""
    __license__ = "GPLv3"
    __authors__ = [("zoidberg", "zoidberg@mujmail.cz")]
# Module-level hook expected by pyLoad's plugin loader.
getInfo = create_getInfo(BillionuploadsCom)
|
duqiao/django | refs/heads/master | tests/flatpages_tests/test_models.py | 342 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib.flatpages.models import FlatPage
from django.test import SimpleTestCase
from django.test.utils import override_script_prefix
class FlatpageModelTests(SimpleTestCase):
    # Tests for FlatPage.get_absolute_url().
    def test_get_absolute_url_urlencodes(self):
        # Non-ASCII characters in the URL must come back percent-encoded.
        pf = FlatPage(title="Café!", url='/café/')
        self.assertEqual(pf.get_absolute_url(), '/caf%C3%A9/')
    @override_script_prefix('/beverages/')
    def test_get_absolute_url_honors_script_prefix(self):
        # The active script prefix must be prepended to the page URL.
        pf = FlatPage(title="Tea!", url='/tea/')
        self.assertEqual(pf.get_absolute_url(), '/beverages/tea/')
|
ddico/odoo | refs/heads/master | addons/sms/wizard/sms_cancel.py | 2 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import _, api, fields, models
class SMSCancel(models.TransientModel):
    # Transient wizard that dismisses failed SMS notifications for a model.
    _name = 'sms.cancel'
    _description = 'Dismiss notification for resend by model'
    # Technical name of the model whose failed SMS will be dismissed.
    model = fields.Char(string='Model', required=True)
    # Confirmation text shown in the wizard dialog.
    help_message = fields.Char(string='Help message', compute='_compute_help_message')
    @api.depends('model')
    def _compute_help_message(self):
        # NOTE(review): 'unread_counter' is expected to be supplied through
        # the action context by the caller -- confirm against the UI action.
        for wizard in self:
            wizard.help_message = _("Are you sure you want to discard %s SMS delivery failures. You won't be able to re-send these SMS later!") % (wizard._context.get('unread_counter'))
    def action_cancel(self):
        """Mark the current user's failed SMS notifications as canceled."""
        # TDE CHECK: delete pending SMS
        author_id = self.env.user.partner_id.id
        for wizard in self:
            # Raw SQL (parameterized) to fetch failed 'sms' notifications,
            # and their messages, authored by the current user on the
            # wizard's model.
            self._cr.execute("""
                    SELECT notif.id, msg.id
                    FROM mail_message_res_partner_needaction_rel notif
                    JOIN mail_message msg
                    ON notif.mail_message_id = msg.id
                    WHERE notif.notification_type = 'sms' IS TRUE AND notif.notification_status IN ('bounce', 'exception')
                    AND msg.model = %s
                    AND msg.author_id = %s """, (wizard.model, author_id))
            res = self._cr.fetchall()
            notif_ids = [row[0] for row in res]
            message_ids = list(set([row[1] for row in res]))
            if notif_ids:
                self.env['mail.notification'].browse(notif_ids).sudo().write({'notification_status': 'canceled'})
            if message_ids:
                # Push the updated failure counters to the web client.
                self.env['mail.message'].browse(message_ids)._notify_message_notification_update()
        return {'type': 'ir.actions.act_window_close'}
|
ros/ros | refs/heads/noetic-devel | core/roslib/src/roslib/resources.py | 1 | # Software License Agreement (BSD License)
#
# Copyright (c) 2008, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
"""
Warning: do not use this library. It is unstable and most of the routines
here have been superseded by other libraries (e.g. rospkg). These
routines will likely be *deleted* in future releases.
"""
import os
import roslib.manifest
import roslib.names
import roslib.packages
def _get_manifest_by_dir(package_dir):
    """
    Helper routine for loading Manifest instances
    @param package_dir: package directory location
    @type  package_dir: str
    @return: manifest for package, or None if no manifest file is present
    @rtype: Manifest
    """
    f = os.path.join(package_dir, roslib.manifest.MANIFEST_FILE)
    # BUG FIX: os.path.join() always returns a non-empty (truthy) string,
    # so the original "if f:" could never take its else branch. Testing
    # for the file's existence matches the intended None return.
    if os.path.isfile(f):
        return roslib.manifest.parse_file(f)
    else:
        return None
def list_package_resources_by_dir(package_dir, include_depends, subdir, rfilter=os.path.isfile):
    """
    List resources in a package directory within a particular
    subdirectory. This is useful for listing messages, services, etc...
    @param package_dir: package directory location
    @type  package_dir: str
    @param subdir: name of subdirectory
    @type  subdir: str
    @param include_depends: if True, include resources in dependencies as well
    @type  include_depends: bool
    @param rfilter: resource filter function that returns true if filename is the desired resource type
    @type  rfilter: fn(filename)->bool
    """
    package = os.path.basename(package_dir)
    # Renamed from `dir` (shadowed the builtin); also guard against a None
    # return when the subdirectory does not exist, and drop the redundant
    # initial `resources = []` assignment the original overwrote.
    resource_dir = roslib.packages._get_pkg_subdir_by_dir(package_dir, subdir, False)
    if resource_dir and os.path.isdir(resource_dir):
        resources = [roslib.names.resource_name(package, f, my_pkg=package)
                     for f in os.listdir(resource_dir) if rfilter(os.path.join(resource_dir, f))]
    else:
        resources = []
    if include_depends:
        depends = _get_manifest_by_dir(package_dir).depends
        dirs = [roslib.packages.get_pkg_subdir(d.package, subdir, False) for d in depends]
        for (dep, dir_) in zip(depends, dirs):  # py3k
            if not dir_ or not os.path.isdir(dir_):
                continue
            resources.extend(
                [roslib.names.resource_name(dep.package, f, my_pkg=package)
                 for f in os.listdir(dir_) if rfilter(os.path.join(dir_, f))])
    return resources
def list_package_resources(package, include_depends, subdir, rfilter=os.path.isfile):
    """
    List resources in a package within a particular subdirectory. This is useful for listing
    messages, services, etc...
    @param package: package name
    @type  package: str
    @param subdir: name of subdirectory
    @type  subdir: str
    @param include_depends: if True, include resources in dependencies as well
    @type  include_depends: bool
    @param rfilter: resource filter function that returns true if filename is the desired resource type
    @type  rfilter: fn(filename)->bool
    """
    # Resolve the package to its directory, then delegate.
    pkg_dir = roslib.packages.get_pkg_dir(package)
    return list_package_resources_by_dir(pkg_dir, include_depends, subdir, rfilter)
|
xiaozhuchacha/OpenBottle | refs/heads/master | action_earley_srv/scripts/nltk/test/childes_fixt.py | 28 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
def setup_module(module):
    """Skip the CHILDES tests unless the corpus is installed locally."""
    from nose import SkipTest
    import nltk.data
    try:
        nltk.data.find('corpora/childes/data-xml/Eng-USA-MOR/')
    except LookupError as err:
        print(err)
        raise SkipTest("The CHILDES corpus is not found. "
                       "It should be manually downloaded and saved/unpacked "
                       "to [NLTK_Data_Dir]/corpora/childes/")
|
aarora79/sitapt | refs/heads/master | sitapt/visualize/wc.py | 1 | import pandas as pd
def _write_top100(source_csv, mean_csv, top_csv, label):
    """Rank columns of source_csv by mean and export the top 100.

    Drops the first (index) column, computes per-column means, writes the
    100 highest to mean_csv, then re-reads that file to attach headers
    (label, 'percentage') and writes the final top_csv without an index.
    """
    df = pd.read_csv(source_csv)
    df = df[df.columns[1:]]
    mean = df.mean()
    mean.sort_values(inplace=True, ascending=False)
    mean[:100].to_csv(mean_csv)
    df2 = pd.read_csv(mean_csv, names=[label, 'percentage'])
    df2.to_csv(top_csv, index=False)

# Same pipeline applied to applications and protocols (was duplicated inline).
_write_top100('applications.csv', 'appl_mean.csv', 'appl_top100.csv', 'application')
_write_top100('protocols.csv', 'protocols_mean.csv', 'protocols_top100.csv', 'protocols')
|
rlskoeser/loris | refs/heads/development | misc/openjpeg_transform.py | 3 |
# Here's why we can't use PIL's API for OpenJPEG
# See http://pillow.readthedocs.org/en/latest/handbook/image-file-formats.html#jpeg-2000
import timeit
import subprocess
from PIL import Image
from PIL.ImageFile import Parser
import subprocess
import sys
def mk_tile_with_PIL():
    # Decode a 256x256 tile from a tiled JP2 with PIL and save it as JPEG.
    jp2_with_tiles_fp = '../tests/img/01/02/0001.jp2'
    im = Image.open(jp2_with_tiles_fp)
    # NOTE(review): this assigns an attribute named `reduce`; presumably it
    # requests decoding at a reduced resolution level -- confirm against
    # Pillow's JPEG-2000 plugin documentation.
    im.reduce = 2
    im = im.crop((0,0,256,256))
    im.save('/tmp/out.jpg')
# Time the PIL-based approach over 3 runs.
# NOTE(review): timeit returns the TOTAL time for number=3, not the average
# the label claims -- dividing by 3 would match the message.
setup = 'from __main__ import mk_tile_with_PIL'
t = timeit.timeit('mk_tile_with_PIL()', setup=setup, number=3)
print 'Average with PIL: %0.6f' % (t,)
def mk_tile_subproc():
    # Decode the same tile by shelling out to OpenJPEG's opj_decompress,
    # streaming its BMP output through a named pipe into PIL's parser.
    opj_bin = '/usr/local/bin/opj_decompress'
    opj_lib = '/usr/local/lib/libopenjp2.so'
    pipe_o = '/tmp/mypipe.bmp'
    out_jpg = '/tmp/test.jpg'
    mkfifo_cmd = '/usr/bin/mkfifo %s' % (pipe_o,)
    rmfifo_cmd = '/bin/rm %s' % (pipe_o,)
    i = '../tests/img/01/02/0001.jp2'
    r = 2 # reduce
    # d = '256,256,512,512'
    d = '0,0,256,256'  # decode region (x0,y0,x1,y1)
    opj_cmd = '%s -i %s -o %s -d %s -r %s' % (opj_bin, i, pipe_o, d, r)
    # make a named pipe
    # NOTE(review): check_call raises CalledProcessError on a non-zero
    # exit, so the != 0 checks after both calls can never fire.
    mkfifo_resp = subprocess.check_call(mkfifo_cmd, shell=True)
    if mkfifo_resp != 0:
        sys.stderr.write('mkfifo not OK\n')
    # write opj_decompress's output to the named pipe
    opj_proc = subprocess.Popen(opj_cmd, shell=True,
        bufsize=-1, stderr=subprocess.PIPE, stdout=subprocess.PIPE,
        env={ 'LD_LIBRARY_PATH' : opj_lib })
    # open the named pipe and parse the stream incrementally
    im = None
    with open(pipe_o, 'rb') as f:
        p = Parser()
        while True:
            s = f.read(1024)
            if not s:
                break
            p.feed(s)
        im = p.close()
    # finish opj
    opj_exit = opj_proc.wait()
    if opj_exit != 0:
        map(sys.stderr.write, opj_proc.stderr)
    else:
        # opj was successful, save to a jpg
        # map(sys.stdout.write, opj_proc.stdout)
        im.save(out_jpg, quality=95)
    # remove the named pipe
    rmfifo_resp = subprocess.check_call(rmfifo_cmd, shell=True)
    if rmfifo_resp != 0:
        sys.stderr.write('rm fifo not OK\n')
# Time the subprocess/named-pipe approach over 3 runs (same caveat as
# above: timeit reports the total for number=3, not an average).
setup = 'from __main__ import mk_tile_subproc'
t = timeit.timeit('mk_tile_subproc()', setup=setup, number=3)
print 'Average with shellout to subprocess: %0.6f' % (t,)
|
monash-merc/cvl-fabric-launcher | refs/heads/master | pyinstaller-2.1/PyInstaller/depend/utils.py | 10 | #-----------------------------------------------------------------------------
# Copyright (c) 2013, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License with exception
# for distributing bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
"""
Scan the code object for imports, __all__ and weird stuff
"""
import dis
import os
from PyInstaller import compat
from PyInstaller.compat import ctypes
from PyInstaller.compat import is_unix, is_darwin, is_py25, is_py27
import PyInstaller.depend.utils
import PyInstaller.log as logging
logger = logging.getLogger(__name__)
# Bytecode opcode numbers used by the scanner below. Opcodes that do not
# exist in the running interpreter's `dis` tables are set to None.
# BUG FIX: the two bare `except:` clauses (IMPORT_STAR, STORE_MAP) now
# catch only ValueError -- the error dis.opname.index() raises for a
# missing name, and what the SET_LINENO/STORE_DEREF lookups already catch
# -- instead of swallowing SystemExit/KeyboardInterrupt too.
IMPORT_NAME = dis.opname.index('IMPORT_NAME')
IMPORT_FROM = dis.opname.index('IMPORT_FROM')
try:
    IMPORT_STAR = dis.opname.index('IMPORT_STAR')
except ValueError:
    IMPORT_STAR = None
STORE_NAME = dis.opname.index('STORE_NAME')
STORE_FAST = dis.opname.index('STORE_FAST')
STORE_GLOBAL = dis.opname.index('STORE_GLOBAL')
try:
    STORE_MAP = dis.opname.index('STORE_MAP')
except ValueError:
    STORE_MAP = None
LOAD_GLOBAL = dis.opname.index('LOAD_GLOBAL')
LOAD_ATTR = dis.opname.index('LOAD_ATTR')
LOAD_NAME = dis.opname.index('LOAD_NAME')
EXEC_STMT = dis.opname.index('EXEC_STMT')
try:
    SET_LINENO = dis.opname.index('SET_LINENO')
except ValueError:
    SET_LINENO = None
BUILD_LIST = dis.opname.index('BUILD_LIST')
LOAD_CONST = dis.opname.index('LOAD_CONST')
if is_py25:
    LOAD_CONST_level = LOAD_CONST
else:
    LOAD_CONST_level = None
# Conditional-jump opcodes differ between Python versions.
if is_py27:
    COND_OPS = set([dis.opname.index('POP_JUMP_IF_TRUE'),
                    dis.opname.index('POP_JUMP_IF_FALSE'),
                    dis.opname.index('JUMP_IF_TRUE_OR_POP'),
                    dis.opname.index('JUMP_IF_FALSE_OR_POP'),
                    ])
else:
    COND_OPS = set([dis.opname.index('JUMP_IF_FALSE'),
                    dis.opname.index('JUMP_IF_TRUE'),
                    ])
JUMP_FORWARD = dis.opname.index('JUMP_FORWARD')
try:
    STORE_DEREF = dis.opname.index('STORE_DEREF')
except ValueError:
    STORE_DEREF = None
STORE_OPS = set([STORE_NAME, STORE_FAST, STORE_GLOBAL, STORE_DEREF, STORE_MAP])
#IMPORT_STAR -> IMPORT_NAME mod ; IMPORT_STAR
#JUMP_IF_FALSE / JUMP_IF_TRUE / JUMP_FORWARD
HASJREL = set(dis.hasjrel)
def pass1(code):
    """Decode a raw bytecode string into a flat instruction list.

    `code` is indexed one element at a time and each element is passed to
    ord(), i.e. it is expected to behave like a Python 2 `str` bytecode
    stream (one character per byte).

    Returns a list of tuples (op, oparg, incondition, curline) where:
      op          -- the opcode number,
      oparg       -- the 16-bit argument (None for argument-less opcodes),
      incondition -- 1 while the instruction lies inside a conditional
                     jump region opened by one of COND_OPS, else 0,
      curline     -- the most recent SET_LINENO value (stays 0 on versions
                     where SET_LINENO does not exist).
    SET_LINENO instructions themselves are consumed, not emitted.
    """
    instrs = []
    i = 0
    n = len(code)
    curline = 0
    incondition = 0
    out = 0
    while i < n:
        # Once we walk past the recorded jump target, the conditional
        # region is over.
        if i >= out:
            incondition = 0
        c = code[i]
        i = i + 1
        op = ord(c)
        if op >= dis.HAVE_ARGUMENT:
            # Argumented opcode: a 16-bit little-endian operand follows.
            oparg = ord(code[i]) + ord(code[i + 1]) * 256
            i = i + 2
        else:
            oparg = None
        if not incondition and op in COND_OPS:
            # Entering a conditional region; remember where it ends.
            incondition = 1
            out = oparg
            if op in HASJREL:
                # Relative jumps are measured from the next instruction.
                out += i
        elif incondition and op == JUMP_FORWARD:
            # A forward jump inside the region (e.g. skipping an else
            # branch) may extend the region's end.
            out = max(out, i + oparg)
        if op == SET_LINENO:
            curline = oparg
        else:
            instrs.append((op, oparg, incondition, curline))
    return instrs
def scan_code(co, m=None, w=None, b=None, nested=0):
    """Scan code object `co` for imports, ``__all__`` and ctypes usage.

    Args:
        co: the code object to scan; code objects found in `co.co_consts`
            are scanned recursively.
        m: optional accumulator for imports found; receives tuples
            (name, nested, conditional, level).
        w: optional accumulator for warning strings.
        b: optional accumulator for binaries referenced through ctypes.
        nested: 0 for a top-level scan, 1 when scanning a code object
            nested inside another one (delayed imports).

    Returns:
        The tuple (m, w, b, all_names), where all_names holds the names
        collected from ``__all__`` list assignments, including those found
        in nested code objects.
    """
    instrs = pass1(co.co_code)
    if m is None:
        m = []
    if w is None:
        w = []
    if b is None:
        b = []
    all_names = []
    lastname = None
    level = -1  # import-level, same behaviour as up to Python 2.4
    for i, (op, oparg, conditional, curline) in enumerate(instrs):
        if op == IMPORT_NAME:
            # Fix: the original `if level <= 0: ... else: ...` executed the
            # exact same statement in both branches; collapsed into one.
            name = lastname = co.co_names[oparg]
            m.append((name, nested, conditional, level))
        elif op == IMPORT_FROM:
            name = co.co_names[oparg]
            # For relative imports whose module part is empty or already
            # ends with a dot, do not insert a separating dot.
            if level > 0 and (not lastname or lastname[-1:] == '.'):
                name = lastname + name
            else:
                name = lastname + '.' + name
            m.append((name, nested, conditional, level))
            assert lastname is not None
        elif op == IMPORT_STAR:
            assert lastname is not None
            m.append((lastname + '.*', nested, conditional, level))
        elif op == STORE_NAME:
            if co.co_names[oparg] == "__all__":
                # Walk backwards over the LOAD_CONSTs that built the list
                # assigned to __all__.
                j = i - 1
                pop, poparg, pcondtl, pline = instrs[j]
                if pop != BUILD_LIST:
                    w.append("W: __all__ is built strangely at line %s" % pline)
                else:
                    all_names = []
                    while j > 0:
                        j = j - 1
                        pop, poparg, pcondtl, pline = instrs[j]
                        if pop == LOAD_CONST:
                            all_names.append(co.co_consts[poparg])
                        else:
                            break
        elif op in STORE_OPS:
            pass
        elif op == LOAD_CONST_level:
            # starting with Python 2.5, _each_ import is preceded with a
            # LOAD_CONST to indicate the relative level.
            if isinstance(co.co_consts[oparg], (int, long)):
                level = co.co_consts[oparg]
        elif op == LOAD_GLOBAL:
            name = co.co_names[oparg]
            cndtl = ['', 'conditional'][conditional]
            lvl = ['top-level', 'delayed'][nested]
            if name == "__import__":
                w.append("W: %s %s __import__ hack detected at line %s" % (lvl, cndtl, curline))
            elif name == "eval":
                w.append("W: %s %s eval hack detected at line %s" % (lvl, cndtl, curline))
        elif op == EXEC_STMT:
            cndtl = ['', 'conditional'][conditional]
            lvl = ['top-level', 'delayed'][nested]
            w.append("W: %s %s exec statement detected at line %s" % (lvl, cndtl, curline))
        else:
            lastname = None
        if ctypes:
            # ctypes scanning requires a scope wider than one bytecode
            # instruction, so the code resides in a separate function for
            # clarity.
            ctypesb, ctypesw = scan_code_for_ctypes(co, instrs, i)
            b.extend(ctypesb)
            w.extend(ctypesw)
    for c in co.co_consts:
        if isinstance(c, type(co)):
            _, _, _, nested_all = scan_code(c, m, w, b, 1)
            all_names.extend(nested_all)
    return m, w, b, all_names
def scan_code_for_ctypes(co, instrs, i):
    """
    Detects ctypes dependencies, using reasonable heuristics that should
    cover most common ctypes usages; returns a tuple of two lists, one
    containing names of binaries detected as dependencies, the other
    containing warnings.
    """
    binaries = []

    def _append_const_lib(idx):
        """Record instrs[idx]'s constant as a library name, if it is a
        LOAD_CONST (i.e. a literal argument such as "library.so")."""
        opcode = instrs[idx][0]
        arg = instrs[idx][1]
        if opcode == LOAD_CONST:
            binaries.append(co.co_consts[arg])

    opcode = instrs[i][0]
    arg = instrs[i][1]
    if opcode == LOAD_GLOBAL or opcode == LOAD_NAME:
        name = co.co_names[arg]
        if name in ("CDLL", "WinDLL"):
            # Pattern: CDLL("library.so")
            #   LOAD_GLOBAL 0 (CDLL)        <--- current instruction
            #   LOAD_CONST  1 ('library.so')
            _append_const_lib(i + 1)
        elif name == "ctypes":
            # Pattern: ctypes.CDLL("library.so")
            #   LOAD_GLOBAL 0 (ctypes)      <--- current instruction
            #   LOAD_ATTR   1 (CDLL)
            #   LOAD_CONST  1 ('library.so')
            next_op = instrs[i + 1][0]
            next_arg = instrs[i + 1][1]
            if next_op == LOAD_ATTR and co.co_names[next_arg] in ("CDLL", "WinDLL"):
                _append_const_lib(i + 2)
        elif name in ("cdll", "windll"):
            # Patterns:
            #   cdll.library            (only valid on Windows)
            #     LOAD_GLOBAL 0 (cdll)  <--- current instruction
            #     LOAD_ATTR   1 (library)
            #   cdll.LoadLibrary("library.so")
            #     LOAD_GLOBAL 0 (cdll)  <--- current instruction
            #     LOAD_ATTR   1 (LoadLibrary)
            #     LOAD_CONST  1 ('library.so')
            next_op = instrs[i + 1][0]
            next_arg = instrs[i + 1][1]
            if next_op == LOAD_ATTR:
                attr = co.co_names[next_arg]
                if attr == "LoadLibrary":
                    # Second form: the library name is one more
                    # instruction ahead.
                    _append_const_lib(i + 2)
                else:
                    # First form: the attribute itself names the DLL.
                    binaries.append(attr + ".dll")
    # Post-process the collected names.  Falsy entries (e.g. from
    # ctypes.CDLL(None), as seen in PyObjC.objc._bridgesupport) are dropped.
    # Names that are not bare filenames get a warning but are still
    # returned, matching the original behaviour.
    warnings = []
    kept = []
    for soname in binaries:
        if not soname:
            continue
        kept.append(soname)
        if soname != os.path.basename(soname):
            warnings.append("W: ignoring %s - ctypes imports only supported using bare filenames" % (soname,))
    return kept, warnings
def _resolveCtypesImports(cbinaries):
    """Completes ctypes BINARY entries for modules with their full path.

    Args:
        cbinaries: iterable of bare library names collected by
            scan_code_for_ctypes().

    Returns:
        A list of (name, fullpath, "BINARY") tuples, one per library that
        could be located on disk; libraries that cannot be found are only
        logged as warnings.
    """
    from ctypes.util import find_library
    # Pick the environment variable that the platform's loader consults.
    if is_unix:
        envvar = "LD_LIBRARY_PATH"
    elif is_darwin:
        envvar = "DYLD_LIBRARY_PATH"
    else:
        envvar = "PATH"
    def _setPaths():
        # Prepend PyInstaller's extra search paths to the loader path so
        # find_library() can see them; returns the previous value so it
        # can be restored afterwards.
        path = os.pathsep.join(PyInstaller.__pathex__)
        old = compat.getenv(envvar)
        if old is not None:
            path = os.pathsep.join((path, old))
        compat.setenv(envvar, path)
        return old
    def _restorePaths(old):
        # Undo _setPaths(); unset entirely if the variable did not exist.
        if old is None:
            compat.unsetenv(envvar)
        else:
            compat.setenv(envvar, old)
    ret = []
    # Try to locate the shared library on disk. This is done by
    # executing ctypes.utile.find_library prepending ImportTracker's
    # local paths to library search paths, then replaces original values.
    old = _setPaths()
    for cbin in cbinaries:
        # Ignore annoying warnings like:
        # 'W: library kernel32.dll required via ctypes not found'
        # 'W: library coredll.dll required via ctypes not found'
        if cbin in ['coredll.dll', 'kernel32.dll']:
            continue
        ext = os.path.splitext(cbin)[1]
        # On Windows, only .dll files can be loaded.
        if os.name == "nt" and ext.lower() in [".so", ".dylib"]:
            continue
        cpath = find_library(os.path.splitext(cbin)[0])
        if is_unix:
            # CAVEAT: find_library() is not the correct function. Ctype's
            # documentation says that it is meant to resolve only the filename
            # (as a *compiler* does) not the full path. Anyway, it works well
            # enough on Windows and Mac. On Linux, we need to implement
            # more code to find out the full path.
            if cpath is None:
                cpath = cbin
            # "man ld.so" says that we should first search LD_LIBRARY_PATH
            # and then the ldcache
            for d in compat.getenv(envvar, '').split(os.pathsep):
                if os.path.isfile(os.path.join(d, cpath)):
                    cpath = os.path.join(d, cpath)
                    break
            else:
                # Not on LD_LIBRARY_PATH: fall back to parsing the
                # dynamic-linker cache listing for a matching entry.
                text = compat.exec_command("/sbin/ldconfig", "-p")
                for L in text.strip().splitlines():
                    if cpath in L:
                        cpath = L.split("=>", 1)[1].strip()
                        assert os.path.isfile(cpath)
                        break
                else:
                    cpath = None
        if cpath is None:
            logger.warn("library %s required via ctypes not found", cbin)
        else:
            ret.append((cbin, cpath, "BINARY"))
    _restorePaths(old)
    return ret
|
TeamExodus/external_chromium_org | refs/heads/EXODUS-5.1 | build/android/pylib/instrumentation/test_runner.py | 25 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Class for running instrumentation tests on a single device."""
import logging
import os
import re
import sys
import time
from pylib import constants
from pylib import flag_changer
from pylib import valgrind_tools
from pylib.base import base_test_result
from pylib.base import base_test_runner
from pylib.device import device_errors
from pylib.instrumentation import json_perf_parser
from pylib.instrumentation import test_result
sys.path.append(os.path.join(constants.DIR_SOURCE_ROOT, 'build', 'util', 'lib',
'common'))
import perf_tests_results_helper # pylint: disable=F0401
# Annotation value that marks a test method as a performance test
# (consumed by TestRunner._IsPerfTest below).
_PERF_TEST_ANNOTATION = 'PerfTest'
def _GetDataFilesForTestSuite(suite_basename):
"""Returns a list of data files/dirs needed by the test suite.
Args:
suite_basename: The test suite basename for which to return file paths.
Returns:
A list of test file and directory paths.
"""
test_files = []
if suite_basename in ['ChromeTest', 'ContentShellTest']:
test_files += [
'net/data/ssl/certificates/',
]
return test_files
class TestRunner(base_test_runner.BaseTestRunner):
  """Responsible for running a series of tests connected to a single device."""
  # On-device locations for pushed test data and coverage output.
  _DEVICE_DATA_DIR = 'chrome/test/data'
  _DEVICE_COVERAGE_DIR = 'chrome/test/coverage'
  _HOSTMACHINE_PERF_OUTPUT_FILE = '/tmp/chrome-profile'
  _DEVICE_PERF_OUTPUT_SEARCH_PREFIX = (constants.DEVICE_PERF_OUTPUT_DIR +
                                       '/chrome-profile*')
  # Class-level cache of devices that already received the test files, so
  # PushDataDeps can skip redundant pushes across TestRunner instances.
  _DEVICE_HAS_TEST_FILES = {}
  def __init__(self, test_options, device, shard_index, test_pkg,
               additional_flags=None):
    """Create a new TestRunner.
    Args:
      test_options: An InstrumentationOptions object.
      device: Attached android device.
      shard_index: Shard index.
      test_pkg: A TestPackage object.
      additional_flags: A list of additional flags to add to the command line.
    """
    # NOTE: self.device and self.tool are presumably bound by the base
    # class __init__ — confirm against BaseTestRunner.
    super(TestRunner, self).__init__(device, test_options.tool,
                                     test_options.push_deps,
                                     test_options.cleanup_test_files)
    # Each shard gets its own port to avoid lighttpd port collisions.
    self._lighttp_port = constants.LIGHTTPD_RANDOM_PORT_FIRST + shard_index
    self.coverage_device_file = None
    self.coverage_dir = test_options.coverage_dir
    self.coverage_host_file = None
    self.options = test_options
    self.test_pkg = test_pkg
    # Use the correct command line file for the package under test.
    cmdline_file = [a.cmdline_file for a in constants.PACKAGE_INFO.itervalues()
                    if a.test_package == self.test_pkg.GetPackageName()]
    assert len(cmdline_file) < 2, 'Multiple packages have the same test package'
    if len(cmdline_file) and cmdline_file[0]:
      self.flags = flag_changer.FlagChanger(self.device, cmdline_file[0])
      if additional_flags:
        self.flags.AddFlags(additional_flags)
    else:
      self.flags = None
  #override
  def InstallTestPackage(self):
    self.test_pkg.Install(self.device)
  #override
  def PushDataDeps(self):
    # TODO(frankf): Implement a general approach for copying/installing
    # once across test runners.
    # NOTE(review): this lookup keys on the device object, but the flag is
    # stored under str(self.device) at the bottom of this method — the
    # cache check presumably never hits; confirm before relying on it.
    if TestRunner._DEVICE_HAS_TEST_FILES.get(self.device, False):
      logging.warning('Already copied test files to device %s, skipping.',
                      str(self.device))
      return
    test_data = _GetDataFilesForTestSuite(self.test_pkg.GetApkName())
    if test_data:
      # Make sure SD card is ready.
      self.device.WaitUntilFullyBooted(timeout=20)
      for p in test_data:
        self.device.PushChangedFiles(
            os.path.join(constants.DIR_SOURCE_ROOT, p),
            os.path.join(self.device.GetExternalStoragePath(), p))
    # TODO(frankf): Specify test data in this file as opposed to passing
    # as command-line.
    # Each entry has the form 'device_subdir:host_relative_path'.
    for dest_host_pair in self.options.test_data:
      dst_src = dest_host_pair.split(':', 1)
      dst_layer = dst_src[0]
      host_src = dst_src[1]
      host_test_files_path = os.path.join(constants.DIR_SOURCE_ROOT,
                                          host_src)
      if os.path.exists(host_test_files_path):
        self.device.PushChangedFiles(
            host_test_files_path,
            '%s/%s/%s' % (
                self.device.GetExternalStoragePath(),
                TestRunner._DEVICE_DATA_DIR,
                dst_layer))
    self.tool.CopyFiles()
    TestRunner._DEVICE_HAS_TEST_FILES[str(self.device)] = True
  def _GetInstrumentationArgs(self):
    # Builds the '-e key value' argument dict passed to 'am instrument'.
    ret = {}
    if self.options.wait_for_debugger:
      ret['debug'] = 'true'
    if self.coverage_dir:
      ret['coverage'] = 'true'
      ret['coverageFile'] = self.coverage_device_file
    return ret
  def _TakeScreenshot(self, test):
    """Takes a screenshot from the device."""
    screenshot_name = os.path.join(constants.SCREENSHOTS_DIR, '%s.png' % test)
    logging.info('Taking screenshot named %s', screenshot_name)
    self.device.TakeScreenshot(screenshot_name)
  def SetUp(self):
    """Sets up the test harness and device before all tests are run."""
    super(TestRunner, self).SetUp()
    if not self.device.HasRoot():
      logging.warning('Unable to enable java asserts for %s, non rooted device',
                      str(self.device))
    else:
      if self.device.SetJavaAsserts(True):
        # TODO(jbudorick) How to best do shell restart after the
        # android_commands refactor?
        self.device.RunShellCommand('stop')
        self.device.RunShellCommand('start')
    # We give different default value to launch HTTP server based on shard index
    # because it may have race condition when multiple processes are trying to
    # launch lighttpd with same port at same time.
    self.LaunchTestHttpServer(
        os.path.join(constants.DIR_SOURCE_ROOT), self._lighttp_port)
    if self.flags:
      self.flags.AddFlags(['--disable-fre', '--enable-test-intents'])
      if self.options.device_flags:
        with open(self.options.device_flags) as device_flags_file:
          stripped_flags = (l.strip() for l in device_flags_file)
          self.flags.AddFlags([flag for flag in stripped_flags if flag])
  def TearDown(self):
    """Cleans up the test harness and saves outstanding data from test run."""
    if self.flags:
      self.flags.Restore()
    super(TestRunner, self).TearDown()
  def TestSetup(self, test):
    """Sets up the test harness for running a particular test.
    Args:
      test: The name of the test that will be run.
    """
    self.SetupPerfMonitoringIfNeeded(test)
    self._SetupIndividualTestTimeoutScale(test)
    self.tool.SetupEnvironment()
    # Make sure the forwarder is still running.
    self._RestartHttpServerForwarderIfNecessary()
    if self.coverage_dir:
      coverage_basename = '%s.ec' % test
      self.coverage_device_file = '%s/%s/%s' % (
          self.device.GetExternalStoragePath(),
          TestRunner._DEVICE_COVERAGE_DIR, coverage_basename)
      self.coverage_host_file = os.path.join(
          self.coverage_dir, coverage_basename)
  def _IsPerfTest(self, test):
    """Determines whether a test is a performance test.
    Args:
      test: The name of the test to be checked.
    Returns:
      Whether the test is annotated as a performance test.
    """
    return _PERF_TEST_ANNOTATION in self.test_pkg.GetTestAnnotations(test)
  def SetupPerfMonitoringIfNeeded(self, test):
    """Sets up performance monitoring if the specified test requires it.
    Args:
      test: The name of the test to be run.
    """
    if not self._IsPerfTest(test):
      return
    # Remove any stale profile output before the test runs.
    self.device.old_interface.Adb().SendCommand(
        'shell rm ' + TestRunner._DEVICE_PERF_OUTPUT_SEARCH_PREFIX)
    self.device.old_interface.StartMonitoringLogcat()
  def TestTeardown(self, test, result):
    """Cleans up the test harness after running a particular test.
    Depending on the options of this TestRunner this might handle performance
    tracking. This method will only be called if the test passed.
    Args:
      test: The name of the test that was just run.
      result: result for this test.
    """
    self.tool.CleanUpEnvironment()
    # The logic below relies on the test passing.
    if not result or not result.DidRunPass():
      return
    self.TearDownPerfMonitoring(test)
    if self.coverage_dir:
      self.device.PullFile(
          self.coverage_device_file, self.coverage_host_file)
      self.device.RunShellCommand(
          'rm -f %s' % self.coverage_device_file)
  def TearDownPerfMonitoring(self, test):
    """Cleans up performance monitoring if the specified test required it.
    Args:
      test: The name of the test that was just run.
    Raises:
      Exception: if there's anything wrong with the perf data.
    """
    if not self._IsPerfTest(test):
      return
    raw_test_name = test.split('#')[1]
    # Wait and grab annotation data so we can figure out which traces to parse
    regex = self.device.old_interface.WaitForLogMatch(
        re.compile('\*\*PERFANNOTATION\(' + raw_test_name + '\)\:(.*)'), None)
    # If the test is set to run on a specific device type only (IE: only
    # tablet or phone) and it is being run on the wrong device, the test
    # just quits and does not do anything. The java test harness will still
    # print the appropriate annotation for us, but will add --NORUN-- for
    # us so we know to ignore the results.
    # The --NORUN-- tag is managed by MainActivityTestBase.java
    if regex.group(1) != '--NORUN--':
      # Obtain the relevant perf data. The data is dumped to a
      # JSON formatted file.
      json_string = self.device.ReadFile(
          '/data/data/com.google.android.apps.chrome/files/PerfTestData.txt',
          as_root=True)
      if json_string:
        # NOTE(review): ReadFile apparently returns a list of lines here
        # (joined below) — confirm against the device API in use.
        json_string = '\n'.join(json_string)
      else:
        raise Exception('Perf file does not exist or is empty')
      if self.options.save_perf_json:
        json_local_file = '/tmp/chromium-android-perf-json-' + raw_test_name
        with open(json_local_file, 'w') as f:
          f.write(json_string)
        logging.info('Saving Perf UI JSON from test ' +
                     test + ' to ' + json_local_file)
      # Annotation payload: ';'-separated sets of 'trace,perf_name,units'.
      raw_perf_data = regex.group(1).split(';')
      for raw_perf_set in raw_perf_data:
        if raw_perf_set:
          perf_set = raw_perf_set.split(',')
          if len(perf_set) != 3:
            raise Exception('Unexpected number of tokens in perf annotation '
                            'string: ' + raw_perf_set)
          # Process the performance data
          result = json_perf_parser.GetAverageRunInfoFromJSONString(json_string,
                                                                    perf_set[0])
          perf_tests_results_helper.PrintPerfResult(perf_set[1], perf_set[2],
                                                    [result['average']],
                                                    result['units'])
  def _SetupIndividualTestTimeoutScale(self, test):
    # Propagates the per-test timeout multiplier to the device-side tooling.
    timeout_scale = self._GetIndividualTestTimeoutScale(test)
    valgrind_tools.SetChromeTimeoutScale(self.device, timeout_scale)
  def _GetIndividualTestTimeoutScale(self, test):
    """Returns the timeout scale for the given |test|."""
    annotations = self.test_pkg.GetTestAnnotations(test)
    timeout_scale = 1
    if 'TimeoutScale' in annotations:
      for annotation in annotations:
        scale_match = re.match('TimeoutScale:([0-9]+)', annotation)
        if scale_match:
          timeout_scale = int(scale_match.group(1))
    if self.options.wait_for_debugger:
      # Leave ample time when a human is attached with a debugger.
      timeout_scale *= 100
    return timeout_scale
  def _GetIndividualTestTimeoutSecs(self, test):
    """Returns the timeout in seconds for the given |test|."""
    # Timeouts derive from the standard Android test-size annotations;
    # the first matching annotation wins, largest sizes checked first.
    annotations = self.test_pkg.GetTestAnnotations(test)
    if 'Manual' in annotations:
      return 10 * 60 * 60
    if 'IntegrationTest' in annotations:
      return 30 * 60
    if 'External' in annotations:
      return 10 * 60
    if 'EnormousTest' in annotations:
      return 10 * 60
    if 'LargeTest' in annotations or _PERF_TEST_ANNOTATION in annotations:
      return 5 * 60
    if 'MediumTest' in annotations:
      return 3 * 60
    if 'SmallTest' in annotations:
      return 1 * 60
    logging.warn(("Test size not found in annotations for test '{0}', using " +
                  "1 minute for timeout.").format(test))
    return 1 * 60
  def _RunTest(self, test, timeout):
    """Runs a single instrumentation test.
    Args:
      test: Test class/method.
      timeout: Timeout time in seconds.
    Returns:
      The raw output of am instrument as a list of lines.
    """
    # Build the 'am instrument' command
    instrumentation_path = (
        '%s/%s' % (self.test_pkg.GetPackageName(), self.options.test_runner))
    cmd = ['am', 'instrument', '-r']
    for k, v in self._GetInstrumentationArgs().iteritems():
      cmd.extend(['-e', k, "'%s'" % v])
    cmd.extend(['-e', 'class', "'%s'" % test])
    cmd.extend(['-w', instrumentation_path])
    return self.device.RunShellCommand(cmd, timeout=timeout, retries=0)
  @staticmethod
  def _ParseAmInstrumentRawOutput(raw_output):
    """Parses the output of an |am instrument -r| call.
    Args:
      raw_output: the output of an |am instrument -r| call as a list of lines
    Returns:
      A 3-tuple containing:
        - the instrumentation code as an integer
        - the instrumentation result as a list of lines
        - the instrumentation statuses received as a list of 2-tuples
          containing:
          - the status code as an integer
          - the bundle dump as a dict mapping string keys to a list of
            strings, one for each line.
    """
    INSTR_STATUS = 'INSTRUMENTATION_STATUS: '
    INSTR_STATUS_CODE = 'INSTRUMENTATION_STATUS_CODE: '
    INSTR_RESULT = 'INSTRUMENTATION_RESULT: '
    INSTR_CODE = 'INSTRUMENTATION_CODE: '
    # `last` tracks which prefix the previous line carried, so that lines
    # without any prefix are appended to the preceding multi-line value.
    last = None
    instr_code = None
    instr_result = []
    instr_statuses = []
    bundle = {}
    for line in raw_output:
      if line.startswith(INSTR_STATUS):
        instr_var = line[len(INSTR_STATUS):]
        if '=' in instr_var:
          k, v = instr_var.split('=', 1)
          bundle[k] = [v]
          last = INSTR_STATUS
          last_key = k
        else:
          logging.debug('Unknown "%s" line: %s' % (INSTR_STATUS, line))
      elif line.startswith(INSTR_STATUS_CODE):
        instr_status = line[len(INSTR_STATUS_CODE):]
        # A status code terminates the current bundle.
        instr_statuses.append((int(instr_status), bundle))
        bundle = {}
        last = INSTR_STATUS_CODE
      elif line.startswith(INSTR_RESULT):
        instr_result.append(line[len(INSTR_RESULT):])
        last = INSTR_RESULT
      elif line.startswith(INSTR_CODE):
        instr_code = int(line[len(INSTR_CODE):])
        last = INSTR_CODE
      elif last == INSTR_STATUS:
        # Continuation of a multi-line status value.
        bundle[last_key].append(line)
      elif last == INSTR_RESULT:
        instr_result.append(line)
    return (instr_code, instr_result, instr_statuses)
  def _GenerateTestResult(self, test, instr_statuses, start_ms, duration_ms):
    """Generate the result of |test| from |instr_statuses|.
    Args:
      instr_statuses: A list of 2-tuples containing:
        - the status code as an integer
        - the bundle dump as a dict mapping string keys to string values
        Note that this is the same as the third item in the 3-tuple returned by
        |_ParseAmInstrumentRawOutput|.
      start_ms: The start time of the test in milliseconds.
      duration_ms: The duration of the test in milliseconds.
    Returns:
      An InstrumentationTestResult object.
    """
    # Status codes emitted by Android's InstrumentationTestRunner.
    INSTR_STATUS_CODE_START = 1
    INSTR_STATUS_CODE_OK = 0
    INSTR_STATUS_CODE_ERROR = -1
    INSTR_STATUS_CODE_FAIL = -2
    log = ''
    result_type = base_test_result.ResultType.UNKNOWN
    for status_code, bundle in instr_statuses:
      if status_code == INSTR_STATUS_CODE_START:
        pass
      elif status_code == INSTR_STATUS_CODE_OK:
        bundle_test = '%s#%s' % (
            ''.join(bundle.get('class', [''])),
            ''.join(bundle.get('test', [''])))
        skipped = ''.join(bundle.get('test_skipped', ['']))
        if (test == bundle_test and
            result_type == base_test_result.ResultType.UNKNOWN):
          result_type = base_test_result.ResultType.PASS
        elif skipped.lower() in ('true', '1', 'yes'):
          result_type = base_test_result.ResultType.SKIP
          logging.info('Skipped ' + test)
      else:
        if status_code not in (INSTR_STATUS_CODE_ERROR,
                               INSTR_STATUS_CODE_FAIL):
          logging.info('Unrecognized status code %d. Handling as an error.',
                       status_code)
        result_type = base_test_result.ResultType.FAIL
        if 'stack' in bundle:
          log = '\n'.join(bundle['stack'])
        # Dismiss any error dialogs. Limit the number in case we have an error
        # loop or we are failing to dismiss.
        for _ in xrange(10):
          package = self.device.old_interface.DismissCrashDialogIfNeeded()
          if not package:
            break
          # Assume test package convention of ".test" suffix
          if package in self.test_pkg.GetPackageName():
            result_type = base_test_result.ResultType.CRASH
            break
    return test_result.InstrumentationTestResult(
        test, result_type, start_ms, duration_ms, log=log)
  #override
  def RunTest(self, test):
    results = base_test_result.TestRunResults()
    # Effective timeout = size-based seconds * annotation scale * tool scale.
    timeout = (self._GetIndividualTestTimeoutSecs(test) *
               self._GetIndividualTestTimeoutScale(test) *
               self.tool.GetTimeoutScale())
    start_ms = 0
    duration_ms = 0
    try:
      self.TestSetup(test)
      time_ms = lambda: int(time.time() * 1000)
      start_ms = time_ms()
      raw_output = self._RunTest(test, timeout)
      duration_ms = time_ms() - start_ms
      # Parse the test output
      _, _, statuses = self._ParseAmInstrumentRawOutput(raw_output)
      result = self._GenerateTestResult(test, statuses, start_ms, duration_ms)
      results.AddResult(result)
    except device_errors.CommandTimeoutError as e:
      results.AddResult(test_result.InstrumentationTestResult(
          test, base_test_result.ResultType.TIMEOUT, start_ms, duration_ms,
          log=str(e) or 'No information'))
    except device_errors.DeviceUnreachableError as e:
      results.AddResult(test_result.InstrumentationTestResult(
          test, base_test_result.ResultType.CRASH, start_ms, duration_ms,
          log=str(e) or 'No information'))
    self.TestTeardown(test, results)
    return (results, None if results.DidRunPass() else test)
|
argentumproject/electrum-arg | refs/heads/master | gui/kivy/uix/dialogs/wallets.py | 1 | from kivy.app import App
from kivy.factory import Factory
from kivy.properties import ObjectProperty
from kivy.lang import Builder
from electrum_arg_gui.kivy.i18n import _
from electrum_arg.util import base_units
import os
from label_dialog import LabelDialog
Builder.load_string('''
#:import os os
<WalletDialog@Popup>:
title: _('Wallets')
id: popup
path: os.path.dirname(app.get_wallet_path())
BoxLayout:
orientation: 'vertical'
padding: '10dp'
FileChooserListView:
id: wallet_selector
dirselect: False
filter_dirs: True
filter: '*.*'
path: root.path
rootpath: root.path
size_hint_y: 0.6
Widget
size_hint_y: 0.1
GridLayout:
cols: 3
size_hint_y: 0.1
Button:
id: open_button
size_hint: 0.1, None
height: '48dp'
text: _('New')
on_release:
popup.dismiss()
root.new_wallet(app, wallet_selector.path)
Button:
id: open_button
size_hint: 0.1, None
height: '48dp'
text: _('Open')
disabled: not wallet_selector.selection
on_release:
popup.dismiss()
root.open_wallet(app)
''')
class WalletDialog(Factory.Popup):
    """Popup that lets the user create a new wallet or open an existing one.

    The widget layout comes from the kv rule registered above; the buttons
    there call back into new_wallet() / open_wallet().
    """
    def new_wallet(self, app, dirname):
        """Ask for a wallet name, then load that wallet from `dirname`."""
        def on_name_entered(name):
            # An empty name means the user entered nothing; do not create.
            if not name:
                return
            app.load_wallet_by_name(os.path.join(dirname, name))
        dialog = LabelDialog(_('Enter wallet name'), '', on_name_entered)
        dialog.open()
    def open_wallet(self, app):
        """Load the wallet file currently selected in the file chooser."""
        selected = self.ids.wallet_selector.selection[0]
        app.load_wallet_by_name(selected)
|
jcftang/ansible | refs/heads/devel | lib/ansible/errors/__init__.py | 40 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.errors.yaml_strings import ( YAML_POSITION_DETAILS,
YAML_COMMON_UNQUOTED_VARIABLE_ERROR,
YAML_COMMON_DICT_ERROR,
YAML_COMMON_UNQUOTED_COLON_ERROR,
YAML_COMMON_PARTIALLY_QUOTED_LINE_ERROR,
YAML_COMMON_UNBALANCED_QUOTES_ERROR,
YAML_COMMON_LEADING_TAB_ERROR)
from ansible.module_utils._text import to_native, to_text
class AnsibleError(Exception):
    '''
    This is the base class for all errors raised from Ansible code,
    and can be instantiated with two optional parameters beyond the
    error message to control whether detailed information is displayed
    when the error occurred while parsing a data file of some kind.
    Usage:
        raise AnsibleError('some message here', obj=obj, show_content=True)
    Where "obj" is some subclass of ansible.parsing.yaml.objects.AnsibleBaseYAMLObject,
    which should be returned by the DataLoader() class.
    '''
    def __init__(self, message="", obj=None, show_content=True, suppress_extended_error=False):
        # we import this here to prevent an import loop problem,
        # since the objects code also imports ansible.errors
        from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject
        self._obj = obj
        self._show_content = show_content
        if obj and isinstance(obj, AnsibleBaseYAMLObject):
            # Only YAML-derived objects carry position info usable for the
            # extended (file/line/column) error report.
            extended_error = self._get_extended_error()
            if extended_error and not suppress_extended_error:
                self.message = '%s\n\n%s' % (to_native(message), to_native(extended_error))
            else:
                self.message = '%s' % to_native(message)
        else:
            self.message = '%s' % to_native(message)
    def __str__(self):
        return self.message
    def __repr__(self):
        return self.message
    def _get_error_lines_from_file(self, file_name, line_number):
        '''
        Returns the line in the file which corresponds to the reported error
        location, as well as the line preceding it (if the error did not
        occur on the first line), to provide context to the error.
        '''
        target_line = ''
        prev_line = ''
        with open(file_name, 'r') as f:
            lines = f.readlines()
            # line_number is 0-based here (caller passes line_number - 1).
            target_line = lines[line_number]
            if line_number > 0:
                prev_line = lines[line_number - 1]
        return (target_line, prev_line)
    def _get_extended_error(self):
        '''
        Given an object reporting the location of the exception in a file, return
        detailed information regarding it including:
        * the line which caused the error as well as the one preceding it
        * causes and suggested remedies for common syntax errors
        If this error was created with show_content=False, the reporting of content
        is suppressed, as the file contents may be sensitive (ie. vault data).
        '''
        error_message = ''
        try:
            (src_file, line_number, col_number) = self._obj.ansible_pos
            error_message += YAML_POSITION_DETAILS % (src_file, line_number, col_number)
            if src_file not in ('<string>', '<unicode>') and self._show_content:
                (target_line, prev_line) = self._get_error_lines_from_file(src_file, line_number - 1)
                target_line = to_text(target_line)
                prev_line = to_text(prev_line)
                if target_line:
                    stripped_line = target_line.replace(" ","")
                    arrow_line = (" " * (col_number-1)) + "^ here"
                    #header_line = ("=" * 73)
                    error_message += "\nThe offending line appears to be:\n\n%s\n%s\n%s\n" % (prev_line.rstrip(), target_line.rstrip(), arrow_line)
                    # TODO: There may be cases where there is a valid tab in a line that has other errors.
                    if '\t' in target_line:
                        error_message += YAML_COMMON_LEADING_TAB_ERROR
                    # common error/remediation checking here:
                    # check for unquoted vars starting lines
                    # NOTE(review): the right-hand `or` means this fires unless
                    # BOTH quote forms ("{{ and '{{) appear in the line —
                    # `and` may have been intended; confirm before changing.
                    if ('{{' in target_line and '}}' in target_line) and ('"{{' not in target_line or "'{{" not in target_line):
                        error_message += YAML_COMMON_UNQUOTED_VARIABLE_ERROR
                    # check for common dictionary mistakes
                    elif ":{{" in stripped_line and "}}" in stripped_line:
                        error_message += YAML_COMMON_DICT_ERROR
                    # check for common unquoted colon mistakes
                    elif len(target_line) and len(target_line) > 1 and len(target_line) > col_number and target_line[col_number] == ":" and target_line.count(':') > 1:
                        error_message += YAML_COMMON_UNQUOTED_COLON_ERROR
                    # otherwise, check for some common quoting mistakes
                    else:
                        parts = target_line.split(":")
                        if len(parts) > 1:
                            middle = parts[1].strip()
                            match = False
                            unbalanced = False
                            if middle.startswith("'") and not middle.endswith("'"):
                                match = True
                            elif middle.startswith('"') and not middle.endswith('"'):
                                match = True
                            # NOTE(review): due to and/or precedence this parses as
                            # (len(middle) > 0 and ... and count("'") > 2) or (count('"') > 2),
                            # so a double-quote count alone marks the line unbalanced even
                            # when `middle` is empty — parentheses were probably intended;
                            # confirm before changing.
                            if len(middle) > 0 and middle[0] in [ '"', "'" ] and middle[-1] in [ '"', "'" ] and target_line.count("'") > 2 or target_line.count('"') > 2:
                                unbalanced = True
                            if match:
                                error_message += YAML_COMMON_PARTIALLY_QUOTED_LINE_ERROR
                            if unbalanced:
                                error_message += YAML_COMMON_UNBALANCED_QUOTES_ERROR
        except (IOError, TypeError):
            error_message += '\n(could not open file to display line)'
        except IndexError:
            error_message += '\n(specified line no longer in file, maybe it changed?)'
        return error_message
class AnsibleOptionsError(AnsibleError):
    '''Bad or incomplete options were passed on the command line.'''
    pass
class AnsibleParserError(AnsibleError):
    '''Something detected early that is wrong about a playbook or data file.'''
    pass
class AnsibleInternalError(AnsibleError):
    '''Internal safeguards tripped; something happened in the code that should never happen.'''
    pass
class AnsibleRuntimeError(AnsibleError):
    '''Ansible had a problem while running a playbook.'''
    pass
class AnsibleModuleError(AnsibleRuntimeError):
    '''A module failed somehow.'''
    pass
class AnsibleConnectionFailure(AnsibleRuntimeError):
    '''The transport / connection_plugin had a fatal error.'''
    pass
class AnsibleFilterError(AnsibleRuntimeError):
    '''A templating failure occurred in a filter.'''
    pass
class AnsibleLookupError(AnsibleRuntimeError):
    '''A lookup plugin failed.'''
    pass
class AnsibleCallbackError(AnsibleRuntimeError):
    '''A callback plugin failed.'''
    pass
class AnsibleUndefinedVariable(AnsibleRuntimeError):
    '''A templating failure caused by an undefined variable.'''
    pass
class AnsibleFileNotFound(AnsibleRuntimeError):
    '''A referenced file is missing.'''
    pass
class AnsibleModuleExit(Exception):
    '''Local module exit.
    Note: derives from plain Exception (not AnsibleError) and carries the
    module's result object instead of a message string.
    '''
    def __init__(self, result):
        # The module's result payload, consumed by whoever catches this.
        self.result = result
|
WQuanfeng/wagtail | refs/heads/master | wagtail/tests/testapp/migrations/0006_image_file_size.py | 23 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Adds a nullable, non-editable ``file_size`` column to both custom image
    # test models.  NOTE(review): purpose inferred from the field name --
    # presumably a cached file size; confirm against the wagtailimages app.
    dependencies = [
        ('tests', '0005_streampage'),
    ]
    operations = [
        migrations.AddField(
            model_name='customimagewithadminformfields',
            name='file_size',
            field=models.PositiveIntegerField(null=True, editable=False),
        ),
        migrations.AddField(
            model_name='customimagewithoutadminformfields',
            name='file_size',
            field=models.PositiveIntegerField(null=True, editable=False),
        ),
    ]
|
40223247/2015test2-1 | refs/heads/master | static/Brython3.1.1-20150328-091302/Lib/tokenize.py | 728 | """Tokenization help for Python programs.
tokenize(readline) is a generator that breaks a stream of bytes into
Python tokens. It decodes the bytes according to PEP-0263 for
determining source file encoding.
It accepts a readline-like method which is called repeatedly to get the
next line of input (or b"" for EOF). It generates 5-tuples with these
members:
the token type (see token.py)
the token (a string)
the starting (row, column) indices of the token (a 2-tuple of ints)
the ending (row, column) indices of the token (a 2-tuple of ints)
the original line (string)
It is designed to match the working of the Python tokenizer exactly, except
that it produces COMMENT tokens for comments and gives type OP for all
operators. Additionally, all token lists start with an ENCODING token
which tells you which encoding was used to decode the bytes stream.
"""
__author__ = 'Ka-Ping Yee <ping@lfw.org>'
__credits__ = ('GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, '
'Skip Montanaro, Raymond Hettinger, Trent Nelson, '
'Michael Foord')
import builtins
import re
import sys
from token import *
from codecs import lookup, BOM_UTF8
import collections
from io import TextIOWrapper
# PEP 263 encoding-declaration cookie, e.g. "# -*- coding: utf-8 -*-".
cookie_re = re.compile(r'^[ \t\f]*#.*coding[:=][ \t]*([-\w.]+)', re.ASCII)
import token
__all__ = token.__all__ + ["COMMENT", "tokenize", "detect_encoding",
                           "NL", "untokenize", "ENCODING", "TokenInfo"]
del token
# Extra token types layered on top of the token module: comments, non-logical
# newlines, and the synthetic ENCODING token that tokenize() yields first.
COMMENT = N_TOKENS
tok_name[COMMENT] = 'COMMENT'
NL = N_TOKENS + 1
tok_name[NL] = 'NL'
ENCODING = N_TOKENS + 2
tok_name[ENCODING] = 'ENCODING'
N_TOKENS += 3
# Maps operator/bracket text to its specific token type; used by
# TokenInfo.exact_type (plain tokenization reports all of these as OP).
EXACT_TOKEN_TYPES = {
    '(': LPAR,
    ')': RPAR,
    '[': LSQB,
    ']': RSQB,
    ':': COLON,
    ',': COMMA,
    ';': SEMI,
    '+': PLUS,
    '-': MINUS,
    '*': STAR,
    '/': SLASH,
    '|': VBAR,
    '&': AMPER,
    '<': LESS,
    '>': GREATER,
    '=': EQUAL,
    '.': DOT,
    '%': PERCENT,
    '{': LBRACE,
    '}': RBRACE,
    '==': EQEQUAL,
    '!=': NOTEQUAL,
    '<=': LESSEQUAL,
    '>=': GREATEREQUAL,
    '~': TILDE,
    '^': CIRCUMFLEX,
    '<<': LEFTSHIFT,
    '>>': RIGHTSHIFT,
    '**': DOUBLESTAR,
    '+=': PLUSEQUAL,
    '-=': MINEQUAL,
    '*=': STAREQUAL,
    '/=': SLASHEQUAL,
    '%=': PERCENTEQUAL,
    '&=': AMPEREQUAL,
    '|=': VBAREQUAL,
    '^=': CIRCUMFLEXEQUAL,
    '<<=': LEFTSHIFTEQUAL,
    '>>=': RIGHTSHIFTEQUAL,
    '**=': DOUBLESTAREQUAL,
    '//': DOUBLESLASH,
    '//=': DOUBLESLASHEQUAL,
    '@': AT
}
class TokenInfo(collections.namedtuple('TokenInfo', 'type string start end line')):
    """Named 5-tuple describing one token: type, string, start, end, line."""

    def __repr__(self):
        # Show the numeric type together with its symbolic name, e.g. "1 (NAME)".
        described_type = '%d (%s)' % (self.type, tok_name[self.type])
        return ('TokenInfo(type=%s, string=%r, start=%r, end=%r, line=%r)'
                % self._replace(type=described_type))

    @property
    def exact_type(self):
        """Operator-specific type for OP tokens; plain ``type`` otherwise."""
        if self.type != OP:
            return self.type
        return EXACT_TOKEN_TYPES.get(self.string, self.type)
def group(*choices):
    """Join the alternatives into one parenthesized regex group: (a|b|c)."""
    return '({})'.format('|'.join(choices))
def any(*choices):
    """Regex matching zero or more repetitions of any alternative.

    NOTE: intentionally shadows builtins.any, as in the original module.
    """
    return '(' + '|'.join(choices) + ')*'
def maybe(*choices):
    """Regex matching zero or one occurrence of any alternative."""
    return '(' + '|'.join(choices) + ')?'
# Note: we use unicode matching for names ("\w") but ascii matching for
# number literals.
Whitespace = r'[ \f\t]*'
Comment = r'#[^\r\n]*'
Ignore = Whitespace + any(r'\\\r?\n' + Whitespace) + maybe(Comment)
Name = r'\w+'
Hexnumber = r'0[xX][0-9a-fA-F]+'
Binnumber = r'0[bB][01]+'
Octnumber = r'0[oO][0-7]+'
Decnumber = r'(?:0+|[1-9][0-9]*)'
Intnumber = group(Hexnumber, Binnumber, Octnumber, Decnumber)
Exponent = r'[eE][-+]?[0-9]+'
Pointfloat = group(r'[0-9]+\.[0-9]*', r'\.[0-9]+') + maybe(Exponent)
Expfloat = r'[0-9]+' + Exponent
Floatnumber = group(Pointfloat, Expfloat)
Imagnumber = group(r'[0-9]+[jJ]', Floatnumber + r'[jJ]')
Number = group(Imagnumber, Floatnumber, Intnumber)
# Optional string prefix: b/r (either order) or u, in either case.
StringPrefix = r'(?:[bB][rR]?|[rR][bB]?|[uU])?'
# Tail end of ' string.
Single = r"[^'\\]*(?:\\.[^'\\]*)*'"
# Tail end of " string.
Double = r'[^"\\]*(?:\\.[^"\\]*)*"'
# Tail end of ''' string.
Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''"
# Tail end of """ string.
Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""'
Triple = group(StringPrefix + "'''", StringPrefix + '"""')
# Single-line ' or " string.
String = group(StringPrefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*'",
               StringPrefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*"')
# Because of leftmost-then-longest match semantics, be sure to put the
# longest operators first (e.g., if = came before ==, == would get
# recognized as two instances of =).
Operator = group(r"\*\*=?", r">>=?", r"<<=?", r"!=",
                 r"//=?", r"->",
                 r"[+\-*/%&|^=<>]=?",
                 r"~")
Bracket = '[][(){}]'
Special = group(r'\r?\n', r'\.\.\.', r'[:;.,@]')
Funny = group(Operator, Bracket, Special)
PlainToken = group(Number, Funny, String, Name)
Token = Ignore + PlainToken
# First (or only) line of ' or " string.
ContStr = group(StringPrefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
                group("'", r'\\\r?\n'),
                StringPrefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*' +
                group('"', r'\\\r?\n'))
PseudoExtras = group(r'\\\r?\n|\Z', Comment, Triple)
# PseudoToken is the workhorse pattern used by _tokenize() to scan one token.
PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name)
def _compile(expr):
return re.compile(expr, re.UNICODE)
# Maps an opening quote (with optional prefix) to the regex that matches the
# remainder of that string literal; prefixes alone map to None (incomplete).
# NOTE(review): the "R'''"/'R"""' entries appear twice -- the duplicate is
# harmless but redundant.
endpats = {"'": Single, '"': Double,
           "'''": Single3, '"""': Double3,
           "r'''": Single3, 'r"""': Double3,
           "b'''": Single3, 'b"""': Double3,
           "R'''": Single3, 'R"""': Double3,
           "B'''": Single3, 'B"""': Double3,
           "br'''": Single3, 'br"""': Double3,
           "bR'''": Single3, 'bR"""': Double3,
           "Br'''": Single3, 'Br"""': Double3,
           "BR'''": Single3, 'BR"""': Double3,
           "rb'''": Single3, 'rb"""': Double3,
           "Rb'''": Single3, 'Rb"""': Double3,
           "rB'''": Single3, 'rB"""': Double3,
           "RB'''": Single3, 'RB"""': Double3,
           "u'''": Single3, 'u"""': Double3,
           "R'''": Single3, 'R"""': Double3,
           "U'''": Single3, 'U"""': Double3,
           'r': None, 'R': None, 'b': None, 'B': None,
           'u': None, 'U': None}
# All recognized triple-quote openers (every prefix/case combination).
triple_quoted = {}
for t in ("'''", '"""',
          "r'''", 'r"""', "R'''", 'R"""',
          "b'''", 'b"""', "B'''", 'B"""',
          "br'''", 'br"""', "Br'''", 'Br"""',
          "bR'''", 'bR"""', "BR'''", 'BR"""',
          "rb'''", 'rb"""', "rB'''", 'rB"""',
          "Rb'''", 'Rb"""', "RB'''", 'RB"""',
          "u'''", 'u"""', "U'''", 'U"""',
          ):
    triple_quoted[t] = t
# All recognized single-quote openers (every prefix/case combination).
single_quoted = {}
for t in ("'", '"',
          "r'", 'r"', "R'", 'R"',
          "b'", 'b"', "B'", 'B"',
          "br'", 'br"', "Br'", 'Br"',
          "bR'", 'bR"', "BR'", 'BR"' ,
          "rb'", 'rb"', "rB'", 'rB"',
          "Rb'", 'Rb"', "RB'", 'RB"' ,
          "u'", 'u"', "U'", 'U"',
          ):
    single_quoted[t] = t
# Tab stops used when measuring indentation columns.
tabsize = 8
class TokenError(Exception):
    """Raised when tokenization hits EOF inside an unterminated construct."""
class StopTokenizing(Exception):
    """Internal signal used to abort tokenization early."""
class Untokenizer:
    """Rebuild source text from an iterable of token tuples.

    The (row, col) position of the previously written token is tracked so
    the original inter-token whitespace can be reproduced.  Full 5-tuples
    take the exact path; (type, string) 2-tuples fall back to compat(),
    which only guarantees tokenizable (not identical) output.
    """

    def __init__(self):
        self.tokens = []
        self.prev_row = 1
        self.prev_col = 0
        self.encoding = None

    def add_whitespace(self, start):
        """Append whitespace so the next token begins at *start*.

        Bug fix: the original asserted ``row <= self.prev_row``, which is
        inverted -- token positions are non-decreasing, so moving *backwards*
        is the error case (now a ValueError).  Tokens starting on a later row
        are bridged with backslash-newline continuations so multi-row token
        streams no longer crash (cf. CPython issue #16223).
        """
        row, col = start
        if row < self.prev_row or row == self.prev_row and col < self.prev_col:
            raise ValueError("start ({},{}) precedes previous end ({},{})"
                             .format(row, col, self.prev_row, self.prev_col))
        row_offset = row - self.prev_row
        if row_offset:
            self.tokens.append("\\\n" * row_offset)
            self.prev_col = 0
        col_offset = col - self.prev_col
        if col_offset:
            self.tokens.append(" " * col_offset)

    def untokenize(self, iterable):
        """Render *iterable* of token tuples to a single source string."""
        for t in iterable:
            if len(t) == 2:
                # Positions missing: fall back to the lossy path for the rest.
                self.compat(t, iterable)
                break
            tok_type, token, start, end, line = t
            if tok_type == ENCODING:
                self.encoding = token
                continue
            self.add_whitespace(start)
            self.tokens.append(token)
            self.prev_row, self.prev_col = end
            if tok_type in (NEWLINE, NL):
                self.prev_row += 1
                self.prev_col = 0
        return "".join(self.tokens)

    def compat(self, token, iterable):
        """Lossy fallback for (type, string) 2-tuples: space tokens apart."""
        startline = False
        indents = []
        toks_append = self.tokens.append
        toknum, tokval = token
        if toknum in (NAME, NUMBER):
            tokval += ' '
        if toknum in (NEWLINE, NL):
            startline = True
        prevstring = False
        for tok in iterable:
            toknum, tokval = tok[:2]
            if toknum == ENCODING:
                self.encoding = tokval
                continue
            if toknum in (NAME, NUMBER):
                tokval += ' '
            # Insert a space between two consecutive strings
            if toknum == STRING:
                if prevstring:
                    tokval = ' ' + tokval
                prevstring = True
            else:
                prevstring = False
            if toknum == INDENT:
                indents.append(tokval)
                continue
            elif toknum == DEDENT:
                indents.pop()
                continue
            elif toknum in (NEWLINE, NL):
                startline = True
            elif startline and indents:
                toks_append(indents[-1])
                startline = False
            toks_append(tokval)
def untokenize(iterable):
    """Transform tokens back into Python source code.
    It returns a bytes object, encoded using the ENCODING
    token, which is the first token sequence output by tokenize.
    Each element returned by the iterable must be a token sequence
    with at least two elements, a token number and token value.  If
    only two tokens are passed, the resulting output is poor.
    Round-trip invariant for full input:
        Untokenized source will match input source exactly
    Round-trip invariant for limited input:
        # Output bytes will tokenize back to the input
        t1 = [tok[:2] for tok in tokenize(f.readline)]
        newcode = untokenize(t1)
        readline = BytesIO(newcode).readline
        t2 = [tok[:2] for tok in tokenize(readline)]
        assert t1 == t2
    """
    converter = Untokenizer()
    source_text = converter.untokenize(iterable)
    if converter.encoding is None:
        return source_text
    return source_text.encode(converter.encoding)
def _get_normal_name(orig_enc):
"""Imitates get_normal_name in tokenizer.c."""
# Only care about the first 12 characters.
enc = orig_enc[:12].lower().replace("_", "-")
if enc == "utf-8" or enc.startswith("utf-8-"):
return "utf-8"
if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \
enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")):
return "iso-8859-1"
return orig_enc
def detect_encoding(readline):
    """
    The detect_encoding() function is used to detect the encoding that should
    be used to decode a Python source file. It requires one argument, readline,
    in the same way as the tokenize() generator.
    It will call readline a maximum of twice, and return the encoding used
    (as a string) and a list of any lines (left as bytes) it has read in.
    It detects the encoding from the presence of a utf-8 bom or an encoding
    cookie as specified in pep-0263. If both a bom and a cookie are present,
    but disagree, a SyntaxError will be raised. If the encoding cookie is an
    invalid charset, raise a SyntaxError. Note that if a utf-8 bom is found,
    'utf-8-sig' is returned.
    If no encoding is specified, then the default of 'utf-8' will be returned.
    """
    # Grab the stream's name (if readline is a bound method of a file-like
    # object) purely to improve error messages.
    try:
        filename = readline.__self__.name
    except AttributeError:
        filename = None
    bom_found = False
    encoding = None
    default = 'utf-8'
    def read_or_stop():
        # One raw line, or b'' at end of input.
        try:
            return readline()
        except StopIteration:
            return b''
    def find_cookie(line):
        # Return the declared encoding on *line*, or None if there is none.
        try:
            # Decode as UTF-8. Either the line is an encoding declaration,
            # in which case it should be pure ASCII, or it must be UTF-8
            # per default encoding.
            line_string = line.decode('utf-8')
        except UnicodeDecodeError:
            msg = "invalid or missing encoding declaration"
            if filename is not None:
                msg = '{} for {!r}'.format(msg, filename)
            raise SyntaxError(msg)
        match = cookie_re.match(line_string)
        if not match:
            return None
        encoding = _get_normal_name(match.group(1))
        try:
            codec = lookup(encoding)
        except LookupError:
            # This behaviour mimics the Python interpreter
            if filename is None:
                msg = "unknown encoding: " + encoding
            else:
                msg = "unknown encoding for {!r}: {}".format(filename,
                        encoding)
            raise SyntaxError(msg)
        if bom_found:
            # A cookie that contradicts the BOM is an error.
            if encoding != 'utf-8':
                # This behaviour mimics the Python interpreter
                if filename is None:
                    msg = 'encoding problem: utf-8'
                else:
                    msg = 'encoding problem for {!r}: utf-8'.format(filename)
                raise SyntaxError(msg)
            encoding += '-sig'
        return encoding
    first = read_or_stop()
    if first.startswith(BOM_UTF8):
        bom_found = True
        first = first[3:]
        default = 'utf-8-sig'
    if not first:
        return default, []
    encoding = find_cookie(first)
    if encoding:
        return encoding, [first]
    # PEP 263 allows the cookie on the second line as well.
    second = read_or_stop()
    if not second:
        return default, [first]
    encoding = find_cookie(second)
    if encoding:
        return encoding, [first, second]
    return default, [first, second]
def open(filename):
    """Open a file in read only mode using the encoding detected by
    detect_encoding().
    """
    buffer = builtins.open(filename, 'rb')
    try:
        encoding, lines = detect_encoding(buffer.readline)
        # Rewind so the caller sees the file from the beginning, including
        # the BOM/cookie lines detect_encoding() consumed.
        buffer.seek(0)
        text = TextIOWrapper(buffer, encoding, line_buffering=True)
        text.mode = 'r'
        return text
    except:
        # Bug fix: don't leak the raw file object when encoding detection or
        # wrapping fails (mirrors CPython bpo-23840); re-raise unchanged.
        buffer.close()
        raise
def tokenize(readline):
    """
    The tokenize() generator requires one argument, readline, which
    must be a callable object which provides the same interface as the
    readline() method of built-in file objects. Each call to the function
    should return one line of input as bytes. Alternately, readline
    can be a callable function terminating with StopIteration:
        readline = open(myfile, 'rb').__next__ # Example of alternate readline
    The generator produces 5-tuples with these members: the token type; the
    token string; a 2-tuple (srow, scol) of ints specifying the row and
    column where the token begins in the source; a 2-tuple (erow, ecol) of
    ints specifying the row and column where the token ends in the source;
    and the line on which the token was found. The line passed is the
    logical line; continuation lines are included.
    The first token sequence will always be an ENCODING token
    which tells you which encoding was used to decode the bytes stream.
    """
    # This import is here to avoid problems when the itertools module is not
    # built yet and tokenize is imported.
    from itertools import chain, repeat
    encoding, consumed = detect_encoding(readline)
    rl_gen = iter(readline, b"")
    # Replay the lines consumed during encoding detection, then the rest of
    # the stream, then an endless supply of b"" so _tokenize sees EOF.
    empty = repeat(b"")
    return _tokenize(chain(consumed, rl_gen, empty).__next__, encoding)
def _tokenize(readline, encoding):
    """Internal tokenizer loop shared by tokenize() and generate_tokens().

    Yields TokenInfo 5-tuples. *readline* returns bytes when *encoding* is
    given (lines are decoded here) and str when *encoding* is None.
    """
    # lnum: current line number; parenlev: bracket nesting depth;
    # continued: a backslash continuation is pending.
    lnum = parenlev = continued = 0
    numchars = '0123456789'
    # contstr/contline accumulate a multi-line string; needcont means the
    # string must end with a backslash continuation to stay open.
    contstr, needcont = '', 0
    contline = None
    indents = [0]
    if encoding is not None:
        if encoding == "utf-8-sig":
            # BOM will already have been stripped.
            encoding = "utf-8"
        yield TokenInfo(ENCODING, encoding, (0, 0), (0, 0), '')
    while True: # loop over lines in stream
        try:
            line = readline()
        except StopIteration:
            line = b''
        if encoding is not None:
            line = line.decode(encoding)
        lnum += 1
        pos, max = 0, len(line)
        if contstr: # continued string
            if not line:
                raise TokenError("EOF in multi-line string", strstart)
            endmatch = endprog.match(line)
            if endmatch:
                pos = end = endmatch.end(0)
                yield TokenInfo(STRING, contstr + line[:end],
                       strstart, (lnum, end), contline + line)
                contstr, needcont = '', 0
                contline = None
            elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n':
                yield TokenInfo(ERRORTOKEN, contstr + line,
                           strstart, (lnum, len(line)), contline)
                contstr = ''
                contline = None
                continue
            else:
                contstr = contstr + line
                contline = contline + line
                continue
        elif parenlev == 0 and not continued: # new statement
            if not line: break
            column = 0
            while pos < max: # measure leading whitespace
                if line[pos] == ' ':
                    column += 1
                elif line[pos] == '\t':
                    column = (column//tabsize + 1)*tabsize
                elif line[pos] == '\f':
                    column = 0
                else:
                    break
                pos += 1
            if pos == max:
                break
            if line[pos] in '#\r\n': # skip comments or blank lines
                if line[pos] == '#':
                    comment_token = line[pos:].rstrip('\r\n')
                    nl_pos = pos + len(comment_token)
                    yield TokenInfo(COMMENT, comment_token,
                           (lnum, pos), (lnum, pos + len(comment_token)), line)
                    yield TokenInfo(NL, line[nl_pos:],
                           (lnum, nl_pos), (lnum, len(line)), line)
                else:
                    yield TokenInfo((NL, COMMENT)[line[pos] == '#'], line[pos:],
                           (lnum, pos), (lnum, len(line)), line)
                continue
            if column > indents[-1]: # count indents or dedents
                indents.append(column)
                yield TokenInfo(INDENT, line[:pos], (lnum, 0), (lnum, pos), line)
            while column < indents[-1]:
                if column not in indents:
                    raise IndentationError(
                        "unindent does not match any outer indentation level",
                        ("<tokenize>", lnum, pos, line))
                indents = indents[:-1]
                yield TokenInfo(DEDENT, '', (lnum, pos), (lnum, pos), line)
        else: # continued statement
            if not line:
                raise TokenError("EOF in multi-line statement", (lnum, 0))
            continued = 0
        while pos < max:
            pseudomatch = _compile(PseudoToken).match(line, pos)
            if pseudomatch: # scan for tokens
                start, end = pseudomatch.span(1)
                spos, epos, pos = (lnum, start), (lnum, end), end
                if start == end:
                    continue
                token, initial = line[start:end], line[start]
                if (initial in numchars or # ordinary number
                    (initial == '.' and token != '.' and token != '...')):
                    yield TokenInfo(NUMBER, token, spos, epos, line)
                elif initial in '\r\n':
                    # Newlines inside brackets are non-logical (NL).
                    yield TokenInfo(NL if parenlev > 0 else NEWLINE,
                           token, spos, epos, line)
                elif initial == '#':
                    assert not token.endswith("\n")
                    yield TokenInfo(COMMENT, token, spos, epos, line)
                elif token in triple_quoted:
                    endprog = _compile(endpats[token])
                    endmatch = endprog.match(line, pos)
                    if endmatch: # all on one line
                        pos = endmatch.end(0)
                        token = line[start:pos]
                        yield TokenInfo(STRING, token, spos, (lnum, pos), line)
                    else:
                        strstart = (lnum, start) # multiple lines
                        contstr = line[start:]
                        contline = line
                        break
                elif initial in single_quoted or \
                    token[:2] in single_quoted or \
                    token[:3] in single_quoted:
                    if token[-1] == '\n': # continued string
                        strstart = (lnum, start)
                        endprog = _compile(endpats[initial] or
                                           endpats[token[1]] or
                                           endpats[token[2]])
                        contstr, needcont = line[start:], 1
                        contline = line
                        break
                    else: # ordinary string
                        yield TokenInfo(STRING, token, spos, epos, line)
                elif initial.isidentifier(): # ordinary name
                    yield TokenInfo(NAME, token, spos, epos, line)
                elif initial == '\\': # continued stmt
                    continued = 1
                else:
                    if initial in '([{':
                        parenlev += 1
                    elif initial in ')]}':
                        parenlev -= 1
                    yield TokenInfo(OP, token, spos, epos, line)
            else:
                yield TokenInfo(ERRORTOKEN, line[pos],
                           (lnum, pos), (lnum, pos+1), line)
                pos += 1
    for indent in indents[1:]: # pop remaining indent levels
        yield TokenInfo(DEDENT, '', (lnum, 0), (lnum, 0), '')
    yield TokenInfo(ENDMARKER, '', (lnum, 0), (lnum, 0), '')
# An undocumented, backwards compatible, API for all the places in the standard
# library that expect to be able to use tokenize with strings
def generate_tokens(readline):
    """Like tokenize(), but *readline* yields str lines and no ENCODING
    token is emitted (undocumented, kept for backward compatibility)."""
    return _tokenize(readline, None)
def main():
    """Command-line interface: print the token stream of a file or stdin."""
    import argparse
    # Helper error handling routines
    def perror(message):
        print(message, file=sys.stderr)
    def error(message, filename=None, location=None):
        # Print a compiler-style diagnostic and exit with status 1.
        if location:
            args = (filename,) + location + (message,)
            perror("%s:%d:%d: error: %s" % args)
        elif filename:
            perror("%s: error: %s" % (filename, message))
        else:
            perror("error: %s" % message)
        sys.exit(1)
    # Parse the arguments and options
    parser = argparse.ArgumentParser(prog='python -m tokenize')
    parser.add_argument(dest='filename', nargs='?',
                        metavar='filename.py',
                        help='the file to tokenize; defaults to stdin')
    parser.add_argument('-e', '--exact', dest='exact', action='store_true',
                        help='display token names using the exact type')
    args = parser.parse_args()
    try:
        # Tokenize the input
        if args.filename:
            filename = args.filename
            with builtins.open(filename, 'rb') as f:
                tokens = list(tokenize(f.readline))
        else:
            # stdin is already text, so use the str-based internal API.
            filename = "<stdin>"
            tokens = _tokenize(sys.stdin.readline, None)
        # Output the tokenization
        for token in tokens:
            token_type = token.type
            if args.exact:
                token_type = token.exact_type
            token_range = "%d,%d-%d,%d:" % (token.start + token.end)
            print("%-20s%-15s%-15r" %
                  (token_range, tok_name[token_type], token.string))
    except IndentationError as err:
        line, column = err.args[1][1:3]
        error(err.args[0], filename, (line, column))
    except TokenError as err:
        line, column = err.args[1]
        error(err.args[0], filename, (line, column))
    except SyntaxError as err:
        error(err, filename)
    except IOError as err:
        error(err)
    except KeyboardInterrupt:
        print("interrupted\n")
    except Exception as err:
        perror("unexpected error: %s" % err)
        raise
# Script entry point: supports "python -m tokenize" and direct execution.
if __name__ == "__main__":
    main()
|
drawquest/drawquest-web | refs/heads/master | website/compressor/filters/css_default.py | 3 | import os
import re
import posixpath
from compressor.cache import get_hexdigest, get_hashed_mtime
from compressor.conf import settings
from compressor.filters import FilterBase, FilterError
from compressor.utils import staticfiles
# Matches CSS url(...) references; group 1 captures the (possibly quoted) URL.
URL_PATTERN = re.compile(r'url\(([^\)]+)\)')
class CssAbsoluteFilter(FilterBase):
    """Rewrites relative ``url(...)`` references in CSS to absolute URLs
    under COMPRESS_URL, appending a cache-busting suffix derived from each
    referenced file's mtime or content hash.
    """

    def __init__(self, *args, **kwargs):
        super(CssAbsoluteFilter, self).__init__(*args, **kwargs)
        self.root = settings.COMPRESS_ROOT
        self.url = settings.COMPRESS_URL.rstrip('/')
        self.url_path = self.url
        self.has_scheme = False

    def input(self, filename=None, basename=None, **kwargs):
        """Return ``self.content`` with every url(...) made absolute.

        Content passes through untouched unless *filename* lives under
        COMPRESS_ROOT or *basename* is resolvable via staticfiles finders.
        """
        if filename is not None:
            filename = os.path.normcase(os.path.abspath(filename))
        if (not (filename and filename.startswith(self.root)) and
                not self.find(basename)):
            return self.content
        self.path = basename.replace(os.sep, '/')
        self.path = self.path.lstrip('/')
        if self.url.startswith(('http://', 'https://')):
            # Split a fully-qualified COMPRESS_URL into protocol, host and
            # path components so relative URLs can be rebuilt later.
            self.has_scheme = True
            parts = self.url.split('/')
            self.url = '/'.join(parts[2:])
            self.url_path = '/%s' % '/'.join(parts[3:])
            self.protocol = '%s/' % '/'.join(parts[:2])
            self.host = parts[2]
        self.directory_name = '/'.join((self.url, os.path.dirname(self.path)))
        return URL_PATTERN.sub(self.url_converter, self.content)

    def find(self, basename):
        # Only consult staticfiles finders in DEBUG, mirroring how static
        # files are resolved during development.
        if settings.DEBUG and basename and staticfiles.finders:
            return staticfiles.finders.find(basename)

    def guess_filename(self, url):
        """Map *url* back to a local file under COMPRESS_ROOT, or False."""
        local_path = url
        if self.has_scheme:
            # COMPRESS_URL had a protocol,
            # remove it and the hostname from our path.
            local_path = local_path.replace(self.protocol + self.host, "", 1)
        # Now, we just need to check if we can find
        # the path from COMPRESS_URL in our url
        if local_path.startswith(self.url_path):
            local_path = local_path.replace(self.url_path, "", 1)
        # Re-build the local full path by adding root
        filename = os.path.join(self.root, local_path.lstrip('/'))
        return os.path.exists(filename) and filename

    def add_suffix(self, url):
        """Append ``?<suffix>`` (or ``&<suffix>``) to *url* for cache busting.

        Raises FilterError when COMPRESS_CSS_HASHING_METHOD is unknown.
        """
        filename = self.guess_filename(url)
        suffix = None
        if filename:
            if settings.COMPRESS_CSS_HASHING_METHOD == "mtime":
                suffix = get_hashed_mtime(filename)
            elif settings.COMPRESS_CSS_HASHING_METHOD == "hash":
                # Context manager guarantees the handle is closed even if
                # reading or hashing raises.
                with open(filename) as hash_file:
                    suffix = get_hexdigest(hash_file.read(), 12)
            else:
                # Bug fix: the '%s' placeholder was never interpolated, so
                # the error message hid the offending setting value.
                raise FilterError('COMPRESS_CSS_HASHING_METHOD is configured '
                                  'with an unknown method (%s).' %
                                  settings.COMPRESS_CSS_HASHING_METHOD)
        if suffix is None:
            return url
        if url.startswith(('http://', 'https://', '/')):
            if "?" in url:
                url = "%s&%s" % (url, suffix)
            else:
                url = "%s?%s" % (url, suffix)
        return url

    def url_converter(self, matchobj):
        """re.sub callback: absolutize and suffix a single url(...) match."""
        url = matchobj.group(1)
        url = url.strip(' \'"')
        # Already-absolute and data: URIs only get the cache suffix.
        if url.startswith(('http://', 'https://', '/', 'data:')):
            return "url('%s')" % self.add_suffix(url)
        full_url = posixpath.normpath('/'.join([str(self.directory_name),
                                                url]))
        if self.has_scheme:
            full_url = "%s%s" % (self.protocol, full_url)
        return "url('%s')" % self.add_suffix(full_url)
|
kustodian/ansible | refs/heads/devel | test/integration/targets/module_utils/other_mu_dir/facts.py | 294 | data = 'should not be visible facts.py'
|
thaumos/ansible | refs/heads/devel | lib/ansible/modules/cloud/azure/azure_rm_devtestlabvirtualnetwork.py | 15 | #!/usr/bin/python
#
# Copyright (c) 2019 Zim Kalinowski, <zikalino@microsoft.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_devtestlabvirtualnetwork
version_added: "2.8"
short_description: Manage Azure DevTest Lab Virtual Network instance.
description:
- Create, update and delete instance of Azure DevTest Lab Virtual Network.
options:
resource_group:
description:
- The name of the resource group.
required: True
lab_name:
description:
- The name of the lab.
required: True
name:
description:
- The name of the virtual network.
required: True
location:
description:
- The location of the resource.
description:
description:
- The description of the virtual network.
state:
description:
- Assert the state of the Virtual Network.
- Use C(present) to create or update an Virtual Network and C(absent) to delete it.
default: present
choices:
- absent
- present
extends_documentation_fragment:
- azure
- azure_tags
author:
- "Zim Kalinowski (@zikalino)"
'''
EXAMPLES = '''
- name: Create (or update) Virtual Network
azure_rm_devtestlabvirtualnetwork:
resource_group: myResourceGroup
lab_name: mylab
name: myvn
description: My Lab Virtual Network
'''
RETURN = '''
id:
description:
- The identifier of the resource.
returned: always
type: str
sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourcegroups/testrg/providers/microsoft.devtestlab/
mylab/mylab/virtualnetworks/myvn"
external_provider_resource_id:
description:
- The identifier of external virtual network.
returned: always
type: str
sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/testrg/providers/Microsoft.Network/vi
rtualNetworks/myvn"
'''
import time
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
from ansible.module_utils.common.dict_transformations import _snake_to_camel
try:
from msrestazure.azure_exceptions import CloudError
from msrest.polling import LROPoller
from msrestazure.azure_operation import AzureOperationPoller
from azure.mgmt.devtestlabs import DevTestLabsClient
from msrest.serialization import Model
except ImportError:
# This is handled in azure_rm_common
pass
class Actions:
    """Planned operation for the resource, as plain int constants."""
    NoAction = 0
    Create = 1
    Update = 2
    Delete = 3
class AzureRMDevTestLabVirtualNetwork(AzureRMModuleBase):
    """Configuration class for an Azure RM Virtual Network resource"""

    def __init__(self):
        # Ansible argument spec; tags/auth options are merged in by the base
        # class via derived_arg_spec.
        self.module_arg_spec = dict(
            resource_group=dict(
                type='str',
                required=True
            ),
            lab_name=dict(
                type='str',
                required=True
            ),
            name=dict(
                type='str',
                required=True
            ),
            location=dict(
                type='str'
            ),
            description=dict(
                type='str'
            ),
            state=dict(
                type='str',
                default='present',
                choices=['present', 'absent']
            )
        )
        self.resource_group = None
        self.lab_name = None
        self.name = None
        # Request body for create_or_update; filled from module parameters.
        self.virtual_network = {}
        self.results = dict(changed=False)
        self.mgmt_client = None
        self.state = None
        self.to_do = Actions.NoAction
        super(AzureRMDevTestLabVirtualNetwork, self).__init__(derived_arg_spec=self.module_arg_spec,
                                                              supports_check_mode=True,
                                                              supports_tags=True)

    def exec_module(self, **kwargs):
        """Main module execution method"""
        # Split incoming parameters: keys with matching attributes go on
        # self; everything else (location, description, ...) becomes part of
        # the resource body.
        for key in list(self.module_arg_spec.keys()) + ['tags']:
            if hasattr(self, key):
                setattr(self, key, kwargs[key])
            elif kwargs[key] is not None:
                self.virtual_network[key] = kwargs[key]
        response = None
        self.mgmt_client = self.get_mgmt_svc_client(DevTestLabsClient,
                                                    base_url=self._cloud_environment.endpoints.resource_manager,
                                                    api_version='2018-10-15')
        resource_group = self.get_resource_group(self.resource_group)
        if self.virtual_network.get('location') is None:
            # Default to the resource group's location when not specified.
            self.virtual_network['location'] = resource_group.location
        # subnet overrides for virtual network and subnet created by default
        template = "/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.Network/virtualNetworks/{2}/subnets/{3}"
        subnet_id = template.format(self.subscription_id,
                                    self.resource_group,
                                    self.name,
                                    self.name + "Subnet")
        self.virtual_network['subnet_overrides'] = [{
            'resource_id': subnet_id,
            'lab_subnet_name': self.name + "Subnet",
            'use_in_vm_creation_permission': 'Allow',
            'use_public_ip_address_permission': 'Allow'
        }]
        old_response = self.get_virtualnetwork()
        if not old_response:
            self.log("Virtual Network instance doesn't exist")
            if self.state == 'absent':
                self.log("Old instance didn't exist")
            else:
                self.to_do = Actions.Create
        else:
            self.log("Virtual Network instance already exists")
            if self.state == 'absent':
                self.to_do = Actions.Delete
            elif self.state == 'present':
                # Only 'description' drift triggers an update here.
                # NOTE(review): confirm no other mutable fields should be
                # compared.
                if self.virtual_network.get('description') is not None and self.virtual_network.get('description') != old_response.get('description'):
                    self.to_do = Actions.Update
        if (self.to_do == Actions.Create) or (self.to_do == Actions.Update):
            self.log("Need to Create / Update the Virtual Network instance")
            self.results['changed'] = True
            if self.check_mode:
                return self.results
            response = self.create_update_virtualnetwork()
            self.log("Creation / Update done")
        elif self.to_do == Actions.Delete:
            self.log("Virtual Network instance deleted")
            self.results['changed'] = True
            if self.check_mode:
                return self.results
            self.delete_virtualnetwork()
            # This currently doesn't work as there is a bug in SDK / Service
            if isinstance(response, LROPoller) or isinstance(response, AzureOperationPoller):
                response = self.get_poller_result(response)
        else:
            self.log("Virtual Network instance unchanged")
            self.results['changed'] = False
            response = old_response
        if self.state == 'present':
            self.results.update({
                'id': response.get('id', None),
                'external_provider_resource_id': response.get('external_provider_resource_id', None)
            })
        return self.results

    def create_update_virtualnetwork(self):
        '''
        Creates or updates Virtual Network with the specified configuration.
        :return: deserialized Virtual Network instance state dictionary
        '''
        self.log("Creating / Updating the Virtual Network instance {0}".format(self.name))
        try:
            response = self.mgmt_client.virtual_networks.create_or_update(resource_group_name=self.resource_group,
                                                                          lab_name=self.lab_name,
                                                                          name=self.name,
                                                                          virtual_network=self.virtual_network)
            # Long-running operations return a poller; wait for completion.
            if isinstance(response, LROPoller) or isinstance(response, AzureOperationPoller):
                response = self.get_poller_result(response)
        except CloudError as exc:
            self.log('Error attempting to create the Virtual Network instance.')
            self.fail("Error creating the Virtual Network instance: {0}".format(str(exc)))
        return response.as_dict()

    def delete_virtualnetwork(self):
        '''
        Deletes specified Virtual Network instance in the specified subscription and resource group.
        :return: True
        '''
        self.log("Deleting the Virtual Network instance {0}".format(self.name))
        try:
            response = self.mgmt_client.virtual_networks.delete(resource_group_name=self.resource_group,
                                                                lab_name=self.lab_name,
                                                                name=self.name)
        except CloudError as e:
            self.log('Error attempting to delete the Virtual Network instance.')
            self.fail("Error deleting the Virtual Network instance: {0}".format(str(e)))
        return True

    def get_virtualnetwork(self):
        '''
        Gets the properties of the specified Virtual Network.
        :return: deserialized Virtual Network instance state dictionary
        '''
        self.log("Checking if the Virtual Network instance {0} is present".format(self.name))
        found = False
        try:
            response = self.mgmt_client.virtual_networks.get(resource_group_name=self.resource_group,
                                                             lab_name=self.lab_name,
                                                             name=self.name)
            found = True
            self.log("Response : {0}".format(response))
            self.log("Virtual Network instance : {0} found".format(response.name))
        except CloudError as e:
            # Absence is reported as a CloudError; treated as "not found".
            self.log('Did not find the Virtual Network instance.')
        if found is True:
            return response.as_dict()
        return False
def main():
    """Main execution"""
    # Instantiating the module class drives the whole lifecycle (argument
    # parsing, exec_module, exit).  NOTE(review): presumably handled by
    # AzureRMModuleBase.__init__ -- confirm in module_utils.azure_rm_common.
    AzureRMDevTestLabVirtualNetwork()
# Script entry point when executed directly by Ansible.
if __name__ == '__main__':
    main()
|
atsao72/sympy | refs/heads/master | sympy/polys/tests/test_orthopolys.py | 124 | """Tests for efficient functions for generating orthogonal polynomials. """
from sympy import Poly, S, Rational as Q
from sympy.utilities.pytest import raises
from sympy.polys.orthopolys import (
jacobi_poly,
gegenbauer_poly,
chebyshevt_poly,
chebyshevu_poly,
hermite_poly,
legendre_poly,
laguerre_poly,
)
from sympy.abc import x, a, b
def test_jacobi_poly():
    """Spot-check jacobi_poly values, argument validation and polys mode."""
    raises(ValueError, lambda: jacobi_poly(-1, a, b, x))
    assert jacobi_poly(1, a, b, x, polys=True) == Poly(
        (a/2 + b/2 + 1)*x + a/2 - b/2, x, domain='ZZ(a,b)')
    assert jacobi_poly(0, a, b, x) == 1
    assert jacobi_poly(1, a, b, x) == a/2 - b/2 + x*(a/2 + b/2 + 1)
    assert jacobi_poly(2, a, b, x) == (a**2/8 - a*b/4 - a/8 + b**2/8 - b/8 + x**2*(a**2/8 + a*b/4 + 7*a/8 +
                                       b**2/8 + 7*b/8 + S(3)/2) + x*(a**2/4 + 3*a/4 - b**2/4 - 3*b/4) - S(1)/2)
    assert jacobi_poly(1, a, b, polys=True) == Poly(
        (a/2 + b/2 + 1)*x + a/2 - b/2, x, domain='ZZ(a,b)')
def test_gegenbauer_poly():
    """Spot-check gegenbauer_poly values, argument validation and polys mode."""
    raises(ValueError, lambda: gegenbauer_poly(-1, a, x))
    assert gegenbauer_poly(
        1, a, x, polys=True) == Poly(2*a*x, x, domain='ZZ(a)')
    assert gegenbauer_poly(0, a, x) == 1
    assert gegenbauer_poly(1, a, x) == 2*a*x
    assert gegenbauer_poly(2, a, x) == -a + x**2*(2*a**2 + 2*a)
    assert gegenbauer_poly(
        3, a, x) == x**3*(4*a**3/3 + 4*a**2 + 8*a/3) + x*(-2*a**2 - 2*a)
    assert gegenbauer_poly(1, S.Half).dummy_eq(x)
    assert gegenbauer_poly(1, a, polys=True) == Poly(2*a*x, x, domain='ZZ(a)')
def test_chebyshevt_poly():
    """Check the first Chebyshev polynomials of the first kind."""
    raises(ValueError, lambda: chebyshevt_poly(-1, x))
    assert chebyshevt_poly(1, x, polys=True) == Poly(x)
    assert chebyshevt_poly(0, x) == 1
    assert chebyshevt_poly(1, x) == x
    assert chebyshevt_poly(2, x) == 2*x**2 - 1
    assert chebyshevt_poly(3, x) == 4*x**3 - 3*x
    assert chebyshevt_poly(4, x) == 8*x**4 - 8*x**2 + 1
    assert chebyshevt_poly(5, x) == 16*x**5 - 20*x**3 + 5*x
    assert chebyshevt_poly(6, x) == 32*x**6 - 48*x**4 + 18*x**2 - 1
    assert chebyshevt_poly(1).dummy_eq(x)
    assert chebyshevt_poly(1, polys=True) == Poly(x)
def test_chebyshevu_poly():
    """Chebyshev polynomials of the second kind, U_0 .. U_6."""
    raises(ValueError, lambda: chebyshevu_poly(-1, x))
    assert chebyshevu_poly(1, x, polys=True) == Poly(2*x)
    assert chebyshevu_poly(0, x) == 1
    assert chebyshevu_poly(1, x) == 2*x
    assert chebyshevu_poly(2, x) == 4*x**2 - 1
    assert chebyshevu_poly(3, x) == 8*x**3 - 4*x
    assert chebyshevu_poly(4, x) == 16*x**4 - 12*x**2 + 1
    assert chebyshevu_poly(5, x) == 32*x**5 - 32*x**3 + 6*x
    assert chebyshevu_poly(6, x) == 64*x**6 - 80*x**4 + 24*x**2 - 1
    # Without a generator a Dummy is used, hence dummy_eq.
    assert chebyshevu_poly(1).dummy_eq(2*x)
    assert chebyshevu_poly(1, polys=True) == Poly(2*x)
def test_hermite_poly():
    """(Physicists') Hermite polynomials, H_0 .. H_6."""
    raises(ValueError, lambda: hermite_poly(-1, x))
    assert hermite_poly(1, x, polys=True) == Poly(2*x)
    assert hermite_poly(0, x) == 1
    assert hermite_poly(1, x) == 2*x
    assert hermite_poly(2, x) == 4*x**2 - 2
    assert hermite_poly(3, x) == 8*x**3 - 12*x
    assert hermite_poly(4, x) == 16*x**4 - 48*x**2 + 12
    assert hermite_poly(5, x) == 32*x**5 - 160*x**3 + 120*x
    assert hermite_poly(6, x) == 64*x**6 - 480*x**4 + 720*x**2 - 120
    # Without a generator a Dummy is used, hence dummy_eq.
    assert hermite_poly(1).dummy_eq(2*x)
    assert hermite_poly(1, polys=True) == Poly(2*x)
def test_legendre_poly():
    """Legendre polynomials, P_0 .. P_6, with exact rational coefficients."""
    raises(ValueError, lambda: legendre_poly(-1, x))
    assert legendre_poly(1, x, polys=True) == Poly(x)
    assert legendre_poly(0, x) == 1
    assert legendre_poly(1, x) == x
    assert legendre_poly(2, x) == Q(3, 2)*x**2 - Q(1, 2)
    assert legendre_poly(3, x) == Q(5, 2)*x**3 - Q(3, 2)*x
    assert legendre_poly(4, x) == Q(35, 8)*x**4 - Q(30, 8)*x**2 + Q(3, 8)
    assert legendre_poly(5, x) == Q(63, 8)*x**5 - Q(70, 8)*x**3 + Q(15, 8)*x
    assert legendre_poly(6, x) == Q(
        231, 16)*x**6 - Q(315, 16)*x**4 + Q(105, 16)*x**2 - Q(5, 16)
    # Without a generator a Dummy is used, hence dummy_eq.
    assert legendre_poly(1).dummy_eq(x)
    assert legendre_poly(1, polys=True) == Poly(x)
def test_laguerre_poly():
    """Laguerre polynomials L_0 .. L_6, plus generalized (associated)
    Laguerre polynomials with parameter ``a`` for degrees 0-3."""
    raises(ValueError, lambda: laguerre_poly(-1, x))
    assert laguerre_poly(1, x, polys=True) == Poly(-x + 1)
    assert laguerre_poly(0, x) == 1
    assert laguerre_poly(1, x) == -x + 1
    assert laguerre_poly(2, x) == Q(1, 2)*x**2 - Q(4, 2)*x + 1
    assert laguerre_poly(3, x) == -Q(1, 6)*x**3 + Q(9, 6)*x**2 - Q(18, 6)*x + 1
    assert laguerre_poly(4, x) == Q(
        1, 24)*x**4 - Q(16, 24)*x**3 + Q(72, 24)*x**2 - Q(96, 24)*x + 1
    assert laguerre_poly(5, x) == -Q(1, 120)*x**5 + Q(25, 120)*x**4 - Q(
        200, 120)*x**3 + Q(600, 120)*x**2 - Q(600, 120)*x + 1
    assert laguerre_poly(6, x) == Q(1, 720)*x**6 - Q(36, 720)*x**5 + Q(450, 720)*x**4 - Q(2400, 720)*x**3 + Q(5400, 720)*x**2 - Q(4320, 720)*x + 1
    # Generalized Laguerre polynomials (third argument is alpha).
    assert laguerre_poly(0, x, a) == 1
    assert laguerre_poly(1, x, a) == -x + a + 1
    assert laguerre_poly(2, x, a) == x**2/2 + (-a - 2)*x + a**2/2 + 3*a/2 + 1
    assert laguerre_poly(3, x, a) == -x**3/6 + (a/2 + Q(
        3)/2)*x**2 + (-a**2/2 - 5*a/2 - 3)*x + a**3/6 + a**2 + 11*a/6 + 1
    # Without a generator a Dummy is used, hence dummy_eq.
    assert laguerre_poly(1).dummy_eq(-x + 1)
    assert laguerre_poly(1, polys=True) == Poly(-x + 1)
|
gromez/Sick-Beard | refs/heads/development | lib/imdb/Movie.py | 126 | """
Movie module (imdb package).
This module provides the Movie class, used to store information about
a given movie.
Copyright 2004-2010 Davide Alberani <da@erlug.linux.it>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
from copy import deepcopy
from imdb import linguistics
from imdb.utils import analyze_title, build_title, canonicalTitle, \
flatten, _Container, cmpMovies
class Movie(_Container):
"""A Movie.
Every information about a movie can be accessed as:
movieObject['information']
to get a list of the kind of information stored in a
Movie object, use the keys() method; some useful aliases
are defined (as "casting" for the "casting director" key); see
the keys_alias dictionary.
"""
# The default sets of information retrieved.
default_info = ('main', 'plot')
# Aliases for some not-so-intuitive keys.
keys_alias = {
'tv schedule': 'airing',
'user rating': 'rating',
'plot summary': 'plot',
'plot summaries': 'plot',
'directed by': 'director',
'created by': 'creator',
'writing credits': 'writer',
'produced by': 'producer',
'original music by': 'original music',
'non-original music by': 'non-original music',
'music': 'original music',
'cinematography by': 'cinematographer',
'cinematography': 'cinematographer',
'film editing by': 'editor',
'film editing': 'editor',
'editing': 'editor',
'actors': 'cast',
'actresses': 'cast',
'casting by': 'casting director',
'casting': 'casting director',
'art direction by': 'art direction',
'set decoration by': 'set decoration',
'costume design by': 'costume designer',
'costume design': 'costume designer',
'makeup department': 'make up',
'makeup': 'make up',
'make-up': 'make up',
'production management': 'production manager',
'production company': 'production companies',
'second unit director or assistant director':
'assistant director',
'second unit director': 'assistant director',
'sound department': 'sound crew',
'costume and wardrobe department': 'costume department',
'special effects by': 'special effects',
'visual effects by': 'visual effects',
'special effects company': 'special effects companies',
'stunts': 'stunt performer',
'other crew': 'miscellaneous crew',
'misc crew': 'miscellaneous crew',
'miscellaneouscrew': 'miscellaneous crew',
'crewmembers': 'miscellaneous crew',
'crew members': 'miscellaneous crew',
'other companies': 'miscellaneous companies',
'misc companies': 'miscellaneous companies',
'miscellaneous company': 'miscellaneous companies',
'misc company': 'miscellaneous companies',
'other company': 'miscellaneous companies',
'aka': 'akas',
'also known as': 'akas',
'country': 'countries',
'production country': 'countries',
'production countries': 'countries',
'genre': 'genres',
'runtime': 'runtimes',
'lang': 'languages',
'color': 'color info',
'cover': 'cover url',
'full-size cover': 'full-size cover url',
'seasons': 'number of seasons',
'language': 'languages',
'certificate': 'certificates',
'certifications': 'certificates',
'certification': 'certificates',
'miscellaneous links': 'misc links',
'miscellaneous': 'misc links',
'soundclips': 'sound clips',
'videoclips': 'video clips',
'photographs': 'photo sites',
'distributor': 'distributors',
'distribution': 'distributors',
'distribution companies': 'distributors',
'distribution company': 'distributors',
'guest': 'guests',
'guest appearances': 'guests',
'tv guests': 'guests',
'notable tv guest appearances': 'guests',
'episodes cast': 'guests',
'episodes number': 'number of episodes',
'amazon review': 'amazon reviews',
'merchandising': 'merchandising links',
'merchandise': 'merchandising links',
'sales': 'merchandising links',
'faq': 'faqs',
'parental guide': 'parents guide',
'frequently asked questions': 'faqs'}
keys_tomodify_list = ('plot', 'trivia', 'alternate versions', 'goofs',
'quotes', 'dvd', 'laserdisc', 'news', 'soundtrack',
'crazy credits', 'business', 'supplements',
'video review', 'faqs')
cmpFunct = cmpMovies
def _init(self, **kwds):
"""Initialize a Movie object.
*movieID* -- the unique identifier for the movie.
*title* -- the title of the Movie, if not in the data dictionary.
*myTitle* -- your personal title for the movie.
*myID* -- your personal identifier for the movie.
*data* -- a dictionary used to initialize the object.
*currentRole* -- a Character instance representing the current role
or duty of a person in this movie, or a Person
object representing the actor/actress who played
a given character in a Movie. If a string is
passed, an object is automatically build.
*roleID* -- if available, the characterID/personID of the currentRole
object.
*roleIsPerson* -- when False (default) the currentRole is assumed
to be a Character object, otherwise a Person.
*notes* -- notes for the person referred in the currentRole
attribute; e.g.: '(voice)'.
*accessSystem* -- a string representing the data access system used.
*titlesRefs* -- a dictionary with references to movies.
*namesRefs* -- a dictionary with references to persons.
*charactersRefs* -- a dictionary with references to characters.
*modFunct* -- function called returning text fields.
"""
title = kwds.get('title')
if title and not self.data.has_key('title'):
self.set_title(title)
self.movieID = kwds.get('movieID', None)
self.myTitle = kwds.get('myTitle', u'')
    def _reset(self):
        """Reset the Movie object, clearing the movieID and personal title."""
        self.movieID = None
        self.myTitle = u''
    def set_title(self, title):
        """Set the title of the movie, parsing it with analyze_title and
        merging the resulting components (title, year, kind, ...) into
        self.data."""
        # XXX: convert title to unicode, if it's a plain string?
        d_title = analyze_title(title)
        self.data.update(d_title)
def _additional_keys(self):
"""Valid keys to append to the data.keys() list."""
addkeys = []
if self.data.has_key('title'):
addkeys += ['canonical title', 'long imdb title',
'long imdb canonical title',
'smart canonical title',
'smart long imdb canonical title']
if self.data.has_key('episode of'):
addkeys += ['long imdb episode title', 'series title',
'canonical series title', 'episode title',
'canonical episode title',
'smart canonical series title',
'smart canonical episode title']
if self.data.has_key('cover url'):
addkeys += ['full-size cover url']
return addkeys
    def guessLanguage(self):
        """Guess the language of the title of this movie; returns None
        if there are no hints."""
        lang = self.get('languages')
        if lang:
            # First listed language wins.
            lang = lang[0]
        else:
            # Fall back on the first production country's main language.
            country = self.get('countries')
            if country:
                lang = linguistics.COUNTRY_LANG.get(country[0])
        # NOTE: if 'languages' is an empty list and no country hint exists,
        # that empty list (falsy, like None) is what gets returned.
        return lang
    def smartCanonicalTitle(self, title=None, lang=None):
        """Return the canonical title, guessing its language.
        The title can be forced with the 'title' argument (internally
        used) and the language can be forced with the 'lang' argument,
        otherwise it's auto-detected."""
        if title is None:
            title = self.data.get('title', u'')
        if lang is None:
            lang = self.guessLanguage()
        return canonicalTitle(title, lang=lang)
def _getitem(self, key):
"""Handle special keys."""
if self.data.has_key('episode of'):
if key == 'long imdb episode title':
return build_title(self.data)
elif key == 'series title':
return self.data['episode of']['title']
elif key == 'canonical series title':
ser_title = self.data['episode of']['title']
return canonicalTitle(ser_title)
elif key == 'smart canonical series title':
ser_title = self.data['episode of']['title']
return self.smartCanonicalTitle(ser_title)
elif key == 'episode title':
return self.data.get('title', u'')
elif key == 'canonical episode title':
return canonicalTitle(self.data.get('title', u''))
elif key == 'smart canonical episode title':
return self.smartCanonicalTitle(self.data.get('title', u''))
if self.data.has_key('title'):
if key == 'title':
return self.data['title']
elif key == 'long imdb title':
return build_title(self.data)
elif key == 'canonical title':
return canonicalTitle(self.data['title'])
elif key == 'smart canonical title':
return self.smartCanonicalTitle(self.data['title'])
elif key == 'long imdb canonical title':
return build_title(self.data, canonical=1)
elif key == 'smart long imdb canonical title':
return build_title(self.data, canonical=1,
lang=self.guessLanguage())
if key == 'full-size cover url' and self.data.has_key('cover url'):
return self._re_fullsizeURL.sub('', self.data.get('cover url', ''))
return None
    def getID(self):
        """Return the movieID (None if not set)."""
        return self.movieID
def __nonzero__(self):
"""The Movie is "false" if the self.data does not contain a title."""
# XXX: check the title and the movieID?
if self.data.has_key('title'): return 1
return 0
def isSameTitle(self, other):
"""Return true if this and the compared object have the same
long imdb title and/or movieID.
"""
# XXX: obsolete?
if not isinstance(other, self.__class__): return 0
if self.data.has_key('title') and \
other.data.has_key('title') and \
build_title(self.data, canonical=0) == \
build_title(other.data, canonical=0):
return 1
if self.accessSystem == other.accessSystem and \
self.movieID is not None and self.movieID == other.movieID:
return 1
return 0
isSameMovie = isSameTitle # XXX: just for backward compatiblity.
    def __contains__(self, item):
        """Return true if the given Person object is listed in this Movie,
        or if the given Character or Company is represented in this Movie."""
        # Imported here to avoid circular imports between the model modules.
        from Person import Person
        from Character import Character
        from Company import Company
        if isinstance(item, Person):
            # Walk every Person found anywhere inside self.data.
            for p in flatten(self.data, yieldDictKeys=1, scalar=Person,
                            toDescend=(list, dict, tuple, Movie)):
                if item.isSame(p):
                    return 1
        elif isinstance(item, Character):
            # Characters are matched against the currentRole of each Person.
            for p in flatten(self.data, yieldDictKeys=1, scalar=Person,
                            toDescend=(list, dict, tuple, Movie)):
                if item.isSame(p.currentRole):
                    return 1
        elif isinstance(item, Company):
            for c in flatten(self.data, yieldDictKeys=1, scalar=Company,
                            toDescend=(list, dict, tuple, Movie)):
                if item.isSame(c):
                    return 1
        return 0
    def __deepcopy__(self, memo):
        """Return a deep copy of a Movie instance."""
        # Rebuild via the constructor so container bookkeeping is redone,
        # deep-copying every mutable attribute through the shared memo dict.
        m = Movie(title=u'', movieID=self.movieID, myTitle=self.myTitle,
                    myID=self.myID, data=deepcopy(self.data, memo),
                    currentRole=deepcopy(self.currentRole, memo),
                    roleIsPerson=self._roleIsPerson,
                    notes=self.notes, accessSystem=self.accessSystem,
                    titlesRefs=deepcopy(self.titlesRefs, memo),
                    namesRefs=deepcopy(self.namesRefs, memo),
                    charactersRefs=deepcopy(self.charactersRefs, memo))
        # current_info is a list of fetched info sets; copy it shallowly.
        m.current_info = list(self.current_info)
        m.set_mod_funct(self.modFunct)
        return m
    def __repr__(self):
        """String representation of a Movie object."""
        # XXX: add also currentRole and notes, if present?
        if self.has_key('long imdb episode title'):
            title = self.get('long imdb episode title')
        else:
            title = self.get('long imdb title')
        r = '<Movie id:%s[%s] title:_%s_>' % (self.movieID, self.accessSystem,
                                                title)
        # Python 2: repr() must return a byte string, so encode unicode.
        if isinstance(r, unicode): r = r.encode('utf_8', 'replace')
        return r
    def __str__(self):
        """Simply print the short title (utf-8 encoded, Python 2 style)."""
        return self.get('title', u'').encode('utf_8', 'replace')
    def __unicode__(self):
        """Simply print the short title (as a unicode string)."""
        return self.get('title', u'')
    def summary(self):
        """Return a string with a pretty-printed summary for the movie.

        Includes (when available): long imdb canonical title, genres,
        director, writer, first five cast members, runtime, country,
        language, rating/votes and the first plot summary."""
        if not self: return u''
        def _nameAndRole(personList, joiner=u', '):
            """Build a pretty string with name and role."""
            nl = []
            for person in personList:
                n = person.get('name', u'')
                if person.currentRole: n += u' (%s)' % person.currentRole
                nl.append(n)
            return joiner.join(nl)
        s = u'Movie\n=====\nTitle: %s\n' % \
                    self.get('long imdb canonical title', u'')
        genres = self.get('genres')
        if genres: s += u'Genres: %s.\n' % u', '.join(genres)
        director = self.get('director')
        if director:
            s += u'Director: %s.\n' % _nameAndRole(director)
        writer = self.get('writer')
        if writer:
            s += u'Writer: %s.\n' % _nameAndRole(writer)
        cast = self.get('cast')
        if cast:
            # Only the top-billed five, to keep the summary short.
            cast = cast[:5]
            s += u'Cast: %s.\n' % _nameAndRole(cast)
        runtime = self.get('runtimes')
        if runtime:
            s += u'Runtime: %s.\n' % u', '.join(runtime)
        countries = self.get('countries')
        if countries:
            s += u'Country: %s.\n' % u', '.join(countries)
        lang = self.get('languages')
        if lang:
            s += u'Language: %s.\n' % u', '.join(lang)
        rating = self.get('rating')
        if rating:
            s += u'Rating: %s' % rating
            nr_votes = self.get('votes')
            if nr_votes:
                s += u' (%s votes)' % nr_votes
            s += u'.\n'
        plot = self.get('plot')
        if not plot:
            plot = self.get('plot summary')
            if plot:
                plot = [plot]
        if plot:
            # Use only the first summary, stripped of its '::author' suffix.
            plot = plot[0]
            i = plot.find('::')
            if i != -1:
                plot = plot[:i]
            s += u'Plot: %s' % plot
        return s
|
48thct2jtnf/P | refs/heads/master | qa/rpc-tests/receivedby.py | 164 | #!/usr/bin/env python2
# Copyright (c) 2014 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Exercise the listreceivedbyaddress API
from test_framework import BitcoinTestFramework
from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
from util import *
def get_sub_array_from_array(object_array, to_match):
    '''
    Finds and returns a sub array from an array of arrays.
    to_match should be a unique identifier of a sub array.
    Returns the first fully-matching item, or [] if nothing matches.
    '''
    for item in object_array:
        # The item matches when every key/value pair in to_match agrees.
        if all(item[key] == value for key, value in to_match.items()):
            return item
    return []
def check_array_result(object_array, to_match, expected, should_not_find = False):
    """
    Pass in array of JSON objects, a dictionary with key/value pairs
    to match against, and another dictionary with expected key/value
    pairs.
    If the should_not_find flag is true, to_match should not be found in object_array.
    Raises AssertionError on any mismatch.
    """
    if should_not_find:
        # Nothing is expected of a match we should not find.
        expected = { }
    num_matched = 0
    for item in object_array:
        # Skip items that do not fully satisfy the to_match filter.
        if not all(item[key] == value for key, value in to_match.items()):
            continue
        # A matching item must also carry all the expected key/value pairs.
        for key, value in expected.items():
            if item[key] != value:
                raise AssertionError("%s : expected %s=%s"%(str(item), str(key), str(value)))
        num_matched = num_matched+1
    if num_matched == 0 and not should_not_find:
        raise AssertionError("No objects matched %s"%(str(to_match)))
    if num_matched > 0 and should_not_find:
        raise AssertionError("Objects was matched %s"%(str(to_match)))
class ReceivedByTest(BitcoinTestFramework):
    """Functional test for the listreceivedbyaddress, getreceivedbyaddress,
    listreceivedbyaccount and getreceivedbyaccount RPC calls."""
    def run_test(self):
        '''
        listreceivedbyaddress Test
        '''
        # Send from node 0 to 1
        addr = self.nodes[1].getnewaddress()
        txid = self.nodes[0].sendtoaddress(addr, 0.1)
        self.sync_all()
        #Check not listed in listreceivedbyaddress because has 0 confirmations
        check_array_result(self.nodes[1].listreceivedbyaddress(),
                           {"address":addr},
                           { },
                           True)
        #Bury Tx under 10 block so it will be returned by listreceivedbyaddress
        self.nodes[1].setgenerate(True, 10)
        self.sync_all()
        check_array_result(self.nodes[1].listreceivedbyaddress(),
                           {"address":addr},
                           {"address":addr, "account":"", "amount":Decimal("0.1"), "confirmations":10, "txids":[txid,]})
        #With min confidence < 10
        check_array_result(self.nodes[1].listreceivedbyaddress(5),
                           {"address":addr},
                           {"address":addr, "account":"", "amount":Decimal("0.1"), "confirmations":10, "txids":[txid,]})
        #With min confidence > 10, should not find Tx
        check_array_result(self.nodes[1].listreceivedbyaddress(11),{"address":addr},{ },True)
        #Empty Tx
        addr = self.nodes[1].getnewaddress()
        check_array_result(self.nodes[1].listreceivedbyaddress(0,True),
                           {"address":addr},
                           {"address":addr, "account":"", "amount":0, "confirmations":0, "txids":[]})
        '''
        getreceivedbyaddress Test
        '''
        # Send from node 0 to 1
        addr = self.nodes[1].getnewaddress()
        txid = self.nodes[0].sendtoaddress(addr, 0.1)
        self.sync_all()
        #Check balance is 0 because of 0 confirmations
        balance = self.nodes[1].getreceivedbyaddress(addr)
        if balance != Decimal("0.0"):
            raise AssertionError("Wrong balance returned by getreceivedbyaddress, %0.2f"%(balance))
        #Check balance is 0.1
        balance = self.nodes[1].getreceivedbyaddress(addr,0)
        if balance != Decimal("0.1"):
            raise AssertionError("Wrong balance returned by getreceivedbyaddress, %0.2f"%(balance))
        #Bury Tx under 10 block so it will be returned by the default getreceivedbyaddress
        self.nodes[1].setgenerate(True, 10)
        self.sync_all()
        balance = self.nodes[1].getreceivedbyaddress(addr)
        if balance != Decimal("0.1"):
            raise AssertionError("Wrong balance returned by getreceivedbyaddress, %0.2f"%(balance))
        '''
        listreceivedbyaccount + getreceivedbyaccount Test
        '''
        #set pre-state
        addrArr = self.nodes[1].getnewaddress()
        account = self.nodes[1].getaccount(addrArr)
        received_by_account_json = get_sub_array_from_array(self.nodes[1].listreceivedbyaccount(),{"account":account})
        if len(received_by_account_json) == 0:
            raise AssertionError("No accounts found in node")
        balance_by_account = rec_by_accountArr = self.nodes[1].getreceivedbyaccount(account)
        # NOTE(review): this sends to 'addr' (reused from the previous
        # section), not to addrArr; both map to the default "" account here,
        # so the account totals below still update -- confirm intent.
        txid = self.nodes[0].sendtoaddress(addr, 0.1)
        self.sync_all()
        # listreceivedbyaccount should return received_by_account_json because of 0 confirmations
        check_array_result(self.nodes[1].listreceivedbyaccount(),
                           {"account":account},
                           received_by_account_json)
        # getreceivedbyaddress should return same balance because of 0 confirmations
        balance = self.nodes[1].getreceivedbyaccount(account)
        if balance != balance_by_account:
            raise AssertionError("Wrong balance returned by getreceivedbyaccount, %0.2f"%(balance))
        self.nodes[1].setgenerate(True, 10)
        self.sync_all()
        # listreceivedbyaccount should return updated account balance
        check_array_result(self.nodes[1].listreceivedbyaccount(),
                           {"account":account},
                           {"account":received_by_account_json["account"], "amount":(received_by_account_json["amount"] + Decimal("0.1"))})
        # getreceivedbyaddress should return updates balance
        balance = self.nodes[1].getreceivedbyaccount(account)
        if balance != balance_by_account + Decimal("0.1"):
            raise AssertionError("Wrong balance returned by getreceivedbyaccount, %0.2f"%(balance))
        #Create a new account named "mynewaccount" that has a 0 balance
        self.nodes[1].getaccountaddress("mynewaccount")
        received_by_account_json = get_sub_array_from_array(self.nodes[1].listreceivedbyaccount(0,True),{"account":"mynewaccount"})
        if len(received_by_account_json) == 0:
            raise AssertionError("No accounts found in node")
        # Test includeempty of listreceivedbyaccount
        if received_by_account_json["amount"] != Decimal("0.0"):
            raise AssertionError("Wrong balance returned by listreceivedbyaccount, %0.2f"%(received_by_account_json["amount"]))
        # Test getreceivedbyaccount for 0 amount accounts
        balance = self.nodes[1].getreceivedbyaccount("mynewaccount")
        if balance != Decimal("0.0"):
            raise AssertionError("Wrong balance returned by getreceivedbyaccount, %0.2f"%(balance))
# Run the functional test when executed directly.
if __name__ == '__main__':
    ReceivedByTest().main()
|
ESS-LLP/frappe | refs/heads/develop | frappe/utils/file_lock.py | 22 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
'''
File based locking utility
'''
import os
from time import time
from frappe.utils import get_site_path, touch_file
class LockTimeoutError(Exception):
	"""Raised when a lock file is older than the allowed timeout."""
	pass
def create_lock(name):
	'''Creates a file in the /locks folder by the given name.

	Returns the touch_file() result on success, or False if a fresh lock
	already exists.  Raises LockTimeoutError (via check_lock) if an
	existing lock is stale.'''
	lock_path = get_lock_path(name)
	if not check_lock(lock_path):
		return touch_file(lock_path)
	else:
		return False
def lock_exists(name):
	'''Returns True if lock of the given name exists (regardless of age).'''
	return os.path.exists(get_lock_path(name))
def check_lock(path, timeout=600):
	'''Returns True if a fresh lock file exists at *path*.

	Returns False when the file is missing; raises LockTimeoutError when
	the file is older than *timeout* seconds.'''
	if not os.path.exists(path):
		return False
	age = time() - os.path.getmtime(path)
	if age > timeout:
		raise LockTimeoutError(path)
	return True
def delete_lock(name):
	'''Removes the lock file for the given name; missing files are ignored.

	Always returns True.'''
	lock_path = get_lock_path(name)
	try:
		os.remove(lock_path)
	except OSError:
		# Lock already gone (or never created): nothing to do.
		pass
	return True
def get_lock_path(name):
	'''Builds the site-relative path of the lock file for *name*.

	The name is lowercased and given a ".lock" extension inside the
	site's "locks" directory.'''
	return get_site_path('locks', name.lower() + '.lock')
|
jkorell/PTVS | refs/heads/master | Python/Tests/TestData/DragDropCopyCutPaste/!Source/DraggedToOtherProject.py | 18 | #DraggedToOtherProject.py |
cleverhans-lab/cleverhans | refs/heads/master | cleverhans_v3.1.0/cleverhans/model_zoo/soft_nearest_neighbor_loss/SNNL_regularized_train.py | 1 | """
This model shows how to train a model with Soft Nearest Neighbor Loss
regularization. The paper which presents this method can be found at
https://arxiv.org/abs/1902.01889
"""
# pylint: disable=missing-docstring
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from matplotlib.offsetbox import OffsetImage, AnnotationBbox
from sklearn.manifold import TSNE
from cleverhans.compat import flags
from cleverhans.loss import SNNLCrossEntropy, CrossEntropy
from cleverhans.dataset import MNIST
from cleverhans.utils_tf import model_eval
from cleverhans.train import train
from cleverhans.utils import AccuracyReport, set_log_level
from cleverhans.model_zoo.soft_nearest_neighbor_loss.SNNL_regularized_model import (
ModelBasicCNN,
)
FLAGS = flags.FLAGS
NB_EPOCHS = 6
BATCH_SIZE = 128
LEARNING_RATE = 0.001
NB_FILTERS = 64
SNNL_FACTOR = -10.0
OUTPUT_DIR = "/tmp/"
def SNNL_example(
    train_start=0,
    train_end=60000,
    test_start=0,
    test_end=10000,
    nb_epochs=NB_EPOCHS,
    batch_size=BATCH_SIZE,
    learning_rate=LEARNING_RATE,
    nb_filters=NB_FILTERS,
    SNNL_factor=SNNL_FACTOR,
    output_dir=OUTPUT_DIR,
):
    """
    A simple model trained to minimize Cross Entropy and Maximize Soft Nearest
    Neighbor Loss at each internal layer. This outputs a TSNE of the sign of
    the adversarial gradients of a trained model. A model with a negative
    SNNL_factor will show little or no class clusters, while a model with a
    0 SNNL_factor will have class clusters in the adversarial gradient direction.
    :param train_start: index of first training set example
    :param train_end: index of last training set example
    :param test_start: index of first test set example
    :param test_end: index of last test set example
    :param nb_epochs: number of epochs to train model
    :param batch_size: size of training batches
    :param learning_rate: learning rate for training
    :param nb_filters: number of convolutional filters in the model
    :param SNNL_factor: multiplier for Soft Nearest Neighbor Loss
    :param output_dir: directory where the TSNE figure is saved
    :return: an AccuracyReport object
    """
    # Object used to keep track of (and return) key accuracies
    report = AccuracyReport()

    # Set TF random seed to improve reproducibility
    tf.set_random_seed(1234)

    # Set logging level to see debug information
    set_log_level(logging.DEBUG)

    # Create TF session
    sess = tf.Session()

    # Get MNIST data
    mnist = MNIST(
        train_start=train_start,
        train_end=train_end,
        test_start=test_start,
        test_end=test_end,
    )
    x_train, y_train = mnist.get_set("train")
    x_test, y_test = mnist.get_set("test")

    # Use Image Parameters
    img_rows, img_cols, nchannels = x_train.shape[1:4]
    nb_classes = y_train.shape[1]

    # Define input TF placeholder
    x = tf.placeholder(tf.float32, shape=(None, img_rows, img_cols, nchannels))
    y = tf.placeholder(tf.float32, shape=(None, nb_classes))

    # Train an MNIST model
    train_params = {
        "nb_epochs": nb_epochs,
        "batch_size": batch_size,
        "learning_rate": learning_rate,
    }
    eval_params = {"batch_size": batch_size}
    rng = np.random.RandomState([2017, 8, 30])

    def do_eval(preds, x_set, y_set, report_key):
        # Evaluate accuracy and record it on the report object.
        acc = model_eval(sess, x, y, preds, x_set, y_set, args=eval_params)
        setattr(report, report_key, acc)
        print("Test accuracy on legitimate examples: %0.4f" % (acc))

    model = ModelBasicCNN("model", nb_classes, nb_filters)
    preds = model.get_logits(x)
    cross_entropy_loss = CrossEntropy(model)
    # A zero factor disables the SNNL term entirely (plain cross-entropy).
    if not SNNL_factor:
        loss = cross_entropy_loss
    else:
        loss = SNNLCrossEntropy(model, factor=SNNL_factor, optimize_temperature=False)

    def evaluate():
        do_eval(preds, x_test, y_test, "clean_train_clean_eval")

    train(
        sess,
        loss,
        x_train,
        y_train,
        evaluate=evaluate,
        args=train_params,
        rng=rng,
        var_list=model.get_params(),
    )
    do_eval(preds, x_train, y_train, "train_clean_train_clean_eval")

    def imscatter(points, images, ax=None, zoom=1, cmap="hot"):
        # Scatter-plot the given images at the given 2D coordinates.
        if ax is None:
            ax = plt.gca()
        artists = []
        i = 0
        if not isinstance(cmap, list):
            cmap = [cmap] * len(points)
        for x0, y0 in points:
            # Normalize each image to [0, 1] before rendering.
            transformed = (images[i] - np.min(images[i])) / (
                np.max(images[i]) - np.min(images[i])
            )
            im = OffsetImage(transformed[:, :, 0], zoom=zoom, cmap=cmap[i])
            ab = AnnotationBbox(im, (x0, y0), xycoords="data", frameon=False)
            artists.append(ax.add_artist(ab))
            i += 1
        ax.update_datalim(np.column_stack(np.transpose(points)))
        ax.autoscale()
        ax.get_xaxis().set_ticks([])
        ax.get_yaxis().set_ticks([])
        return artists

    # TSNE of the sign of the adversarial gradients on one test batch.
    adv_grads = tf.sign(tf.gradients(cross_entropy_loss.fprop(x, y), x))
    feed_dict = {x: x_test[:batch_size], y: y_test[:batch_size]}
    adv_grads_val = sess.run(adv_grads, feed_dict=feed_dict)
    adv_grads_val = np.reshape(adv_grads_val, (batch_size, img_rows * img_cols))
    X_embedded = TSNE(n_components=2, verbose=0).fit_transform(adv_grads_val)
    plt.figure(num=None, figsize=(50, 50), dpi=40, facecolor="w", edgecolor="k")
    plt.title(
        "TSNE of Sign of Adv Gradients, SNNLCrossEntropy Model, factor:"
        # Bug fix: use the function's own parameter; the old code read
        # FLAGS.SNNL_factor, which breaks when called without parsed flags.
        + str(SNNL_factor),
        fontsize=42,
    )
    imscatter(X_embedded, x_test[:batch_size], zoom=2, cmap="Purples")
    plt.savefig(
        output_dir + "adversarial_gradients_SNNL_factor_" + str(SNNL_factor) + ".png"
    )
    # Bug fix: the docstring promises an AccuracyReport, but it was never
    # returned; callers ignoring the return value are unaffected.
    return report
def main(argv=None):
    """Entry point for tf.app.run: train using the flag-configured settings."""
    SNNL_example(
        nb_epochs=FLAGS.nb_epochs,
        batch_size=FLAGS.batch_size,
        learning_rate=FLAGS.learning_rate,
        nb_filters=FLAGS.nb_filters,
        SNNL_factor=FLAGS.SNNL_factor,
        output_dir=FLAGS.output_dir,
    )
if __name__ == "__main__":
    # Command-line flags mirror the module-level default constants above.
    flags.DEFINE_integer("nb_filters", NB_FILTERS, "Model size multiplier")
    flags.DEFINE_integer("nb_epochs", NB_EPOCHS, "Number of epochs to train model")
    flags.DEFINE_integer("batch_size", BATCH_SIZE, "Size of training batches")
    flags.DEFINE_float(
        "SNNL_factor", SNNL_FACTOR, "Multiplier for Soft Nearest Neighbor Loss"
    )
    flags.DEFINE_float("learning_rate", LEARNING_RATE, "Learning rate for training")
    flags.DEFINE_string("output_dir", OUTPUT_DIR, "output directory for saving figures")
    tf.app.run()
|
leotrs/graph_homotopy | refs/heads/master | generate_graphs.py | 1 | """
generate_graphs.py
------------------
Generate small synthetic graphs whose complete CLD will be computed.
"""
import cycles
import numpy as np
import networkx as nx
from numpy import arange
def is_valid(graph):
    """Return whether the graph is valid to run experiments on:
    connected, with a fundamental group rank below 50."""
    rank = cycles.fundamental_group_rank(graph)
    # return nx.density(graph) < 0.3 and nx.is_connected(graph) and rank < 50
    return nx.is_connected(graph) and rank < 50
def save_graph(graph, filename):
    """Save the graph to the given path.

    filename should be the name of the target file, without the format
    extension.  Only the largest connected component is written, as a
    dense adjacency matrix of floats.
    """
    # NOTE(review): nx.connected_component_subgraphs was removed in
    # NetworkX 2.4 -- confirm the pinned networkx version supports it.
    component = max(nx.connected_component_subgraphs(graph), key=len)
    matrix = nx.adjacency_matrix(component).A
    np.savetxt(filename + '.txt', matrix, fmt='%1.1f')
def generate_erdos_renyi():
    """Generate small synthetic ER graphs, sweeping node count and edge
    probability, saving 20 valid samples per parameter combination."""
    for num_nodes in range(10, 31, 5):
        for prob in arange(0.05, 0.4, 0.05):
            for i in range(20):
                graph = nx.erdos_renyi_graph(num_nodes, prob)
                if is_valid(graph):
                    rank = cycles.fundamental_group_rank(graph)
                    # prob is scaled by 1000 to keep the filename integral.
                    name = 'data/ER_N={}_p={}_R={}_i={}'.format(num_nodes, int(prob * 1000), rank, i)
                    save_graph(graph, name)
def generate_barabasi_albert():
    """Generate small synthetic BA graphs, sweeping node count and the
    number of edges attached per step."""
    for num_nodes in range(10, 31, 5):
        for edges_per_step in range(2, 6):
            for i in range(20):
                graph = nx.barabasi_albert_graph(num_nodes, edges_per_step)
                if is_valid(graph):
                    rank = cycles.fundamental_group_rank(graph)
                    name = 'data/BA_N={}_m={}_R={}_i={}'.format(num_nodes, edges_per_step, rank, i)
                    save_graph(graph, name)
def generate_watts_strogatz():
    """Generate small synthetic WS graphs, sweeping node count, ring
    degree and rewiring probability."""
    for num_nodes in range(10, 31, 5):
        for degree in [2, 4]:
            for prob in arange(0.05, 0.4, 0.05):
                for i in range(20):
                    graph = nx.watts_strogatz_graph(num_nodes, degree, prob)
                    if is_valid(graph):
                        rank = cycles.fundamental_group_rank(graph)
                        # prob is scaled by 1000 to keep the filename integral.
                        name = 'data/WS_N={}_d={}_p={}_R={}_i={}'.format(num_nodes, degree, int(prob * 1000), rank, i)
                        save_graph(graph, name)
def generate_other():
    """Generate other small graphs (classic named networks)."""
    # Both named graphs get identical treatment: validate, then save under
    # a name encoding the family, size and fundamental-group rank.
    named_graphs = [
        ('florentine', nx.florentine_families_graph()),
        ('karate', nx.karate_club_graph()),
    ]
    for label, graph in named_graphs:
        if is_valid(graph):
            rank = cycles.fundamental_group_rank(graph)
            filename = 'data/{}_N={}_R={}'.format(label, len(graph), rank)
            save_graph(graph, filename)
def main():
    """Generate small graphs of different kinds (ER, BA, WS and named)."""
    generate_erdos_renyi()
    generate_barabasi_albert()
    generate_watts_strogatz()
    generate_other()
# Run the full generation sweep when executed as a script.
if __name__ == '__main__':
    main()
|
UIKit0/shaka-player | refs/heads/master | third_party/gjslint/closure_linter-2.3.13/closure_linter/common/erroraccumulator.py | 264 | #!/usr/bin/env python
#
# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Linter error handler class that accumulates an array of errors."""
__author__ = ('robbyw@google.com (Robert Walker)',
'ajp@google.com (Andy Perelson)')
from closure_linter.common import errorhandler
class ErrorAccumulator(errorhandler.ErrorHandler):
  """Error handler that simply collects every reported error in a list."""

  def __init__(self):
    """Start with an empty accumulator."""
    self._errors = []

  def HandleError(self, error):
    """Record the given error for later retrieval.

    Args:
      error: The error object
    """
    self._errors.append(error)

  def GetErrors(self):
    """Return the errors reported so far, in arrival order.

    Returns:
      A sequence of errors.
    """
    return self._errors
|
caguado/boinc-v2 | refs/heads/master | test/test_exit.py | 24 | #!/usr/bin/env python
## $Id$
# Make sure server hears that client exited with nonzero status.
from test_uc import *
class WorkExit(WorkUC):
    """Work unit whose template makes the app exit with a nonzero status."""
    def __init__(self):
        WorkUC.__init__(self)
        # NOTE(review): the nonzero-exit behavior comes from this template
        # file, which is not visible here -- confirm against the template.
        self.wu_template = "uc_exit_wu"
class ResultExit(ResultUCError):
    """Expected error result: stderr must mention the nonzero exit code."""
    def __init__(self):
        ResultUCError.__init__(self)
        # Validation scans the reported stderr for this message fragment.
        self.stderr_out.append('<message>process exited with a non-zero exit code')
class ProjectExit(ProjectUC):
    """Project that runs WorkExit and verifies the client reported the error."""
    def __init__(self):
        ProjectUC.__init__(self, short_name='test_exit', works=[WorkExit()])
    def check(self):
        # The client should have reported the nonzero exit as an error result.
        self.check_client_error(ResultExit())
if __name__ == '__main__':
    # Announce the test, build/run the project, then validate all results.
    test_msg("application exit report mechanism")
    ProjectExit()
    run_check_all()
|
bkpathak/HackerRank-Problems | refs/heads/master | collections/binary_trees/identical_tree.py | 2 | class Node(object):
    def __init__(self,x):
        """Create a leaf node holding value ``x``."""
        self.val = x
        # Child links start empty; the caller wires them up after construction.
        self.left = None
        self.right = None
def is_identical(root1, root2):
    """Return True if two binary trees have identical structure and values.

    Args:
        root1: root node of the first tree (or None for an empty tree).
        root2: root node of the second tree (or None for an empty tree).

    Returns:
        True when both trees have the same shape and equal ``val`` at every
        position; False otherwise.
    """
    # Fix: compare against None with ``is``/``is not`` (PEP 8, E711);
    # ``== None`` can misbehave for node types that override __eq__.
    if root1 is None and root2 is None:
        return True
    if root1 is not None and root2 is not None:
        return (root1.val == root2.val and
                is_identical(root1.left, root2.left) and
                is_identical(root1.right, root2.right))
    # Exactly one side is None -> the shapes differ.
    return False
# Build two small trees and report whether they are identical.
tree1 = Node(1)
tree1.left= Node(2)
tree1.right = Node(3)
tree1.left.left = Node(4)
tree2 = Node(1)
tree2.left = Node(2)
tree2.right= Node(10)
# NOTE(review): this re-assigns tree1.left.left a second time; it was
# probably meant to be ``tree2.left.left = Node(4)`` -- confirm intent.
# (The printed result is unchanged either way, since the roots' right
# children already differ: 3 vs 10.)
tree1.left.left = Node(4)
if is_identical(tree1,tree2):
    print("Tree are IDENTICAL!!")
else:
    print("Tree are NOT IDENTICAL")
|
ankit-collective/ggrc-core | refs/heads/master | src/ggrc/login/common.py | 4 | """Handle the interface to GGRC models for all login methods.
"""
from ggrc import db
from ggrc.models.person import Person
def find_user_by_id(id):
    """Find Person object by some ``id``.

    Note that ``id`` need not be Person().id, but should match the value
    returned by ``Person().get_id()``.

    Returns the matching Person, or None if there is no match.
    """
    # NOTE(review): the parameter shadows the builtin ``id``; renaming it
    # would break keyword callers, so it is left as-is.
    return Person.query.filter(Person.id==int(id)).first()
def find_user_by_email(email):
    """Return the Person with the given email address, or None if absent."""
    matching = Person.query.filter(Person.email == email)
    return matching.first()
def create_user(email, **kwargs):
    """Create, persist, and return a new Person with the given email.

    Extra keyword arguments are passed straight through to the Person
    constructor.
    """
    new_person = Person(email=email, **kwargs)
    db.session.add(new_person)
    db.session.commit()
    return new_person
def find_or_create_user_by_email(email, **kwargs):
user = find_user_by_email(email)
if not user:
user = create_user(email, **kwargs)
return user
def get_next_url(request, default_url):
    """Return the 'next' URL from the request's query string, if any.

    Falls back to ``default_url`` when no 'next' parameter is present.

    NOTE(review): the value is returned unvalidated, so a crafted 'next'
    parameter enables an open redirect -- consider restricting it to
    same-origin URLs.
    """
    return request.args.get('next', default_url)
|
martinwicke/tensorflow | refs/heads/master | tensorflow/python/kernel_tests/fifo_queue_test.py | 7 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.data_flow_ops.FIFOQueue."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import re
import time
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
class FIFOQueueTest(tf.test.TestCase):
  """Tests for tf.FIFOQueue: op construction and enqueue/dequeue behavior."""
  def testConstructor(self):
    # The queue op's NodeDef must carry the dtype/shape/capacity attrs.
    with tf.Graph().as_default():
      q = tf.FIFOQueue(10, tf.float32, name="Q")
      self.assertTrue(isinstance(q.queue_ref, tf.Tensor))
      self.assertProtoEquals("""
        name:'Q' op:'FIFOQueue'
        attr { key: 'component_types' value { list { type: DT_FLOAT } } }
        attr { key: 'shapes' value { list {} } }
        attr { key: 'capacity' value { i: 10 } }
        attr { key: 'container' value { s: '' } }
        attr { key: 'shared_name' value { s: '' } }
        """, q.queue_ref.op.node_def)
  def testMultiQueueConstructor(self):
    # Multiple component dtypes and an explicit shared_name.
    with tf.Graph().as_default():
      q = tf.FIFOQueue(5, (tf.int32, tf.float32), shared_name="foo", name="Q")
      self.assertTrue(isinstance(q.queue_ref, tf.Tensor))
      self.assertProtoEquals("""
        name:'Q' op:'FIFOQueue'
        attr { key: 'component_types' value { list {
          type: DT_INT32 type : DT_FLOAT
        } } }
        attr { key: 'shapes' value { list {} } }
        attr { key: 'capacity' value { i: 5 } }
        attr { key: 'container' value { s: '' } }
        attr { key: 'shared_name' value { s: 'foo' } }
        """, q.queue_ref.op.node_def)
  def testConstructorWithShapes(self):
    # Per-component static shapes are recorded in the 'shapes' attr.
    with tf.Graph().as_default():
      q = tf.FIFOQueue(5, (tf.int32, tf.float32),
                       shapes=(tf.TensorShape([1, 1, 2, 3]),
                               tf.TensorShape([5, 8])), name="Q")
      self.assertTrue(isinstance(q.queue_ref, tf.Tensor))
      self.assertProtoEquals("""
        name:'Q' op:'FIFOQueue'
        attr { key: 'component_types' value { list {
          type: DT_INT32 type : DT_FLOAT
        } } }
        attr { key: 'shapes' value { list {
          shape { dim { size: 1 }
                  dim { size: 1 }
                  dim { size: 2 }
                  dim { size: 3 } }
          shape { dim { size: 5 }
                  dim { size: 8 } }
        } } }
        attr { key: 'capacity' value { i: 5 } }
        attr { key: 'container' value { s: '' } }
        attr { key: 'shared_name' value { s: '' } }
        """, q.queue_ref.op.node_def)
  def testEnqueue(self):
    with self.test_session():
      q = tf.FIFOQueue(10, tf.float32)
      enqueue_op = q.enqueue((10.0,))
      enqueue_op.run()
  def testEnqueueHalf(self):
    # Same as testEnqueue, but for the float16 dtype.
    with self.test_session():
      q = tf.FIFOQueue(10, tf.float16)
      enqueue_op = q.enqueue((10.0,))
      enqueue_op.run()
  def testEnqueueWithShape(self):
    # A value that does not match the declared (3, 2) shape is rejected
    # at graph-construction time.
    with self.test_session():
      q = tf.FIFOQueue(10, tf.float32, shapes=(3, 2))
      enqueue_correct_op = q.enqueue(([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]],))
      enqueue_correct_op.run()
      with self.assertRaises(ValueError):
        q.enqueue(([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]],))
      self.assertEqual(1, q.size().eval())
  def testEnqueueManyWithShape(self):
    with self.test_session():
      q = tf.FIFOQueue(10, [tf.int32, tf.int32],
                       shapes=[(), (2,)])
      q.enqueue_many([[1, 2, 3, 4], [[1, 1], [2, 2], [3, 3], [4, 4]]]).run()
      self.assertEqual(4, q.size().eval())
  def testEnqueueDictWithoutNames(self):
    # Dict-valued enqueue requires the queue to be built with names.
    with self.test_session():
      q = tf.FIFOQueue(10, tf.float32)
      with self.assertRaisesRegexp(ValueError, "must have names"):
        q.enqueue({"a": 12.0})
      with self.assertRaisesRegexp(ValueError, "must have names"):
        q.enqueue_many({"a": [12.0, 13.0]})
  def testParallelEnqueue(self):
    with self.test_session() as sess:
      q = tf.FIFOQueue(10, tf.float32)
      elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
      enqueue_ops = [q.enqueue((x,)) for x in elems]
      dequeued_t = q.dequeue()
      # Run one producer thread for each element in elems.
      def enqueue(enqueue_op):
        sess.run(enqueue_op)
      threads = [self.checkedThread(target=enqueue, args=(e,))
                 for e in enqueue_ops]
      for thread in threads:
        thread.start()
      for thread in threads:
        thread.join()
      # Dequeue every element using a single thread.
      results = []
      for _ in xrange(len(elems)):
        results.append(dequeued_t.eval())
      # Order is unspecified across producer threads, so compare as sets.
      self.assertItemsEqual(elems, results)
  def testParallelDequeue(self):
    with self.test_session() as sess:
      q = tf.FIFOQueue(10, tf.float32)
      elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
      enqueue_ops = [q.enqueue((x,)) for x in elems]
      dequeued_t = q.dequeue()
      # Enqueue every element using a single thread.
      for enqueue_op in enqueue_ops:
        enqueue_op.run()
      # Run one consumer thread for each element in elems.
      results = []
      def dequeue():
        results.append(sess.run(dequeued_t))
      threads = [self.checkedThread(target=dequeue) for _ in enqueue_ops]
      for thread in threads:
        thread.start()
      for thread in threads:
        thread.join()
      self.assertItemsEqual(elems, results)
  def testDequeue(self):
    # Single-threaded FIFO order check.
    with self.test_session():
      q = tf.FIFOQueue(10, tf.float32)
      elems = [10.0, 20.0, 30.0]
      enqueue_ops = [q.enqueue((x,)) for x in elems]
      dequeued_t = q.dequeue()
      for enqueue_op in enqueue_ops:
        enqueue_op.run()
      for i in xrange(len(elems)):
        vals = dequeued_t.eval()
        self.assertEqual([elems[i]], vals)
  def testDequeueHalf(self):
    # Same as testDequeue, but for the float16 dtype.
    with self.test_session():
      q = tf.FIFOQueue(10, tf.float16)
      elems = [10.0, 20.0, 30.0]
      enqueue_ops = [q.enqueue((x,)) for x in elems]
      dequeued_t = q.dequeue()
      for enqueue_op in enqueue_ops:
        enqueue_op.run()
      for i in xrange(len(elems)):
        vals = dequeued_t.eval()
        self.assertEqual([elems[i]], vals)
  def testEnqueueAndBlockingDequeue(self):
    # A dequeue on an empty queue blocks until a producer supplies data.
    with self.test_session() as sess:
      q = tf.FIFOQueue(3, tf.float32)
      elems = [10.0, 20.0, 30.0]
      enqueue_ops = [q.enqueue((x,)) for x in elems]
      dequeued_t = q.dequeue()
      def enqueue():
        # The enqueue_ops should run after the dequeue op has blocked.
        # TODO(mrry): Figure out how to do this without sleeping.
        time.sleep(0.1)
        for enqueue_op in enqueue_ops:
          sess.run(enqueue_op)
      results = []
      def dequeue():
        for _ in xrange(len(elems)):
          results.append(sess.run(dequeued_t))
      enqueue_thread = self.checkedThread(target=enqueue)
      dequeue_thread = self.checkedThread(target=dequeue)
      enqueue_thread.start()
      dequeue_thread.start()
      enqueue_thread.join()
      dequeue_thread.join()
      for elem, result in zip(elems, results):
        self.assertEqual([elem], result)
  def testMultiEnqueueAndDequeue(self):
    # Tuples of (int32, float32) components stay paired through the queue.
    with self.test_session() as sess:
      q = tf.FIFOQueue(10, (tf.int32, tf.float32))
      elems = [(5, 10.0), (10, 20.0), (15, 30.0)]
      enqueue_ops = [q.enqueue((x, y)) for x, y in elems]
      dequeued_t = q.dequeue()
      for enqueue_op in enqueue_ops:
        enqueue_op.run()
      for i in xrange(len(elems)):
        x_val, y_val = sess.run(dequeued_t)
        x, y = elems[i]
        self.assertEqual([x], x_val)
        self.assertEqual([y], y_val)
  def testQueueSizeEmpty(self):
    with self.test_session():
      q = tf.FIFOQueue(10, tf.float32)
      self.assertEqual([0], q.size().eval())
  def testQueueSizeAfterEnqueueAndDequeue(self):
    with self.test_session():
      q = tf.FIFOQueue(10, tf.float32)
      enqueue_op = q.enqueue((10.0,))
      dequeued_t = q.dequeue()
      size = q.size()
      # size is a scalar tensor.
      self.assertEqual([], size.get_shape())
      enqueue_op.run()
      self.assertEqual(1, size.eval())
      dequeued_t.op.run()
      self.assertEqual(0, size.eval())
  def testEnqueueMany(self):
    # Running the same enqueue_many op twice appends the batch twice.
    with self.test_session():
      q = tf.FIFOQueue(10, tf.float32)
      elems = [10.0, 20.0, 30.0, 40.0]
      enqueue_op = q.enqueue_many((elems,))
      dequeued_t = q.dequeue()
      enqueue_op.run()
      enqueue_op.run()
      for i in range(8):
        vals = dequeued_t.eval()
        self.assertEqual([elems[i % 4]], vals)
  def testEmptyEnqueueMany(self):
    # A zero-length batch is a no-op.
    with self.test_session():
      q = tf.FIFOQueue(10, tf.float32)
      empty_t = tf.constant([], dtype=tf.float32,
                            shape=[0, 2, 3])
      enqueue_op = q.enqueue_many((empty_t,))
      size_t = q.size()
      self.assertEqual([0], size_t.eval())
      enqueue_op.run()
      self.assertEqual([0], size_t.eval())
  def testEmptyDequeueMany(self):
    # dequeue_many(0) succeeds even on an empty queue.
    with self.test_session():
      q = tf.FIFOQueue(10, tf.float32, shapes=())
      enqueue_op = q.enqueue((10.0,))
      dequeued_t = q.dequeue_many(0)
      self.assertEqual([], dequeued_t.eval().tolist())
      enqueue_op.run()
      self.assertEqual([], dequeued_t.eval().tolist())
  def testEmptyDequeueUpTo(self):
    with self.test_session():
      q = tf.FIFOQueue(10, tf.float32, shapes=())
      enqueue_op = q.enqueue((10.0,))
      dequeued_t = q.dequeue_up_to(0)
      self.assertEqual([], dequeued_t.eval().tolist())
      enqueue_op.run()
      self.assertEqual([], dequeued_t.eval().tolist())
  def testEmptyDequeueManyWithNoShape(self):
    with self.test_session():
      q = tf.FIFOQueue(10, tf.float32)
      # Expect the operation to fail due to the shape not being constrained.
      with self.assertRaisesOpError("specified shapes"):
        q.dequeue_many(0).eval()
  def testMultiEnqueueMany(self):
    with self.test_session() as sess:
      q = tf.FIFOQueue(10, (tf.float32, tf.int32))
      float_elems = [10.0, 20.0, 30.0, 40.0]
      int_elems = [[1, 2], [3, 4], [5, 6], [7, 8]]
      enqueue_op = q.enqueue_many((float_elems, int_elems))
      dequeued_t = q.dequeue()
      enqueue_op.run()
      enqueue_op.run()
      for i in range(8):
        float_val, int_val = sess.run(dequeued_t)
        self.assertEqual(float_elems[i % 4], float_val)
        self.assertAllEqual(int_elems[i % 4], int_val)
  def testDequeueMany(self):
    with self.test_session():
      q = tf.FIFOQueue(10, tf.float32, ())
      elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
      enqueue_op = q.enqueue_many((elems,))
      dequeued_t = q.dequeue_many(4)
      enqueue_op.run()
      self.assertAllEqual(elems[0:4], dequeued_t.eval())
      self.assertAllEqual(elems[4:8], dequeued_t.eval())
  def testDequeueUpToNoBlocking(self):
    # With enough elements available, dequeue_up_to behaves like dequeue_many.
    with self.test_session():
      q = tf.FIFOQueue(10, tf.float32, ())
      elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
      enqueue_op = q.enqueue_many((elems,))
      dequeued_t = q.dequeue_up_to(4)
      enqueue_op.run()
      self.assertAllEqual(elems[0:4], dequeued_t.eval())
      self.assertAllEqual(elems[4:8], dequeued_t.eval())
  def testMultiDequeueMany(self):
    with self.test_session() as sess:
      q = tf.FIFOQueue(10, (tf.float32, tf.int32),
                       shapes=((), (2,)))
      float_elems = [
          10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
      int_elems = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10],
                   [11, 12], [13, 14], [15, 16], [17, 18], [19, 20]]
      enqueue_op = q.enqueue_many((float_elems, int_elems))
      dequeued_t = q.dequeue_many(4)
      dequeued_single_t = q.dequeue()
      enqueue_op.run()
      float_val, int_val = sess.run(dequeued_t)
      self.assertAllEqual(float_elems[0:4], float_val)
      self.assertAllEqual(int_elems[0:4], int_val)
      # dequeue_many with declared shapes yields fully-defined static shapes.
      self.assertEqual(float_val.shape, dequeued_t[0].get_shape())
      self.assertEqual(int_val.shape, dequeued_t[1].get_shape())
      float_val, int_val = sess.run(dequeued_t)
      self.assertAllEqual(float_elems[4:8], float_val)
      self.assertAllEqual(int_elems[4:8], int_val)
      float_val, int_val = sess.run(dequeued_single_t)
      self.assertAllEqual(float_elems[8], float_val)
      self.assertAllEqual(int_elems[8], int_val)
      self.assertEqual(float_val.shape, dequeued_single_t[0].get_shape())
      self.assertEqual(int_val.shape, dequeued_single_t[1].get_shape())
  def testMultiDequeueUpToNoBlocking(self):
    with self.test_session() as sess:
      q = tf.FIFOQueue(10, (tf.float32, tf.int32),
                       shapes=((), (2,)))
      float_elems = [
          10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
      int_elems = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10],
                   [11, 12], [13, 14], [15, 16], [17, 18], [19, 20]]
      enqueue_op = q.enqueue_many((float_elems, int_elems))
      dequeued_t = q.dequeue_up_to(4)
      enqueue_op.run()
      float_val, int_val = sess.run(dequeued_t)
      self.assertAllEqual(float_elems[0:4], float_val)
      self.assertAllEqual(int_elems[0:4], int_val)
      # Unlike dequeue_many, the batch dimension is statically unknown.
      self.assertEqual([None], dequeued_t[0].get_shape().as_list())
      self.assertEqual([None, 2], dequeued_t[1].get_shape().as_list())
      float_val, int_val = sess.run(dequeued_t)
      self.assertAllEqual(float_elems[4:8], float_val)
      self.assertAllEqual(int_elems[4:8], int_val)
  def testHighDimension(self):
    # Rank-5 elements (10 x 4x4x4x4) round-trip intact.
    with self.test_session():
      q = tf.FIFOQueue(10, tf.int32, (4, 4, 4, 4))
      elems = np.array([[[[[x] * 4] * 4] * 4] * 4 for x in range(10)], np.int32)
      enqueue_op = q.enqueue_many((elems,))
      dequeued_t = q.dequeue_many(10)
      enqueue_op.run()
      self.assertAllEqual(dequeued_t.eval(), elems)
  def testEnqueueWrongShape(self):
    # Shape mismatches are caught at graph-construction time (no session).
    q = tf.FIFOQueue(10, (tf.int32, tf.int32), ((), (2)))
    with self.assertRaises(ValueError):
      q.enqueue(([1, 2], [2, 2]))
    with self.assertRaises(ValueError):
      q.enqueue_many((7, [[1, 2], [3, 4], [5, 6]]))
  def testBatchSizeMismatch(self):
    # All components of an enqueue_many must share the same batch size.
    q = tf.FIFOQueue(10, (tf.int32, tf.int32, tf.int32), ((), (), ()))
    with self.assertRaises(ValueError):
      q.enqueue_many(([1, 2, 3], [1, 2], [1, 2, 3]))
    with self.assertRaises(ValueError):
      q.enqueue_many(([1, 2, 3], [1, 2], tf.placeholder(tf.int32)))
    with self.assertRaises(ValueError):
      q.enqueue_many((tf.placeholder(tf.int32), [1, 2], [1, 2, 3]))
  def testEnqueueManyEmptyTypeConversion(self):
    # Empty Python lists are converted to each component's declared dtype.
    q = tf.FIFOQueue(10, (tf.int32, tf.float32), ((), ()))
    enq = q.enqueue_many(([], []))
    self.assertEqual(tf.int32, enq.inputs[1].dtype)
    self.assertEqual(tf.float32, enq.inputs[2].dtype)
  def testEnqueueWrongType(self):
    q = tf.FIFOQueue(10, (tf.int32, tf.float32), ((), ()))
    with self.assertRaises(ValueError):
      q.enqueue((tf.placeholder(tf.int32), tf.placeholder(tf.int32)))
    with self.assertRaises(ValueError):
      q.enqueue_many((tf.placeholder(tf.int32), tf.placeholder(tf.int32)))
  def testEnqueueWrongShapeAtRuntime(self):
    # A placeholder defers the shape check to run time.
    with self.test_session() as sess:
      q = tf.FIFOQueue(10, (tf.int32, tf.int32), ((2, 2), (3, 3)))
      elems_ok = np.array([1] * 4).reshape((2, 2)).astype(np.int32)
      elems_bad = tf.placeholder(tf.int32)
      enqueue_op = q.enqueue((elems_ok, elems_bad))
      with self.assertRaisesRegexp(
          tf.errors.InvalidArgumentError, r"Expected \[3,3\], got \[3,4\]"):
        sess.run([enqueue_op],
                 feed_dict={elems_bad: np.array([1] * 12).reshape((3, 4))})
  def testEnqueueDequeueManyWrongShape(self):
    with self.test_session() as sess:
      q = tf.FIFOQueue(10, (tf.int32, tf.int32), ((2, 2), (3, 3)))
      elems_ok = np.array([1] * 8).reshape((2, 2, 2)).astype(np.int32)
      elems_bad = tf.placeholder(tf.int32)
      enqueue_op = q.enqueue_many((elems_ok, elems_bad))
      dequeued_t = q.dequeue_many(2)
      with self.assertRaisesRegexp(
          tf.errors.InvalidArgumentError,
          "Shape mismatch in tuple component 1. "
          r"Expected \[2,3,3\], got \[2,3,4\]"):
        sess.run([enqueue_op],
                 feed_dict={elems_bad: np.array([1] * 24).reshape((2, 3, 4))})
        dequeued_t.eval()
  def testParallelEnqueueMany(self):
    with self.test_session() as sess:
      q = tf.FIFOQueue(1000, tf.float32, shapes=())
      elems = [10.0 * x for x in range(100)]
      enqueue_op = q.enqueue_many((elems,))
      dequeued_t = q.dequeue_many(1000)
      # Enqueue 100 items in parallel on 10 threads.
      def enqueue():
        sess.run(enqueue_op)
      threads = [self.checkedThread(target=enqueue) for _ in range(10)]
      for thread in threads:
        thread.start()
      for thread in threads:
        thread.join()
      self.assertItemsEqual(dequeued_t.eval(), elems * 10)
  def testParallelDequeueMany(self):
    with self.test_session() as sess:
      q = tf.FIFOQueue(1000, tf.float32, shapes=())
      elems = [10.0 * x for x in range(1000)]
      enqueue_op = q.enqueue_many((elems,))
      dequeued_t = q.dequeue_many(100)
      enqueue_op.run()
      # Dequeue 100 items in parallel on 10 threads.
      dequeued_elems = []
      def dequeue():
        dequeued_elems.extend(sess.run(dequeued_t))
      threads = [self.checkedThread(target=dequeue) for _ in range(10)]
      for thread in threads:
        thread.start()
      for thread in threads:
        thread.join()
      self.assertItemsEqual(elems, dequeued_elems)
  def testParallelDequeueUpTo(self):
    with self.test_session() as sess:
      q = tf.FIFOQueue(1000, tf.float32, shapes=())
      elems = [10.0 * x for x in range(1000)]
      enqueue_op = q.enqueue_many((elems,))
      close_op = q.close()
      # 101 does not divide 1000, so the last batches rely on
      # dequeue_up_to's partial-batch behavior on a closed queue.
      dequeued_t = q.dequeue_up_to(101)
      enqueue_op.run()
      close_op.run()
      # Dequeue up to 101 items in parallel on 10 threads, from closed queue.
      dequeued_elems = []
      def dequeue():
        dequeued_elems.extend(sess.run(dequeued_t))
      threads = [self.checkedThread(target=dequeue) for _ in range(10)]
      for thread in threads:
        thread.start()
      for thread in threads:
        thread.join()
      self.assertItemsEqual(elems, dequeued_elems)
  def testParallelEnqueueAndDequeue(self):
    with self.test_session() as sess:
      q = tf.FIFOQueue(50, tf.float32, shapes=())
      initial_elements = [10.0] * 49
      q.enqueue_many((initial_elements,)).run()
      enqueue_op = q.enqueue((20.0,))
      dequeued_t = q.dequeue()
      def enqueue():
        for _ in xrange(100):
          sess.run(enqueue_op)
      def dequeue():
        for _ in xrange(100):
          self.assertTrue(sess.run(dequeued_t) in (10.0, 20.0))
      enqueue_threads = [self.checkedThread(target=enqueue) for _ in range(10)]
      dequeue_threads = [self.checkedThread(target=dequeue) for _ in range(10)]
      for enqueue_thread in enqueue_threads:
        enqueue_thread.start()
      for dequeue_thread in dequeue_threads:
        dequeue_thread.start()
      for enqueue_thread in enqueue_threads:
        enqueue_thread.join()
      for dequeue_thread in dequeue_threads:
        dequeue_thread.join()
      # Dequeue the initial count of elements to clean up.
      cleanup_elems = q.dequeue_many(49).eval()
      for elem in cleanup_elems:
        self.assertTrue(elem in (10.0, 20.0))
  def testMixtureOfEnqueueAndEnqueueMany(self):
    # A single consumer must observe 0..249 in order regardless of whether
    # the producer used enqueue or enqueue_many for each element.
    with self.test_session() as sess:
      q = tf.FIFOQueue(10, tf.int32, shapes=())
      enqueue_placeholder = tf.placeholder(tf.int32, shape=())
      enqueue_op = q.enqueue((enqueue_placeholder,))
      enqueuemany_placeholder = tf.placeholder(
          tf.int32, shape=(None,))
      enqueuemany_op = q.enqueue_many((enqueuemany_placeholder,))
      dequeued_t = q.dequeue()
      close_op = q.close()
      def dequeue():
        for i in xrange(250):
          self.assertEqual(i, sess.run(dequeued_t))
      dequeue_thread = self.checkedThread(target=dequeue)
      dequeue_thread.start()
      elements_enqueued = 0
      while elements_enqueued < 250:
        # With equal probability, run Enqueue or enqueue_many.
        if random.random() > 0.5:
          enqueue_op.run({enqueue_placeholder: elements_enqueued})
          elements_enqueued += 1
        else:
          count = random.randint(0, min(20, 250 - elements_enqueued))
          range_to_enqueue = np.arange(elements_enqueued,
                                       elements_enqueued + count,
                                       dtype=np.int32)
          enqueuemany_op.run({enqueuemany_placeholder: range_to_enqueue})
          elements_enqueued += count
      close_op.run()
      dequeue_thread.join()
      self.assertEqual(0, q.size().eval())
  def testMixtureOfDequeueAndDequeueMany(self):
    # Mirror of the test above: one producer, consumer mixes dequeue and
    # dequeue_many and must still see 0..249 in order.
    with self.test_session() as sess:
      q = tf.FIFOQueue(10, tf.int32, shapes=())
      enqueue_op = q.enqueue_many((np.arange(250, dtype=np.int32),))
      dequeued_t = q.dequeue()
      count_placeholder = tf.placeholder(tf.int32, shape=())
      dequeuemany_t = q.dequeue_many(count_placeholder)
      def enqueue():
        sess.run(enqueue_op)
      enqueue_thread = self.checkedThread(target=enqueue)
      enqueue_thread.start()
      elements_dequeued = 0
      while elements_dequeued < 250:
        # With equal probability, run Dequeue or dequeue_many.
        if random.random() > 0.5:
          self.assertEqual(elements_dequeued, dequeued_t.eval())
          elements_dequeued += 1
        else:
          count = random.randint(0, min(20, 250 - elements_dequeued))
          expected_range = np.arange(elements_dequeued,
                                     elements_dequeued + count,
                                     dtype=np.int32)
          self.assertAllEqual(
              expected_range, dequeuemany_t.eval({count_placeholder: count}))
          elements_dequeued += count
      q.close().run()
      enqueue_thread.join()
      self.assertEqual(0, q.size().eval())
  def testBlockingDequeueMany(self):
    # dequeue_many(4) on an empty queue blocks until 4 elements arrive.
    with self.test_session() as sess:
      q = tf.FIFOQueue(10, tf.float32, ())
      elems = [10.0, 20.0, 30.0, 40.0]
      enqueue_op = q.enqueue_many((elems,))
      dequeued_t = q.dequeue_many(4)
      dequeued_elems = []
      def enqueue():
        # The enqueue_op should run after the dequeue op has blocked.
        # TODO(mrry): Figure out how to do this without sleeping.
        time.sleep(0.1)
        sess.run(enqueue_op)
      def dequeue():
        dequeued_elems.extend(sess.run(dequeued_t).tolist())
      enqueue_thread = self.checkedThread(target=enqueue)
      dequeue_thread = self.checkedThread(target=dequeue)
      enqueue_thread.start()
      dequeue_thread.start()
      enqueue_thread.join()
      dequeue_thread.join()
      self.assertAllEqual(elems, dequeued_elems)
  def testBlockingDequeueUpTo(self):
    with self.test_session() as sess:
      q = tf.FIFOQueue(10, tf.float32, ())
      elems = [10.0, 20.0, 30.0, 40.0]
      enqueue_op = q.enqueue_many((elems,))
      dequeued_t = q.dequeue_up_to(4)
      dequeued_elems = []
      def enqueue():
        # The enqueue_op should run after the dequeue op has blocked.
        # TODO(mrry): Figure out how to do this without sleeping.
        time.sleep(0.1)
        sess.run(enqueue_op)
      def dequeue():
        dequeued_elems.extend(sess.run(dequeued_t).tolist())
      enqueue_thread = self.checkedThread(target=enqueue)
      dequeue_thread = self.checkedThread(target=dequeue)
      enqueue_thread.start()
      dequeue_thread.start()
      enqueue_thread.join()
      dequeue_thread.join()
      self.assertAllEqual(elems, dequeued_elems)
  def testDequeueManyWithTensorParameter(self):
    with self.test_session():
      # Define a first queue that contains integer counts.
      dequeue_counts = [random.randint(1, 10) for _ in range(100)]
      count_q = tf.FIFOQueue(100, tf.int32, ())
      enqueue_counts_op = count_q.enqueue_many((dequeue_counts,))
      total_count = sum(dequeue_counts)
      # Define a second queue that contains total_count elements.
      elems = [random.randint(0, 100) for _ in range(total_count)]
      q = tf.FIFOQueue(total_count, tf.int32, ())
      enqueue_elems_op = q.enqueue_many((elems,))
      # Define a subgraph that first dequeues a count, then DequeuesMany
      # that number of elements.
      dequeued_t = q.dequeue_many(count_q.dequeue())
      enqueue_counts_op.run()
      enqueue_elems_op.run()
      dequeued_elems = []
      for _ in dequeue_counts:
        dequeued_elems.extend(dequeued_t.eval())
      self.assertEqual(elems, dequeued_elems)
  def testDequeueFromClosedQueue(self):
    # A closed queue still serves its remaining elements, then raises.
    with self.test_session():
      q = tf.FIFOQueue(10, tf.float32)
      elems = [10.0, 20.0, 30.0, 40.0]
      enqueue_op = q.enqueue_many((elems,))
      close_op = q.close()
      dequeued_t = q.dequeue()
      enqueue_op.run()
      close_op.run()
      for elem in elems:
        self.assertEqual([elem], dequeued_t.eval())
      # Expect the operation to fail due to the queue being closed.
      with self.assertRaisesRegexp(tf.errors.OutOfRangeError,
                                   "is closed and has insufficient"):
        dequeued_t.eval()
  def testBlockingDequeueFromClosedQueue(self):
    with self.test_session() as sess:
      q = tf.FIFOQueue(10, tf.float32)
      elems = [10.0, 20.0, 30.0, 40.0]
      enqueue_op = q.enqueue_many((elems,))
      close_op = q.close()
      dequeued_t = q.dequeue()
      enqueue_op.run()
      def dequeue():
        for elem in elems:
          self.assertEqual([elem], sess.run(dequeued_t))
        # Expect the operation to fail due to the queue being closed.
        with self.assertRaisesRegexp(tf.errors.OutOfRangeError,
                                     "is closed and has insufficient"):
          sess.run(dequeued_t)
      dequeue_thread = self.checkedThread(target=dequeue)
      dequeue_thread.start()
      # The close_op should run after the dequeue_thread has blocked.
      # TODO(mrry): Figure out how to do this without sleeping.
      time.sleep(0.1)
      close_op.run()
      dequeue_thread.join()
  def testBlockingDequeueFromClosedEmptyQueue(self):
    # Closing an empty queue unblocks a pending dequeue with OutOfRange.
    with self.test_session() as sess:
      q = tf.FIFOQueue(10, tf.float32)
      close_op = q.close()
      dequeued_t = q.dequeue()
      def dequeue():
        # Expect the operation to fail due to the queue being closed.
        with self.assertRaisesRegexp(tf.errors.OutOfRangeError,
                                     "is closed and has insufficient"):
          sess.run(dequeued_t)
      dequeue_thread = self.checkedThread(target=dequeue)
      dequeue_thread.start()
      # The close_op should run after the dequeue_thread has blocked.
      # TODO(mrry): Figure out how to do this without sleeping.
      time.sleep(0.1)
      close_op.run()
      dequeue_thread.join()
  def testBlockingDequeueManyFromClosedQueue(self):
    with self.test_session() as sess:
      q = tf.FIFOQueue(10, tf.float32, ())
      elems = [10.0, 20.0, 30.0, 40.0]
      enqueue_op = q.enqueue_many((elems,))
      close_op = q.close()
      dequeued_t = q.dequeue_many(4)
      enqueue_op.run()
      def dequeue():
        self.assertAllEqual(elems, sess.run(dequeued_t))
        # Expect the operation to fail due to the queue being closed.
        with self.assertRaisesRegexp(tf.errors.OutOfRangeError,
                                     "is closed and has insufficient"):
          sess.run(dequeued_t)
      dequeue_thread = self.checkedThread(target=dequeue)
      dequeue_thread.start()
      # The close_op should run after the dequeue_thread has blocked.
      # TODO(mrry): Figure out how to do this without sleeping.
      time.sleep(0.1)
      close_op.run()
      dequeue_thread.join()
  def testBlockingDequeueManyButNotAllFromClosedQueue(self):
    # dequeue_many(3) on 4 elements: first batch succeeds, the second
    # (only 1 element left) fails once the queue is closed.
    with self.test_session() as sess:
      q = tf.FIFOQueue(10, tf.float32, ())
      elems = [10.0, 20.0, 30.0, 40.0]
      enqueue_op = q.enqueue_many((elems,))
      close_op = q.close()
      dequeued_t = q.dequeue_many(3)
      enqueue_op.run()
      def dequeue():
        self.assertAllEqual(elems[:3], sess.run(dequeued_t))
        # Expect the operation to fail due to the queue being closed.
        with self.assertRaisesRegexp(tf.errors.OutOfRangeError,
                                     "is closed and has insufficient"):
          sess.run(dequeued_t)
      dequeue_thread = self.checkedThread(target=dequeue)
      dequeue_thread.start()
      # The close_op should run after the dequeue_thread has blocked.
      # TODO(mrry): Figure out how to do this without sleeping.
      time.sleep(0.1)
      close_op.run()
      dequeue_thread.join()
  def testDequeueUpToFromClosedQueueReturnsRemainder(self):
    # Unlike dequeue_many, dequeue_up_to returns the short final batch.
    with self.test_session() as sess:
      q = tf.FIFOQueue(10, tf.float32, ())
      elems = [10.0, 20.0, 30.0, 40.0]
      enqueue_op = q.enqueue_many((elems,))
      close_op = q.close()
      dequeued_t = q.dequeue_up_to(3)
      enqueue_op.run()
      def dequeue():
        self.assertAllEqual(elems[:3], sess.run(dequeued_t))
        self.assertAllEqual(elems[3:], sess.run(dequeued_t))
      dequeue_thread = self.checkedThread(target=dequeue)
      dequeue_thread.start()
      # The close_op should run after the dequeue_thread has blocked.
      # TODO(mrry): Figure out how to do this without sleeping.
      time.sleep(0.1)
      close_op.run()
      dequeue_thread.join()
  def testEnqueueManyLargerThanCapacityWithConcurrentDequeueMany(self):
    with self.test_session() as sess:
      q = tf.FIFOQueue(4, tf.float32, ())
      elems = [10.0, 20.0, 30.0, 40.0]
      enqueue_op = q.enqueue_many((elems,))
      close_op = q.close()
      dequeued_t = q.dequeue_many(3)
      cleanup_dequeue_t = q.dequeue()
      def enqueue():
        sess.run(enqueue_op)
      def dequeue():
        self.assertAllEqual(elems[0:3], sess.run(dequeued_t))
        with self.assertRaises(tf.errors.OutOfRangeError):
          sess.run(dequeued_t)
        # The final element is still available after the failed batch.
        self.assertEqual(elems[3], sess.run(cleanup_dequeue_t))
      def close():
        sess.run(close_op)
      enqueue_thread = self.checkedThread(target=enqueue)
      enqueue_thread.start()
      dequeue_thread = self.checkedThread(target=dequeue)
      dequeue_thread.start()
      # The close_op should run after the dequeue_thread has blocked.
      # TODO(mrry): Figure out how to do this without sleeping.
      time.sleep(0.1)
      close_thread = self.checkedThread(target=close)
      close_thread.start()
      enqueue_thread.join()
      dequeue_thread.join()
      close_thread.join()
  def testClosedBlockingDequeueManyRestoresPartialBatch(self):
    with self.test_session() as sess:
      q = tf.FIFOQueue(4, (tf.float32, tf.float32), ((), ()))
      elems_a = [1.0, 2.0, 3.0]
      elems_b = [10.0, 20.0, 30.0]
      enqueue_op = q.enqueue_many((elems_a, elems_b))
      dequeued_a_t, dequeued_b_t = q.dequeue_many(4)
      cleanup_dequeue_a_t, cleanup_dequeue_b_t = q.dequeue()
      close_op = q.close()
      enqueue_op.run()
      def dequeue():
        with self.assertRaises(tf.errors.OutOfRangeError):
          sess.run([dequeued_a_t, dequeued_b_t])
      dequeue_thread = self.checkedThread(target=dequeue)
      dequeue_thread.start()
      # The close_op should run after the dequeue_thread has blocked.
      # TODO(mrry): Figure out how to do this without sleeping.
      time.sleep(0.1)
      close_op.run()
      dequeue_thread.join()
      # Test that the elements in the partially-dequeued batch are
      # restored in the correct order.
      for elem_a, elem_b in zip(elems_a, elems_b):
        val_a, val_b = sess.run([cleanup_dequeue_a_t, cleanup_dequeue_b_t])
        self.assertEqual(elem_a, val_a)
        self.assertEqual(elem_b, val_b)
      self.assertEqual(0, q.size().eval())
  def testBlockingDequeueManyFromClosedEmptyQueue(self):
    with self.test_session() as sess:
      q = tf.FIFOQueue(10, tf.float32, ())
      close_op = q.close()
      dequeued_t = q.dequeue_many(4)
      def dequeue():
        # Expect the operation to fail due to the queue being closed.
        with self.assertRaisesRegexp(tf.errors.OutOfRangeError,
                                     "is closed and has insufficient"):
          sess.run(dequeued_t)
      dequeue_thread = self.checkedThread(target=dequeue)
      dequeue_thread.start()
      # The close_op should run after the dequeue_thread has blocked.
      # TODO(mrry): Figure out how to do this without sleeping.
      time.sleep(0.1)
      close_op.run()
      dequeue_thread.join()
  def testBlockingDequeueUpToFromClosedEmptyQueue(self):
    with self.test_session() as sess:
      q = tf.FIFOQueue(10, tf.float32, ())
      close_op = q.close()
      dequeued_t = q.dequeue_up_to(4)
      def dequeue():
        # Expect the operation to fail due to the queue being closed.
        with self.assertRaisesRegexp(tf.errors.OutOfRangeError,
                                     "is closed and has insufficient"):
          sess.run(dequeued_t)
      dequeue_thread = self.checkedThread(target=dequeue)
      dequeue_thread.start()
      # The close_op should run after the dequeue_thread has blocked.
      # TODO(mrry): Figure out how to do this without sleeping.
      time.sleep(0.1)
      close_op.run()
      dequeue_thread.join()
  def testEnqueueToClosedQueue(self):
    # Enqueues after close fail with CancelledError (not OutOfRange).
    with self.test_session():
      q = tf.FIFOQueue(10, tf.float32)
      enqueue_op = q.enqueue((10.0,))
      close_op = q.close()
      enqueue_op.run()
      close_op.run()
      # Expect the operation to fail due to the queue being closed.
      with self.assertRaisesRegexp(tf.errors.CancelledError, "is closed"):
        enqueue_op.run()
  def testEnqueueManyToClosedQueue(self):
    with self.test_session():
      q = tf.FIFOQueue(10, tf.float32)
      elems = [10.0, 20.0, 30.0, 40.0]
      enqueue_op = q.enqueue_many((elems,))
      close_op = q.close()
      enqueue_op.run()
      close_op.run()
      # Expect the operation to fail due to the queue being closed.
      with self.assertRaisesRegexp(tf.errors.CancelledError, "is closed"):
        enqueue_op.run()
  def testBlockingEnqueueToFullQueue(self):
    """An enqueue on a full queue blocks until dequeues free up space."""
    with self.test_session() as sess:
      q = tf.FIFOQueue(4, tf.float32)
      elems = [10.0, 20.0, 30.0, 40.0]
      enqueue_op = q.enqueue_many((elems,))
      blocking_enqueue_op = q.enqueue((50.0,))
      dequeued_t = q.dequeue()
      enqueue_op.run()

      def blocking_enqueue():
        sess.run(blocking_enqueue_op)

      thread = self.checkedThread(target=blocking_enqueue)
      thread.start()
      # The dequeue ops should run after the blocking_enqueue_op has blocked.
      # TODO(mrry): Figure out how to do this without sleeping.
      time.sleep(0.1)
      for elem in elems:
        self.assertEqual([elem], dequeued_t.eval())
      # The final dequeue observes the element that was blocked on.
      self.assertEqual([50.0], dequeued_t.eval())
      thread.join()
def testBlockingEnqueueManyToFullQueue(self):
with self.test_session() as sess:
q = tf.FIFOQueue(4, tf.float32)
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue_many(([50.0, 60.0],))
dequeued_t = q.dequeue()
enqueue_op.run()
def blocking_enqueue():
sess.run(blocking_enqueue_op)
thread = self.checkedThread(target=blocking_enqueue)
thread.start()
# The dequeue ops should run after the blocking_enqueue_op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
for elem in elems:
self.assertEqual([elem], dequeued_t.eval())
time.sleep(0.01)
self.assertEqual([50.0], dequeued_t.eval())
self.assertEqual([60.0], dequeued_t.eval())
  def testBlockingEnqueueBeforeClose(self):
    """A blocked enqueue completes before a concurrent close() takes effect."""
    with self.test_session() as sess:
      q = tf.FIFOQueue(4, tf.float32)
      elems = [10.0, 20.0, 30.0, 40.0]
      enqueue_op = q.enqueue_many((elems,))
      blocking_enqueue_op = q.enqueue((50.0,))
      close_op = q.close()
      dequeued_t = q.dequeue()
      enqueue_op.run()

      def blocking_enqueue():
        # Expect the operation to succeed once the dequeue op runs.
        sess.run(blocking_enqueue_op)

      enqueue_thread = self.checkedThread(target=blocking_enqueue)
      enqueue_thread.start()
      # The close_op should run after the blocking_enqueue_op has blocked.
      # TODO(mrry): Figure out how to do this without sleeping.
      time.sleep(0.1)

      def close():
        sess.run(close_op)

      close_thread = self.checkedThread(target=close)
      close_thread.start()
      # The dequeue will unblock both threads.
      self.assertEqual(10.0, dequeued_t.eval())
      enqueue_thread.join()
      close_thread.join()
      # All remaining elements, including the blocked 50.0, are drained.
      for elem in [20.0, 30.0, 40.0, 50.0]:
        self.assertEqual(elem, dequeued_t.eval())
      self.assertEqual(0, q.size().eval())
  def testBlockingEnqueueManyBeforeClose(self):
    """A blocked enqueue_many completes before a concurrent close() takes effect."""
    with self.test_session() as sess:
      q = tf.FIFOQueue(4, tf.float32)
      elems = [10.0, 20.0, 30.0]
      enqueue_op = q.enqueue_many((elems,))
      blocking_enqueue_op = q.enqueue_many(([50.0, 60.0],))
      close_op = q.close()
      dequeued_t = q.dequeue()
      enqueue_op.run()

      def blocking_enqueue():
        sess.run(blocking_enqueue_op)

      enqueue_thread = self.checkedThread(target=blocking_enqueue)
      enqueue_thread.start()
      # The close_op should run after the blocking_enqueue_op has blocked.
      # TODO(mrry): Figure out how to do this without sleeping.
      time.sleep(0.1)

      def close():
        sess.run(close_op)

      close_thread = self.checkedThread(target=close)
      close_thread.start()
      # The dequeue will unblock both threads.
      self.assertEqual(10.0, dequeued_t.eval())
      enqueue_thread.join()
      close_thread.join()
      # The blocked batch (50.0, 60.0) landed in the queue before the close.
      for elem in [20.0, 30.0, 50.0, 60.0]:
        self.assertEqual(elem, dequeued_t.eval())
def testDoesNotLoseValue(self):
with self.test_session():
q = tf.FIFOQueue(1, tf.float32)
enqueue_op = q.enqueue((10.0,))
size_t = q.size()
enqueue_op.run()
for _ in range(500):
self.assertEqual(size_t.eval(), [1])
  def testSharedQueueSameSession(self):
    """Two queue handles with the same shared_name refer to one queue."""
    with self.test_session():
      q1 = tf.FIFOQueue(
          1, tf.float32, shared_name="shared_queue")
      q1.enqueue((10.0,)).run()
      q2 = tf.FIFOQueue(
          1, tf.float32, shared_name="shared_queue")
      q1_size_t = q1.size()
      q2_size_t = q2.size()
      # Both handles observe the element enqueued through q1.
      self.assertEqual(q1_size_t.eval(), [1])
      self.assertEqual(q2_size_t.eval(), [1])
      self.assertEqual(q2.dequeue().eval(), [10.0])
      self.assertEqual(q1_size_t.eval(), [0])
      self.assertEqual(q2_size_t.eval(), [0])
      # And vice versa: enqueue through q2, dequeue through q1.
      q2.enqueue((20.0,)).run()
      self.assertEqual(q1_size_t.eval(), [1])
      self.assertEqual(q2_size_t.eval(), [1])
      self.assertEqual(q1.dequeue().eval(), [20.0])
      self.assertEqual(q1_size_t.eval(), [0])
      self.assertEqual(q2_size_t.eval(), [0])
  def testIncompatibleSharedQueueErrors(self):
    """Reopening a shared queue with mismatched attributes must fail."""
    with self.test_session():
      # Mismatched capacity.
      q_a_1 = tf.FIFOQueue(10, tf.float32, shared_name="q_a")
      q_a_2 = tf.FIFOQueue(15, tf.float32, shared_name="q_a")
      q_a_1.queue_ref.eval()
      with self.assertRaisesOpError("capacity"):
        q_a_2.queue_ref.eval()
      # Mismatched component dtypes.
      q_b_1 = tf.FIFOQueue(10, tf.float32, shared_name="q_b")
      q_b_2 = tf.FIFOQueue(10, tf.int32, shared_name="q_b")
      q_b_1.queue_ref.eval()
      with self.assertRaisesOpError("component types"):
        q_b_2.queue_ref.eval()
      # Unshaped first, shaped second.
      q_c_1 = tf.FIFOQueue(10, tf.float32, shared_name="q_c")
      q_c_2 = tf.FIFOQueue(
          10, tf.float32, shapes=[(1, 1, 2, 3)], shared_name="q_c")
      q_c_1.queue_ref.eval()
      with self.assertRaisesOpError("component shapes"):
        q_c_2.queue_ref.eval()
      # Shaped first, unshaped second.
      q_d_1 = tf.FIFOQueue(
          10, tf.float32, shapes=[(1, 1, 2, 3)], shared_name="q_d")
      q_d_2 = tf.FIFOQueue(10, tf.float32, shared_name="q_d")
      q_d_1.queue_ref.eval()
      with self.assertRaisesOpError("component shapes"):
        q_d_2.queue_ref.eval()
      # Same rank, different dimension sizes.
      q_e_1 = tf.FIFOQueue(
          10, tf.float32, shapes=[(1, 1, 2, 3)], shared_name="q_e")
      q_e_2 = tf.FIFOQueue(
          10, tf.float32, shapes=[(1, 1, 2, 4)], shared_name="q_e")
      q_e_1.queue_ref.eval()
      with self.assertRaisesOpError("component shapes"):
        q_e_2.queue_ref.eval()
      # Different number of components.
      q_f_1 = tf.FIFOQueue(10, tf.float32, shared_name="q_f")
      q_f_2 = tf.FIFOQueue(
          10, (tf.float32, tf.int32), shared_name="q_f")
      q_f_1.queue_ref.eval()
      with self.assertRaisesOpError("component types"):
        q_f_2.queue_ref.eval()
def testSelectQueue(self):
with self.test_session():
num_queues = 10
qlist = list()
for _ in xrange(num_queues):
qlist.append(tf.FIFOQueue(10, tf.float32))
# Enqueue/Dequeue into a dynamically selected queue
for _ in xrange(20):
index = np.random.randint(num_queues)
q = tf.FIFOQueue.from_list(index, qlist)
q.enqueue((10.,)).run()
self.assertEqual(q.dequeue().eval(), 10.0)
def testSelectQueueOutOfRange(self):
with self.test_session():
q1 = tf.FIFOQueue(10, tf.float32)
q2 = tf.FIFOQueue(15, tf.float32)
enq_q = tf.FIFOQueue.from_list(3, [q1, q2])
with self.assertRaisesOpError("Index must be in the range"):
enq_q.dequeue().eval()
  def _blockingDequeue(self, sess, dequeue_op):
    """Runs dequeue_op, expecting cancellation (used while sess is closing)."""
    with self.assertRaisesOpError("was cancelled"):
      sess.run(dequeue_op)
  def _blockingDequeueMany(self, sess, dequeue_many_op):
    """Runs dequeue_many_op, expecting cancellation (used while sess is closing)."""
    with self.assertRaisesOpError("was cancelled"):
      sess.run(dequeue_many_op)
  def _blockingEnqueue(self, sess, enqueue_op):
    """Runs enqueue_op, expecting cancellation (used while sess is closing)."""
    with self.assertRaisesOpError("was cancelled"):
      sess.run(enqueue_op)
  def _blockingEnqueueMany(self, sess, enqueue_many_op):
    """Runs enqueue_many_op, expecting cancellation (used while sess is closing)."""
    with self.assertRaisesOpError("was cancelled"):
      sess.run(enqueue_many_op)
  def testResetOfBlockingOperation(self):
    """Closing the session cancels all pending (blocked) queue operations."""
    with self.test_session() as sess:
      # An empty queue so dequeue/dequeue_many block.
      q_empty = tf.FIFOQueue(5, tf.float32, ())
      dequeue_op = q_empty.dequeue()
      dequeue_many_op = q_empty.dequeue_many(1)
      # A full queue so enqueue/enqueue_many block.
      q_full = tf.FIFOQueue(5, tf.float32)
      sess.run(q_full.enqueue_many(([1.0, 2.0, 3.0, 4.0, 5.0],)))
      enqueue_op = q_full.enqueue((6.0,))
      enqueue_many_op = q_full.enqueue_many(([6.0],))
      # Each helper asserts that its op is cancelled when the session closes.
      threads = [
          self.checkedThread(self._blockingDequeue, args=(sess, dequeue_op)),
          self.checkedThread(self._blockingDequeueMany, args=(sess,
                                                              dequeue_many_op)),
          self.checkedThread(self._blockingEnqueue, args=(sess, enqueue_op)),
          self.checkedThread(self._blockingEnqueueMany, args=(sess,
                                                              enqueue_many_op))]
      for t in threads:
        t.start()
      # Give the ops time to block before cancelling them.
      time.sleep(0.1)
      sess.close()  # Will cancel the blocked operations.
      for t in threads:
        t.join()
  def testBigEnqueueMany(self):
    """An enqueue_many larger than capacity completes incrementally as space frees."""
    with self.test_session() as sess:
      q = tf.FIFOQueue(5, tf.int32, ((),))
      elem = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
      enq = q.enqueue_many((elem,))
      deq = q.dequeue()
      size_op = q.size()

      # enq_done tracks the enqueue thread's progress: one entry when it
      # starts, a second when the full batch has been accepted.
      enq_done = []

      def blocking_enqueue():
        enq_done.append(False)
        # This will fill the queue and then block until enough dequeues happen.
        sess.run(enq)
        enq_done.append(True)

      thread = self.checkedThread(target=blocking_enqueue)
      thread.start()

      # The enqueue should start and then block.
      results = []
      results.append(deq.eval())  # Will only complete after the enqueue starts.
      self.assertEqual(len(enq_done), 1)
      self.assertEqual(sess.run(size_op), 5)

      for _ in range(3):
        results.append(deq.eval())

      time.sleep(0.1)
      self.assertEqual(len(enq_done), 1)
      self.assertEqual(sess.run(size_op), 5)

      # This dequeue will unblock the thread.
      results.append(deq.eval())
      time.sleep(0.1)
      self.assertEqual(len(enq_done), 2)
      thread.join()

      # Drain the rest, checking the size after every dequeue.
      for i in range(5):
        self.assertEqual(size_op.eval(), 5 - i)
        results.append(deq.eval())
        self.assertEqual(size_op.eval(), 5 - i - 1)

      self.assertAllEqual(elem, results)
  def testBigDequeueMany(self):
    """A dequeue_many larger than capacity completes once enough is enqueued."""
    with self.test_session() as sess:
      q = tf.FIFOQueue(2, tf.int32, ((),))
      elem = np.arange(4, dtype=np.int32)
      enq_list = [q.enqueue((e,)) for e in elem]
      deq = q.dequeue_many(4)

      results = []

      def blocking_dequeue():
        # Will only complete after 4 enqueues complete.
        results.extend(sess.run(deq))

      thread = self.checkedThread(target=blocking_dequeue)
      thread.start()
      # The dequeue should start and then block.
      for enq in enq_list:
        # TODO(mrry): Figure out how to do this without sleeping.
        time.sleep(0.1)
        # No partial results leak out while the dequeue is still blocked.
        self.assertEqual(len(results), 0)
        sess.run(enq)

      # Enough enqueued to unblock the dequeue
      thread.join()
      self.assertAllEqual(elem, results)
def testDtypes(self):
with self.test_session() as sess:
dtypes = [tf.float32, tf.float64, tf.int32, tf.uint8, tf.int16, tf.int8,
tf.int64, tf.bool, tf.complex64, tf.complex128]
shape = (32, 4, 128)
q = tf.FIFOQueue(32, dtypes, [shape[1:]] * len(dtypes))
input_tuple = []
for dtype in dtypes:
np_dtype = dtype.as_numpy_dtype
np_array = np.random.randint(-10, 10, shape)
if dtype == tf.bool:
np_array = np_array > 0
elif dtype in (tf.complex64, tf.complex128):
np_array = np.sqrt(np_array.astype(np_dtype))
else:
np_array = np_array.astype(np_dtype)
input_tuple.append(np_array)
q.enqueue_many(input_tuple).run()
output_tuple_t = q.dequeue_many(32)
output_tuple = sess.run(output_tuple_t)
for (input_elem, output_elem) in zip(input_tuple, output_tuple):
self.assertAllEqual(input_elem, output_elem)
def testDeviceColocation(self):
with tf.device("/job:ps"):
q = tf.FIFOQueue(32, [tf.int32], name="q")
with tf.device("/job:worker/task:7"):
dequeued_t = q.dequeue()
self.assertDeviceEqual("/job:ps", dequeued_t.device)
self.assertEqual([b"loc:@q"], dequeued_t.op.colocation_groups())
class FIFOQueueDictTest(tf.test.TestCase):
  """Tests FIFOQueue when components are addressed by name (dict mode).

  Fix: several assertions were written as ``assertTrue(expected, actual)``.
  ``assertTrue`` treats its second argument as the failure *message*, so
  those checks always passed for any non-empty first argument.  They are
  now real ``assertEqual`` comparisons.
  """

  def testConstructor(self):
    """A named-component queue records its names and attrs in the NodeDef."""
    with tf.Graph().as_default():
      q = tf.FIFOQueue(5, (tf.int32, tf.float32), names=("i", "j"),
                       shared_name="foo", name="Q")
      self.assertTrue(isinstance(q.queue_ref, tf.Tensor))
      self.assertProtoEquals("""
        name:'Q' op:'FIFOQueue'
        attr { key: 'component_types' value { list {
          type: DT_INT32 type : DT_FLOAT
        } } }
        attr { key: 'shapes' value { list {} } }
        attr { key: 'capacity' value { i: 5 } }
        attr { key: 'container' value { s: '' } }
        attr { key: 'shared_name' value { s: 'foo' } }
        """, q.queue_ref.op.node_def)
      self.assertEqual(["i", "j"], q.names)

  def testConstructorWithShapes(self):
    """Per-component shapes are serialized into the 'shapes' attr."""
    with tf.Graph().as_default():
      q = tf.FIFOQueue(5, (tf.int32, tf.float32), names=("i", "f"),
                       shapes=(tf.TensorShape([1, 1, 2, 3]),
                               tf.TensorShape([5, 8])), name="Q")
      self.assertTrue(isinstance(q.queue_ref, tf.Tensor))
      self.assertProtoEquals("""
        name:'Q' op:'FIFOQueue'
        attr { key: 'component_types' value { list {
          type: DT_INT32 type : DT_FLOAT
        } } }
        attr { key: 'shapes' value { list {
          shape { dim { size: 1 }
                  dim { size: 1 }
                  dim { size: 2 }
                  dim { size: 3 } }
          shape { dim { size: 5 }
                  dim { size: 8 } }
        } } }
        attr { key: 'capacity' value { i: 5 } }
        attr { key: 'container' value { s: '' } }
        attr { key: 'shared_name' value { s: '' } }
        """, q.queue_ref.op.node_def)
      self.assertEqual(["i", "f"], q.names)

  def testEnqueueDequeueOneComponent(self):
    """Single named component: dict-only enqueue APIs and keyed dequeue."""
    with self.test_session() as sess:
      q = tf.FIFOQueue(10, tf.float32, shapes=((),), names="f")
      # Verify that enqueue() checks that when using names we must enqueue a
      # dictionary.
      with self.assertRaisesRegexp(ValueError, "enqueue a dictionary"):
        enqueue_op = q.enqueue(10.0)
      with self.assertRaisesRegexp(ValueError, "enqueue a dictionary"):
        enqueue_op = q.enqueue((10.0,))
      # The dictionary keys must match the queue component names.
      with self.assertRaisesRegexp(ValueError, "match names of Queue"):
        enqueue_op = q.enqueue({})
      with self.assertRaisesRegexp(ValueError, "match names of Queue"):
        enqueue_op = q.enqueue({"x": 12})
      with self.assertRaisesRegexp(ValueError, "match names of Queue"):
        enqueue_op = q.enqueue({"f": 10.0, "s": "aa"})
      enqueue_op = q.enqueue({"f": 10.0})
      enqueue_op2 = q.enqueue({"f": 20.0})
      enqueue_op3 = q.enqueue({"f": 30.0})
      # Verify that enqueue_many() checks that when using names we must enqueue
      # a dictionary.
      with self.assertRaisesRegexp(ValueError, "enqueue a dictionary"):
        enqueue_op4 = q.enqueue_many([40.0, 50.0])
      # The dictionary keys must match the queue component names.
      with self.assertRaisesRegexp(ValueError, "match names of Queue"):
        enqueue_op4 = q.enqueue_many({})
      with self.assertRaisesRegexp(ValueError, "match names of Queue"):
        enqueue_op4 = q.enqueue_many({"x": 12})
      with self.assertRaisesRegexp(ValueError, "match names of Queue"):
        enqueue_op4 = q.enqueue_many({"f": [40.0, 50.0], "s": ["aa", "bb"]})
      enqueue_op4 = q.enqueue_many({"f": [40.0, 50.0]})
      dequeue = q.dequeue()
      dequeue_2 = q.dequeue_many(2)
      sess.run(enqueue_op)
      sess.run(enqueue_op2)
      sess.run(enqueue_op3)
      sess.run(enqueue_op4)
      f = sess.run(dequeue["f"])
      self.assertEqual(10.0, f)
      f = sess.run(dequeue_2["f"])
      self.assertEqual([20.0, 30.0], list(f))
      f = sess.run(dequeue_2["f"])
      self.assertEqual([40.0, 50.0], list(f))

  def testEnqueueDequeueMultipleComponent(self):
    """Multiple named components: dict enqueue/dequeue round trips."""
    with self.test_session() as sess:
      q = tf.FIFOQueue(10, (tf.float32, tf.int32, tf.string),
                       shapes=((), (), ()), names=("f", "i", "s"))
      # Verify that enqueue() checks that when using names we must enqueue a
      # dictionary.
      with self.assertRaisesRegexp(ValueError, "enqueue a dictionary"):
        enqueue_op = q.enqueue((10.0, 123, "aa"))
      # The dictionary keys must match the queue component names.
      with self.assertRaisesRegexp(ValueError, "match names of Queue"):
        enqueue_op = q.enqueue({})
      with self.assertRaisesRegexp(ValueError, "match names of Queue"):
        enqueue_op = q.enqueue({"x": 10.0})
      with self.assertRaisesRegexp(ValueError, "match names of Queue"):
        enqueue_op = q.enqueue({"i": 12, "s": "aa"})
      with self.assertRaisesRegexp(ValueError, "match names of Queue"):
        enqueue_op = q.enqueue({"i": 123, "s": "aa", "f": 10.0, "x": 10.0})
      enqueue_op = q.enqueue({"i": 123, "s": "aa", "f": 10.0})
      enqueue_op2 = q.enqueue({"i": 124, "s": "bb", "f": 20.0})
      enqueue_op3 = q.enqueue({"i": 125, "s": "cc", "f": 30.0})
      # Verify that enqueue_many() checks that when using names we must enqueue
      # a dictionary.
      with self.assertRaisesRegexp(ValueError, "enqueue a dictionary"):
        enqueue_op4 = q.enqueue_many(([40.0, 50.0], [126, 127], ["dd", "ee"]))
      # The dictionary keys must match the queue component names.
      with self.assertRaisesRegexp(ValueError, "match names of Queue"):
        enqueue_op4 = q.enqueue_many({})
      with self.assertRaisesRegexp(ValueError, "match names of Queue"):
        enqueue_op4 = q.enqueue_many({"x": [10.0, 20.0]})
      with self.assertRaisesRegexp(ValueError, "match names of Queue"):
        enqueue_op4 = q.enqueue_many({"i": [12, 12], "s": ["aa", "bb"]})
      with self.assertRaisesRegexp(ValueError, "match names of Queue"):
        enqueue_op4 = q.enqueue_many({"f": [40.0, 50.0], "i": [126, 127],
                                      "s": ["dd", "ee"], "x": [1, 2]})
      enqueue_op4 = q.enqueue_many({"f": [40.0, 50.0], "i": [126, 127],
                                    "s": ["dd", "ee"]})
      dequeue = q.dequeue()
      dequeue_2 = q.dequeue_many(2)
      sess.run(enqueue_op)
      sess.run(enqueue_op2)
      sess.run(enqueue_op3)
      sess.run(enqueue_op4)
      i, f, s = sess.run([dequeue["i"], dequeue["f"], dequeue["s"]])
      self.assertEqual(123, i)
      self.assertEqual(10.0, f)
      self.assertEqual(tf.compat.as_bytes("aa"), s)
      i, f, s = sess.run([dequeue_2["i"], dequeue_2["f"], dequeue_2["s"]])
      self.assertEqual([124, 125], list(i))
      # Was assertTrue(expected, actual) — a vacuous check; now a real one.
      self.assertEqual([20.0, 30.0], list(f))
      self.assertEqual([tf.compat.as_bytes("bb"), tf.compat.as_bytes("cc")],
                       list(s))
      i, f, s = sess.run([dequeue_2["i"], dequeue_2["f"], dequeue_2["s"]])
      self.assertEqual([126, 127], list(i))
      # Was assertTrue(expected, actual) — a vacuous check; now a real one.
      self.assertEqual([40.0, 50.0], list(f))
      self.assertEqual([tf.compat.as_bytes("dd"), tf.compat.as_bytes("ee")],
                       list(s))
class FIFOQueueWithTimeoutTest(tf.test.TestCase):
  """Tests queue operations under session- and run-level timeouts."""

  def testDequeueWithTimeout(self):
    """A dequeue that never unblocks must hit the session-wide op timeout."""
    with self.test_session(
        config=tf.ConfigProto(operation_timeout_in_ms=20)) as sess:
      q = tf.FIFOQueue(10, tf.float32)
      # A queue created outside tf.container() has an empty container attr.
      self.assertEqual(tf.compat.as_bytes(""),
                       q.queue_ref.op.get_attr("container"))
      dequeued_t = q.dequeue()
      # Intentionally do not run any enqueue_ops so that dequeue will block
      # until operation_timeout_in_ms.
      with self.assertRaisesRegexp(tf.errors.DeadlineExceededError,
                                   "Timed out waiting for notification"):
        sess.run(dequeued_t)

  def testReusableAfterTimeout(self):
    """A queue must remain usable after dequeue attempts time out."""
    with self.test_session() as sess:
      q = tf.FIFOQueue(10, tf.float32)
      dequeued_t = q.dequeue()
      enqueue_op = q.enqueue(37)
      # Two consecutive per-run timeouts must not corrupt the queue.
      with self.assertRaisesRegexp(tf.errors.DeadlineExceededError,
                                   "Timed out waiting for notification"):
        sess.run(dequeued_t, options=tf.RunOptions(timeout_in_ms=10))
      with self.assertRaisesRegexp(tf.errors.DeadlineExceededError,
                                   "Timed out waiting for notification"):
        sess.run(dequeued_t, options=tf.RunOptions(timeout_in_ms=10))
      sess.run(enqueue_op)
      self.assertEqual(37, sess.run(dequeued_t))
class QueueContainerTest(tf.test.TestCase):
  """Checks that tf.container() scopes are recorded on queue ops."""

  def testContainer(self):
    with tf.Graph().as_default(), tf.container("test"):
      queue = tf.FIFOQueue(10, tf.float32)
    # The container name is baked into the queue op's attributes.
    self.assertEqual(tf.compat.as_bytes("test"),
                     queue.queue_ref.op.get_attr("container"))
class FIFOQueueBenchmark(tf.test.Benchmark):
  """Benchmark FIFOQueue operations."""

  def _build_graph(self):
    """Builds a graph that enqueues and dequeues a single float.

    Returns:
      A tuple with the graph init tensor and graph output tensor.
    """
    q = tf.FIFOQueue(1, "float")
    init = q.enqueue(1.0)
    # Each run of q_inc dequeues the current value and enqueues value + 1,
    # so one step == one enqueue-dequeue pair.
    x = q.dequeue()
    q_inc = q.enqueue(x + 1)
    return init, q_inc

  # TODO(suharshs): Add benchmarks for:
  #   - different capacities of the queue
  #   - various sizes of tensors
  #   - enqueue_many, dequeue_many
  def _run(self, num_iters):
    """Benchmarks enqueueing and dequeueing from a FIFOQueue.

    Args:
      num_iters: The number of iterations to run.

    Returns:
      The duration of the run in seconds.
    """
    graph = tf.Graph()
    with graph.as_default():
      init, output = self._build_graph()
    with tf.Session(graph=graph) as session:
      init.run()
      _ = session.run(output)  # warm up.
      start_time = time.time()
      for _ in range(num_iters):
        _ = session.run(output)
      duration = time.time() - start_time
    print("%f secs per enqueue-dequeue" % (duration / num_iters))
    self.report_benchmark(
        name="fifo_queue", iters=num_iters, wall_time=duration / num_iters)
    return duration
if __name__ == "__main__":
  # Runs all test cases (and benchmarks, with --benchmarks) defined above.
  tf.test.main()
|
charbeljc/account-financial-tools | refs/heads/8.0 | account_fiscal_position_vat_check/__openerp__.py | 6 | # -*- encoding: utf-8 -*-
##############################################################################
#
# Account Fiscal Position VAT Check module for Odoo
# Copyright (C) 2013-2014 Akretion (http://www.akretion.com)
# @author Alexis de Lattre <alexis.delattre@akretion.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# Odoo/OpenERP module manifest: a single dict literal read by the server.
{
    # Human-readable module name shown in the Apps list.
    'name': 'Account Fiscal Position VAT Check',
    'version': '0.1',
    'category': 'Accounting & Finance',
    'license': 'AGPL-3',
    'summary': 'Check VAT on invoice validation',
    'description': """
Check that the Customer has a VAT number on invoice validation
==============================================================
This module adds an option **Customer must have VAT** on fiscal positions.
When a user tries to validate a customer invoice or refund
with a fiscal position that have this option, OpenERP will check that
the customer has a VAT number.
If it doesn't, OpenERP will block the validation of the invoice
and display an error message.
In the European Union (EU), when an EU company sends an invoice to
another EU company in another country, it can invoice without VAT
(most of the time) but the VAT number of the customer must be displayed
on the invoice.
This module also displays a warning when a user sets
a fiscal position with the option **Customer must have VAT** on a customer
and this customer doesn't have a VAT number in OpenERP yet.
Please contact Alexis de Lattre from Akretion <alexis.delattre@akretion.com>
for any help or question about this module.
""",
    'author': 'Akretion',
    'website': 'http://www.akretion.com',
    # Only depends on the core accounting module.
    'depends': ['account'],
    # XML views loaded on install/update.
    'data': [
        'account_fiscal_position_view.xml',
        'partner_view.xml',
    ],
    'installable': True,
}
|
agry/NGECore2 | refs/heads/master | scripts/object/tangible/quest/corellia/corellia_38_meatlump_armor.py | 85615 | import sys
def setup(core, object):
return |
anhstudios/swganh | refs/heads/develop | data/scripts/templates/object/tangible/ship/attachment/engine/shared_decimator_engine_s01.py | 2 | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
	"""Builds and returns the Tangible template for this ship engine attachment."""
	engine = Tangible()
	engine.template = "object/tangible/ship/attachment/engine/shared_decimator_engine_s01.iff"
	engine.attribute_template_id = 8
	engine.stfName("item_n","ship_attachment")

	#### BEGIN MODIFICATIONS ####
	#### END MODIFICATIONS ####

	return engine
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.