code stringlengths 3 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1 value | license stringclasses 15 values | size int64 3 1.05M |
|---|---|---|---|---|---|
"""HTTP endpoints for the Teams API."""
import logging
from django.shortcuts import get_object_or_404, render_to_response
from django.http import Http404
from django.conf import settings
from rest_framework.generics import GenericAPIView
from rest_framework.response import Response
from rest_framework.reverse import reverse
from rest_framework.views import APIView
from rest_framework.authentication import SessionAuthentication
from rest_framework_oauth.authentication import OAuth2Authentication
from rest_framework import status
from rest_framework import permissions
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.contrib.auth.models import User
from django_countries import countries
from django.utils.translation import ugettext as _
from django.utils.translation import ugettext_noop
from openedx.core.lib.api.parsers import MergePatchParser
from openedx.core.lib.api.permissions import IsStaffOrReadOnly
from openedx.core.lib.api.view_utils import (
RetrievePatchAPIView,
add_serializer_errors,
build_api_error,
ExpandableFieldViewMixin
)
from openedx.core.lib.api.paginators import paginate_search_results, DefaultPagination
from xmodule.modulestore.django import modulestore
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from courseware.courses import get_course_with_access, has_access
from student.models import CourseEnrollment, CourseAccessRole
from student.roles import CourseStaffRole
from django_comment_client.utils import has_discussion_privileges
from util.model_utils import truncate_fields
from . import is_feature_enabled
from lms.djangoapps.teams.models import CourseTeam, CourseTeamMembership
from .serializers import (
CourseTeamSerializer,
CourseTeamCreationSerializer,
TopicSerializer,
BulkTeamCountTopicSerializer,
MembershipSerializer,
add_team_count
)
from .search_indexes import CourseTeamIndexer
from .errors import AlreadyOnTeamInCourse, ElasticSearchConnectionError, NotEnrolledInCourseForTeam
from .utils import emit_team_event
TEAM_MEMBERSHIPS_PER_PAGE = 2
TOPICS_PER_PAGE = 12
MAXIMUM_SEARCH_SIZE = 100000
log = logging.getLogger(__name__)
@receiver(post_save, sender=CourseTeam)
def team_post_save_callback(sender, instance, **kwargs): # pylint: disable=unused-argument
""" Emits signal after the team is saved. """
changed_fields = instance.field_tracker.changed()
# Don't emit events when we are first creating the team.
if not kwargs['created']:
for field in changed_fields:
if field not in instance.FIELD_BLACKLIST:
truncated_fields = truncate_fields(unicode(changed_fields[field]), unicode(getattr(instance, field)))
truncated_fields['team_id'] = instance.team_id
truncated_fields['field'] = field
emit_team_event(
'edx.team.changed',
instance.course_id,
truncated_fields
)
class TopicsPagination(DefaultPagination):
    """Pagination settings for course team topic listings."""
    # Number of topics returned per page.
    page_size = TOPICS_PER_PAGE
class MyTeamsPagination(DefaultPagination):
    """Pagination settings for the requesting user's own team memberships."""
    # Number of the user's teams returned per page.
    page_size = TEAM_MEMBERSHIPS_PER_PAGE
class TeamsDashboardView(GenericAPIView):
    """
    View methods related to the teams dashboard.
    """

    def get(self, request, course_id):
        """
        Renders the teams dashboard, which is shown on the "Teams" tab.

        Raises a 404 if the course specified by course_id does not exist, the
        user is not registered for the course, or the teams feature is not enabled.
        """
        course_key = CourseKey.from_string(course_id)
        course = get_course_with_access(request.user, "load", course_key)
        if not is_feature_enabled(course):
            raise Http404

        # 404 (rather than 403) so that users who are neither enrolled nor
        # staff cannot confirm the dashboard exists for this course.
        if not CourseEnrollment.is_enrolled(request.user, course.id) and \
                not has_access(request.user, 'staff', course, course.id):
            raise Http404

        user = request.user

        # Even though sorting is done outside of the serializer, sort_order needs to be passed
        # to the serializer so that the paginated results indicate how they were sorted.
        sort_order = 'name'
        topics = get_alphabetical_topics(course)

        # Paginate and serialize topic data
        # BulkTeamCountPaginatedTopicSerializer will add team counts to the topics in a single
        # bulk operation per page.
        topics_data = self._serialize_and_paginate(
            TopicsPagination,
            topics,
            request,
            BulkTeamCountTopicSerializer,
            {'course_id': course.id},
        )
        topics_data["sort_order"] = sort_order

        user = request.user
        user_teams = CourseTeam.objects.filter(membership__user=user, course_id=course.id)
        user_teams_data = self._serialize_and_paginate(
            MyTeamsPagination,
            user_teams,
            request,
            CourseTeamSerializer,
            {'expand': ('user',)}
        )

        context = {
            "course": course,
            "topics": topics_data,
            # It is necessary to pass both privileged and staff because only privileged users can
            # administer discussion threads, but both privileged and staff users are allowed to create
            # multiple teams (since they are not automatically added to teams upon creation).
            "user_info": {
                "username": user.username,
                "privileged": has_discussion_privileges(user, course_key),
                "staff": bool(has_access(user, 'staff', course_key)),
                "teams": user_teams_data
            },
            # 'topic_id' and 'team_id' below are literal placeholder strings;
            # the client substitutes real ids into these URL templates.
            "topic_url": reverse(
                'topics_detail', kwargs={'topic_id': 'topic_id', 'course_id': str(course_id)}, request=request
            ),
            "topics_url": reverse('topics_list', request=request),
            "teams_url": reverse('teams_list', request=request),
            "teams_detail_url": reverse('teams_detail', args=['team_id']),
            "team_memberships_url": reverse('team_membership_list', request=request),
            "my_teams_url": reverse('teams_list', request=request),
            "team_membership_detail_url": reverse('team_membership_detail', args=['team_id', user.username]),
            "languages": [[lang[0], _(lang[1])] for lang in settings.ALL_LANGUAGES],  # pylint: disable=translation-of-non-string
            "countries": list(countries),
            "disable_courseware_js": True,
            "teams_base_url": reverse('teams_dashboard', request=request, kwargs={'course_id': course_id}),
        }
        return render_to_response("teams/teams.html", context)

    def _serialize_and_paginate(self, pagination_cls, queryset, request, serializer_cls, serializer_ctx):
        """
        Serialize and paginate objects in a queryset.

        Arguments:
            pagination_cls (pagination.Paginator class): Django Rest Framework Paginator subclass.
            queryset (QuerySet): Django queryset to serialize/paginate.
            request (Request): The current request, forwarded to the serializer context.
            serializer_cls (serializers.Serializer class): Django Rest Framework Serializer subclass.
            serializer_ctx (dict): Context dictionary to pass to the serializer

        Returns: dict
        """
        # Django Rest Framework v3 requires that we pass the request
        # into the serializer's context if the serializer contains
        # hyperlink fields.
        serializer_ctx["request"] = request

        # Instantiate the paginator and use it to paginate the queryset
        paginator = pagination_cls()
        page = paginator.paginate_queryset(queryset, request)

        # Serialize the page
        serializer = serializer_cls(page, context=serializer_ctx, many=True)

        # Use the paginator to construct the response data
        # This will use the pagination subclass for the view to add additional
        # fields to the response.
        # For example, if the input data is a list, the output data would
        # be a dictionary with keys "count", "next", "previous", and "results"
        # (where "results" is set to the value of the original list)
        return paginator.get_paginated_response(serializer.data).data
def has_team_api_access(user, course_key, access_username=None):
    """Return True if the user has access to the Team API for the course
    given by `course_key`. The user must either be enrolled in the course,
    be course staff, be global staff, or have discussion privileges.

    Args:
        user (User): The user to check access for.
        course_key (CourseKey): The key to the course which we are checking access to.
        access_username (string): If provided, access_username must match user.username for non staff access.

    Returns:
        bool: True if the user has access, False otherwise.
    """
    # Global staff, course staff, and discussion-privileged users always have
    # access, regardless of access_username. Checks are ordered cheapest first.
    is_privileged = (
        user.is_staff or
        CourseStaffRole(course_key).has_user(user) or
        has_discussion_privileges(user, course_key)
    )
    if is_privileged:
        return True
    # Non-privileged users may only act on their own behalf.
    if access_username and access_username != user.username:
        return False
    return CourseEnrollment.is_enrolled(user, course_key)
class TeamsListView(ExpandableFieldViewMixin, GenericAPIView):
    """
    **Use Cases**

        Get or create a course team.

    **Example Requests**:

        GET /api/team/v0/teams

        POST /api/team/v0/teams

    **Query Parameters for GET**

        * course_id: Filters the result to teams belonging to the given
          course. Required.

        * topic_id: Filters the result to teams associated with the given
          topic.

        * text_search: Searches for full word matches on the name, description,
          country, and language fields. NOTES: Search is on full names for countries
          and languages, not the ISO codes. Text_search cannot be requested along with
          with order_by.

        * order_by: Cannot be called along with with text_search. Must be one of the following:

            * name: Orders results by case insensitive team name (default).

            * open_slots: Orders results by most open slots (for tie-breaking,
              last_activity_at is used, with most recent first).

            * last_activity_at: Orders result by team activity, with most active first
              (for tie-breaking, open_slots is used, with most open slots first).

        * username: Return teams whose membership contains the given user.

        * page_size: Number of results to return per page.

        * page: Page number to retrieve.

        * expand: Comma separated list of types for which to return
          expanded representations. Supports "user" and "team".

    **Response Values for GET**

        If the user is logged in and enrolled, the response contains:

        * count: The total number of teams matching the request.

        * next: The URL to the next page of results, or null if this is the
          last page.

        * previous: The URL to the previous page of results, or null if this
          is the first page.

        * num_pages: The total number of pages in the result.

        * results: A list of the teams matching the request.

            * id: The team's unique identifier.

            * discussion_topic_id: The unique id of the comments service
              discussion topic associated with this team.

            * name: The name of the team.

            * course_id: The identifier for the course this team belongs to.

            * topic_id: Optionally specifies which topic the team is associated
              with.

            * date_created: Date and time when the team was created.

            * description: A description of the team.

            * country: Optionally specifies which country the team is
              associated with.

            * language: Optionally specifies which language the team is
              associated with.

            * last_activity_at: The date of the last activity of any team member
              within the team.

            * membership: A list of the users that are members of the team.
              See membership endpoint for more detail.

        For all text fields, clients rendering the values should take care
        to HTML escape them to avoid script injections, as the data is
        stored exactly as specified. The intention is that plain text is
        supported, not HTML.

        If the user is not logged in, a 401 error is returned.

        If the user is not enrolled in the course specified by course_id or
        is not course or global staff, a 403 error is returned.

        If the specified course_id is not valid or the user attempts to
        use an unsupported query parameter, a 400 error is returned.

        If the response does not exist, a 404 error is returned. For
        example, the course_id may not reference a real course or the page
        number may be beyond the last page.

        If the server is unable to connect to Elasticsearch, and
        the text_search parameter is supplied, a 503 error is returned.

    **Response Values for POST**

        Any logged in user who has verified their email address can create
        a team. The format mirrors that of a GET for an individual team,
        but does not include the id, date_created, or membership fields.
        id is automatically computed based on name.

        If the user is not logged in, a 401 error is returned.

        If the user is not enrolled in the course, is not course or
        global staff, or does not have discussion privileges a 403 error
        is returned.

        If the course_id is not valid or extra fields are included in the
        request, a 400 error is returned.

        If the specified course does not exist, a 404 error is returned.
    """
    # OAuth2Authentication must come first to return a 401 for unauthenticated users
    authentication_classes = (OAuth2Authentication, SessionAuthentication)
    permission_classes = (permissions.IsAuthenticated,)
    serializer_class = CourseTeamSerializer

    def get(self, request):
        """GET /api/team/v0/teams/"""
        result_filter = {}

        if 'course_id' in request.query_params:
            course_id_string = request.query_params['course_id']
            try:
                course_key = CourseKey.from_string(course_id_string)
                # Ensure the course exists
                course_module = modulestore().get_course(course_key)
                if course_module is None:
                    return Response(status=status.HTTP_404_NOT_FOUND)
                result_filter.update({'course_id': course_key})
            except InvalidKeyError:
                error = build_api_error(
                    ugettext_noop("The supplied course id {course_id} is not valid."),
                    course_id=course_id_string,
                )
                return Response(error, status=status.HTTP_400_BAD_REQUEST)
            if not has_team_api_access(request.user, course_key):
                return Response(status=status.HTTP_403_FORBIDDEN)
        else:
            # course_id is required; without it there is no scope for the query.
            return Response(
                build_api_error(ugettext_noop("course_id must be provided")),
                status=status.HTTP_400_BAD_REQUEST
            )

        text_search = request.query_params.get('text_search', None)
        if text_search and request.query_params.get('order_by', None):
            # Search results come back ranked by relevance, so an explicit
            # ordering cannot be honored at the same time.
            return Response(
                build_api_error(ugettext_noop("text_search and order_by cannot be provided together")),
                status=status.HTTP_400_BAD_REQUEST
            )

        username = request.query_params.get('username', None)
        if username is not None:
            result_filter.update({'membership__user__username': username})
        topic_id = request.query_params.get('topic_id', None)
        if topic_id is not None:
            # Validate the topic against the course's configured team topics.
            if topic_id not in [topic['id'] for topic in course_module.teams_configuration['topics']]:
                error = build_api_error(
                    ugettext_noop('The supplied topic id {topic_id} is not valid'),
                    topic_id=topic_id
                )
                return Response(error, status=status.HTTP_400_BAD_REQUEST)
            result_filter.update({'topic_id': topic_id})
        if text_search and CourseTeamIndexer.search_is_enabled():
            try:
                search_engine = CourseTeamIndexer.engine()
            except ElasticSearchConnectionError:
                return Response(
                    build_api_error(ugettext_noop('Error connecting to elasticsearch')),
                    status=status.HTTP_503_SERVICE_UNAVAILABLE
                )

            # The search index stores course ids as strings, so replace the
            # CourseKey used for DB filtering with its string form.
            result_filter.update({'course_id': course_id_string})

            search_results = search_engine.search(
                query_string=text_search,
                field_dictionary=result_filter,
                size=MAXIMUM_SEARCH_SIZE,
            )

            # Translate search hits back into CourseTeam model instances.
            paginated_results = paginate_search_results(
                CourseTeam,
                search_results,
                self.paginator.get_page_size(request),
                self.get_page()
            )
            emit_team_event('edx.team.searched', course_key, {
                "number_of_results": search_results['total'],
                "search_text": text_search,
                "topic_id": topic_id,
            })

            page = self.paginate_queryset(paginated_results)
            serializer = self.get_serializer(page, many=True)
            # Search results are relevance-ordered; no sort_order is reported.
            order_by_input = None
        else:
            queryset = CourseTeam.objects.filter(**result_filter)
            order_by_input = request.query_params.get('order_by', 'name')
            if order_by_input == 'name':
                # MySQL does case-insensitive order_by.
                queryset = queryset.order_by('name')
            elif order_by_input == 'open_slots':
                # Smallest team_size first == most open slots first.
                queryset = queryset.order_by('team_size', '-last_activity_at')
            elif order_by_input == 'last_activity_at':
                queryset = queryset.order_by('-last_activity_at', 'team_size')
            else:
                return Response({
                    'developer_message': "unsupported order_by value {ordering}".format(ordering=order_by_input),
                    # Translators: 'ordering' is a string describing a way
                    # of ordering a list. For example, {ordering} may be
                    # 'name', indicating that the user wants to sort the
                    # list by lower case name.
                    'user_message': _(u"The ordering {ordering} is not supported").format(ordering=order_by_input),
                }, status=status.HTTP_400_BAD_REQUEST)

            page = self.paginate_queryset(queryset)
            serializer = self.get_serializer(page, many=True)

        response = self.get_paginated_response(serializer.data)
        if order_by_input is not None:
            response.data['sort_order'] = order_by_input
        return response

    def post(self, request):
        """POST /api/team/v0/teams/"""
        field_errors = {}
        course_key = None

        course_id = request.data.get('course_id')
        try:
            course_key = CourseKey.from_string(course_id)
            # Ensure the course exists
            if not modulestore().has_course(course_key):
                return Response(status=status.HTTP_404_NOT_FOUND)
        except InvalidKeyError:
            field_errors['course_id'] = build_api_error(
                ugettext_noop('The supplied course_id {course_id} is not valid.'),
                course_id=course_id
            )
            return Response({
                'field_errors': field_errors,
            }, status=status.HTTP_400_BAD_REQUEST)

        # Course and global staff, as well as discussion "privileged" users, will not automatically
        # be added to a team when they create it. They are allowed to create multiple teams.
        team_administrator = (has_access(request.user, 'staff', course_key)
                              or has_discussion_privileges(request.user, course_key))
        if not team_administrator and CourseTeamMembership.user_in_team_for_course(request.user, course_key):
            error_message = build_api_error(
                ugettext_noop('You are already in a team in this course.'),
                course_id=course_id
            )
            return Response(error_message, status=status.HTTP_400_BAD_REQUEST)

        if course_key and not has_team_api_access(request.user, course_key):
            return Response(status=status.HTTP_403_FORBIDDEN)

        data = request.data.copy()
        data['course_id'] = course_key

        serializer = CourseTeamCreationSerializer(data=data)
        add_serializer_errors(serializer, data, field_errors)

        if field_errors:
            return Response({
                'field_errors': field_errors,
            }, status=status.HTTP_400_BAD_REQUEST)
        else:
            team = serializer.save()
            emit_team_event('edx.team.created', course_key, {
                'team_id': team.team_id
            })
            if not team_administrator:
                # Add the creating user to the team.
                team.add_user(request.user)
                emit_team_event(
                    'edx.team.learner_added',
                    course_key,
                    {
                        'team_id': team.team_id,
                        'user_id': request.user.id,
                        'add_method': 'added_on_create'
                    }
                )
        data = CourseTeamSerializer(team, context={"request": request}).data
        return Response(data)

    def get_page(self):
        """ Returns page number specified in args, params, or defaults to 1. """
        # This code is taken from within the GenericAPIView#paginate_queryset method.
        # We need access to the page outside of that method for our paginate_search_results method
        page_kwarg = self.kwargs.get(self.paginator.page_query_param)
        page_query_param = self.request.query_params.get(self.paginator.page_query_param)
        return page_kwarg or page_query_param or 1
class IsEnrolledOrIsStaff(permissions.BasePermission):
    """Permission granting object access to course staff and enrolled users."""

    def has_object_permission(self, request, view, obj):
        """Return True when the requesting user may use the Team API for the object's course."""
        user_has_access = has_team_api_access(request.user, obj.course_id)
        return user_has_access
class IsStaffOrPrivilegedOrReadOnly(IsStaffOrReadOnly):
    """
    Permission that checks to see if the user is global staff, course
    staff, or has discussion privileges. If none of those conditions are
    met, only read access will be granted.
    """

    def has_object_permission(self, request, view, obj):
        """Allow discussion-privileged users outright; otherwise defer to the staff/read-only check."""
        if has_discussion_privileges(request.user, obj.course_id):
            return True
        return super(IsStaffOrPrivilegedOrReadOnly, self).has_object_permission(request, view, obj)
class TeamsDetailView(ExpandableFieldViewMixin, RetrievePatchAPIView):
    """
    **Use Cases**

        Get, update, or delete a course team's information. Updates are supported
        only through merge patch.

    **Example Requests**:

        GET /api/team/v0/teams/{team_id}}

        PATCH /api/team/v0/teams/{team_id} "application/merge-patch+json"

        DELETE /api/team/v0/teams/{team_id}

    **Query Parameters for GET**

        * expand: Comma separated list of types for which to return
          expanded representations. Supports "user" and "team".

    **Response Values for GET**

        If the user is logged in, the response contains the following fields:

            * id: The team's unique identifier.

            * discussion_topic_id: The unique id of the comments service
              discussion topic associated with this team.

            * name: The name of the team.

            * course_id: The identifier for the course this team belongs to.

            * topic_id: Optionally specifies which topic the team is
              associated with.

            * date_created: Date and time when the team was created.

            * description: A description of the team.

            * country: Optionally specifies which country the team is
              associated with.

            * language: Optionally specifies which language the team is
              associated with.

            * membership: A list of the users that are members of the team. See
              membership endpoint for more detail.

            * last_activity_at: The date of the last activity of any team member
              within the team.

        For all text fields, clients rendering the values should take care
        to HTML escape them to avoid script injections, as the data is
        stored exactly as specified. The intention is that plain text is
        supported, not HTML.

        If the user is not logged in, a 401 error is returned.

        If the user is not course or global staff, a 403 error is returned.

        If the specified team does not exist, a 404 error is returned.

    **Response Values for PATCH**

        Only staff can patch teams.

        If the user is anonymous or inactive, a 401 is returned.

        If the user is logged in and the team does not exist, a 404 is returned.
        If the user is not course or global staff, does not have discussion
        privileges, and the team does exist, a 403 is returned.

        If "application/merge-patch+json" is not the specified content type,
        a 415 error is returned.

        If the update could not be completed due to validation errors, this
        method returns a 400 error with all error messages in the
        "field_errors" field of the returned JSON.

    **Response Values for DELETE**

        Only staff can delete teams. When a team is deleted, all
        team memberships associated with that team are also
        deleted. Returns 204 on successful deletion.

        If the user is anonymous or inactive, a 401 is returned.

        If the user is not course or global staff and does not
        have discussion privileges, a 403 is returned.

        If the user is logged in and the team does not exist, a 404 is returned.
    """
    authentication_classes = (OAuth2Authentication, SessionAuthentication)
    permission_classes = (permissions.IsAuthenticated, IsStaffOrPrivilegedOrReadOnly, IsEnrolledOrIsStaff,)
    lookup_field = 'team_id'
    serializer_class = CourseTeamSerializer
    parser_classes = (MergePatchParser,)

    def get_queryset(self):
        """Returns the queryset used to access the given team."""
        return CourseTeam.objects.all()

    def delete(self, request, team_id):
        """DELETE /api/team/v0/teams/{team_id}"""
        team = get_object_or_404(CourseTeam, team_id=team_id)
        self.check_object_permissions(request, team)
        # Note: list() forces the queryset to be evaluated before delete(),
        # since the memberships are needed for event emission afterwards.
        memberships = list(CourseTeamMembership.get_memberships(team_id=team_id))

        # Note: also deletes all team memberships associated with this team
        team.delete()
        log.info('user %d deleted team %s', request.user.id, team_id)
        emit_team_event('edx.team.deleted', team.course_id, {
            'team_id': team_id,
        })
        # Emit one removal event per member so analytics can track departures.
        for member in memberships:
            emit_team_event('edx.team.learner_removed', team.course_id, {
                'team_id': team_id,
                'remove_method': 'team_deleted',
                'user_id': member.user_id
            })

        return Response(status=status.HTTP_204_NO_CONTENT)
class TopicListView(GenericAPIView):
    """
    **Use Cases**

        Retrieve a list of topics associated with a single course.

    **Example Requests**

        GET /api/team/v0/topics/?course_id={course_id}

    **Query Parameters for GET**

        * course_id: Filters the result to topics belonging to the given
          course (required).

        * order_by: Orders the results. Currently only 'name' and 'team_count' are supported;
          the default value is 'name'. If 'team_count' is specified, topics are returned first sorted
          by number of teams per topic (descending), with a secondary sort of 'name'.

        * page_size: Number of results to return per page.

        * page: Page number to retrieve.

    **Response Values for GET**

        If the user is not logged in, a 401 error is returned.

        If the course_id is not given or an unsupported value is passed for
        order_by, returns a 400 error.

        If the user is not logged in, is not enrolled in the course, or is
        not course or global staff, returns a 403 error.

        If the course does not exist, returns a 404 error.

        Otherwise, a 200 response is returned containing the following
        fields:

        * count: The total number of topics matching the request.

        * next: The URL to the next page of results, or null if this is the
          last page.

        * previous: The URL to the previous page of results, or null if this
          is the first page.

        * num_pages: The total number of pages in the result.

        * results: A list of the topics matching the request.

            * id: The topic's unique identifier.

            * name: The name of the topic.

            * description: A description of the topic.
    """
    authentication_classes = (OAuth2Authentication, SessionAuthentication)
    permission_classes = (permissions.IsAuthenticated,)
    pagination_class = TopicsPagination

    def get(self, request):
        """GET /api/team/v0/topics/?course_id={course_id}"""
        course_id_string = request.query_params.get('course_id', None)
        if course_id_string is None:
            return Response({
                'field_errors': {
                    'course_id': build_api_error(
                        ugettext_noop("The supplied course id {course_id} is not valid."),
                        course_id=course_id_string
                    )
                }
            }, status=status.HTTP_400_BAD_REQUEST)

        try:
            course_id = CourseKey.from_string(course_id_string)
        except InvalidKeyError:
            return Response(status=status.HTTP_404_NOT_FOUND)

        # Ensure the course exists
        course_module = modulestore().get_course(course_id)
        if course_module is None:  # course is None if not found
            return Response(status=status.HTTP_404_NOT_FOUND)

        if not has_team_api_access(request.user, course_id):
            return Response(status=status.HTTP_403_FORBIDDEN)

        ordering = request.query_params.get('order_by', 'name')
        if ordering not in ['name', 'team_count']:
            return Response({
                'developer_message': "unsupported order_by value {ordering}".format(ordering=ordering),
                # Translators: 'ordering' is a string describing a way
                # of ordering a list. For example, {ordering} may be
                # 'name', indicating that the user wants to sort the
                # list by lower case name.
                'user_message': _(u"The ordering {ordering} is not supported").format(ordering=ordering),
            }, status=status.HTTP_400_BAD_REQUEST)

        # Always sort alphabetically, as it will be used as secondary sort
        # in the case of "team_count".
        topics = get_alphabetical_topics(course_module)
        if ordering == 'team_count':
            # Counting teams per topic requires a query for all topics,
            # so do it up front before sorting and paginating.
            add_team_count(topics, course_id)
            # Python's sort is stable, so the alphabetical order above
            # becomes the tie-breaker within equal team counts.
            topics.sort(key=lambda t: t['team_count'], reverse=True)
            page = self.paginate_queryset(topics)
            serializer = TopicSerializer(
                page,
                context={'course_id': course_id},
                many=True,
            )
        else:
            page = self.paginate_queryset(topics)
            # Use the serializer that adds team_count in a bulk operation per page.
            serializer = BulkTeamCountTopicSerializer(page, context={'course_id': course_id}, many=True)

        response = self.get_paginated_response(serializer.data)
        response.data['sort_order'] = ordering

        return response
def get_alphabetical_topics(course_module):
    """Return a list of team topics sorted alphabetically.

    Sorting is case-insensitive on each topic's name.

    Arguments:
        course_module (xmodule): the course which owns the team topics

    Returns:
        list: a list of sorted team topics
    """
    def topic_sort_key(topic):
        # Lower-case the name so that ordering ignores capitalization.
        return topic['name'].lower()

    return sorted(course_module.teams_topics, key=topic_sort_key)
class TopicDetailView(APIView):
    """
    **Use Cases**

        Retrieve a single topic from a course.

    **Example Requests**

        GET /api/team/v0/topics/{topic_id},{course_id}

    **Query Parameters for GET**

        * topic_id: The ID of the topic to retrieve (required).

        * course_id: The ID of the course to retrieve the topic from
          (required).

    **Response Values for GET**

        If the user is not logged in, a 401 error is returned.

        If the topic_id course_id are not given or an unsupported value is
        passed for order_by, returns a 400 error.

        If the user is not enrolled in the course, or is not course or
        global staff, returns a 403 error.

        If the course does not exist, returns a 404 error.

        Otherwise, a 200 response is returned containing the following fields:

        * id: The topic's unique identifier.

        * name: The name of the topic.

        * description: A description of the topic.
    """
    authentication_classes = (OAuth2Authentication, SessionAuthentication)
    permission_classes = (permissions.IsAuthenticated,)

    def get(self, request, topic_id, course_id):
        """GET /api/team/v0/topics/{topic_id},{course_id}/"""
        try:
            course_id = CourseKey.from_string(course_id)
        except InvalidKeyError:
            return Response(status=status.HTTP_404_NOT_FOUND)

        # Ensure the course exists
        course_module = modulestore().get_course(course_id)
        if course_module is None:
            return Response(status=status.HTTP_404_NOT_FOUND)

        if not has_team_api_access(request.user, course_id):
            return Response(status=status.HTTP_403_FORBIDDEN)

        # Topics are stored in course configuration, not the database;
        # look the requested one up by id.
        topics = [t for t in course_module.teams_topics if t['id'] == topic_id]

        if len(topics) == 0:
            return Response(status=status.HTTP_404_NOT_FOUND)

        serializer = TopicSerializer(topics[0], context={'course_id': course_id})
        return Response(serializer.data)
class MembershipListView(ExpandableFieldViewMixin, GenericAPIView):
"""
**Use Cases**
List course team memberships or add a user to a course team.
**Example Requests**:
GET /api/team/v0/team_membership
POST /api/team/v0/team_membership
**Query Parameters for GET**
At least one of username and team_id must be provided.
* username: Returns membership records only for the specified user.
If the requesting user is not staff then only memberships for
teams associated with courses in which the requesting user is
enrolled are returned.
* team_id: Returns only membership records associated with the
specified team. The requesting user must be staff or enrolled in
the course associated with the team.
* course_id: Returns membership records only for the specified
course. Username must have access to this course, or else team_id
must be in this course.
* page_size: Number of results to return per page.
* page: Page number to retrieve.
* expand: Comma separated list of types for which to return
expanded representations. Supports "user" and "team".
**Response Values for GET**
If the user is logged in and enrolled, the response contains:
* count: The total number of memberships matching the request.
* next: The URL to the next page of results, or null if this is the
last page.
* previous: The URL to the previous page of results, or null if this
is the first page.
* num_pages: The total number of pages in the result.
* results: A list of the memberships matching the request.
* user: The user associated with the membership. This field may
contain an expanded or collapsed representation.
* team: The team associated with the membership. This field may
contain an expanded or collapsed representation.
* date_joined: The date and time the membership was created.
* last_activity_at: The date of the last activity of the user
within the team.
For all text fields, clients rendering the values should take care
to HTML escape them to avoid script injections, as the data is
stored exactly as specified. The intention is that plain text is
supported, not HTML.
If the user is not logged in and active, a 401 error is returned.
If neither team_id nor username are provided, a 400 error is
returned.
If team_id is provided but the team does not exist, a 404 error is
returned.
If the specified course_id is invalid, a 404 error is returned.
This endpoint uses 404 error codes to avoid leaking information
about team or user existence. Specifically, a 404 error will be
returned if a logged in user specifies a team_id for a course
they are not enrolled in.
Additionally, when username is specified the list of returned
memberships will be filtered to memberships in teams associated
with courses that the requesting user is enrolled in.
If the course specified by course_id does not contain the team
specified by team_id, a 400 error is returned.
If the user is not enrolled in the course specified by course_id,
and does not have staff access to the course, a 400 error is
returned.
**Response Values for POST**
Any logged in user enrolled in a course can enroll themselves in a
team in the course. Course staff, global staff, and discussion
privileged users can enroll any user in a team, with a few
exceptions noted below.
If the user is not logged in and active, a 401 error is returned.
If username and team are not provided in the posted JSON, a 400
error is returned describing the missing fields.
If the specified team does not exist, a 404 error is returned.
If the user is not staff, does not have discussion privileges,
and is not enrolled in the course associated with the team they
are trying to join, or if they are trying to add a user other
than themselves to a team, a 404 error is returned. This is to
prevent leaking information about the existence of teams and users.
If the specified user does not exist, a 404 error is returned.
If the user is already a member of a team in the course associated
with the team they are trying to join, a 400 error is returned.
This applies to both staff and students.
If the user is not enrolled in the course associated with the team
they are trying to join, a 400 error is returned. This can occur
when a staff or discussion privileged user posts a request adding
another user to a team.
"""
authentication_classes = (OAuth2Authentication, SessionAuthentication)
permission_classes = (permissions.IsAuthenticated,)
serializer_class = MembershipSerializer
    def get(self, request):
        """GET /api/team/v0/team_membership

        List team memberships filtered by any combination of the
        ``username``, ``team_id`` and ``course_id`` query parameters; at
        least one of ``username`` or ``team_id`` is required.  404 is
        deliberately used in place of 403 wherever a more specific status
        would leak the existence of a team or course to the requester.
        """
        specified_username_or_team = False
        username = None
        team_id = None
        requested_course_id = None
        requested_course_key = None
        accessible_course_ids = None
        if 'course_id' in request.query_params:
            requested_course_id = request.query_params['course_id']
            try:
                requested_course_key = CourseKey.from_string(requested_course_id)
            except InvalidKeyError:
                # Malformed course key: 404 rather than 400 so nothing is
                # revealed about valid key formats or course existence.
                return Response(status=status.HTTP_404_NOT_FOUND)
        if 'team_id' in request.query_params:
            specified_username_or_team = True
            team_id = request.query_params['team_id']
            try:
                team = CourseTeam.objects.get(team_id=team_id)
            except CourseTeam.DoesNotExist:
                return Response(status=status.HTTP_404_NOT_FOUND)
            # When both course_id and team_id are supplied they must agree.
            if requested_course_key is not None and requested_course_key != team.course_id:
                return Response(status=status.HTTP_400_BAD_REQUEST)
            # 404 (not 403) so team existence is not leaked to outsiders.
            if not has_team_api_access(request.user, team.course_id):
                return Response(status=status.HTTP_404_NOT_FOUND)
        if 'username' in request.query_params:
            specified_username_or_team = True
            username = request.query_params['username']
            if not request.user.is_staff:
                # Non-staff users may only see memberships within courses
                # they can access: their enrollments plus courses they staff.
                enrolled_courses = (
                    CourseEnrollment.enrollments_for_user(request.user).values_list('course_id', flat=True)
                )
                staff_courses = (
                    CourseAccessRole.objects.filter(user=request.user, role='staff').values_list('course_id', flat=True)
                )
                accessible_course_ids = [item for sublist in (enrolled_courses, staff_courses) for item in sublist]
                if requested_course_id is not None and requested_course_id not in accessible_course_ids:
                    return Response(status=status.HTTP_400_BAD_REQUEST)
        if not specified_username_or_team:
            return Response(
                build_api_error(ugettext_noop("username or team_id must be specified.")),
                status=status.HTTP_400_BAD_REQUEST
            )
        course_keys = None
        if requested_course_key is not None:
            course_keys = [requested_course_key]
        elif accessible_course_ids is not None:
            # Restrict the membership query to the accessible courses
            # computed above for the non-staff username filter.
            course_keys = [CourseKey.from_string(course_string) for course_string in accessible_course_ids]
        queryset = CourseTeamMembership.get_memberships(username, course_keys, team_id)
        page = self.paginate_queryset(queryset)
        serializer = self.get_serializer(page, many=True)
        return self.get_paginated_response(serializer.data)
    def post(self, request):
        """POST /api/team/v0/team_membership

        Add the user named in the request body to the team named in the
        request body.  Both ``username`` and ``team_id`` are required.
        """
        field_errors = {}
        if 'username' not in request.data:
            field_errors['username'] = build_api_error(ugettext_noop("Username is required."))
        if 'team_id' not in request.data:
            field_errors['team_id'] = build_api_error(ugettext_noop("Team id is required."))
        if field_errors:
            return Response({
                'field_errors': field_errors,
            }, status=status.HTTP_400_BAD_REQUEST)
        try:
            team = CourseTeam.objects.get(team_id=request.data['team_id'])
        except CourseTeam.DoesNotExist:
            return Response(status=status.HTTP_404_NOT_FOUND)
        username = request.data['username']
        # 404 (not 403) so team and user existence is not leaked.
        if not has_team_api_access(request.user, team.course_id, access_username=username):
            return Response(status=status.HTTP_404_NOT_FOUND)
        try:
            user = User.objects.get(username=username)
        except User.DoesNotExist:
            return Response(status=status.HTTP_404_NOT_FOUND)
        # Enforce the per-course maximum team size, when configured.
        course_module = modulestore().get_course(team.course_id)
        if course_module.teams_max_size is not None and team.users.count() >= course_module.teams_max_size:
            return Response(
                build_api_error(ugettext_noop("This team is already full.")),
                status=status.HTTP_400_BAD_REQUEST
            )
        try:
            membership = team.add_user(user)
            # Analytics event; distinguishes self-join from an admin add.
            emit_team_event(
                'edx.team.learner_added',
                team.course_id,
                {
                    'team_id': team.team_id,
                    'user_id': user.id,
                    'add_method': 'joined_from_team_view' if user == request.user else 'added_by_another_user'
                }
            )
        except AlreadyOnTeamInCourse:
            return Response(
                build_api_error(
                    ugettext_noop("The user {username} is already a member of a team in this course."),
                    username=username
                ),
                status=status.HTTP_400_BAD_REQUEST
            )
        except NotEnrolledInCourseForTeam:
            return Response(
                build_api_error(
                    ugettext_noop("The user {username} is not enrolled in the course associated with this team."),
                    username=username
                ),
                status=status.HTTP_400_BAD_REQUEST
            )
        serializer = self.get_serializer(instance=membership)
        return Response(serializer.data)
class MembershipDetailView(ExpandableFieldViewMixin, GenericAPIView):
    """
    **Use Cases**
        Retrieve a single course team membership, or remove a user from a
        course team.
    **Example Requests**:
        GET /api/team/v0/team_membership/{team_id},{username}
        DELETE /api/team/v0/team_membership/{team_id},{username}
    **Query Parameters for GET**
        * expand: Comma separated list of types ("user", "team") for which
          to return expanded representations.
    **Response Values for GET**
        For an enrolled user or course/global staff, the response contains
        the membership's user, team, date_joined and last_activity_at.
        Text fields are stored verbatim; clients must HTML-escape them.
        401 is returned for anonymous users; 404 is returned when the team,
        the membership, or the requester's access is missing (avoiding any
        leak of course/team existence).
    **Response Values for DELETE**
        Users may remove themselves; staff and discussion-privileged users
        may remove anyone.  Success returns 204 with no content.  401 for
        anonymous users; 404 for missing team/user/membership or when a
        non-privileged user targets someone else.
    """
    authentication_classes = (OAuth2Authentication, SessionAuthentication)
    permission_classes = (permissions.IsAuthenticated,)
    serializer_class = MembershipSerializer

    def get_team(self, team_id):
        """Look up the team with ``team_id``; raise Http404 if absent."""
        try:
            return CourseTeam.objects.get(team_id=team_id)
        except CourseTeam.DoesNotExist:
            raise Http404

    def get_membership(self, username, team):
        """Look up ``username``'s membership in ``team``; raise Http404 if absent."""
        try:
            return CourseTeamMembership.objects.get(user__username=username, team=team)
        except CourseTeamMembership.DoesNotExist:
            raise Http404

    def get(self, request, team_id, username):
        """GET /api/team/v0/team_membership/{team_id},{username}"""
        team = self.get_team(team_id)
        # 404 rather than 403: do not reveal the team's existence.
        if not has_team_api_access(request.user, team.course_id):
            return Response(status=status.HTTP_404_NOT_FOUND)
        found_membership = self.get_membership(username, team)
        return Response(self.get_serializer(instance=found_membership).data)

    def delete(self, request, team_id, username):
        """DELETE /api/team/v0/team_membership/{team_id},{username}"""
        team = self.get_team(team_id)
        # Guard clause: no access means a 404, never a 403.
        if not has_team_api_access(request.user, team.course_id,
                                   access_username=username):
            return Response(status=status.HTTP_404_NOT_FOUND)
        found_membership = self.get_membership(username, team)
        removal_method = (
            'removed_by_admin' if 'admin' in request.query_params
            else 'self_removal'
        )
        found_membership.delete()
        emit_team_event(
            'edx.team.learner_removed',
            team.course_id,
            {
                'team_id': team.team_id,
                'user_id': found_membership.user.id,
                'remove_method': removal_method
            }
        )
        return Response(status=status.HTTP_204_NO_CONTENT)
| louyihua/edx-platform | lms/djangoapps/teams/views.py | Python | agpl-3.0 | 51,355 |
"""IIIF image and presentation logic."""
import logging
from itertools import chain
from typing import Dict, Iterable, List, Mapping, Optional, Tuple
from urllib.parse import urlencode
import shortuuid
from flask_sqlalchemy import Pagination
from iiif_prezi.factory import Manifest, ManifestFactory
from .mets import MetsDocument, PhysicalItem, TocEntry
#: Localized labels for metadata values
METAMAP = {
'title': {'en': 'Title', 'de': 'Titel'},
'language': {'en': 'Language', 'de': 'Sprache'},
'genre': {'en': 'Genre', 'de': 'Genre'},
'creator': {'en': 'Creator', 'de': 'Urheber'},
'other_persons': {'en': 'Other Persons', 'de': 'Andere Personen'},
'publisher': {'en': 'Publisher', 'de': 'Veröffentlicht von'},
'pub_place': {'en': 'Publication Place', 'de': 'Publikationsort'},
'pub_date': {'en': 'Publication Date', 'de': 'Erscheinungsdatum'}
}
#: Mapping from license shorthands to their full URIs
LICENSE_MAP = {
'pdm': 'http://creativecommons.org/licenses/publicdomain/',
'cc0': 'https://creativecommons.org/publicdomain/zero/1.0/',
'cc-by': 'http://creativecommons.org/licenses/by/4.0',
'cc-by-sa': 'http://creativecommons.org/licenses/by-sa/4.0',
'cc-by-nd': 'http://creativecommons.org/licenses/by-nd/4.0',
'cc-by-nc': 'http://creativecommons.org/licenses/by-nd/4.0',
'cc-by-nc-sa': 'http://creativecommons.org/licenses/by-nc-sa/4.0',
'cc-by-nc-nd': 'http://creativecommons.org/licenses/by-nc-nd/4.0'}
logger = logging.getLogger(__name__)
def make_label(mets_meta: dict) -> str:
    """Assemble a descriptive label from a METS/MODS metadata mapping.

    The label takes the form '{creator}: {title} ({pub_place}, {pub_date})',
    with the creator and parenthesized parts omitted when absent.

    :param mets_meta: Metadata to generate label from
    :returns: Generated label
    """
    label = mets_meta['title'][0]
    creators = mets_meta.get('creator')
    if creators:
        label = "{creator}: {label}".format(
            creator="/".join(creators),
            label=label)
    place = mets_meta.get('pub_place')
    date = mets_meta.get('pub_date')
    if place and date:
        label = "{label} ({pub_place}, {pub_date})".format(
            label=label, pub_place=place, pub_date=date)
    elif date:
        label = "{label} ({pub_date})".format(label=label, pub_date=date)
    elif place:
        label = "{label} ({pub_place})".format(label=label, pub_place=place)
    return label
def make_metadata(mets_meta: dict) -> List[dict]:
    """Convert extracted METS/MODS metadata to IIIF Presentation metadata.

    Known keys are localized via METAMAP; any key containing 'Identifier'
    is passed through verbatim.

    :param mets_meta: Metadata as extracted from the METS/MODS data
    :returns: The IIIF metadata set
    """
    metadata = []
    for key, value in mets_meta.items():
        if key in METAMAP and value:
            metadata.append({'label': METAMAP[key], 'value': value})
    for label, value in mets_meta.items():
        if 'Identifier' in label and value:
            metadata.append({'label': label, 'value': value})
    return metadata
def _get_canvases(toc_entry: 'TocEntry', manifest: 'Manifest') -> List[str]:
    """Collect the manifest canvases referenced by a TOC entry (recursively).

    :param toc_entry: TOC entry to get canvases for
    :param manifest: Manifest with canvases
    :returns: All canvases for the given TOC entry and its children
    """
    collected = []
    for phys_id in toc_entry.physical_ids:
        # Canvas ids end with '<physical id>.json'; match on that suffix.
        found = None
        for candidate in manifest.sequences[0].canvases:
            if candidate.id.endswith(f'{phys_id}.json'):
                found = candidate
                break
        if found is None:
            logger.warning(f'Could not find a matching canvas for {phys_id}')
        else:
            collected.append(found)
    # Recurse into child entries, if any.
    for child in (toc_entry.children or ()):
        collected.extend(_get_canvases(child, manifest))
    return collected
def _add_toc_ranges(manifest: 'Manifest', toc_entries: Iterable['TocEntry']):
    """Add IIIF ranges to the manifest for every TOC entry (recursively).

    Entries without a label or without physical ids are skipped entirely,
    including their children.

    :param manifest: The IIIF manifest to add the ranges to
    :param toc_entries: TOC entries to add ranges for
    """
    for entry in toc_entries:
        if entry.label and entry.physical_ids:
            toc_range = manifest.range(ident=entry.logical_id,
                                       label=entry.label)
            for canvas in _get_canvases(entry, manifest):
                toc_range.add_canvas(canvas)
            for child in entry.children:
                toc_range.range(ident=child.logical_id, label=child.label)
            _add_toc_ranges(manifest, entry.children)
def _make_empty_manifest(ident: str, label: str, base_url: str) -> 'Manifest':
    """Build a manifest skeleton with no sequences, canvases or metadata.

    :param ident: Identifier for the manifest -- not a URL, but the
                  `<ident>` in `https://..../<ident>/manifest`
    :param label: Label for the manifest
    :param base_url: Root URL for the application, e.g. https://example.com
    :returns: The empty manifest
    """
    factory = ManifestFactory()
    prezi_root = f'{base_url}/iiif/{ident}'
    factory.set_base_prezi_uri(prezi_root)
    factory.set_base_image_uri(f'{base_url}/iiif/image')
    factory.set_iiif_image_info('2.0', 0)
    return factory.manifest(ident=f'{prezi_root}/manifest', label=label)
def _fill_manifest_metadata(manifest: 'Manifest', mets_metadata: dict) -> None:
    """Copy descriptive metadata from a METS/MODS mapping onto a manifest.

    Missing keys become empty strings; the license shorthand is resolved
    through LICENSE_MAP.

    :param manifest: Manifest to add metadata to
    :param mets_metadata: Metadata extracted from a METS/MODS document
    """
    for entry in make_metadata(mets_metadata):
        manifest.set_metadata(entry)
    lookup = mets_metadata.get
    manifest.description = lookup('description', '')
    manifest.seeAlso = lookup('see_also', '')
    manifest.related = lookup('related', '')
    manifest.attribution = lookup('attribution', '')
    manifest.logo = lookup('logo', '')
    manifest.license = LICENSE_MAP.get(lookup('license', ''), '')
def make_image_info(itm: 'PhysicalItem', base_url: str) -> dict:
    """Create the IIIF Image API info.json structure for a physical item.

    :param itm: Physical item whose files carry width/height information
    :param base_url: Root URL for the application
    :returns: info.json-shaped dict (level 0 profile)
    """
    # Files without both dimensions are ignored; sorting tuples puts the
    # largest (width, height) pair last.
    dimensions = sorted((f.width, f.height) for f in itm.files
                        if f.width is not None and f.height is not None)
    full_width, full_height = dimensions[-1]
    return {
        '@context': 'http://iiif.io/api/image/2/context.json',
        '@id': f'{base_url}/iiif/image/{itm.image_ident}',
        'protocol': 'http://iiif.io/api/image',
        'profile': ['http://iiif.io/api/image/2/level0.json'],
        'width': full_width,
        'height': full_height,
        'sizes': [{'width': w, 'height': h} for w, h in dimensions]}
def make_manifest(ident: str, mets_doc: 'MetsDocument',
                  base_url: str) -> dict:
    """Generate a IIIF manifest from data extracted from a METS document.

    :param ident: Identifier of the document
    :param mets_doc: METS document to generate manifest from
    :param base_url: Root URL for the application
    :returns: Generated IIIF manifest as a plain dict
    """
    manifest = _make_empty_manifest(
        ident=ident, base_url=base_url,
        label=make_label(mets_doc.metadata))
    _fill_manifest_metadata(manifest, mets_doc.metadata)
    sequence = manifest.sequence(ident='default')
    for page_id, page in mets_doc.physical_items.items():
        canvas = sequence.canvas(ident=page_id, label=page.label or '?')
        annotation = canvas.annotation(ident=page_id)
        image = annotation.image(page.image_ident, iiif=True)
        canvas.width, canvas.height = page.max_dimensions
        image.set_hw(canvas.height, canvas.width)
        # Thumbnail uses the smallest known rendition of the page image.
        thumb_w, thumb_h = page.min_dimensions
        canvas.thumbnail = (
            f'{base_url}/iiif/image/{page.image_ident}'
            f'/full/{thumb_w},{thumb_h}/0/default.jpg')
    _add_toc_ranges(manifest, mets_doc.toc_entries)
    return manifest.toJSON(top=True)
def make_manifest_collection(
        pagination: 'Pagination', label: str, collection_id: str,
        per_page: int, base_url: str, page_num: Optional[int] = None,
        coll_counts: Optional[Tuple[int, str, int]] = None) -> dict:
    """Generate a IIIF collection.

    :param pagination: Pagination query for all manifests of the
                       collection
    :param label: Label for the collection
    :param collection_id: Identifier of the collection
    :param per_page: Number of manifests per collection page
    :param base_url: Root URL for the application,
                     e.g. https://example.com
    :param page_num: Number of the collection page to display
    :param coll_counts: (collection_id, label, manifest_count) triples of
                        sub-collections; only consulted on the first page
    :returns: The generated IIIF collection
    """
    collection_url = f'{base_url}/iiif/collection/{collection_id}'
    if page_num is not None:
        page_id = 'p{}'.format(page_num)
    else:
        page_id = 'top'
    collection = {
        "@context": "http://iiif.io/api/presentation/2/context.json",
        "@id": f'{base_url}/iiif/collection/{collection_id}/{page_id}',
        "@type": "sc:Collection",
        "total": pagination.total,
        "label": label,
    }
    if page_id == 'top':
        collection.update({
            "first": f'{collection_url}/p1',
            "last": f'{collection_url}/p{pagination.pages}'
        })
    else:
        if collection_id != 'index':
            collection['within'] = f'{collection_url}/top'
        collection.update({
            'startIndex': (pagination.page - 1) * pagination.per_page,
            'manifests': [{
                '@id': f'{base_url}/iiif/{m.id}/manifest',
                '@type': 'sc:Manifest',
                'label': m.label,
                'attribution': m.manifest['attribution'],
                'logo': m.manifest['logo'],
                'thumbnail': m.manifest.get(
                    'thumbnail',
                    m.manifest['sequences'][0]['canvases'][0]['thumbnail'])
            } for m in pagination.items]
        })
    if page_num == 1:
        collection['collections'] = []
        # Loop variable renamed from 'label' to avoid shadowing the
        # function parameter of the same name.
        for cid, coll_label, num_manifs in coll_counts:
            if not num_manifs:
                continue
            # We create a mock pagination object that does not have
            # an underlying query, since we're only going to need
            # the manifest count when generating the top-level collection
            manifests_pagination = Pagination(
                None, 1, per_page, num_manifs, None)
            # BUG FIX: the recursive call previously passed only four
            # positional arguments (dropping the required base_url and
            # misplacing per_page), which raised TypeError at runtime.
            iiif_coll = make_manifest_collection(
                manifests_pagination, coll_label, cid, per_page, base_url)
            collection['collections'].append(iiif_coll)
        if not collection['collections']:
            del collection['collections']
    if pagination.has_next:
        collection['next'] = f'{collection_url}/p{pagination.next_num}'
    if pagination.has_prev:
        collection['prev'] = f'{collection_url}/p{pagination.prev_num}'
    return collection
def make_annotation_list(pagination: 'Pagination', request_url: str,
                         request_args: dict, base_url: str) -> dict:
    """Create a IIIF annotation list.

    :param pagination: Pagination of annotations
    :param request_url: Request URL for the annotation list, will be its
                        IIIF identifier
    :param request_args: Request arguments for the annotation list request
    :param base_url: Root URL for the application, e.g. https://example.com
    :returns: The IIIF annotation list
    """
    def _make_link(page_no: int) -> str:
        """Build a page link, preserving all other request arguments."""
        params = urlencode({'p': page_no, **request_args})
        return f'{base_url}/iiif/annotation?{params}'

    out = {
        '@context': 'http://iiif.io/api/presentation/2/context.json',
        '@id': request_url,
        '@type': 'sc:AnnotationList',
        'within': {
            '@type': 'sc:Layer',
            'total': pagination.total,
            'first': _make_link(1),
            'last': _make_link(pagination.pages),
            # Arguments the endpoint does not interpret are echoed back.
            'ignored': [k for k in request_args
                        if k not in ('q', 'motivation', 'date', 'user', 'p')]
        },
        'startIndex': (pagination.page - 1) * pagination.per_page,
        'resources': [a.annotation for a in pagination.items],
    }
    if pagination.has_next:
        out['next'] = _make_link(pagination.next_num)
    if pagination.has_prev:
        # BUG FIX: this previously assigned to out['next'], clobbering the
        # forward link and never exposing a 'prev' link at all.
        out['prev'] = _make_link(pagination.prev_num)
    return out
| jbaiter/demetsiiify | demetsiiify/iiif.py | Python | agpl-3.0 | 12,787 |
# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2020 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from unittest import mock
import pytest
from snapcraft.internal.meta.snap import Snap
from snapcraft.project import Project
@pytest.fixture
def project(monkeypatch, tmp_work_path, request):
    """Return project variants for core and core18"""
    # NOTE(review): presumably pins build parallelism to a fixed value for
    # deterministic tests -- confirm against Project.parallel_build_count.
    monkeypatch.setattr(Project, "parallel_build_count", 2)
    snapcraft_project = Project()
    # Attach minimal snap metadata: a strict-confinement snap on core18.
    snapcraft_project._snap_meta = Snap(
        name="test-snap", base="core18", confinement="strict"
    )
    return snapcraft_project
@pytest.fixture
def mock_common_run_output():
    """A no-op common.run_output mock."""
    # Context manager guarantees the patch is stopped even if teardown is
    # interrupted, unlike a manual start()/stop() pair.
    with mock.patch("snapcraft.internal.common.run_output") as mocked:
        yield mocked
@pytest.fixture
def mock_run():
    """A no-op run mock."""
    # Context manager guarantees the patch is stopped even if teardown is
    # interrupted, unlike a manual start()/stop() pair.
    with mock.patch("snapcraft.plugins.v1.PluginV1.run") as mocked:
        yield mocked
@pytest.fixture
def mock_run_output():
    """A no-op run_output mock."""
    # Context manager guarantees the patch is stopped even if teardown is
    # interrupted, unlike a manual start()/stop() pair.
    with mock.patch("snapcraft.plugins.v1.PluginV1.run_output") as mocked:
        yield mocked
@pytest.fixture
def mock_tar():
    """A no-op tar source mock."""
    # Context manager guarantees the patch is stopped even if teardown is
    # interrupted, unlike a manual start()/stop() pair.
    with mock.patch("snapcraft.internal.sources.Tar") as mocked:
        yield mocked
@pytest.fixture
def mock_zip():
    """A no-op zip source mock."""
    # Context manager guarantees the patch is stopped even if teardown is
    # interrupted, unlike a manual start()/stop() pair.
    with mock.patch("snapcraft.internal.sources.Zip") as mocked:
        yield mocked
| snapcore/snapcraft | tests/unit/plugins/v1/conftest.py | Python | gpl-3.0 | 2,053 |
# import argv lib from sys
from sys import argv
# there two argv, first is the program name, second is the filename
# which will be open
script, filename = argv
# use "open" funtion to open the file, and return to the parameter txt
txt = open(filename)
# print a string, tell the user what file be opened
print "Here's your file %r:" % filename
# use the read funtion of txt, and print what read
print txt.read()
# print the string to tell the user can open another file again
print "Type the filename again:"
# print the ">",and take the input to var of file_again
file_again = raw_input(">")
# open the file what be input the filename by user
txt_again = open(file_again)
# print the file contain again
print txt_again.read()
| elvinsys/python | ex/ex15.py | Python | gpl-3.0 | 734 |
from .dispatch import dispatch
from .compatibility import basestring
from blaze.expr.literal import BoundSymbol, data as bz_data
@dispatch(object, (basestring, list, tuple))
def create_index(t, column_name_or_names, name=None):
    """Create an index on a column.

    Fallback implementation: dispatched for any table-like object that
    has no specialised ``create_index`` registered, and always raises.

    Parameters
    ----------
    t : table-like
        Object to index.
    column_name_or_names : string, list, tuple
        A column name to index on, or a list or tuple for a composite index.
    name : str, optional
        The name of the index to create (required by some backends, e.g. SQL).

    Raises
    ------
    NotImplementedError
        Always, naming the unsupported type.

    Examples
    --------
    >>> # Using SQLite
    >>> from blaze import SQL
    >>> # create a table called 'tb', in memory
    >>> sql = SQL('sqlite:///:memory:', 'tb',
    ...           schema='{id: int64, value: float64, categ: string}')
    >>> dta = [(1, 2.0, 'a'), (2, 3.0, 'b'), (3, 4.0, 'c')]
    >>> sql.extend(dta)
    >>> # create an index on the 'id' column (for SQL we must provide a name)
    >>> sql.table.indexes
    set()
    >>> create_index(sql, 'id', name='id_index')
    >>> sql.table.indexes
    {Index('id_index', Column('id', BigInteger(), table=<tb>, nullable=False))}
    """
    raise NotImplementedError("create_index not implemented for type %r" %
                              type(t).__name__)
@dispatch(BoundSymbol, (basestring, list, tuple))
def create_index(dta, column_name_or_names, name=None, **kwargs):
    """Unwrap a bound symbol and index its underlying data object."""
    underlying = dta.data
    return create_index(underlying, column_name_or_names, name=name, **kwargs)
@dispatch(basestring, (basestring, list, tuple))
def create_index(uri, column_name_or_names, name=None, **kwargs):
    """Resolve *uri* to a data object, index it, and return the object."""
    resource = bz_data(uri, **kwargs)
    create_index(resource, column_name_or_names, name=name)
    return resource
| ContinuumIO/blaze | blaze/index.py | Python | bsd-3-clause | 1,644 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Filename: Os.py
# Description: Functions for OS
# Author: Simon L. J. Robin | https://sljrobin.org
# Created: 2016-08-28 23:09:04
# Modified: 2016-08-30 01:18:27
#
################################################################################
import os
import sys
sys.path.insert(0, os.environ['HOME'] + "/.dzen2/lib")
import Colors
################################################################################
class Os(object):
    """Functions for OS.

    Writes dzen2-colored indicators for the detected distribution to
    stdout.
    """
    def get_os(self):
        """Print a colored one-letter tag for the current distribution.

        Reads ``/proc/sys/kernel/osrelease`` and writes 'A' (Arch) and/or
        'D' (Debian) with dzen2 color markup; on read failure an error
        marker is written instead.
        """
        try:
            # 'with' guarantees the file is closed even if read() fails.
            with open("/proc/sys/kernel/osrelease") as data_file:
                os_info = str(data_file.read().split())
        except (IOError, OSError):
            # BUG FIX: the original used a bare 'except:' (which also
            # swallowed KeyboardInterrupt/SystemExit) and then fell
            # through to the checks below with os_info undefined,
            # raising NameError.  Bail out after reporting the error.
            sys.stdout.write("^fg(%s)!E! OSFile^fg()" % Colors.CL_BASE08)
            return
        # Looking for Arch / Debian
        if "ARCH" in os_info:
            sys.stdout.write("^fg(%s)A^fg()" % Colors.CL_BASE0D)
        if "Debian" in os_info:
            sys.stdout.write("^fg(%s)D^fg()" % Colors.CL_BASE0E)
| sljrobin/dotfiles | dzen2/.dzen2/scripts/Os.py | Python | gpl-2.0 | 1,179 |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from astropy.io import registry
from .info import serialize_method_as
__all__ = ['TableRead', 'TableWrite']
__doctest_skip__ = ['TableRead', 'TableWrite']
class TableRead(registry.UnifiedReadWrite):
    """Read and parse a data table and return as a Table.

    This class provides the Table interface to the astropy unified I/O
    layer, allowing files in many supported data formats to be read with
    ``Table.read('file', format=...)``.  Use ``Table.read.help()`` /
    ``Table.read.list_formats()`` for the available readers.

    See also: https://docs.astropy.org/en/stable/io/unified.html

    Parameters
    ----------
    *args : tuple, optional
        Positional arguments passed through to data reader. If supplied the
        first argument is typically the input filename.
    format : str
        File format specifier.
    units : list, dict, optional
        List or dict of units to apply to columns
    descriptions : list, dict, optional
        List or dict of descriptions to apply to columns
    **kwargs : dict, optional
        Keyword arguments passed through to data reader.

    Returns
    -------
    out : `~astropy.table.Table`
        Table corresponding to file contents
    """
    def __init__(self, instance, cls):
        super().__init__(instance, cls, 'read')

    def __call__(self, *args, **kwargs):
        target_cls = self._cls
        units = kwargs.pop('units', None)
        descriptions = kwargs.pop('descriptions', None)
        table = registry.read(target_cls, *args, **kwargs)
        # Some readers (e.g. ascii.ecsv) may return a class other than the
        # requested one; coerce without copying (io.registry.read would
        # normally copy).  The usual case is swapping Table <=> QTable.
        if table.__class__ is not target_cls:
            try:
                table = target_cls(table, copy=False)
            except Exception:
                raise TypeError('could not convert reader output to {} '
                                'class.'.format(target_cls.__name__))
        table._set_column_attribute('unit', units)
        table._set_column_attribute('description', descriptions)
        return table
class TableWrite(registry.UnifiedReadWrite):
    """
    Write this Table object out in the specified format.

    This class provides the Table interface to the astropy unified I/O
    layer, allowing tables to be written in many supported data formats
    with ``dat.write('file', format=...)``.  Use ``Table.write.help()`` /
    ``Table.write.list_formats()`` for the available writers.

    The ``serialize_method`` argument is explained in the section on
    `Table serialization methods
    <https://docs.astropy.org/en/latest/io/unified.html#table-serialization-methods>`_.

    See also: https://docs.astropy.org/en/stable/io/unified.html

    Parameters
    ----------
    *args : tuple, optional
        Positional arguments passed through to data writer. If supplied the
        first argument is the output filename.
    format : str
        File format specifier.
    serialize_method : str, dict, optional
        Serialization method specifier for columns.
    **kwargs : dict, optional
        Keyword arguments passed through to data writer.
    """
    def __init__(self, instance, cls):
        super().__init__(instance, cls, 'write')

    def __call__(self, *args, serialize_method=None, **kwargs):
        table = self._instance
        # Temporarily apply the requested serialization method while the
        # writer runs, then restore the previous settings.
        with serialize_method_as(table, serialize_method):
            registry.write(table, *args, **kwargs)
| aleksandr-bakanov/astropy | astropy/table/connect.py | Python | bsd-3-clause | 4,461 |
# -*- coding: utf-8 -*-
from sqlalchemy import Column, Integer, String
from settings import DATABASE_NAMES
class EntesMixin(object):
    # Route tables of models using this mixin to the 'entes' schema.
    __table_args__ = {'schema': DATABASE_NAMES.get('entes')}
class ProfileMixin(object):
    # Route tables of models using this mixin to the 'perfis' schema.
    __table_args__ = {'schema': DATABASE_NAMES.get('perfis')}
class AdminMixin(object):
    # Route tables of models using this mixin to the 'admin' schema.
    __table_args__ = {'schema': DATABASE_NAMES.get('admin')}
class UserMixin(object):
    # Route tables of models using this mixin to the 'usuarios' schema.
    __table_args__ = {'schema': DATABASE_NAMES.get('usuarios')}
class ClassificacaoArtisticaMixin(object):
    # Surrogate primary key.
    id = Column(Integer, primary_key=True)
    # Short human-readable description (up to 50 characters).
    descricao = Column(String(50))
| hackultura/siscult-migration | models/mixins.py | Python | gpl-2.0 | 593 |
#!/usr/bin/python
#import pdb
def first():
    """Top of the pdb demo call chain; deepens the stack via second()."""
    second()
    return "hey i am first"
def second():
    """Second frame of the demo call chain; deepens the stack via third()."""
    third()
    return "hey i am second"
def third():
    """Third frame of the demo call chain; deepens the stack via fourth()."""
    fourth()
    return "hey i am third"
def fourth():
    """Fourth frame of the demo call chain; deepens the stack via fifth()."""
    fifth()
    return "hey i am fourth"
def fifth():
    """Deepest frame of the demo call chain; simply returns its tag."""
    return "hey i am fifth"
# MAIN
# Uncomment the pdb import above and the line below to step through the
# five-level call chain with the debugger.
#pdb.set_trace()
first()
| tuxfux-hlp-notes/python-batches | archieves/batch-61/debugging/third.py | Python | gpl-3.0 | 309 |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2014-2015 Université Catholique de Louvain.
#
# This file is part of INGInious.
#
# INGInious is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# INGInious is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with INGInious. If not, see <http://www.gnu.org/licenses/>.
import os
import unittest
import webtest
import app_frontend
import common.base
import frontend
import frontend.session
# Skip app construction when the working directory is 'doc' -- presumably
# so documentation builds can import this package without side effects
# (TODO confirm against the doc build setup).
if not os.path.basename(os.getcwd()) == 'doc':
    # Build the frontend WSGI app from the configuration next to this file.
    app = app_frontend.get_app(os.path.dirname(os.path.realpath(__file__)) + "/configuration.json")
    # Wrap it in a WebTest client; an external host URL from the 'tests'
    # config section takes precedence over the in-process app.
    appt = webtest.TestApp(common.base.INGIniousConfiguration.get('tests', {}).get('host_url', app.wsgifunc()))
| GuillaumeDerval/INGInious | tests/__init__.py | Python | agpl-3.0 | 1,152 |
"""
Base class for any serializable list of things...
Copyright 2006-2009, Red Hat, Inc and Others
Michael DeHaan <michael.dehaan AT gmail>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301 USA
"""
import exceptions
from cexceptions import *
import utils
import glob
import time
import random
import os
from threading import Lock
import action_litesync
import item_system
import item_profile
import item_distro
import item_repo
import item_image
import item_mgmtclass
import item_package
import item_file
from utils import _
class Collection:
    def __init__(self,config):
        """
        Constructor.

        :param config: shared configuration object; also supplies the API
            handle via ``config.api``.
        """
        self.config = config
        self.clear()
        self.api = self.config.api
        # Populated on demand (see the action_litesync import above) --
        # TODO confirm where lite_sync is assigned.
        self.lite_sync = None
        # Guards iteration over self.listing in find().
        self.lock = Lock()
    def factory_produce(self,config,seed_data):
        """
        Must override in subclass. Factory_produce returns an Item object
        from datastructure seed_data.

        :raises NotImplementedError: always, in this base class.
        """
        raise exceptions.NotImplementedError
    def clear(self):
        """
        Forget about objects in the collection.
        """
        # Mapping of lowercase object name -> item object.
        self.listing = {}
def get(self, name):
"""
Return object with name in the collection
"""
return self.listing.get(name.lower(), None)
def find(self, name=None, return_list=False, no_errors=False, **kargs):
"""
Return first object in the collection that maches all item='value'
pairs passed, else return None if no objects can be found.
When return_list is set, can also return a list. Empty list
would be returned instead of None in that case.
"""
matches = []
# support the old style innovation without kwargs
if name is not None:
kargs["name"] = name
kargs = self.__rekey(kargs)
# no arguments is an error, so we don't return a false match
if len(kargs) == 0:
raise CX(_("calling find with no arguments"))
# performance: if the only key is name we can skip the whole loop
if len(kargs) == 1 and kargs.has_key("name") and not return_list:
return self.listing.get(kargs["name"].lower(), None)
self.lock.acquire()
for (name, obj) in self.listing.iteritems():
if obj.find_match(kargs, no_errors=no_errors):
matches.append(obj)
self.lock.release()
if not return_list:
if len(matches) == 0:
return None
return matches[0]
else:
return matches
SEARCH_REKEY = {
'kopts' : 'kernel_options',
'kopts_post' : 'kernel_options_post',
'ksmeta' : 'ks_meta',
'inherit' : 'parent',
'ip' : 'ip_address',
'mac' : 'mac_address',
'virt-auto-boot' : 'virt_auto_boot',
'virt-file-size' : 'virt_file_size',
'virt-disk-driver': 'virt_disk_driver',
'virt-ram' : 'virt_ram',
'virt-path' : 'virt_path',
'virt-type' : 'virt_type',
'virt-bridge' : 'virt_bridge',
'virt-cpus' : 'virt_cpus',
'virt-host' : 'virt_host',
'virt-group' : 'virt_group',
'dhcp-tag' : 'dhcp_tag',
'netboot-enabled' : 'netboot_enabled',
'ldap-enabled' : 'ldap_enabled',
'monit-enabled' : 'monit_enabled'
}
def __rekey(self,hash):
"""
Find calls from the command line ("cobbler system find")
don't always match with the keys from the datastructs and this
makes them both line up without breaking compatibility with either.
Thankfully we don't have a LOT to remap.
"""
newhash = {}
for x in hash.keys():
if self.SEARCH_REKEY.has_key(x):
newkey = self.SEARCH_REKEY[x]
newhash[newkey] = hash[x]
else:
newhash[x] = hash[x]
return newhash
def to_datastruct(self):
"""
Serialize the collection
"""
self.lock.acquire()
datastruct = [x.to_datastruct() for x in self.listing.values()]
self.lock.release()
return datastruct
def from_datastruct(self,datastruct):
if datastruct is None:
return
for seed_data in datastruct:
item = self.factory_produce(self.config,seed_data)
self.add(item)
def copy(self,ref,newname,logger=None):
ref = ref.make_clone()
ref.uid = self.config.generate_uid()
ref.ctime = 0
ref.set_name(newname)
if ref.COLLECTION_TYPE == "system":
# this should only happen for systems
for iname in ref.interfaces.keys():
# clear all these out to avoid DHCP/DNS conflicts
ref.set_dns_name("",iname)
ref.set_mac_address("",iname)
ref.set_ip_address("",iname)
return self.add(ref,save=True,with_copy=True,with_triggers=True,with_sync=True,check_for_duplicate_names=True,check_for_duplicate_netinfo=False)
def rename(self,ref,newname,with_sync=True,with_triggers=True,logger=None):
"""
Allows an object "ref" to be given a newname without affecting the rest
of the object tree.
"""
# Nothing to do when it is the same name
if newname == ref.name:
return True
# make a copy of the object, but give it a new name.
oldname = ref.name
newref = ref.make_clone()
newref.set_name(newname)
self.add(newref, with_triggers=with_triggers,save=True)
# for mgmt classes, update all objects that use it
if ref.COLLECTION_TYPE == "mgmtclass":
for what in ["distro","profile","system"]:
items = self.api.find_items(what,{"mgmt_classes":oldname})
for item in items:
for i in range(0,len(item.mgmt_classes)):
if item.mgmt_classes[i] == oldname:
item.mgmt_classes[i] = newname
self.api.add_item(what,item,save=True)
# for a repo, rename the mirror directory
if ref.COLLECTION_TYPE == "repo":
path = "/var/www/cobbler/repo_mirror/%s" % ref.name
if os.path.exists(path):
newpath = "/var/www/cobbler/repo_mirror/%s" % newref.name
os.renames(path, newpath)
# for a distro, rename the mirror and references to it
if ref.COLLECTION_TYPE == 'distro':
path = utils.find_distro_path(self.api.settings(), ref)
# create a symlink for the new distro name
utils.link_distro(self.api.settings(), newref)
# test to see if the distro path is based directly
# on the name of the distro. If it is, things need
# to updated accordingly
if os.path.exists(path) and path == "/var/www/cobbler/ks_mirror/%s" % ref.name:
newpath = "/var/www/cobbler/ks_mirror/%s" % newref.name
os.renames(path, newpath)
# update any reference to this path ...
distros = self.api.distros()
for d in distros:
if d.kernel.find(path) == 0:
d.set_kernel(d.kernel.replace(path, newpath))
d.set_initrd(d.initrd.replace(path, newpath))
self.config.serialize_item(self, d)
# now descend to any direct ancestors and point them at the new object allowing
# the original object to be removed without orphanage. Direct ancestors
# will either be profiles or systems. Note that we do have to care as
# set_parent is only really meaningful for subprofiles. We ideally want a more
# generic set_parent.
kids = ref.get_children()
for k in kids:
if k.COLLECTION_TYPE == "distro":
raise CX(_("internal error, not expected to have distro child objects"))
elif k.COLLECTION_TYPE == "profile":
if k.parent != "":
k.set_parent(newname)
else:
k.set_distro(newname)
self.api.profiles().add(k, save=True, with_sync=with_sync, with_triggers=with_triggers)
elif k.COLLECTION_TYPE == "system":
k.set_profile(newname)
self.api.systems().add(k, save=True, with_sync=with_sync, with_triggers=with_triggers)
elif k.COLLECTION_TYPE == "repo":
raise CX(_("internal error, not expected to have repo child objects"))
else:
raise CX(_("internal error, unknown child type (%s), cannot finish rename" % k.COLLECTION_TYPE))
# now delete the old version
self.remove(oldname, with_delete=True, with_triggers=with_triggers)
return True
def add(self,ref,save=False,with_copy=False,with_triggers=True,with_sync=True,quick_pxe_update=False,check_for_duplicate_names=False,check_for_duplicate_netinfo=False,logger=None):
"""
Add an object to the collection, if it's valid. Returns True
if the object was added to the collection. Returns False if the
object specified by ref deems itself invalid (and therefore
won't be added to the collection).
with_copy is a bit of a misnomer, but lots of internal add operations
can run with "with_copy" as False. True means a real final commit, as if
entered from the command line (or basically, by a user).
With with_copy as False, the particular add call might just be being run
during deserialization, in which case extra semantics around the add don't really apply.
So, in that case, don't run any triggers and don't deal with any actual files.
"""
if ref is None or ref.name is None:
return False
try:
ref.check_if_valid()
except CX, error:
return False
if ref.uid == '':
ref.uid = self.config.generate_uid()
if save is True:
now = time.time()
if ref.ctime == 0:
ref.ctime = now
ref.mtime = now
if self.lite_sync is None:
self.lite_sync = action_litesync.BootLiteSync(self.config, logger=logger)
# migration path for old API parameter that I've renamed.
if with_copy and not save:
save = with_copy
if not save:
# for people that aren't quite aware of the API
# if not saving the object, you can't run these features
with_triggers = False
with_sync = False
# Avoid adding objects to the collection
# if an object of the same/ip/mac already exists.
self.__duplication_checks(ref,check_for_duplicate_names,check_for_duplicate_netinfo)
if ref.COLLECTION_TYPE != self.collection_type():
raise CX(_("API error: storing wrong data type in collection"))
if not save:
# don't need to run triggers, so add it already ...
self.listing[ref.name.lower()] = ref
# perform filesystem operations
if save:
# failure of a pre trigger will prevent the object from being added
if with_triggers:
utils.run_triggers(self.api, ref,"/var/lib/cobbler/triggers/add/%s/pre/*" % self.collection_type(), [], logger)
self.listing[ref.name.lower()] = ref
# save just this item if possible, if not, save
# the whole collection
self.config.serialize_item(self, ref)
if with_sync:
if isinstance(ref, item_system.System):
# we don't need openvz containers to be network bootable
if ref.virt_type == "openvz":
ref.netboot_enabled = False
self.lite_sync.add_single_system(ref.name)
elif isinstance(ref, item_profile.Profile):
# we don't need openvz containers to be network bootable
if ref.virt_type == "openvz":
ref.enable_menu = 0
self.lite_sync.add_single_profile(ref.name)
elif isinstance(ref, item_distro.Distro):
self.lite_sync.add_single_distro(ref.name)
elif isinstance(ref, item_image.Image):
self.lite_sync.add_single_image(ref.name)
elif isinstance(ref, item_repo.Repo):
pass
elif isinstance(ref, item_mgmtclass.Mgmtclass):
pass
elif isinstance(ref, item_package.Package):
pass
elif isinstance(ref, item_file.File):
pass
else:
print _("Internal error. Object type not recognized: %s") % type(ref)
if not with_sync and quick_pxe_update:
if isinstance(ref, item_system.System):
self.lite_sync.update_system_netboot_status(ref.name)
# save the tree, so if neccessary, scripts can examine it.
if with_triggers:
utils.run_triggers(self.api, ref, "/var/lib/cobbler/triggers/change/*", [], logger)
utils.run_triggers(self.api, ref,"/var/lib/cobbler/triggers/add/%s/post/*" % self.collection_type(), [], logger)
# update children cache in parent object
parent = ref.get_parent()
if parent != None:
parent.children[ref.name] = ref
return True
def __duplication_checks(self,ref,check_for_duplicate_names,check_for_duplicate_netinfo):
"""
Prevents adding objects with the same name.
Prevents adding or editing to provide the same IP, or MAC.
Enforcement is based on whether the API caller requests it.
"""
# always protect against duplicate names
if check_for_duplicate_names:
match = None
if isinstance(ref, item_system.System):
match = self.api.find_system(ref.name)
elif isinstance(ref, item_profile.Profile):
match = self.api.find_profile(ref.name)
elif isinstance(ref, item_distro.Distro):
match = self.api.find_distro(ref.name)
elif isinstance(ref, item_repo.Repo):
match = self.api.find_repo(ref.name)
elif isinstance(ref, item_image.Image):
match = self.api.find_image(ref.name)
elif isinstance(ref, item_mgmtclass.Mgmtclass):
match = self.api.find_mgmtclass(ref.name)
elif isinstance(ref, item_package.Package):
match = self.api.find_package(ref.name)
elif isinstance(ref, item_file.File):
match = self.api.find_file(ref.name)
else:
raise CX("internal error, unknown object type")
if match:
raise CX(_("An object already exists with that name. Try 'edit'?"))
# the duplicate mac/ip checks can be disabled.
if not check_for_duplicate_netinfo:
return
if isinstance(ref, item_system.System):
for (name, intf) in ref.interfaces.iteritems():
match_ip = []
match_mac = []
match_hosts = []
input_mac = intf["mac_address"]
input_ip = intf["ip_address"]
input_dns = intf["dns_name"]
if not self.api.settings().allow_duplicate_macs and input_mac is not None and input_mac != "":
match_mac = self.api.find_system(mac_address=input_mac,return_list=True)
if not self.api.settings().allow_duplicate_ips and input_ip is not None and input_ip != "":
match_ip = self.api.find_system(ip_address=input_ip,return_list=True)
# it's ok to conflict with your own net info.
if not self.api.settings().allow_duplicate_hostnames and input_dns is not None and input_dns != "":
match_hosts = self.api.find_system(dns_name=input_dns,return_list=True)
for x in match_mac:
if x.name != ref.name:
raise CX(_("Can't save system %s. The MAC address (%s) is already used by system %s (%s)") % (ref.name, intf["mac_address"], x.name, name))
for x in match_ip:
if x.name != ref.name:
raise CX(_("Can't save system %s. The IP address (%s) is already used by system %s (%s)") % (ref.name, intf["ip_address"], x.name, name))
for x in match_hosts:
if x.name != ref.name:
raise CX(_("Can't save system %s. The dns name (%s) is already used by system %s (%s)") % (ref.name, intf["dns_name"], x.name, name))
def printable(self):
"""
Creates a printable representation of the collection suitable
for reading by humans or parsing from scripts. Actually scripts
would be better off reading the YAML in the config files directly.
"""
values = self.listing.values()[:] # copy the values
values.sort() # sort the copy (2.3 fix)
results = []
for i,v in enumerate(values):
results.append(v.printable())
if len(values) > 0:
return "\n\n".join(results)
else:
return _("No objects found")
def __iter__(self):
"""
Iterator for the collection. Allows list comprehensions, etc
"""
for a in self.listing.values():
yield a
def __len__(self):
"""
Returns size of the collection
"""
return len(self.listing.values())
def collection_type(self):
"""
Returns the string key for the name of the collection (for use in messages for humans)
"""
return exceptions.NotImplementedError
| jantman/cobbler | cobbler/collection.py | Python | gpl-2.0 | 18,924 |
from util import box2d
import ContactListener
from time import sleep
class World(box2d.b2World):
    """Box2D world wrapper that tracks the camera, the game client, the
    visible actors, and queues of bodies to create/destroy on the next
    physics Step().

    NOTE(review): the g* class attributes are shared declarations; the
    mutable ones are rebound per-instance in __init__.
    """
    gContactListener = None
    gCamera = None
    gGameClient = None
    gMainChar = None
    gDestroyQue = []
    gDestroyed = False
    gActors = []
    def __init__(self, gravity, doSleep):
        self.gContactListener = ContactListener.ContactListener()
        super(World, self).__init__(gravity, doSleep, contactListener=self.gContactListener)
        #self.SetContactListener(self.gContactListener)
        self.gDestroyQue = []
        self.gAddQue = []
        # maps a queued bodyDef to the body created for it by Step()
        self.gBodies = {}
        self.gActors = []
    def addContactListener(self, listener):
        """Register *listener* with the shared contact listener."""
        self.gContactListener.addListener(listener)
    def removeContactListener(self, listener):
        """Unregister *listener* from the shared contact listener."""
        self.gContactListener.removeListener(listener)
    def setCamera(self, cam):
        self.gCamera = cam
    #def getCamera(self):
    #    return self.gCamera
    def getCameraOffsets(self):
        """Return the camera's offsets, or (0, 0) when no camera is
        attached or the world has been destroyed."""
        if self.gCamera == None or self.gDestroyed: return (0,0)
        return self.gCamera.getOffsets()
    def addActor(self, actor):
        """Track *actor* and show it on the camera (no-op without a camera)."""
        if self.gCamera != None:
            self.gActors.append(actor)
            self.gCamera.addActor(actor)
    def removeActor(self, actor):
        """Stop tracking *actor* and remove it from the camera."""
        if actor in self.gActors and not self.gDestroyed:
            self.gActors.remove(actor)
            self.gCamera.removeActor(actor)
    def setGameClient(self, client):
        self.gGameClient = client
    def getGameClient(self):
        return self.gGameClient
    def setMainCharacter(self, mainChar):
        self.gMainChar = mainChar
    def addNewDynamicBody(self, bodyDef):
        """Queue *bodyDef*; the body is created on the next Step()."""
        self.gAddQue.append(bodyDef)
    def addDestroyBody(self, body):
        """Queue *body* for destruction on the next Step()."""
        self.gDestroyQue.append(body)
    def destroy(self):
        """Tear down the world: detach all actors, listeners and references."""
        self.gDestroyed = True
        # Bug fix: the original removed entries from self.gActors while
        # iterating over it, which skips every other actor.  Iterate a
        # snapshot, then clear the list in one go.
        for actor in list(self.gActors):
            self.gCamera.removeActor(actor)
        del self.gActors[:]
        # Bug fix: ``del self.gMainChar`` raised AttributeError when no main
        # character had ever been set on this instance (the name then only
        # exists on the class).  Rebinding to None drops the reference safely.
        self.gMainChar = None
        self.gContactListener.clearListeners()
        #self.SetContactListener(None)
        self.contactListener = None
        del self.gContactListener
        self.gCamera = None
        self.gGameClient = None
    def waitForBody(self, bodyDef):
        """Block (polling every 100 ms) until Step() has created the body
        for *bodyDef*, then claim and return it."""
        while True:
            if bodyDef in self.gBodies.keys():
                body = self.gBodies[bodyDef]
                del self.gBodies[bodyDef]
                return body
            sleep(.1)
    def Step(self):
        """Advance the simulation two fixed 1/60 s substeps, then drain the
        body creation and destruction queues."""
        super(World, self).Step(1.0 / 60, 10, 10)
        super(World, self).ClearForces()
        super(World, self).Step(1.0 / 60, 10, 10)
        super(World, self).ClearForces()
        # Bug fix: both queues were previously mutated while being iterated,
        # which processes only every other entry.  Swap each queue out for a
        # fresh list and work through the snapshot instead.
        pendingAdds, self.gAddQue = self.gAddQue, []
        for bodyDef in pendingAdds:
            self.gBodies[bodyDef] = self.CreateDynamicBody(bodyDef)
        if not self.gDestroyed:
            pendingDestroys, self.gDestroyQue = self.gDestroyQue, []
            for body in pendingDestroys:
                body.ClearUserData()
                self.DestroyBody(body)
| nemothekid/Colosseum--Year-3XXX | World.py | Python | mit | 2,975 |
# bedup - Btrfs deduplication
# Copyright (C) 2012 Gabriel de Perthuis <g2p.code+bedup@gmail.com>
#
# This file is part of bedup.
#
# bedup is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# bedup is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with bedup. If not, see <http://www.gnu.org/licenses/>.
from cffi import FFI
import os
from . import cffi_support
# Declare the single C-level signature we need and build a tiny cffi
# extension exposing openat(2) from <fcntl.h>.
ffi = FFI()
ffi.cdef('''
int openat(int dirfd, const char *pathname, int flags);
''')
lib = cffi_support.verify(ffi, '''
#include <fcntl.h>
''')
def openat(base_fd, path, flags):
    """Open *path* relative to the directory fd *base_fd* and return the
    new file descriptor.

    Raises IOError on failure.  The exception is deliberately built with
    exactly three arguments: IOError.errno is only populated when two or
    three arguments are passed.
    """
    fd = lib.openat(base_fd, path, flags)
    if fd >= 0:
        return fd
    err = ffi.errno
    raise IOError(err, os.strerror(err), (base_fd, path))
def fopenat(base_fd, path):
    """
    Does openat read-only, then does fdopen to get a file object
    """
    fd = openat(base_fd, path, os.O_RDONLY)
    return os.fdopen(fd, 'rb')
def fopenat_rw(base_fd, path):
    """
    Does openat read-write, then does fdopen to get a file object
    """
    fd = openat(base_fd, path, os.O_RDWR)
    return os.fdopen(fd, 'rb+')
| adonm/bedup | bedup/platform/openat.py | Python | gpl-2.0 | 1,628 |
# -*- coding: utf-8 -*-
import json
from django.shortcuts import get_object_or_404
from catmaid.models import Textlabel, TextlabelLocation
from .common import CatmaidApiTestCase
class TextlabelsApiTests(CatmaidApiTestCase):
    """Tests for the textlabel HTTP API: create, update, delete and listing.

    (The previous docstring, "Testing class for Moran's I functions", was a
    copy-paste leftover from another module.)
    """
    def test_update_textlabel(self):
        """Updating every field of an existing label persists all values."""
        self.fake_authentication()
        textlabel_id = 1
        params = {
            'tid': textlabel_id,
            'pid': self.test_project_id,
            'x': 3,
            'y': 1,
            'z': 4,
            'r': 0,
            'g': 0,
            'b': 0,
            'a': 0,
            'type': 'text',
            'text': 'Lets dance the Grim Fandango!',
            'font_name': 'We may have years, we may have hours',
            'font_style': 'But sooner or later we all push up flowers',
            'font_size': 5555,
            'scaling': 0}
        response = self.client.post(
                '/%d/textlabel/update' % self.test_project_id,
                params)
        # the endpoint replies with a single-space body on success
        expected_result = ' '
        self.assertStatus(response)
        self.assertEqual(expected_result, response.content.decode('utf-8'))
        label = Textlabel.objects.filter(id=textlabel_id)[0]
        label_location = TextlabelLocation.objects.filter(textlabel=textlabel_id)[0]
        self.assertEqual(params['pid'], label.project_id)
        self.assertEqual(params['x'], label_location.location.x)
        self.assertEqual(params['y'], label_location.location.y)
        self.assertEqual(params['z'], label_location.location.z)
        self.assertEqual(params['type'], label.type)
        self.assertEqual(params['text'], label.text)
        self.assertEqual(params['font_name'], label.font_name)
        self.assertEqual(params['font_style'], label.font_style)
        self.assertEqual(params['font_size'], label.font_size)
        # scaling=0 in the request is stored as boolean False
        self.assertEqual(False, label.scaling)
    def test_update_textlabel_using_optionals(self):
        """
        Omits some parameters and ensures corresponding
        properties of label were unchanged.
        """
        self.fake_authentication()
        textlabel_id = 1
        params = {
            'tid': textlabel_id,
            'text': 'Almost faltering, we held on to each other so that neither of us touched the ground.',
            'type': 'bubble'}
        label_before_update = Textlabel.objects.filter(id=textlabel_id)[0]
        label_location_before_update = TextlabelLocation.objects.filter(textlabel=textlabel_id)[0]
        response = self.client.post(
                '/%d/textlabel/update' % self.test_project_id,
                params)
        expected_result = ' '
        self.assertStatus(response)
        self.assertEqual(expected_result, response.content.decode('utf-8'))
        label = Textlabel.objects.filter(id=textlabel_id)[0]
        label_location = TextlabelLocation.objects.filter(textlabel=textlabel_id)[0]
        # omitted fields keep their pre-update values
        self.assertEqual(label_before_update.project_id, label.project_id)
        self.assertEqual(label_location_before_update.location.x, label_location.location.x)
        self.assertEqual(label_location_before_update.location.y, label_location.location.y)
        self.assertEqual(label_location_before_update.location.z, label_location.location.z)
        self.assertEqual(params['type'], label.type)
        self.assertEqual(params['text'], label.text)
        self.assertEqual(label_before_update.font_name, label.font_name)
        self.assertEqual(label_before_update.font_style, label.font_style)
        self.assertEqual(label_before_update.font_size, label.font_size)
        self.assertEqual(label_before_update.scaling, label.scaling)
    def test_update_textlabel_failure(self):
        """Updating a non-existent label yields HTTP 400 with an error body."""
        self.fake_authentication()
        textlabel_id = 404
        params = {'tid': textlabel_id, 'pid': self.test_project_id}
        response = self.client.post(
                '/%d/textlabel/update' % self.test_project_id,
                params)
        self.assertEqual(response.status_code, 400)
        parsed_response = json.loads(response.content.decode('utf-8'))
        expected_result = 'Failed to find Textlabel with id %s.' % textlabel_id
        self.assertIn('error', parsed_response)
        self.assertIn(expected_result, parsed_response['error'])
    def test_delete_textlabel(self):
        """Deleting a label removes both the label and its location row."""
        self.fake_authentication()
        textlabel_id = 1
        self.assertEqual(1, Textlabel.objects.filter(id=textlabel_id).count())
        self.assertEqual(1, TextlabelLocation.objects.filter(textlabel=textlabel_id).count())
        response = self.client.post(
                '/%d/textlabel/delete' % self.test_project_id,
                {'tid': textlabel_id})
        self.assertStatus(response)
        parsed_response = json.loads(response.content.decode('utf-8'))
        expected_result = {'message': 'Success.'}
        self.assertEqual(expected_result, parsed_response)
        self.assertEqual(0, Textlabel.objects.filter(id=textlabel_id).count())
        self.assertEqual(0, TextlabelLocation.objects.filter(textlabel=textlabel_id).count())
    def test_create_textlabel(self):
        """Creates three labels from tabular parameter data and verifies each
        stored field, including the defaulting rules for type and text."""
        self.fake_authentication()
        label_data = [
                # param-name, param values
                ('text', ['baba tiki dido', 'doop op', '']),
                ('type', ['text', 'bubble', 'non-valid-type']),
                ('font_name', [False, False, 'Times New Roman']),
                ('font_style', [False, 'bold', 'italic']),
                ('font_size', [55, 4, False]),
                ('x', [1, 2, 3]),
                ('y', [1, 100, 233]),
                ('z', [1, 0, 555]),
                ('r', [1, 2, 3]),
                ('g', [3, 4, 5]),
                ('b', [5, 7, 9]),
                ('a', [225, 225, 225])]
        label_count = Textlabel.objects.all().count()
        # Create and test labels
        for i in range(len(label_data[0][1])):
            params = {}
            # Fill request with POST-data (False means "omit this parameter")
            for p, values in label_data:
                if values[i]:
                    params[p] = values[i]
            response = self.client.post(
                    '/%d/textlabel/create' % self.test_project_id,
                    params)
            parsed_response = json.loads(response.content.decode('utf-8'))
            self.assertStatus(response)
            self.assertEqual(label_count + 1 + i, Textlabel.objects.all().count())
            self.assertTrue('tid' in parsed_response.keys())
            label = get_object_or_404(Textlabel, id=parsed_response['tid'])
            label_location = TextlabelLocation.objects.get(textlabel=label.id)
            # For each attribute, ensure new label is in accord with input
            # label_location_data = Double3D(x=0, y=0, z=0)
            for p, values in label_data:
                value = values[i]
                if value is False:
                    continue  # Do not check for default values for now
                if (p == 'type' and value != 'bubble'):
                    # any non-"bubble" type falls back to "text"
                    self.assertEqual('text', getattr(label, p))
                elif (p == 'text' and value == ''):
                    # empty text gets the placeholder
                    self.assertEqual('Edit this text...', getattr(label, p))
                elif (p in ['x', 'y', 'z']):
                    self.assertEqual(value, getattr(label_location.location, p))
                elif (p in ['r', 'g', 'b', 'a']):
                    # Model does not include textlabel colour at the moment
                    pass
                else:
                    self.assertEqual(value, getattr(label, p))
            # self.assertEqual(label_location_data, label_location.location)
    def test_textlabels_empty(self):
        """A section with no labels returns an empty JSON object."""
        self.fake_authentication()
        expected_result = {}
        response = self.client.post('/%d/textlabel/all' % (self.test_project_id,), {
                'sid': 3,
                'z': 9,
                'top': 0,
                'left': 0,
                'width': 10240,
                'height': 7680,
                'scale': 0.5,
                'resolution': 5})
        self.assertStatus(response)
        parsed_response = json.loads(response.content.decode('utf-8'))
        self.assertEqual(expected_result, parsed_response)
    def test_textlabels_nonempty(self):
        """A section containing labels returns them keyed by list index."""
        self.fake_authentication()
        expected_result = {
                '0': {
                    'tid': 1,
                    'type': 'text',
                    'text': 'World.',
                    'font_name': None,
                    'font_style': 'bold',
                    'font_size': 160,
                    'scaling': 1,
                    'z_diff': 0,
                    'colour': {'r': 255, 'g': 126, 'b': 0, 'a': 1},
                    'location': {'x': 3155, 'y': 1775, 'z': 27}},
                '1': {
                    'tid': 2,
                    'type': 'text',
                    'text': 'Helo.',
                    'font_name': None,
                    'font_style': 'bold',
                    'font_size': 160,
                    'scaling': 1,
                    'z_diff': 0,
                    'colour': {'r': 255, 'g': 126, 'b': 0, 'a': 1},
                    'location': {'x': 2345, 'y': 1785, 'z': 27}}}
        response = self.client.post('/%d/textlabel/all' % (self.test_project_id,), {
                'sid': 3,
                'z': 27,
                'top': 0,
                'left': 0,
                'width': 10240,
                'height': 7680,
                'scale': 0.5,
                'resolution': 5})
        self.assertStatus(response)
        parsed_response = json.loads(response.content.decode('utf-8'))
        self.assertEqual(expected_result, parsed_response)
| tomka/CATMAID | django/applications/catmaid/tests/apis/test_textlabels.py | Python | gpl-3.0 | 9,740 |
from sys import argv, stdin, stdout, exit
from . import ansiprint, parse
# Interactive invocation with no arguments and no piped input: show usage
# and exit successfully.
if len(argv) == 1 and stdin.isatty():
    from textwrap import dedent
    usage = '''
    Usage: python -m ansimarkup [<arg> [<arg> ...]]
    Example usage:
      python -m ansimarkup '<b>Bold</b>' '<r>Red</r>'
      python -m ansimarkup '<b><r>Bold Red</r></b>'
      python -m ansimarkup < input-with-markup.txt
      echo '<b>Bold</b>' | python -m ansimarkup
    '''
    print(dedent(usage).strip())
    exit(0)
# Piped input: translate markup from stdin line by line.
if not stdin.isatty():
    for line in stdin:
        stdout.write(parse(line))
else:
    # Otherwise render each command-line argument.
    ansiprint(*argv[1:])
| gvalkov/python-ansimarkup | ansimarkup/__main__.py | Python | bsd-3-clause | 604 |
# Module-level constants used by the coverage test fixtures.
d, e, f = 1, 2, 3
| I-Valchev/UrPas | coverage-3.7.1/tests/modules/pkg1/sub/ps1a.py | Python | apache-2.0 | 18 |
#!/usr/bin/env python
import os
import sys
import django
from django.conf import settings
# Minimal Django settings used solely to generate migrations for the
# ``pinax.images`` app; an in-memory SQLite database suffices because
# ``makemigrations`` never touches real data.
DEFAULT_SETTINGS = dict(
    INSTALLED_APPS=[
        "django.contrib.auth",
        "django.contrib.contenttypes",
        "django.contrib.sites",
        "pinax.images",
        "pinax.images.tests"
    ],
    MIDDLEWARE_CLASSES=[],
    DATABASES={
        "default": {
            "ENGINE": "django.db.backends.sqlite3",
            "NAME": ":memory:",
        }
    },
    SITE_ID=1,
    ROOT_URLCONF="pinax.images.tests.urls",
    SECRET_KEY="notasecret",
)
def run(*args):
    """Configure a minimal Django environment and run ``makemigrations``
    for the ``pinax_images`` app.

    Any extra positional arguments are forwarded to the management command.
    """
    if not settings.configured:
        settings.configure(**DEFAULT_SETTINGS)
    django.setup()
    # make the package importable when invoked as a script
    parent = os.path.dirname(os.path.abspath(__file__))
    sys.path.insert(0, parent)
    # Robustness fix: ``import django`` alone does not guarantee that the
    # ``django.core.management`` submodule is loaded, so the old attribute
    # access ``django.core.management.call_command`` could fail with an
    # AttributeError.  Import the entry point explicitly instead.
    from django.core.management import call_command
    call_command(
        "makemigrations",
        "pinax_images",
        *args
    )
if __name__ == "__main__":
    run(*sys.argv[1:])
| arthur-wsw/pinax-images | makemigrations.py | Python | mit | 926 |
class event_system:
    """Minimal publish/subscribe registry mapping event names to callbacks."""
    def __init__(self):
        # event name -> list of callbacks, in registration order
        self.__listeners = {}
    def on(self, event, func):
        """Register *func* to run whenever *event* is triggered."""
        self.__listeners.setdefault(event, []).append(func)
    def trigger(self, event):
        """Invoke every callback registered for *event*; unknown events
        are silently ignored."""
        for callback in self.__listeners.get(event, ()):
            callback()
class broadcast(list):
    """A list of callables that is itself callable: invoking the broadcast
    fans the call (with identical arguments) out to every member, in order."""
    def __call__(self, *args, **kwargs):
        for handler in iter(self):
            handler(*args, **kwargs)
    def __repr__(self):
        return 'Broadcast({0})'.format(list.__repr__(self))
import unittest
import numpy as np
from helper import plpy, fixture_file
import crankshaft.segmentation as segmentation
import json
class SegmentationTest(unittest.TestCase):
    """Tests for crankshaft's segmentation model creation and prediction.

    (The previous docstring, "Testing class for Moran's I functions", was a
    copy-paste leftover from another test module.)
    """
    def setUp(self):
        # reset the plpy mock's recorded queries/results between tests
        plpy._reset()
    def generate_random_data(self,n_samples,random_state, row_type=False):
        """Build a deterministic synthetic dataset: y = x1 + x2^2 + x3.

        With row_type=True returns (feature-row dicts, y); otherwise a
        single column-oriented dict as the plpy mock would yield.
        """
        x1 = random_state.uniform(size=n_samples)
        x2 = random_state.uniform(size=n_samples)
        x3 = random_state.randint(0, 4, size=n_samples)
        y = x1+x2*x2+x3
        cartodb_id = range(len(x1))
        if row_type:
            return [ {'features': vals} for vals in zip(x1,x2,x3)], y
        else:
            return [dict( zip(['x1','x2','x3','target', 'cartodb_id'],[x1,x2,x3,y,cartodb_id]))]
    def test_replace_nan_with_mean(self):
        # NOTE(review): this test only builds the fixture and asserts
        # nothing -- it looks unfinished.
        test_array = np.array([1.2, np.nan, 3.2, np.nan, np.nan])
    def test_create_and_predict_segment(self):
        """Train on synthetic data, predict on a held-out set, and check
        prediction accuracy and reported error stay within loose bounds."""
        n_samples = 1000
        random_state_train = np.random.RandomState(13)
        random_state_test = np.random.RandomState(134)
        training_data = self.generate_random_data(n_samples, random_state_train)
        test_data, test_y = self.generate_random_data(n_samples, random_state_test, row_type=True)
        ids = [{'cartodb_ids': range(len(test_data))}]
        rows = [{'x1': 0,'x2':0,'x3':0,'y':0,'cartodb_id':0}]
        # wire up the plpy mock with canned results for each query pattern
        plpy._define_result('select \* from \(select \* from training\) a limit 1',rows)
        plpy._define_result('.*from \(select \* from training\) as a' ,training_data)
        plpy._define_result('select array_agg\(cartodb\_id order by cartodb\_id\) as cartodb_ids from \(.*\) a',ids)
        plpy._define_result('.*select \* from test.*' ,test_data)
        model_parameters = {'n_estimators': 1200,
                            'max_depth': 3,
                            'subsample' : 0.5,
                            'learning_rate': 0.01,
                            'min_samples_leaf': 1}
        result = segmentation.create_and_predict_segment(
            'select * from training',
            'target',
            'select * from test',
            model_parameters)
        prediction = [r[1] for r in result]
        # RMSE of the predictions against the known synthetic targets
        accuracy =np.sqrt(np.mean( np.square( np.array(prediction) - np.array(test_y))))
        self.assertEqual(len(result),len(test_data))
        self.assertTrue( result[0][2] < 0.01)
        self.assertTrue( accuracy < 0.5*np.mean(test_y) )
| CartoDB/crankshaft | release/python/0.8.1/crankshaft/test/test_segmentation.py | Python | bsd-3-clause | 2,464 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_worksessions
"""
from datetime import datetime, timedelta
import unittest
from unittest.mock import patch
from vulyk.models.exc import (
TaskNotFoundError,
WorkSessionUpdateError)
from vulyk.models.stats import WorkSession
from vulyk.models.tasks import AbstractTask, AbstractAnswer
from vulyk.models.user import User, Group
from .base import BaseTest
from .fixtures import FakeType
class TestTaskTypes(BaseTest):
FAKE_TYPE = FakeType({})
@classmethod
def setUpClass(cls):
super().setUpClass()
Group.objects.create(
description='test',
id='default',
allowed_types=[FakeType.type_name])
@classmethod
def tearDownClass(cls):
Group.objects.delete()
super().tearDownClass()
def tearDown(self):
User.objects.delete()
AbstractTask.objects.delete()
AbstractAnswer.objects.delete()
WorkSession.objects.delete()
super().tearDown()
# region Creation
def test_on_create_ok(self):
task_type = self.FAKE_TYPE
user = User(username='user0', email='user0@email.com').save()
task = task_type.task_model(
id='task0',
task_type=task_type.type_name,
batch=None,
closed=False,
users_count=0,
users_processed=[],
users_skipped=[],
task_data={'data': 'data'}).save()
task_type.work_session_manager.start_work_session(task, user.id)
ws = task_type.work_session_manager.work_session.objects.get(
user=user,
task=task)
self.assertEqual(ws.task, task)
self.assertEqual(ws.task_type, task_type.type_name)
self.assertEqual(ws.task_type, task.task_type)
self.assertEqual(ws.user, user)
def test_on_create_twice(self):
task_type = self.FAKE_TYPE
user = User(username='user0', email='user0@email.com').save()
task = task_type.task_model(
id='task0',
task_type=task_type.type_name,
batch=None,
closed=False,
users_count=0,
users_processed=[],
users_skipped=[],
task_data={'data': 'data'}).save()
fake_datetime = datetime.now() - timedelta(days=5)
with patch('vulyk.ext.worksession.datetime') as mock_date:
mock_date.now = lambda: fake_datetime
task_type.work_session_manager.start_work_session(task, user.id)
ws_first = task_type.work_session_manager.work_session.objects.get(
user=user,
task=task)
# re-create later
task_type.work_session_manager.start_work_session(task, user.id)
ws_second = task_type.work_session_manager.work_session.objects.get(
user=user,
task=task)
self.assertEqual(ws_first.id, ws_second.id)
self.assertEqual((ws_second.start_time - ws_first.start_time).days, 5)
# endregion Creation
# region Record activity
def test_update_session_normal(self):
task_type = self.FAKE_TYPE
user = User(username='user0', email='user0@email.com').save()
task = task_type.task_model(
id='task0',
task_type=task_type.type_name,
batch='any_batch',
closed=False,
users_count=0,
users_processed=[],
users_skipped=[],
task_data={'data': 'data'}).save()
duration = 50
fake_datetime = datetime.now() - timedelta(seconds=70)
with patch('vulyk.ext.worksession.datetime') as mock_date:
mock_date.now = lambda: fake_datetime
task_type.work_session_manager.start_work_session(task, user.id)
task_type.record_activity(user.id, task.id, duration)
session = WorkSession.objects.get(user=user.id, task=task)
self.assertEqual(session.activity, duration)
def test_update_session_twice(self):
task_type = self.FAKE_TYPE
user = User(username='user0', email='user0@email.com').save()
task = task_type.task_model(
id='task0',
task_type=task_type.type_name,
batch='any_batch',
closed=False,
users_count=0,
users_processed=[],
users_skipped=[],
task_data={'data': 'data'}).save()
duration = 50
fake_datetime = datetime.now() - timedelta(seconds=110)
with patch('vulyk.ext.worksession.datetime') as mock_date:
mock_date.now = lambda: fake_datetime
task_type.work_session_manager.start_work_session(task, user.id)
task_type.record_activity(user.id, task.id, duration)
task_type.record_activity(user.id, task.id, duration)
session = WorkSession.objects.get(user=user.id, task=task)
self.assertEqual(session.activity, duration * 2)
def test_update_session_overdrive(self):
task_type = self.FAKE_TYPE
user = User(username='user0', email='user0@email.com').save()
task = task_type.task_model(
id='task0',
task_type=task_type.type_name,
batch='any_batch',
closed=False,
users_count=0,
users_processed=[],
users_skipped=[],
task_data={'data': 'data'}).save()
duration = 50
fake_datetime = datetime.now() - timedelta(seconds=30)
with patch('vulyk.ext.worksession.datetime') as mock_date:
mock_date.now = lambda: fake_datetime
task_type.work_session_manager.start_work_session(task, user.id)
self.assertRaises(
WorkSessionUpdateError,
lambda: task_type.record_activity(user.id, task.id, duration)
)
session = WorkSession.objects.get(user=user.id, task=task)
self.assertEqual(session.activity, 0)
def test_update_session_negative(self):
task_type = self.FAKE_TYPE
user = User(username='user0', email='user0@email.com').save()
task = task_type.task_model(
id='task0',
task_type=task_type.type_name,
batch='any_batch',
closed=False,
users_count=0,
users_processed=[],
users_skipped=[],
task_data={'data': 'data'}).save()
duration = -50
fake_datetime = datetime.now() - timedelta(seconds=30)
with patch('vulyk.ext.worksession.datetime') as mock_date:
mock_date.now = lambda: fake_datetime
task_type.work_session_manager.start_work_session(task, user.id)
self.assertRaises(
WorkSessionUpdateError,
lambda: task_type.record_activity(user.id, task.id, duration)
)
session = WorkSession.objects.get(user=user.id, task=task)
self.assertEqual(session.activity, 0)
def test_update_session_not_found(self):
fake_type = self.FAKE_TYPE
self.assertRaises(TaskNotFoundError,
lambda: fake_type.record_activity('fake_id', '', 0))
# endregion Record activity
# region On task done
def test_on_done_ok(self):
task_type = self.FAKE_TYPE
user = User(username='user0', email='user0@email.com').save()
task = task_type.task_model(
id='task0',
task_type=task_type.type_name,
batch=None,
closed=False,
users_count=0,
users_processed=[],
users_skipped=[],
task_data={'data': 'data'}).save()
task_type.work_session_manager.start_work_session(task, user.id)
task_type.on_task_done(user, task.id, {'result': 'result'})
ws = task_type.work_session_manager.work_session.objects.get(
user=user,
task=task)
answer = task_type.answer_model.objects.get(created_by=user, task=task)
self.assertEqual(ws.answer, answer)
self.assertLess(ws.end_time - datetime.now(), timedelta(seconds=1))
# endregion On task done
# region Stats
def test_total_time_approximate(self):
task_type = self.FAKE_TYPE
user = User(username='user0', email='user0@email.com').save()
tasks = [
task_type.task_model(
id='task0',
task_type=task_type.type_name,
batch=None,
closed=False,
users_count=0,
users_processed=[],
users_skipped=[],
task_data={'data': 'data'}).save(),
task_type.task_model(
id='task1',
task_type=task_type.type_name,
batch=None,
closed=False,
users_count=0,
users_processed=[],
users_skipped=[],
task_data={'data': 'data'}).save(),
]
for i, task in enumerate(tasks):
fake_datetime = datetime.now() - timedelta(seconds=30 * (i + 1))
with patch('vulyk.ext.worksession.datetime') as mock_date:
mock_date.now = lambda: fake_datetime
task_type.work_session_manager.start_work_session(
task,
user.id)
task_type.on_task_done(user, task.id, {'result': 'result'})
ws = task_type.work_session_manager.work_session
self.assertEqual(
ws.get_total_user_time_approximate(user.id),
90
)
def test_total_time_precise(self):
task_type = self.FAKE_TYPE
user = User(username='user0', email='user0@email.com').save()
tasks = [
task_type.task_model(
id='task0',
task_type=task_type.type_name,
batch=None,
closed=False,
users_count=0,
users_processed=[],
users_skipped=[],
task_data={'data': 'data'}).save(),
task_type.task_model(
id='task1',
task_type=task_type.type_name,
batch=None,
closed=False,
users_count=0,
users_processed=[],
users_skipped=[],
task_data={'data': 'data'}).save(),
]
for i, task in enumerate(tasks):
fake_datetime = datetime.now() - timedelta(seconds=100 * (i + 1))
with patch('vulyk.ext.worksession.datetime') as mock_date:
mock_date.now = lambda: fake_datetime
task_type.work_session_manager.start_work_session(
task,
user.id)
task_type.record_activity(user.id, task.id, 50 * (i + 1))
task_type.on_task_done(user, task.id, {'result': 'result'})
ws = task_type.work_session_manager.work_session
self.assertEqual(
ws.get_total_user_time_precise(user.id),
150
)
# endregion Stats
# Allow running this test module directly (e.g. `python test_worksessions.py`).
if __name__ == '__main__':
    unittest.main()
| mrgambal/vulyk | tests/test_worksessions.py | Python | bsd-3-clause | 11,133 |
"""
Copyright (C) 2018 Roberto Bruttomesso <roberto.bruttomesso@gmail.com>
This file is distributed under the terms of the 3-clause BSD License.
A copy of the license can be found in the root directory or at
https://opensource.org/licenses/BSD-3-Clause.
Author: Roberto Bruttomesso <roberto.bruttomesso@gmail.com>
Date: 29/10/2018
This module implements the main parsing routine of IEC61131 text
"""
from intrepyd.iec611312py.IEC61131ParserVisitor import IEC61131ParserVisitor
from intrepyd.iec611312py.statement import Assignment, IfThenElse, Case
from intrepyd.iec611312py.expression import VariableOcc, ConstantOcc, Expression, Range, FunctionOcc, ParamInit, TRUE
from intrepyd.iec611312py.variable import Variable
def isNumber(text):
    """Return True iff *text* looks like an unsigned numeric literal.

    Accepts decimal digits with at most one '.' (e.g. '42', '3.14',
    '.5', '5.').

    Bug fixes over the original character loop: the empty string, a lone
    '.', and strings with several dots ('1.2.3') are no longer reported
    as numbers.
    """
    if not text:
        return False
    integer, dot, fraction = text.partition('.')
    if not dot:
        return text.isdigit()
    # At least one side of the dot must carry digits, and whichever side
    # is non-empty must be purely numeric.
    if not integer and not fraction:
        return False
    return ((integer == '' or integer.isdigit()) and
            (fraction == '' or fraction.isdigit()))
def computeCompositeDatatype(var, name2var):
    """Resolve the datatype of a one-level structure access like 'base.field'.

    :param var: dotted access path, e.g. ``'point.x'``
    :param name2var: mapping from variable name to its Variable object
    :return: the field's datatype, or None when the field does not exist
    :raises RuntimeError: when the access nests deeper than one level
    """
    tokens = var.split('.')
    if len(tokens) != 2:
        raise RuntimeError('Cannot handle nested structures')
    baseType = name2var[tokens[0]]
    # Bug fix: use .items() instead of the Python-2-only .iteritems(),
    # which raises AttributeError on Python 3.
    for name, variable in baseType.datatype.fields.items():
        if name == tokens[1]:
            return variable.datatype
    return None
class STMTBuilder(IEC61131ParserVisitor):
    """
    Visitor that builds statements from the IEC program.

    Walks the ANTLR parse tree of a structured-text (ST) body and turns it
    into the intermediate representation (Assignment, IfThenElse, Case,
    Expression, ...) used by the rest of the translator.
    """

    def __init__(self, name2var, pou2inputs):
        """
        :param name2var: mapping from variable name to its Variable object
        :param pou2inputs: mapping from POU name to its input variables
        """
        self._statements = []
        self._name2var = name2var
        self._pou2inputs = pou2inputs

    @property
    def statements(self):
        """Statements collected by the last accepted ST body."""
        return self._statements

    def visitBodyST(self, ctx):
        self._statements = ctx.getChild(0).accept(self)

    def visitStmt_block(self, ctx):
        return [ctx.getChild(i).accept(self) for i in range(ctx.getChildCount())]

    def visitSt_stmt(self, ctx):
        return ctx.getChild(0).accept(self)

    def visit_stmt(self, ctx):
        return ctx.getChild(0).accept(self)

    def visitAssignVariable(self, ctx):
        lhs = ctx.getChild(0).accept(self)
        rhs = ctx.getChild(2).accept(self)
        return Assignment(lhs, rhs)

    def visitAssignCompositeAccess(self, ctx):
        lhs = ctx.getChild(0).accept(self)
        rhs = ctx.getChild(2).accept(self)
        return Assignment(lhs, rhs)

    def visitExpression(self, ctx):
        return ctx.getChild(0).accept(self)

    def visitBinaryBoolExpression(self, ctx):
        return self._binaryExpressionHelper(ctx)

    def visitBinaryTermExpression(self, ctx):
        return self._binaryExpressionHelper(ctx)

    def visitUnaryBoolExpression(self, ctx):
        return self._unaryExpressionHelper(ctx)

    def visitUnaryTermExpression(self, ctx):
        return self._unaryExpressionHelper(ctx)

    def visitLeafBoolExpression(self, ctx):
        return ctx.getChild(0).accept(self)

    def visitParBoolExpression(self, ctx):
        return ctx.subexpr.accept(self)

    def visitParTermExpression(self, ctx):
        return ctx.subexpr.accept(self)

    def visitSimple_var(self, ctx):
        var = ctx.getChild(0).getText()
        if var not in self._name2var:
            raise RuntimeError('Undeclared variable ' + var)
        return VariableOcc(self._name2var[var])

    def visitComposite_access(self, ctx):
        base = ctx.getChild(0).getText()
        if base not in self._name2var:
            raise RuntimeError('Undeclared variable ' + base)
        var = ctx.getText()
        if var not in self._name2var:
            # Lazily register the field access (e.g. 'point.x') as a variable.
            datatype = computeCompositeDatatype(var, self._name2var)
            self._name2var[var] = Variable(var, datatype, Variable.FIELD)
        return VariableOcc(self._name2var[var])

    def visitArray_access(self, ctx):
        raise NotImplementedError

    def visitVariable_bit_access(self, ctx):
        raise NotImplementedError

    def visitConstant(self, ctx):
        cst = ctx.getText()
        return ConstantOcc(cst)

    def visitCallBoolExpression(self, ctx):
        return self._callExpressionHelper(ctx)

    def visitCallTermExpression(self, ctx):
        return self._callExpressionHelper(ctx)

    def visitCustomCallExpression(self, ctx):
        pouName = ctx.getChild(0).getText()
        if pouName not in self._pou2inputs:
            # Bug fix: this used to be ``raise('Could not find pou ' + pouName)``,
            # which raises a plain string and fails with a TypeError
            # ("exceptions must derive from BaseException") at runtime.
            raise RuntimeError('Could not find pou ' + pouName)
        inputs = self._pou2inputs[pouName]
        paramInits = []
        param = 0
        if ctx.getChildCount() > 2:
            # Actual parameters sit at every other child starting from index 2.
            for i in range(2, ctx.getChildCount(), 2):
                paramInit = ctx.getChild(i).accept(self)
                paramInits.append(paramInit)
                # Propagate the declared input type onto the actual parameter.
                paramInit.rhs.datatype = inputs[param].datatype
                param += 1
        return FunctionOcc(ctx.getChild(0).getText(), paramInits)

    def visitFunc_param_init(self, ctx):
        param = ctx.getChild(0).getText()
        value = ctx.getChild(2).getText()
        if isNumber(value):
            return ParamInit(param, ConstantOcc(value)) # type will be set by caller
        return ParamInit(param, VariableOcc(Variable(value, None, Variable.TEMP))) # type will be set by caller

    def visitIf_stmt(self, ctx):
        return ctx.getChild(0).accept(self)

    def visitIf_simple_stmt(self, ctx):
        conditions = []
        statements = []
        conditions.append(ctx.ifexpr.accept(self))
        statements.append(ctx.ifstmt.accept(self))
        return IfThenElse(conditions, statements)

    def visitIf_elseif_stmt(self, ctx):
        conditions = []
        statements = []
        conditions.append(ctx.ifexpr.accept(self))
        statements.append(ctx.ifstmt.accept(self))
        conds, stmts = ctx.elsifstmt.accept(self)
        for cond in conds:
            conditions.append(cond)
        for stmt in stmts:
            statements.append(stmt)
        return IfThenElse(conditions, statements)

    def visitIf_else_stmt(self, ctx):
        conditions = []
        statements = []
        conditions.append(ctx.ifexpr.accept(self))
        statements.append(ctx.ifstmt.accept(self))
        # The ELSE branch is encoded as a trailing always-true condition.
        conditions.append(TRUE)
        statements.append(ctx.elsestmt.accept(self))
        return IfThenElse(conditions, statements)

    def visitIf_complete_stmt(self, ctx):
        conditions = []
        statements = []
        conditions.append(ctx.ifexpr.accept(self))
        statements.append(ctx.ifstmt.accept(self))
        conds, stmts = ctx.elsifstmt.accept(self)
        for cond in conds:
            conditions.append(cond)
        for stmt in stmts:
            statements.append(stmt)
        conditions.append(TRUE)
        statements.append(ctx.elsestmt.accept(self))
        return IfThenElse(conditions, statements)

    def visitElsif_stmt_list(self, ctx):
        conditions = []
        statements = []
        for i in range(ctx.getChildCount()):
            cond, stmt = ctx.getChild(i).accept(self)
            conditions.append(cond)
            statements.append(stmt)
        return conditions, statements

    def visitElsif_stmt(self, ctx):
        return ctx.expr.accept(self), ctx.stmtblock.accept(self)

    def visitCase_stmt(self, ctx):
        expression = ctx.expr.accept(self)
        selections, statements = ctx.casesel.accept(self)
        if ctx.getChildCount() == 7:
            # There is else too
            selections.append([expression])
            statements.append(ctx.elsestmt.accept(self))
        return Case(expression, selections, statements)

    def visitCase_selections(self, ctx):
        selections = []
        statements = []
        for i in range(ctx.getChildCount()):
            sel, stmt = ctx.getChild(i).accept(self)
            selections.append(sel)
            statements.append(stmt)
        return selections, statements

    def visitCase_selection(self, ctx):
        return ctx.getChild(0).accept(self), ctx.getChild(2).accept(self)

    def visitCase_list(self, ctx):
        # Selections are separated by commas, hence the stride of 2.
        return [ctx.getChild(i).accept(self) for i in range(0, ctx.getChildCount(), 2)]

    def visitCaseRange(self, ctx):
        return Range(ctx.start.getText(), ctx.to.getText())

    def visitCaseExpression(self, ctx):
        return ctx.getChild(0).accept(self)

    def _binaryExpressionHelper(self, ctx):
        # Children are laid out as: <lhs> <op> <rhs>.
        operator = ctx.op.text
        arguments = [ctx.getChild(0).accept(self), ctx.getChild(2).accept(self)]
        return Expression(operator, arguments)

    def _unaryExpressionHelper(self, ctx):
        operator = ctx.getChild(0).getText()
        return Expression(operator, [ctx.getChild(1).accept(self)])

    def _callExpressionHelper(self, ctx):
        operator = ctx.getChild(0).getText()
        arguments = [ctx.getChild(2).accept(self)]
        return Expression(operator, arguments)
| formalmethods/intrepyd | intrepyd/iec611312py/stmtbuilder.py | Python | bsd-3-clause | 8,639 |
#!/usr/bin/env python
from distutils.core import setup
# Package metadata for distutils; installs the single "slugifier" package.
# NOTE(review): distutils is deprecated/removed in modern Python (3.12+);
# migrating to setuptools would be needed there -- confirm target versions.
setup( name="slugifier",
    version = "0.1",
    description = "Add slugs to your mongoengine documents and use them in your flask views.",
    author = "Manas Garg",
    author_email = "manasgarg@gmail.com",
    license = "BSD License",
    url = "https://github.com/manasgarg/slugifier",
    packages = ["slugifier"],
    long_description = ""
)
| manasgarg/slugifier | setup.py | Python | bsd-3-clause | 407 |
import fitz

"""
This marks a longer, unique sentence on the page.

The parameters 'start', 'stop' and 'clip' are fully computed from the
returned hit rectangles.
"""
doc = fitz.open("search.pdf")
page = doc[0]

# Search for this text. It is show with hyphens on the page, which we can
# simply delete for our search. Line breaks can be handled like spaces.
text1 = (
    "Erklären ließe sich die Veränderung, wenn Beteigeuze einen",
    "Materieauswurf ins All geschleudert hat, der einen Teil",
    "der Strahlung abfängt, meinen die Forscher der",
    "Europäischen Südsternwarte ESO.",
)

rl = page.searchFor(
    " ".join(text1),  # reconstruct full sentence for searching
)
# Robustness fix: the original merely *commented* "You should check success
# here!".  Fail with a clear error instead of an IndexError on rl[0].
if not rl:
    raise RuntimeError("search text not found on page")
start = rl[0].tl  # top-left of first rectangle
stop = rl[-1].br  # bottom-right of last rectangle
clip = fitz.Rect()  # build clip as union of the hit rectangles
for r in rl:
    clip |= r
page.addHighlightAnnot(
    start=start,
    stop=stop,
    clip=clip,
)
doc.save(__file__.replace(".py", ".pdf"), garbage=3, deflate=True)
| JorjMcKie/PyMuPDF-Utilities | word&line-marking/mark-lines2.py | Python | gpl-3.0 | 1,051 |
# Calculate length of an arc using radius and degree angle measurement
import math
from stdutils import prettyFunction, inputAsDict
# Read the angle in degrees ('d') and the radius ('r') from the user.
vals = inputAsDict(('d','r'))
# Angle as a coefficient of pi, i.e. radians = (d / 180) * pi.
# (Despite the original comment, this is NOT yet the radian value itself.)
vals['ra'] = vals['d']/180
# Arc length, also expressed as a coefficient of pi.
vals['len'] = vals['ra']*vals['r']
# The same two quantities multiplied out by pi (numeric radians and length).
vals['rap'] = vals['ra']*math.pi
vals['lenp'] = vals['rap']*vals['r']
# Print the symbolic derivation followed by the numeric approximation.
print('')
print(prettyFunction("Arc Length = ({d}{degree} x {pi}/180) x {r}", vals))
print(prettyFunction("{len}{pi} = {ra}{pi} x {r}", vals))
print(prettyFunction("{len}{pi} {approx} {lenp}", vals))
| meta1203/Trigonometry-Programlets | arclength_degrees.py | Python | apache-2.0 | 592 |
from typing import Tuple
import numpy as np
import gdsfactory as gf
from gdsfactory import LAYER, Port
from gdsfactory.component import Component
@gf.cell
def big_device(
    size: Tuple[float, float] = (400.0, 400.0),
    nports: int = 16,
    spacing: float = 15.0,
    layer: Tuple[int, int] = LAYER.WG,
    wg_width: float = 0.5,
) -> Component:
    """Big component with N ports on each side.

    Args:
        size: (width, height) of the component's bounding rectangle.
        nports: number of ports on each of the four sides.
        spacing: center-to-center distance between neighbouring ports.
        layer: layer for the rectangle polygon and the ports.
        wg_width: waveguide (port) width.
    """
    component = gf.Component()
    p0 = np.array((0, 0))
    w, h = size
    dx = w / 2
    dy = h / 2
    N = nports
    points = [[dx, dy], [dx, -dy], [-dx, -dy], [-dx, dy]]
    component.add_polygon(points, layer=layer)

    # One entry per side: (name prefix, port orientation in degrees,
    # midpoint offset as a function of the port index).  This replaces
    # four copy-pasted loops; port creation order (W, E, N, S) is kept.
    sides = [
        ("W", 180, lambda i: (-dx, (i - N / 2) * spacing)),
        ("E", 0, lambda i: (dx, (i - N / 2) * spacing)),
        ("N", 90, lambda i: ((i - N / 2) * spacing, dy)),
        ("S", -90, lambda i: ((i - N / 2) * spacing, -dy)),
    ]
    for prefix, orientation, offset in sides:
        for i in range(N):
            component.add_port(
                Port(
                    name=f"{prefix}{i}",
                    midpoint=p0 + offset(i),
                    orientation=orientation,
                    layer=layer,
                    width=wg_width,
                )
            )
    return component
# Quick manual check: build the component and display it in the viewer.
if __name__ == "__main__":
    c = big_device()
    c.show()
| gdsfactory/gdsfactory | gdsfactory/samples/big_device.py | Python | mit | 1,843 |
#!/usr/bin/env python
# vim: expandtab:tabstop=4:shiftwidth=4
"""
Interface to OpenShift oc command
"""
#
# Copyright 2015 Red Hat Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import shlex
import atexit
import shutil
import string
import random
import yaml
import subprocess
def cleanup_file(inc_file):
    """Best-effort removal of a temporary file.

    Registered via atexit for the kubeconfig copies made by OCUtil; a
    missing file or permission problem is deliberately ignored.

    :param inc_file: path of the file to delete
    """
    try:
        os.unlink(inc_file)
    except OSError:
        # Only swallow OS-level failures (file already gone, permissions).
        # The original bare ``except:`` would also have hidden e.g.
        # KeyboardInterrupt and SystemExit.
        pass
class OCUtil(object):
    """ Wrapper for interfacing with OpenShift 'oc' utility """

    def __init__(self, namespace='default', config_file='/tmp/admin.kubeconfig', verbose=False, logger=None):
        """
        Take initial values for running 'oc'
        Ensure to set non-default namespace if that is what is desired

        :param namespace: project/namespace added to every 'oc' invocation
        :param config_file: kubeconfig to use; a private temp copy is made
        :param verbose: when True, echo each command before it is run
        :param logger: optional logger used for debug and error reporting
        """
        self.namespace = namespace
        self.config_file = config_file
        self.verbose = verbose
        self.copy_kubeconfig()
        self.logger = logger

    def copy_kubeconfig(self):
        """ make a copy of the kubeconfig so the original is never mutated """
        # Random 7-character name under /tmp; deleted automatically at exit.
        file_name = os.path.join(
            '/tmp',
            ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(7))
        )
        shutil.copy(self.config_file, file_name)
        atexit.register(cleanup_file, file_name)
        self.config_file = file_name

    def _run_cmd(self, cmd, base_cmd='oc', ):
        """ Actually execute the command and return its stdout

        :param cmd: command arguments appended after the common options
        :param base_cmd: executable to run (defaults to 'oc')
        :raises subprocess.CalledProcessError: when the command exits non-zero
        """
        cmd = " ".join([base_cmd, '--config', self.config_file, '-n', self.namespace, cmd])
        if self.logger:
            self.logger.debug("ocutil._run_cmd( {} )".format(cmd))
        cmd = shlex.split(cmd)
        if self.verbose:
            # Compatibility fix: print() is valid on both Python 2 and 3;
            # the original used the Python-2-only print statement here.
            print("Running command: {}".format(str(cmd)))
        try:
            return subprocess.check_output(cmd)
        except subprocess.CalledProcessError as err:
            if self.logger:
                self.logger.exception('Error from server: %s' % err.output)
            raise err

    def _run_cmd_yaml(self, cmd, base_cmd='oc', yaml_cmd='-o yaml'):
        """ Actually execute the command and parse its output as YAML """
        return yaml.safe_load(self._run_cmd(" ".join([cmd, yaml_cmd]), base_cmd=base_cmd))

    def run_user_cmd(self, cmd, base_cmd='oc'):
        """ Runs a custom user command """
        return self._run_cmd(cmd, base_cmd=base_cmd)

    def run_user_cmd_yaml(self, cmd, base_cmd='oc', yaml_cmd='-o yaml'):
        """Runs a custom user command and expects yaml"""
        return self._run_cmd_yaml(cmd, base_cmd=base_cmd, yaml_cmd=yaml_cmd)

    def get_secrets(self, name):
        """ Get secrets from object 'name' """
        return self._run_cmd_yaml("get secrets {}".format(name))

    def get_endpoint(self, name):
        """ Get endpoint details """
        return self._run_cmd_yaml("get endpoints {}".format(name))

    def get_service(self, name):
        """ Get service details """
        return self._run_cmd_yaml("get service {}".format(name))

    def get_rc(self, name):
        """ Get replication controller details """
        return self._run_cmd_yaml("get rc {}".format(name))

    def get_dc(self, name):
        """ Get deployment config details """
        return self._run_cmd_yaml("get dc {}".format(name))

    def get_route(self, name):
        """ Get routes details """
        return self._run_cmd_yaml("get route {}".format(name))

    def get_pods(self):
        """ Get all the pods in the namespace """
        return self._run_cmd_yaml("get pods")

    def get_projects(self):
        """ Get all projects in the cluster """
        return self._run_cmd_yaml("get projects")

    def get_nodes(self):
        """ Get all the nodes in the cluster """
        return self._run_cmd_yaml("get nodes")

    def get_log(self, name):
        """ Gets the log for the specified container """
        return self._run_cmd("logs {}".format(name))
| rhdedgar/openshift-tools | openshift_tools/monitoring/ocutil.py | Python | apache-2.0 | 4,378 |
'''
Copyright (C) 2016 Turi
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
'''
# python egg version
# NOTE: the trailing '#{{...}}' markers are placeholders rewritten by the
# release tooling -- do not remove them.
__VERSION__ = '1.9'#{{VERSION_STRING}}
version = '1.9'#{{VERSION_STRING}}
build_number = '0'#{{BUILD_NUMBER}}
| TobyRoseman/SFrame | oss_src/unity/python/sframe/version_info.py | Python | bsd-3-clause | 302 |
# Copyright iris-grib contributors
#
# This file is part of iris-grib and is released under the LGPL license.
# See COPYING and COPYING.LESSER in the root of the repository for full
# licensing details.
"""Unit tests for the `iris_grib.load_pairs_from_fields` function."""
import iris_grib.tests as tests
from iris_grib import load_pairs_from_fields
from iris_grib.message import GribMessage
class TestAsCubes(tests.IrisTest):
    def setUp(self):
        # Path of the GRIB2 test file used by every test below.
        self.file_path = tests.get_data_path(
            ("GRIB", "time_processed", "time_bound.grib2")
        )

    def test_year_filter(self):
        """Pre-filtering to an existing year yields exactly one pair."""
        messages = GribMessage.messages_from_filename(self.file_path)
        selected = [
            message
            for message in messages
            if message.sections[1]["year"] == 1998
        ]
        pairs = list(load_pairs_from_fields(selected))
        self.assertEqual(len(pairs), 1)

    def test_year_filter_none(self):
        """Pre-filtering to an absent year yields no pairs at all."""
        messages = GribMessage.messages_from_filename(self.file_path)
        selected = [
            message
            for message in messages
            if message.sections[1]["year"] == 1958
        ]
        pairs = list(load_pairs_from_fields(selected))
        self.assertEqual(len(pairs), 0)

    def test_as_pairs(self):
        """Cube/message pairs expose the message for cube post-processing."""
        messages = GribMessage.messages_from_filename(self.file_path)
        cubes = []
        for cube, message in load_pairs_from_fields(messages):
            if message.sections[1]["year"] == 1998:
                cube.attributes["the year is"] = message.sections[1]["year"]
                cubes.append(cube)
        self.assertEqual(len(cubes), 1)
        self.assertEqual(cubes[0].attributes["the year is"], 1998)
# Allow running this test module directly.
if __name__ == "__main__":
    tests.main()
| SciTools/iris-grib | iris_grib/tests/unit/test_load_pairs_from_fields.py | Python | lgpl-3.0 | 1,857 |
import logging
from ... import di
logger = logging.getLogger(__name__)
@di.desc('tracker', reg=False)
class MockTracker:
    """No-op analytics tracker that only logs incoming calls.

    Bug fix: the handlers previously logged through the root logger
    (``logging.debug``) even though this module defines a named
    ``logger = logging.getLogger(__name__)``; they now use that logger so
    records carry the module name and honour its level/handler settings.
    """

    def _log(self, method, args, kwargs):
        # Shared debug dump used by every tracker hook.
        logger.debug(method)
        logger.debug(kwargs)
        logger.debug(args)

    def event(self, *args, **kwargs):
        self._log('event', args, kwargs)

    def new_message(self, *args, **kwargs):
        self._log('new_message', args, kwargs)

    def new_user(self, *args, **kwargs):
        self._log('new_user', args, kwargs)

    def story(self, *args, **kwargs):
        self._log('story', args, kwargs)
| hyzhak/bot-story | botstory/integrations/mocktracker/tracker.py | Python | mit | 653 |
"""Cutoff-based soft filtering of genomic variants.
"""
from distutils.version import LooseVersion
import math
import os
import shutil
import numpy
import toolz as tz
import yaml
from bcbio import broad, utils
from bcbio.distributed.transaction import file_transaction
from bcbio.pipeline import config_utils
from bcbio.pipeline import datadict as dd
from bcbio.provenance import do, programs
from bcbio.variation import vcfutils
# ## General functionality
def cutoff_w_expression(vcf_file, expression, data, name="+", filterext="",
                        extra_cmd="", limit_regions="variant_regions"):
    """Perform cutoff-based soft filtering using bcftools expressions like %QUAL < 20 || DP < 4.

    :param vcf_file: input VCF (optionally bgzipped)
    :param expression: bcftools expression marking records to soft-filter
    :param data: bcbio sample data dictionary (config, variant regions, ...)
    :param name: FILTER label applied to failing records
    :param filterext: extra suffix inserted into the output file name
    :param extra_cmd: shell fragment piped after bcftools (e.g. a sed fixup)
    :param limit_regions: "variant_regions" restricts filtering to the
        configured callable regions; any other value filters the whole file
    :returns: path to the filtered (and indexed, when bgzipped) VCF

    NOTE: both the output name and the bcftools command are expanded with
    ``format(**locals())``, so local variable names here are load-bearing.
    """
    base, ext = utils.splitext_plus(vcf_file)
    out_file = "{base}-filter{filterext}{ext}".format(**locals())
    if not utils.file_exists(out_file):
        with file_transaction(data, out_file) as tx_out_file:
            # Only invoke bcftools when there is something to filter;
            # otherwise pass the input straight through.
            if vcfutils.vcf_has_variants(vcf_file):
                bcftools = config_utils.get_program("bcftools", data["config"])
                bgzip_cmd = "| bgzip -c" if out_file.endswith(".gz") else ""
                intervals = ""
                if limit_regions == "variant_regions":
                    variant_regions = dd.get_variant_regions(data)
                    if variant_regions:
                        intervals = "-T %s" % vcfutils.bgzip_and_index(variant_regions, data["config"])
                cmd = ("{bcftools} filter -O v {intervals} --soft-filter '{name}' "
                       "-e '{expression}' -m '+' {vcf_file} {extra_cmd} {bgzip_cmd} > {tx_out_file}")
                do.run(cmd.format(**locals()),
                       "Cutoff-based soft filtering %s with %s" % (vcf_file, expression), data)
            else:
                shutil.copy(vcf_file, out_file)
    if out_file.endswith(".vcf.gz"):
        out_file = vcfutils.bgzip_and_index(out_file, data["config"])
    return out_file
def genotype_filter(vcf_file, expression, data, name, filterext=""):
    """Perform genotype based filtering using GATK with the provided expression.

    Adds FT tags to genotypes, rather than the general FILTER flag.

    :param vcf_file: input VCF to annotate
    :param expression: JEXL expression evaluated per genotype
    :param data: bcbio sample data dictionary
    :param name: genotype filter name recorded in the FT field
    :param filterext: extra suffix inserted into the output file name
    :returns: path to the annotated (and indexed, when bgzipped) VCF

    NOTE: the output name template expands ``format(**locals())``; local
    variable names here are load-bearing.
    """
    base, ext = utils.splitext_plus(vcf_file)
    out_file = "{base}-filter{filterext}{ext}".format(**locals())
    if not utils.file_exists(out_file):
        with file_transaction(data, out_file) as tx_out_file:
            params = ["-T", "VariantFiltration",
                      "-R", tz.get_in(["reference", "fasta", "base"], data),
                      "--variant", vcf_file,
                      "--out", tx_out_file,
                      "--genotypeFilterName", name,
                      "--genotypeFilterExpression", "'%s'" % expression]
            jvm_opts = broad.get_gatk_framework_opts(data["config"], os.path.dirname(tx_out_file))
            do.run(broad.gatk_cmd("gatk-framework", jvm_opts, params), "Filter with expression: %s" % expression)
    if out_file.endswith(".vcf.gz"):
        out_file = vcfutils.bgzip_and_index(out_file, data["config"])
    return out_file
def genotype_filter_toref(vcf_file, expression, data, filterext=""):
    """Perform genotype filters by converting failing calls to reference, using bcftools

    Prefer the FT approach used in genotype_filter, but bcftools handles complex filter
    expressions that GATK will not.

    :param vcf_file: input VCF
    :param expression: bcftools expression; matching genotypes become 0/0
    :param data: bcbio sample data dictionary
    :param filterext: extra suffix inserted into the output file name
    :returns: path to the filtered (and indexed, when bgzipped) VCF

    NOTE: both the output name and the bcftools command are expanded with
    ``format(**locals())``; local variable names here are load-bearing.
    """
    base, ext = utils.splitext_plus(vcf_file)
    out_file = "{base}-filter{filterext}{ext}".format(**locals())
    if not utils.file_exists(out_file):
        with file_transaction(data, out_file) as tx_out_file:
            # Pass the input through untouched when there is nothing to filter.
            if vcfutils.vcf_has_variants(vcf_file):
                bcftools = config_utils.get_program("bcftools", data["config"])
                output_type = "z" if tx_out_file.endswith(".gz") else "v"
                # -S 0 rewrites genotypes matching the expression to reference.
                cmd = ("{bcftools} filter -O {output_type} "
                       "-e '{expression}' -S 0 {vcf_file} > {tx_out_file}")
                do.run(cmd.format(**locals()), "Genotype filtering to ref %s with %s" % (vcf_file, expression), data)
            else:
                shutil.copy(vcf_file, out_file)
    if out_file.endswith(".vcf.gz"):
        out_file = vcfutils.bgzip_and_index(out_file, data["config"])
    return out_file
# ## Caller specific
def freebayes(in_file, ref_file, vrn_files, data):
    """FreeBayes filters: cutoff-based soft filtering.

    ``ref_file`` and ``vrn_files`` are accepted for signature compatibility
    with the other caller filter entry points but are not used here.
    """
    # The experimental bcbio.variation-based filter is kept for reference:
    # out_file = _freebayes_custom(in_file, ref_file, data)
    return _freebayes_cutoff(in_file, data)
def _freebayes_custom(in_file, ref_file, data):
    """Custom FreeBayes filtering using bcbio.variation, tuned to human NA12878 results.

    Experimental: for testing new methods.  Returns None when not applicable
    (paired tumor/normal input or too old a bcbio.variation version).
    """
    # Not applicable to tumor/normal paired calling.
    if vcfutils.get_paired_phenotype(data):
        return None
    config = data["config"]
    installed = programs.get_version("bcbio_variation", config=config)
    if LooseVersion(installed) < LooseVersion("0.1.1"):
        return None
    root, ext = os.path.splitext(in_file)
    out_file = "%s-filter%s" % (root, ext)
    if not utils.file_exists(out_file):
        scratch_dir = utils.safe_makedir(os.path.join(os.path.dirname(in_file), "tmp"))
        resources = config_utils.get_resources("bcbio_variation", config)
        jvm_opts = resources.get("jvm_opts", ["-Xms750m", "-Xmx2g"])
        java_args = ["-Djava.io.tmpdir=%s" % scratch_dir]
        cmd = (["bcbio-variation"] + jvm_opts + java_args
               + ["variant-filter", "freebayes", in_file, ref_file])
        do.run(cmd, "Custom FreeBayes filtering using bcbio.variation")
    return out_file
def _freebayes_cutoff(in_file, data):
    """Perform filtering of FreeBayes results, flagging low confidence calls.

    Filters using cutoffs on low depth based on Meynert et al's work modeling sensitivity
    of homozygote and heterozygote calling on depth:

    http://www.ncbi.nlm.nih.gov/pubmed/23773188

    and high depth heterozygote SNP filtering based on Heng Li's work
    evaluating variant calling artifacts:

    http://arxiv.org/abs/1404.0929

    Tuned based on NA12878 call comparisons to Genome in a Bottle reference genome.

    NOTE: output names and the depth filter clause are expanded with
    ``format(**locals())``; local variable names here are load-bearing.
    """
    # No variants: just copy (and index) the input unchanged.
    if not vcfutils.vcf_has_variants(in_file):
        base, ext = utils.splitext_plus(in_file)
        out_file = "{base}-filter{ext}".format(**locals())
        if not utils.file_exists(out_file):
            shutil.copy(in_file, out_file)
        if out_file.endswith(".vcf.gz"):
            out_file = vcfutils.bgzip_and_index(out_file, data["config"])
        return out_file
    depth_thresh, qual_thresh = None, None
    if _do_high_depth_filter(data):
        stats = _calc_vcf_stats(in_file)
        if stats["avg_depth"] > 0:
            # High-depth cutoff: mean depth plus ~3 standard deviations.
            depth_thresh = int(math.ceil(stats["avg_depth"] + 3 * math.pow(stats["avg_depth"], 0.5)))
            qual_thresh = depth_thresh * 2.0  # Multiplier from default GATK QD cutoff filter
    # Low-depth heterozygote/homozygote cutoffs (Meynert et al).
    filters = ('(AF[0] <= 0.5 && (DP < 4 || (DP < 13 && %QUAL < 10))) || '
               '(AF[0] > 0.5 && (DP < 4 && %QUAL < 50))')
    if depth_thresh:
        # High-depth, low-quality heterozygote artifact filter (Heng Li).
        filters += ' || (%QUAL < {qual_thresh} && DP > {depth_thresh} && AF[0] <= 0.5)'.format(**locals())
    return cutoff_w_expression(in_file, filters, data, name="FBQualDepth")
def _do_high_depth_filter(data):
    """Check if we should do high depth filtering -- only on germline non-regional calls."""
    coverage_interval = tz.get_in(["config", "algorithm", "coverage_interval"], data, "")
    if coverage_interval.lower() != "genome":
        return False
    return not vcfutils.get_paired_phenotype(data)
def _calc_vcf_stats(in_file):
    """Calculate statistics on VCF for filtering, saving to a file for quick re-runs."""
    stats_file = "%s-stats.yaml" % utils.splitext_plus(in_file)[0]
    # Cached result from a previous run: reuse it.
    if utils.file_exists(stats_file):
        with open(stats_file) as in_handle:
            return yaml.safe_load(in_handle)
    stats = {"avg_depth": _average_called_depth(in_file)}
    with open(stats_file, "w") as out_handle:
        yaml.safe_dump(stats, out_handle, default_flow_style=False, allow_unicode=False)
    return stats
def _average_called_depth(in_file):
    """Retrieve the average depth of called reads in the provided VCF."""
    import cyvcf2
    depths = [int(rec.INFO.get("DP"))
              for rec in cyvcf2.VCF(str(in_file))
              if rec.INFO.get("DP") is not None]
    # An empty VCF (or one without DP annotations) averages to zero.
    if not depths:
        return 0
    return int(math.ceil(numpy.mean(depths)))
def platypus(in_file, data):
    """Filter Platypus calls, removing Q20 filter and replacing with depth and quality based filter.

    Platypus uses its own VCF nomenclature: TC == DP, FR == AF

    Platypus gVCF output appears to have an 0/1 index problem so the reference block
    regions are 1 base outside regions of interest. We avoid limiting regions during
    filtering when using it.
    """
    filters = ('(FR[0] <= 0.5 && TC < 4 && %QUAL < 20) || '
               '(TC < 13 && %QUAL < 10) || '
               '(FR[0] > 0.5 && TC < 4 && %QUAL < 50)')
    if vcfutils.is_gvcf_file(in_file):
        limit_regions = None
    else:
        limit_regions = "variant_regions"
    # The sed fixup rewrites Platypus's Q20 soft filter back to PASS.
    return cutoff_w_expression(in_file, filters, data, name="PlatQualDepth",
                               extra_cmd="| sed 's/\\tQ20\\t/\\tPASS\\t/'", limit_regions=limit_regions)
def samtools(in_file, data):
    """Filter samtools calls based on depth and quality, using similar approaches to FreeBayes."""
    low_het = '((AC[0] / AN) <= 0.5 && DP < 4 && %QUAL < 20)'
    low_depth = '(DP < 13 && %QUAL < 10)'
    low_hom = '((AC[0] / AN) > 0.5 && DP < 4 && %QUAL < 50)'
    filters = " || ".join([low_het, low_depth, low_hom])
    return cutoff_w_expression(in_file, filters, data, name="stQualDepth")
def _gatk_general():
"""General filters useful for both GATK SNPs and indels.
Remove low quality, low allele fraction variants at the ends of reads.
Generally useful metric identified by looking at 10x data.
https://community.10xgenomics.com/t5/Genome-Exome-Forum/Best-practices-for-trimming-adapters-when-variant-calling/m-p/473
https://github.com/bcbio/bcbio_validations/tree/master/gatk4#10x-adapter-trimming--low-frequency-allele-filter
"""
return ["(QD < 10.0 && AD[1] / (AD[1] + AD[0]) < 0.25 && ReadPosRankSum < 0.0)"]
def gatk_snp_cutoff(in_file, data):
    """Perform cutoff-based soft filtering on GATK SNPs using best-practice recommendations.

    Uses a more lenient mapping quality (MQ < 30 instead of GATK's MQ < 40),
    since the recommended value is too stringent: http://imgur.com/a/oHRVB

    QD and FS are not calculated when generating gVCF output:
    https://github.com/broadgsa/gatk-protected/blob/e91472ddc7d58ace52db0cab4d70a072a918d64c/protected/gatk-tools-protected/src/main/java/org/broadinstitute/gatk/tools/walkers/haplotypecaller/HaplotypeCaller.java#L300

    The trailing sed removes escaped quotes in the VCF output which
    pyVCF fails on.

    Does not use the GATK best practice recommend SOR filter (SOR > 3.0) as it
    has a negative impact on sensitivity relative to precision:
    https://github.com/bcbio/bcbio_validations/tree/master/gatk4#na12878-hg38
    """
    filters = ["MQ < 30.0", "MQRankSum < -12.5", "ReadPosRankSum < -8.0",
               "QD < 2.0", "FS > 60.0"]
    filters.extend(_gatk_general())
    variantcaller = utils.get_in(data, ("config", "algorithm", "variantcaller"))
    # GATK Haplotype caller (v2.2) appears to have much larger HaplotypeScores
    # resulting in excessive filtering, so avoid this metric for those callers.
    if variantcaller not in ["gatk-haplotype", "haplotyper"]:
        filters.append("HaplotypeScore > 13.0")
    expression = 'TYPE="snp" && (%s)' % " || ".join(filters)
    return cutoff_w_expression(in_file, expression, data, "GATKCutoffSNP", "SNP",
                               extra_cmd=r"""| sed 's/\\"//g'""")
def gatk_indel_cutoff(in_file, data):
    """Perform cutoff-based soft filtering on GATK indels using best-practice recommendations.

    The extra command strips escaped quotes, which pyVCF cannot parse.
    """
    exprs = ["ReadPosRankSum < -20.0", "QD < 2.0", "FS > 200.0", "SOR > 10.0"]
    exprs.extend(_gatk_general())
    expression = 'TYPE="indel" && (%s)' % " || ".join(exprs)
    return cutoff_w_expression(in_file, expression, data, "GATKCutoffIndel",
                               "INDEL", extra_cmd=r"""| sed 's/\\"//g'""")
| biocyberman/bcbio-nextgen | bcbio/variation/vfilter.py | Python | mit | 12,226 |
# -*- coding: utf-8 -*-
import sys
import autofixture
from django.core.management import call_command
from decimal import Decimal
from datetime import date, datetime
from autofixture import generators, constraints
from autofixture.base import AutoFixture, CreateInstanceError, Link
from autofixture.compat import get_field
from autofixture.values import Values
from . import FileSystemCleanupTestCase
from ..models import y2k
from ..models import (
SimpleModel, OtherSimpleModel, DeepLinkModel1, DeepLinkModel2,
NullableFKModel, BasicModel, UniqueTestModel, UniqueTogetherTestModel,
RelatedModel, O2OModel, O2OPrimaryKeyModel, InheritModel, InheritUniqueTogetherModel,
M2MModel, ThroughModel, M2MModelThrough, SelfReferencingModel,
UniqueNullFieldModel, UniqueTogetherNullFieldModel,
MultipleUniqueTogetherNullFieldModel, SelfReferencingModelNoNull, GFKModel,
GRModel, RelationWithCustomAutofixtureModel)
# Register any AutoFixture subclasses provided by installed apps.
autofixture.autodiscover()
# Python 2/3 compatibility: ``str_`` is the native text type used in the
# type checks below (``unicode`` on Python 2, ``str`` on Python 3).
if sys.version_info[0] < 3:
    str_ = unicode
else:
    str_ = str
class SimpleAutoFixture(AutoFixture):
    """Fixture that always fills ``name`` with the static string 'foo'."""
    field_values = {
        'name': generators.StaticGenerator('foo'),
    }
class BasicValueFixtureBase(AutoFixture):
    """Base fixture providing a static value for ``blankchars`` via ``Values``."""
    field_values = Values(blankchars='bar')
class BasicValueFixture(BasicValueFixtureBase):
    """Fixture combining an inner ``Values`` class, inherited values and
    ``field_values``; exercised by ``TestAutofixtureAPI.test_values_class``."""
    class Values:
        # static value, callable value and generator value respectively
        chars = 'foo'
        shortchars = staticmethod(lambda: 'a')
        intfield = generators.IntegerGenerator(min_value=1, max_value=13)
    field_values = {
        'nullchars': 'spam',
    }
class TestBasicModel(FileSystemCleanupTestCase):
    """Exercise AutoFixture value generation for every ``BasicModel`` field."""

    def assertEqualOr(self, first, second, fallback):
        """Pass when ``first == second`` or when ``fallback`` is truthy."""
        if first != second and not fallback:
            self.fail()

    def test_create(self):
        """``create(n)`` should persist exactly ``n`` instances."""
        filler = AutoFixture(BasicModel)
        filler.create(10)
        self.assertEqual(BasicModel.objects.count(), 10)

    def test_constraints(self):
        """Generated values must satisfy each field's type/length constraints."""
        filler = AutoFixture(
            BasicModel,
            overwrite_defaults=False)
        for obj in filler.create(100):
            self.assertTrue(len(obj.chars) > 0)
            self.assertEqual(type(obj.chars), str_)
            self.assertTrue(len(obj.shortchars) <= 2)
            self.assertEqual(type(obj.shortchars), str_)
            # BUG FIX: was ``assertTrue(type(obj.blankchars), str_)`` -- the
            # second argument of assertTrue is the failure *message*, so the
            # check always passed. Compare the types explicitly instead.
            self.assertEqual(type(obj.blankchars), str_)
            self.assertEqualOr(type(obj.nullchars), str_, None)
            self.assertEqual(type(obj.slugfield), str_)
            self.assertEqual(type(obj.defaultint), int)
            self.assertEqual(obj.defaultint, 1)
            self.assertEqual(type(obj.intfield), int)
            self.assertEqual(type(obj.sintfield), int)
            self.assertEqual(type(obj.pintfield), int)
            self.assertEqual(type(obj.psintfield), int)
            self.assertEqual(type(obj.datefield), date)
            self.assertEqual(type(obj.datetimefield), datetime)
            self.assertEqual(type(obj.defaultdatetime), datetime)
            self.assertEqual(obj.defaultdatetime, y2k())
            self.assertEqual(type(obj.decimalfield), Decimal)
            self.assertTrue('@' in obj.emailfield)
            self.assertTrue('.' in obj.emailfield)
            self.assertTrue(' ' not in obj.emailfield)
            # BUG FIX: was ``assertTrue(obj.ipaddressfield.count('.'), 3)``
            # which passed vacuously (3 was the message). An IPv4 dotted quad
            # has exactly three dots.
            self.assertEqual(obj.ipaddressfield.count('.'), 3)
            self.assertTrue(len(obj.ipaddressfield) >= 7)
        self.assertEqual(BasicModel.objects.count(), 100)

    def test_field_values(self):
        """Explicit field_values (constant, generator, callable) are honoured."""
        int_value = 1
        char_values = (u'a', u'b')
        filler = AutoFixture(
            BasicModel,
            field_values={
                'intfield': 1,
                'chars': generators.ChoicesGenerator(values=char_values),
                'shortchars': lambda: u'ab',
            })
        for obj in filler.create(100):
            self.assertEqual(obj.intfield, int_value)
            self.assertTrue(obj.chars in char_values)
            self.assertEqual(obj.shortchars, u'ab')

    def test_field_values_overwrite_defaults(self):
        """field_values take precedence over model field defaults."""
        fixture = AutoFixture(
            BasicModel,
            field_values={
                'defaultint': 42,
            })
        obj = fixture.create(1)[0]
        self.assertEqual(obj.defaultint, 42)
class TestRelations(FileSystemCleanupTestCase):
    """Tests for relation handling: follow_fk/generate_fk for ForeignKey and
    OneToOne fields, follow_m2m/generate_m2m for many-to-many fields, and
    self-referencing models."""
    def test_generate_foreignkeys(self):
        """generate_fk=True creates fresh related objects; FKs with
        limit_choices_to must respect the limit (name == 'foo')."""
        filler = AutoFixture(
            RelatedModel,
            generate_fk=True)
        for obj in filler.create(100):
            self.assertEqual(obj.related.__class__, BasicModel)
            self.assertEqual(obj.limitedfk.name, 'foo')
    def test_deep_generate_foreignkeys(self):
        """generate_fk=True recurses through chained FKs."""
        filler = AutoFixture(
            DeepLinkModel2,
            generate_fk=True)
        for obj in filler.create(10):
            self.assertEqual(obj.related.__class__, DeepLinkModel1)
            self.assertEqual(obj.related.related.__class__, SimpleModel)
            self.assertEqual(obj.related.related2.__class__, SimpleModel)
    def test_deep_generate_foreignkeys2(self):
        """Only the FK paths named in generate_fk are generated; others
        stay None when follow_fk is disabled."""
        filler = AutoFixture(
            DeepLinkModel2,
            follow_fk=False,
            generate_fk=('related', 'related__related'))
        for obj in filler.create(10):
            self.assertEqual(obj.related.__class__, DeepLinkModel1)
            self.assertEqual(obj.related.related.__class__, SimpleModel)
            self.assertEqual(obj.related.related2, None)
    def test_generate_only_some_foreignkeys(self):
        """FKs not listed in generate_fk are left unset (None)."""
        filler = AutoFixture(
            RelatedModel,
            generate_fk=('related',))
        for obj in filler.create(100):
            self.assertEqual(obj.related.__class__, BasicModel)
            self.assertEqual(obj.limitedfk, None)
    def test_follow_foreignkeys(self):
        """follow_fk=True reuses existing instances instead of creating new
        ones; the limited FK picks the instance matching limit_choices_to."""
        related = AutoFixture(BasicModel).create()[0]
        self.assertEqual(BasicModel.objects.count(), 1)
        simple = SimpleModel.objects.create(name='foo')
        # second instance exists so the limit_choices_to filter is meaningful
        simple2 = SimpleModel.objects.create(name='bar')
        filler = AutoFixture(
            RelatedModel,
            follow_fk=True)
        for obj in filler.create(100):
            self.assertEqual(obj.related, related)
            self.assertEqual(obj.limitedfk, simple)
    def test_follow_only_some_foreignkeys(self):
        """Only FK paths listed in follow_fk are followed; others are None."""
        related = AutoFixture(BasicModel).create()[0]
        self.assertEqual(BasicModel.objects.count(), 1)
        simple = SimpleModel.objects.create(name='foo')
        simple2 = SimpleModel.objects.create(name='bar')
        filler = AutoFixture(
            RelatedModel,
            follow_fk=('related',))
        for obj in filler.create(100):
            self.assertEqual(obj.related, related)
            self.assertEqual(obj.limitedfk, None)
    def test_follow_fk_for_o2o(self):
        # OneToOneField is the same as a ForeignKey with unique=True
        filler = AutoFixture(O2OModel, follow_fk=True)
        simple = SimpleModel.objects.create()
        obj = filler.create()[0]
        self.assertEqual(obj.o2o, simple)
        # the single candidate is used up, so a second create must fail
        self.assertRaises(CreateInstanceError, filler.create)
    def test_generate_fk_for_o2o(self):
        # OneToOneField is the same as a ForeignKey with unique=True
        filler = AutoFixture(O2OModel, generate_fk=True)
        all_o2o = set()
        for obj in filler.create(10):
            all_o2o.add(obj.o2o)
        # every generated SimpleModel is used exactly once
        self.assertEqual(set(SimpleModel.objects.all()), all_o2o)
    def test_follow_fk_for_o2o_primary_key(self):
        # OneToOneField on primary key should follow if it is not table inheritance
        filler = AutoFixture(O2OPrimaryKeyModel, follow_fk=True)
        simple = SimpleModel.objects.create()
        obj = filler.create()[0]
        self.assertEqual(obj.o2o, simple)
        self.assertRaises(CreateInstanceError, filler.create)
    def test_generate_fk_for_o2o_primary_key(self):
        # OneToOneField on primary key should follow if it is not table inheritance
        filler = AutoFixture(O2OPrimaryKeyModel, generate_fk=True)
        all_o2o = set()
        for obj in filler.create(10):
            all_o2o.add(obj.o2o)
        self.assertEqual(set(SimpleModel.objects.all()), all_o2o)
    def test_follow_m2m(self):
        """follow_m2m reuses the existing related instance for M2M fields."""
        related = AutoFixture(SimpleModel).create()[0]
        self.assertEqual(SimpleModel.objects.count(), 1)
        filler = AutoFixture(
            M2MModel,
            follow_m2m=(2, 10))
        for obj in filler.create(10):
            self.assertEqual(list(obj.m2m.all()), [related])
    def test_follow_only_some_m2m(self):
        """Only M2M fields named in the follow_m2m dict are populated."""
        related = AutoFixture(SimpleModel).create()[0]
        self.assertEqual(SimpleModel.objects.count(), 1)
        other_related = AutoFixture(OtherSimpleModel).create()[0]
        self.assertEqual(OtherSimpleModel.objects.count(), 1)
        filler = AutoFixture(
            M2MModel,
            none_p=0,
            follow_m2m={
                'm2m': (2, 10),
            })
        for obj in filler.create(10):
            self.assertEqual(list(obj.m2m.all()), [related])
            self.assertEqual(list(obj.secondm2m.all()), [])
    def test_generate_m2m(self):
        """generate_m2m=(min, max) creates between min and max fresh related
        objects for each M2M field."""
        filler = AutoFixture(
            M2MModel,
            none_p=0,
            generate_m2m=(1, 5))
        all_m2m = set()
        all_secondm2m = set()
        for obj in filler.create(10):
            self.assertTrue(1 <= obj.m2m.count() <= 5)
            self.assertTrue(1 <= obj.secondm2m.count() <= 5)
            all_m2m.update(obj.m2m.all())
            all_secondm2m.update(obj.secondm2m.all())
        # no orphan related objects should have been created
        self.assertEqual(SimpleModel.objects.count(), len(all_m2m))
        self.assertEqual(OtherSimpleModel.objects.count(), len(all_secondm2m))
    def test_generate_m2m_with_custom_autofixture(self):
        """A registered custom fixture is used when generating M2M targets."""
        filler = AutoFixture(RelationWithCustomAutofixtureModel,
                             generate_fk=True,
                             generate_m2m=(1, 1))
        instance = filler.create_one()
        self.assertEqual(instance.users.count(), 1)
        user = instance.users.get()
        # Detect that the UserFixture was used.
        self.assertTrue(' ' not in user.username)
        self.assertTrue(' ' not in user.first_name)
        self.assertTrue(' ' not in user.last_name)
    def test_generate_only_some_m2m(self):
        """Only M2M fields named in the generate_m2m dict get instances."""
        filler = AutoFixture(
            M2MModel,
            none_p=0,
            generate_m2m={
                'm2m': (1, 5),
            })
        all_m2m = set()
        all_secondm2m = set()
        for obj in filler.create(10):
            self.assertTrue(1 <= obj.m2m.count() <= 5)
            self.assertEqual(0, obj.secondm2m.count())
            all_m2m.update(obj.m2m.all())
            all_secondm2m.update(obj.secondm2m.all())
        self.assertEqual(SimpleModel.objects.count(), len(all_m2m))
        self.assertEqual(OtherSimpleModel.objects.count(), len(all_secondm2m))
    def test_generate_m2m_with_intermediary_model(self):
        """generate_m2m works for M2M relations with a through model."""
        filler = AutoFixture(
            M2MModelThrough,
            generate_m2m=(1, 5))
        all_m2m = set()
        for obj in filler.create(10):
            self.assertTrue(1 <= obj.m2m.count() <= 5)
            all_m2m.update(obj.m2m.all())
        self.assertEqual(SimpleModel.objects.count(), len(all_m2m))
    def test_generate_fk_to_self(self):
        ''' When a model with a reference to itself is encountered, If NULL is allowed
        don't generate a new instance of itself as a foreign key, so as not to reach
        pythons recursion limit
        '''
        filler = AutoFixture(SelfReferencingModel, generate_fk=True)
        model = filler.create_one()
        self.assertEqual(model.parent_self, None)
        self.assertEqual(SelfReferencingModel.objects.count(), 1)
    def test_generate_fk_to_self_no_null(self):
        ''' Throw an exception when a model is encountered which references itself but
        does not allow NULL values to be set.
        '''
        filler = AutoFixture(SelfReferencingModelNoNull, generate_fk=True)
        self.assertRaises(CreateInstanceError, filler.create_one)
    def test_generate_fk_to_self_follow(self):
        """follow_fk on a self-referencing model links the second instance
        back to the first."""
        filler = AutoFixture(SelfReferencingModel, follow_fk=True)
        first = filler.create_one()
        self.assertEqual(SelfReferencingModel.objects.count(), 1)
        filler = AutoFixture(SelfReferencingModel, follow_fk=True)
        second = filler.create_one()
        self.assertEqual(SelfReferencingModel.objects.count(), 2)
        self.assertEqual(second.parent_self, first)
class TestInheritModel(FileSystemCleanupTestCase):
    """Model inheritance must not break fixture creation."""

    def test_inheritence_model(self):
        """Creating ten inherited-model instances persists all of them."""
        AutoFixture(InheritModel).create(10)
        self.assertEqual(InheritModel.objects.count(), 10)

    def test_inheritence_unique_together_model(self):
        """unique_together on an inherited model is honoured for ten rows."""
        AutoFixture(InheritUniqueTogetherModel).create(10)
        self.assertEqual(InheritUniqueTogetherModel.objects.count(), 10)
class TestUniqueConstraints(FileSystemCleanupTestCase):
    """Tests for unique and unique_together constraint handling, including
    the SQL semantics that NULL values never collide with each other."""
    def test_unique_field(self):
        """A unique choice field can be exhausted without collisions."""
        filler = AutoFixture(UniqueTestModel)
        count = len(get_field(filler.model, 'choice1').choices)
        for obj in filler.create(count):
            pass
    def test_unique_together(self):
        """All combinations of two unique_together choice fields can be
        generated without collisions."""
        filler = AutoFixture(UniqueTogetherTestModel)
        count1 = len(get_field(filler.model, 'choice1').choices)
        count2 = len(get_field(filler.model, 'choice2').choices)
        for obj in filler.create(count1 * count2):
            pass
    def test_unique_constraint_null(self):
        """Multiple NULLs in a unique column must not be treated as equal."""
        fixture = AutoFixture(
            UniqueNullFieldModel,
            field_values={
                'name': generators.NoneGenerator()
            }
        )
        self.assertIn(constraints.unique_constraint, fixture.constraints)
        fixture.create_one()
        # Creating another entry with a null value should not raise an
        # exception as a unique column can contain multiple null values
        fixture.create_one()
    def test_unique_together_constraint_nulls(self):
        """unique_together with all fields NULL never collides."""
        fixture = AutoFixture(
            UniqueTogetherNullFieldModel,
            field_values={
                'field_one': generators.NoneGenerator(),
                'field_two': generators.NoneGenerator()
            }
        )
        self.assertIn(constraints.unique_together_constraint,
                      fixture.constraints)
        fixture.create_one()
        fixture.create_one()
    def test_unique_together_constraint_one_field_null(self):
        """A repeated (NULL, constant) pair is a real collision and raises."""
        fixture = AutoFixture(
            UniqueTogetherNullFieldModel,
            field_values={
                'field_one': generators.NoneGenerator(),
                'field_two': generators.StaticGenerator('test_string')
            }
        )
        self.assertIn(constraints.unique_together_constraint,
                      fixture.constraints)
        # the first create succeeds; the identical second one must raise
        with self.assertRaises(CreateInstanceError):
            fixture.create_one()
            fixture.create_one()
    def test_multiple_unique_together_constraint_nulls(self):
        """Several unique_together tuples, all NULL: no collision."""
        fixture = AutoFixture(
            MultipleUniqueTogetherNullFieldModel,
            field_values={
                'field_one': generators.NoneGenerator(),
                'field_two': generators.NoneGenerator(),
                'field_three': generators.NoneGenerator(),
                'field_four': generators.NoneGenerator(),
                'field_five': generators.NoneGenerator(),
            }
        )
        self.assertIn(constraints.unique_together_constraint,
                      fixture.constraints)
        fixture.create_one()
        fixture.create_one()
    def test_multiple_unique_together_constraint_one_field_null(self):
        """A non-NULL value in one tuple makes the repeat collide."""
        fixture = AutoFixture(
            MultipleUniqueTogetherNullFieldModel,
            field_values={
                'field_one': generators.NoneGenerator(),
                'field_two': generators.NoneGenerator(),
                'field_three': generators.NoneGenerator(),
                'field_four': generators.NoneGenerator(),
                'field_five': generators.StaticGenerator('test_string'),
            }
        )
        self.assertIn(constraints.unique_together_constraint,
                      fixture.constraints)
        with self.assertRaises(CreateInstanceError):
            fixture.create_one()
            fixture.create_one()
    def test_multiple_unique_together_constraint_one_field_null_first_unique_together_tuple(self):
        """Same as above but the non-NULL value sits in the first tuple."""
        fixture = AutoFixture(
            MultipleUniqueTogetherNullFieldModel,
            field_values={
                'field_one': generators.NoneGenerator(),
                'field_two': generators.StaticGenerator('test_string'),
                'field_three': generators.NoneGenerator(),
                'field_four': generators.NoneGenerator(),
                'field_five': generators.NoneGenerator(),
            }
        )
        self.assertIn(constraints.unique_together_constraint,
                      fixture.constraints)
        with self.assertRaises(CreateInstanceError):
            fixture.create_one()
            fixture.create_one()
class TestGenerators(FileSystemCleanupTestCase):
    """Tests for the InstanceSelector generator."""
    def test_instance_selector(self):
        """InstanceSelector picks existing instances; with min/max counts it
        returns lists bounded by those counts and by availability."""
        AutoFixture(SimpleModel).create(10)
        # without count arguments a single instance is returned
        result = generators.InstanceSelector(SimpleModel).generate()
        self.assertEqual(result.__class__, SimpleModel)
        for i in range(10):
            result = generators.InstanceSelector(
                SimpleModel, max_count=10).generate()
            self.assertTrue(0 <= len(result) <= 10)
            for obj in result:
                self.assertEqual(obj.__class__, SimpleModel)
        for i in range(10):
            result = generators.InstanceSelector(
                SimpleModel, min_count=5, max_count=10).generate()
            self.assertTrue(5 <= len(result) <= 10)
            for obj in result:
                self.assertEqual(obj.__class__, SimpleModel)
        for i in range(10):
            result = generators.InstanceSelector(
                SimpleModel, min_count=20, max_count=100).generate()
            # cannot return more instances than available
            self.assertEqual(len(result), 10)
            for obj in result:
                self.assertEqual(obj.__class__, SimpleModel)
        # works also with queryset as argument
        result = generators.InstanceSelector(SimpleModel.objects.all()).generate()
        self.assertEqual(result.__class__, SimpleModel)
class TestLinkClass(FileSystemCleanupTestCase):
    """Tests for the Link helper used to express follow_fk/generate_fk paths."""
    def test_flat_link(self):
        """Membership works for named fields; lookups of unknown names
        return the default (None)."""
        link = Link(('foo', 'bar'))
        self.assertTrue('foo' in link)
        self.assertTrue('bar' in link)
        self.assertFalse('spam' in link)
        self.assertEqual(link['foo'], None)
        self.assertEqual(link['spam'], None)
    def test_nested_links(self):
        """'a__b' paths create sub-links; 'x__ALL' makes every sub-field of
        x a member."""
        link = Link(('foo', 'foo__bar', 'spam__ALL'))
        self.assertTrue('foo' in link)
        self.assertFalse('spam' in link)
        self.assertFalse('egg' in link)
        foolink = link.get_deep_links('foo')
        self.assertTrue('bar' in foolink)
        self.assertFalse('egg' in foolink)
        spamlink = link.get_deep_links('spam')
        self.assertTrue('bar' in spamlink)
        self.assertTrue('egg' in spamlink)
    def test_links_with_value(self):
        """Dict form attaches values per path; unknown names fall back to
        the supplied default."""
        link = Link({'foo': 1, 'spam__egg': 2}, default=0)
        self.assertTrue('foo' in link)
        self.assertEqual(link['foo'], 1)
        self.assertFalse('spam' in link)
        self.assertEqual(link['spam'], 0)
        spamlink = link.get_deep_links('spam')
        self.assertTrue('egg' in spamlink)
        self.assertEqual(spamlink['bar'], 0)
        self.assertEqual(spamlink['egg'], 2)
    def test_always_true_link(self):
        """Link(True) and Link(('ALL',)) match every field at every depth."""
        link = Link(True)
        self.assertTrue('field' in link)
        self.assertTrue('any' in link)
        link = link.get_deep_links('field')
        self.assertTrue('field' in link)
        self.assertTrue('any' in link)
        link = Link(('ALL',))
        self.assertTrue('field' in link)
        self.assertTrue('any' in link)
        link = link.get_deep_links('field')
        self.assertTrue('field' in link)
        self.assertTrue('any' in link)
    def test_inherit_always_true_value(self):
        """A value attached to 'ALL' propagates to all fields and sub-links."""
        link = Link({'ALL': 1})
        self.assertEqual(link['foo'], 1)
        sublink = link.get_deep_links('foo')
        self.assertEqual(sublink['bar'], 1)
class TestRegistry(FileSystemCleanupTestCase):
    """Tests for autofixture.register/unregister and registry-aware helpers."""
    def setUp(self):
        # isolate the global registry so tests cannot leak registrations
        self.original_registry = autofixture.REGISTRY
        autofixture.REGISTRY = {}
    def tearDown(self):
        autofixture.REGISTRY = self.original_registry
    def test_registration(self):
        """register() stores the fixture class keyed by model."""
        autofixture.register(SimpleModel, SimpleAutoFixture)
        self.assertTrue(SimpleModel in autofixture.REGISTRY)
        self.assertEqual(autofixture.REGISTRY[SimpleModel], SimpleAutoFixture)
    def test_unregister(self):
        """unregister() removes a previously registered fixture."""
        autofixture.register(SimpleModel, SimpleAutoFixture)
        self.assertTrue(SimpleModel in autofixture.REGISTRY)
        self.assertEqual(autofixture.REGISTRY[SimpleModel], SimpleAutoFixture)
        autofixture.unregister(SimpleModel)
        self.assertFalse(SimpleModel in autofixture.REGISTRY)
    def test_create(self):
        """Module-level create()/create_one() use the registered fixture."""
        autofixture.register(SimpleModel, SimpleAutoFixture)
        for obj in autofixture.create(SimpleModel, 10):
            self.assertEqual(obj.name, 'foo')
        obj = autofixture.create_one(SimpleModel)
        self.assertEqual(obj.name, 'foo')
    def test_overwrite_attributes(self):
        """field_values passed at call time override the registered ones."""
        autofixture.register(SimpleModel, SimpleAutoFixture)
        for obj in autofixture.create(
                SimpleModel, 10, field_values={'name': 'bar'}):
            self.assertEqual(obj.name, 'bar')
        obj = autofixture.create_one(
            SimpleModel, field_values={'name': 'bar'})
        self.assertEqual(obj.name, 'bar')
    def test_registered_fixture_is_used_for_fk(self):
        """generate_fk uses the fixture registered for the FK target model."""
        class BasicModelFixture(AutoFixture):
            field_values={'chars': 'Hello World!'}
        autofixture.register(BasicModel, BasicModelFixture)
        fixture = AutoFixture(RelatedModel, generate_fk=['related'])
        obj = fixture.create_one()
        self.assertTrue(obj)
        self.assertEqual(obj.related.chars, 'Hello World!')
    def test_registered_fixture_is_used_for_m2m(self):
        """generate_m2m uses the fixture registered for the M2M target model."""
        class SimpleModelFixture(AutoFixture):
            field_values={'name': 'Jon Doe'}
        autofixture.register(SimpleModel, SimpleModelFixture)
        fixture = AutoFixture(M2MModel, generate_m2m={'m2m': (5,5)})
        obj = fixture.create_one()
        self.assertTrue(obj)
        self.assertEqual(obj.m2m.count(), 5)
        self.assertEqual(
            list(obj.m2m.values_list('name', flat=True)),
            ['Jon Doe'] * 5)
class TestAutofixtureAPI(FileSystemCleanupTestCase):
    """Tests for Values-class handling through the registry API."""
    def setUp(self):
        # isolate the global registry so tests cannot leak registrations
        self.original_registry = autofixture.REGISTRY
        autofixture.REGISTRY = {}
    def tearDown(self):
        autofixture.REGISTRY = self.original_registry
    def test_values_class(self):
        """Values come from the inner Values class, the base class and
        field_values combined (see BasicValueFixture above)."""
        autofixture.register(BasicModel, BasicValueFixture)
        for obj in autofixture.create(BasicModel, 10):
            self.assertEqual(obj.chars, 'foo')
            self.assertEqual(obj.shortchars, 'a')
            self.assertEqual(obj.blankchars, 'bar')
            self.assertEqual(obj.nullchars, 'spam')
            self.assertTrue(1 <= obj.intfield <= 13)
class TestManagementCommand(FileSystemCleanupTestCase):
    """Tests for the ``loadtestdata`` management command and its options."""
    def setUp(self):
        # isolate the global registry so tests cannot leak registrations
        self.original_registry = autofixture.REGISTRY
        autofixture.REGISTRY = {}
    def call(self, *args, **kwargs):
        # helper: invoke loadtestdata quietly
        return call_command('loadtestdata', *args, verbosity=0, **kwargs)
    def tearDown(self):
        autofixture.REGISTRY = self.original_registry
    def test_basic(self):
        """'app.Model:N' creates N instances per invocation."""
        self.call('autofixture_tests.SimpleModel:1')
        self.assertEqual(SimpleModel.objects.count(), 1)
        self.call('autofixture_tests.SimpleModel:5')
        self.assertEqual(SimpleModel.objects.count(), 6)
    def test_generate_fk(self):
        """--generate-fk with explicit paths generates those FKs; the rest
        are followed (shared instance)."""
        self.call('autofixture_tests.DeepLinkModel2:1',
                  generate_fk='related,related__related')
        obj = DeepLinkModel2.objects.get()
        self.assertTrue(obj.related)
        self.assertTrue(obj.related.related)
        self.assertEqual(obj.related.related2, obj.related.related)
    def test_generate_fk_with_no_follow(self):
        """With --no-follow-fk, unlisted FK paths remain None."""
        self.call('autofixture_tests.DeepLinkModel2:1',
                  generate_fk='related,related__related',
                  no_follow_fk=True)
        obj = DeepLinkModel2.objects.get()
        self.assertTrue(obj.related)
        self.assertTrue(obj.related.related)
        self.assertEqual(obj.related.related2, None)
    def test_generate_fk_with_ALL(self):
        """--generate-fk=ALL generates a fresh instance for every FK."""
        self.call('autofixture_tests.DeepLinkModel2:1',
                  generate_fk='ALL')
        obj = DeepLinkModel2.objects.get()
        self.assertTrue(obj.related)
        self.assertTrue(obj.related.related)
        self.assertTrue(obj.related.related2)
        self.assertTrue(obj.related.related != obj.related.related2)
    def test_no_follow_m2m(self):
        """--no-follow-m2m leaves M2M relations empty."""
        AutoFixture(SimpleModel).create(1)
        self.call('autofixture_tests.NullableFKModel:1',
                  no_follow_m2m=True)
        obj = NullableFKModel.objects.get()
        self.assertEqual(obj.m2m.count(), 0)
    def test_follow_m2m(self):
        """--follow-m2m='field:min:max' bounds per-field M2M counts."""
        AutoFixture(SimpleModel).create(10)
        AutoFixture(OtherSimpleModel).create(10)
        self.call('autofixture_tests.M2MModel:25',
                  follow_m2m='m2m:3:3,secondm2m:0:10')
        for obj in M2MModel.objects.all():
            self.assertEqual(obj.m2m.count(), 3)
            self.assertTrue(0 <= obj.secondm2m.count() <= 10)
    def test_generate_m2m(self):
        """--generate-m2m='field:min:max' creates fresh M2M targets."""
        self.call('autofixture_tests.M2MModel:10',
                  generate_m2m='m2m:1:1,secondm2m:2:5')
        all_m2m, all_secondm2m = set(), set()
        for obj in M2MModel.objects.all():
            self.assertEqual(obj.m2m.count(), 1)
            self.assertTrue(
                2 <= obj.secondm2m.count() <= 5 or
                obj.secondm2m.count() == 0)
            all_m2m.update(obj.m2m.all())
            all_secondm2m.update(obj.secondm2m.all())
        self.assertEqual(all_m2m, set(SimpleModel.objects.all()))
        self.assertEqual(all_secondm2m, set(OtherSimpleModel.objects.all()))
    def test_using_registry(self):
        """The command honours fixtures registered via autofixture.register."""
        autofixture.register(SimpleModel, SimpleAutoFixture)
        self.call('autofixture_tests.SimpleModel:10')
        for obj in SimpleModel.objects.all():
            self.assertEqual(obj.name, 'foo')
    def test_use_option(self):
        """--use=dotted.path.FixtureClass selects an explicit fixture."""
        self.call('autofixture_tests.SimpleModel:10',
                  use='autofixture_tests.tests.test_base.SimpleAutoFixture')
        for obj in SimpleModel.objects.all():
            self.assertEqual(obj.name, 'foo')
class TestGenericRelations(FileSystemCleanupTestCase):
    """Regression tests around GenericRelation (contenttypes) handling."""
    def assertNotRaises(self, exc_type, func, msg=None,
                        args=None, kwargs=None):
        # Assert that calling ``func`` does not raise ``exc_type`` with
        # message ``msg``; an ``exc_type`` raised with a *different* message
        # is tolerated (treated as unrelated) and the test passes.
        args = args or []
        kwargs = kwargs or {}
        try:
            func(*args, **kwargs)
        except exc_type as exc:
            # NOTE(review): ``exc.message`` is a Python 2 idiom; under
            # Python 3 this attribute does not exist on most exceptions --
            # confirm intended behaviour when msg is not None.
            if msg is not None and exc.message != msg:
                return
            self.fail('{} failed with {}'.format(func, exc))
    def test_process_gr(self):
        """Tests the bug when GenericRelation field being processed
        by autofixture.base.AutoFixtureBase#process_m2m
        and through table appears as None.
        """
        count = 10
        fixture = AutoFixture(GRModel)
        self.assertNotRaises(AttributeError, fixture.create,
                             msg="'NoneType' object has no attribute '_meta'", args=[count])
        self.assertEqual(GRModel.objects.count(), count)
class TestShortcuts(FileSystemCleanupTestCase):
    """Tests for the module-level create/create_one shortcut helpers."""

    def test_commit_kwarg(self):
        """commit=False returns unsaved instances (pk is None)."""
        unsaved = autofixture.create(BasicModel, 3, commit=False)
        self.assertEqual([obj.pk for obj in unsaved], [None] * 3)
        single = autofixture.create_one(BasicModel, commit=False)
        self.assertIsNone(single.pk)
class TestPreProcess(FileSystemCleanupTestCase):
    """Tests for the pre_process_instance hook."""

    def test_pre_process_instance_not_yet_saved(self):
        """The hook runs before the instance is saved (pk still None)."""
        outer = self

        class TestAutoFixture(AutoFixture):
            def pre_process_instance(self, instance):
                outer.assertIsNone(instance.pk)
                return instance

        TestAutoFixture(BasicModel).create_one()
        self.assertEqual(BasicModel.objects.count(), 1)

    def test_pre_process_has_effect(self):
        """Changes made by the hook end up on the created instance."""
        expected_string = generators.LoremGenerator(max_length=50)()

        class TestAutoFixture(AutoFixture):
            def pre_process_instance(self, instance):
                instance.name = expected_string
                return instance

        created = TestAutoFixture(SimpleModel).create_one()
        self.assertEqual(created.name, expected_string)
| ad-m/django-autofixture | autofixture_tests/tests/test_base.py | Python | bsd-3-clause | 28,812 |
from Framework.Controller import Controller
from Database.Controllers.Prereq import Prereq as BDPrereq
from Models.Prereq.RespostaListar import RespostaListar
class Prereq(Controller):
    """Controller exposing listing of prerequisite (Prereq) records."""

    def Listar(self, pedido_listar):
        """Return a RespostaListar with prerequisites filtered by discipline
        id and a LIKE pattern built from the request code, paginated by
        quantity/page.

        BUG FIX: the original used PHP-style ``.`` string concatenation
        (``"%".pedido_listar.getCodigo()...."%"``), which is not valid
        Python; the LIKE pattern is now built with ``+``.
        """
        # spaces in the code become SQL wildcards; wrap in % for substring match
        pattern = "%" + pedido_listar.getCodigo().replace(' ', '%') + "%"
        offset = pedido_listar.getQuantidade() * pedido_listar.getPagina()
        rows = BDPrereq().pegarPrereqs(
            "WHERE id_disc_pre = %s AND grupo LIKE %s LIMIT %s OFFSET %s",
            (pedido_listar.getIdDisc_pre(), pattern,
             pedido_listar.getQuantidade(), offset))
        return RespostaListar(rows)
| AEDA-Solutions/matweb | backend/Controllers/Prereq.py | Python | mit | 527 |
# -*- coding: utf-8 -*-
# Copyright (c) 2010 - 2014 Detlev Offenbach <detlev@die-offenbachs.de>
#
"""
Module implementing a widget to show numbers in different formats.
"""
from __future__ import unicode_literals
from PyQt5.QtCore import pyqtSlot, pyqtSignal, Qt, QAbstractTableModel, \
qVersion
from PyQt5.QtWidgets import QWidget, QHeaderView
from E5Gui.E5Application import e5App
from .Ui_NumbersWidget import Ui_NumbersWidget
import UI.PixmapCache
class BinaryModel(QAbstractTableModel):
    """
    Class implementing a single-row table model for entering binary numbers
    bit by bit via checkable cells (most significant bit in column 0).
    """
    def __init__(self, parent=None):
        """
        Constructor

        @param parent reference to the parent widget (QWidget)
        """
        super(BinaryModel, self).__init__(parent)
        self.__bits = 0     # number of bit columns shown
        self.__value = 0    # current integer value represented by the bits

    def rowCount(self, parent):
        """
        Public method to get the number of rows of the model.

        @param parent parent index (QModelIndex)
        @return number of rows; always 1 (integer)
        """
        return 1

    def columnCount(self, parent):
        """
        Public method to get the number of columns of the model.

        @param parent parent index (QModelIndex)
        @return number of columns, i.e. the bit count (integer)
        """
        return self.__bits

    def data(self, index, role=Qt.DisplayRole):
        """
        Public method to get data from the model.

        @param index index to get data for (QModelIndex)
        @param role role of the data to retrieve (integer)
        @return requested data
        """
        if role == Qt.CheckStateRole:
            # NOTE(review): returns the raw bit (0 or 1) rather than
            # Qt.Unchecked/Qt.Checked; setData below relies on its
            # truthiness, so behaviour is kept as-is.
            return (self.__value >> (self.__bits - index.column() - 1)) & 1
        elif role == Qt.DisplayRole:
            return ""
        return None

    def flags(self, index):
        """
        Public method to get flags from the model.

        @param index index to get flags for (QModelIndex)
        @return flags (Qt.ItemFlags)
        """
        return Qt.ItemIsUserCheckable | Qt.ItemIsEnabled | Qt.ItemIsSelectable

    def headerData(self, section, orientation, role=Qt.DisplayRole):
        """
        Public method to get header data from the model.

        @param section section number (integer)
        @param orientation orientation (Qt.Orientation)
        @param role role of the data to retrieve (integer)
        @return requested data
        """
        if orientation == Qt.Horizontal and role == Qt.DisplayRole:
            # label columns with the bit position (MSB first)
            return str(self.__bits - section - 1)
        return QAbstractTableModel.headerData(self, section, orientation, role)

    def setBits(self, bits):
        """
        Public slot to set the number of bits.

        @param bits number of bits to show (integer)
        """
        self.beginResetModel()
        self.__bits = bits
        self.endResetModel()

    def setValue(self, value):
        """
        Public slot to set the value to show.

        @param value value to show (integer)
        """
        self.beginResetModel()
        self.__value = value
        self.endResetModel()

    def setBitsAndValue(self, bits, value):
        """
        Public slot to set the number of bits and the value to show.

        @param bits number of bits to show (integer)
        @param value value to show (integer)
        """
        # FIX: mutate state between beginResetModel()/endResetModel()
        # (consistent with setBits()/setValue()) so attached views never
        # observe the intermediate state.
        self.beginResetModel()
        self.__bits = bits
        self.__value = value
        self.endResetModel()

    def getValue(self):
        """
        Public slot to get the current value.

        @return current value of the model (integer)
        """
        return self.__value

    def setData(self, index, value, role=Qt.EditRole):
        """
        Public method to set the data of a node cell.

        @param index index of the node cell (QModelIndex)
        @param value value to be set
        @param role role of the data (integer)
        @return flag indicating success (boolean)
        """
        if role == Qt.CheckStateRole:
            if value == Qt.Checked and not self.data(index, Qt.CheckStateRole):
                # that seems like a hack; Qt 4.6 always sends Qt.Checked
                self.__value |= (1 << self.__bits - index.column() - 1)
            else:
                self.__value &= ~(1 << self.__bits - index.column() - 1)
            self.dataChanged.emit(index, index)
            return True
        return False
class NumbersWidget(QWidget, Ui_NumbersWidget):
    """
    Class implementing a widget to show and convert numbers between binary,
    octal, decimal and hexadecimal representations, including a bit table
    for editing the binary form.

    @signal insertNumber(str) emitted after the user has entered a number
        and selected the number format
    """
    insertNumber = pyqtSignal(str)
    def __init__(self, parent=None):
        """
        Constructor

        @param parent reference to the parent widget (QWidget)
        """
        super(NumbersWidget, self).__init__(parent)
        self.setupUi(self)
        self.setWindowIcon(UI.PixmapCache.getIcon("eric.png"))
        # style sheet applied to an edit holding an invalid number
        self.__badNumberSheet = "background-color: #ffa0a0;"
        # down arrows pull a number from the editor; up arrows push one back
        self.binInButton.setIcon(UI.PixmapCache.getIcon("2downarrow.png"))
        self.binOutButton.setIcon(UI.PixmapCache.getIcon("2uparrow.png"))
        self.octInButton.setIcon(UI.PixmapCache.getIcon("2downarrow.png"))
        self.octOutButton.setIcon(UI.PixmapCache.getIcon("2uparrow.png"))
        self.decInButton.setIcon(UI.PixmapCache.getIcon("2downarrow.png"))
        self.decOutButton.setIcon(UI.PixmapCache.getIcon("2uparrow.png"))
        self.hexInButton.setIcon(UI.PixmapCache.getIcon("2downarrow.png"))
        self.hexOutButton.setIcon(UI.PixmapCache.getIcon("2uparrow.png"))
        # item data is the conversion base (0 means automatic detection)
        self.formatBox.addItem(self.tr("Auto"), 0)
        self.formatBox.addItem(self.tr("Dec"), 10)
        self.formatBox.addItem(self.tr("Hex"), 16)
        self.formatBox.addItem(self.tr("Oct"), 8)
        self.formatBox.addItem(self.tr("Bin"), 2)
        # item data is the bit size
        self.sizeBox.addItem("8", 8)
        self.sizeBox.addItem("16", 16)
        self.sizeBox.addItem("32", 32)
        self.sizeBox.addItem("64", 64)
        # current input value, its validity and its byte width
        self.__input = 0
        self.__inputValid = True
        self.__bytes = 1
        # model driving the checkable binary bit table
        self.__model = BinaryModel(self)
        self.binTable.setModel(self.__model)
        if qVersion() >= "5.0.0":
            self.binTable.horizontalHeader().setSectionResizeMode(
                QHeaderView.ResizeToContents)
        else:
            # Qt4 fallback: setResizeMode was renamed in Qt5
            self.binTable.horizontalHeader().setResizeMode(
                QHeaderView.ResizeToContents)
        self.__model.setBitsAndValue(self.__bytes * 8, self.__input)
        self.__model.dataChanged.connect(self.__binModelDataChanged)
def __formatNumbers(self, format):
"""
Private method to format the various number inputs.
@param format number format indicator (integer)
"""
self.__block(True)
self.binEdit.setStyleSheet("")
self.octEdit.setStyleSheet("")
self.decEdit.setStyleSheet("")
self.hexEdit.setStyleSheet("")
# determine byte count
bytes = 8
tmp = self.__input
for i in range(8):
c = (tmp & 0xff00000000000000) >> 7 * 8
if c != 0 and self.__input >= 0:
break
if c != 0xff and self.__input < 0:
break
tmp <<= 8
bytes -= 1
if bytes == 0:
bytes = 1
self.__bytes = bytes
bytesIn = self.sizeBox.itemData(self.sizeBox.currentIndex()) // 8
if bytesIn and bytes > bytesIn:
self.sizeBox.setStyleSheet(self.__badNumberSheet)
else:
self.sizeBox.setStyleSheet("")
# octal
if format != 8:
self.octEdit.setText("{0:0{1}o}".format(self.__input, bytesIn * 3))
# decimal
if format != 10:
self.decEdit.setText("{0:d}".format(self.__input))
# hexadecimal
if format != 16:
self.hexEdit.setText("{0:0{1}x}".format(self.__input, bytesIn * 2))
# octal
if format != 8:
self.octEdit.setText("{0:0{1}o}".format(self.__input, bytesIn * 3))
# binary
if format != 2:
num = "{0:0{1}b}".format(self.__input, bytesIn * 8)
self.binEdit.setText(num)
self.__model.setBitsAndValue(len(self.binEdit.text()), self.__input)
self.__block(False)
def __block(self, b):
"""
Private slot to block some signals.
@param b flah indicating the blocking state (boolean)
"""
self.hexEdit.blockSignals(b)
self.decEdit.blockSignals(b)
self.octEdit.blockSignals(b)
self.binEdit.blockSignals(b)
self.binTable.blockSignals(b)
    @pyqtSlot(int)
    def on_sizeBox_valueChanged(self, value):
        """
        Private slot handling a change of the bit size.

        @param value selected bit size (integer)
        """
        # reformat all representations from the unchanged decimal input
        self.__formatNumbers(10)
@pyqtSlot()
def on_byteOrderButton_clicked(self):
"""
Private slot to swap the byte order.
"""
bytesIn = self.sizeBox.itemData(self.sizeBox.currentIndex()) // 8
if bytesIn == 0:
bytesIn = self.__bytes
tmp1 = self.__input
tmp2 = 0
for i in range(bytesIn):
tmp2 <<= 8
tmp2 |= tmp1 & 0xff
tmp1 >>= 8
self.__input = tmp2
self.__formatNumbers(0)
    @pyqtSlot()
    def on_binInButton_clicked(self):
        """
        Private slot to retrieve a binary number from the current editor.
        """
        number = e5App().getObject("ViewManager").getNumber()
        if number == "":
            return
        self.binEdit.setText(number)
        self.binEdit.setFocus()
@pyqtSlot(str)
def on_binEdit_textChanged(self, txt):
    """
    Private slot to handle input of a binary number.

    @param txt text entered (string)
    """
    try:
        value = int(txt, 2)
    except ValueError:
        # Not a valid binary literal: remember the state and flag the
        # field, leaving the last good value untouched.
        self.__inputValid = False
        self.binEdit.setStyleSheet(self.__badNumberSheet)
    else:
        self.__input = value
        self.__inputValid = True
        self.__formatNumbers(2)
@pyqtSlot()
def on_binOutButton_clicked(self):
    """
    Private slot to send a binary number.
    """
    # Forward the current text to listeners (e.g. the active editor).
    self.insertNumber.emit(self.binEdit.text())
def __binModelDataChanged(self, start, end):
    """
    Private slot to handle a change of the binary model value by the user.

    @param start start index (QModelIndex)
    @param end end index (QModelIndex)
    """
    val = self.__model.getValue()
    bytesIn = self.sizeBox.itemData(self.sizeBox.currentIndex()) // 8
    # Re-render the value as a zero-padded binary string of the selected
    # width (8 bits per byte).
    num = "{0:0{1}b}".format(val, bytesIn * 8)
    self.binEdit.setText(num)
@pyqtSlot()
def on_octInButton_clicked(self):
    """
    Private slot to retrieve an octal number from the current editor.
    """
    # Ask the view manager for the number at the editor's cursor; an
    # empty string means nothing suitable was found.
    number = e5App().getObject("ViewManager").getNumber()
    if number == "":
        return
    self.octEdit.setText(number)
    self.octEdit.setFocus()
@pyqtSlot(str)
def on_octEdit_textChanged(self, txt):
    """
    Private slot to handle input of an octal number.

    @param txt text entered (string)
    """
    try:
        value = int(txt, 8)
    except ValueError:
        # Not a valid octal literal: remember the state and flag the
        # field, leaving the last good value untouched.
        self.__inputValid = False
        self.octEdit.setStyleSheet(self.__badNumberSheet)
    else:
        self.__input = value
        self.__inputValid = True
        self.__formatNumbers(8)
@pyqtSlot()
def on_octOutButton_clicked(self):
    """
    Private slot to send an octal number.
    """
    # Forward the current text to listeners (e.g. the active editor).
    self.insertNumber.emit(self.octEdit.text())
@pyqtSlot()
def on_decInButton_clicked(self):
    """
    Private slot to retrieve a decimal number from the current editor.
    """
    # Ask the view manager for the number at the editor's cursor; an
    # empty string means nothing suitable was found.
    number = e5App().getObject("ViewManager").getNumber()
    if number == "":
        return
    self.decEdit.setText(number)
    self.decEdit.setFocus()
@pyqtSlot(str)
def on_decEdit_textChanged(self, txt):
    """
    Private slot to handle input of a decimal number.

    @param txt text entered (string)
    """
    try:
        value = int(txt, 10)
    except ValueError:
        # Not a valid decimal literal: remember the state and flag the
        # field, leaving the last good value untouched.
        self.__inputValid = False
        self.decEdit.setStyleSheet(self.__badNumberSheet)
    else:
        self.__input = value
        self.__inputValid = True
        self.__formatNumbers(10)
@pyqtSlot()
def on_decOutButton_clicked(self):
    """
    Private slot to send a decimal number.
    """
    # Forward the current text to listeners (e.g. the active editor).
    self.insertNumber.emit(self.decEdit.text())
@pyqtSlot()
def on_hexInButton_clicked(self):
    """
    Private slot to retrieve a hexadecimal number from the current editor.
    """
    # Ask the view manager for the number at the editor's cursor; an
    # empty string means nothing suitable was found.
    number = e5App().getObject("ViewManager").getNumber()
    if number == "":
        return
    self.hexEdit.setText(number)
    self.hexEdit.setFocus()
@pyqtSlot(str)
def on_hexEdit_textChanged(self, txt):
    """
    Private slot to handle input of a hexadecimal number.

    @param txt text entered (string)
    """
    try:
        value = int(txt, 16)
    except ValueError:
        # Not a valid hexadecimal literal: remember the state and flag
        # the field, leaving the last good value untouched.
        self.__inputValid = False
        self.hexEdit.setStyleSheet(self.__badNumberSheet)
    else:
        self.__input = value
        self.__inputValid = True
        self.__formatNumbers(16)
@pyqtSlot()
def on_hexOutButton_clicked(self):
    """
    Private slot to send a hexadecimal number.
    """
    # Forward the current text to listeners (e.g. the active editor).
    self.insertNumber.emit(self.hexEdit.text())
| davy39/eric | UI/NumbersWidget.py | Python | gpl-3.0 | 14,344 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-08-17 17:23
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    # Renames the model from the plural "Fotos" to the singular "Foto";
    # Django renames the underlying database table accordingly.

    dependencies = [
        ('rest_api', '0007_auto_20170817_1635'),
    ]

    operations = [
        migrations.RenameModel(
            old_name='Fotos',
            new_name='Foto',
        ),
    ]
| alexiwamoto/django-rest-api | rest_api/migrations/0008_auto_20170817_1723.py | Python | mit | 393 |
from __future__ import with_statement
try:
from io import BytesIO
except ImportError:
from StringIO import StringIO as BytesIO
from flask import render_template, request
from wtforms import StringField, FieldList
from flask_wtf import Form
from flask_wtf.file import FileField
from flask_wtf.file import file_required, file_allowed
from .base import TestCase
class UploadSet(object):
    """Minimal stand-in for flask-uploads' UploadSet used by these tests."""

    def __init__(self, name='files', extensions=None):
        self.name = name
        self.extensions = extensions

    def file_allowed(self, storage, basename):
        """Return True if *basename* carries an allowed extension.

        When no extension list was configured, every file is accepted.
        """
        if not self.extensions:
            return True
        return basename.rsplit('.', 1)[-1] in self.extensions
# Shared upload set accepting only common image extensions.
images = UploadSet('images', ['jpg', 'png'])
class FileUploadForm(Form):
    # Plain upload field without validators: anything (or nothing) passes.
    upload = FileField("Upload file")
class MultipleFileUploadForm(Form):
    # Field list pre-populated with three upload slots.
    uploads = FieldList(FileField("upload"), min_entries=3)
class ImageUploadForm(Form):
    # Requires a file and restricts it to the extensions of the shared
    # ``images`` upload set (jpg/png).
    upload = FileField("Upload file",
                       validators=[file_required(),
                                   file_allowed(images)])
class TextUploadForm(Form):
    # Requires a file and restricts it to plain-text uploads (.txt).
    upload = FileField("Upload file",
                       validators=[file_required(),
                                   file_allowed(['txt'])])
class TestFileUpload(TestCase):
    """Functional tests for single and multiple file upload validation."""

    def create_app(self):
        # CSRF is disabled so the test client can POST without a token.
        app = super(TestFileUpload, self).create_app()
        app.config['WTF_CSRF_ENABLED'] = False

        @app.route("/upload-image/", methods=("POST",))
        def upload_image():
            # Accepts only required jpg/png uploads (see ImageUploadForm).
            form = ImageUploadForm()
            if form.validate_on_submit():
                return "OK"
            return "invalid"

        @app.route("/upload-text/", methods=("POST",))
        def upload_text():
            # Accepts only required .txt uploads (see TextUploadForm).
            form = TextUploadForm()
            if form.validate_on_submit():
                return "OK"
            return "invalid"

        @app.route("/upload-multiple/", methods=("POST",))
        def upload_multiple():
            # Expects exactly three populated upload slots.
            form = MultipleFileUploadForm()
            if form.validate_on_submit():
                assert len(form.uploads.entries) == 3
                for upload in form.uploads.entries:
                    assert upload.has_file()
                return "OK"
            # NOTE(review): nothing is returned when validation fails;
            # presumably only valid data is ever POSTed here -- confirm.

        @app.route("/upload/", methods=("POST",))
        def upload():
            # Echoes the uploaded file's data back through a template.
            form = FileUploadForm()
            if form.validate_on_submit():
                filedata = form.upload.data
            else:
                filedata = None
            return render_template("upload.html",
                                   filedata=filedata,
                                   form=form)

        return app

    def test_multiple_files(self):
        # Three copies of the same resource, keyed the way FieldList
        # expects its entries (uploads-0, uploads-1, uploads-2).
        fps = [self.app.open_resource("flask.png") for i in range(3)]
        data = [("uploads-%d" % i, fp) for i, fp in enumerate(fps)]
        response = self.client.post("/upload-multiple/", data=dict(data))
        assert response.status_code == 200

    def test_valid_file(self):
        with self.app.open_resource("flask.png") as fp:
            response = self.client.post(
                "/upload-image/",
                data={'upload': fp}
            )

        assert b'OK' in response.data

    def test_missing_file(self):
        # A plain string is not a file upload, so file_required() fails.
        response = self.client.post(
            "/upload-image/",
            data={'upload': "test"}
        )

        assert b'invalid' in response.data

    def test_invalid_file(self):
        # A png posted to the text-only endpoint must be rejected.
        with self.app.open_resource("flask.png") as fp:
            response = self.client.post(
                "/upload-text/",
                data={'upload': fp}
            )

        assert b'invalid' in response.data

    def test_invalid_file_2(self):
        # A filename string (not an actual upload) yields no file data.
        response = self.client.post(
            "/upload/",
            data={'upload': 'flask.png'}
        )

        assert b'flask.png</h3>' not in response.data

    def test_valid_txt_file(self):
        with self.app.open_resource("flask.txt") as fp:
            response = self.client.post(
                "/upload-text/",
                data={'upload': fp}
            )

        assert b'OK' in response.data

    def test_invalid_image_file(self):
        # A txt file posted to the image-only endpoint must be rejected.
        with self.app.open_resource("flask.txt") as fp:
            response = self.client.post(
                "/upload-image/",
                data={'upload': fp}
            )

        assert b'invalid' in response.data
class BrokenForm(Form):
    # Mixes text and file field lists to exercise combined multipart
    # form data (historically a problematic case, hence the name).
    text_fields = FieldList(StringField())
    file_fields = FieldList(FileField())
# Form data keyed the way FieldList expects its entries (name-0, name-1, ...).
text_data = [('text_fields-0', 'First input'),
             ('text_fields-1', 'Second input')]

# File payloads as (stream, filename) tuples, the format the test client
# converts into multipart uploads.
file_data = [('file_fields-0', (BytesIO(b'contents 0'), 'file0.txt')),
             ('file_fields-1', (BytesIO(b'contents 1'), 'file1.txt'))]
class TestFileList(TestCase):
    """Checks that text and file field lists coexist in one request."""

    def test_multiple_upload(self):
        # Combine text inputs and file uploads in a single POST body.
        data = dict(text_data + file_data)
        with self.app.test_request_context(method='POST', data=data):
            assert len(request.files)  # the files have been added to the
            # request
            f = BrokenForm(csrf_enabled=False)
            assert f.validate_on_submit()
            assert len(text_data) == len(f.text_fields)
            assert len(file_data) == len(f.file_fields)
| Maxence1/flask-wtf | tests/test_uploads.py | Python | bsd-3-clause | 5,211 |
import simplejson
from django.contrib.auth.decorators import login_required
from django.http import HttpResponseForbidden
from django.shortcuts import render_to_response
from django.template import RequestContext
from package.models import Package, Category
from importer.importers import import_from_github_acct
@login_required
def import_github(request, template_name="importer/github.html"):
    """Import packages from a GitHub account (superusers only)."""
    if not request.user.is_superuser:
        return HttpResponseForbidden()

    results = []
    if request.method == 'POST':
        # Run the GitHub importer with the parameters from the form.
        results = import_from_github_acct(
            request.POST.get('github_name'),
            request.POST.get('user_type'),
            request.POST.get('category_slug'),
        )

    context = {'results': results,
               'categories': Category.objects.all()}
    return render_to_response(template_name, context,
                              context_instance=RequestContext(request))
| benracine/opencomparison | apps/importer/views.py | Python | mit | 969 |
"""Initial migration
Revision ID: 73b22ccbe472
Revises: fc791d73e762
Create Date: 2017-04-24 09:08:30.923731
"""
# revision identifiers, used by Alembic.
# This revision renames no schema itself; see upgrade()/downgrade() below.
revision = '73b22ccbe472'
down_revision = 'fc791d73e762'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
import residue
# Detect the active database dialect. Outside a live Alembic migration
# context this lookup fails, so fall back to assuming a non-SQLite backend.
try:
    is_sqlite = op.get_context().dialect.name == 'sqlite'
except Exception:  # was a bare except; do not swallow SystemExit et al.
    is_sqlite = False

if is_sqlite:
    # SQLite needs foreign-key enforcement enabled per connection.
    op.get_context().connection.execute('PRAGMA foreign_keys=ON;')
    utcnow_server_default = "(datetime('now', 'utc'))"
else:
    utcnow_server_default = "timezone('utc', current_timestamp)"
def upgrade():
    """Create the attendee_tournament table (tournament sign-up form data)."""
    op.create_table('attendee_tournament',
    sa.Column('id', residue.UUID(), nullable=False),
    sa.Column('first_name', sa.Unicode(), server_default='', nullable=False),
    sa.Column('last_name', sa.Unicode(), server_default='', nullable=False),
    sa.Column('email', sa.Unicode(), server_default='', nullable=False),
    sa.Column('cellphone', sa.Unicode(), server_default='', nullable=False),
    sa.Column('game', sa.Unicode(), server_default='', nullable=False),
    sa.Column('availability', sa.Unicode(), server_default='', nullable=False),
    sa.Column('format', sa.Unicode(), server_default='', nullable=False),
    sa.Column('experience', sa.Unicode(), server_default='', nullable=False),
    sa.Column('needs', sa.Unicode(), server_default='', nullable=False),
    sa.Column('why', sa.Unicode(), server_default='', nullable=False),
    sa.Column('volunteering', sa.Boolean(), server_default='False', nullable=False),
    # NOTE(review): the magic default 239694250 presumably encodes a status
    # enum value defined in the application -- confirm against the model.
    sa.Column('status', sa.Integer(), server_default='239694250', nullable=False),
    sa.PrimaryKeyConstraint('id', name=op.f('pk_attendee_tournament'))
    )
def downgrade():
    """Drop the attendee_tournament table, reverting upgrade()."""
    op.drop_table('attendee_tournament')
| magfest/ubersystem | alembic/versions/73b22ccbe472_initial_migration.py | Python | agpl-3.0 | 1,791 |
#
# pymobiledevice - Jython implementation of libimobiledevice
#
# Copyright (C) 2014 Taconut <https://github.com/Triforce1>
# Copyright (C) 2014 PythEch <https://github.com/PythEch>
# Copyright (C) 2013 GotoHack <https://github.com/GotoHack>
#
# pymobiledevice is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pymobiledevice is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with pymobiledevice. If not, see <http://www.gnu.org/licenses/>.
import sys
import os
from pprint import pprint
from progressbar import ProgressBar
from plist_service import PlistService
class ASRClient(object):
def __init__(self, payloadFile):
self.s = PlistService(12345)
self.size = os.path.getsize(payloadFile)
self.packet_payload_size = 1450
self.f = open(payloadFile, "rb")
def initiate(self, msg):
r = {"Checksum Chunk Size": 131072,
"FEC Slice Stride": 40,
"Packet Payload Size": self.packet_payload_size,
"Packets Per FEC": 25,
"Payload": {"Port": 1, "Size": self.size},
"Stream ID": 1,
"Version": 1
}
print "ASR: init"
self.s.sendPlist(r)
def handle_oob_request(self, msg):
length = msg["OOB Length"]
offset = msg["OOB Offset"]
print "ASR: OOB request off=%d len=%d" % (offset, length)
self.f.seek(offset)
data = self.f.read(length)
self.s.send_raw(data)
def send_payload(self, msg):
self.f.seek(0)
i = self.size
print "ASR: sending payload (%d bytes)" % self.size
pbar = ProgressBar(self.size)
pbar.start()
while i < self.size:
data = self.f.read(self.packet_payload_size)
self.s.send_raw(data)
i += len(data)
pbar.update(i)
pbar.finish()
def work_loop(self):
while True:
msg = self.s.recvPlist()
if not msg:
break
Command = msg["Command"]
pprint(msg)
if Command == "Initiate":
self.initiate(msg)
elif Command == "OOBData":
self.handle_oob_request(msg)
elif Command == "Payload":
self.send_payload(msg)
if __name__ == "__main__":
    # Usage: asr.py <payload file>
    asr = ASRClient(sys.argv[1])
    asr.work_loop()
| PythEch/pymobiledevice | asr.py | Python | lgpl-3.0 | 2,823 |
import contextlib
import os
import shlex
import sys
import threading
import traceback
import types
from mitmproxy import exceptions
from mitmproxy import ctx
from mitmproxy import events
import watchdog.events
from watchdog.observers import polling
def parse_command(command):
    """
    Split a script invocation string into a (path, args) tuple.

    Raises ValueError for empty commands, non-existent files and
    directories.
    """
    if not command or not command.strip():
        raise ValueError("Empty script command.")
    # Windows: escape all backslashes in the path.
    if os.name == "nt":  # pragma: no cover
        backslashes = shlex.split(command, posix=False)[0].count("\\")
        command = command.replace("\\", "\\\\", backslashes)
    parts = shlex.split(command)  # pragma: no cover
    path, args = os.path.expanduser(parts[0]), parts[1:]
    if not os.path.exists(path):
        raise ValueError(
            ("Script file not found: %s.\r\n"
             "If your script path contains spaces, "
             "make sure to wrap it in additional quotes, e.g. -s \"'./foo bar/baz.py' --args\".") %
            path)
    if os.path.isdir(path):
        raise ValueError("Not a file: %s" % path)
    return path, args
def cut_traceback(tb, func_name):
    """
    Cut off a traceback at the function with the given name.
    The func_name's frame is excluded.

    Args:
        tb: traceback object, as returned by sys.exc_info()[2]
        func_name: function name

    Returns:
        Reduced traceback.
    """
    head = tb
    for frame_summary in traceback.extract_tb(tb):
        tb = tb.tb_next
        if frame_summary.name == func_name:
            break
    if tb is None:
        # The function was not found (or was the innermost frame), so keep
        # the full stack trace. This may happen on some Python
        # interpreters/flavors (e.g. PyInstaller).
        return head
    return tb
@contextlib.contextmanager
def scriptenv(path, args):
    """
    Context manager emulating a script's runtime environment: sys.argv is
    replaced by the script's own argv and the script's directory is put on
    sys.path. Exceptions raised by the script are logged, not propagated;
    the environment is always restored afterwards.
    """
    saved_argv = sys.argv
    sys.argv = [path] + args
    sys.path.append(os.path.dirname(os.path.abspath(path)))
    try:
        yield
    except SystemExit as v:
        ctx.log.error("Script exited with code %s" % v.code)
    except Exception:
        etype, value, tb = sys.exc_info()
        # Hide this helper's own frame from the reported trace.
        tb = cut_traceback(tb, "scriptenv").tb_next
        ctx.log.error(
            "Script error: %s" % "".join(
                traceback.format_exception(etype, value, tb)
            )
        )
    finally:
        sys.argv = saved_argv
        sys.path.pop()
def load_script(path, args):
    """
    Compile and execute a script file inside scriptenv(), returning its
    namespace as a SimpleNamespace. Syntax errors are logged and yield
    None.
    """
    with open(path, "rb") as f:
        source = f.read()
    try:
        code = compile(source, path, 'exec')
    except SyntaxError as e:
        ctx.log.error(
            "Script error: %s line %s: %s" % (
                e.filename, e.lineno, e.msg
            )
        )
        return
    ns = {'__file__': os.path.abspath(path)}
    with scriptenv(path, args):
        exec(code, ns)
    return types.SimpleNamespace(**ns)
class ReloadHandler(watchdog.events.FileSystemEventHandler):
    """Watchdog handler invoking *callback* for relevant file changes."""

    def __init__(self, callback):
        self.callback = callback

    def filter(self, event):
        # Ignore directory events and hidden (dot-prefixed) files.
        if event.is_directory:
            return False
        return not os.path.basename(event.src_path).startswith(".")

    def on_modified(self, event):
        if self.filter(event):
            self.callback()

    def on_created(self, event):
        if self.filter(event):
            self.callback()
class Script:
    """
    An addon that manages a single script: loading, running its event
    hooks, and hot-reloading it when the file changes on disk.
    """
    def __init__(self, command):
        self.name = command
        self.command = command
        self.path, self.args = parse_command(command)
        self.ns = None
        self.observer = None
        self.dead = False
        self.last_options = None
        self.should_reload = threading.Event()

        # For every event hook this class does not implement itself,
        # generate a proxy method that forwards the event into the
        # script's namespace.
        for i in events.Events:
            if not hasattr(self, i):
                def mkprox():
                    evt = i  # capture the current event name (avoid late binding)

                    def prox(*args, **kwargs):
                        self.run(evt, *args, **kwargs)
                    return prox
                setattr(self, i, mkprox())

    def run(self, name, *args, **kwargs):
        """Invoke the script's handler *name*, if any, inside scriptenv()."""
        # It's possible for ns to be un-initialised if we failed during
        # configure
        if self.ns is not None and not self.dead:
            func = getattr(self.ns, name, None)
            if func:
                with scriptenv(self.path, self.args):
                    return func(*args, **kwargs)

    def reload(self):
        # Thread-safe request; the actual reload happens on the next tick.
        self.should_reload.set()

    def load_script(self):
        self.ns = load_script(self.path, self.args)
        ret = self.run("start")
        if ret:
            # A script may return a replacement namespace from start();
            # adopt it and give it its own start event.
            self.ns = ret
            self.run("start")

    def tick(self):
        if self.should_reload.is_set():
            self.should_reload.clear()
            ctx.log.info("Reloading script: %s" % self.name)
            self.ns = load_script(self.path, self.args)
            self.start()
            self.configure(self.last_options, self.last_options.keys())
        else:
            self.run("tick")

    def start(self):
        self.load_script()

    def configure(self, options, updated):
        self.last_options = options
        if not self.observer:
            # Watch the script's directory so edits trigger a reload.
            self.observer = polling.PollingObserver()
            # Bind the handler to the real underlying master object
            self.observer.schedule(
                ReloadHandler(self.reload),
                os.path.dirname(self.path) or "."
            )
            self.observer.start()
        self.run("configure", options, updated)

    def done(self):
        self.run("done")
        self.dead = True
class ScriptLoader:
    """
    An addon that manages loading scripts from options.
    """
    def run_once(self, command, flows):
        """Run a script over a list of flows once, replaying each flow's
        event sequence, then tear the script down."""
        try:
            sc = Script(command)
        except ValueError as e:
            raise ValueError(str(e))
        sc.load_script()
        for f in flows:
            for evt, o in events.event_sequence(f):
                sc.run(evt, o)
        sc.done()
        return sc

    def configure(self, options, updated):
        """Synchronise the addon chain with the ``scripts`` option."""
        if "scripts" in updated:
            for s in options.scripts:
                if options.scripts.count(s) > 1:
                    raise exceptions.OptionsError("Duplicate script: %s" % s)

            # Drop scripts that are no longer listed in the options.
            for a in ctx.master.addons.chain[:]:
                if isinstance(a, Script) and a.name not in options.scripts:
                    ctx.log.info("Un-loading script: %s" % a.name)
                    ctx.master.addons.remove(a)

            # The machinations below are to ensure that:
            #   - Scripts remain in the same order
            #   - Scripts are listed directly after the script addon. This is
            #   needed to ensure that interactions with, for instance, flow
            #   serialization remains correct.
            #   - Scripts are not initialized un-necessarily. If only a
            #   script's order in the script list has changed, it should simply
            #   be moved.

            # Pull all existing script addons out of the chain, remembering
            # them by name so they can be re-inserted in option order.
            current = {}
            for a in ctx.master.addons.chain[:]:
                if isinstance(a, Script):
                    current[a.name] = a
                    ctx.master.addons.chain.remove(a)

            ordered = []
            newscripts = []
            for s in options.scripts:
                if s in current:
                    ordered.append(current[s])
                else:
                    ctx.log.info("Loading script: %s" % s)
                    try:
                        sc = Script(s)
                    except ValueError as e:
                        raise exceptions.OptionsError(str(e))
                    ordered.append(sc)
                    newscripts.append(sc)

            # Re-insert the scripts immediately after this loader addon.
            ochain = ctx.master.addons.chain
            pos = ochain.index(self)
            ctx.master.addons.chain = ochain[:pos + 1] + ordered + ochain[pos + 1:]

            # Only freshly created scripts get a startup event.
            for s in newscripts:
                ctx.master.addons.startup(s)
| dwfreed/mitmproxy | mitmproxy/addons/script.py | Python | mit | 8,008 |
# -*- coding:utf-8 -*-
#--
# Copyright (c) 2012-2014 Net-ng.
# All rights reserved.
#
# This software is licensed under the BSD License, as described in
# the file LICENSE.txt, which you should have received as part of
# this distribution.
#--
import json
import os
import uuid
from PIL import Image
from PIL import ImageOps
from nagare import log
from ..assetsmanager import AssetsManager
class SimpleAssetsManager(AssetsManager):
    """Assets manager storing files (and derived image variants) on the
    local filesystem under ``basedir``.

    The original (large) version of an asset lives at ``basedir/<id>``;
    derived sizes are stored next to it as ``<id>.<size>`` and JSON
    metadata as ``<id>.metadata``.
    """

    def __init__(self, config_filename, error, basedir, baseurl, max_size):
        super(SimpleAssetsManager, self).__init__(config_filename, error)
        self.basedir = basedir
        self.baseurl = baseurl
        self.max_size = max_size

    def copy_cover(self, file_id, new_file_id):
        """Duplicate the cover variant of ``file_id`` for ``new_file_id``."""
        try:
            data, metadata = self.load(file_id, 'cover')
            with open(self._get_filename(new_file_id, 'cover'), "w") as f:
                f.write(data)
        except IOError:
            # Cover not existing
            pass

    def copy(self, file_id):
        """Store a copy of an asset and return the new file id."""
        data, metadata = self.load(file_id)
        return self.save(data, metadata=metadata)

    def _get_filename(self, file_id, size=None):
        # 'large' (and None) mean the original file at the bare id; any
        # other size gets a ".<size>" suffix.
        filename = os.path.join(self.basedir, file_id)
        if size and size != 'large':
            filename += '.' + size
        return filename

    def _get_metadata_filename(self, file_id):
        return os.path.join(self.basedir, '%s.metadata' % file_id)

    def save(self, data, file_id=None, metadata=None, THUMB_SIZE=()):
        """Store ``data`` with its JSON metadata; for image files also
        generate medium and thumbnail variants. Returns the file id.

        BUG FIX: ``metadata`` previously defaulted to a shared mutable
        dict ({}); None is now the sentinel, preserving behaviour.
        """
        if metadata is None:
            metadata = {}
        if file_id is None:
            file_id = unicode(uuid.uuid4())
        with open(self._get_filename(file_id), "w") as f:
            f.write(data)
        # Store metadata
        with open(self._get_metadata_filename(file_id), "w") as f:
            f.write(json.dumps(metadata))
        img = None
        try:
            img = Image.open(self._get_filename(file_id))
        except IOError:
            log.info('Not an image file, skipping medium & thumbnail generation')
        else:
            # Store thumbnail & medium, preserving palette transparency.
            kw = {}
            if 'transparency' in img.info:
                kw['transparency'] = img.info["transparency"]
            orig_width, orig_height = img.size
            # Medium keeps the aspect ratio at a fixed width.
            medium_size = self.MEDIUM_WIDTH, int(float(self.MEDIUM_WIDTH) * orig_height / orig_width)
            medium = img.copy()
            medium.thumbnail(medium_size, Image.ANTIALIAS)
            medium.save(self._get_filename(file_id, 'medium'), img.format, quality=75, **kw)
            thumb = ImageOps.fit(img, THUMB_SIZE if THUMB_SIZE else self.THUMB_SIZE, Image.ANTIALIAS)
            thumb.save(self._get_filename(file_id, 'thumb'), img.format, quality=75, **kw)
        return file_id

    def delete(self, file_id):
        """Remove the asset, all its variants and its metadata file."""
        files = [self._get_filename(file_id),
                 self._get_filename(file_id, 'thumb'),
                 self._get_filename(file_id, 'medium'),
                 self._get_filename(file_id, 'cover'),
                 self._get_filename(file_id, 'large'),
                 self._get_metadata_filename(file_id)]
        for f in files:
            try:
                os.remove(f)
            except OSError:  # File does not exist
                pass

    def load(self, file_id, size=None):
        """Return (data, metadata) for the requested size of the asset."""
        filename = self._get_filename(file_id, size)
        with open(filename, "r") as f:
            data = f.read()
        return data, self.get_metadata(file_id)

    def update_metadata(self, file_id, metadata):
        """Overwrite the asset's JSON metadata."""
        with open(self._get_metadata_filename(file_id), "w") as f:
            metadata = f.write(json.dumps(metadata))

    def get_metadata(self, file_id):
        """Return the stored metadata dict, or {} when unreadable."""
        try:
            # BUG FIX: the file handle previously leaked when json.loads
            # raised; the context manager guarantees it is closed.
            with open(self._get_metadata_filename(file_id), "r") as f:
                metadata = json.loads(f.read())
        except IOError:
            log.error('unable to load metadata for ' + self._get_metadata_filename(file_id))
            metadata = {}
        return metadata

    def get_image_size(self, fileid):
        """Return the image dimensions

        In:
            - ``fileid`` -- file identifier
        Return:
            - a tuple representing the image dimensions (width, height) or (None, None) in case of error
        """
        dim = (None, None)
        try:
            dim = Image.open(self._get_filename(fileid)).size
        except Exception:  # was a bare except; keep KeyboardInterrupt alive
            log.error('Could not get image dimensions of %r', fileid, exc_info=True)
        return dim

    def get_image_url(self, file_id, size=None, include_filename=True):
        """Return an image significant URL

        In:
            - ``file_id`` -- file identifier
            - ``size`` -- size to get (thumb, medium, cover, large)
            - ``include_filename`` -- add the filename to the URL or not
        Return:
            - image significant URL
        """
        if self.baseurl:
            url = [self.baseurl, self.get_entry_name(), file_id, size or 'large']
        else:
            url = ['', self.get_entry_name(), file_id, size or 'large']
        if include_filename:
            url.append(self.get_metadata(file_id)['filename'])
        return '/'.join(url)

    def create_cover(self, file_id, left, top, width, height):
        """Create the cover version for a file

        In :
            - ``file_id`` -- The asset to make as cover
            - ``left`` -- Left coordinate of the crop origin
            - ``top`` -- Top coordinate of the crop origin
            - ``width`` -- Crop width
            - ``height`` -- Crop height
        """
        # The crop dimensions are given for the medium version.
        # Calculate them for the large version
        medium_img = Image.open(self._get_filename(file_id, 'medium'))
        medium_w, medium_h = medium_img.size
        large_img = Image.open(self._get_filename(file_id))
        large_w, large_h = large_img.size
        kw = {}
        if 'transparency' in large_img.info:
            kw['transparency'] = large_img.info["transparency"]
        left, width = int(float(left) * large_w / medium_w), int(float(width) * large_w / medium_w)
        top, height = int(float(top) * large_h / medium_h), int(float(height) * large_h / medium_h)
        n_img = large_img.crop((left, top, left + width, top + height))
        n_img.thumbnail(self.COVER_SIZE, Image.ANTIALIAS)
        n_img.save(self._get_filename(file_id, 'cover'), large_img.format, quality=75, **kw)
| Net-ng/kansha | kansha/services/simpleassetsmanager/simpleassetsmanager.py | Python | bsd-3-clause | 6,401 |
# RUN: %python -m artiq.compiler.testbench.embedding %s
from artiq.experiment import *
class MyClass:
    """Simple attribute bag: every keyword argument becomes an attribute."""

    def __init__(self, **kwargs):
        for name in kwargs:
            setattr(self, name, kwargs[name])
# Module-level list of host objects passed into the kernel below.
sl = [MyClass(x=1), MyClass(x=2)]
@kernel
def bug(l):
    # Iterates a list of host objects and reads an attribute inside a
    # kernel -- presumably the regression exercised by this test (the
    # file name suggests issue #477; confirm against the tracker).
    for c in l:
        print(c.x)
@kernel
def entrypoint():
    # Kernel entry point invoked by the embedding test bench.
    bug(sl)
| JQIamo/artiq | artiq/test/lit/embedding/bug_477.py | Python | lgpl-3.0 | 337 |
# -*- coding: utf-8 -*-
#
# Copyright 2014-2015 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Parser for BigMLer
"""
from __future__ import absolute_import
import argparse
import pkg_resources
from bigmler.options.common import get_common_options
from bigmler.options.delete import get_delete_options
from bigmler.options.source import get_source_options
from bigmler.options.dataset import get_dataset_options
from bigmler.options.test import get_test_options
from bigmler.options.multilabel import get_multi_label_options
from bigmler.options.main import get_main_options
from bigmler.options.analyze import get_analyze_options
from bigmler.options.cluster import get_cluster_options
from bigmler.options.anomaly import get_anomaly_options
from bigmler.options.sample import get_sample_options
from bigmler.options.report import get_report_options
# Known bigmler subcommands; the first one ("main") is the default.
SUBCOMMANDS = ["main", "analyze", "cluster", "anomaly", "sample",
               "delete", "report"]
MAIN = SUBCOMMANDS[0]
def parser_add_options(parser, options):
    """Attach every option in *options* to the given subcommand parser.

    *options* maps each flag string to the keyword arguments for
    ``argparse.ArgumentParser.add_argument``.
    """
    for flag, properties in options.items():
        parser.add_argument(flag, **properties)
def create_parser(general_defaults=None, constants=None, subcommand=MAIN):
    """Sets the accepted command options, variables, defaults and help

    Returns a (main_parser, chained_options) tuple.

    BUG FIX: ``general_defaults`` and ``constants`` previously defaulted
    to shared mutable dicts; since ``constants`` is mutated below, state
    leaked across calls. None is now the sentinel.
    NOTE(review): the ``subcommand`` parameter is shadowed by the loop
    over SUBCOMMANDS below and appears unused -- confirm with callers.
    """
    if general_defaults is None:
        general_defaults = {}
    if constants is None:
        constants = {}
    defaults = general_defaults['BigMLer']
    version = pkg_resources.require("BigMLer")[0].version
    version_text = """\
BigMLer %s - A Higher Level API to BigML's API
Copyright 2012-2015 BigML
Licensed under the Apache License, Version 2.0 (the \"License\"); you may
not use this file except in compliance with the License. You may obtain
a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations
under the License.""" % version
    constants['version_text'] = version_text
    main_parser = argparse.ArgumentParser(
        description="A higher level API to BigML's API.",
        epilog="Happy predictive modeling!",
        formatter_class=argparse.RawTextHelpFormatter)
    main_parser.add_argument('--version',
                             action='version', version=version_text)
    subparsers = main_parser.add_subparsers()

    # list of options shared by several subcommands
    common_options = get_common_options(defaults=defaults, constants=constants)
    delete_options = get_delete_options(defaults=defaults)
    source_options = get_source_options(defaults=defaults)
    dataset_options = get_dataset_options(defaults=defaults)
    test_options = get_test_options(defaults=defaults)
    multi_label_options = get_multi_label_options(defaults=defaults)

    # subcommand options
    subcommand_options = {}
    # specific options
    subcommand_options["main"] = get_main_options(defaults=defaults,
                                                  constants=constants)
    # general options
    subcommand_options["main"].update(common_options)
    subcommand_options["main"].update(source_options)
    subcommand_options["main"].update(dataset_options)
    subcommand_options["main"].update(multi_label_options)
    subcommand_options["main"].update(test_options)
    subcommand_options["main"].update({
        '--source-tag': delete_options['--source-tag'],
        '--dataset-tag': delete_options['--dataset-tag'],
        '--model-tag': delete_options['--model-tag'],
        '--ensemble-tag': delete_options['--ensemble-tag'],
        '--prediction-tag': delete_options['--prediction-tag'],
        '--batch-prediction-tag': delete_options['--batch-prediction-tag']})
    main_options = subcommand_options["main"]

    defaults = general_defaults["BigMLer analyze"]
    subcommand_options["analyze"] = get_analyze_options(defaults=defaults)
    subcommand_options["analyze"].update(common_options)
    # we add the options that should be transmitted to bigmler main subcommands
    # in analyze
    subcommand_options["analyze"].update({
        '--objective': main_options['--objective'],
        '--max-parallel-models': main_options['--max-parallel-models'],
        '--max-parallel-evaluations': main_options[
            '--max-parallel-evaluations'],
        '--model-fields': main_options['--model-fields'],
        '--balance': main_options['--balance'],
        '--no-balance': main_options['--no-balance'],
        '--number-of-models': main_options['--number-of-models'],
        '--sample-rate': main_options['--sample-rate'],
        '--missing-splits': main_options['--missing-splits'],
        '--pruning': main_options['--pruning'],
        '--weight-field': main_options['--weight-field'],
        '--replacement': main_options['--replacement'],
        '--objective-weights': main_options['--objective-weights'],
        '--model-attributes': main_options['--model-attributes'],
        '--ensemble-attributes': main_options['--ensemble-attributes'],
        '--tlp': main_options['--tlp'],
        '--randomize': main_options['--randomize'],
        '--no-csv': main_options['--no-csv'],
        '--no-no-csv': main_options['--no-no-csv'],
        '--to-dataset': main_options['--to-dataset']})

    defaults = general_defaults["BigMLer cluster"]
    subcommand_options["cluster"] = get_cluster_options(defaults=defaults)
    # general options
    subcommand_options["cluster"].update(common_options)
    subcommand_options["cluster"].update(source_options)
    subcommand_options["cluster"].update(dataset_options)
    subcommand_options["cluster"].update(test_options)
    subcommand_options["cluster"].update({
        '--cpp': main_options['--cpp'],
        '--fields-map': main_options['--fields-map'],
        '--source-tag': delete_options['--source-tag'],
        '--dataset-tag': delete_options['--dataset-tag'],
        '--cluster-tag': delete_options['--cluster-tag'],
        '--centroid-tag': delete_options['--centroid-tag'],
        '--batch-centroid-tag': delete_options['--batch-centroid-tag'],
        '--prediction-info': main_options['--prediction-info'],
        '--prediction-header': main_options['--prediction-header'],
        '--prediction-fields': main_options['--prediction-fields'],
        '--reports': main_options['--reports'],
        '--remote': main_options['--remote'],
        '--no-batch': main_options['--no-batch'],
        '--no-csv': main_options['--no-csv'],
        '--no-no-csv': main_options['--no-no-csv'],
        '--to-dataset': main_options['--to-dataset']})

    defaults = general_defaults["BigMLer anomaly"]
    subcommand_options["anomaly"] = get_anomaly_options(defaults=defaults)
    # general options
    subcommand_options["anomaly"].update(common_options)
    subcommand_options["anomaly"].update(source_options)
    subcommand_options["anomaly"].update(dataset_options)
    subcommand_options["anomaly"].update(test_options)
    subcommand_options["anomaly"].update({
        '--cpp': main_options['--cpp'],
        '--fields-map': main_options['--fields-map'],
        '--source-tag': delete_options['--source-tag'],
        '--dataset-tag': delete_options['--dataset-tag'],
        '--anomaly-tag': delete_options['--anomaly-tag'],
        '--anomaly-score-tag': delete_options['--anomaly-score-tag'],
        '--batch-anomaly-score-tag': delete_options['--batch-anomaly-score-tag'],
        '--prediction-info': main_options['--prediction-info'],
        '--prediction-header': main_options['--prediction-header'],
        '--prediction-fields': main_options['--prediction-fields'],
        '--reports': main_options['--reports'],
        '--remote': main_options['--remote'],
        '--no-batch': main_options['--no-batch'],
        '--no-csv': main_options['--no-csv'],
        '--no-no-csv': main_options['--no-no-csv'],
        '--to-dataset': main_options['--to-dataset']})

    defaults = general_defaults["BigMLer sample"]
    subcommand_options["sample"] = get_sample_options(defaults=defaults)
    # general options
    subcommand_options["sample"].update(common_options)
    subcommand_options["sample"].update(source_options)
    subcommand_options["sample"].update(dataset_options)
    subcommand_options["sample"].update({
        '--cpp': main_options['--cpp'],
        '--source-tag': delete_options['--source-tag'],
        '--dataset-tag': delete_options['--dataset-tag'],
        '--sample-tag': delete_options['--sample-tag'],
        '--reports': main_options['--reports']})

    subcommand_options["delete"] = delete_options
    subcommand_options["delete"].update(common_options)

    defaults = general_defaults["BigMLer report"]
    subcommand_options["report"] = get_report_options(defaults=defaults)

    # Register one subparser per subcommand with its assembled options.
    for subcommand in SUBCOMMANDS:
        subparser = subparsers.add_parser(subcommand)
        parser_add_options(subparser, subcommand_options[subcommand])

    # options to be transmitted from analyze to main
    chained_options = [
        "--debug", "--dev", "--username", "--api-key", "--resources-log",
        "--store", "--clear-logs", "--max-parallel-models",
        "--max-parallel-evaluations", "--objective", "--tag",
        "--no-tag", "--no-debug", "--no-dev", "--model-fields", "--balance",
        "--verbosity", "--resume", "--stack_level", "--no-balance",
        "--args-separator", "--name"]

    return main_parser, chained_options
| brokendata/bigmler | bigmler/parser.py | Python | apache-2.0 | 10,005 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.test import TestCase
from django.contrib.auth.models import User
from django.db.utils import IntegrityError
class BorrowerTestCase(TestCase):
    '''
    Tests for the Borrower profile attached to each User.

    A Borrower row is expected to exist for every new User (presumably
    created by a post-save signal -- confirm in the loans app models),
    carrying an optional ``telephone_number`` field.

    Fix: the per-test explanations were written as stray class-level
    string statements (no-ops) instead of docstrings on the test methods;
    they are now proper method docstrings.
    '''

    def test_model_create(self):
        '''New 'phone number' field should be valid and optional.'''
        john = User.objects.create(first_name="John", last_name="Doe")
        # If we don't set phone number, it should be an empty string
        self.assertEqual(str(john.borrower.telephone_number), '')
        # Once set, the number is normalised (spaces stripped, E.164 style)
        john.borrower.telephone_number = '+44 7762 25 4775'
        self.assertEqual(str(john.borrower.telephone_number), '+447762254775')
        john.save()

    def test_telephone_format(self):
        '''Only valid phone numbers should be accepted.'''
        # No extension | Uses PHONENUMBER_DEFAULT_REGION in settings.py
        john = User.objects.create(first_name="John", last_name="Doe")
        john.borrower.telephone_number = '7762 25 4775'
        john.save()
        self.assertEqual(str(john.borrower.telephone_number), '+447762254775')
        # Invalid numbers should not be accepted
        john.borrower.telephone_number = '+00 7762 25 4775 7762 25 4775'
        john.save()
        self.assertEqual(str(john.borrower.telephone_number), '')
        # International numbers should be accepted
        john.borrower.telephone_number = '+40 745 497 778'
        john.save()
        self.assertEqual(str(john.borrower.telephone_number), '+40745497778')
| Amandil/django-tech-test | loans/tests/tests_model_borrower.py | Python | bsd-3-clause | 1,588 |
# Copyright (c) 2011-2013 Mick Thomure
# All rights reserved.
#
# Please see the file LICENSE.txt in this distribution for usage terms.
import unittest
import cPickle as pickle
from .callback import *
class C(object):
  """Fixture class for exercising callbacks on bound and class methods."""

  def __init__(self, arg=None):
    self.arg = arg

  def __eq__(self, other):
    # Equal only when types match exactly and payloads compare equal.
    if type(self) != type(other):
      return False
    return self.arg == other.arg

  def c_method(self, *args, **kw):
    # Echo back the receiver state and the call arguments.
    return ("C.method", self.arg, args, kw)

  @classmethod
  def c_class_method(cls, *args, **kw):
    # Echo back the call arguments (no instance state involved).
    return ("C.class_method", args, kw)
def test_function(*args, **kw):
  """Module-level fixture: echo the call arguments back to the caller."""
  result = ("test_function", args, kw)
  return result
def PicklingCallback(f, *args, **kw):
  """Build a Callback and round-trip it through pickle (protocol 2).

  Used by the tests below to verify that curried callbacks survive
  serialization.
  """
  packed = pickle.dumps(Callback(f, *args, **kw), protocol=2)
  return pickle.loads(packed)
def _test(tester, f, callback_func = None):
if callback_func is None:
callback_func = Callback
tester.assertEqual(f, callback_func(f).f)
tester.assertEqual(f(), callback_func(f)())
tester.assertEqual(f(1, 2), callback_func(f)(1, 2))
tester.assertEqual(f(1, 2), callback_func(f, 1, 2)())
tester.assertEqual(f(1, 2, 3, 4), callback_func(f, 1, 2)(3, 4))
tester.assertEqual(f(key1 = 1, key2 = 2), callback_func(f)(key1 = 1,
key2 = 2))
tester.assertEqual(f(key1 = 1, key2 = 2), callback_func(f, key1 = 1,
key2 = 2)())
tester.assertEqual(f(key1 = 1, key2 = 2, key3 = 3, key4 = 4), callback_func(f,
key1 = 1, key2 = 2)(key3 = 3, key4 = 4))
# Make sure repr works with args and keywords
tester.assertIsNotNone(repr(callback_func(f, 1, 2, key1 = 1, key2 = 2)))
class CallbackTest(unittest.TestCase):
  """Unit tests for the Callback wrapper (direct calls and pickling)."""
  def testCallFunction(self):
    # Plain module-level function can be wrapped and called.
    _test(self, test_function)
  def testCallBoundMethod(self):
    # Bound method: wrapped callback must preserve instance state.
    c = C('obj-arg')
    _test(self, c.c_method)
  def testCallClassMethod(self):
    _test(self, C.c_class_method)
  def testLambdaRaisesError(self):
    # Lambdas have no importable name, so pickling must be refused.
    with self.assertRaises(pickle.PicklingError):
      Callback(lambda: None).__getstate__()
  def testUnboundMethodRaisesError(self):
    # Unbound methods (Python 2) are likewise rejected at pickle time.
    with self.assertRaises(pickle.PicklingError):
      Callback(C.c_method).__getstate__()
  def testPickleFunction(self):
    # Same behavioural checks, but through a pickle round-trip.
    _test(self, test_function, PicklingCallback)
  def testPickleBoundMethod(self):
    c = C('notnone')
    _test(self, c.c_method, PicklingCallback)
  def testPickleClassMethod(self):
    _test(self, C.c_class_method, PicklingCallback)
if __name__ == '__main__':
  unittest.main()
| mthomure/glimpse-project | glimpse/util/callback_test.py | Python | mit | 2,367 |
# -*- coding: utf-8 -*-
# -*- encoding: utf-8 -*-
#############################################################################
#
# Copyright (c) 2007 Martin Reisenhofer <martin.reisenhofer@funkring.net>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv
from openerp.tools.translate import _
class wizard_multi_charts_accounts(osv.osv_memory):
    # Austrian (l10n_at) extension of the chart-of-accounts setup wizard:
    # pre-selects default bank/cash accounts and assigns localized journal
    # codes (BNK/KAS) to the generated journals.
    def _create_bank_journals_from_o2m(self, cr, uid, obj_wizard, company_id, acc_template_ref, context=None):
        '''
        This function creates bank journals and its accounts for each line encoded in the field bank_accounts_id of the
        wizard.

        :param obj_wizard: the current wizard that generates the COA from the templates.
        :param company_id: the id of the company for which the wizard is running.
        :param acc_template_ref: the dictionary containing the mapping between the ids of account templates and the ids
            of the accounts that have been generated from them.
        :return: True
        '''
        obj_acc = self.pool.get('account.account')
        obj_journal = self.pool.get('account.journal')
        # Build a list with all the data to process
        journal_data = []
        if obj_wizard.bank_accounts_id:
            for acc in obj_wizard.bank_accounts_id:
                vals = {
                    'acc_name': acc.acc_name,
                    'account_type': acc.account_type,
                    'currency_id': acc.currency_id.id,
                }
                journal_data.append(vals)
        ref_acc_bank = obj_wizard.chart_template_id.bank_account_view_id
        if journal_data and not ref_acc_bank.code:
            raise osv.except_osv(_('Configuration Error!'), _('You have to set a code for the bank account defined on the selected chart of accounts.'))
        current_num = 1
        # Journal code prefixes per account type with a running counter:
        # first journal gets the bare prefix (KAS/BNK), later ones get a
        # numeric suffix (KAS1, KAS2, ...).
        journal_code_def = { "cash" : ["KAS",0], "bank" : ["BNK",0]}
        for line in journal_data:
            # Seek the next available number for the account code
            account_ids = obj_acc.search(cr, uid, [('code', '=', line["acc_name"]), ('company_id', '=', company_id)])
            if not account_ids:
                raise osv.except_osv(_('Configuration Error!'), _('Account %s does not exist') % (line["acc_name"],))
            account = obj_acc.browse(cr,uid,account_ids[0])
            # NOTE: 'acc_name' initially holds the account *code*; it is
            # replaced here by the account's display name for the journal.
            line["acc_name"] = account.name
            #create the bank journal
            vals_journal = self._prepare_bank_journal(cr, uid, line, current_num, account.id, company_id, context=context)
            #set journal code
            journal_code_format = journal_code_def.get(line["account_type"])
            if journal_code_format:
                journal_code = journal_code_format[0]
                # Counter is falsy (0) for the first journal of a type, so
                # no suffix is appended on the first occurrence.
                if journal_code_format[1]:
                    journal_code = journal_code + str(journal_code_format[1])
                vals_journal["code"]=journal_code
                journal_code_format[1]=journal_code_format[1]+1
            obj_journal.create(cr, uid, vals_journal)
            current_num += 1
        return True
    def default_get(self, cr, uid, fields, context=None):
        # Pre-fill the wizard with the Austrian default bank (3135,
        # Sparkasse) and cash (2700, Kassa) accounts.
        res = super(wizard_multi_charts_accounts, self).default_get(cr, uid, fields, context=context)
        if "bank_accounts_id" in fields:
            res.update({'bank_accounts_id': [{'acc_name': "3135",'account_type':'bank'},
                                             {'acc_name': "2700",'account_type':'cash'}]})
        return res
    # Extends the existing wizard model rather than defining a new one.
    _inherit = "wizard.multi.charts.accounts"
#class account_bank_accounts_wizard(osv.osv_memory):
# _inherit = "account.bank.accounts.wizard"
# _columns = {
# "acc_name" : fields.selection([("3115","Bank Austria"),
# ("3120","Raiffeisen"),
# ("3125","Volksbank"),
# ("3130","Oberbank"),
# ("3135","Sparkasse"),
# ("3140","Volkskreditbank"),
# ("3145","CA-Bankverein"),
# ("3180","Sonstige"),
# ("2700","Kassa")
# ], "Account Name")
# }
| funkring/fdoo | addons-funkring/l10n_chart_at_2010/chart_wizard.py | Python | agpl-3.0 | 5,108 |
class Solution(object):
    def deleteAndEarn(self, nums):
        """
        :type nums: List[int]
        :rtype: int

        Delete-and-earn DP (LeetCode 740): taking any occurrence of value
        n earns counter[n] * n but deletes all occurrences of n - 1 and
        n + 1.  Walk the distinct values in ascending order keeping two
        running bests:
          * using -- best total when the current value IS taken
          * avoid -- best total when the current value is NOT taken
        Returns 0 for an empty input.
        """
        # Fix: 'collections' was used but never imported in this file,
        # raising NameError outside the LeetCode harness.
        import collections

        counter = collections.Counter(nums)
        prev = None
        using = avoid = 0
        for n in sorted(counter):
            gain = counter[n] * n
            if n - 1 == prev:
                # Adjacent to the previous value: taking n means prev was
                # skipped, so we may only extend the 'avoid' total.
                using, avoid = gain + avoid, max(using, avoid)
            else:
                # No adjacency conflict: either running best may precede n.
                using, avoid = gain + max(using, avoid), max(using, avoid)
            prev = n
        return max(using, avoid)
from leapp.models import Model, fields
from leapp.topics import ApiTestTopic
class ApiTest(Model):
    # Base message model for the actor-API test fixtures: a single
    # free-form string payload published on the ApiTestTopic.
    topic = ApiTestTopic
    data = fields.String()
class ApiTestProduce(ApiTest):
    # Marker subclass: the message type *produced* by the test actor.
    pass
class ApiTestConsume(ApiTest):
    # Marker subclass: the message type *consumed* by the test actor.
    pass
| leapp-to/prototype | tests/data/actor-api-tests/models/apitest.py | Python | lgpl-2.1 | 238 |
import math
from compsoc.events.models import *
from collections import defaultdict
from django.contrib.auth.models import User
class Point:
    """A 2-D integer grid position (seating column/row)."""

    def __init__(self, x=0, y=0):
        self.x = int(x)
        self.y = int(y)

    def __getitem__(self, key):
        # Tuple-style read access: point[0] -> x, point[1] -> y.
        if key == 0:
            return self.x
        if key == 1:
            return self.y
        raise Exception("Invalid key to Point")

    def __setitem__(self, key, value):
        # Tuple-style write access, mirroring __getitem__.
        if key == 0:
            self.x = value
        elif key == 1:
            self.y = value
        else:
            raise Exception("Invalid key to Point")

    def __unicode__(self):
        return u'(%s,%s)' % (self.x, self.y)

    def distance(self, point2):
        """Return the Euclidean distance to another point.

        The x coordinates are stretched by a factor of 1.5 (x + x/2)
        before measuring, to insert gutters between tables.
        """
        stretched_self = self.x + (self.x / 2)
        stretched_other = point2.x + (point2.x / 2)
        dx = stretched_self - stretched_other
        dy = self.y - point2.y
        return math.sqrt(dx ** 2 + dy ** 2)
def ave(points):
    """Return the arithmetic mean of a non-empty sequence of numbers.

    Previously implemented with ``reduce``; ``sum`` is equivalent for the
    numeric lists this is called with (the distance lists built in
    distance_matrix) and, unlike the bare ``reduce`` name, also works on
    Python 3 where ``reduce`` lives in ``functools``.
    """
    return sum(points) / len(points)
def distance_matrix():
    """
    Calculates a mapping from users onto distances.

    For every event's most recent seating revision, records the pairwise
    seat distance between each pair of attendees, then averages those
    distances across all events.  Returns a nested mapping
    ``{user: {other_user: mean_distance}}``.
    """
    # Build lookup map
    # TODO: remove duplicate computations
    # weight score by frequency
    lookup = defaultdict(lambda: defaultdict(lambda: []))
    for event in Event.objects.all():
        # Revisions are ordered newest-first; take the latest one only.
        seating = SeatingRevision.objects.filter(event=event).order_by('-number')
        if seating:
            latest = seating[0]
            # user -> seat position for this revision.
            cache = {}
            for s in latest.seating_set.all():
                cache[s.user] = Point(s.col,s.row)
            # NOTE: computes each unordered pair twice (u->o and o->u).
            for user in cache.keys():
                for other in cache.keys():
                    if other != user:
                        lookup[user][other].append(cache[user].distance(cache[other]))
    results = defaultdict(lambda:{})
    # Average distances
    for user,others in lookup.items():
        for other,values in others.items():
            results[user][other] = ave(values)
    return results
def closest_person():
    """For each user, find the person they sit nearest to on average.

    Returns a list of (user, nearest_other, mean_distance) tuples, one
    per user appearing in the distance matrix.
    """
    results = []
    for user, others in distance_matrix().items():
        # Pick the neighbour with the smallest average distance.
        nearest, score = sorted(others.items(), key=lambda pair: pair[1])[0]
        results.append((user, nearest, score))
    return results
| esteluk/reinhardt | events/similarity.py | Python | agpl-3.0 | 2,330 |
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 5 08:02:30 2018
@author: Ray Justin O. Huang
"""
from Custom_Transformers import PerColumnAttributesAdder, StringCaseChanger, Randomizer, StringCleaner, GroupAggregator
import pandas as pd
import numpy as np
import string
# Exploratory smoke-test script for the Custom_Transformers helpers.
# Each section builds a small fixture DataFrame and pokes one transformer;
# results are inspected interactively, not asserted.

# Sample DataFrames
sample1 = pd.DataFrame({'a': [1,2,3,4,5,6,7],
                        'b': [2,4,6,8,10,12,14],
                        'c': [3,6,9,12,15,18,21]})
sample2 = pd.DataFrame(np.random.randn(100,5),
                       columns=['z','y','x','w','v'])
# Testing PerColumnAttributesAdder object
percol = PerColumnAttributesAdder('b', ['a','c'])
percol.transform(sample1)
percol.numerator_columns
percol.denominator_column
percol.newcolumns
percol2 = PerColumnAttributesAdder('z', ['y','w'])
percol2.transform(sample2).head()
# Loading iris dataset
from sklearn.datasets import load_iris
iris = load_iris()
iris.keys()
iris_X = pd.DataFrame(iris['data'], columns=iris['feature_names'])
iris_y = pd.Series(iris['target'], name='species')
# Sample DataFrames with strings
sample3 = pd.DataFrame({'ingredients': ['milk','Cheese','BACON','bread','Ham'],
                        'utensils': ['SPATULA', 'spoon', 'Fork', 'Knife', 'cheese grater']})
# Testing StringCaseChanger object
casechanger = StringCaseChanger(['ingredients','utensils'], 'title')
casechanger.transform(sample3)
sample3['ingredients'].str.upper()
sample3['ingredients']
iris_X['sepal length (cm)'].shape
# Testing Randomizer object
rdm = Randomizer(['a','b'], added_cols=True, integers=True)
rdm.fit(sample1)
rdm.transform(sample1)
rdm.newcols
rdm.randomizercols
# More sample DataFrames
rgen = np.random.RandomState(47)
sample4 = pd.DataFrame(np.rint(1000*rgen.rand(5,5)+1000), columns=['first','second','third','fourth','fifth'])
sample4.head()
# More Randomizer testing
rdm2 = Randomizer(['third', 'fifth'], added_cols=True, integers=True)
rdm2.fit(sample4)
rdm2.transform(sample4)
# Sample dirty string DataFrames
sample5 = pd.DataFrame({'John': ['P@ssword', 'c@rp', 'Te^&*ting'],
                        'Mary': ['L*t##e','B!g','M$d*u_'],
                        'Jane': ['L!^b', 'H@d', 'P@!l']})
# StringCleaner testing
sample5.head()
# NOTE(review): 'cleaner' is constructed twice -- the second call is redundant.
cleaner = StringCleaner('Mary')
cleaner = StringCleaner('Mary')
cleaner.transform(sample5)
# Manual comparison of punctuation-stripping approaches kept for reference:
# sample5['Mary'].str.replace(string.punctuation, '')
sample5['Mary'].str.replace('[{}]'.format(string.punctuation), '')
# sample5['Mary'].str.translate(None, string.punctuation)
sample5['Mary'].str.translate({string.punctuation: None})
sample5.info()
sample5['Mary'].str.lower()
cleaner2 = StringCleaner('John')
cleaner2.transform(sample5)
# Sample grouping DataFrames
sample6 = pd.DataFrame({'Customer': ['Customer A', 'Customer A', 'Customer A',
                                     'Customer B', 'Customer B', 'Customer B',],
                        'Product': ['Product A', 'Product B', 'Product B',
                                    'Product A', 'Product C', 'Product A'],
                        'Price': [10, 15, 17, 8, 20, 7]})
# GroupAggregator testing
sample6.head()
grouper = GroupAggregator('Customer', np.median)
grouper.transform(sample6)
sample6.groupby('Customer').transform(np.mean)
| rayjustinhuang/DataAnalysisandMachineLearning | RJ's Toolbox/Transformer_Tests.py | Python | mit | 3,229 |
# (C) British Crown Copyright 2014, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""Test function :func:`iris.util.array_equal`."""
from __future__ import (absolute_import, division, print_function)
# import iris tests first so that some things can be initialised before
# importing anything else
import iris.tests as tests
import numpy as np
import numpy.ma as ma
from iris.util import array_equal
class Test(tests.IrisTest):
    """Unit tests for iris.util.array_equal.

    Covers 0-d/1-d/n-d arrays, scalars and plain sequences, masked arrays
    (masks are ignored only when fully masked), and string arrays of
    differing contents, lengths and dimensionality.
    """
    def test_0d(self):
        array_a = np.array(23)
        array_b = np.array(23)
        array_c = np.array(7)
        self.assertTrue(array_equal(array_a, array_b))
        self.assertFalse(array_equal(array_a, array_c))
    def test_0d_and_scalar(self):
        array_a = np.array(23)
        self.assertTrue(array_equal(array_a, 23))
        self.assertFalse(array_equal(array_a, 45))
    def test_1d_and_sequences(self):
        # Lists and tuples must both compare equal to an equivalent array.
        for sequence_type in (list, tuple):
            seq_a = sequence_type([1, 2, 3])
            array_a = np.array(seq_a)
            self.assertTrue(array_equal(array_a, seq_a))
            self.assertFalse(array_equal(array_a, seq_a[:-1]))
            array_a[1] = 45
            self.assertFalse(array_equal(array_a, seq_a))
    def test_nd(self):
        array_a = np.array(np.arange(24).reshape(2, 3, 4))
        array_b = np.array(np.arange(24).reshape(2, 3, 4))
        array_c = np.array(np.arange(24).reshape(2, 3, 4))
        array_c[0, 1, 2] = 100
        self.assertTrue(array_equal(array_a, array_b))
        self.assertFalse(array_equal(array_a, array_c))
    def test_masked_is_ignored(self):
        # Partial masks do NOT hide differing underlying data.
        array_a = ma.masked_array([1, 2, 3], mask=[1, 0, 1])
        array_b = ma.masked_array([2, 2, 2], mask=[1, 0, 1])
        self.assertFalse(array_equal(array_a, array_b))
    def test_fully_masked_arrays(self):
        array_a = ma.masked_array(np.arange(24).reshape(2, 3, 4), mask=True)
        array_b = ma.masked_array(np.arange(24).reshape(2, 3, 4), mask=True)
        self.assertTrue(array_equal(array_a, array_b))
    def test_fully_masked_0d_arrays(self):
        array_a = ma.masked_array(3, mask=True)
        array_b = ma.masked_array(3, mask=True)
        self.assertTrue(array_equal(array_a, array_b))
    def test_fully_masked_string_arrays(self):
        # mask=True and an explicit all-ones mask are equivalent.
        array_a = ma.masked_array(['a', 'b', 'c'], mask=True)
        array_b = ma.masked_array(['a', 'b', 'c'], mask=[1, 1, 1])
        self.assertTrue(array_equal(array_a, array_b))
    def test_partially_masked_string_arrays(self):
        array_a = ma.masked_array(['a', 'b', 'c'], mask=[1, 0, 1])
        array_b = ma.masked_array(['a', 'b', 'c'], mask=[1, 0, 1])
        self.assertTrue(array_equal(array_a, array_b))
    def test_string_arrays_equal(self):
        array_a = np.array(['abc', 'def', 'efg'])
        array_b = np.array(['abc', 'def', 'efg'])
        self.assertTrue(array_equal(array_a, array_b))
    def test_string_arrays_different_contents(self):
        array_a = np.array(['abc', 'def', 'efg'])
        array_b = np.array(['abc', 'de', 'efg'])
        self.assertFalse(array_equal(array_a, array_b))
    def test_string_arrays_subset(self):
        array_a = np.array(['abc', 'def', 'efg'])
        array_b = np.array(['abc', 'def'])
        self.assertFalse(array_equal(array_a, array_b))
        self.assertFalse(array_equal(array_b, array_a))
    def test_string_arrays_unequal_dimensionality(self):
        # Same content at different ranks must never compare equal.
        array_a = np.array('abc')
        array_b = np.array(['abc'])
        array_c = np.array([['abc']])
        self.assertFalse(array_equal(array_a, array_b))
        self.assertFalse(array_equal(array_b, array_a))
        self.assertFalse(array_equal(array_a, array_c))
        self.assertFalse(array_equal(array_b, array_c))
    def test_string_arrays_0d_and_scalar(self):
        array_a = np.array('foobar')
        self.assertTrue(array_equal(array_a, 'foobar'))
        self.assertFalse(array_equal(array_a, 'foo'))
        self.assertFalse(array_equal(array_a, 'foobar.'))
if __name__ == '__main__':
    tests.main()
| Jozhogg/iris | lib/iris/tests/unit/util/test_array_equal.py | Python | lgpl-3.0 | 4,647 |
#!/usr/bin/env python3
import sys
# There is a little bit of guesswork in this. The Imaging With QuickDraw book
# lists $02FF as the "Version" opcode, but it's really the data for the $0011
# payload. It also gives it a size of 2. This contradicts the decompiled picture
# in listing A-5 on p. A-23, which gives it no data at all, and also contradicts
# the note on p. A-5, which says that the amount of data should be 4
# bytes. Saying that it isn't an opcode at all seems like the least
# contradictory option.
# Blank lines correspond to page breaks in Imaging With QuickDraw.
OPCODES = '''\
0000 NOP 0
0001 Clip Region
0002 BkPat 8
0003 TxFont 2
0004 TxFace 1
0005 TxMode 2
0006 SpExtra 4
0007 PnSize 4
0008 PnMode 2
0009 PnPat 8
000A FillPat 8
000B OvSize 4
000C Origin 4
000D TxSize 2
000E FgColor 4
000F BkColor 4
0010 TxRatio 8
0011 VersionOp Version
0012 BkPixPat Pattern
0013 PnPixPat Pattern
0014 FillPixPat Pattern
0015 PnLocHFrac 2
0016 ChExtra 2
0017-0019 Reserved NotDetermined
001A RGBFgCol 6
001B RGBBkCol 6
001C HiliteMode 0
001D HiliteColor 6
001E DefHilite 0
001F OpColor 6
0020 Line 8
0021 LineFrom 4
0022 ShortLine 6
0023 ShortLineFrom 2
0024-0027 Reserved Data16
0028 LongText Text
0029 DHText Text
002A DVText Text
002B DHDVText Text
002C fontName Text
002D lineJustify 10
002E glyphState 8
002F Reserved Data16
0030 frameRect 8
0031 paintRect 8
0032 eraseRect 8
0033 invertRect 8
0034 fillRect 8
0035-0037 Reserved 8
0038 frameSameRect 0
0039 paintSameRect 0
003A eraseSameRect 0
003B invertSameRect 0
003C fillSameRect 0
003D-003F Reserved 0
0040 frameRRect 8
0041 paintRRect 8
0042 eraseRRect 8
0043 invertRRect 8
0044 fillRRect 8
0045-0047 Reserved 8
0048 frameSameRRect 0
0049 paintSameRRect 0
004A eraseSameRRect 0
004B invertSameRRect 0
004C fillSameRRect 0
004D-004F Reserved 0
0050 frameOval 8
0051 paintOval 8
0052 eraseOval 8
0053 invertOval 8
0054 fillOval 8
0055-0057 Reserved 8
0058 frameSameOval 0
0059 paintSameOval 0
005A eraseSameOval 0
005B invertSameOval 0
005C fillSameOval 0
005D-005F Reserved 0
0060 frameArc 12
0061 paintArc 12
0062 eraseArc 12
0063 invertArc 12
0064 fillArc 12
0065-0067 Reserved 12
0068 frameSameArc 4
0069 paintSameArc 4
006A eraseSameArc 4
006B invertSameArc 4
006C fillSameArc 4
006D-006F Reserved 4
0070 framePoly Polygon
0071 paintPoly Polygon
0072 erasePoly Polygon
0073 invertPoly Polygon
0074 fillPoly Polygon
0075-0077 Reserved Polygon
0078 frameSamePoly 0
0079 paintSamePoly 0
007A eraseSamePoly 0
007B invertSamePoly 0
007C fillSamePoly 0
007D-007F Reserved 0
0080 frameRgn Region
0081 paintRgn Region
0082 eraseRgn Region
0083 invertRgn Region
0084 fillRgn Region
0085-0087 Reserved Region
0088 frameSameRgn 0
0089 paintSameRgn 0
008A eraseSameRgn 0
008B invertSameRgn 0
008C fillSameRgn 0
008D-008F Reserved 0
0090 BitsRect PixelData
0091 BitsRgn PixelData
0092-0097 Reserved Data16
0098 PackBitsRect PixelData
0099 PackBitsRgn PixelData
009A DirectBitsRect PixelData
009B DirectBitsRgn PixelData
009C-009F Reserved Data16
00A0 ShortComment 2
00A1 LongComment LongComment
00A2-00AF Reserved Data16
00B0-00CF Reserved 0
00D0-00FE Reserved Data32
00FF OpEndPic End
0C00 HeaderOp 0
8200 CompressedQuickTime 0
8201 UncompressedQuickTime 0
'''
def main():
    """Parse the OPCODES table and emit C tables for PICT decoding.

    Writes to stdout: an enum of opcode names, a packed name-string table,
    per-opcode name/data lookup tables for opcodes 0x00-0xFF, and an
    extended-opcode table for opcodes above 0xFF.
    """
    # All opcodes seen so far (used to verify full 0x00-0xFF coverage).
    opset = set()
    # Opcode name strings, packed back-to-back with NUL separators; a
    # name's index is its 1-based offset into that packed string.
    names = []
    name_len = 0
    tbl_data = [None] * 256
    tbl_name = [None] * 256
    # (opcode, name_index) pairs for opcodes > 0xFF.
    tbl_ext = []
    enum = []
    # Opcode ranges must appear in strictly ascending, non-overlapping order.
    min_start = 0
    for line in OPCODES.splitlines():
        fields = line.split()
        if not fields:
            continue
        oprange, name, data = fields
        oprange = oprange.split('-')
        if len(oprange) == 1:
            # Single opcode, e.g. "0020".
            start = int(oprange[0], 16)
            end = start
            assert start >= min_start
            opset.add(start)
        elif len(oprange) == 2:
            # Inclusive range, e.g. "0024-0027".
            start = int(oprange[0], 16)
            end = int(oprange[1], 16)
            assert start >= min_start
            assert start < end
            nopset = set(range(start, end + 1))
            opset.update(nopset)
        else:
            assert False
        min_start = end + 1
        if name.lower() == 'reserved':
            # Reserved opcodes share name index 0 (no name emitted).
            name_idx = 0
        else:
            assert start == end
            name_idx = name_len + 1
            names.append(name)
            name_len += len(name) + 1
            enum.append((name, start))
        # Numeric data column is a fixed byte count; symbolic columns map
        # to negative sentinel constants (-kTypeX-1) in the C code.
        try:
            data = int(data)
        except ValueError:
            data = '-kType{}-1'.format(data)
        else:
            data = str(data)
        if start <= 255:
            assert end <= 255
            for i in range(start, end + 1):
                tbl_data[i] = data
                tbl_name[i] = name_idx
        else:
            assert start == end
            tbl_ext.append((start, name_idx))
    # Every single-byte opcode must be covered by the table.
    assert opset.issuperset(range(256))
    write = sys.stdout.write
    write('/* This file is automatically generated by pict_opcode.py. */\n')
    # Emit the enum; explicit values only where numbering is discontiguous.
    write('\nenum {')
    nl = '\n'
    next_value = 0
    for name, value in enum:
        write(nl)
        write('kOp_')
        write(name)
        if value != next_value:
            write(' = 0x{:04x}'.format(value))
        next_value = value + 1
        nl = ',\n'
    write('\n};\n')
    # Emit the packed NUL-separated name strings.
    write('\nstatic const char kOpcodeNames[] =')
    nl = '\n'
    for name in names:
        write(nl)
        write('"')
        write(name)
        nl = '\\0"\n'
    write('";\n')
    # Emit the 256-entry name-index table, 16 entries per output line.
    write('\nstatic const unsigned short kOpcodeNameTable[256] = {\n')
    comma = ''
    start = 0
    for i in range(16):
        end = (i + 1) * 16
        for name_idx in tbl_name[start:end]:
            write(comma)
            write(str(name_idx))
            comma = ','
        comma = ',\n'
        start = end
    write('\n};\n')
    # Emit the 256-entry data-size table, 4 entries per output line.
    write('\nstatic const short kOpcodeDataTable[256] = {\n')
    comma = ''
    start = 0
    for i in range(64):
        end = (i + 1) * 4
        for data in tbl_data[start:end]:
            write(comma)
            write(data)
            comma = ','
        comma = ',\n'
        start = end
    write('\n};\n')
    # Emit the extended (>0xFF) opcode table.
    write('\nstatic const struct extended_opcode kExtendedOpcodes[] = {\n')
    comma = ''
    for row in tbl_ext:
        write(comma)
        write('{{0x{:04x}, {}}}'.format(*row))
        comma = ',\n'
    write('\n};\n')
if __name__ == '__main__':
    main()
| depp/unrez | lib/pict_opcode.py | Python | mit | 6,299 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from runner.koan import *
class AboutNewStyleClasses(Koan):
    """Koan exploring Python 2 old-style vs new-style classes.

    NOTE: this file is Python 2 only -- old-style classes, the 'classobj'
    type, and subscripting an exception (``ex[0]``) all disappeared in
    Python 3.
    """
    class OldStyleClass:
        """An old style class"""
        # Original class style have been phased out in Python 3.
    class NewStyleClass(object):
        """A new style class"""
        # Introduced in Python 2.2
        #
        # Aside from this set of tests, Python Koans sticks exclusively to this
        # kind of class
        pass
    def test_new_style_classes_inherit_from_object_base_class(self):
        self.assertEqual(True, issubclass(self.NewStyleClass, object))
        self.assertEqual(False, issubclass(self.OldStyleClass, object))
    def test_new_style_classes_have_more_attributes(self):
        # Old-style classes expose only __doc__ and __module__.
        self.assertEqual(2, len(dir(self.OldStyleClass)))
        self.assertEqual("""An old style class""", self.OldStyleClass.__doc__)
        self.assertEqual('koans.about_new_style_classes', self.OldStyleClass.__module__)
        self.assertEqual(18, len(dir(self.NewStyleClass)))
        # To examine the available attributes, run
        # 'dir(<Class name goes here>)'
        # from a python console
    # ------------------------------------------------------------------
    def test_old_style_classes_have_type_but_no_class_attribute(self):
        self.assertEqual('classobj', type(self.OldStyleClass).__name__)
        try:
            cls = self.OldStyleClass.__class__.__name__
        except Exception as ex:
            pass
        # What was that error message from the exception?
        self.assertMatch("class OldStyleClass has no attribute '__class__'", ex[0])
    def test_new_style_classes_have_same_class_as_type(self):
        new_style = self.NewStyleClass()
        self.assertEqual(type(self.NewStyleClass), self.NewStyleClass.__class__)
        self.assertEqual(True, type(self.NewStyleClass) == self.NewStyleClass.__class__)
    # ------------------------------------------------------------------
    def test_in_old_style_instances_class_is_different_to_type(self):
        old_style = self.OldStyleClass()
        self.assertEqual("OldStyleClass", old_style.__class__.__name__)
        self.assertEqual("instance", type(old_style).__name__)
    def test_new_style_instances_have_same_class_as_type(self):
        new_style = self.NewStyleClass()
        self.assertEqual("NewStyleClass", new_style.__class__.__name__)
        self.assertEqual(True, type(new_style) == new_style.__class__)
| Isabek/python-koans | python2/koans/about_new_style_classes.py | Python | mit | 2,467 |
# -*- coding: utf-8 -*-
"""
ipcai2016
Copyright (c) German Cancer Research Center,
Computer Assisted Interventions.
All rights reserved.
This software is distributed WITHOUT ANY WARRANTY; without
even the implied warranty of MERCHANTABILITY or FITNESS FOR
A PARTICULAR PURPOSE.
See LICENSE for details
"""
"""
Created on Fri Aug 14 11:09:18 2015
@author: wirkert
"""
from PIL import Image
from PIL import ImageEnhance
import logging
import datetime
import sys
import SimpleITK as sitk
import matplotlib
import msi.normalize as norm
from msi.io.nrrdreader import NrrdReader
from regression.estimation import estimate_image
from tasks_common import *
import commons
from msi.io.tiffringreader import TiffRingReader
# Module-level configuration: downscale factor for the ring reader,
# result/data directory registration, and global plot font size.
TiffRingReader.RESIZE_FACTOR = 0.5
sc = commons.ScriptCommons()
sc.add_dir("LIVER_DATA",
           os.path.join(sc.get_dir("DATA_FOLDER"), "liver_images"))
sc.add_dir("LIVER_RESULTS", os.path.join(sc.get_dir("RESULTS_FOLDER"), "liver"))
sc.add_dir("FILTER_TRANSMISSIONS",
           os.path.join(sc.get_dir("DATA_FOLDER"),
                        "filter_transmissions"))
# Large font for the publication figures produced below.
font = {'family' : 'normal',
        'size'   : 30}
matplotlib.rc('font', **font)
class ResultsFile(luigi.Task):
    # Sentinel task: the aggregated per-image results CSV written by
    # IPCAICreateOxyImageTask runs.
    def output(self):
        return luigi.LocalTarget(os.path.join(sc.get_full_dir("LIVER_RESULTS"),
                                              "results.csv"))
class OxyAndVhbOverTimeTask(luigi.Task):
    """Plot mean oxygenation and blood volume fraction over time.

    Reads the aggregated results CSV, derives elapsed seconds from the
    timestamp embedded in each image name, and saves two scatter plots
    annotated with drug delivery (t=0) and porcine death (t=56s).
    """
    def output(self):
        return luigi.LocalTarget(os.path.join(sc.get_full_dir("LIVER_RESULTS"),
                                              "liver_oxy_over_time.pdf"))
    def requires(self):
        return ResultsFile()
    def run(self):
        df = pd.read_csv(self.input().path, index_col=0)
        # determine times from start: parse "HH:MM:SS" found right after
        # the "2014-08-03_" date prefix in each image file name.
        image_name_strings = df["image name"].values
        time_strings = map(lambda s: s[
            s.find("2014-08-03_")+11:s.find("2014-08-03_")+19],
            image_name_strings)
        time_in_s = map(lambda s: int(s[0:2]) * 3600 +
                                  int(s[3:5]) * 60 +
                                  int(s[6:]), time_strings)
        # Offsets are relative to the first image (drug delivery).
        df["time since drug delivery [s]"] = np.array(time_in_s) - time_in_s[0]
        # print oxy over time as scatterplot.
        ax = df.plot.scatter(x="time since drug delivery [s]",
                             y="oxygenation mean [%]",
                             s=100, alpha=0.5,
                             fontsize=30)
        ax.set_xlim((-1, 70))
        plt.axvline(x=0, ymin=0, ymax=1, linewidth=2)
        plt.axvline(x=56, ymin=0, ymax=1, linewidth=2)
        ax.annotate('drug delivery', xy=(0, ax.get_ylim()[1]),
                    xycoords='data', xytext=(0, 0),
                    fontsize=30,
                    textcoords='offset points')
        ax.annotate('porcine death', xy=(56, ax.get_ylim()[1]),
                    xycoords='data', xytext=(-100, 0),
                    fontsize=30,
                    textcoords='offset points')
        ax.yaxis.label.set_size(30)
        ax.xaxis.label.set_size(30)
        plt.grid()
        # NOTE(review): this overwrites the *input* results.csv with the
        # added time column -- confirm this side effect is intended.
        df.to_csv(self.input().path)
        # create and save vhb plot
        plt.savefig(self.output().path,
                    dpi=250, bbox_inches='tight', mode="pdf")
        # print vhb over time as scatterplot.
        ax = df.plot.scatter(x="time since drug delivery [s]",
                             y="blood volume fraction mean [%]",
                             s=100, alpha=0.5,
                             fontsize=30)
        ax.set_xlim((-1, 70))
        plt.axvline(x=0, ymin=0, ymax=1, linewidth=2)
        plt.axvline(x=56, ymin=0, ymax=1, linewidth=2)
        ax.annotate('drug delivery', xy=(0, ax.get_ylim()[1]),
                    xycoords='data', xytext=(0, 0),
                    fontsize=30,
                    textcoords='offset points')
        ax.annotate('porcine death', xy=(56, ax.get_ylim()[1]),
                    xycoords='data', xytext=(-100, 0),
                    fontsize=30,
                    textcoords='offset points')
        ax.yaxis.label.set_size(30)
        ax.xaxis.label.set_size(30)
        plt.grid()
        plt.savefig(self.output().path + "_vhb_mean.pdf",
                    dpi=250, bbox_inches='tight', mode="pdf")
class IPCAICreateOxyImageTask(luigi.Task):
    """Estimate oxygenation/vhb maps for one multispectral liver image.

    Loads the image plus flatfield/dark references, corrects and
    normalizes it, applies the trained regressor, saves an RGB preview
    and the two parameter maps, and appends summary statistics to the
    shared results CSV.
    """
    image_name = luigi.Parameter()
    df_prefix = luigi.Parameter()
    eval_name = luigi.Parameter()
    def requires(self):
        # Regressor, flatfield, the image itself, and the dark frame
        # (consumed below via self.input()[0..3] in this order).
        return IPCAITrainRegressor(df_prefix=self.df_prefix, eval_name=self.eval_name), \
               Flatfield(flatfield_folder=sc.get_full_dir("FLAT_FOLDER")), \
               SingleMultispectralImage(image=os.path.join(
                       sc.get_full_dir("LIVER_DATA"), self.image_name)), \
               Dark(dark_folder=sc.get_full_dir("DARK_FOLDER"))
    def output(self):
        return luigi.LocalTarget(os.path.join(sc.get_full_dir("LIVER_RESULTS"),
                                              self.image_name + "_" +
                                              self.df_prefix +
                                              "_oxy_summary" + ".png"))
    def run(self):
        nrrd_reader = NrrdReader()
        tiff_ring_reader = TiffRingReader()
        # read the flatfield
        flat = nrrd_reader.read(self.input()[1].path)
        dark = nrrd_reader.read(self.input()[3].path)
        # read the msi
        nr_filters = len(sc.other["RECORDED_WAVELENGTHS"])
        msi, segmentation = tiff_ring_reader.read(self.input()[2].path,
                                                  nr_filters)
        # only take into account not saturated pixels.
        segmentation = np.max(msi.get_image(), axis=-1) < 2000.
        # read the regressor
        e_file = open(self.input()[0].path, 'r')
        e = pickle.load(e_file)
        # correct image setup: the digit before " 2014" in the file name
        # encodes which filter the ring started on, so the bands must be
        # rotated back into their original order.
        position_filter_nr_in_string = self.image_name.find(" 2014") - 1
        filter_nr = int(self.image_name[
                position_filter_nr_in_string:position_filter_nr_in_string+1])
        original_order = np.arange(nr_filters)
        new_image_order = np.concatenate((
                original_order[nr_filters - filter_nr:],
                original_order[:nr_filters - filter_nr]))
        # resort msi to restore original order
        msimani.get_bands(msi, new_image_order)
        # correct by flatfield
        msimani.image_correction(msi, flat, dark)
        # create artificial rgb
        rgb_image = msi.get_image()[:, :, [2, 3, 1]]
        rgb_image /= np.max(rgb_image)
        rgb_image *= 255.
        # preprocess the image
        # sortout unwanted bands
        print "1"
        # zero values would lead to infinity logarithm, thus clip.
        msi.set_image(np.clip(msi.get_image(), 0.00001, 2. ** 64))
        # normalize to get rid of lighting intensity
        norm.standard_normalizer.normalize(msi)
        # transform to absorption
        msi.set_image(-np.log(msi.get_image()))
        # normalize by l2 for stability
        norm.standard_normalizer.normalize(msi, "l2")
        print "2"
        # estimate (NOTE: 'time' here is the elapsed estimation time
        # returned by estimate_image, not the time module)
        sitk_image, time = estimate_image(msi, e)
        image = sitk.GetArrayFromImage(sitk_image)
        plt.figure()
        print "3"
        # Brightened RGB preview of the scene.
        rgb_image = rgb_image.astype(np.uint8)
        im = Image.fromarray(rgb_image, 'RGB')
        enh_brightness = ImageEnhance.Brightness(im)
        im = enh_brightness.enhance(5.)
        plotted_image = np.array(im)
        top_left_axis = plt.gca()
        top_left_axis.imshow(plotted_image, interpolation='nearest')
        top_left_axis.xaxis.set_visible(False)
        top_left_axis.yaxis.set_visible(False)
        plt.set_cmap("jet")
        print "4"
        # plot parametric maps
        # first oxygenation; the two corner pixels are overwritten to pin
        # the colormap to the full [0, 1] range.
        plt.figure()
        oxy_image = image[:, :, 0]
        segmentation[0, 0] = 1
        segmentation[0, 1] = 1
        oxy_image = np.ma.masked_array(oxy_image, ~segmentation)
        oxy_image[np.isnan(oxy_image)] = 0.
        oxy_image[np.isinf(oxy_image)] = 0.
        oxy_mean = np.mean(oxy_image)
        oxy_image[0, 0] = 0.0
        oxy_image[0, 1] = 1.
        plot_image(oxy_image[:, :], plt.gca())
        plt.savefig(self.output().path,
                    dpi=250, bbox_inches='tight')
        # second blood volume fraction, clipped and color-pinned to [0, 0.1]
        plt.figure()
        vhb_image = image[:, :, 1]
        vhb_image = np.ma.masked_array(vhb_image, ~segmentation)
        vhb_image[np.isnan(vhb_image)] = 0.
        vhb_image[np.isinf(vhb_image)] = 0.
        vhb_image[0, 0] = 0.0
        vhb_image[0, 1] = 0.1
        vhb_image = np.clip(vhb_image, 0.0, 0.1)
        vhb_mean = np.mean(vhb_image)
        plot_image(vhb_image, plt.gca())
        plt.savefig(self.output().path + "vhb.png",
                    dpi=250, bbox_inches='tight')
        # store results summary in dataframe, appending to any existing
        # results.csv so repeated task runs accumulate rows.
        df_image_results = pd.DataFrame(data=np.expand_dims([self.image_name,
                                                             oxy_mean * 100.,
                                                             vhb_mean * 100.,
                                                             time], 0),
                                        columns=["image name",
                                                 "oxygenation mean [%]",
                                                 "blood volume fraction mean [%]",
                                                 "time to estimate"])
        results_file = os.path.join(sc.get_full_dir("LIVER_RESULTS"),
                                    "results.csv")
        if os.path.isfile(results_file):
            df_results = pd.read_csv(results_file, index_col=0)
            df_results = pd.concat((df_results, df_image_results)).reset_index(
                drop=True
            )
        else:
            df_results = df_image_results
        df_results.to_csv(results_file)
        print "5"
        plt.close("all")
def main(args):
    """Run the in-vivo liver evaluation pipeline.

    args[1] must be the path to a configuration file. Results and logs are
    written below the root folder named in that configuration.
    """
    # Read the evaluation configuration and prepare the folder structure.
    config = commons.read_configuration_dict(args[1])
    evaluation_name = config["evaluation_name"]
    training_prefix = config["in_vivo_mc_data_train"]
    sc.set_root(config["root_path"])
    sc.create_folders()
    # Log to a timestamped file in the log folder and mirror INFO to console.
    log_file = os.path.join(sc.get_full_dir("LOG_FOLDER"),
                            "liver" + str(datetime.datetime.now()) + '.log')
    logging.basicConfig(filename=log_file, level=logging.INFO)
    luigi.interface.setup_interface_logging()
    console_handler = logging.StreamHandler()
    console_handler.setLevel(logging.INFO)
    logging.getLogger().addHandler(console_handler)
    # Local luigi scheduler/worker pair (no central daemon needed).
    scheduler = luigi.scheduler.CentralPlannerScheduler()
    worker = luigi.worker.Worker(scheduler=scheduler)
    # Queue one oxygenation-image task per image of the first in-vivo
    # acquisition (file names containing "0 2014"), then run them.
    image_files = get_image_files_from_folder(sc.get_full_dir("LIVER_DATA"))
    selected_images = [name for name in image_files if "0 2014" in name]
    for image_name in selected_images:
        worker.add(IPCAICreateOxyImageTask(image_name=image_name,
                                           df_prefix=training_prefix,
                                           eval_name=evaluation_name))
    worker.run()
    # Finally aggregate oxygenation and blood volume fraction over time.
    worker.add(OxyAndVhbOverTimeTask())
    worker.run()
# Script entry point: expects the configuration file path as argv[1].
if __name__ == '__main__':
    main(sys.argv)
| swirkert/ipcai2016 | scripts/ipcai2016/script_analyze_ipcai_in_vivo_liver.py | Python | bsd-3-clause | 11,382 |
# coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from tests import IntegrationTestCase
from tests.holodeck import Request
from twilio.base.exceptions import TwilioException
from twilio.http.response import Response
class ByocTrunkTestCase(IntegrationTestCase):
    """Holodeck-mocked integration tests for the Voice v1 /ByocTrunks API.

    Each *_request test mocks a 500 response so the client raises after
    sending, which lets the test assert on the outgoing HTTP request; each
    *_response test mocks a canned success payload and checks the client
    parses it without error.
    """

    def test_create_request(self):
        self.holodeck.mock(Response(500, ''))

        with self.assertRaises(TwilioException):
            self.client.voice.v1.byoc_trunks.create()

        # Creation must POST to the collection URL.
        self.holodeck.assert_has_request(Request(
            'post',
            'https://voice.twilio.com/v1/ByocTrunks',
        ))

    def test_create_response(self):
        self.holodeck.mock(Response(
            201,
            '''
            {
                "account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "sid": "BYaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "friendly_name": "friendly_name",
                "voice_url": "https://byoc.example.com/twilio/app",
                "voice_method": "POST",
                "voice_fallback_method": "POST",
                "voice_fallback_url": "https://byoc.example.com/twilio/fallback",
                "status_callback_method": "POST",
                "status_callback_url": "https://byoc.example.com/twilio/status_callback",
                "cnam_lookup_enabled": false,
                "connection_policy_sid": "NYaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "from_domain_sid": "SDaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "date_created": "2020-03-18T23:31:36Z",
                "date_updated": "2020-03-18T23:31:36Z",
                "url": "https://voice.twilio.com/v1/ByocTrunks/BYaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
            }
            '''
        ))

        actual = self.client.voice.v1.byoc_trunks.create()

        self.assertIsNotNone(actual)

    def test_fetch_request(self):
        self.holodeck.mock(Response(500, ''))

        with self.assertRaises(TwilioException):
            self.client.voice.v1.byoc_trunks("BYXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").fetch()

        # Fetch must GET the instance URL keyed by trunk SID.
        self.holodeck.assert_has_request(Request(
            'get',
            'https://voice.twilio.com/v1/ByocTrunks/BYXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',
        ))

    def test_fetch_response(self):
        self.holodeck.mock(Response(
            200,
            '''
            {
                "account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "sid": "BYaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "friendly_name": "friendly_name",
                "voice_url": "https://byoc.example.com/twilio/app",
                "voice_method": "POST",
                "voice_fallback_method": "POST",
                "voice_fallback_url": "https://byoc.example.com/twilio/fallback",
                "status_callback_method": "POST",
                "status_callback_url": "https://byoc.example.com/twilio/status_callback",
                "cnam_lookup_enabled": false,
                "connection_policy_sid": "NYaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "from_domain_sid": "SDaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "date_created": "2020-03-18T23:31:36Z",
                "date_updated": "2020-03-18T23:31:37Z",
                "url": "https://voice.twilio.com/v1/ByocTrunks/BYaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
            }
            '''
        ))

        actual = self.client.voice.v1.byoc_trunks("BYXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").fetch()

        self.assertIsNotNone(actual)

    def test_list_request(self):
        self.holodeck.mock(Response(500, ''))

        with self.assertRaises(TwilioException):
            self.client.voice.v1.byoc_trunks.list()

        # Listing must GET the collection URL.
        self.holodeck.assert_has_request(Request(
            'get',
            'https://voice.twilio.com/v1/ByocTrunks',
        ))

    def test_read_full_response(self):
        self.holodeck.mock(Response(
            200,
            '''
            {
                "meta": {
                    "page": 0,
                    "page_size": 50,
                    "first_page_url": "https://voice.twilio.com/v1/ByocTrunks?PageSize=50&Page=0",
                    "previous_page_url": null,
                    "url": "https://voice.twilio.com/v1/ByocTrunks?PageSize=50&Page=0",
                    "next_page_url": null,
                    "key": "byoc_trunks"
                },
                "byoc_trunks": [
                    {
                        "account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                        "sid": "BYaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                        "friendly_name": "friendly_name",
                        "voice_url": "https://byoc.example.com/twilio/app",
                        "voice_method": "POST",
                        "voice_fallback_method": "POST",
                        "voice_fallback_url": "https://byoc.example.com/twilio/fallback",
                        "status_callback_method": "POST",
                        "status_callback_url": "https://byoc.example.com/twilio/status_callback",
                        "cnam_lookup_enabled": false,
                        "connection_policy_sid": "NYaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                        "from_domain_sid": "SDaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                        "date_created": "2020-03-18T23:31:36Z",
                        "date_updated": "2020-03-18T23:31:37Z",
                        "url": "https://voice.twilio.com/v1/ByocTrunks/BYaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
                    }
                ]
            }
            '''
        ))

        actual = self.client.voice.v1.byoc_trunks.list()

        self.assertIsNotNone(actual)

    def test_read_empty_response(self):
        self.holodeck.mock(Response(
            200,
            '''
            {
                "meta": {
                    "page": 0,
                    "page_size": 50,
                    "first_page_url": "https://voice.twilio.com/v1/ByocTrunks?PageSize=50&Page=0",
                    "previous_page_url": null,
                    "url": "https://voice.twilio.com/v1/ByocTrunks?PageSize=50&Page=0",
                    "next_page_url": null,
                    "key": "byoc_trunks"
                },
                "byoc_trunks": []
            }
            '''
        ))

        actual = self.client.voice.v1.byoc_trunks.list()

        self.assertIsNotNone(actual)

    def test_update_request(self):
        self.holodeck.mock(Response(500, ''))

        with self.assertRaises(TwilioException):
            self.client.voice.v1.byoc_trunks("BYXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").update()

        # Update must POST to the instance URL.
        self.holodeck.assert_has_request(Request(
            'post',
            'https://voice.twilio.com/v1/ByocTrunks/BYXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',
        ))

    def test_update_response(self):
        self.holodeck.mock(Response(
            200,
            '''
            {
                "account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "sid": "BYaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "friendly_name": "update_name",
                "voice_url": "https://byoc.example.com/twilio_updated/app",
                "voice_method": "GET",
                "voice_fallback_method": "GET",
                "voice_fallback_url": "https://byoc.example.com/twilio_updated/fallback",
                "status_callback_method": "GET",
                "status_callback_url": "https://byoc.example.com/twilio_updated/status_callback",
                "cnam_lookup_enabled": true,
                "connection_policy_sid": "NYaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab",
                "from_domain_sid": "SDaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab",
                "date_created": "2020-03-18T23:31:36Z",
                "date_updated": "2020-03-18T23:31:37Z",
                "url": "https://voice.twilio.com/v1/ByocTrunks/BYaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
            }
            '''
        ))

        actual = self.client.voice.v1.byoc_trunks("BYXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").update()

        self.assertIsNotNone(actual)

    def test_delete_request(self):
        self.holodeck.mock(Response(500, ''))

        with self.assertRaises(TwilioException):
            self.client.voice.v1.byoc_trunks("BYXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").delete()

        # Deletion must DELETE the instance URL.
        self.holodeck.assert_has_request(Request(
            'delete',
            'https://voice.twilio.com/v1/ByocTrunks/BYXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',
        ))

    def test_delete_response(self):
        self.holodeck.mock(Response(
            204,
            None,
        ))

        actual = self.client.voice.v1.byoc_trunks("BYXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").delete()

        self.assertTrue(actual)
| twilio/twilio-python | tests/integration/voice/v1/test_byoc_trunk.py | Python | mit | 8,787 |
#!/usr/bin/env python
# expand-terrain-macros.py - Expand "meta-macros" for terrain WML
#
# Copyright (C) 2008 - 2009 by Moritz Goebelbecker
# Part of the Battle for Wesnoth Project http://www.wesnoth.org
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2
# or at your option any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY.
#
# See the COPYING file for more details.
# Meta-Macro syntax:
# #meta-macro BASENAME [{NORMAL_PARAM, OPTIONAL_PARAM} [...]]
#
# NORMAL_PARAM: Macro parameter that will be passed unmodified to the base
# macro
# OPTIONAL_PARAM: Macro parameter that will sometimes be passed to the base
# macro and sometimes be replaced with a default value. The script will
# create one macro for each possible combination of optional parameters
#
# Syntax: ABBREV=NAME=DEFAULT
# ABBREV: One letter that is appended to macros taking that argument
# NAME: Name of the parameter that is used when it's passed to the
# base macro
# ABBREV: Default value that is used when the parameter is not passed
# to the base macro
#
#
# !!! ONLY USE THIS IF YOU KNOW WHAT YOU ARE DOING !!!
import sys
import getopt
def printUsage():
    # Print command-line usage and the option summary to stdout.
    print "Usage: expand-terrain-macros.py [OPTIONS] filename1\
[filename2 [...]]\n"
    print """Options:
 -i Insert the expanded sections into the input file(s) immediately after
    their macro definitions.
 -a Append the expanded sections to the input file(s)
 -r Replace the input file(s) with the resulting output. Previously generated
    expansions will be removed. Implies -i if nothing else is specified.
 If no options are specified, only the expanded sections will be printed to
 stdout"""
# Output modes (mutually constrained below): insert expansions in place,
# append them at the end of the file, and/or rewrite the input file.
insert = False
append = False
replace = False

try:
    (opts, args) = getopt.getopt(sys.argv[1:], 'iar')
except getopt.GetoptError, e:
    print 'Error parsing command-line arguments: %s' % e
    printUsage()
    sys.exit(1)

for (option, parameter) in opts:
    if option == '-i':
        insert = True
    if option == '-a':
        append = True
    if option == '-r':
        replace = True

# -r alone implies -i; -i and -a are mutually exclusive.
if replace and not append:
    insert = True
if insert and append:
    print "Error: cannot use -i and -a at the same time"
    printUsage()
    sys.exit(1)
if len(args) == 0:
    printUsage()
    sys.exit(1)

for filename in args:
    f = file(filename)
    content = f.readlines()
    f.close()
    changed = False
    output = []
    appended = []
    # True while we are inside a previously generated section, which is
    # dropped from the output so regeneration does not duplicate it.
    autogenerated = False
    for line in content:
        if line.strip() == "#The following code is autogenerated\
by expand-terrain-macros.py":
            autogenerated = True
        if (insert or append) and not autogenerated:
            output.append(line.rstrip("\n"))
        if line.strip() == "#end of generated code":
            autogenerated = False
        if line.startswith('#meta-macro'):
            # Parse "#meta-macro BASENAME param [ABBREV=NAME=DEFAULT ...]".
            elems = line[12:].strip().split()
            basename = elems[0]
            params = []
            optional_params = []
            for param in elems[1:]:
                split_param = param.split('=')
                if len(split_param) == 3:
                    optional_params.append(split_param[0])
                elif len(split_param) != 1:
                    print "Error in line:\n" + line
                    sys.exit(1)
                params.append(split_param)
            base_macro_suffix = "_" + "".join(optional_params)
            result = []
            result.append("#The following code is autogenerated\
by expand-terrain-macros.py")
            if append:
                result.append("#generated from: " + line.strip())
            result.append("#Please do not modify")
            # One macro per combination of optional parameters; the all-enabled
            # combination (2**n - 1) is the base macro itself and is skipped.
            for i in xrange(2**len(optional_params) - 2, -1, -1):
                enabled_map = dict([(param, i & (1<<index) != 0) for index, param in enumerate(optional_params)])
                suffix = ""
                params_external = []
                params_internal = []
                for param in params:
                    #normal parameter
                    if len(param) == 1:
                        params_external.append(param[0])
                        params_internal.append('({'+param[0]+'})')
                    else:
                        #enabled optional parameter
                        if enabled_map[param[0]]:
                            suffix += param[0]
                            params_external.append(param[1])
                            params_internal.append('({'+param[1]+'})')
                        else:
                            params_internal.append(param[2])
                if len(suffix) > 0:
                    suffix = "_"+suffix
                result.append("#define " + basename + suffix + " " + " ".join(params_external))
                result.append(" {" + basename + base_macro_suffix + " " + " ".join(params_internal) + "}")
                result.append("#enddef")
            result.append("#end of generated code")
            changed = True
            if insert:
                output += result
            elif append:
                appended += result
            else:
                for r in result:
                    print r
    if (insert or append) and not replace:
        for line in output:
            print line
        if append:
            for line in appended:
                print line
    elif replace and changed:
        f = open(filename, 'w')
        for line in output:
            f.write(line+"\n")
        if append:
            for line in appended:
                f.write(line+"\n")
        f.close()
| danteinforno/wesnoth | data/tools/expand-terrain-macros.py | Python | gpl-2.0 | 5,797 |
#
# This file is part of CasADi.
#
# CasADi -- A symbolic framework for dynamic optimization.
# Copyright (C) 2010-2014 Joel Andersson, Joris Gillis, Moritz Diehl,
# K.U. Leuven. All rights reserved.
# Copyright (C) 2011-2014 Greg Horn
#
# CasADi is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3 of the License, or (at your option) any later version.
#
# CasADi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with CasADi; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
#
from casadi.tools import *
from casadi import *
import numpy as np
from pylab import *
import os
import casadi as c
import casadi
import time
# Probe for casadi: record availability and the concrete symbolic types so
# helper functions below can dispatch between numpy and casadi constructors.
casadiAvailable = False
casadiTypes = set()
try:
    import casadi as c
    casadiAvailable = True
    # NOTE(review): both entries are type(c.SX()) — presumably one was meant
    # to be a second symbolic type (e.g. MX); as written the set holds a
    # single type. Confirm before changing.
    casadiTypes = set([type(c.SX()),type(c.SX())])
except ImportError:
    pass
def TRx(a):
    """Homogeneous 4x4 rotation about the x-axis by angle a [rad]."""
    # Use a casadi SX matrix when a is symbolic, a numpy matrix otherwise.
    if casadiAvailable and type(a) in casadiTypes:
        make = c.SX
    else:
        make = numpy.matrix
    ca = cos(a)
    sa = sin(a)
    return make([[1, 0, 0, 0],
                 [0, ca, -sa, 0],
                 [0, sa, ca, 0],
                 [0, 0, 0, 1]])
def TRy(a):
    """Homogeneous 4x4 rotation about the y-axis by angle a [rad]."""
    # Use a casadi SX matrix when a is symbolic, a numpy matrix otherwise.
    if casadiAvailable and type(a) in casadiTypes:
        make = c.SX
    else:
        make = numpy.matrix
    ca = cos(a)
    sa = sin(a)
    return make([[ca, 0, sa, 0],
                 [0, 1, 0, 0],
                 [-sa, 0, ca, 0],
                 [0, 0, 0, 1]])
def TRz(a):
    """Homogeneous 4x4 rotation about the z-axis by angle a [rad]."""
    # Use a casadi SX matrix when a is symbolic, a numpy matrix otherwise.
    if casadiAvailable and type(a) in casadiTypes:
        make = c.SX
    else:
        make = numpy.matrix
    ca = cos(a)
    sa = sin(a)
    return make([[ca, -sa, 0, 0],
                 [sa, ca, 0, 0],
                 [0, 0, 1, 0],
                 [0, 0, 0, 1]])
def tr(x,y,z):
    """Homogeneous 4x4 translation by (x, y, z)."""
    rows = [[1, 0, 0, x],
            [0, 1, 0, y],
            [0, 0, 1, z],
            [0, 0, 0, 1]]
    return numpy.matrix(rows)
def scale(a):
    """3x3 uniform scaling matrix with factor a on the diagonal."""
    rows = [[a, 0, 0],
            [0, a, 0],
            [0, 0, a]]
    return numpy.matrix(rows)
def Tscale(a):
    """Homogeneous (4x4) version of a uniform scaling by a."""
    R = scale(a)
    return R2T(R)
def Tquat(q0,q1,q2,q3):
    """Homogeneous (4x4) rotation for the quaternion (q0, q1, q2, q3)."""
    R = quat(q0, q1, q2, q3)
    return R2T(R)
def quat(q0,q1,q2,q3):
    """
    From Jeroen's presentation. q = [e*sin(theta/2); cos(theta/2)]

    Returns the 3x3 rotation matrix for the quaternion (q0, q1, q2, q3),
    with the scalar part in q3.
    """
    constr = numpy.matrix
    types = set([type(q) for q in [q0,q1,q2,q3]])
    #if not(types.isdisjoint(casadiTypes)):
    #  constr = c.SX
    rho = constr([[q0],[q1],[q2]])  # vector part of the quaternion
    rho_skew = skew(rho)
    I_3 = constr([[1.0,0,0],[0,1.0,0],[0,0,1.0]])
    #A = multiply(I_3,(numpy.dot(rho.T,-rho)+q3*q3))+numpy.dot(rho,rho.T)*2.0-q3*rho_skew*2.0
    # NOTE(review): rho/rho_skew/I_3 feed only the commented-out formula above;
    # the expanded per-entry expression below is what is actually returned.
    b = q0
    c_ = q1
    d = q2
    a = q3  # scalar part
    a2 = a**2
    b2 = b**2
    c2 = c_**2
    d2 = d**2
    am2 = -a2
    bm2 = -b2
    cm2 = -c2
    dm2 = -d2
    bb = 2*b
    aa = 2*a
    bc2 = bb*c_
    bd2 = bb*d
    ac2 = aa*c_
    ab2 = aa*b
    ad2 = aa*d
    cd2 = 2*c_*d
    # NOTE(review): A is built with a trailing .T and then returned as
    # constr(A.T) — the two transposes cancel; presumably intentional but
    # worth confirming against the commented-out formula.
    A = constr([[a2+b2+cm2+dm2, bc2 - ad2, bd2 + ac2],[bc2 + ad2, a2+bm2+c2+dm2, cd2 - ab2], [ bd2 -ac2, cd2 + ab2, a2+bm2+cm2+d2]]).T
    # Switch to a casadi constructor only for the final wrap when any input
    # is symbolic.
    if not(types.isdisjoint(casadiTypes)):
        constr = c.SX
    return constr(A.T)
def quatOld(q0,q1,q2,q3):
    """
    From Shabana AA. Dynamics of multibody systems. Cambridge Univ Pr; 2005.
    defined as [ cos(theta/2) e*sin(theta/2) ]

    Legacy variant of quat(): builds the rotation matrix as E * Eb.T.
    """
    constr = numpy.matrix
    types = set([type(q) for q in [q0,q1,q2,q3]])
    #if not(types.isdisjoint(casadiTypes)):
    #  constr = c.SX
    E = constr([[-q1, q0, -q3, q2],[-q2, q3, q0, -q1],[-q3,-q2,q1,q0]])
    Eb = constr([[-q1, q0, q3, -q2],[-q2, -q3, q0, q1],[-q3,q2,-q1,q0]])
    # Wrap in a casadi type only when any input is symbolic.
    if not(types.isdisjoint(casadiTypes)):
        constr = c.SX
    return constr(numpy.dot(E,Eb.T))
def fullR(R_0_0,R_1_0,R_2_0, R_0_1, R_1_1, R_2_1, R_0_2, R_1_2, R_2_2):
    """Assemble a 3x3 rotation matrix from its nine entries R_row_col."""
    entries = [R_0_0, R_1_0, R_2_0, R_0_1, R_1_1, R_2_1, R_0_2, R_1_2, R_2_2]
    # casadi constructor when any entry is symbolic, numpy otherwise.
    if set(type(e) for e in entries).isdisjoint(casadiTypes):
        make = numpy.matrix
    else:
        make = c.SX
    return make([[R_0_0, R_0_1, R_0_2],
                 [R_1_0, R_1_1, R_1_2],
                 [R_2_0, R_2_1, R_2_2]])
def TfullR(R_0_0,R_1_0,R_2_0, R_0_1, R_1_1, R_2_1, R_0_2, R_1_2, R_2_2):
    """Homogeneous (4x4) version of fullR."""
    R = fullR(R_0_0, R_1_0, R_2_0, R_0_1, R_1_1, R_2_1, R_0_2, R_1_2, R_2_2)
    return R2T(R)
def origin():
    """Identity transform: homogeneous translation by (0, 0, 0)."""
    return tr(0, 0, 0)
def trp(T):
    """Translational part (3x1 column) of the homogeneous transform T."""
    M = numpy.matrix(T)
    return M[0:3, 3]
def kin_inv(T):
    # Inverse of a homogeneous transform: [R p; 0 1]^-1 = [R.T -R.T*p; 0 1].
    R=numpy.matrix(T2R(T).T)
    constr = numpy.matrix
    # Keep the casadi type when T is symbolic.
    if type(T) in casadiTypes:
        constr = c.SX
    return constr(vstack((hstack((R,-numpy.dot(R,trp(T)))),numpy.matrix([0,0,0,1]))))
def vectorize(vec):
    """
    Make sure the result is something you can index with single index
    """
    if hasattr(vec, "shape"):
        rows = vec.shape[0]
        cols = vec.shape[1]
        if rows > 1 and cols > 1:
            raise Exception("vectorize: got real matrix instead of vector like thing: %s" % str(vec))
        if cols > 1:
            # Row vector: turn it into a column so tolist() yields one
            # element per row below.
            vec = vec.T
    if hasattr(vec, "tolist"):
        vec = [row[0] for row in vec.tolist()]
    return vec
def skew(vec):
    """3x3 skew-symmetric matrix S(v) of vec, so that S(v)*w == v x w."""
    elems = vectorize(vec)
    x = elems[0]
    y = elems[1]
    z = elems[2]
    # casadi constructor when any component is symbolic, numpy otherwise.
    if set([type(q) for q in [x, y, z]]).isdisjoint(casadiTypes):
        make = numpy.matrix
    else:
        make = c.SX
    return make([[0, -z, y],
                 [z, 0, -x],
                 [-y, x, 0]])
def invskew(S):
    # Inverse of skew(): recover the 3-vector [x, y, z] from a skew-symmetric
    # matrix by reading the three independent entries.
    return c.SX([S[2,1],S[0,2],S[1,0]])
def cross(a,b):
    # Cross product a x b expressed as skew(a) * b (works with casadi symbolics).
    return c.mul(skew(a),b)
def T2R(T):
    """Extract the 3x3 rotational block of a homogeneous transform T."""
    rotation = T[0:3, 0:3]
    return rotation
def R2T(R):
    """
    Pack a rotational matrix in a homogenous form

    Embeds the 3x3 matrix R in a 4x4 transform with zero translation.
    """
    constr = numpy.matrix
    # Keep the casadi type when R is symbolic.
    if type(R) in casadiTypes:
        constr = c.SX
    T = constr([[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,1.0]])
    T[:3,:3] = R
    return T
def T2w():
    """
    skew(w_100) = T2w(T_10)
    """
    # NOTE(review): this function has no body and returns None — it appears
    # to be an unfinished stub (see T2W below for an implemented variant).
    # Confirm before relying on it.
def T2W(T,p,dp):
    """
    w_101 = T2W(T_10,p,dp)

    Angular velocity (body frame) from a transform T that depends on the
    symbols p, given their time derivatives dp.
    """
    R = T2R(T)
    # dR = (dR/dp) * dp, reshaped back to 3x3.
    dR = c.reshape(c.mul(c.jacobian(R,p),dp),(3,3))
    # R.T * dR is skew-symmetric; extract the angular velocity vector.
    return invskew(c.mul(R.T,dR))
def quatDynamics(q0,q1,q2,q3):
"""
dot(q) = quatDynamics(q)*w_101
"""
B = numpy.matrix([[q3,-q2,q1],[q2,q3,-q0],[-q1,q0,q3],[-q0,-q1,-q2]])*0.5
return B
def T2WJ(T,p):
    """
    w_101 = T2WJ(T_10,p).diff(p,t)

    Jacobian form of T2W: returns the 3-by-len(p) matrix that maps dp/dt to
    the body-frame angular velocity.
    """
    R = T2R(T)
    RT = R.T
    temp = []
    # Each angular velocity component comes from one independent entry of the
    # skew-symmetric matrix R.T * dR: indices (2,1), (0,2), (1,0).
    for i,k in [(2,1),(0,2),(1,0)]:
        #temp.append(c.mul(c.jacobian(R[:,k],p).T,R[:,i]).T)
        temp.append(c.mul(RT[i,:],c.jacobian(R[:,k],p)))
    return c.vertcat(temp)
class QuadcopterAnalysis:
    """Discretization and noise settings for the quadcopter analysis."""

    def __init__(self):
        # Number of control intervals.
        self.N = 20
        # Collocation points on the unit interval [0, 1].
        self.tau_root = [0, 0.155051, 0.644949, 1.000000]
        # Noise standard deviation as a fraction of the nominal value.
        self.noiserel = 0.05
class QuadcopterModel:
    def __init__(self,NR=4,debug=False,quatnorm=False):
        """
        Keyword arguments:
          NR       -- the number of rotors
          debug    -- wether to print out debug info
          quatnorm -- add the quaternion norm to the DAE rhs
        """
        # ----------- system states and their derivatives ----
        pos = struct_symSX(["x","y","z"]) # rigid body centre of mass position [m] {0}
        v = struct_symSX(["vx","vy","vz"]) # rigid body centre of mass position velocity [m/s] {0}
        # NOTE(review): this reassignment fixes NR to 4 and silently ignores
        # the NR constructor argument — confirm whether that is intended.
        NR = 4 # Number of rotors
        states = struct_symSX([
            entry("p",struct=pos),
            entry("v",struct=v),
            entry("q",shape=4), # quaternions {0} -> {1}
            entry("w",shape=3), # rigid body angular velocity w_101 [rad/s] {1}
            entry("r",shape=NR) # spin speed of rotor, wrt to platform. [rad/s] Should be positive!
            # The signs are such that positive means lift generating, regardless of spin direction.
        ])
        pos, v, q, w, r = states[...]
        # ------------------------------------------------
        dist = struct_symSX([
            entry("Faer",shape=NR), # Disturbance on aerodynamic forcing [N]
            entry("Caer",shape=NR) # Disturbance on aerodynamic torques [Nm]
        ])
        # ----------------- Controls ---------------------
        controls = struct_symSX([
            entry("CR",shape=NR) # [Nm]
            # Torques of the motors that drive the rotors, acting from platform on propeller
            # The torque signs are always positive when putting energy in the propellor,
            # regardless of spin direction.
            #
        ])
        CR = controls["CR"]
        # ------------------------------------------------
        # ---------------- Temporary symbols --------------
        F = ssym("F",3) # Forces acting on the platform in {1} [N]
        C = ssym("C",3) # Torques acting on the platform in {1} [Nm]
        rotors_Faer = [ssym("Faer_%d" %i,3,1) for i in range(NR)] # Placeholder for aerodynamic force acting on propeller {1} [N]
        rotors_Caer = [ssym("Caer_%d" %i,3,1) for i in range(NR)] # Placeholder for aerodynamic torques acting on propeller {1} [Nm]
        # ---------------------------------------------------
        # ----------------- Parameters ---------------------
        rotor_model = struct_symSX([
            "c", # c Cord length [m]
            "R", # R Radius of propeller [m]
            "CL_alpha", # CL_alpha Lift coefficient [-]
            "alpha_0", # alpha_0
            "CD_alpha", # CD_alpha Drag coefficient [-]
            "CD_i", # CD_i Induced drag coefficient [-]
        ])
        p = struct_symSX([
            entry("rotors_model",repeat=NR,struct=rotor_model), # Parameters that describe the rotor model
            entry("rotors_I",repeat=NR,shape=sp_diag(3)), # Inertias of rotors [kg.m^2]
            entry("rotors_spin",repeat=NR), # Direction of spin from each rotor. 1 means rotation around positive z.
            entry("rotors_p",repeat=NR,shape=3), # position of rotors in {1} [m],
            entry("I",sym=casadi.diag(ssym("[Ix,Iy,Iz]"))), # Inertia of rigid body [kg.m^2]
            "m", # Mass of the whole system [kg]
            "g", # gravity [m/s^2]
            "rho", # Air density [kg/m^3]
        ])
        I,m,g,rho = p[["I","m","g","rho"]]
        # --------------------------------------------------
        # ----------------- Parameters fillin's ---------------------
        # Concrete numeric values for all model parameters.
        p_ = p()
        p_["rotors_spin"] = [1,-1,1,-1]
        p_["rotors_model",:,{}] = { "c": 0.01, "R" : 0.127, "CL_alpha": 6.0, "alpha_0": 0.15, "CD_alpha": 0.02, "CD_i": 0.05} # c Cord length [m]
        p_["m"] = 0.5 # [kg]
        p_["g"] = 9.81 # [N/kg]
        p_["rho"] = 1.225 # [kg/m^3]
        L = 0.25
        I_max = p_["m"] * L**2 # Inertia of a point mass at a distance L
        I_ref = I_max/5
        p_["I"] = casadi.diag([I_ref/2,I_ref/2,I_ref]) # [N.m^2]
        # Rotors sit on a cross of arm length L in the platform's x/y plane.
        p_["rotors_p",0] = DMatrix([L,0,0])
        p_["rotors_p",1] = DMatrix([0,L,0])
        p_["rotors_p",2] = DMatrix([-L,0,0])
        p_["rotors_p",3] = DMatrix([0,-L,0])
        for i in range(NR):
            R_ = p_["rotors_model",i,"R"] # Radius of propeller [m]
            m_ = 0.01 # Mass of a propeller [kg]
            I_max = m_ * R_**2 # Inertia of a point mass
            I_ref = I_max/5
            p_["rotors_I",i] = casadi.diag([I_ref/2,I_ref/2,I_ref])
        if debug:
            print p.vecNZcat()
        dist_ = dist(0)
        # ----------------- Scaling ---------------------
        # Scaling factors used to normalize states/controls/disturbances.
        scaling_states = states(1)
        scaling_controls = controls(1)
        scaling_states["r"] = 500
        scaling_controls["CR"] = 0.005
        scaling_dist = dist()
        scaling_dist["Faer"] = float(p_["m"]*p_["g"]/NR)
        scaling_dist["Caer"] = 0.0026
        # ----------- Frames ------------------
        T_10 = mul(tr(*pos),Tquat(*q))
        T_01 = kin_inv(T_10)
        R_10 = T2R(T_10)
        R_01 = R_10.T
        # -------------------------------------
        dstates = struct_symSX(states)
        dp,dv,dq,dw,dr = dstates[...]
        res = struct_SX(states) # DAE residual hand side
        # ----------- Dynamics of the body ----
        res["p"] = v - dp
        # Newton, but transform the force F from {1} to {0}
        res["v"] = mul(R_10,F) - m*dv
        # Kinematics of the quaterion.
        res["q"] = mul(quatDynamics(*q),w)-dq
        # This is a trick by Sebastien Gros to stabilize the quaternion evolution equation
        res["q"] += -q*(sumAll(q**2)-1)
        # Agular impulse H_1011
        H = mul(p["I"],w) # Due to platform
        for i in range(NR):
            H+= mul(p["rotors_I",i], w + vertcat([0,0,p["rotors_spin",i]*r[i]])) # Due to rotor i
        # Total time derivative of H via the chain rule plus the w x H term.
        dH = mul(jacobian(H,w),dw) + mul(jacobian(H,q),dq) + mul(jacobian(H,r),dr) + casadi.cross(w,H)
        res["w"] = C - dH
        for i in range(NR):
            res["r",i] = CR[i] + p["rotors_spin",i]*rotors_Caer[i][2] - p["rotors_I",i][2]*(dr[i]+dw[2]) # Dynamics of rotor i
        # ---------------------------------
        # Make a vector of f ?
        #if quatnorm:
        #  f = vertcat(f+[sumAll(q**2)-1])
        #else:
        #  f = vertcat(f)
        # ------------ Force model ------------
        Fg = mul(R_01,vertcat([0,0,-g*m]))
        F_total = Fg + sum(rotors_Faer) # Total force acting on the platform
        C_total = SX([0,0,0]) # Total torque acting on the platform
        for i in range(NR):
            C_total[:2] += rotors_Caer[i][:2] # The x and y components propagate
            C_total[2] -= p["rotors_spin",i]*CR[i] # the z compent moves through a serparate system
            C_total += casadi.cross(p["rotors_p",i],rotors_Faer[i]) # Torques due to thrust
        res = substitute(res,F,F_total)
        res = substitute(res,C,C_total)
        subs_before = []
        subs_after = []
        v_global = mul(R_01,v)
        u_z = SX([0,0,1])
        # Now fill in the aerodynamic forces
        for i in range(NR):
            c,R,CL_alpha,alpha_0, CD_alpha, CD_i = p["rotors_model",i,...]
            #Bristeau P-jean, Martin P, Salaun E, Petit N. The role of propeller aerodynamics in the model of a quadrotor UAV. In: Proceedings of the European Control Conference 2009.; 2009:683-688.
            v_local = v_global + (casadi.cross(w,p["rotors_p",i])) # Velocity at rotor i
            rotors_Faer_physics = (rho*c*R**3*r[i]**2*CL_alpha*(alpha_0/3.0-v_local[2]/(2.0*R*r[i]))) * u_z
            subs_before.append(rotors_Faer[i])
            subs_after.append(rotors_Faer_physics + dist["Faer",i])
            rotors_Caer_physics = -p["rotors_spin",i]*rho*c*R**4*r[i]**2*(CD_alpha/4.0+CD_i*alpha_0**2*(alpha_0/4.0-2.0*v_local[2]/(3.0*r[i]*R))-CL_alpha*v_local[2]/(r[i]*R)*(alpha_0/3.0-v_local[2]/(2.0*r[i]*R))) * u_z
            subs_before.append(rotors_Caer[i])
            subs_after.append(rotors_Caer_physics + dist["Caer",i])
        res = substitute(res,veccat(subs_before),veccat(subs_after))
        # Make an explicit ode
        rhs = - casadi.solve(jacobian(res,dstates),substitute(res,dstates,0))
        # --------------------------------------
        # Residual/rhs variants: *_w keeps disturbances symbolic, no suffix
        # substitutes nominal (zero) disturbances, trailing _ also substitutes
        # numeric parameters.
        self.res_w = res
        self.res = substitute(res,dist,dist_)
        self.res_ = substitute(self.res,p,p_)
        resf = SXFunction([dstates, states, controls ],[self.res_])
        resf.init()
        self.resf = resf
        self.rhs_w = rhs
        self.rhs = substitute(rhs,dist,dist_)
        self.rhs_ = substitute(self.rhs,p,p_)
        t = SX("t")
        # We end up with a DAE that captures the system dynamics
        dae = SXFunction(daeIn(t=t,x=states,p=controls),daeOut(ode=self.rhs_))
        dae.init()
        self.dae = dae
        cdae = SXFunction(controldaeIn(t=t, x=states, u= controls,p=p),daeOut(ode=self.rhs))
        cdae.init()
        self.cdae = cdae
        # Expose the symbolic structures for use by the OCP setup code below.
        self.states = states
        self.dstates = dstates
        self.p = p
        self.p_ = p_
        self.controls = controls
        self.NR = NR
        self.w = dist
        self.w_ = dist_
        self.t = t
        self.states_ = states()
        self.dstates_ = states()
        self.controls_ = controls()
        self.scaling_states = scaling_states
        self.scaling_controls = scaling_controls
        self.scaling_dist = scaling_dist
# ---------------------------------------------------------------------------
# Top-level script: build a periodic collocation-based OCP for the quadcopter
# model, including a covariance (P) propagation for the linearized dynamics.
# ---------------------------------------------------------------------------
model = QuadcopterModel()
analysis = QuadcopterAnalysis()

# Shorthands for the model's symbolic structures.
controls = model.controls
states = model.states
dstates = model.dstates
par = model.p
cdae = model.cdae
res = model.res
res_w = model.res_w
rhs = model.rhs
rhs_w = model.rhs_w
dist = model.w
dist_ = model.w_
scaling_states = model.scaling_states
scaling_controls = model.scaling_controls
scaling_dist = model.scaling_dist
scaling_P = mul(scaling_states.cat,scaling_states.cat.T)
scaling_K = DMatrix.ones(controls.size,states.size)
# Number of control intervals
N = 4 #analysis.N
# Place were time is shifted
Ns = N/2
# Initial guess for time duration of one period
tf = 10
# get system dimensions
ns = states.shape[0]
nu = controls.shape[0]
waypoints = DMatrix([[-2,0,2],[2,0,1]]).T
# Formulate the estimation problem using a collocation approach
# Choose collocation points
tau_root = analysis.tau_root
# Degree of interpolating polynomial + 1
d = len(tau_root)
tau = SX("tau")
#Lf = ssym("L",ns*(ns+1)/2)
Lf = ssym("L",ns*ns)
L = SX(sp_dense(ns,ns),Lf.data())
L2P = SXFunction([Lf],[L])
L2P.init()
# Decision variables of the NLP: states at every collocation node, controls,
# feedback gains K, covariances P per interval, plus auxiliary scalars.
optvar = struct_symMX([
    (
        entry("X",repeat=[N,d],struct=states),
        entry("U",repeat=N,struct=controls),
        entry("K",repeat=N,shapestruct=(controls,states)),
        entry("P",repeat=N,shapestruct=(states,states)),
    ),
    entry("z",shape=(states.shape[0],states.shape[0]-1)),
    entry("umean"),
    entry("rmean"),
    "T",
    "Tw"
])
#print optvar["X",0,0,"p"]
#raise Exception("0")
par = struct_symMX([
    entry("Xref",shape=waypoints.shape),
    entry("model",struct=model.p)
])
T,Tw = optvar[["T","Tw"]]
print "number of variables", optvar.shape
# A vertical stack of d-by-1 Langrange functions that together form a basis on the collocation interval tau = [0..1]
Le = vertcat([numpy.prod([(tau - tau_root[r])/(tau_root[j]-tau_root[r]) for r in range(d) if not(r==j)]) for j in range(d)])
L = SXFunction([tau],[Le])
L.init()
dL = L.jacobian(0,0)
dL.init()
L.input().set(1)
L.evaluate()
Lend = DMatrix(L.output()) # Le at the end of the control interval
dLm = numSample1D(dL,DMatrix(tau_root).T) # d-by-d
resf = SXFunction(customIO(t=model.t, x=states, dx= dstates, u=controls, p=model.p,w=model.w),[model.res_w])
resf.init()
def linear_combination(v,w):
    # Dot product of two equally long sequences of (possibly symbolic) terms.
    return sum([i*j for i,j in zip(v,w)])
### LPDE -- start
# Disturbances have a standard deviation of 5% with respect to their nominal value
Sigma = c.diag(vertcat([scaling_dist,scaling_states])**2)*analysis.noiserel
K = ssym("K",controls.size,states.size)
u = ssym("u",nu)
x = ssym("x",ns)
zj = [ ssym("z",ns) for i in range(d)]
z = vertcat(zj)
w = ssym("w",model.w.size)
# G collects the collocation residuals of one interval under feedback u=-K*x.
G = zj[0]-x
delta = ssym("delta")
for j in range(1,d):
    dX = linear_combination(zj,dLm[:,j])
    [dyn] = resf.eval(customIO(
        t=0,
        x=zj[j]*scaling_states,
        dx=dX/delta*scaling_states,
        u=(-mul(K,x))*scaling_controls,
        p=model.p,
        w=w
    ))
    G.append(dyn)
F = mul(horzcat(zj),Lend)
Gf = SXFunction(customIO(x=x,z=z,w=w,p=model.p,K=K,delta=delta),[G,jacobian(G,x),jacobian(G,z),jacobian(G,model.w)])
Gf.init()
Ff = SXFunction([z],[F,jacobian(F,z)])
Ff.init()
# Sparse linear solver used to eliminate the collocation variables z.
linsol = CSparse(Gf.output(2).sparsity())
linsol.init()
A = msym("A",Gf.output(2).sparsity())
b = msym("b",Ff.output(1).sparsity())
linsys = MXFunction([A,b],[-linsol.solve(A,b,False)])
linsys.init()
### LPDE -- end
Ff = SXFunction([z],[F,jacobian(F,z)])
Ff.init()
par_ = par()
par_["Xref"] = waypoints
# Physical times at control intervals
ts = vertcat([c.linspace(0,Tw,Ns+1),c.linspace(Tw,T,N-Ns+1)[1:]])
# Local speed of time
dts = ts[1:]-ts[:-1]
# Physical times at collocation points
tsc = []
dLm = MX(dLm)
# Collect the equality constraints
coupling = []
collocation = []
dynamics_lpde = []
scaling_K = MX(scaling_K)
scaling_P = MX(scaling_P)
scaling_statesMX = MX(scaling_states)
scaling_controlsMX = MX(scaling_controls)
SigmaMX = MX(Sigma)
W = MX(DMatrix.zeros(model.w.size))
# P propagation function -- start
# Builds Pn = Phix P Phix' + Phiw Sigma Phiw', the covariance update over one
# control interval of the linearized closed-loop dynamics.
K = msym("K",controls.size,states.size)
P = msym("K",states.size,states.size)
u = msym("u",nu)
x = msym("x",ns)
z = msym("z",ns*d)
w = msym("w",model.w.size)
delta = msym("delta")
p = msym("p",model.p.size)
G,Gx,Gz,Gw = Gf.call(customIO(x=x,z=z,w=w,p=p,K=K,delta=delta))
F, Fz = Ff.call([z])
[M] = linsys.call([Gz,Fz])
Phix = mul(M,Gx)
Phiw = mul(M,horzcat([Gw,Gx]))
Pn = mul([Phix,P,Phix.T]) + mul([Phiw,SigmaMX,Phiw.T])
Pnf = MXFunction(customIO(x=x,z=z,w=w,p=p,K=K,delta=delta,P=P),[Pn])
Pnf.init()
# P propagation function -- end
for k in range(N):
    if k+1 < N: # Constraint coupling state at end of k and start of k+1
        coupling.append(optvar["X",k+1,0]-mul(optvar["X",k,horzcat],Lend))
        #G,Gx,Gz,Gw = Gf.call(customIO(x=optvar["X",k,0],z=optvar["X",k,vertcat],w=W,p=par["model"],K=optvar["K",k],delta=ts[k+1]-ts[k]))
        #F, Fz = Ff.call([optvar["X",k,vertcat]])
        #[M] = linsys.call([Gz,Fz])
        #Phix = mul(M,Gx)
        #Phiw = mul(M,horzcat([Gw,Gx]))
        #Pn = mul([Phix,optvar["P",k],Phix.T]) + mul([Phiw,SigmaMX,Phiw.T])
        [Pn] = Pnf.call(customIO(x=optvar["X",k,0],z=optvar["X",k,vertcat],w=W,p=par["model"],K=optvar["K",k],delta=ts[k+1]-ts[k],P=optvar["P",k]))
        dynamics_lpde.append(optvar["P",k+1]-Pn)
    tsc.append(ts[k])
    collocation.append([])
    for j in range(1,d):
        tsc.append(ts[k] + (ts[k+1]-ts[k]) * tau_root[j]) # The physical time
        dX = linear_combination(optvar["X",k,:],dLm[:,j])
        [dyn] = resf.call(customIO(
            t=tsc[-1],
            x=optvar["X",k,j]*scaling_statesMX,
            dx=dX/dts[k]*scaling_statesMX,
            u=optvar["U",k]*scaling_controlsMX,
            p=par["model"],
            w=W
        ))
        collocation[-1].append(dyn)
tsf = MXFunction([optvar],[ts])
tsf.init()
tscf = MXFunction([optvar],[vertcat(tsc)])
tscf.init()
# Memory consumption blows up when building up g
C = sumRows(states["q"]**2) - 1
Cmx = sumRows(optvar["X",0,0,"q"]**2) - 1
J = jacobian(C,states)
J = SXFunction([states],[J])
J.init()
[J] = J.call([optvar["X",0,0]])
z = optvar["z"]
# Full NLP constraint vector.
g = struct_MX([
    (
        entry("coupling",expr=coupling),
        entry("collocation",expr=collocation),
        entry("dynamics_lpde",expr=dynamics_lpde),
    ),
    entry("quatnorm",expr=Cmx), # Add the quaternion norm constraint at the start
    entry("Jz",expr=mul(J,z)), # Null space magic by Julia and Sebastien
    entry("zz",expr=mul(z.T,z)-DMatrix.eye(states.shape[0]-1)), # Null space magic by Julia and Sebastien
    entry("p",expr=mul(z.T,optvar["X",0,0]-optvar["X",-1,-1])), # periodicity on all states
    entry("obstacle",expr=optvar["X",:,:,lambda x: x[0]**2+x[1]**2,"p"]),
    entry("T-Tw",expr=T-Tw),
    entry("init",expr=optvar["X",0,0,"p"]*scaling_states["p"]-par["Xref",:,0]),# Initial state
    entry("waypoint",expr=optvar["X",N/2,0,"p"]*scaling_states["p"] - par["Xref",:,1])
])
# Objective function
f = T
# Favor control actions that don't deviate to much from a common mean
f += 0.01 * sumAll( (optvar["U",horzcat].T-optvar["umean"])**2 ) # control regularisation
# Favor positions close to unit quaternion
q0 = DMatrix([0,0,0,1])
f += 0.01 * sum( [ sumAll(q - q0)**2 for q in optvar["X",:,0,"q"] ] )
# Favor small angular velocities
f += 0.01 * sumAll( (optvar["X",horzcat,:,0,"w"].T)**2 )
nl = MXFunction(nlpIn(x=optvar,p=par),nlpOut(f=f,g=g))
nl.setOption("verbose",True)
nl.init()
jacG = nl.jacobian("x","g")
jacG.init()
print nl
| ghorn/debian-casadi | experimental/joris/expensive.py | Python | lgpl-3.0 | 22,968 |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: MIT. See LICENSE
import frappe
import frappe.utils
from frappe.utils import get_url_to_form
from frappe.model import log_types
from frappe import _
from itertools import groupby
@frappe.whitelist()
def update_follow(doctype, doc_name, following):
	"""Toggle the session user's follow state for one document.

	Delegates to ``follow_document`` when *following* is truthy, otherwise
	to ``unfollow_document``; returns whatever the delegate returns.
	"""
	action = follow_document if following else unfollow_document
	return action(doctype, doc_name, frappe.session.user)
@frappe.whitelist()
def follow_document(doctype, doc_name, user):
	"""Make *user* follow the document identified by (doctype, doc_name).

	Following is skipped for communication/system doctypes and log types,
	for doctypes without change tracking, for the Administrator account,
	and for users who disabled follow notifications.  Returns the newly
	created "Document Follow" doc, or None when nothing was created.
	"""
	# These doctypes (and all log types) never generate follow entries.
	excluded_doctypes = ("Communication", "ToDo", "Email Unsubscribe", "File",
		"Comment", "Email Account", "Email Domain")
	if doctype in excluded_doctypes or doctype in log_types:
		return
	# Following is only meaningful when changes are tracked; the
	# Administrator account is excluded by convention.
	if not frappe.get_meta(doctype).track_changes or user == "Administrator":
		return
	if not frappe.db.get_value("User", user, "document_follow_notify", ignore=True, cache=True):
		return
	if is_document_followed(doctype, doc_name, user):
		return
	follow = frappe.new_doc("Document Follow")
	follow.update({
		"ref_doctype": doctype,
		"ref_docname": doc_name,
		"user": user
	})
	follow.save()
	return follow
@frappe.whitelist()
def unfollow_document(doctype, doc_name, user):
	"""Remove *user*'s follow on (doctype, doc_name).

	Returns 1 when a follow record was deleted, 0 when none existed.
	"""
	existing = frappe.get_all(
		"Document Follow",
		filters={
			"ref_doctype": doctype,
			"ref_docname": doc_name,
			"user": user
		},
		fields=["name"],
		limit=1
	)
	if not existing:
		return 0
	frappe.delete_doc("Document Follow", existing[0].name)
	return 1
def get_message(doc_name, doctype, frequency, user):
	"""Collect version changes and comments for one document, newest first."""
	activity = get_version(doctype, doc_name, frequency, user)
	activity += get_comments(doctype, doc_name, frequency, user)
	return sorted(activity, key=lambda entry: entry["time"], reverse=True)
def send_email_alert(receiver, docinfo, timeline):
	"""Mail the document-follow digest to *receiver*; no-op when falsy."""
	if not receiver:
		return
	frappe.sendmail(
		subject=_("Document Follow Notification"),
		recipients=[receiver],
		template="document_follow",
		args={
			"docinfo": docinfo,
			"timeline": timeline,
		}
	)
def send_document_follow_mails(frequency):
	"""Send one digest mail per user whose follow frequency matches.

	Groups every "Document Follow" record by user, gathers recent version
	changes and comments for each followed document, and mails a single
	digest to each user who still has notifications enabled.
	"""
	follows = frappe.get_list("Document Follow", fields=["*"])
	# groupby() needs its input pre-sorted on the grouping key
	follows.sort(key=lambda row: row["user"])
	grouped_by_user = {
		user: list(rows)
		for user, rows in groupby(follows, key=lambda row: row["user"])
	}
	for user, user_follows in grouped_by_user.items():
		if frappe.db.get_value("User", user, "document_follow_frequency") != frequency:
			continue
		message = []
		valid_document_follows = []
		for follow in user_follows:
			content = get_message(follow.ref_docname, follow.ref_doctype, frequency, user)
			if content:
				message = message + content
				valid_document_follows.append({
					"reference_docname": follow.ref_docname,
					"reference_doctype": follow.ref_doctype,
					"reference_url": get_url_to_form(follow.ref_doctype, follow.ref_docname)
				})
		if message and frappe.db.get_value("User", user, "document_follow_notify", ignore=True):
			send_email_alert(user, valid_document_follows, message)
def get_version(doctype, doc_name, frequency, user):
	"""Build timeline entries from the Version records of one document.

	A single Version's JSON payload may carry field changes, child-row
	changes and added rows at the same time; all of them are collected.
	(Previously only the last kind found was kept because the same local
	variable was reassigned, and "modified" was requested twice in the
	field list.)
	"""
	timeline = []
	filters = get_filters("docname", doc_name, frequency, user)
	version = frappe.get_all("Version",
		filters=filters,
		fields=["ref_doctype", "data", "modified", "modified_by"]
	)
	for v in version:
		change = frappe.parse_json(v.data)
		time = frappe.utils.format_datetime(v.modified, "hh:mm a")
		# Accumulate every change kind instead of overwriting.
		if change.changed:
			timeline += get_field_changed(change.changed, time, doctype, doc_name, v)
		if change.row_changed:
			timeline += get_row_changed(change.row_changed, time, doctype, doc_name, v)
		if change.added:
			timeline += get_added_row(change.added, time, doctype, doc_name, v)
	return timeline
def get_comments(doctype, doc_name, frequency, user):
	"""Build timeline entries from recent comments/likes on one document."""
	from html2text import html2text
	timeline = []
	filters = get_filters("reference_name", doc_name, frequency, user)
	comments = frappe.get_all("Comment",
		filters=filters,
		fields=["content", "modified", "modified_by", "comment_type"]
	)
	for comment in comments:
		# The bold tags were previously never closed ("<b>{0}<b>"),
		# producing broken markup in the digest mail.
		if comment.comment_type == "Like":
			by = ''' By : <b>{0}</b>'''.format(comment.modified_by)
		elif comment.comment_type == "Comment":
			by = '''Commented by : <b>{0}</b>'''.format(comment.modified_by)
		else:
			by = ''
		time = frappe.utils.format_datetime(comment.modified, "hh:mm a")
		timeline.append({
			"time": comment.modified,
			"data": {
				"time": time,
				"comment": html2text(str(comment.content)),
				"by": by
			},
			"doctype": doctype,
			"doc_name": doc_name,
			"type": "comment"
		})
	return timeline
def is_document_followed(doctype, doc_name, user):
	"""Return truthy when *user* already follows (doctype, doc_name)."""
	follow_filters = {
		"ref_doctype": doctype,
		"ref_docname": doc_name,
		"user": user,
	}
	return frappe.db.exists("Document Follow", follow_filters)
@frappe.whitelist()
def get_follow_users(doctype, doc_name):
	"""List the users following the given document (one dict per follower)."""
	follow_filters = {"ref_doctype": doctype, "ref_docname": doc_name}
	return frappe.get_all("Document Follow",
		filters=follow_filters,
		fields=["user"])
def get_row_changed(row_changed, time, doctype, doc_name, v):
	"""Build timeline entries for child-table row changes of one Version.

	Each *row_changed* entry looks like
	``[table_field, row_index, row_name, [[fieldname, old, new], ...]]``.
	"""
	from html2text import html2text
	items = []
	for d in row_changed:
		# Blank out falsy values so html2text never renders "None".
		# The original sanitised the unused d[2] instead of the new value
		# d[3][0][2] (copy/paste slip); both old and new values are
		# sanitised here, mirroring get_field_changed().
		d[0] = d[0] if d[0] else ' '
		d[3][0][1] = d[3][0][1] if d[3][0][1] else ' '
		d[3][0][2] = d[3][0][2] if d[3][0][2] else ' '
		items.append({
			"time": v.modified,
			"data": {
				"time": time,
				"table_field": d[0],
				"row": str(d[1]),
				"field": d[3][0][0],
				"from": html2text(str(d[3][0][1])),
				"to": html2text(str(d[3][0][2]))
			},
			"doctype": doctype,
			"doc_name": doc_name,
			"type": "row changed",
			"by": v.modified_by
		})
	return items
def get_added_row(added, time, doctype, doc_name, v):
	"""Build timeline entries for child-table rows added in one Version.

	Each entry of *added* is a list whose first element names the child
	table the row was appended to.
	"""
	return [
		{
			"time": v.modified,
			"data": {
				"to": row[0],
				"time": time
			},
			"doctype": doctype,
			"doc_name": doc_name,
			"type": "row added",
			"by": v.modified_by
		}
		for row in added
	]
def get_field_changed(changed, time, doctype, doc_name, v):
	"""Build timeline entries for top-level field changes of one Version.

	Each entry of *changed* is ``[fieldname, old_value, new_value]``.
	"""
	from html2text import html2text
	entries = []
	for d in changed:
		# Blank out falsy values so html2text never renders "None".
		for idx in (0, 1, 2):
			d[idx] = d[idx] if d[idx] else ' '
		entries.append({
			"time": v.modified,
			"data": {
				"time": time,
				"field": d[0],
				"from": html2text(str(d[1])),
				"to": html2text(str(d[2]))
			},
			"doctype": doctype,
			"doc_name": doc_name,
			"type": "field changed",
			"by": v.modified_by
		})
	return entries
def send_hourly_updates():
	"""Scheduler hook: mail digests to users on the "Hourly" frequency."""
	send_document_follow_mails("Hourly")
def send_daily_updates():
	"""Scheduler hook: mail digests to users on the "Daily" frequency."""
	send_document_follow_mails("Daily")
def send_weekly_updates():
	"""Scheduler hook: mail digests to users on the "Weekly" frequency."""
	send_document_follow_mails("Weekly")
def get_filters(search_by, name, frequency, user):
	"""Build Version/Comment list filters for one document and frequency.

	*search_by* is the field holding the document name ("docname" for
	Version, "reference_name" for Comment).  Changes made by *user*
	themselves are excluded.  Returns [] for an unknown frequency.
	"""
	if frequency == "Weekly":
		start = frappe.utils.add_days(frappe.utils.nowdate(), -7)
		end = frappe.utils.nowdate()
	elif frequency == "Daily":
		start = frappe.utils.add_days(frappe.utils.nowdate(), -1)
		end = frappe.utils.nowdate()
	elif frequency == "Hourly":
		start = frappe.utils.add_to_date(frappe.utils.now_datetime(), hours=-1)
		end = frappe.utils.now_datetime()
	else:
		return []
	return [
		[search_by, "=", name],
		["modified", ">", start],
		["modified", "<", end],
		["modified_by", "!=", user]
	]
| frappe/frappe | frappe/desk/form/document_follow.py | Python | mit | 7,625 |
from django.views.generic import DetailView
from braces.views import SelectRelatedMixin
from django_filters.views import FilterView
from .models import JST
from .filters import JSTFilter
from foundation.offices.models import Office
from dal import autocomplete
class JSTListView(SelectRelatedMixin, FilterView):
    """Filterable, paginated list of JST (territorial unit) records."""
    filterset_class = JSTFilter
    model = JST
    paginate_by = 25

    def get_queryset(self, *args, **kwargs):
        # No extra filtering yet; kept as an explicit extension point.
        return super(JSTListView, self).get_queryset(*args, **kwargs)
class JSTDetailView(SelectRelatedMixin, DetailView):
    """Detail page for one JST, listing the offices in its area."""
    model = JST
    select_related = ['parent', ]

    def get_context_data(self, **kwargs):
        context = super(JSTDetailView, self).get_context_data(**kwargs)
        offices = Office.objects.for_user(self.request.user)
        context['office_list'] = offices.for_list().area(self.object).all()
        return context
class VoivodeshipAutocomplete(autocomplete.Select2QuerySetView):
    """Autocomplete over voivodeship-level JST records."""

    def get_queryset(self):
        queryset = JST.objects.voivodeship().all()
        if self.q:
            queryset = queryset.filter(name__istartswith=self.q)
        return queryset
class CountyAutocomplete(autocomplete.Select2QuerySetView):
    """Autocomplete over counties, scoped to a forwarded voivodeship."""

    def get_queryset(self):
        queryset = JST.objects.county().all()
        if self.q:
            queryset = queryset.filter(name__istartswith=self.q)
        voivodeship = self.forwarded.get('voivodeship', None)
        # Without a parent voivodeship there is nothing sensible to suggest.
        if not voivodeship:
            return queryset.none()
        return queryset.filter(parent=voivodeship)
class CommunityAutocomplete(autocomplete.Select2QuerySetView):
    """Autocomplete over communities, scoped to a forwarded county."""

    def get_queryset(self):
        queryset = JST.objects.community().all()
        if self.q:
            queryset = queryset.filter(name__istartswith=self.q)
        county = self.forwarded.get('county', None)
        # Without a parent county there is nothing sensible to suggest.
        if not county:
            return queryset.none()
        return queryset.filter(parent=county)
| ad-m/foundation-manager | foundation/teryt/views.py | Python | bsd-3-clause | 1,853 |
#
# Copyright (C) 2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
This script processes Jupyter notebooks. External Python scripts
can be inserted as new code cells (e.g. solutions to exercises).
Hidden solutions from the ``exercise2`` plugin can be converted
to code cells. The notebook may also be executed, if necessary
with modified global variables to reduce runtime. The processed
notebook can then be converted to HTML externally.
"""
import argparse
import nbformat
import re
import os
import ast
import sys
import uuid
sys.path.append('@CMAKE_SOURCE_DIR@/testsuite/scripts')
import importlib_wrapper as iw
def get_code_cells(nb):
    """Return the sources of all code cells in *nb*, in document order."""
    sources = []
    for cell in nb['cells']:
        if cell['cell_type'] == 'code':
            sources.append(cell['source'])
    return sources
def set_code_cells(nb, new_cells):
    """Replace the source of every code cell in *nb* with *new_cells*, in order."""
    position = 0
    for cell in nb['cells']:
        if cell['cell_type'] != 'code':
            continue
        cell['source'] = new_cells[position]
        position += 1
def add_cell_from_script(nb, filepath):
    """
    Create new code cell at the end of a notebook and populate it with
    the content of a script.
    """
    with open(filepath, encoding='utf-8') as f:
        code = f.read()
    # remove ESPResSo copyright header (raw strings: the patterns rely on
    # regex escapes such as \( and \d)
    m = re.search(r'# Copyright \(C\) [\d\-,]+ The ESPResSo project\n.+?'
                  r'If not, see <http://www\.gnu\.org/licenses/>\.\n', code, re.DOTALL)
    if m and all(x.startswith('#') for x in m.group(0).strip().split('\n')):
        # NOTE: re.sub()'s fourth *positional* argument is ``count``, not
        # ``flags``; MULTILINE must be passed by keyword so that '^'
        # anchors at every line start when stripping leftover '#' lines.
        code = re.sub(r'^(#\n)+', '', code.replace(m.group(0), ''), flags=re.M)
    # strip first component in relative paths
    code = re.sub(r'(?<=[\'\"])\.\./', './', code)
    # create new cells
    filename = os.path.relpath(os.path.realpath(filepath))
    if len(filename) > len(filepath):
        filename = filepath
    cell_md = nbformat.v4.new_markdown_cell(source='Solution from ' + filename)
    nb['cells'].append(cell_md)
    cell_code = nbformat.v4.new_code_cell(source=code.strip())
    nb['cells'].append(cell_code)
def remove_empty_cells(nb):
    """Drop whitespace-only cells, scanning backwards.

    The first cell (index 0) is deliberately left untouched, matching the
    original behaviour.
    """
    cells = nb['cells']
    for index in reversed(range(1, len(cells))):
        if not cells[index]['source'].strip():
            cells.pop(index)
def disable_plot_interactivity(nb):
    """
    Replace all occurrences of the magic command ``%matplotlib notebook``
    by ``%matplotlib inline``.
    """
    magic = re.compile('^%matplotlib +notebook', flags=re.M)
    for cell in nb['cells']:
        if cell['cell_type'] != 'code' or 'matplotlib' not in cell['source']:
            continue
        cell['source'] = magic.sub('%matplotlib inline', cell['source'])
def split_matplotlib_cells(nb):
    """
    If a cell imports matplotlib, split the cell to keep the
    import statement separate from the code that uses matplotlib.
    This prevents a known bug in the Jupyter backend which causes
    the plot object to be represented as a string instead of a canvas
    when created in the cell where matplotlib is imported for the
    first time (https://github.com/jupyter/notebook/issues/3523).
    """
    # Iterate backwards so inserting a new cell at i + 1 does not shift
    # the indices of cells that are still to be visited.
    for i in range(len(nb['cells']) - 1, -1, -1):
        cell = nb['cells'][i]
        if cell['cell_type'] == 'code' and 'matplotlib' in cell['source']:
            # Magics are not valid Python; shield them before ast.parse().
            code = iw.protect_ipython_magics(cell['source'])
            # split cells after matplotlib imports
            mapping = iw.delimit_statements(code)
            tree = ast.parse(code)
            visitor = iw.GetMatplotlibPyplot()
            visitor.visit(tree)
            if visitor.matplotlib_first:
                code = iw.deprotect_ipython_magics(code)
                lines = code.split('\n')
                # last line of the statement containing the first
                # matplotlib import; everything after it moves to a new cell
                lineno_end = mapping[visitor.matplotlib_first]
                split_code = '\n'.join(lines[lineno_end:]).lstrip('\n')
                if split_code:
                    new_cell = nbformat.v4.new_code_cell(source=split_code)
                    nb['cells'].insert(i + 1, new_cell)
                lines = lines[:lineno_end]
                nb['cells'][i]['source'] = '\n'.join(lines).rstrip('\n')
def convert_exercise2_to_code(nb):
    """
    Walk through the notebook cells and convert exercise2 Markdown cells
    containing fenced python code to exercise2 code cells.
    """
    for i, cell in enumerate(nb['cells']):
        metadata = cell['metadata']
        if 'solution2' in metadata:
            metadata['solution2'] = 'shown'
        # only solution cells (not the exercise prompt that carries
        # 'solution2_first') are candidates for conversion
        is_solution_md = (cell['cell_type'] == 'markdown'
                          and 'solution2' in metadata
                          and 'solution2_first' not in metadata)
        if not is_solution_md:
            continue
        lines = cell['source'].strip().split('\n')
        if lines[0].strip() == '```python' and lines[-1].strip() == '```':
            source = '\n'.join(lines[1:-1]).strip()
            new_cell = nbformat.v4.new_code_cell(source=source)
            new_cell['metadata'] = metadata
            new_cell['metadata']['solution2'] = 'shown'
            nb['cells'][i] = new_cell
def convert_exercise2_to_markdown(nb):
    """
    Walk through the notebook cells and convert exercise2 Python cells
    to exercise2 Markdown cells using a fenced code block.
    """
    for i, cell in enumerate(nb['cells']):
        metadata = cell['metadata']
        if 'solution2' in metadata:
            metadata['solution2'] = 'hidden'
        # convert solution code cells into markdown cells
        if cell['cell_type'] == 'code' and 'solution2' in metadata:
            fenced = '```python\n' + cell['source'] + '\n```'
            new_cell = nbformat.v4.new_markdown_cell(source=fenced)
            new_cell['metadata'] = metadata
            new_cell['metadata']['solution2'] = 'hidden'
            nb['cells'][i] = new_cell
def apply_autopep8(nb):
    """Format code cells and fenced solution cells with autopep8.

    The autopep8 options are read from the project's pre-commit
    configuration so notebooks are formatted like the rest of the code.
    """
    import yaml
    import autopep8

    def get_autopep8_options():
        # defaults used when the pre-commit config declares no autopep8 hook
        options = {'aggressive': 0, 'ignore': [], 'max_line_length': 120}
        with open('@CMAKE_SOURCE_DIR@/.pre-commit-config.yaml') as f:
            pre_config = yaml.safe_load(f)
        for repo in pre_config['repos']:
            for hook in repo['hooks']:
                if hook['id'] == 'autopep8':
                    for arg in hook['args']:
                        if arg == '--aggressive':
                            options['aggressive'] += 1
                        elif arg.startswith('--ignore='):
                            # take the value on the right of '=' (the comma
                            # separated error codes), not the option name
                            options['ignore'] = arg.split('=', 1)[1].split(',')
                    return options
        return options
    pep8_opts = get_autopep8_options()
    for cell in nb['cells']:
        source = None
        header = ''
        footer = ''
        if cell['cell_type'] == 'code':
            source = cell['source']
        elif cell['cell_type'] == 'markdown' and 'solution2' in cell['metadata']:
            # fenced python block inside a hidden exercise2 solution
            lines = cell['source'].strip().split('\n')
            if lines[0].strip() == '```python' and lines[-1].strip() == '```':
                source = '\n'.join(lines[1:-1])
                header = lines[0] + '\n'
                footer = '\n' + lines[-1]
        if source is not None:
            source = autopep8.fix_code(source, options=pep8_opts).strip()
            cell['source'] = header + source + footer
def execute_notebook(nb, src, cell_separator, notebook_filepath):
    """
    Run the notebook in a python3 kernel. The ESPResSo visualizers are
    disabled to prevent the kernel from crashing and to allow running
    the notebook in a CI environment.
    """
    import nbconvert.preprocessors
    # execute from the notebook's own directory so relative paths resolve
    notebook_dirname = os.path.dirname(notebook_filepath)
    # disable OpenGL/Mayavi GUI
    src_no_gui = iw.mock_es_visualization(src)
    # update notebook with new code
    set_code_cells(nb, src_no_gui.split(cell_separator))
    # execute notebook
    ep = nbconvert.preprocessors.ExecutePreprocessor(
        timeout=20 * 60, kernel_name='python3')
    ep.preprocess(nb, {'metadata': {'path': notebook_dirname}})
    # restore notebook with code before the GUI removal step
    # (the mocked sources must not end up in the published notebook)
    set_code_cells(nb, src.split(cell_separator))
def handle_ci_case(args):
    """Run the ``ci`` subcommand.

    Reads the input notebook, optionally appends solution scripts and
    converts exercise2 solutions to code cells, applies variable
    substitutions, optionally executes the notebook, and writes the
    processed copy to ``args.output`` (or ``<input>~`` by default).
    """
    notebook_filepath = args.input
    if args.output:
        notebook_filepath_edited = args.output
    else:
        notebook_filepath_edited = notebook_filepath + '~'
    # parse original notebook
    with open(notebook_filepath, encoding='utf-8') as f:
        nb = nbformat.read(f, as_version=4)
    # add new cells containing the solutions
    if args.scripts:
        for filepath in args.scripts:
            add_cell_from_script(nb, filepath)
    # convert solution cells to code cells
    if args.exercise2:
        convert_exercise2_to_code(nb)
    # remove empty cells (e.g. those below exercise2 cells)
    if args.remove_empty_cells:
        remove_empty_cells(nb)
    # disable plot interactivity
    disable_plot_interactivity(nb)
    # guard against a jupyter bug involving matplotlib
    split_matplotlib_cells(nb)
    if args.substitutions or args.execute:
        # substitute global variables
        # (a unique separator lets the joined sources be processed as one
        # string and split back into the same cells afterwards)
        cell_separator = f'\n##{uuid.uuid4().hex}\n'
        src = cell_separator.join(get_code_cells(nb))
        new_values = args.substitutions or []
        parameters = dict(x.split('=', 1) for x in new_values)
        src = iw.substitute_variable_values(src, strings_as_is=True,
                                            keep_original=False, **parameters)
        set_code_cells(nb, src.split(cell_separator))
        if args.execute:
            execute_notebook(nb, src, cell_separator, args.input)
    # write edited notebook
    with open(notebook_filepath_edited, 'w', encoding='utf-8') as f:
        nbformat.write(nb, f)
def handle_exercise2_case(args):
    """Run the ``exercise2`` subcommand: in-place Markdown <-> Python
    conversion of hidden solution cells, or autopep8 formatting."""
    with open(args.input, encoding='utf-8') as f:
        notebook = nbformat.read(f, as_version=4)
    if args.to_md:
        convert_exercise2_to_markdown(notebook)
    elif args.to_py:
        convert_exercise2_to_code(notebook)
    elif args.pep8:
        # round-trip through code cells so autopep8 can format the solutions
        convert_exercise2_to_code(notebook)
        apply_autopep8(notebook)
        convert_exercise2_to_markdown(notebook)
    # write edited notebook back to the same file
    with open(args.input, 'w', encoding='utf-8') as f:
        nbformat.write(notebook, f)
# Command-line interface: one subcommand per processing mode.
parser = argparse.ArgumentParser(description='Process Jupyter notebooks.',
                                 epilog=__doc__)
subparsers = parser.add_subparsers(help='Submodules')
# CI module
parser_ci = subparsers.add_parser(
    'ci', help='module for CI (variable substitution, code execution, etc.)')
parser_ci.add_argument('--input', type=str, required=True,
                       help='path to the original Jupyter notebook')
parser_ci.add_argument('--output', type=str,
                       help='path to the processed Jupyter notebook')
parser_ci.add_argument('--substitutions', nargs='*',
                       help='variables to substitute')
parser_ci.add_argument('--scripts', nargs='*',
                       help='scripts to insert in new cells')
parser_ci.add_argument('--exercise2', action='store_true',
                       help='convert exercise2 solutions into code cells')
parser_ci.add_argument('--remove-empty-cells', action='store_true',
                       help='remove empty cells')
parser_ci.add_argument('--execute', action='store_true',
                       help='run the notebook')
parser_ci.set_defaults(callback=handle_ci_case)
# exercise2 module
parser_exercise2 = subparsers.add_parser(
    'exercise2', help='module for exercise2 conversion (Markdown <-> Python)')
parser_exercise2.add_argument('input', type=str, help='path to the Jupyter '
                              'notebook (in-place conversion)')
# the three conversion modes are mutually exclusive and one is required
group_exercise2 = parser_exercise2.add_mutually_exclusive_group(required=True)
group_exercise2.add_argument('--to-md', action='store_true',
                             help='convert solution cells to Markdown')
group_exercise2.add_argument('--to-py', action='store_true',
                             help='convert solution cells to Python')
group_exercise2.add_argument('--pep8', action='store_true',
                             help='apply autopep8 formatting')
parser_exercise2.set_defaults(callback=handle_exercise2_case)
if __name__ == "__main__":
    # dispatch to the selected subcommand's handler
    args = parser.parse_args()
    args.callback(args)
| espressomd/espresso | doc/tutorials/convert.py | Python | gpl-3.0 | 12,717 |
"""
==========
ISOMAP neighbours parameter CV pipeline
==========
Use a pipeline to find the best neighbourhood size parameter for ISOMAP.
Adapted from:
http://scikit-learn.org/stable/auto_examples/decomposition/plot_kernel_pca.html#example-decomposition-plot-kernel-pca-py
http://scikit-learn.org/stable/auto_examples/grid_search_digits.html#example-grid-search-digits-py
"""
import numpy as np
import pickle
from optparse import OptionParser
from tables import *
from sklearn.manifold import Isomap
from sklearn.cluster import KMeans
from sklearn.metrics import v_measure_score, make_scorer, homogeneity_score
from extract_datasets import extract_labeled_chunkrange
from sklearn.preprocessing import scale
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
# Seed NumPy so the random stratified sampling below is reproducible.
np.random.seed(0)

# parse commandline arguments
op = OptionParser()
op.add_option("--h5file",
              dest="inputfile", help="Read data input from this hdf5 file.")
op.add_option("--size",
              dest="size", type="int", help="Extract the first size chunks of the data set and labels.")
op.add_option("--sample-size",
              dest="samplesize", type="int", help="The max size of the samples")
op.add_option("--output",
              dest="outfile", help="Write the estimator model to this file.")
op.add_option("--num-jobs",
              dest="jobs", type="int", help="Use these number of jobs in parallel for GridSearchCV")
(opts, args) = op.parse_args()

###############################################################################
# Load a training set from the given .h5 file
datafile = openFile(opts.inputfile, mode="r", title="Data is stored here")

# Extract some of the dataset from the datafile
X, labels = extract_labeled_chunkrange(datafile, opts.size)

# Split the points by class label (column 0): 0 = wild type, 1 = foci,
# 2 = abnormal nuclei.  Feature columns start at index 5.
wt_points = np.nonzero(labels[:, 0] == 0)[0]
foci_points = np.nonzero(labels[:, 0] == 1)[0]
ab_nuclei_points = np.nonzero(labels[:, 0] == 2)[0]

wt_data = X[wt_points, 5:]
foci_data = X[foci_points, 5:]
ab_nuclei_data = X[ab_nuclei_points, 5:]

wt_labels = labels[wt_points, 0]
foci_labels = labels[foci_points, 0]
ab_nuclei_labels = labels[ab_nuclei_points, 0]

# Figure out the sample sizes based on the shape of the *_labels arrays and
# the sample size argument; strata may hold fewer points than requested.
wt_samplesize = min(opts.samplesize, wt_data.shape[0])
foci_samplesize = min(opts.samplesize, foci_data.shape[0])
ab_nuclei_samplesize = min(opts.samplesize, ab_nuclei_data.shape[0])

# Use np.random.permutation(array)[0:size,:] to sample uniformly at random
# (without replacement) from each stratum.
wt_data_sample = np.random.permutation(wt_data)[0:wt_samplesize, :]
foci_data_sample = np.random.permutation(foci_data)[0:foci_samplesize, :]
ab_nuclei_sample = np.random.permutation(ab_nuclei_data)[0:ab_nuclei_samplesize, :]

D = np.vstack((wt_data_sample, foci_data_sample, ab_nuclei_sample))
D_labels = np.hstack((wt_labels[0:wt_samplesize], foci_labels[0:foci_samplesize], ab_nuclei_labels[0:ab_nuclei_samplesize]))
D_scaled = scale(D)
datafile.close()

##################
# Range of parameters to consider for neighbours
neighbours = np.arange(5, 50, 5)

# Pipeline: ISOMAP embedding -> k-means clustering, scored against the
# known labels with an external cluster-quality measure.
isomap = Isomap(n_neighbors=5, n_components=30)
kmeans = KMeans(n_clusters=3)

# Make a scoring function for the pipeline
# (v_measure_scorer is kept around to make swapping the metric trivial)
v_measure_scorer = make_scorer(v_measure_score)
homogeneity_scorer = make_scorer(homogeneity_score)

pipe = Pipeline(steps=[('isomap', isomap), ('kmeans', kmeans)])

# Set the model parameters to cycle over using the 'step__param' prefix syntax
estimator = GridSearchCV(pipe, dict(isomap__n_neighbors=neighbours), scoring=homogeneity_scorer, n_jobs=opts.jobs)
estimator.fit(D_scaled, D_labels)

# Dump the estimator to a file.  NOTE: the ``file()`` builtin used here
# previously is Python-2-only; ``open()`` works everywhere and the context
# manager closes the handle even if pickling fails.
with open(opts.outfile, 'wb') as f:
    pickle.dump(estimator, f)

# Report the best parameter values
print("Best estimator found on test data set:")
print()
print(estimator.best_estimator_)
print()
print("Best parameters found on test data set:")
print()
print(estimator.best_params_)
print()
print("Grid scores on development set:")
print()
for params, mean_score, scores in estimator.grid_scores_:
    print("%0.3f (+/-%0.03f) for %r"
          % (mean_score, scores.std() / 2, params))
print()
| lzamparo/SdA_reduce | utils/isomap_neighbours_pipeline.py | Python | bsd-3-clause | 4,226 |
# hackerrank - Algorithms: Plus Minus
# Written by James Andreou, University of Waterloo
# Read n and the n integers, then print the fraction of positive,
# negative and zero entries, each to three decimal places (Python 2:
# map() returns a list, so A can be scanned repeatedly).
N = float(raw_input())
A = map(int, raw_input().split())
print ("%.3f" % (sum(1 for value in A if value > 0) / N))
print ("%.3f" % (sum(1 for value in A if value < 0) / N))
print ("%.3f" % (sum(1 for value in A if value == 0) / N))
import asyncio
import logging
import json
import aiohttp
import lxml.html
import lxml.etree
from .selector import Selector
logger = logging.getLogger("requester")


class RequestError(RuntimeError):
    """Base error for failed HTTP requests; ``url`` records the target."""

    def __init__(self, url, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.url = url


class ServerError(RequestError):
    """The server reported a failure (or a known error marker was found)."""


class NotFoundError(RequestError):
    """The requested resource does not exist (hard or soft 404)."""


class ResponseError(RequestError):
    """The response body could not be parsed."""


class HttpError(RequestError):
    """An unexpected HTTP status code; ``code`` holds the status."""

    def __init__(self, url, code):
        self.code = code
        super().__init__(url, "HTTP {0} encountered".format(self.code))
class Response(Selector):
    """Wrap an HTTP response: JSON bodies are parsed, HTML stays selectable."""

    def __init__(self, response, content, node):
        self.response = response
        self.is_json = False
        try:
            parsed = json.loads(content)
        except Exception:
            # Not JSON (or malformed): keep the raw text.
            self.content = content
        else:
            self.content = parsed
            self.is_json = True
        super().__init__(node)

    def __getitem__(self, item):
        # Item access is only meaningful for parsed JSON payloads.
        if not self.is_json:
            raise RuntimeError("Response is not JSON")
        return self.content[item]

    def open_in_browser(self):
        """Debug helper: dump the parsed document to a file and open it."""
        import webbrowser
        import tempfile
        dump = tempfile.NamedTemporaryFile(delete=False, suffix=".html")
        dump.write(lxml.html.etree.tostring(self.document, pretty_print=True))
        dump.close()
        webbrowser.open(dump.name)
        print(dump.name)
class Requester(object):
    """Concurrency-limited HTTP GET helper built on aiohttp coroutines."""

    def __init__(self, concurrency=5, error_contents="", not_found_contents="", event_loop=None):
        """
        :param concurrency: maximum number of in-flight requests
        :param error_contents: substring marking a server-side error page
        :param not_found_contents: substring marking a soft-404 page
        :param event_loop: asyncio loop (defaults to the current one)
        """
        self.error_contents = error_contents
        self.not_found_contents = not_found_contents
        self.concurrency = concurrency
        self.semaphore = asyncio.BoundedSemaphore(concurrency)
        self.event_loop = event_loop or asyncio.get_event_loop()

    @asyncio.coroutine
    def get(self, url):
        """Fetch *url*, retrying 503 up to three times; return a Response.

        Raises NotFoundError / ServerError / HttpError / ResponseError on
        the corresponding failure modes, or returns None when the event
        loop is already closed.
        """
        attempts = 0
        while True:
            logger.info("Requesting {0}".format(url))
            with (yield from self.semaphore):
                if self.event_loop.is_closed():
                    return None
                try:
                    response = yield from aiohttp.request("GET", url, allow_redirects=True, loop=self.event_loop)
                except Exception as e:
                    logger.error("Could not retrieve {0}".format(url))
                    raise RequestError(url) from e
                # NOTE: was ``500 < status < 599``, which wrongly excluded
                # 500 (Internal Server Error) and 599 from this branch.
                if 500 <= response.status <= 599:
                    if response.status == 503:
                        attempts += 1
                        if attempts < 4:
                            # release the connection before retrying so it
                            # is not leaked across iterations
                            yield from response.close()
                            continue
                    yield from response.close()
                    raise HttpError(url, response.status)
                elif response.status == 404:
                    yield from response.close()
                    raise NotFoundError(url)
                data = yield from response.text()
                if self.error_contents and self.error_contents in data:
                    raise ServerError(url)
                if self.not_found_contents and self.not_found_contents in data:
                    raise NotFoundError(url)
                try:
                    node = lxml.html.fromstring(data)
                except lxml.etree.ParseError as e:
                    raise ResponseError(url) from e
                return Response(response, data, node)
| orf/cyborg | cyborg/requester.py | Python | apache-2.0 | 3,265 |
# Copyright (c) 2013 Rackspace, Inc.
# Copyright (c) 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import multiprocessing
import os
import jsonschema
from oslo_utils import timeutils
from zaqar.api.v1 import response as response_v1
from zaqar.api.v1_1 import response as response_v1_1
from zaqar.api.v2 import response as response_v2
from zaqar import bootstrap
from zaqar.storage import mongodb
from zaqar.storage.redis import driver as redis
from zaqar import tests as testing
from zaqar.tests.functional import config
from zaqar.tests.functional import helpers
from zaqar.tests.functional import http
from zaqar.tests import helpers as base_helpers
from zaqar.transport import base as transport_base
# TODO(flaper87): This is necessary to register,
# wsgi configs and won't be permanent. It'll be
# refactored as part of the work for this blueprint
from zaqar.transport import validation
from zaqar.transport import wsgi # noqa
# TODO(kgriffs): Run functional tests to a devstack gate job and
# set this using an environment variable or something.
#
# TODO(kgriffs): Find a more general way to do this; we seem to be
# using this environ flag pattern over and over againg.
_TEST_INTEGRATION = os.environ.get('ZAQAR_TEST_INTEGRATION') is not None
class FunctionalTestBase(testing.TestBase):
    # External server process (integration mode only); shared per class.
    server = None
    # Concrete server launcher class; set by subclasses.
    server_class = None
    # Optional per-class config file path overriding the suite default.
    config_file = None
    # Bootstrap (transport/storage/control) shared by the whole test class.
    class_bootstrap = None
    # NOTE(Eva-i): ttl_gc_interval is the known maximum time interval between
    # automatic resource TTL expirations. Depends on message store back end.
    class_ttl_gc_interval = None
    # Project IDs observed during the run; their databases are wiped in
    # tearDownClass.  Class-level mutable set: deliberately shared across
    # all tests of the class.
    wipe_dbs_projects = set([])
    def setUp(self):
        """Load config, bootstrap transport/storage and build an HTTP client.

        Also records the test project ID so its databases can be wiped
        once the whole class has run.
        """
        super(FunctionalTestBase, self).setUp()
        # NOTE(flaper87): Config can't be a class
        # attribute because it may be necessary to
        # modify it at runtime which will affect
        # other instances running instances.
        self.cfg = config.load_config()
        if not self.cfg.run_tests:
            self.skipTest("Functional tests disabled")
        config_file = self.config_file or self.cfg.zaqar.config
        config_file = base_helpers.override_mongo_conf(config_file, self)
        self.mconf = self.load_conf(config_file)
        validator = validation.Validator(self.mconf)
        self.limits = validator._limits_conf
        self.resource_defaults = transport_base.ResourceDefaults(self.mconf)
        # Always register options
        self.__class__.class_bootstrap = bootstrap.Bootstrap(self.mconf)
        self.class_bootstrap.transport
        # Pick the TTL garbage-collection interval that matches the
        # configured message-store back end.
        datadriver = self.class_bootstrap.storage._storage
        if isinstance(datadriver, redis.DataDriver):
            self.__class__.class_ttl_gc_interval = 1
        if isinstance(datadriver, mongodb.DataDriver):
            # NOTE(kgriffs): MongoDB's TTL scavenger only runs once a minute
            self.__class__.class_ttl_gc_interval = 60
        if _TEST_INTEGRATION:
            # Integration mode: talk to a real server process over HTTP.
            if not (self.server and self.server.is_alive()):
                self.server = self.server_class()
                self.server.start(self.mconf)
                self.addCleanup(self.server.process.terminate)
            self.client = http.Client()
        else:
            # In-process mode: drive the WSGI app directly.
            if self.server_class == ZaqarAdminServer:
                self.mconf.pooling = True
                self.mconf.admin_mode = True
            self.addCleanup(self.class_bootstrap.storage.close)
            self.addCleanup(self.class_bootstrap.control.close)
            self.client = http.WSGIClient(self.class_bootstrap.transport.app)
        self.headers = helpers.create_zaqar_headers(self.cfg)
        self.headers_response_with_body = {'location', 'content-type'}
        self.client.set_headers(self.headers)
        # Store information required for cleaning databases after
        # execution of test class
        self.wipe_dbs_projects.add(self.headers["X-Project-ID"])
def tearDown(self):
super(FunctionalTestBase, self).tearDown()
# Project might has changed during test case execution.
# Lets add it again to the set.
self.wipe_dbs_projects.add(self.headers["X-Project-ID"])
@staticmethod
def _if_mongo_datadriver_drop_dbs(driver):
"""Drops MongoDB datadriver's databases.
:param driver: instance of zaqar.storage.mongodb.driver.DataDriver
"""
if not isinstance(driver, mongodb.DataDriver):
return
for db in driver.message_databases:
driver.connection.drop_database(db)
subscription_db = driver.subscriptions_database
driver.connection.drop_database(subscription_db)
@staticmethod
def _if_mongo_controldriver_drop_dbs(driver):
"""Drops all MongoDB controldriver's databases.
:param driver: instance of zaqar.storage.mongodb.driver.ControlDriver
"""
if not isinstance(driver, mongodb.ControlDriver):
return
driver.connection.drop_database(driver.database)
driver.connection.drop_database(driver.queues_database)
    @classmethod
    def _pooling_drop_dbs_by_project(cls, xproject):
        """Finds all pool drivers by project, drops all their databases.

        Assumes that pooling is enabled.

        :param xproject: project name to use for pool drivers search
        """
        datadriver = cls.class_bootstrap.storage._storage
        controldriver = cls.class_bootstrap.control
        # Let's get list of all queues by project
        queue_generator = controldriver.queue_controller.list(project=xproject)
        queues = list(next(queue_generator))
        # Let's extract all queue names from the list of queues
        queue_names = [q['name'] for q in queues]
        # Finally let's use queues names to get each one of pool datadrivers
        catalog = datadriver._pool_catalog
        for queue_name in queue_names:
            pool_pipe_driver = catalog.lookup(queue_name, project=xproject)
            pool_datadriver = pool_pipe_driver._storage
            if pool_datadriver is not None:
                # Let's delete the queue, so the next invocation of
                # pooling_catalog.lookup() will not recreate pool driver
                controldriver.queue_controller.delete(queue_name)
                # Let's drop pool's databases (MongoDB pools only; other
                # back ends are a no-op in the helper below)
                cls._if_mongo_datadriver_drop_dbs(pool_datadriver)
@classmethod
def tearDownClass(cls):
    """Cleans up after test class execution.

    Drops all databases left.
    Closes connections to databases.
    """
    # Bootstrap can be None if all test cases were skipped, so nothing to
    # clean
    if cls.class_bootstrap is None:
        return
    datadriver = cls.class_bootstrap.storage._storage
    controldriver = cls.class_bootstrap.control
    if cls.class_bootstrap.conf.pooling:
        # Pooling detected, let's drop pooling-specific databases
        for p in cls.wipe_dbs_projects:
            # This will find all pool databases by project and drop them
            cls._pooling_drop_dbs_by_project(p)
        controldriver.pools_controller.drop_all()
        controldriver.flavors_controller.drop_all()
    else:
        # No pooling detected, let's just drop datadriver's databases
        cls._if_mongo_datadriver_drop_dbs(datadriver)
    # Close the data-plane storage before touching control-plane state.
    cls.class_bootstrap.storage.close()
    # Let's drop controldriver's databases
    controldriver.catalogue_controller.drop_all()
    cls._if_mongo_controldriver_drop_dbs(controldriver)
    controldriver.close()
def assertIsSubset(self, required_values, actual_values):
    """Checks that one set of values is a subset of another.

    :param required_values: values that must all be present (superset test
        subject).
    :param actual_values: values actually observed in the response.
    """
    form = 'Missing Header(s) - {0}'
    self.assertTrue(required_values.issubset(actual_values),
                    msg=form.format((required_values - actual_values)))
def assertMessageCount(self, actualCount, expectedCount):
    """Assert that no more messages were returned than allowed.

    :param actualCount: number of messages returned in the API response.
    :param expectedCount: limit value passed in the url (OR) default(10).
    """
    failure_note = ('More Messages returned than allowed: expected count = {0}'
                    ', actual count = {1}'.format(expectedCount, actualCount))
    self.assertLessEqual(actualCount, expectedCount, failure_note)
def assertQueueStats(self, result_json, claimed):
    """Checks the Queue Stats results

    :param result_json: json response returned for Queue Stats.
    :param claimed: expected number of claimed messages.
    """
    # NOTE(review): assumes the queue was filled up to
    # max_messages_per_claim_or_pop by the caller — confirm at call sites.
    total = self.limits.max_messages_per_claim_or_pop
    free = total - claimed
    self.assertEqual(claimed, result_json['messages']['claimed'])
    self.assertEqual(free, result_json['messages']['free'])
    self.assertEqual(total, result_json['messages']['total'])
    # 'oldest'/'newest' stats are only present in some responses; verify
    # both whenever 'oldest' is reported.
    if 'oldest' in result_json['messages']:
        oldest_message = result_json['messages']['oldest']
        self.verify_message_stats(oldest_message)
        newest_message = result_json['messages']['newest']
        self.verify_message_stats(newest_message)
def assertSchema(self, response, expectedSchemaName):
    """Compares the json response with the expected schema

    :param response: response json returned by the API.
    :type response: dict
    :param expectedSchemaName: name of the expected schema definition for
        the response.
    :type expectedSchemaName: string
    """
    try:
        expectedSchema = self.response.get_schema(expectedSchemaName)
        jsonschema.validate(response, expectedSchema)
    except jsonschema.ValidationError as message:
        # Surface the schema violation as a plain test failure.
        assert False, message
def verify_message_stats(self, message):
    """Verifies the oldest & newest message stats

    :param message: oldest (or) newest message returned by
        queue_name/stats.
    """
    # The stats entry must expose exactly these keys.
    expected_keys = ['age', 'created', 'href']
    response_keys = message.keys()
    response_keys = sorted(response_keys)
    self.assertEqual(expected_keys, response_keys)
    # Verify that age has valid values
    age = message['age']
    msg = 'Invalid Age {0}'.format(age)
    self.assertLessEqual(0, age, msg)
    self.assertLessEqual(age, self.limits.max_message_ttl, msg)
    # Verify that GET on href returns 200
    path = message['href']
    result = self.client.get(path)
    self.assertEqual(200, result.status_code)
    # Verify that created time falls within the last 10 minutes
    # NOTE(malini): The messages are created during the test.
    created_time = message['created']
    created_time = timeutils.normalize_time(
        timeutils.parse_isotime(created_time))
    now = timeutils.utcnow()
    delta = timeutils.delta_seconds(before=created_time, after=now)
    # NOTE(malini): The 'int()' below is a work around for the small time
    # difference between julianday & UTC.
    # (needed to pass this test on sqlite driver)
    delta = int(delta)
    msg = ('Invalid Time Delta {0}, Created time {1}, Now {2}'
           .format(delta, created_time, now))
    self.assertLessEqual(0, delta, msg)
    # NOTE(review): 6000s is a generous upper bound on creation lag —
    # confirm it intentionally exceeds the "10 minutes" mentioned above.
    self.assertLessEqual(delta, 6000, msg)
class Server(object, metaclass=abc.ABCMeta):
    """Manages a server running in a background daemon process."""

    name = "zaqar-functional-test-server"

    def __init__(self):
        self.process = None

    @abc.abstractmethod
    def get_target(self, conf):
        """Prepares the target object

        This method is meant to initialize server's
        bootstrap and return a callable to run the
        server.

        :param conf: The config instance for the
            bootstrap class
        :returns: A callable object
        """

    def is_alive(self):
        """Returns True IFF the server is running."""
        return self.process is not None and self.process.is_alive()

    def start(self, conf):
        """Starts the server process.

        :param conf: The config instance to use for
            the new process
        :returns: A `multiprocessing.Process` instance
        """
        # TODO(flaper87): Re-use running instances.
        target = self.get_target(conf)
        if not callable(target):
            raise RuntimeError("Target not callable")

        self.process = multiprocessing.Process(target=target,
                                               name=self.name)
        self.process.daemon = True
        self.process.start()

        # NOTE(flaper87): Give it a second to boot.
        self.process.join(1)
        return self.process

    def stop(self):
        """Terminates a process

        This method kills a process by
        calling `terminate`. Note that
        children of this process won't be
        terminated but become orphaned.
        """
        self.process.terminate()
class ZaqarServer(Server):
    """Runs the public-facing Zaqar WSGI server."""

    name = "zaqar-wsgiref-test-server"

    def get_target(self, conf):
        """Return the bootstrapped server's ``run`` callable."""
        return bootstrap.Bootstrap(conf).run
class ZaqarAdminServer(Server):
    """Runs Zaqar with admin mode enabled."""

    name = "zaqar-admin-wsgiref-test-server"

    def get_target(self, conf):
        """Flip ``conf`` into admin mode, then return the ``run`` callable."""
        conf.admin_mode = True
        return bootstrap.Bootstrap(conf).run
class V1FunctionalTestBase(FunctionalTestBase):
    """Functional test base wired to the v1 API response schemas."""

    def setUp(self):
        """Create the v1 response-schema validator for this test."""
        super(V1FunctionalTestBase, self).setUp()
        self.response = response_v1.ResponseSchema(self.limits)
class V1_1FunctionalTestBase(FunctionalTestBase):
    """Functional test base wired to the v1.1 API response schemas."""

    def setUp(self):
        """Create the v1.1 response-schema validator for this test."""
        super(V1_1FunctionalTestBase, self).setUp()
        self.response = response_v1_1.ResponseSchema(self.limits)
class V2FunctionalTestBase(FunctionalTestBase):
    """Functional test base wired to the v2 API response schemas."""

    def setUp(self):
        """Create the v2 response-schema validator for this test."""
        super(V2FunctionalTestBase, self).setUp()
        self.response = response_v2.ResponseSchema(self.limits)
| openstack/zaqar | zaqar/tests/functional/base.py | Python | apache-2.0 | 14,553 |
from __future__ import unicode_literals
from django.apps import AppConfig
class CawasConfig(AppConfig):
    """Django application configuration for the ``cawas`` app."""

    name = 'cawas'
| emilianobilli/backend | dam/cawas/apps.py | Python | gpl-3.0 | 126 |
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
class NigeriapostcodesPipeline(object):
    """Scrapy item pipeline; currently a pass-through placeholder."""

    def process_item(self, item, spider):
        """Return the scraped ``item`` unmodified.

        :param item: the scraped item.
        :param spider: the spider that produced it (unused).
        """
        return item
| NigerianPostcodes/spiderman | nigeriapostcodes/pipelines.py | Python | mit | 296 |
"""
A pytz version that runs smoothly on Google App Engine.
Based on http://appengine-cookbook.appspot.com/recipe/caching-pytz-helper/
To use, add pytz to your path normally, but import it from the gae module:
from pytz.gae import pytz
Applied patches:
- The zoneinfo dir is removed from pytz, as this module includes a zipped
version of it.
- pytz is monkey patched to load zoneinfos from a zipfile.
- pytz is patched to not check all zoneinfo files when loaded. This is
sad, I wish that was lazy, so it could be monkey patched. As it is,
the zipfile patch doesn't work and it'll spend resources checking
hundreds of files that we know aren't there.
pytz caches loaded zoneinfos, and this module will additionally cache them
in memcache to avoid unzipping constantly. The cache key includes the
OLSON_VERSION so it is invalidated when pytz is updated.
"""
import os
import logging
import pytz
import zipfile
from cStringIO import StringIO
# Fake memcache for when we're not running under the SDK, likely a script.
class memcache(object):
    """Minimal no-op stand-in for App Engine's memcache API.

    Used when running outside Google App Engine (e.g. in scripts) so the
    module keeps working; nothing is ever actually cached.
    """

    @classmethod
    def add(cls, *args, **kwargs):
        """Pretend to store a value; does nothing."""
        pass

    @classmethod
    def get(cls, *args, **kwargs):
        """Always report a cache miss."""
        return None
# Replace the no-op stub above with the real App Engine memcache service
# when available; keep the stub on the local dev server or import failure.
try:
    # Don't use memcache outside of Google App Engine or with GAE's dev server.
    if not os.environ.get('SERVER_SOFTWARE', '').startswith('Development'):
        from google.appengine.api import memcache
except ImportError:
    pass
# Module-level cache of the opened zoneinfo zip archive; see get_zoneinfo().
zoneinfo = None
zoneinfo_path = os.path.abspath(os.path.join(os.path.dirname(__file__),
                                             'zoneinfo.zip'))


def get_zoneinfo():
    """Cache the opened zipfile in the module."""
    global zoneinfo
    if zoneinfo is None:
        zoneinfo = zipfile.ZipFile(zoneinfo_path)
    return zoneinfo
class TimezoneLoader(object):
    """A loader that reads timezones using ZipFile."""

    def __init__(self):
        # Memoizes resource_exists() lookups: name -> bool.
        self.available = {}

    def open_resource(self, name):
        """Opens a resource from the zoneinfo subdir for reading.

        Zone data is cached in memcache keyed by pytz's OLSON_VERSION, so
        a pytz upgrade naturally invalidates stale entries.
        """
        name_parts = name.lstrip('/').split('/')
        # Reject path-traversal attempts such as '../..'.
        if os.path.pardir in name_parts:
            raise ValueError('Bad path segment: %r' % os.path.pardir)
        cache_key = 'pytz.zoneinfo.%s.%s' % (pytz.OLSON_VERSION, name)
        zonedata = memcache.get(cache_key)
        if zonedata is None:
            zonedata = get_zoneinfo().read('zoneinfo/' + '/'.join(name_parts))
            memcache.add(cache_key, zonedata)
            logging.info('Added timezone to memcache: %s' % cache_key)
        else:
            logging.info('Loaded timezone from memcache: %s' % cache_key)
        return StringIO(zonedata)

    def resource_exists(self, name):
        """Return true if the given resource exists"""
        if name not in self.available:
            try:
                get_zoneinfo().getinfo('zoneinfo/' + name)
                self.available[name] = True
            except KeyError:
                self.available[name] = False
        return self.available[name]


# Install this loader so pytz reads zoneinfo from the bundled zip archive.
pytz.loader = TimezoneLoader()
| kurtisharms/ubcexamcram | pytz/gae.py | Python | gpl-3.0 | 3,068 |
import numpy as np
import pyflux as pf
# Synthetic series: a 200-step Gaussian random walk starting at zero.
noise = np.random.normal(0, 1, 200)
data = np.zeros(200)
for step in range(1, len(data)):
    data[step] = 1.0 * data[step - 1] + noise[step]

# Poisson(3) counts, kept for parity with the other GAS test modules.
countdata = np.random.poisson(3, 200)
def test_t_couple_terms():
    """
    Tests that a GASLLEV model with a Student-t family has the expected
    number of latent variables (3) and that their estimates are not nan
    """
    model = pf.GASLLEV(data=data, family=pf.t())
    x = model.fit()
    assert(len(model.latent_variables.z_list) == 3)
    lvs = np.array([i.value for i in model.latent_variables.z_list])
    assert(len(lvs[np.isnan(lvs)]) == 0)
def test_t_couple_terms_integ():
    """
    Tests a GASLLEV model on once-integrated data: the latent variable
    list length is correct, and the estimated latent variables are not nan
    """
    model = pf.GASLLEV(data=data, integ=1, family=pf.t())
    x = model.fit()
    assert(len(model.latent_variables.z_list) == 3)
    lvs = np.array([i.value for i in model.latent_variables.z_list])
    assert(len(lvs[np.isnan(lvs)]) == 0)
def test_t_bbvi():
    """
    Tests a GASLLEV model estimated with BBVI: the length of the latent
    variable list is correct, and the estimated latent variables are not nan
    """
    model = pf.GASLLEV(data=data, family=pf.t())
    x = model.fit('BBVI',iterations=100)
    assert(len(model.latent_variables.z_list) == 3)
    lvs = np.array([i.value for i in model.latent_variables.z_list])
    assert(len(lvs[np.isnan(lvs)]) == 0)
def test_t_bbvi_mini_batch():
    """
    Tests a GASLLEV model estimated with mini-batch BBVI: the length of the
    latent variable list is correct, and the estimates are not nan
    """
    model = pf.GASLLEV(data=data, family=pf.t())
    x = model.fit('BBVI',iterations=100, mini_batch=32)
    assert(len(model.latent_variables.z_list) == 3)
    lvs = np.array([i.value for i in model.latent_variables.z_list])
    assert(len(lvs[np.isnan(lvs)]) == 0)
def test_t_bbvi_elbo():
    """
    Tests that the ELBO increases over the course of BBVI optimization
    """
    model = pf.GASLLEV(data=data, family=pf.t())
    x = model.fit('BBVI',iterations=100, record_elbo=True, map_start=False)
    assert(x.elbo_records[-1]>x.elbo_records[0])
def test_t_bbvi_mini_batch_elbo():
    """
    Tests that the ELBO increases under mini-batch BBVI optimization
    """
    model = pf.GASLLEV(data=data, family=pf.t())
    x = model.fit('BBVI',iterations=100, mini_batch=32, record_elbo=True, map_start=False)
    assert(x.elbo_records[-1]>x.elbo_records[0])
def test_t_mh():
    """
    Tests a GASLLEV model estimated with Metropolis-Hastings: the length of
    the latent variable list is correct, and the estimates are not nan
    """
    model = pf.GASLLEV(data=data, family=pf.t())
    x = model.fit('M-H',nsims=300)
    assert(len(model.latent_variables.z_list) == 3)
    lvs = np.array([i.value for i in model.latent_variables.z_list])
    assert(len(lvs[np.isnan(lvs)]) == 0)
def test_t_laplace():
    """
    Tests a GASLLEV model estimated with the Laplace approximation: the
    length of the latent variable list is correct, and estimates are not nan
    """
    model = pf.GASLLEV(data=data, family=pf.t())
    x = model.fit('Laplace')
    assert(len(model.latent_variables.z_list) == 3)
    lvs = np.array([i.value for i in model.latent_variables.z_list])
    assert(len(lvs[np.isnan(lvs)]) == 0)
def test_t_pml():
    """
    Tests a GASLLEV model estimated with penalized maximum likelihood (PML):
    the length of the latent variable list is correct, and estimates are
    not nan
    """
    model = pf.GASLLEV(data=data, family=pf.t())
    x = model.fit('PML')
    assert(len(model.latent_variables.z_list) == 3)
    lvs = np.array([i.value for i in model.latent_variables.z_list])
    assert(len(lvs[np.isnan(lvs)]) == 0)
def test_t_predict_length():
    """
    Tests that the prediction dataframe length is equal to the number of steps h
    """
    model = pf.GASLLEV(data=data, family=pf.t())
    x = model.fit()
    # Also smoke-tests that summarising a fitted model does not raise.
    x.summary()
    assert(model.predict(h=5).shape[0] == 5)
def test_t_predict_is_length():
    """
    Tests that the in-sample prediction dataframe length equals the number
    of steps h
    """
    model = pf.GASLLEV(data=data, family=pf.t())
    x = model.fit()
    assert(model.predict_is(h=5).shape[0] == 5)
def test_t_predict_nans():
    """
    Tests that the predictions are not nans
    """
    model = pf.GASLLEV(data=data, family=pf.t())
    x = model.fit()
    # Also smoke-tests that summarising a fitted model does not raise.
    x.summary()
    assert(len(model.predict(h=5).values[np.isnan(model.predict(h=5).values)]) == 0)
def test_t_predict_is_nans():
    """
    Tests that the in-sample predictions are not nans
    """
    model = pf.GASLLEV(data=data, family=pf.t())
    x = model.fit()
    # Also smoke-tests that summarising a fitted model does not raise.
    x.summary()
    assert(len(model.predict_is(h=5).values[np.isnan(model.predict_is(h=5).values)]) == 0)
def test_t_predict_intervals():
    """
    Tests prediction intervals are ordered correctly
    """
    model = pf.GASLLEV(data=data, family=pf.t())
    x = model.fit()
    predictions = model.predict(h=10, intervals=True)
    bands = ['99% Prediction Interval', '95% Prediction Interval',
             '5% Prediction Interval', '1% Prediction Interval']
    # Each band must lie strictly above the next narrower/lower one.
    for upper, lower in zip(bands, bands[1:]):
        assert(np.all(predictions[upper].values > predictions[lower].values))
def test_t_predict_is_intervals():
    """
    Tests in-sample prediction intervals are ordered correctly
    """
    model = pf.GASLLEV(data=data, family=pf.t())
    x = model.fit()
    predictions = model.predict_is(h=10, intervals=True)
    bands = ['99% Prediction Interval', '95% Prediction Interval',
             '5% Prediction Interval', '1% Prediction Interval']
    for upper, lower in zip(bands, bands[1:]):
        assert(np.all(predictions[upper].values > predictions[lower].values))
def test_t_predict_intervals_bbvi():
    """
    Tests prediction intervals are ordered correctly (BBVI fit)
    """
    model = pf.GASLLEV(data=data, family=pf.t())
    x = model.fit('BBVI', iterations=100)
    predictions = model.predict(h=10, intervals=True)
    bands = ['99% Prediction Interval', '95% Prediction Interval',
             '5% Prediction Interval', '1% Prediction Interval']
    for upper, lower in zip(bands, bands[1:]):
        assert(np.all(predictions[upper].values > predictions[lower].values))
def test_t_predict_is_intervals_bbvi():
    """
    Tests in-sample prediction intervals are ordered correctly (BBVI fit)
    """
    model = pf.GASLLEV(data=data, family=pf.t())
    x = model.fit('BBVI', iterations=100)
    predictions = model.predict_is(h=10, intervals=True)
    bands = ['99% Prediction Interval', '95% Prediction Interval',
             '5% Prediction Interval', '1% Prediction Interval']
    for upper, lower in zip(bands, bands[1:]):
        assert(np.all(predictions[upper].values > predictions[lower].values))
def test_t_predict_intervals_mh():
    """
    Tests prediction intervals are ordered correctly (M-H fit)
    """
    model = pf.GASLLEV(data=data, family=pf.t())
    x = model.fit('M-H', nsims=400)
    predictions = model.predict(h=10, intervals=True)
    bands = ['99% Prediction Interval', '95% Prediction Interval',
             '5% Prediction Interval', '1% Prediction Interval']
    for upper, lower in zip(bands, bands[1:]):
        assert(np.all(predictions[upper].values > predictions[lower].values))
def test_t_predict_is_intervals_mh():
    """
    Tests in-sample prediction intervals are ordered correctly (M-H fit)
    """
    model = pf.GASLLEV(data=data, family=pf.t())
    x = model.fit('M-H', nsims=400)
    predictions = model.predict_is(h=10, intervals=True)
    bands = ['99% Prediction Interval', '95% Prediction Interval',
             '5% Prediction Interval', '1% Prediction Interval']
    for upper, lower in zip(bands, bands[1:]):
        assert(np.all(predictions[upper].values > predictions[lower].values))
def test_t_sample_model():
    """
    Tests sampling function
    """
    model = pf.GASLLEV(data=data, family=pf.t())
    x = model.fit('BBVI', iterations=100)
    sample = model.sample(nsims=100)
    # One row per posterior draw.
    assert(sample.shape[0]==100)
    # NOTE(review): columns appear to be len(data)-1 because the first
    # observation seeds the local level recursion — confirm in pyflux docs.
    assert(sample.shape[1]==len(data)-1)
def test_t_ppc():
    """
    Tests PPC value
    """
    model = pf.GASLLEV(data=data, family=pf.t())
    x = model.fit('BBVI', iterations=100)
    p_value = model.ppc()
    # A posterior predictive p-value is a probability by construction.
    assert(0.0 <= p_value <= 1.0)
| RJT1990/pyflux | pyflux/gas/tests/gas_llev_tests_t.py | Python | bsd-3-clause | 8,879 |
from django.conf.urls import include, url
from waldur_core.core.routers import SortedDefaultRouter as DefaultRouter
from waldur_core.server.urls import urlpatterns
from . import views
def register_in(router):
    """Register the structure-test viewsets on ``router``.

    NOTE(review): ``base_name`` was renamed to ``basename`` in newer DRF
    releases — confirm the pinned DRF version before upgrading.
    """
    router.register(r'test', views.TestServiceViewSet, base_name='test')
    router.register(r'test-service-project-link', views.TestServiceProjectLinkViewSet, base_name='test-spl')
    router.register(r'test-new-instances', views.TestNewInstanceViewSet, base_name='test-new-instances')
# Build the router and append its API routes to the core URL patterns.
router = DefaultRouter()
register_in(router)

urlpatterns += [
    url(r'^api/', include(router.urls)),
]
| opennode/nodeconductor | waldur_core/structure/tests/urls.py | Python | mit | 608 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Widen ``dataset.creator_path`` to a 1024-character CharField."""

    dependencies = [
        ('datasetmanager', '0003_auto_20151028_1559'),
    ]

    operations = [
        migrations.AlterField(
            model_name='dataset',
            name='creator_path',
            field=models.CharField(max_length=1024),
        ),
    ]
| mmilaprat/policycompass-services | apps/datasetmanager/migrations/0004_auto_20151111_1746.py | Python | agpl-3.0 | 418 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
import os
import sys
from setuptools import setup
# Package metadata consumed by setup() at the bottom of this script.
name = 'drfjsonapi'
package = 'drfjsonapi'
description = 'JSON API reference implementation for Django Rest Framework'
url = 'https://github.com/sassoo/drfjsonapi'
author = 'Sassoo'
author_email = 'noreply@devnull.seriously'
license = 'BSD'
def get_version(package):
    """
    Return package version as listed in `__version__` in `init.py`.

    The file is read via a context manager so the handle is always closed
    (the original left it open). Raises AttributeError if no
    `__version__` assignment is found, matching the previous behavior.
    """
    init_path = os.path.join(package, '__init__.py')
    with open(init_path) as init_file:
        init_py = init_file.read()
    return re.search("^__version__ = ['\"]([^'\"]+)['\"]",
                     init_py, re.MULTILINE).group(1)
def get_packages(package):
    """
    Return root package and all sub-packages.
    """
    found = []
    for dirpath, dirnames, filenames in os.walk(package):
        # A directory is a package iff it contains __init__.py.
        if os.path.exists(os.path.join(dirpath, '__init__.py')):
            found.append(dirpath)
    return found
def get_package_data(package):
    """
    Return all files under the root package, that are not in a
    package themselves.
    """
    filepaths = []
    for dirpath, dirnames, filenames in os.walk(package):
        # Skip package directories; only plain data dirs contribute files.
        if os.path.exists(os.path.join(dirpath, '__init__.py')):
            continue
        base = dirpath.replace(package + os.sep, '', 1)
        filepaths.extend(os.path.join(base, filename)
                         for filename in filenames)
    return {package: filepaths}
version = get_version(package)


# `python setup.py publish` shortcut: upload sdist + wheel, then remind
# the maintainer to tag the release.
if sys.argv[-1] == 'publish':
    # A non-zero exit status from grep means wheel is not installed.
    if os.system("pip freeze | grep wheel"):
        print("wheel not installed.\nUse `pip install wheel`.\nExiting.")
        sys.exit()
    os.system("python setup.py sdist upload")
    os.system("python setup.py bdist_wheel upload")
    print("You probably want to also tag the version now:")
    print(" git tag -a {0} -m 'version {0}'".format(version))
    print(" git push --tags")
    sys.exit()
# Package definition; packages and package data are discovered dynamically
# from the package directory by the helpers above.
setup(
    name=name,
    version=version,
    url=url,
    license=license,
    description=description,
    author=author,
    author_email=author_email,
    packages=get_packages(package),
    package_data=get_package_data(package),
    install_requires=[],
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Environment :: Web Environment',
        'Framework :: Django',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Natural Language :: English',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Topic :: Internet :: WWW/HTTP',
    ]
)
| sassoo/drfjsonapi | setup.py | Python | isc | 2,755 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
from django.utils.timezone import utc
class Migration(migrations.Migration):
    """Add `published` visibility to albums and make `date_modified`
    auto-updating on albums and photos.

    The hard-coded datetime defaults are one-off values generated by
    `makemigrations` to backfill existing rows.
    """

    dependencies = [
        ('imager_images', '0010_auto_20150728_1429'),
    ]

    operations = [
        migrations.AddField(
            model_name='album',
            name='published',
            field=models.CharField(default='private', max_length=255, choices=[('public', 'public'), ('shared', 'shared'), ('private', 'private')]),
        ),
        migrations.AlterField(
            model_name='album',
            name='date_modified',
            field=models.DateTimeField(default=datetime.datetime(2015, 7, 28, 22, 15, 35, 492355, tzinfo=utc), auto_now=True),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='photo',
            name='date_modified',
            field=models.DateTimeField(default=datetime.datetime(2015, 7, 28, 22, 15, 42, 61686, tzinfo=utc), auto_now=True),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='photo',
            name='published',
            field=models.CharField(default='private', max_length=255, choices=[('public', 'public'), ('shared', 'shared'), ('private', 'private')]),
        ),
    ]
| gatita/django-imager | imagersite/imager_images/migrations/0011_auto_20150728_1515.py | Python | mit | 1,354 |
import os
from sqlalchemy import create_engine, ForeignKey, func
from sqlalchemy import Column, Date, Integer, String
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship, backref, sessionmaker
# Declarative base shared by all media tables below.
Base = declarative_base()


class Series( Base ):
    """A TV series; episodes reference it via their `series` foreign key."""
    __tablename__ = 'series'

    item = Column( String )
    tag = Column( String, primary_key=True )
    title = Column( String )
    imdb = Column(String)
    # NOTE(review): the backref name 'episodes' mirrors this relationship's
    # own attribute name — confirm the intended backref on Episode.
    episodes = relationship( 'Episode', backref='episodes' )
class Episode( Base ):
    """A single episode, linked to its Series via the `series` tag."""
    __tablename__ = 'episode'

    tag = Column( String, primary_key=True )
    title = Column( String )
    # Filesystem path of the media file.
    path = Column( String )
    series = Column( String, ForeignKey( 'series.tag' ) )
    season = Column( Integer )
class Movie( Base ):
    """A standalone movie record."""
    __tablename__ = 'movie'

    id = Column( Integer, primary_key=True )
    title = Column( String )
    # Filesystem path of the media file.
    path = Column( String )
    imdb = Column( String )
| simondodson/Curator | media_db.py | Python | gpl-3.0 | 935 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ======================================
"""Library of TPU helper functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import logging
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.protobuf.tpu import dynamic_padding_pb2 as dynamic_padding
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.compat import compat as api_compat
from tensorflow.python.compiler.xla import xla
from tensorflow.python.distribute import device_util
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.framework import config
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import func_graph
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.tpu import tpu_function
from tensorflow.python.tpu.ops import tpu_ops
from tensorflow.python.util import compat
from tensorflow.python.util import nest
from tensorflow.python.util.compat import collections_abc
from tensorflow.python.util.tf_export import tf_export
# The replicated-input op is a bookkeeping node; it has no gradient.
ops.NotDifferentiable("TPUReplicatedInput")

# Operations that indicate some error in the users graph, e.g. a placeholder
# that's introduced outside of the infeed.
_BLACKLISTED_OPS = set([
    "Placeholder",
])

# XLA doesn't currently support reading of intermediate tensors, thus some ops
# are not supported.
_UNSUPPORTED_OPS = set([
    "AudioSummary",
    "AudioSummaryV2",
    "HistogramSummary",
    "ImageSummary",
    "MergeSummary",
    "Print",
    "ScalarSummary",
    "TensorSummary",
    "TensorSummaryV2",
    ])

# Ops which can be safely pruned from XLA compile if they have no consumers.
#  These ops should also have no inputs.
_UNCONNECTED_OPS_TO_PRUNE = set(["Placeholder", "VarHandleOp"])

# NOTE(review): presumably caps how many warning lines are logged per
# violation category — confirm at the use sites.
_MAX_WARNING_LINES = 5

# Node attribute names used by the TPU/XLA rewriting passes.
_TPU_REPLICATE_ATTR = "_tpu_replicate"
_POST_DEVICE_REWRITE_ATTR = "_post_device_rewrite"
_TPU_COMPILATION_STATUS_ATTR = "_tpu_compilation_status"
_OUTSIDE_COMPILATION_ATTR = "_xla_outside_compilation"
def _tpu_system_device_name(job):
  """Returns the device name for the TPU_SYSTEM device of `job`."""
  if job is None:
    return "/device:TPU_SYSTEM:0"
  return "/job:%s/device:TPU_SYSTEM:0" % job
@tf_export(v1=["tpu.initialize_system"])
def initialize_system(embedding_config=None, job=None):
  """Initializes a distributed TPU system for use with TensorFlow.

  Args:
    embedding_config: If not None, a `TPUEmbeddingConfiguration` proto
      describing the desired configuration of the hardware embedding lookup
      tables. If embedding_config is None, no hardware embeddings can be used.
    job: The job (the XXX in TensorFlow device specification /job:XXX) that
      contains the TPU devices that will be initialized. If job=None it is
      assumed there is only one job in the TensorFlow flock, and an error will
      be returned if this assumption does not hold.

  Returns:
    A serialized `TopologyProto` that describes the TPU system. Note:
      the topology must be evaluated using `Session.run` before it can be used.
  """
  # An empty string tells the runtime that no embedding tables are wanted.
  config_string = ("" if embedding_config is None else
                   embedding_config.SerializeToString())
  # The configure op must be placed on the TPU_SYSTEM device of the job.
  with ops.device(_tpu_system_device_name(job)):
    return tpu_ops.configure_distributed_tpu(embedding_config=config_string)
def initialize_system_for_tpu_embedding(embedding_config, job=None):
  """Initializes a distributed TPU Embedding system for use with TensorFlow.

  The following two are equivalent:
  1. initialize_system() with embedding_config.
  2. initialize_system() without embedding_config, then
     initialize_system_for_tpu_embedding().
  initialize_system() should not be called with embedding_config if
  initialize_system_for_tpu_embedding() is meant to be called later.

  Args:
    embedding_config: a `TPUEmbeddingConfiguration` proto describing the desired
      configuration of the hardware embedding lookup tables.
    job: The job (the XXX in TensorFlow device specification /job:XXX) that
      contains the TPU devices that will be initialized. If job=None it is
      assumed there is only one job in the TensorFlow flock, and an error will
      be returned if this assumption does not hold.

  Returns:
    A no-op.
  """
  config_string = embedding_config.SerializeToString()
  # The configure op must be placed on the TPU_SYSTEM device of the job.
  with ops.device(_tpu_system_device_name(job)):
    return tpu_ops.configure_tpu_embedding(config=config_string)
@tf_export(v1=["tpu.shutdown_system"])
def shutdown_system(job=None):
  """Shuts down a running, distributed TPU system.

  Args:
    job: The job (the XXX in TensorFlow device specification /job:XXX) that
      contains the TPU devices that will be shutdown. If job=None it is
      assumed there is only one job in the TensorFlow flock, and an error will
      be returned if this assumption does not hold.

  Returns:
    The op that, when run, shuts down the distributed TPU system.
  """
  with ops.device(_tpu_system_device_name(job)):
    shutdown_distributed_tpu = tpu_ops.shutdown_distributed_tpu()
  return shutdown_distributed_tpu
@tf_export(v1=["tpu.core"])
def core(num):
  """Returns the device name for a core in a replicated TPU computation.

  Args:
    num: the virtual core number within each replica to which operators should
      be assigned.

  Returns:
    A device name, suitable for passing to `tf.device()`.
  """
  # Note: no leading "/" — this is a partial device spec that composes with
  # the replica's device.
  return "device:TPU_REPLICATED_CORE:{}".format(num)
def _enclosing_tpu_context_and_graph():
  """Returns the TPUReplicateContext and its associated graph."""
  graph = ops.get_default_graph()
  while graph is not None:
    # pylint: disable=protected-access
    context_ = graph._get_control_flow_context()
    # pylint: enable=protected-access
    # Walk outward through the chain of control-flow contexts in this graph.
    while context_ is not None:
      if isinstance(context_, TPUReplicateContext):
        return context_, graph
      context_ = context_.outer_context
    # Not found here; continue in the enclosing FuncGraph, if any.
    graph = getattr(graph, "outer_graph", None)
  raise ValueError("get_replicated_var_handle() called without "
                   "TPUReplicateContext. This shouldn't happen. Please file "
                   "a bug.")
def is_tpu_strategy(strategy):
  """Returns True if `strategy` looks like a TPUStrategy.

  The check is by class-name convention rather than isinstance: either the
  strategy's class or one of its direct bases must be named `TPUStrategy*`.
  """
  def _named_like_tpu_strategy(klass):
    return klass.__name__.startswith("TPUStrategy")

  clz = strategy.__class__
  return _named_like_tpu_strategy(clz) or any(
      _named_like_tpu_strategy(base) for base in clz.__bases__)
def _enclosing_tpu_device_assignment():
  """Returns the device assignment of the current TPUStrategy, if any.

  Returns None when there is no distribution strategy in scope or when the
  strategy in scope is not a TPUStrategy.
  """
  if not distribution_strategy_context.has_strategy():
    return None
  strategy = distribution_strategy_context.get_strategy()
  if not is_tpu_strategy(strategy):
    return None
  return strategy.extended._device_assignment  # pylint: disable=protected-access
class TPUReplicateContext(control_flow_ops.XLAControlFlowContext):
"""A `ControlFlowContext` for nodes inside a TPU computation.
The primary role of `TPUReplicateContext` is to mark operators inside a
tpu.replicate() computation with the attribute "_tpu_replicate=XYZ", where XYZ
is a unique name.
We use a `ControlFlowContext` to perform the annotation since it integrates
with Tensorflow constructs like ResourceVariables. For example, if a
`ResourceVariable` is constructed inside a tpu.replicate() block, the
`ResourceVariable` implementation can use
`with ops.control_dependencies(None)` to build the variable's definition
outside the replicated computation.
"""
class _TFBufferWrapper(object):
  """An internal class to help manage the TF_Buffer lifetime."""

  def __init__(self, buf_string):
    # Own a C-level TF_Buffer built from the serialized proto bytes.
    self._buffer = pywrap_tensorflow.TF_NewBufferFromString(
        compat.as_bytes(buf_string))

  def __del__(self):
    # Release the underlying buffer when the wrapper is garbage collected.
    pywrap_tensorflow.TF_DeleteBuffer(self._buffer)
def __init__(self, name, num_replicas, pivot):
"""Builds a new TPUReplicateContext.
Args:
name: a unique name for the context, used to populate the `_tpu_replicate`
attribute.
num_replicas: an integer that gives the number of replicas for the
computation.
pivot: a pivot node. Nodes in the TPUReplicateContext that do not have any
inputs will have a control dependency on the pivot node. This ensures
that nodes are correctly included in any enclosing control flow
contexts.
"""
super(TPUReplicateContext, self).__init__()
self._num_replicas = num_replicas
self._outer_device_function_stack = None
self._oc_dev_fn_stack = None
self._outside_compilation_cluster = None
self._outside_compilation_counter = 0
self._in_gradient_colocation = None
self._gradient_colocation_stack = []
self._host_compute_core = []
self._name = name
self._name_as_bytes = compat.as_bytes(name)
self._tpu_relicate_attr_buf = self._TFBufferWrapper(
attr_value_pb2.AttrValue(s=self._name_as_bytes).SerializeToString())
self._unsupported_ops = []
self._pivot = pivot
self._replicated_vars = {}
def get_replicated_var_handle(self, name, vars_, device_map=None):
"""Returns a variable handle for replicated TPU variable 'var'.
This is a method used by an experimental replicated variable implementation
and is not intended as a public API.
Args:
name: The common name of the variable.
vars_: The replicated TPU variables.
device_map: The DeviceMap used to create the variables if it is a
TPUMirroredVariable.
Returns:
The handle of the TPU replicated input node.
"""
device_assignment = _enclosing_tpu_device_assignment()
# We don't need to put device assignment as part of the replicated_vars key
# because each TPUReplicateContext will only have one device assignment.
handle = self._replicated_vars.get(name)
if handle is not None:
return handle
replicated_vars = []
if device_assignment is not None and device_map is not None:
job_name = pydev.DeviceSpec.from_string(device_map.all_devices[0]).job
for replica_id in range(device_assignment.num_replicas):
tpu_device = device_assignment.tpu_device(
replica=replica_id, logical_core=0, job=job_name)
tpu_device = device_util.canonicalize(tpu_device)
replica = device_map.replica_for_device(tpu_device)
replicated_vars.append(vars_[replica])
else:
replicated_vars = vars_
# Builds a TPUReplicatedInput node for the variable, if one does not already
# exist. The TPUReplicatedInput node must belong to the enclosing
# control-flow scope of the TPUReplicateContext.
# TODO(phawkins): consider changing the contract of the TPU encapsulation
# so the TPUReplicatedInput nodes go inside the TPUReplicateContext scope
# instead.
_, graph = _enclosing_tpu_context_and_graph()
with graph.as_default():
# pylint: disable=protected-access
saved_context = graph._get_control_flow_context()
graph._set_control_flow_context(self.outer_context)
handle = tpu_ops.tpu_replicated_input([v.handle for v in replicated_vars],
name=name + "/handle",
is_mirrored_variable=True)
graph._set_control_flow_context(saved_context)
# pylint: enable=protected-access
self._replicated_vars[name] = handle
return handle
def report_unsupported_operations(self):
if self._unsupported_ops:
op_str = "\n".join([" %s (%s)" % (op.type, op.name)
for op in self._unsupported_ops[:_MAX_WARNING_LINES]])
logging.warning("%d unsupported operations found: \n%s",
len(self._unsupported_ops), op_str)
if len(self._unsupported_ops) > _MAX_WARNING_LINES:
logging.warning("... and %d more" %
(len(self._unsupported_ops) - _MAX_WARNING_LINES))
def EnterGradientColocation(self, op, gradient_uid):
if op is not None:
self._gradient_colocation_stack.append(op)
if not self._outside_compilation_cluster:
try:
outside_attr = op.get_attr(_OUTSIDE_COMPILATION_ATTR)
if self._in_gradient_colocation:
raise NotImplementedError(
"Cannot nest gradient colocation operations outside compilation"
)
if gradient_uid == "__unsupported__":
raise NotImplementedError(
"No gradient_uid calling gradient within outside_compilation")
# When we take the gradient of an op X in an outside_compilation
# cluster C in a forward computation we would like to put the ops
# corresponding to the gradient of X into a new outside_compilation
# cluster C'. However, if we take the gradient of X twice, the second
# one should get yet another new outside_compilation cluster C''.
#
# The mechanism we adopt is to use a 'root_cluster' which is the
# cluster that X was in before we took gradients, and a 'gradient_uid'
# which is different for every invocation of gradients, and put the
# gradient of X in cluster 'root_cluster.gradient_uid'.
#
# When taking a gradient of a gradient, some ops will be colocated
# with Op in the forward pass (e.g., cluster root_cluster) and some in
# the backward pass (e.g., cluster root_cluster.initial_gradient_uid).
# We need all of the grad-of-grad ops to be in the same cluster to
# avoid cyclic dependencies between clusters. We adopt a heuristic
# that puts any op clustered with root_cluster.<xxx> in
# root_cluster.gradient_uid, even if xxx was initial_gradient_uid.
self._in_gradient_colocation = op
parts = outside_attr.split(".")
cluster = parts[0] + "." + gradient_uid
self._EnterOutsideCompilationScope(cluster=cluster)
except ValueError:
# The attr was not present: do nothing.
pass
def ExitGradientColocation(self, op, gradient_uid):
if op is not None:
if not self._gradient_colocation_stack:
raise errors.InternalError(
op.node_def, op,
"Badly nested gradient colocation: empty stack when popping Op " +
op.name)
last_op = self._gradient_colocation_stack.pop()
if op is last_op:
if op is self._in_gradient_colocation:
self._in_gradient_colocation = None
self._ExitOutsideCompilationScope()
else:
raise errors.InternalError(
op.node_def, op, "Badly nested gradient colocation, expected " +
last_op + ", got " + op.name)
def _EnterOutsideCompilationScope(self, cluster=None):
class FakeOp(object):
"""A helper class to determine the current device.
Supports only the type and device set/get methods needed to run the
graph's _apply_device_function method.
"""
def __init__(self):
self._device = ""
@property
def type(self):
return "FakeOp"
@property
def device(self):
return self._device
def _set_device(self, device):
if isinstance(device, pydev.DeviceSpec):
self._device = device.to_string()
else:
self._device = device
def _set_device_from_string(self, device_str):
self._device = device_str
if self._outside_compilation_cluster:
raise NotImplementedError("Cannot nest outside_compilation clusters")
if cluster:
self._outside_compilation_cluster = cluster
else:
self._outside_compilation_cluster = str(self._outside_compilation_counter)
self._outside_compilation_counter += 1
graph = ops.get_default_graph()
fake_op = FakeOp()
graph._apply_device_functions(fake_op) # pylint: disable=protected-access
device = pydev.DeviceSpec.from_string(fake_op.device)
if (device.device_type == "TPU_REPLICATED_CORE" and
device.device_index is not None):
self._host_compute_core.append(self._outside_compilation_cluster + ":" +
str(device.device_index))
self._oc_dev_fn_stack = graph._device_function_stack # pylint: disable=protected-access
graph._device_function_stack = self._outer_device_function_stack # pylint: disable=protected-access
def _ExitOutsideCompilationScope(self):
if not self._outside_compilation_cluster:
raise NotImplementedError(
"Attempted to exit outside_compilation scope when not in scope")
self._outside_compilation_cluster = None
graph = ops.get_default_graph()
graph._device_function_stack = self._oc_dev_fn_stack # pylint: disable=protected-access
def Enter(self):
if not self._outer_device_function_stack:
# Capture the device function stack at the time of first entry
# since that is the stack that will be used outside_compilation.
graph = ops.get_default_graph()
# pylint: disable=protected-access
self._outer_device_function_stack = graph._device_function_stack.copy()
# pylint: enable=protected-access
super(TPUReplicateContext, self).Enter()
def HostComputeCore(self):
return self._host_compute_core
def _RemoveExternalControlEdges(self, op):
"""Remove any external control dependency on this op."""
internal_control_inputs = []
external_control_inputs = []
for x in op.control_inputs:
# pylint: disable=protected-access
is_internal_op = False
ctxt = x._get_control_flow_context()
while ctxt is not None:
if ctxt == self:
is_internal_op = True
break
ctxt = ctxt._outer_context
if is_internal_op:
internal_control_inputs.append(x)
else:
external_control_inputs.append(x)
# pylint: enable=protected-access
# pylint: disable=protected-access
op._remove_all_control_inputs()
op._add_control_inputs(internal_control_inputs)
# pylint: enable=protected-access
return internal_control_inputs, external_control_inputs
def AddOp(self, op):
# pylint: disable=protected-access
if op.type in _BLACKLISTED_OPS:
logging.error("Operation of type %s (%s) is not supported on the TPU. "
"Execution will fail if this op is used in the graph. " %
(op.type, op.name))
if op.type in _UNSUPPORTED_OPS:
self._unsupported_ops.append(op)
if any(x.dtype._is_ref_dtype for x in op.inputs):
raise NotImplementedError(
"Non-resource Variables are not supported inside TPU computations "
"(operator name: %s)" % op.name)
if _TPU_REPLICATE_ATTR in op.node_def.attr:
raise ValueError("TPU computations cannot be nested")
op._set_attr_with_buf(
_TPU_REPLICATE_ATTR, self._tpu_relicate_attr_buf._buffer)
if self._outside_compilation_cluster:
op._set_attr(
_OUTSIDE_COMPILATION_ATTR,
attr_value_pb2.AttrValue(
s=compat.as_bytes(self._outside_compilation_cluster)))
if self._num_replicas > 1 or not self._outside_compilation_cluster:
# Prevent feeding or fetching anything that is being compiled,
# and any replicated outside_compilation Op.
op.graph.prevent_feeding(op)
op.graph.prevent_fetching(op)
# Remove any control edges from outer control flow contexts. These may cause
# mismatched frame errors.
(internal_control_inputs,
external_control_inputs) = self._RemoveExternalControlEdges(op)
if not op.inputs:
# Add a control edge from the control pivot to this op.
if not internal_control_inputs:
# pylint: disable=protected-access
op._add_control_input(self.GetControlPivot())
# pylint: enable=protected-access
else:
for index in xrange(len(op.inputs)):
x = op.inputs[index]
real_x = self.AddValue(x)
if real_x is not x:
op._update_input(index, real_x) # pylint: disable=protected-access
if external_control_inputs:
# Use an identity to pull control inputs as data inputs. Note that we
# ignore ops which don't have outputs. TODO(phawkins): fix that.
with ops.control_dependencies(None):
self.Enter()
external_control_inputs = [
array_ops.identity(x.outputs[0]).op
for x in external_control_inputs
if x.outputs
]
self.Exit()
# pylint: disable=protected-access
op._add_control_inputs(external_control_inputs)
# pylint: enable=protected-access
# Mark op's outputs as seen by this context and any outer contexts.
output_names = [x.name for x in op.outputs]
context = self
while context is not None:
# pylint: disable=protected-access
context._values.update(output_names)
context = context._outer_context
# pylint: enable=protected-access
if self._outer_context:
self._outer_context.AddInnerOp(op)
def AddValue(self, val):
"""Add `val` to the current context and its outer context recursively."""
if val.name in self._values:
# Use the real value if it comes from outer context.
result = self._external_values.get(val.name)
return val if result is None else result
result = val
self._values.add(val.name)
if self._outer_context:
result = self._outer_context.AddValue(val)
self._values.add(result.name)
self._external_values[val.name] = result
return result
def AddInnerOp(self, op):
self.AddOp(op)
if self._outer_context:
self._outer_context.AddInnerOp(op)
@property
def grad_state(self):
# Define the gradient loop state associated with the TPUReplicateContext to
# be None as the TPUReplicateContext does not get nested nor does the
# grad_state outside the TPUReplicateContext affect the graph inside so the
# grad_state should be as if this is the top-level gradient state.
return None
@property
def back_prop(self):
"""Forwards to the enclosing while context, if any."""
if self.GetWhileContext():
return self.GetWhileContext().back_prop
return False
def GetControlPivot(self):
return self._pivot
@tf_export(v1=["tpu.outside_compilation"])
def outside_compilation(computation, *args, **kwargs):
  """Builds part of a computation outside any current TPU replicate scope.

  Args:
    computation: A Python function that builds the computation to
      place on the host.
    *args: the positional arguments for the computation.
    **kwargs: the keyword arguments for the computation.

  Returns:
    The Tensors returned by computation.
  """
  if args is None:
    args = []
  graph = ops.get_default_graph()

  def _for_each_tpu_context(start_ctx, action):
    """Applies `action` to every TPUReplicateContext in the context chain."""
    ctx = start_ctx
    while ctx:
      if isinstance(ctx, TPUReplicateContext):
        action(ctx)
      ctx = ctx.outer_context

  # If we are in a TPUReplicateContext, signal that we are now
  # outside_compilation.
  initial_context = graph._get_control_flow_context()  # pylint: disable=protected-access
  _for_each_tpu_context(
      initial_context,
      lambda c: c._EnterOutsideCompilationScope())  # pylint: disable=protected-access

  retval = computation(*args, **kwargs)

  # If we are in a TPUReplicateContext, signal that we are no longer
  # outside_compilation.
  final_context = graph._get_control_flow_context()  # pylint: disable=protected-access
  if initial_context is not final_context:
    raise NotImplementedError(
        "Control-flow context cannot be different at start and end of an "
        "outside_compilation scope")
  _for_each_tpu_context(
      initial_context,
      lambda c: c._ExitOutsideCompilationScope())  # pylint: disable=protected-access
  return retval
@tf_export(v1=["tpu.replicate"])
def replicate(computation,
              inputs=None,
              infeed_queue=None,
              device_assignment=None,
              name=None,
              maximum_shapes=None):
  """Builds a graph operator that runs a replicated TPU computation.

  This is a thin convenience wrapper over `split_compile_and_replicate` that
  discards the compile op and returns only the per-replica outputs.

  Args:
    computation: A Python function that builds the computation to replicate.
    inputs: A list of lists of input tensors or `None` (equivalent to
      `[[]]`), indexed by `[replica_num][input_num]`. All replicas must
      have the same number of inputs. Each input can be a nested structure
      containing values that are convertible to tensors. Note that passing an
      N-dimension list of compatible values will result in a N-dimension list
      of scalar tensors rather than a single Rank-N tensors. If you need
      different behavior, convert part of inputs to tensors with
      `tf.convert_to_tensor`.
    infeed_queue: If not `None`, the `InfeedQueue` from which to append a tuple
      of arguments as inputs to computation.
    device_assignment: If not `None`, a `DeviceAssignment` describing the
      mapping between logical cores in the computation with physical cores in
      the TPU topology. Uses a default device assignment if `None`. The
      `DeviceAssignment` may be omitted if each replica of the computation uses
      only one core, and there is either only one replica, or the number of
      replicas is equal to the number of cores in the TPU system.
    name: (Deprecated) Does nothing.
    maximum_shapes: A nested structure of tf.TensorShape representing the shape
      to which the respective component of each input element in each replica
      should be padded. Any unknown dimensions (e.g.
      tf.compat.v1.Dimension(None) in a tf.TensorShape or -1 in a tensor-like
      object) will be padded to the maximum size of that dimension over all
      replicas. The structure of `maximum_shapes` needs to be the same as
      `inputs[0]`.

  Returns:
    A list of outputs, indexed by `[replica_num]` each output can be a nested
    structure same as what computation() returns with a few exceptions.

    Exceptions include:
      1) None output: a NoOp would be returned which control-depends on
         computation.
      2) Single value output: A tuple containing the value would be returned.
      3) Operation-only outputs: a NoOp would be returned which
         control-depends on computation.
      TODO(b/121383831): Investigate into removing these special cases.

  Raises:
    ValueError: If all replicas do not have equal numbers of input tensors.
    ValueError: If the number of inputs per replica does not match
      the number of formal parameters to `computation`.
    ValueError: If the static `inputs` dimensions don't match with the values
      given in `maximum_shapes`.
    ValueError: If the structure of inputs per replica does not match
      the structure of `maximum_shapes`.
  """
  compile_op_and_outputs = split_compile_and_replicate(
      computation,
      inputs,
      infeed_queue,
      device_assignment,
      name,
      maximum_shapes=maximum_shapes)
  # Element 0 is the compile op; element 1 is the per-replica output list.
  return compile_op_and_outputs[1]
def _pad_all_input(inputs, padded_shapes):
  """Pad all input tensors given padded_shapes.

  The real shape tensors will be concatenated with the padded original inputs.

  Args:
    inputs: The original inputs, indexed by `[replica][input_num]`.
    padded_shapes: A list of padded shapes for each input.

  Returns:
    The padded inputs and a PaddingMap list which maps the padded input
    dimension to the real shape argument index.
  """
  # maximum_static_shapes[idx][i] indicates the maximum static size of ith
  # dimension of the idx input among all the replicas.
  maximum_static_shapes = []
  # need_padding[idx][i] indicates whether the ith dimension of the idx input
  # needs padding.
  need_padding = []
  # input_shape_tensors[idx][core] is the dynamic shape tensor of input `idx`
  # on replica `core`.
  input_shape_tensors = []
  # First pass: collect static shape maxima, padding needs, and dynamic
  # shape tensors across all replicas.
  for core_idx, inputs_per_core in enumerate(inputs):
    for idx, input_tensor in enumerate(inputs_per_core):
      input_shape = input_tensor.get_shape().as_list()
      if core_idx == 0:
        input_shape_tensors.append([])
        maximum_static_shapes.append(input_shape)
        need_padding.append(np.full_like(input_shape, False, dtype=bool))
      else:
        # A dimension needs padding if it is unknown (`not s` also catches 0)
        # or differs from the running maximum seen on earlier replicas.
        for i, s in enumerate(input_shape):
          if not s or s != maximum_static_shapes[idx][i]:
            need_padding[idx][i] = True
        maximum_static_shapes[idx] = max(input_shape,
                                         maximum_static_shapes[idx])
      # Append _POST_DEVICE_REWRITE_ATTR attributes to the real shape ops.
      real_input_shape = array_ops.shape(input_tensor)
      real_input_shape.op._set_attr(  # pylint: disable=protected-access
          _POST_DEVICE_REWRITE_ATTR,
          attr_value_pb2.AttrValue(b=True))
      input_shape_tensors[idx].append(real_input_shape)
  # Dynamic per-dimension maximum of each input over all replicas.
  maximum_shapes = []
  for shapes_per_input in input_shape_tensors:
    maximum_shapes.append(
        math_ops.reduce_max(array_ops.stack(shapes_per_input), axis=0))
  padded_inputs = []
  real_shapes = []
  padding_maps = []
  # Second pass: pad each input and build the PaddingMap protos that record
  # where the real (unpadded) sizes are appended in the argument list.
  for core_idx, inputs_per_core in enumerate(inputs):
    padded_inputs.append([])
    real_shapes.append([])
    # Index of the next appended real-shape argument; starts after the last
    # original input.
    real_shape_idx = len(inputs_per_core) - 1
    for idx, input_tensor in enumerate(inputs_per_core):
      input_shape_tensor = input_shape_tensors[idx][core_idx]
      input_shape = input_tensor.get_shape().as_list()
      padded_shape = padded_shapes[idx]
      if any(need_padding[idx]):
        for i, s in enumerate(input_shape):
          if need_padding[idx][i]:
            if core_idx == 0:
              # PaddingMaps are shared across replicas; only build them once.
              real_shape_idx += 1
              padding_map = dynamic_padding.PaddingMap()
              padding_map.arg_index = idx
              padding_map.shape_index = i
              padding_map.padding_arg_index = real_shape_idx
              padding_maps.append(padding_map)
            real_shapes[core_idx].append(
                math_ops.cast(input_shape_tensor[i], dtypes.int32))
        paddings = []
        for i, s in enumerate(padded_shape.dims):
          if need_padding[idx][i]:
            if s.value:
              # Pad to the given maximum value.
              padding = [0, s.value - input_shape_tensor[i]]
            else:
              # If maximum value is not given, then pad to the maximum dimension
              # among all the cores.
              padding = [0, maximum_shapes[idx][i] - input_shape_tensor[i]]
          else:
            padding = [0, 0]
          paddings.append(padding)
        if input_tensor.get_shape().is_fully_defined():
          # TODO(rxsang): This is a hack to make sure padded_input has dynamic
          # shapes, so any tf.size/tf.shape op performed on it won't be constant
          # folded. Do we have better ways to do it?
          padded_input = control_flow_ops.cond(
              array_ops.constant(True),
              lambda: array_ops.pad(input_tensor, paddings),  # pylint: disable=cell-var-from-loop
              lambda: input_tensor)
        else:
          padded_input = array_ops.pad(input_tensor, paddings)
        # Append _POST_DEVICE_REWRITE_ATTR attributes to all padded inputs.
        padded_input.op._set_attr(  # pylint: disable=protected-access
            _POST_DEVICE_REWRITE_ATTR,
            attr_value_pb2.AttrValue(b=True))
        padded_inputs[core_idx].append(padded_input)
      else:
        padded_inputs[core_idx].append(input_tensor)
  num_replicas = len(padded_inputs)
  # Append the real-shape scalars after each replica's padded inputs.
  for i in range(num_replicas):
    padded_inputs[i].extend(real_shapes[i])
  return padded_inputs, padding_maps
def split_compile_and_replicate(computation,
                                inputs=None,
                                infeed_queue=None,
                                device_assignment=None,
                                name=None,
                                use_tpu=True,
                                maximum_shapes=None):
  """Builds graph operators that runs compilation and replicated computation.

  This is a lower level interface than replicate that returns a separate compile
  and execute output tensor. In the generated graph the compile op feeds into
  the execute op and no additional compilation is incurred when running the
  compile op before the execute op. The compile op returns additional
  information about the compilation but does not return the compiled program.

  Args:
    computation: A Python function that builds the computation to replicate.
    inputs: A list of lists of input tensors or `None` (equivalent to
      `[[]]`), indexed by `[replica_num][input_num]`. All replicas must
      have the same number of inputs. Each input can be a nested structure
      containing values that are convertible to tensors. Note that passing an
      N-dimension list of compatible values will result in a N-dimension list
      of scalar tensors rather than a single Rank-N tensors. If you need
      different behavior, convert part of inputs to tensors with
      `tf.convert_to_tensor`.
    infeed_queue: If not `None`, the `InfeedQueue` from which to append a tuple
      of arguments as inputs to computation.
    device_assignment: If not `None`, a `DeviceAssignment` describing the
      mapping between logical cores in the computation with physical cores in
      the TPU topology. Uses a default device assignment if `None`. The
      `DeviceAssignment` may be omitted if each replica of the computation uses
      only one core, and there is either only one replica, or the number of
      replicas is equal to the number of cores in the TPU system.
    name: (Deprecated) Does nothing.
    use_tpu: When false, the input `computation` is executed on the XLA CPU/GPU
      backends. Currently, only supports a default placement (computation is
      placed on GPU if one is available, and on CPU if not).
    maximum_shapes: A nested structure of tf.TensorShape representing the shape
      to which the respective component of each input element in each replica
      should be padded. Any unknown dimensions (e.g.
      tf.compat.v1.Dimension(None) in a tf.TensorShape or -1 in a tensor-like
      object) will be padded to the maximum size of that dimension over all
      replicas. The structure of `maximum_shapes` needs to be the same as
      `inputs[0]`.

  Returns:
    A list of lists with the first list corresponding to the compile op and the
    second a list of output tensors, indexed by `[replica_num][output_num]`.

  Raises:
    ValueError: If all replicas do not have equal numbers of input tensors.
    ValueError: If the number of inputs per replica does not match
      the number of formal parameters to `computation`.
    ValueError: If the static `inputs` dimensions don't match with the values
      given in `maximum_shapes`.
    ValueError: If the structure of inputs per replica does not match
      the structure of `maximum_shapes`.
  """
  del name
  inputs = [[]] if inputs is None else inputs
  # Attributes attached to the TPUReplicateMetadata op below.
  metadata_kwargs = {}
  if device_assignment is not None:
    # Turn the Numpy array into a flattened list so we can pass it as an
    # operator attribute.
    metadata_kwargs = {
        "topology":
            device_assignment.topology.serialized(),
        "device_assignment":
            device_assignment.core_assignment.flatten().tolist()
    }
    # TODO(phawkins): remove this case after the forward compatibility window
    # expires on 2018-10-5.
    if api_compat.forward_compatible(2018, 10, 5):
      metadata_kwargs["num_cores_per_replica"] = (
          device_assignment.num_cores_per_replica)
    else:
      metadata_kwargs["computation_shape"] = [
          device_assignment.num_cores_per_replica
      ]
  # This entry is used for enabling automatic outside compilation.
  metadata_kwargs["allow_soft_placement"] = config.get_soft_device_placement()
  if ((not isinstance(inputs, list)) or
      any(not isinstance(inp, (list, tuple)) for inp in inputs)):
    raise TypeError("tpu.replicate() inputs must be a list of lists/tuples")
  num_replicas = len(inputs)
  # No replicas? Nothing to do.
  if num_replicas == 0:
    return []
  # Checks all replicas have the same structure.
  for i in xrange(1, num_replicas):
    nest.assert_same_structure(inputs[0], inputs[i])
  # Flatten inputs.
  flat_inputs = [
      nest.flatten(per_replica_input) for per_replica_input in inputs
  ]
  # Converts inputs to Tensors.
  flat_inputs = [[ops.convert_to_tensor(x) for x in inp] for inp in flat_inputs]
  # Verifies that all replicas have matching numbers and types of inputs
  flat_input_types = [x.dtype for x in flat_inputs[0]]
  input_arity = len(inputs[0])
  flat_input_arity = len(flat_input_types)
  for i in range(num_replicas):
    if len(inputs[i]) != input_arity:
      raise ValueError("Replicas must have the same number of inputs. "
                       "Replica 0 had {} inputs, replica {} had {} "
                       "inputs.".format(input_arity, i, len(inputs[i])))
    types = [x.dtype for x in flat_inputs[i]]
    if types != flat_input_types:
      raise ValueError("Replicas must have matching input types. Replica 0 had "
                       "input types {}, replica {} had input types {}".format(
                           flat_input_types, i, types))
  # Check the computation's formal parameter count against the inputs
  # (plus the infeed tuple, if present).
  arg_error = xla.check_function_argument_count(
      computation, input_arity, infeed_queue)
  if arg_error is not None:
    if infeed_queue is None:
      raise TypeError(
          "Supplied computation cannot be called with the specified inputs. "
          "You specified %d inputs: %s, but the computation needs %s" % (
              input_arity, str([i.name for i in inputs[0]]), arg_error))
    else:
      raise TypeError(
          "Supplied computation cannot be called with the specified inputs. "
          "You specified %d inputs: %s and %d additional inputs from infeed,"
          " but the computation needs %s" % (input_arity, str(
              [i.name
               for i in inputs[0]]), infeed_queue.number_of_tuple_elements,
                                             arg_error))
  if maximum_shapes:
    if infeed_queue:
      raise ValueError(
          "Dynamic input shapes are not supported with infeed queues")
    # Make sure maximum_shapes has the same structure as inputs.
    nest.assert_same_structure(inputs[0], maximum_shapes, check_types=False)
    # Flatten padded shapes.
    flat_maximum_shapes = nest.flatten(maximum_shapes)
    flat_maximum_shapes = [
        tensor_shape.TensorShape(s) for s in flat_maximum_shapes
    ]
    # Pad the inputs and collect the PaddingMap protos describing where the
    # real (unpadded) sizes were appended.
    flat_inputs, padding_maps = _pad_all_input(flat_inputs, flat_maximum_shapes)
    serialized_padding_maps = []
    for padding_map in padding_maps:
      serialized_padding_maps.append(padding_map.SerializeToString())
    metadata_kwargs["padding_map"] = serialized_padding_maps
  metadata_kwargs["step_marker_location"] = getattr(
      computation, "step_marker_location", "STEP_MARK_AT_ENTRY")
  graph = ops.get_default_graph()
  # Fan-in: Builds a TPUReplicatedInput node for each input.
  flat_replicated_inputs = []
  for i in range(0, len(flat_inputs[0])):
    replicas = [flat_inputs[replica][i] for replica in xrange(num_replicas)]
    if api_compat.forward_compatible(2019, 9, 19):
      flat_replicated_inputs.append(
          tpu_ops.tpu_replicated_input(replicas, name="input{}".format(i),
                                       index=i))
    else:
      flat_replicated_inputs.append(
          tpu_ops.tpu_replicated_input(replicas, name="input{}".format(i)))
  if isinstance(graph, func_graph.FuncGraph):
    # When we are in Tensorflow 2.0 function, 'graph' will be a FuncGraph
    # object. If both outside graph and this function have a TPU cluster,
    # they will have the same cluster name and it will cause problems (because
    # we lower functional ops in Tensorflow 2.0). Append function name to
    # 'cluster_name' to avoid cluster name collision.
    cluster_name = graph.unique_name("cluster_" + graph.name)
  else:
    cluster_name = graph.unique_name("cluster")
  pivot = control_flow_ops.no_op(name=cluster_name + "/pivot")
  context = TPUReplicateContext(
      name=cluster_name, num_replicas=num_replicas, pivot=pivot)
  try:
    context.Enter()
    metadata = tpu_ops.tpu_replicate_metadata(
        num_replicas=num_replicas, use_tpu=use_tpu, **metadata_kwargs)
    with tpu_function.tpu_shard_context(
        num_replicas), ops.control_dependencies([metadata]):
      # Add identity ops so even unused inputs are "consumed" by the
      # computation. This is to avoid orphaned TPUReplicatedInput nodes.
      # TODO(phawkins): consider instead pruning unused TPUReplicatedInput
      # and eliding trivial TPUReplicatedInput/TPUReplicatedOutput pairs.
      flat_replicated_inputs = [
          array_ops.identity(x, name="replicated_input_{}".format(i))
          for i, x in enumerate(flat_replicated_inputs)
      ]
      for i in flat_replicated_inputs:
        # pylint: disable=protected-access
        # Add an attribute to the identity node so that they could be removed in
        # encapsulate TPU computation pass if unused. However we don't remove
        # inputs when dynamic padding is enabled.
        # TODO(rxsang): Use other ways except argument index in padding_map so
        # outside compilation can work with dynamic padding correctly.
        if maximum_shapes is None:
          i.op._set_attr("_tpu_input_identity",
                         attr_value_pb2.AttrValue(b=True))
        # pylint: enable=protected-access
      # Unflatten the computation inputs to match original input structure.
      computation_inputs = nest.pack_sequence_as(
          structure=inputs[0],
          flat_sequence=flat_replicated_inputs[:flat_input_arity])
      # If there is an infeed queue, adds the dequeued values to the
      # computation's inputs.
      if infeed_queue is not None:
        infeed_queue.set_number_of_shards(num_replicas)
        for t in infeed_queue.generate_dequeue_op():
          computation_inputs.append(t)
      # Only resource variables work inside a TPU computation, so turn on
      # resource variables for the computation.
      # TODO(phawkins): consider removing this code. It will
      # be less confusing to clients if they knowingly choose to use resource
      # variables.
      # Partitioned variables is not supported (b/112311320).
      vscope = variable_scope.get_variable_scope()
      saved_use_resource = vscope.use_resource
      saved_custom_getter = vscope.custom_getter

      def custom_getter(getter, name, *args, **kwargs):
        """Variables on TPU have a few restrictions."""
        partitioner = kwargs["partitioner"]
        if partitioner is not None:
          # Silently drop the partitioner (with a warning): partitioned
          # variables are unsupported on TPU.
          kwargs["partitioner"] = None
          logging.warning(
              "Partitioned variables are not supported on TPU. Got "
              "`partitioner` that is {} for variable {}. "
              "Setting `partitioner` to `None`."
              .format(partitioner, name))
        if saved_custom_getter is None:
          return getter(name, *args, **kwargs)
        else:
          return saved_custom_getter(getter, name, *args, **kwargs)

      vscope.set_use_resource(True)
      vscope.set_custom_getter(custom_getter)
      outputs = computation(*computation_inputs)
      # Restore the variable scope exactly as it was before the computation.
      vscope.set_use_resource(saved_use_resource)
      vscope.set_custom_getter(saved_custom_getter)
      outputs_is_flat = xla.is_flat(outputs)
      if outputs_is_flat:
        output_tensors, control_deps = _postprocess_flat_outputs(outputs)
      else:
        output_tensors, control_deps = _postprocess_non_flat_outputs(outputs)
      # tensor_tracer imports tpu.py. Local import to tensor_tracer to avoid
      # import-cycle
      # pylint: disable=g-import-not-at-top
      from tensorflow.python.tpu import tensor_tracer
      # pylint: enable=g-import-not-at-top
      if tensor_tracer.TensorTracer.is_enabled():
        tt = tensor_tracer.TensorTracer()
        output_tensors = tt.trace_tpu(ops.get_default_graph(),
                                      output_tensors, control_deps,
                                      num_replicas)
      context.ExitResult(output_tensors)
  finally:
    context.report_unsupported_operations()
    context.Exit()
    host_compute_core = context.HostComputeCore()
  if host_compute_core:
    # Record which cores host outside_compilation clusters on the metadata op.
    attr_value = attr_value_pb2.AttrValue()
    attr_value.list.s.extend([compat.as_bytes(x) for x in host_compute_core])
    metadata._set_attr("host_compute_core", attr_value)  # pylint: disable=protected-access
  with ops.control_dependencies([metadata]):
    if use_tpu:
      compile_status = tpu_ops.tpu_compilation_result()
      op = compile_status.op
      attr_value = attr_value_pb2.AttrValue(s=compat.as_bytes(cluster_name))
      op._set_attr(_TPU_COMPILATION_STATUS_ATTR, attr_value)  # pylint: disable=protected-access
    else:
      compile_status = control_flow_ops.no_op(name="compilation_status")
  if not output_tensors:
    # Returns a list of NoOps dependent on the replication Op, indexed by
    # [replica_num].
    return [
        compile_status,
        [
            control_flow_ops.group(control_deps, name="shard_%d" % i)
            for i in range(num_replicas)
        ]
    ]
  # Fan-out: Builds a TPUReplicatedOutput node for each output.
  replicated_outputs = [[] for i in xrange(num_replicas)]
  for i, t in enumerate(output_tensors):
    # Fan-out: Builds a TPUReplicatedOutput node for each output.
    ys = tpu_ops.tpu_replicated_output(
        t, num_replicas, name="output{}".format(i))
    # Wraps the outputs in identity operators so the names of any possible
    # `fetch` nodes are preserved by the replication rewrite.
    with ops.control_dependencies(control_deps):
      for replica in xrange(num_replicas):
        replicated_outputs[replica].append(
            array_ops.identity(
                ys[replica], name="output_%d_shard_%d" % (i, replica)))
  if not outputs_is_flat:
    # Restore each replica's outputs to the structure computation() returned.
    replicated_outputs = [
        nest.pack_sequence_as(outputs, replica_outs)
        for replica_outs in replicated_outputs
    ]
  return [compile_status, replicated_outputs]
def _postprocess_flat_outputs(outputs):
  """Validates flat outputs, adds back device assignments and other attrs.

  Args:
    outputs: Output from `computation` inside `tpu.rewrite`.

  Returns:
    Tensors and Operations extracted from outputs.

  Raises:
    ValueError: if an output is neither an Operation nor convertible to a
      Tensor, or if Tensors do not precede Operations in the output list.
  """
  # Following code segment is to preserve legacy behavior. Previously we only
  # supported flat outputs and thus for consistency it was nice to convert even
  # single element into a tuple. But now that we support arbitrary output
  # structure, this is no longer necessary.
  # TODO(b/121383831): Migrate all legacy use cases and delete this special
  # case.
  # If the computation returns `None`, make it an empty tuple.
  if outputs is None:
    outputs = tuple()
  # If the computation only returned one value, makes it a tuple.
  if not isinstance(outputs, collections_abc.Sequence):
    outputs = (outputs,)
  # Append `no_op` here so that fetching any return value of this function
  # will trigger TPUExecute node.
  outputs += (control_flow_ops.no_op(),)
  try:
    with ops.device(core(0)):
      outputs = [
          o if isinstance(o, ops.Operation) else ops.convert_to_tensor(o)
          for o in outputs
      ]
  except Exception as e:
    raise ValueError(
        "TPU function return values must all either be Operations or "
        "convertible to Tensors. Got '%s'" % str(e))
  # Separates the returned Operations and Tensors.
  output_operations = [o for o in outputs if isinstance(o, ops.Operation)]
  output_tensors = [o for o in outputs if not isinstance(o, ops.Operation)]
  # Order check: partitioning the list must reproduce it exactly, i.e. all
  # Tensors must come before all Operations in the original output.
  if outputs != output_tensors + output_operations:
    raise ValueError(
        "TPU functions must return zero-or more Tensor values followed by "
        "zero or more Operations.")
  # Wraps outputs in Identity ops. Otherwise a replicated input copied
  # straight to an output would bypass the replicate(). This would be bad
  # because the TPUReplicatedInput/TPUReplicatedOutput operator would not
  # be rewritten away, leading to a runtime error.
  # TODO(phawkins): extend the rewrite to elide these nodes instead.
  new_output_tensors = []
  for t in output_tensors:
    with ops.device(t.device if t.device else core(0)):
      o = array_ops.identity(t)
      # pylint: disable=protected-access
      o.op._set_attr("_tpu_output_identity", attr_value_pb2.AttrValue(b=True))
      # pylint: enable=protected-access
      new_output_tensors.append(o)
  return new_output_tensors, output_operations
def _postprocess_non_flat_outputs(outputs):
  """Validates non-flat outputs, adds back device assignments and other attrs.

  Args:
    outputs: Output from `computation` inside `tpu.rewrite`.

  Returns:
    Tensors extracted from outputs and an empty list because Operations are not
    allowed in non-flat outputs.

  Raises:
    ValueError: if an output is an Operation or is not convertible to a Tensor.
  """
  # Flatten output items.
  flat_outputs = nest.flatten(outputs)
  # Convert all non-Operation outputs to Tensors.
  for i, o in enumerate(flat_outputs):
    if isinstance(o, ops.Operation):
      raise ValueError(
          "tpu.rewrite does not support Operation as return value in non-flat "
          "output structure. You can set returned Operations as control "
          "dependencies of returned Tensors so Operations are triggered when "
          'Tensors are evaluated. Operation found: "%s"' % o.name)
    try:
      o = ops.convert_to_tensor(o)
    except Exception as e:
      raise ValueError(
          "TPU function return values must all either be Operations or "
          'convertible to Tensors. Got error: "%s"' % str(e))
    # Wraps outputs in Identity ops. Otherwise a replicated input copied
    # straight to an output would bypass the replicate(). This would be bad
    # because the TPUReplicatedInput/TPUReplicatedOutput operator would not
    # be rewritten away, leading to a runtime error.
    # TODO(phawkins): extend the rewrite to elide these nodes instead.
    with ops.device(core(0)):
      o = array_ops.identity(o)
      # pylint: disable=protected-access
      o.op._set_attr("_tpu_output_identity", attr_value_pb2.AttrValue(b=True))
      # pylint: enable=protected-access
      # NOTE(review): this wraps `o` in a *second* Identity; only the first
      # carries the `_tpu_output_identity` attr. Confirm the extra wrap is
      # intentional rather than `flat_outputs[i] = o`.
      flat_outputs[i] = array_ops.identity(o)
  # All flat_outputs are Tensors, and no Operations.
  return flat_outputs, []
def split_compile_and_shard(computation,
                            inputs=None,
                            num_shards=1,
                            input_shard_axes=None,
                            outputs_from_all_shards=True,
                            output_shard_axes=None,
                            infeed_queue=None,
                            device_assignment=None,
                            name=None):
  """Shards `computation` for parallel execution.

  `inputs` must be a list of Tensors or None (equivalent to an empty list), each
  of which has a corresponding split axis (from `input_shard_axes`). Each input
  is split into `num_shards` pieces along the corresponding axis, and
  computation is applied to each shard in parallel.

  Tensors are broadcast to all shards if they are lexically captured by
  `computation`. e.g.,

  x = tf.constant(7)
  def computation():
    return x + 3
  ... = shard(computation, ...)

  If `outputs_from_all_shards` is true, the outputs from all shards of
  `computation` are concatenated back together along their `output_shard_axes`.
  Otherwise, each output is taken from an arbitrary shard.

  Inputs and outputs of the computation must be at least rank-1 Tensors.

  Args:
    computation: A Python function that builds a computation to apply to each
      shard of the input.
    inputs: A list of input tensors or None (equivalent to an empty list). Each
      input tensor has a corresponding shard axes, given by `input_shard_axes`,
      which must have size divisible by `num_shards`.
    num_shards: The number of shards.
    input_shard_axes: A list of dimensions along which to shard `inputs`, or
      `None`. `None` means "shard all inputs along dimension 0". If not `None`,
      there must be one dimension per input.
    outputs_from_all_shards: Boolean or list of boolean. For each output, if
      `True`, outputs from all shards are concatenated along the corresponding
      `output_shard_axes` entry. Otherwise, each output is taken
      from an arbitrary shard. If the argument is a boolean, the argument's
      value is used for each output.
    output_shard_axes: A list of dimensions along which to concatenate the
      outputs of `computation`, or `None`. `None` means "concatenate all outputs
      along dimension 0". If not `None`, there must be one dimension per output.
      Ignored if `outputs_from_all_shards` is False.
    infeed_queue: If not `None`, the `InfeedQueue` to use to augment the inputs
      of `computation`.
    device_assignment: If not `None`, a `DeviceAssignment` describing the
      mapping between logical cores in the computation with physical cores in
      the TPU topology. Uses a default device assignment if `None`. The
      `DeviceAssignment` may be omitted if each shard of the computation uses
      only one core, and there is either only one shard, or the number of shards
      is equal to the number of cores in the TPU system.
    name: (Deprecated) Does nothing.

  Returns:
    A tuple of (compile op, [output tensors]).

  Raises:
    ValueError: If num_shards <= 0
    ValueError: If len(input_shard_axes) != len(inputs)
    ValueError: If len(output_shard_axes) != len(outputs from `computation`)
  """
  # TODO(phawkins): consider adding support for broadcasting Tensors passed as
  # inputs.
  if num_shards <= 0:
    raise ValueError("num_shards must be a positive integer.")

  inputs = [] if inputs is None else inputs
  if not isinstance(inputs, list):
    raise TypeError("tpu.shard()'s inputs must be a list of Tensors or None.")

  # Converts inputs to Tensors.
  inputs = [ops.convert_to_tensor(x) for x in inputs]

  if input_shard_axes is None:
    input_shard_axes = [0] * len(inputs)
  if len(inputs) != len(input_shard_axes):
    raise ValueError("Length of input_shard_axes must be equal to the number "
                     "of inputs.")

  if inputs:
    # Splits the `inputs` along the corresponding `input_shard_axes`, giving
    # lists with layout [input][shard]
    split_inputs = [
        array_ops.split(x, num_shards, axis=axis)
        for (axis, x) in zip(input_shard_axes, inputs)]

    # Transposes the input lists to have layout [shard][input]
    transposed_inputs = [list(i) for i in zip(*split_inputs)]
  else:
    # One *distinct* empty list per shard. Note `[[]] * num_shards` would
    # alias a single list object across every shard, which becomes a bug the
    # moment any per-shard list is mutated downstream.
    transposed_inputs = [[] for _ in range(num_shards)]

  compile_op, outputs = split_compile_and_replicate(
      computation,
      transposed_inputs,
      infeed_queue=infeed_queue,
      device_assignment=device_assignment,
      name=name)

  # There must be at least one shard since num_shards > 0.
  # TODO(b/36647078) remove disable when pylint bug is fixed.
  # pylint: disable=indexing-exception
  if isinstance(outputs[0], ops.Operation):
    # pylint: enable=indexing-exception
    # There were no outputs from the computation and replicate returned a list
    # of NoOps with control dependencies on the computation. Return the first
    # one so it can be used as a control dependency or fetch node.
    # TODO(b/36647078) remove disable when pylint bug is fixed.
    # pylint: disable=indexing-exception
    return compile_op, [outputs[0]]
    # pylint: enable=indexing-exception

  # TODO(b/36647078) remove disable when pylint bug is fixed.
  # pylint: disable=indexing-exception
  num_outputs = len(outputs[0])
  # pylint: enable=indexing-exception

  if output_shard_axes is None:
    output_shard_axes = [0] * num_outputs
  if num_outputs != len(output_shard_axes):
    raise ValueError("Length of output_shard_axes must be equal to the number "
                     "of outputs.")

  if isinstance(outputs_from_all_shards, bool):
    outputs_from_all_shards = [outputs_from_all_shards] * num_outputs

  if num_outputs != len(outputs_from_all_shards):
    raise ValueError("Length of outputs_from_all_shards must be equal to the "
                     "number of outputs.")

  results = []
  for (axis, all_shards, x) in zip(output_shard_axes, outputs_from_all_shards,
                                   zip(*outputs)):
    if all_shards:
      # Concatenate all of the outputs together (use stack for scalars).
      shape = x[0].shape
      is_scalar = shape is not None and (shape.ndims == 0)
      results.append((array_ops.stack(list(x)) if is_scalar
                      else array_ops.concat(list(x), axis=axis)))
    else:
      # TODO(phawkins): use a smarter policy, e.g., round-robin across shards.
      results.append(x[0])

  return compile_op, results
@tf_export(v1=["tpu.shard"])
def shard(computation,
          inputs=None,
          num_shards=1,
          input_shard_axes=None,
          outputs_from_all_shards=True,
          output_shard_axes=None,
          infeed_queue=None,
          device_assignment=None,
          name=None):
  """Shards `computation` for parallel execution on a TPU.

  Every entry of `inputs` (a list of Tensors, or None for an empty list) is
  split into `num_shards` pieces along its axis from `input_shard_axes`
  (dimension 0 for every input when `None`), and `computation` is applied to
  each shard in parallel. Tensors lexically captured by `computation` are
  broadcast to all shards. e.g.,

  x = tf.constant(7)
  def computation():
    return x + 3
  ... = shard(computation, ...)

  TODO(phawkins): consider adding support for broadcasting Tensors passed
  as inputs.

  If `outputs_from_all_shards` is true, the outputs from all shards of
  `computation` are concatenated back together along their `output_shard_axes`;
  otherwise each output is taken from an arbitrary shard.

  Inputs and outputs of the computation must be at least rank-1 Tensors.

  Args:
    computation: A Python function that builds a computation to apply to each
      shard of the input.
    inputs: A list of input tensors or None (equivalent to an empty list). Each
      input tensor has a corresponding shard axes, given by `input_shard_axes`,
      which must have size divisible by `num_shards`.
    num_shards: The number of shards.
    input_shard_axes: A list of dimensions along which to shard `inputs`, or
      `None`. `None` means "shard all inputs along dimension 0". If not `None`,
      there must be one dimension per input.
    outputs_from_all_shards: Boolean or list of boolean. For each output, if
      `True`, outputs from all shards are concatenated along the corresponding
      `output_shard_axes` entry. Otherwise, each output is taken
      from an arbitrary shard. If the argument is a boolean, the argument's
      value is used for each output.
    output_shard_axes: A list of dimensions along which to concatenate the
      outputs of `computation`, or `None`. `None` means "concatenate all outputs
      along dimension 0". If not `None`, there must be one dimension per output.
      Ignored if `outputs_from_all_shards` is False.
    infeed_queue: If not `None`, the `InfeedQueue` to use to augment the inputs
      of `computation`.
    device_assignment: If not `None`, a `DeviceAssignment` describing the
      mapping between logical cores in the computation with physical cores in
      the TPU topology. Uses a default device assignment if `None`. The
      `DeviceAssignment` may be omitted if each shard of the computation uses
      only one core, and there is either only one shard, or the number of shards
      is equal to the number of cores in the TPU system.
    name: (Deprecated) Does nothing.

  Returns:
    A list of output tensors.

  Raises:
    ValueError: If num_shards <= 0
    ValueError: If len(input_shard_axes) != len(inputs)
    ValueError: If len(output_shard_axes) != len(outputs from `computation`)
  """
  # Delegate to the split form and discard the compile op.
  _, sharded_outputs = split_compile_and_shard(
      computation,
      inputs=inputs,
      num_shards=num_shards,
      input_shard_axes=input_shard_axes,
      outputs_from_all_shards=outputs_from_all_shards,
      output_shard_axes=output_shard_axes,
      infeed_queue=infeed_queue,
      device_assignment=device_assignment,
      name=name)
  return sharded_outputs
@tf_export(v1=["tpu.batch_parallel"])
def batch_parallel(computation,
                   inputs=None,
                   num_shards=1,
                   infeed_queue=None,
                   device_assignment=None,
                   name=None):
  """Shards `computation` along the batch dimension for parallel execution.

  Convenience wrapper around shard().

  `inputs` must be a list of Tensors or None (equivalent to an empty list).
  Each input is split into `num_shards` pieces along the 0-th dimension, and
  computation is applied to each shard in parallel.

  Tensors are broadcast to all shards if they are lexically captured by
  `computation`. e.g.,

  x = tf.constant(7)
  def computation():
    return x + 3
  ... = shard(computation, ...)

  The outputs from all shards are concatenated back together along their 0-th
  dimension.

  Inputs and outputs of the computation must be at least rank-1 Tensors.

  Args:
    computation: A Python function that builds a computation to apply to each
      shard of the input.
    inputs: A list of input tensors or None (equivalent to an empty list). The
      0-th dimension of each Tensor must have size divisible by `num_shards`.
    num_shards: The number of shards.
    infeed_queue: If not `None`, the `InfeedQueue` from which to append a tuple
      of arguments as inputs to `computation`.
    device_assignment: If not `None`, a `DeviceAssignment` describing the
      mapping between logical cores in the computation with physical cores in
      the TPU topology. Uses a default device assignment if `None`. The
      `DeviceAssignment` may be omitted if each shard of the computation uses
      only one core, and there is either only one shard, or the number of shards
      is equal to the number of cores in the TPU system.
    name: (Deprecated) Does nothing.

  Returns:
    A list of output tensors.

  Raises:
    ValueError: If `num_shards <= 0`
  """
  # Equivalent to shard() with all sharding axes left at their defaults
  # (dimension 0 for both inputs and outputs).
  return shard(
      computation,
      inputs=inputs,
      num_shards=num_shards,
      infeed_queue=infeed_queue,
      device_assignment=device_assignment,
      name=name)
@tf_export(v1=["tpu.rewrite"])
def rewrite(computation,
            inputs=None,
            infeed_queue=None,
            device_assignment=None,
            name=None):
  """Rewrites `computation` for execution on a TPU system.

  Args:
    computation: A Python function that builds a computation to apply to the
      input. If the function takes n inputs, 'inputs' should be a list of n
      tensors.

      `computation` may return a list of operations and tensors. Tensors must
      come before operations in the returned list. The return value of
      `rewrite` is a list of tensors corresponding to the tensors from the
      output of `computation`.

      All `Operation`s constructed during `computation` will be executed when
      evaluating any of the returned output tensors, not just the ones returned.
    inputs: A list of input tensors or `None` (equivalent to an empty list).
      Each input can be a nested structure containing values that are
      convertible to tensors. Note that passing an N-dimension list of
      compatible values will result in a N-dimention list of scalar tensors
      rather than a single Rank-N tensors. If you need different behavior,
      convert part of inputs to tensors with `tf.convert_to_tensor`.
    infeed_queue: If not `None`, the `InfeedQueue` from which to append a tuple
      of arguments as inputs to `computation`.
    device_assignment: if not `None`, a `DeviceAssignment` describing the
      mapping between logical cores in the computation with physical cores in
      the TPU topology. May be omitted for a single-core computation, in which
      case the core attached to task 0, TPU device 0 is used.
    name: (Deprecated) Does nothing.

  Returns:
    Same data structure as if computation(*inputs) is called directly with some
    exceptions for correctness. Exceptions include:
      1) None output: a NoOp would be returned which control-depends on
         computation.
      2) Single value output: A tuple containing the value would be returned.
      3) Operation-only outputs: a NoOp would be returned which
         control-depends on computation.
    TODO(b/121383831): Investigate into removing these special cases.
  """
  # `replicate` expects a list of per-replica input lists; wrap the single
  # replica's inputs (or pass None straight through).
  wrapped_inputs = [inputs] if inputs is not None else None
  # TODO(b/36647078) remove disable when pylint bug is fixed.
  # pylint: disable=indexing-exception
  replicated = replicate(
      computation,
      wrapped_inputs,
      infeed_queue=infeed_queue,
      device_assignment=device_assignment,
      name=name)
  return replicated[0]
# pylint: enable=indexing-exception
# Operations that indicate some error in the user's inference graph.
_BLACKLISTED_INFERENCE_OPS = set([
"ReadVariableOp",
"AssignVariableOp",
"AssignAddVariableOp",
"AssignSubVariableOp",
"VarHandleOp",
"Variable",
"VariableV2",
])
def under_tpu_inference_context():
  """Check if it is currently under `_TPUInferenceContext`.

  Returns:
    True if any control flow context on the current graph (or one of its
    enclosing function graphs) is a `_TPUInferenceContext`, else False.
  """
  graph = ops.get_default_graph()
  while graph:
    # Walk the chain of control flow contexts on this graph.
    context = graph._get_control_flow_context()  # pylint: disable=protected-access
    while context:
      if isinstance(context, _TPUInferenceContext):
        return True
      context = context.outer_context
    # Not found on this graph: climb to the enclosing graph, handling both
    # the legacy _FuncGraph and the modern FuncGraph representations.
    if isinstance(graph, function._FuncGraph):  # pylint: disable=protected-access
      graph = graph._outer_graph  # pylint: disable=protected-access
    elif isinstance(graph, func_graph.FuncGraph):
      graph = graph.outer_graph
    else:
      # A plain top-level Graph has no enclosing graph; give up.
      return False
class _TPUInferenceContext(control_flow_ops.XLAControlFlowContext):
  """A `ControlFlowContext` for nodes inside a TPU inference computation.

  The primary role of `_TPUInferenceContext` is to sanity check operators
  inside a tpu.rewrite_for_inference() computation.
  """

  def __init__(self, name):
    super(_TPUInferenceContext, self).__init__()
    self._name = name

  def AddOp(self, op):
    self._AddOpInternal(op)

  def _AddOpInternal(self, op):
    """Rejects blacklisted variable ops, then forwards `op` outward."""
    # pylint: disable=protected-access
    if op.type in _BLACKLISTED_INFERENCE_OPS:
      raise NotImplementedError(
          "Operation of type %s (%s) is not supported on the TPU for inference."
          " Execution will fail if this op is used in the graph. Make sure your"
          " variables are using variable_scope." % (op.type, op.name))
    if self._outer_context:
      self._outer_context.AddInnerOp(op)

  def AddValue(self, val):
    """Forwards `val` to the outer context (if any) and returns the result."""
    result = val
    if self._outer_context:
      result = self._outer_context.AddValue(val)
    return result

  def AddInnerOp(self, op):
    self._AddOpInternal(op)

  @property
  def grad_state(self):
    # Inference computations carry no gradient-loop state.
    return None
def validate_inference_rewrite_for_variables(graph):
  """Validates whether rewrite_for_inference() 'worked' for variables.

  The rewrite_for_inference() method is supposed to append GuaranteeConstOps
  after ReadVariableOps, but this mechanism works only if you are using
  tf.compat.v1.get_variable() to create and access variables in your tpu
  computation. Call this immediately after tpu.rewrite_for_inference() to
  check whether GuaranteeConstOps were added to the graph.

  Typical usages:
    tpu.validate_inference_rewrite_for_variables(
        tf.compat.v1.get_default_graph())

    tpu.validate_inference_rewrite_for_variables(sess.graph)

  Args:
    graph: The graph which needs to be validated.
  Raises:
    RuntimeError: if validation failed.
  """
  # A single GuaranteeConst op is enough evidence the rewrite took effect.
  for graph_op in graph.get_operations():
    if graph_op.type == "GuaranteeConst":
      return
  raise RuntimeError(
      "No GuaranteeConst ops found in the graph after running "
      "tpu.rewrite_for_inference(...). Please check that you are using "
      "tf.get_variable() to create and access variables in your tpu "
      "computation.")
def rewrite_for_inference(computation,
                          inputs=None,
                          infeed_queue=None,
                          device_assignment=None,
                          name=None):
  """Rewrites `computation` for inference on a TPU system.

  Other than 'rewriting' the computation to run on a TPU, if using variables
  in your computation, it moves the ReadVariableOps outside the TPU
  computation, and adds GuaranteeConst ops just after the ReadVariableOps.
  This mechanism works only if you are using tf.compat.v1.get_variable() to
  create and access variables in your tpu computation. You can validate
  whether this worked, by calling validate_inference_rewrite_for_variables()
  method immediately after this method to check whether GuaranteeConstOps
  where added to the graph.

  Args:
    computation: A Python function that builds a computation to apply to the
      input. If the function takes n inputs, 'inputs' should be a list of n
      tensors. If the function returns m outputs, rewrite will return a list of
      m tensors.
    inputs: A list of input tensors or `None` (equivalent to an empty list).
    infeed_queue: If not `None`, the `InfeedQueue` from which to append a tuple
      of arguments as inputs to `computation`.
    device_assignment: if not `None`, a `DeviceAssignment` describing the
      mapping between logical cores in the computation with physical cores in
      the TPU topology. May be omitted for a single-core computation, in which
      case the core attached to task 0, TPU device 0 is used.
    name: The name of the operator.
  Returns:
    A list of output tensors.
  """

  def guarantee_const_getter(getter, name, *args, **kwargs):
    # Wrap each variable read in a GuaranteeConst so the TPU compiler may
    # treat the read value as constant for the inference computation.
    with ops.control_dependencies(None):
      return array_ops.guarantee_const(
          getter(name, *args, **kwargs), name=name + "/GuaranteeConst")

  def wrapped_computation(*args, **kwargs):
    """Execute computation under `_TPUInferenceContext`."""
    context = _TPUInferenceContext(
        name=ops.get_default_graph().unique_name("rewrite_for_inference"))
    try:
      context.Enter()

      vscope = variable_scope.get_variable_scope()
      prev_custom_getter = vscope.custom_getter
      prev_caching_device = vscope.caching_device
      vscope.set_custom_getter(guarantee_const_getter)
      vscope.set_caching_device(lambda op: op.device)

      result = computation(*args, **kwargs)

      # NOTE(review): the previous getter/caching device are restored only on
      # the success path; if `computation` raises, the variable scope keeps
      # the GuaranteeConst getter. Confirm whether that is intentional.
      vscope.set_custom_getter(prev_custom_getter)
      vscope.set_caching_device(prev_caching_device)
    finally:
      context.Exit()
    return result

  # pylint: disable=undefined-variable
  return rewrite(
      wrapped_computation,
      inputs=inputs,
      infeed_queue=infeed_queue,
      device_assignment=device_assignment,
      name=name)
  # pylint: enable=undefined-variable
# pylint: enable=undefined-variable
def prune_unconnected_ops_from_xla(prune_graph):
  """Prunes unconnected ops as listed in _UNCONNECTED_OPS_TO_PRUNE.

  Args:
    prune_graph: A tensorflow graph from which we wish to prune unconnected ops
      as listed in _UNCONNECTED_OPS_TO_PRUNE.  In general, these ops should have
      no inputs and no consumers. These can often be left behind due to graph
      construction rewiring (for instance TF-Hub). While they never execute,
      they will cause XLA compile to fail so we strip them from XLA compile by
      removing the tpu_replicate attribute.
  """
  # Scan over the top level graph and all function graphs.
  # (list(...) instead of a redundant identity list comprehension.)
  for graph in [prune_graph] + list(
      prune_graph._functions.values()):  # pylint: disable=protected-access
    if not isinstance(graph, ops.Graph):
      continue
    for op in graph.get_operations():
      if op.type not in _UNCONNECTED_OPS_TO_PRUNE:
        continue
      # Only prune ops whose outputs are entirely unconsumed.
      if any(output.consumers() for output in op.outputs):
        continue
      logging.info(
          "Pruning OP %s of type %s from XLA Compile due to "
          "it being disconnected.", op.name, op.type)
      op._clear_attr(_TPU_REPLICATE_ATTR)  # pylint: disable=protected-access
| DavidNorman/tensorflow | tensorflow/python/tpu/tpu.py | Python | apache-2.0 | 72,964 |
#!/usr/bin/env python
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Retrieve web resources over http."""
import copy
import httparchive
import httplib
import logging
import os
import platformsettings
import re
import util
# Match an <html>/<head> open tag that appears within the first 256 characters
# of the document; used to pick the script-injection point.
HTML_RE = re.compile(r'^.{,256}?<html.*?>', re.IGNORECASE | re.DOTALL)
HEAD_RE = re.compile(r'^.{,256}?<head.*?>', re.IGNORECASE | re.DOTALL)
# Platform-appropriate high-resolution timer used for fetch timing.
TIMER = platformsettings.timer
class HttpClientException(Exception):
  """Base class for all exceptions in httpclient."""
def GetInjectScript(scripts):
  """Loads |scripts| from disk and returns a string of their content.

  Args:
    scripts: a comma-separated string of script paths (file-system paths or
        package resources), or a falsy value for no scripts.

  Returns:
    The minified, concatenated script content ('' when |scripts| is falsy).

  Raises:
    HttpClientException: if a listed script cannot be found.
  """
  lines = []
  if scripts:
    for script in scripts.split(','):
      if os.path.exists(script):
        # Use append, not +=: `lines += file_content` would extend the list
        # one *character* at a time. Close the file handle explicitly.
        with open(script) as script_file:
          lines.append(script_file.read())
      elif util.resource_exists(script):
        lines.append(util.resource_string(script))
      else:
        # Interpolate the message; the original passed (fmt, arg) to the
        # constructor so '%s' was never substituted.
        raise HttpClientException('Script does not exist: %s' % script)

  def MinifyScript(script):
    """Remove C-style comments and line breaks from script.

    Note: statements must be ';' terminated, and not depending on newline"""
    # Regex adapted from http://ostermiller.org/findcomment.html.
    MULTILINE_COMMENT_RE = re.compile(r'/\*.*?\*/', re.DOTALL | re.MULTILINE)
    SINGLELINE_COMMENT_RE = re.compile(r'//.*', re.MULTILINE)
    # Remove C-style comments from JS.
    script = re.sub(MULTILINE_COMMENT_RE, '', script)
    script = re.sub(SINGLELINE_COMMENT_RE, '', script)
    # Remove line breaks. str.replace is portable; str.translate(None, ...)
    # only works on Python 2.
    return script.replace('\r', '').replace('\n', '')

  return MinifyScript(''.join(lines))
def _InjectScripts(response, inject_script):
  """Injects |inject_script| immediately after <head> or <html>.

  Copies |response| if it is modified.

  Args:
    response: an ArchivedHttpResponse
    inject_script: JavaScript string (e.g. "Math.random = function(){...}")
  Returns:
    an ArchivedHttpResponse
  """
  # isinstance instead of `type(x) == tuple`. NOTE(review): a tuple here is
  # unexpected; it is only logged and then falls through to get_header, which
  # will fail — confirm intended handling upstream.
  if isinstance(response, tuple):
    logging.warn('tuple response: %s', response)
  content_type = response.get_header('content-type')
  if content_type and content_type.startswith('text/html'):
    text = response.get_data_as_text()

    def InsertScriptAfter(matchobj):
      return '%s<script>%s</script>' % (matchobj.group(0), inject_script)

    # Skip injection if the script is already present (idempotence).
    if text and inject_script not in text:
      # Prefer injecting after <head>, fall back to <html>.
      text, is_injected = HEAD_RE.subn(InsertScriptAfter, text, 1)
      if not is_injected:
        text, is_injected = HTML_RE.subn(InsertScriptAfter, text, 1)
      if not is_injected:
        logging.warning('Failed to inject scripts.')
        logging.debug('Response content: %s', text)
      else:
        # Copy before mutating so the archived original stays untouched.
        response = copy.deepcopy(response)
        response.set_data(text)
  return response
class DetailedHTTPResponse(httplib.HTTPResponse):
  """Preserve details relevant to replaying responses.

  WARNING: This code uses attributes and methods of HTTPResponse
  that are not part of the public interface.
  """

  def read_chunks(self):
    """Return the response body content and timing data.

    The returned chunks have the chunk size and CRLFs stripped off.
    If the response was compressed, the returned data is still compressed.

    Returns:
      (chunks, delays)
        chunks:
          [response_body]                  # non-chunked responses
          [chunk_1, chunk_2, ...]          # chunked responses
        delays:
          [0]                              # non-chunked responses
          [chunk_1_first_byte_delay, ...]  # chunked responses
      The delay for the first body item should be recorded by the caller.
    """
    chunks = []
    delays = []
    if not self.chunked:
      chunks.append(self.read())
      delays.append(0)
    else:
      start = TIMER()
      try:
        while True:
          line = self.fp.readline()
          chunk_size = self._read_chunk_size(line)
          if chunk_size is None:
            raise httplib.IncompleteRead(''.join(chunks))
          if chunk_size == 0:
            break  # last-chunk marker
          delays.append(TIMER() - start)
          chunks.append(self._safe_read(chunk_size))
          self._safe_read(2)  # skip the CRLF at the end of the chunk
          start = TIMER()
        # Ignore any trailers.
        while True:
          line = self.fp.readline()
          if not line or line == '\r\n':
            break
      finally:
        self.close()
    return chunks, delays

  @classmethod
  def _read_chunk_size(cls, line):
    """Parse a chunk-size line; return the int size, or None if malformed."""
    chunk_extensions_pos = line.find(';')
    if chunk_extensions_pos != -1:
      # strip chunk-extensions. (Fix: this previously sliced with the
      # undefined name `extention_pos`, raising NameError for any chunk-size
      # line that carried chunk-extensions.)
      line = line[:chunk_extensions_pos]
    try:
      chunk_size = int(line, 16)
    except ValueError:
      return None
    return chunk_size
class DetailedHTTPConnection(httplib.HTTPConnection):
  """Preserve details relevant to replaying connections."""
  # Use the response subclass that records chunk timing data.
  response_class = DetailedHTTPResponse
class DetailedHTTPSResponse(DetailedHTTPResponse):
  """Preserve details relevant to replaying SSL responses."""
  # Behavior is inherited unchanged; the subclass exists so SSL responses
  # can diverge from plain HTTP responses later without touching callers.
  pass
class DetailedHTTPSConnection(httplib.HTTPSConnection):
  """Preserve details relevant to replaying SSL connections."""
  # Use the response subclass that records chunk timing data.
  response_class = DetailedHTTPSResponse
class RealHttpFetch(object):
  """Fetches a request over the real network and archives the response."""

  def __init__(self, real_dns_lookup, get_server_rtt):
    """Initialize RealHttpFetch.

    Args:
      real_dns_lookup: a function that resolves a host to an IP.
      get_server_rtt: a function that returns the round-trip time of a host.
    """
    self._real_dns_lookup = real_dns_lookup
    self._get_server_rtt = get_server_rtt

  @staticmethod
  def _GetHeaderNameValue(header):
    """Parse the header line and return a name/value tuple.

    Args:
      header: a string for a header such as "Content-Length: 314".
    Returns:
      A tuple (header_name, header_value) on success or None if the header
      is not in expected format. header_name is in lowercase.
    """
    i = header.find(':')
    if i > 0:
      return (header[:i].lower(), header[i+1:].strip())
    return None

  @staticmethod
  def _ToTuples(headers):
    """Parse headers and save them to a list of tuples.

    This method takes HttpResponse.msg.headers as input and convert it
    to a list of (header_name, header_value) tuples.

    HttpResponse.msg.headers is a list of strings where each string
    represents either a header or a continuation line of a header.
    1. a normal header consists of two parts which are separated by colon :
       "header_name:header_value..."
    2. a continuation line is a string starting with whitespace
       "[whitespace]continued_header_value..."
    If a header is not in good shape or an unexpected continuation line is
    seen, it will be ignored.

    Should avoid using response.getheaders() directly
    because response.getheaders() can't handle multiple headers
    with the same name properly. Instead, parse the
    response.msg.headers using this method to get all headers.

    Args:
      headers: an instance of HttpResponse.msg.headers.
    Returns:
      A list of tuples which looks like:
      [(header_name, header_value), (header_name2, header_value2)...]
    """
    all_headers = []
    for line in headers:
      if line[0] in '\t ':
        # Continuation line: fold into the value of the previous header.
        if not all_headers:
          logging.warning(
              'Unexpected response header continuation line [%s]', line)
          continue
        name, value = all_headers.pop()
        value += '\n ' + line.strip()
      else:
        name_value = RealHttpFetch._GetHeaderNameValue(line)
        if not name_value:
          logging.warning(
              'Response header in wrong format [%s]', line)
          continue
        name, value = name_value
      all_headers.append((name, value))
    return all_headers

  def __call__(self, request):
    """Fetch an HTTP request.

    Args:
      request: an ArchivedHttpRequest
    Returns:
      an ArchivedHttpResponse, or None on DNS failure or repeated fetch errors.
    """
    logging.debug('RealHttpFetch: %s %s', request.host, request.path)
    # NOTE(review): splitting on ':' assumes "host[:port]"; a bare IPv6
    # literal would be mis-parsed here — confirm hosts are names/IPv4.
    if ':' in request.host:
      parts = request.host.split(':')
      truehost = parts[0]
      trueport = int(parts[1])
    else:
      truehost = request.host
      trueport = None

    host_ip = self._real_dns_lookup(truehost)
    if not host_ip:
      logging.critical('Unable to find host ip for name: %s', truehost)
      return None
    retries = 3
    while True:
      try:
        if request.is_ssl:
          if trueport:
            connection = DetailedHTTPSConnection(host_ip, trueport)
          else:
            connection = DetailedHTTPSConnection(host_ip)
        else:
          if trueport:
            connection = DetailedHTTPConnection(host_ip, trueport)
          else:
            connection = DetailedHTTPConnection(host_ip)
        start = TIMER()
        connection.request(
            request.command,
            request.path,
            request.request_body,
            request.headers)
        response = connection.getresponse()
        # Time to the response headers, minus the network round-trip so the
        # archived delay approximates server think-time.
        headers_delay = int((TIMER() - start) * 1000)
        headers_delay -= self._get_server_rtt(request.host)

        chunks, chunk_delays = response.read_chunks()
        delays = {
            'headers': headers_delay,
            'data': chunk_delays
            }
        archived_http_response = httparchive.ArchivedHttpResponse(
            response.version,
            response.status,
            response.reason,
            RealHttpFetch._ToTuples(response.msg.headers),
            chunks,
            delays)
        return archived_http_response
      except Exception as e:  # 'as' syntax: valid on Python 2.6+ and Python 3.
        if retries:
          retries -= 1
          logging.warning('Retrying fetch %s: %s', request, e)
          continue
        logging.critical('Could not fetch %s: %s', request, e)
        return None
class RecordHttpArchiveFetch(object):
  """Make real HTTP fetches and save responses in the given HttpArchive."""

  def __init__(self, http_archive, real_dns_lookup, inject_script,
               cache_misses=None):
    """Initialize RecordHttpArchiveFetch.

    Args:
      http_archive: an instance of a HttpArchive
      real_dns_lookup: a function that resolves a host to an IP.
      inject_script: script string to inject in all pages
      cache_misses: instance of CacheMissArchive
    """
    self.http_archive = http_archive
    self.real_http_fetch = RealHttpFetch(real_dns_lookup,
                                         http_archive.get_server_rtt)
    self.inject_script = inject_script
    self.cache_misses = cache_misses

  def __call__(self, request):
    """Fetch the request (or reuse an archived copy) and return the response.

    Args:
      request: an ArchivedHttpRequest.
    Returns:
      an ArchivedHttpResponse, or None if the real fetch failed.
    """
    if self.cache_misses:
      self.cache_misses.record_request(
          request, is_record_mode=True, is_cache_miss=False)

    # Reuse the archived response for a repeated request; otherwise fetch
    # for real and store the result in the archive.
    response = self.http_archive.get(request)
    if response is not None:
      logging.debug('Repeated request found: %s', request)
    else:
      response = self.real_http_fetch(request)
      if response is None:
        return None
      self.http_archive[request] = response

    if self.inject_script:
      response = _InjectScripts(response, self.inject_script)
    logging.debug('Recorded: %s', request)
    return response
class ReplayHttpArchiveFetch(object):
  """Serve responses from the given HttpArchive."""

  def __init__(self, http_archive, inject_script,
               use_diff_on_unknown_requests=False, cache_misses=None,
               use_closest_match=False):
    """Initialize ReplayHttpArchiveFetch.

    Args:
      http_archive: an instance of a HttpArchive
      inject_script: script string to inject in all pages
      use_diff_on_unknown_requests: If True, log unknown requests
          with a diff to requests that look similar.
      cache_misses: Instance of CacheMissArchive.
          Callback updates archive on cache misses
      use_closest_match: If True, on replay mode, serve the closest match
          in the archive instead of giving a 404.
    """
    self.http_archive = http_archive
    self.inject_script = inject_script
    self.use_diff_on_unknown_requests = use_diff_on_unknown_requests
    self.cache_misses = cache_misses
    self.use_closest_match = use_closest_match

  def __call__(self, request):
    """Serve the archived response for the given request.

    Args:
      request: an instance of an ArchivedHttpRequest.
    Returns:
      Instance of ArchivedHttpResponse (if found) or None
    """
    response = self.http_archive.get(request)

    # Optionally fall back to the most similar archived request.
    if not response and self.use_closest_match:
      closest_request = self.http_archive.find_closest_request(
          request, use_path=True)
      if closest_request:
        response = self.http_archive.get(closest_request)
        if response:
          logging.info('Request not found: %s\nUsing closest match: %s',
                       request, closest_request)

    if self.cache_misses:
      self.cache_misses.record_request(
          request, is_record_mode=False, is_cache_miss=not response)

    if not response:
      reason = str(request)
      if self.use_diff_on_unknown_requests:
        diff = self.http_archive.diff(request)
        if diff:
          reason += (
              "\nNearest request diff "
              "('-' for archived request, '+' for current request):\n%s" % diff)
      logging.warning('Could not replay: %s', reason)
      return response

    return _InjectScripts(response, self.inject_script)
class ControllableHttpArchiveFetch(object):
  """Controllable fetch function that can swap between record and replay."""

  def __init__(self, http_archive, real_dns_lookup,
               inject_script, use_diff_on_unknown_requests,
               use_record_mode, cache_misses, use_closest_match):
    """Initialize HttpArchiveFetch.

    Args:
      http_archive: an instance of a HttpArchive
      real_dns_lookup: a function that resolves a host to an IP.
      inject_script: script string to inject in all pages.
      use_diff_on_unknown_requests: If True, log unknown requests
          with a diff to requests that look similar.
      use_record_mode: If True, start the server in record mode.
      cache_misses: Instance of CacheMissArchive.
      use_closest_match: If True, on replay mode, serve the closest match
          in the archive instead of giving a 404.
    """
    # Both fetchers share the same archive; self.fetch decides which one
    # handles a given request.
    self.record_fetch = RecordHttpArchiveFetch(
        http_archive, real_dns_lookup, inject_script, cache_misses)
    self.replay_fetch = ReplayHttpArchiveFetch(
        http_archive, inject_script, use_diff_on_unknown_requests,
        cache_misses, use_closest_match)
    if use_record_mode:
      self.SetRecordMode()
    else:
      self.SetReplayMode()

  def SetRecordMode(self):
    """Route all subsequent fetches to the recording fetcher."""
    self.fetch = self.record_fetch
    self.is_record_mode = True

  def SetReplayMode(self):
    """Route all subsequent fetches to the replaying fetcher."""
    self.fetch = self.replay_fetch
    self.is_record_mode = False

  def __call__(self, *args, **kwargs):
    """Forward calls to Replay/Record fetch functions depending on mode."""
    return self.fetch(*args, **kwargs)
| windyuuy/opera | chromium/src/third_party/webpagereplay/httpclient.py | Python | bsd-3-clause | 15,553 |
"""Helper methods for plotting (mostly 2-D georeferenced maps)."""
import numpy
from matplotlib import pyplot
import matplotlib.colors
from mpl_toolkits.basemap import Basemap
from gewittergefahr.gg_utils import nwp_model_utils
from gewittergefahr.gg_utils import number_rounding
from gewittergefahr.gg_utils import longitude_conversion as lng_conversion
from gewittergefahr.gg_utils import error_checking
# Default figure dimensions (inches).
DEFAULT_FIGURE_WIDTH_INCHES = 15.
DEFAULT_FIGURE_HEIGHT_INCHES = 15.
# Default basemap boundary resolution ('l' = low; see `_check_basemap_args`).
DEFAULT_RESOLUTION_STRING = 'l'
# Default numbers of latitude/longitude grid lines.
DEFAULT_NUM_PARALLELS = 8
DEFAULT_NUM_MERIDIANS = 6
# Default line widths (points) for map features.
DEFAULT_COUNTRY_WIDTH = 2.
DEFAULT_PROVINCE_WIDTH = 1.
DEFAULT_COUNTY_WIDTH = 0.5
DEFAULT_COAST_WIDTH = 2.
DEFAULT_RIVER_WIDTH = 0.5
DEFAULT_GRID_LINE_WIDTH = 1.
# Default colours for map features, as length-3 numpy arrays in range 0...1.
DEFAULT_COUNTRY_COLOUR = numpy.array([139, 69, 19], dtype=float) / 255
DEFAULT_PROVINCE_COLOUR = numpy.array([139, 69, 19], dtype=float) / 255
DEFAULT_COUNTY_COLOUR = numpy.array([152, 152, 152], dtype=float) / 255
DEFAULT_COAST_COLOUR = numpy.array([31, 120, 180], dtype=float) / 255
DEFAULT_RIVER_COLOUR = numpy.array([166, 206, 227], dtype=float) / 255
DEFAULT_GRID_LINE_COLOUR = numpy.full(3, 0.)
# Default z-orders for map features.  All are very negative so that map
# features are drawn behind the data; more negative = further back.
DEFAULT_COUNTRY_Z_ORDER = -1e8
DEFAULT_PROVINCE_Z_ORDER = -1e9
DEFAULT_COUNTY_Z_ORDER = -1e10
DEFAULT_COAST_Z_ORDER = -1e7
DEFAULT_RIVER_Z_ORDER = -1e6
DEFAULT_GRID_LINE_Z_ORDER = -1e5
# Spherical ellipsoid and Earth radius used for all map projections here.
ELLIPSOID_NAME = 'sphere'
EARTH_RADIUS_METRES = 6370997.
# Default padding between colour bar and main axes (figure-relative units).
VERTICAL_CBAR_PADDING = 0.05
HORIZONTAL_CBAR_PADDING = 0.01
DEFAULT_CBAR_ORIENTATION_STRING = 'horizontal'
# Defaults for `label_axes` (font and axes-relative label position).
DEFAULT_LABEL_FONT_SIZE = 50
DEFAULT_LABEL_FONT_COLOUR = numpy.full(3, 0.)
DEFAULT_LABEL_X_NORMALIZED = 0.
DEFAULT_LABEL_Y_NORMALIZED = 1.
# NOTE: these rc calls run at import time and change matplotlib font sizes
# globally, for every figure made by the importing process.
FONT_SIZE = 30
pyplot.rc('font', size=FONT_SIZE)
pyplot.rc('axes', titlesize=FONT_SIZE)
pyplot.rc('axes', labelsize=FONT_SIZE)
pyplot.rc('xtick', labelsize=FONT_SIZE)
pyplot.rc('ytick', labelsize=FONT_SIZE)
pyplot.rc('legend', fontsize=FONT_SIZE)
pyplot.rc('figure', titlesize=FONT_SIZE)
# Dictionary keys used by `create_map_with_nwp_proj` for x-y and lat-long
# limit dictionaries.
X_MIN_KEY = 'x_min_metres'
X_MAX_KEY = 'x_max_metres'
Y_MIN_KEY = 'y_min_metres'
Y_MAX_KEY = 'y_max_metres'
MIN_LATITUDE_KEY = 'min_latitude_deg'
MAX_LATITUDE_KEY = 'max_latitude_deg'
MIN_LONGITUDE_KEY = 'min_longitude_deg'
MAX_LONGITUDE_KEY = 'max_longitude_deg'
def _check_basemap_args(
        min_latitude_deg, max_latitude_deg, min_longitude_deg,
        max_longitude_deg, resolution_string):
    """Validates map-corner arguments used to create a basemap.

    Latitudes must be in deg N; longitudes must be in deg E.  Input
    longitudes may be positive or negative in the western hemisphere; both
    returned longitudes are deg E with positive values (180...360) in the
    western hemisphere.

    :param min_latitude_deg: Latitude at bottom-left corner of map.
    :param max_latitude_deg: Latitude at top-right corner of map.
    :param min_longitude_deg: Longitude at bottom-left corner of map.
    :param max_longitude_deg: Longitude at top-right corner of map.
    :param resolution_string: Resolution of boundaries (political borders,
        lakes, rivers, etc.) in basemap.  Options are "c" (crude),
        "l" (low), "i" (intermediate), "h" (high), and "f" (full).
    :return: min_longitude_deg: Minimum longitude (deg E, positive in
        western hemisphere).
    :return: max_longitude_deg: Max longitude (deg E, positive in western
        hemisphere).
    """
    error_checking.assert_is_valid_latitude(min_latitude_deg)
    error_checking.assert_is_valid_latitude(max_latitude_deg)
    error_checking.assert_is_greater(max_latitude_deg, min_latitude_deg)

    # Normalize longitudes before comparing them, so that e.g. -100 and 260
    # refer to the same meridian.
    min_longitude_deg = lng_conversion.convert_lng_positive_in_west(
        min_longitude_deg)
    max_longitude_deg = lng_conversion.convert_lng_positive_in_west(
        max_longitude_deg)
    error_checking.assert_is_greater(max_longitude_deg, min_longitude_deg)

    error_checking.assert_is_string(resolution_string)

    return min_longitude_deg, max_longitude_deg
def _basemap_to_latlng_limits(basemap_object):
    """Finds lat-long limits covered by a basemap.

    Samples a 25-by-25 grid of x-y points spanning the map, converts them to
    lat-long, and takes the extremes.

    :param basemap_object: Basemap handle (instance of
        `mpl_toolkits.basemap.Basemap`).
    :return: latitude_limits_deg: length-2 numpy array with min and max
        latitudes in deg N.
    :return: longitude_limits_deg: length-2 numpy array with min and max
        longitudes in deg E.
    """
    num_points_per_edge = 25

    grid_x_metres, grid_y_metres = numpy.meshgrid(
        numpy.linspace(
            basemap_object.llcrnrx, basemap_object.urcrnrx,
            num=num_points_per_edge),
        numpy.linspace(
            basemap_object.llcrnry, basemap_object.urcrnry,
            num=num_points_per_edge)
    )

    # Calling the basemap with inverse=True maps x-y back to long-lat.
    grid_longitudes_deg, grid_latitudes_deg = basemap_object(
        grid_x_metres, grid_y_metres, inverse=True)
    grid_longitudes_deg = lng_conversion.convert_lng_positive_in_west(
        grid_longitudes_deg)

    latitude_limits_deg = numpy.array([
        numpy.min(grid_latitudes_deg), numpy.max(grid_latitudes_deg)
    ])
    longitude_limits_deg = numpy.array([
        numpy.min(grid_longitudes_deg), numpy.max(grid_longitudes_deg)
    ])

    return latitude_limits_deg, longitude_limits_deg
def colour_from_numpy_to_tuple(input_colour):
    """Converts colour from numpy array to tuple (if necessary).

    Anything that is not a numpy array is passed through unchanged.

    :param input_colour: Colour (possibly length-3 or length-4 numpy array).
    :return: output_colour: Colour (possibly length-3 or length-4 tuple).
    """
    if not isinstance(input_colour, numpy.ndarray):
        return input_colour

    error_checking.assert_is_numpy_array(input_colour, num_dimensions=1)
    error_checking.assert_is_geq(len(input_colour), 3)
    error_checking.assert_is_leq(len(input_colour), 4)

    return tuple(input_colour.tolist())
def label_axes(axes_object, label_string, font_size=DEFAULT_LABEL_FONT_SIZE,
               font_colour=DEFAULT_LABEL_FONT_COLOUR,
               x_coord_normalized=DEFAULT_LABEL_X_NORMALIZED,
               y_coord_normalized=DEFAULT_LABEL_Y_NORMALIZED):
    """Adds a text label to the given axes.

    :param axes_object: Axes (instance of
        `matplotlib.axes._subplots.AxesSubplot`).
    :param label_string: Label text.
    :param font_size: Font size.
    :param font_colour: Font colour.
    :param x_coord_normalized: x-coordinate in axes-relative units (0...1,
        where 1 is the right side).  Out-of-range values are not validated,
        so the label may be placed outside the axes.
    :param y_coord_normalized: y-coordinate in axes-relative units (0...1,
        where 1 is the top).  Also not validated.
    """
    error_checking.assert_is_string(label_string)

    axes_object.text(
        x_coord_normalized, y_coord_normalized, label_string,
        fontsize=font_size, color=colour_from_numpy_to_tuple(font_colour),
        horizontalalignment='right', verticalalignment='bottom',
        transform=axes_object.transAxes)
def create_paneled_figure(
        num_rows, num_columns, figure_width_inches=DEFAULT_FIGURE_WIDTH_INCHES,
        figure_height_inches=DEFAULT_FIGURE_HEIGHT_INCHES,
        horizontal_spacing=0.075, vertical_spacing=0., shared_x_axis=False,
        shared_y_axis=False, keep_aspect_ratio=True):
    """Creates paneled figure.

    This method only initializes the panels.  It does not plot anything.

    J = number of panel rows
    K = number of panel columns

    :param num_rows: J in the above discussion.
    :param num_columns: K in the above discussion.
    :param figure_width_inches: Width of the entire figure (including all
        panels).
    :param figure_height_inches: Height of the entire figure (including all
        panels).
    :param horizontal_spacing: Spacing (in figure-relative coordinates, from
        0...1) between adjacent panel columns.
    :param vertical_spacing: Spacing (in figure-relative coordinates, from
        0...1) between adjacent panel rows.
    :param shared_x_axis: Boolean flag.  If True, all panels will share the
        same x-axis.
    :param shared_y_axis: Boolean flag.  If True, all panels will share the
        same y-axis.
    :param keep_aspect_ratio: Boolean flag.  If True, the aspect ratio of each
        panel will be preserved (reflect the aspect ratio of the data plotted
        therein).
    :return: figure_object: Figure handle (instance of
        `matplotlib.figure.Figure`).
    :return: axes_object_matrix: J-by-K numpy array of axes handles (instances
        of `matplotlib.axes._subplots.AxesSubplot`).
    """
    error_checking.assert_is_geq(horizontal_spacing, 0.)
    error_checking.assert_is_less_than(horizontal_spacing, 1.)
    error_checking.assert_is_geq(vertical_spacing, 0.)
    error_checking.assert_is_less_than(vertical_spacing, 1.)
    error_checking.assert_is_boolean(shared_x_axis)
    error_checking.assert_is_boolean(shared_y_axis)
    error_checking.assert_is_boolean(keep_aspect_ratio)

    figure_object, axes_object_matrix = pyplot.subplots(
        num_rows, num_columns, sharex=shared_x_axis, sharey=shared_y_axis,
        figsize=(figure_width_inches, figure_height_inches)
    )

    # `pyplot.subplots` returns a scalar axes handle for a 1-by-1 grid and a
    # 1-D array for a single row or column; normalize to a J-by-K array so
    # callers can always use 2-D indexing.
    if num_rows == num_columns == 1:
        axes_object_matrix = numpy.full(
            (1, 1), axes_object_matrix, dtype=object
        )

    if num_rows == 1 or num_columns == 1:
        axes_object_matrix = numpy.reshape(
            axes_object_matrix, (num_rows, num_columns)
        )

    # BUG FIX: in `pyplot.subplots_adjust`, `wspace` is the gap between panel
    # *columns* (width-wise) and `hspace` the gap between panel *rows*
    # (height-wise).  The previous code passed `horizontal_spacing` to
    # `hspace` and `vertical_spacing` to `wspace`, i.e. the two were swapped
    # relative to their documented meaning above.
    pyplot.subplots_adjust(
        left=0.02, bottom=0.02, right=0.98, top=0.95,
        hspace=vertical_spacing, wspace=horizontal_spacing)

    if not keep_aspect_ratio:
        return figure_object, axes_object_matrix

    for i in range(num_rows):
        for j in range(num_columns):
            axes_object_matrix[i][j].set(aspect='equal')

    return figure_object, axes_object_matrix
def create_lambert_conformal_map(
        min_latitude_deg, max_latitude_deg, min_longitude_deg,
        max_longitude_deg, standard_latitudes_deg=numpy.full(2, 25.),
        central_longitude_deg=265.,
        figure_width_inches=DEFAULT_FIGURE_WIDTH_INCHES,
        figure_height_inches=DEFAULT_FIGURE_HEIGHT_INCHES,
        resolution_string=DEFAULT_RESOLUTION_STRING):
    """Creates Lambert conformal map.

    This method only initializes the figure, axes, and projection.  It does
    not plot anything.  Latitudes must be in deg N; longitudes in deg E.

    :param min_latitude_deg: See doc for `_check_basemap_args`.
    :param max_latitude_deg: Same.
    :param min_longitude_deg: Same.
    :param max_longitude_deg: Same.
    :param standard_latitudes_deg: length-2 numpy array of standard latitudes
        for projection.
    :param central_longitude_deg: Central longitude for projection.
    :param figure_width_inches: Figure width.
    :param figure_height_inches: Figure height.
    :param resolution_string: See doc for `_check_basemap_args`.
    :return: figure_object: Figure handle (instance of
        `matplotlib.figure.Figure`).
    :return: axes_object: Axes handle (instance of
        `matplotlib.axes._subplots.AxesSubplot`).
    :return: basemap_object: Basemap handle (instance of
        `mpl_toolkits.basemap.Basemap`).
    """
    min_longitude_deg, max_longitude_deg = _check_basemap_args(
        min_latitude_deg=min_latitude_deg, max_latitude_deg=max_latitude_deg,
        min_longitude_deg=min_longitude_deg,
        max_longitude_deg=max_longitude_deg,
        resolution_string=resolution_string)

    error_checking.assert_is_valid_lat_numpy_array(standard_latitudes_deg)
    error_checking.assert_is_numpy_array(
        standard_latitudes_deg,
        exact_dimensions=numpy.array([2], dtype=int)
    )
    error_checking.assert_is_non_array(central_longitude_deg)

    central_longitude_deg = lng_conversion.convert_lng_positive_in_west(
        central_longitude_deg)

    figure_object, axes_object = pyplot.subplots(
        1, 1, figsize=(figure_width_inches, figure_height_inches)
    )

    projection_kwargs = {
        'projection': 'lcc',
        'lat_1': standard_latitudes_deg[0],
        'lat_2': standard_latitudes_deg[1],
        'lon_0': central_longitude_deg,
        'rsphere': EARTH_RADIUS_METRES,
        'ellps': ELLIPSOID_NAME,
        'resolution': resolution_string,
        'llcrnrlat': min_latitude_deg,
        'llcrnrlon': min_longitude_deg,
        'urcrnrlat': max_latitude_deg,
        'urcrnrlon': max_longitude_deg
    }
    basemap_object = Basemap(**projection_kwargs)

    return figure_object, axes_object, basemap_object
def create_equidist_cylindrical_map(
        min_latitude_deg, max_latitude_deg, min_longitude_deg,
        max_longitude_deg, figure_width_inches=DEFAULT_FIGURE_WIDTH_INCHES,
        figure_height_inches=DEFAULT_FIGURE_HEIGHT_INCHES,
        resolution_string=DEFAULT_RESOLUTION_STRING):
    """Creates equidistant cylindrical map.

    This method only initializes the figure, axes, and projection.  It does
    not plot anything.

    :param min_latitude_deg: See doc for `_check_basemap_args`.
    :param max_latitude_deg: Same.
    :param min_longitude_deg: Same.
    :param max_longitude_deg: Same.
    :param figure_width_inches: Figure width.
    :param figure_height_inches: Figure height.
    :param resolution_string: See doc for `_check_basemap_args`.
    :return: figure_object: See doc for `create_lambert_conformal_map`.
    :return: axes_object: Same.
    :return: basemap_object: Same.
    """
    min_longitude_deg, max_longitude_deg = _check_basemap_args(
        min_latitude_deg=min_latitude_deg, max_latitude_deg=max_latitude_deg,
        min_longitude_deg=min_longitude_deg,
        max_longitude_deg=max_longitude_deg,
        resolution_string=resolution_string)

    figure_object, axes_object = pyplot.subplots(
        1, 1, figsize=(figure_width_inches, figure_height_inches)
    )

    projection_kwargs = {
        'projection': 'cyl',
        'resolution': resolution_string,
        'llcrnrlat': min_latitude_deg,
        'llcrnrlon': min_longitude_deg,
        'urcrnrlat': max_latitude_deg,
        'urcrnrlon': max_longitude_deg
    }
    basemap_object = Basemap(**projection_kwargs)

    return figure_object, axes_object, basemap_object
def create_map_with_nwp_proj(
        model_name, grid_name=None, latlng_limit_dict=None, xy_limit_dict=None,
        figure_width_inches=DEFAULT_FIGURE_WIDTH_INCHES,
        figure_height_inches=DEFAULT_FIGURE_HEIGHT_INCHES,
        resolution_string=DEFAULT_RESOLUTION_STRING):
    """Initializes map with same projection as NWP model.

    However, this map will have false easting = false northing = 0 metres.

    If `latlng_limit_dict is not None`, corners of the map will be determined
    by lat-long coords.

    If `xy_limit_dict is not None`, corners of the map will be determined by
    x-y coords.

    If both are None, corners of the map will be x-y corners of model grid.

    If both are given, the lat-long limits win (see branch below).

    :param model_name: See doc for `nwp_model_utils.check_grid_name`.
    :param grid_name: See doc for `nwp_model_utils.check_grid_name`.
    :param latlng_limit_dict: Dictionary with the following keys:
        latlng_limit_dict['min_latitude_deg']: Minimum latitude (deg N).
        latlng_limit_dict['max_latitude_deg']: Max latitude (deg N).
        latlng_limit_dict['min_longitude_deg']: Minimum longitude (deg E).
        latlng_limit_dict['max_longitude_deg']: Max longitude (deg E).
    :param xy_limit_dict: Dictionary with the following keys:
        xy_limit_dict['x_min_metres']: Minimum x-coord in map.
        xy_limit_dict['x_max_metres']: Max x-coord in map.
        xy_limit_dict['y_min_metres']: Minimum y-coord in map.
        xy_limit_dict['y_max_metres']: Max y-coord in map.
    :param figure_width_inches: Figure width.
    :param figure_height_inches: Figure height.
    :param resolution_string: See doc for `create_lambert_conformal_map`.
    :return: figure_object: Same.
    :return: axes_object: Same.
    :return: basemap_object: Same.
    """
    nwp_model_utils.check_grid_name(model_name=model_name, grid_name=grid_name)
    standard_latitudes_deg, central_longitude_deg = (
        nwp_model_utils.get_projection_params(model_name)
    )
    # Neither corner spec given: default to the model grid's own x-y corners,
    # shifted so that false easting = false northing = 0 metres.
    if latlng_limit_dict is None and xy_limit_dict is None:
        all_x_coords_metres, all_y_coords_metres = (
            nwp_model_utils.get_xy_grid_cell_edges(
                model_name=model_name, grid_name=grid_name)
        )
        false_easting_metres, false_northing_metres = (
            nwp_model_utils.get_false_easting_and_northing(
                model_name=model_name, grid_name=grid_name)
        )
        all_x_coords_metres -= false_easting_metres
        all_y_coords_metres -= false_northing_metres
        xy_limit_dict = {
            X_MIN_KEY: numpy.min(all_x_coords_metres),
            X_MAX_KEY: numpy.max(all_x_coords_metres),
            Y_MIN_KEY: numpy.min(all_y_coords_metres),
            Y_MAX_KEY: numpy.max(all_y_coords_metres)
        }
    figure_object, axes_object = pyplot.subplots(
        1, 1, figsize=(figure_width_inches, figure_height_inches)
    )
    # Lat-long limits, when given, take precedence over x-y limits.
    if latlng_limit_dict is not None:
        min_latitude_deg = latlng_limit_dict[MIN_LATITUDE_KEY]
        max_latitude_deg = latlng_limit_dict[MAX_LATITUDE_KEY]
        error_checking.assert_is_valid_lat_numpy_array(
            numpy.array([min_latitude_deg, max_latitude_deg])
        )
        min_longitude_deg = lng_conversion.convert_lng_positive_in_west(
            latlng_limit_dict[MIN_LONGITUDE_KEY]
        )
        max_longitude_deg = lng_conversion.convert_lng_positive_in_west(
            latlng_limit_dict[MAX_LONGITUDE_KEY]
        )
        error_checking.assert_is_greater(max_latitude_deg, min_latitude_deg)
        error_checking.assert_is_greater(max_longitude_deg, min_longitude_deg)
        basemap_object = Basemap(
            projection='lcc', lat_1=standard_latitudes_deg[0],
            lat_2=standard_latitudes_deg[1], lon_0=central_longitude_deg,
            rsphere=EARTH_RADIUS_METRES, ellps=ELLIPSOID_NAME,
            resolution=resolution_string, llcrnrlat=min_latitude_deg,
            llcrnrlon=min_longitude_deg, urcrnrlat=max_latitude_deg,
            urcrnrlon=max_longitude_deg)
    else:
        x_min_metres = xy_limit_dict[X_MIN_KEY]
        x_max_metres = xy_limit_dict[X_MAX_KEY]
        y_min_metres = xy_limit_dict[Y_MIN_KEY]
        y_max_metres = xy_limit_dict[Y_MAX_KEY]
        error_checking.assert_is_greater(x_max_metres, x_min_metres)
        error_checking.assert_is_greater(y_max_metres, y_min_metres)
        basemap_object = Basemap(
            projection='lcc', lat_1=standard_latitudes_deg[0],
            lat_2=standard_latitudes_deg[1], lon_0=central_longitude_deg,
            rsphere=EARTH_RADIUS_METRES, ellps=ELLIPSOID_NAME,
            resolution=resolution_string,
            llcrnrx=x_min_metres, urcrnrx=x_max_metres,
            llcrnry=y_min_metres, urcrnry=y_max_metres)
    return figure_object, axes_object, basemap_object
def plot_countries(
        basemap_object, axes_object, line_width=DEFAULT_COUNTRY_WIDTH,
        line_colour=DEFAULT_COUNTRY_COLOUR, z_order=DEFAULT_COUNTRY_Z_ORDER):
    """Draws national borders on the map.

    :param basemap_object: Basemap handle (instance of
        `mpl_toolkits.basemap.Basemap`).
    :param axes_object: Axes handle (instance of
        `matplotlib.axes._subplots.AxesSubplot`).
    :param line_width: Border width.
    :param line_colour: Border colour.
    :param z_order: z-order.  Higher numbers put national borders closer to
        the "top" (on top of other features).
    """
    colour_tuple = colour_from_numpy_to_tuple(line_colour)

    basemap_object.drawcountries(
        linewidth=line_width, color=colour_tuple, ax=axes_object,
        zorder=z_order)
def plot_states_and_provinces(
        basemap_object, axes_object, line_width=DEFAULT_PROVINCE_WIDTH,
        line_colour=DEFAULT_PROVINCE_COLOUR, z_order=DEFAULT_PROVINCE_Z_ORDER):
    """Draws state and provincial borders on the map.

    :param basemap_object: See doc for `plot_countries`.
    :param axes_object: Same.
    :param line_width: Same.
    :param line_colour: Same.
    :param z_order: Same.
    """
    colour_tuple = colour_from_numpy_to_tuple(line_colour)

    basemap_object.drawstates(
        linewidth=line_width, color=colour_tuple, ax=axes_object,
        zorder=z_order)
def plot_counties(
        basemap_object, axes_object, line_width=DEFAULT_COUNTY_WIDTH,
        line_colour=DEFAULT_COUNTY_COLOUR, z_order=DEFAULT_COUNTY_Z_ORDER):
    """Draws county borders on the map.

    :param basemap_object: See doc for `plot_countries`.
    :param axes_object: Same.
    :param line_width: Same.
    :param line_colour: Same.
    :param z_order: Same.
    """
    colour_tuple = colour_from_numpy_to_tuple(line_colour)

    basemap_object.drawcounties(
        linewidth=line_width, color=colour_tuple, ax=axes_object,
        zorder=z_order)
def plot_coastlines(
        basemap_object, axes_object, line_width=DEFAULT_COAST_WIDTH,
        line_colour=DEFAULT_COAST_COLOUR, z_order=DEFAULT_COAST_Z_ORDER):
    """Draws coastlines on the map (basemap includes lake shores here too).

    :param basemap_object: See doc for `plot_countries`.
    :param axes_object: Same.
    :param line_width: Same.
    :param line_colour: Same.
    :param z_order: Same.
    """
    colour_tuple = colour_from_numpy_to_tuple(line_colour)

    basemap_object.drawcoastlines(
        linewidth=line_width, color=colour_tuple, ax=axes_object,
        zorder=z_order)
def plot_rivers(
        basemap_object, axes_object, line_width=DEFAULT_RIVER_WIDTH,
        line_colour=DEFAULT_RIVER_COLOUR, z_order=DEFAULT_RIVER_Z_ORDER):
    """Draws rivers on the map.

    :param basemap_object: See doc for `plot_countries`.
    :param axes_object: Same.
    :param line_width: Same.
    :param line_colour: Same.
    :param z_order: Same.
    """
    colour_tuple = colour_from_numpy_to_tuple(line_colour)

    basemap_object.drawrivers(
        linewidth=line_width, color=colour_tuple, ax=axes_object,
        zorder=z_order)
def plot_parallels(
        basemap_object, axes_object, min_latitude_deg=None,
        max_latitude_deg=None, num_parallels=DEFAULT_NUM_PARALLELS,
        font_size=FONT_SIZE, line_width=DEFAULT_GRID_LINE_WIDTH,
        line_colour=DEFAULT_GRID_LINE_COLOUR,
        z_order=DEFAULT_GRID_LINE_Z_ORDER):
    """Draws parallels (latitude grid lines) on the map.

    If either `min_latitude_deg` or `max_latitude_deg` is None, this method
    takes plotting limits from `basemap_object`.

    :param basemap_object: See doc for `plot_countries`.
    :param axes_object: Same.
    :param min_latitude_deg: Minimum latitude for grid lines.
    :param max_latitude_deg: Max latitude for grid lines.
    :param num_parallels: Number of parallels.
    :param font_size: Font size for tick labels.
    :param line_width: See doc for `plot_countries`.
    :param line_colour: Same.
    :param z_order: Same.
    """
    if min_latitude_deg is None or max_latitude_deg is None:
        min_latitude_deg, max_latitude_deg = _basemap_to_latlng_limits(
            basemap_object
        )[0]

    error_checking.assert_is_valid_latitude(min_latitude_deg)
    error_checking.assert_is_valid_latitude(max_latitude_deg)
    error_checking.assert_is_greater(max_latitude_deg, min_latitude_deg)
    error_checking.assert_is_integer(num_parallels)
    error_checking.assert_is_geq(num_parallels, 2)

    # Round spacing to a "nice" value: nearest 0.1 deg when less than 1 deg,
    # otherwise nearest whole degree.
    spacing_deg = (max_latitude_deg - min_latitude_deg) / (num_parallels - 1)
    if spacing_deg < 1.:
        spacing_deg = number_rounding.round_to_nearest(spacing_deg, 0.1)
    else:
        spacing_deg = numpy.round(spacing_deg)

    # Snap bounds inward to multiples of the spacing, then recompute how many
    # parallels fit between them.
    min_latitude_deg = number_rounding.ceiling_to_nearest(
        min_latitude_deg, spacing_deg)
    max_latitude_deg = number_rounding.floor_to_nearest(
        max_latitude_deg, spacing_deg)
    num_parallels = 1 + int(numpy.round(
        (max_latitude_deg - min_latitude_deg) / spacing_deg
    ))

    latitudes_deg = numpy.linspace(
        min_latitude_deg, max_latitude_deg, num=num_parallels)

    basemap_object.drawparallels(
        latitudes_deg, color=colour_from_numpy_to_tuple(line_colour),
        fontsize=font_size, linewidth=line_width,
        labels=[True, False, False, False], ax=axes_object, zorder=z_order)
def plot_meridians(
        basemap_object, axes_object, min_longitude_deg=None,
        max_longitude_deg=None, num_meridians=DEFAULT_NUM_MERIDIANS,
        font_size=FONT_SIZE, line_width=DEFAULT_GRID_LINE_WIDTH,
        line_colour=DEFAULT_GRID_LINE_COLOUR,
        z_order=DEFAULT_GRID_LINE_Z_ORDER):
    """Draws meridians (longitude grid lines) on the map.

    If either `min_longitude_deg` or `max_longitude_deg` is None, this method
    takes plotting limits from `basemap_object`.

    :param basemap_object: See doc for `plot_countries`.
    :param axes_object: Same.
    :param min_longitude_deg: Minimum longitude for grid lines.
    :param max_longitude_deg: Max longitude for grid lines.
    :param num_meridians: Number of meridians.
    :param font_size: Font size for tick labels.
    :param line_width: See doc for `plot_countries`.
    :param line_colour: Same.
    :param z_order: Same.
    """
    if min_longitude_deg is None or max_longitude_deg is None:
        min_longitude_deg, max_longitude_deg = _basemap_to_latlng_limits(
            basemap_object
        )[1]

    # Normalize longitudes (positive in western hemisphere) before comparing.
    min_longitude_deg = lng_conversion.convert_lng_positive_in_west(
        min_longitude_deg)
    max_longitude_deg = lng_conversion.convert_lng_positive_in_west(
        max_longitude_deg)
    error_checking.assert_is_greater(max_longitude_deg, min_longitude_deg)
    error_checking.assert_is_integer(num_meridians)
    error_checking.assert_is_geq(num_meridians, 2)

    # Round spacing to a "nice" value: nearest 0.1 deg when less than 1 deg,
    # otherwise nearest whole degree.
    spacing_deg = (max_longitude_deg - min_longitude_deg) / (num_meridians - 1)
    if spacing_deg < 1.:
        spacing_deg = number_rounding.round_to_nearest(spacing_deg, 0.1)
    else:
        spacing_deg = numpy.round(spacing_deg)

    # Snap bounds inward to multiples of the spacing, then recompute how many
    # meridians fit between them.
    min_longitude_deg = number_rounding.ceiling_to_nearest(
        min_longitude_deg, spacing_deg)
    max_longitude_deg = number_rounding.floor_to_nearest(
        max_longitude_deg, spacing_deg)
    num_meridians = 1 + int(numpy.round(
        (max_longitude_deg - min_longitude_deg) / spacing_deg
    ))

    longitudes_deg = numpy.linspace(
        min_longitude_deg, max_longitude_deg, num=num_meridians)

    basemap_object.drawmeridians(
        longitudes_deg, color=colour_from_numpy_to_tuple(line_colour),
        fontsize=font_size, linewidth=line_width,
        labels=[False, False, False, True], ax=axes_object, zorder=z_order)
def plot_colour_bar(
        axes_object_or_matrix, data_matrix, colour_map_object,
        colour_norm_object, orientation_string=DEFAULT_CBAR_ORIENTATION_STRING,
        padding=None, extend_min=True, extend_max=True,
        fraction_of_axis_length=1., font_size=FONT_SIZE, aspect_ratio=20.):
    """Plots colour bar.

    :param axes_object_or_matrix: Either one axis handle (instance of
        `matplotlib.axes._subplots.AxesSubplot`) or a numpy array thereof.
    :param data_matrix: numpy array of values to which the colour map applies.
    :param colour_map_object: Colour map (instance of `matplotlib.pyplot.cm`
        or similar).
    :param colour_norm_object: Colour normalization (maps from data space to
        colour-bar space, which goes from 0...1).  Should be an instance of
        `matplotlib.colors.Normalize`.
    :param orientation_string: Orientation ("vertical" or "horizontal").
    :param padding: Padding between colour bar and main plot (in range 0...1).
        Leave as None to use the orientation-specific default.
    :param extend_min: Boolean flag.  If True, values below the minimum
        specified by `colour_norm_object` are possible, so the colour bar
        gets an arrow at the bottom.
    :param extend_max: Boolean flag.  If True, values above the max specified
        by `colour_norm_object` are possible, so the colour bar gets an arrow
        at the top.
    :param fraction_of_axis_length: The colour bar will take up this fraction
        of the axis length (x-axis if horizontal, y-axis if vertical).
    :param font_size: Font size for tick marks on colour bar.
    :param aspect_ratio: Ratio of length to width.
    :return: colour_bar_object: Colour-bar handle (instance of
        `matplotlib.pyplot.colorbar`).
    """
    error_checking.assert_is_real_numpy_array(data_matrix)
    error_checking.assert_is_boolean(extend_min)
    error_checking.assert_is_boolean(extend_max)
    error_checking.assert_is_greater(fraction_of_axis_length, 0.)

    scalar_mappable_object = pyplot.cm.ScalarMappable(
        cmap=colour_map_object, norm=colour_norm_object)
    scalar_mappable_object.set_array(data_matrix)

    # Decide which end(s) of the colour bar get an out-of-range arrow.
    if extend_min:
        extend_arg = 'both' if extend_max else 'min'
    else:
        extend_arg = 'max' if extend_max else 'neither'

    if padding is None:
        padding = (
            HORIZONTAL_CBAR_PADDING if orientation_string == 'horizontal'
            else VERTICAL_CBAR_PADDING
        )

    error_checking.assert_is_real_number(padding)

    # `pyplot.colorbar` accepts either one axes handle or a list of them.
    if isinstance(axes_object_or_matrix, numpy.ndarray):
        axes_arg = axes_object_or_matrix.ravel().tolist()
    else:
        axes_arg = axes_object_or_matrix

    colour_bar_object = pyplot.colorbar(
        ax=axes_arg, mappable=scalar_mappable_object,
        orientation=orientation_string, pad=padding, extend=extend_arg,
        shrink=fraction_of_axis_length, aspect=aspect_ratio)

    colour_bar_object.ax.tick_params(labelsize=font_size)

    if orientation_string == 'horizontal':
        # Rotate tick labels so that long numbers do not overlap.
        colour_bar_object.ax.set_xticklabels(
            colour_bar_object.ax.get_xticklabels(), rotation=90)

    return colour_bar_object
def plot_linear_colour_bar(
        axes_object_or_matrix, data_matrix, colour_map_object, min_value,
        max_value, orientation_string=DEFAULT_CBAR_ORIENTATION_STRING,
        padding=None, extend_min=True, extend_max=True,
        fraction_of_axis_length=1., font_size=FONT_SIZE, aspect_ratio=20.):
    """Plots colour bar with linear scale.

    Thin wrapper around `plot_colour_bar`: builds a linear normalizer over
    [min_value, max_value] and delegates everything else.

    :param axes_object_or_matrix: See doc for `plot_colour_bar`.
    :param data_matrix: Same.
    :param colour_map_object: Same.
    :param min_value: Minimum value in colour bar.
    :param max_value: Max value in colour bar.
    :param orientation_string: See doc for `plot_colour_bar`.
    :param padding: Same.
    :param extend_min: Same.
    :param extend_max: Same.
    :param fraction_of_axis_length: Same.
    :param font_size: Same.
    :param aspect_ratio: Same.
    :return: colour_bar_object: Same.
    """

    # A colour bar whose minimum is not below its maximum is meaningless.
    error_checking.assert_is_greater(max_value, min_value)

    linear_norm_object = matplotlib.colors.Normalize(
        vmin=min_value, vmax=max_value, clip=False)

    return plot_colour_bar(
        axes_object_or_matrix=axes_object_or_matrix, data_matrix=data_matrix,
        colour_map_object=colour_map_object,
        colour_norm_object=linear_norm_object,
        orientation_string=orientation_string, padding=padding,
        extend_min=extend_min, extend_max=extend_max,
        fraction_of_axis_length=fraction_of_axis_length, font_size=font_size,
        aspect_ratio=aspect_ratio
    )
| thunderhoser/GewitterGefahr | gewittergefahr/plotting/plotting_utils.py | Python | mit | 31,807 |
# -*- coding: iso-8859-1 -*-
"""
Plota curva de ZigZag
____________________
Variáveis de entrada:
save (True/False) -- Opção para salvar as figuras ou somente mostrar os gráficos, utilizar somente True até o momento;
formato ('png'/'pdf'/'ps'/'eps'/'svg') -- formatos de saída da figura;
passo (float) -- Paso de tempo da integração;
tmax (integer) -- Tempo máximo;
tini (integer) -- Tempo inicial;
metodo ('euler') -- Método de integração;
____________________
Salva as figuras no diretório './figuras/Curva_de_Giro/curva_de_giro'
"""
import sys
sys.path.append('./AnalisaNavio/')
import scipy as sp
from Es import *
from Leme import *
from Prop import *
from Casco import *
from Navio import *
import os

# Simulation parameters.
nome = 'B MarAd'
save = True              # True: save figures to disk; False: show on screen
formato = 'png'          # output figure format
passo = 0.5              # integration time step
tmax = 1500              # final time
tini = 0                 # initial time
metodo = 'euler'         # integration method
TipoModelo = 'MARAD'
GrausDeLib = 3           # degrees of freedom used in the simulation
LemeCom = sp.array(10.)  # commanded rudder angle for the zig-zag manoeuvre
Proa = sp.array(10.)     # heading threshold for the zig-zag manoeuvre
Rot = sp.array(1.24)     # commanded propeller rotation
#
# Classes
#
In = ('Navioteste', './dados/MarAdinputder.dat', 'inputtab.dat')
io = es(entrada=In)
################################
DicionarioDerivadas = io.lerarqder()
navio1 = navio(DicionarioDerivadas, Nome=nome, Tipo=TipoModelo)
a = navio1.simula(met=metodo, t=tmax, t0=tini, dt=passo,
                  GrausDeLib=GrausDeLib, tipo='ZigZag', leme=LemeCom,
                  proa=Proa, RotCom=Rot)
dir = './figuras/Zig_Zag/' + TipoModelo + '/'
# os.makedirs raises OSError when the directory already exists (e.g. on a
# re-run of the script), so only create it when it is missing.
if not os.path.exists(dir):
    os.makedirs(dir)
####################################
#
# Time-series plots: every entry in the table below is plotted against
# time with a common layout, then saved (or shown) and the figure cleared.
#
####################################
def _make_plot(plot_args, y_label, file_suffix):
    # Plot one time series, label it, then save (or show) and clear.
    plt.plot(*plot_args)
    plt.ylabel(y_label)
    plt.xlabel(r'$t$')
    plt.title('ZigZag10/10')
    if save:
        plt.savefig(dir + TipoModelo + file_suffix, format=formato)
        plt.clf()
    else:
        plt.show()
        plt.clf()

# (plt.plot arguments, y-axis label, output-file suffix)
_curves = [
    ((a[0][:, 0], a[0][:, 1], 'bo'), r'$u$', 'pltut'),              # surge velocity
    ((a[0][:, 0], a[0][:, 2], 'g^'), r'$v$', 'pltvt'),              # sway velocity
    ((a[0][:, 0], a[0][:, 6]), r'$\dot\psi$', 'pltvelyawt'),        # yaw rate
    ((a[0][:, 0], a[0][:, 4], '--'), r'$\dot\phi$', 'pltvelrollt'), # roll rate
    ((a[1][:, 0], a[1][:, 1], '--'), r'$x$', 'pltxt'),              # x position
    ((a[1][:, 0], a[1][:, 2], 'g^'), r'$y$', 'pltyt'),              # y position
    # Yaw angle together with the rudder command, both in degrees.
    ((a[1][:, 0], a[1][:, 6] * (180 / sp.pi), 'o-',
      a[5][:, 0], a[5][:, 1] * (180 / sp.pi), '-.'), r'$\psi$', 'pltyawt'),
    ((a[1][:, 0], a[1][:, 4], 'o-'), r'$\phi$', 'pltrollt'),        # roll angle
    ((a[2][:, 0], a[2][:, 1], '--'), r'$\dot u$', 'pltdotut'),      # surge acceleration
    ((a[2][:, 0], a[2][:, 2], 'g^'), r'$\dot v$', 'pltdotvt'),      # sway acceleration
    ((a[2][:, 0], a[2][:, 6] * (sp.array([180]) / sp.pi)),
     r'$\dot r$', 'pltdotrt'),                                      # yaw acceleration
    ((a[2][:, 0], a[2][:, 4] * (sp.array([180]) / sp.pi), 'o-'),
     r'$\dot p$', 'pltdotpt'),                                      # roll acceleration
    ((a[3][:, 0], a[3][:, 1], '--'), r'$F_x$', 'pltforsurget'),     # surge force
    ((a[3][:, 0], a[3][:, 2], 'g^'), r'$F_y$', 'pltforswayt'),      # sway force
    ((a[3][:, 0], a[3][:, 4], 'o-'), r'$N$', 'pltNt'),              # yaw moment
    ((a[3][:, 0], a[3][:, 3], 'o-'), r'$K$', 'pltKt'),              # roll moment
    ((a[6][:, 0], a[6][:, 1], 'o-'), r'$n$', 'pltnt'),              # engine rotation
    ((a[5][:, 0], a[5][:, 1] * (180 / sp.pi), '-.'),
     r'$\delta_R$', 'pltlemet'),                                    # rudder angle
]
for _plot_args, _y_label, _file_suffix in _curves:
    _make_plot(_plot_args, _y_label, _file_suffix)
####################################################
#
# LaTeX report: one section per zig-zag execution with the manoeuvre
# characteristics returned by the simulation (a[7]).
#
####################################################
f = open('./dados/rel.tex', 'w')
f.write('\chapter{Relatório da Curva Zig-Zag Navio '+ nome +' }\n\n\t')
# The report covers the first three executions, in order.
for _ordinal, _exe in zip(('Primera', 'Segunda', 'Terceira'), a[7]):
    f.write('\section{' + _ordinal + ' Execução}')
    f.write('\n\t')
    f.write('\\begin{center}')
    f.write('\n\t')
    f.write('\\begin{tabular}{ll}')
    f.write('\\ \n\t\t')
    f.write('execução = & ' + str(_exe['exeNummber']))
    f.write('\\\ \n\t\t')
    f.write('Tempo até a execução (segundos) = & ' + str(_exe['time']))
    f.write('\\\ \n\t\t')
    f.write('ângulo de \emph{overshoot} = & '
            + str(_exe['osangle'] * (180 / sp.pi)))
    f.write('\\\ \n\t\t')
    f.write('\emph{overshoot path}= & ' + str(_exe['ospath']))
    f.write('\\\ \n\t\t')
    f.write('\end{tabular}')
    f.write('\n\t')
    f.write('\end{center}')
f.close()
| asoliveira/NumShip | scripts/curvazigzag.py | Python | gpl-3.0 | 10,194 |
"""GraphLasso: sparse inverse covariance estimation with an l1-penalized
estimator.
"""
# Author: Gael Varoquaux <gael.varoquaux@normalesup.org>
# License: BSD 3 clause
# Copyright: INRIA
import warnings
import operator
import sys
import time
import numpy as np
from scipy import linalg
from .empirical_covariance_ import (empirical_covariance, EmpiricalCovariance,
log_likelihood)
from ..exceptions import ConvergenceWarning
from ..utils.extmath import pinvh
from ..utils.validation import check_random_state, check_array
from ..utils import deprecated
from ..linear_model import lars_path
from ..linear_model import cd_fast
from ..model_selection import check_cv, cross_val_score
from ..externals.joblib import Parallel, delayed
import collections
# Helper functions to compute the objective and dual objective functions
# of the l1-penalized estimator
def _objective(mle, precision_, alpha):
    """Evaluation of the graph-lasso objective function

    The objective is a shifted, scaled version of the normalized
    log-likelihood (i.e. its empirical mean over the samples) plus an l1
    penalty on the off-diagonal precision entries, which promotes sparsity.
    """
    n_features = precision_.shape[0]
    # l1 norm of the precision matrix, excluding the (unpenalized) diagonal.
    off_diag_l1 = (np.abs(precision_).sum()
                   - np.abs(np.diag(precision_)).sum())
    return (-2. * log_likelihood(mle, precision_)
            + n_features * np.log(2 * np.pi)
            + alpha * off_diag_l1)
def _dual_gap(emp_cov, precision_, alpha):
"""Expression of the dual gap convergence criterion
The specific definition is given in Duchi "Projected Subgradient Methods
for Learning Sparse Gaussians".
"""
gap = np.sum(emp_cov * precision_)
gap -= precision_.shape[0]
gap += alpha * (np.abs(precision_).sum()
- np.abs(np.diag(precision_)).sum())
return gap
def alpha_max(emp_cov):
    """Find the maximum alpha for which there are some non-zeros off-diagonal.

    Parameters
    ----------
    emp_cov : 2D array, (n_features, n_features)
        The sample covariance matrix

    Notes
    -----
    This results from the bound for the all the Lasso that are solved
    in GraphLasso: each time, the row of cov corresponds to Xy. As the
    bound for alpha is given by `max(abs(Xy))`, the result follows.
    """
    # Zero out the diagonal of a copy so only off-diagonal entries compete
    # for the maximum.
    off_diag = np.copy(emp_cov)
    np.fill_diagonal(off_diag, 0)
    return np.abs(off_diag).max()
# The g-lasso algorithm
def graph_lasso(emp_cov, alpha, cov_init=None, mode='cd', tol=1e-4,
                enet_tol=1e-4, max_iter=100, verbose=False,
                return_costs=False, eps=np.finfo(np.float64).eps,
                return_n_iter=False):
    """l1-penalized covariance estimator

    Read more in the :ref:`User Guide <sparse_inverse_covariance>`.

    Parameters
    ----------
    emp_cov : 2D ndarray, shape (n_features, n_features)
        Empirical covariance from which to compute the covariance estimate.

    alpha : positive float
        The regularization parameter: the higher alpha, the more
        regularization, the sparser the inverse covariance.

    cov_init : 2D array (n_features, n_features), optional
        The initial guess for the covariance.

    mode : {'cd', 'lars'}
        The Lasso solver to use: coordinate descent or LARS. Use LARS for
        very sparse underlying graphs, where p > n. Elsewhere prefer cd
        which is more numerically stable.

    tol : positive float, optional
        The tolerance to declare convergence: if the dual gap goes below
        this value, iterations are stopped.

    enet_tol : positive float, optional
        The tolerance for the elastic net solver used to calculate the descent
        direction. This parameter controls the accuracy of the search direction
        for a given column update, not of the overall parameter estimate. Only
        used for mode='cd'.

    max_iter : integer, optional
        The maximum number of iterations.

    verbose : boolean, optional
        If verbose is True, the objective function and dual gap are
        printed at each iteration.

    return_costs : boolean, optional
        If return_costs is True, the objective function and dual gap
        at each iteration are returned.

    eps : float, optional
        The machine-precision regularization in the computation of the
        Cholesky diagonal factors. Increase this for very ill-conditioned
        systems.

    return_n_iter : bool, optional
        Whether or not to return the number of iterations.

    Returns
    -------
    covariance : 2D ndarray, shape (n_features, n_features)
        The estimated covariance matrix.

    precision : 2D ndarray, shape (n_features, n_features)
        The estimated (sparse) precision matrix.

    costs : list of (objective, dual_gap) pairs
        The list of values of the objective function and the dual gap at
        each iteration. Returned only if return_costs is True.

    n_iter : int
        Number of iterations. Returned only if `return_n_iter` is set to True.

    See Also
    --------
    GraphLasso, GraphLassoCV

    Notes
    -----
    The algorithm employed to solve this problem is the GLasso algorithm,
    from the Friedman 2008 Biostatistics paper. It is the same algorithm
    as in the R `glasso` package.

    One possible difference with the `glasso` R package is that the
    diagonal coefficients are not penalized.
    """
    _, n_features = emp_cov.shape
    # With no regularization the MLE is simply the inverse of the empirical
    # covariance, so return it directly (with the same tuple shape callers
    # expect for the chosen return_costs/return_n_iter flags).
    if alpha == 0:
        if return_costs:
            precision_ = linalg.inv(emp_cov)
            cost = - 2. * log_likelihood(emp_cov, precision_)
            cost += n_features * np.log(2 * np.pi)
            d_gap = np.sum(emp_cov * precision_) - n_features
            if return_n_iter:
                return emp_cov, precision_, (cost, d_gap), 0
            else:
                return emp_cov, precision_, (cost, d_gap)
        else:
            if return_n_iter:
                return emp_cov, linalg.inv(emp_cov), 0
            else:
                return emp_cov, linalg.inv(emp_cov)
    if cov_init is None:
        covariance_ = emp_cov.copy()
    else:
        covariance_ = cov_init.copy()
    # As a trivial regularization (Tikhonov like), we scale down the
    # off-diagonal coefficients of our starting point: This is needed, as
    # in the cross-validation the cov_init can easily be
    # ill-conditioned, and the CV loop blows. Beside, this takes
    # conservative stand-point on the initial conditions, and it tends to
    # make the convergence go faster.
    covariance_ *= 0.95
    # Keep the empirical diagonal untouched: the diagonal is not penalized.
    diagonal = emp_cov.flat[::n_features + 1]
    covariance_.flat[::n_features + 1] = diagonal
    precision_ = pinvh(covariance_)
    indices = np.arange(n_features)
    costs = list()
    # The different l1 regression solver have different numerical errors
    if mode == 'cd':
        errors = dict(over='raise', invalid='ignore')
    else:
        errors = dict(invalid='raise')
    try:
        # be robust to the max_iter=0 edge case, see:
        # https://github.com/scikit-learn/scikit-learn/issues/4134
        d_gap = np.inf
        for i in range(max_iter):
            # Block coordinate descent: solve one l1 regression per column.
            for idx in range(n_features):
                # Sub-covariance with row/column `idx` removed.
                sub_covariance = np.ascontiguousarray(
                    covariance_[indices != idx].T[indices != idx])
                row = emp_cov[idx, indices != idx]
                with np.errstate(**errors):
                    if mode == 'cd':
                        # Use coordinate descent
                        # Warm-start from the current precision column.
                        coefs = -(precision_[indices != idx, idx]
                                  / (precision_[idx, idx] + 1000 * eps))
                        coefs, _, _, _ = cd_fast.enet_coordinate_descent_gram(
                            coefs, alpha, 0, sub_covariance, row, row,
                            max_iter, enet_tol, check_random_state(None), False)
                    else:
                        # Use LARS
                        _, _, coefs = lars_path(
                            sub_covariance, row, Xy=row, Gram=sub_covariance,
                            alpha_min=alpha / (n_features - 1), copy_Gram=True,
                            method='lars', return_path=False)
                # Update the precision matrix
                precision_[idx, idx] = (
                    1. / (covariance_[idx, idx]
                          - np.dot(covariance_[indices != idx, idx], coefs)))
                precision_[indices != idx, idx] = (- precision_[idx, idx]
                                                   * coefs)
                precision_[idx, indices != idx] = (- precision_[idx, idx]
                                                   * coefs)
                # Propagate the update back into the covariance estimate.
                coefs = np.dot(sub_covariance, coefs)
                covariance_[idx, indices != idx] = coefs
                covariance_[indices != idx, idx] = coefs
            d_gap = _dual_gap(emp_cov, precision_, alpha)
            cost = _objective(emp_cov, precision_, alpha)
            if verbose:
                print(
                    '[graph_lasso] Iteration % 3i, cost % 3.2e, dual gap %.3e'
                    % (i, cost, d_gap))
            if return_costs:
                costs.append((cost, d_gap))
            if np.abs(d_gap) < tol:
                break
            if not np.isfinite(cost) and i > 0:
                raise FloatingPointError('Non SPD result: the system is '
                                         'too ill-conditioned for this solver')
        else:
            # for/else: only reached when the loop exhausted max_iter
            # without hitting the `break` above.
            warnings.warn('graph_lasso: did not converge after %i iteration:'
                          ' dual gap: %.3e' % (max_iter, d_gap),
                          ConvergenceWarning)
    except FloatingPointError as e:
        e.args = (e.args[0]
                  + '. The system is too ill-conditioned for this solver',)
        raise e
    if return_costs:
        if return_n_iter:
            return covariance_, precision_, costs, i + 1
        else:
            return covariance_, precision_, costs
    else:
        if return_n_iter:
            return covariance_, precision_, i + 1
        else:
            return covariance_, precision_
class GraphLasso(EmpiricalCovariance):
    """Sparse inverse covariance estimation with an l1-penalized estimator.

    Read more in the :ref:`User Guide <sparse_inverse_covariance>`.

    Parameters
    ----------
    alpha : positive float, default 0.01
        The regularization parameter: the higher alpha, the more
        regularization, the sparser the inverse covariance.

    mode : {'cd', 'lars'}, default 'cd'
        The Lasso solver to use: coordinate descent or LARS. Use LARS for
        very sparse underlying graphs, where p > n. Elsewhere prefer cd
        which is more numerically stable.

    tol : positive float, default 1e-4
        The tolerance to declare convergence: if the dual gap goes below
        this value, iterations are stopped.

    enet_tol : positive float, optional
        The tolerance for the elastic net solver used to calculate the descent
        direction. This parameter controls the accuracy of the search direction
        for a given column update, not of the overall parameter estimate. Only
        used for mode='cd'.

    max_iter : integer, default 100
        The maximum number of iterations.

    verbose : boolean, default False
        If verbose is True, the objective function and dual gap are
        plotted at each iteration.

    assume_centered : boolean, default False
        If True, data are not centered before computation.
        Useful when working with data whose mean is almost, but not exactly
        zero.
        If False, data are centered before computation.

    Attributes
    ----------
    covariance_ : array-like, shape (n_features, n_features)
        Estimated covariance matrix

    precision_ : array-like, shape (n_features, n_features)
        Estimated pseudo inverse matrix.

    n_iter_ : int
        Number of iterations run.

    See Also
    --------
    graph_lasso, GraphLassoCV
    """
    def __init__(self, alpha=.01, mode='cd', tol=1e-4, enet_tol=1e-4,
                 max_iter=100, verbose=False, assume_centered=False):
        self.alpha = alpha
        self.mode = mode
        self.tol = tol
        self.enet_tol = enet_tol
        self.max_iter = max_iter
        self.verbose = verbose
        self.assume_centered = assume_centered
        # The base class needs this for the score method
        self.store_precision = True
    def fit(self, X, y=None):
        """Fits the GraphLasso model to X.

        Parameters
        ----------
        X : ndarray, shape (n_samples, n_features)
            Data from which to compute the covariance estimate.

        y : (ignored)
            Present for API consistency.

        Returns
        -------
        self : object
        """
        # Covariance does not make sense for a single feature
        X = check_array(X, ensure_min_features=2, ensure_min_samples=2,
                        estimator=self)
        if self.assume_centered:
            self.location_ = np.zeros(X.shape[1])
        else:
            self.location_ = X.mean(0)
        emp_cov = empirical_covariance(
            X, assume_centered=self.assume_centered)
        self.covariance_, self.precision_, self.n_iter_ = graph_lasso(
            emp_cov, alpha=self.alpha, mode=self.mode, tol=self.tol,
            enet_tol=self.enet_tol, max_iter=self.max_iter,
            verbose=self.verbose, return_n_iter=True)
        return self
# Cross-validation with GraphLasso
def graph_lasso_path(X, alphas, cov_init=None, X_test=None, mode='cd',
                     tol=1e-4, enet_tol=1e-4, max_iter=100, verbose=False):
    """l1-penalized covariance estimator along a path of decreasing alphas

    Read more in the :ref:`User Guide <sparse_inverse_covariance>`.

    Parameters
    ----------
    X : 2D ndarray, shape (n_samples, n_features)
        Data from which to compute the covariance estimate.

    alphas : list of positive floats
        The list of regularization parameters, decreasing order.

    cov_init : 2D array (n_features, n_features), optional
        The initial guess for the covariance.

    X_test : 2D array, shape (n_test_samples, n_features), optional
        Optional test matrix to measure generalisation error.

    mode : {'cd', 'lars'}
        The Lasso solver to use: coordinate descent or LARS. Use LARS for
        very sparse underlying graphs, where p > n. Elsewhere prefer cd
        which is more numerically stable.

    tol : positive float, optional
        The tolerance to declare convergence: if the dual gap goes below
        this value, iterations are stopped.

    enet_tol : positive float, optional
        The tolerance for the elastic net solver used to calculate the descent
        direction. This parameter controls the accuracy of the search direction
        for a given column update, not of the overall parameter estimate. Only
        used for mode='cd'.

    max_iter : integer, optional
        The maximum number of iterations.

    verbose : integer, optional
        The higher the verbosity flag, the more information is printed
        during the fitting.

    Returns
    -------
    covariances_ : List of 2D ndarray, shape (n_features, n_features)
        The estimated covariance matrices.

    precisions_ : List of 2D ndarray, shape (n_features, n_features)
        The estimated (sparse) precision matrices.

    scores_ : List of float
        The generalisation error (log-likelihood) on the test data.
        Returned only if test data is passed.
    """
    inner_verbose = max(0, verbose - 1)
    emp_cov = empirical_covariance(X)
    if cov_init is None:
        covariance_ = emp_cov.copy()
    else:
        covariance_ = cov_init
    covariances_ = list()
    precisions_ = list()
    scores_ = list()
    if X_test is not None:
        test_emp_cov = empirical_covariance(X_test)
    for alpha in alphas:
        try:
            # Capture the errors, and move on
            # Warm start: the solution at the previous (larger) alpha is
            # passed as cov_init for the current one.
            covariance_, precision_ = graph_lasso(
                emp_cov, alpha=alpha, cov_init=covariance_, mode=mode, tol=tol,
                enet_tol=enet_tol, max_iter=max_iter, verbose=inner_verbose)
            covariances_.append(covariance_)
            precisions_.append(precision_)
            if X_test is not None:
                this_score = log_likelihood(test_emp_cov, precision_)
        except FloatingPointError:
            # The solver blew up for this alpha: record NaN placeholders so
            # the output lists stay aligned with `alphas`.
            this_score = -np.inf
            covariances_.append(np.nan)
            precisions_.append(np.nan)
        if X_test is not None:
            if not np.isfinite(this_score):
                this_score = -np.inf
            scores_.append(this_score)
        if verbose == 1:
            sys.stderr.write('.')
        elif verbose > 1:
            if X_test is not None:
                print('[graph_lasso_path] alpha: %.2e, score: %.2e'
                      % (alpha, this_score))
            else:
                print('[graph_lasso_path] alpha: %.2e' % alpha)
    if X_test is not None:
        return covariances_, precisions_, scores_
    return covariances_, precisions_
class GraphLassoCV(GraphLasso):
    """Sparse inverse covariance w/ cross-validated choice of the l1 penalty

    Read more in the :ref:`User Guide <sparse_inverse_covariance>`.

    Parameters
    ----------
    alphas : integer, or list positive float, optional
        If an integer is given, it fixes the number of points on the
        grids of alpha to be used. If a list is given, it gives the
        grid to be used. See the notes in the class docstring for
        more details.

    n_refinements : strictly positive integer
        The number of times the grid is refined. Not used if explicit
        values of alphas are passed.

    cv : int, cross-validation generator or an iterable, optional
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:

        - None, to use the default 3-fold cross-validation,
        - integer, to specify the number of folds.
        - An object to be used as a cross-validation generator.
        - An iterable yielding train/test splits.

        For integer/None inputs :class:`KFold` is used.

        Refer :ref:`User Guide <cross_validation>` for the various
        cross-validation strategies that can be used here.

    tol : positive float, optional
        The tolerance to declare convergence: if the dual gap goes below
        this value, iterations are stopped.

    enet_tol : positive float, optional
        The tolerance for the elastic net solver used to calculate the descent
        direction. This parameter controls the accuracy of the search direction
        for a given column update, not of the overall parameter estimate. Only
        used for mode='cd'.

    max_iter : integer, optional
        Maximum number of iterations.

    mode : {'cd', 'lars'}
        The Lasso solver to use: coordinate descent or LARS. Use LARS for
        very sparse underlying graphs, where number of features is greater
        than number of samples. Elsewhere prefer cd which is more numerically
        stable.

    n_jobs : int, optional
        number of jobs to run in parallel (default 1).

    verbose : boolean, optional
        If verbose is True, the objective function and duality gap are
        printed at each iteration.

    assume_centered : Boolean
        If True, data are not centered before computation.
        Useful when working with data whose mean is almost, but not exactly
        zero.
        If False, data are centered before computation.

    Attributes
    ----------
    covariance_ : numpy.ndarray, shape (n_features, n_features)
        Estimated covariance matrix.

    precision_ : numpy.ndarray, shape (n_features, n_features)
        Estimated precision matrix (inverse covariance).

    alpha_ : float
        Penalization parameter selected.

    cv_alphas_ : list of float
        All penalization parameters explored.

    grid_scores_ : 2D numpy.ndarray (n_alphas, n_folds)
        Log-likelihood score on left-out data across folds.

    n_iter_ : int
        Number of iterations run for the optimal alpha.

    See Also
    --------
    graph_lasso, GraphLasso

    Notes
    -----
    The search for the optimal penalization parameter (alpha) is done on an
    iteratively refined grid: first the cross-validated scores on a grid are
    computed, then a new refined grid is centered around the maximum, and so
    on.

    One of the challenges which is faced here is that the solvers can
    fail to converge to a well-conditioned estimate. The corresponding
    values of alpha then come out as missing values, but the optimum may
    be close to these missing values.
    """
    def __init__(self, alphas=4, n_refinements=4, cv=None, tol=1e-4,
                 enet_tol=1e-4, max_iter=100, mode='cd', n_jobs=1,
                 verbose=False, assume_centered=False):
        self.alphas = alphas
        self.n_refinements = n_refinements
        self.mode = mode
        self.tol = tol
        self.enet_tol = enet_tol
        self.max_iter = max_iter
        self.verbose = verbose
        self.cv = cv
        self.n_jobs = n_jobs
        self.assume_centered = assume_centered
        # The base class needs this for the score method
        self.store_precision = True
    @property
    @deprecated("Attribute grid_scores was deprecated in version 0.19 and "
                "will be removed in 0.21. Use 'grid_scores_' instead")
    def grid_scores(self):
        # Backward-compatible alias for the trailing-underscore attribute.
        return self.grid_scores_
    def fit(self, X, y=None):
        """Fits the GraphLasso covariance model to X.

        Parameters
        ----------
        X : ndarray, shape (n_samples, n_features)
            Data from which to compute the covariance estimate
        """
        # Covariance does not make sense for a single feature
        X = check_array(X, ensure_min_features=2, estimator=self)
        if self.assume_centered:
            self.location_ = np.zeros(X.shape[1])
        else:
            self.location_ = X.mean(0)
        emp_cov = empirical_covariance(
            X, assume_centered=self.assume_centered)
        cv = check_cv(self.cv, y, classifier=False)
        # List of (alpha, scores, covs)
        path = list()
        n_alphas = self.alphas
        inner_verbose = max(0, self.verbose - 1)
        # A sequence of alphas means an explicit user-supplied grid: use it
        # as-is and skip the refinement loop.
        # NOTE(review): `collections.Sequence` is a deprecated alias removed
        # in Python 3.10; switch to `collections.abc.Sequence` once Python 2
        # support is dropped.
        if isinstance(n_alphas, collections.Sequence):
            alphas = self.alphas
            n_refinements = 1
        else:
            n_refinements = self.n_refinements
            alpha_1 = alpha_max(emp_cov)
            alpha_0 = 1e-2 * alpha_1
            alphas = np.logspace(np.log10(alpha_0), np.log10(alpha_1),
                                 n_alphas)[::-1]
        t0 = time.time()
        for i in range(n_refinements):
            with warnings.catch_warnings():
                # No need to see the convergence warnings on this grid:
                # they will always be points that will not converge
                # during the cross-validation
                warnings.simplefilter('ignore', ConvergenceWarning)
                # Compute the cross-validated loss on the current grid
                # NOTE: Warm-restarting graph_lasso_path has been tried, and
                # this did not allow to gain anything (same execution time with
                # or without).
                this_path = Parallel(
                    n_jobs=self.n_jobs,
                    verbose=self.verbose
                )(delayed(graph_lasso_path)(X[train], alphas=alphas,
                                            X_test=X[test], mode=self.mode,
                                            tol=self.tol,
                                            enet_tol=self.enet_tol,
                                            max_iter=int(.1 * self.max_iter),
                                            verbose=inner_verbose)
                  for train, test in cv.split(X, y))
            # Little danse to transform the list in what we need
            covs, _, scores = zip(*this_path)
            covs = zip(*covs)
            scores = zip(*scores)
            path.extend(zip(alphas, scores, covs))
            path = sorted(path, key=operator.itemgetter(0), reverse=True)
            # Find the maximum (avoid using built in 'max' function to
            # have a fully-reproducible selection of the smallest alpha
            # in case of equality)
            best_score = -np.inf
            last_finite_idx = 0
            # NOTE(review): if every score is NaN, `best_index` is never
            # assigned and the refinement below raises NameError — confirm
            # inputs guarantee at least one finite score.
            for index, (alpha, scores, _) in enumerate(path):
                this_score = np.mean(scores)
                # Scores of the order of 1/eps indicate numerical blow-up:
                # treat them as missing.
                if this_score >= .1 / np.finfo(np.float64).eps:
                    this_score = np.nan
                if np.isfinite(this_score):
                    last_finite_idx = index
                if this_score >= best_score:
                    best_score = this_score
                    best_index = index
            # Refine the grid
            if best_index == 0:
                # We do not need to go back: we have chosen
                # the highest value of alpha for which there are
                # non-zero coefficients
                alpha_1 = path[0][0]
                alpha_0 = path[1][0]
            elif (best_index == last_finite_idx
                    and not best_index == len(path) - 1):
                # We have non-converged models on the upper bound of the
                # grid, we need to refine the grid there
                alpha_1 = path[best_index][0]
                alpha_0 = path[best_index + 1][0]
            elif best_index == len(path) - 1:
                alpha_1 = path[best_index][0]
                alpha_0 = 0.01 * path[best_index][0]
            else:
                alpha_1 = path[best_index - 1][0]
                alpha_0 = path[best_index + 1][0]
            if not isinstance(n_alphas, collections.Sequence):
                # New grid between the refined bounds; drop the endpoints,
                # which were already evaluated.
                alphas = np.logspace(np.log10(alpha_1), np.log10(alpha_0),
                                     n_alphas + 2)
                alphas = alphas[1:-1]
            if self.verbose and n_refinements > 1:
                print('[GraphLassoCV] Done refinement % 2i out of %i: % 3is'
                      % (i + 1, n_refinements, time.time() - t0))
        path = list(zip(*path))
        grid_scores = list(path[1])
        alphas = list(path[0])
        # Finally, compute the score with alpha = 0
        alphas.append(0)
        grid_scores.append(cross_val_score(EmpiricalCovariance(), X,
                                           cv=cv, n_jobs=self.n_jobs,
                                           verbose=inner_verbose))
        self.grid_scores_ = np.array(grid_scores)
        best_alpha = alphas[best_index]
        self.alpha_ = best_alpha
        self.cv_alphas_ = alphas
        # Finally fit the model with the selected alpha
        self.covariance_, self.precision_, self.n_iter_ = graph_lasso(
            emp_cov, alpha=best_alpha, mode=self.mode, tol=self.tol,
            enet_tol=self.enet_tol, max_iter=self.max_iter,
            verbose=inner_verbose, return_n_iter=True)
        return self
| pprett/scikit-learn | sklearn/covariance/graph_lasso_.py | Python | bsd-3-clause | 26,692 |
from bespin.errors import BespinError
from contextlib import contextmanager
import json
import sys
class NotSpecified(object):
    """Sentinel class used to tell an explicit ``None`` apart from "no value given"."""
class AssertionsAssertionsMixin:
    """Extra assertion helpers intended to be mixed into a unittest.TestCase."""

    def assertSortedEqual(self, one, two):
        """Assert that the sorted of the two equal"""
        self.assertEqual(sorted(one), sorted(two))

    def assertJsonDictEqual(self, one, two):
        """Assert the two dictionaries are the same, print out as json if not"""
        try:
            self.assertEqual(one, two)
        except AssertionError:
            # Pretty-print both sides so the diff is readable before re-raising.
            print("Got =============>")
            print(json.dumps(one, indent=2, sort_keys=True))
            print("Expected --------------->")
            print(json.dumps(two, indent=2, sort_keys=True))
            raise

    def assertItemsEqual(self, a, b):
        """wraps assertCountEqual, assertItemsEqual or poorly emulates it"""
        if sys.version_info[0] == 3 and sys.version_info[1] >= 2:
            return self.assertCountEqual(a, b)
        elif sys.version_info[0] == 2 and sys.version_info[1] >= 7:
            # NOTE(review): when this mixin precedes TestCase in the MRO this
            # resolves back to this very method on Python 2.7 and would recurse
            # forever -- confirm the intended base-class ordering.
            return self.assertItemsEqual(a, b)
        else:
            return self.assertEqual(sorted(a), sorted(b))

    @contextmanager
    def fuzzyAssertRaisesError(self, expected_kls, expected_msg_regex=NotSpecified, **values):
        """
        Assert that something raises a particular type of error.

        The error raised must be a subclass of the expected_kls
        Have a message that matches the specified regex.
        And have atleast the values specified in it's kwargs.
        """
        try:
            yield
        except BespinError as error:
            try:
                assert issubclass(error.__class__, expected_kls)
                if expected_msg_regex is not NotSpecified:
                    # Fixed: unittest's assertRegexpMatches signature is
                    # (text, regex); the arguments were previously reversed,
                    # so the regex was being matched against itself.
                    self.assertRegexpMatches(error.message, expected_msg_regex)
                errors = values.get("_errors")
                if "_errors" in values:
                    del values["_errors"]
                self.assertDictContainsSubset(values, error.kwargs)
                if errors:
                    self.assertEqual(sorted(error.errors), sorted(errors))
            except AssertionError:
                # Make failures diagnosable before propagating them.
                print("Got error: {0}".format(error))
                print("Expected: {0}: {1}: {2}".format(expected_kls, expected_msg_regex, values))
                raise
        else:
            assert False, "Expected an exception to be raised\n\texpected_kls: {0}\n\texpected_msg_regex: {1}\n\thave_atleast: {2}".format(
                expected_kls, expected_msg_regex, values
            )
| realestate-com-au/bespin | tests/helpers/mixins/assertions.py | Python | mit | 2,632 |
import sys
from PyQt4 import QtGui
def main ():
    """Create the Qt application, show a fixed-size empty window and run the event loop."""
    app = QtGui.QApplication(sys.argv)
    janela = QtGui.QWidget()
    # x=400, y=250, width=500, height=300
    janela.setGeometry(400, 250, 500, 300)
    janela.setWindowTitle("pode psa ")
    janela.show()
    # exec_() blocks until the window is closed; propagate its exit code.
    sys.exit(app.exec_())
# Fixed: the original used the string comparison `>=`, which is a bug --
# equality is the correct check for "run only when executed as a script".
if __name__ == '__main__':
    main()
'''
Created by auto_sdk on 2015.06.23
'''
from aliyun.api.base import RestApi
class Ram20140214GetUserRequest(RestApi):
    """Request object for the RAM ``GetUser`` API (version 2014-02-14)."""

    def __init__(self, domain='ram.aliyuncs.com', port=80):
        # Let the base class set up the transport for this endpoint.
        RestApi.__init__(self, domain, port)
        # Request parameters; to be filled in by the caller before sending.
        self.AccountSpace = None
        self.UserName = None

    def getapiname(self):
        """Return the fully-qualified API name used to route this request."""
        return 'ram.aliyuncs.com.GetUser.2014-02-14'
| francisar/rds_manager | aliyun/api/rest/Ram20140214GetUserRequest.py | Python | mit | 334 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function
import tensorflow as tf
from niftynet.layer.downsample import DownSampleLayer
from tests.niftynet_testcase import NiftyNetTestCase
class DownSampleTest(NiftyNetTestCase):
    """Output-shape checks for DownSampleLayer on 2-D and 3-D inputs."""

    def get_3d_input(self):
        # batch=2, 16x16x16 spatial grid, 8 channels
        return tf.ones((2, 16, 16, 16, 8))

    def get_2d_input(self):
        # batch=2, 16x16 spatial grid, 8 channels
        return tf.ones((2, 16, 16, 8))

    def _test_nd_downsample_output_shape(self,
                                         rank,
                                         param_dict,
                                         output_shape):
        """Build DownSampleLayer(**param_dict), run it on a rank-`rank`
        input and verify the shape of the evaluated output."""
        if rank == 2:
            data = self.get_2d_input()
        elif rank == 3:
            data = self.get_3d_input()
        layer = DownSampleLayer(**param_dict)
        downsampled = layer(data)
        print(layer)
        with self.cached_session() as sess:
            sess.run(tf.global_variables_initializer())
            result = sess.run(downsampled)
            self.assertAllClose(output_shape, result.shape)

    def test_3d_max_shape(self):
        self._test_nd_downsample_output_shape(
            rank=3,
            param_dict={'func': 'MAX', 'kernel_size': 3, 'stride': 3},
            output_shape=(2, 6, 6, 6, 8))

    def test_3d_avg_shape(self):
        self._test_nd_downsample_output_shape(
            rank=3,
            param_dict={'func': 'AVG',
                        'kernel_size': [3, 3, 2],
                        'stride': [3, 2, 1]},
            output_shape=(2, 6, 8, 16, 8))

    def test_3d_const_shape(self):
        self._test_nd_downsample_output_shape(
            rank=3,
            param_dict={'func': 'CONSTANT',
                        'kernel_size': [1, 3, 2],
                        'stride': [3, 2, 2]},
            output_shape=(2, 6, 8, 8, 8))

    def test_2d_max_shape(self):
        # NOTE(review): despite the name this exercises func='CONSTANT',
        # mirroring the original test -- confirm whether 'MAX' was intended.
        self._test_nd_downsample_output_shape(
            rank=2,
            param_dict={'func': 'CONSTANT', 'kernel_size': [1, 3], 'stride': 3},
            output_shape=(2, 6, 6, 8))

    def test_2d_avg_shape(self):
        self._test_nd_downsample_output_shape(
            rank=2,
            param_dict={'func': 'AVG', 'kernel_size': [2, 3], 'stride': 2},
            output_shape=(2, 8, 8, 8))

    def test_2d_const_shape(self):
        self._test_nd_downsample_output_shape(
            rank=2,
            param_dict={'func': 'CONSTANT', 'kernel_size': [2, 3], 'stride': [2, 3]},
            output_shape=(2, 8, 6, 8))
# Allow running this test module directly via the TensorFlow test runner.
if __name__ == "__main__":
    tf.test.main()
| NifTK/NiftyNet | tests/downsample_test.py | Python | apache-2.0 | 3,371 |
# vim:fileencoding=utf-8
# Public API of this package: only log_str is exported via `from ... import *`.
__all__ = [ 'log_str' ]
| desci/tg-cryptoforexbot | plugins/log/__init__.py | Python | gpl-3.0 | 50 |
#coding:utf-8
#################################
#Copyright(c) 2014 dtysky
#################################
import G2R,os
class SoundTag(G2R.TagSource):
    """Tag source mapping sound entries to renpy-style tag dictionaries."""

    def Get(self, Flag, US):
        """Build the {'m': name-tags, 'k': kind-tags} mapping for the
        sound entries registered under ``US.Args[Flag]``."""
        entries = US.Args[Flag]
        # 'm': file name (extension stripped) prefixed with 'sound_'.
        names = {m: 'sound_' + os.path.splitext(entries[m])[0] for m in entries}
        # 'k': every entry supports the same two playback kinds.
        kinds = {m: {'loop': 'loop', 'normal': 'normal'} for m in entries}
        return {'m': names, 'k': kinds}
# This is the version of this source code.

manual_verstr = "1.5"
auto_build_num = "166"
# Full version string: "<manual>.<build>".
verstr = manual_verstr + "." + auto_build_num
try:
    from pyutil.version_class import Version as pyutil_Version
    __version__ = pyutil_Version(verstr)
except (ImportError, ValueError):
    # Maybe there is no pyutil installed.
    try:
        from distutils.version import LooseVersion as distutils_Version
        __version__ = distutils_Version(verstr)
    except ImportError:
        # distutils was removed in Python 3.12 (PEP 632); fall back to the
        # plain version string so importing this module never crashes.
        __version__ = verstr
| zyegfryed/python-oauth2 | oauth2/_version.py | Python | mit | 438 |
# Nemubot is a smart and modulable IM bot.
# Copyright (C) 2012-2016 Mercier Pierre-Olivier
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
import queue
import traceback
from nemubot.bot import sync_act
class AbstractServer:
    """An abstract server: handle communication with an IM server."""

    def __init__(self, name, fdClass, **kwargs):
        """Initialize an abstract server

        Keyword argument:
        name -- Identifier of the socket, for convenience
        fdClass -- Class to instantiate as support file
        """
        self._name = name
        self._fd = fdClass(**kwargs)
        self._logger = logging.getLogger("nemubot.server." + str(self.name))
        # Bytes received but not yet split into complete messages.
        self._readbuffer = b''
        # Outgoing messages waiting to be flushed by async_write().
        self._sending_queue = queue.Queue()

    @property
    def name(self):
        """Identifier of this server: the given name, or the fd number."""
        if self._name is not None:
            return self._name
        else:
            return self._fd.fileno()

    # Open/close

    def connect(self, *args, **kwargs):
        """Register the server in _poll"""
        self._logger.info("Opening connection")
        self._fd.connect(*args, **kwargs)
        self._on_connect()

    def _on_connect(self):
        # Register the socket with the bot's poll loop.
        sync_act("sckt", "register", self._fd.fileno())

    def close(self, *args, **kwargs):
        """Unregister the server from _poll"""
        self._logger.info("Closing connection")
        if self._fd.fileno() > 0:
            sync_act("sckt", "unregister", self._fd.fileno())
        self._fd.close(*args, **kwargs)

    # Writes

    def write(self, message):
        """Asynchronymously send a message to the server using send_callback

        Argument:
        message -- message to send
        """
        self._sending_queue.put(self.format(message))
        self._logger.debug("Message '%s' appended to write queue coming from %s:%d in %s", message, *traceback.extract_stack(limit=3)[0][:3])
        sync_act("sckt", "write", self._fd.fileno())

    def async_write(self):
        """Internal function used when the file descriptor is writable"""
        try:
            sync_act("sckt", "unwrite", self._fd.fileno())
            while not self._sending_queue.empty():
                self._write(self._sending_queue.get_nowait())
                self._sending_queue.task_done()
        except queue.Empty:
            pass

    def send_response(self, response):
        """Send a formated Message class

        Argument:
        response -- message to send
        """
        if response is None:
            return
        elif isinstance(response, list):
            # Flatten nested responses recursively.
            for r in response:
                self.send_response(r)
        else:
            vprnt = self.printer()
            response.accept(vprnt)
            self.write(vprnt.pp)

    # Read

    def async_read(self):
        """Internal function used when the file descriptor is readable

        Returns:
        A list of fully received messages
        """
        ret, self._readbuffer = self.lex(self._readbuffer + self.read())
        for r in ret:
            yield r

    def lex(self, buf):
        """Assume lexing in default case is per line

        Argument:
        buf -- buffer to lex
        """
        msgs = buf.split(b'\r\n')
        # The last element is an incomplete trailing fragment (possibly empty).
        partial = msgs.pop()
        return msgs, partial

    def parse(self, msg):
        # Fixed: `raise NotImplemented` raised a TypeError at runtime, because
        # NotImplemented is not an exception; subclasses must override this.
        raise NotImplementedError

    # Exceptions

    def exception(self, flags):
        """Exception occurs on fd"""
        self._fd.close()

    # Proxy

    def fileno(self):
        return self._fd.fileno()
| nbr23/nemubot | nemubot/server/abstract.py | Python | agpl-3.0 | 4,141 |
import os
import re
import json
import shutil
import tarfile
import tempfile
from climb.config import config
from climb.commands import Commands, command, completers
from climb.exceptions import CLIException
from climb.paths import format_path, split_path, ROOT_PATH
from grafcli.documents import Document, Dashboard, Row, Panel
from grafcli.exceptions import CommandCancelled
from grafcli.resources import Resources
from grafcli.storage.system import to_file_format, from_file_format
from grafcli.utils import json_pretty
class GrafCommands(Commands):
    """CLI commands operating on Grafana documents (dashboards, rows, panels).

    Paths are virtual, filesystem-style paths resolved against the CLI's
    current working path; all storage access goes through ``Resources``.
    """
    def __init__(self, cli):
        super().__init__(cli)
        # Facade over the configured storage backends (remote Grafana / files).
        self._resources = Resources()
    @command
    @completers('path')
    def ls(self, path=None):
        """List documents under *path* (defaults to the current path), sorted."""
        path = format_path(self._cli.current_path, path)
        result = self._resources.list(path)
        return "\n".join(sorted(result))
    @command
    @completers('path')
    def cd(self, path=None):
        """Change the current path; validates it by listing it first."""
        path = format_path(self._cli.current_path, path, default=ROOT_PATH)
        # No exception means correct path
        self._resources.list(path)
        self._cli.set_current_path(path)
    @command
    @completers('path')
    def cat(self, path):
        """Return the document at *path* pretty-printed as JSON."""
        path = format_path(self._cli.current_path, path)
        document = self._resources.get(path)
        return json_pretty(document.source, colorize=config['grafcli'].getboolean('colorize'))
    @command
    @completers('path')
    def cp(self, source, destination, match_slug=False):
        """Copy one or more documents; the last element of *source* is the destination.

        NOTE(review): the ``destination`` parameter is immediately overwritten
        by ``source.pop(-1)`` -- presumably the CLI wiring passes all
        positionals in *source*; confirm against the argument parser.
        """
        if len(source) < 2:
            raise CLIException("No destination provided")
        destination = source.pop(-1)
        destination_path = format_path(self._cli.current_path, destination)
        for path in source:
            source_path = format_path(self._cli.current_path, path)
            document = self._resources.get(source_path)
            if match_slug:
                # Resolve e.g. "7-my-dashboard" from the document's slug.
                destination_path = self._match_slug(document, destination_path)
            self._resources.save(destination_path, document)
            self._cli.log("cp: {} -> {}", source_path, destination_path)
    @command
    @completers('path')
    def mv(self, source, destination, match_slug=False):
        """Move documents: copy to the destination, then remove the source."""
        if len(source) < 2:
            raise CLIException("No destination provided")
        destination = source.pop(-1)
        destination_path = format_path(self._cli.current_path, destination)
        for path in source:
            source_path = format_path(self._cli.current_path, path)
            document = self._resources.get(source_path)
            if match_slug:
                destination_path = self._match_slug(document, destination_path)
            self._resources.save(destination_path, document)
            self._resources.remove(source_path)
            self._cli.log("mv: {} -> {}", source_path, destination_path)
    @command
    @completers('path')
    def rm(self, path):
        """Remove the document at *path*."""
        path = format_path(self._cli.current_path, path)
        self._resources.remove(path)
        self._cli.log("rm: {}", path)
    @command
    @completers('path')
    def template(self, path):
        """Save the document at *path* as a template of the matching kind."""
        path = format_path(self._cli.current_path, path)
        document = self._resources.get(path)
        # Map the document type to its template directory.
        if isinstance(document, Dashboard):
            template = 'dashboards'
        elif isinstance(document, Row):
            template = 'rows'
        elif isinstance(document, Panel):
            template = 'panels'
        else:
            raise CLIException("Unknown document type: {}".format(
                document.__class__.__name__))
        template_path = "/templates/{}".format(template)
        self._resources.save(template_path, document)
        self._cli.log("template: {} -> {}", path, template_path)
    @command
    @completers('path')
    def editor(self, path):
        """Open the document in the configured editor, re-import it on success."""
        path = format_path(self._cli.current_path, path)
        document = self._resources.get(path)
        tmp_file = tempfile.mktemp(suffix=".json")
        with open(tmp_file, 'w') as file:
            file.write(json_pretty(document.source))
        cmd = "{} {}".format(config['grafcli']['editor'], tmp_file)
        exit_status = os.system(cmd)
        # A zero exit status means the editor exited cleanly; import changes.
        if not exit_status:
            self._cli.log("Updating: {}".format(path))
            self.file_import(tmp_file, path)
        os.unlink(tmp_file)
    @command
    @completers('path')
    def merge(self, paths):
        """Open two or more documents in the merge tool, re-import each on success."""
        if len(paths) < 2:
            raise CLIException("Provide at least two paths")
        tmp_files = []
        # Dump every document to its own temp file for the merge tool.
        for path in paths:
            formatted_path = format_path(self._cli.current_path, path)
            document = self._resources.get(formatted_path)
            tmp_file = tempfile.mktemp(suffix=".json")
            tmp_files.append((formatted_path, tmp_file))
            with open(tmp_file, 'w') as file:
                file.write(json_pretty(document.source))
        cmd = "{} {}".format(config['grafcli'].get('mergetool', 'vimdiff'), ' '.join([v[1] for v in tmp_files]))
        exit_status = os.system(cmd)
        for path, tmp_file in tmp_files:
            if not exit_status:
                self._cli.log("Updating: {}".format(path))
                self.file_import(tmp_file, path)
            os.unlink(tmp_file)
    @command
    @completers('path')
    def pos(self, path, position):
        """Move a child document to *position* within its parent."""
        if not path:
            raise CLIException("No path provided")
        if not position:
            raise CLIException("No position provided")
        path = format_path(self._cli.current_path, path)
        parts = split_path(path)
        # Split off the last path component: parent path and child id.
        parent_path = '/'.join(parts[:-1])
        child = parts[-1]
        parent = self._resources.get(parent_path)
        parent.move_child(child, position)
        self._resources.save(parent_path, parent)
    @command
    @completers('path', 'system_path')
    def backup(self, path, system_path):
        """Export every document under *path* into a .tar.gz at *system_path*."""
        if not path:
            raise CLIException("No path provided")
        if not system_path:
            raise CLIException("No system path provided")
        path = format_path(self._cli.current_path, path)
        system_path = os.path.expanduser(system_path)
        documents = self._resources.list(path)
        if not documents:
            raise CLIException("Nothing to backup")
        tmp_dir = tempfile.mkdtemp()
        archive = tarfile.open(name=system_path, mode="w:gz")
        for doc_name in documents:
            file_name = to_file_format(doc_name)
            file_path = os.path.join(tmp_dir, file_name)
            doc_path = os.path.join(path, doc_name)
            self.file_export(doc_path, file_path)
            archive.add(file_path, arcname=file_name)
        archive.close()
        shutil.rmtree(tmp_dir)
    @command
    @completers('system_path', 'path')
    def restore(self, system_path, path):
        """Import every document from a backup archive into *path*."""
        system_path = os.path.expanduser(system_path)
        path = format_path(self._cli.current_path, path)
        tmp_dir = tempfile.mkdtemp()
        with tarfile.open(name=system_path, mode="r:gz") as archive:
            archive.extractall(path=tmp_dir)
        for name in os.listdir(tmp_dir):
            try:
                file_path = os.path.join(tmp_dir, name)
                doc_path = os.path.join(path, from_file_format(name))
                self.file_import(file_path, doc_path)
            except CommandCancelled:
                # Skipping a single document should not abort the restore.
                pass
        shutil.rmtree(tmp_dir)
    @command
    @completers('path', 'system_path')
    def file_export(self, path, system_path):
        """Write the document at *path* to *system_path* as pretty JSON."""
        path = format_path(self._cli.current_path, path)
        system_path = os.path.expanduser(system_path)
        document = self._resources.get(path)
        with open(system_path, 'w') as file:
            file.write(json_pretty(document.source))
        self._cli.log("export: {} -> {}", path, system_path)
    @command
    @completers('system_path', 'path')
    def file_import(self, system_path, path, match_slug=False):
        """Read a JSON file from *system_path* and save it as a document at *path*."""
        system_path = os.path.expanduser(system_path)
        path = format_path(self._cli.current_path, path)
        with open(system_path, 'r') as file:
            content = file.read()
        document = Document.from_source(json.loads(content))
        if match_slug:
            path = self._match_slug(document, path)
        self._resources.save(path, document)
        self._cli.log("import: {} -> {}", system_path, path)
    def _match_slug(self, document, destination):
        """Resolve *destination* to an existing child named "<id>-<slug>", if any."""
        pattern = re.compile(r'^\d+-{}$'.format(document.slug))
        children = self._resources.list(destination)
        matches = [child for child in children
                   if pattern.search(child)]
        if not matches:
            return destination
        # NOTE(review): this tolerates two matches but rejects three or more --
        # confirm whether the intended check was `> 1`.
        if len(matches) > 2:
            raise CLIException("Too many matching slugs, be more specific")
        return "{}/{}".format(destination, matches[0])
| m110/grafcli | grafcli/commands.py | Python | mit | 8,828 |
"""Config flow to configure Motion Blinds using their WLAN API."""
from socket import gaierror
from motionblinds import AsyncMotionMulticast, MotionDiscovery
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.components import network
from homeassistant.const import CONF_API_KEY, CONF_HOST
from homeassistant.core import callback
from .const import (
CONF_INTERFACE,
CONF_WAIT_FOR_PUSH,
DEFAULT_GATEWAY_NAME,
DEFAULT_INTERFACE,
DEFAULT_WAIT_FOR_PUSH,
DOMAIN,
)
from .gateway import ConnectMotionGateway
# Schema of the initial user step: the host is optional because the flow can
# also discover gateways on the local network.
CONFIG_SCHEMA = vol.Schema(
    {
        vol.Optional(CONF_HOST): str,
    }
)
class OptionsFlowHandler(config_entries.OptionsFlow):
    """Options flow for the Motion Blinds integration."""

    def __init__(self, config_entry: config_entries.ConfigEntry) -> None:
        """Store the config entry whose options are being edited."""
        self.config_entry = config_entry

    async def async_step_init(self, user_input=None):
        """Show the options form and persist the submitted values."""
        if user_input is not None:
            return self.async_create_entry(title="", data=user_input)

        # Pre-fill the form with the currently stored option (or its default).
        current_wait = self.config_entry.options.get(
            CONF_WAIT_FOR_PUSH, DEFAULT_WAIT_FOR_PUSH
        )
        schema = vol.Schema(
            {vol.Optional(CONF_WAIT_FOR_PUSH, default=current_wait): bool}
        )
        return self.async_show_form(
            step_id="init", data_schema=schema, errors={}
        )
class MotionBlindsFlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
    """Handle a Motion Blinds config flow."""
    VERSION = 1
    def __init__(self):
        """Initialize the Motion Blinds flow."""
        # Gateway host entered by the user or found via discovery.
        self._host = None
        # Candidate gateway IPs found during discovery.
        self._ips = []
        # Schema of the "connect" step, built once interfaces are known.
        self._config_settings = None
    @staticmethod
    @callback
    def async_get_options_flow(config_entry) -> OptionsFlowHandler:
        """Get the options flow."""
        return OptionsFlowHandler(config_entry)
    async def async_step_user(self, user_input=None):
        """Handle a flow initialized by the user."""
        errors = {}
        if user_input is not None:
            self._host = user_input.get(CONF_HOST)
            # A manually entered host skips discovery entirely.
            if self._host is not None:
                return await self.async_step_connect()
            # Use MotionGateway discovery
            discover_class = MotionDiscovery()
            # discover() blocks, so run it in the executor.
            gateways = await self.hass.async_add_executor_job(discover_class.discover)
            self._ips = list(gateways)
            if len(self._ips) == 1:
                self._host = self._ips[0]
                return await self.async_step_connect()
            if len(self._ips) > 1:
                # Several gateways found: let the user pick one.
                return await self.async_step_select()
            errors["base"] = "discovery_error"
        return self.async_show_form(
            step_id="user", data_schema=CONFIG_SCHEMA, errors=errors
        )
    async def async_step_select(self, user_input=None):
        """Handle multiple motion gateways found."""
        if user_input is not None:
            self._host = user_input["select_ip"]
            return await self.async_step_connect()
        select_schema = vol.Schema({vol.Required("select_ip"): vol.In(self._ips)})
        return self.async_show_form(step_id="select", data_schema=select_schema)
    async def async_step_connect(self, user_input=None):
        """Connect to the Motion Gateway."""
        errors = {}
        if user_input is not None:
            key = user_input[CONF_API_KEY]
            multicast_interface = user_input[CONF_INTERFACE]
            # check socket interface
            if multicast_interface != DEFAULT_INTERFACE:
                # Probe the interface by starting and stopping a listener.
                motion_multicast = AsyncMotionMulticast(interface=multicast_interface)
                try:
                    await motion_multicast.Start_listen()
                    motion_multicast.Stop_listen()
                except gaierror:
                    errors[CONF_INTERFACE] = "invalid_interface"
                    return self.async_show_form(
                        step_id="connect",
                        data_schema=self._config_settings,
                        errors=errors,
                    )
            connect_gateway_class = ConnectMotionGateway(self.hass, multicast=None)
            if not await connect_gateway_class.async_connect_gateway(self._host, key):
                return self.async_abort(reason="connection_error")
            motion_gateway = connect_gateway_class.gateway_device
            # The gateway MAC uniquely identifies the config entry.
            mac_address = motion_gateway.mac
            await self.async_set_unique_id(mac_address)
            self._abort_if_unique_id_configured()
            return self.async_create_entry(
                title=DEFAULT_GATEWAY_NAME,
                data={
                    CONF_HOST: self._host,
                    CONF_API_KEY: key,
                    CONF_INTERFACE: multicast_interface,
                },
            )
        (interfaces, default_interface) = await self.async_get_interfaces()
        # 16-character API key as required by the gateway's WLAN API.
        self._config_settings = vol.Schema(
            {
                vol.Required(CONF_API_KEY): vol.All(str, vol.Length(min=16, max=16)),
                vol.Optional(CONF_INTERFACE, default=default_interface): vol.In(
                    interfaces
                ),
            }
        )
        return self.async_show_form(
            step_id="connect", data_schema=self._config_settings, errors=errors
        )
    async def async_get_interfaces(self):
        """Get list of interface to use."""
        interfaces = [DEFAULT_INTERFACE]
        enabled_interfaces = []
        default_interface = DEFAULT_INTERFACE
        adapters = await network.async_get_adapters(self.hass)
        for adapter in adapters:
            # Only consider adapters that have an IPv4 address.
            if ipv4s := adapter["ipv4"]:
                ip4 = ipv4s[0]["address"]
                interfaces.append(ip4)
                if adapter["enabled"]:
                    enabled_interfaces.append(ip4)
                    if adapter["default"]:
                        default_interface = ip4
        # With exactly one enabled adapter, prefer it as the default.
        if len(enabled_interfaces) == 1:
            default_interface = enabled_interfaces[0]
        return (interfaces, default_interface)
| jawilson/home-assistant | homeassistant/components/motion_blinds/config_flow.py | Python | apache-2.0 | 6,224 |
# Copyright (c) 2012, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
from .posterior import Posterior
from ...util.linalg import mdot, jitchol, backsub_both_sides, tdot, dtrtrs, dtrtri, dpotri, dpotrs, symmetrify
from ...util import diag
from GPy.core.parameterization.variational import VariationalPosterior
import numpy as np
from . import LatentFunctionInference
# Precomputed log(2*pi) constant for Gaussian likelihood terms.
log_2_pi = np.log(2*np.pi)
import logging, itertools
# Module-level logger for the variational DTC inference code.
logger = logging.getLogger('vardtc')
class VarDTC(LatentFunctionInference):
    """
    An object for inference when the likelihood is Gaussian, but we want to do sparse inference.

    The function self.inference returns a Posterior object, which summarizes
    the posterior.

    For efficiency, we sometimes work with the cholesky of Y*Y.T. To save repeatedly recomputing this, we cache it.
    """
    # Jitter added to diagonals / variance floors for numerical stability.
    const_jitter = 1e-8

    def __init__(self, limit=1):
        from paramz.caching import Cacher
        self.limit = limit
        self.get_trYYT = Cacher(self._get_trYYT, limit)
        self.get_YYTfactor = Cacher(self._get_YYTfactor, limit)

    def set_limit(self, limit):
        """Resize both caches to hold `limit` entries."""
        self.get_trYYT.limit = limit
        self.get_YYTfactor.limit = limit

    def _get_trYYT(self, Y):
        """Return trace(Y Y^T) == sum of squares of Y."""
        return np.einsum("ij,ij->", Y, Y)
        # faster than, but same as:
        # return np.sum(np.square(Y))

    def __getstate__(self):
        # has to be overridden, as Cacher objects cannot be pickled.
        return self.limit

    def __setstate__(self, state):
        # has to be overridden, as Cacher objects cannot be pickled.
        self.limit = state
        from paramz.caching import Cacher
        self.get_trYYT = Cacher(self._get_trYYT, self.limit)
        self.get_YYTfactor = Cacher(self._get_YYTfactor, self.limit)

    def _get_YYTfactor(self, Y):
        """
        find a matrix L which satisfies LLT = YYT.

        Note that L may have fewer columns than Y.
        """
        N, D = Y.shape
        if (N>=D):
            return Y.view(np.ndarray)
        else:
            return jitchol(tdot(Y))

    def get_VVTfactor(self, Y, prec):
        return Y * prec # TODO cache this, and make it effective

    def inference(self, kern, X, Z, likelihood, Y, Y_metadata=None, mean_function=None, precision=None, Lm=None, dL_dKmm=None, psi0=None, psi1=None, psi2=None, Z_tilde=None):
        """Compute the variational-DTC posterior, log marginal bound and gradients."""
        num_data, output_dim = Y.shape
        num_inducing = Z.shape[0]
        uncertain_inputs = isinstance(X, VariationalPosterior)
        if mean_function is not None:
            mean = mean_function.f(X)
        else:
            mean = 0
        if precision is None:
            #assume Gaussian likelihood
            precision = 1./np.fmax(likelihood.gaussian_variance(Y_metadata), self.const_jitter)
        if precision.ndim == 1:
            precision = precision[:, None]
        # More than one precision value means heteroscedastic noise.
        het_noise = precision.size > 1
        if (het_noise or uncertain_inputs) and mean_function is not None:
            raise ValueError('Mean function not implemented with uncertain inputs or heteroscedasticity')
        VVT_factor = precision*(Y-mean)
        trYYT = self.get_trYYT(Y-mean)
        # kernel computations, using BGPLVM notation
        if Lm is None:
            Kmm = kern.K(Z).copy()
            diag.add(Kmm, self.const_jitter)
            Lm = jitchol(Kmm)
        else:
            Kmm = tdot(Lm)
            symmetrify(Kmm)
        # The rather complex computations of A, and the psi stats
        if uncertain_inputs:
            if psi0 is None:
                psi0 = kern.psi0(Z, X)
            if psi1 is None:
                psi1 = kern.psi1(Z, X)
            if het_noise:
                if psi2 is None:
                    psi2_beta = (kern.psi2n(Z, X) * precision[:, :, None]).sum(0)
                else:
                    psi2_beta = (psi2 * precision[:, :, None]).sum(0)
            else:
                if psi2 is None:
                    psi2_beta = kern.psi2(Z,X) * precision
                elif psi2.ndim == 3:
                    psi2_beta = psi2.sum(0) * precision
                else:
                    psi2_beta = psi2 * precision
            LmInv = dtrtri(Lm)
            A = LmInv.dot(psi2_beta.dot(LmInv.T))
        else:
            if psi0 is None:
                psi0 = kern.Kdiag(X)
            if psi1 is None:
                psi1 = kern.K(X, Z)
            # NOTE: the het_noise and homoscedastic branches computed the
            # identical expression here; collapsed the redundant if/else.
            tmp = psi1 * (np.sqrt(precision))
            tmp, _ = dtrtrs(Lm, tmp.T, lower=1)
            A = tdot(tmp) #print A.sum()
        # factor B
        B = np.eye(num_inducing) + A
        LB = jitchol(B)
        # back substutue C into psi1Vf
        tmp, _ = dtrtrs(Lm, psi1.T, lower=1, trans=0)
        _LBi_Lmi_psi1, _ = dtrtrs(LB, tmp, lower=1, trans=0)
        _LBi_Lmi_psi1Vf = np.dot(_LBi_Lmi_psi1, VVT_factor)
        tmp, _ = dtrtrs(LB, _LBi_Lmi_psi1Vf, lower=1, trans=1)
        Cpsi1Vf, _ = dtrtrs(Lm, tmp, lower=1, trans=1)
        # data fit and derivative of L w.r.t. Kmm
        dL_dm = -np.dot((_LBi_Lmi_psi1.T.dot(_LBi_Lmi_psi1))
                        - np.eye(Y.shape[0]), VVT_factor)
        delit = tdot(_LBi_Lmi_psi1Vf)
        data_fit = np.trace(delit)
        DBi_plus_BiPBi = backsub_both_sides(LB, output_dim * np.eye(num_inducing) + delit)
        if dL_dKmm is None:
            delit = -0.5 * DBi_plus_BiPBi
            delit += -0.5 * B * output_dim
            delit += output_dim * np.eye(num_inducing)
            # Compute dL_dKmm
            dL_dKmm = backsub_both_sides(Lm, delit)
        # derivatives of L w.r.t. psi
        dL_dpsi0, dL_dpsi1, dL_dpsi2 = _compute_dL_dpsi(num_inducing, num_data, output_dim, precision, Lm,
                                                        VVT_factor, Cpsi1Vf, DBi_plus_BiPBi,
                                                        psi1, het_noise, uncertain_inputs)
        # log marginal likelihood
        log_marginal = _compute_log_marginal_likelihood(likelihood, num_data, output_dim, precision, het_noise,
                                                        psi0, A, LB, trYYT, data_fit, Y)
        if Z_tilde is not None:
            # This is a correction term for the log marginal likelihood
            # In EP this is log Z_tilde, which is the difference between the
            # Gaussian marginal and Z_EP
            log_marginal += Z_tilde
        #noise derivatives
        dL_dR = _compute_dL_dR(likelihood,
                               het_noise, uncertain_inputs, LB,
                               _LBi_Lmi_psi1Vf, DBi_plus_BiPBi, Lm, A,
                               psi0, psi1, precision,
                               data_fit, num_data, output_dim, trYYT, Y, VVT_factor)
        dL_dthetaL = likelihood.exact_inference_gradients(dL_dR,Y_metadata)
        #put the gradients in the right places
        if uncertain_inputs:
            grad_dict = {'dL_dKmm': dL_dKmm,
                         'dL_dpsi0':dL_dpsi0,
                         'dL_dpsi1':dL_dpsi1,
                         'dL_dpsi2':dL_dpsi2,
                         'dL_dthetaL':dL_dthetaL}
        else:
            grad_dict = {'dL_dKmm': dL_dKmm,
                         'dL_dKdiag':dL_dpsi0,
                         'dL_dKnm':dL_dpsi1,
                         'dL_dthetaL':dL_dthetaL,
                         'dL_dm':dL_dm}
        #get sufficient things for posterior prediction
        #TODO: do we really want to do this in the loop?
        if VVT_factor.shape[1] == Y.shape[1]:
            woodbury_vector = Cpsi1Vf # == Cpsi1V
        else:
            # Fixed: removed leftover debug code (a stray print and an
            # ipdb.set_trace() breakpoint) that would hang production runs.
            psi1V = np.dot(Y.T*precision, psi1).T
            tmp, _ = dtrtrs(Lm, psi1V, lower=1, trans=0)
            tmp, _ = dpotrs(LB, tmp, lower=1)
            woodbury_vector, _ = dtrtrs(Lm, tmp, lower=1, trans=1)
        #Bi, _ = dpotri(LB, lower=1)
        #symmetrify(Bi)
        Bi = -dpotri(LB, lower=1)[0]
        diag.add(Bi, 1)
        woodbury_inv = backsub_both_sides(Lm, Bi)
        #construct a posterior object
        post = Posterior(woodbury_inv=woodbury_inv, woodbury_vector=woodbury_vector, K=Kmm, mean=None, cov=None, K_chol=Lm)
        return post, log_marginal, grad_dict
def _compute_dL_dpsi(num_inducing, num_data, output_dim, beta, Lm, VVT_factor, Cpsi1Vf, DBi_plus_BiPBi, psi1, het_noise, uncertain_inputs):
    # Gradients of the variational bound w.r.t. the psi statistics
    # (psi0, psi1, psi2); psi2 gradients are folded back into psi1 when
    # the inputs are certain (psi1 == Kmn in that case).
    dL_dpsi0 = -0.5 * output_dim * (beta* np.ones([num_data, 1])).flatten()
    dL_dpsi1 = np.dot(VVT_factor, Cpsi1Vf.T)
    dL_dpsi2_beta = 0.5 * backsub_both_sides(Lm, output_dim * np.eye(num_inducing) - DBi_plus_BiPBi)
    if het_noise:
        if uncertain_inputs:
            # Per-datum precision scales each psi2 gradient slice.
            dL_dpsi2 = beta[:, None] * dL_dpsi2_beta[None, :, :]
        else:
            dL_dpsi1 += 2.*np.dot(dL_dpsi2_beta, (psi1 * beta).T).T
            dL_dpsi2 = None
    else:
        dL_dpsi2 = beta * dL_dpsi2_beta
        if not uncertain_inputs:
            # subsume back into psi1 (==Kmn)
            dL_dpsi1 += 2.*np.dot(psi1, dL_dpsi2)
            dL_dpsi2 = None
    return dL_dpsi0, dL_dpsi1, dL_dpsi2
def _compute_dL_dR(likelihood, het_noise, uncertain_inputs, LB, _LBi_Lmi_psi1Vf, DBi_plus_BiPBi, Lm, A, psi0, psi1, beta, data_fit, num_data, output_dim, trYYT, Y, VVT_factr=None):
    # the partial derivative vector for the likelihood
    if likelihood.size == 0:
        # save computation here.
        dL_dR = None
    elif het_noise:
        if uncertain_inputs:
            raise NotImplementedError("heteroscedatic derivates with uncertain inputs not implemented")
        else:
            #from ...util.linalg import chol_inv
            #LBi = chol_inv(LB)
            # Explicit triangular inverse of LB via back-substitution.
            LBi, _ = dtrtrs(LB,np.eye(LB.shape[0]))
            Lmi_psi1, nil = dtrtrs(Lm, psi1.T, lower=1, trans=0)
            _LBi_Lmi_psi1, _ = dtrtrs(LB, Lmi_psi1, lower=1, trans=0)
            # Accumulate the per-datum gradient term by term.
            dL_dR = -0.5 * beta + 0.5 * VVT_factr**2
            dL_dR += 0.5 * output_dim * (psi0 - np.sum(Lmi_psi1**2,0))[:,None] * beta**2
            dL_dR += 0.5*np.sum(mdot(LBi.T,LBi,Lmi_psi1)*Lmi_psi1,0)[:,None]*beta**2
            dL_dR += -np.dot(_LBi_Lmi_psi1Vf.T,_LBi_Lmi_psi1).T * Y * beta**2
            dL_dR += 0.5*np.dot(_LBi_Lmi_psi1Vf.T,_LBi_Lmi_psi1).T**2 * beta**2
    else:
        # likelihood is not heteroscedatic
        dL_dR = -0.5 * num_data * output_dim * beta + 0.5 * trYYT * beta ** 2
        dL_dR += 0.5 * output_dim * (psi0.sum() * beta ** 2 - np.trace(A) * beta)
        dL_dR += beta * (0.5 * np.sum(A * DBi_plus_BiPBi) - data_fit)
    return dL_dR
def _compute_log_marginal_likelihood(likelihood, num_data, output_dim, beta, het_noise, psi0, A, LB, trYYT, data_fit, Y):
    """Assemble the variational lower bound from its four additive terms.

    Note: the ``likelihood`` argument is accepted but not used here.
    """
    # Log-determinant term from the Cholesky factor of B.
    lik_3 = -output_dim * (np.sum(np.log(np.diag(LB))))
    # Data-fit term.
    lik_4 = 0.5 * data_fit
    if het_noise:
        # Per-datum precisions: beta is an array.
        lik_1 = -0.5 * num_data * output_dim * np.log(2. * np.pi) + 0.5 * output_dim * np.sum(np.log(beta)) - 0.5 * np.sum(beta.ravel() * np.square(Y).sum(axis=-1))
        lik_2 = -0.5 * output_dim * (np.sum(beta.flatten() * psi0) - np.trace(A))
    else:
        # Single shared precision: beta is a scalar.
        lik_1 = -0.5 * num_data * output_dim * (np.log(2. * np.pi) - np.log(beta)) - 0.5 * beta * trYYT
        lik_2 = -0.5 * output_dim * (np.sum(beta * psi0) - np.trace(A))
    return lik_1 + lik_2 + lik_3 + lik_4
| befelix/GPy | GPy/inference/latent_function_inference/var_dtc.py | Python | bsd-3-clause | 11,145 |
class IdeCommandDelegate:
    """Base class for IDE-specific delegates; both hooks default to no-ops."""
    def override_command(self, path):
        """Hook allowing a delegate to rewrite the command; default does nothing."""
        pass
    def before_run(self, config, docker_config):
        """Hook invoked before the run starts; default does nothing."""
        pass
| bhdouglass/clickable | clickable/commands/idedelegates/idedelegate.py | Python | gpl-3.0 | 142 |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from libcloud.compute.providers import Provider
from libcloud.compute.base import Node, NodeImage, NodeSize
from libcloud.compute.drivers.cloudstack import CloudStackNodeDriver
class KTUCloudNodeDriver(CloudStackNodeDriver):
    """Driver for KTUCloud Compute platform."""

    # Sentinel id reported by product types that carry no disk offering.
    EMPTY_DISKOFFERINGID = '0'

    type = Provider.KTUCLOUD
    name = 'KTUCloud'
    website = 'https://ucloudbiz.olleh.com/'

    def list_images(self, location=None):
        """List available images (product types).

        NOTE(review): ``location`` is accepted for API compatibility but is
        not forwarded to the backend call. The previous implementation built
        an unused ``args`` dict (templatefilter/zoneid) that was never passed
        on; that dead code has been removed -- behaviour is unchanged.
        """
        imgs = self._sync_request(command='listAvailableProductTypes',
                                  method='GET')
        images = []
        for img in imgs['producttypes']:
            images.append(
                NodeImage(
                    img['serviceofferingid'],
                    img['serviceofferingdesc'],
                    self,
                    {'hypervisor': '',
                     'format': '',
                     'os': img['templatedesc'],
                     'templateid': img['templateid'],
                     'zoneid': img['zoneid']}
                )
            )
        return images

    def list_sizes(self, location=None):
        """List sizes (disk offerings).

        Sizes report zero ram/disk/bandwidth/price because the product-type
        listing does not expose those values.
        """
        szs = self._sync_request('listAvailableProductTypes')
        sizes = []
        for sz in szs['producttypes']:
            # Fall back to the sentinel when no disk offering is attached.
            diskofferingid = sz.get('diskofferingid',
                                    self.EMPTY_DISKOFFERINGID)
            sizes.append(NodeSize(
                diskofferingid,
                sz['diskofferingdesc'],
                0, 0, 0, 0, self)
            )
        return sizes

    def create_node(self, name, size, image, location=None, **kwargs):
        """Deploy a virtual machine and return it as a :class:`Node`.

        Accepts an optional ``usageplantype`` keyword (defaults to 'hourly').
        """
        params = {'displayname': name,
                  'serviceofferingid': image.id,
                  'templateid': str(image.extra['templateid']),
                  'zoneid': str(image.extra['zoneid'])}
        # Default to hourly billing unless the caller specifies a plan.
        usageplantype = kwargs.pop('usageplantype', None)
        if usageplantype is None:
            params['usageplantype'] = 'hourly'
        else:
            params['usageplantype'] = usageplantype
        if size.id != self.EMPTY_DISKOFFERINGID:
            params['diskofferingid'] = size.id
        result = self._async_request(
            command='deployVirtualMachine',
            params=params,
            method='GET')
        node = result['virtualmachine']
        return Node(
            id=node['id'],
            name=node['displayname'],
            state=self.NODE_STATE_MAP[node['state']],
            public_ips=[],
            private_ips=[],
            driver=self,
            extra={
                'zoneid': image.extra['zoneid'],
                'ip_addresses': [],
                'forwarding_rules': [],
            }
        )
| Hybrid-Cloud/badam | patches_tool/aws_patch/aws_deps/libcloud/compute/drivers/ktucloud.py | Python | apache-2.0 | 3,709 |
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import six
from .biology import BiologyType
from .cell import Cell
from .dataObject import DatatypeProperty, ObjectProperty
__all__ = ['Connection']
class SynapseType:
    """Constants for the kind of synapse stored in ``Connection.syntype``."""
    # Chemical synapse; stored as the string 'send'.
    Chemical = 'send'
    # Electrical synapse (gap junction).
    GapJunction = 'gapJunction'
class Termination:
    """Constants for where a connection terminates (``Connection.termination``)."""
    Neuron = 'neuron'
    Muscle = 'muscle'
class Connection(BiologyType):
    """A synaptic connection between a pre-synaptic and a post-synaptic cell.

    String values passed for ``termination`` and ``syntype`` are
    normalized (case-insensitively) to the constants in
    :class:`Termination` and :class:`SynapseType` at construction time.
    """

    class_context = BiologyType.class_context

    post_cell = ObjectProperty(value_type=Cell)
    ''' The post-synaptic cell '''

    pre_cell = ObjectProperty(value_type=Cell)
    ''' The pre-synaptic cell '''

    number = DatatypeProperty()
    ''' The weight of the connection '''

    synclass = DatatypeProperty()
    ''' The kind of Neurotransmitter (if any) sent between `pre_cell` and `post_cell` '''

    syntype = DatatypeProperty()
    ''' The kind of synaptic connection. 'gapJunction' indicates a gap junction and 'send' a chemical synapse '''

    termination = DatatypeProperty()
    ''' Where the connection terminates. Inferred from type of post_cell at initialization '''

    def __init__(self,
                 pre_cell=None,
                 post_cell=None,
                 number=None,
                 syntype=None,
                 synclass=None,
                 termination=None,
                 **kwargs):
        super(Connection, self).__init__(pre_cell=pre_cell,
                                         post_cell=post_cell,
                                         number=number,
                                         syntype=syntype,
                                         synclass=synclass,
                                         **kwargs)
        # Normalize a string termination value to the Termination constants;
        # unrecognized strings are silently dropped.
        if isinstance(termination, six.string_types):
            termination = termination.lower()
            if termination in ('neuron', Termination.Neuron):
                self.termination(Termination.Neuron)
            elif termination in ('muscle', Termination.Muscle):
                self.termination(Termination.Muscle)

        # Likewise normalize a string syntype value to SynapseType constants.
        if isinstance(syntype, six.string_types):
            syntype = syntype.lower()
            if syntype in ('send', SynapseType.Chemical):
                self.syntype(SynapseType.Chemical)
            elif syntype in ('gapjunction', SynapseType.GapJunction):
                self.syntype(SynapseType.GapJunction)

    def defined_augment(self):
        """A Connection is "defined" once both cells and the syntype are set."""
        return (self.pre_cell.has_defined_value() and
                self.post_cell.has_defined_value() and
                self.syntype.has_defined_value())

    def identifier_augment(self):
        """Derive a stable identifier from the N3 ids of (pre, post, syntype)."""
        data = (self.pre_cell,
                self.post_cell,
                self.syntype)
        data = tuple(x.defined_values[0].identifier.n3() for x in data)
        data = "".join(data)
        return self.make_identifier(data)

    def __str__(self):
        # Show only the properties that actually have values; fall back to
        # the parent __str__ when nothing is set.
        nom = []
        if self.pre_cell.has_defined_value():
            nom.append(('pre_cell', self.pre_cell.values[0]))
        if self.post_cell.has_defined_value():
            nom.append(('post_cell', self.post_cell.values[0]))
        if self.syntype.has_defined_value():
            nom.append(('syntype', self.syntype.values[0]))
        if self.termination.has_defined_value():
            nom.append(('termination', self.termination.values[0]))
        if self.number.has_defined_value():
            nom.append(('number', self.number.values[0]))
        if self.synclass.has_defined_value():
            nom.append(('synclass', self.synclass.values[0]))
        if len(nom) == 0:
            return super(Connection, self).__str__()
        else:
            return 'Connection(' + \
                   ', '.join('{}={}'.format(n[0], n[1]) for n in nom) + \
                   ')'
# Classes from this module that are registered with the yarom mapper.
__yarom_mapped_classes__ = (Connection,)
| gsarma/PyOpenWorm | PyOpenWorm/connection.py | Python | mit | 3,807 |
import os
from datetime import datetime
from time import sleep
from types import FunctionType
from copy import copy
from numpy import array
import dynamixel
from Motion import lInterp, scaleTime
'''
Much inspiration taken from http://code.google.com/p/pydynamixel/
'''
'''Min and max values for the QuadraTot robot, based on some tests.
Note that these values will avoid collisions only for each servo
individually. More complex collisions are still possible given
certain vectors of motor position.
'''
from ConstantsQuadratot import *
import inspect
def fileLine():
    """Return (filename, line number) of the caller's caller.

    NOTE(review): despite the original one-line summary, this returns a
    two-tuple for the frame *two* levels up the stack (``f_back.f_back``),
    which matches its use inside methods like ``toc()`` below.
    """
    #print dir(inspect.currentframe().f_back.f_back)
    #print inspect.currentframe().f_back.f_back.f_code.co_filename
    return inspect.currentframe().f_back.f_back.f_code.co_filename, inspect.currentframe().f_back.f_back.f_lineno
    #return inspect.getouterframes( inspect.currentframe() )[1].f_back.f_lineno
class RobotFailure(Exception):
    """Raised when servo communication fails or a motion sanity check fails."""
    pass
class RobotQuadratot():
    '''Controller for the 9-servo QuadraTot robot over a Dynamixel bus.

    Wraps a dynamixel.DynamixelNetwork and provides clocked motion
    playback (see run()), position commanding with limit cropping, and
    health checks (pingAll, shimmy).

    NOTE: this is Python 2 code (print statements, py2 idioms).
    '''

    def __init__(self, silentNetFail = False, expectedIds = None, commandRate = 40,
                 loud = False, skipInit = False):
        '''Initialize the robot.

        Keyword arguments:

        silentNetFail -- Whether or not to fail silently if the
                         network does not find all the dynamixel
                         servos.
        expectedIds -- Which servo ids to expect on the network
                       (default: range(9)).
        commandRate -- Rate at which the motors should be commanded,
                       in Hertz.  Default: 40.
        loud -- Print commanded positions as they are sent.
        skipInit -- Skip scanning/configuring the servo network.
        '''

        # The number of Dynamixels on the bus.
        self.expectedIds = expectedIds if expectedIds is not None else range(9)
        self.nServos = len(self.expectedIds)
        self.silentNetFail = silentNetFail
        # Seconds between commands, derived from the command rate in Hz.
        self.sleep = 1. / float(commandRate)
        self.loud = loud

        #if self.nServos != 9:
        #    pass
        #    #raise Exception('Unfortunately, the RobotQuadratot class currently assumes 9 servos.')

        # Default baud rate of the USB2Dynamixel device.
        self.baudRate = 1000000

        if not skipInit:
            self.initServos()

    def initServos(self):
        '''Open the serial port, scan for servos, and configure each one.

        Raises RobotFailure if fewer servos than expected respond (unless
        silentNetFail was set).
        '''
        # Set your serial port accordingly.
        if os.name == "posix":
            possibilities = ['/dev/ttyUSB0', '/dev/ttyUSB1']
            portName = None
            for pos in possibilities:
                if os.path.exists(pos):
                    portName = pos
            if portName is None:
                raise Exception('Could not find any of %s' % repr(possibilities))
        else:
            portName = "COM11"

        serial = dynamixel.SerialStream(port = portName,
                                        baudrate = self.baudRate,
                                        timeout = 1)
        self.net = dynamixel.DynamixelNetwork(serial)

        #print 'Prescan...'
        #print self.net.get_dynamixels()

        print "Scanning for Dynamixels...",
        self.net.scan(min(self.expectedIds), max(self.expectedIds))

        self.actuators = []
        self.actuatorIds = []

        for dyn in self.net.get_dynamixels():
            print dyn.id,
            self.actuatorIds.append(dyn.id)
            self.actuators.append(self.net[dyn.id])
        print "...Done"

        if len(self.actuators) != self.nServos and not self.silentNetFail:
            raise RobotFailure('Expected to find %d servos on network, but only got %d (%s)'
                               % (self.nServos, len(self.actuators), repr(self.actuatorIds)))

        # Configure every servo for synchronized, torque-enabled motion.
        for actuator in self.actuators:
            #actuator.moving_speed = 90
            #actuator.synchronized = True
            #actuator.torque_enable = True
            #actuator.torque_limit = 1000
            #actuator.max_torque = 1000
            actuator.moving_speed = 250
            actuator.synchronized = True
            actuator.torque_enable = True
            actuator.torque_limit = 1000
            actuator.max_torque = 1000
            actuator.ccw_compliance_margin = 3
            actuator.cw_compliance_margin = 3
        self.net.synchronize()

        #print 'options are:'
        #for op in dir(self.actuators[0]):
        #    print '  ', op
        #for ac in self.actuators:
        #    print 'Voltage at', ac, 'is', ac.current_voltage, 'load is', ac.current_load
        #    ac.read_all()

        self.currentPos = None
        self.resetClock()

    def run(self, motionFunction, runSeconds = 10, resetFirst = True,
            interpBegin = 0, interpEnd = 0, timeScale = 1, logFile = None,
            extraLogInfoFn = None):
        '''Run the robot with a given motion generating function.

        Positional arguments:

        motionFunction -- Function used to generate the desired motor
                          positions.  This function must take a single
                          argument -- time, in seconds -- and must
                          return the desired length 9 vector of motor
                          positions.  The current implementation
                          expects that this function will be
                          deterministic.

        Keyword arguments:

        runSeconds -- How many seconds to run for.  This is in
                      addition to the time added for interpBegin and
                      interpEnd, if any.  Default: 10
        resetFirst -- Begin each run by resetting the robot to its
                      base position, currently implemented as a
                      transition from CURRENT -> POS_FLAT ->
                      POS_READY.  Default: True
        interpBegin -- Number of seconds over which to interpolate
                      from current position to commanded positions.
                      If this is not None, the robot will spend the
                      first interpBegin seconds interpolating from its
                      current position to that specified by
                      motionFunction.  This should probably be used
                      for motion models which do not return POS_READY
                      at time 0.  Affected by timeScale. Default: None
        interpEnd -- Same as interpBegin, but at the end of motion.
                      If interpEnd is not None, interpolation is
                      performed from final commanded position to
                      POS_READY, over the given number of
                      seconds.  Affected by timeScale. Default: None
        timeScale -- Factor by which time should be scaled during this
                      run, higher is slower.  Default: 1
        logFile -- File to log time/positions to, should already be
                      opened.  Default: None
        extraLogInfoFn -- Function to call and append info to every
                      line the log file.  Should return a
                      string. Default: None
        '''
        #net, actuators = initialize()
        #def run(self, motionFunction, runSeconds = 10, resetFirst = True
        #    interpBegin = 0, interpEnd = 0):
        if self.loud:
            print 'Starting motion.'

        # Verify servo communication before moving; one retry via re-init.
        failures = self.pingAll()
        if failures:
            self.initServos()
            failures = self.pingAll()
        if failures:
            raise RobotFailure('Could not communicate with servos %s at beginning of run.' % repr(failures))

        self.resetClock()
        self.currentPos = self.readCurrentPosition()

        if logFile:
            #print >>logFile, '# time, servo goal positions (9), servo actual positions (9), robot location (x, y, age)'
            print >>logFile, '# time, servo goal positions (9), robot location (x, y, age)'

        # Reset the robot position, if desired
        if resetFirst:
            self.interpMove(self.readCurrentPosition(), POS_FLAT, 3)
            self.interpMove(POS_FLAT, POS_READY, 3)
            #self.interpMove(POS_READY, POS_HALFSTAND, 4)
            self.currentPos = POS_READY
            self.resetClock()

        # Begin with a segment smoothly interpolated between the
        # current position and the motion model.
        if interpBegin is not None:
            self.interpMove(self.currentPos,
                            scaleTime(motionFunction, timeScale),
                            interpBegin * timeScale,
                            logFile, extraLogInfoFn)
            self.currentPos = motionFunction(self.time)

        # Main motion segment
        self.interpMove(scaleTime(motionFunction, timeScale),
                        scaleTime(motionFunction, timeScale),
                        runSeconds * timeScale,
                        logFile, extraLogInfoFn)
        self.currentPos = motionFunction(self.time)

        # End with a segment smoothly interpolated between the
        # motion model and a ready position.
        if interpEnd is not None:
            self.interpMove(scaleTime(motionFunction, timeScale),
                            POS_READY,
                            interpEnd * timeScale,
                            logFile, extraLogInfoFn)

        # Post-run health check, with one second-chance retry.
        failures = self.pingAll()
        if failures:
            # give it a second chance
            sleep(1)
            failures = self.pingAll()
        if failures:
            raise RobotFailure('Servos %s may have died during run.' % repr(failures))

    def interpMove(self, start, end, seconds, logFile=None, extraLogInfoFn=None):
        '''Moves between start and end over seconds seconds.  start
        and end may be functions of the time.'''
        self.updateClock()
        timeStart = self.time
        timeEnd = self.time + seconds
        ii = 0
        tlast = self.time
        while self.time < timeEnd:
            print 'time:', self.time
            ii += 1
            # start/end may be callables of time or static position vectors.
            posS = start(self.time) if isinstance(start, FunctionType) else start
            posE = end(self.time) if isinstance(end, FunctionType) else end
            goal = lInterp(self.time, [timeStart, timeEnd], posS, posE)
            print goal
            cmdPos = self.commandPosition(goal)
            if logFile:
                extraInfo = ''
                if extraLogInfoFn:
                    extraInfo = extraLogInfoFn()
                print >>logFile, self.time, ' '.join([str(x) for x in cmdPos]),
                #print >>logFile, ' '.join(str(ac.current_position) for ac in self.actuators),
                print >>logFile, extraInfo
            #volts = ['%d: %s' % (ii,ac.current_voltage) for ii,ac in enumerate(self.actuators)]
            #print ' '.join(volts)
            #[ac.read_all() for ac in self.actuators]
            #positions = ['%d: %s' % (ii,ac.cache[dynamixel.defs.REGISTER['CurrentPosition']]) for ii,ac in enumerate(self.actuators)]
            #print ' '.join(positions)
            #print ''.join(['x' if ac.led else ' ' for ac in self.actuators])
            #sleep(self.sleep)
            #sleep(float(1)/100)
            self.updateClock()
            secElapsed = self.time - tlast
            tosleep = self.sleep - secElapsed
            #if tosleep > 0:
            #sleep(tosleep)
            self.updateClock()
            tlast = self.time

    #
    #        currentPos = None
    #
    #        if resetFirst:
    #            currentPos = self.currentPostion()
    #            time0 = datetime.datetime.now()
    #            seconds = 0
    #
    #            while seconds < 10:
    #                timeDiff = datetime.datetime.now() - time0
    #                seconds  = timeDiff.seconds + timeDiff.microseconds/1e6
    #
    #                if seconds < 3:
    #                    goal = lInterp(seconds, [0, 3], startingPos, POS_FLAT)
    #                elif seconds < 6:
    #                    goal = lInterp(seconds, [3, 6], POS_FLAT, POS_READY)
    #                elif seconds < 10:
    #                    goal = lInterp(seconds, [6, 10], POS_READY, POS_HALFSTAND)
    #                else:
    #                    break
    #
    #                self.commandPosition(goal)
    #                sleep(self.sleep)
    #
    #            currentPos = POS_HALFSTAND
    #
    #        if interpBegin is not None:
    #            if currentPos is None:
    #                currentPos = self.currentPostion()
    #
    #            time0 = datetime.datetime.now()
    #            seconds = 0
    #
    #            while seconds < interpBegin:
    #                timeDiff = datetime.datetime.now() - time0
    #                seconds  = timeDiff.seconds + timeDiff.microseconds/1e6
    #
    #                goal = lInterp(seconds, [0, interpBegin], currentPos, motionFunction(seconds))
    #
    #                self.commandPosition(goal)
    #                time.sleep(self.sleep)
    #

    def resetClock(self):
        '''Resets the robot time to zero'''
        self.time0 = datetime.now()
        self.time = 0.0

    def updateClock(self):
        '''Updates the Robots clock to the current time'''
        timeDiff = datetime.now() - self.time0
        self.time = timeDiff.seconds + timeDiff.microseconds/1e6

    def tic(self):
        '''Start a stopwatch (pairs with toc()).'''
        self.updateClock()
        self.tictime = self.time

    def toc(self):
        '''Print elapsed seconds since the last tic(), tagged with caller location.'''
        self.updateClock()
        print 'Elapsed (%s:%s):' % fileLine(), self.time - self.tictime

    def readyPosition(self, persist = False):
        '''Move to POS_READY; if persist, re-command it repeatedly for 2s.'''
        if persist:
            self.resetClock()
            while self.time < 2.0:
                self.commandPosition(POS_READY)
                sleep(.1)
                self.updateClock()
        else:
            self.commandPosition(POS_READY)
            sleep(2)

    def commandPosition(self, position, crop = True, cropWarning = False):
        '''Command the given position

        commandPosition will command the robot to move its servos to
        the given position vector.  This vector is cropped to
        the physical limits of the robot and converted to integer

        Positional arguments:
        position -- A length 9 vector of desired positions.

        Keyword arguments:
        crop -- Crop to per-servo physical limits first.  Default: True.
        cropWarning -- Whether or not to print a warning if the
                       positions are cropped.  Default: False.

        Returns the (possibly cropped) integer goal vector actually sent.
        '''
        if len(position) != self.nServos:
            raise Exception('Expected postion vector of length %d, got %s instead'
                            % (self.nServos, repr(position)))
        if crop:
            goalPosition = self.cropPosition([int(xx) for xx in position], cropWarning)
        else:
            goalPosition = [int(xx) for xx in position]
        if self.loud:
            posstr = ', '.join(['%4d' % xx for xx in goalPosition])
            print '%.2fs -> %s' % (self.time, posstr)
        for ii,actuator in enumerate(self.actuators):
            actuator.goal_position = goalPosition[ii]
        # Broadcast all goal positions to the bus in one synchronized write.
        self.net.synchronize()
        #[ac.read_all() for ac in self.actuators]
        #positions = ['%d: %s' % (ii,ac.cache[dynamixel.defs.REGISTER['CurrentPosition']]) for ii,ac in enumerate(self.actuators)]
        #print ' '.join(positions)
        print ''.join(['x' if ac.led else ' ' for ac in self.actuators]) + ' ' ,
        print ' '.join(['%.1f' % ac.current_voltage for ac in self.actuators])
        return goalPosition

    def cropPosition(self, position, cropWarning = False):
        '''Crops the given positions to their appropriate min/max values.

        Requires a vector of length 9 to be sure the IDs are in the
        assumed order.'''
        if len(position) != self.nServos:
            raise Exception('cropPosition expects a vector of length %d' % self.nServos)

        # Servos alternate inner/outer joints in pairs 0..7; servo 8 is center.
        ret = copy(position)
        for ii in [0, 2, 4, 6]:
            ret[ii] = max(MIN_INNER, min(MAX_INNER, ret[ii]))
            ret[ii+1] = max(MIN_OUTER, min(MAX_OUTER, ret[ii+1]))
        ret[8] = max(MIN_CENTER, min(MAX_CENTER, ret[8]))

        if cropWarning and ret != position:
            print 'Warning: cropped %s to %s' % (repr(position), repr(ret))

        return ret

    def readCurrentPosition(self):
        '''Read and return the current position of every servo, in id order.'''
        ret = []
        if len(self.actuators) != self.nServos:
            raise RobotFailure('Lost some servos, now we only have %d' % len(self.actuators))
        for ac in self.actuators:
            #ac.read_all()
            #ret.append(ac.cache[dynamixel.defs.REGISTER['CurrentPosition']])
            ret.append(ac.current_position)
            #sleep(.001)
        return ret

    def pingAll(self):
        '''Ping every expected servo; return the list of ids that failed.'''
        failures = []
        for ii in self.actuatorIds:
            result = self.net.ping(ii)
            if result is False:
                failures.append(ii)
        return failures

    def printStatus(self):
        '''Print the current position of every servo.'''
        pos = self.readCurrentPosition()
        print 'Positions:', ' '.join(['%d:%d' % (ii,pp) for ii,pp in enumerate(pos)])

    def shimmy(self):
        '''Moves through a set of checks and makes sure the robot is
        still moving.'''
        self.commandPosition(POS_READY)
        sleep(.8)
        success = True
        success &= self.checkMove(POS_READY, POS_CHECK_1)
        success &= self.checkMove(POS_CHECK_1, POS_CHECK_2)
        success &= self.checkMove(POS_CHECK_2, POS_CHECK_3)
        success &= self.checkMove(POS_CHECK_3, POS_CHECK_2)
        success &= self.checkMove(POS_CHECK_2, POS_CHECK_1)
        success &= self.checkMove(POS_CHECK_1, POS_READY)
        return success

    def checkMove(self, aa, bb):
        '''Command aa then bb and verify the servos tracked both within
        50 ticks; returns True on success.'''
        aa = array(aa)
        bb = array(bb)
        self.commandPosition(aa)
        posAA = array(self.readCurrentPosition())
        self.commandPosition(bb)
        sleep(.4)
        posBB = array(self.readCurrentPosition())
        success = True
        success &= all( abs((aa-bb) - (posAA-posBB)) < 50)
        success &= all( abs(aa - posAA) < 50)
        success &= all( abs(bb - posBB) < 50)
        if not success and False:
            print 'shimmy errors'
            print (aa-bb) - (posAA-posBB)
            print aa - posAA
            print bb - posBB
        return success
| booi/aracna | RobotPi/RobotQuadratot.py | Python | gpl-3.0 | 18,657 |
''' Module containing tests for the network data structure
'''
import unittest
from power_grid import network
class TestNetwork(unittest.TestCase):
    """Unit tests for the network data structure."""

    def test_randomly_generated_network(self):
        """A randomly generated network should match the requested
        average connectivity (within floating-point tolerance)."""
        num_nodes = 100
        target_connectivity = 2

        net = network.RandomNetworkGenerator(
            num_nodes=num_nodes,
            average_connectivity=target_connectivity)

        # Average degree = total edge endpoints / node count.
        total_edges = sum(len(node.edges) for node in net.nodes)
        measured_connectivity = total_edges / num_nodes

        self.assertAlmostEqual(
            target_connectivity,
            measured_connectivity,
            delta=10e-6)
| ABM-project/power-grid | test/test_network.py | Python | mit | 814 |
#!/usr/bin/env python
#
# Copyright (c) 2015 Intel Corporation.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of works must retain the original copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the original copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Intel Corporation nor the names of its contributors
# may be used to endorse or promote products derived from this work without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors:
# Hongjuan, Wang<hongjuanx.wang@intel.com>
import unittest
import os, sys, commands
import comm
class TestSecurityFunctions(unittest.TestCase):
    """Packaging tests for permission-field handling in manifest.json.

    Each test points make_apk.py at a test app whose manifest exercises a
    particular permission-field shape; invalid manifests must make the
    packaging command fail (non-zero status), while a null permission
    field must still package successfully.
    """

    # Refactor: the four tests previously duplicated the command
    # construction verbatim; it is now built in one place.
    def _make_apk_cmd(self, app_dir):
        """Build the make_apk.py command line for *app_dir*'s manifest."""
        manifestPath = comm.ConstPath + "/../testapp/" + app_dir + "/manifest.json"
        return "python %smake_apk.py --package=org.xwalk.example --arch=%s --mode=%s --manifest=%s" % \
            (comm.Pck_Tools, comm.ARCH, comm.MODE, manifestPath)

    def _assert_pack_fails(self, app_dir):
        """Run the packaging command and assert it exits non-zero."""
        cmd = self._make_apk_cmd(app_dir)
        packInfo = commands.getstatusoutput(cmd)
        self.assertNotEquals(0, packInfo[0])

    def test_permission_chinese(self):
        # Chinese characters in the permission field must be rejected.
        comm.setUp()
        self._assert_pack_fails("permission_field_chinese_tests")

    def test_permission_noapi(self):
        # A permission naming no known API must be rejected.
        comm.setUp()
        self._assert_pack_fails("permission_field_noapi_tests")

    def test_permission_null(self):
        # A null permission field is valid and must package cleanly.
        comm.setUp()
        cmd = self._make_apk_cmd("permission_field_null_tests")
        comm.gen_pkg(cmd, self)

    def test_permission_splite(self):
        # A malformed (split) permission list must be rejected.
        comm.setUp()
        self._assert_pack_fails("permission_field_splite_tests")
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
| pk-sam/crosswalk-test-suite | wrt/wrt-security-android-tests/security/permissiontest.py | Python | bsd-3-clause | 3,330 |
"""
.. _ex-spm-faces:
==========================================
From raw data to dSPM on SPM Faces dataset
==========================================
Runs a full pipeline using MNE-Python:
- artifact removal
- averaging Epochs
- forward model computation
- source reconstruction using dSPM on the contrast : "faces - scrambled"
.. note:: This example does quite a bit of processing, so even on a
fast machine it can take several minutes to complete.
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Denis Engemann <denis.engemann@gmail.com>
#
# License: BSD (3-clause)
# sphinx_gallery_thumbnail_number = 10
import matplotlib.pyplot as plt
import mne
from mne.datasets import spm_face
from mne.preprocessing import ICA, create_eog_epochs
from mne import io, combine_evoked
from mne.minimum_norm import make_inverse_operator, apply_inverse
print(__doc__)

data_path = spm_face.data_path()
subjects_dir = data_path + '/subjects'

###############################################################################
# Load and filter data, set up epochs

raw_fname = data_path + '/MEG/spm/SPM_CTF_MEG_example_faces%d_3D.ds'

raw = io.read_raw_ctf(raw_fname % 1, preload=True)  # Take first run
# Here to save memory and time we'll downsample heavily -- this is not
# advised for real data as it can effectively jitter events!
raw.resample(120., npad='auto')

picks = mne.pick_types(raw.info, meg=True, exclude='bads')
raw.filter(1, 30, method='fir', fir_design='firwin')

events = mne.find_events(raw, stim_channel='UPPT001')

# plot the events to get an idea of the paradigm
mne.viz.plot_events(events, raw.info['sfreq'])

event_ids = {"faces": 1, "scrambled": 2}

tmin, tmax = -0.2, 0.6
baseline = None  # no baseline as high-pass is applied
reject = dict(mag=5e-12)

epochs = mne.Epochs(raw, events, event_ids, tmin, tmax, picks=picks,
                    baseline=baseline, preload=True, reject=reject)

# Fit ICA, find and remove major artifacts
ica = ICA(n_components=0.95, random_state=0).fit(raw, decim=1, reject=reject)

# compute correlation scores, get bad indices sorted by score
eog_epochs = create_eog_epochs(raw, ch_name='MRT31-2908', reject=reject)
eog_inds, eog_scores = ica.find_bads_eog(eog_epochs, ch_name='MRT31-2908')
ica.plot_scores(eog_scores, eog_inds)  # see scores the selection is based on
ica.plot_components(eog_inds)  # view topographic sensitivity of components
ica.exclude += eog_inds[:1]  # we saw the 2nd ECG component looked too dipolar
ica.plot_overlay(eog_epochs.average())  # inspect artifact removal
ica.apply(epochs)  # clean data, default in place

evoked = [epochs[k].average() for k in event_ids]

contrast = combine_evoked(evoked, weights=[-1, 1])  # Faces - scrambled

evoked.append(contrast)

for e in evoked:
    e.plot(ylim=dict(mag=[-400, 400]))

plt.show()

# estimate noise covariance
noise_cov = mne.compute_covariance(epochs, tmax=0, method='shrunk',
                                   rank=None)

###############################################################################
# Visualize fields on MEG helmet

# The transformation here was aligned using the dig-montage. It's included in
# the spm_faces dataset and is named SPM_dig_montage.fif.
trans_fname = data_path + ('/MEG/spm/SPM_CTF_MEG_example_faces1_3D_'
                           'raw-trans.fif')

maps = mne.make_field_map(evoked[0], trans_fname, subject='spm',
                          subjects_dir=subjects_dir, n_jobs=1)

evoked[0].plot_field(maps, time=0.170)

###############################################################################
# Look at the whitened evoked data

evoked[0].plot_white(noise_cov)

###############################################################################
# Compute forward model

src = data_path + '/subjects/spm/bem/spm-oct-6-src.fif'
bem = data_path + '/subjects/spm/bem/spm-5120-5120-5120-bem-sol.fif'
forward = mne.make_forward_solution(contrast.info, trans_fname, src, bem)

###############################################################################
# Compute inverse solution

snr = 3.0
lambda2 = 1.0 / snr ** 2
method = 'dSPM'

inverse_operator = make_inverse_operator(contrast.info, forward, noise_cov,
                                         loose=0.2, depth=0.8)

# Compute inverse solution on contrast
stc = apply_inverse(contrast, inverse_operator, lambda2, method, pick_ori=None)
# stc.save('spm_%s_dSPM_inverse' % contrast.comment)

# Plot contrast in 3D with PySurfer if available
brain = stc.plot(hemi='both', subjects_dir=subjects_dir, initial_time=0.170,
                 views=['ven'], clim={'kind': 'value', 'lims': [3., 6., 9.]})
# brain.save_image('dSPM_map.png')
| adykstra/mne-python | examples/datasets/spm_faces_dataset.py | Python | bsd-3-clause | 4,719 |
"""
Copyright 2015 Malte Splietker
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
def indent(text, indentation, indentation_character="\t"):
    """Prefix every line of *text* with an indentation string.

    Args:
        text: String whose lines should be indented.
        indentation: How many indentation characters to prepend per line.
        indentation_character: Character (or string) repeated to build
            the per-line prefix; defaults to a tab.

    Returns:
        The indented text, with lines joined by newline characters.
    """
    prefix = indentation_character * indentation
    indented_lines = [prefix + line for line in text.splitlines()]
    return "\n".join(indented_lines)
| wette/netSLS | network_emulator/utils.py | Python | apache-2.0 | 971 |
# -*- coding: utf-8 -*-
#
# Troy documentation build configuration file, created by
# sphinx-quickstart on Sat Sep 15 21:44:01 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.pngmath',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
# templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Troy'
copyright = u'2012, Andre Merzky'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# -- Date stamp ----------------------------------------------------------
# |today| substitution: assign a non-false string to ``today`` to pin it,
# or set ``today_fmt`` to a strftime format; both are left at their
# Sphinx defaults here.

# Files and directories, relative to the source dir, that Sphinx skips
# when collecting source files.
exclude_patterns = ['docs', 'examples', 'misc', 'test']

# Append '()' to :func: and similar cross-reference text.
add_function_parentheses = True

# Prefix description unit titles (e.g. ``.. function::``) with the
# current module name.
add_module_names = True

# Pygments style for syntax highlighting.
pygments_style = 'sphinx'

# -- HTML output ---------------------------------------------------------

# Builtin theme for HTML and HTML Help pages; all other HTML options
# (title, logo, favicon, sidebars, indices, ...) keep their defaults.
html_theme = 'default'

# Output file base name for the HTML help builder.
htmlhelp_basename = 'Troydoc'

# -- LaTeX output --------------------------------------------------------

# Paper size, point size and extra preamble could be customised here;
# defaults are used.
latex_elements = {
}

# One tuple per LaTeX document:
# (source start file, target name, title, author, documentclass).
latex_documents = [
    ('index', 'Troy.tex', u'Troy Documentation', u'Andre Merzky', 'manual'),
]

# -- Manual page output --------------------------------------------------

# One tuple per man page:
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'troy', u'Troy Documentation', [u'Andre Merzky'], 1)
]

# -- Texinfo output ------------------------------------------------------

# One tuple per Texinfo document:
# (source start file, target name, title, author, dir menu entry,
# description, category).
texinfo_documents = [
    ('index', 'Troy', u'Troy Documentation', u'Andre Merzky', 'Troy',
     'description of project.', 'Miscellaneous'),
]

# -- Intersphinx ---------------------------------------------------------

# Cross-reference targets in the Python standard library documentation.
intersphinx_mapping = {'http://docs.python.org/': None}
| andre-merzky/troy_old | docs/conf.py | Python | gpl-3.0 | 8,120 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2012 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from devstack import component as comp
from devstack import log as logging
LOG = logging.getLogger("devstack.components.quantum_client")
class QuantumClientUninstaller(comp.PythonUninstallComponent):
    """Uninstall handler for the quantum client component.

    Adds no behaviour of its own; everything is delegated to the generic
    python-component uninstaller.
    """

    def __init__(self, *args, **kwargs):
        comp.PythonUninstallComponent.__init__(self, *args, **kwargs)
class QuantumClientInstaller(comp.PythonInstallComponent):
    """Install handler for the quantum client component."""

    def __init__(self, *args, **kwargs):
        comp.PythonInstallComponent.__init__(self, *args, **kwargs)

    def _get_download_locations(self):
        # Single git checkout; the repository URI and branch are looked up
        # from the 'git' section of the configuration.
        return [{
            'uri': ("git", "quantum_client_repo"),
            'branch': ("git", "quantum_client_branch"),
        }]
class QuantumClientRuntime(comp.EmptyRuntime):
    """Runtime handler for the quantum client component.

    The client is a library, not a service, so there is nothing to
    start or stop: the empty runtime is used unchanged.
    """

    def __init__(self, *args, **kwargs):
        comp.EmptyRuntime.__init__(self, *args, **kwargs)
| hagleitn/Openstack-Devstack2 | devstack/components/quantum_client.py | Python | apache-2.0 | 1,531 |
#!/usr/bin/python
# *****************************************************************************
#
# Copyright (c) 2016, EPAM SYSTEMS INC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ******************************************************************************
import logging
import json
import sys
from dlab.fab import *
from dlab.meta_lib import *
from dlab.actions_lib import *
import os
if __name__ == "__main__":
    # Provision and configure a Jupyter notebook node on Azure.  All input
    # arrives via environment variables set by the orchestrator.  Every
    # failure path removes the half-provisioned VM via
    # AzureActions().remove_instance() and exits non-zero.
    local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['edge_user_name'],
                                               os.environ['request_id'])
    local_log_filepath = "/logs/" + os.environ['conf_resource'] + "/" + local_log_filename
    logging.basicConfig(format='%(levelname)-8s [%(asctime)s] %(message)s',
                        level=logging.DEBUG,
                        filename=local_log_filepath)

    try:
        # Collect all naming / sizing parameters in one dictionary.
        notebook_config = dict()
        try:
            notebook_config['exploratory_name'] = os.environ['exploratory_name'].replace('_', '-')
        except:
            # 'exploratory_name' is optional; fall back to an empty name.
            notebook_config['exploratory_name'] = ''
        notebook_config['service_base_name'] = os.environ['conf_service_base_name']
        notebook_config['resource_group_name'] = os.environ['azure_resource_group_name']
        notebook_config['instance_size'] = os.environ['azure_notebook_instance_size']
        notebook_config['key_name'] = os.environ['conf_key_name']
        notebook_config['user_name'] = os.environ['edge_user_name'].replace('_', '-')
        notebook_config['user_keyname'] = os.environ['edge_user_name']
        notebook_config['instance_name'] = '{}-{}-nb-{}'.format(notebook_config['service_base_name'],
                                                                notebook_config['user_name'],
                                                                notebook_config['exploratory_name'])
        notebook_config['expected_image_name'] = '{}-{}-notebook-image'.format(notebook_config['service_base_name'],
                                                                               os.environ['application'])
        # str() deliberately turns a missing value (None) into the string
        # 'None', which is what the comparisons below expect.
        notebook_config['notebook_image_name'] = str(os.environ.get('notebook_image_name'))
        notebook_config['security_group_name'] = '{}-{}-nb-sg'.format(notebook_config['service_base_name'],
                                                                      notebook_config['user_name'])
        notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
        notebook_config['tags'] = {"Name": notebook_config['instance_name'],
                                   "SBN": notebook_config['service_base_name'],
                                   "User": notebook_config['user_name'],
                                   "Exploratory": notebook_config['exploratory_name']}
        notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']

        # generating variables regarding EDGE proxy on Notebook instance
        instance_hostname = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
                                                               notebook_config['instance_name'])
        edge_instance_name = '{}-{}-edge'.format(notebook_config['service_base_name'], notebook_config['user_name'])
        edge_instance_hostname = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
                                                                    edge_instance_name)
        keyfile_name = "{}{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
        if os.environ['conf_os_family'] == 'debian':
            initial_user = 'ubuntu'
            sudo_group = 'sudo'
        if os.environ['conf_os_family'] == 'redhat':
            initial_user = 'ec2-user'
            sudo_group = 'wheel'
    except Exception as err:
        append_result("Failed to generate variables dictionary.", str(err))
        # NOTE(review): if the failure happened before 'resource_group_name'
        # or 'instance_name' were set, this cleanup itself raises KeyError —
        # confirm whether that is acceptable here.
        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
        sys.exit(1)

    try:
        logging.info('[CREATING DLAB SSH USER]')
        print('[CREATING DLAB SSH USER]')
        params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format\
            (instance_hostname, keyfile_name, initial_user,
             notebook_config['dlab_ssh_user'], sudo_group)
        try:
            local("~/scripts/{}.py {}".format('create_ssh_user', params))
        except:
            traceback.print_exc()
            raise Exception
    except Exception as err:
        append_result("Failed creating ssh user 'dlab-user'.", str(err))
        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
        sys.exit(1)

    # configuring proxy on Notebook instance
    try:
        logging.info('[CONFIGURE PROXY ON JUPYTER INSTANCE]')
        print('[CONFIGURE PROXY ON JUPYTER INSTANCE]')
        additional_config = {"proxy_host": edge_instance_hostname, "proxy_port": "3128"}
        params = "--hostname {} --instance_name {} --keyfile {} --additional_config '{}' --os_user {}"\
            .format(instance_hostname, notebook_config['instance_name'], keyfile_name, json.dumps(additional_config),
                    notebook_config['dlab_ssh_user'])
        try:
            local("~/scripts/{}.py {}".format('common_configure_proxy', params))
        except:
            traceback.print_exc()
            raise Exception
    except Exception as err:
        append_result("Failed to configure proxy.", str(err))
        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
        sys.exit(1)

    # updating repositories & installing python packages
    try:
        logging.info('[INSTALLING PREREQUISITES TO JUPYTER NOTEBOOK INSTANCE]')
        print('[INSTALLING PREREQUISITES TO JUPYTER NOTEBOOK INSTANCE]')
        params = "--hostname {} --keyfile {} --user {} --region {}".\
            format(instance_hostname, keyfile_name, notebook_config['dlab_ssh_user'], os.environ['azure_region'])
        try:
            local("~/scripts/{}.py {}".format('install_prerequisites', params))
        except:
            traceback.print_exc()
            raise Exception
    except Exception as err:
        append_result("Failed installing apps: apt & pip.", str(err))
        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
        sys.exit(1)

    # installing and configuring jupiter and all dependencies
    try:
        logging.info('[CONFIGURE JUPYTER NOTEBOOK INSTANCE]')
        print('[CONFIGURE JUPYTER NOTEBOOK INSTANCE]')
        params = "--hostname {} --keyfile {} " \
                 "--region {} --spark_version {} " \
                 "--hadoop_version {} --os_user {} " \
                 "--scala_version {} --r_mirror {} " \
                 "--exploratory_name {}".\
            format(instance_hostname, keyfile_name,
                   os.environ['azure_region'], os.environ['notebook_spark_version'],
                   os.environ['notebook_hadoop_version'], notebook_config['dlab_ssh_user'],
                   os.environ['notebook_scala_version'], os.environ['notebook_r_mirror'],
                   notebook_config['exploratory_name'])
        try:
            local("~/scripts/{}.py {}".format('configure_jupyter_node', params))
            remount_azure_disk(True, notebook_config['dlab_ssh_user'], instance_hostname,
                               keyfile_name)
        except:
            traceback.print_exc()
            raise Exception
    except Exception as err:
        append_result("Failed to configure jupyter.", str(err))
        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
        sys.exit(1)

    try:
        print('[INSTALLING USERs KEY]')
        logging.info('[INSTALLING USERs KEY]')
        additional_config = {"user_keyname": notebook_config['user_keyname'],
                             "user_keydir": os.environ['conf_key_dir']}
        params = "--hostname {} --keyfile {} --additional_config '{}' --user {}".format(
            instance_hostname, keyfile_name, json.dumps(additional_config), notebook_config['dlab_ssh_user'])
        try:
            local("~/scripts/{}.py {}".format('install_user_key', params))
        except:
            append_result("Failed installing users key")
            raise Exception
    except Exception as err:
        append_result("Failed installing users key.", str(err))
        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
        sys.exit(1)

    try:
        print('[SETUP USER GIT CREDENTIALS]')
        logging.info('[SETUP USER GIT CREDENTIALS]')
        params = '--os_user {} --notebook_ip {} --keyfile "{}"' \
            .format(notebook_config['dlab_ssh_user'], instance_hostname, keyfile_name)
        try:
            # local("~/scripts/{}.py {}".format('common_download_git_certfile', params))
            local("~/scripts/{}.py {}".format('manage_git_creds', params))
        except:
            append_result("Failed setup git credentials")
            raise Exception
    except Exception as err:
        append_result("Failed to setup git credentials.", str(err))
        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
        sys.exit(1)

    try:
        logging.info('[POST CONFIGURING PROCESS]')
        print('[POST CONFIGURING PROCESS]')
        # Only clean up remote kernels when the notebook was built from a
        # pre-existing custom image (not the expected image and not 'None').
        if notebook_config['notebook_image_name'] not in [notebook_config['expected_image_name'], 'None']:
            params = "--hostname {} --keyfile {} --os_user {} --resource_group_name {} --notebook_name {}" \
                .format(instance_hostname, keyfile_name, notebook_config['dlab_ssh_user'],
                        notebook_config['resource_group_name'], notebook_config['instance_name'])
            try:
                local("~/scripts/{}.py {}".format('common_remove_remote_kernels', params))
            except:
                traceback.print_exc()
                raise Exception
    except Exception as err:
        append_result("Failed to post configuring instance.", str(err))
        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
        sys.exit(1)

    if notebook_config['shared_image_enabled'] == 'true':
        try:
            print('[CREATING IMAGE]')
            image = AzureMeta().get_image(notebook_config['resource_group_name'], notebook_config['expected_image_name'])
            if image == '':
                # First notebook of this kind: capture an image, then re-create
                # the VM from it and re-apply host-specific configuration.
                print("Looks like it's first time we configure notebook server. Creating image.")
                prepare_vm_for_image(True, notebook_config['dlab_ssh_user'], instance_hostname, keyfile_name)
                AzureActions().create_image_from_instance(notebook_config['resource_group_name'],
                                                          notebook_config['instance_name'],
                                                          os.environ['azure_region'],
                                                          notebook_config['expected_image_name'],
                                                          json.dumps(notebook_config['tags']))
                print("Image was successfully created.")
                local("~/scripts/{}.py".format('common_prepare_notebook'))
                # Busy-wait until the re-created instance reports 'running'.
                instance_running = False
                while not instance_running:
                    if AzureMeta().get_instance_status(notebook_config['resource_group_name'],
                                                       notebook_config['instance_name']) == 'running':
                        instance_running = True
                instance_hostname = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
                                                                       notebook_config['instance_name'])
                remount_azure_disk(True, notebook_config['dlab_ssh_user'], instance_hostname, keyfile_name)
                set_git_proxy(notebook_config['dlab_ssh_user'], instance_hostname, keyfile_name,
                              'http://{}:3128'.format(edge_instance_hostname))
                additional_config = {"proxy_host": edge_instance_hostname, "proxy_port": "3128"}
                params = "--hostname {} --instance_name {} --keyfile {} --additional_config '{}' --os_user {}" \
                    .format(instance_hostname, notebook_config['instance_name'], keyfile_name,
                            json.dumps(additional_config), notebook_config['dlab_ssh_user'])
                local("~/scripts/{}.py {}".format('common_configure_proxy', params))
        except Exception as err:
            append_result("Failed creating image from notebook.", str(err))
            AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
            sys.exit(1)

    # generating output information
    try:
        ip_address = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
                                                        notebook_config['instance_name'])
        jupyter_ip_url = "http://" + ip_address + ":8888/{}/".format(notebook_config['exploratory_name'])
        ungit_ip_url = "http://" + ip_address + ":8085/{}-ungit/".format(notebook_config['exploratory_name'])
        print('[SUMMARY]')
        logging.info('[SUMMARY]')
        print("Instance name: {}".format(notebook_config['instance_name']))
        print("Private IP: {}".format(ip_address))
        print("Instance type: {}".format(notebook_config['instance_size']))
        print("Key name: {}".format(notebook_config['key_name']))
        print("User key name: {}".format(notebook_config['user_keyname']))
        print("SG name: {}".format(notebook_config['security_group_name']))
        print("Jupyter URL: {}".format(jupyter_ip_url))
        print("Ungit URL: {}".format(ungit_ip_url))
        print('SSH access (from Edge node, via IP address): ssh -i {0}.pem {1}@{2}'.
              format(notebook_config['key_name'], notebook_config['dlab_ssh_user'], ip_address))

        # Machine-readable result consumed by the provisioning service.
        with open("/root/result.json", 'w') as result:
            res = {"ip": ip_address,
                   "master_keyname": os.environ['conf_key_name'],
                   "notebook_name": notebook_config['instance_name'],
                   "instance_id": notebook_config['instance_name'],
                   "Action": "Create new notebook server",
                   "notebook_image_name": notebook_config['notebook_image_name'],
                   "exploratory_url": [
                       {"description": "Jupyter",
                        "url": jupyter_ip_url},
                       {"description": "Ungit",
                        "url": ungit_ip_url}]}
            result.write(json.dumps(res))
    except Exception as err:
        append_result("Failed to generate output information", str(err))
        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
        sys.exit(1)
from pyethapp.leveldb_service import LevelDB
from pyethapp.config import default_data_dir
from ethereum.chain import Chain
from ethereum.config import Env
from ethereum.transactions import Transaction
import rlp
from rlp.codec import consume_length_prefix
import os
import sys
def get_chain(data_dir=default_data_dir):
    """Open the LevelDB blockchain database below *data_dir* and return
    it wrapped in an ethereum.chain.Chain instance."""
    db = LevelDB(os.path.join(data_dir, 'leveldb'))
    return Chain(Env(db))
def _progress(i):
if i % 1000:
sys.stderr.write('.')
else:
sys.stderr.write(str(i))
sys.stderr.flush()
def export_blocks(chain):
"""
Export blocks
rlp and hex encoded, separated by newline
"""
head_number = chain.head.header.number
block_number = 0
while block_number < head_number:
h = chain.index.get_block_by_number(block_number)
raw = chain.blockchain.get(h)
print raw.encode('hex')
_progress(block_number)
block_number += 1
def export_transactions(chain):
    """
    Export transactions
    rlp and hex encoded, separated by newline

    The transaction list is sliced straight out of each block's raw rlp
    (header list skipped via length prefixes) instead of decoding whole
    blocks.  Blocks 0 .. head-1 are walked; the head block is excluded.
    """
    head_number = chain.head.header.number
    block_number = 0
    seen = 0
    while block_number < head_number:
        h = chain.index.get_block_by_number(block_number)
        raw = chain.blockchain.get(h)
        # block [[], [], []]
        # Outer prefix: the block itself must be an rlp list.
        typ, length, end = consume_length_prefix(raw, 0)
        assert typ == list
        typ, length, end = consume_length_prefix(raw, end)  # header list
        assert typ == list
        # The transaction list starts right after the header's payload.
        txs_start = end + length
        typ, length, end = consume_length_prefix(raw, txs_start)  # tx list
        txrlp = raw[txs_start:end + length]
        r = rlp.decode(txrlp)
        assert isinstance(r, list)
        for tx in r:
            # Re-encode each transaction and round-trip it through the
            # Transaction sedes as a sanity check before printing.
            s = rlp.encode(tx)
            rlp.decode(s, Transaction)
            print s.encode('hex')
            seen += 1
            _progress(seen)
        block_number += 1
if __name__ == '__main__':
    # Open the default chain database and dump all transactions to stdout
    # (block export is available but disabled).
    chain = get_chain()
    # export_blocks(chain)
    export_transactions(chain)
| RomanZacharia/pyethapp | examples/export.py | Python | mit | 2,083 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.