code stringlengths 3 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1 value | license stringclasses 15 values | size int64 3 1.05M |
|---|---|---|---|---|---|
from django.db.models.lookups import (
Exact, GreaterThan, GreaterThanOrEqual, In, IsNull, LessThan,
LessThanOrEqual,
)
class MultiColSource:
    """
    Stand-in for the left-hand side of a lookup against a multi-column
    relation. It records the join alias plus the source/target fields so
    related lookups can build one constraint per column pair.
    """
    # Checked by Django's SQL compiler; this expression never aggregates.
    contains_aggregate = False

    def __init__(self, alias, targets, sources, field):
        self.alias = alias
        self.targets = targets
        self.sources = sources
        self.field = field
        # The related field itself serves as the output field.
        self.output_field = self.field

    def __repr__(self):
        return "{}({}, {})".format(
            self.__class__.__name__, self.alias, self.field)

    def relabeled_clone(self, relabels):
        # Only the alias can change under relabeling; fields are shared.
        new_alias = relabels.get(self.alias, self.alias)
        return self.__class__(new_alias, self.targets, self.sources, self.field)
def get_normalized_value(value, lhs):
    """
    Normalize ``value`` into a tuple of raw column values for comparison
    against ``lhs`` (the left-hand side of a related lookup).

    Model instances become a tuple of the values of the relation's target
    fields; tuples pass through unchanged; any other value is wrapped in a
    1-tuple.
    """
    from django.db.models import Model
    if isinstance(value, Model):
        value_list = []
        sources = lhs.output_field.get_path_info()[-1].target_fields
        for source in sources:
            # Follow remote_field chains until we reach the field that
            # actually lives on value's model.
            while not isinstance(value, source.model) and source.remote_field:
                source = source.remote_field.model._meta.get_field(source.remote_field.field_name)
            try:
                value_list.append(getattr(value, source.attname))
            except AttributeError:
                # A case like Restaurant.objects.filter(place=restaurant_instance),
                # where place is a OneToOneField and the primary key of Restaurant.
                return (value.pk,)
        return tuple(value_list)
    if not isinstance(value, tuple):
        return (value,)
    return value
class RelatedIn(In):
    """``__in`` lookup for related fields, with multi-column support."""

    def get_prep_lookup(self):
        if not isinstance(self.lhs, MultiColSource) and self.rhs_is_direct_value():
            # If we get here, we are dealing with single-column relations.
            self.rhs = [get_normalized_value(val, self.lhs)[0] for val in self.rhs]
            # We need to run the related field's get_prep_value(). Consider case
            # ForeignKey to IntegerField given value 'abc'. The ForeignKey itself
            # doesn't have validation for non-integers, so we must run validation
            # using the target field.
            if hasattr(self.lhs.output_field, 'get_path_info'):
                # Run the target field's get_prep_value. We can safely assume there is
                # only one as we don't get to the direct value branch otherwise.
                target_field = self.lhs.output_field.get_path_info()[-1].target_fields[-1]
                self.rhs = [target_field.get_prep_value(v) for v in self.rhs]
        return super().get_prep_lookup()

    def as_sql(self, compiler, connection):
        if isinstance(self.lhs, MultiColSource):
            # For multicolumn lookups we need to build a multicolumn where clause.
            # This clause is either a SubqueryConstraint (for values that need to be compiled to
            # SQL) or a OR-combined list of (col1 = val1 AND col2 = val2 AND ...) clauses.
            from django.db.models.sql.where import WhereNode, SubqueryConstraint, AND, OR
            root_constraint = WhereNode(connector=OR)
            if self.rhs_is_direct_value():
                # One AND-group of exact matches per candidate value tuple,
                # OR-ed together at the root.
                values = [get_normalized_value(value, self.lhs) for value in self.rhs]
                for value in values:
                    value_constraint = WhereNode()
                    for source, target, val in zip(self.lhs.sources, self.lhs.targets, value):
                        lookup_class = target.get_lookup('exact')
                        lookup = lookup_class(target.get_col(self.lhs.alias, source), val)
                        value_constraint.add(lookup, AND)
                    root_constraint.add(value_constraint, OR)
            else:
                # rhs is a query; defer to a subquery constraint.
                root_constraint.add(
                    SubqueryConstraint(
                        self.lhs.alias, [target.column for target in self.lhs.targets],
                        [source.name for source in self.lhs.sources], self.rhs),
                    AND)
            return root_constraint.as_sql(compiler, connection)
        else:
            # Single-column case: make sure the subquery selects only the
            # column that is compared against.
            if not getattr(self.rhs, 'has_select_fields', True):
                self.rhs.clear_select_clause()
                if (getattr(self.lhs.output_field, 'primary_key', False) and
                        self.lhs.output_field.model == self.rhs.model):
                    # A case like Restaurant.objects.filter(place__in=restaurant_qs),
                    # where place is a OneToOneField and the primary key of
                    # Restaurant.
                    target_field = self.lhs.field.name
                else:
                    target_field = self.lhs.field.target_field.name
                self.rhs.add_fields([target_field], True)
            return super().as_sql(compiler, connection)
class RelatedLookupMixin:
    """
    Mixin that adapts single-value lookups for related fields: normalizes
    direct values and expands MultiColSource left-hand sides into one
    constraint per column pair.
    """

    def get_prep_lookup(self):
        if not isinstance(self.lhs, MultiColSource) and self.rhs_is_direct_value():
            # If we get here, we are dealing with single-column relations.
            self.rhs = get_normalized_value(self.rhs, self.lhs)[0]
            # We need to run the related field's get_prep_value(). Consider case
            # ForeignKey to IntegerField given value 'abc'. The ForeignKey itself
            # doesn't have validation for non-integers, so we must run validation
            # using the target field.
            if self.prepare_rhs and hasattr(self.lhs.output_field, 'get_path_info'):
                # Get the target field. We can safely assume there is only one
                # as we don't get to the direct value branch otherwise.
                target_field = self.lhs.output_field.get_path_info()[-1].target_fields[-1]
                self.rhs = target_field.get_prep_value(self.rhs)
        return super().get_prep_lookup()

    def as_sql(self, compiler, connection):
        if isinstance(self.lhs, MultiColSource):
            # Multi-column case only supports direct values: apply the same
            # lookup to each (target column, value) pair, AND-ed together.
            assert self.rhs_is_direct_value()
            self.rhs = get_normalized_value(self.rhs, self.lhs)
            from django.db.models.sql.where import WhereNode, AND
            root_constraint = WhereNode()
            for target, source, val in zip(self.lhs.targets, self.lhs.sources, self.rhs):
                lookup_class = target.get_lookup(self.lookup_name)
                root_constraint.add(
                    lookup_class(target.get_col(self.lhs.alias, source), val), AND)
            return root_constraint.as_sql(compiler, connection)
        return super().as_sql(compiler, connection)
# Concrete lookup classes for related fields: the stock lookup classes
# provide the comparison semantics, while RelatedLookupMixin adds value
# normalization and multi-column (MultiColSource) support.
class RelatedExact(RelatedLookupMixin, Exact):
    pass
class RelatedLessThan(RelatedLookupMixin, LessThan):
    pass
class RelatedGreaterThan(RelatedLookupMixin, GreaterThan):
    pass
class RelatedGreaterThanOrEqual(RelatedLookupMixin, GreaterThanOrEqual):
    pass
class RelatedLessThanOrEqual(RelatedLookupMixin, LessThanOrEqual):
    pass
class RelatedIsNull(RelatedLookupMixin, IsNull):
    pass
| alexallah/django | django/db/models/fields/related_lookups.py | Python | bsd-3-clause | 6,846 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test RangeDataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.data.python.ops import counter
from tensorflow.contrib.data.python.ops import enumerate_ops
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import tensor_shape
from tensorflow.python.platform import test
class RangeDatasetTest(test_base.DatasetTestBase):
  """Tests for `enumerate_ops.enumerate_dataset` and `counter.Counter`."""

  def testEnumerateDataset(self):
    """Enumeration pairs each element with an int64 index starting at `start`."""
    components = (["a", "b"], [1, 2], [37.0, 38])
    start = constant_op.constant(20, dtype=dtypes.int64)
    iterator = (dataset_ops.Dataset.from_tensor_slices(components).apply(
        enumerate_ops.enumerate_dataset(start)).make_initializable_iterator())
    init_op = iterator.initializer
    get_next = iterator.get_next()
    # Index is a scalar int64; each enumerated component is a scalar.
    self.assertEqual(dtypes.int64, get_next[0].dtype)
    self.assertEqual((), get_next[0].shape)
    self.assertEqual([tensor_shape.TensorShape([])] * 3,
                     [t.shape for t in get_next[1]])
    with self.cached_session() as sess:
      sess.run(init_op)
      self.assertEqual((20, (b"a", 1, 37.0)), sess.run(get_next))
      self.assertEqual((21, (b"b", 2, 38.0)), sess.run(get_next))
      # Only two elements were provided, so the third pull must fail.
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)
  def testCounter(self):
    """Test dataset construction using `Counter`, including a negative step."""
    iterator = (counter.Counter(start=3, step=4)
                .make_one_shot_iterator())
    get_next = iterator.get_next()
    self.assertEqual([], get_next.shape.as_list())
    self.assertEqual(dtypes.int64, get_next.dtype)
    negative_iterator = (counter.Counter(start=0, step=-1)
                         .make_one_shot_iterator())
    negative_get_next = negative_iterator.get_next()
    with self.cached_session() as sess:
      self.assertEqual(3, sess.run(get_next))
      self.assertEqual(3 + 4, sess.run(get_next))
      self.assertEqual(3 + 2 * 4, sess.run(get_next))
      self.assertEqual(0, sess.run(negative_get_next))
      self.assertEqual(-1, sess.run(negative_get_next))
      self.assertEqual(-2, sess.run(negative_get_next))
# Allow running this test file directly as a script.
if __name__ == "__main__":
  test.main()
| kobejean/tensorflow | tensorflow/contrib/data/python/kernel_tests/range_dataset_op_test.py | Python | apache-2.0 | 3,072 |
# IDE completion-test fixture: `<caret>` marks the completion point, so this
# file is intentionally not runnable Python. Presumably verifies that keyword
# parameters of B.__init__ (e.g. `auno=`) are suggested when calling C, whose
# __init__ delegates via **kwargs — confirm against the test harness.
class B:
    def __init__(self, auno=True): pass
class C(B):
    def __init__(self, **kwargs): pass
c = C(au<caret>)
| akosyakov/intellij-community | python/testData/completion/superInitKwParams.py | Python | apache-2.0 | 119 |
from django.core.management import call_command
from nose.tools import eq_
from mkt.feed.models import FeedApp, FeedCollection
from mkt.site.tests import app_factory, TestCase
class TestMigrateCollectionColor(TestCase):
    """Tests for the `migrate_collection_colors` management command."""

    def setUp(self):
        # '#CE001C' is expected to map to the named color 'ruby'
        # (per the migrate_collection_colors command's mapping).
        self.color_hex = '#CE001C'
        self.color_name = 'ruby'
    def test_app(self):
        # A FeedApp with a known hex background gets its `color` name filled in.
        obj = FeedApp.objects.create(app=app_factory(),
                                     background_color=self.color_hex)
        eq_(obj.color, None)
        call_command('migrate_collection_colors')
        eq_(FeedApp.objects.get(id=obj.id).color, self.color_name)
    def test_collection(self):
        # Same migration applies to FeedCollection rows.
        obj = FeedCollection.objects.create(background_color=self.color_hex)
        eq_(obj.color, None)
        call_command('migrate_collection_colors')
        eq_(FeedCollection.objects.get(id=obj.id).color, self.color_name)
    def test_no_background_color(self):
        # Rows without a background color are left untouched.
        obj = FeedCollection.objects.create()
        eq_(obj.background_color, None)
        eq_(obj.color, None)
        call_command('migrate_collection_colors')
        eq_(obj.background_color, None)
        eq_(obj.color, None)
    def test_invalid_background_color(self):
        # Hex values with no named mapping do not set `color`.
        obj = FeedCollection.objects.create(background_color='#000000')
        eq_(obj.color, None)
        call_command('migrate_collection_colors')
        eq_(obj.color, None)
| ingenioustechie/zamboni | mkt/feed/tests/test_commands.py | Python | bsd-3-clause | 1,374 |
# Fixture module for `from ... import *` type-inference tests: provides one
# class, one function, and one constant — presumably star-imported by a
# companion test file; confirm against the test suite before changing.
class Class(object):
    pass
def func():
    return 3.14
CONSTANT = 42
| retoo/pystructure | tests/python/typeinference/import_star_definitions.py | Python | lgpl-2.1 | 74 |
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from oslo_utils import units
from django.core.urlresolvers import reverse
from django.core.urlresolvers import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import tables
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.images.images import views
from openstack_dashboard.dashboards.admin.images import forms as project_forms
from openstack_dashboard.dashboards.admin.images \
import tables as project_tables
LOG = logging.getLogger(__name__)
class IndexView(tables.DataTableView):
    """Admin images table: paginated image listing annotated with owner names."""
    table_class = project_tables.AdminImagesTable
    template_name = 'admin/images/index.html'
    page_title = _("Images")
    def has_prev_data(self, table):
        return self._prev
    def has_more_data(self, table):
        return self._more
    def get_data(self):
        """Fetch a page of images from Glance and attach tenant names."""
        images = []
        filters = self.get_filters()
        # A "prev" marker means we are paging backwards; Glance only pages
        # forward, so flip the sort direction and re-sort afterwards.
        prev_marker = self.request.GET.get(
            project_tables.AdminImagesTable._meta.prev_pagination_param, None)
        if prev_marker is not None:
            sort_dir = 'asc'
            marker = prev_marker
        else:
            sort_dir = 'desc'
            marker = self.request.GET.get(
                project_tables.AdminImagesTable._meta.pagination_param, None)
        try:
            images, self._more, self._prev = api.glance.image_list_detailed(
                self.request,
                marker=marker,
                paginate=True,
                filters=filters,
                sort_dir=sort_dir)
            if prev_marker is not None:
                # Restore newest-first ordering after the ascending fetch.
                images = sorted(images, key=lambda image:
                                getattr(image, 'created_at'), reverse=True)
        except Exception:
            self._prev = False
            self._more = False
            msg = _('Unable to retrieve image list.')
            exceptions.handle(self.request, msg)
        if images:
            # Map owner tenant IDs to human-readable project names;
            # degrade gracefully if Keystone is unavailable.
            try:
                tenants, more = api.keystone.tenant_list(self.request)
            except Exception:
                tenants = []
                msg = _('Unable to retrieve project list.')
                exceptions.handle(self.request, msg)
            tenant_dict = dict([(t.id, t.name) for t in tenants])
            for image in images:
                image.tenant_name = tenant_dict.get(image.owner)
        return images
    def get_filters(self):
        """Translate the table's filter field/string into Glance API filters."""
        filters = {'is_public': None}
        filter_field = self.table.get_filter_field()
        filter_string = self.table.get_filter_string()
        filter_action = self.table._meta._filter_action
        if filter_field and filter_string and (
                filter_action.is_api_filter(filter_field)):
            if filter_field in ['size_min', 'size_max']:
                invalid_msg = ('API query is not valid and is ignored: %s=%s'
                               % (filter_field, filter_string))
                try:
                    # User input is in MB; the API expects bytes.
                    # NOTE(review): `long` is Python 2-only.
                    filter_string = long(float(filter_string) * (units.Mi))
                    if filter_string >= 0:
                        filters[filter_field] = filter_string
                    else:
                        LOG.warning(invalid_msg)
                except ValueError:
                    LOG.warning(invalid_msg)
            elif (filter_field == 'disk_format' and
                    filter_string.lower() == 'docker'):
                # Docker images are stored as raw with a docker container format.
                filters['disk_format'] = 'raw'
                filters['container_format'] = 'docker'
            else:
                filters[filter_field] = filter_string
        return filters
class CreateView(views.CreateView):
    """Admin variant of the image-creation view (admin form and URLs)."""
    template_name = 'admin/images/create.html'
    form_class = project_forms.AdminCreateImageForm
    submit_url = reverse_lazy('horizon:admin:images:create')
    success_url = reverse_lazy('horizon:admin:images:index')
    page_title = _("Create An Image")
class UpdateView(views.UpdateView):
    """Admin variant of the image-update view (admin form and URLs)."""
    template_name = 'admin/images/update.html'
    form_class = project_forms.AdminUpdateImageForm
    submit_url = "horizon:admin:images:update"
    success_url = reverse_lazy('horizon:admin:images:index')
    page_title = _("Update Image")
class DetailView(views.DetailView):
    """Admin image detail view: points the index URL and row actions at the admin table."""
    def get_context_data(self, **kwargs):
        context = super(DetailView, self).get_context_data(**kwargs)
        table = project_tables.AdminImagesTable(self.request)
        context["url"] = reverse('horizon:admin:images:index')
        context["actions"] = table.render_row_actions(context["image"])
        return context
| Athrun29/horizon | openstack_dashboard/dashboards/admin/images/views.py | Python | apache-2.0 | 5,320 |
from typing import Any, Dict
from ..utils.projector import register_projector_element
# Important: All functions have to be prune. This means, that thay can only
# access the data, that they get as argument and do not have any
# side effects. They are called from an async context. So they have
# to be fast!
def user(
    element: Dict[str, Any], all_data: Dict[str, Dict[int, Dict[str, Any]]]
) -> Dict[str, Any]:
    """
    Render the user slide.

    Not implemented yet: always reports an error payload regardless of the
    requested element or the projector data.
    """
    placeholder = {"error": "TODO"}
    return placeholder
def register_projector_elements() -> None:
    """Register all projector elements of the users app."""
    register_projector_element("users/user", user)
| boehlke/OpenSlides | openslides/users/projector.py | Python | mit | 611 |
# -*- coding: utf-8 -*-
# Copyright 2014 Foxdog Studios
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from .server_message import ServerMessage
__all__ = ['ResultMessage']
class ResultMessage(ServerMessage):
    '''The result of a method call, either the return value or an error.
    Either ``error`` or ``result`` may be passed (possibly neither, but not
    both). ``error`` and ``result`` are considered passed even if their value
    is ``None``.
    :param id: The ID passed with the method call.
    :type id: basestring
    :param error: An error thrown by the method or a method-not-found error.
    :type error: ddp.message.server.error
    :param result: The return value of the method, if any.
    '''
    def __init__(self, id, **kwargs):
        super(ResultMessage, self).__init__()
        # Accept no keyword at all, or exactly one of 'error'/'result'.
        # NOTE(review): comparing kwargs.keys() against lists is Python 2-only
        # (dict.keys() returns a view, not a list, in Python 3), as is the
        # basestring check below.
        if kwargs.keys() not in [[], ['error'], ['result']]:
            raise ValueError('Either error or result may be passed, but not '
                             'both.')
        if not isinstance(id, basestring):
            raise ValueError('id must be an instance of basestring.')
        self._id = id
        # Track presence separately from value, since None is a legal
        # error/result payload.
        self._has_error = 'error' in kwargs
        self._error = kwargs.get('error')
        self._has_result = 'result' in kwargs
        self._result = kwargs.get('result')
    def __eq__(self, other):
        if isinstance(other, ResultMessage):
            return (self._id == other._id
                    and self._has_error == other._has_error
                    and self._error == other._error
                    and self._has_result == other._has_result
                    and self._result == other._result)
        return super(ResultMessage, self).__eq__(other)
    def __str__(self):
        # Only include the error/result parts that were actually passed.
        parts = ['ResultMessage(', repr(self._id)]
        if self._has_error:
            parts += [', error=', repr(self._error)]
        if self._has_result:
            parts += [', result=', repr(self._result)]
        parts.append(')')
        return ''.join(parts)
    @property
    def id(self):
        return self._id
    @property
    def error(self):
        return self._error
    @property
    def result(self):
        return self._result
    def has_error(self):
        '''Does the result carry an error?
        :retruns: True if the result carries an error and False otherwise.
        :rtype: bool
        '''
        return self._has_error
    def has_result(self):
        '''Does the result carry a return value?
        :returns: True if the result carries a return value and False
            otherwise.
        :rtype: bool
        '''
        return self._has_result
| foxdog-studios/pyddp | ddp/messages/server/result_message.py | Python | apache-2.0 | 3,305 |
# -*- mode: python; fill-column: 100; comment-column: 100; -*-
import unittest
import sys
import os
sys.path.append(
os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)))
import base_test
class RefreshPageTest(base_test.WebDriverBaseTest):
    """WebDriver refresh behaviour: static pages reload identically,
    dynamic pages do not."""

    # Get a static page that must be the same upon refresh
    def test_refreshPage(self):
        # Static page: mutate the DOM, refresh, and expect the original body back.
        self.driver.get(
            self.webserver.where_is('navigation/res/refreshPageStatic.html'))
        body = self.driver.find_element_by_css("body").get_text()
        self.driver.execute_script(
            "document.getElementById('body').innerHTML=''")
        self.driver.refresh()
        newbody = self.driver.find_element_by_css("body").get_text()
        self.assertEqual(body, newbody)
        # Dynamic page: a refresh should produce different body content.
        # NOTE(review): this is a second, independent scenario — it would be
        # clearer as its own test method.
        self.driver.get(
            self.webserver.where_is('navigation/res/refreshPageDynamic.html'))
        body = self.driver.find_element_by_css("body").get_text()
        self.driver.refresh()
        newbody = self.driver.find_element_by_css("body").get_text()
        self.assertNotEqual(body, newbody)
# Allow running the suite directly as a script.
if __name__ == '__main__':
    unittest.main()
| chunywang/crosswalk-test-suite | misc/webdriver-w3c-tests/navigation/refresh-page.py | Python | bsd-3-clause | 1,126 |
"""
Table for storing information about whether or not Studio users have course creation privileges.
"""
from django.contrib.auth.models import User # lint-amnesty, pylint: disable=imported-auth-user
from django.db import models
from django.db.models.signals import post_init, post_save
from django.dispatch import Signal, receiver
from django.utils import timezone
from django.utils.translation import gettext_lazy as _
from organizations.models import Organization
# A signal sent when users should be added to or removed from the creator group
# providing_args=["caller", "user", "state", "organizations"]
update_creator_state = Signal()
# A signal sent when admins should be notified of a pending user request
# providing_args=["user"]
send_admin_notification = Signal()
# A signal sent when a user should be notified of a change in their course
# creator privileges
# providing_args=["user", "state"]
send_user_notification = Signal()
class CourseCreator(models.Model):
    """
    Tracks whether a Studio user may create courses, as a small state
    machine: unrequested -> pending -> granted/denied.
    .. no_pii:
    """
    UNREQUESTED = 'unrequested'
    PENDING = 'pending'
    GRANTED = 'granted'
    DENIED = 'denied'
    # Second value is the "human-readable" version.
    STATES = (
        (UNREQUESTED, _('unrequested')),
        (PENDING, _('pending')),
        (GRANTED, _('granted')),
        (DENIED, _('denied')),
    )
    user = models.OneToOneField(User, help_text=_("Studio user"), on_delete=models.CASCADE)
    # Updated by post_save_callback only when `state` or `all_organizations`
    # actually changes (not for note edits).
    state_changed = models.DateTimeField('state last updated', auto_now_add=True,
                                         help_text=_("The date when state was last updated"))
    state = models.CharField(max_length=24, blank=False, choices=STATES, default=UNREQUESTED,
                             help_text=_("Current course creator state"))
    note = models.CharField(max_length=512, blank=True, help_text=_("Optional notes about this user (for example, "
                                                                    "why course creation access was denied)"))
    # Only consulted when all_organizations is False.
    organizations = models.ManyToManyField(Organization, blank=True,
                                           help_text=_("Organizations under which the user is allowed "
                                                       "to create courses."))
    all_organizations = models.BooleanField(default=True,
                                            help_text=_("Grant the user the permission to create courses "
                                                        "in ALL organizations"))
    def __str__(self):
        return f"{self.user} | {self.state} [{self.state_changed}]"
@receiver(post_init, sender=CourseCreator)
def post_init_callback(sender, **kwargs):  # lint-amnesty, pylint: disable=unused-argument
    """
    Capture the loaded state and all_organizations flag on the instance so
    post_save_callback can detect whether they changed.
    """
    instance = kwargs['instance']
    instance.orig_state = instance.state
    instance.orig_all_organizations = instance.all_organizations
@receiver(post_save, sender=CourseCreator)
def post_save_callback(sender, **kwargs):
    """
    Extend to update state_changed time and fire event to update course creator group, if appropriate.
    """
    instance = kwargs['instance']
    # We only wish to modify the state_changed time if the state has been modified. We don't wish to
    # modify it for changes to the notes field.
    # We need to keep track of all_organization switch. If this switch is changed we are going to remove the
    # Course Creator group.
    if instance.state != instance.orig_state or instance.all_organizations != instance.orig_all_organizations:
        granted_state_change = instance.state == CourseCreator.GRANTED or instance.orig_state == CourseCreator.GRANTED  # pylint: disable=consider-using-in
        # If either old or new state is 'granted', we must manipulate the course creator
        # group maintained by authz. That requires staff permissions (stored admin).
        if granted_state_change:
            assert hasattr(instance, 'admin'), 'Must have stored staff user to change course creator group'
            update_creator_state.send(
                sender=sender,
                caller=instance.admin,
                user=instance.user,
                state=instance.state,
                all_organizations=instance.all_organizations
            )
        # If user has been denied access, granted access, or previously granted access has been
        # revoked, send a notification message to the user.
        if instance.state == CourseCreator.DENIED or granted_state_change:
            send_user_notification.send(
                sender=sender,
                user=instance.user,
                state=instance.state
            )
        # If the user has gone into the 'pending' state, send a notification to interested admin.
        if instance.state == CourseCreator.PENDING:
            send_admin_notification.send(
                sender=sender,
                user=instance.user
            )
        instance.state_changed = timezone.now()
        # Sync orig_* before saving: the save() below re-enters this handler,
        # but the state == orig_state comparison is then false, so the
        # recursion stops after one extra (no-op) invocation.
        instance.orig_state = instance.state
        instance.orig_all_organizations = instance.all_organizations
        instance.save()
| edx/edx-platform | cms/djangoapps/course_creators/models.py | Python | agpl-3.0 | 5,188 |
from django.conf.urls import patterns, include, url
from django.contrib import admin
# Root URLconf: Django admin plus the three site apps.
# NOTE(review): the catch-all r'^' include precedes the gunebakan/saglamdis
# prefixes; consider listing the more specific prefixes first for clarity.
urlpatterns = patterns('',
    # Examples:
    # url(r'^$', 'web.views.home', name='home'),
    # url(r'^blog/', include('blog.urls')),
    url(r'^admin/', include(admin.site.urls)),
    url(r'^', include('ufakisler.urls', namespace="ufakisler")),
    url(r'^gunebakan/', include('gunebakan.urls', namespace="gunebakan")),
    url(r'^saglamdis/', include('saglamdis.urls', namespace="saglamdis")),
)
| bgunebakan/ufakisler.net | web/web/urls.py | Python | gpl-2.0 | 487 |
#!/usr/bin/python
''' print generic info about device '''
from onepk_helper import NetworkDevice
import yaml
import sys
def main(args):
    '''Connect to each device described in the YAML inventory file
    (args[0]) and print its system name, product ID, and serial number.'''
    # NOTE(review): yaml.load without a Loader executes arbitrary YAML tags —
    # unsafe on untrusted input; `file()` and print statements are Python 2-only.
    rtrs = yaml.load(file(args[0]).read())
    for rtr in rtrs:
        ndev = NetworkDevice(**rtr)
        ndev.connect()
        print "Connection established to: %s" % ndev.net_element.properties.sys_name
        print ndev.net_element.properties.product_id
        print ndev.net_element.properties.SerialNo
        ndev.disconnect()
# Script entry point; exits 1 on Ctrl-C. NOTE(review): `except ..., e` and
# `print >>` are Python 2-only syntax.
if __name__ == '__main__':
    try:
        sys.exit(main(sys.argv[1:]))
    except KeyboardInterrupt, e:
        print >> sys.stderr, "Exiting on user request.\n"
        sys.exit(1)
| sfromm/pyn | 6-1.py | Python | mit | 680 |
#!/usr/bin/python
#
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example gets all custom fields that apply to line items.
To create custom fields, run create_custom_fields.py.
Tags: CustomFieldService.getCustomFieldsByStatement
"""
__author__ = 'api.shamjeff@gmail.com (Jeff Sham)'
# Locate the client library. If module was installed via "setup.py" script, then
# the following two lines are not needed.
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle import DfpClient
from adspygoogle.dfp import DfpUtils
def main(client):
  """Fetch and print all custom fields that apply to line items."""
  # Initialize appropriate service.
  custom_field_service = client.GetService(
      'CustomFieldService', version='v201211')
  # Create statement to select only custom fields that apply to line items.
  values = [{
      'key': 'entityType',
      'value': {
          'xsi_type': 'TextValue',
          'value': 'LINE_ITEM'
      }
  }]
  query = 'WHERE entityType = :entityType'
  # Get custom fields by statement.
  custom_fields = DfpUtils.GetAllEntitiesByStatementWithService(
      custom_field_service, query=query, bind_vars=values)
  # Display results.
  for custom_field in custom_fields:
    print ('Custom field with ID \'%s\' and name \'%s\' was found.'
           % (custom_field['id'], custom_field['name']))
  print
  print 'Number of results found: %s' % len(custom_fields)
# Script entry point: build a DfpClient from credentials located relative to
# this example's directory.
if __name__ == '__main__':
  # Initialize client object.
  dfp_client = DfpClient(path=os.path.join('..', '..', '..', '..', '..'))
  main(dfp_client)
| donspaulding/adspygoogle | examples/adspygoogle/dfp/v201211/custom_field_service/get_all_line_item_custom_fields.py | Python | apache-2.0 | 2,149 |
#!/usr/bin/env python
"""Builds a python package sdist release for upload to pypi.
Usage:
python tools/release.py
"""
import subprocess as sp
import os
import tempfile
import shutil
def run(cmd, capture=False, **kwargs):
    """Echo and execute *cmd*.

    When *capture* is true the child's stdout is collected and returned as a
    UTF-8 decoded string (nothing is returned for empty output). Any extra
    keyword arguments are forwarded to ``subprocess.Popen``. Raises
    ``RuntimeError`` if the command exits with a non-zero status.
    """
    print(" ".join(cmd))
    stdout_target = sp.PIPE if capture else None
    process = sp.Popen(cmd, stdout=stdout_target, **kwargs)
    out, _ = process.communicate()
    status = process.wait()
    if status != 0:
        raise RuntimeError("command exited with code {}".format(status))
    if out:
        return out.decode('utf-8')
# Convert the README to rst (pandoc must be on PATH)
readme = run(["pandoc", "--from", "markdown", "--to", "rst", "README.md"], capture=True)
print("Writing README")
with open("README", "w") as fh:
    fh.write(readme)
# Build the source distribution package
run(["python", "setup.py", "sdist"])
# Get the version number
version = run(["python", "setup.py", "--version"], capture=True).rstrip()
# Get the current directory
currdir = os.getcwd()
# Create a temporary conda environment
tempdir = tempfile.mkdtemp()
condadir = os.path.join(tempdir, "conda")
run(["conda", "create", "-y", "-p", condadir, "python=3"])
env = os.environ.copy()
# Put the fresh environment's bin first so pip/python resolve inside it.
env['PATH'] = "{}:{}".format(os.path.join(condadir, "bin"), env['PATH'])
# Install nbgrader into the temporary conda environment and run the tests;
# always restore the working directory and remove the environment afterwards.
try:
    os.chdir(tempdir)
    run(["pip", "install", "-r", os.path.join(currdir, "dev-requirements.txt")], env=env)
    run(["pip", "install", os.path.join(currdir, "dist", "nbgrader-{}.tar.gz".format(version))], env=env)
    run(["python", "-m", "nbgrader.tests", "-v", "-x"], env=env)
finally:
    os.chdir(currdir)
    shutil.rmtree(tempdir)
| ellisonbg/nbgrader | tools/release.py | Python | bsd-3-clause | 1,690 |
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This script is used to synthesize generated parts of this library."""
import synthtool as s
from synthtool import gcp
gapic = gcp.GAPICGenerator()
#----------------------------------------------------------------------------
# Generate firestore client
#----------------------------------------------------------------------------
library = gapic.py_library(
    'firestore',
    'v1beta1',
    config_path='/google/firestore/artman_firestore.yaml',
    artman_output_name='firestore-v1beta1')
# Copy only the generated proto/gapic modules and their unit tests into
# this repository; the rest of the generated tree is discarded.
s.move(library / 'google/cloud/firestore_v1beta1/proto')
s.move(library / 'google/cloud/firestore_v1beta1/gapic')
s.move(library / 'tests/unit/gapic/v1beta1')
# Rewrite the generated tests to import the gapic client module directly
# instead of the handwritten top-level package.
s.replace(
    'tests/unit/gapic/v1beta1/test_firestore_client_v1beta1.py',
    'from google.cloud import firestore_v1beta1',
    'from google.cloud.firestore_v1beta1.gapic import firestore_client',
)
s.replace(
    'tests/unit/gapic/v1beta1/test_firestore_client_v1beta1.py',
    'client = firestore_v1beta1.FirestoreClient',
    'client = firestore_client.FirestoreClient',
)
| jonparrott/google-cloud-python | firestore/synth.py | Python | apache-2.0 | 1,615 |
##
# Copyright (C) 2014 Jessica Tallon & Matt Molyneaux
#
# This file is part of Inboxen.
#
# Inboxen is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Inboxen is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Inboxen. If not, see <http://www.gnu.org/licenses/>.
##
from django.conf import urls
from inboxen.blog import views
# URL routes for the blog app: individual posts by slug, Atom and RSS feeds,
# and the post listing (with and without an explicit page number).
urlpatterns = [
    urls.re_path(r'^post/(?P<slug>[-\w]+)/$', views.BlogDetailView.as_view(), name='blog-post'),
    urls.re_path(r'^feed/atom/$', views.AtomFeed(), name='blog-feed-atom'),
    urls.re_path(r'^feed/(rss/)?$', views.RssFeed(), name='blog-feed-rss'),
    urls.re_path(r'^(?P<page>\d*)/$', views.BlogListView.as_view(), name='blog'),
    urls.re_path(r'^$', views.BlogListView.as_view(), name='blog'),
]
| Inboxen/Inboxen | inboxen/blog/urls.py | Python | agpl-3.0 | 1,251 |
"""
Support for loading data into an HP Vertica database.
"""
import json
import logging
import luigi
import luigi.configuration
from edx.analytics.tasks.url import ExternalURL
from edx.analytics.tasks.util.overwrite import OverwriteOutputMixin
from edx.analytics.tasks.util.vertica_target import VerticaTarget
log = logging.getLogger(__name__)
try:
import vertica_python
vertica_client_available = True # pylint: disable-msg=C0103
except ImportError:
log.warn('Unable to import Vertica client libraries')
# On hadoop slave nodes we don't have Vertica client libraries installed so it is pointless to ship this package to
# them, instead just fail noisily if we attempt to use these libraries.
vertica_client_available = False # pylint: disable-msg=C0103
class VerticaCopyTaskMixin(OverwriteOutputMixin):
    """
    Parameters for copying a database into Vertica.
    credentials: Path to the external access credentials file.
    schema: The schema to which to write.
    insert_chunk_size: The number of rows to insert at a time.
    """
    # NOTE(review): the docstring above mentions insert_chunk_size, but no such
    # parameter is defined in this mixin -- confirm whether it was removed or
    # is expected to be supplied by a subclass.
    # Both parameters default from the [vertica-export] config section.
    schema = luigi.Parameter(
        config_path={'section': 'vertica-export', 'name': 'schema'}
    )
    credentials = luigi.Parameter(
        config_path={'section': 'vertica-export', 'name': 'credentials'}
    )
class VerticaCopyTask(VerticaCopyTaskMixin, luigi.Task):
    """
    A task for copying into a Vertica database.
    Note that the default behavior if overwrite is true is to first delete the existing
    contents of the table being written to and then delete the entire history of table
    updates corresponding to writes to that table, not just table updates with the same
    update id (i.e. updates corresponding to writes done by a task with the same exact
    task name and set of parameters).
    Overwrite init_copy and init_touch if you want a different overwrite behavior in a
    subclass.
    """
    # Per-instance caches so requires() and output() build their objects once.
    required_tasks = None
    output_target = None
    def requires(self):
        # Requirements are the credentials file plus the task that produces the
        # rows to bulk-load (insert_source_task, supplied by subclasses).
        if self.required_tasks is None:
            self.required_tasks = {
                'credentials': ExternalURL(url=self.credentials),
                'insert_source': self.insert_source_task
            }
        return self.required_tasks
    @property
    def insert_source_task(self):
        """Defines the task that provides source of data for Vertica bulk loading."""
        raise NotImplementedError
    @property
    def table(self):
        """Provides the name of the database table."""
        raise NotImplementedError
    @property
    def columns(self):
        """
        Provides definition of columns. If only writing to existing tables, then columns() need only provide a list of
        names.
        If also needing to create the table, then columns() should define a list of (name, definition) tuples.
        For example, ('first_name', 'VARCHAR(255)').
        """
        raise NotImplementedError
    @property
    def auto_primary_key(self):
        """Tuple defining name and definition of an auto-incrementing primary key, or None."""
        return ('id', 'AUTO_INCREMENT')
    @property
    def foreign_key_mapping(self):
        """Dictionary of column_name: (schema.table, column) pairs representing foreign key constraints."""
        return {}
    @property
    def default_columns(self):
        """List of tuples defining name and definition of automatically-filled columns."""
        return [('created', 'TIMESTAMP DEFAULT NOW()')]
    def create_schema(self, connection):
        """
        Override to provide code for creating the target schema, if not existing.
        By default it will be created using types (optionally) specified in columns.
        If overridden, use the provided connection object for setting
        up the schema in order to create the schema and insert data
        using the same transaction.
        """
        query = "CREATE SCHEMA IF NOT EXISTS {schema}".format(schema=self.schema)
        log.debug(query)
        connection.cursor().execute(query)
    def create_column_definitions(self):
        """
        Builds the list of column definitions for the table to be loaded.
        Assumes that columns are specified as (name, definition) tuples.
        :return a string to be used in a SQL query to create the table
        """
        columns = []
        # The auto-increment key (if any) leads the column list; its matching
        # PRIMARY KEY clause is appended after the default columns below.
        if self.auto_primary_key is not None:
            columns.append(self.auto_primary_key)
        columns.extend(self.columns)
        if self.default_columns is not None:
            columns.extend(self.default_columns)
        if self.auto_primary_key is not None:
            columns.append(("PRIMARY KEY", "({name})".format(name=self.auto_primary_key[0])))
        coldefs = ','.join(
            '{name} {definition}'.format(name=name, definition=definition) for name, definition in columns
        )
        return coldefs
    def create_table(self, connection):
        """
        Override to provide code for creating the target table, if not existing.
        Requires the schema to exist first.
        By default it will be created using types (optionally) specified in columns.
        If overridden, use the provided connection object for setting
        up the table in order to create the table and insert data
        using the same transaction.
        """
        if len(self.columns[0]) != 2:
            # only names of columns specified, no types
            raise NotImplementedError(
                "create_table() not implemented for %r and columns types not specified"
                % self.table
            )
        # Assumes that columns are specified as (name, definition) tuples
        coldefs = self.create_column_definitions()
        foreign_key_defs = ''
        for column in self.foreign_key_mapping:
            foreign_key_defs += ", FOREIGN KEY ({col}) REFERENCES {other_schema_and_table} ({other_col})".format(
                col=column, other_schema_and_table=self.foreign_key_mapping[column][0],
                other_col=self.foreign_key_mapping[column][1]
            )
        query = "CREATE TABLE IF NOT EXISTS {schema}.{table} ({coldefs}{foreign_key_defs})".format(
            schema=self.schema, table=self.table, coldefs=coldefs, foreign_key_defs=foreign_key_defs
        )
        log.debug(query)
        connection.cursor().execute(query)
    def update_id(self):
        """This update id will be a unique identifier for this insert on this table."""
        # For MySQL tasks, we take the hash of the task id, but since Vertica does not similarly
        # limit the size of columns, we can safely use the entire task ID.
        return str(self)
    def output(self):
        """
        Returns a VerticaTarget representing the inserted dataset.
        Normally you don't override this.
        """
        if self.output_target is None:
            self.output_target = CredentialFileVerticaTarget(
                credentials_target=self.input()['credentials'],
                table=self.table,
                schema=self.schema,
                update_id=self.update_id()
            )
        return self.output_target
    def init_copy(self, connection):
        """
        Override to perform custom queries.
        Because attempting a DELETE will give this transaction an exclusive
        lock on the marker table (Vertica does not support row-level locking),
        the pre-copy initialization only overwrites the table to be overwritten
        and not the relevant rows of the marker table, which are deleted
        immediately before the marker table is marked with success for this
        task. This way, an exclusive lock on the marker table is held for only
        a very brief duration instead of the entire course of the data copy,
        which might in general take longer than the 5 minutes that Vertica is
        willing to wait for a lock before giving up and throwing an error.
        Any code here will be formed in the same transaction as the
        main copy, just prior to copying data. Example use cases
        include truncating the table or removing all data older than X
        in the database to keep a rolling window of data available in
        the table.
        Note that this method acquires an exclusive (X) lock on the table
        this task is going to write to for the remainder of the transaction
        (i.e. until a commit or rollback).
        """
        # clear table contents
        # NOTE(review): attempted_removal appears to be a flag consumed by
        # OverwriteOutputMixin's completion logic -- confirm against that mixin.
        self.attempted_removal = True
        if self.overwrite:
            # Use "DELETE" instead of TRUNCATE since TRUNCATE forces an implicit commit before it executes which would
            # commit the currently open transaction before continuing with the copy.
            query = "DELETE FROM {schema}.{table}".format(schema=self.schema, table=self.table)
            log.debug(query)
            connection.cursor().execute(query)
        # vertica-python and its maintainers intentionally avoid supporting open
        # transactions like we do when self.overwrite=True (DELETE a bunch of rows
        # and then COPY some), per https://github.com/uber/vertica-python/issues/56.
        # The DELETE commands in this method will cause the connection to see some
        # messages that will prevent it from trying to copy any data (if the cursor
        # successfully executes the DELETEs), so we flush the message buffer.
        connection.cursor().flush_to_query_ready()
    def init_touch(self, connection):
        """
        Clear the relevant rows from the marker table before touching
        it to denote that the task has been completed.
        Note that this method acquires an exclusive (X) lock on the
        marker table for the remainder of the transaction (i.e. until
        a commit or rollback).
        """
        if self.overwrite:
            # Clear the appropriate rows from the luigi Vertica marker table
            marker_table = self.output().marker_table  # side-effect: sets self.output_target if it's None
            try:
                query = "DELETE FROM {schema}.{marker_table} where target_table='{schema}.{target_table}';".format(
                    schema=self.schema,
                    marker_table=marker_table,
                    target_table=self.table,
                )
                log.debug(query)
                connection.cursor().execute(query)
            except vertica_python.errors.Error as err:
                # Tolerate a missing marker table: nothing to clear on first run.
                if (type(err) is vertica_python.errors.MissingRelation) or ('Sqlstate: 42V01' in err.args[0]):
                    # If so, then our query error failed because the table doesn't exist.
                    pass
                else:
                    raise
        # vertica-python and its maintainers intentionally avoid supporting open
        # transactions like we do when self.overwrite=True (DELETE a bunch of rows
        # and then COPY some), per https://github.com/uber/vertica-python/issues/56.
        # The DELETE commands in this method will cause the connection to see some
        # messages that will prevent it from trying to copy any data (if the cursor
        # successfully executes the DELETEs), so we flush the message buffer.
        connection.cursor().flush_to_query_ready()
    @property
    def copy_delimiter(self):
        """The delimiter in the data to be copied. Default is tab (\t)"""
        return "E'\t'"
    @property
    def copy_null_sequence(self):
        """The null sequence in the data to be copied. Default is Hive NULL (\\N)"""
        return "'\\N'"
    def copy_data_table_from_target(self, cursor):
        """Performs the copy query from the insert source."""
        # NOTE: `basestring` makes this module Python-2-only as written.
        if isinstance(self.columns[0], basestring):
            column_names = ','.join([name for name in self.columns])
        elif len(self.columns[0]) == 2:
            column_names = ','.join([name for name, _type in self.columns])
        else:
            raise Exception('columns must consist of column strings or '
                            '(column string, type string) tuples (was %r ...)'
                            % (self.columns[0],))
        with self.input()['insert_source'].open('r') as insert_source_file:
            log.debug("Running stream copy from source file")
            # DIRECT writes straight to disk (ROS); NO COMMIT keeps the COPY in
            # the enclosing transaction so run() can commit/rollback atomically.
            cursor.copy(
                "COPY {schema}.{table} ({cols}) FROM STDIN DELIMITER AS {delim} NULL AS {null} DIRECT NO COMMIT;".format(
                    schema=self.schema,
                    table=self.table,
                    cols=column_names,
                    delim=self.copy_delimiter,
                    null=self.copy_null_sequence
                ),
                insert_source_file
            )
    def run(self):
        """
        Inserts data generated by the copy command into target table.
        Normally you don't want to override this.
        """
        if not (self.table and self.columns):
            raise Exception("table and columns need to be specified")
        self.check_vertica_availability()
        connection = self.output().connect()
        try:
            # create schema and table only if necessary:
            self.create_schema(connection)
            self.create_table(connection)
            # The order below is deliberate: clear the target table, COPY the
            # data, then clear + touch the marker table, all in one transaction.
            self.init_copy(connection)
            cursor = connection.cursor()
            self.copy_data_table_from_target(cursor)
            # mark as complete in same transaction
            self.init_touch(connection)
            self.output().touch(connection)
            # We commit only if both operations completed successfully.
            connection.commit()
            log.debug("Committed transaction.")
        except Exception as exc:
            log.debug("Rolled back the transaction; exception raised: %s", str(exc))
            connection.rollback()
            raise
        finally:
            connection.close()
    def check_vertica_availability(self):
        """Call to ensure fast failure if this machine doesn't have the Vertica client library available."""
        if not vertica_client_available:
            raise ImportError('Vertica client library not available')
class CredentialFileVerticaTarget(VerticaTarget):
    """
    Represents a table in Vertica, is complete when the update_id is the same as a previous successful execution.
    Arguments:
        credentials_target (luigi.Target): A target that can be read to retrieve the hostname, port and user credentials
            that will be used to connect to the database.
        database_name (str): The name of the database that the table exists in. Note this database need not exist.
        schema (str): The name of the schema in which the table being modified lies.
        table (str): The name of the table in the schema that is being modified.
        update_id (str): A unique identifier for this update to the table. Subsequent updates with identical update_id
            values will not be executed.
    """
    def __init__(self, credentials_target, schema, table, update_id):
        # Side effect: reads and parses the JSON credentials file at construction.
        with credentials_target.open('r') as credentials_file:
            cred = json.load(credentials_file)
            super(CredentialFileVerticaTarget, self).__init__(
                # Annoying, but the port must be passed in with the host string...
                host="{host}:{port}".format(host=cred.get('host'), port=cred.get('port', 5433)),
                user=cred.get('username'),
                password=cred.get('password'),
                schema=schema,
                table=table,
                update_id=update_id
            )
    def exists(self, connection=None):
        # The parent class fails if the database does not exist. This override tolerates that error.
        # Treat "database/marker table missing" as simply "not yet complete".
        try:
            return super(CredentialFileVerticaTarget, self).exists(connection=connection)
        except vertica_python.errors.ProgrammingError:
            return False
| open-craft/edx-analytics-pipeline | edx/analytics/tasks/vertica_load.py | Python | agpl-3.0 | 15,908 |
from __future__ import absolute_import
from djaxelrod.celery import app as celery_app
| Axelrod-Python/DjAxelrod | djaxelrod/__init__.py | Python | mit | 86 |
# coding=utf-8
# Copyright 2022 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Creates a RevNet with the bottleneck residual function.
Implements the following equations described in the RevNet paper:
y1 = x1 + f(x2)
y2 = x2 + g(y1)
However, in practice, the authors use the following equations to downsample
tensors inside a RevNet block:
y1 = h(x1) + f(x2)
y2 = h(x2) + g(y1)
In this case, h is the downsampling function used to change number of channels.
These modified equations are evident in the authors' code online:
https://github.com/renmengye/revnet-public
For reference, the original paper can be found here:
https://arxiv.org/pdf/1707.04585.pdf
"""
import functools
from tensor2tensor.layers import common_hparams
from tensor2tensor.utils import contrib
from tensor2tensor.utils import registry
from tensor2tensor.utils import t2t_model
import tensorflow.compat.v1 as tf
def wrapped_partial(fn, *args, **kwargs):
  """Like functools.partial, but copies fn's metadata onto the partial.

  update_wrapper transfers __name__, __doc__, etc. from `fn` to the partial
  object so the result is introspectable like the original function.
  """
  return functools.update_wrapper(functools.partial(fn, *args, **kwargs), fn)
# He-style (fan-out) variance-scaling initializer for all conv layers.
conv_initializer = tf.initializers.variance_scaling(
    scale=2.0, mode='fan_out')

# Dimension-specific ops/constants so the model code is written once for both
# 2-D and 3-D inputs. 'split_axis' is the channel axis; 'reduction_dimensions'
# are the spatial axes averaged in the final global pooling.
# BUG FIX: the '3d' entry previously mapped 'avg_pool' to
# tf.layers.average_pooling2d; it now uses average_pooling3d, consistent with
# the other 3-D ops (conv3d, max_pooling3d).
CONFIG = {'2d': {'conv': wrapped_partial(
    tf.layers.conv2d, kernel_initializer=conv_initializer),
                 'max_pool': tf.layers.max_pooling2d,
                 'avg_pool': tf.layers.average_pooling2d,
                 'split_axis': 3,
                 'reduction_dimensions': [1, 2]
                },
          '3d': {'conv': wrapped_partial(
              tf.layers.conv3d, kernel_initializer=conv_initializer),
                 'max_pool': tf.layers.max_pooling3d,
                 'avg_pool': tf.layers.average_pooling3d,
                 'split_axis': 4,
                 'reduction_dimensions': [1, 2, 3]
                }
          }
def f(x, depth1, depth2, dim='2d', first_batch_norm=True, stride=1,
      training=True, bottleneck=True, padding='SAME'):
  """Applies residual function for RevNet.

  Args:
    x: input tensor
    depth1: Number of output channels for the first and second conv layers.
    depth2: Number of output channels for the third conv layer.
    dim: '2d' if 2-dimensional, '3d' if 3-dimensional.
    first_batch_norm: Whether to keep the first batch norm layer or not.
      Typically used in the first RevNet block.
    stride: Stride for the first conv filter. Note that this particular
      RevNet architecture only varies the stride for the first conv
      filter. The stride for the second conv filter is always set to 1.
    training: True for train phase, False for eval phase.
    bottleneck: If true, apply bottleneck 1x1 down/up sampling.
    padding: Padding for each conv layer.

  Returns:
    Output tensor after applying residual function for RevNet.
  """
  conv = CONFIG[dim]['conv']
  with tf.variable_scope('f', reuse=tf.AUTO_REUSE):
    # Pre-activation: BN + ReLU before the first conv (skipped in the very
    # first block, which receives already-normalized input).
    if first_batch_norm:
      net = tf.layers.batch_normalization(x, training=training)
      net = tf.nn.relu(net)
    else:
      net = x
    if bottleneck:
      # 1x1 reduce -> 3x3 -> 1x1 expand; only the first conv carries `stride`.
      net = conv(net, depth1, 1, strides=stride,
                 padding=padding, activation=None)
      net = tf.layers.batch_normalization(net, training=training)
      net = tf.nn.relu(net)
      net = conv(net, depth1, 3, strides=1,
                 padding=padding, activation=None)
      net = tf.layers.batch_normalization(net, training=training)
      net = tf.nn.relu(net)
      net = conv(net, depth2, 1, strides=1,
                 padding=padding, activation=None)
    else:
      # Plain residual branch: two 3x3 convs.
      net = conv(net, depth2, 3, strides=stride,
                 padding=padding, activation=None)
      # BUG FIX: batch norm previously took `x` (the block input), silently
      # discarding the first conv's output; it must operate on `net`.
      net = tf.layers.batch_normalization(net, training=training)
      net = tf.nn.relu(net)
      # BUG FIX: the second conv must use stride 1 (see docstring); using
      # `stride` here downsampled twice and broke the h(x)+f(x) shape match
      # in unit() whenever stride > 1.
      net = conv(net, depth2, 3, strides=1,
                 padding=padding, activation=None)
    return net
def downsample_bottleneck(x, output_channels, dim='2d', stride=1, scope='h'):
  """Downsamples 'x' by `stride` using a 1x1 convolution filter.
  Args:
    x: input tensor of size [N, H, W, C]
    output_channels: Desired number of output channels.
    dim: '2d' if 2-dimensional, '3d' if 3-dimensional.
    stride: What stride to use. Usually 1 or 2.
    scope: Optional variable scope.
  Returns:
    A downsampled tensor of size [N, H/2, W/2, output_channels] if stride
    is 2, else returns a tensor of size [N, H, W, output_channels] if
    stride is 1.
  """
  conv = CONFIG[dim]['conv']
  with tf.variable_scope(scope):
    # A single 1x1 conv both changes the channel count and, via its stride,
    # performs the spatial downsampling (the RevNet "h" function).
    x = conv(x, output_channels, 1, strides=stride, padding='SAME',
             activation=None)
    return x
def downsample_residual(x, output_channels, dim='2d', stride=1, scope='h'):
  """Downsamples 'x' by `stride` using average pooling.
  Args:
    x: input tensor of size [N, H, W, C]
    output_channels: Desired number of output channels.
    dim: '2d' if 2-dimensional, '3d' if 3-dimensional.
    stride: What stride to use. Usually 1 or 2.
    scope: Optional variable scope.
  Returns:
    A downsampled tensor of size [N, H/2, W/2, output_channels] if stride
    is 2, else returns a tensor of size [N, H, W, output_channels] if
    stride is 1.
  """
  with tf.variable_scope(scope):
    if stride > 1:
      avg_pool = CONFIG[dim]['avg_pool']
      x = avg_pool(x,
                   pool_size=(stride, stride),
                   strides=(stride, stride),
                   padding='VALID')
    # Zero-pad the channel axis up to output_channels (parameter-free
    # projection, in contrast to downsample_bottleneck's 1x1 conv).
    # NOTE(review): the hard-coded axis 3 and the 4-D pad spec assume NHWC
    # with dim='2d'; this branch looks incorrect for '3d' inputs -- confirm.
    input_channels = tf.shape(x)[3]
    diff = output_channels - input_channels
    x = tf.pad(
        x, [[0, 0], [0, 0], [0, 0],
            [diff // 2, diff // 2]])
    return x
def init(images, num_channels, dim='2d', stride=2,
         kernel_size=7, maxpool=True, training=True, scope='init'):
  """Standard ResNet initial block used as first RevNet block.
  Args:
    images: [N, H, W, 3] tensor of input images to the model.
    num_channels: Output depth of convolutional layer in initial block.
    dim: '2d' if 2-dimensional, '3d' if 3-dimensional.
    stride: stride for the convolution and pool layer.
    kernel_size: Size of the initial convolution filter
    maxpool: If true, apply a maxpool after the convolution
    training: True for train phase, False for eval phase.
    scope: Optional scope for the init block.
  Returns:
    Two [N, H, W, C] output activations from input images.
  """
  conv = CONFIG[dim]['conv']
  pool = CONFIG[dim]['max_pool']
  with tf.variable_scope(scope):
    # Conv -> BN -> ReLU (-> optional maxpool), the classic ResNet stem.
    net = conv(images, num_channels, kernel_size, strides=stride,
               padding='SAME', activation=None)
    net = tf.layers.batch_normalization(net, training=training)
    net = tf.nn.relu(net)
    if maxpool:
      net = pool(net, pool_size=3, strides=stride)
    # Split along the channel axis into the two activation streams (x1, x2)
    # that the reversible blocks operate on.
    x1, x2 = tf.split(net, 2, axis=CONFIG[dim]['split_axis'])
    return x1, x2
def unit(x1, x2, block_num, depth, num_layers, dim='2d',
         bottleneck=True, first_batch_norm=True, stride=1, training=True):
  """Implements bottleneck RevNet unit from authors' RevNet architecture.
  Args:
    x1: [N, H, W, C] tensor of network activations.
    x2: [N, H, W, C] tensor of network activations.
    block_num: integer ID of block
    depth: First depth in bottleneck residual unit.
    num_layers: Number of layers in the RevNet block.
    dim: '2d' if 2-dimensional, '3d' if 3-dimensional.
    bottleneck: Should a bottleneck layer be used.
    first_batch_norm: Whether to keep the first batch norm layer or not.
      Typically used in the first RevNet block.
    stride: Stride for the residual function.
    training: True for train phase, False for eval phase.
  Returns:
    Two [N, H, W, C] output activation tensors.
  """
  scope_name = 'unit_%d' % block_num
  if bottleneck:
    depth1 = depth
    depth2 = depth * 4
  else:
    depth1 = depth2 = depth
  residual = wrapped_partial(f,
                             depth1=depth1, depth2=depth2, dim=dim,
                             training=training, bottleneck=bottleneck)
  with tf.variable_scope(scope_name):
    downsample = downsample_bottleneck if bottleneck else downsample_residual
    # Manual implementation of downsampling
    # First layer of the block uses the modified equations
    # y1 = h(x1) + f(x2); y2 = h(x2) + g(y1), so it cannot go through the
    # memory-efficient rev_block and is built explicitly here.
    with tf.variable_scope('downsampling'):
      with tf.variable_scope('x1'):
        hx1 = downsample(x1, depth2, dim=dim, stride=stride)
        fx2 = residual(x2, stride=stride, first_batch_norm=first_batch_norm)
        x1 = hx1 + fx2
      with tf.variable_scope('x2'):
        hx2 = downsample(x2, depth2, dim=dim, stride=stride)
        fx1 = residual(x1)
        x2 = hx2 + fx1
    # Full block using memory-efficient rev_block implementation.
    # rev_block recomputes activations during backprop instead of storing them.
    with tf.variable_scope('full_block'):
      x1, x2 = contrib.layers().rev_block(
          x1, x2, residual, residual, num_layers=num_layers)
      return x1, x2
def final_block(x1, x2, dim='2d', training=True, scope='final_block'):
  """Converts activations from last RevNet block to pre-logits.
  Args:
    x1: [NxHxWxC] tensor of network activations.
    x2: [NxHxWxC] tensor of network activations.
    dim: '2d' if 2-dimensional, '3d' if 3-dimensional.
    training: True for train phase, False for eval phase.
    scope: Optional variable scope for the final block.
  Returns:
    [N, hidden_dim] pre-logits tensor from activations x1 and x2.
  """
  # Final batch norm and relu
  with tf.variable_scope(scope):
    # Re-join the two reversible streams along the channel axis.
    y = tf.concat([x1, x2], axis=CONFIG[dim]['split_axis'])
    y = tf.layers.batch_normalization(y, training=training)
    y = tf.nn.relu(y)
    # Global average pooling
    # NOTE: keep_dims is the deprecated TF1 spelling of keepdims.
    net = tf.reduce_mean(y, CONFIG[dim]['reduction_dimensions'],
                         name='final_pool', keep_dims=True)
    return net
def revnet(inputs, hparams, reuse=None):
  """Uses Tensor2Tensor memory optimized RevNet block to build a RevNet.
  Args:
    inputs: [NxHxWx3] tensor of input images to the model.
    hparams: HParams object that contains the following parameters,
      in addition to the parameters contained in the basic_params1() object in
      the common_hparams module:
      num_channels_first - A Python list where each element represents the
        depth of the first and third convolutional layers in the bottleneck
        residual unit for a given block.
      num_channels_second - A Python list where each element represents the
        depth of the second convolutional layer in the bottleneck residual
        unit for a given block.
      num_layers_per_block - A Python list containing the number of RevNet
        layers for each block.
      first_batch_norm - A Python list containing booleans representing the
        presence of a batch norm layer at the beginning of a given block.
      strides - A Python list containing integers representing the stride of
        the residual function for each block.
      num_channels_init_block - An integer representing the number of channels
        for the convolutional layer in the initial block.
      dimension - A string (either "2d" or "3d") that decides if the RevNet is
        2-dimensional or 3-dimensional.
    reuse: Whether to reuse the default variable scope.
  Returns:
    [batch_size, hidden_dim] pre-logits tensor from the bottleneck RevNet.
  """
  training = hparams.mode == tf.estimator.ModeKeys.TRAIN
  with tf.variable_scope('RevNet', reuse=reuse):
    # Stem block: conv/BN/ReLU (+maxpool), then split into the two streams.
    x1, x2 = init(inputs,
                  num_channels=hparams.num_channels_init_block,
                  dim=hparams.dim,
                  kernel_size=hparams.init_kernel_size,
                  maxpool=hparams.init_maxpool,
                  stride=hparams.init_stride,
                  training=training)
    # Stack one reversible unit per entry in num_layers_per_block.
    for block_num in range(len(hparams.num_layers_per_block)):
      block = {'depth': hparams.num_channels[block_num],
               'num_layers': hparams.num_layers_per_block[block_num],
               'first_batch_norm': hparams.first_batch_norm[block_num],
               'stride': hparams.strides[block_num],
               'bottleneck': hparams.bottleneck}
      x1, x2 = unit(x1, x2, block_num, dim=hparams.dim, training=training,
                    **block)
    # Merge the streams and globally pool down to pre-logits.
    pre_logits = final_block(x1, x2, dim=hparams.dim, training=training)
    return pre_logits
@registry.register_model
class Revnet(t2t_model.T2TModel):
  """RevNet image model: thin T2TModel wrapper around the revnet() builder."""

  def body(self, features):
    images = features['inputs']
    return revnet(images, self.hparams)
def revnet_base():
  """Default hparams for Revnet."""
  hp = common_hparams.basic_params1()
  # Architecture: per-block channel depths, layer counts, strides, etc.
  hp.add_hparam('num_channels', [64, 128, 256, 416])
  hp.add_hparam('num_layers_per_block', [1, 1, 10, 1])
  hp.add_hparam('bottleneck', True)
  hp.add_hparam('first_batch_norm', [False, True, True, True])
  hp.add_hparam('strides', [1, 2, 2, 2])
  # Initial (stem) block configuration.
  hp.add_hparam('init_stride', 2)
  hp.add_hparam('init_kernel_size', 7)
  hp.add_hparam('init_maxpool', True)
  hp.add_hparam('num_channels_init_block', 64)
  hp.add_hparam('dim', '2d')
  # Variable init
  hp.initializer = 'normal_unit_scaling'
  hp.initializer_gain = 2.
  # Optimization
  hp.optimizer = 'Momentum'
  hp.optimizer_momentum_momentum = 0.9
  hp.optimizer_momentum_nesterov = True
  hp.weight_decay = 1e-4
  hp.clip_grad_norm = 0.0
  # (base_lr=0.1) * (batch_size=128*8 (on TPU, or 8 GPUs)=1024) / (256.)
  hp.learning_rate = 0.4
  hp.learning_rate_decay_scheme = 'cosine'
  # For image_imagenet224, 120k training steps, which effectively makes this a
  # cosine decay (i.e. no cycles).
  hp.learning_rate_cosine_cycle_steps = 120000
  # Can run with a batch size of 128 with Problem ImageImagenet224
  hp.batch_size = 128
  return hp
@registry.register_hparams
def revnet_104():
  """RevNet-104: registered alias for the default base configuration."""
  return revnet_base()
def revnet_cifar_base():
  """Tiny hparams suitable for CIFAR/etc."""
  hp = revnet_base()
  # Smaller stem for 32x32 inputs: 3x3 conv, stride 1, no maxpool.
  hp.init_kernel_size = 3
  hp.init_stride = 1
  hp.init_maxpool = False
  hp.num_channels_init_block = 32
  # Three blocks instead of four.
  hp.first_batch_norm = [False, True, True]
  hp.strides = [1, 2, 2]
  # Single-device training settings.
  hp.batch_size = 128
  hp.weight_decay = 1e-4
  hp.learning_rate = 0.1
  hp.learning_rate_cosine_cycle_steps = 5000
  return hp
@registry.register_hparams
def revnet_38_cifar():
  """RevNet-38: small non-bottleneck CIFAR variant."""
  hp = revnet_cifar_base()
  hp.bottleneck = False
  hp.num_channels = [16, 32, 56]
  hp.num_layers_per_block = [2, 2, 2]
  hp.initializer = 'normal_unit_scaling'
  hp.initializer_gain = 1.5
  return hp
@registry.register_hparams
def revnet_110_cifar():
  """RevNet-110: non-bottleneck CIFAR variant (8 layers per block)."""
  hp = revnet_cifar_base()
  hp.bottleneck = False
  hp.num_channels = [16, 32, 64]
  hp.num_layers_per_block = [8, 8, 8]
  return hp
@registry.register_hparams
def revnet_164_cifar():
  """RevNet-164: bottleneck CIFAR variant (8 layers per block)."""
  hp = revnet_cifar_base()
  hp.bottleneck = True
  hp.num_channels = [16, 32, 64]
  hp.num_layers_per_block = [8, 8, 8]
  return hp
@registry.register_ranged_hparams
def revnet_range(rhp):
  """Hyperparameters for tuning revnet."""
  # Log-scale search over learning rate and weight decay; discrete choice
  # over the stem width.
  rhp.set_float('learning_rate', 0.05, 0.2, scale=rhp.LOG_SCALE)
  rhp.set_float('weight_decay', 1e-5, 1e-3, scale=rhp.LOG_SCALE)
  rhp.set_discrete('num_channels_init_block', [64, 128])
  return rhp
| tensorflow/tensor2tensor | tensor2tensor/models/revnet.py | Python | apache-2.0 | 15,682 |
# -*- coding: utf-8 -*-
from uuid import uuid4
from time import sleep
import json
import os
from pyoko.conf import settings
from pyoko.manage import FlushDB, LoadData
from pyoko.lib.utils import pprnt
from pprint import pprint
from zengine.lib.cache import ClearCache
from zengine.lib.exceptions import HTTPError
from zengine.log import log
from zengine.wf_daemon import Worker
from zengine.lib.json_interface import ZEngineJSONEncoder
from zengine.models import User
from zengine.messaging.model import Message
class ResponseWrapper(object):
    """
    Wrapper object for test client's response
    """
    def __init__(self, output):
        # Raw response payload as delivered by the worker.
        self.content = output
        try:
            # NOTE(review): `output` is assigned as-is, which only works if the
            # worker already hands over a parsed dict; the except block (and
            # its "JSON load" message) suggests this was once json.loads(output)
            # -- confirm what type send_output() actually receives.
            self.json = output
            print(self.json)
        except:
            log.exception('ERROR at RWrapper JSON load')
            self.json = {}
        # Convenience accessors pulled out of the response body.
        self.code = self.json.get('code', None)
        self.token = self.json.get('token')
        # Model data of the current form, if the response carries one.
        self.form_data = self.json['forms']['model'] if 'forms' in self.json else {}
        if 'object_key' in self.form_data:
            self.object_key = self.form_data['object_key']
        else:
            self.object_key = self.json.get('object_id', None)
        # Any 4xx/5xx code is surfaced to the test as an HTTPError after
        # dumping the response for debugging.
        if self.code and int(self.code) >= 400:
            self.raw()
            raise HTTPError(self.code,
                            (self.json.get('title', '') +
                             self.json.get('description', '') +
                             self.json.get('error', '')))
    def raw(self):
        """
        Pretty prints the response
        """
        pprint(self.code)
        try:
            pprnt(self.json)
        except TypeError:  # If there is a custom type in the output (i.e. lazy translations)
            print(json.dumps(self.json, cls=ZEngineJSONEncoder, indent=4, sort_keys=True))
        if not self.json:
            pprint(self.content)
class BaseTestClient(Worker):
    """
    TestClient to simplify writing API tests for Zengine based apps.
    """
    def __init__(self, path, *args, **kwargs):
        """
        this is a wsgi test client based on zengine.worker
        :param str path: Request uri
        """
        super(BaseTestClient, self).__init__(*args, **kwargs)
        self.test_client_sessid = None
        self.response_wrapper = None
        self.set_path(path, None)
        self.user = None
        self.username = None
        self.path = ''
        self.sess_id = uuid4().hex
        # Flag so application code can detect it is running under tests.
        import sys
        sys._called_from_test = True
    def set_path(self, path, token=''):
        """
        Change the path (workflow)
        Args:
            path: New path (or wf name)
            token: WF token.
        """
        self.path = path
        self.token = token
    def _prepare_post(self, wf_meta, data):
        """
        Builds the message body for a simulated client request.

        Merges the previous response's form model into ``data['form']``,
        carries the current workflow token and path, and (optionally) attaches
        wf_meta info mirroring what the UI would send.

        Args:
            wf_meta (bool): Whether to attach fake wf_meta info (some tests
                deliberately exercise the no-wf_meta path).
            data (dict): Post data for the request.

        Returns:
            dict: The message payload (still un-serialized).
        """
        if 'token' not in data and self.token:
            data['token'] = self.token
        # Start from the last response's form model so consecutive posts
        # behave like a user continuing the same form.
        if self.response_wrapper:
            form_data = self.response_wrapper.form_data.copy()
        else:
            form_data = {}
        if self.path:
            data['path'] = self.path.replace('/', '')
        if 'form' in data:
            form_data.update(data['form'])
            data['form'] = form_data
        # Mimic the UI: only user tasks report wf_meta back to the backend.
        if wf_meta and hasattr(self, 'current') and hasattr(self.current, 'spec'):
            if self.current.task.parent.task_spec.__class__.__name__ == 'UserTask':
                data['wf_meta'] = {'name': self.current.workflow_name,
                                   'current_lane': self.current.task.parent.task_spec.lane,
                                   'current_step': self.current.task.parent.task_spec.name}
        post_data = {'data': data,
                     '_zops_remote_ip': '127.0.0.1',
                     '_zops_source': 'Remote',
                     }
        log.info("PostData : %s" % post_data)
        print("PostData : %s" % post_data)
        return post_data
    def post(self, wf_meta=True, **data):
        # Feed the serialized message straight into the worker's handler,
        # faking the AMQP method frame with just a routing key (session id).
        post_data = json.dumps(self._prepare_post(wf_meta, data))
        fake_method = type('FakeMethod', (object,), {'routing_key': self.sess_id})
        self.handle_message(None, fake_method, None, post_data)
        # update client token from response
        self.token = self.response_wrapper.token
        return self.response_wrapper
class TestClient(BaseTestClient):
    """Test client that captures worker output into a ResponseWrapper."""
    def send_output(self, output):
        # Instead of publishing to a queue, keep the response for assertions.
        self.response_wrapper = ResponseWrapper(output)
# encrypted form of test password (123)
user_pass = '$pbkdf2-sha512$10000$nTMGwBjDWCslpA$iRDbnITHME58h1/eVolNmPsHVq' \
            'xkji/.BH0Q0GQFXEwtFvVwdwgxX4KcN/G9lUGTmv7xlklDeUp4DD4ClhxP/Q'
# Default username the test login flow falls back to.
username = 'test_user'
import sys
# Module-level registry of fixture files already loaded during this test run.
sys.LOADED_FIXTURES = []
class BaseTestCase:
    """
    Base test case.
    """
    # Shared TestClient instance, created lazily by prepare_client().
    client = None
    def setup_method(self, method):
        """
        Creates a new user and Role with all Permissions.
        """
        # NOTE(review): the fixture-loading machinery below is disabled
        # (commented out); the docstring no longer matches what this method
        # actually does (cache clearing only) -- confirm intent.
        # if not '--ignore=fixture' in sys.argv:
        #     if hasattr(self, 'fixture'):
        #         print("\nREPORT:: Running test cases own fixture() method")
        #         self.fixture()
        #         sleep(2)
        #
        #     else:
        #         fixture_guess = 'fixtures/%s.csv' % method.__self__.__module__.split('.test_')[1]
        #         if os.path.exists(fixture_guess) and fixture_guess not in sys.LOADED_FIXTURES:
        #             sys.LOADED_FIXTURES.append(fixture_guess)
        #             FlushDB(model='all', wait_sync=True,
        #                     exclude=settings.TEST_FLUSHING_EXCLUDES).run()
        #             print("\nREPORT:: Test fixture will be loaded: %s" % fixture_guess)
        #             LoadData(path=fixture_guess, update=True).run()
        #             sleep(2)
        #         else:
        #             print(
        #                 "\nREPORT:: Test case does not have a fixture file like %s" % fixture_guess)
        #
        # else:
        #     print("\nREPORT:: Fixture loading disabled by user. (by --ignore=fixture)")
        # clear all caches
        # Done only once per process; the marker attribute lives on sys.
        if not hasattr(sys, 'cache_cleared'):
            sys.cache_cleared = True
            print(ClearCache.flush())
            print("\nREPORT:: Cache cleared")
    @classmethod
    def prepare_client(cls, path='', reset=False, user=None, login=None, token='', username=None):
        """
        Setups the path, logs in if necessary
        Args:
            path: change or set path
            reset: Create a new client
            login: Login to system
            token: Set token
        """
        # A fresh client is built on first use, on explicit reset, or when a
        # specific user is requested; either implies a (re-)login by default.
        if not cls.client or reset or user:
            cls.client = TestClient(path)
            login = True if login is None else login
        if username:
            cls.client.username = username
        if user:
            cls.client.user = user
            login = True if login is None else login
        if login:
            cls._do_login()
        cls.client.set_path(path, token)
    @classmethod
    def _do_login(self):
        # NOTE(review): this is a classmethod whose first argument is named
        # `self` instead of the conventional `cls`; it works, but the naming
        # is misleading.
        """
        logs in the "test_user"
        """
        self.client.sess_id = uuid4().hex
        self.client.set_path("/login/")
        resp = self.client.post()
        # The bare GET-equivalent post must render the login form first.
        assert resp.json['forms']['schema']['title'] == 'LoginForm'
        req_fields = resp.json['forms']['schema']['required']
        assert all([(field in req_fields) for field in ('username', 'password')])
        resp = self.client.post(username=self.client.username or self.client.user.username,
                                password="123", cmd="do")
        log.debug("login result :\n%s" % resp.json)
        # A successful login answers with an "upgrade" command.
        assert resp.json['cmd'] == 'upgrade'
    def get_user_token(self, username):
        """Return (current-session token, User) for the given username."""
        user = User.objects.get(username=username)
        # NOTE(review): token is read from the client's current workflow
        # context, not from `user` -- confirm that is the intended pairing.
        token = self.client.current.token
        return token, user
| zetaops/zengine | zengine/lib/test_utils.py | Python | gpl-3.0 | 8,364 |
#!python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2009 John Hampton <pacopablo@pacopablo.com>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
# Author: John Hampton <pacopablo@pacopablo.com>
import os
import logging
from subprocess import call, STDOUT, PIPE, Popen
from elderberrypy.errors import NonExistentUser, NonExistentGroup
from elderberrypy.accounts import get_uid, get_gid
log = logging.getLogger('elderberrypy.process')
__all__ = [
'run_prog',
]
def run_prog(env, cmd, user='su', group='srvadmins'):
""" Run the cmd as a subprocess as the given user/group """
run_env = {
'PATH' : '%(opt)s/bin:/usr/bin:/bin' % env,
'EBPY_SRV_ROOT' : '%(base)s' % env,
}
method = []
if isinstance(cmd, basestring):
method.append(cmd)
else:
method.extend(cmd)
cureuid = os.geteuid()
curegid = os.getegid()
# Set new ids
success = False
try:
os.setegid(get_gid(group))
os.seteuid(get_uid(user))
p = Popen(method, stdout=PIPE, stderr=STDOUT, env=run_env)
output = p.communicate()[0]
rc = p.returncode
success = rc == 0
log.debug("run_prog: %s returned %s" % (str(method), str(rc)))
if not success:
log.debug("run_prog: %s:\n%s" % (str(method), str(output)))
except NonExistentUser:
log.error("Can not run program as non-existent user: %s" % str(user))
except NonExistentGroup:
log.error("Can not run program as non-existent group: %s" % str(group))
except Exception, e:
log.error("Unable to run program: %s" % str(cmd), exc_info=True)
# Reset ids
os.seteuid(cureuid)
os.setegid(curegid)
return success
| pacopablo/elderberrypy | elderberrypy/process.py | Python | bsd-2-clause | 1,810 |
import json
import time
import datetime
import os
import uuid
import re
import campaign.utils as utils
from . import StorageBase, StorageException, Base
from campaign.logger import LOG
from campaign.views import api_version
from sqlalchemy import (Column, Integer, String, Text,
text)
from sqlalchemy.exc import IntegrityError
class Users(Base):
    """Admin-interface users of the campaign manager."""
    __tablename__ = 'users'
    __table_args__ = {
        'mysql_charset': 'utf8'
    }
    email = Column('email', String(100), primary_key=True)
    # opaque uuid4 hex identifier (assigned in Storage.add_user)
    id = Column('id', String(32), index=True)
    sponsor = Column('sponsor', String(100))
    # NOTE(review): attribute "time" maps to DB column "date" (epoch seconds)
    time = Column('date', Integer)
    level = Column('level', Integer)
class Campaign(Base):
    """A campaign row: targeting criteria plus payload/bookkeeping columns."""
    __tablename__ = 'campaigns'
    __table_args__ = {
        'mysql_charset': 'utf8'
    }
    # Due to a miscommunication during design, the client was
    # created requiring the 'id' to be a numeric. We use 'id' as
    # a unique identifier string.
    id = Column('id', String(25), primary_key=True)
    priority = Column('priority', Integer, index=True)
    # number of targeting fields set (computed in Storage.put_announce);
    # used as a secondary sort key in get_announce
    specific = Column('specific', Integer, index=True)
    channel = Column('channel', String(24), index=True, nullable=True)
    version = Column('version', String(30), index=True, nullable=True)
    product = Column('product', String(50), index=True, nullable=True)
    platform = Column('platform', String(50), index=True, nullable=True)
    lang = Column('lang', String(24), index=True, nullable=True)
    locale = Column('locale', String(24), index=True, nullable=True)
    start_time = Column('start_time', Integer, index=True)
    end_time = Column('end_time', Integer, index=True, nullable=True)
    idle_time = Column('idle_time', Integer, index=True, nullable=True)
    note = Column('note', Text)
    dest_url = Column('dest_url', Text)
    author = Column('author', String(255), index=True)
    created = Column('created', Integer, index=True)
    title = Column('title', String(50))
    status = Column('status', Integer)
    # de-duplication key checked by Storage.is_repost
    hashval = Column('hashval', String(64), index=True, unique=True)
class Scrapes(Base):
    """Per-campaign hit counters (incremented by Counter)."""
    __tablename__ = 'scrapes'
    __table_args__ = {
        'mysql_charset': 'utf8'
    }
    id = Column('id', String(25), unique=True, primary_key=True)
    # times the campaign was served / clicked
    served = Column('served', Integer, server_default=text('0'))
    clicks = Column('clicks', Integer, server_default=text('0'))
    # epoch seconds of the last recorded hit
    last = Column('last', Integer, index=True, server_default=text('0'))
class CounterException(Exception):
    """Raised for counter/scrape bookkeeping errors."""
class Counter(StorageBase):
__database__ = 'campaign'
__tablename__ = 'scrapes'
def __init__(self, config, logger, **kw):
try:
super(Counter, self).__init__(config, **kw)
self.logger = logger
self._connect()
#TODO: add the most common index.
except Exception, e:
logger.log(msg='Could not initialize Storage "%s"' % str(e),
type='error', severity=LOG.CRITICAL)
raise e
def bulk_increment(self, conn, id, action, time=time.time()):
action = re.sub(r'[^0-9A-Za-z]', '', action)
try:
if (self.settings.get("db.type") == "sqlite"):
conn.execute(text("insert or ignore into " +
self.__tablename__ +
" (id)" +
" values (:id ); "),
{"id": id})
else:
dml = text("insert into " + self.__tablename__
+ " (id, %s) values (:id, 1) " % action
+ " on duplicate key update %s=%s+1, last=:last;"
% (action, action))
conn.execute(dml, {"id": id, "last": time})
except Exception, e:
self.logger.log(msg="Could not increment id: %s" % str(e),
type="error", severity=LOG.ERROR)
def increment(self, id, action, time):
with self.engine.begin() as conn:
self.bulk_increment(conn, id, action, time)
def fetched(self, data, time=time.time()):
with self.engine.begin() as conn:
for item in data:
self.bulk_increment(conn, item.get('token'), 'served', time)
def redir(self, data, time=time.time()):
self.increment(data.get('id'), 'clicks', time)
commands = {'redirect': redir,
'fetched': fetched}
def log(self, line):
for command in self.commands.keys():
if command + ' :' in line:
dt = datetime.strptime(line.split(',')[0],
'%Y-%m-%d %H:%M:%S')
timestamp = int(time.mktime(dt.timetuple()))
try:
data = json.loads(line.split(command + ' :')[1])
while (isinstance(data, basestring)):
data = json.loads(data)
self.commands.get(command)(self,
data,
timestamp)
except Exception, e:
self.logger.log(msg="Could not log %s" % str(e),
type="error", severity=LOG.ERROR)
raise e
def report(self, id):
with self.engine.begin() as conn:
resp = conn.execute(text(("select * from %s " %
self.__tablename__) +
"where id = :id"), {'id': id})
if resp.rowcount > 0:
result = resp.fetchone()
return dict(zip(resp.keys(), result))
else:
return {}
def parse(self, logfile):
try:
file = open(logfile, 'r')
for line in file:
self.log(line)
except Exception, e:
self.logger.log(msg="Could not parse %s" % str(e),
type="error", severity=LOG.ERROR)
pass
class Storage(StorageBase):
__database__ = 'campaign'
__tablename__ = 'campaigns'
def __init__(self, config, logger, **kw):
try:
super(Storage, self).__init__(config, **kw)
self.logger = logger
# Store off a link to the main table.
self.campaigns = Base.metadata.tables.get(Campaign.__tablename__)
self.users = Base.metadata.tables.get(Users.__tablename__)
self.scrapes = Base.metadata.tables.get(Scrapes.__tablename__)
self.counter = Counter(config, logger, **kw)
self._connect()
#TODO: add the most common index.
except Exception, e:
logger.log(msg='Could not initialize Storage "%s"' % str(e),
type='error', severity=LOG.CRITICAL)
raise e
def health_check(self):
if utils.strToBool(self.settings.get('db.skip_health', False)):
return True
try:
healthy = True
with self.engine.begin() as conn:
test_id = "test" + os.urandom(5).encode("hex")
ins = self.campaigns.insert().values(
id=test_id,
product="test",
channel="test",
platform="test",
start_time=0,
end_time=0,
note="test",
dest_url="test",
author="test",
created=0)
conn.execute(ins)
sel = self.campaigns.select(self.campaigns.c.id == test_id)
resp = conn.execute(sel)
rows = resp.fetchall()
if not len(rows):
healthy = False
conn.execute(self.campaigns.delete(self.campaigns.c.id ==
test_id))
except Exception, e:
import warnings
warnings.warn(str(e))
return False
return healthy
def resolve(self, token):
if token is None:
return None
sel = self.campaigns.select(self.campaigns.c.id == token)
items = self.engine.execute(sel)
row = items.fetchone()
if row is None:
return None
result = dict(zip(items.keys(), row))
return result
def is_repost(self, data):
hashval = data.get('hashval')
if hashval is None:
self.logger.log(msg="No hash found, ignoring.",
type="error",
severity=LOG.DEBUG)
return True
with self.engine.begin() as conn:
resp = conn.execute(text(("select hashval from %s " %
self.__tablename__) +
"where hashval = :hashval",
{"hashval": hashval}))
row = resp.fetchone()
if row is not None:
return True
return False
def put_announce(self, data, sessionHandle=None, now=None):
if sessionHandle:
session = sessionHandle
else:
session = self.Session()
if isinstance(data, list):
for item in data:
self.put_announce(item, session, now)
return self
if self.is_repost(data):
return self
if data.get('body') is None:
raise StorageException('Incomplete record. Skipping.')
specificity = 0
for col in ['lang', 'loc', 'platform',
'channel', 'version']:
if len(str(data.get(col, ''))):
specificity += 1
if data.get('idle_time') and int(data.get('idle_time')) != 0:
specificity += 1
data['specific'] = specificity
snip = self.normalize_announce(data, now)
campaign = Campaign(**snip)
session.add(campaign)
try:
session.commit()
except IntegrityError:
pass
if not sessionHandle:
session.flush()
session.close()
return self
def get_announce(self, data, now=None):
# Really shouldn't allow "global" variables, but I know full well
# that they're going to want them.
params = {}
# The window allows the db to cache the query for the length of the
# window. This is because the database won't cache a query if it
# differs from a previous one. The timestamp will cause the query to
# not be cached.
#window = int(self.settings.get('db.query_window', 1))
window = 1
if now is None:
now = int(time.time() / window) * window
# Using raw sql here for performance reasons.
sql = ("select created, id, note, priority, `specific`, "
"start_time, idle_time from %s where " % self.__tablename__ +
" product != 'test' and "
" coalesce(cast(start_time as unsigned), %s) <= %s"
% (now - 1, now))
for field in ['product', 'platform', 'channel', 'lang', 'locale']:
if data.get(field):
sql += " and coalesce(%s, :%s) = :%s " % (field, field, field)
params[field] = data.get(field)
data['idle_time'] = data.get('idle', 0)
try:
if 'version' in data:
sql += " and coalesce(version, :version) = :version"
params['version'] = str(data['version']).split('.')[0]
except Exception:
pass
sql += " and coalesce(idle_time, 0) <= :idle_time "
params['idle_time'] = data.get('idle_time')
sql += " order by priority desc, `specific` desc, start_time desc"
if (self.settings.get('dbg.show_query', False)):
print sql
print params
if (self.settings.get('db.limit')):
sql += " limit :limit"
params['limit'] = self.settings.get('db.limit')
try:
items = self.engine.execute(text(sql), **dict(params))
except Exception, e:
self.logger.log(msg='SQL Error "%s" ' % str(e),
type='error', severity=LOG.CRITICAL)
result = []
for item in items:
# last_accessed may be actually set to 'None'
last_accessed = int(data.get('last_accessed') or '0')
if last_accessed:
start = item.start_time or item.created
if (item.idle_time and
last_accessed > start + (86400 * item.idle_time)):
continue
else:
if last_accessed > start:
continue
note = json.loads(item.note)
note.update({
# ID in this case is a unique integer per CM record
# it is used by the client to replace records.
'id': int(item.created * 100),
# token is stripped before being sent to the client.
# it's used for metrics tracking.
'token': item.id,
'created': item.created,
# This uses the server string ID for redirect/tracking
'url': self.settings.get('redir.url', 'http://%s/%s%s') % (
self.settings.get('redir.host', 'localhost'),
self.settings.get('redir.path',
'redirect/%s/' % api_version),
item.id)})
result.append(note)
return result
def get_all_announce(self, limit=None):
result = []
sql = ("select c.*,s.served,s.clicks from " +
"(campaigns as c left join scrapes " +
"as s on c.id=s.id) where " +
"c.product != \"test\" " +
"order by created desc ")
if limit:
sql += 'limit %d' % limit
items = self.engine.execute(text(sql))
for item in items:
counter = self.counter.report(item.id)
ann = dict(item)
ann.update(counter)
result.append(ann)
return result
def del_announce(self, keys):
#TODO: how do you safely do an "in (keys)" call?
session = self.Session()
sql = 'delete from %s where id = :key' % self.__tablename__
for key in keys:
self.engine.execute(text(sql), {"key": key})
session.commit()
def purge(self):
session = self.Session()
sql = 'delete from %s;' % self.__tablename__
self.engine.execute(text(sql))
session.commit()
def user_health_check(self):
#foo
try:
uid = self.add_user('test', '', 0)
self.rm_user(uid)
# with self.engine.begin() as conn:
except Exception, e:
import warnings
warnings.warn(str(e))
return False
return True
def is_user(self, email):
sel = self.users.select(self.users.c.email == email)
items = self.engine.execute(sel)
row = items.fetchone()
if row is None:
return False
return True
def user_list(self):
result = []
sql = "select * from users order by email"
items = self.engine.execute(text(sql))
for item in items:
result.append(dict(item))
return result
def add_user(self, email, sponsor, level=0):
try:
uid = uuid.uuid4().hex
with self.engine.begin() as conn:
ins = self.users.insert().values(
id=uid,
email=email,
sponsor=sponsor,
level=level,
date=int(time.time()))
conn.execute(ins)
return uid
except Exception, e:
self.logger.log(msg='Could not add user "%s", "%s"' %
(email, repr(e)),
type="error", severity=LOG.ERROR)
return None
def rm_user(self, id):
if len(id) == 0 or id == '0':
return False
try:
with self.engine.begin() as conn:
rm = self.users.delete().where(self.users.c.id == id)
conn.execute(rm)
return True
except Exception, e:
self.logger.log(msg='Could not delete user with id "%s", "%s"' %
(id, repr(e)),
type="error", severity=LOG.ERROR)
return False
| jrconlin/campaign_manager | campaign/storage/sql.py | Python | mpl-2.0 | 16,721 |
from setuptools import setup, find_packages

# runtime dependencies; importlib is appended below for old interpreters
install_requirements = ['pytz', 'tzlocal']

version = '2.2.0'

try:
    import importlib
except ImportError:
    # interpreter lacks stdlib importlib: pull in the PyPI backport
    install_requirements.append('importlib')

setup(
    name='tasklib',
    version=version,
    description='Python Task Warrior library',
    long_description=open('README.rst').read(),
    author='Rob Golding',
    author_email='rob@robgolding.com',
    license='BSD',
    url='https://github.com/robgolding63/tasklib',
    download_url='https://github.com/robgolding63/tasklib/downloads',
    packages=find_packages(),
    include_package_data=True,
    test_suite='tasklib.tests',
    install_requires=install_requirements,
    classifiers=[
        'Development Status :: 4 - Beta',
        'Programming Language :: Python',
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        'License :: OSI Approved :: BSD License',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Intended Audience :: Developers',
    ],
)
| tbabej/tasklib | setup.py | Python | bsd-3-clause | 1,203 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Answered by Billy Wilson Arante
# Last updated on 2016/12/03 EST
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
import sys
import re
import os
import shutil
import commands
from subprocess import call
"""Copy Special exercise
"""
# +++your code here+++
# Write functions and modify main() to call them
def get_special_paths(a_dir):
    """Return absolute paths of the "special" files in ``a_dir``.

    A file is special when its name contains a ``__word__`` pattern,
    e.g. ``xyz__hello__.txt`` or ``__boring__.txt``.
    """
    abs_paths = []
    for a_file in os.listdir(a_dir):
        # Match "__word__" anywhere in the name.  The previous pattern
        # (r"\w+__\w+__") missed names that *start* with the double
        # underscore, such as "__hello__.txt".
        if re.search(r"__(\w+)__", a_file):
            # Resolve relative to a_dir; the previous
            # os.path.abspath("./" + a_file) resolved against the current
            # working directory and produced wrong paths for other dirs.
            abs_paths.append(os.path.abspath(os.path.join(a_dir, a_file)))
    return abs_paths
def copy_to(todir, a_dir):
    """Copy the special files of ``a_dir`` into ``todir``.

    ``todir`` is created if it does not exist.  Returns a status message.
    """
    if not os.path.exists(todir):
        # Create todir as given.  The previous os.mkdir("./" + todir) form
        # turned absolute destinations into relative ones ("./" + "/x" ->
        # ".//x"), creating the directory in the wrong place.
        os.mkdir(todir)
    for a_file in get_special_paths(a_dir):
        shutil.copy(a_file, todir)
    return "Copying is successful."
def zip_to(zip_file, a_dir):
    """Zip the special files of ``a_dir`` into ``zip_file``.

    Uses the external ``zip -j`` command (flattens directory structure).
    Exits the process with status 1 when zip fails.
    """
    cmd = ["zip", "-j", zip_file]
    # Filter files to be zipped
    for a_file in get_special_paths(a_dir):
        cmd.append(a_file)
    print "Command I am going to do:", " ".join(cmd)
    status = call(cmd)  # Zipping process
    # Error handling: a non-zero exit status from zip aborts the script
    if status:
        sys.stderr.write(str(status))
        sys.exit(1)
    return "Zipping is successful."
def main():
    """Main program: parse --todir/--tozip flags, then copy, zip or list."""
    # This basic command line argument parsing code is provided.
    # Add code to call your functions below.

    # Make a list of command line arguments, omitting the [0] element
    # which is the script itself.
    args = sys.argv[1:]
    if not args:
        print "usage: [--todir dir][--tozip zip_file] dir [dir ...]";
        sys.exit(1)

    # todir and tozip are either set from command line
    # or left as the empty string.
    # The args array is left just containing the dirs.
    todir = ''
    if args[0] == '--todir':
        todir = args[1]
        del args[0:2]

    tozip = ''
    if args[0] == '--tozip':
        tozip = args[1]
        del args[0:2]

    if len(args) == 0:
        print "error: must specify one or more dirs"
        sys.exit(1)

    # Dispatch: copy, zip, or just print the special paths of the first dir
    if todir != "":
        print copy_to(todir, args[0])
    elif tozip != "":
        print zip_to(tozip, args[0])
    else:
        for a_path in get_special_paths(args[0]):
            print a_path
if __name__ == "__main__":
    # Call main only when executed as a script (not on import)
    main()
| arantebillywilson/python-snippets | py2/google-python-exercises/copyspecial/copyspecial.py | Python | mit | 2,566 |
from v3 import *
def test1():
    """Smoke-test rsm on dense random vectors: print score sums and stats.

    NOTE(review): statement order matters here -- random_vector presumably
    consumes a shared RNG stream, so reordering changes the printed values.
    """
    X = [random_vector(100,0,1) for i in range(100)]
    X = list(map(rsm.sparsify,X))
    nn = rsm(50,7,13)
    # score before learning
    s=nn.score(X[0])
    print(sum(s.values()))
    # two learning passes over the whole set
    for i in range(2):
        for x in X:
            nn.learn(x,1)
    # score the same sample after learning
    s=nn.score(X[0])
    print(sum(s.values()))
    from pprint import pprint
    pprint(nn.stats())
    pprint(nn.mem)
    pprint(nn.vol)
def test2():
    """Score-separation check: learn only X1, then score X1/X2/X3.

    Returns the average score over X1.  The ``if 0:`` blocks are disabled
    experiment toggles kept for manual exploration.
    """
    # data: three independent sparse sample sets
    X1 = [set(combinations(10000,50)) for i in range(10)]
    X2 = [set(combinations(10000,50)) for i in range(10)]
    X3 = [set(combinations(10000,50)) for i in range(10)]
    # learning
    nn = rsm(5,5,5)
    for i in range(2):
        if 0: # negative (disabled)
            for x in X2:
                nn.learn(x,1,negative=True)
        for x in X1: # positive
            nn.learn(x,1,decay=0.0,dropout=0.0)
    print(nn.mem)
    print(nn.neg)
    # scoring
    m = 6
    s1 = [nn.score(x,1,method=m) for x in X1]
    s2 = [nn.score(x,1,method=m) for x in X2]
    s3 = [nn.score(x,1,method=m) for x in X3]
    if 1:
        print('X1 -> {:.03f} {:.03f} {:.03f}'.format(avg(s1),min(s1),max(s1)))
        print('X2 -> {:.03f} {:.03f} {:.03f}'.format(avg(s2),min(s2),max(s2)))
        print('X3 -> {:.03f} {:.03f} {:.03f}'.format(avg(s3),min(s3),max(s3)))
    if 0:
        print(list(sorted(s1,reverse=True)))
        print(list(sorted(s2,reverse=True)))
    # other
    if 0:
        print(X1)
        print(nn.mem)
        print(nn.vol)
    # stats
    if 0:
        pprint(nn.stats('m'))
    return avg(s1)
if __name__=="__main__":
    import sys
    # average test2()'s returned score over ``iters`` runs
    iters = 1
    total = 0
    for i in range(iters):
        total += test2()
        #print('.')
        sys.stdout.flush()
    print 1.0*total/iters
| mobarski/sandbox | rsm/v9le/test_v3.py | Python | mit | 1,482 |
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 12 13:40:24 2015
@author: Paco
"""
from api import API
class Eventful(API):
    """Client for the Eventful JSON API (events, venues and demands).

    See http://api.eventful.com/docs/ for the upstream documentation.
    """
    _class_name = 'Eventful'
    _category = 'Event'
    _help_url = 'http://api.eventful.com/docs/'
    _api_url = 'http://api.eventful.com/json/'

    def __init__(self, apikey):
        self._api_key = apikey

    def _parse_items(self, items, fields):
        """Turn a list of API item dicts into a dict of per-field lists.

        ``fields`` maps a result key to a ``(source_key, conv)`` pair where
        ``conv`` is None or a type name accepted by ``_tools.key_test``
        (e.g. 'float').  Missing source keys are tolerated by key_test.
        Shared helper for the three ``_parsing_data*`` methods, which
        previously repeated this loop.
        """
        res = dict((name, list()) for name in fields)
        for d in items:
            for name, (source, conv) in fields.items():
                if conv is None:
                    res[name].append(self._tools.key_test(source, d))
                else:
                    res[name].append(self._tools.key_test(source, d, conv))
        return res

    def _parsing_data(self, data):
        """Parse an events/search response into per-field lists."""
        fields = {'id': ('id', None),
                  'description': ('description', None),
                  'title': ('title', None),
                  'country': ('country_name', None),
                  'city': ('city_name', None),
                  'address': ('venue_address', None),
                  'date': ('start_time', None),
                  'latitude': ('latitude', 'float'),
                  'longitude': ('longitude', 'float'),
                  'url': ('url', None)}
        return self._parse_items(data['events']['event'], fields)

    def _parsing_data2(self, data):
        """Parse a venues/search response into per-field lists."""
        fields = {'id': ('id', None),
                  'description': ('description', None),
                  'name': ('name', None),
                  'country': ('country_name', None),
                  'city': ('city_name', None),
                  'address': ('address', None),
                  'latitude': ('latitude', 'float'),
                  'longitude': ('longitude', 'float'),
                  'url': ('url', None)}
        return self._parse_items(data['venues']['venue'], fields)

    def _parsing_data3(self, data):
        """Parse a demands/search response (performer name is nested)."""
        fields = {'id': ('id', None),
                  'description': ('description', None),
                  'location': ('location', None),
                  'latitude': ('latitude', 'float'),
                  'longitude': ('longitude', 'float')}
        res = self._parse_items(data['demands']['demand'], fields)
        # 'performer' lives one level down in each demand item
        res['performer'] = [self._tools.key_test('name', d['performer'])
                            for d in data['demands']['demand']]
        return res

    def search_events(self, text='', location='paris', limit=10):
        """Search future events matching ``text`` near ``location``."""
        text = text.replace(' ', '+')
        location = location.replace(' ', '+')
        url = self._api_url+'events/search?app_key='+self._api_key+'&keywords='+text+'&location='+location+'&date=Future&page_size='+str(limit)
        data = self._tools.data_from_url(url)
        self._increment_nb_call()
        return self._parsing_data(data)

    def search_events_by_coordinates(self, lat=48.87, lon=2.30, radius=2, units='km', limit=10):
        """Search future events within ``radius`` ``units`` of (lat, lon)."""
        url = self._api_url+'events/search?app_key='+self._api_key+'&where='+str(lat)+','+str(lon)+'&within='+str(radius)+'&units='+units+'&date=Future&page_size='+str(limit)
        data = self._tools.data_from_url(url)
        self._increment_nb_call()
        return self._parsing_data(data)

    def search_venues(self, text='', location='paris', limit=10):
        """Search venues matching ``text`` near ``location``."""
        text = text.replace(' ', '+')
        location = location.replace(' ', '+')
        url = self._api_url+'venues/search?app_key='+self._api_key+'&keywords='+text+'&location='+location+'&page_size='+str(limit)
        data = self._tools.data_from_url(url)
        self._increment_nb_call()
        return self._parsing_data2(data)

    def search_demands(self, location='paris', limit=10):
        """Search performer demands near ``location``."""
        location = location.replace(' ', '+')
        url = self._api_url+'demands/search?app_key='+self._api_key+'&location='+location+'&page_size='+str(limit)
        data = self._tools.data_from_url(url)
        self._increment_nb_call()
        return self._parsing_data3(data)
| franblas/pyAPI | src/pyapi/eventful.py | Python | mit | 4,417 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (c) 2013 jmesteve All Rights Reserved
# https://github.com/jmesteve
# <jmesteve@me.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': "Warning box",
'version': '0.1',
'category': 'Tools',
'description': """
[ENG] Add Warning box.
usage return self.pool.get('warning_box').info(cr, uid, title='The title', message='the message')
""",
'author': 'jmesteve, ADHOC SA',
'website': 'https://github.com/jmesteve',
'license': 'AGPL-3',
"depends": ['base'],
"data": [
'warning_box.xml',
],
"active": False,
"installable": True
}
| syci/ingadhoc-odoo-addons | warning_box/__openerp__.py | Python | agpl-3.0 | 1,526 |
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Lndir(AutotoolsPackage, XorgPackage):
    """lndir - create a shadow directory of symbolic links to another
    directory tree."""

    homepage = "http://cgit.freedesktop.org/xorg/util/lndir"
    xorg_mirror_path = "util/lndir-1.0.3.tar.gz"

    version('1.0.3', sha256='95b2d26fb3cbe702f828146c7a4c7c48001d2da52b062580227b7b68180be902')

    # build-only dependencies: X11 protocol headers and pkg-config
    depends_on('xproto@7.0.17:', type='build')
    depends_on('pkgconfig', type='build')
| iulian787/spack | var/spack/repos/builtin/packages/lndir/package.py | Python | lgpl-2.1 | 655 |
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import paddle.fluid as fluid
from absl import flags
import build.layers as layers
import build.ops as _ops
FLAGS = flags.FLAGS

# architecture search-space hyper-parameters
flags.DEFINE_integer("num_stages", 3, "number of stages")
flags.DEFINE_integer("num_blocks", 5, "number of blocks per stage")
flags.DEFINE_integer("num_ops", 2, "number of operations per block")
flags.DEFINE_integer("width", 64, "network width")
flags.DEFINE_string("downsample", "pool", "conv or pool")

# CIFAR-10 style classifier head
num_classes = 10

# candidate operations; architecture tokens index into this list
ops = [
    _ops.conv_1x1,
    _ops.conv_2x2,
    _ops.conv_3x3,
    _ops.dilated_2x2,
    _ops.conv_1x2_2x1,
    _ops.conv_1x3_3x1,
    _ops.sep_2x2,
    _ops.sep_3x3,
    _ops.maxpool_2x2,
    _ops.maxpool_3x3,
    _ops.avgpool_2x2,
    _ops.avgpool_3x3,
]
def net(inputs, tokens):
    """ build network with skip links.

    Stacks ``FLAGS.num_stages`` stages (the first with pre-activation, the
    rest downsampling) and finishes with BN+ReLU, global average pooling,
    dropout and a softmax over ``num_classes``.
    """
    x = layers.conv(inputs, FLAGS.width, (3, 3))
    # tokens are consumed num_blocks * num_ops at a time, one slice per stage
    num_ops = FLAGS.num_blocks * FLAGS.num_ops
    x = stage(x, tokens[:num_ops], pre_activation=True)
    for i in range(1, FLAGS.num_stages):
        x = stage(x, tokens[i * num_ops:(i + 1) * num_ops], downsample=True)
    x = layers.bn_relu(x)
    x = layers.global_avgpool(x)
    x = layers.dropout(x)
    logits = layers.fully_connected(x, num_classes)
    return fluid.layers.softmax(logits)
def stage(x, tokens, pre_activation=False, downsample=False):
    """ build network's stage. Stage consists of blocks.

    Only the first block of the stage receives the pre_activation /
    downsample flags; the remaining blocks run with defaults.
    """
    x = block(x, tokens[:FLAGS.num_ops], pre_activation, downsample)
    for i in range(1, FLAGS.num_blocks):
        print("-" * 12)
        x = block(x, tokens[i * FLAGS.num_ops:(i + 1) * FLAGS.num_ops])
    print("=" * 12)
    return x
def block(x, tokens, pre_activation=False, downsample=False):
    """ build block: a chain of token-selected ops plus a residual link.

    Args:
        x: input tensor.
        tokens: indices into ``ops`` for this block's operations.
        pre_activation: apply bn_relu before forking the residual branch
            (used for the first block of the network).
        downsample: halve spatial size / double channels in the first op
            and match the residual branch accordingly.

    Returns:
        Output tensor: op-chain result plus the (possibly adapted) residual.
    """
    if pre_activation:
        x = layers.bn_relu(x)
        res = x
    else:
        res = x
        x = layers.bn_relu(x)
    x = ops[tokens[0]](x, downsample)
    # Log the op actually selected.  Previously this printed
    # ops[0].__name__, i.e. always the first entry of the op table,
    # regardless of the chosen token.
    print("%s \t-> shape %s" % (ops[tokens[0]].__name__, x.shape))
    for token in tokens[1:]:
        x = layers.bn_relu(x)
        x = ops[token](x)
        print("%s \t-> shape %s" % (ops[token].__name__, x.shape))
    if downsample:
        filters = res.shape[1]
        if FLAGS.downsample == "conv":
            res = layers.conv(res, filters * 2, (1, 1), (2, 2))
        elif FLAGS.downsample == "pool":
            # average-pool then zero-pad channels to match the doubled width
            res = layers.avgpool(res, (2, 2), (2, 2))
            res = fluid.layers.pad(res, (0, 0, filters // 2, filters // 2, 0, 0,
                                         0, 0))
        else:
            raise NotImplementedError
    return x + res
| kuke/models | fluid/PaddleCV/HiNAS_models/build/resnet_base.py | Python | apache-2.0 | 3,232 |
# -*- coding: utf-8 -*-
"""File parsing functions
"""
import numpy as np
import csv
import ast
import json
def read_tab_raw(fname):
    """Read .tab file from expyfun output without segmenting into trials.

    Parameters
    ----------
    fname : str
        Input filename.

    Returns
    -------
    data : list of tuple
        One ``(timestamp, key, value)`` tuple per logged line, with the
        timestamp converted to float.

    See Also
    --------
    read_tab
    """
    with open(fname, 'r') as fid:
        rows = [row for row in csv.reader(fid, delimiter='\t')]
    # the first two lines are headers: a '#'-prefixed metadata line
    # followed by the column names
    assert (len(rows[0]) == 1 and rows[0][0][0] == '#')
    assert rows[1] == ['timestamp', 'event', 'value']
    return [(float(row[0]), row[1], row[2]) for row in rows[2:]]
def read_tab(fname, group_start='trial_id', group_end='trial_ok'):
    """Read .tab file from expyfun output and segment into trials.

    Parameters
    ----------
    fname : str
        Input filename.
    group_start : str
        Key to use to start a trial/row.
    group_end : str | None
        Key to use to end a trial/row. If None, the next ``group_start``
        will end the current group.

    Returns
    -------
    data : list of dict
        The data, with a dict for each trial. Each value in the dict
        is a list of tuples (event, time) for each occurrence of that
        key.

    See Also
    --------
    read_tab_raw
    """
    # load everything into memory for ease of use
    lines = read_tab_raw(fname)
    # determine the event fields
    header = list(set([l[1] for l in lines]))
    header.sort()
    if group_start not in header:
        raise ValueError('group_start "{0}" not in header: {1}'
                         ''.format(group_start, header))
    if group_end == group_start:
        raise ValueError('group_start cannot equal group_end, use '
                         'group_end=None')
    header = [header.pop(header.index(group_start))] + header
    b1s = np.where([line[1] == group_start for line in lines])[0]
    if group_end is None:
        # each group runs until the next group_start (or end of file)
        b2s = np.concatenate((b1s[1:], [len(lines)]))
    else:  # group_end is not None
        if group_end not in header:
            raise ValueError('group_end "{0}" not in header ({1})'
                             ''.format(group_end, header))
        header.append(header.pop(header.index(group_end)))
        b2s = np.where([line[1] == group_end for line in lines])[0]
    if len(b1s) != len(b2s) or not np.all(b1s < b2s):
        raise RuntimeError('bad bounds:\n{0}\n{1}'.format(b1s, b2s))
    data = []
    for b1, b2 in zip(b1s, b2s):
        assert lines[b1][1] == group_start  # prevent stupidity
        if group_end is not None:
            b2 = b2 + 1  # include the end
            assert lines[b2 - 1][1] == group_end
        d = dict()
        these_times = [float(line[0]) for line in lines[b1:b2]]
        # build the key array once per group instead of once per header key
        # (was: np.array(these_keys) recreated inside the loop below)
        these_keys = np.array([line[1] for line in lines[b1:b2]])
        these_vals = [line[2] for line in lines[b1:b2]]
        for key in header:  # (the index from enumerate was unused)
            idx = np.where(these_keys == key)[0]
            d[key] = [(these_vals[ii], these_times[ii]) for ii in idx]
        data.append(d)
    return data
def reconstruct_tracker(fname):
    """Reconstruct TrackerUD and TrackerBinom objects from .tab files.
    Parameters
    ----------
    fname : str
        Input filename.
    Returns
    -------
    tr : list of TrackerUD or TrackerBinom
        The tracker objects with all responses such that they are in their
        stopped state (as long as the trackers were allowed to stop during
        the generation of the file.) If only one tracker is found in the file,
        it will still be stored in a list and will be accessible as ``tr[0]``.
    """
    from ..stimuli import TrackerUD, TrackerBinom
    # read in raw data
    raw = read_tab_raw(fname)
    # find tracker_identify and make list of IDs
    tracker_idx = np.where([r[1] == 'tracker_identify' for r in raw])[0]
    if len(tracker_idx) == 0:
        raise ValueError('There are no Trackers in this file.')
    tr = []
    for ii in tracker_idx:
        # the tab value field holds a python-literal dict with id and type
        tracker_id = ast.literal_eval(raw[ii][2])['tracker_id']
        tracker_type = ast.literal_eval(raw[ii][2])['tracker_type']
        # find tracker_ID_init lines and get dict of constructor kwargs
        init_str = 'tracker_' + str(tracker_id) + '_init'
        tracker_dict_idx = np.where([r[1] == init_str for r in raw])[0][0]
        tracker_dict = json.loads(raw[tracker_dict_idx][2])
        # instantiate the right tracker class from its recorded kwargs
        td = dict(TrackerUD=TrackerUD, TrackerBinom=TrackerBinom)
        tr.append(td[tracker_type](**tracker_dict))
        tr[-1]._tracker_id = tracker_id  # make sure tracker has original ID
        stop_str = 'tracker_' + str(tracker_id) + '_stop'
        tracker_stop_idx = np.where([r[1] == stop_str for r in raw])[0]
        if len(tracker_stop_idx) == 0:
            raise ValueError('Tracker {} has not stopped. All Trackers '
                             'must be stopped.'.format(tracker_id))
        responses = json.loads(raw[tracker_stop_idx[0]][2])['responses']
        # feed in responses from tracker_ID_stop to replay it to its
        # stopped state
        for r in responses:
            tr[-1].respond(r)
    return tr
def reconstruct_dealer(fname):
    """Rebuild TrackerDealer objects recorded in a .tab file.

    ``reconstruct_tracker`` is called to recover the individual trackers
    belonging to each dealer.

    Parameters
    ----------
    fname : str
        Input filename.

    Returns
    -------
    dealer : list of TrackerDealer
        The TrackerDealer objects with all responses replayed, i.e. in
        their stopped state. A single dealer is still returned inside a
        list and is accessible as ``td[0]``.
    """
    from ..stimuli import TrackerDealer
    records = read_tab_raw(fname)
    # rows announcing a dealer
    ident_rows = np.where([rec[1] == 'dealer_identify' for rec in records])[0]
    if len(ident_rows) == 0:
        raise ValueError('There are no TrackerDealers in this file.')
    dealers = []
    for row in ident_rows:
        d_id = ast.literal_eval(records[row][2])['dealer_id']
        # recover the dealer's construction parameters
        init_key = 'dealer_' + str(d_id) + '_init'
        init_row = np.where([rec[1] == init_key for rec in records])[0][0]
        init = ast.literal_eval(records[init_row][2])
        # map stored tracker ids back onto freshly reconstructed trackers
        # (reconstructed per dealer so dealers never share tracker objects)
        available = reconstruct_tracker(fname)
        members = []
        for t_id in init['trackers']:
            pos = np.where([t_id == t._tracker_id for t in available])[0][0]
            members.append(available[pos])
        dealer = TrackerDealer(None, members, init['max_lag'],
                               init['pace_rule'])
        # force in the recorded responses / log data from dealer_<ID>_stop
        stop_key = 'dealer_' + str(d_id) + '_stop'
        stop_rows = np.where([rec[1] == stop_key for rec in records])[0]
        if len(stop_rows) == 0:
            raise ValueError('TrackerDealer {} has not stopped. All dealers '
                             'must be stopped.'.format(d_id))
        stop_log = json.loads(records[stop_rows[0]][2])
        shape = tuple(init['shape'])
        dealer._shape = shape
        dealer._trackers.shape = shape
        dealer._response_history = stop_log['response_history']
        dealer._x_history = stop_log['x_history']
        dealer._tracker_history = stop_log['tracker_history']
        dealer._stopped = True
        dealers.append(dealer)
    return dealers
| rkmaddox/expyfun | expyfun/io/_parse.py | Python | bsd-3-clause | 8,093 |
# Debug smoke-test: exercise a backward pass on the CNF network.
from binary.continuous.continuous_binary import CNF_Net
# NOTE(review): backward() is invoked on the class itself, not an
# instance -- confirm this is intended (works only if it is a
# classmethod/staticmethod or ignores self).
CNF_Net.backward()
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Heat exception subclasses"""
import functools
import urlparse
import sys
from heat.openstack.common import log as logging
# When True, a bad format-string/kwargs combination in HeatException
# re-raises instead of merely being logged (useful during development).
_FATAL_EXCEPTION_FORMAT_ERRORS = False
# Module-level logger for this package.
logger = logging.getLogger(__name__)
class RedirectException(Exception):
    """Raised to signal that the client should be redirected to ``url``."""
    def __init__(self, url):
        # Store the parsed form so handlers can inspect scheme/host/path.
        self.url = urlparse.urlparse(url)
class KeystoneError(Exception):
    """Error reported by the Keystone identity service.

    Carries the HTTP-style status ``code`` and a human-readable
    ``message`` describing the failure.
    """
    def __init__(self, code, message):
        self.code = code
        self.message = message

    def __str__(self):
        return "Code: {0}, message: {1}".format(self.code, self.message)
def wrap_exception(notifier=None, publisher_id=None, event_type=None,
                   level=None):
    """This decorator wraps a method to catch any exceptions that may
    get thrown. The exception is optionally sent to the notification
    system before being re-raised unchanged.

    Keyword arguments:
    notifier -- notification API object; if None, exceptions simply
                propagate without being reported.
    publisher_id -- identifier passed through to notifier.notify().
    event_type -- event name for the notification; defaults to the
                  wrapped function's __name__.
    level -- notification severity; defaults to notifier.ERROR.
    """
    # TODO(sandy): Find a way to import nova.notifier.api so we don't have
    # to pass it in as a parameter. Otherwise we get a cyclic import of
    # nova.notifier.api -> nova.utils -> nova.exception :(
    # TODO(johannes): Also, it would be nice to use
    # utils.save_and_reraise_exception() without an import loop
    def inner(f):
        def wrapped(*args, **kw):
            try:
                return f(*args, **kw)
            except Exception as e:
                # Save exception since it can be clobbered during processing
                # below before we can re-raise
                exc_info = sys.exc_info()
                if notifier:
                    # Notification payload: call arguments + the exception.
                    payload = dict(args=args, exception=e)
                    payload.update(kw)
                    # Use a temp vars so we don't shadow
                    # our outer definitions.
                    temp_level = level
                    if not temp_level:
                        temp_level = notifier.ERROR
                    temp_type = event_type
                    if not temp_type:
                        # If f has multiple decorators, they must use
                        # functools.wraps to ensure the name is
                        # propagated.
                        temp_type = f.__name__
                    notifier.notify(publisher_id, temp_type, temp_level,
                                    payload)
                # re-raise original exception since it may have been clobbered
                # (Python 2 three-argument raise preserves the traceback).
                raise exc_info[0], exc_info[1], exc_info[2]
        return functools.wraps(f)(wrapped)
    return inner
class HeatException(Exception):
    """Base Heat Exception

    To correctly use this class, inherit from it and define
    a 'message' class attribute. That message will get printf'd
    with the keyword arguments provided to the constructor.
    """
    message = _("An unknown exception occurred.")

    def __init__(self, **kwargs):
        self.kwargs = kwargs
        try:
            # Interpolate the subclass's format string with the kwargs.
            self.message = self.message % kwargs
        except KeyError:
            exc_info = sys.exc_info()
            #kwargs doesn't match a variable in the message
            #log the issue and the kwargs
            logger.exception(_('Exception in string format operation'))
            for name, value in kwargs.iteritems():
                logger.error("%s: %s" % (name, value))
            if _FATAL_EXCEPTION_FORMAT_ERRORS:
                # Re-raise the original KeyError with its traceback
                # (Python 2 three-argument raise).
                raise exc_info[0], exc_info[1], exc_info[2]

    def __str__(self):
        return str(self.message)

    def __unicode__(self):
        return unicode(self.message)
# Simple HeatException subclasses: each only overrides the ``message``
# format string, which is interpolated with the constructor kwargs
# (e.g. MissingCredentialError(required='password')).
class MissingCredentialError(HeatException):
    message = _("Missing required credential: %(required)s")
class BadAuthStrategy(HeatException):
    message = _("Incorrect auth strategy, expected \"%(expected)s\" but "
                "received \"%(received)s\"")
class AuthBadRequest(HeatException):
    message = _("Connect error/bad request to Auth service at URL %(url)s.")
class AuthUrlNotFound(HeatException):
    message = _("Auth service at URL %(url)s not found.")
class AuthorizationFailure(HeatException):
    message = _("Authorization failed.")
class NotAuthenticated(HeatException):
    message = _("You are not authenticated.")
class Forbidden(HeatException):
    message = _("You are not authorized to complete this action.")
#NOTE(bcwaldon): here for backwards-compatability, need to deprecate.
class NotAuthorized(Forbidden):
    message = _("You are not authorized to complete this action.")
class Invalid(HeatException):
    message = _("Data supplied was not valid: %(reason)s")
class AuthorizationRedirect(HeatException):
    message = _("Redirecting to %(uri)s for authorization.")
class ClientConfigurationError(HeatException):
    message = _("There was an error configuring the client.")
class RequestUriTooLong(HeatException):
    message = _("The URI was too long.")
class ServerError(HeatException):
    message = _("The request returned 500 Internal Server Error"
                "\n\nThe response body:\n%(body)s")
class MaxRedirectsExceeded(HeatException):
    message = _("Maximum redirects (%(redirects)s) was exceeded.")
class InvalidRedirect(HeatException):
    message = _("Received invalid HTTP redirect.")
class NoServiceEndpoint(HeatException):
    message = _("Response from Keystone does not contain a Heat endpoint.")
class RegionAmbiguity(HeatException):
    message = _("Multiple 'image' service matches for region %(region)s. This "
                "generally means that a region is required and you have not "
                "supplied one.")
class UserParameterMissing(HeatException):
    message = _("The Parameter (%(key)s) was not provided.")
class UnknownUserParameter(HeatException):
    message = _("The Parameter (%(key)s) was not defined in template.")
class InvalidTemplateAttribute(HeatException):
    message = _("The Referenced Attribute (%(resource)s %(key)s)"
                " is incorrect.")
class InvalidTemplateReference(HeatException):
    message = _("The specified reference \"%(resource)s\" (in %(key)s)"
                " is incorrect.")
class UserKeyPairMissing(HeatException):
    message = _("The Key (%(key_name)s) could not be found.")
class FlavorMissing(HeatException):
    message = _("The Flavor ID (%(flavor_id)s) could not be found.")
class ImageNotFound(HeatException):
    message = _("The Image (%(image_name)s) could not be found.")
class NoUniqueImageFound(HeatException):
    message = _("Multiple images were found with name (%(image_name)s).")
class InvalidTenant(HeatException):
    message = _("Searching Tenant %(target)s "
                "from Tenant %(actual)s forbidden.")
class StackNotFound(HeatException):
    message = _("The Stack (%(stack_name)s) could not be found.")
class StackExists(HeatException):
    message = _("The Stack (%(stack_name)s) already exists.")
class StackValidationFailed(HeatException):
    message = _("%(message)s")
class ResourceNotFound(HeatException):
    message = _("The Resource (%(resource_name)s) could not be found "
                "in Stack %(stack_name)s.")
class ResourceTypeNotFound(HeatException):
    message = _("The Resource Type (%(type_name)s) could not be found.")
class ResourceNotAvailable(HeatException):
    message = _("The Resource (%(resource_name)s) is not available.")
class PhysicalResourceNotFound(HeatException):
    message = _("The Resource (%(resource_id)s) could not be found.")
class WatchRuleNotFound(HeatException):
    message = _("The Watch Rule (%(watch_name)s) could not be found.")
class ResourceFailure(HeatException):
    """Wraps an exception raised while operating on a resource."""
    message = _("%(exc_type)s: %(message)s")

    def __init__(self, exception, resource, action=None):
        # Unwrap nested ResourceFailures so we keep the root cause.
        if isinstance(exception, ResourceFailure):
            exception = getattr(exception, 'exc', exception)
        self.exc = exception
        self.resource = resource
        self.action = action
        exc_type = type(exception).__name__
        super(ResourceFailure, self).__init__(exc_type=exc_type,
                                              message=str(exception))
class NotSupported(HeatException):
    message = _("%(feature)s is not supported.")
class ResourcePropertyConflict(HeatException):
    message = _('Cannot define the following properties at the same time: %s.')
    def __init__(self, *args):
        # Unlike other subclasses, the message is interpolated with a
        # comma-joined list of property names rather than kwargs.
        self.message = self.message % ", ".join(args)
        super(ResourcePropertyConflict, self).__init__()
class HTTPExceptionDisguise(Exception):
    """Disguises HTTP exceptions so they can be handled by the webob fault
    application in the wsgi pipeline.
    """

    def __init__(self, exception):
        self.exc = exception
        # Capture the traceback of the exception currently being handled;
        # must therefore be constructed inside an ``except`` block.
        self.tb = sys.exc_info()[2]
class EgressRuleNotAllowed(HeatException):
    message = _("Egress rules are only allowed when "
                "Neutron is used and the 'VpcId' property is set.")
# Lightweight non-Heat exception hierarchy used by some internal callers.
class Error(Exception):
    def __init__(self, message=None):
        super(Error, self).__init__(message)
class NotFound(Error):
    pass
class InvalidContentType(HeatException):
    # NOTE(review): message is not wrapped in _() like its siblings --
    # possibly an untranslated oversight; confirm before changing.
    message = "Invalid content type %(content_type)s"
class RequestLimitExceeded(HeatException):
    message = _('Request limit exceeded: %(message)s')
class StackResourceLimitExceeded(HeatException):
    message = _('Maximum resources per stack exceeded.')
| citrix-openstack-build/heat | heat/common/exception.py | Python | apache-2.0 | 9,969 |
from copy import deepcopy
import time
from libAnt.message import BroadcastMessage
class ProfileMessage:
    """A profile-level view of an ANT+ broadcast message, linked to the
    previous message of the same chain so running statistics can be kept.
    """
    def __init__(self, msg, previous):
        # Previous ProfileMessage in the chain, or None for the first one.
        self.previous = previous
        # Deep-copy so later mutation of the source message cannot affect us.
        self.msg = deepcopy(msg)
        # Running count of messages in this chain (1-based).
        self.count = previous.count + 1 if previous is not None else 1
        self.timestamp = time.time()
        # Timestamp of the very first message in the chain.
        self.firstTimestamp = previous.firstTimestamp if previous is not None else self.timestamp

    def __str__(self):
        return str(self.msg.deviceNumber)

    @staticmethod
    def decode(cls, msg: BroadcastMessage):
        # NOTE(review): declared @staticmethod yet takes ``cls`` explicitly,
        # so callers must pass the profile class themselves; @classmethod may
        # have been intended. Also the constructed object is discarded --
        # a missing ``return``? Confirm against callers before changing.
        if msg.deviceType in cls.match:
            cls.match[msg.deviceType]()
# -*-coding:Utf-8 -*
# Copyright (c) 2013 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier contenant la volonté RelacherRames"""
import re
from secondaires.navigation.equipage.ordres.relacher_rames import \
RelacherRames as OrdreRelacherRames
from secondaires.navigation.equipage.ordres.long_deplacer import LongDeplacer
from secondaires.navigation.equipage.volonte import Volonte
class RelacherRames(Volonte):
    """Will (volonté) ordering the crew holding the oars to let go of them."""

    cle = "relacher_rames"
    ordre_court = re.compile(r"^rr$", re.I)
    ordre_long = re.compile(r"^relacher\s+rames?$", re.I)

    def choisir_matelots(self, exception=None):
        """Return the (sailor, oar pair) couples able to carry this out."""
        equipage = self.navire.equipage
        couples = []
        tenues = [paire for paire in self.navire.rames if paire.tenu]
        for paire in tenues:
            matelot = equipage.get_matelot_depuis_personnage(paire.tenu)
            if matelot:
                couples.append((matelot, paire))
        return couples

    def executer(self, objectifs):
        """Carry out the will: have each sailor release its oars."""
        for matelot, rames in objectifs:
            # Cancel any pending rowing order before releasing.
            matelot.invalider_ordres("ramer")
            relacher = OrdreRelacherRames(matelot, self.navire, rames)
            self.ajouter_ordres(matelot, [relacher])

    def crier_ordres(self, personnage):
        """Have the character shout the order aloud."""
        msg = "{} s'écrie : rameurs, laissez courir !".format(
                personnage.distinction_audible)
        self.navire.envoyer(msg)

    @classmethod
    def extraire_arguments(cls, navire):
        """Extract the will's arguments (this will takes none)."""
        return ()
"""Python API for the lab streaming layer.
The lab streaming layer provides a set of functions to make instrument data
accessible in real time within a lab network. From there, streams can be
picked up by recording programs, viewing programs or custom experiment
applications that access data streams in real time.
The API covers two areas:
- The "push API" allows to create stream outlets and to push data (regular
or irregular measurement time series, event data, coded audio/video frames,
etc.) into them.
- The "pull API" allows to create stream inlets and read time-synched
experiment data from them (for recording, viewing or experiment control).
pylsl has been tested with Python 2.7 and 3.4.
"""
import os
import platform
import struct
from ctypes import CDLL, util, byref, c_char_p, c_void_p, c_double, c_int, \
c_long, c_float, c_short, c_byte, c_longlong
__all__ = ['IRREGULAR_RATE', 'DEDUCED_TIMESTAMP', 'FOREVER', 'cf_float32',
'cf_double64', 'cf_string', 'cf_int32', 'cf_int16', 'cf_int8',
'cf_int64', 'cf_undefined', 'protocol_version', 'library_version',
'local_clock', 'StreamInfo', 'StreamOutlet', 'resolve_streams',
'resolve_byprop', 'resolve_bypred', 'StreamInlet', 'XMLElement',
'ContinuousResolver', 'TimeoutError', 'LostError',
'InvalidArgumentError', 'InternalError', 'stream_info',
'stream_outlet', 'stream_inlet', 'xml_element', 'timeout_error',
'lost_error', 'vectorf', 'vectord', 'vectorl', 'vectori',
'vectors', 'vectorc', 'vectorstr', 'resolve_stream']
# =================
# === Constants ===
# =================
# Constant to indicate that a stream has variable sampling rate.
IRREGULAR_RATE = 0.0
# Constant to indicate that a sample has the next successive time stamp
# according to the stream's defined sampling rate. Optional optimization to
# transmit less data per sample.
DEDUCED_TIMESTAMP = -1.0
# A very large time value (ca. 1 year); can be used in timeouts.
FOREVER = 32000000.0
# Value formats supported by LSL. LSL data streams are sequences of samples,
# each of which is a same-size vector of values with one of the below types.
# (These integer codes mirror the lsl_channel_format_t enum in liblsl.)
# For up to 24-bit precision measurements in the appropriate physical unit (
# e.g., microvolts). Integers from -16777216 to 16777216 are represented
# accurately.
cf_float32 = 1
# For universal numeric data as long as permitted by network and disk budget.
# The largest representable integer is 53-bit.
cf_double64 = 2
# For variable-length ASCII strings or data blobs, such as video frames,
# complex event descriptions, etc.
cf_string = 3
# For high-rate digitized formats that require 32-bit precision. Depends
# critically on meta-data to represent meaningful units. Useful for
# application event codes or other coded data.
cf_int32 = 4
# For very high bandwidth signals or CD quality audio (for professional audio
# float is recommended).
cf_int16 = 5
# For binary signals or other coded data.
cf_int8 = 6
# For now only for future compatibility. Support for this type is not
# available on all languages and platforms.
cf_int64 = 7
# Can not be transmitted.
cf_undefined = 0
# ==========================================================
# === Free Functions provided by the lab streaming layer ===
# ==========================================================
def protocol_version():
    """Protocol version.

    The major version is protocol_version() / 100;
    The minor version is protocol_version() % 100;
    Clients with different minor versions are protocol-compatible with each
    other while clients with different major versions will refuse to work
    together.
    """
    # Thin wrapper around the liblsl C function.
    return lib.lsl_protocol_version()
def library_version():
    """Version of the underlying liblsl library.

    The major version is library_version() / 100;
    The minor version is library_version() % 100;
    """
    # Thin wrapper around the liblsl C function.
    return lib.lsl_library_version()
def local_clock():
    """Obtain a local system time stamp in seconds.

    The resolution is better than a millisecond. This reading can be used to
    assign time stamps to samples as they are being acquired.
    If the "age" of a sample is known at a particular time (e.g., from USB
    transmission delays), it can be used as an offset to lsl_local_clock() to
    obtain a better estimate of when a sample was actually captured. See
    StreamOutlet.push_sample() for a use case.
    """
    return lib.lsl_local_clock()
# ==========================
# === Stream Declaration ===
# ==========================
class StreamInfo:
    """The StreamInfo object stores the declaration of a data stream.

    Represents the following information:
     a) stream data format (#channels, channel format)
     b) core information (stream name, content type, sampling rate)
     c) optional meta-data about the stream content (channel labels,
        measurement units, etc.)

    Whenever a program wants to provide a new stream on the lab network it will
    typically first create a StreamInfo to describe its properties and then
    construct a StreamOutlet with it to create the stream on the network.
    Recipients who discover the outlet can query the StreamInfo; it is also
    written to disk when recording the stream (playing a similar role as a file
    header).
    """
    def __init__(self, name='untitled', type='', channel_count=1,
                 nominal_srate=IRREGULAR_RATE, channel_format=cf_float32,
                 source_id='', handle=None):
        """Construct a new StreamInfo object.

        Core stream information is specified here. Any remaining meta-data can
        be added later.

        Keyword arguments:
        name -- Name of the stream. Describes the device (or product series)
                that this stream makes available (for use by programs,
                experimenters or data analysts). Cannot be empty.
        type -- Content type of the stream. By convention LSL uses the content
                types defined in the XDF file format specification where
                applicable (code.google.com/p/xdf). The content type is the
                preferred way to find streams (as opposed to searching by name).
        channel_count -- Number of channels per sample. This stays constant for
                         the lifetime of the stream. (default 1)
        nominal_srate -- The sampling rate (in Hz) as advertised by the data
                         source, regular (otherwise set to IRREGULAR_RATE).
                         (default IRREGULAR_RATE)
        channel_format -- Format/type of each channel. If your channels have
                          different formats, consider supplying multiple
                          streams or use the largest type that can hold
                          them all (such as cf_double64). It is also allowed
                          to pass this as a string, without the cf_ prefix,
                          e.g., 'float32' (default cf_float32)
        source_id -- Unique identifier of the device or source of the data, if
                     available (such as the serial number). This is critical
                     for system robustness since it allows recipients to
                     recover from failure even after the serving app, device or
                     computer crashes (just by finding a stream with the same
                     source id on the network again). Therefore, it is highly
                     recommended to always try to provide whatever information
                     can uniquely identify the data source itself.
                     (default '')
        handle -- Wrap an existing liblsl streaminfo handle instead of
                  creating a new one (used internally, e.g. by the
                  resolve functions).
        """
        if handle is not None:
            self.obj = c_void_p(handle)
        else:
            if isinstance(channel_format, str):
                # Allow, e.g., 'float32' instead of cf_float32.
                # NOTE(review): string2fmt is defined elsewhere in this module.
                channel_format = string2fmt[channel_format]
            self.obj = lib.lsl_create_streaminfo(c_char_p(str.encode(name)),
                                                 c_char_p(str.encode(type)),
                                                 channel_count,
                                                 c_double(nominal_srate),
                                                 channel_format,
                                                 c_char_p(str.encode(source_id)))
            self.obj = c_void_p(self.obj)
            if not self.obj:
                raise RuntimeError("could not create stream description "
                                   "object.")
    def __del__(self):
        """ Destroy a previously created StreamInfo object. """
        # noinspection PyBroadException
        try:
            # Best-effort cleanup of the C handle; errors during interpreter
            # shutdown are deliberately ignored.
            lib.lsl_destroy_streaminfo(self.obj)
        except:
            pass
    # === Core Information (assigned at construction) ===
    def name(self):
        """Name of the stream.

        This is a human-readable name. For streams offered by device modules,
        it refers to the type of device or product series that is generating
        the data of the stream. If the source is an application, the name may
        be a more generic or specific identifier. Multiple streams with the
        same name can coexist, though potentially at the cost of ambiguity (for
        the recording app or experimenter).
        """
        return lib.lsl_get_name(self.obj).decode('utf-8')
    def type(self):
        """Content type of the stream.

        The content type is a short string such as "EEG", "Gaze" which
        describes the content carried by the channel (if known). If a stream
        contains mixed content this value need not be assigned but may instead
        be stored in the description of channel types. To be useful to
        applications and automated processing systems using the recommended
        content types is preferred.
        """
        return lib.lsl_get_type(self.obj).decode('utf-8')
    def channel_count(self):
        """Number of channels of the stream.

        A stream has at least one channel; the channel count stays constant for
        all samples.
        """
        return lib.lsl_get_channel_count(self.obj)
    def nominal_srate(self):
        """Sampling rate of the stream, according to the source (in Hz).

        If a stream is irregularly sampled, this should be set to
        IRREGULAR_RATE.
        Note that no data will be lost even if this sampling rate is incorrect
        or if a device has temporary hiccups, since all samples will be
        transmitted anyway (except for those dropped by the device itself).
        However, when the recording is imported into an application, a good
        data importer may correct such errors more accurately if the advertised
        sampling rate was close to the specs of the device.
        """
        return lib.lsl_get_nominal_srate(self.obj)
    def channel_format(self):
        """Channel format of the stream.

        All channels in a stream have the same format. However, a device might
        offer multiple time-synched streams each with its own format.
        """
        return lib.lsl_get_channel_format(self.obj)
    def source_id(self):
        """Unique identifier of the stream's source, if available.

        The unique source (or device) identifier is an optional piece of
        information that, if available, allows that endpoints (such as the
        recording program) can re-acquire a stream automatically once it is
        back online.
        """
        return lib.lsl_get_source_id(self.obj).decode('utf-8')
    # === Hosting Information (assigned when bound to an outlet/inlet) ===
    def version(self):
        """Protocol version used to deliver the stream."""
        return lib.lsl_get_version(self.obj)
    def created_at(self):
        """Creation time stamp of the stream.

        This is the time stamp when the stream was first created
        (as determined via local_clock() on the providing machine).
        """
        return lib.lsl_get_created_at(self.obj)
    def uid(self):
        """Unique ID of the stream outlet instance (once assigned).

        This is a unique identifier of the stream outlet, and is guaranteed to
        be different across multiple instantiations of the same outlet (e.g.,
        after a re-start).
        """
        return lib.lsl_get_uid(self.obj).decode('utf-8')
    def session_id(self):
        """Session ID for the given stream.

        The session id is an optional human-assigned identifier of the
        recording session. While it is rarely used, it can be used to prevent
        concurrent recording activitites on the same sub-network (e.g., in
        multiple experiment areas) from seeing each other's streams
        (can be assigned in a configuration file read by liblsl, see also
        documentation on configuration files).
        """
        return lib.lsl_get_session_id(self.obj).decode('utf-8')
    def hostname(self):
        """Hostname of the providing machine."""
        return lib.lsl_get_hostname(self.obj).decode('utf-8')
    # === Data Description (can be modified) ===
    def desc(self):
        """Extended description of the stream.

        It is highly recommended that at least the channel labels are described
        here. See code examples in the documentation. Other information, such
        as amplifier settings, measurement units if deviating from defaults,
        setup information, subject information, etc., can be specified here, as
        well. See Meta-Data Recommendations in the docs.
        Important: if you use a stream content type for which meta-data
        recommendations exist, please try to lay out your meta-data in
        agreement with these recommendations for compatibility with other
        applications.
        """
        return XMLElement(lib.lsl_get_desc(self.obj))
    def as_xml(self):
        """Retrieve the entire stream_info in XML format.

        This yields an XML document (in string form) whose top-level element is
        <description>. The description element contains one element for each
        field of the stream_info class, including:
        a) the core elements <name>, <type>, <channel_count>, <nominal_srate>,
           <channel_format>, <source_id>
        b) the misc elements <version>, <created_at>, <uid>, <session_id>,
           <v4address>, <v4data_port>, <v4service_port>, <v6address>,
           <v6data_port>, <v6service_port>
        c) the extended description element <desc> with user-defined
           sub-elements.
        """
        return lib.lsl_get_xml(self.obj).decode('utf-8')
# =====================
# === Stream Outlet ===
# =====================
class StreamOutlet:
    """A stream outlet.

    Outlets are used to make streaming data (and the meta-data) available on
    the lab network.
    """
    def __init__(self, info, chunk_size=0, max_buffered=360):
        """Establish a new stream outlet. This makes the stream discoverable.

        Keyword arguments:
        info -- The StreamInfo object to describe this stream. Stays
                constant over the lifetime of the outlet.
        chunk_size --- Optionally the desired chunk granularity (in samples)
                       for transmission. If unspecified, each push operation
                       yields one chunk. Inlets can override this setting.
                       (default 0)
        max_buffered -- Optionally the maximum amount of data to buffer (in
                        seconds if there is a nominal sampling rate, otherwise
                        x100 in samples). The default is 6 minutes of data.
                        Note that, for high-bandwidth data, you will want to
                        use a lower value here to avoid running out of RAM.
                        (default 360)
        """
        self.obj = lib.lsl_create_outlet(info.obj, chunk_size, max_buffered)
        self.obj = c_void_p(self.obj)
        if not self.obj:
            raise RuntimeError("could not create stream outlet.")
        # Cache per-format helpers so push_sample/push_chunk avoid
        # re-resolving them on every call.
        # NOTE(review): fmt2push_sample/fmt2push_chunk/fmt2type are defined
        # elsewhere in this module.
        self.channel_format = info.channel_format()
        self.channel_count = info.channel_count()
        self.do_push_sample = fmt2push_sample[self.channel_format]
        self.do_push_chunk = fmt2push_chunk[self.channel_format]
        self.value_type = fmt2type[self.channel_format]
        # ctypes array type for one full sample
        self.sample_type = self.value_type*self.channel_count
    def __del__(self):
        """Destroy an outlet.

        The outlet will no longer be discoverable after destruction and all
        connected inlets will stop delivering data.
        """
        # noinspection PyBroadException
        try:
            # Best-effort cleanup; ignore errors during interpreter shutdown.
            lib.lsl_destroy_outlet(self.obj)
        except:
            pass
    def push_sample(self, x, timestamp=0.0, pushthrough=True):
        """Push a sample into the outlet.

        Each entry in the list corresponds to one channel.

        Keyword arguments:
        x -- A list of values to push (one per channel).
        timestamp -- Optionally the capture time of the sample, in agreement
                     with local_clock(); if omitted, the current
                     time is used. (default 0.0)
        pushthrough -- Whether to push the sample through to the receivers
                       instead of buffering it with subsequent samples.
                       Note that the chunk_size, if specified at outlet
                       construction, takes precedence over the pushthrough flag.
                       (default True)
        """
        if len(x) == self.channel_count:
            if self.channel_format == cf_string:
                x = [v.encode('utf-8') for v in x]
            handle_error(self.do_push_sample(self.obj, self.sample_type(*x),
                                             c_double(timestamp),
                                             c_int(pushthrough)))
        else:
            raise ValueError("length of the data must correspond to the "
                             "stream's channel count.")
    def push_chunk(self, x, timestamp=0.0, pushthrough=True):
        """Push a list of samples into the outlet.

        samples -- A list of samples, either as a list of lists or a list of
                   multiplexed values.
        timestamp -- Optionally the capture time of the most recent sample, in
                     agreement with local_clock(); if omitted, the current
                     time is used. The time stamps of other samples are
                     automatically derived according to the sampling rate of
                     the stream. (default 0.0)
        pushthrough Whether to push the chunk through to the receivers instead
                    of buffering it with subsequent samples. Note that the
                    chunk_size, if specified at outlet construction, takes
                    precedence over the pushthrough flag. (default True)
        """
        if len(x):
            if type(x[0]) is list:
                # flatten a list of lists into one multiplexed list
                x = [v for sample in x for v in sample]
            if self.channel_format == cf_string:
                x = [v.encode('utf-8') for v in x]
            if len(x) % self.channel_count == 0:
                constructor = self.value_type*len(x)
                # noinspection PyCallingNonCallable
                handle_error(self.do_push_chunk(self.obj, constructor(*x),
                                                c_long(len(x)),
                                                c_double(timestamp),
                                                c_int(pushthrough)))
            else:
                raise ValueError("each sample must have the same number of "
                                 "channels.")
    def have_consumers(self):
        """Check whether consumers are currently registered.

        While it does not hurt, there is technically no reason to push samples
        if there is no consumer.
        """
        return bool(lib.lsl_have_consumers(self.obj))
    def wait_for_consumers(self, timeout):
        """Wait until some consumer shows up (without wasting resources).

        Returns True if the wait was successful, False if the timeout expired.
        """
        return bool(lib.lsl_wait_for_consumers(self.obj, c_double(timeout)))
# =========================
# === Resolve Functions ===
# =========================
def resolve_streams(wait_time=1.0):
    """Resolve all streams currently visible on the network.

    The network is usually the subnet specified at the local router, but
    may also include a group of machines visible to each other via
    multicast packets, or a list of hostnames; these details can be
    customized in the lab streaming layer configuration file.

    Keyword arguments:
    wait_time -- seconds to search for streams. Warning: if this is too
                 short (<0.5s) only a subset (or none) of the outlets
                 present on the network may be returned. (default 1.0)

    Returns a list of StreamInfo objects (with empty desc field); any of
    them can subsequently be used to open an inlet, from which the full
    description can be retrieved.
    """
    # The C API fills at most 1024 stream handles into this array.
    # noinspection PyCallingNonCallable
    found = (c_void_p * 1024)()
    count = lib.lsl_resolve_all(byref(found), 1024, c_double(wait_time))
    return [StreamInfo(handle=found[i]) for i in range(count)]
def resolve_byprop(prop, value, minimum=1, timeout=FOREVER):
    """Resolve all streams whose given property has a specific value.

    If the goal is to resolve a specific stream, this method is preferred
    over resolving all streams and then selecting the desired one.

    Keyword arguments:
    prop -- the StreamInfo property to match (e.g., "name", "type",
            "source_id", or "desc/manufaturer").
    value -- the string value that the property should have (e.g., "EEG"
             as the type property).
    minimum -- return at least this many streams. (default 1)
    timeout -- operation timeout in seconds; on expiry, fewer than the
               desired number of streams (possibly none) are returned.
               (default FOREVER)

    Returns a list of matching StreamInfo objects (with empty desc field),
    any of which can subsequently be used to open an inlet.

    Example: results = resolve_byprop("type", "EEG")
    """
    # noinspection PyCallingNonCallable
    found = (c_void_p * 1024)()
    count = lib.lsl_resolve_byprop(byref(found), 1024,
                                   c_char_p(str.encode(prop)),
                                   c_char_p(str.encode(value)),
                                   minimum,
                                   c_double(timeout))
    return [StreamInfo(handle=found[i]) for i in range(count)]
def resolve_bypred(predicate, minimum=1, timeout=FOREVER):
    """Resolve all streams that match a given predicate.

    Advanced query that allows imposing more conditions on the retrieved
    streams; the given string is an XPath 1.0 predicate for the
    <description> node (omitting the surrounding []'s), see also
    http://en.wikipedia.org/w/index.php?title=XPath_1.0&oldid=474981951.

    Keyword arguments:
    predicate -- the predicate string, e.g. "name='BioSemi'" or
                 "type='EEG' and starts-with(name,'BioSemi') and
                 count(description/desc/channels/channel)=32"
    minimum -- return at least this many streams. (default 1)
    timeout -- operation timeout in seconds; on expiry, fewer than the
               desired number of streams (possibly none) are returned.
               (default FOREVER)

    Returns a list of matching StreamInfo objects (with empty desc field),
    any of which can subsequently be used to open an inlet.
    """
    # noinspection PyCallingNonCallable
    found = (c_void_p * 1024)()
    count = lib.lsl_resolve_bypred(byref(found), 1024,
                                   c_char_p(str.encode(predicate)),
                                   minimum,
                                   c_double(timeout))
    return [StreamInfo(handle=found[i]) for i in range(count)]
# ====================
# === Stream Inlet ===
# ====================
class StreamInlet:
    """A stream inlet.

    Inlets are used to receive streaming data (and meta-data) from the lab
    network.
    """

    def __init__(self, info, max_buflen=360, max_chunklen=0, recover=True):
        """Construct a new stream inlet from a resolved stream description.

        Keyword arguments:
        info -- A resolved stream description object (as coming from one of
            the resolver functions). Note: the stream_inlet may also be
            constructed with a fully-specified stream_info, if the desired
            channel format and count is already known up-front, but this is
            strongly discouraged and should only ever be done if there is
            no time to resolve the stream up-front (e.g., due to
            limitations in the client program).
        max_buflen -- Optionally the maximum amount of data to buffer (in
                      seconds if there is a nominal sampling rate, otherwise
                      x100 in samples). Recording applications want to use a
                      fairly large buffer size here, while real-time
                      applications would only buffer as much as they need to
                      perform their next calculation. (default 360)
        max_chunklen -- Optionally the maximum size, in samples, at which
                        chunks are transmitted (the default corresponds to
                        the chunk sizes used by the sender). Recording
                        programs can use a generous size here (leaving it to
                        the network how to pack things), while real-time
                        applications may want a finer (perhaps 1-sample)
                        granularity. If left unspecified (=0), the sender
                        determines the chunk granularity. (default 0)
        recover -- Try to silently recover lost streams that are recoverable
                   (=those that have a source_id set). In all other cases
                   (recover is False or the stream is not recoverable)
                   functions may throw a lost_error if the stream's source
                   is lost (e.g., due to an app or computer crash).
                   (default True)
        """
        if type(info) is list:
            raise TypeError("description needs to be of type StreamInfo, "
                            "got a list.")
        self.obj = lib.lsl_create_inlet(info.obj, max_buflen, max_chunklen,
                                        recover)
        self.obj = c_void_p(self.obj)
        if not self.obj:
            raise RuntimeError("could not create stream inlet.")
        # Cache the per-format metadata and the ctypes pull entry points so
        # the hot pull_* paths avoid repeated lookups.
        self.channel_format = info.channel_format()
        self.channel_count = info.channel_count()
        self.do_pull_sample = fmt2pull_sample[self.channel_format]
        self.do_pull_chunk = fmt2pull_chunk[self.channel_format]
        self.value_type = fmt2type[self.channel_format]
        self.sample_type = self.value_type*self.channel_count
        self.sample = self.sample_type()
        # Pre-allocated pull_chunk buffers, keyed by max_samples.
        self.buffers = {}

    def __del__(self):
        """Destructor. The inlet will automatically disconnect if destroyed."""
        # Swallow all errors: during interpreter shutdown the library
        # binding may already have been torn down.
        # noinspection PyBroadException
        try:
            lib.lsl_destroy_inlet(self.obj)
        except:
            pass

    def info(self, timeout=FOREVER):
        """Retrieve the complete information of the given stream.

        This includes the extended description. Can be invoked at any time
        of the stream's lifetime.

        Keyword arguments:
        timeout -- Timeout of the operation. (default FOREVER)

        Throws a TimeoutError (if the timeout expires), or LostError (if
        the stream source has been lost).
        """
        errcode = c_int()
        result = lib.lsl_get_fullinfo(self.obj, c_double(timeout),
                                      byref(errcode))
        handle_error(errcode)
        return StreamInfo(handle=result)

    def open_stream(self, timeout=FOREVER):
        """Subscribe to the data stream.

        All samples pushed in at the other end from this moment onwards
        will be queued and eventually be delivered in response to
        pull_sample() or pull_chunk() calls. Pulling a sample without some
        preceding open_stream is permitted (the stream will then be opened
        implicitly).

        Keyword arguments:
        timeout -- Optional timeout of the operation (default FOREVER).

        Throws a TimeoutError (if the timeout expires), or LostError (if
        the stream source has been lost).
        """
        errcode = c_int()
        lib.lsl_open_stream(self.obj, c_double(timeout), byref(errcode))
        handle_error(errcode)

    def close_stream(self):
        """Drop the current data stream.

        All samples that are still buffered or in flight will be dropped
        and transmission and buffering of data for this inlet will be
        stopped. If an application stops being interested in data from a
        source (temporarily or not) but keeps the outlet alive, it should
        call lsl_close_stream() to not waste unnecessary system and network
        resources.
        """
        lib.lsl_close_stream(self.obj)

    def time_correction(self, timeout=FOREVER):
        """Retrieve an estimated time correction offset for the given stream.

        The first call to this function takes several miliseconds until a
        reliable first estimate is obtained. Subsequent calls are
        instantaneous (and rely on periodic background updates). The
        precision of these estimates should be below 1 ms (empirically
        within +/-0.2 ms).

        Keyword arguments:
        timeout -- Timeout to acquire the first time-correction estimate
                   (default FOREVER).

        Returns the current time correction estimate. This is the number
        that needs to be added to a time stamp that was remotely generated
        via local_clock() to map it into the local clock domain of this
        machine.

        Throws a TimeoutError (if the timeout expires), or LostError (if
        the stream source has been lost).
        """
        errcode = c_int()
        result = lib.lsl_time_correction(self.obj, c_double(timeout),
                                         byref(errcode))
        handle_error(errcode)
        return result

    def pull_sample(self, timeout=FOREVER, sample=None):
        """Pull a sample from the inlet and return it.

        Keyword arguments:
        timeout -- The timeout for this operation, if any. (default FOREVER)
                   If this is passed as 0.0, then the function returns only
                   a sample if one is buffered for immediate pickup.

        Returns a tuple (sample, timestamp) where sample is a list of
        channel values and timestamp is the capture time of the sample on
        the remote machine, or (None, None) if no new sample was available.
        To remap this time stamp to the local clock, add the value returned
        by .time_correction() to it.

        Throws a LostError if the stream source has been lost. Note that,
        if the timeout expires, no TimeoutError is thrown (because this
        case is not considered an error).
        """
        # support for the legacy API: pull_sample(assign_to_list, timeout)
        if type(timeout) is list:
            assign_to = timeout
            timeout = sample if type(sample) is float else 0.0
        else:
            assign_to = None
        errcode = c_int()
        timestamp = self.do_pull_sample(self.obj, byref(self.sample),
                                        self.channel_count, c_double(timeout),
                                        byref(errcode))
        handle_error(errcode)
        if timestamp:
            sample = [v for v in self.sample]
            if self.channel_format == cf_string:
                sample = [v.decode('utf-8') for v in sample]
            if assign_to is not None:
                assign_to[:] = sample
            return sample, timestamp
        else:
            return None, None

    def pull_chunk(self, timeout=0.0, max_samples=1024):
        """Pull a chunk of samples from the inlet.

        Keyword arguments:
        timeout -- The timeout of the operation; if passed as 0.0, then
                   only samples available for immediate pickup will be
                   returned. (default 0.0)
        max_samples -- Maximum number of samples to return. (default 1024)

        Returns a tuple (samples, timestamps) where samples is a list of
        samples (each itself a list of values), and timestamps is a list of
        time-stamps.

        Throws a LostError if the stream source has been lost.
        """
        # look up a pre-allocated buffer of appropriate length
        num_channels = self.channel_count
        max_values = max_samples*num_channels
        if max_samples not in self.buffers:
            # noinspection PyCallingNonCallable
            self.buffers[max_samples] = ((self.value_type*max_values)(),
                                         (c_double*max_samples)())
        buffer = self.buffers[max_samples]
        # read data into it
        errcode = c_int()
        # noinspection PyCallingNonCallable
        num_elements = self.do_pull_chunk(self.obj, byref(buffer[0]),
                                          byref(buffer[1]), max_values,
                                          max_samples, c_double(timeout),
                                          byref(errcode))
        handle_error(errcode)
        # return results (note: could offer a more efficient format in the
        # future, e.g., a numpy array)
        # num_elements is always a whole number of samples, so use integer
        # division instead of float division plus int() truncation.
        num_samples = num_elements // num_channels
        samples = [[buffer[0][s*num_channels+c] for c in range(num_channels)]
                   for s in range(num_samples)]
        if self.channel_format == cf_string:
            samples = [[v.decode('utf-8') for v in s] for s in samples]
        timestamps = [buffer[1][s] for s in range(num_samples)]
        return samples, timestamps

    def samples_available(self):
        """Query whether samples are currently available for immediate pickup.

        Note that it is not a good idea to use samples_available() to
        determine whether a pull_*() call would block: to be sure, set the
        pull timeout to 0.0 or an acceptably low value. If the underlying
        implementation supports it, the value will be the number of samples
        available (otherwise it will be 1 or 0).
        """
        return lib.lsl_samples_available(self.obj)

    def was_clock_reset(self):
        """Query whether the clock was potentially reset since the last call.

        This rarely-used function is only needed for applications that
        combine multiple time_correction values to estimate precise clock
        drift if they should tolerate cases where the source machine was
        hot-swapped or restarted.
        """
        return bool(lib.lsl_was_clock_reset(self.obj))
# ===================
# === XML Element ===
# ===================
class XMLElement:
    """A lightweight XML element tree modeling the .desc() field of StreamInfo.

    Has a name and can have multiple named children or have text content as
    value; attributes are omitted. Insider note: The interface is modeled
    after a subset of pugixml's node type and is compatible with it. See also
    http://pugixml.googlecode.com/svn/tags/latest/docs/manual/access.html for
    additional documentation.
    """

    def __init__(self, handle):
        """Construct new XML element from existing handle."""
        self.e = handle

    # === Tree Navigation ===

    def first_child(self):
        """Get the first child of the element."""
        return XMLElement(lib.lsl_first_child(self.e))

    def last_child(self):
        """Get the last child of the element."""
        return XMLElement(lib.lsl_last_child(self.e))

    def child(self, name):
        """Get a child with a specified name."""
        return XMLElement(lib.lsl_child(self.e, str.encode(name)))

    def next_sibling(self, name=None):
        """Get the next sibling in the children list of the parent node.

        If a name is provided, the next sibling with the given name is
        returned.
        """
        if name is None:
            return XMLElement(lib.lsl_next_sibling(self.e))
        else:
            return XMLElement(lib.lsl_next_sibling_n(self.e, str.encode(name)))

    def previous_sibling(self, name=None):
        """Get the previous sibling in the children list of the parent node.

        If a name is provided, the previous sibling with the given name is
        returned.
        """
        if name is None:
            return XMLElement(lib.lsl_previous_sibling(self.e))
        else:
            return XMLElement(lib.lsl_previous_sibling_n(self.e,
                                                         str.encode(name)))

    def parent(self):
        """Get the parent node."""
        return XMLElement(lib.lsl_parent(self.e))

    # === Content Queries ===

    def empty(self):
        """Whether this node is empty."""
        return bool(lib.lsl_empty(self.e))

    def is_text(self):
        """Whether this is a text body (instead of an XML element).

        True both for plain char data and CData.
        """
        return bool(lib.lsl_is_text(self.e))

    def name(self):
        """Name of the element."""
        return lib.lsl_name(self.e).decode('utf-8')

    def value(self):
        """Value of the element."""
        return lib.lsl_value(self.e).decode('utf-8')

    def child_value(self, name=None):
        """Get child value (value of the first child that is text).

        If a name is provided, then the value of the first child with the
        given name is returned.
        """
        if name is None:
            res = lib.lsl_child_value(self.e)
        else:
            res = lib.lsl_child_value_n(self.e, str.encode(name))
        return res.decode('utf-8')

    # === Modification ===

    def append_child_value(self, name, value):
        """Append a child node with a given name, which has a (nameless)
        plain-text child with the given text value."""
        return XMLElement(lib.lsl_append_child_value(self.e,
                                                     str.encode(name),
                                                     str.encode(value)))

    def prepend_child_value(self, name, value):
        """Prepend a child node with a given name, which has a (nameless)
        plain-text child with the given text value."""
        return XMLElement(lib.lsl_prepend_child_value(self.e,
                                                      str.encode(name),
                                                      str.encode(value)))

    def set_child_value(self, name, value):
        """Set the text value of the (nameless) plain-text child of a named
        child node."""
        return XMLElement(lib.lsl_set_child_value(self.e,
                                                  str.encode(name),
                                                  str.encode(value)))

    def set_name(self, name):
        """Set the element's name. Returns False if the node is empty."""
        return bool(lib.lsl_set_name(self.e, str.encode(name)))

    def set_value(self, value):
        """Set the element's value. Returns False if the node is empty."""
        return bool(lib.lsl_set_value(self.e, str.encode(value)))

    def append_child(self, name):
        """Append a child element with the specified name."""
        return XMLElement(lib.lsl_append_child(self.e, str.encode(name)))

    def prepend_child(self, name):
        """Prepend a child element with the specified name."""
        return XMLElement(lib.lsl_prepend_child(self.e, str.encode(name)))

    def append_copy(self, elem):
        """Append a copy of the specified element as a child."""
        return XMLElement(lib.lsl_append_copy(self.e, elem.e))

    def prepend_copy(self, elem):
        """Prepend a copy of the specified element as a child."""
        return XMLElement(lib.lsl_prepend_copy(self.e, elem.e))

    def remove_child(self, rhs):
        """Remove a given child element, specified by name or as element."""
        if type(rhs) is XMLElement:
            lib.lsl_remove_child(self.e, rhs.e)
        else:
            # Bug fix: encode the name to bytes like every other name-based
            # call in this class; previously the raw str was passed straight
            # to the C API.
            lib.lsl_remove_child_n(self.e, str.encode(rhs))
# ==========================
# === ContinuousResolver ===
# ==========================
class ContinuousResolver:
    """A convenience class resolving streams continuously in the background.

    The object can be queried at any time for the set of streams that are
    currently visible on the network.
    """

    def __init__(self, prop=None, value=None, pred=None, forget_after=5.0):
        """Construct a new continuous_resolver.

        Keyword arguments:
        prop, value -- resolve only streams whose StreamInfo property
                       `prop` has the given string `value`; both must be
                       passed together.
        pred -- alternatively, an XPath 1.0 predicate string; mutually
                exclusive with prop/value.
        forget_after -- when a stream is no longer visible on the network
                        (e.g., because it was shut down), this is the time
                        in seconds after which it is no longer reported by
                        the resolver.
        """
        if pred is not None:
            if prop is not None or value is not None:
                raise ValueError("you can only either pass the prop/value "
                                 "argument or the pred argument, but not "
                                 "both.")
            handle = lib.lsl_create_continuous_resolver_bypred(
                str.encode(pred), c_double(forget_after))
        elif prop is not None and value is not None:
            handle = lib.lsl_create_continuous_resolver_byprop(
                str.encode(prop), str.encode(value), c_double(forget_after))
        elif prop is not None or value is not None:
            raise ValueError("if prop is specified, then value must be "
                             "specified, too, and vice versa.")
        else:
            handle = lib.lsl_create_continuous_resolver(
                c_double(forget_after))
        self.obj = c_void_p(handle)
        if not self.obj:
            raise RuntimeError("could not create continuous resolver.")

    def __del__(self):
        """Destructor for the continuous resolver."""
        # Swallow all errors: during interpreter shutdown the library
        # binding may already have been torn down.
        # noinspection PyBroadException
        try:
            lib.lsl_destroy_continuous_resolver(self.obj)
        except:
            pass

    def results(self):
        """Obtain the set of currently present streams on the network.

        Returns a list of matching StreamInfo objects (with empty desc
        field), any of which can subsequently be used to open an inlet.
        """
        # noinspection PyCallingNonCallable
        found = (c_void_p * 1024)()
        count = lib.lsl_resolver_results(self.obj, byref(found), 1024)
        return [StreamInfo(handle=found[i]) for i in range(count)]
# =========================
# === Error Definitions ===
# =========================
# noinspection PyShadowingBuiltins
class TimeoutError(RuntimeError):
    """Raised when a liblsl operation times out (error code -1)."""
    # note: although this overrides the name of a built-in exception,
    # this API is retained here for compatiblity with the Python 2.x
    # version of pylsl
    pass
class LostError(RuntimeError):
    """Raised when the stream source has been lost (error code -2)."""
    pass
class InvalidArgumentError(RuntimeError):
    """Raised when an argument was incorrectly specified (error code -3)."""
    pass
class InternalError(RuntimeError):
    """Raised when an internal error occurred in liblsl (error code -4)."""
    pass
def handle_error(errcode):
    """Error handler function. Translates an error code into an exception.

    Accepts either a plain int or a ctypes c_int; code 0 and positive
    codes are treated as success.
    """
    code = errcode.value if type(errcode) is c_int else errcode
    if code == 0:
        return  # no error
    if code == -1:
        raise TimeoutError("the operation failed due to a timeout.")
    if code == -2:
        raise LostError("the stream has been lost.")
    if code == -3:
        raise InvalidArgumentError("an argument was incorrectly specified.")
    if code == -4:
        raise InternalError("an internal error has occurred.")
    if code < 0:
        raise RuntimeError("an unknown error has occurred.")
# =================================================
# === Compatibility Interface for old pylsl API ===
# =================================================
# set class aliases
# The old pylsl API exposed lowercase class names; keep them importable.
stream_info = StreamInfo
stream_outlet = StreamOutlet
stream_inlet = StreamInlet
xml_element = XMLElement
timeout_error = TimeoutError
lost_error = LostError
# The old API had one vector type per channel format; plain lists serve
# for all of them now.
vectorf = vectord = vectorl = vectori = vectors = vectorc = vectorstr = list
def resolve_stream(*args):
    """Legacy resolver dispatch (old pylsl API).

    Supported call shapes:
      resolve_stream()                      -> resolve_streams()
      resolve_stream(wait_time)             -> resolve_streams(wait_time)
      resolve_stream(pred)                  -> resolve_bypred(pred)
      resolve_stream(pred, minimum)         -> resolve_bypred(pred, minimum)
      resolve_stream(prop, value)           -> resolve_byprop(prop, value)
      resolve_stream(prop, value, minimum)  -> resolve_byprop(...)
    Any other argument shape falls through and returns None.
    """
    if not args:
        return resolve_streams()
    first = args[0]
    if type(first) in [int, float]:
        return resolve_streams(first)
    if type(first) is str:
        if len(args) == 1:
            return resolve_bypred(first)
        if type(args[1]) in [int, float]:
            return resolve_bypred(first, args[1])
        if len(args) == 2:
            return resolve_byprop(first, args[1])
        return resolve_byprop(first, args[1], args[2])
# ==================================
# === Module Initialization Code ===
# ==================================
# find and load library
os_name = platform.system()
# Pointer size in bits (32 or 64) selects which liblsl binary to load.
bitness = 8 * struct.calcsize("P")
if os_name in ['Windows', 'Microsoft']:
    libname = 'liblsl32.dll' if bitness == 32 else 'liblsl64.dll'
elif os_name == 'Darwin':
    libname = 'liblsl32.dylib' if bitness == 32 else 'liblsl64.dylib'
elif os_name == 'Linux':
    libname = 'liblsl32.so' if bitness == 32 else 'liblsl64.so'
else:
    raise RuntimeError("unrecognized operating system:", os_name)
# Prefer a copy of the library next to this module; otherwise fall back to
# the system-wide library search path.
libpath = os.path.join(os.path.dirname(__file__), libname)
if not os.path.isfile(libpath):
    libpath = util.find_library(libname)
if not libpath:
    raise RuntimeError("library " + libname + " was not found - make sure "
                       "that it is on the search path (e.g., in the same "
                       "folder as pylsl.py).")
lib = CDLL(libpath)
# set function return types where necessary
# (ctypes assumes c_int returns by default; declare doubles, C strings and
# opaque handles explicitly so return values are not misinterpreted)
lib.lsl_local_clock.restype = c_double
lib.lsl_create_streaminfo.restype = c_void_p
lib.lsl_get_name.restype = c_char_p
lib.lsl_get_type.restype = c_char_p
lib.lsl_get_nominal_srate.restype = c_double
lib.lsl_get_source_id.restype = c_char_p
lib.lsl_get_created_at.restype = c_double
lib.lsl_get_uid.restype = c_char_p
lib.lsl_get_session_id.restype = c_char_p
lib.lsl_get_hostname.restype = c_char_p
lib.lsl_get_desc.restype = c_void_p
lib.lsl_get_xml.restype = c_char_p
lib.lsl_create_outlet.restype = c_void_p
lib.lsl_create_inlet.restype = c_void_p
lib.lsl_get_fullinfo.restype = c_void_p
lib.lsl_open_stream.restype = c_void_p
lib.lsl_time_correction.restype = c_double
lib.lsl_pull_sample_f.restype = c_double
lib.lsl_pull_sample_d.restype = c_double
lib.lsl_pull_sample_l.restype = c_double
lib.lsl_pull_sample_i.restype = c_double
lib.lsl_pull_sample_s.restype = c_double
lib.lsl_pull_sample_c.restype = c_double
lib.lsl_pull_sample_str.restype = c_double
lib.lsl_pull_sample_buf.restype = c_double
lib.lsl_first_child.restype = c_void_p
lib.lsl_last_child.restype = c_void_p
lib.lsl_next_sibling.restype = c_void_p
lib.lsl_previous_sibling.restype = c_void_p
lib.lsl_parent.restype = c_void_p
lib.lsl_child.restype = c_void_p
lib.lsl_next_sibling_n.restype = c_void_p
lib.lsl_previous_sibling_n.restype = c_void_p
lib.lsl_name.restype = c_char_p
lib.lsl_value.restype = c_char_p
lib.lsl_child_value.restype = c_char_p
lib.lsl_child_value_n.restype = c_char_p
lib.lsl_append_child_value.restype = c_void_p
lib.lsl_prepend_child_value.restype = c_void_p
lib.lsl_append_child.restype = c_void_p
lib.lsl_prepend_child.restype = c_void_p
lib.lsl_append_copy.restype = c_void_p
lib.lsl_prepend_copy.restype = c_void_p
# Chunk-transfer entry points may be absent from older liblsl binaries;
# degrade gracefully if the loaded library lacks them.
# noinspection PyBroadException
try:
    lib.lsl_pull_chunk_f.restype = c_long
    lib.lsl_pull_chunk_d.restype = c_long
    lib.lsl_pull_chunk_l.restype = c_long
    lib.lsl_pull_chunk_i.restype = c_long
    lib.lsl_pull_chunk_s.restype = c_long
    lib.lsl_pull_chunk_c.restype = c_long
    lib.lsl_pull_chunk_str.restype = c_long
    lib.lsl_pull_chunk_buf.restype = c_long
except:
    print("pylsl: chunk transfer functions not available in your liblsl "
          "version.")
# Same for the continuous-resolver entry points.
# noinspection PyBroadException
try:
    lib.lsl_create_continuous_resolver.restype = c_void_p
    lib.lsl_create_continuous_resolver_bypred.restype = c_void_p
    lib.lsl_create_continuous_resolver_byprop.restype = c_void_p
except:
    print("pylsl: ContinuousResolver not (fully) available in your liblsl "
          "version.")
# set up some type maps
# The fmt2* lists are indexed by the channel-format constants in the order
# given by fmt2string (0=undefined, 1=float32, 2=double64, 3=string,
# 4=int32, 5=int16, 6=int8, 7=int64); unsupported slots hold [].
string2fmt = {'float32': cf_float32, 'double64': cf_double64,
              'string': cf_string, 'int32': cf_int32, 'int16': cf_int16,
              'int8': cf_int8, 'int64': cf_int64}
fmt2string = ['undefined', 'float32', 'double64', 'string', 'int32', 'int16',
              'int8', 'int64']
fmt2type = [[], c_float, c_double, c_char_p, c_int, c_short, c_byte, c_longlong]
fmt2push_sample = [[], lib.lsl_push_sample_ftp, lib.lsl_push_sample_dtp,
                   lib.lsl_push_sample_strtp, lib.lsl_push_sample_itp,
                   lib.lsl_push_sample_stp, lib.lsl_push_sample_ctp, []]
fmt2pull_sample = [[], lib.lsl_pull_sample_f, lib.lsl_pull_sample_d,
                   lib.lsl_pull_sample_str, lib.lsl_pull_sample_i,
                   lib.lsl_pull_sample_s, lib.lsl_pull_sample_c, []]
# noinspection PyBroadException
try:
    fmt2push_chunk = [[], lib.lsl_push_chunk_ftp, lib.lsl_push_chunk_dtp,
                      lib.lsl_push_chunk_strtp, lib.lsl_push_chunk_itp,
                      lib.lsl_push_chunk_stp, lib.lsl_push_chunk_ctp, []]
    fmt2pull_chunk = [[], lib.lsl_pull_chunk_f, lib.lsl_pull_chunk_d,
                      lib.lsl_pull_chunk_str, lib.lsl_pull_chunk_i,
                      lib.lsl_pull_chunk_s, lib.lsl_pull_chunk_c, []]
except:
    # if not available
    fmt2push_chunk = [None, None, None, None, None, None, None, None]
    fmt2pull_chunk = [None, None, None, None, None, None, None, None]
| jditz/mugs | calibration/pylsl/pylsl.py | Python | gpl-3.0 | 51,539 |
class TestMiddleware():
    """Example middleware that prints a message for each hook invocation.

    NOTE(review): Python 2 print-statement syntax; this module will not run
    under Python 3 as-is.
    """
    def __init__(self, request):
        print "TestMiddleware loaded"
    def request_hook(self):
        print "TestMiddleware.request_hook called"
    def response_hook(self):
        print "TestMiddleware.response_hook called"
    def view_hook(self, view):
        print "TestMiddleware.view_hook called"
| trendrr/whirlwind | whirlwind/middleware/test.py | Python | mit | 338 |
# Copyright (c) 2018 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
from __future__ import absolute_import, division, print_function
SWI_SIG_FILE_NAME = 'swi-signature'
SWIX_SIG_FILE_NAME = 'swix-signature'

def getSigFileName( swiFile ):
   """Return the signature file name used for the given image path."""
   # Extension packages (.swix) use a different signature file name than
   # base SWI images; the extension check is case-insensitive.
   isSwix = swiFile.lower().endswith( ".swix" )
   return SWIX_SIG_FILE_NAME if isSwix else SWI_SIG_FILE_NAME
| aristanetworks/swi-tools | switools/signaturelib.py | Python | apache-2.0 | 426 |
import datetime
import random
import csv
import json
import StringIO
from app import ton
from chart import Chart
from timeframe import Timeframe
from frequency import Frequency
from tweets import Tweets
# TODO: Fix * imports
from django.shortcuts import *
from django.contrib.auth.decorators import login_required, user_passes_test
from django.contrib.auth import logout as auth_logout
from django.conf import settings
# Seconds that clients/proxies may cache API responses (default: 30 days).
MAX_AGE = getattr(settings, 'CACHE_CONTROL_MAX_AGE', 2592000)
# import twitter
def login(request):
    """
    Render the login page for the given request.
    """
    template_context = {"request": request}
    return render_to_response(
        'login.html',
        template_context,
        context_instance=RequestContext(request))
@login_required
def home(request):
    """
    Render the home page, passing the "query" GET parameter (empty string
    when absent) to the template as ``query0``.
    """
    query = request.GET.get("query", "")
    context = {"request": request, "query0": query}
    # Removed an unused local (`tweets = []`) that was never referenced.
    return render_to_response(
        'home.html',
        context,
        context_instance=RequestContext(request))
@login_required
def query_chart(request):
    """
    Return chart data as JSON for either a single "query" GET parameter
    or a "queries[]" list; a non-empty single query takes precedence.
    """
    single_query = request.GET.get("query", None)
    query_list = request.GET.getlist("queries[]")
    if single_query:
        query_list = [single_query]
    chart_data = Chart(queries=query_list, request=request).data
    response = HttpResponse(
        json.dumps(chart_data),
        content_type="application/json")
    # Allow client/proxy caching for MAX_AGE seconds.
    response['Cache-Control'] = 'max-age=%d' % MAX_AGE
    return response
@login_required
def query_frequency(request):
    """
    Return term-frequency data for the "query" GET parameter as JSON.

    A sample size of 500 is passed to Frequency along with the timeframe
    parsed from the start/end/interval GET parameters. With no query, an
    empty JSON object is returned.
    """
    query = request.GET.get("query", None)
    response_data = {}
    sample = 500
    if query is not None:
        # Get Timeframe e.g. process time from request
        request_timeframe = Timeframe(
            start=request.GET.get(
                "start", None), end=request.GET.get(
                "end", None), interval=request.GET.get(
                "interval", "hour"))
        # Query GNIP and get frequency
        data = Frequency(query=query,
                         sample=sample,
                         start=request_timeframe.start,
                         end=request_timeframe.end)
        response_data["frequency"] = data.freq
        response_data["sample"] = sample
    response = HttpResponse(
        json.dumps(response_data),
        content_type="application/json")
    # Allow client/proxy caching for MAX_AGE seconds.
    response['Cache-Control'] = 'max-age=%d' % MAX_AGE
    return response
@login_required
def query_tweets(request):
    """
    Return tweets matching the request's "query" GET parameter.

    The optional "export" GET parameter selects the output format:
      * "ta"  -- upload the matching user ids via Twitter TON and return
                 the upload location as JSON.
      * "csv" -- return the matching tweets as a CSV attachment.
      * otherwise -- return the raw tweet data as JSON.
    """
    query_count = 10000 # int(request.GET.get("embedCount", TWEET_QUERY_COUNT))
    export = request.GET.get("export", None)
    query = request.GET.get("query", "")
    tweets = Tweets(query=query, query_count=query_count, request=request)
    response_data = {}
    if export == "ta":
        # Collect one actor id per line and upload the list to TON; the
        # payload is encoded UTF-16BE before upload.
        output = StringIO.StringIO()
        for t in tweets.get_data():
            user_id = t['actor']['id']
            output.write(user_id + '\n')
        ton_request = ton.TwitterTon(
            twitter_consumer_key=settings.SOCIAL_AUTH_TWITTER_KEY,
            twitter_consumer_secret=settings.SOCIAL_AUTH_TWITTER_SECRET,
            access_token=settings.TWITTER_ACCESS_TOKEN,
            access_token_secret=settings.TWITTER_ACCESS_TOKEN_SECRET)
        # NOTE(review): `bytes` shadows the builtin name.
        bytes = output.getvalue()
        ton_response = ton_request.upload_data(
            payload=bytes.encode('utf-16be'))
        output.close()
        location = ton_response['location']
        response = HttpResponse(json.dumps(
            {"location": location, "query": query}), content_type="application/json")
        return response
    elif export == "csv":
        response = HttpResponse(content_type='text/csv')
        response['Content-Disposition'] = 'attachment; filename="export.csv"'
        writer = csv.writer(response)
        writer.writerow(['count',
                         'time',
                         'id',
                         'user_screen_name',
                         'user_id',
                         'status',
                         'retweet_count',
                         'favorite_count',
                         'is_retweet',
                         'in_reply_to_tweet_id',
                         'in_reply_to_screen_name'])
        count = 0
        for t in tweets.get_data():
            count = count + 1
            # Replace non-ASCII characters rather than failing on them.
            body = t['body'].encode('ascii', 'replace')
            # Ids look like colon-separated URIs; keep the part after the
            # last ':'.
            status_id = t['id']
            status_id = status_id[status_id.rfind(':') + 1:]
            user_id = t['actor']['id']
            user_id = user_id[user_id.rfind(':') + 1:]
            # The last three columns ('X') are placeholders: retweet/reply
            # details are not extracted from the payload.
            writer.writerow([count,
                             t['postedTime'],
                             status_id,
                             t['actor']['preferredUsername'],
                             user_id,
                             body,
                             t['retweetCount'],
                             t['favoritesCount'],
                             'X',
                             'X',
                             'X'])
        return response
    else:
        response_data['tweets'] = tweets.get_data()
        response = HttpResponse(
            json.dumps(response_data),
            content_type="application/json")
        # Allow client/proxy caching for MAX_AGE seconds.
        response['Cache-Control'] = 'max-age=%d' % MAX_AGE
        return response
def handle_query_error(e):
    """
    Translate a query error into an HTTP 400 JSON response carrying the
    error message, upstream response, and payload.
    """
    error_body = {
        'error': e.message,
        'response': e.response,
        'payload': e.payload,
    }
    return HttpResponse(
        json.dumps(error_body),
        status=400,
        content_type="application/json")
def logout(request):
    """
    Terminate the user's session and redirect to the site root.
    """
    auth_logout(request)
    redirect = HttpResponseRedirect('/')
    return redirect
| twitterdev/data-ads-sample | home/views.py | Python | mit | 5,930 |
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
from astropy import units as u
from astropy.utils.decorators import format_doc
from astropy.coordinates.baseframe import frame_transform_graph, base_doc
from astropy.coordinates.attributes import TimeAttribute
from astropy.coordinates.transformations import (
FunctionTransformWithFiniteDifference, DynamicMatrixTransform)
from astropy.coordinates.representation import (CartesianRepresentation,
UnitSphericalRepresentation)
from astropy.coordinates import earth_orientation as earth
from .utils import EQUINOX_B1950
from .baseradec import doc_components, BaseRADecFrame
__all__ = ['FK4', 'FK4NoETerms']
doc_footer_fk4 = """
Other parameters
----------------
equinox : `~astropy.time.Time`
The equinox of this frame.
obstime : `~astropy.time.Time`
The time this frame was observed. If ``None``, will be the same as
``equinox``.
"""
@format_doc(base_doc, components=doc_components, footer=doc_footer_fk4)
class FK4(BaseRADecFrame):
    """
    A coordinate or frame in the FK4 system.
    Note that this is a barycentric version of FK4 - that is, the origin for
    this frame is the Solar System Barycenter, *not* the Earth geocenter.
    The frame attributes are listed under **Other Parameters**.
    """
    # Frame attributes: equinox defaults to B1950; obstime falls back to
    # the equinox when not explicitly given.
    equinox = TimeAttribute(default=EQUINOX_B1950)
    obstime = TimeAttribute(default=None, secondary_attribute='equinox')
# the "self" transform
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, FK4, FK4)
def fk4_to_fk4(fk4coord1, fk4frame2):
    """FK4 self-transform (e.g. between equinoxes).

    Deceptively complicated: precession is non-trivial while the E-terms are
    present, so hop into FK4NoETerms, precess there, and come back.
    """
    without_eterms = fk4coord1.transform_to(FK4NoETerms(equinox=fk4coord1.equinox))
    precessed = without_eterms.transform_to(FK4NoETerms(equinox=fk4frame2.equinox))
    return precessed.transform_to(fk4frame2)
@format_doc(base_doc, components=doc_components, footer=doc_footer_fk4)
class FK4NoETerms(BaseRADecFrame):
    """
    A coordinate or frame in the FK4 system, but with the E-terms of aberration
    removed.
    The frame attributes are listed under **Other Parameters**.
    """
    # Equinox defining the frame orientation; defaults to B1950.0.
    equinox = TimeAttribute(default=EQUINOX_B1950)
    # Observation time; when not given, falls back to ``equinox``.
    obstime = TimeAttribute(default=None, secondary_attribute='equinox')
    @staticmethod
    def _precession_matrix(oldequinox, newequinox):
        """
        Compute and return the precession matrix for FK4 using Newcomb's method.
        Used inside some of the transformation functions.
        Parameters
        ----------
        oldequinox : `~astropy.time.Time`
            The equinox to precess from.
        newequinox : `~astropy.time.Time`
            The equinox to precess to.
        Returns
        -------
        newcoord : array
            The precession matrix to transform to the new equinox
        """
        # Newcomb precession works in Besselian years, hence ``.byear``.
        return earth._precession_matrix_besselian(oldequinox.byear, newequinox.byear)
# the "self" transform
@frame_transform_graph.transform(DynamicMatrixTransform, FK4NoETerms, FK4NoETerms)
def fk4noe_to_fk4noe(fk4necoord1, fk4neframe2):
    """Self-transform: precession matrix between two FK4NoETerms equinoxes."""
    return fk4necoord1._precession_matrix(fk4necoord1.equinox, fk4neframe2.equinox)
# FK4-NO-E to/from FK4 ----------------------------->
# Unlike other frames, this module include *two* frame classes for FK4
# coordinates - one including the E-terms of aberration (FK4), and
# one not including them (FK4NoETerms). The following functions
# implement the transformation between these two.
def fk4_e_terms(equinox):
    """
    Return the e-terms of aberration vector
    Parameters
    ----------
    equinox : Time object
        The equinox for which to compute the e-terms
    """
    # Constant of aberration at J2000; from Explanatory Supplement to the
    # Astronomical Almanac (Seidelmann, 2005).
    # ~ 0.0056932 deg (v_earth/c ~ 1e-4 rad), converted to radians.
    aberration_const = np.radians(0.0056932)
    # Eccentricity of the Earth's orbit
    ecc = earth.eccentricity(equinox.jd)
    # Mean longitude of perigee of the solar orbit
    lon_perigee = np.radians(earth.mean_lon_of_perigee(equinox.jd))
    # Obliquity of the ecliptic
    obliquity = np.radians(earth.obliquity(equinox.jd, algorithm=1980))
    scale = ecc * aberration_const
    return (scale * np.sin(lon_perigee),
            -scale * np.cos(lon_perigee) * np.cos(obliquity),
            -scale * np.cos(lon_perigee) * np.sin(obliquity))
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, FK4, FK4NoETerms)
def fk4_to_fk4_no_e(fk4coord, fk4noeframe):
    """Transform FK4 to FK4 with the E-terms of aberration removed."""
    # Extract cartesian vector
    rep = fk4coord.cartesian
    # Find distance (for re-normalization)
    d_orig = rep.norm()
    # Work with a unit-length direction vector; restore the distance later.
    rep /= d_orig
    # Apply E-terms of aberration. Note that this depends on the equinox (not
    # the observing time/epoch) of the coordinates. See issue #1496 for a
    # discussion of this.
    eterms_a = CartesianRepresentation(
        u.Quantity(fk4_e_terms(fk4coord.equinox), u.dimensionless_unscaled,
                   copy=False), copy=False)
    rep = rep - eterms_a + eterms_a.dot(rep) * rep
    # Find new distance (for re-normalization)
    d_new = rep.norm()
    # Renormalize
    rep *= d_orig / d_new
    # now re-cast into an appropriate Representation, and precess if need be
    if isinstance(fk4coord.data, UnitSphericalRepresentation):
        rep = rep.represent_as(UnitSphericalRepresentation)
    # if no obstime was given in the new frame, use the old one for consistency
    newobstime = fk4coord._obstime if fk4noeframe._obstime is None else fk4noeframe._obstime
    fk4noe = FK4NoETerms(rep, equinox=fk4coord.equinox, obstime=newobstime)
    if fk4coord.equinox != fk4noeframe.equinox:
        # precession to the target frame's equinox (self-transform above)
        fk4noe = fk4noe.transform_to(fk4noeframe)
    return fk4noe
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, FK4NoETerms, FK4)
def fk4_no_e_to_fk4(fk4noecoord, fk4frame):
    """Transform FK4NoETerms back to FK4 (re-applying the E-terms)."""
    # first precess, if necessary
    if fk4noecoord.equinox != fk4frame.equinox:
        fk4noe_w_fk4equinox = FK4NoETerms(equinox=fk4frame.equinox,
                                          obstime=fk4noecoord.obstime)
        fk4noecoord = fk4noecoord.transform_to(fk4noe_w_fk4equinox)
    # Extract cartesian vector
    rep = fk4noecoord.cartesian
    # Find distance (for re-normalization)
    d_orig = rep.norm()
    # Work with a unit-length direction vector; restore the distance later.
    rep /= d_orig
    # Apply E-terms of aberration. Note that this depends on the equinox (not
    # the observing time/epoch) of the coordinates. See issue #1496 for a
    # discussion of this.
    eterms_a = CartesianRepresentation(
        u.Quantity(fk4_e_terms(fk4noecoord.equinox), u.dimensionless_unscaled,
                   copy=False), copy=False)
    rep0 = rep.copy()
    # Solve for the FK4 direction by fixed-point iteration (10 rounds),
    # inverting the E-term removal applied in fk4_to_fk4_no_e.
    for _ in range(10):
        rep = (eterms_a + rep0) / (1. + eterms_a.dot(rep))
    # Find new distance (for re-normalization)
    d_new = rep.norm()
    # Renormalize
    rep *= d_orig / d_new
    # now re-cast into an appropriate Representation, and precess if need be
    if isinstance(fk4noecoord.data, UnitSphericalRepresentation):
        rep = rep.represent_as(UnitSphericalRepresentation)
    return fk4frame.realize_frame(rep)
| pllim/astropy | astropy/coordinates/builtin_frames/fk4.py | Python | bsd-3-clause | 7,249 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .service_info import ServiceInfo
class StatefulServiceInfo(ServiceInfo):
    """Information about a stateful Service Fabric service.
    :param id:
    :type id: str
    :param name: Full hierarchical name of the service in URI format starting
    with `fabric:`.
    :type name: str
    :param type_name: The name of the service type as specified in the
    service manifest.
    :type type_name: str
    :param manifest_version: The version of the service manifest.
    :type manifest_version: str
    :param health_state: Possible values include: 'Invalid', 'Ok', 'Warning',
    'Error', 'Unknown'
    :type health_state: str
    :param service_status: Possible values include: 'Unknown', 'Active',
    'Upgrading', 'Deleting', 'Creating', 'Failed'
    :type service_status: str
    :param is_service_group: Whether the service is in a service group.
    :type is_service_group: bool
    :param ServiceKind: Polymorphic Discriminator
    :type ServiceKind: str
    :param has_persisted_state: Whether the service has persisted state.
    :type has_persisted_state: bool
    """
    # AutoRest validation map: ServiceKind is the polymorphic discriminator
    # and must always be present on the wire.
    _validation = {
        'ServiceKind': {'required': True},
    }
    # Maps Python attribute names to the REST API's wire-format keys/types.
    _attribute_map = {
        'id': {'key': 'Id', 'type': 'str'},
        'name': {'key': 'Name', 'type': 'str'},
        'type_name': {'key': 'TypeName', 'type': 'str'},
        'manifest_version': {'key': 'ManifestVersion', 'type': 'str'},
        'health_state': {'key': 'HealthState', 'type': 'str'},
        'service_status': {'key': 'ServiceStatus', 'type': 'str'},
        'is_service_group': {'key': 'IsServiceGroup', 'type': 'bool'},
        'ServiceKind': {'key': 'ServiceKind', 'type': 'str'},
        'has_persisted_state': {'key': 'HasPersistedState', 'type': 'bool'},
    }
    def __init__(self, id=None, name=None, type_name=None, manifest_version=None, health_state=None, service_status=None, is_service_group=None, has_persisted_state=None):
        super(StatefulServiceInfo, self).__init__(id=id, name=name, type_name=type_name, manifest_version=manifest_version, health_state=health_state, service_status=service_status, is_service_group=is_service_group)
        self.has_persisted_state = has_persisted_state
        # Fixed discriminator value identifying this ServiceInfo subtype.
        self.ServiceKind = 'Stateful'
| v-iam/azure-sdk-for-python | azure-servicefabric/azure/servicefabric/models/stateful_service_info.py | Python | mit | 2,711 |
## This file is part of Rabbit Hole.
## Rabbit Hole is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
## Rabbit Hole is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
## You should have received a copy of the GNU General Public License
## along with Rabbit Hole. If not, see <http://www.gnu.org/licenses/>.
# -*- coding: us-ascii -*-
# generated by wxGlade 300a4438fc64 on Wed Mar 14 10:28:15 2012
import wx
from ObjectListView import ObjectListView, ColumnDefn
from utils import *
# begin wxGlade: dependencies
# end wxGlade
# begin wxGlade: extracode
# end wxGlade
class SearchPanel(wx.Panel):
    """Panel displaying torrent search results in a single-selection list."""
    def __init__(self, *args, **kwds):
        # begin wxGlade: SearchPanel.__init__
        kwds["style"] = wx.TAB_TRAVERSAL
        wx.Panel.__init__(self, *args, **kwds)
        self.torrList = ObjectListView(self, -1, style=wx.LC_REPORT | wx.LC_SINGLE_SEL | wx.SUNKEN_BORDER)
        self.__set_properties()
        self.__do_layout()
        # end wxGlade
        # Prepare columns. The fourth argument of each ColumnDefn is the
        # attribute read from the listed objects ('category', 'name', ...).
        self.torrList.SetColumns([
            ColumnDefn("Category","left",120,"category"),
            ColumnDefn("Name","left",500,"name"),
            ColumnDefn("Size","left",90,"size",stringConverter=formatSize),
            ColumnDefn("Seeders","left",60,"seeders",stringConverter="%d"),
            ColumnDefn("Leechers","left",60,"leechers",stringConverter="%d")])
        self.torrList.SetEmptyListMsg("")
    def __set_properties(self):
        # begin wxGlade: SearchPanel.__set_properties
        # wxGlade-managed section: regenerate via wxGlade, don't hand-edit.
        pass
        # end wxGlade
    def __do_layout(self):
        # begin wxGlade: SearchPanel.__do_layout
        # wxGlade-managed section: single sizer letting the list fill the panel.
        sizer_1 = wx.BoxSizer(wx.HORIZONTAL)
        sizer_1.Add(self.torrList, 1, wx.EXPAND, 0)
        self.SetSizer(sizer_1)
        sizer_1.Fit(self)
        # end wxGlade
# end of class SearchPanel
# end of class SearchPanel
| EduardoLeggiero/rabbithole | SearchPanel.py | Python | gpl-2.0 | 2,225 |
###
# Copyright (c) 2005, Jeremiah Fincher
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
from supybot.test import *
class MyChannelLoggerTestCase(PluginTestCase):
    """Test case for the MyChannelLogger plugin. No tests are defined yet;
    only the plugin listed in ``plugins`` is loaded."""
    plugins = ('MyChannelLogger',)
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
| octete/octete-supybot-plugins | MyChannelLogger/test.py | Python | bsd-3-clause | 1,757 |
# coding=utf-8
# Copyright 2022 The Meta-Dataset Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Meta-Dataset example generators.
The TFDS implementation assumes that the examples are returned in class order,
i.e. all examples of class 0 first, then all examples of class 1, and so on. If
implementing new functions, make sure that the class order property holds.
"""
import collections
import io
import itertools
import json
import os
import re
from typing import Optional
from absl import logging
from etils import epath
import numpy as np
from PIL import ImageOps
import tensorflow_datasets as tfds
def _image_key(image_id, total_num_examples):
"""Returns an image key in the form of a 128-bit integer.
The gap between keys is chosen as to occupy as much of the space of 128-bit
numbers as possible, which ensures that the temporary buckets the examples are
written to are as balanced as possible.
Args:
image_id: image ID, in [0, total_num_examples).
total_num_examples: total number of examples in the data source.
Returns:
The image key.
"""
gap = int(2 ** 128) // total_num_examples
return image_id * gap
def _load_and_process_image(image_path = None,
image_bytes = None,
invert_img = False,
bbox=None):
"""Loads and processes an image.
Args:
image_path: image path. Exactly one of image_path or image_bytes should be
passed.
image_bytes: image bytes. Exactly one of image_path or image_bytes should be
passed.
invert_img: if True, invert the image.
bbox: if passed, crop the image using the bounding box.
Returns:
Image bytes.
"""
if (None not in (image_path, image_bytes)) or not (image_path or image_bytes):
raise ValueError(
'exactly one of image_path and image_bytes should be passed.')
if image_path is not None:
image_bytes = image_path.read_bytes()
try:
img = tfds.core.lazy_imports.PIL_Image.open(io.BytesIO(image_bytes))
except:
logging.warn('Failed to open image')
raise
assert image_bytes is not None
img_needs_encoding = False
if img.format != 'JPEG':
img_needs_encoding = True
if img.mode != 'RGB':
img = img.convert('RGB')
img_needs_encoding = True
if bbox is not None:
img = img.crop(bbox)
img_needs_encoding = True
if invert_img:
img = ImageOps.invert(img)
img_needs_encoding = True
if img_needs_encoding:
# Convert the image into JPEG
buf = io.BytesIO()
img.save(buf, format='JPEG')
buf.seek(0)
image_bytes = buf.getvalue()
return image_bytes
def generate_aircraft_examples(metadata, paths):
  """Generates Aircraft examples.
  Args:
    metadata: dict holding at least 'class_names' (label order) and
      'total_num_examples' (used for key spacing).
    paths: mapping from download names to extracted paths.
  Yields:
    (key, example) pairs, in class order.
  """
  data_path = paths['fgvc-aircraft-2013b'] / 'fgvc-aircraft-2013b/data'
  # Retrieve mapping from filename to bounding box.
  # Cropping to the bounding boxes is important for two reasons:
  # 1) The dataset documentation mentions that "[the] (main) aircraft in each
  #    image is annotated with a tight bounding box [...]", which suggests
  #    that there may be more than one aircraft in some images. Cropping to
  #    the bounding boxes removes ambiguity as to which airplane the label
  #    refers to.
  # 2) Raw images have a 20-pixel border at the bottom with copyright
  #    information which needs to be removed. Cropping to the bounding boxes
  #    has the side-effect that it removes the border.
  bboxes_path = data_path / 'images_box.txt'
  with bboxes_path.open('r') as f:
    names_to_bboxes = [
        line.split('\n')[0].split(' ') for line in f.readlines()
    ]
  names_to_bboxes = dict(
      (name, tuple(map(int, (xmin, ymin, xmax, ymax))))
      for name, xmin, ymin, xmax, ymax in names_to_bboxes)
  # Retrieve mapping from filename to variant
  variant_trainval_path = data_path / 'images_variant_trainval.txt'
  with variant_trainval_path.open('r') as f:
    names_to_variants = [
        line.split('\n')[0].split(' ', 1) for line in f.readlines()
    ]
  variant_test_path = data_path / 'images_variant_test.txt'
  with variant_test_path.open('r') as f:
    names_to_variants += [
        line.split('\n')[0].split(' ', 1) for line in f.readlines()
    ]
  names_to_variants = dict(names_to_variants)
  # Build mapping from variant to filenames. "Variant" refers to the aircraft
  # model variant (e.g., A330-200) and is used as the class name in the
  # dataset. The position of the class name in the concatenated list of
  # training, validation, and test class name constitutes its class ID.
  variants_to_names = collections.defaultdict(list)
  for name, variant in names_to_variants.items():
    variants_to_names[variant].append(name)
  image_ids = itertools.count()
  for label, class_name in enumerate(metadata['class_names']):
    for filename in sorted(variants_to_names[class_name]):
      # BUG FIX: the path must be built from `filename` (it was previously a
      # fixed string, so the loop variable was unused and every lookup hit
      # the same non-existent file).
      image_path = data_path / f'images/{filename}.jpg'
      bbox = names_to_bboxes[image_path.stem]
      yield _image_key(next(image_ids), metadata['total_num_examples']), {
          'image': _load_and_process_image(image_path=image_path, bbox=bbox),
          'format': 'JPEG',
          'filename': image_path.name,
          'label': label,
          'class_name': class_name,
      }
def generate_cu_birds_examples(metadata, paths):
  """Generates CUB examples."""
  images_root = paths['CUB_200_2011'] / 'CUB_200_2011/images'
  counter = itertools.count()
  num_total = metadata['total_num_examples']
  for label, class_name in enumerate(metadata['class_names']):
    # One directory per class, named after the class itself.
    class_dir = images_root / class_name
    for image_path in class_dir.iterdir():
      example = {
          'image': _load_and_process_image(image_path=image_path),
          'format': 'JPEG',
          'filename': image_path.name,
          'label': label,
          'class_name': class_name,
      }
      yield _image_key(next(counter), num_total), example
def generate_dtd_examples(metadata, paths):
  """Generates DTD examples."""
  images_root = paths['dtd-r1.0.1'] / 'dtd/images'
  counter = itertools.count()
  num_total = metadata['total_num_examples']
  for label, class_name in enumerate(metadata['class_names']):
    # One directory of texture images per class.
    for image_path in (images_root / class_name).iterdir():
      example = {
          'image': _load_and_process_image(image_path=image_path),
          'format': 'JPEG',
          'filename': image_path.name,
          'label': label,
          'class_name': class_name,
      }
      yield _image_key(next(counter), num_total), example
def generate_fungi_examples(metadata, paths):
  """Generates Fungi examples.
  Args:
    metadata: dict holding at least 'class_names' and 'total_num_examples'.
    paths: mapping from download names to extracted paths.
  Yields:
    (key, example) pairs, in class order.
  """
  original_train = json.loads(
      (paths['train_val_annotations'] / 'train.json').read_text())
  original_val = json.loads(
      (paths['train_val_annotations'] / 'val.json').read_text())
  image_list = original_train['images'] + original_val['images']
  image_id_dict = {}
  for image in image_list:
    # assert this image_id was not previously added
    assert image['id'] not in image_id_dict
    image_id_dict[image['id']] = image
  # Add a class annotation to every image in image_id_dict.
  annotations = original_train['annotations'] + original_val['annotations']
  for annotation in annotations:
    # assert this images_id was not previously annotated
    assert 'class' not in image_id_dict[annotation['image_id']]
    image_id_dict[annotation['image_id']]['class'] = annotation['category_id']
  class_filepaths = collections.defaultdict(list)
  for image in image_list:
    class_filepaths[image['class']].append(
        paths['train_val'] / image['file_name'])
  image_ids = itertools.count()
  for label, class_name in enumerate(metadata['class_names']):
    # Class names look like '<category_id>.<name>'.
    for image_path in class_filepaths[int(class_name.split('.')[0])]:
      yield _image_key(next(image_ids), metadata['total_num_examples']), {
          'image': _load_and_process_image(image_path=image_path),
          'format': 'JPEG',
          # BUG FIX: this previously read `image['file_name']`, where `image`
          # was the stale loop variable from the loops above, so every example
          # carried the file name of the *last* entry in image_list.
          'filename': image_path.name,
          'label': label,
          'class_name': class_name,
      }
def generate_ilsvrc_2012_examples(metadata, paths):
  """Generates ImageNet examples by streaming the per-synset tar archives."""
  # Enumerate all files to skip (duplicates of images in other datasets).
  files_to_skip = set()
  for other_dataset in ('Caltech101', 'Caltech256', 'CUBirds'):
    lines = paths[f'{other_dataset}_duplicates'].read_text().splitlines()
    for l in lines:
      # Skip comment lines
      l = l.strip()
      if l.startswith('#'):
        continue
      # Lines look like:
      # 'synset/synset_imgnumber.JPEG # original_file_name.jpg\n'.
      # Extract only the 'synset_imgnumber.JPG' part.
      file_path = l.split('#')[0].strip()
      file_name = os.path.basename(file_path)
      files_to_skip.add(file_name)
  name_to_id = {name: id for id, name in enumerate(metadata['class_names'])}
  # The train archive is a tar of per-class tars, one per synset.
  for archive_name, archive in tfds.download.iter_archive(
      paths['ILSVRC2012_img_train'], tfds.download.ExtractMethod.TAR_STREAM):
    class_name = archive_name.split('.')[0]
    label = name_to_id[class_name]
    # Pre-assigned contiguous id range for this class.
    image_ids = list(range(*metadata['class_slices'][label]))
    archive_iterator = tfds.download.iter_archive(
        archive, tfds.download.ExtractMethod.TAR_STREAM)  # pytype: disable=wrong-arg-types  # gen-stub-imports
    # Sort by filename so ids are assigned deterministically.
    filenames_and_images = sorted([
        (filename, extracted_image.read())
        for filename, extracted_image in archive_iterator
        if filename not in files_to_skip
    ])
    assert len(filenames_and_images) == len(image_ids)
    for image_id, (filename, image_bytes) in zip(image_ids,
                                                 filenames_and_images):
      # NOTE(review): files_to_skip was already filtered out when building
      # filenames_and_images above, so this branch looks unreachable (and
      # skipping here would desynchronize the zip with image_ids) -- confirm.
      if filename in files_to_skip:
        logging.info('Skipping file %s', filename)
        continue
      yield _image_key(image_id, metadata['total_num_examples']), {
          'image': _load_and_process_image(image_bytes=image_bytes),
          'format': 'JPEG',
          'filename': filename,
          'label': label,
          'class_name': class_name,
      }
def generate_mscoco_examples(metadata, paths, box_scale_ratio=1.2):
  """Generates MSCOCO examples.
  Each annotated object instance becomes one example, cropped to its
  (scaled) bounding box.
  Args:
    metadata: dict holding 'class_names' and 'total_num_examples'.
    paths: mapping from download names to extracted paths.
    box_scale_ratio: factor (>= 1.0) by which each bounding box is enlarged
      around its center before cropping.
  Yields:
    (key, example) pairs, in class order.
  Raises:
    ValueError: if box_scale_ratio < 1.0, or if a crop is empty.
  """
  if box_scale_ratio < 1.0:
    raise ValueError('Box scale ratio must be greater or equal to 1.0.')
  image_dir = paths['train2017'] / 'train2017'
  annotations = json.loads((
      paths['annotations_trainval2017'] /
      'annotations/instances_train2017.json'
  ).read_text())
  class_name_to_category_id = {
      category['name']: category['id']
      for category in annotations['categories']
  }
  # Map COCO's sparse category ids to this dataset's contiguous labels.
  coco_id_to_label = {
      class_name_to_category_id[class_name]: label
      for label, class_name in enumerate(metadata['class_names'])
  }
  label_to_annotations = collections.defaultdict(list)
  for annotation in annotations['annotations']:
    category_id = annotation['category_id']
    if category_id in coco_id_to_label:
      label_to_annotations[coco_id_to_label[category_id]].append(annotation)
  image_ids = itertools.count()
  for label, class_name in enumerate(metadata['class_names']):
    for annotation in label_to_annotations[label]:
      # COCO image files are named by zero-padded 12-digit image id.
      image_path = image_dir / f"{annotation['image_id']:012d}.jpg"
      # The bounding box is represented as (x_topleft, y_topleft, width, height)
      bbox = annotation['bbox']
      with image_path.open('rb') as f:
        image = tfds.core.lazy_imports.PIL_Image.open(f)
        # The image shape is [?, ?, 3] and the type is uint8.
        image = image.convert(mode='RGB')
        image_w, image_h = image.size
        x, y, w, h = bbox
        # Grow the box around its center by box_scale_ratio.
        x = x - 0.5 * w * (box_scale_ratio - 1.0)
        y = y - 0.5 * h * (box_scale_ratio - 1.0)
        w = w * box_scale_ratio
        h = h * box_scale_ratio
        # Convert half-integer to full-integer representation.
        # The Python Imaging Library uses a Cartesian pixel coordinate system,
        # with (0,0) in the upper left corner. Note that the coordinates refer
        # to the implied pixel corners; the centre of a pixel addressed as
        # (0, 0) actually lies at (0.5, 0.5). Since COCO uses the later
        # convention and we use PIL to crop the image, we need to convert from
        # half-integer to full-integer representation.
        xmin = max(int(round(x - 0.5)), 0)
        ymin = max(int(round(y - 0.5)), 0)
        xmax = min(int(round(x + w - 0.5)) + 1, image_w)
        ymax = min(int(round(y + h - 0.5)) + 1, image_h)
        image = image.crop((xmin, ymin, xmax, ymax))
        crop_width, crop_height = image.size
        if crop_width <= 0 or crop_height <= 0:
          raise ValueError('crops are not valid.')
        buffer = io.BytesIO()
        image.save(buffer, format='JPEG')
        buffer.seek(0)
        image_bytes = buffer.getvalue()
      yield _image_key(next(image_ids), metadata['total_num_examples']), {
          'image': image_bytes,
          'format': 'JPEG',
          'filename': image_path.name,
          'label': label,
          'class_name': class_name,
      }
def generate_omniglot_examples(metadata, paths):
  """Generates Omniglot examples."""
  # Alphabet directories come from two separate downloads (background and
  # evaluation); index all of them by alphabet name.
  alphabet_paths = itertools.chain(
      (paths['images_background'] / 'images_background').iterdir(),
      (paths['images_evaluation'] / 'images_evaluation').iterdir())
  alphabet_paths = {path.stem: path for path in alphabet_paths}
  image_ids = itertools.count()
  for label, class_name in enumerate(metadata['class_names']):
    # Class names look like '<alphabet>-characterNN'; split back into the
    # alphabet directory and the character subdirectory.
    match = re.match('(.*)-(character..)', class_name)
    assert match is not None
    character_path = alphabet_paths[match.group(1)] / match.group(2)
    for image_path in character_path.iterdir():
      yield _image_key(next(image_ids), metadata['total_num_examples']), {
          # invert_img=True flips the grayscale polarity of the drawings.
          'image': _load_and_process_image(
              image_path=image_path, invert_img=True),
          'format': 'JPEG',
          'filename': image_path.name,
          'label': label,
          'class_name': class_name,
      }
def generate_quickdraw_examples(metadata, paths):
  """Generates Quickdraw examples."""
  image_ids = itertools.count()
  for label, class_name in enumerate(metadata['class_names']):
    # Each class is stored as one .npy file of flattened grayscale images.
    with paths[class_name].open('rb') as f:
      images = np.load(f)
    for i, image in enumerate(images):
      # We make the assumption that the images are square.
      side = int(np.sqrt(image.shape[0]))
      # To load an array as a PIL.Image we must first reshape it to 2D.
      image = tfds.core.lazy_imports.PIL_Image.fromarray(
          image.reshape((side, side))).convert('RGB')
      # Compress to JPEG before writing
      buffer = io.BytesIO()
      image.save(buffer, format='JPEG')
      buffer.seek(0)
      yield _image_key(next(image_ids), metadata['total_num_examples']), {
          # NOTE(review): unlike the other generators, this yields the BytesIO
          # object rather than its bytes -- presumably TFDS accepts file
          # objects for image features; confirm.
          'image': buffer,
          'format': 'JPEG',
          'filename': f'{class_name}.npy[{i}]',
          'label': label,
          'class_name': class_name,
      }
def generate_traffic_sign_examples(metadata, paths):
  """Generates Traffic Sign (GTSRB) examples."""
  data_path = paths['GTSRB'] / 'GTSRB/Final_Training/Images'
  image_ids = itertools.count()
  for label, class_name in enumerate(metadata['class_names']):
    # Images for class N live in a zero-padded directory '000NN'.
    image_paths = sorted((data_path / f'{label:05d}').glob('*.ppm'))
    # Shuffle within the class with a fixed seed (re-created per class) so
    # the example order is deterministic across runs.
    rng = np.random.RandomState(23)
    rng.shuffle(image_paths)
    for image_path in image_paths:
      yield _image_key(next(image_ids), metadata['total_num_examples']), {
          'image': _load_and_process_image(image_path=image_path),
          'format': 'JPEG',
          'filename': image_path.name,
          'label': label,
          'class_name': class_name,
      }
def generate_vgg_flower_examples(metadata, paths):
  """Generates VGG Flowers examples."""
  data_path = paths['102flowers'] / 'jpg'
  # imagelabels.mat holds one 1-indexed class id per image, in image order.
  with paths['imagelabels'].open('rb') as f:
    class_ids = tfds.core.lazy_imports.scipy.io.loadmat(f)['labels'][0]
  image_paths = collections.defaultdict(list)
  for i, class_id in enumerate(class_ids):
    # Image files are named image_00001.jpg onwards (1-indexed).
    image_paths[class_id].append(data_path / f'image_{i + 1:05d}.jpg')
  image_ids = itertools.count()
  for label, class_name in enumerate(metadata['class_names']):
    # Class names look like '<id>.<name>'; the numeric prefix matches the
    # class ids stored in the .mat file.
    for image_path in image_paths[int(class_name.split('.')[0])]:
      yield _image_key(next(image_ids), metadata['total_num_examples']), {
          'image': _load_and_process_image(image_path=image_path),
          'format': 'JPEG',
          'filename': image_path.name,
          'label': label,
          'class_name': class_name,
      }
| google-research/meta-dataset | meta_dataset/data/tfds/example_generators.py | Python | apache-2.0 | 16,724 |
#!/usr/bin/env python
import sys
import os
import argparse
import requests
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import messagebird
# Example script: list phone numbers available for purchase via MessageBird.
parser = argparse.ArgumentParser()
parser.add_argument('--accessKey', help='access key for MessageBird API', type=str, required=True)
args = vars(parser.parse_args())
try:
    # Create a MessageBird client with the specified accessKey.
    client = messagebird.Client(args['accessKey'])
    # Fetch the NumberList object with specified params, limit, offset.
    params = {'features': ['sms', 'voice'], 'number': 319}
    numbers = client.available_numbers_list('NL', params, 2, 0)
    # Print the object information.
    print('\nThe following information was returned as a %s object:\n' % numbers.__class__)
    if numbers.items is not None:
        print(' Containing the following items:')
        for item in numbers.items:
            print(' {')
            print(' number : %s' % item.number)
            print(' country : %s' % item.country)
            print(' region : %s' % item.region)
            print(' locality : %s' % item.locality)
            print(' features : %s' % item.features)
            print(' tags : %s' % item.tags)
            print(' type : %s' % item.type)
            print(' status : %s' % item.status)
            print(' },')
    else:
        print(' With an empty response.')
# API-level errors carry a structured list of error objects.
except messagebird.client.ErrorException as e:
    print('\nAn error occurred while requesting a NumberList object:\n')
    for error in e.errors:
        print(' code : %d' % error.code)
        print(' description : %s' % error.description)
        print(' parameter : %s\n' % error.parameter)
# Transport-level (HTTP) failures.
except requests.exceptions.HTTPError as e:
    print('\nAn HTTP exception occurred while listing available phone numbers:')
    print(' ', e)
    print(' Http request body: ', e.request.body)
    print(' Http response status: ', e.response.status_code)
    print(' Http response body: ', e.response.content.decode())
except Exception as e:
    print('\nAn ', e.__class__, ' exception occurred while :')
    print(e)
| messagebird/python-rest-api | examples/number_available_list.py | Python | bsd-2-clause | 2,266 |
'''
All modules to test the input listeners in game utility.
''' | michaelbradley91/GameUtility | GameUtility/mjb/test/game_utility/input_listeners/__init__.py | Python | mit | 64 |
__author__ = 'dhkarimi'
import cPickle
from datetime import datetime
from pprint import pprint
import json
# Sample payload covering the four value types being round-tripped:
# datetime, float, int, and str.
testdict = {
    'd': datetime.now(),
    'f': 1.2,
    'i': 1,
    's': "ads"}
def test_pickle():
    """Round-trip ``testdict`` through cPickle and print the results."""
    global testdict
    serialized = cPickle.dumps(testdict)
    restored = cPickle.loads(serialized)
    print("Pickle String:")
    pprint(serialized)
    print("Orig dict")
    pprint(testdict)
    print("Loaded dict")
    pprint(restored)
class MyEncoder(json.JSONEncoder):
    """JSON encoder that serializes datetime objects via str()."""
    def default(self, obj):
        if not isinstance(obj, datetime):
            # Defer to the base class, which raises TypeError.
            return json.JSONEncoder.default(self, obj)
        return str(obj)
# ctime-style timestamp format, e.g. "Mon Jan 01 00:00:00 2018".
TIME_FORMAT_STR = "%a %b %d %H:%M:%S %Y"

def string_to_date(s):
    """Parse a TIME_FORMAT_STR timestamp string into a datetime."""
    return datetime.strptime(s, TIME_FORMAT_STR)

def is_date(s):
    """Return True if ``s`` parses with TIME_FORMAT_STR, else False."""
    if s is None:
        return False
    try:
        string_to_date(s)
        return True
    except (TypeError, ValueError):
        # strptime raises ValueError on a format mismatch and TypeError on
        # non-string input; the previous bare ``except`` also swallowed
        # unrelated errors such as KeyboardInterrupt.
        return False
class MyDecoder(json.JSONDecoder):
    # NOTE(review): json.JSONDecoder does not define or invoke a ``default``
    # hook during decoding, so this method looks like dead code -- confirm
    # how it is intended to be called. (Python 2 print statements below.)
    def default(self, s):
        print 'DECODING:', str(type(s)), str(s)
        defaultobj = json.JSONDecoder.decode(self,s)
        print 'DEFAUKT DECODING:', str(type(s)), str(s)
        return defaultobj
def test_json():
    """Round-trip ``testdict`` through the custom JSON encoder/decoder."""
    global testdict
    encoded = json.dumps(testdict, cls=MyEncoder)
    decoded = json.loads(encoded, cls=MyDecoder)
    print("json String:")
    pprint(encoded)
    print("Orig dict")
    pprint(testdict)
    print("json dict")
    pprint(decoded)
if __name__ == "__main__":
test_json() | AlwaysTraining/bbot | bin/prototypes/serializetest.py | Python | mit | 1,452 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-05-11 18:30
from __future__ import unicode_literals
from django.db import migrations
from temba.utils import chunk_list
def populate_attachments(Msg):
    """Copy each message's legacy ``media`` value into ``attachments``.

    The Msg model class is passed in so this can run both from the migration
    (historical model) and manually against the live model.
    """
    msg_ids = list(Msg.objects.exclude(media=None).values_list('id', flat=True))
    if not msg_ids:
        return
    print("Fetched %d message ids with media to update..." % len(msg_ids))
    num_updated = 0
    for id_batch in chunk_list(msg_ids, 1000):
        for msg in Msg.objects.filter(id__in=id_batch):
            msg.attachments = [msg.media]
            msg.save(update_fields=('attachments',))
        num_updated += len(id_batch)
        # BUG FIX: progress is "updated so far of total"; this previously
        # printed len(id_batch) (the batch size) as the total.
        print(" > Updated %d of %d messages with media" % (num_updated, len(msg_ids)))
def apply_as_migration(apps, schema_editor):
    """Entry point used by RunPython; operates on the historical Msg model."""
    populate_attachments(apps.get_model('msgs', 'Msg'))
def apply_manual():
    """Run the backfill outside of a migration, against the live Msg model."""
    from temba.msgs.models import Msg
    populate_attachments(Msg)
class Migration(migrations.Migration):
    """Backfills Msg.attachments from the legacy ``media`` field."""
    dependencies = [
        ('msgs', '0095_msg_attachments'),
    ]
    operations = [
        # Forward-only: no reverse_code is provided.
        migrations.RunPython(apply_as_migration)
    ]
| onaio/rapidpro | temba/msgs/migrations/0096_populate_attachments.py | Python | agpl-3.0 | 1,146 |
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2020 Florian Bruhin (The Compiler) <mail@qutebrowser.org>
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Utility functions for scripts."""
import os
import os.path
import sys
import contextlib
# Import side-effects are an evil thing, but here it's okay so scripts using
# colors work on Windows as well.
try:
    import colorama
except ImportError:
    colorama = None
else:
    colorama.init()
# Colors are enabled everywhere except on Windows without colorama installed.
use_color = os.name != 'nt' or colorama
# ANSI SGR codes for text attributes and foreground colors.
fg_colors = {
    'reset': 0,
    'bold': 1,
    'black': 30,
    'red': 31,
    'green': 32,
    'yellow': 33,
    'blue': 34,
    'magenta': 35,
    'cyan': 36,
    'white': 37,
}
# Background codes are the foreground codes shifted by 10 (ANSI convention).
bg_colors = {name: col + 10 for name, col in fg_colors.items()}
# True when running on a CI system (CI is set by GitHub Actions and others).
ON_CI = 'CI' in os.environ
def _esc(code):
"""Get an ANSI color code based on a color number."""
return '\033[{}m'.format(code)
def print_col(text, color, file=sys.stdout):
    """Print a colorized text."""
    if not use_color:
        print(text, file=file, flush=True)
        return
    start = _esc(fg_colors[color.lower()])
    end = _esc(fg_colors['reset'])
    print(start + text + end, file=file, flush=True)
def print_error(text):
    """Print an error message in red to stderr."""
    print_col(text, 'red', file=sys.stderr)
def print_title(text):
    """Print a title."""
    print()
    banner = "==================== {} ====================".format(text)
    print_col(banner, 'yellow')
def print_subtitle(text):
    """Print a subtitle."""
    banner = "------ {} ------".format(text)
    print_col(banner, 'cyan')
def change_cwd():
    """Change the scripts cwd if it was started inside the script folder."""
    cwd = os.getcwd()
    _head, tail = os.path.split(cwd)
    if tail == 'scripts':
        os.chdir(os.path.join(cwd, os.pardir))
@contextlib.contextmanager
def gha_group(name):
    """Print a GitHub Actions group.

    Gets ignored if not on CI.
    """
    if not ON_CI:
        yield
        return
    print('::group::' + name)
    yield
    print('::endgroup::')
def gha_error(message):
    """Print a GitHub Actions error.

    Should only be called on CI.
    """
    assert ON_CI
    print('::error::{}'.format(message))
| The-Compiler/qutebrowser | scripts/utils.py | Python | gpl-3.0 | 2,776 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """Moves the `missing` flag from the base Response table (responses) down
    to TextResponse only (responses_texts)."""
    def forwards(self, orm):
        # Deleting field 'Response.missing'
        db.delete_column('responses', 'missing')
        # Adding field 'TextResponse.missing'
        db.add_column('responses_texts', 'missing',
                      self.gf('django.db.models.fields.BooleanField')(default=False),
                      keep_default=False)
    def backwards(self, orm):
        # Adding field 'Response.missing'
        db.add_column('responses', 'missing',
                      self.gf('django.db.models.fields.BooleanField')(default=False),
                      keep_default=False)
        # Deleting field 'TextResponse.missing'
        db.delete_column('responses_texts', 'missing')
    # South "frozen" model definitions capturing the schema at this point in
    # history; South builds the fake `orm` passed to forwards()/backwards()
    # from this dict. Do not edit by hand.
    models = {
        u'auth.group': {
            'Meta': {'object_name': 'Group'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'auth.permission': {
            'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'core.culture': {
            'Meta': {'ordering': "['culture']", 'object_name': 'Culture', 'db_table': "'cultures'"},
            'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'coder': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
            'culture': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128', 'db_index': 'True'}),
            'editor': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
            'fact': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'languages': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['core.Language']", 'symmetrical': 'False', 'blank': 'True'}),
            'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '128'})
        },
        u'core.language': {
            'Meta': {'ordering': "['language']", 'unique_together': "(('isocode', 'language'),)", 'object_name': 'Language', 'db_table': "'languages'"},
            'abvdcode': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
            'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'classification': ('django.db.models.fields.TextField', [], {}),
            'editor': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'isocode': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '3', 'db_index': 'True'}),
            'language': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'})
        },
        u'core.section': {
            'Meta': {'ordering': "['id']", 'object_name': 'Section', 'db_table': "'sections'"},
            'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'editor': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'section': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '128'})
        },
        u'core.source': {
            'Meta': {'ordering': "['author', 'year']", 'unique_together': "(['author', 'year'],)", 'object_name': 'Source', 'db_table': "'sources'", 'index_together': "[['author', 'year']]"},
            'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'author': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
            'bibtex': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'comment': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'editor': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'reference': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '1000'}),
            'year': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'null': 'True', 'blank': 'True'})
        },
        u'survey.floatresponse': {
            'Meta': {'object_name': 'FloatResponse', 'db_table': "'responses_floats'", '_ormbases': [u'survey.Response']},
            'response': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            u'response_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['survey.Response']", 'unique': 'True', 'primary_key': 'True'})
        },
        u'survey.integerresponse': {
            'Meta': {'object_name': 'IntegerResponse', 'db_table': "'responses_integers'", '_ormbases': [u'survey.Response']},
            'response': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            u'response_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['survey.Response']", 'unique': 'True', 'primary_key': 'True'})
        },
        u'survey.optionquestion': {
            'Meta': {'object_name': 'OptionQuestion', 'db_table': "'questions_option'", '_ormbases': [u'survey.Question']},
            'options': ('django.db.models.fields.TextField', [], {}),
            u'question_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['survey.Question']", 'unique': 'True', 'primary_key': 'True'})
        },
        u'survey.optionresponse': {
            'Meta': {'object_name': 'OptionResponse', 'db_table': "'responses_options'", '_ormbases': [u'survey.Response']},
            'response': ('django.db.models.fields.CharField', [], {'max_length': '3', 'null': 'True', 'blank': 'True'}),
            u'response_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['survey.Response']", 'unique': 'True', 'primary_key': 'True'}),
            'response_text': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
        },
        u'survey.question': {
            'Meta': {'object_name': 'Question', 'db_table': "'questions'"},
            'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'editor': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'information': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'number': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
            'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'polymorphic_survey.question_set'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
            'question': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'}),
            'response_type': ('django.db.models.fields.CharField', [], {'default': "'Int'", 'max_length': '6'}),
            'section': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Section']"}),
            'simplified_question': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'subsection': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'subsection'", 'null': 'True', 'to': u"orm['core.Section']"})
        },
        u'survey.response': {
            'Meta': {'unique_together': "(('question', 'culture'),)", 'object_name': 'Response', 'db_table': "'responses'"},
            'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'author': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
            'codersnotes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'culture': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Culture']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'page1': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
            'page2': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
            'page3': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
            'page4': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
            'page5': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
            'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'polymorphic_survey.response_set'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
            'question': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Question']"}),
            'source1': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'source1'", 'null': 'True', 'to': u"orm['core.Source']"}),
            'source2': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'source2'", 'null': 'True', 'to': u"orm['core.Source']"}),
            'source3': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'source3'", 'null': 'True', 'to': u"orm['core.Source']"}),
            'source4': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'source4'", 'null': 'True', 'to': u"orm['core.Source']"}),
            'source5': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'source5'", 'null': 'True', 'to': u"orm['core.Source']"}),
            'uncertainty': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
        },
        u'survey.textresponse': {
            'Meta': {'object_name': 'TextResponse', 'db_table': "'responses_texts'", '_ormbases': [u'survey.Response']},
            'missing': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'response': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            u'response_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['survey.Response']", 'unique': 'True', 'primary_key': 'True'})
        }
    }
    # Only the 'survey' app's frozen models are authoritative here.
    complete_apps = ['survey']
"""
Django settings for popsicle project.
Generated by 'django-admin startproject' using Django 1.10.6.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and load
# it from the environment (e.g. os.environ) before any real deployment.
SECRET_KEY = '-jm&@%x55$k2qmek=j@0cx_8c%%$j@w^%gy1pnu_^03j$uqhcy'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ["127.0.0.1", "localhost"]
# Application definition
INSTALLED_APPS = [
    "orders.apps.OrdersConfig",
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'weeklyCal.apps.WeeklycalConfig',
    'emailList.apps.EmaillistConfig',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'popsicle.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        # Project-level templates live in <BASE_DIR>/templates; app templates
        # are also searched because APP_DIRS is True.
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'popsicle.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'America/Denver'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
| ECastleton/Popstop | popsicle/popsicle/settings.py | Python | gpl-3.0 | 3,280 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Adds the Sponsors model."""
    dependencies = [
        ('blogapp', '0004_auto_20151118_1518'),
    ]
    operations = [
        # Creates the sponsor table: a brand name plus a one-letter
        # sponsorship level (G/S/B, defaulting to bronze). The b'' literals
        # indicate this was generated under Python 2.
        migrations.CreateModel(
            name='Sponsors',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('brand', models.CharField(max_length=200)),
                ('level', models.CharField(default=b'B', max_length=1, choices=[(b'G', b'GOLD'), (b'S', b'SILVER'), (b'B', b'BRONZE')])),
            ],
            options={
                'verbose_name': 'Sponsor',
                'verbose_name_plural': 'Sponsors',
            },
        ),
    ]
| jgsjv/treinamento_django | blog/blogapp/migrations/0005_sponsors.py | Python | mit | 802 |
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006 Python Software Foundation.
# All rights reserved.
"""Tokenization help for Python programs.
generate_tokens(readline) is a generator that breaks a stream of
text into Python tokens. It accepts a readline-like method which is called
repeatedly to get the next line of input (or "" for EOF). It generates
5-tuples with these members:
the token type (see token.py)
the token (a string)
the starting (row, column) indices of the token (a 2-tuple of ints)
the ending (row, column) indices of the token (a 2-tuple of ints)
the original line (string)
It is designed to match the working of the Python tokenizer exactly, except
that it produces COMMENT tokens for comments and gives type OP for all
operators
Older entry points
tokenize_loop(readline, tokeneater)
tokenize(readline, tokeneater=printtoken)
are the same, except instead of generating tokens, tokeneater is a callback
function to which the 5 fields described above are passed as 5 arguments,
each time a new token is found."""
__author__ = 'Ka-Ping Yee <ping@lfw.org>'
__credits__ = \
'GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, Skip Montanaro'
import string, re
from codecs import BOM_UTF8, lookup
from lib2to3.pgen2.token import *
from . import token
__all__ = [x for x in dir(token) if x[0] != '_'] + ["tokenize",
"generate_tokens", "untokenize"]
del token
# Compatibility shim: define `bytes` on very old interpreters.
try:
    bytes
except NameError:
    # Support bytes type in Python <= 2.5, so 2to3 turns itself into
    # valid Python 3 code.
    bytes = str
def group(*choices):
    """Join *choices* into a single parenthesized regex alternation."""
    return "({})".format("|".join(choices))
def any(*choices):
    """Regex matching zero or more repetitions of any of *choices*.

    NOTE: intentionally shadows the builtin ``any`` (upstream lib2to3 style).
    """
    return group(*choices) + '*'
def maybe(*choices):
    """Regex matching zero or one occurrence of any of *choices*."""
    return group(*choices) + '?'
def _combinations(*l):
return set(
x + y for x in l for y in l + ("",) if x.casefold() != y.casefold()
)
# Regular-expression fragments making up the tokenizer grammar. The optional
# [lL] suffixes and `ur` prefixes below keep Python 2 literals tokenizable,
# since lib2to3 must parse both Python 2 and 3 source.
Whitespace = r'[ \f\t]*'
Comment = r'#[^\r\n]*'
Ignore = Whitespace + any(r'\\\r?\n' + Whitespace) + maybe(Comment)
Name = r'\w+'
Binnumber = r'0[bB]_?[01]+(?:_[01]+)*'
Hexnumber = r'0[xX]_?[\da-fA-F]+(?:_[\da-fA-F]+)*[lL]?'
Octnumber = r'0[oO]?_?[0-7]+(?:_[0-7]+)*[lL]?'
Decnumber = group(r'[1-9]\d*(?:_\d+)*[lL]?', '0[lL]?')
Intnumber = group(Binnumber, Hexnumber, Octnumber, Decnumber)
Exponent = r'[eE][-+]?\d+(?:_\d+)*'
Pointfloat = group(r'\d+(?:_\d+)*\.(?:\d+(?:_\d+)*)?', r'\.\d+(?:_\d+)*') + maybe(Exponent)
Expfloat = r'\d+(?:_\d+)*' + Exponent
Floatnumber = group(Pointfloat, Expfloat)
Imagnumber = group(r'\d+(?:_\d+)*[jJ]', Floatnumber + r'[jJ]')
Number = group(Imagnumber, Floatnumber, Intnumber)
# Tail end of ' string.
Single = r"[^'\\]*(?:\\.[^'\\]*)*'"
# Tail end of " string.
Double = r'[^"\\]*(?:\\.[^"\\]*)*"'
# Tail end of ''' string.
Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''"
# Tail end of """ string.
Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""'
_litprefix = r"(?:[uUrRbBfF]|[rR][fFbB]|[fFbBuU][rR])?"
Triple = group(_litprefix + "'''", _litprefix + '"""')
# Single-line ' or " string.
String = group(_litprefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*'",
               _litprefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*"')
# Because of leftmost-then-longest match semantics, be sure to put the
# longest operators first (e.g., if = came before ==, == would get
# recognized as two instances of =).
Operator = group(r"\*\*=?", r">>=?", r"<<=?", r"<>", r"!=",
                 r"//=?", r"->",
                 r"[+\-*/%&@|^=<>]=?",
                 r"~")
Bracket = '[][(){}]'
Special = group(r'\r?\n', r'[:;.,`@]')
Funny = group(Operator, Bracket, Special)
PlainToken = group(Number, Funny, String, Name)
Token = Ignore + PlainToken
# First (or only) line of ' or " string.
ContStr = group(_litprefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
                group("'", r'\\\r?\n'),
                _litprefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*' +
                group('"', r'\\\r?\n'))
PseudoExtras = group(r'\\\r?\n', Comment, Triple)
PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name)
tokenprog, pseudoprog, single3prog, double3prog = map(
    re.compile, (Token, PseudoToken, Single3, Double3))
# All valid string-literal prefixes, in every casing/ordering combination.
_strprefixes = (
    _combinations('r', 'R', 'f', 'F') |
    _combinations('r', 'R', 'b', 'B') |
    {'u', 'U', 'ur', 'uR', 'Ur', 'UR'}
)
# Maps an opening quote (with optional prefix) to the compiled regex that
# finds its closing quote; None for bare prefixes (resolved at match time).
endprogs = {"'": re.compile(Single), '"': re.compile(Double),
            "'''": single3prog, '"""': double3prog,
            **{f"{prefix}'''": single3prog for prefix in _strprefixes},
            **{f'{prefix}"""': double3prog for prefix in _strprefixes},
            **{prefix: None for prefix in _strprefixes}}
triple_quoted = (
    {"'''", '"""'} |
    {f"{prefix}'''" for prefix in _strprefixes} |
    {f'{prefix}"""' for prefix in _strprefixes}
)
single_quoted = (
    {"'", '"'} |
    {f"{prefix}'" for prefix in _strprefixes} |
    {f'{prefix}"' for prefix in _strprefixes}
)
tabsize = 8
class TokenError(Exception):
    """Raised when the tokenizer hits malformed or incomplete input."""
class StopTokenizing(Exception):
    """Raised by a tokeneater callback to abort tokenize() early."""
def printtoken(type, token, xxx_todo_changeme, xxx_todo_changeme1, line): # for testing
    """Default tokeneater: dump one token per line for debugging."""
    (srow, scol) = xxx_todo_changeme
    (erow, ecol) = xxx_todo_changeme1
    location = "%d,%d-%d,%d:" % (srow, scol, erow, ecol)
    print("%s\t%s\t%s" % (location, tok_name[type], repr(token)))
def tokenize(readline, tokeneater=printtoken):
    """
    The tokenize() function accepts two parameters: one representing the
    input stream, and one providing an output mechanism for tokenize().

    The first parameter, readline, must be a callable object which provides
    the same interface as the readline() method of built-in file objects.
    Each call to the function should return one line of input as a string.

    The second parameter, tokeneater, must also be a callable object. It is
    called once for each token, with five arguments, corresponding to the
    tuples generated by generate_tokens(). A tokeneater may raise
    StopTokenizing to stop early; that exception is swallowed here.
    """
    try:
        for token_info in generate_tokens(readline):
            tokeneater(*token_info)
    except StopTokenizing:
        pass
# backwards compatible interface
def tokenize_loop(readline, tokeneater):
    """Feed every token produced from *readline* to the *tokeneater* callback."""
    for tok in generate_tokens(readline):
        tokeneater(*tok)
class Untokenizer:
    """Rebuilds source text from 5-tuple tokens ("full" mode) or falls back
    to a lossy spacing heuristic for 2-tuple (type, string) tokens."""
    def __init__(self):
        # Accumulated output fragments, joined at the end of untokenize().
        self.tokens = []
        # (row, col) position reached so far in the reconstructed source.
        self.prev_row = 1
        self.prev_col = 0
    def add_whitespace(self, start):
        # Pad with spaces up to *start* within the current row. Only column
        # gaps can be filled here; rows advance via the NEWLINE/NL handling
        # in untokenize(), hence the assertion below.
        row, col = start
        # NOTE(review): `<=` admits row < prev_row, which should be
        # impossible for a well-formed stream; effectively this asserts
        # row == prev_row. Upstream behavior kept as-is.
        assert row <= self.prev_row
        col_offset = col - self.prev_col
        if col_offset:
            self.tokens.append(" " * col_offset)
    def untokenize(self, iterable):
        # Full-fidelity reconstruction; switches to compat() permanently on
        # the first 2-element token encountered.
        for t in iterable:
            if len(t) == 2:
                self.compat(t, iterable)
                break
            tok_type, token, start, end, line = t
            self.add_whitespace(start)
            self.tokens.append(token)
            self.prev_row, self.prev_col = end
            if tok_type in (NEWLINE, NL):
                self.prev_row += 1
                self.prev_col = 0
        return "".join(self.tokens)
    def compat(self, token, iterable):
        # Lossy reconstruction from (type, string) pairs: guesses spacing
        # and re-applies indentation from INDENT/DEDENT tokens.
        startline = False
        indents = []
        toks_append = self.tokens.append
        toknum, tokval = token
        if toknum in (NAME, NUMBER):
            tokval += ' '
        if toknum in (NEWLINE, NL):
            startline = True
        for tok in iterable:
            toknum, tokval = tok[:2]
            if toknum in (NAME, NUMBER, ASYNC, AWAIT):
                # Trailing space keeps adjacent names/numbers from fusing.
                tokval += ' '
            if toknum == INDENT:
                indents.append(tokval)
                continue
            elif toknum == DEDENT:
                indents.pop()
                continue
            elif toknum in (NEWLINE, NL):
                startline = True
            elif startline and indents:
                toks_append(indents[-1])
                startline = False
            toks_append(tokval)
# PEP 263 coding cookie, e.g. "# -*- coding: utf-8 -*-" (ASCII-only match).
cookie_re = re.compile(r'^[ \t\f]*#.*?coding[:=][ \t]*([-\w.]+)', re.ASCII)
# A line that is blank apart from whitespace or a comment (bytes pattern).
blank_re = re.compile(br'^[ \t\f]*(?:[#\r\n]|$)', re.ASCII)
def _get_normal_name(orig_enc):
"""Imitates get_normal_name in tokenizer.c."""
# Only care about the first 12 characters.
enc = orig_enc[:12].lower().replace("_", "-")
if enc == "utf-8" or enc.startswith("utf-8-"):
return "utf-8"
if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \
enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")):
return "iso-8859-1"
return orig_enc
def detect_encoding(readline):
    """
    The detect_encoding() function is used to detect the encoding that should
    be used to decode a Python source file. It requires one argument, readline,
    in the same way as the tokenize() generator.

    It will call readline a maximum of twice, and return the encoding used
    (as a string) and a list of any lines (left as bytes) it has read in.

    It detects the encoding from the presence of a utf-8 bom or an encoding
    cookie as specified in pep-0263. If both a bom and a cookie are present, but
    disagree, a SyntaxError will be raised. If the encoding cookie is an invalid
    charset, raise a SyntaxError. Note that if a utf-8 bom is found,
    'utf-8-sig' is returned.

    If no encoding is specified, then the default of 'utf-8' will be returned.
    """
    bom_found = False
    encoding = None
    default = 'utf-8'
    def read_or_stop():
        # Treat StopIteration from generator-style readlines as EOF.
        try:
            return readline()
        except StopIteration:
            return bytes()
    def find_cookie(line):
        # A valid coding cookie must be pure ASCII (PEP 263).
        try:
            line_string = line.decode('ascii')
        except UnicodeDecodeError:
            return None
        match = cookie_re.match(line_string)
        if not match:
            return None
        encoding = _get_normal_name(match.group(1))
        try:
            codec = lookup(encoding)
        except LookupError:
            # This behaviour mimics the Python interpreter
            raise SyntaxError("unknown encoding: " + encoding)
        if bom_found:
            # A BOM pins the encoding to utf-8; a conflicting cookie is an error.
            if codec.name != 'utf-8':
                # This behaviour mimics the Python interpreter
                raise SyntaxError('encoding problem: utf-8')
            encoding += '-sig'
        return encoding
    first = read_or_stop()
    if first.startswith(BOM_UTF8):
        bom_found = True
        first = first[3:]
        default = 'utf-8-sig'
    if not first:
        return default, []
    encoding = find_cookie(first)
    if encoding:
        return encoding, [first]
    if not blank_re.match(first):
        # First line is already code: a cookie on line two would not count.
        return default, [first]
    second = read_or_stop()
    if not second:
        return default, [first]
    encoding = find_cookie(second)
    if encoding:
        return encoding, [first, second]
    return default, [first, second]
def untokenize(iterable):
    """Transform tokens back into Python source code.

    Each element returned by the iterable must be a token sequence
    with at least two elements, a token number and token value.  If
    only two tokens are passed, the resulting output is poor.

    Round-trip invariant for full input:
        Untokenized source will match input source exactly

    Round-trip invariant for limited input:
        # Output text will tokenize back to the input
        t1 = [tok[:2] for tok in generate_tokens(f.readline)]
        newcode = untokenize(t1)
        readline = iter(newcode.splitlines(1)).next
        t2 = [tok[:2] for tok in generate_tokens(readline)]
        assert t1 == t2
    """
    ut = Untokenizer()
    return ut.untokenize(iterable)
def generate_tokens(readline):
    """
    The generate_tokens() generator requires one argument, readline, which
    must be a callable object which provides the same interface as the
    readline() method of built-in file objects. Each call to the function
    should return one line of input as a string. Alternately, readline
    can be a callable function terminating with StopIteration:
        readline = open(myfile).next    # Example of alternate readline

    The generator produces 5-tuples with these members: the token type; the
    token string; a 2-tuple (srow, scol) of ints specifying the row and
    column where the token begins in the source; a 2-tuple (erow, ecol) of
    ints specifying the row and column where the token ends in the source;
    and the line on which the token was found. The line passed is the
    physical line.
    """
    # lnum: physical line number; parenlev: bracket nesting depth (newlines
    # inside brackets become NL, not NEWLINE); continued: previous line ended
    # with a backslash continuation.
    lnum = parenlev = continued = 0
    # contstr/contline accumulate a multi-line string; needcont means the
    # string must continue via backslash (single-quoted continuation).
    contstr, needcont = '', 0
    contline = None
    indents = [0]
    # 'stashed' and 'async_*' are used for async/await parsing:
    # 'stashed' holds a provisional 'async' NAME token until we see whether
    # it is followed by 'def' (making it a keyword) or is a plain name.
    stashed = None
    async_def = False
    async_def_indent = 0
    async_def_nl = False
    while 1:                                   # loop over lines in stream
        try:
            line = readline()
        except StopIteration:
            line = ''
        lnum = lnum + 1
        pos, max = 0, len(line)
        if contstr:                            # continued string
            if not line:
                raise TokenError("EOF in multi-line string", strstart)
            endmatch = endprog.match(line)
            if endmatch:
                pos = end = endmatch.end(0)
                yield (STRING, contstr + line[:end],
                       strstart, (lnum, end), contline + line)
                contstr, needcont = '', 0
                contline = None
            elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n':
                # Single-quoted string not continued with a backslash: error.
                yield (ERRORTOKEN, contstr + line,
                           strstart, (lnum, len(line)), contline)
                contstr = ''
                contline = None
                continue
            else:
                contstr = contstr + line
                contline = contline + line
                continue
        elif parenlev == 0 and not continued:  # new statement
            if not line: break
            column = 0
            while pos < max:                   # measure leading whitespace
                if line[pos] == ' ': column = column + 1
                elif line[pos] == '\t': column = (column//tabsize + 1)*tabsize
                elif line[pos] == '\f': column = 0
                else: break
                pos = pos + 1
            if pos == max: break
            if stashed:
                yield stashed
                stashed = None
            if line[pos] in '#\r\n':           # skip comments or blank lines
                if line[pos] == '#':
                    comment_token = line[pos:].rstrip('\r\n')
                    nl_pos = pos + len(comment_token)
                    yield (COMMENT, comment_token,
                           (lnum, pos), (lnum, pos + len(comment_token)), line)
                    yield (NL, line[nl_pos:],
                           (lnum, nl_pos), (lnum, len(line)), line)
                else:
                    yield ((NL, COMMENT)[line[pos] == '#'], line[pos:],
                           (lnum, pos), (lnum, len(line)), line)
                continue
            if column > indents[-1]:           # count indents or dedents
                indents.append(column)
                yield (INDENT, line[:pos], (lnum, 0), (lnum, pos), line)
            while column < indents[-1]:
                if column not in indents:
                    raise IndentationError(
                        "unindent does not match any outer indentation level",
                        ("<tokenize>", lnum, pos, line))
                indents = indents[:-1]
                # Dedenting out of an async def body ends its scope.
                if async_def and async_def_indent >= indents[-1]:
                    async_def = False
                    async_def_nl = False
                    async_def_indent = 0
                yield (DEDENT, '', (lnum, pos), (lnum, pos), line)
            if async_def and async_def_nl and async_def_indent >= indents[-1]:
                async_def = False
                async_def_nl = False
                async_def_indent = 0
        else:                                  # continued statement
            if not line:
                raise TokenError("EOF in multi-line statement", (lnum, 0))
            continued = 0
        while pos < max:
            pseudomatch = pseudoprog.match(line, pos)
            if pseudomatch:                                # scan for tokens
                start, end = pseudomatch.span(1)
                spos, epos, pos = (lnum, start), (lnum, end), end
                token, initial = line[start:end], line[start]
                if initial in string.digits or \
                   (initial == '.' and token != '.'):      # ordinary number
                    yield (NUMBER, token, spos, epos, line)
                elif initial in '\r\n':
                    newline = NEWLINE
                    if parenlev > 0:
                        newline = NL
                    elif async_def:
                        async_def_nl = True
                    if stashed:
                        yield stashed
                        stashed = None
                    yield (newline, token, spos, epos, line)
                elif initial == '#':
                    assert not token.endswith("\n")
                    if stashed:
                        yield stashed
                        stashed = None
                    yield (COMMENT, token, spos, epos, line)
                elif token in triple_quoted:
                    endprog = endprogs[token]
                    endmatch = endprog.match(line, pos)
                    if endmatch:                           # all on one line
                        pos = endmatch.end(0)
                        token = line[start:pos]
                        if stashed:
                            yield stashed
                            stashed = None
                        yield (STRING, token, spos, (lnum, pos), line)
                    else:
                        strstart = (lnum, start)           # multiple lines
                        contstr = line[start:]
                        contline = line
                        break
                elif initial in single_quoted or \
                    token[:2] in single_quoted or \
                    token[:3] in single_quoted:
                    if token[-1] == '\n':                  # continued string
                        strstart = (lnum, start)
                        # Pick the regex for whichever quote char ends the
                        # literal (skipping over the optional prefix).
                        endprog = (endprogs[initial] or endprogs[token[1]] or
                                   endprogs[token[2]])
                        contstr, needcont = line[start:], 1
                        contline = line
                        break
                    else:                                  # ordinary string
                        if stashed:
                            yield stashed
                            stashed = None
                        yield (STRING, token, spos, epos, line)
                elif initial.isidentifier():               # ordinary name
                    if token in ('async', 'await'):
                        if async_def:
                            yield (ASYNC if token == 'async' else AWAIT,
                                   token, spos, epos, line)
                            continue
                    tok = (NAME, token, spos, epos, line)
                    if token == 'async' and not stashed:
                        # Defer judgment: 'async' is only a keyword if 'def'
                        # follows; stash it until the next token arrives.
                        stashed = tok
                        continue
                    if token == 'def':
                        if (stashed
                                and stashed[0] == NAME
                                and stashed[1] == 'async'):
                            async_def = True
                            async_def_indent = indents[-1]
                            yield (ASYNC, stashed[1],
                                   stashed[2], stashed[3],
                                   stashed[4])
                            stashed = None
                    if stashed:
                        yield stashed
                        stashed = None
                    yield tok
                elif initial == '\\':                      # continued stmt
                    # This yield is new; needed for better idempotency:
                    if stashed:
                        yield stashed
                        stashed = None
                    yield (NL, token, spos, (lnum, pos), line)
                    continued = 1
                else:
                    if initial in '([{': parenlev = parenlev + 1
                    elif initial in ')]}': parenlev = parenlev - 1
                    if stashed:
                        yield stashed
                        stashed = None
                    yield (OP, token, spos, epos, line)
            else:
                # Nothing matched: emit a one-character error token and move on.
                yield (ERRORTOKEN, line[pos],
                           (lnum, pos), (lnum, pos+1), line)
                pos = pos + 1
    if stashed:
        yield stashed
        stashed = None
    for indent in indents[1:]:                 # pop remaining indent levels
        yield (DEDENT, '', (lnum, 0), (lnum, 0), '')
    yield (ENDMARKER, '', (lnum, 0), (lnum, 0), '')
# Manual smoke test: tokenize a file given on the command line, else stdin.
if __name__ == '__main__':                     # testing
    import sys
    if len(sys.argv) > 1: tokenize(open(sys.argv[1]).readline)
    else: tokenize(sys.stdin.readline)
| batermj/algorithm-challenger | code-analysis/programming_anguage/python/source_codes/Python3.8.0/Python-3.8.0/Lib/lib2to3/pgen2/tokenize.py | Python | apache-2.0 | 21,048 |
from base import DataSourceCollection, DataSourceBase
# we have to import these modules for them to register as valid data sources
import cfn_datasource
import yaml_datasource
import file_datasource
__all__ = ['DataSourceBase', 'DataSourceCollection']
| EverythingMe/rainbow | rainbow/datasources/__init__.py | Python | bsd-2-clause | 253 |
from django.conf.urls import patterns, include, url
# URL routes for the photos app (legacy Django string-view ``patterns()`` API).
urlpatterns = patterns('',
    # Examples:
    # url(r'^$', 'PhotoPortfolio.views.home', name='home'),
    # url(r'^blog/', include('blog.urls')),
    # Root URL, with or without a trailing slash, shows the browse page.
    url(r'^[/]{0,1}$', 'photos.views.browse'),
    # /gallery/<name> shows one gallery; the word-character name is group 1.
    url(r'^/gallery/([\w]*)', 'photos.views.gallery'),
)
) | tsitra/PhotoPortfolio | photos/urls.py | Python | mit | 304 |
# vim: ts=4:sw=4:expandtab
# BleachBit
# Copyright (C) 2008-2017 Andrew Ziem
# https://www.bleachbit.org
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Basic GUI code
"""
from __future__ import absolute_import, print_function
from bleachbit import _, expanduser
import gtk
import os
if 'nt' == os.name:
from bleachbit import Windows
def browse_folder(parent, title, multiple, stock_button):
    """Ask the user to select a folder.

    parent: parent window (may be None)
    title: dialog title
    multiple: whether several folders may be selected
    stock_button: GTK+ stock id for the accept button

    Return the full path (a list of paths when multiple=True) or None if
    the user cancelled.
    """
    # On Windows use the native chooser unless BB_NATIVE overrides it.
    if 'nt' == os.name and os.getenv('BB_NATIVE') is None:
        ret = Windows.browse_folder(
            parent.window.handle if parent else None, title)
        # Normalize to a list when the caller asked for multiple selection.
        return [ret] if multiple and ret is not None else ret

    # fall back to GTK+
    chooser = gtk.FileChooserDialog(parent=parent,
                                    title=title,
                                    buttons=(
                                        gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
                                        stock_button, gtk.RESPONSE_OK),
                                    action=gtk.FILE_CHOOSER_ACTION_SELECT_FOLDER)
    chooser.set_select_multiple(multiple)
    chooser.set_current_folder(expanduser('~'))
    resp = chooser.run()
    if multiple:
        ret = chooser.get_filenames()
    else:
        ret = chooser.get_filename()
    chooser.hide()
    chooser.destroy()
    if gtk.RESPONSE_OK != resp:
        # user cancelled
        return None
    return ret
def browse_file(parent, title):
    """Prompt the user to select a single file.

    Return the selected path, or None if the user cancelled.
    """
    # On Windows use the native chooser unless BB_NATIVE overrides it.
    if 'nt' == os.name and os.getenv('BB_NATIVE') is None:
        return Windows.browse_file(parent.window.handle, title)
    chooser = gtk.FileChooserDialog(title=title,
                                    parent=parent,
                                    action=gtk.FILE_CHOOSER_ACTION_OPEN,
                                    buttons=(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL, gtk.STOCK_OPEN, gtk.RESPONSE_OK))
    chooser.set_current_folder(expanduser('~'))
    resp = chooser.run()
    path = chooser.get_filename()
    chooser.destroy()
    if gtk.RESPONSE_OK != resp:
        # user cancelled
        return None
    return path
def browse_files(parent, title):
    """Prompt the user to select multiple files to delete.

    Return a list of selected paths, or None if the user cancelled.
    """
    # On Windows use the native chooser unless BB_NATIVE overrides it.
    if 'nt' == os.name and os.getenv('BB_NATIVE') is None:
        return Windows.browse_files(parent.window.handle, title)
    chooser = gtk.FileChooserDialog(title=title,
                                    parent=parent,
                                    action=gtk.FILE_CHOOSER_ACTION_OPEN,
                                    buttons=(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL, gtk.STOCK_DELETE, gtk.RESPONSE_OK))
    chooser.set_select_multiple(True)
    chooser.set_current_folder(expanduser('~'))
    resp = chooser.run()
    paths = chooser.get_filenames()
    chooser.destroy()
    if gtk.RESPONSE_OK != resp:
        # user cancelled
        return None
    return paths
def delete_confirmation_dialog(parent, mention_preview):
    """Return boolean whether OK to delete files."""
    dlg = gtk.Dialog(title=_("Delete confirmation"), parent=parent,
                     flags=gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT)
    dlg.set_default_size(300, -1)

    # Warning icon on the left, wrapped question text on the right.
    row = gtk.HBox(homogeneous=False, spacing=10)
    warning_icon = gtk.Image()
    warning_icon.set_from_stock(gtk.STOCK_DIALOG_WARNING, gtk.ICON_SIZE_DIALOG)
    row.pack_start(warning_icon, False)
    if mention_preview:
        question_text = _(
            "Are you sure you want to permanently delete files according to the selected operations? The actual files that will be deleted may have changed since you ran the preview.")
    else:
        question_text = _(
            "Are you sure you want to permanently delete these files?")
    question = gtk.Label(question_text)
    question.set_line_wrap(True)
    row.pack_start(question, False)
    dlg.vbox.pack_start(row, False)
    dlg.vbox.set_spacing(10)

    # Cancel is the default so Enter does not destroy files by accident.
    dlg.add_button(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL)
    dlg.add_button(gtk.STOCK_DELETE, gtk.RESPONSE_ACCEPT)
    dlg.set_default_response(gtk.RESPONSE_CANCEL)

    dlg.show_all()
    response = dlg.run()
    dlg.destroy()
    return response == gtk.RESPONSE_ACCEPT
def message_dialog(parent, msg, mtype=gtk.MESSAGE_ERROR, buttons=gtk.BUTTONS_OK):
    """Convenience wrapper for gtk.MessageDialog"""
    dlg = gtk.MessageDialog(parent,
                            gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,
                            mtype,
                            buttons,
                            msg)
    response = dlg.run()
    dlg.destroy()
    return response
def open_url(url, parent_window=None, prompt=True):
    """Open an HTTP URL. Try to run as non-root.

    url: the http(s) URL to open
    parent_window: parent for the confirmation dialog
    prompt: when True, ask the user before launching the browser
    """
    # drop privileges so the web browser is running as a normal process
    if 'posix' == os.name and 0 == os.getuid():
        msg = _(
            "Because you are running as root, please manually open this link in a web browser:\n%s") % url
        message_dialog(None, msg, gtk.MESSAGE_INFO)
        return
    if prompt:
        # find hostname
        import re
        ret = re.search('^http(s)?://([a-z.]+)', url)
        if ret is None:
            # regex did not match: show the whole URL in the prompt instead
            host = url
        else:
            host = ret.group(2)
        # TRANSLATORS: %s expands to www.bleachbit.org or similar
        msg = _("Open web browser to %s?") % host
        resp = message_dialog(parent_window, msg,
                              gtk.MESSAGE_QUESTION, gtk.BUTTONS_OK_CANCEL)
        if gtk.RESPONSE_OK != resp:
            # user cancelled
            return
    # open web browser
    if 'nt' == os.name:
        # in gtk.show_uri() avoid 'glib.GError: No application is registered as
        # handling this file'
        import webbrowser
        webbrowser.open(url)
    else:
        gtk.show_uri(None, url, gtk.gdk.CURRENT_TIME)
| brahmastra2016/bleachbit | bleachbit/GuiBasic.py | Python | gpl-3.0 | 6,459 |
"""HTTP views to interact with the entity registry."""
import voluptuous as vol
from homeassistant.components import websocket_api
from homeassistant.components.websocket_api.const import ERR_NOT_FOUND
from homeassistant.components.websocket_api.decorators import (
async_response,
require_admin,
)
from homeassistant.core import callback
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.entity_registry import async_get_registry
async def async_setup(hass):
    """Enable the Entity Registry views."""
    register = hass.components.websocket_api.async_register_command
    for handler in (
        websocket_list_entities,
        websocket_get_entity,
        websocket_update_entity,
        websocket_remove_entity,
    ):
        register(handler)
    return True
@async_response
@websocket_api.websocket_command({vol.Required("type"): "config/entity_registry/list"})
async def websocket_list_entities(hass, connection, msg):
    """Handle list registry entries command.

    Async friendly.
    """
    registry = await async_get_registry(hass)
    entries = [_entry_dict(entry) for entry in registry.entities.values()]
    connection.send_message(websocket_api.result_message(msg["id"], entries))
@async_response
@websocket_api.websocket_command(
    {
        vol.Required("type"): "config/entity_registry/get",
        vol.Required("entity_id"): cv.entity_id,
    }
)
async def websocket_get_entity(hass, connection, msg):
    """Handle get entity registry entry command.

    Async friendly.
    """
    registry = await async_get_registry(hass)
    entry = registry.entities.get(msg["entity_id"])
    if entry is None:
        error = websocket_api.error_message(
            msg["id"], ERR_NOT_FOUND, "Entity not found"
        )
        connection.send_message(error)
        return
    connection.send_message(
        websocket_api.result_message(msg["id"], _entry_ext_dict(entry))
    )
@require_admin
@async_response
@websocket_api.websocket_command(
    {
        vol.Required("type"): "config/entity_registry/update",
        vol.Required("entity_id"): cv.entity_id,
        # If passed in, we update value. Passing None will remove old value.
        vol.Optional("name"): vol.Any(str, None),
        vol.Optional("icon"): vol.Any(str, None),
        vol.Optional("new_entity_id"): str,
        # We only allow setting disabled_by user via API.
        vol.Optional("disabled_by"): vol.Any("user", None),
    }
)
async def websocket_update_entity(hass, connection, msg):
    """Handle update entity websocket command.

    Async friendly.
    """
    registry = await async_get_registry(hass)

    if msg["entity_id"] not in registry.entities:
        connection.send_message(
            websocket_api.error_message(msg["id"], ERR_NOT_FOUND, "Entity not found")
        )
        return

    # BUG FIX: start from the current entry so a message with no updatable
    # fields still returns it; the original referenced an unbound `entry`
    # (NameError) in that case.
    entry = registry.entities[msg["entity_id"]]

    changes = {}

    for key in ("name", "icon", "disabled_by"):
        if key in msg:
            changes[key] = msg[key]

    if "new_entity_id" in msg and msg["new_entity_id"] != msg["entity_id"]:
        changes["new_entity_id"] = msg["new_entity_id"]
        if hass.states.get(msg["new_entity_id"]) is not None:
            connection.send_message(
                websocket_api.error_message(
                    msg["id"], "invalid_info", "Entity is already registered"
                )
            )
            return

    try:
        if changes:
            entry = registry.async_update_entity(msg["entity_id"], **changes)
    except ValueError as err:
        connection.send_message(
            websocket_api.error_message(msg["id"], "invalid_info", str(err))
        )
    else:
        connection.send_message(
            websocket_api.result_message(msg["id"], _entry_ext_dict(entry))
        )
@require_admin
@async_response
@websocket_api.websocket_command(
    {
        vol.Required("type"): "config/entity_registry/remove",
        vol.Required("entity_id"): cv.entity_id,
    }
)
async def websocket_remove_entity(hass, connection, msg):
    """Handle remove entity websocket command.

    Async friendly.
    """
    registry = await async_get_registry(hass)
    entity_id = msg["entity_id"]
    if entity_id not in registry.entities:
        error = websocket_api.error_message(
            msg["id"], ERR_NOT_FOUND, "Entity not found"
        )
        connection.send_message(error)
        return
    registry.async_remove(entity_id)
    connection.send_message(websocket_api.result_message(msg["id"]))
@callback
def _entry_dict(entry):
    """Convert entry to API format."""
    keys = (
        "config_entry_id",
        "device_id",
        "disabled_by",
        "entity_id",
        "name",
        "icon",
        "platform",
    )
    return {key: getattr(entry, key) for key in keys}
@callback
def _entry_ext_dict(entry):
    """Convert entry to API format, including extended attributes."""
    data = _entry_dict(entry)
    for key in ("original_name", "original_icon", "unique_id", "capabilities"):
        data[key] = getattr(entry, key)
    return data
| tchellomello/home-assistant | homeassistant/components/config/entity_registry.py | Python | apache-2.0 | 5,192 |
#!/usr/bin/env python3
# Copyright (c) 2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
start_nodes,
start_node,
assert_equal,
connect_nodes_bi,
)
import os
import shutil
class WalletHDTest(BitcoinTestFramework):
    """Functional test of HD wallet key derivation and backup restore."""
    def __init__(self):
        super().__init__()
        self.setup_clean_chain = True
        self.num_nodes = 2
        # Node 0 is a legacy (non-HD) wallet; node 1 is HD with an empty
        # keypool so each address is derived on demand.
        self.node_args = [['-usehd=0'], ['-usehd=1', '-keypool=0']]
    def setup_network(self):
        """Start both nodes and connect them to each other."""
        self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, self.node_args)
        self.is_network_split = False
        connect_nodes_bi(self.nodes, 0, 1)
    def run_test (self):
        """Derive/fund HD addresses, restore from backup, re-derive, rescan."""
        tmpdir = self.options.tmpdir
        # Make sure we use hd, keep masterkeyid
        masterkeyid = self.nodes[1].getwalletinfo()['hdmasterkeyid']
        assert_equal(len(masterkeyid), 40)
        # Import a non-HD private key in the HD wallet
        non_hd_add = self.nodes[0].getnewaddress()
        self.nodes[1].importprivkey(self.nodes[0].dumpprivkey(non_hd_add))
        # This should be enough to keep the master key and the non-HD key
        self.nodes[1].backupwallet(tmpdir + "/hd.bak")
        #self.nodes[1].dumpwallet(tmpdir + "/hd.dump")
        # Derive some HD addresses and remember the last
        # Also send funds to each add
        self.nodes[0].generate(101)
        hd_add = None
        num_hd_adds = 300
        for i in range(num_hd_adds):
            hd_add = self.nodes[1].getnewaddress()
            hd_info = self.nodes[1].validateaddress(hd_add)
            # NOTE(review): keypath index starts at i+1, presumably because
            # one key is pre-derived even with -keypool=0 — confirm.
            assert_equal(hd_info["hdkeypath"], "m/0'/0'/"+str(i+1)+"'")
            assert_equal(hd_info["hdmasterkeyid"], masterkeyid)
            self.nodes[0].sendtoaddress(hd_add, 1)
            self.nodes[0].generate(1)
        self.nodes[0].sendtoaddress(non_hd_add, 1)
        self.nodes[0].generate(1)
        self.sync_all()
        # num_hd_adds coins to HD addresses plus 1 to the imported key.
        assert_equal(self.nodes[1].getbalance(), num_hd_adds + 1)
        print("Restore backup ...")
        self.stop_node(1)
        os.remove(self.options.tmpdir + "/node1/regtest/wallet.dat")
        shutil.copyfile(tmpdir + "/hd.bak", tmpdir + "/node1/regtest/wallet.dat")
        self.nodes[1] = start_node(1, self.options.tmpdir, self.node_args[1])
        #connect_nodes_bi(self.nodes, 0, 1)
        # Assert that derivation is deterministic
        hd_add_2 = None
        for _ in range(num_hd_adds):
            hd_add_2 = self.nodes[1].getnewaddress()
            hd_info_2 = self.nodes[1].validateaddress(hd_add_2)
            assert_equal(hd_info_2["hdkeypath"], "m/0'/0'/"+str(_+1)+"'")
            assert_equal(hd_info_2["hdmasterkeyid"], masterkeyid)
        assert_equal(hd_add, hd_add_2)
        # Needs rescan
        self.stop_node(1)
        self.nodes[1] = start_node(1, self.options.tmpdir, self.node_args[1] + ['-rescan'])
        #connect_nodes_bi(self.nodes, 0, 1)
        assert_equal(self.nodes[1].getbalance(), num_hd_adds + 1)
if __name__ == '__main__':
    # Entry point: the framework's main() parses options and runs run_test().
    WalletHDTest().main ()
| realzzt/BitCoin2013 | qa/rpc-tests/wallet-hd.py | Python | mit | 3,292 |
"""
Settings for notifications: if a new game is hosted.
"""
from PyQt5 import QtCore
import config
import notifications as ns
import util
from config import Settings
from notifications.ns_hook import NsHook
class NsHookNewGame(NsHook):
    """Notification hook for the 'new game hosted' event.

    Enables the per-event settings button and wires it to a NewGameDialog.
    """
    def __init__(self):
        NsHook.__init__(self, ns.Notifications.NEW_GAME)
        self.button.setEnabled(True)
        # NewGameDialog is defined later in this module; that is fine because
        # the name is resolved at call time.
        self.dialog = NewGameDialog(self, self.eventType)
        self.button.clicked.connect(self.dialog.show)
# Load the Qt Designer form for the new-game notification settings dialog.
FormClass, BaseClass = util.THEME.loadUiType("notification_system/new_game.ui")
class NewGameDialog(FormClass, BaseClass):
    """Settings dialog choosing who triggers 'new game' notifications.

    The mode is either 'friends' or 'all' and is persisted under
    'notifications/<eventType>/mode'.
    """
    def __init__(self, parent, eventType):
        BaseClass.__init__(self)
        self.parent = parent
        self.eventType = eventType
        # Per-event settings namespace; mode is stored under '<key>/mode'.
        self._settings_key = 'notifications/{}'.format(eventType)
        self.setupUi(self)
        # remove help button
        self.setWindowFlags(
            self.windowFlags() & (~QtCore.Qt.WindowContextHelpButtonHint),
        )
        self.loadSettings()
    def loadSettings(self):
        """Read the stored mode ('friends' default) and sync the checkbox."""
        self.mode = Settings.get(self._settings_key + '/mode', 'friends')
        if self.mode == 'friends':
            self.checkBoxFriends.setCheckState(QtCore.Qt.Checked)
        else:
            self.checkBoxFriends.setCheckState(QtCore.Qt.Unchecked)
        self.parent.mode = self.mode
    def saveSettings(self):
        """Persist the current mode and propagate it to the owning hook."""
        config.Settings.set(self._settings_key + '/mode', self.mode)
        self.parent.mode = self.mode
    @QtCore.pyqtSlot()
    def on_btnSave_clicked(self):
        """Save-button slot: derive mode from the checkbox, save, hide."""
        if self.checkBoxFriends.checkState() == QtCore.Qt.Checked:
            self.mode = 'friends'
        else:
            self.mode = 'all'
        self.saveSettings()
        self.hide()
| FAForever/client | src/notifications/hook_newgame.py | Python | gpl-3.0 | 1,713 |
import argparse
import Video
def run(path, keep, write, view, vidpath=""):
    """Annotate a video via the Video helper, then play and clean up."""
    # create instance
    video = Video.Video(path=path, vidpath=vidpath, keep=keep, write=write, view=view)
    # send to google for labels
    video.label()
    # download file to play locally
    video.download()
    # show video with annotations
    video.show()
    # cleanup video staging file
    video.cleanup()
# run if called directly from command line
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('-gcs_uri', help='The local file directory to save annotated video')
    parser.add_argument('-keep', help='Should the downloaded file be kept after analysis?', action="store_true")
    parser.add_argument('-write', help='Should a annotated video file be written', action="store_false")
    parser.add_argument('-view', help='Show annotations within program video', action="store_true")
    parser.add_argument('-vidpath', help='directory to save annotated video', default="")
    args = parser.parse_args()
    # BUG FIX: run() takes `view`, and argparse defines `view` (not `show`);
    # the original passed show=args.show, which raised AttributeError/TypeError.
    run(path=args.gcs_uri, keep=args.keep, write=args.write, view=args.view, vidpath=args.vidpath)
| bw4sz/GoogleVideo | PreviousVersion/main.py | Python | apache-2.0 | 1,211 |
from StringIO import StringIO
from datetime import datetime as dt, timedelta as dtd
from dateutil.rrule import rrule, DAILY
from pandas import DataFrame, Series, DatetimeIndex, MultiIndex, read_csv, Panel, date_range, concat
from pandas.util.testing import assert_frame_equal, assert_series_equal
import numpy as np
import pytest
import io
import itertools
from mock import Mock, patch
import string
from arctic.date import DateRange
from arctic._compression import decompress
from arctic.store._pandas_ndarray_store import PandasDataFrameStore, PandasSeriesStore, PandasStore
from arctic.store.version_store import register_versioned_storage
from pandas.tseries.offsets import DateOffset
# Register the DataFrame storage handler with VersionStore so the `library`
# fixture used by the tests below can serialize pandas objects.
register_versioned_storage(PandasDataFrameStore)
def test_save_read_pandas_series(library):
    """A plain Series round-trips; the store names the column 'values'."""
    original = Series(data=[1, 2, 3], index=[4, 5, 6])
    library.write('pandas', original)
    result = library.read('pandas').data
    assert np.all(original == result)
    assert result.name == "values"
def test_save_read_pandas_series_maintains_name(library):
    """An explicitly named Series keeps its name through a round trip."""
    original = Series(data=[1, 2, 3], index=[4, 5, 6], name="ADJ")
    library.write('pandas', original)
    result = library.read('pandas').data
    assert np.all(original == result)
    assert result.name == "ADJ"
def test_save_read_pandas_series_with_multiindex(library):
    """A MultiIndex-ed string Series round-trips its values."""
    original = Series(data=['A', 'BC', 'DEF'],
                      index=MultiIndex.from_tuples([(1, 2), (1, 3), (2, 2)]))
    library.write('pandas', original)
    result = library.read('pandas').data
    assert np.all(original.values == result.values)
def test_save_read_pandas_series_with_multiindex_and_name(library):
    """A named, MultiIndex-ed Series keeps both values and name."""
    df = Series(data=['A', 'BC', 'DEF'],
                index=MultiIndex.from_tuples([(1, 2), (1, 3), (2, 2)]),
                name='Foo')
    library.write('pandas', df)
    saved_df = library.read('pandas').data
    assert np.all(df.values == saved_df.values)
    # BUG FIX: the original asserted on the input (`df.name`), which is
    # trivially true; check the round-tripped name instead (the store is
    # shown to preserve names by test_save_read_pandas_series_maintains_name).
    assert saved_df.name == 'Foo'
def test_save_read_pandas_series_with_unicode_index_name(library):
    """A Series with a unicode-named datetime MultiIndex round-trips."""
    stamps = [(np.datetime64(dt(2013, 1, 1)),),
              (np.datetime64(dt(2013, 1, 2)),),
              (np.datetime64(dt(2013, 1, 3)),)]
    original = Series(data=['A', 'BC', 'DEF'],
                      index=MultiIndex.from_tuples(stamps, names=[u'DATETIME']))
    library.write('pandas', original)
    result = library.read('pandas').data
    assert np.all(original.values == result.values)
def test_save_read_pandas_dataframe_with_multiindex(library):
    """A MultiIndex-ed string DataFrame round-trips its values."""
    original = DataFrame(data=['A', 'BC', 'DEF'],
                         index=MultiIndex.from_tuples([(1, 2), (1, 3), (2, 2)]))
    library.write('pandas', original)
    result = library.read('pandas').data
    assert np.all(original.values == result.values)
def test_save_read_pandas_dataframe_with_none_values(library):
    """None values survive as NaN; compare with NaN-aware equality."""
    original = DataFrame(data=[(1, None), (1, 3), (2, 2)])
    library.write('pandas', original)
    result = library.read('pandas').data
    both_nan = np.isnan(original.values) & np.isnan(result.values)
    assert np.all((original.values == result.values) | both_nan)
def test_save_read_pandas_dataframe_with_unicode_index_name(library):
    """A DataFrame with a unicode-named datetime MultiIndex round-trips."""
    stamps = [(np.datetime64(dt(2013, 1, 1)),),
              (np.datetime64(dt(2013, 1, 2)),),
              (np.datetime64(dt(2013, 1, 3)),)]
    original = DataFrame(data=['A', 'BC', 'DEF'],
                         index=MultiIndex.from_tuples(stamps, names=[u'DATETIME']))
    library.write('pandas', original)
    result = library.read('pandas').data
    assert np.all(original.values == result.values)
def test_cant_write_pandas_series_with_tuple_values(library):
    """Tuple-valued series are rejected by the pandas series store."""
    df = Series(data=[('A', 'BC')], index=np.array([dt(2013, 1, 1), ]).astype('datetime64[ns]'))
    # Idiomatic falsity check instead of comparing to False with '=='.
    assert not PandasSeriesStore().can_write(Mock(), 'FOO', df)
def test_save_read_pandas_series_with_datetimeindex_with_timezone(library):
    """A tz-aware DatetimeIndex keeps its timezone and index values."""
    tz_index = DatetimeIndex(np.array([dt(2013, 1, 1),
                                       dt(2013, 1, 2),
                                       dt(2013, 1, 3)]).astype('datetime64[ns]'),
                             tz="America/Chicago")
    original = Series(data=['A', 'BC', 'DEF'], index=tz_index)
    library.write('pandas', original)
    result = library.read('pandas').data
    assert original.index.tz == result.index.tz
    assert all(original.index == result.index)
def test_save_read_pandas_series_with_datetimeindex(library):
    """A naive datetime64 index round-trips along with its values."""
    stamps = np.array([dt(2013, 1, 1),
                       dt(2013, 1, 2),
                       dt(2013, 1, 3)]).astype('datetime64[ns]')
    original = Series(data=['A', 'BC', 'DEF'], index=stamps)
    library.write('pandas', original)
    result = library.read('pandas').data
    assert np.all(original.index == result.index)
    assert np.all(original.values == result.values)
def test_save_read_pandas_dataframe_with_datetimeindex_with_timezone(library):
    """A tz-aware DataFrame index keeps its timezone and values."""
    tz_index = DatetimeIndex(np.array([dt(2013, 1, 1),
                                       dt(2013, 1, 2),
                                       dt(2013, 1, 3)]).astype('datetime64[ns]'),
                             tz="America/Chicago")
    original = DataFrame(data=['A', 'BC', 'DEF'], index=tz_index)
    library.write('pandas', original)
    result = library.read('pandas').data
    assert original.index.tz == result.index.tz
    assert all(original.index == result.index)
def test_save_read_pandas_dataframe_with_datetimeindex(library):
    """A naive datetime64 DataFrame index round-trips with its values."""
    stamps = np.array([dt(2013, 1, 1),
                       dt(2013, 1, 2),
                       dt(2013, 1, 3)]).astype('datetime64[ns]')
    original = DataFrame(data=['A', 'BC', 'DEF'], index=stamps)
    library.write('pandas', original)
    result = library.read('pandas').data
    assert np.all(original.index == result.index)
    assert np.all(original.values == result.values)
def test_save_read_pandas_dataframe_with_strings(library):
    """A string-valued DataFrame round-trips."""
    original = DataFrame(data=['A', 'BC', 'DEF'], index=[4, 5, 6])
    library.write('pandas', original)
    result = library.read('pandas').data
    assert np.all(original.values == result.values)
def test_save_read_pandas_dataframe(library):
    """A numeric DataFrame round-trips."""
    original = DataFrame(data=[1, 2, 3], index=[4, 5, 6])
    library.write('pandas', original)
    result = library.read('pandas').data
    assert np.all(original.values == result.values)
def test_save_read_empty_dataframe(library):
    """An empty DataFrame with declared columns round-trips."""
    original = DataFrame({'a': [], 'b': []})
    library.write('pandas', original)
    result = library.read('pandas').data
    assert np.all(original.values == result.values)
def test_save_read_pandas_dataframe2(library):
    """A DataFrame on an hourly DatetimeIndex round-trips."""
    original = DataFrame(data=[1, 2, 3],
                         index=DatetimeIndex(start='1/1/2011', periods=3, freq='H'))
    library.write('pandas', original)
    result = library.read('pandas').data
    assert np.all(original.values == result.values)
def test_save_read_pandas_dataframe_strings(library):
    """A string DataFrame on an hourly DatetimeIndex round-trips."""
    original = DataFrame(data=['a', 'b', 'c'],
                         index=DatetimeIndex(start='1/1/2011', periods=3, freq='H'))
    library.write('pandas', original)
    result = library.read('pandas').data
    assert np.all(original.values == result.values)
def test_save_read_pandas_dataframe_empty_multiindex(library):
    """Round-trip a zero-row frame with a 4-level MultiIndex.

    NOTE(review): io.BytesIO over a str literal is Python 2 only; the
    header-only CSV yields an empty frame after set_index.
    """
    expected = read_csv(io.BytesIO('''\
STRATEGY MAC INSTRUMENT CONTRACT $Price $Delta $Gamma $Vega $Theta $Notional uDelta uGamma uVega uTheta Delta Gamma Vega Theta'''),
                        delimiter=' ').set_index(['STRATEGY', 'MAC', 'INSTRUMENT', 'CONTRACT'])
    library.write('pandas', expected)
    saved_df = library.read('pandas').data
    assert np.all(expected.values == saved_df.values)
    assert np.all(expected.index.names == saved_df.index.names)
def test_save_read_pandas_dataframe_empty_multiindex_and_no_columns(library):
    """Round-trip a zero-row, zero-column frame whose only content is a
    4-level MultiIndex (every CSV column is consumed by set_index)."""
    expected = read_csv(io.BytesIO('''STRATEGY MAC INSTRUMENT CONTRACT'''),
                        delimiter=' ').set_index(['STRATEGY', 'MAC', 'INSTRUMENT', 'CONTRACT'])
    library.write('pandas', expected)
    saved_df = library.read('pandas').data
    assert np.all(expected.values == saved_df.values)
    assert np.all(expected.index.names == saved_df.index.names)
def test_save_read_pandas_dataframe_multiindex_and_no_columns(library):
    """Round-trip a one-row frame with no data columns: all four CSV
    columns become the MultiIndex levels."""
    expected = read_csv(io.BytesIO('''\
STRATEGY MAC INSTRUMENT CONTRACT
STRAT F22 ASD 201312'''),
                        delimiter=' ').set_index(['STRATEGY', 'MAC', 'INSTRUMENT', 'CONTRACT'])
    library.write('pandas', expected)
    saved_df = library.read('pandas').data
    assert np.all(expected.values == saved_df.values)
    assert np.all(expected.index.names == saved_df.index.names)
def test_append_pandas_dataframe(library):
    """Appending a second frame concatenates its rows on read."""
    first = DataFrame(data=[1, 2, 3], index=DatetimeIndex(start='1/1/2011', periods=3, freq='H'))
    second = DataFrame(data=[4, 5, 6], index=DatetimeIndex(start='2/1/2011', periods=3, freq='H'))
    library.write('pandas', first)
    library.append('pandas', second)
    result = library.read('pandas').data
    assert np.all(first.append(second).values == result.values)
def test_empty_dataframe_multindex(library):
    """An empty frame grouped to a 2-level MultiIndex round-trips."""
    df = DataFrame({'a': [], 'b': [], 'c': []})
    df = df.groupby(['a', 'b']).sum()
    # (removed a leftover Python-2 `print df` debug statement, which is a
    # syntax error under Python 3 and only added noise)
    library.write('pandas', df)
    saved_df = library.read('pandas').data
    assert np.all(df.values == saved_df.values)
    # BUG FIX: the original compared df.index.names to itself (always true);
    # compare against the round-tripped index names instead.
    assert np.all(df.index.names == saved_df.index.names)
def test_dataframe_append_empty(library):
    """Appending an empty frame leaves the stored data unchanged."""
    base = DataFrame(data=[1, 2, 3], index=DatetimeIndex(start='1/1/2011', periods=3, freq='H'))
    empty = DataFrame(data=[], index=[])
    library.write('pandas', base)
    library.append('pandas', empty)
    result = library.read('pandas').data
    assert np.all(base.append(empty).values == result.values)
def test_empy_dataframe_append(library):
    """Appending real data to an initially empty item works."""
    empty = DataFrame(data=[], index=[])
    rows = DataFrame(data=[1, 2, 3], index=DatetimeIndex(start='1/1/2011', periods=3, freq='H'))
    library.write('pandas', empty)
    library.append('pandas', rows)
    result = library.read('pandas').data
    assert np.all(empty.append(rows).values == result.values)
def test_dataframe_append_empty_multiindex(library):
    """Appending an empty MultiIndex frame preserves data and index names."""
    populated = DataFrame({'a': [1, 1, 1], 'b': [1, 1, 2], 'c': [1, 2, 3]}).groupby(['a', 'b']).sum()
    empty = DataFrame({'a': [], 'b': [], 'c': []}).groupby(['a', 'b']).sum()
    library.write('pandas', populated)
    library.append('pandas', empty)
    result = library.read('pandas').data
    assert np.all(populated.append(empty).values == result.values)
    assert np.all(populated.index.names == result.index.names)
def test_empty_dataframe_append_multiindex(library):
    """Appending MultiIndex data to an empty MultiIndex item works."""
    empty = DataFrame({'a': [], 'b': [], 'c': []}).groupby(['a', 'b']).sum()
    populated = DataFrame({'a': [1, 1, 1], 'b': [1, 1, 2], 'c': [1, 2, 3]}).groupby(['a', 'b']).sum()
    library.write('pandas', empty)
    library.append('pandas', populated)
    result = library.read('pandas').data
    assert np.all(empty.append(populated).values == result.values)
    assert np.all(empty.index.names == result.index.names)
def test_empty_dataframe_should_ignore_dtype(library):
    """An empty first write must not pin the index layout: a later append
    with a different (single-level) index wins."""
    empty = DataFrame({'a': [], 'b': [], 'c': []}).groupby(['a', 'b']).sum()
    populated = DataFrame({'a': [1, 1, 1], 'b': [1, 1, 2], 'c': [1, 2, 3]}).groupby(['a']).sum()
    library.write('pandas', empty)
    library.append('pandas', populated)
    result = library.read('pandas').data
    assert np.all(populated.index.names == result.index.names)
def test_empty_dataframe_should_ignore_dtype2(library):
    """An empty single-column first write must not pin dtype or columns."""
    empty = DataFrame({'a': []})
    populated = DataFrame({'a': [1, 1, 1], 'b': [1, 1, 2], 'c': [1, 2, 3]}).groupby(['a']).sum()
    library.write('pandas', empty)
    library.append('pandas', populated)
    result = library.read('pandas').data
    assert np.all(populated.values == result.values)
    assert np.all(populated.index.names == result.index.names)
def test_dataframe_append_should_promote_string_column(library):
    """Appending wider fixed-width strings (a10 -> a30) must widen the
    stored string dtype so earlier rows are preserved untruncated."""
    data = np.zeros((2,), dtype=[('A', 'i4'), ('B', 'f4'), ('C', 'a10')])
    data[:] = [(1, 2., 'Hello'), (2, 3., "World")]
    df = DataFrame(data, index=MultiIndex.from_tuples([(np.datetime64(dt(2013, 1, 1)),),
                                                       (np.datetime64(dt(2013, 1, 2)),), ], names=[u'DATETIME']))
    data2 = np.zeros((1,), dtype=[('A', 'i4'), ('B', 'f4'), ('C', 'a30')])
    data2[:] = [(3, 4., 'Hello World - Good Morning')]
    df2 = DataFrame(data2, index=MultiIndex.from_tuples([(np.datetime64(dt(2013, 1, 3)),)], names=[u'DATETIME']))
    # Expected result: all three rows, with column C at the wider a30 dtype.
    expected_data = np.zeros((3,), dtype=[('A', 'i4'), ('B', 'f4'), ('C', 'a30')])
    expected_data[:] = [(1, 2., 'Hello'), (2, 3., "World"), (3, 4., 'Hello World - Good Morning')]
    expected = DataFrame(expected_data, MultiIndex.from_tuples([(np.datetime64(dt(2013, 1, 1)),),
                                                                (np.datetime64(dt(2013, 1, 2)),),
                                                                (np.datetime64(dt(2013, 1, 3)),)],
                                                               names=[u'DATETIME']))
    library.write('pandas', df)
    library.append('pandas', df2)
    actual = library.read('pandas').data
    assert_frame_equal(expected, actual)
def test_dataframe_append_should_add_new_column(library):
    """Appending a frame with an extra column D must backfill earlier rows
    with NaN for that column."""
    data = np.zeros((2,), dtype=[('A', 'i4'), ('B', 'f4'), ('C', 'a10')])
    data[:] = [(1, 2., 'Hello'), (2, 3., "World")]
    df = DataFrame(data, index=MultiIndex.from_tuples([(np.datetime64(dt(2013, 1, 1)),),
                                                       (np.datetime64(dt(2013, 1, 2)),), ], names=[u'DATETIME']))
    data2 = np.zeros((1,), dtype=[('A', 'i4'), ('B', 'f4'), ('C', 'a10'), ('D', 'f4')])
    data2[:] = [(4, 5., 'Hi', 6.)]
    df2 = DataFrame(data2, index=MultiIndex.from_tuples([(np.datetime64(dt(2013, 1, 3)),)], names=[u'DATETIME']))
    # Expected result: D is NaN for the two rows written before it existed.
    expected_data = np.zeros((3,), dtype=[('A', 'i4'), ('B', 'f4'), ('C', 'a10'), ('D', 'f4')])
    expected_data[:] = [(1, 2., 'Hello', np.nan), (2, 3., "World", np.nan), (4, 5., 'Hi', 6.)]
    expected = DataFrame(expected_data, MultiIndex.from_tuples([(np.datetime64(dt(2013, 1, 1)),),
                                                                (np.datetime64(dt(2013, 1, 2)),),
                                                                (np.datetime64(dt(2013, 1, 3)),)],
                                                               names=[u'DATETIME']))
    library.write('pandas', df)
    library.append('pandas', df2)
    actual = library.read('pandas').data
    assert_frame_equal(expected, actual)
def test_dataframe_append_should_add_new_columns_and_reorder(library):
    """Appending a frame with new columns in a different order must adopt
    the appended frame's column order and backfill earlier rows (NaN for
    floats, zero/empty for ints and strings, per the expected dtype)."""
    data = np.zeros((2,), dtype=[('A', 'i4'), ('B', 'f4'), ('C', 'a10')])
    data[:] = [(1, 2., 'Hello'), (2, 3., "World")]
    df = DataFrame(data, index=MultiIndex.from_tuples([(np.datetime64(dt(2013, 1, 1)),),
                                                       (np.datetime64(dt(2013, 1, 2)),), ], names=[u'DATETIME']))
    data2 = np.zeros((1,), dtype=[('C', 'a10'), ('A', 'i4'), ('E', 'a1'), ('B', 'f4'), ('D', 'f4'), ('F', 'i4')])
    data2[:] = [('Hi', 4, 'Y', 5., 6., 7)]
    df2 = DataFrame(data2, index=MultiIndex.from_tuples([(np.datetime64(dt(2013, 1, 3)),)], names=[u'DATETIME']))
    expected_data = np.zeros((3,), dtype=[('C', 'a10'), ('A', 'i4'), ('E', 'a1'),
                                          ('B', 'f4'), ('D', 'f4'), ('F', 'i4')])
    expected_data[:] = [('Hello', 1, '', 2., np.nan, 0), ("World", 2, '', 3., np.nan, 0), ('Hi', 4, 'Y', 5., 6., 7)]
    expected = DataFrame(expected_data, MultiIndex.from_tuples([(np.datetime64(dt(2013, 1, 1)),),
                                                                (np.datetime64(dt(2013, 1, 2)),),
                                                                (np.datetime64(dt(2013, 1, 3)),)],
                                                               names=[u'DATETIME']))
    library.write('pandas', df)
    library.append('pandas', df2)
    actual = library.read('pandas').data
    assert_frame_equal(expected, actual)
# -- auto generated tests --- #
def dataframe(columns, length, index):
    """Build a test frame of ones with `columns` columns named a, b, c, ...

    The first min(index, columns) columns are moved into the index; when
    that count is zero the default RangeIndex is kept.
    """
    names = list(string.ascii_lowercase[:columns])
    frame = DataFrame(np.ones((length, columns)), columns=names)
    n_index = min(index, columns)
    if n_index:
        frame = frame.set_index(names[:n_index])
    return frame
@pytest.mark.parametrize("df_size", list(itertools.combinations_with_replacement([0, 1, 2, 4], r=3)))
def test_dataframe_save_read(library, df_size):
    """Round-trip frames for every (columns, length, index-depth) combo
    built by the dataframe() helper above."""
    df = dataframe(*df_size)
    library.write('pandas', df)
    result = library.read('pandas').data
    assert np.all(df.values == result.values), str(df.values) + "!=" + str(result.values)
    if None not in df.index.names: # saved as 'index' or 'level'
        assert np.all(df.index.names == result.index.names), str(df.index.names) + "!=" + str(result.index.names)
    assert np.all(df.index.values == result.index.values), str(df.index.values) + "!=" + str(result.index.values)
    assert np.all(df.columns.values == result.columns.values), str(df.columns.values) + "!=" + str(result.columns.values)
@pytest.mark.parametrize("df_size", list(itertools.combinations_with_replacement([0, 1, 2, 4], r=3)))
def test_dataframe_save_append_read(library, df_size):
    """Write then append the same frame: the stored item doubles in length
    and keeps its index names and columns."""
    df = dataframe(*df_size)
    library.write('pandas', df)
    library.append('pandas', df)
    result = library.read('pandas').data
    assert len(result) == len(df) * 2
    if None not in df.index.names: # saved as 'index' or 'level'
        assert np.all(df.index.names == result.index.names), str(df.index.names) + "!=" + str(result.index.names)
    assert np.all(df.columns.values == result.columns.values), str(df.columns.values) + "!=" + str(result.columns.values)
def test_large_dataframe_append_rewrite_same_item(library):
    """Repeatedly rewrite the same item with growing slices of a wide frame.

    Guards against stale data from an earlier (shorter or longer) write
    bleeding into a later read of the same symbol: each truncation of the
    frame is written many times over, and every read must reflect exactly
    the most recent write.
    """
    csv = \
"""index, f1, f2, f3, f4, f5, f6, f7, f8, iVol, tau, uPrice, uDelta, uGamma, uVega, uTheta, Delta, Gamma, Vega, Theta, $Price, $Delta, $Gamma, $Vega, $Theta, $Time_Value, $Notional, FX, f9
0, 201401, 2013 - 12 - 20 16:15:00, 15.0, F1, CALL, STRAT, 140.0, 140.345, 0.07231398622706062, 0.008813407863715872, 0.5768068954653813, 0.6427860135978315, 0.391592427081917, 4.915801583071703, -20.166163353481476, 9.641790203967473, 5.873886406228755, 73.73702374607555, -302.49245030222215, 11909.274289984183, 18625.940769791625, 15925.131550993763, 1014.9606370552315, -1601.4183005499872, 4786.093789984206, 2897689.1805000002, 1.37646, SYM
1, 201401, 2013 - 12 - 20 16:15:00, 15.0, F1, PUT, STRAT, 140.0, 140.345, 0.07231398622706062, 0.008813407863715872, 0.2318116692147143, -0.357200149447554, 0.391592427081917, 4.915801583071703, -20.16670499598669, -5.358002241713311, 5.873886406228755, 73.73702374607555, -302.50057493980034, 4786.192353109285, -10350.550083271604, 15925.131550993763, 1014.9606370552315, -1601.4613130062987, 4786.192353109285, 2897689.1805000002, 1.37646, SYM
2, 201401, 2013 - 12 - 20 16:15:00, -48.0, F22, CALL, STRAT, 141.5, 140.345, 0.0739452718231504, 0.008813407863715872, 0.05709601681178711, 0.11956012929302556, 0.20479158314197934, 2.628816497069195, -11.027911868706408, -5.738886206065227, -9.829995990815009, -126.18319185932137, 529.3397696979075, -3772.3383984361194, -11086.338978290602, -26650.835319775462, -1736.8611626668148, 2802.3654592245452, -3772.3383984361194, -9272605.3776, 1.37646, SYM
3, 201402, 2014 - 01 - 24 16:15:00, -286.0, F22, CALL, STRAT, 141.5, 140.345, 0.045487609195962696, 0.10463818541333941, 0.3747457492377393, 0.29120692771365, 0.1660598823861943, 15.56832633851715, -3.3830120036011397, -83.2851813261039, -47.49312636245157, -4452.541332815905, 967.541433029926, -147525.24472279268, -160889.7125497546, -128762.15724702866, -61287.4504296778, 5122.238772724507, -147525.24472279268, -55249273.7082, 1.37646, SYM
4, 201402, 2014 - 01 - 24 16:15:00, -264.0, F22, CALL, STRAT, 142.0, 140.345, 0.044822991783170785, 0.10463818541333941, 0.24229877494137142, 0.21142760067302388, 0.14217904830463807, 13.134711351643713, -2.812643033008342, -55.816886577678304, -37.53526875242445, -3467.56379683394, 742.5377607142022, -88047.84694353123, -107826.65888355605, -101764.66675460352, -47729.62863790045, 3931.052023510272, -88047.84694353123, -50999329.576799996, 1.37646, SYM
5, 201401, 2013 - 12 - 20 16:15:00, -350.0, F22, CALL, STRAT, 142.0, 140.345, 0.07732984880519912, 0.008813407863715872, 0.022997617617102506, 0.053564485523868555, 0.10692101346714668, 1.4353175202195965, -6.296783951449458, -18.747569933353994, -37.422354713501335, -502.3611320768588, 2203.8743830073104, -11079.355260832921, -36216.420371031316, -101458.53708176922, -6914.80003858513, 11667.480512439395, -11079.355260832921, -67612747.545, 1.37646, SYM
6, 201402, 2014 - 01 - 24 16:15:00, -43.0, F22, CALL, STRAT, 142.5, 140.345, 0.04429193547308161, 0.10463818541333941, 0.14930517833206025, 0.14535540627931182, 0.11352765189447668, 10.36359429711007, -2.1930395074393734, -6.250282470010408, -4.881689031462497, -445.634554775733, 94.30069881989306, -8837.042047978748, -12074.25059227865, -13235.111243323556, -6133.9813926660545, 499.23515345242305, -8837.042047978748, -8306708.9841, 1.37646, SYM
7, 201401, 2013 - 12 - 20 16:15:00, -557.0, F22, CALL, STRAT, 142.5, 140.345, 0.0814452531405243, 0.008813407863715872, 0.009355428262274312, 0.02334242880598363, 0.05141464658820557, 0.7269263150873877, -3.358771076933658, -13.001732844932882, -28.637958149630503, -404.89795750367495, 1870.8354898520474, -7172.696641740786, -25116.653728342328, -77642.50435641785, -5573.258425855083, 9904.346993699035, -7172.696641740786, -107600858.2359, 1.37646, SYM
8, 201401, 2013 - 12 - 20 16:15:00, -607.0, F22, CALL, STRAT, 143.0, 140.345, 0.08598678226600448, 0.008813407863715872, 0.003929576582252237, 0.010236258301012439, 0.024009328219809185, 0.35838470316321597, -1.748258969026736, -6.21340878871455, -14.573662229424174, -217.5395148200721, 1061.1931941992289, -3283.2053243209966, -12003.018280721177, -39511.74267470002, -2994.344405692364, 5618.038400336425, -3283.2053243209966, -117259822.1709, 1.37646, SYM
9, 201401, 2013 - 12 - 20 16:15:00, -799.0, F22, CALL, STRAT, 143.5, 140.345, 0.09076344895187359, 0.008813407863715872, 0.0017194411099074047, 0.004596451952699387, 0.01121737082629775, 0.1767420247600966, -0.9100718136522263, -3.67256511020681, -8.962679290211902, -141.2168777833172, 727.1473791081288, -1891.026786204374, -7094.634789685377, -24299.388322293227, -1943.793835936248, 3849.574159412212, -1891.026786204374, -154350243.6813, 1.37646, SYM
10, 201401, 2013 - 12 - 20 16:15:00, -377.0, F22, CALL, STRAT, 144.0, 140.345, 0.09566038240450792, 0.008813407863715872, 0.0007852689424384662, 0.0021295289144923784, 0.005324993197820229, 0.08842782200919548, -0.47989481865526434, -0.8028324007636266, -2.007522435578226, -33.3372888974667, 180.92034663303465, -407.4960157678369, -1550.905840965067, -5442.743810001693, -458.87444675807006, 957.8062320250265, -407.4960157678369, -72828588.06989999, 1.37646, SYM
11, 201402, 2014 - 01 - 24 16:15:00, -43.0, F22, PUT, STRAT, 137.5, 140.345, 0.05414565513055749, 0.10463818541333941, 0.14529132959784974, -0.11936326135136956, 0.08106840033227831, 9.046889913827847, -2.3403474666535415, 5.132620238108891, -3.4859412142879673, -389.0162662945974, 100.63494106610229, -8599.471252145018, 9915.158754388978, -9450.995231657676, -5354.653299038616, 532.7691191532583, -8599.471252145018, -8306708.9841, 1.37646, SYM
12, 201402, 2014 - 01 - 24 16:15:00, -264.0, F22, PUT, STRAT, 138.0, 140.345, 0.052853182910226726, 0.10463818541333941, 0.20369081242765574, -0.16004607860136968, 0.10141337819029916, 11.047155968410756, -2.789510903380204, 42.252164750761594, -26.77313184223898, -2916.44917566044, 736.4308784923738, -74018.27549798116, 81622.4271006569, -72586.63466280713, -40143.75632329569, 3898.7217192677417, -74018.27549798116, -50999329.576799996, 1.37646, SYM
13, 201401, 2013 - 12 - 20 16:15:00, -376.0, F22, PUT, STRAT, 138.0, 140.345, 0.08897789701177691, 0.008813407863715872, 0.009425028910330369, -0.021620830082859088, 0.04411750741642045, 0.6814450839280415, -3.43983227630679, 8.129432111155017, -16.588182788574088, -256.2233515569436, 1293.376935891353, -4877.913910511415, 15704.378314735444, -44973.45961948536, -3526.811944840706, 6847.236989142353, -4877.913910511415, -72635408.7912, 1.37646, SYM
14, 201401, 2013 - 12 - 20 16:15:00, -301.0, F22, PUT, STRAT, 138.5, 140.345, 0.08383267513417192, 0.008813407863715872, 0.020991265826436845, -0.045956251827941025, 0.08727871921287762, 1.2701629715541363, -6.0408289559434, 13.832831800210249, -26.270894483076166, -382.319054437795, 1818.2895157389635, -8696.984965596635, 26722.164695430383, -71224.98149804553, -5262.468856714474, 9626.164564746361, -8696.984965596635, -58146962.8887, 1.37646, SYM
15, 201402, 2014 - 01 - 24 16:15:00, -286.0, F22, PUT, STRAT, 138.5, 140.345, 0.051599724402617266, 0.10463818541333941, 0.28321473137770425, -0.21146513081873966, 0.12351912253075312, 13.136076509490826, -3.2382097361444653, 60.479027414159546, -35.32646904379539, -3756.917881714376, 926.127984537317, -111492.45225751627, 116832.94892344868, -95776.22511698281, -51712.4718746457, 4902.992790754752, -111492.45225751627, -55249273.7082, 1.37646, SYM
16, 201401, 2013 - 12 - 20 16:15:00, -739.0, F22, PUT, STRAT, 139.0, 140.345, 0.0791166184474159, 0.008813407863715872, 0.047581495319667155, -0.0967612790459439, 0.16434769983129724, 2.257195133461944, -10.131173555213623, 71.50658521495254, -121.45295017532867, -1668.0672036283765, 7486.937257302868, -48400.084510256995, 138135.90554124617, -329280.15202122304, -22960.277831063155, 39636.42175841195, -48400.084510256995, -142759486.9593, 1.37646, SYM
17, 201401, 2013 - 12 - 20 16:15:00, -669.0, F22, PUT, STRAT, 139.5, 140.345, 0.07513349054261133, 0.008813407863715872, 0.10733307441031315, -0.1949726645282245, 0.27848967340302655, 3.6322892048663644, -15.482297001088007, 130.4367125693822, -186.30959150662477, -2430.001478055598, 10357.656693727877, -98837.84833028633, 251976.70050152476, -505117.8297913038, -33447.99834484408, 54834.231279417974, -98837.84833028633, -129236937.4503, 1.37646, SYM
18, 201401, 2013 - 12 - 20 16:15:00, -471.0, F22, PUT, STRAT, 140.0, 140.345, 0.07231398622706062, 0.008813407863715872, 0.2318116692147143, -0.357200149447554, 0.391592427081917, 4.915801583071703, -20.16670499598669, 168.24127038979793, -184.4400331555829, -2315.3425456267723, 9498.518053109732, -150286.43988763154, 325007.2726147283, -500049.1307012041, -31869.76400353427, 50285.885228397776, -150286.43988763154, -90987440.2677, 1.37646, SYM
19, 201401, 2013 - 12 - 20 16:15:00, -364.0, F22, PUT, STRAT, 141.0, 140.345, 0.07172143045750252, 0.008813407863715872, 0.7922715181315709, -0.7543151841866509, 0.333159035321538, 4.147995696473539, -16.876460506586433, 274.5707270439409, -121.26988885703983, -1509.8704335163682, 6143.031624397461, -396952.9396004471, 530413.7500248309, -328783.8408272312, -20782.762569179402, 32521.681960454345, -68777.34640044652, -70317257.4468, 1.37646, SYM
20, 201401, 2013 - 12 - 20 16:15:00, -394.0, F22, PUT, STRAT, 141.5, 140.345, 0.0739452718231504, 0.008813407863715872, 1.212080035129219, -0.88042603375236, 0.20479158314197934, 2.628816497069195, -11.026098543797652, 346.8878572984298, -80.68788375793986, -1035.7536998452629, 4344.282826256274, -657341.595950662, 670115.460626992, -218758.93991649026, -14256.735376890107, 22998.967457802737, -30955.94375066146, -76112635.8078, 1.37646, SYM
21, 201402, 2014 - 01 - 24 16:15:00, -40.0, GEE1, CALL, STRAT, 141.5, 140.345, 0.045487609195962696, 0.10463818541333941, 0.3747457492377393, 0.29120692771365, 0.1660598823861943, 15.56832633851715, -3.3830120036011397, -11.648277108545999, -6.642395295447772, -622.733053540686, 135.32048014404558, -20632.901359831147, -22502.057699266377, -18008.69332126275, -8571.671388766126, 716.3970311502808, -20632.901359831147, -7727171.148, 1.37646, SYM
22, 201401, 2013 - 12 - 20 16:15:00, -12.0, GEE1, CALL, STRAT, 141.5, 140.345, 0.0739452718231504, 0.008813407863715872, 0.05709601681178711, 0.11956012929302556, 0.20479158314197934, 2.628816497069195, -11.027911868706408, -1.4347215515163068, -2.4574989977037522, -31.545797964830342, 132.33494242447688, -943.0845996090299, -2771.5847445726504, -6662.708829943866, -434.2152906667037, 700.5913648061363, -943.0845996090299, -2318151.3444, 1.37646, SYM
23, 201402, 2014 - 01 - 24 16:15:00, -45.0, GEE1, CALL, STRAT, 142.0, 140.345, 0.044822991783170785, 0.10463818541333941, 0.24229877494137142, 0.21142760067302388, 0.14217904830463807, 13.134711351643713, -2.812643033008342, -9.514242030286075, -6.398057173708713, -591.0620108239671, 126.56893648537539, -15008.155729011005, -18379.544127878875, -17346.250014989233, -8135.732154187577, 670.0656858256148, -15008.155729011005, -8693067.5415, 1.37646, SYM
24, 201401, 2013 - 12 - 20 16:15:00, -57.0, GEE1, CALL, STRAT, 142.0, 140.345, 0.07732984880519912, 0.008813407863715872, 0.022997617617102506, 0.053564485523868555, 0.10692101346714668, 1.4353175202195965, -6.296783951449458, -3.0531756748605074, -6.09449776762736, -81.813098652517, 358.9166852326191, -1804.3521424785042, -5898.102746139386, -16523.247467602414, -1126.1245777124357, 1900.1325405972727, -1804.3521424785042, -11011218.8859, 1.37646, SYM
25, 201401, 2013 - 12 - 20 16:15:00, -68.0, GEE1, CALL, STRAT, 142.5, 140.345, 0.0814452531405243, 0.008813407863715872, 0.009355428262274312, 0.02334242880598363, 0.05141464658820557, 0.7269263150873877, -3.358771076933658, -1.5872851588068868, -3.496195967997979, -49.430989425942364, 228.39643323148874, -875.6613494405268, -3066.306020695293, -9478.797659311334, -680.3977970523262, 1209.1482864839038, -875.6613494405268, -13136190.9516, 1.37646, SYM
26, 201402, 2014 - 01 - 24 16:15:00, -19.0, GEE1, CALL, STRAT, 142.5, 140.345, 0.04429193547308161, 0.10463818541333941, 0.14930517833206025, 0.14535540627931182, 0.11352765189447668, 10.36359429711007, -2.1930395074393734, -2.7617527193069247, -2.1570253859950568, -196.90829164509134, 41.66775064134809, -3904.7395095720058, -5335.133982634753, -5848.072409840642, -2710.3638711780245, 220.5922771068846, -3904.7395095720058, -3670406.2953000003, 1.37646, SYM
27, 201401, 2013 - 12 - 20 16:15:00, -91.0, GEE1, CALL, STRAT, 143.0, 140.345, 0.08598678226600448, 0.008813407863715872, 0.003929576582252237, 0.010236258301012439, 0.024009328219809185, 0.35838470316321597, -1.748258969026736, -0.9314995053921319, -2.1848488680026357, -32.613007987852654, 159.091566181433, -492.21035339902915, -1799.464025610588, -5923.5067271790795, -448.9050097495967, 842.2429891772894, -492.21035339902915, -17579314.3617, 1.37646, SYM
28, 201401, 2013 - 12 - 20 16:15:00, -117.0, GEE1, CALL, STRAT, 143.5, 140.345, 0.09076344895187359, 0.008813407863715872, 0.0017194411099074047, 0.004596451952699387, 0.01121737082629775, 0.1767420247600966, -0.9100718136522263, -0.5377848784658282, -1.3124323866768368, -20.678816896931302, 106.47840219731049, -276.9088034867481, -1038.8889491779587, -3558.233333802638, -284.63564305950064, 563.7048518788846, -276.9088034867481, -22601975.6079, 1.37646, SYM
29, 201401, 2013 - 12 - 20 16:15:00, -126.0, GEE1, CALL, STRAT, 144.0, 140.345, 0.09566038240450792, 0.008813407863715872, 0.0007852689424384662, 0.0021295289144923784, 0.005324993197820229, 0.08842782200919548, -0.47989481865526434, -0.26832064322603966, -0.6709491429253489, -11.141905573158631, 60.46674715056331, -136.19230235211526, -518.3398831872638, -1819.0602654117067, -153.3638734522993, 320.1156107033245, -136.19230235211526, -24340589.1162, 1.37646, SYM
30, 201402, 2014 - 01 - 24 16:15:00, -19.0, GEE1, PUT, STRAT, 137.5, 140.345, 0.05414565513055749, 0.10463818541333941, 0.14529132959784974, -0.11936326135136956, 0.08106840033227831, 9.046889913827847, -2.3403474666535415, 2.2679019656760215, -1.5402996063132879, -171.8909083627291, 44.46660186641729, -3799.766367226869, 4381.11665891606, -4176.021148871998, -2366.009597249621, 235.40961078864902, -3799.766367226869, -3670406.2953000003, 1.37646, SYM
31, 201401, 2013 - 12 - 20 16:15:00, -64.0, GEE1, PUT, STRAT, 138.0, 140.345, 0.08897789701177691, 0.008813407863715872, 0.009425028910330369, -0.021620830082859088, 0.04411750741642045, 0.6814450839280415, -3.43983227630679, 1.3837331253029816, -2.8235204746509086, -43.612485371394655, 220.14926568363455, -830.2832188104537, 2673.0856705932674, -7655.056956508147, -600.3084161430988, 1165.48714708806, -830.2832188104537, -12363473.8368, 1.37646, SYM
32, 201402, 2014 - 01 - 24 16:15:00, -45.0, GEE1, PUT, STRAT, 138.0, 140.345, 0.052853182910226726, 0.10463818541333941, 0.20369081242765574, -0.16004607860136968, 0.10141337819029916, 11.047155968410756, -2.789510903380204, 7.2020735370616356, -4.563602018563462, -497.122018578484, 125.52799065210918, -12616.751505337697, 13912.913710339246, -12372.721817523941, -6842.685736925402, 664.5548385115469, -12616.751505337697, -8693067.5415, 1.37646, SYM
33, 201401, 2013 - 12 - 20 16:15:00, -51.0, GEE1, PUT, STRAT, 138.5, 140.345, 0.08383267513417192, 0.008813407863715872, 0.020991265826436845, -0.045956251827941025, 0.08727871921287762, 1.2701629715541363, -6.0408289559434, 2.3437688432249923, -4.451214679856759, -64.77831154926095, 308.08227675311343, -1473.5755257323203, 4527.675745737374, -12068.020120931302, -891.6475471509574, 1631.011271767656, -1473.5755257323203, -9852143.2137, 1.37646, SYM
34, 201402, 2014 - 01 - 24 16:15:00, -40.0, GEE1, PUT, STRAT, 138.5, 140.345, 0.051599724402617266, 0.10463818541333941, 0.28321473137770425, -0.21146513081873966, 0.12351912253075312, 13.136076509490826, -3.2382097361444653, 8.458605232749587, -4.940764901230125, -525.443060379633, 129.5283894457786, -15593.349966086193, 16340.27257670611, -13395.276240137457, -7232.5135489014965, 685.733257448217, -15593.349966086193, -7727171.148, 1.37646, SYM
35, 201401, 2013 - 12 - 20 16:15:00, -98.0, GEE1, PUT, STRAT, 139.0, 140.345, 0.0791166184474159, 0.008813407863715872, 0.047581495319667155, -0.0967612790459439, 0.16434769983129724, 2.257195133461944, -10.131173555213623, 9.4826053465025, -16.10607458346713, -221.20512307927052, 992.855008410935, -6418.414454675487, 18318.42861034117, -43666.38010565609, -3044.8000371369267, 5256.250787989675, -6418.414454675487, -18931569.312599998, 1.37646, SYM
36, 201401, 2013 - 12 - 20 16:15:00, -111.0, GEE1, PUT, STRAT, 139.5, 140.345, 0.07513349054261133, 0.008813407863715872, 0.10733307441031315, -0.1949726645282245, 0.27848967340302655, 3.6322892048663644, -15.482297001088007, 21.64196576263292, -30.912353747735946, -403.18410174016645, 1718.5349671207687, -16399.10487991298, 41807.79335675523, -83808.78790259302, -5549.667886812695, 9098.05631093482, -16399.10487991298, -21442899.9357, 1.37646, SYM
37, 201401, 2013 - 12 - 20 16:15:00, -108.0, GEE1, PUT, STRAT, 140.0, 140.345, 0.07231398622706062, 0.008813407863715872, 0.2318116692147143, -0.357200149447554, 0.391592427081917, 4.915801583071703, -20.16670499598669, 38.577616140335834, -42.29198212484704, -530.9065709717439, 2178.0041395665626, -34460.58494238685, 74523.96059955555, -114660.94716715509, -7307.7165867976655, 11530.52145364535, -34460.58494238685, -20863362.0996, 1.37646, SYM
38, 201401, 2013 - 12 - 20 16:15:00, -83.0, GEE1, PUT, STRAT, 141.0, 140.345, 0.07172143045750252, 0.008813407863715872, 0.7922715181315709, -0.7543151841866509, 0.333159035321538, 4.147995696473539, -16.876460506586433, 62.608160287492026, -27.652199931687655, -344.28364280730375, 1400.746222046674, -90513.99446933273, 120945.99245071695, -74969.94172708844, -4738.926629785414, 7415.658249224481, -15682.746569332587, -16033880.132100001, 1.37646, SYM
39, 201401, 2013 - 12 - 20 16:15:00, -56.0, GEE1, PUT, STRAT, 141.5, 140.345, 0.0739452718231504, 0.008813407863715872, 1.212080035129219, -0.88042603375236, 0.20479158314197934, 2.628816497069195, -11.026098543797652, 49.30385789013216, -11.468328655950843, -147.21372383587493, 617.4615184526685, -93429.26236862202, 95244.83704343032, -31092.641206404707, -2026.3380231112837, 3268.888775728308, -4399.8295686219335, -10818039.607199999, 1.37646, SYM"""
    csv = StringIO(csv)
    df = read_csv(csv).set_index(['index'])
    # Write each truncation of the frame repeatedly over the same item,
    # from shortest to longest; every read must reflect only the most
    # recent write (no residue from a previous, differently-sized write).
    # `df[:None]` is the whole frame.
    for end in (-2, -1, None):
        expected = df[:end]
        for _ in range(10):
            library.write('pandas', expected)
            result = library.read('pandas').data
            assert len(result) == len(expected)
            assert np.all(expected.values == result.values)
            assert np.all(expected.columns.values == result.columns.values)
def test_large_dataframe_rewrite_same_item(library):
csv = \
"""index, f1, f2, f3, f4, f5, f6, f7, f8, iVol, tau, uPrice, uDelta, uGamma, uVega, uTheta, Delta, Gamma, Vega, Theta, $Price, $Delta, $Gamma, $Vega, $Theta, $Time_Value, $Notional, FX, f9
0, 201401, 2013 - 12 - 20 16:15:00, 15.0, F1, CALL, STRAT, 140.0, 140.345, 0.07231398622706062, 0.008813407863715872, 0.5768068954653813, 0.6427860135978315, 0.391592427081917, 4.915801583071703, -20.166163353481476, 9.641790203967473, 5.873886406228755, 73.73702374607555, -302.49245030222215, 11909.274289984183, 18625.940769791625, 15925.131550993763, 1014.9606370552315, -1601.4183005499872, 4786.093789984206, 2897689.1805000002, 1.37646, SYM
1, 201401, 2013 - 12 - 20 16:15:00, 15.0, F1, PUT, STRAT, 140.0, 140.345, 0.07231398622706062, 0.008813407863715872, 0.2318116692147143, -0.357200149447554, 0.391592427081917, 4.915801583071703, -20.16670499598669, -5.358002241713311, 5.873886406228755, 73.73702374607555, -302.50057493980034, 4786.192353109285, -10350.550083271604, 15925.131550993763, 1014.9606370552315, -1601.4613130062987, 4786.192353109285, 2897689.1805000002, 1.37646, SYM
2, 201401, 2013 - 12 - 20 16:15:00, -48.0, F22, CALL, STRAT, 141.5, 140.345, 0.0739452718231504, 0.008813407863715872, 0.05709601681178711, 0.11956012929302556, 0.20479158314197934, 2.628816497069195, -11.027911868706408, -5.738886206065227, -9.829995990815009, -126.18319185932137, 529.3397696979075, -3772.3383984361194, -11086.338978290602, -26650.835319775462, -1736.8611626668148, 2802.3654592245452, -3772.3383984361194, -9272605.3776, 1.37646, SYM
3, 201402, 2014 - 01 - 24 16:15:00, -286.0, F22, CALL, STRAT, 141.5, 140.345, 0.045487609195962696, 0.10463818541333941, 0.3747457492377393, 0.29120692771365, 0.1660598823861943, 15.56832633851715, -3.3830120036011397, -83.2851813261039, -47.49312636245157, -4452.541332815905, 967.541433029926, -147525.24472279268, -160889.7125497546, -128762.15724702866, -61287.4504296778, 5122.238772724507, -147525.24472279268, -55249273.7082, 1.37646, SYM
4, 201402, 2014 - 01 - 24 16:15:00, -264.0, F22, CALL, STRAT, 142.0, 140.345, 0.044822991783170785, 0.10463818541333941, 0.24229877494137142, 0.21142760067302388, 0.14217904830463807, 13.134711351643713, -2.812643033008342, -55.816886577678304, -37.53526875242445, -3467.56379683394, 742.5377607142022, -88047.84694353123, -107826.65888355605, -101764.66675460352, -47729.62863790045, 3931.052023510272, -88047.84694353123, -50999329.576799996, 1.37646, SYM
5, 201401, 2013 - 12 - 20 16:15:00, -350.0, F22, CALL, STRAT, 142.0, 140.345, 0.07732984880519912, 0.008813407863715872, 0.022997617617102506, 0.053564485523868555, 0.10692101346714668, 1.4353175202195965, -6.296783951449458, -18.747569933353994, -37.422354713501335, -502.3611320768588, 2203.8743830073104, -11079.355260832921, -36216.420371031316, -101458.53708176922, -6914.80003858513, 11667.480512439395, -11079.355260832921, -67612747.545, 1.37646, SYM
6, 201402, 2014 - 01 - 24 16:15:00, -43.0, F22, CALL, STRAT, 142.5, 140.345, 0.04429193547308161, 0.10463818541333941, 0.14930517833206025, 0.14535540627931182, 0.11352765189447668, 10.36359429711007, -2.1930395074393734, -6.250282470010408, -4.881689031462497, -445.634554775733, 94.30069881989306, -8837.042047978748, -12074.25059227865, -13235.111243323556, -6133.9813926660545, 499.23515345242305, -8837.042047978748, -8306708.9841, 1.37646, SYM
7, 201401, 2013 - 12 - 20 16:15:00, -557.0, F22, CALL, STRAT, 142.5, 140.345, 0.0814452531405243, 0.008813407863715872, 0.009355428262274312, 0.02334242880598363, 0.05141464658820557, 0.7269263150873877, -3.358771076933658, -13.001732844932882, -28.637958149630503, -404.89795750367495, 1870.8354898520474, -7172.696641740786, -25116.653728342328, -77642.50435641785, -5573.258425855083, 9904.346993699035, -7172.696641740786, -107600858.2359, 1.37646, SYM
8, 201401, 2013 - 12 - 20 16:15:00, -607.0, F22, CALL, STRAT, 143.0, 140.345, 0.08598678226600448, 0.008813407863715872, 0.003929576582252237, 0.010236258301012439, 0.024009328219809185, 0.35838470316321597, -1.748258969026736, -6.21340878871455, -14.573662229424174, -217.5395148200721, 1061.1931941992289, -3283.2053243209966, -12003.018280721177, -39511.74267470002, -2994.344405692364, 5618.038400336425, -3283.2053243209966, -117259822.1709, 1.37646, SYM
9, 201401, 2013 - 12 - 20 16:15:00, -799.0, F22, CALL, STRAT, 143.5, 140.345, 0.09076344895187359, 0.008813407863715872, 0.0017194411099074047, 0.004596451952699387, 0.01121737082629775, 0.1767420247600966, -0.9100718136522263, -3.67256511020681, -8.962679290211902, -141.2168777833172, 727.1473791081288, -1891.026786204374, -7094.634789685377, -24299.388322293227, -1943.793835936248, 3849.574159412212, -1891.026786204374, -154350243.6813, 1.37646, SYM
10, 201401, 2013 - 12 - 20 16:15:00, -377.0, F22, CALL, STRAT, 144.0, 140.345, 0.09566038240450792, 0.008813407863715872, 0.0007852689424384662, 0.0021295289144923784, 0.005324993197820229, 0.08842782200919548, -0.47989481865526434, -0.8028324007636266, -2.007522435578226, -33.3372888974667, 180.92034663303465, -407.4960157678369, -1550.905840965067, -5442.743810001693, -458.87444675807006, 957.8062320250265, -407.4960157678369, -72828588.06989999, 1.37646, SYM
11, 201402, 2014 - 01 - 24 16:15:00, -43.0, F22, PUT, STRAT, 137.5, 140.345, 0.05414565513055749, 0.10463818541333941, 0.14529132959784974, -0.11936326135136956, 0.08106840033227831, 9.046889913827847, -2.3403474666535415, 5.132620238108891, -3.4859412142879673, -389.0162662945974, 100.63494106610229, -8599.471252145018, 9915.158754388978, -9450.995231657676, -5354.653299038616, 532.7691191532583, -8599.471252145018, -8306708.9841, 1.37646, SYM
12, 201402, 2014 - 01 - 24 16:15:00, -264.0, F22, PUT, STRAT, 138.0, 140.345, 0.052853182910226726, 0.10463818541333941, 0.20369081242765574, -0.16004607860136968, 0.10141337819029916, 11.047155968410756, -2.789510903380204, 42.252164750761594, -26.77313184223898, -2916.44917566044, 736.4308784923738, -74018.27549798116, 81622.4271006569, -72586.63466280713, -40143.75632329569, 3898.7217192677417, -74018.27549798116, -50999329.576799996, 1.37646, SYM
13, 201401, 2013 - 12 - 20 16:15:00, -376.0, F22, PUT, STRAT, 138.0, 140.345, 0.08897789701177691, 0.008813407863715872, 0.009425028910330369, -0.021620830082859088, 0.04411750741642045, 0.6814450839280415, -3.43983227630679, 8.129432111155017, -16.588182788574088, -256.2233515569436, 1293.376935891353, -4877.913910511415, 15704.378314735444, -44973.45961948536, -3526.811944840706, 6847.236989142353, -4877.913910511415, -72635408.7912, 1.37646, SYM
14, 201401, 2013 - 12 - 20 16:15:00, -301.0, F22, PUT, STRAT, 138.5, 140.345, 0.08383267513417192, 0.008813407863715872, 0.020991265826436845, -0.045956251827941025, 0.08727871921287762, 1.2701629715541363, -6.0408289559434, 13.832831800210249, -26.270894483076166, -382.319054437795, 1818.2895157389635, -8696.984965596635, 26722.164695430383, -71224.98149804553, -5262.468856714474, 9626.164564746361, -8696.984965596635, -58146962.8887, 1.37646, SYM
15, 201402, 2014 - 01 - 24 16:15:00, -286.0, F22, PUT, STRAT, 138.5, 140.345, 0.051599724402617266, 0.10463818541333941, 0.28321473137770425, -0.21146513081873966, 0.12351912253075312, 13.136076509490826, -3.2382097361444653, 60.479027414159546, -35.32646904379539, -3756.917881714376, 926.127984537317, -111492.45225751627, 116832.94892344868, -95776.22511698281, -51712.4718746457, 4902.992790754752, -111492.45225751627, -55249273.7082, 1.37646, SYM
16, 201401, 2013 - 12 - 20 16:15:00, -739.0, F22, PUT, STRAT, 139.0, 140.345, 0.0791166184474159, 0.008813407863715872, 0.047581495319667155, -0.0967612790459439, 0.16434769983129724, 2.257195133461944, -10.131173555213623, 71.50658521495254, -121.45295017532867, -1668.0672036283765, 7486.937257302868, -48400.084510256995, 138135.90554124617, -329280.15202122304, -22960.277831063155, 39636.42175841195, -48400.084510256995, -142759486.9593, 1.37646, SYM
17, 201401, 2013 - 12 - 20 16:15:00, -669.0, F22, PUT, STRAT, 139.5, 140.345, 0.07513349054261133, 0.008813407863715872, 0.10733307441031315, -0.1949726645282245, 0.27848967340302655, 3.6322892048663644, -15.482297001088007, 130.4367125693822, -186.30959150662477, -2430.001478055598, 10357.656693727877, -98837.84833028633, 251976.70050152476, -505117.8297913038, -33447.99834484408, 54834.231279417974, -98837.84833028633, -129236937.4503, 1.37646, SYM
18, 201401, 2013 - 12 - 20 16:15:00, -471.0, F22, PUT, STRAT, 140.0, 140.345, 0.07231398622706062, 0.008813407863715872, 0.2318116692147143, -0.357200149447554, 0.391592427081917, 4.915801583071703, -20.16670499598669, 168.24127038979793, -184.4400331555829, -2315.3425456267723, 9498.518053109732, -150286.43988763154, 325007.2726147283, -500049.1307012041, -31869.76400353427, 50285.885228397776, -150286.43988763154, -90987440.2677, 1.37646, SYM
19, 201401, 2013 - 12 - 20 16:15:00, -364.0, F22, PUT, STRAT, 141.0, 140.345, 0.07172143045750252, 0.008813407863715872, 0.7922715181315709, -0.7543151841866509, 0.333159035321538, 4.147995696473539, -16.876460506586433, 274.5707270439409, -121.26988885703983, -1509.8704335163682, 6143.031624397461, -396952.9396004471, 530413.7500248309, -328783.8408272312, -20782.762569179402, 32521.681960454345, -68777.34640044652, -70317257.4468, 1.37646, SYM
20, 201401, 2013 - 12 - 20 16:15:00, -394.0, F22, PUT, STRAT, 141.5, 140.345, 0.0739452718231504, 0.008813407863715872, 1.212080035129219, -0.88042603375236, 0.20479158314197934, 2.628816497069195, -11.026098543797652, 346.8878572984298, -80.68788375793986, -1035.7536998452629, 4344.282826256274, -657341.595950662, 670115.460626992, -218758.93991649026, -14256.735376890107, 22998.967457802737, -30955.94375066146, -76112635.8078, 1.37646, SYM
21, 201402, 2014 - 01 - 24 16:15:00, -40.0, GEE1, CALL, STRAT, 141.5, 140.345, 0.045487609195962696, 0.10463818541333941, 0.3747457492377393, 0.29120692771365, 0.1660598823861943, 15.56832633851715, -3.3830120036011397, -11.648277108545999, -6.642395295447772, -622.733053540686, 135.32048014404558, -20632.901359831147, -22502.057699266377, -18008.69332126275, -8571.671388766126, 716.3970311502808, -20632.901359831147, -7727171.148, 1.37646, SYM
22, 201401, 2013 - 12 - 20 16:15:00, -12.0, GEE1, CALL, STRAT, 141.5, 140.345, 0.0739452718231504, 0.008813407863715872, 0.05709601681178711, 0.11956012929302556, 0.20479158314197934, 2.628816497069195, -11.027911868706408, -1.4347215515163068, -2.4574989977037522, -31.545797964830342, 132.33494242447688, -943.0845996090299, -2771.5847445726504, -6662.708829943866, -434.2152906667037, 700.5913648061363, -943.0845996090299, -2318151.3444, 1.37646, SYM
23, 201402, 2014 - 01 - 24 16:15:00, -45.0, GEE1, CALL, STRAT, 142.0, 140.345, 0.044822991783170785, 0.10463818541333941, 0.24229877494137142, 0.21142760067302388, 0.14217904830463807, 13.134711351643713, -2.812643033008342, -9.514242030286075, -6.398057173708713, -591.0620108239671, 126.56893648537539, -15008.155729011005, -18379.544127878875, -17346.250014989233, -8135.732154187577, 670.0656858256148, -15008.155729011005, -8693067.5415, 1.37646, SYM
24, 201401, 2013 - 12 - 20 16:15:00, -57.0, GEE1, CALL, STRAT, 142.0, 140.345, 0.07732984880519912, 0.008813407863715872, 0.022997617617102506, 0.053564485523868555, 0.10692101346714668, 1.4353175202195965, -6.296783951449458, -3.0531756748605074, -6.09449776762736, -81.813098652517, 358.9166852326191, -1804.3521424785042, -5898.102746139386, -16523.247467602414, -1126.1245777124357, 1900.1325405972727, -1804.3521424785042, -11011218.8859, 1.37646, SYM
25, 201401, 2013 - 12 - 20 16:15:00, -68.0, GEE1, CALL, STRAT, 142.5, 140.345, 0.0814452531405243, 0.008813407863715872, 0.009355428262274312, 0.02334242880598363, 0.05141464658820557, 0.7269263150873877, -3.358771076933658, -1.5872851588068868, -3.496195967997979, -49.430989425942364, 228.39643323148874, -875.6613494405268, -3066.306020695293, -9478.797659311334, -680.3977970523262, 1209.1482864839038, -875.6613494405268, -13136190.9516, 1.37646, SYM
26, 201402, 2014 - 01 - 24 16:15:00, -19.0, GEE1, CALL, STRAT, 142.5, 140.345, 0.04429193547308161, 0.10463818541333941, 0.14930517833206025, 0.14535540627931182, 0.11352765189447668, 10.36359429711007, -2.1930395074393734, -2.7617527193069247, -2.1570253859950568, -196.90829164509134, 41.66775064134809, -3904.7395095720058, -5335.133982634753, -5848.072409840642, -2710.3638711780245, 220.5922771068846, -3904.7395095720058, -3670406.2953000003, 1.37646, SYM
27, 201401, 2013 - 12 - 20 16:15:00, -91.0, GEE1, CALL, STRAT, 143.0, 140.345, 0.08598678226600448, 0.008813407863715872, 0.003929576582252237, 0.010236258301012439, 0.024009328219809185, 0.35838470316321597, -1.748258969026736, -0.9314995053921319, -2.1848488680026357, -32.613007987852654, 159.091566181433, -492.21035339902915, -1799.464025610588, -5923.5067271790795, -448.9050097495967, 842.2429891772894, -492.21035339902915, -17579314.3617, 1.37646, SYM
28, 201401, 2013 - 12 - 20 16:15:00, -117.0, GEE1, CALL, STRAT, 143.5, 140.345, 0.09076344895187359, 0.008813407863715872, 0.0017194411099074047, 0.004596451952699387, 0.01121737082629775, 0.1767420247600966, -0.9100718136522263, -0.5377848784658282, -1.3124323866768368, -20.678816896931302, 106.47840219731049, -276.9088034867481, -1038.8889491779587, -3558.233333802638, -284.63564305950064, 563.7048518788846, -276.9088034867481, -22601975.6079, 1.37646, SYM
29, 201401, 2013 - 12 - 20 16:15:00, -126.0, GEE1, CALL, STRAT, 144.0, 140.345, 0.09566038240450792, 0.008813407863715872, 0.0007852689424384662, 0.0021295289144923784, 0.005324993197820229, 0.08842782200919548, -0.47989481865526434, -0.26832064322603966, -0.6709491429253489, -11.141905573158631, 60.46674715056331, -136.19230235211526, -518.3398831872638, -1819.0602654117067, -153.3638734522993, 320.1156107033245, -136.19230235211526, -24340589.1162, 1.37646, SYM
30, 201402, 2014 - 01 - 24 16:15:00, -19.0, GEE1, PUT, STRAT, 137.5, 140.345, 0.05414565513055749, 0.10463818541333941, 0.14529132959784974, -0.11936326135136956, 0.08106840033227831, 9.046889913827847, -2.3403474666535415, 2.2679019656760215, -1.5402996063132879, -171.8909083627291, 44.46660186641729, -3799.766367226869, 4381.11665891606, -4176.021148871998, -2366.009597249621, 235.40961078864902, -3799.766367226869, -3670406.2953000003, 1.37646, SYM
31, 201401, 2013 - 12 - 20 16:15:00, -64.0, GEE1, PUT, STRAT, 138.0, 140.345, 0.08897789701177691, 0.008813407863715872, 0.009425028910330369, -0.021620830082859088, 0.04411750741642045, 0.6814450839280415, -3.43983227630679, 1.3837331253029816, -2.8235204746509086, -43.612485371394655, 220.14926568363455, -830.2832188104537, 2673.0856705932674, -7655.056956508147, -600.3084161430988, 1165.48714708806, -830.2832188104537, -12363473.8368, 1.37646, SYM
32, 201402, 2014 - 01 - 24 16:15:00, -45.0, GEE1, PUT, STRAT, 138.0, 140.345, 0.052853182910226726, 0.10463818541333941, 0.20369081242765574, -0.16004607860136968, 0.10141337819029916, 11.047155968410756, -2.789510903380204, 7.2020735370616356, -4.563602018563462, -497.122018578484, 125.52799065210918, -12616.751505337697, 13912.913710339246, -12372.721817523941, -6842.685736925402, 664.5548385115469, -12616.751505337697, -8693067.5415, 1.37646, SYM
33, 201401, 2013 - 12 - 20 16:15:00, -51.0, GEE1, PUT, STRAT, 138.5, 140.345, 0.08383267513417192, 0.008813407863715872, 0.020991265826436845, -0.045956251827941025, 0.08727871921287762, 1.2701629715541363, -6.0408289559434, 2.3437688432249923, -4.451214679856759, -64.77831154926095, 308.08227675311343, -1473.5755257323203, 4527.675745737374, -12068.020120931302, -891.6475471509574, 1631.011271767656, -1473.5755257323203, -9852143.2137, 1.37646, SYM
34, 201402, 2014 - 01 - 24 16:15:00, -40.0, GEE1, PUT, STRAT, 138.5, 140.345, 0.051599724402617266, 0.10463818541333941, 0.28321473137770425, -0.21146513081873966, 0.12351912253075312, 13.136076509490826, -3.2382097361444653, 8.458605232749587, -4.940764901230125, -525.443060379633, 129.5283894457786, -15593.349966086193, 16340.27257670611, -13395.276240137457, -7232.5135489014965, 685.733257448217, -15593.349966086193, -7727171.148, 1.37646, SYM
35, 201401, 2013 - 12 - 20 16:15:00, -98.0, GEE1, PUT, STRAT, 139.0, 140.345, 0.0791166184474159, 0.008813407863715872, 0.047581495319667155, -0.0967612790459439, 0.16434769983129724, 2.257195133461944, -10.131173555213623, 9.4826053465025, -16.10607458346713, -221.20512307927052, 992.855008410935, -6418.414454675487, 18318.42861034117, -43666.38010565609, -3044.8000371369267, 5256.250787989675, -6418.414454675487, -18931569.312599998, 1.37646, SYM
36, 201401, 2013 - 12 - 20 16:15:00, -111.0, GEE1, PUT, STRAT, 139.5, 140.345, 0.07513349054261133, 0.008813407863715872, 0.10733307441031315, -0.1949726645282245, 0.27848967340302655, 3.6322892048663644, -15.482297001088007, 21.64196576263292, -30.912353747735946, -403.18410174016645, 1718.5349671207687, -16399.10487991298, 41807.79335675523, -83808.78790259302, -5549.667886812695, 9098.05631093482, -16399.10487991298, -21442899.9357, 1.37646, SYM
37, 201401, 2013 - 12 - 20 16:15:00, -108.0, GEE1, PUT, STRAT, 140.0, 140.345, 0.07231398622706062, 0.008813407863715872, 0.2318116692147143, -0.357200149447554, 0.391592427081917, 4.915801583071703, -20.16670499598669, 38.577616140335834, -42.29198212484704, -530.9065709717439, 2178.0041395665626, -34460.58494238685, 74523.96059955555, -114660.94716715509, -7307.7165867976655, 11530.52145364535, -34460.58494238685, -20863362.0996, 1.37646, SYM
38, 201401, 2013 - 12 - 20 16:15:00, -83.0, GEE1, PUT, STRAT, 141.0, 140.345, 0.07172143045750252, 0.008813407863715872, 0.7922715181315709, -0.7543151841866509, 0.333159035321538, 4.147995696473539, -16.876460506586433, 62.608160287492026, -27.652199931687655, -344.28364280730375, 1400.746222046674, -90513.99446933273, 120945.99245071695, -74969.94172708844, -4738.926629785414, 7415.658249224481, -15682.746569332587, -16033880.132100001, 1.37646, SYM
39, 201401, 2013 - 12 - 20 16:15:00, -56.0, GEE1, PUT, STRAT, 141.5, 140.345, 0.0739452718231504, 0.008813407863715872, 1.212080035129219, -0.88042603375236, 0.20479158314197934, 2.628816497069195, -11.026098543797652, 49.30385789013216, -11.468328655950843, -147.21372383587493, 617.4615184526685, -93429.26236862202, 95244.83704343032, -31092.641206404707, -2026.3380231112837, 3268.888775728308, -4399.8295686219335, -10818039.607199999, 1.37646, SYM"""
csv = StringIO(csv)
df = read_csv(csv).set_index(['index'])
for _ in range(100):
library.write('pandas', df)
result = library.read('pandas').data
assert len(result) == len(df)
assert np.all(df.values == result.values)
assert np.all(df.columns.values == result.columns.values)
def test_append_after_truncate_after_append(library):
    """Alternating empty/non-empty writes keep every version independently readable."""
    col_names = ['MAIN_UPPER', 'MAIN_LOWER', 'AUX_UPPER', 'AUX_LOWER', 'TARGET_HEDGE_POSITION']
    no_rows = DataFrame(columns=col_names, dtype=np.float64)
    one_row = DataFrame(data=[np.zeros(5)], columns=col_names)
    # Write empty / full / empty / full, producing four versions of 'sym'.
    for frame in (no_rows, one_row, no_rows, one_row):
        library.write('sym', frame)
    # Each stored version retains its own row count.
    for version, expected_len in ((1, 0), (2, 1), (3, 0), (4, 1)):
        assert len(library.read('sym', version).data) == expected_len
def test_can_write_pandas_df_with_object_columns(library):
    """Object-dtype (string, including None) columns survive a write/read round trip."""
    original = DataFrame(data=dict(A=['a', 'b', None, 'c'], B=[1., 2., 3., 4.]),
                         index=range(4))
    library.write('objects', original)
    round_tripped = library.read('objects').data
    assert_frame_equal(round_tripped, original)
def panel(i1, i2, i3):
    """Return a random Panel of shape (i1, i2, i3) with daily dates on the last axis."""
    item_axis = range(i1)
    major_axis = ['A%d' % j for j in range(i2)]
    minor_axis = list(rrule(DAILY, count=i3, dtstart=dt(1970, 1, 1), interval=1))
    return Panel(np.random.randn(i1, i2, i3), item_axis, major_axis, minor_axis)
@pytest.mark.parametrize("df_size", list(itertools.combinations_with_replacement([1, 2, 4], r=3)))
def test_panel_save_read(library, df_size):
    '''Note - empties are not tested here as they don't work!'''
    pn = panel(*df_size)
    library.write('pandas', pn)
    result = library.read('pandas').data
    assert np.all(pn.values == result.values), str(pn.values) + "!=" + str(result.values)
    for i in range(3):
        assert np.all(pn.axes[i] == result.axes[i])
        if None not in pn.axes[i].names:
            # Bug fix: the failure message previously compared pn's names with
            # themselves (pn vs pn), so a real mismatch would print no diff.
            assert np.all(pn.axes[i].names == result.axes[i].names), \
                str(pn.axes[i].names) + "!=" + str(result.axes[i].names)
def test_save_read_ints(library):
    """Integer-valued columns round-trip unchanged."""
    hourly = [dt(2012, 1, 1) + dtd(hours=h) for h in range(5)]
    original = DataFrame(index=hourly,
                         data={'col1': np.arange(5), 'col2': np.arange(5)})
    original.index.name = 'index'
    library.write('TEST_1', original)
    assert_frame_equal(original, library.read('TEST_1').data)
def test_save_read_datetimes(library):
    # FEF symbols have datetimes in the CLOSE_REVISION field. Handle specially.
    hourly = [dt(2012, 1, 1) + dtd(hours=h) for h in range(3)]
    original = DataFrame(index=hourly,
                         data={'field1': [1, 2, 3],
                               'revision': [dt(2013, 1, 1), dt(2013, 1, 2), dt(2013, 1, 3)],
                               'field2': [4, 5, 6]})
    original.index.name = 'index'
    library.write('TEST_1', original)
    assert_frame_equal(original, library.read('TEST_1').data)
def test_labels(library):
    """A custom index name ('some_index') is preserved on read."""
    frame = DataFrame(index=[dt(2012, 1, 1), dt(2012, 1, 2)],
                      data={'data': [1., 2.]})
    frame.index.name = 'some_index'
    library.write('TEST_1', frame)
    assert_frame_equal(frame, library.read('TEST_1').data)
def test_duplicate_labels(library):
    """Duplicate column names ('a', 'a') are stored and read back intact."""
    idx = [dt(2012, 1, 1) + dtd(hours=h) for h in range(5)]
    frame = DataFrame(index=idx,
                      data=[[np.arange(5), np.arange(5, 10)]],
                      columns=['a', 'a'])
    library.write('TEST_1', frame)
    assert_frame_equal(frame, library.read('TEST_1').data)
def test_no_labels(library):
    """Frames with default (integer) column labels round-trip."""
    idx = [dt(2012, 1, 1) + dtd(hours=h) for h in range(5)]
    frame = DataFrame(index=idx, data=[[np.arange(5), np.arange(5, 10)]])
    library.write('TEST_1', frame)
    assert_frame_equal(frame, library.read('TEST_1').data)
@pytest.mark.xfail(reason='needs investigating')
def test_no_index_labels(library):
    """An unnamed index should round-trip; currently a known failure."""
    frame = DataFrame(index=[dt(2012, 1, 1), dt(2012, 1, 2)],
                      data={'data': [1., 2.]})
    library.write('TEST_1', frame)
    assert_frame_equal(frame, library.read('TEST_1').data)
def test_not_unique(library):
    """Two rows sharing the same timestamp are both preserved."""
    stamp = dt.now()
    frame = DataFrame(index=[stamp, stamp], data={'near': [1., 2.]})
    frame.index.name = 'index'
    library.write('ts', frame)
    assert_frame_equal(frame, library.read('ts').data)
def test_daterange_end(library):
    """Reading only the last row decompresses fewer chunks than a full read."""
    df = DataFrame(index=date_range(dt(2001, 1, 1), freq='S', periods=30 * 1024),
                   data=np.tile(np.arange(30 * 1024), 100).reshape((-1, 100)))
    df.columns = [str(c) for c in df.columns]
    library.write('MYARR', df)
    # Count decompress calls for a full read as the baseline.
    full_spy = Mock(side_effect=decompress)
    with patch('arctic.store._ndarray_store.decompress', full_spy):
        library.read('MYARR').data
    # A single-row read at the very end should touch far fewer chunks.
    tail_spy = Mock(side_effect=decompress)
    with patch('arctic.store._ndarray_store.decompress', tail_spy):
        result = library.read('MYARR', date_range=DateRange(df.index[-1], df.index[-1])).data
    assert len(result) == 1
    assert tail_spy.call_count < full_spy.call_count
def test_daterange_start(library):
    """An end-bounded read of the first row decompresses fewer chunks than a full read."""
    df = DataFrame(index=date_range(dt(2001, 1, 1), freq='S', periods=30 * 1024),
                   data=np.tile(np.arange(30 * 1024), 100).reshape((-1, 100)))
    df.columns = [str(c) for c in df.columns]
    library.write('MYARR', df)
    # Baseline decompress count: whole symbol.
    full_spy = Mock(side_effect=decompress)
    with patch('arctic.store._ndarray_store.decompress', full_spy):
        library.read('MYARR').data
    # Reading up to the first index only should be cheaper.
    head_spy = Mock(side_effect=decompress)
    with patch('arctic.store._ndarray_store.decompress', head_spy):
        result = library.read('MYARR', date_range=DateRange(end=df.index[0])).data
    assert len(result) == 1
    assert head_spy.call_count < full_spy.call_count
    # An end just past the first row still returns exactly one row.
    just_after = df.index[0] + dtd(milliseconds=1)
    result = library.read('MYARR', date_range=DateRange(end=just_after)).data
    assert len(result) == 1
def test_daterange_large_DataFrame(library):
    """Range reads over a large frame agree with the equivalent pandas label slices."""
    df = DataFrame(index=date_range(dt(2001, 1, 1), freq='S', periods=30 * 1024),
                   data=np.tile(np.arange(30 * 1024), 100).reshape((-1, 100)))
    df.columns = [str(c) for c in df.columns]
    library.write('MYARR', df)

    def check(start, end=None):
        # Compare the library's date-range read with a pandas label slice.
        if end is None:
            date_rng, sliced = DateRange(start), df[start:]
        else:
            date_rng, sliced = DateRange(start, end), df[start:end]
        result = library.read('MYARR', date_range=date_rng).data
        assert_frame_equal(sliced, result, check_names=False)

    # assert saved
    assert_frame_equal(df, library.read('MYARR').data, check_names=False)
    # first 100
    check(df.index[0], df.index[100])
    # second 100
    check(df.index[100], df.index[200])
    # first row
    check(df.index[0], df.index[0])
    # last 100
    check(df.index[-100])
    # last 200-100
    check(df.index[-200], df.index[-100])
    # last row
    check(df.index[-1], df.index[-1])
    # somewhere in time
    check(dt(2020, 1, 1), dt(2031, 9, 1))
def test_daterange_large_DataFrame_middle(library):
    """Mid-frame range reads agree with pandas label slices at many offsets."""
    df = DataFrame(index=date_range(dt(2001, 1, 1), freq='S', periods=30 * 1024),
                   data=np.tile(np.arange(30 * 1024), 100).reshape((-1, 100)))
    df.columns = [str(c) for c in df.columns]
    library.write('MYARR', df)

    def check(lo, hi):
        # Positional bounds lo/hi are turned into label bounds for both reads.
        result = library.read('MYARR', date_range=DateRange(df.index[lo], df.index[hi])).data
        assert_frame_equal(df[df.index[lo]:df.index[hi]], result, check_names=False)

    # middle
    for hi in np.arange(200, 30000, 1000):
        check(100, hi)
    # middle following
    for lo in np.arange(200, 30000, 1000):
        for offset in (100, 300, 500):
            check(lo, lo + offset)
@pytest.mark.parametrize("df,assert_equal", [
    (DataFrame(index=date_range(dt(2001, 1, 1), freq='D', periods=30000),
               data=range(30000), columns=['A']), assert_frame_equal),
    (Series(index=date_range(dt(2001, 1, 1), freq='D', periods=30000),
            data=range(30000)), assert_series_equal),
])
def test_daterange(library, df, assert_equal):
    """DateRange reads (open, closed, empty) agree with pandas label slices."""
    df.index.name = 'idx'
    df.name = 'FOO'
    library.write('MYARR', df)

    def read(date_rng=None):
        if date_rng is None:
            return library.read('MYARR').data
        return library.read('MYARR', date_range=date_rng).data

    # The whole object, via several equivalent unbounded/bounded ranges.
    assert_equal(df, read())
    assert_equal(df, read(DateRange(df.index[0])))
    assert_equal(df, read(DateRange(df.index[0], df.index[-1])))
    assert_equal(df, read(DateRange()))
    # Half-open and single-point ranges.
    assert_equal(df[df.index[10]:], read(DateRange(df.index[10])))
    assert_equal(df[:df.index[10]], read(DateRange(end=df.index[10])))
    assert_equal(df[df.index[-1]:], read(DateRange(df.index[-1])))
    assert_equal(df[df.index[-1]:], read(DateRange(df.index[-1], df.index[-1])))
    assert_equal(df[df.index[0]:df.index[0]], read(DateRange(df.index[0], df.index[0])))
    assert_equal(df[:df.index[0]], read(DateRange(end=df.index[0])))
    # Bounds one day beyond either edge of the data.
    assert_equal(df[df.index[0] - DateOffset(days=1):],
                 read(DateRange(df.index[0] - DateOffset(days=1))))
    assert_equal(df[df.index[-1] + DateOffset(days=1):],
                 read(DateRange(df.index[-1] + DateOffset(days=1))))
    # Ranges entirely outside the stored data return empty results.
    assert len(read(DateRange(dt(1950, 1, 1), dt(1951, 1, 1)))) == 0
    assert len(read(DateRange(dt(2091, 1, 1), dt(2091, 1, 1)))) == 0
def test_daterange_append(library):
    """Appended rows extend the readable range; mixed-range reads stay consistent."""
    df = DataFrame(index=date_range(dt(2001, 1, 1), freq='S', periods=30 * 1024),
                   data=np.tile(np.arange(30 * 1024), 100).reshape((-1, 100)))
    df.columns = [str(c) for c in df.columns]
    df.index.name = 'idx'
    library.write('MYARR', df)
    # The original frame reads back intact.
    assert_frame_equal(df, library.read('MYARR').data, check_names=False)
    # Append a copy of the last two rows, shifted one day forward.
    first_extra = df.ix[-2:].copy()
    first_extra.index = first_extra.index + dtd(days=1)
    library.append('MYARR', first_extra)
    # The appended rows come back from a range starting at their first index.
    assert_frame_equal(first_extra,
                       library.read('MYARR', date_range=DateRange(first_extra.index[0])).data)
    # The original block is still readable on its own.
    assert_frame_equal(df,
                       library.read('MYARR', date_range=DateRange(df.index[0], df.index[-1])).data)
    # Append another copy of the last two rows, shifted two days forward.
    second_extra = df.ix[-2:].copy()
    second_extra.index = second_extra.index + dtd(days=2)
    library.append('MYARR', second_extra)
    # Reads that mix the original data with one or both appends all agree.
    assert_frame_equal(second_extra,
                       library.read('MYARR', date_range=DateRange(second_extra.index[0])).data)
    assert_frame_equal(concat((df, first_extra, second_extra)), library.read('MYARR').data)
    assert_frame_equal(concat((first_extra, second_extra)),
                       library.read('MYARR', date_range=DateRange(start=first_extra.index[0])).data)
    assert_frame_equal(concat((df, first_extra, second_extra))[df.index[50]:second_extra.index[-2]],
                       library.read('MYARR',
                                    date_range=DateRange(start=df.index[50],
                                                         end=second_extra.index[-2])).data)
| einarhuseby/arctic | tests/integration/store/test_pandas_store.py | Python | lgpl-2.1 | 67,391 |
"""
This module gathers tree-based methods, including decision, regression and
randomized trees. Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <g.louppe@gmail.com>
# Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Brian Holt <bdholt1@gmail.com>
# Noel Dawe <noel@dawe.me>
# Satrajit Gosh <satrajit.ghosh@gmail.com>
# Joly Arnaud <arnaud.v.joly@gmail.com>
# Licence: BSD 3 clause
from __future__ import division
import numbers
import numpy as np
from abc import ABCMeta, abstractmethod
from warnings import warn
from ..base import BaseEstimator, ClassifierMixin, RegressorMixin
from ..externals import six
from ..externals.six.moves import xrange
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import array2d, check_random_state
from ..utils.fixes import unique
from ..utils.validation import check_arrays
from ._tree import Criterion, Splitter, Tree
from . import _tree
# Public estimators exported by this module. (The two Extra* classes are
# defined later in the file, beyond this excerpt.)
__all__ = ["DecisionTreeClassifier",
           "DecisionTreeRegressor",
           "ExtraTreeClassifier",
           "ExtraTreeRegressor"]
# =============================================================================
# Types and constants
# =============================================================================
# Dtypes expected by the Cython tree code: DTYPE for the input matrix X,
# DOUBLE for targets y and sample weights.
DTYPE = _tree.DTYPE
DOUBLE = _tree.DOUBLE
# Map criterion names to their Cython implementations, for classification
# and regression respectively.
CRITERIA_CLF = {"gini": _tree.Gini, "entropy": _tree.Entropy}
CRITERIA_REG = {"mse": _tree.MSE}
# Map splitter names to their Cython implementations.
SPLITTERS = {"best": _tree.BestSplitter,
             "presort-best": _tree.PresortBestSplitter,
             "random": _tree.RandomSplitter}
# =============================================================================
# Base decision tree
# =============================================================================
class BaseDecisionTree(six.with_metaclass(ABCMeta, BaseEstimator,
                                          _LearntSelectorMixin)):
    """Base class for decision trees.

    Handles parameter validation, input conversion and delegation to the
    Cython ``Tree``/``Splitter``/``Criterion`` machinery shared by the
    classifier and regressor subclasses.

    Warning: This class should not be used directly.
    Use derived classes instead.
    """

    @abstractmethod
    def __init__(self,
                 criterion,
                 splitter,
                 max_depth,
                 min_samples_split,
                 min_samples_leaf,
                 max_features,
                 random_state):
        # Hyper-parameters are stored verbatim; all validation is deferred
        # to fit(), per scikit-learn convention.
        self.criterion = criterion
        self.splitter = splitter
        self.max_depth = max_depth
        self.min_samples_split = min_samples_split
        self.min_samples_leaf = min_samples_leaf
        self.max_features = max_features
        self.random_state = random_state

        # Fitted state, populated by fit().
        self.n_features_ = None
        self.n_outputs_ = None
        self.classes_ = None
        self.n_classes_ = None
        self.tree_ = None
        self.max_features_ = None

    def fit(self, X, y, sample_mask=None, X_argsorted=None, check_input=True,
            sample_weight=None):
        """Build a decision tree from the training set (X, y).

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            The training input samples. Use ``dtype=np.float32`` for maximum
            efficiency.

        y : array-like, shape = [n_samples] or [n_samples, n_outputs]
            The target values (integers that correspond to classes in
            classification, real numbers in regression).
            Use ``dtype=np.float64`` and ``order='C'`` for maximum
            efficiency.

        sample_mask : deprecated
            Ignored; kept for backward compatibility until 0.16.

        X_argsorted : deprecated
            Ignored; kept for backward compatibility until 0.16.

        sample_weight : array-like, shape = [n_samples] or None
            Sample weights. If None, then samples are equally weighted. Splits
            that would create child nodes with net zero or negative weight are
            ignored while searching for a split in each node. In the case of
            classification, splits are also ignored if they would result in any
            single class carrying a negative weight in either child node.

        check_input : boolean, (default=True)
            Allow to bypass several input checking.
            Don't use this parameter unless you know what you do.

        Returns
        -------
        self : object
            Returns self.
        """
        random_state = check_random_state(self.random_state)

        # Deprecations
        if sample_mask is not None:
            warn("The sample_mask parameter is deprecated as of version 0.14 "
                 "and will be removed in 0.16.", DeprecationWarning)

        if X_argsorted is not None:
            warn("The X_argsorted parameter is deprecated as of version 0.14 "
                 "and will be removed in 0.16.", DeprecationWarning)

        # Convert data to the dense, C-contiguous float32 layout required by
        # the Cython splitters.
        if check_input:
            X, = check_arrays(X, dtype=DTYPE, sparse_format="dense",
                              check_ccontiguous=True)

        # Determine output settings
        n_samples, self.n_features_ = X.shape
        is_classification = isinstance(self, ClassifierMixin)

        y = np.atleast_1d(y)

        if y.ndim == 1:
            # reshape is necessary to preserve the data contiguity against vs
            # [:, np.newaxis] that does not.
            y = np.reshape(y, (-1, 1))

        self.n_outputs_ = y.shape[1]

        if is_classification:
            # Encode class labels as consecutive integers per output; keep
            # the original labels in classes_ for decoding at predict time.
            y = np.copy(y)

            self.classes_ = []
            self.n_classes_ = []

            for k in xrange(self.n_outputs_):
                classes_k, y[:, k] = unique(y[:, k], return_inverse=True)
                self.classes_.append(classes_k)
                self.n_classes_.append(classes_k.shape[0])

        else:
            # Regression: one pseudo-"class" per output.
            self.classes_ = [None] * self.n_outputs_
            self.n_classes_ = [1] * self.n_outputs_

        self.n_classes_ = np.array(self.n_classes_, dtype=np.intp)

        if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
            y = np.ascontiguousarray(y, dtype=DOUBLE)

        # Check parameters
        # None means "unlimited"; (2**31 - 1) is effectively infinite depth.
        max_depth = (2 ** 31) - 1 if self.max_depth is None else self.max_depth

        # Resolve max_features from its string / None / int / float forms
        # into a concrete integer count.
        if isinstance(self.max_features, six.string_types):
            if self.max_features == "auto":
                # "auto" means sqrt(n_features) for classification but all
                # features for regression.
                if is_classification:
                    max_features = max(1, int(np.sqrt(self.n_features_)))
                else:
                    max_features = self.n_features_
            elif self.max_features == "sqrt":
                max_features = max(1, int(np.sqrt(self.n_features_)))
            elif self.max_features == "log2":
                max_features = max(1, int(np.log2(self.n_features_)))
            else:
                raise ValueError(
                    'Invalid value for max_features. Allowed string '
                    'values are "auto", "sqrt" or "log2".')
        elif self.max_features is None:
            max_features = self.n_features_
        elif isinstance(self.max_features, (numbers.Integral, np.integer)):
            max_features = self.max_features
        else:  # float
            max_features = int(self.max_features * self.n_features_)

        self.max_features_ = max_features

        if len(y) != n_samples:
            raise ValueError("Number of labels=%d does not match "
                             "number of samples=%d" % (len(y), n_samples))
        if self.min_samples_split <= 0:
            raise ValueError("min_samples_split must be greater than zero.")
        if self.min_samples_leaf <= 0:
            raise ValueError("min_samples_leaf must be greater than zero.")
        if max_depth <= 0:
            raise ValueError("max_depth must be greater than zero. ")
        if not (0 < max_features <= self.n_features_):
            raise ValueError("max_features must be in (0, n_features]")

        if sample_weight is not None:
            # Weights must be a contiguous 1-D float64 array of length
            # n_samples for the Cython builder.
            if (getattr(sample_weight, "dtype", None) != DOUBLE or
                    not sample_weight.flags.contiguous):
                sample_weight = np.ascontiguousarray(
                    sample_weight, dtype=DOUBLE)
            if len(sample_weight.shape) > 1:
                raise ValueError("Sample weights array has more "
                                 "than one dimension: %d" %
                                 len(sample_weight.shape))
            if len(sample_weight) != n_samples:
                raise ValueError("Number of weights=%d does not match "
                                 "number of samples=%d" %
                                 (len(sample_weight), n_samples))

        # Set min_samples_split sensibly: a split must leave at least
        # min_samples_leaf samples on each side.
        min_samples_split = max(self.min_samples_split,
                                2 * self.min_samples_leaf)

        # Build tree
        criterion = self.criterion
        if not isinstance(criterion, Criterion):
            # Look up the Cython criterion by name.
            if is_classification:
                criterion = CRITERIA_CLF[self.criterion](self.n_outputs_,
                                                         self.n_classes_)
            else:
                criterion = CRITERIA_REG[self.criterion](self.n_outputs_)

        splitter = self.splitter
        if not isinstance(self.splitter, Splitter):
            splitter = SPLITTERS[self.splitter](criterion,
                                                self.max_features_,
                                                self.min_samples_leaf,
                                                random_state)

        self.tree_ = Tree(self.n_features_, self.n_classes_,
                          self.n_outputs_, splitter, max_depth,
                          min_samples_split, self.min_samples_leaf)

        self.tree_.build(X, y, sample_weight=sample_weight)

        # For single-output problems, unwrap the per-output lists so that
        # classes_ / n_classes_ are scalars/arrays rather than lists.
        if self.n_outputs_ == 1:
            self.n_classes_ = self.n_classes_[0]
            self.classes_ = self.classes_[0]

        return self

    def predict(self, X):
        """Predict class or regression value for X.

        For a classification model, the predicted class for each sample in X is
        returned. For a regression model, the predicted value based on X is
        returned.

        Parameters
        ----------
        X : array-like of shape = [n_samples, n_features]
            The input samples.

        Returns
        -------
        y : array of shape = [n_samples] or [n_samples, n_outputs]
            The predicted classes, or the predict values.

        Raises
        ------
        Exception
            If the estimator has not been fitted yet.
        ValueError
            If X has a different number of features than seen in fit.
        """
        if getattr(X, "dtype", None) != DTYPE or X.ndim != 2:
            X = array2d(X, dtype=DTYPE)

        n_samples, n_features = X.shape

        if self.tree_ is None:
            raise Exception("Tree not initialized. Perform a fit first")

        if self.n_features_ != n_features:
            raise ValueError("Number of features of the model must "
                             " match the input. Model n_features is %s and "
                             " input n_features is %s "
                             % (self.n_features_, n_features))

        # The Cython tree returns per-leaf value arrays: class counts for
        # classification, mean targets for regression.
        proba = self.tree_.predict(X)

        # Classification
        if isinstance(self, ClassifierMixin):
            if self.n_outputs_ == 1:
                # Decode the argmax class index back to the original label.
                return self.classes_.take(np.argmax(proba, axis=1), axis=0)

            else:
                predictions = np.zeros((n_samples, self.n_outputs_))

                for k in xrange(self.n_outputs_):
                    predictions[:, k] = self.classes_[k].take(
                        np.argmax(proba[:, k], axis=1),
                        axis=0)

                return predictions

        # Regression
        else:
            if self.n_outputs_ == 1:
                return proba[:, 0]

            else:
                return proba[:, :, 0]

    @property
    def feature_importances_(self):
        """Return the feature importances.

        The importance of a feature is computed as the (normalized) total
        reduction of the criterion brought by that feature.
        It is also known as the Gini importance.

        Returns
        -------
        feature_importances_ : array, shape = [n_features]
        """
        if self.tree_ is None:
            raise ValueError("Estimator not fitted, "
                             "call `fit` before `feature_importances_`.")

        return self.tree_.compute_feature_importances()
# =============================================================================
# Public estimators
# =============================================================================
class DecisionTreeClassifier(BaseDecisionTree, ClassifierMixin):
    """A decision tree classifier.

    Parameters
    ----------
    criterion : string, optional (default="gini")
        The function to measure the quality of a split. Supported criteria are
        "gini" for the Gini impurity and "entropy" for the information gain.

    splitter : string, optional (default="best")
        The strategy used to choose the split at each node. Supported
        strategies are "best" to choose the best split and "random" to choose
        the best random split.

    max_features : int, float, string or None, optional (default=None)
        The number of features to consider when looking for the best split:
          - If int, then consider `max_features` features at each split.
          - If float, then `max_features` is a percentage and
            `int(max_features * n_features)` features are considered at each
            split.
          - If "auto", then `max_features=sqrt(n_features)`.
          - If "sqrt", then `max_features=sqrt(n_features)`.
          - If "log2", then `max_features=log2(n_features)`.
          - If None, then `max_features=n_features`.

    max_depth : integer or None, optional (default=None)
        The maximum depth of the tree. If None, then nodes are expanded until
        all leaves are pure or until all leaves contain less than
        min_samples_split samples.

    min_samples_split : integer, optional (default=2)
        The minimum number of samples required to split an internal node.

    min_samples_leaf : integer, optional (default=1)
        The minimum number of samples required to be at a leaf node.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Attributes
    ----------
    `tree_` : Tree object
        The underlying Tree object.

    `max_features_` : int,
        The infered value of max_features.

    `classes_` : array of shape = [n_classes] or a list of such arrays
        The classes labels (single output problem),
        or a list of arrays of class labels (multi-output problem).

    `n_classes_` : int or list
        The number of classes (for single output problems),
        or a list containing the number of classes for each
        output (for multi-output problems).

    `feature_importances_` : array of shape = [n_features]
        The feature importances. The higher, the more important the
        feature. The importance of a feature is computed as the (normalized)
        total reduction of the criterion brought by that feature. It is also
        known as the Gini importance [4]_.

    See also
    --------
    DecisionTreeRegressor

    References
    ----------

    .. [1] http://en.wikipedia.org/wiki/Decision_tree_learning

    .. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
           and Regression Trees", Wadsworth, Belmont, CA, 1984.

    .. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
           Learning", Springer, 2009.

    .. [4] L. Breiman, and A. Cutler, "Random Forests",
           http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm

    Examples
    --------
    >>> from sklearn.datasets import load_iris
    >>> from sklearn.cross_validation import cross_val_score
    >>> from sklearn.tree import DecisionTreeClassifier
    >>> clf = DecisionTreeClassifier(random_state=0)
    >>> iris = load_iris()
    >>> cross_val_score(clf, iris.data, iris.target, cv=10)
    ...                             # doctest: +SKIP
    ...
    array([ 1.     ,  0.93...,  0.86...,  0.93...,  0.93...,
            0.93...,  0.93...,  1.     ,  0.93...,  1.      ])
    """
    def __init__(self,
                 criterion="gini",
                 splitter="best",
                 max_depth=None,
                 min_samples_split=2,
                 min_samples_leaf=1,
                 max_features=None,
                 random_state=None,
                 min_density=None,
                 compute_importances=None):
        super(DecisionTreeClassifier, self).__init__(criterion,
                                                     splitter,
                                                     max_depth,
                                                     min_samples_split,
                                                     min_samples_leaf,
                                                     max_features,
                                                     random_state)

        # min_density and compute_importances are accepted only to emit
        # deprecation warnings; they have no effect on the estimator.
        if min_density is not None:
            warn("The min_density parameter is deprecated as of version 0.14 "
                 "and will be removed in 0.16.", DeprecationWarning)

        if compute_importances is not None:
            warn("Setting compute_importances is no longer required as "
                 "version 0.14. Variable importances are now computed on the "
                 "fly when accessing the feature_importances_ attribute. "
                 "This parameter will be removed in 0.16.",
                 DeprecationWarning)

    def predict_proba(self, X):
        """Predict class probabilities of the input samples X.

        Probabilities are the per-leaf class counts normalized to sum to one.

        Parameters
        ----------
        X : array-like of shape = [n_samples, n_features]
            The input samples.

        Returns
        -------
        p : array of shape = [n_samples, n_classes], or a list of n_outputs
            such arrays if n_outputs > 1.
            The class probabilities of the input samples. Classes are ordered
            by arithmetical order.
        """
        if getattr(X, "dtype", None) != DTYPE or X.ndim != 2:
            X = array2d(X, dtype=DTYPE)

        n_samples, n_features = X.shape

        if self.tree_ is None:
            raise Exception("Tree not initialized. Perform a fit first.")

        if self.n_features_ != n_features:
            raise ValueError("Number of features of the model must "
                             " match the input. Model n_features is %s and "
                             " input n_features is %s "
                             % (self.n_features_, n_features))

        # Raw leaf values: per-sample class counts (per output).
        proba = self.tree_.predict(X)

        if self.n_outputs_ == 1:
            proba = proba[:, :self.n_classes_]
            normalizer = proba.sum(axis=1)[:, np.newaxis]
            # Guard against division by zero for empty leaves.
            normalizer[normalizer == 0.0] = 1.0
            proba /= normalizer

            return proba

        else:
            all_proba = []

            for k in xrange(self.n_outputs_):
                proba_k = proba[:, k, :self.n_classes_[k]]
                normalizer = proba_k.sum(axis=1)[:, np.newaxis]
                # Guard against division by zero for empty leaves.
                normalizer[normalizer == 0.0] = 1.0
                proba_k /= normalizer
                all_proba.append(proba_k)

            return all_proba

    def predict_log_proba(self, X):
        """Predict class log-probabilities of the input samples X.

        Parameters
        ----------
        X : array-like of shape = [n_samples, n_features]
            The input samples.

        Returns
        -------
        p : array of shape = [n_samples, n_classes], or a list of n_outputs
            such arrays if n_outputs > 1.
            The class log-probabilities of the input samples. Classes are
            ordered by arithmetical order.
        """
        proba = self.predict_proba(X)

        if self.n_outputs_ == 1:
            return np.log(proba)

        else:
            # Take logs in place, one output at a time.
            for k in xrange(self.n_outputs_):
                proba[k] = np.log(proba[k])

            return proba
class DecisionTreeRegressor(BaseDecisionTree, RegressorMixin):
    """A tree regressor.

    Parameters
    ----------
    criterion : string, optional (default="mse")
        The function to measure the quality of a split. The only supported
        criterion is "mse" for the mean squared error.

    splitter : string, optional (default="best")
        The strategy used to choose the split at each node. Supported
        strategies are "best" to choose the best split and "random" to choose
        the best random split.

    max_features : int, float, string or None, optional (default=None)
        The number of features to consider when looking for the best split:
          - If int, then consider `max_features` features at each split.
          - If float, then `max_features` is a percentage and
            `int(max_features * n_features)` features are considered at each
            split.
          - If "auto", then `max_features=n_features`.
          - If "sqrt", then `max_features=sqrt(n_features)`.
          - If "log2", then `max_features=log2(n_features)`.
          - If None, then `max_features=n_features`.

    max_depth : integer or None, optional (default=None)
        The maximum depth of the tree. If None, then nodes are expanded until
        all leaves are pure or until all leaves contain less than
        min_samples_split samples.

    min_samples_split : integer, optional (default=2)
        The minimum number of samples required to split an internal node.

    min_samples_leaf : integer, optional (default=1)
        The minimum number of samples required to be at a leaf node.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Attributes
    ----------
    `tree_` : Tree object
        The underlying Tree object.

    `max_features_` : int,
        The infered value of max_features.

    `feature_importances_` : array of shape = [n_features]
        The feature importances.
        The higher, the more important the feature.
        The importance of a feature is computed as the
        (normalized) total reduction of the criterion brought
        by that feature. It is also known as the Gini importance [4]_.

    See also
    --------
    DecisionTreeClassifier

    References
    ----------
    .. [1] http://en.wikipedia.org/wiki/Decision_tree_learning

    .. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
           and Regression Trees", Wadsworth, Belmont, CA, 1984.

    .. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
           Learning", Springer, 2009.

    .. [4] L. Breiman, and A. Cutler, "Random Forests",
           http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm

    Examples
    --------
    >>> from sklearn.datasets import load_boston
    >>> from sklearn.cross_validation import cross_val_score
    >>> from sklearn.tree import DecisionTreeRegressor
    >>> boston = load_boston()
    >>> regressor = DecisionTreeRegressor(random_state=0)
    >>> cross_val_score(regressor, boston.data, boston.target, cv=10)
    ...                             # doctest: +SKIP
    ...
    array([ 0.61..., 0.57..., -0.34..., 0.41..., 0.75...,
            0.07..., 0.29..., 0.33..., -1.42..., -1.77...])
    """
    def __init__(self,
                 criterion="mse",
                 splitter="best",
                 max_depth=None,
                 min_samples_split=2,
                 min_samples_leaf=1,
                 max_features=None,
                 random_state=None,
                 min_density=None,
                 compute_importances=None):
        # Forward the live hyper-parameters to the shared base class; the two
        # deprecated parameters below are intentionally NOT forwarded.
        super(DecisionTreeRegressor, self).__init__(criterion,
                                                    splitter,
                                                    max_depth,
                                                    min_samples_split,
                                                    min_samples_leaf,
                                                    max_features,
                                                    random_state)

        # Deprecated no-op parameters kept only for backward compatibility:
        # passing any non-None value merely emits a DeprecationWarning.
        if min_density is not None:
            warn("The min_density parameter is deprecated as of version 0.14 "
                 "and will be removed in 0.16.", DeprecationWarning)

        if compute_importances is not None:
            warn("Setting compute_importances is no longer required as "
                 "version 0.14. Variable importances are now computed on the "
                 "fly when accessing the feature_importances_ attribute. "
                 "This parameter will be removed in 0.16.",
                 DeprecationWarning)
class ExtraTreeClassifier(DecisionTreeClassifier):
    """An extremely randomized tree classifier.

    Extra-trees are built differently from classic decision trees: instead
    of searching for the strongest split, one random split is drawn for each
    of the `max_features` randomly selected features and the best of those
    random splits is kept. Setting `max_features` to 1 therefore yields a
    totally random decision tree.

    Warning: Extra-trees should only be used within ensemble methods.

    See also
    --------
    ExtraTreeRegressor, ExtraTreesClassifier, ExtraTreesRegressor

    References
    ----------
    .. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
           Machine Learning, 63(1), 3-42, 2006.
    """
    def __init__(self, criterion="gini", splitter="random", max_depth=None,
                 min_samples_split=2, min_samples_leaf=1, max_features="auto",
                 random_state=None, min_density=None, compute_importances=None):
        super(ExtraTreeClassifier, self).__init__(
            criterion, splitter, max_depth, min_samples_split,
            min_samples_leaf, max_features, random_state)

        # The two trailing parameters are deprecated no-ops kept for backward
        # compatibility; any non-None value only triggers a warning.
        if min_density is not None:
            warn("The min_density parameter is deprecated as of version 0.14 "
                 "and will be removed in 0.16.", DeprecationWarning)
        if compute_importances is not None:
            warn("Setting compute_importances is no longer required as "
                 "version 0.14. Variable importances are now computed on the "
                 "fly when accessing the feature_importances_ attribute. "
                 "This parameter will be removed in 0.16.",
                 DeprecationWarning)
class ExtraTreeRegressor(DecisionTreeRegressor):
    """An extremely randomized tree regressor.

    Extra-trees are built differently from classic decision trees: instead
    of searching for the strongest split, one random split is drawn for each
    of the `max_features` randomly selected features and the best of those
    random splits is kept. Setting `max_features` to 1 therefore yields a
    totally random decision tree.

    Warning: Extra-trees should only be used within ensemble methods.

    See also
    --------
    ExtraTreeClassifier, ExtraTreesClassifier, ExtraTreesRegressor

    References
    ----------
    .. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
           Machine Learning, 63(1), 3-42, 2006.
    """
    def __init__(self, criterion="mse", splitter="random", max_depth=None,
                 min_samples_split=2, min_samples_leaf=1, max_features="auto",
                 random_state=None, min_density=None, compute_importances=None):
        super(ExtraTreeRegressor, self).__init__(
            criterion, splitter, max_depth, min_samples_split,
            min_samples_leaf, max_features, random_state)

        # The two trailing parameters are deprecated no-ops kept for backward
        # compatibility; any non-None value only triggers a warning.
        if min_density is not None:
            warn("The min_density parameter is deprecated as of version 0.14 "
                 "and will be removed in 0.16.", DeprecationWarning)
        if compute_importances is not None:
            warn("Setting compute_importances is no longer required as "
                 "version 0.14. Variable importances are now computed on the "
                 "fly when accessing the feature_importances_ attribute. "
                 "This parameter will be removed in 0.16.",
                 DeprecationWarning)
| Tong-Chen/scikit-learn | sklearn/tree/tree.py | Python | bsd-3-clause | 29,287 |
#!/usr/bin/python
import sys
from Options import Options
from JiraProcessor import JiraProcessor
def main(argv):
    """Load agileProgress.ini, apply command-line overrides, and run the
    completed-work Jira report."""
    options = Options('agileProgress.ini', argv)
    JiraProcessor(options).processCompleted()
# Script entry point: forward the CLI arguments, skipping the program name.
if __name__ == "__main__":
    main(sys.argv[1:])
| ksagon/productivity | agileProgress.py | Python | apache-2.0 | 287 |
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Android-specific implementation of the core backend interfaces.
See core/backends.py for more docs.
"""
import datetime
import glob
import hashlib
import json
import logging
import os
import posixpath
from memory_inspector import constants
from memory_inspector.backends import prebuilts_fetcher
from memory_inspector.backends.android import dumpheap_native_parser
from memory_inspector.backends.android import memdump_parser
from memory_inspector.core import backends
from memory_inspector.core import exceptions
from memory_inspector.core import native_heap
from memory_inspector.core import symbol
# The memory_inspector/__init__ module will add the <CHROME_SRC>/build/android
# deps to the PYTHONPATH for pylib.
from pylib import android_commands
from pylib.device import device_errors
from pylib.device import device_utils
from pylib.symbols import elf_symbolizer
# Host-side location of the prebuilt memdump helper (fetched from GCS) and
# the path it is deployed to on the device.
_MEMDUMP_PREBUILT_PATH = os.path.join(constants.PROJECT_SRC,
                                      'prebuilts', 'memdump-android-arm')
_MEMDUMP_PATH_ON_DEVICE = '/data/local/tmp/memdump'

# Same for the ps_ext helper, which dumps system/process stats as JSON.
_PSEXT_PREBUILT_PATH = os.path.join(constants.PROJECT_SRC,
                                    'prebuilts', 'ps_ext-android-arm')
_PSEXT_PATH_ON_DEVICE = '/data/local/tmp/ps_ext'

# Android system property that toggles the dlmalloc debug hooks used for
# native heap tracing.
_DLMALLOC_DEBUG_SYSPROP = 'libc.debug.malloc'

# Output path template for 'am dumpheap -n' (%d is the target pid).
_DUMPHEAP_OUT_FILE_PATH = '/data/local/tmp/heap-%d-native.dump'
class AndroidBackend(backends.Backend):
  """Android-specific implementation of the core |Backend| interface."""

  _SETTINGS_KEYS = {
      'adb_path': 'Path of directory containing the adb binary',
      'toolchain_path': 'Path of toolchain (for addr2line)'}

  def __init__(self):
    super(AndroidBackend, self).__init__(
        settings=backends.Settings(AndroidBackend._SETTINGS_KEYS))
    self._devices = {}  # 'device id' -> |Device|.

  def EnumerateDevices(self):
    """Yields one (cached) |AndroidDevice| per device attached via adb."""
    # If a custom adb_path has been setup through settings, prepend that to the
    # PATH. The android_commands module will use that to locate adb.
    if (self.settings['adb_path'] and
        not os.environ['PATH'].startswith(self.settings['adb_path'])):
      os.environ['PATH'] = os.pathsep.join([self.settings['adb_path'],
                                            os.environ['PATH']])
    for device_id in android_commands.GetAttachedDevices():
      # Reuse the cached wrapper so repeated enumerations return the same
      # |AndroidDevice| objects.
      device = self._devices.get(device_id)
      if not device:
        device = AndroidDevice(
            self, device_utils.DeviceUtils(device_id))
        self._devices[device_id] = device
      yield device

  def ExtractSymbols(self, native_heaps, sym_paths):
    """Performs symbolization. Returns a |symbol.Symbols| from |NativeHeap|s.

    This method performs the symbolization but does NOT decorate (i.e. add
    symbol/source info) to the stack frames of |native_heaps|. The heaps
    can be decorated as needed using the native_heap.SymbolizeUsingSymbolDB()
    method. Rationale: the most common use case in this application is:
    symbolize-and-store-symbols and load-symbols-and-decorate-heaps (in two
    different stages at two different times).

    Args:
      native_heaps: a collection of native_heap.NativeHeap instances.
      sym_paths: either a list of or a string of semicolon-sep. symbol paths.

    Raises:
      exceptions.MemoryInspectorException: if addr2line cannot be located
          inside the configured toolchain_path.
    """
    assert(all(isinstance(x, native_heap.NativeHeap) for x in native_heaps))
    symbols = symbol.Symbols()

    # Find addr2line in toolchain_path.
    if isinstance(sym_paths, basestring):
      sym_paths = sym_paths.split(';')
    matches = glob.glob(os.path.join(self.settings['toolchain_path'],
                                     '*addr2line'))
    if not matches:
      raise exceptions.MemoryInspectorException('Cannot find addr2line')
    addr2line_path = matches[0]

    # First group all the stack frames together by lib path.
    frames_by_lib = {}
    for nheap in native_heaps:
      for stack_frame in nheap.stack_frames.itervalues():
        frames = frames_by_lib.setdefault(stack_frame.exec_file_rel_path, set())
        frames.add(stack_frame)

    # The symbolization process is asynchronous (but yet single-threaded). This
    # callback is invoked every time the symbol info for a stack frame is ready.
    def SymbolizeAsyncCallback(sym_info, stack_frame):
      if not sym_info.name:
        return
      sym = symbol.Symbol(name=sym_info.name,
                          source_file_path=sym_info.source_path,
                          line_number=sym_info.source_line)
      symbols.Add(stack_frame.exec_file_rel_path, stack_frame.offset, sym)
      # TODO(primiano): support inline sym info (i.e. |sym_info.inlined_by|).

    # Perform the actual symbolization (ordered by lib).
    for exec_file_rel_path, frames in frames_by_lib.iteritems():
      # Look up the full path of the symbol in the sym paths.
      exec_file_name = posixpath.basename(exec_file_rel_path)
      if exec_file_rel_path.startswith('/'):
        exec_file_rel_path = exec_file_rel_path[1:]
      exec_file_abs_path = ''
      for sym_path in sym_paths:
        # First try to locate the symbol file following the full relative path
        # e.g. /host/syms/ + /system/lib/foo.so => /host/syms/system/lib/foo.so.
        exec_file_abs_path = os.path.join(sym_path, exec_file_rel_path)
        if os.path.exists(exec_file_abs_path):
          break
        # If no luck, try looking just for the file name in the sym path,
        # e.g. /host/syms/ + (/system/lib/)foo.so => /host/syms/foo.so
        exec_file_abs_path = os.path.join(sym_path, exec_file_name)
        if os.path.exists(exec_file_abs_path):
          break
      # Libraries with no symbol file in any sym path are silently skipped.
      if not os.path.exists(exec_file_abs_path):
        continue

      symbolizer = elf_symbolizer.ELFSymbolizer(
          elf_file_path=exec_file_abs_path,
          addr2line_path=addr2line_path,
          callback=SymbolizeAsyncCallback,
          inlines=False)

      # Kick off the symbolizer and then wait that all callbacks are issued.
      for stack_frame in sorted(frames, key=lambda x: x.offset):
        symbolizer.SymbolizeAsync(stack_frame.offset, stack_frame)
      symbolizer.Join()

    return symbols

  @property
  def name(self):
    return 'Android'
class AndroidDevice(backends.Device):
  """Android-specific implementation of the core |Device| interface."""

  _SETTINGS_KEYS = {
      'native_symbol_paths': 'Semicolon-sep. list of native libs search path'}

  def __init__(self, backend, underlying_device):
    super(AndroidDevice, self).__init__(
        backend=backend,
        settings=backends.Settings(AndroidDevice._SETTINGS_KEYS))
    self.underlying_device = underlying_device
    self._id = underlying_device.old_interface.GetDevice()
    self._name = underlying_device.old_interface.GetProductModel()
    self._sys_stats = None               # Cached ps_ext JSON dump.
    self._last_device_stats = None       # Baseline for relative CPU stats.
    self._sys_stats_last_update = None   # Timestamp of the cached dump.
    self._processes = {}  # pid (int) -> |Process|
    self._initialized = False

  def Initialize(self):
    """Starts adb root and deploys the prebuilt binaries on initialization."""
    try:
      self.underlying_device.EnableRoot()
    except device_errors.CommandFailedError as e:
      # Try to deploy memdump and ps_ext anyway.
      # TODO(jbudorick) Handle this exception appropriately after interface
      # conversions are finished.
      logging.error(str(e))

    # Download (from GCS) and deploy prebuilt helper binaries on the device.
    self._DeployPrebuiltOnDeviceIfNeeded(_MEMDUMP_PREBUILT_PATH,
                                         _MEMDUMP_PATH_ON_DEVICE)
    self._DeployPrebuiltOnDeviceIfNeeded(_PSEXT_PREBUILT_PATH,
                                         _PSEXT_PATH_ON_DEVICE)
    self._initialized = True

  def IsNativeTracingEnabled(self):
    """Checks for the libc.debug.malloc system property."""
    return bool(self.underlying_device.old_interface.system_properties[
        _DLMALLOC_DEBUG_SYSPROP])

  def EnableNativeTracing(self, enabled):
    """Enables libc.debug.malloc and restarts the shell."""
    assert(self._initialized)
    prop_value = '1' if enabled else ''
    self.underlying_device.old_interface.system_properties[
        _DLMALLOC_DEBUG_SYSPROP] = prop_value
    assert(self.IsNativeTracingEnabled())
    # The libc.debug property takes effect only after restarting the Zygote.
    self.underlying_device.old_interface.RestartShell()

  def ListProcesses(self):
    """Returns a sequence of |AndroidProcess|."""
    self._RefreshProcessesList()
    return self._processes.itervalues()

  def GetProcess(self, pid):
    """Returns an instance of |AndroidProcess| (None if not found)."""
    assert(isinstance(pid, int))
    self._RefreshProcessesList()
    return self._processes.get(pid)

  def GetStats(self):
    """Returns an instance of |DeviceStats| with the OS CPU/Memory stats."""
    cur = self.UpdateAndGetSystemStats()
    old = self._last_device_stats or cur  # Handle 1st call case.
    # NOTE(review): on Python 2 these are integer divisions; presumably the
    # tick counts are large enough that the truncation is negligible.
    uptime = cur['time']['ticks'] / cur['time']['rate']
    ticks = max(1, cur['time']['ticks'] - old['time']['ticks'])

    # Per-core CPU usage (%) relative to the previous GetStats() call.
    cpu_times = []
    for i in xrange(len(cur['cpu'])):
      cpu_time = {
          'usr': 100 * (cur['cpu'][i]['usr'] - old['cpu'][i]['usr']) / ticks,
          'sys': 100 * (cur['cpu'][i]['sys'] - old['cpu'][i]['sys']) / ticks,
          'idle': 100 * (cur['cpu'][i]['idle'] - old['cpu'][i]['idle']) / ticks}
      # The idle tick count on many Linux kernels is frozen when the CPU is
      # offline, and bumps up (compensating all the offline period) when it
      # reactivates. For this reason it needs to be saturated at [0, 100].
      cpu_time['idle'] = max(0, min(cpu_time['idle'],
                                    100 - cpu_time['usr'] - cpu_time['sys']))
      cpu_times.append(cpu_time)

    # Memory figures come straight from /proc/meminfo-style keys in the
    # ps_ext dump (note the trailing ':' in the key names).
    memory_stats = {'Free': cur['mem']['MemFree:'],
                    'Cache': cur['mem']['Buffers:'] + cur['mem']['Cached:'],
                    'Swap': cur['mem']['SwapCached:'],
                    'Anonymous': cur['mem']['AnonPages:'],
                    'Kernel': cur['mem']['VmallocUsed:']}

    self._last_device_stats = cur

    return backends.DeviceStats(uptime=uptime,
                                cpu_times=cpu_times,
                                memory_stats=memory_stats)

  def UpdateAndGetSystemStats(self):
    """Grabs and caches system stats through ps_ext (max cache TTL = 0.5s).

    Rationale of caching: avoid invoking adb too often, it is slow.
    """
    assert(self._initialized)
    max_ttl = datetime.timedelta(seconds=0.5)
    if (self._sys_stats_last_update and
        datetime.datetime.now() - self._sys_stats_last_update <= max_ttl):
      return self._sys_stats

    dump_out = '\n'.join(
        self.underlying_device.RunShellCommand(_PSEXT_PATH_ON_DEVICE))
    stats = json.loads(dump_out)
    assert(all([x in stats for x in ['cpu', 'processes', 'time', 'mem']])), (
        'ps_ext returned a malformed JSON dictionary.')
    self._sys_stats = stats
    self._sys_stats_last_update = datetime.datetime.now()
    return self._sys_stats

  def _RefreshProcessesList(self):
    """Syncs the cached pid -> |AndroidProcess| map with the ps_ext dump."""
    sys_stats = self.UpdateAndGetSystemStats()
    processes_to_delete = set(self._processes.keys())
    for pid, proc in sys_stats['processes'].iteritems():
      pid = int(pid)
      process = self._processes.get(pid)
      # A recycled pid with a different name means a brand new process.
      if not process or process.name != proc['name']:
        process = AndroidProcess(self, int(pid), proc['name'])
        self._processes[pid] = process
      processes_to_delete.discard(pid)
    # Drop entries for processes that no longer exist.
    for pid in processes_to_delete:
      del self._processes[pid]

  def _DeployPrebuiltOnDeviceIfNeeded(self, local_path, path_on_device):
    """Pushes |local_path| to the device unless its md5 already matches."""
    # TODO(primiano): check that the md5 binary is built-in also on pre-KK.
    # Alternatively add tools/android/md5sum to prebuilts and use that one.
    prebuilts_fetcher.GetIfChanged(local_path)
    with open(local_path, 'rb') as f:
      local_hash = hashlib.md5(f.read()).hexdigest()
    device_md5_out = self.underlying_device.RunShellCommand(
        'md5 "%s"' % path_on_device)
    if local_hash in device_md5_out:
      return
    self.underlying_device.old_interface.Adb().Push(local_path, path_on_device)
    self.underlying_device.RunShellCommand('chmod 755 "%s"' % path_on_device)

  @property
  def name(self):
    """Device name, as defined in the |backends.Device| interface."""
    return self._name

  @property
  def id(self):
    """Device id, as defined in the |backends.Device| interface."""
    return self._id
class AndroidProcess(backends.Process):
  """Android-specific implementation of the core |Process| interface."""

  def __init__(self, device, pid, name):
    super(AndroidProcess, self).__init__(device, pid, name)
    self._last_sys_stats = None  # Baseline for relative CPU stats.

  def DumpMemoryMaps(self):
    """Grabs and parses memory maps through memdump."""
    cmd = '%s %d' % (_MEMDUMP_PATH_ON_DEVICE, self.pid)
    dump_out = self.device.underlying_device.RunShellCommand(cmd)
    return memdump_parser.Parse(dump_out)

  def DumpNativeHeap(self):
    """Grabs and parses malloc traces through am dumpheap -n."""
    # TODO(primiano): grab also mmap bt (depends on pending framework change).
    dump_file_path = _DUMPHEAP_OUT_FILE_PATH % self.pid
    cmd = 'am dumpheap -n %d %s' % (self.pid, dump_file_path)
    self.device.underlying_device.RunShellCommand(cmd)
    # TODO(primiano): Some pre-KK versions of Android might need a sleep here
    # as, IIRC, 'am dumpheap' did not wait for the dump to be completed before
    # returning. Double check this and either add a sleep or remove this TODO.
    dump_out = self.device.underlying_device.old_interface.GetFileContents(
        dump_file_path)
    # Clean up the temporary dump file on the device.
    self.device.underlying_device.RunShellCommand('rm %s' % dump_file_path)
    return dumpheap_native_parser.Parse(dump_out)

  def GetStats(self):
    """Calculate process CPU/VM stats (CPU stats are relative to last call)."""
    # Process must retain its own copy of _last_sys_stats because CPU times
    # are calculated relatively to the last GetStats() call (for the process).
    cur_sys_stats = self.device.UpdateAndGetSystemStats()
    old_sys_stats = self._last_sys_stats or cur_sys_stats
    # ps_ext keys its 'processes' dict by stringified pid.
    cur_proc_stats = cur_sys_stats['processes'].get(str(self.pid))
    old_proc_stats = old_sys_stats['processes'].get(str(self.pid))

    # The process might have gone in the meanwhile.
    if (not cur_proc_stats or not old_proc_stats):
      return None

    # NOTE(review): Python 2 integer division throughout; presumably tick
    # counts are large enough for the truncation to be acceptable.
    run_time = (((cur_sys_stats['time']['ticks'] -
                cur_proc_stats['start_time']) / cur_sys_stats['time']['rate']))
    ticks = max(1, cur_sys_stats['time']['ticks'] -
                old_sys_stats['time']['ticks'])
    # Average CPU usage (%) across all cores since the last GetStats() call.
    cpu_usage = (100 *
                 ((cur_proc_stats['user_time'] + cur_proc_stats['sys_time']) -
                  (old_proc_stats['user_time'] + old_proc_stats['sys_time'])) /
                 ticks) / len(cur_sys_stats['cpu'])
    proc_stats = backends.ProcessStats(
        threads=cur_proc_stats['n_threads'],
        run_time=run_time,
        cpu_usage=cpu_usage,
        vm_rss=cur_proc_stats['vm_rss'],
        page_faults=(
            (cur_proc_stats['maj_faults'] + cur_proc_stats['min_faults']) -
            (old_proc_stats['maj_faults'] + old_proc_stats['min_faults'])))
    self._last_sys_stats = cur_sys_stats
    return proc_stats
| TeamEOS/external_chromium_org | tools/memory_inspector/memory_inspector/backends/android/android_backend.py | Python | bsd-3-clause | 15,383 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
{
    'name': 'Contacts',
    'category': 'Sales/CRM',
    # Ordering of the app within the apps menu.
    'sequence': 150,
    'summary': 'Centralize your address book',
    'description': """
This module gives you a quick view of your contacts directory, accessible from your home page.
You can track your vendors, customers and other contacts.
""",
    # 'mail' provides the chatter/activity features used on contact records.
    'depends': ['base', 'mail'],
    'data': [
        'views/contact_views.xml',
    ],
    # Shown as a standalone application in the apps list.
    'application': True,
    'license': 'LGPL-3',
}
| jeremiahyan/odoo | addons/contacts/__manifest__.py | Python | gpl-3.0 | 546 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# http://peak.telecommunity.com/DevCenter/setuptools#namespace-packages
# Declare this directory as a namespace package so several distributions can
# contribute sub-modules under the same top-level package name.
try:
    __import__('pkg_resources').declare_namespace(__name__)
except ImportError:
    # setuptools is not installed: fall back to the stdlib pkgutil mechanism.
    from pkgutil import extend_path
    __path__ = extend_path(__path__, __name__)
| pygloo/emencia-django-layout-designer | emencia/django/layout/designer/__init__.py | Python | mit | 287 |
# -*- coding: utf-8 -*-
#
# Savu documentation build configuration file, created by
# sphinx-quickstart on Tue Sep 16 10:25:51 2014.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
from os import path

# Modules that autodoc should mock out instead of importing for real (they
# require compiled/native dependencies not available on the docs builder).
autodoc_mock_imports = ['numpy', 'mpi4py', 'astra', 'scipy']

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# Every package directory autodoc must be able to import from. Inserting each
# entry at position 0, in this order, reproduces the previous behaviour of one
# sys.path.insert(0, ...) call per line (the last entry ends up first).
_SAVU_PACKAGE_DIRS = [
    '../../',
    '../../savu',
    '../../savu/core',
    '../../savu/transports',
    '../../savu/data',
    '../../savu/data/transport_data',
    '../../savu/data/data_structures',
    '../../savu/plugins',
    '../../savu/plugins/driver',
    '../../savu/plugins/filters',
    '../../savu/plugins/filters/azimuthal_integrators',
    '../../savu/plugins/filters/component_analysis',
    '../../savu/plugins/filters/fitters',
    '../../savu/plugins/corrections',
    '../../savu/plugins/reconstructions',
    '../../savu/plugins/reconstructions/astra_recons',
    '../../savu/plugins/loaders',
    '../../savu/plugins/loaders/multi_modal_loaders',
    '../../savu/plugins/savers',
]
for _savu_dir in _SAVU_PACKAGE_DIRS:
    sys.path.insert(0, os.path.abspath(_savu_dir))

# Debug aid: show the effective import path during the docs build. Use the
# print() function form so this works under both Python 2 and Python 3
# (the old 'print sys.path' statement is a SyntaxError on Python 3).
print(sys.path)
# -- General configuration -----------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.ifconfig', 'sphinx.ext.autosummary', 'sphinx.ext.viewcode']

# Automatically generate stub pages for autosummary directives.
autosummary_generate = True

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'Savu'
copyright = u'2014, Mark Basham'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_templates']

# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = ['savu.']


# -- Options for HTML output ---------------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
html_theme = 'classic'

# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []

# The name for this set of Sphinx documents.  If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar.  Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = '../../logo_small.png'

# The name of an image file (within the static path) to use as favicon of the
# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_domain_indices = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it.  The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None

# Output file base name for HTML help builder.
htmlhelp_basename = 'Savudoc'


# -- Options for LaTeX output --------------------------------------------------

latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',

# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',

# Additional stuff for the LaTeX preamble.
#'preamble': '',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
  ('index', 'Savu.tex', u'Savu Documentation',
   u'Mark Basham', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# If true, show page references after internal links.
#latex_show_pagerefs = False

# If true, show URL addresses after external links.
#latex_show_urls = False

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_domain_indices = True


# -- Options for manual page output --------------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'savu', u'Savu Documentation',
     [u'Mark Basham'], 1)
]

# If true, show URL addresses after external links.
#man_show_urls = False


# -- Options for Texinfo output ------------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
  ('index', 'Savu', u'Savu Documentation',
   u'Mark Basham', 'Savu', 'One line description of project.',
   'Miscellaneous'),
]

# Documents to append as an appendix to all manuals.
#texinfo_appendices = []

# If false, no module index is generated.
#texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'


# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
| rcatwood/Savu | doc/source/conf.py | Python | gpl-3.0 | 9,449 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Isaku Yamahata <yamahata at private email ne jp>
# <yamahata at valinux co jp>
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from ryu.app.client import OFPClient
from nova import flags
from nova import log as logging
from nova.network import linux_net
from nova.openstack.common import cfg
from nova import utils
LOG = logging.getLogger(__name__)

# REST endpoint (host:port) of the Ryu OpenFlow controller that this
# interface driver registers OVS ports with.
ryu_linux_net_opt = cfg.StrOpt('linuxnet_ovs_ryu_api_host',
                               default='127.0.0.1:8080',
                               help='Openflow Ryu REST API host:port')

FLAGS = flags.FLAGS
# Make the option available as FLAGS.linuxnet_ovs_ryu_api_host.
FLAGS.register_opt(ryu_linux_net_opt)
def _get_datapath_id(bridge_name):
    """Return the OVS datapath id recorded for bridge *bridge_name*.

    Runs ``ovs-vsctl get Bridge <name> datapath_id`` and strips the
    surrounding whitespace and quote characters from its output.
    """
    stdout, _stderr = utils.execute('ovs-vsctl', 'get', 'Bridge',
                                    bridge_name, 'datapath_id',
                                    run_as_root=True)
    return stdout.strip().strip('"')
def _get_port_no(dev):
    """Return the OpenFlow port number of interface *dev* as an int.

    Runs ``ovs-vsctl get Interface <dev> ofport`` and converts the
    printed port number to an integer.
    """
    stdout, _stderr = utils.execute('ovs-vsctl', 'get', 'Interface', dev,
                                    'ofport', run_as_root=True)
    return int(stdout.strip())
class LinuxOVSRyuInterfaceDriver(linux_net.LinuxOVSInterfaceDriver):
    """OVS interface driver that also registers created ports with Ryu.

    Extends the stock OVS driver so that every plugged interface is
    announced to the Ryu OpenFlow controller via its REST client.
    """

    def __init__(self):
        super(LinuxOVSRyuInterfaceDriver, self).__init__()

        LOG.debug('ryu rest host %s', FLAGS.linuxnet_ovs_ryu_api_host)
        self.ryu_client = OFPClient(FLAGS.linuxnet_ovs_ryu_api_host)
        self.datapath_id = _get_datapath_id(
            FLAGS.linuxnet_ovs_integration_bridge)

        if linux_net.binary_name == 'nova-network':
            # NOTE(review): presumably gateway-to-gateway forwarding is
            # dropped here because inter-network traffic is handled via
            # OpenFlow instead of the Linux IP stack — confirm.
            for tables in [linux_net.iptables_manager.ipv4,
                           linux_net.iptables_manager.ipv6]:
                tables['filter'].add_rule(
                    'FORWARD',
                    '--in-interface gw-+ --out-interface gw-+ -j DROP')
            linux_net.iptables_manager.apply()

    def plug(self, network, mac_address, gateway=True):
        """Plug the interface, then register the OVS port with Ryu.

        :param network: network dict; its 'uuid' key identifies the
                        network to the Ryu controller
        :param mac_address: MAC address for the new interface
        :param gateway: whether the interface acts as a gateway
        :returns: whatever the parent driver's plug() returns (the device)
        """
        # Fixed typo in the log format string ("mac_adress" -> "mac_address").
        LOG.debug("network %s mac_address %s gateway %s",
                  network, mac_address, gateway)
        ret = super(LinuxOVSRyuInterfaceDriver, self).plug(
            network, mac_address, gateway)

        port_no = _get_port_no(self.get_dev(network))
        self.ryu_client.create_port(network['uuid'], self.datapath_id, port_no)
        return ret
| savi-dev/quantum | quantum/plugins/ryu/nova/linux_net.py | Python | apache-2.0 | 2,805 |
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
#
# Astropy documentation build configuration file.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this file.
#
# All configuration values have a default. Some values are defined in
# the global Astropy configuration which is loaded here before anything else.
# See astropy.sphinx.conf for which values are set there.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('..'))
# IMPORTANT: the above commented section was generated by sphinx-quickstart, but
# is *NOT* appropriate for astropy or Astropy affiliated packages. It is left
# commented out with this explanation to make it clear why this should not be
# done. If the sys.path entry above is added, when the astropy.sphinx.conf
# import occurs, it will import the *source* version of astropy instead of the
# version installed (if invoked as "make html" or directly with sphinx), or the
# version in the build directory (if "python setup.py build_sphinx" is used).
# Thus, any C-extensions that are needed to build the documentation will *not*
# be accessible, and the documentation will not build correctly.
import datetime
import os
import sys
try:
    import astropy_helpers
except ImportError:
    # Building from inside the docs/ directory?
    if os.path.basename(os.getcwd()) == 'doc':
        a_h_path = os.path.abspath(os.path.join('..', 'astropy_helpers'))
        if os.path.isdir(a_h_path):
            # Insert after position 0 so the script directory stays first.
            sys.path.insert(1, a_h_path)

# Load all of the global Astropy configuration
from astropy_helpers.sphinx.conf import *

# Get configuration information from setup.cfg
try:
    # Python 2 module name first; fall back to the Python 3 name.
    from ConfigParser import ConfigParser
except ImportError:
    from configparser import ConfigParser
conf = ConfigParser()
conf.read([os.path.join(os.path.dirname(__file__), '..', 'setup.cfg')])
# All [metadata] keys from setup.cfg as a plain dict of strings.
setup_cfg = dict(conf.items('metadata'))
# -- General configuration ----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.2'
# To perform a Sphinx version check that needs to be more specific than
# major.minor, call `check_sphinx_version("x.y.z")` here.
# check_sphinx_version("1.2.1")
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns.append('_templates')

# This is added to the end of RST files - a good place to put substitutions to
# be used globally.
rst_epilog += """
"""

# A list of ignored prefixes for module index sorting.
modindex_common_prefix = ['ginga.']

# -- Project information ------------------------------------------------------

# This does not *have* to match the package name, but typically does
project = setup_cfg['package_name']
author = setup_cfg['author']
copyright = '{0}, {1}'.format(
    datetime.datetime.now().year, setup_cfg['author'])

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.

# Import the documented package so its __version__ can be read below.
__import__(setup_cfg['package_name'])
package = sys.modules[setup_cfg['package_name']]

# The short X.Y version.
version = package.__version__.split('-', 1)[0]
# The full version, including alpha/beta/rc tags.
release = package.__version__
# -- Options for HTML output ---------------------------------------------------
# A NOTE ON HTML THEMES
# The global astropy configuration uses a custom theme, 'bootstrap-astropy',
# which is installed along with astropy. A different theme can be used or
# the options for this theme can be modified by overriding some of the
# variables set in the global configuration. The variables set in the
# global configuration are listed below, commented out.
# Add any paths that contain custom themes here, relative to this directory.
# To use a different custom theme, add the directory containing the theme.
#html_theme_path = []
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes. To override the custom theme, set this to the
# name of a builtin theme or the name of a custom theme in html_theme_path.
#html_theme = None
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = ''
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = ''
# The name for this set of Sphinx documents.  If None, it defaults to
# "<project> v<release> documentation".
html_title = '{0} v{1}'.format(project, release)

# Output file base name for HTML help builder.
htmlhelp_basename = project + 'doc'

# -- Options for LaTeX output --------------------------------------------------

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [('index', project + '.tex', project + u' Documentation',
                    author, 'manual')]

# -- Options for manual page output --------------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
# Section 1: user commands.
man_pages = [('index', project.lower(), project + u' Documentation',
              [author], 1)]
## -- Options for the edit_on_github extension ----------------------------------------

# ``edit_on_github`` arrives from setup.cfg as a *string* (e.g. "True").
# Compare it directly instead of eval()-ing arbitrary config text: eval is
# unsafe on untrusted input and raised TypeError when the key was missing.
if setup_cfg.get('edit_on_github', 'False').lower() == 'true':
    extensions += ['astropy_helpers.sphinx.ext.edit_on_github']

    # Import the package's version module to pick the branch to link to.
    versionmod = __import__(setup_cfg['package_name'] + '.version')
    edit_on_github_project = setup_cfg['github_project']
    if versionmod.version.release:
        # Released builds link to the matching version tag.
        edit_on_github_branch = "v" + versionmod.version.version
    else:
        # Development builds link to the tip of master.
        edit_on_github_branch = "master"

    edit_on_github_source_root = ""
    edit_on_github_doc_root = "docs"
| rupak0577/ginga | doc/conf.py | Python | bsd-3-clause | 6,363 |
import wx
import wx.lib.masked.numctrl
from vistas.core.timeline import Timeline
from vistas.ui.controls.float_ctrl import FloatCtrl, EVT_FLOAT
class ExportOptionsDialog(wx.Dialog):
    """ A Dialog for configuring parameters for an export encoder.

    Lets the user choose the output type (video or image sequence) and keeps
    two linked inputs consistent: total export length in seconds and
    timestamps rendered per second.
    """

    VIDEO = 0
    IMAGE = 1

    def __init__(self, parent, id, enable_frames_input=True):
        """Build the dialog.

        :param parent: parent window
        :param id: wx window id
        :param enable_frames_input: whether the timestamps-per-second field
            is editable
        """
        super().__init__(parent, id, "Export Options", style=wx.CAPTION | wx.STAY_ON_TOP)

        main_panel = wx.Panel(self, wx.ID_ANY)
        encoder_static = wx.StaticText(main_panel, wx.ID_ANY, "Export As:")
        self.encoder_choice = wx.Choice(main_panel, wx.ID_ANY, size=wx.Size(220, -1))
        self.encoder_choice.Append("Video File")
        self.encoder_choice.Append("Individual Image Files")
        self.encoder_choice.SetSelection(0)

        initial_export_length = 30.0
        export_length_static = wx.StaticText(main_panel, wx.ID_ANY, "Length of export (in seconds):")
        self.export_length_ctrl = FloatCtrl(main_panel, wx.ID_ANY, value=initial_export_length, size=wx.Size(50, -1))

        # Derive the initial timestamps/second from the current timeline size.
        initial_export_timestamps = Timeline.app().num_timestamps / initial_export_length
        export_frames_static = wx.StaticText(main_panel, wx.ID_ANY, "Timestamps per second:")
        self.export_frames_ctrl = FloatCtrl(main_panel, wx.ID_ANY, value=initial_export_timestamps, size=wx.Size(50, -1))
        self.export_frames_ctrl.Enable(enable_frames_input)

        main_sizer = wx.BoxSizer(wx.VERTICAL)
        self.SetSizer(main_sizer)
        main_panel_sizer = wx.BoxSizer(wx.VERTICAL)
        main_panel.SetSizer(main_panel_sizer)
        main_panel_sizer.Add(encoder_static)
        main_panel_sizer.Add(self.encoder_choice, 0, wx.EXPAND)

        export_length_sizer = wx.BoxSizer(wx.HORIZONTAL)
        export_length_sizer.Add(export_length_static, 0, wx.RIGHT, 5)
        export_length_sizer.AddStretchSpacer(2)
        export_length_sizer.Add(self.export_length_ctrl, 0, wx.RIGHT, 5)
        main_panel_sizer.Add(export_length_sizer, 0, wx.ALL | wx.EXPAND, 10)

        export_frames_sizer = wx.BoxSizer(wx.HORIZONTAL)
        export_frames_sizer.Add(export_frames_static, 0, wx.RIGHT, 5)
        export_frames_sizer.AddStretchSpacer(2)
        export_frames_sizer.Add(self.export_frames_ctrl, 0, wx.RIGHT, 5)
        main_panel_sizer.Add(export_frames_sizer, 0, wx.ALL | wx.EXPAND, 10)

        main_sizer.Add(main_panel, 1, wx.EXPAND | wx.ALL, 5)
        main_sizer.Add(self.CreateButtonSizer(wx.OK | wx.CANCEL), 0, wx.EXPAND | wx.ALL, 5)

        self.export_length_ctrl.Bind(EVT_FLOAT, self.OnExportLengthInput)
        self.export_frames_ctrl.Bind(EVT_FLOAT, self.OnExportFramesInput)
        self.Fit()

    def EncoderSelection(self):
        """Return VIDEO or IMAGE for the current selection, else None."""
        choice = self.encoder_choice.GetSelection()
        if choice in (self.VIDEO, self.IMAGE):
            return choice
        return None

    @property
    def export_length(self):
        """Export length in seconds, clamped to a minimum of 1."""
        val = self.export_length_ctrl.GetValue()
        return val if val > 0 else 1

    def OnExportLengthInput(self, event):
        """Recompute timestamps/second when the length field changes."""
        if self.export_frames_ctrl.IsEnabled():
            export_timestamps = Timeline.app().num_timestamps / self.export_length
            self.export_frames_ctrl.ChangeValue(str(export_timestamps))

    def OnExportFramesInput(self, event):
        """Recompute the export length when timestamps/second changes."""
        input_frames = float(self.export_frames_ctrl.GetValue())
        if input_frames <= 0:
            # Guard against ZeroDivisionError while the user is still typing
            # (empty or zero input); leave the length field unchanged.
            return
        self.export_length_ctrl.ChangeValue(str(Timeline.app().num_timestamps / input_frames))
| VISTAS-IVES/pyvistas | source/vistas/ui/windows/export_options_dialog.py | Python | bsd-3-clause | 3,508 |
from django import template
from ..models import Media
# Template-tag registry; ``register`` is the module-level name Django's
# template engine looks up when loading a templatetags module.
register = template.Library()
@register.inclusion_tag('tmdb_tags/_recent_media.html')
def tmdb_recent_media():
    """Render the five most recently rated media items."""
    return {'recent_media': Media.objects.recently_rated(max_media=5)}
| rbeagrie/django-tmdb | lib/django_tmdb/templatetags/tmdb_tags.py | Python | mit | 289 |
"""
Copyright 2015 Ricky LeDew
This file is part of Death 87.
Death 87 is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Death 87 is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import pygame
class Groups:
    """Bundle of the pygame sprite groups used by the game."""

    def __init__(self):
        # One empty sprite group per category of game object.
        for group_name in ('visible', 'sprites', 'walls',
                           'text', 'particles', 'emitters'):
            setattr(self, group_name, pygame.sprite.Group())

    def addtogroup(self, object, group):
        """Add *object* to *group* (thin wrapper around ``Group.add``)."""
        group.add(object)
#! /usr/bin/env python3
def zeropad_right(base, digits, index):
    """Unimplemented stub.

    NOTE(review): intent inferred from the name only — presumably right-pads
    *base* with zeros out to *digits* places for position *index*; confirm
    before implementing.
    """
    pass
def digits_of_index(index):
    """Unimplemented stub.

    NOTE(review): signature suggests it decomposes *index* into its digits;
    confirm the intended contract before implementing.
    """
    pass
if __name__ == '__main__':
    # ``sys`` was used here without ever being imported, so executing the
    # module directly crashed with NameError instead of printing the
    # intended message. Import locally: it is only needed on this path.
    import sys
    sys.exit(f"This file [{__file__}] is meant to be imported, "
             "not executed directly.")
##
#
| zerosyntax/zerotools | timetree/timetree/__init__.py | Python | mit | 253 |
# -*- coding: utf-8 -*-
from flask import Blueprint, render_template, session, url_for, flash, request, jsonify
from werkzeug.utils import redirect
from fpage.utils import login_required
import fpage.user.models
import fpage.comment.models
import fpage.submission.models
# Admin blueprint: every route below is mounted under the /admin prefix.
blueprint = Blueprint("admin", __name__, url_prefix='/admin',
                      static_folder="../static")
@blueprint.route("/")
@login_required
def main():
admin = fpage.user.models.User.query.filter_by(is_admin=True).first()
if admin:
if admin.username != session['username']:
return redirect(url_for('submission.page'))
else:
return redirect(url_for('submission.page'))
else:
user = fpage.user.models.User.query.filter_by(username=session['username']).first()
user.update(is_admin=True)
flash('Added as admin', 'success')
return render_template('submission.page')
@blueprint.route('/remove', methods=['POST'])
@login_required
def remove():
    """Admin-only endpoint removing a comment, submission, or user by id.

    Expects ``object_type`` ('comment' | 'submission' | 'user') and a
    numeric ``object_id`` in the POST form. Returns a small JSON status:
    'removed', 'error', or 'unknown'.
    """
    admin_user = fpage.user.models.User.query.filter_by(
        is_admin=True, username=session['username']).first()
    if not admin_user:
        # Only the admin may delete objects.
        return jsonify({"data": "error"})

    object_type = request.form['object_type']
    try:
        # Narrowed from a bare except: int() only raises these two here.
        object_id = int(request.form['object_id'])
    except (TypeError, ValueError):
        return jsonify({"data": "error"})

    # Dispatch table instead of an if/elif chain over the model classes.
    model_by_type = {
        'comment': fpage.comment.models.Comment,
        'submission': fpage.submission.models.Submission,
        'user': fpage.user.models.User,
    }
    model = model_by_type.get(object_type)
    if model is None:
        return jsonify({"data": "unknown"})

    # Resolves the old TODO: verify the object exists before deleting it,
    # instead of crashing with AttributeError on a missing id.
    obj = model.query.filter_by(id=object_id).first()
    if obj is None:
        return jsonify({"data": "error"})
    obj.delete()
    return jsonify({"data": "removed"})
| Nikola-K/fpage | fpage/admin/views.py | Python | apache-2.0 | 2,128 |
#!/usr/bin/env python
"""
Sandbox management script
"""
import os
from django.core import management
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings.demo")
management.execute_from_command_line()
| sveetch/django-icomoon | sandbox/manage.py | Python | mit | 243 |
# -*- coding: utf-8 -*-
# Copyright (c) 2009 - 2014 Detlev Offenbach <detlev@die-offenbachs.de>
#
"""
Module implementing the history manager.
"""
from __future__ import unicode_literals
import os
from PyQt5.QtCore import pyqtSignal, pyqtSlot, QFileInfo, QDateTime, QDate, \
QTime, QUrl, QTimer, QFile, QIODevice, QByteArray, QDataStream, \
QTemporaryFile
from PyQt5.QtWebKit import QWebHistoryInterface, QWebSettings
from E5Gui import E5MessageBox
from Utilities.AutoSaver import AutoSaver
import Utilities
import Preferences
# Version tag written before every serialized entry; entries carrying a
# different tag are skipped when the history file is loaded.
HISTORY_VERSION = 42
class HistoryEntry(object):
    """
    Class implementing a history entry.
    """
    def __init__(self, url=None, dateTime=None, title=None):
        """
        Constructor
        
        @param url URL of the history entry (string)
        @param dateTime date and time this entry was created (QDateTime)
        @param title title string for the history entry (string)
        """
        # ``x or default`` replaces the dated ``x and x or default`` idiom;
        # both yield the default for any falsy value, so behavior is the same.
        self.url = url or ""
        self.dateTime = dateTime or QDateTime()
        self.title = title or ""
    
    def __eq__(self, other):
        """
        Special method determining equality.
        
        @param other reference to the history entry to compare against
            (HistoryEntry)
        @return flag indicating equality (boolean)
        """
        return other.title == self.title and \
            other.url == self.url and \
            other.dateTime == self.dateTime
    
    def __lt__(self, other):
        """
        Special method determining less relation.
        
        Note: History is sorted in reverse order by date and time
        
        @param other reference to the history entry to compare against
            (HistoryEntry)
        @return flag indicating less (boolean)
        """
        # Inverted on purpose: newer entries sort before older ones.
        return self.dateTime > other.dateTime
    
    def userTitle(self):
        """
        Public method to get the title of the history entry.
        
        @return title of the entry (string)
        """
        if not self.title:
            # Fall back to the file name part of the URL, then the full URL.
            page = QFileInfo(QUrl(self.url).path()).fileName()
            if page:
                return page
            return self.url
        return self.title
class HistoryManager(QWebHistoryInterface):
    """
    Class implementing the history manager.
    
    @signal historyCleared() emitted after the history has been cleared
    @signal historyReset() emitted after the history has been reset
    @signal entryAdded(HistoryEntry) emitted after a history entry has been
        added
    @signal entryRemoved(HistoryEntry) emitted after a history entry has been
        removed
    @signal entryUpdated(int) emitted after a history entry has been updated
    @signal historySaved() emitted after the history was saved
    """
    historyCleared = pyqtSignal()
    historyReset = pyqtSignal()
    entryAdded = pyqtSignal(HistoryEntry)
    entryRemoved = pyqtSignal(HistoryEntry)
    entryUpdated = pyqtSignal(int)
    historySaved = pyqtSignal()
    
    def __init__(self, parent=None):
        """
        Constructor
        
        @param parent reference to the parent object (QObject)
        """
        super(HistoryManager, self).__init__(parent)
        
        self.__saveTimer = AutoSaver(self, self.save)
        self.__daysToExpire = Preferences.getHelp("HistoryLimit")
        self.__history = []
        self.__lastSavedUrl = ""
        
        self.__expiredTimer = QTimer(self)
        self.__expiredTimer.setSingleShot(True)
        self.__expiredTimer.timeout.connect(self.__checkForExpired)
        
        self.__frequencyTimer = QTimer(self)
        self.__frequencyTimer.setSingleShot(True)
        self.__frequencyTimer.timeout.connect(self.__refreshFrequencies)
        
        self.entryAdded.connect(self.__saveTimer.changeOccurred)
        self.entryRemoved.connect(self.__saveTimer.changeOccurred)
        
        self.__load()
        
        from .HistoryModel import HistoryModel
        from .HistoryFilterModel import HistoryFilterModel
        from .HistoryTreeModel import HistoryTreeModel
        
        self.__historyModel = HistoryModel(self, self)
        self.__historyFilterModel = \
            HistoryFilterModel(self.__historyModel, self)
        self.__historyTreeModel = \
            HistoryTreeModel(self.__historyFilterModel, self)
        
        super(HistoryManager, self).setDefaultInterface(self)
        self.__startFrequencyTimer()
    
    def close(self):
        """
        Public method to close the history manager.
        """
        # remove history items on application exit
        if self.__daysToExpire == -2:
            self.clear()
        self.__saveTimer.saveIfNeccessary()
    
    def history(self):
        """
        Public method to return the history.
        
        @return reference to the list of history entries (list of HistoryEntry)
        """
        return self.__history[:]
    
    def setHistory(self, history, loadedAndSorted=False):
        """
        Public method to set a new history.
        
        @param history reference to the list of history entries to be set
            (list of HistoryEntry)
        @param loadedAndSorted flag indicating that the list is sorted
            (boolean)
        """
        self.__history = history[:]
        if not loadedAndSorted:
            self.__history.sort()
        
        self.__checkForExpired()
        
        if loadedAndSorted:
            try:
                self.__lastSavedUrl = self.__history[0].url
            except IndexError:
                self.__lastSavedUrl = ""
        else:
            self.__lastSavedUrl = ""
            self.__saveTimer.changeOccurred()
        self.historyReset.emit()
    
    def historyContains(self, url):
        """
        Public method to check the history for an entry.
        
        @param url URL to check for (string)
        @return flag indicating success (boolean)
        """
        return self.__historyFilterModel.historyContains(url)
    
    def _addHistoryEntry(self, itm):
        """
        Protected method to add a history item.
        
        @param itm reference to the history item to add (HistoryEntry)
        """
        globalSettings = QWebSettings.globalSettings()
        if globalSettings.testAttribute(QWebSettings.PrivateBrowsingEnabled):
            # never record history while private browsing is active
            return
        
        self.__history.insert(0, itm)
        self.entryAdded.emit(itm)
        if len(self.__history) == 1:
            self.__checkForExpired()
    
    def _removeHistoryEntry(self, itm):
        """
        Protected method to remove a history item.
        
        @param itm reference to the history item to remove (HistoryEntry)
        """
        self.__lastSavedUrl = ""
        self.__history.remove(itm)
        self.entryRemoved.emit(itm)
    
    def addHistoryEntry(self, url):
        """
        Public method to add a history entry.
        
        @param url URL to be added (string)
        """
        cleanurl = QUrl(url)
        if cleanurl.scheme() not in ["eric", "about"]:
            if cleanurl.password():
                # don't save the password in the history
                cleanurl.setPassword("")
            if cleanurl.host():
                cleanurl.setHost(cleanurl.host().lower())
            itm = HistoryEntry(cleanurl.toString(),
                               QDateTime.currentDateTime())
            self._addHistoryEntry(itm)
    
    def updateHistoryEntry(self, url, title):
        """
        Public method to update a history entry.
        
        @param url URL of the entry to update (string)
        @param title title of the entry to update (string)
        """
        cleanurl = QUrl(url)
        if cleanurl.scheme() not in ["eric", "about"]:
            for index in range(len(self.__history)):
                if url == self.__history[index].url:
                    self.__history[index].title = title
                    self.__saveTimer.changeOccurred()
                    if not self.__lastSavedUrl:
                        self.__lastSavedUrl = self.__history[index].url
                    self.entryUpdated.emit(index)
                    break
    
    def removeHistoryEntry(self, url, title=""):
        """
        Public method to remove a history entry.
        
        @param url URL of the entry to remove (QUrl)
        @param title title of the entry to remove (string)
        """
        for index in range(len(self.__history)):
            if url == QUrl(self.__history[index].url) and \
               (not title or title == self.__history[index].title):
                self._removeHistoryEntry(self.__history[index])
                break
    
    def historyModel(self):
        """
        Public method to get a reference to the history model.
        
        @return reference to the history model (HistoryModel)
        """
        return self.__historyModel
    
    def historyFilterModel(self):
        """
        Public method to get a reference to the history filter model.
        
        @return reference to the history filter model (HistoryFilterModel)
        """
        return self.__historyFilterModel
    
    def historyTreeModel(self):
        """
        Public method to get a reference to the history tree model.
        
        @return reference to the history tree model (HistoryTreeModel)
        """
        return self.__historyTreeModel
    
    def __checkForExpired(self):
        """
        Private slot to check entries for expiration.
        """
        if self.__daysToExpire < 0 or len(self.__history) == 0:
            return
        
        now = QDateTime.currentDateTime()
        nextTimeout = 0
        
        while self.__history:
            checkForExpired = QDateTime(self.__history[-1].dateTime)
            checkForExpired.setDate(
                checkForExpired.date().addDays(self.__daysToExpire))
            if now.daysTo(checkForExpired) > 7:
                nextTimeout = 7 * 86400
            else:
                nextTimeout = now.secsTo(checkForExpired)
            if nextTimeout > 0:
                break
            
            itm = self.__history.pop(-1)
            self.__lastSavedUrl = ""
            self.entryRemoved.emit(itm)
        self.__saveTimer.saveIfNeccessary()
        
        if nextTimeout > 0:
            self.__expiredTimer.start(nextTimeout * 1000)
    
    def daysToExpire(self):
        """
        Public method to get the days for entry expiration.
        
        @return days for entry expiration (integer)
        """
        return self.__daysToExpire
    
    def setDaysToExpire(self, limit):
        """
        Public method to set the days for entry expiration.
        
        @param limit days for entry expiration (integer)
        """
        if self.__daysToExpire == limit:
            return
        
        self.__daysToExpire = limit
        self.__checkForExpired()
        self.__saveTimer.changeOccurred()
    
    def preferencesChanged(self):
        """
        Public method to indicate a change of preferences.
        """
        self.setDaysToExpire(Preferences.getHelp("HistoryLimit"))
    
    @pyqtSlot()
    def clear(self, period=0):
        """
        Public slot to clear the complete history.
        
        @param period history period in milliseconds to be cleared (integer)
        """
        if period == 0:
            self.__history = []
            self.historyReset.emit()
        else:
            breakMS = QDateTime.currentMSecsSinceEpoch() - period
            while self.__history and \
                (QDateTime(self.__history[0].dateTime).toMSecsSinceEpoch() >
                 breakMS):
                itm = self.__history.pop(0)
                self.entryRemoved.emit(itm)
        self.__lastSavedUrl = ""
        self.__saveTimer.changeOccurred()
        self.__saveTimer.saveIfNeccessary()
        self.historyCleared.emit()
    
    def getFileName(self):
        """
        Public method to get the file name of the history file.
        
        @return name of the history file (string)
        """
        return os.path.join(Utilities.getConfigDir(), "browser", "history")
    
    def reload(self):
        """
        Public method to reload the history.
        """
        self.__load()
    
    def __load(self):
        """
        Private method to load the saved history entries from disk.
        """
        historyFile = QFile(self.getFileName())
        if not historyFile.exists():
            return
        if not historyFile.open(QIODevice.ReadOnly):
            E5MessageBox.warning(
                None,
                self.tr("Loading History"),
                self.tr(
                    """<p>Unable to open history file <b>{0}</b>.<br/>"""
                    """Reason: {1}</p>""")
                # Bug fix: fileName is a method; without the call the message
                # showed a bound-method repr instead of the file name.
                .format(historyFile.fileName(), historyFile.errorString()))
            return
        
        history = []
        
        # double check, that the history file is sorted as it is read
        needToSort = False
        lastInsertedItem = HistoryEntry()
        data = QByteArray(historyFile.readAll())
        stream = QDataStream(data, QIODevice.ReadOnly)
        stream.setVersion(QDataStream.Qt_4_6)
        while not stream.atEnd():
            ver = stream.readUInt32()
            if ver != HISTORY_VERSION:
                continue
            itm = HistoryEntry()
            itm.url = Utilities.readStringFromStream(stream)
            stream >> itm.dateTime
            itm.title = Utilities.readStringFromStream(stream)
            
            if not itm.dateTime.isValid():
                continue
            
            if itm == lastInsertedItem:
                if not lastInsertedItem.title and len(history) > 0:
                    history[0].title = itm.title
                continue
            
            if not needToSort and history and lastInsertedItem < itm:
                needToSort = True
            
            history.insert(0, itm)
            lastInsertedItem = itm
        historyFile.close()
        
        if needToSort:
            history.sort()
        
        self.setHistory(history, True)
        
        # if the history had to be sorted, rewrite the history sorted
        if needToSort:
            self.__lastSavedUrl = ""
            self.__saveTimer.changeOccurred()
    
    def save(self):
        """
        Public slot to save the history entries to disk.
        """
        historyFile = QFile(self.getFileName())
        if not historyFile.exists():
            self.__lastSavedUrl = ""
        
        saveAll = self.__lastSavedUrl == ""
        first = len(self.__history) - 1
        if not saveAll:
            # find the first one to save
            for index in range(len(self.__history)):
                if self.__history[index].url == self.__lastSavedUrl:
                    first = index - 1
                    break
        if first == len(self.__history) - 1:
            saveAll = True
        
        if saveAll:
            # use a temporary file when saving everything
            f = QTemporaryFile()
            f.setAutoRemove(False)
            opened = f.open()
        else:
            f = historyFile
            opened = f.open(QIODevice.Append)
        
        if not opened:
            E5MessageBox.warning(
                None,
                self.tr("Saving History"),
                self.tr(
                    """<p>Unable to open history file <b>{0}</b>.<br/>"""
                    """Reason: {1}</p>""")
                .format(f.fileName(), f.errorString()))
            return
        
        for index in range(first, -1, -1):
            data = QByteArray()
            stream = QDataStream(data, QIODevice.WriteOnly)
            stream.setVersion(QDataStream.Qt_4_6)
            itm = self.__history[index]
            stream.writeUInt32(HISTORY_VERSION)
            stream.writeString(itm.url.encode())
            stream << itm.dateTime
            stream.writeString(itm.title.encode('utf-8'))
            f.write(data)
        
        f.close()
        if saveAll:
            if historyFile.exists() and not historyFile.remove():
                E5MessageBox.warning(
                    None,
                    self.tr("Saving History"),
                    self.tr(
                        """<p>Error removing old history file <b>{0}</b>."""
                        """<br/>Reason: {1}</p>""")
                    .format(historyFile.fileName(),
                            historyFile.errorString()))
            if not f.copy(historyFile.fileName()):
                E5MessageBox.warning(
                    None,
                    self.tr("Saving History"),
                    self.tr(
                        """<p>Error moving new history file over old one """
                        """(<b>{0}</b>).<br/>Reason: {1}</p>""")
                    .format(historyFile.fileName(), f.errorString()))
        self.historySaved.emit()
        try:
            self.__lastSavedUrl = self.__history[0].url
        except IndexError:
            self.__lastSavedUrl = ""
    
    def __refreshFrequencies(self):
        """
        Private slot to recalculate the refresh frequencies.
        """
        self.__historyFilterModel.recalculateFrequencies()
        self.__startFrequencyTimer()
    
    def __startFrequencyTimer(self):
        """
        Private method to start the timer to recalculate the frequencies.
        """
        tomorrow = QDateTime(QDate.currentDate().addDays(1), QTime(3, 0))
        self.__frequencyTimer.start(
            QDateTime.currentDateTime().secsTo(tomorrow) * 1000)
| davy39/eric | Helpviewer/History/HistoryManager.py | Python | gpl-3.0 | 17,735 |
# -*- coding: utf-8 -*-
""" OneLogin_Saml2_Auth class
Copyright (c) 2014, OneLogin, Inc.
All rights reserved.
Main class of OneLogin's Python Toolkit.
Initializes the SP SAML instance
"""
from base64 import b64encode
from urllib import quote_plus
import dm.xmlsec.binding as xmlsec
from onelogin.saml2.settings import OneLogin_Saml2_Settings
from onelogin.saml2.response import OneLogin_Saml2_Response
from onelogin.saml2.errors import OneLogin_Saml2_Error
from onelogin.saml2.logout_response import OneLogin_Saml2_Logout_Response
from onelogin.saml2.constants import OneLogin_Saml2_Constants
from onelogin.saml2.utils import OneLogin_Saml2_Utils
from onelogin.saml2.logout_request import OneLogin_Saml2_Logout_Request
from onelogin.saml2.authn_request import OneLogin_Saml2_Authn_Request
class OneLogin_Saml2_Auth(object):
"""
This class implements the SP SAML instance.
Defines the methods that you can invoke in your application in
order to add SAML support (initiates sso, initiates slo, processes a
SAML Response, a Logout Request or a Logout Response).
"""
    def __init__(self, request_data, old_settings=None, custom_base_path=None):
        """
        Initializes the SP SAML instance.

        :param request_data: Request Data
        :type request_data: dict

        :param old_settings: Optional. SAML Toolkit Settings
        :type old_settings: dict|object

        :param custom_base_path: Optional. Path where are stored the settings file and the cert folder
        :type custom_base_path: string
        """
        self.__request_data = request_data
        self.__settings = OneLogin_Saml2_Settings(old_settings, custom_base_path)
        # Fields below are populated by process_response()/process_slo().
        self.__attributes = []           # attributes extracted from the SAML assertion
        self.__nameid = None             # NameID of the authenticated subject
        self.__session_index = None      # IdP session index (used for SLO)
        self.__session_expiration = None # SessionNotOnOrAfter from the assertion
        self.__authenticated = False     # True once a response validated successfully
        self.__errors = []               # error codes collected during processing
        self.__error_reason = None       # human-readable reason for the last error
        self.__last_request_id = None    # id of the last AuthnRequest/LogoutRequest built
def get_settings(self):
"""
Returns the settings info
:return: Setting info
:rtype: OneLogin_Saml2_Setting object
"""
return self.__settings
def set_strict(self, value):
"""
Set the strict mode active/disable
:param value:
:type value: bool
"""
assert isinstance(value, bool)
self.__settings.set_strict(value)
    def process_response(self, request_id=None):
        """
        Process the SAML Response sent by the IdP.

        :param request_id: Is an optional argumen. Is the ID of the AuthNRequest sent by this SP to the IdP.
        :type request_id: string

        :raises: OneLogin_Saml2_Error.SAML_RESPONSE_NOT_FOUND, when a POST with a SAMLResponse is not found
        """
        # Reset the error state for this processing round.
        self.__errors = []

        if 'post_data' in self.__request_data and 'SAMLResponse' in self.__request_data['post_data']:
            # AuthnResponse -- HTTP_POST Binding
            response = OneLogin_Saml2_Response(self.__settings, self.__request_data['post_data']['SAMLResponse'])

            if response.is_valid(self.__request_data, request_id):
                # Cache the assertion data on the instance; the getters
                # elsewhere in this class read these fields.
                self.__attributes = response.get_attributes()
                self.__nameid = response.get_nameid()
                self.__session_index = response.get_session_index()
                self.__session_expiration = response.get_session_not_on_or_after()
                self.__authenticated = True
            else:
                self.__errors.append('invalid_response')
                self.__error_reason = response.get_error()

        else:
            self.__errors.append('invalid_binding')
            raise OneLogin_Saml2_Error(
                'SAML Response not found, Only supported HTTP_POST Binding',
                OneLogin_Saml2_Error.SAML_RESPONSE_NOT_FOUND
            )
    def process_slo(self, keep_local_session=False, request_id=None, delete_session_cb=None):
        """
        Process the SAML Logout Response / Logout Request sent by the IdP.

        :param keep_local_session: When True the local session is preserved; otherwise it is destroyed via delete_session_cb
        :type keep_local_session: bool

        :param request_id: The ID of the LogoutRequest sent by this SP to the IdP
        :type request_id: string

        :param delete_session_cb: Callback used to destroy the local session
        :type delete_session_cb: callable

        :returns: Redirection url (only when answering an IdP-initiated LogoutRequest), otherwise None
        """
        self.__errors = []
        # SLO messages arrive via the HTTP-Redirect binding, i.e. in get_data.
        if 'get_data' in self.__request_data and 'SAMLResponse' in self.__request_data['get_data']:
            # LogoutResponse from the IdP answering an SLO initiated by this SP.
            logout_response = OneLogin_Saml2_Logout_Response(self.__settings, self.__request_data['get_data']['SAMLResponse'])
            if not logout_response.is_valid(self.__request_data, request_id):
                self.__errors.append('invalid_logout_response')
                self.__error_reason = logout_response.get_error()
            elif logout_response.get_status() != OneLogin_Saml2_Constants.STATUS_SUCCESS:
                self.__errors.append('logout_not_success')
            elif not keep_local_session:
                OneLogin_Saml2_Utils.delete_local_session(delete_session_cb)
        elif 'get_data' in self.__request_data and 'SAMLRequest' in self.__request_data['get_data']:
            # IdP-initiated LogoutRequest: validate, optionally destroy the
            # session, then answer with a LogoutResponse redirect.
            logout_request = OneLogin_Saml2_Logout_Request(self.__settings, self.__request_data['get_data']['SAMLRequest'])
            if not logout_request.is_valid(self.__request_data):
                self.__errors.append('invalid_logout_request')
                self.__error_reason = logout_request.get_error()
            else:
                if not keep_local_session:
                    OneLogin_Saml2_Utils.delete_local_session(delete_session_cb)
                # Echo the request id back as InResponseTo.
                in_response_to = logout_request.id
                response_builder = OneLogin_Saml2_Logout_Response(self.__settings)
                response_builder.build(in_response_to)
                logout_response = response_builder.get_response()
                parameters = {'SAMLResponse': logout_response}
                if 'RelayState' in self.__request_data['get_data']:
                    parameters['RelayState'] = self.__request_data['get_data']['RelayState']
                else:
                    # Default RelayState: the current URL without query string.
                    parameters['RelayState'] = OneLogin_Saml2_Utils.get_self_url_no_query(self.__request_data)
                security = self.__settings.get_security_data()
                if 'logoutResponseSigned' in security and security['logoutResponseSigned']:
                    # Sign response + RelayState per the HTTP-Redirect binding.
                    parameters['SigAlg'] = security['signatureAlgorithm']
                    parameters['Signature'] = self.build_response_signature(logout_response, parameters.get('RelayState', None), security['signatureAlgorithm'])
                return self.redirect_to(self.get_slo_url(), parameters)
        else:
            self.__errors.append('invalid_binding')
            raise OneLogin_Saml2_Error(
                'SAML LogoutRequest/LogoutResponse not found. Only supported HTTP_REDIRECT Binding',
                OneLogin_Saml2_Error.SAML_LOGOUTMESSAGE_NOT_FOUND
            )
def redirect_to(self, url=None, parameters={}):
"""
Redirects the user to the url past by parameter or to the url that we defined in our SSO Request.
:param url: The target URL to redirect the user
:type url: string
:param parameters: Extra parameters to be passed as part of the url
:type parameters: dict
:returns: Redirection url
"""
if url is None and 'RelayState' in self.__request_data['get_data']:
url = self.__request_data['get_data']['RelayState']
return OneLogin_Saml2_Utils.redirect(url, parameters, request_data=self.__request_data)
def is_authenticated(self):
"""
Checks if the user is authenticated or not.
:returns: True if is authenticated, False if not
:rtype: bool
"""
return self.__authenticated
def get_attributes(self):
"""
Returns the set of SAML attributes.
:returns: SAML attributes
:rtype: dict
"""
return self.__attributes
def get_nameid(self):
"""
Returns the nameID.
:returns: NameID
:rtype: string
"""
return self.__nameid
def get_session_index(self):
"""
Returns the SessionIndex from the AuthnStatement.
:returns: The SessionIndex of the assertion
:rtype: string
"""
return self.__session_index
def get_session_expiration(self):
"""
Returns the SessionNotOnOrAfter from the AuthnStatement.
:returns: The SessionNotOnOrAfter of the assertion
:rtype: DateTime|null
"""
return self.__session_expiration
def get_errors(self):
"""
Returns a list with code errors if something went wrong
:returns: List of errors
:rtype: list
"""
return self.__errors
def get_last_error_reason(self):
"""
Returns the reason for the last error
:returns: Reason of the last error
:rtype: None | string
"""
return self.__error_reason
def get_attribute(self, name):
"""
Returns the requested SAML attribute.
:param name: Name of the attribute
:type name: string
:returns: Attribute value if exists or []
:rtype: string
"""
assert isinstance(name, basestring)
value = None
if self.__attributes and name in self.__attributes.keys():
value = self.__attributes[name]
return value
def get_last_request_id(self):
"""
:returns: The ID of the last Request SAML message generated.
:rtype: string
"""
return self.__last_request_id
    def login(self, return_to=None, force_authn=False, is_passive=False):
        """
        Initiates the SSO process.

        :param return_to: Optional argument. The target URL the user should be redirected to after login.
        :type return_to: string

        :param force_authn: Optional argument. When True the AuthnRequest will set ForceAuthn='true'.
        :type force_authn: bool

        :param is_passive: Optional argument. When True the AuthnRequest will set IsPassive='true'.
        :type is_passive: bool

        :returns: Redirection url
        """
        authn_request = OneLogin_Saml2_Authn_Request(self.__settings, force_authn, is_passive)
        # Remember the request id so a later response can be matched to it.
        self.__last_request_id = authn_request.get_id()
        saml_request = authn_request.get_request()
        parameters = {'SAMLRequest': saml_request}
        if return_to is not None:
            parameters['RelayState'] = return_to
        else:
            # Default RelayState: the current URL without the query string.
            parameters['RelayState'] = OneLogin_Saml2_Utils.get_self_url_no_query(self.__request_data)
        security = self.__settings.get_security_data()
        if security.get('authnRequestsSigned', False):
            # Sign request + RelayState per the HTTP-Redirect binding rules.
            parameters['SigAlg'] = security['signatureAlgorithm']
            parameters['Signature'] = self.build_request_signature(saml_request, parameters['RelayState'], security['signatureAlgorithm'])
        return self.redirect_to(self.get_sso_url(), parameters)
def logout(self, return_to=None, name_id=None, session_index=None, nq=None):
"""
Initiates the SLO process.
:param return_to: Optional argument. The target URL the user should be redirected to after logout.
:type return_to: string
:param name_id: The NameID that will be set in the LogoutRequest.
:type name_id: string
:param session_index: SessionIndex that identifies the session of the user.
:type session_index: string
:param nq: IDP Name Qualifier
:type: string
:returns: Redirection url
"""
slo_url = self.get_slo_url()
if slo_url is None:
raise OneLogin_Saml2_Error(
'The IdP does not support Single Log Out',
OneLogin_Saml2_Error.SAML_SINGLE_LOGOUT_NOT_SUPPORTED
)
if name_id is None and self.__nameid is not None:
name_id = self.__nameid
logout_request = OneLogin_Saml2_Logout_Request(
self.__settings,
name_id=name_id,
session_index=session_index,
nq=nq
)
self.__last_request_id = logout_request.id
saml_request = logout_request.get_request()
parameters = {'SAMLRequest': logout_request.get_request()}
if return_to is not None:
parameters['RelayState'] = return_to
else:
parameters['RelayState'] = OneLogin_Saml2_Utils.get_self_url_no_query(self.__request_data)
security = self.__settings.get_security_data()
if security.get('logoutRequestSigned', False):
parameters['SigAlg'] = security['signatureAlgorithm']
parameters['Signature'] = self.build_request_signature(saml_request, parameters['RelayState'], security['signatureAlgorithm'])
return self.redirect_to(slo_url, parameters)
def get_sso_url(self):
"""
Gets the SSO url.
:returns: An URL, the SSO endpoint of the IdP
:rtype: string
"""
idp_data = self.__settings.get_idp_data()
return idp_data['singleSignOnService']['url']
def get_slo_url(self):
"""
Gets the SLO url.
:returns: An URL, the SLO endpoint of the IdP
:rtype: string
"""
url = None
idp_data = self.__settings.get_idp_data()
if 'singleLogoutService' in idp_data.keys() and 'url' in idp_data['singleLogoutService']:
url = idp_data['singleLogoutService']['url']
return url
def build_request_signature(self, saml_request, relay_state, sign_algorithm=OneLogin_Saml2_Constants.RSA_SHA1):
"""
Builds the Signature of the SAML Request.
:param saml_request: The SAML Request
:type saml_request: string
:param relay_state: The target URL the user should be redirected to
:type relay_state: string
:param sign_algorithm: Signature algorithm method
:type sign_algorithm: string
"""
return self.__build_signature(saml_request, relay_state, 'SAMLRequest', sign_algorithm)
    def build_response_signature(self, saml_response, relay_state, sign_algorithm=OneLogin_Saml2_Constants.RSA_SHA1):
        """
        Builds the Signature of the SAML Response.

        :param saml_response: The SAML Response
        :type saml_response: string

        :param relay_state: The target URL the user should be redirected to
        :type relay_state: string

        :param sign_algorithm: Signature algorithm method
        :type sign_algorithm: string
        """
        return self.__build_signature(saml_response, relay_state, 'SAMLResponse', sign_algorithm)
    def __build_signature(self, saml_data, relay_state, saml_type, sign_algorithm=OneLogin_Saml2_Constants.RSA_SHA1):
        """
        Builds the Signature over the query-string form of a SAML message
        ('<saml_type>=...&RelayState=...&SigAlg=...'), signed with the SP
        private key, and returns it base64-encoded.

        :param saml_data: The SAML Data
        :type saml_data: string

        :param relay_state: The RelayState accompanying the message
        :type relay_state: string

        :param saml_type: The kind of message being signed
        :type saml_type: string SAMLRequest | SAMLResponse

        :param sign_algorithm: Signature algorithm method
        :type sign_algorithm: string

        :raises: OneLogin_Saml2_Error.SP_CERTS_NOT_FOUND when no SP private key is available
        """
        assert saml_type in ['SAMLRequest', 'SAMLResponse']
        # Load the key into the xmlsec context
        key = self.__settings.get_sp_key()
        if not key:
            raise OneLogin_Saml2_Error(
                "Trying to sign the %s but can't load the SP private key" % saml_type,
                OneLogin_Saml2_Error.SP_CERTS_NOT_FOUND
            )
        xmlsec.initialize()
        dsig_ctx = xmlsec.DSigCtx()
        dsig_ctx.signKey = xmlsec.Key.loadMemory(key, xmlsec.KeyDataFormatPem, None)
        # Signed string is the URL-encoded query fragment, in this exact order.
        saml_data_str = '%s=%s' % (saml_type, quote_plus(saml_data))
        # NOTE(review): quote_plus would raise if relay_state were None
        # (process_slo passes parameters.get('RelayState', None)) -- confirm
        # callers always supply a RelayState.
        relay_state_str = 'RelayState=%s' % quote_plus(relay_state)
        alg_str = 'SigAlg=%s' % quote_plus(sign_algorithm)
        sign_data = [saml_data_str, relay_state_str, alg_str]
        msg = '&'.join(sign_data)
        # Sign the metadata with our private key.
        sign_algorithm_transform_map = {
            OneLogin_Saml2_Constants.DSA_SHA1: xmlsec.TransformDsaSha1,
            OneLogin_Saml2_Constants.RSA_SHA1: xmlsec.TransformRsaSha1,
            OneLogin_Saml2_Constants.RSA_SHA256: xmlsec.TransformRsaSha256,
            OneLogin_Saml2_Constants.RSA_SHA384: xmlsec.TransformRsaSha384,
            OneLogin_Saml2_Constants.RSA_SHA512: xmlsec.TransformRsaSha512
        }
        # Unknown algorithm identifiers fall back to RSA-SHA1.
        sign_algorithm_transform = sign_algorithm_transform_map.get(sign_algorithm, xmlsec.TransformRsaSha1)
        signature = dsig_ctx.signBinary(str(msg), sign_algorithm_transform)
        return b64encode(signature)
| sandeep048/python-saml | src/onelogin/saml2/auth.py | Python | bsd-3-clause | 16,929 |
# -*- coding: utf-8 -*-
# Copyright (C) 2010 Eduardo Robles Elvira <edulix AT gmail DOT com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django import forms
from django.contrib.auth.forms import UserCreationForm, UserChangeForm
from django.utils.translation import ugettext_lazy as _
from tinymce.widgets import TinyMCE
from models import Profile
from tbmessages.models import Message
from timebank.utils import (FormCharField, FormEmailField, FormDateField,
FormCaptchaField)
from serv.forms import CustomCharField
class RegisterForm(UserCreationForm):
    """Signup form collecting the profile data required to join the timebank."""

    birth_date = FormDateField(label=_("Birth date"),
        input_formats=("%d/%m/%Y",))
    first_name = FormCharField(label=_("Name"), required=True, max_length=30)
    last_name = FormCharField(label=_("Last name"), required=True, max_length=30)
    email = FormEmailField(label=_("Email address"), required=True)
    address = FormCharField(label=_("Address"), required=True,
        max_length=100, help_text=_("Example: Avda. Molina, 12, Sevilla"))
    description = FormCharField(label=_("Personal description"), required=True,
        max_length=300, widget=forms.Textarea())
    # help_texts wrapped in _() for consistency with the other fields of
    # this form (they were plain, untranslatable strings before).
    land_line = FormCharField(label=_("Land line"), max_length=20,
        required=False, help_text=_("Example: 954 123 111"))
    mobile_tlf = FormCharField(label=_("Mobile Telephone"), max_length=20,
        required=False, help_text=_("Example: 651 333 111"))
    captcha = FormCaptchaField()

    class Meta:
        model = Profile
        fields = ('username', 'first_name', 'last_name', 'email', 'address', 'birth_date', 'description', 'land_line', 'mobile_tlf')
class EditProfileForm(forms.ModelForm):
    """Profile editing form; asks for the user's current password to confirm
    their identity before applying changes."""

    photo = forms.ImageField(label=_("Avatar"), required=False)
    birth_date = FormDateField(label=_("Birth date"),
        input_formats=("%d/%m/%Y",))
    first_name = FormCharField(label=_("Name"), required=True,
        max_length=30)
    last_name = FormCharField(label=_("Last name"), required=True, max_length=30)
    email = FormEmailField(label=_("Email address"), required=True)
    address = FormCharField(label=_("Address"), required=True,
        max_length=100, help_text=_("Example: Avda. Molina, 12, Sevilla"))
    description = FormCharField(label=_("Personal description"), required=True,
        max_length=300, widget=forms.Textarea())
    password1 = forms.CharField(label=_("Current password"),
        widget=forms.PasswordInput, required=True,
        help_text=_("Enter your current password to check your identity"))
    # help_texts wrapped in _() for consistency with the other fields.
    land_line = FormCharField(label=_("Land line"), max_length=20,
        required=False, help_text=_("Example: 954 123 111"))
    mobile_tlf = FormCharField(label=_("Mobile telephone"), max_length=20,
        required=False, help_text=_("Example: 651 333 111"))

    def __init__(self, request, *args, **kwargs):
        super(EditProfileForm, self).__init__(*args, **kwargs)
        # Keep the request so clean_password1 can reach request.user.
        self.request = request

    def clean_password1(self):
        """Validate that the submitted value is the user's current password."""
        password1 = self.cleaned_data["password1"]
        if not self.request.user.check_password(password1):
            # Bug fix: the previous message ("The two password fields didn't
            # match.") was copy-pasted from a password-change form; this
            # field holds the *current* password, so report that instead.
            raise forms.ValidationError(_("Your current password was entered incorrectly."))
        return password1

    class Meta:
        model = Profile
        hidden = ()
        fields = ('photo', 'first_name', 'last_name', 'email', 'address', 'birth_date',
                  'description', 'land_line', 'mobile_tlf', 'email_updates')
class RemoveForm(forms.Form):
    """Account-removal confirmation form asking the user for a reason."""

    # Bug fix: the two-part help_text string previously concatenated to
    # "...why you wantrmeove your user." (missing space + "rmeove" typo).
    reason = FormCharField(label=_("Reason"), required=True,
        min_length=10, max_length=300, widget=forms.Textarea(),
        help_text=_("Have we done something wrong? Please tell us why you "
        "want to remove your user."))
class PublicMessageForm(forms.ModelForm):
    # ModelForm over tbmessages.Message exposing only the message body;
    # the remaining Message fields are filled in by the view.
    class Meta:
        model = Message
        fields = ("body",)
class FindPeopleForm(forms.Form):
    """Search form filtering users by last-connection recency and username."""

    # '0' means "no filter"; the rest select a recency window.
    USER_CHOICES = (
        ('0', _('---------')),
        ('1', _(u'less than 24 hours ago')),
        ('2', _(u'less than one week ago')),
        ('3', _(u'less than one month ago')),
        ('4', _(u'less than 3 months ago')),
        ('5', _(u'less than 6 months ago')),
        ('6', _(u'less than one year ago')),
    )
    user_status = CustomCharField(label=_("User connected"),
        widget=forms.Select(choices=USER_CHOICES), required=False)
    username = forms.CharField(label=_("Username"), required=False)
    def as_url_args(self):
        # Serialize the raw submitted data as a query string so the search
        # can be reproduced via GET (Python 2: urlencode lives in urllib).
        import urllib
        return urllib.urlencode(self.data)
class FindPeople4AdminsForm(FindPeopleForm):
    """Admin-only variant adding 'more than ...' recency choices and a
    without-services filter."""

    USER_CHOICES = FindPeopleForm.USER_CHOICES + (
        ('7', _(u'more than a week ago')),
        ('8', _(u'more than one month ago')),
        ('9', _(u'more than 3 months ago')),
        ('10', _(u'more than 6 months ago')),
        ('11', _(u'more than one year')),
    )
    # Redeclared so the Select widget picks up the extended choices.
    user_status = CustomCharField(label=_("User connected"),
        widget=forms.Select(choices=USER_CHOICES), required=False)
    without_services = forms.BooleanField(label=_("Without services"), required=False)
class SendEmailToAllForm(forms.Form):
    # Admin broadcast form: subject plus plain-text body for a mass email.
    subject = forms.CharField(label=_(u'Subject'), required=True)
    message = forms.CharField(label=_(u'Message body'), required=True,
        widget=forms.Textarea)
| wadobo/timebank | user/forms.py | Python | agpl-3.0 | 5,777 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-08-10 10:32
from __future__ import unicode_literals
import _socket
from django.db import migrations, models
import edc_base.model_fields.userfield
class Migration(migrations.Migration):
    """Auto-generated: adds device_created/device_modified audit columns to
    both metadata models and re-aligns the edc_base user/hostname audit
    fields with their current definitions."""

    dependencies = [
        ('edc_metadata', '0006_auto_20170209_0924'),
    ]
    operations = [
        migrations.AddField(
            model_name='crfmetadata',
            name='device_created',
            field=models.CharField(blank=True, max_length=10),
        ),
        migrations.AddField(
            model_name='crfmetadata',
            name='device_modified',
            field=models.CharField(blank=True, max_length=10),
        ),
        migrations.AddField(
            model_name='requisitionmetadata',
            name='device_created',
            field=models.CharField(blank=True, max_length=10),
        ),
        migrations.AddField(
            model_name='requisitionmetadata',
            name='device_modified',
            field=models.CharField(blank=True, max_length=10),
        ),
        migrations.AlterField(
            model_name='crfmetadata',
            name='hostname_created',
            field=models.CharField(blank=True, default=_socket.gethostname, help_text='System field. (modified on create only)', max_length=60),
        ),
        migrations.AlterField(
            model_name='crfmetadata',
            name='user_created',
            field=edc_base.model_fields.userfield.UserField(blank=True, help_text='Updated by admin.save_model', max_length=50, verbose_name='user created'),
        ),
        migrations.AlterField(
            model_name='crfmetadata',
            name='user_modified',
            field=edc_base.model_fields.userfield.UserField(blank=True, help_text='Updated by admin.save_model', max_length=50, verbose_name='user modified'),
        ),
        migrations.AlterField(
            model_name='requisitionmetadata',
            name='hostname_created',
            field=models.CharField(blank=True, default=_socket.gethostname, help_text='System field. (modified on create only)', max_length=60),
        ),
        migrations.AlterField(
            model_name='requisitionmetadata',
            name='user_created',
            field=edc_base.model_fields.userfield.UserField(blank=True, help_text='Updated by admin.save_model', max_length=50, verbose_name='user created'),
        ),
        migrations.AlterField(
            model_name='requisitionmetadata',
            name='user_modified',
            field=edc_base.model_fields.userfield.UserField(blank=True, help_text='Updated by admin.save_model', max_length=50, verbose_name='user modified'),
        ),
    ]
| botswana-harvard/edc-entry | edc_metadata/migrations/0007_auto_20170810_1032.py | Python | gpl-2.0 | 2,713 |
#!/usr/bin/env python3
# Forked from https://gist.github.com/zPrototype/b211ae91e2b082420c350c28b6674170
import re
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--target", "-t", action="store", help="Enter the target address", required=True)
parser.add_argument("--dest", "-d", action="store", help="Enter the address where you want to redirect to",
                    required=True)
parser.add_argument("--output", "-o", action="store", help="Enter output file name")
args = parser.parse_args()

payloads = []
# Remove protocol from url
junk = re.compile(r"https?://")
target = junk.sub("", args.target)
dest = junk.sub("", args.dest)

# Substitute the target/destination hosts into every payload template and
# echo each generated payload to stdout.
with open("payloads.txt", "r") as handle:
    templates = handle.readlines()
for payload in templates:
    payload = payload.rstrip()
    payload = re.sub("TARGET", target, payload)
    payload = re.sub("DEST", dest, payload)
    print(payload)
    payloads.append(payload)

if args.output:
    # Plain loop instead of the original side-effect-only list
    # comprehension ([handle.write(...) for x in payloads]).
    with open(args.output, "w") as handle:
        for payload in payloads:
            handle.write(f"{payload}\n")
#!/usr/bin/env python3
#
# Copyright 2017 WebAssembly Community Group participants
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import sys
import shared
import support
def files_with_extensions(path, extensions):
    """Yield ``(filename, extension)`` pairs for the entries of *path* whose
    extension is one of *extensions*, in sorted filename order."""
    for entry in sorted(os.listdir(path)):
        extension = os.path.splitext(entry)[1]
        if extension in extensions:
            yield entry, extension
def generate_wat_files(llvm_bin, emscripten_root):
    """Compile every .c/.cpp/.s source in the lld test directory to wasm
    (clang + wasm-ld), disassemble each result into a .wat file next to the
    source, and remove the intermediate .o/.wasm artifacts.

    :param llvm_bin: directory containing the clang and wasm-ld binaries
    :param emscripten_root: emscripten checkout used for system includes
    """
    print('\n[ building wat files from C sources... ]\n')
    lld_path = os.path.join(shared.options.binaryen_test, 'lld')
    for src_file, ext in files_with_extensions(lld_path, ['.c', '.cpp', '.s']):
        print('..', src_file)
        obj_file = src_file.replace(ext, '.o')
        src_path = os.path.join(lld_path, src_file)
        # The original assigned obj_path twice; once is enough.
        obj_path = os.path.join(lld_path, obj_file)

        wasm_file = src_file.replace(ext, '.wasm')
        wat_file = src_file.replace(ext, '.wat')
        wasm_path = os.path.join(lld_path, wasm_file)
        wat_path = os.path.join(lld_path, wat_file)
        # Build-mode markers are encoded in the file name.
        is_shared = 'shared' in src_file
        is_64 = '64' in src_file
        compile_cmd = [
            os.path.join(llvm_bin, 'clang'), src_path, '-o', obj_path,
            '-mllvm', '-enable-emscripten-sjlj',
            '-c',
            '-nostdinc',
            '-Xclang', '-nobuiltininc',
            '-Xclang', '-nostdsysteminc',
            '-Xclang', '-I%s/system/include' % emscripten_root,
            '-O1',
        ]
        link_cmd = [
            os.path.join(llvm_bin, 'wasm-ld'), '-flavor', 'wasm',
            '-z', '-stack-size=1048576',
            obj_path, '-o', wasm_path,
            '--allow-undefined',
            '--export', '__wasm_call_ctors',
            '--export', '__start_em_asm',
            '--export', '__stop_em_asm',
            '--global-base=568',
        ]
        # We had a regression where this test only worked if debug names
        # were included.
        if 'longjmp' in src_file:
            link_cmd.append('--strip-debug')
        if is_shared:
            compile_cmd.append('-fPIC')
            compile_cmd.append('-fvisibility=default')
            link_cmd.append('-shared')
            link_cmd.append('--experimental-pic')
        else:
            link_cmd.append('--entry=main')
        if is_64:
            compile_cmd.append('--target=wasm64-emscripten')
            link_cmd.append('-mwasm64')
        else:
            compile_cmd.append('--target=wasm32-emscripten')
        try:
            support.run_command(compile_cmd)
            support.run_command(link_cmd)
            support.run_command(shared.WASM_DIS + [wasm_path, '-o', wat_path])
        finally:
            # Don't need the .o or .wasm files, don't leave them around
            shared.delete_from_orbit(obj_path)
            shared.delete_from_orbit(wasm_path)
if __name__ == '__main__':
    # Expects exactly two positional args: the LLVM bin directory and the
    # emscripten root (parsed by the shared test-suite option parser).
    if len(shared.options.positional_args) != 2:
        print('Usage: generate_lld_tests.py [llvm/bin/dir] [path/to/emscripten]')
        sys.exit(1)
    generate_wat_files(*shared.options.positional_args)
| WebAssembly/binaryen | scripts/test/generate_lld_tests.py | Python | apache-2.0 | 3,651 |
# tableizer-query.py
# Copyright (C) 2009-2013 PalominoDB, Inc.
#
# You may contact the maintainers at eng@palominodb.com.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import logging
import re
import sys
import traceback
from datetime import datetime, timedelta
from optparse import make_option
from django.conf import settings
from django.core.management.base import BaseCommand
from ttt.collector import CollectorRegistry
from ttt.formatter import Formatter
from ttt.models import TrackingTable
class Command(BaseCommand):
    """Management-command port of the ttt query tool.

    Builds a queryset over a chosen tracking statistic from the CLI options
    (--stat, --since, --where, --order, --limit, ...) and renders it with
    the selected output formatter, exiting with the formatter's status code.
    """

    option_list = BaseCommand.option_list + (
        make_option(
            '--stat',
            dest='stat',
            default='definition',
            help='Collect a single <statistic>. See --list-stats for available statistics.'
        ),
        make_option(
            '--list-stats',
            action='store_true',
            dest='list_stats',
            default=False,
            help='List available statistics.'
        ),
        make_option(
            '-o',
            '--output',
            dest='output',
            default='text',
            help='Specifies an output formatter. One of: email, text, nagios, rrd'
        ),
        make_option(
            '--debug',
            action='store_true',
            dest='debug',
            default=False,
            help='Make the tool VERY noisy.',
        ),
        make_option(
            '-s',
            '--since',
            dest='since',
            default='',
            help='Where <since> is something like: last(since the last collector run), 4h(4 hours), 1d(1 day), 1w(1 week)',
        ),
        make_option(
            '--raw',
            action='store_true',
            dest='output_raw',
            default=False,
            help='Disable \'ignore tables\' processing.',
        ),
        make_option(
            '--width',
            dest='output_width',
            default=None,
            help='Number of columns to format to.',
            type=int,
        ),
        make_option(
            '--full',
            action='store_true',
            dest='output_full',
            default=False,
            help='Full output',
        ),
        make_option(
            '--where',
            dest='where',
            default=None,
            help='Cannot presently be specified when \'last-collect\' is used with --since.',
        ),
        make_option(
            '--order',
            dest='order_by',
            default=None,
            help='Select columns to order by. Technically an SQL fragment. Same column list as above.',
        ),
        make_option(
            '--group',
            dest='group_by',
            default=None,
            help='(TODO) Comma separated list of columns to group by. Same columns as above.',
        ),
        make_option(
            '--select',
            dest='select_columns',
            default=None,
            help='(TODO) SQL fragment. Try running with debug to see your full query.',
        ),
        make_option(
            '--limit',
            dest='limit',
            default=None,
            help='Limit to N results.',
            type=int,
        ),
    )

    def handle(self, *args, **options):
        """Entry point: list the statistics or run the query and exit with
        the formatter's return value as the process exit code."""
        output_cfg = {}
        sql_conditions = {}
        find_type = 'normal'
        code = 0

        CollectorRegistry.load()

        if options.get('list_stats'):
            cols = CollectorRegistry.all()
            # print() call form is valid on both Python 2 and Python 3
            # (the original used Python-2-only print statements).
            print('Available Statistics:')
            for col in cols:
                print("{0:20} - {1}".format(col.stat.collector, col.desc))
            sys.exit(0)

        # --since accepts e.g. '4h', '1d', '1w', '30m', a bare number of
        # seconds, or the literal 'last'. Raw string avoids the invalid
        # escape sequences the plain '\d' literal produces on Python 3.
        since_regex = re.search(r'(\d+(?:\.?\d+)?)([hdwm])?', options.get('since', ''))
        if options.get('since') == 'last':
            find_type = 'last'
        elif since_regex is not None:
            num, unit = since_regex.groups()
            num = float(num)
            if unit == 'h':
                time = datetime.now() - timedelta(hours=num)
            elif unit == 'd':
                time = datetime.now() - timedelta(days=num)
            elif unit == 'w':
                time = datetime.now() - timedelta(weeks=num)
            elif unit == 'm':
                time = datetime.now() - timedelta(minutes=num)
            else:
                # No unit suffix: treat the number as seconds.
                time = datetime.now() - timedelta(seconds=num)
            sql_conditions['since'] = time

        if options.get('where') is not None:
            sql_conditions['where'] = options.get('where')
        if options.get('group_by') is not None:
            sql_conditions['group'] = options.get('group_by')
        if options.get('select_columns') is not None:
            sql_conditions['select'] = options.get('select_columns')
        if options.get('order_by') is not None:
            sql_conditions['order'] = options.get('order_by')
        if options.get('limit') is not None:
            sql_conditions['limit'] = options.get('limit')

        # Settings provide the defaults; CLI flags override them.
        for k, v in settings.REPORT_OPTIONS.items():
            output_cfg[k] = v
        output_cfg['full'] = options.get('output_full')
        output_cfg['raw'] = options.get('output_raw')
        output_cfg['display_width'] = options.get('output_width')

        output = Formatter.get_runner_for(options.get('output'))(sys.stderr)
        try:
            Model = TrackingTable._TrackingTable__tables.get(options.get('stat', 'definition'))
            if find_type == 'normal':
                query = Model.objects.all()
            else:
                query = Model.objects.find_most_recent_versions(Model)
            if sql_conditions.get('since') is not None:
                query = query.filter(run_time__gte=sql_conditions.get('since'))
            if sql_conditions.get('where') is not None:
                query = query.extra(where=[sql_conditions.get('where'), ])
            if sql_conditions.get('order') is not None:
                cols = sql_conditions.get('order').split(',')
                query = query.order_by(*cols)
            # Do reject_ignores filter before limit
            if not output_cfg.get('raw', False):
                if settings.USE_INCLUDE_NOT_IGNORE:
                    query = output.report_include(query)
                else:
                    query = output.reject_ignores(query)
            if sql_conditions.get('limit') is not None:
                query = query[:sql_conditions.get('limit')]
            code = output.format(query, output_cfg)
        except Exception:
            # 'except Exception, e' is Python-2-only syntax; the bound name
            # was unused anyway since the traceback is rendered here.
            tb = traceback.format_exc()
            if settings.SEND_CRASHREPORTS:
                logger = logging.getLogger('tableizer')
            else:
                logger = logging.getLogger('management_command')
            logger.error(tb)
        sys.exit(code)
| palominodb/tableizer | tableizer/ttt/management/commands/tableizer-query.py | Python | gpl-2.0 | 7,571 |
# -*- coding: utf-8 -*-
"""
werkzeug.contrib.lint
~~~~~~~~~~~~~~~~~~~~~
.. versionadded:: 0.5
This module provides a middleware that performs sanity checks of the WSGI
application. It checks that :pep:`333` is properly implemented and warns
on some common HTTP errors such as non-empty responses for 304 status
codes.
This module provides a middleware, the :class:`LintMiddleware`. Wrap your
application with it and it will warn about common problems with WSGI and
HTTP while your application is running.
It's strongly recommended to use it during development.
:copyright: (c) 2011 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from urlparse import urlparse
from warnings import warn
from werkzeug.datastructures import Headers
from werkzeug.http import is_entity_header
from werkzeug.wsgi import FileWrapper
class WSGIWarning(Warning):
    """Warning category for WSGI protocol violations."""


class HTTPWarning(Warning):
    """Warning category for questionable HTTP usage."""


def check_string(context, obj, stacklevel=3):
    """Emit a :class:`WSGIWarning` unless *obj* is a native string."""
    if type(obj) is str:
        return
    warn(WSGIWarning('%s requires bytestrings, got %s'
                     % (context, obj.__class__.__name__)))
class InputStream(object):
    """Wrapper for ``wsgi.input`` that warns about non-portable usage."""

    def __init__(self, stream):
        self._stream = stream

    def read(self, *args):
        """Proxy ``read`` and warn on unsafe or invalid argument counts."""
        nargs = len(args)
        if nargs == 0:
            warn(WSGIWarning('wsgi does not guarantee an EOF marker on the '
                             'input stream, thus making calls to '
                             'wsgi.input.read() unsafe. Conforming servers '
                             'may never return from this call.'),
                 stacklevel=2)
        elif nargs != 1:
            warn(WSGIWarning('too many parameters passed to wsgi.input.read()'),
                 stacklevel=2)
        return self._stream.read(*args)

    def readline(self, *args):
        """Proxy ``readline``; warn on no-arg / size-hint calls, reject more."""
        nargs = len(args)
        if nargs == 0:
            warn(WSGIWarning('Calls to wsgi.input.readline() without arguments'
                             ' are unsafe. Use wsgi.input.read() instead.'),
                 stacklevel=2)
        elif nargs == 1:
            warn(WSGIWarning('wsgi.input.readline() was called with a size hint. '
                             'WSGI does not support this, although it\'s available '
                             'on all major servers.'),
                 stacklevel=2)
        else:
            raise TypeError('too many arguments passed to wsgi.input.readline()')
        return self._stream.readline(*args)

    def __iter__(self):
        try:
            return iter(self._stream)
        except TypeError:
            warn(WSGIWarning('wsgi.input is not iterable.'), stacklevel=2)
            return iter(())

    def close(self):
        """WSGI applications must not close the input stream; warn and close."""
        warn(WSGIWarning('application closed the input stream!'),
             stacklevel=2)
        self._stream.close()
class ErrorStream(object):
    """Wrapper for ``wsgi.errors`` that validates everything written to it."""

    def __init__(self, stream):
        self._stream = stream

    def write(self, s):
        # Only native strings may be written to the error stream.
        check_string('wsgi.error.write()', s)
        self._stream.write(s)

    def flush(self):
        self._stream.flush()

    def writelines(self, seq):
        # Bug fix: the original called self.write(seq) -- writing the whole
        # sequence object once per line -- instead of writing each line.
        for line in seq:
            self.write(line)

    def close(self):
        # WSGI applications must not close the error stream; warn and close.
        warn(WSGIWarning('application closed the error stream!'),
             stacklevel=2)
        self._stream.close()
class GuardedWrite(object):
    """Wraps the ``write`` callable returned by ``start_response`` so that
    written chunks are type-checked and their sizes recorded in *chunks*."""

    def __init__(self, write, chunks):
        self._write = write
        self._chunks = chunks

    def __call__(self, s):
        check_string('write()', s)
        # Bug fix: ``_write`` is the write *callable* handed back by
        # start_response (PEP 333), not a stream object, so it must be
        # called directly rather than via ``self._write.write(s)``.
        self._write(s)
        self._chunks.append(len(s))
class GuardedIterator(object):
    """Wraps the application iterator: verifies headers were set before
    iteration, records chunk sizes, and on close() checks the response body
    against the status code and Content-Length."""

    def __init__(self, iterator, headers_set, chunks):
        self._iterator = iterator
        # Python 2 iterator protocol (this module targets Python 2).
        self._next = iter(iterator).next
        self.closed = False
        self.headers_set = headers_set
        self.chunks = chunks

    def __iter__(self):
        return self

    def next(self):
        if self.closed:
            warn(WSGIWarning('iterated over closed app_iter'),
                 stacklevel=2)
        rv = self._next()
        if not self.headers_set:
            warn(WSGIWarning('Application returned before it '
                             'started the response'), stacklevel=2)
        check_string('application iterator items', rv)
        self.chunks.append(len(rv))
        return rv

    def close(self):
        self.closed = True
        if hasattr(self._iterator, 'close'):
            self._iterator.close()

        if self.headers_set:
            status_code, headers = self.headers_set
            bytes_sent = sum(self.chunks)
            content_length = headers.get('content-length', type=int)

            if status_code == 304:
                # 304 may only carry a handful of non-entity headers.
                for key, value in headers:
                    key = key.lower()
                    if key not in ('expires', 'content-location') and \
                       is_entity_header(key):
                        warn(HTTPWarning('entity header %r found in 304 '
                                         'response' % key))
                if bytes_sent:
                    warn(HTTPWarning('304 responses must not have a body'))
            elif 100 <= status_code < 200 or status_code == 204:
                if content_length != 0:
                    # Bug fix: the original applied the %-formatting to the
                    # HTTPWarning *instance* (``HTTPWarning('...') % code``),
                    # which raises TypeError; format the message string.
                    warn(HTTPWarning('%r responses must have an empty '
                                     'content length' % status_code))
                if bytes_sent:
                    warn(HTTPWarning('%r responses must not have a body' %
                                     status_code))
            elif content_length is not None and content_length != bytes_sent:
                warn(WSGIWarning('Content-Length and the number of bytes '
                                 'sent to the client do not match.'))

    def __del__(self):
        if not self.closed:
            try:
                warn(WSGIWarning('Iterator was garbage collected before '
                                 'it was closed.'))
            except Exception:
                pass
class LintMiddleware(object):
    """This middleware wraps an application and warns on common errors.
    Among other thing it currently checks for the following problems:
    - invalid status codes
    - non-bytestrings sent to the WSGI server
    - strings returned from the WSGI application
    - non-empty conditional responses
    - unquoted etags
    - relative URLs in the Location header
    - unsafe calls to wsgi.input
    - unclosed iterators
    Detected errors are emitted using the standard Python :mod:`warnings`
    system and usually end up on :data:`stderr`.
    ::
        from werkzeug.contrib.lint import LintMiddleware
        app = LintMiddleware(app)
    :param app: the application to wrap
    """

    def __init__(self, app):
        self.app = app

    def check_environ(self, environ):
        """Warn about missing or malformed keys in the WSGI environ dict."""
        if type(environ) is not dict:
            warn(WSGIWarning('WSGI environment is not a standard python dict.'),
                 stacklevel=4)
        for key in ('REQUEST_METHOD', 'SERVER_NAME', 'SERVER_PORT',
                    'wsgi.version', 'wsgi.input', 'wsgi.errors',
                    'wsgi.multithread', 'wsgi.multiprocess',
                    'wsgi.run_once'):
            if key not in environ:
                warn(WSGIWarning('required environment key %r not found'
                                 % key), stacklevel=3)
        if environ['wsgi.version'] != (1, 0):
            warn(WSGIWarning('environ is not a WSGI 1.0 environ'),
                 stacklevel=3)
        script_name = environ.get('SCRIPT_NAME', '')
        if script_name and script_name[:1] != '/':
            warn(WSGIWarning('SCRIPT_NAME does not start with a slash: %r'
                             % script_name), stacklevel=3)
        path_info = environ.get('PATH_INFO', '')
        if path_info[:1] != '/':
            warn(WSGIWarning('PATH_INFO does not start with a slash: %r'
                             % path_info), stacklevel=3)

    def check_start_response(self, status, headers, exc_info):
        """Validate the arguments passed to ``start_response`` and return
        the parsed integer status code plus a :class:`Headers` instance.
        """
        check_string('status', status)
        status_code = status.split(None, 1)[0]
        if len(status_code) != 3 or not status_code.isdigit():
            warn(WSGIWarning('Status code must be three digits'), stacklevel=3)
        if len(status) < 4 or status[3] != ' ':
            warn(WSGIWarning('Invalid value for status %r. Valid '
                             'status strings are three digits, a space '
                             'and a status explanation'), stacklevel=3)
        status_code = int(status_code)
        if status_code < 100:
            warn(WSGIWarning('status code < 100 detected'), stacklevel=3)
        if type(headers) is not list:
            warn(WSGIWarning('header list is not a list'), stacklevel=3)
        for item in headers:
            if type(item) is not tuple or len(item) != 2:
                # Bug fix: the warning text was garbled
                # ('Headers must tuple 2-item tuples').
                warn(WSGIWarning('Headers must be 2-item tuples'),
                     stacklevel=3)
            name, value = item
            if type(name) is not str or type(value) is not str:
                warn(WSGIWarning('header items must be strings'),
                     stacklevel=3)
            if name.lower() == 'status':
                warn(WSGIWarning('The status header is not supported due to '
                                 'conflicts with the CGI spec.'),
                     stacklevel=3)
        if exc_info is not None and not isinstance(exc_info, tuple):
            warn(WSGIWarning('invalid value for exc_info'), stacklevel=3)
        headers = Headers(headers)
        self.check_headers(headers)
        return status_code, headers

    def check_headers(self, headers):
        """Warn about unquoted ETags and non-absolute Location headers."""
        etag = headers.get('etag')
        if etag is not None:
            if etag.startswith('w/'):
                etag = etag[2:]
            if not (etag[:1] == etag[-1:] == '"'):
                warn(HTTPWarning('unquoted etag emitted.'), stacklevel=4)
        location = headers.get('location')
        if location is not None:
            if not urlparse(location).netloc:
                warn(HTTPWarning('absolute URLs required for location header'),
                     stacklevel=4)

    def check_iterator(self, app_iter):
        """Warn when the application returned a bare string as its body."""
        if isinstance(app_iter, basestring):
            warn(WSGIWarning('application returned string. Response will '
                             'send character for character to the client '
                             'which will kill the performance. Return a '
                             'list or iterable instead.'), stacklevel=3)

    def __call__(self, *args, **kwargs):
        """WSGI entry point: run all checks around the wrapped app."""
        if len(args) != 2:
            warn(WSGIWarning('Two arguments to WSGI app required'), stacklevel=2)
        if kwargs:
            warn(WSGIWarning('No keyword arguments to WSGI app allowed'),
                 stacklevel=2)
        environ, start_response = args
        self.check_environ(environ)
        environ['wsgi.input'] = InputStream(environ['wsgi.input'])
        environ['wsgi.errors'] = ErrorStream(environ['wsgi.errors'])
        # hook our own file wrapper in so that applications will always
        # iterate to the end and we can check the content length
        environ['wsgi.file_wrapper'] = FileWrapper
        headers_set = []
        chunks = []

        def checking_start_response(*args, **kwargs):
            # Proxy for start_response that lints its arguments first.
            if len(args) not in (2, 3):
                # Bug fix: ``stacklevel`` is a keyword of ``warn()``; it was
                # previously passed to the WSGIWarning constructor, which
                # raises a TypeError before any warning is emitted.
                warn(WSGIWarning('Invalid number of arguments: %s, expected '
                                 '2 or 3' % len(args)), stacklevel=2)
            if kwargs:
                warn(WSGIWarning('no keyword arguments allowed.'))
            status, headers = args[:2]
            if len(args) == 3:
                exc_info = args[2]
            else:
                exc_info = None
            headers_set[:] = self.check_start_response(status, headers,
                                                       exc_info)
            return GuardedWrite(start_response(status, headers, exc_info),
                                chunks)

        app_iter = self.app(environ, checking_start_response)
        self.check_iterator(app_iter)
        return GuardedIterator(app_iter, headers_set, chunks)
| sauloal/PiCastPy | werkzeug/contrib/lint.py | Python | mit | 12,238 |
#!/usr/bin/env python
""" Description
Define simple difference operator and convolve simple input with it."""
from vsip import vector
from vsip.signal import *
from vsip.signal.conv import convolution
from matplotlib.pyplot import *
import numpy
# define differentiation operator
# (three-tap central-difference kernel [-1, 0, 1])
K = vector(numpy.array([-1., 0., 1.]))
# define tics for the X axis
X = vector(numpy.arange(1024, dtype=numpy.float64))
# set up input array
# NOTE(review): ``input`` shadows the Python builtin; harmless in this
# short example but worth renaming if the script grows.
input = vector(numpy.sin(X/numpy.float64(100.)))
# set up output array
# 1022 = 1024 - 3 + 1, presumably the valid-output length implied by
# support_region.min with a 3-tap kernel -- confirm against the VSIPL docs.
output = vector(numpy.float64, 1022)
# create convolution object
conv = convolution(K, symmetry.none, 1024, 1, support_region.min, 0, alg_hint.time)
# run convolution
conv(input, output)
# scale
output *= numpy.float64(50.)
# plot input and output
plot(X, input)
plot(X[:1022], output)
show()
| maxywb/vsipl | sourceryvsipl++-x86-3.1/src/vsipl++/examples/python/conv.py | Python | gpl-2.0 | 794 |
# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2015-2019 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import subprocess
from unittest import mock
import fixtures
from testtools.matchers import Equals
from snapcraft.internal import sources
from tests import unit
# LP: #1733584
class TestBazaar(unit.sources.SourceTestCase): # type: ignore
    """Unit tests for the ``bzr`` source handler: verifies the exact
    ``bzr`` command lines built by pull() and the constructor's option
    validation (mock-based, no real repository needed).
    """
    def setUp(self):
        super().setUp()
        # Mock _get_source_details() since not all tests have a
        # full repo checkout
        patcher = mock.patch("snapcraft.sources.Bazaar._get_source_details")
        self.mock_get_source_details = patcher.start()
        self.mock_get_source_details.return_value = ""
        self.addCleanup(patcher.stop)
    def test_pull(self):
        bzr = sources.Bazaar("lp:my-source", "source_dir")
        bzr.pull()
        self.mock_rmdir.assert_called_once_with("source_dir")
        self.mock_run.assert_called_once_with(
            ["bzr", "branch", "lp:my-source", "source_dir"]
        )
    def test_pull_tag(self):
        bzr = sources.Bazaar("lp:my-source", "source_dir", source_tag="tag")
        bzr.pull()
        self.mock_run.assert_called_once_with(
            ["bzr", "branch", "-r", "tag:tag", "lp:my-source", "source_dir"]
        )
    def test_pull_existing_with_tag(self):
        # An existing checkout (path exists) must use 'bzr pull', not branch.
        self.mock_path_exists.return_value = True
        bzr = sources.Bazaar("lp:my-source", "source_dir", source_tag="tag")
        bzr.pull()
        self.mock_run.assert_called_once_with(
            ["bzr", "pull", "-r", "tag:tag", "lp:my-source", "-d", "source_dir"]
        )
    def test_pull_commit(self):
        bzr = sources.Bazaar("lp:my-source", "source_dir", source_commit="2")
        bzr.pull()
        self.mock_run.assert_called_once_with(
            ["bzr", "branch", "-r", "2", "lp:my-source", "source_dir"]
        )
    def test_pull_existing_with_commit(self):
        self.mock_path_exists.return_value = True
        bzr = sources.Bazaar("lp:my-source", "source_dir", source_commit="2")
        bzr.pull()
        self.mock_run.assert_called_once_with(
            ["bzr", "pull", "-r", "2", "lp:my-source", "-d", "source_dir"]
        )
    def test_init_with_source_branch_raises_exception(self):
        # bzr has no branch concept separate from the URL, so the option
        # must be rejected.
        raised = self.assertRaises(
            sources.errors.SnapcraftSourceInvalidOptionError,
            sources.Bazaar,
            "lp:mysource",
            "source_dir",
            source_branch="branch",
        )
        self.assertThat(raised.source_type, Equals("bzr"))
        self.assertThat(raised.option, Equals("source-branch"))
    def test_init_with_source_depth_raises_exception(self):
        raised = self.assertRaises(
            sources.errors.SnapcraftSourceInvalidOptionError,
            sources.Bazaar,
            "lp://mysource",
            "source_dir",
            source_depth=2,
        )
        self.assertThat(raised.source_type, Equals("bzr"))
        self.assertThat(raised.option, Equals("source-depth"))
    def test_init_with_source_tag_and_commit_raises_exception(self):
        # tag and commit are mutually exclusive revision selectors.
        raised = self.assertRaises(
            sources.errors.SnapcraftSourceIncompatibleOptionsError,
            sources.Bazaar,
            "lp://mysource",
            "source_dir",
            source_tag="tag",
            source_commit="2",
        )
        self.assertThat(raised.source_type, Equals("bzr"))
        self.assertThat(raised.options, Equals(["source-tag", "source-commit"]))
    def test_source_checksum_raises_exception(self):
        raised = self.assertRaises(
            sources.errors.SnapcraftSourceInvalidOptionError,
            sources.Bazaar,
            "lp://mysource",
            "source_dir",
            source_checksum="md5/d9210476aac5f367b14e513bdefdee08",
        )
        self.assertThat(raised.source_type, Equals("bzr"))
        self.assertThat(raised.option, Equals("source-checksum"))
    def test_has_source_handler_entry(self):
        self.assertTrue(sources._source_handler["bzr"] is sources.Bazaar)
    def test_pull_failure(self):
        # A failing subprocess must surface as SnapcraftPullError with the
        # attempted command line and exit code attached.
        self.mock_run.side_effect = subprocess.CalledProcessError(1, [])
        bzr = sources.Bazaar("lp:my-source", "source_dir")
        raised = self.assertRaises(sources.errors.SnapcraftPullError, bzr.pull)
        self.assertThat(raised.command, Equals("bzr branch lp:my-source source_dir"))
        self.assertThat(raised.exit_code, Equals(1))
def get_side_effect(original_call):
    """Build a ``side_effect`` callable for subprocess mocks.

    The returned function fakes out ``bzr`` invocations -- ``bzr revno``
    yields a canned commit id, every other ``bzr`` command is swallowed --
    while all non-bzr commands are delegated to *original_call*.
    """
    def side_effect(cmd, *args, **kwargs):
        if len(cmd) > 1 and cmd[1] == "revno":
            return b"mock-commit"
        if cmd[0] == "bzr":
            return None
        return original_call(cmd, *args, **kwargs)
    return side_effect
class BazaarDetailsTestCase(unit.TestCase):
    """Tests for Bazaar._get_source_details() with subprocess calls faked
    through get_side_effect(), so no real bzr binary is needed.
    """
    def setUp(self):
        super().setUp()
        self.working_tree = "bzr-working-tree"
        self.source_dir = "bzr-source-dir"
        os.mkdir(self.source_dir)
        # Simulate that we have already branched code out.
        os.mkdir(os.path.join(self.source_dir, ".bzr"))
        self.fake_check_output = self.useFixture(
            fixtures.MockPatch(
                "subprocess.check_output",
                side_effect=get_side_effect(subprocess.check_output),
            )
        )
        self.fake_check_call = self.useFixture(
            fixtures.MockPatch(
                "subprocess.check_call",
                side_effect=get_side_effect(subprocess.check_call),
            )
        )
    def test_bzr_details_commit(self):
        """Without a tag, the commit id is queried via 'bzr revno'."""
        bzr = sources.Bazaar(self.working_tree, self.source_dir, silent=True)
        bzr.pull()
        source_details = bzr._get_source_details()
        self.assertThat(source_details["source-commit"], Equals("mock-commit"))
        self.fake_check_output.mock.assert_has_calls(
            [
                mock.call(["bzr", "revno", self.source_dir]),
                mock.call(["bzr", "revno", self.source_dir]),
            ]
        )
        # -3 is presumably subprocess.DEVNULL (silent=True) -- confirm.
        self.fake_check_call.mock.assert_called_once_with(
            ["bzr", "pull", self.working_tree, "-d", self.source_dir],
            stderr=-3,
            stdout=-3,
        )
    def test_bzr_details_tag(self):
        """With a tag, no revno lookup happens; the tag is reported."""
        bzr = sources.Bazaar(
            self.working_tree, self.source_dir, source_tag="mock-tag", silent=True
        )
        bzr.pull()
        source_details = bzr._get_source_details()
        self.assertThat(source_details["source-tag"], Equals("mock-tag"))
        self.fake_check_output.mock.assert_not_called()
        self.fake_check_call.mock.assert_called_once_with(
            [
                "bzr",
                "pull",
                "-r",
                "tag:mock-tag",
                self.working_tree,
                "-d",
                self.source_dir,
            ],
            stderr=-3,
            stdout=-3,
        )
| ubuntu-core/snapcraft | tests/unit/sources/test_bazaar.py | Python | gpl-3.0 | 7,406 |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
import oslo_db.exception
from heat.common import exception
from heat.common.i18n import _
from heat.common import identifier
from heat.objects import event as event_object
class Event(object):
    """Class representing a Resource state change."""
    def __init__(self, context, stack, action, status, reason,
                 physical_resource_id, resource_properties, resource_name,
                 resource_type, uuid=None, timestamp=None, id=None):
        """Initialise from a context, stack, and event information.
        The timestamp and database ID may also be initialised if the event is
        already in the database.
        """
        self.context = context
        self.stack = stack
        self.action = action
        self.status = status
        self.reason = reason
        self.physical_resource_id = physical_resource_id
        self.resource_name = resource_name
        self.resource_type = resource_type
        # Copy the properties defensively; a malformed mapping is recorded
        # as an 'Error' entry instead of failing event creation.
        try:
            self.resource_properties = dict(resource_properties)
        except ValueError as ex:
            self.resource_properties = {'Error': six.text_type(ex)}
        self.uuid = uuid
        self.timestamp = timestamp
        self.id = id
    @classmethod
    def load(cls, context, event_id, event=None, stack=None):
        """Retrieve an Event from the database."""
        # Imported here, presumably to avoid a circular import with the
        # engine package -- confirm before moving to module level.
        from heat.engine import stack as parser
        ev = (event if event is not None else
              event_object.Event.get_by_id(context, event_id))
        if ev is None:
            message = _('No event exists with id "%s"') % str(event_id)
            raise exception.NotFound(message)
        st = (stack if stack is not None else
              parser.Stack.load(context, ev.stack_id))
        return cls(context, st, ev.resource_action, ev.resource_status,
                   ev.resource_status_reason, ev.physical_resource_id,
                   ev.resource_properties, ev.resource_name,
                   ev.resource_type, ev.uuid, ev.created_at, ev.id)
    def store(self):
        """Store the Event in the database.

        Returns the database id of the stored row.  On a DBError the
        properties are progressively truncated (largest value first, then
        all of them) so the event itself is never lost.
        """
        ev = {
            'resource_name': self.resource_name,
            'physical_resource_id': self.physical_resource_id,
            'stack_id': self.stack.id,
            'resource_action': self.action,
            'resource_status': self.status,
            'resource_status_reason': self.reason,
            'resource_type': self.resource_type,
            'resource_properties': self.resource_properties,
        }
        if self.uuid is not None:
            ev['uuid'] = self.uuid
        if self.timestamp is not None:
            ev['created_at'] = self.timestamp
        try:
            new_ev = event_object.Event.create(self.context, ev)
        except oslo_db.exception.DBError:
            # Retry after dropping the largest property value; oversized
            # rows are expected to mostly come from one large config blob
            # property.
            max_key, max_val = max(ev['resource_properties'].items(),
                                   key=lambda i: len(repr(i[1])))
            err = 'Resource properties are too large to store'
            ev['resource_properties'].update({'Error': err})
            ev['resource_properties'][max_key] = '<Deleted, too large>'
            try:
                new_ev = event_object.Event.create(self.context, ev)
            except oslo_db.exception.DBError:
                # Give up and drop all properties..
                ev['resource_properties'] = {'Error': err}
                new_ev = event_object.Event.create(self.context, ev)
        self.id = new_ev.id
        return self.id
    def identifier(self):
        """Return a unique identifier for the event."""
        # Events that were never stored (no uuid yet) have no identifier.
        if self.uuid is None:
            return None
        res_id = identifier.ResourceIdentifier(
            resource_name=self.resource_name, **self.stack.identifier())
        return identifier.EventIdentifier(event_id=str(self.uuid), **res_id)
| dragorosson/heat | heat/engine/event.py | Python | apache-2.0 | 4,537 |
#!/usr/bin/python3
import os
import subprocess
import sys
# Fixture 1: a single plain (undecoded) SMS.
env1 = dict(
    SMS_MESSAGES="1",
    DECODED_PARTS="0",
    SMS_1_NUMBER="0124",
    SMS_1_CLASS="-1",
    SMS_1_TEXT="ghgg",
)
# Fixture 2: a three-part multipart SMS with one decoded long text.
env2 = dict(
    SMS_2_CLASS="-1",
    SMS_MESSAGES="3",
    DECODED_PARTS="1",
    SMS_3_NUMBER="0124",
    SMS_3_TEXT="ernvgigfhhykfkkfdknxdhkjxhkkckjghfkbfujvjjxtyfjcuhdy8hdyujftucdyjxyhfyhrijfiigujugkcjtfdskfgjhfyhffyernvgi",
    SMS_2_TEXT="cuhdy8hdyujftucdyjxyhfyhrijfiigujugkcjtfdskfgjhfyhffyernvgigfhhykfkkfdknxdhkjxhkkckjghfkbfujvjjxtyfjcuhdy8hdyujftucdyjxyhfyhrijfiigujugkcjtfdskfgjhfyhffy",
    SMS_2_NUMBER="0124",
    SMS_3_CLASS="-1",
    DECODED_0_TEXT="ijhgfhhykfkkfdknxdhkjxhkkckjghfkbfujvjjxtyfjcuhdy8hdyujftucdyjxyhfyhrijfiigujugkcjtfdskfgjhfyhffyernvgifoxhidtuhgfhhykfkkfdknxdhkjxhkkckjghfkbfujvjjxtyfjcuhdy8hdyujftucdyjxyhfyhrijfiigujugkcjtfdskfgjhfyhffyernvgigfhhykfkkfdknxdhkjxhkkckjghfkbfujvjjxtyfjcuhdy8hdyujftucdyjxyhfyhrijfiigujugkcjtfdskfgjhfyhffyernvgigfhhykfkkfdknxdhkjxhkkckjghfkbfujvjjxtyfjcuhdy8hdyujftucdyjxyhfyhrijfiigujugkcjtfdskfgjhfyhffyernvgi",
    SMS_1_NUMBER="0124",
    SMS_1_CLASS="-1",
    SMS_1_TEXT="ijhgfhhykfkkfdknxdhkjxhkkckjghfkbfujvjjxtyfjcuhdy8hdyujftucdyjxyhfyhrijfiigujugkcjtfdskfgjhfyhffyernvgifoxhidtuhgfhhykfkkfdknxdhkjxhkkckjghfkbfujvjjxtyfj"
)
# Select which fixture to feed the parser.
env = env2
# Export the chosen fixture into the environment so the gammu-style hook
# script under test can read it.
for key in env:
    os.environ[key] = env[key]
# Run the parser as a subprocess; argument list form keeps shell=False.
subprocess.call(['../bin/sms-shell-parser'], stdout=sys.stdout)
| Turgon37/SMSShell | tests/parser.py | Python | gpl-3.0 | 1,418 |
# Public API of this package.
__all__ = ["Util", "etsi"]
| superfluidity/RDCL3D | code/lib/__init__.py | Python | apache-2.0 | 27 |
#!/usr/bin/python
#
# Copyright (c) 2017 Sertac Ozercan, <seozerca@microsoft.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_virtualmachine_scaleset_facts
version_added: "2.4"
short_description: Get Virtual Machine Scale Set facts
description:
- Get facts for a virtual machine scale set
options:
name:
description:
- Limit results to a specific virtual machine scale set
resource_group:
description:
- The resource group to search for the desired virtual machine scale set
extends_documentation_fragment:
- azure
author:
- "Sertac Ozercan (@sozercan)"
'''
EXAMPLES = '''
- name: Get facts for a virtual machine scale set
azure_rm_virtualmachine_scaleset_facts:
resource_group: Testing
name: testvmss001
- name: Get facts for all virtual networks
azure_rm_virtualmachine_scaleset_facts:
resource_group: Testing
- name: Get facts by tags
azure_rm_virtualmachine_scaleset_facts:
resource_group: Testing
tags:
- testing
'''
RETURN = '''
azure_vmss:
description: List of virtual machine scale sets
returned: always
type: list
example: [{
"location": "eastus",
"properties": {
"overprovision": true,
"singlePlacementGroup": true,
"upgradePolicy": {
"mode": "Manual"
},
"virtualMachineProfile": {
"networkProfile": {
"networkInterfaceConfigurations": [
{
"name": "testvmss",
"properties": {
"dnsSettings": {
"dnsServers": []
},
"enableAcceleratedNetworking": false,
"ipConfigurations": [
{
"name": "default",
"properties": {
"privateIPAddressVersion": "IPv4",
"subnet": {
"id": "/subscriptions/XXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX/resourceGroups/Testing/providers/Microsoft.Network/virtualNetworks/testvnet/subnets/testsubnet"
}
}
}
],
"primary": true
}
}
]
},
"osProfile": {
"adminUsername": "testuser",
"computerNamePrefix": "testvmss",
"linuxConfiguration": {
"disablePasswordAuthentication": true,
"ssh": {
"publicKeys": [
{
"keyData": "",
"path": "/home/testuser/.ssh/authorized_keys"
}
]
}
},
"secrets": []
},
"storageProfile": {
"imageReference": {
"offer": "CoreOS",
"publisher": "CoreOS",
"sku": "Stable",
"version": "899.17.0"
},
"osDisk": {
"caching": "ReadWrite",
"createOption": "fromImage",
"managedDisk": {
"storageAccountType": "Standard_LRS"
}
}
}
}
},
"sku": {
"capacity": 1,
"name": "Standard_DS1_v2",
"tier": "Standard"
}
}]
''' # NOQA
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
try:
    from msrestazure.azure_exceptions import CloudError
except ImportError:
    # handled in azure_rm_common
    # Bug fix: catch only ImportError -- a bare ``except`` would also
    # swallow unrelated errors such as SystemExit/KeyboardInterrupt.
    pass
AZURE_OBJECT_CLASS = 'VirtualMachineScaleSet'
AZURE_ENUM_MODULES = ['azure.mgmt.compute.models']
class AzureRMVirtualMachineScaleSetFacts(AzureRMModuleBase):
    """Utility class to get virtual machine scale set facts"""
    def __init__(self):
        # Module argument spec: optional name/resource_group/tags filters.
        self.module_args = dict(
            name=dict(type='str'),
            resource_group=dict(type='str'),
            tags=dict(type='list')
        )
        # Facts modules never change state, hence changed=False.
        self.results = dict(
            changed=False,
            ansible_facts=dict(
                azure_vmss=[]
            )
        )
        self.name = None
        self.resource_group = None
        self.tags = None
        super(AzureRMVirtualMachineScaleSetFacts, self).__init__(
            derived_arg_spec=self.module_args,
            supports_tags=False,
            facts_module=True
        )
    def exec_module(self, **kwargs):
        """Gather the facts: one VMSS by name or all in a resource group."""
        for key in self.module_args:
            setattr(self, key, kwargs[key])
        if self.name and not self.resource_group:
            self.fail("Parameter error: resource group required when filtering by name.")
        if self.name:
            self.results['ansible_facts']['azure_vmss'] = self.get_item()
        else:
            self.results['ansible_facts']['azure_vmss'] = self.list_items()
        return self.results
    def get_item(self):
        """Get a single virtual machine scale set"""
        self.log('Get properties for {}'.format(self.name))
        item = None
        results = []
        try:
            item = self.compute_client.virtual_machine_scale_sets.get(self.resource_group, self.name)
        except CloudError:
            # Not found: return an empty list rather than failing.
            pass
        if item and self.has_tags(item.tags, self.tags):
            results = [self.serialize_obj(item, AZURE_OBJECT_CLASS, enum_modules=AZURE_ENUM_MODULES)]
        return results
    def list_items(self):
        """Get all virtual machine scale sets"""
        self.log('List all virtual machine scale sets')
        try:
            response = self.compute_client.virtual_machine_scale_sets.list(self.resource_group)
        except CloudError as exc:
            self.fail('Failed to list all items - {}'.format(str(exc)))
        results = []
        for item in response:
            if self.has_tags(item.tags, self.tags):
                results.append(self.serialize_obj(item, AZURE_OBJECT_CLASS, enum_modules=AZURE_ENUM_MODULES))
        return results
def main():
    """Main module execution code path"""
    # Instantiating the facts class runs the module; argument parsing and
    # exec_module() are presumably driven from AzureRMModuleBase.__init__
    # (confirm in azure_rm_common).
    AzureRMVirtualMachineScaleSetFacts()
if __name__ == '__main__':
    main()
| bregman-arie/ansible | lib/ansible/modules/cloud/azure/azure_rm_virtualmachine_scaleset_facts.py | Python | gpl-3.0 | 7,164 |
__author__ = 'Icarus'
# Solves in the form ax = b (mod mod)
def solve_linear_congruence(a, b, mod):
    """Solve a*x = b (mod mod).

    Returns a single solution (int) when gcd(a, mod) == 1, a list of all
    gcd(a, mod) solutions when gcd(a, mod) divides b, and None when the
    congruence has no solution.
    """
    gcd, x, y = extended_euclidean(a, mod)
    if gcd == 1:
        return x*b % mod
    elif b % gcd == 0:
        # Bug fix: use integer (floor) division -- ``/`` produces floats in
        # Python 3 (the rest of this file is Python 3: input()/print()),
        # which silently breaks the modular arithmetic below.
        newa = a // gcd
        newb = b // gcd
        newmod = mod // gcd
        # NOTE(review): this assumes the recursive call hits the gcd == 1
        # branch and returns a scalar; a list return would break the
        # comparison below -- confirm inputs guarantee this.
        ans = solve_linear_congruence(newa, newb, newmod)
        ret = []
        while ans < mod:
            ret.append(ans)
            ans = ans + newmod
        return ret
    else:
        return None
# ax + by = gcd(a, b)1
# This takes in a and b and returns the gcd, x, and y.
def extended_euclidean(a, b):
    """Iterative extended Euclidean algorithm.

    Returns ``(g, x, y)`` such that ``a*x + b*y == g`` where ``g`` is the
    greatest common divisor of ``a`` and ``b``.
    """
    coef_x, coef_y = 0, 1      # Bezout coefficients tracked for b
    next_x, next_y = 1, 0      # coefficients tracked for a
    while a != 0:
        quotient, remainder = divmod(b, a)
        b, a = a, remainder
        coef_x, next_x = next_x, coef_x - next_x * quotient
        coef_y, next_y = next_y, coef_y - next_y * quotient
    return b, coef_x, coef_y
def split_message(message):
    """Chop a digit string into consecutive 3-character groups and return
    them as a list of ints.

    Assumes ``len(message)`` is a multiple of 3; indexing past the end
    raises IndexError otherwise (same contract as the original loop).
    """
    return [int(message[i] + message[i + 1] + message[i + 2])
            for i in range(0, len(message), 3)]
def encode_to_ascii(char_list):
    """Map a list of integer code points to their character equivalents."""
    return [chr(code) for code in char_list]
def smart_power(base, exp):
    """Compute base**exp by repeated squaring plus a recursive remainder.

    Small exponents (< 10) fall back to the builtin power operator.
    """
    if exp < 10:
        return base**exp
    total = base        # invariant: total == base**covered
    covered = 1
    while covered < exp/2:
        total *= total
        covered *= 2
    # Finish off the exponent bits not yet covered by the squaring loop.
    return total * smart_power(base, exp - covered)
def smart_mod_power(base, exp, mod):
    """Modular exponentiation by repeated squaring: (base**exp) % mod.

    Equivalent to the builtin ``pow(base, exp, mod)``; small exponents
    (< 10) are computed directly.
    """
    if exp < 10:
        return (base**exp) % mod
    total = base        # invariant (after first squaring): base**covered mod
    covered = 1
    while covered < exp/2:
        total = (total * total) % mod
        covered *= 2
    # Recurse on the exponent remainder and fold it in modulo ``mod``.
    return (total * smart_mod_power(base, exp - covered, mod)) % mod
def main():
    """Prompt for an encrypted integer and modulus, decrypt with the
    hard-coded RSA-style key, and print the decoded ASCII message."""
    message_text = input('Encrypted Message: ')
    n_text = input('n value: ')
    n_int = int(n_text)
    # Hard-coded key material: p = 10**110 + 7, q = 10**111 + 139
    # (presumably prime -- this is homework code; confirm).
    p = smart_power(10, 110) + 7
    q = smart_power(10, 111) + 139
    p_minus = p - 1
    q_minus = q - 1
    # Private exponent d = 5**(-1) mod phi(n): solve 5*d = 1 (mod phi).
    d = solve_linear_congruence(5, 1, q_minus * p_minus)
    message_int = int(message_text)
    decrypted = smart_mod_power(message_int, d, n_int)
    string_decrypted = repr(decrypted)
    # Zero-pad on the left so the digit string splits evenly into 3-digit
    # ASCII codes for split_message().
    if len(string_decrypted) % 3 == 0:
        split = split_message(repr(decrypted))
    elif len(string_decrypted) % 3 == 1:
        split = split_message('00'+repr(decrypted))
    else:
        split = split_message('0'+repr(decrypted))
    char_message = encode_to_ascii(split)
    print(''.join(char_message))
    return
if __name__ == "__main__":
    main()
import sys
import django
from django.conf import settings
from django.test.runner import DiscoverRunner
# Minimal in-memory Django configuration for running the test suite
# without a project settings module.
settings.configure(
    DATABASES={
        'default': {
            'ENGINE': 'django.db.backends.sqlite3',
            'NAME': ':memory:'
        }
    },
    INSTALLED_APPS=('tests',)
)
django.setup()
# Discover and run all tests; propagate the failure count as exit status
# so CI marks the build red on any failure.
test_runner = DiscoverRunner(verbosity=1)
failures = test_runner.run_tests([])
if failures:
    sys.exit(failures)
| mixxorz/django-service-objects | runtests.py | Python | mit | 427 |
from os import chdir
from grslra.visualization import phaseplot
import numpy as np
from scipy.io import loadmat
# This function can be used to visualize the phaseplot experiments of the MATLAB algorithms
algoname = 'rmc'
folder = 'phasetransitions/' + algoname
# NOTE(review): ``algoname`` is rebound immediately below, so the folder is
# built from 'rmc' while the result file uses 'rmc_fast' -- confirm this
# asymmetry is intentional.
algoname = 'rmc_fast'
resultfile = 'result_' + algoname
chdir(folder)
data = loadmat(resultfile + '.mat')
# First slice of the result cube holds the error values.
errs = data['result'][:, :, 0]
maxval = data['maxval']
step = data['step']
# Bug fix: ``np.int`` (a deprecated alias of the builtin) was removed in
# NumPy 1.24 -- use the builtin int() instead.
values = int(np.round(maxval / step))
# Plot log10 of the errors on a fixed (-8, -1) color scale.
phaseplot(np.log10(errs), (-8, -1), values, maxval, 'pt_' + algoname + '.pdf')
# -*- coding: utf-8 -*-
import openerp
from openerp.addons.web import http
from openerp.addons.web.http import request
from datetime import datetime
class google_map(http.Controller):
    """Website controllers backing the Google Maps partner-locator widget."""
    @http.route(['/google_map'], type='http', auth="public", website=True)
    def google_map(self, *arg, **post):
        """Render the map page, passing through size/partner parameters."""
        values = {
            'partner_ids': post.get('partner_ids', ""),
            'width': post.get('width', 900),
            'height': post.get('height', 460),
            'partner_url': post.get('partner_url'),
        }
        return request.website.render("website_google_map.google_map", values)
    @http.route(['/google_map/partners.json'], type='http', auth="public", website=True)
    def google_map_data(self, *arg, **post):
        """Return map data (as JSON) for the requested *published* partners."""
        partner_obj = request.registry['res.partner']
        # Restrict to the requested ids AND website_published=True so
        # unpublished partners are never exposed to anonymous visitors.
        domain = [("id", "in", [int(p) for p in post.get('partner_ids', "").split(",") if p])]
        domain_public = domain + [('website_published', '=', True)]
        partner_ids = partner_obj.search(request.cr, openerp.SUPERUSER_ID,
                                         domain_public, context=request.context)
        return partner_obj.google_map_json(request.cr, openerp.SUPERUSER_ID,
                                           partner_ids, request.context)
    @http.route(['/google_map/set_partner_position'], type='http', auth="public", website=True)
    def google_map_set_partner_position(self, *arg, **post):
        """Store geolocation coordinates for a partner record.

        NOTE(review): this is a *public* route that writes partner
        coordinates with SUPERUSER_ID -- any visitor can update any
        partner's position.  Confirm this is intended / access-checked
        upstream.
        """
        partner_obj = request.registry['res.partner']
        partner_id = post.get('partner_id') and int(post['partner_id'])
        latitude = post.get('latitude') and float(post['latitude'])
        longitude = post.get('longitude') and float(post['longitude'])
        values = {
            'partner_latitude': latitude,
            'partner_longitude': longitude,
            'date_localization': datetime.now().strftime('%Y-%m-%d'),
        }
        partner_obj.write(request.cr, openerp.SUPERUSER_ID, [partner_id], values,
                          request.context)
# vim:expandtab:tabstop=4:softtabstop=4:shiftwidth=4:
| poiesisconsulting/openerp-restaurant | website_google_map/controllers/main.py | Python | agpl-3.0 | 2,060 |
#!/usr/bin/env python
# Copyright (C) 2007--2016 the X-ray Polarimetry Explorer (XPE) team.
#
# For the license terms see the file LICENSE, distributed along with this
# software.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import numpy
import matplotlib.pyplot as plt
from pyxpe.recon.xpol import XPOL_COLUMN_PITCH
from pyxpe.recon.geometry import xpePoint2d, xpeRay2d
from pyxpe.recon.xpol import xpeHexagonCollection, adc2colors
class xpeCluster:
    """Class representing a cluster of pixels, i.e., a photoelectron track.

    Holds three parallel numpy arrays (pixel x/y positions and their ADC
    values) plus derived quantities: total pulse height and the
    charge-weighted baricenter.
    """
    def __init__(self, x, y, adc_values):
        """Constructor.

        ``x``, ``y`` and ``adc_values`` must be parallel arrays of equal
        length (one entry per pixel).
        """
        assert len(x) == len(y) == len(adc_values)
        self.x = x
        self.y = y
        self.adc_values = adc_values
        # Calculate the pulse height.
        self.pulse_height = self.adc_values.sum()
        # Calculate the baricenter position.
        _x = numpy.sum(self.x*self.adc_values)/self.pulse_height
        _y = numpy.sum(self.y*self.adc_values)/self.pulse_height
        self.baricenter = xpePoint2d(_x, _y)
    def num_pixels(self):
        """Return the cluster size.
        """
        return len(self.adc_values)
    def __cmp__(self, other):
        """Comparison operator (sort the clusters by pulse height).

        NOTE(review): ``__cmp__`` is Python 2 only; sorting under Python 3
        would need __lt__/__eq__ instead -- confirm the target interpreter.
        """
        return other.pulse_height - self.pulse_height
    def projection1d(self, pivot, phi):
        """Project the charge distribution on the ray passing by the pivot
        point at an angle phi, and return the corresponding one-dimensional
        array of coordinates.
        """
        return numpy.cos(phi)*(self.x - pivot.x()) +\
            numpy.sin(phi)*(self.y - pivot.y())
    def moment(self, order, pivot, phi):
        """Calculate the nth-order moment of the 1-dimensional charge
        distribution projected on a given ray.
        """
        xp = self.projection1d(pivot, phi)
        return numpy.sum((xp**order)*self.adc_values)/self.pulse_height
    def fit_spline(self, zero_suppression=25):
        """To be moved into recon.

        Fits a univariate spline to the track in the frame rotated by
        ``self.phi0`` and overlays it on the current plot.
        NOTE(review): ``self.phi0`` is never set in this class -- presumably
        attached by the reconstruction code; confirm before calling.
        """
        from scipy.interpolate import UnivariateSpline
        _mask = self.adc_values >= zero_suppression
        x = self.x[_mask]
        y = self.y[_mask]
        adc_values = self.adc_values[_mask]
        weights = (adc_values/float(adc_values.max()))**0.5
        dx = (x - self.baricenter.x())
        dy = (y - self.baricenter.y())
        xp = numpy.cos(self.phi0)*dx + numpy.sin(self.phi0)*dy
        yp = -numpy.sin(self.phi0)*dx + numpy.cos(self.phi0)*dy
        s = UnivariateSpline(xp, yp, w=weights, s=0.5)
        _xp = numpy.linspace(xp.min(), xp.max(), 25)
        _yp = s(_xp)
        # Rotate the fitted curve back to the detector frame.
        dx = numpy.cos(-self.phi0)*_xp + numpy.sin(-self.phi0)*_yp
        dy = -numpy.sin(-self.phi0)*_xp + numpy.cos(-self.phi0)*_yp
        x = dx + self.baricenter.x()
        y = dy + self.baricenter.y()
        plt.plot(x, y, '-', lw=2, color='black')
    def fit_spine(self, num_nodes=7):
        """Just playing around. To be moved into recon.
        Warning
        -------
        This is horrible and it should be either rewritten properly or
        deleted.
        """
        dx = (self.x - self.baricenter.x())
        dy = (self.y - self.baricenter.y())
        xp = numpy.cos(self.phi0)*dx + numpy.sin(self.phi0)*dy
        yp = -numpy.sin(self.phi0)*dx + numpy.cos(self.phi0)*dy
        # Charge-weighted average point in each of num_nodes slices along
        # the principal axis.
        bins = numpy.linspace(xp.min(), xp.max(), num_nodes + 1)
        xsp = []
        ysp = []
        for i in range(num_nodes):
            _xmin, _xmax = bins[i:i+2]
            _mask = (xp > _xmin)*(xp < _xmax)
            _w = numpy.sum(self.adc_values[_mask])
            _xs = numpy.sum(xp[_mask]*self.adc_values[_mask])/_w
            _ys = numpy.sum(yp[_mask]*self.adc_values[_mask])/_w
            xsp.append(_xs)
            ysp.append(_ys)
        xsp = numpy.array(xsp)
        ysp = numpy.array(ysp)
        xs = numpy.cos(-self.phi0)*xsp + numpy.sin(-self.phi0)*ysp
        ys = -numpy.sin(-self.phi0)*xsp + numpy.cos(-self.phi0)*ysp
        xs = (xs + self.baricenter.x())
        ys = (ys + self.baricenter.y())
        plt.plot(xs, ys, lw=2)
    def draw(self, coordinate_system, color_map='Reds', hexcol_padding=0.1,
             text=True, show=True):
        """Draw the cluster. To be moved into a separate module.

        Renders the hit pixels as a hexagon collection colored by ADC
        value; if ``text`` is True the raw ADC counts are printed on top.
        """
        hit_positions = numpy.vstack((self.x, self.y),).transpose()
        colors = adc2colors(self.adc_values, 0, color_map)
        # The 'pixy' coordinate system is rotated by 90 degrees.
        if coordinate_system == 'pixy':
            angle = numpy.pi/2.
        else:
            angle = 0
        hex_col = xpeHexagonCollection(padding=hexcol_padding,
                                       offsets=hit_positions, rotation=angle,
                                       edgecolors='gray', facecolors=colors)
        fig = hex_col.figure
        if text:
            # Flip the label color at half the maximum ADC value so the
            # text stays readable on both light and dark hexagons.
            adc_ref = 0.5*self.adc_values.max()
            for x, y, val in zip(self.x, self.y, self.adc_values):
                if val < adc_ref:
                    col = 'black'
                else:
                    col = 'white'
                plt.text(x, y, '%s' % val, horizontalalignment='center',
                         verticalalignment='center', size=8, color=col)
        plt.xlabel('x [mm]')
        plt.ylabel('y [mm]')
        #self.baricenter.draw()
        #self.axis0.draw()
        #self.conversion_point.draw()
        #self.conversion_baricenter.draw()
        #self.axis1.draw()
        #self.fit_spline()
        if show:
            plt.show()
        return fig
    def __str__(self):
        """String formatting.
        """
        return 'cluster @ %s, %d pixels, pulse height = %d ADC counts' %\
            (self.baricenter, self.num_pixels(), self.pulse_height)
def single_clustering(event, zero_suppression, coordinate_system):
    """Dummy single-clustering algorithm for testing purposes.

    All the pixels above the zero-suppression threshold in the window are
    grouped into one cluster, returned as a one-element list.

    Args
    ----
    event : pXpeEventBase instance
        The underlying event object.

    zero_suppression : float or array
        The zero suppression threshold.

    coordinate_system : str
        Coordinate system passed through to event.hit_data().
    """
    hit_x, hit_y, hit_adc = event.hit_data(zero_suppression, coordinate_system)
    cluster = xpeCluster(hit_x, hit_y, hit_adc)
    return [cluster]
def hierarchical_clustering(event, zero_suppression, coordinate_system,
                            method='single', metric='euclidean',
                            criterion='distance',
                            max_distance=1.01*XPOL_COLUMN_PITCH):
    """Lightweight wrapper over the scipy.cluster.hierarchy module.

    This is essentially calling scipy.cluster.hierarchy.linkage and
    scipy.cluster.hierarchy.fcluster, returning a list of xpeCluster objects
    sorted by pulse height.

    The default parameters in the method signature are those producing the
    behaviour you would naively expect, i.e., contiguous pixels are grouped
    together in the same cluster (the clustering is done using the
    pixel-to-pixel euclidean distance and the distance cut is placed just above
    the longest among the readout pitches in the two directions).

    Warning
    -------
    Do not mess around with the function arguments unless you know what you're
    doing---scipy is capable of producing a surprising variety of different
    behaviours, and the problem we're trying to solve here is fairly simple.

    Args
    ----
    event : pXpeEventBase instance
        The underlying event object.

    zero_suppression : float or array
        The zero suppression threshold.

    method : str (default 'single')
        The clustering method passed to scipy.cluster.hierarchy.linkage

    metric : str (default 'euclidean')
        The metric passed to scipy.cluster.hierarchy.linkage

    criterion: str (default 'distance')
        The criterion passed to scipy.cluster.hierarchy.fcluster

    max_distance : float
        The maximum distance used by scipy.cluster.hierarchy.fcluster

    Return
    ------
    a list of xpeCluster objects sorted by pulse height.
    """
    import scipy.cluster.hierarchy
    x, y, adc_values = event.hit_data(zero_suppression, coordinate_system)
    data = numpy.vstack((x, y)).transpose()
    Z = scipy.cluster.hierarchy.linkage(data, method, metric)
    cluster_ids = scipy.cluster.hierarchy.fcluster(Z, max_distance, criterion)
    cluster_list = []
    # range() instead of the Python 2-only xrange(); fcluster ids start at 1.
    for i in range(1, max(cluster_ids) + 1):
        _mask = numpy.where(cluster_ids == i)
        cluster_list.append(xpeCluster(x[_mask], y[_mask], adc_values[_mask]))
    # Presumably xpeCluster defines comparison operators so that this sorts
    # by pulse height (see the class docstring) -- confirm.
    cluster_list.sort()
    return cluster_list
def test(filePath, num_events, zero_suppression=9, coordinate_system='xpedaq'):
    """Quick interactive test: read num_events events from a binary file,
    run the hierarchical clustering and draw the highest cluster of each.
    """
    from pyxpe.recon.binio import xpeBinaryFileWindowed
    input_file = xpeBinaryFileWindowed(filePath)
    for i in range(num_events):
        # next()/print() work under both Python 2 and 3, unlike the previous
        # input_file.next() and print-statement forms.
        event = next(input_file)
        print(event)
        cluster = hierarchical_clustering(event, zero_suppression,
                                          coordinate_system)[0]
        print(cluster)
        cluster.draw(coordinate_system)
if __name__ == '__main__':
    import argparse
    # Show default values in --help output.
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('infile', type=str,
                        help='the input binary file')
    parser.add_argument('-n', '--num_events', type=int, default=10,
                        help='number of events to be processed')
    parser.add_argument('-z', '--zero-suppression', type=int, default=9,
                        help='zero-suppression threshold')
    parser.add_argument('-c', '--coordinate-system', type=str, default='pixy',
                        help='coordinate system for the clustering')
    args = parser.parse_args()
    test(args.infile, args.num_events, args.zero_suppression,
         args.coordinate_system)
| lucabaldini/pyxpe | pyxpe/recon/clustering.py | Python | gpl-3.0 | 10,575 |
#!/usr/local/bin/python3
from bs4 import BeautifulSoup
from random import randint
from time import sleep
from datetime import datetime
import re
import requests
import xlsxwriter
# Yelp Scraper v0.0.1
# DESCRIPTION: This program browses Yelp France search results to extract
# (scrape) the desired information.
# AUTHOR: David Dahan (david-dahan.com)
# LICENCE: MIT
# -----------------------------------------------------------------------------
# ---------------------------------- CONFIG -----------------------------------
# -----------------------------------------------------------------------------
# -- Functional configuration
CITY = "Paris"
# Districts are optional, but override CITY when set
PARIS_DISTRICTS = ["Grands_Boulevards/Sentier", "Châtelet/Les_Halles"]
# Can be one of Yelp cflt (check yelp.fr to list them)
CFLTS = [
# food
"bagels", "sandwiches", "burgers", "hotdogs", "icecream", "desserts",
"cupcakes", "juicebars", "friterie",
# beautysvc
"tanning", "hair", "hair_extensions", "othersalons", "massage",
"eroticmassage", "piercing", "eyelashservice", "skincare", "spas",
"hairremoval"]
# -----------------------------------------------------------------------------
# -- Technical configuration
DEBUG = False # If True, won't parse Yelp URL but HTML test document
MAX_SLEEP = 30000 # in milliseconds
# -----------------------------------------------------------------------------
# ---------------------------- CLASSES / FUNCTIONS ----------------------------
# -----------------------------------------------------------------------------
class YelpShop(object):
    ''' Stores the desired information about a Yelp shop. '''
    def __init__(self, name="", address="", zipcode="", district="", phone="",
                 url="", categories=None):
        self.name = name
        self.address = address
        self.zipcode = zipcode
        self.district = district
        self.phone = phone
        self.url = url # Url, used as a unique ID by the scraping loop
        # WARN : Textuals categories <> cflt
        # A None default avoids the mutable-default-argument pitfall: the old
        # 'categories=[]' shared one list object across every instance.
        self.categories = [] if categories is None else categories
    def __str__(self):
        return "{0} ({1})".format(self.name, self.phone)
def mylog(msg):
    ''' Personalized print() tool, used for dummy logging '''
    prefix = "-- "
    print(prefix + msg)
def page_to_index(page_num):
    ''' Map a 1-based Yelp page number to the 0-based start index written
    in the Yelp URL (10 results per page). '''
    return 10 * (page_num - 1)
def build_arglist(elts):
    ''' Return a Yelp url-friendly string "[a,b,c]" built from a Python list.

    str.join replaces the old manual loop, which accessed elts[-1] and
    therefore raised IndexError on an empty list; now "[]" is returned.
    '''
    return "[" + ",".join(elts) + "]"
def build_yelp_url(page, c):
    ''' Build the Yelp search URL for the given page and cflt, honouring the
    CITY and PARIS_DISTRICTS config variables. '''
    parts = ["http://www.yelp.fr/search?&start={0}".format(page_to_index(page))]
    if CITY:
        parts.append("&find_loc={0}".format(CITY))
    parts.append("&cflt={0}".format(c)) # We assume that CFLTS list is not empty
    if PARIS_DISTRICTS:
        parts.append("&l=p:FR-75:Paris::{0}".format(build_arglist(PARIS_DISTRICTS)))
    return "".join(parts)
def extract_zipcode(adr):
    ''' Return the first 5-digit zipcode found in *adr*, or "" when there
    is none.

    The pattern is a raw string (so \d is not an escape-sequence hazard)
    and only the expected failures are caught: IndexError (no match) and
    TypeError (non-string input), instead of a bare except that hid every
    possible error.
    '''
    try:
        return re.compile(r'\d{5}').findall(adr)[0] # Only 1 result is expected
    except (IndexError, TypeError):
        return ""
def is_advertisement(search_result):
    ''' Return True if the search result is an ad. '''
    ad_marker = search_result.find('span', attrs={"class":u"yloca-tip"})
    # bool() keeps the original truthiness semantics of the find() result.
    return bool(ad_marker)
def r_sleep():
    ''' Sleep for a random duration between 2.000 and MAX_SLEEP/1000 seconds,
    to avoid hammering the server. '''
    duration = randint(2000, MAX_SLEEP) / 1000.0
    mylog("Safety Random Sleep has started for {0} sec".format(duration))
    sleep(duration)
    mylog("Safety Random Sleep is over")
def write_query():
    ''' Build a one-line, human-readable summary of the configured query. '''
    pieces = []
    if CITY:
        pieces.append("City: {0} - ".format(CITY))
    if PARIS_DISTRICTS:
        pieces.append("Paris districts: {0} - ".format(';'.join(PARIS_DISTRICTS)))
    pieces.append("Cflts: {0}".format(';'.join(CFLTS)))
    return "".join(pieces)
# -----------------------------------------------------------------------------
# ---------------------------------- SCRIPT -----------------------------------
# -----------------------------------------------------------------------------
# Main scraping loop: for every configured cflt, walk the result pages until
# a page yields no shops, deduplicating on the shop URL.
mylog("Script has started")
shops = [] # Init shops list
for cflt in CFLTS: # Check every cflt chosen in the config file
    cur_page = 0 # We are 'placed before' the first page
    while True: # Infinite loop will exit as soon as there is no more shops
        cur_page += 1
        # -- URL OPENING/ HTML PARSING
        cur_url = build_yelp_url(page=cur_page, c=cflt)
        mylog("Start scraping page {0} at {1}".format(cur_page, cur_url))
        # Process the URL with a fake header (so the request looks like it
        # comes from a regular desktop browser)
        fake_headers = {
            # Headers taken from Chrome spy mode
            'Connection': 'keep-alive',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.135 Safari/537.36',
            'Accept-Encoding': 'gzip, deflate, sdch',
            'Accept-Language': 'fr,en-US;q=0.8,en;q=0.6'}
        r = requests.get(cur_url, headers=fake_headers)
        soup = BeautifulSoup(r.text)
        # -- CLEANING AND PYTHON SHOP OBJECT FILLING
        cpt = 0 # Count init
        # Each shop is in a 'search-result' div
        for sr in soup.find_all('div', attrs={"class":u"search-result"}):
            # If the search result is an advertisement, go to the next one
            if is_advertisement(sr): # Won't allow ads
                continue
            try: # Try to parse desired informations per shop
                # NOTE: cpt is incremented even when parsing fails below, so
                # it counts seen (non-ad) results, not created shops.
                cpt += 1
                ext_name = sr.find('a', attrs={"class":u"biz-name"}) \
                             .get_text().strip()
                ext_address = sr.find('address').get_text().strip()
                ext_phone = sr.find('span', attrs={"class":u"biz-phone"}) \
                              .get_text().strip()
                ext_url = sr.find('a', attrs={"class":u"biz-name"})['href']
                ext_district = sr.find(
                    'span',
                    attrs={"class":u"neighborhood-str-list"}).get_text().strip()
                ext_categories = [e.get_text().strip() for e in sr.find(
                    'span', attrs={"class":u"category-str-list"}).find_all('a')]
            except: # If parsing does not work for any reason
                mylog("A shop has been ignored because of parsing error")
                continue
            # Creates a YelpShop only if does not exist, using URL as uniq ID
            if not ext_url in [s.url for s in shops]: # Won't allow duplicates
                shops.append(YelpShop(
                    name=ext_name,
                    address=ext_address,
                    zipcode=extract_zipcode(ext_address),
                    district=ext_district,
                    phone=ext_phone,
                    url=ext_url,
                    categories=ext_categories))
                mylog("New shop created: {0}".format(ext_name))
        if cpt == 0: # There is no more shops to aspire, time to exit
            break
        mylog("Finish scraping page {0} ({1} shops aspirated)".format(cur_page,
                                                                      cpt))
        # Time to sleep for safety
        r_sleep()
mylog("Scraping finished")
# -- XLSX EXPORT
mylog("Start XLSX export, there is {0} shops to write".format(len(shops)))
# Init workbook/worksheet
now = datetime.now()
# strftime instead of str(now): the raw form contains spaces and colons,
# which are not valid in filenames on every platform (e.g. Windows).
filename = "yelpscrap-{date}.xlsx".format(date=now.strftime("%Y-%m-%d_%H-%M-%S"))
workbook = xlsxwriter.Workbook(filename)
worksheet = workbook.add_worksheet()
# Write Metadata
# -- Query (row 0 holds a one-line summary of the scraping parameters)
row = 0
col = 0
worksheet.write(row, col, write_query())
# -- Headers
row = 1
col = 0
heads = ("Shop name", "Address", "ZipCode", "District", "Phone", "Categories")
for head in heads:
    worksheet.write(row, col, head)
    col += 1
# Write Data
row = 2
col = 0
# Fixed the accidental 'url_format = url_format =' double assignment.
url_format = workbook.add_format({'font_color': 'blue',
                                  'underline': 1}) # for URLs
for shop in shops:
    worksheet.write_url(row, col, "http://www.yelp.fr{0}".format(shop.url),
                        url_format, shop.name)
    worksheet.write(row, col+1, shop.address)
    worksheet.write(row, col+2, shop.zipcode)
    worksheet.write(row, col+3, shop.district)
    worksheet.write(row, col+4, shop.phone)
    worksheet.write(row, col+5, ';'.join(shop.categories)) # Clean display
    row += 1
workbook.close()
mylog("Finish XLSX export at {0}".format(filename))
| ddahan/yelp-scraper | scrapper.py | Python | mit | 8,743 |
from mudpyl.library.colourtells import TellColourer
from mudpyl.metaline import RunLengthList, Metaline, simpleml
from mudpyl.net.telnet import TelnetClientFactory, TelnetClient
from mudpyl.colours import WHITE, BLACK, fg_code, bg_code
from mock import Mock
class Test_TellColourer:
    """Unit tests for TellColourer: alias/trigger registration, tell pattern
    matching, and colour consistency across sent/received tells."""
    def setUp(self):
        # Fresh factory + mocked protocol/telnet per test; the TellColourer
        # under test registers itself on the factory's realm.
        self.fact = TelnetClientFactory(None, 'ascii', None)
        self.p = Mock()
        self.fact.realm.addProtocol(self.p)
        self.fact.realm.telnet = Mock(spec = TelnetClient)
        self.tc = TellColourer(self.fact.realm)
    def test_adds_aliases(self):
        assert self.tc.sending_tell in self.fact.realm.aliases
    def test_adds_triggers(self):
        assert self.tc.tell_sent in self.fact.realm.triggers
        assert self.tc.no_tell_sent in self.fact.realm.triggers
        assert self.tc.tell_received in self.fact.realm.triggers
    def test_tell_received_matches_on_tells_in_languages(self):
        ml = Metaline('Foo tells you in Foolang, "Bar."',
                      None, None)
        assert list(self.tc.tell_received.match(ml))
    def test_tell_sent_matches_on_tells_in_languages(self):
        ml = Metaline('You tell Mister Foo in Barlang, "Qux."',
                      None, None)
        assert list(self.tc.tell_sent.match(ml))
    def test_tell_sent_pattern_is_not_greedy(self):
        # The name group must stop at the first comma, even with nested quotes.
        ml = Metaline('You tell Foo, "Bar, "baz.""', None, None)
        match = list(self.tc.tell_sent.match(ml))
        assert match[0].group(1) == 'Foo'
    def test_no_tell_sent_doesnt_cock_up(self):
        # A failed tell ("Whom do you wish to tell to?") in between must not
        # disturb the colour assigned to Bar's conversation.
        ml = simpleml('Bar tells you, "Blah."', fg_code(WHITE, False),
                      bg_code(BLACK))
        self.fact.realm.metalineReceived(ml)
        ml_written = self.p.metalineReceived.call_args[0][0]
        colour_expecting = ml_written.fores.get_at(0)
        self.fact.realm.send("tell baz blah")
        ml = simpleml("Whom do you wish to tell to?", fg_code(WHITE, False),
                      bg_code(BLACK))
        self.fact.realm.metalineReceived(ml)
        self.fact.realm.send("tell bar blah")
        ml = simpleml('You tell Bar, "Blah."', fg_code(WHITE, False),
                      bg_code(BLACK))
        self.fact.realm.metalineReceived(ml)
        ml_written_2 = self.p.metalineReceived.call_args[0][0]
        assert ml_written_2.fores.get_at(10) == colour_expecting
    def test_tell_sending_alias_is_caseless_wrt_matching(self):
        assert list(self.tc.sending_tell.match("TELL foo bar"))
#XXX
| Nik0las1984/mudpyl | mudpyl/library/tests/test_colourtells.py | Python | gpl-2.0 | 2,485 |
class Solution:
    # @param {string} beginWord
    # @param {string} endWord
    # @param {set<string>} wordDict
    # @return {integer}
    def ladderLength(self, beginWord, endWord, wordDict):
        """Return the length of the shortest transformation sequence from
        beginWord to endWord (changing one letter at a time, every
        intermediate word in wordDict), or 0 if none exists.

        Bidirectional BFS: always expand the smaller frontier.
        """
        # Coerce to a set: the set-difference below ('wordDict -= front')
        # raised TypeError when a plain list was passed in, and membership
        # tests are O(1) on a set anyway.
        wordDict = set(wordDict)
        # discard() does not raise when the word is absent, unlike remove().
        wordDict.discard(beginWord)
        wordDict.discard(endWord)
        front, back = {beginWord}, {endWord}
        length = 2
        width = len(beginWord)
        charset = 'abcdefghijklmnopqrstuvwxyz'
        while front:
            new_front = set()
            for word in front:
                for i in range(width):
                    for c in charset:
                        candidate = word[:i] + c + word[i+1:]
                        if candidate in back:
                            return length
                        if candidate in wordDict:
                            new_front.add(candidate)
            front = new_front
            # Keep expanding the smaller side of the search.
            if len(front) > len(back):
                front, back = back, front
            wordDict -= front
            length += 1
        return 0
if __name__ == '__main__':
    # Pass a *set*: Solution.ladderLength uses set operations
    # (wordDict -= front) that fail on a plain list.
    word_dict = {'hot', 'dot', 'dog', 'lot', 'log'}
    begin_word = 'hit'
    end_word = 'cog'
    word_dict.add(begin_word)
    word_dict.add(end_word)
    print(Solution().ladderLength(begin_word, end_word, word_dict))
import os
import re
import gzip
import shutil
import gzip
import subprocess
import nibabel as nib
import ntpath
import pandas as pd
import numpy as np
import tempfile
import nibabel as nib
from nipype.interfaces.base import (TraitedSpec, File, traits, InputMultiPath, CommandLine, CommandLineInputSpec,
BaseInterface, OutputMultiPath, BaseInterfaceInputSpec, isdefined)
def nib_load_3d(fn):
    """Load *fn* with nibabel and return it squeezed down to a 3D image
    (any trailing singleton dimensions are dropped)."""
    img = nib.load(fn)
    data = img.get_data()
    data = data.reshape(data.shape[0:3])
    return nib.Nifti1Image(data, img.affine)
def cmd(command):
    """Run *command* through the shell and print its combined stdout/stderr.

    On failure the status and captured output are printed and SystemExit(1)
    is raised -- equivalent to the old exit(1), but without relying on the
    interactive-only 'exit' helper injected by the site module.

    WARNING: the command is executed with shell=True, so it must never be
    built from untrusted input.
    """
    try:
        output = subprocess.check_output(command, stderr=subprocess.STDOUT,
                                         shell=True, universal_newlines=True)
    except subprocess.CalledProcessError as exc:
        print("Status : FAIL", exc.returncode, exc.output)
        raise SystemExit(1)
    else:
        print("Output: \n{}\n".format(output))
def splitext(s):
    """Split *s* into [basepath, extension], keeping multi-part extensions
    together, e.g. '/d/brain.nii.gz' -> ['/d/brain', '.nii.gz'].

    Returns *s* unchanged when it is not path-like (TypeError), mirroring
    the historical behaviour.
    """
    try:
        parts = os.path.basename(s).split('.')
        if len(parts) < 2:
            # No extension. The old code returned re.sub('.', '', s) here,
            # which deleted *every* character of the path.
            return [s, '']
        ext = '.' + '.'.join(parts[1:])
        # Slice instead of re.sub(ext, '', s): the extension contains '.'
        # characters that re would treat as "match any" wildcards.
        return [s[:len(s) - len(ext)], ext]
    except TypeError:
        return s
def gz(ii, oo):
    """Gzip-compress the file at path *ii* into *oo*."""
    with open(ii, 'rb') as src, gzip.open(oo, 'wb') as dst:
        shutil.copyfileobj(src, dst)
def gunzip(ii, oo):
    """Decompress the gzip file at path *ii* into the plain file *oo*."""
    with gzip.open(ii, 'rb') as src, open(oo, 'wb') as dst:
        shutil.copyfileobj(src, dst)
def check_gz(in_file_fn) :
    """If *in_file_fn* is gzipped, decompress it into a fresh temporary
    directory and return the new '.nii' path; otherwise return the path
    unchanged. Any sidecar '.sif' file is copied to /tmp alongside.
    """
    img, ext = splitext(in_file_fn)
    if '.gz' in ext :
        # os.path.join: the old string concatenation dropped the path
        # separator after mkdtemp(), creating the file *next to* the
        # temporary directory instead of inside it.
        out_file_fn = os.path.join(tempfile.mkdtemp(),
                                   os.path.basename(img) + '.nii')
        sif = img + '.sif'
        if os.path.exists(sif) :
            shutil.copy(sif, '/tmp/'+os.path.basename(img)+'.sif' )
        gunzip(in_file_fn, out_file_fn)
        return out_file_fn
    else :
        return in_file_fn
class separate_mask_labelsOutput(TraitedSpec):
    """Output spec: path of the 4D image with one binary volume per label."""
    out_file=traits.File(argstr="%s", desc="4D label image")
class separate_mask_labelsInput(TraitedSpec):
    """Input spec: 3D integer label image plus optional output filename."""
    in_file=traits.File(argstr="%s", desc="3D label image")
    out_file=traits.File(argstr="%s", desc="4D label image")
class separate_mask_labelsCommand(BaseInterface ):
    """Nipype interface that splits a 3D integer label image into a 4D image
    with one binary mask volume per unique non-zero label value."""
    input_spec = separate_mask_labelsInput
    output_spec = separate_mask_labelsOutput
    def _run_interface(self, runtime):
        vol = nib.load(self.inputs.in_file)
        data = vol.get_data()
        data = data.reshape(*data.shape[0:3])
        if not isdefined(self.inputs.out_file) :
            self.inputs.out_file = self._gen_outputs(self.inputs.in_file)
        unique = np.unique( data ).astype(int)
        # NOTE(review): this assumes the background label 0 is present (it is
        # sorted first by np.unique), so volume t-1 corresponds to unique[t].
        nUnique = len(unique)-1
        out = np.zeros( [data.shape[0], data.shape[1], data.shape[2], nUnique] )
        print('unique', unique)
        print('shape', out.shape)
        print('data', data.shape)
        for t, i in enumerate( unique ) :
            if i != 0 :
                print(t-1, i)
                out[ data == i, t-1 ] = 1
        # vol.affine rather than the deprecated get_affine(), which was
        # removed in nibabel 3.0.
        out_file = nib.Nifti1Image(out, vol.affine, vol.header)
        out_file.to_filename(self.inputs.out_file)
        return runtime
    def _gen_outputs(self, fn) :
        """Derive a default '<basename>_4d<ext>' path in the working dir."""
        fn_split = splitext(fn)
        return os.getcwd() + os.sep + os.path.basename( fn_split[0] ) + "_4d" + fn_split[1]
    def _list_outputs(self):
        outputs = self.output_spec().get()
        if not isdefined(self.inputs.out_file) :
            self.inputs.out_file = self._gen_outputs(self.inputs.in_file)
        outputs["out_file"] = self.inputs.out_file
        return outputs
class concat_dfOutput(TraitedSpec):
    """Output spec: path of the concatenated CSV file."""
    out_file = traits.File(desc="Output file")
class concat_dfInput(BaseInterfaceInputSpec):
    """Input spec: list of CSV files to concatenate row-wise."""
    in_list = traits.List(mandatory=True, exists=True, desc="Input list")
    out_file = traits.File(mandatory=True, desc="Output file")
    test = traits.Bool(default=False, usedefault=True, desc="Flag for if df is part of test run of pipeline")
class concat_df(BaseInterface):
    """Nipype interface that concatenates a list of CSV files row-wise into
    a single CSV file."""
    input_spec = concat_dfInput
    output_spec = concat_dfOutput
    def _run_interface(self, runtime):
        # Read all frames first and concatenate once: the old pd.concat
        # inside the loop re-copied the accumulated frame on every
        # iteration (quadratic in the total number of rows).
        frames = [pd.read_csv(f) for f in self.inputs.in_list]
        if frames:
            df = pd.concat(frames, axis=0)
        else:
            df = pd.DataFrame([])
        df.to_csv(self.inputs.out_file, index=False)
        return runtime
    def _list_outputs(self):
        outputs = self.output_spec().get()
        # NOTE(review): assumes out_file is a relative filename; an absolute
        # path would be mangled by this concatenation.
        outputs["out_file"] = os.getcwd() + os.sep + self.inputs.out_file
        return outputs
class ConcatOutput(TraitedSpec):
    """Output spec for an image-concatenation command line."""
    out_file = File(exists=True, desc="resampled image")
class ConcatInput(CommandLineInputSpec):
    """Input spec for an image-concatenation command line (argstr values
    suggest a minc-style 'concat' tool)."""
    in_file = InputMultiPath(File(mandatory=True), position=0, argstr='%s', desc='List of input images.')
    out_file = File(position=1, argstr="%s", mandatory=True, desc="Output image.")
    dimension = traits.Str(argstr="-concat_dimension %s", desc="Concatenate along a given dimension.")
    start = traits.Float(argstr="-start %s", desc="Starting coordinate for new dimension.")
    step = traits.Float(argstr="-step %s", desc="Step size for new dimension.")
    clobber = traits.Bool(argstr="-clobber", usedefault=True, default_value=True, desc="Overwrite output file")
    verbose = traits.Bool(argstr="-verbose", usedefault=True, default_value=True, desc="Write messages indicating progress")
class copyOutput(TraitedSpec):
    """Output spec: path of the copied file."""
    output_file=traits.File(argstr="%s", desc="input")
class copyInput(TraitedSpec):
    """Input spec: source file plus optional destination path."""
    input_file=traits.File(argstr="%s", desc="input")
    output_file=traits.File(argstr="%s", desc="output")
class copyCommand(BaseInterface ):
    """Nipype interface that copies input_file to output_file, defaulting
    the destination to the current working directory."""
    input_spec = copyInput
    output_spec = copyOutput
    def _run_interface(self, runtime):
        if not isdefined(self.inputs.output_file) :
            self.inputs.output_file = self._gen_output(self.inputs.input_file)
        shutil.copy(self.inputs.input_file, self.inputs.output_file)
        return(runtime)
    def _gen_output(self, fn) :
        # Default destination: same basename, current working directory.
        return os.getcwd() + os.sep + os.path.basename( fn )
    def _list_outputs(self):
        outputs = self.output_spec().get()
        if not isdefined(self.inputs.output_file) :
            self.inputs.output_file = self._gen_output(self.inputs.input_file)
        outputs["output_file"] = self.inputs.output_file
        return outputs
#In theory, this information should be contained in the header.
# Often, however, the information is either missing from the header or saved under an unexpected variable name (e.g., "Patient_Weight", "body_weight", "weight").
# One way around this problem is to let the user create a .csv file with the subject
#name and the parameter of interest. This way, if the parameter cannot be read from the header, it can still be read from the text file.
class subject_parameterOutput(TraitedSpec):
    """Output spec: the resolved parameter value, as a string."""
    parameter=traits.String(argstr="%s", desc="Subject parameter")
class subject_parameterInput(TraitedSpec):
    """Input spec: parameter_name is either a CSV path (subject id -> value)
    or the name of a key to look up in the PET header dictionary."""
    parameter_name=traits.String(argstr="%s", desc="File containing subject parameters")
    header = traits.Dict(desc="Python dictionary containing PET header")
    parameter=traits.String(argstr="%s", desc="Subject parameter")
    sid=traits.String(desc="Subject ID")
class subject_parameterCommand(BaseInterface ):
    """Resolve a per-subject acquisition parameter, either from a CSV file
    mapping subject id -> value or by searching the PET header dict."""
    input_spec = subject_parameterInput
    output_spec = subject_parameterOutput
    def _run_interface(self, runtime):
        parameter_name = self.inputs.parameter_name
        header = self.inputs.header
        sid = self.inputs.sid
        if os.path.exists(parameter_name):
            #Case 1: parameter_name is the path of a file listing subjects
            # and parameters --> read the value for this subject id from it
            df=pd.read_csv(parameter_name, header=None)
            # Column 0 holds subject ids, column 1 the parameter values.
            parameter=df.iloc[:, 1][ df.iloc[:,0] == sid ].values[0]
        #Case 2: parameter_name is a string representing the name of the parameter
        else:
            # NOTE(review): _finditem is not defined in this module's visible
            # code -- presumably a recursive dict lookup imported elsewhere.
            parameter=_finditem(header, parameter_name)
            if type(parameter) == list:
                parameter=parameter[0]
        #convert scientific notation number to floating point number, stored as string
        try:
            parameter=format(float(parameter), 'f')
        except ValueError: pass
        self.inputs.parameter=str(parameter)
        return(runtime)
    def _list_outputs(self):
        outputs = self.output_spec().get()
        outputs["parameter"] = self.inputs.parameter
        return outputs
| APPIAN-PET/APPIAN | src/utils.py | Python | mit | 8,440 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
"""
from __future__ import unicode_literals
import logging
from django.contrib import admin
from models import *
# Get an instance of a logger
logger = logging.getLogger(__name__)
class AccessAdmin(admin.ModelAdmin):
    """Django admin configuration for the Access model: list columns,
    inline editing of username/password, and search fields."""
    list_display = ('id', 'created', 'updated', 'username', 'password')
    list_display_links = ('id', 'created', 'updated')
    list_editable = ('username', 'password')
    search_fields = ('id', 'username', 'password')
    #prepopulated_fields = {'slug': ('title', )}
    """
    fieldsets = (
        (None, {
            'fields': ('title', 'body')
        }),
        ('Descriptions', {
            'classes': ('collapse',),
            'fields': ('active', 'subtitle', 'description')
        }),
        ('Media/Files/Graphics', {
            'classes': ('collapse',),
            'fields': ('manpages')
        }),
    )
    """
    #list_filter = ('menuitem__section','created',)
    #raw_id_fields = ('manpages', 'images', 'files', 'videos')
# Register the model with its admin options.
admin.site.register(Access, AccessAdmin)
| rangertaha/salt-manager | salt-manager/webapp/apps/fabric/fabhistory/admin.py | Python | mit | 1,074 |
#* dslib - Python library for Datove schranky
#* Copyright (C) 2009-2012 CZ.NIC, z.s.p.o. (http://www.nic.cz)
#*
#* This library is free software; you can redistribute it and/or
#* modify it under the terms of the GNU Library General Public
#* License as published by the Free Software Foundation; either
#* version 2 of the License, or (at your option) any later version.
#*
#* This library is distributed in the hope that it will be useful,
#* but WITHOUT ANY WARRANTY; without even the implied warranty of
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
#* Library General Public License for more details.
#*
#* You should have received a copy of the GNU Library General Public
#* License along with this library; if not, write to the Free
#* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#*
'''
Created on Dec 3, 2009
'''
# standard library imports
import string
# dslib imports
from pyasn1.type import tag,namedtype,univ,useful
from pyasn1 import error
# local imports
from tools import *
from oid import oid_map as oid_map
from general_types import *
class Extension(univ.Sequence):
  """X.509 v3 certificate extension (RFC 5280, sec. 4.1): OID, criticality
  flag and the DER-encoded extension value as an OCTET STRING."""
  componentType = namedtype.NamedTypes(
    namedtype.NamedType('extnID', univ.ObjectIdentifier()),
    namedtype.DefaultedNamedType('critical', univ.Boolean('False')),
    namedtype.NamedType('extnValue', univ.OctetString())
    #namedtype.NamedType('extnValue', ExtensionValue())
    )
class Extensions(univ.SequenceOf):
  """Extensions ::= SEQUENCE OF Extension."""
  componentType = Extension()
  # NOTE(review): this just re-assigns the inherited sizeSpec; upstream
  # definitions usually add a ValueSizeConstraint here -- confirm intent.
  sizeSpec = univ.SequenceOf.sizeSpec
class SubjectPublicKeyInfo(univ.Sequence):
  """SubjectPublicKeyInfo ::= SEQUENCE { algorithm, subjectPublicKey }."""
  componentType = namedtype.NamedTypes(
    namedtype.NamedType('algorithm', AlgorithmIdentifier()),
    namedtype.NamedType('subjectPublicKey', ConvertibleBitString())
    )
class UniqueIdentifier(ConvertibleBitString): pass
class Time(univ.Choice):
  """Time ::= CHOICE { utcTime UTCTime, generalTime GeneralizedTime }."""
  componentType = namedtype.NamedTypes(
    namedtype.NamedType('utcTime', useful.UTCTime()),
    namedtype.NamedType('generalTime', useful.GeneralizedTime())
    )
  def __str__(self):
    # Delegate to whichever time alternative is currently set.
    return str(self.getComponent())
class Validity(univ.Sequence):
  """Validity ::= SEQUENCE { notBefore Time, notAfter Time }."""
  componentType = namedtype.NamedTypes(
    namedtype.NamedType('notBefore', Time()),
    namedtype.NamedType('notAfter', Time())
    )
class CertificateSerialNumber(univ.Integer): pass
class Version(univ.Integer):
  """Certificate version: v1(0), v2(1), v3(2)."""
  # NOTE(review): namedval is not among this module's explicit imports;
  # presumably it arrives via one of the wildcard imports above -- confirm.
  namedValues = namedval.NamedValues(
    ('v1', 0), ('v2', 1), ('v3', 2)
    )
class TBSCertificate(univ.Sequence):
  """The to-be-signed portion of an X.509 certificate (RFC 5280 4.1).
  version is EXPLICIT-tagged [0]; the unique IDs and extensions carry
  context tags [1], [2] and [3] respectively."""
  componentType = namedtype.NamedTypes(
    namedtype.DefaultedNamedType('version', Version('v1', tagSet=Version.tagSet.tagExplicitly(tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)))),
    namedtype.NamedType('serialNumber', CertificateSerialNumber()),
    namedtype.NamedType('signature', AlgorithmIdentifier()),
    namedtype.NamedType('issuer', Name()),
    namedtype.NamedType('validity', Validity()),
    namedtype.NamedType('subject', Name()),
    namedtype.NamedType('subjectPublicKeyInfo', SubjectPublicKeyInfo()),
    namedtype.OptionalNamedType('issuerUniqueID', UniqueIdentifier().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
    namedtype.OptionalNamedType('subjectUniqueID', UniqueIdentifier().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
    namedtype.OptionalNamedType('extensions', Extensions().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3)))
    )
class Certificate(univ.Sequence):
  """Certificate ::= SEQUENCE { tbsCertificate, signatureAlgorithm,
  signatureValue } (RFC 5280 4.1)."""
  componentType = namedtype.NamedTypes(
    namedtype.NamedType('tbsCertificate', TBSCertificate()),
    namedtype.NamedType('signatureAlgorithm', AlgorithmIdentifier()),
    namedtype.NamedType('signatureValue', ConvertibleBitString())
    )
class Certificates(univ.SetOf):
  """SET OF Certificate."""
  componentType = Certificate()
| yarda/dslib | pkcs7/asn1_models/X509_certificate.py | Python | lgpl-2.1 | 3,946 |
import numpy as np
from ._layout import Layout
from ._multivector import MultiVector
class ConformalLayout(Layout):
    r"""
    A layout for a conformal algebra, which adds extra constants and helpers.
    Typically these should be constructed via :func:`clifford.conformalize`.
    .. versionadded:: 1.2.0
    Attributes
    ----------
    ep : MultiVector
        The first added basis element, :math:`e_{+}`, usually with :math:`e_{+}^2 = +1`
    en : MultiVector
        The first added basis element, :math:`e_{-}`, usually with :math:`e_{-}^2 = -1`
    eo : MultiVector
        The null basis vector at the origin, :math:`e_o = 0.5(e_{-} - e_{+})`
    einf : MultiVector
        The null vector at infinity, :math:`e_\infty = e_{-} + e_{+}`
    E0 : MultiVector
        The minkowski subspace bivector, :math:`e_\infty \wedge e_o`
    I_base : MultiVector
        The pseudoscalar of the base ga, in cga layout
    """
    def __init__(self, *args, layout=None, **kwargs):
        super().__init__(*args, **kwargs)
        self._base_layout = layout
        # The two conformal basis vectors are appended after the base ones.
        ep, en = self.basis_vectors_lst[-2:]
        # setup null basis, and minkowski subspace bivector
        # (^ with a scalar acts as scalar multiplication here)
        eo = .5 ^ (en - ep)
        einf = en + ep
        E0 = einf ^ eo
        I_base = self.pseudoScalar*E0
        # helper properties
        self.ep = ep
        self.en = en
        self.eo = eo
        self.einf = einf
        self.E0 = E0
        self.I_base = I_base
    @classmethod
    def _from_base_layout(cls, layout, added_sig=[1, -1], **kwargs) -> 'ConformalLayout':
        """ helper to implement :func:`clifford.conformalize` """
        # added_sig is never mutated in this method, so the shared list
        # default is harmless here.
        sig_c = list(layout.sig) + added_sig
        return cls(
            sig_c,
            ids=layout._basis_vector_ids.augmented_with(len(added_sig)),
            layout=layout, **kwargs)
    # some convenience functions
    def up(self, x: MultiVector) -> MultiVector:
        """ up-project a vector from GA to CGA """
        try:
            if x.layout == self._base_layout:
                # vector is in original space, map it into conformal space
                old_val = x.value
                new_val = np.zeros(self.gaDims)
                new_val[:len(old_val)] = old_val
                x = self.MultiVector(value=new_val)
        except(AttributeError):
            # if x is a scalar it doesnt have layout but following
            # will still work
            pass
        # then up-project into a null vector
        return x + (.5 ^ ((x**2)*self.einf)) + self.eo
    def homo(self, x: MultiVector) -> MultiVector:
        """ homogenize a CGA vector """
        # Scale so that the e_inf component is normalized; [()] extracts
        # the scalar part of the inner product.
        return x/(-x | self.einf)[()]
    def down(self, x: MultiVector) -> MultiVector:
        """ down-project a vector from CGA to GA """
        # Project onto the E0 plane after homogenization.
        x_down = (self.homo(x) ^ self.E0)*self.E0
        # new_val = x_down.value[:self.base_layout.gaDims]
        # create vector in self.base_layout (not cga)
        # x_down = self.base_layout.MultiVector(value=new_val)
        return x_down
| arsenovic/clifford | clifford/_conformal_layout.py | Python | bsd-3-clause | 2,999 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.