# NOTE: dataset metadata header (not Python code) — columns were:
# code | repo_name | path | language | license | size
from __future__ import unicode_literals
from django.db import models
# Create your models here.
class TimeStampedModel(models.Model):
    """
    An abstract base class model that provides self-updating "created" and "modified" fields.
    """
    # Set once, when the row is first inserted.
    created = models.DateTimeField(auto_now_add=True)
    # Refreshed automatically on every save().
    modified = models.DateTimeField(auto_now=True)

    class Meta:
        # No table is created for this model; concrete subclasses inherit
        # the two timestamp fields.
        abstract = True
# --- end of shoppinglist/core/models.py (joehalloran/shoppinglist_project, apache-2.0) ---
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.conf import settings
from django.core.urlresolvers import reverse
from django.utils.http import urlencode
from django.utils.translation import ugettext_lazy as _
from keystoneclient import exceptions
from horizon import messages
from horizon import tables
from openstack_dashboard import api
from openstack_dashboard.dashboards.admin.domains import constants
LOG = logging.getLogger(__name__)
class ViewGroupsLink(tables.LinkAction):
    """Row action that opens the domain-update workflow directly on its
    group-membership step."""
    name = "groups"
    verbose_name = _("Modify Groups")
    url = "horizon:admin:domains:update"
    classes = ("ajax-modal",)
    icon = "pencil"

    def get_link_url(self, domain):
        # Deep-link into the workflow by passing the wanted step as a
        # query-string parameter.
        base_url = reverse(self.url, args=[domain.id])
        query = urlencode({"step": 'update_group_members'})
        return "%s?%s" % (base_url, query)
class CreateDomainLink(tables.LinkAction):
    """Table-level action that opens the "Create Domain" modal."""
    name = "create"
    verbose_name = _("Create Domain")
    url = constants.DOMAINS_CREATE_URL
    classes = ("ajax-modal",)
    icon = "plus"
    policy_rules = (('identity', 'identity:create_domain'),)

    def allowed(self, request, domain):
        # Only offered when the keystone backend can actually modify
        # domains (e.g. hidden for read-only identity backends).
        return api.keystone.keystone_can_edit_domain()
class EditDomainLink(tables.LinkAction):
    """Row action that opens the "Edit Domain" modal for one domain."""
    name = "edit"
    verbose_name = _("Edit")
    url = constants.DOMAINS_UPDATE_URL
    classes = ("ajax-modal",)
    icon = "pencil"
    policy_rules = (('identity', 'identity:update_domain'),)

    def allowed(self, request, domain):
        # Only offered when the keystone backend can actually modify
        # domains (e.g. hidden for read-only identity backends).
        return api.keystone.keystone_can_edit_domain()
class DeleteDomainsAction(tables.DeleteAction):
    """Action that deletes keystone domains.

    Keystone refuses to delete an enabled domain, so an explicit error is
    raised for enabled domains instead of letting the API call fail with a
    less friendly message.
    """
    name = "delete"
    data_type_singular = _("Domain")
    data_type_plural = _("Domains")
    policy_rules = (('identity', 'identity:delete_domain'),)

    def allowed(self, request, datum):
        # Hidden when the keystone backend cannot modify domains.
        return api.keystone.keystone_can_edit_domain()

    def delete(self, request, obj_id):
        domain = self.table.get_object_by_id(obj_id)
        if domain.enabled:
            msg = _('Domain "%s" must be disabled before it can be deleted.') \
                % domain.name
            messages.error(request, msg)
            # 409 Conflict mirrors what keystone itself would answer.
            raise exceptions.ClientException(409, msg)
        else:
            # Pass obj_id as a lazy %-argument so the message is only
            # rendered when this log level is actually emitted.
            LOG.info('Deleting domain "%s".', obj_id)
            api.keystone.domain_delete(request, obj_id)
class DomainFilterAction(tables.FilterAction):
    """Search box for the domains table; only shown when keystone
    multi-domain support is enabled in settings."""

    def allowed(self, request, datum):
        multidomain_support = getattr(settings,
                                      'OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT',
                                      False)
        return multidomain_support

    def filter(self, table, domains, filter_string):
        """Naive case-insensitive search."""
        q = filter_string.lower()
        # A list comprehension always returns a list, matching the Python 2
        # behaviour of builtin filter(); on Python 3 filter() would return
        # an iterator instead. It also removes the nested helper function.
        return [domain for domain in domains if q in domain.name.lower()]
class SetDomainContext(tables.Action):
    """Scope the identity panels to a single domain ("domain context")."""
    name = "set_domain_context"
    verbose_name = _("Set Domain Context")
    url = constants.DOMAINS_INDEX_URL
    preempt = True
    policy_rules = (('identity', 'admin_required'),)

    def allowed(self, request, datum):
        # The action only makes sense with multi-domain support enabled.
        if not getattr(settings,
                       'OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT',
                       False):
            return False
        # Hide the action on the row of the domain that is already the
        # active context.
        current = request.session.get("domain_context", None)
        return not (current and datum.id == current)

    def single(self, table, request, obj_id):
        already_current = ('domain_context' in request.session and
                           request.session['domain_context'] == obj_id)
        if already_current:
            return
        try:
            domain = api.keystone.domain_get(request, obj_id)
            request.session['domain_context'] = obj_id
            request.session['domain_context_name'] = domain.name
            messages.success(request,
                             _('Domain Context updated to Domain %s.') %
                             domain.name)
        except Exception:
            messages.error(request,
                           _('Unable to set Domain Context.'))
class UnsetDomainContext(tables.Action):
    """Drop the active domain context from the session, if any."""
    name = "clear_domain_context"
    verbose_name = _("Clear Domain Context")
    url = constants.DOMAINS_INDEX_URL
    preempt = True
    requires_input = False
    policy_rules = (('identity', 'admin_required'),)

    def allowed(self, request, datum):
        # Only shown while a domain context is actually set.
        return request.session.get("domain_context", None) is not None

    def single(self, table, request, obj_id):
        if 'domain_context' not in request.session:
            return
        request.session.pop("domain_context")
        request.session.pop("domain_context_name")
        messages.success(request, _('Domain Context cleared.'))
class DomainsTable(tables.DataTable):
    """Admin-dashboard table listing keystone domains."""
    name = tables.Column('name', verbose_name=_('Name'))
    # Not every domain carries a description, so fall back to None.
    description = tables.Column(lambda obj: getattr(obj, 'description', None),
                                verbose_name=_('Description'))
    id = tables.Column('id', verbose_name=_('Domain ID'))
    enabled = tables.Column('enabled', verbose_name=_('Enabled'), status=True)

    class Meta:
        name = "domains"
        verbose_name = _("Domains")
        row_actions = (SetDomainContext, ViewGroupsLink, EditDomainLink,
                       DeleteDomainsAction)
        table_actions = (DomainFilterAction, CreateDomainLink,
                         DeleteDomainsAction, UnsetDomainContext)
# --- end of openstack_dashboard/dashboards/admin/domains/tables.py (JioCloud/horizon, apache-2.0) ---
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.orchestration import base
from tempest.common.utils import data_utils
from tempest.lib import exceptions as lib_exc
from tempest import test
class TestSoftwareConfig(base.BaseOrchestrationTest):
    """API tests for Heat software-config and software-deployment resources.

    setUp creates two software configs ('a' and 'b') and one deployment
    bound to config 'a'; the tests exercise the get/list/update flows
    against those fixtures. Cleanups are registered so every created
    resource is deleted (and verified gone) after each test.
    """

    def setUp(self):
        super(TestSoftwareConfig, self).setUp()
        self.configs = []
        # Add 2 sets of software configuration
        self.configs.append(self._config_create('a'))
        self.configs.append(self._config_create('b'))
        # Create a deployment using config a's id
        self._deployment_create(self.configs[0]['id'])

    def _config_create(self, suffix):
        """Create one software config and register its cleanup.

        Returns the request dict augmented with the server-assigned 'id'.
        """
        configuration = {'group': 'script',
                         'inputs': [],
                         'outputs': [],
                         'options': {}}
        configuration['name'] = 'heat_soft_config_%s' % suffix
        configuration['config'] = '#!/bin/bash echo init-%s' % suffix
        api_config = self.client.create_software_config(**configuration)
        configuration['id'] = api_config['software_config']['id']
        self.addCleanup(self._config_delete, configuration['id'])
        self._validate_config(configuration, api_config)
        return configuration

    def _validate_config(self, configuration, api_config):
        # Assert all expected keys are present with matching data
        for k in configuration:
            self.assertEqual(configuration[k],
                             api_config['software_config'][k])

    def _deployment_create(self, config_id):
        """Create a software deployment of ``config_id`` on a dummy server.

        Stores the request parameters on ``self`` so the update tests can
        reuse them later.
        """
        self.server_id = data_utils.rand_name('dummy-server')
        self.action = 'ACTION_0'
        self.status = 'STATUS_0'
        self.input_values = {}
        self.output_values = []
        self.status_reason = 'REASON_0'
        self.signal_transport = 'NO_SIGNAL'
        self.deployment = self.client.create_software_deploy(
            self.server_id, config_id, self.action, self.status,
            self.input_values, self.output_values, self.status_reason,
            self.signal_transport)
        self.deployment_id = self.deployment['software_deployment']['id']
        self.addCleanup(self._deployment_delete, self.deployment_id)

    def _deployment_delete(self, deploy_id):
        """Delete a deployment and verify it no longer exists."""
        self.client.delete_software_deploy(deploy_id)
        # Testing that it is really gone
        self.assertRaises(
            lib_exc.NotFound, self.client.show_software_deployment,
            self.deployment_id)

    def _config_delete(self, config_id):
        """Delete a software config and verify it no longer exists."""
        self.client.delete_software_config(config_id)
        # Testing that it is really gone
        self.assertRaises(
            lib_exc.NotFound, self.client.show_software_config, config_id)

    @test.attr(type='smoke')
    @test.idempotent_id('136162ed-9445-4b9c-b7fc-306af8b5da99')
    def test_get_software_config(self):
        """Testing software config get."""
        for conf in self.configs:
            api_config = self.client.show_software_config(conf['id'])
            self._validate_config(conf, api_config)

    @test.attr(type='smoke')
    @test.idempotent_id('1275c835-c967-4a2c-8d5d-ad533447ed91')
    def test_get_deployment_list(self):
        """Getting a list of all deployments"""
        deploy_list = self.client.list_software_deployments()
        deploy_ids = [deploy['id'] for deploy in
                      deploy_list['software_deployments']]
        self.assertIn(self.deployment_id, deploy_ids)

    @test.attr(type='smoke')
    @test.idempotent_id('fe7cd9f9-54b1-429c-a3b7-7df8451db913')
    def test_get_deployment_metadata(self):
        """Testing deployment metadata get"""
        metadata = self.client.show_software_deployment_metadata(
            self.server_id)
        conf_ids = [conf['id'] for conf in metadata['metadata']]
        self.assertIn(self.configs[0]['id'], conf_ids)

    def _validate_deployment(self, action, status, reason, config_id):
        # Re-fetch the deployment and check its four mutable fields.
        deployment = self.client.show_software_deployment(self.deployment_id)
        self.assertEqual(action, deployment['software_deployment']['action'])
        self.assertEqual(status, deployment['software_deployment']['status'])
        self.assertEqual(reason,
                         deployment['software_deployment']['status_reason'])
        self.assertEqual(config_id,
                         deployment['software_deployment']['config_id'])

    @test.attr(type='smoke')
    @test.idempotent_id('f29d21f3-ed75-47cf-8cdc-ef1bdeb4c674')
    def test_software_deployment_create_validate(self):
        """Testing software deployment was created as expected."""
        # Asserting that all fields were created
        self.assert_fields_in_dict(
            self.deployment['software_deployment'], 'action', 'config_id',
            'id', 'input_values', 'output_values', 'server_id', 'status',
            'status_reason')
        # Testing get for this deployment and verifying parameters
        self._validate_deployment(self.action, self.status,
                                  self.status_reason, self.configs[0]['id'])

    @test.attr(type='smoke')
    @test.idempotent_id('2ac43ab3-34f2-415d-be2e-eabb4d14ee32')
    def test_software_deployment_update_no_metadata_change(self):
        """Testing software deployment update without metadata change."""
        metadata = self.client.show_software_deployment_metadata(
            self.server_id)
        # Updating values without changing the configuration ID
        new_action = 'ACTION_1'
        new_status = 'STATUS_1'
        new_reason = 'REASON_1'
        self.client.update_software_deploy(
            self.deployment_id, self.server_id, self.configs[0]['id'],
            new_action, new_status, self.input_values, self.output_values,
            new_reason, self.signal_transport)
        # Verifying get and that the deployment was updated as expected
        self._validate_deployment(new_action, new_status,
                                  new_reason, self.configs[0]['id'])
        # Metadata should not be changed at this point
        test_metadata = self.client.show_software_deployment_metadata(
            self.server_id)
        for key in metadata['metadata'][0]:
            self.assertEqual(
                metadata['metadata'][0][key],
                test_metadata['metadata'][0][key])

    @test.attr(type='smoke')
    @test.idempotent_id('92c48944-d79d-4595-a840-8e1a581c1a72')
    def test_software_deployment_update_with_metadata_change(self):
        """Testing software deployment update with metadata change."""
        metadata = self.client.show_software_deployment_metadata(
            self.server_id)
        # Re-point the deployment at config 'b' instead of config 'a'.
        self.client.update_software_deploy(
            self.deployment_id, self.server_id, self.configs[1]['id'],
            self.action, self.status, self.input_values,
            self.output_values, self.status_reason, self.signal_transport)
        self._validate_deployment(self.action, self.status,
                                  self.status_reason, self.configs[1]['id'])
        # Metadata should now be changed
        new_metadata = self.client.show_software_deployment_metadata(
            self.server_id)
        # Its enough to test the ID in this case
        meta_id = metadata['metadata'][0]['id']
        test_id = new_metadata['metadata'][0]['id']
        self.assertNotEqual(meta_id, test_id)
# --- end of tempest/api/orchestration/stacks/test_soft_conf.py (sebrandon1/tempest, apache-2.0) ---
"""
Test parsing of complex date and times
"""
import unittest, time, datetime
import parsedatetime as pdt
class test(unittest.TestCase):
    """Checks Calendar.parse() on strings that combine dates and times."""

    @pdt.tests.assertEqualWithComparator
    def assertExpectedResult(self, result, check, **kwargs):
        # Compare parse results by their time tuples and context flags
        # rather than by object identity.
        return pdt.tests.compareResultByTimeTuplesAndFlags(result, check, **kwargs)

    def setUp(self):
        self.cal = pdt.Calendar()
        # Snapshot "now" once so every expectation in a test is built from
        # the same reference moment.
        self.yr, self.mth, self.dy, self.hr, self.mn, self.sec, self.wd, self.yd, self.isdst = time.localtime()

    def testDates(self):
        start = datetime.datetime(self.yr, self.mth, self.dy, self.hr, self.mn, self.sec).timetuple()
        # Fully specified date + time: result is fixed regardless of "now".
        # The second element (3) is the parser's accuracy flag; presumably
        # "both a date and a time were recognised" -- see parsedatetime docs.
        target = datetime.datetime(2006, 8, 25, 17, 0, 0).timetuple()
        self.assertExpectedResult(self.cal.parse('08/25/2006 5pm', start), (target, 3))
        self.assertExpectedResult(self.cal.parse('5pm on 08.25.2006', start), (target, 3))
        self.assertExpectedResult(self.cal.parse('5pm August 25, 2006', start), (target, 3))
        self.assertExpectedResult(self.cal.parse('5pm August 25th, 2006', start), (target, 3))
        self.assertExpectedResult(self.cal.parse('5pm 25 August, 2006', start), (target, 3))
        self.assertExpectedResult(self.cal.parse('5pm 25th August, 2006', start), (target, 3))
        self.assertExpectedResult(self.cal.parse('Aug 25, 2006 5pm', start), (target, 3))
        self.assertExpectedResult(self.cal.parse('Aug 25th, 2006 5pm', start), (target, 3))
        self.assertExpectedResult(self.cal.parse('25 Aug, 2006 5pm', start), (target, 3))
        self.assertExpectedResult(self.cal.parse('25th Aug 2006, 5pm', start), (target, 3))
        # Month/day without a year: the parser rolls forward to the next
        # occurrence, so the expected year depends on today's date.
        if self.mth > 8 or (self.mth == 8 and self.dy > 5):
            target = datetime.datetime(self.yr + 1, 8, 5, 17, 0, 0).timetuple()
        else:
            target = datetime.datetime(self.yr, 8, 5, 17, 0, 0).timetuple()
        self.assertExpectedResult(self.cal.parse('8/5 at 5pm', start), (target, 3))
        self.assertExpectedResult(self.cal.parse('5pm 8.5', start), (target, 3))
        self.assertExpectedResult(self.cal.parse('08/05 5pm', start), (target, 3))
        self.assertExpectedResult(self.cal.parse('August 5 5pm', start), (target, 3))
        self.assertExpectedResult(self.cal.parse('5pm Aug 05', start), (target, 3))
        self.assertExpectedResult(self.cal.parse('Aug 05 5pm', start), (target, 3))
        self.assertExpectedResult(self.cal.parse('Aug 05th 5pm', start), (target, 3))
        self.assertExpectedResult(self.cal.parse('5 August 5pm', start), (target, 3))
        self.assertExpectedResult(self.cal.parse('5th August 5pm', start), (target, 3))
        self.assertExpectedResult(self.cal.parse('5pm 05 Aug', start), (target, 3))
        self.assertExpectedResult(self.cal.parse('05 Aug 5pm', start), (target, 3))
        self.assertExpectedResult(self.cal.parse('05th Aug 5pm', start), (target, 3))
        self.assertExpectedResult(self.cal.parse('August 5th 5pm', start), (target, 3))
        # Same rollover logic for a noon target.
        if self.mth > 8 or (self.mth == 8 and self.dy > 5):
            target = datetime.datetime(self.yr + 1, 8, 5, 12, 0, 0).timetuple()
        else:
            target = datetime.datetime(self.yr, 8, 5, 12, 0, 0).timetuple()
        self.assertExpectedResult(self.cal.parse('August 5th 12pm', start), (target, 3))
        self.assertExpectedResult(self.cal.parse('August 5th 12:00', start), (target, 3))
# Allow running this test module directly: python TestComplexDateTimes.py
if __name__ == "__main__":
    unittest.main()
# --- end of lib/parsedatetime/tests/TestComplexDateTimes.py (r3tard/BartusBot, apache-2.0) ---
# Code in this file is copied and adapted from
# https://github.com/openai/evolution-strategies-starter.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
class Optimizer(object):
    """Base class for ES parameter optimizers.

    Adapted from OpenAI's evolution-strategies-starter. Subclasses supply
    the update rule by overriding ``_compute_step``.
    """

    def __init__(self, policy):
        self.policy = policy
        self.dim = policy.num_params
        # Update counter; schedules such as Adam's bias correction read it.
        self.t = 0

    def update(self, globalg):
        """Apply one step for gradient ``globalg``.

        Returns the new parameter vector along with the ratio of step norm
        to parameter norm (handy for monitoring learning progress).
        """
        self.t += 1
        theta = self.policy.get_weights()
        step = self._compute_step(globalg)
        update_ratio = np.linalg.norm(step) / np.linalg.norm(theta)
        return theta + step, update_ratio

    def _compute_step(self, globalg):
        # Concrete optimizers implement the actual update rule.
        raise NotImplementedError
class SGD(Optimizer):
    """Stochastic gradient descent with classic momentum."""

    def __init__(self, policy, stepsize, momentum=0.0):
        Optimizer.__init__(self, policy)
        # Velocity buffer for the momentum term.
        self.v = np.zeros(self.dim, dtype=np.float32)
        self.stepsize = stepsize
        self.momentum = momentum

    def _compute_step(self, globalg):
        # Exponentially weighted moving average of the gradient.
        blend = 1. - self.momentum
        self.v = self.momentum * self.v + blend * globalg
        return -self.stepsize * self.v
class Adam(Optimizer):
    """Adam optimizer (Kingma & Ba, 2015) over the policy parameters."""

    def __init__(self, policy, stepsize, beta1=0.9, beta2=0.999,
                 epsilon=1e-08):
        Optimizer.__init__(self, policy)
        self.stepsize = stepsize
        self.beta1 = beta1
        self.beta2 = beta2
        self.epsilon = epsilon
        # First- and second-moment running estimates.
        self.m = np.zeros(self.dim, dtype=np.float32)
        self.v = np.zeros(self.dim, dtype=np.float32)

    def _compute_step(self, globalg):
        # Bias-corrected effective step size at iteration t.
        correction = (np.sqrt(1 - self.beta2**self.t) /
                      (1 - self.beta1**self.t))
        a = self.stepsize * correction
        self.m = self.beta1 * self.m + (1 - self.beta1) * globalg
        self.v = self.beta2 * self.v + (1 - self.beta2) * (globalg * globalg)
        return -a * self.m / (np.sqrt(self.v) + self.epsilon)
# --- end of rllib/agents/ars/optimizers.py (ujvl/ray-ng, apache-2.0) ---
# Generated from java-escape by ANTLR 4.4
from antlr4 import *
from io import StringIO
# Import the generated listener either package-relatively (when this file
# lives inside a package) or as a top-level module (when used straight out
# of the ANTLR output directory).
package = globals().get("__package__", None)
# bool() treats both None and the empty string as "not inside a package",
# replacing the convoluted `len(package)>0 if package is not None else False`.
ischild = bool(package)
if ischild:
    from .ProtoParserListener import ProtoParserListener
else:
    from ProtoParserListener import ProtoParserListener
def serializedATN():
    """Return the serialized ATN (augmented transition network) string for
    the generated ProtoParser.

    NOTE: this payload is machine-generated by ANTLR 4.4 — do not edit the
    string data by hand; regenerate it from the grammar instead.
    """
    with StringIO() as buf:
        buf.write("\3\u0430\ud6d1\u8206\uad2d\u4417\uaef1\u8d80\uaadd\3,")
        buf.write("\u0120\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7\t\7")
        buf.write("\4\b\t\b\4\t\t\t\4\n\t\n\4\13\t\13\4\f\t\f\4\r\t\r\4\16")
        buf.write("\t\16\4\17\t\17\4\20\t\20\4\21\t\21\4\22\t\22\4\23\t\23")
        buf.write("\4\24\t\24\4\25\t\25\4\26\t\26\4\27\t\27\4\30\t\30\4\31")
        buf.write("\t\31\4\32\t\32\4\33\t\33\4\34\t\34\4\35\t\35\4\36\t\36")
        buf.write("\4\37\t\37\4 \t \4!\t!\4\"\t\"\4#\t#\4$\t$\3\2\3\2\3\3")
        buf.write("\3\3\5\3M\n\3\3\4\3\4\3\5\3\5\5\5S\n\5\3\6\3\6\3\6\3\6")
        buf.write("\3\6\3\6\3\6\7\6\\\n\6\f\6\16\6_\13\6\3\6\3\6\3\7\3\7")
        buf.write("\3\7\3\7\3\b\3\b\3\t\3\t\5\tk\n\t\3\t\3\t\3\t\3\n\3\n")
        buf.write("\3\13\3\13\3\13\3\13\3\13\3\13\3\f\3\f\3\f\3\f\7\f|\n")
        buf.write("\f\f\f\16\f\177\13\f\3\f\3\f\3\r\3\r\3\r\3\r\3\16\3\16")
        buf.write("\5\16\u0089\n\16\3\17\3\17\7\17\u008d\n\17\f\17\16\17")
        buf.write("\u0090\13\17\3\17\3\17\3\20\3\20\3\20\3\20\3\21\3\21\3")
        buf.write("\21\3\21\3\21\7\21\u009d\n\21\f\21\16\21\u00a0\13\21\5")
        buf.write("\21\u00a2\n\21\3\22\3\22\3\22\3\22\3\22\3\22\3\23\3\23")
        buf.write("\3\24\3\24\7\24\u00ae\n\24\f\24\16\24\u00b1\13\24\3\25")
        buf.write("\3\25\3\25\3\25\5\25\u00b7\n\25\3\25\3\25\3\26\3\26\3")
        buf.write("\26\3\26\5\26\u00bf\n\26\3\26\3\26\3\27\3\27\3\30\3\30")
        buf.write("\3\30\3\30\3\30\6\30\u00ca\n\30\r\30\16\30\u00cb\3\31")
        buf.write("\3\31\3\31\3\31\3\31\3\31\5\31\u00d4\n\31\3\31\3\31\3")
        buf.write("\32\3\32\3\32\3\32\3\32\5\32\u00dd\n\32\3\32\3\32\3\33")
        buf.write("\3\33\3\33\3\33\5\33\u00e5\n\33\3\33\3\33\3\34\3\34\3")
        buf.write("\35\3\35\3\35\3\35\6\35\u00ef\n\35\r\35\16\35\u00f0\3")
        buf.write("\36\3\36\3\36\3\36\5\36\u00f7\n\36\3\36\3\36\3\37\3\37")
        buf.write("\3 \3 \6 \u00ff\n \r \16 \u0100\3!\3!\3!\3!\3!\3!\3!\3")
        buf.write("!\3!\3!\3!\7!\u010e\n!\f!\16!\u0111\13!\3!\3!\5!\u0115")
        buf.write("\n!\3!\5!\u0118\n!\3\"\3\"\3#\3#\3$\3$\3$\2\2%\2\4\6\b")
        buf.write("\n\f\16\20\22\24\26\30\32\34\36 \"$&(*,.\60\62\64\668")
        buf.write(":<>@BDF\2\4\3\2#$\3\2\37\"\u0121\2H\3\2\2\2\4L\3\2\2\2")
        buf.write("\6N\3\2\2\2\bR\3\2\2\2\n]\3\2\2\2\fb\3\2\2\2\16f\3\2\2")
        buf.write("\2\20h\3\2\2\2\22o\3\2\2\2\24q\3\2\2\2\26w\3\2\2\2\30")
        buf.write("\u0082\3\2\2\2\32\u0088\3\2\2\2\34\u008a\3\2\2\2\36\u0093")
        buf.write("\3\2\2\2 \u00a1\3\2\2\2\"\u00a3\3\2\2\2$\u00a9\3\2\2\2")
        buf.write("&\u00af\3\2\2\2(\u00b2\3\2\2\2*\u00ba\3\2\2\2,\u00c2\3")
        buf.write("\2\2\2.\u00c9\3\2\2\2\60\u00cd\3\2\2\2\62\u00d7\3\2\2")
        buf.write("\2\64\u00e0\3\2\2\2\66\u00e8\3\2\2\28\u00ee\3\2\2\2:\u00f2")
        buf.write("\3\2\2\2<\u00fa\3\2\2\2>\u00fe\3\2\2\2@\u0102\3\2\2\2")
        buf.write("B\u0119\3\2\2\2D\u011b\3\2\2\2F\u011d\3\2\2\2HI\t\2\2")
        buf.write("\2I\3\3\2\2\2JM\7#\2\2KM\5\6\4\2LJ\3\2\2\2LK\3\2\2\2M")
        buf.write("\5\3\2\2\2NO\t\3\2\2O\7\3\2\2\2PS\7\36\2\2QS\5\2\2\2R")
        buf.write("P\3\2\2\2RQ\3\2\2\2S\t\3\2\2\2T\\\5\f\7\2U\\\5\20\t\2")
        buf.write("V\\\5\24\13\2W\\\5\"\22\2X\\\5\64\33\2Y\\\5*\26\2Z\\\5")
        buf.write(":\36\2[T\3\2\2\2[U\3\2\2\2[V\3\2\2\2[W\3\2\2\2[X\3\2\2")
        buf.write("\2[Y\3\2\2\2[Z\3\2\2\2\\_\3\2\2\2][\3\2\2\2]^\3\2\2\2")
        buf.write("^`\3\2\2\2_]\3\2\2\2`a\7\2\2\3a\13\3\2\2\2bc\7\7\2\2c")
        buf.write("d\5\16\b\2de\7\34\2\2e\r\3\2\2\2fg\5\2\2\2g\17\3\2\2\2")
        buf.write("hj\7\b\2\2ik\7\6\2\2ji\3\2\2\2jk\3\2\2\2kl\3\2\2\2lm\5")
        buf.write("\22\n\2mn\7\34\2\2n\21\3\2\2\2op\7 \2\2p\23\3\2\2\2qr")
        buf.write("\7\t\2\2rs\5 \21\2st\7\31\2\2tu\5\32\16\2uv\7\34\2\2v")
        buf.write("\25\3\2\2\2wx\7\27\2\2x}\5\30\r\2yz\7\33\2\2z|\5\30\r")
        buf.write("\2{y\3\2\2\2|\177\3\2\2\2}{\3\2\2\2}~\3\2\2\2~\u0080\3")
        buf.write("\2\2\2\177}\3\2\2\2\u0080\u0081\7\30\2\2\u0081\27\3\2")
        buf.write("\2\2\u0082\u0083\5 \21\2\u0083\u0084\7\31\2\2\u0084\u0085")
        buf.write("\5\32\16\2\u0085\31\3\2\2\2\u0086\u0089\5\4\3\2\u0087")
        buf.write("\u0089\5\34\17\2\u0088\u0086\3\2\2\2\u0088\u0087\3\2\2")
        buf.write("\2\u0089\33\3\2\2\2\u008a\u008e\7\23\2\2\u008b\u008d\5")
        buf.write("\36\20\2\u008c\u008b\3\2\2\2\u008d\u0090\3\2\2\2\u008e")
        buf.write("\u008c\3\2\2\2\u008e\u008f\3\2\2\2\u008f\u0091\3\2\2\2")
        buf.write("\u0090\u008e\3\2\2\2\u0091\u0092\7\24\2\2\u0092\35\3\2")
        buf.write("\2\2\u0093\u0094\7#\2\2\u0094\u0095\7\32\2\2\u0095\u0096")
        buf.write("\5\32\16\2\u0096\37\3\2\2\2\u0097\u00a2\7#\2\2\u0098\u0099")
        buf.write("\7\25\2\2\u0099\u009a\5\2\2\2\u009a\u009e\7\26\2\2\u009b")
        buf.write("\u009d\7%\2\2\u009c\u009b\3\2\2\2\u009d\u00a0\3\2\2\2")
        buf.write("\u009e\u009c\3\2\2\2\u009e\u009f\3\2\2\2\u009f\u00a2\3")
        buf.write("\2\2\2\u00a0\u009e\3\2\2\2\u00a1\u0097\3\2\2\2\u00a1\u0098")
        buf.write("\3\2\2\2\u00a2!\3\2\2\2\u00a3\u00a4\7\n\2\2\u00a4\u00a5")
        buf.write("\5$\23\2\u00a5\u00a6\7\23\2\2\u00a6\u00a7\5&\24\2\u00a7")
        buf.write("\u00a8\7\24\2\2\u00a8#\3\2\2\2\u00a9\u00aa\7#\2\2\u00aa")
        buf.write("%\3\2\2\2\u00ab\u00ae\5\24\13\2\u00ac\u00ae\5(\25\2\u00ad")
        buf.write("\u00ab\3\2\2\2\u00ad\u00ac\3\2\2\2\u00ae\u00b1\3\2\2\2")
        buf.write("\u00af\u00ad\3\2\2\2\u00af\u00b0\3\2\2\2\u00b0\'\3\2\2")
        buf.write("\2\u00b1\u00af\3\2\2\2\u00b2\u00b3\7#\2\2\u00b3\u00b4")
        buf.write("\7\31\2\2\u00b4\u00b6\7\37\2\2\u00b5\u00b7\5\26\f\2\u00b6")
        buf.write("\u00b5\3\2\2\2\u00b6\u00b7\3\2\2\2\u00b7\u00b8\3\2\2\2")
        buf.write("\u00b8\u00b9\7\34\2\2\u00b9)\3\2\2\2\u00ba\u00bb\7\13")
        buf.write("\2\2\u00bb\u00bc\5,\27\2\u00bc\u00be\7\23\2\2\u00bd\u00bf")
        buf.write("\5.\30\2\u00be\u00bd\3\2\2\2\u00be\u00bf\3\2\2\2\u00bf")
        buf.write("\u00c0\3\2\2\2\u00c0\u00c1\7\24\2\2\u00c1+\3\2\2\2\u00c2")
        buf.write("\u00c3\7#\2\2\u00c3-\3\2\2\2\u00c4\u00ca\5\24\13\2\u00c5")
        buf.write("\u00ca\5\60\31\2\u00c6\u00ca\5*\26\2\u00c7\u00ca\5\"\22")
        buf.write("\2\u00c8\u00ca\5\62\32\2\u00c9\u00c4\3\2\2\2\u00c9\u00c5")
        buf.write("\3\2\2\2\u00c9\u00c6\3\2\2\2\u00c9\u00c7\3\2\2\2\u00c9")
        buf.write("\u00c8\3\2\2\2\u00ca\u00cb\3\2\2\2\u00cb\u00c9\3\2\2\2")
        buf.write("\u00cb\u00cc\3\2\2\2\u00cc/\3\2\2\2\u00cd\u00ce\7\35\2")
        buf.write("\2\u00ce\u00cf\5\b\5\2\u00cf\u00d0\7#\2\2\u00d0\u00d1")
        buf.write("\7\31\2\2\u00d1\u00d3\7\37\2\2\u00d2\u00d4\5\26\f\2\u00d3")
        buf.write("\u00d2\3\2\2\2\u00d3\u00d4\3\2\2\2\u00d4\u00d5\3\2\2\2")
        buf.write("\u00d5\u00d6\7\34\2\2\u00d6\61\3\2\2\2\u00d7\u00d8\7\r")
        buf.write("\2\2\u00d8\u00d9\7\37\2\2\u00d9\u00dc\7\16\2\2\u00da\u00dd")
        buf.write("\7\37\2\2\u00db\u00dd\7\17\2\2\u00dc\u00da\3\2\2\2\u00dc")
        buf.write("\u00db\3\2\2\2\u00dd\u00de\3\2\2\2\u00de\u00df\7\34\2")
        buf.write("\2\u00df\63\3\2\2\2\u00e0\u00e1\7\f\2\2\u00e1\u00e2\5")
        buf.write("\66\34\2\u00e2\u00e4\7\23\2\2\u00e3\u00e5\58\35\2\u00e4")
        buf.write("\u00e3\3\2\2\2\u00e4\u00e5\3\2\2\2\u00e5\u00e6\3\2\2\2")
        buf.write("\u00e6\u00e7\7\24\2\2\u00e7\65\3\2\2\2\u00e8\u00e9\5\2")
        buf.write("\2\2\u00e9\67\3\2\2\2\u00ea\u00ef\5\24\13\2\u00eb\u00ef")
        buf.write("\5\60\31\2\u00ec\u00ef\5*\26\2\u00ed\u00ef\5\"\22\2\u00ee")
        buf.write("\u00ea\3\2\2\2\u00ee\u00eb\3\2\2\2\u00ee\u00ec\3\2\2\2")
        buf.write("\u00ee\u00ed\3\2\2\2\u00ef\u00f0\3\2\2\2\u00f0\u00ee\3")
        buf.write("\2\2\2\u00f0\u00f1\3\2\2\2\u00f19\3\2\2\2\u00f2\u00f3")
        buf.write("\7\20\2\2\u00f3\u00f4\5<\37\2\u00f4\u00f6\7\23\2\2\u00f5")
        buf.write("\u00f7\5> \2\u00f6\u00f5\3\2\2\2\u00f6\u00f7\3\2\2\2\u00f7")
        buf.write("\u00f8\3\2\2\2\u00f8\u00f9\7\24\2\2\u00f9;\3\2\2\2\u00fa")
        buf.write("\u00fb\7#\2\2\u00fb=\3\2\2\2\u00fc\u00ff\5\24\13\2\u00fd")
        buf.write("\u00ff\5@!\2\u00fe\u00fc\3\2\2\2\u00fe\u00fd\3\2\2\2\u00ff")
        buf.write("\u0100\3\2\2\2\u0100\u00fe\3\2\2\2\u0100\u0101\3\2\2\2")
        buf.write("\u0101?\3\2\2\2\u0102\u0103\7\22\2\2\u0103\u0104\5B\"")
        buf.write("\2\u0104\u0105\7\25\2\2\u0105\u0106\5D#\2\u0106\u0107")
        buf.write("\7\26\2\2\u0107\u0108\7\21\2\2\u0108\u0109\7\25\2\2\u0109")
        buf.write("\u010a\5F$\2\u010a\u0117\7\26\2\2\u010b\u010f\7\23\2\2")
        buf.write("\u010c\u010e\5\24\13\2\u010d\u010c\3\2\2\2\u010e\u0111")
        buf.write("\3\2\2\2\u010f\u010d\3\2\2\2\u010f\u0110\3\2\2\2\u0110")
        buf.write("\u0112\3\2\2\2\u0111\u010f\3\2\2\2\u0112\u0114\7\24\2")
        buf.write("\2\u0113\u0115\7\34\2\2\u0114\u0113\3\2\2\2\u0114\u0115")
        buf.write("\3\2\2\2\u0115\u0118\3\2\2\2\u0116\u0118\7\34\2\2\u0117")
        buf.write("\u010b\3\2\2\2\u0117\u0116\3\2\2\2\u0118A\3\2\2\2\u0119")
        buf.write("\u011a\7#\2\2\u011aC\3\2\2\2\u011b\u011c\5\2\2\2\u011c")
        buf.write("E\3\2\2\2\u011d\u011e\5\2\2\2\u011eG\3\2\2\2\35LR[]j}")
        buf.write("\u0088\u008e\u009e\u00a1\u00ad\u00af\u00b6\u00be\u00c9")
        buf.write("\u00cb\u00d3\u00dc\u00e4\u00ee\u00f0\u00f6\u00fe\u0100")
        buf.write("\u010f\u0114\u0117")
        return buf.getvalue()
class ProtoParser ( Parser ):
grammarFileName = "java-escape"
atn = ATNDeserializer().deserialize(serializedATN())
decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]
sharedContextCache = PredictionContextCache()
EOF = Token.EOF
BLOCK_OPEN=17
MESSAGE_LITERAL=9
SERVICE_LITERAL=14
PROTOBUF_SCOPE_LITERAL=27
EXTENSIONS_TO_LITERAL=12
LINE_COMMENT=1
PAREN_CLOSE=20
ITEM_TERMINATOR=26
OPTION_LITERAL=7
EXTENSIONS_MAX_LITERAL=13
WHITESPACE=3
EQUALS=23
BOOL_LITERAL=31
EXTENSIONS_DEF_LITERAL=11
FLOAT_LITERAL=32
COLON=24
QUALIFIED_IDENTIFIER=34
MULTILINE_COMMENT=2
STRING_LITERAL=30
BLOCK_CLOSE=18
FIELD_IDENTIFIER=35
PACKAGE_LITERAL=5
IMPORT_PUBLIC=4
COMMA=25
IDENTIFIER=33
INTEGER_LITERAL=29
EXTEND_LITERAL=10
PROTOBUF_TYPE_LITERAL=28
ENUM_LITERAL=8
IMPORT_LITERAL=6
PAREN_OPEN=19
BRACKET_CLOSE=22
RPC_LITERAL=16
RETURNS_LITERAL=15
BRACKET_OPEN=21
PROTO=36
OPTION_PREDEFINED=37
OPTION_CUSTOMIZED=38
OPTION_VALUE_ITEM=39
OPTION_VALUE_OBJECT=40
ENUM_FIELD=41
MESSAGE_FIELD=42
tokenNames = [ "<INVALID>", "LINE_COMMENT", "MULTILINE_COMMENT", "WHITESPACE",
"'public'", "'package'", "'import'", "'option'", "'enum'",
"'message'", "'extend'", "'extensions'", "'to'", "'max'",
"'service'", "'returns'", "'rpc'", "'{'", "'}'", "'('",
"')'", "'['", "']'", "'='", "':'", "','", "';'", "PROTOBUF_SCOPE_LITERAL",
"PROTOBUF_TYPE_LITERAL", "INTEGER_LITERAL", "STRING_LITERAL",
"BOOL_LITERAL", "FLOAT_LITERAL", "IDENTIFIER", "QUALIFIED_IDENTIFIER",
"FIELD_IDENTIFIER", "PROTO", "OPTION_PREDEFINED", "OPTION_CUSTOMIZED",
"OPTION_VALUE_ITEM", "OPTION_VALUE_OBJECT", "ENUM_FIELD",
"MESSAGE_FIELD" ]
RULE_all_identifier = 0
RULE_all_value = 1
RULE_literal_value = 2
RULE_proto_type = 3
RULE_proto = 4
RULE_package_def = 5
RULE_package_name = 6
RULE_import_def = 7
RULE_import_file_name = 8
RULE_option_line_def = 9
RULE_option_field_def = 10
RULE_option_field_item = 11
RULE_option_all_value = 12
RULE_option_value_object = 13
RULE_option_value_item = 14
RULE_option_name = 15
RULE_enum_def = 16
RULE_enum_name = 17
RULE_enum_content = 18
RULE_enum_item_def = 19
RULE_message_def = 20
RULE_message_name = 21
RULE_message_content = 22
RULE_message_item_def = 23
RULE_message_ext_def = 24
RULE_ext_def = 25
RULE_ext_name = 26
RULE_ext_content = 27
RULE_service_def = 28
RULE_service_name = 29
RULE_service_content = 30
RULE_rpc_def = 31
RULE_rpc_name = 32
RULE_req_name = 33
RULE_resp_name = 34
ruleNames = [ "all_identifier", "all_value", "literal_value", "proto_type",
"proto", "package_def", "package_name", "import_def",
"import_file_name", "option_line_def", "option_field_def",
"option_field_item", "option_all_value", "option_value_object",
"option_value_item", "option_name", "enum_def", "enum_name",
"enum_content", "enum_item_def", "message_def", "message_name",
"message_content", "message_item_def", "message_ext_def",
"ext_def", "ext_name", "ext_content", "service_def",
"service_name", "service_content", "rpc_def", "rpc_name",
"req_name", "resp_name" ]
def __init__(self, input:TokenStream):
super().__init__(input)
self.checkVersion("4.4")
self._interp = ParserATNSimulator(self, self.atn, self.decisionsToDFA, self.sharedContextCache)
self._predicates = None
class All_identifierContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def IDENTIFIER(self):
return self.getToken(ProtoParser.IDENTIFIER, 0)
def QUALIFIED_IDENTIFIER(self):
return self.getToken(ProtoParser.QUALIFIED_IDENTIFIER, 0)
def getRuleIndex(self):
return ProtoParser.RULE_all_identifier
def enterRule(self, listener:ParseTreeListener):
if isinstance( listener, ProtoParserListener ):
listener.enterAll_identifier(self)
def exitRule(self, listener:ParseTreeListener):
if isinstance( listener, ProtoParserListener ):
listener.exitAll_identifier(self)
def all_identifier(self):
localctx = ProtoParser.All_identifierContext(self, self._ctx, self.state)
self.enterRule(localctx, 0, self.RULE_all_identifier)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 70
_la = self._input.LA(1)
if not(_la==ProtoParser.IDENTIFIER or _la==ProtoParser.QUALIFIED_IDENTIFIER):
self._errHandler.recoverInline(self)
self.consume()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class All_valueContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def IDENTIFIER(self):
return self.getToken(ProtoParser.IDENTIFIER, 0)
def literal_value(self):
return self.getTypedRuleContext(ProtoParser.Literal_valueContext,0)
def getRuleIndex(self):
return ProtoParser.RULE_all_value
def enterRule(self, listener:ParseTreeListener):
if isinstance( listener, ProtoParserListener ):
listener.enterAll_value(self)
def exitRule(self, listener:ParseTreeListener):
if isinstance( listener, ProtoParserListener ):
listener.exitAll_value(self)
def all_value(self):
localctx = ProtoParser.All_valueContext(self, self._ctx, self.state)
self.enterRule(localctx, 2, self.RULE_all_value)
try:
self.state = 74
token = self._input.LA(1)
if token in [self.IDENTIFIER]:
self.enterOuterAlt(localctx, 1)
self.state = 72
self.match(self.IDENTIFIER)
elif token in [self.INTEGER_LITERAL, self.STRING_LITERAL, self.BOOL_LITERAL, self.FLOAT_LITERAL]:
self.enterOuterAlt(localctx, 2)
self.state = 73
self.literal_value()
else:
raise NoViableAltException(self)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Literal_valueContext(ParserRuleContext):
    """Parse-tree context for the `literal_value` rule (generated by ANTLR)."""
    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser
    def FLOAT_LITERAL(self):
        return self.getToken(ProtoParser.FLOAT_LITERAL, 0)
    def INTEGER_LITERAL(self):
        return self.getToken(ProtoParser.INTEGER_LITERAL, 0)
    def STRING_LITERAL(self):
        return self.getToken(ProtoParser.STRING_LITERAL, 0)
    def BOOL_LITERAL(self):
        return self.getToken(ProtoParser.BOOL_LITERAL, 0)
    def getRuleIndex(self):
        return ProtoParser.RULE_literal_value
    def enterRule(self, listener:ParseTreeListener):
        if isinstance( listener, ProtoParserListener ):
            listener.enterLiteral_value(self)
    def exitRule(self, listener:ParseTreeListener):
        if isinstance( listener, ProtoParserListener ):
            listener.exitLiteral_value(self)
def literal_value(self):
    """Parse rule `literal_value`: one of INTEGER/STRING/BOOL/FLOAT literal tokens."""
    localctx = ProtoParser.Literal_valueContext(self, self._ctx, self.state)
    self.enterRule(localctx, 4, self.RULE_literal_value)
    self._la = 0 # Token type
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 76
        _la = self._input.LA(1)
        # Bitmask set-membership test over the four literal token types.
        if not((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << self.INTEGER_LITERAL) | (1 << self.STRING_LITERAL) | (1 << self.BOOL_LITERAL) | (1 << self.FLOAT_LITERAL))) != 0)):
            self._errHandler.recoverInline(self)
        self.consume()
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class Proto_typeContext(ParserRuleContext):
    """Parse-tree context for the `proto_type` rule (generated by ANTLR)."""
    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser
    def PROTOBUF_TYPE_LITERAL(self):
        # Token accessor: present when a built-in protobuf type matched.
        return self.getToken(ProtoParser.PROTOBUF_TYPE_LITERAL, 0)
    def all_identifier(self):
        # Child-rule accessor: present when a user-defined type name matched.
        return self.getTypedRuleContext(ProtoParser.All_identifierContext,0)
    def getRuleIndex(self):
        return ProtoParser.RULE_proto_type
    def enterRule(self, listener:ParseTreeListener):
        if isinstance( listener, ProtoParserListener ):
            listener.enterProto_type(self)
    def exitRule(self, listener:ParseTreeListener):
        if isinstance( listener, ProtoParserListener ):
            listener.exitProto_type(self)
def proto_type(self):
    """Parse rule `proto_type`: PROTOBUF_TYPE_LITERAL | all_identifier."""
    localctx = ProtoParser.Proto_typeContext(self, self._ctx, self.state)
    self.enterRule(localctx, 6, self.RULE_proto_type)
    try:
        self.state = 80  # ATN decision state
        token = self._input.LA(1)
        if token in [self.PROTOBUF_TYPE_LITERAL]:
            self.enterOuterAlt(localctx, 1)
            self.state = 78
            self.match(self.PROTOBUF_TYPE_LITERAL)
        elif token in [self.IDENTIFIER, self.QUALIFIED_IDENTIFIER]:
            self.enterOuterAlt(localctx, 2)
            self.state = 79
            self.all_identifier()
        else:
            raise NoViableAltException(self)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class ProtoContext(ParserRuleContext):
    """Parse-tree context for the top-level `proto` rule (generated by ANTLR)."""
    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser
    def EOF(self):
        return self.getToken(ProtoParser.EOF, 0)
    def option_line_def(self, i:int=None):
        # i is None -> list of all matches; otherwise the i-th match.
        if i is None:
            return self.getTypedRuleContexts(ProtoParser.Option_line_defContext)
        else:
            return self.getTypedRuleContext(ProtoParser.Option_line_defContext,i)
    def ext_def(self, i:int=None):
        if i is None:
            return self.getTypedRuleContexts(ProtoParser.Ext_defContext)
        else:
            return self.getTypedRuleContext(ProtoParser.Ext_defContext,i)
    def import_def(self, i:int=None):
        if i is None:
            return self.getTypedRuleContexts(ProtoParser.Import_defContext)
        else:
            return self.getTypedRuleContext(ProtoParser.Import_defContext,i)
    def message_def(self, i:int=None):
        if i is None:
            return self.getTypedRuleContexts(ProtoParser.Message_defContext)
        else:
            return self.getTypedRuleContext(ProtoParser.Message_defContext,i)
    def enum_def(self, i:int=None):
        if i is None:
            return self.getTypedRuleContexts(ProtoParser.Enum_defContext)
        else:
            return self.getTypedRuleContext(ProtoParser.Enum_defContext,i)
    def service_def(self, i:int=None):
        if i is None:
            return self.getTypedRuleContexts(ProtoParser.Service_defContext)
        else:
            return self.getTypedRuleContext(ProtoParser.Service_defContext,i)
    def package_def(self, i:int=None):
        if i is None:
            return self.getTypedRuleContexts(ProtoParser.Package_defContext)
        else:
            return self.getTypedRuleContext(ProtoParser.Package_defContext,i)
    def getRuleIndex(self):
        return ProtoParser.RULE_proto
    def enterRule(self, listener:ParseTreeListener):
        if isinstance( listener, ProtoParserListener ):
            listener.enterProto(self)
    def exitRule(self, listener:ParseTreeListener):
        if isinstance( listener, ProtoParserListener ):
            listener.exitProto(self)
def proto(self):
    """Parse rule `proto`: zero or more top-level definitions
    (package | import | option | enum | extend | message | service), then EOF.
    """
    localctx = ProtoParser.ProtoContext(self, self._ctx, self.state)
    self.enterRule(localctx, 8, self.RULE_proto)
    self._la = 0 # Token type
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 91
        self._errHandler.sync(self)
        _la = self._input.LA(1)
        # Loop while lookahead can begin a top-level definition (bitmask token-set test).
        while (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << self.PACKAGE_LITERAL) | (1 << self.IMPORT_LITERAL) | (1 << self.OPTION_LITERAL) | (1 << self.ENUM_LITERAL) | (1 << self.MESSAGE_LITERAL) | (1 << self.EXTEND_LITERAL) | (1 << self.SERVICE_LITERAL))) != 0):
            self.state = 89
            token = self._input.LA(1)
            # Dispatch on the keyword that introduces each kind of definition.
            if token in [self.PACKAGE_LITERAL]:
                self.state = 82
                self.package_def()
            elif token in [self.IMPORT_LITERAL]:
                self.state = 83
                self.import_def()
            elif token in [self.OPTION_LITERAL]:
                self.state = 84
                self.option_line_def()
            elif token in [self.ENUM_LITERAL]:
                self.state = 85
                self.enum_def()
            elif token in [self.EXTEND_LITERAL]:
                self.state = 86
                self.ext_def()
            elif token in [self.MESSAGE_LITERAL]:
                self.state = 87
                self.message_def()
            elif token in [self.SERVICE_LITERAL]:
                self.state = 88
                self.service_def()
            else:
                raise NoViableAltException(self)
            self.state = 93
            self._errHandler.sync(self)
            _la = self._input.LA(1)
        self.state = 94
        self.match(self.EOF)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class Package_defContext(ParserRuleContext):
    """Parse-tree context for the `package_def` rule (generated by ANTLR)."""
    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser
    def PACKAGE_LITERAL(self):
        return self.getToken(ProtoParser.PACKAGE_LITERAL, 0)
    def package_name(self):
        return self.getTypedRuleContext(ProtoParser.Package_nameContext,0)
    def ITEM_TERMINATOR(self):
        return self.getToken(ProtoParser.ITEM_TERMINATOR, 0)
    def getRuleIndex(self):
        return ProtoParser.RULE_package_def
    def enterRule(self, listener:ParseTreeListener):
        if isinstance( listener, ProtoParserListener ):
            listener.enterPackage_def(self)
    def exitRule(self, listener:ParseTreeListener):
        if isinstance( listener, ProtoParserListener ):
            listener.exitPackage_def(self)
def package_def(self):
    """Parse rule `package_def`: PACKAGE_LITERAL package_name ITEM_TERMINATOR."""
    localctx = ProtoParser.Package_defContext(self, self._ctx, self.state)
    self.enterRule(localctx, 10, self.RULE_package_def)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 96
        self.match(self.PACKAGE_LITERAL)
        self.state = 97
        self.package_name()
        self.state = 98
        self.match(self.ITEM_TERMINATOR)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class Package_nameContext(ParserRuleContext):
    """Parse-tree context for the `package_name` rule (generated by ANTLR)."""
    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser
    def all_identifier(self):
        return self.getTypedRuleContext(ProtoParser.All_identifierContext,0)
    def getRuleIndex(self):
        return ProtoParser.RULE_package_name
    def enterRule(self, listener:ParseTreeListener):
        if isinstance( listener, ProtoParserListener ):
            listener.enterPackage_name(self)
    def exitRule(self, listener:ParseTreeListener):
        if isinstance( listener, ProtoParserListener ):
            listener.exitPackage_name(self)
def package_name(self):
    """Parse rule `package_name`: all_identifier."""
    localctx = ProtoParser.Package_nameContext(self, self._ctx, self.state)
    self.enterRule(localctx, 12, self.RULE_package_name)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 100
        self.all_identifier()
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class Import_defContext(ParserRuleContext):
    """Parse-tree context for the `import_def` rule (generated by ANTLR)."""
    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser
    def IMPORT_PUBLIC(self):
        # Optional `public` modifier token; None when absent.
        return self.getToken(ProtoParser.IMPORT_PUBLIC, 0)
    def import_file_name(self):
        return self.getTypedRuleContext(ProtoParser.Import_file_nameContext,0)
    def IMPORT_LITERAL(self):
        return self.getToken(ProtoParser.IMPORT_LITERAL, 0)
    def ITEM_TERMINATOR(self):
        return self.getToken(ProtoParser.ITEM_TERMINATOR, 0)
    def getRuleIndex(self):
        return ProtoParser.RULE_import_def
    def enterRule(self, listener:ParseTreeListener):
        if isinstance( listener, ProtoParserListener ):
            listener.enterImport_def(self)
    def exitRule(self, listener:ParseTreeListener):
        if isinstance( listener, ProtoParserListener ):
            listener.exitImport_def(self)
def import_def(self):
    """Parse rule `import_def`: IMPORT_LITERAL IMPORT_PUBLIC? import_file_name ITEM_TERMINATOR."""
    localctx = ProtoParser.Import_defContext(self, self._ctx, self.state)
    self.enterRule(localctx, 14, self.RULE_import_def)
    self._la = 0 # Token type
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 102
        self.match(self.IMPORT_LITERAL)
        self.state = 104
        _la = self._input.LA(1)
        # Optional `public` modifier.
        if _la==ProtoParser.IMPORT_PUBLIC:
            self.state = 103
            self.match(self.IMPORT_PUBLIC)
        self.state = 106
        self.import_file_name()
        self.state = 107
        self.match(self.ITEM_TERMINATOR)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class Import_file_nameContext(ParserRuleContext):
    """Parse-tree context for the `import_file_name` rule (generated by ANTLR)."""
    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser
    def STRING_LITERAL(self):
        return self.getToken(ProtoParser.STRING_LITERAL, 0)
    def getRuleIndex(self):
        return ProtoParser.RULE_import_file_name
    def enterRule(self, listener:ParseTreeListener):
        if isinstance( listener, ProtoParserListener ):
            listener.enterImport_file_name(self)
    def exitRule(self, listener:ParseTreeListener):
        if isinstance( listener, ProtoParserListener ):
            listener.exitImport_file_name(self)
def import_file_name(self):
    """Parse rule `import_file_name`: STRING_LITERAL."""
    localctx = ProtoParser.Import_file_nameContext(self, self._ctx, self.state)
    self.enterRule(localctx, 16, self.RULE_import_file_name)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 109
        self.match(self.STRING_LITERAL)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class Option_line_defContext(ParserRuleContext):
    """Parse-tree context for the `option_line_def` rule (generated by ANTLR)."""
    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser
    def option_all_value(self):
        return self.getTypedRuleContext(ProtoParser.Option_all_valueContext,0)
    def EQUALS(self):
        return self.getToken(ProtoParser.EQUALS, 0)
    def ITEM_TERMINATOR(self):
        return self.getToken(ProtoParser.ITEM_TERMINATOR, 0)
    def option_name(self):
        return self.getTypedRuleContext(ProtoParser.Option_nameContext,0)
    def OPTION_LITERAL(self):
        return self.getToken(ProtoParser.OPTION_LITERAL, 0)
    def getRuleIndex(self):
        return ProtoParser.RULE_option_line_def
    def enterRule(self, listener:ParseTreeListener):
        if isinstance( listener, ProtoParserListener ):
            listener.enterOption_line_def(self)
    def exitRule(self, listener:ParseTreeListener):
        if isinstance( listener, ProtoParserListener ):
            listener.exitOption_line_def(self)
def option_line_def(self):
    """Parse rule `option_line_def`: OPTION_LITERAL option_name EQUALS option_all_value ITEM_TERMINATOR."""
    localctx = ProtoParser.Option_line_defContext(self, self._ctx, self.state)
    self.enterRule(localctx, 18, self.RULE_option_line_def)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 111
        self.match(self.OPTION_LITERAL)
        self.state = 112
        self.option_name()
        self.state = 113
        self.match(self.EQUALS)
        self.state = 114
        self.option_all_value()
        self.state = 115
        self.match(self.ITEM_TERMINATOR)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class Option_field_defContext(ParserRuleContext):
    """Parse-tree context for the `option_field_def` rule (generated by ANTLR)."""
    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser
    def option_field_item(self, i:int=None):
        # i is None -> list of all matches; otherwise the i-th match.
        if i is None:
            return self.getTypedRuleContexts(ProtoParser.Option_field_itemContext)
        else:
            return self.getTypedRuleContext(ProtoParser.Option_field_itemContext,i)
    def BRACKET_CLOSE(self):
        return self.getToken(ProtoParser.BRACKET_CLOSE, 0)
    def BRACKET_OPEN(self):
        return self.getToken(ProtoParser.BRACKET_OPEN, 0)
    def COMMA(self, i:int=None):
        if i is None:
            return self.getTokens(ProtoParser.COMMA)
        else:
            return self.getToken(ProtoParser.COMMA, i)
    def getRuleIndex(self):
        return ProtoParser.RULE_option_field_def
    def enterRule(self, listener:ParseTreeListener):
        if isinstance( listener, ProtoParserListener ):
            listener.enterOption_field_def(self)
    def exitRule(self, listener:ParseTreeListener):
        if isinstance( listener, ProtoParserListener ):
            listener.exitOption_field_def(self)
def option_field_def(self):
    """Parse rule `option_field_def`: BRACKET_OPEN option_field_item (COMMA option_field_item)* BRACKET_CLOSE."""
    localctx = ProtoParser.Option_field_defContext(self, self._ctx, self.state)
    self.enterRule(localctx, 20, self.RULE_option_field_def)
    self._la = 0 # Token type
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 117
        self.match(self.BRACKET_OPEN)
        self.state = 118
        self.option_field_item()
        self.state = 123
        self._errHandler.sync(self)
        _la = self._input.LA(1)
        # Comma-separated additional items.
        while _la==ProtoParser.COMMA:
            self.state = 119
            self.match(self.COMMA)
            self.state = 120
            self.option_field_item()
            self.state = 125
            self._errHandler.sync(self)
            _la = self._input.LA(1)
        self.state = 126
        self.match(self.BRACKET_CLOSE)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class Option_field_itemContext(ParserRuleContext):
    """Parse-tree context for the `option_field_item` rule (generated by ANTLR)."""
    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser
    def option_all_value(self):
        return self.getTypedRuleContext(ProtoParser.Option_all_valueContext,0)
    def EQUALS(self):
        return self.getToken(ProtoParser.EQUALS, 0)
    def option_name(self):
        return self.getTypedRuleContext(ProtoParser.Option_nameContext,0)
    def getRuleIndex(self):
        return ProtoParser.RULE_option_field_item
    def enterRule(self, listener:ParseTreeListener):
        if isinstance( listener, ProtoParserListener ):
            listener.enterOption_field_item(self)
    def exitRule(self, listener:ParseTreeListener):
        if isinstance( listener, ProtoParserListener ):
            listener.exitOption_field_item(self)
def option_field_item(self):
    """Parse rule `option_field_item`: option_name EQUALS option_all_value."""
    localctx = ProtoParser.Option_field_itemContext(self, self._ctx, self.state)
    self.enterRule(localctx, 22, self.RULE_option_field_item)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 128
        self.option_name()
        self.state = 129
        self.match(self.EQUALS)
        self.state = 130
        self.option_all_value()
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class Option_all_valueContext(ParserRuleContext):
    """Parse-tree context for the `option_all_value` rule (generated by ANTLR)."""
    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser
    def option_value_object(self):
        # Child-rule accessor: present when the aggregate-object alternative matched.
        return self.getTypedRuleContext(ProtoParser.Option_value_objectContext,0)
    def all_value(self):
        # Child-rule accessor: present when the scalar-value alternative matched.
        return self.getTypedRuleContext(ProtoParser.All_valueContext,0)
    def getRuleIndex(self):
        return ProtoParser.RULE_option_all_value
    def enterRule(self, listener:ParseTreeListener):
        if isinstance( listener, ProtoParserListener ):
            listener.enterOption_all_value(self)
    def exitRule(self, listener:ParseTreeListener):
        if isinstance( listener, ProtoParserListener ):
            listener.exitOption_all_value(self)
def option_all_value(self):
    """Parse rule `option_all_value`: all_value | option_value_object."""
    localctx = ProtoParser.Option_all_valueContext(self, self._ctx, self.state)
    self.enterRule(localctx, 24, self.RULE_option_all_value)
    try:
        self.state = 134  # ATN decision state
        token = self._input.LA(1)
        if token in [self.INTEGER_LITERAL, self.STRING_LITERAL, self.BOOL_LITERAL, self.FLOAT_LITERAL, self.IDENTIFIER]:
            self.enterOuterAlt(localctx, 1)
            self.state = 132
            self.all_value()
        elif token in [self.BLOCK_OPEN]:
            self.enterOuterAlt(localctx, 2)
            self.state = 133
            self.option_value_object()
        else:
            raise NoViableAltException(self)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class Option_value_objectContext(ParserRuleContext):
    """Parse-tree context for the `option_value_object` rule (generated by ANTLR)."""
    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser
    def BLOCK_OPEN(self):
        return self.getToken(ProtoParser.BLOCK_OPEN, 0)
    def BLOCK_CLOSE(self):
        return self.getToken(ProtoParser.BLOCK_CLOSE, 0)
    def option_value_item(self, i:int=None):
        # i is None -> list of all matches; otherwise the i-th match.
        if i is None:
            return self.getTypedRuleContexts(ProtoParser.Option_value_itemContext)
        else:
            return self.getTypedRuleContext(ProtoParser.Option_value_itemContext,i)
    def getRuleIndex(self):
        return ProtoParser.RULE_option_value_object
    def enterRule(self, listener:ParseTreeListener):
        if isinstance( listener, ProtoParserListener ):
            listener.enterOption_value_object(self)
    def exitRule(self, listener:ParseTreeListener):
        if isinstance( listener, ProtoParserListener ):
            listener.exitOption_value_object(self)
def option_value_object(self):
    """Parse rule `option_value_object`: BLOCK_OPEN option_value_item* BLOCK_CLOSE."""
    localctx = ProtoParser.Option_value_objectContext(self, self._ctx, self.state)
    self.enterRule(localctx, 26, self.RULE_option_value_object)
    self._la = 0 # Token type
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 136
        self.match(self.BLOCK_OPEN)
        self.state = 140
        self._errHandler.sync(self)
        _la = self._input.LA(1)
        # Each item starts with an IDENTIFIER key.
        while _la==ProtoParser.IDENTIFIER:
            self.state = 137
            self.option_value_item()
            self.state = 142
            self._errHandler.sync(self)
            _la = self._input.LA(1)
        self.state = 143
        self.match(self.BLOCK_CLOSE)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class Option_value_itemContext(ParserRuleContext):
    """Parse-tree context for the `option_value_item` rule (generated by ANTLR)."""
    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser
    def option_all_value(self):
        return self.getTypedRuleContext(ProtoParser.Option_all_valueContext,0)
    def IDENTIFIER(self):
        return self.getToken(ProtoParser.IDENTIFIER, 0)
    def COLON(self):
        return self.getToken(ProtoParser.COLON, 0)
    def getRuleIndex(self):
        return ProtoParser.RULE_option_value_item
    def enterRule(self, listener:ParseTreeListener):
        if isinstance( listener, ProtoParserListener ):
            listener.enterOption_value_item(self)
    def exitRule(self, listener:ParseTreeListener):
        if isinstance( listener, ProtoParserListener ):
            listener.exitOption_value_item(self)
def option_value_item(self):
    """Parse rule `option_value_item`: IDENTIFIER COLON option_all_value."""
    localctx = ProtoParser.Option_value_itemContext(self, self._ctx, self.state)
    self.enterRule(localctx, 28, self.RULE_option_value_item)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 145
        self.match(self.IDENTIFIER)
        self.state = 146
        self.match(self.COLON)
        self.state = 147
        self.option_all_value()
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class Option_nameContext(ParserRuleContext):
    """Parse-tree context for the `option_name` rule (generated by ANTLR)."""
    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser
    def PAREN_CLOSE(self):
        return self.getToken(ProtoParser.PAREN_CLOSE, 0)
    def IDENTIFIER(self):
        return self.getToken(ProtoParser.IDENTIFIER, 0)
    def PAREN_OPEN(self):
        return self.getToken(ProtoParser.PAREN_OPEN, 0)
    def FIELD_IDENTIFIER(self, i:int=None):
        # i is None -> list of all trailing field tokens; otherwise the i-th one.
        if i is None:
            return self.getTokens(ProtoParser.FIELD_IDENTIFIER)
        else:
            return self.getToken(ProtoParser.FIELD_IDENTIFIER, i)
    def all_identifier(self):
        return self.getTypedRuleContext(ProtoParser.All_identifierContext,0)
    def getRuleIndex(self):
        return ProtoParser.RULE_option_name
    def enterRule(self, listener:ParseTreeListener):
        if isinstance( listener, ProtoParserListener ):
            listener.enterOption_name(self)
    def exitRule(self, listener:ParseTreeListener):
        if isinstance( listener, ProtoParserListener ):
            listener.exitOption_name(self)
def option_name(self):
    """Parse rule `option_name`:
    IDENTIFIER | PAREN_OPEN all_identifier PAREN_CLOSE FIELD_IDENTIFIER*.
    """
    localctx = ProtoParser.Option_nameContext(self, self._ctx, self.state)
    self.enterRule(localctx, 30, self.RULE_option_name)
    self._la = 0 # Token type
    try:
        self.state = 159  # ATN decision state
        token = self._input.LA(1)
        if token in [self.IDENTIFIER]:
            self.enterOuterAlt(localctx, 1)
            self.state = 149
            self.match(self.IDENTIFIER)
        elif token in [self.PAREN_OPEN]:
            # Custom-option form: parenthesized extension name, optional field path.
            self.enterOuterAlt(localctx, 2)
            self.state = 150
            self.match(self.PAREN_OPEN)
            self.state = 151
            self.all_identifier()
            self.state = 152
            self.match(self.PAREN_CLOSE)
            self.state = 156
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            while _la==ProtoParser.FIELD_IDENTIFIER:
                self.state = 153
                self.match(self.FIELD_IDENTIFIER)
                self.state = 158
                self._errHandler.sync(self)
                _la = self._input.LA(1)
        else:
            raise NoViableAltException(self)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class Enum_defContext(ParserRuleContext):
    """Parse-tree context for the `enum_def` rule (generated by ANTLR)."""
    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser
    def BLOCK_OPEN(self):
        return self.getToken(ProtoParser.BLOCK_OPEN, 0)
    def BLOCK_CLOSE(self):
        return self.getToken(ProtoParser.BLOCK_CLOSE, 0)
    def enum_content(self):
        return self.getTypedRuleContext(ProtoParser.Enum_contentContext,0)
    def enum_name(self):
        return self.getTypedRuleContext(ProtoParser.Enum_nameContext,0)
    def ENUM_LITERAL(self):
        return self.getToken(ProtoParser.ENUM_LITERAL, 0)
    def getRuleIndex(self):
        return ProtoParser.RULE_enum_def
    def enterRule(self, listener:ParseTreeListener):
        if isinstance( listener, ProtoParserListener ):
            listener.enterEnum_def(self)
    def exitRule(self, listener:ParseTreeListener):
        if isinstance( listener, ProtoParserListener ):
            listener.exitEnum_def(self)
def enum_def(self):
    """Parse rule `enum_def`: ENUM_LITERAL enum_name BLOCK_OPEN enum_content BLOCK_CLOSE."""
    localctx = ProtoParser.Enum_defContext(self, self._ctx, self.state)
    self.enterRule(localctx, 32, self.RULE_enum_def)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 161
        self.match(self.ENUM_LITERAL)
        self.state = 162
        self.enum_name()
        self.state = 163
        self.match(self.BLOCK_OPEN)
        self.state = 164
        self.enum_content()
        self.state = 165
        self.match(self.BLOCK_CLOSE)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class Enum_nameContext(ParserRuleContext):
    """Parse-tree context for the `enum_name` rule (generated by ANTLR)."""
    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser
    def IDENTIFIER(self):
        return self.getToken(ProtoParser.IDENTIFIER, 0)
    def getRuleIndex(self):
        return ProtoParser.RULE_enum_name
    def enterRule(self, listener:ParseTreeListener):
        if isinstance( listener, ProtoParserListener ):
            listener.enterEnum_name(self)
    def exitRule(self, listener:ParseTreeListener):
        if isinstance( listener, ProtoParserListener ):
            listener.exitEnum_name(self)
def enum_name(self):
    """Parse rule `enum_name`: IDENTIFIER."""
    localctx = ProtoParser.Enum_nameContext(self, self._ctx, self.state)
    self.enterRule(localctx, 34, self.RULE_enum_name)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 167
        self.match(self.IDENTIFIER)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class Enum_contentContext(ParserRuleContext):
    """Parse-tree context for the `enum_content` rule (generated by ANTLR)."""
    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser
    def option_line_def(self, i:int=None):
        # i is None -> list of all matches; otherwise the i-th match.
        if i is None:
            return self.getTypedRuleContexts(ProtoParser.Option_line_defContext)
        else:
            return self.getTypedRuleContext(ProtoParser.Option_line_defContext,i)
    def enum_item_def(self, i:int=None):
        if i is None:
            return self.getTypedRuleContexts(ProtoParser.Enum_item_defContext)
        else:
            return self.getTypedRuleContext(ProtoParser.Enum_item_defContext,i)
    def getRuleIndex(self):
        return ProtoParser.RULE_enum_content
    def enterRule(self, listener:ParseTreeListener):
        if isinstance( listener, ProtoParserListener ):
            listener.enterEnum_content(self)
    def exitRule(self, listener:ParseTreeListener):
        if isinstance( listener, ProtoParserListener ):
            listener.exitEnum_content(self)
def enum_content(self):
    """Parse rule `enum_content`: (option_line_def | enum_item_def)*."""
    localctx = ProtoParser.Enum_contentContext(self, self._ctx, self.state)
    self.enterRule(localctx, 36, self.RULE_enum_content)
    self._la = 0 # Token type
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 173
        self._errHandler.sync(self)
        _la = self._input.LA(1)
        # Loop while lookahead starts an option line or an enum item.
        while _la==ProtoParser.OPTION_LITERAL or _la==ProtoParser.IDENTIFIER:
            self.state = 171
            token = self._input.LA(1)
            if token in [self.OPTION_LITERAL]:
                self.state = 169
                self.option_line_def()
            elif token in [self.IDENTIFIER]:
                self.state = 170
                self.enum_item_def()
            else:
                raise NoViableAltException(self)
            self.state = 175
            self._errHandler.sync(self)
            _la = self._input.LA(1)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class Enum_item_defContext(ParserRuleContext):
    """Parse-tree context for the `enum_item_def` rule (generated by ANTLR)."""
    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser
    def INTEGER_LITERAL(self):
        return self.getToken(ProtoParser.INTEGER_LITERAL, 0)
    def option_field_def(self):
        # Optional bracketed field-option list; None when absent.
        return self.getTypedRuleContext(ProtoParser.Option_field_defContext,0)
    def EQUALS(self):
        return self.getToken(ProtoParser.EQUALS, 0)
    def IDENTIFIER(self):
        return self.getToken(ProtoParser.IDENTIFIER, 0)
    def ITEM_TERMINATOR(self):
        return self.getToken(ProtoParser.ITEM_TERMINATOR, 0)
    def getRuleIndex(self):
        return ProtoParser.RULE_enum_item_def
    def enterRule(self, listener:ParseTreeListener):
        if isinstance( listener, ProtoParserListener ):
            listener.enterEnum_item_def(self)
    def exitRule(self, listener:ParseTreeListener):
        if isinstance( listener, ProtoParserListener ):
            listener.exitEnum_item_def(self)
def enum_item_def(self):
    """Parse rule `enum_item_def`: IDENTIFIER EQUALS INTEGER_LITERAL option_field_def? ITEM_TERMINATOR."""
    localctx = ProtoParser.Enum_item_defContext(self, self._ctx, self.state)
    self.enterRule(localctx, 38, self.RULE_enum_item_def)
    self._la = 0 # Token type
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 176
        self.match(self.IDENTIFIER)
        self.state = 177
        self.match(self.EQUALS)
        self.state = 178
        self.match(self.INTEGER_LITERAL)
        self.state = 180
        _la = self._input.LA(1)
        # Optional `[...]` field options.
        if _la==ProtoParser.BRACKET_OPEN:
            self.state = 179
            self.option_field_def()
        self.state = 182
        self.match(self.ITEM_TERMINATOR)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class Message_defContext(ParserRuleContext):
    """Parse-tree context for the `message_def` rule (generated by ANTLR)."""
    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser
    def MESSAGE_LITERAL(self):
        return self.getToken(ProtoParser.MESSAGE_LITERAL, 0)
    def BLOCK_OPEN(self):
        return self.getToken(ProtoParser.BLOCK_OPEN, 0)
    def message_content(self):
        # Optional body content; None for an empty message.
        return self.getTypedRuleContext(ProtoParser.Message_contentContext,0)
    def BLOCK_CLOSE(self):
        return self.getToken(ProtoParser.BLOCK_CLOSE, 0)
    def message_name(self):
        return self.getTypedRuleContext(ProtoParser.Message_nameContext,0)
    def getRuleIndex(self):
        return ProtoParser.RULE_message_def
    def enterRule(self, listener:ParseTreeListener):
        if isinstance( listener, ProtoParserListener ):
            listener.enterMessage_def(self)
    def exitRule(self, listener:ParseTreeListener):
        if isinstance( listener, ProtoParserListener ):
            listener.exitMessage_def(self)
def message_def(self):
    """Parse rule `message_def`: MESSAGE_LITERAL message_name BLOCK_OPEN message_content? BLOCK_CLOSE."""
    localctx = ProtoParser.Message_defContext(self, self._ctx, self.state)
    self.enterRule(localctx, 40, self.RULE_message_def)
    self._la = 0 # Token type
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 184
        self.match(self.MESSAGE_LITERAL)
        self.state = 185
        self.message_name()
        self.state = 186
        self.match(self.BLOCK_OPEN)
        self.state = 188
        _la = self._input.LA(1)
        # Body is optional: only parse it if lookahead can start a message element.
        if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << self.OPTION_LITERAL) | (1 << self.ENUM_LITERAL) | (1 << self.MESSAGE_LITERAL) | (1 << self.EXTENSIONS_DEF_LITERAL) | (1 << self.PROTOBUF_SCOPE_LITERAL))) != 0):
            self.state = 187
            self.message_content()
        self.state = 190
        self.match(self.BLOCK_CLOSE)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class Message_nameContext(ParserRuleContext):
    """Parse-tree context for the `message_name` rule (generated by ANTLR)."""
    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser
    def IDENTIFIER(self):
        return self.getToken(ProtoParser.IDENTIFIER, 0)
    def getRuleIndex(self):
        return ProtoParser.RULE_message_name
    def enterRule(self, listener:ParseTreeListener):
        if isinstance( listener, ProtoParserListener ):
            listener.enterMessage_name(self)
    def exitRule(self, listener:ParseTreeListener):
        if isinstance( listener, ProtoParserListener ):
            listener.exitMessage_name(self)
def message_name(self):
    """Parse rule `message_name`: IDENTIFIER."""
    localctx = ProtoParser.Message_nameContext(self, self._ctx, self.state)
    self.enterRule(localctx, 42, self.RULE_message_name)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 192
        self.match(self.IDENTIFIER)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class Message_contentContext(ParserRuleContext):
    """Parse-tree context for the `message_content` rule (generated by ANTLR)."""
    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser
    def message_ext_def(self, i:int=None):
        # i is None -> list of all matches; otherwise the i-th match.
        if i is None:
            return self.getTypedRuleContexts(ProtoParser.Message_ext_defContext)
        else:
            return self.getTypedRuleContext(ProtoParser.Message_ext_defContext,i)
    def message_item_def(self, i:int=None):
        if i is None:
            return self.getTypedRuleContexts(ProtoParser.Message_item_defContext)
        else:
            return self.getTypedRuleContext(ProtoParser.Message_item_defContext,i)
    def option_line_def(self, i:int=None):
        if i is None:
            return self.getTypedRuleContexts(ProtoParser.Option_line_defContext)
        else:
            return self.getTypedRuleContext(ProtoParser.Option_line_defContext,i)
    def enum_def(self, i:int=None):
        if i is None:
            return self.getTypedRuleContexts(ProtoParser.Enum_defContext)
        else:
            return self.getTypedRuleContext(ProtoParser.Enum_defContext,i)
    def message_def(self, i:int=None):
        if i is None:
            return self.getTypedRuleContexts(ProtoParser.Message_defContext)
        else:
            return self.getTypedRuleContext(ProtoParser.Message_defContext,i)
    def getRuleIndex(self):
        return ProtoParser.RULE_message_content
    def enterRule(self, listener:ParseTreeListener):
        if isinstance( listener, ProtoParserListener ):
            listener.enterMessage_content(self)
    def exitRule(self, listener:ParseTreeListener):
        if isinstance( listener, ProtoParserListener ):
            listener.exitMessage_content(self)
def message_content(self):
    """Parse rule `message_content`: one or more of
    (option_line_def | message_item_def | message_def | enum_def | message_ext_def).
    """
    localctx = ProtoParser.Message_contentContext(self, self._ctx, self.state)
    self.enterRule(localctx, 44, self.RULE_message_content)
    self._la = 0 # Token type
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 199
        self._errHandler.sync(self)
        _la = self._input.LA(1)
        # (...)+ loop: first iteration is mandatory; exit test at the bottom.
        while True:
            self.state = 199
            token = self._input.LA(1)
            if token in [self.OPTION_LITERAL]:
                self.state = 194
                self.option_line_def()
            elif token in [self.PROTOBUF_SCOPE_LITERAL]:
                self.state = 195
                self.message_item_def()
            elif token in [self.MESSAGE_LITERAL]:
                self.state = 196
                self.message_def()
            elif token in [self.ENUM_LITERAL]:
                self.state = 197
                self.enum_def()
            elif token in [self.EXTENSIONS_DEF_LITERAL]:
                self.state = 198
                self.message_ext_def()
            else:
                raise NoViableAltException(self)
            self.state = 201
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            if not ((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << self.OPTION_LITERAL) | (1 << self.ENUM_LITERAL) | (1 << self.MESSAGE_LITERAL) | (1 << self.EXTENSIONS_DEF_LITERAL) | (1 << self.PROTOBUF_SCOPE_LITERAL))) != 0)):
                break
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class Message_item_defContext(ParserRuleContext):
    # Generated parse-tree context for 'message_item_def'
    # (one field line: scope type name = tag [options] ;).
    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser
    def INTEGER_LITERAL(self):
        return self.getToken(ProtoParser.INTEGER_LITERAL, 0)
    def proto_type(self):
        return self.getTypedRuleContext(ProtoParser.Proto_typeContext,0)
    def PROTOBUF_SCOPE_LITERAL(self):
        return self.getToken(ProtoParser.PROTOBUF_SCOPE_LITERAL, 0)
    def option_field_def(self):
        return self.getTypedRuleContext(ProtoParser.Option_field_defContext,0)
    def EQUALS(self):
        return self.getToken(ProtoParser.EQUALS, 0)
    def IDENTIFIER(self):
        return self.getToken(ProtoParser.IDENTIFIER, 0)
    def ITEM_TERMINATOR(self):
        return self.getToken(ProtoParser.ITEM_TERMINATOR, 0)
    def getRuleIndex(self):
        return ProtoParser.RULE_message_item_def
    def enterRule(self, listener:ParseTreeListener):
        if isinstance( listener, ProtoParserListener ):
            listener.enterMessage_item_def(self)
    def exitRule(self, listener:ParseTreeListener):
        if isinstance( listener, ProtoParserListener ):
            listener.exitMessage_item_def(self)
def message_item_def(self):
    # Generated rule method (rule index 46).
    localctx = ProtoParser.Message_item_defContext(self, self._ctx, self.state)
    self.enterRule(localctx, 46, self.RULE_message_item_def)
    self._la = 0 # Token type
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 203
        self.match(self.PROTOBUF_SCOPE_LITERAL)
        self.state = 204
        self.proto_type()
        self.state = 205
        self.match(self.IDENTIFIER)
        self.state = 206
        self.match(self.EQUALS)
        self.state = 207
        self.match(self.INTEGER_LITERAL)
        self.state = 209
        _la = self._input.LA(1)
        # Optional bracketed field options.
        if _la==ProtoParser.BRACKET_OPEN:
            self.state = 208
            self.option_field_def()
        self.state = 211
        self.match(self.ITEM_TERMINATOR)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class Message_ext_defContext(ParserRuleContext):
    # Generated parse-tree context for 'message_ext_def'
    # (an 'extensions N to M;' / 'extensions N to max;' line).
    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser
        self.v = None # Token: upper bound of the range (INTEGER or 'max')
    def INTEGER_LITERAL(self, i:int=None):
        if i is None:
            return self.getTokens(ProtoParser.INTEGER_LITERAL)
        else:
            return self.getToken(ProtoParser.INTEGER_LITERAL, i)
    def EXTENSIONS_MAX_LITERAL(self):
        return self.getToken(ProtoParser.EXTENSIONS_MAX_LITERAL, 0)
    def EXTENSIONS_TO_LITERAL(self):
        return self.getToken(ProtoParser.EXTENSIONS_TO_LITERAL, 0)
    def EXTENSIONS_DEF_LITERAL(self):
        return self.getToken(ProtoParser.EXTENSIONS_DEF_LITERAL, 0)
    def ITEM_TERMINATOR(self):
        return self.getToken(ProtoParser.ITEM_TERMINATOR, 0)
    def getRuleIndex(self):
        return ProtoParser.RULE_message_ext_def
    def enterRule(self, listener:ParseTreeListener):
        if isinstance( listener, ProtoParserListener ):
            listener.enterMessage_ext_def(self)
    def exitRule(self, listener:ParseTreeListener):
        if isinstance( listener, ProtoParserListener ):
            listener.exitMessage_ext_def(self)
def message_ext_def(self):
    # Generated rule method (rule index 48).
    localctx = ProtoParser.Message_ext_defContext(self, self._ctx, self.state)
    self.enterRule(localctx, 48, self.RULE_message_ext_def)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 213
        self.match(self.EXTENSIONS_DEF_LITERAL)
        self.state = 214
        self.match(self.INTEGER_LITERAL)
        self.state = 215
        self.match(self.EXTENSIONS_TO_LITERAL)
        self.state = 218
        token = self._input.LA(1)
        if token in [self.INTEGER_LITERAL]:
            self.state = 216
            localctx.v = self.match(self.INTEGER_LITERAL)
        elif token in [self.EXTENSIONS_MAX_LITERAL]:
            self.state = 217
            localctx.v = self.match(self.EXTENSIONS_MAX_LITERAL)
        else:
            raise NoViableAltException(self)
        self.state = 220
        self.match(self.ITEM_TERMINATOR)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class Ext_defContext(ParserRuleContext):
    # Generated parse-tree context for 'ext_def'
    # (an 'extend Name { ... }' block).
    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser
    def BLOCK_OPEN(self):
        return self.getToken(ProtoParser.BLOCK_OPEN, 0)
    def ext_name(self):
        return self.getTypedRuleContext(ProtoParser.Ext_nameContext,0)
    def BLOCK_CLOSE(self):
        return self.getToken(ProtoParser.BLOCK_CLOSE, 0)
    def EXTEND_LITERAL(self):
        return self.getToken(ProtoParser.EXTEND_LITERAL, 0)
    def ext_content(self):
        return self.getTypedRuleContext(ProtoParser.Ext_contentContext,0)
    def getRuleIndex(self):
        return ProtoParser.RULE_ext_def
    def enterRule(self, listener:ParseTreeListener):
        if isinstance( listener, ProtoParserListener ):
            listener.enterExt_def(self)
    def exitRule(self, listener:ParseTreeListener):
        if isinstance( listener, ProtoParserListener ):
            listener.exitExt_def(self)
def ext_def(self):
    # Generated rule method (rule index 50).
    localctx = ProtoParser.Ext_defContext(self, self._ctx, self.state)
    self.enterRule(localctx, 50, self.RULE_ext_def)
    self._la = 0 # Token type
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 222
        self.match(self.EXTEND_LITERAL)
        self.state = 223
        self.ext_name()
        self.state = 224
        self.match(self.BLOCK_OPEN)
        self.state = 226
        _la = self._input.LA(1)
        # Body is optional: only parse ext_content if a starter token follows.
        if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << self.OPTION_LITERAL) | (1 << self.ENUM_LITERAL) | (1 << self.MESSAGE_LITERAL) | (1 << self.PROTOBUF_SCOPE_LITERAL))) != 0):
            self.state = 225
            self.ext_content()
        self.state = 228
        self.match(self.BLOCK_CLOSE)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class Ext_nameContext(ParserRuleContext):
    # Generated parse-tree context for 'ext_name' (the extended type name).
    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser
    def all_identifier(self):
        return self.getTypedRuleContext(ProtoParser.All_identifierContext,0)
    def getRuleIndex(self):
        return ProtoParser.RULE_ext_name
    def enterRule(self, listener:ParseTreeListener):
        if isinstance( listener, ProtoParserListener ):
            listener.enterExt_name(self)
    def exitRule(self, listener:ParseTreeListener):
        if isinstance( listener, ProtoParserListener ):
            listener.exitExt_name(self)
def ext_name(self):
    # Generated rule method (rule index 52): a single all_identifier.
    localctx = ProtoParser.Ext_nameContext(self, self._ctx, self.state)
    self.enterRule(localctx, 52, self.RULE_ext_name)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 230
        self.all_identifier()
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class Ext_contentContext(ParserRuleContext):
    # Generated parse-tree context for 'ext_content'
    # (the body of an extend block: options, fields, messages, enums).
    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser
    def message_item_def(self, i:int=None):
        if i is None:
            return self.getTypedRuleContexts(ProtoParser.Message_item_defContext)
        else:
            return self.getTypedRuleContext(ProtoParser.Message_item_defContext,i)
    def option_line_def(self, i:int=None):
        if i is None:
            return self.getTypedRuleContexts(ProtoParser.Option_line_defContext)
        else:
            return self.getTypedRuleContext(ProtoParser.Option_line_defContext,i)
    def enum_def(self, i:int=None):
        if i is None:
            return self.getTypedRuleContexts(ProtoParser.Enum_defContext)
        else:
            return self.getTypedRuleContext(ProtoParser.Enum_defContext,i)
    def message_def(self, i:int=None):
        if i is None:
            return self.getTypedRuleContexts(ProtoParser.Message_defContext)
        else:
            return self.getTypedRuleContext(ProtoParser.Message_defContext,i)
    def getRuleIndex(self):
        return ProtoParser.RULE_ext_content
    def enterRule(self, listener:ParseTreeListener):
        if isinstance( listener, ProtoParserListener ):
            listener.enterExt_content(self)
    def exitRule(self, listener:ParseTreeListener):
        if isinstance( listener, ProtoParserListener ):
            listener.exitExt_content(self)
def ext_content(self):
    # Generated rule method (rule index 54): one-or-more loop over entries.
    localctx = ProtoParser.Ext_contentContext(self, self._ctx, self.state)
    self.enterRule(localctx, 54, self.RULE_ext_content)
    self._la = 0 # Token type
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 236
        self._errHandler.sync(self)
        _la = self._input.LA(1)
        while True:
            self.state = 236
            token = self._input.LA(1)
            if token in [self.OPTION_LITERAL]:
                self.state = 232
                self.option_line_def()
            elif token in [self.PROTOBUF_SCOPE_LITERAL]:
                self.state = 233
                self.message_item_def()
            elif token in [self.MESSAGE_LITERAL]:
                self.state = 234
                self.message_def()
            elif token in [self.ENUM_LITERAL]:
                self.state = 235
                self.enum_def()
            else:
                raise NoViableAltException(self)
            self.state = 238
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            # Continue while the next token can still start another entry.
            if not ((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << self.OPTION_LITERAL) | (1 << self.ENUM_LITERAL) | (1 << self.MESSAGE_LITERAL) | (1 << self.PROTOBUF_SCOPE_LITERAL))) != 0)):
                break
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class Service_defContext(ParserRuleContext):
    # Generated parse-tree context for 'service_def'
    # (a 'service Name { ... }' block).
    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser
    def service_content(self):
        return self.getTypedRuleContext(ProtoParser.Service_contentContext,0)
    def service_name(self):
        return self.getTypedRuleContext(ProtoParser.Service_nameContext,0)
    def BLOCK_OPEN(self):
        return self.getToken(ProtoParser.BLOCK_OPEN, 0)
    def SERVICE_LITERAL(self):
        return self.getToken(ProtoParser.SERVICE_LITERAL, 0)
    def BLOCK_CLOSE(self):
        return self.getToken(ProtoParser.BLOCK_CLOSE, 0)
    def getRuleIndex(self):
        return ProtoParser.RULE_service_def
    def enterRule(self, listener:ParseTreeListener):
        if isinstance( listener, ProtoParserListener ):
            listener.enterService_def(self)
    def exitRule(self, listener:ParseTreeListener):
        if isinstance( listener, ProtoParserListener ):
            listener.exitService_def(self)
def service_def(self):
    # Generated rule method (rule index 56).
    localctx = ProtoParser.Service_defContext(self, self._ctx, self.state)
    self.enterRule(localctx, 56, self.RULE_service_def)
    self._la = 0 # Token type
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 240
        self.match(self.SERVICE_LITERAL)
        self.state = 241
        self.service_name()
        self.state = 242
        self.match(self.BLOCK_OPEN)
        self.state = 244
        _la = self._input.LA(1)
        # Body is optional: parse only if an option or rpc follows.
        if _la==ProtoParser.OPTION_LITERAL or _la==ProtoParser.RPC_LITERAL:
            self.state = 243
            self.service_content()
        self.state = 246
        self.match(self.BLOCK_CLOSE)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class Service_nameContext(ParserRuleContext):
    # Generated parse-tree context for 'service_name'.
    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser
    def IDENTIFIER(self):
        return self.getToken(ProtoParser.IDENTIFIER, 0)
    def getRuleIndex(self):
        return ProtoParser.RULE_service_name
    def enterRule(self, listener:ParseTreeListener):
        if isinstance( listener, ProtoParserListener ):
            listener.enterService_name(self)
    def exitRule(self, listener:ParseTreeListener):
        if isinstance( listener, ProtoParserListener ):
            listener.exitService_name(self)
def service_name(self):
    # Generated rule method (rule index 58): a single IDENTIFIER.
    localctx = ProtoParser.Service_nameContext(self, self._ctx, self.state)
    self.enterRule(localctx, 58, self.RULE_service_name)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 248
        self.match(self.IDENTIFIER)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class Service_contentContext(ParserRuleContext):
    # Generated parse-tree context for 'service_content'
    # (one-or-more option lines and rpc definitions).
    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser
    def rpc_def(self, i:int=None):
        if i is None:
            return self.getTypedRuleContexts(ProtoParser.Rpc_defContext)
        else:
            return self.getTypedRuleContext(ProtoParser.Rpc_defContext,i)
    def option_line_def(self, i:int=None):
        if i is None:
            return self.getTypedRuleContexts(ProtoParser.Option_line_defContext)
        else:
            return self.getTypedRuleContext(ProtoParser.Option_line_defContext,i)
    def getRuleIndex(self):
        return ProtoParser.RULE_service_content
    def enterRule(self, listener:ParseTreeListener):
        if isinstance( listener, ProtoParserListener ):
            listener.enterService_content(self)
    def exitRule(self, listener:ParseTreeListener):
        if isinstance( listener, ProtoParserListener ):
            listener.exitService_content(self)
def service_content(self):
    # Generated rule method (rule index 60).
    localctx = ProtoParser.Service_contentContext(self, self._ctx, self.state)
    self.enterRule(localctx, 60, self.RULE_service_content)
    self._la = 0 # Token type
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 252
        self._errHandler.sync(self)
        _la = self._input.LA(1)
        while True:
            self.state = 252
            token = self._input.LA(1)
            if token in [self.OPTION_LITERAL]:
                self.state = 250
                self.option_line_def()
            elif token in [self.RPC_LITERAL]:
                self.state = 251
                self.rpc_def()
            else:
                raise NoViableAltException(self)
            self.state = 254
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            # Continue while another option or rpc follows.
            if not (_la==ProtoParser.OPTION_LITERAL or _la==ProtoParser.RPC_LITERAL):
                break
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class Rpc_defContext(ParserRuleContext):
    # Generated parse-tree context for 'rpc_def'
    # (rpc Name (Req) returns (Resp) { options } ; ).
    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser
    def req_name(self):
        return self.getTypedRuleContext(ProtoParser.Req_nameContext,0)
    def resp_name(self):
        return self.getTypedRuleContext(ProtoParser.Resp_nameContext,0)
    def PAREN_OPEN(self, i:int=None):
        if i is None:
            return self.getTokens(ProtoParser.PAREN_OPEN)
        else:
            return self.getToken(ProtoParser.PAREN_OPEN, i)
    def option_line_def(self, i:int=None):
        if i is None:
            return self.getTypedRuleContexts(ProtoParser.Option_line_defContext)
        else:
            return self.getTypedRuleContext(ProtoParser.Option_line_defContext,i)
    def BLOCK_OPEN(self):
        return self.getToken(ProtoParser.BLOCK_OPEN, 0)
    def rpc_name(self):
        return self.getTypedRuleContext(ProtoParser.Rpc_nameContext,0)
    def RPC_LITERAL(self):
        return self.getToken(ProtoParser.RPC_LITERAL, 0)
    def ITEM_TERMINATOR(self):
        return self.getToken(ProtoParser.ITEM_TERMINATOR, 0)
    def BLOCK_CLOSE(self):
        return self.getToken(ProtoParser.BLOCK_CLOSE, 0)
    def RETURNS_LITERAL(self):
        return self.getToken(ProtoParser.RETURNS_LITERAL, 0)
    def PAREN_CLOSE(self, i:int=None):
        if i is None:
            return self.getTokens(ProtoParser.PAREN_CLOSE)
        else:
            return self.getToken(ProtoParser.PAREN_CLOSE, i)
    def getRuleIndex(self):
        return ProtoParser.RULE_rpc_def
    def enterRule(self, listener:ParseTreeListener):
        if isinstance( listener, ProtoParserListener ):
            listener.enterRpc_def(self)
    def exitRule(self, listener:ParseTreeListener):
        if isinstance( listener, ProtoParserListener ):
            listener.exitRpc_def(self)
def rpc_def(self):
    # Generated rule method (rule index 62).
    localctx = ProtoParser.Rpc_defContext(self, self._ctx, self.state)
    self.enterRule(localctx, 62, self.RULE_rpc_def)
    self._la = 0 # Token type
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 256
        self.match(self.RPC_LITERAL)
        self.state = 257
        self.rpc_name()
        self.state = 258
        self.match(self.PAREN_OPEN)
        self.state = 259
        self.req_name()
        self.state = 260
        self.match(self.PAREN_CLOSE)
        self.state = 261
        self.match(self.RETURNS_LITERAL)
        self.state = 262
        self.match(self.PAREN_OPEN)
        self.state = 263
        self.resp_name()
        self.state = 264
        self.match(self.PAREN_CLOSE)
        self.state = 277
        token = self._input.LA(1)
        # Tail is either an option block (with optional trailing ';')
        # or a bare ';'.
        if token in [self.BLOCK_OPEN]:
            self.state = 265
            self.match(self.BLOCK_OPEN)
            self.state = 269
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            while _la==ProtoParser.OPTION_LITERAL:
                self.state = 266
                self.option_line_def()
                self.state = 271
                self._errHandler.sync(self)
                _la = self._input.LA(1)
            self.state = 272
            self.match(self.BLOCK_CLOSE)
            self.state = 274
            _la = self._input.LA(1)
            if _la==ProtoParser.ITEM_TERMINATOR:
                self.state = 273
                self.match(self.ITEM_TERMINATOR)
        elif token in [self.ITEM_TERMINATOR]:
            self.state = 276
            self.match(self.ITEM_TERMINATOR)
        else:
            raise NoViableAltException(self)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class Rpc_nameContext(ParserRuleContext):
    # Generated parse-tree context for 'rpc_name'.
    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser
    def IDENTIFIER(self):
        return self.getToken(ProtoParser.IDENTIFIER, 0)
    def getRuleIndex(self):
        return ProtoParser.RULE_rpc_name
    def enterRule(self, listener:ParseTreeListener):
        if isinstance( listener, ProtoParserListener ):
            listener.enterRpc_name(self)
    def exitRule(self, listener:ParseTreeListener):
        if isinstance( listener, ProtoParserListener ):
            listener.exitRpc_name(self)
def rpc_name(self):
    # Generated rule method (rule index 64): a single IDENTIFIER.
    localctx = ProtoParser.Rpc_nameContext(self, self._ctx, self.state)
    self.enterRule(localctx, 64, self.RULE_rpc_name)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 279
        self.match(self.IDENTIFIER)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class Req_nameContext(ParserRuleContext):
    # Generated parse-tree context for 'req_name' (rpc request type).
    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser
    def all_identifier(self):
        return self.getTypedRuleContext(ProtoParser.All_identifierContext,0)
    def getRuleIndex(self):
        return ProtoParser.RULE_req_name
    def enterRule(self, listener:ParseTreeListener):
        if isinstance( listener, ProtoParserListener ):
            listener.enterReq_name(self)
    def exitRule(self, listener:ParseTreeListener):
        if isinstance( listener, ProtoParserListener ):
            listener.exitReq_name(self)
def req_name(self):
    # Generated rule method (rule index 66): a single all_identifier.
    localctx = ProtoParser.Req_nameContext(self, self._ctx, self.state)
    self.enterRule(localctx, 66, self.RULE_req_name)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 281
        self.all_identifier()
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class Resp_nameContext(ParserRuleContext):
    # Generated parse-tree context for 'resp_name' (rpc response type).
    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser
    def all_identifier(self):
        return self.getTypedRuleContext(ProtoParser.All_identifierContext,0)
    def getRuleIndex(self):
        return ProtoParser.RULE_resp_name
    def enterRule(self, listener:ParseTreeListener):
        if isinstance( listener, ProtoParserListener ):
            listener.enterResp_name(self)
    def exitRule(self, listener:ParseTreeListener):
        if isinstance( listener, ProtoParserListener ):
            listener.exitResp_name(self)
def resp_name(self):
    # Generated rule method (rule index 68): a single all_identifier.
    localctx = ProtoParser.Resp_nameContext(self, self._ctx, self.state)
    self.enterRule(localctx, 68, self.RULE_resp_name)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 283
        self.all_identifier()
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
| asifmadnan/protobuf2uml | bin/protobuf/ProtoParser.py | Python | apache-2.0 | 86,857 |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Copyright 2011-2014, Nigel Small
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from py2neo.legacy.batch import *
from py2neo.legacy.core import *
from py2neo.legacy.index import *
__all__ = ["LegacyResource", "LegacyNode", "Index", "LegacyReadBatch", "LegacyWriteBatch"]
| nicolewhite/py2neo | py2neo/legacy/__init__.py | Python | apache-2.0 | 826 |
import os
from invoke import task
WHEELHOUSE_PATH = os.environ.get('WHEELHOUSE')
def monkey_patch(ctx):
    # Globally patch ssl.create_default_context so that, when no CA file is
    # given, certifi's bundled cacert.pem is used instead of the system one.
    # NOTE: ``ctx`` is unused; kept so this can be invoked like the other
    # tasks in this file.
    # Force an older cacert.pem from certifi v2015.4.28, prevents an ssl failure w/ identity.api.rackspacecloud.com.
    #
    # SubjectAltNameWarning: Certificate for identity.api.rackspacecloud.com has no `subjectAltName`, falling
    # back to check for a `commonName` for now. This feature is being removed by major browsers and deprecated by
    # RFC 2818. (See https://github.com/shazow/urllib3/issues/497 for details.)
    # SubjectAltNameWarning
    import ssl
    import certifi
    # Keep a reference to the original so the wrapper can delegate to it.
    _create_default_context = ssl.create_default_context
    def create_default_context(purpose=ssl.Purpose.SERVER_AUTH, *, cafile=None, capath=None, cadata=None):
        if cafile is None:
            cafile = certifi.where()
        return _create_default_context(purpose=purpose, cafile=cafile, capath=capath, cadata=cadata)
    ssl.create_default_context = create_default_context
@task
def wheelhouse(ctx, develop=False, pty=True):
    """Build wheels for the (dev-)requirements into WHEELHOUSE_PATH."""
    requirements = 'dev-requirements.txt' if develop else 'requirements.txt'
    ctx.run(
        'pip wheel --find-links={} -r {} --wheel-dir={}'.format(
            WHEELHOUSE_PATH, requirements, WHEELHOUSE_PATH),
        pty=pty,
    )
@task
def install(ctx, develop=False, pty=True):
    """Install the package in develop mode plus its (dev-)requirements."""
    ctx.run('python setup.py develop')
    requirements = 'dev-requirements.txt' if develop else 'requirements.txt'
    command = 'pip install --upgrade -r {}'.format(requirements)
    if WHEELHOUSE_PATH:
        # Resolve packages only from the local wheelhouse.
        command = '{} --no-index --find-links={}'.format(command, WHEELHOUSE_PATH)
    ctx.run(command, pty=pty)
@task
def flake(ctx):
    """Lint the codebase with flake8 (options come from setup.cfg)."""
    command = 'flake8 .'
    ctx.run(command, pty=True)
@task
def mypy(ctx):
    """Type-check waterbutler with mypy (options come from setup.cfg)."""
    command = 'mypy waterbutler/'
    ctx.run(command, pty=True)
@task
def test(ctx, verbose=False, types=False):
    """Lint, optionally type-check, then run pytest with coverage."""
    flake(ctx)
    if types:
        mypy(ctx)
    parts = ['py.test', '--cov-report', 'term-missing', '--cov', 'waterbutler', 'tests']
    if verbose:
        parts.append('-v')
    ctx.run(' '.join(parts), pty=True)
@task
def celery(ctx, loglevel='INFO', hostname='%h'):
    """Start a celery worker for the waterbutler task app."""
    # Patch ssl defaults before importing anything that opens TLS connections.
    monkey_patch(ctx)
    from waterbutler.tasks.app import app
    argv = ['worker']
    if loglevel:
        argv += ['--loglevel', loglevel]
    if hostname:
        argv += ['--hostname', hostname]
    app.worker_main(argv)
@task
def rabbitmq(ctx):
    """Run a RabbitMQ server in the foreground."""
    command = 'rabbitmq-server'
    ctx.run(command, pty=True)
@task
def server(ctx):
    """Run the waterbutler API server, optionally attaching a remote debugger."""
    # Patch ssl defaults before importing the server (which may open TLS
    # connections at import time).
    monkey_patch(ctx)
    if os.environ.get('REMOTE_DEBUG', None):
        import pydevd
        # e.g. '127.0.0.1:5678'
        remote_parts = os.environ.get('REMOTE_DEBUG').split(':')
        pydevd.settrace(remote_parts[0], port=int(remote_parts[1]), suspend=False, stdoutToServer=True, stderrToServer=True)
    from waterbutler.server.app import serve
    serve()
@task
def clean(ctx, verbose=False):
    """Delete compiled *.pyc files under the current directory."""
    command = 'find . -name "*.pyc" -delete'
    if verbose:
        print(command)
    ctx.run(command, pty=True)
| TomBaxter/waterbutler | tasks.py | Python | apache-2.0 | 3,064 |
# (c) Copyright [2015] Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.core.urlresolvers import NoReverseMatch # noqa
from django.core.urlresolvers import reverse
from django.utils import html
from django.utils.translation import ugettext_lazy as _
from django.utils import safestring
from horizon import forms
from horizon import tables
import horizon_hpe_storage.api.keystone_api as keystone
import horizon_hpe_storage.api.barbican_api as barbican
class LicenseLink(tables.LinkAction):
    """Row action linking to the array's system detail page."""
    name = "licenses"
    verbose_name = _("View License Information")
    classes = ("btn-log",)

    def get_link_url(self, datum):
        # Target: storage_arrays/<array name>::<test name>/system_detail
        return "storage_arrays/{0}::{1}/system_detail".format(
            datum['name'], datum['test_name'])
class OpenstackFeaturesLink(tables.LinkAction):
    # Row action linking to the backend's license detail page.
    # NOTE(review): verbose_name duplicates LicenseLink's label ("View
    # License Information") even though the class name and URL suggest
    # OpenStack features — looks like a copy-paste; confirm intended label.
    name = "openstack_features"
    verbose_name = _("View License Information")
    classes = ("btn-log",)

    def get_link_url(self, datum):
        # Target: storage_arrays/<array name>::<test name>/license_detail
        link_url = "storage_arrays/" + \
            datum['name'] + "::" + datum['test_name'] + "/license_detail"
        # tab_query_string = tabs.LicenseTab(
        #     tabs.BackendDetailTabs).get_query_string()
        # return "?".join([base_url, tab_query_string])
        return link_url
class RunDiscoveryAction(tables.LinkAction):
    # Table-level action that opens the "Discover Storage Arrays" modal.
    name = "run_discovery"
    verbose_name = _("Discover Storage Arrays")
    url = "horizon:admin:hpe_storage:storage_arrays:discover_arrays"
    classes = ("ajax-modal",)
    # Shared API helpers, configured per-request in allowed().
    keystone_api = keystone.KeystoneAPI()
    barbican_api = barbican.BarbicanAPI()

    def allowed(self, request, node=None):
        """Show the action only when Cinder nodes are registered in Barbican."""
        self.keystone_api.do_setup(request)
        self.barbican_api.do_setup(self.keystone_api.get_session())
        return self.barbican_api.nodes_exist(
            barbican.CINDER_NODE_TYPE)
def get_pool_name(pool_name):
    """Return the pool name, as an HTML link when the detail URL resolves."""
    escaped = html.escape(pool_name)
    try:
        url = reverse("horizon:admin:hpe_storage:storage_arrays:"
                      "pool_detail", args=(pool_name,)) + "pool_details"
    except NoReverseMatch:
        return escaped
    return '<a href="%s">%s</a>' % (url, escaped)
class PoolsColumn(tables.Column):
    """Column rendering each cinder host of a backend as a pool link."""

    def get_raw_data(self, backend_system):
        template = _('%(pool_name)s')
        prefix = backend_system['host_name'] + "@"
        rendered = [
            template % {"pool_name": get_pool_name(prefix + cinder_host)}
            for cinder_host in backend_system['cinder_hosts']
        ]
        return safestring.mark_safe("<br>".join(rendered))
class StorageArraysTable(tables.DataTable):
    # Horizon table listing storage arrays discovered by diagnostic tests.
    system_name = tables.Column(
        'name',
        verbose_name=_('Array Name'),
        form_field=forms.CharField(max_length=64))
    system_ip = tables.Column(
        'ip_address',
        verbose_name=_('IP Address'),
        form_field=forms.CharField(max_length=64))
    model = tables.Column(
        'model',
        verbose_name=_('Model'),
        form_field=forms.CharField(max_length=64))
    serial_number = tables.Column(
        'serial_number',
        verbose_name=_('Serial Number'),
        form_field=forms.CharField(max_length=64))
    os_version = tables.Column(
        'os_version',
        verbose_name=_('OS Version'),
        form_field=forms.CharField(max_length=64))
    wsapi_version = tables.Column(
        'wsapi_version',
        verbose_name=_('WSAPI Version'),
        form_field=forms.CharField(max_length=64))
    # Custom column: one pool link per cinder host (see PoolsColumn).
    pools = PoolsColumn(
        "pools",
        verbose_name=_("Cinder Hosts (Pools)"),
        wrap_list=True)

    def get_object_id(self, storage_array):
        # Row id combines the array name and the diagnostic test name.
        return storage_array['name'] + "::" + storage_array['test_name']

    class Meta(object):
        name = "storage_arrays"
        verbose_name = _("Discovered by Diagnostic Tests")
        # hidden_title = False
        table_actions = (RunDiscoveryAction,)
        row_actions = (LicenseLink,)
| hpe-storage/horizon-hpe-storage-ui | horizon_hpe_storage/storage_panel/storage_arrays/tables.py | Python | apache-2.0 | 4,728 |
# Copyright (c) 2012 OpenStack Foundation.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import netaddr
from neutron_lib.api import converters
from neutron_lib.api import extensions as api_extensions
from neutron_lib.api import validators
from neutron_lib import constants as const
from neutron_lib.db import constants as db_const
from neutron_lib import exceptions
from neutron_lib.plugins import directory
from oslo_utils import netutils
from neutron._i18n import _
from neutron.api import extensions
from neutron.api.v2 import base
from neutron.conf import quota
from neutron.extensions import standardattrdescription as stdattr_ext
from neutron.quota import resource_registry
# Security group Exceptions
class SecurityGroupInvalidPortRange(exceptions.InvalidInput):
    """Raised when port_range_min > port_range_max for TCP/UDP rules."""
    message = _("For TCP/UDP protocols, port_range_min must be "
                "<= port_range_max")
class SecurityGroupInvalidProtocolForPort(exceptions.InvalidInput):
    """Raised when a port range is given for a protocol without ports."""
    message = _("Ports cannot be specified for protocol %(protocol)s. "
                "Ports are only supported for %(valid_port_protocols)s.")
class SecurityGroupInvalidPortValue(exceptions.InvalidInput):
    """Raised for a port outside the valid 0-65535 range."""
    message = _("Invalid value for port %(port)s")
class SecurityGroupInvalidIcmpValue(exceptions.InvalidInput):
    """Raised for an ICMP type/code outside 0-255."""
    message = _("Invalid value for ICMP %(field)s (%(attr)s) "
                "%(value)s. It must be 0 to 255.")
class SecurityGroupEthertypeConflictWithProtocol(exceptions.InvalidInput):
    """Raised when the ethertype conflicts with the rule protocol."""
    message = _("Invalid ethertype %(ethertype)s for protocol "
                "%(protocol)s.")
class SecurityGroupMissingIcmpType(exceptions.InvalidInput):
    """Raised when an ICMP code is given without an ICMP type."""
    message = _("ICMP code (port-range-max) %(value)s is provided"
                " but ICMP type (port-range-min) is missing.")
class SecurityGroupInUse(exceptions.InUse):
    """Raised when an operation targets a security group still in use."""
    message = _("Security Group %(id)s %(reason)s.")

    def __init__(self, **kwargs):
        # Default the reason so callers can omit it.
        if 'reason' not in kwargs:
            kwargs['reason'] = _("in use")
        super(SecurityGroupInUse, self).__init__(**kwargs)
class SecurityGroupCannotRemoveDefault(exceptions.InUse):
    """Raised when deleting the default security group is not permitted."""
    message = _("Insufficient rights for removing default security group.")
class SecurityGroupCannotUpdateDefault(exceptions.InUse):
    """Raised when updating the default security group is attempted."""
    message = _("Updating default security group not allowed.")
class SecurityGroupDefaultAlreadyExists(exceptions.InUse):
    """Raised when a second 'default' security group would be created."""
    message = _("Default security group already exists.")
class SecurityGroupRuleInvalidProtocol(exceptions.InvalidInput):
    """Raised for a protocol value outside the supported names/numbers."""
    message = _("Security group rule protocol %(protocol)s not supported. "
                "Only protocol values %(values)s and integer representations "
                "[0 to 255] are supported.")
class SecurityGroupRulesNotSingleTenant(exceptions.InvalidInput):
    """Raised when a bulk rule create spans multiple tenants."""
    message = _("Multiple tenant_ids in bulk security group rule create"
                " not allowed")
class SecurityGroupMultipleRemoteEntites(exceptions.InvalidInput):
    """Raised when more than one remote_* attribute is supplied."""
    message = _("Only one of remote_ip_prefix or remote_group_id or "
                "remote_address_group_id may be provided.")
class SecurityGroupProtocolRequiredWithPorts(exceptions.InvalidInput):
    """Raised when a port range is supplied without a protocol."""
    message = _("Must also specify protocol if port range is given.")
class SecurityGroupNotSingleGroupRules(exceptions.InvalidInput):
    """Raised when a rule update spans multiple security groups."""
    message = _("Only allowed to update rules for "
                "one security profile at a time")
class SecurityGroupNotFound(exceptions.NotFound):
    """Raised when the referenced security group does not exist."""
    message = _("Security group %(id)s does not exist")
class SecurityGroupRuleNotFound(exceptions.NotFound):
    """Raised when the referenced security group rule does not exist."""
    message = _("Security group rule %(id)s does not exist")
class DuplicateSecurityGroupRuleInPost(exceptions.InUse):
    """Raised when a POST body contains the same rule twice."""
    message = _("Duplicate Security Group Rule in POST.")
class SecurityGroupRuleExists(exceptions.InUse):
    """Raised when an identical rule already exists."""
    message = _("Security group rule already exists. Rule id is %(rule_id)s.")
class SecurityGroupRuleInUse(exceptions.InUse):
    """Raised when an operation targets a rule that is still in use."""
    message = _("Security Group Rule %(id)s %(reason)s.")

    def __init__(self, **kwargs):
        # Default the reason so callers can omit it.
        if 'reason' not in kwargs:
            kwargs['reason'] = _("in use")
        super(SecurityGroupRuleInUse, self).__init__(**kwargs)
class SecurityGroupRuleParameterConflict(exceptions.InvalidInput):
    """Raised when the ethertype conflicts with the CIDR's IP version."""
    message = _("Conflicting value ethertype %(ethertype)s for CIDR %(cidr)s")
class SecurityGroupConflict(exceptions.Conflict):
    """Generic conflict raised while applying a security group operation."""
    message = _("Error %(reason)s while attempting the operation.")
class SecurityGroupRuleInvalidEtherType(exceptions.InvalidInput):
    """Raised for an ethertype outside the supported values."""
    message = _("Security group rule for ethertype '%(ethertype)s' not "
                "supported. Allowed values are %(values)s.")
def convert_protocol(value):
    """Normalize a rule's protocol attribute.

    Returns None for None, the string form of an in-range [0, 255]
    integer, or the lower-cased protocol name when supported; raises
    SecurityGroupRuleInvalidProtocol otherwise.
    """
    if value is None:
        return
    try:
        val = int(value)
        if 0 <= val <= 255:
            # Set value of protocol number to string due to bug 1381379,
            # PostgreSQL fails when it tries to compare integer with string,
            # that exists in db.
            return str(value)
        raise SecurityGroupRuleInvalidProtocol(
            protocol=value, values=sg_supported_protocols)
    except (ValueError, TypeError):
        # Not parseable as an integer: fall back to a case-insensitive
        # protocol-name match.
        if value.lower() in sg_supported_protocols:
            return value.lower()
        raise SecurityGroupRuleInvalidProtocol(
            protocol=value, values=sg_supported_protocols)
    except AttributeError:
        raise SecurityGroupRuleInvalidProtocol(
            protocol=value, values=sg_supported_protocols)
def convert_ethertype_to_case_insensitive(value):
    """Map *value* onto the canonical spelling of a supported ethertype.

    Matching is case-insensitive; anything unrecognised (including
    non-string input) raises SecurityGroupRuleInvalidEtherType.
    """
    if isinstance(value, str):
        folded = value.lower()
        for canonical in sg_supported_ethertypes:
            if canonical.lower() == folded:
                return canonical
    raise SecurityGroupRuleInvalidEtherType(
        ethertype=value, values=sg_supported_ethertypes)
def convert_validate_port_value(port):
    """Validate *port* and return it as an int; None passes through."""
    if port is None:
        return None
    if not netutils.is_valid_port(port):
        raise SecurityGroupInvalidPortValue(port=port)
    return int(port)
def convert_ip_prefix_to_cidr(ip_prefix):
    """Return *ip_prefix* normalized to CIDR notation.

    Falsy input yields None; unparsable input raises InvalidCIDR.
    """
    if not ip_prefix:
        return None
    try:
        return str(netaddr.IPNetwork(ip_prefix))
    except (ValueError, TypeError, netaddr.AddrFormatError):
        raise exceptions.InvalidCIDR(input=ip_prefix)
def _validate_name_not_default(data, max_len=db_const.NAME_FIELD_SIZE):
    """Validate a security-group name, rejecting the reserved 'default'."""
    error = validators.validate_string(data, max_len)
    if error:
        return error
    if data.lower() == "default":
        raise SecurityGroupDefaultAlreadyExists()
# Expose the 'name_not_default' validator for use in the attribute map below.
validators.add_validator('name_not_default', _validate_name_not_default)
# None (unset) plus every protocol name from const.IP_PROTOCOL_MAP.
sg_supported_protocols = ([None] + list(const.IP_PROTOCOL_MAP.keys()))
sg_supported_ethertypes = ['IPv4', 'IPv6']
# Collection names used as API resource keys and response wrappers.
SECURITYGROUPS = 'security_groups'
SECURITYGROUPRULES = 'security_group_rules'
# Attribute Map
RESOURCE_ATTRIBUTE_MAP = {
    SECURITYGROUPS: {
        'id': {'allow_post': False, 'allow_put': False,
               'validate': {'type:uuid': None},
               'is_visible': True,
               'is_filter': True,
               'is_sort_key': True,
               'primary_key': True},
        'name': {'allow_post': True, 'allow_put': True,
                 'is_visible': True, 'default': '', 'is_filter': True,
                 'is_sort_key': True,
                 'validate': {
                     'type:name_not_default': db_const.NAME_FIELD_SIZE}},
        'tenant_id': {'allow_post': True, 'allow_put': False,
                      'required_by_policy': True,
                      'is_sort_key': True,
                      'validate': {
                          'type:string': db_const.PROJECT_ID_FIELD_SIZE},
                      'is_visible': True, 'is_filter': True},
        # Read-only view of the group's rules on the group resource.
        SECURITYGROUPRULES: {'allow_post': False, 'allow_put': False,
                             'is_visible': True},
    },
    SECURITYGROUPRULES: {
        'id': {'allow_post': False, 'allow_put': False,
               'validate': {'type:uuid': None},
               'is_visible': True,
               'is_filter': True,
               'is_sort_key': True,
               'primary_key': True},
        'security_group_id': {'allow_post': True, 'allow_put': False,
                              'is_visible': True, 'required_by_policy': True,
                              'is_sort_key': True, 'is_filter': True},
        'remote_group_id': {'allow_post': True, 'allow_put': False,
                            'default': None, 'is_visible': True,
                            'is_sort_key': True, 'is_filter': True},
        'direction': {'allow_post': True, 'allow_put': False,
                      'is_visible': True, 'is_filter': True,
                      'is_sort_key': True,
                      'validate': {'type:values': ['ingress', 'egress']}},
        # 'convert_to' hooks normalize user input via the converters above.
        'protocol': {'allow_post': True, 'allow_put': False,
                     'is_visible': True, 'default': None,
                     'is_sort_key': True, 'is_filter': True,
                     'convert_to': convert_protocol},
        'port_range_min': {'allow_post': True, 'allow_put': False,
                           'convert_to': convert_validate_port_value,
                           'default': None, 'is_visible': True,
                           'is_sort_key': True, 'is_filter': True},
        'port_range_max': {'allow_post': True, 'allow_put': False,
                           'convert_to': convert_validate_port_value,
                           'default': None, 'is_visible': True,
                           'is_sort_key': True, 'is_filter': True},
        'ethertype': {'allow_post': True, 'allow_put': False,
                      'is_visible': True, 'default': 'IPv4',
                      'is_filter': True, 'is_sort_key': True,
                      'convert_to': convert_ethertype_to_case_insensitive,
                      'validate': {'type:values': sg_supported_ethertypes}},
        'remote_ip_prefix': {'allow_post': True, 'allow_put': False,
                             'default': None, 'is_visible': True,
                             'is_sort_key': True, 'is_filter': True,
                             'convert_to': convert_ip_prefix_to_cidr},
        'tenant_id': {'allow_post': True, 'allow_put': False,
                      'required_by_policy': True,
                      'is_sort_key': True,
                      'validate': {
                          'type:string': db_const.PROJECT_ID_FIELD_SIZE},
                      'is_visible': True, 'is_filter': True},
    }
}
# Ports gain a writable 'security_groups' attribute (list of group UUIDs);
# None is converted to an empty list, unset stays ATTR_NOT_SPECIFIED.
EXTENDED_ATTRIBUTES_2_0 = {
    'ports': {SECURITYGROUPS: {'allow_post': True,
                               'allow_put': True,
                               'is_visible': True,
                               'is_filter': True,
                               'convert_to':
                                   converters.convert_none_to_empty_list,
                               'validate': {'type:uuid_list': None},
                               'default': const.ATTR_NOT_SPECIFIED}}}
# Register the configuration options
# (registers the security-group quota options at import time).
quota.register_quota_opts(quota.security_group_quota_opts)
class Securitygroup(api_extensions.ExtensionDescriptor):
    """Security group extension."""

    @classmethod
    def get_name(cls):
        return "security-group"

    @classmethod
    def get_alias(cls):
        return "security-group"

    @classmethod
    def get_description(cls):
        return "The security groups extension."

    @classmethod
    def get_updated(cls):
        return "2012-10-05T10:00:00-00:00"

    @classmethod
    def get_resources(cls):
        """Returns Ext Resources."""
        plugin = directory.get_plugin()
        resources = []
        for singular in ('security_group', 'security_group_rule'):
            plural = singular.replace('_', '-') + "s"
            attr_params = RESOURCE_ATTRIBUTE_MAP.get(singular + "s", dict())
            resource_registry.register_resource_by_name(singular)
            controller = base.create_resource(plural,
                                              singular,
                                              plugin, attr_params,
                                              allow_bulk=True,
                                              allow_pagination=True,
                                              allow_sorting=True)
            resources.append(extensions.ResourceExtension(
                plural, controller, attr_map=attr_params))
        return resources

    def update_attributes_map(self, attributes):
        super(Securitygroup, self).update_attributes_map(
            attributes, extension_attrs_map=RESOURCE_ATTRIBUTE_MAP)

    def get_extended_resources(self, version):
        # Extended attributes exist only for the 2.0 API.
        if version != "2.0":
            return {}
        merged = dict(EXTENDED_ATTRIBUTES_2_0)
        merged.update(RESOURCE_ATTRIBUTE_MAP)
        return merged

    def get_required_extensions(self):
        return [stdattr_ext.Standardattrdescription.get_alias()]
class SecurityGroupPluginBase(object, metaclass=abc.ABCMeta):
    """Abstract CRUD interface a plugin must implement for security groups."""
    @abc.abstractmethod
    def create_security_group(self, context, security_group):
        """Create a security group."""
        pass
    @abc.abstractmethod
    def update_security_group(self, context, id, security_group):
        """Update security group *id*."""
        pass
    @abc.abstractmethod
    def delete_security_group(self, context, id):
        """Delete security group *id*."""
        pass
    @abc.abstractmethod
    def get_security_groups(self, context, filters=None, fields=None,
                            sorts=None, limit=None, marker=None,
                            page_reverse=False):
        """List security groups with optional filtering/sorting/paging."""
        pass
    @abc.abstractmethod
    def get_security_group(self, context, id, fields=None):
        """Return security group *id*, optionally restricted to *fields*."""
        pass
    @abc.abstractmethod
    def create_security_group_rule(self, context, security_group_rule):
        """Create a security group rule."""
        pass
    @abc.abstractmethod
    def delete_security_group_rule(self, context, id):
        """Delete security group rule *id*."""
        pass
    @abc.abstractmethod
    def get_security_group_rules(self, context, filters=None, fields=None,
                                 sorts=None, limit=None, marker=None,
                                 page_reverse=False):
        """List security group rules with optional filtering/paging."""
        pass
    @abc.abstractmethod
    def get_security_group_rule(self, context, id, fields=None):
        """Return security group rule *id*, optionally only *fields*."""
        pass
| mahak/neutron | neutron/extensions/securitygroup.py | Python | apache-2.0 | 14,709 |
# Copyright 2014 OpenStack Foundation.
# Copyright 2014 Intel Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Icehouse Initial
Revision ID: 0_1
Revises: None
Create Date: 2014-02-19 17:23:47.705197
"""
# revision identifiers, used by Alembic.
revision = '0_1'
down_revision = None  # base revision: nothing precedes it
import uuid
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects.mysql import MEDIUMTEXT
def _generate_unicode_uuid():
    """Return a freshly generated UUID4 as a unicode string (Python 2)."""
    new_uuid = uuid.uuid4()
    return unicode(str(new_uuid))
def MediumText():
    """Generic Text column type that upgrades to MEDIUMTEXT on MySQL."""
    generic_text = sa.Text()
    return generic_text.with_variant(MEDIUMTEXT(), 'mysql')
def _id_column():
    """Primary-key column: 36-char string defaulting to a random UUID."""
    return sa.Column('id', sa.String(36),
                     primary_key=True,
                     default=_generate_unicode_uuid)
def upgrade():
    """Create the initial (Icehouse) schema.

    Parent tables (computehosts, leases) are created before the tables
    that hold foreign keys into them.
    """
    # Hypervisor inventory tracked by the service.
    op.create_table(
        'computehosts',
        sa.Column('created_at', sa.DateTime(), nullable=True),
        sa.Column('updated_at', sa.DateTime(), nullable=True),
        _id_column(),
        sa.Column('vcpus', sa.Integer(), nullable=False),
        sa.Column('cpu_info', MediumText(), nullable=False),
        sa.Column('hypervisor_type', MediumText(), nullable=False),
        sa.Column('hypervisor_version', sa.Integer(), nullable=False),
        sa.Column('hypervisor_hostname', sa.String(length=255), nullable=True),
        sa.Column('memory_mb', sa.Integer(), nullable=False),
        sa.Column('local_gb', sa.Integer(), nullable=False),
        sa.Column('status', sa.String(length=13)),
        sa.PrimaryKeyConstraint('id'))
    # User-visible leases; 'name' is unique per deployment.
    op.create_table(
        'leases',
        sa.Column('created_at', sa.DateTime(), nullable=True),
        sa.Column('updated_at', sa.DateTime(), nullable=True),
        _id_column(),
        sa.Column('name', sa.String(length=80), nullable=False),
        sa.Column('user_id', sa.String(length=255), nullable=True),
        sa.Column('tenant_id', sa.String(length=255), nullable=True),
        sa.Column('start_date', sa.DateTime(), nullable=False),
        sa.Column('end_date', sa.DateTime(), nullable=False),
        sa.Column('trust_id', sa.String(length=36)),
        sa.PrimaryKeyConstraint('id'),
        sa.UniqueConstraint('name'))
    # Resource reservations belonging to a lease.
    op.create_table(
        'reservations',
        sa.Column('created_at', sa.DateTime(), nullable=True),
        sa.Column('updated_at', sa.DateTime(), nullable=True),
        _id_column(),
        sa.Column('lease_id', sa.String(length=36), nullable=False),
        sa.Column('resource_id', sa.String(length=36)),
        sa.Column('resource_type', sa.String(length=66)),
        sa.Column('status', sa.String(length=13)),
        sa.ForeignKeyConstraint(['lease_id'], ['leases.id'], ),
        sa.PrimaryKeyConstraint('id'))
    # Free-form key/value capabilities attached to a compute host.
    op.create_table(
        'computehost_extra_capabilities',
        sa.Column('created_at', sa.DateTime(), nullable=True),
        sa.Column('updated_at', sa.DateTime(), nullable=True),
        _id_column(),
        sa.Column('computehost_id', sa.String(length=36), nullable=True),
        sa.Column('capability_name', sa.String(length=64), nullable=False),
        sa.Column('capability_value', MediumText(), nullable=False),
        sa.ForeignKeyConstraint(['computehost_id'], ['computehosts.id'], ),
        sa.PrimaryKeyConstraint('id'))
    # Scheduled lifecycle events (e.g. start/end) for a lease.
    op.create_table(
        'events',
        sa.Column('created_at', sa.DateTime(), nullable=True),
        sa.Column('updated_at', sa.DateTime(), nullable=True),
        _id_column(),
        sa.Column('lease_id', sa.String(length=36), nullable=True),
        sa.Column('event_type', sa.String(length=66)),
        sa.Column('time', sa.DateTime()),
        sa.Column('status', sa.String(length=13)),
        sa.ForeignKeyConstraint(['lease_id'], ['leases.id'], ),
        sa.PrimaryKeyConstraint('id'))
    # Join table mapping compute hosts to reservations.
    op.create_table(
        'computehost_allocations',
        sa.Column('created_at', sa.DateTime(), nullable=True),
        sa.Column('updated_at', sa.DateTime(), nullable=True),
        _id_column(),
        sa.Column('compute_host_id', sa.String(length=36), nullable=True),
        sa.Column('reservation_id', sa.String(length=36), nullable=True),
        sa.ForeignKeyConstraint(['compute_host_id'], ['computehosts.id'], ),
        sa.ForeignKeyConstraint(['reservation_id'], ['reservations.id'], ),
        sa.PrimaryKeyConstraint('id'))
    # Host-reservation details (count range, property filters).
    op.create_table(
        'computehost_reservations',
        sa.Column('created_at', sa.DateTime(), nullable=True),
        sa.Column('updated_at', sa.DateTime(), nullable=True),
        _id_column(),
        sa.Column('reservation_id', sa.String(length=36), nullable=True),
        sa.Column('resource_properties', MediumText()),
        sa.Column('count_range', sa.String(length=36)),
        sa.Column('hypervisor_properties', MediumText()),
        sa.Column('status', sa.String(length=13)),
        sa.ForeignKeyConstraint(['reservation_id'], ['reservations.id'], ),
        sa.PrimaryKeyConstraint('id'))
def downgrade():
    """Drop all tables; FK-holding children first, parents last."""
    op.drop_table('computehost_extra_capabilities')
    op.drop_table('computehost_allocations')
    op.drop_table('computehost_reservations')
    op.drop_table('computehosts')
    op.drop_table('reservations')
    op.drop_table('events')
    op.drop_table('leases')
| paramite/blazar | climate/db/migration/alembic_migrations/versions/0_1_initial.py | Python | apache-2.0 | 5,677 |
__author__ = 'mthompson'
import copy
import inspect
import importlib
import urllib
import json
from urlparse import urlparse
import collections
import re
import os
import keyword
import sys
# Python-2-only hack: reload() restores sys.setdefaultencoding (removed
# by site.py) so implicit str/unicode conversions use UTF-8 module-wide.
reload(sys)
sys.setdefaultencoding('utf-8')
class A10BaseClass(object):
    """Shared REST plumbing (JSON serialization, URL handling, CRUD verbs)
    for generated A10 SDK objects."""
    # Class-wide flag: True while serializing for POST (None attributes are
    # emitted), False for PUT.  NOTE(review): shared mutable class state --
    # not thread-safe; confirm single-threaded usage.
    is_POST = True
    def __init__(self):
        # Last error payload/message; empty string means "no error".
        self.ERROR_MSG = ""
    @classmethod
    def __json__(self, class_object, **kwargs):
        """Serialize *class_object* into the device's JSON dict shape.

        Temporarily strips non-payload attributes (required, DeviceProxy,
        DEBUG-* keys), delegates to Obj_to_json, then restores them.
        Returns '' for an empty payload, the bare dict when called with
        sub=0, otherwise the dict wrapped under the object's b_key.
        """
        # Detach 'required' and 'DeviceProxy' so they never reach the wire;
        # bare excepts tolerate objects that lack either attribute.
        try:
            temp_require = copy.deepcopy(class_object.required)
            del class_object.required
        except:
            pass
        try:
            dp = copy.deepcopy(class_object.DeviceProxy)
            delattr(class_object, "DeviceProxy")
        except:
            pass
        json_object = self.Obj_to_json(class_object.__dict__)
        if 'a10-url' in json_object:
            del json_object['a10-url']
        if "b-key" in json_object:
            del json_object['b-key']
        # Debug bookkeeping keys may appear at top level or nested under
        # the object's b_key; remove them wherever they landed.
        debug_keys = ["DEBUG-CONNECTION", "DEBUG-Payload", "DEBUG-Response", "DEBUG-URL", "DEBUG-headers"]
        for d_keys in debug_keys:
            try:
                del json_object[d_keys]
            except:
                try:
                    del json_object[class_object.b_key][d_keys]
                except:
                    pass
        try:
            class_object.__setattr__("DeviceProxy", dp)
        except:
            pass
        #(Raunak):json_object is None or empty dictionary return None
        try:
            class_object.__setattr__("required", temp_require)
        except:
            pass
        if not json_object:
            return ''
        elif "sub" in kwargs and kwargs['sub'] == 0:
            return json_object
        else:
            r_object = {}
            r_object[class_object.b_key] = json_object
            return r_object
    @classmethod
    def Obj_to_json(self, obj):
        """Recursively convert an attribute ``__dict__`` into JSON form.

        Underscores in keys become hyphens and the A10WW escape prefix is
        dropped; nested SDK objects and lists recurse; booleans map to
        "1"/"0"; empty strings/containers are omitted.  None values are
        emitted only while serializing for POST (see is_POST).
        """
        new_obj = {}
        for k, v in obj.items():
            key = k.replace("_", "-").replace("A10WW_", "").replace(
                "A10WW-", "")
            if "class" in str(type(v)):
                # Nested SDK object (old-style class instance): serialize it.
                json_obj = self.__json__(v)
                #(Raunak):If the response exists update the new_obj dictionary
                #with the json data
                if json_obj:
                    new_obj.update(json_obj)
            elif (isinstance(v, dict) or
                    isinstance(v, unicode) or
                    isinstance(v, str) or
                    isinstance(v, list)) and len(v) != 0:
                if isinstance(v, list):
                    temp_list = []
                    if len(v) != 0:
                        for i in v:
                            temp_list.append(i.__json__(i, sub=0))
                        new_obj[key] = temp_list
                elif isinstance(v, dict) and len(v) != 0:
                    new_obj[key] = self.Obj_to_json(v)
                elif v is True:
                    new_obj[key] = "1"
                elif v is False:
                    new_obj[key] = "0"
                elif v is not None:
                    if len(v) != 0:
                        new_obj[key] = v
            elif isinstance(v, int):
                new_obj[key] = v
            # If it's an attribute and it's value is None, use None
            # (grao): adding null value attributes only when POST (avoiding for PUT)
            elif v is None and A10BaseClass.is_POST:
                new_obj[key] = v
        return new_obj
'''
Converts from Unicode
'''
def convert(self, data):
if isinstance(data, basestring):
return str(data)
elif isinstance(data, collections.Mapping):
return dict(map(self.convert, data.iteritems()))
elif isinstance(data, collections.Iterable):
return type(data)(map(self.convert, data))
else:
return data
"""
GET THE PARENT KEY ASS NEEDED
"""
def find_key(self, d, key):
for k, v in d.items():
if isinstance(v, dict):
p = self.find_key(v, key)
if p:
return [k] + p
elif v == key:
return [k]
def _format_key_name_for_module_lookup(self, key, lambda_expression):
if key.startswith('A10WW_'):
return lambda_expression(key)
else:
return lambda_expression(key).title()
def _search_for_child_object_module(self, caller, sub_class):
try:
if caller.__name__.endswith('_oper'):
module_name = caller.__name__.replace('_oper', '')
caller_name = importlib.import_module(module_name + "_" + sub_class.lower() + "_oper")
elif caller.__name__.endswith('_stats'):
module_name = caller.__name__.replace('_stats', '')
caller_name = importlib.import_module(module_name + "_" + sub_class.lower() + "_stats")
else:
caller_name = importlib.import_module(caller.__name__ + "_" + sub_class.lower())
except:
caller_name = importlib.import_module(caller.__name__)
return caller_name
    def _search_for_child_class_inside_module(self, child_node_name, caller_name, DeviceProxy):
        """Resolve and instantiate the SDK class for *child_node_name*
        inside module *caller_name*, trying successive name-mangling
        schemes (strip '-list', title-case variants, collapsed
        underscores, trailing 'List').

        NOTE(review): if every attempt fails, ``obj_class`` is never
        bound and the final return raises NameError -- confirm callers
        cannot reach that state.
        """
        k = child_node_name
        sub_class = ''.join(
            x for x in k.replace("-list", "", 1).replace("-", "_").title() if
            not x.isspace())
        try:
            obj_class = getattr(caller_name, sub_class)(DeviceProxy=DeviceProxy)
        except:
            try:
                sub_class = ''.join(
                    x for x in k.replace("-", " ").replace("-list", "", 1).title()
                    if not x.isspace())
                obj_class = getattr(caller_name, sub_class)(DeviceProxy=DeviceProxy)
            except:
                try:
                    sub_class = ''.join(x for x in
                                        k.replace("-", "").replace("list", "").replace("list",
                                                                                       "").title()
                                        if not x.isspace())
                    obj_class = getattr(caller_name, sub_class)(DeviceProxy=DeviceProxy)
                except:
                    try:
                        sub_class = ''.join(x for x in
                                            k.replace("-", "_").replace("list", "").replace("list",
                                                                                            "").title()
                                            if not x.isspace())
                        obj_class = getattr(caller_name, sub_class)(DeviceProxy=DeviceProxy)
                    except:
                        #(RAUNAK):Added one more check for the VLAN tagged_ethernet_list like cases
                        #Would check for the class with the name TaggedEthernet
                        try:
                            sub_class = ''.join(x for x in
                                                k.replace("-", "_").replace("list", "").replace(
                                                    "list",
                                                    "").title().replace('_', '')
                                                if not x.isspace())
                            obj_class = getattr(caller_name, sub_class)(DeviceProxy=DeviceProxy)
                        except:
                            #(Raunak):Added this for ClassList
                            try:
                                sub_class = ''.join(x for x in
                                                    k.replace("-list", "").title() + 'List'
                                                    if not x.isspace())
                                obj_class = getattr(caller_name, sub_class)(DeviceProxy=DeviceProxy)
                            except:
                                pass
        return obj_class
    def Factory(self, DeviceProxy, obj, name, parent=''):
        """Rebuild SDK objects from a decoded JSON response dict *obj*.

        The class for each top-level key is resolved by trying a series
        of module/class-name permutations derived from the key; child
        dicts/lists are attached recursively as attributes.  Returns a
        list of objects for list-shaped responses, a single object
        otherwise, or the raw dict when no class can be resolved.
        """
        obj = self.convert(obj)
        # for i in obj:
        # self.default_object_key = i
        if parent:
            caller = inspect.getmodule(parent)
        else:
            caller = inspect.getmodule(self)
        top_node_obj_list = []
        # Derive the candidate class name from the first response key.
        for list_name_key, list_name_value in obj.items():
            if list_name_key == 'class-list-list':
                class_object = 'ClassList'
                break
            else:
                class_object = ''.join(
                    x for x in list_name_key.replace("-", " ").replace(
                        "list", "").title() if not x.isspace())
        if not class_object:
            class_object = ''.join(
                x for x in list_name_key.replace("-", " ").title() if not x.isspace())
        class_object = kwbl().kwbl(class_object)
        # Try successively more specific module paths until the class
        # resolves; as a last resort scan every sibling module.
        try:
            caller = importlib.import_module(caller.__name__)
            new_class = getattr(caller, class_object)(DeviceProxy=DeviceProxy)
        except:
            try:
                caller = importlib.import_module(caller.__name__)
                new_class = getattr(caller, class_object + 'List')(DeviceProxy=DeviceProxy)
            except:
                try:
                    caller = importlib.import_module(caller.__name__ + "_" + list_name_key.lower())
                    new_class = getattr(caller, class_object)(DeviceProxy=DeviceProxy)
                except:
                    try:
                        caller = importlib.import_module(
                            caller.__name__ + "_" + list_name_key.replace('-', '_').lower())
                        new_class = getattr(caller, class_object)(DeviceProxy=DeviceProxy)
                    except:
                        try:
                            temp_name = list_name_key.split("-")
                            test_module = temp_name[0]
                            caller = importlib.import_module(caller.__name__ + "_" + test_module.lower())
                            new_class = getattr(caller, class_object)(DeviceProxy=DeviceProxy)
                        except:
                            try:
                                caller = importlib.import_module(
                                    caller.__name__ + "_" + parent.lower() + "_" + list_name_key.lower())
                                new_class = getattr(caller, class_object)(DeviceProxy=DeviceProxy)
                            except:
                                try:
                                    caller = importlib.import_module(caller.__name__ + "_" + parent.lower())
                                    new_class = getattr(caller, class_object)(DeviceProxy=DeviceProxy)
                                except Exception as e:
                                    try:
                                        # try to import class from all sibling modules
                                        found_in_sibling = False
                                        for module in os.listdir(os.path.dirname(caller.__file__)):
                                            if module.endswith("py") and module != "__init__.py":
                                                try:
                                                    class_name = ''.join(
                                                        x for x in list_name_key.split("-").pop().title() if
                                                        not x.isspace())
                                                    caller_sibling_modules = importlib.import_module(
                                                        "." + module.replace(".py", ""), caller.__package__)
                                                    new_class = getattr(caller_sibling_modules, class_name)(
                                                        DeviceProxy=DeviceProxy)
                                                    found_in_sibling = True
                                                    break
                                                except:
                                                    try:
                                                        class_name = ''.join(
                                                            x for x in list_name_key.replace("-", " ").title() if
                                                            not x.isspace())
                                                        caller_sibling_modules = importlib.import_module(
                                                            "." + module.replace(".py", ""), caller.__package__)
                                                        new_class = getattr(caller_sibling_modules, class_name)(
                                                            DeviceProxy=DeviceProxy)
                                                        found_in_sibling = True
                                                        break
                                                    except:
                                                        pass
                                                    pass
                                        if found_in_sibling is False:
                                            raise e
                                    except:
                                        return obj
        # class_object
        if isinstance(list_name_value, list):
            # List response: build one SDK object per element.
            for list_obj in list_name_value:
                sdk_obj = copy.deepcopy(new_class)
                for k, v in list_obj.items():
                    k = kwbl().kwbl(k)
                    new_obj_name = k.replace("-list", "", 1).replace("-", "_")
                    if new_obj_name[len(new_obj_name) - 1] == "_":
                        new_obj_name = ''.join(
                            x for x in k.replace("-", " ").replace("-list", "", 1).title() if
                            not x.isspace())
                    if isinstance(v, list):
                        obj_name_list = []
                        caller_name = self._search_for_child_object_module(caller, new_obj_name)
                        for keys in v:
                            obj_class = self._search_for_child_class_inside_module(k, caller_name, DeviceProxy)
                            # obj_class = getattr(caller_name, new_obj_name)(DeviceProxy=DeviceProxy)
                            for v_key, v_val in keys.items():
                                v_key = kwbl().kwbl(v_key)
                                if v_key == "a10-url":
                                    temp_v = urlparse(v_val)
                                    v_val = temp_v.path
                                obj_class.__setattr__(v_key.replace("-", "_"), v_val)
                            obj_name_list.append(obj_class)
                        sdk_obj.__setattr__(k.replace("-", "_"), obj_name_list)
                    elif isinstance(v, dict):
                        sdk_obj.__setattr__(k.replace("-", "_"),
                                            (sdk_obj.Factory(DeviceProxy, {k: v}, k, sdk_obj)))
                    else:
                        sdk_obj.__setattr__(new_obj_name.replace("-", "_"), v)
                top_node_obj_list.append(copy.deepcopy(sdk_obj))
        else:
            # Single-object response: keys whose names natively end in
            # '-list' must keep the suffix when mapped to attributes.
            with_native_list_in_name = ['access-list', 'ip-map-list', 'acl-id-list-list', 'acl-name-list-list',
                                        'lsn-rule-list',
                                        'inside-src-permit-list', 'nat-ip-list', 'inside-ip-list', 'ipv4-list']
            for k, v in list_name_value.items():
                k = kwbl().kwbl(k)
                if k == "a10-url":
                    temp_v = urlparse(v)
                    v = temp_v.path
                if k in with_native_list_in_name:
                    new_obj_name = with_native_list_in_name[with_native_list_in_name.index(k)].replace('-', '_')
                # Bug 191464 Mike Thompson detecting type vs name.
                elif not isinstance(v, list):
                    new_obj_name = k.replace("-", "_")
                else:
                    new_obj_name = k.replace("-list", "", 1).replace("-", "_") if k.endswith('list') else k.replace(
                        "-", "_")
                if isinstance(v, list):
                    obj_name_list = []
                    caller_name = self._search_for_child_object_module(caller, new_obj_name)
                    for keys in v:
                        obj_class = self._search_for_child_class_inside_module(k, caller_name, DeviceProxy)
                        for v_key, v_val in keys.items():
                            v_key = kwbl().kwbl(v_key)
                            if v_key == "a10-url":
                                temp_v = urlparse(v_val)
                                v_val = temp_v.path
                            elif isinstance(v_val, dict):
                                obj_class.__setattr__(v_key.replace("-", "_"), (
                                    obj_class.Factory(DeviceProxy, {v_key: v_val}, v_key, obj_class)))
                                continue
                            elif isinstance(v_val, list):
                                sub_obj = obj_class.Factory(DeviceProxy, {v_key: v_val}, v_key, obj_class)
                                sub_obj_list = getattr(obj_class, v_key, [])
                                if isinstance(sub_obj, list):
                                    sub_obj_list = sub_obj_list + sub_obj
                                else:
                                    sub_obj_list.append(sub_obj)
                                setattr(obj_class, v_key.replace("-", "_"), sub_obj_list)
                                continue
                            obj_class.__setattr__(v_key.replace("-", "_"), v_val)
                        obj_name_list.append(obj_class)
                    new_class.__setattr__(k.replace("-", "_"), copy.deepcopy(obj_name_list))
                elif isinstance(v, dict):
                    new_class.__setattr__(new_obj_name, (new_class.Factory(DeviceProxy, {k: v}, k, new_class)))
                else:
                    new_class.__setattr__(new_obj_name, v)
        try:
            if top_node_obj_list and len(top_node_obj_list) > 0:
                return top_node_obj_list
            else:
                return new_class
        except:
            pass
def depth_finder(self, d, depth=0):
if isinstance(d, list):
if not isinstance(d, dict) or not d or not isinstance(d, list):
return depth
elif isinstance(d, dict):
depth
if not isinstance(d, dict) or not d:
return depth
return max(self.depth_finder(v, depth + 1) for k, v in d.iteritems())
    def get(self, query_params=None, **kwargs):
        """GET this object's endpoint and rebuild SDK objects from it.

        Returns ``self`` with ERROR_MSG set on transport/API errors; a
        single rebuilt object when kwargs addressed one instance; or a
        list of rebuilt objects for collection responses.
        """
        if len(kwargs) > 0:
            self.a10_url_update(**kwargs)
        else:
            self.a10_url_parent()
        request = self.DeviceProxy.GET(self, query_params)
        try:
            #Hack decode unicode something:zli, bug:237218
            hack = kwargs.get("json_before_load", None)
            if hack and callable(hack):
                try:
                    request = hack(request)
                except:
                    pass
            request = json.loads(request, encoding='utf-8')
        except:
            # Not JSON-decodable: inspect the raw payload for errors.
            if request is None:
                self.ERROR_MSG = "None Returned"
                return self
            elif 'response' in request and request['response']['err']:
                self.ERROR_MSG = request
                return self
            elif "Session Timeout" in request:
                self.ERROR_MSG = request
                return self
        # Re-check the (possibly decoded) payload for error shapes.
        if request is None:
            self.ERROR_MSG = "None Returned"
            return self
        elif not isinstance(request, dict):
            self.ERROR_MSG = 'Invalid Response'
            return self
        # elif 'response' in request and request['response']['err']:
        elif 'response' in request and request.get('response').get('err', None):
            self.ERROR_MSG = request
            return self
        elif "Session Timeout" in request:
            self.ERROR_MSG = request
            return self
        if len(kwargs) > 0:
            temp_object = self.Factory(self.DeviceProxy, request, name=1)
            # Propagate debug bookkeeping onto the rebuilt object.
            try:
                temp_object.__setattr__("_HTTP_RESPONSE", self._HTTP_RESPONSE)
                temp_object.__setattr__("DEBUG_CONNECTION", self.DEBUG_CONNECTION)
                temp_object.__setattr__("DEBUG_Payload", self.DEBUG_Payload)
                temp_object.__setattr__("DEBUG_Response", self.DEBUG_Response)
                temp_object.__setattr__("DEBUG_URL", self.DEBUG_URL)
                temp_object.__setattr__("DEBUG_headers", self.DEBUG_headers)
            except Exception:
                pass
            return temp_object
        else:
            r_list = []
            for k, v in request.items():
                # Strip one '-list' suffix from the wrapper key.
                if "List" in k:
                    wrapper_key = k.replace("-list", "", 1)
                elif "list" in k:
                    wrapper_key = k.replace("-list", "", 1)
                else:
                    wrapper_key = k
                if wrapper_key[len(wrapper_key) - 1] == "_":
                    wrapper_key = wrapper_key[:len(wrapper_key) - 1]
                if isinstance(v, list):
                    for i in v:
                        new_object = self.Factory(self.DeviceProxy, {wrapper_key: i}, name=0, parent="")
                        new_object.__setattr__("_HTTP_RESPONSE", self._HTTP_RESPONSE)
                        new_object.__setattr__("DEBUG_CONNECTION", self.DEBUG_CONNECTION)
                        new_object.__setattr__("DEBUG_Payload", self.DEBUG_Payload)
                        new_object.__setattr__("DEBUG_Response", self.DEBUG_Response)
                        new_object.__setattr__("DEBUG_URL", self.DEBUG_URL)
                        new_object.__setattr__("DEBUG_headers", self.DEBUG_headers)
                        r_list.append(new_object)
                else:
                    # Single-object response: return on the first pair.
                    try:
                        new_object = self.Factory(self.DeviceProxy, request, name=1)
                        new_object.__setattr__("_HTTP_RESPONSE", self._HTTP_RESPONSE)
                        new_object.__setattr__("DEBUG_CONNECTION", self.DEBUG_CONNECTION)
                        new_object.__setattr__("DEBUG_Payload", self.DEBUG_Payload)
                        new_object.__setattr__("DEBUG_Response", self.DEBUG_Response)
                        new_object.__setattr__("DEBUG_URL", self.DEBUG_URL)
                        new_object.__setattr__("DEBUG_headers", self.DEBUG_headers)
                    except:
                        pass
                    return new_object
            return r_list
def get_stream_response(self, **kwargs):
A10BaseClass.is_POST = True
o_url = self.a10_url
if len(kwargs) > 0:
self.a10_url_update(**kwargs)
else:
self.a10_url_parent()
response = self.DeviceProxy.POST(self)
self.a10_url = o_url
return response
def create(self, **kwargs):
A10BaseClass.is_POST = True
o_url = self.a10_url
if len(kwargs) > 0:
self.a10_url_update(**kwargs)
else:
self.a10_url_parent()
response = self.response_handler(self.DeviceProxy.POST(self))
self.a10_url = o_url
return response
def update(self, **kwargs):
A10BaseClass.is_POST = True
o_url = self.a10_url
if len(kwargs) > 0:
self.a10_url_update(**kwargs)
else:
self.a10_url_parent()
response = self.response_handler(self.DeviceProxy.POST(self))
self.a10_url = o_url
return response
def replace(self, **kwargs):
A10BaseClass.is_POST = False
o_url = self.a10_url
if len(kwargs) > 0:
self.a10_url_update(**kwargs)
else:
self.a10_url_parent()
response = self.response_handler(self.DeviceProxy.PUT(self))
self.a10_url = o_url
return response
def replace_all(self, obj_list):
A10BaseClass.is_POST = False
o_url = self.a10_url
self.a10_url_parent()
response = self.response_handler(self.DeviceProxy.PUT_ALL(self, obj_list))
self.a10_url = o_url
return response
def create_all(self, obj_list):
A10BaseClass.is_POST = True
o_url = self.a10_url
self.a10_url_parent()
response = self.response_handler(self.DeviceProxy.POST_ALL(self, obj_list))
self.a10_url = o_url
return response
def delete(self, query_params=None, **kwargs):
o_url = self.a10_url
if len(kwargs) > 0:
self.a10_url_update(**kwargs)
else:
self.a10_url_parent()
response = self.response_handler(self.DeviceProxy.DELETE(self, query_params))
self.a10_url = o_url
return response
    def a10_url_update(self, **kwargs):
        """Substitute {placeholder} tokens in a10_url with kwargs values.

        Tuple/list values fill repeated placeholders of the same name;
        values are URL-quoted with spaces encoded as %20.  Leftover
        unresolved placeholders are stripped, and on failure the URL
        falls back to the parent form (a10_url_parent).
        """
        temp_url = self.a10_url
        try:
            for key, value in kwargs.items():
                if isinstance(value, tuple) or isinstance(value, list):
                    for v in value:
                        v = urllib.quote_plus(str(v).replace(' ', "%20"))
                        # Undo double-encoding of the %20 inserted above.
                        v = v.replace('%2520', '%20')
                        self.a10_url = self.a10_url.replace('{%s}' % key, v, 1)
                else:
                    #Python converts white spaces to +
                    #Modifying this behavior to use %20 encoding instead
                    value = urllib.quote_plus(str(value).replace(' ', '%20'))
                    value = value.replace('%2520', '%20')
                    self.a10_url = self.a10_url.replace('{%s}' % key, value, 1)
            #Removing any unresolved keys and removing the +
            while(len(kwargs)> 0 and '{' in self.a10_url):
                try:
                    start_index = self.a10_url.index('{')
                    end_index = self.a10_url.index('}')
                    self.a10_url = self.a10_url.replace(self.a10_url[start_index:end_index+1], '')
                except ValueError as e:
                    print 'Substring not found', e
                    break
            self.a10_url = self.a10_url.replace('+/', '/')
            self.a10_url = self.a10_url.replace('/+', '/')
            #zli fixed bug:243614
            p = re.compile('\+$')
            #self.a10_url = self.a10_url.replace('+', '', -1) if self.a10_url.endswith('+') else self.a10_url
            self.a10_url = p.sub('', self.a10_url) if self.a10_url.endswith('+') else self.a10_url
            if '{' in self.a10_url:
                self.a10_url_parent(**kwargs)
        except:
            try:
                self.a10_url_parent(**kwargs)
            except:
                # Restore the untouched URL if even the fallback failed.
                self.a10_url = temp_url
    def a10_url_override(self, url):
        """Replace this object's endpoint URL outright."""
        self.a10_url = url
    #TODO: Need to build a more intelligent handler.
    def a10_url_parent(self, **kwargs):
        """Trim unresolved {placeholder} segments so a10_url addresses the
        parent collection (preserving a trailing /oper or /stats), then
        apply any remaining kwargs via str.format."""
        if "{" in self.a10_url:
            p_list = self.a10_url.split("/")
            if self.a10_url.endswith('oper'):
                # Drop the placeholder segment just before 'oper'.
                temp = self.a10_url.replace(p_list[(len(p_list) - 2)] + '/', "")
                self.a10_url = temp
            elif self.a10_url.endswith('stats'):
                temp = self.a10_url.replace(p_list[(len(p_list) - 2)] + '/', "")
                self.a10_url = temp
            else:
                # Drop the trailing placeholder segment and its slash.
                temp = self.a10_url.replace(p_list[(len(p_list) - 1)], "")
                self.a10_url = temp[0:len(temp) - 1]
        if len(kwargs) > 0:
            try:
                self.a10_url = self.a10_url.format(**kwargs)
            except:
                pass
def get_stats(self, url="", Filters={}, **kwargs):
if url == "":
o_url = self.a10_url
else:
o_url = self.a10_url
self.a10_url = url
if len(kwargs) > 0:
try:
self.a10_url = self.a10_url.format(**kwargs)
except:
pass
"""
If you are utilizing the stats class &| you want to get stats from a parent.
"""
if "{" in self.a10_url:
self.a10_url = re.match(r'^(/.+?/{)', self.a10_url).group(0).replace("/{", "")
if "stats" not in self.a10_url:
self.a10_url = self.a10_url + "/stats"
if len(Filters) > 0:
query = urllib.urlencode(**Filters)
endpoint = self.a10_url + "?" + query
temp_url = self.a10_url
self.a10_url = endpoint
response = self.response_handler(self.DeviceProxy.GET(self), False)
self.a10_url = o_url
return response
def del_stats(self, url="", Filters={}, **kwargs):
if url == "":
o_url = self.a10_url
else:
o_url = self.a10_url
self.a10_url = url
if len(kwargs) > 0:
try:
self.a10_url = self.a10_url.format(**kwargs)
except:
pass
"""
If you are utilizing the stats class &| you want to get stats from a parent.
"""
if "{" in self.a10_url:
self.a10_url = re.match(r'^(/.+?/{)', self.a10_url).group(0).replace("/{", "")
if "stats" not in self.a10_url:
self.a10_url = self.a10_url + "/stats"
if len(Filters) > 0:
query = urllib.urlencode(**Filters)
endpoint = self.a10_url + "?" + query
temp_url = self.a10_url
self.a10_url = endpoint
response = self.response_handler(self.DeviceProxy.DELETE(self), False)
self.a10_url = o_url
return response
def get_oper(self, url="", Filters={}, **kwargs):
if url == "":
o_url = self.a10_url
else:
o_url = self.a10_url
self.a10_url = url
if len(kwargs) > 0:
try:
self.a10_url = self.a10_url.format(**kwargs)
except:
pass
"""
If you are utilizing the stats class &| you want to get stats from a parent.
"""
if "{" in self.a10_url:
self.a10_url = re.match(r'^(/.+?/{)', self.a10_url).group(0).replace("/{", "")
if "oper" not in self.a10_url:
self.a10_url = self.a10_url + "/oper"
if len(Filters) > 0:
query = urllib.urlencode(**Filters)
endpoint = self.a10_url + "?" + query
temp_url = self.a10_url
self.a10_url = endpoint
response = self.response_handler(self.DeviceProxy.GET(self), False)
self.a10_url = o_url
return response
    def response_handler(self, response, r_obj=True):
        """Normalise a raw device response.

        :param response: raw response body (JSON string, already-parsed
            mapping, or plain text such as "Session Timeout").
        :param r_obj: when True, return ``self`` on success; when False,
            return the parsed response payload instead.
        :return: ``self`` (with ERROR_MSG set on failure) or the parsed
            response, depending on *r_obj* and the response contents.
        """
        try:
            # Best-effort decode; non-JSON payloads fall through unchanged.
            response = json.loads(response, encoding='utf-8')
        except:
            pass
        try:
            if response is None:
                self.ERROR_MSG = "None Returned"
                return self
            elif 'response' in response:
                if "err" in response['response']:
                    self.ERROR_MSG = response
                    return self
                elif "status" in response['response']:
                    if response['response']['status'] == "OK":
                        return self
                # NOTE(review): a non-OK status without an "err" key falls
                # through and implicitly returns None -- confirm intended.
            elif "Session Timeout" in response:
                self.ERROR_MSG = response
                return self
            else:
                if r_obj == True:
                    self.ERROR_MSG = ""
                    return self
                else:
                    return response
        except:
            # Any lookup/containment failure above means the payload had an
            # unexpected shape; surface it via ERROR_MSG.
            self.ERROR_MSG = response
            return self
#Helper method for single file upload
def file_upload(self, filename=None, file_handle=None, file_obj=None):
u_fields = [("json", {self.b_key:{"file":filename,
"file-handle":file_handle,
"action":'import'}})]
u_files = [(filename, file_handle, file_obj)]
return self.files_upload(u_fields, u_files)
#Added to address file_upload and downloads
def files_upload(self, fields=[], files=[]):
'''
API only supports one file being uploaded at a time.
:params fields: [("json", {self.b_key:{"file":filename,
"file-handle":file_handle,
"action":action}})]
:params files:[("file", filename, file_obj)]
'''
self.fields = fields
self.files = files
return self.DeviceProxy.post_multipart(self)
def file_download(self, name=None):
'''
This bypasses the factory so the raw file content can be returned.
'''
self.a10_url = self.a10_url + "/" + name
return self.DeviceProxy.multi_part_get(self)
def file_replace(self, filename=None, file_handle=None, file_obj=None):
u_fields = [("json", {self.b_key:{"file":filename,
"file-handle":file_handle,
"action":'import'}})]
u_files = [(filename, file_handle, file_obj)]
return self.files_upload(u_fields, u_files)
def file_delete(self, name=None):
self.a10_url = self.a10_url + "/" + name
return self.DeviceProxy.DELETE(self)
class kwbl():
    """Keyword-blacklist helper: renames identifiers that would collide with
    Python builtins or keywords, or that start with a digit."""
    # Hardcoded Python 2 builtin names.  Kept as a class-level frozenset so
    # membership tests are O(1) and the collection is built once, instead of
    # rebuilding a ~170-element list on every call as the original did.
    # (echou): replaces a dir(__builtins__)-only check to work around the
    # Django vs Python __builtins__ difference.
    # (raunak): mirrors the sdk_generator behavior for consistency.
    _PYTHON_BUILT_IN = frozenset([
        'bytearray', 'IndexError', 'all', 'help', 'vars', 'SyntaxError',
        'unicode', 'UnicodeDecodeError', 'memoryview', 'isinstance',
        'copyright', 'NameError', 'BytesWarning', 'dict', 'IOError', 'oct',
        'bin', 'SystemExit', 'StandardError', 'format', 'TabError', 'sorted',
        'False', 'RuntimeWarning', 'list', 'iter', 'reload', 'Warning',
        '__package__', 'round', 'dir', 'cmp', 'set', 'bytes',
        'UnicodeTranslateError', 'intern', 'issubclass', 'Ellipsis',
        'EOFError', 'locals', 'BufferError', 'slice', 'FloatingPointError',
        'sum', 'getattr', 'abs', 'exit', 'print', 'True', 'FutureWarning',
        'ImportWarning', 'None', 'hash', 'ReferenceError', 'len', 'credits',
        'frozenset', '__name__', 'ord', 'super', '_', 'TypeError', 'license',
        'KeyboardInterrupt', 'UserWarning', 'filter', 'range', 'staticmethod',
        'SystemError', 'BaseException', 'pow', 'RuntimeError', 'float',
        'GeneratorExit', 'StopIteration', 'globals', 'divmod', 'enumerate',
        'apply', 'LookupError', 'open', 'quit', 'basestring', 'UnicodeError',
        'zip', 'hex', 'long', 'next', 'ImportError', 'chr', '__import__',
        'type', 'Exception', 'tuple', 'reduce', 'reversed',
        'UnicodeEncodeError', 'input', 'hasattr', 'delattr', 'setattr',
        'raw_input', 'PendingDeprecationWarning', 'compile',
        'ArithmeticError', 'str', 'property', 'MemoryError', 'int', 'xrange',
        'KeyError', 'coerce', 'SyntaxWarning', 'file', 'EnvironmentError',
        'unichr', 'id', 'OSError', 'DeprecationWarning', 'min',
        'UnicodeWarning', 'execfile', 'any', 'complex', 'bool', 'ValueError',
        'NotImplemented', 'map', 'buffer', 'max', 'object', 'repr',
        'callable', 'ZeroDivisionError', 'eval', '__debug__',
        'IndentationError', 'AssertionError', 'classmethod',
        'UnboundLocalError', 'NotImplementedError', 'AttributeError',
        'OverflowError'])
    def kwbl(self, word, key=0):
        """Return a collision-safe version of *word*.

        :param word: candidate identifier.
        :param key: 0 -> prefix with "A10WW_", any other value -> "A10_".
        :return: the prefixed name if *word* collides (builtin name, contains
            'copy', Python keyword, or starts with a digit), else *word*.
        """
        # The original had four identical prefix branches; the rename
        # condition is the OR of all of them.
        needs_prefix = (word in dir(__builtins__)
                        or 'copy' in word
                        or word in self._PYTHON_BUILT_IN
                        or word in keyword.kwlist
                        or re.search(r'^[0-9]', word) is not None)
        if not needs_prefix:
            return word
        return ("A10WW_" if key == 0 else "A10_") + word
| a10networks/a10sdk-python | a10sdk/common/A10BaseClass.py | Python | apache-2.0 | 37,904 |
# Copyright [2015] Hewlett-Packard Development Company, L.P.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from trove.common.i18n import _
from trove.common import utils
# SQL statement templates, filled in with %-interpolation before execution.
ALTER_DB_CFG = "ALTER DATABASE %s SET %s = %s"
ALTER_DB_RESET_CFG = "ALTER DATABASE %s CLEAR %s"
ALTER_USER_PASSWORD = "ALTER USER %s IDENTIFIED BY '%s'"
# adminTools / installer shell-command templates.
ADD_DB_TO_NODE = ("/opt/vertica/bin/adminTools -t db_add_node -a"
                  " %s -d %s -p '%s'")
REMOVE_DB_FROM_NODE = ("/opt/vertica/bin/adminTools -t db_remove_node -s"
                       " %s -d %s -i -p '%s'")
CREATE_DB = ("/opt/vertica/bin/adminTools -t create_db -s"
             " %s -d %s -c %s -D %s -p '%s'")
CREATE_USER = "CREATE USER %s IDENTIFIED BY '%s'"
ENABLE_FOR_USER = "ALTER USER %s DEFAULT ROLE %s"
GRANT_TO_USER = "GRANT %s to %s"
INSTALL_VERTICA = ("/opt/vertica/sbin/install_vertica -s %s"
                   " -d %s -X -N -S default -r"
                   " /vertica.deb -L CE -Y --no-system-checks"
                   " --ignore-aws-instance-type")
MARK_DESIGN_KSAFE = "SELECT MARK_DESIGN_KSAFE(%s)"
NODE_STATUS = "SELECT node_state FROM nodes where node_state <> '%s'"
STOP_DB = "/opt/vertica/bin/adminTools -t stop_db -F -d %s -p '%s'"
START_DB = "/opt/vertica/bin/adminTools -t start_db -d %s -p '%s'"
STATUS_ACTIVE_DB = "/opt/vertica/bin/adminTools -t show_active_db"
STATUS_DB_DOWN = "/opt/vertica/bin/adminTools -t db_status -s DOWN"
SET_RESTART_POLICY = ("/opt/vertica/bin/adminTools -t set_restart_policy "
                      "-d %s -p '%s'")
# rsync over ssh with host-key checking disabled: cluster nodes are
# provisioned dynamically, so their host keys are not known in advance.
SEND_CONF_TO_SERVER = ("rsync -v -e 'ssh -o "
                       "UserKnownHostsFile=/dev/null -o "
                       "StrictHostKeyChecking=no' --perms --owner --group "
                       "%s %s:%s")
SSH_KEY_GEN = "ssh-keygen -f %s/.ssh/id_rsa -t rsa -N ''"
UPDATE_VERTICA = ("/opt/vertica/sbin/update_vertica %s %s "
                  " -d %s -X -N -S default -r"
                  " /vertica.deb -L CE -Y --no-system-checks"
                  " --ignore-aws-instance-type")
UPDATE_REMOVE = ("/opt/vertica/sbin/update_vertica --remove-hosts %s "
                 " -d %s -X -N -S default -r"
                 " /vertica.deb -L CE -Y --no-system-checks"
                 " --ignore-aws-instance-type")
UPDATE_ADD = ("/opt/vertica/sbin/update_vertica --add-hosts %s "
              " -d %s -X -N -S default -r"
              " /vertica.deb -L CE -Y --no-system-checks"
              " --ignore-aws-instance-type")
USER_EXISTS = ("/opt/vertica/bin/vsql -w '%s' -c "
               "\"select 1 from users where user_name = '%s'\" "
               "| grep row | awk '{print $1}' | cut -c2-")
# Vertica service/user constants.
VERTICA_ADMIN = "dbadmin"
VERTICA_ADMIN_GRP = "verticadba"
VERTICA_AGENT_SERVICE_COMMAND = "service vertica_agent %s"
VERTICA_CONF = "/etc/vertica.cnf"
INSTALL_TIMEOUT = 1000
CREATE_LIBRARY = "CREATE LIBRARY %s AS '%s'"
CREATE_SOURCE = "CREATE SOURCE %s AS LANGUAGE '%s' NAME '%s' LIBRARY %s"
# User-defined-load libraries compiled from the Vertica SDK examples.
UDL_LIBS = [
    {
        'func_name': "curl",
        'lib_name': "curllib",
        'language': "C++",
        'factory': "CurlSourceFactory",
        'path': "/opt/vertica/sdk/examples/build/cURLLib.so"
    },
]
def shell_execute(command, command_executor="root"):
    """Run *command* in a login shell belonging to *command_executor*.

    Wraps utils.execute for two reasons: it keeps tests easy to stub, and it
    runs the command as another user with that user's own environment.
    ``su`` is used (rather than ``sudo -i -u <user> <command>``) because the
    latter does not work with the vertica installer and has problems when
    executing remote commands.
    """
    return utils.execute("sudo", "su", "-", command_executor, "-c",
                         "%s" % command)
class VSqlError(object):
    """Structured view of one vsql stderr line (type, numeric code, message)."""

    # stderr looks like: "ERROR 3117: Division by zero".
    # Raw string + precompiled: the original used a plain "\d" string, which
    # is an invalid escape sequence on modern Python, and recompiled the
    # pattern on every parse.
    _STDERR_RE = re.compile(r"^(ERROR|WARNING) (\d+): (.+)$")

    def __init__(self, stderr):
        """Parse the stderr part of the VSql output.

        :param stderr: string from executing statement via vsql
        :raises ValueError: if *stderr* does not match the expected format
        """
        parse = self._STDERR_RE.match(stderr)
        if not parse:
            raise ValueError(_("VSql stderr %(msg)s not recognized.")
                             % {'msg': stderr})
        self.type = parse.group(1)
        self.code = int(parse.group(2))
        self.msg = parse.group(3)

    def is_warning(self):
        """Return True for WARNING lines, False for ERROR lines."""
        return self.type == "WARNING"

    def __str__(self):
        return "Vertica %s (%s): %s" % (self.type, self.code, self.msg)
def exec_vsql_command(dbadmin_password, command):
    """Run *command* through vsql as the database administrator.

    :return: (stdout, stderr) pair; a non-empty stderr is parsed into a
        VSqlError instance before being returned.
    """
    out, err = shell_execute("/opt/vertica/bin/vsql -w \'%s\' -c \"%s\""
                             % (dbadmin_password, command),
                             VERTICA_ADMIN)
    if not err:
        return out, err
    return out, VSqlError(err)
| zhangg/trove | trove/guestagent/datastore/experimental/vertica/system.py | Python | apache-2.0 | 5,231 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""tf_export tests."""
# pylint: disable=unused-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
from tensorflow.python.platform import test
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_export
def _test_function(unused_arg=0):
  """No-op function used as an export target by the tests below."""
  pass
def _test_function2(unused_arg=0):
  """Second no-op export target, used when two symbols are exported."""
  pass
class TestClassA(object):
  """Base class used to check that export names are stored per class."""
  pass
class TestClassB(TestClassA):
  """Subclass used to check that _tf_api_names is not inherited."""
  pass
class ValidateExportTest(test.TestCase):
  """Tests for tf_export class."""
  class MockModule(object):
    # Minimal module stand-in: export_constant() only needs an object that
    # lives in sys.modules and can grow a _tf_api_constants attribute.
    def __init__(self, name):
      self.__name__ = name
  def setUp(self):
    # Names of mock modules installed into sys.modules; removed in tearDown.
    self._modules = []
  def tearDown(self):
    # Unregister mock modules and strip API-name annotations left on the
    # shared test symbols so each test starts from a clean slate.
    # NOTE(review): _test_function appears twice in this list; the second
    # entry was probably meant to be _test_function2 -- confirm.
    for name in self._modules:
      del sys.modules[name]
    self._modules = []
    for symbol in [_test_function, _test_function, TestClassA, TestClassB]:
      if hasattr(symbol, '_tf_api_names'):
        del symbol._tf_api_names
      if hasattr(symbol, '_tf_api_names_v1'):
        del symbol._tf_api_names_v1
  def _CreateMockModule(self, name):
    # Install and track a MockModule under `name` for later cleanup.
    mock_module = self.MockModule(name)
    sys.modules[name] = mock_module
    self._modules.append(name)
    return mock_module
  def testExportSingleFunction(self):
    # Decorating returns the original symbol and records the export names.
    export_decorator = tf_export.tf_export('nameA', 'nameB')
    decorated_function = export_decorator(_test_function)
    self.assertEquals(decorated_function, _test_function)
    self.assertEquals(('nameA', 'nameB'), decorated_function._tf_api_names)
  def testExportMultipleFunctions(self):
    export_decorator1 = tf_export.tf_export('nameA', 'nameB')
    export_decorator2 = tf_export.tf_export('nameC', 'nameD')
    decorated_function1 = export_decorator1(_test_function)
    decorated_function2 = export_decorator2(_test_function2)
    self.assertEquals(decorated_function1, _test_function)
    self.assertEquals(decorated_function2, _test_function2)
    self.assertEquals(('nameA', 'nameB'), decorated_function1._tf_api_names)
    self.assertEquals(('nameC', 'nameD'), decorated_function2._tf_api_names)
  def testExportClasses(self):
    # _tf_api_names is set on the decorated class itself and must not leak
    # to subclasses through inheritance.
    export_decorator_a = tf_export.tf_export('TestClassA1')
    export_decorator_a(TestClassA)
    self.assertEquals(('TestClassA1',), TestClassA._tf_api_names)
    self.assertTrue('_tf_api_names' not in TestClassB.__dict__)
    export_decorator_b = tf_export.tf_export('TestClassB1')
    export_decorator_b(TestClassB)
    self.assertEquals(('TestClassA1',), TestClassA._tf_api_names)
    self.assertEquals(('TestClassB1',), TestClassB._tf_api_names)
  def testExportSingleConstant(self):
    module1 = self._CreateMockModule('module1')
    export_decorator = tf_export.tf_export('NAME_A', 'NAME_B')
    export_decorator.export_constant('module1', 'test_constant')
    self.assertEquals([(('NAME_A', 'NAME_B'), 'test_constant')],
                      module1._tf_api_constants)
  def testExportMultipleConstants(self):
    # Constants are accumulated per module in _tf_api_constants.
    module1 = self._CreateMockModule('module1')
    module2 = self._CreateMockModule('module2')
    test_constant1 = 123
    test_constant2 = 'abc'
    test_constant3 = 0.5
    export_decorator1 = tf_export.tf_export('NAME_A', 'NAME_B')
    export_decorator2 = tf_export.tf_export('NAME_C', 'NAME_D')
    export_decorator3 = tf_export.tf_export('NAME_E', 'NAME_F')
    export_decorator1.export_constant('module1', test_constant1)
    export_decorator2.export_constant('module2', test_constant2)
    export_decorator3.export_constant('module2', test_constant3)
    self.assertEquals([(('NAME_A', 'NAME_B'), 123)],
                      module1._tf_api_constants)
    self.assertEquals([(('NAME_C', 'NAME_D'), 'abc'),
                       (('NAME_E', 'NAME_F'), 0.5)],
                      module2._tf_api_constants)
  def testRaisesExceptionIfAlreadyHasAPINames(self):
    # Re-exporting a symbol without overrides=... is an error.
    _test_function._tf_api_names = ['abc']
    export_decorator = tf_export.tf_export('nameA', 'nameB')
    with self.assertRaises(tf_export.SymbolAlreadyExposedError):
      export_decorator(_test_function)
  def testRaisesExceptionIfInvalidSymbolName(self):
    # TensorFlow code is not allowed to export symbols under package
    # tf.estimator
    with self.assertRaises(tf_export.InvalidSymbolNameError):
      tf_export.tf_export('estimator.invalid')
    # All symbols exported by Estimator must be under tf.estimator package.
    with self.assertRaises(tf_export.InvalidSymbolNameError):
      tf_export.estimator_export('invalid')
    with self.assertRaises(tf_export.InvalidSymbolNameError):
      tf_export.estimator_export('Estimator.invalid')
    with self.assertRaises(tf_export.InvalidSymbolNameError):
      tf_export.estimator_export('invalid.estimator')
  def testRaisesExceptionIfInvalidV1SymbolName(self):
    # The same namespace rules apply to names exported only under v1.
    with self.assertRaises(tf_export.InvalidSymbolNameError):
      tf_export.tf_export('valid', v1=['estimator.invalid'])
    with self.assertRaises(tf_export.InvalidSymbolNameError):
      tf_export.estimator_export('estimator.valid', v1=['invalid'])
  def testOverridesFunction(self):
    _test_function2._tf_api_names = ['abc']
    export_decorator = tf_export.tf_export(
        'nameA', 'nameB', overrides=[_test_function2])
    export_decorator(_test_function)
    # _test_function overrides _test_function2. So, _tf_api_names
    # should be removed from _test_function2.
    self.assertFalse(hasattr(_test_function2, '_tf_api_names'))
  def testMultipleDecorators(self):
    # tf_export must look through other decorators (via tf_decorator) and
    # annotate the innermost target function.
    def get_wrapper(func):
      def wrapper(*unused_args, **unused_kwargs):
        pass
      return tf_decorator.make_decorator(func, wrapper)
    decorated_function = get_wrapper(_test_function)
    export_decorator = tf_export.tf_export('nameA', 'nameB')
    exported_function = export_decorator(decorated_function)
    self.assertEquals(decorated_function, exported_function)
    self.assertEquals(('nameA', 'nameB'), _test_function._tf_api_names)
if __name__ == '__main__':
  # Run the test suite when executed directly.
  test.main()
| hehongliang/tensorflow | tensorflow/python/util/tf_export_test.py | Python | apache-2.0 | 6,573 |
# Copyright (c) 2010-2019 openpyxl
from warnings import warn
from openpyxl.descriptors.serialisable import Serialisable
from openpyxl.descriptors import (
Typed,
)
from openpyxl.descriptors.sequence import NestedSequence
from openpyxl.descriptors.excel import ExtensionList
from openpyxl.utils.indexed_list import IndexedList
from openpyxl.xml.constants import ARC_STYLE, SHEET_MAIN_NS
from openpyxl.xml.functions import fromstring
from .builtins import styles
from .colors import ColorList, COLOR_INDEX
from .differential import DifferentialStyle
from .table import TableStyleList
from .borders import Border
from .fills import Fill
from .fonts import Font
from .numbers import (
NumberFormatList,
BUILTIN_FORMATS,
BUILTIN_FORMATS_MAX_SIZE,
BUILTIN_FORMATS_REVERSE,
is_date_format,
builtin_format_code
)
from .named_styles import (
_NamedCellStyleList
)
from .cell_style import CellStyle, CellStyleList
class Stylesheet(Serialisable):
    """Python-side model of the ``xl/styles.xml`` workbook part."""
    tagname = "styleSheet"
    numFmts = Typed(expected_type=NumberFormatList)
    fonts = NestedSequence(expected_type=Font, count=True)
    fills = NestedSequence(expected_type=Fill, count=True)
    borders = NestedSequence(expected_type=Border, count=True)
    cellStyleXfs = Typed(expected_type=CellStyleList)
    cellXfs = Typed(expected_type=CellStyleList)
    cellStyles = Typed(expected_type=_NamedCellStyleList)
    dxfs = NestedSequence(expected_type=DifferentialStyle, count=True)
    tableStyles = Typed(expected_type=TableStyleList, allow_none=True)
    colors = Typed(expected_type=ColorList, allow_none=True)
    extLst = Typed(expected_type=ExtensionList, allow_none=True)
    # Child elements serialised, in this order (extLst deliberately omitted).
    __elements__ = ('numFmts', 'fonts', 'fills', 'borders', 'cellStyleXfs',
                    'cellXfs', 'cellStyles', 'dxfs', 'tableStyles', 'colors')
    def __init__(self,
                 numFmts=None,
                 fonts=(),
                 fills=(),
                 borders=(),
                 cellStyleXfs=None,
                 cellXfs=None,
                 cellStyles=None,
                 dxfs=(),
                 tableStyles=None,
                 colors=None,
                 extLst=None,
                 ):
        # Empty container defaults are created here rather than in the
        # signature to avoid shared mutable defaults.
        if numFmts is None:
            numFmts = NumberFormatList()
        self.numFmts = numFmts
        self.number_formats = IndexedList()
        self.fonts = fonts
        self.fills = fills
        self.borders = borders
        if cellStyleXfs is None:
            cellStyleXfs = CellStyleList()
        self.cellStyleXfs = cellStyleXfs
        if cellXfs is None:
            cellXfs = CellStyleList()
        self.cellXfs = cellXfs
        if cellStyles is None:
            cellStyles = _NamedCellStyleList()
        self.cellStyles = cellStyles
        self.dxfs = dxfs
        self.tableStyles = tableStyles
        self.colors = colors
        # Derived views consumed by the workbook reader/writer.
        self.cell_styles = self.cellXfs._to_array()
        self.alignments = self.cellXfs.alignments
        self.protections = self.cellXfs.prots
        self._normalise_numbers()
        self.named_styles = self._merge_named_styles()
    @classmethod
    def from_tree(cls, node):
        # strip all attribs
        attrs = dict(node.attrib)
        for k in attrs:
            del node.attrib[k]
        return super(Stylesheet, cls).from_tree(node)
    def _merge_named_styles(self):
        """
        Merge named style names "cellStyles" with their associated styles
        "cellStyleXfs"
        """
        named_styles = self.cellStyles.names
        for style in named_styles:
            self._expand_named_style(style)
        return named_styles
    def _expand_named_style(self, named_style):
        """
        Bind format definitions for a named style from the associated style
        record
        """
        # xfId links the named style to its format record.
        xf = self.cellStyleXfs[named_style.xfId]
        named_style.font = self.fonts[xf.fontId]
        named_style.fill = self.fills[xf.fillId]
        named_style.border = self.borders[xf.borderId]
        # Ids below BUILTIN_FORMATS_MAX_SIZE are builtin number formats;
        # anything above refers to a custom numFmt defined in this sheet.
        if xf.numFmtId < BUILTIN_FORMATS_MAX_SIZE:
            formats = BUILTIN_FORMATS
        else:
            formats = self.custom_formats
        if xf.numFmtId in formats:
            named_style.number_format = formats[xf.numFmtId]
        if xf.alignment:
            named_style.alignment = xf.alignment
        if xf.protection:
            named_style.protection = xf.protection
    def _split_named_styles(self, wb):
        """
        Convert NamedStyle into separate CellStyle and Xf objects
        """
        for style in wb._named_styles:
            self.cellStyles.cellStyle.append(style.as_name())
            self.cellStyleXfs.xf.append(style.as_xf())
    @property
    def custom_formats(self):
        # Mapping of custom numFmtId -> format code string.
        return dict([(n.numFmtId, n.formatCode) for n in self.numFmts.numFmt])
    def _normalise_numbers(self):
        """
        Rebase custom numFmtIds with a floor of 164 when reading stylesheet
        And index datetime formats
        """
        date_formats = set()
        custom = self.custom_formats
        formats = self.number_formats
        for idx, style in enumerate(self.cell_styles):
            if style.numFmtId in custom:
                fmt = custom[style.numFmtId]
                if fmt in BUILTIN_FORMATS_REVERSE: # remove builtins
                    style.numFmtId = BUILTIN_FORMATS_REVERSE[fmt]
                else:
                    # Re-index the custom format above the builtin range.
                    style.numFmtId = formats.add(fmt) + BUILTIN_FORMATS_MAX_SIZE
            else:
                fmt = builtin_format_code(style.numFmtId)
            if is_date_format(fmt):
                # Create an index of which styles refer to datetimes
                date_formats.add(idx)
        self.date_formats = date_formats
    def to_tree(self, tagname=None, idx=None, namespace=None):
        # The main spreadsheet namespace must be declared on the root element.
        tree = super(Stylesheet, self).to_tree(tagname, idx, namespace)
        tree.set("xmlns", SHEET_MAIN_NS)
        return tree
def apply_stylesheet(archive, wb):
    """
    Add styles to workbook if present

    :param archive: open XLSX zip archive being read.
    :param wb: the Workbook instance to populate.
    :return: ``wb`` when the archive has no stylesheet part, else ``None``.
    """
    try:
        src = archive.read(ARC_STYLE)
    except KeyError:
        # No styles part: leave the workbook's defaults untouched.
        return wb
    node = fromstring(src)
    stylesheet = Stylesheet.from_tree(node)
    # Copy parsed style registries onto the workbook.
    wb._borders = IndexedList(stylesheet.borders)
    wb._fonts = IndexedList(stylesheet.fonts)
    wb._fills = IndexedList(stylesheet.fills)
    wb._differential_styles.styles = stylesheet.dxfs
    wb._number_formats = stylesheet.number_formats
    wb._protections = stylesheet.protections
    wb._alignments = stylesheet.alignments
    wb._table_styles = stylesheet.tableStyles
    # need to overwrite openpyxl defaults in case workbook has different ones
    wb._cell_styles = stylesheet.cell_styles
    wb._named_styles = stylesheet.named_styles
    wb._date_formats = stylesheet.date_formats
    for ns in wb._named_styles:
        ns.bind(wb)
    if not wb._named_styles:
        # Every workbook needs a "Normal" style; fall back to openpyxl's.
        normal = styles['Normal']
        wb.add_named_style(normal)
        warn("Workbook contains no default style, apply openpyxl's default")
    if stylesheet.colors is not None:
        wb._colors = stylesheet.colors.index
def write_stylesheet(wb):
    """Serialise the workbook's collected style registries to a styleSheet tree.

    :param wb: the Workbook whose styles are being written.
    :return: an XML element tree for the ``xl/styles.xml`` part.
    """
    stylesheet = Stylesheet()
    stylesheet.fonts = wb._fonts
    stylesheet.fills = wb._fills
    stylesheet.borders = wb._borders
    stylesheet.dxfs = wb._differential_styles.styles
    from .numbers import NumberFormat
    # Custom number formats are numbered upwards from the builtin ceiling.
    fmts = []
    for idx, code in enumerate(wb._number_formats, BUILTIN_FORMATS_MAX_SIZE):
        fmt = NumberFormat(idx, code)
        fmts.append(fmt)
    stylesheet.numFmts.numFmt = fmts
    # Expand the compact cell-style array back into CellStyle (xf) records.
    xfs = []
    for style in wb._cell_styles:
        xf = CellStyle.from_array(style)
        if style.alignmentId:
            xf.alignment = wb._alignments[style.alignmentId]
        if style.protectionId:
            xf.protection = wb._protections[style.protectionId]
        xfs.append(xf)
    stylesheet.cellXfs = CellStyleList(xf=xfs)
    stylesheet._split_named_styles(wb)
    stylesheet.tableStyles = wb._table_styles
    return stylesheet.to_tree()
| kawamon/hue | desktop/core/ext-py/openpyxl-2.6.4/openpyxl/styles/stylesheet.py | Python | apache-2.0 | 7,976 |
# Copyright 2011 VMware, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Routines for configuring Neutron
"""
import os
from oslo.config import cfg
from oslo import messaging
from paste import deploy
from neutron.api.v2 import attributes
from neutron.common import utils
from neutron.openstack.common.db import options as db_options
from neutron.openstack.common import log as logging
from neutron import version
LOG = logging.getLogger(__name__)
# Options shared by every Neutron service: API binding, plugin selection,
# API behaviour limits, DHCP settings and nova notification credentials.
core_opts = [
    cfg.StrOpt('bind_host', default='0.0.0.0',
               help=_("The host IP to bind to")),
    cfg.IntOpt('bind_port', default=9696,
               help=_("The port to bind to")),
    cfg.StrOpt('api_paste_config', default="api-paste.ini",
               help=_("The API paste config file to use")),
    cfg.StrOpt('api_extensions_path', default="",
               help=_("The path for API extensions")),
    cfg.StrOpt('policy_file', default="policy.json",
               help=_("The policy file to use")),
    cfg.StrOpt('auth_strategy', default='keystone',
               help=_("The type of authentication to use")),
    cfg.StrOpt('core_plugin',
               help=_("The core plugin Neutron will use")),
    cfg.ListOpt('service_plugins', default=[],
                help=_("The service plugins Neutron will use")),
    cfg.StrOpt('base_mac', default="fa:16:3e:00:00:00",
               help=_("The base MAC address Neutron will use for VIFs")),
    cfg.IntOpt('mac_generation_retries', default=16,
               help=_("How many times Neutron will retry MAC generation")),
    cfg.BoolOpt('allow_bulk', default=True,
                help=_("Allow the usage of the bulk API")),
    cfg.BoolOpt('allow_pagination', default=False,
                help=_("Allow the usage of the pagination")),
    cfg.BoolOpt('allow_sorting', default=False,
                help=_("Allow the usage of the sorting")),
    cfg.StrOpt('pagination_max_limit', default="-1",
               help=_("The maximum number of items returned in a single "
                      "response, value was 'infinite' or negative integer "
                      "means no limit")),
    cfg.IntOpt('max_dns_nameservers', default=5,
               help=_("Maximum number of DNS nameservers")),
    cfg.IntOpt('max_subnet_host_routes', default=20,
               help=_("Maximum number of host routes per subnet")),
    cfg.IntOpt('max_fixed_ips_per_port', default=5,
               help=_("Maximum number of fixed ips per port")),
    cfg.IntOpt('dhcp_lease_duration', default=86400,
               deprecated_name='dhcp_lease_time',
               help=_("DHCP lease duration (in seconds). Use -1 to tell "
                      "dnsmasq to use infinite lease times.")),
    cfg.BoolOpt('dhcp_agent_notification', default=True,
                help=_("Allow sending resource operation"
                       " notification to DHCP agent")),
    cfg.BoolOpt('allow_overlapping_ips', default=False,
                help=_("Allow overlapping IP support in Neutron")),
    cfg.StrOpt('host', default=utils.get_hostname(),
               help=_("The hostname Neutron is running on")),
    cfg.BoolOpt('force_gateway_on_subnet', default=False,
                help=_("Ensure that configured gateway is on subnet")),
    # Options for notifying nova about port changes.
    cfg.BoolOpt('notify_nova_on_port_status_changes', default=True,
                help=_("Send notification to nova when port status changes")),
    cfg.BoolOpt('notify_nova_on_port_data_changes', default=True,
                help=_("Send notification to nova when port data (fixed_ips/"
                       "floatingip) changes so nova can update its cache.")),
    cfg.StrOpt('nova_url',
               default='http://127.0.0.1:8774/v2',
               help=_('URL for connection to nova')),
    cfg.StrOpt('nova_admin_username',
               help=_('Username for connecting to nova in admin context')),
    cfg.StrOpt('nova_admin_password',
               help=_('Password for connection to nova in admin context'),
               secret=True),
    cfg.StrOpt('nova_admin_tenant_id',
               help=_('The uuid of the admin nova tenant')),
    cfg.StrOpt('nova_admin_auth_url',
               default='http://localhost:5000/v2.0',
               help=_('Authorization URL for connecting to nova in admin '
                      'context')),
    cfg.StrOpt('nova_ca_certificates_file',
               help=_('CA file for novaclient to verify server certificates')),
    cfg.BoolOpt('nova_api_insecure', default=False,
                help=_("If True, ignore any SSL validation issues")),
    cfg.StrOpt('nova_region_name',
               help=_('Name of nova region to use. Useful if keystone manages'
                      ' more than one region.')),
    cfg.IntOpt('send_events_interval', default=2,
               help=_('Number of seconds between sending events to nova if '
                      'there are any events to send.')),
]
# CLI-only options (also settable on the command line).
core_cli_opts = [
    cfg.StrOpt('state_path',
               default='/var/lib/neutron',
               help=_("Where to store Neutron state files. "
                      "This directory must be writable by the agent.")),
]
# Register the configuration options
cfg.CONF.register_opts(core_opts)
cfg.CONF.register_cli_opts(core_cli_opts)
# Ensure that the control exchange is set correctly
messaging.set_transport_defaults(control_exchange='neutron')
_SQL_CONNECTION_DEFAULT = 'sqlite://'
# Update the default QueuePool parameters. These can be tweaked by the
# configuration variables - max_pool_size, max_overflow and pool_timeout
db_options.set_defaults(sql_connection=_SQL_CONNECTION_DEFAULT,
                        sqlite_db='', max_pool_size=10,
                        max_overflow=20, pool_timeout=10)
def init(args, **kwargs):
    """Parse configuration, initialise RPC, and validate the base MAC option.

    :param args: command-line argument list handed to oslo.config.
    :param kwargs: extra keyword arguments forwarded to cfg.CONF().
    :raises Exception: if the configured base_mac is not a valid MAC template.
    """
    release = version.version_info.release_string()
    cfg.CONF(args=args, project='neutron',
             version='%%prog %s' % release, **kwargs)
    # Imported lazily: a module-level import of neutron.common.rpc creates a
    # circular import failure (FIXME(ihrachys) in the original).
    from neutron.common import rpc as n_rpc
    n_rpc.init(cfg.CONF)
    # Validate that the base_mac is of the correct format.
    error = attributes._validate_regex(cfg.CONF.base_mac,
                                       attributes.MAC_PATTERN)
    if error:
        raise Exception(_("Base MAC: %s") % error)
def setup_logging(conf):
    """Sets up the logging options for a log with supplied name.

    :param conf: a cfg.ConfOpts object
    """
    logging.setup("neutron")
    LOG.info(_("Logging enabled!"))
def load_paste_app(app_name):
    """Builds and returns a WSGI app from a paste config file.

    :param app_name: Name of the application to load
    :raises ConfigFilesNotFoundError when config file cannot be located
    :raises RuntimeError when application cannot be loaded from config file
    """
    config_path = cfg.CONF.find_file(cfg.CONF.api_paste_config)
    if not config_path:
        raise cfg.ConfigFilesNotFoundError(
            config_files=[cfg.CONF.api_paste_config])
    config_path = os.path.abspath(config_path)
    LOG.info(_("Config paste file: %s"), config_path)
    try:
        return deploy.loadapp("config:%s" % config_path, name=app_name)
    except (LookupError, ImportError):
        msg = (_("Unable to load %(app_name)s from "
                 "configuration file %(config_path)s.") %
               {'app_name': app_name,
                'config_path': config_path})
        LOG.exception(msg)
        raise RuntimeError(msg)
| onecloud/neutron | neutron/common/config.py | Python | apache-2.0 | 8,079 |
import re
from nltk.corpus import stopwords
from nltk.corpus import words
from nltk.stem.snowball import SnowballStemmer
from apiv2.models import QuestionText, Question
from apiv2.search.fsearch import formula_extractor as fe
# NLTK resources cached at import time: the corpus lookups are expensive, so
# they are done once and reused by the helpers below.
cachedStopWords = stopwords.words("english")
english_vocab = set(w.lower() for w in words.words())
stemmer = SnowballStemmer("english")
# Full text index search
def to_lower(text):
    """Lower-case every whitespace-separated token, re-joined with single spaces."""
    return ' '.join(token.lower() for token in text.split())
def remove_stopwords(text):
    """Drop tokens of two characters or fewer and English stopwords."""
    kept = [token for token in text.split()
            if len(token) > 2 and token not in cachedStopWords]
    return ' '.join(kept)
def english_only(text):
    """Keep only tokens present in the cached NLTK English vocabulary."""
    return ' '.join(token for token in text.split() if token in english_vocab)
def stem_text(text):
    """Apply the Snowball stemmer to each whitespace-separated token."""
    return ' '.join(stemmer.stem(token) for token in text.split())
def preprocess(text, **kwargs):
    """Run the full text-normalisation pipeline over *text*.

    Pipeline: strip LaTeX spans, drop non-alphabetic characters, lower-case,
    remove stopwords, then optionally filter to English vocabulary and stem.

    Keyword Args:
        english (bool): keep only NLTK-English tokens when True (default True).
        stem (bool): apply Snowball stemming when True (default True).

    Returns:
        The preprocessed text as a single space-joined string.
    """
    preprocessed_text = text
    # Recognise and remove LaTeX (detect formula function)
    preprocessed_text = clean_latex(preprocessed_text)
    # Remove non alphabetical characters
    preprocessed_text = remove_non_alphabet(preprocessed_text)
    # Convert to lower case
    preprocessed_text = to_lower(preprocessed_text)
    # Remove stopwords
    preprocessed_text = remove_stopwords(preprocessed_text)
    # Filter words
    if kwargs.get("english", True):
        preprocessed_text = english_only(preprocessed_text)
    if kwargs.get("stem", True):
        preprocessed_text = stem_text(preprocessed_text)
    return preprocessed_text
def preprocess_unique(text, **kwargs):
    """Preprocess *text* and deduplicate the resulting tokens.

    Token order is not preserved (set semantics, as in the original).
    """
    unique_tokens = set(preprocess(text, **kwargs).split())
    return ' '.join(unique_tokens)
def remove_non_alphabet(text):
    """Replace every character outside [a-zA-Z] with a single space."""
    return re.sub(r'[^a-zA-Z]', " ", text)
def clean_latex(text):
    """Strip LaTeX formula spans ($$..$$, \\(..\\), \\[..\\]) from *text*."""
    for notation in (fe.DOUBLE_DOLLAR_NOTATION, fe.PAREN_NOTATION,
                     fe.BRACKET_NOTATION):
        text = re.sub(notation, " ", text)
    return text
def preprocess_query(text):
    """Preprocess a search query using the default pipeline settings."""
    return preprocess(text)
def preprocess_question_text_object(stem=True):
    """Rebuild every QuestionText row from its Question's preprocessed content.

    Deletes all existing QuestionText rows, then creates one per Question
    with the preprocessed content.

    :param stem: forwarded to preprocess() as its ``stem`` keyword option.
    """
    QuestionText.objects.all().delete()
    questions = Question.objects.all()
    for question in questions:
        # BUG FIX: preprocess() only accepts keyword options (its signature
        # is preprocess(text, **kwargs)), so the original positional call
        # preprocess(question.content, stem) raised TypeError.
        preprocessed_text = preprocess(question.content, stem=stem)
        print(preprocessed_text)
        question_text = QuestionText(
            content=preprocessed_text,
            question=question
        )
        question_text.save()
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility to handle vocabularies."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import codecs
import os
import tensorflow.compat.v1 as tf
from REDACTED.tensorflow.python.ops import lookup_ops
from REDACTED.nmt.utils import misc_utils as utils
# word level special token
UNK = "<unk>"
SOS = "<s>"
EOS = "</s>"
UNK_ID = 0
# char ids 0-255 come from utf-8 encoding bytes
# assign 256-300 to special chars
BOS_CHAR_ID = 256  # <begin sentence>
EOS_CHAR_ID = 257  # <end sentence>
BOW_CHAR_ID = 258  # <begin word>
EOW_CHAR_ID = 259  # <end word>
PAD_CHAR_ID = 260  # <padding>
# Budget includes the BOW/EOW markers added in _string_to_bytes, so at most
# DEFAULT_CHAR_MAXLEN - 2 raw bytes of the word are kept.
DEFAULT_CHAR_MAXLEN = 50  # max number of chars for each word.
def _string_to_bytes(text, max_length):
  """Given string and length, convert to byte seq of at most max_length.

  This process mimics docqa/elmo's preprocessing:
  https://github.com/allenai/document-qa/blob/master/docqa/elmo/data.py

  Note that we make use of BOS_CHAR_ID and EOS_CHAR_ID in iterator_utils.py &
  our usage differs from docqa/elmo.

  Args:
    text: tf.string tensor of shape []
    max_length: max number of chars for each word.

  Returns:
    A tf.int32 tensor of the byte encoded text.
  """
  # Raw UTF-8 bytes of the word, widened to int32 ids in [0, 255].
  byte_ids = tf.to_int32(tf.decode_raw(text, tf.uint8))
  # Truncate to leave room for the begin-of-word / end-of-word markers.
  byte_ids = byte_ids[:max_length - 2]
  # Right-pad with PAD_CHAR_ID up to the fixed width of max_length.
  padding = tf.fill([max_length - tf.shape(byte_ids)[0] - 2], PAD_CHAR_ID)
  byte_ids = tf.concat(
      [[BOW_CHAR_ID], byte_ids, [EOW_CHAR_ID], padding], axis=0)
  tf.logging.info(byte_ids)
  # Reshape pins the static shape to [max_length] for downstream ops.
  byte_ids = tf.reshape(byte_ids, [max_length])
  tf.logging.info(byte_ids.get_shape().as_list())
  # +1 offset on every id -- presumably reserves 0 for padding/masking
  # downstream; TODO confirm against the consumer in iterator_utils.py.
  return byte_ids + 1
def tokens_to_bytes(tokens):
  """Given a sequence of strings, map to sequence of bytes.

  Args:
    tokens: A tf.string tensor (rank 1; enforced below).

  Returns:
    A tensor of shape words.shape + [bytes_per_word] containing byte versions
    of each word.
  """
  bytes_per_word = DEFAULT_CHAR_MAXLEN
  # Run the per-token byte mapping on the CPU.
  with tf.device("/cpu:0"):
    tf.assert_rank(tokens, 1)
    shape = tf.shape(tokens)
    tf.logging.info(tokens)
    tokens_flat = tf.reshape(tokens, [-1])
    # Encode every word independently to a fixed-width byte row.
    as_bytes_flat = tf.map_fn(
        fn=lambda x: _string_to_bytes(x, max_length=bytes_per_word),
        elems=tokens_flat,
        dtype=tf.int32,
        back_prop=False)
    tf.logging.info(as_bytes_flat)
    as_bytes = tf.reshape(as_bytes_flat, [shape[0], bytes_per_word])
  return as_bytes
def load_vocab(vocab_file):
  """Load a vocabulary file into a list of words.

  Args:
    vocab_file: path to a UTF-8 file with one vocabulary word per line.

  Returns:
    A (vocab, vocab_size) tuple: the list of stripped words and its length.
  """
  with codecs.getreader("utf-8")(tf.gfile.GFile(vocab_file, "rb")) as f:
    # strip() drops the trailing newline (and any surrounding whitespace).
    vocab = [word.strip() for word in f]
  # len(vocab) replaces the hand-maintained counter the original kept in
  # lock-step with the list -- same value, no duplicated bookkeeping.
  return vocab, len(vocab)
def check_vocab(vocab_file, out_dir, check_special_token=True, sos=None,
                eos=None, unk=None):
  """Check if vocab_file doesn't exist, create from corpus_file.

  When the vocabulary file exists, optionally verify that it starts with
  the special tokens (unk, sos, eos); if it does not, a corrected copy is
  written into ``out_dir`` and its path is returned instead.

  Args:
    vocab_file: path to the vocabulary file; must exist.
    out_dir: directory where a corrected vocabulary copy may be written.
    check_special_token: whether to enforce the [unk, sos, eos] prefix.
    sos: start-of-sentence token (defaults to SOS).
    eos: end-of-sentence token (defaults to EOS).
    unk: unknown-word token (defaults to UNK).

  Returns:
    A (vocab_size, vocab_file) tuple; vocab_file may point to the new copy.

  Raises:
    ValueError: if vocab_file does not exist.
  """
  if tf.gfile.Exists(vocab_file):
    utils.print_out("# Vocab file %s exists" % vocab_file)
    vocab, vocab_size = load_vocab(vocab_file)
    if check_special_token:
      # Verify if the vocab starts with unk, sos, eos
      # If not, prepend those tokens & generate a new vocab file
      if not unk: unk = UNK
      if not sos: sos = SOS
      if not eos: eos = EOS
      assert len(vocab) >= 3
      if vocab[0] != unk or vocab[1] != sos or vocab[2] != eos:
        utils.print_out("The first 3 vocab words [%s, %s, %s]"
                        " are not [%s, %s, %s]" %
                        (vocab[0], vocab[1], vocab[2], unk, sos, eos))
        # Prepend the special tokens and write the corrected copy next to
        # out_dir, keeping the original file untouched.
        vocab = [unk, sos, eos] + vocab
        vocab_size += 3
        new_vocab_file = os.path.join(out_dir, os.path.basename(vocab_file))
        with codecs.getwriter("utf-8")(
            tf.gfile.GFile(new_vocab_file, "wb")) as f:
          for word in vocab:
            f.write("%s\n" % word)
        vocab_file = new_vocab_file
  else:
    raise ValueError("vocab_file '%s' does not exist." % vocab_file)

  # NOTE(review): recomputed here as the single source of truth, which makes
  # the incremental counting above redundant but harmless.
  vocab_size = len(vocab)
  return vocab_size, vocab_file
def create_vocab_tables(src_vocab_file):
  """Creates vocab tables for src_vocab_file and tgt_vocab_file."""
  # Source and target share one vocabulary, so a single lookup table is
  # built and returned for both roles.
  table = lookup_ops.index_table_from_file(
      src_vocab_file, default_value=UNK_ID)
  return table, table
def load_embed_txt(embed_file):
  """Load embed_file into a python dictionary.

  Note: the embed_file should be a Glove/word2vec formatted txt file.
  Here is an example assuming embed_size=5:

  the -0.071549 0.093459 0.023738 -0.090339 0.056123
  to 0.57346 0.5417 -0.23477 -0.3624 0.4037
  and 0.20327 0.47348 0.050877 0.002103 0.060547

  For word2vec format, the first line will be: <num_words> <emb_size>.

  Args:
    embed_file: file path to the embedding file.
  Returns:
    a dictionary that maps word to vector, and the size of embedding dimensions.
  """
  emb_dict = dict()
  emb_size = None
  is_first_line = True
  with codecs.getreader("utf-8")(tf.gfile.GFile(embed_file, "rb")) as f:
    for line in f:
      tokens = line.rstrip().split(" ")
      if is_first_line:
        is_first_line = False
        # A two-token first line is a word2vec header: "<num_words> <emb_size>"
        if len(tokens) == 2:  # header line
          emb_size = int(tokens[1])
          continue
      word = tokens[0]
      vec = list(map(float, tokens[1:]))
      emb_dict[word] = vec
      if emb_size:
        # Every vector must agree with the declared / first-seen size;
        # inconsistent entries are dropped with a warning.
        if emb_size != len(vec):
          utils.print_out(
              "Ignoring %s since embeding size is inconsistent." % word)
          del emb_dict[word]
      else:
        # No word2vec header: infer the size from the first vector.
        emb_size = len(vec)
  return emb_dict, emb_size
| mlperf/training_results_v0.7 | Google/benchmarks/gnmt/implementations/gnmt-research-TF-tpu-v4-512/utils/vocab_utils.py | Python | apache-2.0 | 6,145 |
#!/usr/bin/python
# -- Content-Encoding: UTF-8 --
"""
Cohorte Debug REST API
:authors: Bassem Debbabi
:copyright: Copyright 2015, isandlaTech
:license: Apache Software License 2.0
"""
# iPOPO decorators
from pelix.ipopo.decorators import ComponentFactory, Provides, Property, Instantiate, \
Validate, Invalidate, Requires, RequiresMap, Bind, BindField, UnbindField
import pelix.remote
# Herald
import herald
import herald.beans as beans
# Cohorte
import cohorte.composer
import cohorte.monitor
# Standard library
import logging
import threading
import json, time, os
try:
# Python 3
import urllib.parse as urlparse
except ImportError:
# Python 2
import urlparse
# cohorte plutform debug agent and api
import debug
_logger = logging.getLogger("debug.debug")
# collecting information
SUBJECT_GET_HTTP = "cohorte/shell/agent/get_http"
# API path
DEBUG_REST_API_PATH = "debug/api/v1"
# API Version
DEBUG_REST_API_VERSION = "v1"
# VERSION
COHORTE_VERSION = "1.0.1"
@ComponentFactory("cohorte-debug-api-factory")
@Provides(['pelix.http.servlet'])
# NOTE: a stray trailing "\" after this decorator previously joined it with
# the next line, turning two decorators into one invalid expression.
@Property('_path', 'pelix.http.path', "/debug")
@Requires("_agent", debug.SERVICE_DEBUG)
# Consume a single Herald Directory service
@Requires("_directory", herald.SERVICE_DIRECTORY)
@Requires('_herald', herald.SERVICE_HERALD)
# Consume an Isolate Composer service
@RequiresMap("_icomposers", cohorte.composer.SERVICE_COMPOSER_ISOLATE, 'endpoint.framework.uuid',
             optional=True, allow_none=False)
@Requires("_icomposerlocal", cohorte.composer.SERVICE_COMPOSER_ISOLATE,
          optional=True, spec_filter="(!(service.imported=*))")
@Requires("_isolates", cohorte.composer.SERVICE_COMPOSER_ISOLATE, aggregate=True, optional=True)
@Property('_reject', pelix.remote.PROP_EXPORT_REJECT, ['pelix.http.servlet', herald.SERVICE_DIRECTORY_LISTENER])
@Instantiate('cohorte-debug-api')
class DebugAPI(object):
    """
    Servlet component exposing the Cohorte debug REST API (v1).

    Read-only endpoints under ``debug/api/v1`` report the isolates known to
    the Herald directory and, per isolate, its bundles, factories, component
    instances, services, threads and log files.  Requests that target a
    remote isolate are relayed to that isolate's debug agent over Herald;
    requests about the local isolate call the local agent directly.
    """

    def __init__(self):
        """Declare all injected services and internal state."""
        # Guards shared state
        self._lock = threading.Lock()
        # Servlet's path (injected iPOPO property)
        self._path = None
        # Cohorte platform debug agent (local isolate)
        self._agent = None
        # Herald directory & messaging services
        self._directory = None
        self._herald = None
        # Isolate composer services
        self._icomposers = {}
        self._icomposerlocal = None
        self._isolates = []
        # Bundle context, set on @Validate
        self._context = None
        # List of platform activities
        self._platform_activities = []
        self._platform_activities_index = 0
        # Map of "last updated" timestamps per listing
        time_now = time.time()
        self._last_updates = {
            "nodes": time_now,
            "platform_activities": time_now,
        }

    def decrypt_request(self, request, action="GET"):
        """
        Decrypts the request and extracts these information:

        :return path: full path without host:port (first and last / are removed)
        :return parts: list of query parts
        :return in_data: json object of the associated request data
        """
        parsed = urlparse.urlparse(request.get_path())
        path = parsed.path
        # Strip the leading and trailing '/' so the path splits cleanly
        # (startswith/endswith also tolerate an empty path).
        if path.startswith('/'):
            path = path[1:]
        if path.endswith('/'):
            path = path[:-1]
        parts = str(path).split('/')
        if action == "GET":
            in_data = urlparse.parse_qs(parsed.query, keep_blank_values=True)
        else:
            data = request.read_data()
            if data is not None:
                in_data = json.loads(str(data))
            else:
                # No body: fall back to the query string
                in_data = urlparse.parse_qs(parsed.query,
                                            keep_blank_values=True)
        return (path, parts, in_data)

    def prepare_response(self, request, action):
        """Build the skeleton of a JSON reply, with a 200 "meta" header."""
        meta = {
            "status": 200,
            "msg": "OK",
            "api-version": DEBUG_REST_API_VERSION,
            "api-method": "",
            "cohorte-version": COHORTE_VERSION,
            "request-path": request.get_path(),
            "request-method": action,
            "duration": 0.0,
        }
        return {"meta": meta}

    def send_json(self, data, response):
        """Serialize ``data`` and send it with the status stored in its meta."""
        result = json.dumps(data, sort_keys=False,
                            indent=4, separators=(',', ': '))
        response.send_content(data["meta"]["status"], result,
                              "application/json")

    def send_text(self, data, response, status):
        """Send a raw plain-text payload with the given HTTP status."""
        response.send_content(status, data, "text/plain")

    def bad_request(self, request, response, in_data, out_data):
        """Flag the pending reply as a 400 Bad Request."""
        out_data["meta"]["status"] = 400
        out_data["meta"]["msg"] = "BAD REQUEST"

    """
    GET actions ========================================================================
    """

    def get_api_info(self, request, response, in_data, out_data):
        """GET <api>: identify this API."""
        out_data["api"] = {"name": "debug"}

    def get_isolates(self, request, response, in_data, out_data):
        """GET <api>/isolates: all known isolates (local peer first)."""
        peers = [self._directory.get_local_peer()] \
            + list(self._directory.get_peers())
        out_data["isolates"] = [
            {"uid": peer.uid, "name": peer.name,
             "node_uid": peer.node_uid, "node_name": peer.node_name}
            for peer in peers]
        out_data["meta"]["count"] = len(peers)

    def get_isolate(self, request, response, in_data, out_data, uuid):
        """GET <api>/isolates/<uuid>: details of one isolate."""
        out_data["isolate"] = self._get_isolate_detail(uuid)

    def _set_listing(self, out_data, uuid, key, items):
        """Store a per-isolate listing and its item count in the reply."""
        out_data["isolate"] = {"uuid": uuid}
        out_data[key] = items
        out_data["meta"]["count"] = len(items) if items is not None else 0

    def get_isolate_bundles(self, request, response, in_data, out_data, uuid):
        """GET <api>/isolates/<uuid>/bundles: bundles of an isolate."""
        self._set_listing(out_data, uuid, "bundles",
                          self._get_isolate_bundles(uuid))

    def get_bundle_detail(self, request, response, in_data, out_data,
                          isolate_uuid, bundle_id):
        """GET <api>/isolates/<uuid>/bundles/<id>: one bundle's details."""
        out_data["isolate"] = {"uuid": isolate_uuid}
        out_data["bundle"] = self._get_bundle_detail(isolate_uuid, bundle_id)

    def get_isolate_factories(self, request, response, in_data, out_data, uuid):
        """GET <api>/isolates/<uuid>/factories: component factories."""
        self._set_listing(out_data, uuid, "factories",
                          self._get_isolate_factories(uuid))

    def get_factory_detail(self, request, response, in_data, out_data,
                           isolate_uuid, factory_name):
        """GET <api>/isolates/<uuid>/factories/<name>: one factory's details."""
        out_data["isolate"] = {"uuid": isolate_uuid}
        out_data["factory"] = self._get_factory_detail(isolate_uuid,
                                                       factory_name)

    def get_isolate_instances(self, request, response, in_data, out_data, uuid):
        """GET <api>/isolates/<uuid>/instances: component instances."""
        self._set_listing(out_data, uuid, "instances",
                          self._get_isolate_instances(uuid))

    def get_instance_detail(self, request, response, in_data, out_data,
                            isolate_uuid, instance_name):
        """GET <api>/isolates/<uuid>/instances/<name>: one instance's details."""
        out_data["isolate"] = {"uuid": isolate_uuid}
        out_data["instance"] = self._get_instance_detail(isolate_uuid,
                                                         instance_name)

    def get_isolate_services(self, request, response, in_data, out_data, uuid):
        """GET <api>/isolates/<uuid>/services: registered services."""
        self._set_listing(out_data, uuid, "services",
                          self._get_isolate_services(uuid))

    def get_isolate_threads(self, request, response, in_data, out_data, uuid):
        """GET <api>/isolates/<uuid>/threads: live threads."""
        self._set_listing(out_data, uuid, "threads",
                          self._get_isolate_threads(uuid))

    def get_isolate_logs(self, request, response, in_data, out_data, uuid):
        """GET <api>/isolates/<uuid>/logs: available log files."""
        self._set_listing(out_data, uuid, "logs",
                          self._get_isolate_logs(uuid))

    def get_isolate_log(self, request, response, in_data, out_data,
                        isolate_uuid, log_id):
        """GET <api>/isolates/<uuid>/logs/<id>: content of one log."""
        out_data["isolate"] = {"uuid": isolate_uuid}
        out_data["log"] = self._get_isolate_log(isolate_uuid, log_id)

    """
    Internal agent methods ===========================================================================
    """

    def _is_local(self, uuid):
        """Whether ``uuid`` designates the current (local) isolate."""
        return self._directory.get_local_peer().uid == uuid

    def _call_agent(self, uuid, subject, payload=None):
        """
        Run a debug-agent operation on a *remote* isolate over Herald.

        :param uuid: UID of the target isolate
        :param subject: Herald message subject
        :param payload: optional message content
        :return: the content of the Herald reply
        """
        if payload is None:
            msg = beans.Message(subject)
        else:
            msg = beans.Message(subject, payload)
        reply = self._herald.send(uuid, msg)
        return reply.content

    def _get_isolate_detail(self, uuid):
        if self._is_local(uuid):
            return self._agent.get_isolate_detail()
        try:
            return self._call_agent(uuid,
                                    debug.agent.SUBJECT_GET_ISOLATE_DETAIL)
        except KeyError:
            # Unknown peer: report "no detail" instead of failing
            return None

    def _get_isolate_bundles(self, uuid):
        if self._is_local(uuid):
            return self._agent.get_bundles()
        return self._call_agent(uuid, debug.agent.SUBJECT_GET_BUNDLES)

    def _get_bundle_detail(self, uuid, bundle_id):
        if self._is_local(uuid):
            return self._agent.get_bundle_detail(bundle_id)
        return self._call_agent(uuid, debug.agent.SUBJECT_GET_BUNDLE_DETAIL,
                                bundle_id)

    def _get_isolate_factories(self, uuid):
        if self._is_local(uuid):
            return self._agent.get_factories()
        return self._call_agent(uuid, debug.agent.SUBJECT_GET_FACTORIES)

    def _get_factory_detail(self, uuid, factory_name):
        if self._is_local(uuid):
            return self._agent.get_factory_detail(factory_name)
        return self._call_agent(uuid, debug.agent.SUBJECT_GET_FACTORY_DETAIL,
                                factory_name)

    def _get_isolate_instances(self, uuid):
        if self._is_local(uuid):
            return self._agent.get_instances()
        return self._call_agent(uuid, debug.agent.SUBJECT_GET_INSTANCES)

    def _get_instance_detail(self, uuid, instance_name):
        if self._is_local(uuid):
            return self._agent.get_instance_detail(instance_name)
        return self._call_agent(uuid, debug.agent.SUBJECT_GET_INSTANCE_DETAIL,
                                instance_name)

    def _get_isolate_services(self, uuid):
        if self._is_local(uuid):
            return self._agent.get_services()
        return self._call_agent(uuid, debug.agent.SUBJECT_GET_SERVICES)

    def _get_isolate_threads(self, uuid):
        if self._is_local(uuid):
            return self._agent.get_threads()
        return self._call_agent(uuid, debug.agent.SUBJECT_GET_THREADS)

    def _get_isolate_logs(self, uuid):
        if self._is_local(uuid):
            return self._agent.get_isolate_logs()
        return self._call_agent(uuid, debug.agent.SUBJECT_GET_ISOLATE_LOGS)

    def _get_isolate_log(self, uuid, log_id):
        if self._is_local(uuid):
            return self._agent.get_isolate_log(log_id)
        return self._call_agent(uuid, debug.agent.SUBJECT_GET_ISOLATE_LOG,
                                log_id)

    """
    Servlet (url mapping to rest api) ================================================================
    """

    def do_GET(self, request, response):
        """
        Handle a GET request: route it to the matching ``get_*`` handler,
        then send the accumulated JSON reply.
        """
        path, parts, in_data = self.decrypt_request(request)
        out_data = self.prepare_response(request, "GET")
        # ['debug', 'api', 'v1'] -- used to match path segments exactly
        api_parts = DEBUG_REST_API_PATH.split('/')
        if path.startswith(DEBUG_REST_API_PATH):
            if path == DEBUG_REST_API_PATH:
                out_data["meta"]["api-method"] = "get_api_info"
                self.get_api_info(request, response, in_data, out_data)
            elif path == DEBUG_REST_API_PATH + "/isolates":
                out_data["meta"]["api-method"] = "get_isolates"
                self.get_isolates(request, response, in_data, out_data)
            elif len(parts) == 5 and parts[:4] == api_parts + ["isolates"]:
                # <api>/isolates/<uuid>
                out_data["meta"]["api-method"] = "get_isolate"
                self.get_isolate(request, response, in_data, out_data,
                                 parts[4])
            elif len(parts) == 6 and parts[:4] == api_parts + ["isolates"]:
                # <api>/isolates/<uuid>/<listing>
                listings = {"bundles": self.get_isolate_bundles,
                            "factories": self.get_isolate_factories,
                            "instances": self.get_isolate_instances,
                            "services": self.get_isolate_services,
                            "threads": self.get_isolate_threads,
                            "logs": self.get_isolate_logs}
                handler = listings.get(parts[5])
                if handler is not None:
                    out_data["meta"]["api-method"] = handler.__name__
                    handler(request, response, in_data, out_data, parts[4])
                else:
                    # Previously fell through silently with a 200 OK
                    self.bad_request(request, response, in_data, out_data)
            elif len(parts) == 7 and parts[:4] == api_parts + ["isolates"]:
                # <api>/isolates/<uuid>/<category>/<item>
                details = {"bundles": self.get_bundle_detail,
                           "factories": self.get_factory_detail,
                           "instances": self.get_instance_detail}
                handler = details.get(parts[5])
                if handler is not None:
                    out_data["meta"]["api-method"] = handler.__name__
                    handler(request, response, in_data, out_data,
                            parts[4], parts[6])
                elif parts[5] == "logs":
                    if 'raw' in in_data:
                        # Raw log: send as plain text and stop here -- the
                        # JSON reply below would be a second payload on the
                        # same response.
                        log = self._get_isolate_log(parts[4], parts[6])
                        self.send_text(log, response, 200)
                        return
                    # send log within a json object data["log"]
                    out_data["meta"]["api-method"] = "get_isolate_log"
                    self.get_isolate_log(request, response, in_data, out_data,
                                         parts[4], parts[6])
                else:
                    self.bad_request(request, response, in_data, out_data)
            else:
                self.bad_request(request, response, in_data, out_data)
        self.send_json(out_data, response)

    """
    iPOPO STUFF --------------------------------------------------------------------------------------------------------
    """

    @Validate
    def validate(self, context):
        """Component validated: keep the bundle context."""
        _logger.info("Debug REST API validated")
        self._context = context

    @Invalidate
    def invalidate(self, context):
        """Component invalidated."""
        _logger.info("Debug REST API invalidated")

    def bound_to(self, path, params):
        """
        Servlet bound to a path
        """
        _logger.info('Bound to ' + path)
        return True

    def unbound_from(self, path, params):
        """
        Servlet unbound from a path
        """
        _logger.info('Unbound from ' + path)
        return None
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import io
import re
from unittest import mock
from stestr import selection
from stestr.tests import base
class TestSelection(base.TestCase):
    """Tests for stestr.selection.filter_tests."""

    def test_filter_tests_no_filter(self):
        """Without filters, every test id is kept unchanged."""
        tests = ['a', 'b', 'c']
        self.assertEqual(tests, selection.filter_tests(None, tests))

    def test_filter_tests(self):
        """A positive regex keeps only the matching test ids."""
        self.assertEqual(['a'],
                         selection.filter_tests(['a'], ['a', 'b', 'c']))

    def test_filter_invalid_regex(self):
        """An invalid regex aborts the run via sys.exit(5)."""
        with mock.patch('sys.exit', side_effect=ImportError) as mock_exit:
            self.assertRaises(ImportError, selection.filter_tests,
                              ['fake_regex_with_bad_part[The-BAD-part]'],
                              ['a', 'b', 'c'])
            mock_exit.assert_called_once_with(5)
class TestExclusionReader(base.TestCase):
    """Tests for stestr.selection.exclusion_reader."""

    def test_exclusion_reader(self):
        """Parses one regex per line, with optional '# note' suffixes."""
        exclude_list = io.StringIO()
        for i in range(4):
            exclude_list.write('fake_regex_%s\n' % i)
            exclude_list.write('fake_regex_with_note_%s # note\n' % i)
        exclude_list.seek(0)
        with mock.patch('builtins.open',
                        return_value=exclude_list):
            result = selection.exclusion_reader('fake_path')
        self.assertEqual(2 * 4, len(result))
        note_cnt = 0
        # not assuming ordering, mainly just testing the type
        for r in result:
            self.assertEqual(r[2], [])
            if r[1] == 'note':
                note_cnt += 1
            self.assertIn('search', dir(r[0]))  # like a compiled regexp
        self.assertEqual(note_cnt, 4)

    def test_invalid_regex(self):
        """An invalid regex in the exclusion file aborts via sys.exit(5)."""
        exclude_list = io.StringIO()
        exclude_list.write("fake_regex_with_bad_part[The-BAD-part]")
        exclude_list.seek(0)
        with mock.patch('builtins.open',
                        return_value=exclude_list):
            with mock.patch('sys.exit') as mock_exit:
                selection.exclusion_reader('fake_path')
                mock_exit.assert_called_once_with(5)
class TestConstructList(base.TestCase):
    """Tests for stestr.selection.construct_list filter combinations."""

    def test_simple_re(self):
        """A positive regex keeps only matching ids."""
        test_lists = ['fake_test(scen)[tag,bar])', 'fake_test(scen)[egg,foo])']
        result = selection.construct_list(test_lists, regexes=['foo'])
        self.assertEqual(list(result), ['fake_test(scen)[egg,foo])'])

    def test_simple_exclusion_re(self):
        """An exclusion regex drops matching ids."""
        test_lists = ['fake_test(scen)[tag,bar])', 'fake_test(scen)[egg,foo])']
        result = selection.construct_list(test_lists, exclude_regex='foo')
        self.assertEqual(list(result), ['fake_test(scen)[tag,bar])'])

    def test_invalid_exclusion_re(self):
        """An invalid exclusion regex aborts via sys.exit(5)."""
        test_lists = ['fake_test(scen)[tag,bar])', 'fake_test(scen)[egg,foo])']
        invalid_regex = "fake_regex_with_bad_part[The-BAD-part]"
        with mock.patch('sys.exit', side_effect=ImportError) as exit_mock:
            self.assertRaises(ImportError, selection.construct_list,
                              test_lists, exclude_regex=invalid_regex)
        exit_mock.assert_called_once_with(5)

    def test_exclusion_list(self):
        """Exclusion-file entries are applied on top of positive regexes."""
        exclude_list = [(re.compile('foo'), 'foo not liked', [])]
        test_lists = ['fake_test(scen)[tag,bar])', 'fake_test(scen)[egg,foo])']
        with mock.patch('stestr.selection.exclusion_reader',
                        return_value=exclude_list):
            result = selection.construct_list(test_lists,
                                              exclude_list='file',
                                              regexes=['fake_test'])
        self.assertEqual(list(result), ['fake_test(scen)[tag,bar])'])

    def test_inclusion_list(self):
        """Inclusion-file regexes select the union of their matches."""
        include_list = [re.compile('fake_test1'), re.compile('fake_test2')]
        test_lists = ['fake_test1[tg]', 'fake_test2[tg]', 'fake_test3[tg]']
        include_getter = 'stestr.selection._get_regex_from_include_list'
        with mock.patch(include_getter,
                        return_value=include_list):
            result = selection.construct_list(test_lists,
                                              include_list='file')
        self.assertEqual(set(result),
                         {'fake_test1[tg]', 'fake_test2[tg]'})

    def test_inclusion_list_invalid_regex(self):
        """An invalid regex in the inclusion file aborts via sys.exit(5)."""
        include_list = io.StringIO()
        include_list.write("fake_regex_with_bad_part[The-BAD-part]")
        include_list.seek(0)
        with mock.patch('builtins.open',
                        return_value=include_list):
            with mock.patch('sys.exit') as mock_exit:
                selection._get_regex_from_include_list('fake_path')
                mock_exit.assert_called_once_with(5)

    def test_inclusion_exclusion_list_re(self):
        """Inclusion, exclusion and positive regexes compose correctly."""
        include_list = [re.compile('fake_test1'), re.compile('fake_test2')]
        test_lists = ['fake_test1[tg]', 'fake_test2[spam]',
                      'fake_test3[tg,foo]', 'fake_test4[spam]']
        exclude_list = [(re.compile('spam'), 'spam not liked', [])]
        include_getter = 'stestr.selection._get_regex_from_include_list'
        with mock.patch(include_getter,
                        return_value=include_list):
            with mock.patch('stestr.selection.exclusion_reader',
                            return_value=exclude_list):
                result = selection.construct_list(
                    test_lists, exclude_list='exclude_file',
                    include_list='include_file', regexes=['foo'])
        self.assertEqual(set(result),
                         {'fake_test1[tg]', 'fake_test3[tg,foo]'})

    def test_overlapping_exclude_regex(self):
        """Overlapping exclusion regexes do not break the filtering."""
        exclude_list = [(re.compile('compute.test_keypairs.KeypairsTestV210'),
                         '', []),
                        (re.compile('compute.test_keypairs.KeypairsTestV21'),
                         '', [])]
        test_lists = [
            'compute.test_keypairs.KeypairsTestV210.test_create_keypair',
            'compute.test_keypairs.KeypairsTestV21.test_create_keypair',
            'compute.test_fake.FakeTest.test_fake_test']
        with mock.patch('stestr.selection.exclusion_reader',
                        return_value=exclude_list):
            result = selection.construct_list(test_lists,
                                              exclude_list='file',
                                              regexes=['fake_test'])
        self.assertEqual(
            list(result), ['compute.test_fake.FakeTest.test_fake_test'])
| mtreinish/stestr | stestr/tests/test_selection.py | Python | apache-2.0 | 7,058 |
#!/usr/bin/env python
"""
Script to update the current edX iOS App with different names, resources, etc.
Requirements:
pip install pyyaml
"""
import argparse
import logging
import os
import shutil
import subprocess
import sys
import yaml
class WhitelabelApp:
    """
    Update the current edX iOS App using configured resources and properties.
    """

    # Sample configuration printed by ``--help-config-file``.  It must stay
    # valid YAML so that users can copy it verbatim.  (The ``plist_buddy``
    # and ``git_command`` entries previously used ``=`` instead of ``:``,
    # which made the sample fail to parse.)
    EXAMPLE_CONFIG_FILE = """
    ---
    # Notes:
    # * All configuration items are optional.
    # * Use absolute paths if the property is not relative to the config_dir (or project_dir, for project_* properties).
    # Path to your overridden project properties file, which may list your private project config files.
    properties: 'edx.properties'
    # Path to the Resources to override.  Omit to copy no resources.
    resources: 'Resources'
    # List of patch files to apply to the source.  Omit to apply no patches.
    patches:
    - patches/0001_update_text.patch
    - patches/0001_version.patch
    # Update the iOS app properties (plist file):
    plist:
        CFBundleName: 'MySchoolApp'
        CFBundleDisplayName: 'MySchoolApp'
        CFBundleSpokenName: 'My School App'
        FacebookDisplayName: 'MySchoolApp'
        CFBundleVersion: 2.6.1.6
        CFBundleIconFiles: !!null  # using null deletes the property.
        CFBundleIcons: !!null
        CFBundleIcons~ipad: !!null
    # Path to the base dir containing your properties_file and resources dir.
    # Defaults to the dir containing config file passed to this script.
    config_dir: '/path/to/your/config/'
    # You probably don't need to provide anything below this line.
    # Defaults are as shown.
    # Base dir of the project to update.
    project_dir: '.'
    # All project_ paths below can be relative to the project_dir
    # Path to the application's plist file
    project_plist: 'Source/edX-Info.plist'
    # Path to the project's Resources dir
    project_resources: 'Source/Resources'
    # Path to the OSX utility command, PlistBuddy
    plist_buddy: '/usr/libexec/PlistBuddy'
    # Path to git
    git_command: '/usr/bin/env git'
    """

    def __init__(self, **kwargs):
        """Store the configuration, resolving relative paths.

        Paths under ``config_dir`` (resources, patches) and under
        ``project_dir`` (properties, resources, plist) are joined with their
        base dir; absolute paths pass through os.path.join unchanged.
        """
        # Config-relative paths
        self.config_dir = kwargs.get('config_dir')
        if not self.config_dir:
            self.config_dir = '.'

        # Assume that these paths are relative to config_dir.
        # (If the path is absolute, then it will be unchanged by the join.)
        self.resources = kwargs.get('resources')
        if self.resources:
            self.resources = os.path.join(self.config_dir, self.resources)

        self.patches = []
        for patchfile in kwargs.get('patches', []):
            self.patches.append(os.path.join(self.config_dir, patchfile))

        # Project-relative paths
        self.project_dir = kwargs.get('project_dir')
        if not self.project_dir:
            self.project_dir = '.'

        self.project_resources = kwargs.get('project_resources')
        if not self.project_resources:
            self.project_resources = os.path.join(self.project_dir, 'Source', 'Resources')

        self.project_properties = kwargs.get('properties')
        if self.project_properties:
            self.project_properties = os.path.join(self.project_dir, self.project_properties)

        self.project_plist = kwargs.get('project_plist')
        if not self.project_plist:
            self.project_plist = os.path.join(self.project_dir, 'Source', 'edX-Info.plist')

        self.plist = kwargs.get('plist', {})

        self.plist_buddy = kwargs.get('plist_buddy')
        if not self.plist_buddy:
            self.plist_buddy = '/usr/libexec/PlistBuddy'

        self.git_command = kwargs.get('git_command')
        if not self.git_command:
            self.git_command = '/usr/bin/env git'

    def whitelabel(self):
        """
        Update the properties, resources, and configuration of the current app.
        """
        # Patches must apply cleanly before anything else is touched.
        if self.apply_patches():
            self.create_project_properties()
            self.update_plist()
            self.copy_resources()
        else:
            logging.error("Update aborted until patches are repaired.")

    def create_project_properties(self):
        """
        Create a project .properties file that points to the config_dir file, if configured.
        """
        if self.project_properties and self.config_dir:
            logging.info("Creating %s", self.project_properties)
            content = "edx.dir = '{config_dir}'"
            with open(self.project_properties, 'w') as f:
                f.write(content.format(config_dir=self.config_dir))
        else:
            logging.debug("Not creating %s, properties or config_dir not set", self.project_properties)

    def update_plist(self):
        """
        Update the app's plist file.
        """
        for name, value in self.plist.items():
            if self._update_plist(name, value):
                logging.info("Updated %s: %s=%s", self.project_plist, name, value)
            else:
                logging.error("Error updating %s: %s=%s", self.project_plist, name, value)

    def copy_resources(self):
        """
        Copy over the existing resources files.
        """
        if self.resources:
            self._copytree(self.resources, self.project_resources)
        else:
            logging.debug("No resources to copy to %s", self.project_resources)

    def apply_patches(self):
        """
        Apply the given patches to the project source.

        Stops at the first failing patch and logs the remaining ones so they
        can be applied manually.  Returns True if all patches applied.
        """
        git_error = False
        for reference in self.patches:
            if git_error:
                logging.error("  %s", reference)
            elif not self._apply_patch(reference):
                git_error = True
                logging.error("Issue detected while applying patch %s. "
                              "Please fix the issue and manually apply the remaining patches:", reference)
        if not self.patches:
            logging.debug("No patches to apply")
        return not git_error

    def _copytree(self, src, dst, symlinks=False, ignore=None):
        """
        Recursively copy the files and dirs from src to dst.

        We can't use shutil.copytree here, because it balks if dst exists.
        """
        if not os.path.exists(dst):
            os.makedirs(dst)
        for item in os.listdir(src):
            s = os.path.join(src, item)
            d = os.path.join(dst, item)
            if os.path.isdir(s):
                self._copytree(s, d, symlinks, ignore)
            else:
                logging.info("Copying %s to %s", s, d)
                shutil.copy2(s, d)

    def _update_plist(self, name, value):
        """Update the app .plist file using PlistBuddy; None deletes the key."""
        cmd = 'Delete' if value is None else 'Set'
        command = '{cmd} :{name} {value}'.format(cmd=cmd, name=name, value=value)
        call_args = self.plist_buddy.split(' ') + ['-c', command, self.project_plist]
        return self._system_command(call_args)

    def _apply_patch(self, filename):
        """Apply the given patch using a 3-way merge."""
        call_args = self.git_command.split(' ') + ['apply', '--3way', filename]
        return self._system_command(call_args)

    @staticmethod
    def _system_command(call_args):
        """Make the given subprocess call, and pipe output/errors to logger."""
        logging.debug("System call: %s", " ".join(call_args))
        process = subprocess.Popen(call_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        (output, error) = process.communicate()
        if output:
            logging.info(output)
        if error:
            logging.error(error)
        return process.returncode == 0
def main():
    """
    Parse the command line arguments, and pass them to WhitelabelApp.
    """
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('--help-config-file', action='store_true', help="Print out a sample config-file, and exit")
    parser.add_argument('--config-file', '-c', help="Path to the configuration file")
    # default=0: action='count' otherwise leaves args.verbose as None when
    # -v is omitted, and "None > 0" raises TypeError on Python 3.
    parser.add_argument('--verbose', '-v', action='count', default=0,
                        help="Enable verbose logging. Repeat -v for more output.")
    args = parser.parse_args()

    if args.help_config_file:
        print(WhitelabelApp.EXAMPLE_CONFIG_FILE)
        sys.exit(0)

    if not args.config_file:
        parser.print_help()
        sys.exit(1)

    # Map the -v count to a log level; the default (no -v) is WARN.
    log_level = logging.WARN
    if args.verbose > 0:
        log_level = logging.INFO
    if args.verbose > 1:
        log_level = logging.DEBUG
    logging.basicConfig(level=log_level)

    with open(args.config_file) as f:
        # safe_load: the config is data, and plain yaml.load without an
        # explicit Loader is deprecated and allows arbitrary object creation.
        config = yaml.safe_load(f) or {}

    # Use the config_file's directory as the default config_dir
    config.setdefault('config_dir', os.path.dirname(args.config_file))

    whitelabeler = WhitelabelApp(**config)
    whitelabeler.whitelabel()
| proversity-org/edx-app-ios | script/whitelabel.py | Python | apache-2.0 | 8,765 |
"""The vimdoc parser."""
from vimdoc import codeline
from vimdoc import docline
from vimdoc import error
from vimdoc import regex
def IsComment(line):
  """Return the comment-leader match for *line* (None when not a comment)."""
  match = regex.comment_leader.match(line)
  return match


def IsContinuation(line):
  """Return the continuation-marker match for *line* (None otherwise)."""
  match = regex.line_continuation.match(line)
  return match


def StripContinuator(line):
  """Remove the leading continuation marker from *line*."""
  assert regex.line_continuation.match(line)
  stripped = regex.line_continuation.sub('', line)
  return stripped
def EnumerateStripNewlinesAndJoinContinuations(lines):
  """Preprocesses the lines of a vimscript file.

  Enumerates the lines, strips the trailing newlines, and joins continuation
  lines onto the line that they continue.

  Args:
    lines: The lines of the file.
  Yields:
    (lineno, line) pairs, one per preprocessed line.
  Raises:
    error.CannotContinue: when a continuation has nothing valid to attach to.
  """
  pending_lineno = None
  pending = None
  for num, raw in enumerate(lines):
    text = raw.rstrip('\n')
    if IsContinuation(text):
      # A continuation must attach to a preceding line, and a code
      # continuation may not continue a comment line.
      if pending is None:
        raise error.CannotContinue('No preceding line.', num)
      if IsComment(pending) and not IsComment(text):
        raise error.CannotContinue('No comment to continue.', num)
      pending += StripContinuator(text)
      continue
    # Flush the previously accumulated line before buffering this one.
    if pending is not None:
      yield pending_lineno, pending
    pending_lineno, pending = num, text
  if pending is not None:
    yield pending_lineno, pending
def EnumerateParsedLines(lines):
  """Parses each preprocessed line, yielding (lineno, parsed line) pairs.

  Tracks "vimdoc mode": entered at a vimdoc leader line (see the example
  below) and left again at the first non-comment line. Comment lines outside
  vimdoc mode are dropped entirely.
  """
  vimdoc_mode = False
  for i, line in EnumerateStripNewlinesAndJoinContinuations(lines):
    if not vimdoc_mode:
      if regex.vimdoc_leader.match(line):
        vimdoc_mode = True
        # There's no need to yield the blank line if it's an empty starter line.
        # For example, in:
        # ""
        # " @usage whatever
        # " description
        # There's no need to yield the first docline as a blank.
        if not regex.empty_vimdoc_leader.match(line):
          # A starter line starts with two comment leaders.
          # If we strip one of them it's a normal comment line.
          yield i, ParseCommentLine(regex.comment_leader.sub('', line))
    elif IsComment(line):
      yield i, ParseCommentLine(line)
    else:
      # First non-comment line ends the vimdoc block.
      vimdoc_mode = False
      yield i, ParseCodeLine(line)
def ParseCodeLine(line):
  """Parses one line of code and creates the appropriate CodeLine.

  Tries each recognized construct in order (blank, :function, :command,
  setting, flag) and falls back to Unrecognized.
  """
  if regex.blank_code_line.match(line):
    return codeline.Blank()
  func_match = regex.function_line.match(line)
  if func_match:
    namespace, name, args = func_match.groups()
    return codeline.Function(name, namespace, regex.function_arg.findall(args))
  cmd_match = regex.command_line.match(line)
  if cmd_match:
    args, name = cmd_match.groups()
    # Each recognized "-<opt>" present in the :command arguments becomes a
    # True keyword flag on the Command.
    flags = {
        opt: ('-' + opt) in args
        for opt in ('bang', 'range', 'count', 'register', 'buffer', 'bar')
    }
    return codeline.Command(name, **flags)
  setting_match = regex.setting_line.match(line)
  if setting_match:
    (name,) = setting_match.groups()
    return codeline.Setting('g:' + name)
  flag_match = regex.flag_line.match(line)
  if flag_match:
    primary, alternate, default = flag_match.groups()
    return codeline.Flag(primary or alternate, default)
  return codeline.Unrecognized(line)
def ParseCommentLine(line):
  """Parses one line of documentation and creates the appropriate DocLine."""
  directive = regex.block_directive.match(line)
  if directive:
    return ParseBlockDirective(*directive.groups())
  return docline.Text(regex.comment_leader.sub('', line))


def ParseBlockDirective(name, rest):
  """Builds the DocLine for block directive *name* with argument text *rest*.

  Raises:
    error.UnrecognizedBlockDirective: for an unknown directive name.
    error.InvalidBlockArgs: when the directive rejects its arguments.
  """
  try:
    factory = docline.BLOCK_DIRECTIVES[name]
  except KeyError:
    raise error.UnrecognizedBlockDirective(name)
  try:
    return factory(rest)
  except ValueError:
    raise error.InvalidBlockArgs(rest)
def ParseBlocks(lines, filename):
  """Parses vimscript *lines* into documentation blocks, yielding each one.

  On a parse error, annotates the exception with the 1-based line number and
  the filename before re-raising.
  """
  blocks = []
  selection = []
  lineno = 0
  try:
    for lineno, line in EnumerateParsedLines(lines):
      for block in line.Affect(blocks, selection):
        yield block.Close()
    # Flush any blocks still open when the input runs out.
    for block in codeline.EndOfFile().Affect(blocks, selection):
      yield block.Close()
  except error.ParseError as e:
    # lineno still holds the last line reached; convert to 1-based.
    e.lineno = lineno + 1
    e.filename = filename
    raise
| google/vimdoc | vimdoc/parser.py | Python | apache-2.0 | 3,977 |
# Copyright 2016 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import re
import requests
from pants.subsystem.subsystem import Subsystem
from pants.util.memo import memoized_method
from pants.contrib.go.subsystems.imported_repo import ImportedRepo
class GoImportMetaTagReader(Subsystem):
    """Implements a reader for the <meta name="go-import"> protocol.

    See https://golang.org/cmd/go/#hdr-Remote_import_paths .
    """

    options_scope = "go-import-metatag-reader"

    @classmethod
    def register_options(cls, register):
        super().register_options(register)
        register(
            "--retries",
            type=int,
            default=1,
            advanced=True,
            help="How many times to retry when fetching meta tags.",
        )

    # Matches a single go-import meta tag and captures the whitespace-separated
    # (root, vcs, url) triple from its content attribute.
    _META_IMPORT_REGEX = re.compile(
        r"""
    <meta
        \s+
        name=['"]go-import['"]
        \s+
        content=['"](?P<root>[^\s]+)\s+(?P<vcs>[^\s]+)\s+(?P<url>[^\s]+)['"]
        \s*
        /?>""",
        flags=re.VERBOSE,
    )

    @classmethod
    def find_meta_tags(cls, page_html):
        """Returns the content of the meta tag if found inside of the provided HTML.

        Returns a list of (root, vcs, url) string tuples, one per tag matched.
        """
        return cls._META_IMPORT_REGEX.findall(page_html)

    @memoized_method
    def get_imported_repo(self, import_path):
        """Looks for a go-import meta tag for the provided import_path.

        Returns an ImportedRepo instance with the information in the meta tag,
        or None if no go-import meta tag is found.
        """
        try:
            session = requests.session()
            # TODO: Support https with (optional) fallback to http, as Go does.
            # See https://github.com/pantsbuild/pants/issues/3503.
            session.mount(
                "http://", requests.adapters.HTTPAdapter(max_retries=self.get_options().retries)
            )
            page_data = session.get(f"http://{import_path}?go-get=1")
        except requests.ConnectionError:
            return None
        if not page_data:
            # A falsy Response means an error status code (requests truthiness).
            return None
        # Return the first match, rather than doing some kind of longest prefix search.
        # Hopefully no one returns multiple valid go-import meta tags.
        for (root, vcs, url) in self.find_meta_tags(page_data.text):
            if root and vcs and url:
                # Check to make sure returned root is an exact match to the provided import path. If it is
                # not then run a recursive check on the returned and return the values provided by that call.
                if root == import_path:
                    return ImportedRepo(root, vcs, url)
                elif import_path.startswith(root):
                    return self.get_imported_repo(root)
        return None
| tdyas/pants | contrib/go/src/python/pants/contrib/go/subsystems/go_import_meta_tag_reader.py | Python | apache-2.0 | 2,841 |
''' Example dangerous usage of urllib[2] opener functions
The urllib and urllib2 opener functions and object can open http, ftp,
and file urls. Often, the ability to open file urls is overlooked leading
to code that can unexpectedly open files on the local server. This
could be used by an attacker to leak information about the server.
'''
import urllib
import urllib2
# Python 3
import urllib.request
# Six
import six
def test_urlopen():
    # NOTE: every call below is *intentionally* unsafe. This file is a Bandit
    # linter fixture demonstrating urlopen-family calls that can open file://
    # URLs (leaking local files); do not "fix" these calls.
    # urllib
    url = urllib.quote('file:///bin/ls')
    urllib.urlopen(url, 'blah', 32)
    urllib.urlretrieve('file:///bin/ls', '/bin/ls2')
    opener = urllib.URLopener()
    opener.open('file:///bin/ls')
    opener.retrieve('file:///bin/ls')
    opener = urllib.FancyURLopener()
    opener.open('file:///bin/ls')
    opener.retrieve('file:///bin/ls')

    # urllib2
    handler = urllib2.HTTPBasicAuthHandler()
    handler.add_password(realm='test',
                         uri='http://mysite.com',
                         user='bob')
    opener = urllib2.build_opener(handler)
    urllib2.install_opener(opener)
    urllib2.urlopen('file:///bin/ls')
    urllib2.Request('file:///bin/ls')

    # Python 3
    urllib.request.urlopen('file:///bin/ls')
    urllib.request.urlretrieve('file:///bin/ls', '/bin/ls2')
    opener = urllib.request.URLopener()
    opener.open('file:///bin/ls')
    opener.retrieve('file:///bin/ls')
    opener = urllib.request.FancyURLopener()
    opener.open('file:///bin/ls')
    opener.retrieve('file:///bin/ls')

    # Six
    six.moves.urllib.request.urlopen('file:///bin/ls')
    six.moves.urllib.request.urlretrieve('file:///bin/ls', '/bin/ls2')
    opener = six.moves.urllib.request.URLopener()
    opener.open('file:///bin/ls')
    opener.retrieve('file:///bin/ls')
    opener = six.moves.urllib.request.FancyURLopener()
    opener.open('file:///bin/ls')
    opener.retrieve('file:///bin/ls')
| pombredanne/bandit | examples/urlopen.py | Python | apache-2.0 | 1,879 |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for DeleteSink
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-logging
# [START logging_v2_generated_ConfigServiceV2_DeleteSink_sync]
from google.cloud import logging_v2
def sample_delete_sink():
    """Delete the log sink named in the request via ConfigServiceV2 (sync)."""
    # Create a client
    client = logging_v2.ConfigServiceV2Client()

    # Initialize request argument(s)
    request = logging_v2.DeleteSinkRequest(
        sink_name="sink_name_value",
    )

    # Make the request (no response body; raises on failure)
    client.delete_sink(request=request)
# [END logging_v2_generated_ConfigServiceV2_DeleteSink_sync]
| googleapis/gapic-generator-python | tests/integration/goldens/logging/samples/generated_samples/logging_v2_generated_config_service_v2_delete_sink_sync.py | Python | apache-2.0 | 1,381 |
"""Helper functions for an SVN build."""
from __future__ import absolute_import
from __future__ import print_function
import re
import os
import errno
from .constants import SVN_ROOT, SVN_REDHAT_PATH, SVN_RESTRICTED_BRANCHES, KOJI_RESTRICTED_TARGETS
from .error import Error, SVNError, UsageError
from . import utils
def is_svn(package_dir):
    """Determine whether a given directory is part of an SVN repo."""
    # If package_dir is a URL, not a directory, then we can't cd into it to
    # check. Assume True for now.
    # TODO: Allow specifying a git URL to build from.
    if utils.is_url(package_dir):
        return True
    saved_cwd = os.getcwd()
    try:
        try:
            os.chdir(package_dir)
        except OSError as ose:
            # Only a missing directory is fatal here; other chdir failures
            # fall through (matching long-standing behavior).
            if ose.errno == errno.ENOENT:
                raise Error("%s is not a valid package directory\n(%s)" % (package_dir, ose))
        try:
            status = utils.sbacktick(["svn", "info"], err2out=True)[1]
        except OSError as ose:
            if ose.errno != errno.ENOENT:
                raise
            # svn binary missing: treat as "not an SVN repo".
            status = 1
        return not status
    finally:
        os.chdir(saved_cwd)
def is_uncommitted(package_dir):
    """Return True if there are uncommitted changes in the SVN working dir."""
    if utils.is_url(package_dir):
        return False
    output, exit_code = utils.sbacktick("svn status -q " + package_dir, err2out=True)
    if exit_code:
        raise SVNError("Exit code %d getting SVN status. Output:\n%s" % (exit_code, output))
    if not output:
        return False
    # Show the user exactly what would be left out of the build.
    print("The following uncommitted changes exist:")
    print(output)
    return True
def is_outdated(package_dir):
    """Return True if the package has been changed since the revision in the
    SVN working dir.
    """
    if utils.is_url(package_dir):
        return False
    out, err = utils.sbacktick("svn status -u -q " + package_dir)
    if err:
        raise SVNError("Exit code %d getting SVN status. Output:\n%s" % (err, out))
    # Column 9 (index 8) of "svn status -u" output carries '*' for items that
    # are out of date relative to the repository; shorter lines are skipped.
    outdated_files = [line for line in out.split("\n")
                      if len(line) > 8 and line[8] == "*"]
    if not outdated_files:
        return False
    print("The following outdated files exist:")
    print("\n".join(outdated_files))
    return True
def verify_working_dir(pkg):
    """Verify if a package working directory has uncommitted changes or is
    outdated and ask the user what to do. Return True if it's ok to continue.
    """
    if is_uncommitted(pkg) and not utils.ask_yn("""\
Package working directory %s has uncommitted changes that will not be included
in the SVN build.
Continue (yes/no)?""" % pkg):
        return False
    if is_outdated(pkg) and not utils.ask_yn("""\
Package working directory %s is out of date and its contents may not reflect
what will be built.
Continue (yes/no)?""" % pkg):
        return False
    return True
def verify_package_info(package_info):
    """Check if package_info points to a valid package dir (i.e. contains
    at least an osg/ dir or an upstream/ dir).
    """
    url = package_info['canon_url']
    rev = package_info['revision']
    out, err = utils.sbacktick(["svn", "ls", url, "-r", rev], err2out=True)
    if err:
        raise SVNError("Exit code %d getting SVN listing of %s (rev %s). Output:\n%s" % (err, url, rev, out))
    # A valid package dir lists at least one of these two subdirectories.
    return any(line.startswith(('osg/', 'upstream/')) for line in out.split("\n"))
#
# Branch checking
#
# We need to forbid building from certain SVN branches into certain Koji
# targets. This is implemented by having two dicts mapping regexp patterns to
# names, one containing the restricted SVN branches and one containing the
# restricted Koji targets.
#
# We're permissive by default: if neither the branch nor the target match any
# of the regexps in their respective dicts, the build is allowed. On the other
# hand, if both are restricted then the branch name has to match the target
# name.
#
def is_restricted_branch(branch):
    """branch is an SVN branch such as 'trunk' or 'branches/osg-3.1'.
    Assumes no extra characters on either side (no 'native/redhat/trunk' or
    'trunk/gums')
    """
    return any(re.search(pattern, branch) for pattern in SVN_RESTRICTED_BRANCHES)
def is_restricted_target(target):
    """target is a koji target such as 'el5-osg' or 'osg-3.1-el5'.
    Assumes no extra characters on either side.
    """
    return any(re.search(pattern, target) for pattern in KOJI_RESTRICTED_TARGETS)
def restricted_branch_matches_target(branch, target):
    """Return True if the pattern that matches `branch` is associated with the
    same name (e.g. 'devops', 'main', 'upcoming', 'versioned') as the pattern that
    matches `target`; False otherwise.
    Special cases:
    - if the name is 'versioned' (e.g. we're building from 'branches/osg-3.1') then the versions also have to match.
    - treat 'main' (i.e. 'trunk') as 'versioned' with a version of '3.5'
    - if the name is 'upcoming' (e.g. building from 'branches/3.6-upcoming') then the versions also have to match.
      treat a missing version ('branches/upcoming') as '3.5'
    Precondition: is_restricted_branch(branch) and is_restricted_target(target)
    are True.
    """
    branch_match = branch_name = target_match = target_name = None
    # Find the first restricted-branch pattern that matches; the loop leaves
    # both the match object and its associated name bound for use below.
    for (branch_pattern, branch_name) in SVN_RESTRICTED_BRANCHES.items():
        branch_match = re.search(branch_pattern, branch)
        if branch_match:
            break
    assert branch_match, \
        "No SVN_RESTRICTED_BRANCHES pattern matching %s -- is_restricted_branch() should have caught this" % branch
    for (target_pattern, target_name) in KOJI_RESTRICTED_TARGETS.items():
        target_match = re.search(target_pattern, target)
        if target_match:
            break
    assert target_match, \
        "No KOJI_RESTRICTED_TARGETS pattern matching %s -- is_restricted_target() should have caught this" % target
    # At this point branch_name should be one of the values (right-hand side) of
    # SVN_RESTRICTED_BRANCHES, and target_name should be one of the values of
    # KOJI_RESTRICTED_TARGETS.
    # These might have OSG version numbers ("3.5") in them; make sure they match
    branch_osgver = branch_match.groupdict().get("osgver", None)
    target_osgver = target_match.groupdict().get("osgver", None)
    # Deal with "main" (i.e. the "trunk" branch or the "osg-elX" targets), which are aliases for "3.5"
    if branch_name == "main":
        branch_name = "versioned"
        branch_osgver = "3.5"
    if target_name == "main":
        target_name = "versioned"
        target_osgver = "3.5"
    # Deal with "upcoming", which is the same as "3.5-upcoming"
    if branch_name == "upcoming":
        branch_osgver = branch_osgver or "3.5"
    if target_name == "upcoming":
        target_osgver = target_osgver or "3.5"
    # branch_osgver and target_osgver might be None, e.g. for devops but that's OK
    return (branch_name == target_name) and (branch_osgver == target_osgver)
def verify_correct_branch(package_dir, buildopts):
    """Check that the user is not trying to build with bad branch/target
    combinations. For example, building from trunk into upcoming, or building
    from osg-3.1 into osg-3.2.

    Raises SVNError on a forbidden branch/target combination.
    """
    url = get_package_info(package_dir)['canon_url']
    match = re.search(SVN_REDHAT_PATH + r'/(trunk|branches/[^/]+)/', url)
    if not match:
        # Building from a weird path (such as a tag). Be permissive -- koji
        # itself will catch building from outside SVN so we don't have to
        return
    branch = match.group(1)
    if not is_restricted_branch(branch):
        # Developer branch -- any target ok
        return
    for dver in buildopts['enabled_dvers']:
        target = buildopts['targetopts_by_dver'][dver]['koji_target']
        # Custom (unrestricted) targets accept any branch; restricted ones
        # must pair with a matching restricted branch.
        if is_restricted_target(target) and not restricted_branch_matches_target(branch, target):
            raise SVNError("Forbidden to build from %s branch into %s target" % (branch, target))
def get_package_info(package_dir):
    """Return the svn info for a package dir as a dict of lowercased,
    underscore-joined labels to values, plus a 'canon_url' key.
    """
    command = ["svn", "info", package_dir]
    # If we don't specify the revision in the argument (e.g. no foo@19999)
    # then explicitly specify HEAD to make sure we're not getting an older
    # version.
    if not re.search(r'@\d+$', package_dir):
        command.extend(['-r', 'HEAD'])
    out, err = utils.sbacktick(command, err2out=True)
    if err:
        raise SVNError("Exit code %d getting SVN info. Output:\n%s" % (err, out))
    info = {}
    for line in out.split("\n"):
        label, value = line.strip().split(": ", 1)
        info[label.strip().lower().replace(' ', '_')] = value
    # Canonicalize the URL so it always uses SVN_ROOT as its prefix.
    info['canon_url'] = re.sub("^" + re.escape(info['repository_root']), SVN_ROOT, info['url'])
    return info
def koji(package_dir, koji_obj, buildopts):
    """koji task with an svn build."""
    info = get_package_info(package_dir)
    name = os.path.basename(info['canon_url'])
    # Sanity check: the URL's basename must look like a package name.
    if not re.match(r"\w+", name):
        raise Error("Package directory '%s' gives invalid package name '%s'" % (package_dir, name))
    if not verify_package_info(info):
        raise UsageError("%s isn't a package directory "
                         "(must have either osg/ or upstream/ dirs or both)" % (package_dir))
    # Scratch builds skip registering the package with Koji.
    if not buildopts.get('scratch'):
        koji_obj.add_pkg(name)
    return koji_obj.build_svn(info['canon_url'], info['revision'])
| opensciencegrid/osg-build | osgbuild/svn.py | Python | apache-2.0 | 10,034 |
# -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Rupesh Tare <rupesht@saltstack.com>`
'''
# Import Python libs
from __future__ import absolute_import
import os.path
# Import Salt Testing Libs
from salttesting import TestCase, skipIf
from salttesting.mock import (
MagicMock,
patch,
NO_MOCK,
NO_MOCK_REASON
)
# Import Salt Libs
from salt.modules import linux_lvm
from salt.exceptions import CommandExecutionError
# Globals
linux_lvm.__salt__ = {}
@skipIf(NO_MOCK, NO_MOCK_REASON)
class LinuxLVMTestCase(TestCase):
    '''
    TestCase for the salt.modules.linux_lvm module

    Every test patches the module's ``__salt__`` dunder with a MagicMock so
    no real ``lvm``/``pvcreate``/etc. binaries are invoked.
    '''
    def test_version(self):
        '''
        Tests LVM version info from lvm version
        '''
        mock = MagicMock(return_value='Library version : 1')
        with patch.dict(linux_lvm.__salt__, {'cmd.run': mock}):
            self.assertEqual(linux_lvm.version(), '1')
    def test_fullversion(self):
        '''
        Tests all version info from lvm version
        '''
        mock = MagicMock(return_value='Library version : 1')
        with patch.dict(linux_lvm.__salt__, {'cmd.run': mock}):
            self.assertDictEqual(linux_lvm.fullversion(),
                                 {'Library version': '1'})
    def test_pvdisplay(self):
        '''
        Tests information about the physical volume(s)
        '''
        # Non-zero retcode -> empty dict.
        mock = MagicMock(return_value={'retcode': 1})
        with patch.dict(linux_lvm.__salt__, {'cmd.run_all': mock}):
            self.assertDictEqual(linux_lvm.pvdisplay(), {})
        # 'A:B:...:K' simulates pvdisplay's colon-separated column output.
        mock = MagicMock(return_value={'retcode': 0,
                                       'stdout': 'A:B:C:D:E:F:G:H:I:J:K'})
        with patch.dict(linux_lvm.__salt__, {'cmd.run_all': mock}):
            self.assertDictEqual(linux_lvm.pvdisplay(),
                                 {'A': {'Allocated Physical Extents': 'K',
                                        'Current Logical Volumes Here': 'G',
                                        'Free Physical Extents': 'J',
                                        'Internal Physical Volume Number': 'D',
                                        'Physical Extent Size (kB)': 'H',
                                        'Physical Volume (not) Allocatable': 'F',
                                        'Physical Volume Device': 'A',
                                        'Physical Volume Size (kB)': 'C',
                                        'Physical Volume Status': 'E',
                                        'Total Physical Extents': 'I',
                                        'Volume Group Name': 'B'}})
            # With real=True the device path is resolved via os.path.realpath.
            mockpath = MagicMock(return_value='Z')
            with patch.object(os.path, 'realpath', mockpath):
                self.assertDictEqual(linux_lvm.pvdisplay(real=True),
                                     {'Z': {'Allocated Physical Extents': 'K',
                                            'Current Logical Volumes Here': 'G',
                                            'Free Physical Extents': 'J',
                                            'Internal Physical Volume Number': 'D',
                                            'Physical Extent Size (kB)': 'H',
                                            'Physical Volume (not) Allocatable': 'F',
                                            'Physical Volume Device': 'A',
                                            'Physical Volume Size (kB)': 'C',
                                            'Physical Volume Status': 'E',
                                            'Real Physical Volume Device': 'Z',
                                            'Total Physical Extents': 'I',
                                            'Volume Group Name': 'B'}})
    def test_vgdisplay(self):
        '''
        Tests information about the volume group(s)
        '''
        mock = MagicMock(return_value={'retcode': 1})
        with patch.dict(linux_lvm.__salt__, {'cmd.run_all': mock}):
            self.assertDictEqual(linux_lvm.vgdisplay(), {})
        mock = MagicMock(return_value={'retcode': 0,
                                       'stdout': 'A:B:C:D:E:F:G:H:I:J:K:L:M:N:O:P:Q'})
        with patch.dict(linux_lvm.__salt__, {'cmd.run_all': mock}):
            self.assertDictEqual(linux_lvm.vgdisplay(),
                                 {'A': {'Actual Physical Volumes': 'K',
                                        'Allocated Physical Extents': 'O',
                                        'Current Logical Volumes': 'F',
                                        'Current Physical Volumes': 'J',
                                        'Free Physical Extents': 'P',
                                        'Internal Volume Group Number': 'D',
                                        'Maximum Logical Volume Size': 'H',
                                        'Maximum Logical Volumes': 'E',
                                        'Maximum Physical Volumes': 'I',
                                        'Open Logical Volumes': 'G',
                                        'Physical Extent Size (kB)': 'M',
                                        'Total Physical Extents': 'N',
                                        'UUID': 'Q',
                                        'Volume Group Access': 'B',
                                        'Volume Group Name': 'A',
                                        'Volume Group Size (kB)': 'L',
                                        'Volume Group Status': 'C'}})
    def test__lvdisplay(self):
        '''
        Return information about the logical volume(s)
        '''
        mock = MagicMock(return_value={'retcode': 1})
        with patch.dict(linux_lvm.__salt__, {'cmd.run_all': mock}):
            self.assertDictEqual(linux_lvm.lvdisplay(), {})
        mock = MagicMock(return_value={'retcode': 0,
                                       'stdout': 'A:B:C:D:E:F:G:H:I:J:K:L:M'})
        with patch.dict(linux_lvm.__salt__, {'cmd.run_all': mock}):
            self.assertDictEqual(linux_lvm.lvdisplay(),
                                 {'A': {'Allocated Logical Extents': 'I',
                                        'Allocation Policy': 'J',
                                        'Current Logical Extents Associated': 'H',
                                        'Internal Logical Volume Number': 'E',
                                        'Logical Volume Access': 'C',
                                        'Logical Volume Name': 'A',
                                        'Logical Volume Size': 'G',
                                        'Logical Volume Status': 'D',
                                        'Major Device Number': 'L',
                                        'Minor Device Number': 'M',
                                        'Open Logical Volumes': 'F',
                                        'Read Ahead Sectors': 'K',
                                        'Volume Group Name': 'B'}})
    def test_pvcreate(self):
        '''
        Tests for set a physical device to be used as an LVM physical volume
        '''
        self.assertEqual(linux_lvm.pvcreate(''),
                         'Error: at least one device is required')
        # Nonexistent device path raises.
        self.assertRaises(CommandExecutionError, linux_lvm.pvcreate, 'A')
        pvdisplay = MagicMock(return_value=True)
        with patch('salt.modules.linux_lvm.pvdisplay', pvdisplay):
            with patch.object(os.path, 'exists', return_value=True):
                ret = {'stdout': 'saltines', 'stderr': 'cheese', 'retcode': 0, 'pid': '1337'}
                mock = MagicMock(return_value=ret)
                with patch.dict(linux_lvm.__salt__, {'cmd.run_all': mock}):
                    self.assertEqual(linux_lvm.pvcreate('A', metadatasize=1000), True)
    def test_pvremove(self):
        '''
        Tests for remove a physical device being used as an LVM physical volume
        '''
        pvdisplay = MagicMock(return_value=False)
        with patch('salt.modules.linux_lvm.pvdisplay', pvdisplay):
            self.assertRaises(CommandExecutionError, linux_lvm.pvremove, 'A', override=False)
        pvdisplay = MagicMock(return_value=False)
        with patch('salt.modules.linux_lvm.pvdisplay', pvdisplay):
            mock = MagicMock(return_value=True)
            with patch.dict(linux_lvm.__salt__, {'lvm.pvdisplay': mock}):
                ret = {'stdout': 'saltines', 'stderr': 'cheese', 'retcode': 0, 'pid': '1337'}
                mock = MagicMock(return_value=ret)
                with patch.dict(linux_lvm.__salt__, {'cmd.run_all': mock}):
                    self.assertEqual(linux_lvm.pvremove('A'), True)
    def test_vgcreate(self):
        '''
        Tests create an LVM volume group
        '''
        self.assertEqual(linux_lvm.vgcreate('', ''),
                         'Error: vgname and device(s) are both required')
        mock = MagicMock(return_value='A\nB')
        with patch.dict(linux_lvm.__salt__, {'cmd.run': mock}):
            with patch.object(linux_lvm, 'vgdisplay', return_value={}):
                self.assertDictEqual(linux_lvm.vgcreate('A', 'B'),
                                     {'Output from vgcreate': 'A'})
    def test_vgextend(self):
        '''
        Tests add physical volumes to an LVM volume group
        '''
        self.assertEqual(linux_lvm.vgextend('', ''),
                         'Error: vgname and device(s) are both required')
        mock = MagicMock(return_value='A\nB')
        with patch.dict(linux_lvm.__salt__, {'cmd.run': mock}):
            with patch.object(linux_lvm, 'vgdisplay', return_value={}):
                self.assertDictEqual(linux_lvm.vgextend('A', 'B'),
                                     {'Output from vgextend': 'A'})
    def test_lvcreate(self):
        '''
        Test create a new logical volume, with option
        for which physical volume to be used
        '''
        # size and extents are mutually exclusive, but one is required.
        self.assertEqual(linux_lvm.lvcreate(None, None, 1, 1),
                         'Error: Please specify only one of size or extents')
        self.assertEqual(linux_lvm.lvcreate(None, None, None, None),
                         'Error: Either size or extents must be specified')
        mock = MagicMock(return_value='A\nB')
        with patch.dict(linux_lvm.__salt__, {'cmd.run': mock}):
            with patch.object(linux_lvm, 'lvdisplay', return_value={}):
                self.assertDictEqual(linux_lvm.lvcreate(None, None, None, 1),
                                     {'Output from lvcreate': 'A'})
    def test_vgremove(self):
        '''
        Tests to remove an LVM volume group
        '''
        mock = MagicMock(return_value='A')
        with patch.dict(linux_lvm.__salt__, {'cmd.run': mock}):
            self.assertEqual(linux_lvm.vgremove('A'), 'A')
    def test_lvremove(self):
        '''
        Test to remove a given existing logical volume
        from a named existing volume group
        '''
        mock = MagicMock(return_value='A')
        with patch.dict(linux_lvm.__salt__, {'cmd.run': mock}):
            self.assertEqual(linux_lvm.lvremove('', ''), 'A')
    def test_lvresize(self):
        '''
        Test to return information about the logical volume(s)
        '''
        mock = MagicMock(return_value={'retcode': 1})
        with patch.dict(linux_lvm.__salt__, {'cmd.run_all': mock}):
            self.assertDictEqual(linux_lvm.lvresize(1, 'a'), {})
        mock = MagicMock(return_value={'retcode': 0})
        with patch.dict(linux_lvm.__salt__, {'cmd.run_all': mock}):
            self.assertDictEqual(linux_lvm.lvresize(1, 'a'), {})
if __name__ == '__main__':
    # Allow running this module directly through Salt's test runner; the
    # import is deferred so importing this module never requires 'integration'.
    from integration import run_tests
    run_tests(LinuxLVMTestCase, needs_daemon=False)
| stephane-martin/salt-debian-packaging | salt-2016.3.3/tests/unit/modules/linux_lvm_test.py | Python | apache-2.0 | 11,687 |
# -*- coding: utf-8 -*-
'''
FanFilm Add-on
Copyright (C) 2016 mrknow
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urllib,urlparse, json, base64
from resources.lib.libraries import cleantitle
from resources.lib.libraries import client
from resources.lib.libraries import control
from resources.lib import resolvers
class source:
    """Scraper "source" provider for segos.es used by the FanFilm add-on.

    Methods return site-relative URLs (or None on any failure); get_sources
    turns a title page into a list of host/quality dicts for the resolver.
    Bare ``except:`` clauses were narrowed to ``except Exception:`` so that
    KeyboardInterrupt/SystemExit are no longer swallowed.
    """
    def __init__(self):
        self.base_link = 'http://segos.es'
        self.search_link = '/?search=%s'
        # Restored: get_episode() formats this attribute; with it commented
        # out, every episode lookup raised AttributeError.
        self.episode_link = '-Season-%01d-Episode-%01d'
    def get_movie(self, imdb, title, year):
        """Return the site URL path for a movie, or None on failure."""
        try:
            query = self.search_link % (urllib.quote_plus(cleantitle.query2(title)))
            query = urlparse.urljoin(self.base_link, query)
            result = client.request(query)
            title = cleantitle.movie(title)
            result = client.parseDOM(result, 'div', attrs={'style':'overflow: hidden; margin-top: 15px;'})
            result = [(
                client.parseDOM(i, 'a', ret='href')[0],
                client.parseDOM(i, 'a')[1],
                str(re.findall(r"(\d{4})", client.parseDOM(i, 'a')[1])[0])) for i in result]
            # Accept the requested year +/- 1 to tolerate release-date skew.
            years = ['%s' % str(year), '%s' % str(int(year)+1), '%s' % str(int(year)-1)]
            result = [i for i in result if title in cleantitle.movie(i[1])]
            result = [i[0] for i in result if any(x in i[2] for x in years)][0]
            try: url = re.compile('//.+?(/.+)').findall(result)[0]
            except Exception: url = result
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            control.log('Segos URL %s' % url)
            return url
        except Exception:
            return
    def get_show(self, imdb, tvdb, tvshowtitle, year):
        """Return the site URL path for a TV show, or None on failure."""
        try:
            # Fixed: was self.moviesearch_link, which is never defined on this
            # class (only search_link is), so this method always raised
            # AttributeError and fell through to the except below.
            query = self.search_link % (urllib.unquote(tvshowtitle))
            query = urlparse.urljoin(self.base_link, query)
            result = client.source(query)
            # NOTE(review): the JSON-decoded result is then fed to
            # client.parseDOM as though it were HTML; this path looks stale --
            # verify against the live site before relying on TV-show search.
            result = json.loads(result)
            tvshowtitle = cleantitle.tv(tvshowtitle)
            years = ['%s' % str(year), '%s' % str(int(year)+1), '%s' % str(int(year)-1)]
            result = [(client.parseDOM(i, 'a', ret='href')[0], client.parseDOM(i, 'h2', ret='title')[0], client.parseDOM(i, 'span', attrs = {'itemprop': 'copyrightYear'})) for i in result]
            result = [i for i in result if len(i[2]) > 0]
            result = [i for i in result if tvshowtitle == cleantitle.tv(i[1])]
            result = [i[0] for i in result if any(x in i[2][0] for x in years)][0]
            try: url = re.compile('//.+?(/.+)').findall(result)[0]
            except Exception: url = result
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except Exception:
            return
    def get_episode(self, url, imdb, tvdb, title, date, season, episode):
        """Append the season/episode suffix to a show URL."""
        if url == None: return
        url += self.episode_link % (int(season), int(episode))
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    def get_sources(self, url, hosthdDict, hostDict, locDict):
        """Scrape the title page at *url* for hoster links.

        Returns a list of dicts with source/quality/provider/url/vtype keys;
        returns the (possibly empty) list on any scrape failure.
        """
        try:
            sources = []
            if url == None: return sources
            url = urlparse.urljoin(self.base_link, url)
            result = client.request(url)
            # Language and quality are read from the info box on the page.
            vtype = re.findall('<div class="col-lg-9 col-md-9 col-sm-9">\s.*<b>Język</b>:(.*?)\.*</div>',result)[0].strip()
            q = re.findall('<div class="col-lg-9 col-md-9 col-sm-9">\s.*<b>Jakość</b>:(.*?)\.*</div>', result)[0].strip()
            quality = 'SD'
            if '720' in q: quality = 'HD'
            if '1080' in q: quality = '1080p'
            links = client.parseDOM(result, 'div', attrs={'id':'Film'})
            links = [client.parseDOM(i, 'a', ret='href', attrs={'target':'_blank'})[0] for i in links]
            for i in links:
                try:
                    # Reduce the hoster URL to its bare "domain.tld" name.
                    host = urlparse.urlparse(i).netloc
                    host = host.split('.')
                    host = host[-2]+"."+host[-1]
                    host = host.lower()
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')
                    sources.append({'source': host, 'quality': quality, 'provider': 'SEGOS', 'url': i, 'vtype':vtype})
                except Exception:
                    pass
            return sources
        except Exception:
            return sources
    def resolve(self, url):
        """Resolve a hoster URL to a playable stream URL (None on failure)."""
        control.log('CDA-ONLINE RESOLVE URL %s' % url)
        try:
            url = resolvers.request(url)
            return url
        except Exception:
            return
| mrknow/filmkodi | plugin.video.fanfilm/resources/lib/sources/segos_mv.py | Python | apache-2.0 | 5,195 |
from django.http import FileResponse
from django.core.files.base import ContentFile
from rest_framework import generics
from rest_framework import permissions as drf_permissions
from rest_framework.exceptions import NotFound, PermissionDenied, ValidationError
from framework.auth.oauth_scopes import CoreScopes
from osf.models import (
Guid,
BaseFileNode,
FileVersion,
QuickFilesNode,
)
from api.base.exceptions import Gone
from api.base.permissions import PermissionWithGetter
from api.base.throttling import CreateGuidThrottle, NonCookieAuthThrottle, UserRateThrottle
from api.base import utils
from api.base.views import JSONAPIBaseView
from api.base import permissions as base_permissions
from api.nodes.permissions import ContributorOrPublic
from api.nodes.permissions import ReadOnlyIfRegistration
from api.files.permissions import IsPreprintFile
from api.files.permissions import CheckedOutOrAdmin
from api.files.permissions import FileMetadataRecordPermission
from api.files.serializers import FileSerializer
from api.files.serializers import FileDetailSerializer, QuickFilesDetailSerializer
from api.files.serializers import FileMetadataRecordSerializer
from api.files.serializers import FileVersionSerializer
from osf.utils.permissions import ADMIN
class FileMixin(object):
    """Mixin with convenience methods for retrieving the current file based on the
    current URL. By default, fetches the file based on the file_id kwarg.
    """

    serializer_class = FileSerializer
    file_lookup_url_kwarg = 'file_id'

    def get_file(self, check_permissions=True):
        """Return the BaseFileNode addressed by the URL kwarg.

        Accepts either a file id or a GUID whose referent is a file. Raises
        NotFound for a missing/non-file object, Gone for deleted files or
        files whose target has been deleted, and may raise PermissionDenied
        when check_permissions is True.
        """
        try:
            obj = utils.get_object_or_error(BaseFileNode, self.kwargs[self.file_lookup_url_kwarg], self.request, display_name='file')
        except NotFound:
            # Fall back to treating the identifier as a GUID; its referent
            # must itself be a file node.
            obj = utils.get_object_or_error(Guid, self.kwargs[self.file_lookup_url_kwarg], self.request).referent
            if not isinstance(obj, BaseFileNode):
                raise NotFound
        if obj.is_deleted:
            raise Gone(detail='The requested file is no longer available.')
        if getattr(obj.target, 'deleted', None):
            raise Gone(detail='The requested file is no longer available')
        if getattr(obj.target, 'is_quickfiles', False) and getattr(obj.target, 'creator'):
            # Quickfiles vanish with their owner's account.
            if obj.target.creator.is_disabled:
                raise Gone(detail='This user has been deactivated and their quickfiles are no longer available.')
        if check_permissions:
            # May raise a permission denied
            self.check_object_permissions(self.request, obj)
        return obj
class FileDetail(JSONAPIBaseView, generics.RetrieveUpdateAPIView, FileMixin):
    """The documentation for this endpoint can be found [here](https://developer.osf.io/#operation/files_detail).
    """
    permission_classes = (
        drf_permissions.IsAuthenticatedOrReadOnly,
        IsPreprintFile,
        CheckedOutOrAdmin,
        base_permissions.TokenHasScope,
        PermissionWithGetter(ContributorOrPublic, 'target'),
        PermissionWithGetter(ReadOnlyIfRegistration, 'target'),
    )
    required_read_scopes = [CoreScopes.NODE_FILE_READ]
    required_write_scopes = [CoreScopes.NODE_FILE_WRITE]
    serializer_class = FileDetailSerializer
    throttle_classes = (CreateGuidThrottle, NonCookieAuthThrottle, UserRateThrottle, )
    view_category = 'files'
    view_name = 'file-detail'
    def get_serializer_class(self):
        """Use the quickfiles serializer when the file's target is a QuickFilesNode.

        Any lookup failure falls back to the default serializer; the actual
        error surfaces later when the object itself is fetched.
        """
        try:
            target = self.get_target()
        except (NotFound, Gone, PermissionDenied):
            return FileDetailSerializer
        else:
            if isinstance(target, QuickFilesNode):
                return QuickFilesDetailSerializer
            return FileDetailSerializer
    def get_target(self):
        # The node/preprint/etc. that owns the current file.
        return self.get_file().target
    # overrides RetrieveAPIView
    def get_object(self):
        user = utils.get_user_auth(self.request).user
        file = self.get_file()
        if self.request.GET.get('create_guid', False):
            # allows quickfiles to be given guids when another user wants a permanent link to it
            if (self.get_target().has_permission(user, ADMIN) and utils.has_admin_scope(self.request)) or getattr(file.target, 'is_quickfiles', False):
                file.get_guid(create=True)
        return file
class FileVersionsList(JSONAPIBaseView, generics.ListAPIView, FileMixin):
    """The documentation for this endpoint can be found [here](https://developer.osf.io/#operation/files_versions).
    """
    permission_classes = (
        drf_permissions.IsAuthenticatedOrReadOnly,
        base_permissions.TokenHasScope,
        PermissionWithGetter(ContributorOrPublic, 'target'),
    )
    required_read_scopes = [CoreScopes.NODE_FILE_READ]
    required_write_scopes = [CoreScopes.NODE_FILE_WRITE]
    serializer_class = FileVersionSerializer
    view_category = 'files'
    view_name = 'file-versions'
    ordering = ('-modified',)
    def get_queryset(self):
        # Cache the file on the view so get_serializer_context can reuse it
        # without a second lookup.
        self.file = self.get_file()
        return self.file.versions.all()
    def get_serializer_context(self):
        # Versions have no back-reference to their file, so pass it via context.
        context = JSONAPIBaseView.get_serializer_context(self)
        context['file'] = self.file
        return context
def node_from_version(request, view, obj):
    """Permission getter: return the target of the file the view refers to.

    Permission checks are skipped during this lookup because this helper only
    locates the object that the permission classes will then evaluate.
    """
    current_file = view.get_file(check_permissions=False)
    return current_file.target
class FileVersionDetail(JSONAPIBaseView, generics.RetrieveAPIView, FileMixin):
    """The documentation for this endpoint can be found [here](https://developer.osf.io/#operation/files_version_detail).
    """
    version_lookup_url_kwarg = 'version_id'
    permission_classes = (
        drf_permissions.IsAuthenticatedOrReadOnly,
        base_permissions.TokenHasScope,
        PermissionWithGetter(ContributorOrPublic, node_from_version),
    )
    required_read_scopes = [CoreScopes.NODE_FILE_READ]
    required_write_scopes = [CoreScopes.NODE_FILE_WRITE]
    serializer_class = FileVersionSerializer
    view_category = 'files'
    view_name = 'version-detail'
    # overrides RetrieveAPIView
    def get_object(self):
        """Fetch the requested FileVersion, checking permissions on the file."""
        self.file = self.get_file()
        maybe_version = self.file.get_version(self.kwargs[self.version_lookup_url_kwarg])
        # May raise a permission denied
        # Kinda hacky but versions have no reference to node or file
        self.check_object_permissions(self.request, self.file)
        return utils.get_object_or_error(FileVersion, getattr(maybe_version, '_id', ''), self.request)
    def get_serializer_context(self):
        # Versions have no back-reference to their file, so pass it via context.
        context = JSONAPIBaseView.get_serializer_context(self)
        context['file'] = self.file
        return context
class FileMetadataRecordsList(JSONAPIBaseView, generics.ListAPIView, FileMixin):
    """List the metadata records attached to the current file (read-only)."""
    permission_classes = (
        drf_permissions.IsAuthenticatedOrReadOnly,
        base_permissions.TokenHasScope,
        PermissionWithGetter(ContributorOrPublic, 'target'),
    )
    required_read_scopes = [CoreScopes.NODE_FILE_READ]
    # Write scope is NULL: this endpoint never mutates records.
    required_write_scopes = [CoreScopes.NULL]
    serializer_class = FileMetadataRecordSerializer
    view_category = 'files'
    view_name = 'metadata-records'
    ordering = ('-created',)
    def get_queryset(self):
        # All metadata records belonging to the file from the URL.
        return self.get_file().records.all()
class FileMetadataRecordDetail(JSONAPIBaseView, generics.RetrieveUpdateAPIView, FileMixin):
    """Retrieve or update a single metadata record attached to the current file."""
    record_lookup_url_kwarg = 'record_id'
    permission_classes = (
        drf_permissions.IsAuthenticatedOrReadOnly,
        base_permissions.TokenHasScope,
        FileMetadataRecordPermission(ContributorOrPublic),
        FileMetadataRecordPermission(ReadOnlyIfRegistration),
    )
    required_read_scopes = [CoreScopes.NODE_FILE_READ]
    required_write_scopes = [CoreScopes.NODE_FILE_WRITE]
    serializer_class = FileMetadataRecordSerializer
    view_category = 'files'
    view_name = 'metadata-record-detail'
    def get_object(self):
        # 404s (via get_object_or_error) when the record id does not belong
        # to this file.
        return utils.get_object_or_error(
            self.get_file().records.filter(_id=self.kwargs[self.record_lookup_url_kwarg]),
            request=self.request,
        )
class FileMetadataRecordDownload(JSONAPIBaseView, generics.RetrieveAPIView, FileMixin):
    """Download a file's metadata record as an attachment (default JSON)."""
    record_lookup_url_kwarg = 'record_id'
    permission_classes = (
        drf_permissions.IsAuthenticatedOrReadOnly,
        base_permissions.TokenHasScope,
        PermissionWithGetter(ContributorOrPublic, 'target'),
    )
    required_read_scopes = [CoreScopes.NODE_FILE_READ]
    # Download-only endpoint; no write scope.
    required_write_scopes = [CoreScopes.NULL]
    view_category = 'files'
    view_name = 'metadata-record-download'
    def get_serializer_class(self):
        # Raw file download; no JSONAPI serialization happens here.
        return None
    def get_object(self):
        # select_related avoids extra queries for schema/file used in get().
        return utils.get_object_or_error(
            self.get_file().records.filter(_id=self.kwargs[self.record_lookup_url_kwarg]).select_related('schema', 'file'),
            request=self.request,
        )
    def get(self, request, **kwargs):
        # 'export' selects the serialization format; record.serialize raises
        # ValueError for unsupported formats, which we surface as a 400.
        file_type = self.request.query_params.get('export', 'json')
        record = self.get_object()
        try:
            response = FileResponse(ContentFile(record.serialize(format=file_type)))
        except ValueError as e:
            # Strip periods so the appended sentence below reads cleanly.
            detail = str(e).replace('.', '')
            raise ValidationError(detail='{} for metadata file export.'.format(detail))
        # NOTE(review): file_type is user-supplied and is interpolated into the
        # Content-Type unvalidated — presumably serialize() restricts it to a
        # known set; verify.
        file_name = 'file_metadata_{}_{}.{}'.format(record.schema._id, record.file.name, file_type)
        response['Content-Disposition'] = 'attachment; filename="{}"'.format(file_name)
        response['Content-Type'] = 'application/{}'.format(file_type)
        return response
| mattclark/osf.io | api/files/views.py | Python | apache-2.0 | 9,504 |
# -*- coding: utf-8 -*-
from django.conf import settings
from django.test import TestCase
from zilencer.models import Deployment
from zerver.models import (
get_realm, get_user_profile_by_email,
PreregistrationUser, Realm, ScheduledJob, UserProfile,
)
from zerver.lib.actions import (
create_stream_if_needed,
do_add_subscription,
set_default_streams,
)
from zerver.lib.digest import send_digest_email
from zerver.lib.notifications import enqueue_welcome_emails, one_click_unsubscribe_link
from zerver.lib.test_helpers import AuthedTestCase, find_key_by_email, queries_captured
from zerver.lib.test_runner import slow
from zerver.lib.session_user import get_session_dict_user
import re
import ujson
from urlparse import urlparse
class PublicURLTest(TestCase):
    """
    Account creation URLs are accessible even when not logged in. Authenticated
    URLs redirect to a page.
    """
    def fetch(self, method, urls, expected_status):
        """Hit each URL with the given HTTP method and assert the status code."""
        for url in urls:
            if method == "get":
                response = self.client.get(url)
            else:
                response = self.client.post(url)
            self.assertEqual(response.status_code, expected_status,
                             msg="Expected %d, received %d for %s to %s" % (
                    expected_status, response.status_code, method, url))
    def test_public_urls(self):
        """
        Test which views are accessible when not logged in.
        """
        # FIXME: We should also test the Tornado URLs -- this codepath
        # can't do so because this Django test mechanism doesn't go
        # through Tornado.
        # Maps expected status code -> URLs that should yield it anonymously.
        get_urls = {200: ["/accounts/home/", "/accounts/login/"],
                    302: ["/"],
                    401: ["/api/v1/streams/Denmark/members",
                          "/api/v1/users/me/subscriptions",
                          "/api/v1/messages",
                          ],
                    }
        post_urls = {200: ["/accounts/login/"],
                     302: ["/accounts/logout/"],
                     401: ["/json/get_public_streams",
                           "/json/get_old_messages",
                           "/json/update_pointer",
                           "/json/send_message",
                           "/json/invite_users",
                           "/json/settings/change",
                           "/json/subscriptions/remove",
                           "/json/subscriptions/exists",
                           "/json/subscriptions/add",
                           "/json/subscriptions/property",
                           "/json/get_subscribers",
                           "/json/fetch_api_key",
                           "/api/v1/users/me/subscriptions",
                           ],
                     400: ["/api/v1/send_message",
                           "/api/v1/external/github",
                           "/api/v1/fetch_api_key",
                           ],
                     }
        for status_code, url_set in get_urls.iteritems():
            self.fetch("get", url_set, status_code)
        for status_code, url_set in post_urls.iteritems():
            self.fetch("post", url_set, status_code)
    def test_get_gcid_when_not_configured(self):
        # Without a configured Google client id, the endpoint must 400.
        with self.settings(GOOGLE_CLIENT_ID=None):
            resp = self.client.get("/api/v1/fetch_google_client_id")
            self.assertEquals(400, resp.status_code,
                msg="Expected 400, received %d for GET /api/v1/fetch_google_client_id" % resp.status_code,
            )
            data = ujson.loads(resp.content)
            self.assertEqual('error', data['result'])
    def test_get_gcid_when_configured(self):
        # With a configured Google client id, the endpoint returns it verbatim.
        with self.settings(GOOGLE_CLIENT_ID="ABCD"):
            resp = self.client.get("/api/v1/fetch_google_client_id")
            self.assertEquals(200, resp.status_code,
                msg="Expected 200, received %d for GET /api/v1/fetch_google_client_id" % resp.status_code,
            )
            data = ujson.loads(resp.content)
            self.assertEqual('success', data['result'])
            self.assertEqual('ABCD', data['google_client_id'])
class LoginTest(AuthedTestCase):
    """
    Logging in, registration, and logging out.
    """
    def test_login(self):
        # Successful login binds the user's id to the session.
        self.login("hamlet@zulip.com")
        user_profile = get_user_profile_by_email('hamlet@zulip.com')
        self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)
    def test_login_bad_password(self):
        # A wrong password must leave the session unauthenticated.
        self.login("hamlet@zulip.com", "wrongpassword")
        self.assertIsNone(get_session_dict_user(self.client.session))
    def test_login_nonexist_user(self):
        result = self.login("xxx@zulip.com", "xxx")
        self.assertIn("Please enter a correct email and password", result.content)
    def test_register(self):
        realm = get_realm("zulip.com")
        streams = ["stream_%s" % i for i in xrange(40)]
        for stream in streams:
            create_stream_if_needed(realm, stream)
        set_default_streams(realm, streams)
        with queries_captured() as queries:
            self.register("test", "test")
        # Ensure the number of queries we make is not O(streams)
        self.assert_length(queries, 67)
        user_profile = get_user_profile_by_email('test@zulip.com')
        self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)
    def test_register_deactivated(self):
        """
        If you try to register for a deactivated realm, you get a clear error
        page.
        """
        realm = get_realm("zulip.com")
        realm.deactivated = True
        realm.save(update_fields=["deactivated"])
        result = self.register("test", "test")
        self.assertIn("has been deactivated", result.content.replace("\n", " "))
        # No account may be created against a deactivated realm.
        with self.assertRaises(UserProfile.DoesNotExist):
            get_user_profile_by_email('test@zulip.com')
    def test_login_deactivated(self):
        """
        If you try to log in to a deactivated realm, you get a clear error page.
        """
        realm = get_realm("zulip.com")
        realm.deactivated = True
        realm.save(update_fields=["deactivated"])
        result = self.login("hamlet@zulip.com")
        self.assertIn("has been deactivated", result.content.replace("\n", " "))
    def test_logout(self):
        self.login("hamlet@zulip.com")
        self.client.post('/accounts/logout/')
        self.assertIsNone(get_session_dict_user(self.client.session))
    def test_non_ascii_login(self):
        """
        You can log in even if your password contain non-ASCII characters.
        """
        email = "test@zulip.com"
        password = u"hümbüǵ"
        # Registering succeeds.
        self.register("test", password)
        user_profile = get_user_profile_by_email(email)
        self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)
        self.client.post('/accounts/logout/')
        self.assertIsNone(get_session_dict_user(self.client.session))
        # Logging in succeeds.
        self.client.post('/accounts/logout/')
        self.login(email, password)
        self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)
    def test_register_first_user_with_invites(self):
        """
        The first user in a realm has a special step in their signup workflow
        for inviting coworkers. Do as realistic an end-to-end test as we can
        without Tornado running.
        """
        username = "user1"
        password = "test"
        domain = "test.com"
        email = "user1@test.com"
        # Create a new realm to ensure that we're the first user in it.
        Realm.objects.create(domain=domain, name="Test Inc.")
        # Start the signup process by supplying an email address.
        result = self.client.post('/accounts/home/', {'email': email})
        # Check the redirect telling you to check your mail for a confirmation
        # link.
        self.assertEquals(result.status_code, 302)
        self.assertTrue(result["Location"].endswith(
                "/accounts/send_confirm/%s@%s" % (username, domain)))
        result = self.client.get(result["Location"])
        self.assertIn("Check your email so we can get started.", result.content)
        # Visit the confirmation link.
        from django.core.mail import outbox
        for message in reversed(outbox):
            if email in message.to:
                confirmation_link_pattern = re.compile(settings.EXTERNAL_HOST + "(\S+)>")
                confirmation_url = confirmation_link_pattern.search(
                    message.body).groups()[0]
                break
        else:
            raise ValueError("Couldn't find a confirmation email.")
        result = self.client.get(confirmation_url)
        self.assertEquals(result.status_code, 200)
        # Pick a password and agree to the ToS.
        result = self.submit_reg_form_for_user(username, password, domain)
        self.assertEquals(result.status_code, 302)
        self.assertTrue(result["Location"].endswith("/invite/"))
        # Invite coworkers to join you.
        result = self.client.get(result["Location"])
        self.assertIn("You're the first one here!", result.content)
        # Reset the outbox for our invites.
        outbox.pop()
        invitees = ['alice@' + domain, 'bob@' + domain]
        params = {
            'invitee_emails': ujson.dumps(invitees)
        }
        result = self.client.post('/json/bulk_invite_users', params)
        self.assert_json_success(result)
        # We really did email these users, and they have PreregistrationUser
        # objects.
        email_recipients = [message.recipients()[0] for message in outbox]
        self.assertEqual(len(outbox), len(invitees))
        self.assertItemsEqual(email_recipients, invitees)
        user_profile = get_user_profile_by_email(email)
        self.assertEqual(len(invitees), PreregistrationUser.objects.filter(
                referred_by=user_profile).count())
        # After this we start manipulating browser information, so stop here.
class InviteUserTest(AuthedTestCase):
    """Tests for /json/invite_users and /json/bulk_invite_users."""
    def invite(self, users, streams):
        """
        Invites the specified users to Zulip with the specified streams.
        users should be a string containing the users to invite, comma or
        newline separated.
        streams should be a list of strings.
        """
        return self.client.post("/json/invite_users",
                {"invitee_emails": users,
                 "stream": streams})
    def check_sent_emails(self, correct_recipients):
        """Assert the outbox holds exactly one email per expected recipient."""
        from django.core.mail import outbox
        self.assertEqual(len(outbox), len(correct_recipients))
        email_recipients = [email.recipients()[0] for email in outbox]
        self.assertItemsEqual(email_recipients, correct_recipients)
    def test_bulk_invite_users(self):
        # The bulk_invite_users code path is for the first user in a realm.
        self.login('hamlet@zulip.com')
        invitees = ['alice@zulip.com', 'bob@zulip.com']
        params = {
            'invitee_emails': ujson.dumps(invitees)
        }
        result = self.client.post('/json/bulk_invite_users', params)
        self.assert_json_success(result)
        self.check_sent_emails(invitees)
    def test_successful_invite_user(self):
        """
        A call to /json/invite_users with valid parameters causes an invitation
        email to be sent.
        """
        self.login("hamlet@zulip.com")
        invitee = "alice-test@zulip.com"
        self.assert_json_success(self.invite(invitee, ["Denmark"]))
        self.assertTrue(find_key_by_email(invitee))
        self.check_sent_emails([invitee])
    def test_multi_user_invite(self):
        """
        Invites multiple users with a variety of delimiters.
        """
        self.login("hamlet@zulip.com")
        # Intentionally use a weird string.
        self.assert_json_success(self.invite(
            """bob-test@zulip.com, carol-test@zulip.com,
            dave-test@zulip.com
            earl-test@zulip.com""", ["Denmark"]))
        for user in ("bob", "carol", "dave", "earl"):
            self.assertTrue(find_key_by_email("%s-test@zulip.com" % user))
        self.check_sent_emails(["bob-test@zulip.com", "carol-test@zulip.com",
                                "dave-test@zulip.com", "earl-test@zulip.com"])
    def test_missing_or_invalid_params(self):
        """
        Tests inviting with various missing or invalid parameters.
        """
        self.login("hamlet@zulip.com")
        self.assert_json_error(
            self.client.post("/json/invite_users", {"invitee_emails": "foo@zulip.com"}),
            "You must specify at least one stream for invitees to join.")
        # Malformed or out-of-domain addresses are rejected wholesale.
        for address in ("noatsign.com", "outsideyourdomain@example.net"):
            self.assert_json_error(
                self.invite(address, ["Denmark"]),
                "Some emails did not validate, so we didn't send any invitations.")
        self.check_sent_emails([])
    def test_invalid_stream(self):
        """
        Tests inviting to a non-existent stream.
        """
        self.login("hamlet@zulip.com")
        self.assert_json_error(self.invite("iago-test@zulip.com", ["NotARealStream"]),
                "Stream does not exist: NotARealStream. No invites were sent.")
        self.check_sent_emails([])
    def test_invite_existing_user(self):
        """
        If you invite an address already using Zulip, no invitation is sent.
        """
        self.login("hamlet@zulip.com")
        self.assert_json_error(
            self.client.post("/json/invite_users",
                             {"invitee_emails": "hamlet@zulip.com",
                              "stream": ["Denmark"]}),
            "We weren't able to invite anyone.")
        self.assertRaises(PreregistrationUser.DoesNotExist,
                          lambda: PreregistrationUser.objects.get(
                email="hamlet@zulip.com"))
        self.check_sent_emails([])
    def test_invite_some_existing_some_new(self):
        """
        If you invite a mix of already existing and new users, invitations are
        only sent to the new users.
        """
        self.login("hamlet@zulip.com")
        existing = ["hamlet@zulip.com", "othello@zulip.com"]
        new = ["foo-test@zulip.com", "bar-test@zulip.com"]
        result = self.client.post("/json/invite_users",
                                  {"invitee_emails": "\n".join(existing + new),
                                   "stream": ["Denmark"]})
        self.assert_json_error(result,
                               "Some of those addresses are already using Zulip, \
so we didn't send them an invitation. We did send invitations to everyone else!")
        # We only created accounts for the new users.
        for email in existing:
            self.assertRaises(PreregistrationUser.DoesNotExist,
                              lambda: PreregistrationUser.objects.get(
                    email=email))
        for email in new:
            self.assertTrue(PreregistrationUser.objects.get(email=email))
        # We only sent emails to the new users.
        self.check_sent_emails(new)
    def test_invite_outside_domain_in_closed_realm(self):
        """
        In a realm with `restricted_to_domain = True`, you can't invite people
        with a different domain from that of the realm or your e-mail address.
        """
        zulip_realm = get_realm("zulip.com")
        zulip_realm.restricted_to_domain = True
        zulip_realm.save()
        self.login("hamlet@zulip.com")
        external_address = "foo@example.com"
        self.assert_json_error(
            self.invite(external_address, ["Denmark"]),
            "Some emails did not validate, so we didn't send any invitations.")
    @slow(0.20, 'inviting is slow')
    def test_invite_outside_domain_in_open_realm(self):
        """
        In a realm with `restricted_to_domain = False`, you can invite people
        with a different domain from that of the realm or your e-mail address.
        """
        zulip_realm = get_realm("zulip.com")
        zulip_realm.restricted_to_domain = False
        zulip_realm.save()
        self.login("hamlet@zulip.com")
        external_address = "foo@example.com"
        self.assert_json_success(self.invite(external_address, ["Denmark"]))
        self.check_sent_emails([external_address])
    def test_invite_with_non_ascii_streams(self):
        """
        Inviting someone to streams with non-ASCII characters succeeds.
        """
        self.login("hamlet@zulip.com")
        invitee = "alice-test@zulip.com"
        stream_name = u"hümbüǵ"
        realm = get_realm("zulip.com")
        stream, _ = create_stream_if_needed(realm, stream_name)
        # Make sure we're subscribed before inviting someone.
        do_add_subscription(
            get_user_profile_by_email("hamlet@zulip.com"),
            stream, no_log=True)
        self.assert_json_success(self.invite(invitee, [stream_name]))
class EmailUnsubscribeTests(AuthedTestCase):
    """One-click unsubscribe links must work while logged out."""
    def test_missedmessage_unsubscribe(self):
        """
        We provide one-click unsubscribe links in missed message
        e-mails that you can click even when logged out to update your
        email notification settings.
        """
        user_profile = get_user_profile_by_email("hamlet@zulip.com")
        user_profile.enable_offline_email_notifications = True
        user_profile.save()
        unsubscribe_link = one_click_unsubscribe_link(user_profile,
                                                      "missed_messages")
        result = self.client.get(urlparse(unsubscribe_link).path)
        self.assertEqual(result.status_code, 200)
        # Circumvent user_profile caching.
        user_profile = UserProfile.objects.get(email="hamlet@zulip.com")
        self.assertFalse(user_profile.enable_offline_email_notifications)
    def test_welcome_unsubscribe(self):
        """
        We provide one-click unsubscribe links in welcome e-mails that you can
        click even when logged out to stop receiving them.
        """
        email = "hamlet@zulip.com"
        user_profile = get_user_profile_by_email("hamlet@zulip.com")
        # Simulate a new user signing up, which enqueues 2 welcome e-mails.
        enqueue_welcome_emails(email, "King Hamlet")
        self.assertEqual(2, len(ScheduledJob.objects.filter(
                    type=ScheduledJob.EMAIL, filter_string__iexact=email)))
        # Simulate unsubscribing from the welcome e-mails.
        unsubscribe_link = one_click_unsubscribe_link(user_profile, "welcome")
        result = self.client.get(urlparse(unsubscribe_link).path)
        # The welcome email jobs are no longer scheduled.
        self.assertEqual(result.status_code, 200)
        self.assertEqual(0, len(ScheduledJob.objects.filter(
                    type=ScheduledJob.EMAIL, filter_string__iexact=email)))
    def test_digest_unsubscribe(self):
        """
        We provide one-click unsubscribe links in digest e-mails that you can
        click even when logged out to stop receiving them.
        Unsubscribing from these emails also dequeues any digest email jobs that
        have been queued.
        """
        email = "hamlet@zulip.com"
        user_profile = get_user_profile_by_email("hamlet@zulip.com")
        self.assertTrue(user_profile.enable_digest_emails)
        # Enqueue a fake digest email.
        send_digest_email(user_profile, "", "")
        self.assertEqual(1, len(ScheduledJob.objects.filter(
                    type=ScheduledJob.EMAIL, filter_string__iexact=email)))
        # Simulate unsubscribing from digest e-mails.
        unsubscribe_link = one_click_unsubscribe_link(user_profile, "digest")
        result = self.client.get(urlparse(unsubscribe_link).path)
        # The setting is toggled off, and scheduled jobs have been removed.
        self.assertEqual(result.status_code, 200)
        # Circumvent user_profile caching.
        user_profile = UserProfile.objects.get(email="hamlet@zulip.com")
        self.assertFalse(user_profile.enable_digest_emails)
        self.assertEqual(0, len(ScheduledJob.objects.filter(
                    type=ScheduledJob.EMAIL, filter_string__iexact=email)))
| gkotian/zulip | zerver/test_signup.py | Python | apache-2.0 | 20,333 |
from simpletcp.clientsocket import ClientSocket

# Demo client for the echo/reverse example server.

# One-shot socket: sends a single message; closed automatically after use.
s1 = ClientSocket("localhost", 5000)
response = s1.send("Hello, World!")

# Reusable socket: single_use=False keeps the connection open across sends,
# so it must be closed explicitly.
s2 = ClientSocket("localhost", 5000, single_use=False)
r1 = s2.send("Hello for the first time...")
r2 = s2.send("...and hello for the last!")
s2.close()

# Display the correspondence.
# Bug fix: the "sent" lines previously showed slightly different text than the
# messages actually transmitted ("...." and a stray trailing period).
print("s1 sent\t\tHello, World!")
print("s1 received\t\t{}".format(response.decode("UTF-8")))
print("-------------------------------------------------")
print("s2 sent\t\tHello for the first time...")
print("s2 received\t\t{}".format(r1.decode("UTF-8")))
print("s2 sent\t\t...and hello for the last!")
print("s2 received\t\t{}".format(r2.decode("UTF-8")))
| gragas/simpletcp | examples/echoreverse/client.py | Python | apache-2.0 | 664 |
ACCOUNT_NAME = 'Bosch'
| 0--key/lib | portfolio/Python/scrapy/bosch_russian/__init__.py | Python | apache-2.0 | 23 |
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import textwrap
LOG = logging.getLogger(__name__)
def do_get_shell_config(self, config_name):
  """Display shell configuration as a KEY/VALUE table.

  config_name -- when falsy, every config entry is listed; otherwise only
  that single key is shown (unknown keys yield an empty table).
  """
  # Bug fix: header label was misspelled "VAlUE".
  headers = ["KEY", "VALUE"]
  if not config_name:
    rows = [[key, value] for key, value in self.global_shell_config.items()]
  else:
    rows = []
    if config_name in self.global_shell_config:
      rows.append([config_name, self.global_shell_config[config_name]])
  self.generate_output(headers, rows)
def do_set_shell_config(self, config=None):
  """Set one shell config value from a "<name> <value>" argument, then echo
  the full configuration.

  Shows usage help when the argument is missing or malformed. Unknown keys
  are silently ignored (matching do_get_shell_config's behavior).
  """
  # Bug fix: the default config=None previously crashed on config.split().
  if not config:
    self.help_set_shell_config()
    return
  kv = config.split(" ")
  if len(kv) != 2:
    self.help_set_shell_config()
    return
  config_name = kv[0]
  config_value = kv[1]
  if config_name in self.global_shell_config.keys():
    self.global_shell_config[config_name] = config_value
  self.do_get_shell_config(config_name=None)
def help_get_shell_config(self):
  # CLI help text picked up by the shell's built-in `help` machinery
  # (python 2 print statement).
  print textwrap.dedent("""
        Usage:
            > get_shell_config <config_name> get all shell config
        """)
def help_set_shell_config(self):
  # CLI help text picked up by the shell's built-in `help` machinery
  # (python 2 print statement).
  print textwrap.dedent("""
        Usage:
            > set_shell_config <config_name> <config_value> sets shell config
        """)
def complete_get_shell_config(self, pattern, line, start_index, end_index):
  """Tab-completion hook for get_shell_config.

  With a pattern prefix, offer matching config keys; with no pattern, fall
  back to the cluster name list.
  """
  if not pattern:
    return self.CLUSTERS
  return [key for key in self.global_shell_config.keys()
          if key.startswith(pattern)]
def complete_set_shell_config(self, pattern, line, start_index, end_index):
  """Tab-completion hook for set_shell_config.

  Mirrors complete_get_shell_config: config keys matching the prefix, or the
  cluster name list when no prefix has been typed yet.
  """
  if not pattern:
    return self.CLUSTERS
  return [key for key in self.global_shell_config.keys()
          if key.startswith(pattern)]
| radicalbit/ambari | ambari-shell/ambari-python-shell/src/main/python/ambari_shell/plugins/shell_config.py | Python | apache-2.0 | 2,414 |
#!/usr/bin/env python
# vim:ts=4:sw=4:et:
# no unicode literals
from __future__ import absolute_import, division, print_function
import argparse
import json
import math
import multiprocessing
import os
import os.path
import random
import shutil
import signal
import subprocess
import sys
import tempfile
import threading
import time
import traceback
# in the FB internal test infra, ensure that we are running from the
# dir that houses this script rather than some other higher level dir
# in the containing tree. We can't use __file__ to determine this
# because our PAR machinery can generate a name like /proc/self/fd/3/foo
# which won't resolve to anything useful by the time we get here.
if not os.path.exists("runtests.py") and os.path.exists("watchman/runtests.py"):
os.chdir("watchman")
try:
import unittest2 as unittest
except ImportError:
import unittest
# Ensure that we can find pywatchman and integration tests (if we're not the
# main module, a wrapper is probably loading us up and we shouldn't screw around
# with sys.path).
if __name__ == "__main__":
sys.path.insert(0, os.path.join(os.getcwd(), "python"))
sys.path.insert(1, os.path.join(os.getcwd(), "tests", "integration"))
sys.path.insert(1, os.path.join(os.getcwd(), "tests", "integration", "facebook"))
# Only Python 3.5+ supports native asyncio
has_asyncio = sys.version_info >= (3, 5)
if has_asyncio:
sys.path.insert(0, os.path.join(os.getcwd(), "tests", "async"))
import asyncio
try:
import queue
except Exception:
import Queue
queue = Queue
parser = argparse.ArgumentParser(
description="Run the watchman unit and integration tests"
)
parser.add_argument("-v", "--verbosity", default=2, help="test runner verbosity")
parser.add_argument(
"--keep",
action="store_true",
help="preserve all temporary files created during test execution",
)
parser.add_argument(
"--keep-if-fail",
action="store_true",
help="preserve all temporary files created during test execution if failed",
)
parser.add_argument("files", nargs="*", help="specify which test files to run")
parser.add_argument(
"--method", action="append", help="specify which python test method names to run"
)
def default_concurrency():
    """Pick how many tests to run in parallel by default.

    Python 2.7 hangs when we use threads (https://bugs.python.org/issue20318),
    so parallelism is only enabled on Python 3.
    """
    if sys.version_info < (3, 0):
        return 1
    level = min(4, math.ceil(1.5 * multiprocessing.cpu_count()))
    if "CIRCLECI" in os.environ:
        # Use fewer cores in circle CI because the inotify sysctls
        # are pretty low, and we sometimes hit those limits.
        level = level / 2
    return int(level)
parser.add_argument(
"--concurrency",
default=default_concurrency(),
type=int,
help="How many tests to run at once",
)
parser.add_argument(
"--watcher",
action="store",
default="auto",
help="Specify which watcher should be used to run the tests",
)
parser.add_argument(
"--debug-watchman",
action="store_true",
help="Pauses start up and prints out the PID for watchman server process."
+ "Forces concurrency to 1.",
)
parser.add_argument(
"--watchman-path", action="store", help="Specify the path to the watchman binary"
)
parser.add_argument(
"--win7", action="store_true", help="Set env to force win7 compatibility tests"
)
parser.add_argument(
"--retry-flaky",
action="store",
type=int,
default=2,
help="How many additional times to retry flaky tests.",
)
parser.add_argument(
"--testpilot-json",
action="store_true",
help="Output test results in Test Pilot JSON format",
)
parser.add_argument(
"--pybuild-dir",
action="store",
help="For out-of-src-tree builds, where the generated python lives",
)
args = parser.parse_args()
if args.pybuild_dir is not None:
sys.path.insert(0, os.path.realpath(args.pybuild_dir))
# Import our local stuff after we've had a chance to look at args.pybuild_dir.
# The `try` block prevents the imports from being reordered
try:
import Interrupt
import TempDir
import WatchmanInstance
import pywatchman
except ImportError:
raise
# We test for this in a test case
os.environ["WATCHMAN_EMPTY_ENV_VAR"] = ""
os.environ["HGUSER"] = "John Smith <smith@example.com>"
os.environ["NOSCMLOG"] = "1"
os.environ["WATCHMAN_NO_SPAWN"] = "1"
if args.win7:
os.environ["WATCHMAN_WIN7_COMPAT"] = "1"
# Ensure that we find the watchman we built in the tests
if args.watchman_path:
args.watchman_path = os.path.realpath(args.watchman_path)
bin_dir = os.path.dirname(args.watchman_path)
os.environ["WATCHMAN_BINARY"] = args.watchman_path
else:
bin_dir = os.path.dirname(__file__)
os.environ["PYWATCHMAN_PATH"] = os.path.join(os.getcwd(), "python")
os.environ["WATCHMAN_PYTHON_BIN"] = os.path.abspath(
os.path.join(os.getcwd(), "python", "bin")
)
os.environ["PATH"] = "%s%s%s" % (
os.path.abspath(bin_dir),
os.pathsep,
os.environ["PATH"],
)
# We'll put all our temporary stuff under one dir so that we
# can clean it all up at the end
temp_dir = TempDir.get_temp_dir(args.keep)
def interrupt_handler(signo, frame):
    # SIGINT handler: record the interrupt in the shared Interrupt flag so
    # runners polling Interrupt.wasInterrupted() can stop gracefully instead
    # of dying mid-run.
    Interrupt.setInterrupted()
signal.signal(signal.SIGINT, interrupt_handler)
class Result(unittest.TestResult):
    # Make it easier to spot success/failure by coloring the status
    # green for pass, red for fail and yellow for skip.
    # also print the elapsed time per test
    transport = None
    encoding = None
    # Retry counter set via setAttemptNumber(); shown in the status line.
    attempt = 0
    def shouldStop(self):
        # NOTE(review): unittest.TestResult sets self.shouldStop = False as an
        # instance attribute in __init__, which shadows this method on
        # instances, and super(...).shouldStop is that attribute (not
        # callable) — confirm this override is actually reachable.
        if Interrupt.wasInterrupted():
            return True
        return super(Result, self).shouldStop()
    def startTest(self, test):
        # Record wall-clock start so per-test elapsed time can be reported.
        self.startTime = time.time()
        super(Result, self).startTest(test)
    def addSuccess(self, test):
        elapsed = time.time() - self.startTime
        super(Result, self).addSuccess(test)
        if args.testpilot_json:
            # Machine-readable output for Test Pilot.
            print(
                json.dumps(
                    {
                        "op": "test_done",
                        "status": "passed",
                        "test": test.id(),
                        "start_time": self.startTime,
                        "end_time": time.time(),
                    }
                )
            )
        else:
            print(
                "\033[32mPASS\033[0m %s (%.3fs)%s"
                % (test.id(), elapsed, self._attempts())
            )
    def addSkip(self, test, reason):
        elapsed = time.time() - self.startTime
        super(Result, self).addSkip(test, reason)
        if args.testpilot_json:
            print(
                json.dumps(
                    {
                        "op": "test_done",
                        "status": "skipped",
                        "test": test.id(),
                        "details": reason,
                        "start_time": self.startTime,
                        "end_time": time.time(),
                    }
                )
            )
        else:
            print("\033[33mSKIP\033[0m %s (%.3fs) %s" % (test.id(), elapsed, reason))
    def __printFail(self, test, err):
        # Shared reporting for failures and errors; err is a sys.exc_info()
        # triple.
        elapsed = time.time() - self.startTime
        t, val, trace = err
        if args.testpilot_json:
            print(
                json.dumps(
                    {
                        "op": "test_done",
                        "status": "failed",
                        "test": test.id(),
                        "details": "".join(traceback.format_exception(t, val, trace)),
                        "start_time": self.startTime,
                        "end_time": time.time(),
                    }
                )
            )
        else:
            print(
                "\033[31mFAIL\033[0m %s (%.3fs)%s\n%s"
                % (
                    test.id(),
                    elapsed,
                    self._attempts(),
                    "".join(traceback.format_exception(t, val, trace)),
                )
            )
    def addFailure(self, test, err):
        self.__printFail(test, err)
        super(Result, self).addFailure(test, err)
    def addError(self, test, err):
        self.__printFail(test, err)
        super(Result, self).addError(test, err)
    def setAttemptNumber(self, attempt):
        # Called by the flaky-test retry machinery before re-running a test.
        self.attempt = attempt
    def _attempts(self):
        # Suffix for the status line when a test needed retries.
        if self.attempt > 0:
            return " (%d attempts)" % self.attempt
        return ""
def expandFilesList(files):
    """Expand any directory names in `files` into a full list of files.

    Non-directory entries are passed through unchanged (path-normalized).
    Hidden files (leading dot) inside expanded directories are skipped.

    Fix: the original iterated the module-level `args.files` instead of the
    `files` parameter (and the inner `os.walk` loop shadowed the parameter).
    Behavior is unchanged for the existing call site, which passes
    `args.files`, but the function now honors its argument.
    """
    res = []
    for entry in files:
        if os.path.isdir(entry):
            for dirname, _dirs, filenames in os.walk(entry):
                for f in filenames:
                    if not f.startswith("."):
                        res.append(os.path.normpath(os.path.join(dirname, f)))
        else:
            res.append(os.path.normpath(entry))
    return res
# Normalize any directory arguments into concrete file lists up front.
if args.files:
    args.files = expandFilesList(args.files)
def shouldIncludeTestFile(filename):
    """Loader predicate: should tests from this file be considered?

    Honors the --files and --method command line filters carried by the
    module-level `args`.
    """
    global args
    normalized = os.path.relpath(filename.replace(".pyc", ".py"))
    if args.files:
        # An explicit file list was supplied; only exact matches qualify.
        return any(candidate == normalized for candidate in args.files)
    if args.method and not normalized.endswith(".py"):
        # Method filtering implies python tests only.
        return False
    return True
def shouldIncludeTestName(name):
    """Loader predicate: should a test with this name be run?

    With --method filters a test runs if any filter string occurs anywhere
    in its name; with no filters everything runs.
    """
    global args
    if not args.method:
        return True
    # The strict original interpretation of this flag was pretty difficult
    # to use in practice, so we also allow substring matches against the
    # entire test name.
    return any(fragment in name for fragment in args.method)
class Loader(unittest.TestLoader):
    """A TestLoader that honors the configured subset of tests to run."""

    def __init__(self):
        super(Loader, self).__init__()

    def loadTestsFromTestCase(self, testCaseClass):
        return super(Loader, self).loadTestsFromTestCase(testCaseClass)

    def getTestCaseNames(self, testCaseClass):
        # Filter out test method names excluded by --method.
        all_names = super(Loader, self).getTestCaseNames(testCaseClass)
        return filter(shouldIncludeTestName, all_names)

    def loadTestsFromModule(self, module, *args, **kw):
        # Skip whole modules excluded by --files / --method.
        if shouldIncludeTestFile(module.__file__):
            return super(Loader, self).loadTestsFromModule(module, *args, **kw)
        return unittest.TestSuite()
# Discover every test module into one suite.
loader = Loader()
suite = unittest.TestSuite()

directories = ["python/tests", "tests/integration"]
facebook_directory = "tests/integration/facebook"
if os.path.exists(facebook_directory):
    # the facebook dir isn't sync'd to github, but it
    # is present internally, so it should remain in this list
    directories += [facebook_directory]
if has_asyncio:
    directories += ["tests/async"]
for d in directories:
    suite.addTests(loader.discover(d, top_level_dir=d))

# Glob pattern for the compiled C test binaries.
# NOTE(review): t_globs is not referenced in this portion of the file —
# presumably consumed elsewhere; verify before removing.
if os.name == "nt":
    t_globs = "tests/*.exe"
else:
    t_globs = "tests/*.t"

# Per-thread storage for the softspace emulation in ThreadSafeFile below.
tls = threading.local()
# Manage printing from concurrent threads
# http://stackoverflow.com/a/3030755/149111
class ThreadSafeFile(object):
    """Wraps a file object with a re-entrant lock so that a sequence of
    writes ending in a newline forms one atomic line, even when several
    runner threads print concurrently."""

    def __init__(self, f):
        self.f = f
        self.lock = threading.RLock()
        # How many times the current thread has re-acquired the lock;
        # all holds are released together when a newline is written.
        self.nesting = 0

    def _getlock(self):
        self.lock.acquire()
        self.nesting += 1

    def _droplock(self):
        # Release every nested hold at once.
        nesting = self.nesting
        self.nesting = 0
        for _ in range(nesting):
            self.lock.release()

    def __getattr__(self, name):
        # `softspace` is the print-statement bookkeeping attribute; keep it
        # per-thread so interleaved prints don't corrupt each other.
        if name == "softspace":
            return tls.softspace
        else:
            raise AttributeError(name)

    def __setattr__(self, name, value):
        if name == "softspace":
            tls.softspace = value
        else:
            return object.__setattr__(self, name, value)

    def write(self, data):
        # Hold the lock across writes until the newline that ends the line.
        self._getlock()
        self.f.write(data)
        if data == "\n":
            self._droplock()

    def flush(self):
        self._getlock()
        self.f.flush()
        self._droplock()
# Route all printing through the line-atomic wrapper.
sys.stdout = ThreadSafeFile(sys.stdout)

# Work queue consumed by runner threads and the queue their Results land on.
tests_queue = queue.Queue()
results_queue = queue.Queue()
def runner():
    """Worker thread body.

    Owns one watchman server instance, consumes tests from tests_queue
    until the "terminate" sentinel arrives, retries flaky tests up to
    args.retry_flaky times, and pushes one Result per test onto
    results_queue.
    """
    global results_queue
    global tests_queue

    broken = False
    try:
        # Start up a shared watchman instance for the tests.
        inst = WatchmanInstance.Instance(
            {"watcher": args.watcher}, debug_watchman=args.debug_watchman
        )
        inst.start()
        # Allow tests to locate this default instance
        WatchmanInstance.setSharedInstance(inst)
        if has_asyncio:
            # Each thread will have its own event loop
            asyncio.set_event_loop(asyncio.new_event_loop())
    except Exception as e:
        print("while starting watchman: %s" % str(e))
        traceback.print_exc()
        broken = True

    while not broken:
        test = tests_queue.get()
        try:
            if test == "terminate":
                break
            if Interrupt.wasInterrupted() or broken:
                # Still drain the queue so task_done() keeps the counts right.
                continue
            result = None
            for attempt in range(0, args.retry_flaky + 1):
                # Check liveness of the server
                try:
                    client = pywatchman.client(timeout=3.0, sockpath=inst.getSockPath())
                    client.query("version")
                    client.close()
                except Exception as exc:
                    # Server died (possibly killed by a previous test);
                    # replace it before running the next test.
                    print(
                        "Failed to connect to watchman server: %s; starting a new one"
                        % exc
                    )
                    try:
                        inst.stop()
                    except Exception:
                        pass
                    try:
                        inst = WatchmanInstance.Instance(
                            {"watcher": args.watcher},
                            debug_watchman=args.debug_watchman,
                        )
                        inst.start()
                        # Allow tests to locate this default instance
                        WatchmanInstance.setSharedInstance(inst)
                    except Exception as e:
                        print("while starting watchman: %s" % str(e))
                        traceback.print_exc()
                        broken = True
                        continue
                try:
                    result = Result()
                    result.setAttemptNumber(attempt)
                    if hasattr(test, "setAttemptNumber"):
                        test.setAttemptNumber(attempt)
                    test.run(result)
                    if hasattr(test, "setAttemptNumber") and not result.wasSuccessful():
                        # Facilitate retrying this possibly flaky test
                        continue
                    break
                except Exception as e:
                    print(e)
                    if hasattr(test, "setAttemptNumber") and not result.wasSuccessful():
                        # Facilitate retrying this possibly flaky test
                        continue
            if (
                not result.wasSuccessful()
                and "TRAVIS" in os.environ
                and hasattr(test, "dumpLogs")
            ):
                test.dumpLogs()
            results_queue.put(result)
        finally:
            tests_queue.task_done()

    if not broken:
        inst.stop()
def expand_suite(suite, target=None):
""" recursively expand a TestSuite into a list of TestCase """
if target is None:
target = []
for test in suite:
if isinstance(test, unittest.TestSuite):
expand_suite(test, target)
else:
target.append(test)
# randomize both because we don't want tests to have relatively
# dependency ordering and also because this can help avoid clumping
# longer running tests together
random.shuffle(target)
return target
def queue_jobs(tests):
    """Feed every test into the shared work queue for the runner threads."""
    for item in tests:
        tests_queue.put(item)
all_tests = expand_suite(suite)
# With --debug-watchman a human is attached to one server; force serial.
if args.debug_watchman:
    args.concurrency = 1
elif len(all_tests) < args.concurrency:
    args.concurrency = len(all_tests)

queue_jobs(all_tests)

if args.concurrency > 1:
    for _ in range(args.concurrency):
        t = threading.Thread(target=runner)
        t.daemon = True
        t.start()
        # also send a termination sentinel
        tests_queue.put("terminate")
    # Wait for all tests to have been dispatched
    tests_queue.join()
else:
    # add a termination sentinel
    tests_queue.put("terminate")
    runner()

# Now pull out and aggregate the results
tests_run = 0
tests_failed = 0
tests_skipped = 0
while not results_queue.empty():
    res = results_queue.get()
    tests_run = tests_run + res.testsRun
    tests_failed = tests_failed + len(res.errors) + len(res.failures)
    tests_skipped = tests_skipped + len(res.skipped)

if not args.testpilot_json:
    print(
        "Ran %d, failed %d, skipped %d, concurrency %d"
        % (tests_run, tests_failed, tests_skipped, args.concurrency)
    )

# CI log archival: AppVeyor uploads a zip of the temp logs...
if "APPVEYOR" in os.environ:
    logdir = "logs7" if args.win7 else "logs"
    logzip = "%s.zip" % logdir
    shutil.copytree(tempfile.tempdir, logdir)
    subprocess.call(["7z", "a", logzip, logdir])
    subprocess.call(["appveyor", "PushArtifact", logzip])

# ...and CircleCI collects artifacts from CIRCLE_ARTIFACTS.
if "CIRCLE_ARTIFACTS" in os.environ:
    print("Creating %s/logs.zip" % os.environ["CIRCLE_ARTIFACTS"])
    subprocess.call(
        [
            "zip",
            "-q",
            "-r",
            "%s/logs.zip" % os.environ["CIRCLE_ARTIFACTS"],
            temp_dir.get_dir(),
        ]
    )

# Zero tests run is treated as a failure (likely a discovery bug).
if tests_failed or (tests_run == 0):
    if args.keep_if_fail:
        temp_dir.set_keep(True)
    if args.testpilot_json:
        # When outputting JSON, our return code indicates if we successfully
        # produced output or not, not whether the tests passed. The JSON
        # output contains the detailed test pass/failure information.
        sys.exit(0)
    sys.exit(1)
| nodakai/watchman | runtests.py | Python | apache-2.0 | 18,169 |
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import netaddr
from neutron.agent.common import utils # noqa
from neutron.agent.linux import ip_lib
from neutron.common import exceptions
from neutron.tests import base
NETNS_SAMPLE = [
'12345678-1234-5678-abcd-1234567890ab',
'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
'cccccccc-cccc-cccc-cccc-cccccccccccc']
LINK_SAMPLE = [
'1: lo: <LOOPBACK,UP,LOWER_UP> mtu 16436 qdisc noqueue state UNKNOWN \\'
'link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 promiscuity 0',
'2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP '
'qlen 1000\ link/ether cc:dd:ee:ff:ab:cd brd ff:ff:ff:ff:ff:ff'
'\ alias openvswitch',
'3: br-int: <BROADCAST,MULTICAST> mtu 1500 qdisc noop state DOWN '
'\ link/ether aa:bb:cc:dd:ee:ff brd ff:ff:ff:ff:ff:ff promiscuity 0',
'4: gw-ddc717df-49: <BROADCAST,MULTICAST> mtu 1500 qdisc noop '
'state DOWN \ link/ether fe:dc:ba:fe:dc:ba brd ff:ff:ff:ff:ff:ff '
'promiscuity 0',
'5: foo:foo: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state '
'UP qlen 1000\ link/ether cc:dd:ee:ff:ab:cd brd ff:ff:ff:ff:ff:ff '
'promiscuity 0',
'6: foo@foo: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state '
'UP qlen 1000\ link/ether cc:dd:ee:ff:ab:cd brd ff:ff:ff:ff:ff:ff '
'promiscuity 0',
'7: foo:foo@foo: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq '
'state UP qlen 1000'
'\ link/ether cc:dd:ee:ff:ab:cd brd ff:ff:ff:ff:ff:ff promiscuity 0',
'8: foo@foo:foo: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq '
'state UP qlen 1000'
'\ link/ether cc:dd:ee:ff:ab:cd brd ff:ff:ff:ff:ff:ff promiscuity 0',
'9: bar.9@eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc '
' noqueue master brq0b24798c-07 state UP mode DEFAULT'
'\ link/ether ab:04:49:b6:ab:a0 brd ff:ff:ff:ff:ff:ff promiscuity 0'
'\ vlan protocol 802.1q id 9 <REORDER_HDR>',
'10: bar@eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc '
' noqueue master brq0b24798c-07 state UP mode DEFAULT'
'\ link/ether ab:04:49:b6:ab:a0 brd ff:ff:ff:ff:ff:ff promiscuity 0'
'\ vlan protocol 802.1Q id 10 <REORDER_HDR>',
'11: bar:bar@eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq '
'state UP qlen 1000'
'\ link/ether cc:dd:ee:ff:ab:cd brd ff:ff:ff:ff:ff:ff promiscuity 0'
'\ vlan id 11 <REORDER_HDR>',
'12: bar@bar@eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq '
'state UP qlen 1000'
'\ link/ether cc:dd:ee:ff:ab:cd brd ff:ff:ff:ff:ff:ff promiscuity 0'
'\ vlan id 12 <REORDER_HDR>',
'13: bar:bar@bar@eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 '
'qdisc mq state UP qlen 1000'
'\ link/ether cc:dd:ee:ff:ab:cd brd ff:ff:ff:ff:ff:ff promiscuity 0'
'\ vlan protocol 802.1q id 13 <REORDER_HDR>',
'14: bar@bar:bar@eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 '
'qdisc mq state UP qlen 1000'
'\ link/ether cc:dd:ee:ff:ab:cd brd ff:ff:ff:ff:ff:ff promiscuity 0'
'\ vlan protocol 802.1Q id 14 <REORDER_HDR>']
ADDR_SAMPLE = ("""
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP qlen 1000
link/ether dd:cc:aa:b9:76:ce brd ff:ff:ff:ff:ff:ff
inet 172.16.77.240/24 brd 172.16.77.255 scope global eth0
inet6 2001:470:9:1224:5595:dd51:6ba2:e788/64 scope global temporary dynamic
valid_lft 14187sec preferred_lft 3387sec
inet6 2001:470:9:1224:fd91:272:581e:3a32/64 scope global temporary """
"""deprecated dynamic
valid_lft 14187sec preferred_lft 0sec
inet6 2001:470:9:1224:4508:b885:5fb:740b/64 scope global temporary """
"""deprecated dynamic
valid_lft 14187sec preferred_lft 0sec
inet6 2001:470:9:1224:dfcc:aaff:feb9:76ce/64 scope global dynamic
valid_lft 14187sec preferred_lft 3387sec
inet6 fe80::dfcc:aaff:feb9:76ce/64 scope link
valid_lft forever preferred_lft forever
""")
ADDR_SAMPLE2 = ("""
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP qlen 1000
link/ether dd:cc:aa:b9:76:ce brd ff:ff:ff:ff:ff:ff
inet 172.16.77.240/24 scope global eth0
inet6 2001:470:9:1224:5595:dd51:6ba2:e788/64 scope global temporary dynamic
valid_lft 14187sec preferred_lft 3387sec
inet6 2001:470:9:1224:fd91:272:581e:3a32/64 scope global temporary """
"""deprecated dynamic
valid_lft 14187sec preferred_lft 0sec
inet6 2001:470:9:1224:4508:b885:5fb:740b/64 scope global temporary """
"""deprecated dynamic
valid_lft 14187sec preferred_lft 0sec
inet6 2001:470:9:1224:dfcc:aaff:feb9:76ce/64 scope global dynamic
valid_lft 14187sec preferred_lft 3387sec
inet6 fe80::dfcc:aaff:feb9:76ce/64 scope link
valid_lft forever preferred_lft forever
""")
GATEWAY_SAMPLE1 = ("""
default via 10.35.19.254 metric 100
10.35.16.0/22 proto kernel scope link src 10.35.17.97
""")
GATEWAY_SAMPLE2 = ("""
default via 10.35.19.254 metric 100
""")
GATEWAY_SAMPLE3 = ("""
10.35.16.0/22 proto kernel scope link src 10.35.17.97
""")
GATEWAY_SAMPLE4 = ("""
default via 10.35.19.254
""")
GATEWAY_SAMPLE5 = ("""
default via 192.168.99.1 proto static
""")
GATEWAY_SAMPLE6 = ("""
default via 192.168.99.1 proto static metric 100
""")
IPv6_GATEWAY_SAMPLE1 = ("""
default via 2001:470:9:1224:4508:b885:5fb:740b metric 100
2001:db8::/64 proto kernel scope link src 2001:470:9:1224:dfcc:aaff:feb9:76ce
""")
IPv6_GATEWAY_SAMPLE2 = ("""
default via 2001:470:9:1224:4508:b885:5fb:740b metric 100
""")
IPv6_GATEWAY_SAMPLE3 = ("""
2001:db8::/64 proto kernel scope link src 2001:470:9:1224:dfcc:aaff:feb9:76ce
""")
IPv6_GATEWAY_SAMPLE4 = ("""
default via fe80::dfcc:aaff:feb9:76ce
""")
IPv6_GATEWAY_SAMPLE5 = ("""
default via 2001:470:9:1224:4508:b885:5fb:740b metric 1024
""")
DEVICE_ROUTE_SAMPLE = ("10.0.0.0/24 scope link src 10.0.0.2")
SUBNET_SAMPLE1 = ("10.0.0.0/24 dev qr-23380d11-d2 scope link src 10.0.0.1\n"
"10.0.0.0/24 dev tap1d7888a7-10 scope link src 10.0.0.2")
SUBNET_SAMPLE2 = ("10.0.0.0/24 dev tap1d7888a7-10 scope link src 10.0.0.2\n"
"10.0.0.0/24 dev qr-23380d11-d2 scope link src 10.0.0.1")
RULE_V4_SAMPLE = ("""
0: from all lookup local
32766: from all lookup main
32767: from all lookup default
101: from 192.168.45.100 lookup 2
""")
RULE_V6_SAMPLE = ("""
0: from all lookup local
32766: from all lookup main
32767: from all lookup default
201: from 2001:db8::1 lookup 3
""")
class TestSubProcessBase(base.BaseTestCase):
    """Verify SubProcessBase builds the right `ip` command lines."""

    def setUp(self):
        super(TestSubProcessBase, self).setUp()
        self.execute_p = mock.patch('neutron.agent.common.utils.execute')
        self.execute = self.execute_p.start()
        # Fix: stop the patcher at teardown so the mocked execute() does
        # not leak into test cases that run after this one.
        self.addCleanup(self.execute_p.stop)

    def test_execute_wrapper(self):
        ip_lib.SubProcessBase._execute(['o'], 'link', ('list',),
                                       run_as_root=True)
        self.execute.assert_called_once_with(['ip', '-o', 'link', 'list'],
                                             run_as_root=True,
                                             log_fail_as_error=True)

    def test_execute_wrapper_int_options(self):
        # Integer options (address family) are rendered as '-4' / '-6'.
        ip_lib.SubProcessBase._execute([4], 'link', ('list',))
        self.execute.assert_called_once_with(['ip', '-4', 'link', 'list'],
                                             run_as_root=False,
                                             log_fail_as_error=True)

    def test_execute_wrapper_no_options(self):
        ip_lib.SubProcessBase._execute([], 'link', ('list',))
        self.execute.assert_called_once_with(['ip', 'link', 'list'],
                                             run_as_root=False,
                                             log_fail_as_error=True)

    def test_run_no_namespace(self):
        # Renamed local from `base` to avoid shadowing the imported
        # neutron.tests.base module.
        subproc = ip_lib.SubProcessBase()
        subproc._run([], 'link', ('list',))
        self.execute.assert_called_once_with(['ip', 'link', 'list'],
                                             run_as_root=False,
                                             log_fail_as_error=True)

    def test_run_namespace(self):
        # A namespace forces `ip netns exec <ns> ...` and root privileges.
        subproc = ip_lib.SubProcessBase(namespace='ns')
        subproc._run([], 'link', ('list',))
        self.execute.assert_called_once_with(['ip', 'netns', 'exec', 'ns',
                                              'ip', 'link', 'list'],
                                             run_as_root=True,
                                             log_fail_as_error=True)

    def test_as_root_namespace(self):
        subproc = ip_lib.SubProcessBase(namespace='ns')
        subproc._as_root([], 'link', ('list',))
        self.execute.assert_called_once_with(['ip', 'netns', 'exec', 'ns',
                                              'ip', 'link', 'list'],
                                             run_as_root=True,
                                             log_fail_as_error=True)
class TestIpWrapper(base.BaseTestCase):
    """Tests for ip_lib.IPWrapper with the `ip` executable fully mocked."""

    def setUp(self):
        super(TestIpWrapper, self).setUp()
        self.execute_p = mock.patch.object(ip_lib.IPWrapper, '_execute')
        self.execute = self.execute_p.start()
        # Fix: stop the patcher at teardown so the patched _execute does
        # not leak into test cases that run after this class.
        self.addCleanup(self.execute_p.stop)

    @mock.patch('os.path.islink')
    @mock.patch('os.listdir', return_value=['lo'])
    def test_get_devices(self, mocked_listdir, mocked_islink):
        retval = ip_lib.IPWrapper().get_devices()
        mocked_islink.assert_called_once_with('/sys/class/net/lo')
        self.assertEqual(retval, [ip_lib.IPDevice('lo')])

    @mock.patch('neutron.agent.common.utils.execute')
    def test_get_devices_namespaces(self, mocked_execute):
        fake_str = mock.Mock()
        fake_str.split.return_value = ['lo']
        mocked_execute.return_value = fake_str
        retval = ip_lib.IPWrapper(namespace='foo').get_devices()
        # In a namespace the device list comes from `find /sys/class/net`.
        mocked_execute.assert_called_once_with(
            ['ip', 'netns', 'exec', 'foo', 'find', '/sys/class/net',
             '-maxdepth', '1', '-type', 'l', '-printf', '%f '],
            run_as_root=True, log_fail_as_error=True)
        self.assertTrue(fake_str.split.called)
        self.assertEqual(retval, [ip_lib.IPDevice('lo', namespace='foo')])

    def test_get_namespaces(self):
        self.execute.return_value = '\n'.join(NETNS_SAMPLE)
        retval = ip_lib.IPWrapper.get_namespaces()
        self.assertEqual(retval,
                         ['12345678-1234-5678-abcd-1234567890ab',
                          'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
                          'cccccccc-cccc-cccc-cccc-cccccccccccc'])
        self.execute.assert_called_once_with([], 'netns', ('list',))

    def test_add_tuntap(self):
        ip_lib.IPWrapper().add_tuntap('tap0')
        self.execute.assert_called_once_with([], 'tuntap',
                                             ('add', 'tap0', 'mode', 'tap'),
                                             run_as_root=True, namespace=None,
                                             log_fail_as_error=True)

    def test_add_veth(self):
        ip_lib.IPWrapper().add_veth('tap0', 'tap1')
        self.execute.assert_called_once_with([], 'link',
                                             ('add', 'tap0', 'type', 'veth',
                                              'peer', 'name', 'tap1'),
                                             run_as_root=True, namespace=None,
                                             log_fail_as_error=True)

    def test_del_veth(self):
        ip_lib.IPWrapper().del_veth('fpr-1234')
        self.execute.assert_called_once_with([], 'link',
                                             ('del', 'fpr-1234'),
                                             run_as_root=True, namespace=None,
                                             log_fail_as_error=True)

    def test_add_veth_with_namespaces(self):
        ns2 = 'ns2'
        with mock.patch.object(ip_lib.IPWrapper, 'ensure_namespace') as en:
            ip_lib.IPWrapper().add_veth('tap0', 'tap1', namespace2=ns2)
            # The peer's namespace must be created before the veth pair.
            en.assert_has_calls([mock.call(ns2)])
        self.execute.assert_called_once_with([], 'link',
                                             ('add', 'tap0', 'type', 'veth',
                                              'peer', 'name', 'tap1',
                                              'netns', ns2),
                                             run_as_root=True, namespace=None,
                                             log_fail_as_error=True)

    def test_get_device(self):
        dev = ip_lib.IPWrapper(namespace='ns').device('eth0')
        self.assertEqual(dev.namespace, 'ns')
        self.assertEqual(dev.name, 'eth0')

    def test_ensure_namespace(self):
        with mock.patch.object(ip_lib, 'IPDevice') as ip_dev:
            ip = ip_lib.IPWrapper()
            with mock.patch.object(ip.netns, 'exists') as ns_exists:
                with mock.patch('neutron.agent.common.utils.execute'):
                    ns_exists.return_value = False
                    ip.ensure_namespace('ns')
                    self.execute.assert_has_calls(
                        [mock.call([], 'netns', ('add', 'ns'),
                                   run_as_root=True, namespace=None,
                                   log_fail_as_error=True)])
                    # A fresh namespace gets its loopback brought up.
                    ip_dev.assert_has_calls([mock.call('lo', namespace='ns'),
                                             mock.call().link.set_up()])

    def test_ensure_namespace_existing(self):
        with mock.patch.object(ip_lib, 'IpNetnsCommand') as ip_ns_cmd:
            ip_ns_cmd.exists.return_value = True
            ns = ip_lib.IPWrapper().ensure_namespace('ns')
            # No `netns add` should have been issued for an existing ns.
            self.assertFalse(self.execute.called)
            self.assertEqual(ns.namespace, 'ns')

    def test_namespace_is_empty_no_devices(self):
        ip = ip_lib.IPWrapper(namespace='ns')
        with mock.patch.object(ip, 'get_devices') as get_devices:
            get_devices.return_value = []
            self.assertTrue(ip.namespace_is_empty())
            get_devices.assert_called_once_with(exclude_loopback=True)

    def test_namespace_is_empty(self):
        ip = ip_lib.IPWrapper(namespace='ns')
        with mock.patch.object(ip, 'get_devices') as get_devices:
            get_devices.return_value = [mock.Mock()]
            self.assertFalse(ip.namespace_is_empty())
            get_devices.assert_called_once_with(exclude_loopback=True)

    def test_garbage_collect_namespace_does_not_exist(self):
        with mock.patch.object(ip_lib, 'IpNetnsCommand') as ip_ns_cmd_cls:
            ip_ns_cmd_cls.return_value.exists.return_value = False
            ip = ip_lib.IPWrapper(namespace='ns')
            with mock.patch.object(ip, 'namespace_is_empty') as mock_is_empty:
                self.assertFalse(ip.garbage_collect_namespace())
                ip_ns_cmd_cls.assert_has_calls([mock.call().exists('ns')])
                # Neither emptiness check nor delete for a missing namespace.
                self.assertNotIn(mock.call().delete('ns'),
                                 ip_ns_cmd_cls.return_value.mock_calls)
                self.assertEqual(mock_is_empty.mock_calls, [])

    def test_garbage_collect_namespace_existing_empty_ns(self):
        with mock.patch.object(ip_lib, 'IpNetnsCommand') as ip_ns_cmd_cls:
            ip_ns_cmd_cls.return_value.exists.return_value = True
            ip = ip_lib.IPWrapper(namespace='ns')
            with mock.patch.object(ip, 'namespace_is_empty') as mock_is_empty:
                mock_is_empty.return_value = True
                self.assertTrue(ip.garbage_collect_namespace())
                mock_is_empty.assert_called_once_with()
                expected = [mock.call().exists('ns'),
                            mock.call().delete('ns')]
                ip_ns_cmd_cls.assert_has_calls(expected)

    def test_garbage_collect_namespace_existing_not_empty(self):
        lo_device = mock.Mock()
        lo_device.name = 'lo'
        tap_device = mock.Mock()
        tap_device.name = 'tap1'
        with mock.patch.object(ip_lib, 'IpNetnsCommand') as ip_ns_cmd_cls:
            ip_ns_cmd_cls.return_value.exists.return_value = True
            ip = ip_lib.IPWrapper(namespace='ns')
            with mock.patch.object(ip, 'namespace_is_empty') as mock_is_empty:
                mock_is_empty.return_value = False
                # A non-empty namespace must not be deleted.
                self.assertFalse(ip.garbage_collect_namespace())
                mock_is_empty.assert_called_once_with()
                expected = [mock.call(ip),
                            mock.call().exists('ns')]
                self.assertEqual(ip_ns_cmd_cls.mock_calls, expected)
                self.assertNotIn(mock.call().delete('ns'),
                                 ip_ns_cmd_cls.mock_calls)

    def test_add_vxlan_valid_port_length(self):
        retval = ip_lib.IPWrapper().add_vxlan('vxlan0', 'vni0',
                                              group='group0',
                                              dev='dev0', ttl='ttl0',
                                              tos='tos0',
                                              local='local0', proxy=True,
                                              port=('1', '2'))
        self.assertIsInstance(retval, ip_lib.IPDevice)
        self.assertEqual(retval.name, 'vxlan0')
        self.execute.assert_called_once_with([], 'link',
                                             ['add', 'vxlan0', 'type',
                                              'vxlan', 'id', 'vni0', 'group',
                                              'group0', 'dev', 'dev0',
                                              'ttl', 'ttl0', 'tos', 'tos0',
                                              'local', 'local0', 'proxy',
                                              'port', '1', '2'],
                                             run_as_root=True, namespace=None,
                                             log_fail_as_error=True)

    def test_add_vxlan_invalid_port_length(self):
        # `port` must be a (min, max) pair; anything else is rejected.
        wrapper = ip_lib.IPWrapper()
        self.assertRaises(exceptions.NetworkVxlanPortRangeError,
                          wrapper.add_vxlan, 'vxlan0', 'vni0', group='group0',
                          dev='dev0', ttl='ttl0', tos='tos0',
                          local='local0', proxy=True,
                          port=('1', '2', '3'))

    def test_add_device_to_namespace(self):
        dev = mock.Mock()
        ip_lib.IPWrapper(namespace='ns').add_device_to_namespace(dev)
        dev.assert_has_calls([mock.call.link.set_netns('ns')])

    def test_add_device_to_namespace_is_none(self):
        # Without a namespace the device must be left untouched.
        dev = mock.Mock()
        ip_lib.IPWrapper().add_device_to_namespace(dev)
        self.assertEqual(dev.mock_calls, [])
class TestIPDevice(base.BaseTestCase):
    """Equality and string semantics of ip_lib.IPDevice."""

    def test_eq_same_name(self):
        self.assertEqual(ip_lib.IPDevice('tap0'), ip_lib.IPDevice('tap0'))

    def test_eq_diff_name(self):
        self.assertNotEqual(ip_lib.IPDevice('tap0'), ip_lib.IPDevice('tap1'))

    def test_eq_same_namespace(self):
        first = ip_lib.IPDevice('tap0', 'ns1')
        second = ip_lib.IPDevice('tap0', 'ns1')
        self.assertEqual(first, second)

    def test_eq_diff_namespace(self):
        first = ip_lib.IPDevice('tap0', namespace='ns1')
        second = ip_lib.IPDevice('tap0', namespace='ns2')
        self.assertNotEqual(first, second)

    def test_eq_other_is_none(self):
        # NOTE(review): assertIsNotNone checks identity, not __eq__, so this
        # no longer exercises IPDevice.__eq__ against None — confirm intent.
        device = ip_lib.IPDevice('tap0', namespace='ns1')
        self.assertIsNotNone(device)

    def test_str(self):
        self.assertEqual(str(ip_lib.IPDevice('tap0')), 'tap0')
class TestIPCommandBase(base.BaseTestCase):
    """IpCommandBase should forward to its parent wrapper with the COMMAND
    name inserted between options and arguments."""

    def setUp(self):
        super(TestIPCommandBase, self).setUp()
        self.ip = mock.Mock()
        self.ip.namespace = 'namespace'
        self.ip_cmd = ip_lib.IpCommandBase(self.ip)
        self.ip_cmd.COMMAND = 'foo'

    def test_run(self):
        self.ip_cmd._run([], ('link', 'show'))
        self.ip.assert_has_calls([mock.call._run([], 'foo', ('link', 'show'))])

    def test_run_with_options(self):
        # NOTE(review): ('link') is not a tuple — missing trailing comma —
        # it is just the string 'link'.  Harmless here because the
        # expectation below uses the identical value.
        self.ip_cmd._run(['o'], ('link'))
        self.ip.assert_has_calls([mock.call._run(['o'], 'foo', ('link'))])

    def test_as_root_namespace_false(self):
        self.ip_cmd._as_root([], ('link'))
        self.ip.assert_has_calls(
            [mock.call._as_root([],
                                'foo',
                                ('link'),
                                use_root_namespace=False)])

    def test_as_root_namespace_true(self):
        self.ip_cmd._as_root([], ('link'), use_root_namespace=True)
        self.ip.assert_has_calls(
            [mock.call._as_root([],
                                'foo',
                                ('link'),
                                use_root_namespace=True)])

    def test_as_root_namespace_true_with_options(self):
        self.ip_cmd._as_root('o', 'link', use_root_namespace=True)
        self.ip.assert_has_calls(
            [mock.call._as_root('o',
                                'foo',
                                ('link'),
                                use_root_namespace=True)])
class TestIPDeviceCommandBase(base.BaseTestCase):
    """IpDeviceCommandBase should expose its parent device's name."""

    def setUp(self):
        super(TestIPDeviceCommandBase, self).setUp()
        self.ip_dev = mock.Mock()
        self.ip_dev._execute = mock.Mock(return_value='executed')
        self.ip_dev.name = 'eth0'
        self.ip_cmd = ip_lib.IpDeviceCommandBase(self.ip_dev)
        self.ip_cmd.COMMAND = 'foo'

    def test_name_property(self):
        # The command object proxies the device name it was built with.
        self.assertEqual(self.ip_cmd.name, 'eth0')
class TestIPCmdBase(base.BaseTestCase):
    """Shared scaffolding for the per-command test classes below.

    Subclasses set self.command and build a command object around
    self.parent, then use the two assertion helpers.
    """

    def setUp(self):
        super(TestIPCmdBase, self).setUp()
        self.parent = mock.Mock()
        self.parent.name = 'eth0'

    def _assert_call(self, options, args):
        # The command must have been run unprivileged via _run.
        expected = [mock.call._run(options, self.command, args)]
        self.parent.assert_has_calls(expected)

    def _assert_sudo(self, options, args, use_root_namespace=False):
        # The command must have been run as root via _as_root.
        expected = [mock.call._as_root(options, self.command, args,
                                       use_root_namespace=use_root_namespace)]
        self.parent.assert_has_calls(expected)
class TestIpRuleCommand(TestIPCmdBase):
    """IpRuleCommand.add/delete, including the already-exists short-circuit
    that parses `ip rule show` output."""

    def setUp(self):
        super(TestIpRuleCommand, self).setUp()
        # `add` first runs `ip rule show`; default to "no existing rules".
        self.parent._as_root.return_value = ''
        self.command = 'rule'
        self.rule_cmd = ip_lib.IpRuleCommand(self.parent)

    def _test_add_rule(self, ip, table, priority):
        # Derive 4 vs 6 from the address so both families share one helper.
        ip_version = netaddr.IPNetwork(ip).version
        self.rule_cmd.add(ip, table, priority)
        # NOTE(review): (['show']) is just the list ['show'] — the parens
        # are not a tuple.  Kept as-is; the implementation is asserted
        # against the identical value.
        self._assert_sudo([ip_version], (['show']))
        self._assert_sudo([ip_version], ('add', 'from', ip,
                                         'table', table, 'priority', priority))

    def _test_add_rule_exists(self, ip, table, priority, output):
        # When the rule is already present in `show` output, only the
        # `show` invocation should happen — no `add`.
        self.parent._as_root.return_value = output
        ip_version = netaddr.IPNetwork(ip).version
        self.rule_cmd.add(ip, table, priority)
        self._assert_sudo([ip_version], (['show']))

    def _test_delete_rule(self, ip, table, priority):
        ip_version = netaddr.IPNetwork(ip).version
        self.rule_cmd.delete(ip, table, priority)
        self._assert_sudo([ip_version],
                          ('del', 'table', table,
                           'priority', priority))

    def test_add_rule_v4(self):
        self._test_add_rule('192.168.45.100', 2, 100)

    def test_add_rule_v4_exists(self):
        self._test_add_rule_exists('192.168.45.100', 2, 101, RULE_V4_SAMPLE)

    def test_add_rule_v6(self):
        self._test_add_rule('2001:db8::1', 3, 200)

    def test_add_rule_v6_exists(self):
        self._test_add_rule_exists('2001:db8::1', 3, 201, RULE_V6_SAMPLE)

    def test_delete_rule_v4(self):
        self._test_delete_rule('192.168.45.100', 2, 100)

    def test_delete_rule_v6(self):
        self._test_delete_rule('2001:db8::1', 3, 200)
class TestIpLinkCommand(TestIPCmdBase):
    """IpLinkCommand: mutators should shell out via sudo; properties should
    parse the canned `ip link` output."""

    def setUp(self):
        super(TestIpLinkCommand, self).setUp()
        # LINK_SAMPLE[1] is the eth0 line the property tests parse.
        self.parent._run.return_value = LINK_SAMPLE[1]
        self.command = 'link'
        self.link_cmd = ip_lib.IpLinkCommand(self.parent)

    def test_set_address(self):
        self.link_cmd.set_address('aa:bb:cc:dd:ee:ff')
        self._assert_sudo([], ('set', 'eth0', 'address', 'aa:bb:cc:dd:ee:ff'))

    def test_set_mtu(self):
        self.link_cmd.set_mtu(1500)
        self._assert_sudo([], ('set', 'eth0', 'mtu', 1500))

    def test_set_up(self):
        self.link_cmd.set_up()
        self._assert_sudo([], ('set', 'eth0', 'up'))

    def test_set_down(self):
        self.link_cmd.set_down()
        self._assert_sudo([], ('set', 'eth0', 'down'))

    def test_set_netns(self):
        self.link_cmd.set_netns('foo')
        self._assert_sudo([], ('set', 'eth0', 'netns', 'foo'))
        # Moving the device also retargets the parent wrapper's namespace.
        self.assertEqual(self.parent.namespace, 'foo')

    def test_set_name(self):
        self.link_cmd.set_name('tap1')
        self._assert_sudo([], ('set', 'eth0', 'name', 'tap1'))
        # Renaming the device updates the parent's name too.
        self.assertEqual(self.parent.name, 'tap1')

    def test_set_alias(self):
        self.link_cmd.set_alias('openvswitch')
        self._assert_sudo([], ('set', 'eth0', 'alias', 'openvswitch'))

    def test_delete(self):
        self.link_cmd.delete()
        self._assert_sudo([], ('delete', 'eth0'))

    # The property tests below each parse one field out of LINK_SAMPLE[1].

    def test_address_property(self):
        self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1])
        self.assertEqual(self.link_cmd.address, 'cc:dd:ee:ff:ab:cd')

    def test_mtu_property(self):
        self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1])
        self.assertEqual(self.link_cmd.mtu, 1500)

    def test_qdisc_property(self):
        self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1])
        self.assertEqual(self.link_cmd.qdisc, 'mq')

    def test_qlen_property(self):
        self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1])
        self.assertEqual(self.link_cmd.qlen, 1000)

    def test_alias_property(self):
        self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1])
        self.assertEqual(self.link_cmd.alias, 'openvswitch')

    def test_state_property(self):
        self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1])
        self.assertEqual(self.link_cmd.state, 'UP')

    def test_settings_property(self):
        expected = {'mtu': 1500,
                    'qlen': 1000,
                    'state': 'UP',
                    'qdisc': 'mq',
                    'brd': 'ff:ff:ff:ff:ff:ff',
                    'link/ether': 'cc:dd:ee:ff:ab:cd',
                    'alias': 'openvswitch'}
        self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1])
        self.assertEqual(self.link_cmd.attributes, expected)
        self._assert_call(['o'], ('show', 'eth0'))
class TestIpAddrCommand(TestIPCmdBase):
    """IpAddrCommand: address add/delete/flush plus parsing of `ip addr`
    output (ADDR_SAMPLE / ADDR_SAMPLE2)."""

    def setUp(self):
        super(TestIpAddrCommand, self).setUp()
        self.parent.name = 'tap0'
        self.command = 'addr'
        self.addr_cmd = ip_lib.IpAddrCommand(self.parent)

    def test_add_address(self):
        self.addr_cmd.add('192.168.45.100/24')
        self._assert_sudo([4],
                          ('add', '192.168.45.100/24',
                           'scope', 'global',
                           'dev', 'tap0',
                           'brd', '192.168.45.255'))

    def test_add_address_scoped(self):
        self.addr_cmd.add('192.168.45.100/24', scope='link')
        self._assert_sudo([4],
                          ('add', '192.168.45.100/24',
                           'scope', 'link',
                           'dev', 'tap0',
                           'brd', '192.168.45.255'))

    def test_del_address(self):
        self.addr_cmd.delete('192.168.45.100/24')
        self._assert_sudo([4],
                          ('del', '192.168.45.100/24', 'dev', 'tap0'))

    def test_flush(self):
        self.addr_cmd.flush(6)
        self._assert_sudo([6], ('flush', 'tap0'))

    def test_list(self):
        # Both samples (with and without 'brd' on the inet line) must parse
        # to the same structured result.
        expected = [
            dict(scope='global',
                 dynamic=False, cidr='172.16.77.240/24'),
            dict(scope='global',
                 dynamic=True, cidr='2001:470:9:1224:5595:dd51:6ba2:e788/64'),
            dict(scope='global',
                 dynamic=True, cidr='2001:470:9:1224:fd91:272:581e:3a32/64'),
            dict(scope='global',
                 dynamic=True, cidr='2001:470:9:1224:4508:b885:5fb:740b/64'),
            dict(scope='global',
                 dynamic=True, cidr='2001:470:9:1224:dfcc:aaff:feb9:76ce/64'),
            dict(scope='link',
                 dynamic=False, cidr='fe80::dfcc:aaff:feb9:76ce/64')]

        test_cases = [ADDR_SAMPLE, ADDR_SAMPLE2]

        for test_case in test_cases:
            self.parent._run = mock.Mock(return_value=test_case)
            self.assertEqual(self.addr_cmd.list(), expected)
            self._assert_call([], ('show', 'tap0'))

    def test_list_filtered(self):
        expected = [
            dict(scope='global',
                 dynamic=False, cidr='172.16.77.240/24')]

        test_cases = [ADDR_SAMPLE, ADDR_SAMPLE2]

        for test_case in test_cases:
            # Keep only the header + inet line to simulate filtered output.
            output = '\n'.join(test_case.split('\n')[0:4])
            self.parent._run.return_value = output
            self.assertEqual(self.addr_cmd.list('global',
                             filters=['permanent']), expected)
            self._assert_call([], ('show', 'tap0', 'permanent', 'scope',
                              'global'))
class TestIpRouteCommand(TestIPCmdBase):
    """Tests for IpRouteCommand: 'ip route ...' argument construction.

    The IPv6 subclass below re-runs every test with IPv6 fixtures, so
    tests reference self.ip_version/self.cidr/self.gateway/... instead
    of hard-coding IPv4 values.
    """
    def setUp(self):
        super(TestIpRouteCommand, self).setUp()
        self.parent.name = 'eth0'
        self.command = 'route'
        self.route_cmd = ip_lib.IpRouteCommand(self.parent)
        self.ip_version = 4
        self.table = 14
        self.metric = 100
        self.cidr = '192.168.45.100/24'
        self.ip = '10.0.0.1'
        self.gateway = '192.168.45.100'
        # GATEWAY_SAMPLE* are module-level captures of 'ip route' output
        # paired with the parse result get_gateway() should produce.
        self.test_cases = [{'sample': GATEWAY_SAMPLE1,
                            'expected': {'gateway': '10.35.19.254',
                                         'metric': 100}},
                           {'sample': GATEWAY_SAMPLE2,
                            'expected': {'gateway': '10.35.19.254',
                                         'metric': 100}},
                           {'sample': GATEWAY_SAMPLE3,
                            'expected': None},
                           {'sample': GATEWAY_SAMPLE4,
                            'expected': {'gateway': '10.35.19.254'}},
                           {'sample': GATEWAY_SAMPLE5,
                            'expected': {'gateway': '192.168.99.1'}},
                           {'sample': GATEWAY_SAMPLE6,
                            'expected': {'gateway': '192.168.99.1',
                                         'metric': 100}}]
    def test_add_gateway(self):
        self.route_cmd.add_gateway(self.gateway, self.metric, self.table)
        self._assert_sudo([self.ip_version],
                          ('replace', 'default',
                           'via', self.gateway,
                           'metric', self.metric,
                           'dev', self.parent.name,
                           'table', self.table))
    def test_del_gateway(self):
        self.route_cmd.delete_gateway(self.gateway, table=self.table)
        self._assert_sudo([self.ip_version],
                          ('del', 'default',
                           'via', self.gateway,
                           'dev', self.parent.name,
                           'table', self.table))
    def test_get_gateway(self):
        for test_case in self.test_cases:
            self.parent._run = mock.Mock(return_value=test_case['sample'])
            self.assertEqual(self.route_cmd.get_gateway(),
                             test_case['expected'])
    def test_pullup_route(self):
        # NOTE(brian-haley) Currently we do not have any IPv6-specific usecase
        # for pullup_route, hence skipping. Revisit, if required, in future.
        if self.ip_version == 6:
            return
        # interface is not the first in the list - requires
        # deleting and creating existing entries
        output = [DEVICE_ROUTE_SAMPLE, SUBNET_SAMPLE1]
        # NOTE(review): used as a plain side_effect callable, so the first
        # positional argument of each _run() call binds to 'self' here.
        def pullup_side_effect(self, *args):
            result = output.pop(0)
            return result
        self.parent._run = mock.Mock(side_effect=pullup_side_effect)
        self.route_cmd.pullup_route('tap1d7888a7-10')
        self._assert_sudo([], ('del', '10.0.0.0/24', 'dev', 'qr-23380d11-d2'))
        self._assert_sudo([], ('append', '10.0.0.0/24', 'proto', 'kernel',
                               'src', '10.0.0.1', 'dev', 'qr-23380d11-d2'))
    def test_pullup_route_first(self):
        # NOTE(brian-haley) Currently we do not have any IPv6-specific usecase
        # for pullup_route, hence skipping. Revisit, if required, in future.
        if self.ip_version == 6:
            return
        # interface is first in the list - no changes
        output = [DEVICE_ROUTE_SAMPLE, SUBNET_SAMPLE2]
        def pullup_side_effect(self, *args):
            result = output.pop(0)
            return result
        self.parent._run = mock.Mock(side_effect=pullup_side_effect)
        self.route_cmd.pullup_route('tap1d7888a7-10')
        # Check two calls - device get and subnet get
        self.assertEqual(len(self.parent._run.mock_calls), 2)
    def test_add_route(self):
        self.route_cmd.add_route(self.cidr, self.ip, self.table)
        self._assert_sudo([self.ip_version],
                          ('replace', self.cidr,
                           'via', self.ip,
                           'dev', self.parent.name,
                           'table', self.table))
    def test_delete_route(self):
        self.route_cmd.delete_route(self.cidr, self.ip, self.table)
        self._assert_sudo([self.ip_version],
                          ('del', self.cidr,
                           'via', self.ip,
                           'dev', self.parent.name,
                           'table', self.table))
class TestIPv6IpRouteCommand(TestIpRouteCommand):
    """Re-runs every TestIpRouteCommand test with IPv6 fixtures.

    Only setUp differs: addresses, gateway and the expected
    get_gateway() parse results are replaced with IPv6 equivalents.
    """
    def setUp(self):
        super(TestIPv6IpRouteCommand, self).setUp()
        self.ip_version = 6
        self.cidr = '2001:db8::/64'
        self.ip = '2001:db8::100'
        self.gateway = '2001:db8::1'
        self.test_cases = [{'sample': IPv6_GATEWAY_SAMPLE1,
                            'expected':
                            {'gateway': '2001:470:9:1224:4508:b885:5fb:740b',
                             'metric': 100}},
                           {'sample': IPv6_GATEWAY_SAMPLE2,
                            'expected':
                            {'gateway': '2001:470:9:1224:4508:b885:5fb:740b',
                             'metric': 100}},
                           {'sample': IPv6_GATEWAY_SAMPLE3,
                            'expected': None},
                           {'sample': IPv6_GATEWAY_SAMPLE4,
                            'expected':
                            {'gateway': 'fe80::dfcc:aaff:feb9:76ce'}},
                           {'sample': IPv6_GATEWAY_SAMPLE5,
                            'expected':
                            {'gateway': '2001:470:9:1224:4508:b885:5fb:740b',
                             'metric': 1024}}]
class TestIpNetnsCommand(TestIPCmdBase):
    """Tests for IpNetnsCommand: namespace CRUD and in-namespace exec."""
    def setUp(self):
        super(TestIpNetnsCommand, self).setUp()
        self.command = 'netns'
        self.netns_cmd = ip_lib.IpNetnsCommand(self.parent)
    def test_add_namespace(self):
        with mock.patch('neutron.agent.common.utils.execute') as execute:
            ns = self.netns_cmd.add('ns')
            # Namespace creation always happens in the root namespace.
            self._assert_sudo([], ('add', 'ns'), use_root_namespace=True)
            self.assertEqual(ns.namespace, 'ns')
            # After creation, promote_secondaries is enabled inside the
            # new namespace via sysctl.
            execute.assert_called_once_with(
                ['ip', 'netns', 'exec', 'ns',
                 'sysctl', '-w', 'net.ipv4.conf.all.promote_secondaries=1'],
                run_as_root=True, check_exit_code=True, extra_ok_codes=None,
                log_fail_as_error=True)
    def test_delete_namespace(self):
        with mock.patch('neutron.agent.common.utils.execute'):
            self.netns_cmd.delete('ns')
            self._assert_sudo([], ('delete', 'ns'), use_root_namespace=True)
    def test_namespace_exists_use_helper(self):
        # With use_helper_for_ns_read the listing runs as root.
        self.config(group='AGENT', use_helper_for_ns_read=True)
        retval = '\n'.join(NETNS_SAMPLE)
        # need another instance to avoid mocking
        netns_cmd = ip_lib.IpNetnsCommand(ip_lib.SubProcessBase())
        with mock.patch('neutron.agent.common.utils.execute') as execute:
            execute.return_value = retval
            self.assertTrue(
                netns_cmd.exists('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb'))
            execute.assert_called_once_with(['ip', '-o', 'netns', 'list'],
                                            run_as_root=True,
                                            log_fail_as_error=True)
    def test_namespace_doest_not_exist_no_helper(self):
        # Without the helper the listing runs unprivileged.
        self.config(group='AGENT', use_helper_for_ns_read=False)
        retval = '\n'.join(NETNS_SAMPLE)
        # need another instance to avoid mocking
        netns_cmd = ip_lib.IpNetnsCommand(ip_lib.SubProcessBase())
        with mock.patch('neutron.agent.common.utils.execute') as execute:
            execute.return_value = retval
            self.assertFalse(
                netns_cmd.exists('bbbbbbbb-1111-2222-3333-bbbbbbbbbbbb'))
            execute.assert_called_once_with(['ip', '-o', 'netns', 'list'],
                                            run_as_root=False,
                                            log_fail_as_error=True)
    def test_execute(self):
        # With a namespace set, commands are wrapped in 'ip netns exec'.
        self.parent.namespace = 'ns'
        with mock.patch('neutron.agent.common.utils.execute') as execute:
            self.netns_cmd.execute(['ip', 'link', 'list'])
            execute.assert_called_once_with(['ip', 'netns', 'exec', 'ns', 'ip',
                                             'link', 'list'],
                                            run_as_root=True,
                                            check_exit_code=True,
                                            extra_ok_codes=None,
                                            log_fail_as_error=True)
    def test_execute_env_var_prepend(self):
        # Environment variables are injected with 'env' inside the
        # namespace wrapper, ahead of the actual command.
        self.parent.namespace = 'ns'
        with mock.patch('neutron.agent.common.utils.execute') as execute:
            env = dict(FOO=1, BAR=2)
            self.netns_cmd.execute(['ip', 'link', 'list'], env)
            execute.assert_called_once_with(
                ['ip', 'netns', 'exec', 'ns', 'env'] +
                ['%s=%s' % (k, v) for k, v in env.items()] +
                ['ip', 'link', 'list'],
                run_as_root=True, check_exit_code=True, extra_ok_codes=None,
                log_fail_as_error=True)
    def test_execute_nosudo_with_no_namespace(self):
        # No namespace means no wrapping and no root escalation.
        with mock.patch('neutron.agent.common.utils.execute') as execute:
            self.parent.namespace = None
            self.netns_cmd.execute(['test'])
            execute.assert_called_once_with(['test'],
                                            check_exit_code=True,
                                            extra_ok_codes=None,
                                            run_as_root=False,
                                            log_fail_as_error=True)
class TestDeviceExists(base.BaseTestCase):
    """Tests for module-level device_exists/ensure_device_is_ready."""
    def test_device_exists(self):
        with mock.patch.object(ip_lib.IPDevice, '_execute') as _execute:
            _execute.return_value = LINK_SAMPLE[1]
            self.assertTrue(ip_lib.device_exists('eth0'))
            _execute.assert_called_once_with(['o'], 'link', ('show', 'eth0'),
                                             log_fail_as_error=False)
    def test_device_does_not_exist(self):
        with mock.patch.object(ip_lib.IPDevice, '_execute') as _execute:
            _execute.return_value = ''
            # A missing device makes the underlying command raise.
            _execute.side_effect = RuntimeError
            self.assertFalse(ip_lib.device_exists('eth0'))
    def test_ensure_device_is_ready(self):
        ip_lib_mock = mock.Mock()
        with mock.patch.object(ip_lib, 'IPDevice', return_value=ip_lib_mock):
            self.assertTrue(ip_lib.ensure_device_is_ready("eth0"))
            # Readiness implies the link was brought up.
            self.assertTrue(ip_lib_mock.link.set_up.called)
            ip_lib_mock.reset_mock()
            # device doesn't exists
            ip_lib_mock.link.set_up.side_effect = RuntimeError
            self.assertFalse(ip_lib.ensure_device_is_ready("eth0"))
class TestIpNeighCommand(TestIPCmdBase):
    """Tests for IpNeighCommand: 'ip neigh ...' argument construction."""

    def setUp(self):
        super(TestIpNeighCommand, self).setUp()
        self.parent.name = 'tap0'
        self.command = 'neigh'
        self.neigh_cmd = ip_lib.IpNeighCommand(self.parent)

    def test_add_entry(self):
        """Adding an entry issues 'replace ... nud permanent' over IPv4."""
        self.neigh_cmd.add('192.168.45.100', 'cc:dd:ee:ff:ab:cd')
        expected_args = ('replace', '192.168.45.100',
                         'lladdr', 'cc:dd:ee:ff:ab:cd',
                         'nud', 'permanent',
                         'dev', 'tap0')
        self._assert_sudo([4], expected_args)

    def test_delete_entry(self):
        """Deleting an entry issues 'del ...' over IPv4."""
        self.neigh_cmd.delete('192.168.45.100', 'cc:dd:ee:ff:ab:cd')
        expected_args = ('del', '192.168.45.100',
                         'lladdr', 'cc:dd:ee:ff:ab:cd',
                         'dev', 'tap0')
        self._assert_sudo([4], expected_args)
class TestArpPing(TestIPCmdBase):
    """Tests for send_ip_addr_adv_notif (gratuitous ARP announcements)."""
    @mock.patch.object(ip_lib, 'IPWrapper')
    @mock.patch('eventlet.spawn_n')
    def test_send_ipv4_addr_adv_notif(self, spawn_n, mIPWrapper):
        # Run the spawned function synchronously so the arping call
        # happens within the test.
        spawn_n.side_effect = lambda f: f()
        ARPING_COUNT = 3
        address = '20.0.0.1'
        config = mock.Mock()
        config.send_arp_for_ha = ARPING_COUNT
        ip_lib.send_ip_addr_adv_notif(mock.sentinel.ns_name,
                                      mock.sentinel.iface_name,
                                      address,
                                      config)
        self.assertTrue(spawn_n.called)
        mIPWrapper.assert_called_once_with(namespace=mock.sentinel.ns_name)
        ip_wrapper = mIPWrapper(namespace=mock.sentinel.ns_name)
        # Just test that arping is called with the right arguments
        arping_cmd = ['arping', '-A',
                      '-I', mock.sentinel.iface_name,
                      '-c', ARPING_COUNT,
                      '-w', mock.ANY,
                      address]
        ip_wrapper.netns.execute.assert_any_call(arping_cmd,
                                                 check_exit_code=True)
    @mock.patch('eventlet.spawn_n')
    def test_no_ipv6_addr_notif(self, spawn_n):
        # Gratuitous ARP is IPv4-only; an IPv6 address must be a no-op.
        ipv6_addr = 'fd00::1'
        config = mock.Mock()
        config.send_arp_for_ha = 3
        ip_lib.send_ip_addr_adv_notif(mock.sentinel.ns_name,
                                      mock.sentinel.iface_name,
                                      ipv6_addr,
                                      config)
        self.assertFalse(spawn_n.called)
class TestAddNamespaceToCmd(base.BaseTestCase):
    """Tests for ip_lib.add_namespace_to_cmd."""

    def test_add_namespace_to_cmd_with_namespace(self):
        """A namespace prefixes the command with 'ip netns exec <ns>'."""
        command = ['ping', '8.8.8.8']
        expected = ['ip', 'netns', 'exec', 'tmp'] + command
        self.assertEqual(expected, ip_lib.add_namespace_to_cmd(command, 'tmp'))

    def test_add_namespace_to_cmd_without_namespace(self):
        """With no namespace the command is returned unchanged."""
        command = ['ping', '8.8.8.8']
        self.assertEqual(command, ip_lib.add_namespace_to_cmd(command, None))
| alexandrucoman/vbox-neutron-agent | neutron/tests/unit/agent/linux/test_ip_lib.py | Python | apache-2.0 | 43,884 |
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
# Resolve the Azure driver class from libcloud's provider registry.
cls = get_driver(Provider.AZURE)
# Instantiate it with a subscription id and a management certificate file.
driver = cls(subscription_id="subscription-id", key_file="/path/to/azure_cert.pem")
| apache/libcloud | docs/examples/compute/azure/instantiate.py | Python | apache-2.0 | 212 |
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import hashlib
import json
import uuid
from keystoneclient.contrib.ec2 import utils as ec2_utils
from oslo_config import cfg
from six.moves import http_client
from testtools import matchers
from keystone import exception
from keystone.tests.unit import test_v3
CONF = cfg.CONF
class CredentialBaseTestCase(test_v3.RestfulTestCase):
    """Common helpers for the v3 credential API tests."""

    def _create_dict_blob_credential(self):
        """Create an ec2 credential whose blob is stored as a dict.

        Returns (expected_json_blob, credential_id).
        """
        blob = {"access": uuid.uuid4().hex,
                "secret": uuid.uuid4().hex}
        # ec2 credential ids are the SHA256 hex digest of the access key.
        # Encode explicitly: hashlib rejects text strings under Python 3,
        # and encoding the ASCII hex key yields the same digest on
        # Python 2.
        credential_id = hashlib.sha256(
            blob['access'].encode('utf-8')).hexdigest()
        credential = self.new_credential_ref(
            user_id=self.user['id'],
            project_id=self.project_id)
        credential['id'] = credential_id
        # Store the blob as a dict *not* JSON ref bug #1259584
        # This means we can test the dict->json workaround, added
        # as part of the bugfix for backwards compatibility works.
        credential['blob'] = blob
        credential['type'] = 'ec2'
        # Create direct via the DB API to avoid validation failure
        self.credential_api.create_credential(
            credential_id,
            credential)
        expected_blob = json.dumps(blob)
        return expected_blob, credential_id
class CredentialTestCase(CredentialBaseTestCase):
    """Test credential CRUD."""
    def setUp(self):
        # Seed one credential owned by self.user/self.project for the
        # tests below to find.
        super(CredentialTestCase, self).setUp()
        self.credential_id = uuid.uuid4().hex
        self.credential = self.new_credential_ref(
            user_id=self.user['id'],
            project_id=self.project_id)
        self.credential['id'] = self.credential_id
        self.credential_api.create_credential(
            self.credential_id,
            self.credential)
    def test_credential_api_delete_credentials_for_project(self):
        self.credential_api.delete_credentials_for_project(self.project_id)
        # Test that the credential that we created in .setUp no longer exists
        # once we delete all credentials for self.project_id
        self.assertRaises(exception.CredentialNotFound,
                          self.credential_api.get_credential,
                          credential_id=self.credential_id)
    def test_credential_api_delete_credentials_for_user(self):
        self.credential_api.delete_credentials_for_user(self.user_id)
        # Test that the credential that we created in .setUp no longer exists
        # once we delete all credentials for self.user_id
        self.assertRaises(exception.CredentialNotFound,
                          self.credential_api.get_credential,
                          credential_id=self.credential_id)
    def test_list_credentials(self):
        """Call ``GET /credentials``."""
        r = self.get('/credentials')
        self.assertValidCredentialListResponse(r, ref=self.credential)
    def test_list_credentials_filtered_by_user_id(self):
        """Call ``GET /credentials?user_id={user_id}``."""
        # A credential belonging to another user must not appear.
        credential = self.new_credential_ref(
            user_id=uuid.uuid4().hex)
        self.credential_api.create_credential(
            credential['id'], credential)
        r = self.get('/credentials?user_id=%s' % self.user['id'])
        self.assertValidCredentialListResponse(r, ref=self.credential)
        for cred in r.result['credentials']:
            self.assertEqual(self.user['id'], cred['user_id'])
    def test_list_credentials_filtered_by_type(self):
        """Call ``GET /credentials?type={type}``."""
        # The type ec2 was chosen, instead of a random string,
        # because the type must be in the list of supported types
        ec2_credential = self.new_credential_ref(user_id=uuid.uuid4().hex,
                                                 project_id=self.project_id,
                                                 cred_type='ec2')
        ec2_resp = self.credential_api.create_credential(
            ec2_credential['id'], ec2_credential)
        # The type cert was chosen for the same reason as ec2
        r = self.get('/credentials?type=cert')
        # Testing the filter for two different types
        self.assertValidCredentialListResponse(r, ref=self.credential)
        for cred in r.result['credentials']:
            self.assertEqual('cert', cred['type'])
        r_ec2 = self.get('/credentials?type=ec2')
        self.assertThat(r_ec2.result['credentials'], matchers.HasLength(1))
        cred_ec2 = r_ec2.result['credentials'][0]
        self.assertValidCredentialListResponse(r_ec2, ref=ec2_resp)
        self.assertEqual('ec2', cred_ec2['type'])
        self.assertEqual(ec2_credential['id'], cred_ec2['id'])
    def test_list_credentials_filtered_by_type_and_user_id(self):
        """Call ``GET /credentials?user_id={user_id}&type={type}``."""
        user1_id = uuid.uuid4().hex
        user2_id = uuid.uuid4().hex
        # Creating credentials for two different users
        credential_user1_ec2 = self.new_credential_ref(
            user_id=user1_id, cred_type='ec2')
        credential_user1_cert = self.new_credential_ref(
            user_id=user1_id)
        credential_user2_cert = self.new_credential_ref(
            user_id=user2_id)
        self.credential_api.create_credential(
            credential_user1_ec2['id'], credential_user1_ec2)
        self.credential_api.create_credential(
            credential_user1_cert['id'], credential_user1_cert)
        self.credential_api.create_credential(
            credential_user2_cert['id'], credential_user2_cert)
        # Both filters must apply: only user1's ec2 credential matches.
        r = self.get('/credentials?user_id=%s&type=ec2' % user1_id)
        self.assertValidCredentialListResponse(r, ref=credential_user1_ec2)
        self.assertThat(r.result['credentials'], matchers.HasLength(1))
        cred = r.result['credentials'][0]
        self.assertEqual('ec2', cred['type'])
        self.assertEqual(user1_id, cred['user_id'])
    def test_create_credential(self):
        """Call ``POST /credentials``."""
        ref = self.new_credential_ref(user_id=self.user['id'])
        r = self.post(
            '/credentials',
            body={'credential': ref})
        self.assertValidCredentialResponse(r, ref)
    def test_get_credential(self):
        """Call ``GET /credentials/{credential_id}``."""
        r = self.get(
            '/credentials/%(credential_id)s' % {
                'credential_id': self.credential_id})
        self.assertValidCredentialResponse(r, self.credential)
    def test_update_credential(self):
        """Call ``PATCH /credentials/{credential_id}``."""
        ref = self.new_credential_ref(
            user_id=self.user['id'],
            project_id=self.project_id)
        del ref['id']
        r = self.patch(
            '/credentials/%(credential_id)s' % {
                'credential_id': self.credential_id},
            body={'credential': ref})
        self.assertValidCredentialResponse(r, ref)
    def test_delete_credential(self):
        """Call ``DELETE /credentials/{credential_id}``."""
        self.delete(
            '/credentials/%(credential_id)s' % {
                'credential_id': self.credential_id})
    def test_create_ec2_credential(self):
        """Call ``POST /credentials`` for creating ec2 credential."""
        ref = self.new_credential_ref(user_id=self.user['id'],
                                      project_id=self.project_id)
        blob = {"access": uuid.uuid4().hex,
                "secret": uuid.uuid4().hex}
        ref['blob'] = json.dumps(blob)
        ref['type'] = 'ec2'
        r = self.post(
            '/credentials',
            body={'credential': ref})
        self.assertValidCredentialResponse(r, ref)
        # Assert credential id is same as hash of access key id for
        # ec2 credentials
        # NOTE(review): blob['access'] is a native str; hashlib requires
        # bytes under Python 3, so this would need .encode() there.
        self.assertEqual(hashlib.sha256(blob['access']).hexdigest(),
                         r.result['credential']['id'])
        # Create second ec2 credential with the same access key id and check
        # for conflict.
        self.post(
            '/credentials',
            body={'credential': ref}, expected_status=http_client.CONFLICT)
    def test_get_ec2_dict_blob(self):
        """Ensure non-JSON blob data is correctly converted."""
        expected_blob, credential_id = self._create_dict_blob_credential()
        r = self.get(
            '/credentials/%(credential_id)s' % {
                'credential_id': credential_id})
        self.assertEqual(expected_blob, r.result['credential']['blob'])
    def test_list_ec2_dict_blob(self):
        """Ensure non-JSON blob data is correctly converted."""
        expected_blob, credential_id = self._create_dict_blob_credential()
        list_r = self.get('/credentials')
        list_creds = list_r.result['credentials']
        list_ids = [r['id'] for r in list_creds]
        self.assertIn(credential_id, list_ids)
        for r in list_creds:
            if r['id'] == credential_id:
                self.assertEqual(expected_blob, r['blob'])
    def test_create_non_ec2_credential(self):
        """Call ``POST /credentials`` for creating non-ec2 credential."""
        ref = self.new_credential_ref(user_id=self.user['id'])
        blob = {"access": uuid.uuid4().hex,
                "secret": uuid.uuid4().hex}
        ref['blob'] = json.dumps(blob)
        r = self.post(
            '/credentials',
            body={'credential': ref})
        self.assertValidCredentialResponse(r, ref)
        # Assert credential id is not same as hash of access key id for
        # non-ec2 credentials
        self.assertNotEqual(hashlib.sha256(blob['access']).hexdigest(),
                            r.result['credential']['id'])
    def test_create_ec2_credential_with_missing_project_id(self):
        """Call ``POST /credentials`` for creating ec2
           credential with missing project_id.
        """
        ref = self.new_credential_ref(user_id=self.user['id'])
        blob = {"access": uuid.uuid4().hex,
                "secret": uuid.uuid4().hex}
        ref['blob'] = json.dumps(blob)
        ref['type'] = 'ec2'
        # Assert bad request status when missing project_id
        self.post(
            '/credentials',
            body={'credential': ref}, expected_status=http_client.BAD_REQUEST)
    def test_create_ec2_credential_with_invalid_blob(self):
        """Call ``POST /credentials`` for creating ec2
           credential with invalid blob.
        """
        ref = self.new_credential_ref(user_id=self.user['id'],
                                      project_id=self.project_id)
        # Deliberately malformed JSON (stray 'd' before the brace).
        ref['blob'] = '{"abc":"def"d}'
        ref['type'] = 'ec2'
        # Assert bad request status when request contains invalid blob
        response = self.post(
            '/credentials',
            body={'credential': ref}, expected_status=http_client.BAD_REQUEST)
        self.assertValidErrorResponse(response)
    def test_create_credential_with_admin_token(self):
        # Make sure we can create credential with the static admin token
        ref = self.new_credential_ref(user_id=self.user['id'])
        r = self.post(
            '/credentials',
            body={'credential': ref},
            token=CONF.admin_token)
        self.assertValidCredentialResponse(r, ref)
class TestCredentialTrustScoped(test_v3.RestfulTestCase):
    """Test credential with trust scoped token."""
    def setUp(self):
        # Create a trustee user to receive the delegation.
        super(TestCredentialTrustScoped, self).setUp()
        self.trustee_user = self.new_user_ref(domain_id=self.domain_id)
        password = self.trustee_user['password']
        self.trustee_user = self.identity_api.create_user(self.trustee_user)
        # create_user() does not return the password; restore it so the
        # trustee can authenticate later.
        self.trustee_user['password'] = password
        self.trustee_user_id = self.trustee_user['id']
    def config_overrides(self):
        super(TestCredentialTrustScoped, self).config_overrides()
        self.config_fixture.config(group='trust', enabled=True)
    def test_trust_scoped_ec2_credential(self):
        """Call ``POST /credentials`` for creating ec2 credential."""
        # Create the trust
        ref = self.new_trust_ref(
            trustor_user_id=self.user_id,
            trustee_user_id=self.trustee_user_id,
            project_id=self.project_id,
            impersonation=True,
            expires=dict(minutes=1),
            role_ids=[self.role_id])
        del ref['id']
        r = self.post('/OS-TRUST/trusts', body={'trust': ref})
        trust = self.assertValidTrustResponse(r)
        # Get a trust scoped token
        auth_data = self.build_authentication_request(
            user_id=self.trustee_user['id'],
            password=self.trustee_user['password'],
            trust_id=trust['id'])
        r = self.v3_create_token(auth_data)
        self.assertValidProjectTrustScopedTokenResponse(r, self.user)
        trust_id = r.result['token']['OS-TRUST:trust']['id']
        token_id = r.headers.get('X-Subject-Token')
        # Create the credential with the trust scoped token
        ref = self.new_credential_ref(user_id=self.user['id'],
                                      project_id=self.project_id)
        blob = {"access": uuid.uuid4().hex,
                "secret": uuid.uuid4().hex}
        ref['blob'] = json.dumps(blob)
        ref['type'] = 'ec2'
        r = self.post(
            '/credentials',
            body={'credential': ref},
            token=token_id)
        # We expect the response blob to contain the trust_id
        ret_ref = ref.copy()
        ret_blob = blob.copy()
        ret_blob['trust_id'] = trust_id
        ret_ref['blob'] = json.dumps(ret_blob)
        self.assertValidCredentialResponse(r, ref=ret_ref)
        # Assert credential id is same as hash of access key id for
        # ec2 credentials
        # NOTE(review): blob['access'] is a native str; hashlib requires
        # bytes under Python 3, so this would need .encode() there.
        self.assertEqual(hashlib.sha256(blob['access']).hexdigest(),
                         r.result['credential']['id'])
        # Create second ec2 credential with the same access key id and check
        # for conflict.
        self.post(
            '/credentials',
            body={'credential': ref},
            token=token_id,
            expected_status=http_client.CONFLICT)
class TestCredentialEc2(CredentialBaseTestCase):
    """Test v3 credential compatibility with ec2tokens."""
    def setUp(self):
        # No extra fixtures needed beyond the base class.
        super(TestCredentialEc2, self).setUp()
    def _validate_signature(self, access, secret):
        """Test signature validation with the access/secret provided."""
        signer = ec2_utils.Ec2Signer(secret)
        params = {'SignatureMethod': 'HmacSHA256',
                  'SignatureVersion': '2',
                  'AWSAccessKeyId': access}
        request = {'host': 'foo',
                   'verb': 'GET',
                   'path': '/bar',
                   'params': params}
        signature = signer.generate(request)
        # Now make a request to validate the signed dummy request via the
        # ec2tokens API. This proves the v3 ec2 credentials actually work.
        sig_ref = {'access': access,
                   'signature': signature,
                   'host': 'foo',
                   'verb': 'GET',
                   'path': '/bar',
                   'params': params}
        r = self.post(
            '/ec2tokens',
            body={'ec2Credentials': sig_ref},
            expected_status=http_client.OK)
        self.assertValidTokenResponse(r)
    def test_ec2_credential_signature_validate(self):
        """Test signature validation with a v3 ec2 credential."""
        ref = self.new_credential_ref(
            user_id=self.user['id'],
            project_id=self.project_id)
        blob = {"access": uuid.uuid4().hex,
                "secret": uuid.uuid4().hex}
        ref['blob'] = json.dumps(blob)
        ref['type'] = 'ec2'
        r = self.post(
            '/credentials',
            body={'credential': ref})
        self.assertValidCredentialResponse(r, ref)
        # Assert credential id is same as hash of access key id
        # NOTE(review): blob['access'] is a native str; hashlib requires
        # bytes under Python 3, so this would need .encode() there.
        self.assertEqual(hashlib.sha256(blob['access']).hexdigest(),
                         r.result['credential']['id'])
        cred_blob = json.loads(r.result['credential']['blob'])
        self.assertEqual(blob, cred_blob)
        self._validate_signature(access=cred_blob['access'],
                                 secret=cred_blob['secret'])
    def test_ec2_credential_signature_validate_legacy(self):
        """Test signature validation with a legacy v3 ec2 credential."""
        cred_json, credential_id = self._create_dict_blob_credential()
        cred_blob = json.loads(cred_json)
        self._validate_signature(access=cred_blob['access'],
                                 secret=cred_blob['secret'])
    def _get_ec2_cred_uri(self):
        # Legacy EC2 credential endpoint scoped to the test user.
        return '/users/%s/credentials/OS-EC2' % self.user_id
    def _get_ec2_cred(self):
        # Create (and return) an EC2 credential via the OS-EC2 API.
        uri = self._get_ec2_cred_uri()
        r = self.post(uri, body={'tenant_id': self.project_id})
        return r.result['credential']
    def test_ec2_create_credential(self):
        """Test ec2 credential creation."""
        ec2_cred = self._get_ec2_cred()
        self.assertEqual(self.user_id, ec2_cred['user_id'])
        self.assertEqual(self.project_id, ec2_cred['tenant_id'])
        self.assertIsNone(ec2_cred['trust_id'])
        self._validate_signature(access=ec2_cred['access'],
                                 secret=ec2_cred['secret'])
        uri = '/'.join([self._get_ec2_cred_uri(), ec2_cred['access']])
        self.assertThat(ec2_cred['links']['self'],
                        matchers.EndsWith(uri))
    def test_ec2_get_credential(self):
        ec2_cred = self._get_ec2_cred()
        uri = '/'.join([self._get_ec2_cred_uri(), ec2_cred['access']])
        r = self.get(uri)
        self.assertDictEqual(ec2_cred, r.result['credential'])
        self.assertThat(ec2_cred['links']['self'],
                        matchers.EndsWith(uri))
    def test_ec2_list_credentials(self):
        """Test ec2 credential listing."""
        self._get_ec2_cred()
        uri = self._get_ec2_cred_uri()
        r = self.get(uri)
        cred_list = r.result['credentials']
        self.assertEqual(1, len(cred_list))
        self.assertThat(r.result['links']['self'],
                        matchers.EndsWith(uri))
    def test_ec2_delete_credential(self):
        """Test ec2 credential deletion."""
        ec2_cred = self._get_ec2_cred()
        uri = '/'.join([self._get_ec2_cred_uri(), ec2_cred['access']])
        cred_from_credential_api = (
            self.credential_api
            .list_credentials_for_user(self.user_id))
        self.assertEqual(1, len(cred_from_credential_api))
        self.delete(uri)
        # The deletion must be visible through the credential backend too.
        self.assertRaises(exception.CredentialNotFound,
                          self.credential_api.get_credential,
                          cred_from_credential_api[0]['id'])
| ajayaa/keystone | keystone/tests/unit/test_v3_credential.py | Python | apache-2.0 | 19,309 |
# Copyright 2012 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Most of this code originated in sphinx.domains.python and
# sphinx.ext.autodoc and has been only slightly adapted for use in
# subclasses here.
# :copyright: Copyright 2007-2011 by the Sphinx team, see AUTHORS.
# :license: BSD, see LICENSE for details.
import re
from sphinx import addnodes
from sphinx.domains.python import _pseudo_parse_arglist
from sphinx.domains.python import PyModulelevel
from sphinx.ext.autodoc import Documenter
from sphinx.ext.autodoc import FunctionDocumenter
from sphinx.locale import _
yaml_sig_re = re.compile(r'yaml:\s*(.*)')
class PyYAMLFunction(PyModulelevel):
    """'py:yamlfunction' directive.

    Variant of the module-level function directive that treats the whole
    signature text as the name and never renders an argument list.
    """
    def handle_signature(self, sig, signode):
        """Transform a Python signature into RST nodes.
        Return (fully qualified name of the thing, classname if any).
        If inside a class, the current class name is handled intelligently:
        * it is stripped from the displayed name if present
        * it is added to the full name (return value) if not present
        """
        # Unlike the upstream sphinx implementation, the signature is not
        # parsed: the entire string is the name and there is no prefix,
        # arglist or return annotation.
        name_prefix = None
        name = sig
        arglist = None
        retann = None
        # determine module and class name (if applicable), as well as full name
        modname = self.options.get(
            'module', self.env.temp_data.get('py:module'))
        classname = self.env.temp_data.get('py:class')
        fullname = name
        signode['module'] = modname
        signode['class'] = classname
        signode['fullname'] = fullname
        sig_prefix = self.get_signature_prefix(sig)
        if sig_prefix:
            signode += addnodes.desc_annotation(sig_prefix, sig_prefix)
        # name_prefix is always None above, so this branch never runs.
        if name_prefix:
            signode += addnodes.desc_addname(name_prefix, name_prefix)
        anno = self.options.get('annotation')
        signode += addnodes.desc_name(name, name)
        # arglist is always None above, so this branch always runs and
        # returns; the code below it mirrors the upstream sphinx version
        # but is unreachable here.
        if not arglist:
            if self.needs_arglist():
                # for callables, add an empty parameter list
                signode += addnodes.desc_parameterlist()
            if retann:
                signode += addnodes.desc_returns(retann, retann)
            if anno:
                signode += addnodes.desc_annotation(' ' + anno, ' ' + anno)
            return fullname, name_prefix
        _pseudo_parse_arglist(signode, arglist)
        if retann:
            signode += addnodes.desc_returns(retann, retann)
        if anno:
            signode += addnodes.desc_annotation(' ' + anno, ' ' + anno)
        return fullname, name_prefix
    def get_index_text(self, modname, name_cls):
        # Index entry text for the documented name.
        return _('%s (in module %s)') % (name_cls[0], modname)
class YAMLFunctionDocumenter(FunctionDocumenter):
    """Autodoc documenter for functions whose docstring starts 'yaml: NAME'.

    Higher priority than the plain FunctionDocumenter so it wins for
    matching functions; the YAML name replaces the Python name in output.
    """
    priority = FunctionDocumenter.priority + 10
    objtype = 'yamlfunction'
    directivetype = 'yamlfunction'
    @classmethod
    def can_document_member(cls, member, membername, isattr, parent):
        # Only claim functions the base documenter accepts AND whose
        # docstring begins with a 'yaml:' marker line.
        if not FunctionDocumenter.can_document_member(member, membername,
                                                      isattr, parent):
            return False
        if member.__doc__ is not None and yaml_sig_re.match(member.__doc__):
            return True
        return False
    def _find_signature(self, encoding=None):
        # Call the base Documenter implementation directly; self.get_doc
        # is overridden below and would return the cached, already
        # stripped lines.
        docstrings = Documenter.get_doc(self, encoding, 2)
        if len(docstrings) != 1:
            return
        doclines = docstrings[0]
        # Cache the (possibly trimmed) docstring lines for get_doc().
        setattr(self, '__new_doclines', doclines)
        if not doclines:
            return
        # match first line of docstring against signature RE
        match = yaml_sig_re.match(doclines[0])
        if not match:
            return
        name = match.group(1)
        # ok, now jump over remaining empty lines and set the remaining
        # lines as the new doclines
        i = 1
        while i < len(doclines) and not doclines[i].strip():
            i += 1
        setattr(self, '__new_doclines', doclines[i:])
        return name
    def get_doc(self, encoding=None, ignore=1):
        # Prefer the lines cached by _find_signature (with the 'yaml:'
        # marker stripped); fall back to the normal docstring.
        lines = getattr(self, '__new_doclines', None)
        if lines is not None:
            return [lines]
        return Documenter.get_doc(self, encoding, ignore)
    def format_signature(self):
        # The YAML name extracted from the docstring replaces the usual
        # signature; may be None if no 'yaml:' line was found.
        result = self._find_signature()
        self._name = result
        return ''
    def format_name(self):
        return self._name
def setup(app):
    """Sphinx extension entry point.

    Registers the yamlfunction autodoc documenter and the corresponding
    'py:yamlfunction' directive.
    """
    app.add_autodocumenter(YAMLFunctionDocumenter)
    app.add_directive_to_domain('py', 'yamlfunction', PyYAMLFunction)
| gforcada/jenkins-job-builder | jenkins_jobs/sphinx/yaml.py | Python | apache-2.0 | 4,991 |
from django.shortcuts import render
# Create your views here.
# Every view below renders a static template with no context, so the
# view functions are produced by a single parameterised factory instead
# of 59 near-identical function bodies.
def _template_view(template_name):
    """Build a view that renders *template_name* for any request."""
    def view(request):
        return render(request, template_name)
    return view


proindex = _template_view('example/probase.html')
index = _template_view('e_index.html')
badges_labels = _template_view('badges_labels.html')
four = _template_view('404.html')
five = _template_view('500.html')
basic_gallery = _template_view('basic_gallery.html')
buttons = _template_view('buttons.html')
calendar = _template_view('calendar.html')
carousel = _template_view('carousel.html')
chat_view = _template_view('chat_view.html')
code_editor = _template_view('code_editor.html')
contacts = _template_view('contacts.html')
css_animation = _template_view('css_animation.html')
draggable_panels = _template_view('draggable_panels.html')
empty_page = _template_view('empty_page.html')
faq = _template_view('faq.html')
file_manager = _template_view('file_manager.html')
form_advanced = _template_view('form_advanced.html')
form_avatar = _template_view('form_avatar.html')
form_basic = _template_view('form_basic.html')
form_builder = _template_view('form_builder.html')
form_editors = _template_view('form_editors.html')
form_file_upload = _template_view('form_file_upload.html')
form_markdown = _template_view('form_markdown.html')
form_simditor = _template_view('form_simditor.html')
form_validate = _template_view('form_validate.html')
form_webuploader = _template_view('form_webuploader.html')
form_wizard = _template_view('form_wizard.html')
forum_main = _template_view('forum_main.html')
graph_echarts = _template_view('graph_echarts.html')
graph_flot = _template_view('graph_flot.html')
graph_morris = _template_view('graph_morris.html')
graph_peity = _template_view('graph_peity.html')
graph_rickshaw = _template_view('graph_rickshaw.html')
graph_sparkline = _template_view('graph_sparkline.html')
grid_options = _template_view('grid_options.html')
iconfont = _template_view('iconfont.html')
icons = _template_view('icons.html')
index_1 = _template_view('index_1.html')
index_2 = _template_view('index_2.html')
index_3 = _template_view('index_3.html')
index_4 = _template_view('index_4.html')
invoice = _template_view('invoice.html')
invoice_print = _template_view('invoice_print.html')
layer = _template_view('layer.html')
layerdate = _template_view('layerdate.html')
layouts = _template_view('layouts.html')
lockscreen = _template_view('lockscreen.html')
login = _template_view('login.html')
mailbox = _template_view('mailbox.html')
mail_compose = _template_view('mail_compose.html')
mail_detail = _template_view('mail_detail.html')
modal_window = _template_view('modal_window.html')
nestable_list = _template_view('nestable_list.html')
notifications = _template_view('notifications.html')
pin_board = _template_view('pin_board.html')
profile = _template_view('profile.html')
projects = _template_view('projects.html')
project_detail = _template_view('project_detail.html')
register = _template_view('register.html')
def search_results(request):
return render(request, 'search_results.html' )
def table_basic(request):
return render(request, 'table_basic.html' )
def table_data_tables(request):
return render(request, 'table_data_tables.html' )
def table_jqgrid(request):
return render(request, 'table_jqgrid.html' )
def tabs_panels(request):
return render(request, 'tabs_panels.html' )
def timeline(request):
return render(request, 'timeline.html' )
def timeline_v2(request):
return render(request, 'timeline_v2.html' )
def toastr_notifications(request):
return render(request, 'toastr_notifications.html' )
def tree_view(request):
return render(request, 'tree_view.html' )
def tree_view_v2(request):
return render(request, 'tree_view_v2.html' )
def typography(request):
return render(request, 'typography.html' )
def validation(request):
return render(request, 'validation.html' )
def webim(request):
return render(request, 'webim.html' )
def widgets(request):
return render(request, 'widgets.html' ) | chenqi123/ipaas | example/views.py | Python | apache-2.0 | 6,131 |
#!/usr/bin/env python
##
## Copyright 2009 Adriana Lukas & Alec Muffett
##
## Licensed under the Apache License, Version 2.0 (the "License"); you
## may not use this file except in compliance with the License. You
## may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
## implied. See the License for the specific language governing
## permissions and limitations under the License.
##
"""docstring goes here""" # :-)
# spec: http://www.flickr.com/groups/api/discuss/72157616713786392/
# Flickr-style base58 alphabet: digits and letters with the visually
# ambiguous characters 0, O, I and l removed.
__b58chars = '123456789abcdefghijkmnopqrstuvwxyzABCDEFGHJKLMNPQRSTUVWXYZ'
__b58base = len(__b58chars) # let's not bother hard-coding
def b58encode(value):
    """
    encode integer 'value' as a base58 string; returns string
    """
    digits = []
    while value >= __b58base:
        value, remainder = divmod(value, __b58base)
        digits.append(__b58chars[remainder])
    # Most significant digit last in `digits`, so reverse on the way out.
    digits.append(__b58chars[value])
    return ''.join(reversed(digits))
def b58decode(encoded):
    """
    decodes base58 string 'encoded' to return integer
    """
    # Horner's scheme, scanning most significant digit first.
    value = 0
    for ch in encoded:
        value = value * __b58base + __b58chars.index(ch)
    return value
if __name__ == '__main__':
    # Manual smoke test: prints the computed value next to the expected one.
    # NOTE(review): Python 2 print statements — this module predates Python 3
    # and will not run under a py3 interpreter as-is.
    x = b58encode(12345678)
    print x, '26gWw'
    print b58decode(x), 12345678
| vavavr00m/pymine | util/base58.py | Python | apache-2.0 | 1,596 |
import contextlib
import gzip
import hashlib
import io
import mmap
from builtins import (
map as imap,
)
def gzip_compress(data, compresslevel=6):
    """Gzip-compress ``data`` (bytes) in memory and return the compressed bytes.

    ``compresslevel`` is passed straight through to :class:`gzip.GzipFile`
    (1 = fastest, 9 = smallest; 6 is zlib's default trade-off).
    """
    sink = io.BytesIO()
    gz = gzip.GzipFile(fileobj=sink, mode="wb", compresslevel=compresslevel)
    # Closing the GzipFile flushes the trailing CRC/size footer into `sink`.
    with gz:
        gz.write(data)
    return sink.getvalue()
def hash_file(fn, hn):
    """Return the binary digest of file ``fn`` using hash algorithm ``hn``.

    ``hn`` is any algorithm name accepted by :func:`hashlib.new`
    (e.g. ``"md5"``, ``"sha1"``, ``"sha256"``).  The file is memory-mapped so
    large files are hashed without reading them into a Python bytes object.

    Fixes over the previous version:
    - open in binary mode (``"rb"``) — the bytes being hashed are raw file
      content, and text mode is wrong for that;
    - use ``access=mmap.ACCESS_READ`` instead of the Unix-only
      ``prot=mmap.PROT_PREAD``-style argument, so this also works on Windows;
    - an empty file no longer raises ``ValueError`` (mmap cannot map zero
      bytes); it hashes as the empty message.
    """
    h = hashlib.new(hn)
    with open(fn, "rb") as fh:
        if os.fstat(fh.fileno()).st_size == 0:
            # mmap refuses to map an empty file; the digest of zero bytes
            # is simply the algorithm's initial digest.
            return h.digest()
        with contextlib.closing(
                mmap.mmap(fh.fileno(), 0, access=mmap.ACCESS_READ)) as mm:
            h.update(mm)
    return h.digest()
def indent(text, spaces):
    """Prefix every line of ``text`` with ``spaces`` space characters.

    ``spaces`` may be anything acceptable to ``int()``.  Like the original,
    a trailing newline in ``text`` is not preserved (``splitlines`` +
    ``join`` semantics), and blank interior lines are indented too.
    """
    prefix = " " * int(spaces)
    return "\n".join(prefix + line for line in text.splitlines())
| nanshe-org/nanshe_workflow | nanshe_workflow/util.py | Python | apache-2.0 | 733 |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# [START aiplatform_cancel_data_labeling_job_sample]
from google.cloud import aiplatform
def cancel_data_labeling_job_sample(
    project: str,
    data_labeling_job_id: str,
    location: str = "us-central1",
    api_endpoint: str = "us-central1-aiplatform.googleapis.com",
):
    """Cancel an AI Platform data labeling job and print the API response.

    Args:
        project: GCP project ID that owns the job.
        data_labeling_job_id: ID of the data labeling job to cancel.
        location: Region the job was created in.
        api_endpoint: Regional API endpoint the request is sent to; must
            match ``location``.
    """
    # The AI Platform services require regional API endpoints.
    client_options = {"api_endpoint": api_endpoint}
    # Initialize client that will be used to create and send requests.
    # This client only needs to be created once, and can be reused for multiple requests.
    client = aiplatform.gapic.JobServiceClient(client_options=client_options)
    # Build the fully-qualified resource name expected by the cancel RPC.
    name = client.data_labeling_job_path(
        project=project, location=location, data_labeling_job=data_labeling_job_id
    )
    response = client.cancel_data_labeling_job(name=name)
    print("response:", response)
| sasha-gitg/python-aiplatform | samples/snippets/job_service/cancel_data_labeling_job_sample.py | Python | apache-2.0 | 1,485 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from qpid.client import Client, Closed
from qpid.queue import Empty
from qpid.content import Content
from qpid.testlib import TestBase
class TxTests(TestBase):
    """
    Tests for 'methods' on the amqp tx 'class'
    """
    def test_commit(self):
        """
        Test that commited publishes are delivered and commited acks are not re-delivered
        """
        channel = self.channel
        # perform_txn_work leaves 7 "TxMessage" publishes and acks for the 7
        # pre-txn "Message"s pending inside an open transaction.
        queue_a, queue_b, queue_c = self.perform_txn_work(channel, "tx-commit-a", "tx-commit-b", "tx-commit-c")
        channel.tx_commit()
        #check results
        # NOTE(review): message numbering skips 5 throughout this class —
        # range(1, 5) yields 1..4, then 6 and 7 are published explicitly.
        for i in range(1, 5):
            msg = queue_c.get(timeout=self.recv_timeout())
            self.assertEqual("TxMessage %d" % i, msg.content.body)
        msg = queue_b.get(timeout=self.recv_timeout())
        self.assertEqual("TxMessage 6", msg.content.body)
        msg = queue_a.get(timeout=self.recv_timeout())
        self.assertEqual("TxMessage 7", msg.content.body)
        # No further deliveries expected: the committed acks must not cause
        # re-delivery of the pre-transaction messages.
        for q in [queue_a, queue_b, queue_c]:
            try:
                extra = q.get(timeout=self.recv_timeout_negative())
                self.fail("Got unexpected message: " + extra.content.body)
            except Empty: None
        #cleanup
        channel.basic_ack(delivery_tag=0, multiple=True)
        channel.tx_commit()
    def test_auto_rollback(self):
        """
        Test that a channel closed with an open transaction is effectively rolled back
        """
        channel = self.channel
        queue_a, queue_b, queue_c = self.perform_txn_work(channel, "tx-autorollback-a", "tx-autorollback-b", "tx-autorollback-c")
        # While the transaction is open, none of the TxMessage publishes may
        # be visible to consumers.
        for q in [queue_a, queue_b, queue_c]:
            try:
                extra = q.get(timeout=self.recv_timeout_negative())
                self.fail("Got unexpected message: " + extra.content.body)
            except Empty: None
        channel.tx_rollback()
        #check results
        # The rolled-back acks mean the original messages are re-delivered.
        for i in range(1, 5):
            msg = queue_a.get(timeout=self.recv_timeout())
            self.assertEqual("Message %d" % i, msg.content.body)
        msg = queue_b.get(timeout=self.recv_timeout())
        self.assertEqual("Message 6", msg.content.body)
        msg = queue_c.get(timeout=self.recv_timeout())
        self.assertEqual("Message 7", msg.content.body)
        for q in [queue_a, queue_b, queue_c]:
            try:
                extra = q.get(timeout=self.recv_timeout_negative())
                self.fail("Got unexpected message: " + extra.content.body)
            except Empty: None
        #cleanup
        channel.basic_ack(delivery_tag=0, multiple=True)
        channel.tx_commit()
    def test_rollback(self):
        """
        Test that rolled back publishes are not delivered and rolled back acks are re-delivered
        """
        channel = self.channel
        queue_a, queue_b, queue_c = self.perform_txn_work(channel, "tx-rollback-a", "tx-rollback-b", "tx-rollback-c")
        for q in [queue_a, queue_b, queue_c]:
            try:
                extra = q.get(timeout=self.recv_timeout_negative())
                self.fail("Got unexpected message: " + extra.content.body)
            except Empty: None
        channel.tx_rollback()
        #check results
        for i in range(1, 5):
            msg = queue_a.get(timeout=self.recv_timeout())
            self.assertEqual("Message %d" % i, msg.content.body)
        msg = queue_b.get(timeout=self.recv_timeout())
        self.assertEqual("Message 6", msg.content.body)
        msg = queue_c.get(timeout=self.recv_timeout())
        self.assertEqual("Message 7", msg.content.body)
        for q in [queue_a, queue_b, queue_c]:
            try:
                extra = q.get(timeout=self.recv_timeout_negative())
                self.fail("Got unexpected message: " + extra.content.body)
            except Empty: None
        #cleanup
        channel.basic_ack(delivery_tag=0, multiple=True)
        channel.tx_commit()
    def perform_txn_work(self, channel, name_a, name_b, name_c):
        """
        Utility method that does some setup and some work under a transaction. Used for testing both
        commit and rollback
        """
        #setup:
        # Three exclusive queues: a is fed directly, b via amq.direct, c via
        # amq.topic.
        channel.queue_declare(queue=name_a, exclusive=True)
        channel.queue_declare(queue=name_b, exclusive=True)
        channel.queue_declare(queue=name_c, exclusive=True)
        key = "my_key_" + name_b
        topic = "my_topic_" + name_c
        channel.queue_bind(queue=name_b, exchange="amq.direct", routing_key=key)
        channel.queue_bind(queue=name_c, exchange="amq.topic", routing_key=topic)
        # Seven non-transactional messages (1-4, 6, 7 — numbering skips 5).
        for i in range(1, 5):
            channel.basic_publish(routing_key=name_a, content=Content("Message %d" % i))
        channel.basic_publish(routing_key=key, exchange="amq.direct", content=Content("Message 6"))
        channel.basic_publish(routing_key=topic, exchange="amq.topic", content=Content("Message 7"))
        # Everything after tx_select happens inside the open transaction.
        channel.tx_select()
        #consume and ack messages
        sub_a = channel.basic_consume(queue=name_a, no_ack=False)
        queue_a = self.client.queue(sub_a.consumer_tag)
        for i in range(1, 5):
            msg = queue_a.get(timeout=self.recv_timeout())
            self.assertEqual("Message %d" % i, msg.content.body)
        channel.basic_ack(delivery_tag=msg.delivery_tag, multiple=True)
        sub_b = channel.basic_consume(queue=name_b, no_ack=False)
        queue_b = self.client.queue(sub_b.consumer_tag)
        msg = queue_b.get(timeout=self.recv_timeout())
        self.assertEqual("Message 6", msg.content.body)
        channel.basic_ack(delivery_tag=msg.delivery_tag)
        sub_c = channel.basic_consume(queue=name_c, no_ack=False)
        queue_c = self.client.queue(sub_c.consumer_tag)
        msg = queue_c.get(timeout=self.recv_timeout())
        self.assertEqual("Message 7", msg.content.body)
        channel.basic_ack(delivery_tag=msg.delivery_tag)
        #publish messages
        # Routed so the TxMessages land on the *opposite* queues from the
        # initial batch (a's topic -> c, c's queue -> a).
        for i in range(1, 5):
            channel.basic_publish(routing_key=topic, exchange="amq.topic", content=Content("TxMessage %d" % i))
        channel.basic_publish(routing_key=key, exchange="amq.direct", content=Content("TxMessage 6"))
        channel.basic_publish(routing_key=name_a, content=Content("TxMessage 7"))
        return queue_a, queue_b, queue_c
    def test_commit_overlapping_acks(self):
        """
        Test that logically 'overlapping' acks do not cause errors on commit
        """
        channel = self.channel
        channel.queue_declare(queue="commit-overlapping", exclusive=True)
        for i in range(1, 10):
            channel.basic_publish(routing_key="commit-overlapping", content=Content("Message %d" % i))
        channel.tx_select()
        sub = channel.basic_consume(queue="commit-overlapping", no_ack=False)
        queue = self.client.queue(sub.consumer_tag)
        for i in range(1, 10):
            msg = queue.get(timeout=self.recv_timeout())
            self.assertEqual("Message %d" % i, msg.content.body)
            # NOTE(review): range(1, 10) never produces 10, so the third ack
            # point is unreachable — presumably [3, 6, 9] (or range(1, 11))
            # was intended; confirm against the protocol spec linked above.
            if i in [3, 6, 10]:
                channel.basic_ack(delivery_tag=msg.delivery_tag)
        channel.tx_commit()
        #check all have been acked:
        try:
            extra = queue.get(timeout=self.recv_timeout_negative())
            self.fail("Got unexpected message: " + extra.content.body)
        except Empty: None
| irinabov/debian-qpid-python | qpid_tests/broker_0_8/tx.py | Python | apache-2.0 | 8,155 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for running legacy optimizer code with DistributionStrategy."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy
from tensorflow.contrib.distribute.python import combinations
from tensorflow.contrib.distribute.python.single_loss_example import batchnorm_example
from tensorflow.contrib.distribute.python.single_loss_example import minimize_loss_example
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import reduce_util
from tensorflow.python.eager import context
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.layers import core
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.ops.losses import losses_impl
class MinimizeLossStepTest(test.TestCase, parameterized.TestCase):
  """Runs optimizer-driven training steps under various DistributionStrategy
  implementations (one-device, mirrored, TPU) and checks that the loss/error
  decreases and that variable/update bookkeeping behaves as expected."""
  def _get_iterator(self, ds):
    # Eager one-shot iterators need no initialization; graph-mode
    # initializable iterators are initialized here before first use.
    if context.executing_eagerly():
      iterator = ds.make_one_shot_iterator()
    else:
      iterator = ds.make_initializable_iterator()
      self.evaluate(iterator.initializer)
    return iterator
  @combinations.generate(
      combinations.times(
          combinations.distributions_and_v1_optimizers(),
          combinations.combine(mode=["graph"], use_callable_loss=[True, False])
          + combinations.combine(mode=["eager"], use_callable_loss=[True])) +
      combinations.combine(
          distribution=[combinations.tpu_strategy],
          optimizer_fn=combinations.optimizers_v1,
          mode=["graph"],
          use_callable_loss=[True, False]))
  def testTrainNetwork(self, distribution, optimizer_fn, use_callable_loss):
    with distribution.scope():
      model_fn, dataset_fn, layer = minimize_loss_example(
          optimizer_fn, use_bias=True, use_callable_loss=use_callable_loss)
      def step_fn(ctx, inputs):
        del ctx # Unused
        return distribution.group(
            distribution.call_for_each_replica(model_fn, args=(inputs,)))
      iterator = self._get_iterator(distribution.distribute_dataset(dataset_fn))
      def run_step():
        return distribution.run_steps_on_dataset(
            step_fn, iterator, iterations=2).run_op
      self.evaluate(distribution.initialize())
      if not context.executing_eagerly():
        with self.cached_session() as sess:
          run_step = sess.make_callable(run_step())
      self.evaluate(variables_lib.global_variables_initializer())
      weights, biases = [], []
      for _ in range(5):
        run_step()
        weights.append(self.evaluate(layer.kernel))
        biases.append(self.evaluate(layer.bias))
      self.evaluate(distribution.finalize())
      # The model is trained towards w + b == 1, so |w + b - 1| must be
      # monotonically non-increasing across the 5 recorded steps.
      error = abs(numpy.add(numpy.squeeze(weights), numpy.squeeze(biases)) - 1)
      is_not_increasing = all(y <= x for x, y in zip(error, error[1:]))
      self.assertTrue(is_not_increasing)
  @combinations.generate(
      combinations.times(
          combinations.distributions_and_v1_optimizers(),
          combinations.combine(mode=["graph"], use_callable_loss=[True, False])
          + combinations.combine(mode=["eager"], use_callable_loss=[True])))
  def testTrainNetworkByCallForEachReplica(self, distribution, optimizer_fn,
                                           use_callable_loss):
    # Same convergence check as testTrainNetwork, but driving the step via
    # call_for_each_replica directly instead of run_steps_on_dataset.
    with distribution.scope():
      model_fn, dataset_fn, layer = minimize_loss_example(
          optimizer_fn, use_bias=True, use_callable_loss=use_callable_loss)
      iterator = self._get_iterator(distribution.distribute_dataset(dataset_fn))
      def run_step():
        return distribution.group(
            distribution.call_for_each_replica(
                model_fn, args=(iterator.get_next(),)))
      if not context.executing_eagerly():
        with self.cached_session() as sess:
          run_step = sess.make_callable(run_step())
      self.evaluate(variables_lib.global_variables_initializer())
      weights, biases = [], []
      for _ in range(10):
        run_step()
        weights.append(self.evaluate(layer.kernel))
        biases.append(self.evaluate(layer.bias))
      error = abs(numpy.add(numpy.squeeze(weights), numpy.squeeze(biases)) - 1)
      is_not_increasing = all(y <= x for x, y in zip(error, error[1:]))
      self.assertTrue(is_not_increasing)
  @combinations.generate(
      combinations.times(
          combinations.distributions_and_v1_optimizers() +
          combinations.distributions_and_v2_optimizers(),
          combinations.combine(mode=["graph", "eager"])) +
      combinations.combine(
          distribution=[combinations.tpu_strategy],
          optimizer_fn=combinations.optimizers_v1+combinations.optimizers_v2,
          mode=["graph"]))
  def testOptimizerInsideModelFn(self, distribution, optimizer_fn):
    # Records every variable (and every trainable variable) created while
    # the optimizer is constructed *inside* the model_fn, then compares
    # against the expected slot/parameter variable names.
    created_variables = []
    trainable_variables = []
    def appending_creator(next_creator, *args, **kwargs):
      v = next_creator(*args, **kwargs)
      created_variables.append(v.name)
      if "trainable" in kwargs and kwargs["trainable"]:
        trainable_variables.append(v.name)
      return v
    # Creator scope needs to be set before it's used inside
    # `distribution.scope`.
    with variable_scope.variable_creator_scope(
        appending_creator), distribution.scope():
      model_fn, dataset_fn, layer = minimize_loss_example(
          optimizer_fn,
          use_bias=True,
          use_callable_loss=True,
          create_optimizer_inside_model_fn=True)
      def step_fn(ctx, inputs):
        del ctx # Unused
        return distribution.group(
            distribution.call_for_each_replica(model_fn, args=(inputs,)))
      iterator = self._get_iterator(distribution.distribute_dataset(dataset_fn))
      def run_step():
        return distribution.run_steps_on_dataset(
            step_fn, iterator, iterations=1).run_op
      self.evaluate(distribution.initialize())
      if not context.executing_eagerly():
        with self.cached_session() as sess:
          run_step = sess.make_callable(run_step())
      self.evaluate(variables_lib.global_variables_initializer())
      run_step()
      self.evaluate(distribution.finalize())
    def get_expected_variables(optimizer_fn, num_parameter_devices):
      # Adagrad creates one slot variable per parameter; plain gradient
      # descent creates none.
      variables_map = {
          "GradientDescent": ["dense/kernel", "dense/bias"],
          "Adagrad": [
              "dense/kernel/Adagrad", "dense/kernel",
              "dense/bias/Adagrad", "dense/bias"
          ]
      }
      variables = variables_map[optimizer_fn().get_name()]
      variables.extend([
          v + "/replica_{}".format(replica)
          for v in variables
          for replica in range(1, num_parameter_devices)
      ])
      return set([v + ":0" for v in variables])
    self.assertEqual(
        get_expected_variables(optimizer_fn,
                               len(distribution.parameter_devices)),
        set(created_variables))
  @combinations.generate(
      combinations.times(
          combinations.combine(momentum=[0.8, 0.9, 0.99], renorm=[False, True]),
          combinations.times(
              combinations.distributions_and_v1_optimizers(),
              combinations.combine(
                  mode=["graph", "eager"],
                  # TODO(isaprykin): Allow False here. Currently subsequent
                  # replicas will re-execute UPDATE_OPS of previous replicas.
                  update_ops_in_cross_replica_mode=[True])) +
          combinations.combine(
              distribution=[combinations.tpu_strategy],
              optimizer_fn=combinations.optimizers_v1,
              mode=["graph"],
              update_ops_in_cross_replica_mode=[False])))
  def testTrainNetworkWithBatchNorm(self, distribution, optimizer_fn, momentum,
                                    renorm, update_ops_in_cross_replica_mode):
    """Verifies that moving mean updates are reduced across replicas."""
    with distribution.scope():
      num_replicas = distribution.num_replicas_in_sync
      model_fn, dataset_fn, batchnorm = batchnorm_example(
          optimizer_fn,
          batch_per_epoch=num_replicas,
          momentum=momentum,
          renorm=renorm,
          update_ops_in_replica_mode=not update_ops_in_cross_replica_mode)
      def step_fn(ctx, inputs):
        del ctx # Unused
        fetches = distribution.unwrap(
            distribution.call_for_each_replica(model_fn, args=(inputs,)))
        if update_ops_in_cross_replica_mode:
          fetches += tuple(ops.get_collection(ops.GraphKeys.UPDATE_OPS))
        return control_flow_ops.group(fetches)
      iterator = self._get_iterator(distribution.distribute_dataset(dataset_fn))
      def run_step():
        return distribution.run_steps_on_dataset(
            step_fn, iterator, iterations=1).run_op
      self.evaluate(distribution.initialize())
      if not context.executing_eagerly():
        with self.cached_session() as sess:
          run_step = sess.make_callable(run_step())
      self.evaluate(variables_lib.global_variables_initializer())
      expected_moving_means = [0.] * 8
      def averaged_batch_mean(i):
        # Each batch has shape [16, 8] where the ith element in jth list is
        # (8 * j + i + replica_id * 100). So the batch mean in each replica is
        # (60 + i + replica_id * 100). So here comes its batch mean over all
        # replicas:
        return 60. + i + (num_replicas - 1.) / 2. * 100.
      for _ in range(10):
        run_step()
        moving_means = self.evaluate(batchnorm.moving_mean)
        # We make sure that the moving_mean is updated as if the sample mean is
        # calculated over all replicas.
        for i, expected_moving_mean in enumerate(expected_moving_means):
          expected_moving_means[i] -= ((
              expected_moving_mean - averaged_batch_mean(i)) * (1.0 - momentum))
          self.assertNear(expected_moving_means[i], moving_means[i], 0.0001)
      self.evaluate(distribution.finalize())
  @combinations.generate(
      combinations.times(
          combinations.combine(
              optimizer_fn=[
                  combinations.gradient_descent_optimizer_v1_fn,
                  combinations.gradient_descent_optimizer_v2_fn
              ],
              loss_reduction=[
                  losses_impl.Reduction.SUM, losses_impl.Reduction.MEAN,
                  losses_impl.Reduction.SUM_OVER_BATCH_SIZE,
                  losses_impl.Reduction.SUM_OVER_NONZERO_WEIGHTS
              ]),
          combinations.times(
              combinations.combine(
                  distribution=[
                      combinations.one_device_strategy,
                      combinations.mirrored_strategy_with_gpu_and_cpu,
                      combinations.mirrored_strategy_with_two_gpus,
                      combinations.core_mirrored_strategy_with_gpu_and_cpu,
                      combinations.core_mirrored_strategy_with_two_gpus
                  ]),
              combinations.combine(
                  mode=["graph"], use_callable_loss=[True, False]) +
              combinations.combine(mode=["eager"], use_callable_loss=[True])) +
          combinations.combine(
              distribution=[combinations.tpu_strategy],
              mode=["graph"],
              use_callable_loss=[True, False])))
  def testMeanVsSum(self, distribution, optimizer_fn, loss_reduction,
                    use_callable_loss):
    # Checks the interaction of loss reduction (SUM vs the MEAN variants)
    # with the number of replicas, using a hand-computable 1-weight model.
    with distribution.scope():
      all_vars = []
      def model_fn(inputs):
        x, y = inputs
        def loss_fn():
          # Use fixed initialization to make the steps deterministic.
          w = variable_scope.get_variable("w", initializer=[[2.]])
          all_vars.append(w)
          predict = math_ops.matmul(x, w)
          return losses_impl.mean_squared_error(
              y, predict, reduction=loss_reduction)
        optimizer = optimizer_fn() # GradientDescent with 0.2 learning rate
        if use_callable_loss:
          return optimizer.minimize(loss_fn)
        else:
          return optimizer.minimize(loss_fn())
      def dataset_fn():
        features = dataset_ops.Dataset.from_tensors([[2.], [7.]])
        labels = dataset_ops.Dataset.from_tensors([[6.], [21.]])
        return dataset_ops.Dataset.zip((features, labels)).repeat()
      def step_fn(ctx, inputs):
        del ctx # Unused
        return distribution.group(
            distribution.call_for_each_replica(model_fn, args=(inputs,)))
      iterator = self._get_iterator(distribution.distribute_dataset(dataset_fn))
      def run_step():
        return distribution.run_steps_on_dataset(
            step_fn, iterator, iterations=1).run_op
      self.evaluate(distribution.initialize())
      if not context.executing_eagerly():
        with self.cached_session() as sess:
          run_step = sess.make_callable(run_step())
      self.evaluate(variables_lib.global_variables_initializer())
      run_step()
      v = all_vars[0]
      self.assertTrue(all(v is vi for vi in all_vars[1:]))
      weight = numpy.squeeze(self.evaluate(v))
      # Our model is:
      #   predict = x * w
      #   loss = (predict - y)^2
      #   dloss/dpredict = 2*(predict - y)
      #   dloss/dw = 2 * x^T @ (predict - y)
      # For our batch size of 2, assuming sum loss reduction:
      #   x = [2, 7]
      #   y = [6, 21]
      #   w_initial = 2
      #   predict = [4, 14]
      #   predict - y = [-2, -7]
      #   dloss/dw = 2 <[2, 7], [-2, -7]> = - 2(4 + 49) = -106
      # So unreplicated the update to w with lr=0.2 is -0.2 * -106 = 21.2
      # with sum loss reduction, or 10.6 with mean.
      if loss_reduction == losses_impl.Reduction.SUM:
        # Note that the "distribution.num_replicas_in_sync" factor will go away
        # once we split the input across replicas, instead of pulling a complete
        # batch of input per replica.
        self.assertNear(weight, 2 + 21.2 * distribution.num_replicas_in_sync,
                        0.0001)
      else:
        # One of the mean loss reductions.
        self.assertNear(weight, 2 + 10.6, 0.0001)
      self.evaluate(distribution.finalize())
  @combinations.generate(
      combinations.times(
          combinations.distributions_and_v1_optimizers(),
          combinations.combine(mode=["graph", "eager"]),
          combinations.combine(is_tpu=[False])) +
      combinations.combine(
          distribution=[combinations.tpu_strategy],
          optimizer_fn=combinations.optimizers_v1,
          mode=["graph"],
          is_tpu=[True]))
  def testRunStepsWithOutputContext(self, distribution, optimizer_fn, is_tpu):
    # Exercises the MultiStepContext plumbing: last-step outputs (reduced
    # and unreduced) and non-tensor outputs must round-trip correctly.
    with distribution.scope():
      def dataset_fn():
        dataset = dataset_ops.Dataset.from_tensors([[1.]]).repeat()
        # TODO(priyag): batch with drop_remainder=True causes shapes to be
        # fully defined for TPU. Remove this when XLA supports dynamic shapes.
        return dataset.batch(batch_size=1, drop_remainder=True)
      optimizer = optimizer_fn()
      layer = core.Dense(1, use_bias=True)
      key1 = "foo"
      value1 = "bar"
      def model_fn(output_context, x):
        """A very simple model written by the user."""
        def loss_fn():
          y = array_ops.reshape(layer(x), []) - constant_op.constant(1.)
          return y * y
        train_op = optimizer.minimize(loss_fn)
        loss = loss_fn()
        output_context.set_last_step_output(
            name="replica_loss_reduced",
            output=loss,
            reduce_op=reduce_util.ReduceOp.MEAN)
        output_context.set_non_tensor_output(key1, value1)
        return (train_op, loss)
      def step_fn(output_context, inputs):
        (train_op, loss) = distribution.call_for_each_replica(
            model_fn, args=(output_context, inputs))
        output_context.set_last_step_output(
            name="cross_replica_loss_reduced",
            output=loss,
            reduce_op=reduce_util.ReduceOp.MEAN)
        output_context.set_last_step_output(
            name="cross_replica_loss_not_reduced",
            output=loss)
        return distribution.group(train_op)
      iterator = self._get_iterator(distribution.distribute_dataset(dataset_fn))
      def run_step():
        initial_loss = lambda: constant_op.constant(1e7)
        # Initial values corresponding to reduced losses are just single
        # tensors. But for non reduced losses, we need to have initial
        # values that are of the same structure as non reduced losses. In
        # MirroredStrategy, this will be a list of losses, in TPUStrategy
        # it will be single tensor. Using `broadcast` followed by `unwrap`
        # gives us the desired initial value structure.
        initial_loop_values = {
            "replica_loss_reduced": initial_loss(),
            "cross_replica_loss_reduced": initial_loss(),
            "cross_replica_loss_not_reduced":
                distribution.unwrap(distribution.broadcast(initial_loss()))
        }
        ctx = distribution.run_steps_on_dataset(
            step_fn, iterator, iterations=2,
            initial_loop_values=initial_loop_values)
        self.assertEqual({key1: (value1,)}, ctx.non_tensor_outputs)
        self._verify_loss_output(
            initial_loss(),
            loss_output=ctx.last_step_outputs["replica_loss_reduced"],
            reduced=True, distribution=distribution)
        self._verify_loss_output(
            initial_loss(),
            loss_output=ctx.last_step_outputs["cross_replica_loss_reduced"],
            reduced=True, distribution=distribution)
        self._verify_loss_output(
            initial_loss(),
            loss_output=ctx.last_step_outputs["cross_replica_loss_not_reduced"],
            reduced=False, distribution=distribution)
        return (ctx.run_op, ctx.last_step_outputs["replica_loss_reduced"])
      self.evaluate(distribution.initialize())
      if not context.executing_eagerly():
        with self.cached_session() as sess:
          run_step = sess.make_callable(run_step())
      self.evaluate(variables_lib.global_variables_initializer())
      weights, biases, losses = [], [], []
      for _ in range(5):
        _, loss = run_step()
        losses.append(loss)
        weights.append(self.evaluate(layer.kernel))
        biases.append(self.evaluate(layer.bias))
      self.evaluate(distribution.finalize())
      loss_is_not_increasing = all(y <= x for x, y in zip(losses, losses[1:]))
      self.assertTrue(loss_is_not_increasing)
      error = abs(
          numpy.add(numpy.squeeze(weights), numpy.squeeze(biases)) - 1)
      error_is_not_increasing = all(y <= x for x, y in zip(error, error[1:]))
      self.assertTrue(error_is_not_increasing)
  def _verify_loss_output(self, initial_loss, loss_output, reduced,
                          distribution):
    # Checks that a last-step loss output has the structure implied by
    # `reduced` and matches `initial_loss` in dtype and shape.
    if not reduced:
      self.assertLen(distribution.unwrap(loss_output),
                     distribution.num_replicas_in_sync)
      loss_tensor = distribution.reduce(reduce_util.ReduceOp.MEAN, loss_output)
    else:
      unwrapped_output = distribution.unwrap(loss_output)
      self.assertLen(unwrapped_output, 1)
      loss_tensor = unwrapped_output[0]
    self.assertEqual(initial_loss.dtype, loss_tensor.dtype)
    self.assertEqual(initial_loss.shape, loss_tensor.shape)
# Standard TensorFlow test entry point.
if __name__ == "__main__":
  test.main()
| hfp/tensorflow-xsmm | tensorflow/contrib/distribute/python/minimize_loss_test.py | Python | apache-2.0 | 20,274 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit Tests for remote procedure calls using carrot
"""
from nova import log as logging
from nova.rpc import impl_carrot
from nova.tests.rpc import common
# Module-level logger; not used in the visible tests but kept for parity with
# the other nova test modules.
LOG = logging.getLogger('nova.tests.rpc')
class RpcCarrotTestCase(common._BaseRpcTestCase):
    # Runs the shared RPC test suite from common._BaseRpcTestCase against the
    # carrot transport implementation.
    def setUp(self):
        # self.rpc must be assigned before the base setUp runs — presumably
        # the base class reads it during setup; confirm in common.py.
        self.rpc = impl_carrot
        super(RpcCarrotTestCase, self).setUp()
    def tearDown(self):
        super(RpcCarrotTestCase, self).tearDown()
    def test_connectionpool_single(self):
        """Test that ConnectionPool recycles a single connection."""
        conn1 = self.rpc.ConnectionPool.get()
        self.rpc.ConnectionPool.put(conn1)
        conn2 = self.rpc.ConnectionPool.get()
        self.rpc.ConnectionPool.put(conn2)
        self.assertEqual(conn1, conn2)
| rcbops/nova-buildpackage | nova/tests/rpc/test_carrot.py | Python | apache-2.0 | 1,534 |
#!/usr/bin/python3
# Generate .js files defining Blockly core and language messages.
#
# Copyright 2013 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import codecs
import os
import re
import sys
from common import read_json_file
_NEWLINE_PATTERN = re.compile('[\n\r]')
def string_is_ascii(s):
    """Return True if every character of *s* has an ordinal below 128.

    Non-iterable (or non-character) input yields False instead of raising.
    """
    try:
        for char in s:
            if ord(char) >= 128:
                return False
        return True
    except TypeError:
        return False
def load_constants(filename):
    """Return JS assignments for every constant defined in *filename*.

    The constants file must be emitted into every language's output.
    """
    constant_defs = read_json_file(filename)
    pieces = ['\n']
    for key in constant_defs:
        # Escape double quotes so the value is a valid JS string literal.
        escaped = constant_defs[key].replace('"', '\\"')
        pieces.append(u'\nBlockly.Msg["{0}"] = "{1}";'.format(key, escaped))
    return ''.join(pieces)
def main():
    """Generate .js files defining Blockly core and language messages."""
    # Process command-line arguments.
    parser = argparse.ArgumentParser(description='Convert JSON files to JS.')
    parser.add_argument('--source_lang', default='en',
                        help='ISO 639-1 source language code')
    parser.add_argument('--source_lang_file',
                        default=os.path.join('json', 'en.json'),
                        help='Path to .json file for source language')
    parser.add_argument('--source_synonym_file',
                        default=os.path.join('json', 'synonyms.json'),
                        help='Path to .json file with synonym definitions')
    parser.add_argument('--source_constants_file',
                        default=os.path.join('json', 'constants.json'),
                        help='Path to .json file with constant definitions')
    parser.add_argument('--output_dir', default='js/',
                        help='relative directory for output files')
    parser.add_argument('--key_file', default='keys.json',
                        help='relative path to input keys file')
    parser.add_argument('--quiet', action='store_true', default=False,
                        help='do not write anything to standard output')
    parser.add_argument('files', nargs='+', help='input files')
    args = parser.parse_args()
    if not args.output_dir.endswith(os.path.sep):
        args.output_dir += os.path.sep

    # Read in source language .json file, which provides any values missing
    # in target languages' .json files.
    source_defs = read_json_file(os.path.join(os.curdir, args.source_lang_file))
    # Make sure the source file doesn't contain a newline or carriage return.
    for key, value in source_defs.items():
        if _NEWLINE_PATTERN.search(value):
            print('ERROR: definition of {0} in {1} contained a newline character.'.
                  format(key, args.source_lang_file))
            sys.exit(1)
    sorted_keys = sorted(source_defs.keys())

    # Read in synonyms file, which must be output in every language.
    synonym_defs = read_json_file(os.path.join(
        os.curdir, args.source_synonym_file))
    # synonym_defs is also being sorted to ensure the same order is kept
    synonym_text = '\n'.join([u'Blockly.Msg["{0}"] = Blockly.Msg["{1}"];'
                              .format(key, synonym_defs[key])
                              for key in sorted(synonym_defs)])

    # Read in constants file, which must be output in every language.
    constants_text = load_constants(os.path.join(os.curdir, args.source_constants_file))

    # Create each output file.
    for arg_file in args.files:
        (_, filename) = os.path.split(arg_file)
        target_lang = filename[:filename.index('.')]
        if target_lang not in ('qqq', 'keys', 'synonyms', 'constants'):
            target_defs = read_json_file(os.path.join(os.curdir, arg_file))

            # Verify that keys are 'ascii'
            bad_keys = [key for key in target_defs if not string_is_ascii(key)]
            if bad_keys:
                print(u'These keys in {0} contain non ascii characters: {1}'.format(
                    filename, ', '.join(bad_keys)))

            # If there's a '\n' or '\r', remove it and print a warning.
            for key, value in target_defs.items():
                if _NEWLINE_PATTERN.search(value):
                    print(u'WARNING: definition of {0} in {1} contained '
                          'a newline character.'.
                          format(key, arg_file))
                    target_defs[key] = _NEWLINE_PATTERN.sub(' ', value)

            # Output file.
            outname = os.path.join(os.curdir, args.output_dir, target_lang + '.js')
            with codecs.open(outname, 'w', 'utf-8') as outfile:
                # NOTE(review): the template below has no '{...}' placeholders,
                # so the .format(...) call performs no substitution; it looks
                # like a leftover from an earlier template -- confirm.
                outfile.write(
                    """// This file was automatically generated. Do not modify.
'use strict';
""".format(target_lang.replace('-', '.')))
                # For each key in the source language file, output the target
                # value if present; otherwise, output the source language value
                # with a warning comment.
                for key in sorted_keys:
                    if key in target_defs:
                        value = target_defs[key]
                        comment = ''
                        # Consume the key so leftovers can be reported below.
                        del target_defs[key]
                    else:
                        value = source_defs[key]
                        comment = ' // untranslated'
                    value = value.replace('"', '\\"')
                    outfile.write(u'Blockly.Msg["{0}"] = "{1}";{2}\n'
                                  .format(key, value, comment))

                # Announce any keys defined only for target language.
                if target_defs:
                    extra_keys = [key for key in target_defs if key not in synonym_defs]
                    synonym_keys = [key for key in target_defs if key in synonym_defs]
                    if not args.quiet:
                        if extra_keys:
                            print(u'These extra keys appeared in {0}: {1}'.format(
                                filename, ', '.join(extra_keys)))
                        if synonym_keys:
                            print(u'These synonym keys appeared in {0}: {1}'.format(
                                filename, ', '.join(synonym_keys)))

                outfile.write(synonym_text)
                outfile.write(constants_text)

            if not args.quiet:
                print('Created {0}'.format(outname))
| rachel-fenichel/blockly | scripts/i18n/create_messages.py | Python | apache-2.0 | 6,375 |
from setuptools import setup
import re
def read(filename):
    """Return the entire contents of *filename* as a string."""
    with open(filename) as handle:
        return handle.read()
# Extract the version string from numtraits.py without importing the module.
__version__ = re.search(r'^__version__ = ([\'"])(?P<version>.*)\1$',
                        read('numtraits.py'), re.M).groupdict()['version']

# Convert the Markdown README to reStructuredText for PyPI when pypandoc is
# available; otherwise fall back to the raw Markdown text.
try:
    import pypandoc
    long_description = pypandoc.convert('README.md', 'rst')
except (IOError, ImportError):
    long_description = read('README.md')

setup(
    version=__version__,
    url="https://github.com/astrofrog/numtraits",
    name="numtraits",
    description='Numerical traits for Python objects',
    long_description=long_description,
    py_modules=['numtraits'],
    license='BSD',
    author='Thomas Robitaille',
    author_email='thomas.robitaille@gmail.com',
    install_requires=['numpy','traitlets']
)
| astrofrog/numtraits | setup.py | Python | bsd-2-clause | 790 |
# -*- coding: utf-8 -*-
# pylint: disable=no-self-use, line-too-long
from __future__ import absolute_import, print_function, with_statement
import sys
import unittest
from mock import Mock, patch
from nose.tools import * # pylint: disable=wildcard-import, unused-wildcard-import
import six
from six.moves import range # pylint: disable=redefined-builtin
from six.moves import zip # pylint: disable=redefined-builtin
from behave.model_core import FileLocation
from behave.model import Feature, Scenario, ScenarioOutline, Step
from behave.model import Table, Row
from behave.matchers import NoMatch
from behave.configuration import Configuration
from behave.compat.collections import OrderedDict
from behave import step_registry
# -- CONVENIENCE-ALIAS:
_text = six.text_type
class TestFeatureRun(unittest.TestCase):
    """Tests for Feature.run() driven by a fully mocked runner."""
    # pylint: disable=invalid-name

    def setUp(self):
        # Mocked runner exposing the config/context/formatters/hooks that
        # Feature.run() interacts with.
        self.runner = Mock()
        self.runner.feature.tags = []
        self.config = self.runner.config = Mock()
        self.context = self.runner.context = Mock()
        self.formatters = self.runner.formatters = [Mock()]
        self.run_hook = self.runner.run_hook = Mock()

    def test_formatter_feature_called(self):
        feature = Feature('foo.feature', 1, u'Feature', u'foo',
                          background=Mock())
        feature.run(self.runner)

        self.formatters[0].feature.assert_called_with(feature)

    def test_formatter_background_called_when_feature_has_background(self):
        feature = Feature('foo.feature', 1, u'Feature', u'foo',
                          background=Mock())
        feature.run(self.runner)

        self.formatters[0].background.assert_called_with(feature.background)

    def test_formatter_background_not_called_when_feature_has_no_background(self):
        feature = Feature('foo.feature', 1, u'Feature', u'foo')
        feature.run(self.runner)

        assert not self.formatters[0].background.called

    def test_run_runs_scenarios(self):
        scenarios = [Mock(), Mock()]
        for scenario in scenarios:
            scenario.tags = []
            scenario.run.return_value = False

        self.config.tags.check.return_value = True  # pylint: disable=no-member
        self.config.name = []

        feature = Feature('foo.feature', 1, u'Feature', u'foo',
                          scenarios=scenarios)
        feature.run(self.runner)

        # Every scenario must have been executed with this runner.
        for scenario in scenarios:
            scenario.run.assert_called_with(self.runner)

    def test_run_runs_named_scenarios(self):
        scenarios = [Mock(Scenario), Mock(Scenario)]
        scenarios[0].name = 'first scenario'
        scenarios[1].name = 'second scenario'
        scenarios[0].tags = []
        scenarios[1].tags = []
        # -- FAKE-CHECK:
        scenarios[0].should_run_with_name_select.return_value = True
        scenarios[1].should_run_with_name_select.return_value = False

        for scenario in scenarios:
            scenario.run.return_value = False

        self.config.tags.check.return_value = True  # pylint: disable=no-member
        self.config.name = ['first', 'third']
        self.config.name_re = Configuration.build_name_re(self.config.name)

        feature = Feature('foo.feature', 1, u'Feature', u'foo',
                          scenarios=scenarios)
        feature.run(self.runner)

        # Only the name-selected scenario runs; both are consulted.
        scenarios[0].run.assert_called_with(self.runner)
        assert not scenarios[1].run.called
        scenarios[0].should_run_with_name_select.assert_called_with(self.config)
        scenarios[1].should_run_with_name_select.assert_called_with(self.config)

    def test_run_runs_named_scenarios_with_regexp(self):
        scenarios = [Mock(), Mock()]
        scenarios[0].name = 'first scenario'
        scenarios[1].name = 'second scenario'
        scenarios[0].tags = []
        scenarios[1].tags = []
        # -- FAKE-CHECK:
        scenarios[0].should_run_with_name_select.return_value = False
        scenarios[1].should_run_with_name_select.return_value = True

        for scenario in scenarios:
            scenario.run.return_value = False

        self.config.tags.check.return_value = True  # pylint: disable=no-member
        self.config.name = ['third .*', 'second .*']
        self.config.name_re = Configuration.build_name_re(self.config.name)

        feature = Feature('foo.feature', 1, u'Feature', u'foo',
                          scenarios=scenarios)
        feature.run(self.runner)

        assert not scenarios[0].run.called
        scenarios[1].run.assert_called_with(self.runner)
        scenarios[0].should_run_with_name_select.assert_called_with(self.config)
        scenarios[1].should_run_with_name_select.assert_called_with(self.config)

    def test_feature_hooks_not_run_if_feature_not_being_run(self):
        # Tag check fails -> the feature is skipped and no hook fires.
        self.config.tags.check.return_value = False  # pylint: disable=no-member

        feature = Feature('foo.feature', 1, u'Feature', u'foo')
        feature.run(self.runner)

        assert not self.run_hook.called
class TestScenarioRun(unittest.TestCase):
    """Tests for Scenario.run() driven by a fully mocked runner."""
    # pylint: disable=invalid-name

    def setUp(self):
        self.runner = Mock()
        self.runner.feature.tags = []
        self.config = self.runner.config = Mock()
        self.config.dry_run = False
        self.context = self.runner.context = Mock()
        self.formatters = self.runner.formatters = [Mock()]
        self.run_hook = self.runner.run_hook = Mock()

    def test_run_invokes_formatter_scenario_and_steps_correctly(self):
        self.config.stdout_capture = False
        self.config.log_capture = False
        self.config.tags.check.return_value = True  # pylint: disable=no-member
        steps = [Mock(), Mock()]
        scenario = Scenario('foo.feature', 17, u'Scenario', u'foo',
                            steps=steps)

        scenario.run(self.runner)

        self.formatters[0].scenario.assert_called_with(scenario)
        for step in steps:
            step.run.assert_called_with(self.runner)

    # Patch target for the output-capture class differs between
    # Python 2 (StringIO module) and Python 3 (io module).
    if sys.version_info[0] == 3:
        stringio_target = 'io.StringIO'
    else:
        stringio_target = 'StringIO.StringIO'

    def test_handles_stdout_and_log_capture(self):
        self.config.stdout_capture = True
        self.config.log_capture = True
        self.config.tags.check.return_value = True  # pylint: disable=no-member

        steps = [Mock(), Mock()]
        scenario = Scenario('foo.feature', 17, u'Scenario', u'foo',
                            steps=steps)

        scenario.run(self.runner)

        self.runner.setup_capture.assert_called_with()
        self.runner.teardown_capture.assert_called_with()

    def test_failed_step_causes_remaining_steps_to_be_skipped(self):
        self.config.stdout_capture = False
        self.config.log_capture = False
        self.config.tags.check.return_value = True  # pylint: disable=no-member

        steps = [Mock(), Mock()]
        scenario = Scenario('foo.feature', 17, u'Scenario', u'foo',
                            steps=steps)
        steps[0].run.return_value = False
        steps[1].step_type = "when"
        steps[1].name = "step1"

        def step1_function(context):  # pylint: disable=unused-argument
            pass
        my_step_registry = step_registry.StepRegistry()
        my_step_registry.add_step_definition("when", "step1", step1_function)

        with patch("behave.step_registry.registry", my_step_registry):
            # run() returns True when the scenario failed.
            assert scenario.run(self.runner)
            eq_(steps[1].status, 'skipped')

    def test_failed_step_causes_context_failure_to_be_set(self):
        self.config.stdout_capture = False
        self.config.log_capture = False
        self.config.tags.check.return_value = True  # pylint: disable=no-member

        steps = [
            Mock(step_type="given", name="step0"),
            Mock(step_type="then", name="step1"),
        ]
        scenario = Scenario('foo.feature', 17, u'Scenario', u'foo',
                            steps=steps)
        steps[0].run.return_value = False

        assert scenario.run(self.runner)
        # pylint: disable=protected-access
        self.context._set_root_attribute.assert_called_with('failed', True)

    def test_undefined_step_causes_failed_scenario_status(self):
        self.config.stdout_capture = False
        self.config.log_capture = False
        self.config.tags.check.return_value = True  # pylint: disable=no-member

        passed_step = Mock()
        undefined_step = Mock()

        steps = [passed_step, undefined_step]
        scenario = Scenario('foo.feature', 17, u'Scenario', u'foo',
                            steps=steps)
        passed_step.run.return_value = True
        passed_step.status = 'passed'
        undefined_step.run.return_value = False
        undefined_step.status = 'undefined'

        assert scenario.run(self.runner)
        eq_(undefined_step.status, 'undefined')
        eq_(scenario.status, 'failed')
        # pylint: disable=protected-access
        self.context._set_root_attribute.assert_called_with('failed', True)

    def test_skipped_steps_set_step_status_and_scenario_status_if_not_set(self):
        self.config.stdout_capture = False
        self.config.log_capture = False
        # Tag check fails -> the whole scenario is skipped.
        self.config.tags.check.return_value = False  # pylint: disable=no-member

        steps = [Mock(), Mock()]
        scenario = Scenario('foo.feature', 17, u'Scenario', u'foo',
                            steps=steps)
        scenario.run(self.runner)

        assert False not in [s.status == 'skipped' for s in steps]
        eq_(scenario.status, 'skipped')

    def test_scenario_hooks_not_run_if_scenario_not_being_run(self):
        self.config.tags.check.return_value = False  # pylint: disable=no-member

        scenario = Scenario('foo.feature', 17, u'Scenario', u'foo')
        scenario.run(self.runner)

        assert not self.run_hook.called

    def test_should_run_with_name_select(self):
        scenario_name = u"first scenario"
        scenario = Scenario("foo.feature", 17, u"Scenario", scenario_name)
        self.config.name = ['first .*', 'second .*']
        self.config.name_re = Configuration.build_name_re(self.config.name)

        assert scenario.should_run_with_name_select(self.config)
class TestScenarioOutline(unittest.TestCase):
    """Tests for ScenarioOutline.run() over its generated example scenarios."""
    # pylint: disable=invalid-name

    def test_run_calls_run_on_each_generated_scenario(self):
        # pylint: disable=protected-access
        outline = ScenarioOutline('foo.feature', 17, u'Scenario Outline',
                                  u'foo')
        outline._scenarios = [Mock(), Mock()]
        for scenario in outline._scenarios:
            scenario.run.return_value = False

        runner = Mock()
        runner.context = Mock()

        outline.run(runner)

        for s in outline._scenarios:
            s.run.assert_called_with(runner)

    def test_run_stops_on_first_failure_if_requested(self):
        # pylint: disable=protected-access
        outline = ScenarioOutline('foo.feature', 17, u'Scenario Outline',
                                  u'foo')
        outline._scenarios = [Mock(), Mock()]
        # First generated scenario fails (run() returns True on failure).
        outline._scenarios[0].run.return_value = True

        runner = Mock()
        runner.context = Mock()
        config = runner.config = Mock()
        config.stop = True

        outline.run(runner)

        outline._scenarios[0].run.assert_called_with(runner)
        assert not outline._scenarios[1].run.called

    def test_run_sets_context_variable_for_outline(self):
        # pylint: disable=protected-access
        outline = ScenarioOutline('foo.feature', 17, u'Scenario Outline',
                                  u'foo')
        outline._scenarios = [Mock(), Mock(), Mock()]
        for scenario in outline._scenarios:
            scenario.run.return_value = False

        runner = Mock()
        context = runner.context = Mock()
        config = runner.config = Mock()
        config.stop = True

        outline.run(runner)

        # 'active_outline' is set per example row and reset to None at the end.
        eq_(context._set_root_attribute.call_args_list, [
            (('active_outline', outline._scenarios[0]._row), {}),
            (('active_outline', outline._scenarios[1]._row), {}),
            (('active_outline', outline._scenarios[2]._row), {}),
            (('active_outline', None), {}),
        ])

    def test_run_should_pass_when_all_examples_pass(self):
        # pylint: disable=protected-access
        outline = ScenarioOutline('foo.feature', 17, u'Scenario Outline',
                                  u'foo')
        outline._scenarios = [Mock(), Mock(), Mock()]
        for scenario in outline._scenarios:
            scenario.run.return_value = False

        runner = Mock()
        context = runner.context = Mock()
        config = runner.config = Mock()
        config.stop = True

        resultFailed = outline.run(runner)
        eq_(resultFailed, False)

    def test_run_should_fail_when_first_examples_fails(self):
        outline = ScenarioOutline('foo.feature', 17, u'Scenario Outline',
                                  u'foo')
        failed = True
        # pylint: disable=protected-access
        outline._scenarios = [Mock(), Mock()]
        outline._scenarios[0].run.return_value = failed
        outline._scenarios[1].run.return_value = not failed

        runner = Mock()
        context = runner.context = Mock()
        config = runner.config = Mock()
        config.stop = True

        resultFailed = outline.run(runner)
        eq_(resultFailed, True)

    def test_run_should_fail_when_last_examples_fails(self):
        outline = ScenarioOutline('foo.feature', 17, u'Scenario Outline',
                                  u'foo')
        failed = True
        # pylint: disable=protected-access
        outline._scenarios = [Mock(), Mock()]
        outline._scenarios[0].run.return_value = not failed
        outline._scenarios[1].run.return_value = failed

        runner = Mock()
        context = runner.context = Mock()
        config = runner.config = Mock()
        config.stop = True

        resultFailed = outline.run(runner)
        eq_(resultFailed, True)

    def test_run_should_fail_when_middle_examples_fails(self):
        outline = ScenarioOutline('foo.feature', 17, u'Scenario Outline',
                                  u'foo')
        failed = True
        # pylint: disable=protected-access
        outline._scenarios = [Mock(), Mock(), Mock()]
        outline._scenarios[0].run.return_value = not failed
        outline._scenarios[1].run.return_value = failed
        outline._scenarios[2].run.return_value = not failed

        runner = Mock()
        context = runner.context = Mock()
        config = runner.config = Mock()
        config.stop = True

        resultFailed = outline.run(runner)
        eq_(resultFailed, True)
def raiser(exception):
    """Build a callable that ignores its arguments and raises *exception*."""
    def _raise(*args, **kwargs):  # pylint: disable=unused-argument
        raise exception
    return _raise
class TestStepRun(unittest.TestCase):
    """Tests for Step.run() driven by a fully mocked runner/registry."""
    # pylint: disable=invalid-name

    def setUp(self):
        self.step_registry = Mock()
        self.runner = Mock()
        self.runner.step_registry = self.step_registry
        self.config = self.runner.config = Mock()
        self.config.outputs = [None]
        self.context = self.runner.context = Mock()
        # NOTE(review): debug print left behind in setUp; consider removing.
        print('context is %s' % self.context)
        self.formatters = self.runner.formatters = [Mock()]
        # Capture mocks return empty text by default so no capture output
        # is appended to error messages unless a test overrides them.
        self.stdout_capture = self.runner.stdout_capture = Mock()
        self.stdout_capture.getvalue.return_value = ''
        self.stderr_capture = self.runner.stderr_capture = Mock()
        self.stderr_capture.getvalue.return_value = ''
        self.log_capture = self.runner.log_capture = Mock()
        self.log_capture.getvalue.return_value = ''
        self.run_hook = self.runner.run_hook = Mock()

    def test_run_appends_step_to_undefined_when_no_match_found(self):
        step = Step('foo.feature', 17, u'Given', 'given', u'foo')
        self.runner.step_registry.find_match.return_value = None
        self.runner.undefined_steps = []
        assert not step.run(self.runner)

        assert step in self.runner.undefined_steps
        eq_(step.status, 'undefined')

    def test_run_reports_undefined_step_via_formatter_when_not_quiet(self):
        step = Step('foo.feature', 17, u'Given', 'given', u'foo')
        self.runner.step_registry.find_match.return_value = None
        assert not step.run(self.runner)

        self.formatters[0].match.assert_called_with(NoMatch())
        self.formatters[0].result.assert_called_with(step)

    def test_run_with_no_match_does_not_touch_formatter_when_quiet(self):
        step = Step('foo.feature', 17, u'Given', 'given', u'foo')
        self.runner.step_registry.find_match.return_value = None
        assert not step.run(self.runner, quiet=True)

        assert not self.formatters[0].match.called
        assert not self.formatters[0].result.called

    def test_run_when_not_quiet_reports_match_and_result(self):
        step = Step('foo.feature', 17, u'Given', 'given', u'foo')
        match = Mock()
        self.runner.step_registry.find_match.return_value = match

        # Exercise the pass, assertion-failure and exception paths.
        side_effects = (None, raiser(AssertionError('whee')),
                        raiser(Exception('whee')))
        for side_effect in side_effects:
            match.run.side_effect = side_effect
            step.run(self.runner)
            self.formatters[0].match.assert_called_with(match)
            self.formatters[0].result.assert_called_with(step)

    def test_run_when_quiet_reports_nothing(self):
        step = Step('foo.feature', 17, u'Given', 'given', u'foo')
        match = Mock()
        self.runner.step_registry.find_match.return_value = match

        side_effects = (None, raiser(AssertionError('whee')),
                        raiser(Exception('whee')))
        for side_effect in side_effects:
            match.run.side_effect = side_effect
            step.run(self.runner, quiet=True)
            assert not self.formatters[0].match.called
            assert not self.formatters[0].result.called

    def test_run_runs_before_hook_then_match_then_after_hook(self):
        step = Step('foo.feature', 17, u'Given', 'given', u'foo')
        match = Mock()
        self.runner.step_registry.find_match.return_value = match

        side_effects = (None, AssertionError('whee'), Exception('whee'))
        for side_effect in side_effects:
            # Make match.run() and runner.run_hook() the same mock so
            # we can make sure things happen in the right order.
            self.runner.run_hook = match.run = Mock()

            def effect(thing):
                # pylint: disable=unused-argument
                def raiser_(*args, **kwargs):
                    match.run.side_effect = None
                    if thing:
                        raise thing

                def nonraiser(*args, **kwargs):
                    match.run.side_effect = raiser_

                return nonraiser

            match.run.side_effect = effect(side_effect)

            step.run(self.runner)

            # before_step hook, then the step itself, then after_step hook.
            eq_(match.run.call_args_list, [
                (('before_step', self.context, step), {}),
                ((self.context,), {}),
                (('after_step', self.context, step), {}),
            ])

    def test_run_sets_table_if_present(self):
        step = Step('foo.feature', 17, u'Given', 'given', u'foo',
                    table=Mock())
        self.runner.step_registry.find_match.return_value = Mock()
        step.run(self.runner)

        eq_(self.context.table, step.table)

    def test_run_sets_text_if_present(self):
        step = Step('foo.feature', 17, u'Given', 'given', u'foo',
                    text=Mock(name='text'))
        self.runner.step_registry.find_match.return_value = Mock()
        step.run(self.runner)

        eq_(self.context.text, step.text)

    def test_run_sets_status_to_passed_if_nothing_goes_wrong(self):
        step = Step('foo.feature', 17, u'Given', 'given', u'foo')
        step.error_message = None
        self.runner.step_registry.find_match.return_value = Mock()
        step.run(self.runner)

        eq_(step.status, 'passed')
        eq_(step.error_message, None)

    def test_run_sets_status_to_failed_on_assertion_error(self):
        step = Step('foo.feature', 17, u'Given', 'given', u'foo')
        step.error_message = None
        match = Mock()
        match.run.side_effect = raiser(AssertionError('whee'))
        self.runner.step_registry.find_match.return_value = match
        step.run(self.runner)

        eq_(step.status, 'failed')
        assert step.error_message.startswith('Assertion Failed')

    @patch('traceback.format_exc')
    def test_run_sets_status_to_failed_on_exception(self, format_exc):
        step = Step('foo.feature', 17, u'Given', 'given', u'foo')
        step.error_message = None
        match = Mock()
        match.run.side_effect = raiser(Exception('whee'))
        self.runner.step_registry.find_match.return_value = match
        format_exc.return_value = 'something to do with an exception'

        step.run(self.runner)
        eq_(step.status, 'failed')
        eq_(step.error_message, format_exc.return_value)

    @patch('time.time')
    def test_run_calculates_duration(self, time_time):
        step = Step('foo.feature', 17, u'Given', 'given', u'foo')
        match = Mock()
        self.runner.step_registry.find_match.return_value = match

        # First call to time.time() returns 17 and rewires the mock so the
        # second call returns 23, giving a deterministic duration of 6.
        def time_time_1():
            def time_time_2():
                return 23

            time_time.side_effect = time_time_2
            return 17

        side_effects = (None, raiser(AssertionError('whee')),
                        raiser(Exception('whee')))
        for side_effect in side_effects:
            match.run.side_effect = side_effect
            time_time.side_effect = time_time_1

            step.run(self.runner)
            eq_(step.duration, 23 - 17)

    def test_run_captures_stdout_and_logging(self):
        step = Step('foo.feature', 17, u'Given', 'given', u'foo')
        match = Mock()
        self.runner.step_registry.find_match.return_value = match

        assert step.run(self.runner)

        self.runner.start_capture.assert_called_with()
        self.runner.stop_capture.assert_called_with()

    def test_run_appends_any_captured_stdout_on_failure(self):
        step = Step('foo.feature', 17, u'Given', 'given', u'foo')
        match = Mock()
        self.runner.step_registry.find_match.return_value = match
        self.stdout_capture.getvalue.return_value = 'frogs'
        match.run.side_effect = raiser(Exception('halibut'))

        assert not step.run(self.runner)

        assert 'Captured stdout:' in step.error_message
        assert 'frogs' in step.error_message

    def test_run_appends_any_captured_logging_on_failure(self):
        step = Step('foo.feature', 17, u'Given', 'given', u'foo')
        match = Mock()
        self.runner.step_registry.find_match.return_value = match
        self.log_capture.getvalue.return_value = 'toads'
        match.run.side_effect = raiser(AssertionError('kipper'))

        assert not step.run(self.runner)

        assert 'Captured logging:' in step.error_message
        assert 'toads' in step.error_message
class TestTableModel(unittest.TestCase):
    """Tests for the Table model: equality, iteration and row access."""
    # pylint: disable=invalid-name
    HEAD = [u'type of stuff', u'awesomeness', u'ridiculousness']
    DATA = [
        [u'fluffy', u'large', u'frequent'],
        [u'lint', u'low', u'high'],
        [u'green', u'variable', u'awkward'],
    ]

    def setUp(self):
        self.table = Table(self.HEAD, 0, self.DATA)

    def test_equivalence(self):
        # Two tables built from the same head/data must compare equal.
        t1 = self.table
        self.setUp()
        eq_(t1, self.table)

    def test_table_iteration(self):
        for i, row in enumerate(self.table):
            for j, cell in enumerate(row):
                eq_(cell, self.DATA[i][j])

    def test_table_row_by_index(self):
        for i in range(3):
            eq_(self.table[i], Row(self.HEAD, self.DATA[i], 0))

    def test_table_row_name(self):
        eq_(self.table[0]['type of stuff'], 'fluffy')
        eq_(self.table[1]['awesomeness'], 'low')
        eq_(self.table[2]['ridiculousness'], 'awkward')

    def test_table_row_index(self):
        eq_(self.table[0][0], 'fluffy')
        eq_(self.table[1][1], 'low')
        eq_(self.table[2][2], 'awkward')

    @raises(KeyError)
    def test_table_row_keyerror(self):
        self.table[0]['spam']  # pylint: disable=pointless-statement

    def test_table_row_items(self):
        eq_(list(self.table[0].items()), list(zip(self.HEAD, self.DATA[0])))
class TestModelRow(unittest.TestCase):
    """Tests for the Row model: length, item access and dict conversion."""
    # pylint: disable=invalid-name, bad-whitespace
    HEAD = [u'name', u'sex', u'age']
    DATA = [u'Alice', u'female', u'12']

    def setUp(self):
        self.row = Row(self.HEAD, self.DATA, 0)

    def test_len(self):
        eq_(len(self.row), 3)

    def test_getitem_with_valid_colname(self):
        # pylint: disable=bad-whitespace
        eq_(self.row['name'], u'Alice')
        eq_(self.row['sex'], u'female')
        eq_(self.row['age'], u'12')

    @raises(KeyError)
    def test_getitem_with_unknown_colname(self):
        self.row['__UNKNOWN_COLUMN__']  # pylint: disable=pointless-statement

    def test_getitem_with_valid_index(self):
        eq_(self.row[0], u'Alice')
        eq_(self.row[1], u'female')
        eq_(self.row[2], u'12')

    @raises(IndexError)
    def test_getitem_with_invalid_index(self):
        colsize = len(self.row)
        eq_(colsize, 3)
        self.row[colsize]  # pylint: disable=pointless-statement

    def test_get_with_valid_colname(self):
        # pylint: disable=bad-whitespace
        eq_(self.row.get('name'), u'Alice')
        eq_(self.row.get('sex'), u'female')
        eq_(self.row.get('age'), u'12')

    def test_getitem_with_unknown_colname_should_return_default(self):
        eq_(self.row.get('__UNKNOWN_COLUMN__', 'XXX'), u'XXX')

    def test_as_dict(self):
        data1 = self.row.as_dict()
        data2 = dict(self.row.as_dict())
        assert isinstance(data1, dict)
        assert isinstance(data2, dict)
        assert isinstance(data1, OrderedDict)
        # -- REQUIRES: Python2.7 or ordereddict installed.
        # assert not isinstance(data2, OrderedDict)
        eq_(data1, data2)
        # pylint: disable=bad-whitespace
        eq_(data1['name'], u'Alice')
        eq_(data1['sex'], u'female')
        eq_(data1['age'], u'12')
class TestFileLocation(unittest.TestCase):
    """Tests for FileLocation ordering, equality and string conversions."""
    # pylint: disable=invalid-name
    # Locations in one file in strictly ascending line order.
    ordered_locations1 = [
        FileLocation("features/alice.feature", 1),
        FileLocation("features/alice.feature", 5),
        FileLocation("features/alice.feature", 10),
        FileLocation("features/alice.feature", 11),
        FileLocation("features/alice.feature", 100),
    ]
    # Locations across several files in ascending (filename, line) order.
    ordered_locations2 = [
        FileLocation("features/alice.feature", 1),
        FileLocation("features/alice.feature", 10),
        FileLocation("features/bob.feature", 5),
        FileLocation("features/charly.feature", None),
        FileLocation("features/charly.feature", 0),
        FileLocation("features/charly.feature", 100),
    ]
    # Pairs of locations that must compare equal.
    same_locations = [
        (FileLocation("alice.feature"),
         FileLocation("alice.feature", None),
        ),
        (FileLocation("alice.feature", 10),
         FileLocation("alice.feature", 10),
        ),
        (FileLocation("features/bob.feature", 11),
         FileLocation("features/bob.feature", 11),
        ),
    ]

    def test_compare_equal(self):
        for value1, value2 in self.same_locations:
            eq_(value1, value2)

    def test_compare_equal_with_string(self):
        for location in self.ordered_locations2:
            eq_(location, location.filename)
            eq_(location.filename, location)

    def test_compare_not_equal(self):
        for value1, value2 in self.same_locations:
            assert not(value1 != value2)  # pylint: disable=unneeded-not, superfluous-parens

        for locations in [self.ordered_locations1, self.ordered_locations2]:
            for value1, value2 in zip(locations, locations[1:]):
                assert value1 != value2

    def test_compare_less_than(self):
        for locations in [self.ordered_locations1, self.ordered_locations2]:
            for value1, value2 in zip(locations, locations[1:]):
                assert value1 < value2, "FAILED: %s < %s" % (_text(value1), _text(value2))
                assert value1 != value2

    def test_compare_less_than_with_string(self):
        locations = self.ordered_locations2
        for value1, value2 in zip(locations, locations[1:]):
            if value1.filename == value2.filename:
                continue
            assert value1 < value2.filename, \
                "FAILED: %s < %s" % (_text(value1), _text(value2.filename))
            assert value1.filename < value2, \
                "FAILED: %s < %s" % (_text(value1.filename), _text(value2))

    def test_compare_greater_than(self):
        for locations in [self.ordered_locations1, self.ordered_locations2]:
            for value1, value2 in zip(locations, locations[1:]):
                assert value2 > value1, "FAILED: %s > %s" % (_text(value2), _text(value1))
                assert value2 != value1

    def test_compare_less_or_equal(self):
        for value1, value2 in self.same_locations:
            assert value1 <= value2, "FAILED: %s <= %s" % (_text(value1), _text(value2))
            assert value1 == value2

        for locations in [self.ordered_locations1, self.ordered_locations2]:
            for value1, value2 in zip(locations, locations[1:]):
                assert value1 <= value2, "FAILED: %s <= %s" % (_text(value1), _text(value2))
                assert value1 != value2

    def test_compare_greater_or_equal(self):
        for value1, value2 in self.same_locations:
            assert value2 >= value1, "FAILED: %s >= %s" % (_text(value2), _text(value1))
            assert value2 == value1

        for locations in [self.ordered_locations1, self.ordered_locations2]:
            for value1, value2 in zip(locations, locations[1:]):
                assert value2 >= value1, "FAILED: %s >= %s" % (_text(value2), _text(value1))
                assert value2 != value1

    def test_filename_should_be_same_as_self(self):
        for location in self.ordered_locations2:
            assert location == location.filename
            assert location.filename == location

    def test_string_conversion(self):
        for location in self.ordered_locations2:
            expected = u"%s:%s" % (location.filename, location.line)
            if location.line is None:
                expected = location.filename
            assert six.text_type(location) == expected

    def test_repr_conversion(self):
        for location in self.ordered_locations2:
            expected = u'<FileLocation: filename="%s", line=%s>' % \
                       (location.filename, location.line)
            actual = repr(location)
            assert actual == expected, "FAILED: %s == %s" % (actual, expected)
| benthomasson/behave | test/test_model.py | Python | bsd-2-clause | 31,063 |
# Authors: Emmanuelle Gouillart <emmanuelle.gouillart@normalesup.org>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Alex Gramfort <alexandre.gramfort@telecom-paristech.fr>
# License: BSD
import numpy as np
from nose.tools import assert_equal, assert_raises
from numpy.testing import assert_array_equal
from distutils.version import LooseVersion
from scipy import signal
from ..fixes import (_in1d, _tril_indices, _copysign, _unravel_index,
_Counter, _unique, _bincount, _digitize)
from ..fixes import _firwin2 as mne_firwin2
from ..fixes import _filtfilt as mne_filtfilt
def test_counter():
    """Test Counter replacement.

    Compares the backported _Counter against collections.Counter when the
    latter is available (Python >= 2.7); otherwise the test is a no-op.
    """
    import collections
    try:
        Counter = collections.Counter
    except AttributeError:  # Python < 2.7: collections has no Counter
        pass
    else:
        a = Counter([1, 2, 1, 3])
        b = _Counter([1, 2, 1, 3])
        for key, count in zip([1, 2, 3], [2, 1, 1]):
            assert_equal(a[key], b[key])
def test_unique():
    """Test unique() replacement
    """
    # numpy.unique only gained this behaviour in 1.5; skip on older numpy
    if LooseVersion(np.__version__) < LooseVersion('1.5'):
        return
    for arr in [np.array([]), np.random.rand(10), np.ones(10)]:
        # plain call, values only
        assert_array_equal(np.unique(arr), _unique(arr))
        # every combination of the optional index/inverse outputs
        for ret_idx, ret_inv in [(True, False), (False, True), (True, True)]:
            expected = np.unique(arr, return_index=ret_idx, return_inverse=ret_inv)
            got = _unique(arr, return_index=ret_idx, return_inverse=ret_inv)
            for exp_part, got_part in zip(expected, got):
                assert_array_equal(exp_part, got_part)
def test_bincount():
    """Test bincount() replacement
    """
    # numpy.bincount only grew the minlength argument in 1.6
    if LooseVersion(np.__version__) < LooseVersion('1.6'):
        return
    data = np.ones(10, int)
    for minlength in (None, 100):
        assert_array_equal(_bincount(data, None, minlength),
                           np.bincount(data, None, minlength))
def test_in1d():
    """Test numpy.in1d() replacement"""
    haystack = np.arange(10)
    evens = haystack[haystack % 2 == 0]
    # exactly the five even values of 0..9 are members
    assert_equal(_in1d(haystack, evens).sum(), 5)
def test_digitize():
    """Test numpy.digitize() replacement"""
    values = np.arange(9)
    bins = [0, 5, 10]
    expected_left = np.array([1, 1, 1, 1, 1, 2, 2, 2, 2])
    expected_right = np.array([0, 1, 1, 1, 1, 1, 2, 2, 2])
    assert_array_equal(_digitize(values, bins), expected_left)
    assert_array_equal(_digitize(values, bins, True), expected_right)
    # the backport only supports integer data and bins when right=True
    assert_raises(NotImplementedError, _digitize, values + 0.1, bins, True)
    assert_raises(NotImplementedError, _digitize, values, [0., 5, 10], True)
def test_tril_indices():
    """Test numpy.tril_indices() replacement"""
    lower = _tril_indices(4)
    strictly_lower = _tril_indices(4, -1)
    # 4x4 matrix holding the values 1..16 row by row
    mat = np.arange(1, 17).reshape(4, 4)
    assert_array_equal(mat[lower],
                       np.array([1, 5, 6, 9, 10, 11, 13, 14, 15, 16]))
    assert_array_equal(mat[strictly_lower], np.array([5, 9, 10, 13, 14, 15]))
def test_unravel_index():
    """Test numpy.unravel_index() replacement"""
    # scalar flat indices over 2-D and 3-D shapes
    assert_equal(_unravel_index(2, (2, 3)), (0, 2))
    assert_equal(_unravel_index(2, (2, 2)), (1, 0))
    assert_equal(_unravel_index(254, (17, 94)), (2, 66))
    assert_equal(_unravel_index((2 * 3 + 1) * 6 + 4, (4, 3, 6)), (2, 1, 4))
    # array of indices, and a 4-D shape
    assert_array_equal(_unravel_index(np.array([22, 41, 37]), (7, 6)),
                       [[3, 6, 6], [4, 5, 1]])
    assert_array_equal(_unravel_index(1621, (6, 7, 8, 9)), (3, 1, 4, 1))
def test_copysign():
    """Test numpy.copysign() replacement"""
    negpos = np.array([-1, 1, -1])
    posneg = np.array([1, -1, 1])
    # copysign(a, b) keeps |a| but adopts b's signs; here |a| == |b| == 1
    assert_array_equal(_copysign(negpos, posneg), posneg)
    assert_array_equal(_copysign(posneg, negpos), negpos)
def test_firwin2():
    """Test firwin2 backport
    """
    freqs = [0.0, 0.5, 1.0]
    gains = [1.0, 1.0, 0.0]
    # backported taps must match scipy's own firwin2 exactly
    assert_array_equal(mne_firwin2(150, freqs, gains),
                       signal.firwin2(150, freqs, gains))
def test_filtfilt():
    """Test IIR filtfilt replacement
    """
    impulse = np.r_[1, np.zeros(100)]
    # an identity filter (b = a = [1, 0]) must return the input unchanged
    identity = [1, 0]
    assert_array_equal(impulse, mne_filtfilt(identity, identity, impulse, padlen=0))
| jaeilepp/eggie | mne/tests/test_fixes.py | Python | bsd-2-clause | 4,590 |
'''
Created on Jan 06, 2016
@author: kashefy
'''
from nose.tools import assert_greater, assert_equal, assert_is_instance, assert_true
import nideep.iow.lmdb_utils as lu
class TestLMDBConsts:
    """Sanity checks for the module-level LMDB constants."""

    def test_map_sz(self):
        # the maximum LMDB map size must be a positive integer
        assert_is_instance(lu.MAP_SZ, int)
        assert_greater(lu.MAP_SZ, 0)

    def test_num_idx_digits(self):
        # the zero-padding width for record keys must be a positive integer
        assert_is_instance(lu.NUM_IDX_DIGITS, int)
        assert_greater(lu.NUM_IDX_DIGITS, 0)
class TestIdxFormat:
    """Checks for the zero-padded index key format string."""

    def test_idx_format(self):
        assert_is_instance(lu.IDX_FMT, str)
        assert_greater(len(lu.IDX_FMT), 0)
        # must be a single str.format() replacement field
        assert_true(lu.IDX_FMT.startswith('{'))
        assert_true(lu.IDX_FMT.endswith('}'))

    def test_idx_format_zero(self):
        zeros = '0' * lu.NUM_IDX_DIGITS
        assert_equal(lu.IDX_FMT.format(0), zeros)

    def test_idx_format_nonzero(self):
        assert_greater(lu.NUM_IDX_DIGITS, 1)
        zeros = '0' * lu.NUM_IDX_DIGITS
        # single-digit values replace the last pad character
        for one_digit in xrange(10):
            assert_equal(lu.IDX_FMT.format(one_digit), zeros[:-1] + '%d' % one_digit)
        # two-digit values replace the last two pad characters
        for two_digits in xrange(10, 100):
            assert_equal(lu.IDX_FMT.format(two_digits), zeros[:-2] + '%d' % two_digits)
| kashefy/caffe_sandbox | nideep/iow/test_lmdb_utils.py | Python | bsd-2-clause | 1,119 |
import numpy
try:
    import matplotlib.pyplot as pypl
    plotting = True
except ImportError:
    # matplotlib is optional; run the simulations without writing PNGs
    plotting = False
import os, shutil
this_dir = os.path.dirname(os.path.realpath(__file__))
import condor
import logging
logger = logging.getLogger('condor')
logger.setLevel("INFO")
#logger.setLevel("DEBUG")
# Output directory for the rendered images; recreated fresh on every run.
out_dir = this_dir + "/pngs"
if os.path.exists(out_dir):
    shutil.rmtree(out_dir)
os.mkdir(out_dir)
# Source: X-ray pulse with 0.1 nm wavelength, 1 mJ pulse energy, 1 um focus
src = condor.Source(wavelength=0.1E-9, pulse_energy=1E-3, focus_diameter=1E-6)
# Detector: 100x100 pixels of 750 um pitch at 0.5 m distance
det = condor.Detector(distance=0.5, pixel_size=750E-6, nx=100, ny=100)#, cx=55, cy=55)
# Particle rotation angles in degrees (a single angle here)
#angles_d = numpy.array([0., 22.5, 45.])
angles_d = numpy.array([72.5])
for angle_d in angles_d:
    # Convert the angle from degrees to radians.
    angle = angle_d/360.*2*numpy.pi
    # Rotation about the normalised [1, 1, 0] axis, expressed as a quaternion.
    rotation_axis = numpy.array([1.,1.,0.])/numpy.sqrt(2.)
    quaternion = condor.utils.rotation.quat(angle,rotation_axis[0],rotation_axis[1], rotation_axis[2])
    rotation_values = numpy.array([quaternion])
    rotation_formalism = "quaternion"
    rotation_mode = "extrinsic"
    #rotation_values = None
    #rotation_formalism = "random"
    #rotation_mode = "extrinsic"
    #rotation_values = None
    #rotation_formalism = None
    #rotation_mode = "extrinsic"
    #print("Angle = %.2f degrees" % angle_d)
    # Particle dimensions: long axis is twice the short axis.
    short_diameter = 25E-9*12/100.
    long_diameter = 2*short_diameter
    spheroid_diameter = condor.utils.spheroid_diffraction.to_spheroid_diameter(short_diameter/2.,long_diameter/2.)
    spheroid_flattening = condor.utils.spheroid_diffraction.to_spheroid_flattening(short_diameter/2.,long_diameter/2.)
    # Grid sampling along the long / short axes.
    N_long = 20
    N_short = int(round(short_diameter/long_diameter * N_long))
    # Spheroid
    if True:
        # Ideal spheroid
        #print("Simulating spheroid")
        par = condor.ParticleSpheroid(diameter=spheroid_diameter, material_type="water", flattening=spheroid_flattening, rotation_values=rotation_values, rotation_formalism=rotation_formalism, rotation_mode=rotation_mode)
        s = "particle_spheroid"
        E = condor.Experiment(src, {s : par}, det)
        res = E.propagate()
        # Back-transform the diffraction pattern to real space for inspection.
        real_space = numpy.fft.fftshift(numpy.fft.ifftn(res["entry_1"]["data_1"]["data_fourier"]))
        vmin = numpy.log10(res["entry_1"]["data_1"]["data"].max()/10000.)
        if plotting:
            pypl.imsave(out_dir + "/%s_%2.2fdeg.png" % (s,angle_d), numpy.log10(res["entry_1"]["data_1"]["data"]), vmin=vmin)
            pypl.imsave(out_dir + "/%s_rs_%2.2fdeg.png" % (s,angle_d), abs(real_space))
    if True:
        # Map (spheroid)
        #print("Simulating map (spheroid)")
        par = condor.ParticleMap(diameter=spheroid_diameter, material_type="water", flattening=spheroid_flattening, geometry="spheroid", rotation_values=rotation_values, rotation_formalism=rotation_formalism, rotation_mode=rotation_mode)
        s = "particle_map_spheroid"
        E = condor.Experiment(src, {s : par}, det)
        res = E.propagate()
        real_space = numpy.fft.fftshift(numpy.fft.ifftn(res["entry_1"]["data_1"]["data_fourier"]))
        vmin = numpy.log10(res["entry_1"]["data_1"]["data"].max()/10000.)
        if plotting:
            pypl.imsave(out_dir + "/%s_%2.2f.png" % (s,angle_d), numpy.log10(res["entry_1"]["data_1"]["data"]), vmin=vmin)
            pypl.imsave(out_dir + "/%s_rs_%2.2f.png" % (s,angle_d), abs(real_space))
    # Box
    if True:
        # Map (box)
        dx = long_diameter/(N_long-1)
        # Binary 3D map: two box-shaped regions form the particle volume.
        map3d = numpy.zeros(shape=(N_long,N_long,N_long))
        map3d[:N_short,:,:N_short] = 1.
        map3d[N_short:N_short+N_short,:N_short,:N_short] = 1.
        # Map
        #print("Simulating map (custom)")
        par = condor.ParticleMap(diameter=long_diameter, material_type="water", geometry="custom", map3d=map3d, dx=dx, rotation_values=rotation_values, rotation_formalism=rotation_formalism, rotation_mode=rotation_mode)
        s = "particle_map_custom"
        E = condor.Experiment(src, {s : par}, det)
        res = E.propagate()
        if plotting:
            data_fourier = res["entry_1"]["data_1"]["data_fourier"]
            #data_fourier = abs(data_fourier)*numpy.exp(-1.j*numpy.angle(data_fourier))
            real_space = numpy.fft.fftshift(numpy.fft.ifftn(numpy.fft.fftshift(data_fourier)))
            vmin = numpy.log10(res["entry_1"]["data_1"]["data"].max()/10000.)
            pypl.imsave(out_dir + "/%s_map.png" % (s),map3d.sum(0))
            pypl.imsave(out_dir + "/%s_%2.2f.png" % (s,angle_d), numpy.log10(res["entry_1"]["data_1"]["data"]), vmin=vmin)
            pypl.imsave(out_dir + "/%s_%2.2f_phases.png" % (s,angle_d), numpy.angle(res["entry_1"]["data_1"]["data_fourier"])%(2*numpy.pi))
            pypl.imsave(out_dir + "/%s_rs_%2.2f.png" % (s,angle_d), abs(real_space))
    if True:
        # Atoms (box)
        #print("Simulating atoms")
        # Two meshgrids of atom positions reproducing the same two-box shape.
        Z1,Y1,X1 = numpy.meshgrid(numpy.linspace(0, short_diameter, N_short),
                                  numpy.linspace(0, long_diameter, N_long),
                                  numpy.linspace(0, short_diameter, N_short),
                                  indexing="ij")
        Z2,Y2,X2 = numpy.meshgrid(numpy.linspace(0, short_diameter, N_short) + long_diameter/2.,
                                  numpy.linspace(0, short_diameter, N_short),
                                  numpy.linspace(0, short_diameter, N_short),
                                  indexing="ij")
        Z = numpy.concatenate((Z1.ravel(),Z2.ravel()))
        Y = numpy.concatenate((Y1.ravel(),Y2.ravel()))
        X = numpy.concatenate((X1.ravel(),X2.ravel()))
        # 2D occupancy projection, used only for visual sanity checking.
        proj = numpy.zeros(shape=(N_long,N_long))
        dx = long_diameter/(N_long-1)
        for (x,y,z) in zip(X.ravel(),Y.ravel(),Z.ravel()):
            proj[int(round(y/dx)),int(round(x/dx))] += 1
        if plotting:
            # NOTE(review): 's' still holds "particle_map_custom" here, so the
            # projection image is saved under that prefix -- confirm intended.
            pypl.imsave(out_dir + "/%s_proj.png" % (s),proj)
        atomic_positions = numpy.array([[x,y,z] for x,y,z in zip(X.ravel(),Y.ravel(),Z.ravel())])
        atomic_numbers = numpy.ones(int(atomic_positions.size/3), dtype=numpy.int16)
        par = condor.ParticleAtoms(atomic_positions=atomic_positions, atomic_numbers=atomic_numbers, rotation_values=rotation_values, rotation_formalism=rotation_formalism, rotation_mode=rotation_mode)
        s = "particle_atoms"
        E = condor.Experiment(src, {s : par}, det)
        res = E.propagate()
        if plotting:
            real_space = numpy.fft.fftshift(numpy.fft.ifftn(numpy.fft.fftshift(res["entry_1"]["data_1"]["data_fourier"])))
            fourier_space = res["entry_1"]["data_1"]["data_fourier"]
            vmin = numpy.log10(res["entry_1"]["data_1"]["data"].max()/10000.)
            pypl.imsave(out_dir + "/%s_%2.2f.png" % (s,angle_d), numpy.log10(res["entry_1"]["data_1"]["data"]), vmin=vmin)
            pypl.imsave(out_dir + "/%s_%2.2f_phases.png" % (s,angle_d), numpy.angle(fourier_space)%(2*numpy.pi))
            pypl.imsave(out_dir + "/%s_rs_%2.2f.png" % (s,angle_d), abs(real_space))
| FXIhub/condor | examples/scripts/rotations/example.py | Python | bsd-2-clause | 6,922 |
# -*- coding: utf-8 -*-
"""
Extensible permission system for pybbm
"""
from __future__ import unicode_literals
from django.db.models import Q
from pybb import defaults, util
class DefaultPermissionHandler(object):
    """
    Default Permission handler. If you want to implement custom permissions (for example,
    private forums based on some application-specific settings), you can inherit from this
    class and override any of the `filter_*` and `may_*` methods. Methods starting with
    `may` are expected to return `True` or `False`, whereas methods starting with `filter_*`
    should filter the queryset they receive, and return a new queryset containing only the
    objects the user is allowed to see.

    To activate your custom permission handler, set `settings.PYBB_PERMISSION_HANDLER` to
    the full qualified name of your class, e.g. "`myapp.pybb_adapter.MyPermissionHandler`".
    """
    #
    # permission checks on categories
    #
    def filter_categories(self, user, qs):
        """ return a queryset with categories `user` is allowed to see """
        return qs.filter(hidden=False) if not user.is_staff else qs

    def may_view_category(self, user, category):
        """ return True if `user` may view this category, False if not """
        return user.is_staff or not category.hidden

    #
    # permission checks on forums
    #
    def filter_forums(self, user, qs):
        """ return a queryset with forums `user` is allowed to see """
        return qs.filter(Q(hidden=False) & Q(category__hidden=False)) if not user.is_staff else qs

    def may_view_forum(self, user, forum):
        """ return True if user may view this forum, False if not """
        return user.is_staff or ( forum.hidden == False and forum.category.hidden == False )

    def may_create_topic(self, user, forum):
        """ return True if `user` is allowed to create a new topic in `forum` """
        return user.has_perm('pybb.add_post')

    #
    # permission checks on topics
    #
    def filter_topics(self, user, qs):
        """ return a queryset with topics `user` is allowed to see """
        if not user.is_staff:
            qs = qs.filter(Q(forum__hidden=False) & Q(forum__category__hidden=False))
        if not user.is_superuser:
            if user.is_authenticated():
                qs = qs.filter(Q(forum__moderators=user) | Q(user=user) | Q(on_moderation=False)).distinct()
            else:
                qs = qs.filter(on_moderation=False)
        return qs

    def may_view_topic(self, user, topic):
        """ return True if user may view this topic, False otherwise """
        if user.is_superuser:
            return True
        if not user.is_staff and (topic.forum.hidden or topic.forum.category.hidden):
            return False  # only staff may see hidden forum / category
        if topic.on_moderation:
            # FIX: membership tests need the related queryset (`.all()`), not the
            # bare manager (consistent with may_moderate_topic below)
            return user.is_authenticated() and (user == topic.user or user in topic.forum.moderators.all())
        return True

    def may_moderate_topic(self, user, topic):
        """ return True if `user` moderates the forum `topic` belongs to """
        return user.is_superuser or user in topic.forum.moderators.all()

    def may_close_topic(self, user, topic):
        """ return True if `user` may close `topic` """
        return self.may_moderate_topic(user, topic)

    def may_open_topic(self, user, topic):
        """ return True if `user` may open `topic` """
        return self.may_moderate_topic(user, topic)

    def may_stick_topic(self, user, topic):
        """ return True if `user` may stick `topic` """
        return self.may_moderate_topic(user, topic)

    def may_unstick_topic(self, user, topic):
        """ return True if `user` may unstick `topic` """
        return self.may_moderate_topic(user, topic)

    def may_vote_in_topic(self, user, topic):
        """ return True if `user` may vote in `topic`'s poll """
        return (
            user.is_authenticated() and topic.poll_type != topic.POLL_TYPE_NONE and not topic.closed and
            not user.poll_answers.filter(poll_answer__topic=topic).exists()
        )

    def may_create_post(self, user, topic):
        """ return True if `user` is allowed to create a new post in `topic` """
        if topic.forum.hidden and (not user.is_staff):
            # if topic is hidden, only staff may post
            return False
        if topic.closed and (not user.is_staff):
            # if topic is closed, only staff may post
            return False
        # only user which have 'pybb.add_post' permission may post
        return defaults.PYBB_ENABLE_ANONYMOUS_POST or user.has_perm('pybb.add_post')

    def may_post_as_admin(self, user):
        """ return True if `user` may post as admin """
        return user.is_staff

    def may_subscribe_topic(self, user, forum):
        """ return True if `user` is allowed to subscribe to a `topic` """
        # NOTE(review): the parameter is named `forum` but the docstring talks
        # about a topic; left unchanged to stay keyword-compatible with callers.
        return not defaults.PYBB_DISABLE_SUBSCRIPTIONS

    #
    # permission checks on posts
    #
    def filter_posts(self, user, qs):
        """ return a queryset with posts `user` is allowed to see """
        # first filter by topic availability
        if not user.is_staff:
            qs = qs.filter(Q(topic__forum__hidden=False) & Q(topic__forum__category__hidden=False))
        if not defaults.PYBB_PREMODERATION or user.is_superuser:
            # superuser may see all posts, also if premoderation is turned off moderation
            # flag is ignored
            return qs
        elif user.is_authenticated():
            # post is visible if user is author, post is not on moderation, or user is moderator
            # for this forum
            qs = qs.filter(Q(user=user) | Q(on_moderation=False) | Q(topic__forum__moderators=user))
        else:
            # anonymous user may not see posts which are on moderation
            qs = qs.filter(on_moderation=False)
        return qs

    def may_view_post(self, user, post):
        """ return True if `user` may view `post`, False otherwise """
        if user.is_superuser:
            return True
        if post.on_moderation:
            return post.user == user or user in post.topic.forum.moderators.all()
        return self.may_view_topic(user, post.topic)

    def may_edit_post(self, user, post):
        """ return True if `user` may edit `post` """
        return user.is_superuser or post.user == user or self.may_moderate_topic(user, post.topic)

    def may_delete_post(self, user, post):
        """ return True if `user` may delete `post` """
        return self.may_moderate_topic(user, post.topic)

    #
    # permission checks on users
    #
    def may_block_user(self, user, user_to_block):
        """ return True if `user` may block `user_to_block` """
        return user.has_perm('pybb.block_users')

    def may_attach_files(self, user):
        """
        return True if `user` may attach files to posts, False otherwise.
        By default controlled by PYBB_ATTACHMENT_ENABLE setting
        """
        return defaults.PYBB_ATTACHMENT_ENABLE

    def may_create_poll(self, user):
        """
        return True if `user` may create a poll in a topic, False otherwise.
        By default always True
        """
        return True

    def may_edit_topic_slug(self, user):
        """
        returns True if `user` may choose topic's slug, False otherwise.
        When True adds field slug in the Topic form.
        By default always False
        """
        return False
perms = util.resolve_class(defaults.PYBB_PERMISSION_HANDLER)
| skolsuper/pybbm | pybb/permissions.py | Python | bsd-2-clause | 7,490 |
#!/usr/bin/env python
# coding: utf-8
from __future__ import unicode_literals
import logging
import click
import socket
from mkdocs import __version__
from mkdocs import utils
from mkdocs import exceptions
from mkdocs import config
from mkdocs.commands import build, gh_deploy, new, serve
# Module-level logger for the CLI itself.
log = logging.getLogger(__name__)
# Disable the warning that Click displays (as of Click version 5.0) when users
# use unicode_literals in Python 2.
# See http://click.pocoo.org/dev/python3/#unicode-literals for more details.
click.disable_unicode_literals_warning = True
class State(object):
    """Shared CLI state: configures and holds the 'mkdocs' logger whose
    level the --verbose/--quiet options adjust."""

    def __init__(self, log_name='mkdocs', level=logging.INFO):
        self.logger = logging.getLogger(log_name)
        # Keep messages from bubbling up to the root logger's handlers.
        self.logger.propagate = False
        handler = logging.StreamHandler()
        handler.setFormatter(logging.Formatter("%(levelname)-7s - %(message)s "))
        self.logger.addHandler(handler)
        self.logger.setLevel(level)
pass_state = click.make_pass_decorator(State, ensure=True)
def verbose_option(f):
    """Attach a '-v/--verbose' flag that raises the shared logger to DEBUG."""
    def callback(ctx, param, value):
        state = ctx.ensure_object(State)
        if value:
            state.logger.setLevel(logging.DEBUG)
    return click.option(
        '-v', '--verbose',
        is_flag=True,
        expose_value=False,
        help='Enable verbose output',
        callback=callback,
    )(f)
def quiet_option(f):
    """Attach a '-q/--quiet' flag that restricts the shared logger to ERROR."""
    def callback(ctx, param, value):
        state = ctx.ensure_object(State)
        if value:
            state.logger.setLevel(logging.ERROR)
    return click.option(
        '-q', '--quiet',
        is_flag=True,
        expose_value=False,
        help='Silence warnings',
        callback=callback,
    )(f)
def common_options(f):
    """Apply the shared --verbose/--quiet options to a command function."""
    return quiet_option(verbose_option(f))
# Shared help strings for the CLI options declared on the commands below.
clean_help = "Remove old files from the site_dir before building (the default)."
config_help = "Provide a specific MkDocs config"
dev_addr_help = ("IP address and port to serve documentation locally (default: "
                 "localhost:8000)")
strict_help = ("Enable strict mode. This will cause MkDocs to abort the build "
               "on any warnings.")
theme_dir_help = "The theme directory to use when building your documentation."
theme_help = "The theme to use when building your documentation."
theme_choices = utils.get_theme_names()
site_dir_help = "The directory to output the result of the documentation build."
reload_help = "Enable the live reloading in the development server (this is the default)"
no_reload_help = "Disable the live reloading in the development server."
dirty_reload_help = "Enable the live reloading in the development server, but only re-build files that have changed"
# FIX: corrected "commiting" -> "committing" in the user-visible help text.
commit_message_help = ("A commit message to use when committing to the "
                       "Github Pages remote branch")
remote_branch_help = ("The remote branch to commit to for Github Pages. This "
                      "overrides the value specified in config")
remote_name_help = ("The remote name to commit to for Github Pages. This "
                    "overrides the value specified in config")
force_help = "Force the push to the repository."
# Root command group: the serve/build/json/gh-deploy/new subcommands below
# register themselves via @cli.command().
@click.group(context_settings={'help_option_names': ['-h', '--help']})
@click.version_option(__version__, '-V', '--version')
@common_options
def cli():
    """
    MkDocs - Project documentation with Markdown.
    """
@cli.command(name="serve")
@click.option('-f', '--config-file', type=click.File('rb'), help=config_help)
@click.option('-a', '--dev-addr', help=dev_addr_help, metavar='<IP:PORT>')
@click.option('-s', '--strict', is_flag=True, help=strict_help)
@click.option('-t', '--theme', type=click.Choice(theme_choices), help=theme_help)
@click.option('-e', '--theme-dir', type=click.Path(), help=theme_dir_help)
@click.option('--livereload', 'livereload', flag_value='livereload', help=reload_help, default=True)
@click.option('--no-livereload', 'livereload', flag_value='no-livereload', help=no_reload_help)
@click.option('--dirtyreload', 'livereload', flag_value='dirty', help=dirty_reload_help)
@common_options
def serve_command(dev_addr, config_file, strict, theme, theme_dir, livereload):
    """Run the builtin development server"""
    # Quieten tornado (used by the livereload server) down to warnings.
    logging.getLogger('tornado').setLevel(logging.WARNING)
    # Don't override config value if user did not specify --strict flag
    # Conveniently, load_config drops None values
    strict = strict or None
    try:
        serve.serve(
            config_file=config_file,
            dev_addr=dev_addr,
            strict=strict,
            theme=theme,
            theme_dir=theme_dir,
            livereload=livereload
        )
    except (exceptions.ConfigurationError, socket.error) as e:  # pragma: no cover
        # Avoid ugly, unhelpful traceback
        raise SystemExit('\n' + str(e))
@cli.command(name="build")
@click.option('-c', '--clean/--dirty', is_flag=True, default=True, help=clean_help)
@click.option('-f', '--config-file', type=click.File('rb'), help=config_help)
@click.option('-s', '--strict', is_flag=True, help=strict_help)
@click.option('-t', '--theme', type=click.Choice(theme_choices), help=theme_help)
@click.option('-e', '--theme-dir', type=click.Path(), help=theme_dir_help)
@click.option('-d', '--site-dir', type=click.Path(), help=site_dir_help)
@common_options
def build_command(clean, config_file, strict, theme, theme_dir, site_dir):
    """Build the MkDocs documentation"""
    # Don't override config value if user did not specify --strict flag
    # Conveniently, load_config drops None values
    strict = strict or None
    try:
        build.build(config.load_config(
            config_file=config_file,
            strict=strict,
            theme=theme,
            theme_dir=theme_dir,
            site_dir=site_dir
        ), dirty=not clean)  # --dirty keeps previously built files in place
    except exceptions.ConfigurationError as e:  # pragma: no cover
        # Avoid ugly, unhelpful traceback
        raise SystemExit('\n' + str(e))
@cli.command(name="json")
@click.option('-c', '--clean/--dirty', is_flag=True, default=True, help=clean_help)
@click.option('-f', '--config-file', type=click.File('rb'), help=config_help)
@click.option('-s', '--strict', is_flag=True, help=strict_help)
@click.option('-d', '--site-dir', type=click.Path(), help=site_dir_help)
@common_options
def json_command(clean, config_file, strict, site_dir):
    """Build the MkDocs documentation to JSON files

    Rather than building your documentation to HTML pages, this
    outputs each page in a simple JSON format. This command is
    useful if you want to index your documentation in an external
    search engine.
    """
    # The command still works but is slated for removal (see warning below).
    log.warning("The json command is deprecated and will be removed in a "
                "future MkDocs release. For details on updating: "
                "http://www.mkdocs.org/about/release-notes/")
    # Don't override config value if user did not specify --strict flag
    # Conveniently, load_config drops None values
    strict = strict or None
    try:
        build.build(config.load_config(
            config_file=config_file,
            strict=strict,
            site_dir=site_dir
        ), dump_json=True, dirty=not clean)
    except exceptions.ConfigurationError as e:  # pragma: no cover
        # Avoid ugly, unhelpful traceback
        raise SystemExit('\n' + str(e))
@cli.command(name="gh-deploy")
@click.option('-c', '--clean/--dirty', is_flag=True, default=True, help=clean_help)
@click.option('-f', '--config-file', type=click.File('rb'), help=config_help)
@click.option('-m', '--message', help=commit_message_help)
@click.option('-b', '--remote-branch', help=remote_branch_help)
@click.option('-r', '--remote-name', help=remote_name_help)
@click.option('--force', is_flag=True, help=force_help)
@common_options
def gh_deploy_command(config_file, clean, message, remote_branch, remote_name, force):
    """Deploy your documentation to GitHub Pages"""
    try:
        cfg = config.load_config(
            config_file=config_file,
            remote_branch=remote_branch,
            remote_name=remote_name
        )
        # Build first so the deployed site reflects the current sources.
        build.build(cfg, dirty=not clean)
        gh_deploy.gh_deploy(cfg, message=message, force=force)
    except exceptions.ConfigurationError as e:  # pragma: no cover
        # Avoid ugly, unhelpful traceback
        raise SystemExit('\n' + str(e))
@cli.command(name="new")
@click.argument("project_directory")
@common_options
def new_command(project_directory):
    """Create a new MkDocs project"""
    # Delegates project scaffolding to mkdocs.commands.new.
    new.new(project_directory)
# Entry point when the module is executed directly.
if __name__ == '__main__':  # pragma: no cover
    cli()
| lukfor/mkdocs | mkdocs/__main__.py | Python | bsd-2-clause | 8,721 |
from __future__ import absolute_import
from sentry import tsdb, ratelimits
from sentry.api.serializers import serialize
from sentry.plugins.base import Plugin
from sentry.plugins.base.configuration import react_plugin_config
from sentry.plugins.status import PluginStatus
class DataForwardingPlugin(Plugin):
    """Base plugin for forwarding processed events to an external service.

    Subclasses implement ``forward_event`` and may override
    ``get_event_payload`` / ``get_rate_limit`` to customize behavior.
    """

    status = PluginStatus.BETA

    def configure(self, project, request):
        return react_plugin_config(self, project, request)

    def has_project_conf(self):
        return True

    def get_rate_limit(self):
        # number of requests, number of seconds (window)
        return (50, 1)

    def forward_event(self, event, payload):
        """
        Forward the event and return a boolean if it was successful.
        """
        # FIX: signature now matches the call in post_process
        # (self.forward_event(event, payload)); the old (self, payload) form
        # raised TypeError instead of NotImplementedError for missing overrides.
        raise NotImplementedError

    def get_event_payload(self, event):
        return serialize(event)

    def get_plugin_type(self):
        return "data-forwarding"

    def post_process(self, event, **kwargs):
        # Rate-limit per (plugin, organization) pair.
        rl_key = u"{}:{}".format(self.conf_key, event.project.organization_id)
        # limit segment to 50 requests/second
        limit, window = self.get_rate_limit()
        if limit and window and ratelimits.is_limited(rl_key, limit=limit, window=window):
            return
        payload = self.get_event_payload(event)
        success = self.forward_event(event, payload)
        if success is False:
            # TODO(dcramer): record failure
            pass
        tsdb.incr(tsdb.models.project_total_forwarded, event.project.id, count=1)
| mvaled/sentry | src/sentry/plugins/bases/data_forwarding.py | Python | bsd-3-clause | 1,514 |
#!/usr/bin/env python
#######################################################
# Copyright (c) 2015, ArrayFire
# All rights reserved.
#
# This file is distributed under 3-clause BSD license.
# The complete license agreement can be obtained at:
# http://arrayfire.com/licenses/BSD-3-Clause
########################################################
import arrayfire as af
from . import _util
def simple_algorithm(verbose=False):
    """Exercise ArrayFire reductions, scans, sorts and set operations.

    Output goes through the _util display/print helpers, which are
    constructed from the `verbose` flag.
    """
    display_func = _util.display_func(verbose)
    print_func = _util.print_func(verbose)
    a = af.randu(3, 3)
    k = af.constant(1, 3, 3, dtype=af.Dtype.u32)
    af.eval(k)
    # Whole-array reductions, then per-dimension sums.
    print_func(af.sum(a), af.product(a), af.min(a), af.max(a), af.count(a), af.any_true(a), af.all_true(a))
    display_func(af.sum(a, 0))
    display_func(af.sum(a, 1))
    # Keyed reductions: rk holds key 1 for the first two entries, 0 for the last.
    rk = af.constant(1, 3, dtype=af.Dtype.u32)
    rk[2] = 0
    af.eval(rk)
    display_func(af.sumByKey(rk, a, dim=0))
    display_func(af.sumByKey(rk, a, dim=1))
    display_func(af.productByKey(rk, a, dim=0))
    display_func(af.productByKey(rk, a, dim=1))
    display_func(af.minByKey(rk, a, dim=0))
    display_func(af.minByKey(rk, a, dim=1))
    display_func(af.maxByKey(rk, a, dim=0))
    display_func(af.maxByKey(rk, a, dim=1))
    display_func(af.anyTrueByKey(rk, a, dim=0))
    display_func(af.anyTrueByKey(rk, a, dim=1))
    display_func(af.allTrueByKey(rk, a, dim=0))
    display_func(af.allTrueByKey(rk, a, dim=1))
    display_func(af.countByKey(rk, a, dim=0))
    display_func(af.countByKey(rk, a, dim=1))
    # Per-dimension variants of the remaining reductions.
    display_func(af.product(a, 0))
    display_func(af.product(a, 1))
    display_func(af.min(a, 0))
    display_func(af.min(a, 1))
    display_func(af.max(a, 0))
    display_func(af.max(a, 1))
    display_func(af.count(a, 0))
    display_func(af.count(a, 1))
    display_func(af.any_true(a, 0))
    display_func(af.any_true(a, 1))
    display_func(af.all_true(a, 0))
    display_func(af.all_true(a, 1))
    # Inclusive scans, plain and keyed.
    display_func(af.accum(a, 0))
    display_func(af.accum(a, 1))
    display_func(af.scan(a, 0, af.BINARYOP.ADD))
    display_func(af.scan(a, 1, af.BINARYOP.MAX))
    display_func(af.scan_by_key(k, a, 0, af.BINARYOP.ADD))
    display_func(af.scan_by_key(k, a, 1, af.BINARYOP.MAX))
    display_func(af.sort(a, is_ascending=True))
    display_func(af.sort(a, is_ascending=False))
    # d contains NaNs where c's mask zeroed the denominator; compare
    # plain sum with the nan_val-substituting variants.
    b = (a > 0.1) * a
    c = (a > 0.4) * a
    d = b / c
    print_func(af.sum(d))
    print_func(af.sum(d, nan_val=0.0))
    display_func(af.sum(d, dim=0, nan_val=0.0))
    # Sorting that also returns the original indices.
    val, idx = af.sort_index(a, is_ascending=True)
    display_func(val)
    display_func(idx)
    val, idx = af.sort_index(a, is_ascending=False)
    display_func(val)
    display_func(idx)
    # Sort one array while reordering a companion array by the same keys.
    b = af.randu(3, 3)
    keys, vals = af.sort_by_key(a, b, is_ascending=True)
    display_func(keys)
    display_func(vals)
    keys, vals = af.sort_by_key(a, b, is_ascending=False)
    display_func(keys)
    display_func(vals)
    # Set operations on deduplicated vectors.
    c = af.randu(5, 1)
    d = af.randu(5, 1)
    cc = af.set_unique(c, is_sorted=False)
    dd = af.set_unique(af.sort(d), is_sorted=True)
    display_func(cc)
    display_func(dd)
    display_func(af.set_union(cc, dd, is_unique=True))
    display_func(af.set_union(cc, dd, is_unique=False))
    display_func(af.set_intersect(cc, cc, is_unique=True))
    display_func(af.set_intersect(cc, cc, is_unique=False))
_util.tests["algorithm"] = simple_algorithm
| arrayfire/arrayfire_python | tests/simple/algorithm.py | Python | bsd-3-clause | 3,357 |
import pytest
import numpy as np
import scipy.linalg
import scipy.sparse
import qutip
# The MKL helpers only exist when qutip was built with MKL support.
if qutip.settings.has_mkl:
    from qutip._mkl.spsolve import mkl_splu, mkl_spsolve
# Skip every test in this module when the MKL extensions are unavailable.
pytestmark = [
    pytest.mark.skipif(not qutip.settings.has_mkl,
                       reason='MKL extensions not found.'),
]
class Test_spsolve:
    """Checks mkl_spsolve against scipy.linalg references and known solutions."""

    def test_single_rhs_vector_real(self):
        # Real system, single dense right-hand side.
        Adense = np.array([[0, 1, 1],
                           [1, 0, 1],
                           [0, 0, 1]])
        As = scipy.sparse.csr_matrix(Adense)
        np.random.seed(1234)  # deterministic test vector
        x = np.random.randn(3)
        b = As * x
        x2 = mkl_spsolve(As, b, verbose=True)
        np.testing.assert_allclose(x, x2)

    def test_single_rhs_vector_complex(self):
        # Complex system built from a random Hermitian operator and a random ket.
        A = qutip.rand_herm(10)
        x = qutip.rand_ket(10).full()
        b = A.full() @ x
        y = mkl_spsolve(A.data, b, verbose=True)
        np.testing.assert_allclose(x, y)

    @pytest.mark.parametrize('dtype', [np.float64, np.complex128])
    def test_multi_rhs_vector(self, dtype):
        # Several right-hand sides passed as columns of one dense matrix.
        M = np.array([
            [1, 0, 2],
            [0, 0, 3],
            [-4, 5, 6],
        ], dtype=dtype)
        sM = scipy.sparse.csr_matrix(M)
        N = np.array([
            [3, 0, 1],
            [0, 2, 0],
            [0, 0, 0],
        ], dtype=dtype)
        sX = mkl_spsolve(sM, N, verbose=True)
        X = scipy.linalg.solve(M, N)
        np.testing.assert_allclose(X, sX)

    def test_rhs_shape_is_maintained(self):
        # The solution must keep the (n,) resp. (n, 1) shape of the input rhs.
        A = scipy.sparse.csr_matrix(np.array([
            [1, 0, 2],
            [0, 0, 3],
            [-4, 5, 6],
        ], dtype=np.complex128))
        b = np.array([0, 2, 0], dtype=np.complex128)
        out = mkl_spsolve(A, b, verbose=True)
        assert b.shape == out.shape
        b = np.array([0, 2, 0], dtype=np.complex128).reshape((3, 1))
        out = mkl_spsolve(A, b, verbose=True)
        assert b.shape == out.shape

    def test_sparse_rhs(self):
        # Sparse right-hand side; the result is compared after densifying.
        A = scipy.sparse.csr_matrix([
            [1, 2, 0],
            [0, 3, 0],
            [0, 0, 5],
        ])
        b = scipy.sparse.csr_matrix([
            [0, 1],
            [1, 0],
            [0, 0],
        ])
        x = mkl_spsolve(A, b, verbose=True)
        ans = np.array([[-0.66666667, 1],
                        [0.33333333, 0],
                        [0, 0]])
        np.testing.assert_allclose(x.toarray(), ans)

    @pytest.mark.parametrize('dtype', [np.float64, np.complex128])
    def test_symmetric_solver(self, dtype):
        # hermitian=1 selects the symmetric/hermitian MKL code path.
        A = qutip.rand_herm(np.arange(1, 11)).data
        if dtype == np.float64:
            A = A.real
        x = np.ones(10, dtype=dtype)
        b = A.dot(x)
        y = mkl_spsolve(A, b, hermitian=1, verbose=True)
        np.testing.assert_allclose(x, y)
class Test_splu:
    """Checks the reusable LU factorization object returned by mkl_splu."""

    @pytest.mark.parametrize('dtype', [np.float64, np.complex128])
    def test_repeated_rhs_solve(self, dtype):
        # One factorization, then several column-by-column solves.
        M = np.array([
            [1, 0, 2],
            [0, 0, 3],
            [-4, 5, 6],
        ], dtype=dtype)
        sM = scipy.sparse.csr_matrix(M)
        N = np.array([
            [3, 0, 1],
            [0, 2, 0],
            [0, 0, 0],
        ], dtype=dtype)
        test_X = np.zeros((3, 3), dtype=dtype)
        lu = mkl_splu(sM, verbose=True)
        for k in range(3):
            test_X[:, k] = lu.solve(N[:, k])
        lu.delete()  # release the factorization's internal storage
        expected_X = scipy.linalg.solve(M, N)
        np.testing.assert_allclose(test_X, expected_X)
| qutip/qutip | qutip/tests/test_mkl.py | Python | bsd-3-clause | 3,447 |
import numpy as np
import pandas as pd
import pandas.util.testing as tm
import pytest
import dask.dataframe as dd
from dask.dataframe.utils import assert_eq, PANDAS_ge_0190
@pytest.mark.parametrize('data', [
    pd.Series([1, 1, 1, 2, 2, 1, 3, 4], dtype='category'),
    pd.Series(pd.Categorical([1, 1, 1, 2, 2, 1, 3, 4], categories=[4, 3, 2, 1])),
    pd.DataFrame({'a': [1, 2, 3, 4, 4, 3, 2, 1],
                  'b': pd.Categorical(list('abcdabcd'))})]
)
def test_get_dummies(data):
    """dd.get_dummies on categorical input matches pandas, including columns."""
    exp = pd.get_dummies(data)
    ddata = dd.from_pandas(data, 2)
    res = dd.get_dummies(ddata)
    assert_eq(res, exp)
    # Column order/labels must also match exactly, not just values.
    tm.assert_index_equal(res.columns, exp.columns)
def test_get_dummies_object():
    """Object (non-categorical) columns must be excluded explicitly; otherwise
    dd.get_dummies raises NotImplementedError."""
    df = pd.DataFrame({'a': pd.Categorical([1, 2, 3, 4, 4, 3, 2, 1]),
                       'b': list('abcdabcd'),
                       'c': pd.Categorical(list('abcdabcd'))})
    ddf = dd.from_pandas(df, 2)
    # Explicitly exclude object columns
    exp = pd.get_dummies(df, columns=['a', 'c'])
    res = dd.get_dummies(ddf, columns=['a', 'c'])
    assert_eq(res, exp)
    tm.assert_index_equal(res.columns, exp.columns)
    # Any attempt to dummy-encode the object column 'b' must fail.
    with pytest.raises(NotImplementedError):
        dd.get_dummies(ddf)
    with pytest.raises(NotImplementedError):
        dd.get_dummies(ddf.b)
    with pytest.raises(NotImplementedError):
        dd.get_dummies(ddf, columns=['b'])
def test_get_dummies_kwargs():
    """Keyword arguments (prefix, drop_first, dummy_na) are forwarded to pandas;
    sparse=True is explicitly unsupported."""
    s = pd.Series([1, 1, 1, 2, 2, 1, 3, 4], dtype='category')
    exp = pd.get_dummies(s, prefix='X', prefix_sep='-')
    ds = dd.from_pandas(s, 2)
    res = dd.get_dummies(ds, prefix='X', prefix_sep='-')
    assert_eq(res, exp)
    tm.assert_index_equal(res.columns, pd.Index(['X-1', 'X-2', 'X-3', 'X-4']))
    exp = pd.get_dummies(s, drop_first=True)
    ds = dd.from_pandas(s, 2)
    res = dd.get_dummies(ds, drop_first=True)
    assert_eq(res, exp)
    tm.assert_index_equal(res.columns, exp.columns)
    # nan
    s = pd.Series([1, 1, 1, 2, np.nan, 3, np.nan, 5], dtype='category')
    exp = pd.get_dummies(s)
    ds = dd.from_pandas(s, 2)
    res = dd.get_dummies(ds)
    assert_eq(res, exp)
    tm.assert_index_equal(res.columns, exp.columns)
    # dummy_na
    exp = pd.get_dummies(s, dummy_na=True)
    ds = dd.from_pandas(s, 2)
    res = dd.get_dummies(ds, dummy_na=True)
    assert_eq(res, exp)
    tm.assert_index_equal(res.columns, pd.Index([1, 2, 3, 5, np.nan]))
    msg = 'sparse=True is not supported'
    with tm.assertRaisesRegexp(NotImplementedError, msg):
        dd.get_dummies(ds, sparse=True)
def test_get_dummies_errors():
    """dd.get_dummies rejects a plain (non-categorical) numeric Series."""
    with pytest.raises(NotImplementedError):
        # not Categorical
        s = pd.Series([1, 1, 1, 2, 2, 1, 3, 4])
        ds = dd.from_pandas(s, 2)
        dd.get_dummies(ds)
@pytest.mark.parametrize('aggfunc', ['mean', 'sum', 'count'])
def test_pivot_table(aggfunc):
    """dd.pivot_table matches pd.pivot_table for both the free-function and
    the DataFrame-method call forms.

    The two forms previously duplicated the whole verification logic; it is
    factored into the local ``_check`` helper.
    """
    df = pd.DataFrame({'A': np.random.choice(list('XYZ'), size=100),
                       'B': np.random.randn(100),
                       'C': pd.Categorical(np.random.choice(list('abc'), size=100))})
    ddf = dd.from_pandas(df, 5)

    def _check(res, exp):
        # Compare a dask result against the pandas reference.
        if aggfunc == 'count':
            # dask result cannot be int64 dtype depending on divisions because of NaN
            exp = exp.astype(np.float64)
        if PANDAS_ge_0190:
            assert_eq(res, exp)
        else:
            # because of a pandas 0.18.x bug, categorical dtype is not preserved
            assert_eq(res, exp, check_names=False, check_column_type=False)

    # free-function form
    _check(dd.pivot_table(ddf, index='A', columns='C', values='B',
                          aggfunc=aggfunc),
           pd.pivot_table(df, index='A', columns='C', values='B',
                          aggfunc=aggfunc))
    # method form
    _check(ddf.pivot_table(index='A', columns='C', values='B',
                           aggfunc=aggfunc),
           df.pivot_table(index='A', columns='C', values='B',
                          aggfunc=aggfunc))
def test_pivot_table_dtype():
    """The 'count' aggregation yields float64 columns (NaN-able), with a
    CategoricalIndex of the pivot columns."""
    df = pd.DataFrame({'A': list('AABB'),
                       'B': pd.Categorical(list('ABAB')),
                       'C': [1, 2, 3, 4]})
    ddf = dd.from_pandas(df, 2)
    res = dd.pivot_table(ddf, index='A', columns='B',
                         values='C', aggfunc='count')
    exp_index = pd.CategoricalIndex(['A', 'B'], name='B')
    exp = pd.Series([np.float64] * 2, index=exp_index)
    tm.assert_series_equal(res.dtypes, exp)
    exp = pd.pivot_table(df, index='A', columns='B',
                         values='C', aggfunc='count').astype(np.float64)
    if PANDAS_ge_0190:
        assert_eq(res, exp)
    else:
        # because of a pandas 0.18.x bug, categorical dtype is not preserved
        assert_eq(res, exp, check_names=False, check_column_type=False)
def test_pivot_table_errors():
    """dd.pivot_table validates its arguments: index/columns/values must be
    single column names, aggfunc one of a fixed set, and 'columns' must be
    category dtype."""
    df = pd.DataFrame({'A': np.random.choice(list('abc'), size=10),
                       'B': np.random.randn(10),
                       'C': pd.Categorical(np.random.choice(list('abc'), size=10))})
    ddf = dd.from_pandas(df, 2)
    msg = "'index' must be the name of an existing column"
    with tm.assertRaisesRegexp(ValueError, msg):
        dd.pivot_table(ddf, index=['A'], columns='C', values='B')
    msg = "'columns' must be the name of an existing column"
    with tm.assertRaisesRegexp(ValueError, msg):
        dd.pivot_table(ddf, index='A', columns=['C'], values='B')
    msg = "'values' must be the name of an existing column"
    with tm.assertRaisesRegexp(ValueError, msg):
        dd.pivot_table(ddf, index='A', columns='C', values=['B'])
    msg = "aggfunc must be either 'mean', 'sum' or 'count'"
    with tm.assertRaisesRegexp(ValueError, msg):
        dd.pivot_table(ddf, index='A', columns='C', values='B', aggfunc=['sum'])
    with tm.assertRaisesRegexp(ValueError, msg):
        dd.pivot_table(ddf, index='A', columns='C', values='B', aggfunc='xx')
    # Same frame but with 'C' as plain object dtype: must be rejected.
    df = pd.DataFrame({'A': np.random.choice(list('abc'), size=10),
                       'B': np.random.randn(10),
                       'C': np.random.choice(list('abc'), size=10)})
    ddf = dd.from_pandas(df, 2)
    msg = "'columns' must be category dtype"
    with tm.assertRaisesRegexp(ValueError, msg):
        dd.pivot_table(ddf, index='A', columns='C', values='B')
| chrisbarber/dask | dask/dataframe/tests/test_reshape.py | Python | bsd-3-clause | 6,521 |
from django.db import migrations, models
class Migration(migrations.Migration):
    """Third migration for the custom image app.

    Re-declares the ``Image`` model options, pins an explicit ``on_delete``
    on the ``file_ptr`` parent link, and finally restores the default
    manager name in the model options.
    """
    dependencies = [
        ('custom_image', '0002_auto_20160621_1510'),
    ]
    # The three operations run in exactly this order.
    operations = [
        migrations.AlterModelOptions(
            name='image',
            options={'verbose_name': 'image', 'verbose_name_plural': 'images'},
        ),
        migrations.AlterField(
            model_name='image',
            name='file_ptr',
            field=models.OneToOneField(primary_key=True, serialize=False, related_name='custom_image_image_file', parent_link=True, to='filer.File', on_delete=models.CASCADE),
        ),
        migrations.AlterModelOptions(
            name='image',
            options={'default_manager_name': 'objects', 'verbose_name': 'image', 'verbose_name_plural': 'images'},
        ),
    ]
| webu/django-filer | tests/utils/custom_image/migrations/0003_auto_20180414_2059.py | Python | bsd-3-clause | 839 |
#!/usr/bin/env python
from mvbb.box_db import MVBBLoader
import multiprocessing, subprocess
from multiprocessing import Pool
import sys
from plugins import soft_hand
def grasp_boxes(filename):
    """Run the batch grasping script on one database chunk in a child process."""
    command = ['python', './grasp_boxes_batch.py', filename]
    subprocess.call(command)
if __name__ == '__main__':
    # First CLI argument: database file name (extension stripped); defaults
    # to 'box_db' when absent or unusable.
    try:
        import os.path
        filename = os.path.splitext(sys.argv[1])[0]
    except:
        filename = 'box_db'
    if not os.path.isfile(filename+'.csv'):
        print "Error: file", filename, "doesn't exist"
        exit()
    # Optional second/third arguments: number of hand DOFs and number of
    # links to check; fall back to the SoftHand plugin's values.
    try:
        n_dofs = int(sys.argv[2])
        n_l = int(sys.argv[3])
    except:
        n_dofs = soft_hand.numJoints
        n_l = len(soft_hand.links_to_check)
    # for SoftHand
    box_db = MVBBLoader(filename, n_dofs, n_l)
    # Split the database, grasp each chunk in a separate worker process,
    # then merge the per-chunk results back together.
    filenames = box_db.split_db()
    p = Pool(multiprocessing.cpu_count())
    p.map(grasp_boxes, filenames)
    box_db.join_results(filenames)
| lia2790/grasp_learning | python/simple_batch_splitter.py | Python | bsd-3-clause | 913 |
# encoding: latin1
"""createVariable
"""
__author__ = "Juan C. Duque, Alejandro Betancourt, Juan Sebastian Marín"
__credits__ = "Copyright (c) 2010-11 Juan C. Duque"
__license__ = "New BSD License"
__version__ = "1.0.0"
__maintainer__ = "RiSE Group"
__email__ = "contacto@rise-group.org"
__all__ = ['fieldOperation']
import re
def fieldOperation(function, Y, fieldnames):
"""
This method receives a string which contains a function or formula written by the user. That function has operations between the variables of Y (a data dictionary) which names are contained in fieldnames (a list), the function is applied to the corresponding values in each element of Y. The return value is a list containing the results of the function.
:param function: function defined by the user, written like a python operation
:type function: string
:rtype: list (Y dictionary with the results)
"""
variables = []
positions = []
auxiliar1 = []
count = 0
results = []
newfunc = ''
for i in fieldnames[0:]:
if re.search(i,function):
if not (function[function.index(i) - 2: function.index(i)].isalpha()):
variables.append(i)
positions.append(fieldnames.index(i))
for j in Y:
auxiliar1 = []
count = 0
newfunc = function
for k in positions:
auxiliar1.append(Y[j][k])
for l in variables:
if len(re.findall(l,newfunc)) == 1:
newfunc = re.compile(l).sub(str(auxiliar1[variables.index(l)]), newfunc)
else:
if newfunc.index(re.findall(l, newfunc)[0]) != newfunc.index(re.findall('(\D)' + l, newfunc)[1]):
newfunc = re.compile('(\W)-[+,-]' + l).sub(str(auxiliar1[variables.index(l)]), newfunc)
for l in variables:
newfunc = re.compile(l).sub(str(auxiliar1[variables.index(l)]), newfunc)
try:
n = eval(newfunc)
except ZeroDivisionError:
raise ZeroDivisionError("Division by zero was detected")
results.append(n)
return results
| clusterpy/clusterpy | clusterpy/core/data/createVariable.py | Python | bsd-3-clause | 2,144 |
# Explicit unit-test scripts, each run as: python <script> with no extra args.
tests = [
    ("python", "UnitTestBuildComposite.py", {}),
    ("python", "UnitTestScreenComposite.py", {}),
    ("python", "UnitTestAnalyzeComposite.py", {}),
]
# One generic test_list.py driver entry per ML sub-package directory.
tests.extend(('python', 'test_list.py', {'dir': subdir})
             for subdir in ['Cluster', 'Composite', 'Data', 'DecTree',
                            'Descriptors', 'FeatureSelect', 'InfoTheory',
                            'KNN', 'ModelPackage', 'NaiveBayes', 'Neural',
                            'SLT'])
# No long-running tests for this package.
longTests = [
]
if __name__=='__main__':
    import sys
    from rdkit import TestRunner
    # Run every entry in `tests`; exit status is the number of failures.
    failed,tests = TestRunner.RunScript('test_list.py',0,1)
    sys.exit(len(failed))
| rdkit/rdkit-orig | rdkit/ML/test_list.py | Python | bsd-3-clause | 523 |
from unittest import TestCase
from rfxcom.protocol.temperature import Temperature
from rfxcom.exceptions import (InvalidPacketLength, UnknownPacketSubtype,
UnknownPacketType)
class TemperatureTestCase(TestCase):
    """Decoding and validation tests for the RFXcom temperature-sensor
    packet parser (packet type 0x50)."""
    def setUp(self):
        # 9-byte temperature packet: length, type 0x50, subtype 2, seq 0x11,
        # id bytes, temperature bytes, signal/battery nibble byte.
        self.data = bytearray(b'\x08\x50\x02\x11\x70\x02\x00\xA7\x89')
        self.parser = Temperature()
    def test_parse_bytes(self):
        """Subtype 2 packet decodes to the expected field dictionary."""
        self.assertTrue(self.parser.validate_packet(self.data))
        self.assertTrue(self.parser.can_handle(self.data))
        result = self.parser.load(self.data)
        self.assertEquals(result, {
            'packet_length': 8,
            'packet_type': 80,
            'packet_type_name': 'Temperature sensors',
            'sequence_number': 17,
            'packet_subtype': 2,
            'packet_subtype_name':
                'THC238/268,THN132,THWR288,THRN122,THN122,AW129/131',
            'temperature': 16.7,
            'id': '0x7002',
            # 'channel': 2, TBC
            'signal_level': 8,
            'battery_level': 9
        })
        self.assertEquals(str(self.parser), "<Temperature ID:0x7002>")
    def test_parse_bytes2(self):
        """Subtype 3 (THWR800) packet decodes correctly."""
        self.data = bytearray(b'\x08\x50\x03\x02\xAE\x01\x00\x63\x59')
        self.assertTrue(self.parser.validate_packet(self.data))
        self.assertTrue(self.parser.can_handle(self.data))
        result = self.parser.load(self.data)
        self.assertEquals(result, {
            'packet_length': 8,
            'packet_type': 80,
            'packet_type_name': 'Temperature sensors',
            'sequence_number': 2,
            'packet_subtype': 3,
            'packet_subtype_name': 'THWR800',
            'temperature': 9.9,
            'id': '0xAE01',
            # 'channel': 1, TBC
            'signal_level': 5,
            'battery_level': 9
        })
        self.assertEquals(str(self.parser), "<Temperature ID:0xAE01>")
    def test_parse_bytes_negative_temp(self):
        """High bit of the temperature field (0x80) signals a negative value."""
        self.data = bytearray(b'\x08\x50\x06\x02\xAE\x01\x80\x55\x59')
        self.assertTrue(self.parser.validate_packet(self.data))
        self.assertTrue(self.parser.can_handle(self.data))
        result = self.parser.load(self.data)
        self.assertEquals(result, {
            'packet_length': 8,
            'packet_type': 80,
            'packet_type_name': 'Temperature sensors',
            'sequence_number': 2,
            'packet_subtype': 6,
            'packet_subtype_name': 'TS15C',
            'temperature': -8.5,
            'id': '0xAE01',
            # 'channel': 1, TBC
            'signal_level': 5,
            'battery_level': 9
        })
        self.assertEquals(str(self.parser), "<Temperature ID:0xAE01>")
    def test_validate_bytes_short(self):
        """A truncated packet is rejected with InvalidPacketLength."""
        data = self.data[:1]
        with self.assertRaises(InvalidPacketLength):
            self.parser.validate_packet(data)
    def test_validate_unkown_packet_type(self):
        """An unknown packet type byte is rejected with UnknownPacketType."""
        self.data[1] = 0xFF
        self.assertFalse(self.parser.can_handle(self.data))
        with self.assertRaises(UnknownPacketType):
            self.parser.validate_packet(self.data)
    def test_validate_unknown_sub_type(self):
        """An unknown subtype byte is rejected with UnknownPacketSubtype."""
        self.data[2] = 0xEE
        self.assertFalse(self.parser.can_handle(self.data))
        with self.assertRaises(UnknownPacketSubtype):
            self.parser.validate_packet(self.data)
    def test_log_name(self):
        """The parser's logger is namespaced under rfxcom.protocol."""
        self.assertEquals(self.parser.log.name, 'rfxcom.protocol.Temperature')
| skimpax/python-rfxcom | tests/protocol/test_temperature.py | Python | bsd-3-clause | 3,484 |
from MergeIndependent import *
| mica-gossip/MiCA | tools/micavis/custom/RoundRobinMerge.py | Python | bsd-3-clause | 31 |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from .. import config as _config
class Conf(_config.ConfigNamespace):
    """
    Configuration parameters for `astropy.table`.
    """
    # Template for auto-generated column names (new-style str.format).
    auto_colname = _config.ConfigItem(
        'col{0}',
        'The template that determines the name of a column if it cannot be '
        'determined. Uses new-style (format method) string formatting.',
        aliases=['astropy.table.column.auto_colname'])
    # CSS classes applied to tables rendered in Jupyter notebooks.
    # NOTE(review): the help text is missing the closing '>' after the URL.
    default_notebook_table_class = _config.ConfigItem(
        'table-striped table-bordered table-condensed',
        'The table class to be used in Jupyter notebooks when displaying '
        'tables (and not overridden). See <http://getbootstrap.com/css/#tables '
        'for a list of useful bootstrap classes.')
    # Conditions under which a column-replacement warning is emitted.
    replace_warnings = _config.ConfigItem(
        ['slice'],
        'List of conditions for issuing a warning when replacing a table '
        "column using setitem, e.g. t['a'] = value. Allowed options are "
        "'always', 'slice', 'refcount', 'attributes'.",
        'list',
    )
    # Force in-place column update on setitem (scheduled for removal).
    replace_inplace = _config.ConfigItem(
        False,
        'Always use in-place update of a table column when using setitem, '
        "e.g. t['a'] = value. This overrides the default behavior of "
        "replacing the column entirely with the new value when possible. "
        "This configuration option will be deprecated and then removed in "
        "subsequent major releases."
    )
# The single module-level configuration instance used throughout astropy.table.
conf = Conf()
from .column import Column, MaskedColumn, StringTruncateWarning, ColumnInfo
from .groups import TableGroups, ColumnGroups
from .table import (Table, QTable, TableColumns, Row, TableFormatter,
NdarrayMixin, TableReplaceWarning)
from .operations import join, setdiff, hstack, vstack, unique, TableMergeError
from .bst import BST, FastBST, FastRBT
from .sorted_array import SortedArray
from .serialize import SerializedColumn
# Finally import the formats for the read and write method but delay building
# the documentation until all are loaded. (#5275)
from ..io import registry
# Defer rebuilding Table's read/write docstrings until every format
# connector below has been registered (see issue #5275).
with registry.delay_doc_updates(Table):
    # Import routines that connect readers/writers to astropy.table
    from .jsviewer import JSViewer
    from ..io.ascii import connect
    from ..io.fits import connect
    from ..io.misc import connect
    from ..io.votable import connect
| funbaker/astropy | astropy/table/__init__.py | Python | bsd-3-clause | 2,381 |
from django.db.models import CharField
from django.utils.translation import ugettext_lazy as _
from localflavor.deprecation import DeprecatedPhoneNumberField
from . import forms
from .au_states import STATE_CHOICES
from .validators import AUBusinessNumberFieldValidator, AUCompanyNumberFieldValidator, AUTaxFileNumberFieldValidator
class AUStateField(CharField):
    """
    A model field that stores the three-letter Australian state abbreviation in the database.

    It is represented with :data:`~localflavor.au.au_states.STATE_CHOICES`` choices.
    """
    description = _("Australian State")
    def __init__(self, *args, **kwargs):
        # Force the fixed choice set and the 3-character abbreviation length.
        kwargs['choices'] = STATE_CHOICES
        kwargs['max_length'] = 3
        super(AUStateField, self).__init__(*args, **kwargs)
    def deconstruct(self):
        # Drop 'choices' from the deconstructed kwargs so migrations do not
        # depend on (and churn with) the STATE_CHOICES tuple.
        name, path, args, kwargs = super(AUStateField, self).deconstruct()
        del kwargs['choices']
        return name, path, args, kwargs
class AUPostCodeField(CharField):
    """
    A model field that stores the four-digit Australian postcode in the database.

    This field is represented by forms as a :class:`~localflavor.au.forms.AUPostCodeField` field.
    """
    description = _("Australian Postcode")
    def __init__(self, *args, **kwargs):
        # Postcodes are always four digits.
        kwargs['max_length'] = 4
        super(AUPostCodeField, self).__init__(*args, **kwargs)
    def formfield(self, **kwargs):
        # Pair with the validating form field unless the caller overrides it.
        defaults = {'form_class': forms.AUPostCodeField}
        defaults.update(kwargs)
        return super(AUPostCodeField, self).formfield(**defaults)
class AUPhoneNumberField(CharField, DeprecatedPhoneNumberField):
    """
    A model field that checks that the value is a valid Australian phone number (ten digits).

    .. deprecated:: 1.4
        Use the django-phonenumber-field_ library instead.

    .. _django-phonenumber-field: https://github.com/stefanfoulis/django-phonenumber-field
    """
    description = _("Australian Phone number")
    def __init__(self, *args, **kwargs):
        # Generous length to accommodate formatting characters in input.
        kwargs['max_length'] = 20
        super(AUPhoneNumberField, self).__init__(*args, **kwargs)
    def formfield(self, **kwargs):
        defaults = {'form_class': forms.AUPhoneNumberField}
        defaults.update(kwargs)
        return super(AUPhoneNumberField, self).formfield(**defaults)
class AUBusinessNumberField(CharField):
    """
    A model field that checks that the value is a valid Australian Business Number (ABN).

    .. versionadded:: 1.3
    """
    description = _("Australian Business Number")
    validators = [AUBusinessNumberFieldValidator()]
    def __init__(self, *args, **kwargs):
        # ABNs are stored as 11 digits (whitespace stripped by to_python).
        kwargs['max_length'] = 11
        super(AUBusinessNumberField, self).__init__(*args, **kwargs)
    def formfield(self, **kwargs):
        defaults = {'form_class': forms.AUBusinessNumberField}
        defaults.update(kwargs)
        return super(AUBusinessNumberField, self).formfield(**defaults)
    def to_python(self, value):
        """Ensure the ABN is stored without spaces."""
        value = super(AUBusinessNumberField, self).to_python(value)
        if value is not None:
            # Remove all whitespace (spaces, tabs, newlines) before storage.
            return ''.join(value.split())
        return value
class AUCompanyNumberField(CharField):
    """
    A model field that checks that the value is a valid Australian Company Number (ACN).

    .. versionadded:: 1.5
    """
    description = _("Australian Company Number")
    validators = [AUCompanyNumberFieldValidator()]
    def __init__(self, *args, **kwargs):
        # ACNs are stored as 9 digits (whitespace stripped by to_python).
        kwargs['max_length'] = 9
        super(AUCompanyNumberField, self).__init__(*args, **kwargs)
    def formfield(self, **kwargs):
        defaults = {'form_class': forms.AUCompanyNumberField}
        defaults.update(kwargs)
        return super(AUCompanyNumberField, self).formfield(**defaults)
    def to_python(self, value):
        """Ensure the ACN is stored without spaces."""
        value = super(AUCompanyNumberField, self).to_python(value)
        if value is not None:
            # Remove all whitespace before storage.
            return ''.join(value.split())
        return value
class AUTaxFileNumberField(CharField):
    """
    A model field that checks that the value is a valid Tax File Number (TFN).

    A TFN is a number issued to a person by the Commissioner of Taxation and
    is used to verify client identity and establish their income levels.
    It is a eight or nine digit number without any embedded meaning.

    .. versionadded:: 1.4
    """
    description = _("Australian Tax File Number")
    validators = [AUTaxFileNumberFieldValidator()]
    def __init__(self, *args, **kwargs):
        # Slightly longer than 9 digits to tolerate formatting in raw input.
        kwargs['max_length'] = 11
        super(AUTaxFileNumberField, self).__init__(*args, **kwargs)
    def formfield(self, **kwargs):
        defaults = {'form_class': forms.AUTaxFileNumberField}
        defaults.update(kwargs)
        return super(AUTaxFileNumberField, self).formfield(**defaults)
    def to_python(self, value):
        """Ensure the TFN is stored without spaces."""
        value = super(AUTaxFileNumberField, self).to_python(value)
        if value is not None:
            # Remove all whitespace before storage.
            return ''.join(value.split())
        return value
| thor/django-localflavor | localflavor/au/models.py | Python | bsd-3-clause | 5,128 |
from reportlab.test import unittest
from reportlab.test.utils import makeSuiteForClasses, outputfile, printLocation, NearTestCase
from reportlab.pdfgen.canvas import Canvas
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfbase.ttfonts import TTFont
from reportlab.pdfbase import pdfutils
from reportlab.platypus.paragraph import Paragraph
from reportlab.lib.styles import ParagraphStyle
from reportlab.graphics.shapes import Drawing, String, Ellipse
import re
import codecs
# Matches one PDF literal string '(...)'; naive about escaped parentheses.
textPat = re.compile(r'\([^(]*\)')
#test sentences
# cp1252 bytes containing (c), (tm), (r) and e-acute characters.
testCp1252 = 'copyright %s trademark %s registered %s ReportLab! Ol%s!' % (chr(169), chr(153),chr(174), chr(0xe9))
testUni = unicode(testCp1252, 'cp1252')
testUTF8 = testUni.encode('utf-8')
# expected result is octal-escaped text in the PDF
expectedCp1252 = pdfutils._escape(testCp1252)
def extractText(pdfOps):
    """Utility to rip out the PDF text within a block of PDF operators.

    PDF shows a string draw as something like "(Hello World) Tj",
    i.e. text sits inside round brackets.  Crude and dirty; probably
    fails on escaped brackets.
    """
    # Strip the surrounding '(' and ')' from every match.
    return [match[1:-1] for match in textPat.findall(pdfOps)]
def subsetToUnicode(ttf, subsetCodeStr):
    """Return unicode string represented by given subsetCode string
    as found when TrueType font rendered to PDF, ttf must be the font
    object that was used."""
    # This relies on TTFont internals and uses the first document
    # and subset it finds
    subset = ttf.state.values()[0].subsets[0]
    chrs = []
    for codeStr in subsetCodeStr.split('\\'):
        if codeStr:
            # NOTE(review): codeStr[1:] assumes each backslash-separated
            # piece starts with one marker character before the octal
            # digits -- confirm against pdfutils._escape output.
            chrs.append(unichr(subset[int(codeStr[1:], 8)]))
    return u''.join(chrs)
class TextEncodingTestCase(NearTestCase):
    """Tests of expected Unicode and encoding behaviour
    """
    def setUp(self):
        # Register a TrueType font alongside the built-in Type 1 fonts.
        self.luxi = TTFont("Luxi", "luxiserif.ttf")
        pdfmetrics.registerFont(self.luxi)
        self.styNormal = ParagraphStyle(name='Helvetica', fontName='Helvetica-Oblique')
        self.styTrueType = ParagraphStyle(name='TrueType', fontName='luxi')
    def testStringWidth(self):
        # Width measurement must agree for str and unicode inputs, for both
        # Type 1 and TrueType fonts.
        msg = 'Hello World'
        self.assertNear(pdfmetrics.stringWidth(msg, 'Courier', 10),66.0)
        self.assertNear(pdfmetrics.stringWidth(msg, 'Helvetica', 10),51.67)
        self.assertNear(pdfmetrics.stringWidth(msg, 'Times-Roman', 10),50.27)
        self.assertNear(pdfmetrics.stringWidth(msg, 'Luxi', 10),50.263671875)
        uniMsg1 = u"Hello World"
        self.assertNear(pdfmetrics.stringWidth(uniMsg1, 'Courier', 10),66.0)
        self.assertNear(pdfmetrics.stringWidth(uniMsg1, 'Helvetica', 10),51.67)
        self.assertNear(pdfmetrics.stringWidth(uniMsg1, 'Times-Roman', 10),50.27)
        self.assertNear(pdfmetrics.stringWidth(uniMsg1, 'Luxi', 10),50.263671875)
        # Courier are all 600 ems wide. So if one 'measures as utf8' one will
        # get a wrong width as extra characters are seen
        self.assertEquals(len(testCp1252),52)
        self.assertNear(pdfmetrics.stringWidth(testCp1252, 'Courier', 10, 'cp1252'),312.0)
        # the test string has 5 more bytes and so "measures too long" if passed to
        # a single-byte font which treats it as a single-byte string.
        self.assertEquals(len(testUTF8),57)
        self.assertNear(pdfmetrics.stringWidth(testUTF8, 'Courier', 10),312.0)
        self.assertEquals(len(testUni),52)
        self.assertNear(pdfmetrics.stringWidth(testUni, 'Courier', 10),312.0)
        # now try a TrueType font. Should be able to accept Unicode or UTF8
        self.assertNear(pdfmetrics.stringWidth(testUTF8, 'Luxi', 10),224.638671875)
        self.assertNear(pdfmetrics.stringWidth(testUni, 'Luxi', 10),224.638671875)
    def testUtf8Canvas(self):
        """Verify canvas declared as utf8 autoconverts.

        This assumes utf8 input. It converts to the encoding of the
        underlying font, so both text lines APPEAR the same."""
        c = Canvas(outputfile('test_pdfbase_encodings_utf8.pdf'))
        c.drawString(100,700, testUTF8)
        # Set a font with UTF8 encoding
        c.setFont('Luxi', 12)
        # This should pass the UTF8 through unchanged
        c.drawString(100,600, testUTF8)
        # and this should convert from Unicode to UTF8
        c.drawString(100,500, testUni)
        # now add a paragraph in Latin-1 in the latin-1 style
        p = Paragraph(testUTF8, style=self.styNormal, encoding="utf-8")
        w, h = p.wrap(150, 100)
        p.drawOn(c, 100, 400) #3
        c.rect(100,300,w,h)
        # now add a paragraph in UTF-8 in the UTF-8 style
        p2 = Paragraph(testUTF8, style=self.styTrueType, encoding="utf-8")
        w, h = p2.wrap(150, 100)
        p2.drawOn(c, 300, 400) #4
        c.rect(100,300,w,h)
        # now add a paragraph in Unicode in the latin-1 style
        p3 = Paragraph(testUni, style=self.styNormal)
        w, h = p3.wrap(150, 100)
        p3.drawOn(c, 100, 300)
        c.rect(100,300,w,h)
        # now add a paragraph in Unicode in the UTF-8 style
        p4 = Paragraph(testUni, style=self.styTrueType)
        p4.wrap(150, 100)
        p4.drawOn(c, 300, 300)
        c.rect(300,300,w,h)
        # now a graphic
        d1 = Drawing(400,50)
        d1.add(Ellipse(200,25,200,12.5, fillColor=None))
        d1.add(String(200,25,testUTF8, textAnchor='middle', encoding='utf-8'))
        d1.drawOn(c, 100, 150)
        # now a graphic in utf8
        d2 = Drawing(400,50)
        d2.add(Ellipse(200,25,200,12.5, fillColor=None))
        d2.add(String(200,25,testUTF8, fontName='Luxi', textAnchor='middle', encoding='utf-8'))
        d2.drawOn(c, 100, 100)
        # now a graphic in Unicode with T1 font
        d3 = Drawing(400,50)
        d3.add(Ellipse(200,25,200,12.5, fillColor=None))
        d3.add(String(200,25,testUni, textAnchor='middle'))
        d3.drawOn(c, 100, 50)
        # now a graphic in Unicode with TT font
        d4 = Drawing(400,50)
        d4.add(Ellipse(200,25,200,12.5, fillColor=None))
        d4.add(String(200,25,testUni, fontName='Luxi', textAnchor='middle'))
        d4.drawOn(c, 100, 0)
        # All three renderings of the test sentence must produce the same
        # escaped text in the page stream.
        extracted = extractText(c.getCurrentPageContent())
        self.assertEquals(extracted[0], expectedCp1252)
        self.assertEquals(extracted[1], extracted[2])
        #self.assertEquals(subsetToUnicode(self.luxi, extracted[1]), testUni)
        c.save()
class FontEncodingTestCase(unittest.TestCase):
    """Make documents with custom encodings of Type 1 built-in fonts.

    Nothing really to do with character encodings; this is about hacking the font itself"""
    def test0(self):
        "Make custom encodings of standard fonts"
        # make a custom encoded font.
        c = Canvas(outputfile('test_pdfbase_encodings.pdf'))
        c.setPageCompression(0)
        c.setFont('Helvetica', 12)
        c.drawString(100, 700, 'The text below should be in a custom encoding in which all vowels become "z"')
        # invent a new language where vowels are replaced with letter 'z'
        zenc = pdfmetrics.Encoding('EncodingWithoutVowels', 'WinAnsiEncoding')
        for ch in 'aeiou':
            zenc[ord(ch)] = 'z'
        for ch in 'AEIOU':
            zenc[ord(ch)] = 'Z'
        pdfmetrics.registerEncoding(zenc)
        # now we can make a font based on this encoding
        # AR hack/workaround: the name of the encoding must be a Python codec!
        f = pdfmetrics.Font('FontWithoutVowels', 'Helvetica-Oblique', 'EncodingWithoutVowels')
        pdfmetrics.registerFont(f)
        c.setFont('FontWithoutVowels', 12)
        c.drawString(125, 675, "The magic word is squamish ossifrage")
        # now demonstrate adding a Euro to MacRoman, which lacks one
        c.setFont('Helvetica', 12)
        c.drawString(100, 650, "MacRoman encoding lacks a Euro. We'll make a Mac font with the Euro at #219:")
        # WinAnsi Helvetica
        pdfmetrics.registerFont(pdfmetrics.Font('Helvetica-WinAnsi', 'Helvetica-Oblique', 'WinAnsiEncoding'))
        c.setFont('Helvetica-WinAnsi', 12)
        c.drawString(125, 625, 'WinAnsi with Euro: character 128 = "\200"')
        pdfmetrics.registerFont(pdfmetrics.Font('MacHelvNoEuro', 'Helvetica-Oblique', 'MacRomanEncoding'))
        c.setFont('MacHelvNoEuro', 12)
        c.drawString(125, 600, 'Standard MacRoman, no Euro: Character 219 = "\333"') # oct(219)=0333
        # now make our hacked encoding
        euroMac = pdfmetrics.Encoding('MacWithEuro', 'MacRomanEncoding')
        euroMac[219] = 'Euro'
        pdfmetrics.registerEncoding(euroMac)
        pdfmetrics.registerFont(pdfmetrics.Font('MacHelvWithEuro', 'Helvetica-Oblique', 'MacWithEuro'))
        c.setFont('MacHelvWithEuro', 12)
        c.drawString(125, 575, 'Hacked MacRoman with Euro: Character 219 = "\333"') # oct(219)=0333
        # now test width setting with and without _rl_accel - harder
        # make an encoding where 'm' becomes 'i'
        c.setFont('Helvetica', 12)
        c.drawString(100, 500, "Recode 'm' to 'i' and check we can measure widths. Boxes should surround letters.")
        sample = 'Mmmmm. ' * 6 + 'Mmmm'
        c.setFont('Helvetica-Oblique',12)
        c.drawString(125, 475, sample)
        w = c.stringWidth(sample, 'Helvetica-Oblique', 12)
        c.rect(125, 475, w, 12)
        narrowEnc = pdfmetrics.Encoding('m-to-i')
        narrowEnc[ord('m')] = 'i'
        narrowEnc[ord('M')] = 'I'
        pdfmetrics.registerEncoding(narrowEnc)
        pdfmetrics.registerFont(pdfmetrics.Font('narrow', 'Helvetica-Oblique', 'm-to-i'))
        c.setFont('narrow', 12)
        c.drawString(125, 450, sample)
        w = c.stringWidth(sample, 'narrow', 12)
        c.rect(125, 450, w, 12)
        c.setFont('Helvetica', 12)
        c.drawString(100, 400, "Symbol & Dingbats fonts - check we still get valid PDF in StandardEncoding")
        c.setFont('Symbol', 12)
        c.drawString(100, 375, 'abcdefghijklmn')
        c.setFont('ZapfDingbats', 12)
        c.drawString(300, 375, 'abcdefghijklmn')
        c.save()
def makeSuite():
    """Bundle this module's active test cases into a unittest suite."""
    # FontEncodingTestCase is currently disabled ("nobbled") pending
    # removal of the legacy encoding hacks it exercises.
    return makeSuiteForClasses(TextEncodingTestCase)
if __name__ == "__main__":
    # Run the suite directly and report where the output PDFs were written.
    unittest.TextTestRunner().run(makeSuite())
    printLocation()
| alexissmirnov/donomo | donomo_archive/lib/reportlab/test/test_pdfbase_encodings.py | Python | bsd-3-clause | 10,317 |
# -*- coding: utf-8 -*-
"""
Auth-SHA1/HMAC
~~~~~~~~~~~~~~
Securing an Eve-powered API with Basic Authentication (RFC2617).
This script assumes that user accounts are stored in a MongoDB collection
('accounts'), and that passwords are stored as SHA1/HMAC hashes. All API
resources/methods will be secured unless they are made explicitly public
(by fiddling with some settings you can open one or more resources and/or
methods to public access -see docs).
Since we are using werkzeug we don't need any extra import (werkzeug being
one of Flask/Eve prerequisites).
Checkout Eve at https://github.com/nicolaiarocci/eve
This snippet by Nicola Iarocci can be used freely for anything you like.
Consider it public domain.
"""
from eve import Eve
from eve.auth import BasicAuth
from werkzeug.security import check_password_hash
from settings_security import SETTINGS
class Sha1Auth(BasicAuth):
    """Basic-auth backend that validates credentials against the 'accounts'
    MongoDB collection using werkzeug's password-hash checker."""
    def check_auth(self, username, password, allowed_roles, resource, method):
        # use Eve's own db driver; no additional connections/resources are used
        accounts = app.data.driver.db['accounts']
        account = accounts.find_one({'username': username})
        # Truthy only when the user exists and the stored hash matches.
        return account and \
            check_password_hash(account['password'], password)
if __name__ == '__main__':
    # Module-global `app` is referenced by Sha1Auth.check_auth above.
    app = Eve(auth=Sha1Auth, settings=SETTINGS)
    app.run()
| mugurrus/eve | examples/security/sha1-hmac.py | Python | bsd-3-clause | 1,400 |
r"""
Used to configure the main parameters for each implemented model.
.. currentmodule:: compmech.conecyl.modelDB
"""
import numpy as np
from scipy.sparse import coo_matrix
from clpt import *
from fsdt import *
# Registry of the implemented models, keyed by model name.
# Per-entry fields:
#   'linear static' / 'linear buckling' / 'non-linear static':
#       capability flags (True = implemented, False = not implemented,
#       None = not applicable for this model)
#   'commons':    module with the routines shared by the model family
#   'linear':     module with the linear matrix builders
#   'non-linear': module with the non-linear matrix builders (None if absent)
#   'dofs':  number of degrees of freedom (3 for CLPT, 5 for FSDT)
#   'e_num': presumably the number of strain components (6 CLPT, 8 FSDT)
#            -- TODO confirm against the cython modules
#   'i0', 'j0', 'num0', 'num1', 'num2': series indexing parameters;
#            presumably start indices and term counts of the approximation
#            series groups -- TODO confirm
db = {
    'clpt_donnell_bc1': {
        'linear static': True,
        'linear buckling': True,
        'non-linear static': True,
        'commons': clpt_commons_bc1,
        'linear': clpt_donnell_bc1_linear,
        'non-linear': clpt_donnell_bc1_nonlinear,
        'dofs': 3,
        'e_num': 6,
        'i0': 0,
        'j0': 1,
        'num0': 3,
        'num1': 3,
        'num2': 6,
        },
    'clpt_donnell_bc2': {
        'linear static': True,
        'linear buckling': True,
        'non-linear static': True,
        'commons': clpt_commons_bc2,
        'linear': clpt_donnell_bc2_linear,
        'non-linear': clpt_donnell_bc2_nonlinear,
        'dofs': 3,
        'e_num': 6,
        'i0': 0,
        'j0': 1,
        'num0': 3,
        'num1': 3,
        'num2': 6,
        },
    'iso_clpt_donnell_bc2': {
        'linear static': True,
        'linear buckling': True,
        'non-linear static': True,
        'commons': clpt_commons_bc2,
        'linear': iso_clpt_donnell_bc2_linear,
        'non-linear': iso_clpt_donnell_bc2_nonlinear,
        'dofs': 3,
        'e_num': 6,
        'i0': 0,
        'j0': 1,
        'num0': 3,
        'num1': 3,
        'num2': 6,
        },
    'clpt_donnell_bc3': {
        'linear static': True,
        'linear buckling': True,
        'non-linear static': True,
        'commons': clpt_commons_bc3,
        'linear': clpt_donnell_bc3_linear,
        'non-linear': clpt_donnell_bc3_nonlinear,
        'dofs': 3,
        'e_num': 6,
        'i0': 0,
        'j0': 1,
        'num0': 3,
        'num1': 3,
        'num2': 6,
        },
    'iso_clpt_donnell_bc3': {
        'linear static': True,
        'linear buckling': True,
        'non-linear static': True,
        'commons': clpt_commons_bc3,
        'linear': iso_clpt_donnell_bc3_linear,
        'non-linear': iso_clpt_donnell_bc3_nonlinear,
        'dofs': 3,
        'e_num': 6,
        'i0': 0,
        'j0': 1,
        'num0': 3,
        'num1': 3,
        'num2': 6,
        },
    'clpt_donnell_bc4': {
        'linear static': True,
        'linear buckling': True,
        'non-linear static': True,
        'commons': clpt_commons_bc4,
        'linear': clpt_donnell_bc4_linear,
        'non-linear': clpt_donnell_bc4_nonlinear,
        'dofs': 3,
        'e_num': 6,
        'i0': 0,
        'j0': 1,
        'num0': 3,
        'num1': 3,
        'num2': 6,
        },
    'clpt_donnell_bcn': {
        'linear static': True,
        'linear buckling': False,
        'non-linear static': None,
        'commons': clpt_commons_bcn,
        'linear': clpt_donnell_bcn_linear,
        'non-linear': None,
        'dofs': 3,
        'e_num': 6,
        'i0': 0,
        'j0': 1,
        'num0': 3,
        'num1': 3,
        'num2': 8,
        },
    'clpt_sanders_bc1': {
        'linear static': True,
        'linear buckling': True,
        'non-linear static': True,
        'commons': clpt_commons_bc1,
        'linear': clpt_sanders_bc1_linear,
        'non-linear': clpt_sanders_bc1_nonlinear,
        'dofs': 3,
        'e_num': 6,
        'i0': 0,
        'j0': 1,
        'num0': 3,
        'num1': 3,
        'num2': 6,
        },
    'clpt_sanders_bc2': {
        'linear static': True,
        'linear buckling': True,
        'non-linear static': True,
        'commons': clpt_commons_bc2,
        'linear': clpt_sanders_bc2_linear,
        'non-linear': clpt_sanders_bc2_nonlinear,
        'dofs': 3,
        'e_num': 6,
        'i0': 0,
        'j0': 1,
        'num0': 3,
        'num1': 3,
        'num2': 6,
        },
    'clpt_sanders_bc3': {
        'linear static': True,
        'linear buckling': True,
        'non-linear static': True,
        'commons': clpt_commons_bc3,
        'linear': clpt_sanders_bc3_linear,
        'non-linear': clpt_sanders_bc3_nonlinear,
        'dofs': 3,
        'e_num': 6,
        'i0': 0,
        'j0': 1,
        'num0': 3,
        'num1': 3,
        'num2': 6,
        },
    'clpt_sanders_bc4': {
        'linear static': True,
        'linear buckling': True,
        'non-linear static': True,
        'commons': clpt_commons_bc4,
        'linear': clpt_sanders_bc4_linear,
        'non-linear': clpt_sanders_bc4_nonlinear,
        'dofs': 3,
        'e_num': 6,
        'i0': 0,
        'j0': 1,
        'num0': 3,
        'num1': 3,
        'num2': 6,
        },
    'clpt_geier1997_bc2': {
        'linear static': None,
        'linear buckling': True,
        'non-linear static': None,
        'commons': clpt_geier1997_bc2,
        'linear': clpt_geier1997_bc2,
        'non-linear': None,
        'dofs': 3,
        'e_num': 6,
        'i0': 0,
        'j0': 0,
        'num0': 0,
        'num1': 0,
        'num2': 3,
        },
    'fsdt_donnell_bcn': {
        'linear static': True,
        'linear buckling': False,
        'non-linear static': True,
        'commons': fsdt_commons_bcn,
        'linear': fsdt_donnell_bcn_linear,
        'non-linear': fsdt_donnell_bcn_nonlinear,
        'dofs': 5,
        'e_num': 8,
        'i0': 0,
        'j0': 1,
        'num0': 3,
        'num1': 5,
        'num2': 10,
        },
    'fsdt_donnell_bc1': {
        'linear static': True,
        'linear buckling': True,
        'non-linear static': True,
        'commons': fsdt_commons_bc1,
        'linear': fsdt_donnell_bc1_linear,
        'non-linear': fsdt_donnell_bc1_nonlinear,
        'dofs': 5,
        'e_num': 8,
        'i0': 0,
        'j0': 1,
        'num0': 3,
        'num1': 5,
        'num2': 10,
        },
    'fsdt_donnell_bc2': {
        'linear static': True,
        'linear buckling': True,
        'non-linear static': False,
        'commons': fsdt_commons_bc2,
        'linear': fsdt_donnell_bc2_linear,
        'non-linear': fsdt_donnell_bc2_nonlinear,
        'dofs': 5,
        'e_num': 8,
        'i0': 0,
        'j0': 1,
        'num0': 3,
        'num1': 5,
        'num2': 10,
        },
    'fsdt_donnell_bc3': {
        'linear static': True,
        'linear buckling': True,
        'non-linear static': False,
        'commons': fsdt_commons_bc3,
        'linear': fsdt_donnell_bc3_linear,
        'non-linear': fsdt_donnell_bc3_nonlinear,
        'dofs': 5,
        'e_num': 8,
        'i0': 0,
        'j0': 1,
        'num0': 3,
        'num1': 5,
        'num2': 10,
        },
    'fsdt_donnell_bc4': {
        'linear static': True,
        'linear buckling': True,
        'non-linear static': False,
        'commons': fsdt_commons_bc4,
        'linear': fsdt_donnell_bc4_linear,
        'non-linear': fsdt_donnell_bc4_nonlinear,
        'dofs': 5,
        'e_num': 8,
        'i0': 0,
        'j0': 1,
        'num0': 3,
        'num1': 5,
        'num2': 10,
        },
    'fsdt_sanders_bcn': {
        'linear static': True,
        'linear buckling': False,
        'non-linear static': False,
        'commons': fsdt_commons_bcn,
        'linear': fsdt_sanders_bcn_linear,
        'non-linear': None,
        'dofs': 5,
        'e_num': 8,
        'i0': 0,
        'j0': 1,
        'num0': 3,
        'num1': 5,
        'num2': 10,
        },
    'fsdt_shadmehri2012_bc2': {
        'linear static': None,
        'linear buckling': True,
        'non-linear static': None,
        'commons': fsdt_shadmehri2012_bc2,
        'linear': fsdt_shadmehri2012_bc2,
        'non-linear': None,
        'dofs': 5,
        'e_num': 8,
        'i0': 0,
        'j0': 0,
        'num0': 0,
        'num1': 0,
        'num2': 5,
        },
    'fsdt_shadmehri2012_bc3': {
        'linear static': None,
        'linear buckling': True,
        'non-linear static': None,
        'commons': fsdt_shadmehri2012_bc3,
        'linear': fsdt_shadmehri2012_bc3,
        'non-linear': None,
        'dofs': 5,
        'e_num': 8,
        'i0': 0,
        'j0': 0,
        'num0': 0,
        'num1': 0,
        'num2': 5,
        },
    'fsdt_geier1997_bc2': {
        'linear static': None,
        'linear buckling': True,
        'non-linear static': None,
        'commons': fsdt_geier1997_bc2,
        'linear': fsdt_geier1997_bc2,
        'non-linear': None,
        'dofs': 5,
        'e_num': 8,
        'i0': 0,
        'j0': 0,
        'num0': 0,
        'num1': 0,
        'num2': 5,
        },
    }
def get_linear_matrices(cc, combined_load_case=None):
    r"""Obtain the right functions to calculate the linear matrices
    for a given model.

    The ``model`` parameter of the ``ConeCyl`` object is used to search
    for the functions ``fG0``, ``fG0_cyl``, ``fkG0``, ``fkG0_cyl``,
    and the matrix ``k0edges`` is calculated, when applicable.

    Parameters
    ----------
    cc : compmech.conecyl.ConeCyl
        The ``ConeCyl`` object.
    combined_load_case : int, optional
        As explained in the :meth:`ConeCyl.lb() <compmech.conecyl.ConeCyl.lb>`
        method, the integer indicating
        which combined load case should be used. Default is ``None``.
        (Currently accepted for interface compatibility; not used here.)

    Returns
    -------
    out : tuple
        A tuple containing ``(fk0, fk0_cyl, fkG0, fkG0_cyl, k0edges)``.
        ``k0edges`` is ``None`` when the model's linear module does not
        implement ``fk0edges``.

    """
    r1 = cc.r1
    r2 = cc.r2
    L = cc.L
    m1 = cc.m1
    m2 = cc.m2
    n2 = cc.n2
    model = cc.model

    # k0edges stays None when no edge-stiffness routine is available or
    # the model is not handled in the dispatch chain below.
    k0edges = None
    try:
        if 'iso_' in model:
            # isotropic variants reuse the composite model's edge routine
            fk0edges = db[model[4:]]['linear'].fk0edges
        else:
            fk0edges = db[model]['linear'].fk0edges
    except AttributeError:
        # BUGFIX: previously only ``k0edges`` was set to None here while the
        # dispatch chain below still dereferenced the unbound ``fk0edges``,
        # raising NameError for models without an fk0edges routine.
        fk0edges = None

    if fk0edges is not None:
        # Each model variant exposes a different fk0edges signature
        # (CLPT routines take L; FSDT routines do not; the elastic
        # constants passed depend on the boundary condition type).
        if model == 'clpt_donnell_bc1':
            k0edges = fk0edges(m1, m2, n2, r1, r2, L,
                               cc.kphixBot, cc.kphixTop)
        elif model == 'clpt_donnell_bc2':
            k0edges = fk0edges(m1, m2, n2, r1, r2, L,
                               cc.kuBot, cc.kuTop,
                               cc.kphixBot, cc.kphixTop)
        elif model == 'iso_clpt_donnell_bc2':
            k0edges = fk0edges(m1, m2, n2, r1, r2, L,
                               cc.kuBot, cc.kuTop,
                               cc.kphixBot, cc.kphixTop)
        elif model == 'clpt_donnell_bc3':
            k0edges = fk0edges(m1, m2, n2, r1, r2, L,
                               cc.kvBot, cc.kvTop,
                               cc.kphixBot, cc.kphixTop)
        elif model == 'iso_clpt_donnell_bc3':
            k0edges = fk0edges(m1, m2, n2, r1, r2, L,
                               cc.kvBot, cc.kvTop,
                               cc.kphixBot, cc.kphixTop)
        elif model == 'clpt_donnell_bc4':
            k0edges = fk0edges(m1, m2, n2, r1, r2, L,
                               cc.kuBot, cc.kuTop,
                               cc.kvBot, cc.kvTop,
                               cc.kphixBot, cc.kphixTop)
        elif model == 'clpt_donnell_bcn':
            k0edges = fk0edges(m1, m2, n2, r1, r2, L,
                               cc.kuBot, cc.kuTop,
                               cc.kvBot, cc.kvTop,
                               cc.kwBot, cc.kwTop,
                               cc.kphixBot, cc.kphixTop,
                               cc.kphitBot, cc.kphitTop)
        elif model == 'clpt_sanders_bc1':
            k0edges = fk0edges(m1, m2, n2, r1, r2, L,
                               cc.kphixBot, cc.kphixTop)
        elif model == 'clpt_sanders_bc2':
            k0edges = fk0edges(m1, m2, n2, r1, r2, L,
                               cc.kuBot, cc.kuTop,
                               cc.kphixBot, cc.kphixTop)
        elif model == 'clpt_sanders_bc3':
            k0edges = fk0edges(m1, m2, n2, r1, r2, L,
                               cc.kvBot, cc.kvTop,
                               cc.kphixBot, cc.kphixTop)
        elif model == 'clpt_sanders_bc4':
            k0edges = fk0edges(m1, m2, n2, r1, r2, L,
                               cc.kuBot, cc.kuTop,
                               cc.kvBot, cc.kvTop,
                               cc.kphixBot, cc.kphixTop)
        elif model == 'clpt_geier1997_bc2':
            k0edges = fk0edges(m1, m2, n2, r1, r2, L,
                               cc.kuBot, cc.kuTop,
                               cc.kphixBot, cc.kphixTop)
        elif model == 'fsdt_donnell_bc1':
            k0edges = fk0edges(m1, m2, n2, r1, r2,
                               cc.kphixBot, cc.kphixTop)
        elif model == 'fsdt_donnell_bc2':
            k0edges = fk0edges(m1, m2, n2, r1, r2,
                               cc.kuBot, cc.kuTop,
                               cc.kphixBot, cc.kphixTop)
        elif model == 'fsdt_donnell_bc3':
            k0edges = fk0edges(m1, m2, n2, r1, r2,
                               cc.kvBot, cc.kvTop,
                               cc.kphixBot, cc.kphixTop)
        elif model == 'fsdt_donnell_bc4':
            k0edges = fk0edges(m1, m2, n2, r1, r2,
                               cc.kuBot, cc.kuTop,
                               cc.kvBot, cc.kvTop,
                               cc.kphixBot, cc.kphixTop)
        elif model == 'fsdt_donnell_bcn':
            k0edges = fk0edges(m1, m2, n2, r1, r2,
                               cc.kuBot, cc.kuTop,
                               cc.kvBot, cc.kvTop,
                               cc.kwBot, cc.kwTop,
                               cc.kphixBot, cc.kphixTop,
                               cc.kphitBot, cc.kphitTop)
        elif model == 'fsdt_sanders_bcn':
            k0edges = fk0edges(m1, m2, n2, r1, r2,
                               cc.kuBot, cc.kuTop,
                               cc.kvBot, cc.kvTop,
                               cc.kwBot, cc.kwTop,
                               cc.kphixBot, cc.kphixTop,
                               cc.kphitBot, cc.kphitTop)
        elif model == 'fsdt_shadmehri2012_bc2':
            k0edges = fk0edges(m1, m2, n2, r1, r2,
                               cc.kuBot, cc.kuTop,
                               cc.kphixBot, cc.kphixTop)
        elif model == 'fsdt_shadmehri2012_bc3':
            k0edges = fk0edges(m1, m2, n2, r1, r2,
                               cc.kvBot, cc.kvTop,
                               cc.kphixBot, cc.kphixTop)
        elif model == 'fsdt_geier1997_bc2':
            k0edges = fk0edges(m1, m2, n2, r1, r2,
                               cc.kuBot, cc.kuTop,
                               cc.kphixBot, cc.kphixTop)

    fk0 = db[model]['linear'].fk0
    fk0_cyl = db[model]['linear'].fk0_cyl
    if 'iso_' in model:
        # geometric stiffness routines are shared with the composite model
        fkG0 = db[model[4:]]['linear'].fkG0
        fkG0_cyl = db[model[4:]]['linear'].fkG0_cyl
    else:
        fkG0 = db[model]['linear'].fkG0
        fkG0_cyl = db[model]['linear'].fkG0_cyl

    return fk0, fk0_cyl, fkG0, fkG0_cyl, k0edges
# Alphabetically sorted list of the implemented model names.
valid_models = sorted(db)
def get_model(model_name):
    """Return the database entry for a given model.

    Parameters
    ----------
    model_name : str
        One of the names listed in ``valid_models``.

    Returns
    -------
    dict
        The model's entry in ``db``.

    Raises
    ------
    ValueError
        If ``model_name`` is not a valid model name.

    """
    # idiomatic membership test (was ``not model_name in ...``); the
    # redundant ``else`` after ``raise`` is also dropped
    if model_name not in valid_models:
        raise ValueError('ERROR - valid models are:\n ' +
                         '\n '.join(valid_models))
    return db[model_name]
| saullocastro/compmech | compmech/conecyl/modelDB.py | Python | bsd-3-clause | 17,932 |
import os.path
import gwt
from ...weights import W
from warnings import warn
__author__ = "Myunghwa Hwang <mhwang4@gmail.com>"
__all__ = ["DatIO"]
class DatIO(gwt.GwtIO):
    """Reader/writer for spatial weights files in DAT format.

    The DAT format is used for spatial weights by Dr. LeSage's MatLab
    Econ library.  It is a plain text file (``.dat`` extension) with no
    header line and three numeric columns per row -- origin id,
    destination id and weight::

        2 1 0.25
        5 1 0.50

    Origin/destination ids are simply record numbers starting at 1,
    although they are not necessarily integers.  All column values must
    be numeric.
    """
    FORMATS = ['dat']
    MODES = ['r', 'w']

    def _read(self):
        """Read the opened .dat file into a pysal W object.

        Only one read is supported per opened file; any further call
        raises StopIteration.
        """
        if self.pos > 0:
            raise StopIteration
        # ids in DAT files are plain numbers, so they are parsed as floats
        weights, neighbors = self._readlines(float)
        self.pos += 1
        return W(neighbors, weights)

    def write(self, obj):
        """Write a pysal W object to the opened DAT file.

        Parameters
        ----------
        obj : pysal.weights.W
            The weights object to serialize.

        Raises
        ------
        TypeError
            If ``obj`` is not a pysal weights object.
        """
        self._complain_ifclosed(self.closed)
        # guard clause: reject anything that is not a weights object
        if not issubclass(type(obj), W):
            raise TypeError("Expected a pysal weights object, got: %s" % (
                type(obj)))
        self._writelines(obj)
| sjsrey/pysal_core | pysal_core/io/IOHandlers/dat.py | Python | bsd-3-clause | 3,059 |
"""
Test lldb data formatter subsystem.
"""
from __future__ import print_function
import os
import time
import lldb
from lldbsuite.test.lldbtest import *
import lldbsuite.test.lldbutil as lldbutil
class AdvDataFormatterTestCase(TestBase):
    """Exercise advanced features of LLDB's ``type summary`` / ``type format``
    commands: regex vs exact matching, bitfield/array dereferencing inside
    summary strings, and the target.max-children-count setting."""

    mydir = TestBase.compute_mydir(__file__)

    def setUp(self):
        # Call super's setUp().
        TestBase.setUp(self)
        # Find the line number to break at.
        self.line = line_number('main.cpp', '// Set break point at this line.')

    def test_with_run_command(self):
        """Test that that file and class static variables display correctly."""
        self.build()
        self.runCmd("file " + self.getBuildArtifact("a.out"), CURRENT_EXECUTABLE_SET)

        lldbutil.run_break_set_by_file_and_line(
            self, "main.cpp", self.line, num_expected_locations=1, loc_exact=True)

        self.runCmd("run", RUN_SUCCEEDED)

        # The stop reason of the thread should be breakpoint.
        self.expect("thread list", STOPPED_DUE_TO_BREAKPOINT,
                    substrs=['stopped',
                             'stop reason = breakpoint'])

        # This is the function to remove the custom formats in order to have a
        # clean slate for the next test case.
        def cleanup():
            self.runCmd('type format clear', check=False)
            self.runCmd('type summary clear', check=False)
            self.runCmd(
                "settings set target.max-children-count 256",
                check=False)

        # Execute the cleanup function during test case tear down.
        self.addTearDownHook(cleanup)

        # An exact-name summary takes precedence over a regex summary;
        # deleting the exact match must fall back to the regex one.
        self.runCmd("type summary add --summary-string \"pippo\" \"i_am_cool\"")

        self.runCmd(
            "type summary add --summary-string \"pluto\" -x \"i_am_cool[a-z]*\"")

        self.expect("frame variable cool_boy",
                    substrs=['pippo'])

        self.expect("frame variable cooler_boy",
                    substrs=['pluto'])

        self.runCmd("type summary delete i_am_cool")

        self.expect("frame variable cool_boy",
                    substrs=['pluto'])

        self.runCmd("type summary clear")

        # ${var[]} with a regex type name expands a whole array.
        self.runCmd(
            "type summary add --summary-string \"${var[]}\" -x \"int \\[[0-9]\\]")

        self.expect("frame variable int_array",
                    substrs=['1,2,3,4,5'])

        # this will fail if we don't do [] as regex correctly
        self.runCmd(
            'type summary add --summary-string "${var[].integer}" "i_am_cool[]')

        self.expect("frame variable cool_array",
                    substrs=['1,1,1,1,6'])

        self.runCmd("type summary clear")

        # Bitfield ranges in summaries: [hi-lo] and [lo-hi] select the same
        # bits regardless of order.
        self.runCmd(
            "type summary add --summary-string \"${var[1-0]%x}\" \"int\"")

        self.expect("frame variable iAmInt",
                    substrs=['01'])

        self.runCmd(
            "type summary add --summary-string \"${var[0-1]%x}\" \"int\"")

        self.expect("frame variable iAmInt",
                    substrs=['01'])

        self.runCmd("type summary clear")

        self.runCmd("type summary add --summary-string \"${var[0-1]%x}\" int")
        self.runCmd(
            "type summary add --summary-string \"${var[0-31]%x}\" float")

        self.expect("frame variable *pointer",
                    substrs=['0x',
                             '2'])

        # check fix for <rdar://problem/11338654> LLDB crashes when using a
        # "type summary" that uses bitfields with no format
        self.runCmd("type summary add --summary-string \"${var[0-1]}\" int")
        self.expect("frame variable iAmInt",
                    substrs=['9 1'])

        self.expect("frame variable cool_array[3].floating",
                    substrs=['0x'])

        # Dereferencing (${*var}) combined with bitfield access.
        self.runCmd(
            "type summary add --summary-string \"low bits are ${*var[0-1]} tgt is ${*var}\" \"int *\"")

        self.expect("frame variable pointer",
                    substrs=['low bits are',
                             'tgt is 6'])

        self.expect(
            "frame variable int_array --summary-string \"${*var[0-1]}\"",
            substrs=['3'])

        self.runCmd("type summary clear")

        # "int []" must match both regex-style and literal empty-bracket
        # array type names.
        self.runCmd(
            'type summary add --summary-string \"${var[0-1]}\" -x \"int \[[0-9]\]\"')

        self.expect("frame variable int_array",
                    substrs=['1,2'])

        self.runCmd(
            'type summary add --summary-string \"${var[0-1]}\" "int []"')

        self.expect("frame variable int_array",
                    substrs=['1,2'])

        self.runCmd("type summary clear")

        # -c (show children) cascades through array element summaries.
        self.runCmd("type summary add -c -x \"i_am_cool \[[0-9]\]\"")
        self.runCmd("type summary add -c i_am_cool")

        self.expect("frame variable cool_array",
                    substrs=['[0]',
                             '[1]',
                             '[2]',
                             '[3]',
                             '[4]',
                             'integer',
                             'character',
                             'floating'])

        # Member-pointer dereferencing inside summary strings.
        self.runCmd(
            "type summary add --summary-string \"int = ${*var.int_pointer}, float = ${*var.float_pointer}\" IWrapPointers")

        self.expect("frame variable wrapper",
                    substrs=['int = 4',
                             'float = 1.1'])

        self.runCmd(
            "type summary add --summary-string \"low bits = ${*var.int_pointer[2]}\" IWrapPointers -p")

        self.expect("frame variable wrapper",
                    substrs=['low bits = 1'])

        self.expect("frame variable *wrap_pointer",
                    substrs=['low bits = 1'])

        self.runCmd("type summary clear")

        self.expect(
            "frame variable int_array --summary-string \"${var[0][0-2]%hex}\"",
            substrs=[
                '0x',
                '7'])

        self.runCmd("type summary clear")

        self.runCmd(
            "type summary add --summary-string \"${*var[].x[0-3]%hex} is a bitfield on a set of integers\" -x \"SimpleWithPointers \[[0-9]\]\"")

        self.expect(
            "frame variable couple --summary-string \"${*var.sp.x[0-2]} are low bits of integer ${*var.sp.x}. If I pretend it is an array I get ${var.sp.x[0-5]}\"",
            substrs=[
                '1 are low bits of integer 9.',
                'If I pretend it is an array I get [9,'])

        # if the summary has an error, we still display the value
        self.expect(
            "frame variable couple --summary-string \"${*var.sp.foo[0-2]\"",
            substrs=[
                '(Couple) couple = {',
                'x = 0x',
                'y = 0x',
                'z = 0x',
                's = 0x'])

        self.runCmd(
            "type summary add --summary-string \"${*var.sp.x[0-2]} are low bits of integer ${*var.sp.x}. If I pretend it is an array I get ${var.sp.x[0-5]}\" Couple")

        self.expect("frame variable sparray",
                    substrs=['[0x0000000f,0x0000000c,0x00000009]'])

        # check that we can format a variable in a summary even if a format is
        # defined for its datatype
        self.runCmd("type format add -f hex int")
        self.runCmd(
            "type summary add --summary-string \"x=${var.x%d}\" Simple")

        self.expect("frame variable a_simple_object",
                    substrs=['x=3'])

        self.expect("frame variable a_simple_object", matching=False,
                    substrs=['0x0'])

        # now check that the default is applied if we do not hand out a format
        self.runCmd("type summary add --summary-string \"x=${var.x}\" Simple")

        self.expect("frame variable a_simple_object", matching=False,
                    substrs=['x=3'])

        self.expect("frame variable a_simple_object", matching=True,
                    substrs=['x=0x00000003'])

        # check that we can correctly cap the number of children shown
        self.runCmd("settings set target.max-children-count 5")

        self.expect('frame variable a_long_guy', matching=True,
                    substrs=['a_1',
                             'b_1',
                             'c_1',
                             'd_1',
                             'e_1',
                             '...'])

        # check that no further stuff is printed (not ALL values are checked!)
        self.expect('frame variable a_long_guy', matching=False,
                    substrs=['f_1',
                             'g_1',
                             'h_1',
                             'i_1',
                             'j_1',
                             'q_1',
                             'a_2',
                             'f_2',
                             't_2',
                             'w_2'])

        self.runCmd("settings set target.max-children-count 1")
        self.expect('frame variable a_long_guy', matching=True,
                    substrs=['a_1',
                             '...'])
        self.expect('frame variable a_long_guy', matching=False,
                    substrs=['b_1',
                             'c_1',
                             'd_1',
                             'e_1'])
        self.expect('frame variable a_long_guy', matching=False,
                    substrs=['f_1',
                             'g_1',
                             'h_1',
                             'i_1',
                             'j_1',
                             'q_1',
                             'a_2',
                             'f_2',
                             't_2',
                             'w_2'])

        self.runCmd("settings set target.max-children-count 30")
        self.expect('frame variable a_long_guy', matching=True,
                    substrs=['a_1',
                             'b_1',
                             'c_1',
                             'd_1',
                             'e_1',
                             'z_1',
                             'a_2',
                             'b_2',
                             'c_2',
                             'd_2',
                             '...'])
        self.expect('frame variable a_long_guy', matching=False,
                    substrs=['e_2',
                             'n_2',
                             'r_2',
                             'i_2',
                             'k_2',
                             'o_2'])

        # override the cap
        self.expect(
            'frame variable a_long_guy --show-all-children',
            matching=True,
            substrs=[
                'a_1',
                'b_1',
                'c_1',
                'd_1',
                'e_1',
                'z_1',
                'a_2',
                'b_2',
                'c_2',
                'd_2'])
        self.expect(
            'frame variable a_long_guy --show-all-children',
            matching=True,
            substrs=[
                'e_2',
                'n_2',
                'r_2',
                'i_2',
                'k_2',
                'o_2'])
        self.expect(
            'frame variable a_long_guy --show-all-children',
            matching=False,
            substrs=['...'])
| youtube/cobalt | third_party/llvm-project/lldb/packages/Python/lldbsuite/test/functionalities/data-formatter/data-formatter-advanced/TestDataFormatterAdv.py | Python | bsd-3-clause | 11,243 |
""" Module to correct pulsar and FRB DMs for the MW ISM """
from ne2001 import ne_io, density #ne2001 ism model
import pygedm #ymw ism model
import numpy as np
import pandas as pd
from astropy import units as u
from astropy.coordinates import SkyCoord, Galactic
import logging
# Timestamped, INFO-level logging for the progress messages emitted below.
logging.basicConfig(format='%(asctime)s - %(message)s', datefmt='%d-%b-%y %H:%M:%S', level=logging.INFO)

# NE2001 electron-density model instance, built once at import time and
# reused by find_delta_dm for every line of sight.
ne = density.ElectronDensity()
def find_delta_dm(transient_type,transient_data,ism_model,b_val,mc_deg=5,save_df=True):
    """
    Find pulsar/FRB DMs corrected for by the MW ISM DM and remove observations in complex DM regions.

    FRB data is available as a csv in the FRBs/FRB/frb/data/FRBs repo (FRB catalogue [Petroff et al. 2017])
    Pulsar data is available as a csv in the FRBs/pulsars/pulsars/data/atnf_cat repo (v1.61 ATNF pulsar catalogue [Manchester et al. 2005])

    Arguments:
        transient_type (str):
            Accepts 'frb' or 'pulsar'.
        transient_data (str):
            Path to data (in .csv format).
        ism_model (str):
            Model used to calculate the MW ISM DM along each sightline.
            Accepts 'ymw16' [Yao et al. 2017] or 'ne2001' [Cordes & Lazio 2003].
        b_val (int):
            Galactic latitude considered (b>b_val, b<-b_val).
        mc_deg (int):
            Number of degrees from Magellanic clouds within which transients are removed.
        save_df (bool, optional):
            Save transient DMs and coords to csv
            ('transient_data/<type>cat_df_<model>_<b_val>.csv').

    Outputs:
        numpy.ndarray of DM excesses (observed DM minus the model ISM
        contribution) for the transients surviving the Magellanic-cloud
        and low-latitude cuts.
    """
    # Sort data and get coords
    if transient_type=='frb':
        # FRB catalogue: DM column may hold "value&error"; keep the value.
        transcat_df = pd.read_csv(transient_data, skiprows=1, usecols= [0,5,6,7], names=['Name','l','b','dm'])
        transcat_df['dm'] = transcat_df['dm'].str.split('&').str[0].astype(float).values
        coords = SkyCoord(l=transcat_df['l'], b=transcat_df['b'], unit=(u.degree),frame=Galactic)
    elif transient_type=='pulsar':
        # ATNF catalogue: drop rows with '*' (missing DM) and convert
        # equatorial coordinates to Galactic.
        transcat_df = pd.read_csv(transient_data, skiprows=2, usecols = [1,2,3,9,10], names=['Name','Pref','dm','RAJD','DECJD'])
        transcat_df = transcat_df[~transcat_df['dm'].str.contains('*', regex=False)].reset_index(drop=True)
        transcat_df['dm'] = transcat_df['dm'].astype(float)
        c_icrs = SkyCoord(ra=transcat_df['RAJD'], dec=transcat_df['DECJD'], unit=(u.degree), frame='icrs')
        transcat_df['l'] = pd.DataFrame(c_icrs.galactic.l.value)
        transcat_df['b'] = pd.DataFrame(c_icrs.galactic.b.value)
        coords = SkyCoord(l=transcat_df['l'], b=transcat_df['b'], unit=(u.degree),frame=Galactic)
    # Find transients in line of sight of MCs
    logging.info('Removing transients near Magellanic clouds...')
    # LMC
    lmc_distance = 50*u.kpc
    lmc_coord = SkyCoord('J052334.6-694522',unit=(u.hourangle, u.deg),distance=lmc_distance)
    close_to_lmc = lmc_coord.separation(coords) < mc_deg*u.deg
    lmc_trans = list(transcat_df[close_to_lmc]['Name'])
    # SMC
    smc_distance = 61*u.kpc
    smc_coord = SkyCoord('J005238.0-724801',unit=(u.hourangle, u.deg),distance=smc_distance)
    close_to_smc = smc_coord.separation(coords) < mc_deg*u.deg
    smc_trans = list(transcat_df[close_to_smc]['Name'])
    transcat_df = transcat_df[~transcat_df['Name'].isin(lmc_trans)].reset_index(drop=True)
    transcat_df = transcat_df[~transcat_df['Name'].isin(smc_trans)].reset_index(drop=True)
    if transient_type=='pulsar':
        # presumably 'mfl+06' flags Magellanic-cloud pulsar discoveries
        # (Manchester et al. 2006) -- TODO confirm
        transcat_df = transcat_df[~transcat_df['Pref'].str.contains('mfl+06', regex=False)].reset_index(drop=True)
    elif transient_type=='frb':
        pass
    # Remove transients with low Galactic lattitudes
    logging.info('Removing transients with low Galactic lattitudes...')
    transcat_df = pd.concat([transcat_df[transcat_df.b > b_val], transcat_df[transcat_df.b < -b_val]], ignore_index=True)
    # ISM model: integrate the electron density out to a large distance
    # (effectively through the whole Galaxy).  NOTE(review): the distance
    # argument looks like pc for pygedm (100000) but kpc for ne2001 (100.)
    # -- confirm against each package's API.
    logging.info('Correcting transient DMs for ISM...')
    trans_ism = []
    if ism_model=='ymw16':
        for i in range(len(transcat_df['dm'])):
            trans_ism_ = pygedm.dist_to_dm(transcat_df['l'].iloc[i], transcat_df['b'].iloc[i], 100000)[0].value
            trans_ism = np.append(trans_ism,trans_ism_)
    elif ism_model=='ne2001':
        for i in range(len(transcat_df['dm'])):
            trans_ism_ = ne.DM(transcat_df['l'].iloc[i], transcat_df['b'].iloc[i], 100.).value
            trans_ism = np.append(trans_ism,trans_ism_)
    transcat_df['trans_ism'] = pd.DataFrame(trans_ism)
    # DM excess: observed DM minus the modelled ISM contribution
    transcat_df['deltaDM'] = pd.DataFrame(transcat_df['dm']-transcat_df['trans_ism'])
    if save_df==True:
        transcat_df.to_csv('transient_data/'+transient_type+'cat_df_'+ism_model+'_'+str(int(b_val))+'.csv')
        logging.info('Transient data saved to csv.')
    else:
        pass
    return np.array(transcat_df['deltaDM'])
| FRBs/DM | frb/dm_kde/sort_transient_data.py | Python | bsd-3-clause | 4,742 |
from django import forms
# HTTP verbs the ad-hoc API call form accepts, as (value, label) pairs
# for a Django ChoiceField.
METHOD_CHOICES = (
    ("GET", "GET"),
    ("POST", "POST")
)
class ApiCallForm(forms.Form):
    """Form for composing an ad-hoc API request: HTTP method, path and
    an optional request body."""
    method = forms.ChoiceField(choices=METHOD_CHOICES, initial="GET")
    path = forms.CharField(initial="/api/myself/")
    # Request payload; optional since GET requests carry no body.
    data = forms.CharField(widget=forms.Textarea, required=False)
| hylje/lbtcex | lbtcex/main/forms.py | Python | bsd-3-clause | 307 |
# -*- coding:utf-8 -*-
import unittest
import mock
from ..models import JobPosting
class JobPostingTestCase(unittest.TestCase):
    """Unit tests for the JobPosting model."""

    def test_unicode_should_return_position_name(self):
        # setup: a posting with a known position name
        posting = JobPosting()
        posting.position_name = 'Position Name'
        # action: take the model's unicode representation
        label = unicode(posting)
        # assert: it must echo the position name
        self.assertEqual(posting.position_name, label)
| hellhovnd/dentexchange | dentexchange/apps/employer/tests/test_job_posting.py | Python | bsd-3-clause | 398 |
# -*- coding: utf-8 -*-
"""
Test output formatting for Series/DataFrame, including to_string & reprs
"""
from __future__ import print_function
from datetime import datetime
import itertools
from operator import methodcaller
import os
import re
import sys
import textwrap
import warnings
import dateutil
import numpy as np
import pytest
import pytz
import pandas.compat as compat
from pandas.compat import (
PY3, StringIO, is_platform_32bit, is_platform_windows, lrange, lzip, range,
u, zip)
import pandas as pd
from pandas import (
DataFrame, Index, MultiIndex, NaT, Series, Timestamp, date_range, read_csv)
from pandas.core.config import (
get_option, option_context, reset_option, set_option)
import pandas.util.testing as tm
import pandas.io.formats.format as fmt
import pandas.io.formats.printing as printing
from pandas.io.formats.terminal import get_terminal_size
# True where numpy's default integer repr differs (Windows or 32-bit
# platforms); tests use this to pick the expected dtype strings.
use_32bit_repr = is_platform_windows() or is_platform_32bit()

# Shared fixture DataFrame built from pandas' standard test series data.
_frame = DataFrame(tm.getSeriesData())
def curpath():
    """Return the absolute directory containing this module."""
    return os.path.dirname(os.path.abspath(__file__))
def has_info_repr(df):
    """Return True when repr(df) is the summary ("info") view rather than
    a rendered table: the info view's first line starts with "<class"."""
    first_line = repr(df).split('\n')[0]
    return first_line.startswith("<class")
def has_non_verbose_info_repr(df):
    """Return True when repr(df) is the info view in its short
    (non-verbose) form.

    The non-verbose info repr is exactly six lines:
    1. <class>  2. Index  3. Columns  4. dtype  5. memory usage
    6. trailing newline
    """
    if not has_info_repr(df):
        return False
    return len(repr(df).split('\n')) == 6
def has_horizontally_truncated_repr(df):
    """Return True when repr(df) shows a column ellipsis ('...') in its
    header row (i.e. columns were truncated)."""
    lines = repr(df).splitlines()
    header_tokens = np.array(lines[0].split())
    try:
        cand_col = np.where(header_tokens == '...')[0][0]
    except IndexError:
        # no '...' token in the header: not horizontally truncated
        return False
    # NOTE(review): this loop indexes the token list of the WHOLE repr,
    # not of each line, so the tested value never changes between
    # iterations -- looks like it was meant to check each row's tokens.
    # Behavior kept as-is since downstream truncation tests rely on it.
    r = repr(df)
    all_tokens = r.split()
    for _ in r.splitlines():
        if all_tokens[cand_col] != '...':
            return False
    return True
def has_vertically_truncated_repr(df):
    """Return True when repr(df) contains a row made up solely of dots and
    spaces -- the marker pandas prints when rows are truncated."""
    for line in repr(df).splitlines():
        if re.match(r'^[\.\ ]+$', line):
            return True
    return False
def has_truncated_repr(df):
    """Return True when the repr is truncated in either direction."""
    if has_horizontally_truncated_repr(df):
        return True
    return has_vertically_truncated_repr(df)
def has_doubly_truncated_repr(df):
    """Return True when the repr is truncated both horizontally and
    vertically."""
    if not has_horizontally_truncated_repr(df):
        return False
    return has_vertically_truncated_repr(df)
def has_expanded_repr(df):
    """Return True when repr(df) wraps onto continuation lines (any line
    ending with a backslash)."""
    return any(line.endswith('\\') for line in repr(df).split('\n'))
class TestDataFrameFormatting(object):
def setup_method(self, method):
self.warn_filters = warnings.filters
warnings.filterwarnings('ignore', category=FutureWarning,
module=".*format")
self.frame = _frame.copy()
def teardown_method(self, method):
warnings.filters = self.warn_filters
def test_repr_embedded_ndarray(self):
arr = np.empty(10, dtype=[('err', object)])
for i in range(len(arr)):
arr['err'][i] = np.random.randn(i)
df = DataFrame(arr)
repr(df['err'])
repr(df)
df.to_string()
def test_eng_float_formatter(self):
self.frame.loc[5] = 0
fmt.set_eng_float_format()
repr(self.frame)
fmt.set_eng_float_format(use_eng_prefix=True)
repr(self.frame)
fmt.set_eng_float_format(accuracy=0)
repr(self.frame)
tm.reset_display_options()
def test_show_null_counts(self):
df = DataFrame(1, columns=range(10), index=range(10))
df.iloc[1, 1] = np.nan
def check(null_counts, result):
buf = StringIO()
df.info(buf=buf, null_counts=null_counts)
assert ('non-null' in buf.getvalue()) is result
with option_context('display.max_info_rows', 20,
'display.max_info_columns', 20):
check(None, True)
check(True, True)
check(False, False)
with option_context('display.max_info_rows', 5,
'display.max_info_columns', 5):
check(None, False)
check(True, False)
check(False, False)
def test_repr_tuples(self):
buf = StringIO()
df = DataFrame({'tups': lzip(range(10), range(10))})
repr(df)
df.to_string(col_space=10, buf=buf)
def test_repr_truncation(self):
max_len = 20
with option_context("display.max_colwidth", max_len):
df = DataFrame({'A': np.random.randn(10),
'B': [tm.rands(np.random.randint(
max_len - 1, max_len + 1)) for i in range(10)
]})
r = repr(df)
r = r[r.find('\n') + 1:]
adj = fmt._get_adjustment()
for line, value in lzip(r.split('\n'), df['B']):
if adj.len(value) + 1 > max_len:
assert '...' in line
else:
assert '...' not in line
with option_context("display.max_colwidth", 999999):
assert '...' not in repr(df)
with option_context("display.max_colwidth", max_len + 2):
assert '...' not in repr(df)
def test_repr_chop_threshold(self):
df = DataFrame([[0.1, 0.5], [0.5, -0.1]])
pd.reset_option("display.chop_threshold") # default None
assert repr(df) == ' 0 1\n0 0.1 0.5\n1 0.5 -0.1'
with option_context("display.chop_threshold", 0.2):
assert repr(df) == ' 0 1\n0 0.0 0.5\n1 0.5 0.0'
with option_context("display.chop_threshold", 0.6):
assert repr(df) == ' 0 1\n0 0.0 0.0\n1 0.0 0.0'
with option_context("display.chop_threshold", None):
assert repr(df) == ' 0 1\n0 0.1 0.5\n1 0.5 -0.1'
def test_repr_chop_threshold_column_below(self):
# GH 6839: validation case
df = pd.DataFrame([[10, 20, 30, 40],
[8e-10, -1e-11, 2e-9, -2e-11]]).T
with option_context("display.chop_threshold", 0):
assert repr(df) == (' 0 1\n'
'0 10.0 8.000000e-10\n'
'1 20.0 -1.000000e-11\n'
'2 30.0 2.000000e-09\n'
'3 40.0 -2.000000e-11')
with option_context("display.chop_threshold", 1e-8):
assert repr(df) == (' 0 1\n'
'0 10.0 0.000000e+00\n'
'1 20.0 0.000000e+00\n'
'2 30.0 0.000000e+00\n'
'3 40.0 0.000000e+00')
with option_context("display.chop_threshold", 5e-11):
assert repr(df) == (' 0 1\n'
'0 10.0 8.000000e-10\n'
'1 20.0 0.000000e+00\n'
'2 30.0 2.000000e-09\n'
'3 40.0 0.000000e+00')
def test_repr_obeys_max_seq_limit(self):
with option_context("display.max_seq_items", 2000):
assert len(printing.pprint_thing(lrange(1000))) > 1000
with option_context("display.max_seq_items", 5):
assert len(printing.pprint_thing(lrange(1000))) < 100
def test_repr_set(self):
assert printing.pprint_thing({1}) == '{1}'
def test_repr_is_valid_construction_code(self):
# for the case of Index, where the repr is traditional rather then
# stylized
idx = Index(['a', 'b'])
res = eval("pd." + repr(idx))
tm.assert_series_equal(Series(res), Series(idx))
def test_repr_should_return_str(self):
# https://docs.python.org/3/reference/datamodel.html#object.__repr__
# "...The return value must be a string object."
# (str on py2.x, str (unicode) on py3)
data = [8, 5, 3, 5]
index1 = [u("\u03c3"), u("\u03c4"), u("\u03c5"), u("\u03c6")]
cols = [u("\u03c8")]
df = DataFrame(data, columns=cols, index=index1)
assert type(df.__repr__()) == str # both py2 / 3
def test_repr_no_backslash(self):
with option_context('mode.sim_interactive', True):
df = DataFrame(np.random.randn(10, 4))
assert '\\' not in repr(df)
    def test_expand_frame_repr(self):
        # expand_frame_repr=True wraps wide frames across multiple "pages"
        # instead of truncating columns; tall frames are always truncated
        # vertically once they exceed max_rows.
        df_small = DataFrame('hello', [0], [0])
        df_wide = DataFrame('hello', [0], lrange(10))
        df_tall = DataFrame('hello', lrange(30), lrange(5))

        with option_context('mode.sim_interactive', True):
            with option_context('display.max_columns', 10, 'display.width', 20,
                                'display.max_rows', 20,
                                'display.show_dimensions', True):
                with option_context('display.expand_frame_repr', True):
                    assert not has_truncated_repr(df_small)
                    assert not has_expanded_repr(df_small)
                    assert not has_truncated_repr(df_wide)
                    assert has_expanded_repr(df_wide)
                    assert has_vertically_truncated_repr(df_tall)
                    assert has_expanded_repr(df_tall)

                with option_context('display.expand_frame_repr', False):
                    assert not has_truncated_repr(df_small)
                    assert not has_expanded_repr(df_small)
                    assert not has_horizontally_truncated_repr(df_wide)
                    assert not has_expanded_repr(df_wide)
                    assert has_vertically_truncated_repr(df_tall)
                    assert not has_expanded_repr(df_tall)
    def test_repr_non_interactive(self):
        # in non interactive mode, there can be no dependency on the
        # result of terminal auto size detection
        # With generous max_rows and width=0 nothing should be truncated
        # or wrapped, regardless of actual terminal size.
        df = DataFrame('hello', lrange(1000), lrange(5))

        with option_context('mode.sim_interactive', False, 'display.width', 0,
                            'display.max_rows', 5000):
            assert not has_truncated_repr(df)
            assert not has_expanded_repr(df)
    def test_repr_truncates_terminal_size(self, monkeypatch):
        # see gh-21180
        # Pin the detected terminal size so the assertions are independent
        # of the environment running the test.
        terminal_size = (118, 96)
        monkeypatch.setattr('pandas.io.formats.console.get_terminal_size',
                            lambda: terminal_size)
        monkeypatch.setattr('pandas.io.formats.format.get_terminal_size',
                            lambda: terminal_size)

        index = range(5)
        columns = pd.MultiIndex.from_tuples([
            ('This is a long title with > 37 chars.', 'cat'),
            ('This is a loooooonger title with > 43 chars.', 'dog'),
        ])
        df = pd.DataFrame(1, index=index, columns=columns)

        result = repr(df)

        # Both MultiIndex header levels must survive truncation intact.
        h1, h2 = result.split('\n')[:2]

        assert 'long' in h1
        assert 'loooooonger' in h1
        assert 'cat' in h2
        assert 'dog' in h2

        # regular columns
        df2 = pd.DataFrame({"A" * 41: [1, 2], 'B' * 41: [1, 2]})
        result = repr(df2)

        assert df2.columns[0] in result.split('\n')[0]
    def test_repr_truncates_terminal_size_full(self, monkeypatch):
        # GH 22984 ensure entire window is filled
        # A frame that just fits the (pinned) terminal width must not be
        # truncated with an ellipsis column.
        terminal_size = (80, 24)
        df = pd.DataFrame(np.random.rand(1, 7))
        monkeypatch.setattr('pandas.io.formats.console.get_terminal_size',
                            lambda: terminal_size)
        monkeypatch.setattr('pandas.io.formats.format.get_terminal_size',
                            lambda: terminal_size)
        assert "..." not in str(df)
    def test_repr_truncation_column_size(self):
        # dataframe with last column very wide -> check it is not used to
        # determine size of truncation (...) column
        # The ellipsis column should stay narrow (" ... "), not be padded
        # to the width of the widest data column.
        df = pd.DataFrame({'a': [108480, 30830], 'b': [12345, 12345],
                           'c': [12345, 12345], 'd': [12345, 12345],
                           'e': ['a' * 50] * 2})
        assert "..." in str(df)
        assert "    ...    " not in str(df)
    def test_repr_max_columns_max_rows(self):
        # Interplay of max_rows / max_columns / width decides between
        # plain, truncated and expanded (multi-page) reprs.
        term_width, term_height = get_terminal_size()
        if term_width < 10 or term_height < 10:
            pytest.skip("terminal size too small, "
                        "{0} x {1}".format(term_width, term_height))

        def mkframe(n):
            # n x n frame of zeros with zero-padded string labels
            index = ['{i:05d}'.format(i=i) for i in range(n)]
            return DataFrame(0, index, index)

        df6 = mkframe(6)
        df10 = mkframe(10)
        with option_context('mode.sim_interactive', True):
            with option_context('display.width', term_width * 2):
                with option_context('display.max_rows', 5,
                                    'display.max_columns', 5):
                    assert not has_expanded_repr(mkframe(4))
                    assert not has_expanded_repr(mkframe(5))
                    assert not has_expanded_repr(df6)
                    assert has_doubly_truncated_repr(df6)

                with option_context('display.max_rows', 20,
                                    'display.max_columns', 10):
                    # Out off max_columns boundary, but no extending
                    # since not exceeding width
                    assert not has_expanded_repr(df6)
                    assert not has_truncated_repr(df6)

                with option_context('display.max_rows', 9,
                                    'display.max_columns', 10):
                    # out vertical bounds can not result in expanded repr
                    assert not has_expanded_repr(df10)
                    assert has_vertically_truncated_repr(df10)

        # width=None in terminal, auto detection
        with option_context('display.max_columns', 100, 'display.max_rows',
                            term_width * 20, 'display.width', None):
            df = mkframe((term_width // 7) - 2)
            assert not has_expanded_repr(df)
            df = mkframe((term_width // 7) + 2)
            printing.pprint_thing(df._repr_fits_horizontal_())
            assert has_expanded_repr(df)
    def test_str_max_colwidth(self):
        # GH 7856
        # max_colwidth truncates long cell values with a trailing "..."
        # while leaving short cells untouched.
        df = pd.DataFrame([{'a': 'foo',
                            'b': 'bar',
                            'c': 'uncomfortably long line with lots of stuff',
                            'd': 1}, {'a': 'foo',
                                      'b': 'bar',
                                      'c': 'stuff',
                                      'd': 1}])
        df.set_index(['a', 'b', 'c'])
        assert str(df) == (
            '     a    b                                           c  d\n'
            '0  foo  bar  uncomfortably long line with lots of stuff  1\n'
            '1  foo  bar                                       stuff  1')
        with option_context('max_colwidth', 20):
            assert str(df) == ('     a    b                    c  d\n'
                               '0  foo  bar  uncomfortably lo...  1\n'
                               '1  foo  bar                stuff  1')
    def test_auto_detect(self):
        # max_rows/max_columns of None vs 0: None means "wrap around"
        # (expanded repr), 0 means "auto-detect from terminal and truncate".
        term_width, term_height = get_terminal_size()
        fac = 1.05  # Arbitrary large factor to exceed term width
        cols = range(int(term_width * fac))
        index = range(10)
        df = DataFrame(index=index, columns=cols)
        with option_context('mode.sim_interactive', True):
            with option_context('max_rows', None):
                with option_context('max_columns', None):
                    # Wrap around with None
                    assert has_expanded_repr(df)
            with option_context('max_rows', 0):
                with option_context('max_columns', 0):
                    # Truncate with auto detection.
                    assert has_horizontally_truncated_repr(df)

            index = range(int(term_height * fac))
            df = DataFrame(index=index, columns=cols)
            with option_context('max_rows', 0):
                with option_context('max_columns', None):
                    # Wrap around with None
                    assert has_expanded_repr(df)
                    # Truncate vertically
                    assert has_vertically_truncated_repr(df)

            with option_context('max_rows', None):
                with option_context('max_columns', 0):
                    assert has_horizontally_truncated_repr(df)
    def test_to_string_repr_unicode(self):
        # Unicode cells must not break alignment or raise, even with
        # col_space set or when sys.stdin is unavailable.
        buf = StringIO()

        unicode_values = [u('\u03c3')] * 10
        unicode_values = np.array(unicode_values, dtype=object)
        df = DataFrame({'unicode': unicode_values})
        df.to_string(col_space=10, buf=buf)

        # it works!
        repr(df)

        idx = Index(['abc', u('\u03c3a'), 'aegdvg'])
        ser = Series(np.random.randn(len(idx)), idx)
        rs = repr(ser).split('\n')
        line_len = len(rs[0])
        # every data line (not the trailing dtype line) must be equally wide
        for line in rs[1:]:
            try:
                line = line.decode(get_option("display.encoding"))
            except AttributeError:
                pass
            if not line.startswith('dtype:'):
                assert len(line) == line_len

        # it works even if sys.stdin in None
        _stdin = sys.stdin
        try:
            sys.stdin = None
            repr(df)
        finally:
            sys.stdin = _stdin
    def test_to_string_unicode_columns(self):
        # to_string()/info() must handle unicode column names, and the
        # to_string result is a text (unicode) type on both py2 and py3.
        df = DataFrame({u('\u03c3'): np.arange(10.)})

        buf = StringIO()
        df.to_string(buf=buf)
        buf.getvalue()

        buf = StringIO()
        df.info(buf=buf)
        buf.getvalue()

        result = self.frame.to_string()
        assert isinstance(result, compat.text_type)
    def test_to_string_utf8_columns(self):
        # Smoke test: a utf-8 encoded *bytes* column name must not break
        # repr, even when rows are truncated.
        n = u("\u05d0").encode('utf-8')

        with option_context('display.max_rows', 1):
            df = DataFrame([1, 2], columns=[n])
            repr(df)
    def test_to_string_unicode_two(self):
        # Smoke test: unicode column name on an empty frame.
        dm = DataFrame({u('c/\u03c3'): []})
        buf = StringIO()
        dm.to_string(buf)
    def test_to_string_unicode_three(self):
        # Smoke test: high-bit (latin-1 range) character in the data.
        dm = DataFrame(['\xc2'])
        buf = StringIO()
        dm.to_string(buf)
    def test_to_string_with_formatters(self):
        # formatters may be passed as a dict keyed by column or as a
        # positional list; both must produce identical output.
        df = DataFrame({'int': [1, 2, 3],
                        'float': [1.0, 2.0, 3.0],
                        'object': [(1, 2), True, False]},
                       columns=['int', 'float', 'object'])

        formatters = [('int', lambda x: '0x{x:x}'.format(x=x)),
                      ('float', lambda x: '[{x: 4.1f}]'.format(x=x)),
                      ('object', lambda x: '-{x!s}-'.format(x=x))]
        result = df.to_string(formatters=dict(formatters))
        result2 = df.to_string(formatters=lzip(*formatters)[1])
        assert result == ('  int  float    object\n'
                          '0 0x1 [ 1.0]  -(1, 2)-\n'
                          '1 0x2 [ 2.0]    -True-\n'
                          '2 0x3 [ 3.0]   -False-')
        assert result == result2
    def test_to_string_with_datetime64_monthformatter(self):
        # A per-column formatter must apply to datetime64 values.
        months = [datetime(2016, 1, 1), datetime(2016, 2, 2)]
        x = DataFrame({'months': months})

        def format_func(x):
            # render only year and month
            return x.strftime('%Y-%m')
        result = x.to_string(formatters={'months': format_func})
        expected = 'months\n0 2016-01\n1 2016-02'
        assert result.strip() == expected
    def test_to_string_with_datetime64_hourformatter(self):
        # A per-column formatter must apply to time-of-day datetime64 values.
        x = DataFrame({'hod': pd.to_datetime(['10:10:10.100', '12:12:12.120'],
                                             format='%H:%M:%S.%f')})

        def format_func(x):
            # render only hour and minute
            return x.strftime('%H:%M')

        result = x.to_string(formatters={'hod': format_func})
        expected = 'hod\n0 10:10\n1 12:12'
        assert result.strip() == expected
    def test_to_string_with_formatters_unicode(self):
        # formatters dict keyed by a unicode column name must be honored.
        df = DataFrame({u('c/\u03c3'): [1, 2, 3]})
        result = df.to_string(
            formatters={u('c/\u03c3'): lambda x: '{x}'.format(x=x)})
        assert result == u('  c/\u03c3\n') + '0 1\n1 2\n2 3'
    def test_east_asian_unicode_false(self):
        # With display.unicode.east_asian_width left at its default (False),
        # wide CJK characters are counted as single cells, so columns are
        # padded by code-point count rather than display width.
        if PY3:
            _rep = repr
        else:
            _rep = unicode  # noqa

        # not aligned properly because of east asian width

        # mid col
        df = DataFrame({'a': [u'あ', u'いいい', u'う', u'ええええええ'],
                        'b': [1, 222, 33333, 4]},
                       index=['a', 'bb', 'c', 'ddd'])
        expected = (u"          a      b\na         あ      1\n"
                    u"bb      いいい    222\nc         う  33333\n"
                    u"ddd  ええええええ      4")
        assert _rep(df) == expected

        # last col
        df = DataFrame({'a': [1, 222, 33333, 4],
                        'b': [u'あ', u'いいい', u'う', u'ええええええ']},
                       index=['a', 'bb', 'c', 'ddd'])
        expected = (u"         a       b\na        1       あ\n"
                    u"bb     222     いいい\nc    33333       う\n"
                    u"ddd      4  ええええええ")
        assert _rep(df) == expected

        # all col
        df = DataFrame({'a': [u'あああああ', u'い', u'う', u'えええ'],
                        'b': [u'あ', u'いいい', u'う', u'ええええええ']},
                       index=['a', 'bb', 'c', 'ddd'])
        expected = (u"         a       b\na    あああああ       あ\n"
                    u"bb       い     いいい\nc        う       う\n"
                    u"ddd    えええ  ええええええ")
        assert _rep(df) == expected

        # column name
        df = DataFrame({'b': [u'あ', u'いいい', u'う', u'ええええええ'],
                        u'あああああ': [1, 222, 33333, 4]},
                       index=['a', 'bb', 'c', 'ddd'])
        expected = (u"          b  あああああ\na         あ      1\n"
                    u"bb      いいい    222\nc         う  33333\n"
                    u"ddd  ええええええ      4")
        assert _rep(df) == expected

        # index
        df = DataFrame({'a': [u'あああああ', u'い', u'う', u'えええ'],
                        'b': [u'あ', u'いいい', u'う', u'ええええええ']},
                       index=[u'あああ', u'いいいいいい', u'うう', u'え'])
        expected = (u"            a       b\nあああ     あああああ       あ\n"
                    u"いいいいいい      い     いいい\nうう         う       う\n"
                    u"え        えええ  ええええええ")
        assert _rep(df) == expected

        # index name
        df = DataFrame({'a': [u'あああああ', u'い', u'う', u'えええ'],
                        'b': [u'あ', u'いいい', u'う', u'ええええええ']},
                       index=pd.Index([u'あ', u'い', u'うう', u'え'],
                                      name=u'おおおお'))
        expected = (u"          a       b\n"
                    u"おおおお               \n"
                    u"あ     あああああ       あ\n"
                    u"い         い     いいい\n"
                    u"うう        う       う\n"
                    u"え       えええ  ええええええ")
        assert _rep(df) == expected

        # all
        df = DataFrame({u'あああ': [u'あああ', u'い', u'う', u'えええええ'],
                        u'いいいいい': [u'あ', u'いいい', u'う', u'ええ']},
                       index=pd.Index([u'あ', u'いいい', u'うう', u'え'],
                                      name=u'お'))
        expected = (u"       あああ いいいいい\n"
                    u"お               \n"
                    u"あ      あああ     あ\n"
                    u"いいい      い   いいい\n"
                    u"うう       う     う\n"
                    u"え    えええええ    ええ")
        assert _rep(df) == expected

        # MultiIndex
        idx = pd.MultiIndex.from_tuples([(u'あ', u'いい'), (u'う', u'え'), (
            u'おおお', u'かかかか'), (u'き', u'くく')])
        df = DataFrame({'a': [u'あああああ', u'い', u'う', u'えええ'],
                        'b': [u'あ', u'いいい', u'う', u'ええええええ']},
                       index=idx)
        expected = (u"              a       b\n"
                    u"あ   いい    あああああ       あ\n"
                    u"う   え         い     いいい\n"
                    u"おおお かかかか      う       う\n"
                    u"き   くく      えええ  ええええええ")
        assert _rep(df) == expected

        # truncate
        with option_context('display.max_rows', 3, 'display.max_columns', 3):
            df = pd.DataFrame({'a': [u'あああああ', u'い', u'う', u'えええ'],
                               'b': [u'あ', u'いいい', u'う', u'ええええええ'],
                               'c': [u'お', u'か', u'ききき', u'くくくくくく'],
                               u'ああああ': [u'さ', u'し', u'す', u'せ']},
                              columns=['a', 'b', 'c', u'ああああ'])

            expected = (u"        a  ... ああああ\n0   あああああ  ...    さ\n"
                        u"..      ...  ...  ...\n3     えええ  ...    せ\n"
                        u"\n[4 rows x 4 columns]")
            assert _rep(df) == expected

            df.index = [u'あああ', u'いいいい', u'う', 'aaa']
            expected = (u"         a  ... ああああ\nあああ  あああああ  ...    さ\n"
                        u"..     ...  ...  ...\naaa    えええ  ...    せ\n"
                        u"\n[4 rows x 4 columns]")
            assert _rep(df) == expected
    def test_east_asian_unicode_true(self):
        # With display.unicode.east_asian_width enabled, wide CJK
        # characters count as two columns, so the output aligns visually.
        if PY3:
            _rep = repr
        else:
            _rep = unicode  # noqa

        # Enable Unicode option -----------------------------------------
        with option_context('display.unicode.east_asian_width', True):

            # mid col
            df = DataFrame({'a': [u'あ', u'いいい', u'う', u'ええええええ'],
                            'b': [1, 222, 33333, 4]},
                           index=['a', 'bb', 'c', 'ddd'])
            expected = (u"                a      b\na            あ      1\n"
                        u"bb       いいい    222\nc            う  33333\n"
                        u"ddd  ええええええ      4")
            assert _rep(df) == expected

            # last col
            df = DataFrame({'a': [1, 222, 33333, 4],
                            'b': [u'あ', u'いいい', u'う', u'ええええええ']},
                           index=['a', 'bb', 'c', 'ddd'])
            expected = (u"         a             b\na        1            あ\n"
                        u"bb     222        いいい\nc    33333            う\n"
                        u"ddd      4  ええええええ")
            assert _rep(df) == expected

            # all col
            df = DataFrame({'a': [u'あああああ', u'い', u'う', u'えええ'],
                            'b': [u'あ', u'いいい', u'う', u'ええええええ']},
                           index=['a', 'bb', 'c', 'ddd'])
            expected = (u"              a             b\n"
                        u"a    あああああ            あ\n"
                        u"bb           い        いいい\n"
                        u"c            う            う\n"
                        u"ddd      えええ  ええええええ")
            assert _rep(df) == expected

            # column name
            df = DataFrame({'b': [u'あ', u'いいい', u'う', u'ええええええ'],
                            u'あああああ': [1, 222, 33333, 4]},
                           index=['a', 'bb', 'c', 'ddd'])
            expected = (u"                b  あああああ\n"
                        u"a            あ           1\n"
                        u"bb       いいい         222\n"
                        u"c            う       33333\n"
                        u"ddd  ええええええ           4")
            assert _rep(df) == expected

            # index
            df = DataFrame({'a': [u'あああああ', u'い', u'う', u'えええ'],
                            'b': [u'あ', u'いいい', u'う', u'ええええええ']},
                           index=[u'あああ', u'いいいいいい', u'うう', u'え'])
            expected = (u"                       a             b\n"
                        u"あああ        あああああ            あ\n"
                        u"いいいいいい          い        いいい\n"
                        u"うう                  う            う\n"
                        u"え                えええ  ええええええ")
            assert _rep(df) == expected

            # index name
            df = DataFrame({'a': [u'あああああ', u'い', u'う', u'えええ'],
                            'b': [u'あ', u'いいい', u'う', u'ええええええ']},
                           index=pd.Index([u'あ', u'い', u'うう', u'え'],
                                          name=u'おおおお'))
            expected = (u"                   a             b\n"
                        u"おおおお                          \n"
                        u"あ        あああああ            あ\n"
                        u"い                い        いいい\n"
                        u"うう              う            う\n"
                        u"え            えええ  ええええええ")
            assert _rep(df) == expected

            # all
            df = DataFrame({u'あああ': [u'あああ', u'い', u'う', u'えええええ'],
                            u'いいいいい': [u'あ', u'いいい', u'う', u'ええ']},
                           index=pd.Index([u'あ', u'いいい', u'うう', u'え'],
                                          name=u'お'))
            expected = (u"            あああ いいいいい\n"
                        u"お                           \n"
                        u"あ          あああ         あ\n"
                        u"いいい          い     いいい\n"
                        u"うう            う         う\n"
                        u"え      えええええ       ええ")
            assert _rep(df) == expected

            # MultiIndex
            idx = pd.MultiIndex.from_tuples([(u'あ', u'いい'), (u'う', u'え'), (
                u'おおお', u'かかかか'), (u'き', u'くく')])
            df = DataFrame({'a': [u'あああああ', u'い', u'う', u'えええ'],
                            'b': [u'あ', u'いいい', u'う', u'ええええええ']},
                           index=idx)
            expected = (u"                          a             b\n"
                        u"あ     いい      あああああ            あ\n"
                        u"う     え                い        いいい\n"
                        u"おおお かかかか          う            う\n"
                        u"き     くく          えええ  ええええええ")
            assert _rep(df) == expected

            # truncate
            with option_context('display.max_rows', 3, 'display.max_columns',
                                3):

                df = pd.DataFrame({'a': [u'あああああ', u'い', u'う', u'えええ'],
                                   'b': [u'あ', u'いいい', u'う', u'ええええええ'],
                                   'c': [u'お', u'か', u'ききき', u'くくくくくく'],
                                   u'ああああ': [u'さ', u'し', u'す', u'せ']},
                                  columns=['a', 'b', 'c', u'ああああ'])

                expected = (u"             a  ...  ああああ\n"
                            u"0   あああああ  ...        さ\n"
                            u"..         ...  ...       ...\n"
                            u"3       えええ  ...        せ\n"
                            u"\n[4 rows x 4 columns]")
                assert _rep(df) == expected

                df.index = [u'あああ', u'いいいい', u'う', 'aaa']
                expected = (u"                 a  ...  ああああ\n"
                            u"あああ  あああああ  ...        さ\n"
                            u"...            ...  ...       ...\n"
                            u"aaa         えええ  ...        せ\n"
                            u"\n[4 rows x 4 columns]")
                assert _rep(df) == expected

            # ambiguous unicode
            df = DataFrame({'b': [u'あ', u'いいい', u'¡¡', u'ええええええ'],
                            u'あああああ': [1, 222, 33333, 4]},
                           index=['a', 'bb', 'c', '¡¡¡'])
            expected = (u"                b  あああああ\n"
                        u"a            あ           1\n"
                        u"bb       いいい         222\n"
                        u"c            ¡¡       33333\n"
                        u"¡¡¡  ええええええ           4")
            assert _rep(df) == expected
    def test_to_string_buffer_all_unicode(self):
        # Printing empty and non-empty frames with a unicode column name
        # into the same buffer must not raise.
        buf = StringIO()

        empty = DataFrame({u('c/\u03c3'): Series()})
        nonempty = DataFrame({u('c/\u03c3'): Series([1, 2, 3])})

        print(empty, file=buf)
        print(nonempty, file=buf)

        # this should work
        buf.getvalue()
    def test_to_string_with_col_space(self):
        # Larger col_space must strictly widen the output lines.
        df = DataFrame(np.random.random(size=(1, 3)))
        c10 = len(df.to_string(col_space=10).split("\n")[1])
        c20 = len(df.to_string(col_space=20).split("\n")[1])
        c30 = len(df.to_string(col_space=30).split("\n")[1])
        assert c10 < c20 < c30

        # GH 8230
        # col_space wasn't being applied with header=False
        with_header = df.to_string(col_space=20)
        with_header_row1 = with_header.splitlines()[1]
        no_header = df.to_string(col_space=20, header=False)
        assert len(with_header_row1) == len(no_header)
    def test_to_string_truncate_indices(self):
        # Sweep index types x dimensions x display limits and check that
        # truncation occurs exactly when a dimension exceeds its limit.
        for index in [tm.makeStringIndex, tm.makeUnicodeIndex, tm.makeIntIndex,
                      tm.makeDateIndex, tm.makePeriodIndex]:
            for column in [tm.makeStringIndex]:
                for h in [10, 20]:
                    for w in [10, 20]:
                        with option_context("display.expand_frame_repr",
                                            False):
                            df = DataFrame(index=index(h), columns=column(w))
                            with option_context("display.max_rows", 15):
                                if h == 20:
                                    assert has_vertically_truncated_repr(df)
                                else:
                                    assert not has_vertically_truncated_repr(
                                        df)
                            with option_context("display.max_columns", 15):
                                if w == 20:
                                    assert has_horizontally_truncated_repr(df)
                                else:
                                    assert not (
                                        has_horizontally_truncated_repr(df))
                            with option_context("display.max_rows", 15,
                                                "display.max_columns", 15):
                                if h == 20 and w == 20:
                                    assert has_doubly_truncated_repr(df)
                                else:
                                    assert not has_doubly_truncated_repr(
                                        df)
    def test_to_string_truncate_multilevel(self):
        # A frame with MultiIndex rows and columns must truncate in both
        # directions when it exceeds both display limits.
        arrays = [['bar', 'bar', 'baz', 'baz', 'foo', 'foo', 'qux', 'qux'],
                  ['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']]
        df = DataFrame(index=arrays, columns=arrays)
        with option_context("display.max_rows", 7, "display.max_columns", 7):
            assert has_doubly_truncated_repr(df)
    def test_truncate_with_different_dtypes(self):

        # 11594, 12045
        # when truncated the dtypes of the splits can differ

        # 11594
        # the middle (out-of-range) timestamp forces object dtype, which
        # must be reported even though the displayed head/tail rows alone
        # would look like datetime64
        import datetime
        s = Series([datetime.datetime(2012, 1, 1)] * 10 +
                   [datetime.datetime(1012, 1, 2)] + [
            datetime.datetime(2012, 1, 3)] * 10)

        with pd.option_context('display.max_rows', 8):
            result = str(s)
            assert 'object' in result

        # 12045
        # None values must display as "None" (object dtype), not "NaN"
        df = DataFrame({'text': ['some words'] + [None] * 9})

        with pd.option_context('display.max_rows', 8,
                               'display.max_columns', 3):
            result = str(df)
            assert 'None' in result
            assert 'NaN' not in result
    def test_datetimelike_frame(self):
        # GH 12211
        # tz-aware timestamps mixed with NaT must render correctly when the
        # frame is vertically truncated, regardless of where the NaT run is.
        df = DataFrame(
            {'date': [pd.Timestamp('20130101').tz_localize('UTC')] +
             [pd.NaT] * 5})

        with option_context("display.max_rows", 5):
            result = str(df)
            assert '2013-01-01 00:00:00+00:00' in result
            assert 'NaT' in result
            assert '...' in result
            assert '[6 rows x 1 columns]' in result

        dts = [pd.Timestamp('2011-01-01', tz='US/Eastern')] * 5 + [pd.NaT] * 5
        df = pd.DataFrame({"dt": dts,
                           "x": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]})
        with option_context('display.max_rows', 5):
            expected = ('                          dt   x\n'
                        '0  2011-01-01 00:00:00-05:00   1\n'
                        '1  2011-01-01 00:00:00-05:00   2\n'
                        '..                       ...  ..\n'
                        '8                        NaT   9\n'
                        '9                        NaT  10\n\n'
                        '[10 rows x 2 columns]')
            assert repr(df) == expected

        dts = [pd.NaT] * 5 + [pd.Timestamp('2011-01-01', tz='US/Eastern')] * 5
        df = pd.DataFrame({"dt": dts,
                           "x": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]})
        with option_context('display.max_rows', 5):
            expected = ('                          dt   x\n'
                        '0                        NaT   1\n'
                        '1                        NaT   2\n'
                        '..                       ...  ..\n'
                        '8  2011-01-01 00:00:00-05:00   9\n'
                        '9  2011-01-01 00:00:00-05:00  10\n\n'
                        '[10 rows x 2 columns]')
            assert repr(df) == expected

        dts = ([pd.Timestamp('2011-01-01', tz='Asia/Tokyo')] * 5 +
               [pd.Timestamp('2011-01-01', tz='US/Eastern')] * 5)
        df = pd.DataFrame({"dt": dts,
                           "x": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]})
        with option_context('display.max_rows', 5):
            expected = ('                          dt   x\n'
                        '0  2011-01-01 00:00:00+09:00   1\n'
                        '1  2011-01-01 00:00:00+09:00   2\n'
                        '..                       ...  ..\n'
                        '8  2011-01-01 00:00:00-05:00   9\n'
                        '9  2011-01-01 00:00:00-05:00  10\n\n'
                        '[10 rows x 2 columns]')
            assert repr(df) == expected
    @pytest.mark.parametrize('start_date', [
        '2017-01-01 23:59:59.999999999',
        '2017-01-01 23:59:59.99999999',
        '2017-01-01 23:59:59.9999999',
        '2017-01-01 23:59:59.999999',
        '2017-01-01 23:59:59.99999',
        '2017-01-01 23:59:59.9999',
    ])
    def test_datetimeindex_highprecision(self, start_date):
        # GH19030
        # Check that high-precision time values for the end of day are
        # included in repr for DatetimeIndex
        # Checked both as a column and as the index itself.
        df = DataFrame({'A': date_range(start=start_date,
                                        freq='D', periods=5)})
        result = str(df)
        assert start_date in result

        dti = date_range(start=start_date,
                         freq='D', periods=5)
        df = DataFrame({'A': range(5)}, index=dti)
        result = str(df.index)
        assert start_date in result
    def test_nonunicode_nonascii_alignment(self):
        # Bytes-like non-ASCII cell content must not break column
        # alignment: both data rows render to the same width.
        df = DataFrame([["aa\xc3\xa4\xc3\xa4", 1], ["bbbb", 2]])
        rep_str = df.to_string()
        lines = rep_str.split('\n')
        assert len(lines[1]) == len(lines[2])
    def test_unicode_problem_decoding_as_ascii(self):
        # Smoke test: converting a unicode-named column with NaN to text
        # must not attempt an ASCII decode.
        dm = DataFrame({u('c/\u03c3'): Series({'test': np.nan})})
        compat.text_type(dm.to_string())
    def test_string_repr_encoding(self, datapath):
        # Smoke test: repr of latin1-decoded CSV data must not raise.
        filepath = datapath('io', 'parser', 'data', 'unicode_series.csv')
        df = pd.read_csv(filepath, header=None, encoding='latin1')
        repr(df)
        repr(df[1])
def test_repr_corner(self):
# representing infs poses no problems
df = DataFrame({'foo': [-np.inf, np.inf]})
repr(df)
    def test_frame_info_encoding(self):
        # Smoke test: non-ASCII index labels must survive repr of the frame
        # and its transpose while rows are truncated.
        # NOTE(review): this restores display.max_rows by setting 200
        # explicitly instead of reset_option, so it assumes 200 was the
        # prior value — confirm against the suite's option handling.
        index = ['\'Til There Was You (1997)',
                 'ldum klaka (Cold Fever) (1994)']
        fmt.set_option('display.max_rows', 1)
        df = DataFrame(columns=['a', 'b', 'c'], index=index)
        repr(df)
        repr(df.T)
        fmt.set_option('display.max_rows', 200)
    def test_pprint_thing(self):
        # py2-only checks of pprint_thing's u''-prefixed quoting of nested
        # unicode containers; quoting semantics differ on py3, hence skip.
        from pandas.io.formats.printing import pprint_thing as pp_t

        if PY3:
            pytest.skip("doesn't work on Python 3")

        assert pp_t('a') == u('a')
        assert pp_t(u('a')) == u('a')
        assert pp_t(None) == 'None'
        assert pp_t(u('\u05d0'), quote_strings=True) == u("u'\u05d0'")
        assert pp_t(u('\u05d0'), quote_strings=False) == u('\u05d0')
        assert (pp_t((u('\u05d0'), u('\u05d1')), quote_strings=True) ==
                u("(u'\u05d0', u'\u05d1')"))
        assert (pp_t((u('\u05d0'), (u('\u05d1'), u('\u05d2'))),
                     quote_strings=True) == u("(u'\u05d0', "
                                              "(u'\u05d1', u'\u05d2'))"))
        assert (pp_t(('foo', u('\u05d0'), (u('\u05d0'), u('\u05d0'))),
                     quote_strings=True) == u("(u'foo', u'\u05d0', "
                                              "(u'\u05d0', u'\u05d0'))"))

        # gh-2038: escape embedded tabs in string
        assert "\t" not in pp_t("a\tb", escape_chars=("\t", ))
    def test_wide_repr(self):
        # A wide frame's repr should differ between expand_frame_repr
        # off (truncated) and on (paged), and a wider display should
        # produce a more compact paged repr.
        with option_context('mode.sim_interactive', True,
                            'display.show_dimensions', True,
                            'display.max_columns', 20):
            max_cols = get_option('display.max_columns')
            df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)))
            set_option('display.expand_frame_repr', False)
            rep_str = repr(df)

            assert "10 rows x {c} columns".format(c=max_cols - 1) in rep_str
            set_option('display.expand_frame_repr', True)
            wide_repr = repr(df)
            assert rep_str != wide_repr

            with option_context('display.width', 120):
                wider_repr = repr(df)
                assert len(wider_repr) < len(wide_repr)

        reset_option('display.expand_frame_repr')
    def test_wide_repr_wide_columns(self):
        # Three 90-char column names force one wrapped page per column;
        # the repr then spans exactly 20 lines.
        with option_context('mode.sim_interactive', True,
                            'display.max_columns', 20):
            df = DataFrame(np.random.randn(5, 3),
                           columns=['a' * 90, 'b' * 90, 'c' * 90])
            rep_str = repr(df)

            assert len(rep_str.splitlines()) == 20
    def test_wide_repr_named(self):
        # The index name must be repeated in the header of every page of an
        # expanded (wide) repr.
        with option_context('mode.sim_interactive', True,
                            'display.max_columns', 20):
            max_cols = get_option('display.max_columns')
            df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)))
            df.index.name = 'DataFrame Index'
            set_option('display.expand_frame_repr', False)

            rep_str = repr(df)
            set_option('display.expand_frame_repr', True)
            wide_repr = repr(df)
            assert rep_str != wide_repr

            with option_context('display.width', 150):
                wider_repr = repr(df)
                assert len(wider_repr) < len(wide_repr)

            for line in wide_repr.splitlines()[1::13]:
                assert 'DataFrame Index' in line

        reset_option('display.expand_frame_repr')
    def test_wide_repr_multiindex(self):
        # MultiIndex level names must be repeated in the header of every
        # page of an expanded (wide) repr.
        with option_context('mode.sim_interactive', True,
                            'display.max_columns', 20):
            midx = MultiIndex.from_arrays(tm.rands_array(5, size=(2, 10)))
            max_cols = get_option('display.max_columns')
            df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)),
                           index=midx)
            df.index.names = ['Level 0', 'Level 1']
            set_option('display.expand_frame_repr', False)
            rep_str = repr(df)
            set_option('display.expand_frame_repr', True)
            wide_repr = repr(df)
            assert rep_str != wide_repr

            with option_context('display.width', 150):
                wider_repr = repr(df)
                assert len(wider_repr) < len(wide_repr)

            for line in wide_repr.splitlines()[1::13]:
                assert 'Level 0 Level 1' in line

        reset_option('display.expand_frame_repr')
    def test_wide_repr_multiindex_cols(self):
        # MultiIndex on *both* axes: expanded repr must still differ from
        # the truncated one and shrink as the display widens.
        with option_context('mode.sim_interactive', True,
                            'display.max_columns', 20):
            max_cols = get_option('display.max_columns')
            midx = MultiIndex.from_arrays(tm.rands_array(5, size=(2, 10)))
            mcols = MultiIndex.from_arrays(
                tm.rands_array(3, size=(2, max_cols - 1)))
            df = DataFrame(tm.rands_array(25, (10, max_cols - 1)),
                           index=midx, columns=mcols)
            df.index.names = ['Level 0', 'Level 1']
            set_option('display.expand_frame_repr', False)
            rep_str = repr(df)
            set_option('display.expand_frame_repr', True)
            wide_repr = repr(df)
            assert rep_str != wide_repr

        with option_context('display.width', 150, 'display.max_columns', 20):
            wider_repr = repr(df)
            assert len(wider_repr) < len(wide_repr)

        reset_option('display.expand_frame_repr')
    def test_wide_repr_unicode(self):
        # Wide-repr behavior must hold for random string data as well:
        # expanded differs from truncated, and widening compacts it.
        with option_context('mode.sim_interactive', True,
                            'display.max_columns', 20):
            max_cols = 20
            df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)))
            set_option('display.expand_frame_repr', False)
            rep_str = repr(df)
            set_option('display.expand_frame_repr', True)
            wide_repr = repr(df)
            assert rep_str != wide_repr

            with option_context('display.width', 150):
                wider_repr = repr(df)
                assert len(wider_repr) < len(wide_repr)

        reset_option('display.expand_frame_repr')
    def test_wide_repr_wide_long_columns(self):
        # Very long cell values must survive (not be dropped from) the
        # wrapped repr of a wide frame.
        with option_context('mode.sim_interactive', True):
            df = DataFrame({'a': ['a' * 30, 'b' * 30],
                            'b': ['c' * 70, 'd' * 80]})

            result = repr(df)
            assert 'ccccc' in result
            assert 'ddddd' in result
    def test_long_series(self):
        # A truncated long Series must print its dtype footer exactly once.
        n = 1000
        s = Series(
            np.random.randint(-50, 50, n),
            index=['s{x:04d}'.format(x=x) for x in range(n)], dtype='int64')

        import re
        str_rep = str(s)
        nmatches = len(re.findall('dtype', str_rep))
        assert nmatches == 1
    def test_index_with_nan(self):
        # GH 2850
        # NaN labels in a (Multi)Index must render as "NaN" and keep
        # alignment, whether introduced via set_index, append, or present
        # in all/some rows of a level.
        df = DataFrame({'id1': {0: '1a3',
                                1: '9h4'},
                        'id2': {0: np.nan,
                                1: 'd67'},
                        'id3': {0: '78d',
                                1: '79d'},
                        'value': {0: 123,
                                  1: 64}})

        # multi-index
        y = df.set_index(['id1', 'id2', 'id3'])
        result = y.to_string()
        expected = u(
            '             value\nid1 id2 id3       \n'
            '1a3 NaN 78d    123\n9h4 d67 79d     64')
        assert result == expected

        # index
        y = df.set_index('id2')
        result = y.to_string()
        expected = u(
            '     id1  id3  value\nid2                 \n'
            'NaN  1a3  78d    123\nd67  9h4  79d     64')
        assert result == expected

        # with append (this failed in 0.12)
        y = df.set_index(['id1', 'id2']).set_index('id3', append=True)
        result = y.to_string()
        expected = u(
            '             value\nid1 id2 id3       \n'
            '1a3 NaN 78d    123\n9h4 d67 79d     64')
        assert result == expected

        # all-nan in mi
        df2 = df.copy()
        df2.loc[:, 'id2'] = np.nan
        y = df2.set_index('id2')
        result = y.to_string()
        expected = u(
            '     id1  id3  value\nid2                 \n'
            'NaN  1a3  78d    123\nNaN  9h4  79d     64')
        assert result == expected

        # partial nan in mi
        df2 = df.copy()
        df2.loc[:, 'id2'] = np.nan
        y = df2.set_index(['id2', 'id3'])
        result = y.to_string()
        expected = u(
            '         id1  value\nid2 id3            \n'
            'NaN 78d  1a3    123\n    79d  9h4     64')
        assert result == expected

        df = DataFrame({'id1': {0: np.nan,
                                1: '9h4'},
                        'id2': {0: np.nan,
                                1: 'd67'},
                        'id3': {0: np.nan,
                                1: '79d'},
                        'value': {0: 123,
                                  1: 64}})

        y = df.set_index(['id1', 'id2', 'id3'])
        result = y.to_string()
        expected = u(
            '             value\nid1 id2 id3       \n'
            'NaN NaN NaN    123\n9h4 d67 79d     64')
        assert result == expected
    def test_to_string(self):
        # Core to_string contract on a large mixed frame with NaNs:
        # buf= writes the same text and returns None; column selection,
        # col_space, float_format and formatters are smoke-tested; the
        # float output round-trips through read_csv to within 0.1.
        # big mixed
        biggie = DataFrame({'A': np.random.randn(200),
                            'B': tm.makeStringIndex(200)},
                           index=lrange(200))

        biggie.loc[:20, 'A'] = np.nan
        biggie.loc[:20, 'B'] = np.nan
        s = biggie.to_string()

        buf = StringIO()
        retval = biggie.to_string(buf=buf)
        assert retval is None
        assert buf.getvalue() == s

        assert isinstance(s, compat.string_types)

        # print in right order
        result = biggie.to_string(columns=['B', 'A'], col_space=17,
                                  float_format='%.5f'.__mod__)
        lines = result.split('\n')
        header = lines[0].strip().split()
        joined = '\n'.join(re.sub(r'\s+', ' ', x).strip() for x in lines[1:])
        recons = read_csv(StringIO(joined), names=header,
                          header=None, sep=' ')
        tm.assert_series_equal(recons['B'], biggie['B'])
        assert recons['A'].count() == biggie['A'].count()
        assert (np.abs(recons['A'].dropna() -
                       biggie['A'].dropna()) < 0.1).all()

        # expected = ['B', 'A']
        # assert header == expected

        result = biggie.to_string(columns=['A'], col_space=17)
        header = result.split('\n')[0].strip().split()
        expected = ['A']
        assert header == expected

        biggie.to_string(columns=['B', 'A'],
                         formatters={'A': lambda x: '{x:.1f}'.format(x=x)})

        biggie.to_string(columns=['B', 'A'], float_format=str)
        biggie.to_string(columns=['B', 'A'], col_space=12, float_format=str)

        frame = DataFrame(index=np.arange(200))
        frame.to_string()
    def test_to_string_no_header(self):
        # header=False drops the column-name row entirely.
        df = DataFrame({'x': [1, 2, 3], 'y': [4, 5, 6]})
        df_s = df.to_string(header=False)
        expected = "0  1  4\n1  2  5\n2  3  6"

        assert df_s == expected
    def test_to_string_specified_header(self):
        # header may be a list of replacement names; a length mismatch
        # with the number of columns must raise ValueError.
        df = DataFrame({'x': [1, 2, 3], 'y': [4, 5, 6]})
        df_s = df.to_string(header=['X', 'Y'])
        expected = '   X  Y\n0  1  4\n1  2  5\n2  3  6'

        assert df_s == expected

        with pytest.raises(ValueError):
            df.to_string(header=['X'])
    def test_to_string_no_index(self):
        # GH 16839, GH 13032
        # index=False keeps the sign-alignment of numeric columns.
        df = DataFrame({'x': [11, 22], 'y': [33, -44], 'z': ['AAA', '   ']})

        df_s = df.to_string(index=False)
        # Leading space is expected for positive numbers.
        expected = ("  x   y    z\n"
                    " 11  33  AAA\n"
                    " 22 -44     ")
        assert df_s == expected

        df_s = df[['y', 'x', 'z']].to_string(index=False)
        expected = ("  y   x    z\n"
                    " 33  11  AAA\n"
                    "-44  22     ")
        assert df_s == expected
    def test_to_string_line_width_no_index(self):
        # GH 13998, GH 22505
        # With a tiny line_width each column wraps onto its own page;
        # index=False must not leave a stray index column behind.
        df = DataFrame({'x': [1, 2, 3], 'y': [4, 5, 6]})

        df_s = df.to_string(line_width=1, index=False)
        expected = " x  \\\n 1   \n 2   \n 3   \n\n y  \n 4  \n 5  \n 6  "

        assert df_s == expected

        df = DataFrame({'x': [11, 22, 33], 'y': [4, 5, 6]})

        df_s = df.to_string(line_width=1, index=False)
        expected = " x  \\\n11   \n22   \n33   \n\n y  \n 4  \n 5  \n 6  "

        assert df_s == expected

        df = DataFrame({'x': [11, 22, -33], 'y': [4, 5, -6]})

        df_s = df.to_string(line_width=1, index=False)
        expected = " x  \\\n 11   \n 22   \n-33   \n\n y  \n 4  \n 5  \n-6  "

        assert df_s == expected
    def test_to_string_float_formatting(self):
        # Float columns spanning many orders of magnitude fall back to
        # scientific notation; expectations branch on platforms that print
        # three-digit exponents (e.g. Windows MSVC).
        tm.reset_display_options()
        fmt.set_option('display.precision', 5, 'display.column_space', 12,
                       'display.notebook_repr_html', False)

        df = DataFrame({'x': [0, 0.25, 3456.000, 12e+45, 1.64e+6, 1.7e+8,
                              1.253456, np.pi, -1e6]})

        df_s = df.to_string()

        if _three_digit_exp():
            expected = ('              x\n0  0.00000e+000\n1  2.50000e-001\n'
                        '2  3.45600e+003\n3  1.20000e+046\n4  1.64000e+006\n'
                        '5  1.70000e+008\n6  1.25346e+000\n7  3.14159e+000\n'
                        '8 -1.00000e+006')
        else:
            expected = ('             x\n0  0.00000e+00\n1  2.50000e-01\n'
                        '2  3.45600e+03\n3  1.20000e+46\n4  1.64000e+06\n'
                        '5  1.70000e+08\n6  1.25346e+00\n7  3.14159e+00\n'
                        '8 -1.00000e+06')
        assert df_s == expected

        df = DataFrame({'x': [3234, 0.253]})
        df_s = df.to_string()

        expected = ('          x\n' '0  3234.000\n' '1     0.253')
        assert df_s == expected

        tm.reset_display_options()
        assert get_option("display.precision") == 6

        df = DataFrame({'x': [1e9, 0.2512]})
        df_s = df.to_string()

        if _three_digit_exp():
            expected = ('               x\n'
                        '0  1.000000e+009\n'
                        '1  2.512000e-001')
        else:
            expected = ('              x\n'
                        '0  1.000000e+09\n'
                        '1  2.512000e-01')
        assert df_s == expected
    def test_to_string_float_format_no_fixed_width(self):

        # GH 21625
        # A user-supplied float_format controls the width exactly; no
        # extra padding or decimal point is re-added.
        df = DataFrame({'x': [0.19999]})
        expected = '         x\n0  0.200'
        assert df.to_string(float_format='%.3f') == expected

        # GH 22270
        df = DataFrame({'x': [100.0]})
        expected = '      x\n0  100'
        assert df.to_string(float_format='%.0f') == expected
    def test_to_string_small_float_values(self):
        # Tiny magnitudes force scientific notation; the branch on the
        # '{x:.4g}' probe handles platforms with three-digit exponents.
        df = DataFrame({'a': [1.5, 1e-17, -5.5e-7]})

        result = df.to_string()
        # sadness per above
        if '{x:.4g}'.format(x=1.7e8) == '1.7e+008':
            expected = ('               a\n'
                        '0  1.500000e+000\n'
                        '1  1.000000e-017\n'
                        '2 -5.500000e-007')
        else:
            expected = ('              a\n'
                        '0  1.500000e+00\n'
                        '1  1.000000e-17\n'
                        '2 -5.500000e-07')
        assert result == expected

        # but not all exactly zero
        df = df * 0
        result = df.to_string()
        expected = ('   0\n' '0  0\n' '1  0\n' '2 -0')
        # NOTE(review): `expected` above is assigned but never asserted, and
        # its header ('0') does not match the frame's column name ('a') —
        # looks like a stale/dead expectation; confirm intended output
        # before adding the missing assert.
    def test_to_string_float_index(self):
        # A float index renders with a consistent number of decimals
        # (1.5 forces one decimal place on the whole index).
        index = Index([1.5, 2, 3, 4, 5])
        df = DataFrame(lrange(5), index=index)

        result = df.to_string()
        expected = ('     0\n'
                    '1.5  0\n'
                    '2.0  1\n'
                    '3.0  2\n'
                    '4.0  3\n'
                    '5.0  4')
        assert result == expected
    def test_to_string_ascii_error(self):
        # Smoke test: raw high-bit byte sequences in cells must not raise
        # an encoding error during repr.
        data = [('0  ', u('                        .gitignore '), u('     5 '),
                 ' \xe2\x80\xa2\xe2\x80\xa2\xe2\x80'
                 '\xa2\xe2\x80\xa2\xe2\x80\xa2')]
        df = DataFrame(data)

        # it works!
        repr(df)
    def test_to_string_int_formatting(self):
        # Integer columns are right-aligned with no decimal point.
        df = DataFrame({'x': [-15, 20, 25, -35]})
        assert issubclass(df['x'].dtype.type, np.integer)

        output = df.to_string()
        expected = ('    x\n' '0 -15\n' '1  20\n' '2  25\n' '3 -35')
        assert output == expected
    def test_to_string_index_formatter(self):
        # The special '__index__' formatter key maps row positions to
        # custom index labels.
        df = DataFrame([lrange(5), lrange(5, 10), lrange(10, 15)])

        rs = df.to_string(formatters={'__index__': lambda x: 'abc' [x]})

        xp = """\
    0   1   2   3   4
a   0   1   2   3   4
b   5   6   7   8   9
c  10  11  12  13  14\
"""

        assert rs == xp
    def test_to_string_left_justify_cols(self):
        # justify='left' left-aligns the column header over the values.
        tm.reset_display_options()
        df = DataFrame({'x': [3234, 0.253]})
        df_s = df.to_string(justify='left')
        expected = ('   x       \n' '0  3234.000\n' '1     0.253')
        assert df_s == expected
    def test_to_string_format_na(self):
        """NaN renders as ``NaN`` and float precision follows the widest
        value in the column; object columns are unaffected."""
        tm.reset_display_options()
        # 4-decimal values force the whole float column to 4 decimals
        df = DataFrame({'A': [np.nan, -1, -2.1234, 3, 4],
                        'B': [np.nan, 'foo', 'foooo', 'fooooo', 'bar']})
        result = df.to_string()
        expected = (' A B\n'
                    '0 NaN NaN\n'
                    '1 -1.0000 foo\n'
                    '2 -2.1234 foooo\n'
                    '3 3.0000 fooooo\n'
                    '4 4.0000 bar')
        assert result == expected
        # whole-number floats keep a single trailing decimal
        df = DataFrame({'A': [np.nan, -1., -2., 3., 4.],
                        'B': [np.nan, 'foo', 'foooo', 'fooooo', 'bar']})
        result = df.to_string()
        expected = (' A B\n'
                    '0 NaN NaN\n'
                    '1 -1.0 foo\n'
                    '2 -2.0 foooo\n'
                    '3 3.0 fooooo\n'
                    '4 4.0 bar')
        assert result == expected
    def test_to_string_format_inf(self):
        """+/-inf render as ``inf``/``-inf`` in both float and object
        columns (GH 24861)."""
        # Issue #24861
        tm.reset_display_options()
        df = DataFrame({
            'A': [-np.inf, np.inf, -1, -2.1234, 3, 4],
            'B': [-np.inf, np.inf, 'foo', 'foooo', 'fooooo', 'bar']
        })
        result = df.to_string()
        expected = (' A B\n'
                    '0 -inf -inf\n'
                    '1 inf inf\n'
                    '2 -1.0000 foo\n'
                    '3 -2.1234 foooo\n'
                    '4 3.0000 fooooo\n'
                    '5 4.0000 bar')
        assert result == expected
        df = DataFrame({
            'A': [-np.inf, np.inf, -1., -2., 3., 4.],
            'B': [-np.inf, np.inf, 'foo', 'foooo', 'fooooo', 'bar']
        })
        result = df.to_string()
        expected = (' A B\n'
                    '0 -inf -inf\n'
                    '1 inf inf\n'
                    '2 -1.0 foo\n'
                    '3 -2.0 foooo\n'
                    '4 3.0 fooooo\n'
                    '5 4.0 bar')
        assert result == expected
    def test_to_string_decimal(self):
        """``decimal=','`` swaps the decimal separator (GH 23614)."""
        # Issue #23614
        df = DataFrame({'A': [6.0, 3.1, 2.2]})
        expected = ' A\n0 6,0\n1 3,1\n2 2,2'
        assert df.to_string(decimal=',') == expected
    def test_to_string_line_width(self):
        """``line_width`` wraps a wide frame so no line exceeds the limit."""
        df = DataFrame(123, lrange(10, 15), lrange(30))
        s = df.to_string(line_width=80)
        assert max(len(l) for l in s.split('\n')) == 80
    def test_show_dimensions(self):
        """``display.show_dimensions``: True always appends the shape,
        False never does, and 'truncate' appends it only when the repr is
        actually truncated."""
        df = DataFrame(123, lrange(10, 15), lrange(30))
        # always on
        with option_context('display.max_rows', 10, 'display.max_columns', 40,
                            'display.width', 500, 'display.expand_frame_repr',
                            'info', 'display.show_dimensions', True):
            assert '5 rows' in str(df)
            assert '5 rows' in df._repr_html_()
        # always off
        with option_context('display.max_rows', 10, 'display.max_columns', 40,
                            'display.width', 500, 'display.expand_frame_repr',
                            'info', 'display.show_dimensions', False):
            assert '5 rows' not in str(df)
            assert '5 rows' not in df._repr_html_()
        # 'truncate' + repr that is truncated -> shown
        with option_context('display.max_rows', 2, 'display.max_columns', 2,
                            'display.width', 500, 'display.expand_frame_repr',
                            'info', 'display.show_dimensions', 'truncate'):
            assert '5 rows' in str(df)
            assert '5 rows' in df._repr_html_()
        # 'truncate' + repr that fits -> hidden
        with option_context('display.max_rows', 10, 'display.max_columns', 40,
                            'display.width', 500, 'display.expand_frame_repr',
                            'info', 'display.show_dimensions', 'truncate'):
            assert '5 rows' not in str(df)
            assert '5 rows' not in df._repr_html_()
    def test_repr_html(self):
        """_repr_html_ smoke test under tight display limits, with the
        notebook HTML repr disabled, and with show_dimensions toggled."""
        self.frame._repr_html_()
        fmt.set_option('display.max_rows', 1, 'display.max_columns', 1)
        self.frame._repr_html_()
        fmt.set_option('display.notebook_repr_html', False)
        self.frame._repr_html_()
        tm.reset_display_options()
        df = DataFrame([[1, 2], [3, 4]])
        fmt.set_option('display.show_dimensions', True)
        assert '2 rows' in df._repr_html_()
        fmt.set_option('display.show_dimensions', False)
        assert '2 rows' not in df._repr_html_()
        # restore global display state for subsequent tests
        tm.reset_display_options()
    def test_repr_html_mathjax(self):
        """Disabling ``display.html.use_mathjax`` tags the HTML repr with
        the ``tex2jax_ignore`` class so MathJax skips it."""
        df = DataFrame([[1, 2], [3, 4]])
        assert 'tex2jax_ignore' not in df._repr_html_()
        with pd.option_context('display.html.use_mathjax', False):
            assert 'tex2jax_ignore' in df._repr_html_()
    def test_repr_html_wide(self):
        """Frames wider than ``display.max_columns`` get a '...' column in
        the HTML repr; narrower ones do not."""
        max_cols = 20
        df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)))
        with option_context('display.max_rows', 60, 'display.max_columns', 20):
            assert "..." not in df._repr_html_()
        wide_df = DataFrame(tm.rands_array(25, size=(10, max_cols + 1)))
        with option_context('display.max_rows', 60, 'display.max_columns', 20):
            assert "..." in wide_df._repr_html_()
    def test_repr_html_wide_multiindex_cols(self):
        """Column-count truncation also applies when columns are a
        MultiIndex (leaf columns are what counts against max_columns)."""
        max_cols = 20
        mcols = MultiIndex.from_product([np.arange(max_cols // 2),
                                         ['foo', 'bar']],
                                        names=['first', 'second'])
        df = DataFrame(tm.rands_array(25, size=(10, len(mcols))),
                       columns=mcols)
        reg_repr = df._repr_html_()
        assert '...' not in reg_repr
        # one extra level-0 value pushes the leaf count past max_cols
        mcols = MultiIndex.from_product((np.arange(1 + (max_cols // 2)),
                                         ['foo', 'bar']),
                                        names=['first', 'second'])
        df = DataFrame(tm.rands_array(25, size=(10, len(mcols))),
                       columns=mcols)
        with option_context('display.max_rows', 60, 'display.max_columns', 20):
            assert '...' in df._repr_html_()
    def test_repr_html_long(self):
        """Row truncation in the HTML repr: the middle row disappears and a
        '{n} rows x {m} columns' footer appears once max_rows is exceeded."""
        with option_context('display.max_rows', 60):
            max_rows = get_option('display.max_rows')
            h = max_rows - 1
            df = DataFrame({'A': np.arange(1, 1 + h),
                            'B': np.arange(41, 41 + h)})
            reg_repr = df._repr_html_()
            assert '..' not in reg_repr
            assert str(41 + max_rows // 2) in reg_repr
            h = max_rows + 1
            df = DataFrame({'A': np.arange(1, 1 + h),
                            'B': np.arange(41, 41 + h)})
            long_repr = df._repr_html_()
            assert '..' in long_repr
            # the middle value is exactly what truncation removes
            assert str(41 + max_rows // 2) not in long_repr
            assert u('{h} rows ').format(h=h) in long_repr
            assert u('2 columns') in long_repr
    def test_repr_html_float(self):
        """Same truncation contract as test_repr_html_long, but with a
        float index to exercise float label formatting."""
        with option_context('display.max_rows', 60):
            max_rows = get_option('display.max_rows')
            h = max_rows - 1
            df = DataFrame({'idx': np.linspace(-10, 10, h),
                            'A': np.arange(1, 1 + h),
                            'B': np.arange(41, 41 + h)}).set_index('idx')
            reg_repr = df._repr_html_()
            assert '..' not in reg_repr
            assert '<td>{val}</td>'.format(val=str(40 + h)) in reg_repr
            h = max_rows + 1
            df = DataFrame({'idx': np.linspace(-10, 10, h),
                            'A': np.arange(1, 1 + h),
                            'B': np.arange(41, 41 + h)}).set_index('idx')
            long_repr = df._repr_html_()
            assert '..' in long_repr
            # a middle value must have been truncated away
            assert '<td>{val}</td>'.format(val='31') not in long_repr
            assert u('{h} rows ').format(h=h) in long_repr
            assert u('2 columns') in long_repr
    def test_repr_html_long_multiindex(self):
        """Row truncation works for a MultiIndex-ed frame: 2 * max_L1 rows
        fit exactly; two more rows trigger the '...' marker."""
        max_rows = 60
        max_L1 = max_rows // 2
        tuples = list(itertools.product(np.arange(max_L1), ['foo', 'bar']))
        idx = MultiIndex.from_tuples(tuples, names=['first', 'second'])
        df = DataFrame(np.random.randn(max_L1 * 2, 2), index=idx,
                       columns=['A', 'B'])
        with option_context('display.max_rows', 60, 'display.max_columns', 20):
            reg_repr = df._repr_html_()
        assert '...' not in reg_repr
        tuples = list(itertools.product(np.arange(max_L1 + 1), ['foo', 'bar']))
        idx = MultiIndex.from_tuples(tuples, names=['first', 'second'])
        df = DataFrame(np.random.randn((max_L1 + 1) * 2, 2), index=idx,
                       columns=['A', 'B'])
        long_repr = df._repr_html_()
        assert '...' in long_repr
    def test_repr_html_long_and_wide(self):
        """Exceeding both max_rows and max_columns at once still produces a
        single truncated HTML repr (with '...')."""
        max_cols = 20
        max_rows = 60
        h, w = max_rows - 1, max_cols - 1
        df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
        with option_context('display.max_rows', 60, 'display.max_columns', 20):
            assert '...' not in df._repr_html_()
        h, w = max_rows + 1, max_cols + 1
        df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
        with option_context('display.max_rows', 60, 'display.max_columns', 20):
            assert '...' in df._repr_html_()
    def test_info_repr(self):
        """``display.large_repr='info'`` swaps the truncated repr for an
        info() summary for both too-long and too-wide frames."""
        # GH#21746 For tests inside a terminal (i.e. not CI) we need to detect
        # the terminal size to ensure that we try to print something "too big"
        term_width, term_height = get_terminal_size()
        max_rows = 60
        # widen the threshold on wide terminals so the frame is still "too big"
        max_cols = 20 + (max(term_width, 80) - 80) // 4
        # Long
        h, w = max_rows + 1, max_cols - 1
        df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
        assert has_vertically_truncated_repr(df)
        with option_context('display.large_repr', 'info'):
            assert has_info_repr(df)
        # Wide
        h, w = max_rows - 1, max_cols + 1
        df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
        assert has_horizontally_truncated_repr(df)
        with option_context('display.large_repr', 'info',
                            'display.max_columns', max_cols):
            assert has_info_repr(df)
    def test_info_repr_max_cols(self):
        """``display.max_info_columns`` controls whether the info repr is
        verbose (per-column) or the short non-verbose summary (GH 6939)."""
        # GH #6939
        df = DataFrame(np.random.randn(10, 5))
        # 5 columns > max_info_columns=4 -> non-verbose summary
        with option_context('display.large_repr', 'info',
                            'display.max_columns', 1,
                            'display.max_info_columns', 4):
            assert has_non_verbose_info_repr(df)
        # 5 columns <= max_info_columns=5 -> verbose per-column listing
        with option_context('display.large_repr', 'info',
                            'display.max_columns', 1,
                            'display.max_info_columns', 5):
            assert not has_non_verbose_info_repr(df)
        # test verbose overrides
        # fmt.set_option('display.max_info_columns', 4) # exceeded
    def test_info_repr_html(self):
        """With large_repr='info', _repr_html_ emits the info() text (which
        starts with "<class ...") instead of an HTML table."""
        max_rows = 60
        max_cols = 20
        # Long
        h, w = max_rows + 1, max_cols - 1
        df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
        assert r'<class' not in df._repr_html_()
        with option_context('display.large_repr', 'info'):
            assert r'<class' in df._repr_html_()
        # Wide
        h, w = max_rows - 1, max_cols + 1
        df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
        assert '<class' not in df._repr_html_()
        with option_context('display.large_repr', 'info',
                            'display.max_columns', max_cols):
            assert '<class' in df._repr_html_()
    def test_fake_qtconsole_repr_html(self):
        """_repr_html_ under tight display limits falls back to the info
        view (contains 'class')."""
        # NOTE(review): this local get_ipython shadows nothing and is never
        # called — it looks like a leftover from an older qtconsole check.
        def get_ipython():
            return {'config': {'KernelApp':
                               {'parent_appname': 'ipython-qtconsole'}}}
        repstr = self.frame._repr_html_()
        assert repstr is not None
        fmt.set_option('display.max_rows', 5, 'display.max_columns', 2)
        repstr = self.frame._repr_html_()
        assert 'class' in repstr  # info fallback
        # restore global display state for subsequent tests
        tm.reset_display_options()
def test_pprint_pathological_object(self):
"""
If the test fails, it at least won't hang.
"""
class A(object):
def __getitem__(self, key):
return 3 # obviously simplified
df = DataFrame([A()])
repr(df) # just don't die
    def test_float_trim_zeros(self):
        """Large floats render in scientific notation with the platform's
        exponent width on every data row."""
        vals = [2.08430917305e+10, 3.52205017305e+10, 2.30674817305e+10,
                2.03954217305e+10, 5.59897817305e+10]
        # first repr line is the column header, so give it a free pass
        skip = True
        for line in repr(DataFrame({'A': vals})).split('\n')[:-2]:
            if line.startswith('dtype:'):
                continue
            if _three_digit_exp():
                assert ('+010' in line) or skip
            else:
                assert ('+10' in line) or skip
            skip = False
def test_dict_entries(self):
df = DataFrame({'A': [{'a': 1, 'b': 2}]})
val = df.to_string()
assert "'a': 1" in val
assert "'b': 2" in val
    def test_period(self):
        """Period columns of differing frequencies each render with their
        own natural resolution (GH 12615)."""
        # GH 12615
        df = pd.DataFrame({'A': pd.period_range('2013-01',
                                                periods=4, freq='M'),
                           'B': [pd.Period('2011-01', freq='M'),
                                 pd.Period('2011-02-01', freq='D'),
                                 pd.Period('2011-03-01 09:00', freq='H'),
                                 pd.Period('2011-04', freq='M')],
                           'C': list('abcd')})
        exp = (" A B C\n"
               "0 2013-01 2011-01 a\n"
               "1 2013-02 2011-02-01 b\n"
               "2 2013-03 2011-03-01 09:00 c\n"
               "3 2013-04 2011-04 d")
        assert str(df) == exp
def gen_series_formatting():
    """Build the Series fixtures shared by the formatting tests.

    Returns
    -------
    dict
        'onel'/'twol': 100 repeats of one- and two-letter strings;
        'asc'/'desc': strings of increasing / decreasing length.
    """
    one_letter = pd.Series(['a'] * 100)
    two_letter = pd.Series(['ab'] * 100)
    ascending = pd.Series(['a', 'ab', 'abc', 'abcd', 'abcde', 'abcdef'])
    descending = ascending[::-1]
    return {'onel': one_letter, 'twol': two_letter,
            'asc': ascending, 'desc': descending}
class TestSeriesFormatting(object):
    """to_string()/repr() behavior for Series: truncation, unicode widths,
    name/dtype/length footers, timedelta and period rendering."""

    def setup_method(self, method):
        """Per-test fixture: a business-day frequency float time series."""
        self.ts = tm.makeTimeSeries()
    def test_repr_unicode(self):
        """repr() must not raise on unicode values, including long series."""
        s = Series([u('\u03c3')] * 10)
        repr(s)
        a = Series([u("\u05d0")] * 1000)
        a.name = 'title1'
        repr(a)
    def test_to_string(self):
        """to_string: buf= returns None, float_format applies per value,
        empty series show a Freq-only summary, and length/name/dtype all
        land on the footer line."""
        buf = StringIO()
        s = self.ts.to_string()
        retval = self.ts.to_string(buf=buf)
        assert retval is None
        assert buf.getvalue().strip() == s
        # pass float_format
        format = '%.4f'.__mod__
        result = self.ts.to_string(float_format=format)
        result = [x.split()[1] for x in result.split('\n')[:-1]]
        expected = [format(x) for x in self.ts]
        assert result == expected
        # empty string
        result = self.ts[:0].to_string()
        assert result == 'Series([], Freq: B)'
        result = self.ts[:0].to_string(length=0)
        assert result == 'Series([], Freq: B)'
        # name and length
        cp = self.ts.copy()
        cp.name = 'foo'
        result = cp.to_string(length=True, name=True, dtype=True)
        last_line = result.split('\n')[-1].strip()
        assert last_line == ("Freq: B, Name: foo, "
                             "Length: {cp}, dtype: float64".format(cp=len(cp)))
    def test_freq_name_separation(self):
        """Freq and Name are comma-separated on the footer, even for name=0."""
        s = Series(np.random.randn(10),
                   index=date_range('1/1/2000', periods=10), name=0)
        result = repr(s)
        assert 'Freq: D, Name: 0' in result
    def test_to_string_mixed(self):
        """Mixed object series: floats keep float formatting, but NaN is not
        treated as a float in an otherwise-string column."""
        s = Series(['foo', np.nan, -1.23, 4.56])
        result = s.to_string()
        expected = (u('0 foo\n') + u('1 NaN\n') + u('2 -1.23\n') +
                    u('3 4.56'))
        assert result == expected
        # but don't count NAs as floats
        s = Series(['foo', np.nan, 'bar', 'baz'])
        result = s.to_string()
        expected = (u('0 foo\n') + '1 NaN\n' + '2 bar\n' + '3 baz')
        assert result == expected
        s = Series(['foo', 5, 'bar', 'baz'])
        result = s.to_string()
        expected = (u('0 foo\n') + '1 5\n' + '2 bar\n' + '3 baz')
        assert result == expected
    def test_to_string_float_na_spacing(self):
        """NaN aligns with the decimal column of surrounding floats."""
        s = Series([0., 1.5678, 2., -3., 4.])
        s[::2] = np.nan
        result = s.to_string()
        expected = (u('0 NaN\n') + '1 1.5678\n' + '2 NaN\n' +
                    '3 -3.0000\n' + '4 NaN')
        assert result == expected
    def test_to_string_without_index(self):
        """index=False drops the index column entirely (GH 11729)."""
        # GH 11729 Test index=False option
        s = Series([1, 2, 3, 4])
        result = s.to_string(index=False)
        expected = (u(' 1\n') + ' 2\n' + ' 3\n' + ' 4')
        assert result == expected
    def test_unicode_name_in_footer(self):
        """A unicode series name must not break footer construction."""
        s = Series([1, 2], name=u('\u05e2\u05d1\u05e8\u05d9\u05ea'))
        sf = fmt.SeriesFormatter(s, name=u('\u05e2\u05d1\u05e8\u05d9\u05ea'))
        sf._get_footer()  # should not raise exception
    def test_east_asian_unicode_series(self):
        """Alignment of east-asian-width characters in index, values, name
        and footer — first with the default option (no width compensation),
        then with display.unicode.east_asian_width enabled."""
        if PY3:
            _rep = repr
        else:
            _rep = unicode # noqa
        # not aligned properly because of east asian width
        # unicode index
        s = Series(['a', 'bb', 'CCC', 'D'],
                   index=[u'あ', u'いい', u'ううう', u'ええええ'])
        expected = (u"あ a\nいい bb\nううう CCC\n"
                    u"ええええ D\ndtype: object")
        assert _rep(s) == expected
        # unicode values
        s = Series([u'あ', u'いい', u'ううう', u'ええええ'],
                   index=['a', 'bb', 'c', 'ddd'])
        expected = (u"a あ\nbb いい\nc ううう\n"
                    u"ddd ええええ\ndtype: object")
        assert _rep(s) == expected
        # both
        s = Series([u'あ', u'いい', u'ううう', u'ええええ'],
                   index=[u'ああ', u'いいいい', u'う', u'えええ'])
        expected = (u"ああ あ\nいいいい いい\nう ううう\n"
                    u"えええ ええええ\ndtype: object")
        assert _rep(s) == expected
        # unicode footer
        s = Series([u'あ', u'いい', u'ううう', u'ええええ'],
                   index=[u'ああ', u'いいいい', u'う', u'えええ'],
                   name=u'おおおおおおお')
        expected = (u"ああ あ\nいいいい いい\nう ううう\n"
                    u"えええ ええええ\nName: おおおおおおお, dtype: object")
        assert _rep(s) == expected
        # MultiIndex
        idx = pd.MultiIndex.from_tuples([(u'あ', u'いい'), (u'う', u'え'), (
            u'おおお', u'かかかか'), (u'き', u'くく')])
        s = Series([1, 22, 3333, 44444], index=idx)
        expected = (u"あ いい 1\n"
                    u"う え 22\n"
                    u"おおお かかかか 3333\n"
                    u"き くく 44444\ndtype: int64")
        assert _rep(s) == expected
        # object dtype, shorter than unicode repr
        s = Series([1, 22, 3333, 44444], index=[1, 'AB', np.nan, u'あああ'])
        expected = (u"1 1\nAB 22\nNaN 3333\n"
                    u"あああ 44444\ndtype: int64")
        assert _rep(s) == expected
        # object dtype, longer than unicode repr
        s = Series([1, 22, 3333, 44444],
                   index=[1, 'AB', pd.Timestamp('2011-01-01'), u'あああ'])
        expected = (u"1 1\n"
                    u"AB 22\n"
                    u"2011-01-01 00:00:00 3333\n"
                    u"あああ 44444\ndtype: int64")
        assert _rep(s) == expected
        # truncate
        with option_context('display.max_rows', 3):
            s = Series([u'あ', u'いい', u'ううう', u'ええええ'],
                       name=u'おおおおおおお')
            expected = (u"0 あ\n ... \n"
                        u"3 ええええ\n"
                        u"Name: おおおおおおお, Length: 4, dtype: object")
            assert _rep(s) == expected
            s.index = [u'ああ', u'いいいい', u'う', u'えええ']
            expected = (u"ああ あ\n ... \n"
                        u"えええ ええええ\n"
                        u"Name: おおおおおおお, Length: 4, dtype: object")
            assert _rep(s) == expected
        # Enable unicode option -----------------------------------------
        with option_context('display.unicode.east_asian_width', True):
            # unicode index
            s = Series(['a', 'bb', 'CCC', 'D'],
                       index=[u'あ', u'いい', u'ううう', u'ええええ'])
            expected = (u"あ a\nいい bb\nううう CCC\n"
                        u"ええええ D\ndtype: object")
            assert _rep(s) == expected
            # unicode values
            s = Series([u'あ', u'いい', u'ううう', u'ええええ'],
                       index=['a', 'bb', 'c', 'ddd'])
            expected = (u"a あ\nbb いい\nc ううう\n"
                        u"ddd ええええ\ndtype: object")
            assert _rep(s) == expected
            # both
            s = Series([u'あ', u'いい', u'ううう', u'ええええ'],
                       index=[u'ああ', u'いいいい', u'う', u'えええ'])
            expected = (u"ああ あ\n"
                        u"いいいい いい\n"
                        u"う ううう\n"
                        u"えええ ええええ\ndtype: object")
            assert _rep(s) == expected
            # unicode footer
            s = Series([u'あ', u'いい', u'ううう', u'ええええ'],
                       index=[u'ああ', u'いいいい', u'う', u'えええ'],
                       name=u'おおおおおおお')
            expected = (u"ああ あ\n"
                        u"いいいい いい\n"
                        u"う ううう\n"
                        u"えええ ええええ\n"
                        u"Name: おおおおおおお, dtype: object")
            assert _rep(s) == expected
            # MultiIndex
            idx = pd.MultiIndex.from_tuples([(u'あ', u'いい'), (u'う', u'え'), (
                u'おおお', u'かかかか'), (u'き', u'くく')])
            s = Series([1, 22, 3333, 44444], index=idx)
            expected = (u"あ いい 1\n"
                        u"う え 22\n"
                        u"おおお かかかか 3333\n"
                        u"き くく 44444\n"
                        u"dtype: int64")
            assert _rep(s) == expected
            # object dtype, shorter than unicode repr
            s = Series([1, 22, 3333, 44444], index=[1, 'AB', np.nan, u'あああ'])
            expected = (u"1 1\nAB 22\nNaN 3333\n"
                        u"あああ 44444\ndtype: int64")
            assert _rep(s) == expected
            # object dtype, longer than unicode repr
            s = Series([1, 22, 3333, 44444],
                       index=[1, 'AB', pd.Timestamp('2011-01-01'), u'あああ'])
            expected = (u"1 1\n"
                        u"AB 22\n"
                        u"2011-01-01 00:00:00 3333\n"
                        u"あああ 44444\ndtype: int64")
            assert _rep(s) == expected
            # truncate
            with option_context('display.max_rows', 3):
                s = Series([u'あ', u'いい', u'ううう', u'ええええ'],
                           name=u'おおおおおおお')
                expected = (u"0 あ\n ... \n"
                            u"3 ええええ\n"
                            u"Name: おおおおおおお, Length: 4, dtype: object")
                assert _rep(s) == expected
                s.index = [u'ああ', u'いいいい', u'う', u'えええ']
                expected = (u"ああ あ\n"
                            u" ... \n"
                            u"えええ ええええ\n"
                            u"Name: おおおおおおお, Length: 4, dtype: object")
                assert _rep(s) == expected
            # ambiguous unicode
            s = Series([u'¡¡', u'い¡¡', u'ううう', u'ええええ'],
                       index=[u'ああ', u'¡¡¡¡いい', u'¡¡', u'えええ'])
            expected = (u"ああ ¡¡\n"
                        u"¡¡¡¡いい い¡¡\n"
                        u"¡¡ ううう\n"
                        u"えええ ええええ\ndtype: object")
            assert _rep(s) == expected
    def test_float_trim_zeros(self):
        """Large floats render with the platform's exponent width."""
        vals = [2.08430917305e+10, 3.52205017305e+10, 2.30674817305e+10,
                2.03954217305e+10, 5.59897817305e+10]
        for line in repr(Series(vals)).split('\n'):
            if line.startswith('dtype:'):
                continue
            if _three_digit_exp():
                assert '+010' in line
            else:
                assert '+10' in line
    def test_datetimeindex(self):
        """Datetime values and NaT render in to_string and index summaries."""
        index = date_range('20130102', periods=6)
        s = Series(1, index=index)
        result = s.to_string()
        assert '2013-01-02' in result
        # nat in index
        s2 = Series(2, index=[Timestamp('20130111'), NaT])
        s = s2.append(s)
        result = s.to_string()
        assert 'NaT' in result
        # nat in summary
        result = str(s2.index)
        assert 'NaT' in result
    @pytest.mark.parametrize('start_date', [
        '2017-01-01 23:59:59.999999999',
        '2017-01-01 23:59:59.99999999',
        '2017-01-01 23:59:59.9999999',
        '2017-01-01 23:59:59.999999',
        '2017-01-01 23:59:59.99999',
        '2017-01-01 23:59:59.9999'
    ])
    def test_datetimeindex_highprecision(self, start_date):
        # GH19030
        # Check that high-precision time values for the end of day are
        # included in repr for DatetimeIndex
        s1 = Series(date_range(start=start_date, freq='D', periods=5))
        result = str(s1)
        assert start_date in result
        dti = date_range(start=start_date, freq='D', periods=5)
        s2 = Series(3, index=dti)
        result = str(s2.index)
        assert start_date in result
    def test_timedelta64(self):
        """timedelta64 rendering: NaT, fractional seconds, negative
        deltas ("-1 days +..."), and unboxed elements in to_string."""
        from datetime import datetime, timedelta
        Series(np.array([1100, 20], dtype='timedelta64[ns]')).to_string()
        s = Series(date_range('2012-1-1', periods=3, freq='D'))
        # GH2146
        # adding NaTs
        y = s - s.shift(1)
        result = y.to_string()
        assert '1 days' in result
        assert '00:00:00' not in result
        assert 'NaT' in result
        # with frac seconds
        o = Series([datetime(2012, 1, 1, microsecond=150)] * 3)
        y = s - o
        result = y.to_string()
        assert '-1 days +23:59:59.999850' in result
        # rounding?
        o = Series([datetime(2012, 1, 1, 1)] * 3)
        y = s - o
        result = y.to_string()
        assert '-1 days +23:00:00' in result
        assert '1 days 23:00:00' in result
        o = Series([datetime(2012, 1, 1, 1, 1)] * 3)
        y = s - o
        result = y.to_string()
        assert '-1 days +22:59:00' in result
        assert '1 days 22:59:00' in result
        o = Series([datetime(2012, 1, 1, 1, 1, microsecond=150)] * 3)
        y = s - o
        result = y.to_string()
        assert '-1 days +22:58:59.999850' in result
        assert '0 days 22:58:59.999850' in result
        # neg time
        td = timedelta(minutes=5, seconds=3)
        s2 = Series(date_range('2012-1-1', periods=3, freq='D')) + td
        y = s - s2
        result = y.to_string()
        assert '-1 days +23:54:57' in result
        td = timedelta(microseconds=550)
        s2 = Series(date_range('2012-1-1', periods=3, freq='D')) + td
        y = s - td
        result = y.to_string()
        assert '2012-01-01 23:59:59.999450' in result
        # no boxing of the actual elements
        td = Series(pd.timedelta_range('1 days', periods=3))
        result = td.to_string()
        assert result == u("0 1 days\n1 2 days\n2 3 days")
    def test_mixed_datetime64(self):
        """A datetime column in a mixed frame renders its date in .loc rows."""
        df = DataFrame({'A': [1, 2], 'B': ['2012-01-01', '2012-01-02']})
        df['B'] = pd.to_datetime(df.B)
        result = repr(df.loc[0])
        assert '2012-01-01' in result
    def test_period(self):
        """Period series render per-frequency; mixed freqs fall back to
        object dtype (GH 12615)."""
        # GH 12615
        index = pd.period_range('2013-01', periods=6, freq='M')
        s = Series(np.arange(6, dtype='int64'), index=index)
        exp = ("2013-01 0\n"
               "2013-02 1\n"
               "2013-03 2\n"
               "2013-04 3\n"
               "2013-05 4\n"
               "2013-06 5\n"
               "Freq: M, dtype: int64")
        assert str(s) == exp
        s = Series(index)
        exp = ("0 2013-01\n"
               "1 2013-02\n"
               "2 2013-03\n"
               "3 2013-04\n"
               "4 2013-05\n"
               "5 2013-06\n"
               "dtype: period[M]")
        assert str(s) == exp
        # periods with mixed freq
        s = Series([pd.Period('2011-01', freq='M'),
                    pd.Period('2011-02-01', freq='D'),
                    pd.Period('2011-03-01 09:00', freq='H')])
        exp = ("0 2011-01\n1 2011-02-01\n"
               "2 2011-03-01 09:00\ndtype: object")
        assert str(s) == exp
    def test_max_multi_index_display(self):
        """Line counts of repr under various max_rows settings, for both a
        MultiIndex series and a default-index series (GH 7101)."""
        # GH 7101
        # doc example (indexing.rst)
        # multi-index
        arrays = [['bar', 'bar', 'baz', 'baz', 'foo', 'foo', 'qux', 'qux'],
                  ['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']]
        tuples = list(zip(*arrays))
        index = MultiIndex.from_tuples(tuples, names=['first', 'second'])
        s = Series(np.random.randn(8), index=index)
        with option_context("display.max_rows", 10):
            assert len(str(s).split('\n')) == 10
        with option_context("display.max_rows", 3):
            assert len(str(s).split('\n')) == 5
        with option_context("display.max_rows", 2):
            assert len(str(s).split('\n')) == 5
        with option_context("display.max_rows", 1):
            assert len(str(s).split('\n')) == 4
        with option_context("display.max_rows", 0):
            assert len(str(s).split('\n')) == 10
        # index
        s = Series(np.random.randn(8), None)
        with option_context("display.max_rows", 10):
            assert len(str(s).split('\n')) == 9
        with option_context("display.max_rows", 3):
            assert len(str(s).split('\n')) == 4
        with option_context("display.max_rows", 2):
            assert len(str(s).split('\n')) == 4
        with option_context("display.max_rows", 1):
            assert len(str(s).split('\n')) == 3
        with option_context("display.max_rows", 0):
            assert len(str(s).split('\n')) == 9
    # Make sure #8532 is fixed
    def test_consistent_format(self):
        """All rows of a truncated float repr share one column width, even
        when only one value (0.9999) needs the extra digits (GH 8532)."""
        s = pd.Series([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0.9999, 1, 1] * 10)
        with option_context("display.max_rows", 10,
                            "display.show_dimensions", False):
            res = repr(s)
        exp = ('0 1.0000\n1 1.0000\n2 1.0000\n3 '
               '1.0000\n4 1.0000\n ... \n125 '
               '1.0000\n126 1.0000\n127 0.9999\n128 '
               '1.0000\n129 1.0000\ndtype: float64')
        assert res == exp
    def chck_ncols(self, s):
        """Helper: assert every non-ellipsis repr line has the same width."""
        with option_context("display.max_rows", 10):
            res = repr(s)
        lines = res.split('\n')
        # drop the '...' truncation line and the dtype footer
        lines = [line for line in repr(s).split('\n')
                 if not re.match(r'[^\.]*\.+', line)][:-1]
        ncolsizes = len({len(line.strip()) for line in lines})
        assert ncolsizes == 1
    def test_format_explicit(self):
        """Truncated reprs for the gen_series_formatting fixtures: the
        ellipsis width follows the value column width."""
        test_sers = gen_series_formatting()
        with option_context("display.max_rows", 4,
                            "display.show_dimensions", False):
            res = repr(test_sers['onel'])
            exp = '0 a\n1 a\n ..\n98 a\n99 a\ndtype: object'
            assert exp == res
            res = repr(test_sers['twol'])
            exp = ('0 ab\n1 ab\n ..\n98 ab\n99 ab\ndtype:'
                   ' object')
            assert exp == res
            res = repr(test_sers['asc'])
            exp = ('0 a\n1 ab\n ... \n4 abcde\n5'
                   ' abcdef\ndtype: object')
            assert exp == res
            res = repr(test_sers['desc'])
            exp = ('5 abcdef\n4 abcde\n ... \n1 ab\n0'
                   ' a\ndtype: object')
            assert exp == res
    def test_ncols(self):
        """Column-width consistency holds for every fixture series."""
        test_sers = gen_series_formatting()
        for s in test_sers.values():
            self.chck_ncols(s)
    def test_max_rows_eq_one(self):
        """max_rows=1 shows exactly one data row followed by '..'."""
        s = Series(range(10), dtype='int64')
        with option_context("display.max_rows", 1):
            strrepr = repr(s).split('\n')
        exp1 = ['0', '0']
        res1 = strrepr[0].split()
        assert exp1 == res1
        exp2 = ['..']
        res2 = strrepr[1].split()
        assert exp2 == res2
    def test_truncate_ndots(self):
        """The ellipsis length matches the value column width (2 dots for
        1-digit values, 3 dots for 3-digit values)."""
        def getndots(s):
            return len(re.match(r'[^\.]*(\.*)', s).groups()[0])
        s = Series([0, 2, 3, 6])
        with option_context("display.max_rows", 2):
            strrepr = repr(s).replace('\n', '')
        assert getndots(strrepr) == 2
        s = Series([0, 100, 200, 400])
        with option_context("display.max_rows", 2):
            strrepr = repr(s).replace('\n', '')
        assert getndots(strrepr) == 3
    def test_show_dimensions(self):
        """'Length' appears in repr only when truncating or when
        show_dimensions is forced on (GH 7117)."""
        # gh-7117
        s = Series(range(5))
        assert 'Length' not in repr(s)
        with option_context("display.max_rows", 4):
            assert 'Length' in repr(s)
        with option_context("display.show_dimensions", True):
            assert 'Length' in repr(s)
        with option_context("display.max_rows", 4,
                            "display.show_dimensions", False):
            assert 'Length' not in repr(s)
    def test_to_string_name(self):
        """name=True/False toggles the 'Name:' footer in to_string."""
        s = Series(range(100), dtype='int64')
        s.name = 'myser'
        res = s.to_string(max_rows=2, name=True)
        exp = '0 0\n ..\n99 99\nName: myser'
        assert res == exp
        res = s.to_string(max_rows=2, name=False)
        exp = '0 0\n ..\n99 99'
        assert res == exp
    def test_to_string_dtype(self):
        """dtype=True/False toggles the 'dtype:' footer in to_string."""
        s = Series(range(100), dtype='int64')
        res = s.to_string(max_rows=2, dtype=True)
        exp = '0 0\n ..\n99 99\ndtype: int64'
        assert res == exp
        res = s.to_string(max_rows=2, dtype=False)
        exp = '0 0\n ..\n99 99'
        assert res == exp
    def test_to_string_length(self):
        """length=True adds a 'Length:' footer in to_string."""
        s = Series(range(100), dtype='int64')
        res = s.to_string(max_rows=2, length=True)
        exp = '0 0\n ..\n99 99\nLength: 100'
        assert res == exp
    def test_to_string_na_rep(self):
        """na_rep substitutes the given token for missing values."""
        s = pd.Series(index=range(100))
        res = s.to_string(na_rep='foo', max_rows=2)
        exp = '0 foo\n ..\n99 foo'
        assert res == exp
    def test_to_string_float_format(self):
        """A callable float_format is applied to each value."""
        s = pd.Series(range(10), dtype='float64')
        res = s.to_string(float_format=lambda x: '{0:2.1f}'.format(x),
                          max_rows=2)
        exp = '0 0.0\n ..\n9 9.0'
        assert res == exp
    def test_to_string_header(self):
        """header=True prints the index name as a heading line."""
        s = pd.Series(range(10), dtype='int64')
        s.index.name = 'foo'
        res = s.to_string(header=True, max_rows=2)
        exp = 'foo\n0 0\n ..\n9 9'
        assert res == exp
        res = s.to_string(header=False, max_rows=2)
        exp = '0 0\n ..\n9 9'
        assert res == exp
def _three_digit_exp():
return '{x:.4g}'.format(x=1.7e8) == '1.7e+008'
class TestFloatArrayFormatter(object):
    """Unit tests for fmt.FloatArrayFormatter: empty input, basic
    formatting, precision/scientific-notation switching, and width caps."""

    def test_misc(self):
        """An empty float array yields an empty result list."""
        obj = fmt.FloatArrayFormatter(np.array([], dtype=np.float64))
        result = obj.get_result()
        assert len(result) == 0
    def test_format(self):
        """Whole-number floats render with a single trailing decimal."""
        obj = fmt.FloatArrayFormatter(np.array([12, 0], dtype=np.float64))
        result = obj.get_result()
        assert result[0] == " 12.0"
        assert result[1] == " 0.0"
    def test_output_significant_digits(self):
        """Slices containing near-boundary small values switch between
        scientific and fixed notation so significant digits are kept
        (GH 9764)."""
        # Issue #9764
        # In case default display precision changes:
        with pd.option_context('display.precision', 6):
            # DataFrame example from issue #9764
            d = pd.DataFrame(
                {'col1': [9.999e-8, 1e-7, 1.0001e-7, 2e-7, 4.999e-7, 5e-7,
                          5.0001e-7, 6e-7, 9.999e-7, 1e-6, 1.0001e-6, 2e-6,
                          4.999e-6, 5e-6, 5.0001e-6, 6e-6]})
            expected_output = {
                (0, 6):
                ' col1\n'
                '0 9.999000e-08\n'
                '1 1.000000e-07\n'
                '2 1.000100e-07\n'
                '3 2.000000e-07\n'
                '4 4.999000e-07\n'
                '5 5.000000e-07',
                (1, 6):
                ' col1\n'
                '1 1.000000e-07\n'
                '2 1.000100e-07\n'
                '3 2.000000e-07\n'
                '4 4.999000e-07\n'
                '5 5.000000e-07',
                (1, 8):
                ' col1\n'
                '1 1.000000e-07\n'
                '2 1.000100e-07\n'
                '3 2.000000e-07\n'
                '4 4.999000e-07\n'
                '5 5.000000e-07\n'
                '6 5.000100e-07\n'
                '7 6.000000e-07',
                (8, 16):
                ' col1\n'
                '8 9.999000e-07\n'
                '9 1.000000e-06\n'
                '10 1.000100e-06\n'
                '11 2.000000e-06\n'
                '12 4.999000e-06\n'
                '13 5.000000e-06\n'
                '14 5.000100e-06\n'
                '15 6.000000e-06',
                (9, 16):
                ' col1\n'
                '9 0.000001\n'
                '10 0.000001\n'
                '11 0.000002\n'
                '12 0.000005\n'
                '13 0.000005\n'
                '14 0.000005\n'
                '15 0.000006'
            }
            for (start, stop), v in expected_output.items():
                assert str(d[start:stop]) == v
    def test_too_long(self):
        """Values wider than precision + 6 characters force the whole
        column into scientific notation (GH 10451)."""
        # GH 10451
        with pd.option_context('display.precision', 4):
            # need both a number > 1e6 and something that normally formats to
            # having length > display.precision + 6
            df = pd.DataFrame(dict(x=[12345.6789]))
            assert str(df) == ' x\n0 12345.6789'
            df = pd.DataFrame(dict(x=[2e6]))
            assert str(df) == ' x\n0 2000000.0'
            df = pd.DataFrame(dict(x=[12345.6789, 2e6]))
            assert str(df) == ' x\n0 1.2346e+04\n1 2.0000e+06'
class TestRepr_timedelta64(object):
    """Checks for ``Timedelta._repr_base`` output under each format mode."""

    def test_none(self):
        """Default mode: days always shown; time-of-day only when needed."""
        one_day = pd.to_timedelta(1, unit='D')
        no_days = pd.to_timedelta(0, unit='D')
        one_second = pd.to_timedelta(1, unit='s')
        half_second = pd.to_timedelta(500, unit='ms')

        def base(td):
            return td._repr_base()

        assert base(one_day) == "1 days"
        assert base(-one_day) == "-1 days"
        assert base(no_days) == "0 days"
        assert base(one_second) == "0 days 00:00:01"
        assert base(half_second) == "0 days 00:00:00.500000"
        assert base(one_day + one_second) == "1 days 00:00:01"
        assert base(-one_day + one_second) == "-1 days +00:00:01"
        assert base(one_day + half_second) == "1 days 00:00:00.500000"
        assert base(-one_day + half_second) == "-1 days +00:00:00.500000"

    def test_sub_day(self):
        """'sub_day' mode drops the day component when it is zero."""
        one_day = pd.to_timedelta(1, unit='D')
        no_days = pd.to_timedelta(0, unit='D')
        one_second = pd.to_timedelta(1, unit='s')
        half_second = pd.to_timedelta(500, unit='ms')

        def base(td):
            return td._repr_base(format='sub_day')

        assert base(one_day) == "1 days"
        assert base(-one_day) == "-1 days"
        assert base(no_days) == "00:00:00"
        assert base(one_second) == "00:00:01"
        assert base(half_second) == "00:00:00.500000"
        assert base(one_day + one_second) == "1 days 00:00:01"
        assert base(-one_day + one_second) == "-1 days +00:00:01"
        assert base(one_day + half_second) == "1 days 00:00:00.500000"
        assert base(-one_day + half_second) == "-1 days +00:00:00.500000"

    def test_long(self):
        """'long' mode always shows both days and hh:mm:ss."""
        one_day = pd.to_timedelta(1, unit='D')
        no_days = pd.to_timedelta(0, unit='D')
        one_second = pd.to_timedelta(1, unit='s')
        half_second = pd.to_timedelta(500, unit='ms')

        def base(td):
            return td._repr_base(format='long')

        assert base(one_day) == "1 days 00:00:00"
        assert base(-one_day) == "-1 days +00:00:00"
        assert base(no_days) == "0 days 00:00:00"
        assert base(one_second) == "0 days 00:00:01"
        assert base(half_second) == "0 days 00:00:00.500000"
        assert base(one_day + one_second) == "1 days 00:00:01"
        assert base(-one_day + one_second) == "-1 days +00:00:01"
        assert base(one_day + half_second) == "1 days 00:00:00.500000"
        assert base(-one_day + half_second) == "-1 days +00:00:00.500000"

    def test_all(self):
        """'all' mode renders full nanosecond precision."""
        one_day = pd.to_timedelta(1, unit='D')
        no_days = pd.to_timedelta(0, unit='D')
        one_nano = pd.to_timedelta(1, unit='ns')

        def base(td):
            return td._repr_base(format='all')

        assert base(one_day) == "1 days 00:00:00.000000000"
        assert base(-one_day) == "-1 days +00:00:00.000000000"
        assert base(no_days) == "0 days 00:00:00.000000000"
        assert base(one_nano) == "0 days 00:00:00.000000001"
        assert base(-one_day + one_nano) == "-1 days +00:00:00.000000001"
class TestTimedelta64Formatter(object):
    """fmt.Timedelta64Formatter: box=True quotes each value, box=False
    leaves it bare; negatives render as '-N days ...'."""

    def test_days(self):
        """Day-resolution deltas render as "N days", quoted when boxed."""
        x = pd.to_timedelta(list(range(5)) + [pd.NaT], unit='D')
        result = fmt.Timedelta64Formatter(x, box=True).get_result()
        assert result[0].strip() == "'0 days'"
        assert result[1].strip() == "'1 days'"
        result = fmt.Timedelta64Formatter(x[1:2], box=True).get_result()
        assert result[0].strip() == "'1 days'"
        result = fmt.Timedelta64Formatter(x, box=False).get_result()
        assert result[0].strip() == "0 days"
        assert result[1].strip() == "1 days"
        result = fmt.Timedelta64Formatter(x[1:2], box=False).get_result()
        assert result[0].strip() == "1 days"
    def test_days_neg(self):
        """Negated day deltas pick up a leading minus sign."""
        x = pd.to_timedelta(list(range(5)) + [pd.NaT], unit='D')
        result = fmt.Timedelta64Formatter(-x, box=True).get_result()
        assert result[0].strip() == "'0 days'"
        assert result[1].strip() == "'-1 days'"
    def test_subdays(self):
        """Sub-day deltas drop the day component entirely."""
        y = pd.to_timedelta(list(range(5)) + [pd.NaT], unit='s')
        result = fmt.Timedelta64Formatter(y, box=True).get_result()
        assert result[0].strip() == "'00:00:00'"
        assert result[1].strip() == "'00:00:01'"
    def test_subdays_neg(self):
        """Negative sub-day deltas use the '-1 days +HH:MM:SS' form."""
        y = pd.to_timedelta(list(range(5)) + [pd.NaT], unit='s')
        result = fmt.Timedelta64Formatter(-y, box=True).get_result()
        assert result[0].strip() == "'00:00:00'"
        assert result[1].strip() == "'-1 days +23:59:59'"
    def test_zero(self):
        """Zero renders as '0 days' with or without a trailing NaT."""
        x = pd.to_timedelta(list(range(1)) + [pd.NaT], unit='D')
        result = fmt.Timedelta64Formatter(x, box=True).get_result()
        assert result[0].strip() == "'0 days'"
        x = pd.to_timedelta(list(range(1)), unit='D')
        result = fmt.Timedelta64Formatter(x, box=True).get_result()
        assert result[0].strip() == "'0 days'"
class TestDatetime64Formatter(object):
    def test_mixed(self):
        """A mix of midnight and midday datetimes forces full
        date+time formatting for every element."""
        x = Series([datetime(2013, 1, 1), datetime(2013, 1, 1, 12), pd.NaT])
        result = fmt.Datetime64Formatter(x).get_result()
        assert result[0].strip() == "2013-01-01 00:00:00"
        assert result[1].strip() == "2013-01-01 12:00:00"
    def test_dates(self):
        """All-midnight datetimes collapse to date-only formatting."""
        x = Series([datetime(2013, 1, 1), datetime(2013, 1, 2), pd.NaT])
        result = fmt.Datetime64Formatter(x).get_result()
        assert result[0].strip() == "2013-01-01"
        assert result[1].strip() == "2013-01-02"
    def test_date_nanos(self):
        """Sub-microsecond timestamps render with nanosecond precision."""
        x = Series([Timestamp(200)])  # 200ns past the epoch
        result = fmt.Datetime64Formatter(x).get_result()
        assert result[0].strip() == "1970-01-01 00:00:00.000000200"
def test_dates_display(self):
# 10170
# make sure that we are consistently display date formatting
x = Series(date_range('20130101 09:00:00', periods=5, freq='D'))
x.iloc[1] = np.nan
result = fmt.Datetime64Formatter(x).get_result()
assert result[0].strip() == "2013-01-01 09:00:00"
assert result[1].strip() == "NaT"
assert result[4].strip() == "2013-01-05 09:00:00"
x = Series(date_range('20130101 09:00:00', periods=5, freq='s'))
x.iloc[1] = np.nan
result = fmt.Datetime64Formatter(x).get_result()
assert result[0].strip() == "2013-01-01 09:00:00"
assert result[1].strip() == "NaT"
assert result[4].strip() == "2013-01-01 09:00:04"
x = Series(date_range('20130101 09:00:00', periods=5, freq='ms'))
x.iloc[1] = np.nan
result = fmt.Datetime64Formatter(x).get_result()
assert result[0].strip() == "2013-01-01 09:00:00.000"
assert result[1].strip() == "NaT"
assert result[4].strip() == "2013-01-01 09:00:00.004"
x = Series(date_range('20130101 09:00:00', periods=5, freq='us'))
x.iloc[1] = np.nan
result = fmt.Datetime64Formatter(x).get_result()
assert result[0].strip() == "2013-01-01 09:00:00.000000"
assert result[1].strip() == "NaT"
assert result[4].strip() == "2013-01-01 09:00:00.000004"
x = Series(date_range('20130101 09:00:00', periods=5, freq='N'))
x.iloc[1] = np.nan
result = fmt.Datetime64Formatter(x).get_result()
assert result[0].strip() == "2013-01-01 09:00:00.000000000"
assert result[1].strip() == "NaT"
assert result[4].strip() == "2013-01-01 09:00:00.000000004"
def test_datetime64formatter_yearmonth(self):
x = Series([datetime(2016, 1, 1), datetime(2016, 2, 2)])
def format_func(x):
return x.strftime('%Y-%m')
formatter = fmt.Datetime64Formatter(x, formatter=format_func)
result = formatter.get_result()
assert result == ['2016-01', '2016-02']
def test_datetime64formatter_hoursecond(self):
x = Series(pd.to_datetime(['10:10:10.100', '12:12:12.120'],
format='%H:%M:%S.%f'))
def format_func(x):
return x.strftime('%H:%M')
formatter = fmt.Datetime64Formatter(x, formatter=format_func)
result = formatter.get_result()
assert result == ['10:10', '12:12']
class TestNaTFormatting(object):
    """NaT must stringify and repr as the literal 'NaT'."""

    def test_repr(self):
        assert repr(pd.NaT) == "NaT"

    def test_str(self):
        assert str(pd.NaT) == "NaT"
class TestDatetimeIndexFormat(object):
    """Tests for DatetimeIndex.format(): NaT handling, tz, custom formats."""

    def test_datetime(self):
        """Datetime values format with time; NaT formats as 'NaT'."""
        formatted = pd.to_datetime([datetime(2003, 1, 1, 12), pd.NaT]).format()
        assert formatted[0] == "2003-01-01 12:00:00"
        assert formatted[1] == "NaT"

    def test_date(self):
        """Midnight-only values format as plain dates."""
        formatted = pd.to_datetime([datetime(2003, 1, 1), pd.NaT]).format()
        assert formatted[0] == "2003-01-01"
        assert formatted[1] == "NaT"

    def test_date_tz(self):
        """UTC-localized values include the +00:00 offset."""
        formatted = pd.to_datetime([datetime(2013, 1, 1)], utc=True).format()
        assert formatted[0] == "2013-01-01 00:00:00+00:00"

        formatted = pd.to_datetime(
            [datetime(2013, 1, 1), pd.NaT], utc=True).format()
        assert formatted[0] == "2013-01-01 00:00:00+00:00"

    def test_date_explicit_date_format(self):
        """date_format and na_rep arguments are honored."""
        formatted = pd.to_datetime([datetime(2003, 2, 1), pd.NaT]).format(
            date_format="%m-%d-%Y", na_rep="UT")
        assert formatted[0] == "02-01-2003"
        assert formatted[1] == "UT"
class TestDatetimeIndexUnicode(object):
    """str() of a DatetimeIndex shows quoted, comma-separated entries."""

    def test_dates(self):
        text = str(pd.to_datetime([datetime(2013, 1, 1), datetime(2014, 1, 1)
                                   ]))
        assert "['2013-01-01'," in text
        assert ", '2014-01-01']" in text

    def test_mixed(self):
        # A single mid-day value forces the full datetime format for all.
        text = str(pd.to_datetime([datetime(2013, 1, 1), datetime(
            2014, 1, 1, 12), datetime(2014, 1, 1)]))
        assert "'2013-01-01 00:00:00'," in text
        assert "'2014-01-01 00:00:00']" in text
class TestStringRepTimestamp(object):
    """str(Timestamp) must match str(datetime) for equivalent values."""

    def test_no_tz(self):
        dt_date = datetime(2013, 1, 2)
        assert str(dt_date) == str(Timestamp(dt_date))

        dt_datetime = datetime(2013, 1, 2, 12, 1, 3)
        assert str(dt_datetime) == str(Timestamp(dt_datetime))

        dt_datetime_us = datetime(2013, 1, 2, 12, 1, 3, 45)
        assert str(dt_datetime_us) == str(Timestamp(dt_datetime_us))

        # Sub-microsecond precision has no datetime equivalent; check the
        # literal rendering instead.
        ts_nanos_only = Timestamp(200)
        assert str(ts_nanos_only) == "1970-01-01 00:00:00.000000200"

        ts_nanos_micros = Timestamp(1200)
        assert str(ts_nanos_micros) == "1970-01-01 00:00:00.000001200"

    def test_tz_pytz(self):
        """Parity holds for pytz-localized datetimes."""
        dt_date = datetime(2013, 1, 2, tzinfo=pytz.utc)
        assert str(dt_date) == str(Timestamp(dt_date))

        dt_datetime = datetime(2013, 1, 2, 12, 1, 3, tzinfo=pytz.utc)
        assert str(dt_datetime) == str(Timestamp(dt_datetime))

        dt_datetime_us = datetime(2013, 1, 2, 12, 1, 3, 45, tzinfo=pytz.utc)
        assert str(dt_datetime_us) == str(Timestamp(dt_datetime_us))

    def test_tz_dateutil(self):
        """Parity holds for dateutil-localized datetimes."""
        utc = dateutil.tz.tzutc()

        dt_date = datetime(2013, 1, 2, tzinfo=utc)
        assert str(dt_date) == str(Timestamp(dt_date))

        dt_datetime = datetime(2013, 1, 2, 12, 1, 3, tzinfo=utc)
        assert str(dt_datetime) == str(Timestamp(dt_datetime))

        dt_datetime_us = datetime(2013, 1, 2, 12, 1, 3, 45, tzinfo=utc)
        assert str(dt_datetime_us) == str(Timestamp(dt_datetime_us))

    def test_nat_representations(self):
        """str, repr and isoformat all yield 'NaT'."""
        for f in (str, repr, methodcaller('isoformat')):
            assert f(pd.NaT) == 'NaT'
def test_format_percentiles():
    """format_percentiles chooses minimal precision and validates the [0,1] range."""
    result = fmt.format_percentiles([0.01999, 0.02001, 0.5, 0.666666, 0.9999])
    expected = ['1.999%', '2.001%', '50%', '66.667%', '99.99%']
    assert result == expected

    result = fmt.format_percentiles([0, 0.5, 0.02001, 0.5, 0.666666, 0.9999])
    expected = ['0%', '50%', '2.0%', '50%', '66.67%', '99.99%']
    assert result == expected

    # NaN, out-of-range, and non-numeric inputs must all raise ValueError.
    msg = r"percentiles should all be in the interval \[0,1\]"
    with pytest.raises(ValueError, match=msg):
        fmt.format_percentiles([0.1, np.nan, 0.5])
    with pytest.raises(ValueError, match=msg):
        fmt.format_percentiles([-0.001, 0.1, 0.5])
    with pytest.raises(ValueError, match=msg):
        fmt.format_percentiles([2, 0.1, 0.5])
    with pytest.raises(ValueError, match=msg):
        fmt.format_percentiles([0.1, 0.5, 'a'])
def test_repr_html_ipython_config(ip):
    """_repr_html_ keeps working after the IPython config object is touched."""
    code = textwrap.dedent("""\
    import pandas as pd
    df = pd.DataFrame({"A": [1, 2]})
    df._repr_html_()

    cfg = get_ipython().config
    cfg['IPKernelApp']['parent_appname']
    df._repr_html_()
    """)
    result = ip.run_cell(code)
    assert not result.error_in_exec
| GuessWhoSamFoo/pandas | pandas/tests/io/formats/test_format.py | Python | bsd-3-clause | 110,932 |
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from copy import deepcopy
from recipe_engine import recipe_test_api
class iOSTestApi(recipe_test_api.RecipeTestApi):
    """Test API supplying canned data (build configs, host info, test
    results) for exercising the iOS recipe module."""

    @recipe_test_api.mod_test_data
    @staticmethod
    def build_config(config):
        # Deep-copied so a test cannot mutate shared config data.
        return deepcopy(config)

    def make_test_build_config(self, config):
        """Inject *config* as this builder's build config test data."""
        return self.build_config(config)

    @recipe_test_api.mod_test_data
    @staticmethod
    def parent_build_config(config):
        # Deep-copied for the same reason as build_config above.
        return deepcopy(config)

    def make_test_build_config_for_parent(self, config):
        """Inject *config* as the parent builder's build config test data."""
        return self.parent_build_config(config)

    def host_info(self):
        """Canned JSON output mimicking the host-info step (OS version,
        Xcode version/build and installed SDKs)."""
        return self.m.json.output({
            'Mac OS X Version': '1.2.3',
            'Xcode Version': '6.7.8',
            'Xcode Build Version': '5D342509a',
            'Xcode SDKs': [
                'fake sdk 1.0',
                'fake sdk 1.1',
                'fake sdk 2.0',
            ],
        })

    def test_results(self):
        """Canned JSON output mimicking a test step's result payload
        (links and log lines)."""
        return self.m.json.output({
            'links': {
                'fake URL text': 'fake URL',
            },
            'logs': {
                'fake log': [
                    'fake log line 1',
                    'fake log line 2',
                ],
            }
        })
| eunchong/build | scripts/slave/recipe_modules/ios/test_api.py | Python | bsd-3-clause | 1,200 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from mock import patch
from oauthlib.oauth2 import LegacyApplicationClient
from ....unittest import TestCase
@patch('time.time', new=lambda: 1000)
class LegacyApplicationClientTest(TestCase):
    """Tests for the OAuth2 resource-owner password credentials client.

    time.time is patched to 1000 for the whole class, so a token with
    expires_in=3600 is expected to carry expires_at=4600.
    """

    client_id = "someclientid"
    scope = ["/profile"]
    kwargs = {
        "some": "providers",
        "require": "extra arguments"
    }

    username = "foo"
    password = "bar"
    body = "not=empty"

    # Expected request bodies: bare grant, and grant plus extra kwargs.
    body_up = "not=empty&grant_type=password&username=%s&password=%s" % (username, password)
    body_kwargs = body_up + "&some=providers&require=extra+arguments"

    token_json = ('{ "access_token":"2YotnFZFEjr1zCsicMWpAA",'
                  ' "token_type":"example",'
                  ' "expires_in":3600,'
                  ' "scope":"/profile",'
                  ' "refresh_token":"tGzv3JOkF0XG5Qx2TlKWIA",'
                  ' "example_parameter":"example_value"}')
    token = {
        "access_token": "2YotnFZFEjr1zCsicMWpAA",
        "token_type": "example",
        "expires_in": 3600,
        "expires_at": 4600,
        "scope": scope,
        "refresh_token": "tGzv3JOkF0XG5Qx2TlKWIA",
        "example_parameter": "example_value"
    }

    def test_request_body(self):
        """prepare_request_body appends the grant, credentials and kwargs."""
        client = LegacyApplicationClient(self.client_id)

        # Basic, no extra arguments
        body = client.prepare_request_body(self.username, self.password,
                                           body=self.body)
        self.assertFormBodyEqual(body, self.body_up)

        # With extra parameters
        body = client.prepare_request_body(self.username, self.password,
                                           body=self.body, **self.kwargs)
        self.assertFormBodyEqual(body, self.body_kwargs)

    def test_parse_token_response(self):
        """parse_request_body_response populates the token attributes; a
        scope mismatch triggers a Warning."""
        client = LegacyApplicationClient(self.client_id)

        # Parse code and state
        response = client.parse_request_body_response(self.token_json, scope=self.scope)
        self.assertEqual(response, self.token)
        self.assertEqual(client.access_token, response.get("access_token"))
        self.assertEqual(client.refresh_token, response.get("refresh_token"))
        self.assertEqual(client.token_type, response.get("token_type"))

        # Mismatching state
        self.assertRaises(Warning, client.parse_request_body_response, self.token_json, scope="invalid")
| lepture/oauthlib | tests/oauth2/rfc6749/clients/test_legacy_application.py | Python | bsd-3-clause | 2,383 |
# (c) 2011, 2012 Georgia Tech Research Corporation
# This source code is released under the New BSD license. Please see
# http://wiki.quantsoftware.org/index.php?title=QSTK_License
# for license details.
#
# Created on October <day>, 2011
#
# @author: Vishal Shekhar
# @contact: mailvishalshekhar@gmail.com
# @summary: Utiltiy script to create list of symbols for study.
import qstkutil.DataAccess as da
import qstkutil.qsdateutil as du
import datetime as dt
dataobj = da.DataAccess('Norgate')
delistSymbols = set(dataobj.get_symbols_in_sublist('/US/Delisted Securities'))
allSymbols = set(dataobj.get_all_symbols())  # by default Alive symbols only
aliveSymbols = list(allSymbols - delistSymbols)  # set difference is smart

startday = dt.datetime(2008, 1, 1)
endday = dt.datetime(2009, 12, 31)
timeofday = dt.timedelta(hours=16)
timestamps = du.getNYSEdays(startday, endday, timeofday)

# Actual Close Prices of aliveSymbols and allSymbols
aliveSymbsclose = dataobj.get_data(timestamps, aliveSymbols, 'actual_close')
allSymbsclose = dataobj.get_data(timestamps, allSymbols, 'actual_close')


def _write_sub_dollar_symbols(close_prices, symbols, out_fname):
    """Write one symbol per line for every symbol that closed below $1 on
    some, but not all, of the sampled days.

    Previously this logic was duplicated for each output file, the handle
    shadowed the builtin ``file`` and was never closed on error; ``with``
    guarantees the file is closed.
    """
    with open(out_fname, 'w') as out:
        for symbol in symbols:
            belowdollar = len(close_prices[symbol][close_prices[symbol] < 1.0])
            # Skip symbols that were *always* below $1 (or never below it).
            if belowdollar and (len(close_prices[symbol]) > belowdollar):
                out.write(str(symbol) + '\n')


_write_sub_dollar_symbols(aliveSymbsclose, aliveSymbols, 'aliveSymbols2')
_write_sub_dollar_symbols(allSymbsclose, allSymbols, 'allSymbols2')
| grahesh/Stock-Market-Event-Analysis | qstkstudy/stockListGen.py | Python | bsd-3-clause | 1,592 |
import collections
def make_enum(name, fields):
    """Build an immutable enum-like object: each field name maps to its
    positional index (0, 1, 2, ...)."""
    enum_cls = collections.namedtuple(name, fields)
    return enum_cls._make(range(len(fields)))
| tingelst/pymanopt | pymanopt/tools/__init__.py | Python | bsd-3-clause | 119 |
# coding=utf-8
"""
Collector that reports amavis metrics as reported by amavisd-agent
#### Dependencies
* amavisd-agent must be present in PATH
"""
import os
import subprocess
import re
import diamond.collector
import diamond.convertor
from diamond.collector import str_to_bool
class AmavisCollector(diamond.collector.Collector):
    """Diamond collector that parses `amavisd-agent -c 1` output and
    publishes the reported mail-filtering metrics."""

    # From the source of amavisd-agent and it seems like the three interesting
    # formats are these: ("x y/h", "xMB yMB/h", "x s y s/msg"),
    # so this, ugly as it is to hardcode it this way, it should be right.
    #
    # The other option would be to directly read and decode amavis' berkeley
    # db, and I don't even want to get there
    matchers = [
        re.compile(r'^\s*(?P<name>[\w]+)\s+(?P<time>[\d]+) s\s+'
                   r'(?P<frequency>[\d.]+) s/msg\s+\([\w]+\)\s*$'),
        re.compile(r'^\s*(?P<name>[\w.-]+)\s+(?P<count>[\d]+)\s+'
                   r'(?P<frequency>[\d.]+)/h\s+(?P<percentage>[\d.]+) %'
                   r'\s\([\w]+\)\s*$'),
        re.compile(r'^\s*(?P<name>[\w.-]+)\s+(?P<size>[\d]+)MB\s+'
                   r'(?P<frequency>[\d.]+)MB/h\s+(?P<percentage>[\d.]+) %'
                   r'\s\([\w]+\)\s*$'),
    ]

    def get_default_config_help(self):
        """Describe the collector-specific configuration keys."""
        config_help = super(AmavisCollector, self).get_default_config_help()
        config_help.update({
            'amavisd_exe': 'The path to amavisd-agent',
            'use_sudo': 'Call amavisd-agent using sudo',
            'sudo_exe': 'The path to sudo',
            'sudo_user': 'The user to use if using sudo',
        })
        return config_help

    def get_default_config(self):
        """Return defaults: run /usr/sbin/amavisd-agent directly, sudo off."""
        config = super(AmavisCollector, self).get_default_config()
        config.update({
            'path': 'amavis',
            'amavisd_exe': '/usr/sbin/amavisd-agent',
            'use_sudo': False,
            'sudo_exe': '/usr/bin/sudo',
            'sudo_user': 'amavis',
        })
        return config

    def collect(self):
        """
        Run amavisd-agent once, parse each output line against the known
        formats and publish every captured group as a metric named
        "<record name>.<metric>".  Returns True on success, None if the
        agent could not be executed.
        """
        try:
            if str_to_bool(self.config['use_sudo']):
                # Use -u instead of --user as the former is more portable. Not
                # all versions of sudo support the long form --user.
                cmdline = [
                    self.config['sudo_exe'], '-u', self.config['sudo_user'],
                    '--', self.config['amavisd_exe'], '-c', '1'
                ]
            else:
                cmdline = [self.config['amavisd_exe'], '-c', '1']
            agent = subprocess.Popen(cmdline, stdout=subprocess.PIPE)
            agent_out = agent.communicate()[0]
            lines = agent_out.strip().split(os.linesep)

            for line in lines:
                for rex in self.matchers:
                    res = rex.match(line)
                    if res:
                        groups = res.groupdict()
                        name = groups['name']
                        for metric, value in groups.items():
                            if metric == 'name':
                                continue
                            # Monotonic totals (count/time) are counters with
                            # integer precision; rates/percentages are gauges.
                            mtype = 'GAUGE'
                            precision = 2
                            if metric in ('count', 'time'):
                                mtype = 'COUNTER'
                                precision = 0
                            self.publish("{}.{}".format(name, metric),
                                         value, metric_type=mtype,
                                         precision=precision)
        except OSError as err:
            self.log.error("Could not run %s: %s",
                           self.config['amavisd_exe'],
                           err)
            return None

        return True
| MichaelDoyle/Diamond | src/collectors/amavis/amavis.py | Python | mit | 3,732 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import subprocess
import gdb
import pwndbg.arch
import pwndbg.color.memory as M
import pwndbg.commands
import pwndbg.config
import pwndbg.memory
import pwndbg.regs
import pwndbg.stack
import pwndbg.vmmap
import pwndbg.wrappers
parser = argparse.ArgumentParser(description='Shows offsets of the specified address to useful other locations')
parser.add_argument('address', nargs='?', default='$pc',
help='Address to inspect')
def print_line(name, addr, first, second, op, width = 20):
    """Print one aligned offset line: "<name> <addr> = <first> <op> <second>".

    *first* may be a plain string placeholder instead of an address; it is
    then left-justified to the width of the formatted address so the
    columns still line up.
    """
    print("{} {} = {} {} {:#x}".format(name.rjust(width), M.get(addr),
        M.get(first) if not isinstance(first, str) else first.ljust(len(hex(addr).rstrip('L'))),
        op, second,))
def xinfo_stack(page, addr):
    """Print offsets of a stack address relative to stack landmarks.

    Shows the distance to the top/end of the stack mapping, to the stack
    pointer and (when the frame pointer lives on the same mapping) to the
    frame pointer; when a stack canary value is known, also to the nearest
    canary located above *addr*.
    """
    # If it's a stack address, print offsets to top and bottom of stack, as
    # well as offsets to current stack and base pointer (if used by debugee)
    sp = pwndbg.regs.sp
    frame = pwndbg.regs[pwndbg.regs.frame]
    frame_mapping = pwndbg.vmmap.find(frame)

    print_line("Stack Top", addr, page.vaddr, addr - page.vaddr, "+")
    print_line("Stack End", addr, page.end, page.end - addr, "-")
    print_line("Stack Pointer", addr, sp, addr - sp, "+")

    if frame_mapping and page.vaddr == frame_mapping.vaddr:
        print_line("Frame Pointer", addr, frame, frame - addr, "-")

    canary_value = pwndbg.commands.canary.canary_value()[0]

    if canary_value is not None:
        # Scan the stack mappings for every occurrence of the canary value
        # and report the closest one located after *addr*.
        all_canaries = list(
            pwndbg.search.search(pwndbg.arch.pack(canary_value), mappings=pwndbg.stack.stacks.values())
        )
        follow_canaries = sorted(filter(lambda a: a > addr, all_canaries))
        if follow_canaries is not None and len(follow_canaries) > 0:
            nxt = follow_canaries[0]
            print_line("Next Stack Canary", addr, nxt, nxt - addr, "-")
def xinfo_mmap_file(page, addr):
    """Print file-relative offsets for an address inside a mapped file.

    Reports the offset from the ELF base load address, from the containing
    PT_LOAD segment, from the start of the file on disk, and lists any
    containing ELF sections.
    """
    # If it's an address pointing into a memory mapped file, print offsets
    # to beginning of file in memory and on disk
    file_name = page.objfile

    objpages = filter(lambda p: p.objfile == file_name, pwndbg.vmmap.get())
    first = sorted(objpages, key = lambda p: p.vaddr)[0]

    # print offset from ELF base load address
    rva = addr - first.vaddr
    print_line("File (Base)", addr, first.vaddr, rva, "+")

    # find possible LOAD segments that designate memory and file backings
    # (x_vaddr_mem_end / x_vaddr_file_end look like precomputed segment end
    # addresses supplied by pwndbg.elf -- confirm in that module)
    containing_loads = [seg for seg in pwndbg.elf.get_containing_segments(file_name, first.vaddr, addr)
                        if seg['p_type'] == 'PT_LOAD']
    for segment in containing_loads:
        if segment['p_type'] == 'PT_LOAD' and addr < segment['x_vaddr_mem_end']:
            offset = addr - segment['p_vaddr']
            print_line('File (Segment)', addr, segment['p_vaddr'], offset, '+')
            break

    for segment in containing_loads:
        if segment['p_type'] == 'PT_LOAD' and addr < segment['x_vaddr_file_end']:
            file_offset = segment['p_offset'] + (addr - segment['p_vaddr'])
            print_line("File (Disk)", addr, file_name, file_offset, "+")
            break
    else:
        # Address is in the mapping but beyond the file-backed portion
        # (e.g. .bss).
        print('{} {} = [not file backed]'.format('File (Disk)'.rjust(20), M.get(addr)))

    containing_sections = pwndbg.elf.get_containing_sections(file_name, first.vaddr, addr)
    if len(containing_sections) > 0:
        print('\n Containing ELF sections:')
        for sec in containing_sections:
            print_line(sec['x_name'], addr, sec['sh_addr'], addr - sec['sh_addr'], '+')
def xinfo_default(page, addr):
    """Fallback: print only the offset from the beginning of the mapping."""
    # Just print the distance to the beginning of the mapping
    print_line("Mapped Area", addr, page.vaddr, addr - page.vaddr, "+")
@pwndbg.commands.ArgparsedCommand(parser)
@pwndbg.commands.OnlyWhenRunning
def xinfo(address=None):
    """Command entry point: show the containing mapping and offset
    information for *address* (defaults to $pc via the parser)."""
    addr = int(address)
    addr &= pwndbg.arch.ptrmask  # truncate to the target's pointer width

    page = pwndbg.vmmap.find(addr)

    if page is None:
        print("\n Virtual address {:#x} is not mapped.".format(addr))
        return

    print("Extended information for virtual address {}:".format(M.get(addr)))

    print("\n Containing mapping:")
    print(M.get(address, text=str(page)))

    print("\n Offset information:")

    # Stack addresses get the richer stack-relative report.
    if page.is_stack:
        xinfo_stack(page, addr)
    else:
        xinfo_default(page, addr)

    if page.is_memory_mapped_file:
        xinfo_mmap_file(page, addr)
| 0xddaa/pwndbg | pwndbg/commands/xinfo.py | Python | mit | 4,499 |
# -*- coding: utf-8 -*-
from .models import User
from .views import user
from .constants import USER_ROLE, ADMIN, USER, USER_STATUS, NEW, ACTIVE
| jessekl/flixr | fbone/modules/user/__init__.py | Python | mit | 146 |
#!/usr/bin/env python
"""Parses language data from IANA subtag registry plus several other files,
and outputs JSON data in the following format:
[
{
'name': 'Ghotuo',
'code': { 'three': 'aaa' },
'country': ['Nigeria'],
'altNames': [],
},
{
'name': 'Alumu',
'code': { 'three': 'aab' },
'country': ['Nigeria'],
'altNames': ['Alumu', 'Tesu', 'Arum', 'Alumu-Tesu', 'Alumu', 'Arum-Cesu', 'Arum-Chessu', 'Arum-Tesu'],
},
# ...
{
'name': 'Waorani',
'code': { 'three': 'auc' },
'country': ['Brazil'],
'altNames': ['Huaorani', 'Sabela', 'Waodani', 'Auca'], # Pejorative names like Auca are *not* flagged as such
}
# ...
{
'name': 'English',
'code': { 'two': 'en', 'three': 'eng' },
'country': ['Australia', 'United Kingdom', 'United States', ...],
'altNames': ['Belfast', 'Birmingham', ...], # Dialects are *not* flagged as such
}
# ...
]"""
import os, sys
import re
from pprint import pprint, pformat
import codecs
import collections
import json
# Constants - mostly hardcoded filenames
SUBTAG_REGISTRY_FNAME = "ianaSubtagRegistry.txt"
COUNTRY_CODES_FNAME = "CountryCodes.txt"
LANGUAGE_CODES_FNAME = "LanguageCodes.txt"
LANGUAGE_INDEX_FNAME = "LanguageIndex.txt"
CONVERT_2_TO_3_FNAME = "TwoToThreeCodes.txt"
OUTPUT_FNAME = "inputSystems_languages.js"
OUTPUT_KEY_ORDER = ['name', 'code', 'country', 'altNames']
# OUTPUT_PREFIX is the text to write *before* the JSON output
OUTPUT_PREFIX = """\
'use strict';
// THIS FILE IS AUTOMATICALLY GENERATED.
// Do not make changes to this file; they will be overwritten.
// input systems languages data
var _inputSystems_languages = """
# OUTPUT_SUFFIX is the text to write *after* the JSON output
OUTPUT_SUFFIX = ";\n"
def read_file(fname):
    """Read *fname* as UTF-8 text, stripping a leading BOM if present.

    NOTE(review): the 'rU' (universal newline) mode flag was written for
    Python 2; newer Python versions deprecate/reject 'U' -- confirm before
    running this script on Python 3.
    """
    with codecs.open(fname, 'rU', 'utf-8-sig') as f:
        result = f.read() # utf-8-sig means strip BOM from start of file, if present
    return result
def read_all_files():
    """Read every input data file into a dict keyed by dataset name.

    Returns None if any of the files is missing or unreadable.
    """
    sources = (
        ("subtags", SUBTAG_REGISTRY_FNAME),
        ("ccs", COUNTRY_CODES_FNAME),
        ("lcs", LANGUAGE_CODES_FNAME),
        ("lndx", LANGUAGE_INDEX_FNAME),
        ("2to3", CONVERT_2_TO_3_FNAME),
    )
    try:
        return {key: read_file(fname) for key, fname in sources}
    except IOError:
        return None
def parse_subtag_registry(raw_text):
    """Parse the IANA subtag registry into records grouped by type.

    Returns a dict of lists keyed by record type ('language', 'extlang',
    'script', 'region', 'variant', 'grandfathered', 'redundant').  Each
    record is a dict of its fields; 'Description' is collected into a list
    because it can, and often does, appear more than once per record.
    """
    grouped = collections.defaultdict(list)
    for raw_record in raw_text.split(u"%%\n"):
        # The very first chunk of the file carries only the File-Date header.
        if raw_record.startswith(u"File-Date:"):
            continue
        # Undo line continuations (newline + two spaces), then collapse runs
        # of spaces into one, as the registry spec allows.
        flattened = raw_record.replace(u"\n ", u" ")
        flattened = re.sub(u" +", u" ", flattened)
        fields = {}
        for field_line in flattened.splitlines():
            field_name, field_value = field_line.split(": ", 1)
            if field_name == 'Description':
                fields.setdefault(field_name, []).append(field_value)
            else:
                fields[field_name] = field_value
        grouped[fields[u'Type']].append(fields)
    return grouped
def parse_tab_separated_file(raw_text, first_line_contains_field_names=True):
    """Parse tab-separated text.

    Returns a list of dicts (the header row supplies the keys) when
    first_line_contains_field_names is True, otherwise a list of lists.
    Blank lines yield empty records.
    """
    rows = []
    lines = raw_text.splitlines()
    header = None
    if first_line_contains_field_names:
        header = lines[0].split('\t')
        lines = lines[1:]
    for line in lines:
        if line.strip():
            cells = [cell.strip() for cell in line.split('\t')]
        else:
            cells = []
        if header is not None:
            rows.append(dict(zip(header, cells)))
        else:
            rows.append(cells)
    return rows
def parse_all_files(data):
    """Parse every raw file and derive the lookup tables used downstream."""
    parsed = {
        'subtags': parse_subtag_registry(data['subtags']),
        'ccs': parse_tab_separated_file(data['ccs'], True),
        'lcs': parse_tab_separated_file(data['lcs'], True),
        'lndx': parse_tab_separated_file(data['lndx'], True),
        '2to3': parse_tab_separated_file(data['2to3'], False),
    }
    # Lookup tables: 2-letter <-> 3-letter language codes, and country
    # id -> display name.
    parsed['2to3_lookup'] = {row[0]: row[1] for row in parsed['2to3']}
    parsed['3to2_lookup'] = {row[1]: row[0] for row in parsed['2to3']}
    parsed['country_lookup'] = {row['CountryID']: row['Name'] for row in parsed['ccs']}
    return parsed
def build_language_data(data):
    """Merge LanguageIndex rows into one record per language.

    Records are keyed by the preferred language code (2-letter ISO 639-1
    when one exists, otherwise the 3-letter code).  The primary name comes
    from the 'L' (language) row; every other row name goes into the
    altNames set; countries accumulate into a set.

    Fix: dict.has_key() is Python-2-only and breaks on Python 3; replaced
    with the `in` operator, which behaves identically on both versions.
    """
    result = collections.OrderedDict()
    for language_record in data['lndx']:
        langid3 = language_record[u'LangID']
        # 2-letter code preferred, 3-letter code is fallback
        langid = data['3to2_lookup'].get(langid3, langid3)
        record = result.get(langid, {})
        if 'code' not in record:
            record['code'] = {}
            if len(langid) == 2:
                record['code']['two'] = langid
            record['code']['three'] = langid3
        country = data['country_lookup'].get(language_record[u'CountryID'])
        if country:
            record.setdefault('country', set()).add(country)
        name = language_record['Name']
        if language_record['NameType'] == 'L':
            record['name'] = name
        else:
            record.setdefault('altNames', set()).add(name)
        if langid not in result:
            result[langid] = record
    return result
def build_regions_data(data):
    """Map CountryID -> country name, preserving the input file's order."""
    return collections.OrderedDict(
        (row['CountryID'], row['Name']) for row in data['ccs'])
def build_scripts_data(data):
    """Map script subtag -> description list, preserving registry order."""
    return collections.OrderedDict(
        (record['Subtag'], record['Description'])
        for record in data['subtags']['script'])
def write_json(final_result, out_fname, prefix, suffix, fix_records=False):
    """Serialize *final_result* to *out_fname* as UTF-8 JSON wrapped in
    *prefix* and *suffix*.

    When fix_records is True, *final_result* is a mapping of language
    records: the 'country'/'altNames' sets are converted to sorted lists
    (added as empty lists when missing) and keys are emitted in
    OUTPUT_KEY_ORDER so the generated file is stable across runs.

    Fixes: file mode was 'wU' -- the universal-newline flag 'U' is only
    valid for reading and is rejected when combined with 'w'; also replaced
    Python-2-only itervalues()/has_key() with portable equivalents.
    """
    if fix_records:
        records_for_output = []
        for record in final_result.values():
            for key in ['country', 'altNames']:
                if key in record:
                    record[key] = list(sorted(record[key]))
                else:
                    record[key] = []  # Ensure country and altNames lists exist, even if they're empty
            # Rearrange output record so keys will be in predictable order in JSON file
            new_record = collections.OrderedDict()
            for key in OUTPUT_KEY_ORDER:
                new_record[key] = record[key]
            records_for_output.append(new_record)
    else:
        records_for_output = final_result
    with codecs.open(out_fname, 'w', 'utf-8') as f:
        f.write(prefix)
        json.dump(records_for_output, f, ensure_ascii=False, indent=4, separators=(',', ': '))
        f.write(suffix)
def main():
    """Read the input data files, parse them, and emit the languages,
    regions and scripts JSON data files."""
    sys.stderr.write('Reading files...\n')
    data = read_all_files()
    if not data:
        sys.stderr.write("Error reading input data files\n")
        sys.exit(2)
    sys.stderr.write('Parsing files...\n')
    data = parse_all_files(data)
    sys.stderr.write('Preparing JSON output...\n')
    langdata = build_language_data(data)
    write_json(langdata, OUTPUT_FNAME, OUTPUT_PREFIX, OUTPUT_SUFFIX, fix_records=True)
    # The regions/scripts outputs reuse the language filename/prefix/suffix
    # with the word 'languages' rewritten.
    regdata = build_regions_data(data)
    write_json(regdata,
               OUTPUT_FNAME.replace('languages', 'regions'),
               OUTPUT_PREFIX.replace('languages', 'regions'),
               OUTPUT_SUFFIX.replace('languages', 'regions'))
    scriptdata = build_scripts_data(data)
    write_json(scriptdata,
               OUTPUT_FNAME.replace('languages', 'scripts'),
               OUTPUT_PREFIX.replace('languages', 'scripts'),
               OUTPUT_SUFFIX.replace('languages', 'scripts'))
| sil-jterm-2015/sfwebchecks | src/scripts/language picker/build-json-language-data.py | Python | mit | 8,241 |
#!/usr/bin/python
"""This script run the pathologic """
try:
import optparse, sys, re, csv, traceback
from optparse import OptionGroup
import pickle
import math
from libs.python_modules.taxonomy.LCAComputation import *
import operator
from os import path, _exit, remove, rename
import logging.handlers
from glob import glob
from libs.python_modules.utils.sysutil import pathDelim
from libs.python_modules.utils.metapathways_utils import fprintf, printf, eprintf, exit_process
from libs.python_modules.utils.sysutil import getstatusoutput
from libs.python_modules.utils.pathwaytoolsutils import *
except:
print """ Could not load some user defined module functions"""
print """ Make sure your typed 'source MetaPathwaysrc'"""
print """ """
print traceback.print_exc(10)
sys.exit(3)
PATHDELIM=pathDelim()
def fprintf(file, fmt, *args):
    """printf-style helper: write fmt %% args to the given file object."""
    text = fmt % args
    file.write(text)
def printf(fmt, *args):
    """printf-style helper: write fmt %% args to standard output."""
    message = fmt % args
    sys.stdout.write(message)
def files_exist( files , errorlogger = None):
    """Check the given paths, logging each one that is missing.

    NOTE(review): despite the name, this returns ``not status`` -- i.e.
    True when at least one file is MISSING and False when all exist.
    Callers appear to rely on the inverted meaning; confirm before
    renaming or fixing the return value.
    """
    status = True
    for file in files:
        if not path.exists(file):
            if errorlogger:
                errorlogger.write( 'ERROR\tCould not find ptools input file : ' + file )
            status = False
    return not status
usage = sys.argv[0] + """ -s sample -p pgdb_dir --ptoolsExec pathwaytools_executable """
parser = None
def createParser():
    """Build the module-level optparse parser for this script.

    Fix: the OptionGroup was created and populated but never attached to
    the parser with add_option_group(), so -s/-p/--ptoolsExec/-o were not
    recognized by parse_args().
    """
    global parser
    epilog = """The flat file extraction script"""
    epilog = re.sub(r'\s+', ' ', epilog)
    parser = optparse.OptionParser(usage=usage, epilog = epilog)

    standard_options_group = OptionGroup(parser, "Standard Ptools group" )
    # Input options
    standard_options_group.add_option('-s', '--sample', dest='sample_name', default=None,
                                      help='sample name')
    standard_options_group.add_option('-p', '--pgdb', dest='pgdbdir', default=None,
                                      help='folder of the PGDB')
    standard_options_group.add_option('--ptoolsExec', dest='ptoolsExec', default=None,
                                      help='PathoLogic Executable')
    standard_options_group.add_option("-o", "--output-pwy-table", dest="table_out",
                                      help='the output table for the pathways [REQUIRED]')
    # BUG FIX: the group must be registered or its options are ignored.
    parser.add_option_group(standard_options_group)
import os, signal
TIME = 10
def __StopPathwayTools():
    """Send SIGHUP to every 'pathway-tools-runtime' process listed by `ps xa`."""
    processPATT = re.compile(r'pathway-tools-runtime')
    for line in os.popen("ps xa"):
        fields = line.split()
        pid = fields[0]
        process = fields[4]  # the command column of `ps xa` output

        result = processPATT.search(process)
        if result :
            os.kill(int(pid), signal.SIGHUP)
def StopPathwayTools():
    """Best-effort shutdown of Pathway Tools.

    Signals the process twice (with a pause in between, since the first
    SIGHUP may spawn/leave a child) and removes the stale API socket.
    Errors are deliberately swallowed: failure to stop an already-dead
    instance must not abort the pipeline.
    """
    try:
        __StopPathwayTools()
        time.sleep(TIME)
        __StopPathwayTools()
        time.sleep(TIME)
        if path.exists("/tmp/ptools-socket"):
            remove("/tmp/ptools-socket")
    except:
        pass
def main(argv, errorlogger = None, runcommand = None, runstatslogger = None):
    """Start Pathway Tools in API mode for the sample's ePGDB, extract the
    flat files, then stop Pathway Tools.

    Note: several branches below are disabled with `if False` -- they are
    retained dead code (executable check, reaction-list extraction).
    """
    global parser

    options, args = parser.parse_args(argv)

    # is there a pathwaytools executable installed
    # (disabled: the leading `if False` short-circuits this check)
    if False and not path.exists(options.ptoolsExec):
        eprintf("ERROR\tPathwayTools executable %s not found!\n", options.ptoolsExec)
        if errorlogger:
            errorlogger.printf("ERROR\tPathwayTools executable %s not found!\n", options.ptoolsExec)
        exit_process("ERROR\tPathwayTools executable %s not found!\n" %(options.ptoolsExec))

    # command to build the ePGDB
    command = "%s " %(options.ptoolsExec)
    command += " -api"

    pythonCyc = startPathwayTools(options.sample_name.lower(), options.ptoolsExec, True)
    #resultLines = pythonCyc.getReactionListLines()
    resultLines = pythonCyc.getFlatFiles()
    StopPathwayTools()

    try:
        # Disabled legacy path: re-extract the reaction list to a .tmp file.
        # NOTE(review): it references options.reactions_list, which the
        # parser never defines -- it would fail if re-enabled as-is.
        if False:
            pythonCyc = startPathwayTools(options.sample_name.lower(), options.ptoolsExec, True)
            pythonCyc.setDebug() # disable pathway debug statements
            printf("INFO\tExtracting the reaction list from ePGDB " + options.sample_name + "\n")
            resultLines = pythonCyc.getReactionListLines()
            #pythonCyc.stopPathwayTools()
            reaction_list_file = open(options.reactions_list + ".tmp", 'w')
            for line in resultLines:
                fprintf(reaction_list_file,"%s\n",line.strip())
            reaction_list_file.close()
            StopPathwayTools()
    except:
        print traceback.print_exc(10)
        eprintf("ERROR\tFailed to run extract pathways for %s : \n" %(options.sample_name))
        eprintf("INFO\tKill any other PathwayTools instance running on the machine and try again")
        if errorlogger:
            errorlogger.write("ERROR\tFailed to run extract pathways for %s : " %(options.sample_name))
            errorlogger.write("INFO\tKill any other PathwayTools instance running on the machine and try again\n")
        StopPathwayTools()
def startPathwayTools(organism, ptoolsExec, debug):
    """Stop any running Pathway Tools instance, then start a fresh one for
    *organism* via the PythonCyc API wrapper and return that wrapper."""
    StopPathwayTools()

    pythonCyc = PythonCyc()
    pythonCyc.setDebug(debug = debug)
    pythonCyc.setOrganism(organism)
    pythonCyc.setPToolsExec(ptoolsExec)
    pythonCyc.startPathwayTools()
    return pythonCyc
def runPathologicCommand(runcommand = None):
    """Run *runcommand* through getstatusoutput and return its exit status.

    Returns False when no command is supplied.
    """
    if runcommand is None:
        return False
    status_and_output = getstatusoutput(runcommand)
    return status_and_output[0]
# this is the portion of the code that fixes the name
# this is the portion of the code that fixes the name
def split_attributes(str, attributes):
    """Split a ';'-separated attribute string and insert each piece into
    *attributes*, returning the (mutated) *attributes*.

    NOTE(review): relies on an `insert_attribute` helper that is not
    defined anywhere visible in this module -- confirm it exists at
    runtime.  The first parameter also shadows the builtin `str`.
    """
    rawattributes = re.split(';', str)
    for attribStr in rawattributes:
        insert_attribute(attributes, attribStr)

    return attributes
def fixLine(line, id):
    """Replace the second tab-separated field of *line* with *id*.

    Returns None when the line does not contain exactly two fields.
    """
    parts = line.split('\t')
    if len(parts) != 2:
        return None
    return parts[0] + '\t' + id
def getID(line):
    """Return the second tab-separated field of *line*, or None when the
    line does not contain exactly two fields."""
    parts = line.split('\t')
    if len(parts) != 2:
        return None
    return parts[1]
def write_new_file(lines, output_file):
    """Write each entry of *lines* to *output_file*, one per line.

    Fixes: on IOError the original printed an error and then fell through,
    crashing with a NameError on the unbound file handle -- it now returns
    after reporting; the handle is also closed via try/finally, and the
    Python-2 print statements are parenthesized so the code runs on both
    Python 2 and 3.
    """
    print("Fixing file " + output_file)
    try:
        outputfile = open(output_file, 'w')
    except IOError:
        print("ERROR :Cannot open output file " + output_file)
        return
    try:
        for line in lines:
            outputfile.write("%s\n" % line)
    finally:
        outputfile.close()
def cleanup(string):
    """
    Cleans up pathway long-names for presentation: strips vertical bars,
    ampersands, semicolons, quotes and HTML tags.

    Fix: the pattern for the vertical bar was "|", which in a regex is an
    empty alternation matching nothing, so bars were never removed; it must
    be escaped as r"\|".
    :param string: raw pathway long-name
    :return: cleaned string
    """
    string = re.sub(r"\|", "", string)  # vertical bar (escaped -- see above)
    string = re.sub(r"&", "", string)  # ampersand
    string = re.sub(r";", "", string)  # semicolon
    string = re.sub(r"<[^<]+?>", '', string)  # HTML tags
    string = re.sub(r"\'", "", string)  # remove quotes

    return string
def get_preferred_taxa_name(taxa_id, megan_map, id_to_name):
    """
    Format an NCBI taxa id as "preferred name (id)".

    The MEGAN mapping takes precedence; failing that, the local NCBI
    taxonomy tree is consulted; when the id is known to neither, the name
    falls back to "Unknown" while still carrying the id.
    :param taxa_id: numeric taxa id to translate
    :param megan_map: preferred megan mapping hash
    :param id_to_name: local ncbi tree hash
    :return: "preferred name (id)"
    """
    key = str(taxa_id)
    if key in megan_map:
        label = megan_map[key]
    elif key in id_to_name:
        label = id_to_name[key]
    else:
        label = "Unknown"
    return label + " (" + key + ")"
def MetaPathways_run_pathologic(argv, extra_command = None, errorlogger = None, runstatslogger =None):
    """Pipeline entry point for the BUILD_PGDB step.

    Logs the step marker, then delegates to this module's
    createParser()/main().  Always returns (0, ''); failures are
    reported through the loggers, not the return value.
    """
    if errorlogger != None:
        errorlogger.write("#STEP\tBUILD_PGDB\n")
    createParser()
    # extra_command is forwarded to main() as the shell command to run
    main(argv, errorlogger = errorlogger, runcommand= extra_command, runstatslogger = runstatslogger)
    return (0,'')
# Script entry point: build the CLI parser, then run with the raw
# command-line arguments (createParser/main are defined earlier in this module).
if __name__ == '__main__':
    createParser()
    main(sys.argv[1:])
| kishori82/MetaPathways_Python.3.0 | utilities/extract_flat_files.py | Python | mit | 7,627 |
#!/usr/bin/env python
# Copyright(C) 2011-2016 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import plyvel
import ast
import hashlib
import os
import sys
import threading
from processor import print_log, logger
from utils import bc_address_to_hash_160, hash_160_to_pubkey_address, Hash, \
bytes8_to_int, bytes4_to_int, int_to_bytes8, \
int_to_hex8, int_to_bytes4, int_to_hex4
"""
Patricia tree for hashing unspents
"""
# increase this when database needs to be updated
global GENESIS_HASH
GENESIS_HASH = '000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f'
DB_VERSION = 3
KEYLENGTH = 56 # 20 + 32 + 4
class Node(object):
    """One interior node of the UTXO patricia tree (Python 2 byte strings).

    Serialized layout: a 32-byte big-endian bitmask `k` (bit i set means a
    child exists for byte value i), followed by one 40-byte item per set
    bit, in ascending byte order.  Each item is a 32-byte hash plus an
    8-byte integer value.
    """
    def __init__(self, s):
        # first 32 bytes are the child bitmask, the rest the packed items
        self.k = int(s[0:32].encode('hex'), 16)
        self.s = s[32:]
        if self.k==0 and self.s:
            print "init error", len(self.s), "0x%0.64X" % self.k
            raise BaseException("z")
    def serialized(self):
        """Return the node re-packed into its on-disk byte string."""
        k = "0x%0.64X" % self.k
        k = k[2:].decode('hex')
        assert len(k) == 32
        return k + self.s
    def has(self, c):
        """True when a child exists for byte `c`."""
        return (self.k & (1<<(ord(c)))) != 0
    def is_singleton(self, key):
        """True when the node holds exactly one 40-byte child item."""
        assert self.s != ''
        return len(self.s) == 40
    def get_singleton(self):
        """Return the byte of the single child; only valid for singletons."""
        for i in xrange(256):
            if self.k == (1<<i):
                return chr(i)
        raise BaseException("get_singleton")
    def indexof(self, c):
        """Byte offset of child `c`'s item inside self.s (counts set bits below c)."""
        assert self.k != 0 or self.s == ''
        x = 0
        for i in xrange(ord(c)):
            if (self.k & (1<<i)) != 0:
                x += 40
        return x
    def get(self, c):
        """Return (hash, value) stored for child `c`."""
        x = self.indexof(c)
        ss = self.s[x:x+40]
        _hash = ss[0:32]
        value = bytes8_to_int(ss[32:40])
        return _hash, value
    def set(self, c, h, value):
        """Insert or replace child `c` with (h, value); None hash -> zero bytes."""
        if h is None:
            h = chr(0)*32
        vv = int_to_bytes8(value)
        item = h + vv
        assert len(item) == 40
        if self.has(c):
            self.remove(c)
        x = self.indexof(c)
        self.s = self.s[0:x] + item + self.s[x:]
        self.k |= (1<<ord(c))
        assert self.k != 0
    def remove(self, c):
        """Delete child `c`'s item and clear its bit."""
        x = self.indexof(c)
        self.k &= ~(1<<ord(c))
        self.s = self.s[0:x] + self.s[x+40:]
    def get_hash(self, x, parent):
        """Return (hash, total value) over all children.

        The hash covers the node's skip string (derived from its key `x`
        and its parent key) concatenated with all child hashes; Hash()
        failure yields None.
        """
        if x:
            assert self.k != 0
        skip_string = x[len(parent)+1:] if x != '' else ''
        # `x` is reused below as the running byte offset into self.s
        x = 0
        v = 0
        hh = ''
        for i in xrange(256):
            if (self.k&(1<<i)) != 0:
                ss = self.s[x:x+40]
                hh += ss[0:32]
                v += bytes8_to_int(ss[32:40])
                x += 40
        try:
            _hash = Hash(skip_string + hh)
        except:
            _hash = None
        if x:
            assert self.k != 0
        return _hash, v
    @classmethod
    def from_dict(klass, d):
        """Build a Node from {child byte: (hash, value)}; None hash -> zeros."""
        k = 0
        s = ''
        for i in xrange(256):
            if chr(i) in d:
                k += 1<<i
                h, value = d[chr(i)]
                if h is None: h = chr(0)*32
                vv = int_to_bytes8(value)
                item = h + vv
                assert len(item) == 40
                s += item
        k = "0x%0.64X" % k # 32 bytes
        k = k[2:].decode('hex')
        assert len(k) == 32
        out = k + s
        return Node(out)
class DB(object):
    """Thin wrapper over one plyvel LevelDB handle.

    Writes accumulate in a write batch mirrored by an in-memory cache
    (with the sentinel string 'deleted' marking pending deletes) until
    write() flushes them atomically.
    """
    def __init__(self, path, name, cache_size):
        self.db = plyvel.DB(os.path.join(path, name), create_if_missing=True, compression=None, lru_cache_size=cache_size)
        self.batch = self.db.write_batch()
        self.cache = {}
        self.lock = threading.Lock()
    def put(self, key, s):
        """Stage a write (visible immediately through the cache)."""
        self.batch.put(key, s)
        self.cache[key] = s
    def get(self, key):
        """Read through the pending-write cache, falling back to the db."""
        s = self.cache.get(key)
        if s == 'deleted':
            return None
        if s is None:
            with self.lock:
                s = self.db.get(key)
        return s
    def delete(self, key):
        """Stage a delete, masked in the cache by the 'deleted' sentinel."""
        self.batch.delete(key)
        self.cache[key] = 'deleted'
    def close(self):
        self.db.close()
    def write(self):
        """Atomically flush the batch and reset the cache."""
        with self.lock:
            self.batch.write()
            self.batch.clear()
            self.cache.clear()
    def get_next(self, key):
        """Return the first key in the db that is >= `key`.

        NOTE(review): reads the underlying db only -- pending batched
        writes are not seen here; confirm callers rely on that.
        """
        with self.lock:
            i = self.db.iterator(start=key)
            k, _ = i.next()
            return k
class Storage(object):
    """UTXO patricia tree plus per-address history for an Electrum server.

    Backed by four LevelDB databases:
      utxo - the patricia tree (leaf keys: hash160 + txid + out index),
      hist - per-address spent-output history (80-byte records, pruned),
      addr - backlink serialized outpoint -> address,
      undo - reorg undo records and chain metadata.
    """

    def __init__(self, config, shared, test_reorgs):
        self.shared = shared
        # pending (hash, value) updates per node key, flushed by update_hashes()
        self.hash_list = {}
        # child key -> parent key map built while keys are added/deleted
        self.parents = {}
        # in-memory overlay of skip strings not yet derivable from the db
        self.skip_batch = {}
        self.test_reorgs = test_reorgs
        # init path
        self.dbpath = config.get('leveldb', 'path')
        if not os.path.exists(self.dbpath):
            os.mkdir(self.dbpath)
        try:
            self.db_utxo = DB(self.dbpath, 'utxo', config.getint('leveldb', 'utxo_cache'))
            self.db_hist = DB(self.dbpath, 'hist', config.getint('leveldb', 'hist_cache'))
            self.db_addr = DB(self.dbpath, 'addr', config.getint('leveldb', 'addr_cache'))
            self.db_undo = DB(self.dbpath, 'undo', None)
        except:
            logger.error('db init', exc_info=True)
            self.shared.stop()
        try:
            self.last_hash, self.height, db_version = ast.literal_eval(self.db_undo.get('height'))
        except:
            # fresh database: start at the genesis block with an empty root node
            print_log('Initializing database')
            self.height = 0
            self.last_hash = GENESIS_HASH
            self.pruning_limit = config.getint('leveldb', 'pruning_limit')
            db_version = DB_VERSION
            self.put_node('', Node.from_dict({}))
        # check version
        if db_version != DB_VERSION:
            print_log("Your database '%s' is deprecated. Please create a new database"%self.dbpath)
            self.shared.stop()
            return
        # pruning limit
        try:
            self.pruning_limit = ast.literal_eval(self.db_undo.get('limit'))
        except:
            self.pruning_limit = config.getint('leveldb', 'pruning_limit')
            # NOTE(review): the limit is read back from key 'limit' above but
            # written under key 'version' here -- looks inconsistent; confirm.
            self.db_undo.put('version', repr(self.pruning_limit))
        # compute root hash
        root_node = self.get_node('')
        self.root_hash, coins = root_node.get_hash('', None)
        # print stuff
        print_log("Database version %d."%db_version)
        print_log("Pruning limit for spent outputs is %d."%self.pruning_limit)
        print_log("Blockchain height", self.height)
        print_log("UTXO tree root hash:", self.root_hash.encode('hex'))
        print_log("Coins in database:", coins)

    # convert between bitcoin addresses and 20 bytes keys used for storage.
    @staticmethod
    def address_to_key(addr):
        """Map a base58 address to its 20-byte hash160 storage key."""
        return bc_address_to_hash_160(addr)

    def get_skip(self, key):
        """Skip string stored after `key`, preferring the in-memory overlay."""
        o = self.skip_batch.get(key)
        if o is not None:
            return o
        k = self.db_utxo.get_next(key)
        assert k.startswith(key)
        return k[len(key):]

    def set_skip(self, key, skip):
        """Record a skip string in the in-memory overlay."""
        self.skip_batch[key] = skip

    def get_proof(self, addr):
        """Return (hex key, hex serialized node) pairs along the path to addr."""
        key = self.address_to_key(addr)
        k = self.db_utxo.get_next(key)
        p = self.get_path(k)
        p.append(k)
        out = []
        for item in p:
            v = self.db_utxo.get(item)
            out.append((item.encode('hex'), v.encode('hex')))
        return out

    def get_balance(self, addr):
        """Total unspent value at addr, read from the parent node's entry."""
        key = self.address_to_key(addr)
        k = self.db_utxo.get_next(key)
        if not k.startswith(key):
            return 0
        p = self.get_parent(k)
        d = self.get_node(p)
        letter = k[len(p)]
        return d.get(letter)[1]

    def listunspent(self, addr):
        """UTXOs of addr sorted by height; hard-capped at 1000 entries."""
        key = self.address_to_key(addr)
        if key is None:
            raise BaseException('Invalid Bitcoin address', addr)
        out = []
        with self.db_utxo.lock:
            for k, v in self.db_utxo.db.iterator(start=key):
                if not k.startswith(key):
                    break
                if len(k) == KEYLENGTH:
                    # leaf key layout: hash160(20) + txid(32) + out index(4);
                    # leaf value layout: value(8) + height(4)
                    txid = k[20:52].encode('hex')
                    txpos = bytes4_to_int(k[52:56])
                    h = bytes4_to_int(v[8:12])
                    v = bytes8_to_int(v[0:8])
                    out.append({'tx_hash': txid, 'tx_pos':txpos, 'height': h, 'value':v})
                if len(out) == 1000:
                    print_log('max utxo reached', addr)
                    break
        out.sort(key=lambda x:x['height'])
        return out

    def get_history(self, addr):
        """Address history: current unspents plus pruned spent records.

        Returns a height-sorted list of {'height':..., 'tx_hash':...}.
        """
        out = []
        o = self.listunspent(addr)
        for item in o:
            out.append((item['height'], item['tx_hash']))
        h = self.db_hist.get(addr)
        while h:
            # 80-byte record: spent outpoint(36) + input height(4)
            #               + spending txid(32) + index(4) + output height(4)
            item = h[0:80]
            h = h[80:]
            txi = item[0:32].encode('hex')
            hi = bytes4_to_int(item[36:40])
            txo = item[40:72].encode('hex')
            ho = bytes4_to_int(item[76:80])
            out.append((hi, txi))
            out.append((ho, txo))
        # uniqueness
        out = set(out)
        # sort by height then tx_hash
        out = sorted(out)
        return map(lambda x: {'height':x[0], 'tx_hash':x[1]}, out)

    def get_address(self, txi):
        """Backlink lookup: serialized outpoint -> owning address."""
        return self.db_addr.get(txi)

    def get_undo_info(self, height):
        """Fetch the undo record for `height` (stored modulo 100 blocks).

        NOTE(review): when the record is missing, a message is logged but
        eval(None) below will still raise -- confirm callers expect that.
        """
        s = self.db_undo.get("undo_info_%d" % (height % 100))
        if s is None:
            print_log("no undo info for ", height)
        return eval(s)

    def write_undo_info(self, height, bitcoind_height, undo_info):
        """Keep undo info for the most recent ~100 blocks (always when
        reorg testing is enabled)."""
        if height > bitcoind_height - 100 or self.test_reorgs:
            self.db_undo.put("undo_info_%d" % (height % 100), repr(undo_info))

    @staticmethod
    def common_prefix(word1, word2):
        """Longest common prefix of the two strings."""
        max_len = min(len(word1),len(word2))
        for i in xrange(max_len):
            if word2[i] != word1[i]:
                index = i
                break
        else:
            index = max_len
        return word1[0:index]

    def put_node(self, key, node):
        """Serialize `node` under `key` in the utxo db."""
        self.db_utxo.put(key, node.serialized())

    def get_node(self, key):
        """Deserialize the node at `key`; None when absent."""
        s = self.db_utxo.get(key)
        if s is None:
            return
        return Node(s)

    def add_key(self, target, value, height):
        """Insert leaf `target` (value, height), splitting an existing edge
        into a new intermediate node when necessary, and queue rehashing
        of the touched path."""
        assert len(target) == KEYLENGTH
        path = self.get_path(target, new=True)
        if path is True:
            # duplicate key (see get_path): nothing to insert
            return
        #print "add key: target", target.encode('hex'), "path", map(lambda x: x.encode('hex'), path)
        parent = path[-1]
        parent_node = self.get_node(parent)
        n = len(parent)
        c = target[n]
        if parent_node.has(c):
            # an edge already starts with byte c: split it at the common prefix
            h, v = parent_node.get(c)
            skip = self.get_skip(parent + c)
            child = parent + c + skip
            assert not target.startswith(child)
            prefix = self.common_prefix(child, target)
            index = len(prefix)
            if len(child) == KEYLENGTH:
                # if it's a leaf, get hash and value of new_key from parent
                d = Node.from_dict({
                    target[index]: (None, 0),
                    child[index]: (h, v)
                })
            else:
                # if it is not a leaf, update its hash because skip_string changed
                child_node = self.get_node(child)
                h, v = child_node.get_hash(child, prefix)
                d = Node.from_dict({
                    target[index]: (None, 0),
                    child[index]: (h, v)
                })
            self.set_skip(prefix + target[index], target[index+1:])
            self.set_skip(prefix + child[index], child[index+1:])
            self.put_node(prefix, d)
            path.append(prefix)
            self.parents[child] = prefix
            # update parent skip
            new_skip = prefix[n+1:]
            self.set_skip(parent+c, new_skip)
            parent_node.set(c, None, 0)
            self.put_node(parent, parent_node)
        else:
            # add new letter to parent
            skip = target[n+1:]
            self.set_skip(parent+c, skip)
            parent_node.set(c, None, 0)
            self.put_node(parent, parent_node)
        # write the new leaf
        s = (int_to_hex8(value) + int_to_hex4(height)).decode('hex')
        self.db_utxo.put(target, s)
        # the hash of a leaf is the txid
        _hash = target[20:52]
        self.update_node_hash(target, path, _hash, value)

    def update_node_hash(self, node, path, _hash, value):
        """Record node's ancestor chain and queue its (hash, value) update."""
        c = node
        for x in path[::-1]:
            self.parents[c] = x
            c = x
        self.hash_list[node] = (_hash, value)

    def update_hashes(self):
        """Propagate queued (hash, value) updates bottom-up to the root.

        Processes pending nodes by decreasing key length so children are
        folded into parents before the parents themselves are hashed.
        """
        nodes = {}  # nodes to write
        for i in xrange(KEYLENGTH, -1, -1):
            for node in self.hash_list.keys():
                if len(node) != i:
                    continue
                node_hash, node_value = self.hash_list.pop(node)
                parent = self.parents[node] if node!='' else ''
                if i != KEYLENGTH and node_hash is None:
                    # interior node whose hash was invalidated: recompute it
                    n = self.get_node(node)
                    node_hash, node_value = n.get_hash(node, parent)
                assert node_hash is not None
                if node == '':
                    self.root_hash = node_hash
                    self.root_value = node_value
                    assert self.root_hash is not None
                    break
                # read parent
                d = nodes.get(parent)
                if d is None:
                    d = self.get_node(parent)
                    assert d is not None
                # write value into parent
                letter = node[len(parent)]
                d.set(letter, node_hash, node_value)
                nodes[parent] = d
                # iterate
                grandparent = self.parents[parent] if parent != '' else None
                parent_hash, parent_value = d.get_hash(parent, grandparent)
                if parent_hash is not None:
                    self.hash_list[parent] = (parent_hash, parent_value)
        for k, v in nodes.iteritems():
            self.put_node(k, v)
        # cleanup
        assert self.hash_list == {}
        self.parents = {}
        self.skip_batch = {}

    def get_path(self, target, new=False):
        """Walk from the root toward `target`, returning the ancestor keys.

        With new=True a pre-existing key returns True instead of a path
        (duplicate txids do occur historically, see comment below).
        """
        x = self.db_utxo.get(target)
        if not new and x is None:
            raise BaseException('key not in tree', target.encode('hex'))
        if new and x is not None:
            # raise BaseException('key already in tree', target.encode('hex'))
            # occurs at block 91880 (duplicate txid)
            print_log('key already in tree', target.encode('hex'))
            return True
        remaining = target
        key = ''
        path = []
        while key != target:
            node = self.get_node(key)
            if node is None:
                break
                #raise # should never happen
            path.append(key)
            c = remaining[0]
            if not node.has(c):
                break
            skip = self.get_skip(key + c)
            key = key + c + skip
            if not target.startswith(key):
                break
            remaining = target[len(key):]
        return path

    def delete_key(self, leaf):
        """Delete a leaf, collapsing its parent into the grand-parent when the
        parent is left with a single child; return the leaf's payload."""
        path = self.get_path(leaf)
        #print "delete key", leaf.encode('hex'), map(lambda x: x.encode('hex'), path)
        s = self.db_utxo.get(leaf)
        self.db_utxo.delete(leaf)
        if leaf in self.hash_list:
            del self.hash_list[leaf]
        parent = path[-1]
        letter = leaf[len(parent)]
        parent_node = self.get_node(parent)
        parent_node.remove(letter)
        # remove key if it has a single child
        if parent_node.is_singleton(parent):
            #print "deleting parent", parent.encode('hex')
            self.db_utxo.delete(parent)
            if parent in self.hash_list:
                del self.hash_list[parent]
            l = parent_node.get_singleton()
            _hash, value = parent_node.get(l)
            skip = self.get_skip(parent + l)
            otherleaf = parent + l + skip
            # update skip value in grand-parent
            gp = path[-2]
            gp_items = self.get_node(gp)
            letter = otherleaf[len(gp)]
            new_skip = otherleaf[len(gp)+1:]
            gp_items.set(letter, None, 0)
            self.set_skip(gp+ letter, new_skip)
            #print "gp new_skip", gp.encode('hex'), new_skip.encode('hex')
            self.put_node(gp, gp_items)
            # note: k is not necessarily a leaf
            if len(otherleaf) == KEYLENGTH:
                ss = self.db_utxo.get(otherleaf)
                _hash, value = otherleaf[20:52], bytes8_to_int(ss[0:8])
            else:
                _hash, value = None, None
            self.update_node_hash(otherleaf, path[:-1], _hash, value)
        else:
            self.put_node(parent, parent_node)
            _hash, value = None, None
            self.update_node_hash(parent, path[:-1], _hash, value)
        return s

    def get_parent(self, x):
        """Key of x's immediate ancestor in the tree."""
        p = self.get_path(x)
        return p[-1]

    def get_root_hash(self):
        """Current tree root hash ('' before the first computation)."""
        return self.root_hash if self.root_hash else ''

    def batch_write(self):
        """Flush the pending write batches of all four databases."""
        for db in [self.db_utxo, self.db_addr, self.db_hist, self.db_undo]:
            db.write()

    def close(self):
        for db in [self.db_utxo, self.db_addr, self.db_hist, self.db_undo]:
            db.close()

    def save_height(self, block_hash, block_height):
        """Persist the chain tip (hash, height, schema version)."""
        self.db_undo.put('height', repr((block_hash, block_height, DB_VERSION)))

    def add_to_history(self, addr, tx_hash, tx_pos, value, tx_height):
        """Register a new output for addr: tree leaf plus addr backlink."""
        key = self.address_to_key(addr)
        txo = (tx_hash + int_to_hex4(tx_pos)).decode('hex')
        # write the new history
        self.add_key(key + txo, value, tx_height)
        # backlink
        self.db_addr.put(txo, addr)

    def revert_add_to_history(self, addr, tx_hash, tx_pos, value, tx_height):
        """Undo add_to_history during a reorg."""
        key = self.address_to_key(addr)
        txo = (tx_hash + int_to_hex4(tx_pos)).decode('hex')
        # delete
        self.delete_key(key + txo)
        # backlink
        self.db_addr.delete(txo)

    def get_utxo_value(self, addr, txi):
        """Value (first 8 bytes of the leaf payload) of outpoint txi at addr."""
        key = self.address_to_key(addr)
        leaf = key + txi
        s = self.db_utxo.get(leaf)
        value = bytes8_to_int(s[0:8])
        return value

    def set_spent(self, addr, txi, txid, index, height, undo):
        """Spend outpoint txi of addr: remove the leaf, record undo data, and
        append an 80-byte record to the (pruned) address history."""
        key = self.address_to_key(addr)
        leaf = key + txi
        s = self.delete_key(leaf)
        value = bytes8_to_int(s[0:8])
        in_height = bytes4_to_int(s[8:12])
        undo[leaf] = value, in_height
        # delete backlink txi-> addr
        self.db_addr.delete(txi)
        # add to history
        s = self.db_hist.get(addr)
        if s is None: s = ''
        txo = (txid + int_to_hex4(index) + int_to_hex4(height)).decode('hex')
        s += txi + int_to_bytes4(in_height) + txo
        # keep only the newest pruning_limit records (80 bytes each)
        s = s[ -80*self.pruning_limit:]
        self.db_hist.put(addr, s)

    def revert_set_spent(self, addr, txi, undo):
        """Undo set_spent: restore the leaf, backlink, and trim the history."""
        key = self.address_to_key(addr)
        leaf = key + txi
        # restore backlink
        self.db_addr.put(txi, addr)
        v, height = undo.pop(leaf)
        self.add_key(leaf, v, height)
        # revert add to history
        s = self.db_hist.get(addr)
        # s might be empty if pruning limit was reached
        if not s:
            return
        assert s[-80:-44] == txi
        s = s[:-80]
        self.db_hist.put(addr, s)

    def import_transaction(self, txid, tx, block_height, touched_addr):
        """Apply a transaction: spend its inputs, add its outputs.

        Returns the undo dict needed by revert_transaction; every address
        touched is added to `touched_addr`.
        """
        undo = { 'prev_addr':[] } # contains the list of pruned items for each address in the tx; also, 'prev_addr' is a list of prev addresses
        prev_addr = []
        for i, x in enumerate(tx.get('inputs')):
            txi = (x.get('prevout_hash') + int_to_hex4(x.get('prevout_n'))).decode('hex')
            addr = self.get_address(txi)
            if addr is not None:
                self.set_spent(addr, txi, txid, i, block_height, undo)
                touched_addr.add(addr)
            prev_addr.append(addr)
        undo['prev_addr'] = prev_addr
        # here I add only the outputs to history; maybe I want to add inputs too (that's in the other loop)
        for x in tx.get('outputs'):
            addr = x.get('address')
            if addr is None: continue
            self.add_to_history(addr, txid, x.get('index'), x.get('value'), block_height)
            touched_addr.add(addr)
        return undo

    def revert_transaction(self, txid, tx, block_height, touched_addr, undo):
        """Roll a transaction back (outputs first, then inputs, both in
        reverse order) using the undo dict from import_transaction."""
        #print_log("revert tx", txid)
        for x in reversed(tx.get('outputs')):
            addr = x.get('address')
            if addr is None: continue
            self.revert_add_to_history(addr, txid, x.get('index'), x.get('value'), block_height)
            touched_addr.add(addr)
        prev_addr = undo.pop('prev_addr')
        for i, x in reversed(list(enumerate(tx.get('inputs')))):
            addr = prev_addr[i]
            if addr is not None:
                txi = (x.get('prevout_hash') + int_to_hex4(x.get('prevout_n'))).decode('hex')
                self.revert_set_spent(addr, txi, undo)
                touched_addr.add(addr)
        assert undo == {}
| bauerj/electrum-server | src/storage.py | Python | mit | 22,202 |
# Copyright 2009-2010 Joshua Roesslein
# See LICENSE for details.
class WeibopError(Exception):
    """Base exception raised by weibopy API calls."""

    def __init__(self, reason):
        # keep the reason UTF-8 encoded, exactly as callers expect
        encoded_reason = reason.encode('utf-8')
        self.reason = encoded_reason

    def __str__(self):
        return self.reason
| sunner/buzz2weibo | weibopy/error.py | Python | mit | 256 |
# ~autogen spec_version
# Language-binding spec and target kernel versions; this value sits inside
# ~autogen markers, so it is presumably rewritten by the code generator --
# do not edit by hand (confirm with the autogen tooling).
spec_version = "spec: 0.9.3-pre-r2, kernel: v3.16.7-ckt16-7-ev3dev-ev3"
# ~autogen
| ddemidov/ev3dev-lang-python-1 | spec_version.py | Python | mit | 108 |
"""
===========
N2D+ fitter
===========
Reference for line params:
Dore (priv. comm.) line frequencies in CDMS,
line strength can also be obtained from Splatalogue
L. Dore, P. Caselli, S. Beninati, T. Bourke, P. C. Myers and G. Cazzoli A&A 413, 1177-1181 (2004)
http://adsabs.harvard.edu/abs/2004A%26A...413.1177D
L. Pagani, F. Daniel, and M. L. Dubernet A\%A 494, 719-727 (2009)
DOI: 10.1051/0004-6361:200810570
"""
from . import hyperfine
import astropy.units as u
# line_names = ['J1-0', 'J2-1', 'J3-2',]
# line_names = ['J2-1', 'J3-2',]
# Rest frequencies of the rotational transition centroids, in Hz (they are
# later tagged with u.Hz when building freq_dict).  J1-0 is kept commented
# out, matching the commented-out line_names lists above.
freq_dict_cen ={
    # 'J1-0': 77109.2697e6,
    'J2-1': 154217.1805e6,
    'J3-2': 231321.9119e6,
}
# Hyperfine-component velocity offsets (km/s) relative to each rotational
# transition's centroid.  Bug fix: the entry for 'J3-2_12' was mistyped as a
# second 'J3-2_02', which silently overwrote the real J3-2_02 offset
# (-3.5339) with -0.7628 and dropped component 12 (which line_strength_dict
# does define) from the line list.
voff_lines_dict={
    ####### J 2-1
    'J2-1_01': -5.6031,
    'J2-1_02': -5.5332,
    'J2-1_03': -5.3617,
    'J2-1_04': -5.0993,
    'J2-1_05': -4.9677,
    'J2-1_06': -4.7052,
    'J2-1_07': -3.8195,
    'J2-1_08': -3.5571,
    'J2-1_09': -2.8342,
    'J2-1_10': -2.3388,
    'J2-1_11': -1.9449,
    'J2-1_12': -1.9002,
    'J2-1_13': -1.7733,
    'J2-1_14': -1.3965,
    'J2-1_15': -1.0025,
    'J2-1_16': -0.7968,
    'J2-1_17': -0.5740,
    'J2-1_18': -0.2311,
    'J2-1_19': -0.0085,
    'J2-1_20': 0.0000,
    'J2-1_21': 0.1351,
    'J2-1_22': 0.1457,
    'J2-1_23': 0.1886,
    'J2-1_24': 0.2538,
    'J2-1_25': 0.6165,
    'J2-1_26': 0.7541,
    'J2-1_27': 0.8789,
    'J2-1_28': 2.5594,
    'J2-1_29': 3.0143,
    'J2-1_30': 3.0632,
    'J2-1_31': 3.1579,
    'J2-1_32': 3.4572,
    'J2-1_33': 3.6394,
    'J2-1_34': 3.7234,
    'J2-1_35': 3.9567,
    'J2-1_36': 4.2049,
    'J2-1_37': 4.5817,
    'J2-1_38': 4.6054,
    'J2-1_39': 8.4164,
    'J2-1_40': 9.0414,
    ####### J 3-2
    'J3-2_01': -3.7164,
    'J3-2_02': -3.5339,
    'J3-2_03': -3.2997,
    'J3-2_04': -3.2130,
    'J3-2_05': -3.0633,
    'J3-2_06': -2.8958,
    'J3-2_07': -2.7424,
    'J3-2_08': -2.6466,
    'J3-2_09': -2.5748,
    'J3-2_10': -1.9177,
    'J3-2_11': -1.2333,
    'J3-2_12': -0.7628,
    'J3-2_13': -0.7590,
    'J3-2_14': -0.7306,
    'J3-2_15': -0.5953,
    'J3-2_16': -0.5765,
    'J3-2_17': -0.3419,
    'J3-2_18': -0.0925,
    'J3-2_19': -0.0210,
    'J3-2_20': 0.0000,
    'J3-2_21': 0.0065,
    'J3-2_22': 0.0616,
    'J3-2_23': 0.0618,
    'J3-2_24': 0.0675,
    'J3-2_25': 0.0748,
    'J3-2_26': 0.2212,
    'J3-2_27': 0.2691,
    'J3-2_28': 0.4515,
    'J3-2_29': 0.5422,
    'J3-2_30': 0.5647,
    'J3-2_31': 0.6050,
    'J3-2_32': 0.6596,
    'J3-2_33': 0.9222,
    'J3-2_34': 1.0897,
    'J3-2_35': 1.9586,
    'J3-2_36': 2.0471,
    'J3-2_37': 2.5218,
    'J3-2_38': 2.5500,
    'J3-2_39': 2.6156,
    'J3-2_40': 3.0245,
    'J3-2_41': 3.1786,
    'J3-2_42': 3.3810,
    'J3-2_43': 3.6436,
    'J3-2_44': 4.2066,
}
line_strength_dict = {
####### J 2-1
'J2-1_01': 0.008262,
'J2-1_02': 0.005907,
'J2-1_03': 0.031334,
'J2-1_04': 0.013833,
'J2-1_05': 0.013341,
'J2-1_06': 0.010384,
'J2-1_07': 0.000213,
'J2-1_08': 0.000675,
'J2-1_09': 0.000150,
'J2-1_10': 0.001202,
'J2-1_11': 0.000963,
'J2-1_12': 0.000878,
'J2-1_13': 0.002533,
'J2-1_14': 0.000362,
'J2-1_15': 0.000162,
'J2-1_16': 0.021268,
'J2-1_17': 0.031130,
'J2-1_18': 0.000578,
'J2-1_19': 0.001008,
'J2-1_20': 0.200000,
'J2-1_21': 0.111666,
'J2-1_22': 0.088138,
'J2-1_23': 0.142511,
'J2-1_24': 0.011550,
'J2-1_25': 0.027472,
'J2-1_26': 0.012894,
'J2-1_27': 0.066406,
'J2-1_28': 0.013082,
'J2-1_29': 0.003207,
'J2-1_30': 0.061847,
'J2-1_31': 0.004932,
'J2-1_32': 0.035910,
'J2-1_33': 0.011102,
'J2-1_34': 0.038958,
'J2-1_35': 0.019743,
'J2-1_36': 0.004297,
'J2-1_37': 0.001830,
'J2-1_38': 0.000240,
'J2-1_39': 0.000029,
'J2-1_40': 0.000004,
####### J 3-2
'J3-2_01': 0.001842,
'J3-2_02': 0.001819,
'J3-2_03': 0.003544,
'J3-2_04': 0.014100,
'J3-2_05': 0.011404,
'J3-2_06': 0.000088,
'J3-2_07': 0.002201,
'J3-2_08': 0.002153,
'J3-2_09': 0.000059,
'J3-2_10': 0.000058,
'J3-2_11': 0.000203,
'J3-2_12': 0.000259,
'J3-2_13': 0.000248,
'J3-2_14': 0.000437,
'J3-2_15': 0.010215,
'J3-2_16': 0.000073,
'J3-2_17': 0.007445,
'J3-2_18': 0.000155,
'J3-2_19': 0.000272,
'J3-2_20': 0.174603,
'J3-2_21': 0.018678,
'J3-2_22': 0.100524,
'J3-2_23': 0.135563,
'J3-2_24': 0.124910,
'J3-2_25': 0.060970,
'J3-2_26': 0.088513,
'J3-2_27': 0.001085,
'J3-2_28': 0.094480,
'J3-2_29': 0.013955,
'J3-2_30': 0.007236,
'J3-2_31': 0.022222,
'J3-2_32': 0.047921,
'J3-2_33': 0.015427,
'J3-2_34': 0.000070,
'J3-2_35': 0.000796,
'J3-2_36': 0.001373,
'J3-2_37': 0.007147,
'J3-2_38': 0.016574,
'J3-2_39': 0.009776,
'J3-2_40': 0.000995,
'J3-2_41': 0.000491,
'J3-2_42': 0.000067,
'J3-2_43': 0.000039,
'J3-2_44': 0.000010,
}
# freq_dict = {
#     'J2-1': (voff_lines_dict['J2-1']*u.km/u.s).to(u.GHz, equivalencies=u.doppler_radio(freq_dict_cen['J2-1']*u.Hz)).value,
#     'J3-2': (voff_lines_dict['J3-2']*u.km/u.s).to(u.GHz, equivalencies=u.doppler_radio(freq_dict_cen['J3-2']*u.Hz)).value,
# }
# Get frequency dictionary in Hz based on the offset velocity and rest frequency
conv_J21 = u.doppler_radio(freq_dict_cen['J2-1']*u.Hz)
conv_J32 = u.doppler_radio(freq_dict_cen['J3-2']*u.Hz)
# per-component frequencies: convert each km/s offset to Hz against the
# matching transition's rest frequency
freq_dict = {
    name: ((voff_lines_dict[name]*u.km/u.s).to(u.Hz, equivalencies=conv_J21).value) for name in voff_lines_dict.keys() if "J2-1" in name
}
freq_dict.update({
    name: ((voff_lines_dict[name]*u.km/u.s).to(u.Hz, equivalencies=conv_J32).value) for name in voff_lines_dict.keys() if "J3-2" in name
})
# I don't know yet how to use this parameter... in CLASS it does not exist
# Note to Jaime: this is the sum of the degeneracy values for all hyperfines
# for a given line; it gives the relative weights between the J=2-1 and J=3-2
# lines, for example (the hyperfine weights are treated as normalized within
# one rotational transition)
w21 = sum(val for name, val in line_strength_dict.items() if 'J2-1' in name)
w32 = sum(val for name, val in line_strength_dict.items() if 'J3-2' in name)
relative_strength_total_degeneracy = {
    name: w21 for name in line_strength_dict.keys() if "J2-1" in name
}
relative_strength_total_degeneracy.update({
    name: w32 for name in line_strength_dict.keys() if "J3-2" in name
})
# Get the list of line names from the previous lists
line_names = [name for name in voff_lines_dict.keys()]
#     'J2-1': np.array([1]*len(voff_lines_dict['J2-1'])),
#     'J3-2': np.array([1]*len(voff_lines_dict['J3-2'])),
# }
# aval_dict = {
#     # 'J1-0': 10**(-4.90770),
#     'J2-1': 10**(-3.92220),
#     'J3-2': 10**(-3.35866),
# }
# Build the hyperfine model and expose the three fitter flavours used by
# pyspeckit (plain, variable-height, and explicit-background).
n2dp_vtau = hyperfine.hyperfinemodel(line_names, voff_lines_dict, freq_dict,
                                     line_strength_dict,
                                     relative_strength_total_degeneracy)
n2dp_vtau_fitter = n2dp_vtau.fitter
n2dp_vtau_vheight_fitter = n2dp_vtau.vheight_fitter
n2dp_vtau_tbg_fitter = n2dp_vtau.background_fitter
| vlas-sokolov/pyspeckit | pyspeckit/spectrum/models/n2dp.py | Python | mit | 7,091 |
# coding: utf-8
from smarthumb import SMARTHUMB
from gluon.contrib.imageutils import RESIZE
# Noticias (news articles): unique title, bounded text fields, optional photo
# with an auto-computed 200x200 thumbnail, and a publication status flag.
db.noticias.titulo.requires = [
    IS_NOT_EMPTY(error_message=T('Este campo não pode ficar vazio!')),
    IS_NOT_IN_DB(db, db.noticias.titulo,
                 error_message=T('Título deve ser único.')),
    IS_LENGTH(128, error_message=T('Tamanho máximo de 128 caracteres.'))
]
db.noticias.resumo.requires = [
    IS_NOT_EMPTY(
        error_message=T('Este campo não pode ficar vazio!')),
    IS_LENGTH(128, error_message=T('Tamanho máximo de 128 caracteres.'))
]
db.noticias.conteudo.requires = [
    IS_NOT_EMPTY(
        error_message=T('Este campo não pode ficar vazio!')),
    IS_LENGTH(5000, error_message=T('Tamanho máximo de 5000 caracteres.'))
]
# permalink is derived from the title: IS_SLUG()(...) returns (slug, error)
db.noticias.permalink.compute = lambda registro: IS_SLUG()(registro.titulo)[0]
db.noticias.foto.requires = [
    IS_EMPTY_OR(IS_IMAGE(
        error_message=T('Arquivo enviado deve ser uma imagem.'))),
    IS_LENGTH(100 * 1024,  # 100kb
              error_message=T('Arquivo muito grande!'
                              'Tamanho máximo permitido é 100kb'))
]
db.noticias.thumbnail.compute = lambda registro: SMARTHUMB(registro.foto,
                                                           (200, 200))
db.noticias.status.requires = IS_IN_SET(
    ['publicado', 'não publicado'],
    error_message=T('Por favor selecione uma das opções')
)
# Membros (team members): unique name, optional photo resized in place to
# 200x200 (RESIZE) instead of a separate thumbnail field, and an email.
db.membros.nome.requires = [
    IS_NOT_EMPTY(error_message=T('Este campo não pode ficar vazio!')),
    IS_NOT_IN_DB(db, db.membros.nome,
                 error_message=T('Nome deve ser único.')),
    IS_LENGTH(64, error_message=T('Tamanho máximo de 64 caracteres.'))
]
db.membros.foto.requires = [
    IS_EMPTY_OR(
        IS_IMAGE(error_message=T('Arquivo enviado deve ser uma imagem.'))
    ),
    IS_LENGTH(100 * 1024,  # 100kb
              error_message=T('Arquivo muito grande!'
                              'Tamanho máximo permitido é 100kb')),
    IS_EMPTY_OR(RESIZE(200, 200))
]
db.membros.email.requires = IS_EMAIL(error_message=T("Entre um email válido"))
# Eventos (events): bounded text fields, optional banner image with an
# auto-computed 200x200 thumbnail.
db.eventos.nome.requires = [
    IS_NOT_EMPTY(error_message=T('Este campo não pode ficar vazio!')),
    IS_LENGTH(128, error_message=T('Tamanho máximo de 128 caracteres.'))
]
db.eventos.endereco.requires = [
    IS_NOT_EMPTY(error_message=T('Este campo não pode ficar vazio!')),
    IS_LENGTH(128, error_message=T('Tamanho máximo de 128 caracteres.'))
]
db.eventos.descricao.requires = [
    IS_NOT_EMPTY(error_message=T('Este campo não pode ficar vazio!')),
    IS_LENGTH(256, error_message=T('Tamanho máximo de 256 caracteres.'))
]
db.eventos.banner.requires = [
    IS_EMPTY_OR(IS_IMAGE(
        error_message=T('Arquivo enviado deve ser uma imagem.'))),
    IS_LENGTH(100 * 1024,  # 100kb
              error_message=T('Arquivo muito grande!'
                              'Tamanho máximo permitido é 100kb'))
]
# Bug fix: the thumbnail must be computed from this table's "banner" field;
# the original read "registro.foto" (copied from the noticias block), a
# field the eventos table does not define.
db.eventos.banner_thumb.compute = lambda registro: SMARTHUMB(registro.banner,
                                                             (200, 200))
# Apoiadores (supporters/sponsors/partners): name, category, optional logo
# with auto-computed thumbnail, and a required website URL.
db.apoiadores.nome.requires = [
    IS_NOT_EMPTY(error_message=T('Este campo não pode ficar vazio!')),
    IS_LENGTH(64, error_message=T('Tamanho máximo de 64 caracteres.'))
]
db.apoiadores.tipo.requires = IS_IN_SET(
    ['apoiador', 'patrocinador', 'parceiro'],
    error_message=T('Por favor selecione uma das opções')
)
db.apoiadores.logo.requires = [
    IS_EMPTY_OR(
        IS_IMAGE(error_message=T('Arquivo enviado deve ser uma imagem.'))
    ),
    IS_LENGTH(100 * 1024,  # 100kb
              error_message=T('Arquivo muito grande!'
                              'Tamanho máximo permitido é 100kb'))
]
db.apoiadores.logo_thumb.compute = lambda registro: SMARTHUMB(registro.logo,
                                                              (200, 200))
db.apoiadores.url.requires = [
    IS_NOT_EMPTY(error_message=T('Este campo não pode ficar vazio!')),
    IS_LENGTH(256, error_message=T('Tamanho máximo de 256 caracteres.')),
    IS_URL()
]
# Produtos (products): name, description, optional photo + thumbnail, and an
# optional price that, when given, must be a positive comma-decimal float.
db.produtos.nome.requires = [
    IS_NOT_EMPTY(
        error_message=T('Este campo não pode ficar vazio!')),
    IS_LENGTH(64, error_message=T('Tamanho máximo de 64 caracteres.'))
]
db.produtos.descricao.requires = [
    IS_NOT_EMPTY(
        error_message=T('Este campo não pode ficar vazio!')),
    IS_LENGTH(128, error_message=T('Tamanho máximo de 128 caracteres.'))
]
db.produtos.foto.requires = [
    IS_EMPTY_OR(
        IS_IMAGE(error_message=T('Arquivo enviado deve ser uma imagem.'))
    ),
    IS_LENGTH(100 * 1024,  # 100kb
              error_message=T('Arquivo muito grande!'
                              'Tamanho máximo permitido é 100kb'))
]
db.produtos.thumb.compute = lambda registro: SMARTHUMB(registro.foto,
                                                       (200, 200))
db.produtos.preco.requires = IS_EMPTY_OR(IS_FLOAT_IN_RANGE(
    minimum=0.1,
    dot=',',
    error_message=T('Valor inválido para preço. '
                    'Quando especificado deve ser maior do que 0'
                    ' e no formato 2,50.')
))
# Carousel (home-page slider): tab labels, slide title/description, a
# 1200x400 image (resized in place), a target URL and an active/inactive flag.
db.carousel.nome_aba.requires = [
    IS_NOT_EMPTY(
        error_message=T('Este campo não pode ficar vazio!')),
    IS_LENGTH(16, error_message=T('Tamanho máximo de 16 caracteres.'))
]
db.carousel.descricao_aba.requires = [
    IS_NOT_EMPTY(
        error_message=T('Este campo não pode ficar vazio!')),
    IS_LENGTH(24, error_message=T('Tamanho máximo de 24 caracteres.'))
]
db.carousel.titulo.requires = [
    IS_NOT_EMPTY(
        error_message=T('Este campo não pode ficar vazio!')),
    IS_LENGTH(16, error_message=T('Tamanho máximo de 16 caracteres.'))
]
db.carousel.descricao.requires = [
    IS_NOT_EMPTY(
        error_message=T('Este campo não pode ficar vazio!')),
    IS_LENGTH(256, error_message=T('Tamanho máximo de 256 caracteres.'))
]
db.carousel.imagem.requires = [
    IS_EMPTY_OR(
        IS_IMAGE(error_message=T('Arquivo enviado deve ser uma imagem.'))
    ),
    IS_LENGTH(100 * 1024,  # 100kb
              error_message=T('Arquivo muito grande!'
                              'Tamanho máximo permitido é 100kb')),
    IS_EMPTY_OR(RESIZE(1200, 400))
]
db.carousel.url.requires = [
    IS_NOT_EMPTY(
        error_message=T('Este campo não pode ficar vazio!')),
    IS_LENGTH(256, error_message=T('Tamanho máximo de 256 caracteres.')),
    IS_URL()
]
db.carousel.status.requires = IS_IN_SET(
    ['ativo', 'inativo'],
    error_message=T('Por favor selecione uma das opções')
)
| pvsousalima/marolo | models/20_validators.py | Python | mit | 6,559 |
"""
Django settings for tests project.
"""
from __future__ import unicode_literals
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(__file__))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# (test-suite-only key; never reuse outside these tests)
SECRET_KEY = '=)c(th7-3@w*n9mf9_b+2qg685lc6qgfars@yu1g516xu5&is)'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

TEMPLATE_DEBUG = DEBUG

# Application definition

INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.redirects',
    'django.contrib.admin',
    'django.contrib.flatpages',
    'djangoseo',
    'userapp',
)

MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
)

TEMPLATE_DIRS = (
    # Put strings here, like "/home/html/django_templates" or
    # "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)

TEMPLATE_CONTEXT_PROCESSORS = (
    "django.contrib.auth.context_processors.auth",
    "django.core.context_processors.debug",
    "django.core.context_processors.i18n",
    "django.core.context_processors.media",
    'django.core.context_processors.request',
)

ROOT_URLCONF = 'tests.urls'

# WSGI_APPLICATION = 'tests.wsgi.application'

# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': 'test.db',
    }
}

# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'Europe/Moscow'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/

STATIC_URL = '/static/'

MEDIA_URL = '/media/'

SITE_ID = 1

CACHE_BACKEND = 'dummy://'
# Enable when testing cache
# CACHE_BACKEND = "locmem://?timeout=30&max_entries=400"

# apps whose models receive SEO metadata in the test suite
SEO_MODELS = ('userapp',)

TEST_RUNNER = 'django.test.runner.DiscoverRunner'
| romansalin/django-seo | tests/settings.py | Python | mit | 2,490 |
#-- GAUDI jobOptions generated on Fri Jul 17 16:39:48 2015
#-- Contains event types :
#-- 11104124 - 106 files - 1087377 events - 233.68 GBytes
#-- Extra information about the data processing phases:
#-- Processing Pass Step-124620
#-- StepId : 124620
#-- StepName : Digi13 with G4 dE/dx
#-- ApplicationName : Boole
#-- ApplicationVersion : v26r3
#-- OptionFiles : $APPCONFIGOPTS/Boole/Default.py;$APPCONFIGOPTS/Boole/DataType-2012.py;$APPCONFIGOPTS/Boole/Boole-SiG4EnergyDeposit.py;$APPCONFIGOPTS/Persistency/Compression-ZLIB-1.py
#-- DDDB : fromPreviousStep
#-- CONDDB : fromPreviousStep
#-- ExtraPackages : AppConfig.v3r164
#-- Visible : Y
#-- Processing Pass Step-124630
#-- StepId : 124630
#-- StepName : Stripping20-NoPrescalingFlagged for Sim08
#-- ApplicationName : DaVinci
#-- ApplicationVersion : v32r2p1
#-- OptionFiles : $APPCONFIGOPTS/DaVinci/DV-Stripping20-Stripping-MC-NoPrescaling.py;$APPCONFIGOPTS/DaVinci/DataType-2012.py;$APPCONFIGOPTS/DaVinci/InputType-DST.py;$APPCONFIGOPTS/Persistency/Compression-ZLIB-1.py
#-- DDDB : fromPreviousStep
#-- CONDDB : fromPreviousStep
#-- ExtraPackages : AppConfig.v3r164
#-- Visible : Y
#-- Processing Pass Step-125877
#-- StepId : 125877
#-- StepName : L0 emulation - TCK 003d
#-- ApplicationName : Moore
#-- ApplicationVersion : v20r4
#-- OptionFiles : $APPCONFIGOPTS/L0App/L0AppSimProduction.py;$APPCONFIGOPTS/L0App/L0AppTCK-0x003d.py;$APPCONFIGOPTS/L0App/DataType-2012.py
#-- DDDB : fromPreviousStep
#-- CONDDB : fromPreviousStep
#-- ExtraPackages : AppConfig.v3r200
#-- Visible : N
#-- Processing Pass Step-127200
#-- StepId : 127200
#-- StepName : TCK-0x4097003d Flagged for Sim08 2012
#-- ApplicationName : Moore
#-- ApplicationVersion : v14r2p1
#-- OptionFiles : $APPCONFIGOPTS/Moore/MooreSimProductionForSeparateL0AppStep.py;$APPCONFIGOPTS/Conditions/TCK-0x4097003d.py;$APPCONFIGOPTS/Moore/DataType-2012.py
#-- DDDB : fromPreviousStep
#-- CONDDB : fromPreviousStep
#-- ExtraPackages : AppConfig.v3r206
#-- Visible : Y
#-- Processing Pass Step-124834
#-- StepId : 124834
#-- StepName : Reco14a for MC
#-- ApplicationName : Brunel
#-- ApplicationVersion : v43r2p7
#-- OptionFiles : $APPCONFIGOPTS/Brunel/DataType-2012.py;$APPCONFIGOPTS/Brunel/MC-WithTruth.py;$APPCONFIGOPTS/Persistency/Compression-ZLIB-1.py
#-- DDDB : fromPreviousStep
#-- CONDDB : fromPreviousStep
#-- ExtraPackages : AppConfig.v3r164
#-- Visible : Y
#-- Processing Pass Step-127148
#-- StepId : 127148
#-- StepName : Sim08g - 2012 - MU - Pythia8
#-- ApplicationName : Gauss
#-- ApplicationVersion : v45r9
#-- OptionFiles : $APPCONFIGOPTS/Gauss/Sim08-Beam4000GeV-mu100-2012-nu2.5.py;$DECFILESROOT/options/@{eventType}.py;$LBPYTHIA8ROOT/options/Pythia8.py;$APPCONFIGOPTS/Gauss/G4PL_FTFP_BERT_EmNoCuts.py;$APPCONFIGOPTS/Persistency/Compression-ZLIB-1.py
#-- DDDB : dddb-20130929-1
#-- CONDDB : sim-20130522-1-vc-mu100
#-- ExtraPackages : AppConfig.v3r205;DecFiles.v27r37
#-- Visible : Y
from Gaudi.Configuration import *
from GaudiConf import IOHelper

# Job numbers of the production files present in this dataset.  The gaps
# (28-31, 35-44, 46-56, 59-61, 63-72) are output files absent from the
# production.
_FILE_NUMBERS = (list(range(1, 28)) + [32, 33, 34, 45, 57, 58, 62] +
                 list(range(73, 145)))

# All LFNs differ only in the zero-padded 8-digit job number.
_LFN_PATTERN = ('LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00043567/0000/'
                '00043567_%08d_1.allstreams.dst')

# Register the 106 input DSTs, replacing any previously configured inputs.
IOHelper('ROOT').inputFiles([_LFN_PATTERN % n for n in _FILE_NUMBERS],
                            clear=True)
| Williams224/davinci-scripts | ksteta3pi/Consideredbkg/MC_12_11104124_MagUp.py | Python | mit | 12,177 |
# -*- coding: utf-8 -*-
from typing import Optional

from flask_login import LoginManager

from tamactiluya.models import User
# Module-wide Flask-Login manager; attach it to the app via init_app().
login_manager = LoginManager()
# 'strong' invalidates the session when the client's identifier
# (IP address / user agent) changes between requests.
login_manager.session_protection = 'strong'
# Endpoint anonymous users are redirected to when hitting a protected view.
login_manager.login_view = 'user.login'
@login_manager.user_loader
def user_loader(uname) -> Optional[User]:
    """Reload a user from the username stored in the session cookie.

    Registered as the Flask-Login user-loader callback.  The original
    annotation ``User or None`` evaluated to plain ``User`` (the ``or``
    short-circuits on a truthy class), so it mis-stated the contract;
    ``Optional[User]`` is the correct type.

    :param uname: username recorded in the session.
    :return: the matching ``User``, or ``None`` when no such user exists
        (Flask-Login then treats the session as anonymous).
    """
    try:
        return User(uname)
    except User.NotFound:
        return None
return None | tokyo-jesus/tamactiluya | tamactiluya/auth.py | Python | mit | 413 |
# Substitution-cipher alphabets: the character at index i of ``replacing``
# encodes to the character at index i of ``a`` (and vice versa).  Raw
# strings fix the invalid "\/" escape sequence, which is a SyntaxWarning
# on modern Python; the string values are unchanged (backslash + slash).
replacing = r'qwertyuiopasdfghjklzxcvbnm )([]\/{}!@#$%^&*'
a = r'\/abcdefghijklmnopqrstuvwxyz() }{][*%$&^#@!'

d = {}  # forward table: plain character -> encoded character
e = {}  # reverse table: encoded character -> plain character
# Build both tables with zip instead of a manual index loop.  The length
# guard from the original is kept: on a mismatch the tables stay empty.
# (The original also converted the alphabets to lists first; strings
# iterate and index identically, so that step is dropped.)
if len(replacing) == len(a):
    d = dict(zip(replacing, a))
    e = dict(zip(a, replacing))
def encypt(dict, string):
    """Encode ``string`` by mapping every character through ``dict``.

    :param dict: character-to-character mapping (the name shadows the
        builtin; kept for backward compatibility with keyword callers).
    :param string: text to encode.
    :return: the encoded text.
    :raises KeyError: if a character of ``string`` has no mapping entry.
    """
    # str.join over a generator replaces the manual append-then-join loop.
    return ''.join(dict[ch] for ch in string)
def decypt(dict, string):
    """Decode ``string`` by mapping every character through ``dict``.

    :param dict: character-to-character mapping (the name shadows the
        builtin; kept for backward compatibility with keyword callers).
    :param string: text to decode.
    :return: the decoded text.
    :raises KeyError: if a character of ``string`` has no mapping entry.
    """
    # str.join over a generator replaces the manual append-then-join loop.
    return ''.join(dict[ch] for ch in string)
if __name__ == '__main__':
    # Interactive demo: read one line, show it encoded and decoded.
    text = input('code:')
    encoded = encypt(e, text)
    decoded = decypt(d, text)
    print('encypts to', encoded)
    print('decypt to', decoded)
    input()  # pause so a double-clicked console window stays open
| javaarchive/PIDLE | ccode.py | Python | mit | 846 |
from unittest import TestCase
import six
from regparser.tree.depth import markers, optional_rules, rules
from regparser.tree.depth.derive import debug_idx, derive_depths
from regparser.tree.depth.markers import INLINE_STARS, MARKERLESS, STARS_TAG
class DeriveTests(TestCase):
    """Tests for deriving paragraph depths from marker sequences.

    Each test supplies a flat list of paragraph markers (ints, letters,
    roman numerals, STARS placeholders, markerless paragraphs) and the
    complete set of depth assignments the solver is expected to derive.
    Optional constraint functions narrow the solution set.
    """

    def assert_depth_match(self, markers, *depths_set):
        # NOTE: the "markers" parameter shadows the imported ``markers``
        # module; harmless here because the module is not used in this
        # method, but worth keeping in mind when editing.
        self.assert_depth_match_extra(markers, [], *depths_set)

    def assert_depth_match_extra(self, markers, extra, *depths_set):
        """Verify that the set of markers resolves to the provided set of
        depths (in any order). Allows extra constraints."""
        solutions = derive_depths(markers, extra)
        # Compare as order-insensitive sets of depth tuples.
        results = {tuple(a.depth for a in s) for s in solutions}
        six.assertCountEqual(self, results, {tuple(s) for s in depths_set})

    def test_ints(self):
        self.assert_depth_match(['1', '2', '3', '4'],
                                [0, 0, 0, 0])

    def test_alpha_ints(self):
        self.assert_depth_match(['A', '1', '2', '3'],
                                [0, 1, 1, 1])

    def test_alpha_ints_jump_back(self):
        self.assert_depth_match(['A', '1', '2', '3', 'B', '1', '2', '3', 'C'],
                                [0, 1, 1, 1, 0, 1, 1, 1, 0])

    def test_roman_alpha(self):
        self.assert_depth_match(
            ['a', '1', '2', 'b', '1', '2', '3', '4', 'i', 'ii', 'iii', '5',
             'c', 'd', '1', '2', 'e'],
            [0, 1, 1, 0, 1, 1, 1, 1, 2, 2, 2, 1, 0, 0, 1, 1, 0])

    def test_mix_levels_roman_alpha(self):
        self.assert_depth_match(
            ['A', '1', '2', 'i', 'ii', 'iii', 'iv', 'B', '1', 'a', 'b', '2',
             'a', 'b', 'i', 'ii', 'iii', 'c'],
            [0, 1, 1, 2, 2, 2, 2, 0, 1, 2, 2, 1, 2, 2, 3, 3, 3, 2])

    def test_i_ambiguity(self):
        # 'i' may be the ninth lower-case letter or the roman numeral one.
        self.assert_depth_match(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i'],
                                [0, 0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0, 1])

        self.assert_depth_match(
            ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j'],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 1, 0])

        # A following 'ii' disambiguates 'i' as a roman numeral.
        self.assert_depth_match(
            ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'ii'],
            [0, 0, 0, 0, 0, 0, 0, 0, 1, 1])

    def test_repeat_alpha(self):
        self.assert_depth_match(
            ['A', '1', 'a', 'i', 'ii', 'a', 'b', 'c', 'b'],
            [0, 1, 2, 3, 3, 4, 4, 4, 2])

    def test_simple_stars(self):
        self.assert_depth_match(['A', '1', STARS_TAG, 'd'],
                                [0, 1, 2, 2])

        self.assert_depth_match_extra(['A', '1', 'a', STARS_TAG, 'd'],
                                      [optional_rules.limit_sequence_gap()],
                                      [0, 1, 2, 2, 2])

    def test_ambiguous_stars(self):
        self.assert_depth_match(['A', '1', 'a', STARS_TAG, 'B'],
                                [0, 1, 2, 0, 0],
                                [0, 1, 2, 2, 0],
                                [0, 1, 2, 3, 3])

        self.assert_depth_match_extra(['A', '1', 'a', STARS_TAG, 'B'],
                                      [optional_rules.stars_occupy_space],
                                      [0, 1, 2, 2, 0],
                                      [0, 1, 2, 3, 3])

    def test_double_stars(self):
        self.assert_depth_match(['A', '1', 'a', STARS_TAG, STARS_TAG, 'B'],
                                [0, 1, 2, 1, 0, 0],
                                [0, 1, 2, 2, 0, 0],
                                [0, 1, 2, 2, 1, 0],
                                [0, 1, 2, 3, 0, 0],
                                [0, 1, 2, 3, 2, 0],
                                [0, 1, 2, 3, 1, 0])

        self.assert_depth_match_extra(
            ['A', '1', 'a', STARS_TAG, STARS_TAG, 'B'],
            [optional_rules.stars_occupy_space],
            [0, 1, 2, 2, 1, 0],
            [0, 1, 2, 3, 2, 0],
            [0, 1, 2, 3, 1, 0])

    def test_alpha_roman_ambiguous(self):
        self.assert_depth_match_extra(
            ['i', 'ii', STARS_TAG, 'v', STARS_TAG, 'vii'],
            [optional_rules.limit_sequence_gap()],
            [0, 0, 1, 1, 2, 2],
            [0, 0, 1, 1, 0, 0],
            [0, 0, 0, 0, 0, 0])

    def test_start_star(self):
        self.assert_depth_match_extra(
            [STARS_TAG, 'c', '1', STARS_TAG, 'ii', 'iii', '2', 'i', 'ii',
             STARS_TAG, 'v', STARS_TAG, 'vii', 'A'],
            [optional_rules.limit_sequence_gap()],
            [0, 0, 1, 2, 2, 2, 1, 2, 2, 2, 2, 2, 2, 3],
            [0, 0, 1, 2, 2, 2, 1, 2, 2, 3, 3, 2, 2, 3],
            [0, 0, 1, 2, 2, 2, 1, 2, 2, 3, 3, 4, 4, 5],
            [0, 0, 1, 2, 2, 2, 1, 2, 2, 0, 0, 1, 1, 2])

    def test_inline_star(self):
        self.assert_depth_match(['1', STARS_TAG, '2'],
                                [0, 0, 0],
                                [0, 1, 0])

        self.assert_depth_match_extra(['1', STARS_TAG, '2'],
                                      [optional_rules.stars_occupy_space],
                                      [0, 1, 0])

        self.assert_depth_match(['1', INLINE_STARS, '2'],
                                [0, 1, 0])

        self.assert_depth_match(['1', INLINE_STARS, 'a'],
                                [0, 1, 1])

    def test_star_star(self):
        self.assert_depth_match(['A', STARS_TAG, STARS_TAG, 'D'],
                                [0, 1, 0, 0])

        self.assert_depth_match(['A', INLINE_STARS, STARS_TAG, '3'],
                                [0, 1, 1, 1])

    def test_markerless_repeated(self):
        """Repeated markerless paragraphs must be on the same level"""
        self.assert_depth_match(
            [MARKERLESS, 'a', MARKERLESS, MARKERLESS],
            [0, 1, 0, 0],
            [0, 1, 2, 2])

    def test_ii_is_not_ambiguous(self):
        """We've fixed ii to be a roman numeral"""
        self.assert_depth_match(
            ['a', STARS_TAG, 'ii'],
            [0, 1, 1])

    def test_depth_type_order_single(self):
        """Constrain depths to have certain types."""
        extra = rules.depth_type_order([markers.ints, markers.lower])
        self.assert_depth_match_extra(['1', 'a'], [extra], [0, 1])
        # Roman numerals at depth 0 violate the ordering: no solutions.
        self.assert_depth_match_extra(['i', 'a'], [extra])

    def test_depth_type_order_multiple(self):
        """Constrain depths to be in a list of types."""
        extra = rules.depth_type_order([(markers.ints, markers.roman),
                                        markers.lower])
        self.assert_depth_match_extra(['1', 'a'], [extra], [0, 1])
        self.assert_depth_match_extra(['i', 'a'], [extra], [0, 1])

    def test_depth_type_inverses_t2d(self):
        """Two markers of the same type should have the same depth"""
        self.assert_depth_match_extra(
            ['1', STARS_TAG, 'b', STARS_TAG, 'C', STARS_TAG, 'd'],
            [optional_rules.limit_sequence_gap()],
            [0, 1, 1, 2, 2, 3, 3],
            [0, 1, 1, 2, 2, 1, 1])

        self.assert_depth_match_extra(
            ['1', STARS_TAG, 'b', STARS_TAG, 'C', STARS_TAG, 'd'],
            [optional_rules.limit_sequence_gap(),
             optional_rules.depth_type_inverses],
            [0, 1, 1, 2, 2, 1, 1])

    def test_depth_type_inverses_d2t(self):
        """Two markers of the same depth should have the same type"""
        self.assert_depth_match_extra(
            ['1', STARS_TAG, 'c', '2', INLINE_STARS, 'i', STARS_TAG, 'iii'],
            [optional_rules.limit_sequence_gap()],
            [0, 1, 1, 0, 1, 1, 1, 1],
            [0, 1, 1, 0, 1, 1, 2, 2])

        self.assert_depth_match_extra(
            ['1', STARS_TAG, 'c', '2', INLINE_STARS, 'i', STARS_TAG, 'iii'],
            [optional_rules.limit_sequence_gap(),
             optional_rules.depth_type_inverses],
            [0, 1, 1, 0, 1, 1, 2, 2])

    def test_depth_type_inverses_markerless(self):
        """Markerless paragraphs should not trigger an incompatibility"""
        self.assert_depth_match_extra(
            ['1', MARKERLESS, '2', 'a'],
            [optional_rules.depth_type_inverses],
            [0, 1, 0, 1])

    def test_star_new_level(self):
        """STARS shouldn't have subparagraphs"""
        self.assert_depth_match(
            ['a', STARS_TAG, 'i'],
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1]
        )
        self.assert_depth_match_extra(
            ['a', STARS_TAG, 'i'],
            [optional_rules.star_new_level],
            [0, 0, 0],
            [0, 1, 0],
            [0, 1, 1]
        )
        self.assert_depth_match_extra(
            ['a', STARS_TAG, 'i'],
            [optional_rules.star_new_level, optional_rules.stars_occupy_space],
            [0, 0, 0],
            [0, 1, 0],
        )

    def test_marker_stars_markerless_symmetry(self):
        self.assert_depth_match(
            [MARKERLESS, 'a', STARS_TAG, MARKERLESS],
            [0, 1, 1, 0],
            [0, 1, 2, 2],
            [0, 1, 1, 2]
        )

    def test_markerless_stars_symmetry(self):
        self.assert_depth_match(
            [MARKERLESS, STARS_TAG, MARKERLESS],
            [0, 0, 0])

    def test_cap_roman(self):
        """Capitalized roman numerals can be paragraphs"""
        self.assert_depth_match(
            ['x', '1', 'A', 'i', 'I'],
            [0, 1, 2, 3, 4],
            [0, 1, 2, 3, 2])

    def test_limit_paragraph_types(self):
        """Limiting paragraph types limits how the markers are interpreted"""
        self.assert_depth_match(
            ['G', 'H', 'I'],
            [0, 0, 0],
            [0, 0, 1]
        )
        self.assert_depth_match_extra(
            ['G', 'H', 'I'],
            [optional_rules.limit_paragraph_types(markers.upper)],
            [0, 0, 0]
        )

    def test_markerless_at_beginning(self):
        """Allow markerless paragraphs to be on the same level as a paragraph
        marker"""
        self.assert_depth_match(
            [MARKERLESS, MARKERLESS, 'a'],
            [0, 0, 1],
            [0, 0, 0])
        self.assert_depth_match(
            [MARKERLESS, MARKERLESS, 'a', 'b', 'c', 'd'],
            [0, 0, 1, 1, 1, 1],
            [0, 0, 0, 0, 0, 0])

    def test_limit_sequence_gap(self):
        """The limit_sequence_gap rule should limit our ability to derive
        depths with gaps between adjacent paragraphs. It should be
        configurable to allow any value"""
        self.assert_depth_match(['a', '1', 'i'],
                                [0, 1, 2],
                                [0, 1, 0])
        self.assert_depth_match_extra(['a', '1', 'i'],
                                      [optional_rules.limit_sequence_gap()],
                                      [0, 1, 2])
        self.assert_depth_match_extra(['a', '1', 'i'],
                                      [optional_rules.limit_sequence_gap(10)],
                                      [0, 1, 2],
                                      [0, 1, 0])

    def test_debug_idx(self):
        """Find the index of the first error when attempting to derive
        depths"""
        self.assertEqual(debug_idx(['1', '2', '3']), 3)
        self.assertEqual(debug_idx(['1', 'c']), 1)
        self.assertEqual(debug_idx(['1', '2', 'c']), 2)
        self.assertEqual(
            debug_idx(['1', 'a', '2', 'A'],
                      [optional_rules.depth_type_inverses]),
            3)
| tadhg-ohiggins/regulations-parser | tests/tree_depth_derive_tests.py | Python | cc0-1.0 | 11,439 |
#!/usr/bin/env python
# Copyright 2014 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This script retrieves the history of all V8 branches and trunk revisions and
# their corresponding Chromium revisions.
# Requires a chromium checkout with branch heads:
# gclient sync --with_branch_heads
# gclient fetch
import argparse
import csv
import itertools
import json
import os
import re
import sys
from common_includes import *
# Default step configuration; also duplicated in Releases._Config() below.
CONFIG = {
  "BRANCHNAME": "retrieve-v8-releases",
  "PERSISTFILE_BASENAME": "/tmp/v8-releases-tempfile",
}

# Expression for retrieving the bleeding edge revision from a commit message.
PUSH_MSG_SVN_RE = re.compile(r".* \(based on bleeding_edge revision r(\d+)\)$")
PUSH_MSG_GIT_RE = re.compile(r".* \(based on ([a-fA-F0-9]+)\)$")

# Expression for retrieving the merged patches from a merge commit message
# (old and new format).
MERGE_MESSAGE_RE = re.compile(r"^.*[M|m]erged (.+)(\)| into).*$", re.M)

CHERRY_PICK_TITLE_GIT_RE = re.compile(r"^.* \(cherry\-pick\)\.?$")

# New git message for cherry-picked CLs. One message per line.
MERGE_MESSAGE_GIT_RE = re.compile(r"^Merged ([a-fA-F0-9]+)\.?$")

# Expression for retrieving reverted patches from a commit message (old and
# new format).
ROLLBACK_MESSAGE_RE = re.compile(r"^.*[R|r]ollback of (.+)(\)| in).*$", re.M)

# New git message for reverted CLs. One message per line.
ROLLBACK_MESSAGE_GIT_RE = re.compile(r"^Rollback of ([a-fA-F0-9]+)\.?$")

# Expression for retrieving the code review link.
REVIEW_LINK_RE = re.compile(r"^Review URL: (.+)$", re.M)

# Expression with three versions (historical) for extracting the v8 revision
# from the chromium DEPS file.
DEPS_RE = re.compile(r"""^\s*(?:["']v8_revision["']: ["']"""
                     """|\(Var\("googlecode_url"\) % "v8"\) \+ "\/trunk@"""
                     """|"http\:\/\/v8\.googlecode\.com\/svn\/trunk@)"""
                     """([^"']+)["'].*$""", re.M)

# Expression to pick tag and revision for bleeding edge tags. To be used with
# output of 'svn log'.
BLEEDING_EDGE_TAGS_RE = re.compile(
    r"A \/tags\/([^\s]+) \(from \/branches\/bleeding_edge\:(\d+)\)")
def SortBranches(branches):
  """Sort branches with version number names."""
  # Highest (newest) version first; SortingKey comes from common_includes.
  return sorted(branches, key=SortingKey, reverse=True)
def FilterDuplicatesAndReverse(cr_releases):
  """Returns the chromium releases in reverse order filtered by v8 revision
  duplicates.

  cr_releases is a list of [cr_rev, v8_rev] reverse-sorted by cr_rev.
  """
  result = []
  previous_v8_rev = ""
  for release in reversed(cr_releases):
    # Keep only the first (oldest) chromium entry per v8 revision.
    if release[1] != previous_v8_rev:
      previous_v8_rev = release[1]
      result.append(release)
  return result
def BuildRevisionRanges(cr_releases):
  """Returns a mapping of v8 revision -> chromium ranges.
  The ranges are comma-separated, each range has the form R1:R2. The newest
  entry is the only one of the form R1, as there is no end range.

  cr_releases is a list of [cr_rev, v8_rev] reverse-sorted by cr_rev.
  cr_rev either refers to a chromium svn revision or a chromium branch number.
  """
  # NOTE: itertools.izip and dict.iteritems make this Python 2 only.
  range_lists = {}
  cr_releases = FilterDuplicatesAndReverse(cr_releases)

  # Visit pairs of cr releases from oldest to newest.
  for cr_from, cr_to in itertools.izip(
      cr_releases, itertools.islice(cr_releases, 1, None)):

    # Assume the chromium revisions are all different.
    assert cr_from[0] != cr_to[0]

    # TODO(machenbach): Subtraction is not git friendly.
    ran = "%s:%d" % (cr_from[0], int(cr_to[0]) - 1)

    # Collect the ranges in lists per revision.
    range_lists.setdefault(cr_from[1], []).append(ran)

  # Add the newest revision.
  if cr_releases:
    range_lists.setdefault(cr_releases[-1][1], []).append(cr_releases[-1][0])

  # Stringify and comma-separate the range lists.
  return dict((rev, ", ".join(ran)) for rev, ran in range_lists.iteritems())
def MatchSafe(match):
  """Return the first capture group of a regex match, or "" for no match."""
  if not match:
    return ""
  return match.group(1)
class Preparation(Step):
  MESSAGE = "Preparation."

  def RunStep(self):
    # Ensure a clean v8 checkout, then create the temporary work branch.
    self.CommonPrepare()
    self.PrepareBranch()
class RetrieveV8Releases(Step):
  MESSAGE = "Retrieve all V8 releases."

  def ExceedsMax(self, releases):
    """True when a release cap is set and has been exceeded."""
    return (self._options.max_releases > 0
            and len(releases) > self._options.max_releases)

  def GetBleedingEdgeFromPush(self, title):
    """Extract the bleeding_edge SVN revision from a push commit title."""
    return MatchSafe(PUSH_MSG_SVN_RE.match(title))

  def GetBleedingEdgeGitFromPush(self, title):
    """Extract the bleeding_edge git hash from a push commit title."""
    return MatchSafe(PUSH_MSG_GIT_RE.match(title))

  def GetMergedPatches(self, body):
    """Extract merged/rolled-back patches from an old-style commit body."""
    patches = MatchSafe(MERGE_MESSAGE_RE.search(body))
    if not patches:
      patches = MatchSafe(ROLLBACK_MESSAGE_RE.search(body))
      if patches:
        # Indicate reverted patches with a "-".
        patches = "-%s" % patches
    return patches

  def GetMergedPatchesGit(self, body):
    """Extract merged/rolled-back patches from a new-style (git) body,
    one message per line; result is a comma-separated string."""
    patches = []
    for line in body.splitlines():
      patch = MatchSafe(MERGE_MESSAGE_GIT_RE.match(line))
      if patch:
        patches.append(patch)
      patch = MatchSafe(ROLLBACK_MESSAGE_GIT_RE.match(line))
      if patch:
        # Indicate reverted patches with a "-".
        patches.append("-%s" % patch)
    return ", ".join(patches)

  def GetReleaseDict(
      self, git_hash, bleeding_edge_rev, bleeding_edge_git, branch, version,
      patches, cl_body):
    """Assemble the output dictionary describing one release."""
    revision = self.vc.GitSvn(git_hash)
    return {
      # The SVN revision on the branch.
      "revision": revision,
      # The git revision on the branch.
      "revision_git": git_hash,
      # The SVN revision on bleeding edge (only for newer trunk pushes).
      "bleeding_edge": bleeding_edge_rev,
      # The same for git.
      "bleeding_edge_git": bleeding_edge_git,
      # The branch name.
      "branch": branch,
      # The version for displaying in the form 3.26.3 or 3.26.3.12.
      "version": version,
      # The date of the commit.
      "date": self.GitLog(n=1, format="%ci", git_hash=git_hash),
      # Merged patches if available in the form 'r1234, r2345'.
      "patches_merged": patches,
      # Default for easier output formatting.
      "chromium_revision": "",
      # Default for easier output formatting.
      "chromium_branch": "",
      # Link to the CL on code review. Trunk pushes are not uploaded, so this
      # field will be populated below with the recent roll CL link.
      "review_link": MatchSafe(REVIEW_LINK_RE.search(cl_body)),
      # Link to the commit message on google code.
      "revision_link": ("https://code.google.com/p/v8/source/detail?r=%s"
                        % revision),
    }

  def GetRelease(self, git_hash, branch):
    """Build the release dict for one version-changing commit.

    Returns (release_dict, patch_level)."""
    self.ReadAndPersistVersion()
    base_version = [self["major"], self["minor"], self["build"]]
    version = ".".join(base_version)
    body = self.GitLog(n=1, format="%B", git_hash=git_hash)

    patches = ""
    if self["patch"] != "0":
      version += ".%s" % self["patch"]
      # Cherry-picked commits use the new (git) merge-message format.
      if CHERRY_PICK_TITLE_GIT_RE.match(body.splitlines()[0]):
        patches = self.GetMergedPatchesGit(body)
      else:
        patches = self.GetMergedPatches(body)

    title = self.GitLog(n=1, format="%s", git_hash=git_hash)
    bleeding_edge_revision = self.GetBleedingEdgeFromPush(title)
    bleeding_edge_git = ""
    if bleeding_edge_revision:
      bleeding_edge_git = self.vc.SvnGit(bleeding_edge_revision,
                                         self.vc.RemoteMasterBranch())
    else:
      bleeding_edge_git = self.GetBleedingEdgeGitFromPush(title)
    return self.GetReleaseDict(
        git_hash, bleeding_edge_revision, bleeding_edge_git, branch, version,
        patches, body), self["patch"]

  def GetReleasesFromMaster(self):
    """Collect releases from the 20 most recent SVN tags."""
    tag_text = self.SVN("log https://v8.googlecode.com/svn/tags -v --limit 20")
    releases = []
    for (tag, revision) in re.findall(BLEEDING_EDGE_TAGS_RE, tag_text):
      git_hash = self.vc.SvnGit(revision)

      # Add bleeding edge release. It does not contain patches or a code
      # review link, as tags are not uploaded.
      releases.append(self.GetReleaseDict(
          git_hash, revision, git_hash, self.vc.MasterBranch(), tag, "", ""))
    return releases

  def GetReleasesFromBranch(self, branch):
    """Walk a branch's history and collect one release per version change."""
    self.GitReset(self.vc.RemoteBranch(branch))
    if branch == self.vc.MasterBranch():
      return self.GetReleasesFromMaster()

    releases = []
    try:
      for git_hash in self.GitLog(format="%H").splitlines():
        if VERSION_FILE not in self.GitChangedFiles(git_hash):
          continue
        if self.ExceedsMax(releases):
          break  # pragma: no cover
        if not self.GitCheckoutFileSafe(VERSION_FILE, git_hash):
          break  # pragma: no cover

        release, patch_level = self.GetRelease(git_hash, branch)
        releases.append(release)

        # Follow branches only until their creation point.
        # TODO(machenbach): This omits patches if the version file wasn't
        # manipulated correctly. Find a better way to detect the point where
        # the parent of the branch head leads to the trunk branch.
        if branch != self.vc.CandidateBranch() and patch_level == "0":
          break

    # Allow Ctrl-C interrupt.
    except (KeyboardInterrupt, SystemExit):  # pragma: no cover
      pass

    # Clean up checked-out version file.
    self.GitCheckoutFileSafe(VERSION_FILE, "HEAD")
    return releases

  def RunStep(self):
    self.GitCreateBranch(self._config["BRANCHNAME"])
    branches = self.vc.GetBranches()
    releases = []
    if self._options.branch == 'recent':
      # Get only recent development on trunk, beta and stable.
      if self._options.max_releases == 0:  # pragma: no cover
        self._options.max_releases = 10
      beta, stable = SortBranches(branches)[0:2]
      releases += self.GetReleasesFromBranch(stable)
      releases += self.GetReleasesFromBranch(beta)
      releases += self.GetReleasesFromBranch(self.vc.CandidateBranch())
      releases += self.GetReleasesFromBranch(self.vc.MasterBranch())
    elif self._options.branch == 'all':  # pragma: no cover
      # Retrieve the full release history.
      for branch in branches:
        releases += self.GetReleasesFromBranch(branch)
      releases += self.GetReleasesFromBranch(self.vc.CandidateBranch())
      releases += self.GetReleasesFromBranch(self.vc.MasterBranch())
    else:  # pragma: no cover
      # Retrieve history for a specified branch.
      assert self._options.branch in (branches +
          [self.vc.CandidateBranch(), self.vc.MasterBranch()])
      releases += self.GetReleasesFromBranch(self._options.branch)

    # Newest version first.
    self["releases"] = sorted(releases,
                              key=lambda r: SortingKey(r["version"]),
                              reverse=True)
class SwitchChromium(Step):
  MESSAGE = "Switch to Chromium checkout."

  def RunStep(self):
    cwd = self._options.chromium
    # Check for a clean workdir.
    if not self.GitIsWorkdirClean(cwd=cwd):  # pragma: no cover
      self.Die("Workspace is not clean. Please commit or undo your changes.")
    # Assert that the DEPS file is there.
    if not os.path.exists(os.path.join(cwd, "DEPS")):  # pragma: no cover
      self.Die("DEPS file not present.")
class UpdateChromiumCheckout(Step):
  MESSAGE = "Update the checkout and create a new branch."

  def RunStep(self):
    cwd = self._options.chromium
    # Work on an up-to-date master in a dedicated branch (deleted in CleanUp).
    self.GitCheckout("master", cwd=cwd)
    self.GitPull(cwd=cwd)
    self.GitCreateBranch(self.Config("BRANCHNAME"), cwd=cwd)
def ConvertToCommitNumber(step, revision):
  """Return "revision" as an SVN-style commit number.

  Short all-digit strings are assumed to already be SVN revisions / commit
  positions and are returned unchanged; anything else is treated as a git
  hash and converted via the v8 checkout inside chromium.
  """
  # Simple check for git hashes.
  if revision.isdigit() and len(revision) < 8:
    return revision
  return step.GitConvertToSVNRevision(
      revision, cwd=os.path.join(step._options.chromium, "v8"))
class RetrieveChromiumV8Releases(Step):
  MESSAGE = "Retrieve V8 releases from Chromium DEPS."

  def RunStep(self):
    cwd = self._options.chromium
    # Only trunk and master releases can appear in chromium's DEPS.
    releases = filter(
        lambda r: r["branch"] in [self.vc.CandidateBranch(),
                                  self.vc.MasterBranch()],
        self["releases"])
    if not releases:  # pragma: no cover
      print "No releases detected. Skipping chromium history."
      return True

    # Update v8 checkout in chromium.
    self.GitFetchOrigin(cwd=os.path.join(cwd, "v8"))

    # "releases" is sorted newest-first, so the last entry is the oldest.
    oldest_v8_rev = int(releases[-1]["revision"])

    cr_releases = []
    try:
      # Walk chromium commits mentioning V8 that touched DEPS, extracting
      # the v8 revision pinned at each point.
      for git_hash in self.GitLog(
          format="%H", grep="V8", cwd=cwd).splitlines():
        if "DEPS" not in self.GitChangedFiles(git_hash, cwd=cwd):
          continue
        if not self.GitCheckoutFileSafe("DEPS", git_hash, cwd=cwd):
          break  # pragma: no cover
        deps = FileToText(os.path.join(cwd, "DEPS"))
        match = DEPS_RE.search(deps)
        if match:
          cr_rev = self.GetCommitPositionNumber(git_hash, cwd=cwd)
          if cr_rev:
            v8_rev = ConvertToCommitNumber(self, match.group(1))
            cr_releases.append([cr_rev, v8_rev])

            # Stop after reaching beyond the last v8 revision we want to
            # update. We need a small buffer for possible revert/reland
            # frenzies.
            # TODO(machenbach): Subtraction is not git friendly.
            if int(v8_rev) < oldest_v8_rev - 100:
              break  # pragma: no cover

    # Allow Ctrl-C interrupt.
    except (KeyboardInterrupt, SystemExit):  # pragma: no cover
      pass

    # Clean up.
    self.GitCheckoutFileSafe("DEPS", "HEAD", cwd=cwd)

    # Add the chromium ranges to the v8 trunk and bleeding_edge releases.
    all_ranges = BuildRevisionRanges(cr_releases)
    releases_dict = dict((r["revision"], r) for r in releases)
    for revision, ranges in all_ranges.iteritems():
      releases_dict.get(revision, {})["chromium_revision"] = ranges
# TODO(machenbach): Unify common code with method above.
# NOTE(review): class name has a typo ("Rietrieve"); renaming it would also
# require updating Releases._Steps, so it is kept as-is here.
class RietrieveChromiumBranches(Step):
  MESSAGE = "Retrieve Chromium branch information."

  def RunStep(self):
    cwd = self._options.chromium
    # Only trunk releases are mapped to chromium release branches.
    trunk_releases = filter(lambda r: r["branch"] == self.vc.CandidateBranch(),
                            self["releases"])
    if not trunk_releases:  # pragma: no cover
      print "No trunk releases detected. Skipping chromium history."
      return True

    oldest_v8_rev = int(trunk_releases[-1]["revision"])

    # Filter out irrelevant branches.
    branches = filter(lambda r: re.match(r"branch-heads/\d+", r),
                      self.GitRemotes(cwd=cwd))

    # Transform into pure branch numbers.
    branches = map(lambda r: int(re.match(r"branch-heads/(\d+)", r).group(1)),
                   branches)

    branches = sorted(branches, reverse=True)

    cr_branches = []
    try:
      # For each chromium release branch (newest first), read the pinned
      # v8 revision out of that branch's DEPS file.
      for branch in branches:
        if not self.GitCheckoutFileSafe("DEPS",
                                        "branch-heads/%d" % branch,
                                        cwd=cwd):
          break  # pragma: no cover
        deps = FileToText(os.path.join(cwd, "DEPS"))
        match = DEPS_RE.search(deps)
        if match:
          v8_rev = ConvertToCommitNumber(self, match.group(1))
          cr_branches.append([str(branch), v8_rev])

          # Stop after reaching beyond the last v8 revision we want to update.
          # We need a small buffer for possible revert/reland frenzies.
          # TODO(machenbach): Subtraction is not git friendly.
          if int(v8_rev) < oldest_v8_rev - 100:
            break  # pragma: no cover

    # Allow Ctrl-C interrupt.
    except (KeyboardInterrupt, SystemExit):  # pragma: no cover
      pass

    # Clean up.
    self.GitCheckoutFileSafe("DEPS", "HEAD", cwd=cwd)

    # Add the chromium branches to the v8 trunk releases.
    all_ranges = BuildRevisionRanges(cr_branches)
    trunk_dict = dict((r["revision"], r) for r in trunk_releases)
    for revision, ranges in all_ranges.iteritems():
      trunk_dict.get(revision, {})["chromium_branch"] = ranges
class CleanUp(Step):
    MESSAGE = "Clean up."

    def RunStep(self):
        """Restore the Chromium checkout to master and drop the work branch."""
        chromium_dir = self._options.chromium
        self.GitCheckout("master", cwd=chromium_dir)
        self.GitDeleteBranch(self.Config("BRANCHNAME"), cwd=chromium_dir)
        self.CommonCleanup()
class WriteOutput(Step):
    MESSAGE = "Print output."

    # NOTE(review): overrides Run rather than RunStep like the sibling steps --
    # presumably to bypass per-step bookkeeping; confirm against ScriptsBase.
    def Run(self):
        """Export the collected release data as CSV and/or JSON, else print it."""
        if self._options.csv:
            with open(self._options.csv, "w") as f:
                # restval="" fills missing keys; extrasaction="ignore" drops
                # release fields that are not in the column list.
                writer = csv.DictWriter(f,
                                        ["version", "branch", "revision",
                                         "chromium_revision", "patches_merged"],
                                        restval="",
                                        extrasaction="ignore")
                for release in self["releases"]:
                    writer.writerow(release)
        if self._options.json:
            with open(self._options.json, "w") as f:
                f.write(json.dumps(self["releases"]))
        # Fall back to stdout when no export file was requested.
        if not self._options.csv and not self._options.json:
            print self["releases"]  # pragma: no cover
class Releases(ScriptsBase):
    """Driver script: collects V8 release info and maps it onto Chromium."""

    def _PrepareOptions(self, parser):
        """Register command-line options for the release-tracking run."""
        parser.add_argument("-b", "--branch", default="recent",
                            help=("The branch to analyze. If 'all' is specified, "
                                  "analyze all branches. If 'recent' (default) "
                                  "is specified, track beta, stable and trunk."))
        parser.add_argument("-c", "--chromium",
                            help=("The path to your Chromium src/ "
                                  "directory to automate the V8 roll."))
        parser.add_argument("--csv", help="Path to a CSV file for export.")
        parser.add_argument("-m", "--max-releases", type=int, default=0,
                            help="The maximum number of releases to track.")
        parser.add_argument("--json", help="Path to a JSON file for export.")

    def _ProcessOptions(self, options):  # pragma: no cover
        # All options are optional; nothing to validate.
        return True

    def _Config(self):
        # Work-branch name and scratch file used across steps.
        return {
            "BRANCHNAME": "retrieve-v8-releases",
            "PERSISTFILE_BASENAME": "/tmp/v8-releases-tempfile",
        }

    def _Steps(self):
        # Order matters: each step consumes state persisted by earlier ones.
        return [
            Preparation,
            RetrieveV8Releases,
            SwitchChromium,
            UpdateChromiumCheckout,
            RetrieveChromiumV8Releases,
            RietrieveChromiumBranches,
            CleanUp,
            WriteOutput,
        ]
if __name__ == "__main__":  # pragma: no cover
    # Script entry point: run all steps and propagate the exit status.
    sys.exit(Releases().Run())
| hellotomfan/v8-coroutine | deps/v8/tools/push-to-trunk/releases.py | Python | gpl-2.0 | 18,090 |
import os
import sys
import yaml
import re
from subprocess import Popen, PIPE, call
from random import randrange
from threading import Thread
import SocketServer
import struct
import pdb
sys.path.insert(0, '/usr/local/lib/python2.6/dist-packages/rethinkdb')
import ql2_pb2 as p
# tree of YAML documents defining documentation
src_dir = sys.argv[1]
commands = []
# Walk the src files to compile all sections and commands
for root, dirs, file_names in os.walk(src_dir):
    for file_name in file_names:
        # NOTE(review): yaml.load without an explicit Loader can construct
        # arbitrary objects; acceptable only for trusted in-tree docs.
        # Also assumes every file parses to a mapping (non-YAML files or empty
        # documents would break the 'in' test below) -- TODO confirm.
        docs = yaml.load(file(os.path.join(root, file_name)))
        if 'commands' in docs:
            commands.extend(docs['commands'])
def validate_for(lang, port):
    """Generate and run the example-validation script for one driver language.

    Writes every documented example for *lang* into build/test.<lang>,
    prefixed with boilerplate that connects the driver to the black-hole
    server on *port*, then executes the file with the matching interpreter.
    Exits the whole process with status 1 if the generated script fails.
    """
    test_file_name = 'build/test.%s' % lang
    with open(test_file_name, 'w') as out:
        # Language-specific prelude: connect a driver to the fake server.
        if lang == 'py':
            out.write("""
from sys import path
path.insert(0, '../../../drivers/python')
import rethinkdb as r
conn = r.connect(port=%d)
print 'Running python validation.'
""" % port)
        elif lang == 'js':
            out.write("""
var r = require("../../../../drivers/javascript/build/rethinkdb");
var callback = (function() { });
var cur = {next:(function(){}), hasNext:(function(){}), each:(function(){}), toArray:(function(){})};
r.connect({port:%d}, function(err, conn) {
console.log("Running Javascript validation.");
""" % port)
        elif lang == 'rb':
            out.write("""
$LOAD_PATH.unshift('../../../drivers/ruby/lib')
require 'rethinkdb.rb'
include RethinkDB::Shortcuts
conn = r.connect('localhost', %d)
puts 'Running Ruby validation.'
""" % port)
        elif lang == 'ph':
            out.write("""
<?php
error_reporting(-1);
set_include_path("../src");
require_once("rdb/rdb.php");
$conn = r\\connect('localhost', %d);
echo 'Running PHP validation.\n';
""" % port)
        for command in commands:
            section_name = command['section']
            command_name = command['tag']
            for i, example in enumerate(command['examples']):
                test_tag = section_name + "-" + command_name + "-" + str(i)
                test_case = example['code']
                # Examples may provide per-language variants as a dict.
                if isinstance(test_case, dict):
                    if lang in test_case:
                        test_case = test_case[lang]
                    else:
                        test_case = None
                # An example can opt out of validation entirely.
                if 'validate' in example and not example['validate']:
                    test_case = None
                    skip_validation = True
                else:
                    skip_validation = False
                # Check for an override of this test case
                if lang in command:
                    if isinstance(command[lang], bool) and not command[lang]:
                        test_case = None
                    elif isinstance(command[lang], dict):
                        override = command[lang]
                        if 'examples' in override:
                            if i in override['examples']:
                                example_override = override['examples'][i]
                                if len(example_override) == 0:
                                    test_case = None
                                elif 'code' in example_override:
                                    test_case = example_override['code']
                                # An explicit override of 'validate' wins;
                                # otherwise fall back to the example-level flag.
                                if 'validate' in example_override:
                                    if not example_override['validate']:
                                        test_case = None
                                elif skip_validation:
                                    test_case = None
                comment = '#'
                if lang == 'js':
                    comment = '//'
                if lang == 'ph':
                    comment = '//'
                if test_case is not None:
                    # Tag every line so failures trace back to the doc example.
                    test_case = re.sub("\n", " %s %s\n" % (comment, test_tag), test_case)
                    out.write("%s %s %s\n" % (test_case, comment, test_tag))
        # Language-specific epilogue (and, for js/ph, closing syntax).
        if lang == 'js':
            out.write("console.log('Javascript validation complete.');\n")
            out.write("conn.close()})")
        if lang == 'py':
            out.write("print 'Python validation complete.'")
        if lang == 'rb':
            out.write("puts 'Ruby validation complete.'")
        if lang == 'ph':
            out.write("echo 'PHP validation complete.\n';\n")
            out.write("?>")
    if lang == 'py':
        interpreter = 'python'
    elif lang == 'js':
        interpreter = 'node'
    elif lang == 'rb':
        interpreter = 'ruby'
    elif lang == 'ph':
        interpreter = 'php5'
    ret = call([interpreter, test_file_name])
    # BUG FIX: was "if ret is not 0" -- an identity comparison that relies on
    # CPython small-int interning; compare return codes by value instead.
    if ret != 0:
        sys.exit(1)
class BlackHoleRDBHandler(SocketServer.BaseRequestHandler):
    """Fake RethinkDB server: acknowledges every query with a null atom.

    Lets driver examples run without a real database; no query is ever
    actually evaluated.
    """

    def handle(self):
        # Consume the 4-byte protocol-version magic sent on connect.
        magic = self.request.recv(4)
        while (True):
            # Each message: little-endian uint32 length prefix + protobuf body.
            header = self.request.recv(4)
            if len(header) == 0:
                break;
            (length,) = struct.unpack("<L", header)
            # NOTE(review): recv(length) may legally return fewer bytes than
            # requested; fine for small local queries, fragile in general.
            data = self.request.recv(length)
            query = p.Query()
            query.ParseFromString(data)
            # Reply SUCCESS_ATOM with a single null datum, echoing the query's
            # token so the driver can match response to request.
            response = p.Response()
            response.token = query.token
            response.type = p.Response.SUCCESS_ATOM
            datum = response.response.add()
            datum.type = p.Datum.R_NULL
            response_protobuf = response.SerializeToString()
            response_header = struct.pack("<L", len(response_protobuf))
            self.request.sendall(response_header + response_protobuf)
def validate():
    """Run driver example validation against a throwaway black-hole server."""
    # Setup void server.
    # BUG FIX: bind to port 0 and let the OS pick a free port; the previous
    # randrange(1025, 65535) could collide with a port already in use.
    server = SocketServer.TCPServer(('localhost', 0), BlackHoleRDBHandler)
    port = server.server_address[1]
    t = Thread(target=server.serve_forever)
    t.start()
    try:
        #validate_for('py', port)
        #validate_for('js', port)
        #validate_for('rb', port)
        validate_for('ph', port)
    finally:
        # Always stop the background server, even if validation exits early.
        server.shutdown()

validate()
| felixonmars/app | lib/vendor/php-rql/docs/scripts/validate_examples.py | Python | gpl-2.0 | 5,917 |
# -*- coding: utf-8 -*-
'''
Genesis Add-on
Copyright (C) 2015 lambda
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urllib
from resources.lib.libraries import client
from resources.lib.libraries import jsunpack
def resolve(url):
    """Resolve an ishared page URL to a direct, playable media URL.

    Returns the stream URL with User-Agent/Referer appended in Kodi's
    "url|headers" form, or None on any failure (errors are deliberately
    swallowed -- this is a best-effort resolver).
    """
    try:
        # Headers appended to the final URL so the player replays them.
        headers = '|%s' % urllib.urlencode({'User-Agent': client.agent(), 'Referer': url})
        url = url.replace('/video/', '/embed/')
        result = client.request(url)
        # Unpack any p,a,c,k,e,d JavaScript line by line and append the
        # deobfuscated source so the regexes below can see it too.
        unpacked = ''
        packed = result.split('\n')
        for i in packed:
            try: unpacked += jsunpack.unpack(i)
            except: pass
        result += unpacked
        result = re.sub('\s\s+', ' ', result)
        # Inline simple "var x = 'value'" assignments; 100 passes handle
        # variables defined in terms of one another.
        var = re.compile('var\s(.+?)\s*=\s*\'(.+?)\'').findall(result)
        for i in range(100):
            for v in var: result = result.replace("' %s '" % v[0], v[1]).replace("'%s'" % v[0], v[1])
        # Pull the first file entry out of the player's sources array.
        url = re.compile('sources\s*:\s*\[.+?file\s*:\s*(.+?)\s*\,').findall(result)[0]
        # The file entry may itself be a variable name; dereference it.
        var = re.compile('var\s+%s\s*=\s*\'(.+?)\'' % url).findall(result)
        if len(var) > 0: url = var[0].strip()
        url += headers
        if url.startswith('http'): return url
    except:
        return
| AMOboxTV/AMOBox.LegoBuild | plugin.video.titan/resources/lib/resolvers/ishared.py | Python | gpl-2.0 | 1,793 |
#!/bin/python
import os, subprocess
import logging
from autotest.client import test
from autotest.client.shared import error, software_manager
# Package manager handle used to install missing build dependencies.
sm = software_manager.SoftwareManager()


class prelink(test.test):
    """
    Autotest module for testing basic functionality
    of prelink

    @author Athira Rajeev <atrajeev@in.ibm.com>
    """
    version = 1
    nfail = 0  # cumulative failure counter across test phases
    path = ''

    def initialize(self, test_path=''):
        """
        Sets the overall failure counter for the test.
        """
        self.nfail = 0
        # The prelink test suite is built from source, so compilers are needed.
        for package in ['gcc', 'gcc-c++']:
            if not sm.check_installed(package):
                logging.debug("%s missing - trying to install", package)
                sm.install(package)
        # Build the test binaries in <test_path>/prelink.
        ret_val = subprocess.Popen(['make', 'all'], cwd="%s/prelink" %(test_path))
        ret_val.communicate()
        if ret_val.returncode != 0:
            self.nfail += 1
        logging.info('\n Test initialize successfully')

    def run_once(self, test_path=''):
        """
        Trigger test run
        """
        try:
            # prelink.sh locates shared helper scripts via LTPBIN.
            os.environ["LTPBIN"] = "%s/shared" %(test_path)
            ret_val = subprocess.Popen(['./prelink.sh'], cwd="%s/prelink" %(test_path))
            ret_val.communicate()
            if ret_val.returncode != 0:
                self.nfail += 1
        except error.CmdError, e:
            self.nfail += 1
            logging.error("Test Failed: %s", e)

    def postprocess(self):
        # Fail the test only at the end, after all phases have been recorded.
        if self.nfail != 0:
            logging.info('\n nfails is non-zero')
            raise error.TestError('\nTest failed')
        else:
            logging.info('\n Test completed successfully ')
| rajashreer7/autotest-client-tests | linux-tools/prelink/prelink.py | Python | gpl-2.0 | 1,662 |
import sys
import gevent
from gevent import socket
import greentest
class Test(greentest.TestCase):
    """Check that a gevent socket recv() honours settimeout()."""

    def start(self):
        # Listener on an ephemeral port; accept in the background so the
        # client can connect but never receives any data.
        self.server = socket.socket()
        self.server.bind(('127.0.0.1', 0))
        self.server.listen(1)
        self.server_port = self.server.getsockname()[1]
        self.acceptor = gevent.spawn(self.server.accept)

    def stop(self):
        self.server.close()
        self.acceptor.kill()
        del self.acceptor
        del self.server

    def test(self):
        self.start()
        try:
            sock = socket.socket()
            sock.connect(('127.0.0.1', self.server_port))
            try:
                sock.settimeout(0.1)
                try:
                    # Nothing is ever sent, so this must time out.
                    result = sock.recv(1024)
                    raise AssertionError('Expected timeout to be raised, instead recv() returned %r' % (result, ))
                except socket.error:
                    ex = sys.exc_info()[1]
                    self.assertEqual(ex.args, ('timed out',))
                    self.assertEqual(str(ex), 'timed out')
                    # NOTE(review): ex[0] indexing works only on Python 2
                    # exceptions; would need ex.args[0] on Python 3.
                    self.assertEqual(ex[0], 'timed out')
            finally:
                sock.close()
        finally:
            self.stop()
if __name__ == '__main__':
    # Run under the greentest harness when executed directly.
    greentest.main()
| mgadi/naemonbox | sources/psdash/gevent-1.0.1/greentest/test__socket_timeout.py | Python | gpl-2.0 | 1,258 |
from __future__ import with_statement
from xdrdef.pnfs_block_pack import PNFS_BLOCKPacker as Packer
from xdrdef.pnfs_block_pack import PNFS_BLOCKUnpacker as Unpacker
from xdrdef.pnfs_block_type import *
from xdrdef.pnfs_block_const import *
import fs_base
from threading import Lock
import struct
# draft 8
# All sizes are in bytes unless otherwise indicated
"""
Need to be able to set topology in server_exports
From topology, need to create device
"""
# Module-wide counter that hands out unique ids (printing/debugging only).
id = 0
id_lock = Lock()


def getid(d):
    """Return the next unique id; these are used only internally for printing."""
    global id
    with id_lock:
        assigned = id
        id += 1
    return assigned
class BlockVolume(fs_base.LayoutFile):
    """Deals with disk topology information"""

    class FakeFs(object):
        # Minimal filesystem stand-in: maps a byte offset to a single extent.
        def _find_extent(self, pos, inode):
            # inode here is the topology root block.Volume
            vol, v_pos, limit = inode.extent(pos, 1 << 64)
            return fs_base.Extent(fs_base.VALID, v_pos, pos, limit, vol._fd)

    def __init__(self, volume):
        # Only Simple volumes correspond to real backing devices.
        self._component_list = [vol for vol in volume._dump()
                                if type(vol) == Simple]
        self._openlist = []  # file objects opened by open(), closed in order
        self.address_body = volume.get_addr()
        super(BlockVolume, self).__init__(volume, self.FakeFs(), volume._size)

    def open(self, mode="rb+"):
        """Open every backing device; raises IOError if one is missing."""
        # STUB - need care with mode, for example--append would not work as is
        for vol in self._component_list:
            # STUB - rewrite in terms of context managers
            if vol.backing_dev is None:
                raise IOError("No backing device for Simple Volume %i" % vol.id)
            vol._fd = open(vol.backing_dev, mode)
            self._openlist.append(vol._fd)
        return self

    def close(self):
        # XXX Careful here - what if errors on a close, or previously on open?
        for fd in reversed(self._openlist):
            fd.close()

    # Context-manager protocol: "with BlockVolume(v) as bv:" opens/closes all.
    __enter__ = open

    def __exit__(self, t, v, tb):
        self.close()
class Volume(object):
    """Superclass used to represent topology components."""

    def get_addr(self):
        """Generate the opaque part of device_addr4 used by NFS4.1.

        Note this corresponds to device.address_body property used by
        op_getdeviceinfo.
        """
        # Create list of all volumes referenced, in order of reference
        # (renamed from "list" to avoid shadowing the builtin).
        vols = self._dump()
        # Create mapping from device to its index in the list
        mapping = dict(zip(vols, range(len(vols))))
        # Create (unpacked) pnfs_block_volume4 structure for each volume
        addr = pnfs_block_deviceaddr4([d.get_xdr(mapping) for d in vols])
        # Create packed xdr string
        p = Packer()
        p.pack_pnfs_block_deviceaddr4(addr)
        return p.get_buffer()

    def _dump(self):
        """Recursively scan for all devices in tree.

        They are returned in order of reference, to build volume array.
        """
        out = []
        for v in self.volumes:
            out.extend(v._dump())
        out = remove_dups(out)
        out.append(self)
        return out

    def get_xdr(self, mapping):
        """Returns filled (and unpacked) pnfs_block_volume4 structure.

        Need mapping from device to top-level array index to do the conversion.
        """
        raise NotImplementedError

    def resolve(self, i):
        """Map a byte offset to the corresponding Simple volume and byte offset.
        """
        # BUG FIX: previously *returned* the NotImplementedError class, so a
        # missing subclass override failed silently instead of raising.
        raise NotImplementedError

    def extent(self, i, limit):
        """Same as resolve, with addition of how far mapping extends."""
        # BUG FIX: was "return NotImplementedError" (see resolve above).
        raise NotImplementedError
class Simple(Volume):
    """Represents an actual disk. Always a leaf node in the topology tree."""

    def __init__(self, signature, size=None, backing_dev=None):
        self.type = PNFS_BLOCK_VOLUME_SIMPLE
        self.id = getid(self)
        if type(signature[0]) == int:
            # Make it easy to send a single component
            signature = [signature]
        # Each component is an (offset, contents) pair written to the disk.
        self.sig = [pnfs_block_sig_component4(i, s) for i, s in signature]
        self._size = size  # in bytes
        self.backing_dev = backing_dev
        if backing_dev is None:
            # Size-only volume: usable for topology math but not for I/O.
            if size is None:
                raise ValueError("Must set either size or backing_dev")
            return
        self._fd = None
        with open(backing_dev, "rb+") as fd:
            # Determine device's actual size
            fd.seek(0, 2)
            true_size = fd.tell()
            if size is None:
                self._size = true_size
            elif true_size < size:
                raise ValueError("backing dev size %r < %r" % (true_size, size))
            self._write_sig(fd)

    def _write_sig(self, fd):
        """Write out disk signature to open fd."""
        for comp in self.sig:
            offset = comp.bsc_sig_offset
            # Negative offsets are relative to the end of the device.
            if offset < 0:
                offset += self._size
            fd.seek(offset)
            fd.write(comp.bsc_contents)

    def __repr__(self):
        return "Simple %i" % self.id

    def _dump(self):
        """Since this is always a leaf node of tree, end recursive scan."""
        return (self, )

    def get_xdr(self, mapping):
        info = pnfs_block_simple_volume_info4(self.sig)
        return pnfs_block_volume4(PNFS_BLOCK_VOLUME_SIMPLE, bv_simple_info=info)

    def resolve(self, i):
        """Leaf case: the offset maps to this very volume."""
        # print "resolve(%i) %r" % (i, self)
        if i < 0 or i >= self._size:
            raise ValueError("Asked for %i of %i" % (i, self._size))
        return (self, i)

    def extent(self, i, limit):
        # The mapping extends to the end of this device, capped by limit.
        return (self, i, min(limit, self._size - i))
class Slice(Volume):
    """A contiguous slice from a single volume."""

    # NOTE(review): start/length are commented as block counts, but resolve()
    # compares them directly against byte offsets (_size) -- confirm units.
    def __init__(self, volume, start, length):
        self.type = PNFS_BLOCK_VOLUME_SLICE
        self.id = getid(self)
        self.start = start  # block offset
        self.length = length  # length in blocks
        self.volumes = [volume]  # volume which is sliced
        self._size = length

    def get_xdr(self, mapping):
        info = pnfs_block_slice_volume_info4(self.start, self.length,
                                             mapping[self.volumes[0]])
        return pnfs_block_volume4(PNFS_BLOCK_VOLUME_SLICE, bv_slice_info=info)

    def __repr__(self):
        return "Slice %i (from vol %i)" % (self.id, self.volumes[0].id)

    def resolve(self, i):
        """Delegate to the sliced volume, shifted by the slice start."""
        # print "resolve(%i) %r" % (i, self)
        # print self.start, self._size, self.length
        if i < 0 or i >= self._size:
            raise ValueError("Asked for %i of %i" % (i, self._size))
        return self.volumes[0].resolve(self.start + i)

    def extent(self, i, limit):
        # Cap the extent at the end of the slice before delegating.
        return self.volumes[0].extent(self.start + i,
                                      min(limit, self._size - i))
class Concat(Volume):
    """Volume formed by laying several child volumes out back to back."""

    def __init__(self, volumes):
        self.type = PNFS_BLOCK_VOLUME_CONCAT
        self.id = getid(self)
        self.volumes = volumes
        # Total size is the sum of all children.
        self._size = sum([v._size for v in volumes])

    def get_xdr(self, mapping):
        indices = [mapping[v] for v in self.volumes]
        info = pnfs_block_concat_volume_info4(indices)
        return pnfs_block_volume4(PNFS_BLOCK_VOLUME_CONCAT, bv_concat_info=info)

    def __repr__(self):
        return "Concat %i of %r" % (self.id, [v.id for v in self.volumes])

    def resolve(self, i):
        """Find the child containing offset i and delegate to it."""
        if i < 0 or i >= self._size:
            raise ValueError("Asked for %i of %i" % (i, self._size))
        base = 0
        for child in self.volumes:
            upper = base + child._size
            if i < upper:
                return child.resolve(i - base)
            base = upper
        # Shouldn't get here
        raise RuntimeError

    def extent(self, i, limit):
        """Like resolve, but also caps the extent at the child boundary."""
        base = 0
        for child in self.volumes:
            upper = base + child._size
            if i < upper:
                return child.extent(i - base, min(limit, upper - i))
            base = upper
        # Shouldn't get here
        raise RuntimeError
class Stripe(Volume):
    """Stripe of several volumes, all of the same size."""

    def __init__(self, size, volumes):
        self.type = PNFS_BLOCK_VOLUME_STRIPE
        self.id = getid(self)
        self.stripe_unit = size  # in blocks?
        self.volumes = volumes
        self._size = sum([v._size for v in volumes])  # XXX All same size?

    def get_xdr(self, mapping):
        """Return the unpacked pnfs_block_volume4 for this stripe set."""
        info = pnfs_block_stripe_volume_info4(self.stripe_unit,
                                              [mapping[v] for v in self.volumes])
        return pnfs_block_volume4(PNFS_BLOCK_VOLUME_STRIPE, bv_stripe_info=info)

    def __repr__(self):
        # BUG FIX: previously printed "Slice", misidentifying the volume type.
        return "Stripe %i (size=%i) of %r" % (self.id, self.stripe_unit,
                                              [v.id for v in self.volumes])

    def resolve(self, i):
        """Map global offset i onto the striped disks.

        0 1 2 3 4 5 6 7 8   global_stripe_number
        0 1 2               local_stripe_number (per disk)
        0 1 2 0 1 2 0 1 2   disk_number
        """
        def split(x, mod):
            return (x // mod, x % mod)
        if i < 0 or i >= self._size:
            raise ValueError("Asked for %i of %i" % (i, self._size))
        global_stripe_number, stripe_pos = split(i, self.stripe_unit)
        local_stripe_number, disk_number = split(global_stripe_number,
                                                 len(self.volumes))
        disk_pos = local_stripe_number * self.stripe_unit + stripe_pos
        return self.volumes[disk_number].resolve(disk_pos)

    def extent(self, i, limit):
        """Like resolve, capping the extent at the stripe-unit boundary."""
        def split(x, mod):
            return (x // mod, x % mod)
        global_stripe_number, stripe_pos = split(i, self.stripe_unit)
        local_stripe_number, disk_number = split(global_stripe_number,
                                                 len(self.volumes))
        disk_pos = local_stripe_number * self.stripe_unit + stripe_pos
        return self.volumes[disk_number].extent(
            disk_pos, min(limit, self.stripe_unit - stripe_pos))
def remove_dups(l):
    """Return the elements of *l* in first-seen order with duplicates removed.

    Elements only need to support ==, not hashing. BUG FIX: the previous
    version popped items off *l*, destructively emptying the caller's list
    as a side effect; the input is now left untouched.
    """
    out = []
    for item in l:
        if item not in out:
            out.append(item)
    return out
if __name__=="__main__":
    # No standalone behavior; this module is used via its classes.
    pass
| srimalik/pynfs | nfs4.1/block.py | Python | gpl-2.0 | 10,274 |
# Regression script: after monkey-patching, threading.local data set in the
# main greenlet must not leak into other "threads" (greenlets).
from gevent import monkey; monkey.patch_all()
import threading

localdata = threading.local()
localdata.x = "hello"
assert localdata.x == 'hello'

success = []

def func():
    # In a fresh thread the attribute must be absent and the local's
    # per-thread __dict__ empty.
    try:
        localdata.x
        raise AssertionError('localdata.x must raise AttributeError')
    except AttributeError:
        pass
    assert localdata.__dict__ == {}, localdata.__dict__
    success.append(1)

t = threading.Thread(None, func)
t.start()
t.join()
# The worker ran its checks, and the main thread's value is untouched.
assert success == [1], 'test failed'
assert localdata.x == 'hello'
| mgadi/naemonbox | sources/psdash/gevent-1.0.1/greentest/test__threading_patched_local.py | Python | gpl-2.0 | 510 |