repo_name stringlengths 5 100 | ref stringlengths 12 67 | path stringlengths 4 244 | copies stringlengths 1 8 | content stringlengths 0 1.05M ⌀ |
|---|---|---|---|---|
McNetic/couchpotato-ger | refs/heads/master | library/git/commit.py | 11 | # Copyright (c) 2009, Rotem Yaari <vmalloc@gmail.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of organization nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY Rotem Yaari ''AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL Rotem Yaari BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from .ref import Ref
from .files import ModifiedFile
SHA1_LENGTH = 40
class Commit(Ref):
def __init__(self, repo, sha):
sha = str(sha).lower()
if len(sha) < SHA1_LENGTH:
sha = repo._getCommitByPartialHash(sha).hash
super(Commit, self).__init__(repo, sha)
self.hash = sha
def __repr__(self):
return self.hash
def __eq__(self, other):
if not isinstance(other, Commit):
if isinstance(other, Ref):
other = other.getHead().hash
else:
other = other.hash
if other is None:
return False
if not isinstance(other, basestring):
raise TypeError("Comparing %s and %s" % (type(self), type(other)))
return (self.hash == other.lower())
def getParents(self):
output = self.repo._getOutputAssertSuccess("git rev-list %s --parents -1" % self)
return [Commit(self.repo, sha.strip()) for sha in output.split()[1:]]
def getChange(self):
returned = []
for line in self.repo._getOutputAssertSuccess("git show --pretty=format: --raw %s" % self).splitlines():
line = line.strip()
if not line:
continue
filename = line.split()[-1]
returned.append(ModifiedFile(filename))
return returned
getChangedFiles = getChange
############################ Misc. Commit attributes ###########################
def _getCommitField(self, field):
return self.repo._executeGitCommandAssertSuccess("git log -1 --pretty=format:%s %s" % (field, self)).stdout.read().strip()
def getAuthorName(self):
return self._getCommitField("%an")
def getAuthorEmail(self):
return self._getCommitField("%ae")
def getDate(self):
return int(self._getCommitField("%at"))
def getSubject(self):
return self._getCommitField("%s")
def getMessageBody(self):
return self._getCommitField("%b")
|
numerigraphe/odoomrp-wip | refs/heads/8.0 | mrp_operations_extension/wizard/mrp_product_produce.py | 2 | # -*- encoding: utf-8 -*-
##############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
from openerp import fields, models
class MrpWorkOrderProduce(models.TransientModel):
_name = "mrp.work.order.produce"
def default_get(self, cr, uid, var_fields, context=None):
a = super(MrpWorkOrderProduce, self).default_get(
cr, uid, var_fields, context=context)
work = self.pool['mrp.production.workcenter.line'].browse(
cr, uid, context.get('active_ids'), context=context)[0]
a.update({'final_product': work.do_production})
return a
def _get_product_id(self):
""" To obtain product id
@return: id
"""
prod = False
if self.env.context.get("active_id"):
work_line = self.env['mrp.production.workcenter.line'].browse(
self.env.context.get("active_id"))
prod = work_line.production_id
return prod and prod.product_id or False
def _get_track(self):
prod = self._get_product_id()
return prod and prod.track_production or False
def do_produce(self, cr, uid, ids, context=None):
work_line = self.pool['mrp.production.workcenter.line'].browse(
cr, uid, context.get("active_id"), context=context)
production_id = work_line.production_id.id
assert production_id
data = self.browse(cr, uid, ids[0], context=context)
self.pool['mrp.production'].action_produce(
cr, uid, production_id, data.product_qty,
data.mode, data, context=context)
return {}
def do_consume(self, cr, uid, ids, context=None):
work_line = self.pool['mrp.production.workcenter.line'].browse(
cr, uid, context.get("active_id"), context=context)
production_id = work_line.production_id.id
assert production_id
data = self.browse(cr, uid, ids[0], context=context)
self.pool['mrp.production'].action_produce(
cr, uid, production_id, False, 'consume', data, context=context)
return {}
def do_consume_produce(self, cr, uid, ids, context=None):
work_line = self.pool['mrp.production.workcenter.line'].browse(
cr, uid, context.get("active_id"), context=context)
production_id = work_line.production_id.id
assert production_id
data = self.browse(cr, uid, ids[0], context=context)
self.pool['mrp.production'].action_produce(
cr, uid, production_id, data.product_qty, 'consume_produce', data,
context=context)
return {}
def on_change_qty(self, cr, uid, ids, product_qty, consume_lines,
context=None):
"""
When changing the quantity of products to be producedit will
recalculate the number of raw materials needed according to
the scheduled products and the already consumed/produced products
It will return the consume lines needed for the products
to be produced which the user can still adapt
"""
prod_obj = self.pool["mrp.production"]
work_line = self.pool['mrp.production.workcenter.line'].browse(
cr, uid, context.get("active_id"), context=context)
production = work_line.production_id
consume_lines = []
new_consume_lines = []
if product_qty > 0.0:
consume_lines = prod_obj._calculate_qty(
cr, uid, production, product_qty=product_qty, context=context)
line_ids = [i.product_id.id for i in work_line.product_line]
for consume in consume_lines:
if consume['product_id'] in line_ids:
new_consume_lines.append([0, False, consume])
return {'value': {'consume_lines': new_consume_lines}}
def _get_product_qty(self):
""" To obtain product quantity
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID of the user currently logged in
@param context: A standard dictionary
@return: Quantity
"""
work_line = self.env['mrp.production.workcenter.line'].browse(
self.env.context.get("active_id"))
prod = work_line.production_id
done = 0.0
for move in prod.move_created_ids2:
if move.product_id == prod.product_id:
if not move.scrapped:
done += move.product_qty
return (prod.product_qty - done) or prod.product_qty
product_id = fields.Many2one('product.product',
string='Product', default=_get_product_id)
product_qty = fields.Float('Select Quantity',
digits=(12, 6), required=True,
default=_get_product_qty)
mode = fields.Selection([('consume_produce', 'Consume & Produce'),
('consume', 'Consume Only')],
string='Mode', required=True,
default='consume')
lot_id = fields.Many2one('stock.production.lot', 'Lot')
consume_lines = fields.One2many('mrp.product.produce.line',
'work_produce_id',
string='Products Consumed')
track_production = fields.Boolean('Track production', default=_get_track)
final_product = fields.Boolean(string='Final Product to Stock')
class MrpProductProduceLine(models.TransientModel):
_inherit = "mrp.product.produce.line"
work_produce_id = fields.Many2one('mrp.work.order.produce')
|
erkanay/django | refs/heads/master | tests/unmanaged_models/__init__.py | 12133432 | |
potray/SSBW_Django | refs/heads/master | practica/__init__.py | 12133432 | |
davidsminor/cortex | refs/heads/master | test/IECore/TypeIdTest.py | 12 | ##########################################################################
#
# Copyright (c) 2008, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import IECore
class TypeIdTest( unittest.TestCase ) :
def testNoDuplicates( self ) :
# boost.python.enum doesn't allow us to ensure that no duplicate values are
# in it. Here we do a simple verification to ensure that the value of each
# attribute IECore.TypeId.XXXXX is present in the "values" array. If it's
# not, the likely cause is a duplicate.
ids = {}
num = 0
for i in dir( IECore.TypeId ) :
v = getattr( IECore.TypeId, i )
if type( v ) is IECore.TypeId :
if v in ids :
raise RuntimeError ("TypeId for %s is a duplicate of %s" % ( i, ids[v] ) )
ids[v] = i
num = num + 1
self.assertEqual( num, len( IECore.TypeId.values ) )
if __name__ == "__main__":
unittest.main()
|
LifeDJIK/S.H.I.V.A. | refs/heads/master | containers/shiva/hazelcast/protocol/codec/lock_is_locked_by_current_thread_codec.py | 2 | from hazelcast.serialization.bits import *
from hazelcast.protocol.client_message import ClientMessage
from hazelcast.protocol.custom_codec import *
from hazelcast.util import ImmutableLazyDataList
from hazelcast.protocol.codec.lock_message_type import *
REQUEST_TYPE = LOCK_ISLOCKEDBYCURRENTTHREAD
RESPONSE_TYPE = 101
RETRYABLE = True
def calculate_size(name, thread_id):
""" Calculates the request payload size"""
data_size = 0
data_size += calculate_size_str(name)
data_size += LONG_SIZE_IN_BYTES
return data_size
def encode_request(name, thread_id):
""" Encode request into client_message"""
client_message = ClientMessage(payload_size=calculate_size(name, thread_id))
client_message.set_message_type(REQUEST_TYPE)
client_message.set_retryable(RETRYABLE)
client_message.append_str(name)
client_message.append_long(thread_id)
client_message.update_frame_length()
return client_message
def decode_response(client_message, to_object=None):
""" Decode response from client message"""
parameters = dict(response=None)
parameters['response'] = client_message.read_bool()
return parameters
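# A minimal sketch of how such a codec is typically driven (the transport
# call is a placeholder; only encode_request/decode_response come from
# this file):
#
#   request = encode_request("my-lock", thread_id=1)
#   # ... send `request` over the client connection, await the reply ...
#   # response_message = connection.invoke(request)   # hypothetical call
#   # locked = decode_response(response_message)['response']  # bool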
|
internap/fake-switches | refs/heads/master | tests/dell10g/test_enabled_with_commit_delay.py | 4 | # Copyright 2016 Internap.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from time import time
from hamcrest import greater_than, assert_that, less_than
from tests.dell10g import enable
from tests.util.global_reactor import COMMIT_DELAY
from tests.util.protocol_util import with_protocol, SshTester, ProtocolTest
class Dell10GEnabledWithCommitDelayTest(ProtocolTest):
tester_class = SshTester
test_switch = "commit-delayed-dell10g"
@with_protocol
def test_write_memory_with_commit_delay(self, t):
t.child.timeout = 10
enable(t)
t.write("copy running-config startup-config")
t.readln("")
t.readln("This operation may take a few minutes.")
t.readln("Management interfaces will not be available during this time.")
t.readln("")
t.read("Are you sure you want to save? (y/n) ")
t.write_raw("y")
start_time = time()
t.readln("")
t.readln("")
t.readln("Configuration Saved!")
end_time = time()
t.read("my_switch#")
assert_that((end_time - start_time), greater_than(COMMIT_DELAY))
@with_protocol
def test_write_memory_abort_does_not_delay(self, t):
t.child.timeout = 10
enable(t)
t.write("copy running-config startup-config")
t.readln("")
t.readln("This operation may take a few minutes.")
t.readln("Management interfaces will not be available during this time.")
t.readln("")
t.read("Are you sure you want to save? (y/n) ")
t.write_raw("n")
start_time = time()
t.readln("")
t.readln("")
t.readln("Configuration Not Saved!")
end_time = time()
t.read("my_switch#")
assert_that((end_time - start_time), less_than(COMMIT_DELAY))
|
Khan/git-bigfile | refs/heads/master | vendor/boto/swf/layer2.py | 130 | """Object-oriented interface to SWF wrapping boto.swf.layer1.Layer1"""
import time
from functools import wraps
from boto.swf.layer1 import Layer1
from boto.swf.layer1_decisions import Layer1Decisions
DEFAULT_CREDENTIALS = {
'aws_access_key_id': None,
'aws_secret_access_key': None
}
def set_default_credentials(aws_access_key_id, aws_secret_access_key):
"""Set default credentials."""
DEFAULT_CREDENTIALS.update({
'aws_access_key_id': aws_access_key_id,
'aws_secret_access_key': aws_secret_access_key,
})
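# A minimal sketch: set credentials once, then construct SWF objects without
# repeating them (the key values below are placeholders, not real keys):
#
#   set_default_credentials('AKIA...', 'secret...')
#   dom = Domain(name='my-domain')   # picks up DEFAULT_CREDENTIALS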
class SWFBase(object):
name = None
domain = None
aws_access_key_id = None
aws_secret_access_key = None
region = None
def __init__(self, **kwargs):
# Set default credentials.
for credkey in ('aws_access_key_id', 'aws_secret_access_key'):
if DEFAULT_CREDENTIALS.get(credkey):
setattr(self, credkey, DEFAULT_CREDENTIALS[credkey])
# Override attributes with keyword args.
for kwarg in kwargs:
setattr(self, kwarg, kwargs[kwarg])
self._swf = Layer1(self.aws_access_key_id,
self.aws_secret_access_key,
region=self.region)
def __repr__(self):
rep_str = str(self.name)
if hasattr(self, 'version'):
rep_str += '-' + str(getattr(self, 'version'))
return '<%s %r at 0x%x>' % (self.__class__.__name__, rep_str, id(self))
class Domain(SWFBase):
"""Simple Workflow Domain."""
description = None
retention = 30
@wraps(Layer1.describe_domain)
def describe(self):
"""DescribeDomain."""
return self._swf.describe_domain(self.name)
@wraps(Layer1.deprecate_domain)
def deprecate(self):
"""DeprecateDomain"""
self._swf.deprecate_domain(self.name)
@wraps(Layer1.register_domain)
def register(self):
"""RegisterDomain."""
self._swf.register_domain(self.name, str(self.retention),
self.description)
@wraps(Layer1.list_activity_types)
def activities(self, status='REGISTERED', **kwargs):
"""ListActivityTypes."""
act_types = self._swf.list_activity_types(self.name, status, **kwargs)
act_objects = []
for act_args in act_types['typeInfos']:
act_ident = act_args['activityType']
del act_args['activityType']
act_args.update(act_ident)
act_args.update({
'aws_access_key_id': self.aws_access_key_id,
'aws_secret_access_key': self.aws_secret_access_key,
'domain': self.name,
'region': self.region,
})
act_objects.append(ActivityType(**act_args))
return act_objects
@wraps(Layer1.list_workflow_types)
def workflows(self, status='REGISTERED', **kwargs):
"""ListWorkflowTypes."""
wf_types = self._swf.list_workflow_types(self.name, status, **kwargs)
wf_objects = []
for wf_args in wf_types['typeInfos']:
wf_ident = wf_args['workflowType']
del wf_args['workflowType']
wf_args.update(wf_ident)
wf_args.update({
'aws_access_key_id': self.aws_access_key_id,
'aws_secret_access_key': self.aws_secret_access_key,
'domain': self.name,
'region': self.region,
})
wf_objects.append(WorkflowType(**wf_args))
return wf_objects
def executions(self, closed=False, **kwargs):
"""List list open/closed executions.
For a full list of available parameters refer to
:py:func:`boto.swf.layer1.Layer1.list_closed_workflow_executions` and
:py:func:`boto.swf.layer1.Layer1.list_open_workflow_executions`
"""
if closed:
executions = self._swf.list_closed_workflow_executions(self.name,
**kwargs)
else:
if 'oldest_date' not in kwargs:
# Last 24 hours.
kwargs['oldest_date'] = time.time() - (3600 * 24)
executions = self._swf.list_open_workflow_executions(self.name,
**kwargs)
exe_objects = []
for exe_args in executions['executionInfos']:
for nested_key in ('execution', 'workflowType'):
nested_dict = exe_args[nested_key]
del exe_args[nested_key]
exe_args.update(nested_dict)
exe_args.update({
'aws_access_key_id': self.aws_access_key_id,
'aws_secret_access_key': self.aws_secret_access_key,
'domain': self.name,
'region': self.region,
})
exe_objects.append(WorkflowExecution(**exe_args))
return exe_objects
@wraps(Layer1.count_pending_activity_tasks)
def count_pending_activity_tasks(self, task_list):
"""CountPendingActivityTasks."""
return self._swf.count_pending_activity_tasks(self.name, task_list)
@wraps(Layer1.count_pending_decision_tasks)
def count_pending_decision_tasks(self, task_list):
"""CountPendingDecisionTasks."""
return self._swf.count_pending_decision_tasks(self.name, task_list)
class Actor(SWFBase):
task_list = None
last_tasktoken = None
domain = None
def run(self):
"""To be overloaded by subclasses."""
raise NotImplementedError()
class ActivityWorker(Actor):
"""Base class for SimpleWorkflow activity workers."""
@wraps(Layer1.respond_activity_task_canceled)
def cancel(self, task_token=None, details=None):
"""RespondActivityTaskCanceled."""
if task_token is None:
task_token = self.last_tasktoken
return self._swf.respond_activity_task_canceled(task_token, details)
@wraps(Layer1.respond_activity_task_completed)
def complete(self, task_token=None, result=None):
"""RespondActivityTaskCompleted."""
if task_token is None:
task_token = self.last_tasktoken
return self._swf.respond_activity_task_completed(task_token, result)
@wraps(Layer1.respond_activity_task_failed)
def fail(self, task_token=None, details=None, reason=None):
"""RespondActivityTaskFailed."""
if task_token is None:
task_token = self.last_tasktoken
return self._swf.respond_activity_task_failed(task_token, details,
reason)
@wraps(Layer1.record_activity_task_heartbeat)
def heartbeat(self, task_token=None, details=None):
"""RecordActivityTaskHeartbeat."""
if task_token is None:
task_token = self.last_tasktoken
return self._swf.record_activity_task_heartbeat(task_token, details)
@wraps(Layer1.poll_for_activity_task)
def poll(self, **kwargs):
"""PollForActivityTask."""
task_list = self.task_list
if 'task_list' in kwargs:
task_list = kwargs.get('task_list')
del kwargs['task_list']
task = self._swf.poll_for_activity_task(self.domain, task_list,
**kwargs)
self.last_tasktoken = task.get('taskToken')
return task
class Decider(Actor):
"""Base class for SimpleWorkflow deciders."""
@wraps(Layer1.respond_decision_task_completed)
def complete(self, task_token=None, decisions=None, **kwargs):
"""RespondDecisionTaskCompleted."""
if isinstance(decisions, Layer1Decisions):
# Extract decision list from a Layer1Decisions instance.
decisions = decisions._data
if task_token is None:
task_token = self.last_tasktoken
return self._swf.respond_decision_task_completed(task_token, decisions,
**kwargs)
@wraps(Layer1.poll_for_decision_task)
def poll(self, **kwargs):
"""PollForDecisionTask."""
task_list = self.task_list
if 'task_list' in kwargs:
task_list = kwargs.get('task_list')
del kwargs['task_list']
decision_task = self._swf.poll_for_decision_task(self.domain, task_list,
**kwargs)
self.last_tasktoken = decision_task.get('taskToken')
return decision_task
class WorkflowType(SWFBase):
"""A versioned workflow type."""
version = None
task_list = None
child_policy = 'TERMINATE'
@wraps(Layer1.describe_workflow_type)
def describe(self):
"""DescribeWorkflowType."""
return self._swf.describe_workflow_type(self.domain, self.name,
self.version)
@wraps(Layer1.register_workflow_type)
def register(self, **kwargs):
"""RegisterWorkflowType."""
args = {
'default_execution_start_to_close_timeout': '3600',
'default_task_start_to_close_timeout': '300',
'default_child_policy': 'TERMINATE',
}
args.update(kwargs)
self._swf.register_workflow_type(self.domain, self.name, self.version,
**args)
@wraps(Layer1.deprecate_workflow_type)
def deprecate(self):
"""DeprecateWorkflowType."""
self._swf.deprecate_workflow_type(self.domain, self.name, self.version)
@wraps(Layer1.start_workflow_execution)
def start(self, **kwargs):
"""StartWorkflowExecution."""
if 'workflow_id' in kwargs:
workflow_id = kwargs['workflow_id']
del kwargs['workflow_id']
else:
workflow_id = '%s-%s-%i' % (self.name, self.version, time.time())
for def_attr in ('task_list', 'child_policy'):
kwargs[def_attr] = kwargs.get(def_attr, getattr(self, def_attr))
run_id = self._swf.start_workflow_execution(self.domain, workflow_id,
self.name, self.version, **kwargs)['runId']
return WorkflowExecution(name=self.name, version=self.version,
runId=run_id, domain=self.domain, workflowId=workflow_id,
aws_access_key_id=self.aws_access_key_id,
aws_secret_access_key=self.aws_secret_access_key)
class WorkflowExecution(SWFBase):
"""An instance of a workflow."""
workflowId = None
runId = None
@wraps(Layer1.signal_workflow_execution)
def signal(self, signame, **kwargs):
"""SignalWorkflowExecution."""
self._swf.signal_workflow_execution(self.domain, signame,
self.workflowId, **kwargs)
@wraps(Layer1.terminate_workflow_execution)
def terminate(self, **kwargs):
"""TerminateWorkflowExecution (p. 103)."""
return self._swf.terminate_workflow_execution(self.domain,
self.workflowId, **kwargs)
@wraps(Layer1.get_workflow_execution_history)
def history(self, **kwargs):
"""GetWorkflowExecutionHistory."""
return self._swf.get_workflow_execution_history(self.domain, self.runId,
self.workflowId, **kwargs)['events']
@wraps(Layer1.describe_workflow_execution)
def describe(self):
"""DescribeWorkflowExecution."""
return self._swf.describe_workflow_execution(self.domain, self.runId,
self.workflowId)
@wraps(Layer1.request_cancel_workflow_execution)
def request_cancel(self):
"""RequestCancelWorkflowExecution."""
return self._swf.request_cancel_workflow_execution(self.domain,
self.workflowId, self.runId)
class ActivityType(SWFBase):
"""A versioned activity type."""
version = None
@wraps(Layer1.deprecate_activity_type)
def deprecate(self):
"""DeprecateActivityType."""
return self._swf.deprecate_activity_type(self.domain, self.name,
self.version)
@wraps(Layer1.describe_activity_type)
def describe(self):
"""DescribeActivityType."""
return self._swf.describe_activity_type(self.domain, self.name,
self.version)
@wraps(Layer1.register_activity_type)
def register(self, **kwargs):
"""RegisterActivityType."""
args = {
'default_task_heartbeat_timeout': '600',
'default_task_schedule_to_close_timeout': '3900',
'default_task_schedule_to_start_timeout': '300',
'default_task_start_to_close_timeout': '3600',
}
args.update(kwargs)
self._swf.register_activity_type(self.domain, self.name, self.version,
**args)
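# A minimal end-to-end sketch using the classes above (domain/workflow names
# and versions are placeholders; these calls hit AWS, so this is
# illustrative only):
#
#   dom = Domain(name='demo-domain', description='demo', retention=7)
#   dom.register()
#   wf = WorkflowType(domain=dom.name, name='demo-wf', version='1.0',
#                     task_list='default')
#   wf.register()
#   execution = wf.start(input='{}')   # returns a WorkflowExecution
#   print(execution.describe())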
|
rbalda/neural_ocr | refs/heads/master | env/lib/python2.7/site-packages/numpy/lib/index_tricks.py | 57 | from __future__ import division, absolute_import, print_function
import sys
import math
import numpy.core.numeric as _nx
from numpy.core.numeric import (
asarray, ScalarType, array, alltrue, cumprod, arange
)
from numpy.core.numerictypes import find_common_type, issubdtype
from . import function_base
import numpy.matrixlib as matrix
from .function_base import diff
from numpy.core.multiarray import ravel_multi_index, unravel_index
from numpy.lib.stride_tricks import as_strided
makemat = matrix.matrix
__all__ = [
'ravel_multi_index', 'unravel_index', 'mgrid', 'ogrid', 'r_', 'c_',
's_', 'index_exp', 'ix_', 'ndenumerate', 'ndindex', 'fill_diagonal',
'diag_indices', 'diag_indices_from'
]
def ix_(*args):
"""
Construct an open mesh from multiple sequences.
This function takes N 1-D sequences and returns N outputs with N
dimensions each, such that the shape is 1 in all but one dimension
and the dimension with the non-unit shape value cycles through all
N dimensions.
Using `ix_` one can quickly construct index arrays that will index
the cross product. ``a[np.ix_([1,3],[2,5])]`` returns the array
``[[a[1,2] a[1,5]], [a[3,2] a[3,5]]]``.
Parameters
----------
args : 1-D sequences
Returns
-------
out : tuple of ndarrays
N arrays with N dimensions each, with N the number of input
sequences. Together these arrays form an open mesh.
See Also
--------
ogrid, mgrid, meshgrid
Examples
--------
>>> a = np.arange(10).reshape(2, 5)
>>> a
array([[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9]])
>>> ixgrid = np.ix_([0,1], [2,4])
>>> ixgrid
(array([[0],
[1]]), array([[2, 4]]))
>>> ixgrid[0].shape, ixgrid[1].shape
((2, 1), (1, 2))
>>> a[ixgrid]
array([[2, 4],
[7, 9]])
"""
out = []
nd = len(args)
for k, new in enumerate(args):
new = asarray(new)
if new.ndim != 1:
raise ValueError("Cross index must be 1 dimensional")
if new.size == 0:
# Explicitly type empty arrays to avoid float default
new = new.astype(_nx.intp)
if issubdtype(new.dtype, _nx.bool_):
new, = new.nonzero()
new = new.reshape((1,)*k + (new.size,) + (1,)*(nd-k-1))
out.append(new)
return tuple(out)
class nd_grid(object):
"""
Construct a multi-dimensional "meshgrid".
``grid = nd_grid()`` creates an instance which will return a mesh-grid
when indexed. The dimension and number of the output arrays are equal
to the number of indexing dimensions. If the step length is not a
complex number, then the stop is not inclusive.
However, if the step length is a **complex number** (e.g. 5j), then the
integer part of its magnitude is interpreted as specifying the
number of points to create between the start and stop values, where
the stop value **is inclusive**.
If instantiated with an argument of ``sparse=True``, the mesh-grid is
open (or not fleshed out) so that only one-dimension of each returned
argument is greater than 1.
Parameters
----------
sparse : bool, optional
Whether the grid is sparse or not. Default is False.
Notes
-----
Two instances of `nd_grid` are made available in the NumPy namespace,
`mgrid` and `ogrid`::
mgrid = nd_grid(sparse=False)
ogrid = nd_grid(sparse=True)
Users should use these pre-defined instances instead of using `nd_grid`
directly.
Examples
--------
>>> mgrid = np.lib.index_tricks.nd_grid()
>>> mgrid[0:5,0:5]
array([[[0, 0, 0, 0, 0],
[1, 1, 1, 1, 1],
[2, 2, 2, 2, 2],
[3, 3, 3, 3, 3],
[4, 4, 4, 4, 4]],
[[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4]]])
>>> mgrid[-1:1:5j]
array([-1. , -0.5, 0. , 0.5, 1. ])
>>> ogrid = np.lib.index_tricks.nd_grid(sparse=True)
>>> ogrid[0:5,0:5]
[array([[0],
[1],
[2],
[3],
[4]]), array([[0, 1, 2, 3, 4]])]
"""
def __init__(self, sparse=False):
self.sparse = sparse
def __getitem__(self, key):
try:
size = []
typ = int
for k in range(len(key)):
step = key[k].step
start = key[k].start
if start is None:
start = 0
if step is None:
step = 1
if isinstance(step, complex):
size.append(int(abs(step)))
typ = float
else:
size.append(
int(math.ceil((key[k].stop - start)/(step*1.0))))
if (isinstance(step, float) or
isinstance(start, float) or
isinstance(key[k].stop, float)):
typ = float
if self.sparse:
nn = [_nx.arange(_x, dtype=_t)
for _x, _t in zip(size, (typ,)*len(size))]
else:
nn = _nx.indices(size, typ)
for k in range(len(size)):
step = key[k].step
start = key[k].start
if start is None:
start = 0
if step is None:
step = 1
if isinstance(step, complex):
step = int(abs(step))
if step != 1:
step = (key[k].stop - start)/float(step-1)
nn[k] = (nn[k]*step+start)
if self.sparse:
slobj = [_nx.newaxis]*len(size)
for k in range(len(size)):
slobj[k] = slice(None, None)
nn[k] = nn[k][slobj]
slobj[k] = _nx.newaxis
return nn
except (IndexError, TypeError):
step = key.step
stop = key.stop
start = key.start
if start is None:
start = 0
if isinstance(step, complex):
step = abs(step)
length = int(step)
if step != 1:
step = (key.stop-start)/float(step-1)
stop = key.stop + step
return _nx.arange(0, length, 1, float)*step + start
else:
return _nx.arange(start, stop, step)
def __getslice__(self, i, j):
return _nx.arange(i, j)
def __len__(self):
return 0
mgrid = nd_grid(sparse=False)
ogrid = nd_grid(sparse=True)
mgrid.__doc__ = None # set in numpy.add_newdocs
ogrid.__doc__ = None # set in numpy.add_newdocs
class AxisConcatenator(object):
"""
Translates slice objects to concatenation along an axis.
For detailed documentation on usage, see `r_`.
"""
def _retval(self, res):
if self.matrix:
oldndim = res.ndim
res = makemat(res)
if oldndim == 1 and self.col:
res = res.T
self.axis = self._axis
self.matrix = self._matrix
self.col = 0
return res
def __init__(self, axis=0, matrix=False, ndmin=1, trans1d=-1):
self._axis = axis
self._matrix = matrix
self.axis = axis
self.matrix = matrix
self.col = 0
self.trans1d = trans1d
self.ndmin = ndmin
def __getitem__(self, key):
trans1d = self.trans1d
ndmin = self.ndmin
if isinstance(key, str):
frame = sys._getframe().f_back
mymat = matrix.bmat(key, frame.f_globals, frame.f_locals)
return mymat
if not isinstance(key, tuple):
key = (key,)
objs = []
scalars = []
arraytypes = []
scalartypes = []
for k in range(len(key)):
scalar = False
if isinstance(key[k], slice):
step = key[k].step
start = key[k].start
stop = key[k].stop
if start is None:
start = 0
if step is None:
step = 1
if isinstance(step, complex):
size = int(abs(step))
newobj = function_base.linspace(start, stop, num=size)
else:
newobj = _nx.arange(start, stop, step)
if ndmin > 1:
newobj = array(newobj, copy=False, ndmin=ndmin)
if trans1d != -1:
newobj = newobj.swapaxes(-1, trans1d)
elif isinstance(key[k], str):
if k != 0:
raise ValueError("special directives must be the "
"first entry.")
key0 = key[0]
if key0 in 'rc':
self.matrix = True
self.col = (key0 == 'c')
continue
if ',' in key0:
vec = key0.split(',')
try:
self.axis, ndmin = \
[int(x) for x in vec[:2]]
if len(vec) == 3:
trans1d = int(vec[2])
continue
except:
raise ValueError("unknown special directive")
try:
self.axis = int(key[k])
continue
except (ValueError, TypeError):
raise ValueError("unknown special directive")
elif type(key[k]) in ScalarType:
newobj = array(key[k], ndmin=ndmin)
scalars.append(k)
scalar = True
scalartypes.append(newobj.dtype)
else:
newobj = key[k]
if ndmin > 1:
tempobj = array(newobj, copy=False, subok=True)
newobj = array(newobj, copy=False, subok=True,
ndmin=ndmin)
if trans1d != -1 and tempobj.ndim < ndmin:
k2 = ndmin-tempobj.ndim
if (trans1d < 0):
trans1d += k2 + 1
defaxes = list(range(ndmin))
k1 = trans1d
axes = defaxes[:k1] + defaxes[k2:] + \
defaxes[k1:k2]
newobj = newobj.transpose(axes)
del tempobj
objs.append(newobj)
if not scalar and isinstance(newobj, _nx.ndarray):
arraytypes.append(newobj.dtype)
# Ensure that scalars won't up-cast unless warranted
final_dtype = find_common_type(arraytypes, scalartypes)
if final_dtype is not None:
for k in scalars:
objs[k] = objs[k].astype(final_dtype)
res = _nx.concatenate(tuple(objs), axis=self.axis)
return self._retval(res)
def __getslice__(self, i, j):
res = _nx.arange(i, j)
return self._retval(res)
def __len__(self):
return 0
# separate classes are used here instead of just making r_ = concatenator(0),
# etc. because otherwise we couldn't get the doc string to come out right
# in help(r_)
class RClass(AxisConcatenator):
"""
Translates slice objects to concatenation along the first axis.
This is a simple way to build up arrays quickly. There are two use cases.
1. If the index expression contains comma separated arrays, then stack
them along their first axis.
2. If the index expression contains slice notation or scalars then create
a 1-D array with a range indicated by the slice notation.
If slice notation is used, the syntax ``start:stop:step`` is equivalent
to ``np.arange(start, stop, step)`` inside of the brackets. However, if
``step`` is an imaginary number (i.e. 100j) then its integer portion is
interpreted as a number-of-points desired and the start and stop are
inclusive. In other words ``start:stop:stepj`` is interpreted as
``np.linspace(start, stop, step, endpoint=1)`` inside of the brackets.
After expansion of slice notation, all comma separated sequences are
concatenated together.
Optional character strings placed as the first element of the index
expression can be used to change the output. The strings 'r' or 'c' result
in matrix output. If the result is 1-D and 'r' is specified a 1 x N (row)
matrix is produced. If the result is 1-D and 'c' is specified, then a N x 1
(column) matrix is produced. If the result is 2-D then both provide the
same matrix result.
A string integer specifies which axis to stack multiple comma separated
arrays along. A string of two comma-separated integers allows indication
of the minimum number of dimensions to force each entry into as the
second integer (the axis to concatenate along is still the first integer).
A string with three comma-separated integers allows specification of the
axis to concatenate along, the minimum number of dimensions to force the
entries to, and which axis should contain the start of the arrays which
are less than the specified number of dimensions. In other words the third
integer allows you to specify where the 1's should be placed in the shape
of the arrays that have their shapes upgraded. By default, they are placed
in the front of the shape tuple. The third argument allows you to specify
where the start of the array should be instead. Thus, a third argument of
'0' would place the 1's at the end of the array shape. Negative integers
specify where in the new shape tuple the last dimension of upgraded arrays
should be placed, so the default is '-1'.
Parameters
----------
Not a function, so takes no parameters
Returns
-------
A concatenated ndarray or matrix.
See Also
--------
concatenate : Join a sequence of arrays along an existing axis.
c_ : Translates slice objects to concatenation along the second axis.
Examples
--------
>>> np.r_[np.array([1,2,3]), 0, 0, np.array([4,5,6])]
array([1, 2, 3, 0, 0, 4, 5, 6])
>>> np.r_[-1:1:6j, [0]*3, 5, 6]
array([-1. , -0.6, -0.2, 0.2, 0.6, 1. , 0. , 0. , 0. , 5. , 6. ])
String integers specify the axis to concatenate along or the minimum
number of dimensions to force entries into.
>>> a = np.array([[0, 1, 2], [3, 4, 5]])
>>> np.r_['-1', a, a] # concatenate along last axis
array([[0, 1, 2, 0, 1, 2],
[3, 4, 5, 3, 4, 5]])
>>> np.r_['0,2', [1,2,3], [4,5,6]] # concatenate along first axis, dim>=2
array([[1, 2, 3],
[4, 5, 6]])
>>> np.r_['0,2,0', [1,2,3], [4,5,6]]
array([[1],
[2],
[3],
[4],
[5],
[6]])
>>> np.r_['1,2,0', [1,2,3], [4,5,6]]
array([[1, 4],
[2, 5],
[3, 6]])
Using 'r' or 'c' as a first string argument creates a matrix.
>>> np.r_['r',[1,2,3], [4,5,6]]
matrix([[1, 2, 3, 4, 5, 6]])
"""
def __init__(self):
AxisConcatenator.__init__(self, 0)
r_ = RClass()
class CClass(AxisConcatenator):
"""
Translates slice objects to concatenation along the second axis.
This is short-hand for ``np.r_['-1,2,0', index expression]``, which is
useful because of its common occurrence. In particular, arrays will be
stacked along their last axis after being upgraded to at least 2-D with
1's post-pended to the shape (column vectors made out of 1-D arrays).
For detailed documentation, see `r_`.
Examples
--------
>>> np.c_[np.array([[1,2,3]]), 0, 0, np.array([[4,5,6]])]
array([[1, 2, 3, 0, 0, 4, 5, 6]])
"""
def __init__(self):
AxisConcatenator.__init__(self, -1, ndmin=2, trans1d=0)
c_ = CClass()
class ndenumerate(object):
"""
Multidimensional index iterator.
Return an iterator yielding pairs of array coordinates and values.
Parameters
----------
arr : ndarray
Input array.
See Also
--------
ndindex, flatiter
Examples
--------
>>> a = np.array([[1, 2], [3, 4]])
>>> for index, x in np.ndenumerate(a):
... print index, x
(0, 0) 1
(0, 1) 2
(1, 0) 3
(1, 1) 4
"""
def __init__(self, arr):
self.iter = asarray(arr).flat
def __next__(self):
"""
Standard iterator method, returns the index tuple and array value.
Returns
-------
coords : tuple of ints
The indices of the current iteration.
val : scalar
The array element of the current iteration.
"""
return self.iter.coords, next(self.iter)
def __iter__(self):
return self
next = __next__
class ndindex(object):
"""
An N-dimensional iterator object to index arrays.
Given the shape of an array, an `ndindex` instance iterates over
the N-dimensional index of the array. At each iteration a tuple
of indices is returned, the last dimension is iterated over first.
Parameters
----------
`*args` : ints
The size of each dimension of the array.
See Also
--------
ndenumerate, flatiter
Examples
--------
>>> for index in np.ndindex(3, 2, 1):
... print index
(0, 0, 0)
(0, 1, 0)
(1, 0, 0)
(1, 1, 0)
(2, 0, 0)
(2, 1, 0)
"""
def __init__(self, *shape):
if len(shape) == 1 and isinstance(shape[0], tuple):
shape = shape[0]
x = as_strided(_nx.zeros(1), shape=shape,
strides=_nx.zeros_like(shape))
self._it = _nx.nditer(x, flags=['multi_index', 'zerosize_ok'],
order='C')
def __iter__(self):
return self
def ndincr(self):
"""
Increment the multi-dimensional index by one.
This method is for backward compatibility only: do not use.
"""
next(self)
def __next__(self):
"""
Standard iterator method, updates the index and returns the index
tuple.
Returns
-------
val : tuple of ints
Returns a tuple containing the indices of the current
iteration.
"""
next(self._it)
return self._it.multi_index
next = __next__
# You can do all this with slice() plus a few special objects,
# but there's a lot to remember. This version is simpler because
# it uses the standard array indexing syntax.
#
# Written by Konrad Hinsen <hinsen@cnrs-orleans.fr>
# last revision: 1999-7-23
#
# Cosmetic changes by T. Oliphant 2001
#
#
class IndexExpression(object):
"""
A nicer way to build up index tuples for arrays.
.. note::
Use one of the two predefined instances `index_exp` or `s_`
rather than directly using `IndexExpression`.
For any index combination, including slicing and axis insertion,
``a[indices]`` is the same as ``a[np.index_exp[indices]]`` for any
array `a`. However, ``np.index_exp[indices]`` can be used anywhere
in Python code and returns a tuple of slice objects that can be
used in the construction of complex index expressions.
Parameters
----------
maketuple : bool
If True, always returns a tuple.
See Also
--------
index_exp : Predefined instance that always returns a tuple:
`index_exp = IndexExpression(maketuple=True)`.
s_ : Predefined instance without tuple conversion:
`s_ = IndexExpression(maketuple=False)`.
Notes
-----
You can do all this with `slice()` plus a few special objects,
but there's a lot to remember and this version is simpler because
it uses the standard array indexing syntax.
Examples
--------
>>> np.s_[2::2]
slice(2, None, 2)
>>> np.index_exp[2::2]
(slice(2, None, 2),)
>>> np.array([0, 1, 2, 3, 4])[np.s_[2::2]]
array([2, 4])
"""
def __init__(self, maketuple):
self.maketuple = maketuple
def __getitem__(self, item):
if self.maketuple and not isinstance(item, tuple):
return (item,)
else:
return item
index_exp = IndexExpression(maketuple=True)
s_ = IndexExpression(maketuple=False)
# End contribution from Konrad.
# The following functions complement those in twodim_base, but are
# applicable to N-dimensions.
def fill_diagonal(a, val, wrap=False):
"""Fill the main diagonal of the given array of any dimensionality.
For an array `a` with ``a.ndim > 2``, the diagonal is the list of
locations with indices ``a[i, i, ..., i]`` all identical. This function
modifies the input array in-place; it does not return a value.
Parameters
----------
a : array, at least 2-D.
Array whose diagonal is to be filled, it gets modified in-place.
val : scalar
Value to be written on the diagonal, its type must be compatible with
that of the array a.
wrap : bool
For tall matrices in NumPy version up to 1.6.2, the
diagonal "wrapped" after N columns. You can have this behavior
with this option. This affects only tall matrices.
See also
--------
diag_indices, diag_indices_from
Notes
-----
.. versionadded:: 1.4.0
This functionality can be obtained via `diag_indices`, but internally
this version uses a much faster implementation that never constructs the
indices and uses simple slicing.
Examples
--------
>>> a = np.zeros((3, 3), int)
>>> np.fill_diagonal(a, 5)
>>> a
array([[5, 0, 0],
[0, 5, 0],
[0, 0, 5]])
The same function can operate on a 4-D array:
>>> a = np.zeros((3, 3, 3, 3), int)
>>> np.fill_diagonal(a, 4)
We only show a few blocks for clarity:
>>> a[0, 0]
array([[4, 0, 0],
[0, 0, 0],
[0, 0, 0]])
>>> a[1, 1]
array([[0, 0, 0],
[0, 4, 0],
[0, 0, 0]])
>>> a[2, 2]
array([[0, 0, 0],
[0, 0, 0],
[0, 0, 4]])
The wrap option affects only tall matrices:
>>> # tall matrices no wrap
>>> a = np.zeros((5, 3),int)
>>> fill_diagonal(a, 4)
>>> a
array([[4, 0, 0],
[0, 4, 0],
[0, 0, 4],
[0, 0, 0],
[0, 0, 0]])
>>> # tall matrices wrap
>>> a = np.zeros((5, 3),int)
>>> fill_diagonal(a, 4, wrap=True)
>>> a
array([[4, 0, 0],
[0, 4, 0],
[0, 0, 4],
[0, 0, 0],
[4, 0, 0]])
>>> # wide matrices
>>> a = np.zeros((3, 5),int)
>>> fill_diagonal(a, 4, wrap=True)
>>> a
array([[4, 0, 0, 0, 0],
[0, 4, 0, 0, 0],
[0, 0, 4, 0, 0]])
"""
if a.ndim < 2:
raise ValueError("array must be at least 2-d")
end = None
if a.ndim == 2:
# Explicit, fast formula for the common case. For 2-d arrays, we
# accept rectangular ones.
step = a.shape[1] + 1
# This is needed so that tall matrices don't have the diagonal wrap.
if not wrap:
end = a.shape[1] * a.shape[1]
else:
# For more than d=2, the strided formula is only valid for arrays with
# all dimensions equal, so we check first.
if not alltrue(diff(a.shape) == 0):
raise ValueError("All dimensions of input must be of equal length")
step = 1 + (cumprod(a.shape[:-1])).sum()
# Write the value out into the diagonal.
a.flat[:end:step] = val
def diag_indices(n, ndim=2):
"""
Return the indices to access the main diagonal of an array.
This returns a tuple of indices that can be used to access the main
diagonal of an array `a` with ``a.ndim >= 2`` dimensions and shape
(n, n, ..., n). For ``a.ndim = 2`` this is the usual diagonal, for
``a.ndim > 2`` this is the set of indices to access ``a[i, i, ..., i]``
for ``i = [0..n-1]``.
Parameters
----------
n : int
The size, along each dimension, of the arrays for which the returned
indices can be used.
ndim : int, optional
The number of dimensions.
See also
--------
diag_indices_from
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
Create a set of indices to access the diagonal of a (4, 4) array:
>>> di = np.diag_indices(4)
>>> di
(array([0, 1, 2, 3]), array([0, 1, 2, 3]))
>>> a = np.arange(16).reshape(4, 4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
>>> a[di] = 100
>>> a
array([[100, 1, 2, 3],
[ 4, 100, 6, 7],
[ 8, 9, 100, 11],
[ 12, 13, 14, 100]])
Now, we create indices to manipulate a 3-D array:
>>> d3 = np.diag_indices(2, 3)
>>> d3
(array([0, 1]), array([0, 1]), array([0, 1]))
And use it to set the diagonal of an array of zeros to 1:
>>> a = np.zeros((2, 2, 2), dtype=np.int)
>>> a[d3] = 1
>>> a
array([[[1, 0],
[0, 0]],
[[0, 0],
[0, 1]]])
"""
idx = arange(n)
return (idx,) * ndim
def diag_indices_from(arr):
"""
Return the indices to access the main diagonal of an n-dimensional array.
See `diag_indices` for full details.
Parameters
----------
arr : array, at least 2-D
See Also
--------
diag_indices
Notes
-----
.. versionadded:: 1.4.0
"""
if not arr.ndim >= 2:
raise ValueError("input array must be at least 2-d")
# For more than d=2, the strided formula is only valid for arrays with
# all dimensions equal, so we check first.
if not alltrue(diff(arr.shape) == 0):
raise ValueError("All dimensions of input must be of equal length")
return diag_indices(arr.shape[0], arr.ndim)
|
xyq946692052/microblog | refs/heads/master | db_repository/versions/002_migration.py | 134 | from sqlalchemy import *
from migrate import *
from migrate.changeset import schema
pre_meta = MetaData()
post_meta = MetaData()
post = Table('post', post_meta,
Column('id', Integer, primary_key=True, nullable=False),
Column('body', String(length=140)),
Column('timestamp', DateTime),
Column('user_id', Integer),
)
def upgrade(migrate_engine):
# Upgrade operations go here. Don't create your own engine; bind
# migrate_engine to your metadata
pre_meta.bind = migrate_engine
post_meta.bind = migrate_engine
post_meta.tables['post'].create()
def downgrade(migrate_engine):
# Operations to reverse the above upgrade go here.
pre_meta.bind = migrate_engine
post_meta.bind = migrate_engine
post_meta.tables['post'].drop()
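# Note: migration scripts like this are normally executed by
# sqlalchemy-migrate's repository manager rather than run directly,
# e.g. (assumed invocation): migrate upgrade <db_url> <repository_path>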
|
bartosh/zipline | refs/heads/master | zipline/utils/simfactory.py | 6 | import zipline.utils.factory as factory
from zipline.testing.core import create_data_portal_from_trade_history
from zipline.test_algorithms import TestAlgorithm
from zipline.utils.calendars import get_calendar
def create_test_zipline(**config):
"""
:param config: A configuration object that is a dict with:
- sid - an integer, which will be used as the asset ID.
- order_count - the number of orders the test algo will place,
defaults to 100
- order_amount - the number of shares per order, defaults to 100
- trade_count - the number of trades to simulate, defaults to 101
to ensure all orders are processed.
- algorithm - optional parameter providing an algorithm. defaults
to :py:class:`zipline.test.algorithms.TestAlgorithm`
- trade_source - optional parameter to specify trades, if present.
If not present :py:class:`zipline.sources.SpecificEquityTrades`
is the source, with daily frequency in trades.
- slippage: optional parameter that configures the
:py:class:`zipline.gens.tradingsimulation.TransactionSimulator`.
Expects an object with a simulate method, such as
:py:class:`zipline.gens.tradingsimulation.FixedSlippage`.
:py:mod:`zipline.finance.trading`
"""
assert isinstance(config, dict)
try:
sid_list = config['sid_list']
except KeyError:
try:
sid_list = [config['sid']]
except KeyError:
raise Exception("simfactory create_test_zipline() requires "
"argument 'sid_list' or 'sid'")
concurrent_trades = config.get('concurrent_trades', False)
order_count = config.get('order_count', 100)
order_amount = config.get('order_amount', 100)
trading_calendar = config.get('trading_calendar', get_calendar("NYSE"))
# -------------------
# Create the Algo
# -------------------
if 'algorithm' in config:
test_algo = config['algorithm']
else:
test_algo = TestAlgorithm(
sid_list[0],
order_amount,
order_count,
sim_params=config.get('sim_params',
factory.create_simulation_parameters()),
trading_calendar=trading_calendar,
slippage=config.get('slippage'),
identifiers=sid_list
)
# -------------------
# Trade Source
# -------------------
if 'skip_data' not in config:
if 'trade_source' in config:
trade_source = config['trade_source']
else:
trade_source = factory.create_daily_trade_source(
sid_list,
test_algo.sim_params,
test_algo.trading_environment,
trading_calendar,
concurrent=concurrent_trades,
)
trades_by_sid = {}
for trade in trade_source:
if trade.sid not in trades_by_sid:
trades_by_sid[trade.sid] = []
trades_by_sid[trade.sid].append(trade)
data_portal = create_data_portal_from_trade_history(
config['env'].asset_finder,
trading_calendar,
config['tempdir'],
config['sim_params'],
trades_by_sid
)
test_algo.data_portal = data_portal
# -------------------
# Benchmark source
# -------------------
test_algo.benchmark_return_source = config.get('benchmark_source', None)
# ------------------
# generator/simulator
sim = test_algo.get_generator()
return sim
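# A minimal usage sketch for the factory above (config keys follow the
# docstring; the `env`, `tempdir` and `sim_params` values are assumptions
# that depend on the caller's test fixtures):
#
#   sim = create_test_zipline(
#       sid=133,
#       order_count=10,
#       order_amount=50,
#       env=env, tempdir=tempdir, sim_params=sim_params,
#   )
#   for perf in sim:   # the generator yields performance packets
#       pass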
|
sujaymansingh/random_cricket_profiles | refs/heads/develop | random_cricket_profiles/api.py | 1 | """A flask API for generating (and retrieving) player details!
Usage:
api.py run [--no-debug] [--port=<p>] [--host=<h>]
api.py (-h | --help)
Options:
-h --help Show this screen
--no-debug Don't add debug=True when running
--port=<p> The port to use [default: 5000]
--host=<h> The host (address to listen on) [default: 0.0.0.0]
"""
import logging
import random
import docopt
import flask
from random_cricket_profiles import countries, player_generator, players, prettyint
logging.basicConfig(level=logging.INFO)
app = flask.Flask(__name__)
app.config.from_object("random_cricket_profiles.settings.Config")
generator = None
def get_player_generator():
global generator
filename = app.config["DATA_FILENAME"]
min_profile_length = app.config["MIN_PROFILE_LENGTH"]
if generator is None:
sample_players = players.load_players(filename)
generator = player_generator.PlayerGenerator(sample_players, min_profile_length)
return generator
@app.route("/p/<country_code>/<seed_string>")
def player(country_code, seed_string):
seed = prettyint.decode(seed_string)
pg = get_player_generator()
player, _seed = pg.generate(country_code=country_code, seed=seed)
response = {
"country": country_code,
"firstnames": player.firstnames,
"surname": player.surname,
"profile": player.profile
}
return flask.jsonify(response)
@app.route("/p/random")
def random_player():
country = random.choice(countries.COUNTRIES)
country_code = country.country_code
seed = random.getrandbits(64)
seed_string = prettyint.encode(seed)
url = flask.url_for("player", country_code=country_code, seed_string=seed_string)
return flask.redirect(url)
if __name__ == "__main__":
args = docopt.docopt(__doc__)
if args.get("run"):
port = int(args["--port"])
host = args["--host"]
debug = not bool(args.get("--no-debug"))
app.run(host=host, port=port, debug=debug)
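# Example requests once the server is running (seed strings are produced by
# prettyint.encode, so the path below is illustrative only):
#
#   GET /p/random                 -> 302 redirect to a seeded profile URL
#   GET /p/<country_code>/<seed>  -> JSON with firstnames/surname/profile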
|
Samnsparky/py_common_subseq | refs/heads/master | py_common_subseq/__init__.py | 1 | """Micro-library implementating logic to find all common subsequences.
Implementation of the calACS-DP (all common subsequences - dynamic programming)
algorithm as presented in Hui Wang's "All common subsequences."
IJCAI 2007:635-640 (ACM). This Micro-library also introduces a variation which
can produce a list all of those common subsequences. Additionally, both the
classic implementation as well as the variation offer some space optimizations
similar to those found in longest common subsequence imeplementations.
@author: A. Samuel Pottinger (http://gleap.org)
@license: MIT
"""
from py_common_subseq import *
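# A hypothetical usage sketch (the exact public function names live in
# py_common_subseq.py, which is not shown here; those used below are
# assumptions):
#
#   import py_common_subseq
#   total = py_common_subseq.count_common_subsequences('abc', 'abd')
#   all_seqs = py_common_subseq.find_common_subsequences('abc', 'abd')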
|
spektom/incubator-airflow | refs/heads/master | airflow/providers/apache/pinot/hooks/pinot.py | 4 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import subprocess
from pinotdb import connect
from airflow.exceptions import AirflowException
from airflow.hooks.base_hook import BaseHook
from airflow.hooks.dbapi_hook import DbApiHook
class PinotAdminHook(BaseHook):
"""
This hook is a wrapper around the pinot-admin.sh script.
For now, only a small subset of its subcommands is implemented,
which are required to ingest offline data into Apache Pinot
(i.e., AddSchema, AddTable, CreateSegment, and UploadSegment).
Their command options are based on Pinot v0.1.0.
Unfortunately, as of v0.1.0, pinot-admin.sh always exits with
status code 0. To address this behavior, users can use the
pinot_admin_system_exit flag. If its value is set to false,
this hook evaluates the result based on the output message
instead of the status code. This behavior of Pinot is expected
to be improved in the next release, which will include the
following PR: https://github.com/apache/incubator-pinot/pull/4110
:param conn_id: The name of the connection to use.
:type conn_id: str
:param cmd_path: The filepath to the pinot-admin.sh executable
:type cmd_path: str
:param pinot_admin_system_exit: If true, the result is evaluated based on the status code.
Otherwise, the result is evaluated as a failure if "Error" or
"Exception" is in the output message.
:type pinot_admin_system_exit: bool
"""
def __init__(self,
conn_id="pinot_admin_default",
cmd_path="pinot-admin.sh",
pinot_admin_system_exit=False):
conn = self.get_connection(conn_id)
self.host = conn.host
self.port = str(conn.port)
self.cmd_path = conn.extra_dejson.get("cmd_path", cmd_path)
self.pinot_admin_system_exit = conn.extra_dejson.get("pinot_admin_system_exit",
pinot_admin_system_exit)
self.conn = conn
def get_conn(self):
return self.conn
def add_schema(self, schema_file, exec=True):
cmd = ["AddSchema"]
cmd += ["-controllerHost", self.host]
cmd += ["-controllerPort", self.port]
cmd += ["-schemaFile", schema_file]
if exec:
cmd += ["-exec"]
self.run_cli(cmd)
def add_table(self, file_path, exec=True):
cmd = ["AddTable"]
cmd += ["-controllerHost", self.host]
cmd += ["-controllerPort", self.port]
cmd += ["-filePath", file_path]
if exec:
cmd += ["-exec"]
self.run_cli(cmd)
def create_segment(self,
generator_config_file=None,
data_dir=None,
format=None,
out_dir=None,
overwrite=None,
table_name=None,
segment_name=None,
time_column_name=None,
schema_file=None,
reader_config_file=None,
enable_star_tree_index=None,
star_tree_index_spec_file=None,
hll_size=None,
hll_columns=None,
hll_suffix=None,
num_threads=None,
post_creation_verification=None,
retry=None):
cmd = ["CreateSegment"]
if generator_config_file:
cmd += ["-generatorConfigFile", generator_config_file]
if data_dir:
cmd += ["-dataDir", data_dir]
if format:
cmd += ["-format", format]
if out_dir:
cmd += ["-outDir", out_dir]
if overwrite:
cmd += ["-overwrite", overwrite]
if table_name:
cmd += ["-tableName", table_name]
if segment_name:
cmd += ["-segmentName", segment_name]
if time_column_name:
cmd += ["-timeColumnName", time_column_name]
if schema_file:
cmd += ["-schemaFile", schema_file]
if reader_config_file:
cmd += ["-readerConfigFile", reader_config_file]
if enable_star_tree_index:
cmd += ["-enableStarTreeIndex", enable_star_tree_index]
if star_tree_index_spec_file:
cmd += ["-starTreeIndexSpecFile", star_tree_index_spec_file]
if hll_size:
cmd += ["-hllSize", hll_size]
if hll_columns:
cmd += ["-hllColumns", hll_columns]
if hll_suffix:
cmd += ["-hllSuffix", hll_suffix]
if num_threads:
cmd += ["-numThreads", num_threads]
if post_creation_verification:
cmd += ["-postCreationVerification", post_creation_verification]
if retry:
cmd += ["-retry", retry]
self.run_cli(cmd)
def upload_segment(self, segment_dir, table_name=None):
cmd = ["UploadSegment"]
cmd += ["-controllerHost", self.host]
cmd += ["-controllerPort", self.port]
cmd += ["-segmentDir", segment_dir]
if table_name:
cmd += ["-tableName", table_name]
self.run_cli(cmd)
def run_cli(self, cmd, verbose=True):
command = [self.cmd_path]
command.extend(cmd)
env = None
if self.pinot_admin_system_exit:
env = os.environ.copy()
java_opts = "-Dpinot.admin.system.exit=true " + os.environ.get("JAVA_OPTS", "")
env.update({"JAVA_OPTS": java_opts})
if verbose:
self.log.info(" ".join(command))
sp = subprocess.Popen(
command,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
close_fds=True,
env=env)
stdout = ""
for line in iter(sp.stdout):
line = line.decode()
stdout += line
if verbose:
self.log.info(line.strip())
sp.wait()
# As of Pinot v0.1.0, either of "Error: ..." or "Exception caught: ..."
# is expected to be in the output messages. See:
# https://github.com/apache/incubator-pinot/blob/release-0.1.0/pinot-tools/src/main/java/org/apache/pinot/tools/admin/PinotAdministrator.java#L98-L101
if ((self.pinot_admin_system_exit and sp.returncode) or
("Error" in stdout or "Exception" in stdout)):
raise AirflowException(stdout)
return stdout
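# Illustrative usage sketch (added for documentation; not part of the
# upstream hook). The connection id and file paths are assumptions made
# only for this example:
#
#   hook = PinotAdminHook(conn_id="pinot_admin_default",
#                         pinot_admin_system_exit=True)
#   hook.add_schema("/tmp/my_schema.json")
#   hook.add_table("/tmp/my_table_config.json")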
class PinotDbApiHook(DbApiHook):
"""
Connect to pinot db (https://github.com/apache/incubator-pinot) to issue pql
"""
conn_name_attr = 'pinot_broker_conn_id'
default_conn_name = 'pinot_broker_default'
supports_autocommit = False
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def get_conn(self):
"""
Establish a connection to pinot broker through pinot dbapi.
"""
conn = self.get_connection(self.pinot_broker_conn_id)
pinot_broker_conn = connect(
host=conn.host,
port=conn.port,
path=conn.extra_dejson.get('endpoint', '/pql'),
scheme=conn.extra_dejson.get('schema', 'http')
)
self.log.info('Get the connection to pinot '
'broker on {host}'.format(host=conn.host))
return pinot_broker_conn
def get_uri(self):
"""
Get the connection uri for pinot broker.
        e.g.: http://localhost:9000/pql
"""
conn = self.get_connection(getattr(self, self.conn_name_attr))
host = conn.host
if conn.port is not None:
host += ':{port}'.format(port=conn.port)
conn_type = 'http' if not conn.conn_type else conn.conn_type
endpoint = conn.extra_dejson.get('endpoint', 'pql')
return '{conn_type}://{host}/{endpoint}'.format(
conn_type=conn_type, host=host, endpoint=endpoint)
def get_records(self, sql):
"""
Executes the sql and returns a set of records.
:param sql: the sql statement to be executed (str) or a list of
sql statements to execute
        :type sql: str or list
"""
with self.get_conn() as cur:
cur.execute(sql)
return cur.fetchall()
def get_first(self, sql):
"""
Executes the sql and returns the first resulting row.
:param sql: the sql statement to be executed (str) or a list of
sql statements to execute
:type sql: str or list
"""
with self.get_conn() as cur:
cur.execute(sql)
return cur.fetchone()
def set_autocommit(self, conn, autocommit):
raise NotImplementedError()
def insert_rows(self, table, rows, target_fields=None, commit_every=1000):
raise NotImplementedError()
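# Illustrative usage sketch (added for documentation; not part of the
# upstream hook). The connection id and table name are assumptions made
# only for this example:
#
#   hook = PinotDbApiHook(pinot_broker_conn_id="pinot_broker_default")
#   count = hook.get_first("SELECT COUNT(*) FROM myTable")
#   rows = hook.get_records("SELECT * FROM myTable LIMIT 10")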
|
eeshangarg/oh-mainline | refs/heads/master | vendor/packages/gdata/tests/gdata_tests/apps/migration/service_test.py | 38 | #!/usr/bin/python2.4
#
# Copyright 2008 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test for Email Migration service."""
__author__ = 'google-apps-apis@googlegroups.com'
import getpass
import unittest
import gdata.apps.migration.service
domain = ''
admin_email = ''
admin_password = ''
username = ''
MESSAGE = """From: joe@blow.com
To: jane@doe.com
Date: Mon, 29 Sep 2008 20:00:34 -0500 (CDT)
Subject: %s
%s"""
class MigrationTest(unittest.TestCase):
"""Test for the MigrationService."""
def setUp(self):
self.ms = gdata.apps.migration.service.MigrationService(
email=admin_email, password=admin_password, domain=domain)
self.ms.ProgrammaticLogin()
def testImportMail(self):
self.ms.ImportMail(user_name=username,
mail_message=MESSAGE % ('Test subject', 'Test body'),
mail_item_properties=['IS_STARRED'],
mail_labels=['Test'])
def testImportMultipleMails(self):
for i in xrange(1, 10):
self.ms.AddMailEntry(mail_message=MESSAGE % ('Test thread %d' % i,
'Test thread'),
mail_item_properties=['IS_UNREAD'],
mail_labels=['Test', 'Thread'],
identifier=str(i))
self.ms.ImportMultipleMails(user_name=username)
if __name__ == '__main__':
print("Google Apps Email Migration Service Tests\n\n"
"NOTE: Please run these tests only with a test user account.\n")
domain = raw_input('Google Apps domain: ')
admin_email = '%s@%s' % (raw_input('Administrator username: '), domain)
admin_password = getpass.getpass('Administrator password: ')
username = raw_input('Test username: ')
unittest.main()
|
el-lumbergato/namebench | refs/heads/master | nb_third_party/httplib2/iri2uri.py | 885 | """
iri2uri
Converts an IRI to a URI.
"""
__author__ = "Joe Gregorio (joe@bitworking.org)"
__copyright__ = "Copyright 2006, Joe Gregorio"
__contributors__ = []
__version__ = "1.0.0"
__license__ = "MIT"
__history__ = """
"""
import urlparse
# Convert an IRI to a URI following the rules in RFC 3987
#
# The characters we need to encode and escape are defined in the spec:
#
# iprivate = %xE000-F8FF / %xF0000-FFFFD / %x100000-10FFFD
# ucschar = %xA0-D7FF / %xF900-FDCF / %xFDF0-FFEF
# / %x10000-1FFFD / %x20000-2FFFD / %x30000-3FFFD
# / %x40000-4FFFD / %x50000-5FFFD / %x60000-6FFFD
# / %x70000-7FFFD / %x80000-8FFFD / %x90000-9FFFD
# / %xA0000-AFFFD / %xB0000-BFFFD / %xC0000-CFFFD
# / %xD0000-DFFFD / %xE1000-EFFFD
escape_range = [
(0xA0, 0xD7FF ),
(0xE000, 0xF8FF ),
(0xF900, 0xFDCF ),
(0xFDF0, 0xFFEF),
(0x10000, 0x1FFFD ),
(0x20000, 0x2FFFD ),
(0x30000, 0x3FFFD),
(0x40000, 0x4FFFD ),
(0x50000, 0x5FFFD ),
(0x60000, 0x6FFFD),
(0x70000, 0x7FFFD ),
(0x80000, 0x8FFFD ),
(0x90000, 0x9FFFD),
(0xA0000, 0xAFFFD ),
(0xB0000, 0xBFFFD ),
(0xC0000, 0xCFFFD),
(0xD0000, 0xDFFFD ),
(0xE1000, 0xEFFFD),
(0xF0000, 0xFFFFD ),
(0x100000, 0x10FFFD)
]
def encode(c):
retval = c
i = ord(c)
for low, high in escape_range:
if i < low:
break
if i >= low and i <= high:
retval = "".join(["%%%2X" % ord(o) for o in c.encode('utf-8')])
break
return retval
def iri2uri(uri):
"""Convert an IRI to a URI. Note that IRIs must be
passed in a unicode strings. That is, do not utf-8 encode
the IRI before passing it into the function."""
    if isinstance(uri, unicode):
(scheme, authority, path, query, fragment) = urlparse.urlsplit(uri)
authority = authority.encode('idna')
# For each character in 'ucschar' or 'iprivate'
# 1. encode as utf-8
# 2. then %-encode each octet of that utf-8
uri = urlparse.urlunsplit((scheme, authority, path, query, fragment))
uri = "".join([encode(c) for c in uri])
return uri
if __name__ == "__main__":
import unittest
class Test(unittest.TestCase):
def test_uris(self):
"""Test that URIs are invariant under the transformation."""
invariant = [
u"ftp://ftp.is.co.za/rfc/rfc1808.txt",
u"http://www.ietf.org/rfc/rfc2396.txt",
u"ldap://[2001:db8::7]/c=GB?objectClass?one",
u"mailto:John.Doe@example.com",
u"news:comp.infosystems.www.servers.unix",
u"tel:+1-816-555-1212",
u"telnet://192.0.2.16:80/",
u"urn:oasis:names:specification:docbook:dtd:xml:4.1.2" ]
for uri in invariant:
self.assertEqual(uri, iri2uri(uri))
def test_iri(self):
""" Test that the right type of escaping is done for each part of the URI."""
self.assertEqual("http://xn--o3h.com/%E2%98%84", iri2uri(u"http://\N{COMET}.com/\N{COMET}"))
self.assertEqual("http://bitworking.org/?fred=%E2%98%84", iri2uri(u"http://bitworking.org/?fred=\N{COMET}"))
self.assertEqual("http://bitworking.org/#%E2%98%84", iri2uri(u"http://bitworking.org/#\N{COMET}"))
self.assertEqual("#%E2%98%84", iri2uri(u"#\N{COMET}"))
self.assertEqual("/fred?bar=%E2%98%9A#%E2%98%84", iri2uri(u"/fred?bar=\N{BLACK LEFT POINTING INDEX}#\N{COMET}"))
self.assertEqual("/fred?bar=%E2%98%9A#%E2%98%84", iri2uri(iri2uri(u"/fred?bar=\N{BLACK LEFT POINTING INDEX}#\N{COMET}")))
self.assertNotEqual("/fred?bar=%E2%98%9A#%E2%98%84", iri2uri(u"/fred?bar=\N{BLACK LEFT POINTING INDEX}#\N{COMET}".encode('utf-8')))
unittest.main()
|
Vladimir37/Sanelotto | refs/heads/master | routes/help.py | 1 | def help():
print()
    print('\033[1mSanelotto\033[0m - an application for automated deployment. Sanelotto works in conjunction with Continuous Integration systems.')
print()
print('\033[1mSYNOPSIS\033[0m')
print()
print('sanelotto generate')
print('- Start question-answer mode to generate a new project.')
print()
print('sanelotto create [NAME]')
    print('- Create a project template with the given project name.')
print()
print('sanelotto start [TYPE]')
    print('- Start the project from the current directory. Type: "local" or "server"')
print()
print('sanelotto version')
print('- Print Sanelotto version')
print()
print('sanelotto uninstall')
print('- Uninstall Sanelotto')
print()
print('sanelotto help')
print('- Print Sanelotto help')
print()
print('\033[1mAUTHOR\033[0m')
print('Vladimir37')
print('MIT License')
print()
print('\033[1mDOCUMENTATION\033[0m')
print('You can find full documentation on http://sanelotto.info')
print() |
trenton3983/Artificial_Intelligence_for_Humans | refs/heads/master | vol3/vol3-python-examples/lib/aifh/boltzmann.py | 2 | #!/usr/bin/env python
"""
Artificial Intelligence for Humans
Volume 3: Deep Learning and Neural Networks
Python Version
http://www.aifh.org
http://www.jeffheaton.com
Code repository:
https://github.com/jeffheaton/aifh
Copyright 2015 by Jeff Heaton
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
For more information on Heaton Research copyrights, licenses
and trademarks visit:
http://www.heatonresearch.com/copyright
"""
from energetic import EnergeticNetwork
import numpy as np
import math
class BoltzmannMachine(EnergeticNetwork):
def __init__(self, neuron_count):
super(BoltzmannMachine, self).__init__(neuron_count)
# The current temperature of the neural network. The higher the
# temperature, the more random the network will behave.
self.temperature = 0
# The thresholds.
self.threshold = [0] * neuron_count
# Count used to internally determine if a neuron is "on".
self.on = [0] * neuron_count
# Count used to internally determine if a neuron is "off".
self.off = [0] * neuron_count
# The number of cycles to anneal for.
self.anneal_cycles = 100
# The number of cycles to run the network through before annealing.
self.run_cycles = 1000
def compute(self, input):
"""
Note: for Boltzmann networks, you will usually want to call the "run"
method to compute the output.
This method can be used to copy the input data to the current state. A
single iteration is then run, and the new current state is returned.
:param input: The input pattern.
:return: The new current state.
"""
        self.current_state[:] = input[:]
        self.run()
        return self.current_state[:]
def decrease_temperature(self, d):
"""
Decrease/increase the temperature by the specified amount.
:param d: The amount to decrease by, for example .8 to change to
80% of current.
"""
self.temperature *= d
def establish_equilibrium(self):
count = self.neuron_count
for i in range(0, count):
self.on[i] = 0
self.off[i] = 0
for n in range(0, self.run_cycles * count):
            self.run(np.random.randint(0, count))
for n in range(0, self.anneal_cycles * count):
            i = np.random.randint(0, count)
self.run(i)
if self.current_state[i]>0:
self.on[i] += 1
else:
self.off[i] += 1
for i in range(0,count):
self.current_state[i] = 1 if self.on[i] > self.off[i] else 0
    def run(self, i=None):
        """
        Run the network, either for a single specified neuron or, when no
        index is given, for every neuron. (Python does not support method
        overloading, so the two original run() definitions are merged here;
        the second definition silently shadowed the first, which made
        self.run() with no argument raise a TypeError.)
        :param i: The neuron to run for, or None to run all neurons.
        """
        count = self.neuron_count
        if i is None:
            for j in range(0, count):
                self.run(j)
            return
        total = 0
        for j in range(0, count):
            total += self.get_weight(i, j) * (1 if (self.current_state[j] > 0) else 0)
        total -= self.threshold[i]
        probability = 1 / (1 + math.exp(-total / self.temperature))
        if np.random.rand() <= probability:
            self.current_state[i] = 1.0
        else:
            self.current_state[i] = 0.0
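# Illustrative usage sketch (added for documentation). It assumes the
# weights and thresholds have been set elsewhere via the EnergeticNetwork
# base class; the network size and temperatures are examples only:
#
#   machine = BoltzmannMachine(4)
#   machine.temperature = 100.0
#   while machine.temperature > 1.0:
#       machine.establish_equilibrium()
#       machine.decrease_temperature(0.8)
#   print(machine.current_state)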
|
maxalbert/tohu | refs/heads/master | tests/v7/test_field_selector.py | 1 | import pytest
from tohu.v7.field_selector import FieldSelector
from tohu.v7.custom_generator.tohu_items_class import make_tohu_items_class
def test_field_selector():
Quux = make_tohu_items_class("Quux", field_names=["cc", "aa", "bb"])
input_items = [Quux(aa=104, bb="672EF2", cc="Johnny"), Quux(aa=114, bb="250204", cc="David")]
fs = FieldSelector(Quux)
items_expected = [{"aa": 104, "bb": "672EF2", "cc": "Johnny"}, {"aa": 114, "bb": "250204", "cc": "David"}]
assert items_expected == list(fs(input_items))
fs = FieldSelector(Quux, fields=["cc", "aa"])
items_expected = [{"cc": "Johnny", "aa": 104}, {"cc": "David", "aa": 114}]
assert items_expected == list(fs(input_items))
fs = FieldSelector(Quux, fields={"id": "bb", "name": "cc"})
items_expected = [{"id": "672EF2", "name": "Johnny"}, {"id": "250204", "name": "David"}]
assert items_expected == list(fs(input_items))
@pytest.mark.skip(reason="Consistency checks of nested fields is not supported yet")
def test_field_selector_raises_error_if_fields_are_inconsistent():
Quux = make_tohu_items_class("Quux", field_names=["aa", "bb", "cc"])
with pytest.raises(ValueError, match="Field names must be a subset of the fields defined on `tohu_items_cls`"):
FieldSelector(Quux, fields=["not_an_existing_field"])
with pytest.raises(ValueError, match="Field names must be a subset of the fields defined on `tohu_items_cls`"):
FieldSelector(Quux, fields={"new_field": "not_an_existing_field"})
def test_extraction_of_nested_fields():
Foo = make_tohu_items_class("Foo", field_names=["my_name", "my_age"])
foo_item = Foo("Peter", 42)
Quux = make_tohu_items_class("Quux", field_names=["aa", "bb"])
input_items = [Quux(aa="D8A024", bb=foo_item), Quux(aa="CC3ABB", bb=foo_item), Quux(aa="5398D1", bb=foo_item)]
fs = FieldSelector(Quux, fields={"name": "bb.my_name", "age": "bb.my_age", "id": "aa"})
items_expected = [
{"name": "Peter", "age": 42, "id": "D8A024"},
{"name": "Peter", "age": 42, "id": "CC3ABB"},
{"name": "Peter", "age": 42, "id": "5398D1"},
]
assert items_expected == list(fs(input_items))
|
Daihiro/ldjam37 | refs/heads/master | ld37/common/utils/assetManager.py | 1 | import json
import pygame
class AssetManager:
'Use this to get all of the resources'
asset_dictionary = {}
json_dict = {}
def __init__(self):
if len(AssetManager.json_dict) == 0:
asset_file = open("assets/asset_map.json")
file_text = asset_file.read()
AssetManager.json_dict = json.loads(file_text)
def request_texture(self, name):
'Get a texture surface with specified name'
texture_key = "Texture_" + name
if AssetManager.asset_dictionary.get(texture_key) is None:
textures_array = AssetManager.json_dict['textures']
for texture in textures_array:
if texture['name'] == name:
main_surface = pygame.image.load(texture['filename'])
origin_json = texture['origin']
dimension_json = texture['dimensions']
origin = (origin_json['x'], origin_json['y'])
dimensions = (dimension_json['w'], dimension_json['h'])
AssetManager.asset_dictionary[texture_key] = main_surface.subsurface(pygame.Rect(origin, dimensions))
return AssetManager.asset_dictionary[texture_key]
else:
return AssetManager.asset_dictionary.get(texture_key)
print "No such texture with name: " + name
def request_sound(self, name):
'Get a mixer sound with specified name'
sound_key = "Sound_" + name
if AssetManager.asset_dictionary.get(sound_key) is None:
sound_array = AssetManager.json_dict['sound']
for sound in sound_array:
if sound['name'] == name:
AssetManager.asset_dictionary[sound_key] = pygame.mixer.Sound(sound['filename'])
return AssetManager.asset_dictionary[sound_key]
else:
return AssetManager.asset_dictionary.get(sound_key)
print "No such sound with name: " + name
def load_song(self, name):
        'Loads song with specified name into pygame.mixer.music and starts looping playback'
song_key = "Song_" + name
if AssetManager.asset_dictionary.get(song_key) is None:
song_array = AssetManager.json_dict['song']
for song in song_array:
if song['name'] == name:
AssetManager.asset_dictionary[song_key] = song['filename']
if AssetManager.asset_dictionary.get(song_key) is not None:
pygame.mixer.music.load(AssetManager.asset_dictionary[song_key])
pygame.mixer.music.play(-1)
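# Illustrative usage sketch (added for documentation). The asset names are
# assumptions; they must exist in assets/asset_map.json:
#
#   manager = AssetManager()
#   sprite = manager.request_texture("player")
#   jump_sound = manager.request_sound("jump")
#   manager.load_song("theme")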
|
p2/ZBar | refs/heads/master | examples/upcrpc.py | 29 | #!/usr/bin/env python
from xmlrpclib import ServerProxy
import sys, re
server = ServerProxy("http://www.upcdatabase.com/rpc")
ean_re = re.compile(r'^(UPC-A:|EAN-13:)?(\d{11,13})$', re.M)
def lookup(decode):
match = ean_re.search(decode)
if match is None:
print decode,
return
ean = match.group(2)
if match.group(1) == "UPC-A:":
ean = "0" + ean;
elif len(ean) < 12:
print decode,
return
if len(ean) == 12:
ean = server.calculateCheckDigit(ean + "C")
print "[" + match.group(1) + ean + "]",
result = server.lookupEAN(ean)
if isinstance(result, dict):
if "found" not in result or not result["found"] or \
"description" not in result:
print "not found"
else:
print result["description"]
else:
print str(result)
sys.stdout.flush()
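# Illustrative example inputs for lookup() (added for documentation; the
# codes below are well-known published barcodes used only as sample data):
#
#   lookup("EAN-13:4006381333931")   # scanner-style prefixed decode
#   lookup("036000291452")           # bare 12-digit UPC-A; the 13th check
#                                    # digit is computed via the RPC call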
if __name__ == "__main__":
del sys.argv[0]
if len(sys.argv):
for decode in sys.argv:
lookup(decode)
if not sys.stdin.isatty():
while 1:
decode = sys.stdin.readline()
if not decode:
break
lookup(decode)
|
mwytock/cvxpy | refs/heads/master | cvxpy/tests/test_tree_mat.py | 12 | """
Copyright 2013 Steven Diamond
This file is part of CVXPY.
CVXPY is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
CVXPY is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with CVXPY. If not, see <http://www.gnu.org/licenses/>.
"""
from cvxpy import *
import cvxpy.settings as s
from cvxpy.lin_ops.tree_mat import mul, tmul, prune_constants
import cvxpy.problems.iterative as iterative
from cvxpy.problems.solvers.utilities import SOLVERS
from cvxpy.problems.problem_data.sym_data import SymData
import numpy as np
import scipy.sparse as sp
import scipy.linalg as LA
import unittest
from cvxpy.tests.base_test import BaseTest
class test_tree_mat(BaseTest):
""" Unit tests for the matrix ops with expression trees. """
def test_mul(self):
"""Test the mul method.
"""
n = 2
ones = np.mat(np.ones((n, n)))
# Multiplication
x = Variable(n, n)
A = np.matrix("1 2; 3 4")
expr = (A*x).canonical_form[0]
val_dict = {x.id: ones}
result = mul(expr, val_dict)
assert (result == A*ones).all()
result_dict = tmul(expr, result)
assert (result_dict[x.id] == A.T*A*ones).all()
# Multiplication with promotion.
t = Variable()
A = np.matrix("1 2; 3 4")
expr = (A*t).canonical_form[0]
val_dict = {t.id: 2}
result = mul(expr, val_dict)
assert (result == A*2).all()
result_dict = tmul(expr, result)
total = 0
for i in range(A.shape[0]):
for j in range(A.shape[1]):
total += A[i, j]*result[i, j]
assert (result_dict[t.id] == total)
# Addition
y = Variable(n, n)
expr = (y + A*x).canonical_form[0]
val_dict = {x.id: np.ones((n, n)),
y.id: np.ones((n, n))}
result = mul(expr, val_dict)
assert (result == A*ones + ones).all()
result_dict = tmul(expr, result)
assert (result_dict[y.id] == result).all()
assert (result_dict[x.id] == A.T*result).all()
val_dict = {x.id: A,
y.id: A}
# Indexing
expr = (x[:, 0] + y[:, 1]).canonical_form[0]
result = mul(expr, val_dict)
assert (result == A[:, 0] + A[:, 1]).all()
result_dict = tmul(expr, result)
mat = ones
mat[:, 0] = result
mat[:, 1] = 0
assert (result_dict[x.id] == mat).all()
# Negation
val_dict = {x.id: A}
expr = (-x).canonical_form[0]
result = mul(expr, val_dict)
assert (result == -A).all()
result_dict = tmul(expr, result)
assert (result_dict[x.id] == A).all()
# Transpose
expr = x.T.canonical_form[0]
val_dict = {x.id: A}
result = mul(expr, val_dict)
assert (result == A.T).all()
result_dict = tmul(expr, result)
assert (result_dict[x.id] == A).all()
# Convolution
x = Variable(3)
f = np.matrix(np.array([1, 2, 3])).T
g = np.array([0, 1, 0.5])
f_conv_g = np.array([ 0., 1., 2.5, 4., 1.5])
expr = conv(f, x).canonical_form[0]
val_dict = {x.id: g}
result = mul(expr, val_dict)
self.assertItemsAlmostEqual(result, f_conv_g)
value = np.array(range(5))
result_dict = tmul(expr, value)
toep = LA.toeplitz(np.array([1,0,0]),
np.array([1, 2, 3, 0, 0]))
x_val = toep.dot(value)
self.assertItemsAlmostEqual(result_dict[x.id], x_val)
def test_abs_mul(self):
"""Test the abs mul method.
"""
n = 2
ones = np.mat(np.ones((n, n)))
# Multiplication
x = Variable(n, n)
A = np.matrix("-1 2; -3 4")
abs_A = np.abs(A)
expr = (A*x).canonical_form[0]
val_dict = {x.id: ones}
result = mul(expr, val_dict, True)
assert (result == abs_A*ones).all()
result_dict = tmul(expr, result, True)
assert (result_dict[x.id] == abs_A.T*abs_A*ones).all()
# Multiplication with promotion.
t = Variable()
A = np.matrix("1 -2; -3 -4")
abs_A = np.abs(A)
expr = (A*t).canonical_form[0]
val_dict = {t.id: 2}
result = mul(expr, val_dict, True)
assert (result == abs_A*2).all()
result_dict = tmul(expr, result, True)
total = 0
for i in range(A.shape[0]):
for j in range(A.shape[1]):
total += abs_A[i, j]*result[i, j]
assert (result_dict[t.id] == total)
# Addition
y = Variable(n, n)
expr = (y + A*x).canonical_form[0]
val_dict = {x.id: np.ones((n, n)),
y.id: np.ones((n, n))}
result = mul(expr, val_dict)
assert (result == A*ones + ones).all()
result_dict = tmul(expr, result)
assert (result_dict[y.id] == result).all()
assert (result_dict[x.id] == A.T*result).all()
val_dict = {x.id: A,
y.id: A}
# Indexing
expr = (x[:, 0] + y[:, 1]).canonical_form[0]
result = mul(expr, val_dict)
assert (result == A[:, 0] + A[:, 1]).all()
result_dict = tmul(expr, result)
mat = ones
mat[:, 0] = result
mat[:, 1] = 0
assert (result_dict[x.id] == mat).all()
# Negation
val_dict = {x.id: A}
expr = (-x).canonical_form[0]
result = mul(expr, val_dict)
assert (result == -A).all()
result_dict = tmul(expr, result)
assert (result_dict[x.id] == A).all()
# Transpose
expr = x.T.canonical_form[0]
val_dict = {x.id: A}
result = mul(expr, val_dict)
assert (result == A.T).all()
result_dict = tmul(expr, result)
assert (result_dict[x.id] == A).all()
# Convolution
x = Variable(3)
f = np.matrix(np.array([1, -2, -3])).T
g = np.array([0, 1, 0.5])
f_conv_g = np.array([ 0., 1., 2.5, 4., 1.5])
expr = conv(f, x).canonical_form[0]
val_dict = {x.id: g}
result = mul(expr, val_dict, True)
self.assertItemsAlmostEqual(result, f_conv_g)
value = np.array(range(5))
result_dict = tmul(expr, value, True)
toep = LA.toeplitz(np.array([1,0,0]),
np.array([1, 2, 3, 0, 0]))
x_val = toep.dot(value)
self.assertItemsAlmostEqual(result_dict[x.id], x_val)
def test_prune_constants(self):
"""Test pruning constants from constraints.
"""
x = Variable(2)
A = np.matrix("1 2; 3 4")
constraints = (A*x <= 2).canonical_form[1]
pruned = prune_constants(constraints)
prod = mul(pruned[0].expr, {})
self.assertItemsAlmostEqual(prod, np.zeros(A.shape[0]))
# Test no-op
constraints = (0*x <= 2).canonical_form[1]
pruned = prune_constants(constraints)
prod = mul(pruned[0].expr, {x.id: 1})
self.assertItemsAlmostEqual(prod, np.zeros(A.shape[0]))
def test_mul_funcs(self):
"""Test functions to multiply by A, A.T
"""
n = 10
x = Variable(n)
obj = Minimize(norm(x, 1))
constraints = [x >= 2]
prob = Problem(obj, constraints)
data = prob.get_problem_data(solver=SCS)
A = data["A"]
objective, constraints = prob.canonicalize()
sym_data = SymData(objective, constraints, SOLVERS[SCS])
sym_data.constraints = prune_constants(sym_data.constraints)
Amul, ATmul = iterative.get_mul_funcs(sym_data)
vec = np.array(range(sym_data.x_length))
# A*vec
result = np.zeros(A.shape[0])
Amul(vec, result)
self.assertItemsAlmostEqual(A*vec, result)
Amul(vec, result)
self.assertItemsAlmostEqual(2*A*vec, result)
# A.T*vec
vec = np.array(range(A.shape[0]))
result = np.zeros(A.shape[1])
ATmul(vec, result)
self.assertItemsAlmostEqual(A.T*vec, result)
ATmul(vec, result)
self.assertItemsAlmostEqual(2*A.T*vec, result)
|
TRESCLOUD/odoo | refs/heads/Integracion&ControlDeCalidad | openerp/workflow/wkf_logs.py | 61 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
#
# May be uncommented to logs workflows modifications
#
def log(cr,ident,act_id,info=''):
return
# msg = """
#res_type: %r
#res_id: %d
#uid: %d
#act_id: %d
#info: %s
#""" % (ident[1], ident[2], ident[0], act_id, info)
#cr.execute('insert into wkf_logs (res_type, res_id, uid, act_id, time, info) values (%s,%s,%s,%s,current_time,%s)', (ident[1],int(ident[2]),int(ident[0]),int(act_id),info))
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
mortada/tensorflow | refs/heads/master | tensorflow/python/training/rmsprop.py | 52 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""One-line documentation for rmsprop module.
rmsprop algorithm [tieleman2012rmsprop]
A detailed description of rmsprop.
- maintain a moving (discounted) average of the square of gradients
- divide gradient by the root of this average
mean_square = decay * mean_square{t-1} + (1-decay) * gradient ** 2
mom = momentum * mom{t-1} + learning_rate * g_t / sqrt(mean_square + epsilon)
delta = - mom
The centered version additionally maintains a moving (discounted) average of the
gradients, and uses that average to estimate the variance:
mean_grad = decay * mean_grad{t-1} + (1-decay) * gradient
mean_square = decay * mean_square{t-1} + (1-decay) * gradient ** 2
mom = momentum * mom{t-1} + learning_rate * g_t /
sqrt(mean_square - mean_grad**2 + epsilon)
delta = - mom
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.training import optimizer
from tensorflow.python.training import training_ops
class RMSPropOptimizer(optimizer.Optimizer):
"""Optimizer that implements the RMSProp algorithm.
See the [paper](http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf).
"""
def __init__(self,
learning_rate,
decay=0.9,
momentum=0.0,
epsilon=1e-10,
use_locking=False,
centered=False,
name="RMSProp"):
"""Construct a new RMSProp optimizer.
    Note that in the dense implementation of this algorithm, m_t and v_t
    will update even if g is zero, but in the sparse implementation, m_t
    and v_t will not update in iterations where g is zero.
Args:
learning_rate: A Tensor or a floating point value. The learning rate.
decay: Discounting factor for the history/coming gradient
momentum: A scalar tensor.
epsilon: Small value to avoid zero denominator.
use_locking: If True use locks for update operation.
centered: If True, gradients are normalized by the estimated variance of
the gradient; if False, by the uncentered second moment. Setting this to
True may help with training, but is slightly more expensive in terms of
computation and memory. Defaults to False.
name: Optional name prefix for the operations created when applying
gradients. Defaults to "RMSProp".
"""
super(RMSPropOptimizer, self).__init__(use_locking, name)
self._learning_rate = learning_rate
self._decay = decay
self._momentum = momentum
self._epsilon = epsilon
self._centered = centered
# Tensors for learning rate and momentum. Created in _prepare.
self._learning_rate_tensor = None
self._decay_tensor = None
self._momentum_tensor = None
self._epsilon_tensor = None
def _create_slots(self, var_list):
for v in var_list:
init_rms = init_ops.ones_initializer(dtype=v.dtype)
self._get_or_make_slot_with_initializer(v, init_rms, v.get_shape(),
v.dtype, "rms", self._name)
if self._centered:
self._zeros_slot(v, "mg", self._name)
self._zeros_slot(v, "momentum", self._name)
def _prepare(self):
self._learning_rate_tensor = ops.convert_to_tensor(self._learning_rate,
name="learning_rate")
self._decay_tensor = ops.convert_to_tensor(self._decay, name="decay")
self._momentum_tensor = ops.convert_to_tensor(self._momentum,
name="momentum")
self._epsilon_tensor = ops.convert_to_tensor(self._epsilon,
name="epsilon")
def _apply_dense(self, grad, var):
rms = self.get_slot(var, "rms")
mom = self.get_slot(var, "momentum")
if self._centered:
mg = self.get_slot(var, "mg")
return training_ops.apply_centered_rms_prop(
var,
mg,
rms,
mom,
math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype),
math_ops.cast(self._decay_tensor, var.dtype.base_dtype),
math_ops.cast(self._momentum_tensor, var.dtype.base_dtype),
math_ops.cast(self._epsilon_tensor, var.dtype.base_dtype),
grad,
use_locking=self._use_locking).op
else:
return training_ops.apply_rms_prop(
var,
rms,
mom,
math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype),
math_ops.cast(self._decay_tensor, var.dtype.base_dtype),
math_ops.cast(self._momentum_tensor, var.dtype.base_dtype),
math_ops.cast(self._epsilon_tensor, var.dtype.base_dtype),
grad,
use_locking=self._use_locking).op
def _resource_apply_dense(self, grad, var):
rms = self.get_slot(var, "rms")
mom = self.get_slot(var, "momentum")
if self._centered:
mg = self.get_slot(var, "mg")
return training_ops.resource_apply_centered_rms_prop(
var.handle,
mg.handle,
rms.handle,
mom.handle,
math_ops.cast(self._learning_rate_tensor, grad.dtype.base_dtype),
math_ops.cast(self._decay_tensor, grad.dtype.base_dtype),
math_ops.cast(self._momentum_tensor, grad.dtype.base_dtype),
math_ops.cast(self._epsilon_tensor, grad.dtype.base_dtype),
grad,
use_locking=self._use_locking)
else:
return training_ops.resource_apply_rms_prop(
var.handle,
rms.handle,
mom.handle,
math_ops.cast(self._learning_rate_tensor, grad.dtype.base_dtype),
math_ops.cast(self._decay_tensor, grad.dtype.base_dtype),
math_ops.cast(self._momentum_tensor, grad.dtype.base_dtype),
math_ops.cast(self._epsilon_tensor, grad.dtype.base_dtype),
grad,
use_locking=self._use_locking)
def _apply_sparse(self, grad, var):
rms = self.get_slot(var, "rms")
mom = self.get_slot(var, "momentum")
if self._centered:
mg = self.get_slot(var, "mg")
return training_ops.sparse_apply_centered_rms_prop(
var,
mg,
rms,
mom,
math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype),
math_ops.cast(self._decay_tensor, var.dtype.base_dtype),
math_ops.cast(self._momentum_tensor, var.dtype.base_dtype),
math_ops.cast(self._epsilon_tensor, var.dtype.base_dtype),
grad.values,
grad.indices,
use_locking=self._use_locking)
else:
return training_ops.sparse_apply_rms_prop(
var,
rms,
mom,
math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype),
math_ops.cast(self._decay_tensor, var.dtype.base_dtype),
math_ops.cast(self._momentum_tensor, var.dtype.base_dtype),
math_ops.cast(self._epsilon_tensor, var.dtype.base_dtype),
grad.values,
grad.indices,
use_locking=self._use_locking)
def _resource_apply_sparse(self, grad, var, indices):
rms = self.get_slot(var, "rms")
mom = self.get_slot(var, "momentum")
if self._centered:
mg = self.get_slot(var, "mg")
return training_ops.resource_sparse_apply_centered_rms_prop(
var.handle,
mg.handle,
rms.handle,
mom.handle,
math_ops.cast(self._learning_rate_tensor, grad.dtype),
math_ops.cast(self._decay_tensor, grad.dtype),
math_ops.cast(self._momentum_tensor, grad.dtype),
math_ops.cast(self._epsilon_tensor, grad.dtype),
grad,
indices,
use_locking=self._use_locking)
else:
return training_ops.resource_sparse_apply_rms_prop(
var.handle,
rms.handle,
mom.handle,
math_ops.cast(self._learning_rate_tensor, grad.dtype),
math_ops.cast(self._decay_tensor, grad.dtype),
math_ops.cast(self._momentum_tensor, grad.dtype),
math_ops.cast(self._epsilon_tensor, grad.dtype),
grad,
indices,
use_locking=self._use_locking)
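# Illustrative usage sketch (added for documentation). It assumes a
# TF1-style graph with a `loss` tensor already built, and uses only the
# generic Optimizer API:
#
#   opt = RMSPropOptimizer(learning_rate=0.001, decay=0.9, momentum=0.9,
#                          centered=True)
#   train_op = opt.minimize(loss)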
|
keras-team/keras-contrib | refs/heads/master | keras_contrib/activations/__init__.py | 2 | from __future__ import absolute_import
from .squash import squash
|
hcrlab/access_teleop | refs/heads/master | ezgripper_driver/nodes/ezgripperGUI.py | 1 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import rospy
import sys
from ezgripper_libs.ezgripper_interface import EZGripper
from PyQt4 import QtGui, QtCore
rospy.init_node('hello_ezgripper')
gripper = EZGripper('ezgripper/main')
class GripperGUI(QtGui.QMainWindow):
def __init__(self):
super(GripperGUI, self).__init__()
self.initUI()
def initUI(self):
calibrateButton=QtGui.QPushButton("Calibrate",self)
calibrateButton.resize(100,30)
calibrateButton.clicked.connect(gripper.calibrate)
#QObject.connect(calibrateButton, SIGNAL("clicked()"), gripper.calibrate)
calibrateButton.move(50,10)
calibrateButton.show()
releaseButton=QtGui.QPushButton("Release",self)
releaseButton.resize(200,200)
releaseButton.clicked.connect(gripper.release)
#QObject.connect(hard_closeButton, SIGNAL("clicked()"), gripper.hard_close)
releaseButton.move(50,50)
hard_closeButton=QtGui.QPushButton("Hard Close",self)
hard_closeButton.resize(200,200)
hard_closeButton.clicked.connect(self.submit_goto_hard_close)
hard_closeButton.move(250,50)
openButton=QtGui.QPushButton("Open", self)
openButton.clicked.connect(self.submit_goto_open)
openButton.resize(200,200)
openButton.move(450,50)
gotoButton=QtGui.QPushButton("0% Torque Mode", self)
gotoButton.resize(100,200)
gotoButton.clicked.connect(self.submit_goto1)
gotoButton.move(50,250)
gotoButton=QtGui.QPushButton("10%", self)
gotoButton.resize(100,200)
gotoButton.clicked.connect(self.submit_goto2)
gotoButton.move(150,250)
gotoButton=QtGui.QPushButton("20%", self)
gotoButton.resize(100,200)
gotoButton.clicked.connect(self.submit_goto3)
gotoButton.move(250,250)
gotoButton=QtGui.QPushButton("30%", self)
gotoButton.resize(100,200)
gotoButton.clicked.connect(self.submit_goto4)
gotoButton.move(350,250)
gotoButton=QtGui.QPushButton("40%", self)
gotoButton.resize(100,200)
gotoButton.clicked.connect(self.submit_goto5)
gotoButton.move(450,250)
gotoButton=QtGui.QPushButton("50%", self)
gotoButton.resize(100,200)
gotoButton.clicked.connect(self.submit_goto6)
gotoButton.move(550,250)
gotoButton=QtGui.QPushButton("60%", self)
gotoButton.resize(100,200)
gotoButton.clicked.connect(self.submit_goto7)
gotoButton.move(150,450)
gotoButton=QtGui.QPushButton("70%", self)
gotoButton.resize(100,200)
gotoButton.clicked.connect(self.submit_goto8)
gotoButton.move(250,450)
gotoButton=QtGui.QPushButton("80%", self)
gotoButton.resize(100,200)
gotoButton.clicked.connect(self.submit_goto9)
gotoButton.move(350,450)
gotoButton=QtGui.QPushButton("90%", self)
gotoButton.resize(100,200)
gotoButton.clicked.connect(self.submit_goto10)
gotoButton.move(450,450)
gotoButton=QtGui.QPushButton("100%", self)
gotoButton.resize(100,200)
gotoButton.clicked.connect(self.submit_goto11)
gotoButton.move(550,450)
self.statusBar()
self.setGeometry(300, 200, 800, 850)
self.setWindowTitle("EZGripper GUI")
self.show()
def submit_goto_hard_close(self):
gripper.goto_position(0, 100)
def submit_goto_open(self):
gripper.goto_position(1, 100)
def submit_goto1(self):
gripper.goto_position(0, 10)
def submit_goto2(self):
gripper.goto_position(.10, 100)
def submit_goto3(self):
gripper.goto_position(.20, 100)
def submit_goto4(self):
gripper.goto_position(.30, 100)
def submit_goto5(self):
gripper.goto_position(.40, 100)
def submit_goto6(self):
gripper.goto_position(.50, 100)
def submit_goto7(self):
gripper.goto_position(.60, 100)
def submit_goto8(self):
gripper.goto_position(.70, 100)
def submit_goto9(self):
gripper.goto_position(.80, 100)
def submit_goto10(self):
gripper.goto_position(.90, 100)
def submit_goto11(self):
gripper.goto_position(1.0, 100)
def submit_goto12(self):
gripper.goto_position(.20, 100)
def submit_goto13(self):
gripper.goto_position(.20, 100)
def submit_goto14(self):
gripper.goto_position(.20, 100)
def main():
ezgripper_app=QtGui.QApplication(sys.argv)
ex=GripperGUI()
sys.exit(ezgripper_app.exec_())
if __name__== '__main__':
main()
|
fabada/pootle | refs/heads/master | pootle/core/models.py | 7 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
from .cache import get_cache
from .mixins import TreeItem
cache = get_cache('redis')
class Revision(object):
"""Wrapper around the revision counter stored in Redis."""
CACHE_KEY = 'pootle:revision'
INITIAL = 0
@classmethod
def initialize(cls, force=False):
"""Initializes the revision with `cls.INITIAL`.
:param force: whether to overwrite the number if there's a
revision already set or not.
:return: `True` if the initial value was set, `False` otherwise.
"""
if force:
return cls.set(cls.INITIAL)
return cls.add(cls.INITIAL)
@classmethod
def get(cls):
"""Gets the current revision number.
:return: The current revision number, or the initial number if
there's no revision stored yet.
"""
return cache.get(cls.CACHE_KEY, cls.INITIAL)
@classmethod
def set(cls, value):
"""Sets the revision number to `value`, regardless of whether
there's a value previously set or not.
:return: `True` if the value was set, `False` otherwise.
"""
return cache.set(cls.CACHE_KEY, value)
@classmethod
def add(cls, value):
"""Sets the revision number to `value`, only if there's no
revision already set.
:return: `True` if the value was set, `False` otherwise.
"""
return cache.add(cls.CACHE_KEY, value)
@classmethod
def incr(cls):
"""Increments the revision number.
:return: the new revision number after incrementing it, or the
initial number if there's no revision stored yet.
"""
try:
return cache.incr(cls.CACHE_KEY)
except ValueError:
return cls.INITIAL
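# Illustrative usage sketch (added for documentation): a typical
# read/increment cycle against the Redis-backed counter.
#
#   Revision.initialize()   # no-op if a revision is already stored
#   current = Revision.get()
#   new = Revision.incr()   # current + 1 once a value exists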
class VirtualResource(TreeItem):
"""An object representing a virtual resource.
A virtual resource doesn't live in the DB and has a unique
`pootle_path` of its own. It's a simple collection of actual
resources.
For instance, this can be used in projects to have cross-language
references.
Don't use this object as-is, rather subclass it and adapt the
implementation details for each context.
"""
def __init__(self, resources, pootle_path, *args, **kwargs):
self.resources = resources #: Collection of underlying resources
self.pootle_path = pootle_path
super(VirtualResource, self).__init__(*args, **kwargs)
def __unicode__(self):
return self.pootle_path
### TreeItem
def get_children(self):
return self.resources
def get_cachekey(self):
return self.pootle_path
### /TreeItem
|
40223232/2015cd_midterm2 | refs/heads/master | static/Brython3.1.1-20150328-091302/Lib/urllib/__init__.py | 12133432 | |
andela-angene/coursebuilder-core | refs/heads/develop-frontend | coursebuilder/modules/oauth2/__init__.py | 12133432 | |
graphql-python/graphql-epoxy | refs/heads/master | epoxy/metaclasses/__init__.py | 12133432 | |
amaork/libi2c | refs/heads/master | tests/test_pylibi2c.py | 1 | import random
import unittest
import pylibi2c
class Pylibi2cTest(unittest.TestCase):
def setUp(self):
self.i2c_size = 256
# 24C04 E2PROM test
self.i2c = pylibi2c.I2CDevice(bus="/dev/i2c-1", addr=0x56, page_bytes=16)
def test_init(self):
with self.assertRaises(TypeError):
pylibi2c.I2CDevice()
with self.assertRaises(TypeError):
pylibi2c.I2CDevice(1, 2)
with self.assertRaises(TypeError):
pylibi2c.I2CDevice("1", "2")
with self.assertRaises(TypeError):
pylibi2c.I2CDevice("/dev/i2c-1")
with self.assertRaises(IOError):
pylibi2c.I2CDevice("/dev/i2c-100", 0x56)
def test_getattr(self):
i2c = pylibi2c.I2CDevice("/dev/i2c-1", 0x56)
with self.assertRaises(AttributeError):
i2c.bus
with self.assertRaises(AttributeError):
i2c.addr
def test_setattr(self):
i2c = pylibi2c.I2CDevice("/dev/i2c-1", 0x56)
with self.assertRaises(AttributeError):
i2c.bus = ""
with self.assertRaises(AttributeError):
i2c.addr = ""
def test_flags(self):
i2c = pylibi2c.I2CDevice("/dev/i2c-1", 0x56)
self.assertEqual(i2c.flags, 0)
i2c = pylibi2c.I2CDevice("/dev/i2c-1", 0x56, flags=1)
self.assertEqual(i2c.flags, 1)
with self.assertRaises(TypeError):
i2c.flags = "100"
with self.assertRaises(TypeError):
i2c.flags = 0.1
with self.assertRaises(ValueError):
i2c.flags = -1
i2c.flags = 0
self.assertEqual(i2c.flags, 0)
i2c.flags = pylibi2c.I2C_M_NOSTART
self.assertEqual(i2c.flags, pylibi2c.I2C_M_NOSTART)
i2c.flags = pylibi2c.I2C_M_IGNORE_NAK
self.assertEqual(i2c.flags, pylibi2c.I2C_M_IGNORE_NAK)
def test_delay(self):
i2c = pylibi2c.I2CDevice("/dev/i2c-1", 0x56)
self.assertEqual(i2c.delay, 1)
i2c = pylibi2c.I2CDevice("/dev/i2c-1", 0x56, delay=0)
self.assertEqual(i2c.delay, 0)
with self.assertRaises(TypeError):
i2c.delay = "100"
with self.assertRaises(TypeError):
i2c.delay = 0.1
with self.assertRaises(ValueError):
i2c.delay = -1
with self.assertRaises(ValueError):
i2c.delay = 101
i2c.delay = 10
self.assertEqual(i2c.delay, 10)
i2c.delay = 100
self.assertEqual(i2c.delay, 100)
def test_tenbit(self):
i2c = pylibi2c.I2CDevice("/dev/i2c-1", 0x56)
self.assertEqual(i2c.tenbit, False)
i2c = pylibi2c.I2CDevice("/dev/i2c-1", 0x56, tenbit=1)
self.assertEqual(i2c.tenbit, True)
with self.assertRaises(TypeError):
i2c.tenbit = 0
with self.assertRaises(TypeError):
i2c.tenbit = 100
with self.assertRaises(TypeError):
i2c.tenbit = "True"
i2c.tenbit = False
self.assertEqual(i2c.tenbit, False)
i2c.tenbit = True
self.assertEqual(i2c.tenbit, True)
def test_page_bytes(self):
i2c = pylibi2c.I2CDevice("/dev/i2c-1", 0x56)
self.assertEqual(i2c.page_bytes, 8)
i2c = pylibi2c.I2CDevice("/dev/i2c-1", 0x56, page_bytes=16)
self.assertEqual(i2c.page_bytes, 16)
with self.assertRaises(TypeError):
i2c.page_bytes = "1"
with self.assertRaises(ValueError):
i2c.page_bytes = -1
with self.assertRaises(ValueError):
i2c.page_bytes = 0
with self.assertRaises(ValueError):
i2c.page_bytes = 4
with self.assertRaises(ValueError):
i2c.page_bytes = 10
with self.assertRaises(ValueError):
i2c.page_bytes = 2048
i2c.page_bytes = 32
self.assertEqual(i2c.page_bytes, 32)
i2c.page_bytes = 64
self.assertEqual(i2c.page_bytes, 64)
def test_iaddr_bytes(self):
i2c = pylibi2c.I2CDevice("/dev/i2c-1", 0x56)
self.assertEqual(i2c.iaddr_bytes, 1)
i2c = pylibi2c.I2CDevice("/dev/i2c-1", 0x56, iaddr_bytes=2)
self.assertEqual(i2c.iaddr_bytes, 2)
with self.assertRaises(TypeError):
i2c.iaddr_bytes = "1"
with self.assertRaises(ValueError):
i2c.iaddr_bytes = -1
with self.assertRaises(ValueError):
i2c.iaddr_bytes = 5
i2c.iaddr_bytes = 0
self.assertEqual(i2c.iaddr_bytes, 0)
i2c.iaddr_bytes = 1
self.assertEqual(i2c.iaddr_bytes, 1)
i2c.iaddr_bytes = 3
self.assertEqual(i2c.iaddr_bytes, 3)
def test_read(self):
self.assertEqual(len(self.i2c.read(0, self.i2c_size)), self.i2c_size)
self.assertEqual(len(self.i2c.read(0, 100)), 100)
self.assertEqual(len(self.i2c.read(13, 13)), 13)
self.assertEqual(len(self.i2c.read(13, 1)), 1)
def test_write(self):
# 0 - 0xff
w_buf = bytearray(range(self.i2c_size))
self.assertEqual(self.i2c.write(0, bytes(w_buf)), self.i2c_size)
r_buf = self.i2c.read(0, self.i2c_size)
self.assertEqual(len(r_buf), self.i2c_size)
self.assertSequenceEqual(w_buf, r_buf)
# Random data
w_buf = bytearray(self.i2c_size)
for i in range(self.i2c_size):
w_buf[i] = random.randint(0, 255)
self.assertEqual(self.i2c.write(0, bytes(w_buf)), self.i2c_size)
r_buf = self.i2c.read(0, self.i2c_size)
self.assertEqual(len(r_buf), self.i2c_size)
self.assertSequenceEqual(w_buf, r_buf)
# Not aligned write
data = "a212131edada123qdadaeqeqdsadskfljfjfj"
for addr in range(1, 200, 2):
self.assertEqual(self.i2c.write(addr, data), len(data))
self.assertEqual(self.i2c.read(addr, len(data)).decode("ascii"), data)
def test_ioctl_read(self):
self.assertEqual(len(self.i2c.ioctl_read(0, self.i2c_size)), self.i2c_size)
self.assertEqual(len(self.i2c.ioctl_read(0, 100)), 100)
self.assertEqual(len(self.i2c.ioctl_read(13, 13)), 13)
self.assertEqual(len(self.i2c.ioctl_read(13, 1)), 1)
    def test_ioctl_write(self):
# 0 - 0xff
w_buf = bytearray(range(self.i2c_size))
self.assertEqual(self.i2c.ioctl_write(0, bytes(w_buf)), self.i2c_size)
r_buf = self.i2c.ioctl_read(0, self.i2c_size)
self.assertEqual(len(r_buf), self.i2c_size)
self.assertSequenceEqual(w_buf, r_buf)
# Random data
w_buf = bytearray(self.i2c_size)
for i in range(self.i2c_size):
w_buf[i] = random.randint(0, 255)
self.assertEqual(self.i2c.ioctl_write(0, bytes(w_buf)), self.i2c_size)
r_buf = self.i2c.ioctl_read(0, self.i2c_size)
self.assertEqual(len(r_buf), self.i2c_size)
self.assertSequenceEqual(w_buf, r_buf)
# Not aligned write
data = "a212131edada123qdadaeqeqdsadskfljfjfj"
for addr in range(1, 200, 2):
self.assertEqual(self.i2c.ioctl_write(addr, data), len(data))
self.assertEqual(self.i2c.ioctl_read(addr, len(data)).decode("ascii"), data)
if __name__ == '__main__':
unittest.main()
|
mambocab/python-driver | refs/heads/master | tests/unit/cython/test_types.py | 3 | # Copyright DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tests.unit.cython.utils import cyimport, cythontest
types_testhelper = cyimport('tests.unit.cython.types_testhelper')
try:
import unittest2 as unittest
except ImportError:
import unittest # noqa
class TypesTest(unittest.TestCase):
@cythontest
def test_datetype(self):
types_testhelper.test_datetype(self.assertEqual)
@cythontest
def test_date_side_by_side(self):
types_testhelper.test_date_side_by_side(self.assertEqual)
|
marcosmodesto/django-testapp | refs/heads/master | django/contrib/auth/tokens.py | 96 | from datetime import date
from django.conf import settings
from django.utils.http import int_to_base36, base36_to_int
from django.utils.crypto import constant_time_compare, salted_hmac
class PasswordResetTokenGenerator(object):
"""
Strategy object used to generate and check tokens for the password
reset mechanism.
"""
def make_token(self, user):
"""
Returns a token that can be used once to do a password reset
for the given user.
"""
return self._make_token_with_timestamp(user, self._num_days(self._today()))
def check_token(self, user, token):
"""
Check that a password reset token is correct for a given user.
"""
# Parse the token
try:
ts_b36, hash = token.split("-")
except ValueError:
return False
try:
ts = base36_to_int(ts_b36)
except ValueError:
return False
# Check that the timestamp/uid has not been tampered with
if not constant_time_compare(self._make_token_with_timestamp(user, ts), token):
return False
# Check the timestamp is within limit
if (self._num_days(self._today()) - ts) > settings.PASSWORD_RESET_TIMEOUT_DAYS:
return False
return True
def _make_token_with_timestamp(self, user, timestamp):
# timestamp is number of days since 2001-1-1. Converted to
# base 36, this gives us a 3 digit string until about 2121
ts_b36 = int_to_base36(timestamp)
# By hashing on the internal state of the user and using state
# that is sure to change (the password salt will change as soon as
# the password is set, at least for current Django auth, and
# last_login will also change), we produce a hash that will be
# invalid as soon as it is used.
# We limit the hash to 20 chars to keep URL short
key_salt = "django.contrib.auth.tokens.PasswordResetTokenGenerator"
# Ensure results are consistent across DB backends
login_timestamp = user.last_login.replace(microsecond=0, tzinfo=None)
value = (unicode(user.id) + user.password +
unicode(login_timestamp) + unicode(timestamp))
hash = salted_hmac(key_salt, value).hexdigest()[::2]
return "%s-%s" % (ts_b36, hash)
def _num_days(self, dt):
return (dt - date(2001, 1, 1)).days
def _today(self):
# Used for mocking in tests
return date.today()
default_token_generator = PasswordResetTokenGenerator()
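# Illustrative usage sketch (added for documentation). It assumes `user` is
# a saved User instance with last_login set, under configured settings:
#
#   token = default_token_generator.make_token(user)
#   default_token_generator.check_token(user, token)
#   # -> True while the password, last_login and timeout are unchanged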
|
DarthMaulware/EquationGroupLeaks | refs/heads/master | Leak #5 - Lost In Translation/windows/Resources/Python/Core/Lib/encodings/hp_roman8.py | 1 | # uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: hp_roman8.py
""" Python Character Mapping Codec generated from 'hp_roman8.txt' with gencodec.py.
Based on data from ftp://dkuug.dk/i18n/charmaps/HP-ROMAN8 (Keld Simonsen)
Original source: LaserJet IIP Printer User's Manual HP part no
    33471-90901, Hewlett-Packard, June 1989.
"""
import codecs
class Codec(codecs.Codec):
def encode(self, input, errors='strict'):
return codecs.charmap_encode(input, errors, encoding_map)
def decode(self, input, errors='strict'):
return codecs.charmap_decode(input, errors, decoding_map)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input, self.errors, encoding_map)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input, self.errors, decoding_map)[0]
class StreamWriter(Codec, codecs.StreamWriter):
pass
class StreamReader(Codec, codecs.StreamReader):
pass
def getregentry():
return codecs.CodecInfo(name='hp-roman8', encode=Codec().encode, decode=Codec().decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamwriter=StreamWriter, streamreader=StreamReader)
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({161: 192,
162: 194,
163: 200,
164: 202,
165: 203,
166: 206,
167: 207,
168: 180,
169: 715,
170: 710,
171: 168,
172: 732,
173: 217,
174: 219,
175: 8356,
176: 175,
177: 221,
178: 253,
179: 176,
180: 199,
181: 231,
182: 209,
183: 241,
184: 161,
185: 191,
186: 164,
187: 163,
188: 165,
189: 167,
190: 402,
191: 162,
192: 226,
193: 234,
194: 244,
195: 251,
196: 225,
197: 233,
198: 243,
199: 250,
200: 224,
201: 232,
202: 242,
203: 249,
204: 228,
205: 235,
206: 246,
207: 252,
208: 197,
209: 238,
210: 216,
211: 198,
212: 229,
213: 237,
214: 248,
215: 230,
216: 196,
217: 236,
218: 214,
219: 220,
220: 201,
221: 239,
222: 223,
223: 212,
224: 193,
225: 195,
226: 227,
227: 208,
228: 240,
229: 205,
230: 204,
231: 211,
232: 210,
233: 213,
234: 245,
235: 352,
236: 353,
237: 218,
238: 376,
239: 255,
240: 222,
241: 254,
242: 183,
243: 181,
244: 182,
245: 190,
246: 8212,
247: 188,
248: 189,
249: 170,
250: 186,
251: 171,
252: 9632,
253: 187,
254: 177,
255: None
})
encoding_map = codecs.make_encoding_map(decoding_map) |
vijaylbais/boto | refs/heads/develop | boto/manage/propget.py | 153 | # Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
def get(prop, choices=None):
prompt = prop.verbose_name
if not prompt:
prompt = prop.name
if choices:
if callable(choices):
choices = choices()
else:
choices = prop.get_choices()
valid = False
while not valid:
if choices:
min = 1
max = len(choices)
for i in range(min, max+1):
value = choices[i-1]
if isinstance(value, tuple):
value = value[0]
print('[%d] %s' % (i, value))
value = raw_input('%s [%d-%d]: ' % (prompt, min, max))
try:
int_value = int(value)
value = choices[int_value-1]
if isinstance(value, tuple):
value = value[1]
valid = True
except ValueError:
print('%s is not a valid choice' % value)
except IndexError:
                print('%s is not within the range [%d-%d]' % (value, min, max))
else:
value = raw_input('%s: ' % prompt)
try:
value = prop.validate(value)
if prop.empty(value) and prop.required:
print('A value is required')
else:
valid = True
except:
print('Invalid value: %s' % value)
return value
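# Illustrative usage sketch (added for documentation). `prop` is assumed to
# be a boto property object exposing name/verbose_name/get_choices/validate,
# as used above; `size_prop` is a hypothetical example:
#
#   value = get(prop)                                   # prompt until valid
#   size = get(size_prop, choices=['small', 'large'])   # numbered menu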
|
robjohnson189/home-assistant | refs/heads/dev | tests/components/automation/test_time.py | 7 | """The tests for the time automation."""
from datetime import timedelta
import unittest
from unittest.mock import patch
from homeassistant.core import callback
from homeassistant.bootstrap import setup_component
import homeassistant.util.dt as dt_util
import homeassistant.components.automation as automation
from tests.common import (
fire_time_changed, get_test_home_assistant, assert_setup_component)
# pylint: disable=invalid-name
class TestAutomationTime(unittest.TestCase):
"""Test the event automation."""
def setUp(self):
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant()
self.hass.config.components.append('group')
self.calls = []
@callback
def record_call(service):
"""Helper to record calls."""
self.calls.append(service)
self.hass.services.register('test', 'automation', record_call)
def tearDown(self):
"""Stop everything that was started."""
self.hass.stop()
def test_if_fires_when_hour_matches(self):
"""Test for firing if hour is matching."""
assert setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'time',
'hours': 0,
},
'action': {
'service': 'test.automation'
}
}
})
fire_time_changed(self.hass, dt_util.utcnow().replace(hour=0))
self.hass.block_till_done()
self.assertEqual(1, len(self.calls))
automation.turn_off(self.hass)
self.hass.block_till_done()
fire_time_changed(self.hass, dt_util.utcnow().replace(hour=0))
self.hass.block_till_done()
self.assertEqual(1, len(self.calls))
def test_if_fires_when_minute_matches(self):
"""Test for firing if minutes are matching."""
assert setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'time',
'minutes': 0,
},
'action': {
'service': 'test.automation'
}
}
})
fire_time_changed(self.hass, dt_util.utcnow().replace(minute=0))
self.hass.block_till_done()
self.assertEqual(1, len(self.calls))
def test_if_fires_when_second_matches(self):
"""Test for firing if seconds are matching."""
assert setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'time',
'seconds': 0,
},
'action': {
'service': 'test.automation'
}
}
})
fire_time_changed(self.hass, dt_util.utcnow().replace(second=0))
self.hass.block_till_done()
self.assertEqual(1, len(self.calls))
def test_if_fires_when_all_matches(self):
"""Test for firing if everything matches."""
assert setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'time',
'hours': 1,
'minutes': 2,
'seconds': 3,
},
'action': {
'service': 'test.automation'
}
}
})
fire_time_changed(self.hass, dt_util.utcnow().replace(
hour=1, minute=2, second=3))
self.hass.block_till_done()
self.assertEqual(1, len(self.calls))
def test_if_fires_periodic_seconds(self):
"""Test for firing periodically every second."""
assert setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'time',
'seconds': "/2",
},
'action': {
'service': 'test.automation'
}
}
})
fire_time_changed(self.hass, dt_util.utcnow().replace(
hour=0, minute=0, second=2))
self.hass.block_till_done()
self.assertEqual(1, len(self.calls))
def test_if_fires_periodic_minutes(self):
"""Test for firing periodically every minute."""
assert setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'time',
'minutes': "/2",
},
'action': {
'service': 'test.automation'
}
}
})
fire_time_changed(self.hass, dt_util.utcnow().replace(
hour=0, minute=2, second=0))
self.hass.block_till_done()
self.assertEqual(1, len(self.calls))
def test_if_fires_periodic_hours(self):
"""Test for firing periodically every hour."""
assert setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'time',
'hours': "/2",
},
'action': {
'service': 'test.automation'
}
}
})
fire_time_changed(self.hass, dt_util.utcnow().replace(
hour=2, minute=0, second=0))
self.hass.block_till_done()
self.assertEqual(1, len(self.calls))
def test_if_fires_using_after(self):
"""Test for firing after."""
assert setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'time',
'after': '5:00:00',
},
'action': {
'service': 'test.automation',
'data_template': {
'some': '{{ trigger.platform }} - '
'{{ trigger.now.hour }}'
},
}
}
})
fire_time_changed(self.hass, dt_util.utcnow().replace(
hour=5, minute=0, second=0))
self.hass.block_till_done()
self.assertEqual(1, len(self.calls))
self.assertEqual('time - 5', self.calls[0].data['some'])
def test_if_not_working_if_no_values_in_conf_provided(self):
"""Test for failure if no configuration."""
with assert_setup_component(0):
assert not setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'time',
},
'action': {
'service': 'test.automation'
}
}
})
fire_time_changed(self.hass, dt_util.utcnow().replace(
hour=5, minute=0, second=0))
self.hass.block_till_done()
self.assertEqual(0, len(self.calls))
def test_if_not_fires_using_wrong_after(self):
"""YAML translates time values to total seconds.
        This should break the 'after' rule.
"""
with assert_setup_component(0):
assert not setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'time',
'after': 3605,
                        # Total seconds; one hour = 3600 seconds.
},
'action': {
'service': 'test.automation'
}
}
})
fire_time_changed(self.hass, dt_util.utcnow().replace(
hour=1, minute=0, second=5))
self.hass.block_till_done()
self.assertEqual(0, len(self.calls))
def test_if_action_before(self):
"""Test for if action before."""
assert setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'event',
'event_type': 'test_event'
},
'condition': {
'condition': 'time',
'before': '10:00',
},
'action': {
'service': 'test.automation'
}
}
})
before_10 = dt_util.now().replace(hour=8)
after_10 = dt_util.now().replace(hour=14)
with patch('homeassistant.helpers.condition.dt_util.now',
return_value=before_10):
self.hass.bus.fire('test_event')
self.hass.block_till_done()
self.assertEqual(1, len(self.calls))
with patch('homeassistant.helpers.condition.dt_util.now',
return_value=after_10):
self.hass.bus.fire('test_event')
self.hass.block_till_done()
self.assertEqual(1, len(self.calls))
def test_if_action_after(self):
"""Test for if action after."""
assert setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'event',
'event_type': 'test_event'
},
'condition': {
'condition': 'time',
'after': '10:00',
},
'action': {
'service': 'test.automation'
}
}
})
before_10 = dt_util.now().replace(hour=8)
after_10 = dt_util.now().replace(hour=14)
with patch('homeassistant.helpers.condition.dt_util.now',
return_value=before_10):
self.hass.bus.fire('test_event')
self.hass.block_till_done()
self.assertEqual(0, len(self.calls))
with patch('homeassistant.helpers.condition.dt_util.now',
return_value=after_10):
self.hass.bus.fire('test_event')
self.hass.block_till_done()
self.assertEqual(1, len(self.calls))
def test_if_action_one_weekday(self):
"""Test for if action with one weekday."""
assert setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'event',
'event_type': 'test_event'
},
'condition': {
'condition': 'time',
'weekday': 'mon',
},
'action': {
'service': 'test.automation'
}
}
})
days_past_monday = dt_util.now().weekday()
monday = dt_util.now() - timedelta(days=days_past_monday)
tuesday = monday + timedelta(days=1)
with patch('homeassistant.helpers.condition.dt_util.now',
return_value=monday):
self.hass.bus.fire('test_event')
self.hass.block_till_done()
self.assertEqual(1, len(self.calls))
with patch('homeassistant.helpers.condition.dt_util.now',
return_value=tuesday):
self.hass.bus.fire('test_event')
self.hass.block_till_done()
self.assertEqual(1, len(self.calls))
def test_if_action_list_weekday(self):
"""Test for action with a list of weekdays."""
assert setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'event',
'event_type': 'test_event'
},
'condition': {
'condition': 'time',
'weekday': ['mon', 'tue'],
},
'action': {
'service': 'test.automation'
}
}
})
days_past_monday = dt_util.now().weekday()
monday = dt_util.now() - timedelta(days=days_past_monday)
tuesday = monday + timedelta(days=1)
wednesday = tuesday + timedelta(days=1)
with patch('homeassistant.helpers.condition.dt_util.now',
return_value=monday):
self.hass.bus.fire('test_event')
self.hass.block_till_done()
self.assertEqual(1, len(self.calls))
with patch('homeassistant.helpers.condition.dt_util.now',
return_value=tuesday):
self.hass.bus.fire('test_event')
self.hass.block_till_done()
self.assertEqual(2, len(self.calls))
with patch('homeassistant.helpers.condition.dt_util.now',
return_value=wednesday):
self.hass.bus.fire('test_event')
self.hass.block_till_done()
self.assertEqual(2, len(self.calls))
|
DonLakeFlyer/ardupilot | refs/heads/master | Tools/ardupilotwaf/build_summary.py | 17 | #!/usr/bin/env python
# encoding: utf-8
# Copyright (C) 2016 Intel Corporation. All rights reserved.
#
# This file is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This file is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
'''
Waf tool for printing build summary. To be used, this must be loaded in the
options(), configure() and build() functions.
This tool expects toolchain tool to be already loaded.
The environment variable BUILD_SUMMARY_HEADER can be used to change the default
header for the targets' summary table.
Extra information can be printed by assigning a function to
bld.extra_build_summary. That function must receive bld as the first argument
and this module as the second one.
If one target's task generator (tg) doesn't have a link_task or places the ELF
file at a place different from link_task.outputs[0], then
tg.build_summary['binary'] should be set as the Node object or a path relative
to bld.bldnode for the binary file. Otherwise, size information won't be
printed for that target.
'''
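# A minimal sketch of the hooks described above, as they might appear in a
# project wscript (illustrative only; the BOARD variable is an assumption):
#
#     def build(bld):
#         bld.load('build_summary')
#         def my_extra(bld, summary_module):
#             summary_module.text('Board: ', str(bld.env.BOARD))
#         bld.extra_build_summary = my_extra
#         bld.build_summary_post_fun()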
import sys
from waflib import Context, Logs, Node
from waflib.Configure import conf
from waflib.TaskGen import before_method, feature
MAX_TARGETS = 20
header_text = {
'target': 'Target',
'binary_path': 'Binary',
'size_text': 'Text',
'size_data': 'Data',
'size_bss': 'BSS',
'size_total': 'Total',
}
def text(label, text=''):
text = text.strip()
if text:
Logs.info('%s%s%s%s%s' % (
Logs.colors.NORMAL,
Logs.colors.BOLD,
label,
Logs.colors.NORMAL,
text))
else:
Logs.info('%s%s%s' % (
Logs.colors.NORMAL,
Logs.colors.BOLD,
label
))
def print_table(summary_data_list, header):
max_widths = []
table = [[] for _ in range(len(summary_data_list))]
header_row = []
for h in header:
txt = header_text.get(h, h)
header_row.append(txt)
max_width = len(txt)
for i, row_data in enumerate(summary_data_list):
txt = str(row_data.get(h, '-'))
table[i].append(txt)
w = len(txt)
if w > max_width:
max_width = w
max_widths.append(max_width)
sep = ' '
fmts = ['{:<%d}' % w for w in max_widths]
header_row = sep.join(fmts).format(*header_row)
text(header_row)
line = ('-' * len(sep)).join('-' * w for w in max_widths)
print(line)
for row in table:
fmts = []
for j, v in enumerate(row):
w = max_widths[j]
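                # Right-align cells that parse as numbers, left-align text.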
try:
float(v)
except ValueError:
fmts.append('{:<%d}' % w)
else:
fmts.append('{:>%d}' % w)
row = sep.join(fmts).format(*row)
print(row)
def _build_summary(bld):
Logs.info('')
text('BUILD SUMMARY')
text('Build directory: ', bld.bldnode.abspath())
targets_suppressed = False
if bld.targets == '*':
taskgens = bld.get_all_task_gen()
if len(taskgens) > MAX_TARGETS and not bld.options.summary_all:
targets_suppressed = True
taskgens = taskgens[:MAX_TARGETS]
else:
targets = bld.targets.split(',')
if len(targets) > MAX_TARGETS and not bld.options.summary_all:
targets_suppressed = True
targets = targets[:MAX_TARGETS]
taskgens = [bld.get_tgen_by_name(t) for t in targets]
nodes = []
filtered_taskgens = []
for tg in taskgens:
if not hasattr(tg, 'build_summary'):
tg.init_summary_data()
n = tg.build_summary.get('binary', None)
if not n:
t = getattr(tg, 'link_task', None)
if not t:
continue
n = t.outputs[0]
tg.build_summary['binary'] = n
nodes.append(n)
filtered_taskgens.append(tg)
taskgens = filtered_taskgens
if nodes:
l = bld.size_summary(nodes)
for i, data in enumerate(l):
taskgens[i].build_summary.update(data)
summary_data_list = [tg.build_summary for tg in taskgens]
print_table(summary_data_list, bld.env.BUILD_SUMMARY_HEADER)
if targets_suppressed:
Logs.info('')
Logs.pprint(
'NORMAL',
'Note: Some targets were suppressed. Use --summary-all if you want information of all targets.',
)
if hasattr(bld, 'extra_build_summary'):
bld.extra_build_summary(bld, sys.modules[__name__])
def _parse_size_output(s):
lines = s.splitlines()[1:]
l = []
for line in lines:
row = line.strip().split()
l.append(dict(
size_text=int(row[0]),
size_data=int(row[1]),
size_bss=int(row[2]),
size_total=int(row[3]),
))
return l
@conf
def size_summary(bld, nodes):
l = []
for n in nodes:
path = n
if isinstance(n, Node.Node):
path = n.path_from(bld.bldnode)
l.append(dict(binary_path=path))
if bld.env.SIZE:
cmd = [bld.env.get_flat('SIZE')] + [d['binary_path'] for d in l]
out = bld.cmd_and_log(
cmd,
cwd=bld.bldnode.abspath(),
quiet=Context.BOTH,
)
parsed = _parse_size_output(out)
for i, data in enumerate(parsed):
l[i].update(data)
return l
@conf
def build_summary_post_fun(bld):
bld.add_post_fun(_build_summary)
@feature('cprogram', 'cxxprogram')
@before_method('process_rule')
def init_summary_data(self):
self.build_summary = dict(target=self.name)
def options(opt):
g = opt.ap_groups['build']
g.add_option('--summary-all',
action='store_true',
help='''Print build summary for all targets. By default, only
information about the first %d targets will be printed.
''' % MAX_TARGETS)
def configure(cfg):
size_name = 'size'
if cfg.env.TOOLCHAIN != 'native':
size_name = cfg.env.TOOLCHAIN + '-' + size_name
cfg.find_program(size_name, var='SIZE', mandatory=False)
if not cfg.env.BUILD_SUMMARY_HEADER:
cfg.env.BUILD_SUMMARY_HEADER = [
'target',
'size_text',
'size_data',
'size_bss',
'size_total',
]
|
michael-dev2rights/ansible | refs/heads/ansible-d2r | lib/ansible/modules/network/netvisor/pn_ospfarea.py | 29 | #!/usr/bin/python
""" PN-CLI vrouter-ospf-add/remove """
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: pn_ospfarea
author: "Pluribus Networks (@amitsi)"
version_added: "2.2"
short_description: CLI command to add/remove ospf area to/from a vrouter.
description:
- Execute vrouter-ospf-add, vrouter-ospf-remove command.
- This command adds/removes Open Shortest Path First(OSPF) area to/from
a virtual router(vRouter) service.
options:
pn_cliusername:
description:
- Login username.
required: true
pn_clipassword:
description:
- Login password.
required: true
pn_cliswitch:
description:
- Target switch(es) to run the CLI on.
required: False
state:
description:
- State the action to perform. Use 'present' to add ospf-area, 'absent'
to remove ospf-area and 'update' to modify ospf-area.
required: true
choices: ['present', 'absent', 'update']
pn_vrouter_name:
description:
- Specify the name of the vRouter.
required: true
pn_ospf_area:
description:
- Specify the OSPF area number.
required: true
pn_stub_type:
description:
- Specify the OSPF stub type.
choices: ['none', 'stub', 'stub-no-summary', 'nssa', 'nssa-no-summary']
pn_prefix_listin:
description:
- OSPF prefix list for filtering incoming packets.
pn_prefix_listout:
description:
- OSPF prefix list for filtering outgoing packets.
pn_quiet:
description:
- Enable/disable system information.
required: false
default: true
"""
EXAMPLES = """
- name: "Add OSPF area to vrouter"
pn_ospfarea:
state: present
pn_cliusername: admin
pn_clipassword: admin
pn_ospf_area: 1.0.0.0
pn_stub_type: stub
- name: "Remove OSPF from vrouter"
pn_ospf:
state: absent
pn_cliusername: admin
pn_clipassword: admin
pn_vrouter_name: name-string
pn_ospf_area: 1.0.0.0
"""
RETURN = """
command:
description: The CLI command run on the target node(s).
returned: always
type: str
stdout:
description: The set of responses from the ospf command.
returned: always
type: list
stderr:
description: The set of error responses from the ospf command.
returned: on error
type: list
changed:
description: Indicates whether the CLI caused changes on the target.
returned: always
type: bool
"""
import shlex
def get_command_from_state(state):
"""
This method gets appropriate command name for the state specified. It
returns the command name for the specified state.
:param state: The state for which the respective command name is required.
"""
command = None
if state == 'present':
command = 'vrouter-ospf-area-add'
if state == 'absent':
command = 'vrouter-ospf-area-remove'
if state == 'update':
command = 'vrouter-ospf-area-modify'
return command
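# For example, get_command_from_state('present') yields 'vrouter-ospf-area-add'.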
def main():
""" This section is for arguments parsing """
module = AnsibleModule(
argument_spec=dict(
pn_cliusername=dict(required=True, type='str'),
pn_clipassword=dict(required=True, type='str', no_log=True),
pn_cliswitch=dict(required=False, type='str'),
            state=dict(required=True, type='str',
choices=['present', 'absent', 'update']),
pn_vrouter_name=dict(required=True, type='str'),
pn_ospf_area=dict(required=True, type='str'),
pn_stub_type=dict(type='str', choices=['none', 'stub', 'nssa',
'stub-no-summary',
'nssa-no-summary']),
pn_prefix_listin=dict(type='str'),
pn_prefix_listout=dict(type='str'),
pn_quiet=dict(type='bool', default='True')
)
)
# Accessing the arguments
cliusername = module.params['pn_cliusername']
clipassword = module.params['pn_clipassword']
cliswitch = module.params['pn_cliswitch']
state = module.params['state']
vrouter_name = module.params['pn_vrouter_name']
ospf_area = module.params['pn_ospf_area']
stub_type = module.params['pn_stub_type']
prefix_listin = module.params['pn_prefix_listin']
prefix_listout = module.params['pn_prefix_listout']
quiet = module.params['pn_quiet']
command = get_command_from_state(state)
# Building the CLI command string
cli = '/usr/bin/cli'
if quiet is True:
cli += ' --quiet '
cli += ' --user %s:%s ' % (cliusername, clipassword)
if cliswitch:
if cliswitch == 'local':
cli += ' switch-local '
else:
cli += ' switch ' + cliswitch
cli += ' %s vrouter-name %s area %s ' % (command, vrouter_name, ospf_area)
if stub_type:
cli += ' stub-type ' + stub_type
if prefix_listin:
cli += ' prefix-list-in ' + prefix_listin
if prefix_listout:
cli += ' prefix-list-out ' + prefix_listout
# Run the CLI command
ospfcommand = shlex.split(cli)
# 'out' contains the output
# 'err' contains the error messages
result, out, err = module.run_command(ospfcommand)
# Response in JSON format
if result != 0:
module.exit_json(
command=cli,
stderr=err.rstrip("\r\n"),
changed=False
)
else:
module.exit_json(
command=cli,
stdout=out.rstrip("\r\n"),
changed=True
)
# AnsibleModule boilerplate
from ansible.module_utils.basic import AnsibleModule
if __name__ == '__main__':
main()
|
oglops/Maya-PyQt-Scripts | refs/heads/master | tests/pyqt-test.py | 2 | import sip
import maya.OpenMayaUI as mui
from PyQt4.QtCore import *
from PyQt4.QtGui import *
def getMayaWindow():
    ptr = mui.MQtUtil.mainWindow()
    return sip.wrapinstance(long(ptr), QObject)

class Form(QDialog):
    def __init__(self, parent=None):
        super(Form, self).__init__(parent)
        self.setWindowTitle('Test Dialog')
        self.setObjectName('mainUI')
        self.mainLayout = QVBoxLayout(self)
        self.myButton = QPushButton('myButton')
        self.mainLayout.addWidget(self.myButton)

global app
global form
app = qApp
form = Form(getMayaWindow())
form.show()
chemelnucfin/tensorflow | refs/heads/master | tensorflow/contrib/tpu/python/profiler/__init__.py | 8 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Stub file to maintain backwards compatibility."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=wildcard-import,unused-import
from tensorflow.python.tpu.profiler import *
# pylint: enable=wildcard-import,unused-import
|
lukemerrett/SteamProgress | refs/heads/master | clients/installed_games.py | 1 | import os
import settings
__installed_games = os.listdir(settings.steam_user_folder)
def is_game_in_installed_games_list(game):
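    """Return True if the game dict's 'appid' matches a folder name in the configured Steam user directory."""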
return str(game['appid']) in __installed_games
|
albertjan/pypyjs-presentation | refs/heads/gh-pages | assets/js/pypy.js-0.3.1/lib/modules/unittest/test/test_skipping.py | 71 | import unittest
from .support import LoggingResult
class Test_TestSkipping(unittest.TestCase):
def test_skipping(self):
class Foo(unittest.TestCase):
def test_skip_me(self):
self.skipTest("skip")
events = []
result = LoggingResult(events)
test = Foo("test_skip_me")
test.run(result)
self.assertEqual(events, ['startTest', 'addSkip', 'stopTest'])
self.assertEqual(result.skipped, [(test, "skip")])
# Try letting setUp skip the test now.
class Foo(unittest.TestCase):
def setUp(self):
self.skipTest("testing")
def test_nothing(self): pass
events = []
result = LoggingResult(events)
test = Foo("test_nothing")
test.run(result)
self.assertEqual(events, ['startTest', 'addSkip', 'stopTest'])
self.assertEqual(result.skipped, [(test, "testing")])
self.assertEqual(result.testsRun, 1)
def test_skipping_decorators(self):
op_table = ((unittest.skipUnless, False, True),
(unittest.skipIf, True, False))
for deco, do_skip, dont_skip in op_table:
class Foo(unittest.TestCase):
@deco(do_skip, "testing")
def test_skip(self): pass
@deco(dont_skip, "testing")
def test_dont_skip(self): pass
test_do_skip = Foo("test_skip")
test_dont_skip = Foo("test_dont_skip")
suite = unittest.TestSuite([test_do_skip, test_dont_skip])
events = []
result = LoggingResult(events)
suite.run(result)
self.assertEqual(len(result.skipped), 1)
expected = ['startTest', 'addSkip', 'stopTest',
'startTest', 'addSuccess', 'stopTest']
self.assertEqual(events, expected)
self.assertEqual(result.testsRun, 2)
self.assertEqual(result.skipped, [(test_do_skip, "testing")])
self.assertTrue(result.wasSuccessful())
def test_skip_class(self):
@unittest.skip("testing")
class Foo(unittest.TestCase):
def test_1(self):
record.append(1)
record = []
result = unittest.TestResult()
test = Foo("test_1")
suite = unittest.TestSuite([test])
suite.run(result)
self.assertEqual(result.skipped, [(test, "testing")])
self.assertEqual(record, [])
def test_skip_non_unittest_class_old_style(self):
@unittest.skip("testing")
class Mixin:
def test_1(self):
record.append(1)
class Foo(Mixin, unittest.TestCase):
pass
record = []
result = unittest.TestResult()
test = Foo("test_1")
suite = unittest.TestSuite([test])
suite.run(result)
self.assertEqual(result.skipped, [(test, "testing")])
self.assertEqual(record, [])
def test_skip_non_unittest_class_new_style(self):
@unittest.skip("testing")
class Mixin(object):
def test_1(self):
record.append(1)
class Foo(Mixin, unittest.TestCase):
pass
record = []
result = unittest.TestResult()
test = Foo("test_1")
suite = unittest.TestSuite([test])
suite.run(result)
self.assertEqual(result.skipped, [(test, "testing")])
self.assertEqual(record, [])
def test_expected_failure(self):
class Foo(unittest.TestCase):
@unittest.expectedFailure
def test_die(self):
self.fail("help me!")
events = []
result = LoggingResult(events)
test = Foo("test_die")
test.run(result)
self.assertEqual(events,
['startTest', 'addExpectedFailure', 'stopTest'])
self.assertEqual(result.expectedFailures[0][0], test)
self.assertTrue(result.wasSuccessful())
def test_unexpected_success(self):
class Foo(unittest.TestCase):
@unittest.expectedFailure
def test_die(self):
pass
events = []
result = LoggingResult(events)
test = Foo("test_die")
test.run(result)
self.assertEqual(events,
['startTest', 'addUnexpectedSuccess', 'stopTest'])
self.assertFalse(result.failures)
self.assertEqual(result.unexpectedSuccesses, [test])
self.assertTrue(result.wasSuccessful())
def test_skip_doesnt_run_setup(self):
class Foo(unittest.TestCase):
wasSetUp = False
wasTornDown = False
def setUp(self):
Foo.wasSetUp = True
            def tearDown(self):
Foo.wasTornDown = True
@unittest.skip('testing')
def test_1(self):
pass
result = unittest.TestResult()
test = Foo("test_1")
suite = unittest.TestSuite([test])
suite.run(result)
self.assertEqual(result.skipped, [(test, "testing")])
self.assertFalse(Foo.wasSetUp)
self.assertFalse(Foo.wasTornDown)
def test_decorated_skip(self):
def decorator(func):
def inner(*a):
return func(*a)
return inner
class Foo(unittest.TestCase):
@decorator
@unittest.skip('testing')
def test_1(self):
pass
result = unittest.TestResult()
test = Foo("test_1")
suite = unittest.TestSuite([test])
suite.run(result)
self.assertEqual(result.skipped, [(test, "testing")])
if __name__ == '__main__':
unittest.main()
|
GLPJJ/PL | refs/heads/master | py/ztest/quant_stock.py | 1 | #python 3.6
#pip install pandas
#pip install lxml
# Install any missing modules as prompted:
#pip install requests
#pip install bs4
#pip install tushare
# Trading section:
#pip install tesseract
# Search for "tesseract", download it separately, and put it on the PATH.
# Set TESSDATA_PREFIX in the environment variables; the training data lives at C:\Program Files (x86)\Tesseract-OCR\tessdata
import tushare
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import datetime
import time
import math
import os
import sys
import json
import file_helper
class QuantMatplot(object):
"""docstring for QuantMatplot"""
def __init__(self):
super(QuantMatplot, self).__init__()
class QuantOrder(object):
"""docstring for QuantOrder"""
def __init__(self):
super(QuantOrder, self).__init__()
class QuantAccountData(object):
def __init__(self):
"""
构造函数
"""
class QuantUserData(object):
def __init__(self):
"""
构造函数
"""
class QuantStockContext(object):
def __init__(self,start,end,frequency):
"""
构造函数
"""
#开始日期 format:YYYY-MM-DD 为空时取上市首日
self.start_time= start
#结束日期 format:YYYY-MM-DD 为空时取最近一个交易日
self.end_time= end
#数据类型,D=日k线 W=周 M=月 5=5分钟 15=15分钟 30=30分钟 60=60分钟,默认为D
self.frequency = frequency
#自定义数据
self.user_data = QuantUserData()
print("self.user_data =",id(self.user_data))
self.account = QuantAccountData()
self.order = QuantOrder()
self.account_initial = QuantAccountData()
self.matplot = QuantMatplot()
self.matplot.date = []
self.matplot.my = []
self.matplot.standard = []
# Buy at the closing price; no slippage is modeled.
STOCK_FLOAT = 0.0
def stock_buy(name, price, cash):
    count = int(cash/price//100*100)
    if count >= 100:
        # Assume the order fills immediately at price + slippage.
        buy_money = (price+STOCK_FLOAT)*count
        fee = context.account.commission*buy_money  # commission
        if fee < context.account.commission_base:
            fee = context.account.commission_base
        context.account.money -= buy_money  # subtract the purchase amount
        context.account.money -= fee  # subtract the fees
        context.account.stock += count
        print("buy: bought ["+name+"] OK, remaining cash =", context.account.money, ", shares held =", context.account.stock,
              "turnover =", buy_money, "total fees =", fee, "stamp tax =", 0)
        return True
    else:
        print("buy: buying ["+name+"] failed, not enough cash for a single lot!!!")
        return False

def stock_sell(name, price, count):
    # Assume the order fills immediately at price - slippage.
    sell_money = (price-STOCK_FLOAT)*count
    fee = context.account.commission*sell_money  # commission
    if fee < context.account.commission_base:
        fee = context.account.commission_base
    fee1 = context.account.tax*sell_money  # stamp tax
    fee += fee1
    context.account.money += sell_money  # add the sale proceeds
    context.account.money -= fee  # subtract the fees
    context.account.stock -= count
    print("sell: sold ["+name+"] OK, remaining cash =", context.account.money, ", shares held =", context.account.stock,
          "turnover =", sell_money, "total fees =", fee, "stamp tax =", fee1)
def summarize(date, price):
    curTotal = context.account.money + context.account.stock*price
    orignTotal = context.account_initial.money
    orignPrice = context.account_initial.price_start
    my_profit = (curTotal-orignTotal)/orignTotal*100
    standard_profit = (price-orignPrice)/orignPrice*100
    print(date+" current total portfolio value =", curTotal, "strategy return =",
          str(my_profit)+"%", "benchmark return =",
          str(standard_profit)+"%")
    print("my cash =", context.account.money, "my shares =", context.account.stock, "*", price)
    context.matplot.date.append(date)
    context.matplot.my.append(my_profit)
    context.matplot.standard.append(standard_profit)
def draw_figure(context):
    fig = plt.figure("Profit Analysis", figsize=(18, 6))
    date_times = [datetime.datetime.strptime(x, '%Y-%m-%d') for x in context.matplot.date]
    # print(date_times[0])
    dates = matplotlib.dates.date2num(date_times)
    # Set the title.
    fig.suptitle('stock profit', fontsize=14, fontweight='bold')
    ax = fig.add_subplot(1, 1, 1)
    ax.plot(dates, context.matplot.standard)
    ax.plot(dates, context.matplot.my, 'r')
    # Rotation angle of the x-axis labels.
    plt.xticks(rotation=90)
    # for label in ax.xaxis.get_ticklabels():
    #     label.set_rotation(45)
    ax.set_xlabel("date")
    ax.set_ylabel("%")
    print("len(context.matplot.date) =", len(context.matplot.date))
    #print("matplotlib.ticker.Locator.MAXTICKS =", matplotlib.ticker.Locator.MAXTICKS)
    interval = math.ceil(len(context.matplot.date) / 30)
    print("interval =", interval)
    interval = 1 if interval == 0 else interval
    ax.xaxis.set_major_locator(mdates.DayLocator(bymonthday=range(1, 31), interval=interval))
    ax.xaxis.set_major_formatter(mdates.DateFormatter("%Y-%m-%d"))
    #plt.savefig("easyplot.jpg")
    plt.show()
# Start date, format YYYY-MM-DD; empty means the stock's listing date.
# End date, format YYYY-MM-DD; empty means the most recent trading day.
# Bar type: D=daily, W=weekly, M=monthly, 5/15/30/60=minute bars; default D.
"""
159915 started trading on 2011-12-09
300033 2009-12-25
300104 2010-08-31
"""
#"2011-09-08","2014-09-08"
context = QuantStockContext("2017-08-15", "", "D")  # 2015-08-10
context.security = "159915"
context.order.buy = stock_buy
context.order.sell = stock_sell
context.summarize = summarize
#print("context.user_data =", id(context.user_data))
context.account_initial.money = 2000  # initial cash held (RMB)
context.account_initial.stock = 0  # initial shares held
context.account.commission = 0.00025  # commission rate of 0.025% (2.5 per 10000)
context.account.commission_base = 5  # minimum commission per trade
context.account.tax = 0.001  # stamp tax; Shanghai-listed stocks also incur a transfer fee, ignored here for now
# Turtle trading strategy
import quant.haigui as quant_strategy
def main(fromFile=False):
#初始化我的账户钱和股票数量
data = None
account_file = "account.temp"
    if fromFile:  # restore saved state from file
try:
with open(account_file,"r") as file:
data = json.load(file)
except FileNotFoundError:
pass
    if data:
context.account.money = data['money']
context.account.stock = data['stock']
#context.start_time = data['today']
else:
context.account.money = context.account_initial.money
context.account.stock = context.account_initial.stock
print("tushare version =",tushare.__version__)
print("*"*100)
# print("quant_stock 当前目录=",os.getcwd())
# print(sys.path[0])
#上面两种方式在脚本被其他脚本引入的时候的目录不准确,是引入他们的文件的目录
print(file_helper.get_curpy_dir(__file__))
account_stock = {}
try:
with open("account_stock","r") as file:
account_stock = json.load(file)
except FileNotFoundError:
print("再见")
return #再见
# 测试实盘登陆账号 中信
# tushare.set_broker("csc",account_stock["account"],account_stock["pwd"])
# context.trader = tushare.trader.trader.TraderAPI("csc")
# context.trader.login()
# print(context.trader.baseinfo())
# return
k_data = tushare.get_k_data(context.security,start=context.start_time, end=context.end_time,ktype=context.frequency)
print(k_data)
#print(type(k_data))
#print(len(k_data))
    # Experiments with pandas.core.series.Series and pandas.core.frame.DataFrame:
    # k_data_seg = k_data[1399:1410] # [1399,1410) -- a half-open interval here
    # print(k_data_seg)
    # print(k_data_seg[-10:]) # take the last 10 rows
    # print("***1")
    # print(k_data_seg[5:]) # take row 5 through the end
    # print("***2")
    # # This access selects rows 1401 to 1406 (by the overall index) and the "high" column.
    # print(k_data_seg.loc[1401:1406,["high"]]) # [1401:1406] is a *closed* interval here, oddly enough
    # print("**")
    # print(k_data_seg.loc[5:7,["high"]]) # empty result
    # print("**")
    # print(k_data_seg.loc[-7:-5,["high"]]) # empty result; negative row indices are unsupported
    # print("***3")
    # # This access selects rows 1 to 2 (relative to the current slice) and columns 2 and 3.
    # print(k_data_seg.iloc[1:2,[2,3]]) # [1:2) is half-open again...
    # print("**")
    # print(k_data_seg.iloc[1401:1406,[2,3]]) # empty result; the row index means different things, which is confusing
    # print("**")
    # print(k_data_seg.iloc[-10:-9,[2,3]]) # negative row indices are supported here
    # print("***4")
    # k_data_seg_high = k_data_seg["high"]
    # print(k_data_seg_high,type(k_data_seg_high)) # access a specific column
    # print("**")
    # print(k_data_seg_high[1:3])
    # print(k_data_seg_high[-10:-8])
    # # last_seg_high_max = np.max(k_data_seg_high[1:3])
    # # print("last_seg_high_max",last_seg_high_max)
    # # Convert the pandas.core.series.Series to an array before accessing it:
    # k_data_seg_high_arra = k_data_seg_high[1:3].get_values()
    # print(k_data_seg_high_arra,type(k_data_seg_high_arra))
    # print(k_data_seg_high_arra[0]) #numpy.ndarray
    # return
k_data_start = k_data.iloc[0]
# print(k_data_start)
if data:
context.account_initial.price_start = data['price_start']
else:
context.account_initial.price_start = k_data_start.open
print(k_data_start.date + " first open =",context.account_initial.price_start)
    # Initialize the quant strategy.
quant_strategy.quant_init(context,fromFile)
    # Ask the strategy how many bars of history it needs.
needCount = quant_strategy.quant_need_count(context)
#print("needCount =",needCount)
#print(k_data[0:needCount])
for i in range(len(k_data)):
# print("i",i)
k_data_seg = []
realIndex = i+1
if realIndex < needCount:
k_data_seg = k_data[0:realIndex];
else:
k_data_seg = k_data[realIndex-needCount:realIndex];
        # DataFrame only supports forward slicing here; negative indices are not allowed.
#print(k_data_seg.loc[0:5,["close"]])
# print(k_data_seg)
if len(k_data_seg) != 0:
print("进入处理函数 "+(">"*100))
quant_strategy.handle_data(context,k_data_seg)
print("进入处理函数 end"+("<"*100))
        hist_end_data = k_data_seg.iloc[len(k_data_seg)-1]  # closing bar of the current window
# print(hist_end_data.close)
        context.summarize(hist_end_data.date, hist_end_data.close)  # tally the portfolio
    # First save the account state to a file,
save_to = {'money':context.account.money
,'stock':context.account.stock
,"price_start":context.account_initial.price_start
,"today":str(datetime.date.today())}
with open(account_file,"w") as file:
json.dump(save_to,file)
    # then draw the chart.
draw_figure(context)
if __name__ == '__main__':
main()
|
ychen820/microblog | refs/heads/master | y/google-cloud-sdk/platform/google_appengine/lib/grizzled/grizzled/text/__init__.py | 19 | # $Id: 55b6d7323887ac09f6bfba365205e952533b847b $
"""
The ``grizzled.text`` package contains text-related classes and modules.
"""
__docformat__ = "restructuredtext en"
# ---------------------------------------------------------------------------
# Imports
# ---------------------------------------------------------------------------
from StringIO import StringIO
# ---------------------------------------------------------------------------
# Exports
# ---------------------------------------------------------------------------
__all__ = ['hexdump']
# ---------------------------------------------------------------------------
# Constants
# ---------------------------------------------------------------------------
REPEAT_FORMAT = '*** Repeated %d times'
# ---------------------------------------------------------------------------
# Functions
# ---------------------------------------------------------------------------
def hexdump(source, out, width=16, start=0, limit=None, show_repeats=False):
"""
Produce a "standard" hexdump of the specified string or file-like
object. The output consists of a series of lines like this::
000000: 72 22 22 22 4f 53 20 72 6f 75 r'''OS rou
00000a: 74 69 6e 65 73 20 66 6f 72 20 tines for
000014: 4d 61 63 2c 20 4e 54 2c 20 6f Mac, NT, o
00001e: 72 20 50 6f 73 69 78 20 64 65 r Posix de
000028: 70 65 6e 64 69 6e 67 20 6f 6e pending on
000032: 20 77 68 61 74 20 73 79 73 74 what syst
00003c: 65 6d 20 77 65 27 72 65 20 6f em we're o
000046: 6e 2e 0a 0a 54 68 69 73 20 65 n...This e
The output width (i.e., the number of decoded characters shown on a
line) can be controlled with the ``width`` parameter.
Adjacent repeated lines are collapsed by default. For example::
000000: 00 00 00 00 00 00 00 00 00 00 ..........
*** Repeated 203 times
0007f8: 72 22 22 22 4f 53 20 72 6f 75 r'''OS rou
This behavior can be disabled via the ``show_repeats`` parameter.
:Parameters:
source : str or file
The object whose contents are to be dumped in hex. The
object can be a string or a file-like object.
out : file
Where to dump the hex output
width : int
The number of dumped characters per line
start : int
Offset within ``input`` where reading should begin
limit : int
Total number of bytes to dump. Defaults to everything from
``start`` to the end.
show_repeats : bool
``False`` to collapse repeated output lines, ``True`` to
dump all lines, even if they're repeats.
"""
def ascii(b):
"""Determine how to show a byte in ascii."""
if 32 <= b <= 126:
return chr(b)
else:
return '.'
pos = 0
ascii_map = [ ascii(c) for c in range(256) ]
lastbuf = ''
lastline = ''
repeat_count = 0
if width > 4:
space_col = width/2
else:
space_col = -1
if type(source) == str:
source = StringIO(source)
if start:
source.seek(start)
pos = start
hex_field_width = (width * 3) + 1
total_read = 0
while True:
if limit:
to_read = min(limit - total_read, width)
else:
to_read = width
buf = source.read(to_read)
length = len(buf)
total_read += length
if length == 0:
if repeat_count and (not show_repeats):
if repeat_count > 1:
print >> out, REPEAT_FORMAT % (repeat_count - 1)
elif repeat_count == 1:
print >> out, lastline
print >> out, lastline
break
else:
show_buf = True
if buf == lastbuf:
repeat_count += 1
show_buf = False
else:
if repeat_count and (not show_repeats):
if repeat_count == 1:
print >> out, lastline
else:
print >> out, REPEAT_FORMAT % (repeat_count - 1)
repeat_count = 0
# Build output line.
hex = ""
asc = ""
for i in range(length):
c = buf[i]
if i == space_col:
hex = hex + " "
hex = hex + ("%02x" % ord(c)) + " "
asc = asc + ascii_map[ord(c)]
line = "%06x: %-*s %s" % (pos, hex_field_width, hex, asc)
if show_buf:
print >> out, line
pos = pos + length
lastbuf = buf
lastline = line
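# Example usage (a sketch, not part of the original module):
#
#     import sys
#     hexdump('OS routines for Mac, NT, or Posix.', sys.stdout, width=10)
#
# prints lines like "000000: 4f 53 20 72 6f  75 74 69 6e 65  OS routine".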
def str2bool(s):
"""
Convert a string to a boolean value. The supported conversions are:
    +--------------+---------------+
    | String       | Boolean value |
    +==============+===============+
    | "false"      | False         |
    +--------------+---------------+
    | "true"       | True          |
    +--------------+---------------+
    | "f"          | False         |
    +--------------+---------------+
    | "t"          | True          |
    +--------------+---------------+
    | "0"          | False         |
    +--------------+---------------+
    | "1"          | True          |
    +--------------+---------------+
    | "n"          | False         |
    +--------------+---------------+
    | "y"          | True          |
    +--------------+---------------+
    | "no"         | False         |
    +--------------+---------------+
    | "yes"        | True          |
    +--------------+---------------+
    | "off"        | False         |
    +--------------+---------------+
    | "on"         | True          |
    +--------------+---------------+
Strings are compared in a case-blind fashion.
**Note**: This function is not currently localizable.
:Parameters:
s : str
The string to convert to boolean
:rtype: bool
:return: the corresponding boolean value
:raise ValueError: unrecognized boolean string
"""
try:
return {'false' : False,
'true' : True,
'f' : False,
't' : True,
'0' : False,
'1' : True,
'no' : False,
'yes' : True,
                'y'     : True,
                'n'     : False,
'off' : False,
'on' : True}[s.lower()]
except KeyError:
raise ValueError, 'Unrecognized boolean string: "%s"' % s
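# Example usage (sketch): str2bool('Yes') -> True, str2bool('off') -> False,
# and str2bool('maybe') raises ValueError.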
|
cognitoedtech/assy | refs/heads/master | 3rd_party/fuelux-master/fuelux-master/node_modules/testem/node_modules/js-yaml/support/pyyaml-src/emitter.py | 189 |
# Emitter expects events obeying the following grammar:
# stream ::= STREAM-START document* STREAM-END
# document ::= DOCUMENT-START node DOCUMENT-END
# node ::= SCALAR | sequence | mapping
# sequence ::= SEQUENCE-START node* SEQUENCE-END
# mapping ::= MAPPING-START (node node)* MAPPING-END
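# For example (an illustrative sketch, not part of the original source), the
# document "- 1\n- 2\n" corresponds to the event stream:
#     StreamStartEvent, DocumentStartEvent, SequenceStartEvent,
#     ScalarEvent(value='1'), ScalarEvent(value='2'),
#     SequenceEndEvent, DocumentEndEvent, StreamEndEvent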
__all__ = ['Emitter', 'EmitterError']
from .error import YAMLError
from .events import *
class EmitterError(YAMLError):
pass
class ScalarAnalysis:
def __init__(self, scalar, empty, multiline,
allow_flow_plain, allow_block_plain,
allow_single_quoted, allow_double_quoted,
allow_block):
self.scalar = scalar
self.empty = empty
self.multiline = multiline
self.allow_flow_plain = allow_flow_plain
self.allow_block_plain = allow_block_plain
self.allow_single_quoted = allow_single_quoted
self.allow_double_quoted = allow_double_quoted
self.allow_block = allow_block
class Emitter:
DEFAULT_TAG_PREFIXES = {
'!' : '!',
'tag:yaml.org,2002:' : '!!',
}
def __init__(self, stream, canonical=None, indent=None, width=None,
allow_unicode=None, line_break=None):
# The stream should have the methods `write` and possibly `flush`.
self.stream = stream
        # Encoding can be overridden by STREAM-START.
self.encoding = None
# Emitter is a state machine with a stack of states to handle nested
# structures.
self.states = []
self.state = self.expect_stream_start
# Current event and the event queue.
self.events = []
self.event = None
# The current indentation level and the stack of previous indents.
self.indents = []
self.indent = None
# Flow level.
self.flow_level = 0
# Contexts.
self.root_context = False
self.sequence_context = False
self.mapping_context = False
self.simple_key_context = False
# Characteristics of the last emitted character:
# - current position.
# - is it a whitespace?
# - is it an indention character
# (indentation space, '-', '?', or ':')?
self.line = 0
self.column = 0
self.whitespace = True
self.indention = True
# Whether the document requires an explicit document indicator
self.open_ended = False
# Formatting details.
self.canonical = canonical
self.allow_unicode = allow_unicode
self.best_indent = 2
if indent and 1 < indent < 10:
self.best_indent = indent
self.best_width = 80
if width and width > self.best_indent*2:
self.best_width = width
self.best_line_break = '\n'
if line_break in ['\r', '\n', '\r\n']:
self.best_line_break = line_break
# Tag prefixes.
self.tag_prefixes = None
# Prepared anchor and tag.
self.prepared_anchor = None
self.prepared_tag = None
# Scalar analysis and style.
self.analysis = None
self.style = None
def dispose(self):
# Reset the state attributes (to clear self-references)
self.states = []
self.state = None
def emit(self, event):
self.events.append(event)
while not self.need_more_events():
self.event = self.events.pop(0)
self.state()
self.event = None
# In some cases, we wait for a few next events before emitting.
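    # (A DocumentStartEvent, for instance, needs one event of lookahead so
    # check_empty_document() can see whether the document body is empty;
    # sequence and mapping starts need two and three events respectively so
    # the empty-collection and simple-key checks can inspect what follows.)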
def need_more_events(self):
if not self.events:
return True
event = self.events[0]
if isinstance(event, DocumentStartEvent):
return self.need_events(1)
elif isinstance(event, SequenceStartEvent):
return self.need_events(2)
elif isinstance(event, MappingStartEvent):
return self.need_events(3)
else:
return False
def need_events(self, count):
level = 0
for event in self.events[1:]:
if isinstance(event, (DocumentStartEvent, CollectionStartEvent)):
level += 1
elif isinstance(event, (DocumentEndEvent, CollectionEndEvent)):
level -= 1
elif isinstance(event, StreamEndEvent):
level = -1
if level < 0:
return False
return (len(self.events) < count+1)
def increase_indent(self, flow=False, indentless=False):
self.indents.append(self.indent)
if self.indent is None:
if flow:
self.indent = self.best_indent
else:
self.indent = 0
elif not indentless:
self.indent += self.best_indent
# States.
# Stream handlers.
def expect_stream_start(self):
if isinstance(self.event, StreamStartEvent):
if self.event.encoding and not hasattr(self.stream, 'encoding'):
self.encoding = self.event.encoding
self.write_stream_start()
self.state = self.expect_first_document_start
else:
raise EmitterError("expected StreamStartEvent, but got %s"
% self.event)
def expect_nothing(self):
raise EmitterError("expected nothing, but got %s" % self.event)
# Document handlers.
def expect_first_document_start(self):
return self.expect_document_start(first=True)
def expect_document_start(self, first=False):
if isinstance(self.event, DocumentStartEvent):
if (self.event.version or self.event.tags) and self.open_ended:
self.write_indicator('...', True)
self.write_indent()
if self.event.version:
version_text = self.prepare_version(self.event.version)
self.write_version_directive(version_text)
self.tag_prefixes = self.DEFAULT_TAG_PREFIXES.copy()
if self.event.tags:
handles = sorted(self.event.tags.keys())
for handle in handles:
prefix = self.event.tags[handle]
self.tag_prefixes[prefix] = handle
handle_text = self.prepare_tag_handle(handle)
prefix_text = self.prepare_tag_prefix(prefix)
self.write_tag_directive(handle_text, prefix_text)
implicit = (first and not self.event.explicit and not self.canonical
and not self.event.version and not self.event.tags
and not self.check_empty_document())
if not implicit:
self.write_indent()
self.write_indicator('---', True)
if self.canonical:
self.write_indent()
self.state = self.expect_document_root
elif isinstance(self.event, StreamEndEvent):
if self.open_ended:
self.write_indicator('...', True)
self.write_indent()
self.write_stream_end()
self.state = self.expect_nothing
else:
raise EmitterError("expected DocumentStartEvent, but got %s"
% self.event)
def expect_document_end(self):
if isinstance(self.event, DocumentEndEvent):
self.write_indent()
if self.event.explicit:
self.write_indicator('...', True)
self.write_indent()
self.flush_stream()
self.state = self.expect_document_start
else:
raise EmitterError("expected DocumentEndEvent, but got %s"
% self.event)
def expect_document_root(self):
self.states.append(self.expect_document_end)
self.expect_node(root=True)
# Node handlers.
def expect_node(self, root=False, sequence=False, mapping=False,
simple_key=False):
self.root_context = root
self.sequence_context = sequence
self.mapping_context = mapping
self.simple_key_context = simple_key
if isinstance(self.event, AliasEvent):
self.expect_alias()
elif isinstance(self.event, (ScalarEvent, CollectionStartEvent)):
self.process_anchor('&')
self.process_tag()
if isinstance(self.event, ScalarEvent):
self.expect_scalar()
elif isinstance(self.event, SequenceStartEvent):
if self.flow_level or self.canonical or self.event.flow_style \
or self.check_empty_sequence():
self.expect_flow_sequence()
else:
self.expect_block_sequence()
elif isinstance(self.event, MappingStartEvent):
if self.flow_level or self.canonical or self.event.flow_style \
or self.check_empty_mapping():
self.expect_flow_mapping()
else:
self.expect_block_mapping()
else:
raise EmitterError("expected NodeEvent, but got %s" % self.event)
def expect_alias(self):
if self.event.anchor is None:
raise EmitterError("anchor is not specified for alias")
self.process_anchor('*')
self.state = self.states.pop()
def expect_scalar(self):
self.increase_indent(flow=True)
self.process_scalar()
self.indent = self.indents.pop()
self.state = self.states.pop()
# Flow sequence handlers.
def expect_flow_sequence(self):
self.write_indicator('[', True, whitespace=True)
self.flow_level += 1
self.increase_indent(flow=True)
self.state = self.expect_first_flow_sequence_item
def expect_first_flow_sequence_item(self):
if isinstance(self.event, SequenceEndEvent):
self.indent = self.indents.pop()
self.flow_level -= 1
self.write_indicator(']', False)
self.state = self.states.pop()
else:
if self.canonical or self.column > self.best_width:
self.write_indent()
self.states.append(self.expect_flow_sequence_item)
self.expect_node(sequence=True)
def expect_flow_sequence_item(self):
if isinstance(self.event, SequenceEndEvent):
self.indent = self.indents.pop()
self.flow_level -= 1
if self.canonical:
self.write_indicator(',', False)
self.write_indent()
self.write_indicator(']', False)
self.state = self.states.pop()
else:
self.write_indicator(',', False)
if self.canonical or self.column > self.best_width:
self.write_indent()
self.states.append(self.expect_flow_sequence_item)
self.expect_node(sequence=True)
# Flow mapping handlers.
def expect_flow_mapping(self):
self.write_indicator('{', True, whitespace=True)
self.flow_level += 1
self.increase_indent(flow=True)
self.state = self.expect_first_flow_mapping_key
def expect_first_flow_mapping_key(self):
if isinstance(self.event, MappingEndEvent):
self.indent = self.indents.pop()
self.flow_level -= 1
self.write_indicator('}', False)
self.state = self.states.pop()
else:
if self.canonical or self.column > self.best_width:
self.write_indent()
if not self.canonical and self.check_simple_key():
self.states.append(self.expect_flow_mapping_simple_value)
self.expect_node(mapping=True, simple_key=True)
else:
self.write_indicator('?', True)
self.states.append(self.expect_flow_mapping_value)
self.expect_node(mapping=True)
def expect_flow_mapping_key(self):
if isinstance(self.event, MappingEndEvent):
self.indent = self.indents.pop()
self.flow_level -= 1
if self.canonical:
self.write_indicator(',', False)
self.write_indent()
self.write_indicator('}', False)
self.state = self.states.pop()
else:
self.write_indicator(',', False)
if self.canonical or self.column > self.best_width:
self.write_indent()
if not self.canonical and self.check_simple_key():
self.states.append(self.expect_flow_mapping_simple_value)
self.expect_node(mapping=True, simple_key=True)
else:
self.write_indicator('?', True)
self.states.append(self.expect_flow_mapping_value)
self.expect_node(mapping=True)
def expect_flow_mapping_simple_value(self):
self.write_indicator(':', False)
self.states.append(self.expect_flow_mapping_key)
self.expect_node(mapping=True)
def expect_flow_mapping_value(self):
if self.canonical or self.column > self.best_width:
self.write_indent()
self.write_indicator(':', True)
self.states.append(self.expect_flow_mapping_key)
self.expect_node(mapping=True)
# Block sequence handlers.
def expect_block_sequence(self):
indentless = (self.mapping_context and not self.indention)
self.increase_indent(flow=False, indentless=indentless)
self.state = self.expect_first_block_sequence_item
def expect_first_block_sequence_item(self):
return self.expect_block_sequence_item(first=True)
def expect_block_sequence_item(self, first=False):
if not first and isinstance(self.event, SequenceEndEvent):
self.indent = self.indents.pop()
self.state = self.states.pop()
else:
self.write_indent()
self.write_indicator('-', True, indention=True)
self.states.append(self.expect_block_sequence_item)
self.expect_node(sequence=True)
# Block mapping handlers.
def expect_block_mapping(self):
self.increase_indent(flow=False)
self.state = self.expect_first_block_mapping_key
def expect_first_block_mapping_key(self):
return self.expect_block_mapping_key(first=True)
def expect_block_mapping_key(self, first=False):
if not first and isinstance(self.event, MappingEndEvent):
self.indent = self.indents.pop()
self.state = self.states.pop()
else:
self.write_indent()
if self.check_simple_key():
self.states.append(self.expect_block_mapping_simple_value)
self.expect_node(mapping=True, simple_key=True)
else:
self.write_indicator('?', True, indention=True)
self.states.append(self.expect_block_mapping_value)
self.expect_node(mapping=True)
def expect_block_mapping_simple_value(self):
self.write_indicator(':', False)
self.states.append(self.expect_block_mapping_key)
self.expect_node(mapping=True)
def expect_block_mapping_value(self):
self.write_indent()
self.write_indicator(':', True, indention=True)
self.states.append(self.expect_block_mapping_key)
self.expect_node(mapping=True)
# Checkers.
def check_empty_sequence(self):
return (isinstance(self.event, SequenceStartEvent) and self.events
and isinstance(self.events[0], SequenceEndEvent))
def check_empty_mapping(self):
return (isinstance(self.event, MappingStartEvent) and self.events
and isinstance(self.events[0], MappingEndEvent))
def check_empty_document(self):
if not isinstance(self.event, DocumentStartEvent) or not self.events:
return False
event = self.events[0]
return (isinstance(event, ScalarEvent) and event.anchor is None
and event.tag is None and event.implicit and event.value == '')
def check_simple_key(self):
length = 0
if isinstance(self.event, NodeEvent) and self.event.anchor is not None:
if self.prepared_anchor is None:
self.prepared_anchor = self.prepare_anchor(self.event.anchor)
length += len(self.prepared_anchor)
if isinstance(self.event, (ScalarEvent, CollectionStartEvent)) \
and self.event.tag is not None:
if self.prepared_tag is None:
self.prepared_tag = self.prepare_tag(self.event.tag)
length += len(self.prepared_tag)
if isinstance(self.event, ScalarEvent):
if self.analysis is None:
self.analysis = self.analyze_scalar(self.event.value)
length += len(self.analysis.scalar)
return (length < 128 and (isinstance(self.event, AliasEvent)
or (isinstance(self.event, ScalarEvent)
and not self.analysis.empty and not self.analysis.multiline)
or self.check_empty_sequence() or self.check_empty_mapping()))
# Anchor, Tag, and Scalar processors.
def process_anchor(self, indicator):
if self.event.anchor is None:
self.prepared_anchor = None
return
if self.prepared_anchor is None:
self.prepared_anchor = self.prepare_anchor(self.event.anchor)
if self.prepared_anchor:
self.write_indicator(indicator+self.prepared_anchor, True)
self.prepared_anchor = None
def process_tag(self):
tag = self.event.tag
if isinstance(self.event, ScalarEvent):
if self.style is None:
self.style = self.choose_scalar_style()
if ((not self.canonical or tag is None) and
((self.style == '' and self.event.implicit[0])
or (self.style != '' and self.event.implicit[1]))):
self.prepared_tag = None
return
if self.event.implicit[0] and tag is None:
tag = '!'
self.prepared_tag = None
else:
if (not self.canonical or tag is None) and self.event.implicit:
self.prepared_tag = None
return
if tag is None:
raise EmitterError("tag is not specified")
if self.prepared_tag is None:
self.prepared_tag = self.prepare_tag(tag)
if self.prepared_tag:
self.write_indicator(self.prepared_tag, True)
self.prepared_tag = None
def choose_scalar_style(self):
if self.analysis is None:
self.analysis = self.analyze_scalar(self.event.value)
if self.event.style == '"' or self.canonical:
return '"'
if not self.event.style and self.event.implicit[0]:
if (not (self.simple_key_context and
(self.analysis.empty or self.analysis.multiline))
and (self.flow_level and self.analysis.allow_flow_plain
or (not self.flow_level and self.analysis.allow_block_plain))):
return ''
if self.event.style and self.event.style in '|>':
if (not self.flow_level and not self.simple_key_context
and self.analysis.allow_block):
return self.event.style
if not self.event.style or self.event.style == '\'':
if (self.analysis.allow_single_quoted and
not (self.simple_key_context and self.analysis.multiline)):
return '\''
return '"'
def process_scalar(self):
if self.analysis is None:
self.analysis = self.analyze_scalar(self.event.value)
if self.style is None:
self.style = self.choose_scalar_style()
split = (not self.simple_key_context)
#if self.analysis.multiline and split \
# and (not self.style or self.style in '\'\"'):
# self.write_indent()
if self.style == '"':
self.write_double_quoted(self.analysis.scalar, split)
elif self.style == '\'':
self.write_single_quoted(self.analysis.scalar, split)
elif self.style == '>':
self.write_folded(self.analysis.scalar)
elif self.style == '|':
self.write_literal(self.analysis.scalar)
else:
self.write_plain(self.analysis.scalar, split)
self.analysis = None
self.style = None
# Analyzers.
def prepare_version(self, version):
major, minor = version
if major != 1:
raise EmitterError("unsupported YAML version: %d.%d" % (major, minor))
return '%d.%d' % (major, minor)
def prepare_tag_handle(self, handle):
if not handle:
raise EmitterError("tag handle must not be empty")
if handle[0] != '!' or handle[-1] != '!':
raise EmitterError("tag handle must start and end with '!': %r" % handle)
for ch in handle[1:-1]:
if not ('0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
or ch in '-_'):
raise EmitterError("invalid character %r in the tag handle: %r"
% (ch, handle))
return handle
def prepare_tag_prefix(self, prefix):
if not prefix:
raise EmitterError("tag prefix must not be empty")
chunks = []
start = end = 0
if prefix[0] == '!':
end = 1
while end < len(prefix):
ch = prefix[end]
if '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
or ch in '-;/?!:@&=+$,_.~*\'()[]':
end += 1
else:
if start < end:
chunks.append(prefix[start:end])
start = end = end+1
data = ch.encode('utf-8')
for ch in data:
chunks.append('%%%02X' % ord(ch))
if start < end:
chunks.append(prefix[start:end])
return ''.join(chunks)
def prepare_tag(self, tag):
if not tag:
raise EmitterError("tag must not be empty")
if tag == '!':
return tag
handle = None
suffix = tag
prefixes = sorted(self.tag_prefixes.keys())
for prefix in prefixes:
if tag.startswith(prefix) \
and (prefix == '!' or len(prefix) < len(tag)):
handle = self.tag_prefixes[prefix]
suffix = tag[len(prefix):]
chunks = []
start = end = 0
while end < len(suffix):
ch = suffix[end]
if '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
or ch in '-;/?:@&=+$,_.~*\'()[]' \
or (ch == '!' and handle != '!'):
end += 1
else:
if start < end:
chunks.append(suffix[start:end])
start = end = end+1
data = ch.encode('utf-8')
for ch in data:
chunks.append('%%%02X' % ord(ch))
if start < end:
chunks.append(suffix[start:end])
suffix_text = ''.join(chunks)
if handle:
return '%s%s' % (handle, suffix_text)
else:
return '!<%s>' % suffix_text
def prepare_anchor(self, anchor):
if not anchor:
raise EmitterError("anchor must not be empty")
for ch in anchor:
if not ('0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
or ch in '-_'):
raise EmitterError("invalid character %r in the anchor: %r"
% (ch, anchor))
return anchor
def analyze_scalar(self, scalar):
# Empty scalar is a special case.
if not scalar:
return ScalarAnalysis(scalar=scalar, empty=True, multiline=False,
allow_flow_plain=False, allow_block_plain=True,
allow_single_quoted=True, allow_double_quoted=True,
allow_block=False)
# Indicators and special characters.
block_indicators = False
flow_indicators = False
line_breaks = False
special_characters = False
# Important whitespace combinations.
leading_space = False
leading_break = False
trailing_space = False
trailing_break = False
break_space = False
space_break = False
# Check document indicators.
if scalar.startswith('---') or scalar.startswith('...'):
block_indicators = True
flow_indicators = True
# First character or preceded by a whitespace.
preceeded_by_whitespace = True
# Last character or followed by a whitespace.
followed_by_whitespace = (len(scalar) == 1 or
scalar[1] in '\0 \t\r\n\x85\u2028\u2029')
# The previous character is a space.
previous_space = False
# The previous character is a break.
previous_break = False
index = 0
while index < len(scalar):
ch = scalar[index]
# Check for indicators.
if index == 0:
# Leading indicators are special characters.
if ch in '#,[]{}&*!|>\'\"%@`':
flow_indicators = True
block_indicators = True
if ch in '?:':
flow_indicators = True
if followed_by_whitespace:
block_indicators = True
if ch == '-' and followed_by_whitespace:
flow_indicators = True
block_indicators = True
else:
# Some indicators cannot appear within a scalar as well.
if ch in ',?[]{}':
flow_indicators = True
if ch == ':':
flow_indicators = True
if followed_by_whitespace:
block_indicators = True
if ch == '#' and preceded_by_whitespace:
flow_indicators = True
block_indicators = True
# Check for line breaks, special, and unicode characters.
if ch in '\n\x85\u2028\u2029':
line_breaks = True
if not (ch == '\n' or '\x20' <= ch <= '\x7E'):
if (ch == '\x85' or '\xA0' <= ch <= '\uD7FF'
or '\uE000' <= ch <= '\uFFFD') and ch != '\uFEFF':
unicode_characters = True
if not self.allow_unicode:
special_characters = True
else:
special_characters = True
# Detect important whitespace combinations.
if ch == ' ':
if index == 0:
leading_space = True
if index == len(scalar)-1:
trailing_space = True
if previous_break:
break_space = True
previous_space = True
previous_break = False
elif ch in '\n\x85\u2028\u2029':
if index == 0:
leading_break = True
if index == len(scalar)-1:
trailing_break = True
if previous_space:
space_break = True
previous_space = False
previous_break = True
else:
previous_space = False
previous_break = False
# Prepare for the next character.
index += 1
preceded_by_whitespace = (ch in '\0 \t\r\n\x85\u2028\u2029')
followed_by_whitespace = (index+1 >= len(scalar) or
scalar[index+1] in '\0 \t\r\n\x85\u2028\u2029')
# Let's decide what styles are allowed.
allow_flow_plain = True
allow_block_plain = True
allow_single_quoted = True
allow_double_quoted = True
allow_block = True
# Leading and trailing whitespaces are bad for plain scalars.
if (leading_space or leading_break
or trailing_space or trailing_break):
allow_flow_plain = allow_block_plain = False
# We do not permit trailing spaces for block scalars.
if trailing_space:
allow_block = False
# Spaces at the beginning of a new line are only acceptable for block
# scalars.
if break_space:
allow_flow_plain = allow_block_plain = allow_single_quoted = False
# Spaces followed by breaks, as well as special character are only
# allowed for double quoted scalars.
if space_break or special_characters:
allow_flow_plain = allow_block_plain = \
allow_single_quoted = allow_block = False
# Although the plain scalar writer supports breaks, we never emit
# multiline plain scalars.
if line_breaks:
allow_flow_plain = allow_block_plain = False
# Flow indicators are forbidden for flow plain scalars.
if flow_indicators:
allow_flow_plain = False
# Block indicators are forbidden for block plain scalars.
if block_indicators:
allow_block_plain = False
return ScalarAnalysis(scalar=scalar,
empty=False, multiline=line_breaks,
allow_flow_plain=allow_flow_plain,
allow_block_plain=allow_block_plain,
allow_single_quoted=allow_single_quoted,
allow_double_quoted=allow_double_quoted,
allow_block=allow_block)
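# Rough intuition for the analysis above (illustrative, not exhaustive):
#   'plain'       -> all styles allowed
#   'has: colon'  -> ': ' is a flow/block indicator, so plain styles are
#                    disallowed while quoted and block styles remain
#   'two\nlines'  -> line breaks rule out plain styles; quoted/block stay ok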
# Writers.
def flush_stream(self):
if hasattr(self.stream, 'flush'):
self.stream.flush()
def write_stream_start(self):
# Write BOM if needed.
if self.encoding and self.encoding.startswith('utf-16'):
self.stream.write('\uFEFF'.encode(self.encoding))
def write_stream_end(self):
self.flush_stream()
def write_indicator(self, indicator, need_whitespace,
whitespace=False, indention=False):
if self.whitespace or not need_whitespace:
data = indicator
else:
data = ' '+indicator
self.whitespace = whitespace
self.indention = self.indention and indention
self.column += len(data)
self.open_ended = False
if self.encoding:
data = data.encode(self.encoding)
self.stream.write(data)
def write_indent(self):
indent = self.indent or 0
if not self.indention or self.column > indent \
or (self.column == indent and not self.whitespace):
self.write_line_break()
if self.column < indent:
self.whitespace = True
data = ' '*(indent-self.column)
self.column = indent
if self.encoding:
data = data.encode(self.encoding)
self.stream.write(data)
def write_line_break(self, data=None):
if data is None:
data = self.best_line_break
self.whitespace = True
self.indention = True
self.line += 1
self.column = 0
if self.encoding:
data = data.encode(self.encoding)
self.stream.write(data)
def write_version_directive(self, version_text):
data = '%%YAML %s' % version_text
if self.encoding:
data = data.encode(self.encoding)
self.stream.write(data)
self.write_line_break()
def write_tag_directive(self, handle_text, prefix_text):
data = '%%TAG %s %s' % (handle_text, prefix_text)
if self.encoding:
data = data.encode(self.encoding)
self.stream.write(data)
self.write_line_break()
# Scalar streams.
def write_single_quoted(self, text, split=True):
self.write_indicator('\'', True)
spaces = False
breaks = False
start = end = 0
while end <= len(text):
ch = None
if end < len(text):
ch = text[end]
if spaces:
if ch is None or ch != ' ':
if start+1 == end and self.column > self.best_width and split \
and start != 0 and end != len(text):
self.write_indent()
else:
data = text[start:end]
self.column += len(data)
if self.encoding:
data = data.encode(self.encoding)
self.stream.write(data)
start = end
elif breaks:
if ch is None or ch not in '\n\x85\u2028\u2029':
if text[start] == '\n':
self.write_line_break()
for br in text[start:end]:
if br == '\n':
self.write_line_break()
else:
self.write_line_break(br)
self.write_indent()
start = end
else:
if ch is None or ch in ' \n\x85\u2028\u2029' or ch == '\'':
if start < end:
data = text[start:end]
self.column += len(data)
if self.encoding:
data = data.encode(self.encoding)
self.stream.write(data)
start = end
if ch == '\'':
data = '\'\''
self.column += 2
if self.encoding:
data = data.encode(self.encoding)
self.stream.write(data)
start = end + 1
if ch is not None:
spaces = (ch == ' ')
breaks = (ch in '\n\x85\u2028\u2029')
end += 1
self.write_indicator('\'', False)
ESCAPE_REPLACEMENTS = {
'\0': '0',
'\x07': 'a',
'\x08': 'b',
'\x09': 't',
'\x0A': 'n',
'\x0B': 'v',
'\x0C': 'f',
'\x0D': 'r',
'\x1B': 'e',
'\"': '\"',
'\\': '\\',
'\x85': 'N',
'\xA0': '_',
'\u2028': 'L',
'\u2029': 'P',
}
def write_double_quoted(self, text, split=True):
self.write_indicator('"', True)
start = end = 0
while end <= len(text):
ch = None
if end < len(text):
ch = text[end]
if ch is None or ch in '"\\\x85\u2028\u2029\uFEFF' \
or not ('\x20' <= ch <= '\x7E'
or (self.allow_unicode
and ('\xA0' <= ch <= '\uD7FF'
or '\uE000' <= ch <= '\uFFFD'))):
if start < end:
data = text[start:end]
self.column += len(data)
if self.encoding:
data = data.encode(self.encoding)
self.stream.write(data)
start = end
if ch is not None:
if ch in self.ESCAPE_REPLACEMENTS:
data = '\\'+self.ESCAPE_REPLACEMENTS[ch]
elif ch <= '\xFF':
data = '\\x%02X' % ord(ch)
elif ch <= '\uFFFF':
data = '\\u%04X' % ord(ch)
else:
data = '\\U%08X' % ord(ch)
self.column += len(data)
if self.encoding:
data = data.encode(self.encoding)
self.stream.write(data)
start = end+1
if 0 < end < len(text)-1 and (ch == ' ' or start >= end) \
and self.column+(end-start) > self.best_width and split:
data = text[start:end]+'\\'
if start < end:
start = end
self.column += len(data)
if self.encoding:
data = data.encode(self.encoding)
self.stream.write(data)
self.write_indent()
self.whitespace = False
self.indention = False
if text[start] == ' ':
data = '\\'
self.column += len(data)
if self.encoding:
data = data.encode(self.encoding)
self.stream.write(data)
end += 1
self.write_indicator('"', False)
def determine_block_hints(self, text):
hints = ''
if text:
if text[0] in ' \n\x85\u2028\u2029':
hints += str(self.best_indent)
if text[-1] not in '\n\x85\u2028\u2029':
hints += '-'
elif len(text) == 1 or text[-2] in '\n\x85\u2028\u2029':
hints += '+'
return hints
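# A quick sketch of the hint logic (assuming the default best_indent of 2):
#   determine_block_hints('text')     -> '-'  (no trailing break: strip)
#   determine_block_hints('text\n')   -> ''   (single trailing break: clip)
#   determine_block_hints('text\n\n') -> '+'  (extra trailing breaks: keep)
#   determine_block_hints(' text\n')  -> '2'  (leading space forces an
#                                              explicit indentation indicator)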
def write_folded(self, text):
hints = self.determine_block_hints(text)
self.write_indicator('>'+hints, True)
if hints[-1:] == '+':
self.open_ended = True
self.write_line_break()
leading_space = True
spaces = False
breaks = True
start = end = 0
while end <= len(text):
ch = None
if end < len(text):
ch = text[end]
if breaks:
if ch is None or ch not in '\n\x85\u2028\u2029':
if not leading_space and ch is not None and ch != ' ' \
and text[start] == '\n':
self.write_line_break()
leading_space = (ch == ' ')
for br in text[start:end]:
if br == '\n':
self.write_line_break()
else:
self.write_line_break(br)
if ch is not None:
self.write_indent()
start = end
elif spaces:
if ch != ' ':
if start+1 == end and self.column > self.best_width:
self.write_indent()
else:
data = text[start:end]
self.column += len(data)
if self.encoding:
data = data.encode(self.encoding)
self.stream.write(data)
start = end
else:
if ch is None or ch in ' \n\x85\u2028\u2029':
data = text[start:end]
self.column += len(data)
if self.encoding:
data = data.encode(self.encoding)
self.stream.write(data)
if ch is None:
self.write_line_break()
start = end
if ch is not None:
breaks = (ch in '\n\x85\u2028\u2029')
spaces = (ch == ' ')
end += 1
def write_literal(self, text):
hints = self.determine_block_hints(text)
self.write_indicator('|'+hints, True)
if hints[-1:] == '+':
self.open_ended = True
self.write_line_break()
breaks = True
start = end = 0
while end <= len(text):
ch = None
if end < len(text):
ch = text[end]
if breaks:
if ch is None or ch not in '\n\x85\u2028\u2029':
for br in text[start:end]:
if br == '\n':
self.write_line_break()
else:
self.write_line_break(br)
if ch is not None:
self.write_indent()
start = end
else:
if ch is None or ch in '\n\x85\u2028\u2029':
data = text[start:end]
if self.encoding:
data = data.encode(self.encoding)
self.stream.write(data)
if ch is None:
self.write_line_break()
start = end
if ch is not None:
breaks = (ch in '\n\x85\u2028\u2029')
end += 1
def write_plain(self, text, split=True):
if self.root_context:
self.open_ended = True
if not text:
return
if not self.whitespace:
data = ' '
self.column += len(data)
if self.encoding:
data = data.encode(self.encoding)
self.stream.write(data)
self.whitespace = False
self.indention = False
spaces = False
breaks = False
start = end = 0
while end <= len(text):
ch = None
if end < len(text):
ch = text[end]
if spaces:
if ch != ' ':
if start+1 == end and self.column > self.best_width and split:
self.write_indent()
self.whitespace = False
self.indention = False
else:
data = text[start:end]
self.column += len(data)
if self.encoding:
data = data.encode(self.encoding)
self.stream.write(data)
start = end
elif breaks:
if ch is None or ch not in '\n\x85\u2028\u2029':
if text[start] == '\n':
self.write_line_break()
for br in text[start:end]:
if br == '\n':
self.write_line_break()
else:
self.write_line_break(br)
self.write_indent()
self.whitespace = False
self.indention = False
start = end
else:
if ch is None or ch in ' \n\x85\u2028\u2029':
data = text[start:end]
self.column += len(data)
if self.encoding:
data = data.encode(self.encoding)
self.stream.write(data)
start = end
if ch is not None:
spaces = (ch == ' ')
breaks = (ch in '\n\x85\u2028\u2029')
end += 1
|
kawamon/hue | refs/heads/master | desktop/core/src/desktop/lib/raz/ranger/__init__.py | 6 | # Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from builtins import map
from future.utils import raise_
import calendar
import errno
import logging
import posixpath
import re
import sys
import time
from functools import wraps
from boto.exception import S3ResponseError
from hadoop.fs import normpath as fs_normpath
ERRNO_MAP = {
403: errno.EACCES,
404: errno.ENOENT
}
DEFAULT_ERRNO = errno.EINVAL
S3_PATH_RE = re.compile('^/*[sS]3[aA]?://([^/]+)(/(.*?([^/]+)?/?))?$')
S3_ROOT = 's3://'
S3A_ROOT = 's3a://'
def lookup_s3error(error):
err_no = ERRNO_MAP.get(error.status, DEFAULT_ERRNO)
return IOError(err_no, error.reason)
def translate_s3_error(fn):
@wraps(fn)
def wrapped(*args, **kwargs):
try:
return fn(*args, **kwargs)
except S3ResponseError:
_, exc, tb = sys.exc_info()
logging.error('S3 error: %s' % exc)
lookup = lookup_s3error(exc)
raise_(lookup.__class__, lookup, tb)
return wrapped
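# A minimal usage sketch (the function name below is hypothetical):
#
#   @translate_s3_error
#   def stats(path):
#       ...  # any S3ResponseError raised here surfaces as an IOError
#
# A 403 from S3 becomes IOError(errno.EACCES, reason), a 404 becomes
# IOError(errno.ENOENT, reason); other statuses fall back to errno.EINVAL.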
def parse_uri(uri):
"""
Returns a tuple (bucket_name, key_name, key_basename).
Raises ValueError if an invalid S3 URI is passed.
"""
match = S3_PATH_RE.match(uri)
if not match:
raise ValueError("Invalid S3 URI: %s" % uri)
key = match.group(3) or ''
basename = match.group(4) or ''
return match.group(1), key, basename
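# Illustrative results (sketch):
#   parse_uri('s3a://bucket/dir/file') -> ('bucket', 'dir/file', 'file')
#   parse_uri('s3://bucket')           -> ('bucket', '', '')
#   parse_uri('not-an-s3-uri')         raises ValueError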
def is_root(uri):
"""
Check if the URI is the S3 root (s3a://).
"""
return uri.lower() == S3A_ROOT
def abspath(cd, uri):
"""
Returns the absolute URI, examples:
abspath('s3a://bucket/key', 'key2') == 's3a://bucket/key/key2'
abspath('s3a://bucket/key', 's3a://bucket2/key2') == 's3a://bucket2/key2'
"""
if cd.lower().startswith(S3A_ROOT):
uri = join(cd, uri)
else:
uri = normpath(join(cd, uri))
return uri
def join(*comp_list):
def _prep(uri):
try:
return '/%s/%s' % parse_uri(uri)[:2]
except ValueError:
return '/' if is_root(uri) else uri
joined = posixpath.join(*list(map(_prep, comp_list)))
if joined and joined[0] == '/':
joined = 's3a:/%s' % joined
return joined
def normpath(path):
"""
Return normalized path but ignore leading S3A_ROOT prefix if it exists
"""
if path.lower().startswith(S3A_ROOT):
if is_root(path):
normalized = path
else:
normalized = '%s%s' % (S3A_ROOT, fs_normpath(path[len(S3A_ROOT):]))
else:
normalized = fs_normpath(path)
return normalized
def s3datetime_to_timestamp(datetime):
"""
Returns timestamp (seconds) by datetime string from S3 API responses.
S3 REST API returns two types of datetime strings:
* `Thu, 26 Feb 2015 20:42:07 GMT` for Object HEAD requests
(see http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectHEAD.html);
* `2015-02-26T20:42:07.000Z` for Bucket GET requests
(see http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGET.html).
"""
# There is a chance (depending on the platform) of getting a
# `'z' is a bad directive in format ...` error (see https://bugs.python.org/issue6641),
# but S3 always returns times in GMT, so `GMT` and `.000Z` can be pruned.
try:
stripped = time.strptime(datetime[:-4], '%a, %d %b %Y %H:%M:%S')
assert datetime[-4:] == ' GMT', 'Time [%s] is not in GMT.' % datetime
except ValueError:
stripped = time.strptime(datetime[:-5], '%Y-%m-%dT%H:%M:%S')
assert datetime[-1:] == 'Z' and datetime[-5:-4] == '.', 'Time [%s] is not in GMT.' % datetime
return int(calendar.timegm(stripped))
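# Both formats above should map to the same epoch value, e.g.:
#   s3datetime_to_timestamp('Thu, 26 Feb 2015 20:42:07 GMT') -> 1424983327
#   s3datetime_to_timestamp('2015-02-26T20:42:07.000Z')      -> 1424983327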
|
rytaft/h-store | refs/heads/master | third_party/python/boto/route53/__init__.py | 20 | # Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2010, Eucalyptus Systems, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# this is here for backward compatibility
# originally, the Route53Connection class was defined here
from connection import Route53Connection
|
opensemanticsearch/open-semantic-search-apps | refs/heads/master | src/morphology/tests.py | 24123 | from django.test import TestCase
# Create your tests here.
|
Tooskich/python_core | refs/heads/master | widgets/tests.py | 24123 | from django.test import TestCase
# Create your tests here.
|
ThiagoGarciaAlves/intellij-community | refs/heads/master | python/testData/resolve/AttributeAssignedNearby.py | 83 | def foo(bar):
bar.xyzzy = 1
print bar.xyzzy
# <ref> |
yasir1brahim/OLiMS | refs/heads/master | models/suppliercontact.py | 2 | """The contact person at a reference supplier organisation.
"""
from openerp import fields, models, api
from fields.string_field import StringField
from base_olims_model import BaseOLiMSModel
from fields.widget.widget import StringWidget
from openerp.tools.translate import _
schema = (
StringField('Salutation',
widget = StringWidget(
label = ("Salutation",
"Title"),
description=_("Greeting title eg. Mr, Mrs, Dr"),
),
),
StringField('Firstname',
required = 1,
widget = StringWidget(
label=_("Firstname"),
),
),
StringField('Middleinitial',
required = 0,
widget = StringWidget(
label=_("Middle initial"),
),
),
StringField('Middlename',
required = 0,
widget = StringWidget(
label=_("Middle name"),
),
),
StringField('Surname',
required = 1,
widget = StringWidget(
label=_("Surname"),
),
),
fields.Char(compute='computeFullname', string='Fullname'),
StringField('Username',
widget = StringWidget(
visible = False
),
),
StringField('EmailAddress',
schemata = 'Email Telephone Fax',
searchable = 1,
widget = StringWidget(
label=_("Email Address"),
),
),
StringField('BusinessPhone',
schemata = 'Email Telephone Fax',
widget = StringWidget(
label=_("Phone (business)"),
),
),
StringField('BusinessFax',
schemata = 'Email Telephone Fax',
widget = StringWidget(
label=_("Fax (business)"),
),
),
StringField('HomePhone',
schemata = 'Email Telephone Fax',
widget = StringWidget(
label=_("Phone (home)"),
),
),
StringField('MobilePhone',
schemata = 'Email Telephone Fax',
widget = StringWidget(
label=_("Phone (mobile)"),
),
),
StringField('JobTitle',
widget = StringWidget(
label=_("Job title"),
),
),
StringField('Department',
widget = StringWidget(
label=_("Department"),
),
),
# ~~~~~~~~~~ PhysicalAddress is modeled in Odoo as a group of selection/char fields ~~~~~~~~~~~
fields.Many2one(comodel_name='olims.country',string='physical_country',default=lambda self: self.env['olims.country'].search([('name','=','United States')]).id),
fields.Many2one(comodel_name='olims.state',string='physical_state', domain="[('Country', '=', physical_country)]",default=lambda self: self.env['olims.state'].search([('name','=','Washington')]).id),
fields.Many2one(comodel_name='olims.district',string='physical_district', domain="[('State', '=', physical_state)]"),
fields.Char(string='physical_city'),
fields.Char(string='physical_postalcode'),
fields.Char(string='physical_address'),
fields.Selection([('postal', 'PostalAddress')],string='physical_copy_from'),
# ~~~~~~~~~~ PostalAddress is modeled in Odoo as a group of selection/char fields ~~~~~~~~~~~
fields.Many2one(comodel_name='olims.country',string='postal_country',default=lambda self: self.env['olims.country'].search([('name','=','United States')]).id),
fields.Many2one(comodel_name='olims.state',string='postal_state', domain="[('Country', '=', postal_country)]",default=lambda self: self.env['olims.state'].search([('name','=','Washington')]).id),
fields.Many2one(comodel_name='olims.district',string='postal_district', domain="[('State', '=', postal_state)]"),
fields.Char(string='postal_city'),
fields.Char(string='postal_postalcode'),
fields.Char(string='postal_address'),
fields.Selection([('physical', 'PhysicalAddress')],string='postal_copy_from'),
fields.Many2one(string='SupplierId',
comodel_name='olims.supplier'),
)
class SupplierContact(models.Model, BaseOLiMSModel): #Person
_name = 'olims.supplier_contact'
_at_rename_after_creation = True
def _renameAfterCreation(self, check_auto_id=False):
from lims.idserver import renameAfterCreation
renameAfterCreation(self)
def computeFullname(self):
"""Return the person's full name."""
for record in self:
fn = record.getFirstname()
mi = record.getMiddleinitial()
md = record.getMiddlename()
sn = record.getSurname()
fullname = ''
if fn or sn:
if mi and md:
fullname = '%s %s %s %s' % (record.getFirstname(),
record.getMiddleinitial(),
record.getMiddlename(),
record.getSurname())
elif mi:
fullname = '%s %s %s' % (record.getFirstname(),
record.getMiddleinitial(),
record.getSurname())
elif md:
fullname = '%s %s %s' % (record.getFirstname(),
record.getMiddlename(),
record.getSurname())
else:
fullname = '%s %s' % (record.getFirstname(), record.getSurname())
record.Fullname = fullname.strip()
@api.onchange('physical_copy_from')
def _onchange_physical(self):
# set auto-changing field
if self.physical_copy_from:
setattr(self, 'physical_country', getattr(self,self.physical_copy_from+'_country'))
setattr(self, 'physical_state', getattr(self,self.physical_copy_from+'_state'))
setattr(self, 'physical_district', getattr(self,self.physical_copy_from+'_district'))
setattr(self, 'physical_city', getattr(self,self.physical_copy_from+'_city'))
setattr(self, 'physical_postalcode', getattr(self,self.physical_copy_from+'_postalcode'))
setattr(self, 'physical_address', getattr(self,self.physical_copy_from+'_address'))
@api.onchange('postal_copy_from')
def _onchange_postal(self):
# set auto-changing field
if self.postal_copy_from:
setattr(self, 'postal_country', getattr(self,self.postal_copy_from+'_country'))
setattr(self, 'postal_state', getattr(self,self.postal_copy_from+'_state'))
setattr(self, 'postal_district', getattr(self,self.postal_copy_from+'_district'))
setattr(self, 'postal_city', getattr(self,self.postal_copy_from+'_city'))
setattr(self, 'postal_postalcode', getattr(self,self.postal_copy_from+'_postalcode'))
setattr(self, 'postal_address', getattr(self,self.postal_copy_from+'_address'))
SupplierContact.initialze(schema) |
tayfun/django | refs/heads/master | tests/forms_tests/tests/test_error_messages.py | 169 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.core.files.uploadedfile import SimpleUploadedFile
from django.forms import (
BooleanField, CharField, ChoiceField, DateField, DateTimeField,
DecimalField, EmailField, FileField, FloatField, Form,
GenericIPAddressField, IntegerField, ModelChoiceField,
ModelMultipleChoiceField, MultipleChoiceField, RegexField,
SplitDateTimeField, TimeField, URLField, ValidationError, utils,
)
from django.test import SimpleTestCase, TestCase
from django.utils.encoding import python_2_unicode_compatible
from django.utils.safestring import mark_safe
class AssertFormErrorsMixin(object):
def assertFormErrors(self, expected, the_callable, *args, **kwargs):
try:
the_callable(*args, **kwargs)
self.fail("Testing the 'clean' method on %s failed to raise a ValidationError.")
except ValidationError as e:
self.assertEqual(e.messages, expected)
class FormsErrorMessagesTestCase(SimpleTestCase, AssertFormErrorsMixin):
def test_charfield(self):
e = {
'required': 'REQUIRED',
'min_length': 'LENGTH %(show_value)s, MIN LENGTH %(limit_value)s',
'max_length': 'LENGTH %(show_value)s, MAX LENGTH %(limit_value)s',
}
f = CharField(min_length=5, max_length=10, error_messages=e)
self.assertFormErrors(['REQUIRED'], f.clean, '')
self.assertFormErrors(['LENGTH 4, MIN LENGTH 5'], f.clean, '1234')
self.assertFormErrors(['LENGTH 11, MAX LENGTH 10'], f.clean, '12345678901')
def test_integerfield(self):
e = {
'required': 'REQUIRED',
'invalid': 'INVALID',
'min_value': 'MIN VALUE IS %(limit_value)s',
'max_value': 'MAX VALUE IS %(limit_value)s',
}
f = IntegerField(min_value=5, max_value=10, error_messages=e)
self.assertFormErrors(['REQUIRED'], f.clean, '')
self.assertFormErrors(['INVALID'], f.clean, 'abc')
self.assertFormErrors(['MIN VALUE IS 5'], f.clean, '4')
self.assertFormErrors(['MAX VALUE IS 10'], f.clean, '11')
def test_floatfield(self):
e = {
'required': 'REQUIRED',
'invalid': 'INVALID',
'min_value': 'MIN VALUE IS %(limit_value)s',
'max_value': 'MAX VALUE IS %(limit_value)s',
}
f = FloatField(min_value=5, max_value=10, error_messages=e)
self.assertFormErrors(['REQUIRED'], f.clean, '')
self.assertFormErrors(['INVALID'], f.clean, 'abc')
self.assertFormErrors(['MIN VALUE IS 5'], f.clean, '4')
self.assertFormErrors(['MAX VALUE IS 10'], f.clean, '11')
def test_decimalfield(self):
e = {
'required': 'REQUIRED',
'invalid': 'INVALID',
'min_value': 'MIN VALUE IS %(limit_value)s',
'max_value': 'MAX VALUE IS %(limit_value)s',
'max_digits': 'MAX DIGITS IS %(max)s',
'max_decimal_places': 'MAX DP IS %(max)s',
'max_whole_digits': 'MAX DIGITS BEFORE DP IS %(max)s',
}
f = DecimalField(min_value=5, max_value=10, error_messages=e)
self.assertFormErrors(['REQUIRED'], f.clean, '')
self.assertFormErrors(['INVALID'], f.clean, 'abc')
self.assertFormErrors(['MIN VALUE IS 5'], f.clean, '4')
self.assertFormErrors(['MAX VALUE IS 10'], f.clean, '11')
f2 = DecimalField(max_digits=4, decimal_places=2, error_messages=e)
self.assertFormErrors(['MAX DIGITS IS 4'], f2.clean, '123.45')
self.assertFormErrors(['MAX DP IS 2'], f2.clean, '1.234')
self.assertFormErrors(['MAX DIGITS BEFORE DP IS 2'], f2.clean, '123.4')
def test_datefield(self):
e = {
'required': 'REQUIRED',
'invalid': 'INVALID',
}
f = DateField(error_messages=e)
self.assertFormErrors(['REQUIRED'], f.clean, '')
self.assertFormErrors(['INVALID'], f.clean, 'abc')
def test_timefield(self):
e = {
'required': 'REQUIRED',
'invalid': 'INVALID',
}
f = TimeField(error_messages=e)
self.assertFormErrors(['REQUIRED'], f.clean, '')
self.assertFormErrors(['INVALID'], f.clean, 'abc')
def test_datetimefield(self):
e = {
'required': 'REQUIRED',
'invalid': 'INVALID',
}
f = DateTimeField(error_messages=e)
self.assertFormErrors(['REQUIRED'], f.clean, '')
self.assertFormErrors(['INVALID'], f.clean, 'abc')
def test_regexfield(self):
e = {
'required': 'REQUIRED',
'invalid': 'INVALID',
'min_length': 'LENGTH %(show_value)s, MIN LENGTH %(limit_value)s',
'max_length': 'LENGTH %(show_value)s, MAX LENGTH %(limit_value)s',
}
f = RegexField(r'^[0-9]+$', min_length=5, max_length=10, error_messages=e)
self.assertFormErrors(['REQUIRED'], f.clean, '')
self.assertFormErrors(['INVALID'], f.clean, 'abcde')
self.assertFormErrors(['LENGTH 4, MIN LENGTH 5'], f.clean, '1234')
self.assertFormErrors(['LENGTH 11, MAX LENGTH 10'], f.clean, '12345678901')
def test_emailfield(self):
e = {
'required': 'REQUIRED',
'invalid': 'INVALID',
'min_length': 'LENGTH %(show_value)s, MIN LENGTH %(limit_value)s',
'max_length': 'LENGTH %(show_value)s, MAX LENGTH %(limit_value)s',
}
f = EmailField(min_length=8, max_length=10, error_messages=e)
self.assertFormErrors(['REQUIRED'], f.clean, '')
self.assertFormErrors(['INVALID'], f.clean, 'abcdefgh')
self.assertFormErrors(['LENGTH 7, MIN LENGTH 8'], f.clean, 'a@b.com')
self.assertFormErrors(['LENGTH 11, MAX LENGTH 10'], f.clean, 'aye@bee.com')
def test_filefield(self):
e = {
'required': 'REQUIRED',
'invalid': 'INVALID',
'missing': 'MISSING',
'empty': 'EMPTY FILE',
}
f = FileField(error_messages=e)
self.assertFormErrors(['REQUIRED'], f.clean, '')
self.assertFormErrors(['INVALID'], f.clean, 'abc')
self.assertFormErrors(['EMPTY FILE'], f.clean, SimpleUploadedFile('name', None))
self.assertFormErrors(['EMPTY FILE'], f.clean, SimpleUploadedFile('name', ''))
def test_urlfield(self):
e = {
'required': 'REQUIRED',
'invalid': 'INVALID',
'max_length': '"%(value)s" has more than %(limit_value)d characters.',
}
f = URLField(error_messages=e, max_length=17)
self.assertFormErrors(['REQUIRED'], f.clean, '')
self.assertFormErrors(['INVALID'], f.clean, 'abc.c')
self.assertFormErrors(['"http://djangoproject.com" has more than 17 characters.'], f.clean, 'djangoproject.com')
def test_booleanfield(self):
e = {
'required': 'REQUIRED',
}
f = BooleanField(error_messages=e)
self.assertFormErrors(['REQUIRED'], f.clean, '')
def test_choicefield(self):
e = {
'required': 'REQUIRED',
'invalid_choice': '%(value)s IS INVALID CHOICE',
}
f = ChoiceField(choices=[('a', 'aye')], error_messages=e)
self.assertFormErrors(['REQUIRED'], f.clean, '')
self.assertFormErrors(['b IS INVALID CHOICE'], f.clean, 'b')
def test_multiplechoicefield(self):
e = {
'required': 'REQUIRED',
'invalid_choice': '%(value)s IS INVALID CHOICE',
'invalid_list': 'NOT A LIST',
}
f = MultipleChoiceField(choices=[('a', 'aye')], error_messages=e)
self.assertFormErrors(['REQUIRED'], f.clean, '')
self.assertFormErrors(['NOT A LIST'], f.clean, 'b')
self.assertFormErrors(['b IS INVALID CHOICE'], f.clean, ['b'])
def test_splitdatetimefield(self):
e = {
'required': 'REQUIRED',
'invalid_date': 'INVALID DATE',
'invalid_time': 'INVALID TIME',
}
f = SplitDateTimeField(error_messages=e)
self.assertFormErrors(['REQUIRED'], f.clean, '')
self.assertFormErrors(['INVALID DATE', 'INVALID TIME'], f.clean, ['a', 'b'])
def test_generic_ipaddressfield(self):
e = {
'required': 'REQUIRED',
'invalid': 'INVALID IP ADDRESS',
}
f = GenericIPAddressField(error_messages=e)
self.assertFormErrors(['REQUIRED'], f.clean, '')
self.assertFormErrors(['INVALID IP ADDRESS'], f.clean, '127.0.0')
def test_subclassing_errorlist(self):
class TestForm(Form):
first_name = CharField()
last_name = CharField()
birthday = DateField()
def clean(self):
raise ValidationError("I like to be awkward.")
@python_2_unicode_compatible
class CustomErrorList(utils.ErrorList):
def __str__(self):
return self.as_divs()
def as_divs(self):
if not self:
return ''
return mark_safe('<div class="error">%s</div>' % ''.join('<p>%s</p>' % e for e in self))
# This form should print errors the default way.
form1 = TestForm({'first_name': 'John'})
self.assertHTMLEqual(str(form1['last_name'].errors), '<ul class="errorlist"><li>This field is required.</li></ul>')
self.assertHTMLEqual(str(form1.errors['__all__']), '<ul class="errorlist nonfield"><li>I like to be awkward.</li></ul>')
# This one should wrap error groups in the customized way.
form2 = TestForm({'first_name': 'John'}, error_class=CustomErrorList)
self.assertHTMLEqual(str(form2['last_name'].errors), '<div class="error"><p>This field is required.</p></div>')
self.assertHTMLEqual(str(form2.errors['__all__']), '<div class="error"><p>I like to be awkward.</p></div>')
class ModelChoiceFieldErrorMessagesTestCase(TestCase, AssertFormErrorsMixin):
def test_modelchoicefield(self):
# Create choices for the model choice field tests below.
from forms_tests.models import ChoiceModel
ChoiceModel.objects.create(pk=1, name='a')
ChoiceModel.objects.create(pk=2, name='b')
ChoiceModel.objects.create(pk=3, name='c')
# ModelChoiceField
e = {
'required': 'REQUIRED',
'invalid_choice': 'INVALID CHOICE',
}
f = ModelChoiceField(queryset=ChoiceModel.objects.all(), error_messages=e)
self.assertFormErrors(['REQUIRED'], f.clean, '')
self.assertFormErrors(['INVALID CHOICE'], f.clean, '4')
# ModelMultipleChoiceField
e = {
'required': 'REQUIRED',
'invalid_choice': '%(value)s IS INVALID CHOICE',
'list': 'NOT A LIST OF VALUES',
}
f = ModelMultipleChoiceField(queryset=ChoiceModel.objects.all(), error_messages=e)
self.assertFormErrors(['REQUIRED'], f.clean, '')
self.assertFormErrors(['NOT A LIST OF VALUES'], f.clean, '3')
self.assertFormErrors(['4 IS INVALID CHOICE'], f.clean, ['4'])
|
sinkpoint/dipy | refs/heads/master | dipy/fixes/argparse.py | 19 | # emacs: -*- coding: utf-8; mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
# Copyright 2006-2009 Steven J. Bethard <steven.bethard@gmail.com>.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Command-line parsing library
This module is an optparse-inspired command-line parsing library that:
- handles both optional and positional arguments
- produces highly informative usage messages
- supports parsers that dispatch to sub-parsers
The following is a simple usage example that sums integers from the
command-line and writes the result to a file::
parser = argparse.ArgumentParser(
description='sum the integers at the command line')
parser.add_argument(
'integers', metavar='int', nargs='+', type=int,
help='an integer to be summed')
parser.add_argument(
'--log', default=sys.stdout, type=argparse.FileType('w'),
help='the file where the sum should be written')
args = parser.parse_args()
args.log.write('%s' % sum(args.integers))
args.log.close()
The module contains the following public classes:
- ArgumentParser -- The main entry point for command-line parsing. As the
example above shows, the add_argument() method is used to populate
the parser with actions for optional and positional arguments. Then
the parse_args() method is invoked to convert the args at the
command-line into an object with attributes.
- ArgumentError -- The exception raised by ArgumentParser objects when
there are errors with the parser's actions. Errors raised while
parsing the command-line are caught by ArgumentParser and emitted
as command-line messages.
- FileType -- A factory for defining types of files to be created. As the
example above shows, instances of FileType are typically passed as
the type= argument of add_argument() calls.
- Action -- The base class for parser actions. Typically actions are
selected by passing strings like 'store_true' or 'append_const' to
the action= argument of add_argument(). However, for greater
customization of ArgumentParser actions, subclasses of Action may
be defined and passed as the action= argument.
- HelpFormatter, RawDescriptionHelpFormatter, RawTextHelpFormatter,
ArgumentDefaultsHelpFormatter -- Formatter classes which
may be passed as the formatter_class= argument to the
ArgumentParser constructor. HelpFormatter is the default,
RawDescriptionHelpFormatter and RawTextHelpFormatter tell the parser
not to change the formatting for help text, and
ArgumentDefaultsHelpFormatter adds information about argument defaults
to the help.
All other classes in this module are considered implementation details.
(Also note that HelpFormatter and RawDescriptionHelpFormatter are only
considered public as object names -- the API of the formatter objects is
still considered an implementation detail.)
"""
__version__ = '1.0.1'
__all__ = [
'ArgumentParser',
'ArgumentError',
'Namespace',
'Action',
'FileType',
'HelpFormatter',
'RawDescriptionHelpFormatter',
'RawTextHelpFormatter',
'ArgumentDefaultsHelpFormatter',
]
import copy as _copy
import os as _os
import re as _re
import sys as _sys
import textwrap as _textwrap
from gettext import gettext as _
try:
_set = set
except NameError:
from sets import Set as _set
try:
_basestring = basestring
except NameError:
_basestring = str
try:
_sorted = sorted
except NameError:
def _sorted(iterable, reverse=False):
result = list(iterable)
result.sort()
if reverse:
result.reverse()
return result
# silence Python 2.6 buggy warnings about Exception.message
if _sys.version_info[:2] == (2, 6):
import warnings
warnings.filterwarnings(
action='ignore',
message='BaseException.message has been deprecated as of Python 2.6',
category=DeprecationWarning,
module='argparse')
SUPPRESS = '==SUPPRESS=='
OPTIONAL = '?'
ZERO_OR_MORE = '*'
ONE_OR_MORE = '+'
PARSER = '==PARSER=='
# =============================
# Utility functions and classes
# =============================
class _AttributeHolder(object):
"""Abstract base class that provides __repr__.
The __repr__ method returns a string in the format::
ClassName(attr=name, attr=name, ...)
The attributes are determined either by a class-level attribute,
'_kwarg_names', or by inspecting the instance __dict__.
"""
def __repr__(self):
type_name = type(self).__name__
arg_strings = []
for arg in self._get_args():
arg_strings.append(repr(arg))
for name, value in self._get_kwargs():
arg_strings.append('%s=%r' % (name, value))
return '%s(%s)' % (type_name, ', '.join(arg_strings))
def _get_kwargs(self):
return _sorted(self.__dict__.items())
def _get_args(self):
return []
def _ensure_value(namespace, name, value):
if getattr(namespace, name, None) is None:
setattr(namespace, name, value)
return getattr(namespace, name)
# ===============
# Formatting Help
# ===============
class HelpFormatter(object):
"""Formatter for generating usage messages and argument help strings.
Only the name of this class is considered a public API. All the methods
provided by the class are considered an implementation detail.
"""
def __init__(self,
prog,
indent_increment=2,
max_help_position=24,
width=None):
# default setting for width
if width is None:
try:
width = int(_os.environ['COLUMNS'])
except (KeyError, ValueError):
width = 80
width -= 2
self._prog = prog
self._indent_increment = indent_increment
self._max_help_position = max_help_position
self._width = width
self._current_indent = 0
self._level = 0
self._action_max_length = 0
self._root_section = self._Section(self, None)
self._current_section = self._root_section
self._whitespace_matcher = _re.compile(r'\s+')
self._long_break_matcher = _re.compile(r'\n\n\n+')
# ===============================
# Section and indentation methods
# ===============================
def _indent(self):
self._current_indent += self._indent_increment
self._level += 1
def _dedent(self):
self._current_indent -= self._indent_increment
assert self._current_indent >= 0, 'Indent decreased below 0.'
self._level -= 1
class _Section(object):
def __init__(self, formatter, parent, heading=None):
self.formatter = formatter
self.parent = parent
self.heading = heading
self.items = []
def format_help(self):
# format the indented section
if self.parent is not None:
self.formatter._indent()
join = self.formatter._join_parts
for func, args in self.items:
func(*args)
item_help = join([func(*args) for func, args in self.items])
if self.parent is not None:
self.formatter._dedent()
# return nothing if the section was empty
if not item_help:
return ''
# add the heading if the section was non-empty
if self.heading is not SUPPRESS and self.heading is not None:
current_indent = self.formatter._current_indent
heading = '%*s%s:\n' % (current_indent, '', self.heading)
else:
heading = ''
# join the section-initial newline, the heading and the help
return join(['\n', heading, item_help, '\n'])
def _add_item(self, func, args):
self._current_section.items.append((func, args))
# ========================
# Message building methods
# ========================
def start_section(self, heading):
self._indent()
section = self._Section(self, self._current_section, heading)
self._add_item(section.format_help, [])
self._current_section = section
def end_section(self):
self._current_section = self._current_section.parent
self._dedent()
def add_text(self, text):
if text is not SUPPRESS and text is not None:
self._add_item(self._format_text, [text])
def add_usage(self, usage, actions, groups, prefix=None):
if usage is not SUPPRESS:
args = usage, actions, groups, prefix
self._add_item(self._format_usage, args)
def add_argument(self, action):
if action.help is not SUPPRESS:
# find all invocations
get_invocation = self._format_action_invocation
invocations = [get_invocation(action)]
for subaction in self._iter_indented_subactions(action):
invocations.append(get_invocation(subaction))
# update the maximum item length
invocation_length = max([len(s) for s in invocations])
action_length = invocation_length + self._current_indent
self._action_max_length = max(self._action_max_length,
action_length)
# add the item to the list
self._add_item(self._format_action, [action])
def add_arguments(self, actions):
for action in actions:
self.add_argument(action)
# =======================
# Help-formatting methods
# =======================
def format_help(self):
help = self._root_section.format_help()
if help:
help = self._long_break_matcher.sub('\n\n', help)
help = help.strip('\n') + '\n'
return help
def _join_parts(self, part_strings):
return ''.join([part
for part in part_strings
if part and part is not SUPPRESS])
def _format_usage(self, usage, actions, groups, prefix):
if prefix is None:
prefix = _('usage: ')
# if usage is specified, use that
if usage is not None:
usage = usage % dict(prog=self._prog)
# if no optionals or positionals are available, usage is just prog
elif usage is None and not actions:
usage = '%(prog)s' % dict(prog=self._prog)
# if optionals and positionals are available, calculate usage
elif usage is None:
prog = '%(prog)s' % dict(prog=self._prog)
# split optionals from positionals
optionals = []
positionals = []
for action in actions:
if action.option_strings:
optionals.append(action)
else:
positionals.append(action)
# build full usage string
format = self._format_actions_usage
action_usage = format(optionals + positionals, groups)
usage = ' '.join([s for s in [prog, action_usage] if s])
# wrap the usage parts if it's too long
text_width = self._width - self._current_indent
if len(prefix) + len(usage) > text_width:
# break usage into wrappable parts
part_regexp = r'\(.*?\)+|\[.*?\]+|\S+'
opt_usage = format(optionals, groups)
pos_usage = format(positionals, groups)
opt_parts = _re.findall(part_regexp, opt_usage)
pos_parts = _re.findall(part_regexp, pos_usage)
assert ' '.join(opt_parts) == opt_usage
assert ' '.join(pos_parts) == pos_usage
# helper for wrapping lines
def get_lines(parts, indent, prefix=None):
lines = []
line = []
if prefix is not None:
line_len = len(prefix) - 1
else:
line_len = len(indent) - 1
for part in parts:
if line_len + 1 + len(part) > text_width:
lines.append(indent + ' '.join(line))
line = []
line_len = len(indent) - 1
line.append(part)
line_len += len(part) + 1
if line:
lines.append(indent + ' '.join(line))
if prefix is not None:
lines[0] = lines[0][len(indent):]
return lines
# if prog is short, follow it with optionals or positionals
if len(prefix) + len(prog) <= 0.75 * text_width:
indent = ' ' * (len(prefix) + len(prog) + 1)
if opt_parts:
lines = get_lines([prog] + opt_parts, indent, prefix)
lines.extend(get_lines(pos_parts, indent))
elif pos_parts:
lines = get_lines([prog] + pos_parts, indent, prefix)
else:
lines = [prog]
# if prog is long, put it on its own line
else:
indent = ' ' * len(prefix)
parts = opt_parts + pos_parts
lines = get_lines(parts, indent)
if len(lines) > 1:
lines = []
lines.extend(get_lines(opt_parts, indent))
lines.extend(get_lines(pos_parts, indent))
lines = [prog] + lines
# join lines into usage
usage = '\n'.join(lines)
# prefix with 'usage:'
return '%s%s\n\n' % (prefix, usage)
def _format_actions_usage(self, actions, groups):
# find group indices and identify actions in groups
group_actions = _set()
inserts = {}
for group in groups:
try:
start = actions.index(group._group_actions[0])
except ValueError:
continue
else:
end = start + len(group._group_actions)
if actions[start:end] == group._group_actions:
for action in group._group_actions:
group_actions.add(action)
if not group.required:
inserts[start] = '['
inserts[end] = ']'
else:
inserts[start] = '('
inserts[end] = ')'
for i in range(start + 1, end):
inserts[i] = '|'
# collect all actions format strings
parts = []
for i, action in enumerate(actions):
# suppressed arguments are marked with None
# remove | separators for suppressed arguments
if action.help is SUPPRESS:
parts.append(None)
if inserts.get(i) == '|':
inserts.pop(i)
elif inserts.get(i + 1) == '|':
inserts.pop(i + 1)
# produce all arg strings
elif not action.option_strings:
part = self._format_args(action, action.dest)
# if it's in a group, strip the outer []
if action in group_actions:
if part[0] == '[' and part[-1] == ']':
part = part[1:-1]
# add the action string to the list
parts.append(part)
# produce the first way to invoke the option in brackets
else:
option_string = action.option_strings[0]
# if the Optional doesn't take a value, format is:
# -s or --long
if action.nargs == 0:
part = '%s' % option_string
# if the Optional takes a value, format is:
# -s ARGS or --long ARGS
else:
default = action.dest.upper()
args_string = self._format_args(action, default)
part = '%s %s' % (option_string, args_string)
# make it look optional if it's not required or in a group
if not action.required and action not in group_actions:
part = '[%s]' % part
# add the action string to the list
parts.append(part)
# insert things at the necessary indices
for i in _sorted(inserts, reverse=True):
parts[i:i] = [inserts[i]]
# join all the action items with spaces
text = ' '.join([item for item in parts if item is not None])
# clean up separators for mutually exclusive groups
open = r'[\[(]'
close = r'[\])]'
text = _re.sub(r'(%s) ' % open, r'\1', text)
text = _re.sub(r' (%s)' % close, r'\1', text)
text = _re.sub(r'%s *%s' % (open, close), r'', text)
text = _re.sub(r'\(([^|]*)\)', r'\1', text)
text = text.strip()
# return the text
return text
def _format_text(self, text):
text_width = self._width - self._current_indent
indent = ' ' * self._current_indent
return self._fill_text(text, text_width, indent) + '\n\n'
def _format_action(self, action):
# determine the required width and the entry label
help_position = min(self._action_max_length + 2,
self._max_help_position)
help_width = self._width - help_position
action_width = help_position - self._current_indent - 2
action_header = self._format_action_invocation(action)
# no help; start on the same line and add a final newline
if not action.help:
tup = self._current_indent, '', action_header
action_header = '%*s%s\n' % tup
# short action name; start on the same line and pad two spaces
elif len(action_header) <= action_width:
tup = self._current_indent, '', action_width, action_header
action_header = '%*s%-*s ' % tup
indent_first = 0
# long action name; start on the next line
else:
tup = self._current_indent, '', action_header
action_header = '%*s%s\n' % tup
indent_first = help_position
# collect the pieces of the action help
parts = [action_header]
# if there was help for the action, add lines of help text
if action.help:
help_text = self._expand_help(action)
help_lines = self._split_lines(help_text, help_width)
parts.append('%*s%s\n' % (indent_first, '', help_lines[0]))
for line in help_lines[1:]:
parts.append('%*s%s\n' % (help_position, '', line))
# or add a newline if the description doesn't end with one
elif not action_header.endswith('\n'):
parts.append('\n')
# if there are any sub-actions, add their help as well
for subaction in self._iter_indented_subactions(action):
parts.append(self._format_action(subaction))
# return a single string
return self._join_parts(parts)
def _format_action_invocation(self, action):
if not action.option_strings:
metavar, = self._metavar_formatter(action, action.dest)(1)
return metavar
else:
parts = []
# if the Optional doesn't take a value, format is:
# -s, --long
if action.nargs == 0:
parts.extend(action.option_strings)
# if the Optional takes a value, format is:
# -s ARGS, --long ARGS
else:
default = action.dest.upper()
args_string = self._format_args(action, default)
for option_string in action.option_strings:
parts.append('%s %s' % (option_string, args_string))
return ', '.join(parts)
def _metavar_formatter(self, action, default_metavar):
if action.metavar is not None:
result = action.metavar
elif action.choices is not None:
choice_strs = [str(choice) for choice in action.choices]
result = '{%s}' % ','.join(choice_strs)
else:
result = default_metavar
def format(tuple_size):
if isinstance(result, tuple):
return result
else:
return (result, ) * tuple_size
return format
def _format_args(self, action, default_metavar):
get_metavar = self._metavar_formatter(action, default_metavar)
if action.nargs is None:
result = '%s' % get_metavar(1)
elif action.nargs == OPTIONAL:
result = '[%s]' % get_metavar(1)
elif action.nargs == ZERO_OR_MORE:
result = '[%s [%s ...]]' % get_metavar(2)
elif action.nargs == ONE_OR_MORE:
result = '%s [%s ...]' % get_metavar(2)
elif action.nargs is PARSER:
result = '%s ...' % get_metavar(1)
else:
formats = ['%s' for _ in range(action.nargs)]
result = ' '.join(formats) % get_metavar(action.nargs)
return result
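# Illustrative results of the mapping above (sketch, with metavar 'X'):
#   nargs=None -> 'X'             nargs='?' -> '[X]'
#   nargs='*'  -> '[X [X ...]]'   nargs='+' -> 'X [X ...]'
#   nargs=2    -> 'X X'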
def _expand_help(self, action):
params = dict(vars(action), prog=self._prog)
for name in list(params):
if params[name] is SUPPRESS:
del params[name]
if params.get('choices') is not None:
choices_str = ', '.join([str(c) for c in params['choices']])
params['choices'] = choices_str
return self._get_help_string(action) % params
def _iter_indented_subactions(self, action):
try:
get_subactions = action._get_subactions
except AttributeError:
pass
else:
self._indent()
for subaction in get_subactions():
yield subaction
self._dedent()
def _split_lines(self, text, width):
text = self._whitespace_matcher.sub(' ', text).strip()
return _textwrap.wrap(text, width)
def _fill_text(self, text, width, indent):
text = self._whitespace_matcher.sub(' ', text).strip()
return _textwrap.fill(text, width, initial_indent=indent,
subsequent_indent=indent)
def _get_help_string(self, action):
return action.help
class RawDescriptionHelpFormatter(HelpFormatter):
"""Help message formatter which retains any formatting in descriptions.
Only the name of this class is considered a public API. All the methods
provided by the class are considered an implementation detail.
"""
def _fill_text(self, text, width, indent):
return ''.join([indent + line for line in text.splitlines(True)])
class RawTextHelpFormatter(RawDescriptionHelpFormatter):
"""Help message formatter which retains formatting of all help text.
Only the name of this class is considered a public API. All the methods
provided by the class are considered an implementation detail.
"""
def _split_lines(self, text, width):
return text.splitlines()
class ArgumentDefaultsHelpFormatter(HelpFormatter):
"""Help message formatter which adds default values to argument help.
Only the name of this class is considered a public API. All the methods
provided by the class are considered an implementation detail.
"""
def _get_help_string(self, action):
help = action.help
if '%(default)' not in action.help:
if action.default is not SUPPRESS:
defaulting_nargs = [OPTIONAL, ZERO_OR_MORE]
if action.option_strings or action.nargs in defaulting_nargs:
help += ' (default: %(default)s)'
return help
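# Illustrative effect (sketch): with this formatter, an option declared as
#   parser.add_argument('--level', default=3, help='verbosity level')
# renders its help text as 'verbosity level (default: 3)'.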
# =====================
# Options and Arguments
# =====================
def _get_action_name(argument):
if argument is None:
return None
elif argument.option_strings:
return '/'.join(argument.option_strings)
elif argument.metavar not in (None, SUPPRESS):
return argument.metavar
elif argument.dest not in (None, SUPPRESS):
return argument.dest
else:
return None
class ArgumentError(Exception):
"""An error from creating or using an argument (optional or positional).
The string value of this exception is the message, augmented with
information about the argument that caused it.
"""
def __init__(self, argument, message):
self.argument_name = _get_action_name(argument)
self.message = message
def __str__(self):
if self.argument_name is None:
format = '%(message)s'
else:
format = 'argument %(argument_name)s: %(message)s'
return format % dict(message=self.message,
argument_name=self.argument_name)
# ==============
# Action classes
# ==============
class Action(_AttributeHolder):
"""Information about how to convert command line strings to Python objects.
Action objects are used by an ArgumentParser to represent the information
needed to parse a single argument from one or more strings from the
command line. The keyword arguments to the Action constructor are also
all attributes of Action instances.
Keyword Arguments:
- option_strings -- A list of command-line option strings which
should be associated with this action.
- dest -- The name of the attribute to hold the created object(s)
- nargs -- The number of command-line arguments that should be
consumed. By default, one argument will be consumed and a single
value will be produced. Other values include:
- N (an integer) consumes N arguments (and produces a list)
- '?' consumes zero or one arguments
- '*' consumes zero or more arguments (and produces a list)
- '+' consumes one or more arguments (and produces a list)
Note that the difference between the default and nargs=1 is that
with the default, a single value will be produced, while with
nargs=1, a list containing a single value will be produced.
- const -- The value to be produced if the option is specified and the
option uses an action that takes no values.
- default -- The value to be produced if the option is not specified.
- type -- The type which the command-line arguments should be converted
to, should be one of 'string', 'int', 'float', 'complex' or a
callable object that accepts a single string argument. If None,
'string' is assumed.
- choices -- A container of values that should be allowed. If not None,
after a command-line argument has been converted to the appropriate
type, an exception will be raised if it is not a member of this
collection.
- required -- True if the action must always be specified at the
command line. This is only meaningful for optional command-line
arguments.
- help -- The help string describing the argument.
- metavar -- The name to be used for the option's argument with the
help string. If None, the 'dest' value will be used as the name.
"""
def __init__(self,
option_strings,
dest,
nargs=None,
const=None,
default=None,
type=None,
choices=None,
required=False,
help=None,
metavar=None):
self.option_strings = option_strings
self.dest = dest
self.nargs = nargs
self.const = const
self.default = default
self.type = type
self.choices = choices
self.required = required
self.help = help
self.metavar = metavar
def _get_kwargs(self):
names = [
'option_strings',
'dest',
'nargs',
'const',
'default',
'type',
'choices',
'help',
'metavar',
]
return [(name, getattr(self, name)) for name in names]
def __call__(self, parser, namespace, values, option_string=None):
raise NotImplementedError(_('.__call__() not defined'))
class _StoreAction(Action):
def __init__(self,
option_strings,
dest,
nargs=None,
const=None,
default=None,
type=None,
choices=None,
required=False,
help=None,
metavar=None):
if nargs == 0:
raise ValueError('nargs for store actions must be > 0; if you '
'have nothing to store, actions such as store '
'true or store const may be more appropriate')
if const is not None and nargs != OPTIONAL:
raise ValueError('nargs must be %r to supply const' % OPTIONAL)
super(_StoreAction, self).__init__(
option_strings=option_strings,
dest=dest,
nargs=nargs,
const=const,
default=default,
type=type,
choices=choices,
required=required,
help=help,
metavar=metavar)
def __call__(self, parser, namespace, values, option_string=None):
setattr(namespace, self.dest, values)
class _StoreConstAction(Action):
def __init__(self,
option_strings,
dest,
const,
default=None,
required=False,
help=None,
metavar=None):
super(_StoreConstAction, self).__init__(
option_strings=option_strings,
dest=dest,
nargs=0,
const=const,
default=default,
required=required,
help=help)
def __call__(self, parser, namespace, values, option_string=None):
setattr(namespace, self.dest, self.const)
class _StoreTrueAction(_StoreConstAction):
def __init__(self,
option_strings,
dest,
default=False,
required=False,
help=None):
super(_StoreTrueAction, self).__init__(
option_strings=option_strings,
dest=dest,
const=True,
default=default,
required=required,
help=help)
class _StoreFalseAction(_StoreConstAction):
def __init__(self,
option_strings,
dest,
default=True,
required=False,
help=None):
super(_StoreFalseAction, self).__init__(
option_strings=option_strings,
dest=dest,
const=False,
default=default,
required=required,
help=help)
class _AppendAction(Action):
def __init__(self,
option_strings,
dest,
nargs=None,
const=None,
default=None,
type=None,
choices=None,
required=False,
help=None,
metavar=None):
if nargs == 0:
raise ValueError('nargs for append actions must be > 0; if arg '
'strings are not supplying the value to append, '
'the append const action may be more appropriate')
if const is not None and nargs != OPTIONAL:
raise ValueError('nargs must be %r to supply const' % OPTIONAL)
super(_AppendAction, self).__init__(
option_strings=option_strings,
dest=dest,
nargs=nargs,
const=const,
default=default,
type=type,
choices=choices,
required=required,
help=help,
metavar=metavar)
def __call__(self, parser, namespace, values, option_string=None):
items = _copy.copy(_ensure_value(namespace, self.dest, []))
items.append(values)
setattr(namespace, self.dest, items)
class _AppendConstAction(Action):
def __init__(self,
option_strings,
dest,
const,
default=None,
required=False,
help=None,
metavar=None):
super(_AppendConstAction, self).__init__(
option_strings=option_strings,
dest=dest,
nargs=0,
const=const,
default=default,
required=required,
help=help,
metavar=metavar)
def __call__(self, parser, namespace, values, option_string=None):
items = _copy.copy(_ensure_value(namespace, self.dest, []))
items.append(self.const)
setattr(namespace, self.dest, items)
class _CountAction(Action):
def __init__(self,
option_strings,
dest,
default=None,
required=False,
help=None):
super(_CountAction, self).__init__(
option_strings=option_strings,
dest=dest,
nargs=0,
default=default,
required=required,
help=help)
def __call__(self, parser, namespace, values, option_string=None):
new_count = _ensure_value(namespace, self.dest, 0) + 1
setattr(namespace, self.dest, new_count)
class _HelpAction(Action):
def __init__(self,
option_strings,
dest=SUPPRESS,
default=SUPPRESS,
help=None):
super(_HelpAction, self).__init__(
option_strings=option_strings,
dest=dest,
default=default,
nargs=0,
help=help)
def __call__(self, parser, namespace, values, option_string=None):
parser.print_help()
parser.exit()
class _VersionAction(Action):
def __init__(self,
option_strings,
dest=SUPPRESS,
default=SUPPRESS,
help=None):
super(_VersionAction, self).__init__(
option_strings=option_strings,
dest=dest,
default=default,
nargs=0,
help=help)
def __call__(self, parser, namespace, values, option_string=None):
parser.print_version()
parser.exit()
class _SubParsersAction(Action):
class _ChoicesPseudoAction(Action):
def __init__(self, name, help):
sup = super(_SubParsersAction._ChoicesPseudoAction, self)
sup.__init__(option_strings=[], dest=name, help=help)
def __init__(self,
option_strings,
prog,
parser_class,
dest=SUPPRESS,
help=None,
metavar=None):
self._prog_prefix = prog
self._parser_class = parser_class
self._name_parser_map = {}
self._choices_actions = []
super(_SubParsersAction, self).__init__(
option_strings=option_strings,
dest=dest,
nargs=PARSER,
choices=self._name_parser_map,
help=help,
metavar=metavar)
def add_parser(self, name, **kwargs):
# set prog from the existing prefix
if kwargs.get('prog') is None:
kwargs['prog'] = '%s %s' % (self._prog_prefix, name)
# create a pseudo-action to hold the choice help
if 'help' in kwargs:
help = kwargs.pop('help')
choice_action = self._ChoicesPseudoAction(name, help)
self._choices_actions.append(choice_action)
# create the parser and add it to the map
parser = self._parser_class(**kwargs)
self._name_parser_map[name] = parser
return parser
def _get_subactions(self):
return self._choices_actions
def __call__(self, parser, namespace, values, option_string=None):
parser_name = values[0]
arg_strings = values[1:]
# set the parser name if requested
if self.dest is not SUPPRESS:
setattr(namespace, self.dest, parser_name)
# select the parser
try:
parser = self._name_parser_map[parser_name]
except KeyError:
tup = parser_name, ', '.join(self._name_parser_map)
msg = _('unknown parser %r (choices: %s)') % tup
raise ArgumentError(self, msg)
# parse all the remaining options into the namespace
parser.parse_args(arg_strings, namespace)
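# Hedged usage sketch (not in the original source): _SubParsersAction is
# normally reached through ArgumentParser.add_subparsers(). The helper below
# is illustrative only and never called here.
def _demo_subparsers():
    parser = ArgumentParser(prog='demo')
    subparsers = parser.add_subparsers(dest='command')
    checkout = subparsers.add_parser('checkout', help='check out a branch')
    checkout.add_argument('branch')
    # ['checkout', 'master'] -> Namespace(command='checkout', branch='master')
    return parser.parse_args(['checkout', 'master'])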
# ==============
# Type classes
# ==============
class FileType(object):
"""Factory for creating file object types
Instances of FileType are typically passed as type= arguments to the
ArgumentParser add_argument() method.
Keyword Arguments:
- mode -- A string indicating how the file is to be opened. Accepts the
same values as the builtin open() function.
- bufsize -- The file's desired buffer size. Accepts the same values as
the builtin open() function.
"""
def __init__(self, mode='r', bufsize=None):
self._mode = mode
self._bufsize = bufsize
def __call__(self, string):
# the special argument "-" means sys.std{in,out}
if string == '-':
if 'r' in self._mode:
return _sys.stdin
elif 'w' in self._mode:
return _sys.stdout
else:
msg = _('argument "-" with mode %r') % self._mode
raise ValueError(msg)
# all other arguments are used as file names
if self._bufsize:
return open(string, self._mode, self._bufsize)
else:
return open(string, self._mode)
def __repr__(self):
args = [self._mode, self._bufsize]
args_str = ', '.join([repr(arg) for arg in args if arg is not None])
return '%s(%s)' % (type(self).__name__, args_str)
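# Hedged usage sketch (illustration only): a FileType instance is a callable
# suitable as a type= argument; calling it opens the named file, and '-' is
# mapped to sys.stdin or sys.stdout depending on the mode. Never invoked here.
def _demo_filetype():
    parser = ArgumentParser(prog='demo')
    parser.add_argument('infile', type=FileType('r'))
    args = parser.parse_args(['-'])  # args.infile is sys.stdin
    return args.infile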
# ===========================
# Optional and Positional Parsing
# ===========================
class Namespace(_AttributeHolder):
"""Simple object for storing attributes.
Implements equality by attribute names and values, and provides a simple
string representation.
"""
def __init__(self, **kwargs):
for name in kwargs:
setattr(self, name, kwargs[name])
def __eq__(self, other):
return vars(self) == vars(other)
def __ne__(self, other):
return not (self == other)
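# Illustrative sketch (not in the original source): Namespace is plain
# attribute storage whose equality compares attribute dicts, so keyword
# order does not matter.
def _demo_namespace():
    ns = Namespace(foo=1, bar='baz')
    assert ns.foo == 1
    assert ns == Namespace(bar='baz', foo=1)
    return ns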
class _ActionsContainer(object):
def __init__(self,
description,
prefix_chars,
argument_default,
conflict_handler):
super(_ActionsContainer, self).__init__()
self.description = description
self.argument_default = argument_default
self.prefix_chars = prefix_chars
self.conflict_handler = conflict_handler
# set up registries
self._registries = {}
# register actions
self.register('action', None, _StoreAction)
self.register('action', 'store', _StoreAction)
self.register('action', 'store_const', _StoreConstAction)
self.register('action', 'store_true', _StoreTrueAction)
self.register('action', 'store_false', _StoreFalseAction)
self.register('action', 'append', _AppendAction)
self.register('action', 'append_const', _AppendConstAction)
self.register('action', 'count', _CountAction)
self.register('action', 'help', _HelpAction)
self.register('action', 'version', _VersionAction)
self.register('action', 'parsers', _SubParsersAction)
# raise an exception if the conflict handler is invalid
self._get_handler()
# action storage
self._actions = []
self._option_string_actions = {}
# groups
self._action_groups = []
self._mutually_exclusive_groups = []
# defaults storage
self._defaults = {}
# determines whether an "option" looks like a negative number
self._negative_number_matcher = _re.compile(r'^-\d+$|^-\d*\.\d+$')
# whether or not there are any optionals that look like negative
# numbers -- uses a list so it can be shared and edited
self._has_negative_number_optionals = []
# ====================
# Registration methods
# ====================
def register(self, registry_name, value, object):
registry = self._registries.setdefault(registry_name, {})
registry[value] = object
def _registry_get(self, registry_name, value, default=None):
return self._registries[registry_name].get(value, default)
# ==================================
# Namespace default settings methods
# ==================================
def set_defaults(self, **kwargs):
self._defaults.update(kwargs)
# if these defaults match any existing arguments, replace
# the previous default on the object with the new one
for action in self._actions:
if action.dest in kwargs:
action.default = kwargs[action.dest]
# =======================
# Adding argument actions
# =======================
def add_argument(self, *args, **kwargs):
"""
add_argument(dest, ..., name=value, ...)
add_argument(option_string, option_string, ..., name=value, ...)
"""
# if no positional args are supplied or only one is supplied and
# it doesn't look like an option string, parse a positional
# argument
chars = self.prefix_chars
if not args or len(args) == 1 and args[0][0] not in chars:
kwargs = self._get_positional_kwargs(*args, **kwargs)
# otherwise, we're adding an optional argument
else:
kwargs = self._get_optional_kwargs(*args, **kwargs)
# if no default was supplied, use the parser-level default
if 'default' not in kwargs:
dest = kwargs['dest']
if dest in self._defaults:
kwargs['default'] = self._defaults[dest]
elif self.argument_default is not None:
kwargs['default'] = self.argument_default
# create the action object, and add it to the parser
action_class = self._pop_action_class(kwargs)
action = action_class(**kwargs)
return self._add_action(action)
def add_argument_group(self, *args, **kwargs):
group = _ArgumentGroup(self, *args, **kwargs)
self._action_groups.append(group)
return group
def add_mutually_exclusive_group(self, **kwargs):
group = _MutuallyExclusiveGroup(self, **kwargs)
self._mutually_exclusive_groups.append(group)
return group
def _add_action(self, action):
# resolve any conflicts
self._check_conflict(action)
# add to actions list
self._actions.append(action)
action.container = self
# index the action by any option strings it has
for option_string in action.option_strings:
self._option_string_actions[option_string] = action
# set the flag if any option strings look like negative numbers
for option_string in action.option_strings:
if self._negative_number_matcher.match(option_string):
if not self._has_negative_number_optionals:
self._has_negative_number_optionals.append(True)
# return the created action
return action
def _remove_action(self, action):
self._actions.remove(action)
def _add_container_actions(self, container):
# collect groups by titles
title_group_map = {}
for group in self._action_groups:
if group.title in title_group_map:
msg = _('cannot merge actions - two groups are named %r')
raise ValueError(msg % (group.title))
title_group_map[group.title] = group
# map each action to its group
group_map = {}
for group in container._action_groups:
# if a group with the title exists, use that, otherwise
# create a new group matching the container's group
if group.title not in title_group_map:
title_group_map[group.title] = self.add_argument_group(
title=group.title,
description=group.description,
conflict_handler=group.conflict_handler)
# map the actions to their new group
for action in group._group_actions:
group_map[action] = title_group_map[group.title]
# add container's mutually exclusive groups
# NOTE: if add_mutually_exclusive_group ever gains title= and
# description= then this code will need to be expanded as above
for group in container._mutually_exclusive_groups:
mutex_group = self.add_mutually_exclusive_group(
required=group.required)
# map the actions to their new mutex group
for action in group._group_actions:
group_map[action] = mutex_group
# add all actions to this container or their group
for action in container._actions:
group_map.get(action, self)._add_action(action)
def _get_positional_kwargs(self, dest, **kwargs):
# make sure required is not specified
if 'required' in kwargs:
msg = _("'required' is an invalid argument for positionals")
raise TypeError(msg)
# mark positional arguments as required if at least one is
# always required
if kwargs.get('nargs') not in [OPTIONAL, ZERO_OR_MORE]:
kwargs['required'] = True
if kwargs.get('nargs') == ZERO_OR_MORE and 'default' not in kwargs:
kwargs['required'] = True
# return the keyword arguments with no option strings
return dict(kwargs, dest=dest, option_strings=[])
def _get_optional_kwargs(self, *args, **kwargs):
# determine short and long option strings
option_strings = []
long_option_strings = []
for option_string in args:
# error on one-or-fewer-character option strings
if len(option_string) < 2:
msg = _('invalid option string %r: '
'must be at least two characters long')
raise ValueError(msg % option_string)
# error on strings that don't start with an appropriate prefix
if not option_string[0] in self.prefix_chars:
msg = _('invalid option string %r: '
'must start with a character %r')
tup = option_string, self.prefix_chars
raise ValueError(msg % tup)
# error on strings that are all prefix characters
if not (_set(option_string) - _set(self.prefix_chars)):
msg = _('invalid option string %r: '
'must contain characters other than %r')
tup = option_string, self.prefix_chars
raise ValueError(msg % tup)
# strings starting with two prefix characters are long options
option_strings.append(option_string)
if option_string[0] in self.prefix_chars:
if option_string[1] in self.prefix_chars:
long_option_strings.append(option_string)
# infer destination, '--foo-bar' -> 'foo_bar' and '-x' -> 'x'
dest = kwargs.pop('dest', None)
if dest is None:
if long_option_strings:
dest_option_string = long_option_strings[0]
else:
dest_option_string = option_strings[0]
dest = dest_option_string.lstrip(self.prefix_chars)
dest = dest.replace('-', '_')
# return the updated keyword arguments
return dict(kwargs, dest=dest, option_strings=option_strings)
def _pop_action_class(self, kwargs, default=None):
action = kwargs.pop('action', default)
return self._registry_get('action', action, action)
def _get_handler(self):
# determine function from conflict handler string
handler_func_name = '_handle_conflict_%s' % self.conflict_handler
try:
return getattr(self, handler_func_name)
except AttributeError:
msg = _('invalid conflict_resolution value: %r')
raise ValueError(msg % self.conflict_handler)
def _check_conflict(self, action):
# find all options that conflict with this option
confl_optionals = []
for option_string in action.option_strings:
if option_string in self._option_string_actions:
confl_optional = self._option_string_actions[option_string]
confl_optionals.append((option_string, confl_optional))
# resolve any conflicts
if confl_optionals:
conflict_handler = self._get_handler()
conflict_handler(action, confl_optionals)
def _handle_conflict_error(self, action, conflicting_actions):
message = _('conflicting option string(s): %s')
conflict_string = ', '.join([option_string
for option_string, action
in conflicting_actions])
raise ArgumentError(action, message % conflict_string)
def _handle_conflict_resolve(self, action, conflicting_actions):
# remove all conflicting options
for option_string, action in conflicting_actions:
# remove the conflicting option
action.option_strings.remove(option_string)
self._option_string_actions.pop(option_string, None)
# if the option now has no option string, remove it from the
# container holding it
if not action.option_strings:
action.container._remove_action(action)
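# Hedged sketch (illustration only): with conflict_handler='resolve', the
# _handle_conflict_resolve path above lets a later add_argument() take over a
# conflicting option string instead of raising ArgumentError.
def _demo_conflict_resolve():
    parser = ArgumentParser(prog='demo', conflict_handler='resolve')
    parser.add_argument('-f', '--foo', help='old')
    parser.add_argument('--foo', help='new')  # steals '--foo'; '-f' keeps the old action
    return parser.parse_args(['--foo', 'x'])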
class _ArgumentGroup(_ActionsContainer):
def __init__(self, container, title=None, description=None, **kwargs):
# add any missing keyword arguments by checking the container
update = kwargs.setdefault
update('conflict_handler', container.conflict_handler)
update('prefix_chars', container.prefix_chars)
update('argument_default', container.argument_default)
super_init = super(_ArgumentGroup, self).__init__
super_init(description=description, **kwargs)
# group attributes
self.title = title
self._group_actions = []
# share most attributes with the container
self._registries = container._registries
self._actions = container._actions
self._option_string_actions = container._option_string_actions
self._defaults = container._defaults
self._has_negative_number_optionals = \
container._has_negative_number_optionals
def _add_action(self, action):
action = super(_ArgumentGroup, self)._add_action(action)
self._group_actions.append(action)
return action
def _remove_action(self, action):
super(_ArgumentGroup, self)._remove_action(action)
self._group_actions.remove(action)
class _MutuallyExclusiveGroup(_ArgumentGroup):
def __init__(self, container, required=False):
super(_MutuallyExclusiveGroup, self).__init__(container)
self.required = required
self._container = container
def _add_action(self, action):
if action.required:
msg = _('mutually exclusive arguments must be optional')
raise ValueError(msg)
action = self._container._add_action(action)
self._group_actions.append(action)
return action
def _remove_action(self, action):
self._container._remove_action(action)
self._group_actions.remove(action)
class ArgumentParser(_AttributeHolder, _ActionsContainer):
"""Object for parsing command line strings into Python objects.
Keyword Arguments:
- prog -- The name of the program (default: sys.argv[0])
- usage -- A usage message (default: auto-generated from arguments)
- description -- A description of what the program does
- epilog -- Text following the argument descriptions
- version -- Add a -v/--version option with the given version string
- parents -- Parsers whose arguments should be copied into this one
- formatter_class -- HelpFormatter class for printing help messages
- prefix_chars -- Characters that prefix optional arguments
- fromfile_prefix_chars -- Characters that prefix files containing
additional arguments
- argument_default -- The default value for all arguments
- conflict_handler -- String indicating how to handle conflicts
- add_help -- Add a -h/--help option
"""
def __init__(self,
prog=None,
usage=None,
description=None,
epilog=None,
version=None,
parents=[],
formatter_class=HelpFormatter,
prefix_chars='-',
fromfile_prefix_chars=None,
argument_default=None,
conflict_handler='error',
add_help=True):
superinit = super(ArgumentParser, self).__init__
superinit(description=description,
prefix_chars=prefix_chars,
argument_default=argument_default,
conflict_handler=conflict_handler)
# default setting for prog
if prog is None:
prog = _os.path.basename(_sys.argv[0])
self.prog = prog
self.usage = usage
self.epilog = epilog
self.version = version
self.formatter_class = formatter_class
self.fromfile_prefix_chars = fromfile_prefix_chars
self.add_help = add_help
add_group = self.add_argument_group
self._positionals = add_group(_('positional arguments'))
self._optionals = add_group(_('optional arguments'))
self._subparsers = None
# register types
def identity(string):
return string
self.register('type', None, identity)
# add help and version arguments if necessary
# (using explicit default to override global argument_default)
if self.add_help:
self.add_argument(
'-h', '--help', action='help', default=SUPPRESS,
help=_('show this help message and exit'))
if self.version:
self.add_argument(
'-v', '--version', action='version', default=SUPPRESS,
help=_("show program's version number and exit"))
# add parent arguments and defaults
for parent in parents:
self._add_container_actions(parent)
try:
defaults = parent._defaults
except AttributeError:
pass
else:
self._defaults.update(defaults)
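# Hedged sketch (illustration only): constructing a parser with several of
# the keyword arguments documented in the class docstring. Never called here.
def _demo_parser_kwargs():
    parser = ArgumentParser(
        prog='demo',
        description='What the program does.',
        epilog='Text printed after the argument help.',
        fromfile_prefix_chars='@',  # 'demo @args.txt' reads extra args from args.txt
    )
    parser.add_argument('--verbose', action='store_true')
    return parser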
# =======================
# Pretty __repr__ methods
# =======================
def _get_kwargs(self):
names = [
'prog',
'usage',
'description',
'version',
'formatter_class',
'conflict_handler',
'add_help',
]
return [(name, getattr(self, name)) for name in names]
# ==================================
# Optional/Positional adding methods
# ==================================
def add_subparsers(self, **kwargs):
if self._subparsers is not None:
self.error(_('cannot have multiple subparser arguments'))
# add the parser class to the arguments if it's not present
kwargs.setdefault('parser_class', type(self))
if 'title' in kwargs or 'description' in kwargs:
title = _(kwargs.pop('title', 'subcommands'))
description = _(kwargs.pop('description', None))
self._subparsers = self.add_argument_group(title, description)
else:
self._subparsers = self._positionals
# prog defaults to the usage message of this parser, skipping
# optional arguments and with no "usage:" prefix
if kwargs.get('prog') is None:
formatter = self._get_formatter()
positionals = self._get_positional_actions()
groups = self._mutually_exclusive_groups
formatter.add_usage(self.usage, positionals, groups, '')
kwargs['prog'] = formatter.format_help().strip()
# create the parsers action and add it to the positionals list
parsers_class = self._pop_action_class(kwargs, 'parsers')
action = parsers_class(option_strings=[], **kwargs)
self._subparsers._add_action(action)
# return the created parsers action
return action
def _add_action(self, action):
if action.option_strings:
self._optionals._add_action(action)
else:
self._positionals._add_action(action)
return action
def _get_optional_actions(self):
return [action
for action in self._actions
if action.option_strings]
def _get_positional_actions(self):
return [action
for action in self._actions
if not action.option_strings]
# =====================================
# Command line argument parsing methods
# =====================================
def parse_args(self, args=None, namespace=None):
args, argv = self.parse_known_args(args, namespace)
if argv:
msg = _('unrecognized arguments: %s')
self.error(msg % ' '.join(argv))
return args
def parse_known_args(self, args=None, namespace=None):
# args default to the system args
if args is None:
args = _sys.argv[1:]
# default Namespace built from parser defaults
if namespace is None:
namespace = Namespace()
# add any action defaults that aren't present
for action in self._actions:
if action.dest is not SUPPRESS:
if not hasattr(namespace, action.dest):
if action.default is not SUPPRESS:
default = action.default
if isinstance(action.default, _basestring):
default = self._get_value(action, default)
setattr(namespace, action.dest, default)
# add any parser defaults that aren't present
for dest in self._defaults:
if not hasattr(namespace, dest):
setattr(namespace, dest, self._defaults[dest])
# parse the arguments and exit if there are any errors
try:
return self._parse_known_args(args, namespace)
except ArgumentError:
err = _sys.exc_info()[1]
self.error(str(err))
def _parse_known_args(self, arg_strings, namespace):
# replace arg strings that are file references
if self.fromfile_prefix_chars is not None:
arg_strings = self._read_args_from_files(arg_strings)
# map all mutually exclusive arguments to the other arguments
# they can't occur with
action_conflicts = {}
for mutex_group in self._mutually_exclusive_groups:
group_actions = mutex_group._group_actions
for i, mutex_action in enumerate(mutex_group._group_actions):
conflicts = action_conflicts.setdefault(mutex_action, [])
conflicts.extend(group_actions[:i])
conflicts.extend(group_actions[i + 1:])
# find all option indices, and determine the arg_string_pattern
# which has an 'O' if there is an option at an index,
# an 'A' if there is an argument, or a '-' if there is a '--'
option_string_indices = {}
arg_string_pattern_parts = []
arg_strings_iter = iter(arg_strings)
for i, arg_string in enumerate(arg_strings_iter):
# all args after -- are non-options
if arg_string == '--':
arg_string_pattern_parts.append('-')
for arg_string in arg_strings_iter:
arg_string_pattern_parts.append('A')
# otherwise, add the arg to the arg strings
# and note the index if it was an option
else:
option_tuple = self._parse_optional(arg_string)
if option_tuple is None:
pattern = 'A'
else:
option_string_indices[i] = option_tuple
pattern = 'O'
arg_string_pattern_parts.append(pattern)
# join the pieces together to form the pattern
arg_strings_pattern = ''.join(arg_string_pattern_parts)
# converts arg strings to the appropriate type and then takes the action
seen_actions = _set()
seen_non_default_actions = _set()
def take_action(action, argument_strings, option_string=None):
seen_actions.add(action)
argument_values = self._get_values(action, argument_strings)
# error if this argument is not allowed with other previously
# seen arguments, assuming that actions that use the default
# value don't really count as "present"
if argument_values is not action.default:
seen_non_default_actions.add(action)
for conflict_action in action_conflicts.get(action, []):
if conflict_action in seen_non_default_actions:
msg = _('not allowed with argument %s')
action_name = _get_action_name(conflict_action)
raise ArgumentError(action, msg % action_name)
# take the action if we didn't receive a SUPPRESS value
# (e.g. from a default)
if argument_values is not SUPPRESS:
action(self, namespace, argument_values, option_string)
# function to convert arg_strings into an optional action
def consume_optional(start_index):
# get the optional identified at this index
option_tuple = option_string_indices[start_index]
action, option_string, explicit_arg = option_tuple
# identify additional optionals in the same arg string
# (e.g. -xyz is the same as -x -y -z if no args are required)
match_argument = self._match_argument
action_tuples = []
while True:
# if we found no optional action, skip it
if action is None:
extras.append(arg_strings[start_index])
return start_index + 1
# if there is an explicit argument, try to match the
# optional's string arguments to only this
if explicit_arg is not None:
arg_count = match_argument(action, 'A')
# if the action is a single-dash option and takes no
# arguments, try to parse more single-dash options out
# of the tail of the option string
chars = self.prefix_chars
if arg_count == 0 and option_string[1] not in chars:
action_tuples.append((action, [], option_string))
for char in self.prefix_chars:
option_string = char + explicit_arg[0]
explicit_arg = explicit_arg[1:] or None
optionals_map = self._option_string_actions
if option_string in optionals_map:
action = optionals_map[option_string]
break
else:
msg = _('ignored explicit argument %r')
raise ArgumentError(action, msg % explicit_arg)
# if the action expects exactly one argument, we've
# successfully matched the option; exit the loop
elif arg_count == 1:
stop = start_index + 1
args = [explicit_arg]
action_tuples.append((action, args, option_string))
break
# error if a double-dash option did not use the
# explicit argument
else:
msg = _('ignored explicit argument %r')
raise ArgumentError(action, msg % explicit_arg)
# if there is no explicit argument, try to match the
# optional's string arguments with the following strings
# if successful, exit the loop
else:
start = start_index + 1
selected_patterns = arg_strings_pattern[start:]
arg_count = match_argument(action, selected_patterns)
stop = start + arg_count
args = arg_strings[start:stop]
action_tuples.append((action, args, option_string))
break
# add the Optional to the list and return the index at which
# the Optional's string args stopped
assert action_tuples
for action, args, option_string in action_tuples:
take_action(action, args, option_string)
return stop
# the list of Positionals left to be parsed; this is modified
# by consume_positionals()
positionals = self._get_positional_actions()
# function to convert arg_strings into positional actions
def consume_positionals(start_index):
# match as many Positionals as possible
match_partial = self._match_arguments_partial
selected_pattern = arg_strings_pattern[start_index:]
arg_counts = match_partial(positionals, selected_pattern)
# slice off the appropriate arg strings for each Positional
# and add the Positional and its args to the list
for action, arg_count in zip(positionals, arg_counts):
args = arg_strings[start_index: start_index + arg_count]
start_index += arg_count
take_action(action, args)
# slice off the Positionals that we just parsed and return the
# index at which the Positionals' string args stopped
positionals[:] = positionals[len(arg_counts):]
return start_index
# consume Positionals and Optionals alternately, until we have
# passed the last option string
extras = []
start_index = 0
if option_string_indices:
max_option_string_index = max(option_string_indices)
else:
max_option_string_index = -1
while start_index <= max_option_string_index:
# consume any Positionals preceding the next option
next_option_string_index = min([
index
for index in option_string_indices
if index >= start_index])
if start_index != next_option_string_index:
positionals_end_index = consume_positionals(start_index)
# only try to parse the next optional if we didn't consume
# the option string during the positionals parsing
if positionals_end_index > start_index:
start_index = positionals_end_index
continue
else:
start_index = positionals_end_index
# if we consumed all the positionals we could and we're not
# at the index of an option string, there were extra arguments
if start_index not in option_string_indices:
strings = arg_strings[start_index:next_option_string_index]
extras.extend(strings)
start_index = next_option_string_index
# consume the next optional and any arguments for it
start_index = consume_optional(start_index)
# consume any positionals following the last Optional
stop_index = consume_positionals(start_index)
# if we didn't consume all the argument strings, there were extras
extras.extend(arg_strings[stop_index:])
# if we didn't use all the Positional objects, there were too few
# arg strings supplied.
if positionals:
self.error(_('too few arguments'))
# make sure all required actions were present
for action in self._actions:
if action.required:
if action not in seen_actions:
name = _get_action_name(action)
self.error(_('argument %s is required') % name)
# make sure all required groups had one option present
for group in self._mutually_exclusive_groups:
if group.required:
for action in group._group_actions:
if action in seen_non_default_actions:
break
# if no actions were used, report the error
else:
names = [_get_action_name(action)
for action in group._group_actions
if action.help is not SUPPRESS]
msg = _('one of the arguments %s is required')
self.error(msg % ' '.join(names))
# return the updated namespace and the extra arguments
return namespace, extras
def _read_args_from_files(self, arg_strings):
# expand arguments referencing files
new_arg_strings = []
for arg_string in arg_strings:
# for regular arguments, just add them back into the list
if arg_string[0] not in self.fromfile_prefix_chars:
new_arg_strings.append(arg_string)
# replace arguments referencing files with the file content
else:
try:
args_file = open(arg_string[1:])
try:
arg_strings = args_file.read().splitlines()
arg_strings = self._read_args_from_files(arg_strings)
new_arg_strings.extend(arg_strings)
finally:
args_file.close()
except IOError:
err = _sys.exc_info()[1]
self.error(str(err))
# return the modified argument list
return new_arg_strings
def _match_argument(self, action, arg_strings_pattern):
# match the pattern for this action to the arg strings
nargs_pattern = self._get_nargs_pattern(action)
match = _re.match(nargs_pattern, arg_strings_pattern)
# raise an exception if we weren't able to find a match
if match is None:
nargs_errors = {
None: _('expected one argument'),
OPTIONAL: _('expected at most one argument'),
ONE_OR_MORE: _('expected at least one argument'),
}
default = _('expected %s argument(s)') % action.nargs
msg = nargs_errors.get(action.nargs, default)
raise ArgumentError(action, msg)
# return the number of arguments matched
return len(match.group(1))
def _match_arguments_partial(self, actions, arg_strings_pattern):
# progressively shorten the actions list by slicing off the
# final actions until we find a match
result = []
for i in range(len(actions), 0, -1):
actions_slice = actions[:i]
pattern = ''.join([self._get_nargs_pattern(action)
for action in actions_slice])
match = _re.match(pattern, arg_strings_pattern)
if match is not None:
result.extend([len(string) for string in match.groups()])
break
# return the list of arg string counts
return result
def _parse_optional(self, arg_string):
# if it's an empty string, it was meant to be a positional
if not arg_string:
return None
# if it doesn't start with a prefix, it was meant to be positional
if not arg_string[0] in self.prefix_chars:
return None
# if it's just dashes, it was meant to be positional
if not arg_string.strip('-'):
return None
# if the option string is present in the parser, return the action
if arg_string in self._option_string_actions:
action = self._option_string_actions[arg_string]
return action, arg_string, None
# search through all possible prefixes of the option string
# and all actions in the parser for possible interpretations
option_tuples = self._get_option_tuples(arg_string)
# if multiple actions match, the option string was ambiguous
if len(option_tuples) > 1:
options = ', '.join([option_string
for action, option_string, explicit_arg in option_tuples])
tup = arg_string, options
self.error(_('ambiguous option: %s could match %s') % tup)
# if exactly one action matched, this segmentation is good,
# so return the parsed action
elif len(option_tuples) == 1:
option_tuple, = option_tuples
return option_tuple
# if it was not found as an option, but it looks like a negative
# number, it was meant to be positional
# unless there are negative-number-like options
if self._negative_number_matcher.match(arg_string):
if not self._has_negative_number_optionals:
return None
# if it contains a space, it was meant to be a positional
if ' ' in arg_string:
return None
# it was meant to be an optional but there is no such option
# in this parser (though it might be a valid option in a subparser)
return None, arg_string, None
def _get_option_tuples(self, option_string):
result = []
# option strings starting with two prefix characters are only
# split at the '='
chars = self.prefix_chars
if option_string[0] in chars and option_string[1] in chars:
if '=' in option_string:
option_prefix, explicit_arg = option_string.split('=', 1)
else:
option_prefix = option_string
explicit_arg = None
for option_string in self._option_string_actions:
if option_string.startswith(option_prefix):
action = self._option_string_actions[option_string]
tup = action, option_string, explicit_arg
result.append(tup)
# single character options can be concatenated with their arguments
# but multiple character options always have to have their argument
# separate
elif option_string[0] in chars and option_string[1] not in chars:
option_prefix = option_string
explicit_arg = None
short_option_prefix = option_string[:2]
short_explicit_arg = option_string[2:]
for option_string in self._option_string_actions:
if option_string == short_option_prefix:
action = self._option_string_actions[option_string]
tup = action, option_string, short_explicit_arg
result.append(tup)
elif option_string.startswith(option_prefix):
action = self._option_string_actions[option_string]
tup = action, option_string, explicit_arg
result.append(tup)
# shouldn't ever get here
else:
self.error(_('unexpected option string: %s') % option_string)
# return the collected option tuples
return result
def _get_nargs_pattern(self, action):
# in all examples below, we have to allow for '--' args
# which are represented as '-' in the pattern
nargs = action.nargs
# the default (None) is assumed to be a single argument
if nargs is None:
nargs_pattern = '(-*A-*)'
# allow zero or one arguments
elif nargs == OPTIONAL:
nargs_pattern = '(-*A?-*)'
# allow zero or more arguments
elif nargs == ZERO_OR_MORE:
nargs_pattern = '(-*[A-]*)'
# allow one or more arguments
elif nargs == ONE_OR_MORE:
nargs_pattern = '(-*A[A-]*)'
# allow one argument followed by any number of options or arguments
elif nargs is PARSER:
nargs_pattern = '(-*A[-AO]*)'
# all others should be integers
else:
nargs_pattern = '(-*%s-*)' % '-*'.join('A' * nargs)
# if this is an optional action, -- is not allowed
if action.option_strings:
nargs_pattern = nargs_pattern.replace('-*', '')
nargs_pattern = nargs_pattern.replace('-', '')
# return the pattern
return nargs_pattern
# ========================
# Value conversion methods
# ========================
def _get_values(self, action, arg_strings):
# for everything but PARSER args, strip out '--'
if action.nargs is not PARSER:
arg_strings = [s for s in arg_strings if s != '--']
# optional argument produces a default when not present
if not arg_strings and action.nargs == OPTIONAL:
if action.option_strings:
value = action.const
else:
value = action.default
if isinstance(value, _basestring):
value = self._get_value(action, value)
self._check_value(action, value)
# when nargs='*' on a positional, if there were no command-line
# args, use the default if it is anything other than None
elif (not arg_strings and action.nargs == ZERO_OR_MORE and
not action.option_strings):
if action.default is not None:
value = action.default
else:
value = arg_strings
self._check_value(action, value)
# single argument or optional argument produces a single value
elif len(arg_strings) == 1 and action.nargs in [None, OPTIONAL]:
arg_string, = arg_strings
value = self._get_value(action, arg_string)
self._check_value(action, value)
# PARSER arguments convert all values, but check only the first
elif action.nargs is PARSER:
value = [self._get_value(action, v) for v in arg_strings]
self._check_value(action, value[0])
# all other types of nargs produce a list
else:
value = [self._get_value(action, v) for v in arg_strings]
for v in value:
self._check_value(action, v)
# return the converted value
return value
def _get_value(self, action, arg_string):
type_func = self._registry_get('type', action.type, action.type)
if not hasattr(type_func, '__call__'):
if not hasattr(type_func, '__bases__'): # classic classes
msg = _('%r is not callable')
raise ArgumentError(action, msg % type_func)
# convert the value to the appropriate type
try:
result = type_func(arg_string)
# TypeErrors or ValueErrors indicate errors
except (TypeError, ValueError):
name = getattr(action.type, '__name__', repr(action.type))
msg = _('invalid %s value: %r')
raise ArgumentError(action, msg % (name, arg_string))
# return the converted value
return result
def _check_value(self, action, value):
# converted value must be one of the choices (if specified)
if action.choices is not None and value not in action.choices:
tup = value, ', '.join(map(repr, action.choices))
msg = _('invalid choice: %r (choose from %s)') % tup
raise ArgumentError(action, msg)
# =======================
# Help-formatting methods
# =======================
def format_usage(self):
formatter = self._get_formatter()
formatter.add_usage(self.usage, self._actions,
self._mutually_exclusive_groups)
return formatter.format_help()
def format_help(self):
formatter = self._get_formatter()
# usage
formatter.add_usage(self.usage, self._actions,
self._mutually_exclusive_groups)
# description
formatter.add_text(self.description)
# positionals, optionals and user-defined groups
for action_group in self._action_groups:
formatter.start_section(action_group.title)
formatter.add_text(action_group.description)
formatter.add_arguments(action_group._group_actions)
formatter.end_section()
# epilog
formatter.add_text(self.epilog)
# determine help from format above
return formatter.format_help()
def format_version(self):
formatter = self._get_formatter()
formatter.add_text(self.version)
return formatter.format_help()
def _get_formatter(self):
return self.formatter_class(prog=self.prog)
# =====================
# Help-printing methods
# =====================
def print_usage(self, file=None):
self._print_message(self.format_usage(), file)
def print_help(self, file=None):
self._print_message(self.format_help(), file)
def print_version(self, file=None):
self._print_message(self.format_version(), file)
def _print_message(self, message, file=None):
if message:
if file is None:
file = _sys.stderr
file.write(message)
# ===============
# Exiting methods
# ===============
def exit(self, status=0, message=None):
if message:
_sys.stderr.write(message)
_sys.exit(status)
def error(self, message):
"""error(message: string)
Prints a usage message incorporating the message to stderr and
exits.
If you override this in a subclass, it should not return -- it
should either exit or raise an exception.
"""
self.print_usage(_sys.stderr)
self.exit(2, _('%s: error: %s\n') % (self.prog, message))
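# Hedged sketch (illustration only): per the error() docstring, an override
# must not return normally -- raising instead of exiting is useful when the
# parser is embedded in a larger program. _DemoNonExitingParser is a
# hypothetical name.
class _DemoNonExitingParser(ArgumentParser):
    def error(self, message):
        raise ValueError('argument parsing failed: %s' % message)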
|
archen/django | refs/heads/master | django/contrib/auth/models.py | 15 | from __future__ import unicode_literals
from django.core.mail import send_mail
from django.core import validators
from django.db import models
from django.db.models.manager import EmptyManager
from django.utils.crypto import get_random_string
from django.utils import six
from django.utils.translation import ugettext_lazy as _
from django.utils import timezone
from django.contrib import auth
from django.contrib.auth.hashers import (
check_password, make_password, is_password_usable)
from django.contrib.auth.signals import user_logged_in
from django.contrib.contenttypes.models import ContentType
from django.utils.encoding import python_2_unicode_compatible
def update_last_login(sender, user, **kwargs):
"""
A signal receiver which updates the last_login date for
the user logging in.
"""
user.last_login = timezone.now()
user.save(update_fields=['last_login'])
user_logged_in.connect(update_last_login)
class PermissionManager(models.Manager):
def get_by_natural_key(self, codename, app_label, model):
return self.get(
codename=codename,
content_type=ContentType.objects.get_by_natural_key(app_label,
model),
)
@python_2_unicode_compatible
class Permission(models.Model):
"""
The permissions system provides a way to assign permissions to specific
users and groups of users.
The permission system is used by the Django admin site, but may also be
useful in your own code. The Django admin site uses permissions as follows:
- The "add" permission limits the user's ability to view the "add" form
and add an object.
- The "change" permission limits a user's ability to view the change
list, view the "change" form and change an object.
- The "delete" permission limits the ability to delete an object.
Permissions are set globally per type of object, not per specific object
instance. It is possible to say "Mary may change news stories," but it's
not currently possible to say "Mary may change news stories, but only the
ones she created herself" or "Mary may only change news stories that have a
certain status or publication date."
Three basic permissions -- add, change and delete -- are automatically
created for each Django model.
"""
name = models.CharField(_('name'), max_length=50)
content_type = models.ForeignKey(ContentType)
codename = models.CharField(_('codename'), max_length=100)
objects = PermissionManager()
class Meta:
verbose_name = _('permission')
verbose_name_plural = _('permissions')
unique_together = (('content_type', 'codename'),)
ordering = ('content_type__app_label', 'content_type__model',
'codename')
def __str__(self):
return "%s | %s | %s" % (
six.text_type(self.content_type.app_label),
six.text_type(self.content_type),
six.text_type(self.name))
def natural_key(self):
return (self.codename,) + self.content_type.natural_key()
natural_key.dependencies = ['contenttypes.contenttype']
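# Hedged usage sketch (illustration only, assumes a configured Django project
# with a hypothetical 'polls' app): permissions are checked through
# User.has_perm() using the '<app_label>.<codename>' form; never called here.
def _demo_check_permission(user):
    # True if the user was granted the auto-created change permission for a
    # polls.Question model, either directly or through one of its groups.
    return user.has_perm('polls.change_question')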
class GroupManager(models.Manager):
"""
The manager for the auth's Group model.
"""
def get_by_natural_key(self, name):
return self.get(name=name)
@python_2_unicode_compatible
class Group(models.Model):
"""
Groups are a generic way of categorizing users to apply permissions, or
some other label, to those users. A user can belong to any number of
groups.
A user in a group automatically has all the permissions granted to that
group. For example, if the group Site editors has the permission
can_edit_home_page, any user in that group will have that permission.
Beyond permissions, groups are a convenient way to categorize users to
apply some label, or extended functionality, to them. For example, you
could create a group 'Special users', and you could write code that would
do special things to those users -- such as giving them access to a
members-only portion of your site, or sending them members-only email
messages.
"""
name = models.CharField(_('name'), max_length=80, unique=True)
permissions = models.ManyToManyField(Permission,
verbose_name=_('permissions'), blank=True)
objects = GroupManager()
class Meta:
verbose_name = _('group')
verbose_name_plural = _('groups')
def __str__(self):
return self.name
def natural_key(self):
return (self.name,)
class BaseUserManager(models.Manager):
@classmethod
def normalize_email(cls, email):
"""
Normalize the address by lowercasing the domain part of the email
address.
"""
email = email or ''
try:
email_name, domain_part = email.strip().rsplit('@', 1)
except ValueError:
pass
else:
email = '@'.join([email_name, domain_part.lower()])
return email
def make_random_password(self, length=10,
allowed_chars='abcdefghjkmnpqrstuvwxyz'
'ABCDEFGHJKLMNPQRSTUVWXYZ'
'23456789'):
"""
Generates a random password with the given length and given
allowed_chars. Note that the default value of allowed_chars does not
have "I" or "O" or letters and digits that look similar -- just to
avoid confusion.
"""
return get_random_string(length, allowed_chars)
def get_by_natural_key(self, username):
return self.get(**{self.model.USERNAME_FIELD: username})
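# Hedged sketch (illustration only): normalize_email() lowercases only the
# domain part of the address, leaving the local part untouched.
def _demo_normalize_email():
    assert (BaseUserManager.normalize_email('Jane.Doe@EXAMPLE.COM')
            == 'Jane.Doe@example.com')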
class UserManager(BaseUserManager):
def _create_user(self, username, email, password,
is_staff, is_superuser, **extra_fields):
"""
Creates and saves a User with the given username, email and password.
"""
now = timezone.now()
if not username:
raise ValueError('The given username must be set')
email = self.normalize_email(email)
user = self.model(username=username, email=email,
is_staff=is_staff, is_active=True,
is_superuser=is_superuser, last_login=now,
date_joined=now, **extra_fields)
user.set_password(password)
user.save(using=self._db)
return user
def create_user(self, username, email=None, password=None, **extra_fields):
return self._create_user(username, email, password, False, False,
**extra_fields)
def create_superuser(self, username, email, password, **extra_fields):
return self._create_user(username, email, password, True, True,
**extra_fields)
@python_2_unicode_compatible
class AbstractBaseUser(models.Model):
password = models.CharField(_('password'), max_length=128)
last_login = models.DateTimeField(_('last login'), default=timezone.now)
is_active = True
REQUIRED_FIELDS = []
class Meta:
abstract = True
def get_username(self):
"Return the identifying username for this User"
return getattr(self, self.USERNAME_FIELD)
def __str__(self):
return self.get_username()
def natural_key(self):
return (self.get_username(),)
def is_anonymous(self):
"""
Always returns False. This is a way of comparing User objects to
anonymous users.
"""
return False
def is_authenticated(self):
"""
Always return True. This is a way to tell if the user has been
authenticated in templates.
"""
return True
def set_password(self, raw_password):
self.password = make_password(raw_password)
def check_password(self, raw_password):
"""
Returns a boolean of whether the raw_password was correct. Handles
hashing formats behind the scenes.
"""
def setter(raw_password):
self.set_password(raw_password)
self.save(update_fields=["password"])
return check_password(raw_password, self.password, setter)
def set_unusable_password(self):
# Sets a value that will never be a valid hash
self.password = make_password(None)
def has_usable_password(self):
return is_password_usable(self.password)
def get_full_name(self):
raise NotImplementedError('subclasses of AbstractBaseUser must provide a get_full_name() method.')
def get_short_name(self):
raise NotImplementedError('subclasses of AbstractBaseUser must provide a get_short_name() method.')
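# Hedged usage sketch (illustration only, assumes a configured Django
# settings module): set_password() hashes and stores the raw password, and
# check_password() verifies against the stored hash, re-hashing via the
# setter callback when the preferred hasher has changed. Never called here.
def _demo_password_roundtrip(user):
    user.set_password('s3cret')
    assert user.check_password('s3cret')
    assert not user.check_password('wrong')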
# A few helper functions for common logic between User and AnonymousUser.
def _user_get_all_permissions(user, obj):
permissions = set()
for backend in auth.get_backends():
if hasattr(backend, "get_all_permissions"):
permissions.update(backend.get_all_permissions(user, obj))
return permissions
def _user_has_perm(user, perm, obj):
for backend in auth.get_backends():
if hasattr(backend, "has_perm"):
if backend.has_perm(user, perm, obj):
return True
return False
def _user_has_module_perms(user, app_label):
for backend in auth.get_backends():
if hasattr(backend, "has_module_perms"):
if backend.has_module_perms(user, app_label):
return True
return False
class PermissionsMixin(models.Model):
"""
A mixin class that adds the fields and methods necessary to support
Django's Group and Permission model using the ModelBackend.
"""
is_superuser = models.BooleanField(_('superuser status'), default=False,
help_text=_('Designates that this user has all permissions without '
'explicitly assigning them.'))
groups = models.ManyToManyField(Group, verbose_name=_('groups'),
blank=True, help_text=_('The groups this user belongs to. A user will '
'get all permissions granted to each of '
'his/her group.'),
related_name="user_set", related_query_name="user")
user_permissions = models.ManyToManyField(Permission,
verbose_name=_('user permissions'), blank=True,
help_text=_('Specific permissions for this user.'),
related_name="user_set", related_query_name="user")
class Meta:
abstract = True
def get_group_permissions(self, obj=None):
"""
Returns a list of permission strings that this user has through his/her
groups. This method queries all available auth backends. If an object
is passed in, only permissions matching this object are returned.
"""
permissions = set()
for backend in auth.get_backends():
if hasattr(backend, "get_group_permissions"):
permissions.update(backend.get_group_permissions(self, obj))
return permissions
def get_all_permissions(self, obj=None):
return _user_get_all_permissions(self, obj)
def has_perm(self, perm, obj=None):
"""
Returns True if the user has the specified permission. This method
queries all available auth backends, but returns immediately if any
backend returns True. Thus, a user who has permission from a single
auth backend is assumed to have permission in general. If an object is
provided, permissions for this specific object are checked.
"""
# Active superusers have all permissions.
if self.is_active and self.is_superuser:
return True
# Otherwise we need to check the backends.
return _user_has_perm(self, perm, obj)
def has_perms(self, perm_list, obj=None):
"""
Returns True if the user has each of the specified permissions. If
object is passed, it checks if the user has all required perms for this
object.
"""
for perm in perm_list:
if not self.has_perm(perm, obj):
return False
return True
def has_module_perms(self, app_label):
"""
Returns True if the user has any permissions in the given app label.
Uses pretty much the same logic as has_perm, above.
"""
# Active superusers have all permissions.
if self.is_active and self.is_superuser:
return True
return _user_has_module_perms(self, app_label)
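# Hedged sketch (illustration only): has_perm() short-circuits for active
# superusers; otherwise every configured authentication backend is consulted
# in turn, as implemented above. Never called here.
def _demo_permission_checks(user):
    return user.has_module_perms('auth') and user.has_perms(
        ['auth.add_user', 'auth.change_user'])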
class AbstractUser(AbstractBaseUser, PermissionsMixin):
"""
An abstract base class implementing a fully featured User model with
admin-compliant permissions.
Username, password and email are required. Other fields are optional.
"""
username = models.CharField(_('username'), max_length=30, unique=True,
help_text=_('Required. 30 characters or fewer. Letters, digits and '
'@/./+/-/_ only.'),
validators=[
validators.RegexValidator(r'^[\w.@+-]+$', _('Enter a valid username.'), 'invalid')
])
first_name = models.CharField(_('first name'), max_length=30, blank=True)
last_name = models.CharField(_('last name'), max_length=30, blank=True)
email = models.EmailField(_('email address'), blank=True)
is_staff = models.BooleanField(_('staff status'), default=False,
help_text=_('Designates whether the user can log into this admin '
'site.'))
is_active = models.BooleanField(_('active'), default=True,
help_text=_('Designates whether this user should be treated as '
'active. Unselect this instead of deleting accounts.'))
date_joined = models.DateTimeField(_('date joined'), default=timezone.now)
objects = UserManager()
USERNAME_FIELD = 'username'
REQUIRED_FIELDS = ['email']
class Meta:
verbose_name = _('user')
verbose_name_plural = _('users')
abstract = True
def get_full_name(self):
"""
Returns the first_name plus the last_name, with a space in between.
"""
full_name = '%s %s' % (self.first_name, self.last_name)
return full_name.strip()
def get_short_name(self):
"Returns the short name for the user."
return self.first_name
def email_user(self, subject, message, from_email=None, **kwargs):
"""
Sends an email to this User.
"""
send_mail(subject, message, from_email, [self.email], **kwargs)
class User(AbstractUser):
"""
Users within the Django authentication system are represented by this
model.
Username, password and email are required. Other fields are optional.
"""
class Meta(AbstractUser.Meta):
swappable = 'AUTH_USER_MODEL'
@python_2_unicode_compatible
class AnonymousUser(object):
id = None
pk = None
username = ''
is_staff = False
is_active = False
is_superuser = False
_groups = EmptyManager(Group)
_user_permissions = EmptyManager(Permission)
def __init__(self):
pass
def __str__(self):
return 'AnonymousUser'
def __eq__(self, other):
return isinstance(other, self.__class__)
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return 1 # instances always return the same hash value
def save(self):
raise NotImplementedError("Django doesn't provide a DB representation for AnonymousUser.")
def delete(self):
raise NotImplementedError("Django doesn't provide a DB representation for AnonymousUser.")
def set_password(self, raw_password):
raise NotImplementedError("Django doesn't provide a DB representation for AnonymousUser.")
def check_password(self, raw_password):
raise NotImplementedError("Django doesn't provide a DB representation for AnonymousUser.")
def _get_groups(self):
return self._groups
groups = property(_get_groups)
def _get_user_permissions(self):
return self._user_permissions
user_permissions = property(_get_user_permissions)
def get_group_permissions(self, obj=None):
return set()
def get_all_permissions(self, obj=None):
return _user_get_all_permissions(self, obj=obj)
def has_perm(self, perm, obj=None):
return _user_has_perm(self, perm, obj=obj)
def has_perms(self, perm_list, obj=None):
for perm in perm_list:
if not self.has_perm(perm, obj):
return False
return True
def has_module_perms(self, module):
return _user_has_module_perms(self, module)
def is_anonymous(self):
return True
def is_authenticated(self):
return False
|
postlund/home-assistant | refs/heads/dev | homeassistant/components/input_text/__init__.py | 2 | """Support to enter a value into a text box."""
import logging
import typing
import voluptuous as vol
from homeassistant.const import (
ATTR_EDITABLE,
ATTR_MODE,
CONF_ICON,
CONF_ID,
CONF_MODE,
CONF_NAME,
CONF_UNIT_OF_MEASUREMENT,
SERVICE_RELOAD,
)
from homeassistant.core import callback
from homeassistant.helpers import collection
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.restore_state import RestoreEntity
import homeassistant.helpers.service
from homeassistant.helpers.storage import Store
from homeassistant.helpers.typing import ConfigType, HomeAssistantType, ServiceCallType
_LOGGER = logging.getLogger(__name__)
DOMAIN = "input_text"
ENTITY_ID_FORMAT = DOMAIN + ".{}"
CONF_INITIAL = "initial"
CONF_MIN = "min"
CONF_MIN_VALUE = 0
CONF_MAX = "max"
CONF_MAX_VALUE = 100
CONF_PATTERN = "pattern"
CONF_VALUE = "value"
MODE_TEXT = "text"
MODE_PASSWORD = "password"
ATTR_VALUE = CONF_VALUE
ATTR_MIN = "min"
ATTR_MAX = "max"
ATTR_PATTERN = CONF_PATTERN
SERVICE_SET_VALUE = "set_value"
STORAGE_KEY = DOMAIN
STORAGE_VERSION = 1
CREATE_FIELDS = {
vol.Required(CONF_NAME): vol.All(str, vol.Length(min=1)),
vol.Optional(CONF_MIN, default=CONF_MIN_VALUE): vol.Coerce(int),
vol.Optional(CONF_MAX, default=CONF_MAX_VALUE): vol.Coerce(int),
vol.Optional(CONF_INITIAL, default=""): cv.string,
vol.Optional(CONF_ICON): cv.icon,
vol.Optional(CONF_UNIT_OF_MEASUREMENT): cv.string,
vol.Optional(CONF_PATTERN): cv.string,
vol.Optional(CONF_MODE, default=MODE_TEXT): vol.In([MODE_TEXT, MODE_PASSWORD]),
}
UPDATE_FIELDS = {
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_MIN): vol.Coerce(int),
vol.Optional(CONF_MAX): vol.Coerce(int),
vol.Optional(CONF_INITIAL): cv.string,
vol.Optional(CONF_ICON): cv.icon,
vol.Optional(CONF_UNIT_OF_MEASUREMENT): cv.string,
vol.Optional(CONF_PATTERN): cv.string,
vol.Optional(CONF_MODE): vol.In([MODE_TEXT, MODE_PASSWORD]),
}
def _cv_input_text(cfg):
"""Configure validation helper for input box (voluptuous)."""
minimum = cfg.get(CONF_MIN)
maximum = cfg.get(CONF_MAX)
if minimum > maximum:
raise vol.Invalid(
f"Max len ({minimum}) is not greater than min len ({maximum})"
)
state = cfg.get(CONF_INITIAL)
if state is not None and (len(state) < minimum or len(state) > maximum):
raise vol.Invalid(
f"Initial value {state} length not in range {minimum}-{maximum}"
)
return cfg
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: cv.schema_with_slug_keys(
vol.Any(
vol.All(
{
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_MIN, default=CONF_MIN_VALUE): vol.Coerce(int),
vol.Optional(CONF_MAX, default=CONF_MAX_VALUE): vol.Coerce(int),
vol.Optional(CONF_INITIAL, ""): cv.string,
vol.Optional(CONF_ICON): cv.icon,
vol.Optional(CONF_UNIT_OF_MEASUREMENT): cv.string,
vol.Optional(CONF_PATTERN): cv.string,
vol.Optional(CONF_MODE, default=MODE_TEXT): vol.In(
[MODE_TEXT, MODE_PASSWORD]
),
},
_cv_input_text,
),
None,
)
)
},
extra=vol.ALLOW_EXTRA,
)
RELOAD_SERVICE_SCHEMA = vol.Schema({})
async def async_setup(hass: HomeAssistantType, config: ConfigType) -> bool:
"""Set up an input text."""
component = EntityComponent(_LOGGER, DOMAIN, hass)
id_manager = collection.IDManager()
yaml_collection = collection.YamlCollection(
logging.getLogger(f"{__name__}.yaml_collection"), id_manager
)
collection.attach_entity_component_collection(
component, yaml_collection, InputText.from_yaml
)
storage_collection = InputTextStorageCollection(
Store(hass, STORAGE_VERSION, STORAGE_KEY),
logging.getLogger(f"{__name__}.storage_collection"),
id_manager,
)
collection.attach_entity_component_collection(
component, storage_collection, InputText
)
await yaml_collection.async_load(
[{CONF_ID: id_, **(conf or {})} for id_, conf in config.get(DOMAIN, {}).items()]
)
await storage_collection.async_load()
collection.StorageCollectionWebsocket(
storage_collection, DOMAIN, DOMAIN, CREATE_FIELDS, UPDATE_FIELDS
).async_setup(hass)
collection.attach_entity_registry_cleaner(hass, DOMAIN, DOMAIN, yaml_collection)
collection.attach_entity_registry_cleaner(hass, DOMAIN, DOMAIN, storage_collection)
async def reload_service_handler(service_call: ServiceCallType) -> None:
"""Reload yaml entities."""
conf = await component.async_prepare_reload(skip_reset=True)
if conf is None:
conf = {DOMAIN: {}}
await yaml_collection.async_load(
[{CONF_ID: id_, **(cfg or {})} for id_, cfg in conf.get(DOMAIN, {}).items()]
)
homeassistant.helpers.service.async_register_admin_service(
hass,
DOMAIN,
SERVICE_RELOAD,
reload_service_handler,
schema=RELOAD_SERVICE_SCHEMA,
)
component.async_register_entity_service(
SERVICE_SET_VALUE, {vol.Required(ATTR_VALUE): cv.string}, "async_set_value"
)
return True
class InputTextStorageCollection(collection.StorageCollection):
"""Input storage based collection."""
CREATE_SCHEMA = vol.Schema(vol.All(CREATE_FIELDS, _cv_input_text))
UPDATE_SCHEMA = vol.Schema(UPDATE_FIELDS)
async def _process_create_data(self, data: typing.Dict) -> typing.Dict:
"""Validate the config is valid."""
return self.CREATE_SCHEMA(data)
@callback
def _get_suggested_id(self, info: typing.Dict) -> str:
"""Suggest an ID based on the config."""
return info[CONF_NAME]
async def _update_data(self, data: dict, update_data: typing.Dict) -> typing.Dict:
"""Return a new updated data object."""
update_data = self.UPDATE_SCHEMA(update_data)
return _cv_input_text({**data, **update_data})
class InputText(RestoreEntity):
"""Represent a text box."""
def __init__(self, config: typing.Dict):
"""Initialize a text input."""
self._config = config
self.editable = True
self._current_value = config.get(CONF_INITIAL)
@classmethod
def from_yaml(cls, config: typing.Dict) -> "InputText":
"""Return entity instance initialized from yaml storage."""
# set defaults for empty config
config = {
CONF_MAX: CONF_MAX_VALUE,
CONF_MIN: CONF_MIN_VALUE,
CONF_MODE: MODE_TEXT,
**config,
}
input_text = cls(config)
input_text.entity_id = ENTITY_ID_FORMAT.format(config[CONF_ID])
input_text.editable = False
return input_text
@property
def should_poll(self):
"""If entity should be polled."""
return False
@property
def name(self):
"""Return the name of the text input entity."""
return self._config.get(CONF_NAME)
@property
def icon(self):
"""Return the icon to be used for this entity."""
return self._config.get(CONF_ICON)
@property
def _maximum(self) -> int:
"""Return max len of the text."""
return self._config[CONF_MAX]
@property
def _minimum(self) -> int:
"""Return min len of the text."""
return self._config[CONF_MIN]
@property
def state(self):
"""Return the state of the component."""
return self._current_value
@property
def unit_of_measurement(self):
"""Return the unit the value is expressed in."""
return self._config.get(CONF_UNIT_OF_MEASUREMENT)
@property
def unique_id(self) -> typing.Optional[str]:
"""Return unique id for the entity."""
return self._config[CONF_ID]
@property
def state_attributes(self):
"""Return the state attributes."""
return {
ATTR_EDITABLE: self.editable,
ATTR_MIN: self._minimum,
ATTR_MAX: self._maximum,
ATTR_PATTERN: self._config.get(CONF_PATTERN),
ATTR_MODE: self._config[CONF_MODE],
}
async def async_added_to_hass(self):
"""Run when entity about to be added to hass."""
await super().async_added_to_hass()
if self._current_value is not None:
return
state = await self.async_get_last_state()
value = state and state.state
# Check against None because value can be 0
if value is not None and self._minimum <= len(value) <= self._maximum:
self._current_value = value
async def async_set_value(self, value):
"""Select new value."""
if len(value) < self._minimum or len(value) > self._maximum:
_LOGGER.warning(
"Invalid value: %s (length range %s - %s)",
value,
self._minimum,
self._maximum,
)
return
self._current_value = value
self.async_write_ha_state()
async def async_update_config(self, config: typing.Dict) -> None:
"""Handle when the config is updated."""
self._config = config
self.async_write_ha_state()
|
gurneyalex/odoo | refs/heads/13.0-improve_sale_coupon_perf | addons/point_of_sale/tests/test_pos_multiple_sale_accounts.py | 7 | import odoo
from odoo.addons.point_of_sale.tests.common import TestPoSCommon
@odoo.tests.tagged('post_install', '-at_install')
class TestPoSMultipleSaleAccounts(TestPoSCommon):
""" Test to orders containing products with different sale accounts
keywords/phrases: Different Income Accounts
In this test, two sale (income) accounts are involved:
self.sale_account -> default for products because it is in the category
self.other_sale_account -> manually set to self.product2
"""
def setUp(self):
super(TestPoSMultipleSaleAccounts, self).setUp()
self.config = self.basic_config
self.product1 = self.create_product(
'Product 1',
self.categ_basic,
lst_price=10.99,
standard_price=5.0,
tax_ids=self.taxes['tax7'].ids,
)
self.product2 = self.create_product(
'Product 2',
self.categ_basic,
lst_price=19.99,
standard_price=10.0,
tax_ids=self.taxes['tax10'].ids,
sale_account=self.other_sale_account,
)
self.product3 = self.create_product(
'Product 3',
self.categ_basic,
lst_price=30.99,
standard_price=15.0,
tax_ids=self.taxes['tax_group_7_10'].ids,
)
self.adjust_inventory([self.product1, self.product2, self.product3], [100, 50, 50])
def test_01_check_product_properties(self):
self.assertEqual(self.product2.property_account_income_id, self.other_sale_account, 'Income account for the product2 should be the other sale account.')
self.assertFalse(self.product1.property_account_income_id, msg='Income account for product1 should not be set.')
self.assertFalse(self.product3.property_account_income_id, msg='Income account for product3 should not be set.')
self.assertEqual(self.product1.categ_id.property_account_income_categ_id, self.sale_account)
self.assertEqual(self.product3.categ_id.property_account_income_categ_id, self.sale_account)
def test_02_orders_without_invoice(self):
""" orders without invoice
Orders
======
+---------+----------+-----------+----------+-----+---------+--------------------------+--------+
| order | payments | invoiced? | product | qty | untaxed | tax | total |
+---------+----------+-----------+----------+-----+---------+--------------------------+--------+
| order 1 | cash | no | product1 | 10 | 109.9 | 7.69 [7%] | 117.59 |
| | | | product2 | 10 | 181.73 | 18.17 [10%] | 199.9 |
| | | | product3 | 10 | 281.73 | 19.72 [7%] + 28.17 [10%] | 329.62 |
+---------+----------+-----------+----------+-----+---------+--------------------------+--------+
| order 2 | cash | no | product1 | 5 | 54.95 | 3.85 [7%] | 58.80 |
| | | | product2 | 5 | 90.86 | 9.09 [10%] | 99.95 |
+---------+----------+-----------+----------+-----+---------+--------------------------+--------+
| order 3 | bank | no | product2 | 5 | 90.86 | 9.09 [10%] | 99.95 |
| | | | product3 | 5 | 140.86 | 9.86 [7%] + 14.09 [10%] | 164.81 |
+---------+----------+-----------+----------+-----+---------+--------------------------+--------+
Expected Result
===============
+---------------------+---------+
| account | balance |
+---------------------+---------+
| sale_account | -164.85 | (for the 7% base amount)
| sale_account | -422.59 | (for the 7+10% base amount)
| other_sale_account | -363.45 |
| tax 7% | -41.12 |
| tax 10% | -78.61 |
| pos receivable bank | 264.76 |
| pos receivable cash | 805.86 |
+---------------------+---------+
| Total balance | 0.00 |
+---------------------+---------+
"""
self.open_new_session()
# create orders
orders = []
orders.append(self.create_ui_order_data([(self.product1, 10), (self.product2, 10), (self.product3, 10)]))
orders.append(self.create_ui_order_data([(self.product1, 5), (self.product2, 5)]))
orders.append(self.create_ui_order_data([(self.product2, 5), (self.product3, 5)], payments=[(self.bank_pm, 264.76)]))
# sync orders
order = self.env['pos.order'].create_from_ui(orders)
# check values before closing the session
self.assertEqual(3, self.pos_session.order_count)
orders_total = sum(order.amount_total for order in self.pos_session.order_ids)
self.assertAlmostEqual(orders_total, self.pos_session.total_payments_amount, msg='Total order amount should be equal to the total payment amount.')
# close the session
self.pos_session.action_pos_session_validate()
# check values after the session is closed
session_move = self.pos_session.move_id
sale_account_lines = session_move.line_ids.filtered(lambda line: line.account_id == self.sale_account)
for balance, amount in zip(sorted(sale_account_lines.mapped('balance')), sorted([-164.85, -422.59])):
self.assertAlmostEqual(balance, amount)
other_sale_account_line = session_move.line_ids.filtered(lambda line: line.account_id == self.other_sale_account)
self.assertAlmostEqual(other_sale_account_line.balance, -363.45)
receivable_line_bank = session_move.line_ids.filtered(lambda line: self.bank_pm.name in line.name)
self.assertAlmostEqual(receivable_line_bank.balance, 264.76)
receivable_line_cash = session_move.line_ids.filtered(lambda line: self.cash_pm.name in line.name)
self.assertAlmostEqual(receivable_line_cash.balance, 805.86)
manually_calculated_taxes = (-41.12, -78.61)
tax_lines = session_move.line_ids.filtered(lambda line: line.account_id == self.tax_received_account)
self.assertAlmostEqual(sum(manually_calculated_taxes), sum(tax_lines.mapped('balance')))
for t1, t2 in zip(sorted(manually_calculated_taxes), sorted(tax_lines.mapped('balance'))):
self.assertAlmostEqual(t1, t2, msg='Taxes should be correctly combined.')
self.assertTrue(receivable_line_cash.full_reconcile_id)
def test_03_orders_with_invoice(self):
""" orders with invoice
Orders
======
+---------+----------+---------------+----------+-----+---------+--------------------------+--------+
| order | payments | invoiced? | product | qty | untaxed | tax | total |
+---------+----------+---------------+----------+-----+---------+--------------------------+--------+
| order 1 | cash | no | product1 | 10 | 109.9 | 7.69 [7%] | 117.59 |
| | | | product2 | 10 | 181.73 | 18.17 [10%] | 199.9 |
| | | | product3 | 10 | 281.73 | 19.72 [7%] + 28.17 [10%] | 329.62 |
+---------+----------+---------------+----------+-----+---------+--------------------------+--------+
| order 2 | bank | no | product1 | 5 | 54.95 | 3.85 [7%] | 58.80 |
| | | | product2 | 5 | 90.86 | 9.09 [10%] | 99.95 |
+---------+----------+---------------+----------+-----+---------+--------------------------+--------+
| order 3 | bank | yes, customer | product2 | 5 | 90.86 | 9.09 [10%] | 99.95 |
| | | | product3 | 5 | 140.86 | 9.86 [7%] + 14.09 [10%] | 164.81 |
+---------+----------+---------------+----------+-----+---------+--------------------------+--------+
Expected Result
===============
+---------------------+---------+
| account | balance |
+---------------------+---------+
| sale_account | -164.85 | (for the 7% base amount)
| sale_account | -281.73 | (for the 7+10% base amount)
| other_sale_account | -272.59 |
| tax 7% | -31.26 |
| tax 10% | -55.43 |
| pos receivable cash | 647.11 |
| pos receivable bank | 423.51 |
| receivable | -264.76 |
+---------------------+---------+
| Total balance | 0.00 |
+---------------------+---------+
"""
self.open_new_session()
# create orders
orders = []
orders.append(self.create_ui_order_data([(self.product1, 10), (self.product2, 10), (self.product3, 10)]))
orders.append(self.create_ui_order_data(
[(self.product1, 5), (self.product2, 5)],
payments=[(self.bank_pm, 158.75)],
))
orders.append(self.create_ui_order_data(
[(self.product2, 5), (self.product3, 5)],
payments=[(self.bank_pm, 264.76)],
customer=self.customer,
is_invoiced=True,
uid='09876-098-0987',
))
# sync orders
order = self.env['pos.order'].create_from_ui(orders)
# check values before closing the session
self.assertEqual(3, self.pos_session.order_count)
orders_total = sum(order.amount_total for order in self.pos_session.order_ids)
self.assertAlmostEqual(orders_total, self.pos_session.total_payments_amount, msg='Total order amount should be equal to the total payment amount.')
# check if there is one invoiced order
self.assertEqual(len(self.pos_session.order_ids.filtered(lambda order: order.state == 'invoiced')), 1, 'There should only be one invoiced order.')
# close the session
self.pos_session.action_pos_session_validate()
# check values after the session is closed
session_move = self.pos_session.move_id
sale_account_lines = session_move.line_ids.filtered(lambda line: line.account_id == self.sale_account)
for balance, amount in zip(sorted(sale_account_lines.mapped('balance')), sorted([-164.85, -281.73])):
self.assertAlmostEqual(balance, amount)
other_sale_account_line = session_move.line_ids.filtered(lambda line: line.account_id == self.other_sale_account)
self.assertAlmostEqual(other_sale_account_line.balance, -272.59)
pos_receivable_line_bank = session_move.line_ids.filtered(lambda line: self.bank_pm.name in line.name)
self.assertAlmostEqual(pos_receivable_line_bank.balance, 423.51)
pos_receivable_line_cash = session_move.line_ids.filtered(lambda line: self.cash_pm.name in line.name)
self.assertAlmostEqual(pos_receivable_line_cash.balance, 647.11)
manually_calculated_taxes = (-31.26, -55.43)
tax_lines = session_move.line_ids.filtered(lambda line: line.account_id == self.tax_received_account)
self.assertAlmostEqual(sum(manually_calculated_taxes), sum(tax_lines.mapped('balance')))
for t1, t2 in zip(sorted(manually_calculated_taxes), sorted(tax_lines.mapped('balance'))):
self.assertAlmostEqual(t1, t2, msg='Taxes should be correctly combined.')
receivable_line = session_move.line_ids.filtered(lambda line: line.account_id == self.receivable_account)
self.assertAlmostEqual(receivable_line.balance, -264.76)
self.assertTrue(pos_receivable_line_cash.full_reconcile_id)
self.assertTrue(receivable_line.full_reconcile_id)
|
jack8daniels2/yabgp | refs/heads/master | yabgp/tests/unit/core/test_protocol.py | 4 | # Copyright 2015 Cisco Systems, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Test BGP protocol
"""
|
crimsonthunder/kernel_samsung_trlte_5.1.1 | refs/heads/COI3 | scripts/rt-tester/rt-tester.py | 11005 | #!/usr/bin/python
#
# rt-mutex tester
#
# (C) 2006 Thomas Gleixner <tglx@linutronix.de>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
import os
import sys
import getopt
import shutil
import string
# Globals
quiet = 0
test = 0
comments = 0
sysfsprefix = "/sys/devices/system/rttest/rttest"
statusfile = "/status"
commandfile = "/command"
# Command opcodes
cmd_opcodes = {
"schedother" : "1",
"schedfifo" : "2",
"lock" : "3",
"locknowait" : "4",
"lockint" : "5",
"lockintnowait" : "6",
"lockcont" : "7",
"unlock" : "8",
"signal" : "11",
"resetevent" : "98",
"reset" : "99",
}
test_opcodes = {
"prioeq" : ["P" , "eq" , None],
"priolt" : ["P" , "lt" , None],
"priogt" : ["P" , "gt" , None],
"nprioeq" : ["N" , "eq" , None],
"npriolt" : ["N" , "lt" , None],
"npriogt" : ["N" , "gt" , None],
"unlocked" : ["M" , "eq" , 0],
"trylock" : ["M" , "eq" , 1],
"blocked" : ["M" , "eq" , 2],
"blockedwake" : ["M" , "eq" , 3],
"locked" : ["M" , "eq" , 4],
"opcodeeq" : ["O" , "eq" , None],
"opcodelt" : ["O" , "lt" , None],
"opcodegt" : ["O" , "gt" , None],
"eventeq" : ["E" , "eq" , None],
"eventlt" : ["E" , "lt" , None],
"eventgt" : ["E" , "gt" , None],
}
# Print usage information
def usage():
print "rt-tester.py <-c -h -q -t> <testfile>"
print " -c display comments after first command"
print " -h help"
print " -q quiet mode"
print " -t test mode (syntax check)"
print " testfile: read test specification from testfile"
print " otherwise from stdin"
return
# Print progress when not in quiet mode
def progress(str):
if not quiet:
print str
# Analyse a status value
def analyse(val, top, arg):
intval = int(val)
if top[0] == "M":
intval = intval / (10 ** int(arg))
intval = intval % 10
argval = top[2]
elif top[0] == "O":
argval = int(cmd_opcodes.get(arg, arg))
else:
argval = int(arg)
# progress("%d %s %d" %(intval, top[1], argval))
if top[1] == "eq" and intval == argval:
return 1
if top[1] == "lt" and intval < argval:
return 1
if top[1] == "gt" and intval > argval:
return 1
return 0
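# Example (illustrative): analyse("21", ["M", "eq", 2], "1") isolates digit 1
# of the mutex status value (21 -> 2), compares it with the expected state 2
# ("blocked") and returns 1.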
# Parse the commandline
try:
(options, arguments) = getopt.getopt(sys.argv[1:],'chqt')
except getopt.GetoptError, ex:
usage()
sys.exit(1)
# Parse commandline options
for option, value in options:
if option == "-c":
comments = 1
elif option == "-q":
quiet = 1
elif option == "-t":
test = 1
elif option == '-h':
usage()
sys.exit(0)
# Select the input source
if arguments:
try:
fd = open(arguments[0])
except Exception,ex:
sys.stderr.write("File not found %s\n" %(arguments[0]))
sys.exit(1)
else:
fd = sys.stdin
linenr = 0
# Read the test patterns
while 1:
linenr = linenr + 1
line = fd.readline()
if not len(line):
break
line = line.strip()
parts = line.split(":")
if not parts or len(parts) < 1:
continue
if len(parts[0]) == 0:
continue
if parts[0].startswith("#"):
if comments > 1:
progress(line)
continue
if comments == 1:
comments = 2
progress(line)
cmd = parts[0].strip().lower()
opc = parts[1].strip().lower()
tid = parts[2].strip()
dat = parts[3].strip()
try:
# Test or wait for a status value
if cmd == "t" or cmd == "w":
testop = test_opcodes[opc]
fname = "%s%s%s" %(sysfsprefix, tid, statusfile)
if test:
print fname
continue
while 1:
query = 1
fsta = open(fname, 'r')
status = fsta.readline().strip()
fsta.close()
stat = status.split(",")
for s in stat:
s = s.strip()
if s.startswith(testop[0]):
# Separate status value
val = s[2:].strip()
query = analyse(val, testop, dat)
break
if query or cmd == "t":
break
progress(" " + status)
if not query:
sys.stderr.write("Test failed in line %d\n" %(linenr))
sys.exit(1)
# Issue a command to the tester
elif cmd == "c":
cmdnr = cmd_opcodes[opc]
# Build command string and sys filename
cmdstr = "%s:%s" %(cmdnr, dat)
fname = "%s%s%s" %(sysfsprefix, tid, commandfile)
if test:
print fname
continue
fcmd = open(fname, 'w')
fcmd.write(cmdstr)
fcmd.close()
except Exception,ex:
sys.stderr.write(str(ex))
sys.stderr.write("\nSyntax error in line %d\n" %(linenr))
if not test:
fd.close()
sys.exit(1)
# Normal exit pass
print "Pass"
sys.exit(0)
|
rentongzhang/servo | refs/heads/master | python/mozlog/mozlog/structured/commandline.py | 39 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import sys
import os
import optparse
from collections import defaultdict
from structuredlog import StructuredLogger, set_default_logger
import handlers
import formatters
log_formatters = {
'raw': (formatters.JSONFormatter, "Raw structured log messages"),
'unittest': (formatters.UnittestFormatter, "Unittest style output"),
'xunit': (formatters.XUnitFormatter, "xUnit compatible XML"),
'html': (formatters.HTMLFormatter, "HTML report"),
'mach': (formatters.MachFormatter, "Human-readable output"),
'tbpl': (formatters.TbplFormatter, "TBPL style log format"),
}
TEXT_FORMATTERS = ('raw', 'mach')
"""a subset of formatters for non test harnesses related applications"""
def level_filter_wrapper(formatter, level):
return handlers.LogLevelFilter(formatter, level)
def verbose_wrapper(formatter, verbose):
formatter.verbose = verbose
return formatter
def buffer_handler_wrapper(handler, buffer_limit):
if buffer_limit == "UNLIMITED":
buffer_limit = None
else:
buffer_limit = int(buffer_limit)
return handlers.BufferingLogFilter(handler, buffer_limit)
formatter_option_defaults = {
'verbose': False,
'level': 'info',
}
fmt_options = {
# <option name>: (<wrapper function>, description, <applicable formatters>, action)
# "action" is used by the commandline parser in use.
'verbose': (verbose_wrapper,
"Enables verbose mode for the given formatter.",
["mach"], "store_true"),
'level': (level_filter_wrapper,
"A least log level to subscribe to for the given formatter (debug, info, error, etc.)",
["mach", "tbpl"], "store"),
'buffer': (buffer_handler_wrapper,
"If specified, enables message buffering at the given buffer size limit.",
["mach", "tbpl"], "store"),
}
def log_file(name):
if name == "-":
return sys.stdout
# ensure we have a correct dirpath by using realpath
dirpath = os.path.dirname(os.path.realpath(name))
if not os.path.exists(dirpath):
os.makedirs(dirpath)
return open(name, "w")
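# Example (illustrative): log_file("-") returns sys.stdout, while
# log_file("logs/run.log") creates the "logs" directory if it does not exist
# and returns a writable file object.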
def add_logging_group(parser, include_formatters=None):
"""
Add logging options to an argparse ArgumentParser or
optparse OptionParser.
Each formatter has a corresponding option of the form --log-{name}
where {name} is the name of the formatter. The option takes a value
which is either a filename or "-" to indicate stdout.
:param parser: The ArgumentParser or OptionParser object that should have
logging options added.
:param include_formatters: List of formatter names that should be included
in the option group. Default to None, meaning
all the formatters are included. A common use
of this option is to specify
:data:`TEXT_FORMATTERS` to include only the
most useful formatters for a command line tool
that is not related to test harnesses.
"""
group_name = "Output Logging"
group_description = ("Each option represents a possible logging format "
"and takes a filename to write that format to, "
"or '-' to write to stdout.")
if include_formatters is None:
include_formatters = log_formatters.keys()
if isinstance(parser, optparse.OptionParser):
group = optparse.OptionGroup(parser,
group_name,
group_description)
parser.add_option_group(group)
opt_log_type = 'str'
group_add = group.add_option
else:
group = parser.add_argument_group(group_name,
group_description)
opt_log_type = log_file
group_add = group.add_argument
for name, (cls, help_str) in log_formatters.iteritems():
if name in include_formatters:
group_add("--log-" + name, action="append", type=opt_log_type,
help=help_str)
for optname, (cls, help_str, formatters, action) in fmt_options.iteritems():
for fmt in formatters:
# make sure fmt is in log_formatters and is accepted
if fmt in log_formatters and fmt in include_formatters:
group_add("--log-%s-%s" % (fmt, optname), action=action,
help=help_str, default=None)
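# Illustrative command lines (option names derive from log_formatters and
# fmt_options above): --log-raw=- streams JSON to stdout, --log-mach=out.log
# writes human-readable output to a file, and --log-mach-level=debug applies
# a level filter to the mach formatter.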
def setup_handlers(logger, formatters, formatter_options):
"""
Add handlers to the given logger according to the formatters and
options provided.
:param logger: The logger configured by this function.
:param formatters: A dict of {formatter, [streams]} to use in handlers.
:param formatter_options: a dict of {formatter: {option: value}} to
use when configuring formatters.
"""
unused_options = set(formatter_options.keys()) - set(formatters.keys())
if unused_options:
msg = ("Options specified for unused formatter(s) (%s) have no effect" %
list(unused_options))
raise ValueError(msg)
for fmt, streams in formatters.iteritems():
formatter_cls = log_formatters[fmt][0]
formatter = formatter_cls()
handler_wrapper, handler_option = None, ""
for option, value in formatter_options[fmt].iteritems():
if option == "buffer":
handler_wrapper, handler_option = fmt_options[option][0], value
else:
formatter = fmt_options[option][0](formatter, value)
for value in streams:
handler = handlers.StreamHandler(stream=value, formatter=formatter)
if handler_wrapper:
handler = handler_wrapper(handler, handler_option)
logger.add_handler(handler)
def setup_logging(suite, args, defaults=None):
"""
Configure a structuredlogger based on command line arguments.
The created structuredlogger will also be set as the default logger, and
can be retrieved with :py:func:`~mozlog.structured.structuredlog.get_default_logger`.
:param suite: The name of the testsuite being run
:param args: A dictionary of {argument_name:value} produced from
parsing the command line arguments for the application
:param defaults: A dictionary of {formatter name: output stream} to apply
when there is no logging supplied on the command line. If
this isn't supplied, reasonable defaults are chosen
(coloured mach formatting if stdout is a terminal, or raw
logs otherwise).
:rtype: StructuredLogger
"""
logger = StructuredLogger(suite)
# Keep track of any options passed for formatters.
formatter_options = defaultdict(lambda: formatter_option_defaults.copy())
# Keep track of formatters and list of streams specified.
formatters = defaultdict(list)
found = False
found_stdout_logger = False
if not hasattr(args, 'iteritems'):
args = vars(args)
if defaults is None:
if sys.__stdout__.isatty():
defaults = {"mach": sys.stdout}
else:
defaults = {"raw": sys.stdout}
for name, values in args.iteritems():
parts = name.split('_')
if len(parts) > 3:
continue
# Our args will be ['log', <formatter>] or ['log', <formatter>, <option>].
if parts[0] == 'log' and values is not None:
if len(parts) == 1 or parts[1] not in log_formatters:
continue
if len(parts) == 2:
_, formatter = parts
for value in values:
found = True
if isinstance(value, basestring):
value = log_file(value)
if value == sys.stdout:
found_stdout_logger = True
formatters[formatter].append(value)
if len(parts) == 3:
_, formatter, opt = parts
formatter_options[formatter][opt] = values
# If there is no user-specified logging, go with the default options.
if not found:
for name, value in defaults.iteritems():
formatters[name].append(value)
elif not found_stdout_logger and sys.stdout in defaults.values():
for name, value in defaults.iteritems():
if value == sys.stdout:
formatters[name].append(value)
setup_handlers(logger, formatters, formatter_options)
set_default_logger(logger)
return logger
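# Illustrative use (argparse setup assumed): after add_logging_group(parser)
# and args = parser.parse_args(), a harness would call
# logger = setup_logging("my-suite", args, {"raw": sys.stdout})
# and later retrieve the same logger via get_default_logger().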
|
jolevq/odoopub | refs/heads/master | addons/website_blog/wizard/__init__.py | 373 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import document_page_show_diff
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
sbtlaarzc/vispy | refs/heads/master | examples/basics/plotting/plot.py | 13 | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
"""
Plot data with different styles
"""
import numpy as np
from vispy import plot as vp
fig = vp.Fig(size=(600, 500), show=False)
# Plot the target square wave shape
x = np.linspace(0, 10, 1000)
y = np.zeros(1000)
y[1:500] = 1
y[500:-1] = -1
line = fig[0, 0].plot((x, y), width=3, color='k',
title='Square Wave Fourier Expansion', xlabel='x',
ylabel='4/π Σ[ 1/n sin(nπx/L) | n=1,3,5,...]')
y = np.zeros(1000)
L = 5
colors = [(0.8, 0, 0, 1),
(0.8, 0, 0.8, 1),
(0, 0, 1.0, 1),
(0, 0.7, 0, 1), ]
plot_nvals = [1, 3, 7, 31]
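# The loop below accumulates the odd-harmonic partial sums of the square
# wave's Fourier series, y = (4/pi) * sum((1/n) * sin(n*pi*x/L)) over odd n,
# plotting the running sum whenever n reaches one of plot_nvals.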
for i in range(16):
n = i * 2 + 1
y += (4. / np.pi) * (1. / n) * np.sin(n * np.pi * x / L)
if n in plot_nvals:
l = fig[0, 0].plot((x, y), color=colors[plot_nvals.index(n)], width=2)
l.update_gl_state(depth_test=False)
labelgrid = fig[0, 0].view.add_grid(margin=10)
hspacer = vp.Widget()
hspacer.stretch = (6, 1)
labelgrid.add_widget(hspacer, row=0, col=0)
box = vp.Widget(bgcolor=(1, 1, 1, 0.6), border_color='k')
labelgrid.add_widget(box, row=0, col=1)
vspacer = vp.Widget()
vspacer.stretch = (1, 2)
labelgrid.add_widget(vspacer, row=1, col=1)
labels = [vp.Label('n=%d' % plot_nvals[i], color=colors[i], anchor_x='left')
for i in range(len(plot_nvals))]
boxgrid = box.add_grid()
for i, label in enumerate(labels):
boxgrid.add_widget(label, row=i, col=0)
hspacer2 = vp.Widget()
hspacer2.stretch = (4, 1)
boxgrid.add_widget(hspacer2, row=0, col=1)
grid = vp.visuals.GridLines(color=(0, 0, 0, 0.5))
grid.set_gl_state('translucent')
fig[0, 0].view.add(grid)
if __name__ == '__main__':
fig.show(run=True)
|
thanhacun/odoo | refs/heads/8.0 | addons/resource/resource.py | 174 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-TODAY OpenERP SA (http://www.openerp.com)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import datetime
from dateutil import rrule
from dateutil.relativedelta import relativedelta
from operator import itemgetter
from openerp import tools
from openerp.osv import fields, osv
from openerp.tools.float_utils import float_compare
from openerp.tools.translate import _
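# Helper assumed from the original resource module (referenced below by
# interval_schedule_hours, get_working_hours_of_date and _schedule_hours,
# but missing from this excerpt): converts a timedelta to seconds.
def seconds(td):
    assert isinstance(td, datetime.timedelta)
    return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10**6) / 10**6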
class resource_calendar(osv.osv):
""" Calendar model for a resource. It has
- attendance_ids: list of resource.calendar.attendance that are a working
interval in a given weekday.
- leave_ids: list of leaves linked to this calendar. A leave can be general
or linked to a specific resource, depending on its resource_id.
All methods in this class use intervals. An interval is a tuple holding
(begin_datetime, end_datetime). A list of intervals is therefore a list of
tuples, holding several intervals of work or leaves. """
_name = "resource.calendar"
_description = "Resource Calendar"
_columns = {
'name': fields.char("Name", required=True),
'company_id': fields.many2one('res.company', 'Company', required=False),
'attendance_ids': fields.one2many('resource.calendar.attendance', 'calendar_id', 'Working Time', copy=True),
'manager': fields.many2one('res.users', 'Workgroup Manager'),
'leave_ids': fields.one2many(
'resource.calendar.leaves', 'calendar_id', 'Leaves',
help=''
),
}
_defaults = {
'company_id': lambda self, cr, uid, context: self.pool.get('res.company')._company_default_get(cr, uid, 'resource.calendar', context=context)
}
# --------------------------------------------------
# Utility methods
# --------------------------------------------------
def interval_clean(self, intervals):
""" Utility method that sorts and removes overlapping inside datetime
intervals. The intervals are sorted based on increasing starting datetime.
Overlapping intervals are merged into a single one.
:param list intervals: list of intervals; each interval is a tuple
(datetime_from, datetime_to)
:return list cleaned: list of sorted intervals without overlap """
intervals = sorted(intervals, key=itemgetter(0)) # sort on first datetime
cleaned = []
working_interval = None
while intervals:
current_interval = intervals.pop(0)
if not working_interval: # init
working_interval = [current_interval[0], current_interval[1]]
elif working_interval[1] < current_interval[0]: # interval is disjoint
cleaned.append(tuple(working_interval))
working_interval = [current_interval[0], current_interval[1]]
elif working_interval[1] < current_interval[1]: # union of greater intervals
working_interval[1] = current_interval[1]
if working_interval: # handle void lists
cleaned.append(tuple(working_interval))
return cleaned
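# Example (illustrative), with d = datetime.datetime:
# interval_clean([(d(2015, 1, 5, 8), d(2015, 1, 5, 12)),
# (d(2015, 1, 5, 11), d(2015, 1, 5, 13))])
# returns [(d(2015, 1, 5, 8), d(2015, 1, 5, 13))]: the overlap is merged.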
def interval_remove_leaves(self, interval, leave_intervals):
""" Utility method that remove leave intervals from a base interval:
- clean the leave intervals, to have an ordered list of not-overlapping
intervals
- initiate the current interval to be the base interval
- for each leave interval:
- finishing before the current interval: skip, go to next
- beginning after the current interval: skip and get out of the loop
because we are outside range (leaves are ordered)
- beginning within the current interval: close the current interval
and begin a new current interval that begins at the end of the leave
interval
- ending within the current interval: update the current interval begin
to match the leave interval ending
:param tuple interval: a tuple (beginning datetime, ending datetime) that
is the base interval from which the leave intervals
will be removed
:param list leave_intervals: a list of tuples (beginning datetime, ending datetime)
that are intervals to remove from the base interval
:return list intervals: a list of tuples (begin datetime, end datetime)
that are the remaining valid intervals """
if not interval:
return interval
if leave_intervals is None:
leave_intervals = []
intervals = []
leave_intervals = self.interval_clean(leave_intervals)
current_interval = [interval[0], interval[1]]
for leave in leave_intervals:
if leave[1] <= current_interval[0]:
continue
if leave[0] >= current_interval[1]:
break
if current_interval[0] < leave[0] < current_interval[1]:
current_interval[1] = leave[0]
intervals.append((current_interval[0], current_interval[1]))
current_interval = [leave[1], interval[1]]
# if current_interval[0] <= leave[1] <= current_interval[1]:
if current_interval[0] <= leave[1]:
current_interval[0] = leave[1]
if current_interval and current_interval[0] < interval[1]: # remove intervals moved outside base interval due to leaves
intervals.append((current_interval[0], current_interval[1]))
return intervals
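# Example (illustrative): removing a 12:00-13:00 leave from a 08:00-17:00
# working interval yields [(08:00, 12:00), (13:00, 17:00)].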
def interval_schedule_hours(self, intervals, hour, remove_at_end=True):
""" Schedule hours in intervals. The last matching interval is truncated
to match the specified hours.
It is possible to truncate the last interval at its beginning or ending.
Note that this does not reorder the given intervals; they should be
submitted in the desired order.
:param list intervals: a list of tuples (beginning datetime, ending datetime)
:param int/float hours: number of hours to schedule. It will be converted
into a timedelta, but should be submitted as an
int or float.
:param boolean remove_at_end: remove extra hours at the end of the last
matching interval. Otherwise, do it at the
beginning.
:return list results: a list of intervals. If the number of hours to schedule
is greater than the possible scheduling in the intervals, no extra-scheduling
is done, and results == intervals. """
results = []
res = datetime.timedelta()
limit = datetime.timedelta(hours=hour)
for interval in intervals:
res += interval[1] - interval[0]
if res > limit and remove_at_end:
interval = (interval[0], interval[1] + relativedelta(seconds=seconds(limit-res)))
elif res > limit:
interval = (interval[0] + relativedelta(seconds=seconds(res-limit)), interval[1])
results.append(interval)
if res > limit:
break
return results
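# Example (illustrative): scheduling 6 hours over [(08:00, 12:00),
# (13:00, 17:00)] keeps the first interval whole and truncates the second
# to (13:00, 15:00) when remove_at_end is True.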
# --------------------------------------------------
# Date and hours computation
# --------------------------------------------------
def get_attendances_for_weekdays(self, cr, uid, id, weekdays, context=None):
""" Given a list of weekdays, return matching resource.calendar.attendance"""
calendar = self.browse(cr, uid, id, context=context)
return [att for att in calendar.attendance_ids if int(att.dayofweek) in weekdays]
def get_weekdays(self, cr, uid, id, default_weekdays=None, context=None):
""" Return the list of weekdays that contain at least one working interval.
If no id is given (no calendar), return default weekdays. """
if id is None:
return default_weekdays if default_weekdays is not None else [0, 1, 2, 3, 4]
calendar = self.browse(cr, uid, id, context=context)
weekdays = set()
for attendance in calendar.attendance_ids:
weekdays.add(int(attendance.dayofweek))
return list(weekdays)
def get_next_day(self, cr, uid, id, day_date, context=None):
""" Get following date of day_date, based on resource.calendar. If no
calendar is provided, just return the next day.
:param int id: id of a resource.calendar. If not given, simply add one day
to the submitted date.
:param date day_date: current day as a date
:return date: next day of calendar, or just next day """
if not id:
return day_date + relativedelta(days=1)
weekdays = self.get_weekdays(cr, uid, id, context)
base_index = -1
for weekday in weekdays:
if weekday > day_date.weekday():
break
base_index += 1
new_index = (base_index + 1) % len(weekdays)
days = (weekdays[new_index] - day_date.weekday())
if days < 0:
days = 7 + days
return day_date + relativedelta(days=days)
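# Example (illustrative): with a Monday-Friday calendar, get_next_day called
# on a Friday returns the following Monday; with no calendar id it simply
# returns the next day.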
def get_previous_day(self, cr, uid, id, day_date, context=None):
""" Get previous date of day_date, based on resource.calendar. If no
calendar is provided, just return the previous day.
:param int id: id of a resource.calendar. If not given, simply remove
one day from the submitted date.
:param date day_date: current day as a date
:return date: previous day of calendar, or just previous day """
if not id:
return day_date + relativedelta(days=-1)
weekdays = self.get_weekdays(cr, uid, id, context)
weekdays.reverse()
base_index = -1
for weekday in weekdays:
if weekday < day_date.weekday():
break
base_index += 1
new_index = (base_index + 1) % len(weekdays)
days = (weekdays[new_index] - day_date.weekday())
if days > 0:
days = days - 7
return day_date + relativedelta(days=days)
def get_leave_intervals(self, cr, uid, id, resource_id=None,
start_datetime=None, end_datetime=None,
context=None):
"""Get the leaves of the calendar. Leaves can be filtered on the resource,
the start datetime or the end datetime.
:param int resource_id: the id of the resource to take into account when
computing the leaves. If not set, only general
leaves are computed. If set, generic and
specific leaves are computed.
:param datetime start_datetime: if provided, do not take into account leaves
ending before this date.
:param datetime end_datetime: if provided, do not take into account leaves
beginning after this date.
:return list leaves: list of tuples (start_datetime, end_datetime) of
leave intervals
"""
resource_calendar = self.browse(cr, uid, id, context=context)
leaves = []
for leave in resource_calendar.leave_ids:
if leave.resource_id and not resource_id == leave.resource_id.id:
continue
date_from = datetime.datetime.strptime(leave.date_from, tools.DEFAULT_SERVER_DATETIME_FORMAT)
if end_datetime and date_from > end_datetime:
continue
date_to = datetime.datetime.strptime(leave.date_to, tools.DEFAULT_SERVER_DATETIME_FORMAT)
if start_datetime and date_to < start_datetime:
continue
leaves.append((date_from, date_to))
return leaves
def get_working_intervals_of_day(self, cr, uid, id, start_dt=None, end_dt=None,
leaves=None, compute_leaves=False, resource_id=None,
default_interval=None, context=None):
""" Get the working intervals of the day based on calendar. This method
handle leaves that come directly from the leaves parameter or can be computed.
:param int id: resource.calendar id; take the first one if is a list
:param datetime start_dt: datetime object that is the beginning hours
for the working intervals computation; any
working interval beginning before start_dt
will be truncated. If not set, set to end_dt
or today() if no end_dt at 00.00.00.
:param datetime end_dt: datetime object that is the ending hour
for the working intervals computation; any
working interval ending after end_dt
will be truncated. If not set, set to start_dt()
at 23.59.59.
:param list leaves: a list of tuples(start_datetime, end_datetime) that
represent leaves.
:param boolean compute_leaves: if set and if leaves is None, compute the
leaves based on calendar and resource.
If leaves is None and compute_leaves false
no leaves are taken into account.
:param int resource_id: the id of the resource to take into account when
computing the leaves. If not set, only general
leaves are computed. If set, generic and
specific leaves are computed.
:param tuple default_interval: if no id, try to return a default working
day using default_interval[0] as beginning
hour, and default_interval[1] as ending hour.
Example: default_interval = (8, 16).
Otherwise, a void list of working intervals
is returned when id is None.
:return list intervals: a list of tuples (start_datetime, end_datetime)
of work intervals """
if isinstance(id, (list, tuple)):
id = id[0]
# Computes start_dt, end_dt (with default values if not set) + off-interval work limits
work_limits = []
if start_dt is None and end_dt is not None:
start_dt = end_dt.replace(hour=0, minute=0, second=0)
elif start_dt is None:
start_dt = datetime.datetime.now().replace(hour=0, minute=0, second=0)
else:
work_limits.append((start_dt.replace(hour=0, minute=0, second=0), start_dt))
if end_dt is None:
end_dt = start_dt.replace(hour=23, minute=59, second=59)
else:
work_limits.append((end_dt, end_dt.replace(hour=23, minute=59, second=59)))
assert start_dt.date() == end_dt.date(), 'get_working_intervals_of_day is restricted to one day'
intervals = []
work_dt = start_dt.replace(hour=0, minute=0, second=0)
# no calendar: try to use the default_interval, then return directly
if id is None:
if default_interval:
working_interval = (start_dt.replace(hour=default_interval[0], minute=0, second=0), start_dt.replace(hour=default_interval[1], minute=0, second=0))
intervals = self.interval_remove_leaves(working_interval, work_limits)
return intervals
working_intervals = []
for calendar_working_day in self.get_attendances_for_weekdays(cr, uid, id, [start_dt.weekday()], context):
working_interval = (
work_dt.replace(hour=int(calendar_working_day.hour_from)),
work_dt.replace(hour=int(calendar_working_day.hour_to))
)
working_intervals += self.interval_remove_leaves(working_interval, work_limits)
# find leave intervals
if leaves is None and compute_leaves:
leaves = self.get_leave_intervals(cr, uid, id, resource_id=resource_id, context=context)
# filter according to leaves
for interval in working_intervals:
work_intervals = self.interval_remove_leaves(interval, leaves)
intervals += work_intervals
return intervals
def get_working_hours_of_date(self, cr, uid, id, start_dt=None, end_dt=None,
leaves=None, compute_leaves=False, resource_id=None,
default_interval=None, context=None):
""" Get the working hours of the day based on calendar. This method uses
get_working_intervals_of_day to have the work intervals of the day. It
then calculates the number of hours contained in those intervals. """
res = datetime.timedelta()
intervals = self.get_working_intervals_of_day(
cr, uid, id,
start_dt, end_dt, leaves,
compute_leaves, resource_id,
default_interval, context)
for interval in intervals:
res += interval[1] - interval[0]
return seconds(res) / 3600.0
def get_working_hours(self, cr, uid, id, start_dt, end_dt, compute_leaves=False,
resource_id=None, default_interval=None, context=None):
hours = 0.0
for day in rrule.rrule(rrule.DAILY, dtstart=start_dt,
until=(end_dt + datetime.timedelta(days=1)).replace(hour=0, minute=0, second=0),
byweekday=self.get_weekdays(cr, uid, id, context=context)):
day_start_dt = day.replace(hour=0, minute=0, second=0)
if start_dt and day.date() == start_dt.date():
day_start_dt = start_dt
day_end_dt = day.replace(hour=23, minute=59, second=59)
if end_dt and day.date() == end_dt.date():
day_end_dt = end_dt
hours += self.get_working_hours_of_date(
cr, uid, id, start_dt=day_start_dt, end_dt=day_end_dt,
compute_leaves=compute_leaves, resource_id=resource_id,
default_interval=default_interval,
context=context)
return hours
# --------------------------------------------------
# Hours scheduling
# --------------------------------------------------
def _schedule_hours(self, cr, uid, id, hours, day_dt=None,
compute_leaves=False, resource_id=None,
default_interval=None, context=None):
""" Schedule hours of work, using a calendar and an optional resource to
compute working and leave days. This method can be used backwards, i.e.
scheduling days before a deadline.
:param int hours: number of hours to schedule. Use a negative number to
compute a backwards scheduling.
:param datetime day_dt: reference date to compute working days. If days is
> 0 date is the starting date. If days is < 0
date is the ending date.
:param boolean compute_leaves: if set, compute the leaves based on calendar
and resource. Otherwise no leaves are taken
into account.
:param int resource_id: the id of the resource to take into account when
computing the leaves. If not set, only general
leaves are computed. If set, generic and
specific leaves are computed.
:param tuple default_interval: if no id, try to return a default working
day using default_interval[0] as beginning
hour, and default_interval[1] as ending hour.
Example: default_interval = (8, 16).
Otherwise, a void list of working intervals
is returned when id is None.
:return tuple (datetime, intervals): datetime is the beginning/ending date
of the scheduling; intervals are the
working intervals of the scheduling.
Note: rrule.rrule is not used because it does not seem to allow going
back in time.
"""
if day_dt is None:
day_dt = datetime.datetime.now()
backwards = (hours < 0)
hours = abs(hours)
intervals = []
remaining_hours = hours * 1.0
iterations = 0
current_datetime = day_dt
call_args = dict(compute_leaves=compute_leaves, resource_id=resource_id, default_interval=default_interval, context=context)
while float_compare(remaining_hours, 0.0, precision_digits=2) in (1, 0) and iterations < 1000:
if backwards:
call_args['end_dt'] = current_datetime
else:
call_args['start_dt'] = current_datetime
working_intervals = self.get_working_intervals_of_day(cr, uid, id, **call_args)
if id is None and not working_intervals: # no calendar -> consider working 8 hours
remaining_hours -= 8.0
elif working_intervals:
if backwards:
working_intervals.reverse()
new_working_intervals = self.interval_schedule_hours(working_intervals, remaining_hours, not backwards)
if backwards:
new_working_intervals.reverse()
res = datetime.timedelta()
for interval in working_intervals:
res += interval[1] - interval[0]
remaining_hours -= (seconds(res) / 3600.0)
if backwards:
intervals = new_working_intervals + intervals
else:
intervals = intervals + new_working_intervals
# get next day
if backwards:
current_datetime = datetime.datetime.combine(self.get_previous_day(cr, uid, id, current_datetime, context), datetime.time(23, 59, 59))
else:
current_datetime = datetime.datetime.combine(self.get_next_day(cr, uid, id, current_datetime, context), datetime.time())
# avoid infinite loops
iterations += 1
return intervals
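# Example (illustrative): _schedule_hours(cr, uid, calendar_id, -8.0,
# day_dt=some Monday 10:00) walks backwards from Monday 10:00 and returns
# the working intervals covering the preceding 8 working hours.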
def schedule_hours_get_date(self, cr, uid, id, hours, day_dt=None,
compute_leaves=False, resource_id=None,
default_interval=None, context=None):
""" Wrapper on _schedule_hours: return the beginning/ending datetime of
an hours scheduling. """
res = self._schedule_hours(cr, uid, id, hours, day_dt, compute_leaves, resource_id, default_interval, context)
return res and res[0][0] or False
def schedule_hours(self, cr, uid, id, hours, day_dt=None,
compute_leaves=False, resource_id=None,
default_interval=None, context=None):
""" Wrapper on _schedule_hours: return the working intervals of an hours
scheduling. """
return self._schedule_hours(cr, uid, id, hours, day_dt, compute_leaves, resource_id, default_interval, context)
# --------------------------------------------------
# Days scheduling
# --------------------------------------------------
def _schedule_days(self, cr, uid, id, days, day_date=None, compute_leaves=False,
resource_id=None, default_interval=None, context=None):
"""Schedule days of work, using a calendar and an optional resource to
compute working and leave days. This method can be used backwards, i.e.
scheduling days before a deadline.
:param int days: number of days to schedule. Use a negative number to
compute a backwards scheduling.
:param date day_date: reference date to compute working days. If days is > 0
date is the starting date. If days is < 0 date is the
ending date.
:param boolean compute_leaves: if set, compute the leaves based on calendar
and resource. Otherwise no leaves are taken
into account.
:param int resource_id: the id of the resource to take into account when
computing the leaves. If not set, only general
leaves are computed. If set, generic and
specific leaves are computed.
:param tuple default_interval: if no id, try to return a default working
day using default_interval[0] as beginning
hour, and default_interval[1] as ending hour.
Example: default_interval = (8, 16).
Otherwise, a void list of working intervals
is returned when id is None.
:return tuple (datetime, intervals): datetime is the beginning/ending date
of the scheduling; intervals are the
working intervals of the scheduling.
Implementation note: rrule.rrule is not used because it does not seem
to allow going back in time.
"""
if day_date is None:
day_date = datetime.datetime.now()
backwards = (days < 0)
days = abs(days)
intervals = []
planned_days = 0
iterations = 0
if backwards:
current_datetime = day_date.replace(hour=23, minute=59, second=59)
else:
current_datetime = day_date.replace(hour=0, minute=0, second=0)
while planned_days < days and iterations < 1000:
working_intervals = self.get_working_intervals_of_day(
cr, uid, id, current_datetime,
compute_leaves=compute_leaves, resource_id=resource_id,
default_interval=default_interval,
context=context)
if id is None or working_intervals: # no calendar -> no working hours, but day is considered as worked
planned_days += 1
intervals += working_intervals
# get next day
if backwards:
current_datetime = self.get_previous_day(cr, uid, id, current_datetime, context)
else:
current_datetime = self.get_next_day(cr, uid, id, current_datetime, context)
# avoid infinite loops
iterations += 1
return intervals
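# Example (illustrative): _schedule_days(cr, uid, calendar_id, -3,
# day_date=a Wednesday) counts three working days backwards (Wednesday,
# Tuesday, Monday) and returns their working intervals.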
def schedule_days_get_date(self, cr, uid, id, days, day_date=None, compute_leaves=False,
resource_id=None, default_interval=None, context=None):
""" Wrapper on _schedule_days: return the beginning/ending datetime of
a days scheduling. """
res = self._schedule_days(cr, uid, id, days, day_date, compute_leaves, resource_id, default_interval, context)
return res and res[-1][1] or False
def schedule_days(self, cr, uid, id, days, day_date=None, compute_leaves=False,
resource_id=None, default_interval=None, context=None):
""" Wrapper on _schedule_days: return the working intervals of a days
scheduling. """
return self._schedule_days(cr, uid, id, days, day_date, compute_leaves, resource_id, default_interval, context)
# --------------------------------------------------
# Compatibility / to clean / to remove
# --------------------------------------------------
def working_hours_on_day(self, cr, uid, resource_calendar_id, day, context=None):
""" Used in hr_payroll/hr_payroll.py
:deprecated: OpenERP saas-3. Use get_working_hours_of_date instead. Note:
since saas-3, hours/minutes are taken into account, not just the whole day."""
if isinstance(day, datetime.datetime):
day = day.replace(hour=0, minute=0)
return self.get_working_hours_of_date(cr, uid, resource_calendar_id.id, start_dt=day, context=None)
def interval_min_get(self, cr, uid, id, dt_from, hours, resource=False):
""" Schedule hours backwards. Used in mrp_operations/mrp_operations.py.
:deprecated: OpenERP saas-3. Use schedule_hours instead. Note: since
saas-3, counts leave hours instead of all-day leaves."""
return self.schedule_hours(
cr, uid, id, hours * -1.0,
day_dt=dt_from.replace(minute=0, second=0),
compute_leaves=True, resource_id=resource,
default_interval=(8, 16)
)
def interval_get_multi(self, cr, uid, date_and_hours_by_cal, resource=False, byday=True):
""" Used in mrp_operations/mrp_operations.py (default parameters) and in
interval_get()
:deprecated: OpenERP saas-3. Use schedule_hours instead. Note:
Byday was not used. Since saas-3, counts leave hours instead of all-day leaves."""
res = {}
for dt_str, hours, calendar_id in date_and_hours_by_cal:
result = self.schedule_hours(
cr, uid, calendar_id, hours,
day_dt=datetime.datetime.strptime(dt_str, '%Y-%m-%d %H:%M:%S').replace(minute=0, second=0),
compute_leaves=True, resource_id=resource,
default_interval=(8, 16)
)
res[(dt_str, hours, calendar_id)] = result
return res
def interval_get(self, cr, uid, id, dt_from, hours, resource=False, byday=True):
""" Unifier of interval_get_multi. Used in: mrp_operations/mrp_operations.py,
crm/crm_lead.py (res given).
:deprecated: OpenERP saas-3. Use get_working_hours instead."""
res = self.interval_get_multi(
cr, uid, [(dt_from.strftime('%Y-%m-%d %H:%M:%S'), hours, id)], resource, byday)[(dt_from.strftime('%Y-%m-%d %H:%M:%S'), hours, id)]
return res
def interval_hours_get(self, cr, uid, id, dt_from, dt_to, resource=False):
""" Unused wrapper.
:deprecated: OpenERP saas-3. Use get_working_hours instead."""
return self._interval_hours_get(cr, uid, id, dt_from, dt_to, resource_id=resource)
def _interval_hours_get(self, cr, uid, id, dt_from, dt_to, resource_id=False, timezone_from_uid=None, exclude_leaves=True, context=None):
""" Computes working hours between two dates, taking always same hour/minuts.
:deprecated: OpenERP saas-3. Use get_working_hours instead. Note: since saas-3,
now resets hours/minutes and counts leave hours instead of all-day leaves."""
return self.get_working_hours(
cr, uid, id, dt_from, dt_to,
compute_leaves=(not exclude_leaves), resource_id=resource_id,
default_interval=(8, 16), context=context)
class resource_calendar_attendance(osv.osv):
_name = "resource.calendar.attendance"
_description = "Work Detail"
_columns = {
'name' : fields.char("Name", required=True),
'dayofweek': fields.selection([('0','Monday'),('1','Tuesday'),('2','Wednesday'),('3','Thursday'),('4','Friday'),('5','Saturday'),('6','Sunday')], 'Day of Week', required=True, select=True),
'date_from' : fields.date('Starting Date'),
'hour_from' : fields.float('Work from', required=True, help="Start and End time of working.", select=True),
'hour_to' : fields.float("Work to", required=True),
'calendar_id' : fields.many2one("resource.calendar", "Resource's Calendar", required=True),
}
_order = 'dayofweek, hour_from'
_defaults = {
'dayofweek' : '0'
}
def hours_time_string(hours):
""" convert a number of hours (float) into a string with format '%H:%M' """
minutes = int(round(hours * 60))
return "%02d:%02d" % divmod(minutes, 60)
class resource_resource(osv.osv):
_name = "resource.resource"
_description = "Resource Detail"
_columns = {
'name': fields.char("Name", required=True),
'code': fields.char('Code', size=16, copy=False),
'active' : fields.boolean('Active', help="If the active field is set to False, it will allow you to hide the resource record without removing it."),
'company_id' : fields.many2one('res.company', 'Company'),
'resource_type': fields.selection([('user','Human'),('material','Material')], 'Resource Type', required=True),
'user_id' : fields.many2one('res.users', 'User', help='Related user name for the resource to manage its access.'),
'time_efficiency' : fields.float('Efficiency Factor', size=8, required=True, help="This field depict the efficiency of the resource to complete tasks. e.g resource put alone on a phase of 5 days with 5 tasks assigned to him, will show a load of 100% for this phase by default, but if we put a efficiency of 200%, then his load will only be 50%."),
'calendar_id' : fields.many2one("resource.calendar", "Working Time", help="Define the schedule of resource"),
}
_defaults = {
'resource_type' : 'user',
'time_efficiency' : 1,
'active' : True,
'company_id': lambda self, cr, uid, context: self.pool.get('res.company')._company_default_get(cr, uid, 'resource.resource', context=context)
}
def copy(self, cr, uid, id, default=None, context=None):
if default is None:
default = {}
if not default.get('name', False):
default.update(name=_('%s (copy)') % (self.browse(cr, uid, id, context=context).name))
return super(resource_resource, self).copy(cr, uid, id, default, context)
def generate_resources(self, cr, uid, user_ids, calendar_id, context=None):
"""
Return a list of Resource Class objects for the resources allocated to the phase.
NOTE: Used in project/project.py
"""
resource_objs = {}
user_pool = self.pool.get('res.users')
for user in user_pool.browse(cr, uid, user_ids, context=context):
resource_objs[user.id] = {
'name' : user.name,
'vacation': [],
'efficiency': 1.0,
}
resource_ids = self.search(cr, uid, [('user_id', '=', user.id)], context=context)
if resource_ids:
for resource in self.browse(cr, uid, resource_ids, context=context):
resource_objs[user.id]['efficiency'] = resource.time_efficiency
resource_cal = resource.calendar_id.id
if resource_cal:
leaves = self.compute_vacation(cr, uid, calendar_id, resource.id, resource_cal, context=context)
resource_objs[user.id]['vacation'] += list(leaves)
return resource_objs
def compute_vacation(self, cr, uid, calendar_id, resource_id=False, resource_calendar=False, context=None):
"""
Compute the vacation from the working calendar of the resource.
@param calendar_id : working calendar of the project
@param resource_id : resource working on phase/task
@param resource_calendar : working calendar of the resource
NOTE: used in project/project.py, and in generate_resources
"""
resource_calendar_leaves_pool = self.pool.get('resource.calendar.leaves')
leave_list = []
if resource_id:
leave_ids = resource_calendar_leaves_pool.search(cr, uid, ['|', ('calendar_id', '=', calendar_id),
('calendar_id', '=', resource_calendar),
('resource_id', '=', resource_id)
], context=context)
else:
leave_ids = resource_calendar_leaves_pool.search(cr, uid, [('calendar_id', '=', calendar_id),
('resource_id', '=', False)
], context=context)
leaves = resource_calendar_leaves_pool.read(cr, uid, leave_ids, ['date_from', 'date_to'], context=context)
        for leave in leaves:
            dt_start = datetime.datetime.strptime(leave['date_from'], '%Y-%m-%d %H:%M:%S')
            dt_end = datetime.datetime.strptime(leave['date_to'], '%Y-%m-%d %H:%M:%S')
            duration = dt_end - dt_start
            for x in range(int(duration.days + 1)):
                leave_list.append((dt_start + datetime.timedelta(days=x)).strftime('%Y-%m-%d'))
leave_list.sort()
return leave_list
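    # Illustrative example (assumed leave record): a leave from
    # '2013-02-12 00:00:00' to '2013-02-14 23:00:00' expands day by day into
    # ['2013-02-12', '2013-02-13', '2013-02-14'].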
def compute_working_calendar(self, cr, uid, calendar_id=False, context=None):
"""
        Convert the working calendar from the 'OpenERP' format into the 'Faces' format.
@param calendar_id : working calendar of the project
NOTE: used in project/project.py
"""
if not calendar_id:
            # Calendar is not specified: default to working Monday-Friday,
            # 8:00-12:00 and 13:00-17:00
return [('fri', '8:0-12:0','13:0-17:0'), ('thu', '8:0-12:0','13:0-17:0'), ('wed', '8:0-12:0','13:0-17:0'),
('mon', '8:0-12:0','13:0-17:0'), ('tue', '8:0-12:0','13:0-17:0')]
resource_attendance_pool = self.pool.get('resource.calendar.attendance')
time_range = "8:00-8:00"
non_working = ""
week_days = {"0": "mon", "1": "tue", "2": "wed","3": "thu", "4": "fri", "5": "sat", "6": "sun"}
wk_days = {}
wk_time = {}
wktime_list = []
wktime_cal = []
week_ids = resource_attendance_pool.search(cr, uid, [('calendar_id', '=', calendar_id)], context=context)
weeks = resource_attendance_pool.read(cr, uid, week_ids, ['dayofweek', 'hour_from', 'hour_to'], context=context)
# Convert time formats into appropriate format required
# and create a list like [('mon', '8:00-12:00'), ('mon', '13:00-18:00')]
for week in weeks:
res_str = ""
day = None
if week_days.get(week['dayofweek'],False):
day = week_days[week['dayofweek']]
wk_days[week['dayofweek']] = week_days[week['dayofweek']]
else:
raise osv.except_osv(_('Configuration Error!'),_('Make sure the Working time has been configured with proper week days!'))
hour_from_str = hours_time_string(week['hour_from'])
hour_to_str = hours_time_string(week['hour_to'])
res_str = hour_from_str + '-' + hour_to_str
wktime_list.append((day, res_str))
# Convert into format like [('mon', '8:00-12:00', '13:00-18:00')]
for item in wktime_list:
            if item[0] in wk_time:
                wk_time[item[0]].append(item[1])
            else:
                wk_time[item[0]] = [item[0]]
                wk_time[item[0]].append(item[1])
for k,v in wk_time.items():
wktime_cal.append(tuple(v))
# Add for the non-working days like: [('sat, sun', '8:00-8:00')]
for k, v in wk_days.items():
            if k in week_days:
week_days.pop(k)
for v in week_days.itervalues():
non_working += v + ','
if non_working:
wktime_cal.append((non_working[:-1], time_range))
return wktime_cal
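    # Illustrative sketch of the 'Faces' output (assumed attendance data):
    # Monday 8-12 and 13-18 plus Friday 8-16 would yield
    # [('mon', '08:00-12:00', '13:00-18:00'), ('fri', '08:00-16:00'),
    #  ('tue,wed,thu,sat,sun', '8:00-8:00')] -- non-working-day order may vary.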
class resource_calendar_leaves(osv.osv):
_name = "resource.calendar.leaves"
_description = "Leave Detail"
_columns = {
'name' : fields.char("Name"),
'company_id' : fields.related('calendar_id','company_id',type='many2one',relation='res.company',string="Company", store=True, readonly=True),
'calendar_id' : fields.many2one("resource.calendar", "Working Time"),
'date_from' : fields.datetime('Start Date', required=True),
'date_to' : fields.datetime('End Date', required=True),
'resource_id' : fields.many2one("resource.resource", "Resource", help="If empty, this is a generic holiday for the company. If a resource is set, the holiday/leave is only for this resource"),
}
def check_dates(self, cr, uid, ids, context=None):
for leave in self.browse(cr, uid, ids, context=context):
if leave.date_from and leave.date_to and leave.date_from > leave.date_to:
return False
return True
_constraints = [
        (check_dates, 'Error! Leave start-date must be earlier than leave end-date.', ['date_from', 'date_to'])
]
def onchange_resource(self, cr, uid, ids, resource, context=None):
result = {}
if resource:
resource_pool = self.pool.get('resource.resource')
result['calendar_id'] = resource_pool.browse(cr, uid, resource, context=context).calendar_id.id
return {'value': result}
return {'value': {'calendar_id': []}}
def seconds(td):
assert isinstance(td, datetime.timedelta)
return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10**6) / 10.**6
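# Illustrative check: seconds(datetime.timedelta(days=1, seconds=30,
# microseconds=500000)) returns 86430.5 (86400 + 30 + 0.5).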
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
YuriyLisovskiy/messenger | refs/heads/master | test/unittest/__init__.py | 12133432 | |
yaroslavprogrammer/django | refs/heads/master | tests/modeladmin/__init__.py | 12133432 | |
donce/django-cms | refs/heads/develop | cms/admin/dialog/__init__.py | 12133432 | |
SnippyHolloW/contextual_word_segmentation | refs/heads/master | src/plot_topics_distribs.py | 2 | import sys
import pylab as pl
LDAMODEL = 'provi_reseg_lemmatized_tfidf.ldamodel'
usage = """python src/plot_topics_distrib.py < all_1min_doc_topics_reseg_lemmatized.txt"""
topics_distrib = {}
for line in sys.stdin:
tuples = line.split('[')[1].split(')')
tuples = tuples[:-1]
for t in tuples:
tmp = t.strip(' (,').split(',')
tid = int(tmp[0])
topics_distrib[tid] = topics_distrib.get(tid, 0.0) + float(tmp[1].strip(' '))
import gensim  # gensim must be importable so cPickle can unpickle the LdaModel below
import cPickle
with open(LDAMODEL) as f:
#with open(sys.argv[1]) as f:
lda = cPickle.load(f)
y = ['\n\n'.join(filter(lambda x: 'NN' in x, lda.print_topic(i).split('+')[:20])) for i in topics_distrib.iterkeys()]
#for topic, value in topics_distrib.iteritems():
pl.rcParams['lines.linewidth'] = 2
pl.rcParams['font.family'] = 'sans-serif'
pl.rcParams['font.size'] = 16
pl.pie([x for x in topics_distrib.itervalues()], labels=y, shadow=True, explode=[0.05 for i in xrange(len(topics_distrib))])
pl.show()
|
tarzan0820/odoo | refs/heads/8.0 | addons/payment_buckaroo/models/buckaroo.py | 209 | # -*- coding: utf-'8' "-*-"
from hashlib import sha1
import logging
import urlparse
from openerp.addons.payment.models.payment_acquirer import ValidationError
from openerp.addons.payment_buckaroo.controllers.main import BuckarooController
from openerp.osv import osv, fields
from openerp.tools.float_utils import float_compare
_logger = logging.getLogger(__name__)
class AcquirerBuckaroo(osv.Model):
_inherit = 'payment.acquirer'
def _get_buckaroo_urls(self, cr, uid, environment, context=None):
""" Buckaroo URLs
"""
if environment == 'prod':
return {
'buckaroo_form_url': 'https://checkout.buckaroo.nl/html/',
}
else:
return {
'buckaroo_form_url': 'https://testcheckout.buckaroo.nl/html/',
}
def _get_providers(self, cr, uid, context=None):
providers = super(AcquirerBuckaroo, self)._get_providers(cr, uid, context=context)
providers.append(['buckaroo', 'Buckaroo'])
return providers
_columns = {
'brq_websitekey': fields.char('WebsiteKey', required_if_provider='buckaroo'),
'brq_secretkey': fields.char('SecretKey', required_if_provider='buckaroo'),
}
def _buckaroo_generate_digital_sign(self, acquirer, inout, values):
""" Generate the shasign for incoming or outgoing communications.
        :param browse acquirer: the payment.acquirer browse record. It should
                                have the shared secret key (brq_secretkey) set
:param string inout: 'in' (openerp contacting buckaroo) or 'out' (buckaroo
contacting openerp).
:param dict values: transaction values
:return string: shasign
"""
assert inout in ('in', 'out')
assert acquirer.provider == 'buckaroo'
keys = "add_returndata Brq_amount Brq_culture Brq_currency Brq_invoicenumber Brq_return Brq_returncancel Brq_returnerror Brq_returnreject brq_test Brq_websitekey".split()
def get_value(key):
if values.get(key):
return values[key]
return ''
values = dict(values or {})
if inout == 'out':
if 'BRQ_SIGNATURE' in values:
del values['BRQ_SIGNATURE']
items = sorted((k.upper(), v) for k, v in values.items())
sign = ''.join('%s=%s' % (k, v) for k, v in items)
else:
sign = ''.join('%s=%s' % (k,get_value(k)) for k in keys)
#Add the pre-shared secret key at the end of the signature
sign = sign + acquirer.brq_secretkey
        # sha1() requires a plain byte string; encode unicode signatures as UTF-8
        if isinstance(sign, unicode):
            sign = sign.encode('utf-8')
        shasign = sha1(sign).hexdigest()
return shasign
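    # Illustrative sketch of the 'out' branch (hypothetical values): data
    # {'brq_amount': '20.00', 'brq_currency': 'EUR'} is upper-cased, sorted and
    # concatenated to 'BRQ_AMOUNT=20.00BRQ_CURRENCY=EUR', the secret key is
    # appended, and the SHA-1 hex digest of that string is the shasign.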
def buckaroo_form_generate_values(self, cr, uid, id, partner_values, tx_values, context=None):
base_url = self.pool['ir.config_parameter'].get_param(cr, uid, 'web.base.url')
acquirer = self.browse(cr, uid, id, context=context)
buckaroo_tx_values = dict(tx_values)
buckaroo_tx_values.update({
'Brq_websitekey': acquirer.brq_websitekey,
'Brq_amount': tx_values['amount'],
'Brq_currency': tx_values['currency'] and tx_values['currency'].name or '',
'Brq_invoicenumber': tx_values['reference'],
'brq_test': False if acquirer.environment == 'prod' else True,
'Brq_return': '%s' % urlparse.urljoin(base_url, BuckarooController._return_url),
'Brq_returncancel': '%s' % urlparse.urljoin(base_url, BuckarooController._cancel_url),
'Brq_returnerror': '%s' % urlparse.urljoin(base_url, BuckarooController._exception_url),
'Brq_returnreject': '%s' % urlparse.urljoin(base_url, BuckarooController._reject_url),
'Brq_culture': (partner_values.get('lang') or 'en_US').replace('_', '-'),
})
if buckaroo_tx_values.get('return_url'):
buckaroo_tx_values['add_returndata'] = {'return_url': '%s' % buckaroo_tx_values.pop('return_url')}
else:
buckaroo_tx_values['add_returndata'] = ''
buckaroo_tx_values['Brq_signature'] = self._buckaroo_generate_digital_sign(acquirer, 'in', buckaroo_tx_values)
return partner_values, buckaroo_tx_values
def buckaroo_get_form_action_url(self, cr, uid, id, context=None):
acquirer = self.browse(cr, uid, id, context=context)
return self._get_buckaroo_urls(cr, uid, acquirer.environment, context=context)['buckaroo_form_url']
class TxBuckaroo(osv.Model):
_inherit = 'payment.transaction'
# buckaroo status
_buckaroo_valid_tx_status = [190]
_buckaroo_pending_tx_status = [790, 791, 792, 793]
_buckaroo_cancel_tx_status = [890, 891]
_buckaroo_error_tx_status = [490, 491, 492]
_buckaroo_reject_tx_status = [690]
_columns = {
'buckaroo_txnid': fields.char('Transaction ID'),
}
# --------------------------------------------------
# FORM RELATED METHODS
# --------------------------------------------------
def _buckaroo_form_get_tx_from_data(self, cr, uid, data, context=None):
""" Given a data dict coming from buckaroo, verify it and find the related
transaction record. """
reference, pay_id, shasign = data.get('BRQ_INVOICENUMBER'), data.get('BRQ_PAYMENT'), data.get('BRQ_SIGNATURE')
if not reference or not pay_id or not shasign:
            error_msg = 'Buckaroo: received data with missing reference (%s) or pay_id (%s) or shasign (%s)' % (reference, pay_id, shasign)
_logger.error(error_msg)
raise ValidationError(error_msg)
tx_ids = self.search(cr, uid, [('reference', '=', reference)], context=context)
if not tx_ids or len(tx_ids) > 1:
error_msg = 'Buckaroo: received data for reference %s' % (reference)
if not tx_ids:
error_msg += '; no order found'
else:
                error_msg += '; multiple orders found'
_logger.error(error_msg)
raise ValidationError(error_msg)
tx = self.pool['payment.transaction'].browse(cr, uid, tx_ids[0], context=context)
#verify shasign
shasign_check = self.pool['payment.acquirer']._buckaroo_generate_digital_sign(tx.acquirer_id, 'out' ,data)
if shasign_check.upper() != shasign.upper():
error_msg = 'Buckaroo: invalid shasign, received %s, computed %s, for data %s' % (shasign, shasign_check, data)
_logger.error(error_msg)
raise ValidationError(error_msg)
return tx
def _buckaroo_form_get_invalid_parameters(self, cr, uid, tx, data, context=None):
invalid_parameters = []
if tx.acquirer_reference and data.get('BRQ_TRANSACTIONS') != tx.acquirer_reference:
invalid_parameters.append(('Transaction Id', data.get('BRQ_TRANSACTIONS'), tx.acquirer_reference))
        # check what was bought
if float_compare(float(data.get('BRQ_AMOUNT', '0.0')), tx.amount, 2) != 0:
invalid_parameters.append(('Amount', data.get('BRQ_AMOUNT'), '%.2f' % tx.amount))
if data.get('BRQ_CURRENCY') != tx.currency_id.name:
invalid_parameters.append(('Currency', data.get('BRQ_CURRENCY'), tx.currency_id.name))
return invalid_parameters
def _buckaroo_form_validate(self, cr, uid, tx, data, context=None):
status_code = int(data.get('BRQ_STATUSCODE','0'))
if status_code in self._buckaroo_valid_tx_status:
tx.write({
'state': 'done',
'buckaroo_txnid': data.get('BRQ_TRANSACTIONS'),
})
return True
elif status_code in self._buckaroo_pending_tx_status:
tx.write({
'state': 'pending',
'buckaroo_txnid': data.get('BRQ_TRANSACTIONS'),
})
return True
elif status_code in self._buckaroo_cancel_tx_status:
tx.write({
'state': 'cancel',
'buckaroo_txnid': data.get('BRQ_TRANSACTIONS'),
})
return True
else:
error = 'Buckaroo: feedback error'
_logger.info(error)
tx.write({
'state': 'error',
'state_message': error,
'buckaroo_txnid': data.get('BRQ_TRANSACTIONS'),
})
return False
|
NeCTAR-RC/neutron | refs/heads/master | neutron/tests/tempest/test.py | 17 | # Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import atexit
import functools
import json
import os
import re
import sys
import time
import urllib
import uuid
import fixtures
from oslo_log import log as logging
from oslo_utils import importutils
import six
import testscenarios
import testtools
from neutron.tests.api import clients
from neutron.tests.tempest.common import credentials
import neutron.tests.tempest.common.generator.valid_generator as valid
from neutron.tests.tempest import config
from neutron.tests.tempest import exceptions
LOG = logging.getLogger(__name__)
CONF = config.CONF
def attr(*args, **kwargs):
"""A decorator which applies the testtools attr decorator
    This decorator applies the testtools.testcase.attr decorator if the
    attribute is in the list of attributes we want to apply.
"""
def decorator(f):
if 'type' in kwargs and isinstance(kwargs['type'], str):
f = testtools.testcase.attr(kwargs['type'])(f)
if kwargs['type'] == 'smoke':
f = testtools.testcase.attr('gate')(f)
elif 'type' in kwargs and isinstance(kwargs['type'], list):
for attr in kwargs['type']:
f = testtools.testcase.attr(attr)(f)
if attr == 'smoke':
f = testtools.testcase.attr('gate')(f)
return f
return decorator
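# Illustrative usage (hypothetical tests): @attr(type='smoke') tags a test with
# both 'smoke' and 'gate'; @attr(type=['negative', 'gate']) applies each
# attribute in the list.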
def idempotent_id(id):
"""Stub for metadata decorator"""
if not isinstance(id, six.string_types):
raise TypeError('Test idempotent_id must be string not %s'
'' % type(id).__name__)
uuid.UUID(id)
def decorator(f):
f = testtools.testcase.attr('id-%s' % id)(f)
if f.__doc__:
f.__doc__ = 'Test idempotent id: %s\n%s' % (id, f.__doc__)
else:
f.__doc__ = 'Test idempotent id: %s' % id
return f
return decorator
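# Illustrative usage (hypothetical id and test name):
#   @idempotent_id('d2c9b76b-68c2-4c2a-91b7-2b8a1c0e0f11')
#   def test_list_ports(self): ...
# The id must parse as a UUID and is prepended to the test docstring.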
def get_service_list():
service_list = {
'compute': CONF.service_available.nova,
'image': CONF.service_available.glance,
'baremetal': CONF.service_available.ironic,
'volume': CONF.service_available.cinder,
'orchestration': CONF.service_available.heat,
# NOTE(mtreinish) nova-network will provide networking functionality
# if neutron isn't available, so always set to True.
'network': True,
'identity': True,
'object_storage': CONF.service_available.swift,
'dashboard': CONF.service_available.horizon,
'telemetry': CONF.service_available.ceilometer,
'data_processing': CONF.service_available.sahara,
'database': CONF.service_available.trove
}
return service_list
def services(*args, **kwargs):
"""A decorator used to set an attr for each service used in a test case
This decorator applies a testtools attr for each service that gets
exercised by a test case.
"""
def decorator(f):
services = ['compute', 'image', 'baremetal', 'volume', 'orchestration',
'network', 'identity', 'object_storage', 'dashboard',
'telemetry', 'data_processing', 'database']
for service in args:
if service not in services:
raise exceptions.InvalidServiceTag('%s is not a valid '
'service' % service)
attr(type=list(args))(f)
@functools.wraps(f)
def wrapper(self, *func_args, **func_kwargs):
service_list = get_service_list()
for service in args:
if not service_list[service]:
msg = 'Skipped because the %s service is not available' % (
service)
raise testtools.TestCase.skipException(msg)
return f(self, *func_args, **func_kwargs)
return wrapper
return decorator
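# Illustrative usage (hypothetical test): @services('compute', 'image') tags
# the test with those attrs and skips it at runtime if nova or glance is
# reported unavailable in the configuration.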
def stresstest(*args, **kwargs):
"""Add stress test decorator
    For all functions with this decorator an attr 'stress' will be
set automatically.
@param class_setup_per: allowed values are application, process, action
``application``: once in the stress job lifetime
``process``: once in the worker process lifetime
``action``: on each action
@param allow_inheritance: allows inheritance of this attribute
"""
def decorator(f):
if 'class_setup_per' in kwargs:
setattr(f, "st_class_setup_per", kwargs['class_setup_per'])
else:
setattr(f, "st_class_setup_per", 'process')
if 'allow_inheritance' in kwargs:
setattr(f, "st_allow_inheritance", kwargs['allow_inheritance'])
else:
setattr(f, "st_allow_inheritance", False)
attr(type='stress')(f)
return f
return decorator
def requires_ext(*args, **kwargs):
"""A decorator to skip tests if an extension is not enabled
@param extension
@param service
"""
def decorator(func):
@functools.wraps(func)
def wrapper(*func_args, **func_kwargs):
if not is_extension_enabled(kwargs['extension'],
kwargs['service']):
msg = "Skipped because %s extension: %s is not enabled" % (
kwargs['service'], kwargs['extension'])
raise testtools.TestCase.skipException(msg)
return func(*func_args, **func_kwargs)
return wrapper
return decorator
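# Illustrative usage (hypothetical extension name):
#   @requires_ext(extension='security-group', service='network')
# skips the decorated test unless the extension is listed in the configured
# network api_extensions (or that list is ['all']).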
def is_extension_enabled(extension_name, service):
"""A function that will check the list of enabled extensions from config
"""
config_dict = {
'compute': CONF.compute_feature_enabled.api_extensions,
'volume': CONF.volume_feature_enabled.api_extensions,
'network': CONF.network_feature_enabled.api_extensions,
'object': CONF.object_storage_feature_enabled.discoverable_apis,
}
if len(config_dict[service]) == 0:
return False
if config_dict[service][0] == 'all':
return True
if extension_name in config_dict[service]:
return True
return False
at_exit_set = set()
def validate_tearDownClass():
if at_exit_set:
LOG.error(
"tearDownClass does not call the super's "
"tearDownClass in these classes: \n"
+ str(at_exit_set))
atexit.register(validate_tearDownClass)
class BaseTestCase(testtools.testcase.WithAttributes,
testtools.TestCase):
"""The test base class defines Tempest framework for class level fixtures.
`setUpClass` and `tearDownClass` are defined here and cannot be overwritten
by subclasses (enforced via hacking rule T105).
Set-up is split in a series of steps (setup stages), which can be
overwritten by test classes. Set-up stages are:
- skip_checks
- setup_credentials
- setup_clients
- resource_setup
Tear-down is also split in a series of steps (teardown stages), which are
stacked for execution only if the corresponding setup stage had been
reached during the setup phase. Tear-down stages are:
- clear_isolated_creds (defined in the base test class)
- resource_cleanup
"""
setUpClassCalled = False
_service = None
network_resources = {}
# NOTE(sdague): log_format is defined inline here instead of using the oslo
# default because going through the config path recouples config to the
# stress tests too early, and depending on testr order will fail unit tests
log_format = ('%(asctime)s %(process)d %(levelname)-8s '
'[%(name)s] %(message)s')
@classmethod
def setUpClass(cls):
# It should never be overridden by descendants
if hasattr(super(BaseTestCase, cls), 'setUpClass'):
super(BaseTestCase, cls).setUpClass()
cls.setUpClassCalled = True
# Stack of (name, callable) to be invoked in reverse order at teardown
cls.teardowns = []
# All the configuration checks that may generate a skip
cls.skip_checks()
try:
# Allocation of all required credentials and client managers
cls.teardowns.append(('credentials', cls.clear_isolated_creds))
cls.setup_credentials()
# Shortcuts to clients
cls.setup_clients()
# Additional class-wide test resources
cls.teardowns.append(('resources', cls.resource_cleanup))
cls.resource_setup()
except Exception:
etype, value, trace = sys.exc_info()
LOG.info("%s raised in %s.setUpClass. Invoking tearDownClass." % (
etype, cls.__name__))
cls.tearDownClass()
try:
raise etype, value, trace
finally:
del trace # to avoid circular refs
@classmethod
def tearDownClass(cls):
at_exit_set.discard(cls)
# It should never be overridden by descendants
if hasattr(super(BaseTestCase, cls), 'tearDownClass'):
super(BaseTestCase, cls).tearDownClass()
# Save any existing exception, we always want to re-raise the original
# exception only
etype, value, trace = sys.exc_info()
# If there was no exception during setup we shall re-raise the first
# exception in teardown
re_raise = (etype is None)
while cls.teardowns:
name, teardown = cls.teardowns.pop()
# Catch any exception in tearDown so we can re-raise the original
# exception at the end
try:
teardown()
except Exception as te:
sys_exec_info = sys.exc_info()
tetype = sys_exec_info[0]
# TODO(andreaf): Till we have the ability to cleanup only
# resources that were successfully setup in resource_cleanup,
# log AttributeError as info instead of exception.
if tetype is AttributeError and name == 'resources':
LOG.info("tearDownClass of %s failed: %s" % (name, te))
else:
LOG.exception("teardown of %s failed: %s" % (name, te))
if not etype:
etype, value, trace = sys_exec_info
        # If exceptions were raised during teardown, and not before, re-raise
# the first one
if re_raise and etype is not None:
try:
raise etype, value, trace
finally:
del trace # to avoid circular refs
@classmethod
def skip_checks(cls):
"""Class level skip checks. Subclasses verify in here all
conditions that might prevent the execution of the entire test class.
        Checks implemented here may not make use of API calls, and should rely on
configuration alone.
In general skip checks that require an API call are discouraged.
If one is really needed it may be implemented either in the
resource_setup or at test level.
"""
pass
@classmethod
def setup_credentials(cls):
"""Allocate credentials and the client managers from them."""
        # TODO(andreaf) There is a fair amount of code that could be moved from
        # base / test classes in here. Ideally tests should be able to only
        # specify a list of (additional) credentials they need to use.
pass
@classmethod
def setup_clients(cls):
"""Create links to the clients into the test object."""
        # TODO(andreaf) There is a fair amount of code that could be moved from
# base / test classes in here. Ideally tests should be able to only
# specify which client is `client` and nothing else.
pass
@classmethod
def resource_setup(cls):
"""Class level resource setup for test cases.
"""
pass
@classmethod
def resource_cleanup(cls):
"""Class level resource cleanup for test cases.
Resource cleanup must be able to handle the case of partially setup
resources, in case a failure during `resource_setup` should happen.
"""
pass
def setUp(self):
super(BaseTestCase, self).setUp()
if not self.setUpClassCalled:
raise RuntimeError("setUpClass does not calls the super's"
"setUpClass in the "
+ self.__class__.__name__)
at_exit_set.add(self.__class__)
test_timeout = os.environ.get('OS_TEST_TIMEOUT', 0)
try:
test_timeout = int(test_timeout)
except ValueError:
test_timeout = 0
if test_timeout > 0:
self.useFixture(fixtures.Timeout(test_timeout, gentle=True))
if (os.environ.get('OS_STDOUT_CAPTURE') == 'True' or
os.environ.get('OS_STDOUT_CAPTURE') == '1'):
stdout = self.useFixture(fixtures.StringStream('stdout')).stream
self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout))
if (os.environ.get('OS_STDERR_CAPTURE') == 'True' or
os.environ.get('OS_STDERR_CAPTURE') == '1'):
stderr = self.useFixture(fixtures.StringStream('stderr')).stream
self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr))
if (os.environ.get('OS_LOG_CAPTURE') != 'False' and
os.environ.get('OS_LOG_CAPTURE') != '0'):
self.useFixture(fixtures.LoggerFixture(nuke_handlers=False,
format=self.log_format,
level=None))
@classmethod
def get_client_manager(cls):
"""
Returns an OpenStack client manager
"""
force_tenant_isolation = getattr(cls, 'force_tenant_isolation', None)
if (not hasattr(cls, 'isolated_creds') or
not cls.isolated_creds.name == cls.__name__):
cls.isolated_creds = credentials.get_isolated_credentials(
name=cls.__name__, network_resources=cls.network_resources,
force_tenant_isolation=force_tenant_isolation,
)
creds = cls.isolated_creds.get_primary_creds()
os = clients.Manager(credentials=creds, service=cls._service)
return os
@classmethod
def clear_isolated_creds(cls):
"""
Clears isolated creds if set
"""
if hasattr(cls, 'isolated_creds'):
cls.isolated_creds.clear_isolated_creds()
@classmethod
def _get_identity_admin_client(cls):
"""
Returns an instance of the Identity Admin API client
"""
os = clients.AdminManager(service=cls._service)
admin_client = os.identity_client
return admin_client
@classmethod
def set_network_resources(cls, network=False, router=False, subnet=False,
dhcp=False):
"""Specify which network resources should be created
@param network
@param router
@param subnet
@param dhcp
"""
# network resources should be set only once from callers
# in order to ensure that even if it's called multiple times in
# a chain of overloaded methods, the attribute is set only
# in the leaf class
if not cls.network_resources:
cls.network_resources = {
'network': network,
'router': router,
'subnet': subnet,
'dhcp': dhcp}
def assertEmpty(self, list, msg=None):
self.assertTrue(len(list) == 0, msg)
def assertNotEmpty(self, list, msg=None):
self.assertTrue(len(list) > 0, msg)
class NegativeAutoTest(BaseTestCase):
_resources = {}
@classmethod
def setUpClass(cls):
super(NegativeAutoTest, cls).setUpClass()
os = cls.get_client_manager()
cls.client = os.negative_client
os_admin = clients.AdminManager(service=cls._service)
cls.admin_client = os_admin.negative_client
@staticmethod
def load_tests(*args):
"""
Wrapper for testscenarios to set the mandatory scenarios variable
only in case a real test loader is in place. Will be automatically
called in case the variable "load_tests" is set.
"""
if getattr(args[0], 'suiteClass', None) is not None:
loader, standard_tests, pattern = args
else:
standard_tests, module, loader = args
for test in testtools.iterate_tests(standard_tests):
schema = getattr(test, '_schema', None)
if schema is not None:
setattr(test, 'scenarios',
NegativeAutoTest.generate_scenario(schema))
return testscenarios.load_tests_apply_scenarios(*args)
@staticmethod
def generate_scenario(description):
"""
Generates the test scenario list for a given description.
:param description: A file or dictionary with the following entries:
name (required) name for the api
http-method (required) one of HEAD,GET,PUT,POST,PATCH,DELETE
url (required) the url to be appended to the catalog url with '%s'
for each resource mentioned
resources: (optional) A list of resource names such as "server",
"flavor", etc. with an element for each '%s' in the url. This
method will call self.get_resource for each element when
constructing the positive test case template so negative
subclasses are expected to return valid resource ids when
appropriate.
json-schema (optional) A valid json schema that will be used to
create invalid data for the api calls. For "GET" and "HEAD",
the data is used to generate query strings appended to the url,
otherwise for the body of the http call.
"""
LOG.debug(description)
generator = importutils.import_class(
CONF.negative.test_generator)()
generator.validate_schema(description)
schema = description.get("json-schema", None)
resources = description.get("resources", [])
scenario_list = []
expected_result = None
for resource in resources:
if isinstance(resource, dict):
expected_result = resource['expected_result']
resource = resource['name']
LOG.debug("Add resource to test %s" % resource)
scn_name = "inv_res_%s" % (resource)
scenario_list.append((scn_name, {"resource": (resource,
str(uuid.uuid4())),
"expected_result": expected_result
}))
if schema is not None:
for scenario in generator.generate_scenarios(schema):
scenario_list.append((scenario['_negtest_name'],
scenario))
LOG.debug(scenario_list)
return scenario_list
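    # Illustrative description (hypothetical api): {"name": "get-flavor",
    # "http-method": "GET", "url": "flavors/%s", "resources": ["flavor"]}
    # yields one scenario per invalid resource plus one scenario per payload
    # generated from the optional json-schema.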
def execute(self, description):
"""
        Execute an http call on an api that is expected to
result in client errors. First it uses invalid resources that are part
of the url, and then invalid data for queries and http request bodies.
:param description: A json file or dictionary with the following
entries:
name (required) name for the api
http-method (required) one of HEAD,GET,PUT,POST,PATCH,DELETE
url (required) the url to be appended to the catalog url with '%s'
for each resource mentioned
resources: (optional) A list of resource names such as "server",
"flavor", etc. with an element for each '%s' in the url. This
method will call self.get_resource for each element when
constructing the positive test case template so negative
subclasses are expected to return valid resource ids when
appropriate.
json-schema (optional) A valid json schema that will be used to
create invalid data for the api calls. For "GET" and "HEAD",
the data is used to generate query strings appended to the url,
otherwise for the body of the http call.
"""
LOG.info("Executing %s" % description["name"])
LOG.debug(description)
generator = importutils.import_class(
CONF.negative.test_generator)()
schema = description.get("json-schema", None)
method = description["http-method"]
url = description["url"]
expected_result = None
if "default_result_code" in description:
expected_result = description["default_result_code"]
resources = [self.get_resource(r) for
r in description.get("resources", [])]
if hasattr(self, "resource"):
# Note(mkoderer): The resources list already contains an invalid
# entry (see get_resource).
# We just send a valid json-schema with it
valid_schema = None
if schema:
valid_schema = \
valid.ValidTestGenerator().generate_valid(schema)
new_url, body = self._http_arguments(valid_schema, url, method)
elif hasattr(self, "_negtest_name"):
schema_under_test = \
valid.ValidTestGenerator().generate_valid(schema)
local_expected_result = \
generator.generate_payload(self, schema_under_test)
if local_expected_result is not None:
expected_result = local_expected_result
new_url, body = \
self._http_arguments(schema_under_test, url, method)
else:
raise Exception("testscenarios are not active. Please make sure "
"that your test runner supports the load_tests "
"mechanism")
if "admin_client" in description and description["admin_client"]:
client = self.admin_client
else:
client = self.client
resp, resp_body = client.send_request(method, new_url,
resources, body=body)
self._check_negative_response(expected_result, resp.status, resp_body)
def _http_arguments(self, json_dict, url, method):
LOG.debug("dict: %s url: %s method: %s" % (json_dict, url, method))
if not json_dict:
return url, None
elif method in ["GET", "HEAD", "PUT", "DELETE"]:
return "%s?%s" % (url, urllib.urlencode(json_dict)), None
else:
return url, json.dumps(json_dict)
def _check_negative_response(self, expected_result, result, body):
self.assertTrue(result >= 400 and result < 500 and result != 413,
"Expected client error, got %s:%s" %
(result, body))
self.assertTrue(expected_result is None or expected_result == result,
"Expected %s, got %s:%s" %
(expected_result, result, body))
@classmethod
def set_resource(cls, name, resource):
"""
        This function can be used in setUpClass context to register a resource
for a test.
:param name: The name of the kind of resource such as "flavor", "role",
etc.
        :param resource: The id of the resource
"""
cls._resources[name] = resource
def get_resource(self, name):
"""
Return a valid uuid for a type of resource. If a real resource is
needed as part of a url then this method should return one. Otherwise
it can return None.
:param name: The name of the kind of resource such as "flavor", "role",
etc.
"""
if isinstance(name, dict):
name = name['name']
if hasattr(self, "resource") and self.resource[0] == name:
LOG.debug("Return invalid resource (%s) value: %s" %
(self.resource[0], self.resource[1]))
return self.resource[1]
if name in self._resources:
return self._resources[name]
return None
def SimpleNegativeAutoTest(klass):
"""
    This decorator registers a test function on the basis of the class name.
"""
@attr(type=['negative', 'gate'])
def generic_test(self):
if hasattr(self, '_schema'):
self.execute(self._schema)
cn = klass.__name__
cn = cn.replace('JSON', '')
cn = cn.replace('Test', '')
# NOTE(mkoderer): replaces uppercase chars inside the class name with '_'
lower_cn = re.sub('(?<!^)(?=[A-Z])', '_', cn).lower()
func_name = 'test_%s' % lower_cn
setattr(klass, func_name, generic_test)
return klass
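# Illustrative example of the name mapping (hypothetical class): decorating a
# class named FlavorsNegativeTestJSON generates a method called
# 'test_flavors_negative' ('JSON' and 'Test' stripped, CamelCase -> snake_case).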
def call_until_true(func, duration, sleep_for):
"""
Call the given function until it returns True (and return True) or
until the specified duration (in seconds) elapses (and return
False).
:param func: A zero argument callable that returns True on success.
:param duration: The number of seconds for which to attempt a
successful call of the function.
:param sleep_for: The number of seconds to sleep after an unsuccessful
invocation of the function.
"""
now = time.time()
timeout = now + duration
while now < timeout:
if func():
return True
time.sleep(sleep_for)
now = time.time()
return False
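# Illustrative usage (hypothetical client call): poll for up to 60 seconds,
# sleeping 2 seconds between attempts:
#   call_until_true(
#       lambda: client.show_port(pid)['port']['status'] == 'ACTIVE', 60, 2)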
|
cliffano/swaggy-jenkins | refs/heads/master | clients/python-flask/generated/openapi_server/models/multibranch_pipeline.py | 1 | # coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from openapi_server.models.base_model_ import Model
from openapi_server import util
class MultibranchPipeline(Model):
"""NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
Do not edit the class manually.
"""
def __init__(self, display_name: str=None, estimated_duration_in_millis: int=None, latest_run: str=None, name: str=None, organization: str=None, weather_score: int=None, branch_names: List[str]=None, number_of_failing_branches: int=None, number_of_failing_pull_requests: int=None, number_of_successful_branches: int=None, number_of_successful_pull_requests: int=None, total_number_of_branches: int=None, total_number_of_pull_requests: int=None, _class: str=None): # noqa: E501
"""MultibranchPipeline - a model defined in OpenAPI
:param display_name: The display_name of this MultibranchPipeline. # noqa: E501
:type display_name: str
:param estimated_duration_in_millis: The estimated_duration_in_millis of this MultibranchPipeline. # noqa: E501
:type estimated_duration_in_millis: int
:param latest_run: The latest_run of this MultibranchPipeline. # noqa: E501
:type latest_run: str
:param name: The name of this MultibranchPipeline. # noqa: E501
:type name: str
:param organization: The organization of this MultibranchPipeline. # noqa: E501
:type organization: str
:param weather_score: The weather_score of this MultibranchPipeline. # noqa: E501
:type weather_score: int
:param branch_names: The branch_names of this MultibranchPipeline. # noqa: E501
:type branch_names: List[str]
:param number_of_failing_branches: The number_of_failing_branches of this MultibranchPipeline. # noqa: E501
:type number_of_failing_branches: int
:param number_of_failing_pull_requests: The number_of_failing_pull_requests of this MultibranchPipeline. # noqa: E501
:type number_of_failing_pull_requests: int
:param number_of_successful_branches: The number_of_successful_branches of this MultibranchPipeline. # noqa: E501
:type number_of_successful_branches: int
:param number_of_successful_pull_requests: The number_of_successful_pull_requests of this MultibranchPipeline. # noqa: E501
:type number_of_successful_pull_requests: int
:param total_number_of_branches: The total_number_of_branches of this MultibranchPipeline. # noqa: E501
:type total_number_of_branches: int
:param total_number_of_pull_requests: The total_number_of_pull_requests of this MultibranchPipeline. # noqa: E501
:type total_number_of_pull_requests: int
:param _class: The _class of this MultibranchPipeline. # noqa: E501
:type _class: str
"""
self.openapi_types = {
'display_name': str,
'estimated_duration_in_millis': int,
'latest_run': str,
'name': str,
'organization': str,
'weather_score': int,
'branch_names': List[str],
'number_of_failing_branches': int,
'number_of_failing_pull_requests': int,
'number_of_successful_branches': int,
'number_of_successful_pull_requests': int,
'total_number_of_branches': int,
'total_number_of_pull_requests': int,
'_class': str
}
self.attribute_map = {
'display_name': 'displayName',
'estimated_duration_in_millis': 'estimatedDurationInMillis',
'latest_run': 'latestRun',
'name': 'name',
'organization': 'organization',
'weather_score': 'weatherScore',
'branch_names': 'branchNames',
'number_of_failing_branches': 'numberOfFailingBranches',
'number_of_failing_pull_requests': 'numberOfFailingPullRequests',
'number_of_successful_branches': 'numberOfSuccessfulBranches',
'number_of_successful_pull_requests': 'numberOfSuccessfulPullRequests',
'total_number_of_branches': 'totalNumberOfBranches',
'total_number_of_pull_requests': 'totalNumberOfPullRequests',
'_class': '_class'
}
self._display_name = display_name
self._estimated_duration_in_millis = estimated_duration_in_millis
self._latest_run = latest_run
self._name = name
self._organization = organization
self._weather_score = weather_score
self._branch_names = branch_names
self._number_of_failing_branches = number_of_failing_branches
self._number_of_failing_pull_requests = number_of_failing_pull_requests
self._number_of_successful_branches = number_of_successful_branches
self._number_of_successful_pull_requests = number_of_successful_pull_requests
self._total_number_of_branches = total_number_of_branches
self._total_number_of_pull_requests = total_number_of_pull_requests
self.__class = _class
@classmethod
def from_dict(cls, dikt) -> 'MultibranchPipeline':
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The MultibranchPipeline of this MultibranchPipeline. # noqa: E501
:rtype: MultibranchPipeline
"""
return util.deserialize_model(dikt, cls)
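    # Illustrative usage (hypothetical payload; wire names follow
    # attribute_map): MultibranchPipeline.from_dict(
    #     {'displayName': 'site-a', 'weatherScore': 100, 'branchNames': ['master']})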
@property
def display_name(self) -> str:
"""Gets the display_name of this MultibranchPipeline.
:return: The display_name of this MultibranchPipeline.
:rtype: str
"""
return self._display_name
@display_name.setter
def display_name(self, display_name: str):
"""Sets the display_name of this MultibranchPipeline.
:param display_name: The display_name of this MultibranchPipeline.
:type display_name: str
"""
self._display_name = display_name
@property
def estimated_duration_in_millis(self) -> int:
"""Gets the estimated_duration_in_millis of this MultibranchPipeline.
:return: The estimated_duration_in_millis of this MultibranchPipeline.
:rtype: int
"""
return self._estimated_duration_in_millis
@estimated_duration_in_millis.setter
def estimated_duration_in_millis(self, estimated_duration_in_millis: int):
"""Sets the estimated_duration_in_millis of this MultibranchPipeline.
:param estimated_duration_in_millis: The estimated_duration_in_millis of this MultibranchPipeline.
:type estimated_duration_in_millis: int
"""
self._estimated_duration_in_millis = estimated_duration_in_millis
@property
def latest_run(self) -> str:
"""Gets the latest_run of this MultibranchPipeline.
:return: The latest_run of this MultibranchPipeline.
:rtype: str
"""
return self._latest_run
@latest_run.setter
def latest_run(self, latest_run: str):
"""Sets the latest_run of this MultibranchPipeline.
:param latest_run: The latest_run of this MultibranchPipeline.
:type latest_run: str
"""
self._latest_run = latest_run
@property
def name(self) -> str:
"""Gets the name of this MultibranchPipeline.
:return: The name of this MultibranchPipeline.
:rtype: str
"""
return self._name
@name.setter
def name(self, name: str):
"""Sets the name of this MultibranchPipeline.
:param name: The name of this MultibranchPipeline.
:type name: str
"""
self._name = name
@property
def organization(self) -> str:
"""Gets the organization of this MultibranchPipeline.
:return: The organization of this MultibranchPipeline.
:rtype: str
"""
return self._organization
@organization.setter
def organization(self, organization: str):
"""Sets the organization of this MultibranchPipeline.
:param organization: The organization of this MultibranchPipeline.
:type organization: str
"""
self._organization = organization
@property
def weather_score(self) -> int:
"""Gets the weather_score of this MultibranchPipeline.
:return: The weather_score of this MultibranchPipeline.
:rtype: int
"""
return self._weather_score
@weather_score.setter
def weather_score(self, weather_score: int):
"""Sets the weather_score of this MultibranchPipeline.
:param weather_score: The weather_score of this MultibranchPipeline.
:type weather_score: int
"""
self._weather_score = weather_score
@property
def branch_names(self) -> List[str]:
"""Gets the branch_names of this MultibranchPipeline.
:return: The branch_names of this MultibranchPipeline.
:rtype: List[str]
"""
return self._branch_names
@branch_names.setter
def branch_names(self, branch_names: List[str]):
"""Sets the branch_names of this MultibranchPipeline.
:param branch_names: The branch_names of this MultibranchPipeline.
:type branch_names: List[str]
"""
self._branch_names = branch_names
@property
def number_of_failing_branches(self) -> int:
"""Gets the number_of_failing_branches of this MultibranchPipeline.
:return: The number_of_failing_branches of this MultibranchPipeline.
:rtype: int
"""
return self._number_of_failing_branches
@number_of_failing_branches.setter
def number_of_failing_branches(self, number_of_failing_branches: int):
"""Sets the number_of_failing_branches of this MultibranchPipeline.
:param number_of_failing_branches: The number_of_failing_branches of this MultibranchPipeline.
:type number_of_failing_branches: int
"""
self._number_of_failing_branches = number_of_failing_branches
@property
def number_of_failing_pull_requests(self) -> int:
"""Gets the number_of_failing_pull_requests of this MultibranchPipeline.
:return: The number_of_failing_pull_requests of this MultibranchPipeline.
:rtype: int
"""
return self._number_of_failing_pull_requests
@number_of_failing_pull_requests.setter
def number_of_failing_pull_requests(self, number_of_failing_pull_requests: int):
"""Sets the number_of_failing_pull_requests of this MultibranchPipeline.
:param number_of_failing_pull_requests: The number_of_failing_pull_requests of this MultibranchPipeline.
:type number_of_failing_pull_requests: int
"""
self._number_of_failing_pull_requests = number_of_failing_pull_requests
@property
def number_of_successful_branches(self) -> int:
"""Gets the number_of_successful_branches of this MultibranchPipeline.
:return: The number_of_successful_branches of this MultibranchPipeline.
:rtype: int
"""
return self._number_of_successful_branches
@number_of_successful_branches.setter
def number_of_successful_branches(self, number_of_successful_branches: int):
"""Sets the number_of_successful_branches of this MultibranchPipeline.
:param number_of_successful_branches: The number_of_successful_branches of this MultibranchPipeline.
:type number_of_successful_branches: int
"""
self._number_of_successful_branches = number_of_successful_branches
@property
def number_of_successful_pull_requests(self) -> int:
"""Gets the number_of_successful_pull_requests of this MultibranchPipeline.
:return: The number_of_successful_pull_requests of this MultibranchPipeline.
:rtype: int
"""
return self._number_of_successful_pull_requests
@number_of_successful_pull_requests.setter
def number_of_successful_pull_requests(self, number_of_successful_pull_requests: int):
"""Sets the number_of_successful_pull_requests of this MultibranchPipeline.
:param number_of_successful_pull_requests: The number_of_successful_pull_requests of this MultibranchPipeline.
:type number_of_successful_pull_requests: int
"""
self._number_of_successful_pull_requests = number_of_successful_pull_requests
@property
def total_number_of_branches(self) -> int:
"""Gets the total_number_of_branches of this MultibranchPipeline.
:return: The total_number_of_branches of this MultibranchPipeline.
:rtype: int
"""
return self._total_number_of_branches
@total_number_of_branches.setter
def total_number_of_branches(self, total_number_of_branches: int):
"""Sets the total_number_of_branches of this MultibranchPipeline.
:param total_number_of_branches: The total_number_of_branches of this MultibranchPipeline.
:type total_number_of_branches: int
"""
self._total_number_of_branches = total_number_of_branches
@property
def total_number_of_pull_requests(self) -> int:
"""Gets the total_number_of_pull_requests of this MultibranchPipeline.
:return: The total_number_of_pull_requests of this MultibranchPipeline.
:rtype: int
"""
return self._total_number_of_pull_requests
@total_number_of_pull_requests.setter
def total_number_of_pull_requests(self, total_number_of_pull_requests: int):
"""Sets the total_number_of_pull_requests of this MultibranchPipeline.
:param total_number_of_pull_requests: The total_number_of_pull_requests of this MultibranchPipeline.
:type total_number_of_pull_requests: int
"""
self._total_number_of_pull_requests = total_number_of_pull_requests
@property
def _class(self) -> str:
"""Gets the _class of this MultibranchPipeline.
:return: The _class of this MultibranchPipeline.
:rtype: str
"""
return self.__class
@_class.setter
def _class(self, _class: str):
"""Sets the _class of this MultibranchPipeline.
:param _class: The _class of this MultibranchPipeline.
:type _class: str
"""
self.__class = _class
|
jagguli/intellij-community | refs/heads/master | python/testData/inspections/PyUnboundLocalVariableInspection/SysExit.py | 83 | def f():
import sys
sys.exit(1) #pass
|
yongshengwang/hue | refs/heads/master | build/env/lib/python2.7/site-packages/Django-1.6.10-py2.7.egg/django/conf/locale/sl/__init__.py | 12133432 | |
sczzq/symmetrical-spoon | refs/heads/master | base-usage/shell/find/new.py | 12133432 | |
tangentlabs/wagtail | refs/heads/master | wagtail/wagtailforms/migrations/__init__.py | 12133432 | |
alrusdi/python-social-auth | refs/heads/master | social/tests/backends/test_strava.py | 87 | import json
from social.tests.backends.oauth import OAuth2Test
class StravaOAuthTest(OAuth2Test):
backend_path = 'social.backends.strava.StravaOAuth'
user_data_url = 'https://www.strava.com/api/v3/athlete'
expected_username = '227615'
access_token_body = json.dumps({
"access_token": "83ebeabdec09f6670863766f792ead24d61fe3f9",
"athlete": {
"id": 227615,
"resource_state": 3,
"firstname": "John",
"lastname": "Applestrava",
"profile_medium": "http://pics.com/227615/medium.jpg",
"profile": "http://pics.com/227615/large.jpg",
"city": "San Francisco",
"state": "California",
"country": "United States",
"sex": "M",
"friend": "null",
"follower": "null",
"premium": "true",
"created_at": "2008-01-01T17:44:00Z",
"updated_at": "2013-09-04T20:00:50Z",
"follower_count": 273,
"friend_count": 19,
"mutual_friend_count": 0,
"date_preference": "%m/%d/%Y",
"measurement_preference": "feet",
"email": "john@applestrava.com",
"clubs": [],
"bikes": [],
"shoes": []
}
})
user_data_body = json.dumps({
"id": 227615,
"resource_state": 2,
"firstname": "John",
"lastname": "Applestrava",
"profile_medium": "http://pics.com/227615/medium.jpg",
"profile": "http://pics.com/227615/large.jpg",
"city": "San Francisco",
"state": "CA",
"country": "United States",
"sex": "M",
"friend": "null",
"follower": "accepted",
"premium": "true",
"created_at": "2011-03-19T21:59:57Z",
"updated_at": "2013-09-05T16:46:54Z",
"approve_followers": "false"
})
def test_login(self):
self.do_login()
def test_partial_pipeline(self):
self.do_partial_pipeline()
|
jmartu/testing | refs/heads/master | venv/lib/python3.6/site-packages/setuptools/py27compat.py | 189 | """
Compatibility Support for Python 2.7 and earlier
"""
import platform
from setuptools.extern import six
def get_all_headers(message, key):
"""
Given an HTTPMessage, return all headers matching a given key.
"""
return message.get_all(key)
if six.PY2:
def get_all_headers(message, key):
return message.getheaders(key)
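# Illustrative example: get_all_headers(msg, 'Set-Cookie') returns every
# Set-Cookie value from an HTTPMessage, via get_all() on Python 3 and
# getheaders() on Python 2.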
linux_py2_ascii = (
platform.system() == 'Linux' and
six.PY2
)
rmtree_safe = str if linux_py2_ascii else lambda x: x
"""Workaround for http://bugs.python.org/issue24672"""
|
ccnmtl/lettuce | refs/heads/master | tests/integration/lib/Django-1.3/tests/regressiontests/urlpatterns_reverse/included_urls.py | 109 | from django.conf.urls.defaults import *
from views import empty_view
urlpatterns = patterns('',
url(r'^$', empty_view, name="inner-nothing"),
url(r'^extra/(?P<extra>\w+)/$', empty_view, name="inner-extra"),
url(r'^(?P<one>\d+)|(?P<two>\d+)/$', empty_view, name="inner-disjunction"),
)
|
gogobook/wagtail | refs/heads/master | wagtail/wagtailredirects/migrations/0005_capitalizeverbose.py | 2 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('wagtailredirects', '0004_set_unique_on_path_and_site'),
]
operations = [
migrations.AlterModelOptions(
name='redirect',
options={'verbose_name': 'redirect'},
),
migrations.AlterField(
model_name='redirect',
name='is_permanent',
field=models.BooleanField(default=True, help_text="Recommended. Permanent redirects ensure search engines forget the old page (the 'Redirect from') and index the new page instead.", verbose_name='permanent'),
),
migrations.AlterField(
model_name='redirect',
name='old_path',
field=models.CharField(max_length=255, db_index=True, verbose_name='redirect from'),
),
migrations.AlterField(
model_name='redirect',
name='redirect_link',
field=models.URLField(blank=True, verbose_name='redirect to any URL'),
),
migrations.AlterField(
model_name='redirect',
name='redirect_page',
field=models.ForeignKey(on_delete=models.CASCADE, blank=True, null=True, to='wagtailcore.Page', verbose_name='redirect to a page'),
),
migrations.AlterField(
model_name='redirect',
name='site',
field=models.ForeignKey(on_delete=models.CASCADE, blank=True, related_name='redirects', null=True, to='wagtailcore.Site', verbose_name='site'),
),
]
|
liangazhou/django-rdp | refs/heads/master | packages/Django-1.8.6/tests/urlpatterns_reverse/urls_error_handlers_callables.py | 513 | # Used by the ErrorHandlerResolutionTests test case.
from .views import empty_view
urlpatterns = []
handler400 = empty_view
handler404 = empty_view
handler500 = empty_view
|
MrJohz/wfething-backend | refs/heads/master | test/__init__.py | 12133432 | |
googlearchive/bigquery-samples-python | refs/heads/master | python/samples/__init__.py | 12133432 | |
superdesk/Live-Blog | refs/heads/master | plugins/embed/gui-themes/themes/satakansa/desktop/__init__.py | 12133432 | |
badock/nova | refs/heads/master | nova/tests/keymgr/__init__.py | 12133432 | |
django-school-management/ssms | refs/heads/master | ssms/common/common/__init__.py | 12133432 | |
daltemen/example-oracle-django | refs/heads/master | hometech/estructuras/__init__.py | 12133432 | |
gangadharkadam/sterp | refs/heads/develop | erpnext/stock/doctype/material_request/test_material_request.py | 13 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
# ERPNext - web based ERP (http://erpnext.com)
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe, unittest
from frappe.utils import flt
class TestMaterialRequest(unittest.TestCase):
def setUp(self):
frappe.defaults.set_global_default("auto_accounting_for_stock", 0)
def test_make_purchase_order(self):
from erpnext.stock.doctype.material_request.material_request import make_purchase_order
mr = frappe.copy_doc(test_records[0]).insert()
self.assertRaises(frappe.ValidationError, make_purchase_order,
mr.name)
mr = frappe.get_doc("Material Request", mr.name)
mr.submit()
po = make_purchase_order(mr.name)
self.assertEquals(po.doctype, "Purchase Order")
self.assertEquals(len(po.get("po_details")), len(mr.get("indent_details")))
def test_make_supplier_quotation(self):
from erpnext.stock.doctype.material_request.material_request import make_supplier_quotation
mr = frappe.copy_doc(test_records[0]).insert()
self.assertRaises(frappe.ValidationError, make_supplier_quotation, mr.name)
mr = frappe.get_doc("Material Request", mr.name)
mr.submit()
sq = make_supplier_quotation(mr.name)
self.assertEquals(sq.doctype, "Supplier Quotation")
self.assertEquals(len(sq.get("quotation_items")), len(mr.get("indent_details")))
def test_make_stock_entry(self):
from erpnext.stock.doctype.material_request.material_request import make_stock_entry
mr = frappe.copy_doc(test_records[0]).insert()
self.assertRaises(frappe.ValidationError, make_stock_entry,
mr.name)
mr = frappe.get_doc("Material Request", mr.name)
mr.material_request_type = "Transfer"
mr.submit()
se = make_stock_entry(mr.name)
self.assertEquals(se.doctype, "Stock Entry")
self.assertEquals(len(se.get("mtn_details")), len(mr.get("indent_details")))
def _test_requested_qty(self, qty1, qty2):
self.assertEqual(flt(frappe.db.get_value("Bin", {"item_code": "_Test Item Home Desktop 100",
"warehouse": "_Test Warehouse - _TC"}, "indented_qty")), qty1)
self.assertEqual(flt(frappe.db.get_value("Bin", {"item_code": "_Test Item Home Desktop 200",
"warehouse": "_Test Warehouse - _TC"}, "indented_qty")), qty2)
def _insert_stock_entry(self, qty1, qty2):
se = frappe.get_doc({
"company": "_Test Company",
"doctype": "Stock Entry",
"posting_date": "2013-03-01",
"posting_time": "00:00:00",
"purpose": "Material Receipt",
"fiscal_year": "_Test Fiscal Year 2013",
"mtn_details": [
{
"conversion_factor": 1.0,
"doctype": "Stock Entry Detail",
"item_code": "_Test Item Home Desktop 100",
"parentfield": "mtn_details",
"incoming_rate": 100,
"qty": qty1,
"stock_uom": "_Test UOM 1",
"transfer_qty": qty1,
"uom": "_Test UOM 1",
"t_warehouse": "_Test Warehouse 1 - _TC",
},
{
"conversion_factor": 1.0,
"doctype": "Stock Entry Detail",
"item_code": "_Test Item Home Desktop 200",
"parentfield": "mtn_details",
"incoming_rate": 100,
"qty": qty2,
"stock_uom": "_Test UOM 1",
"transfer_qty": qty2,
"uom": "_Test UOM 1",
"t_warehouse": "_Test Warehouse 1 - _TC",
}
]
})
se.insert()
se.submit()
def test_completed_qty_for_purchase(self):
frappe.db.sql("""delete from `tabBin`""")
# submit material request of type Purchase
mr = frappe.copy_doc(test_records[0])
mr.insert()
mr.submit()
# check if per complete is None
self.assertEquals(mr.per_ordered, None)
self.assertEquals(mr.get("indent_details")[0].ordered_qty, 0)
self.assertEquals(mr.get("indent_details")[1].ordered_qty, 0)
self._test_requested_qty(54.0, 3.0)
# map a purchase order
from erpnext.stock.doctype.material_request.material_request import make_purchase_order
po_doc = make_purchase_order(mr.name)
po_doc.supplier = "_Test Supplier"
po_doc.transaction_date = "2013-07-07"
po_doc.get("po_details")[0].qty = 27.0
po_doc.get("po_details")[1].qty = 1.5
po_doc.get("po_details")[0].schedule_date = "2013-07-09"
po_doc.get("po_details")[1].schedule_date = "2013-07-09"
# check for stopped status of Material Request
po = frappe.copy_doc(po_doc)
po.insert()
po.load_from_db()
mr.update_status('Stopped')
self.assertRaises(frappe.InvalidStatusError, po.submit)
frappe.db.set(po, "docstatus", 1)
self.assertRaises(frappe.InvalidStatusError, po.cancel)
# resubmit and check for per complete
mr.load_from_db()
mr.update_status('Submitted')
po = frappe.copy_doc(po_doc)
po.insert()
po.submit()
# check if per complete is as expected
mr.load_from_db()
self.assertEquals(mr.per_ordered, 50)
self.assertEquals(mr.get("indent_details")[0].ordered_qty, 27.0)
self.assertEquals(mr.get("indent_details")[1].ordered_qty, 1.5)
self._test_requested_qty(27.0, 1.5)
po.cancel()
# check if per complete is as expected
mr.load_from_db()
self.assertEquals(mr.per_ordered, None)
self.assertEquals(mr.get("indent_details")[0].ordered_qty, None)
self.assertEquals(mr.get("indent_details")[1].ordered_qty, None)
self._test_requested_qty(54.0, 3.0)
def test_completed_qty_for_transfer(self):
frappe.db.sql("""delete from `tabBin`""")
frappe.db.sql("""delete from `tabStock Ledger Entry`""")
# submit material request of type Purchase
mr = frappe.copy_doc(test_records[0])
mr.material_request_type = "Transfer"
mr.insert()
mr.submit()
# check if per complete is None
self.assertEquals(mr.per_ordered, None)
self.assertEquals(mr.get("indent_details")[0].ordered_qty, 0)
self.assertEquals(mr.get("indent_details")[1].ordered_qty, 0)
self._test_requested_qty(54.0, 3.0)
from erpnext.stock.doctype.material_request.material_request import make_stock_entry
# map a stock entry
se_doc = make_stock_entry(mr.name)
se_doc.update({
"posting_date": "2013-03-01",
"posting_time": "01:00",
"fiscal_year": "_Test Fiscal Year 2013",
})
se_doc.get("mtn_details")[0].update({
"qty": 27.0,
"transfer_qty": 27.0,
"s_warehouse": "_Test Warehouse 1 - _TC",
"incoming_rate": 1.0
})
se_doc.get("mtn_details")[1].update({
"qty": 1.5,
"transfer_qty": 1.5,
"s_warehouse": "_Test Warehouse 1 - _TC",
"incoming_rate": 1.0
})
# make available the qty in _Test Warehouse 1 before transfer
self._insert_stock_entry(27.0, 1.5)
# check for stopped status of Material Request
se = frappe.copy_doc(se_doc)
se.insert()
mr.update_status('Stopped')
self.assertRaises(frappe.InvalidStatusError, se.submit)
mr.update_status('Submitted')
se.ignore_validate_update_after_submit = True
se.submit()
mr.update_status('Stopped')
self.assertRaises(frappe.InvalidStatusError, se.cancel)
mr.update_status('Submitted')
se = frappe.copy_doc(se_doc)
se.insert()
se.submit()
# check if per complete is as expected
mr.load_from_db()
self.assertEquals(mr.per_ordered, 50)
self.assertEquals(mr.get("indent_details")[0].ordered_qty, 27.0)
self.assertEquals(mr.get("indent_details")[1].ordered_qty, 1.5)
self._test_requested_qty(27.0, 1.5)
# check if per complete is as expected for Stock Entry cancelled
se.cancel()
mr.load_from_db()
self.assertEquals(mr.per_ordered, 0)
self.assertEquals(mr.get("indent_details")[0].ordered_qty, 0)
self.assertEquals(mr.get("indent_details")[1].ordered_qty, 0)
self._test_requested_qty(54.0, 3.0)
def test_completed_qty_for_over_transfer(self):
frappe.db.sql("""delete from `tabBin`""")
frappe.db.sql("""delete from `tabStock Ledger Entry`""")
# submit material request of type Purchase
mr = frappe.copy_doc(test_records[0])
mr.material_request_type = "Transfer"
mr.insert()
mr.submit()
# check if per complete is None
self.assertEquals(mr.per_ordered, None)
self.assertEquals(mr.get("indent_details")[0].ordered_qty, 0)
self.assertEquals(mr.get("indent_details")[1].ordered_qty, 0)
self._test_requested_qty(54.0, 3.0)
# map a stock entry
from erpnext.stock.doctype.material_request.material_request import make_stock_entry
se_doc = make_stock_entry(mr.name)
se_doc.update({
"posting_date": "2013-03-01",
"posting_time": "00:00",
"fiscal_year": "_Test Fiscal Year 2013",
})
se_doc.get("mtn_details")[0].update({
"qty": 60.0,
"transfer_qty": 60.0,
"s_warehouse": "_Test Warehouse 1 - _TC",
"incoming_rate": 1.0
})
se_doc.get("mtn_details")[1].update({
"qty": 3.0,
"transfer_qty": 3.0,
"s_warehouse": "_Test Warehouse 1 - _TC",
"incoming_rate": 1.0
})
# make available the qty in _Test Warehouse 1 before transfer
self._insert_stock_entry(60.0, 3.0)
# check for stopped status of Material Request
se = frappe.copy_doc(se_doc)
se.insert()
mr.update_status('Stopped')
self.assertRaises(frappe.InvalidStatusError, se.submit)
self.assertRaises(frappe.InvalidStatusError, se.cancel)
mr.update_status('Submitted')
se = frappe.copy_doc(se_doc)
se.insert()
se.submit()
# check if per complete is as expected
mr.load_from_db()
self.assertEquals(mr.per_ordered, 100)
self.assertEquals(mr.get("indent_details")[0].ordered_qty, 60.0)
self.assertEquals(mr.get("indent_details")[1].ordered_qty, 3.0)
self._test_requested_qty(0.0, 0.0)
# check if per complete is as expected for Stock Entry cancelled
se.cancel()
mr.load_from_db()
self.assertEquals(mr.per_ordered, 0)
self.assertEquals(mr.get("indent_details")[0].ordered_qty, 0)
self.assertEquals(mr.get("indent_details")[1].ordered_qty, 0)
self._test_requested_qty(54.0, 3.0)
def test_incorrect_mapping_of_stock_entry(self):
# submit material request of type Purchase
mr = frappe.copy_doc(test_records[0])
mr.material_request_type = "Transfer"
mr.insert()
mr.submit()
# map a stock entry
from erpnext.stock.doctype.material_request.material_request import make_stock_entry
se_doc = make_stock_entry(mr.name)
se_doc.update({
"posting_date": "2013-03-01",
"posting_time": "00:00",
"fiscal_year": "_Test Fiscal Year 2013",
})
se_doc.get("mtn_details")[0].update({
"qty": 60.0,
"transfer_qty": 60.0,
"s_warehouse": "_Test Warehouse - _TC",
"t_warehouse": "_Test Warehouse 1 - _TC",
"incoming_rate": 1.0
})
se_doc.get("mtn_details")[1].update({
"qty": 3.0,
"transfer_qty": 3.0,
"s_warehouse": "_Test Warehouse 1 - _TC",
"incoming_rate": 1.0
})
# check for stopped status of Material Request
se = frappe.copy_doc(se_doc)
self.assertRaises(frappe.MappingMismatchError, se.insert)
def test_warehouse_company_validation(self):
from erpnext.stock.utils import InvalidWarehouseCompany
mr = frappe.copy_doc(test_records[0])
mr.company = "_Test Company 1"
self.assertRaises(InvalidWarehouseCompany, mr.insert)
test_dependencies = ["Currency Exchange"]
test_records = frappe.get_test_records('Material Request')
|
cirosantilli/sulley | refs/heads/master | unit_tests/legos.py | 6 | from sulley import *
def run ():
tag()
ndr_string()
ber()
# clear out the requests.
blocks.REQUESTS = {}
blocks.CURRENT = None
########################################################################################################################
def tag ():
s_initialize("UNIT TEST TAG 1")
s_lego("tag", value="pedram")
req = s_get("UNIT TEST TAG 1")
print "LEGO MUTATION COUNTS:"
print "\ttag: %d" % req.num_mutations()
########################################################################################################################
def ndr_string ():
s_initialize("UNIT TEST NDR 1")
s_lego("ndr_string", value="pedram")
req = s_get("UNIT TEST NDR 1")
# TODO: unfinished!
#print req.render()
########################################################################################################################
def ber ():
s_initialize("UNIT TEST BER 1")
s_lego("ber_string", value="pedram")
req = s_get("UNIT TEST BER 1")
assert(s_render() == "\x04\x84\x00\x00\x00\x06\x70\x65\x64\x72\x61\x6d")
s_mutate()
assert(s_render() == "\x04\x84\x00\x00\x00\x00\x70\x65\x64\x72\x61\x6d")
s_initialize("UNIT TEST BER 2")
s_lego("ber_integer", value=0xdeadbeef)
req = s_get("UNIT TEST BER 2")
assert(s_render() == "\x02\x04\xde\xad\xbe\xef")
s_mutate()
assert(s_render() == "\x02\x04\x00\x00\x00\x00")
s_mutate()
assert(s_render() == "\x02\x04\x00\x00\x00\x01") |
auto-mat/klub | refs/heads/diakonie | apps/aklub/tests/test_mailing.py | 1 | # -*- coding: utf-8 -*-
# Author: Petr Dlouhý <petr.dlouhy@auto-mat.cz>
#
# Copyright (C) 2017 o.s. Auto*Mat
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
from django.contrib.messages.storage.fallback import FallbackStorage
from django.core import mail
from django.test import RequestFactory, TransactionTestCase
from django.test.utils import override_settings
from flexible_filter_conditions.models import NamedCondition
from freezegun import freeze_time
from model_mommy import mommy
from .. import mailing, models
@override_settings(
CELERY_ALWAYS_EAGER=True,
)
class MailingTest(TransactionTestCase):
fixtures = ['conditions', 'users']
def setUp(self):
self.factory = RequestFactory()
self.request = self.factory.get('')
setattr(self.request, 'session', 'session')
messages = FallbackStorage(self.request)
setattr(self.request, '_messages', messages)
self.unit = mommy.make(
"aklub.administrativeunit",
name="test_unit",
from_email_str="<example@some.com>",
from_email_address="example@some.com",
)
@freeze_time("2015-5-1")
def test_mailing_fake_user(self):
sending_user = mommy.make('aklub.userprofile', first_name="Testing", last_name="UserInCampaign")
mommy.make("ProfileEmail", user=sending_user, email="test@test.com", is_primary=True)
inter_category = mommy.make('interactions.interactioncategory', category='testcategory')
inter_type = mommy.make('interactions.interactiontype', category=inter_category, name='testtype', send_email=True)
condition = mommy.make("flexible_filter_conditions.NamedCondition")
c = mommy.make(
"aklub.AutomaticCommunication",
condition=condition,
template="Testing template",
subject="Testing email",
method_type=inter_type,
administrative_unit=self.unit,
)
# test userprofile email
mailing.send_fake_communication(c, sending_user, self.request)
self.assertEqual(len(mail.outbox), 1)
msg = mail.outbox[0]
self.assertEqual(msg.recipients(), ['<example@some.com>'])
self.assertEqual(msg.subject, 'Testing email')
self.assertIn("Testing template", msg.body)
@freeze_time("2015-5-1")
def test_mailing_fake_company(self):
sending_company = mommy.make('aklub.companyprofile', name="Testing Company")
mommy.make(
"aklub.companycontact",
company=sending_company,
email="test_company@test.com",
is_primary=True,
administrative_unit=self.unit,
)
inter_category = mommy.make('interactions.interactioncategory', category='testcategory')
inter_type = mommy.make('interactions.interactiontype', category=inter_category, name='testtype', send_email=True)
condition = mommy.make("flexible_filter_conditions.NamedCondition")
c = mommy.make(
"aklub.AutomaticCommunication",
condition=condition,
template="Testing template",
subject="Testing email",
method_type=inter_type,
administrative_unit=self.unit,
)
mailing.send_fake_communication(c, sending_company, self.request)
self.assertEqual(len(mail.outbox), 1)
msg = mail.outbox[0]
self.assertEqual(msg.recipients(), ['<example@some.com>'])
self.assertEqual(msg.subject, 'Testing email')
self.assertIn("Testing template", msg.body)
@freeze_time("2015-5-1")
def test_mailing_fail_user(self):
sending_user = models.UserProfile.objects.create(
first_name="Testing",
last_name="UserInCampaign",
)
inter_category = mommy.make('interactions.interactioncategory', category='testcategory')
inter_type = mommy.make('interactions.interactiontype', category=inter_category, name='testtype', send_email=True)
mommy.make("ProfileEmail", user=sending_user, email="test@test.com", is_primary=True)
c = models.AutomaticCommunication.objects.create(
condition=NamedCondition.objects.create(),
template="Testing template",
subject="Testing email",
subject_en="Testing email",
method_type=inter_type,
administrative_unit=self.unit,
)
u = models.Profile.objects.get(email='test.user1@email.cz')
with self.assertRaises(Exception) as ex:
mailing.send_communication_sync(c.id, 'automatic', u.id, sending_user.id)
self.assertEqual(str(ex.exception), "Message template is empty for one of the language variants.")
@freeze_time("2015-5-1")
def test_mailing_fail_company(self):
sending_company = mommy.make('aklub.companyprofile', name="Testing Company")
mommy.make(
"aklub.companycontact",
company=sending_company,
email="test_company@test.com",
is_primary=True,
administrative_unit=self.unit,
)
inter_category = mommy.make('interactions.interactioncategory', category='testcategory')
inter_type = mommy.make('interactions.interactiontype', category=inter_category, name='testtype', send_email=True)
c = models.AutomaticCommunication.objects.create(
condition=NamedCondition.objects.create(),
template="Testing template",
subject="Testing email",
subject_en="Testing email",
method_type=inter_type,
administrative_unit=self.unit,
)
u = models.Profile.objects.get(email='test.user1@email.cz')
with self.assertRaises(Exception) as ex:
mailing.send_communication_sync(c.id, 'automatic', u.id, sending_company.id)
self.assertEqual(str(ex.exception), "Message template is empty for one of the language variants.")
@freeze_time("2015-5-1")
def test_mailing_user(self):
sending_user = models.UserProfile.objects.create(
first_name="Testing",
last_name="UserInCampaign",
)
mommy.make("ProfileEmail", user=sending_user, email="test@test.com", is_primary=True)
inter_category = mommy.make('interactions.interactioncategory', category='testcategory')
inter_type = mommy.make('interactions.interactiontype', category=inter_category, name='testtype', send_email=True)
c = models.MassCommunication.objects.create(
template="Testing template",
template_en="Testing template en",
subject="Testing email",
subject_en="Testing email en",
method_type=inter_type,
date="2015-5-1",
administrative_unit=self.unit,
)
c.send_to_users.set(models.Profile.objects.filter(pk__in=[3, 2978, 2979]))
mailing.send_mass_communication(c, sending_user, self.request)
self.assertEqual(len(mail.outbox), 3)
mail.outbox.sort(key=lambda m: m.recipients()[0])
msg = mail.outbox[2]
self.assertEqual(msg.recipients(), ['without_payments@email.cz'])
self.assertEqual(msg.subject, 'Testing email')
self.assertIn("Testing template", msg.body)
msg = mail.outbox[1]
self.assertEqual(msg.recipients(), ['test.user@email.cz'])
self.assertEqual(msg.subject, 'Testing email')
self.assertIn("Testing template", msg.body)
msg1 = mail.outbox[0]
self.assertEqual(msg1.recipients(), ['test.user1@email.cz'])
self.assertEqual(msg1.subject, 'Testing email en')
self.assertIn("Testing template", msg1.body)
@freeze_time("2015-5-1")
def test_mailing_company(self):
sending_user = models.UserProfile.objects.create(
first_name="Testing",
last_name="UserInCampaign",
)
mommy.make("ProfileEmail", user=sending_user, email="test@test.com", is_primary=True)
company1 = mommy.make('aklub.companyprofile', name="Testing Company")
mommy.make(
"aklub.companycontact",
company=company1,
email="test_company@test.com",
is_primary=True,
administrative_unit=self.unit,
)
company2 = mommy.make('aklub.companyprofile', name="Testing Company2", language='en')
mommy.make(
"aklub.companycontact",
company=company2,
email="test_company2@test.com",
is_primary=True,
administrative_unit=self.unit,
)
inter_category = mommy.make('interactions.interactioncategory', category='testcategory')
inter_type = mommy.make('interactions.interactiontype', category=inter_category, name='testtype', send_email=True)
c = models.MassCommunication.objects.create(
template="Testing template",
template_en="Testing template en",
subject="Testing email",
subject_en="Testing email en",
method_type=inter_type,
date="2015-5-1",
administrative_unit=self.unit,
)
c.send_to_users.set([company1.id, company2.id])
mailing.send_mass_communication(c, sending_user, self.request)
self.assertEqual(len(mail.outbox), 2)
mail.outbox.sort(key=lambda m: m.recipients()[0])
msg = mail.outbox[1]
self.assertEqual(msg.recipients(), ['test_company@test.com'])
self.assertEqual(msg.subject, 'Testing email')
self.assertIn("Testing template", msg.body)
msg1 = mail.outbox[0]
self.assertEqual(msg1.recipients(), ['test_company2@test.com'])
self.assertEqual(msg1.subject, 'Testing email en')
self.assertIn("Testing template", msg1.body)
|
shakamunyi/neutron-dvr | refs/heads/master | neutron/db/migration/alembic_migrations/versions/5a875d0e5c_ryu.py | 8 | # Copyright 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Mark McClain, DreamHost
"""ryu
This retroactively provides migration support for
https://review.openstack.org/#/c/11204/
Revision ID: 5a875d0e5c
Revises: 2c4af419145b
Create Date: 2012-12-18 12:32:04.482477
"""
# revision identifiers, used by Alembic.
revision = '5a875d0e5c'
down_revision = '2c4af419145b'
# Change to ['*'] if this migration applies to all plugins
migration_for_plugins = [
'neutron.plugins.ryu.ryu_neutron_plugin.RyuNeutronPluginV2'
]
from alembic import op
import sqlalchemy as sa
from neutron.db import migration
def upgrade(active_plugins=None, options=None):
if not migration.should_run(active_plugins, migration_for_plugins):
return
op.create_table(
'tunnelkeys',
sa.Column('network_id', sa.String(length=36), nullable=False),
sa.Column('tunnel_key', sa.Integer(), autoincrement=False,
nullable=False),
sa.ForeignKeyConstraint(['network_id'], ['networks.id'],
ondelete='CASCADE'),
sa.PrimaryKeyConstraint('tunnel_key')
)
op.create_table(
'tunnelkeylasts',
sa.Column('last_key', sa.Integer(), autoincrement=False,
nullable=False),
sa.PrimaryKeyConstraint('last_key')
)
def downgrade(active_plugins=None, options=None):
if not migration.should_run(active_plugins, migration_for_plugins):
return
op.drop_table('tunnelkeylasts')
op.drop_table('tunnelkeys')
|
manashmndl/dfvfs | refs/heads/master | tests/file_io/vhdi_file_io.py | 2 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""Tests for the file-like object implementation using pyvhdi."""
import os
import unittest
from dfvfs.path import os_path_spec
from dfvfs.path import vhdi_path_spec
from tests.file_io import test_lib
class VhdiFileTest(test_lib.ImageFileTestCase):
"""The unit test for the VHD image file-like object."""
def setUp(self):
"""Sets up the needed objects used throughout the test."""
super(VhdiFileTest, self).setUp()
test_file = os.path.join(u'test_data', u'image.vhd')
path_spec = os_path_spec.OSPathSpec(location=test_file)
self._vhdi_path_spec = vhdi_path_spec.VhdiPathSpec(parent=path_spec)
def testOpenCloseInode(self):
"""Test the open and close functionality using an inode."""
self._TestOpenCloseInode(self._vhdi_path_spec)
def testOpenCloseLocation(self):
"""Test the open and close functionality using a location."""
self._TestOpenCloseLocation(self._vhdi_path_spec)
def testSeek(self):
"""Test the seek functionality."""
self._TestSeek(self._vhdi_path_spec)
def testRead(self):
"""Test the read functionality."""
self._TestRead(self._vhdi_path_spec)
if __name__ == '__main__':
unittest.main()
|
rahul67/hue | refs/heads/master | desktop/core/ext-py/Django-1.6.10/django/conf/locale/nl/__init__.py | 12133432 | |
daoluan/decode-Django | refs/heads/master | Django-1.5.1/django/contrib/localflavor/tr/__init__.py | 12133432 | |
turbio/haveijackedit | refs/heads/master | index/data_uri.py | 1 | import mimetypes
import re
import urllib
import base64
import textwrap  # needed by DataURI.wrap()
MIMETYPE_REGEX = r'[\w]+\/[\w\-\+\.]+'
_MIMETYPE_RE = re.compile('^{}$'.format(MIMETYPE_REGEX))
CHARSET_REGEX = r'[\w\-\+\.]+'
_CHARSET_RE = re.compile('^{}$'.format(CHARSET_REGEX))
DATA_URI_REGEX = (
r'data:' +
r'(?P<mimetype>{})?'.format(MIMETYPE_REGEX) +
r'(?:\;charset\=(?P<charset>{}))?'.format(CHARSET_REGEX) +
r'(?P<base64>\;base64)?' +
r',(?P<data>.*)')
_DATA_URI_RE = re.compile(r'^{}$'.format(DATA_URI_REGEX), re.DOTALL)
class DataURI(str):
@classmethod
def make(cls, mimetype, charset, base64, data):
parts = ['data:']
if mimetype is not None:
if not _MIMETYPE_RE.match(mimetype):
raise ValueError("Invalid mimetype: %r" % mimetype)
parts.append(mimetype)
if charset is not None:
if not _CHARSET_RE.match(charset):
raise ValueError("Invalid charset: %r" % charset)
parts.extend([';charset=', charset])
if base64:
parts.append(';base64')
encoded_data = data.encode('base64').replace('\n', '')
else:
encoded_data = urllib.quote(data)
parts.extend([',', encoded_data])
return cls(''.join(parts))
@classmethod
def from_file(cls, filename, charset=None, base64=True):
mimetype, _ = mimetypes.guess_type(filename, strict=False)
with open(filename) as fp:
data = fp.read()
return cls.make(mimetype, charset, base64, data)
def __new__(cls, *args, **kwargs):
uri = super(DataURI, cls).__new__(cls, *args, **kwargs)
uri._parse # Trigger any ValueErrors on instantiation.
return uri
def __repr__(self):
return 'DataURI(%s)' % (super(DataURI, self).__repr__(),)
def wrap(self, width=76):
return type(self)('\n'.join(textwrap.wrap(self, width)))
@property
def mimetype(self):
return self._parse[0]
@property
def charset(self):
return self._parse[1]
@property
def is_base64(self):
return self._parse[2]
@property
def data(self):
return self._parse[3]
@property
def _parse(self):
match = _DATA_URI_RE.match(self)
if not match:
raise ValueError("Not a valid data URI: %r" % self)
mimetype = match.group('mimetype') or None
charset = match.group('charset') or None
if match.group('base64'):
data = base64.b64decode(match.group('data'))
else:
data = urllib.unquote(match.group('data'))
return mimetype, charset, bool(match.group('base64')), data
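# A minimal usage sketch (illustrative, not part of the original module):
# round-trip a payload through make() and the parsing properties.
#
# >>> uri = DataURI.make('text/plain', 'utf-8', base64=True, data='hello')
# >>> uri.mimetype, uri.charset, uri.is_base64, uri.data
# ('text/plain', 'utf-8', True, 'hello')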
|
datapythonista/pandas | refs/heads/master | pandas/tests/frame/methods/test_matmul.py | 4 | import operator
import numpy as np
import pytest
from pandas import (
DataFrame,
Index,
Series,
)
import pandas._testing as tm
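# operator.matmul(a, b) is the functional form of the binary ``a @ b``
# matrix multiplication operator (PEP 465); both spellings are exercised
# below.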
class TestMatMul:
def test_matmul(self):
# matmul test is for GH#10259
a = DataFrame(
np.random.randn(3, 4), index=["a", "b", "c"], columns=["p", "q", "r", "s"]
)
b = DataFrame(
np.random.randn(4, 2), index=["p", "q", "r", "s"], columns=["one", "two"]
)
# DataFrame @ DataFrame
result = operator.matmul(a, b)
expected = DataFrame(
np.dot(a.values, b.values), index=["a", "b", "c"], columns=["one", "two"]
)
tm.assert_frame_equal(result, expected)
# DataFrame @ Series
result = operator.matmul(a, b.one)
expected = Series(np.dot(a.values, b.one.values), index=["a", "b", "c"])
tm.assert_series_equal(result, expected)
# np.array @ DataFrame
result = operator.matmul(a.values, b)
assert isinstance(result, DataFrame)
assert result.columns.equals(b.columns)
assert result.index.equals(Index(range(3)))
expected = np.dot(a.values, b.values)
tm.assert_almost_equal(result.values, expected)
# nested list @ DataFrame (__rmatmul__)
result = operator.matmul(a.values.tolist(), b)
expected = DataFrame(
np.dot(a.values, b.values), index=["a", "b", "c"], columns=["one", "two"]
)
tm.assert_almost_equal(result.values, expected.values)
# mixed dtype DataFrame @ DataFrame
a["q"] = a.q.round().astype(int)
result = operator.matmul(a, b)
expected = DataFrame(
np.dot(a.values, b.values), index=["a", "b", "c"], columns=["one", "two"]
)
tm.assert_frame_equal(result, expected)
# different dtypes DataFrame @ DataFrame
a = a.astype(int)
result = operator.matmul(a, b)
expected = DataFrame(
np.dot(a.values, b.values), index=["a", "b", "c"], columns=["one", "two"]
)
tm.assert_frame_equal(result, expected)
# unaligned
df = DataFrame(np.random.randn(3, 4), index=[1, 2, 3], columns=range(4))
df2 = DataFrame(np.random.randn(5, 3), index=range(5), columns=[1, 2, 3])
with pytest.raises(ValueError, match="aligned"):
operator.matmul(df, df2)
def test_matmul_message_shapes(self):
# GH#21581 exception message should reflect original shapes,
# not transposed shapes
a = np.random.rand(10, 4)
b = np.random.rand(5, 3)
df = DataFrame(b)
msg = r"shapes \(10, 4\) and \(5, 3\) not aligned"
with pytest.raises(ValueError, match=msg):
a @ df
with pytest.raises(ValueError, match=msg):
a.tolist() @ df
|
adversary-org/dodgpg | refs/heads/master | dodgpg/_version.py | 1 |
IN_LONG_VERSION_PY = True
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (build by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.7+ (https://github.com/warner/python-versioneer)
# these strings will be replaced by git during git-archive
git_refnames = "$Format:%d$"
git_full = "$Format:%H$"
import subprocess
import sys
def run_command(args, cwd=None, verbose=False):
try:
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen(args, stdout=subprocess.PIPE, cwd=cwd)
except EnvironmentError:
e = sys.exc_info()[1]
if verbose:
print("unable to run %s" % args[0])
print(e)
return None
stdout = p.communicate()[0].strip()
if sys.version >= '3':
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % args[0])
return None
return stdout
import sys
import re
import os.path
def get_expanded_variables(versionfile_source):
# the code embedded in _version.py can just fetch the value of these
# variables. When used from setup.py, we don't want to import
# _version.py, so we do it with a regexp instead. This function is not
# used from _version.py.
variables = {}
try:
for line in open(versionfile_source,"r").readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
variables["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
variables["full"] = mo.group(1)
except EnvironmentError:
pass
return variables
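# Illustrative outcome (an assumption about typical inputs): in a git-archive
# tarball the $Format$ placeholders are expanded, so the dict resembles
# {"refnames": " (tag: dodgpg-0.1)", "full": "<40-hex sha>"}; in a plain
# checkout both values keep their literal "$Format:...$" form and the caller
# discards them.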
def versions_from_expanded_variables(variables, tag_prefix, verbose=False):
refnames = variables["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("variables are unexpanded, not using")
return {} # unexpanded, so not in an unpacked git-archive tarball
refs = set([r.strip() for r in refnames.strip("()").split(",")])
for ref in list(refs):
if not re.search(r'\d', ref):
if verbose:
print("discarding '%s', no digits" % ref)
refs.discard(ref)
# Assume all version tags have a digit. git's %d expansion
# behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us
# distinguish between branches and tags. By ignoring refnames
# without digits, we filter out many common branch names like
# "release" and "stabilization", as well as "HEAD" and "master".
if verbose:
print("remaining refs: %s" % ",".join(sorted(refs)))
for ref in sorted(refs):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %s" % r)
return { "version": r,
"full": variables["full"].strip() }
# no suitable tags, so we use the full revision id
if verbose:
print("no suitable tags, using full revision id")
return { "version": variables["full"].strip(),
"full": variables["full"].strip() }
def versions_from_vcs(tag_prefix, versionfile_source, verbose=False):
# this runs 'git' from the root of the source tree. That either means
# someone ran a setup.py command (and this code is in versioneer.py, so
# IN_LONG_VERSION_PY=False, thus the containing directory is the root of
# the source tree), or someone ran a project-specific entry point (and
# this code is in _version.py, so IN_LONG_VERSION_PY=True, thus the
# containing directory is somewhere deeper in the source tree). This only
# gets called if the git-archive 'subst' variables were *not* expanded,
# and _version.py hasn't already been rewritten with a short version
# string, meaning we're inside a checked out source tree.
try:
here = os.path.abspath(__file__)
except NameError:
# some py2exe/bbfreeze/non-CPython implementations don't do __file__
return {} # not always correct
# versionfile_source is the relative path from the top of the source tree
# (where the .git directory might live) to this file. Invert this to find
# the root from __file__.
root = here
if IN_LONG_VERSION_PY:
for i in range(len(versionfile_source.split("/"))):
root = os.path.dirname(root)
else:
root = os.path.dirname(here)
if not os.path.exists(os.path.join(root, ".git")):
if verbose:
print("no .git in %s" % root)
return {}
GIT = "git"
if sys.platform == "win32":
GIT = "git.cmd"
stdout = run_command([GIT, "describe", "--tags", "--dirty", "--always"],
cwd=root)
if stdout is None:
return {}
if not stdout.startswith(tag_prefix):
if verbose:
print("tag '%s' doesn't start with prefix '%s'" % (stdout, tag_prefix))
return {}
tag = stdout[len(tag_prefix):]
stdout = run_command([GIT, "rev-parse", "HEAD"], cwd=root)
if stdout is None:
return {}
full = stdout.strip()
if tag.endswith("-dirty"):
full += "-dirty"
return {"version": tag, "full": full}
def versions_from_parentdir(parentdir_prefix, versionfile_source, verbose=False):
if IN_LONG_VERSION_PY:
# We're running from _version.py. If it's from a source tree
# (execute-in-place), we can work upwards to find the root of the
# tree, and then check the parent directory for a version string. If
# it's in an installed application, there's no hope.
try:
here = os.path.abspath(__file__)
except NameError:
# py2exe/bbfreeze/non-CPython don't have __file__
return {} # without __file__, we have no hope
# versionfile_source is the relative path from the top of the source
# tree to _version.py. Invert this to find the root from __file__.
root = here
for i in range(len(versionfile_source.split("/"))):
root = os.path.dirname(root)
else:
# we're running from versioneer.py, which means we're running from
# the setup.py in a source tree. sys.argv[0] is setup.py in the root.
here = os.path.abspath(sys.argv[0])
root = os.path.dirname(here)
# Source tarballs conventionally unpack into a directory that includes
# both the project name and a version string.
dirname = os.path.basename(root)
if not dirname.startswith(parentdir_prefix):
if verbose:
print("guessing rootdir is '%s', but '%s' doesn't start with prefix '%s'" %
(root, dirname, parentdir_prefix))
return None
return {"version": dirname[len(parentdir_prefix):], "full": ""}
tag_prefix = "dodgpg-"
parentdir_prefix = "dodgpg-"
versionfile_source = "src/_version.py"
def get_versions(default={"version": "unknown", "full": ""}, verbose=False):
variables = { "refnames": git_refnames, "full": git_full }
ver = versions_from_expanded_variables(variables, tag_prefix, verbose)
if not ver:
ver = versions_from_vcs(tag_prefix, versionfile_source, verbose)
if not ver:
ver = versions_from_parentdir(parentdir_prefix, versionfile_source,
verbose)
if not ver:
ver = default
return ver
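# Illustrative call (a sketch): get_versions() tries the expanded $Format$
# variables first, then "git describe" on a checked-out tree, then the
# parent-directory name, and finally falls back to the supplied default,
# e.g. get_versions(default={"version": "unknown", "full": ""}).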
|
FreeAgent/djangoappengine-starter | refs/heads/master | django/contrib/localflavor/ca/ca_provinces.py | 237 | """
An alphabetical list of provinces and territories for use as `choices`
in a formfield, and a mapping of province misspellings/abbreviations to
normalized abbreviations.
Source: http://www.canada.gc.ca/othergov/prov_e.html
This exists in this standalone file so that it's only imported into memory
when explicitly needed.
"""
import warnings
warnings.warn(
'There have been recent changes to the CA localflavor. See the release notes for details',
RuntimeWarning
)
PROVINCE_CHOICES = (
('AB', 'Alberta'),
('BC', 'British Columbia'),
('MB', 'Manitoba'),
('NB', 'New Brunswick'),
('NL', 'Newfoundland and Labrador'),
('NT', 'Northwest Territories'),
('NS', 'Nova Scotia'),
('NU', 'Nunavut'),
('ON', 'Ontario'),
('PE', 'Prince Edward Island'),
('QC', 'Quebec'),
('SK', 'Saskatchewan'),
('YT', 'Yukon')
)
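# Typical use (a sketch, not part of this module): hand the tuple to a
# Django form field, e.g.
# from django import forms
# province = forms.ChoiceField(choices=PROVINCE_CHOICES)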
PROVINCES_NORMALIZED = {
'ab': 'AB',
'alberta': 'AB',
'bc': 'BC',
'b.c.': 'BC',
'british columbia': 'BC',
'mb': 'MB',
'manitoba': 'MB',
'nb': 'NB',
'new brunswick': 'NB',
'nf': 'NL',
'nl': 'NL',
'newfoundland': 'NL',
'newfoundland and labrador': 'NL',
'nt': 'NT',
'northwest territories': 'NT',
'ns': 'NS',
'nova scotia': 'NS',
'nu': 'NU',
'nunavut': 'NU',
'on': 'ON',
'ontario': 'ON',
'pe': 'PE',
'pei': 'PE',
'p.e.i.': 'PE',
'prince edward island': 'PE',
'qc': 'QC',
'quebec': 'QC',
'sk': 'SK',
'saskatchewan': 'SK',
'yk': 'YT',
'yt': 'YT',
'yukon': 'YT',
'yukon territory': 'YT',
} |
nhippenmeyer/django | refs/heads/master | tests/postgres_tests/migrations/0001_setup_extensions.py | 310 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
try:
from django.contrib.postgres.operations import (
HStoreExtension, UnaccentExtension,
)
except ImportError:
from django.test import mock
HStoreExtension = mock.Mock()
UnaccentExtension = mock.Mock()
class Migration(migrations.Migration):
dependencies = [
]
operations = [
HStoreExtension(),
UnaccentExtension(),
]
|
xiandiancloud/edx-platform | refs/heads/master | common/djangoapps/lang_pref/tests/test_middleware.py | 54 | from django.test import TestCase
from django.test.client import RequestFactory
from django.contrib.sessions.middleware import SessionMiddleware
from lang_pref.middleware import LanguagePreferenceMiddleware
from user_api.models import UserPreference
from lang_pref import LANGUAGE_KEY
from student.tests.factories import UserFactory
class TestUserPreferenceMiddleware(TestCase):
"""
Tests to make sure user preferences are getting properly set in the middleware
"""
def setUp(self):
self.middleware = LanguagePreferenceMiddleware()
self.session_middleware = SessionMiddleware()
self.user = UserFactory.create()
self.request = RequestFactory().get('/somewhere')
self.request.user = self.user
self.session_middleware.process_request(self.request)
def test_no_language_set_in_session_or_prefs(self):
# nothing set in the session or the prefs
self.middleware.process_request(self.request)
self.assertNotIn('django_language', self.request.session)
def test_language_in_user_prefs(self):
# language set in the user preferences and not the session
UserPreference.set_preference(self.user, LANGUAGE_KEY, 'eo')
self.middleware.process_request(self.request)
self.assertEquals(self.request.session['django_language'], 'eo')
def test_language_in_session(self):
# language set in both the user preferences and session,
# session should get precedence
self.request.session['django_language'] = 'en'
UserPreference.set_preference(self.user, LANGUAGE_KEY, 'eo')
self.middleware.process_request(self.request)
self.assertEquals(self.request.session['django_language'], 'en')
|
infinidb/autooam | refs/heads/master | vagrant/makebox.py | 1 | # Copyright (C) 2014 InfiniDB, Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 2 of
# the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#!/usr/bin/env python
import os,sys,getopt
from emtools.common.utils import syscall_log
from autooam.vmi.vagstatus import vagrant_status
import re
def usage():
print '''usage: makebox.py [-hlrb:dsc]
Options:
-h : show usage
-l : install new boxes locally
-r : install new boxes remotely
-b <box> : process a single named box (default = all subdirectories)
-d : destroy box and start over (default = use existing box state)
-s : skip box creation. (NOTE: use this only if you are SURE the
.box files are already updated)
-c : check boxes only - this runs the playbook but does not
write a new box
basebox : initial box - can be remote or local
boxname : new box to create
'''
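# Example invocation (illustrative; "centos6" is a hypothetical box
# subdirectory): rebuild one box from scratch and register it with the local
# vagrant install:
# python makebox.py -d -l -b centos6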
def syscall(cmd):
print 'INFO: issuing %s' % cmd
(rc, out, err) = syscall_log(cmd)
if rc != 0:
raise Exception("Command %s failed! rc=%s, stdout=%s, stderr=%s" % (cmd, rc, out, err))
return (rc, out, err)
def do_box(boxname, do_local = False, do_remote = False, do_destroy = False, do_skip = False, do_check = False):
print 'INFO: Handling box %s' % boxname
ret = 0
cwd = os.getcwd()
os.chdir(boxname)
try:
if not os.path.exists('Vagrantfile'):
raise Exception("ERROR: no VagrantFile found in %s" % boxname)
if not do_skip:
if do_destroy:
syscall('vagrant destroy -f')
else:
status = vagrant_status('.')
if status['cluster'] != "not created" and status['cluster'] != "poweroff":
print "INFO: machine appears to be running or suspended, shutting down for clean restart"
syscall('vagrant halt')
(rc, out, err) = syscall('vagrant up')
sshportpatt = re.compile('SSH address: 127.0.0.1:([0-9]+)')
mat = sshportpatt.search( out )
if not mat:
raise Exception('ERROR: could not locate SSH address in %s' % out)
sshport = mat.group(1)
print 'INFO: vagrant box %s is on port %s' % (boxname, sshport)
java_file = os.path.join('..','jdk-7u55-linux-x64.gz')
syscall('cp %s .' % java_file)
cmd = 'ansible-playbook -i ../inventory ../makebox.yml -e "ansible_ssh_port=%s boxdir=%s"' % (sshport,boxname)
rc = os.system(cmd) >> 8
if rc != 0:
raise Exception("Ansible command (%s) failed: %s!" % (cmd, rc))
syscall('vagrant halt')
if not do_check:
if os.path.exists('%s.box' % boxname):
os.remove('%s.box' % boxname)
syscall('vagrant package --output %s.box' % boxname)
if do_local:
(rc, out, err) = syscall('vagrant box list')
for b in out.split('\n'):
box = b.split(' ')[0]
if box == boxname:
syscall('vagrant box remove %s' % boxname)
break
syscall('vagrant box add %s ./%s.box' % (boxname, boxname))
if do_remote:
print 'INFO: going to scp the new box to srvengcm1...this may prompt for a password'
rc = os.system('scp %s.box root@srvengcm1.calpont.com:/Calpont/exports/vagrant_boxes/.' % (boxname)) >> 8
if rc != 0:
raise Exception("scp command failed: %s!" % (rc))
except Exception, exc:
print exc
ret = 1
os.chdir( cwd )
return ret
def main(argv=None):
try:
opts, args = getopt.getopt(sys.argv[1:], "hlrb:dsc", [])
except getopt.GetoptError as err:
# print help information and exit:
print str(err) # will print something like "option -a not recognized"
usage()
sys.exit(2)
do_local = False
do_remote = False
do_destroy = False
do_skip = False
do_check = False
boxname = None
for o, a in opts:
if o == "-h":
usage()
return 0
elif o == "-l":
do_local = True
elif o == "-r":
do_remote = True
elif o == "-b":
boxname = a
elif o == "-d":
do_destroy = True
elif o == "-s":
do_skip = True
elif o == "-c":
do_check = True
else:
assert False, "unhandled option"
boxes = []
if boxname:
if not os.path.isdir(boxname):
print 'ERROR: no subdirectory named %s' % boxname
sys.exit(1)
boxes.append( boxname )
else:
for p in os.listdir('.'):
if os.path.isdir(p):
boxes.append(p)
print 'INFO: going to process boxes %s' % boxes
for box in boxes:
do_box( box, do_local=do_local, do_remote=do_remote, do_destroy=do_destroy, do_skip=do_skip, do_check=do_check )
return 0
if __name__ == "__main__":
sys.exit(main())
|
adieu/allbuttonspressed | refs/heads/master | docutils/writers/manpage.py | 6 | # -*- coding: utf-8 -*-
# $Id: manpage.py 7048 2011-06-04 12:35:19Z grubert $
# Author: Engelbert Gruber <grubert@users.sourceforge.net>
# Copyright: This module is put into the public domain.
"""
Simple man page writer for reStructuredText.
Man pages (short for "manual pages") contain system documentation on unix-like
systems. The pages are grouped in numbered sections:
1 executable programs and shell commands
2 system calls
3 library functions
4 special files
5 file formats
6 games
7 miscellaneous
8 system administration
Man pages are written in *troff*, a text file formatting system.
See http://www.tldp.org/HOWTO/Man-Page for a start.
Man pages have no subsections, only parts.
Standard parts
NAME ,
SYNOPSIS ,
DESCRIPTION ,
OPTIONS ,
FILES ,
SEE ALSO ,
BUGS ,
and
AUTHOR .
A unix-like system keeps an index of the DESCRIPTIONs, which is accessible
via the commands whatis and apropos.
"""
__docformat__ = 'reStructuredText'
import re
import docutils
from docutils import nodes, writers, languages
import roman
FIELD_LIST_INDENT = 7
DEFINITION_LIST_INDENT = 7
OPTION_LIST_INDENT = 7
BLOCKQOUTE_INDENT = 3.5
# Define two macros so man/roff can calculate the
# indent/unindent margins by itself
MACRO_DEF = (r""".
.nr rst2man-indent-level 0
.
.de1 rstReportMargin
\\$1 \\n[an-margin]
level \\n[rst2man-indent-level]
level margin: \\n[rst2man-indent\\n[rst2man-indent-level]]
-
\\n[rst2man-indent0]
\\n[rst2man-indent1]
\\n[rst2man-indent2]
..
.de1 INDENT
.\" .rstReportMargin pre:
. RS \\$1
. nr rst2man-indent\\n[rst2man-indent-level] \\n[an-margin]
. nr rst2man-indent-level +1
.\" .rstReportMargin post:
..
.de UNINDENT
. RE
.\" indent \\n[an-margin]
.\" old: \\n[rst2man-indent\\n[rst2man-indent-level]]
.nr rst2man-indent-level -1
.\" new: \\n[rst2man-indent\\n[rst2man-indent-level]]
.in \\n[rst2man-indent\\n[rst2man-indent-level]]u
..
""")
class Writer(writers.Writer):
supported = ('manpage',)
"""Formats this writer supports."""
output = None
"""Final translated form of `document`."""
def __init__(self):
writers.Writer.__init__(self)
self.translator_class = Translator
def translate(self):
visitor = self.translator_class(self.document)
self.document.walkabout(visitor)
self.output = visitor.astext()
class Table:
def __init__(self):
self._rows = []
self._options = ['center']
self._tab_char = '\t'
self._coldefs = []
def new_row(self):
self._rows.append([])
def append_separator(self, separator):
"""Append the separator for table head."""
self._rows.append([separator])
def append_cell(self, cell_lines):
"""cell_lines is an array of lines"""
start = 0
if len(cell_lines) > 0 and cell_lines[0] == '.sp\n':
start = 1
self._rows[-1].append(cell_lines[start:])
if len(self._coldefs) < len(self._rows[-1]):
self._coldefs.append('l')
def _minimize_cell(self, cell_lines):
"""Remove leading and trailing blank and ``.sp`` lines"""
while (cell_lines and cell_lines[0] in ('\n', '.sp\n')):
del cell_lines[0]
while (cell_lines and cell_lines[-1] in ('\n', '.sp\n')):
del cell_lines[-1]
def as_list(self):
text = ['.TS\n']
text.append(' '.join(self._options) + ';\n')
text.append('|%s|.\n' % ('|'.join(self._coldefs)))
for row in self._rows:
# row = array of cells. cell = array of lines.
text.append('_\n') # line above
text.append('T{\n')
for i in range(len(row)):
cell = row[i]
self._minimize_cell(cell)
text.extend(cell)
if not text[-1].endswith('\n'):
text[-1] += '\n'
if i < len(row)-1:
text.append('T}'+self._tab_char+'T{\n')
else:
text.append('T}\n')
text.append('_\n')
text.append('.TE\n')
return text
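# Illustrative shape of the as_list() output for a two-column table: lines
# such as '.TS\n', 'center;\n', '|l|l|.\n', '_\n', 'T{\n', ..., '.TE\n',
# which the tbl preprocessor of *roff renders as a boxed table.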
class Translator(nodes.NodeVisitor):
""""""
words_and_spaces = re.compile(r'\S+| +|\n')
possibly_a_roff_command = re.compile(r'\.\w')
document_start = """Man page generated from reStructeredText."""
def __init__(self, document):
nodes.NodeVisitor.__init__(self, document)
self.settings = settings = document.settings
lcode = settings.language_code
self.language = languages.get_language(lcode, document.reporter)
self.head = []
self.body = []
self.foot = []
self.section_level = 0
self.context = []
self.topic_class = ''
self.colspecs = []
self.compact_p = 1
self.compact_simple = None
# the list style "*" bullet or "#" numbered
self._list_char = []
# writing the header .TH and .SH NAME is postponed until after
# docinfo.
self._docinfo = {
"title" : "", "title_upper": "",
"subtitle" : "",
"manual_section" : "", "manual_group" : "",
"author" : [],
"date" : "",
"copyright" : "",
"version" : "",
}
self._docinfo_keys = [] # a list to keep the sequence as in source.
self._docinfo_names = {} # to get name from text not normalized.
self._in_docinfo = None
self._active_table = None
self._in_literal = False
self.header_written = 0
self._line_block = 0
self.authors = []
self.section_level = 0
self._indent = [0]
# central definition of simple processing rules
# what to output on : visit, depart
# Do not use paragraph requests ``.PP`` because these set indentation.
# use ``.sp``. Remove superfluous ``.sp`` in ``astext``.
#
# Fonts are put on a stack, the top one is used.
# ``.ft P`` or ``\\fP`` pop from stack.
# ``B`` bold, ``I`` italic, ``R`` roman should be available.
# Hopefully ``C`` courier too.
self.defs = {
'indent' : ('.INDENT %.1f\n', '.UNINDENT\n'),
'definition_list_item' : ('.TP', ''),
'field_name' : ('.TP\n.B ', '\n'),
'literal' : ('\\fB', '\\fP'),
'literal_block' : ('.sp\n.nf\n.ft C\n', '\n.ft P\n.fi\n'),
'option_list_item' : ('.TP\n', ''),
'reference' : (r'\fI\%', r'\fP'),
'emphasis': ('\\fI', '\\fP'),
'strong' : ('\\fB', '\\fP'),
'term' : ('\n.B ', '\n'),
'title_reference' : ('\\fI', '\\fP'),
'topic-title' : ('.SS ',),
'sidebar-title' : ('.SS ',),
'problematic' : ('\n.nf\n', '\n.fi\n'),
}
# NOTE do not specify the newline before a dot-command, but ensure
# it is there.
def comment_begin(self, text):
"""Return commented version of the passed text WITHOUT end of
line/comment."""
prefix = '.\\" '
out_text = ''.join(
[(prefix + in_line + '\n')
for in_line in text.split('\n')])
return out_text
def comment(self, text):
"""Return commented version of the passed text."""
return self.comment_begin(text)+'.\n'
def ensure_eol(self):
"""Ensure the last line in body is terminated by new line."""
if self.body[-1][-1] != '\n':
self.body.append('\n')
def astext(self):
"""Return the final formatted document as a string."""
if not self.header_written:
# ensure we get a ".TH" as viewers require it.
self.head.append(self.header())
# filter body
for i in xrange(len(self.body)-1, 0, -1):
# remove superfluous vertical gaps.
if self.body[i] == '.sp\n':
if self.body[i - 1][:4] in ('.BI ','.IP '):
self.body[i] = '.\n'
elif (self.body[i - 1][:3] == '.B ' and
self.body[i - 2][:4] == '.TP\n'):
self.body[i] = '.\n'
elif (self.body[i - 1] == '\n' and
not self.possibly_a_roff_command.match(self.body[i - 2]) and
(self.body[i - 3][:7] == '.TP\n.B '
or self.body[i - 3][:4] == '\n.B ')
):
self.body[i] = '.\n'
return ''.join(self.head + self.body + self.foot)
def deunicode(self, text):
text = text.replace(u'\xa0', '\\ ')
text = text.replace(u'\u2020', '\\(dg')
return text
def visit_Text(self, node):
text = node.astext()
text = text.replace('\\','\\e')
replace_pairs = [
(u'-', ur'\-'),
(u'\'', ur'\(aq'),
(u'´', ur'\''),
(u'`', ur'\(ga'),
]
for (in_char, out_markup) in replace_pairs:
text = text.replace(in_char, out_markup)
# unicode
text = self.deunicode(text)
if self._in_literal:
# prevent interpretation of "." at line start
if text[0] == '.':
text = '\\&' + text
text = text.replace('\n.', '\n\\&.')
self.body.append(text)
def depart_Text(self, node):
pass
def list_start(self, node):
class enum_char:
enum_style = {
'bullet' : '\\(bu',
'emdash' : '\\(em',
}
def __init__(self, style):
self._style = style
if node.has_key('start'):
self._cnt = node['start'] - 1
else:
self._cnt = 0
self._indent = 2
if style == 'arabic':
# indentation depends on number of childrens
# and start value.
self._indent = len(str(len(node.children)))
self._indent += len(str(self._cnt)) + 1
elif style == 'loweralpha':
self._cnt += ord('a') - 1
self._indent = 3
elif style == 'upperalpha':
self._cnt += ord('A') - 1
self._indent = 3
elif style.endswith('roman'):
self._indent = 5
def next(self):
if self._style == 'bullet':
return self.enum_style[self._style]
elif self._style == 'emdash':
return self.enum_style[self._style]
self._cnt += 1
# TODO add prefix postfix
if self._style == 'arabic':
return "%d." % self._cnt
elif self._style in ('loweralpha', 'upperalpha'):
return "%c." % self._cnt
elif self._style.endswith('roman'):
res = roman.toRoman(self._cnt) + '.'
if self._style.startswith('upper'):
return res.upper()
return res.lower()
else:
return "%d." % self._cnt
def get_width(self):
return self._indent
def __repr__(self):
return 'enum_style-%s' % list(self._style)
if node.has_key('enumtype'):
self._list_char.append(enum_char(node['enumtype']))
else:
self._list_char.append(enum_char('bullet'))
if len(self._list_char) > 1:
# indent nested lists
self.indent(self._list_char[-2].get_width())
else:
self.indent(self._list_char[-1].get_width())
def list_end(self):
self.dedent()
self._list_char.pop()
def header(self):
tmpl = (".TH %(title_upper)s %(manual_section)s"
" \"%(date)s\" \"%(version)s\" \"%(manual_group)s\"\n"
".SH NAME\n"
"%(title)s \- %(subtitle)s\n")
return tmpl % self._docinfo
def append_header(self):
"""append header with .TH and .SH NAME"""
# NOTE before everything
# .TH title_upper section date source manual
if self.header_written:
return
self.body.append(self.header())
self.body.append(MACRO_DEF)
self.header_written = 1
def visit_address(self, node):
self.visit_docinfo_item(node, 'address')
def depart_address(self, node):
pass
def visit_admonition(self, node, name=None):
if name:
self.body.append('.IP %s\n' %
self.language.labels.get(name, name))
def depart_admonition(self, node):
self.body.append('.RE\n')
def visit_attention(self, node):
self.visit_admonition(node, 'attention')
depart_attention = depart_admonition
def visit_docinfo_item(self, node, name):
if name == 'author':
self._docinfo[name].append(node.astext())
else:
self._docinfo[name] = node.astext()
self._docinfo_keys.append(name)
raise nodes.SkipNode
def depart_docinfo_item(self, node):
pass
def visit_author(self, node):
self.visit_docinfo_item(node, 'author')
depart_author = depart_docinfo_item
def visit_authors(self, node):
# _author is called anyway.
pass
def depart_authors(self, node):
pass
def visit_block_quote(self, node):
# BUG/HACK: indent always uses the _last_ indentation,
# thus we need two of them.
self.indent(BLOCKQOUTE_INDENT)
self.indent(0)
def depart_block_quote(self, node):
self.dedent()
self.dedent()
def visit_bullet_list(self, node):
self.list_start(node)
def depart_bullet_list(self, node):
self.list_end()
def visit_caption(self, node):
pass
def depart_caption(self, node):
pass
def visit_caution(self, node):
self.visit_admonition(node, 'caution')
depart_caution = depart_admonition
def visit_citation(self, node):
num, text = node.astext().split(None, 1)
num = num.strip()
self.body.append('.IP [%s] 5\n' % num)
def depart_citation(self, node):
pass
def visit_citation_reference(self, node):
self.body.append('['+node.astext()+']')
raise nodes.SkipNode
def visit_classifier(self, node):
pass
def depart_classifier(self, node):
pass
def visit_colspec(self, node):
self.colspecs.append(node)
def depart_colspec(self, node):
pass
def write_colspecs(self):
self.body.append("%s.\n" % ('L '*len(self.colspecs)))
def visit_comment(self, node,
sub=re.compile('-(?=-)').sub):
self.body.append(self.comment(node.astext()))
raise nodes.SkipNode
def visit_contact(self, node):
self.visit_docinfo_item(node, 'contact')
depart_contact = depart_docinfo_item
def visit_container(self, node):
pass
def depart_container(self, node):
pass
def visit_compound(self, node):
pass
def depart_compound(self, node):
pass
def visit_copyright(self, node):
self.visit_docinfo_item(node, 'copyright')
def visit_danger(self, node):
self.visit_admonition(node, 'danger')
depart_danger = depart_admonition
def visit_date(self, node):
self.visit_docinfo_item(node, 'date')
def visit_decoration(self, node):
pass
def depart_decoration(self, node):
pass
def visit_definition(self, node):
pass
def depart_definition(self, node):
pass
def visit_definition_list(self, node):
self.indent(DEFINITION_LIST_INDENT)
def depart_definition_list(self, node):
self.dedent()
def visit_definition_list_item(self, node):
self.body.append(self.defs['definition_list_item'][0])
def depart_definition_list_item(self, node):
self.body.append(self.defs['definition_list_item'][1])
def visit_description(self, node):
pass
def depart_description(self, node):
pass
def visit_docinfo(self, node):
self._in_docinfo = 1
def depart_docinfo(self, node):
self._in_docinfo = None
# NOTE nothing should be written before this
self.append_header()
def visit_doctest_block(self, node):
self.body.append(self.defs['literal_block'][0])
self._in_literal = True
def depart_doctest_block(self, node):
self._in_literal = False
self.body.append(self.defs['literal_block'][1])
def visit_document(self, node):
# no blank line between comment and header.
self.body.append(self.comment(self.document_start).rstrip()+'\n')
# writing the header is postponed
self.header_written = 0
def depart_document(self, node):
if self._docinfo['author']:
self.body.append('.SH AUTHOR\n%s\n'
% ', '.join(self._docinfo['author']))
skip = ('author', 'copyright', 'date',
'manual_group', 'manual_section',
'subtitle',
'title', 'title_upper', 'version')
for name in self._docinfo_keys:
if name == 'address':
self.body.append("\n%s:\n%s%s.nf\n%s\n.fi\n%s%s" % (
self.language.labels.get(name, name),
self.defs['indent'][0] % 0,
self.defs['indent'][0] % BLOCKQOUTE_INDENT,
self._docinfo[name],
self.defs['indent'][1],
self.defs['indent'][1]))
elif not name in skip:
if name in self._docinfo_names:
label = self._docinfo_names[name]
else:
label = self.language.labels.get(name, name)
self.body.append("\n%s: %s\n" % (label, self._docinfo[name]))
if self._docinfo['copyright']:
self.body.append('.SH COPYRIGHT\n%s\n'
% self._docinfo['copyright'])
self.body.append(self.comment(
'Generated by docutils manpage writer.\n'))
def visit_emphasis(self, node):
self.body.append(self.defs['emphasis'][0])
def depart_emphasis(self, node):
self.body.append(self.defs['emphasis'][1])
def visit_entry(self, node):
# a cell in a table row
if 'morerows' in node:
self.document.reporter.warning('"table row spanning" not supported',
base_node=node)
if 'morecols' in node:
self.document.reporter.warning(
'"table cell spanning" not supported', base_node=node)
self.context.append(len(self.body))
def depart_entry(self, node):
start = self.context.pop()
self._active_table.append_cell(self.body[start:])
del self.body[start:]
def visit_enumerated_list(self, node):
self.list_start(node)
def depart_enumerated_list(self, node):
self.list_end()
def visit_error(self, node):
self.visit_admonition(node, 'error')
depart_error = depart_admonition
def visit_field(self, node):
pass
def depart_field(self, node):
pass
def visit_field_body(self, node):
if self._in_docinfo:
name_normalized = self._field_name.lower().replace(" ","_")
self._docinfo_names[name_normalized] = self._field_name
self.visit_docinfo_item(node, name_normalized)
raise nodes.SkipNode
def depart_field_body(self, node):
pass
def visit_field_list(self, node):
self.indent(FIELD_LIST_INDENT)
def depart_field_list(self, node):
self.dedent()
def visit_field_name(self, node):
if self._in_docinfo:
self._field_name = node.astext()
raise nodes.SkipNode
else:
self.body.append(self.defs['field_name'][0])
def depart_field_name(self, node):
self.body.append(self.defs['field_name'][1])
def visit_figure(self, node):
self.indent(2.5)
self.indent(0)
def depart_figure(self, node):
self.dedent()
self.dedent()
def visit_footer(self, node):
self.document.reporter.warning('"footer" not supported',
base_node=node)
def depart_footer(self, node):
pass
def visit_footnote(self, node):
num, text = node.astext().split(None, 1)
num = num.strip()
self.body.append('.IP [%s] 5\n' % self.deunicode(num))
def depart_footnote(self, node):
pass
def footnote_backrefs(self, node):
self.document.reporter.warning('"footnote_backrefs" not supported',
base_node=node)
def visit_footnote_reference(self, node):
self.body.append('['+self.deunicode(node.astext())+']')
raise nodes.SkipNode
def depart_footnote_reference(self, node):
pass
def visit_generated(self, node):
pass
def depart_generated(self, node):
pass
def visit_header(self, node):
        raise NotImplementedError(node.astext())
def depart_header(self, node):
pass
def visit_hint(self, node):
self.visit_admonition(node, 'hint')
depart_hint = depart_admonition
def visit_subscript(self, node):
self.body.append('\\s-2\\d')
def depart_subscript(self, node):
self.body.append('\\u\\s0')
def visit_superscript(self, node):
self.body.append('\\s-2\\u')
def depart_superscript(self, node):
self.body.append('\\d\\s0')
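    # The escapes above are the usual roff idiom: \s-2 shrinks the type
    # size by two points, \d/\u move half a line down/up, and \s0 restores
    # the previous size.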
def visit_attribution(self, node):
self.body.append('\\(em ')
def depart_attribution(self, node):
self.body.append('\n')
def visit_image(self, node):
self.document.reporter.warning('"image" not supported',
base_node=node)
text = []
if 'alt' in node.attributes:
text.append(node.attributes['alt'])
if 'uri' in node.attributes:
text.append(node.attributes['uri'])
self.body.append('[image: %s]\n' % ('/'.join(text)))
raise nodes.SkipNode
def visit_important(self, node):
self.visit_admonition(node, 'important')
depart_important = depart_admonition
def visit_label(self, node):
# footnote and citation
if (isinstance(node.parent, nodes.footnote)
or isinstance(node.parent, nodes.citation)):
raise nodes.SkipNode
self.document.reporter.warning('"unsupported "label"',
base_node=node)
self.body.append('[')
def depart_label(self, node):
self.body.append(']\n')
def visit_legend(self, node):
pass
def depart_legend(self, node):
pass
    # TODO: should we use .INDENT/.UNINDENT here?
def visit_line_block(self, node):
self._line_block += 1
if self._line_block == 1:
            # TODO: separate inline blocks from previous paragraphs
            # see http://hg.intevation.org/mercurial/crew/rev/9c142ed9c405
            # self.body.append('.sp\n')
            # but that approach did not work reliably here.
self.body.append('.nf\n')
else:
self.body.append('.in +2\n')
def depart_line_block(self, node):
self._line_block -= 1
if self._line_block == 0:
self.body.append('.fi\n')
self.body.append('.sp\n')
else:
self.body.append('.in -2\n')
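    # Assumed roff output for a nested line block:
    #   .nf
    #   outer line
    #   .in +2
    #   inner line
    #   .in -2
    #   .fi
    #   .sp
    # .nf/.fi toggle no-fill mode so the source line breaks are kept, and
    # .in +/-2 shifts the nested levels.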
def visit_line(self, node):
pass
def depart_line(self, node):
self.body.append('\n')
def visit_list_item(self, node):
        # man(7) recommends using ".IP" instead of ".TP" here
self.body.append('.IP %s %d\n' % (
self._list_char[-1].next(),
self._list_char[-1].get_width(),))
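        # e.g. a bullet item typically becomes ".IP \(bu 2" and the third
        # item of an enumeration ".IP 3. 4" -- tag and width both come from
        # the active list-style object (the exact tags are assumptions here).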
def depart_list_item(self, node):
pass
def visit_literal(self, node):
self.body.append(self.defs['literal'][0])
def depart_literal(self, node):
self.body.append(self.defs['literal'][1])
def visit_literal_block(self, node):
self.body.append(self.defs['literal_block'][0])
self._in_literal = True
def depart_literal_block(self, node):
self._in_literal = False
self.body.append(self.defs['literal_block'][1])
def visit_math(self, node):
self.document.reporter.warning('"math" role not supported',
base_node=node)
self.visit_literal(node)
def depart_math(self, node):
self.depart_literal(node)
def visit_math_block(self, node):
self.document.reporter.warning('"math" directive not supported',
base_node=node)
self.visit_literal_block(node)
def depart_math_block(self, node):
self.depart_literal_block(node)
def visit_meta(self, node):
        raise NotImplementedError(node.astext())
def depart_meta(self, node):
pass
def visit_note(self, node):
self.visit_admonition(node, 'note')
depart_note = depart_admonition
def indent(self, by=0.5):
        # if we are inside a ".SH" section there is already an active .RS
step = self._indent[-1]
self._indent.append(by)
self.body.append(self.defs['indent'][0] % step)
def dedent(self):
self._indent.pop()
self.body.append(self.defs['indent'][1])
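    # indent()/dedent() presumably expand to the ".RS n"/".RE" pair; note
    # that indent() emits the *previous* level's width and pushes `by` onto
    # the stack, so the width passed in is used by the next nested indent().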
def visit_option_list(self, node):
self.indent(OPTION_LIST_INDENT)
def depart_option_list(self, node):
self.dedent()
def visit_option_list_item(self, node):
# one item of the list
self.body.append(self.defs['option_list_item'][0])
def depart_option_list_item(self, node):
self.body.append(self.defs['option_list_item'][1])
def visit_option_group(self, node):
        # an option may have several forms, so they are handled as a group
# options without parameter bold only, .B, -v
# options with parameter bold italic, .BI, -f file
#
# we do not know if .B or .BI
self.context.append('.B') # blind guess
self.context.append(len(self.body)) # to be able to insert later
self.context.append(0) # option counter
def depart_option_group(self, node):
self.context.pop() # the counter
start_position = self.context.pop()
text = self.body[start_position:]
del self.body[start_position:]
self.body.append('%s%s\n' % (self.context.pop(), ''.join(text)))
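        # Deferred-macro trick: whether ".B" (no argument) or ".BI"
        # (bold/italic alternation) is needed is only known after the
        # children have been visited, so the text is buffered and the macro
        # chosen in the context is prepended here, giving output such as
        # ".B \-v" or ".BI \-f \ file" (illustrative examples).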
def visit_option(self, node):
# each form of the option will be presented separately
if self.context[-1] > 0:
self.body.append(', ')
if self.context[-3] == '.BI':
self.body.append('\\')
self.body.append(' ')
def depart_option(self, node):
self.context[-1] += 1
def visit_option_string(self, node):
# do not know if .B or .BI
pass
def depart_option_string(self, node):
pass
def visit_option_argument(self, node):
self.context[-3] = '.BI' # bold/italic alternate
if node['delimiter'] != ' ':
self.body.append('\\fB%s ' % node['delimiter'])
elif self.body[len(self.body)-1].endswith('='):
            # a plain blank separates .BI arguments without printing a
            # space; only the font changes
self.body.append(' ')
else:
            # blank, backslash, blank: switch font, then print a literal blank
self.body.append(' \\ ')
def depart_option_argument(self, node):
pass
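    # Worked example for the argument handling above (assumed input):
    # "--input=FILE" has delimiter "=", so "\fB= " is emitted and .BI
    # switches back to italic for FILE with no printed blank; "-f FILE"
    # has a space delimiter, emitted as " \ " so a visible blank survives
    # the .BI font alternation.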
def visit_organization(self, node):
self.visit_docinfo_item(node, 'organization')
def depart_organization(self, node):
pass
def first_child(self, node):
first = isinstance(node.parent[0], nodes.label) # skip label
for child in node.parent.children[first:]:
if isinstance(child, nodes.Invisible):
continue
if child is node:
return 1
break
return 0
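    # first_child() answers "is `node` the first visible sibling?" (labels
    # and Invisible nodes are skipped); visit_paragraph uses it to avoid a
    # spurious ".sp" before the first paragraph of an enclosing block.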
def visit_paragraph(self, node):
# ``.PP`` : Start standard indented paragraph.
# ``.LP`` : Start block paragraph, all except the first.
# ``.P [type]`` : Start paragraph type.
        # NOTE: don't use paragraph starts because they reset the indentation.
# ``.sp`` is only vertical space
self.ensure_eol()
if not self.first_child(node):
self.body.append('.sp\n')
def depart_paragraph(self, node):
self.body.append('\n')
def visit_problematic(self, node):
self.body.append(self.defs['problematic'][0])
def depart_problematic(self, node):
self.body.append(self.defs['problematic'][1])
def visit_raw(self, node):
if node.get('format') == 'manpage':
self.body.append(node.astext() + "\n")
# Keep non-manpage raw text out of output:
raise nodes.SkipNode
def visit_reference(self, node):
"""E.g. link or email address."""
self.body.append(self.defs['reference'][0])
def depart_reference(self, node):
self.body.append(self.defs['reference'][1])
def visit_revision(self, node):
self.visit_docinfo_item(node, 'revision')
depart_revision = depart_docinfo_item
def visit_row(self, node):
self._active_table.new_row()
def depart_row(self, node):
pass
def visit_section(self, node):
self.section_level += 1
def depart_section(self, node):
self.section_level -= 1
def visit_status(self, node):
self.visit_docinfo_item(node, 'status')
depart_status = depart_docinfo_item
def visit_strong(self, node):
self.body.append(self.defs['strong'][0])
def depart_strong(self, node):
self.body.append(self.defs['strong'][1])
def visit_substitution_definition(self, node):
"""Internal only."""
raise nodes.SkipNode
def visit_substitution_reference(self, node):
self.document.reporter.warning('"substitution_reference" not supported',
base_node=node)
def visit_subtitle(self, node):
if isinstance(node.parent, nodes.sidebar):
self.body.append(self.defs['strong'][0])
elif isinstance(node.parent, nodes.document):
self.visit_docinfo_item(node, 'subtitle')
elif isinstance(node.parent, nodes.section):
self.body.append(self.defs['strong'][0])
def depart_subtitle(self, node):
        # for a document subtitle, visit_docinfo_item raises SkipNode,
        # so this depart is only reached for sidebar/section subtitles
self.body.append(self.defs['strong'][1]+'\n.PP\n')
def visit_system_message(self, node):
# TODO add report_level
#if node['level'] < self.document.reporter['writer'].report_level:
# Level is too low to display:
# raise nodes.SkipNode
        # collected here but not used in the output below
        attr = {}
        backref_text = ''
        if node.hasattr('id'):
            attr['name'] = node['id']
if node.hasattr('line'):
line = ', line %s' % node['line']
else:
line = ''
self.body.append('.IP "System Message: %s/%s (%s:%s)"\n'
% (node['type'], node['level'], node['source'], line))
def depart_system_message(self, node):
pass
def visit_table(self, node):
self._active_table = Table()
def depart_table(self, node):
self.ensure_eol()
self.body.extend(self._active_table.as_list())
self._active_table = None
def visit_target(self, node):
        # targets are in-document hyper targets and have no use in man pages.
raise nodes.SkipNode
def visit_tbody(self, node):
pass
def depart_tbody(self, node):
pass
def visit_term(self, node):
self.body.append(self.defs['term'][0])
def depart_term(self, node):
self.body.append(self.defs['term'][1])
def visit_tgroup(self, node):
pass
def depart_tgroup(self, node):
pass
def visit_thead(self, node):
# MAYBE double line '='
pass
def depart_thead(self, node):
# MAYBE double line '='
pass
def visit_tip(self, node):
self.visit_admonition(node, 'tip')
depart_tip = depart_admonition
def visit_title(self, node):
if isinstance(node.parent, nodes.topic):
self.body.append(self.defs['topic-title'][0])
elif isinstance(node.parent, nodes.sidebar):
self.body.append(self.defs['sidebar-title'][0])
elif isinstance(node.parent, nodes.admonition):
self.body.append('.IP "')
elif self.section_level == 0:
self._docinfo['title'] = node.astext()
# document title for .TH
self._docinfo['title_upper'] = node.astext().upper()
raise nodes.SkipNode
elif self.section_level == 1:
self.body.append('.SH %s\n' % self.deunicode(node.astext().upper()))
raise nodes.SkipNode
else:
self.body.append('.SS ')
def depart_title(self, node):
if isinstance(node.parent, nodes.admonition):
self.body.append('"')
self.body.append('\n')
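    # Title mapping used above: the level-0 document title only feeds the
    # .TH header via self._docinfo, level-1 titles become uppercase ".SH"
    # headings, and deeper levels fall back to ".SS" subsections.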
def visit_title_reference(self, node):
"""inline citation reference"""
self.body.append(self.defs['title_reference'][0])
def depart_title_reference(self, node):
self.body.append(self.defs['title_reference'][1])
def visit_topic(self, node):
pass
def depart_topic(self, node):
pass
def visit_sidebar(self, node):
pass
def depart_sidebar(self, node):
pass
def visit_rubric(self, node):
pass
def depart_rubric(self, node):
pass
def visit_transition(self, node):
# .PP Begin a new paragraph and reset prevailing indent.
# .sp N leaves N lines of blank space.
# .ce centers the next line
self.body.append('\n.sp\n.ce\n----\n')
def depart_transition(self, node):
self.body.append('\n.ce 0\n.sp\n')
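    # Assumed rendering of a transition:
    #   .sp
    #   .ce
    #   ----
    #   .ce 0
    #   .sp
    # ".ce" centers the next input line and ".ce 0" cancels any pending
    # centering.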
def visit_version(self, node):
self.visit_docinfo_item(node, 'version')
def visit_warning(self, node):
self.visit_admonition(node, 'warning')
depart_warning = depart_admonition
def unimplemented_visit(self, node):
raise NotImplementedError('visiting unimplemented node type: %s'
% node.__class__.__name__)
# vim: set fileencoding=utf-8 et ts=4 ai :
|
imranyousuf/project-kappa | refs/heads/master | code/utils/tests/test_pre.py | 10 | # import numpy as np
# import nibabel as nib
# import matplotlib.pyplot as plt
# import os
# import diagnostics as dg
|