repo_name stringlengths 5 100 | ref stringlengths 12 67 | path stringlengths 4 244 | copies stringlengths 1 8 | content stringlengths 0 1.05M ⌀ |
|---|---|---|---|---|
washort/zamboni | refs/heads/master | mkt/site/migrations/0002_auto_20151109_1326.py | 6 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
def remove_preload_waffle(apps, schema_editor):
    """Delete the retired 'preload-apps' waffle switch, if present.

    ``schema_editor`` is unused; it is part of the RunPython signature.
    """
    # Fetch the historical model: importing Switch directly could hand us a
    # newer schema than this migration expects.
    waffle_switch = apps.get_model('waffle', 'Switch')
    waffle_switch.objects.filter(name='preload-apps').delete()
class Migration(migrations.Migration):
    """Data migration that removes the retired 'preload-apps' switch."""

    # Depends on waffle's initial migration so the Switch table exists.
    dependencies = [
        ('site', '0001_initial'),
        ('waffle', '0001_initial'),
    ]

    operations = [
        migrations.RunPython(remove_preload_waffle),
    ]
|
cgstudiomap/cgstudiomap | refs/heads/develop | main/eggs/requests-2.8.1-py2.7.egg/requests/hooks.py | 390 | # -*- coding: utf-8 -*-
"""
requests.hooks
~~~~~~~~~~~~~~
This module provides the capabilities for the Requests hooks system.
Available hooks:
``response``:
The response generated from a Request.
"""
# Events for which callers may register hook callables.
HOOKS = ['response']


def default_hooks():
    """Create a fresh hooks registry: one empty handler list per event."""
    registry = {}
    for event in HOOKS:
        registry[event] = []
    return registry
# TODO: response is the only one
def dispatch_hook(key, hooks, hook_data, **kwargs):
    """Run every hook registered under *key* over *hook_data*.

    A hook may return a replacement value for ``hook_data``; returning
    ``None`` leaves the current value unchanged.  A single callable is
    accepted in place of a list of callables.
    """
    registered = (hooks or {}).get(key)
    if not registered:
        return hook_data
    if callable(registered):
        registered = [registered]
    for hook in registered:
        updated = hook(hook_data, **kwargs)
        if updated is not None:
            hook_data = updated
    return hook_data
|
Rctue/nao-lib | refs/heads/master | gestures/QuestionGesture_1.py | 2 | # Choregraphe bezier export in Python.
from naoqi import ALProxy
# Choregraphe bezier export: three parallel lists describe the gesture.
#   names[i] -> joint name
#   times[i] -> keyframe times in seconds for that joint
#   keys[i]  -> per keyframe: [angle (rad), [bezier in-tangent], [bezier out-tangent]]
names = list()
times = list()
keys = list()
# Left arm raises toward a "question" pose.
names.append("LShoulderPitch")
times.append([ 0.50000, 1.00000])
keys.append([ [ 1.56300, [ 3, -0.16667, 0.00000], [ 3, 0.16667, 0.00000]], [ 1.39626, [ 3, -0.16667, 0.00000], [ 3, 0.00000, 0.00000]]])
names.append("LShoulderRoll")
times.append([ 0.50000, 1.00000])
keys.append([ [ 0.18710, [ 3, -0.16667, 0.00000], [ 3, 0.16667, 0.00000]], [ 0.34907, [ 3, -0.16667, 0.00000], [ 3, 0.00000, 0.00000]]])
names.append("LElbowYaw")
times.append([ 0.50000, 1.00000])
keys.append([ [ -1.15433, [ 3, -0.16667, 0.00000], [ 3, 0.16667, 0.00000]], [ -1.39626, [ 3, -0.16667, 0.00000], [ 3, 0.00000, 0.00000]]])
names.append("LElbowRoll")
times.append([ 0.50000, 1.00000])
keys.append([ [ -0.59450, [ 3, -0.16667, 0.00000], [ 3, 0.16667, 0.00000]], [ -1.04720, [ 3, -0.16667, 0.00000], [ 3, 0.00000, 0.00000]]])
names.append("LWristYaw")
times.append([ 0.50000, 1.00000])
keys.append([ [ -1.82387, [ 3, -0.16667, 0.00000], [ 3, 0.16667, 0.00000]], [ 0.00000, [ 3, -0.16667, 0.00000], [ 3, 0.00000, 0.00000]]])
names.append("LHand")
times.append([ 0.50000, 1.00000])
keys.append([ [ 0.01745, [ 3, -0.16667, 0.00000], [ 3, 0.16667, 0.00000]], [ 0.00000, [ 3, -0.16667, 0.00000], [ 3, 0.00000, 0.00000]]])
# Right arm mirrors the left (roll/yaw signs inverted).
names.append("RShoulderPitch")
times.append([ 0.50000, 1.00000])
keys.append([ [ 1.47629, [ 3, -0.16667, 0.00000], [ 3, 0.16667, 0.00000]], [ 1.39626, [ 3, -0.16667, 0.00000], [ 3, 0.00000, 0.00000]]])
names.append("RShoulderRoll")
times.append([ 0.50000, 1.00000])
keys.append([ [ -0.11905, [ 3, -0.16667, 0.00000], [ 3, 0.16667, 0.00000]], [ -0.34907, [ 3, -0.16667, 0.00000], [ 3, 0.00000, 0.00000]]])
names.append("RElbowYaw")
times.append([ 0.50000, 1.00000])
keys.append([ [ 1.18564, [ 3, -0.16667, 0.00000], [ 3, 0.16667, 0.00000]], [ 1.39626, [ 3, -0.16667, 0.00000], [ 3, 0.00000, 0.00000]]])
names.append("RElbowRoll")
times.append([ 0.50000, 1.00000])
keys.append([ [ 0.43850, [ 3, -0.16667, 0.00000], [ 3, 0.16667, 0.00000]], [ 1.04720, [ 3, -0.16667, 0.00000], [ 3, 0.00000, 0.00000]]])
names.append("RWristYaw")
times.append([ 0.50000, 1.00000])
keys.append([ [ 1.82387, [ 3, -0.16667, 0.00000], [ 3, 0.16667, 0.00000]], [ -0.00000, [ 3, -0.16667, 0.00000], [ 3, 0.00000, 0.00000]]])
names.append("RHand")
times.append([ 0.50000, 1.00000])
keys.append([ [ 0.01745, [ 3, -0.16667, 0.00000], [ 3, 0.16667, 0.00000]], [ 0.00000, [ 3, -0.16667, 0.00000], [ 3, 0.00000, 0.00000]]])
# Play the gesture on the robot.  Inside Choregraphe the broker is implicit;
# standalone use requires an explicit IP/port (see comment below).
# NOTE(review): Python 2 syntax (``except E, err`` / ``print``); catching
# BaseException this broadly silently swallows even KeyboardInterrupt.
try:
# uncomment the following line and modify the IP if you use this script outside Choregraphe.
# motion = ALProxy("ALMotion", IP, 9559)
motion = ALProxy("ALMotion")
motion.angleInterpolationBezier(names, times, keys);
except BaseException, err:
print err
|
beezee/GAE-Django-base-app | refs/heads/master | django/contrib/gis/tests/distapp/__init__.py | 12133432 | |
B-UMMI/INNUca | refs/heads/master | modules/__init__.py | 12133432 | |
theolind/home-assistant | refs/heads/master | tests/util/__init__.py | 12133432 | |
isandlaTech/cohorte-demos | refs/heads/dev | led/dump/led-demo-raspberry/cohorte/dist/cohorte-1.0.0-1.0.0-20141201.234602-19-python-distribution/repo/sleekxmpp/plugins/xep_0221/media.py | 14 | """
SleekXMPP: The Sleek XMPP Library
Copyright (C) 2012 Nathanael C. Fritz, Lance J.T. Stout
This file is part of SleekXMPP.
See the file LICENSE for copying permission.
"""
import logging
from sleekxmpp.plugins import BasePlugin
from sleekxmpp.xmlstream import register_stanza_plugin
from sleekxmpp.plugins.xep_0221 import stanza, Media, URI
from sleekxmpp.plugins.xep_0004 import FormField
log = logging.getLogger(__name__)
class XEP_0221(BasePlugin):
    """SleekXMPP plugin implementing XEP-0221 (Data Forms Media Element)."""

    name = 'xep_0221'
    description = 'XEP-0221: Data Forms Media Element'
    dependencies = set(['xep_0004'])

    def plugin_init(self):
        # Allow <media/> payloads to appear inside data-form fields.
        register_stanza_plugin(FormField, Media)
|
klnprj/testapp | refs/heads/master | django/contrib/messages/constants.py | 630 | DEBUG = 10
INFO = 20
SUCCESS = 25
WARNING = 30
ERROR = 40
# Default tag string rendered for each message level (DEBUG = 10 is defined
# just above this block).  Tags are typically used as CSS class names.
DEFAULT_TAGS = {
DEBUG: 'debug',
INFO: 'info',
SUCCESS: 'success',
WARNING: 'warning',
ERROR: 'error',
}
|
yonglong009/pycharmNoteBook | refs/heads/master | lean_python3/pytools/for_excel/ctl_excel.py | 1 | import os
__author__ = 'MaYonglong'
# -*- coding: utf-8 -*-
# http://www.jb51.net/article/60510.htm python 读写excel
# http://www.python-excel.org/ |
nishant8BITS/node-gyp | refs/heads/master | gyp/pylib/gyp/generator/gypd.py | 1824 | # Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""gypd output module
This module produces gyp input as its output. Output files are given the
.gypd extension to avoid overwriting the .gyp files that they are generated
from. Internal references to .gyp files (such as those found in
"dependencies" sections) are not adjusted to point to .gypd files instead;
unlike other paths, which are relative to the .gyp or .gypd file, such paths
are relative to the directory from which gyp was run to create the .gypd file.
This generator module is intended to be a sample and a debugging aid, hence
the "d" for "debug" in .gypd. It is useful to inspect the results of the
various merges, expansions, and conditional evaluations performed by gyp
and to see a representation of what would be fed to a generator module.
It's not advisable to rename .gypd files produced by this module to .gyp,
because they will have all merges, expansions, and evaluations already
performed and the relevant constructs not present in the output; paths to
dependencies may be wrong; and various sections that do not belong in .gyp
files such as such as "included_files" and "*_excluded" will be present.
Output will also be stripped of comments. This is not intended to be a
general-purpose gyp pretty-printer; for that, you probably just want to
run "pprint.pprint(eval(open('source.gyp').read()))", which will still strip
comments but won't do all of the other things done to this module's output.
The specific formatting of the output generated by this module is subject
to change.
"""
import gyp.common
import errno
import os
import pprint
# These variables should just be spit back out as variable references,
# not expanded to concrete values.
_generator_identity_variables = [
    'CONFIGURATION_NAME',
    'EXECUTABLE_PREFIX',
    'EXECUTABLE_SUFFIX',
    'INTERMEDIATE_DIR',
    'LIB_DIR',
    'PRODUCT_DIR',
    'RULE_INPUT_ROOT',
    'RULE_INPUT_DIRNAME',
    'RULE_INPUT_EXT',
    'RULE_INPUT_NAME',
    'RULE_INPUT_PATH',
    'SHARED_INTERMEDIATE_DIR',
    'SHARED_LIB_DIR',
    'SHARED_LIB_PREFIX',
    'SHARED_LIB_SUFFIX',
    'STATIC_LIB_PREFIX',
    'STATIC_LIB_SUFFIX',
]

# gypd doesn't define a default value for OS like many other generator
# modules.  Specify "-D OS=whatever" on the command line to provide a value.
generator_default_variables = {}

# gypd supports multiple toolsets.
generator_supports_multiple_toolsets = True

# TODO(mark): This always uses <, which isn't right.  The input module should
# notify the generator to tell it which phase it is operating in, and this
# module should use < for the early phase and then switch to > for the late
# phase.  Bonus points for carrying @ back into the output too.
for identity_variable in _generator_identity_variables:
    generator_default_variables[identity_variable] = '<(%s)' % identity_variable
def GenerateOutput(target_list, target_dicts, data, params):
  """Write a pretty-printed .gypd dump next to every .gyp input file.

  Arguments:
    target_list: qualified target names; used to discover the input files.
    target_dicts: unused here, but part of the generator interface.
    data: mapping of input-file path to the fully processed gyp data to dump.
    params: generator parameters; params['options'].suffix is inserted
        before the '.gypd' extension of each output file.
  """
  # Several targets usually share one .gyp file; de-duplicate via a dict
  # keyed on the output path.
  output_files = {}
  for qualified_target in target_list:
    [input_file, target] = \
        gyp.common.ParseQualifiedTarget(qualified_target)[0:2]

    if input_file[-4:] != '.gyp':
      continue
    input_file_stem = input_file[:-4]
    output_file = input_file_stem + params['options'].suffix + '.gypd'

    if output_file not in output_files:
      output_files[output_file] = input_file

  # items() instead of the Python-2-only iteritems(); the "with" statement
  # guarantees the file handle is closed even if pprint.pprint raises.
  for output_file, input_file in output_files.items():
    with open(output_file, 'w') as output:
      pprint.pprint(data[input_file], output)
|
alqfahad/odoo | refs/heads/8.0 | addons/survey_crm/__openerp__.py | 312 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
# Addon manifest dict evaluated by the OpenERP/Odoo module loader.
'name': 'Survey CRM',
'version': '2.0',
'category': 'Marketing',
'complexity': 'easy',
'website': 'https://www.odoo.com/page/survey',
'description': """
Survey - CRM (bridge module)
=================================================================================
This module adds a Survey mass mailing button inside the more option of lead/customers views
""",
'author': 'OpenERP SA',
'depends': ['crm', 'survey'],
'data': [
'crm_view.xml',
],
'installable': True,
# auto_install: activated automatically once both 'crm' and 'survey' are installed.
'auto_install': True
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
BassantMorsi/finderApp | refs/heads/master | lib/python2.7/site-packages/django/contrib/gis/geos/prototypes/__init__.py | 163 | """
This module contains all of the GEOS ctypes function prototypes. Each
prototype handles the interaction between the GEOS library and Python
via ctypes.
"""
from django.contrib.gis.geos.prototypes.coordseq import ( # NOQA
create_cs, cs_clone, cs_getdims, cs_getordinate, cs_getsize, cs_getx,
cs_gety, cs_getz, cs_setordinate, cs_setx, cs_sety, cs_setz, get_cs,
)
from django.contrib.gis.geos.prototypes.geom import ( # NOQA
create_collection, create_empty_polygon, create_linearring,
create_linestring, create_point, create_polygon, destroy_geom, from_hex,
from_wkb, from_wkt, geom_clone, geos_get_srid, geos_normalize,
geos_set_srid, geos_type, geos_typeid, get_dims, get_extring, get_geomn,
get_intring, get_nrings, get_num_coords, get_num_geoms, to_hex, to_wkb,
to_wkt,
)
from django.contrib.gis.geos.prototypes.misc import * # NOQA
from django.contrib.gis.geos.prototypes.predicates import ( # NOQA
geos_contains, geos_covers, geos_crosses, geos_disjoint, geos_equals,
geos_equalsexact, geos_hasz, geos_intersects, geos_isclosed, geos_isempty,
geos_isring, geos_issimple, geos_isvalid, geos_overlaps,
geos_relatepattern, geos_touches, geos_within,
)
from django.contrib.gis.geos.prototypes.topology import * # NOQA
|
peterwilletts24/Monsoon-Python-Scripts | refs/heads/master | pp_load_mean_pickle_multiple_l_s_rain_30181.py | 1 | """
Load multiple pp diagnostic files, aggregate by year, day etc, calculate mean, sum etc and pickle
"""
import os, sys
import glob
import itertools
import numpy as np
import cPickle as pickle
import iris
import iris.coords as coords
import iris.coord_categorisation
from iris.analysis.interpolate import linear
import cartopy.crs as ccrs
# STASH diagnostic 30181: stratiform rainfall rate files to process.
diagnostic = '30181.pp'
flist = glob.glob ('/projects/cascade/pwille/moose_retrievals/*/*/%s' % diagnostic)
# For each retrieved file: load the cubes, collapse over time to a mean,
# and pickle the result under a per-experiment directory.
for i in flist:
fname = str(i)
l_s_r_rate, t_tot_incr = iris.load_cubes(fname, ['stratiform_rainfall_rate', 'tendency_of_air_temperature'])
# Experiment id is the 7th path component of the retrieval path above.
experiment_id = fname.split('/')[6]
#iris.coord_categorisation.add_day_of_year(p_at_msl, 'forecast_reference_time', name='dayyear')
# forecast_period messes up aggregation sometimes so remove. Probably need to comment out for time of day
# http://nbviewer.ipython.org/github/SciTools/iris_example_code/blob/master/coord_categorisation.ipynb
# Because some model outputs have time as a 2-D aux coord, as opposed to a 1-D dim coord, the standard iris categorisation by day, year etc throws an error. Add_categorised_coord allows categorisation of 2-dimensional arrays.
# Get year from time coord. Function to use in add_categorised_coord below
#def year_from_time(coord, point):
# yearnp = coord.units.num2date(point).time
# yearpoint=np.zeros(yearnp.shape)
# for index, x in np.ndenumerate(yearnp):
# yearpoint[index] = x.year
# year = iris.coords.AuxCoord(yearpoint)
# return year
#iris.coord_categorisation.add_categorised_coord(p_at_msl, 'year', 'time', year_from_time)
# NOTE(review): the bare "except: continue" below hides every failure mode
# (not just collapse errors) -- worth narrowing to the expected iris errors.
try:
#iris.coord_categorisation.add_year(p_at_msl, 'time', name='year')
model_mean = l_s_r_rate.collapsed('time', iris.analysis.MEAN)
except:
#print p_at_msl
continue
#iris.coord_categorisation.add_month(p_at_msl, 'forecast_reference_time', name='month')
#daily_mean = p_at_msl.aggregated_by(['dayyear'], iris.analysis.MEAN)
#model_mean = p_at_msl.aggregated_by(['year'], iris.analysis.MEAN)
#month_mean = p_at_msl2.aggregated_by(['month'], iris.analysis.MEAN)
#daily_mean_rot = checkpoleposition(daily_mean)
if not os.path.exists(experiment_id): os.makedirs(experiment_id)
# iris.save(daily_mean,'daily_mean_rot%s.pp' % experiment_id)
# NOTE(review): the file handle passed to pickle.dump is never closed;
# a "with open(...) as f:" block would be safer.
pickle.dump( model_mean, open( "/home/pwille/python_scripts/%s/pickle_model_mean_collapsed_%s.p" % (experiment_id, experiment_id), "wb" ) )
#pickle.dump( daily_mean, open( "/home/pwille/python_scripts/%s/pickle_daily_mean_%s.p" % (experiment_id, experiment_id), "wb" ) )
#pickle.dump( month_mean, open( "/home/pwille/python_scripts/%s/pickle_month_mean_%s.p" % (experiment_id, experiment_id), "wb" ) )
|
yasoob/PythonRSSReader | refs/heads/master | venv/lib/python2.7/dist-packages/twisted/test/test_defgen.py | 34 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.internet.defer.deferredGenerator} and related APIs.
"""
from __future__ import division, absolute_import
import sys
from twisted.internet import reactor
from twisted.trial import unittest
from twisted.internet.defer import waitForDeferred, deferredGenerator, Deferred
from twisted.internet.defer import inlineCallbacks, returnValue
from twisted.internet import defer
def getThing():
    """Return a Deferred that fires with "hi" on the next reactor turn."""
    deferred = Deferred()
    reactor.callLater(0, deferred.callback, "hi")
    return deferred
def getOwie():
    """Return a Deferred that errbacks asynchronously with ZeroDivisionError('OMG')."""
    deferred = Deferred()

    def _fail():
        deferred.errback(ZeroDivisionError('OMG'))

    reactor.callLater(0, _fail)
    return deferred
# NOTE: most of the tests in DeferredGeneratorTests are duplicated
# with slightly different syntax for the InlineCallbacksTests below.
class TerminalException(Exception):
    """Marker exception used by the handled-terminal-failure tests."""
class BaseDefgenTests:
"""
This class sets up a bunch of test cases which will test both
deferredGenerator and inlineCallbacks based generators. The subclasses
DeferredGeneratorTests and InlineCallbacksTests each provide the actual
generator implementations tested.
"""
# Subclasses must provide: _genBasics, _genBuggy, _genNothing,
# _genHandledTerminalFailure, _genHandledTerminalAsyncFailure,
# _genStackUsage and _genStackUsage2.
def testBasics(self):
"""
Test that a normal deferredGenerator works.  Tests yielding a
deferred which callbacks, as well as a deferred errbacks. Also
ensures returning a final value works.
"""
return self._genBasics().addCallback(self.assertEqual, 'WOOSH')
def testBuggy(self):
"""
Ensure that a buggy generator properly signals a Failure
condition on result deferred.
"""
return self.assertFailure(self._genBuggy(), ZeroDivisionError)
def testNothing(self):
"""Test that a generator which never yields results in None."""
return self._genNothing().addCallback(self.assertEqual, None)
def testHandledTerminalFailure(self):
"""
Create a Deferred Generator which yields a Deferred which fails and
handles the exception which results.  Assert that the Deferred
Generator does not errback its Deferred.
"""
return self._genHandledTerminalFailure().addCallback(self.assertEqual, None)
def testHandledTerminalAsyncFailure(self):
"""
Just like testHandledTerminalFailure, only with a Deferred which fires
asynchronously with an error.
"""
d = defer.Deferred()
deferredGeneratorResultDeferred = self._genHandledTerminalAsyncFailure(d)
# Fire the error only after the generator has already yielded d.
d.errback(TerminalException("Handled Terminal Failure"))
return deferredGeneratorResultDeferred.addCallback(
self.assertEqual, None)
def testStackUsage(self):
"""
Make sure we don't blow the stack when yielding immediately
available deferreds.
"""
return self._genStackUsage().addCallback(self.assertEqual, 0)
def testStackUsage2(self):
"""
Make sure we don't blow the stack when yielding immediately
available values.
"""
return self._genStackUsage2().addCallback(self.assertEqual, 0)
class DeferredGeneratorTests(BaseDefgenTests, unittest.TestCase):
# First provide all the generator impls necessary for BaseDefgenTests.
# deferredGenerator style: yield waitForDeferred(...) wrappers and call
# .getResult() after being resumed to obtain the value (or re-raise).
def _genBasics(self):
x = waitForDeferred(getThing())
yield x
x = x.getResult()
self.assertEqual(x, "hi")
ow = waitForDeferred(getOwie())
yield ow
try:
ow.getResult()
except ZeroDivisionError as e:
self.assertEqual(str(e), 'OMG')
yield "WOOSH"
return
_genBasics = deferredGenerator(_genBasics)
def _genBuggy(self):
yield waitForDeferred(getThing())
# Deliberate bug: the resulting Deferred must errback with ZeroDivisionError.
1//0
_genBuggy = deferredGenerator(_genBuggy)
def _genNothing(self):
# Never yields; "if 0" keeps this a generator function.
if 0: yield 1
_genNothing = deferredGenerator(_genNothing)
def _genHandledTerminalFailure(self):
x = waitForDeferred(defer.fail(TerminalException("Handled Terminal Failure")))
yield x
try:
x.getResult()
except TerminalException:
pass
_genHandledTerminalFailure = deferredGenerator(_genHandledTerminalFailure)
def _genHandledTerminalAsyncFailure(self, d):
x = waitForDeferred(d)
yield x
try:
x.getResult()
except TerminalException:
pass
_genHandledTerminalAsyncFailure = deferredGenerator(_genHandledTerminalAsyncFailure)
def _genStackUsage(self):
for x in range(5000):
# Test with yielding a deferred
x = waitForDeferred(defer.succeed(1))
yield x
x = x.getResult()
yield 0
_genStackUsage = deferredGenerator(_genStackUsage)
def _genStackUsage2(self):
for x in range(5000):
# Test with yielding a random value
yield 1
yield 0
_genStackUsage2 = deferredGenerator(_genStackUsage2)
# Tests unique to deferredGenerator
def testDeferredYielding(self):
"""
Ensure that yielding a Deferred directly is trapped as an
error.
"""
# See the comment _deferGenerator about d.callback(Deferred).
def _genDeferred():
yield getThing()
_genDeferred = deferredGenerator(_genDeferred)
return self.assertFailure(_genDeferred(), TypeError)
class InlineCallbacksTests(BaseDefgenTests, unittest.TestCase):
# First provide all the generator impls necessary for BaseDefgenTests.
# inlineCallbacks style: "x = yield deferred" receives the result directly,
# and failures are re-raised at the yield point.
def _genBasics(self):
x = yield getThing()
self.assertEqual(x, "hi")
try:
ow = yield getOwie()
except ZeroDivisionError as e:
self.assertEqual(str(e), 'OMG')
returnValue("WOOSH")
_genBasics = inlineCallbacks(_genBasics)
def _genBuggy(self):
yield getThing()
# Deliberate bug: the resulting Deferred must errback with ZeroDivisionError.
1/0
_genBuggy = inlineCallbacks(_genBuggy)
def _genNothing(self):
# Never yields; "if 0" keeps this a generator function.
if 0: yield 1
_genNothing = inlineCallbacks(_genNothing)
def _genHandledTerminalFailure(self):
try:
x = yield defer.fail(TerminalException("Handled Terminal Failure"))
except TerminalException:
pass
_genHandledTerminalFailure = inlineCallbacks(_genHandledTerminalFailure)
def _genHandledTerminalAsyncFailure(self, d):
try:
x = yield d
except TerminalException:
pass
_genHandledTerminalAsyncFailure = inlineCallbacks(
_genHandledTerminalAsyncFailure)
def _genStackUsage(self):
for x in range(5000):
# Test with yielding a deferred
x = yield defer.succeed(1)
returnValue(0)
_genStackUsage = inlineCallbacks(_genStackUsage)
def _genStackUsage2(self):
for x in range(5000):
# Test with yielding a random value
yield 1
returnValue(0)
_genStackUsage2 = inlineCallbacks(_genStackUsage2)
# Tests unique to inlineCallbacks
def testYieldNonDeferrred(self):
"""
Ensure that yielding a non-deferred passes it back as the
result of the yield expression.
"""
def _test():
x = yield 5
returnValue(5)
_test = inlineCallbacks(_test)
return _test().addCallback(self.assertEqual, 5)
def testReturnNoValue(self):
"""Ensure a standard python return results in a None result."""
def _noReturn():
yield 5
return
_noReturn = inlineCallbacks(_noReturn)
return _noReturn().addCallback(self.assertEqual, None)
def testReturnValue(self):
"""Ensure that returnValue works."""
def _return():
yield 5
returnValue(6)
_return = inlineCallbacks(_return)
return _return().addCallback(self.assertEqual, 6)
def test_nonGeneratorReturn(self):
"""
Ensure that C{TypeError} with a message about L{inlineCallbacks} is
raised when a non-generator returns something other than a generator.
"""
def _noYield():
return 5
_noYield = inlineCallbacks(_noYield)
self.assertIn("inlineCallbacks",
str(self.assertRaises(TypeError, _noYield)))
def test_nonGeneratorReturnValue(self):
"""
Ensure that C{TypeError} with a message about L{inlineCallbacks} is
raised when a non-generator calls L{returnValue}.
"""
def _noYield():
returnValue(5)
_noYield = inlineCallbacks(_noYield)
self.assertIn("inlineCallbacks",
str(self.assertRaises(TypeError, _noYield)))
|
mhugent/Quantum-GIS | refs/heads/master | python/plugins/processing/tests/GdalTest.py | 8 | # -*- coding: utf-8 -*-
"""
***************************************************************************
GdalTest.py
---------------------
Date : April 2013
Copyright : (C) 2013 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'April 2013'
__copyright__ = '(C) 2013, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
import unittest
from osgeo import gdal
from osgeo.gdalconst import GA_ReadOnly
import processing
from processing.tools import dataobjects
from processing.tools.system import *
from processing.tests.TestData import points, points2, polygons, polygons2, \
lines, union, table, polygonsGeoJson, raster
class GdalTest(unittest.TestCase):
"""Regression tests for the GDAL/OGR Processing algorithms.

Each raster test compares a hash of the output pixel array against a
known-good value; each vector test pins field names/types, feature count
and the first feature's attributes and WKT geometry.
"""
def test_gdalogrsieve(self):
outputs = processing.runalg('gdalogr:sieve', raster(), 2, 0, None)
output = outputs['dst_filename']
self.assertTrue(os.path.isfile(output))
dataset = gdal.Open(output, GA_ReadOnly)
# Known-good checksum of the sieved raster contents.
strhash = hash(str(dataset.ReadAsArray(0).tolist()))
self.assertEqual(strhash, -1353696889)
def test_gdalogrsieveWithUnsupportedOutputFormat(self):
# Same algorithm, but forcing an output extension the format list
# does not support; result must still match the known-good hash.
outputs = processing.runalg('gdalogr:sieve', raster(), 2, 0,
getTempFilename('img'))
output = outputs['dst_filename']
self.assertTrue(os.path.isfile(output))
dataset = gdal.Open(output, GA_ReadOnly)
strhash = hash(str(dataset.ReadAsArray(0).tolist()))
self.assertEqual(strhash, -1353696889)
def test_gdalogrwarpreproject(self):
# Reproject from EPSG:23030 to EPSG:4326.
outputs = processing.runalg(
'gdalogr:warpreproject',
raster(),
'EPSG:23030',
'EPSG:4326',
0,
0,
'',
None,
)
output = outputs['OUTPUT']
self.assertTrue(os.path.isfile(output))
dataset = gdal.Open(output, GA_ReadOnly)
strhash = hash(str(dataset.ReadAsArray(0).tolist()))
self.assertEqual(strhash, -2021328784)
def test_gdalogrmerge(self):
outputs = processing.runalg('gdalogr:merge', raster(), False, False,
None)
output = outputs['OUTPUT']
self.assertTrue(os.path.isfile(output))
dataset = gdal.Open(output, GA_ReadOnly)
strhash = hash(str(dataset.ReadAsArray(0).tolist()))
self.assertEqual(strhash, -1353696889)
def test_gdalogrogr2ogr(self):
outputs = processing.runalg('gdalogr:ogr2ogr', union(), 3, '', None)
output = outputs['OUTPUT_LAYER']
layer = dataobjects.getObjectFromUri(output, True)
fields = layer.pendingFields()
expectednames = [
'id',
'poly_num_a',
'poly_st_a',
'id_2',
'poly_num_b',
'poly_st_b',
]
expectedtypes = [
'Integer',
'Real',
'String',
'Integer',
'Real',
'String',
]
names = [str(f.name()) for f in fields]
types = [str(f.typeName()) for f in fields]
self.assertEqual(expectednames, names)
self.assertEqual(expectedtypes, types)
features = processing.features(layer)
self.assertEqual(8, len(features))
# Pin the first feature's attributes and geometry.
feature = features.next()
attrs = feature.attributes()
expectedvalues = [
'1',
'1.1',
'string a',
'2',
'1',
'string a',
]
values = [str(attr) for attr in attrs]
self.assertEqual(expectedvalues, values)
wkt = 'POLYGON((270807.08580285 4458940.1594565,270798.42294527 4458914.62661676,270780.81854858 4458914.21983449,270763.52289518 4458920.715993,270760.3449542 4458926.6570575,270763.78234766 4458958.22561242,270794.30290024 4458942.16424502,270807.08580285 4458940.1594565))'
self.assertEqual(wkt, str(feature.geometry().exportToWkt()))
def test_gdalogrogr2ogrWrongExtension(self):
# ogr2ogr given an unknown output extension must still produce the
# same layer contents as the default-output case above.
outputs = processing.runalg('gdalogr:ogr2ogr', union(), 3, '',
getTempFilename('wrongext'))
output = outputs['OUTPUT_LAYER']
layer = dataobjects.getObjectFromUri(output, True)
fields = layer.pendingFields()
expectednames = [
'id',
'poly_num_a',
'poly_st_a',
'id_2',
'poly_num_b',
'poly_st_b',
]
expectedtypes = [
'Integer',
'Real',
'String',
'Integer',
'Real',
'String',
]
names = [str(f.name()) for f in fields]
types = [str(f.typeName()) for f in fields]
self.assertEqual(expectednames, names)
self.assertEqual(expectedtypes, types)
features = processing.features(layer)
self.assertEqual(8, len(features))
feature = features.next()
attrs = feature.attributes()
expectedvalues = [
'1',
'1.1',
'string a',
'2',
'1',
'string a',
]
values = [str(attr) for attr in attrs]
self.assertEqual(expectedvalues, values)
wkt = 'POLYGON((270807.08580285 4458940.1594565,270798.42294527 4458914.62661676,270780.81854858 4458914.21983449,270763.52289518 4458920.715993,270760.3449542 4458926.6570575,270763.78234766 4458958.22561242,270794.30290024 4458942.16424502,270807.08580285 4458940.1594565))'
self.assertEqual(wkt, str(feature.geometry().exportToWkt()))
def suite():
    """Collect every GdalTest test_* method into a single test suite."""
    gdal_suite = unittest.makeSuite(GdalTest, 'test')
    return gdal_suite
def runtests():
    """Run the GDAL/OGR suite and return the populated TestResult."""
    result = unittest.TestResult()
    suite().run(result)
    return result
|
twalpole/selenium | refs/heads/master | py/test/selenium/webdriver/common/click_tests.py | 39 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait
@pytest.fixture(autouse=True)
def loadPage(pages):
    """Load the clicks test page before every test in this module."""
    pages.load("clicks.html")
def testCanClickOnALinkThatOverflowsAndFollowIt(driver):
    """Clicking a link that overflows its container must still navigate."""
    overflow_link = driver.find_element(By.ID, "overflowLink")
    overflow_link.click()
    WebDriverWait(driver, 3).until(EC.title_is("XHTML Test Page"))
def testClickingALinkMadeUpOfNumbersIsHandledCorrectly(driver):
    """A link whose text is purely numeric ("333333") must be clickable."""
    numeric_link = driver.find_element(By.LINK_TEXT, "333333")
    numeric_link.click()
    WebDriverWait(driver, 3).until(EC.title_is("XHTML Test Page"))
|
smilusingjavascript/blink | refs/heads/master | LayoutTests/http/tests/media/resources/media-source/generate-config-change-tests.py | 55 | #!/usr/bin/python
# Copyright (C) 2013 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
This is a script that generates the content and HTML files for Media Source
codec config change LayoutTests.
"""
import json
import os
DURATION = 2
MEDIA_FORMATS = ['webm', 'mp4']
ENCODE_SETTINGS = [
## Video-only files
# Frame rate changes
{'fs': '320x240', 'fr': 24, 'kfr': 8, 'c': '#ff0000', 'vbr': 128, 'abr': 0, 'asr': 0, 'ach': 0, 'afreq': 0},
{'fs': '320x240', 'fr': 30, 'kfr': 10, 'c': '#ff0000', 'vbr': 128, 'abr': 0, 'asr': 0, 'ach': 0, 'afreq': 0},
# Frame size change
{'fs': '640x480', 'fr': 30, 'kfr': 10, 'c': '#00ff00', 'vbr': 128, 'abr': 0, 'asr': 0, 'ach': 0, 'afreq': 0},
# Bitrate change
{'fs': '320x240', 'fr': 30, 'kfr': 10, 'c': '#ff00ff', 'vbr': 256, 'abr': 0, 'asr': 0, 'ach': 0, 'afreq': 0},
## Audio-only files
# Bitrate/Codebook changes
{'fs': '0x0', 'fr': 0, 'kfr': 0, 'c': '#000000', 'vbr': 0, 'abr': 128, 'asr': 44100, 'ach': 1, 'afreq': 2000},
{'fs': '0x0', 'fr': 0, 'kfr': 0, 'c': '#000000', 'vbr': 0, 'abr': 192, 'asr': 44100, 'ach': 1, 'afreq': 4000},
## Audio-Video files
# Frame size change.
{'fs': '320x240', 'fr': 30, 'kfr': 10, 'c': '#ff0000', 'vbr': 256, 'abr': 128, 'asr': 44100, 'ach': 1, 'afreq': 2000},
{'fs': '640x480', 'fr': 30, 'kfr': 10, 'c': '#00ff00', 'vbr': 256, 'abr': 128, 'asr': 44100, 'ach': 1, 'afreq': 2000},
# Audio bitrate change.
{'fs': '640x480', 'fr': 30, 'kfr': 10, 'c': '#00ff00', 'vbr': 256, 'abr': 192, 'asr': 44100, 'ach': 1, 'afreq': 4000},
# Video bitrate change.
{'fs': '640x480', 'fr': 30, 'kfr': 10, 'c': '#00ffff', 'vbr': 512, 'abr': 128, 'asr': 44100, 'ach': 1, 'afreq': 2000},
]
CONFIG_CHANGE_TESTS = [
["v-framerate", 0, 1, "Tests %s video-only frame rate changes."],
["v-framesize", 1, 2, "Tests %s video-only frame size changes."],
["v-bitrate", 1, 3, "Tests %s video-only bitrate changes."],
["a-bitrate", 4, 5, "Tests %s audio-only bitrate changes."],
["av-framesize", 6, 7, "Tests %s frame size changes in multiplexed content."],
["av-audio-bitrate", 7, 8, "Tests %s audio bitrate changes in multiplexed content."],
["av-video-bitrate", 7, 9, "Tests %s video bitrate changes in multiplexed content."]
]
CODEC_INFO = {
"mp4": {"audio": "mp4a.40.2", "video": "avc1.4D4001"},
"webm": {"audio": "vorbis", "video": "vp8"}
}
HTML_TEMPLATE = """<!DOCTYPE html>
<html>
<head>
<script src="/w3c/resources/testharness.js"></script>
<script src="/w3c/resources/testharnessreport.js"></script>
<script src="mediasource-util.js"></script>
<script src="mediasource-config-changes.js"></script>
<link rel="stylesheet" href="/w3c/resources/testharness.css">
</head>
<body>
<div id="log"></div>
<script>
mediaSourceConfigChangeTest("%(media_format)s", "%(idA)s", "%(idB)s", "%(description)s");
</script>
</body>
</html>
"""
def run(cmd_line):
    """Join *cmd_line* (a list of argument strings) into one shell command and execute it.

    Note: arguments are joined verbatim, so callers deliberately embed shell
    quoting in individual elements (see the ffmpeg invocations in main()).
    """
    command = " ".join(cmd_line)
    os.system(command)
def generate_manifest(filename, media_filename, media_format, has_audio, has_video):
    """Write a JSON manifest describing a generated media file.

    Args:
        filename: Path of the manifest JSON file to write.
        media_filename: Path of the media file the manifest points at.
        media_format: Container format, e.g. 'webm' or 'mp4'.
        has_audio: Whether the media file contains an audio track.
        has_video: Whether the media file contains a video track.
    """
    # Video is the "major" MIME type whenever a video track is present.
    major_type = "audio"
    if has_video:
        major_type = "video"
    codecs = []
    if has_video:
        codecs.append(CODEC_INFO[media_format]["video"])
    if has_audio:
        codecs.append(CODEC_INFO[media_format]["audio"])
    mimetype = "%s/%s;codecs=\"%s\"" % (major_type, media_format, ",".join(codecs))
    manifest = { 'url': media_filename, 'type': mimetype}
    # Open in text mode ("w", not "wb"): json.dumps() returns str, so this
    # works on both Python 2 and Python 3. The context manager guarantees
    # the file is closed even if the write raises.
    with open(filename, "w") as f:
        f.write(json.dumps(manifest, indent=4, separators=(',', ': ')))
def generate_test_html(media_format, config_change_tests, encoding_ids):
    """Emit one LayoutTest HTML file per config-change test case.

    Args:
        media_format: Container format, e.g. 'webm' or 'mp4'.
        config_change_tests: Rows of [name, index_a, index_b, description]
            (see CONFIG_CHANGE_TESTS); the two indices select entries from
            encoding_ids.
        encoding_ids: Encoding identifiers recorded while encoding the media
            files, in ENCODE_SETTINGS order.
    """
    for test_info in config_change_tests:
        filename = "../../media-source/mediasource-config-change-%s-%s.html" % (media_format, test_info[0])
        html = HTML_TEMPLATE % {'media_format': media_format,
                                'idA': encoding_ids[test_info[1]],
                                'idB': encoding_ids[test_info[2]],
                                'description': test_info[3] % (media_format)}
        # Text mode ("w") so the str content also writes cleanly on Python 3;
        # the context manager closes the file even if the write raises.
        with open(filename, "w") as f:
            f.write(html)
def main():
    """Encode every test media variant and emit its manifest and test pages.

    Shells out to ffmpeg (and mse_webm_remuxer / MP4Box), so those tools must
    be on PATH when this script is run.
    """
    encoding_ids = []
    for media_format in MEDIA_FORMATS:
        run(["mkdir ", media_format])
        for settings in ENCODE_SETTINGS:
            # Unpack the encode settings for this variant.
            video_bitrate = settings['vbr']
            has_video = (video_bitrate > 0)
            audio_bitrate = settings['abr']
            has_audio = (audio_bitrate > 0)
            bitrate = video_bitrate + audio_bitrate
            frame_size = settings['fs']
            frame_rate = settings['fr']
            keyframe_rate = settings['kfr']
            color = settings['c']
            sample_rate = settings['asr']
            channels = settings['ach']
            frequency = settings['afreq']
            cmdline = ["ffmpeg", "-y"]
            id_prefix = ""
            id_params = ""
            if has_audio:
                id_prefix += "a"
                id_params += "-%sHz-%sch" % (sample_rate, channels)
                # Synthesize a sine tone per channel via ffmpeg's aevalsrc.
                channel_layout = "FC"
                sin_func = "sin(%s*2*PI*t)" % frequency
                func = sin_func
                if channels == 2:
                    channel_layout += "|BC"
                    func += "|" + sin_func
                cmdline += ["-f", "lavfi", "-i", "aevalsrc=\"%s:s=%s:c=%s:d=%s\"" % (func, sample_rate, channel_layout, DURATION)]
            if has_video:
                id_prefix += "v"
                id_params += "-%s-%sfps-%skfr" % (frame_size, frame_rate, keyframe_rate)
                # Solid-color video source via ffmpeg's lavfi 'color' input.
                cmdline += ["-f", "lavfi", "-i", "color=%s:duration=%s:size=%s:rate=%s" % (color, DURATION, frame_size, frame_rate)]
            if has_audio:
                cmdline += ["-b:a", "%sk" % audio_bitrate]
            if has_video:
                cmdline += ["-b:v", "%sk" % video_bitrate]
                cmdline += ["-keyint_min", "%s" % keyframe_rate]
                cmdline += ["-g", "%s" % keyframe_rate]
                # Burn the encode parameters into the video frames so config
                # changes are visible when a test is watched manually.
                textOverlayInfo = "'drawtext=fontfile=Mono:fontsize=32:text=Time\\\\:\\\\ %{pts}"
                textOverlayInfo += ",drawtext=fontfile=Mono:fontsize=32:y=32:text=Size\\\\:\\\\ %s" % (frame_size)
                textOverlayInfo += ",drawtext=fontfile=Mono:fontsize=32:y=64:text=Bitrate\\\\:\\\\ %s" % (bitrate)
                textOverlayInfo += ",drawtext=fontfile=Mono:fontsize=32:y=96:text=FrameRate\\\\:\\\\ %s" % (frame_rate)
                textOverlayInfo += ",drawtext=fontfile=Mono:fontsize=32:y=128:text=KeyFrameRate\\\\:\\\\ %s" % (keyframe_rate)
                if has_audio:
                    textOverlayInfo += ",drawtext=fontfile=Mono:fontsize=32:y=160:text=SampleRate\\\\:\\\\ %s" % (sample_rate)
                    textOverlayInfo += ",drawtext=fontfile=Mono:fontsize=32:y=192:text=Channels\\\\:\\\\ %s" % (channels)
                textOverlayInfo += "'"
                cmdline += ["-vf", textOverlayInfo]
            encoding_id = "%s-%sk%s" % (id_prefix, bitrate, id_params)
            # The encoding ids are identical for every media format, so only
            # record them on the first pass through ENCODE_SETTINGS.
            if len(encoding_ids) < len(ENCODE_SETTINGS):
                encoding_ids.append(encoding_id)
            filename_base = "%s/test-%s" % (media_format, encoding_id)
            media_filename = filename_base + "." + media_format
            manifest_filename = filename_base + "-manifest.json"
            cmdline.append(media_filename)
            run(cmdline)
            # Remux file so it conforms to MSE bytestream requirements.
            if media_format == "webm":
                tmp_filename = media_filename + ".tmp"
                run(["mse_webm_remuxer", media_filename, tmp_filename])
                run(["mv", tmp_filename, media_filename])
            elif media_format == "mp4":
                run(["MP4Box", "-dash", "250", "-rap", media_filename])
                run(["mv", filename_base + "_dash.mp4", media_filename])
                run(["rm", filename_base + "_dash.mpd"])
            generate_manifest(manifest_filename, media_filename, media_format,
                              has_audio, has_video)
        generate_test_html(media_format, CONFIG_CHANGE_TESTS, encoding_ids)
# Script entry point.
if __name__ == "__main__":
    main()
|
EthereumWebhooks/blockhooks | refs/heads/master | lib/ethereum/tests/test_transactions.py | 2 | import ethereum.transactions as transactions
import ethereum.utils as utils
import rlp
from rlp.utils import decode_hex, encode_hex, str_to_bytes
import ethereum.testutils as testutils
from ethereum.testutils import fixture_to_bytes
import ethereum.config as config
import sys
import json
from ethereum.slogging import get_logger
logger = get_logger()
# customize VM log output to your needs
# hint: use 'py.test' with the '-s' option to dump logs to the console
# configure_logging(':trace')
# NOTE(review): presumably a warm-up/sanity call so codec problems surface at
# import time rather than mid-test -- confirm intent.
encode_hex('')
def test_transaction(filename, testname, testdata):
    """Check one transaction fixture: decode its RLP and compare the fields.

    Args:
        filename: Fixture file the case came from (reporting only).
        testname: Name of the case within the fixture file.
        testdata: Fixture dict with 'rlp' and 'blocknumber'; it contains
            'transaction' and 'sender' keys iff decoding should succeed.
    """
    testdata = fixture_to_bytes(testdata)
    try:
        rlpdata = decode_hex(testdata["rlp"][2:])
        o = {}
        tx = rlp.decode(rlpdata, transactions.Transaction)
        blknum = int(testdata["blocknumber"])
        # High-'s' signatures are only rejected from the Homestead fork on.
        if blknum >= config.default_config["HOMESTEAD_FORK_BLKNUM"]:
            tx.check_low_s()
        o["sender"] = tx.sender
        o["transaction"] = {
            # '0x' prefix only when there is a non-empty data payload.
            "data": b'0x' * (len(tx.data) > 0) + encode_hex(tx.data),
            "gasLimit": str_to_bytes(str(tx.startgas)),
            "gasPrice": str_to_bytes(str(tx.gasprice)),
            "nonce": str_to_bytes(str(tx.nonce)),
            "r": b'0x' + encode_hex(utils.zpad(utils.int_to_big_endian(tx.r), 32)),
            "s": b'0x' + encode_hex(utils.zpad(utils.int_to_big_endian(tx.s), 32)),
            "v": str_to_bytes(str(tx.v)),
            "value": str_to_bytes(str(tx.value)),
            "to": encode_hex(tx.to),
        }
    except Exception as e:
        tx = None
        sys.stderr.write(str(e))
    if 'transaction' not in testdata:  # expected to fail
        assert tx is None
    else:
        assert set(o['transaction'].keys()) == set(testdata.get("transaction", dict()).keys())
        # Bug fix: this comparison's result used to be silently discarded, so
        # the field values were never actually verified -- assert it.
        assert o.get("transaction", None) == testdata.get("transaction", None)
        assert encode_hex(o.get("sender", '')) == testdata.get("sender", '')
def pytest_generate_tests(metafunc):
    """Pytest hook: parametrize each test from the 'TransactionTests' fixtures."""
    testutils.generate_test_params('TransactionTests', metafunc)
def main():
    """Run the transaction fixtures.

    Usage: python test_transactions.py [fixture_file_or_dir [testname]]
    With no arguments, a single fixture document is read from stdin.
    """
    if len(sys.argv) == 1:
        # No path given: read one fixture document from stdin.
        fixtures = {'stdin': json.load(sys.stdin)}
    else:
        # Load fixtures from the specified file or directory.
        fixtures = testutils.get_tests_from_file_or_dir(sys.argv[1])
    # Optional second argument restricts the run to a single test name.
    wanted = sys.argv[2] if len(sys.argv) >= 3 else None
    for fname, tests in list(fixtures.items()):
        for tname, tdata in list(tests.items()):
            if wanted is None or tname == wanted:
                print("Testing: %s %s" % (fname, tname))
                test_transaction(fname, tname, tdata)
# Allow running the fixture suite directly (without py.test).
if __name__ == '__main__':
    main()
|
allenlavoie/tensorflow | refs/heads/master | tensorflow/python/ops/nn_batchnorm_test.py | 3 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for batch_norm related functionality in tensorflow.ops.nn."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_impl
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
@test_util.with_c_api
class BatchNormalizationTest(test.TestCase):
  """Cross-checks the (deprecated) batch-norm kernels against NumPy.

  Four implementations are compared pairwise: a NumPy reference, a version
  composed from primitive TF math ops, the original fused v1 kernel, and the
  v2 `nn_impl.batch_normalization` API.
  """

  def _npBatchNorm(self, x, m, v, beta, gamma, epsilon,
                   scale_after_normalization, shift_after_normalization):
    """NumPy reference implementation of batch normalization."""
    y = (x - m) / np.sqrt(v + epsilon)
    y = y * gamma if scale_after_normalization else y
    return y + beta if shift_after_normalization else y

  def _opsBatchNorm(self, x, m, v, beta, gamma, epsilon,
                    scale_after_normalization, shift_after_normalization):
    """Batch norm composed from primitive TF math ops."""
    y = (x - m) * math_ops.rsqrt(v + epsilon)
    if scale_after_normalization:
      y = gamma * y
    return y + beta if shift_after_normalization else y

  def _tfBatchNormV1(self, x, m, v, beta, gamma, epsilon,
                     scale_after_normalization):
    """Original implementation."""
    # The fused v1 op was deprecated in GraphDef producer version 9; pin the
    # graph to version 8 so it can still be constructed.
    test_util.set_producer_version(ops.get_default_graph(), 8)
    return gen_nn_ops._batch_norm_with_global_normalization(
        x, m, v, beta, gamma, epsilon, scale_after_normalization)

  def _tfBatchNormV1BW(self, x, m, v, beta, gamma, epsilon,
                       scale_after_normalization):
    """Re-implementation of the original kernel for backward compatibility."""
    return nn_impl.batch_norm_with_global_normalization(
        x, m, v, beta, gamma, epsilon, scale_after_normalization)

  def _tfBatchNormV2(self, x, m, v, beta, gamma, epsilon,
                     scale_after_normalization, shift_after_normalization):
    """New implementation."""
    return nn_impl.batch_normalization(
        x, m, v, beta if shift_after_normalization else None,
        gamma if scale_after_normalization else None, epsilon)

  def testBatchNorm(self):
    """Forward outputs of all four implementations must agree."""
    x_shape = [3, 5, 4, 2]
    param_shape = [2]
    x_val = np.random.random_sample(x_shape).astype(np.float32)
    m_val = np.random.random_sample(param_shape).astype(np.float32)
    v_val = np.random.random_sample(param_shape).astype(np.float32)
    beta_val = np.random.random_sample(param_shape).astype(np.float32)
    gamma_val = np.random.random_sample(param_shape).astype(np.float32)
    for use_gpu in [True, False]:
      with self.test_session(use_gpu=use_gpu) as sess:
        x = constant_op.constant(x_val, name="x")
        m = constant_op.constant(m_val, name="m")
        v = constant_op.constant(v_val, name="v")
        beta = constant_op.constant(beta_val, name="beta")
        gamma = constant_op.constant(gamma_val, name="gamma")
        epsilon = 0.001
        for scale_after_normalization in [True, False]:
          for shift_after_normalization in [True, False]:
            bn2 = self._tfBatchNormV2(x, m, v, beta, gamma, epsilon,
                                      scale_after_normalization,
                                      shift_after_normalization)
            bn1bw = self._tfBatchNormV1BW(x, m, v, beta, gamma, epsilon,
                                          scale_after_normalization)
            bn1 = self._tfBatchNormV1(x, m, v, beta, gamma, epsilon,
                                      scale_after_normalization)
            on = self._opsBatchNorm(x, m, v, beta, gamma, epsilon,
                                    scale_after_normalization,
                                    shift_after_normalization)
            np_bn = self._npBatchNorm(x_val, m_val, v_val, beta_val, gamma_val,
                                      epsilon, scale_after_normalization,
                                      shift_after_normalization)
            tf_bn_v2, tf_bn_v1bw, tf_bn_v1, ops_bn = sess.run(
                [bn2, bn1bw, bn1, on])
            self.assertAllClose(np_bn, ops_bn, atol=0.00001)
            self.assertAllClose(np_bn, tf_bn_v2, atol=0.00001)
            self.assertAllClose(tf_bn_v2, ops_bn, atol=0.00001)
            # shift_after_normalization=False is not supported in v1.
            if shift_after_normalization:
              self.assertAllClose(np_bn, tf_bn_v1bw, atol=0.00001)
              self.assertAllClose(np_bn, tf_bn_v1, atol=0.00001)
              self.assertAllClose(tf_bn_v1, ops_bn, atol=0.00001)
              self.assertAllClose(tf_bn_v1bw, ops_bn, atol=0.00001)

  def _testBatchNormGradient(self,
                             param_index,
                             tag,
                             scale_after_normalization,
                             shift_after_normalization,
                             version,
                             err_tolerance=1e-11):
    """Numerically checks d(output)/d(params[param_index]) for one config."""
    x_shape = [3, 5, 4, 5]
    param_shape = [5]
    np.random.seed(1)  # Make it reproducible.
    x_val = np.random.random_sample(x_shape).astype(np.float64)
    m_val = np.random.random_sample(param_shape).astype(np.float64)
    v_val = np.random.random_sample(param_shape).astype(np.float64)
    beta_val = np.random.random_sample(param_shape).astype(np.float64)
    gamma_val = np.random.random_sample(param_shape).astype(np.float64)
    with self.test_session():
      x = constant_op.constant(x_val, name="x")
      m = constant_op.constant(m_val, name="m")
      v = constant_op.constant(v_val, name="v")
      beta = constant_op.constant(beta_val, name="beta")
      gamma = constant_op.constant(gamma_val, name="gamma")
      epsilon = 0.001
      if version == 1:
        output = self._tfBatchNormV1(x, m, v, beta, gamma, epsilon,
                                     scale_after_normalization)
      elif version == 2:
        output = self._tfBatchNormV2(x, m, v, beta, gamma, epsilon,
                                     scale_after_normalization,
                                     shift_after_normalization)
      else:
        print("Invalid version", version)
        raise ValueError()
      all_params = [x, m, v, beta, gamma]
      all_shapes = [x_shape, param_shape, param_shape, param_shape, param_shape]
      err = gradient_checker.compute_gradient_error(all_params[param_index],
                                                    all_shapes[param_index],
                                                    output, x_shape)
    print("Batch normalization v%d %s gradient %s scale and %s shift err = " %
          (version, tag, "with" if scale_after_normalization else "without",
           "with" if shift_after_normalization else "without"), err)
    self.assertLess(err, err_tolerance)

  def _testBatchNormGradientInAllNeedConfigs(self,
                                             param_index,
                                             tag,
                                             err_tolerance=1e-11):
    """Runs the gradient check across every supported scale/shift/version combo."""
    for scale_after_normalization in [True, False]:
      for shift_after_normalization in [True, False]:
        # shift_after_normalization=False is not supported in version 1.
        for v in ([1, 2] if shift_after_normalization else [2]):
          self._testBatchNormGradient(param_index, tag,
                                      scale_after_normalization,
                                      shift_after_normalization, v,
                                      err_tolerance)

  def testBatchNormInputGradient(self):
    self._testBatchNormGradientInAllNeedConfigs(0, "x")

  def testBatchNormMeanGradient(self):
    self._testBatchNormGradientInAllNeedConfigs(1, "mean")

  def testBatchNormVarianceGradient(self):
    # Looser tolerance: the variance gradient is numerically less stable.
    self._testBatchNormGradientInAllNeedConfigs(
        2, "variance", err_tolerance=1e-03)

  def testBatchNormBetaGradient(self):
    # Since beta does not exist when scale_after_normalization=False, we only
    # test for scale_after_normalization=True.
    for scale_after_normalization in [True, False]:
      for v in [1, 2]:
        self._testBatchNormGradient(3, "beta", scale_after_normalization, True,
                                    v)

  def testBatchNormGammaGradient(self):
    # If scale_after_normalization is False, backprop for gamma in v1
    # will be 0. In version 2 of the API, if scale_after_normalization is False,
    # gamma is not used at all, and the gradient is None, which displeases the
    # gradient checker.
    for scale_after_normalization in [True, False]:
      self._testBatchNormGradient(4, "gamma", scale_after_normalization, True,
                                  1)
    for shift_after_normalization in [True, False]:
      self._testBatchNormGradient(4, "gamma", True, shift_after_normalization,
                                  2)

  def testBatchNormGradImpl(self):
    """Compares the fused grad kernel against autodiff through the ops version."""
    x_shape = [7, 5, 4, 6]
    param_shape = [6]
    np.random.seed(1)  # Make it reproducible.
    x_val = np.random.random_sample(x_shape).astype(np.float32)
    m_val = np.random.random_sample(param_shape).astype(np.float32)
    v_val = np.random.random_sample(param_shape).astype(np.float32)
    beta_val = np.random.random_sample(param_shape).astype(np.float32)
    gamma_val = np.random.random_sample(param_shape).astype(np.float32)
    backprop_val = np.random.random_sample(x_shape).astype(np.float32)
    for use_gpu in [False, True]:
      with self.test_session(use_gpu=use_gpu) as sess:
        x = constant_op.constant(x_val, name="x")
        m = constant_op.constant(m_val, name="m")
        v = constant_op.constant(v_val, name="v")
        beta = constant_op.constant(beta_val, name="beta")
        gamma = constant_op.constant(gamma_val, name="gamma")
        backprop = constant_op.constant(backprop_val, name="backprop")
        epsilon = 0.001
        for scale_after_normalization in [True, False]:
          # _batch_norm_with_global_normalization_grad is deprecated in v9
          test_util.set_producer_version(ops.get_default_graph(), 8)
          grad = gen_nn_ops.batch_norm_with_global_normalization_grad(
              x, m, v, gamma, backprop, epsilon, scale_after_normalization)
          dx, dm, dv, db, dg = grad
          # The op result should expose its outputs as named fields too.
          self.assertEqual(grad.dx, dx)
          self.assertEqual(grad.dm, dm)
          self.assertEqual(grad.dv, dv)
          self.assertEqual(grad.db, db)
          self.assertEqual(grad.dg, dg)
          on = self._opsBatchNorm(x, m, v, beta, gamma, epsilon,
                                  scale_after_normalization, True)
          odx, odm, odv, odb, odg = gradients_impl.gradients(
              [on], [x, m, v, beta, gamma], [backprop])
          if scale_after_normalization:
            all_grads = sess.run([dx, dm, dv, db, dg, odx, odm, odv, odb, odg])
            to_check = ["dx", "dm", "dv", "db", "dg"]
          else:
            all_grads = sess.run([dx, dm, dv, db, odx, odm, odv, odb])
            to_check = ["dx", "dm", "dv", "db"]
          # Fused-kernel grads occupy the first half; autodiff the second.
          for i, _ in enumerate(to_check):
            self.assertAllClose(
                all_grads[i + len(to_check)], all_grads[i], atol=0.000001)

  def testBatchNormKeepDims(self):
    """Test for tf.nn.moments(..., keep_dims=True / False).

    Make sure that parameters with shape (1, 1, 1, depth) yield the same
    result as parameters with shape (depth)
    """
    x_shape = (3, 5, 4, 2)
    # NOTE(review): (2) is just the int 2, not a 1-tuple; it happens to work
    # with np.random.random_sample but looks unintended -- confirm.
    param_shape = (2)
    keep_dims_param_shape = (1, 1, 1, 2)
    x_val = np.random.random_sample(x_shape).astype(np.float32)
    m_val = np.random.random_sample(param_shape).astype(np.float32)
    v_val = np.random.random_sample(param_shape).astype(np.float32)
    beta_val = np.random.random_sample(param_shape).astype(np.float32)
    gamma_val = np.random.random_sample(param_shape).astype(np.float32)
    for use_gpu in [True, False]:
      with self.test_session(use_gpu=use_gpu) as sess:
        x = constant_op.constant(x_val, name="x")
        m = constant_op.constant(m_val, name="m")
        v = constant_op.constant(v_val, name="v")
        beta = constant_op.constant(beta_val, name="beta")
        gamma = constant_op.constant(gamma_val, name="gamma")
        keep_dims_m = array_ops.reshape(
            m, keep_dims_param_shape, name="keep_dims_m")
        keep_dims_v = array_ops.reshape(
            v, keep_dims_param_shape, name="keep_dims_v")
        keep_dims_beta = array_ops.reshape(
            beta, keep_dims_param_shape, name="keep_dims_beta")
        keep_dims_gamma = array_ops.reshape(
            gamma, keep_dims_param_shape, name="keep_dims_gamma")
        epsilon = 0.001
        for scale_after_normalization in [True, False]:
          for shift_after_normalization in [True, False]:
            bn = self._tfBatchNormV2(x, m, v, beta, gamma, epsilon,
                                     scale_after_normalization,
                                     shift_after_normalization)
            keep_dims_bn = self._tfBatchNormV2(x, keep_dims_m, keep_dims_v,
                                               keep_dims_beta, keep_dims_gamma,
                                               epsilon,
                                               scale_after_normalization,
                                               shift_after_normalization)
            tf_batch_norm, keep_dims_tf_batch_norm = sess.run(
                [bn, keep_dims_bn])
            self.assertEquals(x_shape, tf_batch_norm.shape)
            self.assertEquals(x_shape, keep_dims_tf_batch_norm.shape)
            self.assertAllClose(
                tf_batch_norm, keep_dims_tf_batch_norm, atol=0.000001)

  def _testBatchNormArbitraryShapes(self, x_shape, param_shape, atol=0.0001):
    """Compares v2 batch norm against NumPy for one (x, param) shape pair."""
    x_val = np.random.random_sample(x_shape).astype(np.float32)
    m_val = np.random.random_sample(param_shape).astype(np.float32)
    v_val = np.random.random_sample(param_shape).astype(np.float32)
    beta_val = np.random.random_sample(param_shape).astype(np.float32)
    gamma_val = np.random.random_sample(param_shape).astype(np.float32)
    for use_gpu in [True, False]:
      with self.test_session(use_gpu=use_gpu) as sess:
        x = constant_op.constant(x_val, name="x")
        m = constant_op.constant(m_val, name="m")
        v = constant_op.constant(v_val, name="v")
        beta = constant_op.constant(beta_val, name="beta")
        gamma = constant_op.constant(gamma_val, name="gamma")
        epsilon = 0.001
        for scale_after_normalization in [True, False]:
          for shift_after_normalization in [True, False]:
            bn = self._tfBatchNormV2(x, m, v, beta, gamma, epsilon,
                                     scale_after_normalization,
                                     shift_after_normalization)
            np_batch_norm = self._npBatchNorm(x_val, m_val, v_val, beta_val,
                                              gamma_val, epsilon,
                                              scale_after_normalization,
                                              shift_after_normalization)
            [tf_batch_norm] = sess.run([bn])
            self.assertEquals(x_shape, np_batch_norm.shape)
            self.assertEquals(x_shape, tf_batch_norm.shape)
            self.assertAllClose(np_batch_norm, tf_batch_norm, atol=atol)

  def testBatchNormArbitraryShapes(self):
    """Test for a variety of shapes and moments.

    Batch normalization is expected to work regardless of the position and
    dimensionality of the 'depth' axis/axes.
    """
    self._testBatchNormArbitraryShapes((3, 3), (1, 3))
    self._testBatchNormArbitraryShapes((3, 3), (3, 1))
    self._testBatchNormArbitraryShapes((3, 2, 4, 5), (1, 2, 1, 1))
    self._testBatchNormArbitraryShapes(
        (2, 3, 2, 4, 5), (1, 1, 1, 4, 5), atol=0.005)
@test_util.with_c_api
class SufficientStatisticsTest(test.TestCase):
  """Checks nn_impl.sufficient_statistics against a NumPy reference."""

  def _npSuffStats(self, x, axes, shift, keep_dims):
    """NumPy reference for the (count, mean_ss, var_ss, shift) statistics."""
    axis = tuple(axes)
    if shift is not None:
      m_ss = np.sum(x - shift, axis=axis, keepdims=keep_dims)
      v_ss = np.sum((x - shift) * (x - shift), axis=axis, keepdims=keep_dims)
    else:
      m_ss = np.sum(x, axis=axis, keepdims=keep_dims)
      v_ss = np.sum(x * x, axis=axis, keepdims=keep_dims)
    count = 1.0
    for d in xrange(x.ndim):
      if d in set(axes):
        count *= x.shape[d]
    if not keep_dims:
      # NOTE(review): in these tests `shift` is a Python scalar (or None);
      # squeezing it along `axis` looks questionable on modern NumPy --
      # confirm which NumPy versions this supports.
      shift = np.squeeze(shift, axis=axis)
    return count, m_ss, v_ss, shift

  def _opSuffStats(self, x, axes, shift, keep_dims):
    return nn_impl.sufficient_statistics(x, axes, shift, keep_dims)

  def _testSuffStats(self, x_shape, axes, shift, keep_dims, has_shape):
    """Compares the op against the NumPy reference, with static or dynamic shape."""
    x_val = np.random.random_sample(x_shape).astype(np.float32)
    np_c, np_m, np_v, np_s = self._npSuffStats(x_val, axes, shift, keep_dims)
    for use_gpu in [True, False]:
      with self.test_session(use_gpu=use_gpu) as sess:
        if has_shape:
          x = constant_op.constant(x_val, name="x")
          x.set_shape(x_shape)
          op_c, op_m, op_v, op_s = self._opSuffStats(x, axes, shift, keep_dims)
          # The shift output only exists when a shift was supplied.
          if shift:
            tf_c, tf_m, tf_v, tf_s = sess.run([op_c, op_m, op_v, op_s])
          else:
            tf_c, tf_m, tf_v = sess.run([op_c, op_m, op_v])
        else:
          x = array_ops.placeholder(
              dtype=dtypes.float32, shape=[None] * len(x_shape), name="x")
          op_c, op_m, op_v, op_s = self._opSuffStats(x, axes, shift, keep_dims)
          if shift:
            tf_c, tf_m, tf_v, tf_s = sess.run([op_c, op_m, op_v, op_s],
                                              feed_dict={x: x_val})
          else:
            tf_c, tf_m, tf_v = sess.run([op_c, op_m, op_v],
                                        feed_dict={x: x_val})
        self.assertAllClose(np_c, tf_c, atol=0.000001)
        self.assertAllClose(np_m, tf_m, atol=0.000001)
        self.assertAllClose(np_v, tf_v, atol=0.000001)
        if shift:
          self.assertAllClose(np_s, tf_s, atol=0.000001)

  def testSuffStats(self):
    for has_shape in [True, False]:
      for keep_dims in [True, False]:
        for shift in [None, 1.0]:
          self._testSuffStats([2, 3], [1], shift, keep_dims, has_shape)
          self._testSuffStats([2, 3], [0], shift, keep_dims, has_shape)
          self._testSuffStats([1, 2, 3], [0, 2], shift, keep_dims, has_shape)
@test_util.with_c_api
class NormalizeMomentsTest(test.TestCase):
  """Checks nn_impl.normalize_moments against a NumPy reference."""

  def _npNormalizeMoments(self, counts, mean_ss, variance_ss, shift):
    """NumPy reference: turn sufficient statistics into (mean, variance)."""
    mean = mean_ss / counts
    variance = variance_ss / counts - mean * mean
    if shift is not None:
      mean += shift
    return mean, variance

  def _opNormalizeMoments(self, counts, mean_ss, variance_ss, shift):
    return nn_impl.normalize_moments(counts, mean_ss, variance_ss, shift)

  def _testNormalizeMoments(self, shape, shift):
    counts = np.ones([1]).astype(np.float32)
    mean_ss = np.random.random_sample(shape).astype(np.float32)
    variance_ss = np.random.random_sample(shape).astype(np.float32)
    # Square so the variance sufficient statistic is non-negative.
    variance_ss *= variance_ss
    if shift:
      shift_v = np.random.random_sample(shape).astype(np.float32)
    else:
      shift_v = None
    npm, npv = self._npNormalizeMoments(counts, mean_ss, variance_ss, shift_v)
    for use_gpu in [True, False]:
      with self.test_session(use_gpu=use_gpu) as sess:
        tf_counts = constant_op.constant(counts, name="counts")
        tf_mean_ss = constant_op.constant(mean_ss, name="mean_ss")
        tf_variance_ss = constant_op.constant(variance_ss, name="variance_ss")
        if shift:
          tf_shift_v = constant_op.constant(shift_v, name="shift")
        else:
          tf_shift_v = None
        opm, opv = self._opNormalizeMoments(tf_counts, tf_mean_ss,
                                            tf_variance_ss, tf_shift_v)
        tfm, tfv = sess.run([opm, opv])
        self.assertAllClose(npm, tfm, atol=0.000001)
        self.assertAllClose(npv, tfv, atol=0.000001)

  def testNormalizeMoments(self):
    for shift in [None, 4.0]:
      self._testNormalizeMoments([3], shift)
      self._testNormalizeMoments([2, 3], shift)
@test_util.with_c_api
class MomentsTest(test.TestCase):
  """Checks tf.nn.moments (mean/variance) against NumPy references."""

  def _unweighted_moments(self, x, axes, keep_dims=False, extra_out_grads=None):
    # Method to compute moments of `x` wrt `axes`.
    #
    # This is exposed so WeightedMomentsTest can inherit the tests and
    # assertions from MomentsTest; the extra_out_grads argument allows
    # its inherited gradient tests to assert gradients against the
    # weights as well as the input values.
    return nn_impl.moments(x, axes, keep_dims=keep_dims)

  def RunMomentTestWithDynamicShape(self, shape, axes, keep_dims, dtype):
    """Same as RunMomentTest, but feeds x through a fully-dynamic placeholder."""
    with self.test_session():
      # shape = [batch, width, height, depth]
      assert len(shape) == 4
      x_numpy = np.random.normal(size=shape).astype(np.float32)
      x = array_ops.placeholder(dtype, shape=[None] * len(shape))
      mean, var = self._unweighted_moments(x, axes, keep_dims=keep_dims)
      num_elements = np.prod([shape[i] for i in axes])
      ax = tuple(axes)
      expected_mean = np.sum(x_numpy, axis=ax,
                             keepdims=keep_dims) / num_elements
      expected_mean_squared = np.multiply(expected_mean, expected_mean)
      expected_x_squared = np.sum(np.multiply(x_numpy, x_numpy),
                                  axis=ax,
                                  keepdims=keep_dims) / num_elements
      expected_variance = expected_x_squared - expected_mean_squared
      # Check that the moments are correct.
      self.assertAllCloseAccordingToType(
          expected_mean, mean.eval(feed_dict={x: x_numpy}))
      self.assertAllCloseAccordingToType(
          expected_variance, var.eval(feed_dict={x: x_numpy}))

  def RunMomentTest(self, shape, axes, keep_dims, dtype):
    """Compares moments over `axes` against a high-precision NumPy reference."""
    with self.test_session():
      # shape = [batch, width, height, depth]
      assert len(shape) == 4
      x_numpy = np.random.normal(size=shape).astype(np.float32)
      x = math_ops.cast(constant_op.constant(x_numpy), dtype=dtype)
      # Compute the expected values at high precision since the method
      # is prone to catastrophic cancellation:
      # NOTE(review): np.float128 is unavailable on some platforms (e.g.
      # Windows) -- confirm the supported build matrix.
      x_numpy = x_numpy.astype(np.float128)
      mean, var = self._unweighted_moments(x, axes, keep_dims=keep_dims)
      num_elements = np.prod([shape[i] for i in axes])
      ax = tuple(axes)
      expected_mean = np.sum(x_numpy, axis=ax,
                             keepdims=keep_dims) / num_elements
      expected_mean_squared = np.multiply(expected_mean, expected_mean)
      expected_x_squared = np.sum(np.multiply(x_numpy, x_numpy),
                                  axis=ax,
                                  keepdims=keep_dims) / num_elements
      expected_variance = expected_x_squared - expected_mean_squared
      # Check that the moments are correct.
      self.assertAllCloseAccordingToType(expected_mean, mean.eval())
      self.assertAllCloseAccordingToType(expected_variance, var.eval())

  def testBasic(self):
    for keep_dims in [False, True]:
      for dtype in [dtypes.float32, dtypes.float16]:
        self.RunMomentTest(
            shape=[2, 3, 5, 4], axes=[0], keep_dims=keep_dims, dtype=dtype)
        self.RunMomentTestWithDynamicShape(
            shape=[2, 3, 5, 4], axes=[0], keep_dims=keep_dims, dtype=dtype)

  def testGlobalNormalization(self):
    for keep_dims in [False, True]:
      for dtype in [dtypes.float32, dtypes.float16]:
        self.RunMomentTest(
            shape=[2, 3, 5, 4],
            axes=[0, 1, 2],
            keep_dims=keep_dims,
            dtype=dtype)
        self.RunMomentTestWithDynamicShape(
            shape=[2, 3, 5, 4],
            axes=[0, 1, 2],
            keep_dims=keep_dims,
            dtype=dtype)

  def testAxes(self):
    for keep_dims in [False, True]:
      for dtype in [dtypes.float32, dtypes.float16]:
        self.RunMomentTest(
            shape=[2, 3, 5, 4],
            axes=[1, 2, 3],
            keep_dims=keep_dims,
            dtype=dtype)
        self.RunMomentTestWithDynamicShape(
            shape=[2, 3, 5, 4],
            axes=[1, 2, 3],
            keep_dims=keep_dims,
            dtype=dtype)

  def _testGlobalGradient(self, from_y="mean"):
    """Numerically checks the gradient of mean or variance wrt the inputs."""
    with self.test_session():
      x_shape = [3, 5, 4, 2]
      x_val = np.random.random_sample(x_shape).astype(np.float64)
      x = constant_op.constant(x_val)
      x.set_shape(x_shape)
      axes = [0, 1, 2]
      y_shape = [2]  # Depth of x
      inputs_to_compute_gradients_for = [x]
      out_mean, out_var = self._unweighted_moments(
          x, axes, extra_out_grads=inputs_to_compute_gradients_for)
      if from_y == "mean":
        y = out_mean
      elif from_y == "var":
        y = out_var
      for (i, v) in enumerate(inputs_to_compute_gradients_for):
        err = gradient_checker.compute_gradient_error(v,
                                                      v.get_shape().as_list(),
                                                      y, y_shape)
        print("Moments %s gradient err vs input %d = %g" % (from_y, i, err))
        self.assertLess(err, 1e-11)

  def testMeanGlobalGradient(self):
    self._testGlobalGradient(from_y="mean")

  def testVarGlobalGradient(self):
    self._testGlobalGradient(from_y="var")
@test_util.with_c_api
class WeightedMomentsTest(MomentsTest):
  """Tests for nn.weighted_moments.

  Note that this test inherits from MomentsTest, inheriting all its
  test methods!

  It modifies MomentsTest in two ways:

  a) By overriding _unweighted_moments, all the codepaths in
     MomentsTest are executed, but with calls to tf.nn.moments()
     replaced by calls to tf.nn.weighted_moments() with a constant
     weight of 1.

  b) By overriding RunMomentTest and RunMomentTestWithDynamicShape,
     this test adds multiple additional calls to
     RunWeightedMomentsTest() to exercise correctness with
     non-constant weights and varying broadcasting situations. (It
     also continues to call MomentsTest.Run(Weighted)?MomentsTest as
     well.)
  """

  def _unweighted_moments(self, x, axes, keep_dims=False, extra_out_grads=None):
    """Routes the inherited tests through weighted_moments with weight == 1."""
    weights = constant_op.constant(1, dtype=x.dtype)
    if extra_out_grads is not None:
      # We want to assert gradients WRT weights as well as X!
      extra_out_grads.append(weights)
    return nn_impl.weighted_moments(x, axes, weights, keep_dims=keep_dims)

  def RunMomentTest(self, shape, axes, keep_dims, dtype, dynshapes=False):
    if not dynshapes:
      super(WeightedMomentsTest, self).RunMomentTest(shape, axes, keep_dims,
                                                     dtype)
    else:
      super(WeightedMomentsTest, self).RunMomentTestWithDynamicShape(shape,
                                                                     axes,
                                                                     keep_dims,
                                                                     dtype)
    # 1:1 weights and inputs
    self.RunWeightedMomentTest(shape, shape, axes, keep_dims, dtype)
    # Various broadcasting combinations
    for idx in range(len(shape)):
      # try broadcasting weights in all positions
      weight_shape = [1] * len(shape)
      weight_shape[idx] = shape[idx]
      self.RunWeightedMomentTest(shape, weight_shape, axes, keep_dims, dtype)
      # Also try broadcasting with a suffix of length n
      weight_shape = shape[-(idx + 1):]
      self.RunWeightedMomentTest(
          shape, weight_shape, axes, keep_dims, dtype, dynshapes=dynshapes)

  def RunMomentTestWithDynamicShape(self, shape, axes, keep_dims, dtype):
    self.RunMomentTest(shape, axes, keep_dims, dtype, dynshapes=True)

  def RunWeightedMomentTest(self,
                            shape,
                            weights_shape,
                            axes,
                            keep_dims,
                            dtype,
                            dynshapes=False):
    """Compares weighted moments against a NumPy weighted-sum reference."""
    with self.test_session() as s:
      x_numpy = np.random.normal(size=shape).astype(np.float32)
      weights_numpy = np.absolute(  # weights must be positive
          np.random.normal(
              size=weights_shape, loc=1.0).astype(np.float32))
      # Expand the numpy version to higher precision
      x_numpy = x_numpy.astype(np.float128)
      weights_numpy = weights_numpy.astype(np.float128)
      x_shape = [None] * len(shape) if dynshapes else shape
      weights_shape = ([None] * len(weights_shape) if dynshapes else
                       weights_shape)
      x = array_ops.placeholder(dtype, shape=x_shape)
      weights = array_ops.placeholder(dtype, shape=weights_shape)
      mean, var = nn_impl.weighted_moments(
          x, axes, weights, keep_dims=keep_dims)
      ax = tuple(axes)

      def _np_weighted_sum(v):
        return np.sum(weights_numpy * v, axis=ax, keepdims=keep_dims)

      # Weighted mean/variance derived from weighted sums.
      weight_sum = _np_weighted_sum(np.ones_like(x_numpy))
      expected_mean = _np_weighted_sum(x_numpy) / weight_sum
      expected_mean_squared = np.multiply(expected_mean, expected_mean)
      expected_x_squared = (_np_weighted_sum(np.multiply(x_numpy, x_numpy)) /
                            weight_sum)
      expected_variance = expected_x_squared - expected_mean_squared
      mean_v, var_v = s.run([mean, var],
                            feed_dict={x: x_numpy,
                                       weights: weights_numpy})
      self.assertAllCloseAccordingToType(expected_mean, mean_v)
      self.assertAllCloseAccordingToType(expected_variance, var_v)
if __name__ == "__main__":
  # Discover and run every test case in this module.
  test.main()
|
marcanpilami/MAGE | refs/heads/master | ref/views/duplicate.py | 2 | # coding: utf-8
from django.shortcuts import render, redirect
from django import forms
from ref.models.instances import ComponentInstance, Environment
from ref.creation import duplicate_envt
from django.forms.formsets import formset_factory
from django.contrib.auth.decorators import permission_required
from django.db.transaction import atomic
def envt_duplicate(request, envt_name):
    """Duplicate environment *envt_name* and redirect to the copy's admin page.

    NOTE(review): the copy is always named the literal string "new_name" and
    no relationship remapping is applied -- presumably a legacy/debug entry
    point; confirm before wiring it into the UI. ``request`` is unused.
    """
    e = duplicate_envt(envt_name, "new_name", {})
    return redirect('admin:ref_environment_change', e.id)
@permission_required('ref.scm_addenvironment')
@atomic
def envt_duplicate_name(request, envt_name):
    """Interactive duplication of an environment.

    GET renders a form asking for the new environment name, the subset of
    component instances to copy, and one formset row per relationship that
    points outside the environment (so the user may remap it). POST performs
    the copy in a single transaction and redirects to the new environment's
    admin change page.
    """
    e = Environment.objects.get(name=envt_name)
    FS = formset_factory(DuplicateFormRelInline, extra=0)
    if request.method == 'POST': # If the form has been submitted...
        form = DuplicateForm(request.POST, envt=e) # A form bound to the POST data
        fs = FS(request.POST)
        if form.is_valid() and fs.is_valid(): # All validation rules pass
            # Build {old target pk -> new target pk} from the rows the user
            # actually chose to remap (empty new_target means "keep as-is").
            remaps = {}
            for f in fs.cleaned_data:
                if f['new_target']:
                    remaps[f['old_target'].id] = f['new_target'].id
            e1 = duplicate_envt(envt_name, form.cleaned_data['new_name'], remaps, *ComponentInstance.objects.filter(pk__in=form.cleaned_data['instances_to_copy']))
            return redirect('admin:ref_environment_change', e1.id)
    else:
        form = DuplicateForm(envt=e) # An unbound form
    # NOTE(review): on an invalid POST, execution falls through and the
    # bound formset below is replaced by a fresh one, discarding its
    # validation errors -- confirm this is intended.
    ## Create a formset for each external relation
    internal_pks = [i.pk for i in e.component_instances.all()]
    ext = {}
    initial_rel = []
    for cpn in e.component_instances.all():
        for rel in cpn.relationships.all():
            # NOTE(review): compares the related object's id against the
            # environment's instance pks -- presumably keeps only relations
            # whose target lives outside the environment; confirm.
            if not rel.id in internal_pks:
                ext[rel] = None
    for rel in ext.keys():
        initial_rel .append({'old_target':rel, 'new_target': None})
    fs = FS(initial=initial_rel)
    return render(request, 'ref/envt_duplicate.html', {'form': form, 'envt': e, 'fs': fs})
#########################################
## Forms
#########################################
class DuplicateFormRelInline(forms.Form):
    """One row of the external-relationship remapping formset.

    Shows a relationship target that lives outside the duplicated
    environment (``old_target``) and lets the user pick a replacement
    instance sharing the same component description (``new_target``).
    Leaving ``new_target`` empty means "do not remap".
    """
    old_target = forms.ModelChoiceField(queryset=ComponentInstance.objects.all())
    new_target = forms.ModelChoiceField(queryset=ComponentInstance.objects.none(), empty_label='-- Don\'t remap --', required=False)
    def __init__(self, *args, **kwargs):
        super(DuplicateFormRelInline, self).__init__(*args, **kwargs)
        if self.is_bound:
            # Bound form: restrict the remap choices to instances having the
            # same description as the submitted old target.
            self.fields['new_target'].queryset = ComponentInstance.objects.get(pk=self.data[self.prefix + '-old_target']).description.instance_set.all()
        # Fix: dict.has_key() was removed in Python 3 -- use the `in`
        # operator, which behaves identically on Python 2.
        if 'old_target' in self.initial and self.initial['old_target']:
            self.fields['new_target'].queryset = self.initial['old_target'].description.instance_set.all()
class DuplicateForm(forms.Form):
    """Main duplication form: new environment name plus instances to copy.

    The instance choices are built from the source environment passed as
    the ``envt`` keyword argument; all of its instances are pre-selected.
    """
    new_name = forms.CharField(max_length=20)
    instances_to_copy = forms.TypedMultipleChoiceField(choices=(), initial=(), widget=forms.widgets.CheckboxSelectMultiple, coerce=int)
    def __init__(self, *args, **kwargs):
        # 'envt' is a custom kwarg: pop it before calling the base Form
        # constructor, which would reject an unknown keyword.
        self.envt = kwargs['envt']
        del kwargs['envt']
        super(DuplicateForm, self).__init__(*args, **kwargs)
        self.fields['instances_to_copy'].choices = [(i.pk, i.__unicode__()) for i in self.envt.component_instances.all()]
        self.fields['instances_to_copy'].initial = [i.pk for i in self.envt.component_instances.all()]
|
crepererum/invenio | refs/heads/master | invenio/legacy/bibupload/scripts/__init__.py | 12133432 | |
PolicyStat/django | refs/heads/master | django/contrib/gis/geos/tests/__init__.py | 12133432 | |
bloer/bgexplorer | refs/heads/master | bgexplorer/modeleditor/__init__.py | 12133432 | |
agilemobiledev/spiderfoot | refs/heads/master | ext/dns/tokenizer.py | 44 | # Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""Tokenize DNS master file format"""
import cStringIO
import sys
import dns.exception
import dns.name
import dns.ttl
_DELIMITERS = {
' ' : True,
'\t' : True,
'\n' : True,
';' : True,
'(' : True,
')' : True,
'"' : True }
_QUOTING_DELIMITERS = { '"' : True }
EOF = 0
EOL = 1
WHITESPACE = 2
IDENTIFIER = 3
QUOTED_STRING = 4
COMMENT = 5
DELIMITER = 6
class UngetBufferFull(dns.exception.DNSException):
    """Raised when an attempt is made to unget a token when the unget
    buffer is full."""
    # Both unget buffers (character and token) hold exactly one entry.
    pass
class Token(object):
    """A DNS master file format token.
    @ivar ttype: The token type
    @type ttype: int
    @ivar value: The token value
    @type value: string
    @ivar has_escape: Does the token value contain escapes?
    @type has_escape: bool
    """
    def __init__(self, ttype, value='', has_escape=False):
        """Initialize a token instance.
        @param ttype: The token type
        @type ttype: int
        @param value: The token value
        @type value: string
        @param has_escape: Does the token value contain escapes?
        @type has_escape: bool
        """
        self.ttype = ttype
        self.value = value
        self.has_escape = has_escape
    def is_eof(self):
        return self.ttype == EOF
    def is_eol(self):
        return self.ttype == EOL
    def is_whitespace(self):
        return self.ttype == WHITESPACE
    def is_identifier(self):
        return self.ttype == IDENTIFIER
    def is_quoted_string(self):
        return self.ttype == QUOTED_STRING
    def is_comment(self):
        return self.ttype == COMMENT
    def is_delimiter(self):
        return self.ttype == DELIMITER
    def is_eol_or_eof(self):
        return (self.ttype == EOL or self.ttype == EOF)
    def __eq__(self, other):
        # Tokens compare by (type, value); comparing with a non-Token is
        # simply unequal rather than an error.
        if not isinstance(other, Token):
            return False
        return (self.ttype == other.ttype and
                self.value == other.value)
    def __ne__(self, other):
        if not isinstance(other, Token):
            return True
        return (self.ttype != other.ttype or
                self.value != other.value)
    def __str__(self):
        return '%d "%s"' % (self.ttype, self.value)
    def unescape(self):
        """Return a copy of this token with backslash escapes resolved.

        Handles both ``\\X`` (literal next character) and ``\\DDD``
        (three-decimal-digit byte value) escapes.
        """
        if not self.has_escape:
            return self
        unescaped = ''
        l = len(self.value)
        i = 0
        while i < l:
            c = self.value[i]
            i += 1
            if c == '\\':
                if i >= l:
                    raise dns.exception.UnexpectedEnd
                c = self.value[i]
                i += 1
                if c.isdigit():
                    # \DDD: exactly three decimal digits encode one byte.
                    if i >= l:
                        raise dns.exception.UnexpectedEnd
                    c2 = self.value[i]
                    i += 1
                    if i >= l:
                        raise dns.exception.UnexpectedEnd
                    c3 = self.value[i]
                    i += 1
                    if not (c2.isdigit() and c3.isdigit()):
                        raise dns.exception.SyntaxError
                    c = chr(int(c) * 100 + int(c2) * 10 + int(c3))
            unescaped += c
        return Token(self.ttype, unescaped)
    # compatibility for old-style tuple tokens
    def __len__(self):
        return 2
    def __iter__(self):
        return iter((self.ttype, self.value))
    def __getitem__(self, i):
        if i == 0:
            return self.ttype
        elif i == 1:
            return self.value
        else:
            raise IndexError
class Tokenizer(object):
"""A DNS master file format tokenizer.
A token is a (type, value) tuple, where I{type} is an int, and
I{value} is a string. The valid types are EOF, EOL, WHITESPACE,
IDENTIFIER, QUOTED_STRING, COMMENT, and DELIMITER.
@ivar file: The file to tokenize
@type file: file
@ivar ungotten_char: The most recently ungotten character, or None.
@type ungotten_char: string
@ivar ungotten_token: The most recently ungotten token, or None.
@type ungotten_token: (int, string) token tuple
@ivar multiline: The current multiline level. This value is increased
by one every time a '(' delimiter is read, and decreased by one every time
a ')' delimiter is read.
@type multiline: int
@ivar quoting: This variable is true if the tokenizer is currently
reading a quoted string.
@type quoting: bool
@ivar eof: This variable is true if the tokenizer has encountered EOF.
@type eof: bool
@ivar delimiters: The current delimiter dictionary.
@type delimiters: dict
@ivar line_number: The current line number
@type line_number: int
@ivar filename: A filename that will be returned by the L{where} method.
@type filename: string
"""
def __init__(self, f=sys.stdin, filename=None):
"""Initialize a tokenizer instance.
@param f: The file to tokenize. The default is sys.stdin.
This parameter may also be a string, in which case the tokenizer
will take its input from the contents of the string.
@type f: file or string
@param filename: the name of the filename that the L{where} method
will return.
@type filename: string
"""
if isinstance(f, str):
f = cStringIO.StringIO(f)
if filename is None:
filename = '<string>'
else:
if filename is None:
if f is sys.stdin:
filename = '<stdin>'
else:
filename = '<file>'
self.file = f
self.ungotten_char = None
self.ungotten_token = None
self.multiline = 0
self.quoting = False
self.eof = False
self.delimiters = _DELIMITERS
self.line_number = 1
self.filename = filename
def _get_char(self):
"""Read a character from input.
@rtype: string
"""
if self.ungotten_char is None:
if self.eof:
c = ''
else:
c = self.file.read(1)
if c == '':
self.eof = True
elif c == '\n':
self.line_number += 1
else:
c = self.ungotten_char
self.ungotten_char = None
return c
def where(self):
"""Return the current location in the input.
@rtype: (string, int) tuple. The first item is the filename of
the input, the second is the current line number.
"""
return (self.filename, self.line_number)
def _unget_char(self, c):
"""Unget a character.
The unget buffer for characters is only one character large; it is
an error to try to unget a character when the unget buffer is not
empty.
@param c: the character to unget
@type c: string
@raises UngetBufferFull: there is already an ungotten char
"""
if not self.ungotten_char is None:
raise UngetBufferFull
self.ungotten_char = c
def skip_whitespace(self):
"""Consume input until a non-whitespace character is encountered.
The non-whitespace character is then ungotten, and the number of
whitespace characters consumed is returned.
If the tokenizer is in multiline mode, then newlines are whitespace.
@rtype: int
"""
skipped = 0
while True:
c = self._get_char()
if c != ' ' and c != '\t':
if (c != '\n') or not self.multiline:
self._unget_char(c)
return skipped
skipped += 1
def get(self, want_leading = False, want_comment = False):
"""Get the next token.
@param want_leading: If True, return a WHITESPACE token if the
first character read is whitespace. The default is False.
@type want_leading: bool
@param want_comment: If True, return a COMMENT token if the
first token read is a comment. The default is False.
@type want_comment: bool
@rtype: Token object
@raises dns.exception.UnexpectedEnd: input ended prematurely
@raises dns.exception.SyntaxError: input was badly formed
"""
if not self.ungotten_token is None:
token = self.ungotten_token
self.ungotten_token = None
if token.is_whitespace():
if want_leading:
return token
elif token.is_comment():
if want_comment:
return token
else:
return token
skipped = self.skip_whitespace()
if want_leading and skipped > 0:
return Token(WHITESPACE, ' ')
token = ''
ttype = IDENTIFIER
has_escape = False
while True:
c = self._get_char()
if c == '' or c in self.delimiters:
if c == '' and self.quoting:
raise dns.exception.UnexpectedEnd
if token == '' and ttype != QUOTED_STRING:
if c == '(':
self.multiline += 1
self.skip_whitespace()
continue
elif c == ')':
if not self.multiline > 0:
raise dns.exception.SyntaxError
self.multiline -= 1
self.skip_whitespace()
continue
elif c == '"':
if not self.quoting:
self.quoting = True
self.delimiters = _QUOTING_DELIMITERS
ttype = QUOTED_STRING
continue
else:
self.quoting = False
self.delimiters = _DELIMITERS
self.skip_whitespace()
continue
elif c == '\n':
return Token(EOL, '\n')
elif c == ';':
while 1:
c = self._get_char()
if c == '\n' or c == '':
break
token += c
if want_comment:
self._unget_char(c)
return Token(COMMENT, token)
elif c == '':
if self.multiline:
raise dns.exception.SyntaxError('unbalanced parentheses')
return Token(EOF)
elif self.multiline:
self.skip_whitespace()
token = ''
continue
else:
return Token(EOL, '\n')
else:
# This code exists in case we ever want a
# delimiter to be returned. It never produces
# a token currently.
token = c
ttype = DELIMITER
else:
self._unget_char(c)
break
elif self.quoting:
if c == '\\':
c = self._get_char()
if c == '':
raise dns.exception.UnexpectedEnd
if c.isdigit():
c2 = self._get_char()
if c2 == '':
raise dns.exception.UnexpectedEnd
c3 = self._get_char()
if c == '':
raise dns.exception.UnexpectedEnd
if not (c2.isdigit() and c3.isdigit()):
raise dns.exception.SyntaxError
c = chr(int(c) * 100 + int(c2) * 10 + int(c3))
elif c == '\n':
raise dns.exception.SyntaxError('newline in quoted string')
elif c == '\\':
#
# It's an escape. Put it and the next character into
# the token; it will be checked later for goodness.
#
token += c
has_escape = True
c = self._get_char()
if c == '' or c == '\n':
raise dns.exception.UnexpectedEnd
token += c
if token == '' and ttype != QUOTED_STRING:
if self.multiline:
raise dns.exception.SyntaxError('unbalanced parentheses')
ttype = EOF
return Token(ttype, token, has_escape)
def unget(self, token):
"""Unget a token.
The unget buffer for tokens is only one token large; it is
an error to try to unget a token when the unget buffer is not
empty.
@param token: the token to unget
@type token: Token object
@raises UngetBufferFull: there is already an ungotten token
"""
if not self.ungotten_token is None:
raise UngetBufferFull
self.ungotten_token = token
def next(self):
"""Return the next item in an iteration.
@rtype: (int, string)
"""
token = self.get()
if token.is_eof():
raise StopIteration
return token
def __iter__(self):
return self
# Helpers
def get_int(self):
"""Read the next token and interpret it as an integer.
@raises dns.exception.SyntaxError:
@rtype: int
"""
token = self.get().unescape()
if not token.is_identifier():
raise dns.exception.SyntaxError('expecting an identifier')
if not token.value.isdigit():
raise dns.exception.SyntaxError('expecting an integer')
return int(token.value)
def get_uint8(self):
"""Read the next token and interpret it as an 8-bit unsigned
integer.
@raises dns.exception.SyntaxError:
@rtype: int
"""
value = self.get_int()
if value < 0 or value > 255:
raise dns.exception.SyntaxError('%d is not an unsigned 8-bit integer' % value)
return value
def get_uint16(self):
"""Read the next token and interpret it as a 16-bit unsigned
integer.
@raises dns.exception.SyntaxError:
@rtype: int
"""
value = self.get_int()
if value < 0 or value > 65535:
raise dns.exception.SyntaxError('%d is not an unsigned 16-bit integer' % value)
return value
def get_uint32(self):
"""Read the next token and interpret it as a 32-bit unsigned
integer.
@raises dns.exception.SyntaxError:
@rtype: int
"""
token = self.get().unescape()
if not token.is_identifier():
raise dns.exception.SyntaxError('expecting an identifier')
if not token.value.isdigit():
raise dns.exception.SyntaxError('expecting an integer')
value = long(token.value)
if value < 0 or value > 4294967296L:
raise dns.exception.SyntaxError('%d is not an unsigned 32-bit integer' % value)
return value
def get_string(self, origin=None):
"""Read the next token and interpret it as a string.
@raises dns.exception.SyntaxError:
@rtype: string
"""
token = self.get().unescape()
if not (token.is_identifier() or token.is_quoted_string()):
raise dns.exception.SyntaxError('expecting a string')
return token.value
def get_identifier(self, origin=None):
"""Read the next token and raise an exception if it is not an identifier.
@raises dns.exception.SyntaxError:
@rtype: string
"""
token = self.get().unescape()
if not token.is_identifier():
raise dns.exception.SyntaxError('expecting an identifier')
return token.value
def get_name(self, origin=None):
"""Read the next token and interpret it as a DNS name.
@raises dns.exception.SyntaxError:
@rtype: dns.name.Name object"""
token = self.get()
if not token.is_identifier():
raise dns.exception.SyntaxError('expecting an identifier')
return dns.name.from_text(token.value, origin)
def get_eol(self):
"""Read the next token and raise an exception if it isn't EOL or
EOF.
@raises dns.exception.SyntaxError:
@rtype: string
"""
token = self.get()
if not token.is_eol_or_eof():
raise dns.exception.SyntaxError('expected EOL or EOF, got %d "%s"' % (token.ttype, token.value))
return token.value
def get_ttl(self):
token = self.get().unescape()
if not token.is_identifier():
raise dns.exception.SyntaxError('expecting an identifier')
return dns.ttl.from_text(token.value)
|
tectronics/madcow | refs/heads/master | madcow/include/simplejson/tests/test_separators.py | 136 | import textwrap
from unittest import TestCase
import simplejson as json
class TestSeparators(TestCase):
    """Round-trip and golden-output checks for custom separators.

    Verifies that ``dumps`` honors a custom (item, key) separator pair --
    here ``' ,'`` and ``' : '``, i.e. a space *before* each comma/colon --
    together with two-space indentation and sorted keys.
    """
    def test_separators(self):
        h = [['blorpie'], ['whoops'], [], 'd-shtaeou', 'd-nthiouh', 'i-vhbjkhnth',
             {'nifty': 87}, {'field': 'yes', 'morefield': False}]
        # Golden output for indent='  ' with the custom separators.
        expect = textwrap.dedent("""\
        [
          [
            "blorpie"
          ] ,
          [
            "whoops"
          ] ,
          [] ,
          "d-shtaeou" ,
          "d-nthiouh" ,
          "i-vhbjkhnth" ,
          {
            "nifty" : 87
          } ,
          {
            "field" : "yes" ,
            "morefield" : false
          }
        ]""")
        d1 = json.dumps(h)
        d2 = json.dumps(h, indent='  ', sort_keys=True, separators=(' ,', ' : '))
        h1 = json.loads(d1)
        h2 = json.loads(d2)
        # Both encodings must round-trip back to the original structure.
        # Fix: assertEquals is a deprecated alias removed in Python 3.12;
        # use assertEqual.
        self.assertEqual(h1, h)
        self.assertEqual(h2, h)
        self.assertEqual(d2, expect)
|
GehenHe/Recognize-Face-on-Android | refs/heads/master | tensorflow/contrib/learn/python/learn/estimators/multioutput_test.py | 18 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Multi-output tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import sys
# TODO: #6568 Remove this hack that makes dlopen() not crash.
if hasattr(sys, "getdlopenflags") and hasattr(sys, "setdlopenflags"):
import ctypes
sys.setdlopenflags(sys.getdlopenflags() | ctypes.RTLD_GLOBAL)
import numpy as np
from tensorflow.contrib.learn.python import learn
from tensorflow.contrib.learn.python.learn.estimators._sklearn import mean_squared_error
from tensorflow.python.platform import test
class MultiOutputTest(test.TestCase):
  """Multi-output tests."""
  def testMultiRegression(self):
    """Fit a 2-output linear regressor on (sin, cos) targets.

    Both the Python RNG and NumPy RNG are seeded, so the data -- and
    therefore the score -- is deterministic up to TF initialization.
    """
    random.seed(42)
    rng = np.random.RandomState(1)
    # 100 sorted samples in [-100, 100); targets are two columns.
    x = np.sort(200 * rng.rand(100, 1) - 100, axis=0)
    y = np.array([np.pi * np.sin(x).ravel(), np.pi * np.cos(x).ravel()]).T
    regressor = learn.LinearRegressor(
        feature_columns=learn.infer_real_valued_columns_from_input(x),
        label_dimension=2)
    regressor.fit(x, y, steps=100)
    score = mean_squared_error(np.array(list(regressor.predict_scores(x))), y)
    # Loose sanity bound: a linear model cannot fit sin/cos exactly.
    self.assertLess(score, 10, "Failed with score = {0}".format(score))
if __name__ == "__main__":
  # Discover and run every test case in this module.
  test.main()
|
simleo/pydoop | refs/heads/develop | int_test/opaque_split/gen_splits.py | 2 | #!/usr/bin/env python
# BEGIN_COPYRIGHT
#
# Copyright 2009-2021 CRS4.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# END_COPYRIGHT
import argparse
import sys
import pydoop.hdfs as hdfs
from pydoop.mapreduce.pipes import OpaqueSplit, write_opaque_splits
# Number of map tasks and the number of items each task covers.
N_TASKS = 2
ITEMS_PER_TASK = 5


def gen_ranges():
    """Yield one half-open ``(start, stop)`` interval per task."""
    for task_idx in range(N_TASKS):
        lo = task_idx * ITEMS_PER_TASK
        yield lo, lo + ITEMS_PER_TASK
if __name__ == "__main__":
    # Single positional argument: HDFS destination for the serialized splits.
    parser = argparse.ArgumentParser()
    parser.add_argument("splits_path", metavar="HDFS_PATH")
    args = parser.parse_args(sys.argv[1:])
    # One opaque split per (start, stop) range, written to HDFS for the
    # Hadoop job to consume.
    splits = [OpaqueSplit(_) for _ in gen_ranges()]
    with hdfs.open(args.splits_path, "wb") as f:
        write_opaque_splits(splits, f)
|
jochemdu/nyanchain | refs/heads/master | Abe/BCDataStream.py | 53 | #
# Workalike python implementation of Bitcoin's CDataStream class.
#
import struct
import StringIO
import mmap
# Base error type for every (de)serialization failure in this module.
class SerializationError(Exception):
    """ Thrown when there's a problem deserializing or serializing """
class BCDataStream(object):
  """Workalike of Bitcoin's CDataStream: a cursor over a byte buffer with
  readers/writers for Bitcoin's little-endian wire encodings.

  Python 2 code: the buffer is a str (or mmap) and single bytes are
  handled with chr()/ord().
  """
  def __init__(self):
    self.input = None        # backing buffer (str or mmap), None until fed
    self.read_cursor = 0     # current read offset into self.input
  def clear(self):
    self.input = None
    self.read_cursor = 0
  def write(self, bytes):  # Initialize with string of bytes
    if self.input is None:
      self.input = bytes
    else:
      self.input += bytes
  def map_file(self, file, start):  # Initialize with bytes from file
    # Memory-maps the whole file read-only; reading starts at `start`.
    self.input = mmap.mmap(file.fileno(), 0, access=mmap.ACCESS_READ)
    self.read_cursor = start
  def seek_file(self, position):
    self.read_cursor = position
  def close_file(self):
    self.input.close()
  def read_string(self):
    # Strings are encoded depending on length:
    # 0 to 252 :  1-byte-length followed by bytes (if any)
    # 253 to 65,535 : byte'253' 2-byte-length followed by bytes
    # 65,536 to 4,294,967,295 : byte '254' 4-byte-length followed by bytes
    # ... and the Bitcoin client is coded to understand:
    # greater than 4,294,967,295 : byte '255' 8-byte-length followed by bytes of string
    # ... but I don't think it actually handles any strings that big.
    if self.input is None:
      raise SerializationError("call write(bytes) before trying to deserialize")
    try:
      length = self.read_compact_size()
    except IndexError:
      raise SerializationError("attempt to read past end of buffer")
    return self.read_bytes(length)
  def write_string(self, string):
    # Length-encoded as with read-string
    self.write_compact_size(len(string))
    self.write(string)
  def read_bytes(self, length):
    try:
      result = self.input[self.read_cursor:self.read_cursor+length]
      self.read_cursor += length
      return result
    except IndexError:
      raise SerializationError("attempt to read past end of buffer")
    # NOTE(review): unreachable -- slicing a str/mmap does not raise
    # IndexError, and both paths above exit the method.
    return ''
  def read_boolean(self): return self.read_bytes(1)[0] != chr(0)
  def read_int16 (self): return self._read_num('<h')
  def read_uint16 (self): return self._read_num('<H')
  def read_int32 (self): return self._read_num('<i')
  def read_uint32 (self): return self._read_num('<I')
  def read_int64 (self): return self._read_num('<q')
  def read_uint64 (self): return self._read_num('<Q')
  def write_boolean(self, val): return self.write(chr(1) if val else chr(0))
  def write_int16 (self, val): return self._write_num('<h', val)
  def write_uint16 (self, val): return self._write_num('<H', val)
  def write_int32 (self, val): return self._write_num('<i', val)
  def write_uint32 (self, val): return self._write_num('<I', val)
  def write_int64 (self, val): return self._write_num('<q', val)
  def write_uint64 (self, val): return self._write_num('<Q', val)
  def read_compact_size(self):
    # First byte selects the width: <253 inline, 253 -> u16, 254 -> u32,
    # 255 -> u64 (Bitcoin "CompactSize" encoding).
    size = ord(self.input[self.read_cursor])
    self.read_cursor += 1
    if size == 253:
      size = self._read_num('<H')
    elif size == 254:
      size = self._read_num('<I')
    elif size == 255:
      size = self._read_num('<Q')
    return size
  def write_compact_size(self, size):
    if size < 0:
      raise SerializationError("attempt to write size < 0")
    elif size < 253:
      self.write(chr(size))
    elif size < 2**16:
      self.write('\xfd')
      self._write_num('<H', size)
    elif size < 2**32:
      self.write('\xfe')
      self._write_num('<I', size)
    elif size < 2**64:
      self.write('\xff')
      self._write_num('<Q', size)
  def _read_num(self, format):
    # `format` is a struct format string, e.g. '<I' for little-endian u32.
    (i,) = struct.unpack_from(format, self.input, self.read_cursor)
    self.read_cursor += struct.calcsize(format)
    return i
  def _write_num(self, format, num):
    s = struct.pack(format, num)
    self.write(s)
|
vmanoria/bluemix-hue-filebrowser | refs/heads/master | hue-3.8.1-bluemix/apps/search/src/search/urls.py | 9 | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.conf.urls import patterns, url
# URL routes for the Search app; string view names resolve in search.views.
urlpatterns = patterns('search.views',
  url(r'^$', 'index', name='index'),
  url(r'^search$', 'search', name='search'),
  url(r'^save$', 'save', name='save'),
  url(r'^new_search', 'new_search', name='new_search'),
  url(r'^browse/(?P<name>.+)', 'browse', name='browse'),
  url(r'^download$', 'download', name='download'),
  url(r'^admin/collections$', 'admin_collections', name='admin_collections'),
  # Ajax
  # Search
  url(r'^suggest/(?P<collection_id>\w+)/(?P<query>\w+)?$', 'query_suggest', name='query_suggest'),
  url(r'^index/fields/dynamic$', 'index_fields_dynamic', name='index_fields_dynamic'),
  url(r'^template/new_facet$', 'new_facet', name='new_facet'),
  url(r'^get_document$', 'get_document', name='get_document'),
  url(r'^get_range_facet$', 'get_range_facet', name='get_range_facet'),
  url(r'^get_timeline$', 'get_timeline', name='get_timeline'),
  url(r'^get_collection$', 'get_collection', name='get_collection'),
  url(r'^get_collections$', 'get_collections', name='get_collections'),
  url(r'^get_stats$', 'get_stats', name='get_stats'),
  url(r'^get_terms$', 'get_terms', name='get_terms'),
  # Admin
  url(r'^admin/collection_delete$', 'admin_collection_delete', name='admin_collection_delete'),
  url(r'^admin/collection_copy$', 'admin_collection_copy', name='admin_collection_copy'),
  url(r'^install_examples$', 'install_examples', name='install_examples'),
)
|
taroplus/spark | refs/heads/master | python/pyspark/ml/param/__init__.py | 53 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import array
import sys
if sys.version > '3':
basestring = str
xrange = range
unicode = str
from abc import ABCMeta
import copy
import numpy as np
from py4j.java_gateway import JavaObject
from pyspark.ml.linalg import DenseVector, Vector, Matrix
from pyspark.ml.util import Identifiable
__all__ = ['Param', 'Params', 'TypeConverters']
class Param(object):
    """
    A param with self-contained documentation.

    .. versionadded:: 1.3.0
    """
    def __init__(self, parent, name, doc, typeConverter=None):
        # `parent` must be Identifiable; only its uid is stored, not the
        # object itself.
        if not isinstance(parent, Identifiable):
            raise TypeError("Parent must be an Identifiable but got type %s." % type(parent))
        self.parent = parent.uid
        self.name = str(name)
        self.doc = str(doc)
        # Default converter is the identity (no conversion/validation).
        self.typeConverter = TypeConverters.identity if typeConverter is None else typeConverter
    def _copy_new_parent(self, parent):
        """Copy the current param to a new parent, must be a dummy param."""
        # A "dummy" param is one whose parent uid is the sentinel "undefined".
        if self.parent == "undefined":
            param = copy.copy(self)
            param.parent = parent.uid
            return param
        else:
            raise ValueError("Cannot copy from non-dummy parent %s." % parent)
    def __str__(self):
        return str(self.parent) + "__" + self.name
    def __repr__(self):
        return "Param(parent=%r, name=%r, doc=%r)" % (self.parent, self.name, self.doc)
    def __hash__(self):
        return hash(str(self))
    def __eq__(self, other):
        # Equality is by (parent uid, name); `doc` and the converter are
        # deliberately ignored.
        if isinstance(other, Param):
            return self.parent == other.parent and self.name == other.name
        else:
            return False
class TypeConverters(object):
    """
    .. note:: DeveloperApi

    Factory methods for common type conversion functions for `Param.typeConverter`.

    .. versionadded:: 2.0.0
    """
    @staticmethod
    def _is_numeric(value):
        vtype = type(value)
        # The name check covers Python 2's `long`, which cannot be named
        # directly in source that must also parse on Python 3.
        return vtype in [int, float, np.float64, np.int64] or vtype.__name__ == 'long'
    @staticmethod
    def _is_integer(value):
        # Integral check: numeric AND no fractional part (so 5.0 qualifies).
        return TypeConverters._is_numeric(value) and float(value).is_integer()
    @staticmethod
    def _can_convert_to_list(value):
        vtype = type(value)
        return vtype in [list, np.ndarray, tuple, xrange, array.array] or isinstance(value, Vector)
    @staticmethod
    def _can_convert_to_string(value):
        vtype = type(value)
        return isinstance(value, basestring) or vtype in [np.unicode_, np.string_, np.str_]
    @staticmethod
    def identity(value):
        """
        Dummy converter that just returns value.
        """
        return value
    @staticmethod
    def toList(value):
        """
        Convert a value to a list, if possible.
        """
        if type(value) == list:
            return value
        elif type(value) in [np.ndarray, tuple, xrange, array.array]:
            return list(value)
        elif isinstance(value, Vector):
            return list(value.toArray())
        else:
            raise TypeError("Could not convert %s to list" % value)
    @staticmethod
    def toListFloat(value):
        """
        Convert a value to list of floats, if possible.
        """
        if TypeConverters._can_convert_to_list(value):
            value = TypeConverters.toList(value)
            if all(map(lambda v: TypeConverters._is_numeric(v), value)):
                return [float(v) for v in value]
        raise TypeError("Could not convert %s to list of floats" % value)
    @staticmethod
    def toListInt(value):
        """
        Convert a value to list of ints, if possible.
        """
        if TypeConverters._can_convert_to_list(value):
            value = TypeConverters.toList(value)
            if all(map(lambda v: TypeConverters._is_integer(v), value)):
                return [int(v) for v in value]
        raise TypeError("Could not convert %s to list of ints" % value)
    @staticmethod
    def toListString(value):
        """
        Convert a value to list of strings, if possible.
        """
        if TypeConverters._can_convert_to_list(value):
            value = TypeConverters.toList(value)
            if all(map(lambda v: TypeConverters._can_convert_to_string(v), value)):
                return [TypeConverters.toString(v) for v in value]
        raise TypeError("Could not convert %s to list of strings" % value)
    @staticmethod
    def toVector(value):
        """
        Convert a value to a MLlib Vector, if possible.
        """
        if isinstance(value, Vector):
            return value
        elif TypeConverters._can_convert_to_list(value):
            value = TypeConverters.toList(value)
            if all(map(lambda v: TypeConverters._is_numeric(v), value)):
                return DenseVector(value)
        raise TypeError("Could not convert %s to vector" % value)
    @staticmethod
    def toMatrix(value):
        """
        Convert a value to a MLlib Matrix, if possible.
        """
        # No coercion is attempted: only an existing Matrix passes.
        if isinstance(value, Matrix):
            return value
        raise TypeError("Could not convert %s to matrix" % value)
    @staticmethod
    def toFloat(value):
        """
        Convert a value to a float, if possible.
        """
        if TypeConverters._is_numeric(value):
            return float(value)
        else:
            raise TypeError("Could not convert %s to float" % value)
    @staticmethod
    def toInt(value):
        """
        Convert a value to an int, if possible.
        """
        if TypeConverters._is_integer(value):
            return int(value)
        else:
            raise TypeError("Could not convert %s to int" % value)
    @staticmethod
    def toString(value):
        """
        Convert a value to a string, if possible.
        """
        if isinstance(value, basestring):
            return value
        elif type(value) in [np.string_, np.str_]:
            return str(value)
        elif type(value) == np.unicode_:
            return unicode(value)
        else:
            raise TypeError("Could not convert %s to string type" % type(value))
    @staticmethod
    def toBoolean(value):
        """
        Convert a value to a boolean, if possible.
        """
        # Strict: only a real bool is accepted (no truthiness coercion).
        if type(value) == bool:
            return value
        else:
            raise TypeError("Boolean Param requires value of type bool. Found %s." % type(value))
class Params(Identifiable):
    """
    Components that take parameters. This also provides an internal
    param map to store parameter values attached to the instance.

    .. versionadded:: 1.3.0
    """

    __metaclass__ = ABCMeta

    def __init__(self):
        super(Params, self).__init__()
        #: internal param map for user-supplied values param map
        self._paramMap = {}
        #: internal param map for default values
        self._defaultParamMap = {}
        #: value returned by :py:func:`params`
        self._params = None
        # Copy the params from the class to the object
        self._copy_params()

    def _copy_params(self):
        """
        Copy all params defined on the class to current object.
        """
        cls = type(self)
        src_name_attrs = [(x, getattr(cls, x)) for x in dir(cls)]
        src_params = list(filter(lambda nameAttr: isinstance(nameAttr[1], Param), src_name_attrs))
        for name, param in src_params:
            setattr(self, name, param._copy_new_parent(self))

    @property
    def params(self):
        """
        Returns all params ordered by name. The default implementation
        uses :py:func:`dir` to get all attributes of type
        :py:class:`Param`.
        """
        if self._params is None:
            self._params = list(filter(lambda attr: isinstance(attr, Param),
                                       [getattr(self, x) for x in dir(self) if x != "params" and
                                        not isinstance(getattr(type(self), x, None), property)]))
        return self._params

    def explainParam(self, param):
        """
        Explains a single param and returns its name, doc, and optional
        default value and user-supplied value in a string.
        """
        param = self._resolveParam(param)
        values = []
        if self.isDefined(param):
            if param in self._defaultParamMap:
                values.append("default: %s" % self._defaultParamMap[param])
            if param in self._paramMap:
                values.append("current: %s" % self._paramMap[param])
        else:
            values.append("undefined")
        valueStr = "(" + ", ".join(values) + ")"
        return "%s: %s %s" % (param.name, param.doc, valueStr)

    def explainParams(self):
        """
        Returns the documentation of all params with their optionally
        default values and user-supplied values.
        """
        return "\n".join([self.explainParam(param) for param in self.params])

    def getParam(self, paramName):
        """
        Gets a param by its name.
        """
        param = getattr(self, paramName)
        if isinstance(param, Param):
            return param
        else:
            raise ValueError("Cannot find param with name %s." % paramName)

    def isSet(self, param):
        """
        Checks whether a param is explicitly set by user.
        """
        param = self._resolveParam(param)
        return param in self._paramMap

    def hasDefault(self, param):
        """
        Checks whether a param has a default value.
        """
        param = self._resolveParam(param)
        return param in self._defaultParamMap

    def isDefined(self, param):
        """
        Checks whether a param is explicitly set by user or has
        a default value.
        """
        return self.isSet(param) or self.hasDefault(param)

    def hasParam(self, paramName):
        """
        Tests whether this instance contains a param with a given
        (string) name.
        """
        if isinstance(paramName, basestring):
            p = getattr(self, paramName, None)
            return isinstance(p, Param)
        else:
            raise TypeError("hasParam(): paramName must be a string")

    def getOrDefault(self, param):
        """
        Gets the value of a param in the user-supplied param map or its
        default value. Raises an error if neither is set.
        """
        param = self._resolveParam(param)
        if param in self._paramMap:
            return self._paramMap[param]
        else:
            return self._defaultParamMap[param]

    def extractParamMap(self, extra=None):
        """
        Extracts the embedded default param values and user-supplied
        values, and then merges them with extra values from input into
        a flat param map, where the latter value is used if there exist
        conflicts, i.e., with ordering: default param values <
        user-supplied values < extra.

        :param extra: extra param values
        :return: merged param map
        """
        if extra is None:
            extra = dict()
        paramMap = self._defaultParamMap.copy()
        paramMap.update(self._paramMap)
        paramMap.update(extra)
        return paramMap

    def copy(self, extra=None):
        """
        Creates a copy of this instance with the same uid and some
        extra params. The default implementation creates a
        shallow copy using :py:func:`copy.copy`, and then copies the
        embedded and extra parameters over and returns the copy.
        Subclasses should override this method if the default approach
        is not sufficient.

        :param extra: Extra parameters to copy to the new instance
        :return: Copy of this instance
        """
        if extra is None:
            extra = dict()
        that = copy.copy(self)
        that._paramMap = {}
        that._defaultParamMap = {}
        return self._copyValues(that, extra)

    def set(self, param, value):
        """
        Sets a parameter in the embedded param map.
        """
        self._shouldOwn(param)
        try:
            value = param.typeConverter(value)
        except (TypeError, ValueError) as e:
            # The TypeConverters in this module raise TypeError (see
            # TypeConverters.toFloat etc.), but this handler previously
            # caught only ValueError, so conversion failures escaped
            # without the param-name context that _set() adds.
            raise ValueError('Invalid param value given for param "%s". %s' % (param.name, e))
        self._paramMap[param] = value

    def _shouldOwn(self, param):
        """
        Validates that the input param belongs to this Params instance.
        """
        if not (self.uid == param.parent and self.hasParam(param.name)):
            raise ValueError("Param %r does not belong to %r." % (param, self))

    def _resolveParam(self, param):
        """
        Resolves a param and validates the ownership.

        :param param: param name or the param instance, which must
                      belong to this Params instance
        :return: resolved param instance
        """
        if isinstance(param, Param):
            self._shouldOwn(param)
            return param
        elif isinstance(param, basestring):
            return self.getParam(param)
        else:
            raise ValueError("Cannot resolve %r as a param." % param)

    @staticmethod
    def _dummy():
        """
        Returns a dummy Params instance used as a placeholder to
        generate docs.
        """
        dummy = Params()
        dummy.uid = "undefined"
        return dummy

    def _set(self, **kwargs):
        """
        Sets user-supplied params.
        """
        for param, value in kwargs.items():
            p = getattr(self, param)
            if value is not None:
                try:
                    value = p.typeConverter(value)
                except TypeError as e:
                    raise TypeError('Invalid param value given for param "%s". %s' % (p.name, e))
            self._paramMap[p] = value
        return self

    def _clear(self, param):
        """
        Clears a param from the param map if it has been explicitly set.
        """
        if self.isSet(param):
            del self._paramMap[param]

    def _setDefault(self, **kwargs):
        """
        Sets default params.
        """
        for param, value in kwargs.items():
            p = getattr(self, param)
            if value is not None and not isinstance(value, JavaObject):
                try:
                    value = p.typeConverter(value)
                except TypeError as e:
                    raise TypeError('Invalid default param value given for param "%s". %s'
                                    % (p.name, e))
            self._defaultParamMap[p] = value
        return self

    def _copyValues(self, to, extra=None):
        """
        Copies param values from this instance to another instance for
        params shared by them.

        :param to: the target instance
        :param extra: extra params to be copied
        :return: the target instance with param values copied
        """
        paramMap = self._paramMap.copy()
        if extra is not None:
            paramMap.update(extra)
        for param in self.params:
            # copy default params
            if param in self._defaultParamMap and to.hasParam(param.name):
                to._defaultParamMap[to.getParam(param.name)] = self._defaultParamMap[param]
            # copy explicitly set params
            if param in paramMap and to.hasParam(param.name):
                to._set(**{param.name: paramMap[param]})
        return to

    def _resetUid(self, newUid):
        """
        Changes the uid of this instance. This updates both
        the stored uid and the parent uid of params and param maps.
        This is used by persistence (loading).

        :param newUid: new uid to use, which is converted to unicode
        :return: same instance, but with the uid and Param.parent values
                 updated, including within param maps
        """
        newUid = unicode(newUid)
        self.uid = newUid
        newDefaultParamMap = dict()
        newParamMap = dict()
        for param in self.params:
            newParam = copy.copy(param)
            newParam.parent = newUid
            if param in self._defaultParamMap:
                newDefaultParamMap[newParam] = self._defaultParamMap[param]
            if param in self._paramMap:
                newParamMap[newParam] = self._paramMap[param]
            param.parent = newUid
        self._defaultParamMap = newDefaultParamMap
        self._paramMap = newParamMap
        return self
|
elkingtonmcb/django | refs/heads/master | django/db/backends/mysql/base.py | 323 | """
MySQL database backend for Django.
Requires mysqlclient: https://pypi.python.org/pypi/mysqlclient/
MySQLdb is supported for Python 2 only: http://sourceforge.net/projects/mysql-python
"""
from __future__ import unicode_literals
import datetime
import re
import sys
import warnings
from django.conf import settings
from django.db import utils
from django.db.backends import utils as backend_utils
from django.db.backends.base.base import BaseDatabaseWrapper
from django.utils import six, timezone
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.encoding import force_str
from django.utils.functional import cached_property
from django.utils.safestring import SafeBytes, SafeText
try:
import MySQLdb as Database
except ImportError as e:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("Error loading MySQLdb module: %s" % e)
from MySQLdb.constants import CLIENT, FIELD_TYPE # isort:skip
from MySQLdb.converters import Thing2Literal, conversions # isort:skip
# Some of these import MySQLdb, so import them after checking if it's installed.
from .client import DatabaseClient # isort:skip
from .creation import DatabaseCreation # isort:skip
from .features import DatabaseFeatures # isort:skip
from .introspection import DatabaseIntrospection # isort:skip
from .operations import DatabaseOperations # isort:skip
from .schema import DatabaseSchemaEditor # isort:skip
from .validation import DatabaseValidation # isort:skip
# We want version (1, 2, 1, 'final', 2) or later. We can't just use
# lexicographic ordering in this check because then (1, 2, 1, 'gamma')
# inadvertently passes the version test.
version = Database.version_info
if (version < (1, 2, 1) or (version[:3] == (1, 2, 1) and
        (len(version) < 5 or version[3] != 'final' or version[4] < 2))):
    from django.core.exceptions import ImproperlyConfigured
    raise ImproperlyConfigured("MySQLdb-1.2.1p2 or newer is required; you have %s" % Database.__version__)
# Re-export the driver's exception classes under this module's namespace.
DatabaseError = Database.DatabaseError
IntegrityError = Database.IntegrityError
def adapt_datetime_warn_on_aware_datetime(value, conv):
    """Adapt a datetime for MySQLdb, warning when it is timezone-aware.

    Deprecated shim: remove this function and rely on the default adapter
    in Django 2.0.
    """
    if settings.USE_TZ and timezone.is_aware(value):
        # Interpolate the offending value: warnings.warn() performs no
        # %-formatting, so the original "%s" placeholder was emitted
        # literally instead of the datetime.
        warnings.warn(
            "The MySQL database adapter received an aware datetime (%s), "
            "probably from cursor.execute(). Update your code to pass a "
            "naive datetime in the database connection's time zone (UTC by "
            "default)." % value, RemovedInDjango20Warning)
        # This doesn't account for the database connection's timezone,
        # which isn't known. (That's why this adapter is deprecated.)
        value = value.astimezone(timezone.utc).replace(tzinfo=None)
    return Thing2Literal(value.strftime("%Y-%m-%d %H:%M:%S.%f"), conv)
# MySQLdb-1.2.1 returns TIME columns as timedelta -- they are more like
# timedelta in terms of actual behavior as they are signed and include days --
# and Django expects time, so we still need to override that. We also need to
# add special handling for SafeText and SafeBytes as MySQLdb's type
# checking is too tight to catch those (see Django ticket #6052).
# This conversion map is passed to Database.connect() via the 'conv' kwarg
# in DatabaseWrapper.get_connection_params() below.
django_conversions = conversions.copy()
django_conversions.update({
    FIELD_TYPE.TIME: backend_utils.typecast_time,
    FIELD_TYPE.DECIMAL: backend_utils.typecast_decimal,
    FIELD_TYPE.NEWDECIMAL: backend_utils.typecast_decimal,
    datetime.datetime: adapt_datetime_warn_on_aware_datetime,
})
# This should match the numerical portion of the version numbers (we can treat
# versions like 5.0.24 and 5.0.24a as the same). Based on the list of version
# at http://dev.mysql.com/doc/refman/4.1/en/news.html and
# http://dev.mysql.com/doc/refman/5.0/en/news.html .
# Used by DatabaseWrapper.mysql_version to parse the server_info string.
server_version_re = re.compile(r'(\d{1,2})\.(\d{1,2})\.(\d{1,2})')
# MySQLdb-1.2.1 and newer automatically makes use of SHOW WARNINGS on
# MySQL-4.1 and newer, so the MysqlDebugWrapper is unnecessary. Since the
# point is to raise Warnings as exceptions, this can be done with the Python
# warning module, and this is setup when the connection is created, and the
# standard backend_utils.CursorDebugWrapper can be used. Also, using sql_mode
# TRADITIONAL will automatically cause most warnings to be treated as errors.
class CursorWrapper(object):
    """
    A thin wrapper around MySQLdb's normal cursor class so that we can catch
    particular exception instances and reraise them with the right types.
    Implemented as a wrapper, rather than a subclass, so that we aren't stuck
    to the particular underlying representation returned by Connection.cursor().
    """
    # MySQL error codes (1048 = "Column cannot be null") that MySQLdb raises
    # as OperationalError but that are remapped to IntegrityError below.
    codes_for_integrityerror = (1048,)
    def __init__(self, cursor):
        # The wrapped MySQLdb cursor; unknown attributes are delegated to it
        # via __getattr__ below.
        self.cursor = cursor
    def execute(self, query, args=None):
        """Execute *query*, remapping selected OperationalErrors to IntegrityError."""
        try:
            # args is None means no string interpolation
            return self.cursor.execute(query, args)
        except Database.OperationalError as e:
            # Map some error codes to IntegrityError, since they seem to be
            # misclassified and Django would prefer the more logical place.
            # six.reraise preserves the original traceback.
            if e.args[0] in self.codes_for_integrityerror:
                six.reraise(utils.IntegrityError, utils.IntegrityError(*tuple(e.args)), sys.exc_info()[2])
            raise
    def executemany(self, query, args):
        """Execute *query* for each parameter set, with the same error remapping as execute()."""
        try:
            return self.cursor.executemany(query, args)
        except Database.OperationalError as e:
            # Map some error codes to IntegrityError, since they seem to be
            # misclassified and Django would prefer the more logical place.
            if e.args[0] in self.codes_for_integrityerror:
                six.reraise(utils.IntegrityError, utils.IntegrityError(*tuple(e.args)), sys.exc_info()[2])
            raise
    def __getattr__(self, attr):
        # Delegate any attribute not set on the wrapper itself to the
        # underlying cursor.
        if attr in self.__dict__:
            return self.__dict__[attr]
        else:
            return getattr(self.cursor, attr)
    def __iter__(self):
        # Iterate result rows from the underlying cursor.
        return iter(self.cursor)
    def __enter__(self):
        return self
    def __exit__(self, type, value, traceback):
        # Ticket #17671 - Close instead of passing thru to avoid backend
        # specific behavior.
        self.close()
class DatabaseWrapper(BaseDatabaseWrapper):
    """Django database backend wrapper for MySQL (via MySQLdb)."""
    vendor = 'mysql'
    # This dictionary maps Field objects to their associated MySQL column
    # types, as strings. Column-type strings can contain format strings; they'll
    # be interpolated against the values of Field.__dict__ before being output.
    # If a column type is set to None, it won't be included in the output.
    _data_types = {
        'AutoField': 'integer AUTO_INCREMENT',
        'BinaryField': 'longblob',
        'BooleanField': 'bool',
        'CharField': 'varchar(%(max_length)s)',
        'CommaSeparatedIntegerField': 'varchar(%(max_length)s)',
        'DateField': 'date',
        'DateTimeField': 'datetime',
        'DecimalField': 'numeric(%(max_digits)s, %(decimal_places)s)',
        'DurationField': 'bigint',
        'FileField': 'varchar(%(max_length)s)',
        'FilePathField': 'varchar(%(max_length)s)',
        'FloatField': 'double precision',
        'IntegerField': 'integer',
        'BigIntegerField': 'bigint',
        'IPAddressField': 'char(15)',
        'GenericIPAddressField': 'char(39)',
        'NullBooleanField': 'bool',
        'OneToOneField': 'integer',
        'PositiveIntegerField': 'integer UNSIGNED',
        'PositiveSmallIntegerField': 'smallint UNSIGNED',
        'SlugField': 'varchar(%(max_length)s)',
        'SmallIntegerField': 'smallint',
        'TextField': 'longtext',
        'TimeField': 'time',
        'UUIDField': 'char(32)',
    }
    @cached_property
    def data_types(self):
        # When the server supports microsecond precision, upgrade the
        # temporal column types to datetime(6)/time(6).
        if self.features.supports_microsecond_precision:
            return dict(self._data_types, DateTimeField='datetime(6)', TimeField='time(6)')
        else:
            return self._data_types
    # SQL fragments for each Django lookup type; %s is the placeholder for
    # the right-hand side value. BINARY forces case-sensitive matching.
    operators = {
        'exact': '= %s',
        'iexact': 'LIKE %s',
        'contains': 'LIKE BINARY %s',
        'icontains': 'LIKE %s',
        'regex': 'REGEXP BINARY %s',
        'iregex': 'REGEXP %s',
        'gt': '> %s',
        'gte': '>= %s',
        'lt': '< %s',
        'lte': '<= %s',
        'startswith': 'LIKE BINARY %s',
        'endswith': 'LIKE BINARY %s',
        'istartswith': 'LIKE %s',
        'iendswith': 'LIKE %s',
    }
    # The patterns below are used to generate SQL pattern lookup clauses when
    # the right-hand side of the lookup isn't a raw string (it might be an expression
    # or the result of a bilateral transformation).
    # In those cases, special characters for LIKE operators (e.g. \, *, _) should be
    # escaped on database side.
    #
    # Note: we use str.format() here for readability as '%' is used as a wildcard for
    # the LIKE operator.
    pattern_esc = r"REPLACE(REPLACE(REPLACE({}, '\\', '\\\\'), '%%', '\%%'), '_', '\_')"
    pattern_ops = {
        'contains': "LIKE BINARY CONCAT('%%', {}, '%%')",
        'icontains': "LIKE CONCAT('%%', {}, '%%')",
        'startswith': "LIKE BINARY CONCAT({}, '%%')",
        'istartswith': "LIKE CONCAT({}, '%%')",
        'endswith': "LIKE BINARY CONCAT('%%', {})",
        'iendswith': "LIKE CONCAT('%%', {})",
    }
    Database = Database
    SchemaEditorClass = DatabaseSchemaEditor
    def __init__(self, *args, **kwargs):
        super(DatabaseWrapper, self).__init__(*args, **kwargs)
        # Wire up the per-backend component objects.
        self.features = DatabaseFeatures(self)
        self.ops = DatabaseOperations(self)
        self.client = DatabaseClient(self)
        self.creation = DatabaseCreation(self)
        self.introspection = DatabaseIntrospection(self)
        self.validation = DatabaseValidation(self)
    def get_connection_params(self):
        """Build the kwargs dict passed to Database.connect()."""
        kwargs = {
            'conv': django_conversions,
            'charset': 'utf8',
        }
        if six.PY2:
            kwargs['use_unicode'] = True
        settings_dict = self.settings_dict
        if settings_dict['USER']:
            kwargs['user'] = settings_dict['USER']
        if settings_dict['NAME']:
            kwargs['db'] = settings_dict['NAME']
        if settings_dict['PASSWORD']:
            kwargs['passwd'] = force_str(settings_dict['PASSWORD'])
        # A HOST starting with '/' is interpreted as a Unix socket path.
        if settings_dict['HOST'].startswith('/'):
            kwargs['unix_socket'] = settings_dict['HOST']
        elif settings_dict['HOST']:
            kwargs['host'] = settings_dict['HOST']
        if settings_dict['PORT']:
            kwargs['port'] = int(settings_dict['PORT'])
        # We need the number of potentially affected rows after an
        # "UPDATE", not the number of changed rows.
        kwargs['client_flag'] = CLIENT.FOUND_ROWS
        # User-supplied OPTIONS override everything above.
        kwargs.update(settings_dict['OPTIONS'])
        return kwargs
    def get_new_connection(self, conn_params):
        """Open a new MySQLdb connection and register encoders for safe strings."""
        conn = Database.connect(**conn_params)
        conn.encoders[SafeText] = conn.encoders[six.text_type]
        conn.encoders[SafeBytes] = conn.encoders[bytes]
        return conn
    def init_connection_state(self):
        with self.cursor() as cursor:
            # SQL_AUTO_IS_NULL in MySQL controls whether an AUTO_INCREMENT column
            # on a recently-inserted row will return when the field is tested for
            # NULL. Disabling this value brings this aspect of MySQL in line with
            # SQL standards.
            cursor.execute('SET SQL_AUTO_IS_NULL = 0')
    def create_cursor(self):
        """Return a CursorWrapper around a fresh MySQLdb cursor."""
        cursor = self.connection.cursor()
        return CursorWrapper(cursor)
    def _rollback(self):
        try:
            BaseDatabaseWrapper._rollback(self)
        except Database.NotSupportedError:
            # The driver signals rollback-not-supported with
            # NotSupportedError; ignore it (presumably non-transactional
            # storage engines -- TODO confirm).
            pass
    def _set_autocommit(self, autocommit):
        with self.wrap_database_errors:
            self.connection.autocommit(autocommit)
    def disable_constraint_checking(self):
        """
        Disables foreign key checks, primarily for use in adding rows with forward references. Always returns True,
        to indicate constraint checks need to be re-enabled.
        """
        self.cursor().execute('SET foreign_key_checks=0')
        return True
    def enable_constraint_checking(self):
        """
        Re-enable foreign key checks after they have been disabled.
        """
        # Override needs_rollback in case constraint_checks_disabled is
        # nested inside transaction.atomic.
        self.needs_rollback, needs_rollback = False, self.needs_rollback
        try:
            self.cursor().execute('SET foreign_key_checks=1')
        finally:
            self.needs_rollback = needs_rollback
    def check_constraints(self, table_names=None):
        """
        Checks each table name in `table_names` for rows with invalid foreign
        key references. This method is intended to be used in conjunction with
        `disable_constraint_checking()` and `enable_constraint_checking()`, to
        determine if rows with invalid references were entered while constraint
        checks were off.

        Raises an IntegrityError on the first invalid foreign key reference
        encountered (if any) and provides detailed information about the
        invalid reference in the error message.

        Backends can override this method if they can more directly apply
        constraint checking (e.g. via "SET CONSTRAINTS ALL IMMEDIATE")
        """
        cursor = self.cursor()
        if table_names is None:
            table_names = self.introspection.table_names(cursor)
        for table_name in table_names:
            # Tables without a single-column primary key are skipped.
            primary_key_column_name = self.introspection.get_primary_key_column(cursor, table_name)
            if not primary_key_column_name:
                continue
            key_columns = self.introspection.get_key_columns(cursor, table_name)
            for column_name, referenced_table_name, referenced_column_name in key_columns:
                # Find rows whose FK value has no matching row in the
                # referenced table. Identifiers come from introspection and
                # are quoted with backticks.
                cursor.execute("""
                    SELECT REFERRING.`%s`, REFERRING.`%s` FROM `%s` as REFERRING
                    LEFT JOIN `%s` as REFERRED
                    ON (REFERRING.`%s` = REFERRED.`%s`)
                    WHERE REFERRING.`%s` IS NOT NULL AND REFERRED.`%s` IS NULL"""
                    % (primary_key_column_name, column_name, table_name, referenced_table_name,
                    column_name, referenced_column_name, column_name, referenced_column_name))
                for bad_row in cursor.fetchall():
                    raise utils.IntegrityError("The row in table '%s' with primary key '%s' has an invalid "
                        "foreign key: %s.%s contains a value '%s' that does not have a corresponding value in %s.%s."
                        % (table_name, bad_row[0],
                        table_name, column_name, bad_row[1],
                        referenced_table_name, referenced_column_name))
    def is_usable(self):
        """Return True if the connection still responds to a ping."""
        try:
            self.connection.ping()
        except Database.Error:
            return False
        else:
            return True
    @cached_property
    def mysql_version(self):
        """Return the server version as a tuple of ints parsed from server_info."""
        with self.temporary_connection():
            server_info = self.connection.get_server_info()
        match = server_version_re.match(server_info)
        if not match:
            raise Exception('Unable to determine MySQL version from version string %r' % server_info)
        return tuple(int(x) for x in match.groups())
|
raffaelespazzoli/origin | refs/heads/master | vendor/github.com/google/certificate-transparency/python/ct/cert_analysis/dnsnames_test.py | 17 | #!/usr/bin/env python
# coding=utf-8
import unittest
import mock
from ct.cert_analysis import base_check_test
from ct.cert_analysis import dnsnames
from ct.cert_analysis import tld_list
from ct.cert_analysis import tld_check
from ct.test import test_config
def gen_dns_name(name):
    """Return a mock dNSName entry whose .value is *name*."""
    entry = mock.Mock()
    entry.value = name
    return entry
def cert_with_urls(*args):
    """Return a mock certificate whose subject_dns_names() yields *args* as a list."""
    cert = mock.MagicMock()
    cert.subject_dns_names = mock.Mock(return_value=list(args))
    return cert
# Mock dNSName fixtures used by the test cases below.
EXAMPLE = gen_dns_name("example.com")
EXAMPLE_WILDCARD = gen_dns_name("*.example.com")
UTF8_URL = gen_dns_name("ćęrtifićątętrąńśpąręńćy.com")
# \xff is never valid in UTF-8 encoded text.
NON_UTF8_URL = gen_dns_name("\xff.com")
# Contains five characters that are invalid in DNS names (one observation each).
URL_INVALID_CHARACTERS_5 = gen_dns_name("[][]].com")
EMAIL_ADDRESS = gen_dns_name("example@example.com")
# presumably "asdf" is absent from the test TLD list -- see test_dnsnames_no_tld_match
NOT_TLD = gen_dns_name("asdf.asdf")
WILDCARD_TLD = gen_dns_name("*.com")
NON_UNICODE_TLD = gen_dns_name("\xff\x00.com")
class DnsnamesTest(base_check_test.BaseCheckTest):
    """Exercises dnsnames checks against mock certificates."""

    def setUp(self):
        # Point the TLD matcher at the small TLD list shipped with the tests.
        tld_check.CheckTldMatches.TLD_LIST_ = tld_list.TLDList(
                tld_dir=test_config.get_tld_directory(),
                tld_file_name="test_tld_list")

    def test_dnsnames_valid(self):
        observations = dnsnames.CheckValidityOfDnsnames().check(
                cert_with_urls(EXAMPLE))
        self.assertEqual(len(observations), 0)

    def test_dnsnames_wildcard(self):
        observations = dnsnames.CheckValidityOfDnsnames().check(
                cert_with_urls(EXAMPLE_WILDCARD))
        self.assertEqual(len(observations), 0)

    def test_dnsnames_utf8(self):
        observations = dnsnames.CheckValidityOfDnsnames().check(
                cert_with_urls(UTF8_URL))
        self.assertEqual(len(observations), 0)

    def test_dnsnames_non_utf8(self):
        observations = dnsnames.CheckValidityOfDnsnames().check(
                cert_with_urls(NON_UTF8_URL))
        self.assertEqual(len(observations), 1)
        self.assertIsNotNone(observations[0].reason)

    def test_dnsnames_invalid_chars(self):
        observations = dnsnames.CheckValidityOfDnsnames().check(
                cert_with_urls(URL_INVALID_CHARACTERS_5))
        # One observation per invalid character.
        self.assertEqual(len(observations), 5)
        for observation in observations:
            self.assertIsNotNone(observation.details)

    def test_dnsnames_email(self):
        observations = dnsnames.CheckValidityOfDnsnames().check(
                cert_with_urls(EMAIL_ADDRESS))
        self.assertEqual(len(observations), 1)
        self.assertIsNotNone(observations[0].reason)
        self.assertIn('@', ''.join(observations[0].details))

    def test_dnsnames_multiple_names(self):
        observations = dnsnames.CheckValidityOfDnsnames().check(
                cert_with_urls(EXAMPLE, EXAMPLE_WILDCARD, UTF8_URL,
                               NON_UTF8_URL, URL_INVALID_CHARACTERS_5))
        # 1 from NON_UTF8, 5 from INVALID_CHARACTERS_5
        self.assertEqual(len(observations), 6)

    def test_dnsnames_tld_match(self):
        observations = dnsnames.CheckTldMatches().check(
                cert_with_urls(EXAMPLE))
        self.assertEqual(len(observations), 0)

    def test_dnsnames_no_tld_match(self):
        observations = dnsnames.CheckTldMatches().check(
                cert_with_urls(NOT_TLD))
        descriptions = ''.join(obs.description for obs in observations)
        self.assertIn(dnsnames.NoTldMatch().description, descriptions)

    def test_dnsnames_wildcard_tld_match(self):
        observations = dnsnames.CheckTldMatches().check(
                cert_with_urls(WILDCARD_TLD))
        descriptions = ''.join(obs.description for obs in observations)
        self.assertIn(dnsnames.GenericWildcard().description, descriptions)

    def test_dnsnames_non_unicode_match(self):
        observations = dnsnames.CheckTldMatches().check(
                cert_with_urls(NON_UNICODE_TLD))
        descriptions = ''.join(obs.description for obs in observations)
        self.assertIn(dnsnames.NonUnicodeAddress().description, descriptions)
        self.assertEqual(len(observations), 1)
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
|
vamanea/u-boot | refs/heads/master | tools/patman/series.py | 10 | # Copyright (c) 2011 The Chromium OS Authors.
#
# See file CREDITS for list of people who contributed to this
# project.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of
# the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307 USA
#
import os
import tempfile

import gitutil
import terminal
# Series-xxx tags that we understand
valid_series = ['to', 'cc', 'version', 'changes', 'prefix', 'notes', 'name']
class Series(dict):
"""Holds information about a patch series, including all tags.
Vars:
cc: List of aliases/emails to Cc all patches to
commits: List of Commit objects, one for each patch
cover: List of lines in the cover letter
notes: List of lines in the notes
changes: (dict) List of changes for each version, The key is
the integer version number
"""
def __init__(self):
self.cc = []
self.to = []
self.commits = []
self.cover = None
self.notes = []
self.changes = {}
# These make us more like a dictionary
def __setattr__(self, name, value):
self[name] = value
def __getattr__(self, name):
return self[name]
def AddTag(self, commit, line, name, value):
"""Add a new Series-xxx tag along with its value.
Args:
line: Source line containing tag (useful for debug/error messages)
name: Tag name (part after 'Series-')
value: Tag value (part after 'Series-xxx: ')
"""
# If we already have it, then add to our list
if name in self:
values = value.split(',')
values = [str.strip() for str in values]
if type(self[name]) != type([]):
raise ValueError("In %s: line '%s': Cannot add another value "
"'%s' to series '%s'" %
(commit.hash, line, values, self[name]))
self[name] += values
# Otherwise just set the value
elif name in valid_series:
self[name] = value
else:
raise ValueError("In %s: line '%s': Unknown 'Series-%s': valid "
"options are %s" % (commit.hash, line, name,
', '.join(valid_series)))
def AddCommit(self, commit):
"""Add a commit into our list of commits
We create a list of tags in the commit subject also.
Args:
commit: Commit object to add
"""
commit.CheckTags()
self.commits.append(commit)
def ShowActions(self, args, cmd, process_tags):
"""Show what actions we will/would perform
Args:
args: List of patch files we created
cmd: The git command we would have run
process_tags: Process tags as if they were aliases
"""
col = terminal.Color()
print 'Dry run, so not doing much. But I would do this:'
print
print 'Send a total of %d patch%s with %scover letter.' % (
len(args), '' if len(args) == 1 else 'es',
self.get('cover') and 'a ' or 'no ')
# TODO: Colour the patches according to whether they passed checks
for upto in range(len(args)):
commit = self.commits[upto]
print col.Color(col.GREEN, ' %s' % args[upto])
cc_list = []
if process_tags:
cc_list += gitutil.BuildEmailList(commit.tags)
cc_list += gitutil.BuildEmailList(commit.cc_list)
# Skip items in To list
if 'to' in self:
try:
map(cc_list.remove, gitutil.BuildEmailList(self.to))
except ValueError:
pass
for email in cc_list:
if email == None:
email = col.Color(col.YELLOW, "<alias '%s' not found>"
% tag)
if email:
print ' Cc: ',email
print
for item in gitutil.BuildEmailList(self.get('to', '<none>')):
print 'To:\t ', item
for item in gitutil.BuildEmailList(self.cc):
print 'Cc:\t ', item
print 'Version: ', self.get('version')
print 'Prefix:\t ', self.get('prefix')
if self.cover:
print 'Cover: %d lines' % len(self.cover)
if cmd:
print 'Git command: %s' % cmd
def MakeChangeLog(self, commit):
"""Create a list of changes for each version.
Return:
The change log as a list of strings, one per line
Changes in v2:
- Jog the dial back closer to the widget
Changes in v1:
- Fix the widget
- Jog the dial
etc.
"""
final = []
need_blank = False
for change in sorted(self.changes, reverse=True):
out = []
for this_commit, text in self.changes[change]:
if commit and this_commit != commit:
continue
out.append(text)
if out:
out = ['Changes in v%d:' % change] + out
if need_blank:
out = [''] + out
final += out
need_blank = True
if self.changes:
final.append('')
return final
def DoChecks(self):
"""Check that each version has a change log
Print an error if something is wrong.
"""
col = terminal.Color()
if self.get('version'):
changes_copy = dict(self.changes)
for version in range(1, int(self.version) + 1):
if self.changes.get(version):
del changes_copy[version]
else:
if version > 1:
str = 'Change log missing for v%d' % version
print col.Color(col.RED, str)
for version in changes_copy:
str = 'Change log for unknown version v%d' % version
print col.Color(col.RED, str)
elif self.changes:
str = 'Change log exists, but no version is set'
print col.Color(col.RED, str)
def MakeCcFile(self, process_tags):
"""Make a cc file for us to use for per-commit Cc automation
Args:
process_tags: Process tags as if they were aliases
Return:
Filename of temp file created
"""
# Look for commit tags (of the form 'xxx:' at the start of the subject)
fname = '/tmp/patman.%d' % os.getpid()
fd = open(fname, 'w')
for commit in self.commits:
list = []
if process_tags:
list += gitutil.BuildEmailList(commit.tags)
list += gitutil.BuildEmailList(commit.cc_list)
print >>fd, commit.patch, ', '.join(list)
fd.close()
return fname
def AddChange(self, version, commit, info):
    """Record a change line for a version; shown later in the change log.

    Args:
        version: version number the change belongs to
        commit: commit the change was recorded against
        info: change line text for this version
    """
    entries = self.changes.get(version)
    if not entries:
        entries = self.changes[version] = []
    entries.append([commit, info])
def GetPatchPrefix(self):
    """Build the patch version string.

    Return:
        Patch string, like 'RFC PATCH v5' or just 'PATCH'
    """
    suffix = ' v%s' % self['version'] if self.get('version') else ''
    lead = '%s ' % self['prefix'] if self.get('prefix') else ''
    return lead + 'PATCH' + suffix
|
jorik041/robotframework | refs/heads/master | src/robot/parsing/model.py | 17 | # Copyright 2008-2015 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import copy
from robot.errors import DataError
from robot.variables import is_var
from robot.output import LOGGER
from robot.writer import DataFileWriter
from robot.utils import abspath, is_string, normalize, NormalizedDict
from .comments import Comment
from .populators import FromFilePopulator, FromDirectoryPopulator
from .settings import (Documentation, Fixture, Timeout, Tags, Metadata,
Library, Resource, Variables, Arguments, Return,
Template, MetadataList, ImportList)
def TestData(parent=None, source=None, include_suites=None,
             warn_on_skipped=False):
    """Parse a file or directory into the corresponding model object.

    :param parent: (optional) parent used when creating the model object.
    :param source: path the test data is read from.
    :param include_suites: suite names to include when parsing a directory.
    :param warn_on_skipped: whether to warn about skipped data files.
    :returns: :class:`~.model.TestDataDirectory` if `source` is a directory,
        :class:`~.model.TestCaseFile` otherwise.
    """
    if not os.path.isdir(source):
        return TestCaseFile(parent, source).populate()
    return TestDataDirectory(parent, source).populate(include_suites,
                                                      warn_on_skipped)
class _TestData(object):
    """Common base for parsed test data models (files and directories).

    Maps the accepted table header aliases to the four table objects that
    subclasses create (setting, variable, test case and keyword tables).
    """
    _setting_table_names = 'Setting', 'Settings', 'Metadata'
    _variable_table_names = 'Variable', 'Variables'
    _testcase_table_names = 'Test Case', 'Test Cases'
    _keyword_table_names = 'Keyword', 'Keywords', 'User Keyword', 'User Keywords'

    def __init__(self, parent=None, source=None):
        self.parent = parent
        self.source = abspath(source) if source else None
        self.children = []
        # Case/space-insensitive lookup from header alias to table instance.
        self._tables = NormalizedDict(self._get_tables())

    def _get_tables(self):
        # Yield (alias, table) pairs; subclasses must create the four table
        # attributes before calling _TestData.__init__.
        for names, table in [(self._setting_table_names, self.setting_table),
                             (self._variable_table_names, self.variable_table),
                             (self._testcase_table_names, self.testcase_table),
                             (self._keyword_table_names, self.keyword_table)]:
            for name in names:
                yield name, table

    def start_table(self, header_row):
        """Return the table matching *header_row*, or None if the header is
        unrecognized or that table type is not allowed in this file type."""
        try:
            table = self._tables[header_row[0]]
        except (KeyError, IndexError):
            return None
        if not self._table_is_allowed(table):
            return None
        table.set_header(header_row)
        return table

    @property
    def name(self):
        # Derived from the basename of the source path; None without a source.
        return self._format_name(self._get_basename()) if self.source else None

    def _get_basename(self):
        return os.path.splitext(os.path.basename(self.source))[0]

    def _format_name(self, name):
        name = self._strip_possible_prefix_from_name(name)
        name = name.replace('_', ' ').strip()
        # Title-case only all-lowercase names so explicit casing is kept.
        return name.title() if name.islower() else name

    def _strip_possible_prefix_from_name(self, name):
        # 'NN__Name' -> 'Name' (ordering prefix used in suite file names).
        return name.split('__', 1)[-1]

    @property
    def keywords(self):
        return self.keyword_table.keywords

    @property
    def imports(self):
        return self.setting_table.imports

    def report_invalid_syntax(self, message, level='ERROR'):
        # Directories report against their init file when one exists.
        initfile = getattr(self, 'initfile', None)
        path = os.path.join(self.source, initfile) if initfile else self.source
        LOGGER.write("Error in file '%s': %s" % (path, message), level)

    def save(self, **options):
        """Writes this datafile to disk.

        :param options: Configuration for writing. These are passed to
            :py:class:`~robot.writer.datafilewriter.WritingContext` as
            keyword arguments.

        See also :py:class:`robot.writer.datafilewriter.DataFileWriter`
        """
        return DataFileWriter(**options).write(self)
class TestCaseFile(_TestData):
    """The parsed test case file object.

    :param parent: parent object to be used in creation of the model object.
    :param source: path where test data is read from.
    """

    def __init__(self, parent=None, source=None):
        self.directory = os.path.dirname(source) if source else None
        # Tables must exist before the base __init__ builds its lookup.
        self.setting_table = TestCaseFileSettingTable(self)
        self.variable_table = VariableTable(self)
        self.testcase_table = TestCaseTable(self)
        self.keyword_table = KeywordTable(self)
        _TestData.__init__(self, parent, source)

    def populate(self):
        """Read the data from disk and verify a test case table exists."""
        FromFilePopulator(self).populate(self.source)
        self._validate()
        return self

    def _validate(self):
        if not self.testcase_table.is_started():
            raise DataError('File has no test case table.')

    def _table_is_allowed(self, table):
        # Every table type is legal in a test case file.
        return True

    def has_tests(self):
        return True

    def __iter__(self):
        return iter((self.setting_table, self.variable_table,
                     self.testcase_table, self.keyword_table))
class ResourceFile(_TestData):
    """The parsed resource file object.

    :param source: path where resource file is read from.
    """

    def __init__(self, source=None):
        self.directory = os.path.dirname(source) if source else None
        self.setting_table = ResourceFileSettingTable(self)
        self.variable_table = VariableTable(self)
        self.testcase_table = TestCaseTable(self)
        self.keyword_table = KeywordTable(self)
        _TestData.__init__(self, source=source)

    def populate(self):
        """Read the resource file and log whether it contained any data."""
        FromFilePopulator(self).populate(self.source)
        self._report_status()
        return self

    def _report_status(self):
        if not (self.setting_table or self.variable_table or self.keyword_table):
            LOGGER.warn("Imported resource file '%s' is empty." % self.source)
        else:
            LOGGER.info("Imported resource file '%s' (%d keywords)."
                        % (self.source, len(self.keyword_table.keywords)))

    def _table_is_allowed(self, table):
        # Resource files must not contain tests.
        if table is self.testcase_table:
            raise DataError("Resource file '%s' contains a test case table "
                            "which is not allowed." % self.source)
        return True

    def __iter__(self):
        return iter((self.setting_table, self.variable_table,
                     self.keyword_table))
class TestDataDirectory(_TestData):
    """The parsed test data directory object. Contains a hierarchical
    structure of other :py:class:`.TestDataDirectory` and
    :py:class:`.TestCaseFile` objects.

    :param parent: parent object to be used in creation of the model object.
    :param source: path where test data is read from.
    """

    def __init__(self, parent=None, source=None):
        self.directory = source
        self.initfile = None
        self.setting_table = InitFileSettingTable(self)
        self.variable_table = VariableTable(self)
        self.testcase_table = TestCaseTable(self)
        self.keyword_table = KeywordTable(self)
        _TestData.__init__(self, parent, source)

    def populate(self, include_suites=None, warn_on_skipped=False, recurse=True):
        """Read the directory (optionally recursively) and keep only the
        children that actually contain tests."""
        FromDirectoryPopulator().populate(self.source, self, include_suites,
                                          warn_on_skipped, recurse)
        self.children = [child for child in self.children if child.has_tests()]
        return self

    def _get_basename(self):
        return os.path.basename(self.source)

    def _table_is_allowed(self, table):
        if table is not self.testcase_table:
            return True
        LOGGER.error("Test suite init file in '%s' contains a test case "
                     "table which is not allowed." % self.source)
        return False

    def add_child(self, path, include_suites):
        child = TestData(parent=self, source=path,
                         include_suites=include_suites)
        self.children.append(child)

    def has_tests(self):
        return any(child.has_tests() for child in self.children)

    def __iter__(self):
        return iter((self.setting_table, self.variable_table,
                     self.keyword_table))
class _Table(object):
def __init__(self, parent):
self.parent = parent
self._header = None
def set_header(self, header):
self._header = self._prune_old_style_headers(header)
def _prune_old_style_headers(self, header):
if len(header) < 3:
return header
if self._old_header_matcher.match(header):
return [header[0]]
return header
@property
def header(self):
return self._header or [self.type.title() + 's']
@property
def name(self):
return self.header[0]
@property
def source(self):
return self.parent.source
@property
def directory(self):
return self.parent.directory
def report_invalid_syntax(self, message, level='ERROR'):
self.parent.report_invalid_syntax(message, level)
def __nonzero__(self):
return bool(self._header or len(self))
def __len__(self):
return sum(1 for item in self)
class _WithSettings(object):
    """Mixin mapping setting names (case/space-insensitively) to populators."""

    def get_setter(self, setting_name):
        """Return the populate callback for *setting_name*, or report an
        error (and return None) when the setting does not exist."""
        key = self.normalize(setting_name)
        if key not in self._setters:
            self.report_invalid_syntax("Non-existing setting '%s'." % setting_name)
            return None
        return self._setters[key](self)

    def is_setting(self, setting_name):
        return self.normalize(setting_name) in self._setters

    def normalize(self, setting):
        # Accept one trailing colon, e.g. 'Documentation:'.
        result = normalize(setting)
        return result[:-1] if result.endswith(':') else result
class _SettingTable(_Table, _WithSettings):
    """Base class for setting tables; owns all possible setting objects.

    Subclasses restrict which settings are reachable via their _setters map.
    """
    type = 'setting'

    def __init__(self, parent):
        _Table.__init__(self, parent)
        self.doc = Documentation('Documentation', self)
        self.suite_setup = Fixture('Suite Setup', self)
        self.suite_teardown = Fixture('Suite Teardown', self)
        self.test_setup = Fixture('Test Setup', self)
        self.test_teardown = Fixture('Test Teardown', self)
        self.force_tags = Tags('Force Tags', self)
        self.default_tags = Tags('Default Tags', self)
        self.test_template = Template('Test Template', self)
        self.test_timeout = Timeout('Test Timeout', self)
        self.metadata = MetadataList(self)
        self.imports = ImportList(self)

    @property
    def _old_header_matcher(self):
        return OldStyleSettingAndVariableTableHeaderMatcher()

    def add_metadata(self, name, value='', comment=None):
        # Each add_* helper appends and returns the newly created item.
        self.metadata.add(Metadata(self, name, value, comment))
        return self.metadata[-1]

    def add_library(self, name, args=None, comment=None):
        self.imports.add(Library(self, name, args, comment=comment))
        return self.imports[-1]

    def add_resource(self, name, invalid_args=None, comment=None):
        self.imports.add(Resource(self, name, invalid_args, comment=comment))
        return self.imports[-1]

    def add_variables(self, name, args=None, comment=None):
        self.imports.add(Variables(self, name, args, comment=comment))
        return self.imports[-1]

    def __len__(self):
        # Only settings that actually have a value count.
        return sum(1 for setting in self if setting.is_set())
class TestCaseFileSettingTable(_SettingTable):
    """Setting table of a test case file: all suite/test level settings,
    imports and metadata are allowed. Keys are normalized setting names,
    including legacy aliases (e.g. 'document', 'suiteprecondition')."""

    _setters = {'documentation': lambda s: s.doc.populate,
                'document': lambda s: s.doc.populate,
                'suitesetup': lambda s: s.suite_setup.populate,
                'suiteprecondition': lambda s: s.suite_setup.populate,
                'suiteteardown': lambda s: s.suite_teardown.populate,
                'suitepostcondition': lambda s: s.suite_teardown.populate,
                'testsetup': lambda s: s.test_setup.populate,
                'testprecondition': lambda s: s.test_setup.populate,
                'testteardown': lambda s: s.test_teardown.populate,
                'testpostcondition': lambda s: s.test_teardown.populate,
                'forcetags': lambda s: s.force_tags.populate,
                'defaulttags': lambda s: s.default_tags.populate,
                'testtemplate': lambda s: s.test_template.populate,
                'testtimeout': lambda s: s.test_timeout.populate,
                'library': lambda s: s.imports.populate_library,
                'resource': lambda s: s.imports.populate_resource,
                'variables': lambda s: s.imports.populate_variables,
                'metadata': lambda s: s.metadata.populate}

    def __iter__(self):
        # Fixed settings first, then metadata rows and import rows.
        for setting in [self.doc, self.suite_setup, self.suite_teardown,
                        self.test_setup, self.test_teardown, self.force_tags,
                        self.default_tags, self.test_template, self.test_timeout] \
                        + self.metadata.data + self.imports.data:
            yield setting
class ResourceFileSettingTable(_SettingTable):
    """Setting table of a resource file: only documentation and imports
    are legal settings."""

    _setters = {'documentation': lambda s: s.doc.populate,
                'document': lambda s: s.doc.populate,
                'library': lambda s: s.imports.populate_library,
                'resource': lambda s: s.imports.populate_resource,
                'variables': lambda s: s.imports.populate_variables}

    def __iter__(self):
        return iter([self.doc] + self.imports.data)
class InitFileSettingTable(_SettingTable):
    """Setting table of a suite init file. Like a test case file's table
    but without default tags and test template."""

    _setters = {'documentation': lambda s: s.doc.populate,
                'document': lambda s: s.doc.populate,
                'suitesetup': lambda s: s.suite_setup.populate,
                'suiteprecondition': lambda s: s.suite_setup.populate,
                'suiteteardown': lambda s: s.suite_teardown.populate,
                'suitepostcondition': lambda s: s.suite_teardown.populate,
                'testsetup': lambda s: s.test_setup.populate,
                'testprecondition': lambda s: s.test_setup.populate,
                'testteardown': lambda s: s.test_teardown.populate,
                'testpostcondition': lambda s: s.test_teardown.populate,
                'testtimeout': lambda s: s.test_timeout.populate,
                'forcetags': lambda s: s.force_tags.populate,
                'library': lambda s: s.imports.populate_library,
                'resource': lambda s: s.imports.populate_resource,
                'variables': lambda s: s.imports.populate_variables,
                'metadata': lambda s: s.metadata.populate}

    def __iter__(self):
        for setting in [self.doc, self.suite_setup, self.suite_teardown,
                        self.test_setup, self.test_teardown, self.force_tags,
                        self.test_timeout] + self.metadata.data + self.imports.data:
            yield setting
class VariableTable(_Table):
    """Table holding the parsed variable definitions."""

    type = 'variable'

    def __init__(self, parent):
        _Table.__init__(self, parent)
        self.variables = []

    @property
    def _old_header_matcher(self):
        return OldStyleSettingAndVariableTableHeaderMatcher()

    def add(self, name, value, comment=None):
        variable = Variable(self, name, value, comment)
        self.variables.append(variable)

    def __iter__(self):
        return iter(self.variables)
class TestCaseTable(_Table):
    """Table holding the parsed test cases."""

    type = 'test case'

    def __init__(self, parent):
        _Table.__init__(self, parent)
        self.tests = []

    @property
    def _old_header_matcher(self):
        return OldStyleTestAndKeywordTableHeaderMatcher()

    def add(self, name):
        test = TestCase(self, name)
        self.tests.append(test)
        return test

    def __iter__(self):
        return iter(self.tests)

    def is_started(self):
        # The header row is set as soon as the table is encountered.
        return bool(self._header)

    def __nonzero__(self):
        # A test case table is always truthy (Python 2 truth value).
        return True
class KeywordTable(_Table):
    """Table holding the parsed user keywords."""

    type = 'keyword'

    def __init__(self, parent):
        _Table.__init__(self, parent)
        self.keywords = []

    @property
    def _old_header_matcher(self):
        return OldStyleTestAndKeywordTableHeaderMatcher()

    def add(self, name):
        keyword = UserKeyword(self, name)
        self.keywords.append(keyword)
        return keyword

    def __iter__(self):
        return iter(self.keywords)
class Variable(object):
    """A single row of the variable table."""

    def __init__(self, parent, name, value, comment=None):
        self.parent = parent
        self.name = name.rstrip('= ')
        # A scalar variable with no value cells means an empty string.
        if name.startswith('$') and value == []:
            value = ''
        self.value = [value] if is_string(value) else value
        self.comment = Comment(comment)

    def as_list(self):
        if not self.has_data():
            return self.comment.as_list()
        return [self.name] + self.value + self.comment.as_list()

    def is_set(self):
        return True

    def is_for_loop(self):
        return False

    def has_data(self):
        # True when there is a name or any non-empty value cell.
        return bool(self.name or ''.join(self.value))

    def __nonzero__(self):
        return self.has_data()

    def report_invalid_syntax(self, message, level='ERROR'):
        self.parent.report_invalid_syntax(
            "Setting variable '%s' failed: %s" % (self.name, message), level)
class _WithSteps(object):
    """Mixin for models that own a list of executable steps."""

    def add_step(self, content, comment=None):
        step = Step(content, comment)
        self.steps.append(step)
        return step

    def copy(self, name):
        """Return a deep copy of self renamed to *name* and registered
        with the parent table."""
        duplicate = copy.deepcopy(self)
        duplicate.name = name
        self._add_to_parent(duplicate)
        return duplicate
class TestCase(_WithSteps, _WithSettings):
    """A single parsed test case: its settings plus executable steps."""

    def __init__(self, parent, name):
        self.parent = parent
        self.name = name
        self.doc = Documentation('[Documentation]', self)
        self.template = Template('[Template]', self)
        self.tags = Tags('[Tags]', self)
        self.setup = Fixture('[Setup]', self)
        self.teardown = Fixture('[Teardown]', self)
        self.timeout = Timeout('[Timeout]', self)
        self.steps = []

    # Normalized setting names (and legacy aliases) to populate callbacks.
    _setters = {'documentation': lambda s: s.doc.populate,
                'document': lambda s: s.doc.populate,
                'template': lambda s: s.template.populate,
                'setup': lambda s: s.setup.populate,
                'precondition': lambda s: s.setup.populate,
                'teardown': lambda s: s.teardown.populate,
                'postcondition': lambda s: s.teardown.populate,
                'tags': lambda s: s.tags.populate,
                'timeout': lambda s: s.timeout.populate}

    @property
    def source(self):
        return self.parent.source

    @property
    def directory(self):
        return self.parent.directory

    def add_for_loop(self, declaration, comment=None):
        self.steps.append(ForLoop(declaration, comment))
        return self.steps[-1]

    def report_invalid_syntax(self, message, level='ERROR'):
        # Shared with the UserKeyword subclass, hence the exact type check.
        type_ = 'test case' if type(self) is TestCase else 'keyword'
        message = "Invalid syntax in %s '%s': %s" % (type_, self.name, message)
        self.parent.report_invalid_syntax(message, level)

    def _add_to_parent(self, test):
        self.parent.tests.append(test)

    @property
    def settings(self):
        return [self.doc, self.tags, self.setup, self.template, self.timeout,
                self.teardown]

    def __iter__(self):
        # Teardown is yielded last, after all steps.
        for element in [self.doc, self.tags, self.setup,
                        self.template, self.timeout] \
                        + self.steps + [self.teardown]:
            yield element
class UserKeyword(TestCase):
    """A parsed user keyword. Reuses TestCase machinery but has its own
    settings: arguments and return value instead of setup/template."""

    def __init__(self, parent, name):
        self.parent = parent
        self.name = name
        self.doc = Documentation('[Documentation]', self)
        self.args = Arguments('[Arguments]', self)
        self.return_ = Return('[Return]', self)
        self.timeout = Timeout('[Timeout]', self)
        self.teardown = Fixture('[Teardown]', self)
        self.tags = Tags('[Tags]', self)
        self.steps = []

    # Overrides TestCase._setters with the keyword-level settings.
    _setters = {'documentation': lambda s: s.doc.populate,
                'document': lambda s: s.doc.populate,
                'arguments': lambda s: s.args.populate,
                'return': lambda s: s.return_.populate,
                'timeout': lambda s: s.timeout.populate,
                'teardown': lambda s: s.teardown.populate,
                'tags': lambda s: s.tags.populate}

    def _add_to_parent(self, test):
        self.parent.keywords.append(test)

    @property
    def settings(self):
        return [self.args, self.doc, self.tags, self.timeout, self.teardown, self.return_]

    def __iter__(self):
        # Return value is yielded last, after teardown.
        for element in [self.args, self.doc, self.tags, self.timeout] \
                        + self.steps + [self.teardown, self.return_]:
            yield element
class ForLoop(_WithSteps):
    """The parsed representation of a for-loop.

    :param list declaration: The literal cell values that declare the loop
        (excluding ":FOR").
    :param str comment: A comment, default None.
    :ivar str flavor: The value of the 'IN' item, uppercased.
        Typically 'IN', 'IN RANGE', 'IN ZIP', or 'IN ENUMERATE'.
    :ivar list vars: Variables set per-iteration by this loop.
    :ivar list items: Loop values that come after the 'IN' item.
    :ivar str comment: A comment, or None.
    :ivar list steps: A list of steps in the loop.
    """

    def __init__(self, declaration, comment=None):
        self.flavor, index = self._get_flavors_and_index(declaration)
        self.vars = declaration[:index]
        self.items = declaration[index+1:]
        self.comment = Comment(comment)
        self.steps = []

    def _get_flavors_and_index(self, declaration):
        # The first cell that looks like 'IN'/'IN RANGE'/... splits the loop
        # variables from the iterated values.
        for position, cell in enumerate(declaration):
            upper = cell.upper()
            if upper.replace(' ', '').startswith('IN'):
                return upper, position
        return 'IN', len(declaration)

    def is_comment(self):
        return False

    def is_for_loop(self):
        return True

    def as_list(self, indent=False, include_comment=True):
        tail = self.comment.as_list() if include_comment else []
        return [': FOR'] + self.vars + [self.flavor] + self.items + tail

    def __iter__(self):
        return iter(self.steps)

    def is_set(self):
        return True
class Step(object):
    """A single executable row: optional assignments, keyword name, args."""

    def __init__(self, content, comment=None):
        self.assign = list(self._get_assigned_vars(content))
        nassign = len(self.assign)
        # The first non-assignment cell is the keyword name, if any.
        self.name = content[nassign] if nassign < len(content) else None
        self.args = content[nassign+1:]
        self.comment = Comment(comment)

    def _get_assigned_vars(self, content):
        # Leading cells that are variables (optionally ending '=') are
        # assignment targets.
        for cell in content:
            if not is_var(cell.rstrip('= ')):
                return
            yield cell

    def is_comment(self):
        return not (self.assign or self.name or self.args)

    def is_for_loop(self):
        return False

    def is_set(self):
        return True

    def as_list(self, indent=False, include_comment=True):
        keyword = [] if self.name is None else [self.name]
        tail = self.comment.as_list() if include_comment else []
        cells = self.assign + keyword + self.args + tail
        return [''] + cells if indent else cells
class OldStyleSettingAndVariableTableHeaderMatcher(object):
    """Matches legacy setting/variable table headers such as
    '| Setting | Value | Value |'."""

    def match(self, header):
        """Return True when every cell after the first is 'value' (any case).

        The previous `True if ... else False` ternary was redundant: the
        comparison already yields a boolean.
        """
        return all(cell.lower() == 'value' for cell in header[1:])
class OldStyleTestAndKeywordTableHeaderMatcher(object):
    """Matches legacy test/keyword table headers such as
    '| Test Case | Action | Argument | Argument |'."""

    def match(self, header):
        # Second cell must be 'action'; the rest must start with 'arg'.
        if header[1].lower() != 'action':
            return False
        return all(cell.lower().startswith('arg') for cell in header[2:])
|
Distrotech/bzr | refs/heads/distrotech-bzr | bzrlib/tests/per_repository_vf/test_repository.py | 2 | # Copyright (C) 2011 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
"""Tests for repository implementations - tests a repository format."""
from bzrlib import (
errors,
gpg,
inventory,
repository as _mod_repository,
revision as _mod_revision,
tests,
versionedfile,
vf_repository,
)
from bzrlib.symbol_versioning import deprecated_in
from bzrlib.tests.matchers import MatchesAncestry
from bzrlib.tests.per_repository_vf import (
TestCaseWithRepository,
all_repository_vf_format_scenarios,
)
from bzrlib.tests.scenarios import load_tests_apply_scenarios
load_tests = load_tests_apply_scenarios
class TestRepository(TestCaseWithRepository):
    """Tests of versioned-file repository attributes and basic operations.

    Parameterised over every versioned-file repository format scenario.
    """

    scenarios = all_repository_vf_format_scenarios()

    def assertFormatAttribute(self, attribute, allowed_values):
        """Assert that the format's 'attribute' is one of allowed_values."""
        repo = self.make_repository('repo')
        self.assertSubset([getattr(repo._format, attribute)], allowed_values)

    def test_attribute__fetch_order(self):
        """Test the _fetch_order attribute."""
        self.assertFormatAttribute('_fetch_order', ('topological', 'unordered'))

    def test_attribute__fetch_uses_deltas(self):
        """Test the _fetch_uses_deltas attribute."""
        self.assertFormatAttribute('_fetch_uses_deltas', (True, False))

    def test_attribute_inventories_store(self):
        """Test the existence of the inventories attribute."""
        tree = self.make_branch_and_tree('tree')
        repo = tree.branch.repository
        self.assertIsInstance(repo.inventories, versionedfile.VersionedFiles)

    def test_attribute_inventories_basics(self):
        """Test basic aspects of the inventories attribute."""
        tree = self.make_branch_and_tree('tree')
        repo = tree.branch.repository
        # VersionedFiles keys are tuples, hence the one-element tuple.
        rev_id = (tree.commit('a'),)
        tree.lock_read()
        self.addCleanup(tree.unlock)
        self.assertEqual(set([rev_id]), set(repo.inventories.keys()))

    def test_attribute_revision_store(self):
        """Test the existence of the revisions attribute."""
        tree = self.make_branch_and_tree('tree')
        repo = tree.branch.repository
        self.assertIsInstance(repo.revisions,
                              versionedfile.VersionedFiles)

    def test_attribute_revision_store_basics(self):
        """Test the basic behaviour of the revisions attribute."""
        tree = self.make_branch_and_tree('tree')
        repo = tree.branch.repository
        repo.lock_write()
        try:
            self.assertEqual(set(), set(repo.revisions.keys()))
            revid = (tree.commit("foo"),)
            self.assertEqual(set([revid]), set(repo.revisions.keys()))
            self.assertEqual({revid:()},
                             repo.revisions.get_parent_map([revid]))
        finally:
            repo.unlock()
        # Build a small merge graph: revid -> left & right -> merge.
        tree2 = self.make_branch_and_tree('tree2')
        tree2.pull(tree.branch)
        left_id = (tree2.commit('left'),)
        right_id = (tree.commit('right'),)
        tree.merge_from_branch(tree2.branch)
        merge_id = (tree.commit('merged'),)
        repo.lock_read()
        self.addCleanup(repo.unlock)
        self.assertEqual(set([revid, left_id, right_id, merge_id]),
                         set(repo.revisions.keys()))
        self.assertEqual({revid:(), left_id:(revid,), right_id:(revid,),
                          merge_id:(right_id, left_id)},
                         repo.revisions.get_parent_map(repo.revisions.keys()))

    def test_attribute_signature_store(self):
        """Test the existence of the signatures attribute."""
        tree = self.make_branch_and_tree('tree')
        repo = tree.branch.repository
        self.assertIsInstance(repo.signatures,
                              versionedfile.VersionedFiles)

    def test_exposed_versioned_files_are_marked_dirty(self):
        # Stores obtained while locked must refuse access after unlock.
        repo = self.make_repository('.')
        repo.lock_write()
        signatures = repo.signatures
        revisions = repo.revisions
        inventories = repo.inventories
        repo.unlock()
        self.assertRaises(errors.ObjectNotLocked,
                          signatures.keys)
        self.assertRaises(errors.ObjectNotLocked,
                          revisions.keys)
        self.assertRaises(errors.ObjectNotLocked,
                          inventories.keys)
        self.assertRaises(errors.ObjectNotLocked,
                          signatures.add_lines, ('foo',), [], [])
        self.assertRaises(errors.ObjectNotLocked,
                          revisions.add_lines, ('foo',), [], [])
        self.assertRaises(errors.ObjectNotLocked,
                          inventories.add_lines, ('foo',), [], [])

    def test__get_sink(self):
        repo = self.make_repository('repo')
        sink = repo._get_sink()
        self.assertIsInstance(sink, vf_repository.StreamSink)

    def test_get_serializer_format(self):
        repo = self.make_repository('.')
        format = repo.get_serializer_format()
        self.assertEqual(repo._serializer.format_num, format)

    def test_add_revision_inventory_sha1(self):
        inv = inventory.Inventory(revision_id='A')
        inv.root.revision = 'A'
        inv.root.file_id = 'fixed-root'
        # Insert the inventory on its own to an identical repository, to get
        # its sha1.
        reference_repo = self.make_repository('reference_repo')
        reference_repo.lock_write()
        reference_repo.start_write_group()
        inv_sha1 = reference_repo.add_inventory('A', inv, [])
        reference_repo.abort_write_group()
        reference_repo.unlock()
        # Now insert a revision with this inventory, and it should get the same
        # sha1.
        repo = self.make_repository('repo')
        repo.lock_write()
        repo.start_write_group()
        # NOTE(review): root_id is never used below — confirm whether it was
        # meant to feed the add_lines key instead of the literal.
        root_id = inv.root.file_id
        repo.texts.add_lines(('fixed-root', 'A'), [], [])
        repo.add_revision('A', _mod_revision.Revision(
            'A', committer='B', timestamp=0,
            timezone=0, message='C'), inv=inv)
        repo.commit_write_group()
        repo.unlock()
        repo.lock_read()
        self.assertEquals(inv_sha1, repo.get_revision('A').inventory_sha1)
        repo.unlock()

    def test_install_revisions(self):
        wt = self.make_branch_and_tree('source')
        wt.commit('A', allow_pointless=True, rev_id='A')
        repo = wt.branch.repository
        repo.lock_write()
        repo.start_write_group()
        repo.sign_revision('A', gpg.LoopbackGPGStrategy(None))
        repo.commit_write_group()
        repo.unlock()
        repo.lock_read()
        self.addCleanup(repo.unlock)
        repo2 = self.make_repository('repo2')
        revision = repo.get_revision('A')
        tree = repo.revision_tree('A')
        signature = repo.get_signature_text('A')
        repo2.lock_write()
        self.addCleanup(repo2.unlock)
        vf_repository.install_revisions(repo2, [(revision, tree, signature)])
        self.assertEqual(revision, repo2.get_revision('A'))
        self.assertEqual(signature, repo2.get_signature_text('A'))

    def test_attribute_text_store(self):
        """Test the existence of the texts attribute."""
        tree = self.make_branch_and_tree('tree')
        repo = tree.branch.repository
        self.assertIsInstance(repo.texts,
                              versionedfile.VersionedFiles)

    def test_iter_inventories_is_ordered(self):
        # just a smoke test
        tree = self.make_branch_and_tree('a')
        first_revision = tree.commit('')
        second_revision = tree.commit('')
        tree.lock_read()
        self.addCleanup(tree.unlock)
        revs = (first_revision, second_revision)
        invs = tree.branch.repository.iter_inventories(revs)
        for rev_id, inv in zip(revs, invs):
            self.assertEqual(rev_id, inv.revision_id)
            self.assertIsInstance(inv, inventory.CommonInventory)

    def test_item_keys_introduced_by(self):
        # Make a repo with one revision and one versioned file.
        tree = self.make_branch_and_tree('t')
        self.build_tree(['t/foo'])
        tree.add('foo', 'file1')
        tree.commit('message', rev_id='rev_id')
        repo = tree.branch.repository
        repo.lock_write()
        repo.start_write_group()
        try:
            repo.sign_revision('rev_id', gpg.LoopbackGPGStrategy(None))
        except errors.UnsupportedOperation:
            signature_texts = []
        else:
            signature_texts = ['rev_id']
        repo.commit_write_group()
        repo.unlock()
        repo.lock_read()
        self.addCleanup(repo.unlock)
        # Item keys will be in this order, for maximum convenience for
        # generating data to insert into knit repository:
        #   * files
        #   * inventory
        #   * signatures
        #   * revisions
        expected_item_keys = [
            ('file', 'file1', ['rev_id']),
            ('inventory', None, ['rev_id']),
            ('signatures', None, signature_texts),
            ('revisions', None, ['rev_id'])]
        item_keys = list(repo.item_keys_introduced_by(['rev_id']))
        item_keys = [
            (kind, file_id, list(versions))
            for (kind, file_id, versions) in item_keys]
        if repo.supports_rich_root():
            # Check for the root versioned file in the item_keys, then remove
            # it from streamed_names so we can compare that with
            # expected_record_names.
            # Note that the file keys can be in any order, so this test is
            # written to allow that.
            inv = repo.get_inventory('rev_id')
            root_item_key = ('file', inv.root.file_id, ['rev_id'])
            self.assertTrue(root_item_key in item_keys)
            item_keys.remove(root_item_key)
        self.assertEqual(expected_item_keys, item_keys)

    def test_attribute_text_store_basics(self):
        """Test the basic behaviour of the text store."""
        tree = self.make_branch_and_tree('tree')
        repo = tree.branch.repository
        file_id = "Foo:Bar"
        file_key = (file_id,)
        tree.lock_write()
        try:
            self.assertEqual(set(), set(repo.texts.keys()))
            tree.add(['foo'], [file_id], ['file'])
            tree.put_file_bytes_non_atomic(file_id, 'content\n')
            try:
                rev_key = (tree.commit("foo"),)
            except errors.IllegalPath:
                raise tests.TestNotApplicable(
                    'file_id %r cannot be stored on this'
                    ' platform for this repo format' % (file_id,))
            if repo._format.rich_root_data:
                # Rich-root formats also version the tree root itself.
                root_commit = (tree.get_root_id(),) + rev_key
                keys = set([root_commit])
                parents = {root_commit:()}
            else:
                keys = set()
                parents = {}
            keys.add(file_key + rev_key)
            parents[file_key + rev_key] = ()
            self.assertEqual(keys, set(repo.texts.keys()))
            self.assertEqual(parents,
                             repo.texts.get_parent_map(repo.texts.keys()))
        finally:
            tree.unlock()
        # Create left/right divergent texts and merge them.
        tree2 = self.make_branch_and_tree('tree2')
        tree2.pull(tree.branch)
        tree2.put_file_bytes_non_atomic('Foo:Bar', 'right\n')
        right_key = (tree2.commit('right'),)
        keys.add(file_key + right_key)
        parents[file_key + right_key] = (file_key + rev_key,)
        tree.put_file_bytes_non_atomic('Foo:Bar', 'left\n')
        left_key = (tree.commit('left'),)
        keys.add(file_key + left_key)
        parents[file_key + left_key] = (file_key + rev_key,)
        tree.merge_from_branch(tree2.branch)
        tree.put_file_bytes_non_atomic('Foo:Bar', 'merged\n')
        try:
            tree.auto_resolve()
        except errors.UnsupportedOperation:
            pass
        merge_key = (tree.commit('merged'),)
        keys.add(file_key + merge_key)
        parents[file_key + merge_key] = (file_key + left_key,
                                         file_key + right_key)
        repo.lock_read()
        self.addCleanup(repo.unlock)
        self.assertEqual(keys, set(repo.texts.keys()))
        self.assertEqual(parents, repo.texts.get_parent_map(repo.texts.keys()))
class TestCaseWithComplexRepository(TestCaseWithRepository):
    """Tests over a repository with ghosts and an orphaned inventory."""

    scenarios = all_repository_vf_format_scenarios()

    def setUp(self):
        super(TestCaseWithComplexRepository, self).setUp()
        tree_a = self.make_branch_and_tree('a')
        self.bzrdir = tree_a.branch.bzrdir
        # add a corrupt inventory 'orphan'
        # this may need some generalising for knits.
        tree_a.lock_write()
        try:
            tree_a.branch.repository.start_write_group()
            try:
                inv_file = tree_a.branch.repository.inventories
                inv_file.add_lines(('orphan',), [], [])
            except:
                # NOTE(review): committing on failure and aborting on success
                # looks inverted — confirm this is intentional.
                tree_a.branch.repository.commit_write_group()
                raise
            else:
                tree_a.branch.repository.abort_write_group()
        finally:
            tree_a.unlock()
        # add a real revision 'rev1'
        tree_a.commit('rev1', rev_id='rev1', allow_pointless=True)
        # add a real revision 'rev2' based on rev1
        tree_a.commit('rev2', rev_id='rev2', allow_pointless=True)
        # add a reference to a ghost
        tree_a.add_parent_tree_id('ghost1')
        try:
            tree_a.commit('rev3', rev_id='rev3', allow_pointless=True)
        except errors.RevisionNotPresent:
            raise tests.TestNotApplicable(
                "Cannot test with ghosts for this format.")
        # add another reference to a ghost, and a second ghost.
        tree_a.add_parent_tree_id('ghost1')
        tree_a.add_parent_tree_id('ghost2')
        tree_a.commit('rev4', rev_id='rev4', allow_pointless=True)

    def test_revision_trees(self):
        # revision_trees() must agree with one-at-a-time revision_tree().
        revision_ids = ['rev1', 'rev2', 'rev3', 'rev4']
        repository = self.bzrdir.open_repository()
        repository.lock_read()
        self.addCleanup(repository.unlock)
        trees1 = list(repository.revision_trees(revision_ids))
        trees2 = [repository.revision_tree(t) for t in revision_ids]
        self.assertEqual(len(trees1), len(trees2))
        for tree1, tree2 in zip(trees1, trees2):
            self.assertFalse(tree2.changes_from(tree1).has_changed())

    def test_get_deltas_for_revisions(self):
        # Batch delta extraction must agree with per-revision deltas.
        repository = self.bzrdir.open_repository()
        repository.lock_read()
        self.addCleanup(repository.unlock)
        revisions = [repository.get_revision(r) for r in
                     ['rev1', 'rev2', 'rev3', 'rev4']]
        deltas1 = list(repository.get_deltas_for_revisions(revisions))
        deltas2 = [repository.get_revision_delta(r.revision_id) for r in
                   revisions]
        self.assertEqual(deltas1, deltas2)

    def test_all_revision_ids(self):
        # all_revision_ids -> all revisions
        self.assertEqual(set(['rev1', 'rev2', 'rev3', 'rev4']),
                         set(self.bzrdir.open_repository().all_revision_ids()))

    def test_reserved_id(self):
        # Reserved revision ids ('xxx:') must be rejected by all add_* APIs.
        repo = self.make_repository('repository')
        repo.lock_write()
        repo.start_write_group()
        try:
            self.assertRaises(errors.ReservedId, repo.add_inventory,
                              'reserved:', None, None)
            self.assertRaises(errors.ReservedId, repo.add_inventory_by_delta,
                              "foo", [], 'reserved:', None)
            self.assertRaises(errors.ReservedId, repo.add_revision,
                              'reserved:', None)
        finally:
            repo.abort_write_group()
            repo.unlock()
class TestCaseWithCorruptRepository(TestCaseWithRepository):
    """Tests against a repository whose 'ghost' revision lists a parent
    ('the_ghost') that its inventory does not record -- the corruption
    under test."""

    scenarios = all_repository_vf_format_scenarios()

    def setUp(self):
        super(TestCaseWithCorruptRepository, self).setUp()
        # a inventory with no parents and the revision has parents..
        # i.e. a ghost.
        repo = self.make_repository('inventory_with_unnecessary_ghost')
        repo.lock_write()
        repo.start_write_group()
        inv = inventory.Inventory(revision_id = 'ghost')
        inv.root.revision = 'ghost'
        if repo.supports_rich_root():
            # Rich-root formats also need a text entry for the root dir.
            root_id = inv.root.file_id
            repo.texts.add_lines((root_id, 'ghost'), [], [])
        sha1 = repo.add_inventory('ghost', inv, [])
        rev = _mod_revision.Revision(
            timestamp=0, timezone=None, committer="Foo Bar <foo@example.com>",
            message="Message", inventory_sha1=sha1, revision_id='ghost')
        # 'ghost' claims a parent never added to the inventory: the corruption.
        rev.parent_ids = ['the_ghost']
        try:
            repo.add_revision('ghost', rev)
        except (errors.NoSuchRevision, errors.RevisionNotPresent):
            raise tests.TestNotApplicable(
                "Cannot test with ghosts for this format.")

        inv = inventory.Inventory(revision_id = 'the_ghost')
        inv.root.revision = 'the_ghost'
        if repo.supports_rich_root():
            root_id = inv.root.file_id
            repo.texts.add_lines((root_id, 'the_ghost'), [], [])
        sha1 = repo.add_inventory('the_ghost', inv, [])
        rev = _mod_revision.Revision(
            timestamp=0, timezone=None, committer="Foo Bar <foo@example.com>",
            message="Message", inventory_sha1=sha1, revision_id='the_ghost')
        rev.parent_ids = []
        repo.add_revision('the_ghost', rev)
        # check its setup usefully
        inv_weave = repo.inventories
        possible_parents = (None, (('ghost',),))
        self.assertSubset(inv_weave.get_parent_map([('ghost',)])[('ghost',)],
                          possible_parents)
        repo.commit_write_group()
        repo.unlock()

    def test_corrupt_revision_access_asserts_if_reported_wrong(self):
        repo_url = self.get_url('inventory_with_unnecessary_ghost')
        repo = _mod_repository.Repository.open(repo_url)
        m = MatchesAncestry(repo, 'ghost')
        reported_wrong = False
        try:
            if m.match(['the_ghost', 'ghost']) is not None:
                reported_wrong = True
        except errors.CorruptRepository:
            # caught the bad data:
            return
        if not reported_wrong:
            # Ancestry was reported correctly despite the corruption;
            # nothing further to assert.
            return
        self.assertRaises(errors.CorruptRepository, repo.get_revision, 'ghost')

    def test_corrupt_revision_get_revision_reconcile(self):
        # get_revision_reconcile must tolerate the corrupt 'ghost' revision.
        repo_url = self.get_url('inventory_with_unnecessary_ghost')
        repo = _mod_repository.Repository.open(repo_url)
        repo.get_revision_reconcile('ghost')
|
sangh/LaserShow | refs/heads/master | pyglet-hg/pyglet/image/codecs/pil.py | 10 | # ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
import os.path
from pyglet.gl import *
from pyglet.image import *
from pyglet.image.codecs import *
import Image
class PILImageDecoder(ImageDecoder):
def get_file_extensions(self):
# Only most common ones shown here
return ['.bmp', '.cur', '.gif', '.ico', '.jpg', '.jpeg', '.pcx', '.png',
'.tga', '.tif', '.tiff', '.xbm', '.xpm']
def decode(self, file, filename):
try:
image = Image.open(file)
except Exception, e:
raise ImageDecodeException(
'PIL cannot read %r: %s' % (filename or file, e))
image = image.transpose(Image.FLIP_TOP_BOTTOM)
# Convert bitmap and palette images to component
if image.mode in ('1', 'P'):
image = image.convert()
if image.mode not in ('L', 'LA', 'RGB', 'RGBA'):
raise ImageDecodeException('Unsupported mode "%s"' % image.mode)
type = GL_UNSIGNED_BYTE
width, height = image.size
return ImageData(width, height, image.mode, image.tostring())
class PILImageEncoder(ImageEncoder):
    """Image encoder backed by PIL's Image.save()."""

    def get_file_extensions(self):
        # Most common only
        return ['.bmp', '.eps', '.gif', '.jpg', '.jpeg',
                '.pcx', '.png', '.ppm', '.tiff', '.xbm']

    def encode(self, image, file, filename):
        """Encode *image* into *file* via PIL.

        Raises ImageEncodeException if PIL fails to write the image.
        """
        # File format is guessed from filename extension, otherwise defaults
        # to PNG.
        pil_format = (filename and os.path.splitext(filename)[1][1:]) or 'png'

        if pil_format.lower() == 'jpg':
            # PIL knows this format as 'JPEG', not 'jpg'.
            pil_format = 'JPEG'

        image = image.get_image_data()
        format = image.format
        if format != 'RGB':
            # Only save in RGB or RGBA formats.
            format = 'RGBA'
        # Negative pitch: pyglet stores rows bottom-up, PIL wants top-down.
        pitch = -(image.width * len(format))

        # Note: Don't try and use frombuffer(..); different versions of
        # PIL will orient the image differently.
        pil_image = Image.fromstring(
            format, (image.width, image.height), image.get_data(format, pitch))

        try:
            pil_image.save(file, pil_format)
        except Exception, e:
            raise ImageEncodeException(e)
def get_decoders():
    # Module entry point used by the pyglet.image.codecs registry.
    return [PILImageDecoder()]
def get_encoders():
    # Module entry point used by the pyglet.image.codecs registry.
    return [PILImageEncoder()]
|
douggeiger/gnuradio | refs/heads/master | gr-filter/python/filter/qa_freq_xlating_fir_filter.py | 51 | #!/usr/bin/env python
#
# Copyright 2008,2010,2012,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
from gnuradio import gr, gr_unittest, filter, blocks
import cmath, math
def fir_filter(x, taps, decim=1):
    """Reference FIR filter: convolve *x* with *taps*, keeping every
    *decim*-th output sample.

    The input is zero-padded on the left by len(taps)-1 samples so the
    output has len(x)//decim (rounded up) entries, matching a streaming
    filter's transient behaviour.
    """
    ntaps = len(taps)
    padded = [0] * (ntaps - 1) + x
    out = []
    for start in range(0, len(x), decim):
        acc = 0
        # reversed(taps)[k] pairs the newest sample with the first tap.
        for k, tap in enumerate(reversed(taps)):
            acc += tap * padded[start + k]
        out.append(acc)
    return out
def sig_source_s(samp_rate, freq, amp, N):
    """Generate N short-int samples of a sine wave at *freq* Hz.

    The amplitude is hard-coded to 100 to match the short-int test
    vectors; *amp* is accepted only for signature compatibility with
    sig_source_c and is unused.

    Rewritten with list comprehensions instead of Python-2-only
    xrange/map: behaviour on Python 2 is unchanged (a list is returned,
    as map() did), and the helper now also works on Python 3.
    """
    t = [float(i) / samp_rate for i in range(N)]
    return [int(100 * math.sin(2. * math.pi * freq * x)) for x in t]
def sig_source_c(samp_rate, freq, amp, N):
    """Generate N samples of a unit complex exponential at *freq* Hz.

    *amp* is accepted for signature symmetry but unused; the amplitude
    is fixed at 1.

    Rewritten with list comprehensions instead of Python-2-only
    xrange/map: behaviour on Python 2 is unchanged (a list is returned,
    as map() did), and the helper now also works on Python 3.
    """
    t = [float(i) / samp_rate for i in range(N)]
    return [math.cos(2. * math.pi * freq * x) +
            1j * math.sin(2. * math.pi * freq * x) for x in t]
def mix(lo, data):
    """Return the element-wise product of the LO samples and the data
    samples (i.e. mix *data* with the local oscillator *lo*)."""
    mixed = []
    for lo_sample, data_sample in zip(lo, data):
        mixed.append(lo_sample * data_sample)
    return mixed
class test_freq_xlating_filter(gr_unittest.TestCase):
    """QA for the freq_xlating_fir_filter_* blocks.

    Each test builds a stimulus, computes the expected stream with the
    pure-python reference model above (mix to baseband, then FIR filter),
    runs the GNU Radio block, and compares the two outputs.

    The twelve test methods previously duplicated the whole driver; the
    shared logic now lives in _run_xlating_test, and each test supplies
    only the source block, filter block, decimation and precision.
    """

    def setUp(self):
        self.tb = gr.top_block()

    def tearDown(self):
        self.tb = None

    def generate_ccf_source(self):
        self.fs = fs = 1
        self.fc = fc = 0.3
        self.bw = bw = 0.1
        self.taps = filter.firdes.low_pass(1, fs, bw, bw/4)
        times = xrange(100)
        self.src_data = map(lambda t: cmath.exp(-2j*cmath.pi*fc/fs*(t/100.0)), times)

    def generate_ccc_source(self):
        self.fs = fs = 1
        self.fc = fc = 0.3
        self.bw = bw = 0.1
        self.taps = filter.firdes.complex_band_pass(1, fs, -bw/2, bw/2, bw/4)
        times = xrange(100)
        self.src_data = map(lambda t: cmath.exp(-2j*cmath.pi*fc/fs*(t/100.0)), times)

    def generate_fcf_source(self):
        self.fs = fs = 1
        self.fc = fc = 0.3
        self.bw = bw = 0.1
        self.taps = filter.firdes.low_pass(1, fs, bw, bw/4)
        times = xrange(100)
        self.src_data = map(lambda t: math.sin(2*cmath.pi*fc/fs*(t/100.0)), times)

    def generate_fcc_source(self):
        self.fs = fs = 1
        self.fc = fc = 0.3
        self.bw = bw = 0.1
        self.taps = filter.firdes.complex_band_pass(1, fs, -bw/2, bw/2, bw/4)
        times = xrange(100)
        self.src_data = map(lambda t: math.sin(2*cmath.pi*fc/fs*(t/100.0)), times)

    def generate_scf_source(self):
        self.fs = fs = 1
        self.fc = fc = 0.3
        self.bw = bw = 0.12
        self.taps = filter.firdes.low_pass(1, fs, bw, bw/4)
        times = xrange(100)
        self.src_data = map(lambda t: int(100*math.sin(2*cmath.pi*fc/fs*(t/100.0))), times)

    def generate_scc_source(self):
        self.fs = fs = 1
        self.fc = fc = 0.3
        self.bw = bw = 0.12
        self.taps = filter.firdes.complex_band_pass(1, fs, -bw/2, bw/2, bw/4)
        times = xrange(100)
        self.src_data = map(lambda t: int(100*math.sin(2*cmath.pi*fc/fs*(t/100.0))), times)

    def _run_xlating_test(self, src_ctor, op_ctor, decim, places):
        # Shared driver for every test: compute the expected output with
        # the reference model, run the flowgraph, then compare.
        lo = sig_source_c(self.fs, -self.fc, 1, len(self.src_data))
        despun = mix(lo, self.src_data)
        expected_data = fir_filter(despun, self.taps, decim)

        src = src_ctor(self.src_data)
        op = op_ctor(decim, self.taps, self.fc, self.fs)
        dst = blocks.vector_sink_c()
        self.tb.connect(src, op, dst)
        self.tb.run()
        result_data = dst.data()
        self.assertComplexTuplesAlmostEqual(expected_data, result_data, places)

    def test_fir_filter_ccf_001(self):
        self.generate_ccf_source()
        self._run_xlating_test(blocks.vector_source_c,
                               filter.freq_xlating_fir_filter_ccf, 1, 5)

    def test_fir_filter_ccf_002(self):
        self.generate_ccf_source()
        self._run_xlating_test(blocks.vector_source_c,
                               filter.freq_xlating_fir_filter_ccf, 4, 5)

    def test_fir_filter_ccc_001(self):
        self.generate_ccc_source()
        self._run_xlating_test(blocks.vector_source_c,
                               filter.freq_xlating_fir_filter_ccc, 1, 5)

    def test_fir_filter_ccc_002(self):
        self.generate_ccc_source()
        self._run_xlating_test(blocks.vector_source_c,
                               filter.freq_xlating_fir_filter_ccc, 4, 5)

    def test_fir_filter_fcf_001(self):
        self.generate_fcf_source()
        self._run_xlating_test(blocks.vector_source_f,
                               filter.freq_xlating_fir_filter_fcf, 1, 5)

    def test_fir_filter_fcf_002(self):
        self.generate_fcf_source()
        self._run_xlating_test(blocks.vector_source_f,
                               filter.freq_xlating_fir_filter_fcf, 4, 5)

    def test_fir_filter_fcc_001(self):
        self.generate_fcc_source()
        self._run_xlating_test(blocks.vector_source_f,
                               filter.freq_xlating_fir_filter_fcc, 1, 5)

    def test_fir_filter_fcc_002(self):
        self.generate_fcc_source()
        self._run_xlating_test(blocks.vector_source_f,
                               filter.freq_xlating_fir_filter_fcc, 4, 5)

    def test_fir_filter_scf_001(self):
        self.generate_scf_source()
        # Short-int sources are compared with one fewer decimal place.
        self._run_xlating_test(blocks.vector_source_s,
                               filter.freq_xlating_fir_filter_scf, 1, 4)

    def test_fir_filter_scf_002(self):
        self.generate_scf_source()
        self._run_xlating_test(blocks.vector_source_s,
                               filter.freq_xlating_fir_filter_scf, 4, 4)

    def test_fir_filter_scc_001(self):
        self.generate_scc_source()
        self._run_xlating_test(blocks.vector_source_s,
                               filter.freq_xlating_fir_filter_scc, 1, 4)

    def test_fir_filter_scc_002(self):
        self.generate_scc_source()
        self._run_xlating_test(blocks.vector_source_s,
                               filter.freq_xlating_fir_filter_scc, 4, 4)
if __name__ == '__main__':
gr_unittest.run(test_freq_xlating_filter, "test_freq_xlating_filter.xml")
|
devs1991/test_edx_docmode | refs/heads/master | venv/lib/python2.7/site-packages/networkx/utils/union_find.py | 44 | """
Union-find data structure.
"""
# Copyright (C) 2004-2011 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
import networkx as nx
class UnionFind:
    """Union-find (disjoint-set) data structure with path compression
    and union by weight.

    Each UnionFind instance X maintains a family of disjoint sets of
    hashable objects, supporting two operations:

    - X[item] returns a name for the set containing the given item.
      Each set is named by an arbitrarily-chosen member; as long as the
      set is unchanged it keeps the same name. Unknown items are added
      as new singleton sets.

    - X.union(item1, item2, ...) merges the sets containing each item
      into a single larger set, adding any unknown items first.

    Based on Josiah Carlson's code
    (http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/215912)
    with significant additional changes by D. Eppstein
    (http://www.ics.uci.edu/~eppstein/PADS/UnionFind.py).
    """

    def __init__(self):
        """Create a new empty union-find structure."""
        self.weights = {}
        self.parents = {}

    def __getitem__(self, object):
        """Find and return the name (root) of the set containing *object*."""
        parents = self.parents
        if object not in parents:
            # First sighting: start a new singleton set.
            parents[object] = object
            self.weights[object] = 1
            return object

        # Climb to the root, remembering every node visited.
        path = [object]
        root = parents[object]
        while root != path[-1]:
            path.append(root)
            root = parents[root]

        # Path compression: point each visited node directly at the root.
        for node in path:
            parents[node] = root
        return root

    def __iter__(self):
        """Iterate through all items ever found or unioned by this structure."""
        return iter(self.parents)

    def union(self, *objects):
        """Find the sets containing the objects and merge them all."""
        roots = [self[obj] for obj in objects]
        # Union by weight: the heaviest root absorbs the others
        # (ties broken by comparing the roots themselves).
        heaviest = max(roots, key=lambda r: (self.weights[r], r))
        for r in roots:
            if r != heaviest:
                self.weights[heaviest] += self.weights[r]
                self.parents[r] = heaviest
|
rven/odoo | refs/heads/14.0-fix-partner-merge-mail-activity | addons/stock/__init__.py | 6 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from . import controllers
from . import models
from . import report
from . import wizard
from odoo import api, SUPERUSER_ID
# TODO: Apply proper fix & remove in master
def pre_init_hook(cr):
    """Delete stale stock-related ir.model.data rows before installing.

    (See module-level TODO: proper fix pending; remove in master.)
    """
    env = api.Environment(cr, SUPERUSER_ID, {})
    stale_records = env['ir.model.data'].search([
        ('model', 'like', '%stock%'),
        ('module', '=', 'stock'),
    ])
    stale_records.unlink()
def _assign_default_mail_template_picking_id(cr, registry):
    """Assign the stock delivery-confirmation mail template to every
    company that does not have one configured yet."""
    env = api.Environment(cr, SUPERUSER_ID, {})
    template = env.ref('stock.mail_template_data_delivery_confirmation',
                       raise_if_not_found=False)
    if not template:
        # Template not installed: leave companies untouched.
        return
    companies_missing_template = env['res.company'].search([
        ('stock_mail_confirmation_template_id', '=', False)
    ])
    companies_missing_template.write({
        'stock_mail_confirmation_template_id': template.id,
    })
|
jikortus/pykickstart | refs/heads/master | translation-canary/translation_canary/translated/test_usability.py | 5 | # Check a .mo file for basic usability
#
# This will test that the file is well-formed and that the Plural-Forms value
# is parseable
#
# Copyright (C) 2015 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
# Red Hat Author(s): David Shea <dshea@redhat.com>
import gettext
def test_usability(mofile):
    """Check that *mofile* is a basically usable .mo catalog.

    Constructing gettext.GNUTranslations parses the whole binary file,
    so any structural problem (or unparseable Plural-Forms header, per
    this module's purpose) surfaces as an exception; success is the test.
    """
    with open(mofile, "rb") as fp:
        # Result discarded deliberately -- parsing without error is enough.
        _t = gettext.GNUTranslations(fp=fp)
|
minorua/QGIS | refs/heads/master | python/plugins/processing/algs/grass7/ext/r_li_shape.py | 45 | # -*- coding: utf-8 -*-
"""
***************************************************************************
r_li_shape.py
-------------
Date : February 2016
Copyright : (C) 2016 by Médéric Ribreux
Email : medspx at medspx dot fr
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Médéric Ribreux'
__date__ = 'February 2016'
__copyright__ = '(C) 2016, Médéric Ribreux'
from .r_li import checkMovingWindow, configFile
def checkParameterValuesBeforeExecuting(alg, parameters, context):
    """Validate r.li.shape parameters via the moving-window consistency
    check shared by all r.li algorithms (see .r_li)."""
    return checkMovingWindow(alg, parameters, context)
def processCommand(alg, parameters, context, feedback):
    """Write the r.li configuration file required before the GRASS
    command itself is run (see .r_li.configFile)."""
    configFile(alg, parameters, context, feedback)
|
TeamExodus/external_chromium_org | refs/heads/EXODUS-5.1 | build/android/pylib/junit/test_dispatcher.py | 38 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
def RunTests(tests, runner_factory):
  """Runs a set of java tests on the host, one fresh runner per test.

  Stops at the first failing test (matching the previous short-circuit
  behaviour of all()).

  Return:
    A tuple containing the results & the exit code: (None, 0) when every
    test passed, (None, 1) otherwise.
  """
  for test in tests:
    runner = runner_factory(None, None)
    runner.SetUp()
    result = runner.RunTest(test)
    runner.TearDown()
    if result != 0:
      # A failure short-circuits the run, as all() did.
      return (None, 1)
  return (None, 0)
|
vivitsu/dbxuploader | refs/heads/master | main.py | 1 | import urllib.request
import shutil
import argparse
import urllib.parse
import os
from dbx import DBX
parser = argparse.ArgumentParser()
parser.add_argument("url", help="The URL of the file you want to upload")
args = parser.parse_args()
url = args.url
o = urllib.parse.urlparse(url)
"""
Will work in very select cases, for e.g.:
http://static.googleusercontent.com/media/research.google.com/en/us/archive/mapreduce-osdi04.pdf
"""
path_elems = o.path.split("/")
filename = path_elems[-1]
dbx = DBX()
# TODO: Check if file exists, and don't download if it does
with urllib.request.urlopen(url) as response, open(filename, "wb") as out_file:
shutil.copyfileobj(response, out_file)
dbx.upload(filename)
if os.path.exists(filename):
os.remove(filename)
else:
print "File %s not found." % filename |
xhat/micropython | refs/heads/master | tests/bench/var-8-namedtuple-1st.py | 52 | import bench
from _collections import namedtuple
# Benchmark fixture: cost of reading a namedtuple's FIRST field, matching
# the file name var-8-namedtuple-1st.
T = namedtuple("Tup", ["num", "bar"])

def test(num):
    # `num` is supplied by the bench harness but unused; the iteration
    # count comes from the tuple's first field instead.
    t = T(20000000, 0)
    i = 0
    # Hot loop: t.num is re-read on every iteration -- that attribute
    # lookup is the operation being benchmarked.
    while i < t.num:
        i += 1

bench.run(test)
|
davidzchen/tensorflow | refs/heads/master | tensorflow/python/tools/strip_unused_lib.py | 23 | # pylint: disable=g-bad-file-header
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities to remove unneeded nodes from a GraphDefs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
from google.protobuf import text_format
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import node_def_pb2
from tensorflow.python.framework import graph_util
from tensorflow.python.platform import gfile
def strip_unused(input_graph_def, input_node_names, output_node_names,
                 placeholder_type_enum):
  """Removes unused nodes from a GraphDef.

  Args:
    input_graph_def: A graph with nodes we want to prune.
    input_node_names: A list of the nodes we use as inputs.
    output_node_names: A list of the output nodes.
    placeholder_type_enum: The AttrValue enum for the placeholder data type, or
        a list that specifies one value per input node name.

  Returns:
    A `GraphDef` with all unnecessary ops removed.

  Raises:
    ValueError: If any element in `input_node_names` refers to a tensor instead
      of an operation.
    KeyError: If any element in `input_node_names` is not found in the graph.
  """
  for name in input_node_names:
    if ":" in name:
      raise ValueError("Name '%s' appears to refer to a Tensor, "
                       "not a Operation." % name)

  # Here we replace the nodes we're going to override as inputs with
  # placeholders so that any unused nodes that are inputs to them are
  # automatically stripped out by extract_sub_graph().
  # (Idiom fix: was a redundant identity set comprehension.)
  not_found = set(input_node_names)
  inputs_replaced_graph_def = graph_pb2.GraphDef()
  for node in input_graph_def.node:
    if node.name in input_node_names:
      not_found.remove(node.name)
      placeholder_node = node_def_pb2.NodeDef()
      placeholder_node.op = "Placeholder"
      placeholder_node.name = node.name
      if isinstance(placeholder_type_enum, list):
        # One dtype per input node when a list was supplied.
        input_node_index = input_node_names.index(node.name)
        placeholder_node.attr["dtype"].CopyFrom(
            attr_value_pb2.AttrValue(type=placeholder_type_enum[
                input_node_index]))
      else:
        placeholder_node.attr["dtype"].CopyFrom(
            attr_value_pb2.AttrValue(type=placeholder_type_enum))
      # Preserve shape metadata so downstream shape inference still works.
      if "_output_shapes" in node.attr:
        placeholder_node.attr["_output_shapes"].CopyFrom(node.attr[
            "_output_shapes"])
      if "shape" in node.attr:
        placeholder_node.attr["shape"].CopyFrom(node.attr["shape"])
      inputs_replaced_graph_def.node.extend([placeholder_node])
    else:
      inputs_replaced_graph_def.node.extend([copy.deepcopy(node)])

  if not_found:
    raise KeyError("The following input nodes were not found: %s" % not_found)

  output_graph_def = graph_util.extract_sub_graph(inputs_replaced_graph_def,
                                                  output_node_names)
  return output_graph_def
def strip_unused_from_files(input_graph, input_binary, output_graph,
                            output_binary, input_node_names, output_node_names,
                            placeholder_type_enum):
  """Removes unused nodes from a graph file.

  Reads *input_graph* (binary or text proto per *input_binary*), strips
  everything not needed to compute *output_node_names* from
  *input_node_names*, and writes the result to *output_graph* (binary or
  text per *output_binary*). Node-name arguments are comma-separated
  strings, CLI-style.

  Returns -1 on argument/file errors (after printing a message); returns
  None on success.
  """
  if not gfile.Exists(input_graph):
    print("Input graph file '" + input_graph + "' does not exist!")
    return -1

  if not output_node_names:
    print("You need to supply the name of a node to --output_node_names.")
    return -1

  input_graph_def = graph_pb2.GraphDef()
  # Text protos must be read in text mode; binary protos in binary mode.
  mode = "rb" if input_binary else "r"
  with gfile.GFile(input_graph, mode) as f:
    if input_binary:
      input_graph_def.ParseFromString(f.read())
    else:
      text_format.Merge(f.read(), input_graph_def)

  output_graph_def = strip_unused(input_graph_def,
                                  input_node_names.split(","),
                                  output_node_names.split(","),
                                  placeholder_type_enum)

  if output_binary:
    with gfile.GFile(output_graph, "wb") as f:
      f.write(output_graph_def.SerializeToString())
  else:
    with gfile.GFile(output_graph, "w") as f:
      f.write(text_format.MessageToString(output_graph_def))
  print("%d ops in the final graph." % len(output_graph_def.node))
dankeder/ansible-modules-extras | refs/heads/devel | packaging/svr4pkg.py | 51 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Boyd Adamson <boyd () boydadamson.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = '''
---
module: svr4pkg
short_description: Manage Solaris SVR4 packages
description:
- Manages SVR4 packages on Solaris 10 and 11.
- These were the native packages on Solaris <= 10 and are available
as a legacy feature in Solaris 11.
- Note that this is a very basic packaging system. It will not enforce
dependencies on install or remove.
version_added: "0.9"
author: Boyd Adamson
options:
name:
description:
- Package name, e.g. C(SUNWcsr)
required: true
state:
description:
- Whether to install (C(present)), or remove (C(absent)) a package.
- If the package is to be installed, then I(src) is required.
- The SVR4 package system doesn't provide an upgrade operation. You need to uninstall the old, then install the new package.
required: true
choices: ["present", "absent"]
src:
description:
- Specifies the location to install the package from. Required when C(state=present).
- "Can be any path acceptable to the C(pkgadd) command's C(-d) option. e.g.: C(somefile.pkg), C(/dir/with/pkgs), C(http:/server/mypkgs.pkg)."
- If using a file or directory, they must already be accessible by the host. See the M(copy) module for a way to get them there.
proxy:
description:
- HTTP[s] proxy to be used if C(src) is a URL.
response_file:
description:
- Specifies the location of a response file to be used if package expects input on install. (added in Ansible 1.4)
required: false
zone:
description:
- Whether to install the package only in the current zone, or install it into all zones.
- The installation into all zones works only if you are working with the global zone.
required: false
default: "all"
choices: ["current", "all"]
version_added: "1.6"
category:
description:
- Install/Remove category instead of a single package.
required: false
choices: ["true", "false"]
version_added: "1.6"
'''
EXAMPLES = '''
# Install a package from an already copied file
- svr4pkg: name=CSWcommon src=/tmp/cswpkgs.pkg state=present
# Install a package directly from an http site
- svr4pkg: name=CSWpkgutil src=http://get.opencsw.org/now state=present zone=current
# Install a package with a response file
- svr4pkg: name=CSWggrep src=/tmp/third-party.pkg response_file=/tmp/ggrep.response state=present
# Ensure that a package is not installed.
- svr4pkg: name=SUNWgnome-sound-recorder state=absent
# Ensure that a category is not installed.
- svr4pkg: name=FIREFOX state=absent category=true
'''
import os
import tempfile
def package_installed(module, name, category):
    """Return True if the SVR4 package (or, with category=True, the
    category) `name` is installed.

    Runs ``pkginfo -q [-c] <name>``; pkginfo exits 0 when the package is
    present and non-zero otherwise.
    """
    cmd = [module.get_bin_path('pkginfo', True), '-q']
    if category:
        cmd.append('-c')
    cmd.append(name)
    # Pass the argv list directly instead of ' '.join(cmd): run_command
    # would only shlex-split the joined string again, and joining breaks
    # if a name ever contains whitespace.
    rc, out, err = module.run_command(cmd)
    return rc == 0
def create_admin_file():
    """Write a temporary pkgadd/pkgrm admin(4) file that answers every
    prompt non-interactively, and return its path.

    The caller is responsible for unlinking the file when done.
    """
    fd, admin_path = tempfile.mkstemp(prefix='ansible_svr4pkg', text=True)
    settings = '''
mail=
instance=unique
partial=nocheck
runlevel=quit
idepend=nocheck
rdepend=nocheck
space=quit
setuid=nocheck
conflict=nocheck
action=nocheck
networktimeout=60
networkretries=3
authentication=quit
keystore=/var/sadm/security
proxy=
basedir=default
'''
    os.write(fd, settings)
    os.close(fd)
    return admin_path
def run_command(module, cmd):
    """Resolve cmd[0] to its absolute path via the module and execute it.

    Note: the caller's list is mutated in place (cmd[0] is replaced by
    the resolved binary path).
    """
    cmd[0] = module.get_bin_path(cmd[0], True)
    return module.run_command(cmd)
def package_install(module, name, src, proxy, response_file, zone, category):
    """Install a package (or whole category) from `src` using pkgadd,
    driven by a fully non-interactive admin file.

    Returns the (rc, stdout, stderr) triple from pkgadd.
    """
    adminfile = create_admin_file()
    cmd = ['pkgadd', '-n']
    if zone == 'current':
        # -G restricts the installation to the current zone only.
        cmd.append('-G')
    cmd.extend(['-a', adminfile, '-d', src])
    if proxy is not None:
        cmd.extend(['-x', proxy])
    if response_file is not None:
        cmd.extend(['-r', response_file])
    if category:
        # -Y treats `name` as a category rather than a package instance.
        cmd.append('-Y')
    cmd.append(name)
    (rc, out, err) = run_command(module, cmd)
    os.unlink(adminfile)
    return (rc, out, err)
def package_uninstall(module, name, src, category):
    """Remove a package (or, with category=True, a whole category) with
    pkgrm, driven by a non-interactive admin file.

    Returns the (rc, stdout, stderr) triple from pkgrm.
    """
    adminfile = create_admin_file()
    cmd = ['pkgrm', '-na', adminfile]
    if category:
        cmd.append('-Y')
    cmd.append(name)
    (rc, out, err) = run_command(module, cmd)
    os.unlink(adminfile)
    return (rc, out, err)
def main():
    """Entry point of the svr4pkg Ansible module.

    Installs or removes an SVR4 package/category and exits via
    module.exit_json with name/state/changed/failed (+ truncated
    stdout/stderr) in the result.
    """
    module = AnsibleModule(
        argument_spec = dict(
            name = dict(required = True),
            state = dict(required = True, choices=['present', 'absent']),
            src = dict(default = None),
            proxy = dict(default = None),
            response_file = dict(default = None),
            zone = dict(required=False, default = 'all', choices=['current','all']),
            category = dict(default=False, type='bool')
        ),
        supports_check_mode=True
    )
    state = module.params['state']
    name = module.params['name']
    src = module.params['src']
    proxy = module.params['proxy']
    response_file = module.params['response_file']
    zone = module.params['zone']
    category = module.params['category']
    # rc stays None when no action was taken; None matches neither return
    # code tuple below, so changed=False and failed=False in that case.
    rc = None
    out = ''
    err = ''
    result = {}
    result['name'] = name
    result['state'] = state
    if state == 'present':
        if src is None:
            module.fail_json(name=name,
                             msg="src is required when state=present")
        if not package_installed(module, name, category):
            if module.check_mode:
                module.exit_json(changed=True)
            (rc, out, err) = package_install(module, name, src, proxy, response_file, zone, category)
            # Stdout is normally empty but for some packages can be
            # very long and is not often useful
            if len(out) > 75:
                out = out[:75] + '...'
    elif state == 'absent':
        if package_installed(module, name, category):
            if module.check_mode:
                module.exit_json(changed=True)
            (rc, out, err) = package_uninstall(module, name, src, category)
            out = out[:75]
    # Success, Warning, Interruption, Reboot all, Reboot this return codes
    if rc in (0, 2, 3, 10, 20):
        result['changed'] = True
    # no install nor uninstall, or failed
    else:
        result['changed'] = False
    # Fatal error, Administration, Administration Interaction return codes
    if rc in (1, 4 , 5):
        result['failed'] = True
    else:
        result['failed'] = False
    if out:
        result['stdout'] = out
    if err:
        result['stderr'] = err
    module.exit_json(**result)
# import module snippets
from ansible.module_utils.basic import *
main()
|
51reboot/actual_09_homework | refs/heads/master | 08/tanshuai/cmdb_v5/log2db.py | 2 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import dbutils
import time
s = time.time()
# print 'start after:', time.time() - s
def log2db(log_files='', topn=10, fetch=True):
    """Read or write aggregated access-log rows.

    fetch=True:  return the top `topn` rows of the accesslog table as a
                 list of dicts with keys (id, ip, url, status, count).
    fetch=False: parse the access-log file at path `log_files`, count
                 hits per (ip, url, status) triple and bulk-insert them;
                 returns True on success, False on any parse/DB failure.
    """
    if fetch:  # query path
        columns = ('id', 'ip', 'url', 'status', 'count')
        sql = 'select * from accesslog order by count desc limit %s'
        _count, rows = dbutils.execute_sql(sql, args=topn, fetch=True)
        return [dict(zip(columns, row)) for row in rows]
    # insert path
    sql = 'insert into accesslog(ip,url,status,count) values(%s,%s,%s,%s)'
    counters = {}
    try:
        # `with` guarantees the file is closed even when a malformed
        # line raises (the original leaked the handle in that case).
        with open(log_files, 'r') as logfile:
            for line in logfile:
                fields = line.split()
                # Apache combined-log positions: 0=ip, 6=url, 8=status.
                key = (fields[0], fields[6], fields[8])
                counters[key] = counters.get(key, 0) + 1
        _count, _rt_list = dbutils.execute_sql(sql, args_list=counters.items(), fetch='insertLogs')
        return _count != 0
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate; any parse or DB error reports failure.
        return False
if __name__ == "__main__":
log_files = 'access.txt'
print log2db(log_files=log_files, fetch=False) # 写入logs数据
# print log2db(log_files=log_files, topn=8) # 读取logs数据
# print 'ok:', time.time() - s
# 建库SQL语句
# create table accesslog (
# id int primary key auto_increment,
# ip varchar(128),
# url text,
# status int,
# count int
# )default charset=utf8; |
azat/qtcreator | refs/heads/master | scripts/deployqt.py | 5 | #!/usr/bin/env python
################################################################################
# Copyright (c) 2011 Nokia Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of Nokia Corporation, nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
################################################################################
import os
import sys
import getopt
import subprocess
import re
import string
import shutil
from glob import glob
ignoreErrors = False
debug_build = False
def usage():
print "Usage: %s <creator_install_dir> [qmake_path]" % os.path.basename(sys.argv[0])
def which(program):
    """Locate `program` like the shell `which` does.

    If `program` contains a directory component, only that path (plus a
    Windows ".exe" fallback) is considered; otherwise every entry of
    PATH is searched. Returns the resolved path or None.
    """
    def is_exe(fpath):
        return os.path.exists(fpath) and os.access(fpath, os.X_OK)

    directory, _ = os.path.split(program)
    if directory:
        candidates = [program]
        if sys.platform.startswith('win'):
            candidates.append(program + ".exe")
        for candidate in candidates:
            if is_exe(candidate):
                return candidate
    else:
        for path in os.environ["PATH"].split(os.pathsep):
            exe_file = os.path.join(path, program)
            candidates = [exe_file]
            if sys.platform.startswith('win'):
                candidates.append(exe_file + ".exe")
            for candidate in candidates:
                if is_exe(candidate):
                    return candidate
    return None
def is_debug(fpath):
    """Heuristically decide whether a Windows binary is a debug build by
    checking (via MSVC's dumpbin) whether it imports QtCored4.dll.

    Windows-only: requires dumpbin on PATH.
    """
    # bootstrap exception
    # The debug QtCore DLL cannot import itself, so detect it by name.
    if fpath.endswith('QtCore4d.dll'):
        return True
    output = subprocess.check_output(['dumpbin', '/imports', fpath])
    return output.find('QtCored4.dll') != -1
def is_debug_build(install_dir):
    """True if the installed qtcreator.exe links against the debug Qt DLLs."""
    creator_exe = os.path.join(install_dir, 'bin', 'qtcreator.exe')
    return is_debug(creator_exe)
def op_failed(details = None):
if details != None:
print details
if ignoreErrors == False:
print("Error: operation failed!")
sys.exit(2)
else:
print("Error: operation failed, but proceeding gracefully.")
def fix_rpaths_helper(chrpath_bin, install_dir, dirpath, filenames):
    # patch file
    # Rewrite each binary's rpath (with chrpath -r) to point, via $ORIGIN,
    # at the bundled Qt libraries in <install_dir>/lib/qtcreator, computed
    # relative to the binary's own directory so the tree stays relocatable.
    for filename in filenames:
        fpath = os.path.join(dirpath, filename)
        relpath = os.path.relpath(install_dir+'/lib/qtcreator', dirpath)
        command = [chrpath_bin, '-r', '$ORIGIN/'+relpath, fpath]
        print fpath, ':', command
        try:
            subprocess.check_call(command)
        except:
            # Any chrpath failure goes through the common error handler,
            # which exits unless --ignore-errors was given.
            op_failed()
def check_unix_binary_exec_helper(dirpath, filename):
    """Tell a real executable binary apart from an executable script by
    inspecting the first two bytes for a shebang (unix only).

    Returns True for a binary, False for a "#!" script, and None when the
    file is missing or not executable.
    """
    fpath = os.path.join(dirpath, filename)
    if not (os.path.exists(fpath) and os.access(fpath, os.X_OK)):
        return None
    with open(fpath) as f:
        header = f.read(2)
    return header != "#!"
def check_unix_library_helper(dirpath, filename):
    """True when `filename` looks like a shared library (contains ".so")
    and is a real file rather than a symlink (unix only)."""
    if '.so' not in filename:
        return False
    fpath = os.path.join(dirpath, filename)
    return not os.path.islink(fpath)
def fix_rpaths(chrpath_bin, install_dir):
    """Run chrpath over every binary/library under bin/ and lib/ so their
    rpaths point at the bundled Qt libraries (unix only)."""
    print "fixing rpaths..."
    for dirpath, dirnames, filenames in os.walk(os.path.join(install_dir, 'bin')):
        #TODO remove library_helper once all libs moved out of bin/ on linux
        filenames = [filename for filename in filenames if check_unix_binary_exec_helper(dirpath, filename) or check_unix_library_helper(dirpath, filename)]
        fix_rpaths_helper(chrpath_bin, install_dir, dirpath, filenames)
    for dirpath, dirnames, filenames in os.walk(os.path.join(install_dir, 'lib')):
        # Under lib/ only real shared objects (not symlinks) are patched.
        filenames = [filename for filename in filenames if check_unix_library_helper(dirpath, filename)]
        fix_rpaths_helper(chrpath_bin, install_dir, dirpath, filenames)
def windows_debug_files_filter(filename):
    """Return True for MSVC build-artifact files (.lib/.pdb/.exp/.ilk)
    that must not be copied into a deployment."""
    # str.endswith accepts a tuple of suffixes, replacing the manual loop.
    return filename.endswith(('.lib', '.pdb', '.exp', '.ilk'))
def copy_ignore_patterns_helper(dir, filenames):
    # shutil.copytree `ignore` callback: returns the names in `dir` that
    # should NOT be copied.
    # NOTE(review): on non-Windows this returns the full list (ignore
    # everything); in practice the callback is only installed on Windows
    # (see copy_qt_libs), so this branch looks unreachable -- confirm.
    if not sys.platform.startswith('win'):
        return filenames
    # Skip DLLs belonging to the other build flavour (debug vs release)...
    if debug_build:
        wrong_dlls = filter(lambda filename: filename.endswith('.dll') and not is_debug(os.path.join(dir, filename)), filenames)
    else:
        wrong_dlls = filter(lambda filename: filename.endswith('.dll') and is_debug(os.path.join(dir, filename)), filenames)
    # ...plus MSVC debug artifacts (.lib/.pdb/.exp/.ilk).
    filenames = wrong_dlls + filter(windows_debug_files_filter, filenames)
    return filenames
def copy_qt_libs(install_dir, qt_libs_dir, qt_plugin_dir, qt_import_dir, plugins, imports):
    """Copy the Qt shared libraries, plugin directories and QML import
    directories into the Qt Creator install tree.

    On Windows DLLs go to bin/ and are filtered by build flavour
    (debug/release); elsewhere *.so.* files go to lib/qtcreator and
    symlinks are recreated rather than followed.
    """
    print "copying Qt libraries..."
    if sys.platform.startswith('win'):
        libraries = glob(os.path.join(qt_libs_dir, '*.dll'))
    else:
        libraries = glob(os.path.join(qt_libs_dir, '*.so.*'))
    if sys.platform.startswith('win'):
        dest = os.path.join(install_dir, 'bin')
    else:
        dest = os.path.join(install_dir, 'lib', 'qtcreator')
    if sys.platform.startswith('win'):
        # Keep only the DLLs that match the flavour of qtcreator.exe.
        if debug_build:
            libraries = filter(lambda library: is_debug(library), libraries)
        else:
            libraries = filter(lambda library: not is_debug(library), libraries)
    for library in libraries:
        print library, '->', dest
        if os.path.islink(library):
            # Recreate the symlink at the destination instead of copying
            # the target a second time.
            linkto = os.readlink(library)
            try:
                os.symlink(linkto, os.path.join(dest, os.path.basename(library)))
            except:
                op_failed("Link already exists!")
        else:
            shutil.copy(library, dest)
    copy_ignore_func = None
    if sys.platform.startswith('win'):
        copy_ignore_func = copy_ignore_patterns_helper
    print "Copying plugins:", plugins
    for plugin in plugins:
        target = os.path.join(install_dir, 'bin', plugin)
        if (os.path.exists(target)):
            shutil.rmtree(target)
        shutil.copytree(os.path.join(qt_plugin_dir, plugin), target, ignore=copy_ignore_func, symlinks=True)
    # NOTE(review): the message below says "plugins" but this loop copies
    # the QML import directories.
    print "Copying plugins:", imports
    for qtimport in imports:
        target = os.path.join(install_dir, 'bin', qtimport)
        if (os.path.exists(target)):
            shutil.rmtree(target)
        shutil.copytree(os.path.join(qt_import_dir, qtimport), target, ignore=copy_ignore_func, symlinks=True)
def copy_translations(install_dir, qt_tr_dir, tr_catalogs):
    """Copy the Qt translation catalogs matching the languages Qt Creator
    itself ships (deduced from the installed qtcreator_*.qm files)."""
    langs = []
    tr_dir = os.path.join(install_dir, 'share', 'qtcreator', 'translations')
    # Captures the language code from names like "qtcreator_de.qm".
    p = re.compile(r'_(.*).qm')
    for dirpath, dirnames, filenames in os.walk(tr_dir):
        for filename in filenames:
            if filename.endswith('.qm') and string.find(filename, 'qtcreator_') != -1:
                lang = p.findall(filename)
                if lang != '':
                    langs += lang
    print "copying translations..."
    for lang in langs:
        for catalog in tr_catalogs:
            copy_file = "%s_%s.qm" % (catalog, lang)
            copy_src = os.path.join(qt_tr_dir, copy_file)
            copy_dst = os.path.join(tr_dir, copy_file)
            print copy_src, '->', copy_dst
            shutil.copy(copy_src, copy_dst)
def readQmakeVar(qmake_bin, var):
    """Return the value of a `qmake -query` variable, without the
    trailing newline."""
    pipe = os.popen(' '.join([qmake_bin, '-query', var]))
    try:
        return pipe.read().rstrip('\n')
    finally:
        # The original never closed the pipe, leaking a file descriptor
        # (and the child's exit status) per call.
        pipe.close()
def main():
    """Parse arguments, locate qmake (and chrpath on unix), then copy Qt
    libraries, plugins, imports and translations into the Qt Creator
    install dir, fixing rpaths afterwards on unix."""
    try:
        opts, args = getopt.gnu_getopt(sys.argv[1:], 'hi', ['help', 'ignore-errors'])
    except:
        usage()
        sys.exit(2)
    for o, a in opts:
        if o in ('-h', '--help'):
            usage()
            sys.exit(0)
        if o in ('-i', '--ignore-errors'):
            global ignoreErrors
            ignoreErrors = True
            print "Note: Ignoring all errors"
    if len(args) < 1:
        usage()
        sys.exit(2)
    install_dir = args[0]
    # Optional second argument overrides which qmake to query.
    qmake_bin = 'qmake'
    if len(args) > 1:
        qmake_bin = args[1]
    qmake_bin = which(qmake_bin)
    if qmake_bin == None:
        print "Cannot find required binary 'qmake'."
        sys.exit(2)
    if not sys.platform.startswith('win'):
        # chrpath is only needed for the unix rpath fix-up step.
        chrpath_bin = which('chrpath')
        if chrpath_bin == None:
            print "Cannot find required binary 'chrpath'."
            sys.exit(2)
    QT_INSTALL_LIBS = readQmakeVar(qmake_bin, 'QT_INSTALL_LIBS')
    QT_INSTALL_BINS = readQmakeVar(qmake_bin, 'QT_INSTALL_BINS')
    QT_INSTALL_PLUGINS = readQmakeVar(qmake_bin, 'QT_INSTALL_PLUGINS')
    QT_INSTALL_IMPORTS = readQmakeVar(qmake_bin, 'QT_INSTALL_IMPORTS')
    QT_INSTALL_TRANSLATIONS = readQmakeVar(qmake_bin, 'QT_INSTALL_TRANSLATIONS')
    plugins = ['accessible', 'designer', 'iconengines', 'imageformats', 'sqldrivers']
    imports = ['Qt', 'QtWebKit']
    tr_catalogs = ['assistant', 'designer', 'qt', 'qt_help']
    if sys.platform.startswith('win'):
        # Decide once whether this is a debug install; copy_qt_libs and
        # the copytree ignore-callback both read this global.
        global debug_build
        debug_build = is_debug_build(install_dir)
    if sys.platform.startswith('win'):
        # On Windows the Qt DLLs live in QT_INSTALL_BINS, not ..._LIBS.
        copy_qt_libs(install_dir, QT_INSTALL_BINS, QT_INSTALL_PLUGINS, QT_INSTALL_IMPORTS, plugins, imports)
    else:
        copy_qt_libs(install_dir, QT_INSTALL_LIBS, QT_INSTALL_PLUGINS, QT_INSTALL_IMPORTS, plugins, imports)
    copy_translations(install_dir, QT_INSTALL_TRANSLATIONS, tr_catalogs)
    if not sys.platform.startswith('win'):
        fix_rpaths(chrpath_bin, install_dir)
if __name__ == "__main__":
if sys.platform == 'darwin':
print "Mac OS is not supported by this script, please use macqtdeploy!"
sys.exit(2)
else:
main()
|
kcarnold/autograd | refs/heads/master | autograd/numpy/gpu_array_node.py | 7 | from __future__ import absolute_import
from autograd.core import Node, primitive, cast, getval
from . import numpy_wrapper as anp
from .numpy_extra import ArrayNode, array_dtype_mappings, SparseArray
from .use_gpu_numpy import use_gpu_numpy
assert use_gpu_numpy()
class GpuArrayNode(ArrayNode):
    # Autograd graph node wrapping a GPU array (anp.garray); registered
    # below as the node type for garray values and gpu_float32 dtype.

    @staticmethod
    def zeros_like(value):
        # Zero-filled gradient buffer with the same shape, kept on the GPU.
        return anp.array(anp.zeros(value.shape), dtype=anp.gpu_float32)

    @staticmethod
    def cast(value):
        # Cast arbitrary values to a GPU float32 array via the
        # differentiable primitive defined below.
        return gpu_arraycast(value)

    @staticmethod
    def new_sparse_array(template, idx, x):
        # Sparse gradient contribution (e.g. from indexing operations).
        return GpuSparseArray(template, idx, x)
Node.type_mappings[anp.garray] = GpuArrayNode
array_dtype_mappings[anp.gpu_float32] = GpuArrayNode
@primitive
def gpu_arraycast(val):
    # Differentiable cast of `val` to a GPU float32 array.
    return anp.array(val, dtype=anp.gpu_float32)
# Casting is an elementwise identity, so the gradient passes through as-is.
gpu_arraycast.defgrad(lambda ans, val: lambda g : g)
# GPU variant of SparseArray; behavior is inherited unchanged, the subclass
# exists so it can be mapped to GpuArrayNode below.
class GpuSparseArray(SparseArray): pass
Node.type_mappings[GpuSparseArray] = GpuArrayNode
|
JosmanPS/scikit-learn | refs/heads/master | sklearn/linear_model/tests/test_bayes.py | 299 | # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
#
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import SkipTest
from sklearn.linear_model.bayes import BayesianRidge, ARDRegression
from sklearn import datasets
from sklearn.utils.testing import assert_array_almost_equal
def test_bayesian_on_diabetes():
    # Test BayesianRidge on diabetes
    # NOTE(review): skipped unconditionally (marked "XFailed"); everything
    # below is currently dead code kept for when the test is re-enabled.
    raise SkipTest("XFailed Test")
    diabetes = datasets.load_diabetes()
    X, y = diabetes.data, diabetes.target
    clf = BayesianRidge(compute_score=True)
    # Test with more samples than features
    clf.fit(X, y)
    # Test that scores are increasing at each iteration
    assert_array_equal(np.diff(clf.scores_) > 0, True)
    # Test with more features than samples
    X = X[:5, :]
    y = y[:5]
    clf.fit(X, y)
    # Test that scores are increasing at each iteration
    assert_array_equal(np.diff(clf.scores_) > 0, True)
def test_toy_bayesian_ridge_object():
    """BayesianRidge should approximately learn the identity function on
    a tiny 1-D dataset."""
    X = np.array([[1], [2], [6], [8], [10]])
    Y = np.array([1, 2, 6, 8, 10])
    reg = BayesianRidge(compute_score=True)
    reg.fit(X, Y)
    test = [[1], [3], [4]]
    expected = [1, 3, 4]
    assert_array_almost_equal(reg.predict(test), expected, 2)
def test_toy_ard_object():
    """ARDRegression should approximately learn the identity function on
    a tiny 1-D dataset."""
    X = np.array([[1], [2], [3]])
    Y = np.array([1, 2, 3])
    reg = ARDRegression(compute_score=True)
    reg.fit(X, Y)
    test = [[1], [3], [4]]
    expected = [1, 3, 4]
    assert_array_almost_equal(reg.predict(test), expected, 2)
|
KiranJKurian/XScheduler | refs/heads/master | venv/lib/python2.7/site-packages/flask/testsuite/test_apps/lib/python2.5/site-packages/site_package/__init__.py | 1799 | import flask
app = flask.Flask(__name__)
|
kevinburke/hamms | refs/heads/master | setup.py | 1 | from setuptools import setup
# XXX: also update version in hamms/__init__.py
__version__ = '1.3'  # single source of truth for the release number

setup(
    name='hamms',
    packages=['hamms'],
    version=__version__,
    description='Malformed servers to test your HTTP client',
    author='Kevin Burke',
    author_email='kev@inburke.com',
    url='https://github.com/kevinburke/hamms',
    keywords=['testing', 'server', 'http',],
    # XXX, pin these down
    # NOTE(review): unpinned runtime deps; new flask/httpbin/twisted
    # releases can break installs -- decide on version ranges.
    install_requires=['flask', 'httpbin', 'twisted'],
)
|
idrogeno/enigma2 | refs/heads/master | lib/python/Tools/BoundFunction.py | 120 | class boundFunction:
    def __init__(self, fnc, *args, **kwargs):
        # Pre-bind positional and keyword arguments to `fnc`,
        # functools.partial-style.
        self.fnc = fnc
        self.args = args
        self.kwargs = kwargs
def __call__(self, *args, **kwargs):
newkwargs = self.kwargs
newkwargs.update(kwargs)
return self.fnc(*self.args + args, **newkwargs)
|
openstack/nova-solver-scheduler | refs/heads/master | nova_solverscheduler/tests/scheduler/solvers/constraints/test_aggregate_disk.py | 1 | # Copyright (c) 2014 Cisco Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from nova import context
from nova import test
from nova_solverscheduler.scheduler.solvers.constraints import aggregate_disk
from nova_solverscheduler.tests.scheduler import solver_scheduler_fakes \
as fakes
class TestAggregateDiskConstraint(test.NoDBTestCase):
    """Checks that AggregateDiskConstraint applies per-aggregate
    disk_allocation_ratio metadata when building its host/instance
    feasibility matrix."""

    def setUp(self):
        super(TestAggregateDiskConstraint, self).setUp()
        self.constraint_cls = aggregate_disk.AggregateDiskConstraint
        self.context = context.RequestContext('fake', 'fake')
        self._generate_fake_constraint_input()

    def _generate_fake_constraint_input(self):
        # Request: 2 instances, each needing 2GB (root 1GB + ephemeral 1GB).
        self.fake_filter_properties = {
            'context': self.context,
            'instance_type': {'root_gb': 1, 'ephemeral_gb': 1, 'swap': 0},
            'instance_uuids': ['fake_uuid_%s' % x for x in range(2)],
            'num_instances': 2}
        # Three identical hosts: 1GB free, 2GB total usable disk; only the
        # per-aggregate allocation ratio (mocked below) differs.
        host1 = fakes.FakeSolverSchedulerHostState('host1', 'node1',
            {'free_disk_mb': 1024, 'total_usable_disk_gb': 2})
        host2 = fakes.FakeSolverSchedulerHostState('host2', 'node1',
            {'free_disk_mb': 1024, 'total_usable_disk_gb': 2})
        host3 = fakes.FakeSolverSchedulerHostState('host3', 'node1',
            {'free_disk_mb': 1024, 'total_usable_disk_gb': 2})
        self.fake_hosts = [host1, host2, host3]

    @mock.patch('nova_solverscheduler.scheduler.solvers.utils.'
                'aggregate_values_from_key')
    def test_get_constraint_matrix(self, agg_mock):
        self.flags(disk_allocation_ratio=1.0)

        def _agg_mock_side_effect(*args, **kwargs):
            # Per-host aggregate metadata; host3 has none and falls back
            # to the disk_allocation_ratio flag set above.
            if args[0].host == 'host1':
                return set(['2.0', '3.0'])
            if args[0].host == 'host2':
                return set(['3.0'])
            if args[0].host == 'host3':
                return set([])
        agg_mock.side_effect = _agg_mock_side_effect
        # Rows = hosts, columns = "can this host take >= k+1 instances".
        expected_cons_mat = [
            [True, False],
            [True, True],
            [False, False]]
        cons_mat = self.constraint_cls().get_constraint_matrix(
            self.fake_hosts, self.fake_filter_properties)
        agg_mock.assert_any_call(self.fake_hosts[0], 'disk_allocation_ratio')
        agg_mock.assert_any_call(self.fake_hosts[1], 'disk_allocation_ratio')
        agg_mock.assert_any_call(self.fake_hosts[2], 'disk_allocation_ratio')
        self.assertEqual(expected_cons_mat, cons_mat)
|
mxOBS/deb-pkg_trusty_chromium-browser | refs/heads/master | mojo/public/tools/bindings/pylib/mojom_tests/parse/__init__.py | 12133432 | |
agiliq/django | refs/heads/master | django/contrib/gis/tests/gis_migrations/migrations/__init__.py | 12133432 | |
akaihola/django | refs/heads/master | tests/regressiontests/db_typecasts/__init__.py | 12133432 | |
uclouvain/osis | refs/heads/dev | base/tests/scripts/__init__.py | 12133432 | |
richardcornish/richardcornish | refs/heads/master | richardcornish/contact/templatetags/__init__.py | 12133432 | |
TanguyPatte/phantomjs-packaging | refs/heads/master | src/qt/qtwebkit/Tools/Scripts/webkitpy/port/pulseaudio_sanitizer.py | 122 | # Copyright (C) 2010 Google Inc. All rights reserved.
# Copyrigth (C) 2012 Intel Corporation
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the Google name nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
import os
import subprocess
_log = logging.getLogger(__name__)
# Shared by GTK and EFL for pulseaudio sanitizing before running tests.
class PulseAudioSanitizer:
    """Unloads pulseaudio's module-stream-restore (via the pactl CLI)
    before tests run and restores it afterwards, so per-stream volumes
    remembered from earlier runs cannot skew multimedia test results."""

    def unload_pulseaudio_module(self):
        # Unload pulseaudio's module-stream-restore, since it remembers
        # volume settings from different runs, and could affect
        # multimedia tests results
        self._pa_module_index = -1  # -1 means "nothing to restore later"
        with open(os.devnull, 'w') as devnull:
            try:
                pactl_process = subprocess.Popen(["pactl", "list", "short", "modules"], stdout=subprocess.PIPE, stderr=devnull)
                pactl_process.wait()
            except OSError:
                # pactl might not be available.
                _log.debug('pactl not found. Please install pulseaudio-utils to avoid some potential media test failures.')
                return
        modules_list = pactl_process.communicate()[0]
        for module in modules_list.splitlines():
            if module.find("module-stream-restore") >= 0:
                # Some pulseaudio-utils versions don't provide
                # the index, just an empty string
                # NOTE(review): when found, the index is kept as a *string*;
                # restore_pulseaudio_module compares it against int -1.
                self._pa_module_index = module.split('\t')[0] or -1
                try:
                    # Since they could provide other stuff (not an index
                    # nor an empty string, let's make sure this is an int.
                    if int(self._pa_module_index) != -1:
                        pactl_process = subprocess.Popen(["pactl", "unload-module", self._pa_module_index])
                        pactl_process.wait()
                        if pactl_process.returncode == 0:
                            _log.debug('Unloaded module-stream-restore successfully')
                        else:
                            _log.debug('Unloading module-stream-restore failed')
                except ValueError:
                    # pactl should have returned an index if the module is found
                    _log.debug('Unable to parse module index. Please check if your pulseaudio-utils version is too old.')
                # Only the first matching module is handled.
                return

    def restore_pulseaudio_module(self):
        # If pulseaudio's module-stream-restore was previously unloaded,
        # restore it back. We shouldn't need extra checks here, since an
        # index != -1 here means we successfully unloaded it previously.
        if self._pa_module_index != -1:
            with open(os.devnull, 'w') as devnull:
                pactl_process = subprocess.Popen(["pactl", "load-module", "module-stream-restore"], stdout=devnull, stderr=devnull)
                pactl_process.wait()
                if pactl_process.returncode == 0:
                    _log.debug('Restored module-stream-restore successfully')
                else:
                    _log.debug('Restoring module-stream-restore failed')
|
wikimedia/operations-debs-check_ganglia | refs/heads/master | check_ganglia/nagios.py | 2 | #!/usr/bin/python
import sys
import optparse
from constants import *
class OptionParser (optparse.OptionParser):
    """optparse parser pre-loaded with the standard Nagios plugin flags:
    -w/--warn, -c/--critical and -v/--verbose (repeatable)."""

    def __init__ (self):
        optparse.OptionParser.__init__(self)
        self.add_option('-w', '--warn',
                help='Warn threshold.')
        self.add_option('-c', '--critical',
                help='Critical threshold.')
        self.add_option('-v', '--verbose', action='count',
                help='Make output more verbose.')  # fixed typo "verbse"
def result (service, status, msg=None, perfdata=None):
    """Print a Nagios plugin status line and exit with `status` as the
    process exit code.

    perfdata: iterable of (label, value) pairs appended after a '|'
    separator in Nagios performance-data format.
    """
    text = [ '%s %s' % (service, STATUS[status])]
    if msg:
        text.append(': %s' % msg)
    if perfdata:
        text.append(' | ')
        text.append(' '.join(['%s=%s;' % x for x in perfdata]))
    print ''.join(text)
    sys.exit(status)
if __name__ == '__main__':
p = OptionParser()
opts, args = p.parse_args()
result(STATUS_WTF, 'Thanks for playing.')
|
spirrello/spirrello-pynet-work | refs/heads/master | applied_python/lib/python2.7/site-packages/pylint/test/functional/eval_used.py | 3 | """test for eval usage"""
eval('os.listdir(".")') # [eval-used]
eval('os.listdir(".")', globals={}) # [eval-used]
eval('os.listdir(".")', globals=globals()) # [eval-used]
def func():
    """Trigger the eval-used message inside a local (function) scope."""
    eval('b = 1') # [eval-used]
|
ovnicraft/odoo | refs/heads/8.0 | addons/website_sale_options/models/sale_order.py | 237 | # -*- coding: utf-8 -*-
from openerp import SUPERUSER_ID
from openerp.osv import osv, orm, fields
from openerp.tools.translate import _
class sale_order_line(osv.Model):
    _inherit = "sale.order.line"

    _columns = {
        # Parent line this line is an option of; cascades on delete so
        # options disappear together with their parent line.
        'linked_line_id': fields.many2one('sale.order.line', 'Linked Order Line', domain="[('order_id','!=',order_id)]", ondelete='cascade'),
        # Inverse side: the option lines attached to this line.
        'option_line_ids': fields.one2many('sale.order.line', 'linked_line_id', string='Options Linked'),
    }
class sale_order(osv.Model):
    _inherit = "sale.order"

    def _cart_find_product_line(self, cr, uid, ids, product_id=None, line_id=None, context=None, **kwargs):
        # Narrow the lines found by the parent implementation to those
        # attached to the same parent line (linked_line_id) and carrying
        # exactly the same set of selected optional products.
        line_ids = super(sale_order, self)._cart_find_product_line(cr, uid, ids, product_id, line_id, context=context)
        if line_id:
            return line_ids
        linked_line_id = kwargs.get('linked_line_id')
        optional_product_ids = kwargs.get('optional_product_ids')
        for so in self.browse(cr, uid, ids, context=context):
            domain = [('id', 'in', line_ids)]
            domain += linked_line_id and [('linked_line_id', '=', linked_line_id)] or [('linked_line_id', '=', False)]
            if optional_product_ids:
                domain += [('option_line_ids.product_id', '=', pid) for pid in optional_product_ids]
            else:
                domain += [('option_line_ids', '=', False)]
            # NOTE(review): returns on the first iteration, so only the
            # first order in `ids` is considered -- confirm callers
            # always pass a single id.
            return self.pool.get('sale.order.line').search(cr, SUPERUSER_ID, domain, context=context)

    def _cart_update(self, cr, uid, ids, product_id=None, line_id=None, add_qty=0, set_qty=0, context=None, **kwargs):
        """ Add or set product quantity, add_qty can be negative """
        value = super(sale_order, self)._cart_update(cr, uid, ids, product_id, line_id, add_qty, set_qty, context=context, **kwargs)
        linked_line_id = kwargs.get('linked_line_id')
        sol = self.pool.get('sale.order.line')
        line = sol.browse(cr, SUPERUSER_ID, value.get('line_id'), context=context)
        for so in self.browse(cr, uid, ids, context=context):
            if linked_line_id and linked_line_id in map(int,so.order_line):
                # Mark this line as an option of its parent line and make
                # the relationship visible in the line description.
                linked = sol.browse(cr, SUPERUSER_ID, linked_line_id, context=context)
                line.write({
                    "name": _("%s\nOption for: %s") % (line.name, linked.product_id.name_get()[0][1]),
                    "linked_line_id": linked_line_id
                })
            # select linked product
            option_ids = [l for l in so.order_line if l.linked_line_id.id == line.id]
            # update line
            # Propagate the quantity change to every option attached to
            # this line so option quantities stay in sync with the parent.
            for l in option_ids:
                super(sale_order, self)._cart_update(cr, uid, ids, l.product_id.id, l.id, add_qty, set_qty, context=context, **kwargs)
            value['option_ids'] = [l.id for l in option_ids]
        return value
|
zestrada/nova-cs498cc | refs/heads/master | nova/hooks.py | 10 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Decorator and config option definitions for adding custom code (hooks)
around callables.
Any method may have the 'add_hook' decorator applied, which yields the
ability to invoke Hook objects before or after the method. (i.e. pre and
post)
Hook objects are loaded by HookLoaders. Each named hook may invoke multiple
Hooks.
Example Hook object:
class MyHook(object):
def pre(self, *args, **kwargs):
# do stuff before wrapped callable runs
def post(self, rv, *args, **kwargs):
# do stuff after wrapped callable runs
"""
import functools
import stevedore
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
NS = 'nova.hooks'
_HOOKS = {} # hook name => hook manager
class HookManager(stevedore.hook.HookManager):
    # Loads every Hook object registered under `name` in the nova.hooks
    # setuptools entry-point namespace and runs their pre/post methods.

    def __init__(self, name):
        # invoke_on_load creates an instance of the Hook class
        super(HookManager, self).__init__(NS, name, invoke_on_load=True)

    def run_pre(self, name, args, kwargs):
        # Call each hook's optional pre(*args, **kwargs) in load order;
        # hooks without a `pre` attribute are skipped.
        for e in self.extensions:
            obj = e.obj
            pre = getattr(obj, 'pre', None)
            if pre:
                LOG.debug(_("Running %(name)s pre-hook: %(obj)s") % locals())
                pre(*args, **kwargs)

    def run_post(self, name, rv, args, kwargs):
        # Post hooks run in reverse load order so pre/post pairs nest.
        for e in reversed(self.extensions):
            obj = e.obj
            post = getattr(obj, 'post', None)
            if post:
                LOG.debug(_("Running %(name)s post-hook: %(obj)s") % locals())
                post(rv, *args, **kwargs)
def add_hook(name):
    """Decorator: run the hooks registered under `name` around the
    decorated callable.

    Loaded Hook objects may define pre(*args, **kwargs), invoked before
    the wrapped callable, and post(rv, *args, **kwargs), invoked after
    it with the return value.
    """
    def decorator(f):
        @functools.wraps(f)
        def wrapped(*args, **kwargs):
            # Managers are created lazily and cached per hook name.
            manager = _HOOKS.setdefault(name, HookManager(name))
            manager.run_pre(name, args, kwargs)
            rv = f(*args, **kwargs)
            manager.run_post(name, rv, args, kwargs)
            return rv
        return wrapped
    return decorator
def reset():
    """Clear loaded hooks."""
    # Drops every cached HookManager; managers (and their hooks) are
    # re-created lazily on the next call of a decorated function.
    _HOOKS.clear()
|
SnabbCo/neutron | refs/heads/master | neutron/tests/unit/vmware/db/test_nsx_db.py | 13 | # Copyright 2013 VMware, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from neutron import context
from neutron.db import api as db
from neutron.db import models_v2
from neutron.openstack.common.db import exception as d_exc
from neutron.plugins.vmware.dbexts import db as nsx_db
from neutron.plugins.vmware.dbexts import models
from neutron.tests import base
class NsxDBTestCase(base.BaseTestCase):
    """Tests for the Neutron <-> NSX port-mapping DB helpers."""

    def setUp(self):
        super(NsxDBTestCase, self).setUp()
        db.configure_db()
        self.ctx = context.get_admin_context()
        self.addCleanup(db.clear_db)

    def _setup_neutron_network_and_port(self, network_id, port_id):
        # Seed the Neutron DB with one network and a single port on it.
        with self.ctx.session.begin(subtransactions=True):
            self.ctx.session.add(models_v2.Network(id=network_id))
            self.ctx.session.add(
                models_v2.Port(id=port_id,
                               network_id=network_id,
                               mac_address='foo_mac_address',
                               admin_state_up=True,
                               status='ACTIVE',
                               device_id='',
                               device_owner=''))

    def test_add_neutron_nsx_port_mapping_handle_duplicate_constraint(self):
        net_id = 'foo_neutron_network_id'
        port_id = 'foo_neutron_port_id'
        nsx_port_id = 'foo_nsx_port_id'
        nsx_switch_id = 'foo_nsx_switch_id'
        self._setup_neutron_network_and_port(net_id, port_id)

        # Insert the identical mapping twice: the second insert hits the
        # duplicate constraint, which the helper must swallow silently.
        for _ in range(2):
            nsx_db.add_neutron_nsx_port_mapping(
                self.ctx.session, port_id, nsx_switch_id, nsx_port_id)

        mapping = (self.ctx.session.query(models.NeutronNsxPortMapping).
                   filter_by(neutron_id=port_id).one())
        self.assertEqual(nsx_port_id, mapping.nsx_port_id)
        self.assertEqual(port_id, mapping.neutron_id)

    def test_add_neutron_nsx_port_mapping_raise_on_duplicate_constraint(self):
        net_id = 'foo_neutron_network_id'
        port_id = 'foo_neutron_port_id'
        first_nsx_port = 'foo_nsx_port_id_1'
        second_nsx_port = 'foo_nsx_port_id_2'
        nsx_switch_id = 'foo_nsx_switch_id'
        self._setup_neutron_network_and_port(net_id, port_id)
        nsx_db.add_neutron_nsx_port_mapping(
            self.ctx.session, port_id, nsx_switch_id, first_nsx_port)

        # Re-mapping the same neutron port to a *different* NSX port id
        # must propagate the duplicate-entry error.
        self.assertRaises(d_exc.DBDuplicateEntry,
                          nsx_db.add_neutron_nsx_port_mapping,
                          self.ctx.session, port_id,
                          nsx_switch_id, second_nsx_port)

    def test_add_neutron_nsx_port_mapping_raise_integrity_constraint(self):
        # No neutron port was created, so the FK integrity check fails.
        self.assertRaises(d_exc.DBError,
                          nsx_db.add_neutron_nsx_port_mapping,
                          self.ctx.session, 'foo_neutron_port_id',
                          'foo_nsx_switch_id', 'foo_nsx_port_id')
|
b-com/watcher-metering | refs/heads/master | watcher_metering/tests/agent/test_agent.py | 1 | # -*- encoding: utf-8 -*-
# Copyright (c) 2015 b<>com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import unicode_literals
from collections import OrderedDict
import os
import types
from mock import MagicMock
from mock import Mock
from mock import patch
from mock import PropertyMock
import msgpack
import operator
from oslo_config import cfg
from oslotest.base import BaseTestCase
from stevedore.driver import DriverManager
from stevedore.extension import Extension
from watcher_metering.agent.agent import Agent
from watcher_metering.agent.measurement import Measurement
from watcher_metering.tests.agent.agent_fixtures import ConfFixture
from watcher_metering.tests.agent.agent_fixtures import DummyMetricPuller
from watcher_metering.tests.agent.agent_fixtures import FakeMetricPuller
class TestAgent(BaseTestCase):
    """Unit tests for the metering Agent.

    All external boundaries (nanomsg socket, stevedore driver loading,
    oslo.config CLI parsing) are patched out in setUp().
    """

    # Kept for backward compatibility with external readers; setUp()
    # rebinds a per-instance list so patches never leak across tests.
    patches = []

    def setUp(self):
        super(TestAgent, self).setUp()
        # BUGFIX: 'patches' used to be extended on the *class* attribute,
        # so the list grew with every test case and tearDown() re-stopped
        # patches belonging to earlier tests. Use a fresh instance list.
        self.patches = []
        self.conf = cfg.ConfigOpts()

        # To load the drivers without using the config file
        self.useFixture(ConfFixture(self.conf))

        def _fake_parse(self, args=None):
            # Ignore any real CLI arguments during tests.
            # (default changed from mutable [] -> None; 'args' is unused)
            return cfg.ConfigOpts._parse_cli_opts(self, [])

        _fake_parse_method = types.MethodType(_fake_parse, self.conf)
        self.conf._parse_cli_opts = _fake_parse_method

        # First dependency to be returned
        self.dummy_driver_manager = DriverManager.make_test_instance(
            extension=Extension(
                name=DummyMetricPuller.get_name(),
                entry_point='fake.entry.point',
                plugin=DummyMetricPuller,
                obj=None,
            ),
            namespace='TESTING',
        )
        # 2nd dependency to be returned
        self.fake_driver_manager = DriverManager.make_test_instance(
            extension=Extension(
                name=FakeMetricPuller.get_name(),
                entry_point='fake.entry.point',
                plugin=FakeMetricPuller,
                obj=None,
            ),
            namespace='TESTING',
        )

        self.defaults_drivers = {
            DummyMetricPuller.get_name(): self.dummy_driver_manager,
            FakeMetricPuller.get_name(): self.fake_driver_manager,
        }

        def _fake_loader(name, **kw):
            return self.defaults_drivers[name]

        # Patches the agent socket
        self.m_agent_socket = MagicMock(autospec=True)

        self.patches.extend([
            # Deactivates the nanomsg socket
            patch(
                "watcher_metering.agent.agent.nanomsg.Socket",
                new=self.m_agent_socket,
            ),
            # Sets the test namespace to 'TESTING'
            patch.object(
                Agent,
                "namespace",
                PropertyMock(return_value='TESTING'),
            ),
            # Patches the driver manager to return our test drivers
            # instead of the real ones
            patch(
                "watcher_metering.load.loader.DriverManager",
                MagicMock(side_effect=_fake_loader),
            ),
        ])

        # Applies all of our patches before each test
        for _patch in self.patches:
            _patch.start()

        self.agent = Agent(
            conf=self.conf,
            driver_names=self.conf.agent.driver_names,
            use_nanoconfig_service=False,
            publisher_endpoint="fake",
            nanoconfig_service_endpoint="",
            nanoconfig_update_endpoint="",
            nanoconfig_profile="nanoconfig://test_profile"
        )
        # Default ticking is set to 0 to reduce test execution time
        self.agent.TICK_INTERVAL = 0

    def tearDown(self):
        super(TestAgent, self).tearDown()
        # The drivers are stored at the class level so we need to clear
        # them after each test
        self.agent.drivers.clear()
        for _patch in self.patches:
            _patch.stop()

    def test_register_driver(self):
        """Both configured drivers are registered under their full keys."""
        expected_driver1_key = "metrics_driver.dummy_data.puller.dummy"
        expected_driver2_key = "metrics_driver.fake_data.puller.fake"

        self.agent.register_drivers()

        self.assertEqual(
            sorted(self.agent.drivers.keys()),
            [expected_driver1_key, expected_driver2_key]
        )

        sorted_drivers = OrderedDict(
            sorted(self.agent.drivers.items(), key=operator.itemgetter(0))
        )
        self.assertEqual(len(sorted_drivers), 2)

        driver1 = self.agent.drivers[expected_driver1_key]
        driver2 = self.agent.drivers[expected_driver2_key]

        self.assertEqual(driver1.title, "metrics_driver.dummy")
        self.assertEqual(driver1.probe_id, "data.puller.dummy")
        self.assertEqual(driver1.interval, 0.01)

        self.assertEqual(driver2.title, "metrics_driver.fake")
        self.assertEqual(driver2.probe_id, "data.puller.fake")
        self.assertEqual(driver2.interval, 0.01)

        # The agent observes every driver it registers.
        self.assertIn(self.agent, driver1._observers)
        self.assertIn(self.agent, driver2._observers)

    def test_unregister_driver(self):
        """Unregistering one driver leaves the other in place."""
        driver_key = "metrics_driver.dummy_data.puller.dummy"

        self.agent.register_drivers()
        self.agent.unregister_driver(driver_key)

        # Initial is 2 drivers => 2 - 1 == 1
        self.assertEqual(len(self.agent.drivers), 1)

    @patch.object(Measurement, "as_dict")
    def test_send_measurements(self, m_as_dict):
        """A driver publishes one msgpack-encoded measurement on the socket."""
        self.agent.register_drivers()

        measurement_dict = OrderedDict(
            name="dummy.data.puller",
            unit="",
            type_="",
            value=13.37,
            resource_id="test_hostname",
            host="test_hostname",
            timestamp="2015-08-04T15:15:45.703542",
        )
        m_as_dict.return_value = measurement_dict

        measurement = Measurement(**measurement_dict)

        for driver in self.agent.drivers.values():
            driver.send_measurements([measurement])
            break  # only the first one

        expected_encoded_msg = msgpack.dumps(measurement_dict)
        self.m_agent_socket.return_value.send.assert_called_once_with(
            expected_encoded_msg
        )

    @patch.object(DummyMetricPuller, "is_alive")
    @patch.object(DummyMetricPuller, "start")
    @patch("watcher_metering.agent.manager.MetricManager.lock")
    def test_check_drivers_alive(self, m_lock, m_start, m_is_alive):
        """A running driver is not restarted by check_drivers_alive()."""
        m_lock.acquire = Mock(return_value=True)  # Emulates a thread behavior
        m_lock.release = Mock(return_value=True)  # Emulates a thread behavior

        m_is_alive.return_value = True  # Emulates a thread that is running
        m_start.return_value = None

        self.agent.register_drivers()
        self.agent.check_drivers_alive()

        self.assertTrue(m_is_alive.called)
        self.assertFalse(m_start.called)

    @patch.object(DummyMetricPuller, "is_alive")
    @patch.object(DummyMetricPuller, "start")
    @patch("watcher_metering.agent.manager.MetricManager.lock")
    def test_check_drivers_alive_with_driver_stopped(self, m_lock, m_start,
                                                     m_is_alive):
        """A dead driver is restarted; the first restart attempt fails."""
        m_lock.acquire = Mock(return_value=True)  # Emulates a thread behavior
        m_lock.release = Mock(return_value=True)  # Emulates a thread behavior

        m_is_alive.side_effect = [False, True]
        # NOTE(review): only two start() calls are asserted below; the third
        # side-effect entry is a spare.
        m_start.side_effect = [RuntimeError, True, True]  # Fails once

        self.agent.register_drivers()

        # should re-run the driver
        self.agent.check_drivers_alive()

        self.assertEqual(m_is_alive.call_count, 1)
        self.assertEqual(m_start.call_count, 2)

    @patch.object(os._Environ, "__setitem__")
    @patch("watcher_metering.agent.agent.os.environ.get")
    def test_setup_nanoconfig_valid_using_default(self, m_env_getter,
                                                  m_env_setter):
        """Empty endpoints fall back to the NN_CONFIG_* environment."""
        # Override default where it is set to False
        m_env_getter.side_effect = ["FAKE_NN_CONFIG_SERVICE",
                                    "FAKE_NN_CONFIG_UPDATES"]
        self.agent.use_nanoconfig_service = True
        self.agent.nanoconfig_service_endpoint = ""
        self.agent.nanoconfig_update_endpoint = ""
        self.agent.set_nanoconfig_endpoints()

        self.assertEqual(m_env_getter.call_count, 2)
        m_env_getter.assert_any_call("NN_CONFIG_SERVICE")  # First call
        m_env_getter.assert_called_with("NN_CONFIG_UPDATES")  # Last call
        self.assertEqual(m_env_setter.call_count, 0)
        self.assertEqual(self.agent.nanoconfig_service_endpoint,
                         "FAKE_NN_CONFIG_SERVICE")
        self.assertEqual(self.agent.nanoconfig_update_endpoint,
                         "FAKE_NN_CONFIG_UPDATES")

    @patch.object(os._Environ, "__setitem__")
    @patch("watcher_metering.agent.agent.os.environ.get")
    def test_setup_nanoconfig_valid_custom_values(self, m_env_getter,
                                                  m_env_setter):
        """Explicit endpoints are exported into the environment."""
        # Override default where it is set to False
        m_env_getter.side_effect = ["FAKE_NN_CONFIG_SERVICE",
                                    "FAKE_NN_CONFIG_UPDATES"]
        self.agent.use_nanoconfig_service = True
        self.agent.nanoconfig_service_endpoint = "CUSTOM_NN_CONFIG_SERVICE"
        self.agent.nanoconfig_update_endpoint = "CUSTOM_NN_CONFIG_UPDATES"
        self.agent.set_nanoconfig_endpoints()

        self.assertEqual(m_env_getter.call_count, 2)
        m_env_getter.assert_any_call("NN_CONFIG_SERVICE")
        m_env_getter.assert_called_with("NN_CONFIG_UPDATES")
        m_env_setter.assert_any_call("NN_CONFIG_SERVICE",
                                     "CUSTOM_NN_CONFIG_SERVICE")
        m_env_setter.assert_called_with("NN_CONFIG_UPDATES",
                                        "CUSTOM_NN_CONFIG_UPDATES")
        self.assertEqual(self.agent.nanoconfig_service_endpoint,
                         "CUSTOM_NN_CONFIG_SERVICE")
        self.assertEqual(self.agent.nanoconfig_update_endpoint,
                         "CUSTOM_NN_CONFIG_UPDATES")

    @patch.object(os._Environ, "__setitem__")
    @patch("watcher_metering.agent.agent.os.environ.get")
    def test_setup_nanoconfig_invalid_service(self, m_env_getter,
                                              m_env_setter):
        """Missing service endpoint (attr and env) raises ValueError."""
        # Override default where it is set to False
        m_env_getter.return_value = ""  # Emulates empty ENV vars
        self.agent.use_nanoconfig_service = True
        self.agent.nanoconfig_service_endpoint = ""
        self.agent.nanoconfig_update_endpoint = "CUSTOM_NN_CONFIG_UPDATES"

        self.assertRaises(ValueError, self.agent.set_nanoconfig_endpoints)
        m_env_getter.assert_called_once_with("NN_CONFIG_SERVICE")
        self.assertEqual(m_env_setter.call_count, 0)

    @patch.object(os._Environ, "__setitem__")
    @patch("watcher_metering.agent.agent.os.environ.get")
    def test_setup_nanoconfig_invalid_update(self, m_env_getter, m_env_setter):
        """Missing update endpoint (attr and env) raises ValueError."""
        # Override default where it is set to False
        m_env_getter.return_value = ""  # Emulates empty ENV vars
        self.agent.use_nanoconfig_service = True
        self.agent.nanoconfig_service_endpoint = "CUSTOM_NN_CONFIG_SERVICE"
        self.agent.nanoconfig_update_endpoint = ""

        self.assertRaises(ValueError, self.agent.set_nanoconfig_endpoints)
        m_env_getter.assert_any_call("NN_CONFIG_SERVICE")
        m_env_getter.assert_called_with("NN_CONFIG_UPDATES")
        m_env_setter.assert_called_once_with("NN_CONFIG_SERVICE",
                                             "CUSTOM_NN_CONFIG_SERVICE")

    @patch.object(Agent, 'check_drivers_alive', MagicMock())
    @patch("watcher_metering.agent.manager."
           "MetricManager.terminated",
           new_callable=PropertyMock)
    def test_run_agent(self, m_terminated):
        """The event loop exits once 'terminated' becomes True."""
        # Patches the guard/exit condition of the thread periodic event loop
        # -> 1st time = False (carry on) and 2nd = True (Should terminate)
        m_terminated.side_effect = [False, True]

        self.agent.run()

        self.assertEqual(m_terminated.call_count, 2)

    @patch.object(DummyMetricPuller, 'send_measurements', MagicMock())
    def test_stop_agent(self):
        """Stopping the agent terminates it and all of its drivers."""
        self.agent.register_drivers()
        self.agent.start()
        self.agent.join(timeout=.01)
        self.agent.stop()

        self.assertEqual(len(self.agent.drivers.values()), 2)
        self.assertTrue(
            all([driver.terminated for driver in self.agent.drivers.values()])
        )
        self.assertTrue(self.agent.terminated)
        self.assertFalse(self.agent.is_alive())
|
goldsborough/.emacs | refs/heads/master | .emacs.d/.python-environments/default/lib/python3.5/site-packages/pkg_resources/_vendor/six.py | 2715 | """Utilities for writing code that runs on Python 2 and 3"""
# Copyright (c) 2010-2015 Benjamin Peterson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import
import functools
import itertools
import operator
import sys
import types
__author__ = "Benjamin Peterson <benjamin@python.org>"
__version__ = "1.10.0"
# Useful for very coarse version differentiation.
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
PY34 = sys.version_info[0:2] >= (3, 4)

# Aliases for the basic types whose names differ between Python 2 and 3,
# plus MAXSIZE, the platform's largest container size.
if PY3:
    string_types = str,
    integer_types = int,
    class_types = type,
    text_type = str
    binary_type = bytes

    MAXSIZE = sys.maxsize
else:
    string_types = basestring,
    integer_types = (int, long)
    class_types = (type, types.ClassType)
    text_type = unicode
    binary_type = str

    if sys.platform.startswith("java"):
        # Jython always uses 32 bits.
        MAXSIZE = int((1 << 31) - 1)
    else:
        # It's possible to have sizeof(long) != sizeof(Py_ssize_t).
        class X(object):

            def __len__(self):
                # Probe: len() overflows on 32-bit Py_ssize_t.
                return 1 << 31
        try:
            len(X())
        except OverflowError:
            # 32-bit
            MAXSIZE = int((1 << 31) - 1)
        else:
            # 64-bit
            MAXSIZE = int((1 << 63) - 1)
        del X
def _add_doc(func, doc):
    """Attach *doc* as the docstring of *func*."""
    setattr(func, "__doc__", doc)
def _import_module(name):
    """Import the dotted module *name* and return the innermost module
    (i.e. the one after the last dot, unlike bare ``__import__``)."""
    __import__(name)
    module = sys.modules[name]
    return module
class _LazyDescr(object):
    """Descriptor that resolves a value lazily on first attribute access.

    Subclasses provide ``_resolve()``; the result is cached on the owner
    and the descriptor then removes itself from the class.
    """

    def __init__(self, name):
        self.name = name

    def __get__(self, obj, tp):
        result = self._resolve()
        setattr(obj, self.name, result)  # Invokes __set__.
        try:
            # This is a bit ugly, but it avoids running this again by
            # removing this descriptor.
            delattr(obj.__class__, self.name)
        except AttributeError:
            pass
        return result
class MovedModule(_LazyDescr):
    """Lazy reference to a module that was renamed between Python 2 and 3.

    ``old`` is the Python 2 module name; ``new`` the Python 3 one
    (defaults to ``name``).
    """

    def __init__(self, name, old, new=None):
        super(MovedModule, self).__init__(name)
        if PY3:
            if new is None:
                new = name
            self.mod = new
        else:
            self.mod = old

    def _resolve(self):
        return _import_module(self.mod)

    def __getattr__(self, attr):
        _module = self._resolve()
        value = getattr(_module, attr)
        # Cache the attribute so __getattr__ is not hit again for it.
        setattr(self, attr, value)
        return value
class _LazyModule(types.ModuleType):
    """Module type whose attributes are lazy descriptors resolved on use."""

    def __init__(self, name):
        super(_LazyModule, self).__init__(name)
        self.__doc__ = self.__class__.__doc__

    def __dir__(self):
        attrs = ["__doc__", "__name__"]
        attrs += [attr.name for attr in self._moved_attributes]
        return attrs

    # Subclasses should override this
    _moved_attributes = []
class MovedAttribute(_LazyDescr):
    """Lazy reference to an attribute that moved between Python 2 and 3.

    The (module, attribute) pair is chosen at construction time based on
    the running major version; new names default to ``name``/the old name.
    """

    def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
        super(MovedAttribute, self).__init__(name)
        if PY3:
            if new_mod is None:
                new_mod = name
            self.mod = new_mod
            if new_attr is None:
                if old_attr is None:
                    new_attr = name
                else:
                    new_attr = old_attr
            self.attr = new_attr
        else:
            self.mod = old_mod
            if old_attr is None:
                old_attr = name
            self.attr = old_attr

    def _resolve(self):
        module = _import_module(self.mod)
        return getattr(module, self.attr)
class _SixMetaPathImporter(object):

    """
    A meta path importer to import six.moves and its submodules.

    This class implements a PEP302 finder and loader. It should be compatible
    with Python 2.5 and all existing versions of Python3
    """

    def __init__(self, six_module_name):
        self.name = six_module_name
        self.known_modules = {}

    def _add_module(self, mod, *fullnames):
        # Register *mod* under one or more dotted aliases below self.name.
        for fullname in fullnames:
            self.known_modules[self.name + "." + fullname] = mod

    def _get_module(self, fullname):
        return self.known_modules[self.name + "." + fullname]

    def find_module(self, fullname, path=None):
        # PEP302 finder: only claim modules we registered ourselves.
        if fullname in self.known_modules:
            return self
        return None

    def __get_module(self, fullname):
        try:
            return self.known_modules[fullname]
        except KeyError:
            raise ImportError("This loader does not know module " + fullname)

    def load_module(self, fullname):
        try:
            # in case of a reload
            return sys.modules[fullname]
        except KeyError:
            pass
        mod = self.__get_module(fullname)
        if isinstance(mod, MovedModule):
            # A MovedModule placeholder resolves to the real module.
            mod = mod._resolve()
        else:
            mod.__loader__ = self
        sys.modules[fullname] = mod
        return mod

    def is_package(self, fullname):
        """
        Return true, if the named module is a package.

        We need this method to get correct spec objects with
        Python 3.4 (see PEP451)
        """
        return hasattr(self.__get_module(fullname), "__path__")

    def get_code(self, fullname):
        """Return None

        Required, if is_package is implemented"""
        self.__get_module(fullname)  # eventually raises ImportError
        return None
    get_source = get_code  # same as get_code
_importer = _SixMetaPathImporter(__name__)  # shared loader/registry for the moves modules below
class _MovedItems(_LazyModule):

    """Lazy loading of moved objects"""
    __path__ = []  # mark as package
# Registry of stdlib names that moved between Python 2 and 3; each entry
# resolves lazily to the correct location for the running interpreter.
_moved_attributes = [
    MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
    MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
    MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"),
    MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
    MovedAttribute("intern", "__builtin__", "sys"),
    MovedAttribute("map", "itertools", "builtins", "imap", "map"),
    MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"),
    MovedAttribute("getcwdb", "os", "os", "getcwd", "getcwdb"),
    MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"),
    MovedAttribute("reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"),
    MovedAttribute("reduce", "__builtin__", "functools"),
    MovedAttribute("shlex_quote", "pipes", "shlex", "quote"),
    MovedAttribute("StringIO", "StringIO", "io"),
    MovedAttribute("UserDict", "UserDict", "collections"),
    MovedAttribute("UserList", "UserList", "collections"),
    MovedAttribute("UserString", "UserString", "collections"),
    MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
    MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
    MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"),
    MovedModule("builtins", "__builtin__"),
    MovedModule("configparser", "ConfigParser"),
    MovedModule("copyreg", "copy_reg"),
    MovedModule("dbm_gnu", "gdbm", "dbm.gnu"),
    MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"),
    MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
    MovedModule("http_cookies", "Cookie", "http.cookies"),
    MovedModule("html_entities", "htmlentitydefs", "html.entities"),
    MovedModule("html_parser", "HTMLParser", "html.parser"),
    MovedModule("http_client", "httplib", "http.client"),
    MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"),
    MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"),
    MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"),
    MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),
    MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
    MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
    MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
    MovedModule("cPickle", "cPickle", "pickle"),
    MovedModule("queue", "Queue"),
    MovedModule("reprlib", "repr"),
    MovedModule("socketserver", "SocketServer"),
    MovedModule("_thread", "thread", "_thread"),
    MovedModule("tkinter", "Tkinter"),
    MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
    MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
    MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
    MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
    MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
    MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"),
    MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
    MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
    MovedModule("tkinter_colorchooser", "tkColorChooser",
                "tkinter.colorchooser"),
    MovedModule("tkinter_commondialog", "tkCommonDialog",
                "tkinter.commondialog"),
    MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
    MovedModule("tkinter_font", "tkFont", "tkinter.font"),
    MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
    MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
                "tkinter.simpledialog"),
    MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"),
    MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"),
    MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"),
    MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
    MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"),
    MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"),
]
# Add windows specific modules.
if sys.platform == "win32":
    _moved_attributes += [
        MovedModule("winreg", "_winreg"),
    ]

# Install each moved item as a lazy descriptor on _MovedItems, and
# register moved modules with the meta-path importer.
for attr in _moved_attributes:
    setattr(_MovedItems, attr.name, attr)
    if isinstance(attr, MovedModule):
        _importer._add_module(attr, "moves." + attr.name)
del attr

_MovedItems._moved_attributes = _moved_attributes

# The singleton six.moves module, exposed through the importer.
moves = _MovedItems(__name__ + ".moves")
_importer._add_module(moves, "moves")
class Module_six_moves_urllib_parse(_LazyModule):

    """Lazy loading of moved objects in six.moves.urllib_parse"""


# urlparse/urllib names that were merged into urllib.parse on Python 3.
_urllib_parse_moved_attributes = [
    MovedAttribute("ParseResult", "urlparse", "urllib.parse"),
    MovedAttribute("SplitResult", "urlparse", "urllib.parse"),
    MovedAttribute("parse_qs", "urlparse", "urllib.parse"),
    MovedAttribute("parse_qsl", "urlparse", "urllib.parse"),
    MovedAttribute("urldefrag", "urlparse", "urllib.parse"),
    MovedAttribute("urljoin", "urlparse", "urllib.parse"),
    MovedAttribute("urlparse", "urlparse", "urllib.parse"),
    MovedAttribute("urlsplit", "urlparse", "urllib.parse"),
    MovedAttribute("urlunparse", "urlparse", "urllib.parse"),
    MovedAttribute("urlunsplit", "urlparse", "urllib.parse"),
    MovedAttribute("quote", "urllib", "urllib.parse"),
    MovedAttribute("quote_plus", "urllib", "urllib.parse"),
    MovedAttribute("unquote", "urllib", "urllib.parse"),
    MovedAttribute("unquote_plus", "urllib", "urllib.parse"),
    MovedAttribute("urlencode", "urllib", "urllib.parse"),
    MovedAttribute("splitquery", "urllib", "urllib.parse"),
    MovedAttribute("splittag", "urllib", "urllib.parse"),
    MovedAttribute("splituser", "urllib", "urllib.parse"),
    MovedAttribute("uses_fragment", "urlparse", "urllib.parse"),
    MovedAttribute("uses_netloc", "urlparse", "urllib.parse"),
    MovedAttribute("uses_params", "urlparse", "urllib.parse"),
    MovedAttribute("uses_query", "urlparse", "urllib.parse"),
    MovedAttribute("uses_relative", "urlparse", "urllib.parse"),
]
for attr in _urllib_parse_moved_attributes:
    setattr(Module_six_moves_urllib_parse, attr.name, attr)
del attr

Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes

_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"),
                      "moves.urllib_parse", "moves.urllib.parse")
class Module_six_moves_urllib_error(_LazyModule):

    """Lazy loading of moved objects in six.moves.urllib_error"""


# Exceptions relocated into urllib.error on Python 3.
_urllib_error_moved_attributes = [
    MovedAttribute("URLError", "urllib2", "urllib.error"),
    MovedAttribute("HTTPError", "urllib2", "urllib.error"),
    MovedAttribute("ContentTooShortError", "urllib", "urllib.error"),
]
for attr in _urllib_error_moved_attributes:
    setattr(Module_six_moves_urllib_error, attr.name, attr)
del attr

Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes

_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"),
                      "moves.urllib_error", "moves.urllib.error")
class Module_six_moves_urllib_request(_LazyModule):

    """Lazy loading of moved objects in six.moves.urllib_request"""


# urllib/urllib2 names merged into urllib.request on Python 3.
_urllib_request_moved_attributes = [
    MovedAttribute("urlopen", "urllib2", "urllib.request"),
    MovedAttribute("install_opener", "urllib2", "urllib.request"),
    MovedAttribute("build_opener", "urllib2", "urllib.request"),
    MovedAttribute("pathname2url", "urllib", "urllib.request"),
    MovedAttribute("url2pathname", "urllib", "urllib.request"),
    MovedAttribute("getproxies", "urllib", "urllib.request"),
    MovedAttribute("Request", "urllib2", "urllib.request"),
    MovedAttribute("OpenerDirector", "urllib2", "urllib.request"),
    MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"),
    MovedAttribute("ProxyHandler", "urllib2", "urllib.request"),
    MovedAttribute("BaseHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"),
    MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"),
    MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"),
    MovedAttribute("FileHandler", "urllib2", "urllib.request"),
    MovedAttribute("FTPHandler", "urllib2", "urllib.request"),
    MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"),
    MovedAttribute("UnknownHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"),
    MovedAttribute("urlretrieve", "urllib", "urllib.request"),
    MovedAttribute("urlcleanup", "urllib", "urllib.request"),
    MovedAttribute("URLopener", "urllib", "urllib.request"),
    MovedAttribute("FancyURLopener", "urllib", "urllib.request"),
    MovedAttribute("proxy_bypass", "urllib", "urllib.request"),
]
for attr in _urllib_request_moved_attributes:
    setattr(Module_six_moves_urllib_request, attr.name, attr)
del attr

Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes

_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"),
                      "moves.urllib_request", "moves.urllib.request")
class Module_six_moves_urllib_response(_LazyModule):

    """Lazy loading of moved objects in six.moves.urllib_response"""


# Response helper classes relocated into urllib.response on Python 3.
_urllib_response_moved_attributes = [
    MovedAttribute("addbase", "urllib", "urllib.response"),
    MovedAttribute("addclosehook", "urllib", "urllib.response"),
    MovedAttribute("addinfo", "urllib", "urllib.response"),
    MovedAttribute("addinfourl", "urllib", "urllib.response"),
]
for attr in _urllib_response_moved_attributes:
    setattr(Module_six_moves_urllib_response, attr.name, attr)
del attr

Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes

_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"),
                      "moves.urllib_response", "moves.urllib.response")
class Module_six_moves_urllib_robotparser(_LazyModule):

    """Lazy loading of moved objects in six.moves.urllib_robotparser"""


# robotparser was renamed to urllib.robotparser on Python 3.
_urllib_robotparser_moved_attributes = [
    MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"),
]
for attr in _urllib_robotparser_moved_attributes:
    setattr(Module_six_moves_urllib_robotparser, attr.name, attr)
del attr

Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes

_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"),
                      "moves.urllib_robotparser", "moves.urllib.robotparser")
class Module_six_moves_urllib(types.ModuleType):

    """Create a six.moves.urllib namespace that resembles the Python 3 namespace"""
    __path__ = []  # mark as package
    # Submodules come from the modules registered with _importer above.
    parse = _importer._get_module("moves.urllib_parse")
    error = _importer._get_module("moves.urllib_error")
    request = _importer._get_module("moves.urllib_request")
    response = _importer._get_module("moves.urllib_response")
    robotparser = _importer._get_module("moves.urllib_robotparser")

    def __dir__(self):
        return ['parse', 'error', 'request', 'response', 'robotparser']


_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"),
                      "moves.urllib")
def add_move(move):
    """Add an item to six.moves.

    *move* is installed on the lazy ``_MovedItems`` class under
    ``move.name`` (typically a MovedAttribute or MovedModule instance).
    """
    setattr(_MovedItems, move.name, move)
def remove_move(name):
    """Remove item from six.moves."""
    # Prefer the lazy descriptor on the class; fall back to a value that
    # was already resolved into the moves module instance.
    try:
        delattr(_MovedItems, name)
        return
    except AttributeError:
        pass
    try:
        del moves.__dict__[name]
    except KeyError:
        raise AttributeError("no such move, %r" % (name,))
# Names of the function/method introspection attributes, which were
# renamed on Python 3 (e.g. ``im_func`` -> ``__func__``).
if PY3:
    _meth_func = "__func__"
    _meth_self = "__self__"

    _func_closure = "__closure__"
    _func_code = "__code__"
    _func_defaults = "__defaults__"
    _func_globals = "__globals__"
else:
    _meth_func = "im_func"
    _meth_self = "im_self"

    _func_closure = "func_closure"
    _func_code = "func_code"
    _func_defaults = "func_defaults"
    _func_globals = "func_globals"
try:
    advance_iterator = next
except NameError:
    # Python < 2.6 had no builtin next(); fall back to the .next() method.
    def advance_iterator(it):
        return it.next()
next = advance_iterator


try:
    callable = callable
except NameError:
    # callable() was absent in Python 3.0/3.1; emulate via __call__ lookup.
    def callable(obj):
        return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
# Helpers papering over the Python 2 bound/unbound method distinction,
# which no longer exists on Python 3.
if PY3:
    def get_unbound_function(unbound):
        return unbound

    create_bound_method = types.MethodType

    def create_unbound_method(func, cls):
        return func

    Iterator = object
else:
    def get_unbound_function(unbound):
        return unbound.im_func

    def create_bound_method(func, obj):
        return types.MethodType(func, obj, obj.__class__)

    def create_unbound_method(func, cls):
        return types.MethodType(func, None, cls)

    class Iterator(object):
        # Base class giving Python 2 classes a .next() that delegates to
        # the Python 3-style __next__.

        def next(self):
            return type(self).__next__(self)

    callable = callable
_add_doc(get_unbound_function,
         """Get the function out of a possibly unbound function""")
# Accessors for the version-dependent attribute names defined above.
get_method_function = operator.attrgetter(_meth_func)
get_method_self = operator.attrgetter(_meth_self)
get_function_closure = operator.attrgetter(_func_closure)
get_function_code = operator.attrgetter(_func_code)
get_function_defaults = operator.attrgetter(_func_defaults)
get_function_globals = operator.attrgetter(_func_globals)
# Dictionary iteration helpers: wrap the iterator-returning methods of
# Python 2 dicts or the view-returning methods of Python 3 dicts.
if PY3:
    def iterkeys(d, **kw):
        return iter(d.keys(**kw))

    def itervalues(d, **kw):
        return iter(d.values(**kw))

    def iteritems(d, **kw):
        return iter(d.items(**kw))

    def iterlists(d, **kw):
        return iter(d.lists(**kw))

    viewkeys = operator.methodcaller("keys")

    viewvalues = operator.methodcaller("values")

    viewitems = operator.methodcaller("items")
else:
    def iterkeys(d, **kw):
        return d.iterkeys(**kw)

    def itervalues(d, **kw):
        return d.itervalues(**kw)

    def iteritems(d, **kw):
        return d.iteritems(**kw)

    def iterlists(d, **kw):
        return d.iterlists(**kw)

    viewkeys = operator.methodcaller("viewkeys")

    viewvalues = operator.methodcaller("viewvalues")

    viewitems = operator.methodcaller("viewitems")

_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.")
_add_doc(itervalues, "Return an iterator over the values of a dictionary.")
_add_doc(iteritems,
         "Return an iterator over the (key, value) pairs of a dictionary.")
_add_doc(iterlists,
         "Return an iterator over the (key, [values]) pairs of a dictionary.")
if PY3:
def b(s):
return s.encode("latin-1")
def u(s):
return s
unichr = chr
import struct
int2byte = struct.Struct(">B").pack
del struct
byte2int = operator.itemgetter(0)
indexbytes = operator.getitem
iterbytes = iter
import io
StringIO = io.StringIO
BytesIO = io.BytesIO
_assertCountEqual = "assertCountEqual"
if sys.version_info[1] <= 1:
_assertRaisesRegex = "assertRaisesRegexp"
_assertRegex = "assertRegexpMatches"
else:
_assertRaisesRegex = "assertRaisesRegex"
_assertRegex = "assertRegex"
else:
def b(s):
return s
# Workaround for standalone backslash
def u(s):
return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape")
unichr = unichr
int2byte = chr
def byte2int(bs):
return ord(bs[0])
def indexbytes(buf, i):
return ord(buf[i])
iterbytes = functools.partial(itertools.imap, ord)
import StringIO
StringIO = BytesIO = StringIO.StringIO
_assertCountEqual = "assertItemsEqual"
_assertRaisesRegex = "assertRaisesRegexp"
_assertRegex = "assertRegexpMatches"
_add_doc(b, """Byte literal""")
_add_doc(u, """Text literal""")
def assertCountEqual(self, *args, **kwargs):
    """Call unittest's assertCountEqual under its version-appropriate name.

    Dispatches through the module-level ``_assertCountEqual`` string, which
    names ``assertCountEqual`` on Python 3 and ``assertItemsEqual`` on
    Python 2.  ``self`` is the TestCase instance.
    """
    return getattr(self, _assertCountEqual)(*args, **kwargs)
def assertRaisesRegex(self, *args, **kwargs):
    """Call unittest's assertRaisesRegex / assertRaisesRegexp, whichever exists."""
    return getattr(self, _assertRaisesRegex)(*args, **kwargs)
def assertRegex(self, *args, **kwargs):
    """Call unittest's assertRegex / assertRegexpMatches, whichever exists."""
    return getattr(self, _assertRegex)(*args, **kwargs)
# Version-specific exec_() and reraise().  Each major version's spelling is
# a SyntaxError on the other, so the Python 2 reraise() must be compiled
# from an exec'd source string.
if PY3:
    # On Python 3 exec is an ordinary builtin function.
    exec_ = getattr(moves.builtins, "exec")
    def reraise(tp, value, tb=None):
        # Re-raise *value* (instantiating tp if needed), attaching tb
        # unless it is already the active traceback.
        if value is None:
            value = tp()
        if value.__traceback__ is not tb:
            raise value.with_traceback(tb)
        raise value
else:
    def exec_(_code_, _globs_=None, _locs_=None):
        """Execute code in a namespace."""
        if _globs_ is None:
            # Default to the caller's globals/locals, mirroring the
            # behaviour of the bare exec statement.
            frame = sys._getframe(1)
            _globs_ = frame.f_globals
            if _locs_ is None:
                _locs_ = frame.f_locals
            del frame
        elif _locs_ is None:
            _locs_ = _globs_
        exec("""exec _code_ in _globs_, _locs_""")
    exec_("""def reraise(tp, value, tb=None):
    raise tp, value, tb
""")
# raise_from(): exception chaining where the syntax exists.  "raise X from Y"
# is a SyntaxError before Python 3, so the 3.x bodies are compiled via
# exec_(); Python 3.2 needs a separate branch because it rejects
# "raise value from None".
if sys.version_info[:2] == (3, 2):
    exec_("""def raise_from(value, from_value):
    if from_value is None:
        raise value
    raise value from from_value
""")
elif sys.version_info[:2] > (3, 2):
    exec_("""def raise_from(value, from_value):
    raise value from from_value
""")
else:
    def raise_from(value, from_value):
        # Python 2 has no exception chaining; the cause is discarded.
        raise value
print_ = getattr(moves.builtins, "print", None)
if print_ is None:
def print_(*args, **kwargs):
"""The new-style print function for Python 2.4 and 2.5."""
fp = kwargs.pop("file", sys.stdout)
if fp is None:
return
def write(data):
if not isinstance(data, basestring):
data = str(data)
# If the file has an encoding, encode unicode with it.
if (isinstance(fp, file) and
isinstance(data, unicode) and
fp.encoding is not None):
errors = getattr(fp, "errors", None)
if errors is None:
errors = "strict"
data = data.encode(fp.encoding, errors)
fp.write(data)
want_unicode = False
sep = kwargs.pop("sep", None)
if sep is not None:
if isinstance(sep, unicode):
want_unicode = True
elif not isinstance(sep, str):
raise TypeError("sep must be None or a string")
end = kwargs.pop("end", None)
if end is not None:
if isinstance(end, unicode):
want_unicode = True
elif not isinstance(end, str):
raise TypeError("end must be None or a string")
if kwargs:
raise TypeError("invalid keyword arguments to print()")
if not want_unicode:
for arg in args:
if isinstance(arg, unicode):
want_unicode = True
break
if want_unicode:
newline = unicode("\n")
space = unicode(" ")
else:
newline = "\n"
space = " "
if sep is None:
sep = space
if end is None:
end = newline
for i, arg in enumerate(args):
if i:
write(sep)
write(arg)
write(end)
if sys.version_info[:2] < (3, 3):
_print = print_
def print_(*args, **kwargs):
fp = kwargs.get("file", sys.stdout)
flush = kwargs.pop("flush", False)
_print(*args, **kwargs)
if flush and fp is not None:
fp.flush()
_add_doc(reraise, """Reraise an exception.""")
# functools.wraps, plus a shim for Python < 3.4 that also records the
# original function as wrapper.__wrapped__ (which functools.wraps only
# does reliably from 3.4 on).
if sys.version_info[0:2] < (3, 4):
    def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS,
              updated=functools.WRAPPER_UPDATES):
        def wrapper(f):
            f = functools.wraps(wrapped, assigned, updated)(f)
            # Set __wrapped__ last so functools.wraps cannot clobber it.
            f.__wrapped__ = wrapped
            return f
        return wrapper
else:
    wraps = functools.wraps
def with_metaclass(meta, *bases):
    """Create a base class with a metaclass.

    Used as ``class MyClass(with_metaclass(Meta, Base)): ...`` — a spelling
    that works on both Python 2 and Python 3.
    """
    # This requires a bit of explanation: the basic idea is to make a dummy
    # metaclass for one level of class instantiation that replaces itself with
    # the actual metaclass.  When the caller's class statement fires, the
    # dummy's __new__ discards the temporary base (this_bases) and builds the
    # real class from *bases* with the real metaclass, so the temporary class
    # never appears in the final MRO.
    class metaclass(meta):
        def __new__(cls, name, this_bases, d):
            return meta(name, bases, d)
    # type.__new__ (rather than calling meta) so that creating the temporary
    # class does not invoke the real metaclass prematurely.
    return type.__new__(metaclass, 'temporary_class', (), {})
def add_metaclass(metaclass):
    """Class decorator for creating a class with a metaclass.

    Rebuilds the decorated class through *metaclass*, copying its namespace.
    Slot descriptors, ``__dict__`` and ``__weakref__`` are dropped from the
    copied namespace so the new type recreates them itself.
    """
    def wrapper(cls):
        body = cls.__dict__.copy()
        declared_slots = body.get('__slots__')
        if declared_slots is not None:
            # A bare string means a single slot.
            if isinstance(declared_slots, str):
                declared_slots = [declared_slots]
            for slot_name in declared_slots:
                body.pop(slot_name)
        body.pop('__dict__', None)
        body.pop('__weakref__', None)
        return metaclass(cls.__name__, cls.__bases__, body)
    return wrapper
def python_2_unicode_compatible(klass):
    """
    Class decorator defining __unicode__ and __str__ under Python 2.

    On Python 3 the class is returned untouched.  To support both major
    versions from a single code base, write a __str__ method returning
    text and apply this decorator to the class.
    """
    if not PY2:
        return klass
    if '__str__' not in klass.__dict__:
        raise ValueError("@python_2_unicode_compatible cannot be applied "
                         "to %s because it doesn't define __str__()." %
                         klass.__name__)
    # The text-returning __str__ becomes __unicode__, and __str__ is
    # replaced by a UTF-8 encoding of it.
    klass.__unicode__ = klass.__str__
    klass.__str__ = lambda self: self.__unicode__().encode('utf-8')
    return klass
# Complete the moves implementation.
# This code is at the end of this module to speed up module loading.
# Turn this module into a package.
__path__ = [] # required for PEP 302 and PEP 451
__package__ = __name__ # see PEP 366 @ReservedAssignment
if globals().get("__spec__") is not None:
__spec__.submodule_search_locations = [] # PEP 451 @UndefinedVariable
# Remove other six meta path importers, since they cause problems. This can
# happen if six is removed from sys.modules and then reloaded. (Setuptools does
# this for some reason.)
if sys.meta_path:
for i, importer in enumerate(sys.meta_path):
# Here's some real nastiness: Another "instance" of the six module might
# be floating around. Therefore, we can't use isinstance() to check for
# the six meta path importer, since the other six instance will have
# inserted an importer with different class.
if (type(importer).__name__ == "_SixMetaPathImporter" and
importer.name == __name__):
del sys.meta_path[i]
break
del i, importer
# Finally, add the importer to the meta path import hook.
sys.meta_path.append(_importer)
|
lidalei/DataMining | refs/heads/master | random_forests.py | 1 | import os, time
from joblib import Parallel, delayed
from openml.apiconnector import APIConnector
from scipy.io.arff import loadarff
import numpy as np
import matplotlib.pylab as plt
from sklearn.ensemble import RandomForestClassifier
from sklearn.cross_validation import cross_val_score
from sklearn.metrics import get_scorer, zero_one_loss
from sklearn.tree.tree import DecisionTreeClassifier
def get_dataset(did):
    """Download an OpenML dataset and return it as arrays.

    Reads the user's API key from ~/.openml/apikey.txt and caches
    downloads under ~/.openml/cache.

    @param did: OpenML dataset id (e.g. 44)
    @return: (X, y, attribute_names, target_attribute_names) -- feature
        matrix, target vector, list of feature names, and the nominal
        values of the default target attribute as reported by the ARFF
        metadata.
    """
    home_dir = os.path.expanduser("~")
    openml_dir = os.path.join(home_dir, ".openml")
    cache_dir = os.path.join(openml_dir, "cache")
    # The with-statement closes the file on exit; the explicit fh.close()
    # the original carried inside the block was redundant and is removed.
    with open(os.path.join(openml_dir, "apikey.txt"), 'r') as fh:
        key = fh.readline().rstrip('\n')
    openml = APIConnector(cache_directory = cache_dir, apikey = key)
    dataset = openml.download_dataset(did)
    # Only the ARFF metadata is needed here; the raw records are unused.
    _, meta = loadarff(dataset.data_file)
    target_attribute = dataset.default_target_attribute
    target_attribute_names = meta[target_attribute][1]
    X, y, attribute_names = dataset.get_dataset(target = target_attribute, return_attribute_names = True)
    return X, y, attribute_names, target_attribute_names
def bias_var(true_preds, sum_preds, counts, n_replicas):
    '''
    Estimate squared bias and variance from accumulated bootstrap predictions.

    @param true_preds: true labels
    @param sum_preds: per-sample sums of predictions over all replicas
    @param counts: number of times each sample was tested (predicted)
    @param n_replicas: total number of bootstrap replicas run
    @return: squared bias, variance
    '''
    # Mean prediction per sample, and its absolute deviation from the truth.
    mean_preds = sum_preds / counts
    per_sample_bias = np.absolute(true_preds - mean_preds)
    per_sample_var = per_sample_bias * (1.0 - per_sample_bias)
    # Each sample is weighted by how often it actually appeared in a test set.
    weights = counts / n_replicas
    bias = np.mean(np.power(per_sample_bias, 2.0) * weights)
    var = np.mean(per_sample_var * weights)
    return bias, var
def clf_bias_var(clf, X, y, n_replicas):
    '''
    Estimate AUC, error rate, and bias/variance of a classifier by bootstrap.

    Each round draws a bootstrap sample (with replacement, same size as X)
    for training and uses the out-of-bag samples for testing.  Per-sample
    prediction sums and test counts are accumulated across rounds and
    passed to bias_var().

    @param clf: estimator with the scikit-learn fit()/predict() API
    @param X: feature matrix, shape (n_samples, n_features)
    @param y: labels; assumes binary 0/1 encoding for the bias arithmetic
        in bias_var() -- TODO confirm for other encodings
    @param n_replicas: number of bootstrap rounds
    @return: (auc_scores, error_scores, squared bias, variance)
    '''
    roc_auc_scorer = get_scorer("roc_auc")
    # roc_auc_scorer(clf, X_test, y_test)
    auc_scores = []
    error_scores = []
    # Per-sample accumulators over all bootstrap rounds.
    counts = np.zeros(X.shape[0], dtype = np.float64)
    sum_preds = np.zeros(X.shape[0], dtype = np.float64)
    for it in xrange(n_replicas):
        # generate train sets and test sets
        train_indices = np.random.randint(X.shape[0], size = X.shape[0])
        # get test sets: the out-of-bag samples not drawn into the bootstrap
        in_train = np.unique(train_indices)
        mask = np.ones(X.shape[0], dtype = np.bool)
        mask[in_train] = False
        test_indices = np.arange(X.shape[0])[mask]
        clf.fit(X[train_indices], y[train_indices])
        auc_scores.append(roc_auc_scorer(clf, X[test_indices], y[test_indices]))
        error_scores.append(zero_one_loss(y[test_indices], clf.predict(X[test_indices])))
        # Predict over all of X once, then fold only the OOB samples into
        # the accumulators.
        preds = clf.predict(X)
        for index in test_indices:
            counts[index] += 1
            sum_preds[index] += preds[index]
    test_mask = (counts > 0) # indices of samples that have been tested
    # print('counts mean: {}'.format(np.mean(counts)))
    # print('counts standard derivation: {}'.format(np.std(counts)))
    bias, var = bias_var(y[test_mask], sum_preds[test_mask], counts[test_mask], n_replicas)
    return auc_scores, error_scores, bias, var
if __name__ == '__main__':
    ## get dataset
    # OpenML dataset 44; forest sizes are 2^11, 2^10, ..., 2^0 (descending).
    X, y, attribute_names, target_attribute_names = get_dataset(44)
    ns = np.logspace(11, 0, num = 12, endpoint = True, base = 2.0, dtype = np.int32)
    fig, ax = plt.subplots(1, 1)
    fig.suptitle('OOB error versus cross validation error', fontsize = 'x-large')
    ## OOB scores
    # Out-of-bag error estimate for each forest size.
    oob_err_rates = []
    for n in ns:
        rnd_forest_clf = RandomForestClassifier(n_estimators = n, bootstrap = True, oob_score = True)
        rnd_forest_clf.fit(X, y)
        oob_err_rates.append(1.0 - rnd_forest_clf.oob_score_)
        # plot_surface(ax, rnd_forest_clf, X, y)
    ax.plot(ns, oob_err_rates, '-o', label = 'OOB error')
    ## cross validation scores
    # 10-fold CV error (mean, std over folds) for each forest size.
    cv_err_rates = []
    for n in ns:
        rnd_forest_clf = RandomForestClassifier(n_estimators = n, bootstrap = True, oob_score = False)
        scores = cross_val_score(rnd_forest_clf, X, y, cv = 10, n_jobs = -1)
        cv_err_rates.append([1.0 - np.mean(scores), np.std(scores)])
        # plot_surface(ax, rnd_forest_clf, X, y)
    cv_err_rates = np.array(cv_err_rates)
    ax.plot(ns, cv_err_rates[:, 0], '-o', label = 'Cross validation error')
    # ax.plot(ns, cv_err_rates[:, 1], label = 'CV error std')
    ax.grid(True)
    ax.legend(loc = 'best', fontsize = 'large')
    ax.set_xlabel('Number of trees', fontsize = 'large')
    ax.set_ylabel('Error rate', fontsize = 'large')
    ax.set_xlim(np.min(ns) - 1, np.max(ns) + 4)
    ## compare a single tree with RandomForest ensemble, using 100 bootstrap
    figure, (ax1, ax2) = plt.subplots(2, 1)
    n_replicas = 200
    # compute bias and variance for a tree
    cart = DecisionTreeClassifier()
    auc_scores, error_scores, bias, var = clf_bias_var(cart, X, y, n_replicas)
    print('auc mean: {}, std: {}'.format(np.mean(auc_scores), np.std(auc_scores)))
    print('error mean: {}, std: {}'.format(np.mean(error_scores), np.std(error_scores)))
    print('bias: {}, var: {}'.format(bias, var))
    # ax1.plot(ns[[0, -1]], [bias, bias], '--', label = 'CART bias')
    # ax1.plot(ns[[0, -1]], [var, var], '--', label = 'CART variance')
    aucs = []
    err_rates = []
    biases_vars = []
    start_time = time.time()
    # One bias/variance estimate per forest size, fanned out over 8 processes.
    results = Parallel(n_jobs = 8)(delayed(clf_bias_var)(RandomForestClassifier(n_estimators = n, bootstrap = True, oob_score = False),
                                                         X, y, n_replicas) for n in ns)
    print('Time: {}'.format(time.time() - start_time))
    for auc_scores, error_scores, bias, var in results:
        print('auc mean: {}, std: {}'.format(np.mean(auc_scores), np.std(auc_scores)))
        print('error mean: {}, std: {}'.format(np.mean(error_scores), np.std(error_scores)))
        print('squared bias: {}, var: {}'.format(bias, var))
        aucs.append(np.mean(auc_scores))
        err_rates.append(np.mean(error_scores))
        biases_vars.append([bias, var])
    biases_vars = np.array(biases_vars)
    ax1.plot(ns, aucs, 'o-', label = 'Random Forest AUC scores')
    ax1.legend(loc = 'best', fontsize = 'medium')
    ax1.set_xlabel('Number of trees', fontsize = 'medium')
    ax1.set_xlim(np.min(ns) - 1, np.max(ns) + 4)
    ax1.grid(True, which = 'both')
    ax2.plot(ns, err_rates, 'o-', label = 'Random Forest error rate')
    ax2.plot(ns, biases_vars[:, 0], 'o-', label = 'Random forest squared bias')
    ax2.plot(ns, biases_vars[:, 1], 'o-', label = 'Random forest variance')
    ax2.legend(loc = 'best', fontsize = 'medium')
    ax2.set_xlabel('Number of trees', fontsize = 'medium')
    ax2.set_xlim(np.min(ns) - 1, np.max(ns) + 4)
    ax2.grid(True, which = 'both')
    plt.tight_layout()
    plt.show()
fernand/scipy | refs/heads/master | scipy/_lib/_testutils.py | 39 | """
Generic test utilities and decorators.
"""
from __future__ import division, print_function, absolute_import
import os
import sys
from numpy.testing import dec
__all__ = ['knownfailure_overridable', 'suppressed_stdout']
def knownfailure_overridable(msg=None):
    """Mark a test as a known failure, overridable via the environment.

    The returned decorator applies ``dec.knownfailureif`` unless the
    environment variable ``SCIPY_XFAIL`` is set to a non-zero integer, in
    which case the test function is returned unchanged and runs normally.

    @param msg: optional reason shown for the known failure
    """
    if not msg:
        msg = "Undiagnosed issues (corner cases, wrong comparison values, or otherwise)"
    msg = msg + " [Set environment variable SCIPY_XFAIL=1 to run this test nevertheless.]"
    def deco(func):
        try:
            # int(), not bool(): bool() of any non-empty string -- including
            # "0" -- is True, which both made SCIPY_XFAIL=0 run the test and
            # left the ValueError branch below unreachable.
            if int(os.environ['SCIPY_XFAIL']):
                return func
        except (ValueError, KeyError):
            pass
        return dec.knownfailureif(True, msg)(func)
    return deco
def suppressed_stdout(f):
    """Decorator that silences sys.stdout while the wrapped test runs.

    stdout is redirected to os.devnull for the duration of the call and
    restored (with the devnull handle closed) afterwards, even if the test
    raises.  The wrapper is passed through nose.tools.make_decorator so
    nose still sees the original test's name and metadata.
    """
    import nose
    def pwrapper(*arg, **kwargs):
        oldstdout = sys.stdout
        sys.stdout = open(os.devnull, 'w')
        try:
            return f(*arg, **kwargs)
        finally:
            sys.stdout.close()
            sys.stdout = oldstdout
    return nose.tools.make_decorator(f)(pwrapper)
|
whereismyjetpack/ansible | refs/heads/devel | lib/ansible/utils/module_docs_fragments/aws.py | 14 | # (c) 2014, Will Thames <will@thames.id.au>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
class ModuleDocFragment(object):
    """Reusable documentation fragment holding the common AWS module options.

    Ansible's documentation tooling splices the DOCUMENTATION YAML below
    into the docs of modules that declare this fragment; the class carries
    no runtime behavior.
    """
    # AWS only documentation fragment
    DOCUMENTATION = """
options:
  ec2_url:
    description:
      - Url to use to connect to EC2 or your Eucalyptus cloud (by default the module will use EC2 endpoints). Ignored for modules where region is required. Must be specified for all other modules if region is not used. If not set then the value of the EC2_URL environment variable, if any, is used.
    required: false
    default: null
    aliases: []
  aws_secret_key:
    description:
      - AWS secret key. If not set then the value of the AWS_SECRET_ACCESS_KEY, AWS_SECRET_KEY, or EC2_SECRET_KEY environment variable is used.
    required: false
    default: null
    aliases: [ 'ec2_secret_key', 'secret_key' ]
  aws_access_key:
    description:
      - AWS access key. If not set then the value of the AWS_ACCESS_KEY_ID, AWS_ACCESS_KEY or EC2_ACCESS_KEY environment variable is used.
    required: false
    default: null
    aliases: [ 'ec2_access_key', 'access_key' ]
  security_token:
    description:
      - AWS STS security token. If not set then the value of the AWS_SECURITY_TOKEN or EC2_SECURITY_TOKEN environment variable is used.
    required: false
    default: null
    aliases: [ 'access_token' ]
    version_added: "1.6"
  validate_certs:
    description:
      - When set to "no", SSL certificates will not be validated for boto versions >= 2.6.0.
    required: false
    default: "yes"
    choices: ["yes", "no"]
    aliases: []
    version_added: "1.5"
  profile:
    description:
      - Uses a boto profile. Only works with boto >= 2.24.0.
    required: false
    default: null
    aliases: []
    version_added: "1.6"
requirements:
  - "python >= 2.6"
  - boto
notes:
  - If parameters are not set within the module, the following
    environment variables can be used in decreasing order of precedence
    C(AWS_URL) or C(EC2_URL),
    C(AWS_ACCESS_KEY_ID) or C(AWS_ACCESS_KEY) or C(EC2_ACCESS_KEY),
    C(AWS_SECRET_ACCESS_KEY) or C(AWS_SECRET_KEY) or C(EC2_SECRET_KEY),
    C(AWS_SECURITY_TOKEN) or C(EC2_SECURITY_TOKEN),
    C(AWS_REGION) or C(EC2_REGION)
  - Ansible uses the boto configuration file (typically ~/.boto) if no
    credentials are provided. See http://boto.readthedocs.org/en/latest/boto_config_tut.html
  - C(AWS_REGION) or C(EC2_REGION) can be typically be used to specify the
    AWS region, when required, but this can also be configured in the boto config file
"""
|
anirudhvenkats/clowdflows | refs/heads/master | workflows/vipercharts/views.py | 12133432 | |
quantum13/mlbootcamp5 | refs/heads/master | qml_workdir/__init__.py | 12133432 | |
stevenewey/django | refs/heads/master | tests/m2m_intermediary/__init__.py | 12133432 | |
richpolis/siveinpy | refs/heads/master | env/lib/python2.7/site-packages/django/conf/locale/fa/__init__.py | 12133432 | |
viniciusgama/blog_gae | refs/heads/master | django/test/client.py | 73 | import urllib
from urlparse import urlparse, urlunparse, urlsplit
import sys
import os
import re
import mimetypes
import warnings
from copy import copy
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from django.conf import settings
from django.contrib.auth import authenticate, login
from django.core.handlers.base import BaseHandler
from django.core.handlers.wsgi import WSGIRequest
from django.core.signals import got_request_exception
from django.http import SimpleCookie, HttpRequest, QueryDict
from django.template import TemplateDoesNotExist
from django.test import signals
from django.utils.functional import curry
from django.utils.encoding import smart_str
from django.utils.http import urlencode
from django.utils.importlib import import_module
from django.utils.itercompat import is_iterable
from django.db import transaction, close_connection
from django.test.utils import ContextList
__all__ = ('Client', 'RequestFactory', 'encode_file', 'encode_multipart')
BOUNDARY = 'BoUnDaRyStRiNg'
MULTIPART_CONTENT = 'multipart/form-data; boundary=%s' % BOUNDARY
CONTENT_TYPE_RE = re.compile('.*; charset=([\w\d-]+);?')
class FakePayload(object):
    """
    A StringIO wrapper that enforces Content-Length-style read limits.

    Data arriving over the network cannot be seeked and cannot be read past
    its declared content length; restricting reads the same way here means
    views tested through the client can't do anything that wouldn't work in
    Real Life.
    """
    def __init__(self, content):
        self._FakePayload__content = StringIO(content)
        self._FakePayload__len = len(content)
    def read(self, num_bytes=None):
        remaining = self._FakePayload__len
        if num_bytes is None:
            # A bare read() consumes everything that is left.
            num_bytes = remaining or 0
        assert remaining >= num_bytes, "Cannot read more than the available bytes from the HTTP incoming data."
        chunk = self._FakePayload__content.read(num_bytes)
        self._FakePayload__len = remaining - num_bytes
        return chunk
class ClientHandler(BaseHandler):
    """
    A HTTP Handler that can be used for testing purposes.
    Uses the WSGI interface to compose requests, but returns
    the raw HttpResponse object
    """
    def __init__(self, enforce_csrf_checks=True, *args, **kwargs):
        # When False, CsrfViewMiddleware is bypassed via the
        # _dont_enforce_csrf_checks flag set on each request in __call__.
        self.enforce_csrf_checks = enforce_csrf_checks
        super(ClientHandler, self).__init__(*args, **kwargs)
    def __call__(self, environ):
        from django.conf import settings
        from django.core import signals
        # Set up middleware if needed. We couldn't do this earlier, because
        # settings weren't available.
        if self._request_middleware is None:
            self.load_middleware()
        signals.request_started.send(sender=self.__class__)
        try:
            request = WSGIRequest(environ)
            # sneaky little hack so that we can easily get round
            # CsrfViewMiddleware. This makes life easier, and is probably
            # required for backwards compatibility with external tests against
            # admin views.
            request._dont_enforce_csrf_checks = not self.enforce_csrf_checks
            response = self.get_response(request)
        finally:
            # Fire request_finished with the DB-closing receiver temporarily
            # disconnected so the test's database connection stays usable,
            # then reconnect it for normal operation.
            signals.request_finished.disconnect(close_connection)
            signals.request_finished.send(sender=self.__class__)
            signals.request_finished.connect(close_connection)
        return response
def store_rendered_templates(store, signal, sender, template, context, **kwargs):
    """
    Stores templates and contexts that are rendered.

    Receiver for the template_rendered signal: appends to
    store['templates'] and store['context'] (a ContextList), creating each
    key on first use.

    The context is copied so that it is an accurate representation at the time
    of rendering.
    """
    store.setdefault('templates', []).append(template)
    store.setdefault('context', ContextList()).append(copy(context))
def encode_multipart(boundary, data):
    """
    Encodes multipart POST data from a dictionary of form values.

    The key will be used as the form data name; the value will be transmitted
    as content. If the value is a file, the contents of the file will be sent
    as an application/octet-stream; otherwise, str(value) will be sent.

    Returns the full CRLF-joined multipart body as a single string,
    terminated by the closing boundary.
    """
    lines = []
    to_str = lambda s: smart_str(s, settings.DEFAULT_CHARSET)
    # Not by any means perfect, but good enough for our purposes.
    is_file = lambda thing: hasattr(thing, "read") and callable(thing.read)
    # Each bit of the multipart form data could be either a form value or a
    # file, or a *list* of form values and/or files. Remember that HTTP field
    # names can be duplicated!
    for (key, value) in data.items():
        if is_file(value):
            lines.extend(encode_file(boundary, key, value))
        elif not isinstance(value, basestring) and is_iterable(value):
            # A non-string iterable: emit one part per item under the same
            # field name.
            for item in value:
                if is_file(item):
                    lines.extend(encode_file(boundary, key, item))
                else:
                    lines.extend([
                        '--' + boundary,
                        'Content-Disposition: form-data; name="%s"' % to_str(key),
                        '',
                        to_str(item)
                    ])
        else:
            lines.extend([
                '--' + boundary,
                'Content-Disposition: form-data; name="%s"' % to_str(key),
                '',
                to_str(value)
            ])
    # Closing boundary, plus a trailing CRLF from the final empty element.
    lines.extend([
        '--' + boundary + '--',
        '',
    ])
    return '\r\n'.join(lines)
def encode_file(boundary, key, file):
    """Return the multipart body lines for one uploaded file.

    Emits the opening boundary, Content-Disposition and Content-Type
    headers, a blank separator line, and the file contents (the file is
    read to EOF here).  The content type comes from the file object's
    content_type attribute if present, otherwise is guessed from its name,
    falling back to application/octet-stream.
    """
    to_str = lambda s: smart_str(s, settings.DEFAULT_CHARSET)
    if hasattr(file, 'content_type'):
        content_type = file.content_type
    else:
        content_type = mimetypes.guess_type(file.name)[0]
    if content_type is None:
        content_type = 'application/octet-stream'
    return [
        '--' + boundary,
        'Content-Disposition: form-data; name="%s"; filename="%s"' \
            % (to_str(key), to_str(os.path.basename(file.name))),
        'Content-Type: %s' % content_type,
        '',
        file.read()
    ]
class RequestFactory(object):
    """
    Class that lets you create mock Request objects for use in testing.

    Usage:

    rf = RequestFactory()
    get_request = rf.get('/hello/')
    post_request = rf.post('/submit/', {'foo': 'bar'})

    Once you have a request object you can pass it to any view function,
    just as if that view had been hooked up using a URLconf.
    """
    def __init__(self, **defaults):
        # defaults: extra WSGI environ entries applied to every request
        # built by this factory (per-call **extra entries override them).
        self.defaults = defaults
        self.cookies = SimpleCookie()
        self.errors = StringIO()
    def _base_environ(self, **request):
        """
        The base environment for a request.
        """
        environ = {
            'HTTP_COOKIE': self.cookies.output(header='', sep='; '),
            'PATH_INFO': '/',
            'QUERY_STRING': '',
            'REMOTE_ADDR': '127.0.0.1',
            'REQUEST_METHOD': 'GET',
            'SCRIPT_NAME': '',
            'SERVER_NAME': 'testserver',
            'SERVER_PORT': '80',
            'SERVER_PROTOCOL': 'HTTP/1.1',
            'wsgi.version': (1,0),
            'wsgi.url_scheme': 'http',
            'wsgi.errors': self.errors,
            'wsgi.multiprocess': True,
            'wsgi.multithread': False,
            'wsgi.run_once': False,
        }
        # Factory-wide defaults first, then per-call overrides win.
        environ.update(self.defaults)
        environ.update(request)
        return environ
    def request(self, **request):
        "Construct a generic request object."
        return WSGIRequest(self._base_environ(**request))
    def _get_path(self, parsed):
        # parsed is a 6-tuple from urlparse; [2] is the path, [3] the
        # (rarely used) URL parameters component.
        # If there are parameters, add them
        if parsed[3]:
            return urllib.unquote(parsed[2] + ";" + parsed[3])
        else:
            return urllib.unquote(parsed[2])
    def get(self, path, data={}, **extra):
        "Construct a GET request"
        # NOTE(review): the mutable default ``data={}`` (used throughout
        # this class) is safe only because it is never mutated here.
        parsed = urlparse(path)
        r = {
            'CONTENT_TYPE':    'text/html; charset=utf-8',
            'PATH_INFO':       self._get_path(parsed),
            'QUERY_STRING':    urlencode(data, doseq=True) or parsed[4],
            'REQUEST_METHOD': 'GET',
            'wsgi.input':      FakePayload('')
        }
        r.update(extra)
        return self.request(**r)
    def post(self, path, data={}, content_type=MULTIPART_CONTENT,
             **extra):
        "Construct a POST request."
        if content_type is MULTIPART_CONTENT:
            post_data = encode_multipart(BOUNDARY, data)
        else:
            # Encode the content so that the byte representation is correct.
            match = CONTENT_TYPE_RE.match(content_type)
            if match:
                charset = match.group(1)
            else:
                charset = settings.DEFAULT_CHARSET
            post_data = smart_str(data, encoding=charset)
        parsed = urlparse(path)
        r = {
            'CONTENT_LENGTH': len(post_data),
            'CONTENT_TYPE':   content_type,
            'PATH_INFO':      self._get_path(parsed),
            'QUERY_STRING':   parsed[4],
            'REQUEST_METHOD': 'POST',
            'wsgi.input':     FakePayload(post_data),
        }
        r.update(extra)
        return self.request(**r)
    def head(self, path, data={}, **extra):
        "Construct a HEAD request."
        parsed = urlparse(path)
        r = {
            'CONTENT_TYPE':    'text/html; charset=utf-8',
            'PATH_INFO':       self._get_path(parsed),
            'QUERY_STRING':    urlencode(data, doseq=True) or parsed[4],
            'REQUEST_METHOD': 'HEAD',
            'wsgi.input':      FakePayload('')
        }
        r.update(extra)
        return self.request(**r)
    def options(self, path, data={}, **extra):
        "Constrict an OPTIONS request"
        parsed = urlparse(path)
        r = {
            'PATH_INFO':       self._get_path(parsed),
            'QUERY_STRING':    urlencode(data, doseq=True) or parsed[4],
            'REQUEST_METHOD': 'OPTIONS',
            'wsgi.input':      FakePayload('')
        }
        r.update(extra)
        return self.request(**r)
    def put(self, path, data={}, content_type=MULTIPART_CONTENT,
            **extra):
        "Construct a PUT request."
        if content_type is MULTIPART_CONTENT:
            post_data = encode_multipart(BOUNDARY, data)
        else:
            post_data = data
        # Make `data` into a querystring only if it's not already a string. If
        # it is a string, we'll assume that the caller has already encoded it.
        query_string = None
        if not isinstance(data, basestring):
            query_string = urlencode(data, doseq=True)
        parsed = urlparse(path)
        r = {
            'CONTENT_LENGTH': len(post_data),
            'CONTENT_TYPE':   content_type,
            'PATH_INFO':      self._get_path(parsed),
            'QUERY_STRING':   query_string or parsed[4],
            'REQUEST_METHOD': 'PUT',
            'wsgi.input':     FakePayload(post_data),
        }
        r.update(extra)
        return self.request(**r)
    def delete(self, path, data={}, **extra):
        "Construct a DELETE request."
        parsed = urlparse(path)
        r = {
            'PATH_INFO':       self._get_path(parsed),
            'QUERY_STRING':    urlencode(data, doseq=True) or parsed[4],
            'REQUEST_METHOD': 'DELETE',
            'wsgi.input':      FakePayload('')
        }
        r.update(extra)
        return self.request(**r)
class Client(RequestFactory):
"""
A class that can act as a client for testing purposes.
It allows the user to compose GET and POST requests, and
obtain the response that the server gave to those requests.
The server Response objects are annotated with the details
of the contexts and templates that were rendered during the
process of serving the request.
Client objects are stateful - they will retain cookie (and
thus session) details for the lifetime of the Client instance.
This is not intended as a replacement for Twill/Selenium or
the like - it is here to allow testing against the
contexts and templates produced by a view, rather than the
HTML rendered to the end-user.
"""
def __init__(self, enforce_csrf_checks=False, **defaults):
super(Client, self).__init__(**defaults)
self.handler = ClientHandler(enforce_csrf_checks)
self.exc_info = None
def store_exc_info(self, **kwargs):
"""
Stores exceptions when they are generated by a view.
"""
self.exc_info = sys.exc_info()
def _session(self):
"""
Obtains the current session variables.
"""
if 'django.contrib.sessions' in settings.INSTALLED_APPS:
engine = import_module(settings.SESSION_ENGINE)
cookie = self.cookies.get(settings.SESSION_COOKIE_NAME, None)
if cookie:
return engine.SessionStore(cookie.value)
return {}
session = property(_session)
def request(self, **request):
"""
The master request method. Composes the environment dictionary
and passes to the handler, returning the result of the handler.
Assumes defaults for the query environment, which can be overridden
using the arguments to the request.
"""
environ = self._base_environ(**request)
# Curry a data dictionary into an instance of the template renderer
# callback function.
data = {}
on_template_render = curry(store_rendered_templates, data)
signals.template_rendered.connect(on_template_render, dispatch_uid="template-render")
# Capture exceptions created by the handler.
got_request_exception.connect(self.store_exc_info, dispatch_uid="request-exception")
try:
try:
response = self.handler(environ)
except TemplateDoesNotExist, e:
# If the view raises an exception, Django will attempt to show
# the 500.html template. If that template is not available,
# we should ignore the error in favor of re-raising the
# underlying exception that caused the 500 error. Any other
# template found to be missing during view error handling
# should be reported as-is.
if e.args != ('500.html',):
raise
# Look for a signalled exception, clear the current context
# exception data, then re-raise the signalled exception.
# Also make sure that the signalled exception is cleared from
# the local cache!
if self.exc_info:
exc_info = self.exc_info
self.exc_info = None
raise exc_info[1], None, exc_info[2]
# Save the client and request that stimulated the response.
response.client = self
response.request = request
# Add any rendered template detail to the response.
response.templates = data.get("templates", [])
response.context = data.get("context")
# Flatten a single context. Not really necessary anymore thanks to
# the __getattr__ flattening in ContextList, but has some edge-case
# backwards-compatibility implications.
if response.context and len(response.context) == 1:
response.context = response.context[0]
# Provide a backwards-compatible (but pending deprecation) response.template
def _get_template(self):
warnings.warn("response.template is deprecated; use response.templates instead (which is always a list)",
PendingDeprecationWarning, stacklevel=2)
if not self.templates:
return None
elif len(self.templates) == 1:
return self.templates[0]
return self.templates
response.__class__.template = property(_get_template)
# Update persistent cookie data.
if response.cookies:
self.cookies.update(response.cookies)
return response
finally:
signals.template_rendered.disconnect(dispatch_uid="template-render")
got_request_exception.disconnect(dispatch_uid="request-exception")
def get(self, path, data={}, follow=False, **extra):
"""
Requests a response from the server using GET.
"""
response = super(Client, self).get(path, data=data, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def post(self, path, data={}, content_type=MULTIPART_CONTENT,
follow=False, **extra):
"""
Requests a response from the server using POST.
"""
response = super(Client, self).post(path, data=data, content_type=content_type, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def head(self, path, data={}, follow=False, **extra):
"""
Request a response from the server using HEAD.
"""
response = super(Client, self).head(path, data=data, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def options(self, path, data={}, follow=False, **extra):
"""
Request a response from the server using OPTIONS.
"""
response = super(Client, self).options(path, data=data, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def put(self, path, data={}, content_type=MULTIPART_CONTENT,
follow=False, **extra):
"""
Send a resource to the server using PUT.
"""
response = super(Client, self).put(path, data=data, content_type=content_type, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def delete(self, path, data={}, follow=False, **extra):
"""
Send a DELETE request to the server.
"""
response = super(Client, self).delete(path, data=data, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
    def login(self, **credentials):
        """
        Sets the Factory to appear as if it has successfully logged into a site.
        Returns True if login is possible; False if the provided credentials
        are incorrect, or the user is inactive, or if the sessions framework is
        not available.
        """
        # authenticate() is expected to return a user object on success and
        # a falsy value otherwise (standard Django auth contract).
        user = authenticate(**credentials)
        if user and user.is_active \
                and 'django.contrib.sessions' in settings.INSTALLED_APPS:
            engine = import_module(settings.SESSION_ENGINE)
            # Create a fake request to store login details.
            request = HttpRequest()
            if self.session:
                # Reuse the client's existing session if one is active.
                request.session = self.session
            else:
                request.session = engine.SessionStore()
            login(request, user)
            # Save the session values.
            request.session.save()
            # Set the cookie to represent the session.
            session_cookie = settings.SESSION_COOKIE_NAME
            self.cookies[session_cookie] = request.session.session_key
            # Morsel attributes mirroring how the real session cookie is set.
            cookie_data = {
                'max-age': None,
                'path': '/',
                'domain': settings.SESSION_COOKIE_DOMAIN,
                'secure': settings.SESSION_COOKIE_SECURE or None,
                'expires': None,
            }
            self.cookies[session_cookie].update(cookie_data)
            return True
        else:
            return False
    def logout(self):
        """
        Removes the authenticated user's cookies and session object.
        Causes the authenticated user to be logged out.
        """
        session = import_module(settings.SESSION_ENGINE).SessionStore()
        session_cookie = self.cookies.get(settings.SESSION_COOKIE_NAME)
        if session_cookie:
            # Delete the server-side session record referenced by the cookie.
            session.delete(session_key=session_cookie.value)
        # Drop all client-side cookies, including the session cookie.
        self.cookies = SimpleCookie()
    def _handle_redirects(self, response, **extra):
        "Follows any redirects by requesting responses from the server using GET."
        response.redirect_chain = []
        # 301/302/303/307 are the redirect statuses this client follows.
        while response.status_code in (301, 302, 303, 307):
            url = response['Location']
            scheme, netloc, path, query, fragment = urlsplit(url)
            # Keep one chain object alive across hops so the final response
            # carries the full (url, status_code) history.
            redirect_chain = response.redirect_chain
            redirect_chain.append((url, response.status_code))
            if scheme:
                extra['wsgi.url_scheme'] = scheme
            # The test client doesn't handle external links,
            # but since the situation is simulated in test_client,
            # we fake things here by ignoring the netloc portion of the
            # redirected URL.
            response = self.get(path, QueryDict(query), follow=False, **extra)
            response.redirect_chain = redirect_chain
            # Prevent loops
            if response.redirect_chain[-1] in response.redirect_chain[0:-1]:
                break
        return response
|
kevinwilde/WildeBot | refs/heads/master | src/mybot/Lib/site-packages/click/core.py | 63 | import os
import sys
from contextlib import contextmanager
from itertools import repeat
from functools import update_wrapper
from .types import convert_type, IntRange, BOOL
from .utils import make_str, make_default_short_help, echo, get_os_args
from .exceptions import ClickException, UsageError, BadParameter, Abort, \
MissingParameter
from .termui import prompt, confirm
from .formatting import HelpFormatter, join_options
from .parser import OptionParser, split_opt
from .globals import push_context, pop_context
from ._compat import PY2, isidentifier, iteritems
from ._unicodefun import _check_for_unicode_literals, _verify_python3_env
# Sentinel distinguishing "no value supplied" from an explicit None.
_missing = object()
# Metavar shown in usage lines for a single subcommand slot...
SUBCOMMAND_METAVAR = 'COMMAND [ARGS]...'
# ...and for chained multi commands, which accept several in sequence.
SUBCOMMANDS_METAVAR = 'COMMAND1 [ARGS]... [COMMAND2 [ARGS]...]...'
def _bashcomplete(cmd, prog_name, complete_var=None):
    """Internal handler for the bash completion support."""
    if complete_var is None:
        # Derive the env var from the program name, e.g.
        # "my-tool" -> "_MY_TOOL_COMPLETE".
        complete_var = '_%s_COMPLETE' % (prog_name.replace('-', '_')).upper()
    complete_instr = os.environ.get(complete_var)
    if not complete_instr:
        # Not invoked by the completion machinery; run the program normally.
        return
    from ._bashcomplete import bashcomplete
    if bashcomplete(cmd, prog_name, complete_var, complete_instr):
        # Completion output was produced; stop instead of executing the CLI.
        sys.exit(1)
def _check_multicommand(base_command, cmd_name, cmd, register=False):
    """Reject nesting a multi command under a chained multi command.

    :param base_command: the (potential) parent command.
    :param cmd_name: the name the subcommand is registered/resolved under.
    :param cmd: the subcommand being added or looked up.
    :param register: True when called at registration time, which selects
                     a slightly different error hint.
    """
    # Only chained parents with MultiCommand children are problematic.
    if not base_command.chain or not isinstance(cmd, MultiCommand):
        return
    if register:
        hint = 'It is not possible to add multi commands as children to ' \
               'another multi command that is in chain mode'
    else:
        hint = 'Found a multi command as subcommand to a multi command ' \
               'that is in chain mode. This is not supported'
    raise RuntimeError('%s. Command "%s" is set to chain and "%s" was '
                       'added as subcommand but it in itself is a '
                       'multi command. ("%s" is a %s within a chained '
                       '%s named "%s"). This restriction was supposed to '
                       'be lifted in 6.0 but the fix was flawed. This '
                       'will be fixed in Click 7.0' % (
                           hint, base_command.name, cmd_name,
                           cmd_name, cmd.__class__.__name__,
                           base_command.__class__.__name__,
                           base_command.name))
def batch(iterable, batch_size):
    """Group *iterable* into tuples of length *batch_size*.

    Trailing items that do not fill a complete tuple are discarded
    (standard ``zip`` truncation semantics).
    """
    source = iter(iterable)
    # Zipping the same iterator with itself pulls batch_size items per tuple.
    columns = (source,) * batch_size
    return list(zip(*columns))
def invoke_param_callback(callback, ctx, param, value):
    """Call a parameter *callback*, adapting legacy two-argument callbacks.

    Modern callbacks (click >= 2.0) take ``(ctx, param, value)``; older
    ones took ``(ctx, value)`` and are detected by their arg count.
    """
    code = getattr(callback, '__code__', None)
    arg_count = getattr(code, 'co_argcount', 3)
    if arg_count >= 3:
        return callback(ctx, param, value)
    # Legacy (ctx, value) signature: warn, then drop the param argument.
    from warnings import warn
    warn(Warning('Invoked legacy parameter callback "%s". The new '
                 'signature for such callbacks starting with '
                 'click 2.0 is (ctx, param, value).'
                 % callback), stacklevel=3)
    return callback(ctx, value)
@contextmanager
def augment_usage_errors(ctx, param=None):
    """Context manager that attaches extra information to exceptions that
    fly.
    """
    try:
        yield
    except BadParameter as e:
        # Fill in the context and offending parameter only when the raiser
        # did not already supply them.
        if e.ctx is None:
            e.ctx = ctx
        if param is not None and e.param is None:
            e.param = param
        raise
    except UsageError as e:
        if e.ctx is None:
            e.ctx = ctx
        raise
def iter_params_for_processing(invocation_order, declaration_order):
    """Order parameters for processing.

    Eager parameters come first; within each eagerness group, parameters
    that appeared on the command line keep their invocation order, and
    the rest keep declaration order (the sort is stable).
    """
    def sort_key(item):
        if item in invocation_order:
            position = invocation_order.index(item)
        else:
            # Never invoked: sort after everything that was.
            position = float('inf')
        return (not item.is_eager, position)
    return sorted(declaration_order, key=sort_key)
class Context(object):
"""The context is a special internal object that holds state relevant
for the script execution at every single level. It's normally invisible
to commands unless they opt-in to getting access to it.
The context is useful as it can pass internal objects around and can
control special execution features such as reading data from
environment variables.
A context can be used as context manager in which case it will call
:meth:`close` on teardown.
.. versionadded:: 2.0
Added the `resilient_parsing`, `help_option_names`,
`token_normalize_func` parameters.
.. versionadded:: 3.0
Added the `allow_extra_args` and `allow_interspersed_args`
parameters.
.. versionadded:: 4.0
Added the `color`, `ignore_unknown_options`, and
`max_content_width` parameters.
:param command: the command class for this context.
:param parent: the parent context.
:param info_name: the info name for this invocation. Generally this
is the most descriptive name for the script or
command. For the toplevel script it is usually
the name of the script, for commands below it it's
the name of the script.
:param obj: an arbitrary object of user data.
:param auto_envvar_prefix: the prefix to use for automatic environment
variables. If this is `None` then reading
from environment variables is disabled. This
does not affect manually set environment
variables which are always read.
:param default_map: a dictionary (like object) with default values
for parameters.
:param terminal_width: the width of the terminal. The default is
inherit from parent context. If no context
defines the terminal width then auto
detection will be applied.
:param max_content_width: the maximum width for content rendered by
Click (this currently only affects help
pages). This defaults to 80 characters if
not overridden. In other words: even if the
terminal is larger than that, Click will not
format things wider than 80 characters by
default. In addition to that, formatters might
add some safety mapping on the right.
:param resilient_parsing: if this flag is enabled then Click will
parse without any interactivity or callback
invocation. This is useful for implementing
things such as completion support.
:param allow_extra_args: if this is set to `True` then extra arguments
at the end will not raise an error and will be
kept on the context. The default is to inherit
from the command.
:param allow_interspersed_args: if this is set to `False` then options
and arguments cannot be mixed. The
default is to inherit from the command.
:param ignore_unknown_options: instructs click to ignore options it does
not know and keeps them for later
processing.
:param help_option_names: optionally a list of strings that define how
the default help parameter is named. The
default is ``['--help']``.
:param token_normalize_func: an optional function that is used to
normalize tokens (options, choices,
etc.). This for instance can be used to
implement case insensitive behavior.
:param color: controls if the terminal supports ANSI colors or not. The
default is autodetection. This is only needed if ANSI
codes are used in texts that Click prints which is by
default not the case. This for instance would affect
help output.
"""
def __init__(self, command, parent=None, info_name=None, obj=None,
auto_envvar_prefix=None, default_map=None,
terminal_width=None, max_content_width=None,
resilient_parsing=False, allow_extra_args=None,
allow_interspersed_args=None,
ignore_unknown_options=None, help_option_names=None,
token_normalize_func=None, color=None):
#: the parent context or `None` if none exists.
self.parent = parent
#: the :class:`Command` for this context.
self.command = command
#: the descriptive information name
self.info_name = info_name
#: the parsed parameters except if the value is hidden in which
#: case it's not remembered.
self.params = {}
#: the leftover arguments.
self.args = []
#: protected arguments. These are arguments that are prepended
#: to `args` when certain parsing scenarios are encountered but
#: must be never propagated to another arguments. This is used
#: to implement nested parsing.
self.protected_args = []
if obj is None and parent is not None:
obj = parent.obj
#: the user object stored.
self.obj = obj
self._meta = getattr(parent, 'meta', {})
#: A dictionary (-like object) with defaults for parameters.
if default_map is None \
and parent is not None \
and parent.default_map is not None:
default_map = parent.default_map.get(info_name)
self.default_map = default_map
#: This flag indicates if a subcommand is going to be executed. A
#: group callback can use this information to figure out if it's
#: being executed directly or because the execution flow passes
#: onwards to a subcommand. By default it's None, but it can be
#: the name of the subcommand to execute.
#:
#: If chaining is enabled this will be set to ``'*'`` in case
#: any commands are executed. It is however not possible to
#: figure out which ones. If you require this knowledge you
#: should use a :func:`resultcallback`.
self.invoked_subcommand = None
if terminal_width is None and parent is not None:
terminal_width = parent.terminal_width
#: The width of the terminal (None is autodetection).
self.terminal_width = terminal_width
if max_content_width is None and parent is not None:
max_content_width = parent.max_content_width
#: The maximum width of formatted content (None implies a sensible
#: default which is 80 for most things).
self.max_content_width = max_content_width
if allow_extra_args is None:
allow_extra_args = command.allow_extra_args
#: Indicates if the context allows extra args or if it should
#: fail on parsing.
#:
#: .. versionadded:: 3.0
self.allow_extra_args = allow_extra_args
if allow_interspersed_args is None:
allow_interspersed_args = command.allow_interspersed_args
#: Indicates if the context allows mixing of arguments and
#: options or not.
#:
#: .. versionadded:: 3.0
self.allow_interspersed_args = allow_interspersed_args
if ignore_unknown_options is None:
ignore_unknown_options = command.ignore_unknown_options
#: Instructs click to ignore options that a command does not
#: understand and will store it on the context for later
#: processing. This is primarily useful for situations where you
#: want to call into external programs. Generally this pattern is
#: strongly discouraged because it's not possibly to losslessly
#: forward all arguments.
#:
#: .. versionadded:: 4.0
self.ignore_unknown_options = ignore_unknown_options
if help_option_names is None:
if parent is not None:
help_option_names = parent.help_option_names
else:
help_option_names = ['--help']
#: The names for the help options.
self.help_option_names = help_option_names
if token_normalize_func is None and parent is not None:
token_normalize_func = parent.token_normalize_func
#: An optional normalization function for tokens. This is
#: options, choices, commands etc.
self.token_normalize_func = token_normalize_func
#: Indicates if resilient parsing is enabled. In that case Click
#: will do its best to not cause any failures.
self.resilient_parsing = resilient_parsing
# If there is no envvar prefix yet, but the parent has one and
# the command on this level has a name, we can expand the envvar
# prefix automatically.
if auto_envvar_prefix is None:
if parent is not None \
and parent.auto_envvar_prefix is not None and \
self.info_name is not None:
auto_envvar_prefix = '%s_%s' % (parent.auto_envvar_prefix,
self.info_name.upper())
else:
self.auto_envvar_prefix = auto_envvar_prefix.upper()
self.auto_envvar_prefix = auto_envvar_prefix
if color is None and parent is not None:
color = parent.color
#: Controls if styling output is wanted or not.
self.color = color
self._close_callbacks = []
self._depth = 0
    def __enter__(self):
        # Track ``with`` nesting depth and make this the current context
        # for :func:`get_current_context`.
        self._depth += 1
        push_context(self)
        return self
    def __exit__(self, exc_type, exc_value, tb):
        self._depth -= 1
        # Run close callbacks only when the outermost ``with`` exits;
        # the context is popped from the stack on every exit.
        if self._depth == 0:
            self.close()
        pop_context()
    @contextmanager
    def scope(self, cleanup=True):
        """This helper method can be used with the context object to promote
        it to the current thread local (see :func:`get_current_context`).
        The default behavior of this is to invoke the cleanup functions which
        can be disabled by setting `cleanup` to `False`. The cleanup
        functions are typically used for things such as closing file handles.
        If the cleanup is intended the context object can also be directly
        used as a context manager.
        Example usage::
            with ctx.scope():
                assert get_current_context() is ctx
        This is equivalent::
            with ctx:
                assert get_current_context() is ctx
        .. versionadded:: 5.0
        :param cleanup: controls if the cleanup functions should be run or
                        not. The default is to run these functions. In
                        some situations the context only wants to be
                        temporarily pushed in which case this can be disabled.
                        Nested pushes automatically defer the cleanup.
        """
        if not cleanup:
            # Pre-bump the depth so __exit__ never reaches zero and thus
            # never triggers close() for this push.
            self._depth += 1
        try:
            with self as rv:
                yield rv
        finally:
            if not cleanup:
                self._depth -= 1
    @property
    def meta(self):
        """This is a dictionary which is shared with all the contexts
        that are nested. It exists so that click utiltiies can store some
        state here if they need to. It is however the responsibility of
        that code to manage this dictionary well.
        The keys are supposed to be unique dotted strings. For instance
        module paths are a good choice for it. What is stored in there is
        irrelevant for the operation of click. However what is important is
        that code that places data here adheres to the general semantics of
        the system.
        Example usage::
            LANG_KEY = __name__ + '.lang'
            def set_language(value):
                ctx = get_current_context()
                ctx.meta[LANG_KEY] = value
            def get_language():
                return get_current_context().meta.get(LANG_KEY, 'en_US')
        .. versionadded:: 5.0
        """
        # _meta is inherited from the parent context in __init__, so all
        # nested contexts share one dictionary.
        return self._meta
    def make_formatter(self):
        """Creates the formatter for the help and usage output."""
        # Width limits come from this context (possibly inherited from
        # the parent in __init__).
        return HelpFormatter(width=self.terminal_width,
                             max_width=self.max_content_width)
def call_on_close(self, f):
"""This decorator remembers a function as callback that should be
executed when the context tears down. This is most useful to bind
resource handling to the script execution. For instance, file objects
opened by the :class:`File` type will register their close callbacks
here.
:param f: the function to execute on teardown.
"""
self._close_callbacks.append(f)
return f
def close(self):
"""Invokes all close callbacks."""
for cb in self._close_callbacks:
cb()
self._close_callbacks = []
@property
def command_path(self):
"""The computed command path. This is used for the ``usage``
information on the help page. It's automatically created by
combining the info names of the chain of contexts to the root.
"""
rv = ''
if self.info_name is not None:
rv = self.info_name
if self.parent is not None:
rv = self.parent.command_path + ' ' + rv
return rv.lstrip()
def find_root(self):
"""Finds the outermost context."""
node = self
while node.parent is not None:
node = node.parent
return node
def find_object(self, object_type):
"""Finds the closest object of a given type."""
node = self
while node is not None:
if isinstance(node.obj, object_type):
return node.obj
node = node.parent
def ensure_object(self, object_type):
"""Like :meth:`find_object` but sets the innermost object to a
new instance of `object_type` if it does not exist.
"""
rv = self.find_object(object_type)
if rv is None:
self.obj = rv = object_type()
return rv
def lookup_default(self, name):
"""Looks up the default for a parameter name. This by default
looks into the :attr:`default_map` if available.
"""
if self.default_map is not None:
rv = self.default_map.get(name)
if callable(rv):
rv = rv()
return rv
    def fail(self, message):
        """Aborts the execution of the program with a specific error
        message.
        :param message: the error message to fail with.
        """
        # UsageError carries this context so the handler can print usage.
        raise UsageError(message, self)
    def abort(self):
        """Aborts the script."""
        # Abort is caught by BaseCommand.main, which prints "Aborted!".
        raise Abort()
    def exit(self, code=0):
        """Exits the application with a given exit code."""
        # Raises SystemExit; intentionally not caught anywhere in Click.
        sys.exit(code)
    def get_usage(self):
        """Helper method to get formatted usage string for the current
        context and command.
        """
        # Delegates to the command, passing this context for formatting.
        return self.command.get_usage(self)
    def get_help(self):
        """Helper method to get formatted help page for the current
        context and command.
        """
        # Delegates to the command, passing this context for formatting.
        return self.command.get_help(self)
    def invoke(*args, **kwargs):
        """Invokes a command callback in exactly the way it expects. There
        are two ways to invoke this method:
        1. the first argument can be a callback and all other arguments and
           keyword arguments are forwarded directly to the function.
        2. the first argument is a click command object. In that case all
           arguments are forwarded as well but proper click parameters
           (options and click arguments) must be keyword arguments and Click
           will fill in defaults.
        Note that before Click 3.2 keyword arguments were not properly filled
        in against the intention of this code and no context was created. For
        more information about this change and why it was done in a bugfix
        release see :ref:`upgrade-to-3.2`.
        """
        # ``*args`` signature so "self" and "callback" are peeled off
        # positionally and the rest passes straight through to the callback.
        self, callback = args[:2]
        ctx = self
        # It's also possible to invoke another command which might or
        # might not have a callback. In that case we also fill
        # in defaults and make a new context for this command.
        if isinstance(callback, Command):
            other_cmd = callback
            callback = other_cmd.callback
            ctx = Context(other_cmd, info_name=other_cmd.name, parent=self)
            if callback is None:
                raise TypeError('The given command does not have a '
                                'callback that can be invoked.')
            # Fill in defaults for any parameter the caller did not supply.
            for param in other_cmd.params:
                if param.name not in kwargs and param.expose_value:
                    kwargs[param.name] = param.get_default(ctx)
        args = args[2:]
        with augment_usage_errors(self):
            # Push ctx so get_current_context() works inside the callback.
            with ctx:
                return callback(*args, **kwargs)
    def forward(*args, **kwargs):
        """Similar to :meth:`invoke` but fills in default keyword
        arguments from the current context if the other command expects
        it. This cannot invoke callbacks directly, only other commands.
        """
        self, cmd = args[:2]
        # It's also possible to invoke another command which might or
        # might not have a callback.
        if not isinstance(cmd, Command):
            raise TypeError('Callback is not a command.')
        # Copy this context's parsed parameter values as keyword defaults
        # unless the caller explicitly provided an override.
        for param in self.params:
            if param not in kwargs:
                kwargs[param] = self.params[param]
        return self.invoke(cmd, **kwargs)
class BaseCommand(object):
"""The base command implements the minimal API contract of commands.
Most code will never use this as it does not implement a lot of useful
functionality but it can act as the direct subclass of alternative
parsing methods that do not depend on the Click parser.
For instance, this can be used to bridge Click and other systems like
argparse or docopt.
Because base commands do not implement a lot of the API that other
parts of Click take for granted, they are not supported for all
operations. For instance, they cannot be used with the decorators
usually and they have no built-in callback system.
.. versionchanged:: 2.0
Added the `context_settings` parameter.
:param name: the name of the command to use unless a group overrides it.
:param context_settings: an optional dictionary with defaults that are
passed to the context object.
"""
#: the default for the :attr:`Context.allow_extra_args` flag.
allow_extra_args = False
#: the default for the :attr:`Context.allow_interspersed_args` flag.
allow_interspersed_args = True
#: the default for the :attr:`Context.ignore_unknown_options` flag.
ignore_unknown_options = False
def __init__(self, name, context_settings=None):
#: the name the command thinks it has. Upon registering a command
#: on a :class:`Group` the group will default the command name
#: with this information. You should instead use the
#: :class:`Context`\'s :attr:`~Context.info_name` attribute.
self.name = name
if context_settings is None:
context_settings = {}
#: an optional dictionary with defaults passed to the context.
self.context_settings = context_settings
    def get_usage(self, ctx):
        # Usage rendering needs the full Command machinery; base commands
        # deliberately opt out.
        raise NotImplementedError('Base commands cannot get usage')
    def get_help(self, ctx):
        # Help rendering needs the full Command machinery; base commands
        # deliberately opt out.
        raise NotImplementedError('Base commands cannot get help')
    def make_context(self, info_name, args, parent=None, **extra):
        """This function when given an info name and arguments will kick
        off the parsing and create a new :class:`Context`. It does not
        invoke the actual command callback though.
        :param info_name: the info name for this invokation. Generally this
                          is the most descriptive name for the script or
                          command. For the toplevel script it's usually
                          the name of the script, for commands below it it's
                          the name of the script.
        :param args: the arguments to parse as list of strings.
        :param parent: the parent context if available.
        :param extra: extra keyword arguments forwarded to the context
                      constructor.
        """
        # context_settings given at construction time act as defaults for
        # anything not passed explicitly via **extra.
        for key, value in iteritems(self.context_settings):
            if key not in extra:
                extra[key] = value
        ctx = Context(self, info_name=info_name, parent=parent, **extra)
        # cleanup=False: the context is only pushed temporarily for parsing;
        # close callbacks are deferred to whoever runs the context later.
        with ctx.scope(cleanup=False):
            self.parse_args(ctx, args)
        return ctx
    def parse_args(self, ctx, args):
        """Given a context and a list of arguments this creates the parser
        and parses the arguments, then modifies the context as necessary.
        This is automatically invoked by :meth:`make_context`.
        """
        # Subclasses (e.g. Command) supply the actual parsing logic.
        raise NotImplementedError('Base commands do not know how to parse '
                                  'arguments.')
    def invoke(self, ctx):
        """Given a context, this invokes the command. The default
        implementation is raising a not implemented error.
        """
        raise NotImplementedError('Base commands are not invokable by default')
    def main(self, args=None, prog_name=None, complete_var=None,
             standalone_mode=True, **extra):
        """This is the way to invoke a script with all the bells and
        whistles as a command line application. This will always terminate
        the application after a call. If this is not wanted, ``SystemExit``
        needs to be caught.
        This method is also available by directly calling the instance of
        a :class:`Command`.
        .. versionadded:: 3.0
           Added the `standalone_mode` flag to control the standalone mode.
        :param args: the arguments that should be used for parsing. If not
                     provided, ``sys.argv[1:]`` is used.
        :param prog_name: the program name that should be used. By default
                          the program name is constructed by taking the file
                          name from ``sys.argv[0]``.
        :param complete_var: the environment variable that controls the
                             bash completion support. The default is
                             ``"_<prog_name>_COMPLETE"`` with prog name in
                             uppercase.
        :param standalone_mode: the default behavior is to invoke the script
                                in standalone mode. Click will then
                                handle exceptions and convert them into
                                error messages and the function will never
                                return but shut down the interpreter. If
                                this is set to `False` they will be
                                propagated to the caller and the return
                                value of this function is the return value
                                of :meth:`invoke`.
        :param extra: extra keyword arguments are forwarded to the context
                      constructor. See :class:`Context` for more information.
        """
        # If we are in Python 3, we will verify that the environment is
        # sane at this point or reject further execution to avoid a
        # broken script.
        if not PY2:
            _verify_python3_env()
        else:
            _check_for_unicode_literals()
        if args is None:
            args = get_os_args()
        else:
            # Defensive copy: parsing mutates the argument list.
            args = list(args)
        if prog_name is None:
            prog_name = make_str(os.path.basename(
                sys.argv and sys.argv[0] or __file__))
        # Hook for the Bash completion. This only activates if the Bash
        # completion is actually enabled, otherwise this is quite a fast
        # noop.
        _bashcomplete(self, prog_name, complete_var)
        try:
            try:
                with self.make_context(prog_name, args, **extra) as ctx:
                    rv = self.invoke(ctx)
                    if not standalone_mode:
                        return rv
                    ctx.exit()
            except (EOFError, KeyboardInterrupt):
                # Treat Ctrl-C / Ctrl-D like an explicit abort.
                echo(file=sys.stderr)
                raise Abort()
            except ClickException as e:
                if not standalone_mode:
                    raise
                e.show()
                sys.exit(e.exit_code)
        # Outer handler: catches Abort raised above or by user code.
        except Abort:
            if not standalone_mode:
                raise
            echo('Aborted!', file=sys.stderr)
            sys.exit(1)
    def __call__(self, *args, **kwargs):
        """Alias for :meth:`main`."""
        # Calling the command object runs it as a CLI entry point.
        return self.main(*args, **kwargs)
class Command(BaseCommand):
"""Commands are the basic building block of command line interfaces in
Click. A basic command handles command line parsing and might dispatch
more parsing to commands nested below it.
.. versionchanged:: 2.0
Added the `context_settings` parameter.
:param name: the name of the command to use unless a group overrides it.
:param context_settings: an optional dictionary with defaults that are
passed to the context object.
:param callback: the callback to invoke. This is optional.
:param params: the parameters to register with this command. This can
be either :class:`Option` or :class:`Argument` objects.
:param help: the help string to use for this command.
:param epilog: like the help string but it's printed at the end of the
help page after everything else.
:param short_help: the short help to use for this command. This is
shown on the command listing of the parent command.
:param add_help_option: by default each command registers a ``--help``
option. This can be disabled by this parameter.
"""
    def __init__(self, name, context_settings=None, callback=None,
                 params=None, help=None, epilog=None, short_help=None,
                 options_metavar='[OPTIONS]', add_help_option=True):
        BaseCommand.__init__(self, name, context_settings)
        #: the callback to execute when the command fires. This might be
        #: `None` in which case nothing happens.
        self.callback = callback
        #: the list of parameters for this command in the order they
        #: should show up in the help page and execute. Eager parameters
        #: will automatically be handled before non eager ones.
        self.params = params or []
        #: the help string shown on the command's help page, if any.
        self.help = help
        #: text printed at the end of the help page, if any.
        self.epilog = epilog
        #: metavar printed in the usage line for the options.
        self.options_metavar = options_metavar
        if short_help is None and help:
            # Derive the listing blurb from the first part of the help text.
            short_help = make_default_short_help(help)
        self.short_help = short_help
        #: whether an implicit ``--help`` option is registered.
        self.add_help_option = add_help_option
    def get_usage(self, ctx):
        """Render the usage line for *ctx* and return it as a string."""
        formatter = ctx.make_formatter()
        self.format_usage(ctx, formatter)
        return formatter.getvalue().rstrip('\n')
    def get_params(self, ctx):
        # Declared params plus the implicit help option, when one is
        # available (see get_help_option: it may be disabled or shadowed).
        rv = self.params
        help_option = self.get_help_option(ctx)
        if help_option is not None:
            # Copy instead of append so self.params stays untouched.
            rv = rv + [help_option]
        return rv
    def format_usage(self, ctx, formatter):
        """Writes the usage line into the formatter."""
        pieces = self.collect_usage_pieces(ctx)
        # command_path is the space-joined chain of info names to the root.
        formatter.write_usage(ctx.command_path, ' '.join(pieces))
    def collect_usage_pieces(self, ctx):
        """Returns all the pieces that go into the usage line and returns
        it as a list of strings.
        """
        # Options metavar first, then each parameter's own usage pieces.
        rv = [self.options_metavar]
        for param in self.get_params(ctx):
            rv.extend(param.get_usage_pieces(ctx))
        return rv
def get_help_option_names(self, ctx):
"""Returns the names for the help option."""
all_names = set(ctx.help_option_names)
for param in self.params:
all_names.difference_update(param.opts)
all_names.difference_update(param.secondary_opts)
return all_names
    def get_help_option(self, ctx):
        """Returns the help option object."""
        help_options = self.get_help_option_names(ctx)
        # No option is added when disabled or all names are shadowed.
        if not help_options or not self.add_help_option:
            return
        def show_help(ctx, param, value):
            # Only act when the flag was passed and we are not in
            # resilient (e.g. completion) parsing mode.
            if value and not ctx.resilient_parsing:
                echo(ctx.get_help(), color=ctx.color)
                ctx.exit()
        return Option(help_options, is_flag=True,
                      is_eager=True, expose_value=False,
                      callback=show_help,
                      help='Show this message and exit.')
    def make_parser(self, ctx):
        """Creates the underlying option parser for this command."""
        parser = OptionParser(ctx)
        # Parser behavior flags are taken from the context, which may have
        # inherited them from this command or a parent context.
        parser.allow_interspersed_args = ctx.allow_interspersed_args
        parser.ignore_unknown_options = ctx.ignore_unknown_options
        for param in self.get_params(ctx):
            param.add_to_parser(parser, ctx)
        return parser
    def get_help(self, ctx):
        """Formats the help into a string and returns it. This creates a
        formatter and will call into the following formatting methods:
        """
        formatter = ctx.make_formatter()
        self.format_help(ctx, formatter)
        return formatter.getvalue().rstrip('\n')
    def format_help(self, ctx, formatter):
        """Writes the help into the formatter if it exists.
        This calls into the following methods:
        -   :meth:`format_usage`
        -   :meth:`format_help_text`
        -   :meth:`format_options`
        -   :meth:`format_epilog`
        """
        self.format_usage(ctx, formatter)
        self.format_help_text(ctx, formatter)
        self.format_options(ctx, formatter)
        self.format_epilog(ctx, formatter)
def format_help_text(self, ctx, formatter):
"""Writes the help text to the formatter if it exists."""
if self.help:
formatter.write_paragraph()
with formatter.indentation():
formatter.write_text(self.help)
    def format_options(self, ctx, formatter):
        """Writes all the options into the formatter if they exist."""
        opts = []
        for param in self.get_params(ctx):
            # get_help_record returns None for hidden/undocumented params.
            rv = param.get_help_record(ctx)
            if rv is not None:
                opts.append(rv)
        if opts:
            with formatter.section('Options'):
                formatter.write_dl(opts)
def format_epilog(self, ctx, formatter):
"""Writes the epilog into the formatter if it exists."""
if self.epilog:
formatter.write_paragraph()
with formatter.indentation():
formatter.write_text(self.epilog)
    def parse_args(self, ctx, args):
        parser = self.make_parser(ctx)
        opts, args, param_order = parser.parse_args(args=args)
        # Process params in the computed order (eager ones first) so that
        # eager options like --help can short-circuit regular handling.
        for param in iter_params_for_processing(
                param_order, self.get_params(ctx)):
            value, args = param.handle_parse_result(ctx, opts, args)
        if args and not ctx.allow_extra_args and not ctx.resilient_parsing:
            ctx.fail('Got unexpected extra argument%s (%s)'
                     % (len(args) != 1 and 's' or '',
                        ' '.join(map(make_str, args))))
        ctx.args = args
        return args
    def invoke(self, ctx):
        """Given a context, this invokes the attached callback (if it exists)
        in the right way.
        """
        # Commands without a callback are silently a no-op (return None).
        if self.callback is not None:
            return ctx.invoke(self.callback, **ctx.params)
class MultiCommand(Command):
    """A multi command is the basic implementation of a command that
    dispatches to subcommands. The most common version is the
    :class:`Group`.
    :param invoke_without_command: this controls how the multi command itself
                                   is invoked. By default it's only invoked
                                   if a subcommand is provided.
    :param no_args_is_help: this controls what happens if no arguments are
                            provided. This option is enabled by default if
                            `invoke_without_command` is disabled or disabled
                            if it's enabled. If enabled this will add
                            ``--help`` as argument if no arguments are
                            passed.
    :param subcommand_metavar: the string that is used in the documentation
                               to indicate the subcommand place.
    :param chain: if this is set to `True` chaining of multiple subcommands
                  is enabled. This restricts the form of commands in that
                  they cannot have optional arguments but it allows
                  multiple commands to be chained together.
    :param result_callback: the result callback to attach to this multi
                            command.
    """
    allow_extra_args = True
    allow_interspersed_args = False
    def __init__(self, name=None, invoke_without_command=False,
                 no_args_is_help=None, subcommand_metavar=None,
                 chain=False, result_callback=None, **attrs):
        Command.__init__(self, name, **attrs)
        if no_args_is_help is None:
            no_args_is_help = not invoke_without_command
        self.no_args_is_help = no_args_is_help
        self.invoke_without_command = invoke_without_command
        if subcommand_metavar is None:
            if chain:
                subcommand_metavar = SUBCOMMANDS_METAVAR
            else:
                subcommand_metavar = SUBCOMMAND_METAVAR
        self.subcommand_metavar = subcommand_metavar
        self.chain = chain
        #: The result callback that is stored. This can be set or
        #: overridden with the :func:`resultcallback` decorator.
        self.result_callback = result_callback
        if self.chain:
            # In chain mode leftover arguments of one subcommand feed the
            # next one, so optional arguments would make parsing ambiguous.
            for param in self.params:
                if isinstance(param, Argument) and not param.required:
                    raise RuntimeError('Multi commands in chain mode cannot '
                                       'have optional arguments.')
    def collect_usage_pieces(self, ctx):
        """Appends the subcommand placeholder to the usage line."""
        rv = Command.collect_usage_pieces(self, ctx)
        rv.append(self.subcommand_metavar)
        return rv
    def format_options(self, ctx, formatter):
        """Writes the options section followed by the list of subcommands."""
        Command.format_options(self, ctx, formatter)
        self.format_commands(ctx, formatter)
    def resultcallback(self, replace=False):
        """Adds a result callback to the chain command. By default if a
        result callback is already registered this will chain them but
        this can be disabled with the `replace` parameter. The result
        callback is invoked with the return value of the subcommand
        (or the list of return values from all subcommands if chaining
        is enabled) as well as the parameters as they would be passed
        to the main callback.
        Example::
            @click.group()
            @click.option('-i', '--input', default=23)
            def cli(input):
                return 42
            @cli.resultcallback()
            def process_result(result, input):
                return result + input
        .. versionadded:: 3.0
        :param replace: if set to `True` an already existing result
                        callback will be removed.
        """
        def decorator(f):
            old_callback = self.result_callback
            if old_callback is None or replace:
                self.result_callback = f
                return f
            # Chain the callbacks: feed the old callback's return value
            # into the new one, forwarding the parameters unchanged.
            def function(__value, *args, **kwargs):
                return f(old_callback(__value, *args, **kwargs),
                         *args, **kwargs)
            self.result_callback = rv = update_wrapper(function, f)
            return rv
        return decorator
    def format_commands(self, ctx, formatter):
        """Extra format methods for multi methods that adds all the commands
        after the options.
        """
        rows = []
        for subcommand in self.list_commands(ctx):
            cmd = self.get_command(ctx, subcommand)
            # What is this, the tool lied about a command. Ignore it
            if cmd is None:
                continue
            help = cmd.short_help or ''
            rows.append((subcommand, help))
        if rows:
            with formatter.section('Commands'):
                formatter.write_dl(rows)
    def parse_args(self, ctx, args):
        """Parses arguments, splitting off what belongs to subcommands.
        The subcommand name (or, in chain mode, everything left after the
        group's own parameters) is stored in ``ctx.protected_args`` so it
        cannot be swallowed as an option value.
        """
        if not args and self.no_args_is_help and not ctx.resilient_parsing:
            echo(ctx.get_help(), color=ctx.color)
            ctx.exit()
        rest = Command.parse_args(self, ctx, args)
        if self.chain:
            ctx.protected_args = rest
            ctx.args = []
        elif rest:
            ctx.protected_args, ctx.args = rest[:1], rest[1:]
        return ctx.args
    def invoke(self, ctx):
        """Invokes the group's own callback, then the resolved
        subcommand(s), and finally the result callback if one is set.
        """
        def _process_result(value):
            if self.result_callback is not None:
                value = ctx.invoke(self.result_callback, value,
                                   **ctx.params)
            return value
        if not ctx.protected_args:
            # If we are invoked without command the chain flag controls
            # how this happens. If we are not in chain mode, the return
            # value here is the return value of the command.
            # If however we are in chain mode, the return value is the
            # return value of the result processor invoked with an empty
            # list (which means that no subcommand actually was executed).
            if self.invoke_without_command:
                if not self.chain:
                    return Command.invoke(self, ctx)
                with ctx:
                    Command.invoke(self, ctx)
                    return _process_result([])
            ctx.fail('Missing command.')
        # Fetch args back out
        args = ctx.protected_args + ctx.args
        ctx.args = []
        ctx.protected_args = []
        # If we're not in chain mode, we only allow the invocation of a
        # single command but we also inform the current context about the
        # name of the command to invoke.
        if not self.chain:
            # Make sure the context is entered so we do not clean up
            # resources until the result processor has worked.
            with ctx:
                cmd_name, cmd, args = self.resolve_command(ctx, args)
                ctx.invoked_subcommand = cmd_name
                Command.invoke(self, ctx)
                sub_ctx = cmd.make_context(cmd_name, args, parent=ctx)
                with sub_ctx:
                    return _process_result(sub_ctx.command.invoke(sub_ctx))
        # In chain mode we create the contexts step by step, but after the
        # base command has been invoked. Because at that point we do not
        # know the subcommands yet, the invoked subcommand attribute is
        # set to ``*`` to inform the command that subcommands are executed
        # but nothing else.
        with ctx:
            ctx.invoked_subcommand = args and '*' or None
            Command.invoke(self, ctx)
            # Otherwise we make every single context and invoke them in a
            # chain. In that case the return value to the result processor
            # is the list of all invoked subcommand's results.
            contexts = []
            while args:
                cmd_name, cmd, args = self.resolve_command(ctx, args)
                sub_ctx = cmd.make_context(cmd_name, args, parent=ctx,
                                           allow_extra_args=True,
                                           allow_interspersed_args=False)
                contexts.append(sub_ctx)
                args, sub_ctx.args = sub_ctx.args, []
            rv = []
            for sub_ctx in contexts:
                with sub_ctx:
                    rv.append(sub_ctx.command.invoke(sub_ctx))
            return _process_result(rv)
    def resolve_command(self, ctx, args):
        """Resolves ``args[0]`` to a subcommand.
        Returns a ``(cmd_name, cmd, remaining_args)`` tuple or fails on
        the context if no such command exists.
        """
        cmd_name = make_str(args[0])
        original_cmd_name = cmd_name
        # Get the command
        cmd = self.get_command(ctx, cmd_name)
        # If we can't find the command but there is a normalization
        # function available, we try with that one.
        if cmd is None and ctx.token_normalize_func is not None:
            cmd_name = ctx.token_normalize_func(cmd_name)
            cmd = self.get_command(ctx, cmd_name)
        # If we don't find the command we want to show an error message
        # to the user that it was not provided. However, there is
        # something else we should do: if the first argument looks like
        # an option we want to kick off parsing again for arguments to
        # resolve things like --help which now should go to the main
        # place.
        if cmd is None:
            if split_opt(cmd_name)[0]:
                self.parse_args(ctx, ctx.args)
            ctx.fail('No such command "%s".' % original_cmd_name)
        return cmd_name, cmd, args[1:]
    def get_command(self, ctx, cmd_name):
        """Given a context and a command name, this returns a
        :class:`Command` object if it exists or returns `None`.
        """
        raise NotImplementedError()
    def list_commands(self, ctx):
        """Returns a list of subcommand names in the order they should
        appear.
        """
        return []
class Group(MultiCommand):
    """A group allows a command to have subcommands attached. This is the
    most common way to implement nesting in Click.
    :param commands: a dictionary of commands.
    """
    def __init__(self, name=None, commands=None, **attrs):
        MultiCommand.__init__(self, name, **attrs)
        #: the registered subcommands by their exported names.
        self.commands = commands or {}
    def add_command(self, cmd, name=None):
        """Registers another :class:`Command` with this group. If the name
        is not provided, the name of the command is used.
        """
        cmd_name = name or cmd.name
        if cmd_name is None:
            raise TypeError('Command has no name.')
        _check_multicommand(self, cmd_name, cmd, register=True)
        self.commands[cmd_name] = cmd
    def command(self, *args, **kwargs):
        """A shortcut decorator that creates a new command via
        :func:`command` and immediately attaches it to this group
        through :meth:`add_command`.
        """
        def decorator(f):
            new_command = command(*args, **kwargs)(f)
            self.add_command(new_command)
            return new_command
        return decorator
    def group(self, *args, **kwargs):
        """A shortcut decorator that creates a new group via
        :func:`group` and immediately attaches it to this group
        through :meth:`add_command`.
        """
        def decorator(f):
            new_group = group(*args, **kwargs)(f)
            self.add_command(new_group)
            return new_group
        return decorator
    def get_command(self, ctx, cmd_name):
        """Looks up a registered subcommand; `None` when unknown."""
        return self.commands.get(cmd_name)
    def list_commands(self, ctx):
        """Returns all registered subcommand names, alphabetically."""
        return sorted(self.commands)
class CommandCollection(MultiCommand):
    """A command collection is a multi command that merges multiple multi
    commands together into one. This is a straightforward implementation
    that accepts a list of different multi commands as sources and
    provides all the commands for each of them.
    """
    def __init__(self, name=None, sources=None, **attrs):
        MultiCommand.__init__(self, name, **attrs)
        #: The list of registered multi commands.
        self.sources = sources or []
    def add_source(self, multi_cmd):
        """Adds a new multi command to the chain dispatcher."""
        self.sources.append(multi_cmd)
    def get_command(self, ctx, cmd_name):
        """Returns the command from the first source that knows it."""
        for multi_cmd in self.sources:
            found = multi_cmd.get_command(ctx, cmd_name)
            if found is None:
                continue
            if self.chain:
                _check_multicommand(self, cmd_name, found)
            return found
        return None
    def list_commands(self, ctx):
        """Returns the sorted union of all sources' command names."""
        names = set()
        for multi_cmd in self.sources:
            names.update(multi_cmd.list_commands(ctx))
        return sorted(names)
class Parameter(object):
    """A parameter to a command comes in two versions: they are either
    :class:`Option`\s or :class:`Argument`\s. Other subclasses are currently
    not supported by design as some of the internals for parsing are
    intentionally not finalized.
    Some settings are supported by both options and arguments.
    .. versionchanged:: 2.0
       Changed signature for parameter callback to also be passed the
       parameter. In Click 2.0, the old callback format will still work,
       but it will raise a warning to give you a chance to migrate the
       code easier.
    :param param_decls: the parameter declarations for this option or
                        argument. This is a list of flags or argument
                        names.
    :param type: the type that should be used. Either a :class:`ParamType`
                 or a Python type. The latter is converted into the former
                 automatically if supported.
    :param required: controls if this is optional or not.
    :param default: the default value if omitted. This can also be a callable,
                    in which case it's invoked when the default is needed
                    without any arguments.
    :param callback: a callback that should be executed after the parameter
                     was matched. This is called as ``fn(ctx, param,
                     value)`` and needs to return the value. Before Click
                     2.0, the signature was ``(ctx, value)``.
    :param nargs: the number of arguments to match. If not ``1`` the return
                  value is a tuple instead of single value. The default for
                  nargs is ``1`` (except if the type is a tuple, then it's
                  the arity of the tuple).
    :param metavar: how the value is represented in the help page.
    :param expose_value: if this is `True` then the value is passed onwards
                         to the command callback and stored on the context,
                         otherwise it's skipped.
    :param is_eager: eager values are processed before non eager ones. This
                     should not be set for arguments or it will inverse the
                     order of processing.
    :param envvar: a string or list of strings that are environment variables
                   that should be checked.
    """
    param_type_name = 'parameter'
    def __init__(self, param_decls=None, type=None, required=False,
                 default=None, callback=None, nargs=None, metavar=None,
                 expose_value=True, is_eager=False, envvar=None):
        self.name, self.opts, self.secondary_opts = \
            self._parse_decls(param_decls or (), expose_value)
        self.type = convert_type(type, default)
        # Default nargs to what the type tells us if we have that
        # information available.
        if nargs is None:
            if self.type.is_composite:
                nargs = self.type.arity
            else:
                nargs = 1
        self.required = required
        self.callback = callback
        self.nargs = nargs
        self.multiple = False
        self.expose_value = expose_value
        self.default = default
        self.is_eager = is_eager
        self.metavar = metavar
        self.envvar = envvar
    @property
    def human_readable_name(self):
        """Returns the human readable name of this parameter. This is the
        same as the name for options, but the metavar for arguments.
        """
        return self.name
    def make_metavar(self):
        """Builds the placeholder string shown for this parameter in help
        output, falling back to the type's metavar or uppercased name.
        """
        if self.metavar is not None:
            return self.metavar
        metavar = self.type.get_metavar(self)
        if metavar is None:
            metavar = self.type.name.upper()
        if self.nargs != 1:
            metavar += '...'
        return metavar
    def get_default(self, ctx):
        """Given a context variable this calculates the default value."""
        # Otherwise go with the regular default.
        if callable(self.default):
            rv = self.default()
        else:
            rv = self.default
        return self.type_cast_value(ctx, rv)
    def add_to_parser(self, parser, ctx):
        """Registers this parameter with the option parser. The base
        implementation registers nothing; subclasses override this.
        """
        pass
    def consume_value(self, ctx, opts):
        """Looks up this parameter's raw value: parsed options first, then
        the context's default map, then environment variables.
        """
        value = opts.get(self.name)
        if value is None:
            value = ctx.lookup_default(self.name)
        if value is None:
            value = self.value_from_envvar(ctx)
        return value
    def type_cast_value(self, ctx, value):
        """Given a value this runs it properly through the type system.
        This automatically handles things like `nargs` and `multiple` as
        well as composite types.
        """
        if self.type.is_composite:
            if self.nargs <= 1:
                raise TypeError('Attempted to invoke composite type '
                                'but nargs has been set to %s. This is '
                                'not supported; nargs needs to be set to '
                                'a fixed value > 1.' % self.nargs)
            if self.multiple:
                return tuple(self.type(x or (), self, ctx) for x in value or ())
            return self.type(value or (), self, ctx)
        # Conversion depth: one level of tuple nesting for nargs != 1 and
        # another one when the parameter can be given multiple times.
        def _convert(value, level):
            if level == 0:
                return self.type(value, self, ctx)
            return tuple(_convert(x, level - 1) for x in value or ())
        return _convert(value, (self.nargs != 1) + bool(self.multiple))
    def process_value(self, ctx, value):
        """Given a value and context this runs the logic to convert the
        value as necessary.
        """
        # If the value we were given is None we do nothing. This way
        # code that calls this can easily figure out if something was
        # not provided. Otherwise it would be converted into an empty
        # tuple for multiple invocations which is inconvenient.
        if value is not None:
            return self.type_cast_value(ctx, value)
    def value_is_missing(self, value):
        """Returns `True` if the value counts as "not provided" (taking
        empty tuples for multi-value parameters into account).
        """
        if value is None:
            return True
        if (self.nargs != 1 or self.multiple) and value == ():
            return True
        return False
    def full_process_value(self, ctx, value):
        """Converts the value, applies the default if needed and enforces
        the ``required`` constraint.
        """
        value = self.process_value(ctx, value)
        if value is None:
            value = self.get_default(ctx)
        if self.required and self.value_is_missing(value):
            raise MissingParameter(ctx=ctx, param=self)
        return value
    def resolve_envvar_value(self, ctx):
        """Returns the raw string value of the first matching environment
        variable, or `None` when no envvar is configured or set.
        """
        if self.envvar is None:
            return
        if isinstance(self.envvar, (tuple, list)):
            for envvar in self.envvar:
                rv = os.environ.get(envvar)
                if rv is not None:
                    return rv
        else:
            return os.environ.get(self.envvar)
    def value_from_envvar(self, ctx):
        """Resolves the environment variable value, splitting it for
        multi-value parameters via the type's splitting rules.
        """
        rv = self.resolve_envvar_value(ctx)
        if rv is not None and self.nargs != 1:
            rv = self.type.split_envvar_value(rv)
        return rv
    def handle_parse_result(self, ctx, opts, args):
        """Consumes, converts and stores this parameter's value.
        Returns a ``(value, remaining_args)`` tuple. In resilient parsing
        mode, conversion and callback errors are swallowed and the value
        becomes `None`.
        """
        with augment_usage_errors(ctx, param=self):
            value = self.consume_value(ctx, opts)
            try:
                value = self.full_process_value(ctx, value)
            except Exception:
                if not ctx.resilient_parsing:
                    raise
                value = None
            if self.callback is not None:
                try:
                    value = invoke_param_callback(
                        self.callback, ctx, self, value)
                except Exception:
                    if not ctx.resilient_parsing:
                        raise
        if self.expose_value:
            ctx.params[self.name] = value
        return value, args
    def get_help_record(self, ctx):
        """Returns a ``(option string, help)`` tuple for the help page, or
        nothing. The base implementation provides no record.
        """
        pass
    def get_usage_pieces(self, ctx):
        """Returns the pieces this parameter adds to the usage line. The
        base implementation adds none.
        """
        return []
class Option(Parameter):
    """Options are usually optional values on the command line and
    have some extra features that arguments don't have.
    All other parameters are passed onwards to the parameter constructor.
    :param show_default: controls if the default value should be shown on the
                         help page. Normally, defaults are not shown.
    :param prompt: if set to `True` or a non empty string then the user will
                   be prompted for input if not set. If set to `True` the
                   prompt will be the option name capitalized.
    :param confirmation_prompt: if set then the value will need to be confirmed
                                if it was prompted for.
    :param hide_input: if this is `True` then the input on the prompt will be
                       hidden from the user. This is useful for password
                       input.
    :param is_flag: forces this option to act as a flag. The default is
                    auto detection.
    :param flag_value: which value should be used for this flag if it's
                       enabled. This is set to a boolean automatically if
                       the option string contains a slash to mark two options.
    :param multiple: if this is set to `True` then the argument is accepted
                     multiple times and recorded. This is similar to ``nargs``
                     in how it works but supports arbitrary number of
                     arguments.
    :param count: this flag makes an option increment an integer.
    :param allow_from_autoenv: if this is enabled then the value of this
                               parameter will be pulled from an environment
                               variable in case a prefix is defined on the
                               context.
    :param help: the help string.
    """
    param_type_name = 'option'
    def __init__(self, param_decls=None, show_default=False,
                 prompt=False, confirmation_prompt=False,
                 hide_input=False, is_flag=None, flag_value=None,
                 multiple=False, count=False, allow_from_autoenv=True,
                 type=None, help=None, **attrs):
        # Remember whether the caller supplied an explicit default before
        # the base class replaces a missing one with None.
        default_is_missing = attrs.get('default', _missing) is _missing
        Parameter.__init__(self, param_decls, type=type, **attrs)
        if prompt is True:
            prompt_text = self.name.replace('_', ' ').capitalize()
        elif prompt is False:
            prompt_text = None
        else:
            prompt_text = prompt
        self.prompt = prompt_text
        self.confirmation_prompt = confirmation_prompt
        self.hide_input = hide_input
        # Flags
        if is_flag is None:
            if flag_value is not None:
                is_flag = True
            else:
                # Secondary opts (the "--no-x" half of "--x/--no-x")
                # imply a boolean flag.
                is_flag = bool(self.secondary_opts)
        if is_flag and default_is_missing:
            self.default = False
        if flag_value is None:
            flag_value = not self.default
        self.is_flag = is_flag
        self.flag_value = flag_value
        if self.is_flag and isinstance(self.flag_value, bool) \
           and type is None:
            self.type = BOOL
            self.is_bool_flag = True
        else:
            self.is_bool_flag = False
        # Counting
        self.count = count
        if count:
            if type is None:
                self.type = IntRange(min=0)
            if default_is_missing:
                self.default = 0
        self.multiple = multiple
        self.allow_from_autoenv = allow_from_autoenv
        self.help = help
        self.show_default = show_default
        # Sanity check for stuff we don't support
        if __debug__:
            if self.nargs < 0:
                raise TypeError('Options cannot have nargs < 0')
            if self.prompt and self.is_flag and not self.is_bool_flag:
                raise TypeError('Cannot prompt for flags that are not bools.')
            if not self.is_bool_flag and self.secondary_opts:
                raise TypeError('Got secondary option for non boolean flag.')
            if self.is_bool_flag and self.hide_input \
               and self.prompt is not None:
                raise TypeError('Hidden input does not work with boolean '
                                'flag prompts.')
            if self.count:
                if self.multiple:
                    raise TypeError('Options cannot be multiple and count '
                                    'at the same time.')
                elif self.is_flag:
                    raise TypeError('Options cannot be count and flags at '
                                    'the same time.')
    def _parse_decls(self, decls, expose_value):
        """Splits the declarations into the Python name, the primary option
        strings and the secondary ("off") option strings.
        """
        opts = []
        secondary_opts = []
        name = None
        possible_names = []
        for decl in decls:
            if isidentifier(decl):
                if name is not None:
                    raise TypeError('Name defined twice')
                name = decl
            else:
                # '/' normally separates the on/off spellings; when the
                # declaration itself starts with '/' (e.g. Windows-style
                # "/debug;/no-debug"), ';' is used as the separator.
                split_char = decl[:1] == '/' and ';' or '/'
                if split_char in decl:
                    first, second = decl.split(split_char, 1)
                    first = first.rstrip()
                    if first:
                        possible_names.append(split_opt(first))
                        opts.append(first)
                    second = second.lstrip()
                    if second:
                        secondary_opts.append(second.lstrip())
                else:
                    possible_names.append(split_opt(decl))
                    opts.append(decl)
        if name is None and possible_names:
            # Prefer the spelling with the longest prefix (e.g. '--foo'
            # over '-f') as the source of the Python name.
            possible_names.sort(key=lambda x: len(x[0]))
            name = possible_names[-1][1].replace('-', '_').lower()
            if not isidentifier(name):
                name = None
        if name is None:
            if not expose_value:
                return None, opts, secondary_opts
            raise TypeError('Could not determine name for option')
        if not opts and not secondary_opts:
            raise TypeError('No options defined but a name was passed (%s). '
                            'Did you mean to declare an argument instead '
                            'of an option?' % name)
        return name, opts, secondary_opts
    def add_to_parser(self, parser, ctx):
        """Registers this option (and its secondary spellings) with the
        option parser, picking the parser action that matches the
        flag/count/multiple configuration.
        """
        kwargs = {
            'dest': self.name,
            'nargs': self.nargs,
            'obj': self,
        }
        if self.multiple:
            action = 'append'
        elif self.count:
            action = 'count'
        else:
            action = 'store'
        if self.is_flag:
            kwargs.pop('nargs', None)
            if self.is_bool_flag and self.secondary_opts:
                parser.add_option(self.opts, action=action + '_const',
                                  const=True, **kwargs)
                parser.add_option(self.secondary_opts, action=action +
                                  '_const', const=False, **kwargs)
            else:
                parser.add_option(self.opts, action=action + '_const',
                                  const=self.flag_value,
                                  **kwargs)
        else:
            kwargs['action'] = action
            parser.add_option(self.opts, **kwargs)
    def get_help_record(self, ctx):
        """Builds the ``(option strings, help text)`` pair shown on the
        help page, appending default/required annotations.
        """
        any_prefix_is_slash = []
        def _write_opts(opts):
            rv, any_slashes = join_options(opts)
            if any_slashes:
                any_prefix_is_slash[:] = [True]
            if not self.is_flag and not self.count:
                rv += ' ' + self.make_metavar()
            return rv
        rv = [_write_opts(self.opts)]
        if self.secondary_opts:
            rv.append(_write_opts(self.secondary_opts))
        help = self.help or ''
        extra = []
        if self.default is not None and self.show_default:
            extra.append('default: %s' % (
                         ', '.join('%s' % d for d in self.default)
                         if isinstance(self.default, (list, tuple))
                         else self.default, ))
        if self.required:
            extra.append('required')
        if extra:
            help = '%s[%s]' % (help and help + ' ' or '', '; '.join(extra))
        return ((any_prefix_is_slash and '; ' or ' / ').join(rv), help)
    def get_default(self, ctx):
        # If we're a non boolean flag our default is more complex because
        # we need to look at all flags in the same group to figure out
        # if we're the default one in which case we return the flag
        # value as default.
        if self.is_flag and not self.is_bool_flag:
            for param in ctx.command.params:
                if param.name == self.name and param.default:
                    return param.flag_value
            return None
        return Parameter.get_default(self, ctx)
    def prompt_for_value(self, ctx):
        """This is an alternative flow that can be activated in the full
        value processing if a value does not exist. It will prompt the
        user until a valid value exists and then returns the processed
        value as result.
        """
        # Calculate the default before prompting anything to be stable.
        default = self.get_default(ctx)
        # If this is a prompt for a flag we need to handle this
        # differently.
        if self.is_bool_flag:
            return confirm(self.prompt, default)
        return prompt(self.prompt, default=default,
                      hide_input=self.hide_input,
                      confirmation_prompt=self.confirmation_prompt,
                      value_proc=lambda x: self.process_value(ctx, x))
    def resolve_envvar_value(self, ctx):
        """Like the base implementation but additionally falls back to the
        auto envvar ``<PREFIX>_<NAME>`` when the context defines one.
        """
        rv = Parameter.resolve_envvar_value(self, ctx)
        if rv is not None:
            return rv
        if self.allow_from_autoenv and \
           ctx.auto_envvar_prefix is not None:
            envvar = '%s_%s' % (ctx.auto_envvar_prefix, self.name.upper())
            return os.environ.get(envvar)
    def value_from_envvar(self, ctx):
        """Resolves and splits the environment variable value, batching it
        into nargs-sized groups for multiple multi-value options.
        """
        rv = self.resolve_envvar_value(ctx)
        if rv is None:
            return None
        value_depth = (self.nargs != 1) + bool(self.multiple)
        if value_depth > 0 and rv is not None:
            rv = self.type.split_envvar_value(rv)
            if self.multiple and self.nargs != 1:
                rv = batch(rv, self.nargs)
        return rv
    def full_process_value(self, ctx, value):
        """Falls back to prompting the user when no value was supplied and
        a prompt is configured (unless parsing resiliently).
        """
        if value is None and self.prompt is not None \
           and not ctx.resilient_parsing:
            return self.prompt_for_value(ctx)
        return Parameter.full_process_value(self, ctx, value)
class Argument(Parameter):
    """Arguments are positional parameters to a command. They generally
    provide fewer features than options but can have infinite ``nargs``
    and are required by default.
    All parameters are passed onwards to the parameter constructor.
    """
    param_type_name = 'argument'
    def __init__(self, param_decls, required=None, **attrs):
        if required is None:
            # Infer requiredness: a default makes the argument optional;
            # otherwise it is required whenever at least one value must
            # be consumed.
            required = (attrs.get('default') is None
                        and attrs.get('nargs', 1) > 0)
        Parameter.__init__(self, param_decls, required=required, **attrs)
        if self.nargs < 0 and self.default is not None:
            raise TypeError('nargs=-1 in combination with a default value '
                            'is not supported.')
    @property
    def human_readable_name(self):
        """The metavar when one is set, otherwise the uppercased name."""
        return self.name.upper() if self.metavar is None else self.metavar
    def make_metavar(self):
        """Builds the usage placeholder, bracketing optional arguments and
        appending an ellipsis for variadic ones.
        """
        if self.metavar is not None:
            return self.metavar
        placeholder = self.name.upper()
        if not self.required:
            placeholder = '[%s]' % placeholder
        if self.nargs != 1:
            placeholder += '...'
        return placeholder
    def _parse_decls(self, decls, expose_value):
        """Derives the Python name and declaration from one or two decls."""
        if not decls:
            if not expose_value:
                return None, [], []
            raise TypeError('Could not determine name for argument')
        if len(decls) == 1:
            arg = decls[0]
            name = arg.replace('-', '_').lower()
        elif len(decls) == 2:
            name, arg = decls
        else:
            raise TypeError('Arguments take exactly one or two '
                            'parameter declarations, got %d' % len(decls))
        return name, [arg], []
    def get_usage_pieces(self, ctx):
        """Arguments contribute their metavar to the usage line."""
        return [self.make_metavar()]
    def add_to_parser(self, parser, ctx):
        """Registers this argument with the option parser."""
        parser.add_argument(dest=self.name, nargs=self.nargs,
                            obj=self)
# Circular dependency between decorators and core
from .decorators import command, group
|
paperreduction/fabric-bolt | refs/heads/master | fabric_bolt/core/mixins/models.py | 32 | """Some generic model mixins"""
from django.db import models
class TrackingFields(models.Model):
    """Generic model for some generic fields related to when a record was created, updated and deleted
    In some cases the date_delete field is not useful since the actual record won't be there. However,
    we're using it for archiving an object and have an active_records model manager."""
    # Set once when the row is first saved.
    date_created = models.DateTimeField(auto_now_add=True)
    # Refreshed on every save(). NOTE(review): the name is inconsistent with
    # the other two fields ('date_update' vs. 'date_updated'); renaming would
    # require a schema migration, so it is only flagged here.
    date_update = models.DateTimeField(auto_now=True)
    # Soft-delete / archival timestamp; NULL while the record is active.
    date_deleted = models.DateTimeField(blank=True, null=True)
    class Meta:
        abstract = True
shakamunyi/neutron-dvr | refs/heads/master | neutron/plugins/vmware/common/securitygroups.py | 36 | # Copyright 2013 VMware, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.openstack.common import log
from neutron.plugins.vmware.common import nsx_utils
LOG = log.getLogger(__name__)
# Protocol number look up for supported protocols
protocol_num_look_up = {'tcp': 6, 'icmp': 1, 'udp': 17}
def _convert_to_nsx_rule(session, cluster, rule, with_id=False):
    """Converts a Neutron security group rule to the NSX format.
    This routine also replaces Neutron IDs with NSX UUIDs.
    """
    nsx_rule = {}
    params = ['remote_ip_prefix', 'protocol',
              'remote_group_id', 'port_range_min',
              'port_range_max', 'ethertype']
    if with_id:
        params.append('id')
    for param in params:
        value = rule.get(param)
        if param not in rule:
            # Key absent entirely: keep an explicit None entry so the NSX
            # payload always carries the full attribute set.
            nsx_rule[param] = value
        elif not value:
            # Present but falsy (None/''/0): omit from the payload.
            pass
        elif param == 'remote_ip_prefix':
            nsx_rule['ip_prefix'] = rule['remote_ip_prefix']
        elif param == 'remote_group_id':
            # Translate the Neutron security group ID into the UUID of the
            # corresponding NSX security profile.
            nsx_rule['profile_uuid'] = nsx_utils.get_nsx_security_group_id(
                session, cluster, rule['remote_group_id'])
        elif param == 'protocol':
            try:
                nsx_rule['protocol'] = int(rule['protocol'])
            except (ValueError, TypeError):
                # Not numeric: map a protocol name ('tcp'/'udp'/'icmp') to
                # its IANA protocol number.
                nsx_rule['protocol'] = (
                    protocol_num_look_up[rule['protocol']])
        else:
            nsx_rule[param] = value
    return nsx_rule
def _convert_to_nsx_rules(session, cluster, rules, with_id=False):
    """Converts a list of Neutron security group rules to the NSX format."""
    directions = ('logical_port_ingress_rules', 'logical_port_egress_rules')
    return dict(
        (direction,
         [_convert_to_nsx_rule(session, cluster, rule, with_id)
          for rule in rules[direction]])
        for direction in directions)
def get_security_group_rules_nsx_format(session, cluster,
                                        security_group_rules, with_id=False):
    """Convert neutron security group rules into NSX format.
    This routine splits Neutron security group rules into two lists, one
    for ingress rules and the other for egress rules.
    :param session: database session (used for the Neutron->NSX ID lookup
                    performed later by _convert_to_nsx_rule).
    :param cluster: NSX cluster handle.
    :param security_group_rules: iterable of Neutron rule dicts.
    :param with_id: when True the rule 'id' is kept in the output.
    """
    def fields(rule):
        # Whitelist of rule attributes forwarded to NSX.  (The original
        # list contained 'protocol' twice; kept once.)
        _fields = ['remote_ip_prefix', 'remote_group_id', 'protocol',
                   'port_range_min', 'port_range_max', 'ethertype']
        if with_id:
            _fields.append('id')
        return dict((k, v) for k, v in rule.iteritems() if k in _fields)
    ingress_rules = []
    egress_rules = []
    for rule in security_group_rules:
        # NOTE: a previous branch here keyed on the misspelled
        # 'souce_group_id' and was therefore dead code; 'remote_group_id'
        # is already translated to its NSX UUID by _convert_to_nsx_rule(),
        # so no conversion is needed at this point.
        if rule['direction'] == 'ingress':
            ingress_rules.append(fields(rule))
        elif rule['direction'] == 'egress':
            egress_rules.append(fields(rule))
    # NSX ingress/egress are from the logical port's point of view, hence
    # the apparent swap relative to Neutron's 'direction' attribute.
    rules = {'logical_port_ingress_rules': egress_rules,
             'logical_port_egress_rules': ingress_rules}
    return _convert_to_nsx_rules(session, cluster, rules, with_id)
def merge_security_group_rules_with_current(session, cluster,
                                            new_rules, current_rules):
    """Return the NSX rule set for current_rules extended with new_rules."""
    merged_rules = get_security_group_rules_nsx_format(
        session, cluster, current_rules)
    for new_rule in new_rules:
        rule = new_rule['security_group_rule']
        direction = rule['direction']
        # Neutron ingress maps to the NSX egress list and vice versa.
        if direction == 'ingress':
            target = merged_rules['logical_port_egress_rules']
        elif direction == 'egress':
            target = merged_rules['logical_port_ingress_rules']
        else:
            continue
        target.append(_convert_to_nsx_rule(session, cluster, rule))
    return merged_rules
def remove_security_group_with_id_and_id_field(rules, rule_id):
    """Remove rule by rule_id.
    This function receives all of the current rules associated with a
    security group and removes the one whose 'id' matches ``rule_id``.
    Additionally, the 'id' key is stripped from every remaining rule
    because it must not be forwarded to NSX.  The dict is mutated in
    place.
    """
    for direction_rules in rules.values():
        matched = None
        for port_rule in direction_rules:
            if port_rule['id'] == rule_id:
                matched = port_rule
            else:
                # Strip the key so NSX never sees it.
                del port_rule['id']
        if matched is not None:
            direction_rules.remove(matched)
|
divio/django-cms | refs/heads/develop | cms/admin/placeholderadmin.py | 1 | import uuid
import warnings
from urllib.parse import parse_qsl, urlparse
from django.urls import re_path
from django.contrib.admin.helpers import AdminForm
from django.contrib.admin.utils import get_deleted_objects
from django.core.exceptions import PermissionDenied
from django.db import router, transaction
from django.http import (
HttpResponse,
HttpResponseBadRequest,
HttpResponseForbidden,
HttpResponseNotFound,
HttpResponseRedirect,
)
from django.shortcuts import get_list_or_404, get_object_or_404, render
from django.template.response import TemplateResponse
from django.utils.decorators import method_decorator
from django.utils.encoding import force_text
from django.utils.html import conditional_escape
from django.utils import translation
from django.utils.translation import gettext as _
from django.views.decorators.clickjacking import xframe_options_sameorigin
from django.views.decorators.http import require_POST
from cms import operations
from cms.admin.forms import PluginAddValidationForm
from cms.constants import SLUG_REGEXP
from cms.exceptions import PluginLimitReached
from cms.models.placeholdermodel import Placeholder
from cms.models.placeholderpluginmodel import PlaceholderReference
from cms.models.pluginmodel import CMSPlugin
from cms.plugin_pool import plugin_pool
from cms.signals import pre_placeholder_operation, post_placeholder_operation
from cms.toolbar.utils import get_plugin_tree_as_json
from cms.utils import copy_plugins, get_current_site
from cms.utils.conf import get_cms_setting
from cms.utils.i18n import get_language_code, get_language_list
from cms.utils.plugins import has_reached_plugin_limit, reorder_plugins
from cms.utils.urlutils import admin_reverse
_no_default = object()
def get_int(int_str, default=_no_default):
    """
    For convenience a get-like method for taking the int() of a string.
    :param int_str: the string to convert to integer
    :param default: an optional value to return if ValueError is raised.
    :return: the int() of «int_str» or «default» on exception.
    """
    try:
        return int(int_str)
    except ValueError:
        if default == _no_default:
            raise
        return default
def _instance_overrides_method(base, instance, method_name):
"""
Returns True if instance overrides a method (method_name)
inherited from base.
"""
bound_method = getattr(instance.__class__, method_name)
unbound_method = getattr(base, method_name)
return unbound_method != bound_method
class FrontendEditableAdminMixin:
frontend_editable_fields = []
    def get_urls(self):
        """
        Register the url for the single field edit view
        """
        info = "%s_%s" % (self.model._meta.app_label, self.model._meta.model_name)
        # Helper wrapping a view in the admin permission check and naming it
        # "<app>_<model>_<viewname>" so it can be reversed.
        def pat(regex, fn): return re_path(regex, self.admin_site.admin_view(fn), name='%s_%s' % (info, fn.__name__))
        url_patterns = [
            # e.g. edit-field/<object id>/<language code>/
            pat(r'edit-field/(%s)/([a-z\-]+)/$' % SLUG_REGEXP, self.edit_field),
        ]
        # Custom patterns go first so they win over the default ModelAdmin
        # routes.
        return url_patterns + super().get_urls()
def _get_object_for_single_field(self, object_id, language):
# Quick and dirty way to retrieve objects for django-hvad
# Cleaner implementation will extend this method in a child mixin
try:
return self.model.objects.language(language).get(pk=object_id)
except AttributeError:
return self.model.objects.get(pk=object_id)
def edit_field(self, request, object_id, language):
obj = self._get_object_for_single_field(object_id, language)
opts = obj.__class__._meta
saved_successfully = False
cancel_clicked = request.POST.get("_cancel", False)
raw_fields = request.GET.get("edit_fields")
fields = [field for field in raw_fields.split(",") if field in self.frontend_editable_fields]
if not fields:
context = {
'opts': opts,
'message': force_text(_("Field %s not found")) % raw_fields
}
return render(request, 'admin/cms/page/plugin/error_form.html', context)
if not request.user.has_perm("{0}.change_{1}".format(self.model._meta.app_label,
self.model._meta.model_name)):
context = {
'opts': opts,
'message': force_text(_("You do not have permission to edit this item"))
}
return render(request, 'admin/cms/page/plugin/error_form.html', context)
# Dynamically creates the form class with only `field_name` field
# enabled
form_class = self.get_form(request, obj, fields=fields)
if not cancel_clicked and request.method == 'POST':
form = form_class(instance=obj, data=request.POST)
if form.is_valid():
form.save()
saved_successfully = True
else:
form = form_class(instance=obj)
admin_form = AdminForm(form, fieldsets=[(None, {'fields': fields})], prepopulated_fields={},
model_admin=self)
media = self.media + admin_form.media
context = {
'CMS_MEDIA_URL': get_cms_setting('MEDIA_URL'),
'title': opts.verbose_name,
'plugin': None,
'plugin_id': None,
'adminform': admin_form,
'add': False,
'is_popup': True,
'media': media,
'opts': opts,
'change': True,
'save_as': False,
'has_add_permission': False,
'window_close_timeout': 10,
}
if cancel_clicked:
# cancel button was clicked
context.update({
'cancel': True,
})
return render(request, 'admin/cms/page/plugin/confirm_form.html', context)
if not cancel_clicked and request.method == 'POST' and saved_successfully:
return render(request, 'admin/cms/page/plugin/confirm_form.html', context)
return render(request, 'admin/cms/page/plugin/change_form.html', context)
class PlaceholderAdminMixin:
    def _get_attached_admin(self, placeholder):
        # Admin instance for the model the placeholder is attached to
        # (if any), resolved against this mixin's admin site.
        return placeholder._get_attached_admin(admin_site=self.admin_site)
def _get_operation_language(self, request):
# Unfortunately the ?language GET query
# has a special meaning on the CMS.
# It allows users to see another language while maintaining
# the same url. This complicates language detection.
site = get_current_site()
parsed_url = urlparse(request.GET['cms_path'])
queries = dict(parse_qsl(parsed_url.query))
language = queries.get('language')
if not language:
language = translation.get_language_from_path(parsed_url.path)
return get_language_code(language, site_id=site.pk)
def _get_operation_origin(self, request):
return urlparse(request.GET['cms_path']).path
    def _send_pre_placeholder_operation(self, request, operation, **kwargs):
        """Emit the ``pre_placeholder_operation`` signal and return the
        token that pairs it with the matching post-operation signal.

        If the request lacks the "cms_path" GET parameter, only a warning
        is issued and the token is returned without sending the signal
        (backwards-compatibility shim).
        """
        token = str(uuid.uuid4())

        if not request.GET.get('cms_path'):
            warnings.warn('All custom placeholder admin endpoints require '
                          'a "cms_path" GET query which points to the path '
                          'where the request originates from.'
                          'This backwards compatible shim will be removed on 3.5 '
                          'and an HttpBadRequest response will be returned instead.',
                          UserWarning)
            return token

        pre_placeholder_operation.send(
            sender=self.__class__,
            operation=operation,
            request=request,
            language=self._get_operation_language(request),
            token=token,
            origin=self._get_operation_origin(request),
            **kwargs
        )
        return token
    def _send_post_placeholder_operation(self, request, operation, token, **kwargs):
        """Emit ``post_placeholder_operation`` using the *token* returned
        by the matching :meth:`_send_pre_placeholder_operation` call."""
        if not request.GET.get('cms_path'):
            # No need to re-raise the warning
            return

        post_placeholder_operation.send(
            sender=self.__class__,
            operation=operation,
            request=request,
            language=self._get_operation_language(request),
            token=token,
            origin=self._get_operation_origin(request),
            **kwargs
        )
    def _get_plugin_from_id(self, plugin_id):
        """Return the plugin with *plugin_id*, fetched through its plugin
        class' render queryset, or raise Http404 if it does not exist."""
        queryset = CMSPlugin.objects.values_list('plugin_type', flat=True)
        plugin_type = get_list_or_404(queryset, pk=plugin_id)[0]
        # CMSPluginBase subclass
        plugin_class = plugin_pool.get_plugin(plugin_type)
        real_queryset = plugin_class.get_render_queryset().select_related('parent', 'placeholder')
        return get_object_or_404(real_queryset, pk=plugin_id)
def get_urls(self):
"""
Register the plugin specific urls (add/edit/copy/remove/move)
"""
info = "%s_%s" % (self.model._meta.app_label, self.model._meta.model_name)
def pat(regex, fn): return re_path(regex, self.admin_site.admin_view(fn), name='%s_%s' % (info, fn.__name__))
url_patterns = [
pat(r'copy-plugins/$', self.copy_plugins),
pat(r'add-plugin/$', self.add_plugin),
pat(r'edit-plugin/(%s)/$' % SLUG_REGEXP, self.edit_plugin),
pat(r'delete-plugin/(%s)/$' % SLUG_REGEXP, self.delete_plugin),
pat(r'clear-placeholder/(%s)/$' % SLUG_REGEXP, self.clear_placeholder),
pat(r'move-plugin/$', self.move_plugin),
]
return url_patterns + super().get_urls()
    def has_add_plugin_permission(self, request, placeholder, plugin_type):
        # Delegates the check to the target placeholder.
        return placeholder.has_add_plugin_permission(request.user, plugin_type)
def has_change_plugin_permission(self, request, plugin):
placeholder = plugin.placeholder
return placeholder.has_change_plugin_permission(request.user, plugin)
def has_delete_plugin_permission(self, request, plugin):
placeholder = plugin.placeholder
return placeholder.has_delete_plugin_permission(request.user, plugin)
    def has_copy_plugins_permission(self, request, plugins):
        """Whether *request.user* may copy *plugins* to their clipboard."""
        # Plugins can only be copied to the clipboard
        placeholder = request.toolbar.clipboard
        return placeholder.has_add_plugins_permission(request.user, plugins)
    def has_copy_from_clipboard_permission(self, request, placeholder, plugins):
        # Pasting from the clipboard requires "add plugins" rights on the
        # destination placeholder.
        return placeholder.has_add_plugins_permission(request.user, plugins)
def has_copy_from_placeholder_permission(self, request, source_placeholder, target_placeholder, plugins):
if not source_placeholder.has_add_plugins_permission(request.user, plugins):
return False
return target_placeholder.has_add_plugins_permission(request.user, plugins)
def has_move_plugin_permission(self, request, plugin, target_placeholder):
placeholder = plugin.placeholder
return placeholder.has_move_plugin_permission(request.user, plugin, target_placeholder)
    def has_clear_placeholder_permission(self, request, placeholder, language=None):
        """Whether *request.user* may clear the placeholder's plugins.

        When *language* is None the check covers every language that
        currently has plugins in the placeholder.
        """
        if language:
            languages = [language]
        else:
            # fetch all languages this placeholder contains
            # based on its plugins
            languages = (
                placeholder
                .cmsplugin_set
                .values_list('language', flat=True)
                .distinct()
                .order_by()
            )
        return placeholder.has_clear_permission(request.user, languages)
    def get_placeholder_template(self, request, placeholder):
        # Hook for subclasses: return the template the placeholder is
        # rendered with (used for per-template plugin limits). The base
        # implementation knows of none and returns None.
        pass
    @xframe_options_sameorigin
    def add_plugin(self, request):
        """
        Shows the add plugin form and saves it on POST.

        Requires the following GET parameters:
            - cms_path
            - placeholder_id
            - plugin_type
            - plugin_language
            - plugin_parent (optional)
            - plugin_position (optional)

        Fires the post ADD_PLUGIN operation signal after a successful save.
        """
        form = PluginAddValidationForm(request.GET)

        if not form.is_valid():
            # list() is necessary for python 3 compatibility.
            # errors is a dict mapping fields to a list of errors
            # for that field.
            error = list(form.errors.values())[0][0]
            return HttpResponseBadRequest(conditional_escape(force_text(error)))

        plugin_data = form.cleaned_data
        placeholder = plugin_data['placeholder_id']
        plugin_type = plugin_data['plugin_type']

        if not self.has_add_plugin_permission(request, placeholder, plugin_type):
            message = force_text(_('You do not have permission to add a plugin'))
            return HttpResponseForbidden(message)

        parent = plugin_data.get('plugin_parent')

        # New plugins are appended after their siblings (under the parent,
        # or at the root level of the placeholder for this language).
        if parent:
            position = parent.cmsplugin_set.count()
        else:
            position = CMSPlugin.objects.filter(
                parent__isnull=True,
                language=plugin_data['plugin_language'],
                placeholder=placeholder,
            ).count()

        plugin_data['position'] = position

        plugin_class = plugin_pool.get_plugin(plugin_type)
        plugin_instance = plugin_class(plugin_class.model, self.admin_site)

        # Setting attributes on the form class is perfectly fine.
        # The form class is created by modelform factory every time
        # this get_form() method is called.
        plugin_instance._cms_initial_attributes = {
            'language': plugin_data['plugin_language'],
            'placeholder': plugin_data['placeholder_id'],
            'parent': plugin_data.get('plugin_parent', None),
            'plugin_type': plugin_data['plugin_type'],
            'position': plugin_data['position'],
        }

        response = plugin_instance.add_view(request)

        # add_view sets saved_object only when the plugin was actually saved.
        plugin = getattr(plugin_instance, 'saved_object', None)

        if plugin:
            plugin.placeholder.mark_as_dirty(plugin.language, clear_cache=False)

        if plugin_instance._operation_token:
            tree_order = placeholder.get_plugin_tree_order(plugin.parent_id)
            self._send_post_placeholder_operation(
                request,
                operation=operations.ADD_PLUGIN,
                token=plugin_instance._operation_token,
                plugin=plugin,
                placeholder=plugin.placeholder,
                tree_order=tree_order,
            )
        return response
@method_decorator(require_POST)
@xframe_options_sameorigin
@transaction.atomic
def copy_plugins(self, request):
"""
POST request should have the following data:
- cms_path
- source_language
- source_placeholder_id
- source_plugin_id (optional)
- target_language
- target_placeholder_id
- target_plugin_id (deprecated/unused)
"""
source_placeholder_id = request.POST['source_placeholder_id']
target_language = request.POST['target_language']
target_placeholder_id = request.POST['target_placeholder_id']
source_placeholder = get_object_or_404(Placeholder, pk=source_placeholder_id)
target_placeholder = get_object_or_404(Placeholder, pk=target_placeholder_id)
if not target_language or not target_language in get_language_list():
return HttpResponseBadRequest(force_text(_("Language must be set to a supported language!")))
copy_to_clipboard = target_placeholder.pk == request.toolbar.clipboard.pk
source_plugin_id = request.POST.get('source_plugin_id', None)
if copy_to_clipboard and source_plugin_id:
new_plugin = self._copy_plugin_to_clipboard(
request,
source_placeholder,
target_placeholder,
)
new_plugins = [new_plugin]
elif copy_to_clipboard:
new_plugin = self._copy_placeholder_to_clipboard(
request,
source_placeholder,
target_placeholder,
)
new_plugins = [new_plugin]
else:
new_plugins = self._add_plugins_from_placeholder(
request,
source_placeholder,
target_placeholder,
)
data = get_plugin_tree_as_json(request, new_plugins)
return HttpResponse(data, content_type='application/json')
    def _copy_plugin_to_clipboard(self, request, source_placeholder, target_placeholder):
        """Copy a single plugin (with its whole subtree) into the user's
        clipboard (*target_placeholder*), which is emptied first.

        :raises PermissionDenied: if the user lacks copy rights.
        :return: the new root plugin of the copied subtree.
        """
        source_language = request.POST['source_language']
        source_plugin_id = request.POST.get('source_plugin_id')
        target_language = request.POST['target_language']
        source_plugin = get_object_or_404(
            CMSPlugin,
            pk=source_plugin_id,
            language=source_language,
        )
        old_plugins = (
            CMSPlugin
            .get_tree(parent=source_plugin)
            .filter(placeholder=source_placeholder)
            .order_by('path')
        )

        if not self.has_copy_plugins_permission(request, old_plugins):
            message = _('You do not have permission to copy these plugins.')
            raise PermissionDenied(force_text(message))

        # Empty the clipboard
        target_placeholder.clear()

        plugin_pairs = copy_plugins.copy_plugins_to(
            old_plugins,
            to_placeholder=target_placeholder,
            to_language=target_language,
        )
        # copy_plugins_to returns (new, old) pairs; the first pair holds
        # the copied root plugin.
        return plugin_pairs[0][0]
    def _copy_placeholder_to_clipboard(self, request, source_placeholder, target_placeholder):
        """Copy the whole *source_placeholder* (for the source language)
        into the clipboard, wrapped in a PlaceholderReference plugin.

        :raises PermissionDenied: if the user lacks copy rights.
        :return: the PlaceholderReference wrapper plugin.
        """
        source_language = request.POST['source_language']
        target_language = request.POST['target_language']

        # User is copying the whole placeholder to the clipboard.
        old_plugins = source_placeholder.get_plugins_list(language=source_language)

        if not self.has_copy_plugins_permission(request, old_plugins):
            message = _('You do not have permission to copy this placeholder.')
            raise PermissionDenied(force_text(message))

        # Empty the clipboard
        target_placeholder.clear()

        # Create a PlaceholderReference plugin which in turn
        # creates a blank placeholder called "clipboard"
        # the real clipboard has the reference placeholder inside but the plugins
        # are inside of the newly created blank clipboard.
        # This allows us to wrap all plugins in the clipboard under one plugin
        reference = PlaceholderReference.objects.create(
            name=source_placeholder.get_label(),
            plugin_type='PlaceholderPlugin',
            language=target_language,
            placeholder=target_placeholder,
        )

        copy_plugins.copy_plugins_to(
            old_plugins,
            to_placeholder=reference.placeholder_ref,
            to_language=target_language,
        )
        return reference
    def _add_plugins_from_placeholder(self, request, source_placeholder, target_placeholder):
        """Copy all source-language plugins from *source_placeholder* into
        *target_placeholder* under the target language, appended after the
        target's existing root plugins.

        Fires the ADD_PLUGINS_FROM_PLACEHOLDER operation signals.

        :raises PermissionDenied: if the user lacks copy rights.
        :return: list of the newly created plugins in tree order.
        """
        # Plugins are being copied from a placeholder in another language
        # using the "Copy from language" placeholder operation.
        source_language = request.POST['source_language']
        target_language = request.POST['target_language']

        old_plugins = source_placeholder.get_plugins_list(language=source_language)

        # Check if the user can copy plugins from source placeholder to
        # target placeholder.
        has_permissions = self.has_copy_from_placeholder_permission(
            request,
            source_placeholder,
            target_placeholder,
            old_plugins,
        )

        if not has_permissions:
            message = _('You do not have permission to copy these plugins.')
            raise PermissionDenied(force_text(message))

        target_tree_order = target_placeholder.get_plugin_tree_order(
            language=target_language,
            parent_id=None,
        )

        operation_token = self._send_pre_placeholder_operation(
            request,
            operation=operations.ADD_PLUGINS_FROM_PLACEHOLDER,
            plugins=old_plugins,
            source_language=source_language,
            source_placeholder=source_placeholder,
            target_language=target_language,
            target_placeholder=target_placeholder,
            target_order=target_tree_order,
        )

        copied_plugins = copy_plugins.copy_plugins_to(
            old_plugins,
            to_placeholder=target_placeholder,
            to_language=target_language,
        )

        new_plugin_ids = (new.pk for new, old in copied_plugins)

        # Creates a list of PKs for the top-level plugins ordered by
        # their position.
        top_plugins = (pair for pair in copied_plugins if not pair[0].parent_id)
        top_plugins_pks = [p[0].pk for p in sorted(top_plugins, key=lambda pair: pair[1].position)]

        # All new plugins are added to the bottom
        target_tree_order = target_tree_order + top_plugins_pks

        reorder_plugins(
            target_placeholder,
            parent_id=None,
            language=target_language,
            order=target_tree_order,
        )
        target_placeholder.mark_as_dirty(target_language, clear_cache=False)

        new_plugins = CMSPlugin.objects.filter(pk__in=new_plugin_ids).order_by('path')
        new_plugins = list(new_plugins)

        self._send_post_placeholder_operation(
            request,
            operation=operations.ADD_PLUGINS_FROM_PLACEHOLDER,
            token=operation_token,
            plugins=new_plugins,
            source_language=source_language,
            source_placeholder=source_placeholder,
            target_language=target_language,
            target_placeholder=target_placeholder,
            target_order=target_tree_order,
        )
        return new_plugins
    @xframe_options_sameorigin
    def edit_plugin(self, request, plugin_id):
        """Delegate to the plugin's own ``change_view`` after permission
        checks; fires the post CHANGE_PLUGIN operation signal on save."""
        try:
            plugin_id = int(plugin_id)
        except ValueError:
            return HttpResponseNotFound(force_text(_("Plugin not found")))

        obj = self._get_plugin_from_id(plugin_id)

        # CMSPluginBase subclass instance
        plugin_instance = obj.get_plugin_class_instance(admin=self.admin_site)

        if not self.has_change_plugin_permission(request, obj):
            return HttpResponseForbidden(force_text(_("You do not have permission to edit this plugin")))

        response = plugin_instance.change_view(request, str(plugin_id))

        # change_view sets saved_object only when the plugin was saved.
        plugin = getattr(plugin_instance, 'saved_object', None)

        if plugin:
            plugin.placeholder.mark_as_dirty(plugin.language, clear_cache=False)

        if plugin_instance._operation_token:
            self._send_post_placeholder_operation(
                request,
                operation=operations.CHANGE_PLUGIN,
                token=plugin_instance._operation_token,
                old_plugin=obj,
                new_plugin=plugin,
                placeholder=plugin.placeholder,
            )
        return response
    @method_decorator(require_POST)
    @xframe_options_sameorigin
    @transaction.atomic
    def move_plugin(self, request):
        """
        Performs a move or a "paste" operation (when «move_a_copy» is set)

        POST request with following parameters:
        - plugin_id
        - placeholder_id
        - target_language
        - plugin_parent (optional)
        - plugin_order (array, optional)
        - move_a_copy (Boolean, optional) (anything supplied here except a case-
                                           insensitive "false" is True)
        NOTE: If move_a_copy is set, the plugin_order should contain an item
              '__COPY__' with the desired destination of the copied plugin.

        Dispatches to _paste_placeholder/_paste_plugin/_cut_plugin/_move_plugin
        depending on move_a_copy, the plugin type and the destination.
        """
        # plugin_id and placeholder_id are required, so, if nothing is supplied,
        # an exception will be raised by get_int().
        try:
            plugin_id = get_int(request.POST.get('plugin_id'))
        except TypeError:
            raise RuntimeError("'plugin_id' is a required parameter.")
        plugin = self._get_plugin_from_id(plugin_id)
        try:
            placeholder_id = get_int(request.POST.get('placeholder_id'))
        except TypeError:
            raise RuntimeError("'placeholder_id' is a required parameter.")
        except ValueError:
            raise RuntimeError("'placeholder_id' must be an integer string.")
        placeholder = Placeholder.objects.get(pk=placeholder_id)
        # The rest are optional
        parent_id = get_int(request.POST.get('plugin_parent', ""), None)
        target_language = request.POST['target_language']
        move_a_copy = request.POST.get('move_a_copy')
        move_a_copy = (move_a_copy and move_a_copy != "0" and
                       move_a_copy.lower() != "false")
        move_to_clipboard = placeholder == request.toolbar.clipboard
        source_placeholder = plugin.placeholder
        order = request.POST.getlist("plugin_order[]")

        # Plugin limits only apply when entering a different placeholder.
        if placeholder != source_placeholder:
            try:
                template = self.get_placeholder_template(request, placeholder)
                has_reached_plugin_limit(placeholder, plugin.plugin_type,
                                         target_language, template=template)
            except PluginLimitReached as er:
                return HttpResponseBadRequest(er)

        # order should be a list of plugin primary keys
        # it's important that the plugins being referenced
        # are all part of the same tree.
        exclude_from_order_check = ['__COPY__', str(plugin.pk)]
        ordered_plugin_ids = [int(pk) for pk in order if pk not in exclude_from_order_check]
        plugins_in_tree_count = (
            placeholder
            .get_plugins(target_language)
            .filter(parent=parent_id, pk__in=ordered_plugin_ids)
            .count()
        )

        if len(ordered_plugin_ids) != plugins_in_tree_count:
            # order does not match the tree on the db
            message = _('order parameter references plugins in different trees')
            return HttpResponseBadRequest(force_text(message))

        # True if the plugin is not being moved from the clipboard
        # to a placeholder or from a placeholder to the clipboard.
        move_a_plugin = not move_a_copy and not move_to_clipboard

        if parent_id and plugin.parent_id != parent_id:
            target_parent = get_object_or_404(CMSPlugin, pk=parent_id)

            if move_a_plugin and target_parent.placeholder_id != placeholder.pk:
                return HttpResponseBadRequest(force_text(
                    _('parent must be in the same placeholder')))

            if move_a_plugin and target_parent.language != target_language:
                return HttpResponseBadRequest(force_text(
                    _('parent must be in the same language as '
                      'plugin_language')))
        elif parent_id:
            target_parent = plugin.parent
        else:
            target_parent = None

        new_plugin = None
        fetch_tree = False

        if move_a_copy and plugin.plugin_type == "PlaceholderPlugin":
            new_plugins = self._paste_placeholder(
                request,
                plugin=plugin,
                target_language=target_language,
                target_placeholder=placeholder,
                tree_order=order,
            )
        elif move_a_copy:
            fetch_tree = True
            new_plugin = self._paste_plugin(
                request,
                plugin=plugin,
                target_parent=target_parent,
                target_language=target_language,
                target_placeholder=placeholder,
                tree_order=order,
            )
        elif move_to_clipboard:
            new_plugin = self._cut_plugin(
                request,
                plugin=plugin,
                target_language=target_language,
                target_placeholder=placeholder,
            )
            new_plugins = [new_plugin]
        else:
            fetch_tree = True
            new_plugin = self._move_plugin(
                request,
                plugin=plugin,
                target_parent=target_parent,
                target_language=target_language,
                target_placeholder=placeholder,
                tree_order=order,
            )

        if new_plugin and fetch_tree:
            root = (new_plugin.parent or new_plugin)
            new_plugins = [root] + list(root.get_descendants().order_by('path'))

        # Mark the target placeholder as dirty
        placeholder.mark_as_dirty(target_language)

        if placeholder != source_placeholder:
            # Plugin is being moved or copied into a separate placeholder
            # Mark source placeholder as dirty
            source_placeholder.mark_as_dirty(plugin.language)

        data = get_plugin_tree_as_json(request, new_plugins)
        return HttpResponse(data, content_type='application/json')
    def _paste_plugin(self, request, plugin, target_language,
                      target_placeholder, tree_order, target_parent=None):
        """Copy *plugin* (a clipboard plugin) and its subtree into
        *target_placeholder*, inserted at the '__COPY__' marker position
        of *tree_order*; fires the PASTE_PLUGIN operation signals.

        :raises PermissionDenied: if the user may not paste here.
        :return: the new root plugin of the pasted subtree.
        """
        plugins = (
            CMSPlugin
            .get_tree(parent=plugin)
            .filter(placeholder=plugin.placeholder_id)
            .order_by('path')
        )
        plugins = list(plugins)

        if not self.has_copy_from_clipboard_permission(request, target_placeholder, plugins):
            message = force_text(_("You have no permission to paste this plugin"))
            raise PermissionDenied(message)

        if target_parent:
            target_parent_id = target_parent.pk
        else:
            target_parent_id = None

        target_tree_order = [int(pk) for pk in tree_order if not pk == '__COPY__']

        action_token = self._send_pre_placeholder_operation(
            request,
            operation=operations.PASTE_PLUGIN,
            plugin=plugin,
            target_language=target_language,
            target_placeholder=target_placeholder,
            target_parent_id=target_parent_id,
            target_order=target_tree_order,
        )

        plugin_pairs = copy_plugins.copy_plugins_to(
            plugins,
            to_placeholder=target_placeholder,
            to_language=target_language,
            parent_plugin_id=target_parent_id,
        )
        root_plugin = plugin_pairs[0][0]

        # If an ordering was supplied, replace the item that has
        # been copied with the new copy
        target_tree_order.insert(tree_order.index('__COPY__'), root_plugin.pk)

        reorder_plugins(
            target_placeholder,
            parent_id=target_parent_id,
            language=target_language,
            order=target_tree_order,
        )
        target_placeholder.mark_as_dirty(target_language, clear_cache=False)

        # Fetch from db to update position and other tree values
        root_plugin.refresh_from_db()

        self._send_post_placeholder_operation(
            request,
            operation=operations.PASTE_PLUGIN,
            plugin=root_plugin.get_bound_plugin(),
            token=action_token,
            target_language=target_language,
            target_placeholder=target_placeholder,
            target_parent_id=target_parent_id,
            target_order=target_tree_order,
        )
        return root_plugin
    def _paste_placeholder(self, request, plugin, target_language,
                           target_placeholder, tree_order):
        """Paste a clipboard PlaceholderReference plugin: copy the plugins
        it wraps into *target_placeholder* at the '__COPY__' marker
        position; fires the PASTE_PLACEHOLDER operation signals.

        :raises PermissionDenied: if the user may not paste here.
        :return: list of the newly created plugins in tree order.
        """
        plugins = plugin.placeholder_ref.get_plugins_list()

        if not self.has_copy_from_clipboard_permission(request, target_placeholder, plugins):
            message = force_text(_("You have no permission to paste this placeholder"))
            raise PermissionDenied(message)

        target_tree_order = [int(pk) for pk in tree_order if not pk == '__COPY__']

        action_token = self._send_pre_placeholder_operation(
            request,
            operation=operations.PASTE_PLACEHOLDER,
            plugins=plugins,
            target_language=target_language,
            target_placeholder=target_placeholder,
            target_order=target_tree_order,
        )

        new_plugins = copy_plugins.copy_plugins_to(
            plugins,
            to_placeholder=target_placeholder,
            to_language=target_language,
        )

        new_plugin_ids = (new.pk for new, old in new_plugins)

        # Creates a list of PKs for the top-level plugins ordered by
        # their position.
        top_plugins = (pair for pair in new_plugins if not pair[0].parent_id)
        top_plugins_pks = [p[0].pk for p in sorted(top_plugins, key=lambda pair: pair[1].position)]

        # If an ordering was supplied, we should replace the item that has
        # been copied with the new plugins
        target_tree_order[tree_order.index('__COPY__'):0] = top_plugins_pks

        reorder_plugins(
            target_placeholder,
            parent_id=None,
            language=target_language,
            order=target_tree_order,
        )
        target_placeholder.mark_as_dirty(target_language, clear_cache=False)

        new_plugins = (
            CMSPlugin
            .objects
            .filter(pk__in=new_plugin_ids)
            .order_by('path')
            .select_related('placeholder')
        )
        new_plugins = list(new_plugins)

        self._send_post_placeholder_operation(
            request,
            operation=operations.PASTE_PLACEHOLDER,
            token=action_token,
            plugins=new_plugins,
            target_language=target_language,
            target_placeholder=target_placeholder,
            target_order=target_tree_order,
        )
        return new_plugins
    def _move_plugin(self, request, plugin, target_language,
                     target_placeholder, tree_order, target_parent=None):
        """Move *plugin* (with its subtree) to a new parent/placeholder/
        language and reorder siblings to match *tree_order*; fires the
        MOVE_PLUGIN operation signals.

        :raises PermissionDenied: if the user may not move the plugin.
        :return: the updated (moved) plugin instance.
        """
        if not self.has_move_plugin_permission(request, plugin, target_placeholder):
            message = force_text(_("You have no permission to move this plugin"))
            raise PermissionDenied(message)

        plugin_data = {
            'language': target_language,
            'placeholder': target_placeholder,
        }
        source_language = plugin.language
        source_placeholder = plugin.placeholder
        source_tree_order = source_placeholder.get_plugin_tree_order(
            language=source_language,
            parent_id=plugin.parent_id,
        )

        if target_parent:
            target_parent_id = target_parent.pk
        else:
            target_parent_id = None

        if target_placeholder != source_placeholder:
            target_tree_order = target_placeholder.get_plugin_tree_order(
                language=target_language,
                parent_id=target_parent_id,
            )
        else:
            target_tree_order = source_tree_order

        action_token = self._send_pre_placeholder_operation(
            request,
            operation=operations.MOVE_PLUGIN,
            plugin=plugin,
            source_language=source_language,
            source_placeholder=source_placeholder,
            source_parent_id=plugin.parent_id,
            source_order=source_tree_order,
            target_language=target_language,
            target_placeholder=target_placeholder,
            target_parent_id=target_parent_id,
            target_order=target_tree_order,
        )

        if target_parent and plugin.parent != target_parent:
            # Plugin is being moved to another tree (under another parent)
            updated_plugin = plugin.update(refresh=True, parent=target_parent, **plugin_data)
            updated_plugin = updated_plugin.move(target_parent, pos='last-child')
        elif target_parent:
            # Plugin is being moved within the same tree (different position, same parent)
            updated_plugin = plugin.update(refresh=True, **plugin_data)
        else:
            # Plugin is being moved to the root (no parent)
            target = CMSPlugin.get_last_root_node()
            updated_plugin = plugin.update(refresh=True, parent=None, **plugin_data)
            updated_plugin = updated_plugin.move(target, pos='right')

        # Update all children to match the parent's
        # language and placeholder
        updated_plugin.get_descendants().update(**plugin_data)

        # Avoid query by removing the plugin being moved
        # from the source order
        new_source_order = list(source_tree_order)
        new_source_order.remove(updated_plugin.pk)

        # Reorder all plugins in the target placeholder according to the
        # passed order
        new_target_order = [int(pk) for pk in tree_order]
        reorder_plugins(
            target_placeholder,
            parent_id=target_parent_id,
            language=target_language,
            order=new_target_order,
        )
        target_placeholder.mark_as_dirty(target_language, clear_cache=False)

        if source_placeholder != target_placeholder:
            source_placeholder.mark_as_dirty(source_language, clear_cache=False)

        # Refresh plugin to get new tree and position values
        updated_plugin.refresh_from_db()

        self._send_post_placeholder_operation(
            request,
            operation=operations.MOVE_PLUGIN,
            plugin=updated_plugin.get_bound_plugin(),
            token=action_token,
            source_language=source_language,
            source_placeholder=source_placeholder,
            source_parent_id=plugin.parent_id,
            source_order=new_source_order,
            target_language=target_language,
            target_placeholder=target_placeholder,
            target_parent_id=target_parent_id,
            target_order=new_target_order,
        )
        return updated_plugin
def _cut_plugin(self, request, plugin, target_language, target_placeholder):
if not self.has_move_plugin_permission(request, plugin, target_placeholder):
message = force_text(_("You have no permission to cut this plugin"))
raise PermissionDenied(message)
plugin_data = {
'language': target_language,
'placeholder': target_placeholder,
}
source_language = plugin.language
source_placeholder = plugin.placeholder
source_tree_order = source_placeholder.get_plugin_tree_order(
language=source_language,
parent_id=plugin.parent_id,
)
action_token = self._send_pre_placeholder_operation(
request,
operation=operations.CUT_PLUGIN,
plugin=plugin,
clipboard=target_placeholder,
clipboard_language=target_language,
source_language=source_language,
source_placeholder=source_placeholder,
source_parent_id=plugin.parent_id,
source_order=source_tree_order,
)
# Empty the clipboard
target_placeholder.clear()
target = CMSPlugin.get_last_root_node()
updated_plugin = plugin.update(refresh=True, parent=None, **plugin_data)
updated_plugin = updated_plugin.move(target, pos='right')
# Update all children to match the parent's
# language and placeholder (clipboard)
updated_plugin.get_descendants().update(**plugin_data)
# Avoid query by removing the plugin being moved
# from the source order
new_source_order = list(source_tree_order)
new_source_order.remove(updated_plugin.pk)
source_placeholder.mark_as_dirty(target_language, clear_cache=False)
self._send_post_placeholder_operation(
request,
operation=operations.CUT_PLUGIN,
token=action_token,
plugin=updated_plugin.get_bound_plugin(),
clipboard=target_placeholder,
clipboard_language=target_language,
source_language=source_language,
source_placeholder=source_placeholder,
source_parent_id=plugin.parent_id,
source_order=new_source_order,
)
return updated_plugin
    @xframe_options_sameorigin
    def delete_plugin(self, request, plugin_id):
        """Show a confirmation page (GET) or delete the plugin (POST),
        along with everything cascade-deleted with it; fires the
        DELETE_PLUGIN operation signals on actual deletion."""
        plugin = self._get_plugin_from_id(plugin_id)

        if not self.has_delete_plugin_permission(request, plugin):
            return HttpResponseForbidden(force_text(
                _("You do not have permission to delete this plugin")))

        opts = plugin._meta
        router.db_for_write(opts.model)
        get_deleted_objects_additional_kwargs = {'request': request}
        deleted_objects, __, perms_needed, protected = get_deleted_objects(
            [plugin], admin_site=self.admin_site,
            **get_deleted_objects_additional_kwargs
        )

        if request.POST:  # The user has already confirmed the deletion.
            if perms_needed:
                raise PermissionDenied(_("You do not have permission to delete this plugin"))
            obj_display = force_text(plugin)
            placeholder = plugin.placeholder
            plugin_tree_order = placeholder.get_plugin_tree_order(
                language=plugin.language,
                parent_id=plugin.parent_id,
            )

            operation_token = self._send_pre_placeholder_operation(
                request,
                operation=operations.DELETE_PLUGIN,
                plugin=plugin,
                placeholder=placeholder,
                tree_order=plugin_tree_order,
            )

            plugin.delete()
            placeholder.mark_as_dirty(plugin.language, clear_cache=False)
            # Close the position gap left by the deleted plugin.
            reorder_plugins(
                placeholder=placeholder,
                parent_id=plugin.parent_id,
                language=plugin.language,
            )

            self.log_deletion(request, plugin, obj_display)
            self.message_user(request, _('The %(name)s plugin "%(obj)s" was deleted successfully.') % {
                'name': force_text(opts.verbose_name), 'obj': force_text(obj_display)})

            # Avoid query by removing the plugin being deleted
            # from the tree order list
            new_plugin_tree_order = list(plugin_tree_order)
            new_plugin_tree_order.remove(plugin.pk)

            self._send_post_placeholder_operation(
                request,
                operation=operations.DELETE_PLUGIN,
                token=operation_token,
                plugin=plugin,
                placeholder=placeholder,
                tree_order=new_plugin_tree_order,
            )
            return HttpResponseRedirect(admin_reverse('index', current_app=self.admin_site.name))

        plugin_name = force_text(plugin.get_plugin_class().name)

        if perms_needed or protected:
            title = _("Cannot delete %(name)s") % {"name": plugin_name}
        else:
            title = _("Are you sure?")

        context = {
            "title": title,
            "object_name": plugin_name,
            "object": plugin,
            "deleted_objects": deleted_objects,
            "perms_lacking": perms_needed,
            "protected": protected,
            "opts": opts,
            "app_label": opts.app_label,
        }
        request.current_app = self.admin_site.name
        return TemplateResponse(
            request, "admin/cms/page/plugin/delete_confirmation.html", context
        )
    @xframe_options_sameorigin
    def clear_placeholder(self, request, placeholder_id):
        """Admin endpoint that removes every plugin for one language from a
        placeholder, with a Django-admin style confirmation step.

        GET renders the confirmation template; POST performs the clearing
        and redirects to the admin index.
        """
        placeholder = get_object_or_404(Placeholder, pk=placeholder_id)
        language = request.GET.get('language')
        if placeholder.pk == request.toolbar.clipboard.pk:
            # User is clearing the clipboard, no need for permission
            # checks here as the clipboard is unique per user.
            # There could be a case where a plugin has relationship to
            # an object the user does not have permission to delete.
            placeholder.clear(language)
            return HttpResponseRedirect(admin_reverse('index', current_app=self.admin_site.name))
        if not self.has_clear_placeholder_permission(request, placeholder, language):
            return HttpResponseForbidden(force_text(_("You do not have permission to clear this placeholder")))
        opts = Placeholder._meta
        # NOTE(review): return value unused; presumably selects/touches the
        # write database routing for Placeholder — confirm intent.
        router.db_for_write(Placeholder)
        plugins = placeholder.get_plugins_list(language)
        get_deleted_objects_additional_kwargs = {'request': request}
        # Collect related objects that would be deleted, plus any missing
        # permissions or protected relations that block the deletion.
        deleted_objects, __, perms_needed, protected = get_deleted_objects(
            plugins, admin_site=self.admin_site,
            **get_deleted_objects_additional_kwargs
        )
        obj_display = force_text(placeholder)
        if request.POST:
            # The user has already confirmed the deletion.
            if perms_needed:
                return HttpResponseForbidden(force_text(_("You do not have permission to clear this placeholder")))
            # Notify listeners before mutating so they can capture prior state.
            operation_token = self._send_pre_placeholder_operation(
                request,
                operation=operations.CLEAR_PLACEHOLDER,
                plugins=plugins,
                placeholder=placeholder,
            )
            placeholder.clear(language)
            placeholder.mark_as_dirty(language, clear_cache=False)
            self.log_deletion(request, placeholder, obj_display)
            self.message_user(request, _('The placeholder "%(obj)s" was cleared successfully.') % {
                'obj': obj_display})
            self._send_post_placeholder_operation(
                request,
                operation=operations.CLEAR_PLACEHOLDER,
                token=operation_token,
                plugins=plugins,
                placeholder=placeholder,
            )
            return HttpResponseRedirect(admin_reverse('index', current_app=self.admin_site.name))
        # GET: render the confirmation page.
        if perms_needed or protected:
            title = _("Cannot delete %(name)s") % {"name": obj_display}
        else:
            title = _("Are you sure?")
        context = {
            "title": title,
            "object_name": _("placeholder"),
            "object": placeholder,
            "deleted_objects": deleted_objects,
            "perms_lacking": perms_needed,
            "protected": protected,
            "opts": opts,
            "app_label": opts.app_label,
        }
        request.current_app = self.admin_site.name
        return TemplateResponse(request, "admin/cms/page/plugin/delete_confirmation.html", context)
|
paplorinc/intellij-community | refs/heads/master | python/testData/codeInsight/smartEnter/googleDocStringColonAndIndentAfterSection.py | 53 | def func():
"""
A<caret>rgs
""" |
zhulin2609/phantomjs | refs/heads/master | src/qt/qtwebkit/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/_stream_hybi.py | 628 | # Copyright 2012, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""This file provides classes and helper functions for parsing/building frames
of the WebSocket protocol (RFC 6455).
Specification:
http://tools.ietf.org/html/rfc6455
"""
from collections import deque
import logging
import os
import struct
import time
from mod_pywebsocket import common
from mod_pywebsocket import util
from mod_pywebsocket._stream_base import BadOperationException
from mod_pywebsocket._stream_base import ConnectionTerminatedException
from mod_pywebsocket._stream_base import InvalidFrameException
from mod_pywebsocket._stream_base import InvalidUTF8Exception
from mod_pywebsocket._stream_base import StreamBase
from mod_pywebsocket._stream_base import UnsupportedFrameException
# Shared masker used for frames received without a masking key (see
# parse_frame); NoopMasker leaves the payload untouched.
_NOOP_MASKER = util.NoopMasker()
class Frame(object):
    """Container for a single WebSocket frame: header bits, opcode, payload."""

    def __init__(self, fin=1, rsv1=0, rsv2=0, rsv3=0,
                 opcode=None, payload=''):
        # FIN and the three reserved header bits (each 0 or 1).
        self.fin = fin
        self.rsv1 = rsv1
        self.rsv2 = rsv2
        self.rsv3 = rsv3
        # Frame type and raw application data.
        self.opcode = opcode
        self.payload = payload
# Helper functions made public to be used for writing unittests for WebSocket
# clients.
def create_length_header(length, mask):
    """Creates a length header.

    Args:
        length: Frame length. Must be less than 2^63.
        mask: Mask bit. Must be boolean.

    Raises:
        ValueError: when bad data is given.
    """

    mask_bit = (1 << 7) if mask else 0

    if length < 0:
        raise ValueError('length must be non negative integer')
    if length <= 125:
        # 7-bit length fits directly next to the mask bit.
        return chr(mask_bit | length)
    if length < (1 << 16):
        # Marker 126 followed by a 16-bit extended length.
        return chr(mask_bit | 126) + struct.pack('!H', length)
    if length < (1 << 63):
        # Marker 127 followed by a 64-bit extended length.
        return chr(mask_bit | 127) + struct.pack('!Q', length)
    raise ValueError('Payload is too big for one frame')
def create_header(opcode, payload_length, fin, rsv1, rsv2, rsv3, mask):
    """Creates a frame header.

    Raises:
        Exception: when bad data is given.
    """

    if not 0 <= opcode <= 0xf:
        raise ValueError('Opcode out of range')
    if not 0 <= payload_length < (1 << 63):
        raise ValueError('payload_length out of range')
    if (fin | rsv1 | rsv2 | rsv3) & ~1:
        raise ValueError('FIN bit and Reserved bit parameter must be 0 or 1')

    # First octet packs FIN, RSV1-3 and the 4-bit opcode.
    first_byte = ((fin << 7)
                  | (rsv1 << 6) | (rsv2 << 5) | (rsv3 << 4)
                  | opcode)
    # Remaining octet(s) carry the mask bit and (extended) payload length.
    return chr(first_byte) + create_length_header(payload_length, mask)
def _build_frame(header, body, mask):
if not mask:
return header + body
masking_nonce = os.urandom(4)
masker = util.RepeatedXorMasker(masking_nonce)
return header + masking_nonce + masker.mask(body)
def _filter_and_format_frame_object(frame, mask, frame_filters):
    """Applies every frame filter to *frame*, then serializes it to wire form."""
    for active_filter in frame_filters:
        active_filter.filter(frame)

    serialized_header = create_header(
        frame.opcode, len(frame.payload), frame.fin,
        frame.rsv1, frame.rsv2, frame.rsv3, mask)
    return _build_frame(serialized_header, frame.payload, mask)
def create_binary_frame(
    message, opcode=common.OPCODE_BINARY, fin=1, mask=False, frame_filters=[]):
    """Creates a simple binary frame with no extension, reserved bit."""
    return _filter_and_format_frame_object(
        Frame(fin=fin, opcode=opcode, payload=message), mask, frame_filters)
def create_text_frame(
    message, opcode=common.OPCODE_TEXT, fin=1, mask=False, frame_filters=[]):
    """Creates a simple text frame with no extension, reserved bit."""
    # Text payloads always travel as UTF-8 on the wire.
    return create_binary_frame(
        message.encode('utf-8'), opcode, fin, mask, frame_filters)
def parse_frame(receive_bytes, logger=None,
                ws_version=common.VERSION_HYBI_LATEST,
                unmask_receive=True):
    """Parses a frame. Returns a tuple containing each header field and
    payload.

    Args:
        receive_bytes: a function that reads frame data from a stream or
            something similar. The function takes length of the bytes to be
            read. The function must raise ConnectionTerminatedException if
            there is not enough data to be read.
        logger: a logging object.
        ws_version: the version of WebSocket protocol.
        unmask_receive: unmask received frames. When received unmasked
            frame, raises InvalidFrameException.

    Returns:
        A tuple (opcode, unmasked payload, fin, rsv1, rsv2, rsv3).

    Raises:
        ConnectionTerminatedException: when receive_bytes raises it.
        InvalidFrameException: when the frame contains invalid data.
    """

    if not logger:
        logger = logging.getLogger()

    logger.log(common.LOGLEVEL_FINE, 'Receive the first 2 octets of a frame')

    received = receive_bytes(2)

    # First octet: FIN flag, three reserved bits and the 4-bit opcode.
    first_byte = ord(received[0])
    fin = (first_byte >> 7) & 1
    rsv1 = (first_byte >> 6) & 1
    rsv2 = (first_byte >> 5) & 1
    rsv3 = (first_byte >> 4) & 1
    opcode = first_byte & 0xf

    # Second octet: mask flag and the 7-bit base payload length.
    second_byte = ord(received[1])
    mask = (second_byte >> 7) & 1
    payload_length = second_byte & 0x7f

    logger.log(common.LOGLEVEL_FINE,
               'FIN=%s, RSV1=%s, RSV2=%s, RSV3=%s, opcode=%s, '
               'Mask=%s, Payload_length=%s',
               fin, rsv1, rsv2, rsv3, opcode, mask, payload_length)

    if (mask == 1) != unmask_receive:
        # Bug fix: error message previously read "did'nt".
        raise InvalidFrameException(
            'Mask bit on the received frame didn\'t match masking '
            'configuration for received frames')

    # The HyBi and later specs disallow putting a value in 0x0-0xFFFF
    # into the 8-octet extended payload length field (or 0x0-0xFD in
    # 2-octet field).
    valid_length_encoding = True
    length_encoding_bytes = 1
    if payload_length == 127:
        logger.log(common.LOGLEVEL_FINE,
                   'Receive 8-octet extended payload length')

        extended_payload_length = receive_bytes(8)
        payload_length = struct.unpack(
            '!Q', extended_payload_length)[0]
        if payload_length > 0x7FFFFFFFFFFFFFFF:
            raise InvalidFrameException(
                'Extended payload length >= 2^63')
        if ws_version >= 13 and payload_length < 0x10000:
            valid_length_encoding = False
            length_encoding_bytes = 8

        logger.log(common.LOGLEVEL_FINE,
                   'Decoded_payload_length=%s', payload_length)
    elif payload_length == 126:
        logger.log(common.LOGLEVEL_FINE,
                   'Receive 2-octet extended payload length')

        extended_payload_length = receive_bytes(2)
        payload_length = struct.unpack(
            '!H', extended_payload_length)[0]
        if ws_version >= 13 and payload_length < 126:
            valid_length_encoding = False
            length_encoding_bytes = 2

        logger.log(common.LOGLEVEL_FINE,
                   'Decoded_payload_length=%s', payload_length)

    if not valid_length_encoding:
        # Tolerated, but logged: peer used a longer encoding than necessary.
        logger.warning(
            'Payload length is not encoded using the minimal number of '
            'bytes (%d is encoded using %d bytes)',
            payload_length,
            length_encoding_bytes)

    if mask == 1:
        logger.log(common.LOGLEVEL_FINE, 'Receive mask')

        masking_nonce = receive_bytes(4)
        masker = util.RepeatedXorMasker(masking_nonce)

        logger.log(common.LOGLEVEL_FINE, 'Mask=%r', masking_nonce)
    else:
        masker = _NOOP_MASKER

    logger.log(common.LOGLEVEL_FINE, 'Receive payload data')
    if logger.isEnabledFor(common.LOGLEVEL_FINE):
        receive_start = time.time()

    raw_payload_bytes = receive_bytes(payload_length)

    if logger.isEnabledFor(common.LOGLEVEL_FINE):
        logger.log(
            common.LOGLEVEL_FINE,
            'Done receiving payload data at %s MB/s',
            payload_length / (time.time() - receive_start) / 1000 / 1000)
    logger.log(common.LOGLEVEL_FINE, 'Unmask payload data')

    if logger.isEnabledFor(common.LOGLEVEL_FINE):
        unmask_start = time.time()

    unmasked_bytes = masker.mask(raw_payload_bytes)

    if logger.isEnabledFor(common.LOGLEVEL_FINE):
        logger.log(
            common.LOGLEVEL_FINE,
            'Done unmasking payload data at %s MB/s',
            payload_length / (time.time() - unmask_start) / 1000 / 1000)

    return opcode, unmasked_bytes, fin, rsv1, rsv2, rsv3
class FragmentedFrameBuilder(object):
    """A stateful class to send a message as fragments."""

    def __init__(self, mask, frame_filters=[], encode_utf8=True):
        """Constructs an instance."""
        self._mask = mask
        self._frame_filters = frame_filters
        # Skip UTF-8 encoding for text frames when the caller already
        # supplies encoded (e.g. compressed) data.
        self._encode_utf8 = encode_utf8

        # True while in the middle of a fragmented message.
        self._started = False

        # Opcode of the first frame of the current message; used to verify
        # that every frame in one message has the same type.
        self._opcode = common.OPCODE_TEXT

    def build(self, payload_data, end, binary):
        frame_type = common.OPCODE_BINARY if binary else common.OPCODE_TEXT

        if not self._started:
            # First frame of a message carries the real opcode.
            opcode = frame_type
            self._opcode = frame_type
        else:
            if self._opcode != frame_type:
                raise ValueError('Message types are different in frames for '
                                 'the same message')
            opcode = common.OPCODE_CONTINUATION

        # FIN marks the final frame; remember whether more fragments follow.
        self._started = not end
        fin = 1 if end else 0

        if binary or not self._encode_utf8:
            return create_binary_frame(
                payload_data, opcode, fin, self._mask, self._frame_filters)
        return create_text_frame(
            payload_data, opcode, fin, self._mask, self._frame_filters)
def _create_control_frame(opcode, body, mask, frame_filters):
    """Serializes a control frame, enforcing the 125-byte payload limit."""
    frame = Frame(opcode=opcode, payload=body)

    for active_filter in frame_filters:
        active_filter.filter(frame)

    # The limit is checked after filtering, on the payload that actually
    # goes on the wire.
    if len(frame.payload) > 125:
        raise BadOperationException(
            'Payload data size of control frames must be 125 bytes or less')

    serialized_header = create_header(
        frame.opcode, len(frame.payload), frame.fin,
        frame.rsv1, frame.rsv2, frame.rsv3, mask)
    return _build_frame(serialized_header, frame.payload, mask)
def create_ping_frame(body, mask=False, frame_filters=[]):
    """Creates a ping control frame; body may be at most 125 bytes."""
    return _create_control_frame(common.OPCODE_PING, body, mask, frame_filters)
def create_pong_frame(body, mask=False, frame_filters=[]):
    """Creates a pong control frame; body may be at most 125 bytes."""
    return _create_control_frame(common.OPCODE_PONG, body, mask, frame_filters)
def create_close_frame(body, mask=False, frame_filters=[]):
    """Creates a close control frame; body may be at most 125 bytes."""
    return _create_control_frame(
        common.OPCODE_CLOSE, body, mask, frame_filters)
def create_closing_handshake_body(code, reason):
    """Builds a close frame body: 2-octet status code plus UTF-8 reason.

    Raises:
        BadOperationException: when the code is out of range or reserved.
    """
    if code is None:
        # A close frame may legitimately carry no status code at all.
        return ''

    if (code > common.STATUS_USER_PRIVATE_MAX or
        code < common.STATUS_NORMAL_CLOSURE):
        raise BadOperationException('Status code is out of range')
    if code in (common.STATUS_NO_STATUS_RECEIVED,
                common.STATUS_ABNORMAL_CLOSURE,
                common.STATUS_TLS_HANDSHAKE):
        raise BadOperationException('Status code is reserved pseudo '
                                    'code')
    return struct.pack('!H', code) + reason.encode('utf-8')
class StreamOptions(object):
    """Holds option values to configure Stream objects."""

    def __init__(self):
        """Constructs StreamOptions with default values."""

        # Per-frame filters, applied to every frame.
        self.incoming_frame_filters = []
        self.outgoing_frame_filters = []

        # Per-message filters; control frames bypass these.
        self.incoming_message_filters = []
        self.outgoing_message_filters = []

        # Encode outgoing text messages to UTF-8 before framing.
        self.encode_text_message_to_utf8 = True
        # Whether to mask frames we send / unmask frames we receive.
        self.mask_send = False
        self.unmask_receive = True
class Stream(StreamBase):
    """A class for parsing/building frames of the WebSocket protocol
    (RFC 6455).
    """

    def __init__(self, request, options):
        """Constructs an instance.

        Args:
            request: mod_python request.
            options: a StreamOptions instance controlling masking,
                UTF-8 encoding and frame/message filters.
        """

        StreamBase.__init__(self, request)

        self._logger = util.get_class_logger(self)

        self._options = options

        # Closing-handshake state is tracked on the request object.
        self._request.client_terminated = False
        self._request.server_terminated = False

        # Holds body of received fragments.
        self._received_fragments = []
        # Holds the opcode of the first fragment.
        self._original_opcode = None

        self._writer = FragmentedFrameBuilder(
            self._options.mask_send, self._options.outgoing_frame_filters,
            self._options.encode_text_message_to_utf8)

        # Bodies of pings sent but not yet acknowledged by a pong.
        self._ping_queue = deque()

    def _receive_frame(self):
        """Receives a frame and return data in the frame as a tuple containing
        each header field and payload separately.

        Raises:
            ConnectionTerminatedException: when read returns empty
                string.
            InvalidFrameException: when the frame contains invalid data.
        """

        def _receive_bytes(length):
            return self.receive_bytes(length)

        return parse_frame(receive_bytes=_receive_bytes,
                           logger=self._logger,
                           ws_version=self._request.ws_version,
                           unmask_receive=self._options.unmask_receive)

    def _receive_frame_as_frame_object(self):
        """Receives a frame and wraps the parsed fields in a Frame object."""
        opcode, unmasked_bytes, fin, rsv1, rsv2, rsv3 = self._receive_frame()

        return Frame(fin=fin, rsv1=rsv1, rsv2=rsv2, rsv3=rsv3,
                     opcode=opcode, payload=unmasked_bytes)

    def receive_filtered_frame(self):
        """Receives a frame and applies frame filters and message filters.
        The frame to be received must satisfy following conditions:
        - The frame is not fragmented.
        - The opcode of the frame is TEXT or BINARY.

        DO NOT USE this method except for testing purpose.
        """

        frame = self._receive_frame_as_frame_object()
        if not frame.fin:
            raise InvalidFrameException(
                'Segmented frames must not be received via '
                'receive_filtered_frame()')
        if (frame.opcode != common.OPCODE_TEXT and
            frame.opcode != common.OPCODE_BINARY):
            raise InvalidFrameException(
                'Control frames must not be received via '
                'receive_filtered_frame()')

        for frame_filter in self._options.incoming_frame_filters:
            frame_filter.filter(frame)
        for message_filter in self._options.incoming_message_filters:
            frame.payload = message_filter.filter(frame.payload)
        return frame

    def send_message(self, message, end=True, binary=False):
        """Send message.

        Args:
            message: text in unicode or binary in str to send.
            end: False to send *message* as a non-final fragment of a
                fragmented message.
            binary: send message as binary frame.

        Raises:
            BadOperationException: when called on a server-terminated
                connection or called with inconsistent message type or
                binary parameter.
        """

        if self._request.server_terminated:
            raise BadOperationException(
                'Requested send_message after sending out a closing handshake')

        if binary and isinstance(message, unicode):
            raise BadOperationException(
                'Message for binary frame must be instance of str')

        for message_filter in self._options.outgoing_message_filters:
            message = message_filter.filter(message, end, binary)

        try:
            # Set this to any positive integer to limit maximum size of data in
            # payload data of each frame.
            MAX_PAYLOAD_DATA_SIZE = -1

            if MAX_PAYLOAD_DATA_SIZE <= 0:
                # Unlimited frame size: the whole message goes in one frame.
                self._write(self._writer.build(message, end, binary))
                return

            bytes_written = 0
            while True:
                end_for_this_frame = end
                bytes_to_write = len(message) - bytes_written
                if (MAX_PAYLOAD_DATA_SIZE > 0 and
                    bytes_to_write > MAX_PAYLOAD_DATA_SIZE):
                    end_for_this_frame = False
                    bytes_to_write = MAX_PAYLOAD_DATA_SIZE

                frame = self._writer.build(
                    message[bytes_written:bytes_written + bytes_to_write],
                    end_for_this_frame,
                    binary)
                self._write(frame)

                bytes_written += bytes_to_write

                # This if must be placed here (the end of while block) so that
                # at least one frame is sent.
                if len(message) <= bytes_written:
                    break
        except ValueError, e:
            raise BadOperationException(e)

    def _get_message_from_frame(self, frame):
        """Gets a message from frame. If the message is composed of fragmented
        frames and the frame is not the last fragmented frame, this method
        returns None. The whole message will be returned when the last
        fragmented frame is passed to this method.

        Raises:
            InvalidFrameException: when the frame doesn't match defragmentation
                context, or the frame contains invalid data.
        """

        if frame.opcode == common.OPCODE_CONTINUATION:
            if not self._received_fragments:
                if frame.fin:
                    raise InvalidFrameException(
                        'Received a termination frame but fragmentation '
                        'not started')
                else:
                    raise InvalidFrameException(
                        'Received an intermediate frame but '
                        'fragmentation not started')

            if frame.fin:
                # End of fragmentation frame
                self._received_fragments.append(frame.payload)
                message = ''.join(self._received_fragments)
                self._received_fragments = []
                return message
            else:
                # Intermediate frame
                self._received_fragments.append(frame.payload)
                return None
        else:
            if self._received_fragments:
                if frame.fin:
                    raise InvalidFrameException(
                        'Received an unfragmented frame without '
                        'terminating existing fragmentation')
                else:
                    raise InvalidFrameException(
                        'New fragmentation started without terminating '
                        'existing fragmentation')

            if frame.fin:
                # Unfragmented frame
                self._original_opcode = frame.opcode
                return frame.payload
            else:
                # Start of fragmentation frame
                if common.is_control_opcode(frame.opcode):
                    raise InvalidFrameException(
                        'Control frames must not be fragmented')
                self._original_opcode = frame.opcode
                self._received_fragments.append(frame.payload)
                return None

    def _process_close_message(self, message):
        """Processes close message.

        Args:
            message: close message.

        Raises:
            InvalidFrameException: when the message is invalid.
        """

        self._request.client_terminated = True

        # Status code is optional. We can have status reason only if we
        # have status code. Status reason can be empty string. So,
        # allowed cases are
        # - no application data: no code no reason
        # - 2 octet of application data: has code but no reason
        # - 3 or more octet of application data: both code and reason
        if len(message) == 0:
            self._logger.debug('Received close frame (empty body)')
            self._request.ws_close_code = (
                common.STATUS_NO_STATUS_RECEIVED)
        elif len(message) == 1:
            raise InvalidFrameException(
                'If a close frame has status code, the length of '
                'status code must be 2 octet')
        elif len(message) >= 2:
            self._request.ws_close_code = struct.unpack(
                '!H', message[0:2])[0]
            self._request.ws_close_reason = message[2:].decode(
                'utf-8', 'replace')
            self._logger.debug(
                'Received close frame (code=%d, reason=%r)',
                self._request.ws_close_code,
                self._request.ws_close_reason)

        # As we've received a close frame, no more data is coming over the
        # socket. We can now safely close the socket without worrying about
        # RST sending.

        if self._request.server_terminated:
            self._logger.debug(
                'Received ack for server-initiated closing handshake')
            return

        self._logger.debug(
            'Received client-initiated closing handshake')

        code = common.STATUS_NORMAL_CLOSURE
        reason = ''
        if hasattr(self._request, '_dispatcher'):
            # Let the handler choose the code/reason for our acknowledging
            # close frame.
            dispatcher = self._request._dispatcher
            code, reason = dispatcher.passive_closing_handshake(
                self._request)
            if code is None and reason is not None and len(reason) > 0:
                self._logger.warning(
                    'Handler specified reason despite code being None')
                reason = ''
            if reason is None:
                reason = ''
        self._send_closing_handshake(code, reason)
        self._logger.debug(
            'Acknowledged closing handshake initiated by the peer '
            '(code=%r, reason=%r)', code, reason)

    def _process_ping_message(self, message):
        """Processes ping message.

        Args:
            message: ping message.
        """

        try:
            handler = self._request.on_ping_handler
            if handler:
                handler(self._request, message)
                return
        except AttributeError, e:
            pass
        # Default behaviour: echo the ping body back in a pong.
        self._send_pong(message)

    def _process_pong_message(self, message):
        """Processes pong message.

        Args:
            message: pong message.
        """

        # TODO(tyoshino): Add ping timeout handling.

        inflight_pings = deque()

        while True:
            try:
                expected_body = self._ping_queue.popleft()
                if expected_body == message:
                    # inflight_pings contains pings ignored by the
                    # other peer. Just forget them.
                    self._logger.debug(
                        'Ping %r is acked (%d pings were ignored)',
                        expected_body, len(inflight_pings))
                    break
                else:
                    inflight_pings.append(expected_body)
            except IndexError, e:
                # The received pong was unsolicited pong. Keep the
                # ping queue as is.
                self._ping_queue = inflight_pings
                self._logger.debug('Received a unsolicited pong')
                break

        try:
            handler = self._request.on_pong_handler
            if handler:
                handler(self._request, message)
        except AttributeError, e:
            pass

    def receive_message(self):
        """Receive a WebSocket frame and return its payload as a text in
        unicode or a binary in str.

        Returns:
            payload data of the frame
            - as unicode instance if received text frame
            - as str instance if received binary frame
            or None iff received closing handshake.

        Raises:
            BadOperationException: when called on a client-terminated
                connection.
            ConnectionTerminatedException: when read returns empty
                string.
            InvalidFrameException: when the frame contains invalid
                data.
            UnsupportedFrameException: when the received frame has
                flags, opcode we cannot handle. You can ignore this
                exception and continue receiving the next frame.
        """

        if self._request.client_terminated:
            raise BadOperationException(
                'Requested receive_message after receiving a closing '
                'handshake')

        while True:
            # mp_conn.read will block if no bytes are available.
            # Timeout is controlled by TimeOut directive of Apache.

            frame = self._receive_frame_as_frame_object()

            # Check the constraint on the payload size for control frames
            # before extension processes the frame.
            # See also http://tools.ietf.org/html/rfc6455#section-5.5
            if (common.is_control_opcode(frame.opcode) and
                len(frame.payload) > 125):
                raise InvalidFrameException(
                    'Payload data size of control frames must be 125 bytes or '
                    'less')

            for frame_filter in self._options.incoming_frame_filters:
                frame_filter.filter(frame)

            if frame.rsv1 or frame.rsv2 or frame.rsv3:
                raise UnsupportedFrameException(
                    'Unsupported flag is set (rsv = %d%d%d)' %
                    (frame.rsv1, frame.rsv2, frame.rsv3))

            message = self._get_message_from_frame(frame)
            if message is None:
                # Non-final fragment; keep reading until the message is whole.
                continue

            for message_filter in self._options.incoming_message_filters:
                message = message_filter.filter(message)

            if self._original_opcode == common.OPCODE_TEXT:
                # The WebSocket protocol section 4.4 specifies that invalid
                # characters must be replaced with U+fffd REPLACEMENT
                # CHARACTER.
                try:
                    return message.decode('utf-8')
                except UnicodeDecodeError, e:
                    raise InvalidUTF8Exception(e)
            elif self._original_opcode == common.OPCODE_BINARY:
                return message
            elif self._original_opcode == common.OPCODE_CLOSE:
                self._process_close_message(message)
                return None
            elif self._original_opcode == common.OPCODE_PING:
                self._process_ping_message(message)
            elif self._original_opcode == common.OPCODE_PONG:
                self._process_pong_message(message)
            else:
                raise UnsupportedFrameException(
                    'Opcode %d is not supported' % self._original_opcode)

    def _send_closing_handshake(self, code, reason):
        """Builds and sends a close frame, marking this side as terminated."""
        body = create_closing_handshake_body(code, reason)
        frame = create_close_frame(
            body, mask=self._options.mask_send,
            frame_filters=self._options.outgoing_frame_filters)

        self._request.server_terminated = True

        self._write(frame)

    def close_connection(self, code=common.STATUS_NORMAL_CLOSURE, reason='',
                         wait_response=True):
        """Closes a WebSocket connection.

        Args:
            code: Status code for close frame. If code is None, a close
                frame with empty body will be sent.
            reason: string representing close reason.
            wait_response: True when caller want to wait the response.
        Raises:
            BadOperationException: when reason is specified with code None
                or reason is not an instance of both str and unicode.
        """

        if self._request.server_terminated:
            self._logger.debug(
                'Requested close_connection but server is already terminated')
            return

        if code is None:
            if reason is not None and len(reason) > 0:
                raise BadOperationException(
                    'close reason must not be specified if code is None')
            reason = ''
        else:
            if not isinstance(reason, str) and not isinstance(reason, unicode):
                raise BadOperationException(
                    'close reason must be an instance of str or unicode')

        self._send_closing_handshake(code, reason)
        self._logger.debug(
            'Initiated closing handshake (code=%r, reason=%r)',
            code, reason)

        if (code == common.STATUS_GOING_AWAY or
            code == common.STATUS_PROTOCOL_ERROR) or not wait_response:
            # It doesn't make sense to wait for a close frame if the reason is
            # protocol error or that the server is going away. For some of
            # other reasons, it might not make sense to wait for a close frame,
            # but it's not clear, yet.
            return

        # TODO(ukai): 2. wait until the /client terminated/ flag has been set,
        # or until a server-defined timeout expires.
        #
        # For now, we expect receiving closing handshake right after sending
        # out closing handshake.
        message = self.receive_message()
        if message is not None:
            raise ConnectionTerminatedException(
                'Didn\'t receive valid ack for closing handshake')

        # TODO: 3. close the WebSocket connection.
        # note: mod_python Connection (mp_conn) doesn't have close method.

    def send_ping(self, body=''):
        """Sends a ping frame and queues its body for matching against pongs."""
        frame = create_ping_frame(
            body,
            self._options.mask_send,
            self._options.outgoing_frame_filters)
        self._write(frame)

        self._ping_queue.append(body)

    def _send_pong(self, body):
        """Sends a pong frame carrying *body*."""
        frame = create_pong_frame(
            body,
            self._options.mask_send,
            self._options.outgoing_frame_filters)
        self._write(frame)

    def get_last_received_opcode(self):
        """Returns the opcode of the WebSocket message which the last received
        frame belongs to. The return value is valid iff immediately after
        receive_message call.
        """

        return self._original_opcode
# vi:sts=4 sw=4 et
|
WebSpider/headphones | refs/heads/master | lib/gntp/config.py | 128 | # Copyright: 2013 Paul Traylor
# These sources are released under the terms of the MIT license: see LICENSE
"""
The gntp.config module is provided as an extended GrowlNotifier object that takes
advantage of the ConfigParser module to allow us to setup some default values
(such as hostname, password, and port) in a more global way to be shared among
programs using gntp
"""
import logging
import os
import gntp.notifier
import gntp.shim
# Public API of this module.
__all__ = [
    'mini',
    'GrowlNotifier'
]

logger = logging.getLogger(__name__)
class GrowlNotifier(gntp.notifier.GrowlNotifier):
    """
    ConfigParser enhanced GrowlNotifier object

    For right now, we are only interested in letting users override certain
    values from ~/.gntp

    ::

        [gntp]
        hostname = ?
        password = ?
        port = ?
    """
    def __init__(self, *args, **kwargs):
        # Caller-supplied values (or the usual defaults) serve as the
        # parser defaults for anything missing from ~/.gntp.
        defaults = {
            'hostname': kwargs.get('hostname', 'localhost'),
            'password': kwargs.get('password'),
            'port': kwargs.get('port', 23053),
        }
        parser = gntp.shim.RawConfigParser(defaults)
        parser.read([os.path.expanduser('~/.gntp')])

        # If the file does not exist, then there will be no gntp section
        # defined and the get() calls below would get confused. Since we
        # are not saving the config, it is safe to just add an empty
        # section so the defaults above take effect.
        if not parser.has_section('gntp'):
            logger.info('Error reading ~/.gntp config file')
            parser.add_section('gntp')

        kwargs['password'] = parser.get('gntp', 'password')
        kwargs['hostname'] = parser.get('gntp', 'hostname')
        kwargs['port'] = parser.getint('gntp', 'port')

        super(GrowlNotifier, self).__init__(*args, **kwargs)
def mini(description, **kwargs):
    """Single notification function

    Simple notification function in one line. Has only one required parameter
    and attempts to use reasonable defaults for everything else

    :param string description: Notification message
    """
    # Force our config-aware notifier class, overriding any factory the
    # caller may have supplied.
    forwarded = dict(kwargs, notifierFactory=GrowlNotifier)
    gntp.notifier.mini(description, **forwarded)
if __name__ == '__main__':
    # If we're running this module directly we're likely running it as a
    # test, so extra debugging output is useful.
    logging.basicConfig(level=logging.INFO)
    mini('Testing mini notification')
|
gbiggs/rtlogplayer | refs/heads/master | rt_logplayer/simpkl_log.py | 1 | #!/usr/bin/env python
# -*- Python -*-
# -*- coding: utf-8 -*-
'''rtshell
Copyright (C) 2009-2011
Geoffrey Biggs
RT-Synthesis Research Group
Intelligent Systems Research Institute,
National Institute of Advanced Industrial Science and Technology (AIST),
Japan
All rights reserved.
Licensed under the Eclipse Public License -v 1.0 (EPL)
http://www.opensource.org/licenses/eclipse-1.0.txt
Pickle-based log.
'''
import copy
import os
import pickle
import traceback
import ilog
###############################################################################
## Current position pointer
class CurPos(object):
    """Bookkeeping for a position within the log file."""

    def __init__(self, index=0, timestamp=0, prev_pos=0, cache=0, file_pos=0):
        super(CurPos, self).__init__()
        # Zero-based index of the entry at this position.
        self.index = index
        # Time stamp recorded with the entry.
        self.ts = timestamp
        # File offset of the previous entry.
        self.prev = prev_pos
        # Cache position (kept in step with prev by SimplePickleLog.write).
        self.cache = cache
        # File offset of this position.
        self.fp = file_pos

    def __str__(self):
        fields = (self.index, self.ts, self.prev, self.cache, self.fp)
        return 'Index: {0}, timestamp: {1}, previous position: {2}, cache '\
            'position: {3}, file position: {4}'.format(*fields)
###############################################################################
## Simple pickle-based log object. Its support for the full log interface
## is rudimentary and slow (although writing and simple reading should be fast
## enough).
##
## The simple pickle-based format is as follows (each entry is serialised):
## Port specification (in the metadata block)
## [Data entries: (Index, Time stamp, Data)]
class SimplePickleLog(ilog.Log):
# Indices in data entries for bits of data
INDEX = 0
TS = 1
DATA = 2
FP = 3
PREV = 4
# Spare space at the start for pointers
BUFFER_SIZE = 256
    def __init__(self, filename='', *args, **kwargs):
        """Constructs a log backed by *filename*.

        Remaining positional/keyword arguments are forwarded to ilog.Log.
        """
        self._is_open = False
        self._fn = filename
        # Position of the entry that would be processed next.
        self._cur_pos = CurPos()
        # Cached positions of the first and last entries (None until known).
        self._start = None
        self._end = None
        # Entry pre-read while repositioning (see _backup_one).
        self._next = None
        # Index to assign to the next entry written.
        self._write_ind = 0
        # File offset of the previously written entry.
        self._prev_pos = 0
        super(SimplePickleLog, self).__init__(*args, **kwargs)
    def __str__(self):
        """Returns a short human-readable summary including current position."""
        return 'SimplePickleLog({0}, {1}) at position {2}.'.format(self._fn,
                self._mode, self._cur_pos)
    def write(self, timestamp, data):
        '''Append one entry to the log.

        Each record stores (index, timestamp, data, own file offset, offset
        of the previous record), so the log can be traversed backwards as
        well as forwards.
        '''
        val = (self._write_ind, timestamp, data, self._file.tell(), self._prev_pos)
        # Track the start of the last entry for later writing at the file start
        self._cur_pos.ts = timestamp
        self._end = copy.copy(self._cur_pos)
        # Record the new "previous" position before writing
        self._prev_pos = self._file.tell()
        self._write(val)
        # Update the current position to after the new final record
        self._cur_pos.index = val[self.INDEX] + 1
        self._cur_pos.ts = -1  # The not-yet-written entry has no timestamp
        self._cur_pos.prev = self._prev_pos
        self._cur_pos.cache = self._prev_pos
        self._cur_pos.fp = self._file.tell()
        self._write_ind += 1
        self._vb_print('Wrote entry at ({0}, {1}, {2}, {3}).'.format(
            val[self.INDEX], val[self.TS], val[self.FP], val[self.PREV]))
def read(self, timestamp=None, number=None):
if number is not None:
return self._read_number(number)
elif timestamp is not None:
return self._read_to_timestamp(timestamp)
else:
return self._read_single_entry()
    def rewind(self):
        '''Move back to the start of the log.

        In write mode this truncates the file at the current position
        before re-initialising the log structures.
        '''
        self._vb_print('Rewinding log from position {0}.'.format(
            self._cur_pos))
        if self._mode == 'r':
            self._file.seek(0)
        else:
            self._file.truncate()
            self._write_ind = 0
        self._init_log()
    def seek(self, timestamp=None, index=None):
        '''Jump to the entry with the given index or time stamp.

        @index takes precedence over @timestamp; with neither given the
        position is left unchanged.
        '''
        self._vb_print('Seeking log from position {0}.'.format(self._cur_pos))
        if index is not None:
            self._seek_to_index(index)
        elif timestamp is not None:
            self._seek_to_timestamp(timestamp)
        # Do nothing if neither is set
        self._vb_print('New current position: {0}.'.format(self._cur_pos))
    def _backup_one(self):
        '''Reverses in the log one entry.

        Uses the previous-record offset stored with the current position,
        so this is a single seek and read rather than a scan from the file
        start.  Also refreshes the read-ahead cache (self._next).
        '''
        self._vb_print('Backing up one entry from {0}.'.format(self._cur_pos))
        if self._cur_pos.index == 0:
            # Already at the start
            self._vb_print('Backup already at start.')
            return
        else:
            self._next = None
            target = self._cur_pos.prev
            # Move back in the file one entry
            self._file.seek(target)
            # Update the next pointer
            self._next = self._read()
            self._update_cur_pos(self._next)
            self._vb_print('New current position: {0}.'.format(self._cur_pos))
def _close(self):
if not self._is_open:
return
if self._mode == 'w':
# Go back to the beginning and write the end position
self._file.seek(0)
self._file.seek(self._buf_start) # Skip the meta data
self._write(self._end)
self._vb_print('Wrote end pointer: {0}'.format(self._end))
self._file.close()
self._is_open = False
self._start = None
self._end = None
self._vb_print('Closed file.')
    def _eof(self):
        # True once the read-ahead cache is exhausted (no more entries).
        return self._next is None
    def _get_cur_pos(self):
        '''Return the current position as an (index, timestamp) pair.'''
        self._vb_print('Current position: {0}'.format(self._cur_pos))
        return self._cur_pos.index, self._cur_pos.ts
    def _get_start(self):
        '''Return the first entry's (index, timestamp), measured lazily.'''
        if self._start is None:
            self._set_start()
        self._vb_print('Start position: {0}'.format(self._start))
        return (self._start.index, self._start.ts)
    def _get_end(self):
        '''Return the final entry's (index, timestamp).'''
        self._vb_print('End position: {0}'.format(self._end))
        return (self._end.index, self._end.ts)
    def _init_log(self):
        '''Prepare the file for use.

        Read mode: consume the metadata block and the end-marker, skip the
        reserved buffer, then position at the first entry and fill the
        read-ahead cache.  Write mode: write the metadata block and reserve
        BUFFER_SIZE bytes for the end-marker (filled in by _close()).
        '''
        if self._mode == 'r':
            self._vb_print('Initialising log for reading.')
            # Read out the metadata
            self._meta = self._read()
            pos = self._file.tell()
            # Read the end marker
            self._end = self._read()
            # Skip to the start of the data
            self._file.seek(pos + self.BUFFER_SIZE)
            self._vb_print('Read end position: {0}'.format(self._end))
            # Grab the position of the first entry and make it the current
            self._set_start()
            self._cur_pos = copy.copy(self._start)
            # Get the first entry
            self._next = self._read()
        else:
            self._vb_print('Initialising log for writing.')
            # Write the metadata
            self._write(self._meta)
            self._vb_print('Wrote meta data of length {0}'.format(
                self._file.tell()))
            self._buf_start = self._file.tell()
            # Put some blank space to write the end marker
            self._file.write(''.ljust(self.BUFFER_SIZE))
            self._vb_print('Wrote buffer of length {0} at position {1}'.format(
                self.BUFFER_SIZE, self._buf_start))
            self._write_ind = 0
            self._prev_pos = 0
            self._cur_pos = CurPos(file_pos=self._file.tell())
            self._vb_print('First entry will be written at {0}'.format(
                self._cur_pos))
    def _open(self):
        '''Open the backing file and initialise the log structures.'''
        if self._is_open:
            return
        if self._mode == 'r':
            flags = 'rb'
        elif self._mode == 'w':
            flags = 'wb'
        else:
            # Only plain read and write modes are supported by this backend.
            raise NotImplementedError
        self._file = open(self._fn, flags)
        self._init_log()
        self._is_open = True
        self._vb_print('Opened file {0} in mode {1}.'.format(self._fn,
            self._mode))
def _read(self):
'''Read a single entry from the log.'''
self._vb_print('Reading one data block at {0}.'.format(
self._file.tell()))
try:
data = pickle.load(self._file)
except EOFError:
self._vb_print('End of log reached.')
raise ilog.EndOfLogError
return data
def _read_number(self, number):
self._vb_print('Reading {0} entries.'.format(number))
res = []
if number < 0:
raise ValueError
if not self._next:
self._vb_print('End of log before reading.')
return []
try:
for ii in range(number):
res.append((self._next[self.INDEX], self._next[self.TS],
self._next[self.DATA]))
self._next = self._read()
if not self._next:
self._set_eof_pos()
self._vb_print('End of log during reading, current '\
'position is {1}.'.format(self._cur_pos))
break
self._update_cur_pos(self._next)
self._vb_print('Read entry {0} of {1}, current position '\
'is {2}.'.format(ii + 1, number, self._cur_pos))
except ilog.EndOfLogError:
self._set_eof_pos()
self._next = None
self._vb_print('End of log while reading, current '\
'position is {0}.'.format(self._cur_pos))
self._vb_print('Finished reading; current position is ' \
'{0}.'.format(self._cur_pos))
return res
def _read_to_timestamp(self, timestamp):
self._vb_print('Reading until time stamp {0}.'.format(timestamp))
res = []
if timestamp < 0:
raise ValueError
if not self._next:
self._vb_print('End of log before reading.')
return []
if self._cur_pos.ts > timestamp:
# The time limit is before the next item - nothing to read
self._vb_print('Current position is beyond the time limit.')
return []
try:
while self._next[self.TS] <= timestamp:
res.append((self._next[self.INDEX], self._next[self.TS],
self._next[self.DATA]))
self._next = self._read()
if not self._next:
self._set_eof_pos()
self._vb_print('End of log during reading, current '\
'position is {1}.'.format(self._cur_pos))
break
self._update_cur_pos(self._next)
self._vb_print('Read entry at time index {0}, current '\
'position is {1}.'.format(res[-1][1], self._cur_pos))
except ilog.EndOfLogError:
self._set_eof_pos()
self._next = None
self._vb_print('End of log while reading, current '\
'position is {0}.'.format(self._cur_pos))
self._vb_print('Finished reading; current position is ' \
'{0}.'.format(self._cur_pos))
return res
    def _read_single_entry(self):
        '''Read the next entry, returned as a single-element list.

        Returns an empty list at the end of the log.
        '''
        self._vb_print('Reading a single entry.')
        if not self._next:
            self._vb_print('End of log before reading.')
            return []
        else:
            res = [(self._next[self.INDEX], self._next[self.TS],
                self._next[self.DATA])]
            # Refill the read-ahead cache for the following call.
            try:
                self._next = self._read()
            except ilog.EndOfLogError:
                self._next = None
            if not self._next:
                self._set_eof_pos()
                self._vb_print('End of log during reading, current '\
                        'position is {0}.'.format(self._cur_pos))
            else:
                self._update_cur_pos(self._next)
                self._vb_print('Read entry, current position is ' \
                        '{0}.'.format(self._cur_pos))
            self._vb_print('Cached next entry is {0}'.format(self._next))
            return res
    def _seek_to_index(self, ind):
        '''Seeks forward or backward in the log to find the given index.

        Raises ilog.InvalidIndexError for a negative index; stops at the
        start/end of the log if @ind is out of range.
        '''
        if ind == self._cur_pos.index:
            self._vb_print('Seek by index: already at destination.')
            return
        if ind < 0:
            raise ilog.InvalidIndexError
        elif ind < self._cur_pos.index:
            # Rewind
            # TODO: Rewinding may be more efficient in many cases if done by
            # fast-forwarding from the start of the file rather than traversing
            # backwards.
            self._vb_print('Rewinding to index {0}.'.format(ind))
            while self._cur_pos.index > ind and self._cur_pos.index > 0:
                self._backup_one()
        else:
            # Fast-forward
            self._vb_print('Fast-forwarding to index {0}.'.format(ind))
            while self._cur_pos.index < ind:
                if not self.read():
                    break # EOF
        self._vb_print('New current position is {0}.'.format(self._cur_pos))
    def _seek_to_timestamp(self, ts):
        '''Seeks forward or backward in the log to find the given timestamp.

        After seeking, the current position is at the first entry whose
        time stamp is >= @ts (or the log boundary if out of range).
        '''
        if ts == self._cur_pos.ts and not self.eof:
            self._vb_print('Seek by timestamp: already at destination.')
            return
        elif ts < self._cur_pos.ts or self.eof:
            # Rewind
            self._vb_print('Rewinding to timestamp {0}.'.format(ts))
            while (self._cur_pos.ts > ts and self._cur_pos.index > 0) or \
                    self.eof:
                self._backup_one()
            # Need to move one forward again, unless have hit the beginning
            if self._cur_pos.ts < ts:
                self.read()
        else:
            self._vb_print('Fast-forwarding to timestamp {0}.'.format(ts))
            # Fast-forward
            while self._cur_pos.ts < ts:
                if not self.read():
                    break # EOF
        self._vb_print('New current position is {0}.'.format(self._cur_pos))
    def _set_eof_pos(self):
        '''Sets the current position to the end-of-file value.

        The "previous" pointer is kept valid so _backup_one() can still
        step back onto the final entry.
        '''
        self._vb_print('Setting EOF at file position {0}, prev cur pos '\
                '{1}'.format(self._file.tell(), self._cur_pos))
        self._cur_pos.index += 1 # The "next" index
        # Don't touch the time stamp (indicates the end time of the file)
        self._cur_pos.prev = self._cur_pos.cache # This is the final entry
        self._cur_pos.cache = 0 # No valid entry at current file position
        self._cur_pos.fp = self._file.tell() # This is the end of the file
    def _set_start(self):
        '''Measure and cache the position of the first entry.

        Leaves the file position where it was on entry.
        '''
        # Save the current position
        current = self._file.tell()
        # Move to the start
        self._file.seek(0)
        # Skip the metadata block
        self._read()
        # Skip the buffer
        self._file.seek(self.BUFFER_SIZE, os.SEEK_CUR)
        # Read the first entry
        pos = self._file.tell()
        entry = self._read()
        self._start = CurPos(entry[self.INDEX], entry[self.TS],
                entry[self.PREV], pos, self._file.tell())
        self._file.seek(current)
        self._vb_print('Measured start position: {0}'.format(self._start))
    def _update_cur_pos(self, val):
        '''Updates the current pos from a data entry.

        @val is a full record tuple; assumes the file pointer is just past
        the record that was read.
        '''
        self._cur_pos.index = val[self.INDEX]
        self._cur_pos.ts = val[self.TS]
        self._cur_pos.prev = val[self.PREV]
        self._cur_pos.cache = self._cur_pos.fp
        self._cur_pos.fp = self._file.tell()
    def _write(self, data):
        '''Pickle some data and write it to the file.

        Uses the highest available pickle protocol for compactness.
        '''
        self._vb_print('Writing one data block.')
        pickle.dump(data, self._file, pickle.HIGHEST_PROTOCOL)
|
damonkohler/sl4a | refs/heads/master | python/src/Lib/multiprocessing/pool.py | 52 | #
# Module providing the `Pool` class for managing a process pool
#
# multiprocessing/pool.py
#
# Copyright (c) 2007-2008, R Oudkerk --- see COPYING.txt
#
__all__ = ['Pool']
#
# Imports
#
import threading
import Queue
import itertools
import collections
import time
from multiprocessing import Process, cpu_count, TimeoutError
from multiprocessing.util import Finalize, debug
#
# Constants representing the state of a pool
#
RUN = 0
CLOSE = 1
TERMINATE = 2
#
# Miscellaneous
#
job_counter = itertools.count()
def mapstar(args):
    '''Unpack an (func, iterable, ...) tuple and apply map() to it.'''
    return map(args[0], *args[1:])
#
# Code run by worker processes
#
def worker(inqueue, outqueue, initializer=None, initargs=()):
    '''Main loop run in each pool worker process.

    Pulls (job, i, func, args, kwds) tasks from @inqueue, executes them
    and puts (job, i, (success, value)) results on @outqueue.  A task of
    None is the sentinel telling the worker to exit.
    '''
    put = outqueue.put
    get = inqueue.get
    if hasattr(inqueue, '_writer'):
        # Pipe-backed queues: close the connection ends this process
        # does not use.
        inqueue._writer.close()
        outqueue._reader.close()
    if initializer is not None:
        initializer(*initargs)
    while 1:
        try:
            task = get()
        except (EOFError, IOError):
            debug('worker got EOFError or IOError -- exiting')
            break
        if task is None:
            debug('worker got sentinel -- exiting')
            break
        job, i, func, args, kwds = task
        try:
            result = (True, func(*args, **kwds))
        except Exception, e:
            # Failures are shipped back to the parent, not raised here.
            result = (False, e)
        put((job, i, result))
#
# Class representing a process pool
#
class Pool(object):
'''
Class which supports an async version of the `apply()` builtin
'''
Process = Process
    def __init__(self, processes=None, initializer=None, initargs=()):
        '''Start @processes worker processes (default: cpu_count()), plus
        the task-feeding and result-collecting handler threads.
        '''
        self._setup_queues()
        self._taskqueue = Queue.Queue()
        self._cache = {}  # job id -> pending ApplyResult/MapResult/iterator
        self._state = RUN
        if processes is None:
            try:
                processes = cpu_count()
            except NotImplementedError:
                processes = 1
        self._pool = []
        for i in range(processes):
            w = self.Process(
                target=worker,
                args=(self._inqueue, self._outqueue, initializer, initargs)
                )
            self._pool.append(w)
            w.name = w.name.replace('Process', 'PoolWorker')
            w.daemon = True
            w.start()
        # Thread moving tasks from _taskqueue into the workers' input queue.
        self._task_handler = threading.Thread(
            target=Pool._handle_tasks,
            args=(self._taskqueue, self._quick_put, self._outqueue, self._pool)
            )
        self._task_handler.daemon = True
        self._task_handler._state = RUN
        self._task_handler.start()
        # Thread delivering worker results into the pending-result cache.
        self._result_handler = threading.Thread(
            target=Pool._handle_results,
            args=(self._outqueue, self._quick_get, self._cache)
            )
        self._result_handler.daemon = True
        self._result_handler._state = RUN
        self._result_handler.start()
        # Registered cleanup run at garbage collection or interpreter exit.
        self._terminate = Finalize(
            self, self._terminate_pool,
            args=(self._taskqueue, self._inqueue, self._outqueue, self._pool,
                  self._task_handler, self._result_handler, self._cache),
            exitpriority=15
            )
    def _setup_queues(self):
        # Pipe-backed queues; _quick_put/_quick_get use the underlying
        # connection objects directly, bypassing the queue wrappers.
        from .queues import SimpleQueue
        self._inqueue = SimpleQueue()
        self._outqueue = SimpleQueue()
        self._quick_put = self._inqueue._writer.send
        self._quick_get = self._outqueue._reader.recv
    def apply(self, func, args=(), kwds={}):
        '''
        Equivalent of `apply()` builtin

        Blocks until the result is ready; only valid while the pool is
        in the RUN state.
        '''
        assert self._state == RUN
        return self.apply_async(func, args, kwds).get()
    def map(self, func, iterable, chunksize=None):
        '''
        Equivalent of `map()` builtin

        Blocks until every result is ready; only valid while the pool is
        in the RUN state.
        '''
        assert self._state == RUN
        return self.map_async(func, iterable, chunksize).get()
    def imap(self, func, iterable, chunksize=1):
        '''
        Equivalent of `itertools.imap()` -- can be MUCH slower than `Pool.map()`

        Results are yielded lazily in submission order.  With chunksize > 1
        the iterable is split into batches dispatched via mapstar, and the
        returned generator flattens the chunked results again.
        '''
        assert self._state == RUN
        if chunksize == 1:
            result = IMapIterator(self._cache)
            self._taskqueue.put((((result._job, i, func, (x,), {})
                         for i, x in enumerate(iterable)), result._set_length))
            return result
        else:
            assert chunksize > 1
            task_batches = Pool._get_tasks(func, iterable, chunksize)
            result = IMapIterator(self._cache)
            self._taskqueue.put((((result._job, i, mapstar, (x,), {})
                     for i, x in enumerate(task_batches)), result._set_length))
            return (item for chunk in result for item in chunk)
    def imap_unordered(self, func, iterable, chunksize=1):
        '''
        Like `imap()` method but ordering of results is arbitrary

        Results are yielded in completion order rather than submission
        order; otherwise identical in structure to imap().
        '''
        assert self._state == RUN
        if chunksize == 1:
            result = IMapUnorderedIterator(self._cache)
            self._taskqueue.put((((result._job, i, func, (x,), {})
                         for i, x in enumerate(iterable)), result._set_length))
            return result
        else:
            assert chunksize > 1
            task_batches = Pool._get_tasks(func, iterable, chunksize)
            result = IMapUnorderedIterator(self._cache)
            self._taskqueue.put((((result._job, i, mapstar, (x,), {})
                     for i, x in enumerate(task_batches)), result._set_length))
            return (item for chunk in result for item in chunk)
    def apply_async(self, func, args=(), kwds={}, callback=None):
        '''
        Asynchronous equivalent of `apply()` builtin

        Returns an ApplyResult immediately; @callback (if given) is run in
        the result-handler thread when the task succeeds.
        '''
        assert self._state == RUN
        result = ApplyResult(self._cache, callback)
        self._taskqueue.put(([(result._job, None, func, args, kwds)], None))
        return result
    def map_async(self, func, iterable, chunksize=None, callback=None):
        '''
        Asynchronous equivalent of `map()` builtin

        Returns a MapResult immediately.  The default chunksize aims for
        roughly four batches per worker process.
        '''
        assert self._state == RUN
        if not hasattr(iterable, '__len__'):
            iterable = list(iterable)
        if chunksize is None:
            chunksize, extra = divmod(len(iterable), len(self._pool) * 4)
            if extra:
                chunksize += 1
        task_batches = Pool._get_tasks(func, iterable, chunksize)
        result = MapResult(self._cache, chunksize, len(iterable), callback)
        self._taskqueue.put((((result._job, i, mapstar, (x,), {})
                              for i, x in enumerate(task_batches)), None))
        return result
    @staticmethod
    def _handle_tasks(taskqueue, put, outqueue, pool):
        '''Task handler thread body.

        Drains task sequences from @taskqueue into the workers via @put.
        A None item on @taskqueue is the shutdown sentinel, after which
        sentinels are forwarded to the result handler and every worker.
        '''
        thread = threading.current_thread()
        # The for/else + break/continue structure below distinguishes an
        # aborted task sequence (break out of both loops) from a fully
        # consumed one (continue with the next sequence).
        for taskseq, set_length in iter(taskqueue.get, None):
            i = -1
            for i, task in enumerate(taskseq):
                if thread._state:
                    debug('task handler found thread._state != RUN')
                    break
                try:
                    put(task)
                except IOError:
                    debug('could not put task on queue')
                    break
            else:
                if set_length:
                    debug('doing set_length()')
                    set_length(i+1)
                continue
            break
        else:
            debug('task handler got sentinel')
        try:
            # tell result handler to finish when cache is empty
            debug('task handler sending sentinel to result handler')
            outqueue.put(None)
            # tell workers there is no more work
            debug('task handler sending sentinel to workers')
            for p in pool:
                put(None)
        except IOError:
            debug('task handler got IOError when sending sentinels')
        debug('task handler exiting')
    @staticmethod
    def _handle_results(outqueue, get, cache):
        '''Result handler thread body.

        Delivers (job, i, result) items from the workers into the pending
        results registered in @cache, then drains remaining items during
        shutdown.  A None item is the shutdown sentinel.
        '''
        thread = threading.current_thread()
        while 1:
            try:
                task = get()
            except (IOError, EOFError):
                debug('result handler got EOFError/IOError -- exiting')
                return
            if thread._state:
                assert thread._state == TERMINATE
                debug('result handler found thread._state=TERMINATE')
                break
            if task is None:
                debug('result handler got sentinel')
                break
            job, i, obj = task
            try:
                cache[job]._set(i, obj)
            except KeyError:
                # The job was already discarded (e.g. iterator finished).
                pass
        # Drain outstanding results so waiters are not left blocked.
        while cache and thread._state != TERMINATE:
            try:
                task = get()
            except (IOError, EOFError):
                debug('result handler got EOFError/IOError -- exiting')
                return
            if task is None:
                debug('result handler ignoring extra sentinel')
                continue
            job, i, obj = task
            try:
                cache[job]._set(i, obj)
            except KeyError:
                pass
        if hasattr(outqueue, '_reader'):
            debug('ensuring that outqueue is not full')
            # If we don't make room available in outqueue then
            # attempts to add the sentinel (None) to outqueue may
            # block.  There is guaranteed to be no more than 2 sentinels.
            try:
                for i in range(10):
                    if not outqueue._reader.poll():
                        break
                    get()
            except (IOError, EOFError):
                pass
        debug('result handler exiting: len(cache)=%s, thread._state=%s',
              len(cache), thread._state)
@staticmethod
def _get_tasks(func, it, size):
it = iter(it)
while 1:
x = tuple(itertools.islice(it, size))
if not x:
return
yield (func, x)
    def __reduce__(self):
        # Pools own OS resources (processes, pipes, threads) that cannot
        # be meaningfully serialised.
        raise NotImplementedError(
              'pool objects cannot be passed between processes or pickled'
              )
    def close(self):
        '''Stop accepting new tasks; queued tasks still complete.'''
        debug('closing pool')
        if self._state == RUN:
            self._state = CLOSE
            self._taskqueue.put(None)
    def terminate(self):
        '''Stop the pool immediately, killing outstanding work.'''
        debug('terminating pool')
        self._state = TERMINATE
        self._terminate()
    def join(self):
        '''Wait for handler threads and workers to exit.

        Must be preceded by close() or terminate().
        '''
        debug('joining pool')
        assert self._state in (CLOSE, TERMINATE)
        self._task_handler.join()
        self._result_handler.join()
        for p in self._pool:
            p.join()
    @staticmethod
    def _help_stuff_finish(inqueue, task_handler, size):
        # task_handler may be blocked trying to put items on inqueue
        debug('removing tasks from inqueue until task handler finished')
        # NOTE(review): the reader lock is acquired and never released --
        # presumably intentional since the pool is being torn down and
        # workers must not consume these reads; confirm before changing.
        inqueue._rlock.acquire()
        while task_handler.is_alive() and inqueue._reader.poll():
            inqueue._reader.recv()
            time.sleep(0)
    @classmethod
    def _terminate_pool(cls, taskqueue, inqueue, outqueue, pool,
                        task_handler, result_handler, cache):
        '''Forcefully shut everything down: signal both handler threads,
        unblock the task handler, terminate and join the workers.
        '''
        # this is guaranteed to only be called once
        debug('finalizing pool')
        task_handler._state = TERMINATE
        taskqueue.put(None)                 # sentinel
        debug('helping task handler/workers to finish')
        cls._help_stuff_finish(inqueue, task_handler, len(pool))
        assert result_handler.is_alive() or len(cache) == 0
        result_handler._state = TERMINATE
        outqueue.put(None)                  # sentinel
        if pool and hasattr(pool[0], 'terminate'):
            debug('terminating workers')
            for p in pool:
                p.terminate()
        debug('joining task handler')
        task_handler.join(1e100)
        debug('joining result handler')
        result_handler.join(1e100)
        if pool and hasattr(pool[0], 'terminate'):
            debug('joining pool workers')
            for p in pool:
                p.join()
#
# Class whose instances are returned by `Pool.apply_async()`
#
class ApplyResult(object):
    '''Future-like handle for one asynchronous task.

    The result handler thread calls _set() when the worker's answer
    arrives; get()/wait() block on the internal condition variable.
    '''
    def __init__(self, cache, callback):
        self._cond = threading.Condition(threading.Lock())
        self._job = job_counter.next()
        self._cache = cache
        self._ready = False
        self._callback = callback
        cache[self._job] = self  # registered until the result is delivered
    def ready(self):
        # True once _set() has delivered a result (success or failure).
        return self._ready
    def successful(self):
        # Only meaningful once the result is ready.
        assert self._ready
        return self._success
    def wait(self, timeout=None):
        '''Block until the result arrives or @timeout seconds elapse.'''
        self._cond.acquire()
        try:
            if not self._ready:
                self._cond.wait(timeout)
        finally:
            self._cond.release()
    def get(self, timeout=None):
        '''Return the task's value, re-raising the worker's exception.'''
        self.wait(timeout)
        if not self._ready:
            raise TimeoutError
        if self._success:
            return self._value
        else:
            raise self._value
    def _set(self, i, obj):
        # Called from the result handler thread; obj is (success, value).
        self._success, self._value = obj
        if self._callback and self._success:
            self._callback(self._value)
        self._cond.acquire()
        try:
            self._ready = True
            self._cond.notify()
        finally:
            self._cond.release()
        del self._cache[self._job]
#
# Class whose instances are returned by `Pool.map_async()`
#
class MapResult(ApplyResult):
    '''Result handle for map_async(): collects per-chunk results into a
    single list, becoming ready when the last chunk (or first failure)
    arrives.
    '''
    def __init__(self, cache, chunksize, length, callback):
        ApplyResult.__init__(self, cache, callback)
        self._success = True
        self._value = [None] * length
        self._chunksize = chunksize
        if chunksize <= 0:
            # Empty input: there is nothing to wait for.
            self._number_left = 0
            self._ready = True
        else:
            self._number_left = length//chunksize + bool(length % chunksize)
    def _set(self, i, success_result):
        # Called once per chunk; i is the chunk index.
        success, result = success_result
        if success:
            self._value[i*self._chunksize:(i+1)*self._chunksize] = result
            self._number_left -= 1
            if self._number_left == 0:
                if self._callback:
                    self._callback(self._value)
                del self._cache[self._job]
                self._cond.acquire()
                try:
                    self._ready = True
                    self._cond.notify()
                finally:
                    self._cond.release()
        else:
            # First failing chunk aborts the whole map.
            self._success = False
            self._value = result
            del self._cache[self._job]
            self._cond.acquire()
            try:
                self._ready = True
                self._cond.notify()
            finally:
                self._cond.release()
#
# Class whose instances are returned by `Pool.imap()`
#
class IMapIterator(object):
    '''Iterator returned by Pool.imap(): yields results in submission
    order, buffering out-of-order arrivals in self._unsorted until their
    turn comes.
    '''
    def __init__(self, cache):
        self._cond = threading.Condition(threading.Lock())
        self._job = job_counter.next()
        self._cache = cache
        self._items = collections.deque()  # results ready to be yielded
        self._index = 0                    # index of the next expected result
        self._length = None                # total count, set by _set_length()
        self._unsorted = {}                # early arrivals keyed by index
        cache[self._job] = self
    def __iter__(self):
        return self
    def next(self, timeout=None):
        '''Return the next result, blocking up to @timeout seconds.'''
        self._cond.acquire()
        try:
            try:
                item = self._items.popleft()
            except IndexError:
                if self._index == self._length:
                    raise StopIteration
                self._cond.wait(timeout)
                # Re-check after waking: either an item arrived, the
                # iterator finished, or the wait timed out.
                try:
                    item = self._items.popleft()
                except IndexError:
                    if self._index == self._length:
                        raise StopIteration
                    raise TimeoutError
        finally:
            self._cond.release()
        success, value = item
        if success:
            return value
        raise value
    __next__ = next                    # XXX
    def _set(self, i, obj):
        # Called from the result handler thread with the i-th result.
        self._cond.acquire()
        try:
            if self._index == i:
                self._items.append(obj)
                self._index += 1
                # Flush any buffered results that are now in order.
                while self._index in self._unsorted:
                    obj = self._unsorted.pop(self._index)
                    self._items.append(obj)
                    self._index += 1
                self._cond.notify()
            else:
                self._unsorted[i] = obj
            if self._index == self._length:
                del self._cache[self._job]
        finally:
            self._cond.release()
    def _set_length(self, length):
        # Called by the task handler once the input size is known.
        self._cond.acquire()
        try:
            self._length = length
            if self._index == self._length:
                self._cond.notify()
                del self._cache[self._job]
        finally:
            self._cond.release()
#
# Class whose instances are returned by `Pool.imap_unordered()`
#
class IMapUnorderedIterator(IMapIterator):
    '''Iterator returned by Pool.imap_unordered(): results are yielded in
    completion order rather than submission order.
    '''
    def _set(self, i, obj):
        # Results are queued as they arrive; the submission index *i* is
        # deliberately ignored.
        with self._cond:
            self._items.append(obj)
            self._index += 1
            self._cond.notify()
            if self._index == self._length:
                del self._cache[self._job]
#
#
#
class ThreadPool(Pool):
    '''Pool variant whose workers are threads rather than processes.'''
    from .dummy import Process  # thread-backed Process replacement
    def __init__(self, processes=None, initializer=None, initargs=()):
        Pool.__init__(self, processes, initializer, initargs)
    def _setup_queues(self):
        # Plain thread-safe queues; no pipes are involved, so put/get are
        # already the fast path.
        self._inqueue = Queue.Queue()
        self._outqueue = Queue.Queue()
        self._quick_put = self._inqueue.put
        self._quick_get = self._outqueue.get
    @staticmethod
    def _help_stuff_finish(inqueue, task_handler, size):
        # put sentinels at head of inqueue to make workers finish
        inqueue.not_empty.acquire()
        try:
            inqueue.queue.clear()
            inqueue.queue.extend([None] * size)
            inqueue.not_empty.notify_all()
        finally:
            inqueue.not_empty.release()
|
hendradarwin/VTK | refs/heads/master | Imaging/Core/Testing/Python/ReslicePermuteSlab.py | 20 | #!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# this script tests vtkImageReslice with different slab modes
# Image pipeline
# Read a 64x64x93 unsigned short volume (the headsq quarter dataset).
reader = vtk.vtkImageReader()
reader.ReleaseDataFlagOff()
reader.SetDataByteOrderToLittleEndian()
reader.SetDataScalarTypeToUnsignedShort()
reader.SetDataExtent(0,63,0,63,1,93)
reader.SetDataSpacing(3.2,3.2,1.5)
reader.SetDataOrigin(-100.8,-100.8,-70.5)
reader.SetFilePrefix("" + str(VTK_DATA_ROOT) + "/Data/headsq/quarter")
reader.SetDataMask(0x7fff)
# Float copy of the volume, used for the "sum" slab so values can exceed
# the unsigned short range.
caster = vtk.vtkImageCast()
caster.SetInputConnection(reader.GetOutputPort())
caster.SetOutputScalarTypeToFloat()
# Slab 1: mean of 45 slices with trapezoid integration, axial orientation.
reslice1 = vtk.vtkImageReslice()
reslice1.SetInputConnection(reader.GetOutputPort())
reslice1.SetSlabModeToMean()
reslice1.SlabTrapezoidIntegrationOn()
reslice1.SetSlabNumberOfSlices(45)
reslice1.SetInterpolationModeToLinear()
reslice1.SetOutputDimensionality(2)
reslice1.SetOutputSpacing(3.2,3.2,1.5)
reslice1.SetOutputExtent(0,63,0,63,0,0)
# Slab 2: sum of all 93 slices through the float volume, axes permuted.
reslice2 = vtk.vtkImageReslice()
reslice2.SetInputConnection(caster.GetOutputPort())
reslice2.SetSlabModeToSum()
reslice2.SetSlabNumberOfSlices(93)
reslice2.SetInterpolationModeToLinear()
reslice2.SetOutputDimensionality(2)
reslice2.SetOutputSpacing(3.2,3.2,1.5)
reslice2.SetOutputExtent(0,63,0,63,0,0)
reslice2.SetResliceAxesDirectionCosines([1,0,0,0,0,-1,0,1,0])
# Slab 3: maximum-intensity projection over 50 slices, nearest neighbour.
reslice3 = vtk.vtkImageReslice()
reslice3.SetInputConnection(reader.GetOutputPort())
reslice3.SetSlabModeToMax()
reslice3.SetInterpolationModeToNearestNeighbor()
reslice3.SetSlabNumberOfSlices(50)
reslice3.SetOutputDimensionality(2)
reslice3.SetOutputSpacing(3.2,3.2,1.5)
reslice3.SetOutputExtent(0,63,0,63,0,0)
reslice3.SetResliceAxesDirectionCosines([0,+1,0,0,0,-1,-1,0,0])
# Slab 4: minimum over a 2-slice slab with cubic interpolation.
reslice4 = vtk.vtkImageReslice()
reslice4.SetInputConnection(reader.GetOutputPort())
reslice4.SetSlabModeToMin()
reslice4.SetSlabNumberOfSlices(2)
reslice4.SetInterpolationModeToCubic()
reslice4.SetOutputDimensionality(2)
reslice4.SetOutputSpacing(3.2,3.2,1.5)
reslice4.SetOutputExtent(0,63,0,63,0,0)
reslice4.SetResliceAxesDirectionCosines([0,0,1,0,1,0,-1,0,0])
# One mapper per slab; the sum output needs a much wider window/level.
mapper1 = vtk.vtkImageMapper()
mapper1.SetInputConnection(reslice1.GetOutputPort())
mapper1.SetColorWindow(2000)
mapper1.SetColorLevel(1000)
mapper1.SetZSlice(0)
mapper2 = vtk.vtkImageMapper()
mapper2.SetInputConnection(reslice2.GetOutputPort())
mapper2.SetColorWindow(50000)
mapper2.SetColorLevel(100000)
mapper2.SetZSlice(0)
mapper3 = vtk.vtkImageMapper()
mapper3.SetInputConnection(reslice3.GetOutputPort())
mapper3.SetColorWindow(2000)
mapper3.SetColorLevel(1000)
mapper3.SetZSlice(0)
mapper4 = vtk.vtkImageMapper()
mapper4.SetInputConnection(reslice4.GetOutputPort())
mapper4.SetColorWindow(2000)
mapper4.SetColorLevel(1000)
mapper4.SetZSlice(0)
actor1 = vtk.vtkActor2D()
actor1.SetMapper(mapper1)
actor2 = vtk.vtkActor2D()
actor2.SetMapper(mapper2)
actor3 = vtk.vtkActor2D()
actor3.SetMapper(mapper3)
actor4 = vtk.vtkActor2D()
actor4.SetMapper(mapper4)
# 2x2 viewport layout: one renderer per slab mode.
imager1 = vtk.vtkRenderer()
imager1.AddActor2D(actor1)
imager1.SetViewport(0.5,0.0,1.0,0.5)
imager2 = vtk.vtkRenderer()
imager2.AddActor2D(actor2)
imager2.SetViewport(0.0,0.0,0.5,0.5)
imager3 = vtk.vtkRenderer()
imager3.AddActor2D(actor3)
imager3.SetViewport(0.5,0.5,1.0,1.0)
imager4 = vtk.vtkRenderer()
imager4.AddActor2D(actor4)
imager4.SetViewport(0.0,0.5,0.5,1.0)
imgWin = vtk.vtkRenderWindow()
imgWin.AddRenderer(imager1)
imgWin.AddRenderer(imager2)
imgWin.AddRenderer(imager3)
imgWin.AddRenderer(imager4)
imgWin.SetSize(150,128)
imgWin.Render()
# --- end of script --
|
plotly/python-api | refs/heads/master | packages/python/plotly/plotly/validators/scattercarpet/hoverlabel/font/_size.py | 1 | import _plotly_utils.basevalidators
class SizeValidator(_plotly_utils.basevalidators.NumberValidator):
    """Auto-generated validator for the ``scattercarpet.hoverlabel.font.size``
    property: a number >= 1, with arrays accepted (``array_ok``)."""

    def __init__(
        self, plotly_name="size", parent_name="scattercarpet.hoverlabel.font", **kwargs
    ):
        super(SizeValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            array_ok=kwargs.pop("array_ok", True),
            edit_type=kwargs.pop("edit_type", "none"),
            min=kwargs.pop("min", 1),
            role=kwargs.pop("role", "style"),
            **kwargs
        )
|
currychou/1 | refs/heads/master | static/Brython3.1.0-20150301-090019/Lib/formatter.py | 751 | """Generic output formatting.
Formatter objects transform an abstract flow of formatting events into
specific output events on writer objects. Formatters manage several stack
structures to allow various properties of a writer object to be changed and
restored; writers need not be able to handle relative changes nor any sort
of ``change back'' operation. Specific writer properties which may be
controlled via formatter objects are horizontal alignment, font, and left
margin indentations. A mechanism is provided which supports providing
arbitrary, non-exclusive style settings to a writer as well. Additional
interfaces facilitate formatting events which are not reversible, such as
paragraph separation.
Writer objects encapsulate device interfaces. Abstract devices, such as
file formats, are supported as well as physical devices. The provided
implementations all work with abstract devices. The interface makes
available mechanisms for setting the properties which formatter objects
manage and inserting data into the output.
"""
import sys
AS_IS = None
class NullFormatter:
    """A formatter which does nothing.

    If the writer parameter is omitted, a NullWriter instance is created.
    No methods of the writer are called by NullFormatter instances.
    Implementations should inherit from this class if implementing a writer
    interface but don't need to inherit any implementation.
    """
    def __init__(self, writer=None):
        if writer is None:
            writer = NullWriter()
        self.writer = writer
    # Every formatting event below is deliberately a no-op.
    def end_paragraph(self, blankline): pass
    def add_line_break(self): pass
    def add_hor_rule(self, *args, **kw): pass
    def add_label_data(self, format, counter, blankline=None): pass
    def add_flowing_data(self, data): pass
    def add_literal_data(self, data): pass
    def flush_softspace(self): pass
    def push_alignment(self, align): pass
    def pop_alignment(self): pass
    def push_font(self, x): pass
    def pop_font(self): pass
    def push_margin(self, margin): pass
    def pop_margin(self): pass
    def set_spacing(self, spacing): pass
    def push_style(self, *styles): pass
    def pop_style(self, n=1): pass
    def assert_line_data(self, flag=1): pass
class AbstractFormatter:
"""The standard formatter.
This implementation has demonstrated wide applicability to many writers,
and may be used directly in most circumstances. It has been used to
implement a full-featured World Wide Web browser.
"""
# Space handling policy: blank spaces at the boundary between elements
# are handled by the outermost context. "Literal" data is not checked
# to determine context, so spaces in literal data are handled directly
# in all circumstances.
    def __init__(self, writer):
        '''Initialise all formatting state; output goes to *writer*.'''
        self.writer = writer            # Output device
        self.align = None               # Current alignment
        self.align_stack = []           # Alignment stack
        self.font_stack = []            # Font state
        self.margin_stack = []          # Margin state
        self.spacing = None             # Vertical spacing state
        self.style_stack = []           # Other state, e.g. color
        self.nospace = 1                # Should leading space be suppressed
        self.softspace = 0              # Should a space be inserted
        self.para_end = 1               # Just ended a paragraph
        self.parskip = 0                # Skipped space between paragraphs?
        self.hard_break = 1             # Have a hard break
        self.have_label = 0
    def end_paragraph(self, blankline):
        '''Close the current paragraph, emitting up to *blankline* blank
        lines (less any already skipped).'''
        if not self.hard_break:
            self.writer.send_line_break()
            self.have_label = 0
        if self.parskip < blankline and not self.have_label:
            self.writer.send_paragraph(blankline - self.parskip)
            self.parskip = blankline
            self.have_label = 0
        self.hard_break = self.nospace = self.para_end = 1
        self.softspace = 0
    def add_line_break(self):
        '''Emit a hard line break unless one is already pending.'''
        if not (self.hard_break or self.para_end):
            self.writer.send_line_break()
            self.have_label = self.parskip = 0
        self.hard_break = self.nospace = 1
        self.softspace = 0
    def add_hor_rule(self, *args, **kw):
        '''Emit a horizontal rule, breaking the line first if needed.'''
        if not self.hard_break:
            self.writer.send_line_break()
        self.writer.send_hor_rule(*args, **kw)
        self.hard_break = self.nospace = 1
        self.have_label = self.para_end = self.softspace = self.parskip = 0
    def add_label_data(self, format, counter, blankline = None):
        '''Emit list-item label data; a string *format* is rendered via
        format_counter(), anything else is passed through unchanged.'''
        if self.have_label or not self.hard_break:
            self.writer.send_line_break()
        if not self.para_end:
            self.writer.send_paragraph((blankline and 1) or 0)
        if isinstance(format, str):
            self.writer.send_label_data(self.format_counter(format, counter))
        else:
            self.writer.send_label_data(format)
        self.nospace = self.have_label = self.hard_break = self.para_end = 1
        self.softspace = self.parskip = 0
    def format_counter(self, format, counter):
        '''Render *counter* per *format*: "1" decimal, "a"/"A" letters,
        "i"/"I" Roman numerals; any other character is copied literally.'''
        label = ''
        for c in format:
            if c == '1':
                label = label + ('%d' % counter)
            elif c in 'aA':
                if counter > 0:
                    label = label + self.format_letter(c, counter)
            elif c in 'iI':
                if counter > 0:
                    label = label + self.format_roman(c, counter)
            else:
                label = label + c
        return label
def format_letter(self, case, counter):
label = ''
while counter > 0:
counter, x = divmod(counter-1, 26)
# This makes a strong assumption that lowercase letters
# and uppercase letters form two contiguous blocks, with
# letters in order!
s = chr(ord(case) + x)
label = s + label
return label
def format_roman(self, case, counter):
ones = ['i', 'x', 'c', 'm']
fives = ['v', 'l', 'd']
label, index = '', 0
# This will die of IndexError when counter is too big
while counter > 0:
counter, x = divmod(counter, 10)
if x == 9:
label = ones[index] + ones[index+1] + label
elif x == 4:
label = ones[index] + fives[index] + label
else:
if x >= 5:
s = fives[index]
x = x-5
else:
s = ''
s = s + ones[index]*x
label = s + label
index = index + 1
if case == 'I':
return label.upper()
return label
    def add_flowing_data(self, data):
        """Add word-wrappable text, collapsing internal whitespace runs.

        Leading/trailing whitespace is folded into the soft-space
        mechanism so that adjacent chunks are joined by single spaces.
        """
        if not data: return
        # Remember whether the chunk touched whitespace at either end
        # before collapsing, so spacing survives the normalization.
        prespace = data[:1].isspace()
        postspace = data[-1:].isspace()
        data = " ".join(data.split())
        if self.nospace and not data:
            return
        elif prespace or self.softspace:
            if not data:
                if not self.nospace:
                    self.softspace = 1
                    self.parskip = 0
                return
            if not self.nospace:
                data = ' ' + data
        self.hard_break = self.nospace = self.para_end = \
                          self.parskip = self.have_label = 0
        self.softspace = postspace
        self.writer.send_flowing_data(data)
    def add_literal_data(self, data):
        """Add preformatted text verbatim; only a pending soft space is
        flushed first.  A trailing newline counts as a hard break."""
        if not data: return
        if self.softspace:
            self.writer.send_flowing_data(" ")
        self.hard_break = data[-1:] == '\n'
        self.nospace = self.para_end = self.softspace = \
                       self.parskip = self.have_label = 0
        self.writer.send_literal_data(data)
    def flush_softspace(self):
        """Emit any pending soft space now (e.g. before a style change)."""
        if self.softspace:
            self.hard_break = self.para_end = self.parskip = \
                              self.have_label = self.softspace = 0
            self.nospace = 1
            self.writer.send_flowing_data(' ')
    def push_alignment(self, align):
        """Push a new alignment; only notify the writer on a real change."""
        if align and align != self.align:
            self.writer.new_alignment(align)
            self.align = align
            self.align_stack.append(align)
        else:
            # Re-push the current alignment so pops stay balanced.
            self.align_stack.append(self.align)
    def pop_alignment(self):
        """Restore the previous alignment from the stack (None if empty)."""
        if self.align_stack:
            del self.align_stack[-1]
        if self.align_stack:
            self.align = align = self.align_stack[-1]
            self.writer.new_alignment(align)
        else:
            self.align = None
            self.writer.new_alignment(None)
    def push_font(self, font):
        """Push a (size, italic, bold, teletype) font spec.

        Components equal to the AS_IS sentinel inherit from the current
        top-of-stack font.  A pending soft space is flushed first so it
        is rendered in the old font.
        """
        size, i, b, tt = font
        if self.softspace:
            self.hard_break = self.para_end = self.softspace = 0
            self.nospace = 1
            self.writer.send_flowing_data(' ')
        if self.font_stack:
            csize, ci, cb, ctt = self.font_stack[-1]
            if size is AS_IS: size = csize
            if i is AS_IS: i = ci
            if b is AS_IS: b = cb
            if tt is AS_IS: tt = ctt
        font = (size, i, b, tt)
        self.font_stack.append(font)
        self.writer.new_font(font)
    def pop_font(self):
        """Restore the previous font from the stack (None if empty)."""
        if self.font_stack:
            del self.font_stack[-1]
        if self.font_stack:
            font = self.font_stack[-1]
        else:
            font = None
        self.writer.new_font(font)
    def push_margin(self, margin):
        """Push a margin; falsy margins inherit the nearest truthy one.

        The level reported to the writer counts only truthy margins.
        """
        self.margin_stack.append(margin)
        fstack = [m for m in self.margin_stack if m]
        if not margin and fstack:
            margin = fstack[-1]
        self.writer.new_margin(margin, len(fstack))
    def pop_margin(self):
        """Pop a margin and report the now-current truthy margin/level."""
        if self.margin_stack:
            del self.margin_stack[-1]
        fstack = [m for m in self.margin_stack if m]
        if fstack:
            margin = fstack[-1]
        else:
            margin = None
        self.writer.new_margin(margin, len(fstack))
    def set_spacing(self, spacing):
        """Set the vertical spacing mode and forward it to the writer."""
        self.spacing = spacing
        self.writer.new_spacing(spacing)
    def push_style(self, *styles):
        """Push one or more styles; a pending soft space is flushed first
        so it renders in the previous style."""
        if self.softspace:
            self.hard_break = self.para_end = self.softspace = 0
            self.nospace = 1
            self.writer.send_flowing_data(' ')
        for style in styles:
            self.style_stack.append(style)
        self.writer.new_styles(tuple(self.style_stack))
    def pop_style(self, n=1):
        """Pop the top `n` styles and report the remaining stack."""
        del self.style_stack[-n:]
        self.writer.new_styles(tuple(self.style_stack))
    def assert_line_data(self, flag=1):
        """Record that data was written out-of-band; `flag` false means the
        external data ended with a line break."""
        self.nospace = self.hard_break = not flag
        self.para_end = self.parskip = self.have_label = 0
class NullWriter:
    """Minimal writer interface to use in testing & inheritance.

    A writer which only provides the interface definition; no actions are
    taken on any methods.  This should be the base class for all writers
    which do not need to inherit any implementation methods.
    """
    # Every method is a deliberate no-op: subclasses override only the
    # events they care about, and a formatter can drive a NullWriter
    # safely (e.g. in tests) without producing output.
    def __init__(self): pass
    def flush(self): pass
    def new_alignment(self, align): pass
    def new_font(self, font): pass
    def new_margin(self, margin, level): pass
    def new_spacing(self, spacing): pass
    def new_styles(self, styles): pass
    def send_paragraph(self, blankline): pass
    def send_line_break(self): pass
    def send_hor_rule(self, *args, **kw): pass
    def send_label_data(self, data): pass
    def send_flowing_data(self, data): pass
    def send_literal_data(self, data): pass
class AbstractWriter(NullWriter):
    """A writer which can be used in debugging formatters, but not much else.

    Each method simply announces itself by printing its name and
    arguments on standard output.
    """
    # Useful to trace exactly which writer events a formatter emits,
    # and in what order.
    def new_alignment(self, align):
        print("new_alignment(%r)" % (align,))
    def new_font(self, font):
        print("new_font(%r)" % (font,))
    def new_margin(self, margin, level):
        print("new_margin(%r, %d)" % (margin, level))
    def new_spacing(self, spacing):
        print("new_spacing(%r)" % (spacing,))
    def new_styles(self, styles):
        print("new_styles(%r)" % (styles,))
    def send_paragraph(self, blankline):
        print("send_paragraph(%r)" % (blankline,))
    def send_line_break(self):
        print("send_line_break()")
    def send_hor_rule(self, *args, **kw):
        print("send_hor_rule()")
    def send_label_data(self, data):
        print("send_label_data(%r)" % (data,))
    def send_flowing_data(self, data):
        print("send_flowing_data(%r)" % (data,))
    def send_literal_data(self, data):
        print("send_literal_data(%r)" % (data,))
class DumbWriter(NullWriter):
    """Simple writer class which writes output on the file object passed in
    as the file parameter or, if file is omitted, on standard output.  The
    output is simply word-wrapped to the number of columns specified by
    the maxcol parameter.  This class is suitable for reflowing a sequence
    of paragraphs.
    """
    def __init__(self, file=None, maxcol=72):
        # file: writable text stream (defaults to sys.stdout).
        # maxcol: column at which flowing data is wrapped.
        self.file = file or sys.stdout
        self.maxcol = maxcol
        NullWriter.__init__(self)
        self.reset()
    def reset(self):
        # col: current output column; atbreak: whether the next flowing
        # word may be preceded by a space or line break.
        self.col = 0
        self.atbreak = 0
    def send_paragraph(self, blankline):
        # Emit `blankline` newlines and start a fresh line.
        self.file.write('\n'*blankline)
        self.col = 0
        self.atbreak = 0
    def send_line_break(self):
        self.file.write('\n')
        self.col = 0
        self.atbreak = 0
    def send_hor_rule(self, *args, **kw):
        # A rule is a full-width line of dashes on its own line.
        self.file.write('\n')
        self.file.write('-'*self.maxcol)
        self.file.write('\n')
        self.col = 0
        self.atbreak = 0
    def send_literal_data(self, data):
        # Written verbatim; only the column tracking is updated, using
        # the text after the last newline (tabs expanded).
        self.file.write(data)
        i = data.rfind('\n')
        if i >= 0:
            self.col = 0
            data = data[i+1:]
        data = data.expandtabs()
        self.col = self.col + len(data)
        self.atbreak = 0
    def send_flowing_data(self, data):
        # Word-wrap: emit a space or newline before each word depending
        # on whether it still fits within maxcol.
        if not data: return
        atbreak = self.atbreak or data[0].isspace()
        col = self.col
        maxcol = self.maxcol
        write = self.file.write
        for word in data.split():
            if atbreak:
                if col + len(word) >= maxcol:
                    write('\n')
                    col = 0
                else:
                    write(' ')
                    col = col + 1
            write(word)
            col = col + len(word)
            atbreak = 1
        self.col = col
        self.atbreak = data[-1].isspace()
def test(file=None):
    """Demo: reflow a text file (or stdin) to stdout via DumbWriter.

    Blank lines separate paragraphs; everything else is flowing data.
    `file` names the input file; otherwise the first command-line
    argument is used, falling back to stdin.
    """
    w = DumbWriter()
    f = AbstractFormatter(w)
    if file is not None:
        fp = open(file)
    elif sys.argv[1:]:
        fp = open(sys.argv[1])
    else:
        fp = sys.stdin
    # Close the handle reliably; the original leaked it (never closed,
    # even on error).  stdin is left open for the caller.
    try:
        for line in fp:
            if line == '\n':
                f.end_paragraph(1)
            else:
                f.add_flowing_data(line)
        f.end_paragraph(0)
    finally:
        if fp is not sys.stdin:
            fp.close()
if __name__ == '__main__':
    # Run the stdin/stdout reflow demo when executed as a script.
    test()
|
lumig242/Hue-Integration-with-CDAP | refs/heads/pull3 | desktop/core/ext-py/Paste-2.0.1/paste/urlmap.py | 33 | # (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
"""
Map URL prefixes to WSGI applications. See ``URLMap``
"""
import re
import os
import cgi
try:
# Python 3
from collections import MutableMapping as DictMixin
except ImportError:
# Python 2
from UserDict import DictMixin
from paste import httpexceptions
__all__ = ['URLMap', 'PathProxyURLMap']
def urlmap_factory(loader, global_conf, **local_conf):
    """Paste composite app factory: build a URLMap from a config section.

    Each local-config key is a path expression (see
    parse_path_expression) and each value is an app name resolved via
    the loader.  A 'not_found_app' entry (checked in local config first,
    then global) overrides the default 404 handler.
    """
    if 'not_found_app' in local_conf:
        not_found_app = local_conf.pop('not_found_app')
    else:
        not_found_app = global_conf.get('not_found_app')
    if not_found_app:
        not_found_app = loader.get_app(not_found_app, global_conf=global_conf)
    urlmap = URLMap(not_found_app=not_found_app)
    for path, app_name in local_conf.items():
        path = parse_path_expression(path)
        app = loader.get_app(app_name, global_conf=global_conf)
        urlmap[path] = app
    return urlmap
def parse_path_expression(path):
    """
    Parses a path expression like 'domain foobar.com port 20 /' or
    just '/foobar' for a path alone.  Returns as an address that
    URLMap likes.
    """
    tokens = path.split()
    domain = port = path = None
    # Consume 'domain <name>' / 'port <number>' keyword pairs; any other
    # token is taken as the (single) path component.
    while tokens:
        token = tokens.pop(0)
        if token == 'domain':
            if not tokens:
                raise ValueError("'domain' must be followed with a domain name")
            if domain:
                raise ValueError("'domain' given twice")
            domain = tokens.pop(0)
        elif token == 'port':
            if not tokens:
                raise ValueError("'port' must be followed with a port number")
            if port:
                raise ValueError("'port' given twice")
            port = tokens.pop(0)
        else:
            if path:
                raise ValueError("more than one path given (have %r, got %r)"
                                 % (path, token))
            path = token
    # Reassemble in URLMap's preferred form: [http://domain[:port]][/path]
    result = ''
    if domain:
        result = 'http://%s' % domain
    if port:
        if not domain:
            raise ValueError("If you give a port, you must also give a domain")
        result += ':' + port
    if path:
        if result:
            result += '/'
        result += path
    return result
class URLMap(DictMixin):
    """
    URLMap instances are dictionary-like object that dispatch to one
    of several applications based on the URL.

    The dictionary keys are URLs to match (like
    ``PATH_INFO.startswith(url)``), and the values are applications to
    dispatch to.  URLs are matched most-specific-first, i.e., longest
    URL first.  The ``SCRIPT_NAME`` and ``PATH_INFO`` environmental
    variables are adjusted to indicate the new context.

    URLs can also include domains, like ``http://blah.com/foo``, or as
    tuples ``('blah.com', '/foo')``.  This will match domain names; without
    the ``http://domain`` or with a domain of ``None`` any domain will be
    matched (so long as no other explicit domain matches). """

    def __init__(self, not_found_app=None):
        # applications: list of ((domain, url), app) pairs, kept sorted
        # longest-URL-first by sort_apps().
        self.applications = []
        if not not_found_app:
            not_found_app = self.not_found_app
        self.not_found_application = not_found_app

    def __len__(self):
        return len(self.applications)

    def __iter__(self):
        for app_url, app in self.applications:
            yield app_url

    norm_url_re = re.compile('//+')
    domain_url_re = re.compile('^(http|https)://')

    def not_found_app(self, environ, start_response):
        """Default 404 handler; lists the configured mappings for debugging."""
        mapper = environ.get('paste.urlmap_object')
        if mapper:
            matches = [p for p, a in mapper.applications]
            extra = 'defined apps: %s' % (
                ',\n  '.join(map(repr, matches)))
        else:
            extra = ''
        extra += '\nSCRIPT_NAME: %r' % environ.get('SCRIPT_NAME')
        extra += '\nPATH_INFO: %r' % environ.get('PATH_INFO')
        extra += '\nHTTP_HOST: %r' % environ.get('HTTP_HOST')
        app = httpexceptions.HTTPNotFound(
            environ['PATH_INFO'],
            comment=cgi.escape(extra)).wsgi_application
        return app(environ, start_response)

    def normalize_url(self, url, trim=True):
        """Return (domain_or_None, url) with doubled slashes collapsed
        and (if trim) trailing slashes removed."""
        if isinstance(url, (list, tuple)):
            domain = url[0]
            url = self.normalize_url(url[1])[1]
            return domain, url
        assert (not url or url.startswith('/')
                or self.domain_url_re.search(url)), (
            "URL fragments must start with / or http:// (you gave %r)" % url)
        match = self.domain_url_re.search(url)
        if match:
            url = url[match.end():]
            if '/' in url:
                domain, url = url.split('/', 1)
                url = '/' + url
            else:
                domain, url = url, ''
        else:
            domain = None
        url = self.norm_url_re.sub('/', url)
        if trim:
            url = url.rstrip('/')
        return domain, url

    def sort_apps(self):
        """
        Make sure applications are sorted with longest URLs first
        """
        def key(app_desc):
            (domain, url), app = app_desc
            if not domain:
                # Make sure empty domains sort last:
                return '\xff', -len(url)
            else:
                return domain, -len(url)
        apps = [(key(desc), desc) for desc in self.applications]
        apps.sort()
        self.applications = [desc for (sortable, desc) in apps]

    def __setitem__(self, url, app):
        # Setting an app to None removes the mapping.
        if app is None:
            try:
                del self[url]
            except KeyError:
                pass
            return
        dom_url = self.normalize_url(url)
        if dom_url in self:
            del self[dom_url]
        self.applications.append((dom_url, app))
        self.sort_apps()

    def __getitem__(self, url):
        dom_url = self.normalize_url(url)
        for app_url, app in self.applications:
            if app_url == dom_url:
                return app
        # Bug fix: report the normalized (domain, url) pair.  Indexing the
        # raw argument (url[1], url[0]) produced single characters when a
        # plain string URL was looked up.
        raise KeyError(
            "No application with the url %r (domain: %r; existing: %s)"
            % (dom_url[1], dom_url[0] or '*', self.applications))

    def __delitem__(self, url):
        url = self.normalize_url(url)
        for app_url, app in self.applications:
            if app_url == url:
                self.applications.remove((app_url, app))
                break
        else:
            raise KeyError(
                "No application with the url %r" % (url,))

    def keys(self):
        return [app_url for app_url, app in self.applications]

    def __call__(self, environ, start_response):
        """WSGI entry point: dispatch to the longest matching mapping,
        shifting the matched prefix from PATH_INFO to SCRIPT_NAME."""
        host = environ.get('HTTP_HOST', environ.get('SERVER_NAME')).lower()
        if ':' in host:
            host, port = host.split(':', 1)
        else:
            if environ['wsgi.url_scheme'] == 'http':
                port = '80'
            else:
                port = '443'
        path_info = environ.get('PATH_INFO')
        path_info = self.normalize_url(path_info, False)[1]
        for (domain, app_url), app in self.applications:
            if domain and domain != host and domain != host+':'+port:
                continue
            if (path_info == app_url
                or path_info.startswith(app_url + '/')):
                environ['SCRIPT_NAME'] += app_url
                environ['PATH_INFO'] = path_info[len(app_url):]
                return app(environ, start_response)
        environ['paste.urlmap_object'] = self
        return self.not_found_application(environ, start_response)
class PathProxyURLMap(object):
    """
    This is a wrapper for URLMap that catches any strings that
    are passed in as applications; these strings are treated as
    filenames (relative to `base_path`) and are passed to the
    callable `builder`, which will return an application.

    This is intended for cases when configuration files can be
    treated as applications.

    `base_paste_url` is the URL under which all applications added through
    this wrapper must go.  Use ``""`` if you want this to not
    change incoming URLs.
    """
    def __init__(self, map, base_paste_url, base_path, builder):
        self.map = map
        self.base_paste_url = self.map.normalize_url(base_paste_url)
        self.base_path = base_path
        self.builder = builder
    def __setitem__(self, url, app):
        # NOTE(review): `unicode` makes this Python-2-only code.
        if isinstance(app, (str, unicode)):
            app_fn = os.path.join(self.base_path, app)
            app = self.builder(app_fn)
        url = self.map.normalize_url(url)
        # @@: This means http://foo.com/bar will potentially
        # match foo.com, but /base_paste_url/bar, which is unintuitive
        url = (url[0] or self.base_paste_url[0],
               self.base_paste_url[1] + url[1])
        self.map[url] = app
    def __getattr__(self, attr):
        # Everything except __setitem__ is delegated to the wrapped map.
        return getattr(self.map, attr)
    # This is really the only settable attribute
    def not_found_application__get(self):
        return self.map.not_found_application
    def not_found_application__set(self, value):
        self.map.not_found_application = value
    not_found_application = property(not_found_application__get,
                                     not_found_application__set)
|
837468220/python-for-android | refs/heads/master | python3-alpha/python3-src/Lib/idlelib/tabbedpages.py | 49 | """An implementation of tabbed pages using only standard Tkinter.
Originally developed for use in IDLE. Based on tabpage.py.
Classes exported:
TabbedPageSet -- A Tkinter implementation of a tabbed-page widget.
TabSet -- A widget containing tabs (buttons) in one or more rows.
"""
from tkinter import *
class InvalidNameError(Exception): pass  # raised for empty/invalid tab or page names
class AlreadyExistsError(Exception): pass  # raised when adding a duplicate tab/page name
class TabSet(Frame):
    """A widget containing tabs (buttons) in one or more rows.

    Only one tab may be selected at a time.
    """
    def __init__(self, page_set, select_command,
                 tabs=None, n_rows=1, max_tabs_per_row=5,
                 expand_tabs=False, **kw):
        """Constructor arguments:

        select_command -- A callable which will be called when a tab is
        selected. It is called with the name of the selected tab as an
        argument.

        tabs -- A list of strings, the names of the tabs. Should be specified
        in the desired tab order. The first tab will be the default and first
        active tab. If tabs is None or empty, the TabSet will be initialized
        empty.

        n_rows -- Number of rows of tabs to be shown. If n_rows <= 0 or is
        None, then the number of rows will be decided by TabSet. See
        _arrange_tabs() for details.

        max_tabs_per_row -- Used for deciding how many rows of tabs are
        needed, when the number of rows is not constant. See _arrange_tabs()
        for details.
        """
        Frame.__init__(self, page_set, **kw)
        self.select_command = select_command
        self.n_rows = n_rows
        self.max_tabs_per_row = max_tabs_per_row
        self.expand_tabs = expand_tabs
        self.page_set = page_set
        self._tabs = {}          # tab name -> TabButton
        self._tab2row = {}       # TabButton -> row Frame containing it
        if tabs:
            self._tab_names = list(tabs)
        else:
            self._tab_names = []
        self._selected_tab = None
        self._tab_rows = []
        self.padding_frame = Frame(self, height=2,
                                   borderwidth=0, relief=FLAT,
                                   background=self.cget('background'))
        self.padding_frame.pack(side=TOP, fill=X, expand=False)
        self._arrange_tabs()

    def add_tab(self, tab_name):
        """Add a new tab with the name given in tab_name."""
        if not tab_name:
            raise InvalidNameError("Invalid Tab name: '%s'" % tab_name)
        if tab_name in self._tab_names:
            raise AlreadyExistsError("Tab named '%s' already exists" % tab_name)
        self._tab_names.append(tab_name)
        self._arrange_tabs()

    def remove_tab(self, tab_name):
        """Remove the tab named <tab_name>"""
        if tab_name not in self._tab_names:
            # Bug fix: previously interpolated the undefined name
            # 'page_name', so this raised NameError instead of KeyError.
            raise KeyError("No such Tab: '%s'" % tab_name)
        self._tab_names.remove(tab_name)
        self._arrange_tabs()

    def set_selected_tab(self, tab_name):
        """Show the tab named <tab_name> as the selected one"""
        if tab_name == self._selected_tab:
            return
        if tab_name is not None and tab_name not in self._tabs:
            # Bug fix: was "% page_name" (undefined) -> NameError.
            raise KeyError("No such Tab: '%s'" % tab_name)
        # deselect the current selected tab
        if self._selected_tab is not None:
            self._tabs[self._selected_tab].set_normal()
        self._selected_tab = None
        if tab_name is not None:
            # activate the tab named tab_name
            self._selected_tab = tab_name
            tab = self._tabs[tab_name]
            tab.set_selected()
            # move the tab row with the selected tab to the bottom
            tab_row = self._tab2row[tab]
            tab_row.pack_forget()
            tab_row.pack(side=TOP, fill=X, expand=0)

    def _add_tab_row(self, tab_names, expand_tabs):
        """Create one row Frame containing a TabButton per name."""
        if not tab_names:
            return
        tab_row = Frame(self)
        tab_row.pack(side=TOP, fill=X, expand=0)
        self._tab_rows.append(tab_row)
        for tab_name in tab_names:
            tab = TabSet.TabButton(tab_name, self.select_command,
                                   tab_row, self)
            if expand_tabs:
                tab.pack(side=LEFT, fill=X, expand=True)
            else:
                tab.pack(side=LEFT)
            self._tabs[tab_name] = tab
            self._tab2row[tab] = tab_row
        # tab is the last one created in the above loop
        tab.is_last_in_row = True

    def _reset_tab_rows(self):
        """Destroy all row frames (their TabButtons go with them)."""
        while self._tab_rows:
            tab_row = self._tab_rows.pop()
            tab_row.destroy()
        self._tab2row = {}

    def _arrange_tabs(self):
        """
        Arrange the tabs in rows, in the order in which they were added.

        If n_rows >= 1, this will be the number of rows used. Otherwise the
        number of rows will be calculated according to the number of tabs and
        max_tabs_per_row. In this case, the number of rows may change when
        adding/removing tabs.
        """
        # remove all tabs and rows
        while self._tabs:
            self._tabs.popitem()[1].destroy()
        self._reset_tab_rows()
        if not self._tab_names:
            return
        if self.n_rows is not None and self.n_rows > 0:
            n_rows = self.n_rows
        else:
            # calculate the required number of rows
            n_rows = (len(self._tab_names) - 1) // self.max_tabs_per_row + 1
        # not expanding the tabs with more than one row is very ugly
        expand_tabs = self.expand_tabs or n_rows > 1
        i = 0  # index in self._tab_names
        for row_index in range(n_rows):
            # calculate required number of tabs in this row
            n_tabs = (len(self._tab_names) - i - 1) // (n_rows - row_index) + 1
            tab_names = self._tab_names[i:i + n_tabs]
            i += n_tabs
            self._add_tab_row(tab_names, expand_tabs)
        # re-select selected tab so it is properly displayed
        selected = self._selected_tab
        self.set_selected_tab(None)
        if selected in self._tab_names:
            self.set_selected_tab(selected)

    class TabButton(Frame):
        """A simple tab-like widget."""

        bw = 2  # borderwidth

        def __init__(self, name, select_command, tab_row, tab_set):
            """Constructor arguments:

            name -- The tab's name, which will appear in its button.

            select_command -- The command to be called upon selection of the
            tab. It is called with the tab's name as an argument.
            """
            Frame.__init__(self, tab_row, borderwidth=self.bw, relief=RAISED)
            self.name = name
            self.select_command = select_command
            self.tab_set = tab_set
            self.is_last_in_row = False
            self.button = Radiobutton(
                self, text=name, command=self._select_event,
                padx=5, pady=1, takefocus=FALSE, indicatoron=FALSE,
                highlightthickness=0, selectcolor='', borderwidth=0)
            self.button.pack(side=LEFT, fill=X, expand=True)
            self._init_masks()
            self.set_normal()

        def _select_event(self, *args):
            """Event handler for tab selection.

            With TabbedPageSet, this calls TabbedPageSet.change_page, so that
            selecting a tab changes the page.

            Note that this does -not- call set_selected -- it will be called
            by TabSet.set_selected_tab, which should be called when whatever
            the tabs are related to changes.
            """
            self.select_command(self.name)
            return

        def set_selected(self):
            """Assume selected look"""
            self._place_masks(selected=True)

        def set_normal(self):
            """Assume normal look"""
            self._place_masks(selected=False)

        def _init_masks(self):
            """Create the frames used to hide parts of the tab's border."""
            page_set = self.tab_set.page_set
            background = page_set.pages_frame.cget('background')
            # mask replaces the middle of the border with the background color
            self.mask = Frame(page_set, borderwidth=0, relief=FLAT,
                              background=background)
            # mskl replaces the bottom-left corner of the border with a normal
            # left border
            self.mskl = Frame(page_set, borderwidth=0, relief=FLAT,
                              background=background)
            self.mskl.ml = Frame(self.mskl, borderwidth=self.bw,
                                 relief=RAISED)
            self.mskl.ml.place(x=0, y=-self.bw,
                               width=2*self.bw, height=self.bw*4)
            # mskr replaces the bottom-right corner of the border with a
            # normal right border
            self.mskr = Frame(page_set, borderwidth=0, relief=FLAT,
                              background=background)
            self.mskr.mr = Frame(self.mskr, borderwidth=self.bw,
                                 relief=RAISED)

        def _place_masks(self, selected=False):
            """Position the mask frames to render the tab (de)selected."""
            height = self.bw
            if selected:
                height += self.bw
            self.mask.place(in_=self,
                            relx=0.0, x=0,
                            rely=1.0, y=0,
                            relwidth=1.0, width=0,
                            relheight=0.0, height=height)
            self.mskl.place(in_=self,
                            relx=0.0, x=-self.bw,
                            rely=1.0, y=0,
                            relwidth=0.0, width=self.bw,
                            relheight=0.0, height=height)
            page_set = self.tab_set.page_set
            if selected and ((not self.is_last_in_row) or
                             (self.winfo_rootx() + self.winfo_width() <
                              page_set.winfo_rootx() + page_set.winfo_width())
                             ):
                # for a selected tab, if its rightmost edge isn't on the
                # rightmost edge of the page set, the right mask should be one
                # borderwidth shorter (vertically)
                height -= self.bw
            self.mskr.place(in_=self,
                            relx=1.0, x=0,
                            rely=1.0, y=0,
                            relwidth=0.0, width=self.bw,
                            relheight=0.0, height=height)
            self.mskr.mr.place(x=-self.bw, y=-self.bw,
                               width=2*self.bw, height=height + self.bw*2)
            # finally, lower the tab set so that all of the frames we just
            # placed hide it
            self.tab_set.lower()
class TabbedPageSet(Frame):
    """A Tkinter tabbed-pane widget.

    Constains set of 'pages' (or 'panes') with tabs above for selecting which
    page is displayed. Only one page will be displayed at a time.

    Pages may be accessed through the 'pages' attribute, which is a dictionary
    of pages, using the name given as the key. A page is an instance of a
    subclass of Tk's Frame widget.

    The page widgets will be created (and destroyed when required) by the
    TabbedPageSet. Do not call the page's pack/place/grid/destroy methods.

    Pages may be added or removed at any time using the add_page() and
    remove_page() methods.
    """
    class Page(object):
        """Abstract base class for TabbedPageSet's pages.

        Subclasses must override the _show() and _hide() methods.
        """
        uses_grid = False

        def __init__(self, page_set):
            self.frame = Frame(page_set, borderwidth=2, relief=RAISED)

        def _show(self):
            raise NotImplementedError

        def _hide(self):
            raise NotImplementedError

    class PageRemove(Page):
        """Page class using the grid placement manager's "remove" mechanism."""
        uses_grid = True

        def _show(self):
            self.frame.grid(row=0, column=0, sticky=NSEW)

        def _hide(self):
            self.frame.grid_remove()

    class PageLift(Page):
        """Page class using the grid placement manager's "lift" mechanism."""
        uses_grid = True

        def __init__(self, page_set):
            super(TabbedPageSet.PageLift, self).__init__(page_set)
            self.frame.grid(row=0, column=0, sticky=NSEW)
            self.frame.lower()

        def _show(self):
            self.frame.lift()

        def _hide(self):
            self.frame.lower()

    class PagePackForget(Page):
        """Page class using the pack placement manager's "forget" mechanism."""
        def _show(self):
            self.frame.pack(fill=BOTH, expand=True)

        def _hide(self):
            self.frame.pack_forget()

    def __init__(self, parent, page_names=None, page_class=PageLift,
                 n_rows=1, max_tabs_per_row=5, expand_tabs=False,
                 **kw):
        """Constructor arguments:

        page_names -- A list of strings, each will be the dictionary key to a
        page's widget, and the name displayed on the page's tab. Should be
        specified in the desired page order. The first page will be the
        default and first active page. If page_names is None or empty, the
        TabbedPageSet will be initialized empty.

        n_rows, max_tabs_per_row -- Parameters for the TabSet which will
        manage the tabs. See TabSet's docs for details.

        page_class -- Pages can be shown/hidden using three mechanisms:

        * PageLift - All pages will be rendered one on top of the other. When
          a page is selected, it will be brought to the top, thus hiding all
          other pages. Using this method, the TabbedPageSet will not be
          resized when pages are switched. (It may still be resized when
          pages are added/removed.)

        * PageRemove - When a page is selected, the currently showing page is
          hidden, and the new page shown in its place. Using this method, the
          TabbedPageSet may resize when pages are changed.

        * PagePackForget - This mechanism uses the pack placement manager.
          When a page is shown it is packed, and when it is hidden it is
          unpacked (i.e. pack_forget). This mechanism may also cause the
          TabbedPageSet to resize when the page is changed.
        """
        Frame.__init__(self, parent, **kw)
        self.page_class = page_class
        self.pages = {}
        self._pages_order = []
        self._current_page = None
        self._default_page = None
        self.columnconfigure(0, weight=1)
        self.rowconfigure(1, weight=1)
        self.pages_frame = Frame(self)
        self.pages_frame.grid(row=1, column=0, sticky=NSEW)
        if self.page_class.uses_grid:
            self.pages_frame.columnconfigure(0, weight=1)
            self.pages_frame.rowconfigure(0, weight=1)
        # the order of the following commands is important
        self._tab_set = TabSet(self, self.change_page, n_rows=n_rows,
                               max_tabs_per_row=max_tabs_per_row,
                               expand_tabs=expand_tabs)
        if page_names:
            for name in page_names:
                self.add_page(name)
        self._tab_set.grid(row=0, column=0, sticky=NSEW)
        self.change_page(self._default_page)

    def add_page(self, page_name):
        """Add a new page with the name given in page_name."""
        if not page_name:
            raise InvalidNameError("Invalid TabPage name: '%s'" % page_name)
        if page_name in self.pages:
            raise AlreadyExistsError(
                "TabPage named '%s' already exists" % page_name)
        self.pages[page_name] = self.page_class(self.pages_frame)
        self._pages_order.append(page_name)
        self._tab_set.add_tab(page_name)
        if len(self.pages) == 1:  # adding first page
            self._default_page = page_name
            self.change_page(page_name)

    def remove_page(self, page_name):
        """Destroy the page whose name is given in page_name."""
        if page_name not in self.pages:
            # Bug fix: the message previously lacked its closing quote.
            raise KeyError("No such TabPage: '%s'" % page_name)
        self._pages_order.remove(page_name)
        # handle removing last remaining, default, or currently shown page
        if len(self._pages_order) > 0:
            if page_name == self._default_page:
                # set a new default page
                self._default_page = self._pages_order[0]
        else:
            self._default_page = None
        if page_name == self._current_page:
            self.change_page(self._default_page)
        self._tab_set.remove_tab(page_name)
        page = self.pages.pop(page_name)
        page.frame.destroy()

    def change_page(self, page_name):
        """Show the page whose name is given in page_name."""
        if self._current_page == page_name:
            return
        if page_name is not None and page_name not in self.pages:
            raise KeyError("No such TabPage: '%s'" % page_name)
        if self._current_page is not None:
            self.pages[self._current_page]._hide()
        self._current_page = None
        if page_name is not None:
            self._current_page = page_name
            self.pages[page_name]._show()
        self._tab_set.set_selected_tab(page_name)
if __name__ == '__main__':
    # test dialog
    # Manual smoke test: a two-page tabbed widget plus an entry and two
    # buttons for adding/removing pages by name at runtime.
    root=Tk()
    tabPage=TabbedPageSet(root, page_names=['Foobar','Baz'], n_rows=0,
                          expand_tabs=False,
                          )
    tabPage.pack(side=TOP, expand=TRUE, fill=BOTH)
    Label(tabPage.pages['Foobar'].frame, text='Foo', pady=20).pack()
    Label(tabPage.pages['Foobar'].frame, text='Bar', pady=20).pack()
    Label(tabPage.pages['Baz'].frame, text='Baz').pack()
    entryPgName=Entry(root)
    buttonAdd=Button(root, text='Add Page',
            command=lambda:tabPage.add_page(entryPgName.get()))
    buttonRemove=Button(root, text='Remove Page',
            command=lambda:tabPage.remove_page(entryPgName.get()))
    labelPgName=Label(root, text='name of page to add/remove:')
    buttonAdd.pack(padx=5, pady=5)
    buttonRemove.pack(padx=5, pady=5)
    labelPgName.pack(padx=5)
    entryPgName.pack(padx=5)
    root.mainloop()
|
neherlab/treetool | refs/heads/master | augur/src/mutation_tree.py | 1 | import time, re, os, argparse,shutil, sys
from tree_refine import tree_refine
from virus_clean import virus_clean
from virus_filter import flu_filter
from date_util import numerical_date
from collections import defaultdict
from process import process, virus_config
from Bio import SeqIO, AlignIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio.Align import MultipleSeqAlignment
import numpy as np
from itertools import izip
# Repo root, derived from the path of the invoked script (two levels up).
path_to_augur = './' + '/'.join(sys.argv[0].split('/')[:-2])
# Bundled candidate-outgroup data shipped with the repo.
std_outgroup_file_blast = path_to_augur+'/source-data/outgroups.fasta'
std_outgroup_file_nuc = path_to_augur+'/source-data/outgroups_nucleotides_unspliced.fasta'
# NOTE(review): presumably a sequence-count cutoff above which RAxML is
# skipped — confirm against the code that reads it (not visible here).
no_raxml_threshold = 15000
# Override the shared pipeline defaults for this script:
# fasta_fields maps FASTA-header token positions to metadata keys.
virus_config.update({
	# data source and sequence parsing/cleaning/processing
	'fasta_fields':{0:'strain', 1:'isolate_id', 2:'date', 3:'subtype', 4:'country', 5:'region', 7:'host', 6:'passage'},
	'cds':[0,None], # define the HA start in 0 numbering
	'verbose':3
	})
def get_date(strain):
    """Extract the collection date from a '|'-delimited strain string.

    Field 2 is expected to hold an ISO 'YYYY-MM-DD' date; when only a
    year can be parsed, January 1st of that year is used.  Returns the
    date formatted as 'YYYY-MM-DD'.
    """
    from datetime import datetime
    date_str = strain.split('|')[2]
    try:
        collection_date = datetime.strptime(date_str, '%Y-%m-%d')
    except ValueError:
        # Not a full ISO date (e.g. '1999' or '1999-03'); fall back to
        # year-only precision.  The original bare `except:` also hid
        # unrelated errors such as TypeError.
        collection_date = datetime.strptime(date_str[:4], '%Y')
    return collection_date.strftime('%Y-%m-%d')
class mutation_tree(process, flu_filter, tree_refine, virus_clean):
"""docstring for mutation_tree"""
def __init__(self, aln_fname, outgroup, include_ref_strains = True, outdir = './', formats = ['pdf','png'], verbose = 0, **kwargs):
process.__init__(self, **kwargs)
flu_filter.__init__(self, alignment_file = aln_fname, **kwargs)
tree_refine.__init__(self, **kwargs)
virus_clean.__init__(self, **kwargs)
self.midpoint_rooting = False
self.include_ref_strains = include_ref_strains
self.verbose = verbose
self.formats = formats
self.outdir = outdir.rstrip('/')+'/'
self.auspice_tree_fname = self.outdir + 'tree.json'
self.auspice_align_fname = self.outdir + 'aln.fasta'
self.auspice_aa_align_fname = self.outdir + 'aa_aln.fasta'
self.auspice_sequences_fname = self.outdir + 'sequences.json'
self.auspice_frequencies_fname = None
self.auspice_meta_fname = self.outdir + 'meta.json'
self.path_to_augur = path_to_augur
if os.path.isfile(outgroup):
tmp = [{'strain':seq.name, 'seq':str(record.seq).upper(), 'desc':seq.description}
for seq in SeqIO.parse(outgroup, 'fasta') ]
if len(tmp):
self.outgroup = tmp[0]
if len(tmp)>1:
print "More than one sequence in ", outgroup, "taking first"
if self.verbose:
print "using outgroup found in file ", outgroup
elif outgroup=='auto':
print "automatically determine outgroup"
self.auto_outgroup_blast()
elif isinstance(outgroup, basestring):
seq_names = [x['strain'] for x in self.viruses]
if outgroup in seq_names:
self.outgroup = self.viruses.pop(seq_names.index(outgroup))
if self.verbose:
print "using outgroup found in alignment", outgroup
else:
standard_outgroups = self.load_standard_outgroups()
if outgroup in standard_outgroups:
self.outgroup = standard_outgroups[outgroup]
if self.verbose:
print "using standard outgroup", outgroup
else:
raise ValueError("outgroup %s not found" % outgroup)
return
if "anno:" in self.outgroup['desc']:
anno = [x for x in self.outgroup['desc'].split() if "anno:" in x][0]
anno = (anno.split(':')[1]).split('_')
tmp = [(anno[2*i], int(anno[2*i+1])) for i in range(len(anno)/2)]
self.anno = sorted(tmp, key=lambda x:x[1])
print("Using annotation",self.anno)
else:
self.anno = None
print("No annotation found")
#self.anno = sorted((('SP',0), ('HA1',16), ('HA2',329+16)), key=lambda x:x[1])
self.viruses.append(self.outgroup)
self.filter_geo(prune=False)
self.make_strain_names_unique()
def load_standard_outgroups(self):
return {'|'.join(seq.description.split()[1].split('|')[:2]).replace(' ',''):
{'seq':str(seq.seq).upper(),
'strain':seq.description.split()[1].split('|')[1].replace(' ',''),
'desc':seq.description,
'date':get_date(seq.description)}
for seq in SeqIO.parse(std_outgroup_file_nuc, 'fasta')}
def auto_outgroup_blast(self):
from random import sample
from Bio.Blast.Applications import NcbiblastxCommandline
from Bio.Blast import NCBIXML
self.make_run_dir()
nvir = 10
max_ref_seqs = 5
tmp_dates = []
for v in self.viruses:
try:
tmp_dates.append(numerical_date(v["date"]))
except:
print("Can't parse date for",v['strain'], v['date'])
earliest_date = np.min(tmp_dates)
all_strains = [v["strain"] for v in self.viruses]
representatives = [SeqRecord(Seq(v['seq']), id=v['strain']) for v in sample(self.viruses, min(nvir, len(self.viruses)))]
standard_outgroups = self.load_standard_outgroups()
SeqIO.write(representatives, self.run_dir+'representatives.fasta', 'fasta')
blast_out = self.run_dir+"outgroup_blast.xml"
blast_cline = NcbiblastxCommandline(query=self.run_dir+"representatives.fasta", db=std_outgroup_file_blast, evalue=0.01,
outfmt=5, out=blast_out)
stdout, stderr = blast_cline()
with open(blast_out, 'r') as bfile:
og_blast = NCBIXML.parse(bfile)
by_og = defaultdict(list)
for rep in og_blast:
for hit in rep.alignments:
for aln in hit.hsps:
by_og[hit.hit_def].append((rep.query, aln.score, aln.score/aln.align_length, 1.0*aln.identities/aln.align_length))
by_og = by_og.items()
print by_og[1]
# sort by number of hits, then mean score
by_og.sort(key = lambda x:(len(x[1]), np.mean([y[1] for y in x[1]])), reverse=True)
outgroups_older_than_sample = [(og, hits) for (og, hits) in by_og
if (numerical_date(standard_outgroups[og]['date'])<earliest_date-5) or
('A/California/07/2009' in standard_outgroups[og]['strain'])]
if len(outgroups_older_than_sample) and np.mean([y[-1] for y in outgroups_older_than_sample[0][1]])>0.8:
outgroup = outgroups_older_than_sample[0][0]
else:
outgroup = by_og[0][0]
self.midpoint_rooting = True
print("will root at midpoint")
for oi, (ref, hits) in enumerate(by_og):
if (np.max([y[-1] for y in hits])>0.9+oi*0.02) and ref!=outgroup:
self.viruses.append(standard_outgroups[ref])
print("including reference strain ",ref, [y[-1] for y in hits])
if oi>max_ref_seqs:
break
self.outgroup = standard_outgroups[outgroup]
if 'A/California/07/2009' not in self.outgroup['strain']:
self.outgroup['strain']+='OG'
prot = Seq(self.outgroup['seq']).translate(to_stop=True)
self.cds = [0,min(len(prot)*3,len(self.outgroup['seq']))]
print("chosen outgroup",self.outgroup['strain'])
    def refine(self):
        """Post-process the inferred tree: rooting/ladderizing, mutation
        annotation, layout, and date-sorted alignments for export."""
        # map leaf labels to tree nodes for later attribute transfer
        self.node_lookup = {node.taxon.label:node for node in self.tree.leaf_iter()}
        self.unique_date()
        # the pdm09 reference outgroup is kept in the tree; others are removed
        if 'A/California/07/2009' not in self.outgroup['strain']:
            self.remove_outgroup()
        self.ladderize()
        self.collapse()
        self.add_nuc_mutations()
        self.add_node_attributes()
        if self.cds is not None:
            self.translate_all()
            self.add_aa_mutations()
            if self.anno is not None:
                # re-express amino-acid mutations relative to the annotated
                # segment each position falls into (stored in alt_aa_muts)
                divides = np.array([x[1] for x in self.anno])
                for node in self.tree.postorder_node_iter():
                    node.alt_aa_muts = ""
                    tmp = defaultdict(list)
                    if len(node.aa_muts):
                        for mut in node.aa_muts.split(','):
                            # mutation strings look like "K145N":
                            # ancestral aa, position, derived aa
                            anc,pos,der = mut[0], int(mut[1:-1]), mut[-1]
                            ii = divides.searchsorted(pos)-1
                            # NOTE(review): ii>0 drops mutations in the first
                            # annotated segment -- presumably the signal
                            # peptide; confirm this is intended
                            if ii>0:
                                tmp[ii].append(anc+str(pos-divides[ii])+der)
                    for ii, anno in enumerate(self.anno):
                        if len(tmp[ii]):
                            node.alt_aa_muts+=anno[0]+': '+','.join(tmp[ii])+" "
        self.layout()
        # copy per-virus metadata onto the corresponding leaf nodes
        for v in self.viruses:
            if v.strain in self.node_lookup:
                node = self.node_lookup[v.strain]
                for attr in ['strain', 'desc']:
                    try:
                        node.__setattr__(attr, v.__getattribute__(attr))
                    except:
                        pass
        # make an amino acid aligment
        from Bio.Align import MultipleSeqAlignment
        from Bio.Seq import Seq
        from Bio.SeqRecord import SeqRecord
        if self.cds is not None:
            tmp_aaseqs = [SeqRecord(Seq(node.aa_seq), id=node.strain, annotations = {'num_date':node.num_date, 'region':node.region}) for node in self.tree.leaf_iter()]
            # sort sequences by sampling date for display
            tmp_aaseqs.sort(key = lambda x:x.annotations['num_date'])
            self.aa_aln = MultipleSeqAlignment(tmp_aaseqs)
        tmp_nucseqs = [SeqRecord(Seq(node.seq), id=node.strain, annotations = {'num_date':node.num_date, 'region':node.region}) for node in self.tree.leaf_iter()]
        tmp_nucseqs.sort(key = lambda x:x.annotations['num_date'])
        self.nuc_aln = MultipleSeqAlignment(tmp_nucseqs)
    def export(self):
        """Render the annotated tree to figure files and newick, then write
        the auspice data files via export_to_auspice."""
        from bio_draw import muttree_draw
        def select_fontsize(n):
            # shrink label font as the number of taxa grows
            if n<10:
                return 12
            elif n<50:
                return 10
            else:
                return 8
        def branch_label_func(n):
            # label branches with their mutations, truncated after max_muts;
            # 'alt' is read from the enclosing loop (closure)
            max_muts = 5
            if hasattr(n,'aa_muts'):
                if alt:
                    muts = n.alt_aa_muts
                else:
                    muts = n.aa_muts
            else:
                print(n,"has no amino acid mutations")
                try:
                    muts = n.nuc_muts
                except:
                    print(n,"has no nucleotide mutations")
                    muts = ""
            tmp = muts.split(',')
            if len(tmp)>max_muts:
                return ', '.join(tmp[:max_muts])+' + '+str(len(tmp)-max_muts)+' others'
            else:
                return ', '.join(tmp)
        from Bio import Phylo
        import matplotlib
        # select a non-interactive backend before pyplot is imported
        matplotlib.use('cairo')
        import matplotlib.pyplot as plt
        plt.rcParams.update({'font.size':select_fontsize(len(self.viruses))})
        plt.ioff()
        from tree_util import to_Biopython
        tmp_tree = to_Biopython(self.tree)
        tmp_tree.ladderize()
        # when an annotation exists, draw twice: plain and segment-relative
        # ("alt") mutation labels
        for alt in [False] if self.anno is None else [False, True]:
            fig = plt.figure('Tree', figsize = (15,2+len(self.viruses)/5))
            ax = plt.subplot('111')
            muttree_draw(tmp_tree, axes=ax, show_confidence=False, do_show=False,
                         label_func = lambda x: x.name,
                         branch_labels = branch_label_func
                         )
            ax.invert_yaxis()
            # draw a scale bar of half a tick interval below the tree
            tl = np.diff(ax.get_xticks())[0]
            lengthbar = tl/2
            plt.plot( [0,lengthbar],[len(self.viruses),len(self.viruses)], lw=10, c='k')
            plt.text(lengthbar/2, len(self.viruses)+0.1, str(lengthbar),horizontalalignment='center',fontsize=16)
            ax.set_axis_off()
            for fmt in self.formats:
                if alt:
                    plt.savefig(self.outdir+'tree_alt.'+fmt)
                else:
                    plt.savefig(self.outdir+'tree.'+fmt)
            # temporarily rename nodes to "strain-mutations" for the newick
            # export, restoring the original names afterwards
            for t in tmp_tree.find_clades():
                t.label = t.name # save original name
                if hasattr(t,"strain"):
                    t.name = t.strain
                else:
                    t.name = ""
                if alt:
                    muts = t.alt_aa_muts if hasattr(t,'alt_aa_muts') else t.nuc_muts
                else:
                    muts = t.aa_muts if hasattr(t,'aa_muts') else t.nuc_muts
                if len(t.name) and len(muts): t.name+='-'
                t.name+='_'.join(muts.split(',')).replace(' ','')
            if alt:
                Phylo.write(tmp_tree, self.outdir+'tree_alt.nwk', 'newick')
            else:
                Phylo.write(tmp_tree, self.outdir+'tree.nwk', 'newick')
            for t in tmp_tree.find_clades(): # revert to original name
                t.name = t.label
            plt.close('Tree')
        # fill in missing metadata so the ui always has a value to display
        for n in self.tree.leaf_iter():
            for field in self.fasta_fields.values():
                if (not hasattr(n, field)) or n.__dict__[field]=="":
                    n.__dict__[field]="Unknown"
        for n in self.tree.postorder_internal_node_iter():
            for field in self.fasta_fields.values():
                n.__dict__[field]="Unknown"
        if self.cds is None:
            self.export_to_auspice(tree_fields = ['nuc_muts','num_date']+self.fasta_fields.values(), seq='nuc')
        else:
            self.export_to_auspice(tree_fields = ['aa_muts','alt_aa_muts','num_date']+self.fasta_fields.values())
def make_strain_names_unique(self):
strain_to_seq = defaultdict(list)
for v in self.viruses:
strain_to_seq[v['strain'].upper()].append(v)
for strain, strain_list in strain_to_seq.iteritems():
if len(strain_list)>1:
for ii, virus in enumerate(strain_list):
virus['strain']+='-'+str(ii+1)
    def run(self, raxml_time_limit):
        """Run the full pipeline: align, infer the tree and ancestral
        sequences, refine, and write the alignments used by auspice."""
        rax_tlimit = raxml_time_limit
        self.align()
        # clear descriptions so the exported fasta headers stay short
        for v in self.viruses:
            v.description=''
        AlignIO.write(self.viruses, self.auspice_align_fname, 'fasta')
        self.remove_insertions()
        # NOTE(review): for very large alignments the time limit is set to 0,
        # presumably skipping the raxml refinement -- confirm in infer_tree
        if len(self.viruses)>no_raxml_threshold:
            rax_tlimit = 0
        print "--- Tree  infer at " + time.strftime("%H:%M:%S") + " ---"
        self.infer_tree(rax_tlimit)
        print "--- Infer ancestral sequences " + time.strftime("%H:%M:%S") + " ---"
        self.infer_ancestral()  # -> every node has a sequence
        print "--- Tree refine at " + time.strftime("%H:%M:%S") + " ---"
        self.refine()
        if self.cds:
            # write the translated leaf sequences for the msa viewer
            aa_aln = MultipleSeqAlignment([])
            for node in self.tree.leaf_iter():
                aa_aln.append(SeqRecord(id=node.strain, seq=Seq(node.aa_seq), description=''))
            AlignIO.write(aa_aln, self.auspice_aa_align_fname, 'fasta')
if __name__=="__main__":
    # command-line entry point: parse arguments, stage the output directory
    # with the static auspice assets, build the tree, and export it
    parser = argparse.ArgumentParser(description='Build a tree given a fasta file and annotate braches with mutations')
    parser.add_argument('--aln', required = True, type = str,  help ="fasta file with input sequences")
    parser.add_argument('--outgroup', required = True, type = str,  help ="outgroup to root the tree, strain label or fasta file")
    parser.add_argument('--cds', nargs = '+', type = int, default = None, help='part of the outgroup sequence that is to be translated')
    parser.add_argument('--out', type = str, default = 'output/', help='output directory')
    parser.add_argument('--nthreads', type = int, default=1, help ="number of threads to use (mafft and raxml)")
    parser.add_argument('--mplconfigdir', type = str, default="/tmp/", help ="directory for matplotlib configuration directory")
    params = parser.parse_args()
    # set matplot configuration path, needs to be writable and set before matplotib import (in .export)
    os.environ['MPLCONFIGDIR'] = params.mplconfigdir
    # check and parse cds: one value means "start only", two mean start/end
    if params.cds is None:
        virus_config['cds']=None
    else:
        if len(params.cds)==2:
            virus_config['cds']=params.cds
        elif len(params.cds)==1:
            virus_config['cds']=(params.cds[0], None)
        else:
            raise ValueError("Expecting a cds of length 1 (start only) or 2, got "+str(params.cds))
            exit()  # NOTE(review): unreachable after the raise -- apparently dead leftover
    # keep a uniquely-named copy of the input alignment for debugging failed runs
    debug_fasta_dir = path_to_augur+'/broken_fasta'
    if not os.path.isdir(debug_fasta_dir):
        os.makedirs(debug_fasta_dir)
    tmp_fasta_fname = debug_fasta_dir+'/'+'_'.join(['aln', time.strftime('%Y%m%d-%H%M%S',time.gmtime()),
                        str(np.random.randint(0,1000000))])+'.fasta'
    shutil.copy2(params.aln, tmp_fasta_fname)
    # check and create output directory
    if not os.path.isdir(params.out):
        try:
            os.makedirs(params.out)
            os.makedirs(params.out+'/js')
            os.makedirs(params.out+'/css')
        except OSError as e:
            print "Cannot create output directory",e
    if not os.path.isdir(params.out+'/js'):
        try:
            os.makedirs(params.out+'/js')
        except OSError as e:
            print "Cannot create output directory",e
    if not os.path.isdir(params.out+'/css'):
        try:
            os.makedirs(params.out+'/css')
        except OSError as e:
            print "Cannot create output directory",e
    # stage the input and the static auspice assets; index.html starts as the
    # error page and is replaced on success below
    shutil.copy2(params.aln, params.out+'/input_sequences.fasta')
    shutil.copy2(path_to_augur + '/../auspice/error.html', params.out+'/index.html')
    shutil.copy2(path_to_augur + '/../auspice/js/muttree.js', params.out+'/js/muttree.js')
    shutil.copy2(path_to_augur + '/../auspice/js/msa.min.js', params.out+'/js/msa.min.js')
    shutil.copy2(path_to_augur + '/../auspice/js/d3.min.js', params.out+'/js/d3.min.js')
    shutil.copy2(path_to_augur + '/../auspice/js/d3.tip.js', params.out+'/js/d3.tip.js')
    shutil.copy2(path_to_augur + '/../auspice/js/FileSaver.js', params.out+'/js/FileSaver.js')
    shutil.copy2(path_to_augur + '/../auspice/js/autocomplete.js', params.out+'/js/autocomplete.js')
    shutil.copy2(path_to_augur + '/../auspice/css/style.css', params.out+'/css/style.css')
    virus_config["outdir"]=params.out
    virus_config["nthreads"]=params.nthreads
    try:
        muttree = mutation_tree(params.aln, params.outgroup, **virus_config)
        muttree.run(raxml_time_limit=0.1)
        muttree.export()
        with open(muttree.outdir+'/js/fields.js', 'w') as ofile:
            # enumerate the distinct values of each metadata field for the ui
            for field in ['passage', 'host', 'subtype','region']:
                try:
                    tmp = sorted(set([x.__dict__[field] for x in muttree.tree.leaf_iter()]))
                except:
                    tmp = ["Unknown"]
                if "Unknown" not in tmp: tmp.append("Unknown")
                ofile.write(field + 's = [' + ', '.join(map(lambda x:'"'+str(x)+'"',tmp))+']\n')
        # success: install the real ui page and drop the debug copy
        shutil.copy2(path_to_augur + '/../auspice/index.html', muttree.outdir+'index.html')
        os.remove(tmp_fasta_fname)
    except:
        print("treetool run failed")
|
nandub/yammer | refs/heads/master | test/lib/NavStoreParser.py | 1 | #
# Copyright 2002, 2004 John T. Reese.
# email: jtr at ofb.net
#
# This file is part of Yammer.
#
# Yammer is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Yammer is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Yammer; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import formatter, htmllib, os, string, urllib, re
import Store, NavForm
from TestFailed import TestFailed
# Elements unknown to htmllib's HTML-2 parser; NavStoreParser installs
# no-op start/end handlers for them at construction time.
newhtmlelements = ('link script frameset style table tr td th tbody'
                   ' div span')
# elements with no closing tag (get do_* handlers instead)
newhtmlelementsempty = 'frame'
class NavStoreParser(htmllib.HTMLParser):
    """HTML parser for navigation-driven tests.

    While parsing, the page is echoed into the local store with href/src
    references rewritten to store URLs; anchors, forms and frames are
    collected so tests can look them up and navigate afterwards.
    """
    def __init__(self, page):
        htmllib.HTMLParser.__init__(self, formatter.NullFormatter())
        # turn on elements that weren't in HTML 2
        for f in newhtmlelements.split():
            self.setifnot('start_' + f, self.fake_start)
            self.setifnot('end_' + f, self.fake_end)
        for f in newhtmlelementsempty.split():
            self.setifnot('do_' + f, self.fake_start)
        self._page= page
        self._httpRoot= page._httpRoot
        self._root= page.root()
        # the rewritten page is streamed into the store while parsing
        self._ofp= Store.storeopen(page.url(), self._root, 'w')
        self._anchors= {}
        self._forms= {}
        self._frames= {}
        self._siteRoot= page.siteRoot()
        self._url= page.url()
        self.feed(page.body())
        self.close()
    def setifnot(self, att, value):
        # install a handler only when the base class doesn't define one
        if not hasattr(self, att):
            setattr(self, att, value)
    def fake_start(self, attrs): pass
    def fake_end(self): pass
    # parse methods
    def handle_starttag(self, tag, method, attrs):
        # echo the start tag with rewritten references; the synthetic
        # 'srcreal' attribute (added by fixReferences) is not written out
        attrs= self.fixReferences(attrs)
        self._ofp.write('<' + tag + '\n')
        [self._ofp.write(' %s="%s"' % (a,v)) for (a,v) in attrs
            if a != 'srcreal']
        self._ofp.write('>')
        method(attrs)
    def handle_endtag(self, tag, method):
        self._ofp.write('</%s>' % tag)
        method()
    def handle_data(self, data):
        # copy character data through to the stored page
        self._ofp.write(data)
        return htmllib.HTMLParser.handle_data(self, data)
    def start_a(self, attrs):
        # remember anchor text -> href, but only for single-href anchors
        hrefs= [h for k,h in attrs if k == 'href']
        if len(hrefs) == 1:
            self._href= hrefs[0]
            self.save_bgn()
        else:
            self._href= None
    def end_a(self):
        if self._href is not None:
            data= self.save_end()
            self._anchors[data]= self._href
    def do_img(self, attrs):
        # fetch the real image and save a copy into the store
        src= self.getAtt(attrs, 'src')
        srcreal= self.getAtt(attrs, 'srcreal')
        if srcreal is not None:
            uo= urllib.urlopen(srcreal)
            fp= Store.storeopen(srcreal, self._root, 'w')
            fp.write(uo.read())
            uo.close()
            fp.close()
    def do_frame(self, attrs):
        # each named frame becomes a subpage of the current test suite
        name= self.getAtt(attrs, 'name')
        src= self.getAtt(attrs, 'src')
        srcreal= self.getAtt(attrs, 'srcreal')
        if name is not None:
            t= self._page.testSuite().createSubpage(srcreal)
            self._frames[name]= t
    def start_form(self, attrs):
        # only named forms are tracked; action defaults to the page url
        # and method defaults to 'get'
        name= self.getAtt(attrs, 'name')
        if name is not None:
            action= self.getAtt(attrs, 'action')
            if action is None:
                action= self._url
            method= self.getAtt(attrs, 'method')
            if method is None:
                method= 'get'
            method= method.lower()
            self._currentForm= NavForm.NavForm(name, action, method,
                                               self._page)
    def end_form(self):
        self._forms[self._currentForm.name()]= self._currentForm
    # form builders
    def do_input(self, attrs):
        name= self.getAtt(attrs, 'name')
        type= self.getAtt(attrs, 'type')
        if type is None:
            type= 'text'
        if name is not None:
            value= self.getAtt(attrs, 'value')
            if type == 'submit':
                self._currentForm.addSubmit(name, value)
            else:
                self._currentForm.addInput(name, value)
    def start_textarea(self, attrs):
        name= self.getAtt(attrs, 'name')
        if name is not None:
            self.save_bgn()
            self._cur_name= name
    def end_textarea(self):
        data= self.save_end()
        self._currentForm.addTextarea(self._cur_name, data)
    def start_select(self, attrs):
        name= self.getAtt(attrs, 'name')
        if name is not None:
            self._currentForm.startSelect(name)
    def end_select(self):
        self._currentForm.endSelect()
    def start_option(self, attrs):
        value= self.getAtt(attrs, 'value')
        selected= self.getAtt(attrs, 'selected')
        self._cur_value= value
        self._cur_selected= selected is not None
        self.save_bgn()
    def end_option(self):
        data= self.save_end()
        self._currentForm.addOption(self._cur_value, self._cur_selected,
                                    data)
    # parse utilities
    def getAtt(self, attrs, att):
        # return the single value of attribute 'att', or None if absent;
        # a duplicated attribute is reported as a test failure
        atts= [(a,v) for (a,v) in attrs if a == att]
        if len(atts) > 1:
            raise TestFailed('multiple attributes "%s" found: %s' % \
                             (att, `atts`))
        if len(atts) == 0:
            return None
        return atts[0][1]
    def fixReferences(self, attrs):
        # make relative href/src values absolute, then map them to store
        # urls; the absolute original is preserved under 'srcreal'
        newattrs= []
        for a,v in attrs:
            if (a == 'href' or a == 'src') and v.find(':') < 0:
                v= self.fixRef(v)
                if a == 'src':
                    newattrs.append(('srcreal', v))
                v= Store.storeurl(v, self._root, self._httpRoot)
            newattrs.append((a, v))
        return newattrs
    def fixRef(self, ref):
        # resolve a relative reference against the site root ('/...') or
        # the directory of the current page
        if ref.startswith('/'):
            return self._siteRoot + ref[1:]
        else:
            urlparts= self._url.split('/')
            urldir= string.join(urlparts[0:-1], '/')
            return urldir + '/' + ref
    # access parsed information
    def getA(self, contentre):
        # anchors whose link text matches the given regular expression
        return [(d, self._anchors[d]) for d in self._anchors.keys()
                if re.match(contentre, d)]
    def listAs(self):
        return self._anchors
    def getForm(self, formName):
        if not self._forms.has_key(formName):
            raise TestFailed('page does not contain form named %s' %
                             formName)
        return self._forms[formName]
    def listForms(self):
        return self._forms
    def getFrame(self, frameName):
        if not self._frames.has_key(frameName):
            raise TestFailed('page does not contain frame named %s' %
                             frameName)
        return self._frames[frameName]
    def listFrames(self):
        return self._frames
|
brett-lempereur/rfidreader | refs/heads/master | rfidreader/__init__.py | 1 | """
This package provides an interface and commands for interacting with the
SL030 RFID reader on a Raspberry Pi.
"""
|
chugunovyar/factoryForBuild | refs/heads/master | env/lib/python2.7/site-packages/pybrain/supervised/evolino/gindividual.py | 35 | __author__ = 'Michael Isik'
class Individual(object):
    """Minimal abstract template for an individual in an evolutionary run.

    Concrete individuals must override both methods below.
    """

    def getGenome(self):
        """Return a reference to this individual's genome (abstract)."""
        raise NotImplementedError()

    def copy(self):
        """Return a full copy of this individual (abstract)."""
        raise NotImplementedError()
|
Andrei-III/troll_updater | refs/heads/master | core/__init__.py | 12133432 | |
hrpt-se/hrpt | refs/heads/master | apps/pollster/migrations/__init__.py | 12133432 | |
pasqualguerrero/django | refs/heads/master | tests/model_formsets/__init__.py | 12133432 | |
csm0042/rpihome | refs/heads/master | tests/schedule_test.py | 1 | #!/usr/bin/python3
""" schedule_test.py:
"""
# Import Required Libraries (Standard, Third Party, Local) ****************************************
import copy
import datetime
import logging
import multiprocessing
import unittest
import sys
if __name__ == "__main__": sys.path.append("..")
from rpihome.modules.schedule import Condition, OnRange, Day, Week, GoogleSheetsSchedule, GoogleSheetToSched
from rpihome.devices.device import Device
# Define test class *******************************************************************************
class Test_Schedule(unittest.TestCase):
    """Unit tests for the Week/Day/OnRange/Condition schedule containers."""
    def setUp(self):
        # fresh, empty Week for every test
        self.logger = logging.getLogger(__name__)
        self.schedule = Week()
        self.logger.debug("Finished running setup")

    def test_data_structure_before_load(self):
        """A fresh Week has 7 empty days, addressable by index and by name."""
        self.logger.debug("Testing structure of data object immediately after creation")
        self.assertEqual(len(self.schedule.day), 7)
        self.assertEqual(len(self.schedule.day[0].range), 0)
        self.assertEqual(len(self.schedule.monday.range), 0)
        self.assertEqual(len(self.schedule.day[1].range), 0)
        self.assertEqual(len(self.schedule.tuesday.range), 0)
        self.assertEqual(len(self.schedule.day[2].range), 0)
        self.assertEqual(len(self.schedule.wednesday.range), 0)
        self.assertEqual(len(self.schedule.day[3].range), 0)
        self.assertEqual(len(self.schedule.thursday.range), 0)
        self.assertEqual(len(self.schedule.day[4].range), 0)
        self.assertEqual(len(self.schedule.friday.range), 0)
        self.assertEqual(len(self.schedule.day[5].range), 0)
        self.assertEqual(len(self.schedule.saturday.range), 0)
        self.assertEqual(len(self.schedule.day[6].range), 0)
        self.assertEqual(len(self.schedule.sunday.range), 0)

    def test_range_load(self):
        """Adding one on/off range stores its times and no conditions."""
        self.logger.debug("Testing loading of a single range to a single day")
        self.schedule.monday.add_range(on_time=datetime.time(5, 40), off_time=datetime.time(6, 30))
        self.assertEqual(len(self.schedule.day[0].range), 1)
        self.assertEqual(len(self.schedule.monday.range), 1)
        self.assertEqual(len(self.schedule.monday.range[0].condition), 0)
        self.assertEqual(self.schedule.monday.range[0].on_time, datetime.time(5, 40))
        self.assertEqual(self.schedule.monday.range[0].off_time, datetime.time(6, 30))

    def test_condition_load(self):
        """A condition attached to a range keeps its name and state."""
        self.logger.debug("Testing the addition of a condition to a single day's single on-range")
        self.schedule.monday.add_range(on_time=datetime.time(5, 40), off_time=datetime.time(6, 30))
        self.schedule.monday.range[0].add_condition(condition="user1", state="true")
        self.assertEqual(len(self.schedule.day[0].range), 1)
        self.assertEqual(len(self.schedule.monday.range), 1)
        self.assertEqual(len(self.schedule.monday.range[0].condition), 1)
        self.assertEqual(self.schedule.monday.range[0].on_time, datetime.time(5, 40))
        self.assertEqual(self.schedule.monday.range[0].off_time, datetime.time(6, 30))
        self.assertEqual(self.schedule.monday.range[0].condition[0].condition, "user1")
        self.assertEqual(self.schedule.monday.range[0].condition[0].state, "true")

    def test_load_multiple_ranges_single_day(self):
        """Two ranges with three and/or conditions each coexist on one day."""
        self.logger.debug("Testing data loading multiple ranges for a single day")
        self.schedule.monday.add_range(on_time=datetime.time(5, 40), off_time=datetime.time(6, 30))
        self.schedule.monday.range[0].add_condition(andor="and", condition="user1", state="true")
        self.schedule.monday.range[0].add_condition(andor="and", condition="user2", state="true")
        self.schedule.monday.range[0].add_condition(andor="or", condition="user3", state="true")
        self.schedule.monday.add_range(on_time=datetime.time(6, 30), off_time=datetime.time(7, 0))
        self.schedule.monday.range[1].add_condition(andor="and", condition="user1", state="true")
        self.schedule.monday.range[1].add_condition(andor="and", condition="user2", state="false")
        self.schedule.monday.range[1].add_condition(andor="and", condition="user3", state="false")
        self.assertEqual(len(self.schedule.monday.range), 2)
        self.assertEqual(len(self.schedule.monday.range[0].condition), 3)
        self.assertEqual(len(self.schedule.monday.range[1].condition), 3)
        self.assertEqual(self.schedule.monday.range[0].on_time, datetime.time(5, 40))
        self.assertEqual(self.schedule.monday.range[0].off_time, datetime.time(6, 30))
        self.assertEqual(self.schedule.monday.range[1].on_time, datetime.time(6, 30))
        self.assertEqual(self.schedule.monday.range[1].off_time, datetime.time(7, 0))

    def test_complex_load_single_day(self):
        """add_range_with_conditions loads a range and its conditions at once."""
        self.logger.debug("testing complex loading of on/off range data with conditions for a single day")
        self.schedule.monday.date = datetime.date(2016, 12, 5)
        self.schedule.monday.add_range_with_conditions(on_time=datetime.time(5, 40),
                                                       off_time=datetime.time(6, 30),
                                                       conditions=[("and", "user1", "true"),
                                                                   ("and", "user2", "true"),
                                                                   ("or", "user3", "true")])
        self.schedule.monday.add_range_with_conditions(on_time=datetime.time(6, 30),
                                                       off_time=datetime.time(7, 0),
                                                       conditions=[("and", "user1", "true"),
                                                                   ("and", "user2", "false"),
                                                                   ("and", "user3", "false")])
        self.assertEqual(self.schedule.monday.date, datetime.date(2016, 12, 5))
        self.assertEqual(len(self.schedule.monday.range), 2)
        self.assertEqual(len(self.schedule.monday.range[0].condition), 3)
        self.assertEqual(len(self.schedule.monday.range[1].condition), 3)
        self.assertEqual(self.schedule.monday.range[0].on_time, datetime.time(5, 40))
        self.assertEqual(self.schedule.monday.range[0].off_time, datetime.time(6, 30))
        self.assertEqual(self.schedule.monday.range[0].condition[0].andor, "and")
        self.assertEqual(self.schedule.monday.range[0].condition[0].condition, "user1")
        self.assertEqual(self.schedule.monday.range[0].condition[0].state, "true")
        self.assertEqual(self.schedule.monday.range[0].condition[1].andor, "and")
        self.assertEqual(self.schedule.monday.range[0].condition[1].condition, "user2")
        self.assertEqual(self.schedule.monday.range[0].condition[1].state, "true")
        self.assertEqual(self.schedule.monday.range[0].condition[2].andor, "or")
        self.assertEqual(self.schedule.monday.range[0].condition[2].condition, "user3")
        self.assertEqual(self.schedule.monday.range[0].condition[2].state, "true")
        self.assertEqual(self.schedule.monday.range[1].on_time, datetime.time(6, 30))
        self.assertEqual(self.schedule.monday.range[1].off_time, datetime.time(7, 0))
        self.assertEqual(self.schedule.monday.range[1].condition[0].andor, "and")
        self.assertEqual(self.schedule.monday.range[1].condition[0].condition, "user1")
        self.assertEqual(self.schedule.monday.range[1].condition[0].state, "true")
        self.assertEqual(self.schedule.monday.range[1].condition[1].andor, "and")
        self.assertEqual(self.schedule.monday.range[1].condition[1].condition, "user2")
        self.assertEqual(self.schedule.monday.range[1].condition[1].state, "false")
        self.assertEqual(self.schedule.monday.range[1].condition[2].andor, "and")
        self.assertEqual(self.schedule.monday.range[1].condition[2].condition, "user3")
        self.assertEqual(self.schedule.monday.range[1].condition[2].state, "false")

    def test_google_sheets_read(self):
        """Smoke test of the Google Sheets schedule import.

        NOTE(review): this talks to the live Google Sheets service and
        asserts nothing -- it only checks that the calls do not raise.
        """
        self.google_sheets_reader = GoogleSheetsSchedule()
        self.records = self.google_sheets_reader.read_data()
        self.schedule_builder = GoogleSheetToSched(self.logger)
        self.schedule_builder.main(self.records)
if __name__ == "__main__":
    # route log output to stdout at DEBUG level, then run the test suite
    logging.basicConfig(stream=sys.stdout)
    logger = logging.getLogger(__name__)
    logger.level = logging.DEBUG  # NOTE(review): logger.setLevel() is the conventional API
    logger.debug("\n\nStarting log\n")
    unittest.main()
|
schacon/git | refs/heads/master | git_remote_helpers/git/importer.py | 1 | import os
import subprocess
class GitImporter(object):
    """Importer for testgit repositories.

    All of the real work is delegated to ``git fast-import``.
    """

    def __init__(self, repo):
        """Remember the repository this importer operates on."""
        self.repo = repo

    def do_import(self, base):
        """Feed a fast-import stream into the repository rooted at *base*.

        Determines the git directory (the repo's own gitpath for local
        repositories, otherwise ``<base>/.git``), makes sure the working
        directory exists, and runs ``git fast-import`` with a marks file
        so repeated imports can resume incrementally.
        """
        dirname = self.repo.get_base_path(base)
        if self.repo.local:
            gitdir = self.repo.gitpath
        else:
            gitdir = os.path.abspath(os.path.join(dirname, '.git'))
        marks_file = os.path.abspath(os.path.join(dirname, 'git.marks'))

        if not os.path.exists(dirname):
            os.makedirs(dirname)

        command = [
            "git",
            "--git-dir=" + gitdir,
            "fast-import",
            "--quiet",
            "--export-marks=" + marks_file,
        ]
        if os.path.exists(marks_file):
            command.append("--import-marks=" + marks_file)
        subprocess.check_call(command)
|
selboo/xunlei-lixian | refs/heads/master | lixian_commands/delete.py | 2 |
from lixian_commands.util import *
from lixian_cli_parser import *
from lixian_encoding import default_encoding
from lixian_colors import colors
import lixian_help
import lixian_query
@command_line_parser(help=lixian_help.delete)
@with_parser(parse_login)
@with_parser(parse_colors)
@with_parser(parse_logging)
@command_line_option('i')
@command_line_option('all')
def delete_task(args):
	'''Delete the tasks matching the command-line query from the Xunlei
	cloud, listing them first and (with -i) asking for confirmation.'''
	client = create_client(args)
	to_delete = lixian_query.search_tasks(client, args)
	if not to_delete:
		print 'Nothing to delete'
		return
	# show the doomed files in red before doing anything destructive
	with colors(args.colors).red.bold():
		print "Below files are going to be deleted:"
		for x in to_delete:
			print x['name'].encode(default_encoding)
	if args.i:
		# interactive mode: keep asking until we get a clear yes/no
		yes_or_no = raw_input('Are your sure to delete below files from Xunlei cloud? ')
		while yes_or_no.lower() not in ('y', 'yes', 'n', 'no'):
			yes_or_no = raw_input('yes or no? ')
		if yes_or_no.lower() in ('y', 'yes'):
			pass
		elif yes_or_no.lower() in ('n', 'no'):
			raise RuntimeError('Deletion abort per user request.')
	client.delete_tasks(to_delete)
|
TomBaxter/osf.io | refs/heads/develop | osf/management/commands/add_notification_subscription.py | 22 | # -*- coding: utf-8 -*-
# This is a management command, rather than a migration script, for two primary reasons:
# 1. It makes no changes to database structure (e.g. AlterField), only database content.
# 2. It takes a long time to run and the site doesn't need to be down that long.
from __future__ import unicode_literals
import logging
import django
django.setup()
from django.core.management.base import BaseCommand
from django.db import transaction
from website.notifications.utils import to_subscription_key
from scripts import utils as script_utils
logger = logging.getLogger(__name__)
def add_reviews_notification_setting(notification_type, state=None):
    """Subscribe every active, confirmed user to *notification_type* with
    'email_transactional' delivery.

    state -- optional migration app state; when given, the historical model
             versions are used instead of the current ones.
    """
    if state:
        OSFUser = state.get_model('osf', 'OSFUser')
        NotificationSubscription = state.get_model('osf', 'NotificationSubscription')
    else:
        from osf.models import OSFUser, NotificationSubscription

    # active = email confirmed, not disabled, not deactivated
    active_users = OSFUser.objects.filter(date_confirmed__isnull=False).exclude(date_disabled__isnull=False).exclude(is_active=False).order_by('id')
    total_active_users = active_users.count()

    logger.info('About to add a global_reviews setting for {} users.'.format(total_active_users))

    total_created = 0
    for user in active_users.iterator():
        user_subscription_id = to_subscription_key(user._id, notification_type)

        subscription = NotificationSubscription.load(user_subscription_id)
        if not subscription:
            logger.info('No {} subscription found for user {}. Subscribing...'.format(notification_type, user._id))
            subscription = NotificationSubscription(_id=user_subscription_id, owner=user, event_name=notification_type)
            subscription.save()  # Need to save in order to access m2m fields
            subscription.add_user_to_subscription(user, 'email_transactional')
        else:
            logger.info('User {} already has a {} subscription'.format(user._id, notification_type))
        # NOTE(review): incremented for every processed user, not only newly
        # created subscriptions, so the final count always equals the user
        # total -- confirm whether that is the intended meaning of the log.
        total_created += 1

    logger.info('Added subscriptions for {}/{} users'.format(total_created, total_active_users))
class Command(BaseCommand):
    """
    Add subscription to all active users for given notification type.
    """
    def add_arguments(self, parser):
        # --dry rolls the transaction back; --notification names the event
        super(Command, self).add_arguments(parser)
        parser.add_argument(
            '--dry',
            action='store_true',
            dest='dry_run',
            help='Run migration and roll back changes to db',
        )
        parser.add_argument(
            '--notification',
            type=str,
            required=True,
            help='Notification type to subscribe users to',
        )

    def handle(self, *args, **options):
        dry_run = options.get('dry_run', False)
        state = options.get('state', None)
        if not dry_run:
            # real runs also log to a file for an audit trail
            script_utils.add_file_logger(logger, __file__)
        # run inside one transaction so a dry run can undo everything by
        # raising at the end
        with transaction.atomic():
            add_reviews_notification_setting(notification_type=options['notification'], state=state)
            if dry_run:
                raise RuntimeError('Dry run, transaction rolled back.')
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.