repo_name stringlengths 5 100 | ref stringlengths 12 67 | path stringlengths 4 244 | copies stringlengths 1 8 | content stringlengths 0 1.05M ⌀ |
|---|---|---|---|---|
apache/nuvem | refs/heads/before_restructure | nuvem-parallel/nuvem/shuffle_.py | 1 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
def get(r, l):
    """Look up r in the mapping l and return its items as a randomly
    ordered tuple."""
    import random
    items = list(l.get(r))
    random.shuffle(items)
    return tuple(items)
|
neuman/orchid | refs/heads/master | core/migrations/0002_auto__add_field_score_created_at__add_field_score_updated_at__add_fiel.py | 2 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: adds created_at/updated_at timestamps and a
    changed_by foreign key to the core.Score table."""

    def forwards(self, orm):
        # Adding field 'Score.created_at'
        db.add_column(u'core_score', 'created_at',
                      self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, default=datetime.datetime(2015, 1, 22, 0, 0), blank=True),
                      keep_default=False)

        # Adding field 'Score.updated_at'
        db.add_column(u'core_score', 'updated_at',
                      self.gf('django.db.models.fields.DateTimeField')(auto_now=True, default=datetime.datetime(2015, 1, 22, 0, 0), blank=True),
                      keep_default=False)

        # Adding field 'Score.changed_by'
        db.add_column(u'core_score', 'changed_by',
                      self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name=u'core_score_related', null=True, to=orm['auth.User']),
                      keep_default=False)

    def backwards(self, orm):
        # Deleting field 'Score.created_at'
        db.delete_column(u'core_score', 'created_at')

        # Deleting field 'Score.updated_at'
        db.delete_column(u'core_score', 'updated_at')

        # Deleting field 'Score.changed_by'
        # NOTE(review): delete_column targets 'changed_by_id' while add_column
        # used the field name 'changed_by' — presumably South stores the FK in
        # a '_id'-suffixed database column; verify against the schema.
        db.delete_column(u'core_score', 'changed_by_id')

    # Frozen ORM snapshot generated by South; used to build the `orm`
    # argument passed to forwards()/backwards().
    models = {
        u'auth.group': {
            'Meta': {'object_name': 'Group'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'auth.permission': {
            'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'core.image': {
            'Meta': {'object_name': 'Image'},
            'changed_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'core_image_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'internal_file': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'original_file': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        },
        u'core.indicator': {
            'Meta': {'object_name': 'Indicator'},
            'changed_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'core_indicator_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'form': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['forms.Form']", 'unique': 'True', 'null': 'True', 'blank': 'True'}),
            'form_number': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'maximum_monthly_records': ('django.db.models.fields.IntegerField', [], {'default': '20'}),
            'passing_percentage': ('django.db.models.fields.FloatField', [], {'default': '85'}),
            'title': ('django.db.models.fields.TextField', [], {}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        },
        u'core.location': {
            'Meta': {'object_name': 'Location'},
            'changed_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'core_location_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'images': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['core.Image']", 'null': 'True', 'blank': 'True'}),
            'indicators': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['core.Indicator']", 'null': 'True', 'blank': 'True'}),
            'members': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
            'position': ('geoposition.fields.GeopositionField', [], {'max_length': '42'}),
            'title': ('django.db.models.fields.TextField', [], {}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        },
        u'core.score': {
            'Meta': {'object_name': 'Score'},
            'changed_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'core_score_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'entry_count': ('django.db.models.fields.IntegerField', [], {}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Indicator']"}),
            'location': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Location']"}),
            'month': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
            'passing': ('django.db.models.fields.BooleanField', [], {}),
            'passing_entry_count': ('django.db.models.fields.IntegerField', [], {}),
            'score': ('django.db.models.fields.FloatField', [], {'default': '85'}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
            'year': ('django.db.models.fields.IntegerField', [], {})
        },
        u'core.userprofile': {
            'Meta': {'object_name': 'UserProfile'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'unique': 'True'})
        },
        u'forms.form': {
            'Meta': {'object_name': 'Form'},
            'button_text': ('django.db.models.fields.CharField', [], {'default': "u'Submit'", 'max_length': '50'}),
            'email_copies': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
            'email_from': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'email_message': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'email_subject': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
            'expiry_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'intro': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'publish_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'response': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'send_email': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'sites': ('django.db.models.fields.related.ManyToManyField', [], {'default': '[1]', 'to': u"orm['sites.Site']", 'symmetrical': 'False'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'}),
            'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'sites.site': {
            'Meta': {'ordering': "(u'domain',)", 'object_name': 'Site', 'db_table': "u'django_site'"},
            'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        }
    }

    complete_apps = ['core']
jrnz/hadoop | refs/heads/master | contrib/hod/testing/lib.py | 182 | #Licensed to the Apache Software Foundation (ASF) under one
#or more contributor license agreements. See the NOTICE file
#distributed with this work for additional information
#regarding copyright ownership. The ASF licenses this file
#to you under the Apache License, Version 2.0 (the
#"License"); you may not use this file except in compliance
#with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
import unittest, re, sys
class BaseTestSuite():
    """Driver that imports a named module, collects every test_* class not
    excluded, and runs them under a TextTestRunner."""

    def __init__(self, name, excludes):
        # Module holding the tests and the class names to skip.
        self.name = name
        self.excludes = excludes

    def runTests(self):
        """Run all non-excluded test_* classes from self.name.

        Returns 0 when everything passed, otherwise the combined count of
        failures and errors."""
        self.runner = unittest.TextTestRunner()
        # Import the target module and pick out its test-case classes.
        mod = __import__(self.name, fromlist=['*'])
        suites = [unittest.makeSuite(getattr(mod, item))
                  for item in dir(mod)
                  if re.search(r"^test_", item) and item not in self.excludes]
        # Run a single master suite built from all collected suites.
        outcome = self.runner.run(unittest.TestSuite(tuple(suites)))
        if outcome.wasSuccessful():
            return 0
        printLine("%s test(s) failed." % len(outcome.failures))
        printLine("%s test(s) threw errors." % len(outcome.errors))
        return len(outcome.failures) + len(outcome.errors)

    def cleanUp(self):
        # Hook for suite-level teardown; nothing to do by default.
        pass
def printLine(str):
    # Emit one diagnostic line (value plus newline) on stderr.
    sys.stderr.write("%s\n" % (str,))
def printSeparator():
    # Emit a 79-asterisk separator, padded exactly as the py2
    # comma-separated print did: "\n", stars, "\n" then the final newline.
    sys.stderr.write("\n %s \n\n" % ("*" * 79))
# This class captures all log messages logged by hodRunner and other classes.
# It is then used to verify that certain log messages have come. This is one
# way to validate that messages printed to the logger are correctly written.
class MockLogger:
    """Captures messages logged through it so tests can verify that certain
    log lines were emitted at the expected severity."""

    def __init__(self):
        # message text -> severity level at which it was logged
        self.__logLines = {}

    def info(self, message):
        self.__logLines[message] = 'info'

    def critical(self, message):
        self.__logLines[message] = 'critical'

    def warn(self, message):
        self.__logLines[message] = 'warn'

    def debug(self, message):
        # don't track debug lines.
        pass

    def hasMessage(self, message, level):
        """Return True iff message was logged at exactly this severity."""
        # was dict.has_key(), which was removed in Python 3
        if message not in self.__logLines:
            return False
        return self.__logLines[message] == level
# Stub class to test cluster manipulation operations.
class MockHadoopCluster:
    """Stub cluster that records the operations invoked on it so tests can
    check what was performed."""

    def __init__(self):
        # store the operations received: name -> list of args
        self.__operations = {}

    def delete_job(self, jobid):
        self.__operations['delete_job'] = [jobid]

    def is_cluster_deallocated(self, dummy):
        # Always reports the cluster as still allocated.
        return False

    def wasOperationPerformed(self, operation, args):
        """True iff operation was recorded and every recorded arg appears
        in args."""
        # was dict.has_key(), which was removed in Python 3
        if operation in self.__operations:
            actualArgs = self.__operations[operation]
            for arg in actualArgs:
                if arg not in args:
                    break
            else:
                # for/else: all recorded args were found in args
                return True
        return False
|
proxysh/Safejumper-for-Mac | refs/heads/master | buildmac/Resources/env/lib/python2.7/site-packages/twisted/conch/test/test_insults.py | 13 | # -*- test-case-name: twisted.conch.test.test_insults -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
from twisted.python.reflect import namedAny
from twisted.trial import unittest
from twisted.test.proto_helpers import StringTransport
from twisted.conch.insults.insults import ServerProtocol, ClientProtocol
from twisted.conch.insults.insults import (CS_UK, CS_US, CS_DRAWING,
CS_ALTERNATE,
CS_ALTERNATE_SPECIAL,
BLINK, UNDERLINE)
from twisted.conch.insults.insults import G0, G1
from twisted.conch.insults.insults import modes, privateModes
from twisted.python.compat import intToBytes, iterbytes
from twisted.python.constants import ValueConstant, Values
import textwrap
def _getattr(mock, name):
    # Read an attribute from a Mock without triggering its recording
    # __getattribute__, by delegating to the superclass lookup.
    return super(Mock, mock).__getattribute__(name)
def occurrences(mock):
    # The list of access/call records captured by a Mock.
    return _getattr(mock, 'occurrences')
def methods(mock):
    # The name -> return-value mapping configured on a Mock.
    return _getattr(mock, 'methods')
def _append(mock, obj):
    # Record an event on a Mock without tripping attribute interception.
    occurrences(mock).append(obj)
default = object()
def _ecmaCodeTableCoordinate(column, row):
"""
Return the byte in 7- or 8-bit code table identified by C{column}
and C{row}.
"An 8-bit code table consists of 256 positions arranged in 16
columns and 16 rows. The columns and rows are numbered 00 to 15."
"A 7-bit code table consists of 128 positions arranged in 8
columns and 16 rows. The columns are numbered 00 to 07 and the
rows 00 to 15 (see figure 1)."
p.5 of "Standard ECMA-35: Character Code Structure and Extension
Techniques", 6th Edition (December 1994).
"""
# 8 and 15 both happen to take up 4 bits, so the first number
# should be shifted by 4 for both the 7- and 8-bit tables.
return bytes(bytearray([(column << 4) | row]))
def _makeControlFunctionSymbols(name, colOffset, names, doc):
    """
    Build a L{Values} subclass whose constants map control-function names
    to their coded bytes.

    @param name: name of the class to create
    @param colOffset: column offset added to each entry's column index
    @param names: rows of control-function names; C{None} entries are
        skipped
    @param doc: docstring for the created class
    """
    # The value for each constant is the concatenation of the bit values
    # of its x, y locations, with an offset of 4 added to its x value.
    # so CUP is (0 + 4, 8) = (4, 8) = 4||8 = 1001000 = 72 = b"H"
    # this is how it's defined in the standard!
    # (Loop variable renamed from `name`, which shadowed the parameter.)
    attrs = {constantName: ValueConstant(
                 _ecmaCodeTableCoordinate(i + colOffset, j))
             for j, row in enumerate(names)
             for i, constantName in enumerate(row)
             if constantName}
    attrs["__doc__"] = doc
    return type(name, (Values,), attrs)
CSFinalByte = _makeControlFunctionSymbols(
"CSFinalByte",
colOffset=4,
names=[
# 4, 5, 6
['ICH', 'DCH', 'HPA'],
['CUU', 'SSE', 'HPR'],
['CUD', 'CPR', 'REP'],
['CUF', 'SU', 'DA'],
['CUB', 'SD', 'VPA'],
['CNL', 'NP', 'VPR'],
['CPL', 'PP', 'HVP'],
['CHA', 'CTC', 'TBC'],
['CUP', 'ECH', 'SM'],
['CHT', 'CVT', 'MC'],
['ED', 'CBT', 'HPB'],
['EL', 'SRS', 'VPB'],
['IL', 'PTX', 'RM'],
['DL', 'SDS', 'SGR'],
['EF', 'SIMD', 'DSR'],
['EA', None, 'DAQ'],
],
doc=textwrap.dedent("""
Symbolic constants for all control sequence final bytes
that do not imply intermediate bytes. This happens to cover
movement control sequences.
See page 11 of "Standard ECMA 48: Control Functions for Coded
Character Sets", 5th Edition (June 1991).
Each L{ValueConstant} maps a control sequence name to L{bytes}
"""))
C1SevenBit = _makeControlFunctionSymbols(
"C1SevenBit",
colOffset=4,
names=[
[None, "DCS"],
[None, "PU1"],
["BPH", "PU2"],
["NBH", "STS"],
[None, "CCH"],
["NEL", "MW"],
["SSA", "SPA"],
["ESA", "EPA"],
["HTS", "SOS"],
["HTJ", None],
["VTS", "SCI"],
["PLD", "CSI"],
["PLU", "ST"],
["RI", "OSC"],
["SS2", "PM"],
["SS3", "APC"],
],
doc=textwrap.dedent("""
Symbolic constants for all 7 bit versions of the C1 control functions
See page 9 "Standard ECMA 48: Control Functions for Coded
Character Sets", 5th Edition (June 1991).
Each L{ValueConstant} maps a control sequence name to L{bytes}
"""))
class Mock(object):
    """
    Test double that records every attribute access and call made on it.

    Records are appended to C{occurrences}; because normal attribute
    access is itself intercepted by C{__getattribute__}, use the
    module-level C{occurrences}/C{methods} helpers to inspect a Mock.
    (Commented-out duplicate recording lines removed; C{_append} already
    does exactly that.)
    """
    # Value __call__ returns unless overridden per-instance.
    callReturnValue = default

    def __init__(self, methods=None, callReturnValue=default):
        """
        @param methods: Mapping of names to return values
        @param callReturnValue: object __call__ should return
        """
        self.occurrences = []
        if methods is None:
            methods = {}
        self.methods = methods
        if callReturnValue is not default:
            self.callReturnValue = callReturnValue

    def __call__(self, *a, **kw):
        returnValue = _getattr(self, 'callReturnValue')
        if returnValue is default:
            returnValue = Mock()
        _append(self, ('__call__', returnValue, a, kw))
        return returnValue

    def __getattribute__(self, name):
        methods = _getattr(self, 'methods')
        if name in methods:
            # Configured methods yield a Mock that returns the given value.
            attrValue = Mock(callReturnValue=methods[name])
        else:
            attrValue = Mock()
        _append(self, (name, attrValue))
        return attrValue
class MockMixin:
    """Assertion helpers for inspecting the records kept by L{Mock}."""

    def assertCall(self, occurrence, methodName, expectedPositionalArgs=(),
                   expectedKeywordArgs={}):
        """
        Assert that C{occurrence} is a single invocation of C{methodName}
        with the given arguments, and return the Mock that call produced.
        """
        attr, mock = occurrence
        self.assertEqual(attr, methodName)
        recorded = occurrences(mock)
        self.assertEqual(len(recorded), 1)
        [(callName, produced, posArgs, kwArgs)] = recorded
        self.assertEqual(callName, "__call__")
        self.assertEqual(posArgs, expectedPositionalArgs)
        self.assertEqual(kwArgs, expectedKeywordArgs)
        return produced
_byteGroupingTestTemplate = """\
def testByte%(groupName)s(self):
transport = StringTransport()
proto = Mock()
parser = self.protocolFactory(lambda: proto)
parser.factory = self
parser.makeConnection(transport)
bytes = self.TEST_BYTES
while bytes:
chunk = bytes[:%(bytesPer)d]
bytes = bytes[%(bytesPer)d:]
parser.dataReceived(chunk)
self.verifyResults(transport, proto, parser)
"""
class ByteGroupingsMixin(MockMixin):
    """Generates testBytePairs/Triples/Quads/Quints/Sexes methods that
    deliver TEST_BYTES to the parser in chunks of 2-6 bytes."""
    # Subclasses must set this to the protocol class under test.
    protocolFactory = None

    # Generate one test method per chunk size from the shared template.
    for word, n in [('Pairs', 2), ('Triples', 3), ('Quads', 4), ('Quints', 5), ('Sexes', 6)]:
        exec(_byteGroupingTestTemplate % {'groupName': word, 'bytesPer': n})
    del word, n

    def verifyResults(self, transport, proto, parser):
        # Base check: the protocol saw makeConnection(parser) and nothing
        # else yet; subclasses extend this with their own expectations.
        result = self.assertCall(occurrences(proto).pop(0), "makeConnection", (parser,))
        self.assertEqual(occurrences(result), [])
del _byteGroupingTestTemplate
class ServerArrowKeysTests(ByteGroupingsMixin, unittest.TestCase):
    """Each arrow-key escape sequence dispatches one keystrokeReceived."""
    protocolFactory = ServerProtocol

    # All the arrow keys once
    TEST_BYTES = b'\x1b[A\x1b[B\x1b[C\x1b[D'

    def verifyResults(self, transport, proto, parser):
        ByteGroupingsMixin.verifyResults(self, transport, proto, parser)

        expectedArrows = (parser.UP_ARROW, parser.DOWN_ARROW,
                          parser.RIGHT_ARROW, parser.LEFT_ARROW)
        for arrow in expectedArrows:
            outcome = self.assertCall(
                occurrences(proto).pop(0), "keystrokeReceived", (arrow, None))
            self.assertEqual(occurrences(outcome), [])
        self.assertFalse(occurrences(proto))
class PrintableCharactersTests(ByteGroupingsMixin, unittest.TestCase):
    """Printable bytes arrive as keystrokes; ESC-prefixed ones carry ALT."""
    protocolFactory = ServerProtocol

    # Some letters and digits, first on their own, then capitalized,
    # then modified with alt
    TEST_BYTES = b'abc123ABC!@#\x1ba\x1bb\x1bc\x1b1\x1b2\x1b3'

    def verifyResults(self, transport, proto, parser):
        ByteGroupingsMixin.verifyResults(self, transport, proto, parser)

        # Plain keystrokes have no modifier...
        for ch in iterbytes(b'abc123ABC!@#'):
            outcome = self.assertCall(
                occurrences(proto).pop(0), "keystrokeReceived", (ch, None))
            self.assertEqual(occurrences(outcome), [])
        # ...while ESC-prefixed keystrokes carry the ALT modifier.
        for ch in iterbytes(b'abc123'):
            outcome = self.assertCall(
                occurrences(proto).pop(0), "keystrokeReceived", (ch, parser.ALT))
            self.assertEqual(occurrences(outcome), [])

        occs = occurrences(proto)
        self.assertFalse(occs, "%r should have been []" % (occs,))
class ServerFunctionKeysTests(ByteGroupingsMixin, unittest.TestCase):
    """Test for parsing and dispatching function keys (F1 - F12)
    """
    protocolFactory = ServerProtocol

    # One CSI sequence per function key: F1-F4 use two-letter codes,
    # F5-F12 use numeric codes terminated by b'~'.
    TEST_BYTES = b''.join(
        [b'\x1b[' + codes
         for codes in (b'OP', b'OQ', b'OR', b'OS',         # F1 - F4
                       b'15~', b'17~', b'18~', b'19~',     # F5 - F8
                       b'20~', b'21~', b'23~', b'24~')])   # F9 - F12

    def verifyResults(self, transport, proto, parser):
        ByteGroupingsMixin.verifyResults(self, transport, proto, parser)
        for funcNum in range(1, 13):
            key = getattr(parser, 'F%d' % (funcNum,))
            outcome = self.assertCall(
                occurrences(proto).pop(0), "keystrokeReceived", (key, None))
            self.assertEqual(occurrences(outcome), [])
        self.assertFalse(occurrences(proto))
class ClientCursorMovementTests(ByteGroupingsMixin, unittest.TestCase):
    """Cursor-movement control sequences dispatch the matching cursor* calls."""
    protocolFactory = ClientProtocol

    # Move the cursor down two, right four, up one, left two, up one, left two
    TEST_BYTES = (b"\x1b[2B" + b"\x1b[4C" + b"\x1b[A" +
                  b"\x1b[2D" + b"\x1b[A" + b"\x1b[2D")

    def verifyResults(self, transport, proto, parser):
        ByteGroupingsMixin.verifyResults(self, transport, proto, parser)

        expectedMoves = [('Down', 2), ('Forward', 4), ('Up', 1),
                         ('Backward', 2), ('Up', 1), ('Backward', 2)]
        for methodSuffix, count in expectedMoves:
            outcome = self.assertCall(
                occurrences(proto).pop(0), "cursor" + methodSuffix, (count,))
            self.assertEqual(occurrences(outcome), [])
        self.assertFalse(occurrences(proto))
class ClientControlSequencesTests(unittest.TestCase, MockMixin):
    """Feeds individual control sequences to a ClientProtocol and verifies
    the corresponding terminal method is dispatched for each."""

    def setUp(self):
        # Wire a ClientProtocol to a Mock terminal and consume the initial
        # makeConnection record so each test starts with a clean slate.
        self.transport = StringTransport()
        self.proto = Mock()
        self.parser = ClientProtocol(lambda: self.proto)
        self.parser.factory = self
        self.parser.makeConnection(self.transport)
        result = self.assertCall(occurrences(self.proto).pop(0), "makeConnection", (self.parser,))
        self.assertFalse(occurrences(result))

    def testSimpleCardinals(self):
        # Cursor movement sequences B/A/C/D with no count (defaults to 1),
        # then counts 2, 20 and 200.
        self.parser.dataReceived(
            b''.join(
                [b''.join([b'\x1b[' + n + ch
                           for n in (b'', intToBytes(2), intToBytes(20), intToBytes(200))]
                          ) for ch in iterbytes(b'BACD')
                 ]))
        occs = occurrences(self.proto)

        for meth in ("Down", "Up", "Forward", "Backward"):
            for count in (1, 2, 20, 200):
                result = self.assertCall(occs.pop(0), "cursor" + meth, (count,))
                self.assertFalse(occurrences(result))
        self.assertFalse(occs)

    def testScrollRegion(self):
        # CSI r with and without explicit top/bottom parameters.
        self.parser.dataReceived(b'\x1b[5;22r\x1b[r')
        occs = occurrences(self.proto)

        result = self.assertCall(occs.pop(0), "setScrollRegion", (5, 22))
        self.assertFalse(occurrences(result))

        result = self.assertCall(occs.pop(0), "setScrollRegion", (None, None))
        self.assertFalse(occurrences(result))
        self.assertFalse(occs)

    def testHeightAndWidth(self):
        # ESC # 3/4/5/6 line-attribute sequences.
        self.parser.dataReceived(b"\x1b#3\x1b#4\x1b#5\x1b#6")
        occs = occurrences(self.proto)

        result = self.assertCall(occs.pop(0), "doubleHeightLine", (True,))
        self.assertFalse(occurrences(result))

        result = self.assertCall(occs.pop(0), "doubleHeightLine", (False,))
        self.assertFalse(occurrences(result))

        result = self.assertCall(occs.pop(0), "singleWidthLine")
        self.assertFalse(occurrences(result))

        result = self.assertCall(occs.pop(0), "doubleWidthLine")
        self.assertFalse(occurrences(result))
        self.assertFalse(occs)

    def testCharacterSet(self):
        # ESC ( x selects into G0, ESC ) x into G1, for each charset code.
        self.parser.dataReceived(
            b''.join(
                [b''.join([b'\x1b' + g + n for n in iterbytes(b'AB012')])
                 for g in iterbytes(b'()')
                 ]))
        occs = occurrences(self.proto)

        for which in (G0, G1):
            for charset in (CS_UK, CS_US, CS_DRAWING, CS_ALTERNATE, CS_ALTERNATE_SPECIAL):
                result = self.assertCall(occs.pop(0), "selectCharacterSet", (charset, which))
                self.assertFalse(occurrences(result))
        self.assertFalse(occs)

    def testShifting(self):
        # NAK (0x15) -> shiftIn, DC4 (0x14) -> shiftOut.
        self.parser.dataReceived(b"\x15\x14")
        occs = occurrences(self.proto)

        result = self.assertCall(occs.pop(0), "shiftIn")
        self.assertFalse(occurrences(result))

        result = self.assertCall(occs.pop(0), "shiftOut")
        self.assertFalse(occurrences(result))
        self.assertFalse(occs)

    def testSingleShifts(self):
        # ESC N / ESC O single-shift sequences.
        self.parser.dataReceived(b"\x1bN\x1bO")
        occs = occurrences(self.proto)

        result = self.assertCall(occs.pop(0), "singleShift2")
        self.assertFalse(occurrences(result))

        result = self.assertCall(occs.pop(0), "singleShift3")
        self.assertFalse(occurrences(result))
        self.assertFalse(occs)

    def testKeypadMode(self):
        # ESC = application keypad, ESC > numeric keypad.
        self.parser.dataReceived(b"\x1b=\x1b>")
        occs = occurrences(self.proto)

        result = self.assertCall(occs.pop(0), "applicationKeypadMode")
        self.assertFalse(occurrences(result))

        result = self.assertCall(occs.pop(0), "numericKeypadMode")
        self.assertFalse(occurrences(result))
        self.assertFalse(occs)

    def testCursor(self):
        # ESC 7 save cursor, ESC 8 restore cursor.
        self.parser.dataReceived(b"\x1b7\x1b8")
        occs = occurrences(self.proto)

        result = self.assertCall(occs.pop(0), "saveCursor")
        self.assertFalse(occurrences(result))

        result = self.assertCall(occs.pop(0), "restoreCursor")
        self.assertFalse(occurrences(result))
        self.assertFalse(occs)

    def testReset(self):
        # ESC c full reset.
        self.parser.dataReceived(b"\x1bc")
        occs = occurrences(self.proto)

        result = self.assertCall(occs.pop(0), "reset")
        self.assertFalse(occurrences(result))
        self.assertFalse(occs)

    def testIndex(self):
        # ESC D index, ESC M reverse index, ESC E next line.
        self.parser.dataReceived(b"\x1bD\x1bM\x1bE")
        occs = occurrences(self.proto)

        result = self.assertCall(occs.pop(0), "index")
        self.assertFalse(occurrences(result))

        result = self.assertCall(occs.pop(0), "reverseIndex")
        self.assertFalse(occurrences(result))

        result = self.assertCall(occs.pop(0), "nextLine")
        self.assertFalse(occurrences(result))
        self.assertFalse(occs)

    def testModes(self):
        # CSI ... h sets the listed modes, CSI ... l resets them.
        self.parser.dataReceived(
            b"\x1b[" + b';'.join(map(intToBytes, [modes.KAM, modes.IRM, modes.LNM])) + b"h")
        self.parser.dataReceived(
            b"\x1b[" + b';'.join(map(intToBytes, [modes.KAM, modes.IRM, modes.LNM])) + b"l")
        occs = occurrences(self.proto)

        result = self.assertCall(occs.pop(0), "setModes", ([modes.KAM, modes.IRM, modes.LNM],))
        self.assertFalse(occurrences(result))

        result = self.assertCall(occs.pop(0), "resetModes", ([modes.KAM, modes.IRM, modes.LNM],))
        self.assertFalse(occurrences(result))
        self.assertFalse(occs)

    def testErasure(self):
        # CSI K/J erasure variants plus CSI 3 P delete-character.
        self.parser.dataReceived(
            b"\x1b[K\x1b[1K\x1b[2K\x1b[J\x1b[1J\x1b[2J\x1b[3P")
        occs = occurrences(self.proto)

        for meth in ("eraseToLineEnd", "eraseToLineBeginning", "eraseLine",
                     "eraseToDisplayEnd", "eraseToDisplayBeginning",
                     "eraseDisplay"):
            result = self.assertCall(occs.pop(0), meth)
            self.assertFalse(occurrences(result))

        result = self.assertCall(occs.pop(0), "deleteCharacter", (3,))
        self.assertFalse(occurrences(result))
        self.assertFalse(occs)

    def testLineDeletion(self):
        # CSI M with no count (defaults to 1) and with count 3.
        self.parser.dataReceived(b"\x1b[M\x1b[3M")
        occs = occurrences(self.proto)

        for arg in (1, 3):
            result = self.assertCall(occs.pop(0), "deleteLine", (arg,))
            self.assertFalse(occurrences(result))
        self.assertFalse(occs)

    def testLineInsertion(self):
        # CSI L with no count (defaults to 1) and with count 3.
        self.parser.dataReceived(b"\x1b[L\x1b[3L")
        occs = occurrences(self.proto)

        for arg in (1, 3):
            result = self.assertCall(occs.pop(0), "insertLine", (arg,))
            self.assertFalse(occurrences(result))
        self.assertFalse(occs)

    def testCursorPosition(self):
        # CSI 6 n queries the cursor position; the (0-based) reply from the
        # terminal is reported back 1-based on the transport.
        methods(self.proto)['reportCursorPosition'] = (6, 7)
        self.parser.dataReceived(b"\x1b[6n")
        self.assertEqual(self.transport.value(), b"\x1b[7;8R")

        occs = occurrences(self.proto)
        result = self.assertCall(occs.pop(0), "reportCursorPosition")
        # This isn't really an interesting assert, since it only tests that
        # our mock setup is working right, but I'll include it anyway.
        self.assertEqual(result, (6, 7))

    def test_applicationDataBytes(self):
        """
        Contiguous non-control bytes are passed to a single call to the
        C{write} method of the terminal to which the L{ClientProtocol} is
        connected.
        """
        occs = occurrences(self.proto)
        self.parser.dataReceived(b'a')
        self.assertCall(occs.pop(0), "write", (b"a",))
        self.parser.dataReceived(b'bc')
        self.assertCall(occs.pop(0), "write", (b"bc",))

    def _applicationDataTest(self, data, calls):
        # Deliver data, then assert the given (method, args) calls happened
        # in order and nothing else.
        occs = occurrences(self.proto)
        self.parser.dataReceived(data)
        while calls:
            self.assertCall(occs.pop(0), *calls.pop(0))
        self.assertFalse(occs, "No other calls should happen: %r" % (occs,))

    def test_shiftInAfterApplicationData(self):
        """
        Application data bytes followed by a shift-in command are passed to a
        call to C{write} before the terminal's C{shiftIn} method is called.
        """
        self._applicationDataTest(
            b'ab\x15', [
                ("write", (b"ab",)),
                ("shiftIn",)])

    def test_shiftOutAfterApplicationData(self):
        """
        Application data bytes followed by a shift-out command are passed to a
        call to C{write} before the terminal's C{shiftOut} method is called.
        """
        self._applicationDataTest(
            b'ab\x14', [
                ("write", (b"ab",)),
                ("shiftOut",)])

    def test_cursorBackwardAfterApplicationData(self):
        """
        Application data bytes followed by a cursor-backward command are passed
        to a call to C{write} before the terminal's C{cursorBackward} method is
        called.
        """
        self._applicationDataTest(
            b'ab\x08', [
                ("write", (b"ab",)),
                ("cursorBackward",)])

    def test_escapeAfterApplicationData(self):
        """
        Application data bytes followed by an escape character are passed to a
        call to C{write} before the terminal's handler method for the escape is
        called.
        """
        # Test a short escape
        self._applicationDataTest(
            b'ab\x1bD', [
                ("write", (b"ab",)),
                ("index",)])

        # And a long escape
        self._applicationDataTest(
            b'ab\x1b[4h', [
                ("write", (b"ab",)),
                ("setModes", ([4],))])

        # There's some other cases too, but they're all handled by the same
        # codepaths as above.
class ServerProtocolOutputTests(unittest.TestCase):
"""
Tests for the bytes L{ServerProtocol} writes to its transport when its
methods are called.
"""
# From ECMA 48: CSI is represented by bit combinations 01/11
# (representing ESC) and 05/11 in a 7-bit code or by bit
# combination 09/11 in an 8-bit code
ESC = _ecmaCodeTableCoordinate(1, 11)
CSI = ESC + _ecmaCodeTableCoordinate(5, 11)
def setUp(self):
self.protocol = ServerProtocol()
self.transport = StringTransport()
self.protocol.makeConnection(self.transport)
def test_cursorUp(self):
"""
L{ServerProtocol.cursorUp} writes the control sequence
ending with L{CSFinalByte.CUU} to its transport.
"""
self.protocol.cursorUp(1)
self.assertEqual(self.transport.value(),
self.CSI + b'1' + CSFinalByte.CUU.value)
def test_cursorDown(self):
"""
L{ServerProtocol.cursorDown} writes the control sequence
ending with L{CSFinalByte.CUD} to its transport.
"""
self.protocol.cursorDown(1)
self.assertEqual(self.transport.value(),
self.CSI + b'1' + CSFinalByte.CUD.value)
def test_cursorForward(self):
"""
L{ServerProtocol.cursorForward} writes the control sequence
ending with L{CSFinalByte.CUF} to its transport.
"""
self.protocol.cursorForward(1)
self.assertEqual(self.transport.value(),
self.CSI + b'1' + CSFinalByte.CUF.value)
def test_cursorBackward(self):
"""
L{ServerProtocol.cursorBackward} writes the control sequence
ending with L{CSFinalByte.CUB} to its transport.
"""
self.protocol.cursorBackward(1)
self.assertEqual(self.transport.value(),
self.CSI + b'1' + CSFinalByte.CUB.value)
def test_cursorPosition(self):
"""
L{ServerProtocol.cursorPosition} writes a control sequence
ending with L{CSFinalByte.CUP} and containing the expected
coordinates to its transport.
"""
self.protocol.cursorPosition(0, 0)
self.assertEqual(self.transport.value(),
self.CSI + b'1;1' + CSFinalByte.CUP.value)
def test_cursorHome(self):
"""
L{ServerProtocol.cursorHome} writes a control sequence ending
with L{CSFinalByte.CUP} and no parameters, so that the client
defaults to (1, 1).
"""
self.protocol.cursorHome()
self.assertEqual(self.transport.value(),
self.CSI + CSFinalByte.CUP.value)
def test_index(self):
"""
L{ServerProtocol.index} writes the control sequence ending in
the 8-bit code table coordinates 4, 4.
Note that ECMA48 5th Edition removes C{IND}.
"""
self.protocol.index()
self.assertEqual(self.transport.value(),
self.ESC + _ecmaCodeTableCoordinate(4, 4))
def test_reverseIndex(self):
"""
L{ServerProtocol.reverseIndex} writes the control sequence
ending in the L{C1SevenBit.RI}.
"""
self.protocol.reverseIndex()
self.assertEqual(self.transport.value(),
self.ESC + C1SevenBit.RI.value)
def test_nextLine(self):
"""
L{ServerProtocol.nextLine} writes C{"\r\n"} to its transport.
"""
# Why doesn't it write ESC E? Because ESC E is poorly supported. For
# example, gnome-terminal (many different versions) fails to scroll if
# it receives ESC E and the cursor is already on the last row.
self.protocol.nextLine()
self.assertEqual(self.transport.value(), b"\r\n")
def test_setModes(self):
"""
L{ServerProtocol.setModes} writes a control sequence
containing the requested modes and ending in the
L{CSFinalByte.SM}.
"""
modesToSet = [modes.KAM, modes.IRM, modes.LNM]
self.protocol.setModes(modesToSet)
self.assertEqual(self.transport.value(),
self.CSI +
b';'.join(map(intToBytes, modesToSet)) +
CSFinalByte.SM.value)
def test_setPrivateModes(self):
"""
L{ServerProtocol.setPrivatesModes} writes a control sequence
containing the requested private modes and ending in the
L{CSFinalByte.SM}.
"""
privateModesToSet = [privateModes.ERROR,
privateModes.COLUMN,
privateModes.ORIGIN]
self.protocol.setModes(privateModesToSet)
self.assertEqual(self.transport.value(),
self.CSI +
b';'.join(map(intToBytes, privateModesToSet)) +
CSFinalByte.SM.value)
def test_resetModes(self):
"""
L{ServerProtocol.resetModes} writes the control sequence
ending in the L{CSFinalByte.RM}.
"""
modesToSet = [modes.KAM, modes.IRM, modes.LNM]
self.protocol.resetModes(modesToSet)
self.assertEqual(self.transport.value(),
self.CSI +
b';'.join(map(intToBytes, modesToSet)) +
CSFinalByte.RM.value)
def test_singleShift2(self):
"""
L{ServerProtocol.singleShift2} writes an escape sequence
followed by L{C1SevenBit.SS2}
"""
self.protocol.singleShift2()
self.assertEqual(self.transport.value(),
self.ESC + C1SevenBit.SS2.value)
def test_singleShift3(self):
"""
L{ServerProtocol.singleShift3} writes an escape sequence
followed by L{C1SevenBit.SS3}
"""
self.protocol.singleShift3()
self.assertEqual(self.transport.value(),
self.ESC + C1SevenBit.SS3.value)
def test_selectGraphicRendition(self):
"""
L{ServerProtocol.selectGraphicRendition} writes a control
sequence containing the requested attributes and ending with
L{CSFinalByte.SGR}
"""
self.protocol.selectGraphicRendition(str(BLINK), str(UNDERLINE))
self.assertEqual(self.transport.value(),
self.CSI +
intToBytes(BLINK) + b';' + intToBytes(UNDERLINE) +
CSFinalByte.SGR.value)
def test_horizontalTabulationSet(self):
"""
L{ServerProtocol.horizontalTabulationSet} writes the escape
sequence ending in L{C1SevenBit.HTS}
"""
self.protocol.horizontalTabulationSet()
self.assertEqual(self.transport.value(),
self.ESC +
C1SevenBit.HTS.value)
def test_eraseToLineEnd(self):
"""
L{ServerProtocol.eraseToLineEnd} writes the control sequence
sequence ending in L{CSFinalByte.EL} and no parameters,
forcing the client to default to 0 (from the active present
position's current location to the end of the line.)
"""
self.protocol.eraseToLineEnd()
self.assertEqual(self.transport.value(),
self.CSI + CSFinalByte.EL.value)
def test_eraseToLineBeginning(self):
"""
L{ServerProtocol.eraseToLineBeginning} writes the control
sequence sequence ending in L{CSFinalByte.EL} and a parameter
of 1 (from the beginning of the line up to and include the
active present position's current location.)
"""
self.protocol.eraseToLineBeginning()
self.assertEqual(self.transport.value(),
self.CSI + b'1' + CSFinalByte.EL.value)
def test_eraseLine(self):
"""
L{ServerProtocol.eraseLine} writes the control
sequence sequence ending in L{CSFinalByte.EL} and a parameter
of 2 (the entire line.)
"""
self.protocol.eraseLine()
self.assertEqual(self.transport.value(),
self.CSI + b'2' + CSFinalByte.EL.value)
def test_eraseToDisplayEnd(self):
"""
L{ServerProtocol.eraseToDisplayEnd} writes the control
sequence sequence ending in L{CSFinalByte.ED} and no parameters,
forcing the client to default to 0 (from the active present
position's current location to the end of the page.)
"""
self.protocol.eraseToDisplayEnd()
self.assertEqual(self.transport.value(),
self.CSI + CSFinalByte.ED.value)
def test_eraseToDisplayBeginning(self):
"""
L{ServerProtocol.eraseToDisplayBeginning} writes the control
sequence sequence ending in L{CSFinalByte.ED} a parameter of 1
(from the beginning of the page up to and include the active
present position's current location.)
"""
self.protocol.eraseToDisplayBeginning()
self.assertEqual(self.transport.value(),
self.CSI + b'1' + CSFinalByte.ED.value)
def test_eraseToDisplay(self):
"""
L{ServerProtocol.eraseDisplay} writes the control sequence
sequence ending in L{CSFinalByte.ED} a parameter of 2 (the
entire page)
"""
self.protocol.eraseDisplay()
self.assertEqual(self.transport.value(),
self.CSI + b'2' + CSFinalByte.ED.value)
def test_deleteCharacter(self):
"""
L{ServerProtocol.deleteCharacter} writes the control sequence
containing the number of characters to delete and ending in
L{CSFinalByte.DCH}
"""
self.protocol.deleteCharacter(4)
self.assertEqual(self.transport.value(),
self.CSI + b'4' + CSFinalByte.DCH.value)
def test_insertLine(self):
"""
L{ServerProtocol.insertLine} writes the control sequence
containing the number of lines to insert and ending in
L{CSFinalByte.IL}
"""
self.protocol.insertLine(5)
self.assertEqual(self.transport.value(),
self.CSI + b'5' + CSFinalByte.IL.value)
def test_deleteLine(self):
"""
L{ServerProtocol.deleteLine} writes the control sequence
containing the number of lines to delete and ending in
L{CSFinalByte.DL}
"""
self.protocol.deleteLine(6)
self.assertEqual(self.transport.value(),
self.CSI + b'6' + CSFinalByte.DL.value)
def test_setScrollRegionNoArgs(self):
"""
With no arguments, L{ServerProtocol.setScrollRegion} writes a
control sequence with no parameters, but a parameter
separator, and ending in C{b'r'}.
"""
self.protocol.setScrollRegion()
self.assertEqual(self.transport.value(), self.CSI + b';' + b'r')
def test_setScrollRegionJustFirst(self):
"""
With just a value for its C{first} argument,
L{ServerProtocol.setScrollRegion} writes a control sequence with
that parameter, a parameter separator, and finally a C{b'r'}.
"""
self.protocol.setScrollRegion(first=1)
self.assertEqual(self.transport.value(), self.CSI + b'1;' + b'r')
def test_setScrollRegionJustLast(self):
"""
With just a value for its C{last} argument,
L{ServerProtocol.setScrollRegion} writes a control sequence with
a parameter separator, that parameter, and finally a C{b'r'}.
"""
self.protocol.setScrollRegion(last=1)
self.assertEqual(self.transport.value(), self.CSI + b';1' + b'r')
def test_setScrollRegionFirstAndLast(self):
"""
When given both C{first} and C{last}
L{ServerProtocol.setScrollRegion} writes a control sequence with
the first parameter, a parameter separator, the last
parameter, and finally a C{b'r'}.
"""
self.protocol.setScrollRegion(first=1, last=2)
self.assertEqual(self.transport.value(), self.CSI + b'1;2' + b'r')
def test_reportCursorPosition(self):
"""
L{ServerProtocol.reportCursorPosition} writes a control
sequence ending in L{CSFinalByte.DSR} with a parameter of 6
(the Device Status Report returns the current active
position.)
"""
self.protocol.reportCursorPosition()
self.assertEqual(self.transport.value(),
self.CSI + b'6' + CSFinalByte.DSR.value)
class DeprecationsTests(unittest.TestCase):
    """
    Tests to ensure deprecation of L{insults.colors} and L{insults.client}
    """

    def ensureDeprecated(self, message):
        """
        Ensures that the correct deprecation warning was issued.

        @param message: the exact deprecation message expected as the sole
            flushed warning.
        """
        warnings = self.flushWarnings()
        # BUG FIX: assert the count before indexing warnings[0]; previously a
        # missing warning failed with IndexError rather than a clean
        # assertion message.
        self.assertEqual(len(warnings), 1)
        self.assertIs(warnings[0]['category'], DeprecationWarning)
        self.assertEqual(warnings[0]['message'], message)

    def test_colors(self):
        """
        The L{insults.colors} module is deprecated
        """
        namedAny('twisted.conch.insults.colors')
        self.ensureDeprecated("twisted.conch.insults.colors was deprecated "
                              "in Twisted 10.1.0: Please use "
                              "twisted.conch.insults.helper instead.")

    def test_client(self):
        """
        The L{insults.client} module is deprecated
        """
        namedAny('twisted.conch.insults.client')
        self.ensureDeprecated("twisted.conch.insults.client was deprecated "
                              "in Twisted 10.1.0: Please use "
                              "twisted.conch.insults.insults instead.")
|
SnappleCap/oh-mainline | refs/heads/master | vendor/packages/south/south/tests/fakeapp/migrations/0001_spam.py | 173 | from south.db import db
from django.db import models
class Migration:
    # Initial South schema migration for the fakeapp test application.
    def forwards(self):
        # Model 'Spam'
        # Create the table backing the Spam model with its four columns.
        db.create_table("southtest_spam", (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('weight', models.FloatField()),
            ('expires', models.DateTimeField()),
            ('name', models.CharField(max_length=255))
        ))
    def backwards(self):
        # Exact reverse of forwards(): drop the table.
        db.delete_table("southtest_spam")
|
dlab-berkeley/collaboratool-archive | refs/heads/master | bsd2/vagrant-ansible/ansible/test/TestFilters.py | 4 | '''
Test bundled filters
'''
import unittest, tempfile, shutil
from ansible import playbook, inventory, callbacks
INVENTORY = inventory.Inventory(['localhost'])
BOOK = '''
- hosts: localhost
vars:
var: { a: [1,2,3] }
tasks:
- template: src=%s dest=%s
'''
SRC = '''
-
{{ var|to_json }}
-
{{ var|to_nice_json }}
-
{{ var|to_yaml }}
-
{{ var|to_nice_yaml }}
'''
DEST = '''
-
{"a": [1, 2, 3]}
-
{
"a": [
1,
2,
3
]
}
-
a: [1, 2, 3]
-
a:
- 1
- 2
- 3
'''
class TestFilters(unittest.TestCase):
    """Tests for the bundled template filters (to_json, to_nice_json,
    to_yaml, to_nice_yaml) -- the playbook-level test is currently disabled."""

    def setUp(self):
        # Private scratch directory so temp files cannot collide across runs.
        self.tmpdir = tempfile.mkdtemp(dir='/tmp')

    def tearDown(self):
        shutil.rmtree(self.tmpdir)

    def temp(self, name, data=''):
        '''write a temporary file and return the name'''
        name = self.tmpdir + '/' + name
        with open(name, 'w') as f:
            f.write(data)
        return name

    #def test_filters(self):
        # this test is pretty low level using a playbook, hence I am disabling it for now -- MPD.
        #return

        #src = self.temp('src.j2', SRC)
        #dest = self.temp('dest.txt')
        #book = self.temp('book', BOOK % (src, dest))

        #playbook.PlayBook(
        #    playbook = book,
        #    inventory = INVENTORY,
        #    transport = 'local',
        #    callbacks = callbacks.PlaybookCallbacks(),
        #    runner_callbacks = callbacks.DefaultRunnerCallbacks(),
        #    stats = callbacks.AggregateStats(),
        #).run()

        # BUG FIX: the two lines below belong to the disabled test above but
        # were left uncommented.  As class-body statements they referenced the
        # undefined name `dest`, so merely importing this module raised
        # NameError.  Commented out along with the rest of the test.
        #out = open(dest).read()
        #self.assertEqual(DEST, out)
|
katoken-0215/FikaNote | refs/heads/master | app/__init__.py | 12133432 | |
roverdotcom/pyexiv2 | refs/heads/master | test/__init__.py | 12133432 | |
comger/migrant | refs/heads/master | web/restful/account.py | 1 | # -*- coding:utf-8 -*-
"""
account action
author comger@gmail.com
"""
import json
from kpages import url
from kpages.model import ModelMaster
import tornado
from utility import RestfulHandler,BaseHandler
from logic.utility import *
from logic.account import INIT, ACTIVATED, IDENTIFIED
from logic.city import TName as T_CITY
from logic.label import add as addlabel
from logic.openfireusers import add as openfire_add
from utils.string_utils import random_key
AModel = ModelMaster()('AccountModel')
@url(r'/m/account/login')
class LoginHandler(BaseHandler):
    # Authenticate a user by username/password.  On success, store uid and
    # nickname in secure cookies (plus an admin marker cookie when the
    # account has isadmin == True) and return the record without its password.
    def post(self):
        r, v = AModel.login(self.get_argument('username'), self.get_argument('password'))
        if r:
            self.set_secure_cookie('uid', v['_id'])
            # Fall back to the username when no nickname has been set.
            self.set_secure_cookie('nickname', v.get('nickname', v['username']))
            if v.get('isadmin') == True:
                self.set_secure_cookie('__ADMIN_USER_ID', v['_id'])
            # Never echo the stored password back to the client.
            del v['password']
            self.write(dict(status=r, data=v))
        else:
            # On failure v carries the error payload from AModel.login.
            self.write(dict(status=False, data=v))
@url(r'/m/account/join')
class RegisterHandler(BaseHandler):
def post(self):
username = self.get_argument('username', None)
password = self.get_argument('password', None)
result, value = AModel.add(username, password,
self.get_argument('city', None), status=INIT)
'''
if result:
if '@' in username:
username, email_host = username.split('@')
openfire_add(username, password, username)
'''
self.write(dict(status=result, data=value))
@url(r'/m/auth/login')
class AuthLoginHandler(BaseHandler):
def post(self):
r,v = AModel.auth_login(self.get_argument('site'),
self.get_argument('otherid'),self.get_argument('name'))
if r:
self.set_secure_cookie('uid',v['_id'])
del v['password']
del v['status']
self.write(dict(status = r, data = v))
@url(r'/m/account/update')
class UpdateHandler(RestfulHandler):
    """Update the signed-in user's profile.

    Profession/skill/label values not already present on the stored account
    are first registered as global labels (types 0, 1 and 2 respectively).
    """

    def post(self):
        # POST is an alias for GET so clients may use either verb.
        self.get()

    def get(self):
        args = {
            'mobile': self.get_argument('mobile', None),
            'labels': self.get_arguments('labels'),
            'profession': self.get_arguments('profession'),
            'intro': self.get_argument('intro'),
            'skill': self.get_arguments('skill'),
            # BUG FIX: was ``self.get_argumet('area')`` (typo), which raised
            # AttributeError on every profile-update request.
            'area': self.get_argument('area'),
            'nickname': self.get_argument('nickname'),
            'parent_city': self.get_argument('parent_city', None),
            'city': self.get_argument('city', None)
        }
        uv = AModel.info(self.uid)
        # Register any not-yet-seen values as labels of the matching type.
        for i in args.get('profession'):
            if i and not i in uv.get('profession', ()):
                addlabel(0, i)
        for i in args.get('skill'):
            if i and not i in uv.get('skill', ()):
                addlabel(1, i)
        for i in args.get('labels'):
            if i and not i in uv.get('labels', ()):
                addlabel(2, i)
        v = AModel.update(self.uid, **args)
        self.write(dict(status=True, data=v))
@url(r'/m/account/resetpwd')
@url(r'/m/account/resetpwd/(.*)')
class ResetPwdHandler(RestfulHandler):
def post(self, _id=None):
old_password = self.get_argument('password')
new_password = self.get_argument('new_password')
confirm_password = self.get_argument('confirm_password')
if new_password == confirm_password:
r, v = AModel.reset_pwd(_id or self.uid, old_password, new_password)
self.write(dict(status=r, data=v))
else:
self.write(dict(status=False, data='DIFFERENT_PWD'))
@url(r'/m/account/forgot_password')
class ForgetPwdHandler(BaseHandler):
@tornado.web.asynchronous
def get(self):
username = self.get_argument('username', None)
r, v = AModel.forgot_pwd(username,self.request.host)
self.write(dict(status=r, data=v))
self.finish()
@url(r'/m/account/reset_forgotten_password')
class UpdateForgottenPwdHandler(BaseHandler):
    """Reset a forgotten password using the emailed reset key."""

    def post(self):
        key = self.get_argument('key')
        new_password = self.get_argument('new_password')
        confirm_password = self.get_argument('confirm_password')
        # SECURITY FIX: removed leftover debug ``print`` that wrote the reset
        # key and both plaintext passwords to stdout / server logs.
        if new_password != confirm_password:
            self.write(dict(status=False, data='两次输入的密码不一致'))
        else:
            r, v = AModel.reset_forgotten_password(key, new_password)
            self.write(dict(status=r, data=v))
@url(r'/m/account/info')
@url(r'/m/account/info/(.*)')
class InfoHandler(RestfulHandler):
def get(self, _id=None):
v = AModel.info(_id or self.uid)
if not v:
return self.write(dict(status=False, data=v))
if v and v.get('parent_city', None):
cr, cv = m_info(T_CITY, v['parent_city'])
v['parent_city'] = cv['name']
v['parent_city_id'] = cv['_id']
if v and v.get('city', None):
cr, cv = m_info(T_CITY, v['city'])
v['city'] = cv['name']
v['city_id'] = cv['_id']
if v and v.get('to_city', None):
cr, cv = m_info(T_CITY, v['to_city'])
v['to_city'] = cv['name']
v['to_city_id'] = cv['_id']
del v['password']
self.write(dict(status=True, data=v))
|
viki9698/jizhanggroup | refs/heads/master | djangotoolbox/errorviews.py | 70 | from django import http
from django.template import RequestContext, loader
def server_error(request, template_name='500.html'):
    """
    500 error handler.

    Templates: `500.html`
    Context:
        request_path
            The path of the requested URL (e.g., '/app/pages/bad_page/')
    """
    # You need to create a 500.html template.
    t = loader.get_template(template_name)
    # Render the template with the requested path in context and wrap it in
    # an HTTP 500 response.
    return http.HttpResponseServerError(
        t.render(RequestContext(request, {'request_path': request.path})))
|
drewandersonnz/openshift-tools | refs/heads/prod | openshift/installer/vendored/openshift-ansible-3.10.0-0.29.0/roles/openshift_management/filter_plugins/oo_management_filters.py | 46 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Filter methods for the management role
"""
def oo_filter_container_providers(results):
    """Summarize API responses from adding container providers.

    results - list of POST results; each item's 'json' carries either a
    'results' list (success) or an 'error' dict (failure).  Returns one
    human-readable summary string per recognized response.
    """
    summaries = []
    for response in results:
        payload = response['json']
        if 'results' in payload:
            # Success: the API echoes the created provider record back.
            provider = payload['results'][0]
            summaries.append(
                "Provider '{}' - Added successfully".format(provider['name']))
        elif 'error' in payload:
            # Failure: report the provider we tried to add and the reason.
            summaries.append(
                "Provider '{}' - Failed to add. Message: {}".format(
                    response['item']['name'],
                    payload['error']['message']))
    return summaries
class FilterModule(object):
    """ Custom ansible filter mapping """
    # pylint: disable=no-self-use, too-few-public-methods
    def filters(self):
        """ returns a mapping of filters to methods """
        mapping = {
            "oo_filter_container_providers": oo_filter_container_providers,
        }
        return mapping
|
kenshay/ImageScripter | refs/heads/master | Script_Runner/PYTHON/Tools/scripts/combinerefs.py | 15 | #! /usr/bin/env python3
"""
combinerefs path
A helper for analyzing PYTHONDUMPREFS output.
When the PYTHONDUMPREFS envar is set in a debug build, at Python shutdown
time Py_FinalizeEx() prints the list of all live objects twice: first it
prints the repr() of each object while the interpreter is still fully intact.
After cleaning up everything it can, it prints all remaining live objects
again, but the second time just prints their addresses, refcounts, and type
names (because the interpreter has been torn down, calling repr methods at
this point can get into infinite loops or blow up).
Save all this output into a file, then run this script passing the path to
that file. The script finds both output chunks, combines them, then prints
a line of output for each object still alive at the end:
address refcnt typename repr
address is the address of the object, in whatever format the platform C
produces for a %p format code.
refcnt is of the form
"[" ref "]"
when the object's refcount is the same in both PYTHONDUMPREFS output blocks,
or
"[" ref_before "->" ref_after "]"
if the refcount changed.
typename is object->ob_type->tp_name, extracted from the second PYTHONDUMPREFS
output block.
repr is repr(object), extracted from the first PYTHONDUMPREFS output block.
CAUTION: If object is a container type, it may not actually contain all the
objects shown in the repr: the repr was captured from the first output block,
and some of the containees may have been released since then. For example,
it's common for the line showing the dict of interned strings to display
strings that no longer exist at the end of Py_FinalizeEx; this can be recognized
(albeit painfully) because such containees don't have a line of their own.
The objects are listed in allocation order, with most-recently allocated
printed first, and the first object allocated printed last.
Simple examples:
00857060 [14] str '__len__'
The str object '__len__' is alive at shutdown time, and both PYTHONDUMPREFS
output blocks said there were 14 references to it. This is probably due to
C modules that intern the string "__len__" and keep a reference to it in a
file static.
00857038 [46->5] tuple ()
46-5 = 41 references to the empty tuple were removed by the cleanup actions
between the times PYTHONDUMPREFS produced output.
00858028 [1025->1456] str '<dummy key>'
The string '<dummy key>', which is used in dictobject.c to overwrite a real
key that gets deleted, grew several hundred references during cleanup. It
suggests that stuff did get removed from dicts by cleanup, but that the dicts
themselves are staying alive for some reason. """
import re
import sys
# Generate lines from fileiter. If whilematch is true, continue reading
# while the regexp object pat matches line. If whilematch is false, lines
# are read so long as pat doesn't match them. In any case, the first line
# that doesn't match pat (when whilematch is true), or that does match pat
# (when whilematch is false), is lost, and fileiter will resume at the line
# following it.
def read(fileiter, pat, whilematch):
    # Yield lines from fileiter for as long as `bool(pat.match(line))`
    # equals `whilematch`.  The first line that breaks the condition is
    # consumed from the iterator and discarded, so fileiter resumes at the
    # line after it.
    for candidate in fileiter:
        if bool(pat.match(candidate)) != whilematch:
            break
        yield candidate
def combine(fname):
    """Merge the two PYTHONDUMPREFS output blocks found in the file named
    fname and print one line per object still alive at the very end:

        address [refcnt] typename repr

    See the module docstring for the full output format.
    """
    # Matches "address [refcount] rest-of-line" in both blocks.
    crack = re.compile(r'([a-zA-Z\d]+) \[(\d+)\] (.*)')

    addr2rc = {}
    addr2guts = {}
    before = 0
    after = 0

    # BUG FIX: use a with-statement so the file is closed even if parsing
    # raises partway through; the original open()/close() pair leaked the
    # handle on any exception.
    with open(fname) as f:
        fi = iter(f)

        # Skip everything up to and including the first marker line.
        for line in read(fi, re.compile(r'^Remaining objects:$'), False):
            pass

        # First block: "address [rc] repr" lines, ended by the second marker.
        for line in read(fi, re.compile(r'^Remaining object addresses:$'), False):
            m = crack.match(line)
            if m:
                # Tuple assignment binds left to right, so `addr` is already
                # set when it is used as the subscript for the two dicts.
                addr, addr2rc[addr], addr2guts[addr] = m.groups()
                before += 1
            else:
                print('??? skipped:', line)

        # Second block: "address [rc] typename" lines.
        for line in read(fi, crack, True):
            after += 1
            m = crack.match(line)
            assert m
            addr, rc, guts = m.groups()  # guts is type name here
            if addr not in addr2rc:
                print('??? new object created while tearing down:', line.rstrip())
                continue
            print(addr, end=' ')
            if rc == addr2rc[addr]:
                print('[%s]' % rc, end=' ')
            else:
                print('[%s->%s]' % (addr2rc[addr], rc), end=' ')
            print(guts, addr2guts[addr])

    print("%d objects before, %d after" % (before, after))
if __name__ == '__main__':
combine(sys.argv[1])
|
Suite5/DataColibri | refs/heads/master | allauth/socialaccount/providers/__init__.py | 12 | from django.conf import settings
from django.utils import importlib
class ProviderRegistry(object):
    # Lazy registry mapping provider ids to provider instances, populated by
    # importing each installed app's ``provider`` module on first use.
    def __init__(self):
        # provider id -> provider instance (filled in by register())
        self.provider_map = {}
        self.loaded = False
    def get_list(self):
        # Return all registered provider instances.
        self.load()
        return self.provider_map.values()
    def register(self, cls):
        # Instantiate the provider class and store it under its ``id``.
        self.load()
        self.provider_map[cls.id] = cls()
    def by_id(self, id):
        # Look up a provider instance by id; raises KeyError if unknown.
        self.load()
        return self.provider_map[id]
    def as_choices(self):
        # Yield (id, name) pairs, e.g. for a form field's choices.
        self.load()
        for provider in self.get_list():
            yield (provider.id, provider.name)
    def load(self):
        # Import "<app>.provider" for every installed app exactly once;
        # presumably those modules call register() as an import side effect
        # -- verify against the provider modules.
        # NOTE(review): not thread-safe; concurrent first calls may race on
        # self.loaded -- confirm callers serialize startup.
        if not self.loaded:
            for app in settings.INSTALLED_APPS:
                provider_module = app + '.provider'
                try:
                    importlib.import_module(provider_module)
                except ImportError:
                    pass
            self.loaded = True
|
kramwens/order_bot | refs/heads/master | venv/lib/python2.7/site-packages/twilio/rest/resources/base.py | 32 | import logging
import os
import platform
from six import (
integer_types,
string_types,
binary_type,
iteritems
)
from ...compat import urlencode
from ...compat import urlparse
from ...compat import urlunparse
from ... import __version__
from ...exceptions import TwilioException
from ..exceptions import TwilioRestException
from .connection import Connection
from .imports import parse_qs, httplib2, json
from .util import (
parse_iso_date,
parse_rfc2822_date,
transform_params,
UNSET_TIMEOUT,
)
logger = logging.getLogger('twilio')
class Response(object):
    """
    Take a httplib2 response and turn it into a requests response
    """
    def __init__(self, httplib_resp, content, url):
        # Mirror the attributes the rest of the library reads from a
        # requests-style response object.
        status = int(httplib_resp.status)
        self.content = content
        self.cached = False
        self.status_code = status
        # Anything below 400 counts as a successful response.
        self.ok = status < 400
        self.url = url
def get_cert_file():
    """ Get the cert file location or bail """
    # XXX - this currently fails test coverage because we don't actually go
    # over the network anywhere. Might be good to have a test that stands up a
    # local server and authenticates against it.
    try:
        # __file__ is apparently not available in every environment, hence
        # the broad try/except around the whole lookup.
        here = os.path.realpath(__file__)
        bundled = os.path.join(here, "..", "..", "..", "conf", "cacert.pem")
        return os.path.abspath(bundled)
    except Exception:
        # None means use the default system file
        return None
def make_request(method, url, params=None, data=None, headers=None,
cookies=None, files=None, auth=None, timeout=None,
allow_redirects=False, proxies=None):
"""Sends an HTTP request
:param str method: The HTTP method to use
:param str url: The URL to request
:param dict params: Query parameters to append to the URL
:param dict data: Parameters to go in the body of the HTTP request
:param dict headers: HTTP Headers to send with the request
:param float timeout: Socket/Read timeout for the request
:return: An http response
:rtype: A :class:`Response <models.Response>` object
See the requests documentation for explanation of all these parameters
Currently proxies, files, and cookies are all ignored
"""
http = httplib2.Http(
timeout=timeout,
ca_certs=get_cert_file(),
proxy_info=Connection.proxy_info(),
)
http.follow_redirects = allow_redirects
if auth is not None:
http.add_credentials(auth[0], auth[1])
def encode_atom(atom):
if isinstance(atom, (integer_types, binary_type)):
return atom
elif isinstance(atom, string_types):
return atom.encode('utf-8')
else:
raise ValueError('list elements should be an integer, '
'binary, or string')
if data is not None:
udata = {}
for k, v in iteritems(data):
key = k.encode('utf-8')
if isinstance(v, (list, tuple, set)):
udata[key] = [encode_atom(x) for x in v]
elif isinstance(v, (integer_types, binary_type, string_types)):
udata[key] = encode_atom(v)
else:
raise ValueError('data should be an integer, '
'binary, or string, or sequence ')
data = urlencode(udata, doseq=True)
if params is not None:
enc_params = urlencode(params, doseq=True)
if urlparse(url).query:
url = '%s&%s' % (url, enc_params)
else:
url = '%s?%s' % (url, enc_params)
resp, content = http.request(url, method, headers=headers, body=data)
# Format httplib2 request as requests object
return Response(resp, content.decode('utf-8'), url)
def make_twilio_request(method, uri, **kwargs):
    """
    Make a request to Twilio. Throws an error

    :param str method: the HTTP method to use
    :param str uri: the request URI (".json" is appended when the
        ``use_json_extension`` keyword is truthy)
    :return: a requests-like HTTP response
    :rtype: :class:`RequestsResponse`
    :raises TwilioRestException: if the response is a 400
        or 500-level response.
    """
    headers = kwargs.get("headers", {})

    user_agent = "twilio-python/%s (Python %s)" % (
        __version__,
        platform.python_version(),
    )
    headers["User-Agent"] = user_agent
    headers["Accept-Charset"] = "utf-8"

    if method == "POST" and "Content-Type" not in headers:
        headers["Content-Type"] = "application/x-www-form-urlencoded"

    kwargs["headers"] = headers

    if "Accept" not in headers:
        headers["Accept"] = "application/json"

    if kwargs.pop('use_json_extension', False):
        uri += ".json"

    resp = make_request(method, uri, **kwargs)

    if not resp.ok:
        try:
            error = json.loads(resp.content)
            code = error["code"]
            message = error["message"]
        # BUG FIX: was a bare ``except:`` which also swallowed
        # KeyboardInterrupt/SystemExit; only parse/lookup failures should
        # fall back to using the raw body as the message.
        except Exception:
            code = None
            message = resp.content

        raise TwilioRestException(status=resp.status_code, method=method,
                                  uri=resp.url, msg=message, code=code)

    return resp
class Resource(object):
    """A REST Resource"""
    name = "Resource"
    # When True, ".json" is appended to request URIs (legacy API style).
    use_json_extension = False
    def __init__(self, base_uri, auth, timeout=UNSET_TIMEOUT):
        self.base_uri = base_uri
        # auth is forwarded to make_twilio_request; presumably a
        # (username, password)-style pair -- verify against callers.
        self.auth = auth
        self.timeout = timeout
    def __eq__(self, other):
        return (isinstance(other, self.__class__) and
                self.__dict__ == other.__dict__)
    def __hash__(self):
        # NOTE(review): frozenset(self.__dict__) iterates only the attribute
        # *names*, so all instances of a class hash identically.  Legal
        # (equal objects hash equal) but degrades dict/set performance --
        # confirm whether hashing on the values was intended.
        return hash(frozenset(self.__dict__))
    def __ne__(self, other):
        return not self.__eq__(other)
    def request(self, method, uri, **kwargs):
        """
        Send an HTTP request to the resource.

        :raises: a :exc:`~twilio.TwilioRestException`
        """
        if 'timeout' not in kwargs and self.timeout is not UNSET_TIMEOUT:
            kwargs['timeout'] = self.timeout
        kwargs['use_json_extension'] = self.use_json_extension
        resp = make_twilio_request(method, uri, auth=self.auth, **kwargs)
        logger.debug(resp.content)
        # DELETE responses have no body worth parsing.
        if method == "DELETE":
            return resp, {}
        else:
            return resp, json.loads(resp.content)
    @property
    def uri(self):
        # Full resource URI: "<base_uri>/<name>".
        format = (self.base_uri, self.name)
        return "%s/%s" % format
class InstanceResource(Resource):
""" The object representation of an instance response from the Twilio API
:param parent: The parent list class for this instance resource.
For example, the parent for a :class:`~twilio.rest.resources.Call`
would be a :class:`~twilio.rest.resources.Calls` object.
:type parent: :class:`~twilio.rest.resources.ListResource`
:param str sid: The 34-character unique identifier for this instance
"""
subresources = []
id_key = "sid"
use_json_extension = True
def __init__(self, parent, sid):
self.parent = parent
self.name = sid
super(InstanceResource, self).__init__(
parent.uri,
parent.auth,
parent.timeout
)
def load(self, entries):
if "from" in entries.keys():
entries["from_"] = entries["from"]
del entries["from"]
if "uri" in entries.keys():
del entries["uri"]
for key in entries.keys():
if (key.startswith("date_") and
isinstance(entries[key], string_types)):
entries[key] = self._parse_date(entries[key])
self.__dict__.update(entries)
def load_subresources(self):
"""
Load all subresources
"""
for resource in self.subresources:
list_resource = resource(
self.uri,
self.parent.auth,
self.parent.timeout
)
self.__dict__[list_resource.key] = list_resource
def update_instance(self, **kwargs):
""" Make a POST request to the API to update an object's properties
:return: None, this is purely side effecting
:raises: a :class:`~twilio.rest.RestException` on failure
"""
a = self.parent.update(self.name, **kwargs)
self.load(a.__dict__)
def delete_instance(self):
""" Make a DELETE request to the API to delete the object
:return: None, this is purely side effecting
:raises: a :class:`~twilio.rest.RestException` on failure
"""
return self.parent.delete(self.name)
def _parse_date(self, s):
return parse_rfc2822_date(s)
def __str__(self):
return "<%s %s>" % (self.__class__.__name__, self.name[0:5])
class NextGenInstanceResource(InstanceResource):
    """Instance resource for next-generation endpoints: request URIs carry
    no ``.json`` suffix and dates are parsed with ``parse_iso_date``."""

    use_json_extension = False

    # NOTE: the original defined an __init__ that only forwarded *args and
    # **kwargs to super(); it added nothing, so it has been removed --
    # InstanceResource.__init__ is inherited unchanged.

    def _parse_date(self, s):
        """Parse an ISO 8601 date string (overrides the RFC 2822 parser)."""
        return parse_iso_date(s)
class ListResource(Resource):
    """A list resource: fetches, creates, updates and deletes the
    :class:`InstanceResource` objects it contains."""

    name = "Resources"
    instance = InstanceResource
    use_json_extension = True

    def __init__(self, *args, **kwargs):
        super(ListResource, self).__init__(*args, **kwargs)

        # Subclasses may set `key` (the JSON field holding the item list);
        # default to the lowercased resource name.
        try:
            self.key
        except AttributeError:
            self.key = self.name.lower()

    def get(self, sid):
        """ Get an instance resource by its sid

        Usage:

        .. code-block:: python

            message = client.messages.get("SM1234")
            print message.body

        :rtype: :class:`~twilio.rest.resources.InstanceResource`
        :raises: a :exc:`~twilio.TwilioRestException` if a resource with that
            sid does not exist, or the request fails
        """
        return self.get_instance(sid)

    def get_instance(self, sid):
        """Request the specified instance resource"""
        uri = "%s/%s" % (self.uri, sid)
        resp, item = self.request("GET", uri)
        return self.load_instance(item)

    def get_instances(self, params):
        """
        Query the list resource for a list of InstanceResources.

        Raises a :exc:`~twilio.TwilioRestException` if requesting a page of
        results that does not exist.

        :param dict params: List of URL parameters to be included in request
        :param int page: The page of results to retrieve (most recent at 0)
        :param int page_size: The number of results to be returned.

        :returns: -- the list of resources
        """
        params = transform_params(params)

        resp, page = self.request("GET", self.uri, params=params)

        if self.key not in page:
            raise TwilioException("Key %s not present in response" % self.key)

        return [self.load_instance(ir) for ir in page[self.key]]

    def create_instance(self, body):
        """
        Create an InstanceResource via a POST to the List Resource

        :param dict body: Dictionary of POST data
        """
        resp, instance = self.request("POST", self.uri,
                                      data=transform_params(body))

        if resp.status_code not in (200, 201):
            raise TwilioRestException(resp.status_code,
                                      self.uri, "Resource not created")

        return self.load_instance(instance)

    def delete_instance(self, sid):
        """
        Delete an InstanceResource via DELETE

        body: string -- HTTP Body for the quest
        """
        uri = "%s/%s" % (self.uri, sid)
        resp, instance = self.request("DELETE", uri)
        return resp.status_code == 204

    def update_instance(self, sid, body):
        """
        Update an InstanceResource via a POST

        sid: string -- String identifier for the list resource
        body: dictionary -- Dict of items to POST
        """
        uri = "%s/%s" % (self.uri, sid)
        resp, entry = self.request("POST", uri, data=transform_params(body))
        return self.load_instance(entry)

    def iter(self, **kwargs):
        """ Return all instance resources using an iterator

        This will fetch a page of resources from the API and yield them in
        turn. When the page is exhausted, this will make a request to the API
        to retrieve the next page. Hence you may notice a pattern - the library
        will loop through 50 objects very quickly, but there will be a delay
        retrieving the 51st as the library must make another request to the API
        for resources.

        Example usage:

        .. code-block:: python

            for message in client.messages:
                print message.sid
        """
        params = transform_params(kwargs)

        while True:
            resp, page = self.request("GET", self.uri, params=params)

            if self.key not in page:
                # BUG FIX: raising StopIteration inside a generator is a
                # RuntimeError under PEP 479 (Python 3.7+); a plain return
                # ends the generator with identical semantics.
                return

            for ir in page[self.key]:
                yield self.load_instance(ir)

            if not page.get('next_page_uri', ''):
                # BUG FIX: same PEP 479 issue as above.
                return

            o = urlparse(page['next_page_uri'])
            params.update(parse_qs(o.query))

    def load_instance(self, data):
        # Build the typed instance, populate its fields, then attach any
        # declared subresources.
        instance = self.instance(self, data[self.instance.id_key])
        instance.load(data)
        instance.load_subresources()
        return instance

    def __str__(self):
        return '<%s>' % (self.__class__.__name__)

    def list(self, **kw):
        """Query the list resource for a list of InstanceResources.

        :param int page: The page of results to retrieve (most recent at 0)
        :param int page_size: The number of results to be returned.
        """
        return self.get_instances(kw)
class NextGenListResource(ListResource):
    """List resource for next-gen APIs: no '.json' suffix, and pagination
    metadata (key, next_page_url) carried under a 'meta' object."""

    name = "Resources"
    instance = NextGenInstanceResource
    use_json_extension = False

    def __init__(self, *args, **kwargs):
        super(NextGenListResource, self).__init__(*args, **kwargs)

    def iter(self, **kwargs):
        """ Return all instance resources using an iterator
        This will fetch a page of resources from the API and yield them in
        turn. When the page is exhausted, this will make a request to the API
        to retrieve the next page. Hence you may notice a pattern - the library
        will loop through 50 objects very quickly, but there will be a delay
        retrieving the 51st as the library must make another request to the API
        for resources.
        Example usage:
        .. code-block:: python
            for message in client.messages:
                print message.sid
        """
        params = urlencode(transform_params(kwargs))
        # Splice the encoded query string into the list URI, keeping the
        # fragment component intact.
        parsed = urlparse(self.uri)
        url = urlunparse(parsed[:4] + (params, ) + (parsed[5], ))
        while True:
            resp, page = self.request("GET", url)
            key = page.get('meta', {}).get('key')
            if key is None or key not in page:
                # BUG FIX: 'raise StopIteration()' inside a generator is a
                # RuntimeError under PEP 479 (Python 3.7+); 'return' ends
                # the generator cleanly.
                return
            for ir in page[key]:
                yield self.load_instance(ir)
            # Next-gen APIs return a fully-formed URL for the next page.
            url = page.get('meta', {}).get('next_page_url')
            if not url:
                return

    def get_instances(self, params):
        """
        Query the list resource for a list of InstanceResources.
        Raises a :exc:`~twilio.TwilioRestException` if requesting a page of
        results that does not exist.
        :param dict params: List of URL parameters to be included in request
        :param int page: The page of results to retrieve (most recent at 0)
        :param int page_size: The number of results to be returned.
        :returns: -- the list of resources
        """
        params = transform_params(params)
        resp, page = self.request("GET", self.uri, params=params)
        key = page.get('meta', {}).get('key')
        if key is None:
            raise TwilioException(
                "Unable to determine resource key from response"
            )
        if key not in page:
            raise TwilioException("Key %s not present in response" % key)
        return [self.load_instance(ir) for ir in page[key]]
|
mdaniel/intellij-community | refs/heads/master | python/testData/completion/notImportedQualifiedName/VariantsFromInternalSkeletonsExcludedUnlessExported/site-packages/mypackage/__init__.py | 18 | from ._impl import func_exported
|
arlolra/exitaddr | refs/heads/master | server.py | 1 | #!/usr/bin/env python
import json
from twisted.web import server, resource
from twisted.internet import reactor
from common import Exitaddr, options
DEFAULT_PORT = 8080
exitaddr_results = None
def addHeader(request):
    """Mark the response as JSON by setting its content-type header."""
    request.responseHeaders.addRawHeader(b"content-type", b"application/json")
class Res(resource.Resource):
    """Base resource that treats a trailing slash as the resource itself."""

    def getChild(self, name, request):
        ''' handle trailing / '''
        # A request for "/path/" yields an empty child segment; serve this
        # resource rather than falling through to a 404.
        if name == '':
            return self
        return resource.Resource.getChild(self, name, request)
class Exits(Res):
    ''' json dump of our state '''

    def render_GET(self, request):
        # exitaddr_results is the module-level dict published by
        # Ser.finished() once a scan completes; None until then.
        addHeader(request)
        return json.dumps(exitaddr_results, indent=4)
class IP(Res):
    ''' json response with the remote host ip '''

    def render_GET(self, request):
        host = request.transport.getPeer().host
        # BUG FIX: the header name was misspelled "X-Forwared-For", so the
        # proxy-supplied client address was never picked up.
        header = request.received_headers.get("X-Forwarded-For", None)
        if header is not None:
            # Use the last (nearest-proxy) entry in the comma-separated chain.
            host = header.split(',')[-1].strip()
        response = {"IP": host}
        addHeader(request)
        return json.dumps(response, indent=4)
class Ser(Exitaddr):
    """Exitaddr subclass that records scan results for the web resources."""

    def __init__(self, *args, **kwargs):
        Exitaddr.__init__(self, *args, **kwargs)
        # Count of exits whose test failed.
        self.fld = 0

    def passed(self, result):
        pass

    def failed(self, result):
        # result[0] appears to be a router descriptor; id_hex[1:] drops the
        # leading character of the fingerprint -- TODO confirm against common.py
        print result[0].id_hex[1:], "failed"
        self.fld += 1

    def finished(self, results):
        # Publish {exit_key: address} to the module global served by Exits.
        global exitaddr_results
        res = {}
        for key in results.keys():
            res[key] = results[key][1]
        exitaddr_results = res
        print ""
        print "failed", self.fld
        print "exit list ready!"
def main():
    """Wire up the HTTP endpoints, start the exit scan, and run the reactor."""
    root = resource.Resource()
    root.putChild("exits", Exits())
    root.putChild("ip", IP())
    reactor.listenTCP(DEFAULT_PORT, server.Site(root))
    # sample a few for now
    options.num_exits = 25
    exitaddr = Ser(reactor, options)
    print "listening on", DEFAULT_PORT
    # exitaddr.start() is expected to run the reactor / begin the scan.
    exitaddr.start()
if __name__ == "__main__":
main()
|
python-security/pyt | refs/heads/master | pyt/formatters/screen.py | 1 | """This formatter outputs the issues as color-coded text."""
from ..vulnerabilities.vulnerability_helper import SanitisedVulnerability, UnknownVulnerability
# ANSI escape sequences used to decorate terminal output.
RESET = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
DANGER = '\033[31m'
GOOD = '\033[32m'
HIGHLIGHT = '\033[45;1m'
RED_ON_WHITE = '\033[31m\033[107m'


def color(string, color_string):
    """Wrap *string* in the given ANSI escape code, resetting afterwards."""
    return '{}{}{}'.format(color_string, string, RESET)
def report(
    vulnerabilities,
    fileobj,
    print_sanitised,
):
    """
    Prints issues in color-coded text format.
    Args:
        vulnerabilities: list of vulnerabilities to report
        fileobj: The output file object, which may be sys.stdout
        print_sanitised: whether sanitised vulnerabilities are also printed
    """
    n_vulnerabilities = len(vulnerabilities)
    unsanitised_vulnerabilities = [v for v in vulnerabilities if not isinstance(v, SanitisedVulnerability)]
    n_unsanitised = len(unsanitised_vulnerabilities)
    n_sanitised = n_vulnerabilities - n_unsanitised
    # e.g. "No vulnerabilities found.", "1 vulnerability found (plus 2 sanitised)."
    heading = "{} vulnerabilit{} found{}.\n".format(
        'No' if n_unsanitised == 0 else n_unsanitised,
        'y' if n_unsanitised == 1 else 'ies',
        " (plus {} sanitised)".format(n_sanitised) if n_sanitised else "",
    )
    vulnerabilities_to_print = vulnerabilities if print_sanitised else unsanitised_vulnerabilities
    # NOTE(review): 'with fileobj' closes the stream on exit -- including
    # sys.stdout if that was passed in; confirm this is intentional.
    with fileobj:
        for i, vulnerability in enumerate(vulnerabilities_to_print, start=1):
            fileobj.write(vulnerability_to_str(i, vulnerability))
        # Summary line last: green when clean, red otherwise.
        if n_unsanitised == 0:
            fileobj.write(color(heading, GOOD))
        else:
            fileobj.write(color(heading, DANGER))
def vulnerability_to_str(i, vulnerability):
    """Render one vulnerability as a multi-line, colorized text section."""
    lines = []
    lines.append(color('Vulnerability {}'.format(i), UNDERLINE))
    lines.append('File: {}'.format(color(vulnerability.source.path, BOLD)))
    lines.append(
        'User input at line {}, source "{}":'.format(
            vulnerability.source.line_number,
            color(vulnerability.source_trigger_word, HIGHLIGHT),
        )
    )
    lines.append('\t{}'.format(color(vulnerability.source.label, RED_ON_WHITE)))
    if vulnerability.reassignment_nodes:
        # Show the taint propagation chain, grouping consecutive nodes from
        # the same file under a single "File:" line.
        previous_path = None
        lines.append('Reassigned in:')
        for node in vulnerability.reassignment_nodes:
            if node.path != previous_path:
                lines.append('\tFile: {}'.format(node.path))
                previous_path = node.path
            label = node.label
            if (
                isinstance(vulnerability, SanitisedVulnerability) and
                node.label == vulnerability.sanitiser.label
            ):
                # Highlight the sanitising assignment in green.
                label = color(label, GOOD)
            lines.append(
                '\t Line {}:\t{}'.format(
                    node.line_number,
                    label,
                )
            )
    if vulnerability.source.path != vulnerability.sink.path:
        # Only repeat the file name when the sink is in a different file.
        lines.append('File: {}'.format(color(vulnerability.sink.path, BOLD)))
    lines.append(
        'Reaches line {}, sink "{}"'.format(
            vulnerability.sink.line_number,
            color(vulnerability.sink_trigger_word, HIGHLIGHT),
        )
    )
    lines.append('\t{}'.format(
        color(vulnerability.sink.label, RED_ON_WHITE)
    ))
    if isinstance(vulnerability, SanitisedVulnerability):
        lines.append(
            'This vulnerability is {}{} by {}'.format(
                color('potentially ', BOLD) if not vulnerability.confident else '',
                color('sanitised', GOOD),
                color(vulnerability.sanitiser.label, BOLD),
            )
        )
    elif isinstance(vulnerability, UnknownVulnerability):
        lines.append(
            'This vulnerability is unknown due to "{}"'.format(
                color(vulnerability.unknown_assignment.label, BOLD),
            )
        )
    return '\n'.join(lines) + '\n\n'
|
pastebt/dwm | refs/heads/master | haiuken.py | 1 | #!/usr/bin/env python
# -*- coding: utf8 -*-
import re
import sys
import json
from base64 import b64decode
try:
import urllib.parse as urllib
except ImportError:
import urllib
from mybs import MyHtmlParser
from comm import DWM, match1, echo, start, get_kind_size, UTITLE
class HYG(DWM):  # http://haiuken.com/ 海宇根
    """Downloader handler for haiuken.com theatre pages."""

    handle_list = ['/haiuken.com/']

    def query_info(self, url):
        # http://haiuken.com/theatre/2muu/
        vid = match1(url, r'haiuken.com/theatre/([^/]+)/')
        echo("vid=", vid)
        hutf = self.get_hutf(url)
        m = MyHtmlParser(tidy=False)
        m.feed(hutf)
        if self.title == UTITLE:
            # No user-supplied title: fall back to the page <title>,
            # stripping the site's "Theatre - " prefix.
            title = m.select("head title")[0].text
            if title.startswith("Theatre - "):
                title = title[10:]
        else:
            title = self.title
        # Collect the host part of each thumbnail URL; the ajax endpoint
        # expects that list as its xJson payload.
        ret = m.select(".bg2 .tmpl img")
        ips = json.dumps([r['src'].split("://")[1].split('/')[0] for r in ret])
        d = {"xEvent": "UIMovieComments.Error",
             "xJson": ips}
        hutf = self.get_html("http://haiuken.com/ajax/theatre/%s/" % vid,
                             postdata=urllib.urlencode(d).encode("utf8"))
        ret = json.loads(hutf)
        # The media URL comes back base64-encoded in Data.Error.
        url = b64decode(ret['Data']['Error'].encode('utf8')).decode('utf8')
        # Return signature follows DWM: (title, ext, urls, extra) --
        # TODO confirm against comm.py.
        return title, None, [url], None
if __name__ == '__main__':
start(HYG)
|
whiskyechobravo/flask-debut | refs/heads/master | debut/__init__.py | 12133432 | |
eeroniemi/dd-agent | refs/heads/master | tests/checks/mock/__init__.py | 12133432 | |
vibhorag/scikit-learn | refs/heads/master | sklearn/datasets/tests/__init__.py | 12133432 | |
AltSchool/django | refs/heads/master | django/contrib/admindocs/tests/__init__.py | 12133432 | |
tessera-metrics/tessera | refs/heads/master | tessera-server/tessera/client/api/__init__.py | 12133432 | |
keishi/chromium | refs/heads/master | build/escape_unicode.py | 155 | #!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Convert any unicode characters found in the input file to C literals."""
import codecs
import optparse
import os
import sys
def main(argv):
    """Command-line entry point: validate arguments, escape the input file.

    Returns 0 on success, 1 on usage errors (Unix exit-code convention).
    """
    parser = optparse.OptionParser()
    usage = 'Usage: %prog -o <output_dir> <input_file>'
    parser.set_usage(usage)
    parser.add_option('-o', dest='output_dir')
    options, arglist = parser.parse_args(argv)
    if not options.output_dir:
        print "output_dir required"
        return 1
    if len(arglist) != 2:
        # arglist[0] is the program name; exactly one positional arg expected.
        print "input_file required"
        return 1
    in_filename = arglist[1]
    if not in_filename.endswith('.utf8'):
        print "input_file should end in .utf8"
        return 1
    # Output name is the input basename with the trailing '.utf8' stripped.
    out_filename = os.path.join(options.output_dir, os.path.basename(
        os.path.splitext(in_filename)[0]))
    WriteEscapedFile(in_filename, out_filename)
    return 0
def WriteEscapedFile(in_filename, out_filename):
    """Copy in_filename to out_filename, escaping non-ASCII characters.

    Characters above 0x7f are written as escaped UTF-8 byte literals (the
    repr of the encoded bytes, quotes stripped); an empty string pair '""'
    is inserted when the next character is a hex digit so the \\xNN escape
    is not extended by the following literal text.
    """
    # FIX: close the input file deterministically instead of relying on
    # refcount-based finalization of the anonymous handle.
    with codecs.open(in_filename, 'r', 'utf8') as in_file:
        input_data = in_file.read()
    with codecs.open(out_filename, 'w', 'ascii') as out_file:
        for i, char in enumerate(input_data):
            if ord(char) > 127:
                out_file.write(repr(char.encode('utf8'))[1:-1])
                # A following hex digit would merge into the escape; '""'
                # terminates the C literal safely.
                if input_data[i + 1:i + 2] in '0123456789abcdefABCDEF':
                    out_file.write('""')
            else:
                out_file.write(char.encode('ascii'))
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
QwertyManiac/seal-cdh4 | refs/heads/master | seal/lib/aligner/bwa/bwa_core.py | 1 | # Copyright (C) 2011-2012 CRS4.
#
# This file is part of Seal.
#
# Seal is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Seal is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Seal. If not, see <http://www.gnu.org/licenses/>.
import os
import ctypes as ct
from constants import *
libbwa_path = os.path.join(os.path.split(__file__)[0], 'libbwa.so')
libbwa = ct.CDLL(libbwa_path)
Q_OFFSET = {
"fastq-sanger": 33,
"fastq-solexa": 64,
"fastq-illumina": 64
}
################
# Basic types
################
p_char_p = ct.POINTER(ct.c_char_p)
################
def make_cigar(raw_cigar, n_cigar, read_len):
    """Decode BWA's packed cigar array into a list of (length, op) tuples.

    :param raw_cigar: ctypes array of packed cigar entries.
    :param n_cigar: number of valid entries in ``raw_cigar``.
    :param read_len: read length, used when no cigar entries exist (BWA
        encodes a gapless alignment as an empty cigar).
    """
    if n_cigar == 0:
        # No explicit cigar: the whole read aligned as a single match run.
        return [(read_len, 'M')]
    cigar = []
    for k in xrange(n_cigar):
        # Low bits carry the operation length; high bits select the
        # operation (0=M, 1=I, 2=D, 3=S). Masks come from constants.py.
        size = raw_cigar[k] & CIGAR_LN_MASK
        op = "MIDS"[raw_cigar[k]>>CIGAR_OP_SHIFT]
        cigar.append((size, op))
    return cigar
def get_cigar_pos_end(cigar, pos):
    """Return the reference position one past the alignment's last base.

    Only 'M' (match/mismatch) and 'D' (deletion) operations consume
    reference coordinates, so only their lengths advance the position.
    """
    return pos + sum(size for size, op in cigar if op in 'MD')
#-STRUCT-WRAPPERS--------------------------------------------------------------
class gap_opt_t(ct.Structure):
_fields_ = [("s_mm", ct.c_int32),
("s_gapo", ct.c_int32),
("s_gape", ct.c_int32),
("mode", ct.c_int32),
("indel_end_skip", ct.c_int32),
("max_del_occ", ct.c_int32),
("max_entries", ct.c_int32),
("fnr", ct.c_float),
("max_diff", ct.c_int32),
("max_gapo", ct.c_int32),
("max_gape", ct.c_int32),
("max_seed_diff", ct.c_int32),
("seed_len", ct.c_int32),
("n_threads", ct.c_int32),
("max_top2", ct.c_int32),
("trim_qual", ct.c_int32),
]
class pe_opt_t(ct.Structure):
_fields_ = [("max_isize", ct.c_int),
("max_occ", ct.c_int),
("n_multi", ct.c_int),
("N_multi", ct.c_int),
("type", ct.c_int),
("is_sw", ct.c_int),
("is_preload", ct.c_int),
("ap_prior", ct.c_double)]
class bwt_t(ct.Structure):
_fields_ = [("primary", ct.c_uint32), #S^{-1}(0), or the primary index of BWT
("L2", ct.c_uint32 * 5), #C(), cumulative count
("seq_len", ct.c_uint32), # sequence length
("bwt_size", ct.c_uint32), # size of bwt, about seq_len/4
("bwt", ct.POINTER(ct.c_uint32)), # BWT
("cnt_table", ct.c_uint32 * 256), # cnt_table[256]
("sa_intv", ct.c_int32),
("n_sa", ct.c_uint32),
("sa", ct.POINTER(ct.c_uint32))]
class bntann1_t(ct.Structure):
_fields_ = [("offset", ct.c_int64),
("len", ct.c_int32),
("n_ambs", ct.c_int32),
("gi", ct.c_uint32),
("name", ct.c_char_p),
("anno", ct.c_char_p)]
class bntamb1_t(ct.Structure):
_fields_ = [("offset", ct.c_int64),
("len", ct.c_int32),
("amb", ct.c_char)]
class bntseq_t(ct.Structure):
_fields_ = [("l_pac", ct.c_int64),
("n_seqs", ct.c_int32),
("seed", ct.c_uint32),
("anns", ct.POINTER(bntann1_t)), # n_seqs elements
("n_holes", ct.c_int32),
("ambs", ct.POINTER(bntamb1_t)), # n_holes elements
("fp_pac", ct.c_void_p)] # this is really a FILE *fp_pac
class bwt_width_t(ct.Structure):
_fields_ = [('w', ct.c_uint32),
('bid', ct.c_int32)]
class bwt_aln1_t(ct.Structure):
_fields_ = [('n_mm', ct.c_uint32, 8),
('n_gapo', ct.c_uint32, 8),
('n_gape', ct.c_uint32, 8),
('a', ct.c_uint32, 1),
('k', ct.c_uint32),
('l', ct.c_uint32),
('score', ct.c_int32)]
# CRS4 patch: 'gap' split into 'n_gapo' + 'n_gape'; added score; 'mm' => 'n_mm'
class bwt_multi1_t(ct.Structure):
_fields_ = [('pos', ct.c_uint32),
('n_cigar', ct.c_uint32, 15),
('n_gapo', ct.c_uint32, 8),
('n_gape', ct.c_uint32, 8),
('n_mm', ct.c_uint32, 8),
('strand', ct.c_uint32, 1),
('score', ct.c_int32),
('cigar', ct.POINTER(ct.c_uint16))]
def __init__(self):
super(bwt_multi1_t, self).__init__()
def get_cigar(self, read_len):
if not hasattr(self, "__cigar"):
self.__cigar = make_cigar(self.cigar, self.n_cigar, read_len)
return self.__cigar
def get_pos_end(self, read_len):
if not hasattr(self, "__pos_end"):
self.__pos_end = get_cigar_pos_end(self.get_cigar(read_len), self.pos)
return self.__pos_end
class bwa_seq_t(ct.Structure):
_fields_ = [('name', ct.c_char_p),
('seq', ct.POINTER(ct.c_uint8)),
('rseq', ct.POINTER(ct.c_uint8)),
('qual', ct.POINTER(ct.c_uint8)),
('len', ct.c_uint32, 20),
('strand', ct.c_uint32, 1),
('type', ct.c_uint32, 2),
('dummy', ct.c_uint32, 1),
('extra_flag', ct.c_uint32, 8),
('n_mm', ct.c_uint32, 8),
('n_gapo', ct.c_uint32, 8),
('n_gape', ct.c_uint32, 8),
('mapQ', ct.c_uint32, 8),
('score', ct.c_int32),
('clip_len', ct.c_int32),
('n_aln', ct.c_int32),
('aln', ct.POINTER(bwt_aln1_t)),
('n_multi', ct.c_int32),
('multi', ct.POINTER(bwt_multi1_t)),
('sa', ct.c_uint32),
('pos', ct.c_uint32),
('c1', ct.c_uint64, 28),
('c2', ct.c_uint64, 28),
('seQ', ct.c_uint64, 8),
('n_cigar', ct.c_int32),
('cigar', ct.POINTER(ct.c_uint16)),
('tid', ct.c_int32),
('bc', ct.c_char * 16), # in-struct array, up to 15 bases, null-terminated
('full_len', ct.c_uint32, 20),
('nm', ct.c_uint32, 12),
('md', ct.c_char_p)]
def __len__(self):
"""
Return the full length of the sequence. Note that only
the section up to clip_len is used for the alignment. Read
trimming in fact modifies the value of clip_len."""
return self.full_len
def __compute_cigar_ops(self):
"""
Returns an array of tuples (lenth, op) corresponding to
the alignment of this sequence.
Based on bwa_print_sam1 from bwase.c
"""
if self.type == BWA_TYPE_NO_MATCH:
return []
elif self.n_cigar == 0:
return [(self.len, 'M')]
else:
cigar = []
for k in xrange(self.n_cigar):
size = self.cigar[k] & CIGAR_LN_MASK
op = "MIDS"[self.cigar[k]>>CIGAR_OP_SHIFT]
cigar.append((size, op))
return cigar
def __compute_pos_end(self):
"""
Computes the position on the reference where this aligned
sequence ends, based on the alignment described by the
CIGAR operations and the starting position self.pos.
Based on pos_end in bwase.c.
NOTE: pos_end uses p->len instead of full_len or clip_len.
Raises: ValueError if self is of type BWA_TYPE_NO_MATCH
"""
if self.type == BWA_TYPE_NO_MATCH:
raise ValueError("Cannot calculate end position of an unmapped sequence")
cig = self.get_cigar()
if len(cig) == 0:
return self.pos + self.len
else:
return get_cigar_pos_end(cig, self.pos)
def get_cigar(self):
if not hasattr(self, "__cigar"):
self.__cigar = self.__compute_cigar_ops()
return self.__cigar
def get_pos_end(self):
if not hasattr(self, "__pos_end"):
self.__pos_end = self.__compute_pos_end()
return self.__pos_end
def get_pos_5(self):
"""
position of the 5' end.
Raises: ValueError if self is of type BWA_TYPE_NO_MATCH
"""
if self.type == BWA_TYPE_NO_MATCH:
raise ValueError("Cannot calculate 5' position of an unmapped sequence")
if self.strand:
return self.get_pos_end()
else:
return self.pos
def get_seq(self):
"""
Return seq (the original sequence) as a text string.
"""
if not hasattr(self, "__seq"):
self.__seq = ''.join(['ACGTN'[self.seq[k]] for k in xrange(self.full_len)])
return self.__seq
def get_rseq(self):
"""
Return complement of seq as a text string.
"""
# XXX: note that for in BWA rseq is practically a temporary buffer.
# It's only correct in the range [0,clip_len). BWA itself creates the
# complement sequence for output directly from the seq array.
if not hasattr(self, "__rseq"):
self.__rseq = ''.join(['TGCAN'[self.seq[k]] for k in xrange(self.full_len-1, -1, -1)])
return self.__rseq
# Get a string with the quality of the bases using an ASCII-33 encoding
def get_qual_string(self):
if not hasattr(self, "__qual"):
ptr = self.qual
if not ptr:
self.__qual = ''
else:
self.__qual = ''.join([chr(ptr[k]) for k in xrange(self.full_len)])
return self.__qual
def get_name(self):
return self.name
def itermulti(self):
"""
Yields alignment-specific info for all hits, including the 'main' one.
"""
yield self
for i in xrange(self.n_multi):
yield self.multi[i]
bwa_seq_p_t = ct.POINTER(bwa_seq_t)
class isize_info_t(ct.Structure):
_fields_ = [('avg', ct.c_double),
('std', ct.c_double),
('ap_prior', ct.c_double),
('low', ct.c_uint32),
('high', ct.c_uint32),
('high_bayesian', ct.c_uint32)]
#-END-STRUCT-WRAPPERS----------------------------------------------------------
init_g_log_n = libbwa.init_g_log_n
init_g_log_n.argtypes = []
init_g_log_n()
init_g_hash = libbwa.init_g_hash
init_g_hash.argtypes = []
init_g_hash()
#------------------------------------------------------------
gap_opt_p_t = ct.POINTER(gap_opt_t)
gap_init_opt = libbwa.gap_init_opt
gap_init_opt.argtypes = []
gap_init_opt.restype = gap_opt_p_t
#------------------------------------------------------------
pe_opt_p_t = ct.POINTER(pe_opt_t)
pe_init_opt = libbwa.bwa_init_pe_opt
pe_init_opt.argtypes = []
pe_init_opt.restype = pe_opt_p_t
#------------------------------------------------------------
bntseq_p_t = ct.POINTER(bntseq_t)
bns_restore = libbwa.bns_restore
bns_restore.argtypes = [ct.c_char_p]
bns_restore.restype = bntseq_p_t
restore_bns=bns_restore
bns_destroy_base = libbwa.bns_destroy
bns_destroy_base.argtypes = [bntseq_p_t]
#------------------------------------------------------------
bwt_p_t = ct.POINTER(bwt_t)
bwt_restore_bwt = libbwa.bwt_restore_bwt
bwt_restore_bwt.argtypes = [ct.c_char_p]
bwt_restore_bwt.restype = bwt_p_t
restore_bwt=bwt_restore_bwt
bwt_restore_sa = libbwa.bwt_restore_sa
bwt_restore_sa.argtypes = [ct.c_char_p, bwt_p_t]
restore_sa=bwt_restore_sa
bwt_destroy_base = libbwa.bwt_destroy
bwt_destroy_base.argtypes = [bwt_p_t]
#--
bwt_restore_bwt_mmap = libbwa.bwt_restore_bwt_mmap
bwt_restore_bwt_mmap.argtypes = [ct.c_char_p]
bwt_restore_bwt_mmap.restype = bwt_p_t
restore_bwt_mmap=bwt_restore_bwt_mmap
bwt_restore_sa_mmap = libbwa.bwt_restore_sa_mmap
bwt_restore_sa_mmap.argtypes = [ct.c_char_p, bwt_p_t]
restore_sa_mmap=bwt_restore_sa_mmap
bwt_destroy_mmap = libbwa.bwt_destroy_mmap
bwt_destroy_mmap.argtypes = [bwt_p_t]
ubyte_p_t = ct.POINTER(ct.c_ubyte)
bwt_restore_reference_mmap_helper = libbwa.bwt_restore_reference_mmap_helper
bwt_restore_reference_mmap_helper.argtypes = [ct.c_char_p, ct.c_int32]
bwt_restore_reference_mmap_helper.restype = ubyte_p_t
#------------------------------------------------------------
bwa_cal_sa_reg_gap_mt = libbwa.bwa_cal_sa_reg_gap_mt
# (bwt_t *const bwt[2], int n_seqs, bwa_seq_t *seqs, const gap_opt_t *opt) ;
bwa_cal_sa_reg_gap_mt.argtypes = [bwt_p_t * 2, ct.c_int32, bwa_seq_p_t, gap_opt_p_t]
cal_sa_reg_gap_mt = bwa_cal_sa_reg_gap_mt
#------------------------------------------------------------
bwa_cal_sa_reg_gap = libbwa.bwa_cal_sa_reg_gap
# (int tid, bwt_t *const bwt[2], int n_seqs, bwa_seq_t *seqs, const gap_opt_t *opt) ;
bwa_cal_sa_reg_gap.argtypes = [ct.c_int32, bwt_p_t * 2, ct.c_int32, bwa_seq_p_t, gap_opt_p_t]
cal_sa_reg_gap = bwa_cal_sa_reg_gap
#------------------------------------------------------------
bwa_cal_pac_pos_pe = libbwa.bwa_cal_pac_pos_pe_memory
bwa_cal_pac_pos_pe.argtypes = [bwt_p_t * 2, ct.c_int, bwa_seq_p_t * 2,
ct.POINTER(isize_info_t),
pe_opt_p_t, gap_opt_p_t, ct.POINTER(isize_info_t)]
bwa_cal_pac_pos_pe.restype = ct.c_int
#------------------------------------------------------------
bns_coor_pac2real = libbwa.bns_coor_pac2real
#int bns_coor_pac2real(const bntseq_t *bns, int64_t pac_coor, int len, int32_t *real_seq)
bns_coor_pac2real.argtypes = [bntseq_p_t, ct.c_uint64, ct.c_uint,
ct.POINTER(ct.c_uint32)]
bns_coor_pac2real.restype = ct.c_int
#------------------------------------------------------------
bwa_trim_read = libbwa.bwa_trim_read
# int bwa_trim_read(int trim_qual, bwa_seq_t *p)
bwa_trim_read.argtypes = [ct.c_int, bwa_seq_p_t]
bwa_trim_read.restype = ct.c_int
#------------------------------------------------------------
bwa_seq_reverse = libbwa.seq_reverse
# void seq_reverse(int len, ubyte_t *seq, int is_comp)
bwa_seq_reverse.argtypes = [ ct.c_int, ct.POINTER(ct.c_ubyte), ct.c_int ]
bwa_seq_reverse.restype = None
#------------------------------------------------------------
init_sequences = libbwa.bwa_init_sequences
# bwa_seq_t* bwa_init_sequences(const char**const data2d, const size_t n_seqs, const int q_offset, const int trim_qual)
init_sequences.argtypes = [p_char_p, ct.c_uint, ct.c_int, ct.c_int]
init_sequences.restype = bwa_seq_p_t
#------------------------------------------------------------
# Start Python functions
def get_seq_id(bns, pac_coordinate, ref_len):
    """Map a packed-reference coordinate to a reference sequence id.

    Returns (seq_id, nn) where nn is bns_coor_pac2real's int return value
    -- presumably the count of ambiguous regions overlapped; confirm
    against BWA's bntseq.c.
    """
    seq_id = ct.c_uint32()
    nn = bns_coor_pac2real(bns, pac_coordinate, ref_len, ct.byref(seq_id))
    return seq_id.value, nn
def cal_pac_pos_pe(bwts, n_seqs, bwsa, ii, popt, gopt, last_ii):
    """Python wrapper around bwa_cal_pac_pos_pe_memory.

    Packs the two bwa_seq_t* mate arrays into the C array-of-2 the C API
    expects, and passes both isize_info_t structs by reference.
    """
    bwsa_c = (bwa_seq_p_t * 2)()
    bwsa_c[0] = bwsa[0]
    bwsa_c[1] = bwsa[1]
    res = bwa_cal_pac_pos_pe(bwts, n_seqs, bwsa_c, ct.byref(ii), popt, gopt,
                             ct.byref(last_ii))
    return res
#------------------------------------------------------------
#ubyte_t *bwa_paired_sw(const bntseq_t *bns, const ubyte_t *_pacseq,
# int n_seqs, bwa_seq_t *seqs[2],
# const pe_opt_t *popt, const isize_info_t *ii);
bwa_paired_sw = libbwa.bwa_paired_sw
bwa_paired_sw.argtypes = [bntseq_p_t, ct.POINTER(ct.c_uint8), ct.c_int,
bwa_seq_p_t * 2,
pe_opt_p_t,
ct.POINTER(isize_info_t)]
bwa_paired_sw.restype = ct.POINTER(ct.c_uint8)
def paired_sw(bns, pacseq, n_seq_pairs, bwsa, popt, ii, offset=0):
    """Run BWA's paired Smith-Waterman over pairs starting at *offset*.

    Builds the 2-element array of bwa_seq_t* the C function expects, each
    pointing at element *offset* of the corresponding mate array so only
    the remaining pairs are processed.
    """
    bwsa_c = (bwa_seq_p_t * 2)()
    bwsa_c[0] = ct.cast(ct.byref(bwsa[0][offset]), bwa_seq_p_t)
    bwsa_c[1] = ct.cast(ct.byref(bwsa[1][offset]), bwa_seq_p_t)
    r = bwa_paired_sw(bns, pacseq, n_seq_pairs, bwsa_c, popt, ct.byref(ii))
    return r
#------------------------------------------------------------
bwa_refine_gapped = libbwa.bwa_refine_gapped_memory
bwa_refine_gapped.argtypes = [bntseq_p_t, ct.POINTER(ct.c_uint8), ct.c_int, bwa_seq_p_t]
def refine_gapped(bns, n_seqs, seqs, pacseq):
    """Wrapper for bwa_refine_gapped_memory (note the C argument order
    differs from this Python signature)."""
    seqs_c = ct.cast(seqs, bwa_seq_p_t)
    return bwa_refine_gapped(bns, pacseq, n_seqs, seqs_c)
#------------------------------------------------------------
bwa_free_read_seq = libbwa.bwa_free_read_seq
bwa_free_read_seq.argtypes = [ct.c_int32, bwa_seq_p_t]
free_seq = bwa_free_read_seq
def build_bws_array(sequence_pairs, qtype="fastq-sanger", trim_qual=0):
    """
    Build an array of BWA seq pairs from a list of sequence pairs.
    A sequence pair is a (name, read_seq, read_qual, mate_seq, mate_qual).

    :param qtype: fastq quality encoding; only fastq-sanger and
        fastq-illumina are supported.
    :param trim_qual: quality threshold passed to bwa_init_sequences.
    :returns: a 2-element C array of bwa_seq_t*, one per mate.
    :raises ValueError: on unsupported qtype or malformed rows.
    :raises RuntimeError: if the C allocation fails.
    """
    # allocate two arrays of bwa_seq_t, one for each set of reads (1 and 2)
    bwsa_t = bwa_seq_p_t * 2
    bwsa = bwsa_t()
    if qtype == "fastq-sanger" or qtype == "fastq-illumina":
        # this is the offset that has been added to the quality scores,
        # which varies depending on the format.
        q_offset = Q_OFFSET[qtype]
    else:
        # XXX: to support fastq-solexa we'd have to rewrite the quality arrays
        # here according to the formula round(q[i] + 10 * log10(1+10**(-q[i]/10.)))
        raise ValueError("sorry. '%s' is not a supported qtype" % qtype)
    # pointers[i] holds three char* columns: name, seq, qual for one read.
    row_type = (ct.c_char_p * 3)
    pointers = (row_type*len(sequence_pairs))()
    for read_no in 0,1:
        # Column indices into each 5-tuple: the name plus this mate's
        # sequence and quality columns.
        if read_no == 0:
            row_indices = (0, 1, 2)
        else:
            row_indices = (0, 3, 4)
        for i, row in enumerate(sequence_pairs):
            if len(row) != 5:
                raise ValueError("Invalid row %d. Expecting (name, read1, qual1, read2, qual2)"
                                 " but got a row with length %d\n%s" % (i, len(row), str(row)))
            for j in xrange(3):
                pointers[i][j] = row[ row_indices[j] ]
        # The C side copies the data, so `pointers` can be reused for mate 2.
        bwsa[read_no] = init_sequences( ct.cast(pointers, p_char_p), len(sequence_pairs), q_offset, trim_qual)
        if bwsa[read_no] is None:
            raise RuntimeError("There was an error creating the BWA sequences")
    return bwsa
def restore_index_base(root_name):
    """Load the forward and reverse BWT indexes plus their suffix arrays."""
    bwts_t = bwt_p_t * 2
    bwts = bwts_t()
    bwts[0] = restore_bwt(root_name + '.bwt')
    bwts[1] = restore_bwt(root_name + '.rbwt')  # reversed-sequence index
    restore_sa(root_name + '.sa', bwts[0])
    restore_sa(root_name + '.rsa', bwts[1])
    return bwts
def restore_reference_base(root_name):
    """Load the reference annotations and the packed (2-bit) sequence.

    Returns (bns, pacseq) where pacseq is a ctypes ubyte pointer into the
    .pac payload held alive by the cast chain.
    """
    bns = restore_bns(root_name)
    # FIX: open the binary .pac file in 'rb' mode and close it promptly
    # (it was previously opened in text mode and the handle leaked).
    # l_pac/4 + 1: the reference is 2-bit packed, four bases per byte.
    with open(root_name + '.pac', 'rb') as fd:
        pacseq_str = fd.read(bns[0].l_pac/4 + 1)
    p = ct.c_char_p(pacseq_str)
    ubyte_p_t = ct.POINTER(ct.c_ubyte)
    pacseq = ct.cast(p, ubyte_p_t)
    return (bns, pacseq)
#--------------------------------------------------------------------
def mmap_magic():
    """Return the BWA_MMAP_SA_MAGIC uint32 exported by libbwa."""
    return ct.c_uint32.in_dll(libbwa, "BWA_MMAP_SA_MAGIC")
import struct
def make_suffix_array_for_mmap(src, dest):
    """Rewrite suffix-array file *src* into the mmap-able layout at *dest*.

    Layout: native-endian magic word, the first 7 uint32 header words
    copied from the source, a uint32 0xffffffff placeholder, then the
    remaining payload streamed through unchanged.
    """
    # FIX: open both files in binary mode (the payload is raw packed
    # integers) and use context managers so the handles are always closed.
    with open(src, 'rb') as s:
        with open(dest, 'wb') as d:
            # Write the magic number in native byte order so that it will only
            # be read correctly by machines that use the same byte ordering
            # (since the data is also in native byte order).
            d.write(struct.pack("=I", mmap_magic().value))
            d.write(s.read(4*7))
            d.write(struct.pack("=I", 0x0ffffffff))  # uint32_t(-1)
            buf_size = 1000000
            while True:
                c = s.read(buf_size)
                if not c:
                    break
                d.write(c)
def make_suffix_arrays_for_mmap(root_name):
    """Generate mmap-format (.sax/.rsax) copies of both suffix arrays."""
    make_suffix_array_for_mmap(root_name + '.sa', root_name + '.sax')
    make_suffix_array_for_mmap(root_name + '.rsa', root_name + '.rsax')
def restore_index_mmap(root_name):
    """Load both BWT indexes using memory-mapped suffix arrays.

    Expects the .sax/.rsax files produced by make_suffix_arrays_for_mmap.
    """
    bwts_t = bwt_p_t * 2
    bwts = bwts_t()
    bwts[0] = restore_bwt_mmap(root_name + '.bwt')
    bwts[1] = restore_bwt_mmap(root_name + '.rbwt')
    restore_sa_mmap(root_name + '.sax', bwts[0])
    restore_sa_mmap(root_name + '.rsax', bwts[1])
    return bwts
def restore_reference_mmap(root_name):
    """Load annotations plus a memory-mapped view of the packed reference."""
    bns = restore_bns(root_name)
    pac_fname = root_name + '.pac'
    # l_pac/4 + 1: the reference is 2-bit packed, four bases per byte.
    pacseq = bwt_restore_reference_mmap_helper(pac_fname, bns[0].l_pac/4 + 1)
    return (bns, pacseq)
#------------------------------------------------------------
def restore_index(root, mmap=False):
    """Load the BWT index pair, memory-mapping suffix arrays if *mmap*."""
    if mmap:
        return restore_index_mmap(root)
    else:
        return restore_index_base(root)
def restore_reference(root, mmap=False):
    """Load (bns, pacseq), memory-mapping the .pac payload if *mmap*."""
    if mmap:
        return restore_reference_mmap(root)
    else:
        return restore_reference_base(root)
def bwt_destroy(bwt, mmap=False):
    """Free a bwt_t with the destructor matching how it was loaded."""
    if mmap:
        bwt_destroy_mmap(bwt)
    else:
        bwt_destroy_base(bwt)
def bns_destroy(bnsp, mmap=False):
    # NOTE(review): the mmap flag is accepted for symmetry with
    # bwt_destroy but is currently unused -- confirm that is intentional.
    bns_destroy_base(bnsp)
|
murtidash/dom4gameserver | refs/heads/master | scripts/TurnTracker/makettweb.py | 1 | #!/usr/bin/python
import sqlite3
from ttdatabase import *
# Pull the player roster and per-turn records from the database; the
# helpers and the `players` / `turns` dicts come from ttdatabase.
loadPlayers()
loadTurns()
name = getName()
ofilename = "TurnTrack - %s.html" % name
ofile = open(ofilename, "w+")
## Header
head = "<h2> Turn Tracking for %s </h2>" % name
ofile.write(head)
## Body
ofile.write("<p><table width=100%>")
# Cell colour per disposition code (legend written in the footer below):
# 1 = played, 0 = no info, 2 = stale, 3 = AI controlled.
colorDisposition = {
    1:'green',
    0:'gray',
    2:'red',
    3:'black'
}
# One table row per player, one coloured cell per recorded turn.
for player in players.keys():
    ofile.write("<tr><td width = 7%%>%s</td>" % player)
    for turn in turns.keys():
        ttt = getTurnDisposition(players[player],turns[turn])
        ofile.write('<td style="background-color:')
        ofile.write(colorDisposition[ttt])
        ofile.write('"> </td>')
    ofile.write("</tr>")
ofile.write("</table>")
## Footer
ofile.write("""
<p>
<table width=40%>
<tr><td width=25%>No Info:</td><td style="background-color:gray" width=25%> </td>
<td width=25%>Played:</td><td style="background-color:green" width=25%> </td></tr>
<tr><td>Stale:</td><td style="background-color:red"> </td><td>AI Controlled:</td><td style="background-color:black"> </td></tr>
</table>
"""
)
ofile.close()
|
andpp/cherrymusic | refs/heads/devel | cherrymusicserver/migrations/test/test_migration_0003.py | 7 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
#
# CherryMusic - a standalone music server
# Copyright (c) 2012-2014 Tom Wallroth & Tilman Boerner
#
# Project page:
# http://fomori.org/cherrymusic/
# Sources on github:
# http://github.com/devsnd/cherrymusic/
#
# CherryMusic is based on
# jPlayer (GPL/MIT license) http://www.jplayer.org/
# CherryPy (BSD license) http://www.cherrypy.org/
#
# licensed under GNU GPL version 3 (or later)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
import nose
from mock import *
from nose.tools import *
from cherrymusicserver.test import helpers
import base64
import codecs
import hashlib
import os
from cherrymusicserver import pathprovider
from cherrymusicserver.migrations import migration_0003
def test_migration():
    """Nose test generator: yield one filesystem check per migration scenario.

    Each scenario is (description, files before migration, files expected
    after migration).
    """
    scenarios = (
        ('empty dir',
         [],
         ['.hashpath']),
        ('migrated dir',
         ['.hashpath', _oldname('b/c')],
         ['.hashpath', _oldname('b/c')]),
        ('standard migration',
         [_oldname('a'), _oldname('b/c')],
         [_newname('a'), _newname('b/c'), '.hashpath']),
        ('hidden file',
         ['.foo', _oldname('b/c')],
         ['.foo', _newname('b/c'), '.hashpath']),
        ('invalid base64 encoding',
         ['badbase64='],
         ['badbase64=', '.hashpath']),
    )
    for description, startnames, wantnames in scenarios:
        # Nose displays the callable's .description for each yielded test.
        check_filelist.description = 'migration_0003 (albumart): ' + description
        yield check_filelist, startnames, wantnames
def check_filelist(startnames, wantnames):
    """Seed a sandbox art dir with *startnames*, run the albumart migration,
    and assert the resulting directory listing equals *wantnames*."""
    with helpers.tempdir('cherrymusic.test_migration_0003') as tmpd:
        artfolder = helpers.mkpath('art/', tmpd)
        for filename in startnames:
            helpers.mkpath(filename, artfolder)
        # Point the pathprovider at the sandbox while the migration runs.
        patch_target = 'cherrymusicserver.pathprovider.albumArtFilePath'
        with patch(patch_target, _mock_artpath(artfolder)):
            migration_0003.migrate()
        expected = sorted(wantnames)
        result = sorted(os.listdir(artfolder))
        eq_(expected, result, '\n%r\n%r' % (expected, result))
def _oldname(s):
"copied from pathprovider"
utf8_bytestr = codecs.encode(s, 'UTF-8')
utf8_altchar = codecs.encode('+-', 'UTF-8')
return codecs.decode(base64.b64encode(utf8_bytestr, utf8_altchar), 'UTF-8')
def _newname(s):
utf8_bytestr = codecs.encode(s, 'UTF-8')
return hashlib.md5(utf8_bytestr).hexdigest() + '.thumb'
_real_artpath = pathprovider.albumArtFilePath  # keep a handle on the unpatched function
def _mock_artpath(tmpd):
    """Return a stand-in for pathprovider.albumArtFilePath that relocates
    every art file into *tmpd* (falsy input maps to *tmpd* itself)."""
    def artpath(s):
        basename = os.path.basename(_real_artpath(s)) if s else ''
        return os.path.join(tmpd, basename)
    return artpath
# Allow running this test module directly via the nose test runner.
if __name__ == '__main__':
    nose.runmodule()
|
Mixser/django | refs/heads/master | django/contrib/syndication/views.py | 192 | from __future__ import unicode_literals
from calendar import timegm
from django.conf import settings
from django.contrib.sites.shortcuts import get_current_site
from django.core.exceptions import ImproperlyConfigured, ObjectDoesNotExist
from django.http import Http404, HttpResponse
from django.template import TemplateDoesNotExist, loader
from django.utils import feedgenerator, six
from django.utils.encoding import force_text, iri_to_uri, smart_text
from django.utils.html import escape
from django.utils.http import http_date
from django.utils.timezone import get_default_timezone, is_naive, make_aware
def add_domain(domain, url, secure=False):
    """Return an absolute URL for *url* on *domain*.

    Network-path references (``//host/path``) are given an explicit
    protocol; already-absolute http(s)/mailto URLs pass through unchanged;
    anything else is treated as a path on *domain*.
    """
    scheme = 'https' if secure else 'http'
    if url.startswith('//'):
        # Support network-path reference (see #16753) - RSS requires a protocol
        return '%s:%s' % (scheme, url)
    if url.startswith(('http://', 'https://', 'mailto:')):
        return url
    return iri_to_uri('%s://%s%s' % (scheme, domain, url))
class FeedDoesNotExist(ObjectDoesNotExist):
    """Raised when the parameters to a feed don't resolve to an existing object."""
    pass
class Feed(object):
    """Base class for syndication feed views.

    Subclasses configure the feed by defining attributes or methods
    (``title``, ``link``, ``items``, ``item_title``, ...).  Each hook is
    resolved through ``__get_dynamic_attr``, which accepts a plain value,
    a zero-argument callable, or a one-argument callable receiving the
    object returned by ``get_object``.
    """
    # Feed generator class used to build the output; override for Atom etc.
    feed_type = feedgenerator.DefaultFeed
    # Optional template names used to render per-item title/description.
    title_template = None
    description_template = None
    def __call__(self, request, *args, **kwargs):
        """View entry point: build the feed for *request* and serialize it
        into an HttpResponse.  Raises Http404 when get_object fails."""
        try:
            obj = self.get_object(request, *args, **kwargs)
        except ObjectDoesNotExist:
            raise Http404('Feed object does not exist.')
        feedgen = self.get_feed(obj, request)
        response = HttpResponse(content_type=feedgen.content_type)
        if hasattr(self, 'item_pubdate') or hasattr(self, 'item_updateddate'):
            # if item_pubdate or item_updateddate is defined for the feed, set
            # header so as ConditionalGetMiddleware is able to send 304 NOT MODIFIED
            response['Last-Modified'] = http_date(
                timegm(feedgen.latest_post_date().utctimetuple()))
        feedgen.write(response, 'utf-8')
        return response
    def item_title(self, item):
        # Titles should be double escaped by default (see #6533)
        return escape(force_text(item))
    def item_description(self, item):
        # Default description: the string representation of the item.
        return force_text(item)
    def item_link(self, item):
        # A missing get_absolute_url() is a programmer error, hence
        # ImproperlyConfigured rather than a 404.
        try:
            return item.get_absolute_url()
        except AttributeError:
            raise ImproperlyConfigured(
                'Give your %s class a get_absolute_url() method, or define an '
                'item_link() method in your Feed class.' % item.__class__.__name__
            )
    def __get_dynamic_attr(self, attname, obj, default=None):
        """Resolve *attname* on self: return the plain value, call it with
        no arguments, or call it with *obj*, depending on its arity."""
        try:
            attr = getattr(self, attname)
        except AttributeError:
            return default
        if callable(attr):
            # Check co_argcount rather than try/excepting the function and
            # catching the TypeError, because something inside the function
            # may raise the TypeError. This technique is more accurate.
            try:
                code = six.get_function_code(attr)
            except AttributeError:
                code = six.get_function_code(attr.__call__)
            if code.co_argcount == 2: # one argument is 'self'
                return attr(obj)
            else:
                return attr()
        return attr
    def feed_extra_kwargs(self, obj):
        """
        Returns an extra keyword arguments dictionary that is used when
        initializing the feed generator.
        """
        return {}
    def item_extra_kwargs(self, item):
        """
        Returns an extra keyword arguments dictionary that is used with
        the `add_item` call of the feed generator.
        """
        return {}
    def get_object(self, request, *args, **kwargs):
        # Hook for feeds parametrized by URL; the default feed has no
        # backing object.
        return None
    def get_context_data(self, **kwargs):
        """
        Returns a dictionary to use as extra context if either
        ``self.description_template`` or ``self.item_template`` are used.
        Default implementation preserves the old behavior
        of using {'obj': item, 'site': current_site} as the context.
        """
        return {'obj': kwargs.get('item'), 'site': kwargs.get('site')}
    def get_feed(self, obj, request):
        """
        Returns a feedgenerator.DefaultFeed object, fully populated, for
        this feed. Raises FeedDoesNotExist for invalid parameters.
        """
        current_site = get_current_site(request)
        link = self.__get_dynamic_attr('link', obj)
        link = add_domain(current_site.domain, link, request.is_secure())
        feed = self.feed_type(
            title=self.__get_dynamic_attr('title', obj),
            subtitle=self.__get_dynamic_attr('subtitle', obj),
            link=link,
            description=self.__get_dynamic_attr('description', obj),
            language=settings.LANGUAGE_CODE,
            feed_url=add_domain(
                current_site.domain,
                self.__get_dynamic_attr('feed_url', obj) or request.path,
                request.is_secure(),
            ),
            author_name=self.__get_dynamic_attr('author_name', obj),
            author_link=self.__get_dynamic_attr('author_link', obj),
            author_email=self.__get_dynamic_attr('author_email', obj),
            categories=self.__get_dynamic_attr('categories', obj),
            feed_copyright=self.__get_dynamic_attr('feed_copyright', obj),
            feed_guid=self.__get_dynamic_attr('feed_guid', obj),
            ttl=self.__get_dynamic_attr('ttl', obj),
            **self.feed_extra_kwargs(obj)
        )
        # A missing/invalid template silently falls back to item_title() /
        # item_description() below.
        title_tmp = None
        if self.title_template is not None:
            try:
                title_tmp = loader.get_template(self.title_template)
            except TemplateDoesNotExist:
                pass
        description_tmp = None
        if self.description_template is not None:
            try:
                description_tmp = loader.get_template(self.description_template)
            except TemplateDoesNotExist:
                pass
        for item in self.__get_dynamic_attr('items', obj):
            context = self.get_context_data(item=item, site=current_site,
                                            obj=obj, request=request)
            if title_tmp is not None:
                title = title_tmp.render(context, request)
            else:
                title = self.__get_dynamic_attr('item_title', item)
            if description_tmp is not None:
                description = description_tmp.render(context, request)
            else:
                description = self.__get_dynamic_attr('item_description', item)
            link = add_domain(
                current_site.domain,
                self.__get_dynamic_attr('item_link', item),
                request.is_secure(),
            )
            enc = None
            enc_url = self.__get_dynamic_attr('item_enclosure_url', item)
            if enc_url:
                enc = feedgenerator.Enclosure(
                    url=smart_text(enc_url),
                    length=smart_text(self.__get_dynamic_attr('item_enclosure_length', item)),
                    mime_type=smart_text(self.__get_dynamic_attr('item_enclosure_mime_type', item))
                )
            author_name = self.__get_dynamic_attr('item_author_name', item)
            if author_name is not None:
                author_email = self.__get_dynamic_attr('item_author_email', item)
                author_link = self.__get_dynamic_attr('item_author_link', item)
            else:
                author_email = author_link = None
            # Naive datetimes are interpreted in the project's default timezone.
            tz = get_default_timezone()
            pubdate = self.__get_dynamic_attr('item_pubdate', item)
            if pubdate and is_naive(pubdate):
                pubdate = make_aware(pubdate, tz)
            updateddate = self.__get_dynamic_attr('item_updateddate', item)
            if updateddate and is_naive(updateddate):
                updateddate = make_aware(updateddate, tz)
            feed.add_item(
                title=title,
                link=link,
                description=description,
                unique_id=self.__get_dynamic_attr('item_guid', item, link),
                unique_id_is_permalink=self.__get_dynamic_attr(
                    'item_guid_is_permalink', item),
                enclosure=enc,
                pubdate=pubdate,
                updateddate=updateddate,
                author_name=author_name,
                author_email=author_email,
                author_link=author_link,
                categories=self.__get_dynamic_attr('item_categories', item),
                item_copyright=self.__get_dynamic_attr('item_copyright', item),
                **self.item_extra_kwargs(item)
            )
        return feed
|
cppisfun/GameEngine | refs/heads/master | foreign/boost/libs/python/pyste/dist/setup.py | 13 | # Copyright Bruno da Silva de Oliveira 2006. Distributed under the Boost
# Software License, Version 1.0. (See accompanying
# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
from distutils.core import setup
import py2exe  # imported for its side effect: registers the 'py2exe' distutils command
import sys
# Make the pyste sources importable so py2exe can collect their dependencies.
sys.path.append('../src')
# Package the pyste entry-point script (py2exe builds a Windows executable).
setup(name='pyste', scripts=['../src/pyste.py'])
|
coderbone/SickRage-alt | refs/heads/master | requirements/__init__.py | 12133432 | |
shashank971/edx-platform | refs/heads/master | lms/djangoapps/discussion_api/__init__.py | 12133432 | |
ondrokrc/gramps | refs/heads/master | gramps/webapp/grampsdb/templatetags/__init__.py | 12133432 | |
SebasSBM/django | refs/heads/master | tests/template_loader/__init__.py | 12133432 | |
myang321/django | refs/heads/master | tests/model_options/models/__init__.py | 12133432 | |
madAndroid/jenkins-job-builder | refs/heads/master | tests/properties/__init__.py | 12133432 | |
javachengwc/hue | refs/heads/master | desktop/core/ext-py/Django-1.6.10/django/conf/locale/et/__init__.py | 12133432 | |
sdcooke/django | refs/heads/master | tests/max_lengths/__init__.py | 12133432 | |
mtils/ems | refs/heads/master | ems/qt/tool_widgets/__init__.py | 12133432 | |
srikantbmandal/ansible | refs/heads/devel | lib/ansible/modules/cloud/digital_ocean/__init__.py | 12133432 | |
hybrideagle/django | refs/heads/master | tests/datetimes/__init__.py | 12133432 | |
nharraud/b2share | refs/heads/master | invenio/legacy/bibmatch/scripts/__init__.py | 12133432 | |
impromptuartist/impromptuartist.github.io | refs/heads/master | node_modules/pygmentize-bundled/vendor/pygments/build-2.7/pygments/lexers/_mapping.py | 189 | # -*- coding: utf-8 -*-
"""
pygments.lexers._mapping
~~~~~~~~~~~~~~~~~~~~~~~~
Lexer mapping defintions. This file is generated by itself. Everytime
you change something on a builtin lexer defintion, run this script from
the lexers folder to update it.
Do not alter the LEXERS dictionary by hand.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
LEXERS = {
'ABAPLexer': ('pygments.lexers.other', 'ABAP', ('abap',), ('*.abap',), ('text/x-abap',)),
'ActionScript3Lexer': ('pygments.lexers.web', 'ActionScript 3', ('as3', 'actionscript3'), ('*.as',), ('application/x-actionscript', 'text/x-actionscript', 'text/actionscript')),
'ActionScriptLexer': ('pygments.lexers.web', 'ActionScript', ('as', 'actionscript'), ('*.as',), ('application/x-actionscript3', 'text/x-actionscript3', 'text/actionscript3')),
'AdaLexer': ('pygments.lexers.compiled', 'Ada', ('ada', 'ada95ada2005'), ('*.adb', '*.ads', '*.ada'), ('text/x-ada',)),
'AgdaLexer': ('pygments.lexers.functional', 'Agda', ('agda',), ('*.agda',), ('text/x-agda',)),
'AntlrActionScriptLexer': ('pygments.lexers.parsers', 'ANTLR With ActionScript Target', ('antlr-as', 'antlr-actionscript'), ('*.G', '*.g'), ()),
'AntlrCSharpLexer': ('pygments.lexers.parsers', 'ANTLR With C# Target', ('antlr-csharp', 'antlr-c#'), ('*.G', '*.g'), ()),
'AntlrCppLexer': ('pygments.lexers.parsers', 'ANTLR With CPP Target', ('antlr-cpp',), ('*.G', '*.g'), ()),
'AntlrJavaLexer': ('pygments.lexers.parsers', 'ANTLR With Java Target', ('antlr-java',), ('*.G', '*.g'), ()),
'AntlrLexer': ('pygments.lexers.parsers', 'ANTLR', ('antlr',), (), ()),
'AntlrObjectiveCLexer': ('pygments.lexers.parsers', 'ANTLR With ObjectiveC Target', ('antlr-objc',), ('*.G', '*.g'), ()),
'AntlrPerlLexer': ('pygments.lexers.parsers', 'ANTLR With Perl Target', ('antlr-perl',), ('*.G', '*.g'), ()),
'AntlrPythonLexer': ('pygments.lexers.parsers', 'ANTLR With Python Target', ('antlr-python',), ('*.G', '*.g'), ()),
'AntlrRubyLexer': ('pygments.lexers.parsers', 'ANTLR With Ruby Target', ('antlr-ruby', 'antlr-rb'), ('*.G', '*.g'), ()),
'ApacheConfLexer': ('pygments.lexers.text', 'ApacheConf', ('apacheconf', 'aconf', 'apache'), ('.htaccess', 'apache.conf', 'apache2.conf'), ('text/x-apacheconf',)),
'AppleScriptLexer': ('pygments.lexers.other', 'AppleScript', ('applescript',), ('*.applescript',), ()),
'AspectJLexer': ('pygments.lexers.jvm', 'AspectJ', ('aspectj',), ('*.aj',), ('text/x-aspectj',)),
'AsymptoteLexer': ('pygments.lexers.other', 'Asymptote', ('asy', 'asymptote'), ('*.asy',), ('text/x-asymptote',)),
'AutoItLexer': ('pygments.lexers.other', 'AutoIt', ('autoit', 'Autoit'), ('*.au3',), ('text/x-autoit',)),
'AutohotkeyLexer': ('pygments.lexers.other', 'autohotkey', ('ahk', 'autohotkey'), ('*.ahk', '*.ahkl'), ('text/x-autohotkey',)),
'AwkLexer': ('pygments.lexers.other', 'Awk', ('awk', 'gawk', 'mawk', 'nawk'), ('*.awk',), ('application/x-awk',)),
'BBCodeLexer': ('pygments.lexers.text', 'BBCode', ('bbcode',), (), ('text/x-bbcode',)),
'BaseMakefileLexer': ('pygments.lexers.text', 'Base Makefile', ('basemake',), (), ()),
'BashLexer': ('pygments.lexers.shell', 'Bash', ('bash', 'sh', 'ksh'), ('*.sh', '*.ksh', '*.bash', '*.ebuild', '*.eclass', '.bashrc', 'bashrc', '.bash_*', 'bash_*'), ('application/x-sh', 'application/x-shellscript')),
'BashSessionLexer': ('pygments.lexers.shell', 'Bash Session', ('console',), ('*.sh-session',), ('application/x-shell-session',)),
'BatchLexer': ('pygments.lexers.shell', 'Batchfile', ('bat', 'dosbatch', 'winbatch'), ('*.bat', '*.cmd'), ('application/x-dos-batch',)),
'BefungeLexer': ('pygments.lexers.other', 'Befunge', ('befunge',), ('*.befunge',), ('application/x-befunge',)),
'BlitzBasicLexer': ('pygments.lexers.compiled', 'BlitzBasic', ('blitzbasic', 'b3d', 'bplus'), ('*.bb', '*.decls'), ('text/x-bb',)),
'BlitzMaxLexer': ('pygments.lexers.compiled', 'BlitzMax', ('blitzmax', 'bmax'), ('*.bmx',), ('text/x-bmx',)),
'BooLexer': ('pygments.lexers.dotnet', 'Boo', ('boo',), ('*.boo',), ('text/x-boo',)),
'BrainfuckLexer': ('pygments.lexers.other', 'Brainfuck', ('brainfuck', 'bf'), ('*.bf', '*.b'), ('application/x-brainfuck',)),
'BroLexer': ('pygments.lexers.other', 'Bro', ('bro',), ('*.bro',), ()),
'BugsLexer': ('pygments.lexers.math', 'BUGS', ('bugs', 'winbugs', 'openbugs'), ('*.bug',), ()),
'CLexer': ('pygments.lexers.compiled', 'C', ('c',), ('*.c', '*.h', '*.idc'), ('text/x-chdr', 'text/x-csrc')),
'CMakeLexer': ('pygments.lexers.text', 'CMake', ('cmake',), ('*.cmake', 'CMakeLists.txt'), ('text/x-cmake',)),
'CObjdumpLexer': ('pygments.lexers.asm', 'c-objdump', ('c-objdump',), ('*.c-objdump',), ('text/x-c-objdump',)),
'CSharpAspxLexer': ('pygments.lexers.dotnet', 'aspx-cs', ('aspx-cs',), ('*.aspx', '*.asax', '*.ascx', '*.ashx', '*.asmx', '*.axd'), ()),
'CSharpLexer': ('pygments.lexers.dotnet', 'C#', ('csharp', 'c#'), ('*.cs',), ('text/x-csharp',)),
'Ca65Lexer': ('pygments.lexers.asm', 'ca65', ('ca65',), ('*.s',), ()),
'CbmBasicV2Lexer': ('pygments.lexers.other', 'CBM BASIC V2', ('cbmbas',), ('*.bas',), ()),
'CeylonLexer': ('pygments.lexers.jvm', 'Ceylon', ('ceylon',), ('*.ceylon',), ('text/x-ceylon',)),
'Cfengine3Lexer': ('pygments.lexers.other', 'CFEngine3', ('cfengine3', 'cf3'), ('*.cf',), ()),
'CheetahHtmlLexer': ('pygments.lexers.templates', 'HTML+Cheetah', ('html+cheetah', 'html+spitfire', 'htmlcheetah'), (), ('text/html+cheetah', 'text/html+spitfire')),
'CheetahJavascriptLexer': ('pygments.lexers.templates', 'JavaScript+Cheetah', ('js+cheetah', 'javascript+cheetah', 'js+spitfire', 'javascript+spitfire'), (), ('application/x-javascript+cheetah', 'text/x-javascript+cheetah', 'text/javascript+cheetah', 'application/x-javascript+spitfire', 'text/x-javascript+spitfire', 'text/javascript+spitfire')),
'CheetahLexer': ('pygments.lexers.templates', 'Cheetah', ('cheetah', 'spitfire'), ('*.tmpl', '*.spt'), ('application/x-cheetah', 'application/x-spitfire')),
'CheetahXmlLexer': ('pygments.lexers.templates', 'XML+Cheetah', ('xml+cheetah', 'xml+spitfire'), (), ('application/xml+cheetah', 'application/xml+spitfire')),
'ClayLexer': ('pygments.lexers.compiled', 'Clay', ('clay',), ('*.clay',), ('text/x-clay',)),
'ClojureLexer': ('pygments.lexers.jvm', 'Clojure', ('clojure', 'clj'), ('*.clj',), ('text/x-clojure', 'application/x-clojure')),
'CobolFreeformatLexer': ('pygments.lexers.compiled', 'COBOLFree', ('cobolfree',), ('*.cbl', '*.CBL'), ()),
'CobolLexer': ('pygments.lexers.compiled', 'COBOL', ('cobol',), ('*.cob', '*.COB', '*.cpy', '*.CPY'), ('text/x-cobol',)),
'CoffeeScriptLexer': ('pygments.lexers.web', 'CoffeeScript', ('coffee-script', 'coffeescript', 'coffee'), ('*.coffee',), ('text/coffeescript',)),
'ColdfusionHtmlLexer': ('pygments.lexers.templates', 'Coldfusion HTML', ('cfm',), ('*.cfm', '*.cfml', '*.cfc'), ('application/x-coldfusion',)),
'ColdfusionLexer': ('pygments.lexers.templates', 'cfstatement', ('cfs',), (), ()),
'CommonLispLexer': ('pygments.lexers.functional', 'Common Lisp', ('common-lisp', 'cl', 'lisp'), ('*.cl', '*.lisp', '*.el'), ('text/x-common-lisp',)),
'CoqLexer': ('pygments.lexers.functional', 'Coq', ('coq',), ('*.v',), ('text/x-coq',)),
'CppLexer': ('pygments.lexers.compiled', 'C++', ('cpp', 'c++'), ('*.cpp', '*.hpp', '*.c++', '*.h++', '*.cc', '*.hh', '*.cxx', '*.hxx', '*.C', '*.H', '*.cp', '*.CPP'), ('text/x-c++hdr', 'text/x-c++src')),
'CppObjdumpLexer': ('pygments.lexers.asm', 'cpp-objdump', ('cpp-objdump', 'c++-objdumb', 'cxx-objdump'), ('*.cpp-objdump', '*.c++-objdump', '*.cxx-objdump'), ('text/x-cpp-objdump',)),
'CrocLexer': ('pygments.lexers.agile', 'Croc', ('croc',), ('*.croc',), ('text/x-crocsrc',)),
'CssDjangoLexer': ('pygments.lexers.templates', 'CSS+Django/Jinja', ('css+django', 'css+jinja'), (), ('text/css+django', 'text/css+jinja')),
'CssErbLexer': ('pygments.lexers.templates', 'CSS+Ruby', ('css+erb', 'css+ruby'), (), ('text/css+ruby',)),
'CssGenshiLexer': ('pygments.lexers.templates', 'CSS+Genshi Text', ('css+genshitext', 'css+genshi'), (), ('text/css+genshi',)),
'CssLexer': ('pygments.lexers.web', 'CSS', ('css',), ('*.css',), ('text/css',)),
'CssPhpLexer': ('pygments.lexers.templates', 'CSS+PHP', ('css+php',), (), ('text/css+php',)),
'CssSmartyLexer': ('pygments.lexers.templates', 'CSS+Smarty', ('css+smarty',), (), ('text/css+smarty',)),
'CudaLexer': ('pygments.lexers.compiled', 'CUDA', ('cuda', 'cu'), ('*.cu', '*.cuh'), ('text/x-cuda',)),
'CythonLexer': ('pygments.lexers.compiled', 'Cython', ('cython', 'pyx', 'pyrex'), ('*.pyx', '*.pxd', '*.pxi'), ('text/x-cython', 'application/x-cython')),
'DLexer': ('pygments.lexers.compiled', 'D', ('d',), ('*.d', '*.di'), ('text/x-dsrc',)),
'DObjdumpLexer': ('pygments.lexers.asm', 'd-objdump', ('d-objdump',), ('*.d-objdump',), ('text/x-d-objdump',)),
'DarcsPatchLexer': ('pygments.lexers.text', 'Darcs Patch', ('dpatch',), ('*.dpatch', '*.darcspatch'), ()),
'DartLexer': ('pygments.lexers.web', 'Dart', ('dart',), ('*.dart',), ('text/x-dart',)),
'DebianControlLexer': ('pygments.lexers.text', 'Debian Control file', ('control', 'debcontrol'), ('control',), ()),
'DelphiLexer': ('pygments.lexers.compiled', 'Delphi', ('delphi', 'pas', 'pascal', 'objectpascal'), ('*.pas',), ('text/x-pascal',)),
'DgLexer': ('pygments.lexers.agile', 'dg', ('dg',), ('*.dg',), ('text/x-dg',)),
'DiffLexer': ('pygments.lexers.text', 'Diff', ('diff', 'udiff'), ('*.diff', '*.patch'), ('text/x-diff', 'text/x-patch')),
'DjangoLexer': ('pygments.lexers.templates', 'Django/Jinja', ('django', 'jinja'), (), ('application/x-django-templating', 'application/x-jinja')),
'DtdLexer': ('pygments.lexers.web', 'DTD', ('dtd',), ('*.dtd',), ('application/xml-dtd',)),
'DuelLexer': ('pygments.lexers.web', 'Duel', ('duel', 'Duel Engine', 'Duel View', 'JBST', 'jbst', 'JsonML+BST'), ('*.duel', '*.jbst'), ('text/x-duel', 'text/x-jbst')),
'DylanConsoleLexer': ('pygments.lexers.compiled', 'Dylan session', ('dylan-console', 'dylan-repl'), ('*.dylan-console',), ('text/x-dylan-console',)),
'DylanLexer': ('pygments.lexers.compiled', 'Dylan', ('dylan',), ('*.dylan', '*.dyl', '*.intr'), ('text/x-dylan',)),
'DylanLidLexer': ('pygments.lexers.compiled', 'DylanLID', ('dylan-lid', 'lid'), ('*.lid', '*.hdp'), ('text/x-dylan-lid',)),
'ECLLexer': ('pygments.lexers.other', 'ECL', ('ecl',), ('*.ecl',), ('application/x-ecl',)),
'ECLexer': ('pygments.lexers.compiled', 'eC', ('ec',), ('*.ec', '*.eh'), ('text/x-echdr', 'text/x-ecsrc')),
'EbnfLexer': ('pygments.lexers.text', 'EBNF', ('ebnf',), ('*.ebnf',), ('text/x-ebnf',)),
'ElixirConsoleLexer': ('pygments.lexers.functional', 'Elixir iex session', ('iex',), (), ('text/x-elixir-shellsession',)),
'ElixirLexer': ('pygments.lexers.functional', 'Elixir', ('elixir', 'ex', 'exs'), ('*.ex', '*.exs'), ('text/x-elixir',)),
'ErbLexer': ('pygments.lexers.templates', 'ERB', ('erb',), (), ('application/x-ruby-templating',)),
'ErlangLexer': ('pygments.lexers.functional', 'Erlang', ('erlang',), ('*.erl', '*.hrl', '*.es', '*.escript'), ('text/x-erlang',)),
'ErlangShellLexer': ('pygments.lexers.functional', 'Erlang erl session', ('erl',), ('*.erl-sh',), ('text/x-erl-shellsession',)),
'EvoqueHtmlLexer': ('pygments.lexers.templates', 'HTML+Evoque', ('html+evoque',), ('*.html',), ('text/html+evoque',)),
'EvoqueLexer': ('pygments.lexers.templates', 'Evoque', ('evoque',), ('*.evoque',), ('application/x-evoque',)),
'EvoqueXmlLexer': ('pygments.lexers.templates', 'XML+Evoque', ('xml+evoque',), ('*.xml',), ('application/xml+evoque',)),
'FSharpLexer': ('pygments.lexers.dotnet', 'FSharp', ('fsharp',), ('*.fs', '*.fsi'), ('text/x-fsharp',)),
'FactorLexer': ('pygments.lexers.agile', 'Factor', ('factor',), ('*.factor',), ('text/x-factor',)),
'FancyLexer': ('pygments.lexers.agile', 'Fancy', ('fancy', 'fy'), ('*.fy', '*.fancypack'), ('text/x-fancysrc',)),
'FantomLexer': ('pygments.lexers.compiled', 'Fantom', ('fan',), ('*.fan',), ('application/x-fantom',)),
'FelixLexer': ('pygments.lexers.compiled', 'Felix', ('felix', 'flx'), ('*.flx', '*.flxh'), ('text/x-felix',)),
'FortranLexer': ('pygments.lexers.compiled', 'Fortran', ('fortran',), ('*.f', '*.f90', '*.F', '*.F90'), ('text/x-fortran',)),
'FoxProLexer': ('pygments.lexers.foxpro', 'FoxPro', ('Clipper', 'XBase'), ('*.PRG', '*.prg'), ()),
'GLShaderLexer': ('pygments.lexers.compiled', 'GLSL', ('glsl',), ('*.vert', '*.frag', '*.geo'), ('text/x-glslsrc',)),
'GasLexer': ('pygments.lexers.asm', 'GAS', ('gas', 'asm'), ('*.s', '*.S'), ('text/x-gas',)),
'GenshiLexer': ('pygments.lexers.templates', 'Genshi', ('genshi', 'kid', 'xml+genshi', 'xml+kid'), ('*.kid',), ('application/x-genshi', 'application/x-kid')),
'GenshiTextLexer': ('pygments.lexers.templates', 'Genshi Text', ('genshitext',), (), ('application/x-genshi-text', 'text/x-genshi')),
'GettextLexer': ('pygments.lexers.text', 'Gettext Catalog', ('pot', 'po'), ('*.pot', '*.po'), ('application/x-gettext', 'text/x-gettext', 'text/gettext')),
'GherkinLexer': ('pygments.lexers.other', 'Gherkin', ('Cucumber', 'cucumber', 'Gherkin', 'gherkin'), ('*.feature',), ('text/x-gherkin',)),
'GnuplotLexer': ('pygments.lexers.other', 'Gnuplot', ('gnuplot',), ('*.plot', '*.plt'), ('text/x-gnuplot',)),
'GoLexer': ('pygments.lexers.compiled', 'Go', ('go',), ('*.go',), ('text/x-gosrc',)),
'GoodDataCLLexer': ('pygments.lexers.other', 'GoodData-CL', ('gooddata-cl',), ('*.gdc',), ('text/x-gooddata-cl',)),
'GosuLexer': ('pygments.lexers.jvm', 'Gosu', ('gosu',), ('*.gs', '*.gsx', '*.gsp', '*.vark'), ('text/x-gosu',)),
'GosuTemplateLexer': ('pygments.lexers.jvm', 'Gosu Template', ('gst',), ('*.gst',), ('text/x-gosu-template',)),
'GroffLexer': ('pygments.lexers.text', 'Groff', ('groff', 'nroff', 'man'), ('*.[1234567]', '*.man'), ('application/x-troff', 'text/troff')),
'GroovyLexer': ('pygments.lexers.jvm', 'Groovy', ('groovy',), ('*.groovy',), ('text/x-groovy',)),
'HamlLexer': ('pygments.lexers.web', 'Haml', ('haml', 'HAML'), ('*.haml',), ('text/x-haml',)),
'HaskellLexer': ('pygments.lexers.functional', 'Haskell', ('haskell', 'hs'), ('*.hs',), ('text/x-haskell',)),
'HaxeLexer': ('pygments.lexers.web', 'Haxe', ('hx', 'Haxe', 'haxe', 'haXe', 'hxsl'), ('*.hx', '*.hxsl'), ('text/haxe', 'text/x-haxe', 'text/x-hx')),
'HtmlDjangoLexer': ('pygments.lexers.templates', 'HTML+Django/Jinja', ('html+django', 'html+jinja', 'htmldjango'), (), ('text/html+django', 'text/html+jinja')),
'HtmlGenshiLexer': ('pygments.lexers.templates', 'HTML+Genshi', ('html+genshi', 'html+kid'), (), ('text/html+genshi',)),
'HtmlLexer': ('pygments.lexers.web', 'HTML', ('html',), ('*.html', '*.htm', '*.xhtml', '*.xslt'), ('text/html', 'application/xhtml+xml')),
'HtmlPhpLexer': ('pygments.lexers.templates', 'HTML+PHP', ('html+php',), ('*.phtml',), ('application/x-php', 'application/x-httpd-php', 'application/x-httpd-php3', 'application/x-httpd-php4', 'application/x-httpd-php5')),
'HtmlSmartyLexer': ('pygments.lexers.templates', 'HTML+Smarty', ('html+smarty',), (), ('text/html+smarty',)),
'HttpLexer': ('pygments.lexers.text', 'HTTP', ('http',), (), ()),
'HxmlLexer': ('pygments.lexers.text', 'Hxml', ('haxeml', 'hxml'), ('*.hxml',), ()),
'HybrisLexer': ('pygments.lexers.other', 'Hybris', ('hybris', 'hy'), ('*.hy', '*.hyb'), ('text/x-hybris', 'application/x-hybris')),
'IDLLexer': ('pygments.lexers.math', 'IDL', ('idl',), ('*.pro',), ('text/idl',)),
'IgorLexer': ('pygments.lexers.math', 'Igor', ('igor', 'igorpro'), ('*.ipf',), ('text/ipf',)),
'IniLexer': ('pygments.lexers.text', 'INI', ('ini', 'cfg', 'dosini'), ('*.ini', '*.cfg'), ('text/x-ini',)),
'IoLexer': ('pygments.lexers.agile', 'Io', ('io',), ('*.io',), ('text/x-iosrc',)),
'IokeLexer': ('pygments.lexers.jvm', 'Ioke', ('ioke', 'ik'), ('*.ik',), ('text/x-iokesrc',)),
'IrcLogsLexer': ('pygments.lexers.text', 'IRC logs', ('irc',), ('*.weechatlog',), ('text/x-irclog',)),
'JadeLexer': ('pygments.lexers.web', 'Jade', ('jade', 'JADE'), ('*.jade',), ('text/x-jade',)),
'JagsLexer': ('pygments.lexers.math', 'JAGS', ('jags',), ('*.jag', '*.bug'), ()),
'JavaLexer': ('pygments.lexers.jvm', 'Java', ('java',), ('*.java',), ('text/x-java',)),
'JavascriptDjangoLexer': ('pygments.lexers.templates', 'JavaScript+Django/Jinja', ('js+django', 'javascript+django', 'js+jinja', 'javascript+jinja'), (), ('application/x-javascript+django', 'application/x-javascript+jinja', 'text/x-javascript+django', 'text/x-javascript+jinja', 'text/javascript+django', 'text/javascript+jinja')),
'JavascriptErbLexer': ('pygments.lexers.templates', 'JavaScript+Ruby', ('js+erb', 'javascript+erb', 'js+ruby', 'javascript+ruby'), (), ('application/x-javascript+ruby', 'text/x-javascript+ruby', 'text/javascript+ruby')),
'JavascriptGenshiLexer': ('pygments.lexers.templates', 'JavaScript+Genshi Text', ('js+genshitext', 'js+genshi', 'javascript+genshitext', 'javascript+genshi'), (), ('application/x-javascript+genshi', 'text/x-javascript+genshi', 'text/javascript+genshi')),
'JavascriptLexer': ('pygments.lexers.web', 'JavaScript', ('js', 'javascript'), ('*.js',), ('application/javascript', 'application/x-javascript', 'text/x-javascript', 'text/javascript')),
'JavascriptPhpLexer': ('pygments.lexers.templates', 'JavaScript+PHP', ('js+php', 'javascript+php'), (), ('application/x-javascript+php', 'text/x-javascript+php', 'text/javascript+php')),
'JavascriptSmartyLexer': ('pygments.lexers.templates', 'JavaScript+Smarty', ('js+smarty', 'javascript+smarty'), (), ('application/x-javascript+smarty', 'text/x-javascript+smarty', 'text/javascript+smarty')),
'JsonLexer': ('pygments.lexers.web', 'JSON', ('json',), ('*.json',), ('application/json',)),
'JspLexer': ('pygments.lexers.templates', 'Java Server Page', ('jsp',), ('*.jsp',), ('application/x-jsp',)),
'JuliaConsoleLexer': ('pygments.lexers.math', 'Julia console', ('jlcon',), (), ()),
'JuliaLexer': ('pygments.lexers.math', 'Julia', ('julia', 'jl'), ('*.jl',), ('text/x-julia', 'application/x-julia')),
'KconfigLexer': ('pygments.lexers.other', 'Kconfig', ('kconfig', 'menuconfig', 'linux-config', 'kernel-config'), ('Kconfig', '*Config.in*', 'external.in*', 'standard-modules.in'), ('text/x-kconfig',)),
'KokaLexer': ('pygments.lexers.functional', 'Koka', ('koka',), ('*.kk', '*.kki'), ('text/x-koka',)),
'KotlinLexer': ('pygments.lexers.jvm', 'Kotlin', ('kotlin',), ('*.kt',), ('text/x-kotlin',)),
'LassoCssLexer': ('pygments.lexers.templates', 'CSS+Lasso', ('css+lasso',), (), ('text/css+lasso',)),
'LassoHtmlLexer': ('pygments.lexers.templates', 'HTML+Lasso', ('html+lasso',), (), ('text/html+lasso', 'application/x-httpd-lasso', 'application/x-httpd-lasso[89]')),
'LassoJavascriptLexer': ('pygments.lexers.templates', 'JavaScript+Lasso', ('js+lasso', 'javascript+lasso'), (), ('application/x-javascript+lasso', 'text/x-javascript+lasso', 'text/javascript+lasso')),
'LassoLexer': ('pygments.lexers.web', 'Lasso', ('lasso', 'lassoscript'), ('*.lasso', '*.lasso[89]'), ('text/x-lasso',)),
'LassoXmlLexer': ('pygments.lexers.templates', 'XML+Lasso', ('xml+lasso',), (), ('application/xml+lasso',)),
'LighttpdConfLexer': ('pygments.lexers.text', 'Lighttpd configuration file', ('lighty', 'lighttpd'), (), ('text/x-lighttpd-conf',)),
'LiterateAgdaLexer': ('pygments.lexers.functional', 'Literate Agda', ('lagda', 'literate-agda'), ('*.lagda',), ('text/x-literate-agda',)),
'LiterateHaskellLexer': ('pygments.lexers.functional', 'Literate Haskell', ('lhs', 'literate-haskell', 'lhaskell'), ('*.lhs',), ('text/x-literate-haskell',)),
'LiveScriptLexer': ('pygments.lexers.web', 'LiveScript', ('live-script', 'livescript'), ('*.ls',), ('text/livescript',)),
'LlvmLexer': ('pygments.lexers.asm', 'LLVM', ('llvm',), ('*.ll',), ('text/x-llvm',)),
'LogosLexer': ('pygments.lexers.compiled', 'Logos', ('logos',), ('*.x', '*.xi', '*.xm', '*.xmi'), ('text/x-logos',)),
'LogtalkLexer': ('pygments.lexers.other', 'Logtalk', ('logtalk',), ('*.lgt',), ('text/x-logtalk',)),
'LuaLexer': ('pygments.lexers.agile', 'Lua', ('lua',), ('*.lua', '*.wlua'), ('text/x-lua', 'application/x-lua')),
'MOOCodeLexer': ('pygments.lexers.other', 'MOOCode', ('moocode', 'moo'), ('*.moo',), ('text/x-moocode',)),
'MakefileLexer': ('pygments.lexers.text', 'Makefile', ('make', 'makefile', 'mf', 'bsdmake'), ('*.mak', 'Makefile', 'makefile', 'Makefile.*', 'GNUmakefile'), ('text/x-makefile',)),
'MakoCssLexer': ('pygments.lexers.templates', 'CSS+Mako', ('css+mako',), (), ('text/css+mako',)),
'MakoHtmlLexer': ('pygments.lexers.templates', 'HTML+Mako', ('html+mako',), (), ('text/html+mako',)),
'MakoJavascriptLexer': ('pygments.lexers.templates', 'JavaScript+Mako', ('js+mako', 'javascript+mako'), (), ('application/x-javascript+mako', 'text/x-javascript+mako', 'text/javascript+mako')),
'MakoLexer': ('pygments.lexers.templates', 'Mako', ('mako',), ('*.mao',), ('application/x-mako',)),
'MakoXmlLexer': ('pygments.lexers.templates', 'XML+Mako', ('xml+mako',), (), ('application/xml+mako',)),
'MaqlLexer': ('pygments.lexers.other', 'MAQL', ('maql',), ('*.maql',), ('text/x-gooddata-maql', 'application/x-gooddata-maql')),
'MasonLexer': ('pygments.lexers.templates', 'Mason', ('mason',), ('*.m', '*.mhtml', '*.mc', '*.mi', 'autohandler', 'dhandler'), ('application/x-mason',)),
'MatlabLexer': ('pygments.lexers.math', 'Matlab', ('matlab',), ('*.m',), ('text/matlab',)),
'MatlabSessionLexer': ('pygments.lexers.math', 'Matlab session', ('matlabsession',), (), ()),
'MiniDLexer': ('pygments.lexers.agile', 'MiniD', ('minid',), ('*.md',), ('text/x-minidsrc',)),
'ModelicaLexer': ('pygments.lexers.other', 'Modelica', ('modelica',), ('*.mo',), ('text/x-modelica',)),
'Modula2Lexer': ('pygments.lexers.compiled', 'Modula-2', ('modula2', 'm2'), ('*.def', '*.mod'), ('text/x-modula2',)),
'MoinWikiLexer': ('pygments.lexers.text', 'MoinMoin/Trac Wiki markup', ('trac-wiki', 'moin'), (), ('text/x-trac-wiki',)),
'MonkeyLexer': ('pygments.lexers.compiled', 'Monkey', ('monkey',), ('*.monkey',), ('text/x-monkey',)),
'MoonScriptLexer': ('pygments.lexers.agile', 'MoonScript', ('moon', 'moonscript'), ('*.moon',), ('text/x-moonscript', 'application/x-moonscript')),
'MscgenLexer': ('pygments.lexers.other', 'Mscgen', ('mscgen', 'msc'), ('*.msc',), ()),
'MuPADLexer': ('pygments.lexers.math', 'MuPAD', ('mupad',), ('*.mu',), ()),
'MxmlLexer': ('pygments.lexers.web', 'MXML', ('mxml',), ('*.mxml',), ()),
'MySqlLexer': ('pygments.lexers.sql', 'MySQL', ('mysql',), (), ('text/x-mysql',)),
'MyghtyCssLexer': ('pygments.lexers.templates', 'CSS+Myghty', ('css+myghty',), (), ('text/css+myghty',)),
'MyghtyHtmlLexer': ('pygments.lexers.templates', 'HTML+Myghty', ('html+myghty',), (), ('text/html+myghty',)),
'MyghtyJavascriptLexer': ('pygments.lexers.templates', 'JavaScript+Myghty', ('js+myghty', 'javascript+myghty'), (), ('application/x-javascript+myghty', 'text/x-javascript+myghty', 'text/javascript+mygthy')),
'MyghtyLexer': ('pygments.lexers.templates', 'Myghty', ('myghty',), ('*.myt', 'autodelegate'), ('application/x-myghty',)),
'MyghtyXmlLexer': ('pygments.lexers.templates', 'XML+Myghty', ('xml+myghty',), (), ('application/xml+myghty',)),
'NSISLexer': ('pygments.lexers.other', 'NSIS', ('nsis', 'nsi', 'nsh'), ('*.nsi', '*.nsh'), ('text/x-nsis',)),
'NasmLexer': ('pygments.lexers.asm', 'NASM', ('nasm',), ('*.asm', '*.ASM'), ('text/x-nasm',)),
'NemerleLexer': ('pygments.lexers.dotnet', 'Nemerle', ('nemerle',), ('*.n',), ('text/x-nemerle',)),
'NesCLexer': ('pygments.lexers.compiled', 'nesC', ('nesc',), ('*.nc',), ('text/x-nescsrc',)),
'NewLispLexer': ('pygments.lexers.functional', 'NewLisp', ('newlisp',), ('*.lsp', '*.nl'), ('text/x-newlisp', 'application/x-newlisp')),
'NewspeakLexer': ('pygments.lexers.other', 'Newspeak', ('newspeak',), ('*.ns2',), ('text/x-newspeak',)),
'NginxConfLexer': ('pygments.lexers.text', 'Nginx configuration file', ('nginx',), (), ('text/x-nginx-conf',)),
'NimrodLexer': ('pygments.lexers.compiled', 'Nimrod', ('nimrod', 'nim'), ('*.nim', '*.nimrod'), ('text/x-nimrod',)),
'NumPyLexer': ('pygments.lexers.math', 'NumPy', ('numpy',), (), ()),
'ObjdumpLexer': ('pygments.lexers.asm', 'objdump', ('objdump',), ('*.objdump',), ('text/x-objdump',)),
'ObjectiveCLexer': ('pygments.lexers.compiled', 'Objective-C', ('objective-c', 'objectivec', 'obj-c', 'objc'), ('*.m', '*.h'), ('text/x-objective-c',)),
'ObjectiveCppLexer': ('pygments.lexers.compiled', 'Objective-C++', ('objective-c++', 'objectivec++', 'obj-c++', 'objc++'), ('*.mm', '*.hh'), ('text/x-objective-c++',)),
'ObjectiveJLexer': ('pygments.lexers.web', 'Objective-J', ('objective-j', 'objectivej', 'obj-j', 'objj'), ('*.j',), ('text/x-objective-j',)),
'OcamlLexer': ('pygments.lexers.functional', 'OCaml', ('ocaml',), ('*.ml', '*.mli', '*.mll', '*.mly'), ('text/x-ocaml',)),
'OctaveLexer': ('pygments.lexers.math', 'Octave', ('octave',), ('*.m',), ('text/octave',)),
'OocLexer': ('pygments.lexers.compiled', 'Ooc', ('ooc',), ('*.ooc',), ('text/x-ooc',)),
'OpaLexer': ('pygments.lexers.functional', 'Opa', ('opa',), ('*.opa',), ('text/x-opa',)),
'OpenEdgeLexer': ('pygments.lexers.other', 'OpenEdge ABL', ('openedge', 'abl', 'progress'), ('*.p', '*.cls'), ('text/x-openedge', 'application/x-openedge')),
'Perl6Lexer': ('pygments.lexers.agile', 'Perl6', ('perl6', 'pl6'), ('*.pl', '*.pm', '*.nqp', '*.p6', '*.6pl', '*.p6l', '*.pl6', '*.6pm', '*.p6m', '*.pm6'), ('text/x-perl6', 'application/x-perl6')),
'PerlLexer': ('pygments.lexers.agile', 'Perl', ('perl', 'pl'), ('*.pl', '*.pm'), ('text/x-perl', 'application/x-perl')),
'PhpLexer': ('pygments.lexers.web', 'PHP', ('php', 'php3', 'php4', 'php5'), ('*.php', '*.php[345]', '*.inc'), ('text/x-php',)),
'PlPgsqlLexer': ('pygments.lexers.sql', 'PL/pgSQL', ('plpgsql',), (), ('text/x-plpgsql',)),
'PostScriptLexer': ('pygments.lexers.other', 'PostScript', ('postscript', 'postscr'), ('*.ps', '*.eps'), ('application/postscript',)),
'PostgresConsoleLexer': ('pygments.lexers.sql', 'PostgreSQL console (psql)', ('psql', 'postgresql-console', 'postgres-console'), (), ('text/x-postgresql-psql',)),
'PostgresLexer': ('pygments.lexers.sql', 'PostgreSQL SQL dialect', ('postgresql', 'postgres'), (), ('text/x-postgresql',)),
'PovrayLexer': ('pygments.lexers.other', 'POVRay', ('pov',), ('*.pov', '*.inc'), ('text/x-povray',)),
'PowerShellLexer': ('pygments.lexers.shell', 'PowerShell', ('powershell', 'posh', 'ps1', 'psm1'), ('*.ps1', '*.psm1'), ('text/x-powershell',)),
'PrologLexer': ('pygments.lexers.compiled', 'Prolog', ('prolog',), ('*.prolog', '*.pro', '*.pl'), ('text/x-prolog',)),
'PropertiesLexer': ('pygments.lexers.text', 'Properties', ('properties', 'jproperties'), ('*.properties',), ('text/x-java-properties',)),
'ProtoBufLexer': ('pygments.lexers.other', 'Protocol Buffer', ('protobuf', 'proto'), ('*.proto',), ()),
'PuppetLexer': ('pygments.lexers.other', 'Puppet', ('puppet',), ('*.pp',), ()),
'PyPyLogLexer': ('pygments.lexers.text', 'PyPy Log', ('pypylog', 'pypy'), ('*.pypylog',), ('application/x-pypylog',)),
'Python3Lexer': ('pygments.lexers.agile', 'Python 3', ('python3', 'py3'), (), ('text/x-python3', 'application/x-python3')),
'Python3TracebackLexer': ('pygments.lexers.agile', 'Python 3.0 Traceback', ('py3tb',), ('*.py3tb',), ('text/x-python3-traceback',)),
'PythonConsoleLexer': ('pygments.lexers.agile', 'Python console session', ('pycon',), (), ('text/x-python-doctest',)),
'PythonLexer': ('pygments.lexers.agile', 'Python', ('python', 'py', 'sage'), ('*.py', '*.pyw', '*.sc', 'SConstruct', 'SConscript', '*.tac', '*.sage'), ('text/x-python', 'application/x-python')),
'PythonTracebackLexer': ('pygments.lexers.agile', 'Python Traceback', ('pytb',), ('*.pytb',), ('text/x-python-traceback',)),
'QmlLexer': ('pygments.lexers.web', 'QML', ('qml', 'Qt Meta Language', 'Qt modeling Language'), ('*.qml',), ('application/x-qml',)),
'RConsoleLexer': ('pygments.lexers.math', 'RConsole', ('rconsole', 'rout'), ('*.Rout',), ()),
'RPMSpecLexer': ('pygments.lexers.other', 'RPMSpec', ('spec',), ('*.spec',), ('text/x-rpm-spec',)),
'RacketLexer': ('pygments.lexers.functional', 'Racket', ('racket', 'rkt'), ('*.rkt', '*.rktl'), ('text/x-racket', 'application/x-racket')),
'RagelCLexer': ('pygments.lexers.parsers', 'Ragel in C Host', ('ragel-c',), ('*.rl',), ()),
'RagelCppLexer': ('pygments.lexers.parsers', 'Ragel in CPP Host', ('ragel-cpp',), ('*.rl',), ()),
'RagelDLexer': ('pygments.lexers.parsers', 'Ragel in D Host', ('ragel-d',), ('*.rl',), ()),
'RagelEmbeddedLexer': ('pygments.lexers.parsers', 'Embedded Ragel', ('ragel-em',), ('*.rl',), ()),
'RagelJavaLexer': ('pygments.lexers.parsers', 'Ragel in Java Host', ('ragel-java',), ('*.rl',), ()),
'RagelLexer': ('pygments.lexers.parsers', 'Ragel', ('ragel',), (), ()),
'RagelObjectiveCLexer': ('pygments.lexers.parsers', 'Ragel in Objective C Host', ('ragel-objc',), ('*.rl',), ()),
'RagelRubyLexer': ('pygments.lexers.parsers', 'Ragel in Ruby Host', ('ragel-ruby', 'ragel-rb'), ('*.rl',), ()),
'RawTokenLexer': ('pygments.lexers.special', 'Raw token data', ('raw',), (), ('application/x-pygments-tokens',)),
'RdLexer': ('pygments.lexers.math', 'Rd', ('rd',), ('*.Rd',), ('text/x-r-doc',)),
'RebolLexer': ('pygments.lexers.other', 'REBOL', ('rebol',), ('*.r', '*.r3'), ('text/x-rebol',)),
'RedcodeLexer': ('pygments.lexers.other', 'Redcode', ('redcode',), ('*.cw',), ()),
'RegeditLexer': ('pygments.lexers.text', 'reg', ('registry',), ('*.reg',), ('text/x-windows-registry',)),
'RexxLexer': ('pygments.lexers.other', 'Rexx', ('rexx', 'ARexx', 'arexx'), ('*.rexx', '*.rex', '*.rx', '*.arexx'), ('text/x-rexx',)),
'RhtmlLexer': ('pygments.lexers.templates', 'RHTML', ('rhtml', 'html+erb', 'html+ruby'), ('*.rhtml',), ('text/html+ruby',)),
'RobotFrameworkLexer': ('pygments.lexers.other', 'RobotFramework', ('RobotFramework', 'robotframework'), ('*.txt', '*.robot'), ('text/x-robotframework',)),
'RstLexer': ('pygments.lexers.text', 'reStructuredText', ('rst', 'rest', 'restructuredtext'), ('*.rst', '*.rest'), ('text/x-rst', 'text/prs.fallenstein.rst')),
'RubyConsoleLexer': ('pygments.lexers.agile', 'Ruby irb session', ('rbcon', 'irb'), (), ('text/x-ruby-shellsession',)),
'RubyLexer': ('pygments.lexers.agile', 'Ruby', ('rb', 'ruby', 'duby'), ('*.rb', '*.rbw', 'Rakefile', '*.rake', '*.gemspec', '*.rbx', '*.duby'), ('text/x-ruby', 'application/x-ruby')),
'RustLexer': ('pygments.lexers.compiled', 'Rust', ('rust',), ('*.rs', '*.rc'), ('text/x-rustsrc',)),
'SLexer': ('pygments.lexers.math', 'S', ('splus', 's', 'r'), ('*.S', '*.R', '.Rhistory', '.Rprofile'), ('text/S-plus', 'text/S', 'text/x-r-source', 'text/x-r', 'text/x-R', 'text/x-r-history', 'text/x-r-profile')),
'SMLLexer': ('pygments.lexers.functional', 'Standard ML', ('sml',), ('*.sml', '*.sig', '*.fun'), ('text/x-standardml', 'application/x-standardml')),
'SassLexer': ('pygments.lexers.web', 'Sass', ('sass', 'SASS'), ('*.sass',), ('text/x-sass',)),
'ScalaLexer': ('pygments.lexers.jvm', 'Scala', ('scala',), ('*.scala',), ('text/x-scala',)),
'ScamlLexer': ('pygments.lexers.web', 'Scaml', ('scaml', 'SCAML'), ('*.scaml',), ('text/x-scaml',)),
'SchemeLexer': ('pygments.lexers.functional', 'Scheme', ('scheme', 'scm'), ('*.scm', '*.ss'), ('text/x-scheme', 'application/x-scheme')),
'ScilabLexer': ('pygments.lexers.math', 'Scilab', ('scilab',), ('*.sci', '*.sce', '*.tst'), ('text/scilab',)),
'ScssLexer': ('pygments.lexers.web', 'SCSS', ('scss',), ('*.scss',), ('text/x-scss',)),
'ShellSessionLexer': ('pygments.lexers.shell', 'Shell Session', ('shell-session',), ('*.shell-session',), ('application/x-sh-session',)),
'SmaliLexer': ('pygments.lexers.dalvik', 'Smali', ('smali',), ('*.smali',), ('text/smali',)),
'SmalltalkLexer': ('pygments.lexers.other', 'Smalltalk', ('smalltalk', 'squeak', 'st'), ('*.st',), ('text/x-smalltalk',)),
'SmartyLexer': ('pygments.lexers.templates', 'Smarty', ('smarty',), ('*.tpl',), ('application/x-smarty',)),
'SnobolLexer': ('pygments.lexers.other', 'Snobol', ('snobol',), ('*.snobol',), ('text/x-snobol',)),
'SourcePawnLexer': ('pygments.lexers.other', 'SourcePawn', ('sp',), ('*.sp',), ('text/x-sourcepawn',)),
'SourcesListLexer': ('pygments.lexers.text', 'Debian Sourcelist', ('sourceslist', 'sources.list', 'debsources'), ('sources.list',), ()),
'SqlLexer': ('pygments.lexers.sql', 'SQL', ('sql',), ('*.sql',), ('text/x-sql',)),
'SqliteConsoleLexer': ('pygments.lexers.sql', 'sqlite3con', ('sqlite3',), ('*.sqlite3-console',), ('text/x-sqlite3-console',)),
'SquidConfLexer': ('pygments.lexers.text', 'SquidConf', ('squidconf', 'squid.conf', 'squid'), ('squid.conf',), ('text/x-squidconf',)),
'SspLexer': ('pygments.lexers.templates', 'Scalate Server Page', ('ssp',), ('*.ssp',), ('application/x-ssp',)),
'StanLexer': ('pygments.lexers.math', 'Stan', ('stan',), ('*.stan',), ()),
'SwigLexer': ('pygments.lexers.compiled', 'SWIG', ('Swig', 'swig'), ('*.swg', '*.i'), ('text/swig',)),
'SystemVerilogLexer': ('pygments.lexers.hdl', 'systemverilog', ('systemverilog', 'sv'), ('*.sv', '*.svh'), ('text/x-systemverilog',)),
'TclLexer': ('pygments.lexers.agile', 'Tcl', ('tcl',), ('*.tcl',), ('text/x-tcl', 'text/x-script.tcl', 'application/x-tcl')),
'TcshLexer': ('pygments.lexers.shell', 'Tcsh', ('tcsh', 'csh'), ('*.tcsh', '*.csh'), ('application/x-csh',)),
'TeaTemplateLexer': ('pygments.lexers.templates', 'Tea', ('tea',), ('*.tea',), ('text/x-tea',)),
'TexLexer': ('pygments.lexers.text', 'TeX', ('tex', 'latex'), ('*.tex', '*.aux', '*.toc'), ('text/x-tex', 'text/x-latex')),
'TextLexer': ('pygments.lexers.special', 'Text only', ('text',), ('*.txt',), ('text/plain',)),
'TreetopLexer': ('pygments.lexers.parsers', 'Treetop', ('treetop',), ('*.treetop', '*.tt'), ()),
'TypeScriptLexer': ('pygments.lexers.web', 'TypeScript', ('ts',), ('*.ts',), ('text/x-typescript',)),
'UrbiscriptLexer': ('pygments.lexers.other', 'UrbiScript', ('urbiscript',), ('*.u',), ('application/x-urbiscript',)),
'VGLLexer': ('pygments.lexers.other', 'VGL', ('vgl',), ('*.rpf',), ()),
'ValaLexer': ('pygments.lexers.compiled', 'Vala', ('vala', 'vapi'), ('*.vala', '*.vapi'), ('text/x-vala',)),
'VbNetAspxLexer': ('pygments.lexers.dotnet', 'aspx-vb', ('aspx-vb',), ('*.aspx', '*.asax', '*.ascx', '*.ashx', '*.asmx', '*.axd'), ()),
'VbNetLexer': ('pygments.lexers.dotnet', 'VB.net', ('vb.net', 'vbnet'), ('*.vb', '*.bas'), ('text/x-vbnet', 'text/x-vba')),
'VelocityHtmlLexer': ('pygments.lexers.templates', 'HTML+Velocity', ('html+velocity',), (), ('text/html+velocity',)),
'VelocityLexer': ('pygments.lexers.templates', 'Velocity', ('velocity',), ('*.vm', '*.fhtml'), ()),
'VelocityXmlLexer': ('pygments.lexers.templates', 'XML+Velocity', ('xml+velocity',), (), ('application/xml+velocity',)),
'VerilogLexer': ('pygments.lexers.hdl', 'verilog', ('verilog', 'v'), ('*.v',), ('text/x-verilog',)),
'VhdlLexer': ('pygments.lexers.hdl', 'vhdl', ('vhdl',), ('*.vhdl', '*.vhd'), ('text/x-vhdl',)),
'VimLexer': ('pygments.lexers.text', 'VimL', ('vim',), ('*.vim', '.vimrc', '.exrc', '.gvimrc', '_vimrc', '_exrc', '_gvimrc', 'vimrc', 'gvimrc'), ('text/x-vim',)),
'XQueryLexer': ('pygments.lexers.web', 'XQuery', ('xquery', 'xqy', 'xq', 'xql', 'xqm'), ('*.xqy', '*.xquery', '*.xq', '*.xql', '*.xqm'), ('text/xquery', 'application/xquery')),
'XmlDjangoLexer': ('pygments.lexers.templates', 'XML+Django/Jinja', ('xml+django', 'xml+jinja'), (), ('application/xml+django', 'application/xml+jinja')),
'XmlErbLexer': ('pygments.lexers.templates', 'XML+Ruby', ('xml+erb', 'xml+ruby'), (), ('application/xml+ruby',)),
'XmlLexer': ('pygments.lexers.web', 'XML', ('xml',), ('*.xml', '*.xsl', '*.rss', '*.xslt', '*.xsd', '*.wsdl', '*.wsf'), ('text/xml', 'application/xml', 'image/svg+xml', 'application/rss+xml', 'application/atom+xml')),
'XmlPhpLexer': ('pygments.lexers.templates', 'XML+PHP', ('xml+php',), (), ('application/xml+php',)),
'XmlSmartyLexer': ('pygments.lexers.templates', 'XML+Smarty', ('xml+smarty',), (), ('application/xml+smarty',)),
'XsltLexer': ('pygments.lexers.web', 'XSLT', ('xslt',), ('*.xsl', '*.xslt', '*.xpl'), ('application/xsl+xml', 'application/xslt+xml')),
'XtendLexer': ('pygments.lexers.jvm', 'Xtend', ('xtend',), ('*.xtend',), ('text/x-xtend',)),
'YamlLexer': ('pygments.lexers.text', 'YAML', ('yaml',), ('*.yaml', '*.yml'), ('text/x-yaml',)),
}
if __name__ == '__main__':
    # Self-regeneration mode (Python 2 syntax — note the `print` statement
    # below): scan the sibling lexer modules, rebuild the LEXERS table, and
    # rewrite this very file in place.  Must be run from the lexers folder,
    # since it lists the current directory ('.').
    import sys
    import os
    # lookup lexers
    found_lexers = []
    # Make the package root importable so 'pygments.lexers.<mod>' resolves
    # to the modules sitting next to this file.
    sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))
    for filename in os.listdir('.'):
        # Skip private/support modules such as this one (leading underscore).
        if filename.endswith('.py') and not filename.startswith('_'):
            module_name = 'pygments.lexers.%s' % filename[:-3]
            print module_name
            # fromlist=[''] forces __import__ to return the submodule itself
            # rather than the top-level 'pygments' package.
            module = __import__(module_name, None, None, [''])
            for lexer_name in module.__all__:
                lexer = getattr(module, lexer_name)
                # Render one "'Name': (module, name, aliases, filenames,
                # mimetypes)" dict-entry string per exported lexer class.
                found_lexers.append(
                    '%r: %r' % (lexer_name,
                                (module_name,
                                 lexer.name,
                                 tuple(lexer.aliases),
                                 tuple(lexer.filenames),
                                 tuple(lexer.mimetypes))))
    # sort them, that should make the diff files for svn smaller
    found_lexers.sort()
    # extract useful sourcecode from this file
    f = open(__file__)
    try:
        content = f.read()
    finally:
        f.close()
    # Everything before the LEXERS table (header) and everything from the
    # __main__ guard onwards (footer) are preserved verbatim; only the dict
    # literal in between is regenerated from found_lexers.
    header = content[:content.find('LEXERS = {')]
    footer = content[content.find("if __name__ == '__main__':"):]
    # write new file
    # NOTE(review): 'wb' with str writes is Python-2 behavior; under
    # Python 3 this would raise TypeError — this script targets py2.
    f = open(__file__, 'wb')
    f.write(header)
    f.write('LEXERS = {\n    %s,\n}\n\n' % ',\n    '.join(found_lexers))
    f.write(footer)
    f.close()
|
AppVentus/AvTime-client | refs/heads/master | packages/wakatime/wakatime/packages/pygments2/pygments/lexers/_mapping.py | 189 | # -*- coding: utf-8 -*-
"""
pygments.lexers._mapping
~~~~~~~~~~~~~~~~~~~~~~~~
    Lexer mapping definitions. This file is generated by itself. Every time
    you change something on a builtin lexer definition, run this script from
the lexers folder to update it.
Do not alter the LEXERS dictionary by hand.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
LEXERS = {
'ABAPLexer': ('pygments.lexers.other', 'ABAP', ('abap',), ('*.abap',), ('text/x-abap',)),
'ActionScript3Lexer': ('pygments.lexers.web', 'ActionScript 3', ('as3', 'actionscript3'), ('*.as',), ('application/x-actionscript', 'text/x-actionscript', 'text/actionscript')),
'ActionScriptLexer': ('pygments.lexers.web', 'ActionScript', ('as', 'actionscript'), ('*.as',), ('application/x-actionscript3', 'text/x-actionscript3', 'text/actionscript3')),
'AdaLexer': ('pygments.lexers.compiled', 'Ada', ('ada', 'ada95ada2005'), ('*.adb', '*.ads', '*.ada'), ('text/x-ada',)),
'AgdaLexer': ('pygments.lexers.functional', 'Agda', ('agda',), ('*.agda',), ('text/x-agda',)),
'AntlrActionScriptLexer': ('pygments.lexers.parsers', 'ANTLR With ActionScript Target', ('antlr-as', 'antlr-actionscript'), ('*.G', '*.g'), ()),
'AntlrCSharpLexer': ('pygments.lexers.parsers', 'ANTLR With C# Target', ('antlr-csharp', 'antlr-c#'), ('*.G', '*.g'), ()),
'AntlrCppLexer': ('pygments.lexers.parsers', 'ANTLR With CPP Target', ('antlr-cpp',), ('*.G', '*.g'), ()),
'AntlrJavaLexer': ('pygments.lexers.parsers', 'ANTLR With Java Target', ('antlr-java',), ('*.G', '*.g'), ()),
'AntlrLexer': ('pygments.lexers.parsers', 'ANTLR', ('antlr',), (), ()),
'AntlrObjectiveCLexer': ('pygments.lexers.parsers', 'ANTLR With ObjectiveC Target', ('antlr-objc',), ('*.G', '*.g'), ()),
'AntlrPerlLexer': ('pygments.lexers.parsers', 'ANTLR With Perl Target', ('antlr-perl',), ('*.G', '*.g'), ()),
'AntlrPythonLexer': ('pygments.lexers.parsers', 'ANTLR With Python Target', ('antlr-python',), ('*.G', '*.g'), ()),
'AntlrRubyLexer': ('pygments.lexers.parsers', 'ANTLR With Ruby Target', ('antlr-ruby', 'antlr-rb'), ('*.G', '*.g'), ()),
'ApacheConfLexer': ('pygments.lexers.text', 'ApacheConf', ('apacheconf', 'aconf', 'apache'), ('.htaccess', 'apache.conf', 'apache2.conf'), ('text/x-apacheconf',)),
'AppleScriptLexer': ('pygments.lexers.other', 'AppleScript', ('applescript',), ('*.applescript',), ()),
'AspectJLexer': ('pygments.lexers.jvm', 'AspectJ', ('aspectj',), ('*.aj',), ('text/x-aspectj',)),
'AsymptoteLexer': ('pygments.lexers.other', 'Asymptote', ('asy', 'asymptote'), ('*.asy',), ('text/x-asymptote',)),
'AutoItLexer': ('pygments.lexers.other', 'AutoIt', ('autoit', 'Autoit'), ('*.au3',), ('text/x-autoit',)),
'AutohotkeyLexer': ('pygments.lexers.other', 'autohotkey', ('ahk', 'autohotkey'), ('*.ahk', '*.ahkl'), ('text/x-autohotkey',)),
'AwkLexer': ('pygments.lexers.other', 'Awk', ('awk', 'gawk', 'mawk', 'nawk'), ('*.awk',), ('application/x-awk',)),
'BBCodeLexer': ('pygments.lexers.text', 'BBCode', ('bbcode',), (), ('text/x-bbcode',)),
'BaseMakefileLexer': ('pygments.lexers.text', 'Base Makefile', ('basemake',), (), ()),
'BashLexer': ('pygments.lexers.shell', 'Bash', ('bash', 'sh', 'ksh'), ('*.sh', '*.ksh', '*.bash', '*.ebuild', '*.eclass', '.bashrc', 'bashrc', '.bash_*', 'bash_*'), ('application/x-sh', 'application/x-shellscript')),
'BashSessionLexer': ('pygments.lexers.shell', 'Bash Session', ('console',), ('*.sh-session',), ('application/x-shell-session',)),
'BatchLexer': ('pygments.lexers.shell', 'Batchfile', ('bat', 'dosbatch', 'winbatch'), ('*.bat', '*.cmd'), ('application/x-dos-batch',)),
'BefungeLexer': ('pygments.lexers.other', 'Befunge', ('befunge',), ('*.befunge',), ('application/x-befunge',)),
'BlitzBasicLexer': ('pygments.lexers.compiled', 'BlitzBasic', ('blitzbasic', 'b3d', 'bplus'), ('*.bb', '*.decls'), ('text/x-bb',)),
'BlitzMaxLexer': ('pygments.lexers.compiled', 'BlitzMax', ('blitzmax', 'bmax'), ('*.bmx',), ('text/x-bmx',)),
'BooLexer': ('pygments.lexers.dotnet', 'Boo', ('boo',), ('*.boo',), ('text/x-boo',)),
'BrainfuckLexer': ('pygments.lexers.other', 'Brainfuck', ('brainfuck', 'bf'), ('*.bf', '*.b'), ('application/x-brainfuck',)),
'BroLexer': ('pygments.lexers.other', 'Bro', ('bro',), ('*.bro',), ()),
'BugsLexer': ('pygments.lexers.math', 'BUGS', ('bugs', 'winbugs', 'openbugs'), ('*.bug',), ()),
'CLexer': ('pygments.lexers.compiled', 'C', ('c',), ('*.c', '*.h', '*.idc'), ('text/x-chdr', 'text/x-csrc')),
'CMakeLexer': ('pygments.lexers.text', 'CMake', ('cmake',), ('*.cmake', 'CMakeLists.txt'), ('text/x-cmake',)),
'CObjdumpLexer': ('pygments.lexers.asm', 'c-objdump', ('c-objdump',), ('*.c-objdump',), ('text/x-c-objdump',)),
'CSharpAspxLexer': ('pygments.lexers.dotnet', 'aspx-cs', ('aspx-cs',), ('*.aspx', '*.asax', '*.ascx', '*.ashx', '*.asmx', '*.axd'), ()),
'CSharpLexer': ('pygments.lexers.dotnet', 'C#', ('csharp', 'c#'), ('*.cs',), ('text/x-csharp',)),
'Ca65Lexer': ('pygments.lexers.asm', 'ca65', ('ca65',), ('*.s',), ()),
'CbmBasicV2Lexer': ('pygments.lexers.other', 'CBM BASIC V2', ('cbmbas',), ('*.bas',), ()),
'CeylonLexer': ('pygments.lexers.jvm', 'Ceylon', ('ceylon',), ('*.ceylon',), ('text/x-ceylon',)),
'Cfengine3Lexer': ('pygments.lexers.other', 'CFEngine3', ('cfengine3', 'cf3'), ('*.cf',), ()),
'CheetahHtmlLexer': ('pygments.lexers.templates', 'HTML+Cheetah', ('html+cheetah', 'html+spitfire', 'htmlcheetah'), (), ('text/html+cheetah', 'text/html+spitfire')),
'CheetahJavascriptLexer': ('pygments.lexers.templates', 'JavaScript+Cheetah', ('js+cheetah', 'javascript+cheetah', 'js+spitfire', 'javascript+spitfire'), (), ('application/x-javascript+cheetah', 'text/x-javascript+cheetah', 'text/javascript+cheetah', 'application/x-javascript+spitfire', 'text/x-javascript+spitfire', 'text/javascript+spitfire')),
'CheetahLexer': ('pygments.lexers.templates', 'Cheetah', ('cheetah', 'spitfire'), ('*.tmpl', '*.spt'), ('application/x-cheetah', 'application/x-spitfire')),
'CheetahXmlLexer': ('pygments.lexers.templates', 'XML+Cheetah', ('xml+cheetah', 'xml+spitfire'), (), ('application/xml+cheetah', 'application/xml+spitfire')),
'ClayLexer': ('pygments.lexers.compiled', 'Clay', ('clay',), ('*.clay',), ('text/x-clay',)),
'ClojureLexer': ('pygments.lexers.jvm', 'Clojure', ('clojure', 'clj'), ('*.clj',), ('text/x-clojure', 'application/x-clojure')),
'CobolFreeformatLexer': ('pygments.lexers.compiled', 'COBOLFree', ('cobolfree',), ('*.cbl', '*.CBL'), ()),
'CobolLexer': ('pygments.lexers.compiled', 'COBOL', ('cobol',), ('*.cob', '*.COB', '*.cpy', '*.CPY'), ('text/x-cobol',)),
'CoffeeScriptLexer': ('pygments.lexers.web', 'CoffeeScript', ('coffee-script', 'coffeescript', 'coffee'), ('*.coffee',), ('text/coffeescript',)),
'ColdfusionHtmlLexer': ('pygments.lexers.templates', 'Coldfusion HTML', ('cfm',), ('*.cfm', '*.cfml', '*.cfc'), ('application/x-coldfusion',)),
'ColdfusionLexer': ('pygments.lexers.templates', 'cfstatement', ('cfs',), (), ()),
'CommonLispLexer': ('pygments.lexers.functional', 'Common Lisp', ('common-lisp', 'cl', 'lisp'), ('*.cl', '*.lisp', '*.el'), ('text/x-common-lisp',)),
'CoqLexer': ('pygments.lexers.functional', 'Coq', ('coq',), ('*.v',), ('text/x-coq',)),
'CppLexer': ('pygments.lexers.compiled', 'C++', ('cpp', 'c++'), ('*.cpp', '*.hpp', '*.c++', '*.h++', '*.cc', '*.hh', '*.cxx', '*.hxx', '*.C', '*.H', '*.cp', '*.CPP'), ('text/x-c++hdr', 'text/x-c++src')),
'CppObjdumpLexer': ('pygments.lexers.asm', 'cpp-objdump', ('cpp-objdump', 'c++-objdumb', 'cxx-objdump'), ('*.cpp-objdump', '*.c++-objdump', '*.cxx-objdump'), ('text/x-cpp-objdump',)),
'CrocLexer': ('pygments.lexers.agile', 'Croc', ('croc',), ('*.croc',), ('text/x-crocsrc',)),
'CssDjangoLexer': ('pygments.lexers.templates', 'CSS+Django/Jinja', ('css+django', 'css+jinja'), (), ('text/css+django', 'text/css+jinja')),
'CssErbLexer': ('pygments.lexers.templates', 'CSS+Ruby', ('css+erb', 'css+ruby'), (), ('text/css+ruby',)),
'CssGenshiLexer': ('pygments.lexers.templates', 'CSS+Genshi Text', ('css+genshitext', 'css+genshi'), (), ('text/css+genshi',)),
'CssLexer': ('pygments.lexers.web', 'CSS', ('css',), ('*.css',), ('text/css',)),
'CssPhpLexer': ('pygments.lexers.templates', 'CSS+PHP', ('css+php',), (), ('text/css+php',)),
'CssSmartyLexer': ('pygments.lexers.templates', 'CSS+Smarty', ('css+smarty',), (), ('text/css+smarty',)),
'CudaLexer': ('pygments.lexers.compiled', 'CUDA', ('cuda', 'cu'), ('*.cu', '*.cuh'), ('text/x-cuda',)),
'CythonLexer': ('pygments.lexers.compiled', 'Cython', ('cython', 'pyx', 'pyrex'), ('*.pyx', '*.pxd', '*.pxi'), ('text/x-cython', 'application/x-cython')),
'DLexer': ('pygments.lexers.compiled', 'D', ('d',), ('*.d', '*.di'), ('text/x-dsrc',)),
'DObjdumpLexer': ('pygments.lexers.asm', 'd-objdump', ('d-objdump',), ('*.d-objdump',), ('text/x-d-objdump',)),
'DarcsPatchLexer': ('pygments.lexers.text', 'Darcs Patch', ('dpatch',), ('*.dpatch', '*.darcspatch'), ()),
'DartLexer': ('pygments.lexers.web', 'Dart', ('dart',), ('*.dart',), ('text/x-dart',)),
'DebianControlLexer': ('pygments.lexers.text', 'Debian Control file', ('control', 'debcontrol'), ('control',), ()),
'DelphiLexer': ('pygments.lexers.compiled', 'Delphi', ('delphi', 'pas', 'pascal', 'objectpascal'), ('*.pas',), ('text/x-pascal',)),
'DgLexer': ('pygments.lexers.agile', 'dg', ('dg',), ('*.dg',), ('text/x-dg',)),
'DiffLexer': ('pygments.lexers.text', 'Diff', ('diff', 'udiff'), ('*.diff', '*.patch'), ('text/x-diff', 'text/x-patch')),
'DjangoLexer': ('pygments.lexers.templates', 'Django/Jinja', ('django', 'jinja'), (), ('application/x-django-templating', 'application/x-jinja')),
'DtdLexer': ('pygments.lexers.web', 'DTD', ('dtd',), ('*.dtd',), ('application/xml-dtd',)),
'DuelLexer': ('pygments.lexers.web', 'Duel', ('duel', 'Duel Engine', 'Duel View', 'JBST', 'jbst', 'JsonML+BST'), ('*.duel', '*.jbst'), ('text/x-duel', 'text/x-jbst')),
'DylanConsoleLexer': ('pygments.lexers.compiled', 'Dylan session', ('dylan-console', 'dylan-repl'), ('*.dylan-console',), ('text/x-dylan-console',)),
'DylanLexer': ('pygments.lexers.compiled', 'Dylan', ('dylan',), ('*.dylan', '*.dyl', '*.intr'), ('text/x-dylan',)),
'DylanLidLexer': ('pygments.lexers.compiled', 'DylanLID', ('dylan-lid', 'lid'), ('*.lid', '*.hdp'), ('text/x-dylan-lid',)),
'ECLLexer': ('pygments.lexers.other', 'ECL', ('ecl',), ('*.ecl',), ('application/x-ecl',)),
'ECLexer': ('pygments.lexers.compiled', 'eC', ('ec',), ('*.ec', '*.eh'), ('text/x-echdr', 'text/x-ecsrc')),
'EbnfLexer': ('pygments.lexers.text', 'EBNF', ('ebnf',), ('*.ebnf',), ('text/x-ebnf',)),
'ElixirConsoleLexer': ('pygments.lexers.functional', 'Elixir iex session', ('iex',), (), ('text/x-elixir-shellsession',)),
'ElixirLexer': ('pygments.lexers.functional', 'Elixir', ('elixir', 'ex', 'exs'), ('*.ex', '*.exs'), ('text/x-elixir',)),
'ErbLexer': ('pygments.lexers.templates', 'ERB', ('erb',), (), ('application/x-ruby-templating',)),
'ErlangLexer': ('pygments.lexers.functional', 'Erlang', ('erlang',), ('*.erl', '*.hrl', '*.es', '*.escript'), ('text/x-erlang',)),
'ErlangShellLexer': ('pygments.lexers.functional', 'Erlang erl session', ('erl',), ('*.erl-sh',), ('text/x-erl-shellsession',)),
'EvoqueHtmlLexer': ('pygments.lexers.templates', 'HTML+Evoque', ('html+evoque',), ('*.html',), ('text/html+evoque',)),
'EvoqueLexer': ('pygments.lexers.templates', 'Evoque', ('evoque',), ('*.evoque',), ('application/x-evoque',)),
'EvoqueXmlLexer': ('pygments.lexers.templates', 'XML+Evoque', ('xml+evoque',), ('*.xml',), ('application/xml+evoque',)),
'FSharpLexer': ('pygments.lexers.dotnet', 'FSharp', ('fsharp',), ('*.fs', '*.fsi'), ('text/x-fsharp',)),
'FactorLexer': ('pygments.lexers.agile', 'Factor', ('factor',), ('*.factor',), ('text/x-factor',)),
'FancyLexer': ('pygments.lexers.agile', 'Fancy', ('fancy', 'fy'), ('*.fy', '*.fancypack'), ('text/x-fancysrc',)),
'FantomLexer': ('pygments.lexers.compiled', 'Fantom', ('fan',), ('*.fan',), ('application/x-fantom',)),
'FelixLexer': ('pygments.lexers.compiled', 'Felix', ('felix', 'flx'), ('*.flx', '*.flxh'), ('text/x-felix',)),
'FortranLexer': ('pygments.lexers.compiled', 'Fortran', ('fortran',), ('*.f', '*.f90', '*.F', '*.F90'), ('text/x-fortran',)),
'FoxProLexer': ('pygments.lexers.foxpro', 'FoxPro', ('Clipper', 'XBase'), ('*.PRG', '*.prg'), ()),
'GLShaderLexer': ('pygments.lexers.compiled', 'GLSL', ('glsl',), ('*.vert', '*.frag', '*.geo'), ('text/x-glslsrc',)),
'GasLexer': ('pygments.lexers.asm', 'GAS', ('gas', 'asm'), ('*.s', '*.S'), ('text/x-gas',)),
'GenshiLexer': ('pygments.lexers.templates', 'Genshi', ('genshi', 'kid', 'xml+genshi', 'xml+kid'), ('*.kid',), ('application/x-genshi', 'application/x-kid')),
'GenshiTextLexer': ('pygments.lexers.templates', 'Genshi Text', ('genshitext',), (), ('application/x-genshi-text', 'text/x-genshi')),
'GettextLexer': ('pygments.lexers.text', 'Gettext Catalog', ('pot', 'po'), ('*.pot', '*.po'), ('application/x-gettext', 'text/x-gettext', 'text/gettext')),
'GherkinLexer': ('pygments.lexers.other', 'Gherkin', ('Cucumber', 'cucumber', 'Gherkin', 'gherkin'), ('*.feature',), ('text/x-gherkin',)),
'GnuplotLexer': ('pygments.lexers.other', 'Gnuplot', ('gnuplot',), ('*.plot', '*.plt'), ('text/x-gnuplot',)),
'GoLexer': ('pygments.lexers.compiled', 'Go', ('go',), ('*.go',), ('text/x-gosrc',)),
'GoodDataCLLexer': ('pygments.lexers.other', 'GoodData-CL', ('gooddata-cl',), ('*.gdc',), ('text/x-gooddata-cl',)),
'GosuLexer': ('pygments.lexers.jvm', 'Gosu', ('gosu',), ('*.gs', '*.gsx', '*.gsp', '*.vark'), ('text/x-gosu',)),
'GosuTemplateLexer': ('pygments.lexers.jvm', 'Gosu Template', ('gst',), ('*.gst',), ('text/x-gosu-template',)),
'GroffLexer': ('pygments.lexers.text', 'Groff', ('groff', 'nroff', 'man'), ('*.[1234567]', '*.man'), ('application/x-troff', 'text/troff')),
'GroovyLexer': ('pygments.lexers.jvm', 'Groovy', ('groovy',), ('*.groovy',), ('text/x-groovy',)),
'HamlLexer': ('pygments.lexers.web', 'Haml', ('haml', 'HAML'), ('*.haml',), ('text/x-haml',)),
'HaskellLexer': ('pygments.lexers.functional', 'Haskell', ('haskell', 'hs'), ('*.hs',), ('text/x-haskell',)),
'HaxeLexer': ('pygments.lexers.web', 'Haxe', ('hx', 'Haxe', 'haxe', 'haXe', 'hxsl'), ('*.hx', '*.hxsl'), ('text/haxe', 'text/x-haxe', 'text/x-hx')),
'HtmlDjangoLexer': ('pygments.lexers.templates', 'HTML+Django/Jinja', ('html+django', 'html+jinja', 'htmldjango'), (), ('text/html+django', 'text/html+jinja')),
'HtmlGenshiLexer': ('pygments.lexers.templates', 'HTML+Genshi', ('html+genshi', 'html+kid'), (), ('text/html+genshi',)),
'HtmlLexer': ('pygments.lexers.web', 'HTML', ('html',), ('*.html', '*.htm', '*.xhtml', '*.xslt'), ('text/html', 'application/xhtml+xml')),
'HtmlPhpLexer': ('pygments.lexers.templates', 'HTML+PHP', ('html+php',), ('*.phtml',), ('application/x-php', 'application/x-httpd-php', 'application/x-httpd-php3', 'application/x-httpd-php4', 'application/x-httpd-php5')),
'HtmlSmartyLexer': ('pygments.lexers.templates', 'HTML+Smarty', ('html+smarty',), (), ('text/html+smarty',)),
'HttpLexer': ('pygments.lexers.text', 'HTTP', ('http',), (), ()),
'HxmlLexer': ('pygments.lexers.text', 'Hxml', ('haxeml', 'hxml'), ('*.hxml',), ()),
'HybrisLexer': ('pygments.lexers.other', 'Hybris', ('hybris', 'hy'), ('*.hy', '*.hyb'), ('text/x-hybris', 'application/x-hybris')),
'IDLLexer': ('pygments.lexers.math', 'IDL', ('idl',), ('*.pro',), ('text/idl',)),
'IgorLexer': ('pygments.lexers.math', 'Igor', ('igor', 'igorpro'), ('*.ipf',), ('text/ipf',)),
'IniLexer': ('pygments.lexers.text', 'INI', ('ini', 'cfg', 'dosini'), ('*.ini', '*.cfg'), ('text/x-ini',)),
'IoLexer': ('pygments.lexers.agile', 'Io', ('io',), ('*.io',), ('text/x-iosrc',)),
'IokeLexer': ('pygments.lexers.jvm', 'Ioke', ('ioke', 'ik'), ('*.ik',), ('text/x-iokesrc',)),
'IrcLogsLexer': ('pygments.lexers.text', 'IRC logs', ('irc',), ('*.weechatlog',), ('text/x-irclog',)),
'JadeLexer': ('pygments.lexers.web', 'Jade', ('jade', 'JADE'), ('*.jade',), ('text/x-jade',)),
'JagsLexer': ('pygments.lexers.math', 'JAGS', ('jags',), ('*.jag', '*.bug'), ()),
'JavaLexer': ('pygments.lexers.jvm', 'Java', ('java',), ('*.java',), ('text/x-java',)),
'JavascriptDjangoLexer': ('pygments.lexers.templates', 'JavaScript+Django/Jinja', ('js+django', 'javascript+django', 'js+jinja', 'javascript+jinja'), (), ('application/x-javascript+django', 'application/x-javascript+jinja', 'text/x-javascript+django', 'text/x-javascript+jinja', 'text/javascript+django', 'text/javascript+jinja')),
'JavascriptErbLexer': ('pygments.lexers.templates', 'JavaScript+Ruby', ('js+erb', 'javascript+erb', 'js+ruby', 'javascript+ruby'), (), ('application/x-javascript+ruby', 'text/x-javascript+ruby', 'text/javascript+ruby')),
'JavascriptGenshiLexer': ('pygments.lexers.templates', 'JavaScript+Genshi Text', ('js+genshitext', 'js+genshi', 'javascript+genshitext', 'javascript+genshi'), (), ('application/x-javascript+genshi', 'text/x-javascript+genshi', 'text/javascript+genshi')),
'JavascriptLexer': ('pygments.lexers.web', 'JavaScript', ('js', 'javascript'), ('*.js',), ('application/javascript', 'application/x-javascript', 'text/x-javascript', 'text/javascript')),
'JavascriptPhpLexer': ('pygments.lexers.templates', 'JavaScript+PHP', ('js+php', 'javascript+php'), (), ('application/x-javascript+php', 'text/x-javascript+php', 'text/javascript+php')),
'JavascriptSmartyLexer': ('pygments.lexers.templates', 'JavaScript+Smarty', ('js+smarty', 'javascript+smarty'), (), ('application/x-javascript+smarty', 'text/x-javascript+smarty', 'text/javascript+smarty')),
'JsonLexer': ('pygments.lexers.web', 'JSON', ('json',), ('*.json',), ('application/json',)),
'JspLexer': ('pygments.lexers.templates', 'Java Server Page', ('jsp',), ('*.jsp',), ('application/x-jsp',)),
'JuliaConsoleLexer': ('pygments.lexers.math', 'Julia console', ('jlcon',), (), ()),
'JuliaLexer': ('pygments.lexers.math', 'Julia', ('julia', 'jl'), ('*.jl',), ('text/x-julia', 'application/x-julia')),
'KconfigLexer': ('pygments.lexers.other', 'Kconfig', ('kconfig', 'menuconfig', 'linux-config', 'kernel-config'), ('Kconfig', '*Config.in*', 'external.in*', 'standard-modules.in'), ('text/x-kconfig',)),
'KokaLexer': ('pygments.lexers.functional', 'Koka', ('koka',), ('*.kk', '*.kki'), ('text/x-koka',)),
'KotlinLexer': ('pygments.lexers.jvm', 'Kotlin', ('kotlin',), ('*.kt',), ('text/x-kotlin',)),
'LassoCssLexer': ('pygments.lexers.templates', 'CSS+Lasso', ('css+lasso',), (), ('text/css+lasso',)),
'LassoHtmlLexer': ('pygments.lexers.templates', 'HTML+Lasso', ('html+lasso',), (), ('text/html+lasso', 'application/x-httpd-lasso', 'application/x-httpd-lasso[89]')),
'LassoJavascriptLexer': ('pygments.lexers.templates', 'JavaScript+Lasso', ('js+lasso', 'javascript+lasso'), (), ('application/x-javascript+lasso', 'text/x-javascript+lasso', 'text/javascript+lasso')),
'LassoLexer': ('pygments.lexers.web', 'Lasso', ('lasso', 'lassoscript'), ('*.lasso', '*.lasso[89]'), ('text/x-lasso',)),
'LassoXmlLexer': ('pygments.lexers.templates', 'XML+Lasso', ('xml+lasso',), (), ('application/xml+lasso',)),
'LighttpdConfLexer': ('pygments.lexers.text', 'Lighttpd configuration file', ('lighty', 'lighttpd'), (), ('text/x-lighttpd-conf',)),
'LiterateAgdaLexer': ('pygments.lexers.functional', 'Literate Agda', ('lagda', 'literate-agda'), ('*.lagda',), ('text/x-literate-agda',)),
'LiterateHaskellLexer': ('pygments.lexers.functional', 'Literate Haskell', ('lhs', 'literate-haskell', 'lhaskell'), ('*.lhs',), ('text/x-literate-haskell',)),
'LiveScriptLexer': ('pygments.lexers.web', 'LiveScript', ('live-script', 'livescript'), ('*.ls',), ('text/livescript',)),
'LlvmLexer': ('pygments.lexers.asm', 'LLVM', ('llvm',), ('*.ll',), ('text/x-llvm',)),
'LogosLexer': ('pygments.lexers.compiled', 'Logos', ('logos',), ('*.x', '*.xi', '*.xm', '*.xmi'), ('text/x-logos',)),
'LogtalkLexer': ('pygments.lexers.other', 'Logtalk', ('logtalk',), ('*.lgt',), ('text/x-logtalk',)),
'LuaLexer': ('pygments.lexers.agile', 'Lua', ('lua',), ('*.lua', '*.wlua'), ('text/x-lua', 'application/x-lua')),
'MOOCodeLexer': ('pygments.lexers.other', 'MOOCode', ('moocode', 'moo'), ('*.moo',), ('text/x-moocode',)),
'MakefileLexer': ('pygments.lexers.text', 'Makefile', ('make', 'makefile', 'mf', 'bsdmake'), ('*.mak', 'Makefile', 'makefile', 'Makefile.*', 'GNUmakefile'), ('text/x-makefile',)),
'MakoCssLexer': ('pygments.lexers.templates', 'CSS+Mako', ('css+mako',), (), ('text/css+mako',)),
'MakoHtmlLexer': ('pygments.lexers.templates', 'HTML+Mako', ('html+mako',), (), ('text/html+mako',)),
'MakoJavascriptLexer': ('pygments.lexers.templates', 'JavaScript+Mako', ('js+mako', 'javascript+mako'), (), ('application/x-javascript+mako', 'text/x-javascript+mako', 'text/javascript+mako')),
'MakoLexer': ('pygments.lexers.templates', 'Mako', ('mako',), ('*.mao',), ('application/x-mako',)),
'MakoXmlLexer': ('pygments.lexers.templates', 'XML+Mako', ('xml+mako',), (), ('application/xml+mako',)),
'MaqlLexer': ('pygments.lexers.other', 'MAQL', ('maql',), ('*.maql',), ('text/x-gooddata-maql', 'application/x-gooddata-maql')),
'MasonLexer': ('pygments.lexers.templates', 'Mason', ('mason',), ('*.m', '*.mhtml', '*.mc', '*.mi', 'autohandler', 'dhandler'), ('application/x-mason',)),
'MatlabLexer': ('pygments.lexers.math', 'Matlab', ('matlab',), ('*.m',), ('text/matlab',)),
'MatlabSessionLexer': ('pygments.lexers.math', 'Matlab session', ('matlabsession',), (), ()),
'MiniDLexer': ('pygments.lexers.agile', 'MiniD', ('minid',), ('*.md',), ('text/x-minidsrc',)),
'ModelicaLexer': ('pygments.lexers.other', 'Modelica', ('modelica',), ('*.mo',), ('text/x-modelica',)),
'Modula2Lexer': ('pygments.lexers.compiled', 'Modula-2', ('modula2', 'm2'), ('*.def', '*.mod'), ('text/x-modula2',)),
'MoinWikiLexer': ('pygments.lexers.text', 'MoinMoin/Trac Wiki markup', ('trac-wiki', 'moin'), (), ('text/x-trac-wiki',)),
'MonkeyLexer': ('pygments.lexers.compiled', 'Monkey', ('monkey',), ('*.monkey',), ('text/x-monkey',)),
'MoonScriptLexer': ('pygments.lexers.agile', 'MoonScript', ('moon', 'moonscript'), ('*.moon',), ('text/x-moonscript', 'application/x-moonscript')),
'MscgenLexer': ('pygments.lexers.other', 'Mscgen', ('mscgen', 'msc'), ('*.msc',), ()),
'MuPADLexer': ('pygments.lexers.math', 'MuPAD', ('mupad',), ('*.mu',), ()),
'MxmlLexer': ('pygments.lexers.web', 'MXML', ('mxml',), ('*.mxml',), ()),
'MySqlLexer': ('pygments.lexers.sql', 'MySQL', ('mysql',), (), ('text/x-mysql',)),
'MyghtyCssLexer': ('pygments.lexers.templates', 'CSS+Myghty', ('css+myghty',), (), ('text/css+myghty',)),
'MyghtyHtmlLexer': ('pygments.lexers.templates', 'HTML+Myghty', ('html+myghty',), (), ('text/html+myghty',)),
'MyghtyJavascriptLexer': ('pygments.lexers.templates', 'JavaScript+Myghty', ('js+myghty', 'javascript+myghty'), (), ('application/x-javascript+myghty', 'text/x-javascript+myghty', 'text/javascript+mygthy')),
'MyghtyLexer': ('pygments.lexers.templates', 'Myghty', ('myghty',), ('*.myt', 'autodelegate'), ('application/x-myghty',)),
'MyghtyXmlLexer': ('pygments.lexers.templates', 'XML+Myghty', ('xml+myghty',), (), ('application/xml+myghty',)),
'NSISLexer': ('pygments.lexers.other', 'NSIS', ('nsis', 'nsi', 'nsh'), ('*.nsi', '*.nsh'), ('text/x-nsis',)),
'NasmLexer': ('pygments.lexers.asm', 'NASM', ('nasm',), ('*.asm', '*.ASM'), ('text/x-nasm',)),
'NemerleLexer': ('pygments.lexers.dotnet', 'Nemerle', ('nemerle',), ('*.n',), ('text/x-nemerle',)),
'NesCLexer': ('pygments.lexers.compiled', 'nesC', ('nesc',), ('*.nc',), ('text/x-nescsrc',)),
'NewLispLexer': ('pygments.lexers.functional', 'NewLisp', ('newlisp',), ('*.lsp', '*.nl'), ('text/x-newlisp', 'application/x-newlisp')),
'NewspeakLexer': ('pygments.lexers.other', 'Newspeak', ('newspeak',), ('*.ns2',), ('text/x-newspeak',)),
'NginxConfLexer': ('pygments.lexers.text', 'Nginx configuration file', ('nginx',), (), ('text/x-nginx-conf',)),
'NimrodLexer': ('pygments.lexers.compiled', 'Nimrod', ('nimrod', 'nim'), ('*.nim', '*.nimrod'), ('text/x-nimrod',)),
'NumPyLexer': ('pygments.lexers.math', 'NumPy', ('numpy',), (), ()),
'ObjdumpLexer': ('pygments.lexers.asm', 'objdump', ('objdump',), ('*.objdump',), ('text/x-objdump',)),
'ObjectiveCLexer': ('pygments.lexers.compiled', 'Objective-C', ('objective-c', 'objectivec', 'obj-c', 'objc'), ('*.m', '*.h'), ('text/x-objective-c',)),
'ObjectiveCppLexer': ('pygments.lexers.compiled', 'Objective-C++', ('objective-c++', 'objectivec++', 'obj-c++', 'objc++'), ('*.mm', '*.hh'), ('text/x-objective-c++',)),
'ObjectiveJLexer': ('pygments.lexers.web', 'Objective-J', ('objective-j', 'objectivej', 'obj-j', 'objj'), ('*.j',), ('text/x-objective-j',)),
'OcamlLexer': ('pygments.lexers.functional', 'OCaml', ('ocaml',), ('*.ml', '*.mli', '*.mll', '*.mly'), ('text/x-ocaml',)),
'OctaveLexer': ('pygments.lexers.math', 'Octave', ('octave',), ('*.m',), ('text/octave',)),
'OocLexer': ('pygments.lexers.compiled', 'Ooc', ('ooc',), ('*.ooc',), ('text/x-ooc',)),
'OpaLexer': ('pygments.lexers.functional', 'Opa', ('opa',), ('*.opa',), ('text/x-opa',)),
'OpenEdgeLexer': ('pygments.lexers.other', 'OpenEdge ABL', ('openedge', 'abl', 'progress'), ('*.p', '*.cls'), ('text/x-openedge', 'application/x-openedge')),
'Perl6Lexer': ('pygments.lexers.agile', 'Perl6', ('perl6', 'pl6'), ('*.pl', '*.pm', '*.nqp', '*.p6', '*.6pl', '*.p6l', '*.pl6', '*.6pm', '*.p6m', '*.pm6'), ('text/x-perl6', 'application/x-perl6')),
'PerlLexer': ('pygments.lexers.agile', 'Perl', ('perl', 'pl'), ('*.pl', '*.pm'), ('text/x-perl', 'application/x-perl')),
'PhpLexer': ('pygments.lexers.web', 'PHP', ('php', 'php3', 'php4', 'php5'), ('*.php', '*.php[345]', '*.inc'), ('text/x-php',)),
'PlPgsqlLexer': ('pygments.lexers.sql', 'PL/pgSQL', ('plpgsql',), (), ('text/x-plpgsql',)),
'PostScriptLexer': ('pygments.lexers.other', 'PostScript', ('postscript', 'postscr'), ('*.ps', '*.eps'), ('application/postscript',)),
'PostgresConsoleLexer': ('pygments.lexers.sql', 'PostgreSQL console (psql)', ('psql', 'postgresql-console', 'postgres-console'), (), ('text/x-postgresql-psql',)),
'PostgresLexer': ('pygments.lexers.sql', 'PostgreSQL SQL dialect', ('postgresql', 'postgres'), (), ('text/x-postgresql',)),
'PovrayLexer': ('pygments.lexers.other', 'POVRay', ('pov',), ('*.pov', '*.inc'), ('text/x-povray',)),
'PowerShellLexer': ('pygments.lexers.shell', 'PowerShell', ('powershell', 'posh', 'ps1', 'psm1'), ('*.ps1', '*.psm1'), ('text/x-powershell',)),
'PrologLexer': ('pygments.lexers.compiled', 'Prolog', ('prolog',), ('*.prolog', '*.pro', '*.pl'), ('text/x-prolog',)),
'PropertiesLexer': ('pygments.lexers.text', 'Properties', ('properties', 'jproperties'), ('*.properties',), ('text/x-java-properties',)),
'ProtoBufLexer': ('pygments.lexers.other', 'Protocol Buffer', ('protobuf', 'proto'), ('*.proto',), ()),
'PuppetLexer': ('pygments.lexers.other', 'Puppet', ('puppet',), ('*.pp',), ()),
'PyPyLogLexer': ('pygments.lexers.text', 'PyPy Log', ('pypylog', 'pypy'), ('*.pypylog',), ('application/x-pypylog',)),
'Python3Lexer': ('pygments.lexers.agile', 'Python 3', ('python3', 'py3'), (), ('text/x-python3', 'application/x-python3')),
'Python3TracebackLexer': ('pygments.lexers.agile', 'Python 3.0 Traceback', ('py3tb',), ('*.py3tb',), ('text/x-python3-traceback',)),
'PythonConsoleLexer': ('pygments.lexers.agile', 'Python console session', ('pycon',), (), ('text/x-python-doctest',)),
'PythonLexer': ('pygments.lexers.agile', 'Python', ('python', 'py', 'sage'), ('*.py', '*.pyw', '*.sc', 'SConstruct', 'SConscript', '*.tac', '*.sage'), ('text/x-python', 'application/x-python')),
'PythonTracebackLexer': ('pygments.lexers.agile', 'Python Traceback', ('pytb',), ('*.pytb',), ('text/x-python-traceback',)),
'QmlLexer': ('pygments.lexers.web', 'QML', ('qml', 'Qt Meta Language', 'Qt modeling Language'), ('*.qml',), ('application/x-qml',)),
'RConsoleLexer': ('pygments.lexers.math', 'RConsole', ('rconsole', 'rout'), ('*.Rout',), ()),
'RPMSpecLexer': ('pygments.lexers.other', 'RPMSpec', ('spec',), ('*.spec',), ('text/x-rpm-spec',)),
'RacketLexer': ('pygments.lexers.functional', 'Racket', ('racket', 'rkt'), ('*.rkt', '*.rktl'), ('text/x-racket', 'application/x-racket')),
'RagelCLexer': ('pygments.lexers.parsers', 'Ragel in C Host', ('ragel-c',), ('*.rl',), ()),
'RagelCppLexer': ('pygments.lexers.parsers', 'Ragel in CPP Host', ('ragel-cpp',), ('*.rl',), ()),
'RagelDLexer': ('pygments.lexers.parsers', 'Ragel in D Host', ('ragel-d',), ('*.rl',), ()),
'RagelEmbeddedLexer': ('pygments.lexers.parsers', 'Embedded Ragel', ('ragel-em',), ('*.rl',), ()),
'RagelJavaLexer': ('pygments.lexers.parsers', 'Ragel in Java Host', ('ragel-java',), ('*.rl',), ()),
'RagelLexer': ('pygments.lexers.parsers', 'Ragel', ('ragel',), (), ()),
'RagelObjectiveCLexer': ('pygments.lexers.parsers', 'Ragel in Objective C Host', ('ragel-objc',), ('*.rl',), ()),
'RagelRubyLexer': ('pygments.lexers.parsers', 'Ragel in Ruby Host', ('ragel-ruby', 'ragel-rb'), ('*.rl',), ()),
'RawTokenLexer': ('pygments.lexers.special', 'Raw token data', ('raw',), (), ('application/x-pygments-tokens',)),
'RdLexer': ('pygments.lexers.math', 'Rd', ('rd',), ('*.Rd',), ('text/x-r-doc',)),
'RebolLexer': ('pygments.lexers.other', 'REBOL', ('rebol',), ('*.r', '*.r3'), ('text/x-rebol',)),
'RedcodeLexer': ('pygments.lexers.other', 'Redcode', ('redcode',), ('*.cw',), ()),
'RegeditLexer': ('pygments.lexers.text', 'reg', ('registry',), ('*.reg',), ('text/x-windows-registry',)),
'RexxLexer': ('pygments.lexers.other', 'Rexx', ('rexx', 'ARexx', 'arexx'), ('*.rexx', '*.rex', '*.rx', '*.arexx'), ('text/x-rexx',)),
'RhtmlLexer': ('pygments.lexers.templates', 'RHTML', ('rhtml', 'html+erb', 'html+ruby'), ('*.rhtml',), ('text/html+ruby',)),
'RobotFrameworkLexer': ('pygments.lexers.other', 'RobotFramework', ('RobotFramework', 'robotframework'), ('*.txt', '*.robot'), ('text/x-robotframework',)),
'RstLexer': ('pygments.lexers.text', 'reStructuredText', ('rst', 'rest', 'restructuredtext'), ('*.rst', '*.rest'), ('text/x-rst', 'text/prs.fallenstein.rst')),
'RubyConsoleLexer': ('pygments.lexers.agile', 'Ruby irb session', ('rbcon', 'irb'), (), ('text/x-ruby-shellsession',)),
'RubyLexer': ('pygments.lexers.agile', 'Ruby', ('rb', 'ruby', 'duby'), ('*.rb', '*.rbw', 'Rakefile', '*.rake', '*.gemspec', '*.rbx', '*.duby'), ('text/x-ruby', 'application/x-ruby')),
'RustLexer': ('pygments.lexers.compiled', 'Rust', ('rust',), ('*.rs', '*.rc'), ('text/x-rustsrc',)),
'SLexer': ('pygments.lexers.math', 'S', ('splus', 's', 'r'), ('*.S', '*.R', '.Rhistory', '.Rprofile'), ('text/S-plus', 'text/S', 'text/x-r-source', 'text/x-r', 'text/x-R', 'text/x-r-history', 'text/x-r-profile')),
'SMLLexer': ('pygments.lexers.functional', 'Standard ML', ('sml',), ('*.sml', '*.sig', '*.fun'), ('text/x-standardml', 'application/x-standardml')),
'SassLexer': ('pygments.lexers.web', 'Sass', ('sass', 'SASS'), ('*.sass',), ('text/x-sass',)),
'ScalaLexer': ('pygments.lexers.jvm', 'Scala', ('scala',), ('*.scala',), ('text/x-scala',)),
'ScamlLexer': ('pygments.lexers.web', 'Scaml', ('scaml', 'SCAML'), ('*.scaml',), ('text/x-scaml',)),
'SchemeLexer': ('pygments.lexers.functional', 'Scheme', ('scheme', 'scm'), ('*.scm', '*.ss'), ('text/x-scheme', 'application/x-scheme')),
'ScilabLexer': ('pygments.lexers.math', 'Scilab', ('scilab',), ('*.sci', '*.sce', '*.tst'), ('text/scilab',)),
'ScssLexer': ('pygments.lexers.web', 'SCSS', ('scss',), ('*.scss',), ('text/x-scss',)),
'ShellSessionLexer': ('pygments.lexers.shell', 'Shell Session', ('shell-session',), ('*.shell-session',), ('application/x-sh-session',)),
'SmaliLexer': ('pygments.lexers.dalvik', 'Smali', ('smali',), ('*.smali',), ('text/smali',)),
'SmalltalkLexer': ('pygments.lexers.other', 'Smalltalk', ('smalltalk', 'squeak', 'st'), ('*.st',), ('text/x-smalltalk',)),
'SmartyLexer': ('pygments.lexers.templates', 'Smarty', ('smarty',), ('*.tpl',), ('application/x-smarty',)),
'SnobolLexer': ('pygments.lexers.other', 'Snobol', ('snobol',), ('*.snobol',), ('text/x-snobol',)),
'SourcePawnLexer': ('pygments.lexers.other', 'SourcePawn', ('sp',), ('*.sp',), ('text/x-sourcepawn',)),
'SourcesListLexer': ('pygments.lexers.text', 'Debian Sourcelist', ('sourceslist', 'sources.list', 'debsources'), ('sources.list',), ()),
'SqlLexer': ('pygments.lexers.sql', 'SQL', ('sql',), ('*.sql',), ('text/x-sql',)),
'SqliteConsoleLexer': ('pygments.lexers.sql', 'sqlite3con', ('sqlite3',), ('*.sqlite3-console',), ('text/x-sqlite3-console',)),
'SquidConfLexer': ('pygments.lexers.text', 'SquidConf', ('squidconf', 'squid.conf', 'squid'), ('squid.conf',), ('text/x-squidconf',)),
'SspLexer': ('pygments.lexers.templates', 'Scalate Server Page', ('ssp',), ('*.ssp',), ('application/x-ssp',)),
'StanLexer': ('pygments.lexers.math', 'Stan', ('stan',), ('*.stan',), ()),
'SwigLexer': ('pygments.lexers.compiled', 'SWIG', ('Swig', 'swig'), ('*.swg', '*.i'), ('text/swig',)),
'SystemVerilogLexer': ('pygments.lexers.hdl', 'systemverilog', ('systemverilog', 'sv'), ('*.sv', '*.svh'), ('text/x-systemverilog',)),
'TclLexer': ('pygments.lexers.agile', 'Tcl', ('tcl',), ('*.tcl',), ('text/x-tcl', 'text/x-script.tcl', 'application/x-tcl')),
'TcshLexer': ('pygments.lexers.shell', 'Tcsh', ('tcsh', 'csh'), ('*.tcsh', '*.csh'), ('application/x-csh',)),
'TeaTemplateLexer': ('pygments.lexers.templates', 'Tea', ('tea',), ('*.tea',), ('text/x-tea',)),
'TexLexer': ('pygments.lexers.text', 'TeX', ('tex', 'latex'), ('*.tex', '*.aux', '*.toc'), ('text/x-tex', 'text/x-latex')),
'TextLexer': ('pygments.lexers.special', 'Text only', ('text',), ('*.txt',), ('text/plain',)),
'TreetopLexer': ('pygments.lexers.parsers', 'Treetop', ('treetop',), ('*.treetop', '*.tt'), ()),
'TypeScriptLexer': ('pygments.lexers.web', 'TypeScript', ('ts',), ('*.ts',), ('text/x-typescript',)),
'UrbiscriptLexer': ('pygments.lexers.other', 'UrbiScript', ('urbiscript',), ('*.u',), ('application/x-urbiscript',)),
'VGLLexer': ('pygments.lexers.other', 'VGL', ('vgl',), ('*.rpf',), ()),
'ValaLexer': ('pygments.lexers.compiled', 'Vala', ('vala', 'vapi'), ('*.vala', '*.vapi'), ('text/x-vala',)),
'VbNetAspxLexer': ('pygments.lexers.dotnet', 'aspx-vb', ('aspx-vb',), ('*.aspx', '*.asax', '*.ascx', '*.ashx', '*.asmx', '*.axd'), ()),
'VbNetLexer': ('pygments.lexers.dotnet', 'VB.net', ('vb.net', 'vbnet'), ('*.vb', '*.bas'), ('text/x-vbnet', 'text/x-vba')),
'VelocityHtmlLexer': ('pygments.lexers.templates', 'HTML+Velocity', ('html+velocity',), (), ('text/html+velocity',)),
'VelocityLexer': ('pygments.lexers.templates', 'Velocity', ('velocity',), ('*.vm', '*.fhtml'), ()),
'VelocityXmlLexer': ('pygments.lexers.templates', 'XML+Velocity', ('xml+velocity',), (), ('application/xml+velocity',)),
'VerilogLexer': ('pygments.lexers.hdl', 'verilog', ('verilog', 'v'), ('*.v',), ('text/x-verilog',)),
'VhdlLexer': ('pygments.lexers.hdl', 'vhdl', ('vhdl',), ('*.vhdl', '*.vhd'), ('text/x-vhdl',)),
'VimLexer': ('pygments.lexers.text', 'VimL', ('vim',), ('*.vim', '.vimrc', '.exrc', '.gvimrc', '_vimrc', '_exrc', '_gvimrc', 'vimrc', 'gvimrc'), ('text/x-vim',)),
'XQueryLexer': ('pygments.lexers.web', 'XQuery', ('xquery', 'xqy', 'xq', 'xql', 'xqm'), ('*.xqy', '*.xquery', '*.xq', '*.xql', '*.xqm'), ('text/xquery', 'application/xquery')),
'XmlDjangoLexer': ('pygments.lexers.templates', 'XML+Django/Jinja', ('xml+django', 'xml+jinja'), (), ('application/xml+django', 'application/xml+jinja')),
'XmlErbLexer': ('pygments.lexers.templates', 'XML+Ruby', ('xml+erb', 'xml+ruby'), (), ('application/xml+ruby',)),
'XmlLexer': ('pygments.lexers.web', 'XML', ('xml',), ('*.xml', '*.xsl', '*.rss', '*.xslt', '*.xsd', '*.wsdl', '*.wsf'), ('text/xml', 'application/xml', 'image/svg+xml', 'application/rss+xml', 'application/atom+xml')),
'XmlPhpLexer': ('pygments.lexers.templates', 'XML+PHP', ('xml+php',), (), ('application/xml+php',)),
'XmlSmartyLexer': ('pygments.lexers.templates', 'XML+Smarty', ('xml+smarty',), (), ('application/xml+smarty',)),
'XsltLexer': ('pygments.lexers.web', 'XSLT', ('xslt',), ('*.xsl', '*.xslt', '*.xpl'), ('application/xsl+xml', 'application/xslt+xml')),
'XtendLexer': ('pygments.lexers.jvm', 'Xtend', ('xtend',), ('*.xtend',), ('text/x-xtend',)),
'YamlLexer': ('pygments.lexers.text', 'YAML', ('yaml',), ('*.yaml', '*.yml'), ('text/x-yaml',)),
}
if __name__ == '__main__':
    # Self-regeneration mode (Python 2 script): when this mapping module is
    # executed directly it imports every sibling lexer module, rebuilds the
    # LEXERS dict above from their __all__ entries, and rewrites this very
    # file in place.
    import sys
    import os
    # lookup lexers
    found_lexers = []
    sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))
    for filename in os.listdir('.'):
        if filename.endswith('.py') and not filename.startswith('_'):
            module_name = 'pygments.lexers.%s' % filename[:-3]
            print module_name
            module = __import__(module_name, None, None, [''])
            # each lexer module lists its public lexer classes in __all__
            for lexer_name in module.__all__:
                lexer = getattr(module, lexer_name)
                # record one ready-to-print "name: (module, ...)" entry
                found_lexers.append(
                    '%r: %r' % (lexer_name,
                                (module_name,
                                 lexer.name,
                                 tuple(lexer.aliases),
                                 tuple(lexer.filenames),
                                 tuple(lexer.mimetypes))))
    # sort them, that should make the diff files for svn smaller
    found_lexers.sort()
    # extract useful sourcecode from this file
    f = open(__file__)
    try:
        content = f.read()
    finally:
        f.close()
    # keep everything before the generated dict, and from the __main__ guard on
    header = content[:content.find('LEXERS = {')]
    footer = content[content.find("if __name__ == '__main__':"):]
    # write new file
    f = open(__file__, 'wb')
    f.write(header)
    f.write('LEXERS = {\n %s,\n}\n\n' % ',\n '.join(found_lexers))
    f.write(footer)
    f.close()
|
gpitel/pyjs | refs/heads/master | tests/test047.py | 13 | global x
# pyjs compiler regression fixture (Python 2 syntax).  What matters here is
# the set of language constructs exercised -- augmented assignment, a while
# loop, try/except, the __main__ guard and a for loop -- so the statements
# are kept exactly as written.
x = 5
x += 1
# counts down, printing 6..1
while x > 0:
    print x
    x -= 1
# bare except is deliberate: the fixture exercises exception-handling syntax
try:
    print 5
except:
    print 2
if __name__ == '__main__':
    print x
    print 5
for x in range(10):
    print x
|
BiobankLab/FA_TOOL | refs/heads/master | fatool/sequence.py | 1 | # -*- coding: utf-8 -*-
from string import maketrans
from collections import Counter
import fuzzy
import re
import logging
class Sequence(object):
    """A named biological sequence (FASTA/FASTQ style).

    Provides validation, fragmentation, reverse-complement, DNA/RNA mapping
    and codon-to-protein translation helpers.  The tdict_* class attributes
    map codons to single-letter amino acids ('*' marks a stop codon); the
    numeric comments appear to be NCBI genetic-code table IDs (1, 2, 3, 11),
    which matches the attribute names -- confirm against NCBI tables.
    """
    # 1 -- standard genetic code
    tdict_standard = {
        'GCA':'A','GCC':'A','GCG':'A','GCT':'A', 'TGC':'C','TGT':'C', 'GAC':'D', 'GAT':'D', 'GAA':'E', 'GAG':'E',
        'TTC':'F', 'TTT':'F', 'GGA':'G', 'GGC':'G', 'GGG':'G', 'GGT':'G', 'CAC':'H', 'CAT':'H', 'ATA':'I', 'ATC':'I', 'ATT':'I',
        'AAA':'K', 'AAG':'K', 'TTA':'L', 'TTG':'L', 'CTA':'L', 'CTC':'L', 'CTG':'L', 'CTT':'L', 'ATG':'M', 'AAC':'N', 'AAT':'N',
        'CCA':'P', 'CCC':'P', 'CCG':'P', 'CCT':'P', 'CAA':'Q', 'CAG':'Q', 'AGA':'R', 'AGG':'R', 'CGA':'R', 'CGC':'R', 'CGG':'R',
        'CGT':'R', 'AGC':'S', 'AGT':'S', 'TCA':'S', 'TCC':'S', 'TCG':'S', 'TCT':'S', 'ACA':'T', 'ACC':'T', 'ACG':'T', 'ACT':'T',
        'GTA':'V', 'GTC':'V', 'GTG':'V', 'GTT':'V', 'TGG':'W', 'TAC':'Y', 'TAT':'Y', 'TAG': '*', 'TGA':'*', 'TAA':'*'
    }
    # codons treated as translation starts / stops under the standard code
    start_standard = ['ATG', 'TTG', 'CTG']
    standard_stop = ['TAA', 'TAG', 'TGA']
    # 2 -- vertebrate mitochondrial code (ATA->M, AGA/AGG->stop, TGA->W)
    tdict_vertebrate_mitochondrial = {
        'GCA':'A','GCC':'A','GCG':'A','GCT':'A', 'TGC':'C','TGT':'C', 'GAC':'D', 'GAT':'D', 'GAA':'E', 'GAG':'E',
        'TTC':'F', 'TTT':'F', 'GGA':'G', 'GGC':'G', 'GGG':'G', 'GGT':'G', 'CAC':'H', 'CAT':'H', 'ATA':'M', 'ATC':'I', 'ATT':'I',
        'AAA':'K', 'AAG':'K', 'TTA':'L', 'TTG':'L', 'CTA':'L', 'CTC':'L', 'CTG':'L', 'CTT':'L', 'ATG':'M', 'AAC':'N', 'AAT':'N',
        'CCA':'P', 'CCC':'P', 'CCG':'P', 'CCT':'P', 'CAA':'Q', 'CAG':'Q', 'AGA':'*', 'AGG':'*', 'CGA':'R', 'CGC':'R', 'CGG':'R',
        'CGT':'R', 'AGC':'S', 'AGT':'S', 'TCA':'S', 'TCC':'S', 'TCG':'S', 'TCT':'S', 'ACA':'T', 'ACC':'T', 'ACG':'T', 'ACT':'T',
        'GTA':'V', 'GTC':'V', 'GTG':'V', 'GTT':'V', 'TGG':'W', 'TAC':'Y', 'TAT':'Y', 'TAG': '*', 'TGA':'W', 'TAA':'*'
    }
    # 3 -- yeast mitochondrial code (ATA->M, CTN->T, TGA->W)
    tdict_yeast_mitochondrial = {
        'GCA':'A','GCC':'A','GCG':'A','GCT':'A', 'TGC':'C','TGT':'C', 'GAC':'D', 'GAT':'D', 'GAA':'E', 'GAG':'E',
        'TTC':'F', 'TTT':'F', 'GGA':'G', 'GGC':'G', 'GGG':'G', 'GGT':'G', 'CAC':'H', 'CAT':'H', 'ATA':'M', 'ATC':'I', 'ATT':'I',
        'AAA':'K', 'AAG':'K', 'TTA':'L', 'TTG':'L', 'CTA':'T', 'CTC':'T', 'CTG':'T', 'CTT':'T', 'ATG':'M', 'AAC':'N', 'AAT':'N',
        'CCA':'P', 'CCC':'P', 'CCG':'P', 'CCT':'P', 'CAA':'Q', 'CAG':'Q', 'AGA':'R', 'AGG':'R', 'CGA':'R', 'CGC':'R', 'CGG':'R',
        'CGT':'R', 'AGC':'S', 'AGT':'S', 'TCA':'S', 'TCC':'S', 'TCG':'S', 'TCT':'S', 'ACA':'T', 'ACC':'T', 'ACG':'T', 'ACT':'T',
        'GTA':'V', 'GTC':'V', 'GTG':'V', 'GTT':'V', 'TGG':'W', 'TAC':'Y', 'TAT':'Y', 'TAG': '*', 'TGA':'W', 'TAA':'*'
    }
    # 11 -- bacterial, archaeal and plant plastid code (codon table equals the
    # standard one here; differences to table 1 lie in start-codon usage)
    tdict_bacterial_archaeal_plant_plastid = {
        'GCA':'A','GCC':'A','GCG':'A','GCT':'A', 'TGC':'C','TGT':'C', 'GAC':'D', 'GAT':'D', 'GAA':'E', 'GAG':'E',
        'TTC':'F', 'TTT':'F', 'GGA':'G', 'GGC':'G', 'GGG':'G', 'GGT':'G', 'CAC':'H', 'CAT':'H', 'ATA':'I', 'ATC':'I', 'ATT':'I',
        'AAA':'K', 'AAG':'K', 'TTA':'L', 'TTG':'L', 'CTA':'L', 'CTC':'L', 'CTG':'L', 'CTT':'L', 'ATG':'M', 'AAC':'N', 'AAT':'N',
        'CCA':'P', 'CCC':'P', 'CCG':'P', 'CCT':'P', 'CAA':'Q', 'CAG':'Q', 'AGA':'R', 'AGG':'R', 'CGA':'R', 'CGC':'R', 'CGG':'R',
        'CGT':'R', 'AGC':'S', 'AGT':'S', 'TCA':'S', 'TCC':'S', 'TCG':'S', 'TCT':'S', 'ACA':'T', 'ACC':'T', 'ACG':'T', 'ACT':'T',
        'GTA':'V', 'GTC':'V', 'GTG':'V', 'GTT':'V', 'TGG':'W', 'TAC':'Y', 'TAT':'Y', 'TAG': '*', 'TGA':'*', 'TAA':'*'
    }
def __init__(self, name, seq, quality = None):
if Sequence.validate_name_string(name):
self.name = name
else:
raise NameError('Sequence name have to start with ">" or "@"')
self.seq = seq.strip()
self.quality = quality
# def is_valid(self):
# def validate_name(self):
@staticmethod
def validate_name_string(nstr):
if re.search('^>', nstr):
return 1
elif re.search('^@', nstr):
return 1
else:
return 0
def validate_seq(self):
'''
validates general seqence not specified for DNA or others.
'''
return Sequence.generic_validate(self.seq, '[^ACGNTUBDHKMRSVWY\-\nacgntubdhkmrsvwy]')
    @staticmethod
    def generic_validate(seq, domain):
        """Validate *seq* against *domain*, a regex character class listing
        the characters that are NOT allowed.

        A plain sequence passes when it contains no forbidden character.
        When the sequence contains digits it is treated as the numbered-block
        format ('61 xxxxxxxxxx xxxxxxxxxx ...') and each line is checked
        structurally (leading number, 10-char blocks, consistent length).
        Returns 1 when valid and 0 when invalid.
        NOTE(review): the 'line does not start with a digit' branch returns
        7, which is truthy and therefore reads as *valid* to callers that
        only truth-test the result -- confirm whether that is intended.
        """
        # pattern created from passed domain (domain contains chars that are not allowed)
        pattern = re.compile(domain) #'[^ACGNTUBDHKMRSVWY\-\nacgntubdhkmrsvwy]'
        # if sequence contains illegal chars
        if pattern.search(seq):
            # if digits it can be ok if format like (60 xxxxxxxxxx xxx...)
            if re.search('(\d+)', seq):
                # to check that we have to transform array
                seq_array = seq.split('\n')
                new_array = [] # array to store new sequence as array of arrays
                for r in seq_array:
                    r = r.lstrip() # removing ' ' from beginings and ends
                    nr = r.split(' ') # split to array to catch all blocks aaaaaaaaaa aaaaaaaaaa
                    new_array.append(nr)
                end_of_seq_array = len(seq_array)
                # if min. two lines calculate expected line length
                if end_of_seq_array > 1:
                    line_length = int(new_array[1][0])-int(new_array[0][0])
                # validate ecah block (between " " [space]) of given sequence
                i = 0
                while i < end_of_seq_array:
                    if not re.search('(\d+)', new_array[i][0]):
                        return 7 # line doesn't starts with digit
                    if not (len(new_array[i])-1)*10 == line_length and i != (end_of_seq_array-1):
                        return 0 # bad line length
                    for a, r in enumerate(new_array[i][1:]): # skip first elem which is digit
                        if len(r) != 10: # block not eq 10
                            if len(r) < 10: # if less it can be ok if last elem of last line
                                if(i == end_of_seq_array - 1):
                                    if a != len(new_array[i]) - 2: # if last -2 because enumerate is from first elem not 0 elem.
                                        return 0 # not last elem of last line
                                else:
                                    return 0 # not last line
                            else:
                                return 0 # block not eq 10
                        if pattern.search(r):
                            return 0
                    i += 1
            else:
                return 0 # digit is not first char
            # return pattern.search(seq) but nan error code returned before
            return 1
        return 1 # valid
# def validate_dna_seq(self):
# def validate_other_seq(self):
    @staticmethod
    def detailed_validate_generic(seq, domain):
        """Like generic_validate, but collect human-readable problem messages
        instead of stopping at the first error.

        Returns a list of message strings; an empty list means no problem was
        recorded.
        NOTE(review): `m` is a finditer iterator and is always truthy, so the
        `if m:` guard never skips the body even for clean sequences.
        """
        not_valid = 0  # NOTE(review): unused
        missmatches = {}  # NOTE(review): unused
        # pattern created from passed domain (domain contains chars that are not allowed)
        pattern = re.compile(domain)
        # find not allowed chars in sequence
        m = pattern.finditer(seq)
        log_info = []
        # if not allowed chars found
        if m:
            # it may be 61 xxxxxxxxxx xxx.... format
            if re.search('(\d+)', seq):
                seq_array = seq.split('\n')
                new_array = [] # array to store new sequence after cleaning and transformation
                for r in seq_array:
                    r = r.lstrip() # removing ' ' from beginings and ends
                    nr = r.split(' ') # split to array to catch all blocks aaaaaaaaaa aaaaaaaaaa
                    new_array.append(nr)
                end_of_seq_array = len(seq_array)
                # if min. two lines calculate expected line length
                if end_of_seq_array > 1:
                    line_length = int(new_array[1][0])-int(new_array[0][0])
                # validate each block (between " " [space]) of given sequence
                i = 0
                while i < end_of_seq_array:
                    # digit on begining of line was not found - error
                    if not re.search('(\d+)', new_array[i][0]):
                        log_info.append('line '+str(i+1)+": line doesn't starts with digit") # line doesn't starts with digit
                    # check if line length = expected line length last line can be shorter
                    if not (len(new_array[i])-1)*10 == line_length and i != (end_of_seq_array-1):
                        #return 0 # bad line length
                        log_info.append('line '+str(i+1)+': bad line length')
                    #chcek all blocks if are eq 10 (last can be shorter)
                    for a, r in enumerate(new_array[i][1:]): # skip first elem which is digit
                        if len(r) != 10: # block not eq 10
                            if len(r) < 10: # if less it can be ok if last elem of last line
                                if(i == end_of_seq_array - 1):
                                    if a != len(new_array[i]) - 2: # if last -2 because enumerate is from first elem not 0 elem.
                                        log_info.append('line '+str(i+1)+': block '+str(a+1)+' contains les then 10 chars') # not last elem of last line
                                else:
                                    log_info.append('line '+str(i+1)+': block '+str(a+1)+' contains les then 10 chars') # not last line
                            else:
                                log_info.append('line '+str(i+1)+': block '+str(a+1)+' contains more then 10 chars') # block gt 10
                        # if block contains illegal chars now after transtrmation it should contain only legal chars.
                        if pattern.search(r):
                            log_info.append('line '+str(i+1)+': block '+str(a+1)+' contains illegal chars')
                    i += 1
            else:
                # in this case it is not seq like "10 xxxxx xxxxx"
                for mitem in m:
                    log_info.append('Position:\t'+str(mitem.start())+'\tvalue:\t'+str(mitem.group()))
        # none of not allowed chars were found sequence OK
        return log_info
# def detailed_validate_dna_seq(self):
# def detailed_validate_other_seq(self):
def cut(self, length, step):
'''
cutting contig into smaller parts accordigly to supplied params
length of contig (number of chars)
step offset between current and next start
'''
self.normalize()
i = 0
contig_end = len(self.seq) # last position of contig
contig_list = [] # contig list returning by function
while i+length <= contig_end:
contig_list.append(Sequence(self.name+'_frag_'+str(i + 1)+':'+str(i + length), str(self.seq[i:i+length])))
i = i+step
return contig_list
def cut_name(self, length, start = 0):
self.name = self.name[start:length]
def leave_name_after_marker(self, mark, length = 0, keep_marker = 1):
m = re.search(re.escape(mark), self.name)
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
logger.debug(m)
logger.debug(keep_marker)
if m:
# keep original marker or skip it
if keep_marker == 1:
s = m.start()
else:
s = m.end()
# defined length or return string to end
if length > 0:
self.name = '>'+self.name[s:s+length].lstrip('>')
else:
self.name = '>'+self.name[s:].lstrip('>')
return 1
return 0
def reverse(self):
'''
creates reversed sequence
'''
self.normalize()
nr = re.sub('\n', '', self.seq)
rev = nr[::-1]
rev = rev.translate(maketrans('ACTGactg', 'TGACtgac'))
# creating 80 chars lines
#rev = re.sub("(.{80})", '\\1\n', rev, 0)
return Sequence('>rev_'+self.name.lstrip('>'), rev)
def normalize(self):
self.seq = re.sub(' ', '', self.seq)
self.seq = re.sub('^\d', '', self.seq, re.M)
self.seq = re.sub('\n', '', self.seq)
self.seq = re.sub('\r', '', self.seq)
def statistics(self):
'''
returns simple statistics for contig
'''
self.normalize()
r = {}
c = Counter(self.seq)
r['A'] = c['A']+c['a']
r['C'] = c['C']+c['c']
r['G'] = c['G']+c['g']
r['T'] = c['T']+c['t']
r['N'] = c['N']+c['n']
r['L'] = len(self.seq)
return r
#def getRange(self, start, stop):
# return self.seq[start:stop]
def translate_dna2rna(self):
nc = self.seq.translate(maketrans('ACTGactg', 'UGACugac'))
return Sequence('>rna_'+self.name, nc)
def translate_rna2dna(self):
nc = self.seq.translate(maketrans('UGACugac', 'ACTGactg'))
return Sequence('>dna_'+self.name, nc)
# ctrl f1 frame 1 forward, r1 frame 1 revers, fall torward all frames, rall reverse all frames, all in this way?
# supply dict of translation or its constant?
    @staticmethod
    def translate2protein_in_range_generic(seq, start, stop, tdict):
        """Find open reading frames in *seq* and translate them with *tdict*.

        start / stop -- lists of start and stop codons, joined into
                        alternation regexes
        Every start-codon hit that lies beyond the last position already
        consumed in its reading frame opens a translation that proceeds codon
        by codon until a stop codon (included in the output) or the end of
        *seq*.
        Returns [frame1, frame2, frame3] where each entry is a list of
        (protein, start_pos, end_pos, str(start_pos/3+1), str(end_pos-start_pos))
        tuples (string fields use the original integer-division formatting).
        """
        p = ''
        p_stop = ''
        # search results in distribution to frames
        frame1 = []
        frame2 = []
        frame3 = []
        # creating pattern (from dict) to find start codons
        for r in start:
            p += r+'|'
        p = '('+p.rstrip('|')+')'
        # creating pattern to find stop codons
        for r in stop:
            p_stop += r+'|'
        p_stop = '('+p_stop.rstrip('|')+')'
        m = re.finditer(p, seq)
        # there will be stored latest string position for each frame
        frame_iterator = [0,0,0]
        stop_pos = len(seq) # where to stop searching if no stopcodon found
        # using each found start codon
        for r in m:
            # if start is lower then last used position skip it.
            if frame_iterator[r.start()%3] <= r.start():
                # set i for start position of current start contig
                i = r.start()
                ret = ''
                while i+3 <= stop_pos:
                    ret += Sequence.translate(seq[i:i+3], tdict)
                    if re.match(p_stop, seq[i:i+3]):
                        i = i+3
                        break
                    else:
                        i = i+3
                frame_iterator[r.start()%3] = i
                if r.start()%3 == 0:
                    frame1.append((ret,r.start(),i,str(r.start()/3+1),str(i-r.start())))
                elif r.start()%3 == 1:
                    frame2.append((ret,r.start(),i,str(r.start()/3+1),str(i-r.start())))
                elif r.start()%3 == 2:
                    frame3.append((ret,r.start(),i,str(r.start()/3+1),str(i-r.start())))
        return [frame1, frame2, frame3]
def translate2protein_in_range(self, start, stop, tdict):
f = Sequence.translate2protein_in_range_generic(self.seq, start, stop, tdict)
r = Sequence.translate2protein_in_range_generic(self.reverse().seq, start, stop, tdict)
return {'fwd':f, 'rev':r}
@staticmethod
def translate2protein_generic(seq, tdict):
# +5 to secure all frames
f1 = ''
f2 = ''
f3 = ''
i = 0
while i+5 < len(seq):
f1 += Sequence.translate(seq[i:i+3], tdict)
f2 += Sequence.translate(seq[i+1:i+4], tdict)
f3 += Sequence.translate(seq[i+2:i+5], tdict)
i = i + 3
return [('',f1,seq[-2:]),(seq[0:1],f2,seq[-1:]),(seq[0:2],f2,'')]
def translate2protein(self, tdict):
f = Sequence.translate2protein_generic(self.seq, tdict)
r = Sequence.translate2protein_generic(self.reverse().seq, tdict)
return {'fwd':f, 'rev':r}
@staticmethod
def translate(codon, tdict):
if codon in tdict:
return tdict[codon]
else:
return '|'+codon+'|'
def find_aprox_motif(self, motif, missmatch_level):
self.normalize()
return fuzzy.find_all_motifs(motif, self.seq, missmatch_level, hs_start_pos = 0)
def find_primers(self, start, stop, mode, len_min = 50, len_max = 10000):
return self.find_aprox_primers(start, stop, mode, 0, len_min, len_max)
def find_aprox_primers(self, start, stop, mode, missmatch_level = 0, len_min = 50, len_max = 10000):
#start 5'->3'
# add missmatch_level condition if 50%>
logger = logging.getLogger(__name__)
#logger.setLevel(logging.DEBUG)
logger.debug('given args: start:'+start+' stop: '+stop+' mode: '+mode+' mm level: '+str(missmatch_level)+' len_min: '+str(len_min)+' len_max: '+str(len_max))
#logger.debug('sequence: '+self.seq)
if mode.upper() == 'FR':
rev = stop[::-1]
stop = rev.translate(maketrans('ACTGactg', 'TGACtgac'))
elif mode.upper() != 'FF':
raise ('Unexpected mode: '+str(mode)+' expected values [FR|FF]')
r_list = []
self.normalize()
res = fuzzy.find_all_motifs_in_aprox_range(start, stop, self.seq, missmatch_level, 0, len_min, len_max)
if res:
r_list.extend(res)
res = fuzzy.find_all_motifs_in_aprox_range(start, stop, self.reverse().seq, missmatch_level, 0, len_min, len_max)
if res:
r_list.extend(res)
logger.debug(r_list)
return r_list
def equal_to_name_frag(self, name_frag):
if re.search(re.escape(name_frag), self.name):
#print re.search(name_frag, self.name)
return 1
return 0
def __str__(self):
'''
creates nicely outputed string
'''
if re.search('^@', self.name) and len(self.quality) == len(self.seq):
return self.name+'\n'+self.seq+'\n+\n'+self.quality+'\n'
else:
return self.name+'\n'+re.sub("(.{80})", '\\1\n', self.seq, 0)+'\n'
def __len__(self):
return len(self.seq)
    def __cmp__(self, other):
        # Python-2-only comparison hook. NOTE(review): this only signals
        # equal (0) vs. not equal (1) on the sequence string; it never
        # returns -1, so it does NOT define a valid ordering and sorting
        # Sequence objects through it is unreliable. Kept as-is for
        # backwards compatibility; __eq__ below covers equality.
        if self.seq == other.seq:
            return 0
        else:
            return 1
def __eq__(self, other):
return self.seq == other.seq
|
ryangallen/django | refs/heads/master | tests/migrations2/test_migrations_2_first/0001_initial.py | 427 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Test migration: creates the OtherAuthor model. Depends on whatever is
    # the first migration of the "migrations" app ("__first__" is resolved
    # by Django's migration loader at load time).
    dependencies = [
        ("migrations", "__first__"),
    ]
    operations = [
        migrations.CreateModel(
            "OtherAuthor",
            [
                ("id", models.AutoField(primary_key=True)),
                ("name", models.CharField(max_length=255)),
                ("slug", models.SlugField(null=True)),
                ("age", models.IntegerField(default=0)),
                ("silly_field", models.BooleanField(default=False)),
            ],
        ),
    ]
|
ehartsuyker/securedrop | refs/heads/develop | securedrop/journalist_app/forms.py | 2 | # -*- coding: utf-8 -*-
from flask_babel import lazy_gettext as gettext
from flask_wtf import FlaskForm
from flask_wtf.file import FileField, FileAllowed, FileRequired
from wtforms import (TextAreaField, TextField, BooleanField, HiddenField,
ValidationError)
from wtforms.validators import InputRequired, Optional
from models import Journalist
def otp_secret_validation(form, field):
    """WTForms validator: an HOTP secret must be exactly 40 characters
    once spaces are stripped; raises ValidationError otherwise."""
    strip_whitespace = field.data.replace(' ', '')

    if len(strip_whitespace) != 40:
        # BUG FIX: translate the message first, THEN interpolate. Formatting
        # the msgid before the gettext() lookup meant the formatted string
        # never matched the translation catalog.
        raise ValidationError(gettext(
            'HOTP secrets are 40 characters long - '
            'you have entered {num_chars}.').format(
                num_chars=len(strip_whitespace)))
def minimum_length_validation(form, field):
    """WTForms validator: reject values shorter than
    Journalist.MIN_USERNAME_LEN; raises ValidationError otherwise."""
    if len(field.data) < Journalist.MIN_USERNAME_LEN:
        # BUG FIX: translate first, then fill in the placeholders -- calling
        # .format() on the msgid before gettext() broke the catalog lookup.
        raise ValidationError(
            gettext('Field must be at least {min_chars} '
                    'characters long but only got '
                    '{num_chars}.').format(
                        min_chars=Journalist.MIN_USERNAME_LEN,
                        num_chars=len(field.data)))
class NewUserForm(FlaskForm):
    # Admin form for creating a new journalist account.
    # username must be present and satisfy the minimum-length policy.
    username = TextField('username', validators=[
        InputRequired(message=gettext('This field is required.')),
        minimum_length_validation
    ])
    # The password is generated server-side and round-tripped as a hidden field.
    password = HiddenField('password')
    is_admin = BooleanField('is_admin')
    # When is_hotp is checked, otp_secret must carry a 40-character HOTP seed
    # (validated by otp_secret_validation); otherwise it may be left empty.
    is_hotp = BooleanField('is_hotp')
    otp_secret = TextField('otp_secret', validators=[
        otp_secret_validation,
        Optional()
    ])
class ReplyForm(FlaskForm):
    # Journalist reply to a source; the message body may not be empty.
    message = TextAreaField(
        'Message',
        id="content-area",
        validators=[
            InputRequired(message=gettext(
                'You cannot send an empty reply.')),
        ],
    )
class LogoForm(FlaskForm):
    # Custom-logo upload: a file is required and only PNG images are accepted.
    logo = FileField(validators=[
        FileRequired(message=gettext('File required.')),
        FileAllowed(['png'],
                    message=gettext("You can only upload PNG image files."))
    ])
|
sallaire/Sick-Beard | refs/heads/development | sickbeard/scene_exceptions.py | 30 | # Author: Nic Wolfe <nic@wolfeden.ca>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
import re
import os
import urllib, urllib2, httplib
import sickbeard
from sickbeard import helpers
from sickbeard import name_cache
from sickbeard import logger
from sickbeard import db
try:
import json
except ImportError:
from lib import simplejson as json
from sickbeard.exceptions import ex
def get_scene_exceptions(tvdb_id, season=-1):
    """
    Given a tvdb_id, return a list of all the scene exceptions.
    """
    myDB = db.DBConnection("cache.db")
    rows = myDB.select("SELECT show_name FROM scene_exceptions WHERE tvdb_id = ? and season= ?", [tvdb_id, season])
    return [row["show_name"] for row in rows]
def get_scene_exception_by_name(show_name):
    """
    Given a show name, return the tvdbid of the exception, None if no exception
    is present.
    """
    myDB = db.DBConnection("cache.db")

    # try the obvious case first: an exact (case-insensitive) name match
    exception_result = myDB.select("SELECT tvdb_id FROM scene_exceptions WHERE LOWER(show_name) = ?", [show_name.lower()])
    if exception_result:
        return int(exception_result[0]["tvdb_id"])

    # fall back to comparing against the sanitized form of every exception
    lowered = show_name.lower()
    for row in myDB.select("SELECT show_name, tvdb_id FROM scene_exceptions"):
        candidate = row["show_name"]
        candidate_id = int(row["tvdb_id"])
        sanitized = helpers.sanitizeSceneName(candidate).lower().replace('.', ' ')
        if lowered in (candidate.lower(), sanitized):
            logger.log(u"Scene exception lookup got tvdb id "+str(candidate_id)+u", using that", logger.DEBUG)
            return candidate_id

    return None
def retrieve_exceptions():
    """
    Looks up the exceptions on github, parses them into a dict, and inserts them into the
    scene_exceptions table in cache.db. Also clears the scene name cache.
    """
    exception_dict = {}

    # exceptions are stored on github pages
    url = 'http://midgetspy.github.com/sb_tvdb_scene_exceptions/exceptions.txt'
    excepfile= os.path.join(os.path.join(sickbeard.PROG_DIR,'Used_Files'),'exceptions.txt')

    logger.log(u"Check scene exceptions file to update db")

    # BUG FIX: the handle used to stay open on the early-return paths below
    # (and when an exception propagated); close it as soon as the contents
    # are in memory.
    f=open(excepfile,"r")
    try:
        data = f.read()
    finally:
        f.close()

    if data is None:
        # NOTE(review): file.read() never actually returns None, so this is
        # effectively dead; kept for parity with the original error handling.
        logger.log(u"Check scene exceptions update failed. Unable to get file: " + excepfile, logger.ERROR)
        return
    else:
        # each exception is on one line with the format tvdb_id: 'show name 1', 'show name 2', etc
        for cur_line in data.splitlines():
            try:
                cur_line = cur_line.decode('utf-8')
            except UnicodeError:
                # legacy encoding fallback (was a bare except)
                cur_line = cur_line.decode('latin-1')
            tvdb_id, sep, aliases = cur_line.partition(':') #@UnusedVariable

            if not aliases:
                continue

            tvdb_id = int(tvdb_id)

            # regex out the list of shows, taking \' into account
            alias_list = [{re.sub(r'\\(.)', r'\1', x):-1} for x in re.findall(r"'(.*?)(?<!\\)',?", aliases)]

            exception_dict[tvdb_id] = alias_list

    xem_exceptions = _xem_excpetions_fetcher()
    exception_dict = dict(xem_exceptions.items() + exception_dict.items())

    if not len(exception_dict):
        logger.log("Retreived exception list is totally empty. Assuming remote server error not flushing local and stoping now")
        return False

    myDB = db.DBConnection("cache.db")

    # write all the exceptions we got off the net into the database
    for cur_tvdb_id in exception_dict:

        # get a list of the existing exceptions for this ID
        existing_exceptions = [x["show_name"] for x in myDB.select("SELECT * FROM scene_exceptions WHERE tvdb_id = ?", [cur_tvdb_id])]

        for cur_exception_dict in exception_dict[cur_tvdb_id]:
            # if this exception isn't already in the DB then add it
            cur_exception, curSeason = cur_exception_dict.items()[0]
            if cur_exception not in existing_exceptions:
                myDB.action("INSERT INTO scene_exceptions (tvdb_id, show_name, season) VALUES (?,?,?)", [cur_tvdb_id, cur_exception, curSeason])

    name_cache.clearCache()
def update_scene_exceptions(tvdb_id, scene_exceptions):
    """
    Given a tvdb_id, and a list of all show scene exceptions, update the db.
    """
    myDB = db.DBConnection("cache.db")

    # keep the season of the existing entries, defaulting to -1 (show-wide)
    sql_cur_season = myDB.select("SELECT season FROM scene_exceptions WHERE tvdb_id=?", [tvdb_id])
    cur_season = sql_cur_season[0][0] if sql_cur_season else -1

    # replace the whole exception set for this show
    myDB.action('DELETE FROM scene_exceptions WHERE tvdb_id=?', [tvdb_id])
    for cur_exception in scene_exceptions:
        myDB.action("INSERT INTO scene_exceptions (tvdb_id, show_name, season) VALUES (?,?,?)", [tvdb_id, cur_exception, cur_season])

    name_cache.clearCache()
def _xem_excpetions_fetcher():
exception_dict = {}
opener = urllib2.build_opener()
url = "http://thexem.de/map/allNames?origin=tvdb&seasonNumbers=1"
try:
f = opener.open(url)
except (EOFError, IOError), e:
logger.log(u"Unable to connect to XEM. Is thexem.de down ?" + ex(e), logger.ERROR)
return exception_dict
except httplib.InvalidURL, e:
logger.log(u"Invalid XEM host. Is thexem.de down ?: " + ex(e), logger.ERROR)
return exception_dict
if not f:
logger.log(u"Empty response from " + url + ": " + ex(e), logger.ERROR)
return exception_dict
try:
xemJson = json.loads(f.read())
except ValueError, e:
pass
if xemJson['result'] == 'failure':
return exception_dict
for tvdbid, names in xemJson['data'].items():
exception_dict[int(tvdbid)] = names
logger.log(u"xem exception dict: " + str(exception_dict), logger.DEBUG)
return exception_dict
def getSceneSeasons(tvdb_id):
    """get a list of season numbers that have scene excpetions
    """
    myDB = db.DBConnection("cache.db")
    rows = myDB.select("SELECT DISTINCT season FROM scene_exceptions WHERE tvdb_id = ?", [tvdb_id])
    return [row["season"] for row in rows]
|
anurag-ks/eden | refs/heads/master | modules/templates/ARC/menus.py | 13 | # -*- coding: utf-8 -*-
from gluon import current
from s3 import *
from s3layouts import *
try:
# Nice to not have to copy this file, but currently imported in s3layouts.py
#from templates.IFRC.layouts import *
from .layouts import *
except ImportError:
pass
import s3menus as default
#red_cross_filter = {"organisation_type.name" : "Red Cross / Red Crescent"}
# =============================================================================
class S3MainMenu(default.S3MainMenu):
    """ Custom Application Main Menu """
    # -------------------------------------------------------------------------
    @classmethod
    def menu(cls):
        """ Compose Menu """
        # Modules menus
        main_menu = MM()(
            cls.menu_modules(),
        )
        # Additional menus (stored on current.menu for the layout to render)
        current.menu.personal = cls.menu_personal()
        current.menu.dashboard = cls.menu_dashboard()
        current.menu.org = cls.menu_org()
        return main_menu
    # -------------------------------------------------------------------------
    @classmethod
    def menu_modules(cls):
        """ Custom Modules Menu

            Top-level navigation; entries for disabled modules are dropped
            automatically by homepage()/MM(). Commented blocks are menu
            variants that are currently switched off for this template.
        """
        T = current.T
        return [
            homepage("gis")(
            ),
            homepage("cms", f="newsfeed", m="datalist", name=T("News"))(
            ),
            homepage("event", name= "Situational Awareness")(
                MM("Incidents", c="event", f="incident"),
                #MM("Incident Reports", c="event", f="incident_report"),
                MM("Deployments", c="deploy", f="mission"),
                MM("Assessments", c="survey", f="series"),
                MM("Situation Reports", c="doc", f="sitrep"),
                MM("Messages", c="msg", f="index"),
                # Have as a Tab on the Incident?
                #MM("Budgets", c="budget", f="budget"),
                # Have as a Tab on the Incident?
                #MM("Assignments", c="deploy", f="assignment"),
            ),
            #homepage("sit", "survey")(
            #    MM("Assessment Templates", c="survey", f="template"),
            #    MM("Assessments", c="survey", f="series"),
            #    MM("Situation Reports", c="sit", f="report"),
            #),
            #homepage("deploy", f="mission", m="summary",
            #         vars={"~.status__belongs": "2"})(
            #    MM("Missions", c="deploy", f="mission", m="summary"),
            #    MM("Human Resources", c="deploy", f="human_resource", m="summary"),
            #),
            homepage("cr", name="Operations Management")(
                MM("Budgets", c="budget", f="budget"),
                MM("Requests", c="req", f="req"),
                MM("Received Shipments", c="inv", f="recv"),
                MM("Sent Shipments", c="inv", f="send"),
                MM("Shelters", c="cr", f="shelter", m="summary"),
                MM("Warehouses", c="inv", f="warehouse"),
            ),
            homepage("project")(
                MM("Projects", c="project", f="project"),
                #MM("Communities", c="project", f="location"),
                MM("Tasks", c="project", f="task"),
            ),
            homepage("hrm", "org", "vol", "asset", name=T("Resources"),
                     vars=dict(group="staff"))(
                MM("Staff", c="hrm", f="staff", m="summary"),
                MM("Volunteers", c="vol", f="volunteer", m="summary"),
                MM("Teams", c="hrm", f="group"),
                MM("Organizations", c="org", f="organisation",
                   #vars = red_cross_filter
                   ),
                MM("Facilities", c="org", f="facility", m="summary"),
                MM("Offices", c="org", f="office", m="summary"),
                MM("Job Titles", c="hrm", f="job_title"),
                #MM("Skill List", c="hrm", f="skill"),
                MM("Training Events", c="hrm", f="training_event"),
                MM("Training Courses", c="hrm", f="course"),
                MM("Certificate List", c="hrm", f="certificate"),
                MM("Assets", c="asset", f="asset", m="summary"),
                MM("Items", c="supply", f="item"),
                MM("Vehicles", c="vehicle", f="vehicle", m="summary"),
            ),
            #homepage("vol", name=T("Volunteers"))(
            #    MM("Volunteers", c="vol", f="volunteer", m="summary"),
            #    MM("Teams", c="vol", f="group"),
            #    MM("Volunteer Roles", c="vol", f="job_title"),
            #    MM("Programs", c="vol", f="programme"),
            #    #MM("Skill List", c="vol", f="skill"),
            #    MM("Training Events", c="vol", f="training_event"),
            #    MM("Training Courses", c="vol", f="course"),
            #    MM("Certificate List", c="vol", f="certificate"),
            #),
            #homepage("member")(
            #    MM("Members", c="member", f="membership", m="summary"),
            #),
            #homepage("cr", "inv", "org", name="Facilities")(
            #    MM("Facilities", c="org", f="facility", m="summary"),
            #    MM("Offices", c="org", f="office", m="summary"),
            #    MM("Shelters", c="cr", f="shelter", m="summary"),
            #    MM("Warehouses", c="inv", f="warehouse", m="summary"),
            #),
            #homepage("asset")(
            #    MM("Assets", c="asset", f="asset", m="summary"),
            #    MM("Items", c="asset", f="item", m="summary"),
            #    MM("Vehicles", c="vehicle", f="vehicle", m="summary"),
            #),
            #homepage("inv", "supply", "req", name="Inventory")(
            #    MM("Warehouses", c="inv", f="warehouse"),
            #    MM("Received Shipments", c="inv", f="recv"),
            #    MM("Sent Shipments", c="inv", f="send"),
            #    MM("Items", c="supply", f="item"),
            #    MM("Item Catalogs", c="supply", f="catalog"),
            #    MM("Item Categories", c="supply", f="item_category"),
            #    MM("Requests", c="req", f="req"),
            #    #MM("Commitments", f="commit"),
            #),
            #homepage("vulnerability")(
            #    MM("Map", c="vulnerability", f="index"),
            #),
        ]
    # -------------------------------------------------------------------------
    @classmethod
    def menu_dashboard(cls):
        """ Dashboard Menu (at bottom of page)

            Contents depend on which controller is being viewed; returns
            None for pages that have no dashboard.
        """
        DB = S3DashBoardMenuLayout
        request = current.request
        controller = request.controller
        if controller == "vol":
            dashboard = DB()(
                DB("Volunteers",
                   c="vol",
                   image = "graphic_staff_wide.png",
                   title = "Volunteers")(
                   DB("Manage Volunteer Data", f="volunteer", m="summary"),
                   DB("Manage Teams Data", f="group"),
                ),
                DB("Catalogs",
                   c="hrm",
                   image="graphic_catalogue.png",
                   title="Catalogs")(
                   DB("Certificates", f="certificate"),
                   DB("Training Courses", f="course"),
                   #DB("Skills", f="skill"),
                   DB("Job Titles", f="job_title")
                ))
        elif controller in ("hrm", "org"):
            dashboard = DB()(
                DB("Staff",
                   c="hrm",
                   image = "graphic_staff_wide.png",
                   title = "Staff")(
                   DB("Manage Staff Data", f="staff", m="summary"),
                   DB("Manage Teams Data", f="group"),
                ),
                DB("Offices",
                   c="org",
                   image = "graphic_office.png",
                   title = "Offices")(
                   DB("Manage Offices Data", f="office"),
                   DB("Manage Organizations", f="organisation",
                      #vars=red_cross_filter
                      ),
                ),
                DB("Catalogs",
                   c="hrm",
                   image="graphic_catalogue.png",
                   title="Catalogs")(
                   DB("Certificates", f="certificate"),
                   DB("Training Courses", f="course"),
                   #DB("Skills", f="skill"),
                   DB("Job Titles", f="job_title")
                ))
        elif controller == "default" and request.function == "index":
            # Front page: one tile per major module
            dashboard = DB(_id="dashboard")(
                DB("Assessments", c="survey", f="index",
                   image = "graphic_assessments.png",
                   title = "Assessments",
                   text = "Design, deploy & analyze surveys."),
                DB("Projects", c="project", f="index",
                   image = "graphic_tools.png",
                   title = "Projects",
                   text = "Tracking and analysis of Projects and Activities."),
                DB("Staff", c="hrm", f="staff", m="summary",
                   image = "graphic_staff.png",
                   title = "Staff",
                   text = "Add new and manage existing staff."),
                DB("Volunteers", c="vol", f="volunteer", m="summary",
                   image = "graphic_volunteers.png",
                   title = "Volunteers",
                   text = "Add new and manage existing volunteers."),
                #DB("Members", c="member", f="membership", m="summary",
                #   image = "graphic_members.png",
                #   title = "Members",
                #   text = "Add new and manage existing members."),
                DB("Assets", c="asset", f="index",
                   image = "graphic_assets.png",
                   title = "Assets",
                   text = "Manage office inventories and assets."),
                DB("Warehouses", c="inv", f="index",
                   image = "graphic_warehouse.png",
                   title = "Warehouses",
                   text = "Stocks and relief items."),
                )
        else:
            dashboard = None
        return dashboard
    # -------------------------------------------------------------------------
    @classmethod
    def menu_org(cls):
        """ Custom Organisation Menu """
        OM = S3OrgMenuLayout
        return OM()
    # -------------------------------------------------------------------------
    @classmethod
    def menu_personal(cls):
        """ Custom Personal Menu (language selector + login/profile links) """
        auth = current.auth
        s3 = current.response.s3
        settings = current.deployment_settings
        # Language selector
        menu_lang = ML("Language", right=True)
        for language in s3.l10n_languages.items():
            code, name = language
            menu_lang(
                ML(name, translate=False, lang_code=code, lang_name=name)
            )
        if not auth.is_logged_in():
            request = current.request
            login_next = URL(args=request.args, vars=request.vars)
            if request.controller == "default" and \
               request.function == "user" and \
               "_next" in request.get_vars:
                login_next = request.get_vars["_next"]
            self_registration = settings.get_security_registration_visible()
            menu_personal = MP()(
                        MP("Register", c="default", f="user",
                           m="register", check=self_registration),
                        MP("Login", c="default", f="user",
                           m="login", vars=dict(_next=login_next)),
                        MP("Lost Password", c="default", f="user",
                           m="retrieve_password"),
                        menu_lang
            )
        else:
            s3_has_role = auth.s3_has_role
            # ORG_ADMINs get a restricted admin entry (user management only)
            is_org_admin = lambda i: s3_has_role("ORG_ADMIN") and \
                                     not s3_has_role("ADMIN")
            menu_personal = MP()(
                        MP("Administration", c="admin", f="index",
                           check=s3_has_role("ADMIN")),
                        MP("Administration", c="admin", f="user",
                           check=is_org_admin),
                        MP("Profile", c="default", f="person"),
                        MP("Subscription", c="default", f="index", args=["subscriptions"]),
                        # Allow space for 'Subscription'
                        #MP("Change Password", c="default", f="user",
                        MP("Password", c="default", f="user",
                           m="change_password"),
                        MP("Logout", c="default", f="user",
                           m="logout"),
                        menu_lang,
            )
        return menu_personal
# =============================================================================
class S3OptionsMenu(default.S3OptionsMenu):
""" Custom Controller Menus """
# -------------------------------------------------------------------------
def admin(self):
""" ADMIN menu """
menu = super(S3OptionsMenu, self).admin()
gis_item = M("Map Settings", c="gis", f="config")
menu.append(gis_item)
return menu
# -------------------------------------------------------------------------
    @staticmethod
    def asset():
        """ ASSET Controller

            Side menu for asset management; also reused by the vehicle
            controller. Admin-only sections are restricted via the ADMIN role.
        """
        ADMIN = current.session.s3.system_roles.ADMIN
        return M(c="asset")(
                    M("Assets", f="asset", m="summary")(
                        M("Create", m="create"),
                        #M("Map", m="map"),
                        M("Import", m="import", p="create"),
                    ),
                    #M("Brands", f="brand",
                    #  restrict=[ADMIN])(
                    #    M("Create", m="create"),
                    #),
                    M("Items", f="item", m="summary")(
                        M("Create", m="create"),
                        M("Import", f="catalog_item", m="import", p="create"),
                    ),
                    M("Item Categories", f="item_category",
                      restrict=[ADMIN])(
                        M("Create", m="create"),
                    ),
                    M("Catalogs", f="catalog",
                      restrict=[ADMIN])(
                        M("Create", m="create"),
                    ),
                    M("Suppliers", f="supplier")(
                        M("Create", m="create"),
                        M("Import", m="import", p="create"),
                    ),
                    M("Vehicles", c="vehicle" , f="vehicle")(
                        M("Create", m="create"),
                        M("Import", m="import", p="create"),
                        M("Map", m="map"),
                    ),
                    M("Vehicle Types", c="vehicle", f="vehicle_type")(
                        M("Create", m="create"),
                    ),
                )
# -------------------------------------------------------------------------
def doc(self):
""" Situation Reports """
# Same as Events
return self.event()
# -------------------------------------------------------------------------
    @staticmethod
    def event():
        """ Incidents

            Situational-awareness side menu (incidents, assessments,
            situation reports); also reused by the doc and survey controllers.
            The Incident Types section is only shown to ADMINs.
        """
        return M()(
                    #M("Events", c="event", f="event")(
                    #    M("Create", m="create"),
                    #),
                    M("Incidents", c="event", f="incident", args="summary")(
                        M("Create", m="create"),
                    ),
                    #M("Incident Reports", c="event", f="incident_report")(
                    #    M("Create", m="create"),
                    #    #M("Open Incidents", vars={"open": 1}),
                    #    M("Map", m="map"),
                    #    #M("Timeline", args="timeline"),
                    #    M("Report", m="report")
                    #),
                    M("Assessments", c="survey", f="series")(
                        M("Create", m="create"),
                    ),
                    M("Situation Reports", c="doc", f="sitrep")(
                        M("Create", m="create"),
                    ),
                    M("Incident Types", c="event", f="incident_type",
                      check=current.auth.s3_has_role(current.session.s3.system_roles.ADMIN))(
                        M("Create", m="create"),
                    ),
                    #M("Reports", c="event", f="incident_report", m="report")(
                    #    M("Incident Reports", m="report"),
                    #),
                )
# -------------------------------------------------------------------------
def gis(self):
""" GIS / Mapping """
if current.request.function == "index":
# Empty so as to leave maximum space for the Map
# - functionality accessible via the Admin menu instead
return None
else:
return super(S3OptionsMenu, self).gis()
# -------------------------------------------------------------------------
    @staticmethod
    def hrm():
        """ HRM / Human Resource Management

            Side menu for staff management; also reused by the org
            controller. Most sections only appear in "manager mode"
            (no personal-mode restriction active in s3.hrm).
        """
        session = current.session
        s3 = current.session.s3
        ADMIN = s3.system_roles.ADMIN
        if "hrm" not in s3:
            # Lazy initialization of the HRM session variables
            current.s3db.hrm_vars()
        hrm_vars = s3.hrm
        SECTORS = "Clusters" if current.deployment_settings.get_ui_label_cluster() \
                             else "Sectors"
        # Deferred checks: evaluated immediately before rendering
        manager_mode = lambda i: hrm_vars.mode is None
        personal_mode = lambda i: hrm_vars.mode is not None
        is_org_admin = lambda i: hrm_vars.orgs and True or \
                                 ADMIN in s3.roles
        is_super_editor = lambda i: current.auth.s3_has_role("staff_super") or \
                                    current.auth.s3_has_role("vol_super")
        staff = {"group": "staff"}
        return M()(
                    M("Staff", c="hrm", f=("staff", "person"), m="summary",
                      check=manager_mode)(
                        M("Create", m="create"),
                        M("Import", f="person", m="import",
                          vars=staff, p="create"),
                    ),
                    M("Staff & Volunteers (Combined)",
                      c="hrm", f="human_resource", m="summary",
                      check=[manager_mode, is_super_editor]),
                    M("Teams", c="hrm", f="group",
                      check=manager_mode)(
                        M("Create", m="create"),
                        M("Search Members", f="group_membership"),
                        M("Import", f="group_membership", m="import"),
                    ),
                    M("Organizations", c="org",
                      f="organisation",
                      #vars=red_cross_filter,
                      check=manager_mode)(
                        M("Create", m="create",
                          #vars=red_cross_filter
                          ),
                        M("Import", m="import", p="create", check=is_org_admin)
                    ),
                    M("Offices", c="org", f="office",
                      check=manager_mode)(
                        M("Create", m="create"),
                        M("Import", m="import", p="create"),
                    ),
                    M("Facilities", c="org", f="facility")(
                        M("Create", m="create"),
                    ),
                    M("Facility Types", c="org", f="facility_type",
                      restrict=[ADMIN])(
                        M("Create", m="create"),
                    ),
                    M("Department Catalog", c="hrm", f="department",
                      check=manager_mode)(
                        M("Create", m="create"),
                    ),
                    M("Job Title Catalog", c="hrm", f="job_title",
                      check=manager_mode)(
                        M("Create", m="create"),
                        M("Import", m="import", p="create", check=is_org_admin),
                    ),
                    #M("Skill Catalog", f="skill",
                    #  check=manager_mode)(
                    #    M("Create", m="create"),
                    #    #M("Skill Provisions", f="skill_provision"),
                    #),
                    M("Training Events", c="hrm", f="training_event",
                      check=manager_mode)(
                        M("Create", m="create"),
                        M("Search Training Participants", f="training"),
                        M("Import Participant List", f="training", m="import"),
                    ),
                    M("Reports", c="hrm", f="staff", m="report",
                      check=manager_mode)(
                        M("Staff Report", m="report"),
                        M("Expiring Staff Contracts Report",
                          vars=dict(expiring="1")),
                        M("Training Report", f="training", m="report"),
                    ),
                    M("Training Course Catalog", c="hrm", f="course",
                      check=manager_mode)(
                        M("Create", m="create"),
                        M("Import", m="import", p="create", check=is_org_admin),
                        M("Course Certificates", f="course_certificate"),
                    ),
                    M("Certificate Catalog", c="hrm", f="certificate",
                      check=manager_mode)(
                        M("Create", m="create"),
                        M("Import", m="import", p="create", check=is_org_admin),
                        #M("Skill Equivalence", f="certificate_skill"),
                    ),
                    M("Organization Types", c="org", f="organisation_type",
                      restrict=[ADMIN],
                      check=manager_mode)(
                        M("Create", m="create"),
                    ),
                    M("Office Types", c="org", f="office_type",
                      restrict=[ADMIN],
                      check=manager_mode)(
                        M("Create", m="create"),
                    ),
                    #M("Facility Types", c="org", f="facility_type",
                    #  restrict=[ADMIN],
                    #  check=manager_mode)(
                    #    M("Create", m="create"),
                    #),
                    #M("My Profile", c="hrm", f="person",
                    #  check=personal_mode, vars=dict(access="personal")),
                    # This provides the link to switch to the manager mode:
                    M("Human Resources", c="hrm", f="index",
                      check=[personal_mode, is_org_admin]),
                    # This provides the link to switch to the personal mode:
                    #M("Personal Profile", c="hrm", f="person",
                    #  check=manager_mode, vars=dict(access="personal"))
                )
# -------------------------------------------------------------------------
    @staticmethod
    def inv():
        """ INV / Inventory

            Side menu for warehouses, stock, shipments and requests; also
            reused by the req controller. Catalog maintenance entries are
            restricted to ADMINs.
        """
        ADMIN = current.session.s3.system_roles.ADMIN
        s3db = current.s3db
        # Install the inv_recv CRUD strings so the title below is populated
        s3db.inv_recv_crud_strings()
        inv_recv_list = current.response.s3.crud_strings.inv_recv.title_list
        #settings = current.deployment_settings
        #use_adjust = lambda i: not settings.get_inv_direct_stock_edits()
        #def use_adjust(i):
        #    db = current.db
        #    otable = s3db.org_organisation
        #    try:
        #        ausrc = db(otable.name == "Australian Red Cross").select(otable.id,
        #                                                                 limitby=(0, 1)
        #                                                                 ).first().id
        #    except:
        #        # No IFRC prepop done - skip (e.g. testing impacts of CSS changes in this theme)
        #        return False
        #    if current.auth.root_org() == ausrc:
        #        # AusRC use proper Logistics workflow
        #        return True
        #    else:
        #        # Others use simplified version
        #        return False
        #use_commit = lambda i: settings.get_req_use_commit()
        return M()(
                    #M("Home", f="index"),
                    M("Warehouses", c="inv", f="warehouse")(
                        M("Create", m="create"),
                        M("Import", m="import", p="create"),
                    ),
                    M("Warehouse Stock", c="inv", f="inv_item")(
                        M("Search Shipped Items", f="track_item"),
                        #M("Adjust Stock Levels", f="adj", check=use_adjust),
                        #M("Kitting", f="kit"),
                        M("Import", f="inv_item", m="import", p="create"),
                    ),
                    M("Requests", c="req", f="req")(
                        M("Create", m="create"),
                        M("Requested Items", f="req_item"),
                        M("Recurring Requests", f="req_template"),
                    ),
                    #M("Commitments", c="req", f="commit", check=use_commit)(
                    #),
                    M("Reports", c="inv", f="inv_item")(
                        M("Warehouse Stock", f="inv_item",m="report"),
                        #M("Expiration Report", c="inv", f="track_item",
                        #  vars=dict(report="exp")),
                        #M("Monetization Report", c="inv", f="inv_item",
                        #  vars=dict(report="mon")),
                        #M("Utilization Report", c="inv", f="track_item",
                        #  vars=dict(report="util")),
                        #M("Summary of Incoming Supplies", c="inv", f="track_item",
                        #  vars=dict(report="inc")),
                        # M("Summary of Releases", c="inv", f="track_item",
                        #  vars=dict(report="rel")),
                    ),
                    M(inv_recv_list, c="inv", f="recv")(
                        M("Create", m="create"),
                    ),
                    M("Sent Shipments", c="inv", f="send")(
                        M("Create", m="create"),
                        M("Search Shipped Items", f="track_item"),
                    ),
                    M("Items", c="supply", f="item", m="summary")(
                        M("Create", m="create"),
                        M("Import", f="catalog_item", m="import", p="create"),
                    ),
                    # Catalog Items moved to be next to the Item Categories
                    #M("Catalog Items", c="supply", f="catalog_item")(
                    #    M("Create", m="create"),
                    #),
                    #M("Brands", c="supply", f="brand",
                    #  restrict=[ADMIN])(
                    #    M("Create", m="create"),
                    #),
                    M("Catalogs", c="supply", f="catalog")(
                        M("Create", m="create"),
                    ),
                    M("Item Categories", c="supply", f="item_category",
                      restrict=[ADMIN])(
                        M("Create", m="create"),
                    ),
                    M("Suppliers", c="inv", f="supplier")(
                        M("Create", m="create"),
                        M("Import", m="import", p="create"),
                    ),
                )
# -------------------------------------------------------------------------
def org(self):
""" Organisation Management """
# Same as HRM
return self.hrm()
# -------------------------------------------------------------------------
def req(self):
""" Requests Management """
# Same as Inventory
return self.inv()
# -------------------------------------------------------------------------
def survey(self):
""" Survey """
# Same as Events
return self.event()
ADMIN = current.session.s3.system_roles.ADMIN
# Do we have a series_id?
series_id = False
get_vars = Storage()
try:
series_id = int(current.request.args[0])
except:
try:
(dummy, series_id) = current.request.get_vars["viewing"].split(".")
series_id = int(series_id)
except:
pass
if series_id:
get_vars.viewing = "survey_complete.%s" % series_id
return M(c="survey")(
M("Assessment Templates", f="template")(
M("Create", m="create"),
),
#M("Section", f="section")(
# M("Create", args="create"),
#),
M("Assessments", f="series")(
M("Create", m="create"),
),
M("Situation Reports", c="sit", f="report")(
M("Create", m="create"),
),
M("Administration", f="admin", restrict=[ADMIN])(
M("Import Templates", f="question_list",
m="import", p="create"),
M("Import Template Layout", f="formatter",
m="import", p="create"),
M("Import Completed Assessment Forms", f="complete",
m="import", p="create", vars=get_vars, check=series_id),
),
)
# -------------------------------------------------------------------------
def vehicle(self):
return self.asset()
    # -------------------------------------------------------------------------
    @staticmethod
    def vol():
        """
            Volunteer Management: controller menu for the vol module.

            The menu shows either the manager view or the personal view,
            depending on s3.hrm.mode (None = manager mode).
        """

        auth = current.auth
        s3 = current.session.s3
        ADMIN = s3.system_roles.ADMIN

        # Custom conditions for the check-hook, as lambdas in order
        # to have them checked only immediately before rendering:
        manager_mode = lambda i: s3.hrm.mode is None
        personal_mode = lambda i: s3.hrm.mode is not None
        is_org_admin = lambda i: s3.hrm.orgs and True or \
                                 ADMIN in s3.roles
        is_super_editor = lambda i: auth.s3_has_role("vol_super") or \
                                    auth.s3_has_role("staff_super")

        settings = current.deployment_settings
        show_programmes = lambda i: settings.get_hrm_vol_experience() == "programme"
        show_tasks = lambda i: settings.has_module("project") and \
                               settings.get_project_mode_task()
        teams = settings.get_hrm_teams()
        use_teams = lambda i: teams
        # Deployment-specific visibility (keyed off the root organisation):
        not_vnrc = lambda i: auth.root_org_name() != "Viet Nam Red Cross"
        skills_menu = lambda i: auth.root_org_name() in ("Afghan Red Crescent Society",
                                                         "Indonesian Red Cross Society (Pelang Merah Indonesia)",
                                                         "Viet Nam Red Cross",
                                                         )

        # Menu entries for org-dependent fields are only enabled when the
        # field itself is enabled for the current organisation:
        check_org_dependent_field = lambda tablename, fieldname: \
            settings.set_org_dependent_field(tablename, fieldname,
                                             enable_field = False)

        return M(c="vol")(
                    M("Volunteers", f="volunteer", m="summary",
                      check=[manager_mode])(
                        M("Create", m="create"),
                        M("Import", f="person", m="import",
                          vars={"group":"volunteer"}, p="create"),
                    ),
                    M("Staff & Volunteers (Combined)",
                      c="vol", f="human_resource", m="summary",
                      check=[manager_mode, is_super_editor]),
                    M(teams, f="group",
                      check=[manager_mode, use_teams])(
                        M("Create", m="create"),
                        M("Search Members", f="group_membership"),
                        M("Import", f="group_membership", m="import"),
                    ),
                    #M("Department Catalog", f="department",
                    #  check=manager_mode)(
                    #    M("Create", m="create"),
                    #),
                    M("Volunteer Role Catalog", f="job_title",
                      check=[manager_mode, not_vnrc])(
                        M("Create", m="create"),
                        M("Import", m="import", p="create", check=is_org_admin),
                    ),
                    M("Skill Catalog", f="skill",
                      check=[manager_mode, skills_menu])(
                        M("Create", m="create"),
                        #M("Skill Provisions", f="skill_provision"),
                    ),
                    M("Training Events", f="training_event",
                      check=manager_mode)(
                        M("Create", m="create"),
                        M("Search Training Participants", f="training"),
                        M("Import Participant List", f="training", m="import"),
                    ),
                    M("Training Course Catalog", f="course",
                      check=manager_mode)(
                        M("Create", m="create"),
                        #M("Course Certificates", f="course_certificate"),
                    ),
                    M("Certificate Catalog", f="certificate",
                      check=manager_mode)(
                        M("Create", m="create"),
                        #M("Skill Equivalence", f="certificate_skill"),
                    ),
                    M("Programs", f="programme",
                      check=[manager_mode, show_programmes])(
                        M("Create", m="create"),
                        M("Import Hours", f="programme_hours", m="import"),
                    ),
                    M("Awards", f="award",
                      check=[manager_mode, is_org_admin])(
                        M("Create", m="create"),
                    ),
                    M("Volunteer Cluster Type", f="cluster_type",
                      check = check_org_dependent_field("vol_volunteer_cluster",
                                                        "vol_cluster_type_id"))(
                        M("Create", m="create"),
                    ),
                    M("Volunteer Cluster", f="cluster",
                      check = check_org_dependent_field("vol_volunteer_cluster",
                                                        "vol_cluster_id"))(
                        M("Create", m="create"),
                    ),
                    M("Volunteer Cluster Position", f="cluster_position",
                      check = check_org_dependent_field("vol_volunteer_cluster",
                                                        "vol_cluster_position_id"))(
                        M("Create", m="create"),
                    ),
                    M("Reports", f="volunteer", m="report",
                      check=manager_mode)(
                        M("Volunteer Report", m="report"),
                        M("Hours by Role Report", f="programme_hours", m="report",
                          vars=Storage(rows="job_title_id",
                                       cols="month",
                                       fact="sum(hours)"),
                          check=show_programmes),
                        M("Hours by Program Report", f="programme_hours", m="report",
                          vars=Storage(rows="programme_id",
                                       cols="month",
                                       fact="sum(hours)"),
                          check=show_programmes),
                        M("Training Report", f="training", m="report"),
                    ),
                    #M("My Profile", f="person",
                    #  check=personal_mode, vars=dict(access="personal")),
                    M("My Tasks", f="task",
                      check=[personal_mode, show_tasks],
                      vars=dict(access="personal",
                                mine=1)),
                    # This provides the link to switch to the manager mode:
                    M("Volunteer Management", f="index",
                      check=[personal_mode, is_org_admin]),
                    # This provides the link to switch to the personal mode:
                    #M("Personal Profile", f="person",
                    #  check=manager_mode, vars=dict(access="personal"))
                )
# END =========================================================================
|
pchauncey/ansible | refs/heads/devel | lib/ansible/modules/network/layer3/net_vrf.py | 96 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Ansible by Red Hat, inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: net_vrf
version_added: "2.4"
author: "Ricardo Carrillo Cruz (@rcarrillocruz)"
short_description: Manage VRFs on network devices
description:
- This module provides declarative management of VRFs
on network devices.
options:
name:
description:
- Name of the VRF.
interfaces:
description:
- List of interfaces the VRF should be configured on.
aggregate:
description: List of VRFs definitions
purge:
description:
- Purge VRFs not defined in the I(aggregate) parameter.
default: no
state:
description:
- State of the VRF configuration.
default: present
choices: ['present', 'absent']
"""
EXAMPLES = """
- name: Create VRF named MANAGEMENT
net_vrf:
name: MANAGEMENT
- name: remove VRF named MANAGEMENT
net_vrf:
name: MANAGEMENT
state: absent
- name: Create aggregate of VRFs with purge
net_vrf:
aggregate:
- { name: test4, rd: "1:204" }
- { name: test5, rd: "1:205" }
state: present
purge: yes
- name: Delete aggregate of VRFs
net_vrf:
aggregate:
- name: test2
- name: test3
- name: test4
- name: test5
state: absent
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device
returned: always, except for the platforms that use Netconf transport to manage the device.
type: list
sample:
- vrf definition MANAGEMENT
"""
|
nan86150/ImageFusion | refs/heads/master | lib/python2.7/site-packages/scipy/linalg/linalg_version.py | 156 | from __future__ import division, print_function, absolute_import
# scipy.linalg version components; bump these for a release.
major = 0
minor = 4
micro = 9

# Build the dotted version string from the explicit components rather than
# formatting against locals(), which is fragile under renaming and opaque
# to static analysis; the result is identical ('0.4.9').
linalg_version = '%d.%d.%d' % (major, minor, micro)
|
OTWillems/GEO1005 | refs/heads/master | SpatialDecision/external/networkx/generators/tests/test_geometric.py | 88 | #!/usr/bin/env python
from nose.tools import *
import networkx as nx
class TestGeneratorsGeometric():
    """Smoke tests for the geometric graph generators: each generator must
    produce a graph with the requested number of nodes, and the navigable
    small-world graph with p=1, q=0 must reduce to the plain grid."""

    def test_random_geometric_graph(self):
        graph = nx.random_geometric_graph(50, 0.25)
        assert_equal(len(graph), 50)

    def test_geographical_threshold_graph(self):
        graph = nx.geographical_threshold_graph(50, 100)
        assert_equal(len(graph), 50)

    def test_waxman_graph(self):
        # Both the unit-square form and the explicit-L form.
        for graph in (nx.waxman_graph(50, 0.5, 0.1),
                      nx.waxman_graph(50, 0.5, 0.1, L=1)):
            assert_equal(len(graph), 50)

    def test_naviable_small_world(self):
        # With p=1 and q=0 the generator degenerates to a directed grid.
        cases = (
            (dict(p=1, q=0), nx.grid_2d_graph(5, 5)),
            (dict(p=1, q=0, dim=3), nx.grid_graph([5, 5, 5])),
            (dict(p=1, q=0, dim=1), nx.grid_graph([5])),
        )
        for kwargs, grid in cases:
            graph = nx.navigable_small_world_graph(5, **kwargs)
            assert_true(nx.is_isomorphic(graph, grid.to_directed()))
|
adblockplus/gyp | refs/heads/master | pylib/gyp/generator/cmake.py | 29 | # Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""cmake output module
This module is under development and should be considered experimental.
This module produces cmake (2.8.8+) input as its output. One CMakeLists.txt is
created for each configuration.
This module's original purpose was to support editing in IDEs like KDevelop
which use CMake for project management. It is also possible to use CMake to
generate projects for other IDEs such as eclipse cdt and code::blocks. QtCreator
will convert the CMakeLists.txt to a code::blocks cbp for the editor to read,
but build using CMake. As a result QtCreator editor is unaware of compiler
defines. The generated CMakeLists.txt can also be used to build on Linux. There
is currently no support for building on platforms other than Linux.
The generated CMakeLists.txt should properly compile all projects. However,
there is a mismatch between gyp and cmake with regard to linking. All attempts
are made to work around this, but CMake sometimes sees -Wl,--start-group as a
library and incorrectly repeats it. As a result the output of this generator
should not be relied on for building.
When using with kdevelop, use version 4.4+. Previous versions of kdevelop will
not be able to find the header file directories described in the generated
CMakeLists.txt file.
"""
import multiprocessing
import os
import signal
import string
import subprocess
import gyp.common
import gyp.xcode_emulation
generator_default_variables = {
'EXECUTABLE_PREFIX': '',
'EXECUTABLE_SUFFIX': '',
'STATIC_LIB_PREFIX': 'lib',
'STATIC_LIB_SUFFIX': '.a',
'SHARED_LIB_PREFIX': 'lib',
'SHARED_LIB_SUFFIX': '.so',
'SHARED_LIB_DIR': '${builddir}/lib.${TOOLSET}',
'LIB_DIR': '${obj}.${TOOLSET}',
'INTERMEDIATE_DIR': '${obj}.${TOOLSET}/${TARGET}/geni',
'SHARED_INTERMEDIATE_DIR': '${obj}/gen',
'PRODUCT_DIR': '${builddir}',
'RULE_INPUT_PATH': '${RULE_INPUT_PATH}',
'RULE_INPUT_DIRNAME': '${RULE_INPUT_DIRNAME}',
'RULE_INPUT_NAME': '${RULE_INPUT_NAME}',
'RULE_INPUT_ROOT': '${RULE_INPUT_ROOT}',
'RULE_INPUT_EXT': '${RULE_INPUT_EXT}',
'CONFIGURATION_NAME': '${configuration}',
}
FULL_PATH_VARS = ('${CMAKE_CURRENT_LIST_DIR}', '${builddir}', '${obj}')
generator_supports_multiple_toolsets = True
generator_wants_static_library_dependencies_adjusted = True
COMPILABLE_EXTENSIONS = {
'.c': 'cc',
'.cc': 'cxx',
'.cpp': 'cxx',
'.cxx': 'cxx',
'.s': 's', # cc
'.S': 's', # cc
}
def RemovePrefix(a, prefix):
  """Strip 'prefix' from the front of 'a' when present; otherwise return
  'a' unchanged."""
  if a.startswith(prefix):
    return a[len(prefix):]
  return a
def CalculateVariables(default_variables, params):
  """Calculate additional variables for use in the build (called by gyp)."""
  # setdefault: an 'OS' value already supplied by the caller wins over the
  # flavor detected from params.
  default_variables.setdefault('OS', gyp.common.GetFlavor(params))
def Compilable(filename):
  """Return true if the file is compilable (should be in OBJS)."""
  for extension in COMPILABLE_EXTENSIONS:
    if filename.endswith(extension):
      return True
  return False
def Linkable(filename):
  """Return true if the file is linkable (should be on the link line)."""
  # Only pre-built object files are linked directly.
  return filename.endswith(('.o',))
def NormjoinPathForceCMakeSource(base_path, rel_path):
  """Resolves rel_path against base_path and returns the result.

  If rel_path is an absolute path it is returned unchanged.
  Otherwise it is resolved against base_path and normalized.
  If the result is a relative path, it is forced to be relative to the
  CMakeLists.txt.
  """
  if os.path.isabs(rel_path):
    return rel_path
  # Paths anchored at a CMake variable (${builddir}, ${obj}, ...) are
  # already absolute as far as CMake is concerned; pass them through.
  # (Generator, not a list: we only need the first match.)
  if any(rel_path.startswith(var) for var in FULL_PATH_VARS):
    return rel_path
  # TODO: do we need to check base_path for absolute variables as well?
  return os.path.join('${CMAKE_CURRENT_LIST_DIR}',
                      os.path.normpath(os.path.join(base_path, rel_path)))
def NormjoinPath(base_path, rel_path):
  """Resolves rel_path against base_path and returns the result.

  TODO: what is this really used for?
  Paths beginning with '$' (other than '${configuration}') are CMake
  variable references and are returned unchanged; everything else is
  joined to base_path and normalized.
  """
  is_cmake_variable = (rel_path.startswith('$')
                       and not rel_path.startswith('${configuration}'))
  if is_cmake_variable:
    return rel_path
  return os.path.normpath(os.path.join(base_path, rel_path))
def CMakeStringEscape(a):
  """Escapes the string 'a' for use inside a CMake string.

  This means escaping
    '\' otherwise it may be seen as modifying the next character
    '"' otherwise it will end the string
    ';' otherwise the string becomes a list

  '#' needs no escaping inside a string, and '$' must be left alone so
  generator variables like ${obj} keep working.
  """
  # Backslash first, so the escapes added below are not themselves escaped.
  escaped = a.replace('\\', '\\\\')
  escaped = escaped.replace(';', '\\;')
  return escaped.replace('"', '\\"')
def SetFileProperty(output, source_name, property_name, values, sep):
  """Given a single source file name, sets the given property on it."""
  # Escape each value for CMake; 'sep' trails every value, as in the
  # original piecewise emission.
  escaped = [value.replace('\\', '\\\\').replace(';', '\\;').replace('"', '\\"')
             for value in values]
  joined = ''.join(value + sep for value in escaped)
  output.write('set_source_files_properties(%s PROPERTIES %s "%s")\n'
               % (source_name, property_name, joined))
def SetFilesProperty(output, variable, property_name, values, sep):
  """Given a set of source files (named by the CMake list variable
  'variable'), sets the given property on them."""
  escaped = [value.replace('\\', '\\\\').replace(';', '\\;').replace('"', '\\"')
             for value in values]
  joined = ''.join(value + sep for value in escaped)
  output.write('set_source_files_properties(${%s} PROPERTIES %s "%s")\n'
               % (variable, property_name, joined))
def SetTargetProperty(output, target_name, property_name, values, sep=''):
  """Given a target, sets the given property."""
  # NOTE: callers sometimes pass a plain string for 'values'; iterating it
  # per character with sep='' reproduces the string, as before.
  escaped = [value.replace('\\', '\\\\').replace(';', '\\;').replace('"', '\\"')
             for value in values]
  joined = ''.join(value + sep for value in escaped)
  output.write('set_target_properties(%s PROPERTIES %s "%s")\n'
               % (target_name, property_name, joined))
def SetVariable(output, variable_name, value):
  """Sets a CMake variable."""
  escaped = value.replace('\\', '\\\\').replace(';', '\\;').replace('"', '\\"')
  output.write('set(%s "%s")\n' % (variable_name, escaped))
def SetVariableList(output, variable_name, values):
  """Sets a CMake variable to a list."""
  # Zero- and one-element lists degenerate to a plain variable.
  if not values:
    return SetVariable(output, variable_name, "")
  if len(values) == 1:
    return SetVariable(output, variable_name, values[0])
  escaped = [CMakeStringEscape(value) for value in values]
  output.write('list(APPEND %s\n  "%s")\n'
               % (variable_name, '"\n  "'.join(escaped)))
def UnsetVariable(output, variable_name):
  """Unsets a CMake variable."""
  output.write('unset(%s)\n' % variable_name)
def WriteVariable(output, variable_name, prepend=None):
  """Writes a CMake variable reference '${variable_name}', optionally
  preceded by the 'prepend' text (skipped when falsy, as before)."""
  reference = '${' + variable_name + '}'
  if prepend:
    reference = prepend + reference
  output.write(reference)
class CMakeTargetType(object):
  """Describes how one gyp target type is declared in CMake.

  command: the CMake command that declares the target
      (add_executable, add_library, add_custom_target).
  modifier: keyword placed after the target name (e.g. STATIC, SHARED,
      MODULE, SOURCES), or None.
  property_modifier: prefix for the *_OUTPUT_* target properties
      (RUNTIME, ARCHIVE, LIBRARY), or None.
  """
  def __init__(self, command, modifier, property_modifier):
    self.command = command
    self.modifier = modifier
    self.property_modifier = property_modifier
# Maps a gyp 'type' to how the corresponding target is declared in CMake.
cmake_target_type_from_gyp_target_type = {
  'executable': CMakeTargetType('add_executable', None, 'RUNTIME'),
  'static_library': CMakeTargetType('add_library', 'STATIC', 'ARCHIVE'),
  'shared_library': CMakeTargetType('add_library', 'SHARED', 'LIBRARY'),
  'loadable_module': CMakeTargetType('add_library', 'MODULE', 'LIBRARY'),
  'none': CMakeTargetType('add_custom_target', 'SOURCES', None),
}
def StringToCMakeTargetName(a):
  """Converts the given string 'a' to a valid CMake target name.

  All invalid characters are replaced by '_'.
  Invalid for cmake: ' ', '/', '(', ')', '"'
  Invalid for make: ':'
  Invalid for unknown reasons but cause failures: '.'
  """
  try:
    # Python 2: string.maketrans builds a 256-byte translation table.
    transform = string.maketrans(' /():."', '_______')
  except AttributeError:
    # Python 3: string.maketrans was removed; use the str builtin, which
    # performs the same character-for-character mapping.
    transform = str.maketrans(' /():."', '_______')
  return a.translate(transform)
def WriteActions(target_name, actions, extra_sources, extra_deps,
                 path_to_gyp, output):
  """Write CMake for the 'actions' in the target.

  Each action becomes an add_custom_command (the command itself) plus an
  add_custom_target (so other targets can depend on it by name).

  Args:
    target_name: the name of the CMake target being generated.
    actions: the Gyp 'actions' dict for this target.
    extra_sources: [(<cmake_src>, <src>)] to append with generated source files.
    extra_deps: [<cmake_target>] to append with generated targets.
    path_to_gyp: relative path from CMakeLists.txt being generated to
        the Gyp file in which the target being generated is defined.
    output: file-like object the CMakeLists.txt fragment is written to.
  """
  for action in actions:
    action_name = StringToCMakeTargetName(action['action_name'])
    action_target_name = '%s__%s' % (target_name, action_name)

    inputs = action['inputs']
    inputs_name = action_target_name + '__input'
    SetVariableList(output, inputs_name,
        [NormjoinPathForceCMakeSource(path_to_gyp, dep) for dep in inputs])

    outputs = action['outputs']
    cmake_outputs = [NormjoinPathForceCMakeSource(path_to_gyp, out)
                     for out in outputs]
    outputs_name = action_target_name + '__output'
    SetVariableList(output, outputs_name, cmake_outputs)

    # Build up a list of outputs.
    # Collect the output dirs we'll need.
    dirs = set(dir for dir in (os.path.dirname(o) for o in outputs) if dir)

    if int(action.get('process_outputs_as_sources', False)):
      extra_sources.extend(zip(cmake_outputs, outputs))

    # add_custom_command
    output.write('add_custom_command(OUTPUT ')
    WriteVariable(output, outputs_name)
    output.write('\n')

    # Output directories must exist before the action runs.
    if len(dirs) > 0:
      for directory in dirs:
        output.write(' COMMAND ${CMAKE_COMMAND} -E make_directory ')
        output.write(directory)
        output.write('\n')

    output.write(' COMMAND ')
    output.write(gyp.common.EncodePOSIXShellList(action['action']))
    output.write('\n')

    output.write(' DEPENDS ')
    WriteVariable(output, inputs_name)
    output.write('\n')

    # Actions run from the gyp file's directory, not the build directory.
    output.write(' WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR}/')
    output.write(path_to_gyp)
    output.write('\n')

    output.write(' COMMENT ')
    if 'message' in action:
      output.write(action['message'])
    else:
      output.write(action_target_name)
    output.write('\n')

    output.write(' VERBATIM\n')
    output.write(')\n')

    # add_custom_target
    output.write('add_custom_target(')
    output.write(action_target_name)
    output.write('\n DEPENDS ')
    WriteVariable(output, outputs_name)
    output.write('\n SOURCES ')
    WriteVariable(output, inputs_name)
    output.write('\n)\n')

    extra_deps.append(action_target_name)
def NormjoinRulePathForceCMakeSource(base_path, rel_path, rule_source):
  """Like NormjoinPathForceCMakeSource, but leaves RULE_INPUT_* placeholder
  paths untouched when the rule source itself is already CMake-anchored."""
  placeholder_prefixes = ("${RULE_INPUT_PATH}", "${RULE_INPUT_DIRNAME}")
  if rel_path.startswith(placeholder_prefixes):
    source_is_anchored = any(rule_source.startswith(var)
                             for var in FULL_PATH_VARS)
    if source_is_anchored:
      return rel_path
  return NormjoinPathForceCMakeSource(base_path, rel_path)
def WriteRules(target_name, rules, extra_sources, extra_deps,
               path_to_gyp, output):
  """Write CMake for the 'rules' in the target.

  Each (rule, rule_source) pair becomes an add_custom_command; one
  add_custom_target per rule collects all of its outputs.

  Args:
    target_name: the name of the CMake target being generated.
    rules: the Gyp 'rules' dict for this target.
    extra_sources: [(<cmake_src>, <src>)] to append with generated source files.
    extra_deps: [<cmake_target>] to append with generated targets.
    path_to_gyp: relative path from CMakeLists.txt being generated to
        the Gyp file in which the target being generated is defined.
    output: file-like object the CMakeLists.txt fragment is written to.
  """
  for rule in rules:
    rule_name = StringToCMakeTargetName(target_name + '__' + rule['rule_name'])

    inputs = rule.get('inputs', [])
    inputs_name = rule_name + '__input'
    SetVariableList(output, inputs_name,
        [NormjoinPathForceCMakeSource(path_to_gyp, dep) for dep in inputs])
    outputs = rule['outputs']
    var_outputs = []

    for count, rule_source in enumerate(rule.get('rule_sources', [])):
      action_name = rule_name + '_' + str(count)

      rule_source_dirname, rule_source_basename = os.path.split(rule_source)
      rule_source_root, rule_source_ext = os.path.splitext(rule_source_basename)

      # Expose the RULE_INPUT_* placeholders for this source; they are
      # unset again after the command is emitted.
      SetVariable(output, 'RULE_INPUT_PATH', rule_source)
      SetVariable(output, 'RULE_INPUT_DIRNAME', rule_source_dirname)
      SetVariable(output, 'RULE_INPUT_NAME', rule_source_basename)
      SetVariable(output, 'RULE_INPUT_ROOT', rule_source_root)
      SetVariable(output, 'RULE_INPUT_EXT', rule_source_ext)

      # Build up a list of outputs.
      # Collect the output dirs we'll need.
      dirs = set(dir for dir in (os.path.dirname(o) for o in outputs) if dir)

      # Create variables for the output, as 'local' variable will be unset.
      these_outputs = []
      for output_index, out in enumerate(outputs):
        output_name = action_name + '_' + str(output_index)
        SetVariable(output, output_name,
                    NormjoinRulePathForceCMakeSource(path_to_gyp, out,
                                                     rule_source))
        if int(rule.get('process_outputs_as_sources', False)):
          extra_sources.append(('${' + output_name + '}', out))
        these_outputs.append('${' + output_name + '}')
        var_outputs.append('${' + output_name + '}')

      # add_custom_command
      output.write('add_custom_command(OUTPUT\n')
      for out in these_outputs:
        output.write(' ')
        output.write(out)
        output.write('\n')

      for directory in dirs:
        output.write(' COMMAND ${CMAKE_COMMAND} -E make_directory ')
        output.write(directory)
        output.write('\n')

      output.write(' COMMAND ')
      output.write(gyp.common.EncodePOSIXShellList(rule['action']))
      output.write('\n')

      output.write(' DEPENDS ')
      WriteVariable(output, inputs_name)
      output.write(' ')
      output.write(NormjoinPath(path_to_gyp, rule_source))
      output.write('\n')

      # CMAKE_CURRENT_LIST_DIR is where the CMakeLists.txt lives.
      # The cwd is the current build directory.
      output.write(' WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR}/')
      output.write(path_to_gyp)
      output.write('\n')

      output.write(' COMMENT ')
      if 'message' in rule:
        output.write(rule['message'])
      else:
        output.write(action_name)
      output.write('\n')

      output.write(' VERBATIM\n')
      output.write(')\n')

      UnsetVariable(output, 'RULE_INPUT_PATH')
      UnsetVariable(output, 'RULE_INPUT_DIRNAME')
      UnsetVariable(output, 'RULE_INPUT_NAME')
      UnsetVariable(output, 'RULE_INPUT_ROOT')
      UnsetVariable(output, 'RULE_INPUT_EXT')

    # add_custom_target
    output.write('add_custom_target(')
    output.write(rule_name)
    output.write(' DEPENDS\n')
    for out in var_outputs:
      output.write(' ')
      output.write(out)
      output.write('\n')
    output.write('SOURCES ')
    WriteVariable(output, inputs_name)
    output.write('\n')
    for rule_source in rule.get('rule_sources', []):
      output.write(' ')
      output.write(NormjoinPath(path_to_gyp, rule_source))
      output.write('\n')
    output.write(')\n')

    extra_deps.append(rule_name)
def WriteCopies(target_name, copies, extra_deps, path_to_gyp, output):
  """Write CMake for the 'copies' in the target.

  File and directory copies are batched separately (cmake -E copy vs.
  copy_directory) and emitted as one add_custom_command plus an
  add_custom_target that other targets can depend on.

  Args:
    target_name: the name of the CMake target being generated.
    copies: the Gyp 'copies' list for this target.
    extra_deps: [<cmake_target>] to append with generated targets.
    path_to_gyp: relative path from CMakeLists.txt being generated to
        the Gyp file in which the target being generated is defined.
    output: file-like object the CMakeLists.txt fragment is written to.
  """
  copy_name = target_name + '__copies'

  # CMake gets upset with custom targets with OUTPUT which specify no output.
  have_copies = any(copy['files'] for copy in copies)
  if not have_copies:
    output.write('add_custom_target(')
    output.write(copy_name)
    output.write(')\n')
    extra_deps.append(copy_name)
    return

  class Copy(object):
    # Accumulator for one copy flavor (plain files vs. directories).
    def __init__(self, ext, command):
      self.cmake_inputs = []
      self.cmake_outputs = []
      self.gyp_inputs = []
      self.gyp_outputs = []
      self.ext = ext
      self.inputs_name = None
      self.outputs_name = None
      self.command = command

  file_copy = Copy('', 'copy')
  dir_copy = Copy('_dirs', 'copy_directory')

  for copy in copies:
    files = copy['files']
    destination = copy['destination']
    for src in files:
      path = os.path.normpath(src)
      basename = os.path.split(path)[1]
      dst = os.path.join(destination, basename)

      # A source with an empty basename (i.e. ending in a path separator)
      # is treated as a directory copy — TODO(review): confirm this matches
      # gyp's intended trailing-slash convention for 'copies'.
      copy = file_copy if os.path.basename(src) else dir_copy
      copy.cmake_inputs.append(NormjoinPathForceCMakeSource(path_to_gyp, src))
      copy.cmake_outputs.append(NormjoinPathForceCMakeSource(path_to_gyp, dst))
      copy.gyp_inputs.append(src)
      copy.gyp_outputs.append(dst)

  for copy in (file_copy, dir_copy):
    if copy.cmake_inputs:
      copy.inputs_name = copy_name + '__input' + copy.ext
      SetVariableList(output, copy.inputs_name, copy.cmake_inputs)

      copy.outputs_name = copy_name + '__output' + copy.ext
      SetVariableList(output, copy.outputs_name, copy.cmake_outputs)

  # add_custom_command
  output.write('add_custom_command(\n')

  output.write('OUTPUT')
  for copy in (file_copy, dir_copy):
    if copy.outputs_name:
      WriteVariable(output, copy.outputs_name, ' ')
  output.write('\n')

  for copy in (file_copy, dir_copy):
    for src, dst in zip(copy.gyp_inputs, copy.gyp_outputs):
      # 'cmake -E copy src dst' will create the 'dst' directory if needed.
      output.write('COMMAND ${CMAKE_COMMAND} -E %s ' % copy.command)
      output.write(src)
      output.write(' ')
      output.write(dst)
      output.write("\n")

  output.write('DEPENDS')
  for copy in (file_copy, dir_copy):
    if copy.inputs_name:
      WriteVariable(output, copy.inputs_name, ' ')
  output.write('\n')

  output.write('WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR}/')
  output.write(path_to_gyp)
  output.write('\n')

  output.write('COMMENT Copying for ')
  output.write(target_name)
  output.write('\n')

  output.write('VERBATIM\n')
  output.write(')\n')

  # add_custom_target
  output.write('add_custom_target(')
  output.write(copy_name)
  output.write('\n DEPENDS')
  for copy in (file_copy, dir_copy):
    if copy.outputs_name:
      WriteVariable(output, copy.outputs_name, ' ')
  output.write('\n SOURCES')
  if file_copy.inputs_name:
    WriteVariable(output, file_copy.inputs_name, ' ')
  output.write('\n)\n')
  extra_deps.append(copy_name)
def CreateCMakeTargetBaseName(qualified_target):
  """This is the name we would like the target to have."""
  _, gyp_target_name, gyp_target_toolset = (
      gyp.common.ParseQualifiedTarget(qualified_target))
  suffix = ''
  if gyp_target_toolset and gyp_target_toolset != 'target':
    # Non-default toolsets are always qualified.
    suffix = '_' + gyp_target_toolset
  return StringToCMakeTargetName(gyp_target_name + suffix)
def CreateCMakeTargetFullName(qualified_target):
  """An unambiguous name for the target."""
  gyp_file, gyp_target_name, gyp_target_toolset = (
      gyp.common.ParseQualifiedTarget(qualified_target))
  # Prefix with the gyp file so the name is globally unique.
  full_name = '%s:%s' % (gyp_file, gyp_target_name)
  if gyp_target_toolset and gyp_target_toolset != 'target':
    full_name = '%s_%s' % (full_name, gyp_target_toolset)
  return StringToCMakeTargetName(full_name)
class CMakeNamer(object):
  """Converts Gyp target names into CMake target names.

  CMake requires globally unique target names. Fully qualifying every name
  (e.g. "chrome_chrome_gyp_chrome") would guarantee that, but makes the
  generated projects unpleasant to use from an IDE, so the short base name
  is used wherever it is unambiguous and the fully qualified name only for
  targets whose base names collide. Toolset variants are always qualified
  from the base, both because building requires it and because an IDE may
  show different defines per toolset.
  """
  def __init__(self, target_list):
    # NOTE: the attribute name (including its historical misspelling) is
    # part of the public surface and is kept as-is.
    self.cmake_target_base_names_conficting = set()

    seen = set()
    for qualified_target in target_list:
      base_name = CreateCMakeTargetBaseName(qualified_target)
      if base_name in seen:
        self.cmake_target_base_names_conficting.add(base_name)
      else:
        seen.add(base_name)

  def CreateCMakeTargetName(self, qualified_target):
    base_name = CreateCMakeTargetBaseName(qualified_target)
    if base_name in self.cmake_target_base_names_conficting:
      return CreateCMakeTargetFullName(qualified_target)
    return base_name
def WriteTarget(namer, qualified_target, target_dicts, build_dir, config_to_use,
options, generator_flags, all_qualified_targets, flavor,
output):
# The make generator does this always.
# TODO: It would be nice to be able to tell CMake all dependencies.
circular_libs = generator_flags.get('circular', True)
if not generator_flags.get('standalone', False):
output.write('\n#')
output.write(qualified_target)
output.write('\n')
gyp_file, _, _ = gyp.common.ParseQualifiedTarget(qualified_target)
rel_gyp_file = gyp.common.RelativePath(gyp_file, options.toplevel_dir)
rel_gyp_dir = os.path.dirname(rel_gyp_file)
# Relative path from build dir to top dir.
build_to_top = gyp.common.InvertRelativePath(build_dir, options.toplevel_dir)
# Relative path from build dir to gyp dir.
build_to_gyp = os.path.join(build_to_top, rel_gyp_dir)
path_from_cmakelists_to_gyp = build_to_gyp
spec = target_dicts.get(qualified_target, {})
config = spec.get('configurations', {}).get(config_to_use, {})
xcode_settings = None
if flavor == 'mac':
xcode_settings = gyp.xcode_emulation.XcodeSettings(spec)
target_name = spec.get('target_name', '<missing target name>')
target_type = spec.get('type', '<missing target type>')
target_toolset = spec.get('toolset')
cmake_target_type = cmake_target_type_from_gyp_target_type.get(target_type)
if cmake_target_type is None:
print ('Target %s has unknown target type %s, skipping.' %
( target_name, target_type ) )
return
SetVariable(output, 'TARGET', target_name)
SetVariable(output, 'TOOLSET', target_toolset)
cmake_target_name = namer.CreateCMakeTargetName(qualified_target)
extra_sources = []
extra_deps = []
# Actions must come first, since they can generate more OBJs for use below.
if 'actions' in spec:
WriteActions(cmake_target_name, spec['actions'], extra_sources, extra_deps,
path_from_cmakelists_to_gyp, output)
# Rules must be early like actions.
if 'rules' in spec:
WriteRules(cmake_target_name, spec['rules'], extra_sources, extra_deps,
path_from_cmakelists_to_gyp, output)
# Copies
if 'copies' in spec:
WriteCopies(cmake_target_name, spec['copies'], extra_deps,
path_from_cmakelists_to_gyp, output)
# Target and sources
srcs = spec.get('sources', [])
# Gyp separates the sheep from the goats based on file extensions.
# A full separation is done here because of flag handing (see below).
s_sources = []
c_sources = []
cxx_sources = []
linkable_sources = []
other_sources = []
for src in srcs:
_, ext = os.path.splitext(src)
src_type = COMPILABLE_EXTENSIONS.get(ext, None)
src_norm_path = NormjoinPath(path_from_cmakelists_to_gyp, src);
if src_type == 's':
s_sources.append(src_norm_path)
elif src_type == 'cc':
c_sources.append(src_norm_path)
elif src_type == 'cxx':
cxx_sources.append(src_norm_path)
elif Linkable(ext):
linkable_sources.append(src_norm_path)
else:
other_sources.append(src_norm_path)
for extra_source in extra_sources:
src, real_source = extra_source
_, ext = os.path.splitext(real_source)
src_type = COMPILABLE_EXTENSIONS.get(ext, None)
if src_type == 's':
s_sources.append(src)
elif src_type == 'cc':
c_sources.append(src)
elif src_type == 'cxx':
cxx_sources.append(src)
elif Linkable(ext):
linkable_sources.append(src)
else:
other_sources.append(src)
s_sources_name = None
if s_sources:
s_sources_name = cmake_target_name + '__asm_srcs'
SetVariableList(output, s_sources_name, s_sources)
c_sources_name = None
if c_sources:
c_sources_name = cmake_target_name + '__c_srcs'
SetVariableList(output, c_sources_name, c_sources)
cxx_sources_name = None
if cxx_sources:
cxx_sources_name = cmake_target_name + '__cxx_srcs'
SetVariableList(output, cxx_sources_name, cxx_sources)
linkable_sources_name = None
if linkable_sources:
linkable_sources_name = cmake_target_name + '__linkable_srcs'
SetVariableList(output, linkable_sources_name, linkable_sources)
other_sources_name = None
if other_sources:
other_sources_name = cmake_target_name + '__other_srcs'
SetVariableList(output, other_sources_name, other_sources)
# CMake gets upset when executable targets provide no sources.
# http://www.cmake.org/pipermail/cmake/2010-July/038461.html
dummy_sources_name = None
has_sources = (s_sources_name or
c_sources_name or
cxx_sources_name or
linkable_sources_name or
other_sources_name)
if target_type == 'executable' and not has_sources:
dummy_sources_name = cmake_target_name + '__dummy_srcs'
SetVariable(output, dummy_sources_name,
"${obj}.${TOOLSET}/${TARGET}/genc/dummy.c")
output.write('if(NOT EXISTS "')
WriteVariable(output, dummy_sources_name)
output.write('")\n')
output.write(' file(WRITE "')
WriteVariable(output, dummy_sources_name)
output.write('" "")\n')
output.write("endif()\n")
# CMake is opposed to setting linker directories and considers the practice
# of setting linker directories dangerous. Instead, it favors the use of
# find_library and passing absolute paths to target_link_libraries.
# However, CMake does provide the command link_directories, which adds
# link directories to targets defined after it is called.
# As a result, link_directories must come before the target definition.
# CMake unfortunately has no means of removing entries from LINK_DIRECTORIES.
library_dirs = config.get('library_dirs')
if library_dirs is not None:
output.write('link_directories(')
for library_dir in library_dirs:
output.write(' ')
output.write(NormjoinPath(path_from_cmakelists_to_gyp, library_dir))
output.write('\n')
output.write(')\n')
output.write(cmake_target_type.command)
output.write('(')
output.write(cmake_target_name)
if cmake_target_type.modifier is not None:
output.write(' ')
output.write(cmake_target_type.modifier)
if s_sources_name:
WriteVariable(output, s_sources_name, ' ')
if c_sources_name:
WriteVariable(output, c_sources_name, ' ')
if cxx_sources_name:
WriteVariable(output, cxx_sources_name, ' ')
if linkable_sources_name:
WriteVariable(output, linkable_sources_name, ' ')
if other_sources_name:
WriteVariable(output, other_sources_name, ' ')
if dummy_sources_name:
WriteVariable(output, dummy_sources_name, ' ')
output.write(')\n')
# Let CMake know if the 'all' target should depend on this target.
exclude_from_all = ('TRUE' if qualified_target not in all_qualified_targets
else 'FALSE')
SetTargetProperty(output, cmake_target_name,
'EXCLUDE_FROM_ALL', exclude_from_all)
for extra_target_name in extra_deps:
SetTargetProperty(output, extra_target_name,
'EXCLUDE_FROM_ALL', exclude_from_all)
# Output name and location.
if target_type != 'none':
# Link as 'C' if there are no other files
if not c_sources and not cxx_sources:
SetTargetProperty(output, cmake_target_name, 'LINKER_LANGUAGE', ['C'])
# Mark uncompiled sources as uncompiled.
if other_sources_name:
output.write('set_source_files_properties(')
WriteVariable(output, other_sources_name, '')
output.write(' PROPERTIES HEADER_FILE_ONLY "TRUE")\n')
# Mark object sources as linkable.
if linkable_sources_name:
output.write('set_source_files_properties(')
WriteVariable(output, other_sources_name, '')
output.write(' PROPERTIES EXTERNAL_OBJECT "TRUE")\n')
# Output directory
target_output_directory = spec.get('product_dir')
if target_output_directory is None:
if target_type in ('executable', 'loadable_module'):
target_output_directory = generator_default_variables['PRODUCT_DIR']
elif target_type == 'shared_library':
target_output_directory = '${builddir}/lib.${TOOLSET}'
elif spec.get('standalone_static_library', False):
target_output_directory = generator_default_variables['PRODUCT_DIR']
else:
base_path = gyp.common.RelativePath(os.path.dirname(gyp_file),
options.toplevel_dir)
target_output_directory = '${obj}.${TOOLSET}'
target_output_directory = (
os.path.join(target_output_directory, base_path))
cmake_target_output_directory = NormjoinPathForceCMakeSource(
path_from_cmakelists_to_gyp,
target_output_directory)
SetTargetProperty(output,
cmake_target_name,
cmake_target_type.property_modifier + '_OUTPUT_DIRECTORY',
cmake_target_output_directory)
# Output name
default_product_prefix = ''
default_product_name = target_name
default_product_ext = ''
if target_type == 'static_library':
static_library_prefix = generator_default_variables['STATIC_LIB_PREFIX']
default_product_name = RemovePrefix(default_product_name,
static_library_prefix)
default_product_prefix = static_library_prefix
default_product_ext = generator_default_variables['STATIC_LIB_SUFFIX']
elif target_type in ('loadable_module', 'shared_library'):
shared_library_prefix = generator_default_variables['SHARED_LIB_PREFIX']
default_product_name = RemovePrefix(default_product_name,
shared_library_prefix)
default_product_prefix = shared_library_prefix
default_product_ext = generator_default_variables['SHARED_LIB_SUFFIX']
elif target_type != 'executable':
print ('ERROR: What output file should be generated?',
'type', target_type, 'target', target_name)
product_prefix = spec.get('product_prefix', default_product_prefix)
product_name = spec.get('product_name', default_product_name)
product_ext = spec.get('product_extension')
if product_ext:
product_ext = '.' + product_ext
else:
product_ext = default_product_ext
SetTargetProperty(output, cmake_target_name, 'PREFIX', product_prefix)
SetTargetProperty(output, cmake_target_name,
cmake_target_type.property_modifier + '_OUTPUT_NAME',
product_name)
SetTargetProperty(output, cmake_target_name, 'SUFFIX', product_ext)
# Make the output of this target referenceable as a source.
cmake_target_output_basename = product_prefix + product_name + product_ext
cmake_target_output = os.path.join(cmake_target_output_directory,
cmake_target_output_basename)
SetFileProperty(output, cmake_target_output, 'GENERATED', ['TRUE'], '')
# Includes
includes = config.get('include_dirs')
if includes:
# This (target include directories) is what requires CMake 2.8.8
includes_name = cmake_target_name + '__include_dirs'
SetVariableList(output, includes_name,
[NormjoinPathForceCMakeSource(path_from_cmakelists_to_gyp, include)
for include in includes])
output.write('set_property(TARGET ')
output.write(cmake_target_name)
output.write(' APPEND PROPERTY INCLUDE_DIRECTORIES ')
WriteVariable(output, includes_name, '')
output.write(')\n')
# Defines
defines = config.get('defines')
if defines is not None:
SetTargetProperty(output,
cmake_target_name,
'COMPILE_DEFINITIONS',
defines,
';')
# Compile Flags - http://www.cmake.org/Bug/view.php?id=6493
# CMake currently does not have target C and CXX flags.
# So, instead of doing...
# cflags_c = config.get('cflags_c')
# if cflags_c is not None:
# SetTargetProperty(output, cmake_target_name,
# 'C_COMPILE_FLAGS', cflags_c, ' ')
# cflags_cc = config.get('cflags_cc')
# if cflags_cc is not None:
# SetTargetProperty(output, cmake_target_name,
# 'CXX_COMPILE_FLAGS', cflags_cc, ' ')
# Instead we must...
cflags = config.get('cflags', [])
cflags_c = config.get('cflags_c', [])
cflags_cxx = config.get('cflags_cc', [])
if xcode_settings:
cflags = xcode_settings.GetCflags(config_to_use)
cflags_c = xcode_settings.GetCflagsC(config_to_use)
cflags_cxx = xcode_settings.GetCflagsCC(config_to_use)
#cflags_objc = xcode_settings.GetCflagsObjC(config_to_use)
#cflags_objcc = xcode_settings.GetCflagsObjCC(config_to_use)
if (not cflags_c or not c_sources) and (not cflags_cxx or not cxx_sources):
SetTargetProperty(output, cmake_target_name, 'COMPILE_FLAGS', cflags, ' ')
elif c_sources and not (s_sources or cxx_sources):
flags = []
flags.extend(cflags)
flags.extend(cflags_c)
SetTargetProperty(output, cmake_target_name, 'COMPILE_FLAGS', flags, ' ')
elif cxx_sources and not (s_sources or c_sources):
flags = []
flags.extend(cflags)
flags.extend(cflags_cxx)
SetTargetProperty(output, cmake_target_name, 'COMPILE_FLAGS', flags, ' ')
else:
# TODO: This is broken, one cannot generally set properties on files,
# as other targets may require different properties on the same files.
if s_sources and cflags:
SetFilesProperty(output, s_sources_name, 'COMPILE_FLAGS', cflags, ' ')
if c_sources and (cflags or cflags_c):
flags = []
flags.extend(cflags)
flags.extend(cflags_c)
SetFilesProperty(output, c_sources_name, 'COMPILE_FLAGS', flags, ' ')
if cxx_sources and (cflags or cflags_cxx):
flags = []
flags.extend(cflags)
flags.extend(cflags_cxx)
SetFilesProperty(output, cxx_sources_name, 'COMPILE_FLAGS', flags, ' ')
# Linker flags
ldflags = config.get('ldflags')
if ldflags is not None:
SetTargetProperty(output, cmake_target_name, 'LINK_FLAGS', ldflags, ' ')
# XCode settings
xcode_settings = config.get('xcode_settings', {})
for xcode_setting, xcode_value in xcode_settings.viewitems():
SetTargetProperty(output, cmake_target_name,
"XCODE_ATTRIBUTE_%s" % xcode_setting, xcode_value,
'' if isinstance(xcode_value, str) else ' ')
# Note on Dependencies and Libraries:
# CMake wants to handle link order, resolving the link line up front.
# Gyp does not retain or enforce specifying enough information to do so.
# So do as other gyp generators and use --start-group and --end-group.
# Give CMake as little information as possible so that it doesn't mess it up.
# Dependencies
rawDeps = spec.get('dependencies', [])
static_deps = []
shared_deps = []
other_deps = []
for rawDep in rawDeps:
dep_cmake_name = namer.CreateCMakeTargetName(rawDep)
dep_spec = target_dicts.get(rawDep, {})
dep_target_type = dep_spec.get('type', None)
if dep_target_type == 'static_library':
static_deps.append(dep_cmake_name)
elif dep_target_type == 'shared_library':
shared_deps.append(dep_cmake_name)
else:
other_deps.append(dep_cmake_name)
# ensure all external dependencies are complete before internal dependencies
# extra_deps currently only depend on their own deps, so otherwise run early
if static_deps or shared_deps or other_deps:
for extra_dep in extra_deps:
output.write('add_dependencies(')
output.write(extra_dep)
output.write('\n')
for deps in (static_deps, shared_deps, other_deps):
for dep in gyp.common.uniquer(deps):
output.write(' ')
output.write(dep)
output.write('\n')
output.write(')\n')
linkable = target_type in ('executable', 'loadable_module', 'shared_library')
other_deps.extend(extra_deps)
if other_deps or (not linkable and (static_deps or shared_deps)):
output.write('add_dependencies(')
output.write(cmake_target_name)
output.write('\n')
for dep in gyp.common.uniquer(other_deps):
output.write(' ')
output.write(dep)
output.write('\n')
if not linkable:
for deps in (static_deps, shared_deps):
for lib_dep in gyp.common.uniquer(deps):
output.write(' ')
output.write(lib_dep)
output.write('\n')
output.write(')\n')
# Libraries
if linkable:
external_libs = [lib for lib in spec.get('libraries', []) if len(lib) > 0]
if external_libs or static_deps or shared_deps:
output.write('target_link_libraries(')
output.write(cmake_target_name)
output.write('\n')
if static_deps:
write_group = circular_libs and len(static_deps) > 1 and flavor != 'mac'
if write_group:
output.write('-Wl,--start-group\n')
for dep in gyp.common.uniquer(static_deps):
output.write(' ')
output.write(dep)
output.write('\n')
if write_group:
output.write('-Wl,--end-group\n')
if shared_deps:
for dep in gyp.common.uniquer(shared_deps):
output.write(' ')
output.write(dep)
output.write('\n')
if external_libs:
for lib in gyp.common.uniquer(external_libs):
output.write(' "')
output.write(RemovePrefix(lib, "$(SDKROOT)"))
output.write('"\n')
output.write(')\n')
UnsetVariable(output, 'TOOLSET')
UnsetVariable(output, 'TARGET')
def GenerateOutputForConfig(target_list, target_dicts, data,
                            params, config_to_use):
  """Write a complete CMakeLists.txt for one gyp configuration.

  Emits the CMake preamble (minimum version, project name, toolchain
  variables taken from make_global_settings with environment fallbacks),
  then one CMake target per gyp target via WriteTarget().

  Args:
    target_list: list of fully qualified gyp target names, in build order.
    target_dicts: mapping of qualified target name -> target spec dict.
    data: mapping of gyp file path -> parsed gyp file contents.
    params: generator parameters (options, generator_flags, build_files...).
    config_to_use: name of the configuration to generate (e.g. 'Debug').
  """
  options = params['options']
  generator_flags = params['generator_flags']
  flavor = gyp.common.GetFlavor(params)
  # generator_dir: relative path from pwd to where make puts build files.
  # Makes migrating from make to cmake easier, cmake doesn't put anything here.
  # Each Gyp configuration creates a different CMakeLists.txt file
  # to avoid incompatibilities between Gyp and CMake configurations.
  generator_dir = os.path.relpath(options.generator_output or '.')
  # output_dir: relative path from generator_dir to the build directory.
  output_dir = generator_flags.get('output_dir', 'out')
  # build_dir: relative path from source root to our output files.
  # e.g. "out/Debug"
  build_dir = os.path.normpath(os.path.join(generator_dir,
                                            output_dir,
                                            config_to_use))
  toplevel_build = os.path.join(options.toplevel_dir, build_dir)
  output_file = os.path.join(toplevel_build, 'CMakeLists.txt')
  gyp.common.EnsureDirExists(output_file)
  output = open(output_file, 'w')
  output.write('cmake_minimum_required(VERSION 2.8.8 FATAL_ERROR)\n')
  output.write('cmake_policy(VERSION 2.8.8)\n')
  # The last target in target_list names the project.
  gyp_file, project_target, _ = gyp.common.ParseQualifiedTarget(target_list[-1])
  output.write('project(')
  output.write(project_target)
  output.write(')\n')
  SetVariable(output, 'configuration', config_to_use)
  ar = None
  cc = None
  cxx = None
  # Toolchain overrides: make_global_settings paths are relative to the
  # toplevel dir, so rebase them onto the build dir; environment variables
  # (AR_target/CC_target/...) take precedence over both.
  make_global_settings = data[gyp_file].get('make_global_settings', [])
  build_to_top = gyp.common.InvertRelativePath(build_dir,
                                               options.toplevel_dir)
  for key, value in make_global_settings:
    if key == 'AR':
      ar = os.path.join(build_to_top, value)
    if key == 'CC':
      cc = os.path.join(build_to_top, value)
    if key == 'CXX':
      cxx = os.path.join(build_to_top, value)
  ar = gyp.common.GetEnvironFallback(['AR_target', 'AR'], ar)
  cc = gyp.common.GetEnvironFallback(['CC_target', 'CC'], cc)
  cxx = gyp.common.GetEnvironFallback(['CXX_target', 'CXX'], cxx)
  if ar:
    SetVariable(output, 'CMAKE_AR', ar)
  if cc:
    SetVariable(output, 'CMAKE_C_COMPILER', cc)
  if cxx:
    SetVariable(output, 'CMAKE_CXX_COMPILER', cxx)
  # The following appears to be as-yet undocumented.
  # http://public.kitware.com/Bug/view.php?id=8392
  output.write('enable_language(ASM)\n')
  # ASM-ATT does not support .S files.
  # output.write('enable_language(ASM-ATT)\n')
  if cc:
    SetVariable(output, 'CMAKE_ASM_COMPILER', cc)
  SetVariable(output, 'builddir', '${CMAKE_CURRENT_BINARY_DIR}')
  SetVariable(output, 'obj', '${builddir}/obj')
  output.write('\n')
  # TODO: Undocumented/unsupported (the CMake Java generator depends on it).
  # CMake by default names the object resulting from foo.c to be foo.c.o.
  # Gyp traditionally names the object resulting from foo.c foo.o.
  # This should be irrelevant, but some targets extract .o files from .a
  # and depend on the name of the extracted .o files.
  output.write('set(CMAKE_C_OUTPUT_EXTENSION_REPLACE 1)\n')
  output.write('set(CMAKE_CXX_OUTPUT_EXTENSION_REPLACE 1)\n')
  output.write('\n')
  # Force ninja to use rsp files. Otherwise link and ar lines can get too long,
  # resulting in 'Argument list too long' errors.
  # However, rsp files don't work correctly on Mac.
  if flavor != 'mac':
    output.write('set(CMAKE_NINJA_FORCE_RESPONSE_FILE 1)\n')
  output.write('\n')
  namer = CMakeNamer(target_list)
  # The list of targets upon which the 'all' target should depend.
  # CMake has its own implicit 'all' target, one is not created explicitly.
  all_qualified_targets = set()
  for build_file in params['build_files']:
    for qualified_target in gyp.common.AllTargets(target_list,
                                                  target_dicts,
                                                  os.path.normpath(build_file)):
      all_qualified_targets.add(qualified_target)
  for qualified_target in target_list:
    if flavor == 'mac':
      # On mac, merge the file-level xcode_settings into each target's spec
      # before writing it, mirroring the other mac generators.
      gyp_file, _, _ = gyp.common.ParseQualifiedTarget(qualified_target)
      spec = target_dicts[qualified_target]
      gyp.xcode_emulation.MergeGlobalXcodeSettingsToSpec(data[gyp_file], spec)
    WriteTarget(namer, qualified_target, target_dicts, build_dir, config_to_use,
                options, generator_flags, all_qualified_targets, flavor, output)
  output.close()
def PerformBuild(data, configurations, params):
  """Run the generated build: cmake (Ninja generator) then ninja.

  Invoked by gyp's --build support; assumes GenerateOutputForConfig() has
  already written a CMakeLists.txt into each configuration's build dir.
  Raises subprocess.CalledProcessError if either tool exits non-zero.
  """
  options = params['options']
  generator_flags = params['generator_flags']
  # generator_dir: relative path from pwd to where make puts build files.
  # Makes migrating from make to cmake easier, cmake doesn't put anything here.
  generator_dir = os.path.relpath(options.generator_output or '.')
  # output_dir: relative path from generator_dir to the build directory.
  output_dir = generator_flags.get('output_dir', 'out')
  for config_name in configurations:
    # build_dir: relative path from source root to our output files.
    # e.g. "out/Debug"
    build_dir = os.path.normpath(os.path.join(generator_dir,
                                              output_dir,
                                              config_name))
    # First let CMake turn the generated CMakeLists.txt into Ninja files...
    arguments = ['cmake', '-G', 'Ninja']
    print 'Generating [%s]: %s' % (config_name, arguments)
    subprocess.check_call(arguments, cwd=build_dir)
    # ...then run the actual build.
    arguments = ['ninja', '-C', build_dir]
    print 'Building [%s]: %s' % (config_name, arguments)
    subprocess.check_call(arguments)
def CallGenerateOutputForConfig(arglist):
  """Multiprocessing worker entry point.

  arglist is the 5-tuple (target_list, target_dicts, data, params,
  config_name) built by GenerateOutput(); it is unpacked straight into
  GenerateOutputForConfig().
  """
  # Ignore the interrupt signal so that the parent process catches it and
  # kills all multiprocessing children.
  signal.signal(signal.SIGINT, signal.SIG_IGN)
  GenerateOutputForConfig(*arglist)
def GenerateOutput(target_list, target_dicts, data, params):
  """Gyp generator entry point: emit CMakeLists.txt for each configuration.

  If the 'config' generator flag is set, only that configuration is
  generated; otherwise every configuration is generated, in parallel when
  params['parallel'] is set.
  """
  user_config = params.get('generator_flags', {}).get('config', None)
  if user_config:
    GenerateOutputForConfig(target_list, target_dicts, data,
                            params, user_config)
  else:
    # Every target shares the same configuration names, so the first
    # target's list is representative.
    config_names = target_dicts[target_list[0]]['configurations'].keys()
    if params['parallel']:
      try:
        pool = multiprocessing.Pool(len(config_names))
        arglists = []
        for config_name in config_names:
          arglists.append((target_list, target_dicts, data,
                           params, config_name))
        pool.map(CallGenerateOutputForConfig, arglists)
      except KeyboardInterrupt, e:
        # Workers ignore SIGINT (see CallGenerateOutputForConfig), so the
        # parent is responsible for tearing the pool down on Ctrl-C.
        pool.terminate()
        raise e
    else:
      for config_name in config_names:
        GenerateOutputForConfig(target_list, target_dicts, data,
                                params, config_name)
|
u0m3/gr-baz | refs/heads/master | gr_baz_attach.py | 4 | #!/usr/bin/env python
import sys
import os
if __name__ == '__main__':
	# Usage: the single required argument is the path to a gnuradio source
	# tree whose top-level CMakeLists.txt will be patched.
	if len(sys.argv) == 1:
		print 'usage: gr_baz_attach.py [path to gnuradio src tree]'
		exit()
	top_gr_cmakelists = os.path.join(sys.argv[1], 'CMakeLists.txt')
	# Escape backslashes so the path survives being embedded in CMake code
	# (relevant on Windows).
	gr_baz_src_dir = os.path.dirname(os.path.abspath(__file__)).replace("\\","\\\\")
	content = open(top_gr_cmakelists).read()
	# Idempotence guard: only patch if gr-baz was not already attached.
	if 'BazSubProj.cmake' not in content:
		# Hook gr-baz in right after the 'grc' subdirectory by appending an
		# include() of this checkout's BazSubProj.cmake.
		content = content.replace('add_subdirectory(grc)',
			"""add_subdirectory(grc)
file(TO_CMAKE_PATH %s GR_BAZ_SRC_DIR)
include(${GR_BAZ_SRC_DIR}/BazSubProj.cmake)"""%gr_baz_src_dir)
		open(top_gr_cmakelists, 'w').write(content)
|
frouty/odoo_oph | refs/heads/dev_70 | addons/base_report_designer/plugin/openerp_report_designer/bin/script/ExportToRML.py | 87 | #########################################################################
#
# Copyright (c) 2003-2004 Danny Brewer d29583@groovegarden.com
# Copyright (C) 2004-2013 OpenERP SA (<http://openerp.com>).
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# See: http://www.gnu.org/licenses/lgpl.html
#
#############################################################################
import os
import uno
import unohelper
import string
import tempfile
import base64
import sys
# Python 2 hack: reloading sys re-exposes setdefaultencoding (which site.py
# deletes) so the process-wide default string encoding can be forced to UTF-8.
reload(sys)
sys.setdefaultencoding("utf8")
from com.sun.star.task import XJobExecutor
# When run standalone (not as a packaged OpenOffice extension), pull in the
# helper libraries and supply hard-coded connection defaults.
# Note: '<>' is the Python 2 spelling of '!='.
if __name__<>"package":
    from lib.gui import *
    from LoginTest import *
    from lib.error import *
    from lib.tools import *
    from lib.logreport import *
    from lib.rpc import *
    # Default database name and user id used for the server RPC calls.
    database="test"
    uid = 3
class ExportToRML( unohelper.Base, XJobExecutor ):
    """OpenOffice job that converts the current SXW document to RML.

    The conversion happens server side: the document bytes are sent
    (base64-encoded) to the OpenERP server's
    ir.actions.report.xml.sxwtorml method and the returned RML markup is
    written to a file chosen by the user.
    """
    def __init__(self, ctx):
        self.ctx = ctx
        self.module = "openerp_report"
        self.version = "0.1"
        # LoginTest() presumably populates the globals `loginstatus`, `url`
        # and `passwd` used below — TODO confirm against lib.gui/LoginTest.
        LoginTest()
        if not loginstatus and __name__=="package":
            exit(1)
        desktop=getDesktop()
        doc = desktop.getCurrentComponent()
        docinfo=doc.getDocumentInfo()
        global url
        self.sock=RPCSession(url)
        # Read Data from sxw file
        tmpsxw = tempfile.mktemp('.'+"sxw")
        if not doc.hasLocation():
            # Unsaved document: persist it to a temporary .sxw first so its
            # bytes can be read back from disk.
            mytype = Array(makePropertyValue("MediaType","application/vnd.sun.xml.writer"),)
            doc.storeAsURL("file://"+tmpsxw,mytype)
        # doc.getURL() starts with 'file://'; [7:] strips the scheme prefix.
        data = read_data_from_file( get_absolute_file_path( doc.getURL()[7:] ) )
        file_type = doc.getURL()[7:].split(".")[-1]
        # NOTE(review): user field 2 appears to hold the server-side report
        # reference; empty means the file was never sent to the server.
        if docinfo.getUserFieldValue(2) == "":
            ErrorDialog("Please Save this file on server","Use Send To Server Option in OpenERP Report Menu","Error")
            exit(1)
        filename = self.GetAFileName()
        if not filename:
            # User cancelled the save dialog.
            exit(1)
        global passwd
        self.password = passwd
        try:
            # Server-side conversion; returns a dict with the RML markup.
            res = self.sock.execute(database, uid, self.password, 'ir.actions.report.xml', 'sxwtorml',base64.encodestring(data),file_type)
            if res['report_rml_content']:
                write_data_to_file(get_absolute_file_path(filename), res['report_rml_content'])
        except Exception,e:
            # Log the full traceback, then show a user-facing error dialog.
            import traceback,sys
            info = reduce(lambda x, y: x+y, traceback.format_exception(sys.exc_type, sys.exc_value, sys.exc_traceback))
            self.logobj.log_write('ExportToRML',LOG_ERROR, info)
            ErrorDialog("Cannot save the file to the hard drive.", "Exception: %s." % e, "Error" )
    def GetAFileName(self):
        """Ask the user for a destination path via the UNO file picker.

        Returns the chosen local path ('' when cancelled), with the
        'file://' scheme and any 'localhost' host part stripped.
        """
        sFilePickerArgs = Array(10)
        oFileDialog = createUnoService("com.sun.star.ui.dialogs.FilePicker")
        oFileDialog.initialize(sFilePickerArgs)
        oFileDialog.appendFilter("OpenERP Report File Save To ....","*.rml")
        # Suggest a unique default name of the form 'OpenERP-<random>.rml'.
        f_path = "OpenERP-"+ os.path.basename( tempfile.mktemp("","") ) + ".rml"
        initPath = tempfile.gettempdir()
        oUcb = createUnoService("com.sun.star.ucb.SimpleFileAccess")
        if oUcb.exists(initPath):
            # On Windows an extra '/' is needed after 'file://' before the
            # drive letter.
            oFileDialog.setDisplayDirectory('file://' + ( os.name == 'nt' and '/' or '' ) + initPath )
        oFileDialog.setDefaultName(f_path )
        # execute() returns 1 when the user pressed OK; otherwise ''.
        sPath = oFileDialog.execute() == 1 and oFileDialog.Files[0] or ''
        oFileDialog.dispose()
        # Strip the 7-character 'file://' scheme.
        sPath = sPath[7:]
        if sPath.startswith('localhost/'):
            # Some platforms return 'file://localhost/...': drop 'localhost'
            # too (and its trailing slash on Windows, keeping the leading
            # '/' on POSIX).
            slash = int(os.name == 'nt')
            sPath = sPath[9 + slash:]
        return sPath
# Entry points: run the dialog directly when executed as a script, or
# register the component with OpenOffice when loaded as an extension package.
if __name__<>"package" and __name__=="__main__":
    ExportToRML(None)
elif __name__=="package":
    g_ImplementationHelper.addImplementation( ExportToRML, "org.openoffice.openerp.report.exporttorml", ("com.sun.star.task.Job",),)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
qenter/vlc-android | refs/heads/master | toolchains/arm/lib/python2.7/test/test_lib2to3.py | 137 | # Skipping test_parser and test_all_fixers
# because of running
from lib2to3.tests import (test_fixers, test_pytree, test_util, test_refactor,
test_parser, test_main as test_main_)
import unittest
from test.test_support import run_unittest
def suite():
    """Return one TestSuite aggregating all bundled lib2to3 test modules."""
    modules = (test_fixers, test_pytree, test_util, test_refactor,
               test_parser, test_main_)
    loader = unittest.TestLoader()
    combined = unittest.TestSuite()
    for module in modules:
        combined.addTests(loader.loadTestsFromModule(module))
    return combined
def test_main():
    """regrtest entry point: run the aggregated lib2to3 suite."""
    run_unittest(suite())
if __name__ == '__main__':
    test_main()
|
sysalexis/kbengine | refs/heads/master | kbe/res/scripts/common/Lib/unittest/test/testmock/testmock.py | 73 | import copy
import sys
import unittest
from unittest.test.testmock.support import is_instance
from unittest import mock
from unittest.mock import (
call, DEFAULT, patch, sentinel,
MagicMock, Mock, NonCallableMock,
NonCallableMagicMock, _CallList,
create_autospec
)
class Iter(object):
    """Test fixture: a four-word iterator implementing both the Python 3
    (`__next__`) and Python 2 (`next`) iterator protocols."""

    def __init__(self):
        # Underlying iterator that actually produces the words.
        self.thing = iter(['this', 'is', 'an', 'iter'])

    def __iter__(self):
        return self

    def __next__(self):
        return next(self.thing)

    # Keep the Python 2 spelling bound to the same function.
    next = __next__
class Something(object):
    """Spec fixture: exposes an instance, a class and a static method that
    all share the signature (a, b, c, d=None) and return None.  Used by the
    mock tests to verify signature-aware spec'ing, so the signatures must
    stay exactly as written."""

    def meth(self, a, b, c, d=None):
        return None

    @classmethod
    def cmeth(cls, a, b, c, d=None):
        return None

    @staticmethod
    def smeth(a, b, c, d=None):
        return None
class MockTest(unittest.TestCase):
def test_all(self):
# if __all__ is badly defined then import * will raise an error
# We have to exec it because you can't import * inside a method
# in Python 3
exec("from unittest.mock import *")
def test_constructor(self):
mock = Mock()
self.assertFalse(mock.called, "called not initialised correctly")
self.assertEqual(mock.call_count, 0,
"call_count not initialised correctly")
self.assertTrue(is_instance(mock.return_value, Mock),
"return_value not initialised correctly")
self.assertEqual(mock.call_args, None,
"call_args not initialised correctly")
self.assertEqual(mock.call_args_list, [],
"call_args_list not initialised correctly")
self.assertEqual(mock.method_calls, [],
"method_calls not initialised correctly")
# Can't use hasattr for this test as it always returns True on a mock
self.assertNotIn('_items', mock.__dict__,
"default mock should not have '_items' attribute")
self.assertIsNone(mock._mock_parent,
"parent not initialised correctly")
self.assertIsNone(mock._mock_methods,
"methods not initialised correctly")
self.assertEqual(mock._mock_children, {},
"children not initialised incorrectly")
def test_return_value_in_constructor(self):
mock = Mock(return_value=None)
self.assertIsNone(mock.return_value,
"return value in constructor not honoured")
def test_repr(self):
mock = Mock(name='foo')
self.assertIn('foo', repr(mock))
self.assertIn("'%s'" % id(mock), repr(mock))
mocks = [(Mock(), 'mock'), (Mock(name='bar'), 'bar')]
for mock, name in mocks:
self.assertIn('%s.bar' % name, repr(mock.bar))
self.assertIn('%s.foo()' % name, repr(mock.foo()))
self.assertIn('%s.foo().bing' % name, repr(mock.foo().bing))
self.assertIn('%s()' % name, repr(mock()))
self.assertIn('%s()()' % name, repr(mock()()))
self.assertIn('%s()().foo.bar.baz().bing' % name,
repr(mock()().foo.bar.baz().bing))
def test_repr_with_spec(self):
class X(object):
pass
mock = Mock(spec=X)
self.assertIn(" spec='X' ", repr(mock))
mock = Mock(spec=X())
self.assertIn(" spec='X' ", repr(mock))
mock = Mock(spec_set=X)
self.assertIn(" spec_set='X' ", repr(mock))
mock = Mock(spec_set=X())
self.assertIn(" spec_set='X' ", repr(mock))
mock = Mock(spec=X, name='foo')
self.assertIn(" spec='X' ", repr(mock))
self.assertIn(" name='foo' ", repr(mock))
mock = Mock(name='foo')
self.assertNotIn("spec", repr(mock))
mock = Mock()
self.assertNotIn("spec", repr(mock))
mock = Mock(spec=['foo'])
self.assertNotIn("spec", repr(mock))
def test_side_effect(self):
mock = Mock()
def effect(*args, **kwargs):
raise SystemError('kablooie')
mock.side_effect = effect
self.assertRaises(SystemError, mock, 1, 2, fish=3)
mock.assert_called_with(1, 2, fish=3)
results = [1, 2, 3]
def effect():
return results.pop()
mock.side_effect = effect
self.assertEqual([mock(), mock(), mock()], [3, 2, 1],
"side effect not used correctly")
mock = Mock(side_effect=sentinel.SideEffect)
self.assertEqual(mock.side_effect, sentinel.SideEffect,
"side effect in constructor not used")
def side_effect():
return DEFAULT
mock = Mock(side_effect=side_effect, return_value=sentinel.RETURN)
self.assertEqual(mock(), sentinel.RETURN)
def test_autospec_side_effect(self):
# Test for issue17826
results = [1, 2, 3]
def effect():
return results.pop()
def f():
pass
mock = create_autospec(f)
mock.side_effect = [1, 2, 3]
self.assertEqual([mock(), mock(), mock()], [1, 2, 3],
"side effect not used correctly in create_autospec")
# Test where side effect is a callable
results = [1, 2, 3]
mock = create_autospec(f)
mock.side_effect = effect
self.assertEqual([mock(), mock(), mock()], [3, 2, 1],
"callable side effect not used correctly")
@unittest.skipUnless('java' in sys.platform,
'This test only applies to Jython')
def test_java_exception_side_effect(self):
import java
mock = Mock(side_effect=java.lang.RuntimeException("Boom!"))
# can't use assertRaises with java exceptions
try:
mock(1, 2, fish=3)
except java.lang.RuntimeException:
pass
else:
self.fail('java exception not raised')
mock.assert_called_with(1,2, fish=3)
def test_reset_mock(self):
parent = Mock()
spec = ["something"]
mock = Mock(name="child", parent=parent, spec=spec)
mock(sentinel.Something, something=sentinel.SomethingElse)
something = mock.something
mock.something()
mock.side_effect = sentinel.SideEffect
return_value = mock.return_value
return_value()
mock.reset_mock()
self.assertEqual(mock._mock_name, "child",
"name incorrectly reset")
self.assertEqual(mock._mock_parent, parent,
"parent incorrectly reset")
self.assertEqual(mock._mock_methods, spec,
"methods incorrectly reset")
self.assertFalse(mock.called, "called not reset")
self.assertEqual(mock.call_count, 0, "call_count not reset")
self.assertEqual(mock.call_args, None, "call_args not reset")
self.assertEqual(mock.call_args_list, [], "call_args_list not reset")
self.assertEqual(mock.method_calls, [],
"method_calls not initialised correctly: %r != %r" %
(mock.method_calls, []))
self.assertEqual(mock.mock_calls, [])
self.assertEqual(mock.side_effect, sentinel.SideEffect,
"side_effect incorrectly reset")
self.assertEqual(mock.return_value, return_value,
"return_value incorrectly reset")
self.assertFalse(return_value.called, "return value mock not reset")
self.assertEqual(mock._mock_children, {'something': something},
"children reset incorrectly")
self.assertEqual(mock.something, something,
"children incorrectly cleared")
self.assertFalse(mock.something.called, "child not reset")
def test_reset_mock_recursion(self):
mock = Mock()
mock.return_value = mock
# used to cause recursion
mock.reset_mock()
def test_call(self):
mock = Mock()
self.assertTrue(is_instance(mock.return_value, Mock),
"Default return_value should be a Mock")
result = mock()
self.assertEqual(mock(), result,
"different result from consecutive calls")
mock.reset_mock()
ret_val = mock(sentinel.Arg)
self.assertTrue(mock.called, "called not set")
self.assertEqual(mock.call_count, 1, "call_count incoreect")
self.assertEqual(mock.call_args, ((sentinel.Arg,), {}),
"call_args not set")
self.assertEqual(mock.call_args_list, [((sentinel.Arg,), {})],
"call_args_list not initialised correctly")
mock.return_value = sentinel.ReturnValue
ret_val = mock(sentinel.Arg, key=sentinel.KeyArg)
self.assertEqual(ret_val, sentinel.ReturnValue,
"incorrect return value")
self.assertEqual(mock.call_count, 2, "call_count incorrect")
self.assertEqual(mock.call_args,
((sentinel.Arg,), {'key': sentinel.KeyArg}),
"call_args not set")
self.assertEqual(mock.call_args_list, [
((sentinel.Arg,), {}),
((sentinel.Arg,), {'key': sentinel.KeyArg})
],
"call_args_list not set")
def test_call_args_comparison(self):
mock = Mock()
mock()
mock(sentinel.Arg)
mock(kw=sentinel.Kwarg)
mock(sentinel.Arg, kw=sentinel.Kwarg)
self.assertEqual(mock.call_args_list, [
(),
((sentinel.Arg,),),
({"kw": sentinel.Kwarg},),
((sentinel.Arg,), {"kw": sentinel.Kwarg})
])
self.assertEqual(mock.call_args,
((sentinel.Arg,), {"kw": sentinel.Kwarg}))
def test_assert_called_with(self):
mock = Mock()
mock()
# Will raise an exception if it fails
mock.assert_called_with()
self.assertRaises(AssertionError, mock.assert_called_with, 1)
mock.reset_mock()
self.assertRaises(AssertionError, mock.assert_called_with)
mock(1, 2, 3, a='fish', b='nothing')
mock.assert_called_with(1, 2, 3, a='fish', b='nothing')
def test_assert_called_with_function_spec(self):
def f(a, b, c, d=None):
pass
mock = Mock(spec=f)
mock(1, b=2, c=3)
mock.assert_called_with(1, 2, 3)
mock.assert_called_with(a=1, b=2, c=3)
self.assertRaises(AssertionError, mock.assert_called_with,
1, b=3, c=2)
# Expected call doesn't match the spec's signature
with self.assertRaises(AssertionError) as cm:
mock.assert_called_with(e=8)
self.assertIsInstance(cm.exception.__cause__, TypeError)
def test_assert_called_with_method_spec(self):
def _check(mock):
mock(1, b=2, c=3)
mock.assert_called_with(1, 2, 3)
mock.assert_called_with(a=1, b=2, c=3)
self.assertRaises(AssertionError, mock.assert_called_with,
1, b=3, c=2)
mock = Mock(spec=Something().meth)
_check(mock)
mock = Mock(spec=Something.cmeth)
_check(mock)
mock = Mock(spec=Something().cmeth)
_check(mock)
mock = Mock(spec=Something.smeth)
_check(mock)
mock = Mock(spec=Something().smeth)
_check(mock)
def test_assert_called_once_with(self):
mock = Mock()
mock()
# Will raise an exception if it fails
mock.assert_called_once_with()
mock()
self.assertRaises(AssertionError, mock.assert_called_once_with)
mock.reset_mock()
self.assertRaises(AssertionError, mock.assert_called_once_with)
mock('foo', 'bar', baz=2)
mock.assert_called_once_with('foo', 'bar', baz=2)
mock.reset_mock()
mock('foo', 'bar', baz=2)
self.assertRaises(
AssertionError,
lambda: mock.assert_called_once_with('bob', 'bar', baz=2)
)
def test_assert_called_once_with_function_spec(self):
def f(a, b, c, d=None):
pass
mock = Mock(spec=f)
mock(1, b=2, c=3)
mock.assert_called_once_with(1, 2, 3)
mock.assert_called_once_with(a=1, b=2, c=3)
self.assertRaises(AssertionError, mock.assert_called_once_with,
1, b=3, c=2)
# Expected call doesn't match the spec's signature
with self.assertRaises(AssertionError) as cm:
mock.assert_called_once_with(e=8)
self.assertIsInstance(cm.exception.__cause__, TypeError)
# Mock called more than once => always fails
mock(4, 5, 6)
self.assertRaises(AssertionError, mock.assert_called_once_with,
1, 2, 3)
self.assertRaises(AssertionError, mock.assert_called_once_with,
4, 5, 6)
def test_attribute_access_returns_mocks(self):
mock = Mock()
something = mock.something
self.assertTrue(is_instance(something, Mock), "attribute isn't a mock")
self.assertEqual(mock.something, something,
"different attributes returned for same name")
# Usage example
mock = Mock()
mock.something.return_value = 3
self.assertEqual(mock.something(), 3, "method returned wrong value")
self.assertTrue(mock.something.called,
"method didn't record being called")
def test_attributes_have_name_and_parent_set(self):
mock = Mock()
something = mock.something
self.assertEqual(something._mock_name, "something",
"attribute name not set correctly")
self.assertEqual(something._mock_parent, mock,
"attribute parent not set correctly")
def test_method_calls_recorded(self):
mock = Mock()
mock.something(3, fish=None)
mock.something_else.something(6, cake=sentinel.Cake)
self.assertEqual(mock.something_else.method_calls,
[("something", (6,), {'cake': sentinel.Cake})],
"method calls not recorded correctly")
self.assertEqual(mock.method_calls, [
("something", (3,), {'fish': None}),
("something_else.something", (6,), {'cake': sentinel.Cake})
],
"method calls not recorded correctly")
def test_method_calls_compare_easily(self):
mock = Mock()
mock.something()
self.assertEqual(mock.method_calls, [('something',)])
self.assertEqual(mock.method_calls, [('something', (), {})])
mock = Mock()
mock.something('different')
self.assertEqual(mock.method_calls, [('something', ('different',))])
self.assertEqual(mock.method_calls,
[('something', ('different',), {})])
mock = Mock()
mock.something(x=1)
self.assertEqual(mock.method_calls, [('something', {'x': 1})])
self.assertEqual(mock.method_calls, [('something', (), {'x': 1})])
mock = Mock()
mock.something('different', some='more')
self.assertEqual(mock.method_calls, [
('something', ('different',), {'some': 'more'})
])
def test_only_allowed_methods_exist(self):
for spec in ['something'], ('something',):
for arg in 'spec', 'spec_set':
mock = Mock(**{arg: spec})
# this should be allowed
mock.something
self.assertRaisesRegex(
AttributeError,
"Mock object has no attribute 'something_else'",
getattr, mock, 'something_else'
)
def test_from_spec(self):
class Something(object):
x = 3
__something__ = None
def y(self):
pass
def test_attributes(mock):
# should work
mock.x
mock.y
mock.__something__
self.assertRaisesRegex(
AttributeError,
"Mock object has no attribute 'z'",
getattr, mock, 'z'
)
self.assertRaisesRegex(
AttributeError,
"Mock object has no attribute '__foobar__'",
getattr, mock, '__foobar__'
)
test_attributes(Mock(spec=Something))
test_attributes(Mock(spec=Something()))
def test_wraps_calls(self):
real = Mock()
mock = Mock(wraps=real)
self.assertEqual(mock(), real())
real.reset_mock()
mock(1, 2, fish=3)
real.assert_called_with(1, 2, fish=3)
def test_wraps_call_with_nondefault_return_value(self):
real = Mock()
mock = Mock(wraps=real)
mock.return_value = 3
self.assertEqual(mock(), 3)
self.assertFalse(real.called)
def test_wraps_attributes(self):
class Real(object):
attribute = Mock()
real = Real()
mock = Mock(wraps=real)
self.assertEqual(mock.attribute(), real.attribute())
self.assertRaises(AttributeError, lambda: mock.fish)
self.assertNotEqual(mock.attribute, real.attribute)
result = mock.attribute.frog(1, 2, fish=3)
Real.attribute.frog.assert_called_with(1, 2, fish=3)
self.assertEqual(result, Real.attribute.frog())
def test_exceptional_side_effect(self):
mock = Mock(side_effect=AttributeError)
self.assertRaises(AttributeError, mock)
mock = Mock(side_effect=AttributeError('foo'))
self.assertRaises(AttributeError, mock)
def test_baseexceptional_side_effect(self):
mock = Mock(side_effect=KeyboardInterrupt)
self.assertRaises(KeyboardInterrupt, mock)
mock = Mock(side_effect=KeyboardInterrupt('foo'))
self.assertRaises(KeyboardInterrupt, mock)
def test_assert_called_with_message(self):
mock = Mock()
self.assertRaisesRegex(AssertionError, 'Not called',
mock.assert_called_with)
def test_assert_called_once_with_message(self):
mock = Mock(name='geoffrey')
self.assertRaisesRegex(AssertionError,
r"Expected 'geoffrey' to be called once\.",
mock.assert_called_once_with)
def test__name__(self):
mock = Mock()
self.assertRaises(AttributeError, lambda: mock.__name__)
mock.__name__ = 'foo'
self.assertEqual(mock.__name__, 'foo')
def test_spec_list_subclass(self):
class Sub(list):
pass
mock = Mock(spec=Sub(['foo']))
mock.append(3)
mock.append.assert_called_with(3)
self.assertRaises(AttributeError, getattr, mock, 'foo')
def test_spec_class(self):
class X(object):
pass
mock = Mock(spec=X)
self.assertIsInstance(mock, X)
mock = Mock(spec=X())
self.assertIsInstance(mock, X)
self.assertIs(mock.__class__, X)
self.assertEqual(Mock().__class__.__name__, 'Mock')
mock = Mock(spec_set=X)
self.assertIsInstance(mock, X)
mock = Mock(spec_set=X())
self.assertIsInstance(mock, X)
def test_setting_attribute_with_spec_set(self):
class X(object):
y = 3
mock = Mock(spec=X)
mock.x = 'foo'
mock = Mock(spec_set=X)
def set_attr():
mock.x = 'foo'
mock.y = 'foo'
self.assertRaises(AttributeError, set_attr)
def test_copy(self):
current = sys.getrecursionlimit()
self.addCleanup(sys.setrecursionlimit, current)
# can't use sys.maxint as this doesn't exist in Python 3
sys.setrecursionlimit(int(10e8))
# this segfaults without the fix in place
copy.copy(Mock())
def test_subclass_with_properties(self):
class SubClass(Mock):
def _get(self):
return 3
def _set(self, value):
raise NameError('strange error')
some_attribute = property(_get, _set)
s = SubClass(spec_set=SubClass)
self.assertEqual(s.some_attribute, 3)
def test():
s.some_attribute = 3
self.assertRaises(NameError, test)
def test():
s.foo = 'bar'
self.assertRaises(AttributeError, test)
def test_setting_call(self):
mock = Mock()
def __call__(self, a):
return self._mock_call(a)
type(mock).__call__ = __call__
mock('one')
mock.assert_called_with('one')
self.assertRaises(TypeError, mock, 'one', 'two')
def test_dir(self):
mock = Mock()
attrs = set(dir(mock))
type_attrs = set([m for m in dir(Mock) if not m.startswith('_')])
# all public attributes from the type are included
self.assertEqual(set(), type_attrs - attrs)
# creates these attributes
mock.a, mock.b
self.assertIn('a', dir(mock))
self.assertIn('b', dir(mock))
# instance attributes
mock.c = mock.d = None
self.assertIn('c', dir(mock))
self.assertIn('d', dir(mock))
# magic methods
mock.__iter__ = lambda s: iter([])
self.assertIn('__iter__', dir(mock))
    def test_dir_from_spec(self):
        """dir() on a specced mock exposes the spec's attributes, without
        duplicating names that are later shadowed."""
        mock = Mock(spec=unittest.TestCase)
        testcase_attrs = set(dir(unittest.TestCase))
        attrs = set(dir(mock))
        # all attributes from the spec are included
        self.assertEqual(set(), testcase_attrs - attrs)
        # shadow a sys attribute
        mock.version = 3
        self.assertEqual(dir(mock).count('version'), 1)
    def test_filter_dir(self):
        """With mock.FILTER_DIR disabled, dir() exposes every type attribute.

        `mock` here is the mock *module*; the patch is reverted in the
        finally clause so other tests keep the default filtering.
        """
        patcher = patch.object(mock, 'FILTER_DIR', False)
        patcher.start()
        try:
            attrs = set(dir(Mock()))
            type_attrs = set(dir(Mock))
            # ALL attributes from the type are included
            self.assertEqual(set(), type_attrs - attrs)
        finally:
            patcher.stop()
def test_configure_mock(self):
mock = Mock(foo='bar')
self.assertEqual(mock.foo, 'bar')
mock = MagicMock(foo='bar')
self.assertEqual(mock.foo, 'bar')
kwargs = {'side_effect': KeyError, 'foo.bar.return_value': 33,
'foo': MagicMock()}
mock = Mock(**kwargs)
self.assertRaises(KeyError, mock)
self.assertEqual(mock.foo.bar(), 33)
self.assertIsInstance(mock.foo, MagicMock)
mock = Mock()
mock.configure_mock(**kwargs)
self.assertRaises(KeyError, mock)
self.assertEqual(mock.foo.bar(), 33)
self.assertIsInstance(mock.foo, MagicMock)
def assertRaisesWithMsg(self, exception, message, func, *args, **kwargs):
# needed because assertRaisesRegex doesn't work easily with newlines
try:
func(*args, **kwargs)
except:
instance = sys.exc_info()[1]
self.assertIsInstance(instance, exception)
else:
self.fail('Exception %r not raised' % (exception,))
msg = str(instance)
self.assertEqual(msg, message)
    def test_assert_called_with_failure_message(self):
        """Failure messages show the expected call and the actual call
        (or 'Not called' when nothing was recorded)."""
        mock = NonCallableMock()
        expected = "mock(1, '2', 3, bar='foo')"
        message = 'Expected call: %s\nNot called'
        self.assertRaisesWithMsg(
            AssertionError, message % (expected,),
            mock.assert_called_with, 1, '2', 3, bar='foo'
        )
        mock.foo(1, '2', 3, foo='foo')
        asserters = [
            mock.foo.assert_called_with, mock.foo.assert_called_once_with
        ]
        # mismatching args and kwargs
        for meth in asserters:
            actual = "foo(1, '2', 3, foo='foo')"
            expected = "foo(1, '2', 3, bar='foo')"
            message = 'Expected call: %s\nActual call: %s'
            self.assertRaisesWithMsg(
                AssertionError, message % (expected, actual),
                meth, 1, '2', 3, bar='foo'
            )
        # just kwargs
        for meth in asserters:
            actual = "foo(1, '2', 3, foo='foo')"
            expected = "foo(bar='foo')"
            message = 'Expected call: %s\nActual call: %s'
            self.assertRaisesWithMsg(
                AssertionError, message % (expected, actual),
                meth, bar='foo'
            )
        # just args
        for meth in asserters:
            actual = "foo(1, '2', 3, foo='foo')"
            expected = "foo(1, 2, 3)"
            message = 'Expected call: %s\nActual call: %s'
            self.assertRaisesWithMsg(
                AssertionError, message % (expected, actual),
                meth, 1, 2, 3
            )
        # empty
        for meth in asserters:
            actual = "foo(1, '2', 3, foo='foo')"
            expected = "foo()"
            message = 'Expected call: %s\nActual call: %s'
            self.assertRaisesWithMsg(
                AssertionError, message % (expected, actual), meth
            )
    def test_mock_calls(self):
        """mock_calls records calls, method calls and chained/nested calls,
        both on the parent and on intermediate child mocks."""
        mock = MagicMock()
        # need to do this because MagicMock.mock_calls used to just return
        # a MagicMock which also returned a MagicMock when __eq__ was called
        self.assertIs(mock.mock_calls == [], True)
        mock = MagicMock()
        mock()
        expected = [('', (), {})]
        self.assertEqual(mock.mock_calls, expected)
        mock.foo()
        expected.append(call.foo())
        self.assertEqual(mock.mock_calls, expected)
        # intermediate mock_calls work too
        self.assertEqual(mock.foo.mock_calls, [('', (), {})])
        mock = MagicMock()
        mock().foo(1, 2, 3, a=4, b=5)
        expected = [
            ('', (), {}), ('().foo', (1, 2, 3), dict(a=4, b=5))
        ]
        self.assertEqual(mock.mock_calls, expected)
        self.assertEqual(mock.return_value.foo.mock_calls,
                         [('', (1, 2, 3), dict(a=4, b=5))])
        self.assertEqual(mock.return_value.mock_calls,
                         [('foo', (1, 2, 3), dict(a=4, b=5))])
        mock = MagicMock()
        mock().foo.bar().baz()
        expected = [
            ('', (), {}), ('().foo.bar', (), {}),
            ('().foo.bar().baz', (), {})
        ]
        self.assertEqual(mock.mock_calls, expected)
        self.assertEqual(mock().mock_calls,
                         call.foo.bar().baz().call_list())
        # behaviour must be identical for anonymous and named mocks
        for kwargs in dict(), dict(name='bar'):
            mock = MagicMock(**kwargs)
            int(mock.foo)
            expected = [('foo.__int__', (), {})]
            self.assertEqual(mock.mock_calls, expected)
            mock = MagicMock(**kwargs)
            mock.a()()
            expected = [('a', (), {}), ('a()', (), {})]
            self.assertEqual(mock.mock_calls, expected)
            self.assertEqual(mock.a().mock_calls, [call()])
            mock = MagicMock(**kwargs)
            mock(1)(2)(3)
            self.assertEqual(mock.mock_calls, call(1)(2)(3).call_list())
            self.assertEqual(mock().mock_calls, call(2)(3).call_list())
            self.assertEqual(mock()().mock_calls, call(3).call_list())
            mock = MagicMock(**kwargs)
            mock(1)(2)(3).a.b.c(4)
            self.assertEqual(mock.mock_calls,
                             call(1)(2)(3).a.b.c(4).call_list())
            self.assertEqual(mock().mock_calls,
                             call(2)(3).a.b.c(4).call_list())
            self.assertEqual(mock()().mock_calls,
                             call(3).a.b.c(4).call_list())
            mock = MagicMock(**kwargs)
            int(mock().foo.bar().baz())
            last_call = ('().foo.bar().baz().__int__', (), {})
            self.assertEqual(mock.mock_calls[-1], last_call)
            self.assertEqual(mock().mock_calls,
                             call.foo.bar().baz().__int__().call_list())
            self.assertEqual(mock().foo.bar().mock_calls,
                             call.baz().__int__().call_list())
            self.assertEqual(mock().foo.bar().baz.mock_calls,
                             call().__int__().call_list())
def test_subclassing(self):
class Subclass(Mock):
pass
mock = Subclass()
self.assertIsInstance(mock.foo, Subclass)
self.assertIsInstance(mock(), Subclass)
class Subclass(Mock):
def _get_child_mock(self, **kwargs):
return Mock(**kwargs)
mock = Subclass()
self.assertNotIsInstance(mock.foo, Subclass)
self.assertNotIsInstance(mock(), Subclass)
    def test_arg_lists(self):
        """call_args_list / method_calls / mock_calls are _CallList
        instances, start empty, and are cleared by reset_mock()."""
        mocks = [
            Mock(),
            MagicMock(),
            NonCallableMock(),
            NonCallableMagicMock()
        ]
        def assert_attrs(mock):
            # each tracking attribute is an empty list-like _CallList
            names = 'call_args_list', 'method_calls', 'mock_calls'
            for name in names:
                attr = getattr(mock, name)
                self.assertIsInstance(attr, _CallList)
                self.assertIsInstance(attr, list)
                self.assertEqual(attr, [])
        for mock in mocks:
            assert_attrs(mock)
            if callable(mock):
                mock()
                mock(1, 2)
                mock(a=3)
                mock.reset_mock()
                assert_attrs(mock)
            mock.foo()
            mock.foo.bar(1, a=3)
            mock.foo(1).bar().baz(3)
            mock.reset_mock()
            assert_attrs(mock)
def test_call_args_two_tuple(self):
mock = Mock()
mock(1, a=3)
mock(2, b=4)
self.assertEqual(len(mock.call_args), 2)
args, kwargs = mock.call_args
self.assertEqual(args, (2,))
self.assertEqual(kwargs, dict(b=4))
expected_list = [((1,), dict(a=3)), ((2,), dict(b=4))]
for expected, call_args in zip(expected_list, mock.call_args_list):
self.assertEqual(len(call_args), 2)
self.assertEqual(expected[0], call_args[0])
self.assertEqual(expected[1], call_args[1])
    def test_side_effect_iterator(self):
        """Iterables, iterators, strings, classes and custom iterables all
        work as side_effect; exhaustion raises StopIteration."""
        mock = Mock(side_effect=iter([1, 2, 3]))
        self.assertEqual([mock(), mock(), mock()], [1, 2, 3])
        self.assertRaises(StopIteration, mock)
        mock = MagicMock(side_effect=['a', 'b', 'c'])
        self.assertEqual([mock(), mock(), mock()], ['a', 'b', 'c'])
        self.assertRaises(StopIteration, mock)
        # a string is just an iterable of characters
        mock = Mock(side_effect='ghi')
        self.assertEqual([mock(), mock(), mock()], ['g', 'h', 'i'])
        self.assertRaises(StopIteration, mock)
        # a callable (class) side_effect is invoked instead of iterated
        class Foo(object):
            pass
        mock = MagicMock(side_effect=Foo)
        self.assertIsInstance(mock(), Foo)
        # Iter is the module-level custom iterable helper
        mock = Mock(side_effect=Iter())
        self.assertEqual([mock(), mock(), mock(), mock()],
                         ['this', 'is', 'an', 'iter'])
        self.assertRaises(StopIteration, mock)
def test_side_effect_iterator_exceptions(self):
for Klass in Mock, MagicMock:
iterable = (ValueError, 3, KeyError, 6)
m = Klass(side_effect=iterable)
self.assertRaises(ValueError, m)
self.assertEqual(m(), 3)
self.assertRaises(KeyError, m)
self.assertEqual(m(), 6)
    def test_side_effect_setting_iterator(self):
        """side_effect assigned after construction accepts iterators and
        iterables; iterators assigned directly are kept as-is."""
        mock = Mock()
        mock.side_effect = iter([1, 2, 3])
        self.assertEqual([mock(), mock(), mock()], [1, 2, 3])
        self.assertRaises(StopIteration, mock)
        side_effect = mock.side_effect
        self.assertIsInstance(side_effect, type(iter([])))
        # a plain list is converted to an iterator internally
        mock.side_effect = ['a', 'b', 'c']
        self.assertEqual([mock(), mock(), mock()], ['a', 'b', 'c'])
        self.assertRaises(StopIteration, mock)
        side_effect = mock.side_effect
        self.assertIsInstance(side_effect, type(iter([])))
        # a custom iterable object is stored unchanged
        this_iter = Iter()
        mock.side_effect = this_iter
        self.assertEqual([mock(), mock(), mock(), mock()],
                         ['this', 'is', 'an', 'iter'])
        self.assertRaises(StopIteration, mock)
        self.assertIs(mock.side_effect, this_iter)
def test_side_effect_iterator_default(self):
mock = Mock(return_value=2)
mock.side_effect = iter([1, DEFAULT])
self.assertEqual([mock(), mock()], [1, 2])
    def test_assert_has_calls_any_order(self):
        """assert_has_calls(any_order=True) matches calls in any position,
        honouring multiplicity, and accepts several call spellings."""
        mock = Mock()
        mock(1, 2)
        mock(a=3)
        mock(3, 4)
        mock(b=6)
        mock(b=6)
        # equivalent spellings of recorded calls: call objects, bare
        # (args,)/(kwargs,) tuples, and ('', args, kwargs) triples
        kalls = [
            call(1, 2), ({'a': 3},),
            ((3, 4),), ((), {'a': 3}),
            ('', (1, 2)), ('', {'a': 3}),
            ('', (1, 2), {}), ('', (), {'a': 3})
        ]
        for kall in kalls:
            mock.assert_has_calls([kall], any_order=True)
        # calls that never happened (or malformed specs) must fail
        for kall in call(1, '2'), call(b=3), call(), 3, None, 'foo':
            self.assertRaises(
                AssertionError, mock.assert_has_calls,
                [kall], any_order=True
            )
        kall_lists = [
            [call(1, 2), call(b=6)],
            [call(3, 4), call(1, 2)],
            [call(b=6), call(b=6)],
        ]
        for kall_list in kall_lists:
            mock.assert_has_calls(kall_list, any_order=True)
        # more occurrences than recorded, or unrecorded calls, must fail
        kall_lists = [
            [call(b=6), call(b=6), call(b=6)],
            [call(1, 2), call(1, 2)],
            [call(3, 4), call(1, 2), call(5, 7)],
            [call(b=6), call(3, 4), call(b=6), call(1, 2), call(b=6)],
        ]
        for kall_list in kall_lists:
            self.assertRaises(
                AssertionError, mock.assert_has_calls,
                kall_list, any_order=True
            )
    def test_assert_has_calls(self):
        """assert_has_calls (ordered) accepts any contiguous subsequence of
        the recorded calls, but not a reversed one."""
        kalls1 = [
            call(1, 2), ({'a': 3},),
            ((3, 4),), call(b=6),
            ('', (1,), {'b': 6}),
        ]
        kalls2 = [call.foo(), call.bar(1)]
        kalls2.extend(call.spam().baz(a=3).call_list())
        kalls2.extend(call.bam(set(), foo={}).fish([1]).call_list())
        mocks = []
        for mock in Mock(), MagicMock():
            mock(1, 2)
            mock(a=3)
            mock(3, 4)
            mock(b=6)
            mock(1, b=6)
            mocks.append((mock, kalls1))
        mock = Mock()
        mock.foo()
        mock.bar(1)
        mock.spam().baz(a=3)
        mock.bam(set(), foo={}).fish([1])
        mocks.append((mock, kalls2))
        for mock, kalls in mocks:
            # every window of length 1..3 must match in order...
            for i in range(len(kalls)):
                for step in 1, 2, 3:
                    these = kalls[i:i+step]
                    mock.assert_has_calls(these)
                    # ...while the reversed window (length > 1) must not
                    if len(these) > 1:
                        self.assertRaises(
                            AssertionError,
                            mock.assert_has_calls,
                            list(reversed(these))
                        )
    def test_assert_has_calls_with_function_spec(self):
        """With a function spec, expected calls are normalised against the
        signature before matching (keyword vs positional is irrelevant)."""
        def f(a, b, c, d=None):
            pass
        mock = Mock(spec=f)
        mock(1, b=2, c=3)
        mock(4, 5, c=6, d=7)
        mock(10, 11, c=12)
        # expected calls written positionally still match keyword calls
        calls = [
            ('', (1, 2, 3), {}),
            ('', (4, 5, 6), {'d': 7}),
            ((10, 11, 12), {}),
        ]
        mock.assert_has_calls(calls)
        mock.assert_has_calls(calls, any_order=True)
        mock.assert_has_calls(calls[1:])
        mock.assert_has_calls(calls[1:], any_order=True)
        mock.assert_has_calls(calls[:-1])
        mock.assert_has_calls(calls[:-1], any_order=True)
        # Reversed order
        calls = list(reversed(calls))
        with self.assertRaises(AssertionError):
            mock.assert_has_calls(calls)
        mock.assert_has_calls(calls, any_order=True)
        with self.assertRaises(AssertionError):
            mock.assert_has_calls(calls[1:])
        mock.assert_has_calls(calls[1:], any_order=True)
        with self.assertRaises(AssertionError):
            mock.assert_has_calls(calls[:-1])
        mock.assert_has_calls(calls[:-1], any_order=True)
def test_assert_any_call(self):
mock = Mock()
mock(1, 2)
mock(a=3)
mock(1, b=6)
mock.assert_any_call(1, 2)
mock.assert_any_call(a=3)
mock.assert_any_call(1, b=6)
self.assertRaises(
AssertionError,
mock.assert_any_call
)
self.assertRaises(
AssertionError,
mock.assert_any_call,
1, 3
)
self.assertRaises(
AssertionError,
mock.assert_any_call,
a=4
)
    def test_assert_any_call_with_function_spec(self):
        """With a function spec, assert_any_call normalises the expected
        call against the signature; a spec-invalid call chains TypeError."""
        def f(a, b, c, d=None):
            pass
        mock = Mock(spec=f)
        mock(1, b=2, c=3)
        mock(4, 5, c=6, d=7)
        # positional and keyword spellings of the same call both match
        mock.assert_any_call(1, 2, 3)
        mock.assert_any_call(a=1, b=2, c=3)
        mock.assert_any_call(4, 5, 6, 7)
        mock.assert_any_call(a=4, b=5, c=6, d=7)
        self.assertRaises(AssertionError, mock.assert_any_call,
                          1, b=3, c=2)
        # Expected call doesn't match the spec's signature
        with self.assertRaises(AssertionError) as cm:
            mock.assert_any_call(e=8)
        self.assertIsInstance(cm.exception.__cause__, TypeError)
    def test_mock_calls_create_autospec(self):
        """Autospecced functions (standalone or attached to an object)
        record their calls in mock_calls."""
        def f(a, b):
            pass
        obj = Iter()
        obj.f = f
        funcs = [
            create_autospec(f),
            create_autospec(obj).f
        ]
        for func in funcs:
            func(1, 2)
            func(3, 4)
            self.assertEqual(
                func.mock_calls, [call(1, 2), call(3, 4)]
            )
    #Issue21222
    def test_create_autospec_with_name(self):
        """create_autospec must honour an explicit name= (bpo-21222)."""
        m = mock.create_autospec(object(), name='sweet_func')
        self.assertIn('sweet_func', repr(m))
    def test_mock_add_spec(self):
        """mock_add_spec replaces the spec in place: newly allowed names
        work, previously allowed names stop working, and spec_set also
        restricts assignment."""
        class _One(object):
            one = 1
        class _Two(object):
            two = 2
        class Anything(object):
            one = two = three = 'four'
        klasses = [
            Mock, MagicMock, NonCallableMock, NonCallableMagicMock
        ]
        # also exercise mocks that were constructed with a spec already
        for Klass in list(klasses):
            klasses.append(lambda K=Klass: K(spec=Anything))
            klasses.append(lambda K=Klass: K(spec_set=Anything))
        for Klass in klasses:
            for kwargs in dict(), dict(spec_set=True):
                mock = Klass()
                #no error
                mock.one, mock.two, mock.three
        # specs can be classes or plain lists of attribute names
        for One, Two in [(_One, _Two), (['one'], ['two'])]:
            for kwargs in dict(), dict(spec_set=True):
                mock.mock_add_spec(One, **kwargs)
                mock.one
                self.assertRaises(
                    AttributeError, getattr, mock, 'two'
                )
                self.assertRaises(
                    AttributeError, getattr, mock, 'three'
                )
                if 'spec_set' in kwargs:
                    self.assertRaises(
                        AttributeError, setattr, mock, 'three', None
                    )
                # swapping the spec swaps which names are allowed
                mock.mock_add_spec(Two, **kwargs)
                self.assertRaises(
                    AttributeError, getattr, mock, 'one'
                )
                mock.two
                self.assertRaises(
                    AttributeError, getattr, mock, 'three'
                )
                if 'spec_set' in kwargs:
                    self.assertRaises(
                        AttributeError, setattr, mock, 'three', None
                    )
        # note that creating a mock, setting an instance attribute, and
        # *then* setting a spec doesn't work. Not the intended use case
    def test_mock_add_spec_magic_methods(self):
        """mock_add_spec also adds/removes magic-method support to match
        the new spec."""
        for Klass in MagicMock, NonCallableMagicMock:
            mock = Klass()
            int(mock)
            # object has no __int__, so int() stops working
            mock.mock_add_spec(object)
            self.assertRaises(TypeError, int, mock)
            mock = Klass()
            mock['foo']
            mock.__int__.return_value =4
            # int has __int__ but no __getitem__
            mock.mock_add_spec(int)
            self.assertEqual(int(mock), 4)
            self.assertRaises(TypeError, lambda: mock['foo'])
    def test_adding_child_mock(self):
        """Assigned child mocks are tracked only when they are anonymous
        and have no existing parent."""
        for Klass in NonCallableMock, Mock, MagicMock, NonCallableMagicMock:
            mock = Klass()
            mock.foo = Mock()
            mock.foo()
            self.assertEqual(mock.method_calls, [call.foo()])
            self.assertEqual(mock.mock_calls, [call.foo()])
            # a named mock is not adopted as a child
            mock = Klass()
            mock.bar = Mock(name='name')
            mock.bar()
            self.assertEqual(mock.method_calls, [])
            self.assertEqual(mock.mock_calls, [])
            # mock with an existing _new_parent but no name
            mock = Klass()
            mock.baz = MagicMock()()
            mock.baz()
            self.assertEqual(mock.method_calls, [])
            self.assertEqual(mock.mock_calls, [])
def test_adding_return_value_mock(self):
for Klass in Mock, MagicMock:
mock = Klass()
mock.return_value = MagicMock()
mock()()
self.assertEqual(mock.mock_calls, [call(), call()()])
    def test_manager_mock(self):
        """attach_mock lets one manager record the call order across
        several independently patched attributes."""
        class Foo(object):
            one = 'one'
            two = 'two'
        manager = Mock()
        p1 = patch.object(Foo, 'one')
        p2 = patch.object(Foo, 'two')
        mock_one = p1.start()
        self.addCleanup(p1.stop)
        mock_two = p2.start()
        self.addCleanup(p2.stop)
        manager.attach_mock(mock_one, 'one')
        manager.attach_mock(mock_two, 'two')
        # the manager records cross-attribute call order
        Foo.two()
        Foo.one()
        self.assertEqual(manager.mock_calls, [call.two(), call.one()])
    def test_magic_methods_mock_calls(self):
        """Magic methods assigned as mocks appear in mock_calls but are
        not counted as method_calls."""
        for Klass in Mock, MagicMock:
            m = Klass()
            m.__int__ = Mock(return_value=3)
            m.__float__ = MagicMock(return_value=3.0)
            int(m)
            float(m)
            self.assertEqual(m.mock_calls, [call.__int__(), call.__float__()])
            self.assertEqual(m.method_calls, [])
    def test_mock_parents(self):
        """Parent/name bookkeeping survives self-referencing return values,
        attribute aliasing and reset_mock() without corrupting repr()."""
        for Klass in Mock, MagicMock:
            m = Klass()
            original_repr = repr(m)
            # a mock returning itself must not rename/reparent itself
            m.return_value = m
            self.assertIs(m(), m)
            self.assertEqual(repr(m), original_repr)
            m.reset_mock()
            self.assertIs(m(), m)
            self.assertEqual(repr(m), original_repr)
            # aliasing a child keeps the child's original name
            m = Klass()
            m.b = m.a
            self.assertIn("name='mock.a'", repr(m.b))
            self.assertIn("name='mock.a'", repr(m.a))
            m.reset_mock()
            self.assertIn("name='mock.a'", repr(m.b))
            self.assertIn("name='mock.a'", repr(m.a))
            # a return-value cycle through an attribute keeps repr stable
            m = Klass()
            original_repr = repr(m)
            m.a = m()
            m.a.return_value = m
            self.assertEqual(repr(m), original_repr)
            self.assertEqual(repr(m.a()), original_repr)
    def test_attach_mock(self):
        """attach_mock renames the attached mock and records its calls on
        the parent, for every combination of mock classes."""
        classes = Mock, MagicMock, NonCallableMagicMock, NonCallableMock
        for Klass in classes:
            for Klass2 in classes:
                m = Klass()
                m2 = Klass2(name='foo')
                m.attach_mock(m2, 'bar')
                self.assertIs(m.bar, m2)
                # the attached mock is renamed after its new parent
                self.assertIn("name='mock.bar'", repr(m2))
                m.bar.baz(1)
                self.assertEqual(m.mock_calls, [call.bar.baz(1)])
                self.assertEqual(m.method_calls, [call.bar.baz(1)])
    def test_attach_mock_return_value(self):
        """A mock attached as 'return_value' becomes the call result and
        its calls are recorded through the parent."""
        classes = Mock, MagicMock, NonCallableMagicMock, NonCallableMock
        # only callable mocks can have a return value
        for Klass in Mock, MagicMock:
            for Klass2 in classes:
                m = Klass()
                m2 = Klass2(name='foo')
                m.attach_mock(m2, 'return_value')
                self.assertIs(m(), m2)
                self.assertIn("name='mock()'", repr(m2))
                m2.foo()
                self.assertEqual(m.mock_calls, call().foo().call_list())
    def test_attribute_deletion(self):
        """del on a mock attribute makes subsequent access raise
        AttributeError instead of recreating a child mock."""
        for mock in (Mock(), MagicMock(), NonCallableMagicMock(),
                     NonCallableMock()):
            self.assertTrue(hasattr(mock, 'm'))
            del mock.m
            self.assertFalse(hasattr(mock, 'm'))
            # deletion works even for attributes never accessed before
            del mock.f
            self.assertFalse(hasattr(mock, 'f'))
            self.assertRaises(AttributeError, getattr, mock, 'f')
def test_class_assignable(self):
for mock in Mock(), MagicMock():
self.assertNotIsInstance(mock, int)
mock.__class__ = int
self.assertIsInstance(mock, int)
mock.foo
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
|
hikelee/projector | refs/heads/master | admin/launcher/tests.py | 24123 | from django.test import TestCase
# Create your tests here.
|
dreamsxin/kbengine | refs/heads/master | kbe/res/scripts/common/Lib/test/test_codeop.py | 118 | """
Test cases for codeop.py
Nick Mathewson
"""
import unittest
from test.support import run_unittest, is_jython
from codeop import compile_command, PyCF_DONT_IMPLY_DEDENT
import io
if is_jython:
import sys
def unify_callables(d):
    """Replace every callable value in mapping *d* with True, in place.

    Lets two namespaces be compared for equality even when they hold
    distinct (but equivalent) function objects. Returns the mutated
    mapping for convenience.
    """
    for key, value in d.items():
        if hasattr(value, '__call__'):
            d[key] = True
    return d
class CodeopTests(unittest.TestCase):
    """Exercise codeop.compile_command: valid, incomplete and invalid input.

    On Jython the compiled result cannot be compared bytecode-for-bytecode
    with compile(), so assertValid falls back to executing both and
    comparing the resulting namespaces.
    """
    def assertValid(self, str, symbol='single'):
        '''succeed iff str is a valid piece of code'''
        if is_jython:
            code = compile_command(str, "<input>", symbol)
            self.assertTrue(code)
            if symbol == "single":
                # execute both compilations and compare the namespaces
                d,r = {},{}
                saved_stdout = sys.stdout
                sys.stdout = io.StringIO()
                try:
                    exec(code, d)
                    exec(compile(str,"<input>","single"), r)
                finally:
                    sys.stdout = saved_stdout
            elif symbol == 'eval':
                ctx = {'a': 2}
                d = { 'value': eval(code,ctx) }
                r = { 'value': eval(str,ctx) }
            self.assertEqual(unify_callables(r),unify_callables(d))
        else:
            # on CPython the code objects themselves must compare equal
            expected = compile(str, "<input>", symbol, PyCF_DONT_IMPLY_DEDENT)
            self.assertEqual(compile_command(str, "<input>", symbol), expected)
    def assertIncomplete(self, str, symbol='single'):
        '''succeed iff str is the start of a valid piece of code'''
        self.assertEqual(compile_command(str, symbol=symbol), None)
    def assertInvalid(self, str, symbol='single', is_syntax=1):
        '''succeed iff str is the start of an invalid piece of code'''
        try:
            compile_command(str,symbol=symbol)
            self.fail("No exception raised for invalid code")
        except SyntaxError:
            self.assertTrue(is_syntax)
        except OverflowError:
            self.assertTrue(not is_syntax)
    def test_valid(self):
        """Complete statements/expressions compile to a code object."""
        av = self.assertValid
        # special case
        if not is_jython:
            self.assertEqual(compile_command(""),
                             compile("pass", "<input>", 'single',
                                     PyCF_DONT_IMPLY_DEDENT))
            self.assertEqual(compile_command("\n"),
                             compile("pass", "<input>", 'single',
                                     PyCF_DONT_IMPLY_DEDENT))
        else:
            av("")
            av("\n")
        av("a = 1")
        av("\na = 1")
        av("a = 1\n")
        av("a = 1\n\n")
        av("\n\na = 1\n\n")
        av("def x():\n  pass\n")
        av("if 1:\n pass\n")
        av("\n\nif 1: pass\n")
        av("\n\nif 1: pass\n\n")
        av("def x():\n\n pass\n")
        av("def x():\n  pass\n  \n")
        av("def x():\n  pass\n \n")
        av("pass\n")
        av("3**3\n")
        av("if 9==3:\n   pass\nelse:\n   pass\n")
        av("if 1:\n pass\n if 1:\n  pass\n else:\n  pass\n")
        av("#a\n#b\na = 3\n")
        av("#a\n\n   \na=3\n")
        av("a=3\n\n")
        av("a = 9+ \\\n3")
        av("3**3","eval")
        av("(lambda z: \n z**3)","eval")
        av("9+ \\\n3","eval")
        av("9+ \\\n3\n","eval")
        av("\n\na**3","eval")
        av("\n \na**3","eval")
        av("#a\n#b\na**3","eval")
        av("\n\na = 1\n\n")
        av("\n\nif 1: a=1\n\n")
        av("if 1:\n pass\n if 1:\n  pass\n else:\n  pass\n")
        av("#a\n\n   \na=3\n\n")
        av("\n\na**3","eval")
        av("\n \na**3","eval")
        av("#a\n#b\na**3","eval")
        av("def f():\n try: pass\n finally: [x for x in (1,2)]\n")
        av("def f():\n pass\n#foo\n")
        av("@a.b.c\ndef f():\n pass\n")
    def test_incomplete(self):
        """Syntactically unfinished input returns None (more input wanted)."""
        ai = self.assertIncomplete
        ai("(a **")
        ai("(a,b,")
        ai("(a,b,(")
        ai("(a,b,(")
        ai("a = (")
        ai("a = {")
        ai("b + {")
        ai("if 9==3:\n   pass\nelse:")
        ai("if 9==3:\n   pass\nelse:\n")
        ai("if 9==3:\n   pass\nelse:\n   pass")
        ai("if 1:")
        ai("if 1:\n")
        ai("if 1:\n pass\n if 1:\n  pass\n else:")
        ai("if 1:\n pass\n if 1:\n  pass\n else:\n")
        ai("if 1:\n pass\n if 1:\n  pass\n else:\n  pass")
        ai("def x():")
        ai("def x():\n")
        ai("def x():\n\n")
        ai("def x():\n  pass")
        ai("def x():\n  pass\n ")
        ai("def x():\n  pass\n  ")
        ai("\n\ndef x():\n  pass")
        ai("a = 9+ \\")
        ai("a = 'a\\")
        ai("a = '''xy")
        ai("","eval")
        ai("\n","eval")
        ai("(","eval")
        ai("(\n\n\n","eval")
        ai("(9+","eval")
        ai("9+ \\","eval")
        ai("lambda z: \\","eval")
        ai("if True:\n if True:\n  if True:   \n")
        ai("@a(")
        ai("@a(b")
        ai("@a(b,")
        ai("@a(b,c")
        ai("@a(b,c,")
        ai("from a import (")
        ai("from a import (b")
        ai("from a import (b,")
        ai("from a import (b,c")
        ai("from a import (b,c,")
        ai("[");
        ai("[a");
        ai("[a,");
        ai("[a,b");
        ai("[a,b,");
        ai("{");
        ai("{a");
        ai("{a:");
        ai("{a:b");
        ai("{a:b,");
        ai("{a:b,c");
        ai("{a:b,c:");
        ai("{a:b,c:d");
        ai("{a:b,c:d,");
        ai("a(")
        ai("a(b")
        ai("a(b,")
        ai("a(b,c")
        ai("a(b,c,")
        ai("a[")
        ai("a[b")
        ai("a[b,")
        ai("a[b:")
        ai("a[b:c")
        ai("a[b:c:")
        ai("a[b:c:d")
        ai("def a(")
        ai("def a(b")
        ai("def a(b,")
        ai("def a(b,c")
        ai("def a(b,c,")
        ai("(")
        ai("(a")
        ai("(a,")
        ai("(a,b")
        ai("(a,b,")
        ai("if a:\n pass\nelif b:")
        ai("if a:\n pass\nelif b:\n pass\nelse:")
        ai("while a:")
        ai("while a:\n pass\nelse:")
        ai("for a in b:")
        ai("for a in b:\n pass\nelse:")
        ai("try:")
        ai("try:\n pass\nexcept:")
        ai("try:\n pass\nfinally:")
        ai("try:\n pass\nexcept:\n pass\nfinally:")
        ai("with a:")
        ai("with a as b:")
        ai("class a:")
        ai("class a(")
        ai("class a(b")
        ai("class a(b,")
        ai("class a():")
        ai("[x for")
        ai("[x for x in")
        ai("[x for x in (")
        ai("(x for")
        ai("(x for x in")
        ai("(x for x in (")
    def test_invalid(self):
        """Malformed input raises SyntaxError from compile_command."""
        ai = self.assertInvalid
        ai("a b")
        ai("a @")
        ai("a b @")
        ai("a ** @")
        ai("a = ")
        ai("a = 9 +")
        ai("def x():\n\npass\n")
        ai("\n\n if 1: pass\n\npass")
        ai("a = 9+ \\\n")
        ai("a = 'a\\ ")
        ai("a = 'a\\\n")
        ai("a = 1","eval")
        ai("a = (","eval")
        ai("]","eval")
        ai("())","eval")
        ai("[}","eval")
        ai("9+","eval")
        ai("lambda z:","eval")
        ai("a b","eval")
        ai("return 2.3")
        ai("if (a == 1 and b = 2): pass")
        ai("del 1")
        ai("del ()")
        ai("del (1,)")
        ai("del [1]")
        ai("del '1'")
        ai("[i for i in range(10)] = (1, 2, 3)")
    def test_filename(self):
        """The filename argument is propagated to the code object."""
        self.assertEqual(compile_command("a = 1\n", "abc").co_filename,
                         compile("a = 1\n", "abc", 'single').co_filename)
        self.assertNotEqual(compile_command("a = 1\n", "abc").co_filename,
                            compile("a = 1\n", "def", 'single').co_filename)
def test_main():
    """Entry point used by regrtest to run this module's tests."""
    run_unittest(CodeopTests)
if __name__ == "__main__":
    test_main()
|
jeeftor/alfredToday | refs/heads/master | src/workflow/notify.py | 31 | #!/usr/bin/env python
# encoding: utf-8
#
# Copyright (c) 2015 deanishe@deanishe.net
#
# MIT Licence. See http://opensource.org/licenses/MIT
#
# Created on 2015-11-26
#
# TODO: Exclude this module from test and code coverage in py2.6
"""
Post notifications via the macOS Notification Center. This feature
is only available on Mountain Lion (10.8) and later. It will
silently fail on older systems.
The main API is a single function, :func:`~workflow.notify.notify`.
It works by copying a simple application to your workflow's data
directory. It replaces the application's icon with your workflow's
icon and then calls the application to post notifications.
"""
from __future__ import print_function, unicode_literals
import os
import plistlib
import shutil
import subprocess
import sys
import tarfile
import tempfile
import uuid
import workflow
# Module-level singletons, created lazily by wf() and log().
_wf = None
_log = None
#: Available system sounds from System Preferences > Sound > Sound Effects
SOUNDS = (
    'Basso',
    'Blow',
    'Bottle',
    'Frog',
    'Funk',
    'Glass',
    'Hero',
    'Morse',
    'Ping',
    'Pop',
    'Purr',
    'Sosumi',
    'Submarine',
    'Tink',
)
def wf():
    """Return Workflow object for this module.

    Created lazily on first call and cached for the process lifetime.

    Returns:
        workflow.Workflow: Workflow object for current workflow.
    """
    global _wf
    if _wf is None:
        _wf = workflow.Workflow()
    return _wf
def log():
    """Return logger for this module.

    Created lazily from the shared Workflow object and cached.

    Returns:
        logging.Logger: Logger for this module.
    """
    global _log
    if _log is None:
        _log = wf().logger
    return _log
def notifier_program():
    """Return path to notifier applet executable.

    The applet lives inside the Notify.app bundle that
    :func:`install_notifier` extracts into the workflow's data directory;
    the path is returned whether or not the app is installed yet.

    Returns:
        unicode: Path to Notify.app ``applet`` executable.
    """
    return wf().datafile('Notify.app/Contents/MacOS/applet')
def notifier_icon_path():
    """Return path to icon file in installed Notify.app.

    This is the icon that :func:`install_notifier` overwrites with the
    workflow's own icon.

    Returns:
        unicode: Path to ``applet.icns`` within the app bundle.
    """
    return wf().datafile('Notify.app/Contents/Resources/applet.icns')
def install_notifier():
    """Extract ``Notify.app`` from the workflow to data directory.

    Changes the bundle ID of the installed app and gives it the
    workflow's icon.
    """
    archive = os.path.join(os.path.dirname(__file__), 'Notify.tgz')
    destdir = wf().datadir
    app_path = os.path.join(destdir, 'Notify.app')
    n = notifier_program()
    log().debug('installing Notify.app to %r ...', destdir)
    # z = zipfile.ZipFile(archive, 'r')
    # z.extractall(destdir)
    tgz = tarfile.open(archive, 'r:gz')
    tgz.extractall(destdir)
    assert os.path.exists(n), \
        'Notify.app could not be installed in %s' % destdir
    # Replace applet icon
    icon = notifier_icon_path()
    workflow_icon = wf().workflowfile('icon.png')
    if os.path.exists(icon):
        os.unlink(icon)
    png_to_icns(workflow_icon, icon)
    # Set file icon
    # PyObjC isn't available for 2.6, so this is 2.7 only. Actually,
    # none of this code will "work" on pre-10.8 systems. Let it run
    # until I figure out a better way of excluding this module
    # from coverage in py2.6.
    if sys.version_info >= (2, 7):  # pragma: no cover
        from AppKit import NSWorkspace, NSImage
        ws = NSWorkspace.sharedWorkspace()
        img = NSImage.alloc().init()
        img.initWithContentsOfFile_(icon)
        ws.setIcon_forFile_options_(img, app_path, 0)
    # Change bundle ID of installed app
    ip_path = os.path.join(app_path, 'Contents/Info.plist')
    # the random suffix keeps bundle IDs unique per workflow install, so
    # Notification Center attributes notifications to this workflow only
    bundle_id = '{0}.{1}'.format(wf().bundleid, uuid.uuid4().hex)
    # NOTE(review): plistlib.readPlist/writePlist are deprecated in
    # Python 3.4+ and removed in 3.9; fine for the Python 2 runtime this
    # module appears to target -- confirm before porting.
    data = plistlib.readPlist(ip_path)
    log().debug('changing bundle ID to %r', bundle_id)
    data['CFBundleIdentifier'] = bundle_id
    plistlib.writePlist(data, ip_path)
def validate_sound(sound):
    """Coerce ``sound`` to a valid system sound name.

    The comparison against :data:`SOUNDS` is case-insensitive; ``None``
    is returned for empty or unknown names. Sound names can be found
    in ``System Preferences > Sound > Sound Effects``.

    Args:
        sound (str): Name of system sound.

    Returns:
        str: Proper name of sound or ``None``.
    """
    if not sound:
        return None
    known = [name.lower() for name in SOUNDS]
    if sound.lower() not in known:
        return None
    # Title-case is correct for all system sounds as of macOS 10.11
    return sound.title()
def notify(title='', text='', sound=None):
    """Post notification via Notify.app helper.

    The applet is installed on first use; title, text and sound are
    handed to it through environment variables.

    Args:
        title (str, optional): Notification title.
        text (str, optional): Notification body text.
        sound (str, optional): Name of sound to play.

    Raises:
        ValueError: Raised if both ``title`` and ``text`` are empty.

    Returns:
        bool: ``True`` if notification was posted, else ``False``.
    """
    if title == text == '':
        raise ValueError('Empty notification')
    # unknown sound names degrade to silence rather than failing
    sound = validate_sound(sound) or ''
    n = notifier_program()
    if not os.path.exists(n):
        install_notifier()
    env = os.environ.copy()
    enc = 'utf-8'
    # the applet reads its parameters from these environment variables
    env['NOTIFY_TITLE'] = title.encode(enc)
    env['NOTIFY_MESSAGE'] = text.encode(enc)
    env['NOTIFY_SOUND'] = sound.encode(enc)
    cmd = [n]
    retcode = subprocess.call(cmd, env=env)
    if retcode == 0:
        return True
    log().error('Notify.app exited with status {0}.'.format(retcode))
    return False
def convert_image(inpath, outpath, size):
    """Convert an image file using the macOS ``sips`` tool.

    Args:
        inpath (str): Path of source file.
        outpath (str): Path to destination file.
        size (int): Width and height of destination image in pixels.

    Raises:
        RuntimeError: Raised if ``sips`` exits with non-zero status.
    """
    dimension = str(size)
    cmd = [b'sips', b'-z', dimension, dimension, inpath,
           b'--out', outpath]
    # silence sips' chatter; only the exit status matters
    with open(os.devnull, 'w') as devnull:
        status = subprocess.call(cmd, stdout=devnull,
                                 stderr=subprocess.STDOUT)
    if status != 0:
        raise RuntimeError('sips exited with %d' % status)
def png_to_icns(png_path, icns_path):
    """Convert PNG file to ICNS using ``iconutil``.

    Create an iconset from the source PNG file. Generate PNG files
    in each size required by macOS, then call ``iconutil`` to turn
    them into a single ICNS file.

    Args:
        png_path (str): Path to source PNG file.
        icns_path (str): Path to destination ICNS file.

    Raises:
        RuntimeError: Raised if ``iconutil`` or ``sips`` fail.
    """
    # work in a throwaway directory inside the data dir; always removed
    tempdir = tempfile.mkdtemp(prefix='aw-', dir=wf().datadir)
    try:
        iconset = os.path.join(tempdir, 'Icon.iconset')
        assert not os.path.exists(iconset), \
            'iconset already exists: ' + iconset
        os.makedirs(iconset)
        # Copy source icon to icon set and generate all the other
        # sizes needed
        configs = []
        for i in (16, 32, 128, 256, 512):
            configs.append(('icon_{0}x{0}.png'.format(i), i))
            configs.append((('icon_{0}x{0}@2x.png'.format(i), i * 2)))
        # the source PNG is used unscaled for these two slots
        shutil.copy(png_path, os.path.join(iconset, 'icon_256x256.png'))
        shutil.copy(png_path, os.path.join(iconset, 'icon_128x128@2x.png'))
        for name, size in configs:
            outpath = os.path.join(iconset, name)
            if os.path.exists(outpath):
                continue
            convert_image(png_path, outpath, size)
        cmd = [
            b'iconutil',
            b'-c', b'icns',
            b'-o', icns_path,
            iconset]
        retcode = subprocess.call(cmd)
        if retcode != 0:
            raise RuntimeError('iconset exited with %d' % retcode)
        assert os.path.exists(icns_path), \
            'generated ICNS file not found: ' + repr(icns_path)
    finally:
        try:
            shutil.rmtree(tempdir)
        except OSError:  # pragma: no cover
            pass
if __name__ == '__main__':  # pragma: nocover
    # Simple command-line script to test module with
    # This won't work on 2.6, as `argparse` isn't available
    # by default.
    import argparse
    from unicodedata import normalize
    def ustr(s):
        """Coerce `s` to normalised Unicode."""
        return normalize('NFD', s.decode('utf-8'))
    p = argparse.ArgumentParser()
    p.add_argument('-p', '--png', help="PNG image to convert to ICNS.")
    p.add_argument('-l', '--list-sounds', help="Show available sounds.",
                   action='store_true')
    p.add_argument('-t', '--title',
                   help="Notification title.", type=ustr,
                   default='')
    p.add_argument('-s', '--sound', type=ustr,
                   help="Optional notification sound.", default='')
    p.add_argument('text', type=ustr,
                   help="Notification body text.", default='', nargs='?')
    o = p.parse_args()
    # List available sounds
    if o.list_sounds:
        for sound in SOUNDS:
            print(sound)
        sys.exit(0)
    # Convert PNG to ICNS
    if o.png:
        icns = os.path.join(
            os.path.dirname(o.png),
            os.path.splitext(os.path.basename(o.png))[0] + '.icns')
        print('converting {0!r} to {1!r} ...'.format(o.png, icns),
              file=sys.stderr)
        assert not os.path.exists(icns), \
            'destination file already exists: ' + icns
        png_to_icns(o.png, icns)
        sys.exit(0)
    # Post notification
    if o.title == o.text == '':
        print('ERROR: empty notification.', file=sys.stderr)
        sys.exit(1)
    else:
        notify(o.title, o.text, o.sound)
|
TRIOrganization/TRI | refs/heads/master | TRI/main/migrations/0018_artificialintellignece_pub_date.py | 1 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-06-10 18:37
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
    """Add a 'pub_date' timestamp field to the artificialintellignece model.

    Auto-generated by Django. The one-off datetime default backfills
    existing rows; preserve_default=False drops it from the schema
    afterwards.
    """
    dependencies = [
        ('main', '0017_networking_pub_date'),
    ]
    operations = [
        migrations.AddField(
            model_name='artificialintellignece',
            name='pub_date',
            field=models.DateTimeField(default=datetime.datetime(2017, 6, 10, 18, 37, 2, 690591, tzinfo=utc), verbose_name='date published'),
            preserve_default=False,
        ),
    ]
|
bdang2012/taiga-back-casting | refs/heads/branch_casting | taiga/auth/api.py | 1 | # Copyright (C) 2014-2015 Andrey Antukh <niwi@niwi.be>
# Copyright (C) 2014-2015 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2014-2015 David Barragán <bameda@dbarragan.com>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from functools import partial
from enum import Enum
from django.utils.translation import ugettext as _
from django.conf import settings
from taiga.base.api import serializers
from taiga.base.api import viewsets
from taiga.base.decorators import list_route
from taiga.base import exceptions as exc
from taiga.base import response
from .serializers import PublicRegisterSerializer
from .serializers import PrivateRegisterForExistingUserSerializer
from .serializers import PrivateRegisterForNewUserSerializer
from .services import private_register_for_existing_user
from .services import private_register_for_new_user
from .services import public_register
from .services import make_auth_response_data
from .services import get_auth_plugins
from .permissions import AuthPermission
def _parse_data(data:dict, *, cls):
"""
Generic function for parse user data using
specified serializer on `cls` keyword parameter.
Raises: RequestValidationError exception if
some errors found when data is validated.
Returns the parsed data.
"""
serializer = cls(data=data)
if not serializer.is_valid():
raise exc.RequestValidationError(serializer.errors)
return serializer.data
# Ready-made parsers: _parse_data bound to each concrete register serializer.

# Parse public register data
parse_public_register_data = partial(_parse_data, cls=PublicRegisterSerializer)

# Parse private register data for existing user
parse_private_register_for_existing_user_data = \
    partial(_parse_data, cls=PrivateRegisterForExistingUserSerializer)

# Parse private register data for new user
parse_private_register_for_new_user_data = \
    partial(_parse_data, cls=PrivateRegisterForNewUserSerializer)
class RegisterTypeEnum(Enum):
    """Discriminates the two registration flows handled by the API."""
    # Registering a brand-new user account.
    new_user = 1
    # Attaching an invitation to an already-existing user account.
    existing_user = 2
def parse_register_type(userdata:dict) -> "RegisterTypeEnum":
    """
    Parse user data and detect which register type it requests.

    Returns a RegisterTypeEnum member (the original annotation said
    ``str``, which was wrong -- this never returns a string).

    Raises: RequestValidationError if the ``existing`` flag is missing
    or not a boolean.
    """
    # Ad-hoc inner serializer so we don't parse the raw dict by hand;
    # only the "existing" boolean flag is needed here.
    class _serializer(serializers.Serializer):
        existing = serializers.BooleanField()

    instance = _serializer(data=userdata)
    if not instance.is_valid():
        raise exc.RequestValidationError(instance.errors)

    if instance.data["existing"]:
        return RegisterTypeEnum.existing_user
    return RegisterTypeEnum.new_user
class AuthViewSet(viewsets.ViewSet):
    """Authentication endpoints: registration and login (``/api/v1/auth``)."""

    permission_classes = (AuthPermission,)

    def _public_register(self, request):
        """Open (self-service) registration; gated by settings."""
        if not settings.PUBLIC_REGISTER_ENABLED:
            raise exc.BadRequest(_("Public register is disabled."))

        try:
            data = parse_public_register_data(request.DATA)
            user = public_register(**data)
        except exc.IntegrityError as e:
            raise exc.BadRequest(e.detail)

        data = make_auth_response_data(user)
        return response.Created(data)

    def _private_register(self, request):
        """Invitation-based registration, for a new or an existing user."""
        register_type = parse_register_type(request.DATA)

        if register_type is RegisterTypeEnum.existing_user:
            data = parse_private_register_for_existing_user_data(request.DATA)
            user = private_register_for_existing_user(**data)
        else:
            data = parse_private_register_for_new_user_data(request.DATA)
            user = private_register_for_new_user(**data)

        data = make_auth_response_data(user)
        return response.Created(data)

    @list_route(methods=["POST"])
    def register(self, request, **kwargs):
        self.check_permissions(request, 'register', None)

        # Renamed from `type` so we do not shadow the builtin.
        register_type = request.DATA.get("type", None)
        if register_type == "public":
            return self._public_register(request)
        elif register_type == "private":
            return self._private_register(request)
        raise exc.BadRequest(_("invalid register type"))

    # Login view: /api/v1/auth
    def create(self, request, **kwargs):
        self.check_permissions(request, 'create', None)

        auth_plugins = get_auth_plugins()

        login_type = request.DATA.get("type", None)
        # NOTE(review): removed stray debug `print(request.DATA)` -- it
        # leaked the whole login payload (including the plaintext
        # password) to stdout/server logs.
        if login_type in auth_plugins:
            data = auth_plugins[login_type]['login_func'](request)
            return response.Ok(data)

        raise exc.BadRequest(_("invalid login type"))
|
azureplus/chromium_depot_tools | refs/heads/master | third_party/pylint/lint.py | 46 | # Copyright (c) 2003-2014 LOGILAB S.A. (Paris, FRANCE).
# http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
""" %prog [options] module_or_package
Check that a module satisfies a coding standard (and more !).
%prog --help
Display this help message and exit.
%prog --help-msg <msg-id>[,<msg-id>]
Display help messages about given message identifiers and exit.
"""
from __future__ import print_function
import collections
import contextlib
import itertools
import operator
import os
try:
import multiprocessing
except ImportError:
multiprocessing = None
import sys
import tokenize
import warnings
import astroid
from astroid.__pkginfo__ import version as astroid_version
from astroid import modutils
from logilab.common import configuration
from logilab.common import optik_ext
from logilab.common import interface
from logilab.common import textutils
from logilab.common import ureports
from logilab.common.__pkginfo__ import version as common_version
import six
from pylint import checkers
from pylint import interfaces
from pylint import reporters
from pylint import utils
from pylint import config
from pylint.__pkginfo__ import version
MANAGER = astroid.MANAGER
def _get_new_args(message):
location = (
message.abspath,
message.path,
message.module,
message.obj,
message.line,
message.column,
)
return (
message.msg_id,
message.symbol,
location,
message.msg,
message.confidence,
)
def _get_python_path(filepath):
dirname = os.path.realpath(os.path.expanduser(filepath))
if not os.path.isdir(dirname):
dirname = os.path.dirname(dirname)
while True:
if not os.path.exists(os.path.join(dirname, "__init__.py")):
return dirname
old_dirname = dirname
dirname = os.path.dirname(dirname)
if old_dirname == dirname:
return os.getcwd()
def _merge_stats(stats):
    """Combine an iterable of stats dicts into one: dict-valued entries
    are merged key-wise, scalar entries (counters) are summed."""
    merged = {}
    for single in stats:
        for key, value in six.iteritems(single):
            if key not in merged:
                merged[key] = value
                continue
            if isinstance(value, dict):
                merged[key].update(value)
            else:
                merged[key] = merged[key] + value
    return merged
# Python Linter class #########################################################
MSGS = {
'F0001': ('%s',
'fatal',
'Used when an error occurred preventing the analysis of a \
module (unable to find it for instance).'),
'F0002': ('%s: %s',
'astroid-error',
'Used when an unexpected error occurred while building the '
'Astroid representation. This is usually accompanied by a '
'traceback. Please report such errors !'),
'F0003': ('ignored builtin module %s',
'ignored-builtin-module',
'Used to indicate that the user asked to analyze a builtin '
'module which has been skipped.'),
'F0010': ('error while code parsing: %s',
'parse-error',
'Used when an exception occured while building the Astroid '
'representation which could be handled by astroid.'),
'I0001': ('Unable to run raw checkers on built-in module %s',
'raw-checker-failed',
'Used to inform that a built-in module has not been checked '
'using the raw checkers.'),
'I0010': ('Unable to consider inline option %r',
'bad-inline-option',
'Used when an inline option is either badly formatted or can\'t '
'be used inside modules.'),
'I0011': ('Locally disabling %s (%s)',
'locally-disabled',
'Used when an inline option disables a message or a messages '
'category.'),
'I0012': ('Locally enabling %s (%s)',
'locally-enabled',
'Used when an inline option enables a message or a messages '
'category.'),
'I0013': ('Ignoring entire file',
'file-ignored',
'Used to inform that the file will not be checked'),
'I0020': ('Suppressed %s (from line %d)',
'suppressed-message',
'A message was triggered on a line, but suppressed explicitly '
'by a disable= comment in the file. This message is not '
'generated for messages that are ignored due to configuration '
'settings.'),
'I0021': ('Useless suppression of %s',
'useless-suppression',
'Reported when a message is explicitly disabled for a line or '
'a block of code, but never triggered.'),
'I0022': ('Pragma "%s" is deprecated, use "%s" instead',
'deprecated-pragma',
'Some inline pylint options have been renamed or reworked, '
'only the most recent form should be used. '
'NOTE:skip-all is only available with pylint >= 0.26',
{'old_names': [('I0014', 'deprecated-disable-all')]}),
'E0001': ('%s',
'syntax-error',
'Used when a syntax error is raised for a module.'),
'E0011': ('Unrecognized file option %r',
'unrecognized-inline-option',
'Used when an unknown inline option is encountered.'),
'E0012': ('Bad option value %r',
'bad-option-value',
'Used when a bad value for an inline option is encountered.'),
}
def _deprecated_option(shortname, opt_type):
def _warn_deprecated(option, optname, *args): # pylint: disable=unused-argument
sys.stderr.write('Warning: option %s is deprecated and ignored.\n' % (optname,))
return {'short': shortname, 'help': 'DEPRECATED', 'hide': True,
'type': opt_type, 'action': 'callback', 'callback': _warn_deprecated}
if multiprocessing is not None:
    class ChildLinter(multiprocessing.Process): # pylint: disable=no-member
        """Worker process used by --jobs: lints modules received over a
        queue and sends picklable results back to the parent."""

        def run(self):
            # `_args` is the (tasks_queue, results_queue, config-dict)
            # tuple the parent passed to the Process(args=...) constructor.
            tasks_queue, results_queue, self._config = self._args # pylint: disable=no-member

            # Child does not parallelize any further.
            self._config["jobs"] = 1

            # Run linter for received files/modules until the 'STOP'
            # sentinel arrives.
            for file_or_module in iter(tasks_queue.get, 'STOP'):
                result = self._run_linter(file_or_module[0])
                try:
                    results_queue.put(result)
                except Exception as ex:
                    # Result could not be pickled/sent; report and send an
                    # empty dict so the parent's result count stays in sync.
                    print("internal error with sending report for module %s" % file_or_module, file=sys.stderr)
                    print(ex, file=sys.stderr)
                    results_queue.put({})

        def _run_linter(self, file_or_module):
            """Lint one file/module with a fresh, parent-configured linter."""
            linter = PyLinter()

            # Register standard checkers.
            linter.load_default_plugins()
            # Load command line plugins.
            # TODO linter.load_plugin_modules(self._plugins)

            linter.load_configuration(**self._config)
            linter.set_reporter(reporters.CollectingReporter())

            # Run the checks.
            linter.check(file_or_module)

            # Flatten messages into plain tuples so the result is picklable.
            msgs = [_get_new_args(m) for m in linter.reporter.messages]
            return (file_or_module, linter.file_state.base_name, linter.current_name,
                    msgs, linter.stats, linter.msg_status)
class PyLinter(configuration.OptionsManagerMixIn,
utils.MessagesHandlerMixIn,
utils.ReportsHandlerMixIn,
checkers.BaseTokenChecker):
"""lint Python modules using external checkers.
This is the main checker controlling the other ones and the reports
generation. It is itself both a raw checker and an astroid checker in order
to:
* handle message activation / deactivation at the module level
* handle some basic but necessary stats'data (number of classes, methods...)
IDE plugins developpers: you may have to call
`astroid.builder.MANAGER.astroid_cache.clear()` accross run if you want
to ensure the latest code version is actually checked.
"""
__implements__ = (interfaces.ITokenChecker, )
name = 'master'
priority = 0
level = 0
msgs = MSGS
    @staticmethod
    def make_options():
        """Return PyLinter's own option definitions: a tuple of
        ``(option-name, optdict)`` pairs consumed by the options manager."""
        return (('ignore',
                 {'type' : 'csv', 'metavar' : '<file>[,<file>...]',
                  'dest' : 'black_list', 'default' : ('CVS',),
                  'help' : 'Add files or directories to the blacklist. '
                           'They should be base names, not paths.'}),
                ('persistent',
                 {'default': True, 'type' : 'yn', 'metavar' : '<y_or_n>',
                  'level': 1,
                  'help' : 'Pickle collected data for later comparisons.'}),
                ('load-plugins',
                 {'type' : 'csv', 'metavar' : '<modules>', 'default' : (),
                  'level': 1,
                  'help' : 'List of plugins (as comma separated values of '
                           'python modules names) to load, usually to register '
                           'additional checkers.'}),
                ('output-format',
                 {'default': 'text', 'type': 'string', 'metavar' : '<format>',
                  'short': 'f',
                  'group': 'Reports',
                  'help' : 'Set the output format. Available formats are text,'
                           ' parseable, colorized, msvs (visual studio) and html. You '
                           'can also give a reporter class, eg mypackage.mymodule.'
                           'MyReporterClass.'}),
                ('files-output',
                 {'default': 0, 'type' : 'yn', 'metavar' : '<y_or_n>',
                  'group': 'Reports', 'level': 1,
                  'help' : 'Put messages in a separate file for each module / '
                           'package specified on the command line instead of printing '
                           'them on stdout. Reports (if any) will be written in a file '
                           'name "pylint_global.[txt|html]".'}),
                ('reports',
                 {'default': 1, 'type' : 'yn', 'metavar' : '<y_or_n>',
                  'short': 'r',
                  'group': 'Reports',
                  'help' : 'Tells whether to display a full report or only the '
                           'messages'}),
                ('evaluation',
                 {'type' : 'string', 'metavar' : '<python_expression>',
                  'group': 'Reports', 'level': 1,
                  'default': '10.0 - ((float(5 * error + warning + refactor + '
                             'convention) / statement) * 10)',
                  'help' : 'Python expression which should return a note less '
                           'than 10 (10 is the highest note). You have access '
                           'to the variables errors warning, statement which '
                           'respectively contain the number of errors / '
                           'warnings messages and the total number of '
                           'statements analyzed. This is used by the global '
                           'evaluation report (RP0004).'}),
                ('comment',
                 {'default': 0, 'type' : 'yn', 'metavar' : '<y_or_n>',
                  'group': 'Reports', 'level': 1,
                  'help' : 'Add a comment according to your evaluation note. '
                           'This is used by the global evaluation report (RP0004).'}),
                ('confidence',
                 {'type' : 'multiple_choice', 'metavar': '<levels>',
                  'default': '',
                  'choices': [c.name for c in interfaces.CONFIDENCE_LEVELS],
                  'group': 'Messages control',
                  'help' : 'Only show warnings with the listed confidence levels.'
                           ' Leave empty to show all. Valid levels: %s' % (
                               ', '.join(c.name for c in interfaces.CONFIDENCE_LEVELS),)}),
                ('enable',
                 {'type' : 'csv', 'metavar': '<msg ids>',
                  'short': 'e',
                  'group': 'Messages control',
                  'help' : 'Enable the message, report, category or checker with the '
                           'given id(s). You can either give multiple identifier '
                           'separated by comma (,) or put this option multiple time. '
                           'See also the "--disable" option for examples. '}),
                ('disable',
                 {'type' : 'csv', 'metavar': '<msg ids>',
                  'short': 'd',
                  'group': 'Messages control',
                  'help' : 'Disable the message, report, category or checker '
                           'with the given id(s). You can either give multiple identifiers'
                           ' separated by comma (,) or put this option multiple times '
                           '(only on the command line, not in the configuration file '
                           'where it should appear only once).'
                           'You can also use "--disable=all" to disable everything first '
                           'and then reenable specific checks. For example, if you want '
                           'to run only the similarities checker, you can use '
                           '"--disable=all --enable=similarities". '
                           'If you want to run only the classes checker, but have no '
                           'Warning level messages displayed, use'
                           '"--disable=all --enable=classes --disable=W"'}),
                ('msg-template',
                 {'type' : 'string', 'metavar': '<template>',
                  'group': 'Reports',
                  'help' : ('Template used to display messages. '
                            'This is a python new-style format string '
                            'used to format the message information. '
                            'See doc for all details')
                 }),
                # Deprecated options kept so old command lines still parse.
                ('include-ids', _deprecated_option('i', 'yn')),
                ('symbols', _deprecated_option('s', 'yn')),
                ('jobs',
                 {'type' : 'int', 'metavar': '<n-processes>',
                  'short': 'j',
                  'default': 1,
                  'help' : '''Use multiple processes to speed up Pylint.''',
                 }),
                ('unsafe-load-any-extension',
                 {'type': 'yn', 'metavar': '<yn>', 'default': False, 'hide': True,
                  'help': ('Allow loading of arbitrary C extensions. Extensions'
                           ' are imported into the active Python interpreter and'
                           ' may run arbitrary code.')}),
                ('extension-pkg-whitelist',
                 {'type': 'csv', 'metavar': '<pkg[,pkg]>', 'default': [],
                  'help': ('A comma-separated list of package or module names'
                           ' from where C extensions may be loaded. Extensions are'
                           ' loading into the active Python interpreter and may run'
                           ' arbitrary code')}
                ),
               )
option_groups = (
('Messages control', 'Options controling analysis messages'),
('Reports', 'Options related to output formating and reporting'),
)
    def __init__(self, options=(), reporter=None, option_groups=(),
                 pylintrc=None):
        """Initialize the linter.

        :param options: extra (name, optdict) option definitions from the caller
        :param reporter: optional reporter instance to use for output
        :param option_groups: extra option group definitions
        :param pylintrc: path to the configuration file (defaults to
            config.PYLINTRC)
        """
        # some stuff has to be done before ancestors initialization...
        #
        # messages store / checkers / reporter / astroid manager
        self.msgs_store = utils.MessagesStore()
        self.reporter = None
        self._reporter_name = None
        self._reporters = {}
        self._checkers = collections.defaultdict(list)
        self._pragma_lineno = {}
        self._ignore_file = False
        # visit variables
        self.file_state = utils.FileState()
        self.current_name = None
        self.current_file = None
        self.stats = None
        # init options: caller-provided options come first so they win.
        self._external_opts = options
        self.options = options + PyLinter.make_options()
        self.option_groups = option_groups + PyLinter.option_groups
        self._options_methods = {
            'enable': self.enable,
            'disable': self.disable}
        # Backward-compat pragma names (enable-msg/disable-msg).
        self._bw_options_methods = {'disable-msg': self.disable,
                                    'enable-msg': self.enable}
        full_version = '%%prog %s, \nastroid %s, common %s\nPython %s' % (
            version, astroid_version, common_version, sys.version)
        # Mixin initialization order matters: options manager first, then
        # messages/reports handlers, then the base checker.
        configuration.OptionsManagerMixIn.__init__(
            self, usage=__doc__,
            version=full_version,
            config_file=pylintrc or config.PYLINTRC)
        utils.MessagesHandlerMixIn.__init__(self)
        utils.ReportsHandlerMixIn.__init__(self)
        checkers.BaseTokenChecker.__init__(self)
        # provided reports
        self.reports = (('RP0001', 'Messages by category',
                         report_total_messages_stats),
                        ('RP0002', '% errors / warnings by module',
                         report_messages_by_module_stats),
                        ('RP0003', 'Messages',
                         report_messages_stats),
                        ('RP0004', 'Global evaluation',
                         self.report_evaluation),
                       )
        # The linter is itself a checker (for inline options etc.).
        self.register_checker(self)
        self._dynamic_plugins = set()
        self.load_provider_defaults()
        if reporter:
            self.set_reporter(reporter)
    def load_default_plugins(self):
        """Register the built-in checkers and reporters."""
        checkers.initialize(self)
        reporters.initialize(self)
        # Make sure to load the default reporter, because
        # the --output-format option may have been parsed before the
        # reporter classes were registered.
        if not self.reporter:
            self._load_reporter()
def load_plugin_modules(self, modnames):
"""take a list of module names which are pylint plugins and load
and register them
"""
for modname in modnames:
if modname in self._dynamic_plugins:
continue
self._dynamic_plugins.add(modname)
module = modutils.load_module_from_name(modname)
module.register(self)
def _load_reporter(self):
name = self._reporter_name.lower()
if name in self._reporters:
self.set_reporter(self._reporters[name]())
else:
qname = self._reporter_name
module = modutils.load_module_from_name(
modutils.get_module_part(qname))
class_name = qname.split('.')[-1]
reporter_class = getattr(module, class_name)
self.set_reporter(reporter_class())
    def set_reporter(self, reporter):
        """set the reporter used to display messages and reports"""
        self.reporter = reporter
        # Back-link so the reporter can query linter state while reporting.
        reporter.linter = self
    def set_option(self, optname, value, action=None, optdict=None):
        """overridden from configuration.OptionsProviderMixin to handle some
        special options

        'enable'/'disable' (and their deprecated -msg forms) are routed to
        the enable/disable methods instead of being stored; 'output-format'
        triggers (re)loading the reporter class.
        """
        if optname in self._options_methods or \
                optname in self._bw_options_methods:
            if value:
                try:
                    meth = self._options_methods[optname]
                except KeyError:
                    # Deprecated enable-msg/disable-msg spelling.
                    meth = self._bw_options_methods[optname]
                    warnings.warn('%s is deprecated, replace it by %s' % (
                        optname, optname.split('-')[0]),
                                  DeprecationWarning)
                value = optik_ext.check_csv(None, optname, value)
                if isinstance(value, (list, tuple)):
                    for _id in value:
                        meth(_id, ignore_unknown=True)
                else:
                    meth(value)
            return # no need to call set_option, disable/enable methods do it
        elif optname == 'output-format':
            self._reporter_name = value
            # If the reporters are already available, load
            # the reporter class.
            if self._reporters:
                self._load_reporter()

        try:
            checkers.BaseTokenChecker.set_option(self, optname,
                                                 value, action, optdict)
        except configuration.UnsupportedAction:
            print('option %s can\'t be read from config file' % \
                  optname, file=sys.stderr)
    def register_reporter(self, reporter_class):
        # Index the reporter class by its symbolic name (e.g. "text",
        # "html") so --output-format can resolve it later.
        self._reporters[reporter_class.name] = reporter_class
def report_order(self):
reports = sorted(self._reports, key=lambda x: getattr(x, 'name', ''))
try:
# Remove the current reporter and add it
# at the end of the list.
reports.pop(reports.index(self))
except ValueError:
pass
else:
reports.append(self)
return reports
# checkers manipulation methods ############################################
    def register_checker(self, checker):
        """register a new checker

        checker is an object implementing IRawChecker or / and IAstroidChecker

        Registers the checker's reports and options, its messages (if it
        declares any), and loads its option defaults.
        """
        assert checker.priority <= 0, 'checker priority can\'t be >= 0'
        self._checkers[checker.name].append(checker)
        for r_id, r_title, r_cb in checker.reports:
            self.register_report(r_id, r_title, r_cb, checker)
        self.register_options_provider(checker)
        if hasattr(checker, 'msgs'):
            self.msgs_store.register_messages(checker)
        checker.load_defaults()

        # Register the checker, but disable all of its messages.
        # TODO(cpopa): we should have a better API for this.
        if not getattr(checker, 'enabled', True):
            self.disable(checker.name)
def disable_noerror_messages(self):
for msgcat, msgids in six.iteritems(self.msgs_store._msgs_by_category):
if msgcat == 'E':
for msgid in msgids:
self.enable(msgid)
else:
for msgid in msgids:
self.disable(msgid)
def disable_reporters(self):
"""disable all reporters"""
for reporters in six.itervalues(self._reports):
for report_id, _, _ in reporters:
self.disable_report(report_id)
    def error_mode(self):
        """error mode: enable only errors; no reports, no persistent"""
        self.disable_noerror_messages()
        # The miscellaneous checker (TODO/FIXME notes) is pure noise here.
        self.disable('miscellaneous')
        self.set_option('reports', False)
        self.set_option('persistent', False)
# block level option handling #############################################
#
# see func_block_disable_msg.py test case for expected behaviour
    def process_tokens(self, tokens):
        """process tokens from the current module to search for module/block
        level options

        Recognized inline pragmas (in comments): disable/enable (and the
        deprecated disable-msg/enable-msg), plus the whole-file forms
        disable-all / skip-file / disable=all.
        """
        control_pragmas = {'disable', 'enable'}
        for (tok_type, content, start, _, _) in tokens:
            if tok_type != tokenize.COMMENT:
                continue
            match = utils.OPTION_RGX.search(content)
            if match is None:
                continue
            # Whole-file suppression pragmas short-circuit everything.
            if match.group(1).strip() == "disable-all" or \
                    match.group(1).strip() == 'skip-file':
                if match.group(1).strip() == "disable-all":
                    self.add_message('deprecated-pragma', line=start[0],
                                     args=('disable-all', 'skip-file'))
                self.add_message('file-ignored', line=start[0])
                self._ignore_file = True
                return
            try:
                opt, value = match.group(1).split('=', 1)
            except ValueError:
                self.add_message('bad-inline-option', args=match.group(1).strip(),
                                 line=start[0])
                continue
            opt = opt.strip()
            if opt in self._options_methods or opt in self._bw_options_methods:
                try:
                    meth = self._options_methods[opt]
                except KeyError:
                    meth = self._bw_options_methods[opt]
                    # found a "(dis|en)able-msg" pragma deprecated suppresssion
                    self.add_message('deprecated-pragma', line=start[0], args=(opt, opt.replace('-msg', '')))
                for msgid in textutils.splitstrip(value):
                    # Add the line where a control pragma was encountered.
                    if opt in control_pragmas:
                        self._pragma_lineno[msgid] = start[0]

                    try:
                        # "disable=all" behaves like skip-file.
                        if (opt, msgid) == ('disable', 'all'):
                            self.add_message('deprecated-pragma', line=start[0], args=('disable=all', 'skip-file'))
                            self.add_message('file-ignored', line=start[0])
                            self._ignore_file = True
                            return
                        meth(msgid, 'module', start[0])
                    except utils.UnknownMessage:
                        self.add_message('bad-option-value', args=msgid, line=start[0])
            else:
                self.add_message('unrecognized-inline-option', args=opt, line=start[0])
# code checking methods ###################################################
def get_checkers(self):
"""return all available checkers as a list"""
return [self] + [c for checkers in six.itervalues(self._checkers)
for c in checkers if c is not self]
def prepare_checkers(self):
"""return checkers needed for activated messages and reports"""
if not self.config.reports:
self.disable_reporters()
# get needed checkers
neededcheckers = [self]
for checker in self.get_checkers()[1:]:
# fatal errors should not trigger enable / disabling a checker
messages = set(msg for msg in checker.msgs
if msg[0] != 'F' and self.is_message_enabled(msg))
if (messages or
any(self.report_is_enabled(r[0]) for r in checker.reports)):
neededcheckers.append(checker)
# Sort checkers by priority
neededcheckers = sorted(neededcheckers,
key=operator.attrgetter('priority'),
reverse=True)
return neededcheckers
def should_analyze_file(self, modname, path): # pylint: disable=unused-argument, no-self-use
"""Returns whether or not a module should be checked.
This implementation returns True for all python source file, indicating
that all files should be linted.
Subclasses may override this method to indicate that modules satisfying
certain conditions should not be linted.
:param str modname: The name of the module to be checked.
:param str path: The full path to the source code of the module.
:returns: True if the module should be checked.
:rtype: bool
"""
return path.endswith('.py')
    def check(self, files_or_modules):
        """main checking entry: check a list of files or modules from their
        name.

        Runs sequentially when --jobs=1, otherwise fans the work out to
        child processes.
        """
        # initialize msgs_state now that all messages have been registered into
        # the store
        for msg in self.msgs_store.messages:
            if not msg.may_be_emitted():
                self._msgs_state[msg.msgid] = False

        if not isinstance(files_or_modules, (list, tuple)):
            files_or_modules = (files_or_modules,)

        if self.config.jobs == 1:
            with fix_import_path(files_or_modules):
                self._do_check(files_or_modules)
        else:
            # Hack that permits running pylint, on Windows, with -m switch
            # and with --jobs, as in 'python -2 -m pylint .. --jobs'.
            # For more details why this is needed,
            # see Python issue http://bugs.python.org/issue10845.
            mock_main = __name__ != '__main__' # -m switch
            if mock_main:
                sys.modules['__main__'] = sys.modules[__name__]
            try:
                self._parallel_check(files_or_modules)
            finally:
                # Always restore sys.modules, even on failure.
                if mock_main:
                    sys.modules.pop('__main__')
    def _parallel_task(self, files_or_modules):
        """Distribute linting over --jobs child processes and yield each
        child's result as it arrives."""
        # Prepare configuration for child linters: serialize every option
        # except those that must not be forwarded.
        filter_options = {'symbols', 'include-ids', 'long-help'}
        filter_options.update([opt_name for opt_name, _ in self._external_opts])
        config = {}
        for opt_providers in six.itervalues(self._all_options):
            for optname, optdict, val in opt_providers.options_and_values():
                if optname not in filter_options:
                    config[optname] = configuration.format_option_value(optdict, val)

        childs = []
        manager = multiprocessing.Manager()  # pylint: disable=no-member
        tasks_queue = manager.Queue()  # pylint: disable=no-member
        results_queue = manager.Queue()  # pylint: disable=no-member

        for _ in range(self.config.jobs):
            cl = ChildLinter(args=(tasks_queue, results_queue, config))
            cl.start()  # pylint: disable=no-member
            childs.append(cl)

        # send files to child linters
        for files_or_module in files_or_modules:
            tasks_queue.put([files_or_module])

        # collect results from child linters (one result per input).
        failed = False
        for _ in files_or_modules:
            try:
                result = results_queue.get()
            except Exception as ex:
                print("internal error while receiving results from child linter",
                      file=sys.stderr)
                print(ex, file=sys.stderr)
                failed = True
                break
            yield result

        # Stop child linters and wait for their completion.
        for _ in range(self.config.jobs):
            tasks_queue.put('STOP')
        for cl in childs:
            cl.join()

        if failed:
            print("Error occured, stopping the linter.", file=sys.stderr)
            sys.exit(32)
    def _parallel_check(self, files_or_modules):
        """Lint in parallel: replay child results through our reporter and
        merge their stats into ours."""
        # Reset stats.
        self.open()
        all_stats = []
        for result in self._parallel_task(files_or_modules):
            (
                file_or_module,
                self.file_state.base_name,
                module,
                messages,
                stats,
                msg_status
            ) = result
            if file_or_module == files_or_modules[-1]:
                last_module = module

            for msg in messages:
                msg = utils.Message(*msg)
                self.set_current_module(module)
                self.reporter.handle_message(msg)

            all_stats.append(stats)
            self.msg_status |= msg_status

        self.stats = _merge_stats(itertools.chain(all_stats, [self.stats]))
        # NOTE(review): `last_module` is only bound when a result for the
        # last input arrived; presumably _parallel_task always yields one
        # result per input -- confirm, else this raises UnboundLocalError.
        self.current_name = last_module

        # Insert stats data to local checkers.
        for checker in self.get_checkers():
            if checker is not self:
                checker.stats = self.stats
    def _do_check(self, files_or_modules):
        """Sequential check: expand inputs to modules, build each AST and
        run token/raw/astroid checkers over it."""
        walker = utils.PyLintASTWalker(self)
        checkers = self.prepare_checkers()
        tokencheckers = [c for c in checkers
                         if interface.implements(c, interfaces.ITokenChecker)
                         and c is not self]
        rawcheckers = [c for c in checkers
                       if interface.implements(c, interfaces.IRawChecker)]
        # notify global begin
        for checker in checkers:
            checker.open()
            if interface.implements(checker, interfaces.IAstroidChecker):
                walker.add_checker(checker)
        # build ast and check modules or packages
        for descr in self.expand_files(files_or_modules):
            modname, filepath = descr['name'], descr['path']
            # Non-argument modules can be filtered out by subclasses.
            if not descr['isarg'] and not self.should_analyze_file(modname, filepath):
                continue
            if self.config.files_output:
                reportfile = 'pylint_%s.%s' % (modname, self.reporter.extension)
                self.reporter.set_output(open(reportfile, 'w'))
            self.set_current_module(modname, filepath)
            # get the module representation
            ast_node = self.get_ast(filepath, modname)
            if ast_node is None:
                continue
            # XXX to be correct we need to keep module_msgs_state for every
            # analyzed module (the problem stands with localized messages which
            # are only detected in the .close step)
            self.file_state = utils.FileState(descr['basename'])
            self._ignore_file = False
            # fix the current file (if the source file was not available or
            # if it's actually a c extension)
            self.current_file = ast_node.file # pylint: disable=maybe-no-member
            self.check_astroid_module(ast_node, walker, rawcheckers, tokencheckers)
            # warn about spurious inline messages handling
            for msgid, line, args in self.file_state.iter_spurious_suppression_messages(self.msgs_store):
                self.add_message(msgid, line, None, args)
        # notify global end
        self.stats['statement'] = walker.nbstatements
        # Close checkers in reverse registration order.
        checkers.reverse()
        for checker in checkers:
            checker.close()
def expand_files(self, modules):
"""get modules and errors from a list of modules and handle errors
"""
result, errors = utils.expand_modules(modules, self.config.black_list)
for error in errors:
message = modname = error["mod"]
key = error["key"]
self.set_current_module(modname)
if key == "fatal":
message = str(error["ex"]).replace(os.getcwd() + os.sep, '')
self.add_message(key, args=message)
return result
def set_current_module(self, modname, filepath=None):
"""set the name of the currently analyzed module and
init statistics for it
"""
if not modname and filepath is None:
return
self.reporter.on_set_current_module(modname, filepath)
self.current_name = modname
self.current_file = filepath or modname
self.stats['by_module'][modname] = {}
self.stats['by_module'][modname]['statement'] = 0
for msg_cat in six.itervalues(utils.MSG_TYPES):
self.stats['by_module'][modname][msg_cat] = 0
    def get_ast(self, filepath, modname):
        """return a ast(roid) representation for a module

        On failure, emits the appropriate message (syntax-error,
        parse-error or astroid-error) and returns None.
        """
        try:
            return MANAGER.ast_from_file(filepath, modname, source=True)
        except SyntaxError as ex:
            self.add_message('syntax-error', line=ex.lineno, args=ex.msg)
        except astroid.AstroidBuildingException as ex:
            self.add_message('parse-error', args=ex)
        except Exception as ex: # pylint: disable=broad-except
            # Unexpected astroid failure: keep the traceback for bug reports.
            import traceback
            traceback.print_exc()
            self.add_message('astroid-error', args=(ex.__class__, ex))
    def check_astroid_module(self, ast_node, walker,
                             rawcheckers, tokencheckers):
        """Check a module from its astroid representation.

        Returns True if the module was fully checked, False if an inline
        pragma ignored the file, None on tokenization failure.
        """
        try:
            tokens = utils.tokenize_module(ast_node)
        except tokenize.TokenError as ex:
            self.add_message('syntax-error', line=ex.args[1][0], args=ex.args[0])
            return

        if not ast_node.pure_python:
            self.add_message('raw-checker-failed', args=ast_node.name)
        else:
            #assert astroid.file.endswith('.py')
            # invoke ITokenChecker interface on self to fetch module/block
            # level options
            self.process_tokens(tokens)
            if self._ignore_file:
                return False
            # walk ast to collect line numbers
            self.file_state.collect_block_lines(self.msgs_store, ast_node)
            # run raw and tokens checkers
            for checker in rawcheckers:
                checker.process_module(ast_node)
            for checker in tokencheckers:
                checker.process_tokens(tokens)
        # generate events to astroid checkers
        walker.walk(ast_node)
        return True
# IAstroidChecker interface #################################################
    def open(self):
        """initialize counters

        Also propagates the C-extension loading options to the global
        astroid MANAGER before any module is parsed.
        """
        self.stats = {'by_module' : {},
                      'by_msg' : {},
                     }
        MANAGER.always_load_extensions = self.config.unsafe_load_any_extension
        MANAGER.extension_package_whitelist.update(
            self.config.extension_pkg_whitelist)
        for msg_cat in six.itervalues(utils.MSG_TYPES):
            self.stats[msg_cat] = 0
def generate_reports(self):
    """close the whole package /module, it's time to make reports !

    if persistent run, pickle results for later comparison

    Loads the previous run's stats (when available) so reports can show
    deltas, hands everything to the reporter, then optionally persists
    the new stats.
    """
    if self.file_state.base_name is not None:
        # load previous results if any
        previous_stats = config.load_results(self.file_state.base_name)
        # XXX code below needs refactoring to be more reporter agnostic
        self.reporter.on_close(self.stats, previous_stats)
        if self.config.reports:
            sect = self.make_reports(self.stats, previous_stats)
            if self.config.files_output:
                # Write the global report to a dedicated file instead of
                # the reporter's current stream.
                filename = 'pylint_global.' + self.reporter.extension
                self.reporter.set_output(open(filename, 'w'))
        else:
            # Reports disabled: display an empty section (html output
            # still needs the display call below).
            sect = ureports.Section()
        if self.config.reports or self.config.output_format == 'html':
            self.reporter.display_results(sect)
        # save results if persistent run
        if self.config.persistent:
            config.save_results(self.stats, self.file_state.base_name)
    else:
        if self.config.output_format == 'html':
            # No output will be emitted for the html
            # reporter if the file doesn't exist, so emit
            # the results here.
            self.reporter.display_results(ureports.Section())
        self.reporter.on_close(self.stats, {})
# specific reports ########################################################
def report_evaluation(self, sect, stats, previous_stats):
    """make the global evaluation report

    Appends a one-line rating (0-10 note) to *sect*, including the delta
    against the previous persisted run when one exists.
    """
    # check with at least check 1 statements (usually 0 when there is a
    # syntax error preventing pylint from further processing)
    if stats['statement'] == 0:
        raise utils.EmptyReport()
    # get a global note for the code
    evaluation = self.config.evaluation
    try:
        # The evaluation expression comes from the configuration (trusted
        # input) and is evaluated against the collected stats mapping.
        note = eval(evaluation, {}, self.stats) # pylint: disable=eval-used
    except Exception as ex: # pylint: disable=broad-except
        # A broken user-supplied expression must not abort the report.
        msg = 'An exception occurred while rating: %s' % ex
    else:
        stats['global_note'] = note
        msg = 'Your code has been rated at %.2f/10' % note
        pnote = previous_stats.get('global_note')
        if pnote is not None:
            msg += ' (previous run: %.2f/10, %+.2f)' % (pnote, note - pnote)
        if self.config.comment:
            msg = '%s\n%s' % (msg, config.get_note_message(note))
    sect.append(ureports.Text(msg))
# some reporting functions ####################################################
def report_total_messages_stats(sect, stats, previous_stats):
    """Append a table totalling messages per type to *sect*."""
    cells = ['type', 'number', 'previous', 'difference']
    # One row per message type, comparing current and previous counts.
    cells.extend(checkers.table_lines_from_stats(
        stats, previous_stats,
        ('convention', 'refactor', 'warning', 'error')))
    sect.append(ureports.Table(children=cells, cols=4, rheaders=1))
def report_messages_stats(sect, stats, _):
    """Append a per-message-id occurrence table to *sect*."""
    by_msg = stats['by_msg']
    if not by_msg:
        # don't print this report when we didn't detect any errors
        raise utils.EmptyReport()
    # Most frequent first; informational (I...) messages are excluded.
    counts = sorted(
        [(value, msg_id) for msg_id, value in six.iteritems(by_msg)
         if not msg_id.startswith('I')],
        reverse=True)
    cells = ['message id', 'occurrences']
    for value, msg_id in counts:
        cells.extend((msg_id, str(value)))
    sect.append(ureports.Table(children=cells, cols=2, rheaders=1))
def report_messages_by_module_stats(sect, stats, _):
    """make errors / warnings by modules report

    Appends a table giving, for each analysed module, the percentage of
    each message type it accounts for.  Clean modules are omitted.
    """
    if len(stats['by_module']) == 1:
        # don't print this report when we are analysing a single module
        raise utils.EmptyReport()
    by_mod = collections.defaultdict(dict)
    for m_type in ('fatal', 'error', 'warning', 'refactor', 'convention'):
        total = stats[m_type]
        for module in six.iterkeys(stats['by_module']):
            mod_total = stats['by_module'][module][m_type]
            if total == 0:
                percent = 0
            else:
                # Share (in %) of this message type emitted by the module.
                percent = float((mod_total)*100) / total
            by_mod[module][m_type] = percent
    sorted_result = []
    for module, mod_info in six.iteritems(by_mod):
        sorted_result.append((mod_info['error'],
                              mod_info['warning'],
                              mod_info['refactor'],
                              mod_info['convention'],
                              module))
    # Worst offenders first: sort on (error, warning, refactor, convention).
    sorted_result.sort()
    sorted_result.reverse()
    lines = ['module', 'error', 'warning', 'refactor', 'convention']
    for line in sorted_result:
        # Don't report clean modules.
        if all(entry == 0 for entry in line[:-1]):
            continue
        lines.append(line[-1])
        for val in line[:-1]:
            lines.append('%.2f' % val)
    if len(lines) == 5:
        # Only the header row remains: every module was clean.
        raise utils.EmptyReport()
    sect.append(ureports.Table(children=lines, cols=5, rheaders=1))
# utilities ###################################################################
class ArgumentPreprocessingError(Exception):
    """Raised if an error occurs during argument preprocessing.

    Emitted by preprocess_options() when a value is missing for an
    option that requires one, or is supplied to an option taking none.
    """
def preprocess_options(args, search_for):
    """look for some options (keys of <search_for>) which have to be processed
    before others

    values of <search_for> are callback functions to call when the option is
    found

    Recognized options (and their value, when consumed from the next
    argument) are removed from *args* in place; each callback is invoked
    as callback(option_name, value), with value None for flag options.

    :raises ArgumentPreprocessingError: if a value is missing for an
        option that requires one, or supplied to an option taking none.
    """
    i = 0
    while i < len(args):
        arg = args[i]
        if arg.startswith('--'):
            # '--opt=value' splits into its parts; a bare '--opt' leaves
            # val as None so it may be consumed from the next argument.
            try:
                option, val = arg[2:].split('=', 1)
            except ValueError:
                option, val = arg[2:], None
            try:
                cb, takearg = search_for[option]
            except KeyError:
                # Not one of ours: leave it for the regular option parser.
                i += 1
            else:
                del args[i]
                if takearg and val is None:
                    # The value must follow as a separate argument.
                    if i >= len(args) or args[i].startswith('-'):
                        msg = 'Option %s expects a value' % option
                        raise ArgumentPreprocessingError(msg)
                    val = args[i]
                    del args[i]
                elif not takearg and val is not None:
                    # Bug fix: the message used to read "doesn't expects".
                    msg = "Option %s doesn't expect a value" % option
                    raise ArgumentPreprocessingError(msg)
                cb(option, val)
        else:
            i += 1
@contextlib.contextmanager
def fix_import_path(args):
    """Prepare sys.path for running the linter checks.

    Within this context, each of the given arguments is importable.
    Paths are added to sys.path in corresponding order to the arguments.
    We avoid adding duplicate directories to sys.path.
    `sys.path` is reset to its original value upon exiting this context.
    """
    orig = list(sys.path)
    changes = []
    for arg in args:
        path = _get_python_path(arg)
        if path in changes:
            # Already scheduled for insertion: keep first occurrence only.
            continue
        else:
            changes.append(path)
    # New paths take priority over the pre-existing ones.
    sys.path[:] = changes + sys.path
    try:
        yield
    finally:
        # Restore the interpreter path untouched, whatever happened inside.
        sys.path[:] = orig
class Run(object):
    """helper class to use as main for pylint :

    run(*sys.argv[1:])

    Instantiating this class performs a full lint run: pre-parse the
    special options, build the linter, load plugins and configuration,
    run the checks and (unless exit=False) terminate the process with
    the linter's bit-ORed message status.
    """
    LinterClass = PyLinter
    option_groups = (
        ('Commands', 'Options which are actually commands. Options in this \
group are mutually exclusive.'),
        )

    def __init__(self, args, reporter=None, exit=True):
        self._rcfile = None
        self._plugins = []
        try:
            # Handle options that must take effect before regular option
            # parsing, since they change how parsing itself is set up.
            preprocess_options(args, {
                # option: (callback, takearg)
                'init-hook': (cb_init_hook, True),
                'rcfile':    (self.cb_set_rcfile, True),
                'load-plugins': (self.cb_add_plugins, True),
                })
        except ArgumentPreprocessingError as ex:
            print(ex, file=sys.stderr)
            sys.exit(32)

        self.linter = linter = self.LinterClass((
            ('rcfile',
             {'action' : 'callback', 'callback' : lambda *args: 1,
              'type': 'string', 'metavar': '<file>',
              'help' : 'Specify a configuration file.'}),
            ('init-hook',
             {'action' : 'callback', 'callback' : lambda *args: 1,
              'type' : 'string', 'metavar': '<code>',
              'level': 1,
              'help' : 'Python code to execute, usually for sys.path '
                       'manipulation such as pygtk.require().'}),
            ('help-msg',
             {'action' : 'callback', 'type' : 'string', 'metavar': '<msg-id>',
              'callback' : self.cb_help_message,
              'group': 'Commands',
              'help' : 'Display a help message for the given message id and '
                       'exit. The value may be a comma separated list of message ids.'}),
            ('list-msgs',
             {'action' : 'callback', 'metavar': '<msg-id>',
              'callback' : self.cb_list_messages,
              'group': 'Commands', 'level': 1,
              'help' : "Generate pylint's messages."}),
            ('list-conf-levels',
             {'action' : 'callback',
              'callback' : cb_list_confidence_levels,
              'group': 'Commands', 'level': 1,
              'help' : "Generate pylint's messages."}),
            ('full-documentation',
             {'action' : 'callback', 'metavar': '<msg-id>',
              'callback' : self.cb_full_documentation,
              'group': 'Commands', 'level': 1,
              'help' : "Generate pylint's full documentation."}),
            ('generate-rcfile',
             {'action' : 'callback', 'callback' : self.cb_generate_config,
              'group': 'Commands',
              'help' : 'Generate a sample configuration file according to '
                       'the current configuration. You can put other options '
                       'before this one to get them in the generated '
                       'configuration.'}),
            ('generate-man',
             {'action' : 'callback', 'callback' : self.cb_generate_manpage,
              'group': 'Commands',
              'help' : "Generate pylint's man page.", 'hide': True}),
            ('errors-only',
             {'action' : 'callback', 'callback' : self.cb_error_mode,
              'short': 'E',
              'help' : 'In error mode, checkers without error messages are '
                       'disabled and for others, only the ERROR messages are '
                       'displayed, and no reports are done by default'''}),
            ('py3k',
             {'action' : 'callback', 'callback' : self.cb_python3_porting_mode,
              'help' : 'In Python 3 porting mode, all checkers will be '
                       'disabled and only messages emitted by the porting '
                       'checker will be displayed'}),
            ('profile',
             {'type' : 'yn', 'metavar' : '<y_or_n>',
              'default': False, 'hide': True,
              'help' : 'Profiled execution.'}),
            ), option_groups=self.option_groups, pylintrc=self._rcfile)
        # register standard checkers
        linter.load_default_plugins()
        # load command line plugins
        linter.load_plugin_modules(self._plugins)
        # add some help section
        linter.add_help_section('Environment variables', config.ENV_HELP, level=1)
        # pylint: disable=bad-continuation
        linter.add_help_section('Output',
'Using the default text output, the message format is : \n'
' \n'
' MESSAGE_TYPE: LINE_NUM:[OBJECT:] MESSAGE \n'
' \n'
'There are 5 kind of message types : \n'
' * (C) convention, for programming standard violation \n'
' * (R) refactor, for bad code smell \n'
' * (W) warning, for python specific problems \n'
' * (E) error, for probable bugs in the code \n'
' * (F) fatal, if an error occurred which prevented pylint from doing further\n'
'processing.\n'
, level=1)
        linter.add_help_section('Output status code',
'Pylint should leave with following status code: \n'
' * 0 if everything went fine \n'
' * 1 if a fatal message was issued \n'
' * 2 if an error message was issued \n'
' * 4 if a warning message was issued \n'
' * 8 if a refactor message was issued \n'
' * 16 if a convention message was issued \n'
' * 32 on usage error \n'
' \n'
'status 1 to 16 will be bit-ORed so you can know which different categories has\n'
'been issued by analysing pylint output status code\n',
level=1)
        # read configuration
        linter.disable('pointless-except')
        linter.disable('suppressed-message')
        linter.disable('useless-suppression')
        linter.read_config_file()
        config_parser = linter.cfgfile_parser
        # run init hook, if present, before loading plugins
        if config_parser.has_option('MASTER', 'init-hook'):
            cb_init_hook('init-hook',
                         textutils.unquote(config_parser.get('MASTER',
                                                             'init-hook')))
        # is there some additional plugins in the file configuration, in
        if config_parser.has_option('MASTER', 'load-plugins'):
            plugins = textutils.splitstrip(
                config_parser.get('MASTER', 'load-plugins'))
            linter.load_plugin_modules(plugins)
        # now we can load file config and command line, plugins (which can
        # provide options) have been registered
        linter.load_config_file()
        if reporter:
            # if a custom reporter is provided as argument, it may be overridden
            # by file parameters, so re-set it here, but before command line
            # parsing so it's still overrideable by command line option
            linter.set_reporter(reporter)
        try:
            args = linter.load_command_line_configuration(args)
        except SystemExit as exc:
            if exc.code == 2: # bad options
                exc.code = 32
            raise
        if not args:
            print(linter.help())
            sys.exit(32)

        if linter.config.jobs < 0:
            print("Jobs number (%d) should be greater than 0"
                  % linter.config.jobs, file=sys.stderr)
            sys.exit(32)
        if linter.config.jobs > 1 or linter.config.jobs == 0:
            if multiprocessing is None:
                # jobs > 1 requested but multiprocessing is unavailable:
                # degrade gracefully instead of crashing.
                print("Multiprocessing library is missing, "
                      "fallback to single process", file=sys.stderr)
                linter.set_option("jobs", 1)
            else:
                if linter.config.jobs == 0:
                    # 0 means "autodetect": use one job per CPU.
                    linter.config.jobs = multiprocessing.cpu_count()

        # insert current working directory to the python path to have a correct
        # behaviour
        # NOTE(review): the non-profiled branch below runs outside
        # fix_import_path() -- confirm this is intended.
        if self.linter.config.profile:
            with fix_import_path(args):
                print('** profiled run', file=sys.stderr)
                import cProfile, pstats
                cProfile.runctx('linter.check(%r)' % args, globals(), locals(),
                                'stones.prof')
                data = pstats.Stats('stones.prof')
                data.strip_dirs()
                data.sort_stats('time', 'calls')
                data.print_stats(30)
        else:
            linter.check(args)
        linter.generate_reports()
        if exit:
            sys.exit(self.linter.msg_status)

    def cb_set_rcfile(self, name, value):
        """callback for option preprocessing (i.e. before option parsing)"""
        self._rcfile = value

    def cb_add_plugins(self, name, value):
        """callback for option preprocessing (i.e. before option parsing)"""
        self._plugins.extend(textutils.splitstrip(value))

    def cb_error_mode(self, *args, **kwargs):
        """error mode:

        * disable all but error messages
        * disable the 'miscellaneous' checker which can be safely deactivated in
          debug
        * disable reports
        * do not save execution information
        """
        self.linter.error_mode()

    def cb_generate_config(self, *args, **kwargs):
        """optik callback for sample config file generation"""
        self.linter.generate_config(skipsections=('COMMANDS',))
        sys.exit(0)

    def cb_generate_manpage(self, *args, **kwargs):
        """optik callback for man page generation"""
        from pylint import __pkginfo__
        self.linter.generate_manpage(__pkginfo__)
        sys.exit(0)

    def cb_help_message(self, option, optname, value, parser):
        """optik callback for printing some help about a particular message"""
        self.linter.msgs_store.help_message(textutils.splitstrip(value))
        sys.exit(0)

    def cb_full_documentation(self, option, optname, value, parser):
        """optik callback for printing full documentation"""
        self.linter.print_full_documentation()
        sys.exit(0)

    def cb_list_messages(self, option, optname, value, parser): # FIXME
        """optik callback for printing available messages"""
        self.linter.msgs_store.list_messages()
        sys.exit(0)

    def cb_python3_porting_mode(self, *args, **kwargs):
        """Activate only the python3 porting checker."""
        self.linter.disable('all')
        self.linter.enable('python3')
def cb_list_confidence_levels(option, optname, value, parser):
    """Optik callback: print the available confidence levels, then exit."""
    for confidence in interfaces.CONFIDENCE_LEVELS:
        # Each level feeds a two-field template (name, description).
        print('%-18s: %s' % confidence)
    sys.exit(0)
def cb_init_hook(optname, value):
    """exec arbitrary code to set sys.path for instance"""
    # Security note: this deliberately executes user-supplied Python from
    # the --init-hook option / config file; only trusted input reaches it.
    exec(value) # pylint: disable=exec-used
if __name__ == '__main__':
    # Command-line entry point: lint the given arguments and exit with the
    # bit-ORed message status (see the "Output status code" help section).
    Run(sys.argv[1:])
|
szopu/django | refs/heads/master | tests/forms_tests/tests/test_extra.py | 16 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import datetime
import warnings
from django.forms import (
CharField, DateField, EmailField, FileField, Form, GenericIPAddressField,
HiddenInput, ImageField, IPAddressField, MultipleChoiceField,
MultiValueField, MultiWidget, PasswordInput, SelectMultiple, SlugField,
SplitDateTimeField, SplitDateTimeWidget, TextInput, URLField,
)
from django.forms.extras import SelectDateWidget
from django.forms.utils import ErrorList
from django.test import TestCase, override_settings
from django.utils import six
from django.utils import translation
from django.utils.dates import MONTHS_AP
from django.utils.encoding import force_text, smart_text, python_2_unicode_compatible
from .test_error_messages import AssertFormErrorsMixin
class GetDate(Form):
    # Minimal form used to exercise SelectDateWidget rendering/validation.
    mydate = DateField(widget=SelectDateWidget)
class GetDateShowHiddenInitial(Form):
    # Variant of GetDate rendering a hidden initial value alongside the widget.
    mydate = DateField(widget=SelectDateWidget, show_hidden_initial=True)
class FormsExtraTestCase(TestCase, AssertFormErrorsMixin):
###############
# Extra stuff #
###############
# The forms library comes with some extra, higher-level Field and Widget
def test_selectdate(self):
    """SelectDateWidget renders month/day/year <select> boxes and GetDate
    round-trips their POST data into a datetime.date."""
    self.maxDiff = None
    w = SelectDateWidget(years=('2007', '2008', '2009', '2010', '2011', '2012', '2013', '2014', '2015', '2016'))

    # Rendering the default state.
    self.assertHTMLEqual(w.render('mydate', ''), """<select name="mydate_month" id="id_mydate_month">
<option value="0">---</option>
<option value="1">January</option>
<option value="2">February</option>
<option value="3">March</option>
<option value="4">April</option>
<option value="5">May</option>
<option value="6">June</option>
<option value="7">July</option>
<option value="8">August</option>
<option value="9">September</option>
<option value="10">October</option>
<option value="11">November</option>
<option value="12">December</option>
</select>
<select name="mydate_day" id="id_mydate_day">
<option value="0">---</option>
<option value="1">1</option>
<option value="2">2</option>
<option value="3">3</option>
<option value="4">4</option>
<option value="5">5</option>
<option value="6">6</option>
<option value="7">7</option>
<option value="8">8</option>
<option value="9">9</option>
<option value="10">10</option>
<option value="11">11</option>
<option value="12">12</option>
<option value="13">13</option>
<option value="14">14</option>
<option value="15">15</option>
<option value="16">16</option>
<option value="17">17</option>
<option value="18">18</option>
<option value="19">19</option>
<option value="20">20</option>
<option value="21">21</option>
<option value="22">22</option>
<option value="23">23</option>
<option value="24">24</option>
<option value="25">25</option>
<option value="26">26</option>
<option value="27">27</option>
<option value="28">28</option>
<option value="29">29</option>
<option value="30">30</option>
<option value="31">31</option>
</select>
<select name="mydate_year" id="id_mydate_year">
<option value="0">---</option>
<option value="2007">2007</option>
<option value="2008">2008</option>
<option value="2009">2009</option>
<option value="2010">2010</option>
<option value="2011">2011</option>
<option value="2012">2012</option>
<option value="2013">2013</option>
<option value="2014">2014</option>
<option value="2015">2015</option>
<option value="2016">2016</option>
</select>""")

    # Rendering the None or '' values should yield the same output.
    self.assertHTMLEqual(w.render('mydate', None), w.render('mydate', ''))

    # Rendering a string value.
    self.assertHTMLEqual(w.render('mydate', '2010-04-15'), """<select name="mydate_month" id="id_mydate_month">
<option value="0">---</option>
<option value="1">January</option>
<option value="2">February</option>
<option value="3">March</option>
<option value="4" selected="selected">April</option>
<option value="5">May</option>
<option value="6">June</option>
<option value="7">July</option>
<option value="8">August</option>
<option value="9">September</option>
<option value="10">October</option>
<option value="11">November</option>
<option value="12">December</option>
</select>
<select name="mydate_day" id="id_mydate_day">
<option value="0">---</option>
<option value="1">1</option>
<option value="2">2</option>
<option value="3">3</option>
<option value="4">4</option>
<option value="5">5</option>
<option value="6">6</option>
<option value="7">7</option>
<option value="8">8</option>
<option value="9">9</option>
<option value="10">10</option>
<option value="11">11</option>
<option value="12">12</option>
<option value="13">13</option>
<option value="14">14</option>
<option value="15" selected="selected">15</option>
<option value="16">16</option>
<option value="17">17</option>
<option value="18">18</option>
<option value="19">19</option>
<option value="20">20</option>
<option value="21">21</option>
<option value="22">22</option>
<option value="23">23</option>
<option value="24">24</option>
<option value="25">25</option>
<option value="26">26</option>
<option value="27">27</option>
<option value="28">28</option>
<option value="29">29</option>
<option value="30">30</option>
<option value="31">31</option>
</select>
<select name="mydate_year" id="id_mydate_year">
<option value="0">---</option>
<option value="2007">2007</option>
<option value="2008">2008</option>
<option value="2009">2009</option>
<option value="2010" selected="selected">2010</option>
<option value="2011">2011</option>
<option value="2012">2012</option>
<option value="2013">2013</option>
<option value="2014">2014</option>
<option value="2015">2015</option>
<option value="2016">2016</option>
</select>""")

    # Rendering a datetime value.
    self.assertHTMLEqual(w.render('mydate', datetime.date(2010, 4, 15)), w.render('mydate', '2010-04-15'))

    # Invalid dates should still render the failed date.
    self.assertHTMLEqual(w.render('mydate', '2010-02-31'), """<select name="mydate_month" id="id_mydate_month">
<option value="0">---</option>
<option value="1">January</option>
<option value="2" selected="selected">February</option>
<option value="3">March</option>
<option value="4">April</option>
<option value="5">May</option>
<option value="6">June</option>
<option value="7">July</option>
<option value="8">August</option>
<option value="9">September</option>
<option value="10">October</option>
<option value="11">November</option>
<option value="12">December</option>
</select>
<select name="mydate_day" id="id_mydate_day">
<option value="0">---</option>
<option value="1">1</option>
<option value="2">2</option>
<option value="3">3</option>
<option value="4">4</option>
<option value="5">5</option>
<option value="6">6</option>
<option value="7">7</option>
<option value="8">8</option>
<option value="9">9</option>
<option value="10">10</option>
<option value="11">11</option>
<option value="12">12</option>
<option value="13">13</option>
<option value="14">14</option>
<option value="15">15</option>
<option value="16">16</option>
<option value="17">17</option>
<option value="18">18</option>
<option value="19">19</option>
<option value="20">20</option>
<option value="21">21</option>
<option value="22">22</option>
<option value="23">23</option>
<option value="24">24</option>
<option value="25">25</option>
<option value="26">26</option>
<option value="27">27</option>
<option value="28">28</option>
<option value="29">29</option>
<option value="30">30</option>
<option value="31" selected="selected">31</option>
</select>
<select name="mydate_year" id="id_mydate_year">
<option value="0">---</option>
<option value="2007">2007</option>
<option value="2008">2008</option>
<option value="2009">2009</option>
<option value="2010" selected="selected">2010</option>
<option value="2011">2011</option>
<option value="2012">2012</option>
<option value="2013">2013</option>
<option value="2014">2014</option>
<option value="2015">2015</option>
<option value="2016">2016</option>
</select>""")

    # Rendering with a custom months dict (abbreviated AP-style names).
    w = SelectDateWidget(months=MONTHS_AP, years=('2013',))
    self.assertHTMLEqual(w.render('mydate', ''), """<select name="mydate_month" id="id_mydate_month">
<option value="0">---</option>
<option value="1">Jan.</option>
<option value="2">Feb.</option>
<option value="3">March</option>
<option value="4">April</option>
<option value="5">May</option>
<option value="6">June</option>
<option value="7">July</option>
<option value="8">Aug.</option>
<option value="9">Sept.</option>
<option value="10">Oct.</option>
<option value="11">Nov.</option>
<option value="12">Dec.</option>
</select>
<select name="mydate_day" id="id_mydate_day">
<option value="0">---</option>
<option value="1">1</option>
<option value="2">2</option>
<option value="3">3</option>
<option value="4">4</option>
<option value="5">5</option>
<option value="6">6</option>
<option value="7">7</option>
<option value="8">8</option>
<option value="9">9</option>
<option value="10">10</option>
<option value="11">11</option>
<option value="12">12</option>
<option value="13">13</option>
<option value="14">14</option>
<option value="15">15</option>
<option value="16">16</option>
<option value="17">17</option>
<option value="18">18</option>
<option value="19">19</option>
<option value="20">20</option>
<option value="21">21</option>
<option value="22">22</option>
<option value="23">23</option>
<option value="24">24</option>
<option value="25">25</option>
<option value="26">26</option>
<option value="27">27</option>
<option value="28">28</option>
<option value="29">29</option>
<option value="30">30</option>
<option value="31">31</option>
</select>
<select name="mydate_year" id="id_mydate_year">
<option value="0">---</option>
<option value="2013">2013</option>
</select>""")

    # Per-component POST data is assembled into a date on clean().
    a = GetDate({'mydate_month': '4', 'mydate_day': '1', 'mydate_year': '2008'})
    self.assertTrue(a.is_valid())
    self.assertEqual(a.cleaned_data['mydate'], datetime.date(2008, 4, 1))

    # As with any widget that implements get_value_from_datadict,
    # we must be prepared to accept the input from the "as_hidden"
    # rendering as well.
    self.assertHTMLEqual(a['mydate'].as_hidden(), '<input type="hidden" name="mydate" value="2008-4-1" id="id_mydate" />')

    b = GetDate({'mydate': '2008-4-1'})
    self.assertTrue(b.is_valid())
    self.assertEqual(b.cleaned_data['mydate'], datetime.date(2008, 4, 1))

    # Invalid dates shouldn't be allowed
    c = GetDate({'mydate_month': '2', 'mydate_day': '31', 'mydate_year': '2010'})
    self.assertFalse(c.is_valid())
    self.assertEqual(c.errors, {'mydate': ['Enter a valid date.']})

    # label tag is correctly associated with month dropdown
    d = GetDate({'mydate_month': '1', 'mydate_day': '1', 'mydate_year': '2010'})
    self.assertIn('<label for="id_mydate_month">', d.as_p())
def test_selectdate_empty_label(self):
    """The empty_label argument customizes the '---' placeholder option,
    either as one string for all three selects or a 3-tuple."""
    w = SelectDateWidget(years=('2014',), empty_label='empty_label')

    # Rendering the default state with empty_label set as a string.
    self.assertInHTML('<option value="0">empty_label</option>', w.render('mydate', ''), count=3)

    w = SelectDateWidget(years=('2014',), empty_label=('empty_year', 'empty_month', 'empty_day'))

    # Rendering the default state with empty_label tuple.
    self.assertHTMLEqual(w.render('mydate', ''), """<select name="mydate_month" id="id_mydate_month">
<option value="0">empty_month</option>
<option value="1">January</option>
<option value="2">February</option>
<option value="3">March</option>
<option value="4">April</option>
<option value="5">May</option>
<option value="6">June</option>
<option value="7">July</option>
<option value="8">August</option>
<option value="9">September</option>
<option value="10">October</option>
<option value="11">November</option>
<option value="12">December</option>
</select>
<select name="mydate_day" id="id_mydate_day">
<option value="0">empty_day</option>
<option value="1">1</option>
<option value="2">2</option>
<option value="3">3</option>
<option value="4">4</option>
<option value="5">5</option>
<option value="6">6</option>
<option value="7">7</option>
<option value="8">8</option>
<option value="9">9</option>
<option value="10">10</option>
<option value="11">11</option>
<option value="12">12</option>
<option value="13">13</option>
<option value="14">14</option>
<option value="15">15</option>
<option value="16">16</option>
<option value="17">17</option>
<option value="18">18</option>
<option value="19">19</option>
<option value="20">20</option>
<option value="21">21</option>
<option value="22">22</option>
<option value="23">23</option>
<option value="24">24</option>
<option value="25">25</option>
<option value="26">26</option>
<option value="27">27</option>
<option value="28">28</option>
<option value="29">29</option>
<option value="30">30</option>
<option value="31">31</option>
</select>
<select name="mydate_year" id="id_mydate_year">
<option value="0">empty_year</option>
<option value="2014">2014</option>
</select>""")

    # Anything other than a 3-tuple (or a single string) is rejected.
    self.assertRaisesMessage(ValueError, 'empty_label list/tuple must have 3 elements.',
                             SelectDateWidget, years=('2014',), empty_label=('not enough', 'values'))
def test_multiwidget(self):
    # MultiWidget and MultiValueField #############################################
    # MultiWidgets are widgets composed of other widgets. They are usually
    # combined with MultiValueFields - a field that is composed of other fields.
    # MultiWidgets can themselves be composed of other MultiWidgets.
    # SplitDateTimeWidget is one example of a MultiWidget.

    class ComplexMultiWidget(MultiWidget):
        def __init__(self, attrs=None):
            widgets = (
                TextInput(),
                SelectMultiple(choices=(('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo'))),
                SplitDateTimeWidget(),
            )
            super(ComplexMultiWidget, self).__init__(widgets, attrs)

        def decompress(self, value):
            # Split "text,choices,datetime" back into per-subwidget values.
            if value:
                data = value.split(',')
                return [data[0], list(data[1]), datetime.datetime.strptime(data[2], "%Y-%m-%d %H:%M:%S")]
            return [None, None, None]

        def format_output(self, rendered_widgets):
            return '\n'.join(rendered_widgets)

    w = ComplexMultiWidget()
    self.assertHTMLEqual(w.render('name', 'some text,JP,2007-04-25 06:24:00'), """<input type="text" name="name_0" value="some text" />
<select multiple="multiple" name="name_1">
<option value="J" selected="selected">John</option>
<option value="P" selected="selected">Paul</option>
<option value="G">George</option>
<option value="R">Ringo</option>
</select>
<input type="text" name="name_2_0" value="2007-04-25" /><input type="text" name="name_2_1" value="06:24:00" />""")

    class ComplexField(MultiValueField):
        def __init__(self, required=True, widget=None, label=None, initial=None):
            fields = (
                CharField(),
                MultipleChoiceField(choices=(('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo'))),
                SplitDateTimeField()
            )
            super(ComplexField, self).__init__(fields, required, widget, label, initial)

        def compress(self, data_list):
            # Join the cleaned per-field values into one compact string.
            if data_list:
                return '%s,%s,%s' % (data_list[0], ''.join(data_list[1]), data_list[2])
            return None

    f = ComplexField(widget=w)
    self.assertEqual(f.clean(['some text', ['J', 'P'], ['2007-04-25', '6:24:00']]), 'some text,JP,2007-04-25 06:24:00')
    self.assertFormErrors(['Select a valid choice. X is not one of the available choices.'], f.clean, ['some text', ['X'], ['2007-04-25', '6:24:00']])

    # If insufficient data is provided, None is substituted
    self.assertFormErrors(['This field is required.'], f.clean, ['some text', ['JP']])

    # test with no initial data
    self.assertTrue(f.has_changed(None, ['some text', ['J', 'P'], ['2007-04-25', '6:24:00']]))

    # test when the data is the same as initial
    self.assertFalse(f.has_changed('some text,JP,2007-04-25 06:24:00',
                                   ['some text', ['J', 'P'], ['2007-04-25', '6:24:00']]))

    # test when the first widget's data has changed
    self.assertTrue(f.has_changed('some text,JP,2007-04-25 06:24:00',
                                  ['other text', ['J', 'P'], ['2007-04-25', '6:24:00']]))

    # test when the last widget's data has changed. this ensures that it is not
    # short circuiting while testing the widgets.
    self.assertTrue(f.has_changed('some text,JP,2007-04-25 06:24:00',
                                  ['some text', ['J', 'P'], ['2009-04-25', '11:44:00']]))

    class ComplexFieldForm(Form):
        field1 = ComplexField(widget=w)

    f = ComplexFieldForm()
    self.assertHTMLEqual(f.as_table(), """<tr><th><label for="id_field1_0">Field1:</label></th><td><input type="text" name="field1_0" id="id_field1_0" />
<select multiple="multiple" name="field1_1" id="id_field1_1">
<option value="J">John</option>
<option value="P">Paul</option>
<option value="G">George</option>
<option value="R">Ringo</option>
</select>
<input type="text" name="field1_2_0" id="id_field1_2_0" /><input type="text" name="field1_2_1" id="id_field1_2_1" /></td></tr>""")

    f = ComplexFieldForm({'field1_0': 'some text', 'field1_1': ['J', 'P'], 'field1_2_0': '2007-04-25', 'field1_2_1': '06:24:00'})
    self.assertHTMLEqual(f.as_table(), """<tr><th><label for="id_field1_0">Field1:</label></th><td><input type="text" name="field1_0" value="some text" id="id_field1_0" />
<select multiple="multiple" name="field1_1" id="id_field1_1">
<option value="J" selected="selected">John</option>
<option value="P" selected="selected">Paul</option>
<option value="G">George</option>
<option value="R">Ringo</option>
</select>
<input type="text" name="field1_2_0" value="2007-04-25" id="id_field1_2_0" /><input type="text" name="field1_2_1" value="06:24:00" id="id_field1_2_1" /></td></tr>""")
    self.assertEqual(f.cleaned_data['field1'], 'some text,JP,2007-04-25 06:24:00')
def test_ipaddress(self):
    """IPv4 validation of the (deprecated) IPAddressField."""
    # Instantiation emits a deprecation warning; silence it so the test
    # output stays clean while validation behaviour is still exercised.
    with warnings.catch_warnings(record=True):
        warnings.simplefilter("always")
        f = IPAddressField()
    self.assertFormErrors(['This field is required.'], f.clean, '')
    self.assertFormErrors(['This field is required.'], f.clean, None)
    # Leading/trailing whitespace is stripped by clean().
    self.assertEqual(f.clean(' 127.0.0.1'), '127.0.0.1')
    self.assertFormErrors(['Enter a valid IPv4 address.'], f.clean, 'foo')
    self.assertFormErrors(['Enter a valid IPv4 address.'], f.clean, '127.0.0.')
    self.assertFormErrors(['Enter a valid IPv4 address.'], f.clean, '1.2.3.4.5')
    self.assertFormErrors(['Enter a valid IPv4 address.'], f.clean, '256.125.1.5')

    with warnings.catch_warnings(record=True):
        warnings.simplefilter("always")
        f = IPAddressField(required=False)
    # Optional field: empty values normalize to the empty string.
    self.assertEqual(f.clean(''), '')
    self.assertEqual(f.clean(None), '')
    self.assertEqual(f.clean(' 127.0.0.1'), '127.0.0.1')
    self.assertFormErrors(['Enter a valid IPv4 address.'], f.clean, 'foo')
    self.assertFormErrors(['Enter a valid IPv4 address.'], f.clean, '127.0.0.')
    self.assertFormErrors(['Enter a valid IPv4 address.'], f.clean, '1.2.3.4.5')
    self.assertFormErrors(['Enter a valid IPv4 address.'], f.clean, '256.125.1.5')
def test_generic_ipaddress_invalid_arguments(self):
self.assertRaises(ValueError, GenericIPAddressField, protocol="hamster")
self.assertRaises(ValueError, GenericIPAddressField, protocol="ipv4", unpack_ipv4=True)
    def test_generic_ipaddress_as_generic(self):
        """With no protocol argument, GenericIPAddressField accepts both
        IPv4 and IPv6 addresses and strips surrounding whitespace."""
        # The edge cases of the IPv6 validation code are not deeply tested
        # here, they are covered in the tests for django.utils.ipv6
        f = GenericIPAddressField()
        self.assertFormErrors(['This field is required.'], f.clean, '')
        self.assertFormErrors(['This field is required.'], f.clean, None)
        self.assertEqual(f.clean(' 127.0.0.1 '), '127.0.0.1')
        self.assertFormErrors(['Enter a valid IPv4 or IPv6 address.'], f.clean, 'foo')
        self.assertFormErrors(['Enter a valid IPv4 or IPv6 address.'], f.clean, '127.0.0.')
        self.assertFormErrors(['Enter a valid IPv4 or IPv6 address.'], f.clean, '1.2.3.4.5')
        self.assertFormErrors(['Enter a valid IPv4 or IPv6 address.'], f.clean, '256.125.1.5')
        self.assertEqual(f.clean(' fe80::223:6cff:fe8a:2e8a '), 'fe80::223:6cff:fe8a:2e8a')
        self.assertEqual(f.clean(' 2a02::223:6cff:fe8a:2e8a '), '2a02::223:6cff:fe8a:2e8a')
        # Inputs that look like IPv6 get the IPv6-specific error message.
        self.assertFormErrors(['This is not a valid IPv6 address.'], f.clean, '12345:2:3:4')
        self.assertFormErrors(['This is not a valid IPv6 address.'], f.clean, '1::2:3::4')
        self.assertFormErrors(['This is not a valid IPv6 address.'], f.clean, 'foo::223:6cff:fe8a:2e8a')
        self.assertFormErrors(['This is not a valid IPv6 address.'], f.clean, '1::2:3:4:5:6:7:8')
        self.assertFormErrors(['This is not a valid IPv6 address.'], f.clean, '1:2')
    def test_generic_ipaddress_as_ipv4_only(self):
        """protocol="IPv4" restricts the field to IPv4; IPv6 input fails."""
        f = GenericIPAddressField(protocol="IPv4")
        self.assertFormErrors(['This field is required.'], f.clean, '')
        self.assertFormErrors(['This field is required.'], f.clean, None)
        self.assertEqual(f.clean(' 127.0.0.1 '), '127.0.0.1')
        self.assertFormErrors(['Enter a valid IPv4 address.'], f.clean, 'foo')
        self.assertFormErrors(['Enter a valid IPv4 address.'], f.clean, '127.0.0.')
        self.assertFormErrors(['Enter a valid IPv4 address.'], f.clean, '1.2.3.4.5')
        self.assertFormErrors(['Enter a valid IPv4 address.'], f.clean, '256.125.1.5')
        # Even well-formed IPv6 addresses are rejected in IPv4-only mode.
        self.assertFormErrors(['Enter a valid IPv4 address.'], f.clean, 'fe80::223:6cff:fe8a:2e8a')
        self.assertFormErrors(['Enter a valid IPv4 address.'], f.clean, '2a02::223:6cff:fe8a:2e8a')
    def test_generic_ipaddress_as_ipv6_only(self):
        """protocol="IPv6" restricts the field to IPv6; IPv4 input fails."""
        f = GenericIPAddressField(protocol="IPv6")
        self.assertFormErrors(['This field is required.'], f.clean, '')
        self.assertFormErrors(['This field is required.'], f.clean, None)
        # Even a well-formed IPv4 address is rejected in IPv6-only mode.
        self.assertFormErrors(['Enter a valid IPv6 address.'], f.clean, '127.0.0.1')
        self.assertFormErrors(['Enter a valid IPv6 address.'], f.clean, 'foo')
        self.assertFormErrors(['Enter a valid IPv6 address.'], f.clean, '127.0.0.')
        self.assertFormErrors(['Enter a valid IPv6 address.'], f.clean, '1.2.3.4.5')
        self.assertFormErrors(['Enter a valid IPv6 address.'], f.clean, '256.125.1.5')
        self.assertEqual(f.clean(' fe80::223:6cff:fe8a:2e8a '), 'fe80::223:6cff:fe8a:2e8a')
        self.assertEqual(f.clean(' 2a02::223:6cff:fe8a:2e8a '), '2a02::223:6cff:fe8a:2e8a')
        self.assertFormErrors(['This is not a valid IPv6 address.'], f.clean, '12345:2:3:4')
        self.assertFormErrors(['This is not a valid IPv6 address.'], f.clean, '1::2:3::4')
        self.assertFormErrors(['This is not a valid IPv6 address.'], f.clean, 'foo::223:6cff:fe8a:2e8a')
        self.assertFormErrors(['This is not a valid IPv6 address.'], f.clean, '1::2:3:4:5:6:7:8')
        self.assertFormErrors(['This is not a valid IPv6 address.'], f.clean, '1:2')
    def test_generic_ipaddress_as_generic_not_required(self):
        """required=False maps empty values to '' but still validates any
        non-empty input against both protocols."""
        f = GenericIPAddressField(required=False)
        self.assertEqual(f.clean(''), '')
        self.assertEqual(f.clean(None), '')
        self.assertEqual(f.clean('127.0.0.1'), '127.0.0.1')
        self.assertFormErrors(['Enter a valid IPv4 or IPv6 address.'], f.clean, 'foo')
        self.assertFormErrors(['Enter a valid IPv4 or IPv6 address.'], f.clean, '127.0.0.')
        self.assertFormErrors(['Enter a valid IPv4 or IPv6 address.'], f.clean, '1.2.3.4.5')
        self.assertFormErrors(['Enter a valid IPv4 or IPv6 address.'], f.clean, '256.125.1.5')
        self.assertEqual(f.clean(' fe80::223:6cff:fe8a:2e8a '), 'fe80::223:6cff:fe8a:2e8a')
        self.assertEqual(f.clean(' 2a02::223:6cff:fe8a:2e8a '), '2a02::223:6cff:fe8a:2e8a')
        self.assertFormErrors(['This is not a valid IPv6 address.'], f.clean, '12345:2:3:4')
        self.assertFormErrors(['This is not a valid IPv6 address.'], f.clean, '1::2:3::4')
        self.assertFormErrors(['This is not a valid IPv6 address.'], f.clean, 'foo::223:6cff:fe8a:2e8a')
        self.assertFormErrors(['This is not a valid IPv6 address.'], f.clean, '1::2:3:4:5:6:7:8')
        self.assertFormErrors(['This is not a valid IPv6 address.'], f.clean, '1:2')
    def test_generic_ipaddress_normalization(self):
        """clean() canonicalizes IPv6 (zero-compression, mapped-IPv4 dotted
        form) and, with unpack_ipv4=True, unpacks ::ffff:* to plain IPv4."""
        # Test the normalizing code
        f = GenericIPAddressField()
        self.assertEqual(f.clean(' ::ffff:0a0a:0a0a '), '::ffff:10.10.10.10')
        self.assertEqual(f.clean(' ::ffff:10.10.10.10 '), '::ffff:10.10.10.10')
        self.assertEqual(f.clean(' 2001:000:a:0000:0:fe:fe:beef '), '2001:0:a::fe:fe:beef')
        self.assertEqual(f.clean(' 2001::a:0000:0:fe:fe:beef '), '2001:0:a::fe:fe:beef')
        f = GenericIPAddressField(unpack_ipv4=True)
        self.assertEqual(f.clean(' ::ffff:0a0a:0a0a'), '10.10.10.10')
def test_slugfield_normalization(self):
f = SlugField()
self.assertEqual(f.clean(' aa-bb-cc '), 'aa-bb-cc')
def test_urlfield_normalization(self):
f = URLField()
self.assertEqual(f.clean('http://example.com/ '), 'http://example.com/')
    def test_smart_text(self):
        """smart_text() converts objects to text via __str__ (py3) or
        __unicode__/decoded __str__ (py2)."""
        class Test:
            if six.PY3:
                def __str__(self):
                    return 'ŠĐĆŽćžšđ'
            else:
                # Python 2: __str__ returns UTF-8 encoded bytes.
                def __str__(self):
                    return 'ŠĐĆŽćžšđ'.encode('utf-8')
        class TestU:
            if six.PY3:
                def __str__(self):
                    return 'ŠĐĆŽćžšđ'
                def __bytes__(self):
                    return b'Foo'
            else:
                # Python 2: __unicode__ takes precedence over __str__.
                def __str__(self):
                    return b'Foo'
                def __unicode__(self):
                    return '\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111'
        self.assertEqual(smart_text(Test()), '\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111')
        self.assertEqual(smart_text(TestU()), '\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111')
        self.assertEqual(smart_text(1), '1')
        self.assertEqual(smart_text('foo'), 'foo')
    def test_accessing_clean(self):
        """Form.clean() may read cleaned_data, modify it, and return it."""
        class UserForm(Form):
            username = CharField(max_length=10)
            password = CharField(widget=PasswordInput)
            def clean(self):
                data = self.cleaned_data
                if not self.errors:
                    data['username'] = data['username'].lower()
                return data
        f = UserForm({'username': 'SirRobin', 'password': 'blue'})
        self.assertTrue(f.is_valid())
        self.assertEqual(f.cleaned_data['username'], 'sirrobin')
    def test_changing_cleaned_data_nothing_returned(self):
        """When clean() returns None, in-place edits to cleaned_data are kept."""
        class UserForm(Form):
            username = CharField(max_length=10)
            password = CharField(widget=PasswordInput)
            def clean(self):
                self.cleaned_data['username'] = self.cleaned_data['username'].lower()
                # don't return anything
        f = UserForm({'username': 'SirRobin', 'password': 'blue'})
        self.assertTrue(f.is_valid())
        self.assertEqual(f.cleaned_data['username'], 'sirrobin')
    def test_changing_cleaned_data_in_clean(self):
        """clean() may return a brand-new dict, which replaces cleaned_data."""
        class UserForm(Form):
            username = CharField(max_length=10)
            password = CharField(widget=PasswordInput)
            def clean(self):
                data = self.cleaned_data
                # Return a different dict. We have not changed self.cleaned_data.
                return {
                    'username': data['username'].lower(),
                    'password': 'this_is_not_a_secret',
                }
        f = UserForm({'username': 'SirRobin', 'password': 'blue'})
        self.assertTrue(f.is_valid())
        self.assertEqual(f.cleaned_data['username'], 'sirrobin')
    def test_overriding_errorlist(self):
        """A custom error_class controls how per-field errors are rendered."""
        @python_2_unicode_compatible
        class DivErrorList(ErrorList):
            def __str__(self):
                return self.as_divs()
            def as_divs(self):
                if not self:
                    return ''
                return '<div class="errorlist">%s</div>' % ''.join('<div class="error">%s</div>' % force_text(e) for e in self)
        class CommentForm(Form):
            name = CharField(max_length=50, required=False)
            email = EmailField()
            comment = CharField()
        data = dict(email='invalid')
        f = CommentForm(data, auto_id=False, error_class=DivErrorList)
        # Errors for 'email' (invalid) and 'comment' (missing) render as divs.
        self.assertHTMLEqual(f.as_p(), """<p>Name: <input type="text" name="name" maxlength="50" /></p>
<div class="errorlist"><div class="error">Enter a valid email address.</div></div>
<p>Email: <input type="email" name="email" value="invalid" /></p>
<div class="errorlist"><div class="error">This field is required.</div></div>
<p>Comment: <input type="text" name="comment" /></p>""")
def test_multipart_encoded_form(self):
class FormWithoutFile(Form):
username = CharField()
class FormWithFile(Form):
username = CharField()
file = FileField()
class FormWithImage(Form):
image = ImageField()
self.assertFalse(FormWithoutFile().is_multipart())
self.assertTrue(FormWithFile().is_multipart())
self.assertTrue(FormWithImage().is_multipart())
def test_selectdatewidget_required(self):
class GetNotRequiredDate(Form):
mydate = DateField(widget=SelectDateWidget, required=False)
class GetRequiredDate(Form):
mydate = DateField(widget=SelectDateWidget, required=True)
self.assertFalse(GetNotRequiredDate().fields['mydate'].widget.is_required)
self.assertTrue(GetRequiredDate().fields['mydate'].widget.is_required)
@override_settings(USE_L10N=True)
class FormsExtraL10NTestCase(TestCase):
    """Localized (Dutch) rendering and parsing for SelectDateWidget."""
    def setUp(self):
        super(FormsExtraL10NTestCase, self).setUp()
        translation.activate('nl')
    def tearDown(self):
        translation.deactivate()
        super(FormsExtraL10NTestCase, self).tearDown()
    def test_l10n(self):
        """The widget emits dates in the Dutch d-m-Y order and renders
        localized month names."""
        w = SelectDateWidget(years=('2007', '2008', '2009', '2010', '2011', '2012', '2013', '2014', '2015', '2016'))
        self.assertEqual(w.value_from_datadict({'date_year': '2010', 'date_month': '8', 'date_day': '13'}, {}, 'date'), '13-08-2010')
        self.assertHTMLEqual(w.render('date', '13-08-2010'), """<select name="date_day" id="id_date_day">
<option value="0">---</option>
<option value="1">1</option>
<option value="2">2</option>
<option value="3">3</option>
<option value="4">4</option>
<option value="5">5</option>
<option value="6">6</option>
<option value="7">7</option>
<option value="8">8</option>
<option value="9">9</option>
<option value="10">10</option>
<option value="11">11</option>
<option value="12">12</option>
<option value="13" selected="selected">13</option>
<option value="14">14</option>
<option value="15">15</option>
<option value="16">16</option>
<option value="17">17</option>
<option value="18">18</option>
<option value="19">19</option>
<option value="20">20</option>
<option value="21">21</option>
<option value="22">22</option>
<option value="23">23</option>
<option value="24">24</option>
<option value="25">25</option>
<option value="26">26</option>
<option value="27">27</option>
<option value="28">28</option>
<option value="29">29</option>
<option value="30">30</option>
<option value="31">31</option>
</select>
<select name="date_month" id="id_date_month">
<option value="0">---</option>
<option value="1">januari</option>
<option value="2">februari</option>
<option value="3">maart</option>
<option value="4">april</option>
<option value="5">mei</option>
<option value="6">juni</option>
<option value="7">juli</option>
<option value="8" selected="selected">augustus</option>
<option value="9">september</option>
<option value="10">oktober</option>
<option value="11">november</option>
<option value="12">december</option>
</select>
<select name="date_year" id="id_date_year">
<option value="0">---</option>
<option value="2007">2007</option>
<option value="2008">2008</option>
<option value="2009">2009</option>
<option value="2010" selected="selected">2010</option>
<option value="2011">2011</option>
<option value="2012">2012</option>
<option value="2013">2013</option>
<option value="2014">2014</option>
<option value="2015">2015</option>
<option value="2016">2016</option>
</select>""")
        # Years before 1900 work
        w = SelectDateWidget(years=('1899',))
        self.assertEqual(w.value_from_datadict({'date_year': '1899', 'date_month': '8', 'date_day': '13'}, {}, 'date'), '13-08-1899')
    def test_l10n_date_changed(self):
        """
        Ensure that DateField.has_changed() with SelectDateWidget works
        correctly with a localized date format.
        Refs #17165.
        """
        # With Field.show_hidden_initial=False -----------------------
        b = GetDate({
            'mydate_year': '2008',
            'mydate_month': '4',
            'mydate_day': '1',
        }, initial={'mydate': datetime.date(2008, 4, 1)})
        self.assertFalse(b.has_changed())
        b = GetDate({
            'mydate_year': '2008',
            'mydate_month': '4',
            'mydate_day': '2',
        }, initial={'mydate': datetime.date(2008, 4, 1)})
        self.assertTrue(b.has_changed())
        # With Field.show_hidden_initial=True ------------------------
        # The hidden initial, not the initial= kwarg, is the comparison basis.
        b = GetDateShowHiddenInitial({
            'mydate_year': '2008',
            'mydate_month': '4',
            'mydate_day': '1',
            'initial-mydate': HiddenInput()._format_value(datetime.date(2008, 4, 1))
        }, initial={'mydate': datetime.date(2008, 4, 1)})
        self.assertFalse(b.has_changed())
        b = GetDateShowHiddenInitial({
            'mydate_year': '2008',
            'mydate_month': '4',
            'mydate_day': '22',
            'initial-mydate': HiddenInput()._format_value(datetime.date(2008, 4, 1))
        }, initial={'mydate': datetime.date(2008, 4, 1)})
        self.assertTrue(b.has_changed())
        b = GetDateShowHiddenInitial({
            'mydate_year': '2008',
            'mydate_month': '4',
            'mydate_day': '22',
            'initial-mydate': HiddenInput()._format_value(datetime.date(2008, 4, 1))
        }, initial={'mydate': datetime.date(2008, 4, 22)})
        self.assertTrue(b.has_changed())
        b = GetDateShowHiddenInitial({
            'mydate_year': '2008',
            'mydate_month': '4',
            'mydate_day': '22',
            'initial-mydate': HiddenInput()._format_value(datetime.date(2008, 4, 22))
        }, initial={'mydate': datetime.date(2008, 4, 1)})
        self.assertFalse(b.has_changed())
    def test_l10n_invalid_date_in(self):
        # Invalid dates shouldn't be allowed
        a = GetDate({'mydate_month': '2', 'mydate_day': '31', 'mydate_year': '2010'})
        self.assertFalse(a.is_valid())
        # 'Geef een geldige datum op.' = 'Enter a valid date.'
        self.assertEqual(a.errors, {'mydate': ['Geef een geldige datum op.']})
    def test_form_label_association(self):
        # label tag is correctly associated with first rendered dropdown
        a = GetDate({'mydate_month': '1', 'mydate_day': '1', 'mydate_year': '2010'})
        self.assertIn('<label for="id_mydate_day">', a.as_p())
|
moto-timo/ironpython3 | refs/heads/master | Tests/test_syntax.py | 1 | #####################################################################################
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# This source code is subject to terms and conditions of the Apache License, Version 2.0. A
# copy of the license can be found in the License.html file at the root of this distribution. If
# you cannot locate the Apache License, Version 2.0, please send an email to
# ironpy@microsoft.com. By using this source code in any fashion, you are agreeing to be bound
# by the terms of the Apache License, Version 2.0.
#
# You must not remove this notice, or any other, from this software.
#
#
#####################################################################################
from iptest.assert_util import *
from iptest.warning_util import warning_trapper
import sys
if not is_silverlight:
from iptest.process_util import *
# Chained comparisons spread over backslash-continued lines must parse as a
# single condition; the values below satisfy every range check.
year = 2005
month = 3
day = 16
hour = 14
minute = 53
second = 24
if 1900 < year < 2100 and 1 <= month <= 12 \
    and 1 <= day <= 31 and 0 <= hour < 24 \
    and 0 <= minute < 60 and 0 <= second < 60:   # Looks like a valid date
        pass
# Testing the (expr) support
# A parenthesized bare name is still a valid assignment and deletion target.
x = 10
AreEqual(x, 10)
del x
try: y = x
except NameError: pass
else: Fail("x not deleted")
(x) = 20
AreEqual((x), 20)
del (x)
try: y = x
except NameError: pass
else: Fail("x not deleted")
# this is comment \
a=10
AreEqual(a, 10)
x = "Th\
\
e \
qu\
ick\
br\
ow\
\
n \
fo\
\
x\
ju\
mp\
s \
ove\
\
r \
th\
e l\
az\
\
y d\
og\
.\
\
\
\
12\
34\
567\
89\
0"
y="\
The\
q\
ui\
\
c\
k b\
\
r\
o\
w\
n\
\
fo\
x\
\
jum\
ps\
ov\
er \
t\
he\
la\
\
\
zy\
\
\
d\
og\
. 1\
2\
\
3\
\
\
\
\
4\
567\
\
8\
\
90\
"
AreEqual(x, y)
AreEqual("\101", "A")
x='\a\b\c\d\e\f\g\h\i\j\k\l\m\n\o\p\q\r\s\t\\u\v\w\y\z'
y='\\u0007\\u0008\\\\u0063\\\\u0064\\\\u0065\\u000C\\\\u0067\\\\u0068\\\\u0069\\\\u006a\\\\u006b\\\\u006c\\\\u006d\\u000A\\\\u006f\\\\u0070\\\\u0071\\u000D\\\\u0073\\u0009\\\\u0075\\u000B\\\\u0077\\\\u0079\\\\u007a'
Assert(x == y)
AreEqual(x, y)
for a,b in zip(x,y):
AreEqual(a,b)
Assert((10==20)==(20==10))
AreEqual(10==20, 20==10)
AreEqual(4e4-4, 4e4 - 4)
c = compile("071 + 1", "Error", "eval")
AssertError(SyntaxError, compile, "088 + 1", "Error", "eval")
AssertError(SyntaxError, compile, "099 + 1", "Error", "eval")
AssertError(SyntaxError, compile, """
try:
pass
""", "Error", "single")
AssertError(SyntaxError, compile, "x=10\ny=x.", "Error", "exec")
def run_compile_test(code, msg, lineno, skipCpy):
    """Compile *code* and verify that it raises a SyntaxError carrying the
    expected message, line number, and filename.

    When skipCpy is true the check only runs on IronPython (is_cli)."""
    if skipCpy and not is_cli:
        return
    filename = "the file name"
    raised = False
    try:
        compile(code, filename, "exec")
    except SyntaxError as err:
        raised = True
        AreEqual(err.msg, msg)
        AreEqual(err.lineno, lineno)
        AreEqual(err.filename, filename)
    Assert(raised, "Expected exception, got none")
if is_ironpython:
_yield_msg = "can't assign to yield expression"
else:
_yield_msg = "assignment to yield expression not possible"
compile_tests = [
("for x notin []:\n pass", "unexpected token 'notin'", 1, True),
("global 1", "unexpected token '1'", 1, True),
("x=10\nyield x\n", "'yield' outside function", 2, False),
("return\n", "'return' outside function", 1, False),
#("print >> 1 ,\n", "unexpected token '<eof>'", 1, False),
("def f(x=10, y):\n pass", "default value must be specified here", 1, True),
("def f(for):\n pass", "unexpected token 'for'", 1, True),
("f(3 = )", "expected name", 1, True),
("dict(a=1,a=2)", "duplicate keyword argument", 1, True),
("def f(a,a): pass", "duplicate argument 'a' in function definition", 1, False),
("def f((a,b),(c,b)): pass", "duplicate argument 'b' in function definition", 1, False),
("x = 10\nx = x[]", "unexpected token ']'", 2, True),
("break", "'break' outside loop", 1, False),
("if 1:\n\tbreak", "'break' outside loop", 2, False),
("if 1:\n\tx+y=22", "can't assign to operator", 2, False),
("if 1:\n\tdel f()", "can't delete function call", 2, False),
("def a(x):\n def b():\n print x\n del x", "can not delete variable 'x' referenced in nested scope", 2, True),
("if 1:\nfoo()\n", "expected an indented block", 2, False),
("'abc'.1", "invalid syntax", 1, True),
("'abc'.1L", "invalid syntax", 1, False),
("'abc'.1j", "invalid syntax", 1, True),
("'abc'.0xFFFF", "invalid syntax", 1, False),
("'abc' 1L", "invalid syntax", 1, True),
("'abc' 1.0", "invalid syntax", 1, True),
("'abc' 0j", "invalid syntax", 1, True),
("x = 'abc'\nx.1", "invalid syntax", 2, False),
("x = 'abc'\nx 1L", "invalid syntax", 2, False),
("x = 'abc'\nx 1.0", "invalid syntax", 2, False),
("x = 'abc'\nx 0j", "invalid syntax", 2, False),
('def f():\n del (yield 5)\n', "can't delete yield expression", 2, False),
('a,b,c += 1,2,3', "illegal expression for augmented assignment", 1, False),
('def f():\n a = yield 3 = yield 4', _yield_msg, 2, False),
('((yield a), 2,3) = (2,3,4)', "can't assign to yield expression", 1, False),
('(2,3) = (3,4)', "can't assign to literal", 1, False),
("def e():\n break", "'break' outside loop", 2, False),
("def g():\n for x in range(10):\n print x\n break\n", "'break' outside loop", 4, False),
("def g():\n for x in range(10):\n print x\n if True:\n break\n", "'break' outside loop", 5, False),
("def z():\n if True:\n break\n", "'break' outside loop", 3, False),
('from import abc', "invalid syntax", 1, False),
('() = 1', "can't assign to ()", 1, False),
("""for x in range(100):\n"""
""" try:\n"""
""" [1,2][3]\n"""
""" except IndexError:\n"""
""" pass\n"""
""" finally:\n"""
""" continue\n""", "'continue' not supported inside 'finally' clause", 7, False)
#CodePlex 15428
#("'abc'.", "invalid syntax", 1),
]
compile_tests.append(("None = 2", "cannot assign to None", 1, False))
# different error messages, ok
for test in compile_tests:
run_compile_test(*test)
AreEqual(float(repr(2.5)), 2.5)
AreEqual(eval("1, 2, 3,"), (1, 2, 3))
# eval validates end of input
AssertError(SyntaxError, compile, "1+2 1", "Error", "eval")
# empty test list in for expression
AssertError(SyntaxError, compile, "for x in : print x", "Error", "exec")
AssertError(SyntaxError, compile, "for x in : print x", "Error", "eval")
AssertError(SyntaxError, compile, "for x in : print x", "Error", "single")
# empty backquote
AssertError(SyntaxError, compile, "``", "Error", "exec")
AssertError(SyntaxError, compile, "``", "Error", "eval")
AssertError(SyntaxError, compile, "``", "Error", "single")
# empty assignment expressions
AssertError(SyntaxError, compile, "x = ", "Error", "exec")
AssertError(SyntaxError, compile, "x = ", "Error", "eval")
AssertError(SyntaxError, compile, "x = ", "Error", "single")
AssertError(SyntaxError, compile, "x = y = ", "Error", "exec")
AssertError(SyntaxError, compile, "x = y = ", "Error", "eval")
AssertError(SyntaxError, compile, "x = y = ", "Error", "single")
AssertError(SyntaxError, compile, " = ", "Error", "exec")
AssertError(SyntaxError, compile, " = ", "Error", "eval")
AssertError(SyntaxError, compile, " = ", "Error", "single")
AssertError(SyntaxError, compile, " = 4", "Error", "exec")
AssertError(SyntaxError, compile, " = 4", "Error", "eval")
AssertError(SyntaxError, compile, " = 4", "Error", "single")
AssertError(SyntaxError, compile, "x <= ", "Error", "exec")
AssertError(SyntaxError, compile, "x <= ", "Error", "eval")
AssertError(SyntaxError, compile, "x <= ", "Error", "single")
#indentation errors - BUG 864
AssertError(IndentationError, compile, "class C:\nx=2\n", "Error", "exec")
AssertError(IndentationError, compile, "class C:\n\n", "Error", "single")
#allow \f
compile('\f\f\f\f\fclass C:\f\f\f pass', 'ok', 'exec')
compile('\f\f\f\f\fclass C:\n\f\f\f print "hello"\n\f\f\f\f\f\f\f\f\f\f print "goodbye"', 'ok', 'exec')
compile('class C:\n\f\f\f print "hello"\n\f\f\f\f\f\f\f\f\f\f print "goodbye"', 'ok', 'exec')
compile('class \f\f\f\fC:\n\f print "hello"\n\f\f\f\f\f\f\f\f\f\f print "goodbye"', 'ok', 'exec')
# multiline expression passed to exec (positive test)
s = """
title = "The Cat"
Assert(title.istitle())
x = 2 + 5
AreEqual(x, 7)
"""
exec(s)
if is_cpython:
# this seems to be a CPython bug, Guido says:
# I usually append some extra newlines before passing a string to compile(). That's the usual work-around.
# There's probably a subtle bug in the tokenizer when reading from a string -- if you find it,
# please upload a patch to the tracker!
# http://mail.python.org/pipermail/python-dev/2009-May/089793.html
AssertError(SyntaxError, compile, "def f(a):\n\treturn a\n\t", "", "single")
AssertError(SyntaxError, compile, "def f(a):\n\treturn a\n\t", "", "single", 0x200)
# should work
s = "def f():\n\treturn 3"
compile(s, "<string>", "single")
AssertError(SyntaxError, compile, s, "<string>", "single", 0x200)
# Assignment to None and constant
def NoneAssign():
exec('None = 2')
def LiteralAssign():
exec("'2' = '3'")
AssertError(SyntaxError, NoneAssign)
AssertError(SyntaxError, LiteralAssign)
# beginning of the file handling
c = compile(" # some comment here \nprint 10", "", "exec")
c = compile(" \n# some comment\n \nprint 10", "", "exec")
AssertError(SyntaxError, compile, " x = 10\n\n", "", "exec")
AssertError(SyntaxError, compile, " \n #comment\n x = 10\n\n", "", "exec")
if sys.platform == 'cli':
c = compile("\\u0391 = 10\nif \\u0391 != 10: 1/0", "", "exec")
exec(c)
# from __future__ tests
AssertError(SyntaxError, compile, "def f():\n from __future__ import division", "", "exec")
AssertError(SyntaxError, compile, "'doc'\n'doc2'\nfrom __future__ import division", "", "exec")
# del x
AssertError(SyntaxError, compile, "def f():\n del x\n def g():\n return x\n", "", "exec")
AssertError(SyntaxError, compile, "def f():\n def g():\n return x\n del x\n", "", "exec")
AssertError(SyntaxError, compile, "def f():\n class g:\n def h(self):\n print x\n pass\n del x\n", "", "exec")
# add global to the picture
c = compile("def f():\n x=10\n del x\n def g():\n global x\n return x\n return g\nf()()\n", "", "exec")
AssertError(NameError, eval, c)
c = compile("def f():\n global x\n x=10\n del x\n def g():\n return x\n return g\nf()()\n", "", "exec")
AssertError(NameError, eval, c)
# global following definition test
# affected by bug# 1145
c = compile("def f():\n global a\n global a\n a = 1\n", "", "exec")
# unqualified exec in nested function
AssertError(SyntaxError, compile, "def f():\n x = 1\n def g():\n exec 'pass'\n print x", "", "exec")
# correct case - qualified exec in nested function
c = compile("def f():\n x = 10\n def g():\n exec 'pass' in {}\n print x\n", "", "exec")
# private names test
class C:
__x = 10
class ___:
__y = 20
class D:
__z = 30
AreEqual(C._C__x, 10)
AreEqual(C.___.__y, 20)
AreEqual(C.D._D__z, 30)
class B(object):
def method(self, __a):
return __a
AreEqual(B().method("__a passed in"), "__a passed in")
class B(object):
def method(self, xxx_todo_changeme):
(__a, ) = xxx_todo_changeme
return __a
AreEqual(B().method(("__a passed in", )), "__a passed in")
class B(object):
def __f(self):
pass
Assert('_B__f' in dir(B))
class B(object):
class __C(object): pass
Assert('_B__C' in dir(B))
class B(object):
x = lambda self, __a : __a
AreEqual(B.x(B(), _B__a='value'), 'value')
#Hit negative case of 'sublist' in http://www.python.org/doc/2.5.1/ref/grammar.txt.
AssertError(SyntaxError, compile, "def f((1)): pass", "", "exec")
#
# Make sure that augmented assignment also binds in the given scope
#
augassign_code = """
x = 10
def f():
    x %s 10
f()
"""
def test_augassign_binding():
    """Every augmented-assignment operator makes x local to f, so the
    read before the first binding must raise inside the exec'd code."""
    for op in ["+=", "-=", "**=", "*=", "//=", "/=", "%=", "<<=", ">>=", "&=", "|=", "^="]:
        code = augassign_code % op
        try:
            exec(code, {}, {})
        except:
            pass
        else:
            Assert(False, "augassign binding test didn't raise exception")
    return True
Assert(test_augassign_binding())
# tests for multiline compound statements
class MyException(Exception): pass
def test_multiline_compound_stmts():
    """Single-line suites after if/elif/else/for/while/try clauses must
    compile and execute; each snippet signals success via MyException."""
    tests = [
        "if False: print 'In IF'\nelse: x = 2; raise MyException('expected')",
        "if False: print 'In IF'\nelif True: x = 2;raise MyException('expected')\nelse: print 'In ELSE'",
        "for i in (1,2): x = i\nelse: x = 5; raise MyException('expected')",
        "while 5 in (1,2): print i\nelse:x = 2;raise MyException('expected')",
        "try: x = 2\nexcept: print 'In EXCEPT'\nelse: x=20;raise MyException('expected')",
        ]
    for test in tests:
        try:
            c = compile(test,"","exec")
            exec(c)
        except MyException:
            pass
        else:
            Assert(False, "multiline_compound stmt test did not raise exception. test = " + test)
test_multiline_compound_stmts()
# Generators cannot have return statements with values in them. SyntaxError is thrown in those cases.
def test_generator_with_nonempty_return():
    """'return <value>' inside a generator body must be rejected at
    compile time; a bare 'return' remains legal."""
    tests = [
        "def f():\n     return 42\n     yield 3",
        "def f():\n     yield 42\n     return 3",
        "def f():\n     yield 42\n     return None",
        "def f():\n     if True:\n          return 42\n     yield 42",
        "def f():\n     try:\n          return 42\n     finally:\n          yield 23"
        ]
    for test in tests:
        #Merlin 148614 - Change it to AssertErrorWithMessage once bug is fixed.
        AssertErrorWithPartialMessage(SyntaxError, "'return' with argument inside generator", compile, test, "", "exec")
    #Verify that when there is no return value error is not thrown.
    def f():
        yield 42
        return
test_generator_with_nonempty_return()
# compile function which returns from finally, but does not yield from finally.
c = compile("def f():\n    try:\n        pass\n    finally:\n        return 1", "", "exec")
# A return inside finally wins over any return/exception already in flight.
def ret_from_finally():
    try:
        pass
    finally:
        return 1
    return 2
AreEqual(ret_from_finally(), 1)
def ret_from_finally2(x):
    if x:
        try:
            pass
        finally:
            return 1
    else:
        return 2
AreEqual(ret_from_finally2(True), 1)
AreEqual(ret_from_finally2(False), 2)
# The finally return also swallows the ZeroDivisionError from the try body.
def ret_from_finally_x(x):
    try:
        1/0
    finally:
        return x
AreEqual(ret_from_finally_x("Hi"), "Hi")
# A raise inside finally replaces the in-flight exception.
def ret_from_finally_x2():
    try:
        1/0
    finally:
        raise AssertionError("This one")
try:
    ret_from_finally_x2()
except AssertionError as e:
    AreEqual(e.args[0], "This one")
else:
    Fail("Expected AssertionError, got none")
# A bare try/finally with empty suites is valid at module level.
try:
    pass
finally:
    pass
# The try block can only have one default except clause, and it must be last
try_syntax_error_tests = [
"""
try:
pass
except:
pass
except Exception, e:
pass
""",
"""
try:
pass
except Exception, e:
pass
except:
pass
except:
pass
""",
"""
try:
pass
except:
pass
except:
pass
"""
]
for code in try_syntax_error_tests:
AssertError(SyntaxError, compile, code, "code", "exec")
def test_break_in_else_clause():
    """'break' inside a loop's else clause is outside the loop body and
    must raise SyntaxError when the code is exec'd."""
    def f():
        exec ('''
while i >= 0:
    pass
else:
    break''')
    AssertError(SyntaxError, f)
#Just make sure these don't throw
print("^L")
temp = 7
print(temp)
print("No ^L's...")
# keep this at the end of the file, do not insert anything below this line
def endoffile():
    """A trailing comment after the final statement must still parse."""
    return "Hi" # and some comment here
def test_syntaxerror_text():
method_missing_colon = (" def MethodTwo(self)\n", """
class HasASyntaxException:
def MethodOne(self):
print 'hello'
print 'world'
print 'again'
def MethodTwo(self)
print 'world'""")
if is_cpython: #http://ironpython.codeplex.com/workitem/28380
function_missing_colon1 = ("def f()\n", "def f()")
else:
function_missing_colon1 = ("def f()", "def f()")
function_missing_colon2 = ("def f()\n", "def f()\n")
if is_cpython: #http://ironpython.codeplex.com/workitem/28380
function_missing_colon3 = ("def f()\n", "def f()\r\n")
function_missing_colon4 = ("def f()\n", "def f()\r")
else:
function_missing_colon3 = ("def f()\r\n", "def f()\r\n")
function_missing_colon4 = ("def f()\r", "def f()\r")
function_missing_colon2a = ("def f()\n", "print 1\ndef f()\nprint 3")
if is_cpython: #http://ironpython.codeplex.com/workitem/28380
function_missing_colon3a = ("def f()\n", "print 1\ndef f()\r\nprint 3")
function_missing_colon4a = ("def f()\n", "print 1\ndef f()\rprint 3")
else:
function_missing_colon3a = ("def f()\r\n", "print 1\ndef f()\r\nprint 3")
function_missing_colon4a = ("def f()\rprint 3", "print 1\ndef f()\rprint 3")
tests = (
method_missing_colon,
#function_missing_body,
function_missing_colon1,
function_missing_colon2,
function_missing_colon3,
function_missing_colon4,
function_missing_colon2a,
function_missing_colon3a,
function_missing_colon4a,
)
for expectedText, testCase in tests:
try:
exec(testCase)
except SyntaxError as e:
AreEqual(e.text, expectedText)
def test_error_parameters():
tests = [#("if 1:", 0x200, ('unexpected EOF while parsing', ('dummy', 1, 5, 'if 1:')) ),
("if 1:\n", 0x200, ('unexpected EOF while parsing', ('dummy', 1, 6, 'if 1:\n')) ),
#("if 1:", 0x000, ('unexpected EOF while parsing', ('dummy', 1, 5, 'if 1:')) ),
("if 1:\n", 0x000, ('unexpected EOF while parsing', ('dummy', 1, 6, 'if 1:\n')) ),
("if 1:\n\n", 0x200, ('expected an indented block', ('dummy', 2, 1, '\n')) ),
("if 1:\n\n", 0x000, ('expected an indented block', ('dummy', 2, 1, '\n')) ),
#("if 1:\n if 1:", 0x200, ('expected an indented block', ('dummy', 2, 7, ' if 1:')) ),
("if 1:\n if 1:\n", 0x200, ('expected an indented block', ('dummy', 2, 8, ' if 1:\n')) ),
#("if 1:\n if 1:", 0x000, ('expected an indented block', ('dummy', 2, 7, ' if 1:')) ),
("if 1:\n if 1:\n", 0x000, ('expected an indented block', ('dummy', 2, 8, ' if 1:\n')) ),
("if 1:\n if 1:\n\n", 0x200, ('expected an indented block', ('dummy', 3, 1, '\n')) ),
("if 1:\n if 1:\n\n", 0x000, ('expected an indented block', ('dummy', 3, 1, '\n')) ),
("class MyClass(object):\n\tabc = 42\n\tdef __new__(cls):\n", 0x200, ('expected an indented block', ('dummy', 3, 19, '\tdef __new__(cls):\n')) ),
("class MyClass(object):\n\tabc = 42\n\tdef __new__(cls):\n", 0x000, ('expected an indented block', ('dummy', 3, 19, '\tdef __new__(cls):\n')) ),
("def Foo():\n\n # comment\n\n Something = -1\n\n\n\n ", 0x000, ('unindent does not match any outer indentation level', ('dummy', 9, 2, ' '))),
("def Foo():\n\n # comment\n\n Something = -1\n\n\n\n ", 0x200, ('unindent does not match any outer indentation level', ('dummy', 9, 2, ' '))),
("def Foo():\n\n # comment\n\n Something = -1\n\n\n\n ", 0x000, ('unindent does not match any outer indentation level', ('dummy', 9, 3, ' '))),
("def Foo():\n\n # comment\n\n Something = -1\n\n\n\n ", 0x200, ('unindent does not match any outer indentation level', ('dummy', 9, 3, ' '))),
]
for input, flags, res in tests:
#print repr(input), flags
try:
code3 = compile(input, "dummy", "single", flags, 1)
AssertUnreachable()
except SyntaxError as err:
AreEqual(err.args, res)
try:
exec("""
def f():
x = 3
y = 5""")
AssertUnreachable()
except IndentationError as e:
AreEqual(e.lineno, 2)
@skip("win32", "silverlight") # no encoding.Default
def test_parser_recovery():
    """Exercise IronPython's parser error recovery via the DLR hosting APIs.

    Feeds deliberately malformed source to the parser (with recovery
    enabled via ParseFile(True)) and checks the error messages collected
    by a custom ErrorSink. CLR-only: requires the IronPython and
    Microsoft.Scripting assemblies.
    """
    # bunch of test infrastructure...
    import clr
    clr.AddReference('IronPython')
    clr.AddReference('Microsoft.Scripting')
    clr.AddReference('Microsoft.Dynamic')
    from Microsoft.Scripting import (
        TextContentProvider, SourceCodeKind, SourceUnit, ErrorSink,
        SourceCodeReader
        )
    from Microsoft.Scripting.Runtime import CompilerContext
    from IronPython import PythonOptions
    from IronPython.Compiler import Parser, Tokenizer, PythonCompilerOptions, Ast
    from System.IO import StringReader
    from System.Text import Encoding
    class MyErrorSink(ErrorSink):
        """ErrorSink that records every reported error as a tuple
        (message, path, span, error-code)."""
        def __init__(self):
            self.Errors = []
        def Add(self, *args):
            # The runtime calls one of two overloads; dispatch on the
            # first argument's type (str => path-based overload).
            if type(args[0]) is str:
                self.AddWithPath(*args)
            else:
                self.AddWithSourceUnit(*args)
        def AddWithPath(self, message, path, code, line, span, error, severity):
            err = (
                message,
                path,
                span,
                error
            )
            self.Errors.append(err)
        def AddWithSourceUnit(self, source, message, span, errorCode, severity):
            err = (
                message,
                source.Path,
                span,
                errorCode
            )
            self.Errors.append(err)
    class MyTextContentProvider(TextContentProvider):
        """Wraps a Python string as a DLR text content provider."""
        def __init__(self, text):
            self.text = text
        def GetReader(self):
            return SourceCodeReader(StringReader(self.text), Encoding.Default)
    def parse_text(text):
        """Parse `text` with error recovery on; return the populated sink."""
        errorSink = MyErrorSink()
        sourceUnit = SourceUnit(
            clr.GetCurrentRuntime().GetLanguageByName('python'),
            MyTextContentProvider(text),
            'foo',
            SourceCodeKind.File
        )
        parser = Parser.CreateParser(
            CompilerContext(sourceUnit, PythonCompilerOptions(), errorSink),
            PythonOptions()
        )
        parser.ParseFile(True)
        return errorSink
    def TestErrors(text, errors):
        """Assert that parsing `text` reports exactly the messages in `errors`."""
        res = parse_text(text)
        AreEqual(len(res.Errors), len(errors))
        for curErr, expectedMsg in zip(res.Errors, errors):
            AreEqual(curErr[0], expectedMsg)
    def PrintErrors(text):
        """helper for creating new tests"""
        errors = parse_text(text)
        print()
        for err in errors.Errors:
            print(err)
    TestErrors("""class
def x(self):
    pass""", ["unexpected token '<newline>'"])
    TestErrors("""class x
def x(self):
    pass
""", ["unexpected token '<newline>'"])
    TestErrors("""class x(
def x(self):
    pass""", ["unexpected token 'def'"])
    TestErrors("""class X:
    if x:
        def x(): pass""", ['expected an indented block'])
    TestErrors("""class X:
    if x is None:
        x =
    def x(self): pass""", ["unexpected token '<newline>'"])
    TestErrors("""class X:
    def f(
    def g(self): pass""", ["unexpected token 'def'"])
    TestErrors("""class X:
    def f(*
    def g(self): pass""", ["unexpected token 'def'"])
    TestErrors("""class X:
    def f(**
    def g(self): pass""", ["unexpected token 'def'"])
    TestErrors("""class X:
    def f(*a, **
    def g(self): pass""", ["unexpected token 'def'"])
    TestErrors("""f() += 1""", ["illegal expression for augmented assignment"])
def test_syntax_warnings():
    """Verify SyntaxWarning text and line numbers for names assigned or
    used before their `global` declaration."""
    # syntax error warnings are outputted using warnings.showwarning. our own warning trapper therefore
    # doesn't see them. So we trap stderr here instead. We could use CPython's warning trapper if we
    # checked for the presence of the stdlib.
    with stderr_trapper() as trapper:
        compile("def f():\n a = 1\n global a\n", "", "exec")
    AreEqual(trapper.messages, [":3: SyntaxWarning: name 'a' is assigned to before global declaration"])
    with stderr_trapper() as trapper:
        compile("def f():\n def a(): pass\n global a\n", "", "exec")
    AreEqual(trapper.messages, [":3: SyntaxWarning: name 'a' is assigned to before global declaration"])
    with stderr_trapper() as trapper:
        compile("def f():\n for a in []: pass\n global a\n", "", "exec")
    AreEqual(trapper.messages, [":3: SyntaxWarning: name 'a' is assigned to before global declaration"])
    with stderr_trapper() as trapper:
        # multiple declarations: only the last one should warn
        compile("def f():\n global a\n a = 1\n global a\n", "", "exec")
    AreEqual(trapper.messages, [":4: SyntaxWarning: name 'a' is assigned to before global declaration"])
    with stderr_trapper() as trapper:
        # read (not write) before the declaration wording differs: "used"
        compile("def f():\n print a\n global a\n", "", "exec")
    AreEqual(trapper.messages, [":3: SyntaxWarning: name 'a' is used prior to global declaration"])
    with stderr_trapper() as trapper:
        compile("def f():\n a = 1\n global a\n global a\n a = 1", "", "exec")
    AreEqual(trapper.messages,
             [":3: SyntaxWarning: name 'a' is assigned to before global declaration",
              ":4: SyntaxWarning: name 'a' is assigned to before global declaration"])
    with stderr_trapper() as trapper:
        # module-level (no enclosing function) also warns
        compile("x = 10\nglobal x\n", "", "exec")
    AreEqual(trapper.messages, [":2: SyntaxWarning: name 'x' is assigned to before global declaration"])
#--MAIN------------------------------------------------------------------------
# Run this module's tests via the IronPython test driver.
run_test(__name__)
|
googleapis/googleapis-gen | refs/heads/master | google/cloud/bigquery/reservation/v1beta1/bigquery-reservation-v1beta1-py/google/cloud/bigquery/reservation_v1beta1/services/reservation_service/transports/__init__.py | 2 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from typing import Dict, Type
from .base import ReservationServiceTransport
from .grpc import ReservationServiceGrpcTransport
from .grpc_asyncio import ReservationServiceGrpcAsyncIOTransport
# Compile a registry of transports.
# Maps the transport name ("grpc" / "grpc_asyncio") to its implementation
# class; generated client code looks transports up here by name.
_transport_registry = OrderedDict() # type: Dict[str, Type[ReservationServiceTransport]]
_transport_registry['grpc'] = ReservationServiceGrpcTransport
_transport_registry['grpc_asyncio'] = ReservationServiceGrpcAsyncIOTransport
# Public API of this sub-package.
__all__ = (
    'ReservationServiceTransport',
    'ReservationServiceGrpcTransport',
    'ReservationServiceGrpcAsyncIOTransport',
)
|
dato-code/numpy | refs/heads/master | numpy/core/machar.py | 71 | """
Machine arithmetics - determine the parameters of the
floating-point arithmetic system
Author: Pearu Peterson, September 2003
"""
from __future__ import division, absolute_import, print_function
__all__ = ['MachAr']
from numpy.core.fromnumeric import any
from numpy.core.numeric import errstate
# Need to speed this up...especially for longfloat
class MachAr(object):
    """
    Diagnosing machine parameters.

    Attributes
    ----------
    ibeta : int
        Radix in which numbers are represented.
    it : int
        Number of base-`ibeta` digits in the floating point mantissa M.
    machep : int
        Exponent of the smallest (most negative) power of `ibeta` that,
        added to 1.0, gives something different from 1.0
    eps : float
        Floating-point number ``beta**machep`` (floating point precision)
    negep : int
        Exponent of the smallest power of `ibeta` that, substracted
        from 1.0, gives something different from 1.0.
    epsneg : float
        Floating-point number ``beta**negep``.
    iexp : int
        Number of bits in the exponent (including its sign and bias).
    minexp : int
        Smallest (most negative) power of `ibeta` consistent with there
        being no leading zeros in the mantissa.
    xmin : float
        Floating point number ``beta**minexp`` (the smallest [in
        magnitude] usable floating value).
    maxexp : int
        Smallest (positive) power of `ibeta` that causes overflow.
    xmax : float
        ``(1-epsneg) * beta**maxexp`` (the largest [in magnitude]
        usable floating value).
    irnd : int
        In ``range(6)``, information on what kind of rounding is done
        in addition, and on how underflow is handled.
    ngrd : int
        Number of 'guard digits' used when truncating the product
        of two mantissas to fit the representation.
    epsilon : float
        Same as `eps`.
    tiny : float
        Same as `xmin`.
    huge : float
        Same as `xmax`.
    precision : float
        ``- int(-log10(eps))``
    resolution : float
        ``- 10**(-precision)``

    Parameters
    ----------
    float_conv : function, optional
        Function that converts an integer or integer array to a float
        or float array. Default is `float`.
    int_conv : function, optional
        Function that converts a float or float array to an integer or
        integer array. Default is `int`.
    float_to_float : function, optional
        Function that converts a float array to float. Default is `float`.
        Note that this does not seem to do anything useful in the current
        implementation.
    float_to_str : function, optional
        Function that converts a single float to a string. Default is
        ``lambda v:'%24.16e' %v``.
    title : str, optional
        Title that is printed in the string representation of `MachAr`.

    See Also
    --------
    finfo : Machine limits for floating point types.
    iinfo : Machine limits for integer types.

    References
    ----------
    .. [1] Press, Teukolsky, Vetterling and Flannery,
       "Numerical Recipes in C++," 2nd ed,
       Cambridge University Press, 2002, p. 31.

    """
    def __init__(self, float_conv=float,int_conv=int,
                 float_to_float=float,
                 float_to_str = lambda v:'%24.16e' % v,
                 title = 'Python floating point number'):
        """
        float_conv - convert integer to float (array)
        int_conv   - convert float (array) to integer
        float_to_float - convert float array to float
        float_to_str - convert array float to str
        title        - description of used floating point numbers
        """
        # We ignore all errors here because we are purposely triggering
        # underflow to detect the properties of the runninng arch.
        with errstate(under='ignore'):
            self._do_init(float_conv, int_conv, float_to_float, float_to_str, title)
    def _do_init(self, float_conv, int_conv, float_to_float, float_to_str, title):
        """Probe the floating-point system (machar algorithm, see [1])
        and store the discovered parameters on ``self``.

        Every probe below is a ``for``/``else`` loop: the ``else`` clause
        raises if the loop exhausted ``max_iterN`` iterations without
        hitting ``break``, i.e. the probe failed to converge.
        """
        max_iterN = 10000
        msg = "Did not converge after %d tries with %s"
        one = float_conv(1)
        two = one + one
        zero = one - one
        # Do we really need to do this? Aren't they 2 and 2.0?
        # Determine ibeta and beta
        a = one
        for _ in range(max_iterN):
            # double `a` until adding one no longer changes it exactly
            a = a + a
            temp = a + one
            temp1 = temp - a
            if any(temp1 - one != zero):
                break
        else:
            raise RuntimeError(msg % (_, one.dtype))
        b = one
        for _ in range(max_iterN):
            b = b + b
            temp = a + b
            itemp = int_conv(temp-a)
            if any(itemp != 0):
                break
        else:
            raise RuntimeError(msg % (_, one.dtype))
        ibeta = itemp
        beta = float_conv(ibeta)
        # Determine it and irnd
        it = -1
        b = one
        for _ in range(max_iterN):
            it = it + 1
            b = b * beta
            temp = b + one
            temp1 = temp - b
            if any(temp1 - one != zero):
                break
        else:
            raise RuntimeError(msg % (_, one.dtype))
        betah = beta / two
        a = one
        for _ in range(max_iterN):
            a = a + a
            temp = a + one
            temp1 = temp - a
            if any(temp1 - one != zero):
                break
        else:
            raise RuntimeError(msg % (_, one.dtype))
        temp = a + betah
        irnd = 0
        if any(temp-a != zero):
            irnd = 1
        tempa = a + beta
        temp = tempa + betah
        if irnd==0 and any(temp-tempa != zero):
            irnd = 2
        # Determine negep and epsneg
        negep = it + 3
        betain = one / beta
        a = one
        for i in range(negep):
            a = a * betain
        b = a
        for _ in range(max_iterN):
            temp = one - a
            if any(temp-one != zero):
                break
            a = a * beta
            negep = negep - 1
            # Prevent infinite loop on PPC with gcc 4.0:
            if negep < 0:
                raise RuntimeError("could not determine machine tolerance "
                                   "for 'negep', locals() -> %s" % (locals()))
        else:
            raise RuntimeError(msg % (_, one.dtype))
        negep = -negep
        epsneg = a
        # Determine machep and eps
        machep = - it - 3
        a = b
        for _ in range(max_iterN):
            temp = one + a
            if any(temp-one != zero):
                break
            a = a * beta
            machep = machep + 1
        else:
            raise RuntimeError(msg % (_, one.dtype))
        eps = a
        # Determine ngrd
        ngrd = 0
        temp = one + eps
        if irnd==0 and any(temp*one - one != zero):
            ngrd = 1
        # Determine iexp
        i = 0
        k = 1
        z = betain
        t = one + eps
        nxres = 0
        for _ in range(max_iterN):
            y = z
            z = y*y
            a = z*one # Check here for underflow
            temp = z*t
            if any(a+a == zero) or any(abs(z)>=y):
                break
            temp1 = temp * betain
            if any(temp1*beta == z):
                break
            i = i + 1
            k = k + k
        else:
            raise RuntimeError(msg % (_, one.dtype))
        if ibeta != 10:
            iexp = i + 1
            mx = k + k
        else:
            # decimal machines need a different exponent-width estimate
            iexp = 2
            iz = ibeta
            while k >= iz:
                iz = iz * ibeta
                iexp = iexp + 1
            mx = iz + iz - 1
        # Determine minexp and xmin
        for _ in range(max_iterN):
            xmin = y
            y = y * betain
            a = y * one
            temp = y * t
            if any(a+a != zero) and any(abs(y) < xmin):
                k = k + 1
                temp1 = temp * betain
                if any(temp1*beta == y) and any(temp != y):
                    # gradual-underflow (denormal) region detected
                    nxres = 3
                    xmin = y
                    break
            else:
                break
        else:
            raise RuntimeError(msg % (_, one.dtype))
        minexp = -k
        # Determine maxexp, xmax
        if mx <= k + k - 3 and ibeta != 10:
            mx = mx + mx
            iexp = iexp + 1
        maxexp = mx + minexp
        irnd = irnd + nxres
        if irnd >= 2:
            maxexp = maxexp - 2
        i = maxexp + minexp
        if ibeta == 2 and not i:
            maxexp = maxexp - 1
        if i > 20:
            maxexp = maxexp - 1
        if any(a != y):
            maxexp = maxexp - 2
        xmax = one - epsneg
        if any(xmax*one != xmax):
            xmax = one - beta*epsneg
        xmax = xmax / (xmin*beta*beta*beta)
        i = maxexp + minexp + 3
        for j in range(i):
            if ibeta==2:
                xmax = xmax + xmax
            else:
                xmax = xmax * beta
        # Publish everything on the instance.
        self.ibeta = ibeta
        self.it = it
        self.negep = negep
        self.epsneg = float_to_float(epsneg)
        self._str_epsneg = float_to_str(epsneg)
        self.machep = machep
        self.eps = float_to_float(eps)
        self._str_eps = float_to_str(eps)
        self.ngrd = ngrd
        self.iexp = iexp
        self.minexp = minexp
        self.xmin = float_to_float(xmin)
        self._str_xmin = float_to_str(xmin)
        self.maxexp = maxexp
        self.xmax = float_to_float(xmax)
        self._str_xmax = float_to_str(xmax)
        self.irnd = irnd
        self.title = title
        # Commonly used parameters
        self.epsilon = self.eps
        self.tiny = self.xmin
        self.huge = self.xmax
        import math
        self.precision = int(-math.log10(float_to_float(self.eps)))
        ten = two + two + two + two + two
        resolution = ten ** (-self.precision)
        self.resolution = float_to_float(resolution)
        self._str_resolution = float_to_str(resolution)
    def __str__(self):
        """Return a formatted summary of the detected parameters."""
        return '''\
Machine parameters for %(title)s
---------------------------------------------------------------------
ibeta=%(ibeta)s it=%(it)s iexp=%(iexp)s ngrd=%(ngrd)s irnd=%(irnd)s
machep=%(machep)s eps=%(_str_eps)s (beta**machep == epsilon)
negep =%(negep)s epsneg=%(_str_epsneg)s (beta**epsneg)
minexp=%(minexp)s xmin=%(_str_xmin)s (beta**minexp == tiny)
maxexp=%(maxexp)s xmax=%(_str_xmax)s ((1-epsneg)*beta**maxexp == huge)
---------------------------------------------------------------------
''' % self.__dict__
if __name__ == '__main__':
    # Ad-hoc check: print the parameters detected for Python floats.
    print(MachAr())
|
WIPACrepo/iceprod | refs/heads/master | tests/server/daemon_test.py | 1 | """
Test script for daemon
"""
from __future__ import absolute_import, division, print_function
from tests.util import unittest_reporter, glob_tests
import logging
logger = logging.getLogger('daemon_test')
import os
import sys
import time
import random
from datetime import datetime,timedelta
from contextlib import contextmanager
from functools import partial
import shutil
import tempfile
import subprocess
import signal
import stat
import multiprocessing
try:
import cPickle as pickle
except:
import pickle
import unittest
from iceprod.server import daemon
class daemon_test(unittest.TestCase):
    """Lifecycle tests for iceprod.server.daemon.Daemon
    (start / stop / restart / kill, plus chdir and umask handling)."""
    def setUp(self):
        super(daemon_test,self).setUp()
        # Scratch directory; also used as the daemon's working directory.
        self.test_dir = tempfile.mkdtemp(dir=os.getcwd())
    def tearDown(self):
        shutil.rmtree(self.test_dir)
        super(daemon_test,self).tearDown()
    @unittest_reporter
    def test_01_Daemon(self):
        """Test daemon"""
        def main(cfgfile,cfgdata):
            """Daemonized entry point: write a marker file, then wait for
            a 'stop' (SIGINT) or 'kill' (SIGQUIT) message."""
            message_queue = multiprocessing.Queue()
            def handler2(signum, frame):
                # SIGINT -> graceful stop
                logging.info('Signal handler2 called with signal %s' % signum)
                logging.info('Stopping...')
                message_queue.put('stop')
            def handler3(signum, frame):
                # SIGQUIT -> hard kill: queue the message, then exit
                logging.info('Signal handler3 called with signal %s' % signum)
                logging.info('Killing...')
                message_queue.put('kill')
                time.sleep(2)
                sys.exit(1)
            signal.signal(signal.SIGINT, handler2)
            signal.signal(signal.SIGQUIT, handler3)
            # Marker file the test uses to verify chdir/umask took effect.
            with open('test','w') as f:
                f.write('test')
            while True:
                try:
                    m = message_queue.get(True,10)
                except Exception:
                    # BUG FIX: queue.Empty on timeout used to fall through
                    # with `m` unbound, raising NameError below. Keep
                    # waiting for the next message instead.
                    continue
                if m == 'stop':
                    break
                elif m == 'kill':
                    break
        pidfile = os.path.expanduser(os.path.expandvars(
                os.path.join(self.test_dir,'pidfile')))
        chdir = os.path.expanduser(os.path.expandvars(self.test_dir))
        umask = 0o077
        stdout = os.path.join(self.test_dir,'stdout')
        stderr = os.path.join(self.test_dir,'stderr')
        d = daemon.Daemon(pidfile,partial(main,'cfgfile','cfgdata'),
                          chdir=chdir,
                          umask=umask,
                          stdout=stdout,
                          stderr=stderr)
        # Start in a subprocess since daemonizing forks/exits the caller.
        multiprocessing.Process(target=d.start).start()
        time.sleep(1)
        if not os.path.exists(pidfile):
            raise Exception('pidfile creation failed')
        if not os.path.exists(os.path.join(chdir,'test')):
            raise Exception('chdir failed')
        st = os.stat(os.path.join(chdir,'test'))
        # umask 0o077 on mode 0o666 open() must yield owner-only 0o600.
        if oct(stat.S_IMODE(st[stat.ST_MODE])) != '0o600':
            logger.info('mode: %r',oct(stat.S_IMODE(st[stat.ST_MODE])))
            raise Exception('umask failed')
        d.stop()
        time.sleep(1)
        if os.path.exists(pidfile):
            raise Exception('pidfile still exists - stop failed')
        multiprocessing.Process(target=d.start).start()
        time.sleep(1)
        if not os.path.exists(pidfile):
            raise Exception('pidfile creation failed for start2')
        multiprocessing.Process(target=d.restart).start()
        time.sleep(2)
        if not os.path.exists(pidfile):
            raise Exception('pidfile creation failed for restart')
        d.kill()
        time.sleep(3)
        if os.path.exists(pidfile):
            raise Exception('pidfile still exists - kill failed')
def load_tests(loader, tests, pattern):
    """unittest load_tests hook: build this module's suite, honoring the
    test-name glob filter applied by glob_tests()."""
    selected = glob_tests(loader.getTestCaseNames(daemon_test))
    bundle = unittest.TestSuite()
    bundle.addTests(loader.loadTestsFromNames(selected, daemon_test))
    return bundle
|
pli3/enigma2-git | refs/heads/master | lib/python/Components/Timezones.py | 2 | import xml.etree.cElementTree
from os import environ, unlink, symlink
import time
class Timezones:
    """Registry of (name, zone) timezone pairs read from /etc/timezone.xml.

    Falls back to a single ("UTC", "UTC") entry when the file is missing
    or cannot be parsed.
    """
    def __init__(self):
        self.timezones = []
        self.readTimezonesFromFile()

    def readTimezonesFromFile(self):
        """Populate self.timezones from /etc/timezone.xml (best effort)."""
        try:
            root = xml.etree.cElementTree.parse('/etc/timezone.xml').getroot()
            for zone in root.findall("zone"):
                self.timezones.append((zone.get('name', ""), zone.get('zone', "")))
        except Exception:
            # BUG FIX: was a bare `except:` which also swallowed
            # SystemExit/KeyboardInterrupt. A missing or corrupt file
            # simply falls through to the UTC default below.
            pass
        if not self.timezones:
            self.timezones = [("UTC", "UTC")]

    def activateTimezone(self, index):
        """Activate self.timezones[index]: export TZ, relink
        /etc/localtime and re-read the timezone information."""
        if len(self.timezones) <= index:
            return
        environ['TZ'] = self.timezones[index][1]
        try:
            unlink("/etc/localtime")
        except OSError:
            pass
        try:
            symlink("/usr/share/zoneinfo/%s" %(self.timezones[index][1]), "/etc/localtime")
        except OSError:
            pass
        try:
            time.tzset()
        except Exception:
            # Builds whose time module lacks tzset() use the enigma hook.
            from enigma import e_tzset
            e_tzset()

    def getTimezoneList(self):
        """Return the timezone display names as a list of str."""
        return [str(x[0]) for x in self.timezones]

    def getDefaultTimezone(self):
        # TODO return something more useful - depending on country-settings?
        t = "(GMT) Greenwich Mean Time : Dublin, Lisbon, London"
        for (a, b) in self.timezones:
            if a == t:
                return a
        # Shipped default not present in the parsed list: first entry wins.
        return self.timezones[0][0]
timezones = Timezones()
|
robbiet480/python-social-auth | refs/heads/master | examples/django_example/example/app/pipeline.py | 112 | from django.shortcuts import redirect
from social.pipeline.partial import partial
@partial
def require_email(strategy, details, user=None, is_new=False, *args, **kwargs):
    """Partial-pipeline step ensuring an email address is available.

    Skips when the request is AJAX or the user already has an email.
    For a new user without one, takes the address from the request data;
    otherwise redirects to the 'require_email' view so the user can supply
    it (the @partial decorator lets the pipeline resume afterwards).
    """
    if kwargs.get('ajax') or user and user.email:
        return
    elif is_new and not details.get('email'):
        email = strategy.request_data().get('email')
        if email:
            details['email'] = email
        else:
            return redirect('require_email')
|
DanielFloripa/GreenHop | refs/heads/master | Xbee/bibliotecas/pyserial-2.7/examples/wxTerminal.py | 22 | #!/usr/bin/env python
# generated by wxGlade 0.3.1 on Fri Oct 03 23:23:45 2003
#from wxPython.wx import *
import wx
import wxSerialConfigDialog
import serial
import threading
#----------------------------------------------------------------------
# Create an own event type, so that GUI updates can be delegated
# this is required as on some platforms only the main thread can
# access the GUI without crashing. wxMutexGuiEnter/wxMutexGuiLeave
# could be used too, but an event is more elegant.
SERIALRX = wx.NewEventType()
# binder used to subscribe to serial data receive events
EVT_SERIALRX = wx.PyEventBinder(SERIALRX, 0)
class SerialRxEvent(wx.PyCommandEvent):
    """Event carrying bytes received from the serial port to the GUI thread."""
    eventType = SERIALRX
    def __init__(self, windowID, data):
        wx.PyCommandEvent.__init__(self, self.eventType, windowID)
        self.data = data
    def Clone(self):
        """Return a copy of this event, as required by the wx event system."""
        # BUG FIX: the original constructed the clone but never returned it,
        # so Clone() yielded None.
        return self.__class__(self.GetId(), self.data)
#----------------------------------------------------------------------
# Menu item IDs.
ID_CLEAR = wx.NewId()
ID_SAVEAS = wx.NewId()
ID_SETTINGS = wx.NewId()
ID_TERM = wx.NewId()
ID_EXIT = wx.NewId()
# Newline handling modes (see TerminalSetup.newline).
NEWLINE_CR = 0
NEWLINE_LF = 1
NEWLINE_CRLF = 2
class TerminalSetup:
    """Container for terminal options (local echo, unprintable-character
    display, newline mode); handed to the TerminalSettingsDialog."""
    def __init__(self):
        # Defaults: no echo, hide unprintables, translate CR+LF.
        self.echo, self.unprintable = False, False
        self.newline = NEWLINE_CRLF
class TerminalSettingsDialog(wx.Dialog):
    """Simple dialog with common terminal settings like echo, newline mode."""
    def __init__(self, *args, **kwds):
        # `settings` is a TerminalSetup instance passed as a keyword; it is
        # removed before the remaining kwargs reach wx.Dialog.
        self.settings = kwds['settings']
        del kwds['settings']
        # begin wxGlade: TerminalSettingsDialog.__init__
        kwds["style"] = wx.DEFAULT_DIALOG_STYLE
        wx.Dialog.__init__(self, *args, **kwds)
        self.checkbox_echo = wx.CheckBox(self, -1, "Local Echo")
        self.checkbox_unprintable = wx.CheckBox(self, -1, "Show unprintable characters")
        self.radio_box_newline = wx.RadioBox(self, -1, "Newline Handling", choices=["CR only", "LF only", "CR+LF"], majorDimension=0, style=wx.RA_SPECIFY_ROWS)
        self.button_ok = wx.Button(self, -1, "OK")
        self.button_cancel = wx.Button(self, -1, "Cancel")
        self.__set_properties()
        self.__do_layout()
        # end wxGlade
        self.__attach_events()
        # Initialize the widgets from the current settings.
        self.checkbox_echo.SetValue(self.settings.echo)
        self.checkbox_unprintable.SetValue(self.settings.unprintable)
        self.radio_box_newline.SetSelection(self.settings.newline)
    def __set_properties(self):
        # begin wxGlade: TerminalSettingsDialog.__set_properties
        self.SetTitle("Terminal Settings")
        self.radio_box_newline.SetSelection(0)
        self.button_ok.SetDefault()
        # end wxGlade
    def __do_layout(self):
        # begin wxGlade: TerminalSettingsDialog.__do_layout
        sizer_2 = wx.BoxSizer(wx.VERTICAL)
        sizer_3 = wx.BoxSizer(wx.HORIZONTAL)
        sizer_4 = wx.StaticBoxSizer(wx.StaticBox(self, -1, "Input/Output"), wx.VERTICAL)
        sizer_4.Add(self.checkbox_echo, 0, wx.ALL, 4)
        sizer_4.Add(self.checkbox_unprintable, 0, wx.ALL, 4)
        sizer_4.Add(self.radio_box_newline, 0, 0, 0)
        sizer_2.Add(sizer_4, 0, wx.EXPAND, 0)
        sizer_3.Add(self.button_ok, 0, 0, 0)
        sizer_3.Add(self.button_cancel, 0, 0, 0)
        sizer_2.Add(sizer_3, 0, wx.ALL|wx.ALIGN_RIGHT, 4)
        self.SetAutoLayout(1)
        self.SetSizer(sizer_2)
        sizer_2.Fit(self)
        sizer_2.SetSizeHints(self)
        self.Layout()
        # end wxGlade
    def __attach_events(self):
        # Wire the OK/Cancel buttons to their handlers.
        self.Bind(wx.EVT_BUTTON, self.OnOK, id = self.button_ok.GetId())
        self.Bind(wx.EVT_BUTTON, self.OnCancel, id = self.button_cancel.GetId())
    def OnOK(self, events):
        """Update data with new values and close dialog."""
        self.settings.echo = self.checkbox_echo.GetValue()
        self.settings.unprintable = self.checkbox_unprintable.GetValue()
        self.settings.newline = self.radio_box_newline.GetSelection()
        self.EndModal(wx.ID_OK)
    def OnCancel(self, events):
        """Do not update data but close dialog."""
        self.EndModal(wx.ID_CANCEL)
class TerminalFrame(wx.Frame):
    """Simple terminal program for wxPython.

    NOTE(review): this module is Python 2 only (``except X, e`` syntax,
    ``print`` statement, ``file()``, ``unicode`` below) — confirm the
    target interpreter before modernizing.
    """
    def __init__(self, *args, **kwds):
        self.serial = serial.Serial()
        self.serial.timeout = 0.5 #make sure that the alive event can be checked from time to time
        self.settings = TerminalSetup() #placeholder for the settings
        self.thread = None
        self.alive = threading.Event()
        # begin wxGlade: TerminalFrame.__init__
        kwds["style"] = wx.DEFAULT_FRAME_STYLE
        wx.Frame.__init__(self, *args, **kwds)
        self.text_ctrl_output = wx.TextCtrl(self, -1, "", style=wx.TE_MULTILINE|wx.TE_READONLY)
        # Menu Bar
        self.frame_terminal_menubar = wx.MenuBar()
        self.SetMenuBar(self.frame_terminal_menubar)
        wxglade_tmp_menu = wx.Menu()
        wxglade_tmp_menu.Append(ID_CLEAR, "&Clear", "", wx.ITEM_NORMAL)
        wxglade_tmp_menu.Append(ID_SAVEAS, "&Save Text As...", "", wx.ITEM_NORMAL)
        wxglade_tmp_menu.AppendSeparator()
        wxglade_tmp_menu.Append(ID_SETTINGS, "&Port Settings...", "", wx.ITEM_NORMAL)
        wxglade_tmp_menu.Append(ID_TERM, "&Terminal Settings...", "", wx.ITEM_NORMAL)
        wxglade_tmp_menu.AppendSeparator()
        wxglade_tmp_menu.Append(ID_EXIT, "&Exit", "", wx.ITEM_NORMAL)
        self.frame_terminal_menubar.Append(wxglade_tmp_menu, "&File")
        # Menu Bar end
        self.__set_properties()
        self.__do_layout()
        # end wxGlade
        self.__attach_events() #register events
        self.OnPortSettings(None) #call setup dialog on startup, opens port
        if not self.alive.isSet():
            self.Close()
    def StartThread(self):
        """Start the receiver thread"""
        self.thread = threading.Thread(target=self.ComPortThread)
        self.thread.setDaemon(1)
        self.alive.set()
        self.thread.start()
    def StopThread(self):
        """Stop the receiver thread, wait util it's finished."""
        if self.thread is not None:
            self.alive.clear() #clear alive event for thread
            self.thread.join() #wait until thread has finished
            self.thread = None
    def __set_properties(self):
        # begin wxGlade: TerminalFrame.__set_properties
        self.SetTitle("Serial Terminal")
        self.SetSize((546, 383))
        # end wxGlade
    def __do_layout(self):
        # begin wxGlade: TerminalFrame.__do_layout
        sizer_1 = wx.BoxSizer(wx.VERTICAL)
        sizer_1.Add(self.text_ctrl_output, 1, wx.EXPAND, 0)
        self.SetAutoLayout(1)
        self.SetSizer(sizer_1)
        self.Layout()
        # end wxGlade
    def __attach_events(self):
        #register events at the controls
        self.Bind(wx.EVT_MENU, self.OnClear, id = ID_CLEAR)
        self.Bind(wx.EVT_MENU, self.OnSaveAs, id = ID_SAVEAS)
        self.Bind(wx.EVT_MENU, self.OnExit, id = ID_EXIT)
        self.Bind(wx.EVT_MENU, self.OnPortSettings, id = ID_SETTINGS)
        self.Bind(wx.EVT_MENU, self.OnTermSettings, id = ID_TERM)
        self.text_ctrl_output.Bind(wx.EVT_CHAR, self.OnKey)
        self.Bind(EVT_SERIALRX, self.OnSerialRead)
        self.Bind(wx.EVT_CLOSE, self.OnClose)
    def OnExit(self, event):
        """Menu point Exit"""
        self.Close()
    def OnClose(self, event):
        """Called on application shutdown."""
        self.StopThread() #stop reader thread
        self.serial.close() #cleanup
        self.Destroy() #close windows, exit app
    def OnSaveAs(self, event):
        """Save contents of output window."""
        filename = None
        dlg = wx.FileDialog(None, "Save Text As...", ".", "", "Text File|*.txt|All Files|*", wx.SAVE)
        if dlg.ShowModal() == wx.ID_OK:
            filename = dlg.GetPath()
        dlg.Destroy()
        if filename is not None:
            f = file(filename, 'w')
            text = self.text_ctrl_output.GetValue()
            if type(text) == unicode:
                text = text.encode("latin1") #hm, is that a good asumption?
            f.write(text)
            f.close()
    def OnClear(self, event):
        """Clear contents of output window."""
        self.text_ctrl_output.Clear()
    def OnPortSettings(self, event=None):
        """Show the portsettings dialog. The reader thread is stopped for the
           settings change."""
        if event is not None: #will be none when called on startup
            self.StopThread()
            self.serial.close()
        ok = False
        # Loop until the port opens or the user cancels the startup dialog.
        while not ok:
            dialog_serial_cfg = wxSerialConfigDialog.SerialConfigDialog(None, -1, "",
                show=wxSerialConfigDialog.SHOW_BAUDRATE|wxSerialConfigDialog.SHOW_FORMAT|wxSerialConfigDialog.SHOW_FLOW,
                serial=self.serial
            )
            result = dialog_serial_cfg.ShowModal()
            dialog_serial_cfg.Destroy()
            #open port if not called on startup, open it on startup and OK too
            if result == wx.ID_OK or event is not None:
                try:
                    self.serial.open()
                except serial.SerialException, e:
                    dlg = wx.MessageDialog(None, str(e), "Serial Port Error", wx.OK | wx.ICON_ERROR)
                    dlg.ShowModal()
                    dlg.Destroy()
                else:
                    self.StartThread()
                    self.SetTitle("Serial Terminal on %s [%s, %s%s%s%s%s]" % (
                        self.serial.portstr,
                        self.serial.baudrate,
                        self.serial.bytesize,
                        self.serial.parity,
                        self.serial.stopbits,
                        self.serial.rtscts and ' RTS/CTS' or '',
                        self.serial.xonxoff and ' Xon/Xoff' or '',
                        )
                    )
                    ok = True
            else:
                #on startup, dialog aborted
                self.alive.clear()
                ok = True
    def OnTermSettings(self, event):
        """Menu point Terminal Settings. Show the settings dialog
           with the current terminal settings"""
        dialog = TerminalSettingsDialog(None, -1, "", settings=self.settings)
        result = dialog.ShowModal()
        dialog.Destroy()
    def OnKey(self, event):
        """Key event handler. if the key is in the ASCII range, write it to the serial port.
           Newline handling and local echo is also done here."""
        code = event.GetKeyCode()
        if code < 256: #is it printable?
            if code == 13: #is it a newline? (check for CR which is the RETURN key)
                if self.settings.echo: #do echo if needed
                    self.text_ctrl_output.AppendText('\n')
                if self.settings.newline == NEWLINE_CR:
                    self.serial.write('\r') #send CR
                elif self.settings.newline == NEWLINE_LF:
                    self.serial.write('\n') #send LF
                elif self.settings.newline == NEWLINE_CRLF:
                    self.serial.write('\r\n') #send CR+LF
            else:
                char = chr(code)
                if self.settings.echo: #do echo if needed
                    self.text_ctrl_output.WriteText(char)
                self.serial.write(char) #send the charcater
        else:
            print "Extra Key:", code
    def OnSerialRead(self, event):
        """Handle input from the serial port."""
        text = event.data
        if self.settings.unprintable:
            # Replace control characters with their numeric form, e.g. <13>.
            text = ''.join([(c >= ' ') and c or '<%d>' % ord(c) for c in text])
        self.text_ctrl_output.AppendText(text)
    def ComPortThread(self):
        """Thread that handles the incomming traffic. Does the basic input
           transformation (newlines) and generates an SerialRxEvent"""
        while self.alive.isSet(): #loop while alive event is true
            text = self.serial.read(1) #read one, with timout
            if text: #check if not timeout
                n = self.serial.inWaiting() #look if there is more to read
                if n:
                    text = text + self.serial.read(n) #get it
                #newline transformation
                if self.settings.newline == NEWLINE_CR:
                    text = text.replace('\r', '\n')
                elif self.settings.newline == NEWLINE_LF:
                    pass
                elif self.settings.newline == NEWLINE_CRLF:
                    text = text.replace('\r\n', '\n')
                # Hand the data to the GUI thread via an event.
                event = SerialRxEvent(self.GetId(), text)
                self.GetEventHandler().AddPendingEvent(event)
                #~ self.OnSerialRead(text) #output text in window
class MyApp(wx.App):
    """wx application bootstrap: creates and shows the terminal frame."""
    def OnInit(self):
        wx.InitAllImageHandlers()
        frame = TerminalFrame(None, -1, "")
        self.SetTopWindow(frame)
        frame.Show(1)
        return 1
# end of class MyApp
if __name__ == "__main__":
    # Launch the terminal application and enter the GUI event loop.
    app = MyApp(0)
    app.MainLoop()
|
fhollanda/Stepper-Motor-Interface | refs/heads/master | adapter/bridge.py | 1 | #!adapter/env/bin/python
from flask import Flask, request, abort
from flask_restful import Api
from flask_cors import CORS
from resources.abort import Abort
from resources.captures import Capture, CapturesList
from resources.copyright import Copyright
from resources.movement import SingleAxisMove, SingleAxisMoveAndCapture, DoubleAxisMoveAndCapture
from resources.scope import SetScopeConfig
import settings
app = Flask("adapter")
# Allow cross-origin requests (the web UI is served from another origin).
CORS(app)
@app.before_request
def only_json():
    # Runs before every route: reject any request that carries a body but
    # is not JSON-encoded — all adapter endpoints expect application/json.
    if request.data and not request.is_json:
        abort(400)
api = Api(app)
# Route registrations, grouped by resource area.
#ABORT
api.add_resource(Abort, '/adapter/api/abort')
#CAPTURES
api.add_resource(CapturesList, '/adapter/api/captures')
api.add_resource(Capture, '/adapter/api/capture/<uuid>', '/adapter/api/capture/<uuid>/', '/adapter/api/capture/<uuid>/<fileformat>')
#COPYRIGHT
api.add_resource(Copyright, '/adapter/api/copyright')
#MOVEMENT
api.add_resource(SingleAxisMove, '/adapter/api/move/<axis>')
api.add_resource(SingleAxisMoveAndCapture, '/adapter/api/scan/<axis>')
api.add_resource(DoubleAxisMoveAndCapture, '/adapter/api/scan')
#SCOPE
api.add_resource(SetScopeConfig, '/adapter/api/scope/config')
if __name__ == '__main__':
    settings.init()
    # NOTE(review): debug=True enables the Werkzeug debugger — confirm this
    # entry point is never used in production.
    app.run(debug=True, port=5001, threaded=True)
fujunwei/chromium-crosswalk | refs/heads/master | tools/perf/page_sets/maps.py | 3 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import page as page_module
from telemetry.page import page_set as page_set_module
from page_sets import webgl_supported_shared_state
class MapsPage(page_module.Page):
    """Telemetry page driving the local Google Maps 'tracker' harness."""
    def __init__(self, page_set):
        super(MapsPage, self).__init__(
            url='http://localhost:10020/tracker.html',
            page_set=page_set,
            name='Maps.maps_002',
            shared_page_state_class=(
                webgl_supported_shared_state.WebGLSupportedSharedState))
        # Network traffic is replayed from this WPR archive.
        self.archive_data_file = 'data/maps.json'
    def RunNavigateSteps(self, action_runner):
        super(MapsPage, self).RunNavigateSteps(action_runner)
        # Give the page a few seconds to settle after navigation.
        action_runner.Wait(3)
    def RunPageInteractions(self, action_runner):
        # The page sets window.testDone when its scripted run completes;
        # time out after 120 seconds.
        action_runner.WaitForJavaScriptCondition('window.testDone', 120)
class MapsPageSet(page_set_module.PageSet):
    """ Google Maps examples """
    def __init__(self):
        super(MapsPageSet, self).__init__(
            archive_data_file='data/maps.json',
            bucket=page_set_module.PUBLIC_BUCKET)
        # Single story: the local tracker page defined above.
        self.AddUserStory(MapsPage(self))
|
L34p/HeXA-CTF-2015 | refs/heads/master | account/__init__.py | 12133432 | |
webmasterraj/GaSiProMo | refs/heads/master | flask/lib/python2.7/site-packages/boto/cloudfront/identity.py | 170 | # Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import uuid
class OriginAccessIdentity(object):
    """A CloudFront origin access identity (OAI) plus its server metadata."""

    def __init__(self, connection=None, config=None, id='',
                 s3_user_id='', comment=''):
        self.connection = connection
        self.config = config
        self.id = id
        self.s3_user_id = s3_user_id
        self.comment = comment
        self.etag = None

    def startElement(self, name, attrs, connection):
        # Hand the nested config element off to its own parser object;
        # every other element is handled by endElement instead.
        if name != 'CloudFrontOriginAccessIdentityConfig':
            return None
        self.config = OriginAccessIdentityConfig()
        return self.config

    def endElement(self, name, value, connection):
        # Map known XML element names onto our attribute names; anything
        # unrecognized is stored under the element name verbatim.
        attr = {'Id': 'id',
                'S3CanonicalUserId': 's3_user_id',
                'Comment': 'comment'}.get(name, name)
        setattr(self, attr, value)

    def update(self, comment=None):
        """Push a (possibly updated) config to the service, refreshing etag."""
        replacement = OriginAccessIdentityConfig(self.connection,
                                                 self.config.caller_reference,
                                                 self.config.comment)
        if comment is not None:
            replacement.comment = comment
        self.etag = self.connection.set_origin_identity_config(
            self.id, self.etag, replacement)
        self.config = replacement

    def delete(self):
        return self.connection.delete_origin_access_identity(self.id,
                                                             self.etag)

    def uri(self):
        # The reference form CloudFront expects in distribution configs.
        return 'origin-access-identity/cloudfront/%s' % self.id
class OriginAccessIdentityConfig(object):
    """Configuration payload for a CloudFront origin access identity."""

    def __init__(self, connection=None, caller_reference='', comment=''):
        self.connection = connection
        # The caller reference makes create requests idempotent; fall back
        # to a random UUID when the caller does not supply one.
        if not caller_reference:
            caller_reference = str(uuid.uuid4())
        self.caller_reference = caller_reference
        self.comment = comment

    def to_xml(self):
        """Serialize this config to the XML body CloudFront expects."""
        parts = ['<?xml version="1.0" encoding="UTF-8"?>\n',
                 '<CloudFrontOriginAccessIdentityConfig xmlns="http://cloudfront.amazonaws.com/doc/2009-09-09/">\n',
                 ' <CallerReference>%s</CallerReference>\n' % self.caller_reference]
        if self.comment:
            parts.append(' <Comment>%s</Comment>\n' % self.comment)
        parts.append('</CloudFrontOriginAccessIdentityConfig>\n')
        return ''.join(parts)

    def startElement(self, name, attrs, connection):
        return None

    def endElement(self, name, value, connection):
        # Translate the two known element names; store anything else as-is.
        field = {'Comment': 'comment',
                 'CallerReference': 'caller_reference'}.get(name, name)
        setattr(self, field, value)
class OriginAccessIdentitySummary(object):
    """Lightweight listing entry describing one origin access identity."""

    def __init__(self, connection=None, id='',
                 s3_user_id='', comment=''):
        self.connection = connection
        self.id = id
        self.s3_user_id = s3_user_id
        self.comment = comment
        self.etag = None

    def startElement(self, name, attrs, connection):
        return None

    def endElement(self, name, value, connection):
        # Translate known XML element names to attribute names; unknown
        # elements are stored under their own name.
        attr = {'Id': 'id',
                'S3CanonicalUserId': 's3_user_id',
                'Comment': 'comment'}.get(name, name)
        setattr(self, attr, value)

    def get_origin_access_identity(self):
        """Fetch the full OriginAccessIdentity this summary refers to."""
        return self.connection.get_origin_access_identity_info(self.id)
|
CitoEngine/cito_plugin_server | refs/heads/master | staticfiles/admin/js/compress.py | 784 | #!/usr/bin/env python
import os
import optparse
import subprocess
import sys
here = os.path.dirname(__file__)
def main():
    """Compress the given .js files (or the admin defaults) with the
    Google Closure Compiler, writing <name>.min.js next to each input."""
    usage = "usage: %prog [file1..fileN]"
    description = """With no file paths given this script will automatically
compress all jQuery-based files of the admin app. Requires the Google Closure
Compiler library and Java version 6 or later."""
    parser = optparse.OptionParser(usage, description=description)
    parser.add_option("-c", dest="compiler", default="~/bin/compiler.jar",
                      help="path to Closure Compiler jar file")
    parser.add_option("-v", "--verbose",
                      action="store_true", dest="verbose")
    parser.add_option("-q", "--quiet",
                      action="store_false", dest="verbose")
    (options, args) = parser.parse_args()

    compiler = os.path.expanduser(options.compiler)
    if not os.path.exists(compiler):
        sys.exit("Google Closure compiler jar file %s not found. Please use the -c option to specify the path." % compiler)

    if not args:
        if options.verbose:
            sys.stdout.write("No filenames given; defaulting to admin scripts\n")
        args = [os.path.join(here, f) for f in [
            "actions.js", "collapse.js", "inlines.js", "prepopulate.js"]]

    for arg in args:
        if not arg.endswith(".js"):
            arg = arg + ".js"
        to_compress = os.path.expanduser(arg)
        if os.path.exists(to_compress):
            # Derive the output name from the *expanded* input path by
            # stripping only the trailing ".js" (guaranteed present by the
            # check above).  The previous code removed every ".js"
            # substring via rsplit, mangling names like "foo.jslib.js",
            # and built the output path from the unexpanded argument.
            to_compress_min = to_compress[:-len(".js")] + ".min.js"
            cmd = "java -jar %s --js %s --js_output_file %s" % (compiler, to_compress, to_compress_min)
            if options.verbose:
                sys.stdout.write("Running: %s\n" % cmd)
            subprocess.call(cmd.split())
        else:
            sys.stdout.write("File %s not found. Sure it exists?\n" % to_compress)
|
wutron/dlcoal | refs/heads/master | dlcoal/__init__.py | 2 | """
Code for the DLCoal model
(duplications, losses, and coalescence)
"""
from __future__ import division
# python libs
import copy
import os
import sys
import random
from itertools import chain, izip
import traceback
from math import *
# import dlcoal C lib
from dlcoal.ctypes_export import *
dlcoalc = load_library(["..", "lib"], "libdlcoal.so")
# add pre-bundled dependencies to the python path,
# if they are not available already
try:
import rasmus, compbio
except ImportError:
from . import dep
dep.load_deps()
import rasmus, compbio
# rasmus libs
from rasmus import stats, util, treelib
# compbio libs
from compbio import birthdeath, phylo
# dlcoal libs
from . import coal, duploss, sim
#=============================================================================
# constants
PROGRAM_NAME = u"DLCoal"
PROGRAM_VERSION_MAJOR = 1
PROGRAM_VERSION_MINOR = 0
PROGRAM_VERSION_RELEASE = 0
PROGRAM_VERSION = (PROGRAM_VERSION_MAJOR,
PROGRAM_VERSION_MINOR,
PROGRAM_VERSION_RELEASE)
if PROGRAM_VERSION_RELEASE != 0:
PROGRAM_VERSION_TEXT = "%d.%d.%d" % (PROGRAM_VERSION_MAJOR,
PROGRAM_VERSION_MINOR,
PROGRAM_VERSION_RELEASE)
else:
PROGRAM_VERSION_TEXT = "%d.%d" % (PROGRAM_VERSION_MAJOR,
PROGRAM_VERSION_MINOR)
#=============================================================================
# export c functions
ex = Exporter(globals())
export = ex.export
if dlcoalc:
export(dlcoalc, "deleteTree", c_int, [c_void_p, "tree"])
export(dlcoalc, "makeTree", c_void_p, [c_int, "nnodes",
c_int_p, "ptree"])
export(dlcoalc, "setTreeDists", c_void_p, [c_void_p, "tree",
c_float_p, "dists"])
#=============================================================================
# miscellaneous
class NullLog (object):
    """File-like sink that silently discards everything written to it."""

    def __init__(self):
        """No state is needed; the log ignores all input."""

    def write(self, text):
        """Discard *text* and return None, like a write to /dev/null."""

    def flush(self):
        """Nothing is buffered, so flushing is a no-op."""
#=============================================================================
# probability functions for DLCoal model
def prob_dlcoal_recon_topology(coal_tree, coal_recon,
                               locus_tree, locus_recon, locus_events,
                               daughters,
                               stree, n, duprate, lossrate,
                               pretime=None, premean=None,
                               nsamples=100,
                               add_spec=True, info=None):
    """
    Probability of a reconcile gene tree in the DLCoal model.

    coal_tree    -- coalescent tree
    coal_recon   -- reconciliation of coalescent tree to locus tree
    locus_tree   -- locus tree (has dup-loss)
    locus_recon  -- reconciliation of locus tree to species tree
    locus_events -- events dict for locus tree
    daughters    -- set of daughter nodes of duplications in the locus tree
    stree        -- species tree
    n            -- population sizes in species tree
    duprate      -- duplication rate
    lossrate     -- loss rate
    nsamples     -- number of Monte Carlo samples used to integrate over
                    duplication times
    add_spec     -- accepted for API compatibility but unused in this
                    function body (TODO confirm whether it can be removed)
    info         -- optional dict; when given, the component
                    log-probabilities are recorded into it

    You must also specify one of the following
    pretime -- starting time before species tree
    premean -- mean starting time before species tree

    Returns the total log probability.
    """

    # init popsizes for locus tree: each locus-tree branch inherits the
    # population size of the species branch it reconciles to
    stree_popsizes = coal.init_popsizes(stree, n)
    popsizes = {}
    for node in locus_tree:
        popsizes[node.name] = stree_popsizes[locus_recon[node].name]

    # duploss probability: P(locus tree | species tree, duprate, lossrate)
    dl_prob = duploss.prob_dup_loss(
        locus_tree, stree, locus_recon, locus_events,
        duprate, lossrate)

    # daughters probability: each duplication contributes a factor 1/2
    # (one of two lineages is chosen as the daughter)
    dups = phylo.count_dup(locus_tree, locus_events)
    d_prob = dups * log(.5)

    # integrate over duplication times using sampling
    stimes = treelib.get_tree_timestamps(stree)
    prob = prob_locus_coal_recon_topology_samples(
        coal_tree, coal_recon,
        locus_tree, locus_recon, locus_events, popsizes,
        stree, stimes,
        daughters, duprate, lossrate, nsamples,
        pretime, premean)

    # logging info
    if info is not None:
        info["duploss_prob"] = dl_prob
        info["daughters_prob"] = d_prob
        info["coal_prob"] = prob
        info["prob"] = dl_prob + d_prob + prob - log(nsamples)

    # NOTE(review): -log(nsamples) assumes `prob` is an unnormalized
    # log-sum over samples; the pure-Python fallback in
    # prob_locus_coal_recon_topology_samples already divides by nsamples —
    # confirm the C path's convention matches.
    return dl_prob + d_prob + prob - log(nsamples)
def prob_locus_coal_recon_topology_samples(
        coal_tree, coal_recon,
        locus_tree, locus_recon, locus_events, popsizes,
        stree, stimes,
        daughters, duprate, lossrate, nsamples,
        pretime=None, premean=None):
    """
    Log probability of the coalescent tree topology given the locus tree,
    integrated over `nsamples` draws of duplication times.

    Uses the C implementation when the dlcoal C library is loaded,
    otherwise falls back to a pure-Python Monte Carlo loop.
    """
    if dlcoalc:
        # sample some reasonable branch lengths first (logging only; the C
        # routine resamples internally)
        locus_times = duploss.sample_dup_times(
            locus_tree, stree, locus_recon, duprate, lossrate, pretime,
            premean,
            events=locus_events)
        treelib.set_dists_from_timestamps(locus_tree, locus_times)

        # use C code
        return coal.prob_locus_coal_recon_topology_samples(
            coal_tree, coal_recon,
            locus_tree, locus_recon, locus_events, popsizes,
            stree, stimes,
            daughters, duprate, lossrate, nsamples, pretime, premean)
    else:
        # python backup: average the topology likelihood over sampled
        # duplication times
        prob = 0.0
        for i in xrange(nsamples):
            # sample duplication times
            locus_times = duploss.sample_dup_times(
                locus_tree, stree, locus_recon, duprate, lossrate, pretime,
                premean,
                events=locus_events)
            treelib.set_dists_from_timestamps(locus_tree, locus_times)

            # coal topology probability
            coal_prob = prob_locus_coal_recon_topology(
                coal_tree, coal_recon, locus_tree, popsizes, daughters)

            prob += exp(coal_prob)
        # NOTE(review): this branch divides by nsamples here while the
        # caller also subtracts log(nsamples) — verify against the C path.
        prob = util.safelog(prob / nsamples)
        return prob
def prob_locus_coal_recon_topology(tree, recon, locus_tree, n, daughters):
    """
    Returns the log probability of a reconciled gene tree ('tree', 'recon')
    from the coalescent model given a locus tree 'locus_tree',
    population sizes 'n', and daughters set 'daughters'
    """

    # initialize popsizes, lineage counts, and divergence times
    popsizes = coal.init_popsizes(locus_tree, n)
    lineages = coal.count_lineages_per_branch(tree, recon, locus_tree)
    locus_times = treelib.get_tree_timestamps(locus_tree)

    # calc log probability
    lnp = coal.pmrt(
        tree, recon, locus_tree, popsizes, lineages=lineages)

    # Collect, for the coalescent subtree hanging below `node`, the leaf
    # branches and their starting gene counts.  Daughter branches are
    # bottlenecks: they always start with exactly one lineage.
    def walk(node, gene_counts, leaves):
        if node.is_leaf():
            gene_counts[node.name] = lineages[node][0]
            leaves.add(node)
        else:
            for child in node.children:
                if child in daughters:
                    gene_counts[child.name] = 1
                    leaves.add(child)
                else:
                    walk(child, gene_counts, leaves)

    # Condition on complete coalescence within each daughter subtree by
    # subtracting the bounded-MRCA normalization term per daughter.
    for daughter in daughters:
        # determine leaves of the coal subtree
        gene_counts = {}
        leaves = set()
        walk(daughter, gene_counts, leaves)

        p = coal.cdf_mrca_bounded_multicoal(
            gene_counts, locus_times[daughter.parent], locus_tree, popsizes,
            sroot=daughter, sleaves=leaves, stimes=locus_times)

        if p == -util.INF:
            return -util.INF

        lnp -= p

    return lnp
def rename_nodes(tree, prefix="n"):
    """Rename nodes so that all names are strings.

    Every node with an integer name gets "<prefix><number>"; if that
    string is already taken, fresh ids are drawn from tree.new_name()
    until a free one is found.
    """
    for node in list(tree.postorder()):
        if not isinstance(node.name, int):
            continue
        candidate = prefix + str(node.name)
        # Keep drawing fresh ids until the candidate is unused.
        while candidate in tree.nodes:
            candidate = prefix + str(tree.new_name())
        tree.rename(node.name, candidate)
#=============================================================================
# Input/Output
def write_dlcoal_recon(filename, coal_tree, extra,
                       exts={"coal_tree": ".coal.tree",
                             "coal_recon": ".coal.recon",
                             "locus_tree": ".locus.tree",
                             "locus_recon": ".locus.recon",
                             "daughters": ".daughters"
                             },
                       filenames={}):
    """Writes a reconciled gene tree to files.

    filename  -- common path prefix; each component is written to
                 filename + exts[key] unless overridden in `filenames`
    coal_tree -- coalescent tree to write
    extra     -- dict holding "coal_recon", "locus_tree", "locus_recon",
                 "locus_events", and "daughters"

    NOTE: the default dicts are shared across calls but are only read,
    never mutated, so this is safe.
    """
    # coal
    coal_tree.write(filenames.get("coal_tree", filename + exts["coal_tree"]),
                    rootData=True)
    phylo.write_recon_events(
        filenames.get("coal_recon", filename + exts["coal_recon"]),
        extra["coal_recon"], noevent="none")

    # locus
    extra["locus_tree"].write(
        filenames.get("locus_tree", filename + exts["locus_tree"]),
        rootData=True)
    phylo.write_recon_events(
        filenames.get("locus_recon", filename + exts["locus_recon"]),
        extra["locus_recon"], extra["locus_events"])

    # daughter nodes are stored one name per line
    util.write_list(
        filenames.get("daughters", filename + exts["daughters"]),
        [x.name for x in extra["daughters"]])
def read_dlcoal_recon(filename, stree,
                      exts={"coal_tree": ".coal.tree",
                            "coal_recon": ".coal.recon",
                            "locus_tree": ".locus.tree",
                            "locus_recon": ".locus.recon",
                            "daughters": ".daughters"
                            },
                      filenames={}):
    """Reads a reconciled gene tree from files.

    Inverse of write_dlcoal_recon(): returns (coal_tree, extra) where
    extra contains "coal_recon", "locus_tree", "locus_recon",
    "locus_events", and "daughters".  `stree` is the species tree the
    locus reconciliation maps onto.
    """
    extra = {}

    # trees
    coal_tree = treelib.read_tree(
        filenames.get("coal_tree", filename + exts["coal_tree"]))
    extra["locus_tree"] = treelib.read_tree(
        filenames.get("locus_tree", filename + exts["locus_tree"]))

    # recons (the coal recon's events column is discarded)
    extra["coal_recon"], junk = phylo.read_recon_events(
        filenames.get("coal_recon", filename + exts["coal_recon"]),
        coal_tree, extra["locus_tree"])
    extra["locus_recon"], extra["locus_events"] = phylo.read_recon_events(
        filenames.get("locus_recon", filename + exts["locus_recon"]),
        extra["locus_tree"], stree)

    # daughters are stored by node name; resolve them to locus-tree nodes
    extra["daughters"] = set(
        extra["locus_tree"].nodes[x] for x in util.read_strings(
            filenames.get("daughters", filename + exts["daughters"])))

    return coal_tree, extra
def read_log(filename):
    """Reads a DLCoal log lazily, yielding one parsed record per line.

    Lines beginning with "seed:" are skipped.  Each remaining line is a
    Python expression evaluated with the token "inf" bound to infinity.
    NOTE(review): records are parsed with eval(), so only trusted logs
    should be read.
    """
    stream = util.open_stream(filename)
    for line in stream:
        if line.startswith("seed:"):
            continue
        yield eval(line, {"inf": util.INF})
def read_log_all(filename):
    """Reads an entire DLCoal log into a list.

    Eager counterpart of read_log().  Delegating to read_log() keeps the
    two readers consistent: "seed:" header lines are skipped and the
    token "inf" is understood, neither of which the previous
    map(eval, stream) implementation handled (it would raise on either).
    """
    return list(read_log(filename))
#=============================================================================
# C interface functions
def make_ptree(tree):
    """Make parent tree array from tree.

    Returns (ptree, nodes, nodelookup):
      ptree      -- ptree[i] is the index of node i's parent (-1 for root)
      nodes      -- postorder node list with all leaves moved to the front
      nodelookup -- dict mapping node -> its index in `nodes`
    """
    nodes = []

    def walk(node):
        for child in node.children:
            walk(child)
        nodes.append(node)
    walk(tree.root)

    # Bring leaves to the front while preserving relative order on both
    # sides (list.sort is stable).  This is equivalent to the previous
    # cmp-based "leafsort", but key= also works under Python 3, where the
    # cmp= argument was removed.
    nodes.sort(key=lambda node: 0 if node.is_leaf() else 1)

    nodelookup = dict((node, i) for i, node in enumerate(nodes))

    ptree = [-1 if node == tree.root else nodelookup[node.parent]
             for node in nodes]

    # postorder + stable partition guarantees the root stays last
    assert nodes[-1] == tree.root

    return ptree, nodes, nodelookup
def ptree2ctree(ptree):
    """Makes a c++ Tree from a parent array"""
    # Marshal the Python list into a C int array before handing it to the
    # C library's makeTree() (exported via ctypes above).
    pint = c_int * len(ptree)
    tree = makeTree(len(ptree), pint(* ptree))
    return tree
def tree2ctree(tree):
    """Make a c++ Tree from a treelib.Tree data structure"""
    ptree, nodes, nodelookup = make_ptree(tree)
    # Branch lengths are transferred separately, in the same node order
    # that make_ptree() produced.
    dists = [x.dist for x in nodes]
    ctree = ptree2ctree(ptree)
    setTreeDists(ctree, c_list(c_float, dists))
    return ctree
def make_recon_array(tree, recon, nodes, snodelookup):
    """Make a reconciliation array from recon dict.

    For each node, in the order given by *nodes*, emit the integer index
    (per *snodelookup*) of the species node it reconciles to.  The *tree*
    argument is unused but kept for signature compatibility.
    """
    return [snodelookup[recon[node]] for node in nodes]
def make_events_array(nodes, events):
    """Make events array from events dict.

    Encodes each node's event as an int: gene=0, spec=1, dup=2.
    """
    codes = {"gene": 0, "spec": 1, "dup": 2}
    result = []
    for node in nodes:
        result.append(codes[events[node]])
    return result
|
nkgilley/home-assistant | refs/heads/dev | tests/components/upnp/mock_device.py | 14 | """Mock device for testing purposes."""
from typing import Mapping
from homeassistant.components.upnp.const import (
BYTES_RECEIVED,
BYTES_SENT,
PACKETS_RECEIVED,
PACKETS_SENT,
TIMESTAMP,
)
from homeassistant.components.upnp.device import Device
import homeassistant.util.dt as dt_util
class MockDevice(Device):
    """Mock device for Device.

    Records port-mapping calls in added_port_mappings /
    removed_port_mappings so tests can assert on them, and reports
    fixed metadata plus all-zero traffic counters.
    """

    def __init__(self, udn):
        """Initialize mock device."""
        # A bare object stands in for the real igd_device; tests never
        # exercise the underlying UPnP calls.
        igd_device = object()
        super().__init__(igd_device)
        self._udn = udn
        # Call records for test assertions.
        self.added_port_mappings = []
        self.removed_port_mappings = []

    @classmethod
    async def async_create_device(cls, hass, ssdp_location):
        """Return self."""
        # The ssdp_location is ignored; a fixed UDN is always used.
        return cls("UDN")

    @property
    def udn(self) -> str:
        """Get the UDN."""
        return self._udn

    @property
    def manufacturer(self) -> str:
        """Get manufacturer."""
        return "mock-manufacturer"

    @property
    def name(self) -> str:
        """Get name."""
        return "mock-name"

    @property
    def model_name(self) -> str:
        """Get the model name."""
        return "mock-model-name"

    @property
    def device_type(self) -> str:
        """Get the device type."""
        return "urn:schemas-upnp-org:device:InternetGatewayDevice:1"

    async def _async_add_port_mapping(
        self, external_port: int, local_ip: str, internal_port: int
    ) -> None:
        """Add a port mapping."""
        entry = [external_port, local_ip, internal_port]
        self.added_port_mappings.append(entry)

    async def _async_delete_port_mapping(self, external_port: int) -> None:
        """Remove a port mapping."""
        entry = external_port
        self.removed_port_mappings.append(entry)

    async def async_get_traffic_data(self) -> Mapping[str, any]:
        """Get traffic data."""
        # Always zero counters with a current timestamp.
        return {
            TIMESTAMP: dt_util.utcnow(),
            BYTES_RECEIVED: 0,
            BYTES_SENT: 0,
            PACKETS_RECEIVED: 0,
            PACKETS_SENT: 0,
        }
|
wenxuan-xia/niv_blog | refs/heads/master | niv_blog/urls.py | 1 | """niv_blog URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from core import blog
urlpatterns = [
    # Django admin site.
    url(r'^admin/', admin.site.urls),
    # Everything else is delegated to the blog app's URL configuration.
    url(r'', include('core.blog.urls')),
]
|
SRabbelier/Melange | refs/heads/master | thirdparty/google_appengine/lib/django_0_96/django/template/loaders/__init__.py | 12133432 | |
Harry-R/skylines | refs/heads/master | skylines/lib/waypoints/__init__.py | 12133432 | |
seckcoder/lang-learn | refs/heads/master | python/sklearn/examples/cluster/plot_cluster_iris.py | 2 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
K-means Clustering
=========================================================
The plots display firstly what a K-means algorithm would yield
using three clusters. It is then shown what the effect of a bad
initialization is on the classification process:
By setting n_init to only 1 (default is 10), the amount of
times that the algorithm will be run with different centroid
seeds is reduced.
The next plot displays what using eight clusters would deliver
and finally the ground truth.
"""
print __doc__


# Code source: Gael Varoqueux
# Modified for Documentation merge by Jaques Grobler
# License: BSD

import numpy as np
import pylab as pl
from mpl_toolkits.mplot3d import Axes3D


from sklearn.cluster import KMeans
from sklearn import datasets

# Fixed seed so the "bad init" run is reproducible.
np.random.seed(5)

# NOTE(review): `centers` is defined but never used below — candidate for
# removal.
centers = [[1, 1], [-1, -1], [1, -1]]

iris = datasets.load_iris()
X = iris.data
y = iris.target

# Three clusterings: the right k, too many clusters, and a deliberately
# poor initialization (a single random init instead of the default 10).
estimators = {'k_means_iris_3': KMeans(n_clusters=3),
              'k_means_iris_8': KMeans(n_clusters=8),
              'k_means_iris_bad_init': KMeans(n_clusters=3, n_init=1,
                                              init='random'),
              }

fignum = 1
for name, est in estimators.iteritems():
    fig = pl.figure(fignum, figsize=(4, 3))
    pl.clf()
    ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)

    pl.cla()
    est.fit(X)
    labels = est.labels_

    # Plot three of the four iris features, colored by cluster label.
    ax.scatter(X[:, 3], X[:, 0], X[:, 2], c=labels.astype(np.float))

    ax.w_xaxis.set_ticklabels([])
    ax.w_yaxis.set_ticklabels([])
    ax.w_zaxis.set_ticklabels([])
    ax.set_xlabel('Petal width')
    ax.set_ylabel('Sepal length')
    ax.set_zlabel('Petal length')
    fignum = fignum + 1

# Plot the ground truth
fig = pl.figure(fignum, figsize=(4, 3))
pl.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)

pl.cla()

# Label each species at the centroid of its points.
for name, label in [('Setosa', 0),
                    ('Versicolour', 1),
                    ('Virginica', 2)]:
    ax.text3D(X[y == label, 3].mean(),
              X[y == label, 0].mean() + 1.5,
              X[y == label, 2].mean(), name,
              horizontalalignment='center',
              bbox=dict(alpha=.5, edgecolor='w', facecolor='w'),
              )
# Reorder the labels to have colors matching the cluster results
y = np.choose(y, [1, 2, 0]).astype(np.float)
ax.scatter(X[:, 3], X[:, 0], X[:, 2], c=y)

ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
ax.set_xlabel('Petal width')
ax.set_ylabel('Sepal length')
ax.set_zlabel('Petal length')
pl.show()
|
yyd01245/libavg | refs/heads/master | win/genwinimportlibs.py | 7 | #!/usr/bin/env python
import os
import string
os.chdir("deps")
def tmpFileToDef(baseName):
    """Convert dumpbin /EXPORTS output (in ./tmp) into <baseName>.def.

    The .def file lists one exported symbol name per line under an
    EXPORTS header, suitable for lib.exe to build an import library.
    """
    # 'with' guarantees both handles are closed; the previous code never
    # closed the handle to ./tmp.
    with open("tmp", "r") as tmpFile:
        content = tmpFile.readlines()

    with open(baseName + ".def", "w") as defFile:
        defFile.write("EXPORTS\n")
        # Scan forward to the column-header line of the exports table.
        i = 0
        while content[i].find("ordinal hint") == -1:
            i += 1
        i += 2  # skip the header line and the blank line that follows it
        while True:
            fields = content[i].split()
            if not fields:
                # A blank line terminates the exports table.
                break
            # Column 4 ("name") holds the exported symbol.
            defFile.write(fields[3] + "\n")
            i += 1
# For every DLL in ./bin: dump its export table to ./tmp, convert that to
# a .def file, then build an x86 import library into ./lib.
for dllName in os.listdir("bin"):
    if dllName[-4:] == ".dll":
        print "Generating lib for '" + dllName + "'."
        os.system("dumpbin /EXPORTS bin\\"+dllName+" > tmp")
        baseName = dllName[:-4]
        tmpFileToDef(baseName)
        os.system("lib /def:"+baseName+".def /out:lib\\"+baseName+".lib /machine:x86")
|
eezee-it/server-tools | refs/heads/8.0 | base_ir_filters_active/tests/test_base_ir_filters_active.py | 13 | # -*- coding: utf-8 -*-
# © 2016 Therp BV <http://therp.nl>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from openerp.tests.common import TransactionCase
class TestBaseIrFiltersActive(TransactionCase):
    def test_base_ir_filters_active(self):
        """An inactive ir.filters record must be hidden from a normal
        search but reachable when the context disables the active test."""
        new_filter = self.env['ir.filters'].create({
            'name': 'inactive testfilter',
            'user_id': False,
            'domain': '[]',
            'context': '{}',
            'model_id': 'ir.filters',
            'active': False,
        })
        # A default search must not return the inactive record...
        self.assertFalse(self.env['ir.filters'].search([]) & new_filter)
        # ...but with active_test=False in the context it must appear.
        self.assertTrue(
            self.env['ir.filters'].with_context(active_test=False).search([]) &
            new_filter)
|
jjmleiro/hue | refs/heads/master | desktop/core/ext-py/cryptography-1.3.1/src/_cffi_src/commoncrypto/sectransform.py | 10 | # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
INCLUDES = """
#include <Security/SecDigestTransform.h>
#include <Security/SecSignVerifyTransform.h>
#include <Security/SecEncryptTransform.h>
"""
TYPES = """
typedef ... *SecTransformRef;
CFStringRef kSecImportExportPassphrase;
CFStringRef kSecImportExportKeychain;
CFStringRef kSecImportExportAccess;
CFStringRef kSecEncryptionMode;
CFStringRef kSecEncryptKey;
CFStringRef kSecIVKey;
CFStringRef kSecModeCBCKey;
CFStringRef kSecModeCFBKey;
CFStringRef kSecModeECBKey;
CFStringRef kSecModeNoneKey;
CFStringRef kSecModeOFBKey;
CFStringRef kSecOAEPEncodingParametersAttributeName;
CFStringRef kSecPaddingKey;
CFStringRef kSecPaddingNoneKey;
CFStringRef kSecPaddingOAEPKey;
CFStringRef kSecPaddingPKCS1Key;
CFStringRef kSecPaddingPKCS5Key;
CFStringRef kSecPaddingPKCS7Key;
const CFStringRef kSecTransformInputAttributeName;
const CFStringRef kSecTransformOutputAttributeName;
const CFStringRef kSecTransformDebugAttributeName;
const CFStringRef kSecTransformTransformName;
const CFStringRef kSecTransformAbortAttributeName;
CFStringRef kSecInputIsAttributeName;
CFStringRef kSecInputIsPlainText;
CFStringRef kSecInputIsDigest;
CFStringRef kSecInputIsRaw;
const CFStringRef kSecDigestTypeAttribute;
const CFStringRef kSecDigestLengthAttribute;
const CFStringRef kSecDigestMD5;
const CFStringRef kSecDigestSHA1;
const CFStringRef kSecDigestSHA2;
"""
FUNCTIONS = """
Boolean SecTransformSetAttribute(SecTransformRef, CFStringRef, CFTypeRef,
CFErrorRef *);
SecTransformRef SecDecryptTransformCreate(SecKeyRef, CFErrorRef *);
SecTransformRef SecEncryptTransformCreate(SecKeyRef, CFErrorRef *);
SecTransformRef SecVerifyTransformCreate(SecKeyRef, CFDataRef, CFErrorRef *);
SecTransformRef SecSignTransformCreate(SecKeyRef, CFErrorRef *) ;
CFTypeRef SecTransformExecute(SecTransformRef, CFErrorRef *);
"""
MACROS = """
"""
CUSTOMIZATIONS = """
"""
|
skinny121/MCEdit-TallWorlds | refs/heads/master | stock-filters/CreateBusses.py | 11 | # Feel free to modify and use this filter however you wish. If you do,
# please give credit to SethBling.
# http://youtube.com/SethBling
from numpy import sign
displayName = "Create Busses"
def perform(level, box, options):
    """MCEdit filter entry point: build redstone busses inside *box*."""
    # Mark the region dirty so MCEdit redraws/saves the edited chunks.
    level.markDirtyBox(box)
    bus = BusCreator(level, box, options)
    # Pipeline: find terminals, collect wool guides, order them into
    # per-color paths, then lay the wire.
    bus.getTerminals()
    bus.getGuides()
    bus.pickAllPaths()
    bus.createAllBusses()
# Unit direction vectors: the four horizontal directions in the x/z plane...
HorizDirs = [
    (1, 0, 0),
    (-1, 0, 0),
    (0, 0, 1),
    (0, 0, -1),
]
# ...and the two vertical ones.
Down = (0, -1, 0)
Up = (0, 1, 0)
def getHorizDir(pos1, pos2):
    """Return the dominant horizontal unit direction from pos1 to pos2.

    The axis (x or z) with the larger displacement wins; ties and the
    degenerate no-displacement case resolve to +x, matching the original
    behavior.  The Python 2-only tuple-parameter unpacking was moved into
    the body so the module also parses under Python 3; call sites are
    unchanged.
    """
    (x1, y1, z1), (x2, y2, z2) = pos1, pos2
    if abs(x2 - x1) > abs(z2 - z1):
        return sign(x2 - x1), 0, 0
    elif z2 == z1:
        return 1, 0, 0
    else:
        return 0, 0, sign(z2 - z1)
def getSecondaryDir(pos1, pos2):
    """Return the unit direction along the NON-dominant horizontal axis.

    Complements getHorizDir(): whichever of x/z has the smaller
    displacement is reported here.  (Python 3-compatible signature; call
    sites pass the same two position tuples as before.)
    """
    (x1, y1, z1), (x2, y2, z2) = pos1, pos2
    if abs(x2 - x1) > abs(z2 - z1):
        return 0, 0, sign(z2 - z1)
    else:
        return sign(x2 - x1), 0, 0
def leftOf(dir1, dir2):
    """True when horizontal direction dir1 counts as "left of" dir2
    (see rotateLeft/rotateRight for the rotation convention).

    De-tupled signature for Python 3 parseability; call sites unchanged.
    """
    dx1, dy1, dz1 = dir1
    dx2, dy2, dz2 = dir2
    return dx1 == dz2 or dz1 == dx2 * -1
def rotateRight(dir):
    """Rotate a horizontal direction vector 90 degrees to the right.

    De-tupled signature for Python 3 parseability; call sites unchanged.
    """
    dx, dy, dz = dir
    return -dz, dy, dx
def rotateLeft(dir):
    """Rotate a horizontal direction vector 90 degrees to the left.

    Inverse of rotateRight().  De-tupled signature for Python 3
    parseability; call sites unchanged.
    """
    dx, dy, dz = dir
    return dz, dy, -dx
def allAdjacentSamePlane(dir, secondaryDir):
    # Candidate neighbor offsets for a wire continuing on the same level,
    # in the order the placement loop should try them: straight ahead,
    # then sideways (turning toward secondaryDir first), then the
    # diagonal up/down variants, and finally backwards.
    right = rotateRight(dir)
    left = rotateLeft(dir)
    back = rotateRight(right)
    if leftOf(secondaryDir, dir):
        return (
            dir,
            left,
            right,
            getDir(dir, Up),
            getDir(dir, Down),
            getDir(left, Up),
            getDir(right, Up),
            getDir(left, Down),
            getDir(right, Down),
            back,
            getDir(back, Up),
            getDir(back, Down),
        )
    else:
        # Mirror image: prefer turning right when secondaryDir is not to
        # the left of the main direction.
        return (
            dir,
            right,
            left,
            getDir(dir, Up),
            getDir(dir, Down),
            getDir(right, Up),
            getDir(left, Up),
            getDir(right, Down),
            getDir(left, Down),
            back,
            getDir(back, Up),
            getDir(back, Down),
        )
def allAdjacentUp(dir, secondaryDir):
    # Like allAdjacentSamePlane(), but biased for climbing: all the
    # upward-diagonal offsets come first, then the flat ones, then the
    # downward diagonals.  secondaryDir again decides left-vs-right
    # preference within each tier.
    right = rotateRight(dir)
    left = rotateLeft(dir)
    back = rotateRight(right)
    if leftOf(secondaryDir, dir):
        return (
            getDir(dir, Up),
            getDir(left, Up),
            getDir(right, Up),
            getDir(back, Up),
            dir,
            left,
            right,
            back,
            getDir(dir, Down),
            getDir(left, Down),
            getDir(right, Down),
            getDir(back, Down),
        )
    else:
        return (
            getDir(dir, Up),
            getDir(right, Up),
            getDir(left, Up),
            getDir(back, Up),
            dir,
            right,
            left,
            back,
            getDir(dir, Down),
            getDir(right, Down),
            getDir(left, Down),
            getDir(back, Down),
        )
def allAdjacentDown(dir, secondaryDir):
    # Mirror of allAdjacentUp() for descending: the downward diagonals
    # come first, then the flat offsets, then the upward ones.
    right = rotateRight(dir)
    left = rotateLeft(dir)
    back = rotateRight(right)
    if leftOf(secondaryDir, dir):
        return (
            getDir(dir, Down),
            getDir(left, Down),
            getDir(right, Down),
            getDir(back, Down),
            dir,
            left,
            right,
            back,
            getDir(dir, Up),
            getDir(left, Up),
            getDir(right, Up),
            getDir(back, Up),
        )
    else:
        return (
            getDir(dir, Down),
            getDir(right, Down),
            getDir(left, Down),
            getDir(back, Down),
            dir,
            right,
            left,
            back,
            getDir(dir, Up),
            getDir(right, Up),
            getDir(left, Up),
            getDir(back, Up),
        )
def getDir(pos, offset):
    """Return pos translated by offset (component-wise tuple addition).

    De-tupled signature for Python 3 parseability; call sites unchanged.
    """
    x, y, z = pos
    dx, dy, dz = offset
    return x + dx, y + dy, z + dz
def dist(pos1, pos2):
    """Manhattan (L1) distance between two block positions.

    De-tupled signature for Python 3 parseability; call sites unchanged.
    """
    x1, y1, z1 = pos1
    x2, y2, z2 = pos2
    return abs(x2 - x1) + abs(y2 - y1) + abs(z2 - z1)
def above(pos1, pos2):
    """True when pos1 is strictly higher (greater y) than pos2.

    De-tupled signature for Python 3 parseability; call sites unchanged.
    """
    return pos1[1] > pos2[1]
def below(pos1, pos2):
    """True when pos1 is strictly lower (smaller y) than pos2.

    De-tupled signature for Python 3 parseability; call sites unchanged.
    """
    return pos1[1] < pos2[1]
def insideBox(box, pos):
    """True when pos lies inside box's half-open bounds [min, max).

    De-tupled position parameter for Python 3 parseability; call sites
    unchanged.
    """
    x, y, z = pos
    return (box.minx <= x < box.maxx and
            box.miny <= y < box.maxy and
            box.minz <= z < box.maxz)
# Wool data values mapped to human-readable color names; used for the
# error messages raised on duplicate terminals.
Colors = {
    0: "white",
    1: "orange",
    2: "magenta",
    3: "light blue",
    4: "yellow",
    5: "lime green",
    6: "pink",
    7: "gray",
    8: "light gray",
    9: "cyan",
    10: "purple",
    11: "blue",
    12: "brown",
    13: "green",
    14: "red",
    15: "black",
}
class BusCreator:
starts = {}
ends = {}
guides = {}
path = {}
def __init__(self, level, box, options):
self.level = level
self.box = box
self.options = options
    def getTerminals(self):
        """Scan the selection box for bus terminals.

        Fills self.starts / self.ends keyed by wool color; raises if a
        color has more than one start or end terminal.
        """
        for x in xrange(self.box.minx, self.box.maxx):
            for y in xrange(self.box.miny, self.box.maxy):
                for z in xrange(self.box.minz, self.box.maxz):
                    (color, start) = self.isTerminal((x, y, z))
                    if color is not None and start is not None:
                        if start:
                            if color in self.starts:
                                raise Exception("Duplicate starting point for " + Colors[color] + " bus")
                            self.starts[color] = (x, y, z)
                        else:
                            if color in self.ends:
                                raise Exception("Duplicate ending point for " + Colors[color] + " bus")
                            self.ends[color] = (x, y, z)
    def getGuides(self):
        """Collect guide positions for every color that has both terminals.

        A guide is the position directly above a wool block (id 35) of
        that color; guides that coincide with a terminal are skipped.
        """
        for x in xrange(self.box.minx, self.box.maxx):
            for y in xrange(self.box.miny, self.box.maxy):
                for z in xrange(self.box.minz, self.box.maxz):
                    pos = (x, y, z)
                    if self.getBlockAt(pos) == 35:
                        # Wool data value doubles as the bus color.
                        color = self.getBlockDataAt(pos)
                        if color not in self.starts or color not in self.ends:
                            continue
                        if color not in self.guides:
                            self.guides[color] = []
                        rs = getDir(pos, Up)
                        if rs == self.starts[color] or rs == self.ends[color]:
                            continue
                        self.guides[color].append(rs)
    def isTerminal(self, (x, y, z)):
        """Classify the block at (x, y, z) as a bus terminal.

        A terminal is a repeater sitting on wool, horizontally adjacent
        to redstone dust (id 55) on wool of the same color.  Returns
        (wool_color, is_start) — is_start is True when the repeater
        points toward this position — or (None, None) otherwise.
        """
        pos = (x, y, z)
        for dir in HorizDirs:
            otherPos = getDir(pos, dir)
            towards = self.repeaterPointingTowards(pos, otherPos)
            away = self.repeaterPointingAway(pos, otherPos)
            if not (away or towards): # it's not a repeater pointing towards or away
                continue
            if self.getBlockAt(otherPos) != 55: # the other block isn't redstone
                continue
            if self.getBlockAt(getDir(pos, Down)) != 35: # it's not sitting on wool
                continue
            if self.getBlockAt(getDir(otherPos, Down)) != 35: # the other block isn't sitting on wool
                continue
            data = self.getBlockDataAt(getDir(pos, Down))
            if self.getBlockDataAt(getDir(otherPos, Down)) != data: # the wool colors don't match
                continue
            return data, towards
        return None, None
def pickAllPaths(self):
for color in range(0, 16):
if color in self.starts and color in self.ends:
self.pickPath(color)
    def pickPath(self, color):
        """Greedily order this color's guides into self.path[color].

        Starting from the color's start terminal, repeatedly hop to the
        nearest remaining guide (Manhattan distance), consuming it from
        self.guides.  Stops when adjacent to the end terminal or when no
        guides remain.
        """
        self.path[color] = ()
        currentPos = self.starts[color]
        while True:
            minDist = None
            minGuide = None
            for guide in self.guides[color]:
                guideDist = dist(currentPos, guide)
                if minDist is None or guideDist < minDist:
                    minDist = guideDist
                    minGuide = guide
            if dist(currentPos, self.ends[color]) == 1:
                return
            if minGuide is None:
                return
            self.path[color] = self.path[color] + (minGuide,)
            currentPos = minGuide
            self.guides[color].remove(minGuide)
def createAllBusses(self):
for color in range(0, 16):
if color in self.path:
self.connectDots(color)
    def connectDots(self, color):
        """Wire consecutive guides of this color's path together.

        self.power tracks how far the signal has travelled since the last
        repeater; createConnection() uses it to insert repeaters.
        """
        prevGuide = None
        self.power = 1
        for guide in self.path[color]:
            if prevGuide is not None:
                self.createConnection(prevGuide, guide, color)
            prevGuide = guide
    def createConnection(self, pos1, pos2, color):
        """Lay wire step by step from pos1 to pos2.

        Each step tries the candidate neighbor offsets (ordered by the
        allAdjacent* helpers, chosen by whether we must climb, descend or
        stay level) and places redstone at the first legal one.  Every
        15th block a repeater is placed instead and the power counter
        resets.  Gives up silently if no candidate is placeable.
        """
        currentPos = pos1
        while currentPos != pos2:
            self.power += 1
            hdir = getHorizDir(currentPos, pos2)
            secondaryDir = getSecondaryDir(currentPos, pos2)
            if above(currentPos, pos2):
                dirs = allAdjacentDown(hdir, secondaryDir)
            elif below(currentPos, pos2):
                dirs = allAdjacentUp(hdir, secondaryDir)
            else:
                dirs = allAdjacentSamePlane(hdir, secondaryDir)
            # restrictions encode repeater placement constraints:
            # 2 = the block just after a repeater, 1 = the block that
            # will itself be a repeater, 0 = no constraint.
            if self.power == 1:
                restrictions = 2
            elif self.power == 15:
                restrictions = 1
            else:
                restrictions = 0
            placed = False
            for dir in dirs:
                pos = getDir(currentPos, dir)
                if self.canPlaceRedstone(pos, currentPos, pos2, restrictions):
                    if self.power == 15:
                        self.placeRepeater(pos, dir, color)
                        self.power = 0
                    else:
                        self.placeRedstone(pos, color)
                    currentPos = pos
                    placed = True
                    break
            if not placed:
                # raise Exception("Algorithm failed to create bus for " + Colors[color] + " wire.")
                return
    def canPlaceRedstone(self, pos, fromPos, destinationPos, restrictions):
        # Whether a wire (or repeater) may be placed at pos when stepping from
        # fromPos toward destinationPos. Block ids used below: 0 = air,
        # 55 = redstone wire, 93/94 = repeater (see placeRepeater /
        # repeaterPointingTowards).
        if restrictions == 1 and above(pos, fromPos):  # repeater
            # NOTE(review): presumably repeaters cannot be placed on an upward
            # step — confirm against game mechanics.
            return False
        if restrictions == 2 and below(pos, fromPos):  # just after repeater
            return False
        if restrictions == 2 and not self.repeaterPointingTowards(fromPos, pos):  # just after repeater
            return False
        # Stepping up must not cut the wire two blocks below out of the line.
        if above(pos, fromPos) and self.getBlockAt(getDir(getDir(pos, Down), Down)) == 55:
            return False
        # Stepping down requires air above so the signal can travel downward.
        if below(pos, fromPos) and self.getBlockAt(getDir(pos, Up)) != 0:
            return False
        if getDir(pos, Down) == destinationPos:
            return False
        if pos == destinationPos:
            return True
        # The spot and its supporting block must both be empty (air).
        if self.getBlockAt(pos) != 0:
            return False
        if self.getBlockAt(getDir(pos, Down)) != 0:
            return False
        if not insideBox(self.box, pos):
            return False
        # Keep clearance: no occupied neighbor in the same plane, except the
        # block we came from and the destination (and their support blocks).
        for dir in allAdjacentSamePlane((1, 0, 0), (0, 0, 0)):
            testPos = getDir(pos, dir)
            if testPos == fromPos or testPos == getDir(fromPos, Down):
                continue
            if testPos == destinationPos or testPos == getDir(destinationPos, Down):
                continue
            blockid = self.getBlockAt(testPos)
            if blockid != 0:
                return False
        return True
def placeRedstone(self, pos, color):
self.setBlockAt(pos, 55) # redstone
self.setBlockAt(getDir(pos, Down), 35, color) # wool
def placeRepeater(self, pos, (dx, dy, dz), color):
if dz == -1:
self.setBlockAt(pos, 93, 0) # north
elif dx == 1:
self.setBlockAt(pos, 93, 1) # east
elif dz == 1:
self.setBlockAt(pos, 93, 2) # south
elif dx == -1:
self.setBlockAt(pos, 93, 3) # west
self.setBlockAt(getDir(pos, Down), 35, color) # wool
def getBlockAt(self, (x, y, z)):
return self.level.blockAt(x, y, z)
def getBlockDataAt(self, (x, y, z)):
return self.level.blockDataAt(x, y, z)
def setBlockAt(self, (x, y, z), id, dmg=0):
self.level.setBlockAt(x, y, z, id)
self.level.setBlockDataAt(x, y, z, dmg)
def repeaterPointingTowards(self, (x1, y1, z1), (x2, y2, z2)):
blockid = self.getBlockAt((x1, y1, z1))
if blockid != 93 and blockid != 94:
return False
direction = self.level.blockDataAt(x1, y1, z1) % 4
if direction == 0 and z2 - z1 == -1:
return True
if direction == 1 and x2 - x1 == 1:
return True
if direction == 2 and z2 - z1 == 1:
return True
if direction == 3 and x2 - x1 == -1:
return True
return False
def repeaterPointingAway(self, (x1, y1, z1), (x2, y2, z2)):
blockid = self.getBlockAt((x1, y1, z1))
if blockid != 93 and blockid != 94:
return False
direction = self.level.blockDataAt(x1, y1, z1) % 4
if direction == 0 and z2 - z1 == 1:
return True
if direction == 1 and x2 - x1 == -1:
return True
if direction == 2 and z2 - z1 == -1:
return True
if direction == 3 and x2 - x1 == 1:
return True
return False
|
landism/pants | refs/heads/master | src/python/pants/cache/cache_setup.py | 3 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import threading
import urlparse
from collections import namedtuple
from six.moves import range
from pants.base.build_environment import get_buildroot
from pants.cache.artifact_cache import ArtifactCacheError
from pants.cache.local_artifact_cache import LocalArtifactCache, TempLocalArtifactCache
from pants.cache.pinger import BestUrlSelector, Pinger
from pants.cache.resolver import NoopResolver, Resolver, RESTfulResolver
from pants.cache.restful_artifact_cache import RESTfulArtifactCache
from pants.subsystem.subsystem import Subsystem
# Each spec-validation failure gets its own ArtifactCacheError subclass so
# callers can catch exactly the condition they care about.
class EmptyCacheSpecError(ArtifactCacheError): pass
class LocalCacheSpecRequiredError(ArtifactCacheError): pass
class CacheSpecFormatError(ArtifactCacheError): pass
class InvalidCacheSpecError(ArtifactCacheError): pass
class RemoteCacheSpecRequiredError(ArtifactCacheError): pass
class TooManyCacheSpecsError(ArtifactCacheError): pass
# A sanitized (local, remote) pair of cache spec strings; either may be None.
CacheSpec = namedtuple('CacheSpec', ['local', 'remote'])
class CacheSetup(Subsystem):
  """Subsystem exposing artifact-cache options and building CacheFactory instances."""

  options_scope = 'cache'

  @classmethod
  def register_options(cls, register):
    super(CacheSetup, cls).register_options(register)
    default_cache = [os.path.join(get_buildroot(), '.cache')]
    register('--ignore', type=bool,
             help='Ignore all other cache configuration and skip using the cache.')
    register('--read', type=bool, default=True,
             help='Read build artifacts from cache, if available.')
    register('--write', type=bool, default=True,
             help='Write build artifacts to cache, if available.')
    register('--overwrite', advanced=True, type=bool,
             help='If writing build artifacts to cache, overwrite existing artifacts '
                  'instead of skipping them.')
    register('--resolver', advanced=True, choices=['none', 'rest'], default='none',
             help='Select which resolver strategy to use for discovering URIs that access '
                  'artifact caches. none: use URIs from static config options, i.e. '
                  '--read-from, --write-to. rest: look up URIs by querying a RESTful '
                  'URL, which is a remote address from --read-from, --write-to.')
    register('--read-from', advanced=True, type=list, default=default_cache,
             help='The URIs of artifact caches to read directly from. Each entry is a URL of '
                  'a RESTful cache, a path of a filesystem cache, or a pipe-separated list of '
                  'alternate caches to choose from. This list is also used as input to '
                  'the resolver. When resolver is \'none\' list is used as is.')
    register('--write-to', advanced=True, type=list, default=default_cache,
             # Fix: the first fragment was missing its trailing space, which
             # rendered as "URL ofa RESTful cache" in help output.
             help='The URIs of artifact caches to write directly to. Each entry is a URL of '
                  'a RESTful cache, a path of a filesystem cache, or a pipe-separated list of '
                  'alternate caches to choose from. This list is also used as input to '
                  'the resolver. When resolver is \'none\' list is used as is.')
    register('--compression-level', advanced=True, type=int, default=5,
             # Fix: CacheFactory._do_create_artifact_cache rejects 0, so the
             # documented range is 1-9, not 0-9.
             help='The gzip compression level (1-9) for created artifacts.')
    register('--dereference-symlinks', type=bool, default=True, fingerprint=True,
             help='Dereference symlinks when creating cache tarball.')
    register('--max-entries-per-target', advanced=True, type=int, default=8,
             help='Maximum number of old cache files to keep per task target pair')
    register('--pinger-timeout', advanced=True, type=float, default=0.5,
             help='number of seconds before pinger times out')
    register('--pinger-tries', advanced=True, type=int, default=2,
             help='number of times pinger tries a cache')
    register('--write-permissions', advanced=True, type=str, default=None,
             help='Permissions to use when writing artifacts to a local cache, in octal.')

  @classmethod
  def create_cache_factory_for_task(cls, task, pinger=None, resolver=None):
    """Build a CacheFactory bound to the given task's scoped options and logger."""
    return CacheFactory(cls.scoped_instance(task).get_options(),
                        task.context.log, task.stable_name(), pinger=pinger, resolver=resolver)
class CacheFactory(object):
  """Creates read/write artifact caches on demand from a task's cache options."""

  def __init__(self, options, log, stable_name, pinger=None, resolver=None):
    """Create a cache factory from settings.

    :param options: Task's scoped options.
    :param log: Task's context log.
    :param stable_name: Task's stable name.
    :param pinger: Pinger to choose the best remote artifact cache URL.
    :param resolver: Resolver to look up remote artifact cache URLs.
    :return: cache factory.
    """
    self._options = options
    self._log = log
    self._stable_name = stable_name
    # Created on-demand.
    self._read_cache = None
    self._write_cache = None
    # Protects local filesystem setup, and assignment to the references above.
    self._cache_setup_lock = threading.Lock()
    # Caches are supposed to be close, and we don't want to waste time pinging on no-op builds.
    # So we ping twice with a short timeout.
    # TODO: Make lazy.
    self._pinger = pinger or Pinger(timeout=self._options.pinger_timeout,
                                    tries=self._options.pinger_tries)
    # The resolver is also close, but a resolution failure has broader impact
    # than a single ping failure, so it uses a higher timeout with more retries.
    if resolver:
      self._resolver = resolver
    elif self._options.resolver == 'rest':
      self._resolver = RESTfulResolver(timeout=1.0, tries=3)
    else:
      self._resolver = NoopResolver()

  @property
  def ignore(self):
    """True if all cache use should be skipped regardless of other options."""
    return self._options.ignore

  def read_cache_available(self):
    """Truthy iff reading from the cache is enabled and a read cache exists."""
    return not self.ignore and self._options.read and self.get_read_cache()

  def write_cache_available(self):
    """Truthy iff writing to the cache is enabled and a write cache exists."""
    return not self.ignore and self._options.write and self.get_write_cache()

  def overwrite(self):
    """True if existing cached artifacts should be overwritten on write."""
    return self._options.overwrite

  def get_read_cache(self):
    """Returns the read cache for this setup, creating it if necessary.

    Returns None if no read cache is configured.
    """
    if self._options.read_from and not self._read_cache:
      cache_spec = self._resolve(self._sanitize_cache_spec(self._options.read_from))
      if cache_spec:
        with self._cache_setup_lock:
          self._read_cache = self._do_create_artifact_cache(cache_spec, 'will read from')
    return self._read_cache

  def get_write_cache(self):
    """Returns the write cache for this setup, creating it if necessary.

    Returns None if no write cache is configured.
    """
    if self._options.write_to and not self._write_cache:
      cache_spec = self._resolve(self._sanitize_cache_spec(self._options.write_to))
      if cache_spec:
        with self._cache_setup_lock:
          self._write_cache = self._do_create_artifact_cache(cache_spec, 'will write to')
    return self._write_cache

  # VisibleForTesting
  def _sanitize_cache_spec(self, spec):
    """Validate a raw spec list and split it into a CacheSpec(local, remote).

    :raises ArtifactCacheError: (a subclass) for each malformed-spec condition.
    """
    if not isinstance(spec, (list, tuple)):
      raise InvalidCacheSpecError('Invalid artifact cache spec type: {0} ({1})'.format(
        type(spec), spec))
    if not spec:
      raise EmptyCacheSpecError()
    if len(spec) > 2:
      raise TooManyCacheSpecsError('Too many artifact cache specs: ({0})'.format(spec))
    local_specs = [s for s in spec if self.is_local(s)]
    remote_specs = [s for s in spec if self.is_remote(s)]
    if not local_specs and not remote_specs:
      raise CacheSpecFormatError('Invalid cache spec: {0}, must be either local or remote'
                                 .format(spec))
    if len(spec) == 2:
      if not local_specs:
        raise LocalCacheSpecRequiredError('One of two cache specs must be a local cache path.')
      if not remote_specs:
        raise RemoteCacheSpecRequiredError('One of two cache specs must be a remote spec.')
    # Idiom: truthiness instead of len(...) > 0 comparisons.
    local_spec = local_specs[0] if local_specs else None
    remote_spec = remote_specs[0] if remote_specs else None
    return CacheSpec(local=local_spec, remote=remote_spec)

  # VisibleForTesting
  def _resolve(self, spec):
    """Attempt resolving cache URIs when a remote spec is provided. """
    if not spec.remote:
      return spec
    try:
      resolved_urls = self._resolver.resolve(spec.remote)
      if resolved_urls:
        # keep the bar separated list of URLs convention
        return CacheSpec(local=spec.local, remote='|'.join(resolved_urls))
      # no-op
      return spec
    except Resolver.ResolverError as e:
      self._log.warn('Error while resolving from {0}: {1}'.format(spec.remote, str(e)))
      # If for some reason resolver fails we continue to use local cache
      if spec.local:
        return CacheSpec(local=spec.local, remote=None)
      # resolver fails but there is no local cache
      return None

  @staticmethod
  def is_local(string_spec):
    """True if the spec string denotes a filesystem path."""
    return string_spec.startswith('/') or string_spec.startswith('~')

  @staticmethod
  def is_remote(string_spec):
    """True if the spec string denotes a remote (HTTP/HTTPS) cache."""
    # both artifact cache and resolver use REST, add new protocols here once they are supported
    return string_spec.startswith('http://') or string_spec.startswith('https://')

  def _baseurl(self, url):
    """Strip a URL down to scheme://netloc for pinging purposes."""
    parsed_url = urlparse.urlparse(url)
    return '{scheme}://{netloc}'.format(scheme=parsed_url.scheme, netloc=parsed_url.netloc)

  def get_available_urls(self, urls):
    """Return reachable urls sorted by their ping times."""
    baseurl_to_urls = {self._baseurl(url): url for url in urls}
    pingtimes = self._pinger.pings(baseurl_to_urls.keys())  # List of pairs (host, time in ms).
    self._log.debug('Artifact cache server ping times: {}'
                    .format(', '.join(['{}: {:.6f} secs'.format(*p) for p in pingtimes])))
    sorted_pingtimes = sorted(pingtimes, key=lambda x: x[1])
    available_urls = [baseurl_to_urls[baseurl] for baseurl, pingtime in sorted_pingtimes
                      if pingtime < Pinger.UNREACHABLE]
    self._log.debug('Available cache servers: {0}'.format(available_urls))
    return available_urls

  def _do_create_artifact_cache(self, spec, action):
    """Returns an artifact cache for the specified spec.

    spec can be:
      - a path to a file-based cache root.
      - a URL of a RESTful cache root.
      - a bar-separated list of URLs, where we'll pick the one with the best ping times.
      - A list or tuple of two specs, local, then remote, each as described above
    """
    compression = self._options.compression_level
    if compression not in range(1, 10):
      raise ValueError('compression_level must be an integer 1-9: {}'.format(compression))
    artifact_root = self._options.pants_workdir

    def create_local_cache(parent_path):
      # One subdirectory per task stable name keeps task caches apart.
      path = os.path.join(parent_path, self._stable_name)
      self._log.debug('{0} {1} local artifact cache at {2}'
                      .format(self._stable_name, action, path))
      return LocalArtifactCache(artifact_root, path, compression,
                                self._options.max_entries_per_target,
                                permissions=self._options.write_permissions,
                                dereference=self._options.dereference_symlinks)

    def create_remote_cache(remote_spec, local_cache):
      # Returns None when no remote URL is reachable.
      urls = self.get_available_urls(remote_spec.split('|'))
      if urls:  # Idiom: was len(urls) > 0.
        best_url_selector = BestUrlSelector(['{}/{}'.format(url.rstrip('/'), self._stable_name)
                                             for url in urls])
        local_cache = local_cache or TempLocalArtifactCache(artifact_root, compression)
        return RESTfulArtifactCache(artifact_root, best_url_selector, local_cache)

    local_cache = create_local_cache(spec.local) if spec.local else None
    remote_cache = create_remote_cache(spec.remote, local_cache) if spec.remote else None
    if remote_cache:
      return remote_cache
    return local_cache
|
vbraun/oxford-strings | refs/heads/master | lib/icalendar/cal.py | 1 | # -*- coding: utf-8 -*-
"""Calendar is a dictionary like Python object that can render itself as VCAL
files according to rfc2445.
These are the defined components.
"""
from datetime import datetime
from icalendar.caselessdict import CaselessDict
from icalendar.parser import Contentline
from icalendar.parser import Contentlines
from icalendar.parser import Parameters
from icalendar.parser import q_join
from icalendar.parser import q_split
from icalendar.parser_tools import DEFAULT_ENCODING
from icalendar.parser_tools import data_encode
from icalendar.prop import TypesFactory
from icalendar.prop import vText, vDDDLists
import pytz
######################################
# The component factory
class ComponentFactory(CaselessDict):
    """Factory mapping iCalendar component names to their classes.

    All components defined in rfc 2445 are registered here; look a class up
    by its component name, e.g. ``factory['VEVENT']``.
    """

    def __init__(self, *args, **kwargs):
        """Register every known component class under its VCAL name.
        """
        CaselessDict.__init__(self, *args, **kwargs)
        registry = (
            ('VEVENT', Event),
            ('VTODO', Todo),
            ('VJOURNAL', Journal),
            ('VFREEBUSY', FreeBusy),
            ('VTIMEZONE', Timezone),
            ('STANDARD', TimezoneStandard),
            ('DAYLIGHT', TimezoneDaylight),
            ('VALARM', Alarm),
            ('VCALENDAR', Calendar),
        )
        for component_name, component_class in registry:
            self[component_name] = component_class
# These Properties have multiple property values inlined in one propertyline
# separated by comma. Use CaselessDict as simple caseless set.
INLINE = CaselessDict(
    [(cat, 1) for cat in ('CATEGORIES', 'RESOURCES', 'FREEBUSY')]
)
# Unique sentinel object used by Component.decoded() to detect "no default".
_marker = []
class Component(CaselessDict):
    """Component is the base object for calendar, Event and the other
    components defined in RFC 2445. Normally you will not use this class
    directly, but rather one of the subclasses.
    """

    name = ''       # must be defined in each component
    required = ()   # These properties are required
    singletons = () # These properties must only appear once
    multiple = ()   # may occur more than once
    exclusive = ()  # These properties are mutually exclusive
    inclusive = ()  # if any occurs the other(s) MUST occur
                    # ('duration', 'repeat')
    ignore_exceptions = False  # if True, and we cannot parse this
                               # component, we will silently ignore
                               # it, rather than let the exception
                               # propagate upwards
    # not_compliant = ['']  # List of non-compliant properties.

    def __init__(self, *args, **kwargs):
        """Set keys to upper for initial dict.
        """
        CaselessDict.__init__(self, *args, **kwargs)
        # set parameters here for properties that use non-default values
        self.subcomponents = []  # Components can be nested.
        self.is_broken = False   # True if we ignored an exception while
                                 # parsing a property

    #def is_compliant(self, name):
    #    """Returns True is the given property name is compliant with the
    #    icalendar implementation.
    #
    #    If the parser is too strict it might prevent parsing erroneous but
    #    otherwise compliant properties. So the parser is pretty lax, but it is
    #    possible to test for non-complience by calling this method.
    #    """
    #    return name in not_compliant

    #############################
    # handling of property values

    def _encode(self, name, value, parameters=None, encode=1):
        """Encode values to icalendar property values.

        :param name: Name of the property.
        :type name: string

        :param value: Value of the property. Either of a basic Python type of
                      any of the icalendar's own property types.
        :type value: Python native type or icalendar property type.

        :param parameters: Property parameter dictionary for the value. Only
                           available, if encode is set to True.
        :type parameters: Dictionary

        :param encode: True, if the value should be encoded to one of
                       icalendar's own property types (Fallback is "vText")
                       or False, if not.
        :type encode: Boolean

        :returns: icalendar property value
        """
        if not encode:
            return value
        if isinstance(value, types_factory.all_types):
            # Don't encode already encoded values.
            return value
        klass = types_factory.for_property(name)
        obj = klass(value)
        if parameters:
            if isinstance(parameters, dict):
                params = Parameters()
                for key, item in parameters.items():
                    params[key] = item
                parameters = params
            assert isinstance(parameters, Parameters)
            obj.params = parameters
        return obj

    def add(self, name, value, parameters=None, encode=1):
        """Add a property.

        :param name: Name of the property.
        :type name: string

        :param value: Value of the property. Either of a basic Python type of
                      any of the icalendar's own property types.
        :type value: Python native type or icalendar property type.

        :param parameters: Property parameter dictionary for the value. Only
                           available, if encode is set to True.
        :type parameters: Dictionary

        :param encode: True, if the value should be encoded to one of
                       icalendar's own property types (Fallback is "vText")
                       or False, if not.
        :type encode: Boolean

        :returns: None
        """
        if isinstance(value, datetime) and \
                name.lower() in ('dtstamp', 'created', 'last-modified'):
            # RFC expects UTC for those... force value conversion.
            if getattr(value, 'tzinfo', False) and value.tzinfo is not None:
                value = value.astimezone(pytz.utc)
            else:
                # assume UTC for naive datetime instances
                value = pytz.utc.localize(value)

        # encode value
        if encode and isinstance(value, list) \
                and name.lower() not in ['rdate', 'exdate']:
            # Individually convert each value to an ical type except rdate and
            # exdate, where lists of dates might be passed to vDDDLists.
            value = [self._encode(name, v, parameters, encode) for v in value]
        else:
            value = self._encode(name, value, parameters, encode)

        # set value
        if name in self:
            # If property already exists, append it.
            oldval = self[name]
            if isinstance(oldval, list):
                if isinstance(value, list):
                    value = oldval + value
                else:
                    oldval.append(value)
                    value = oldval
            else:
                value = [oldval, value]
        self[name] = value

    def _decode(self, name, value):
        """Internal for decoding property values.
        """
        # TODO: Currently the decoded method calls the icalendar.prop instances
        # from_ical. We probably want to decode properties into Python native
        # types here. But when parsing from an ical string with from_ical, we
        # want to encode the string into a real icalendar.prop property.
        if isinstance(value, vDDDLists):
            # TODO: Workaround unfinished decoding
            return value
        decoded = types_factory.from_ical(name, value)
        # TODO: remove when proper decoded is implemented in every prop.* class
        # Workaround to decode vText properly
        if isinstance(decoded, vText):
            decoded = decoded.encode(DEFAULT_ENCODING)
        return decoded

    def decoded(self, name, default=_marker):
        """Returns decoded value of property.
        """
        # XXX: fail. what's this function supposed to do in the end?
        # -rnix
        if name in self:
            value = self[name]
            if isinstance(value, list):
                return [self._decode(name, v) for v in value]
            return self._decode(name, value)
        else:
            if default is _marker:
                raise KeyError(name)
            else:
                return default

    ########################################################################
    # Inline values. A few properties have multiple values inlined in in one
    # property line. These methods are used for splitting and joining these.

    def get_inline(self, name, decode=1):
        """Returns a list of values (split on comma).
        """
        vals = [v.strip('" ') for v in q_split(self[name])]
        if decode:
            return [self._decode(name, val) for val in vals]
        return vals

    def set_inline(self, name, values, encode=1):
        """Converts a list of values into comma separated string and sets value
        to that.
        """
        if encode:
            values = [self._encode(name, value, encode=1) for value in values]
        self[name] = types_factory['inline'](q_join(values))

    #########################
    # Handling of components

    def add_component(self, component):
        """Add a subcomponent to this component.
        """
        self.subcomponents.append(component)

    def _walk(self, name):
        """Walk to given component.
        """
        result = []
        if name is None or self.name == name:
            result.append(self)
        for subcomponent in self.subcomponents:
            result += subcomponent._walk(name)
        return result

    def walk(self, name=None):
        """Recursively traverses component and subcomponents. Returns sequence
        of same. If name is passed, only components with name will be returned.
        """
        if name is not None:  # idiom fix: was 'not name is None'
            name = name.upper()
        return self._walk(name)

    #####################
    # Generation

    def property_items(self, recursive=True):
        """Returns properties in this component and subcomponents as:
        [(name, value), ...]
        """
        vText = types_factory['text']
        properties = [('BEGIN', vText(self.name).to_ical())]
        property_names = self.sorted_keys()
        for name in property_names:
            values = self[name]
            if isinstance(values, list):
                # normally one property is one line
                for value in values:
                    properties.append((name, value))
            else:
                properties.append((name, values))
        if recursive:
            # recursion is fun!
            for subcomponent in self.subcomponents:
                properties += subcomponent.property_items()
        properties.append(('END', vText(self.name).to_ical()))
        return properties

    @classmethod
    def from_ical(cls, st, multiple=False):
        """Populates the component recursively from a string.
        """
        stack = []  # a stack of components
        comps = []
        for line in Contentlines.from_ical(st):  # raw parsing
            if not line:
                continue
            name, params, vals = line.parts()
            uname = name.upper()
            # check for start of component
            if uname == 'BEGIN':
                # try and create one of the components defined in the spec,
                # otherwise get a general Components for robustness.
                c_name = vals.upper()
                c_class = component_factory.get(c_name, cls)
                component = c_class()
                if not getattr(component, 'name', ''):  # undefined components
                    component.name = c_name
                stack.append(component)
            # check for end of event
            elif uname == 'END':
                # we are done adding properties to this component
                # so pop it from the stack and add it to the new top.
                component = stack.pop()
                if not stack:  # we are at the end
                    comps.append(component)
                else:
                    if not component.is_broken:
                        stack[-1].add_component(component)
            # we are adding properties to the current top of the stack
            else:
                factory = types_factory.for_property(name)
                component = stack[-1]
                datetime_names = ('DTSTART', 'DTEND', 'RECURRENCE-ID', 'DUE',
                                  'FREEBUSY', 'RDATE', 'EXDATE')
                try:
                    if name in datetime_names and 'TZID' in params:
                        vals = factory(factory.from_ical(vals, params['TZID']))
                    else:
                        vals = factory(factory.from_ical(vals))
                except ValueError:
                    if not component.ignore_exceptions:
                        raise
                    component.is_broken = True
                else:
                    vals.params = params
                    component.add(name, vals, encode=0)
        if multiple:
            return comps
        if len(comps) > 1:
            raise ValueError('Found multiple components where '
                             'only one is allowed: {st!r}'.format(**locals()))
        if len(comps) < 1:
            raise ValueError('Found no components where '
                             'exactly one is required: '
                             '{st!r}'.format(**locals()))
        return comps[0]

    def __repr__(self):
        return '%s(%s)' % (self.name, data_encode(self))

    def content_line(self, name, value):
        """Returns property as content line.
        """
        params = getattr(value, 'params', Parameters())
        return Contentline.from_parts(name, params, value)

    def content_lines(self):
        """Converts the Component and subcomponents into content lines.
        """
        contentlines = Contentlines()
        for name, value in self.property_items():
            cl = self.content_line(name, value)
            contentlines.append(cl)
        contentlines.append('')  # remember the empty string in the end
        return contentlines

    def to_ical(self):
        """Render this component (and subcomponents) as an iCalendar string."""
        content_lines = self.content_lines()
        return content_lines.to_ical()
#######################################
# components defined in RFC 2445
class Event(Component):
    """An iCalendar VEVENT component (RFC 2445 section 4.6.1)."""

    name = 'VEVENT'

    canonical_order = (
        'SUMMARY', 'DTSTART', 'DTEND', 'DURATION', 'DTSTAMP',
        'UID', 'RECURRENCE-ID', 'SEQUENCE',
        # BUG FIX: "'RRULE' 'EXRULE'" was implicit string concatenation,
        # producing the single bogus entry 'RRULEEXRULE'; a comma restores
        # the two intended canonical-order entries.
        'RRULE', 'EXRULE', 'RDATE', 'EXDATE',
    )

    required = ('UID',)
    singletons = (
        'CLASS', 'CREATED', 'DESCRIPTION', 'DTSTART', 'GEO', 'LAST-MODIFIED',
        'LOCATION', 'ORGANIZER', 'PRIORITY', 'DTSTAMP', 'SEQUENCE', 'STATUS',
        'SUMMARY', 'TRANSP', 'URL', 'RECURRENCE-ID', 'DTEND', 'DURATION',
        'DTSTART',
    )
    exclusive = ('DTEND', 'DURATION', )
    multiple = (
        'ATTACH', 'ATTENDEE', 'CATEGORIES', 'COMMENT', 'CONTACT', 'EXDATE',
        'EXRULE', 'RSTATUS', 'RELATED', 'RESOURCES', 'RDATE', 'RRULE'
    )
    ignore_exceptions = True
class Todo(Component):
    """An iCalendar VTODO (to-do item) component."""
    name = 'VTODO'
    required = ('UID',)
    singletons = (
        'CLASS', 'COMPLETED', 'CREATED', 'DESCRIPTION', 'DTSTAMP', 'DTSTART',
        'GEO', 'LAST-MODIFIED', 'LOCATION', 'ORGANIZER', 'PERCENT', 'PRIORITY',
        'RECURRENCE-ID', 'SEQUENCE', 'STATUS', 'SUMMARY', 'UID', 'URL', 'DUE',
        'DURATION',
    )
    exclusive = ('DUE', 'DURATION',)
    multiple = (
        'ATTACH', 'ATTENDEE', 'CATEGORIES', 'COMMENT', 'CONTACT', 'EXDATE',
        'EXRULE', 'RSTATUS', 'RELATED', 'RESOURCES', 'RDATE', 'RRULE'
    )
class Journal(Component):
    """An iCalendar VJOURNAL (journal entry) component."""
    name = 'VJOURNAL'
    required = ('UID',)
    singletons = (
        'CLASS', 'CREATED', 'DESCRIPTION', 'DTSTART', 'DTSTAMP',
        'LAST-MODIFIED', 'ORGANIZER', 'RECURRENCE-ID', 'SEQUENCE', 'STATUS',
        'SUMMARY', 'UID', 'URL',
    )
    multiple = (
        'ATTACH', 'ATTENDEE', 'CATEGORIES', 'COMMENT', 'CONTACT', 'EXDATE',
        'EXRULE', 'RELATED', 'RDATE', 'RRULE', 'RSTATUS',
    )
class FreeBusy(Component):
    """An iCalendar VFREEBUSY (free/busy time) component."""
    name = 'VFREEBUSY'
    required = ('UID',)
    singletons = (
        'CONTACT', 'DTSTART', 'DTEND', 'DURATION', 'DTSTAMP', 'ORGANIZER',
        'UID', 'URL',
    )
    multiple = ('ATTENDEE', 'COMMENT', 'FREEBUSY', 'RSTATUS',)
class Timezone(Component):
    """An iCalendar VTIMEZONE component, containing STANDARD/DAYLIGHT subcomponents."""
    name = 'VTIMEZONE'
    canonical_order = ('TZID', 'STANDARD', 'DAYLIGHT',)
    required = ('TZID', 'STANDARD', 'DAYLIGHT',)
    singletons = ('TZID', 'LAST-MODIFIED', 'TZURL',)
class TimezoneStandard(Component):
    """The STANDARD (winter time) subcomponent of a VTIMEZONE."""
    name = 'STANDARD'
    required = ('DTSTART', 'TZOFFSETTO', 'TZOFFSETFROM')
    singletons = ('DTSTART', 'TZOFFSETTO', 'TZOFFSETFROM', 'RRULE')
    multiple = ('COMMENT', 'RDATE', 'TZNAME')
class TimezoneDaylight(Component):
    """The DAYLIGHT (summer time) subcomponent of a VTIMEZONE."""
    name = 'DAYLIGHT'
    required = ('DTSTART', 'TZOFFSETTO', 'TZOFFSETFROM')
    singletons = ('DTSTART', 'TZOFFSETTO', 'TZOFFSETFROM', 'RRULE')
    multiple = ('COMMENT', 'RDATE', 'TZNAME')
class Alarm(Component):
    """An iCalendar VALARM component."""
    name = 'VALARM'
    # not quite sure about these ...
    required = ('ACTION', 'TRIGGER',)
    singletons = ('ATTACH', 'ACTION', 'TRIGGER', 'DURATION', 'REPEAT',)
    inclusive = (('DURATION', 'REPEAT',),)
class Calendar(Component):
    """This is the base object for an iCalendar file.
    """
    name = 'VCALENDAR'
    canonical_order = ('VERSION', 'PRODID', 'CALSCALE', 'METHOD',)
    required = ('prodid', 'version', )
    singletons = ('prodid', 'version', )
    multiple = ('calscale', 'method', )
# These are read-only singletons, so one instance is enough for the module.
types_factory = TypesFactory()
component_factory = ComponentFactory()
|
mlperf/training_results_v0.7 | refs/heads/master | Fujitsu/benchmarks/resnet/implementations/implementation_open/mxnet/3rdparty/googletest/googletest/test/googletest-failfast-unittest.py | 21 | #!/usr/bin/env python
#
# Copyright 2020 Google Inc. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for Google Test fail_fast.
A user can specify if a Google Test program should continue test execution
after a test failure via the GTEST_FAIL_FAST environment variable or the
--gtest_fail_fast flag. The default value of the flag can also be changed
by Bazel fail fast environment variable TESTBRIDGE_TEST_RUNNER_FAIL_FAST.
This script tests such functionality by invoking googletest-failfast-unittest_
(a program written with Google Test) with different environments and command
line flags.
"""
import os
import gtest_test_utils
# Constants.
# Bazel testbridge environment variable for fail fast
BAZEL_FAIL_FAST_ENV_VAR = 'TESTBRIDGE_TEST_RUNNER_FAIL_FAST'
# The environment variable for specifying fail fast.
FAIL_FAST_ENV_VAR = 'GTEST_FAIL_FAST'
# The command line flag for specifying fail fast.
FAIL_FAST_FLAG = 'gtest_fail_fast'
# The command line flag to run disabled tests.
RUN_DISABLED_FLAG = 'gtest_also_run_disabled_tests'
# The command line flag for specifying a filter.
FILTER_FLAG = 'gtest_filter'
# Command to run the googletest-failfast-unittest_ program.
COMMAND = gtest_test_utils.GetTestExecutablePath(
    'googletest-failfast-unittest_')
# The command line flag to tell Google Test to output the list of tests it
# will run.
LIST_TESTS_FLAG = '--gtest_list_tests'
# Indicates whether Google Test supports death tests. NOTE: this spawns the
# test binary once at import time to inspect its test list.
SUPPORTS_DEATH_TESTS = 'HasDeathTest' in gtest_test_utils.Subprocess(
    [COMMAND, LIST_TESTS_FLAG]).output
# Utilities.
# Mutable copy of the environment, edited by SetEnvVar and handed to the
# spawned test subprocesses.
environ = os.environ.copy()


def SetEnvVar(env_var, value):
  """Sets the env variable to 'value'; unsets it when 'value' is None."""
  if value is None:
    # pop with a default is a no-op when the variable is already absent.
    environ.pop(env_var, None)
  else:
    environ[env_var] = value
def RunAndReturnOutput(test_suite=None, fail_fast=None, run_disabled=False):
  """Runs the test program and returns its output.

  Args:
    test_suite: if set, restrict the run to tests in this suite via
        --gtest_filter.
    fail_fast: None (flag omitted), a bool (boolean flag form), or a string
        (--gtest_fail_fast=<value> form).
    run_disabled: if True, also run disabled tests.

  Returns:
    A (stdout_text, xml_report_text) pair.
  """
  args = []
  # Always request an XML report so callers can inspect structured results.
  xml_path = os.path.join(gtest_test_utils.GetTempDir(),
                          '.GTestFailFastUnitTest.xml')
  args += ['--gtest_output=xml:' + xml_path]
  if fail_fast is not None:
    if isinstance(fail_fast, str):
      args += ['--%s=%s' % (FAIL_FAST_FLAG, fail_fast)]
    elif fail_fast:
      args += ['--%s' % FAIL_FAST_FLAG]
    else:
      args += ['--no%s' % FAIL_FAST_FLAG]
  if test_suite:
    args += ['--%s=%s.*' % (FILTER_FLAG, test_suite)]
  if run_disabled:
    args += ['--%s' % RUN_DISABLED_FLAG]
  # The module-level 'environ' copy carries any variables set via SetEnvVar.
  txt_out = gtest_test_utils.Subprocess([COMMAND] + args, env=environ).output
  with open(xml_path) as xml_file:
    return txt_out, xml_file.read()
# The unit test.
class GTestFailFastUnitTest(gtest_test_utils.TestCase):
  """Tests the env variable or the command line flag for fail_fast."""
  def testDefaultBehavior(self):
    """Tests the behavior of not specifying the fail_fast."""
    txt, _ = RunAndReturnOutput()
    self.assertIn('22 FAILED TEST', txt)
  def testGoogletestFlag(self):
    """Tests specifying fail_fast via the --gtest_fail_fast flag."""
    txt, _ = RunAndReturnOutput(test_suite='HasSimpleTest', fail_fast=True)
    self.assertIn('1 FAILED TEST', txt)
    self.assertIn('[  SKIPPED ] 3 tests', txt)
    txt, _ = RunAndReturnOutput(test_suite='HasSimpleTest', fail_fast=False)
    self.assertIn('4 FAILED TEST', txt)
    self.assertNotIn('[  SKIPPED ]', txt)
  def testGoogletestEnvVar(self):
    """Tests the behavior of specifying fail_fast via Googletest env var."""
    try:
      SetEnvVar(FAIL_FAST_ENV_VAR, '1')
      txt, _ = RunAndReturnOutput('HasSimpleTest')
      self.assertIn('1 FAILED TEST', txt)
      self.assertIn('[  SKIPPED ] 3 tests', txt)
      SetEnvVar(FAIL_FAST_ENV_VAR, '0')
      txt, _ = RunAndReturnOutput('HasSimpleTest')
      self.assertIn('4 FAILED TEST', txt)
      self.assertNotIn('[  SKIPPED ]', txt)
    finally:
      # Always restore the child environment for subsequent tests.
      SetEnvVar(FAIL_FAST_ENV_VAR, None)
  def testBazelEnvVar(self):
    """Tests the behavior of specifying fail_fast via Bazel testbridge."""
    try:
      SetEnvVar(BAZEL_FAIL_FAST_ENV_VAR, '1')
      txt, _ = RunAndReturnOutput('HasSimpleTest')
      self.assertIn('1 FAILED TEST', txt)
      self.assertIn('[  SKIPPED ] 3 tests', txt)
      SetEnvVar(BAZEL_FAIL_FAST_ENV_VAR, '0')
      txt, _ = RunAndReturnOutput('HasSimpleTest')
      self.assertIn('4 FAILED TEST', txt)
      self.assertNotIn('[  SKIPPED ]', txt)
    finally:
      SetEnvVar(BAZEL_FAIL_FAST_ENV_VAR, None)
  def testFlagOverridesEnvVar(self):
    """Tests precedence of flag over env var."""
    try:
      SetEnvVar(FAIL_FAST_ENV_VAR, '0')
      # Env var says "off", flag says "on": the flag must win.
      txt, _ = RunAndReturnOutput('HasSimpleTest', True)
      self.assertIn('1 FAILED TEST', txt)
      self.assertIn('[  SKIPPED ] 3 tests', txt)
    finally:
      SetEnvVar(FAIL_FAST_ENV_VAR, None)
  def testGoogletestEnvVarOverridesBazelEnvVar(self):
    """Tests that the Googletest native env var over Bazel testbridge."""
    try:
      SetEnvVar(BAZEL_FAIL_FAST_ENV_VAR, '0')
      SetEnvVar(FAIL_FAST_ENV_VAR, '1')
      txt, _ = RunAndReturnOutput('HasSimpleTest')
      self.assertIn('1 FAILED TEST', txt)
      self.assertIn('[  SKIPPED ] 3 tests', txt)
    finally:
      SetEnvVar(FAIL_FAST_ENV_VAR, None)
      SetEnvVar(BAZEL_FAIL_FAST_ENV_VAR, None)
  def testEventListener(self):
    """Tests that event-listener callbacks fire for skipped tests too."""
    txt, _ = RunAndReturnOutput(test_suite='HasSkipTest', fail_fast=True)
    self.assertIn('1 FAILED TEST', txt)
    self.assertIn('[  SKIPPED ] 3 tests', txt)
    for expected_count, callback in [(1, 'OnTestSuiteStart'),
                                     (5, 'OnTestStart'),
                                     (5, 'OnTestEnd'),
                                     (5, 'OnTestPartResult'),
                                     (1, 'OnTestSuiteEnd')]:
      self.assertEqual(
          expected_count, txt.count(callback),
          'Expected %d calls to callback %s match count on output: %s ' %
          (expected_count, callback, txt))
    txt, _ = RunAndReturnOutput(test_suite='HasSkipTest', fail_fast=False)
    self.assertIn('3 FAILED TEST', txt)
    self.assertIn('[  SKIPPED ] 1 test', txt)
    for expected_count, callback in [(1, 'OnTestSuiteStart'),
                                     (5, 'OnTestStart'),
                                     (5, 'OnTestEnd'),
                                     (5, 'OnTestPartResult'),
                                     (1, 'OnTestSuiteEnd')]:
      self.assertEqual(
          expected_count, txt.count(callback),
          'Expected %d calls to callback %s match count on output: %s ' %
          (expected_count, callback, txt))
  def assertXmlResultCount(self, result, count, xml):
    """Asserts the number of result="..." attributes in the XML report."""
    self.assertEqual(
        count, xml.count('result="%s"' % result),
        'Expected \'result="%s"\' match count of %s: %s ' %
        (result, count, xml))
  def assertXmlStatusCount(self, status, count, xml):
    """Asserts the number of status="..." attributes in the XML report."""
    self.assertEqual(
        count, xml.count('status="%s"' % status),
        'Expected \'status="%s"\' match count of %s: %s ' %
        (status, count, xml))
  def assertFailFastXmlAndTxtOutput(self,
                                    fail_fast,
                                    test_suite,
                                    passed_count,
                                    failure_count,
                                    skipped_count,
                                    suppressed_count,
                                    run_disabled=False):
    """Assert XML and text output of a test execution."""
    txt, xml = RunAndReturnOutput(test_suite, fail_fast, run_disabled)
    if failure_count > 0:
      self.assertIn('%s FAILED TEST' % failure_count, txt)
    if suppressed_count > 0:
      self.assertIn('%s DISABLED TEST' % suppressed_count, txt)
    if skipped_count > 0:
      self.assertIn('[  SKIPPED ] %s tests' % skipped_count, txt)
    # XML: skipped tests still count as "run"; suppressed ones as "notrun".
    self.assertXmlStatusCount('run',
                              passed_count + failure_count + skipped_count, xml)
    self.assertXmlStatusCount('notrun', suppressed_count, xml)
    self.assertXmlResultCount('completed', passed_count + failure_count, xml)
    self.assertXmlResultCount('skipped', skipped_count, xml)
    self.assertXmlResultCount('suppressed', suppressed_count, xml)
  def assertFailFastBehavior(self,
                             test_suite,
                             passed_count,
                             failure_count,
                             skipped_count,
                             suppressed_count,
                             run_disabled=False):
    """Assert --fail_fast via flag."""
    # Every accepted "true" spelling must behave identically.
    for fail_fast in ('true', '1', 't', True):
      self.assertFailFastXmlAndTxtOutput(fail_fast, test_suite, passed_count,
                                         failure_count, skipped_count,
                                         suppressed_count, run_disabled)
  def assertNotFailFastBehavior(self,
                                test_suite,
                                passed_count,
                                failure_count,
                                skipped_count,
                                suppressed_count,
                                run_disabled=False):
    """Assert --nofail_fast via flag."""
    # Every accepted "false" spelling must behave identically.
    for fail_fast in ('false', '0', 'f', False):
      self.assertFailFastXmlAndTxtOutput(fail_fast, test_suite, passed_count,
                                         failure_count, skipped_count,
                                         suppressed_count, run_disabled)
  def testFlag_HasFixtureTest(self):
    """Tests the behavior of fail_fast and TEST_F."""
    self.assertFailFastBehavior(
        test_suite='HasFixtureTest',
        passed_count=1,
        failure_count=1,
        skipped_count=3,
        suppressed_count=0)
    self.assertNotFailFastBehavior(
        test_suite='HasFixtureTest',
        passed_count=1,
        failure_count=4,
        skipped_count=0,
        suppressed_count=0)
  def testFlag_HasSimpleTest(self):
    """Tests the behavior of fail_fast and TEST."""
    self.assertFailFastBehavior(
        test_suite='HasSimpleTest',
        passed_count=1,
        failure_count=1,
        skipped_count=3,
        suppressed_count=0)
    self.assertNotFailFastBehavior(
        test_suite='HasSimpleTest',
        passed_count=1,
        failure_count=4,
        skipped_count=0,
        suppressed_count=0)
  def testFlag_HasParametersTest(self):
    """Tests the behavior of fail_fast and TEST_P."""
    self.assertFailFastBehavior(
        test_suite='HasParametersSuite/HasParametersTest',
        passed_count=0,
        failure_count=1,
        skipped_count=3,
        suppressed_count=0)
    self.assertNotFailFastBehavior(
        test_suite='HasParametersSuite/HasParametersTest',
        passed_count=0,
        failure_count=4,
        skipped_count=0,
        suppressed_count=0)
  def testFlag_HasDisabledTest(self):
    """Tests the behavior of fail_fast and Disabled test cases."""
    self.assertFailFastBehavior(
        test_suite='HasDisabledTest',
        passed_count=1,
        failure_count=1,
        skipped_count=2,
        suppressed_count=1,
        run_disabled=False)
    self.assertNotFailFastBehavior(
        test_suite='HasDisabledTest',
        passed_count=1,
        failure_count=3,
        skipped_count=0,
        suppressed_count=1,
        run_disabled=False)
  def testFlag_HasDisabledRunDisabledTest(self):
    """Tests the behavior of fail_fast and Disabled test cases enabled."""
    self.assertFailFastBehavior(
        test_suite='HasDisabledTest',
        passed_count=1,
        failure_count=1,
        skipped_count=3,
        suppressed_count=0,
        run_disabled=True)
    self.assertNotFailFastBehavior(
        test_suite='HasDisabledTest',
        passed_count=1,
        failure_count=4,
        skipped_count=0,
        suppressed_count=0,
        run_disabled=True)
  def testFlag_HasDisabledSuiteTest(self):
    """Tests the behavior of fail_fast and Disabled test suites."""
    self.assertFailFastBehavior(
        test_suite='DISABLED_HasDisabledSuite',
        passed_count=0,
        failure_count=0,
        skipped_count=0,
        suppressed_count=5,
        run_disabled=False)
    self.assertNotFailFastBehavior(
        test_suite='DISABLED_HasDisabledSuite',
        passed_count=0,
        failure_count=0,
        skipped_count=0,
        suppressed_count=5,
        run_disabled=False)
  def testFlag_HasDisabledSuiteRunDisabledTest(self):
    """Tests the behavior of fail_fast and Disabled test suites enabled."""
    self.assertFailFastBehavior(
        test_suite='DISABLED_HasDisabledSuite',
        passed_count=1,
        failure_count=1,
        skipped_count=3,
        suppressed_count=0,
        run_disabled=True)
    self.assertNotFailFastBehavior(
        test_suite='DISABLED_HasDisabledSuite',
        passed_count=1,
        failure_count=4,
        skipped_count=0,
        suppressed_count=0,
        run_disabled=True)
  # Death-test coverage only exists when the child binary supports death
  # tests (see SUPPORTS_DEATH_TESTS above), so the method is defined
  # conditionally at class-creation time.
  if SUPPORTS_DEATH_TESTS:
    def testFlag_HasDeathTest(self):
      """Tests the behavior of fail_fast and death tests."""
      self.assertFailFastBehavior(
          test_suite='HasDeathTest',
          passed_count=1,
          failure_count=1,
          skipped_count=3,
          suppressed_count=0)
      self.assertNotFailFastBehavior(
          test_suite='HasDeathTest',
          passed_count=1,
          failure_count=4,
          skipped_count=0,
          suppressed_count=0)
if __name__ == '__main__':
  gtest_test_utils.Main()
|
pwoodworth/intellij-community | refs/heads/master | python/lib/Lib/site-packages/django/forms/util.py | 311 | from django.utils.html import conditional_escape
from django.utils.encoding import StrAndUnicode, force_unicode
from django.utils.safestring import mark_safe
# Import ValidationError so that it can be imported from this
# module to maintain backwards compatibility.
from django.core.exceptions import ValidationError
def flatatt(attrs):
    """
    Convert a dictionary of attributes to a single string.

    The returned string will contain a leading space followed by key="value",
    XML-style pairs. It is assumed that the keys do not need to be XML-escaped.
    If the passed dictionary is empty, then return an empty string.
    """
    pairs = [u' %s="%s"' % (name, conditional_escape(value))
             for name, value in attrs.items()]
    return u''.join(pairs)
class ErrorDict(dict, StrAndUnicode):
    """
    A dictionary of form errors, keyed by field name, that renders itself
    as an HTML list or as plain text.
    """
    def __unicode__(self):
        # The default textual form is the HTML <ul> rendering.
        return self.as_ul()
    def as_ul(self):
        """Render as an HTML unordered list; field names are not escaped."""
        if not self:
            return u''
        items = u''.join([u'<li>%s%s</li>' % (field, force_unicode(errors))
                          for field, errors in self.items()])
        return mark_safe(u'<ul class="errorlist">%s</ul>' % items)
    def as_text(self):
        """Render as indented plain-text bullets, one block per field."""
        blocks = []
        for field, errors in self.items():
            details = u'\n'.join([u'  * %s' % force_unicode(error)
                                  for error in errors])
            blocks.append(u'* %s\n%s' % (field, details))
        return u'\n'.join(blocks)
class ErrorList(list, StrAndUnicode):
    """
    A list of form errors that renders itself as an HTML list or plain text.
    """
    def __unicode__(self):
        # The default textual form is the HTML <ul> rendering.
        return self.as_ul()
    def as_ul(self):
        """Render as an HTML unordered list (values escaped, marked safe)."""
        if not self:
            return u''
        items = u''.join([u'<li>%s</li>' % conditional_escape(force_unicode(error))
                          for error in self])
        return mark_safe(u'<ul class="errorlist">%s</ul>' % items)
    def as_text(self):
        """Render as plain-text bullet points, one per error."""
        if not self:
            return u''
        return u'\n'.join([u'* %s' % force_unicode(error) for error in self])
    def __repr__(self):
        return repr([force_unicode(error) for error in self])
|
Syrcon/servo | refs/heads/master | tests/wpt/web-platform-tests/old-tests/webdriver/network.py | 212 | # this comes from this stack overflow post:
# http://stackoverflow.com/a/1947766/725944
# module for getting the lan ip address of the computer
import os
import socket
if os.name != "nt":
import fcntl
import struct
def get_interface_ip(ifname):
    """Return the IPv4 address assigned to network interface `ifname`.

    Uses the SIOCGIFADDR ioctl, so this only works on platforms providing
    fcntl (guarded by the `os.name != "nt"` check above). Raises IOError
    if the interface does not exist or has no address assigned.
    """
    sckt = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        # The ioctl fills in a packed ifreq struct; bytes 20:24 are the
        # in_addr of the interface.
        packed = fcntl.ioctl(
            sckt.fileno(),
            0x8915,  # SIOCGIFADDR
            struct.pack('256s', ifname[:15])
        )
    finally:
        # Fix: the original never closed the socket, leaking one file
        # descriptor per call (including on ioctl failure).
        sckt.close()
    return socket.inet_ntoa(packed[20:24])
def get_lan_ip():
    """Best-effort LAN IPv4 address of this host.

    When the hostname resolves only to a loopback address (and we are not
    on Windows), probe a list of common interface names and return the
    first address found; otherwise fall back to the resolved address.
    """
    ip = socket.gethostbyname(socket.gethostname())
    if not ip.startswith("127.") or os.name == "nt":
        return ip
    for ifname in ("eth0", "eth1", "eth2", "wlan0", "wlan1",
                   "wifi0", "ath0", "ath1", "ppp0"):
        try:
            return get_interface_ip(ifname)
        except IOError:
            # Interface absent or without an address: try the next one.
            continue
    return ip
|
quantopian/odo | refs/heads/master | docs/source/conf.py | 8 | # -*- coding: utf-8 -*-
#
# odo documentation build configuration file, created by
# sphinx-quickstart on Sat Dec 6 16:03:44 2015.
# (The project was previously named "into".)
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.extlinks']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'odo'
copyright = u'2015, Continuum Analytics'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# NOTE: importing odo here means the package must be importable at doc-build
# time (it is installed in the docs build environment).
from odo import __version__ as version
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'ododoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
  ('index', 'odo.tex', u'odo Documentation',
   u'Matthew Rocklin', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'odo', u'odo Documentation',
     [u'Continuum Analytics'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
  ('index', 'odo', u'odo Documentation',
   u'Continuum Analytics', 'odo', 'Data migrations',
   'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'odo'
epub_author = u'Matthew Rocklin'
epub_publisher = u'Continuum Analytics'
epub_copyright = u'2015, Continuum Analytics'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Shorthand so docs can write :issue:`123` for GitHub issue links.
extlinks = dict(issue=('https://github.com/blaze/odo/issues/%s', '#'))
|
jbranchaud/hn-cli | refs/heads/master | RSSParser.py | 2 | #!/usr/bin/python
import sys
import requests
from xml.dom import minidom
import urllib2
def main(argv):
    """Fetch the Hacker News RSS feed and print 'title - link - comments'.

    Args:
        argv: command line arguments (currently unused).
    """
    feed = urllib2.urlopen('http://news.ycombinator.com/bigrss')
    xmldoc = minidom.parse(feed)
    for item in xmldoc.getElementsByTagName('item'):
        fields = []
        for tag in ('title', 'link', 'comments'):
            # Fix: look children up by tag name instead of by position.
            # Positional childNodes indexing breaks as soon as the feed
            # contains whitespace text nodes, or reorders/omits an element.
            nodes = item.getElementsByTagName(tag)
            node = nodes[0].firstChild if nodes else None
            if node is not None:
                fields.append(str(node.nodeValue.encode('utf-8')))
            else:
                fields.append('')
        print(' - '.join(fields))

if __name__ == "__main__":
    main(sys.argv[1:])
|
applicationdevm/XlsxWriter | refs/heads/master | examples/hide_row_col.py | 9 | ###############################################################################
#
# Example of how to hide rows and columns in XlsxWriter. In order to
# hide rows without setting each one, (of approximately 1 million rows),
# Excel uses an optimisation to hide all rows that don't have data.
#
# Copyright 2013-2015, John McNamara, jmcnamara@cpan.org
#
import xlsxwriter

# Create the workbook this example writes to.
workbook = xlsxwriter.Workbook('hide_row_col.xlsx')
worksheet = workbook.add_worksheet()
# Write some data.
worksheet.write('D1', 'Some hidden columns.')
worksheet.write('A8', 'Some hidden rows.')
# Hide all rows without data.
worksheet.set_default_row(hide_unused_rows=True)
# Set the height of empty rows that we do want to display even if it is
# the default height.
for row in range(1, 7):
    worksheet.set_row(row, 15)
# Columns can be hidden explicitly. This doesn't increase the file size.
worksheet.set_column('G:XFD', None, None, {'hidden': True})
workbook.close()
|
cwyark/micropython | refs/heads/master | tests/basics/fun_callstar.py | 46 | # function calls with *pos
def foo(a, b, c):
print(a, b, c)
foo(*(1, 2, 3))
foo(1, *(2, 3))
foo(1, 2, *(3,))
foo(1, 2, 3, *())
# Another sequence type
foo(1, 2, *[100])
# Iterator
foo(*range(3))
# pos then iterator
foo(1, *range(2, 4))
# an iterator with many elements
def foo(*rest):
print(rest)
foo(*range(10))
# method calls with *pos
class A:
def foo(self, a, b, c):
print(a, b, c)
a = A()
a.foo(*(1, 2, 3))
a.foo(1, *(2, 3))
a.foo(1, 2, *(3,))
a.foo(1, 2, 3, *())
# Another sequence type
a.foo(1, 2, *[100])
# Iterator
a.foo(*range(3))
|
googleapis/googleapis-gen | refs/heads/master | google/ads/googleads/v6/googleads-py/google/ads/googleads/v6/services/services/search_term_view_service/transports/base.py | 1 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
import typing
import pkg_resources
import google.auth # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.ads.googleads.v6.resources.types import search_term_view
from google.ads.googleads.v6.services.types import search_term_view_service
# Resolve the installed package version for the user-agent string sent
# with API requests.
try:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
        gapic_version=pkg_resources.get_distribution(
            'google-ads-googleads',
        ).version,
    )
except pkg_resources.DistributionNotFound:
    # Package metadata unavailable (e.g. running from a source checkout):
    # fall back to client info without a gapic version.
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
# NOTE(review): this file appears to be auto-generated GAPIC code (repo path
# "googleapis-gen") — prefer regenerating over hand edits.
class SearchTermViewServiceTransport(metaclass=abc.ABCMeta):
    """Abstract transport class for SearchTermViewService."""
    # OAuth scopes requested when falling back to default credentials.
    AUTH_SCOPES = (
        'https://www.googleapis.com/auth/adwords',
    )
    def __init__(
            self, *,
            host: str = 'googleads.googleapis.com',
            credentials: ga_credentials.Credentials = None,
            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
            ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]):
                 The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
        """
        # Save the hostname. Default to port 443 (HTTPS) if none is specified.
        if ':' not in host:
            host += ':443'
        self._host = host
        # If no credentials are provided, then determine the appropriate
        # defaults.
        if credentials is None:
            credentials, _ = google.auth.default(scopes=self.AUTH_SCOPES)
        # Save the credentials.
        self._credentials = credentials
        # Lifted into its own function so it can be stubbed out during tests.
        self._prep_wrapped_messages(client_info)
    def _prep_wrapped_messages(self, client_info):
        # Precomputed wrapped methods
        self._wrapped_methods = {
            self.get_search_term_view: gapic_v1.method.wrap_method(
                self.get_search_term_view,
                default_timeout=None,
                client_info=client_info,
            ),
        }
    # Abstract RPC accessor: concrete transports (gRPC, etc.) must override
    # this property with a callable for the GetSearchTermView RPC.
    @property
    def get_search_term_view(self) -> typing.Callable[
            [search_term_view_service.GetSearchTermViewRequest],
            search_term_view.SearchTermView]:
        raise NotImplementedError
__all__ = (
    'SearchTermViewServiceTransport',
)
|
jirikuncar/invenio | refs/heads/master | invenio/modules/oauthclient/contrib/__init__.py | 20 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2014 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
""" Pre-configured remote applications for existing popular services. """
|
amitgandhinz/cdn | refs/heads/master | poppy/openstack/common/local.py | 378 | # Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Local storage of variables using weak references"""
import threading
import weakref
class WeakLocal(threading.local):
    """Thread-local storage that holds only weak references to its values.

    Attribute reads transparently dereference the stored weak reference,
    yielding the live object, or None once it has been garbage collected.
    """
    def __getattribute__(self, attr):
        ref = super(WeakLocal, self).__getattribute__(attr)
        # What is stored is a weak reference (see __setattr__), not the
        # value itself, so dereference it before handing it back. A dead
        # reference dereferences to None.
        return ref() if ref else ref
    def __setattr__(self, attr, value):
        # Wrap every assigned value in a weak reference so this store never
        # keeps its values alive on its own.
        return super(WeakLocal, self).__setattr__(attr, weakref.ref(value))
# NOTE(mikal): the name "store" should be deprecated in the future
# ("store" has the same weak-reference semantics as weak_store below).
store = WeakLocal()
# A "weak" store uses weak references and allows an object to fall out of scope
# when it falls out of scope in the code that uses the thread local storage. A
# "strong" store will hold a reference to the object so that it never falls out
# of scope.
weak_store = WeakLocal()
strong_store = threading.local()
|
alexallah/django | refs/heads/master | tests/model_options/test_default_related_name.py | 133 | from django.core.exceptions import FieldError
from django.test import TestCase
from .models.default_related_name import Author, Book, Editor
class DefaultRelatedNameTests(TestCase):
    """Tests for Meta.default_related_name and its interaction with
    related_name overrides and model inheritance."""
    @classmethod
    def setUpTestData(cls):
        # One author, one editor (whose bestselling_author FK has no
        # related_name/default_related_name) and one book by that author.
        cls.author = Author.objects.create(first_name='Dave', last_name='Loper')
        cls.editor = Editor.objects.create(name='Test Editions', bestselling_author=cls.author)
        cls.book = Book.objects.create(title='Test Book', editor=cls.editor)
        cls.book.authors.add(cls.author)
    def test_no_default_related_name(self):
        # Without default_related_name the reverse accessor is <model>_set.
        self.assertEqual(list(self.author.editor_set.all()), [self.editor])
    def test_default_related_name(self):
        self.assertEqual(list(self.author.books.all()), [self.book])
    def test_default_related_name_in_queryset_lookup(self):
        self.assertEqual(Author.objects.get(books=self.book), self.author)
    def test_model_name_not_available_in_queryset_lookup(self):
        # default_related_name replaces (not supplements) the model-name lookup.
        msg = "Cannot resolve keyword 'book' into field."
        with self.assertRaisesMessage(FieldError, msg):
            Author.objects.get(book=self.book)
    def test_related_name_overrides_default_related_name(self):
        self.assertEqual(list(self.editor.edited_books.all()), [self.book])
    def test_inheritance(self):
        # model_options is the name of the application for this test.
        self.assertEqual(list(self.book.model_options_bookstores.all()), [])
    def test_inheritance_with_overridden_default_related_name(self):
        self.assertEqual(list(self.book.editor_stores.all()), [])
|
ptisserand/ansible | refs/heads/devel | lib/ansible/modules/cloud/cloudstack/cs_vpn_connection.py | 29 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017, René Moser <mail@renemoser.net>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: cs_vpn_connection
short_description: Manages site-to-site VPN connections on Apache CloudStack based clouds.
description:
- Create and remove VPN connections.
version_added: "2.5"
author: "René Moser (@resmo)"
options:
vpc:
description:
- Name of the VPC the VPN connection is related to.
required: true
vpn_customer_gateway:
description:
- Name of the VPN customer gateway.
required: true
passive:
description:
- State of the VPN connection.
- Only considered when C(state=present).
default: no
type: bool
force:
description:
- Activate the VPN gateway if not already activated on C(state=present).
- Also see M(cs_vpn_gateway).
default: no
type: bool
state:
description:
- State of the VPN connection.
default: present
choices: [ present, absent ]
domain:
description:
- Domain the VPN connection is related to.
account:
description:
- Account the VPN connection is related to.
project:
description:
- Name of the project the VPN connection is related to.
poll_async:
description:
- Poll async jobs until job has finished.
default: yes
type: bool
extends_documentation_fragment: cloudstack
'''
EXAMPLES = r'''
- name: Create a VPN connection with activated VPN gateway
local_action:
module: cs_vpn_connection
vpn_customer_gateway: my vpn connection
vpc: my vpc
- name: Create a VPN connection and force VPN gateway activation
local_action:
module: cs_vpn_connection
vpn_customer_gateway: my vpn connection
vpc: my vpc
force: yes
- name: Remove a vpn connection
local_action:
module: cs_vpn_connection
vpn_customer_gateway: my vpn connection
vpc: my vpc
state: absent
'''
RETURN = r'''
---
id:
description: UUID of the VPN connection.
returned: success
type: string
sample: 04589590-ac63-4ffc-93f5-b698b8ac38b6
vpn_gateway_id:
description: UUID of the VPN gateway.
returned: success
type: string
sample: 04589590-ac63-93f5-4ffc-b698b8ac38b6
domain:
description: Domain the VPN connection is related to.
returned: success
type: string
sample: example domain
account:
description: Account the VPN connection is related to.
returned: success
type: string
sample: example account
project:
description: Name of project the VPN connection is related to.
returned: success
type: string
sample: Production
created:
description: Date the connection was created.
returned: success
type: string
sample: 2014-12-01T14:57:57+0100
dpd:
description: Whether dead pear detection is enabled or not.
returned: success
type: bool
sample: true
esp_lifetime:
description: Lifetime in seconds of phase 2 VPN connection.
returned: success
type: int
sample: 86400
esp_policy:
description: IKE policy of the VPN connection.
returned: success
type: string
sample: aes256-sha1;modp1536
force_encap:
description: Whether encapsulation for NAT traversal is enforced or not.
returned: success
type: bool
sample: true
ike_lifetime:
description: Lifetime in seconds of phase 1 VPN connection.
returned: success
type: int
sample: 86400
ike_policy:
description: ESP policy of the VPN connection.
returned: success
type: string
sample: aes256-sha1;modp1536
cidrs:
description: List of CIDRs of the customer gateway.
returned: success
type: list
sample: [ 10.10.10.0/24 ]
passive:
description: Whether the connection is passive or not.
returned: success
type: bool
sample: false
public_ip:
description: IP address of the VPN gateway.
returned: success
type: string
sample: 10.100.212.10
gateway:
description: IP address of the VPN customer gateway.
returned: success
type: string
sample: 10.101.214.10
state:
description: State of the VPN connection.
returned: success
type: string
sample: Connected
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.cloudstack import (
AnsibleCloudStack,
cs_argument_spec,
cs_required_together
)
class AnsibleCloudStackVpnConnection(AnsibleCloudStack):
    """Manage site-to-site VPN connections on Apache CloudStack.

    Provides idempotent present/absent handling of a VPN connection
    between a VPC's VPN gateway and a VPN customer gateway.
    """

    def __init__(self, module):
        super(AnsibleCloudStackVpnConnection, self).__init__(module)
        # Map CloudStack API result keys to the keys this module returns.
        self.returns = {
            'dpd': 'dpd',
            'esplifetime': 'esp_lifetime',
            'esppolicy': 'esp_policy',
            'gateway': 'gateway',
            'ikepolicy': 'ike_policy',
            'ikelifetime': 'ike_lifetime',
            'publicip': 'public_ip',
            'passive': 'passive',
            's2svpngatewayid': 'vpn_gateway_id',
        }
        # Lazily-filled cache for get_vpn_customer_gateway().
        self.vpn_customer_gateway = None

    def get_vpn_customer_gateway(self, key=None, identifier=None, refresh=False):
        """Return the VPN customer gateway dict, or one of its values.

        :param key: if set, return only this key of the gateway dict.
        :param identifier: gateway UUID or name; defaults to the module
            param ``vpn_customer_gateway`` when not given.
        :param refresh: if True, bypass the cached lookup and re-query.
        Fails the module when no matching gateway is found.
        """
        if not refresh and self.vpn_customer_gateway:
            return self._get_by_key(key, self.vpn_customer_gateway)

        args = {
            'account': self.get_account(key='name'),
            'domainid': self.get_domain(key='id'),
            'projectid': self.get_project(key='id')
        }
        vpn_customer_gateway = identifier or self.module.params.get('vpn_customer_gateway')
        vcgws = self.query_api('listVpnCustomerGateways', **args)
        if vcgws:
            for vcgw in vcgws['vpncustomergateway']:
                # Match by UUID or case-insensitive name.
                if vpn_customer_gateway.lower() in [vcgw['id'], vcgw['name'].lower()]:
                    self.vpn_customer_gateway = vcgw
                    return self._get_by_key(key, self.vpn_customer_gateway)
        # NOTE(review): assumes AnsibleCloudStack exposes fail_json;
        # other cloudstack modules often call self.module.fail_json -- verify.
        self.fail_json(msg="VPN customer gateway not found: %s" % vpn_customer_gateway)

    def get_vpn_gateway(self, key=None):
        """Return the VPC's VPN gateway, creating it when ``force`` is set.

        Fails the module when no gateway exists and creation is not forced.
        In check mode a forced creation returns an empty dict.
        """
        args = {
            'vpcid': self.get_vpc(key='id'),
            'account': self.get_account(key='name'),
            'domainid': self.get_domain(key='id'),
            'projectid': self.get_project(key='id'),
        }
        vpn_gateways = self.query_api('listVpnGateways', **args)
        if vpn_gateways:
            return self._get_by_key(key, vpn_gateways['vpngateway'][0])
        elif self.module.params.get('force'):
            if self.module.check_mode:
                return {}
            res = self.query_api('createVpnGateway', **args)
            vpn_gateway = self.poll_job(res, 'vpngateway')
            return self._get_by_key(key, vpn_gateway)
        self.fail_json(msg="VPN gateway not found and not forced to create one")

    def get_vpn_connection(self):
        """Return the existing VPN connection for the customer gateway, or None."""
        args = {
            'vpcid': self.get_vpc(key='id'),
            'account': self.get_account(key='name'),
            'domainid': self.get_domain(key='id'),
            'projectid': self.get_project(key='id'),
        }
        vpn_conns = self.query_api('listVpnConnections', **args)
        if vpn_conns:
            for vpn_conn in vpn_conns['vpnconnection']:
                if self.get_vpn_customer_gateway(key='id') == vpn_conn['s2scustomergatewayid']:
                    return vpn_conn

    def present_vpn_connection(self):
        """Ensure the VPN connection exists; create it when missing.

        Returns the connection dict (possibly None in check mode or when
        async polling is disabled).
        """
        vpn_conn = self.get_vpn_connection()
        args = {
            's2scustomergatewayid': self.get_vpn_customer_gateway(key='id'),
            's2svpngatewayid': self.get_vpn_gateway(key='id'),
            'passive': self.module.params.get('passive'),
        }
        if not vpn_conn:
            self.result['changed'] = True
            if not self.module.check_mode:
                res = self.query_api('createVpnConnection', **args)
                poll_async = self.module.params.get('poll_async')
                if poll_async:
                    vpn_conn = self.poll_job(res, 'vpnconnection')
        return vpn_conn

    def absent_vpn_connection(self):
        """Ensure the VPN connection is removed; return its last-known dict."""
        vpn_conn = self.get_vpn_connection()
        if vpn_conn:
            self.result['changed'] = True
            args = {
                'id': vpn_conn['id']
            }
            if not self.module.check_mode:
                res = self.query_api('deleteVpnConnection', **args)
                poll_async = self.module.params.get('poll_async')
                if poll_async:
                    self.poll_job(res, 'vpnconnection')
        return vpn_conn

    def get_result(self, vpn_conn):
        """Build the module result dict from a VPN connection API response."""
        super(AnsibleCloudStackVpnConnection, self).get_result(vpn_conn)
        if vpn_conn:
            if 'cidrlist' in vpn_conn:
                # split(',') always yields at least one element, so no
                # fallback is needed for a single-CIDR string.
                self.result['cidrs'] = vpn_conn['cidrlist'].split(',')
            # Ensure we return a bool
            self.result['force_encap'] = bool(vpn_conn['forceencap'])
            args = {
                'key': 'name',
                'identifier': vpn_conn['s2scustomergatewayid'],
                'refresh': True,
            }
            self.result['vpn_customer_gateway'] = self.get_vpn_customer_gateway(**args)
        return self.result
def main():
    """Ansible module entry point: parse arguments and apply the state."""
    spec = cs_argument_spec()
    spec.update(dict(
        vpn_customer_gateway=dict(required=True),
        vpc=dict(required=True),
        domain=dict(),
        account=dict(),
        project=dict(),
        zone=dict(),
        passive=dict(type='bool', default=False),
        force=dict(type='bool', default=False),
        state=dict(choices=['present', 'absent'], default='present'),
        poll_async=dict(type='bool', default=True),
    ))
    module = AnsibleModule(
        argument_spec=spec,
        required_together=cs_required_together(),
        supports_check_mode=True
    )

    handler = AnsibleCloudStackVpnConnection(module)
    if module.params.get('state') == "absent":
        vpn_conn = handler.absent_vpn_connection()
    else:
        vpn_conn = handler.present_vpn_connection()

    module.exit_json(**handler.get_result(vpn_conn))


if __name__ == '__main__':
    main()