| repo_name (string, 5-100 chars) | ref (string, 12-67 chars) | path (string, 4-244 chars) | copies (string, 1-8 chars) | content (string, 0-1.05M chars, nullable) |
|---|---|---|---|---|
safwanrahman/mozillians
|
refs/heads/master
|
vendor-local/lib/python/unidecode/x0b1.py
|
253
|
data = (
'nyaess', # 0x00
'nyaeng', # 0x01
'nyaej', # 0x02
'nyaec', # 0x03
'nyaek', # 0x04
'nyaet', # 0x05
'nyaep', # 0x06
'nyaeh', # 0x07
'neo', # 0x08
'neog', # 0x09
'neogg', # 0x0a
'neogs', # 0x0b
'neon', # 0x0c
'neonj', # 0x0d
'neonh', # 0x0e
'neod', # 0x0f
'neol', # 0x10
'neolg', # 0x11
'neolm', # 0x12
'neolb', # 0x13
'neols', # 0x14
'neolt', # 0x15
'neolp', # 0x16
'neolh', # 0x17
'neom', # 0x18
'neob', # 0x19
'neobs', # 0x1a
'neos', # 0x1b
'neoss', # 0x1c
'neong', # 0x1d
'neoj', # 0x1e
'neoc', # 0x1f
'neok', # 0x20
'neot', # 0x21
'neop', # 0x22
'neoh', # 0x23
'ne', # 0x24
'neg', # 0x25
'negg', # 0x26
'negs', # 0x27
'nen', # 0x28
'nenj', # 0x29
'nenh', # 0x2a
'ned', # 0x2b
'nel', # 0x2c
'nelg', # 0x2d
'nelm', # 0x2e
'nelb', # 0x2f
'nels', # 0x30
'nelt', # 0x31
'nelp', # 0x32
'nelh', # 0x33
'nem', # 0x34
'neb', # 0x35
'nebs', # 0x36
'nes', # 0x37
'ness', # 0x38
'neng', # 0x39
'nej', # 0x3a
'nec', # 0x3b
'nek', # 0x3c
'net', # 0x3d
'nep', # 0x3e
'neh', # 0x3f
'nyeo', # 0x40
'nyeog', # 0x41
'nyeogg', # 0x42
'nyeogs', # 0x43
'nyeon', # 0x44
'nyeonj', # 0x45
'nyeonh', # 0x46
'nyeod', # 0x47
'nyeol', # 0x48
'nyeolg', # 0x49
'nyeolm', # 0x4a
'nyeolb', # 0x4b
'nyeols', # 0x4c
'nyeolt', # 0x4d
'nyeolp', # 0x4e
'nyeolh', # 0x4f
'nyeom', # 0x50
'nyeob', # 0x51
'nyeobs', # 0x52
'nyeos', # 0x53
'nyeoss', # 0x54
'nyeong', # 0x55
'nyeoj', # 0x56
'nyeoc', # 0x57
'nyeok', # 0x58
'nyeot', # 0x59
'nyeop', # 0x5a
'nyeoh', # 0x5b
'nye', # 0x5c
'nyeg', # 0x5d
'nyegg', # 0x5e
'nyegs', # 0x5f
'nyen', # 0x60
'nyenj', # 0x61
'nyenh', # 0x62
'nyed', # 0x63
'nyel', # 0x64
'nyelg', # 0x65
'nyelm', # 0x66
'nyelb', # 0x67
'nyels', # 0x68
'nyelt', # 0x69
'nyelp', # 0x6a
'nyelh', # 0x6b
'nyem', # 0x6c
'nyeb', # 0x6d
'nyebs', # 0x6e
'nyes', # 0x6f
'nyess', # 0x70
'nyeng', # 0x71
'nyej', # 0x72
'nyec', # 0x73
'nyek', # 0x74
'nyet', # 0x75
'nyep', # 0x76
'nyeh', # 0x77
'no', # 0x78
'nog', # 0x79
'nogg', # 0x7a
'nogs', # 0x7b
'non', # 0x7c
'nonj', # 0x7d
'nonh', # 0x7e
'nod', # 0x7f
'nol', # 0x80
'nolg', # 0x81
'nolm', # 0x82
'nolb', # 0x83
'nols', # 0x84
'nolt', # 0x85
'nolp', # 0x86
'nolh', # 0x87
'nom', # 0x88
'nob', # 0x89
'nobs', # 0x8a
'nos', # 0x8b
'noss', # 0x8c
'nong', # 0x8d
'noj', # 0x8e
'noc', # 0x8f
'nok', # 0x90
'not', # 0x91
'nop', # 0x92
'noh', # 0x93
'nwa', # 0x94
'nwag', # 0x95
'nwagg', # 0x96
'nwags', # 0x97
'nwan', # 0x98
'nwanj', # 0x99
'nwanh', # 0x9a
'nwad', # 0x9b
'nwal', # 0x9c
'nwalg', # 0x9d
'nwalm', # 0x9e
'nwalb', # 0x9f
'nwals', # 0xa0
'nwalt', # 0xa1
'nwalp', # 0xa2
'nwalh', # 0xa3
'nwam', # 0xa4
'nwab', # 0xa5
'nwabs', # 0xa6
'nwas', # 0xa7
'nwass', # 0xa8
'nwang', # 0xa9
'nwaj', # 0xaa
'nwac', # 0xab
'nwak', # 0xac
'nwat', # 0xad
'nwap', # 0xae
'nwah', # 0xaf
'nwae', # 0xb0
'nwaeg', # 0xb1
'nwaegg', # 0xb2
'nwaegs', # 0xb3
'nwaen', # 0xb4
'nwaenj', # 0xb5
'nwaenh', # 0xb6
'nwaed', # 0xb7
'nwael', # 0xb8
'nwaelg', # 0xb9
'nwaelm', # 0xba
'nwaelb', # 0xbb
'nwaels', # 0xbc
'nwaelt', # 0xbd
'nwaelp', # 0xbe
'nwaelh', # 0xbf
'nwaem', # 0xc0
'nwaeb', # 0xc1
'nwaebs', # 0xc2
'nwaes', # 0xc3
'nwaess', # 0xc4
'nwaeng', # 0xc5
'nwaej', # 0xc6
'nwaec', # 0xc7
'nwaek', # 0xc8
'nwaet', # 0xc9
'nwaep', # 0xca
'nwaeh', # 0xcb
'noe', # 0xcc
'noeg', # 0xcd
'noegg', # 0xce
'noegs', # 0xcf
'noen', # 0xd0
'noenj', # 0xd1
'noenh', # 0xd2
'noed', # 0xd3
'noel', # 0xd4
'noelg', # 0xd5
'noelm', # 0xd6
'noelb', # 0xd7
'noels', # 0xd8
'noelt', # 0xd9
'noelp', # 0xda
'noelh', # 0xdb
'noem', # 0xdc
'noeb', # 0xdd
'noebs', # 0xde
'noes', # 0xdf
'noess', # 0xe0
'noeng', # 0xe1
'noej', # 0xe2
'noec', # 0xe3
'noek', # 0xe4
'noet', # 0xe5
'noep', # 0xe6
'noeh', # 0xe7
'nyo', # 0xe8
'nyog', # 0xe9
'nyogg', # 0xea
'nyogs', # 0xeb
'nyon', # 0xec
'nyonj', # 0xed
'nyonh', # 0xee
'nyod', # 0xef
'nyol', # 0xf0
'nyolg', # 0xf1
'nyolm', # 0xf2
'nyolb', # 0xf3
'nyols', # 0xf4
'nyolt', # 0xf5
'nyolp', # 0xf6
'nyolh', # 0xf7
'nyom', # 0xf8
'nyob', # 0xf9
'nyobs', # 0xfa
'nyos', # 0xfb
'nyoss', # 0xfc
'nyong', # 0xfd
'nyoj', # 0xfe
'nyoc', # 0xff
)
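# For orientation (a sketch of how this table is consumed, not part of the
# upstream Unidecode file): Unidecode selects a module by the high byte of the
# code point, so x0b1 covers U+B100..U+B1FF and the low byte indexes `data`;
# data[0x00] transliterates U+B100 and data[0xff] transliterates U+B1FF.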
|
jacroe/spynot
|
refs/heads/master
|
google/protobuf/internal/wire_format.py
|
561
|
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Constants and static functions to support protocol buffer wire format."""
__author__ = 'robinson@google.com (Will Robinson)'
import struct
from google.protobuf import descriptor
from google.protobuf import message
TAG_TYPE_BITS = 3 # Number of bits used to hold type info in a proto tag.
TAG_TYPE_MASK = (1 << TAG_TYPE_BITS) - 1 # 0x7
# These numbers identify the wire type of a protocol buffer value.
# We use the least-significant TAG_TYPE_BITS bits of the varint-encoded
# tag-and-type to store one of these WIRETYPE_* constants.
# These values must match WireType enum in google/protobuf/wire_format.h.
WIRETYPE_VARINT = 0
WIRETYPE_FIXED64 = 1
WIRETYPE_LENGTH_DELIMITED = 2
WIRETYPE_START_GROUP = 3
WIRETYPE_END_GROUP = 4
WIRETYPE_FIXED32 = 5
_WIRETYPE_MAX = 5
# Bounds for various integer types.
INT32_MAX = int((1 << 31) - 1)
INT32_MIN = int(-(1 << 31))
UINT32_MAX = (1 << 32) - 1
INT64_MAX = (1 << 63) - 1
INT64_MIN = -(1 << 63)
UINT64_MAX = (1 << 64) - 1
# "struct" format strings that will encode/decode the specified formats.
FORMAT_UINT32_LITTLE_ENDIAN = '<I'
FORMAT_UINT64_LITTLE_ENDIAN = '<Q'
FORMAT_FLOAT_LITTLE_ENDIAN = '<f'
FORMAT_DOUBLE_LITTLE_ENDIAN = '<d'
# We'll have to provide alternate implementations of AppendLittleEndian*() on
# any architectures where these checks fail.
if struct.calcsize(FORMAT_UINT32_LITTLE_ENDIAN) != 4:
raise AssertionError('Format "I" is not a 32-bit number.')
if struct.calcsize(FORMAT_UINT64_LITTLE_ENDIAN) != 8:
raise AssertionError('Format "Q" is not a 64-bit number.')
def PackTag(field_number, wire_type):
"""Returns an unsigned 32-bit integer that encodes the field number and
wire type information in standard protocol message wire format.
Args:
field_number: Expected to be an integer in the range [1, 1 << 29)
wire_type: One of the WIRETYPE_* constants.
"""
if not 0 <= wire_type <= _WIRETYPE_MAX:
raise message.EncodeError('Unknown wire type: %d' % wire_type)
return (field_number << TAG_TYPE_BITS) | wire_type
def UnpackTag(tag):
"""The inverse of PackTag(). Given an unsigned 32-bit number,
returns a (field_number, wire_type) tuple.
"""
return (tag >> TAG_TYPE_BITS), (tag & TAG_TYPE_MASK)
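# Worked example (illustrative comment, not part of the upstream module):
# field number 1 with WIRETYPE_VARINT packs into the single tag byte 0x08,
# because the field number occupies the bits above the low TAG_TYPE_BITS:
#   PackTag(1, WIRETYPE_VARINT)  ->  (1 << 3) | 0 == 0x08
#   UnpackTag(0x08)              ->  (1, WIRETYPE_VARINT)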
def ZigZagEncode(value):
"""ZigZag Transform: Encodes signed integers so that they can be
effectively used with varint encoding. See wire_format.h for
more details.
"""
if value >= 0:
return value << 1
return (value << 1) ^ (~0)
def ZigZagDecode(value):
"""Inverse of ZigZagEncode()."""
if not value & 0x1:
return value >> 1
return (value >> 1) ^ (~0)
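# Worked example (illustrative comment, not part of the upstream module):
# ZigZag interleaves signed values so small magnitudes stay small varints:
#   ZigZagEncode(0) == 0, ZigZagEncode(-1) == 1,
#   ZigZagEncode(1) == 2, ZigZagEncode(-2) == 3,
# and ZigZagDecode() reverses each of these mappings.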
# The *ByteSize() functions below return the number of bytes required to
# serialize "field number + type" information and then serialize the value.
def Int32ByteSize(field_number, int32):
return Int64ByteSize(field_number, int32)
def Int32ByteSizeNoTag(int32):
return _VarUInt64ByteSizeNoTag(0xffffffffffffffff & int32)
def Int64ByteSize(field_number, int64):
# Have to convert to uint before calling UInt64ByteSize().
return UInt64ByteSize(field_number, 0xffffffffffffffff & int64)
def UInt32ByteSize(field_number, uint32):
return UInt64ByteSize(field_number, uint32)
def UInt64ByteSize(field_number, uint64):
return TagByteSize(field_number) + _VarUInt64ByteSizeNoTag(uint64)
def SInt32ByteSize(field_number, int32):
return UInt32ByteSize(field_number, ZigZagEncode(int32))
def SInt64ByteSize(field_number, int64):
return UInt64ByteSize(field_number, ZigZagEncode(int64))
def Fixed32ByteSize(field_number, fixed32):
return TagByteSize(field_number) + 4
def Fixed64ByteSize(field_number, fixed64):
return TagByteSize(field_number) + 8
def SFixed32ByteSize(field_number, sfixed32):
return TagByteSize(field_number) + 4
def SFixed64ByteSize(field_number, sfixed64):
return TagByteSize(field_number) + 8
def FloatByteSize(field_number, flt):
return TagByteSize(field_number) + 4
def DoubleByteSize(field_number, double):
return TagByteSize(field_number) + 8
def BoolByteSize(field_number, b):
return TagByteSize(field_number) + 1
def EnumByteSize(field_number, enum):
return UInt32ByteSize(field_number, enum)
def StringByteSize(field_number, string):
return BytesByteSize(field_number, string.encode('utf-8'))
def BytesByteSize(field_number, b):
return (TagByteSize(field_number)
+ _VarUInt64ByteSizeNoTag(len(b))
+ len(b))
def GroupByteSize(field_number, message):
return (2 * TagByteSize(field_number) # START and END group.
+ message.ByteSize())
def MessageByteSize(field_number, message):
return (TagByteSize(field_number)
+ _VarUInt64ByteSizeNoTag(message.ByteSize())
+ message.ByteSize())
def MessageSetItemByteSize(field_number, msg):
# First compute the sizes of the tags.
# There are 2 tags for the beginning and ending of the repeated group, that
# is field number 1, one with field number 2 (type_id) and one with field
# number 3 (message).
total_size = (2 * TagByteSize(1) + TagByteSize(2) + TagByteSize(3))
# Add the number of bytes for type_id.
total_size += _VarUInt64ByteSizeNoTag(field_number)
message_size = msg.ByteSize()
# The number of bytes for encoding the length of the message.
total_size += _VarUInt64ByteSizeNoTag(message_size)
# The size of the message.
total_size += message_size
return total_size
def TagByteSize(field_number):
"""Returns the bytes required to serialize a tag with this field number."""
# Just pass in type 0, since the type won't affect the tag+type size.
return _VarUInt64ByteSizeNoTag(PackTag(field_number, 0))
# Private helper function for the *ByteSize() functions above.
def _VarUInt64ByteSizeNoTag(uint64):
"""Returns the number of bytes required to serialize a single varint
using boundary value comparisons. (unrolled loop optimization -WPierce)
uint64 must be unsigned.
"""
if uint64 <= 0x7f: return 1
if uint64 <= 0x3fff: return 2
if uint64 <= 0x1fffff: return 3
if uint64 <= 0xfffffff: return 4
if uint64 <= 0x7ffffffff: return 5
if uint64 <= 0x3ffffffffff: return 6
if uint64 <= 0x1ffffffffffff: return 7
if uint64 <= 0xffffffffffffff: return 8
if uint64 <= 0x7fffffffffffffff: return 9
if uint64 > UINT64_MAX:
raise message.EncodeError('Value out of range: %d' % uint64)
return 10
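# Worked example (illustrative comment, not part of the upstream module): each
# varint byte carries 7 payload bits, so _VarUInt64ByteSizeNoTag(127) == 1,
# _VarUInt64ByteSizeNoTag(128) == 2, and the full 64-bit range needs at most
# 10 bytes. Combined with the tag: UInt64ByteSize(1, 150) == 1 (tag) + 2 (varint) == 3.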
NON_PACKABLE_TYPES = (
descriptor.FieldDescriptor.TYPE_STRING,
descriptor.FieldDescriptor.TYPE_GROUP,
descriptor.FieldDescriptor.TYPE_MESSAGE,
descriptor.FieldDescriptor.TYPE_BYTES
)
def IsTypePackable(field_type):
"""Return true iff packable = true is valid for fields of this type.
Args:
field_type: a FieldDescriptor::Type value.
Returns:
True iff fields of this type are packable.
"""
return field_type not in NON_PACKABLE_TYPES
|
joedursun/.emacs.d
|
refs/heads/master
|
elpa/elpy-20150226.1148/elpy/tests/compat.py
|
43
|
"""Python 2/3 compatibility definitions.
These are used by the rest of Elpy to keep compatibility definitions
in one place.
"""
import sys
if sys.version_info >= (3, 0):
PYTHON3 = True
import builtins
from io import StringIO
else:
PYTHON3 = False
import __builtin__ as builtins # noqa
from StringIO import StringIO # noqa
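# Usage sketch (illustrative, not part of the original file): callers import
# these names instead of branching on sys.version_info themselves, e.g.
#   from elpy.tests import compat
#   buf = compat.StringIO()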
|
iseppi/zookeepr
|
refs/heads/master
|
zk/model/voucher.py
|
5
|
"""The application's model objects"""
import sqlalchemy as sa
from meta import Base
from pylons.controllers.util import abort
from person import Person
from product import Product
from meta import Session
class Voucher(Base):
__tablename__ = 'voucher'
id = sa.Column(sa.types.Integer, primary_key=True)
code = sa.Column(sa.types.Text, nullable=False, unique=True)
comment = sa.Column(sa.types.Text, nullable=False)
leader_id = sa.Column(sa.types.Integer, sa.ForeignKey('person.id'), nullable=False)
creation_timestamp = sa.Column(sa.types.DateTime, nullable=False, default=sa.func.current_timestamp())
last_modification_timestamp = sa.Column(sa.types.DateTime, nullable=False, default=sa.func.current_timestamp(), onupdate=sa.func.current_timestamp())
leader = sa.orm.relation(Person, backref=sa.orm.backref('vouchers', cascade="all, delete-orphan"))
def __init__(self, **kwargs):
# remove the args that should never be set via creation
super(Voucher, self).__init__(**kwargs)
def __repr__(self):
return '<Voucher id=%r code=%r comment=%r leader_id=%r>' % (self.id, self.code, self.comment, self.leader_id)
@classmethod
def find_all(cls):
return Session.query(Voucher).order_by(Voucher.id).all()
@classmethod
def find_by_id(cls, id):
return Session.query(Voucher).filter_by(id=id).first()
@classmethod
def find_by_code(cls, code):
return Session.query(Voucher).filter_by(code=code).first()
class VoucherProduct(Base):
# table definitions
__tablename__ = 'voucher_product'
voucher_id = sa.Column(sa.Integer, sa.ForeignKey('voucher.id'), primary_key=True)
product_id = sa.Column(sa.Integer, sa.ForeignKey('product.id'), primary_key=True)
qty = sa.Column(sa.Integer, nullable=False)
percentage = sa.Column(sa.Integer, nullable=False)
# relations
voucher = sa.orm.relation(Voucher, lazy=True, backref=sa.orm.backref('products', cascade="all, delete-orphan"))
product = sa.orm.relation(Product, lazy=True, backref=sa.orm.backref('vouchers', cascade="all, delete-orphan"))
def __init__(self, **kwargs):
# remove the args that should never be set via creation
super(VoucherProduct, self).__init__(**kwargs)
def __repr__(self):
return '<VoucherProduct>'
@classmethod
def find_all(cls):
return Session.query(VoucherProduct).order_by(VoucherProduct.voucher_id).all()  # ordered by part of the composite key; the model defines no single 'id' column
|
jspargo/AneMo
|
refs/heads/master
|
django/lib/python2.7/site-packages/django/db/backends/mysql/client.py
|
84
|
import os
import sys
from django.db.backends import BaseDatabaseClient
class DatabaseClient(BaseDatabaseClient):
executable_name = 'mysql'
def runshell(self):
settings_dict = self.connection.settings_dict
args = [self.executable_name]
db = settings_dict['OPTIONS'].get('db', settings_dict['NAME'])
user = settings_dict['OPTIONS'].get('user', settings_dict['USER'])
passwd = settings_dict['OPTIONS'].get('passwd', settings_dict['PASSWORD'])
host = settings_dict['OPTIONS'].get('host', settings_dict['HOST'])
port = settings_dict['OPTIONS'].get('port', settings_dict['PORT'])
defaults_file = settings_dict['OPTIONS'].get('read_default_file')
# Seems to be no good way to set sql_mode with CLI.
if defaults_file:
args += ["--defaults-file=%s" % defaults_file]
if user:
args += ["--user=%s" % user]
if passwd:
args += ["--password=%s" % passwd]
if host:
if '/' in host:
args += ["--socket=%s" % host]
else:
args += ["--host=%s" % host]
if port:
args += ["--port=%s" % port]
if db:
args += [db]
if os.name == 'nt':
sys.exit(os.system(" ".join(args)))
else:
os.execvp(self.executable_name, args)
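# Illustrative comment (hypothetical settings, not part of Django): with
# NAME='mydb', USER='alice', PASSWORD='secret', HOST='db.example.com' and
# PORT='3306', runshell() ends up executing roughly:
#   mysql --user=alice --password=secret --host=db.example.com --port=3306 mydb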
|
rodolfoams/tsp-solver
|
refs/heads/master
|
src/tspsolver/solver/geneticsearch.py
|
1
|
from random import shuffle, randint, random, choice
from ..core import Edge
from sys import maxint
from math import sqrt, ceil
mutationRate = 0.001
populationSize = 200
tournamentSize = 7
crossoverProbability = 0.85
eliteSize = 3
def totalCost(population):
# return reduce(lambda x, y: x + y[1] if isinstance(x, int) else x[1] + y[1],population)
return reduce(lambda x, y: x + y, map(lambda x: x[1],population))
# return sum([x[1] for x in population])
def cost(path, sparseMatrix):
distance = 0
for i in xrange(len(path)):
source = path[i]
target = path[(i+1)%len(path)]
distance += sparseMatrix[source.index][target.index]
return distance
def getFittest(population):
bestPath = None
bestDistance = maxint
for individual in population:
c = individual[1]
if c < bestDistance:
bestPath = individual[0]
bestDistance = c
return bestPath, bestDistance
def tournamentSelection(population):
tournament = list()
tCost = totalCost(population)
weighedPopulation = [k for k in population for i in xrange(int(ceil(float(tCost)/k[1])))]
for i in xrange(tournamentSize):
tournament.append(choice(weighedPopulation))
return getFittest(tournament)[0]
def crossover(p1, p2):
child = [None] * len(p1)
startPos = randint(0,len(p1))
endPos = randint(0,len(p1))
for i in xrange(len(p1)):
if startPos < endPos and i > startPos and i < endPos:
child[i] = p1[i]
elif startPos > endPos:
if not (i < startPos and i > endPos):
child[i] = p1[i]
for i in xrange(len(p2)):
if p2[i] not in child:
for j in xrange(len(child)):
if child[j] == None:
child[j] = p2[i]
break
return child
def mutate(path):
individual = list(path)
for i in xrange(len(individual)):
if random() < mutationRate:
j = randint(0,len(individual)-1)
aux = individual[j]
individual[j] = individual[i]
individual[i] = aux
return individual
def evolvePopulation(population, sparseMatrix):
population = sorted(population,key=lambda x: x[1])
elite = population[:eliteSize]
newPopulation = list()
for i in xrange(eliteSize,populationSize):
child = None
if random() <= crossoverProbability:
parent1 = tournamentSelection(population)
parent2 = tournamentSelection(population)
child = crossover(parent1,parent2)
else:
child = population[i][0]
newPopulation.append(child)
for i in xrange(populationSize-eliteSize):
newPopulation[i] = mutate(newPopulation[i])
return elite + [(x, cost(x,sparseMatrix)) for x in newPopulation]
def geneticSearch(graph, iterations=100):
vertices = list(graph.vertices)
population = list()
sparseMatrix = graph.sparseMatrix
bestPath = None
bestDistance = maxint
for i in xrange(populationSize):
shuffle(vertices)
aux = list(vertices)
population.append((aux,cost(aux,sparseMatrix)))
bestPath, bestDistance = getFittest(population)
for i in xrange(iterations):
population = evolvePopulation(population,sparseMatrix)
iterBestPath, iterBestDistance = getFittest(population)
if iterBestDistance < bestDistance:
bestDistance = iterBestDistance
bestPath = iterBestPath
return bestDistance
|
mlorbetske/PTVS
|
refs/heads/master
|
Python/Product/Django/Templates/Projects/DjangoWebRole/manage.py
|
57
|
#!/usr/bin/env python
"""
Command-line utility for administrative tasks.
"""
import os
import sys
if __name__ == "__main__":
os.environ.setdefault(
"DJANGO_SETTINGS_MODULE",
"$safeprojectname$.settings"
)
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
linktlh/Toontown-journey
|
refs/heads/master
|
otp/distributed/AccountAI.py
|
5
|
from direct.directnotify import DirectNotifyGlobal
from direct.distributed.DistributedObjectAI import DistributedObjectAI
class AccountAI(DistributedObjectAI):
notify = DirectNotifyGlobal.directNotify.newCategory("AccountAI")
|
thaim/ansible
|
refs/heads/fix-broken-link
|
test/units/modules/network/fortios/test_fortios_firewall_schedule_onetime.py
|
21
|
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
from mock import ANY
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
try:
from ansible.modules.network.fortios import fortios_firewall_schedule_onetime
except ImportError:
pytest.skip("Could not load required modules for testing", allow_module_level=True)
@pytest.fixture(autouse=True)
def connection_mock(mocker):
connection_class_mock = mocker.patch('ansible.modules.network.fortios.fortios_firewall_schedule_onetime.Connection')
return connection_class_mock
fos_instance = FortiOSHandler(connection_mock)
def test_firewall_schedule_onetime_creation(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'firewall_schedule_onetime': {
'color': '3',
'end': 'test_value_4',
'expiration_days': '5',
'name': 'default_name_6',
'start': 'test_value_7'
},
'vdom': 'root'}
is_error, changed, response = fortios_firewall_schedule_onetime.fortios_firewall_schedule(input_data, fos_instance)
expected_data = {
'color': '3',
'end': 'test_value_4',
'expiration-days': '5',
'name': 'default_name_6',
'start': 'test_value_7'
}
set_method_mock.assert_called_with('firewall.schedule', 'onetime', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
def test_firewall_schedule_onetime_creation_fails(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'firewall_schedule_onetime': {
'color': '3',
'end': 'test_value_4',
'expiration_days': '5',
'name': 'default_name_6',
'start': 'test_value_7'
},
'vdom': 'root'}
is_error, changed, response = fortios_firewall_schedule_onetime.fortios_firewall_schedule(input_data, fos_instance)
expected_data = {
'color': '3',
'end': 'test_value_4',
'expiration-days': '5',
'name': 'default_name_6',
'start': 'test_value_7'
}
set_method_mock.assert_called_with('firewall.schedule', 'onetime', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 500
def test_firewall_schedule_onetime_removal(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
delete_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
delete_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete', return_value=delete_method_result)
input_data = {
'username': 'admin',
'state': 'absent',
'firewall_schedule_onetime': {
'color': '3',
'end': 'test_value_4',
'expiration_days': '5',
'name': 'default_name_6',
'start': 'test_value_7'
},
'vdom': 'root'}
is_error, changed, response = fortios_firewall_schedule_onetime.fortios_firewall_schedule(input_data, fos_instance)
delete_method_mock.assert_called_with('firewall.schedule', 'onetime', mkey=ANY, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
def test_firewall_schedule_onetime_deletion_fails(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
delete_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
delete_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete', return_value=delete_method_result)
input_data = {
'username': 'admin',
'state': 'absent',
'firewall_schedule_onetime': {
'color': '3',
'end': 'test_value_4',
'expiration_days': '5',
'name': 'default_name_6',
'start': 'test_value_7'
},
'vdom': 'root'}
is_error, changed, response = fortios_firewall_schedule_onetime.fortios_firewall_schedule(input_data, fos_instance)
delete_method_mock.assert_called_with('firewall.schedule', 'onetime', mkey=ANY, vdom='root')
schema_method_mock.assert_not_called()
assert is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 500
def test_firewall_schedule_onetime_idempotent(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'error', 'http_method': 'DELETE', 'http_status': 404}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'firewall_schedule_onetime': {
'color': '3',
'end': 'test_value_4',
'expiration_days': '5',
'name': 'default_name_6',
'start': 'test_value_7'
},
'vdom': 'root'}
is_error, changed, response = fortios_firewall_schedule_onetime.fortios_firewall_schedule(input_data, fos_instance)
expected_data = {
'color': '3',
'end': 'test_value_4',
'expiration-days': '5',
'name': 'default_name_6',
'start': 'test_value_7'
}
set_method_mock.assert_called_with('firewall.schedule', 'onetime', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 404
def test_firewall_schedule_onetime_filter_foreign_attributes(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'firewall_schedule_onetime': {
'random_attribute_not_valid': 'tag',
'color': '3',
'end': 'test_value_4',
'expiration_days': '5',
'name': 'default_name_6',
'start': 'test_value_7'
},
'vdom': 'root'}
is_error, changed, response = fortios_firewall_schedule_onetime.fortios_firewall_schedule(input_data, fos_instance)
expected_data = {
'color': '3',
'end': 'test_value_4',
'expiration-days': '5',
'name': 'default_name_6',
'start': 'test_value_7'
}
set_method_mock.assert_called_with('firewall.schedule', 'onetime', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
|
apprentice3d/Wox
|
refs/heads/master
|
PythonHome/Lib/site-packages/requests/packages/chardet/langgreekmodel.py
|
2762
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# 255: Control characters that usually do not exist in any text
# 254: Carriage/Return
# 253: symbols (punctuation) that do not belong to words
# 252: 0 - 9
# Character Mapping Table:
Latin7_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 82,100,104, 94, 98,101,116,102,111,187,117, 92, 88,113, 85, # 40
79,118,105, 83, 67,114,119, 95, 99,109,188,253,253,253,253,253, # 50
253, 72, 70, 80, 81, 60, 96, 93, 89, 68,120, 97, 77, 86, 69, 55, # 60
78,115, 65, 66, 58, 76,106,103, 87,107,112,253,253,253,253,253, # 70
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 80
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 90
253,233, 90,253,253,253,253,253,253,253,253,253,253, 74,253,253, # a0
253,253,253,253,247,248, 61, 36, 46, 71, 73,253, 54,253,108,123, # b0
110, 31, 51, 43, 41, 34, 91, 40, 52, 47, 44, 53, 38, 49, 59, 39, # c0
35, 48,250, 37, 33, 45, 56, 50, 84, 57,120,121, 17, 18, 22, 15, # d0
124, 1, 29, 20, 21, 3, 32, 13, 25, 5, 11, 16, 10, 6, 30, 4, # e0
9, 8, 14, 7, 2, 12, 28, 23, 42, 24, 64, 75, 19, 26, 27,253, # f0
)
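# Reading the maps above (explanatory comment, not part of upstream chardet):
# the value at each byte position is that character's frequency-order bucket,
# e.g. 'A' (0x41) maps to order 82 and 'a' (0x61) to order 72 in both tables,
# while the ASCII digits 0x30-0x39 all collapse to the shared marker 252.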
win1253_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 82,100,104, 94, 98,101,116,102,111,187,117, 92, 88,113, 85, # 40
79,118,105, 83, 67,114,119, 95, 99,109,188,253,253,253,253,253, # 50
253, 72, 70, 80, 81, 60, 96, 93, 89, 68,120, 97, 77, 86, 69, 55, # 60
78,115, 65, 66, 58, 76,106,103, 87,107,112,253,253,253,253,253, # 70
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 80
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 90
253,233, 61,253,253,253,253,253,253,253,253,253,253, 74,253,253, # a0
253,253,253,253,247,253,253, 36, 46, 71, 73,253, 54,253,108,123, # b0
110, 31, 51, 43, 41, 34, 91, 40, 52, 47, 44, 53, 38, 49, 59, 39, # c0
35, 48,250, 37, 33, 45, 56, 50, 84, 57,120,121, 17, 18, 22, 15, # d0
124, 1, 29, 20, 21, 3, 32, 13, 25, 5, 11, 16, 10, 6, 30, 4, # e0
9, 8, 14, 7, 2, 12, 28, 23, 42, 24, 64, 75, 19, 26, 27,253, # f0
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 98.2851%
# first 1024 sequences: 1.7001%
# rest sequences: 0.0359%
# negative sequences: 0.0148%
GreekLangModel = (
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,2,2,3,3,3,3,3,3,3,3,1,3,3,3,0,2,2,3,3,0,3,0,3,2,0,3,3,3,0,
3,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,0,3,3,0,3,2,3,3,0,3,2,3,3,3,0,0,3,0,3,0,3,3,2,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,
0,2,3,2,2,3,3,3,3,3,3,3,3,0,3,3,3,3,0,2,3,3,0,3,3,3,3,2,3,3,3,0,
2,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,0,2,1,3,3,3,3,2,3,3,2,3,3,2,0,
0,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,0,3,3,3,3,3,3,0,3,3,0,3,3,3,3,3,3,3,3,3,3,0,3,2,3,3,0,
2,0,1,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,2,3,0,0,0,0,3,3,0,3,1,3,3,3,0,3,3,0,3,3,3,3,0,0,0,0,
2,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,0,3,0,3,3,3,3,3,0,3,2,2,2,3,0,2,3,3,3,3,3,2,3,3,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,3,2,2,2,3,3,3,3,0,3,1,3,3,3,3,2,3,3,3,3,3,3,3,2,2,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,2,0,3,0,0,0,3,3,2,3,3,3,3,3,0,0,3,2,3,0,2,3,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,3,0,0,3,3,0,2,3,0,3,0,3,3,3,0,0,3,0,3,0,2,2,3,3,0,0,
0,0,1,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,2,0,3,2,3,3,3,3,0,3,3,3,3,3,0,3,3,2,3,2,3,3,2,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,2,3,2,3,3,3,3,3,3,0,2,3,2,3,2,2,2,3,2,3,3,2,3,0,2,2,2,3,0,
2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,0,0,0,3,3,3,2,3,3,0,0,3,0,3,0,0,0,3,2,0,3,0,3,0,0,2,0,2,0,
0,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,0,3,3,3,3,3,3,0,3,3,0,3,0,0,0,3,3,0,3,3,3,0,0,1,2,3,0,
3,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,2,0,0,3,2,2,3,3,0,3,3,3,3,3,2,1,3,0,3,2,3,3,2,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,3,0,2,3,3,3,3,3,3,0,0,3,0,3,0,0,0,3,3,0,3,2,3,0,0,3,3,3,0,
3,0,0,0,2,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,0,3,3,3,3,3,3,0,0,3,0,3,0,0,0,3,2,0,3,2,3,0,0,3,2,3,0,
2,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,1,2,2,3,3,3,3,3,3,0,2,3,0,3,0,0,0,3,3,0,3,0,2,0,0,2,3,1,0,
2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,3,0,3,0,3,3,2,3,0,3,3,3,3,3,3,0,3,3,3,0,2,3,0,0,3,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,0,0,3,0,0,0,3,3,0,3,0,2,3,3,0,0,3,0,3,0,3,3,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,0,0,0,3,3,3,3,3,3,0,0,3,0,2,0,0,0,3,3,0,3,0,3,0,0,2,0,2,0,
0,0,0,0,1,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,3,0,3,0,2,0,3,2,0,3,2,3,2,3,0,0,3,2,3,2,3,3,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,0,0,2,3,3,3,3,3,0,0,0,3,0,2,1,0,0,3,2,2,2,0,3,0,0,2,2,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,2,0,3,0,3,0,3,3,0,2,1,2,3,3,0,0,3,0,3,0,3,3,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,3,3,3,0,3,3,3,3,3,3,0,2,3,0,3,0,0,0,2,1,0,2,2,3,0,0,2,2,2,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,0,0,2,3,3,3,2,3,0,0,1,3,0,2,0,0,0,0,3,0,1,0,2,0,0,1,1,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,1,0,3,0,0,0,3,2,0,3,2,3,3,3,0,0,3,0,3,2,2,2,1,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,0,0,3,0,0,0,0,2,0,2,3,3,2,2,2,2,3,0,2,0,2,2,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,2,0,0,0,0,0,0,2,3,0,2,0,2,3,2,0,0,3,0,3,0,3,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,3,2,3,3,2,2,3,0,2,0,3,0,0,0,2,0,0,0,0,1,2,0,2,0,2,0,
0,2,0,2,0,2,2,0,0,1,0,2,2,2,0,2,2,2,0,2,2,2,0,0,2,0,0,1,0,0,0,0,
0,2,0,3,3,2,0,0,0,0,0,0,1,3,0,2,0,2,2,2,0,0,2,0,3,0,0,2,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,2,3,2,0,2,2,0,2,0,2,2,0,2,0,2,2,2,0,0,0,0,0,0,2,3,0,0,0,2,
0,1,2,0,0,0,0,2,2,0,0,0,2,1,0,2,2,0,0,0,0,0,0,1,0,2,0,0,0,0,0,0,
0,0,2,1,0,2,3,2,2,3,2,3,2,0,0,3,3,3,0,0,3,2,0,0,0,1,1,0,2,0,2,2,
0,2,0,2,0,2,2,0,0,2,0,2,2,2,0,2,2,2,2,0,0,2,0,0,0,2,0,1,0,0,0,0,
0,3,0,3,3,2,2,0,3,0,0,0,2,2,0,2,2,2,1,2,0,0,1,2,2,0,0,3,0,0,0,2,
0,1,2,0,0,0,1,2,0,0,0,0,0,0,0,2,2,0,1,0,0,2,0,0,0,2,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,3,3,2,2,0,0,0,2,0,2,3,3,0,2,0,0,0,0,0,0,2,2,2,0,2,2,0,2,0,2,
0,2,2,0,0,2,2,2,2,1,0,0,2,2,0,2,0,0,2,0,0,0,0,0,0,2,0,0,0,0,0,0,
0,2,0,3,2,3,0,0,0,3,0,0,2,2,0,2,0,2,2,2,0,0,2,0,0,0,0,0,0,0,0,2,
0,0,2,2,0,0,2,2,2,0,0,0,0,0,0,2,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,2,0,0,3,2,0,2,2,2,2,2,0,0,0,2,0,0,0,0,2,0,1,0,0,2,0,1,0,0,0,
0,2,2,2,0,2,2,0,1,2,0,2,2,2,0,2,2,2,2,1,2,2,0,0,2,0,0,0,0,0,0,0,
0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,2,0,2,0,2,2,0,0,0,0,1,2,1,0,0,2,2,0,0,2,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,3,2,3,0,0,2,0,0,0,2,2,0,2,0,0,0,1,0,0,2,0,2,0,2,2,0,0,0,0,
0,0,2,0,0,0,0,2,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,
0,2,2,3,2,2,0,0,0,0,0,0,1,3,0,2,0,2,2,0,0,0,1,0,2,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,0,2,0,3,2,0,2,0,0,0,0,0,0,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
0,0,2,0,0,0,0,1,1,0,0,2,1,2,0,2,2,0,1,0,0,1,0,0,0,2,0,0,0,0,0,0,
0,3,0,2,2,2,0,0,2,0,0,0,2,0,0,0,2,3,0,2,0,0,0,0,0,0,2,2,0,0,0,2,
0,1,2,0,0,0,1,2,2,1,0,0,0,2,0,0,2,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,1,2,0,2,2,0,2,0,0,2,0,0,0,0,1,2,1,0,2,1,0,0,0,0,0,0,0,0,0,0,
0,0,2,0,0,0,3,1,2,2,0,2,0,0,0,0,2,0,0,0,2,0,0,3,0,0,0,0,2,2,2,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,1,0,2,0,1,2,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,1,0,0,0,0,0,0,2,
0,2,2,0,0,2,2,2,2,2,0,1,2,0,0,0,2,2,0,1,0,2,0,0,2,2,0,0,0,0,0,0,
0,0,0,0,1,0,0,0,0,0,0,0,3,0,0,2,0,0,0,0,0,0,0,0,2,0,2,0,0,0,0,2,
0,1,2,0,0,0,0,2,2,1,0,1,0,1,0,2,2,2,1,0,0,0,0,0,0,1,0,0,0,0,0,0,
0,2,0,1,2,0,0,0,0,0,0,0,0,0,0,2,0,0,2,2,0,0,0,0,1,0,0,0,0,0,0,2,
0,2,2,0,0,0,0,2,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,2,0,0,0,
0,2,2,2,2,0,0,0,3,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,2,0,0,0,0,0,0,1,
0,0,2,0,0,0,0,1,2,0,0,0,0,0,0,2,2,1,1,0,0,0,0,0,0,1,0,0,0,0,0,0,
0,2,0,2,2,2,0,0,2,0,0,0,0,0,0,0,2,2,2,0,0,0,2,0,0,0,0,0,0,0,0,2,
0,0,1,0,0,0,0,2,1,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,
0,3,0,2,0,0,0,0,0,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,2,0,0,0,0,2,
0,0,2,0,0,0,0,2,2,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,0,2,2,1,0,0,0,0,0,0,2,0,0,2,0,2,2,2,0,0,0,0,0,0,2,0,0,0,0,2,
0,0,2,0,0,2,0,2,2,0,0,0,0,2,0,2,0,0,0,0,0,2,0,0,0,2,0,0,0,0,0,0,
0,0,3,0,0,0,2,2,0,2,2,0,0,0,0,0,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,2,0,0,0,0,0,
0,2,2,2,2,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,0,1,
0,0,0,0,0,0,0,2,1,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,2,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,2,0,0,0,2,0,0,0,0,0,1,0,0,0,0,2,2,0,0,0,1,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,2,0,0,0,
0,2,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,1,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,1,0,0,2,0,2,0,0,0,
0,0,0,0,0,0,0,0,2,1,0,0,0,0,0,0,2,0,0,0,1,2,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
)
Latin7GreekModel = {
'charToOrderMap': Latin7_CharToOrderMap,
'precedenceMatrix': GreekLangModel,
'mTypicalPositiveRatio': 0.982851,
'keepEnglishLetter': False,
'charsetName': "ISO-8859-7"
}
Win1253GreekModel = {
'charToOrderMap': win1253_CharToOrderMap,
'precedenceMatrix': GreekLangModel,
'mTypicalPositiveRatio': 0.982851,
'keepEnglishLetter': False,
'charsetName': "windows-1253"
}
# flake8: noqa
|
nolanliou/tensorflow
|
refs/heads/master
|
tensorflow/examples/how_tos/reading_data/fully_connected_preloaded.py
|
130
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Trains the MNIST network using preloaded data in a constant.
Run using bazel:
bazel run --config opt \
<...>/tensorflow/examples/how_tos/reading_data:fully_connected_preloaded
or, if installed via pip:
cd tensorflow/examples/how_tos/reading_data
python fully_connected_preloaded.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import time
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
from tensorflow.examples.tutorials.mnist import mnist
# Basic model parameters as external flags.
FLAGS = None
def run_training():
"""Train MNIST for a number of epochs."""
# Get the sets of images and labels for training, validation, and
# test on MNIST.
data_sets = input_data.read_data_sets(FLAGS.train_dir, FLAGS.fake_data)
# Tell TensorFlow that the model will be built into the default Graph.
with tf.Graph().as_default():
with tf.name_scope('input'):
# Input data, pin to CPU because rest of pipeline is CPU-only
with tf.device('/cpu:0'):
input_images = tf.constant(data_sets.train.images)
input_labels = tf.constant(data_sets.train.labels)
image, label = tf.train.slice_input_producer(
[input_images, input_labels], num_epochs=FLAGS.num_epochs)
label = tf.cast(label, tf.int32)
images, labels = tf.train.batch(
[image, label], batch_size=FLAGS.batch_size)
# Build a Graph that computes predictions from the inference model.
logits = mnist.inference(images, FLAGS.hidden1, FLAGS.hidden2)
# Add to the Graph the Ops for loss calculation.
loss = mnist.loss(logits, labels)
# Add to the Graph the Ops that calculate and apply gradients.
train_op = mnist.training(loss, FLAGS.learning_rate)
# Add the Op to compare the logits to the labels during evaluation.
eval_correct = mnist.evaluation(logits, labels)
# Build the summary operation based on the TF collection of Summaries.
summary_op = tf.summary.merge_all()
# Create a saver for writing training checkpoints.
saver = tf.train.Saver()
# Create the op for initializing variables.
init_op = tf.group(tf.global_variables_initializer(),
tf.local_variables_initializer())
# Create a session for running Ops on the Graph.
sess = tf.Session()
# Run the Op to initialize the variables.
sess.run(init_op)
# Instantiate a SummaryWriter to output summaries and the Graph.
summary_writer = tf.summary.FileWriter(FLAGS.train_dir, sess.graph)
# Start input enqueue threads.
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
# And then after everything is built, start the training loop.
try:
step = 0
while not coord.should_stop():
start_time = time.time()
# Run one step of the model.
_, loss_value = sess.run([train_op, loss])
duration = time.time() - start_time
# Write the summaries and print an overview fairly often.
if step % 100 == 0:
# Print status to stdout.
print('Step %d: loss = %.2f (%.3f sec)' % (step, loss_value,
duration))
# Update the events file.
summary_str = sess.run(summary_op)
summary_writer.add_summary(summary_str, step)
step += 1
# Save a checkpoint periodically.
if (step + 1) % 1000 == 0:
print('Saving')
saver.save(sess, FLAGS.train_dir, global_step=step)
step += 1
except tf.errors.OutOfRangeError:
print('Saving')
saver.save(sess, FLAGS.train_dir, global_step=step)
print('Done training for %d epochs, %d steps.' % (FLAGS.num_epochs, step))
finally:
# When done, ask the threads to stop.
coord.request_stop()
# Wait for threads to finish.
coord.join(threads)
sess.close()
def main(_):
run_training()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--learning_rate',
type=float,
default=0.01,
help='Initial learning rate.'
)
parser.add_argument(
'--num_epochs',
type=int,
default=2,
help='Number of epochs to run trainer.'
)
parser.add_argument(
'--hidden1',
type=int,
default=128,
help='Number of units in hidden layer 1.'
)
parser.add_argument(
'--hidden2',
type=int,
default=32,
help='Number of units in hidden layer 2.'
)
parser.add_argument(
'--batch_size',
type=int,
default=100,
help='Batch size. Must divide evenly into the dataset sizes.'
)
parser.add_argument(
'--train_dir',
type=str,
default='/tmp/data',
help='Directory to put the training data.'
)
parser.add_argument(
'--fake_data',
default=False,
help='If true, uses fake data for unit testing.',
action='store_true'
)
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
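# Usage sketch (illustrative, flag values are arbitrary): the flags defined
# above can be overridden on the command line, e.g.
#   python fully_connected_preloaded.py --num_epochs 5 --batch_size 50 --train_dir /tmp/mnist_data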
|
named-data-ndnSIM/ns-3-dev
|
refs/heads/ndnSIM-ns-3.29
|
src/click/test/examples-to-run.py
|
62
|
#! /usr/bin/env python
## -*- Mode: python; py-indent-offset: 4; indent-tabs-mode: nil; coding: utf-8; -*-
# A list of C++ examples to run in order to ensure that they remain
# buildable and runnable over time. Each tuple in the list contains
#
# (example_name, do_run, do_valgrind_run).
#
# See test.py for more information.
cpp_examples = [
("nsclick-simple-lan --clickConfigFolder=../../src/click/examples", "NSCLICK == True", "False"),
("nsclick-raw-wlan --clickConfigFolder=../../src/click/examples", "NSCLICK == True", "False"),
("nsclick-udp-client-server-csma --clickConfigFolder=../../src/click/examples", "NSCLICK == True", "False"),
("nsclick-udp-client-server-wifi --clickConfigFolder=../../src/click/examples", "NSCLICK == True", "False"),
("nsclick-routing --clickConfigFolder=../../src/click/examples", "NSCLICK == True", "False"),
("nsclick-defines --clickConfigFolder=../../src/click/examples", "NSCLICK == True", "False"),
]
# A list of Python examples to run in order to ensure that they remain
# runnable over time. Each tuple in the list contains
#
# (example_name, do_run).
#
# See test.py for more information.
python_examples = []
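# Illustrative only (hypothetical entry, no Python example ships with this
# module): a populated list would follow the (example_name, do_run) shape, e.g.
#   python_examples = [("nsclick-simple-lan.py", "NSCLICK == True")]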
|
rcommande/decorrelate
|
refs/heads/master
|
decorrelate/tests/test_decorrelate.py
|
1
|
import pytest
import sys
@pytest.fixture
def clean_registry():
import decorrelate
registry = decorrelate.get_registry()
registry._registered = {}
ressources_module_name = 'decorrelate.tests.ressources'
if ressources_module_name in sys.modules:
del sys.modules[ressources_module_name]
def test_get_proxy(clean_registry):
import decorrelate
registry = decorrelate.get_registry()
def func():
pass
def callback():
pass
decorrelate.get_proxy(func, callback)
assert len(registry) == 1
def test_get_proxy_with_category(clean_registry):
import decorrelate
registry = decorrelate.get_registry()
def func():
pass
def callback():
pass
decorrelate.get_proxy(func, callback, category='test_category')
assert len(registry) == 1
def test_original(clean_registry):
import decorrelate
registry = decorrelate.get_registry()
def decorator(wrapped):
def callback(callable):
callable.wrapped = True
return callable
return decorrelate.get_proxy(wrapped, callback)
@decorator
def test_func():
pass
assert hasattr(test_func, 'wrapped') is False
assert len(registry) == 1
def test_activates(clean_registry):
import decorrelate
registry = decorrelate.get_registry()
def decorator(wrapped):
def callback(callable):
callable.wrapped = True
return callable
return decorrelate.get_proxy(wrapped, callback)
@decorator
def test_func():
pass
assert hasattr(test_func, 'wrapped') is False
assert len(registry) == 1
decorrelate.activates()
assert hasattr(test_func, 'wrapped')
assert len(registry) == 0
def test_activates_proxy_attributes(clean_registry):
import decorrelate
def decorator(wrapped):
def callback(callable):
callable.wrapped = True
callable.__doc__ = 'A test function after wrapping'
return callable
return decorrelate.get_proxy(wrapped, callback)
@decorator
def test_func():
"""A test function"""
pass
assert test_func.__doc__ == 'A test function'
assert isinstance(test_func, decorrelate.Proxy)
assert test_func.__name__ == 'test_func'
assert not repr(test_func).startswith('<decorrelate.Proxy object')
decorrelate.activates()
assert test_func.__doc__ == 'A test function after wrapping'
assert isinstance(test_func, decorrelate.Proxy)
assert test_func.__name__ == 'test_func'
assert not repr(test_func).startswith('<decorrelate.Proxy object')
def test_activates_decorator_with_parameter(clean_registry):
import decorrelate
registry = decorrelate.get_registry()
def decorator(value, **kwargs):
def wrapper(wrapped):
def callback(callable):
callable.wrapped = True
callable.value = value
for key, val in kwargs.items():
setattr(callable, key, val)
return callable
return decorrelate.get_proxy(wrapped, callback)
return wrapper
@decorator('My value', one=1, two=2, three=3)
def test_func():
pass
assert hasattr(test_func, 'wrapped') is False
assert hasattr(test_func, 'value') is False
assert hasattr(test_func, 'one') is False
assert hasattr(test_func, 'two') is False
assert hasattr(test_func, 'three') is False
assert len(registry) == 1
decorrelate.activates()
assert hasattr(test_func, 'wrapped')
assert hasattr(test_func, 'value')
assert test_func.value == 'My value'
assert hasattr(test_func, 'one')
assert test_func.one == 1
assert hasattr(test_func, 'two')
assert test_func.two == 2
assert hasattr(test_func, 'three')
assert test_func.three == 3
assert len(registry) == 0
def test_activates_with_category(clean_registry):
import decorrelate
registry = decorrelate.get_registry()
def decorator(wrapped):
def callback(callable):
callable.wrapped = True
return callable
return decorrelate.get_proxy(wrapped, callback, category='a category')
def decorator2(wrapped):
def callback(callable):
callable.wrapped = True
return callable
return decorrelate.get_proxy(wrapped, callback)
@decorator
def test_func():
pass
@decorator2
def test_func2():
pass
assert hasattr(test_func, 'wrapped') is False
assert len(registry) == 2
decorrelate.activates(category='a category')
assert hasattr(test_func, 'wrapped')
assert len(registry) == 1
assert len(registry._registered['default']) == 1
assert len(registry._registered['a category']) == 0
def test_activates_with_same_category(clean_registry):
import decorrelate
registry = decorrelate.get_registry()
def decorator(wrapped):
def callback(callable):
callable.wrapped = True
return callable
return decorrelate.get_proxy(wrapped, callback, category='a category')
def decorator2(wrapped):
def callback(callable):
callable.wrapped = True
return callable
return decorrelate.get_proxy(wrapped, callback, category='a category')
@decorator
def test_func():
pass
@decorator2
def test_func2():
pass
assert hasattr(test_func, 'wrapped') is False
assert len(registry) == 2
decorrelate.activates(category='a category')
assert hasattr(test_func, 'wrapped')
assert len(registry) == 0
assert len(registry._registered['a category']) == 0
def test_singleton(clean_registry):
import decorrelate
assert decorrelate.get_registry() == decorrelate.get_registry()
assert id(decorrelate.get_registry()) == id(decorrelate.get_registry())
def test_decorrate_a_function(clean_registry):
import decorrelate
def decorator(wrapped):
def callback(callable):
callable.wrapped = True
return callable
return decorrelate.get_proxy(wrapped, callback)
@decorator
def a_test_function():
pass
assert hasattr(a_test_function, 'wrapped') is False
decorrelate.activates()
assert hasattr(a_test_function, 'wrapped')
def test_decorrate_a_function_from_another_module(clean_registry):
import decorrelate
from decorrelate.tests.ressources import a_test_function
assert hasattr(a_test_function, 'wrapped') is False
decorrelate.activates()
assert hasattr(a_test_function, 'wrapped')
def test_decorrate_a_class(clean_registry):
import decorrelate
def decorator(wrapped):
def callback(callable):
callable.wrapped = True
return callable
return decorrelate.get_proxy(wrapped, callback)
@decorator
class ATestClass(object):
def __init__(self, *args, **kwargs):
pass
def __call__(self):
pass
assert hasattr(ATestClass(), 'wrapped') is False
assert repr(ATestClass) == repr(ATestClass._callable)
decorrelate.activates()
assert hasattr(ATestClass(), 'wrapped')
def test_decorrate_a_class_from_another_module(clean_registry):
import decorrelate
from decorrelate.tests.ressources import ATestClass
assert hasattr(ATestClass(), 'wrapped') is False
assert repr(ATestClass) == repr(ATestClass._callable)
decorrelate.activates()
assert hasattr(ATestClass(), 'wrapped')
def test_decorrate_a_method(clean_registry):
import decorrelate
def decorator(wrapped):
def callback(callable):
callable.wrapped = True
return callable
return decorrelate.get_proxy(wrapped, callback)
class ATestClass(object):
def __init__(self, *args, **kwargs):
pass
@decorator
def a_test_method(self):
pass
def __call__(self):
pass
assert hasattr(ATestClass().a_test_method, 'wrapped') is False
decorrelate.activates()
assert hasattr(ATestClass().a_test_method, 'wrapped')
def test_decorrate_a_method_from_another_module(clean_registry):
import decorrelate
from decorrelate.tests.ressources import ATestClassWithDecoratedMethod
assert hasattr(ATestClassWithDecoratedMethod().a_test_method, 'wrapped') is False
decorrelate.activates()
assert hasattr(ATestClassWithDecoratedMethod().a_test_method, 'wrapped')
|
plotly/python-api
|
refs/heads/master
|
packages/python/plotly/plotly/validators/area/marker/__init__.py
|
1
|
import sys
if sys.version_info < (3, 7):
from ._symbolsrc import SymbolsrcValidator
from ._symbol import SymbolValidator
from ._sizesrc import SizesrcValidator
from ._size import SizeValidator
from ._opacitysrc import OpacitysrcValidator
from ._opacity import OpacityValidator
from ._colorsrc import ColorsrcValidator
from ._color import ColorValidator
else:
from _plotly_utils.importers import relative_import
__all__, __getattr__, __dir__ = relative_import(
__name__,
[],
[
"._symbolsrc.SymbolsrcValidator",
"._symbol.SymbolValidator",
"._sizesrc.SizesrcValidator",
"._size.SizeValidator",
"._opacitysrc.OpacitysrcValidator",
"._opacity.OpacityValidator",
"._colorsrc.ColorsrcValidator",
"._color.ColorValidator",
],
)
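# Explanatory sketch (added comment, not part of the generated file): on
# Python 3.7+ the module-level __getattr__ returned by relative_import defers
# each submodule import until an attribute such as
# plotly.validators.area.marker.ColorValidator is first accessed (PEP 562),
# which avoids importing every validator module eagerly.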
|
felixbuenemann/sentry
|
refs/heads/master
|
tests/sentry/metrics/test_datadog.py
|
14
|
from __future__ import absolute_import
from mock import patch
from datadog.util.hostname import get_hostname
from sentry.metrics.datadog import DatadogMetricsBackend
from sentry.testutils import TestCase
class DatadogMetricsBackendTest(TestCase):
def setUp(self):
self.backend = DatadogMetricsBackend(prefix='sentrytest.')
@patch('datadog.threadstats.base.ThreadStats.increment')
def test_incr(self, mock_incr):
self.backend.incr('foo', instance='bar')
mock_incr.assert_called_once_with(
'sentrytest.foo', 1,
tags=['instance:bar'],
host=get_hostname(),
)
@patch('datadog.threadstats.base.ThreadStats.timing')
def test_timing(self, mock_timing):
self.backend.timing('foo', 30, instance='bar')
mock_timing.assert_called_once_with(
'sentrytest.foo', 30,
sample_rate=1,
tags=['instance:bar'],
host=get_hostname(),
)
|
swiperthefox/python-source-browser
|
refs/heads/master
|
app/__main__.py
|
1
|
from __future__ import (
absolute_import,
print_function,
unicode_literals
)
import os
import errno
import logging
import argparse
from flask import Flask, request, jsonify, g, current_app, send_from_directory
# Workaround for the werkzeug reloader removing the current directory from the
# path. It's nasty, but it works! Inspired by:
# https://github.com/mitsuhiko/flask/issues/1246
os.environ['PYTHONPATH'] = os.getcwd()
from app.mktags import make_tags
from app.htmlizer import pygmentize
from app.db import PSBDatabase
from app.path_utils import list_dir
from app.search import search
DATABASE = 'to be configured'
DEBUG = False
SECRET_KEY = 'we do not need it'
USERNAME = 'no'
PASSWORD = 'no'
PROJECTROOT = 'to be configured'
app = Flask(__name__)
app.config.from_object(__name__)
def config_app(project_root):
"""Configuration based on the proejct_root"""
rel_project_root = os.path.relpath(project_root)
abs_project_root = os.path.abspath(project_root)
datadir = os.path.join(abs_project_root, "psb")
uniquename = abs_project_root.replace(os.path.sep, "_")
# root_as_file_name = os.path.abspath(project_root).replace(os.path.sep, '_')
dbfile = os.path.join(datadir, uniquename + '.sqlite')
tagsfile = os.path.join(datadir, uniquename + '.tags')
config = {
'DATABASE': dbfile,
'PROJECTROOT': os.path.abspath(project_root),
'TAGSFILE': tagsfile
}
app.config.update(config)
# create directories for data
try:
os.makedirs(datadir)
except OSError as err:
# it's ok if the directories already exist
dirs_exist = err.errno == errno.EEXIST
if not dirs_exist:
raise
# initialize the database only on the first time
if not os.path.isfile(app.config['DATABASE']):
with app.open_resource('schema.sql', mode='r') as schema_fd:
PSBDatabase.initialize_db(app.config['DATABASE'], schema_fd.read())
make_tags(rel_project_root, app.config['TAGSFILE'])
def get_db():
"""Opens a new database connection if there is none yet for the
current application context.
"""
if not hasattr(g, 'sqlite_db'):
g.sqlite_db = PSBDatabase(app.config['DATABASE'])
return g.sqlite_db
@app.teardown_appcontext
def close_db(error):
"""Closes the database again at the end of the request."""
if hasattr(g, 'sqlite_db'):
g.sqlite_db.close()
@app.route('/notes')
def get_notes():
psb_db = get_db()
result = psb_db.get_notes()
return jsonify(data=result)
@app.route('/notes', methods = ['POST'])
def create_note():
psb_db = get_db()
data = request.get_json(force=True)
psb_db.add_note([data[key] for key in ('location', 'symbol', 'note')])
newNoteList = psb_db.get_notes()
return jsonify(data=newNoteList)
@app.route('/file/', defaults={'path': ''})
@app.route('/file/<path:path>')
def get_source(path, use_cache=False):
if path == '':
return jsonify(list_dir(current_app.config['PROJECTROOT']))
pygments_config = {
'tagurlformat': '/file/%(path)s%(fname)s%(fext)s',
'tagsfile': current_app.config['TAGSFILE']
}
full_path = os.path.join(current_app.config['PROJECTROOT'], path)
if use_cache:
psb_db = get_db()
result = psb_db.get_html(path)
if result is None:
full_path = os.path.join(current_app.config['PROJECTROOT'], path)
result = pygmentize(full_path, pygments_config)
psb_db.save_html(path, result)
else:
result = pygmentize(full_path, pygments_config)
return result
@app.route('/')
def index():
print ("index")
return send_from_directory(os.path.dirname(__file__), 'index.html')
@app.route('/search')
def search_code():
term = request.values['term']
exact_word_search = request.values.get('exact', False)
result = search(term, app.config['PROJECTROOT'], exact_word_search)
return jsonify(data=result)
def make_argparser():
parser = argparse.ArgumentParser(description="Browse the source code in given directory.")
parser.add_argument('-p', '--port', default=9999, type=int, help="The port to be used.")
parser.add_argument("project_root", help="The root directory of the project")
return parser
def main():
import sys
import webbrowser
import threading
args = make_argparser().parse_args()
port = args.port
url = "http://localhost:%d" % port
project_root = args.project_root
config_app(project_root)
# wait for 1 second so that the server can start
threading.Timer(1, lambda: webbrowser.open(url, autoraise=True)).start()
app.run(port=port, debug=DEBUG)
if __name__ == '__main__':
main()
|
Jonas-Drotleff/yo
|
refs/heads/master
|
requests/packages/chardet/universaldetector.py
|
1775
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from . import constants
import sys
import codecs
from .latin1prober import Latin1Prober # windows-1252
from .mbcsgroupprober import MBCSGroupProber # multi-byte character sets
from .sbcsgroupprober import SBCSGroupProber # single-byte character sets
from .escprober import EscCharSetProber # ISO-2022, etc.
import re
MINIMUM_THRESHOLD = 0.20
ePureAscii = 0
eEscAscii = 1
eHighbyte = 2
class UniversalDetector:
def __init__(self):
self._highBitDetector = re.compile(b'[\x80-\xFF]')
self._escDetector = re.compile(b'(\033|~{)')
self._mEscCharSetProber = None
self._mCharSetProbers = []
self.reset()
def reset(self):
self.result = {'encoding': None, 'confidence': 0.0}
self.done = False
self._mStart = True
self._mGotData = False
self._mInputState = ePureAscii
self._mLastChar = b''
if self._mEscCharSetProber:
self._mEscCharSetProber.reset()
for prober in self._mCharSetProbers:
prober.reset()
def feed(self, aBuf):
if self.done:
return
aLen = len(aBuf)
if not aLen:
return
if not self._mGotData:
# If the data starts with BOM, we know it is UTF
if aBuf[:3] == codecs.BOM_UTF8:
# EF BB BF UTF-8 with BOM
self.result = {'encoding': "UTF-8-SIG", 'confidence': 1.0}
elif aBuf[:4] == codecs.BOM_UTF32_LE:
# FF FE 00 00 UTF-32, little-endian BOM
self.result = {'encoding': "UTF-32LE", 'confidence': 1.0}
elif aBuf[:4] == codecs.BOM_UTF32_BE:
# 00 00 FE FF UTF-32, big-endian BOM
self.result = {'encoding': "UTF-32BE", 'confidence': 1.0}
elif aBuf[:4] == b'\xFE\xFF\x00\x00':
# FE FF 00 00 UCS-4, unusual octet order BOM (3412)
self.result = {
'encoding': "X-ISO-10646-UCS-4-3412",
'confidence': 1.0
}
elif aBuf[:4] == b'\x00\x00\xFF\xFE':
# 00 00 FF FE UCS-4, unusual octet order BOM (2143)
self.result = {
'encoding': "X-ISO-10646-UCS-4-2143",
'confidence': 1.0
}
elif aBuf[:2] == codecs.BOM_LE:
# FF FE UTF-16, little endian BOM
self.result = {'encoding': "UTF-16LE", 'confidence': 1.0}
elif aBuf[:2] == codecs.BOM_BE:
# FE FF UTF-16, big endian BOM
self.result = {'encoding': "UTF-16BE", 'confidence': 1.0}
self._mGotData = True
if self.result['encoding'] and (self.result['confidence'] > 0.0):
self.done = True
return
if self._mInputState == ePureAscii:
if self._highBitDetector.search(aBuf):
self._mInputState = eHighbyte
elif ((self._mInputState == ePureAscii) and
self._escDetector.search(self._mLastChar + aBuf)):
self._mInputState = eEscAscii
self._mLastChar = aBuf[-1:]
if self._mInputState == eEscAscii:
if not self._mEscCharSetProber:
self._mEscCharSetProber = EscCharSetProber()
if self._mEscCharSetProber.feed(aBuf) == constants.eFoundIt:
self.result = {'encoding': self._mEscCharSetProber.get_charset_name(),
'confidence': self._mEscCharSetProber.get_confidence()}
self.done = True
elif self._mInputState == eHighbyte:
if not self._mCharSetProbers:
self._mCharSetProbers = [MBCSGroupProber(), SBCSGroupProber(),
Latin1Prober()]
for prober in self._mCharSetProbers:
if prober.feed(aBuf) == constants.eFoundIt:
self.result = {'encoding': prober.get_charset_name(),
'confidence': prober.get_confidence()}
self.done = True
break
def close(self):
if self.done:
return
if not self._mGotData:
if constants._debug:
sys.stderr.write('no data received!\n')
return
self.done = True
if self._mInputState == ePureAscii:
self.result = {'encoding': 'ascii', 'confidence': 1.0}
return self.result
if self._mInputState == eHighbyte:
proberConfidence = None
maxProberConfidence = 0.0
maxProber = None
for prober in self._mCharSetProbers:
if not prober:
continue
proberConfidence = prober.get_confidence()
if proberConfidence > maxProberConfidence:
maxProberConfidence = proberConfidence
maxProber = prober
if maxProber and (maxProberConfidence > MINIMUM_THRESHOLD):
self.result = {'encoding': maxProber.get_charset_name(),
'confidence': maxProber.get_confidence()}
return self.result
if constants._debug:
sys.stderr.write('no probers hit minimum threshold\n')
for prober in self._mCharSetProbers[0].mProbers:
if not prober:
continue
sys.stderr.write('%s confidence = %s\n' %
(prober.get_charset_name(),
prober.get_confidence()))
|
sorenk/ansible
|
refs/heads/devel
|
lib/ansible/modules/network/aci/aci_contract_subject_to_filter.py
|
12
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: aci_contract_subject_to_filter
short_description: Bind Contract Subjects to Filters (vz:RsSubjFiltAtt)
description:
- Bind Contract Subjects to Filters on Cisco ACI fabrics.
notes:
- The C(tenant), C(contract), C(subject), and C(filter_name) must exist before using this module in your playbook.
- The M(aci_tenant), M(aci_contract), M(aci_contract_subject), and M(aci_filter) modules can be used for these.
- More information about the internal APIC class B(vz:RsSubjFiltAtt) from
L(the APIC Management Information Model reference,https://developer.cisco.com/docs/apic-mim-ref/).
author:
- Jacob McGill (@jmcgill298)
version_added: '2.4'
options:
contract:
description:
- The name of the contract.
aliases: [ contract_name ]
filter:
description:
- The name of the Filter to bind to the Subject.
aliases: [ filter_name ]
log:
description:
- Determines if the binding should be set to log.
- The APIC defaults new Subject to Filter bindings to C(none).
choices: [ log, none ]
aliases: [ directive ]
subject:
description:
- The name of the Contract Subject.
aliases: [ contract_subject, subject_name ]
state:
description:
- Use C(present) or C(absent) for adding or removing.
- Use C(query) for listing an object or multiple objects.
choices: [ absent, present, query ]
default: present
tenant:
description:
- The name of the tenant.
required: yes
aliases: [ tenant_name ]
extends_documentation_fragment: aci
'''
EXAMPLES = r'''
- name: Add a new contract subject to filter binding
aci_contract_subject_to_filter:
host: apic
username: admin
password: SomeSecretPassword
tenant: production
contract: web_to_db
subject: test
filter: '{{ filter }}'
log: '{{ log }}'
state: present
- name: Remove an existing contract subject to filter binding
aci_contract_subject_to_filter:
host: apic
username: admin
password: SomeSecretPassword
tenant: production
contract: web_to_db
subject: test
filter: '{{ filter }}'
log: '{{ log }}'
state: absent
- name: Query a specific contract subject to filter binding
aci_contract_subject_to_filter:
host: apic
username: admin
password: SomeSecretPassword
tenant: production
contract: web_to_db
subject: test
filter: '{{ filter }}'
state: query
- name: Query all contract subject to filter bindings
aci_contract_subject_to_filter:
host: apic
username: admin
password: SomeSecretPassword
tenant: production
contract: web_to_db
subject: test
state: query
'''
RETURN = r'''
current:
description: The existing configuration from the APIC after the module has finished
returned: success
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
error:
description: The error information as returned from the APIC
returned: failure
type: dict
sample:
{
"code": "122",
"text": "unknown managed object class foo"
}
raw:
description: The raw output returned by the APIC REST API (xml or json)
returned: parse error
type: string
sample: '<?xml version="1.0" encoding="UTF-8"?><imdata totalCount="1"><error code="122" text="unknown managed object class foo"/></imdata>'
sent:
description: The actual/minimal configuration pushed to the APIC
returned: info
type: list
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment"
}
}
}
previous:
description: The original configuration from the APIC before the module has started
returned: info
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
proposed:
description: The assembled configuration from the user-provided parameters
returned: info
type: dict
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"name": "production"
}
}
}
filter_string:
description: The filter string used for the request
returned: failure or debug
type: string
sample: ?rsp-prop-include=config-only
method:
description: The HTTP method used for the request to the APIC
returned: failure or debug
type: string
sample: POST
response:
description: The HTTP response from the APIC
returned: failure or debug
type: string
sample: OK (30 bytes)
status:
description: The HTTP status from the APIC
returned: failure or debug
type: int
sample: 200
url:
description: The HTTP url used for the request to the APIC
returned: failure or debug
type: string
sample: https://10.11.12.13/api/mo/uni/tn-production.json
'''
from ansible.module_utils.network.aci.aci import ACIModule, aci_argument_spec
from ansible.module_utils.basic import AnsibleModule
def main():
argument_spec = aci_argument_spec()
argument_spec.update(
contract=dict(type='str', aliases=['contract_name']), # Not required for querying all objects
filter=dict(type='str', aliases=['filter_name']), # Not required for querying all objects
log=dict(type='str', choices=['log', 'none'], aliases=['directive']),
subject=dict(type='str', aliases=['contract_subject', 'subject_name']), # Not required for querying all objects
tenant=dict(type='str', aliases=['tenant_name']), # Not required for querying all objects
state=dict(type='str', default='present', choices=['absent', 'present', 'query']),
method=dict(type='str', choices=['delete', 'get', 'post'], aliases=['action'], removed_in_version='2.6'), # Deprecated starting from v2.6
protocol=dict(type='str', removed_in_version='2.6'), # Deprecated in v2.6
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
required_if=[
['state', 'absent', ['contract', 'filter', 'subject', 'tenant']],
['state', 'present', ['contract', 'filter', 'subject', 'tenant']],
],
)
contract = module.params['contract']
filter_name = module.params['filter']
log = module.params['log']
subject = module.params['subject']
tenant = module.params['tenant']
state = module.params['state']
# Add subject_filter key to module.params for building the URL
module.params['subject_filter'] = filter_name
# Convert log to an empty string if 'none', as that is what the API expects; an empty string is not a user-friendly value to expose, so the option accepts 'none' instead.
if log == 'none':
log = ''
aci = ACIModule(module)
aci.construct_url(
root_class=dict(
aci_class='fvTenant',
aci_rn='tn-{0}'.format(tenant),
filter_target='eq(fvTenant.name, "{0}")'.format(tenant),
module_object=tenant,
),
subclass_1=dict(
aci_class='vzBrCP',
aci_rn='brc-{0}'.format(contract),
filter_target='eq(vzBrCP.name, "{0}")'.format(contract),
module_object=contract,
),
subclass_2=dict(
aci_class='vzSubj',
aci_rn='subj-{0}'.format(subject),
filter_target='eq(vzSubj.name, "{0}")'.format(subject),
module_object=subject,
),
subclass_3=dict(
aci_class='vzRsSubjFiltAtt',
aci_rn='rssubjFiltAtt-{0}'.format(filter_name),
filter_target='eq(vzRsSubjFiltAtt.tnVzFilterName, "{0}")'.format(filter_name),
module_object=filter_name,
),
)
aci.get_existing()
if state == 'present':
aci.payload(
aci_class='vzRsSubjFiltAtt',
class_config=dict(
tnVzFilterName=filter_name,
directives=log,
),
)
aci.get_diff(aci_class='vzRsSubjFiltAtt')
aci.post_config()
elif state == 'absent':
aci.delete_config()
# Remove subject_filter used to build URL from module.params
module.params.pop('subject_filter')
aci.exit_json()
if __name__ == "__main__":
main()
|
Meninblack007/android_kernel_yu_msm8916
|
refs/heads/almighty-v1.0
|
tools/perf/scripts/python/sctop.py
|
11180
|
# system call top
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Periodically displays system-wide system call totals, broken down by
# syscall. If a [comm] arg is specified, only syscalls called by
# [comm] are displayed. If an [interval] arg is specified, the display
# will be refreshed every [interval] seconds. The default interval is
# 3 seconds.
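# Illustrative invocations (the comm value "firefox" is a hypothetical example;
# only the option layout comes from the usage string below):
#   perf script -s sctop.py              # all comms, refresh every 3 seconds
#   perf script -s sctop.py 5            # all comms, refresh every 5 seconds
#   perf script -s sctop.py firefox 5    # only syscalls made by "firefox"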
import os, sys, thread, time
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s sctop.py [comm] [interval]\n";
for_comm = None
default_interval = 3
interval = default_interval
if len(sys.argv) > 3:
sys.exit(usage)
if len(sys.argv) > 2:
for_comm = sys.argv[1]
interval = int(sys.argv[2])
elif len(sys.argv) > 1:
try:
interval = int(sys.argv[1])
except ValueError:
for_comm = sys.argv[1]
interval = default_interval
syscalls = autodict()
def trace_begin():
thread.start_new_thread(print_syscall_totals, (interval,))
pass
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if for_comm is not None:
if common_comm != for_comm:
return
try:
syscalls[id] += 1
except TypeError:
syscalls[id] = 1
def print_syscall_totals(interval):
while 1:
clear_term()
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"----------"),
for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
reverse = True):
try:
print "%-40s %10d\n" % (syscall_name(id), val),
except TypeError:
pass
syscalls.clear()
time.sleep(interval)
|
jessstrap/servotk
|
refs/heads/master
|
tests/wpt/css-tests/css-text-decor-3_dev/xhtml1print/support/generate-text-emphasis-line-height-tests.py
|
829
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
This script generates tests text-emphasis-line-height-001 ~ 004 except
001z. They test the line height expansion in different directions. This
script outputs a list of all tests it generated in the format of Mozilla
reftest.list to the stdout.
"""
from __future__ import unicode_literals
TEST_FILE = 'text-emphasis-line-height-{:03}{}.html'
TEST_TEMPLATE = '''<!DOCTYPE html>
<meta charset="utf-8">
<title>CSS Test: text-emphasis line height, {pos}, {wm}, {tag}</title>
<link rel="author" title="Xidorn Quan" href="https://www.upsuper.org">
<link rel="author" title="Mozilla" href="https://www.mozilla.org">
<link rel="help" href="https://drafts.csswg.org/css-text-decor-3/#text-emphasis-position-property">
<meta name="assert" content="text emphasis marks should expand the line height like ruby if necessary">
<link rel="match" href="text-emphasis-line-height-{index:03}-ref.html">
<p>Pass if the emphasis marks are {dir} the black line:</p>
{start}試験テスト{end}
'''
REF_FILE = 'text-emphasis-line-height-{:03}-ref.html'
REF_TEMPLATE='''<!DOCTYPE html>
<meta charset="utf-8">
<title>CSS Reference: text-emphasis line height, {pos}</title>
<link rel="author" title="Xidorn Quan" href="https://www.upsuper.org">
<link rel="author" title="Mozilla" href="https://www.mozilla.org">
<style> rt {{ font-variant-east-asian: inherit; }} </style>
<p>Pass if the emphasis marks are {dir} the black line:</p>
<div style="line-height: 1; border-{pos}: 1px solid black; writing-mode: {wm}; ruby-position: {posval}"><ruby>試<rt>●</rt>験<rt>●</rt>テ<rt>●</rt>ス<rt>●</rt>ト<rt>●</rt></ruby></div>
'''
STYLE1 = 'line-height: 1; border-{pos}: 1px solid black; ' + \
'writing-mode: {wm}; text-emphasis-position: {posval};'
STYLE2 = 'text-emphasis: circle;'
TAGS = [
# (tag, start, end)
('div', '<div style="{style1}{style2}">', '</div>'),
('span', '<div style="{style1}"><span style="{style2}">', '</span></div>'),
]
POSITIONS = [
# pos, text-emphasis-position, ruby-position,
# writing-modes, dir text
('top', 'over right', 'over',
['horizontal-tb'], 'below'),
('bottom', 'under right', 'under',
['horizontal-tb'], 'over'),
('right', 'over right', 'over',
['vertical-rl', 'vertical-lr'], 'to the left of'),
('left', 'over left', 'under',
['vertical-rl', 'vertical-lr'], 'to the right of'),
]
import string
def write_file(filename, content):
with open(filename, 'wb') as f:
f.write(content.encode('UTF-8'))
print("# START tests from {}".format(__file__))
idx = 0
for (pos, emphasis_pos, ruby_pos, wms, dir) in POSITIONS:
idx += 1
ref_file = REF_FILE.format(idx)
content = REF_TEMPLATE.format(pos=pos, dir=dir, wm=wms[0], posval=ruby_pos)
write_file(ref_file, content)
suffix = iter(string.ascii_lowercase)
for wm in wms:
style1 = STYLE1.format(pos=pos, wm=wm, posval=emphasis_pos)
for (tag, start, end) in TAGS:
test_file = TEST_FILE.format(idx, next(suffix))
content = TEST_TEMPLATE.format(
pos=pos, wm=wm, tag=tag, index=idx, dir=dir,
start=start.format(style1=style1, style2=STYLE2), end=end)
write_file(test_file, content)
print("== {} {}".format(test_file, ref_file))
print("# END tests from {}".format(__file__))
|
adityamogadala/xLiMeSemanticIntegrator
|
refs/heads/master
|
xlimedataparser/DataCollector.py
|
1
|
# -*- coding: utf-8 -*-
#==============================================================================
#Description : Call all types of data collectors
#Author : Aditya Mogadala
#email : aditya.mogadala@kit.edu
#Version : 1.0.1
#Copyright : Institute AIFB, Karlsruhe Institute of Technology (KIT)
#==============================================================================
import commands
import time
import sys
import re
import VicoSocialMediaStream
import ZattooTvMetadata
import JsiNewsStream
import SubtitlesProcessing
import AdvancedSpeechKafkaProcessing
from threading import Thread
from os.path import expanduser
class AutomatePushToMongo:
def __init__(self,path,confdic):
self.path = path
self.configdict = confdic
def continous_java_run(self,topic):
tot = "java -cp ../utils/kafkaextractor_smallest.jar:. aifb.kit.xlime.kafkaextracor.RunExtractor "+topic+" "+self.configdict['KafkaConsumerGroupID']+" "+self.configdict['KafkaZookeeperPath'] # For using smallest offset of Kafka
# tot = "java -cp ../utils/kafkaextractor_largest.jar:. aifb.kit.xlime.kafkaextracor.RunExtractor "+topic+" "+self.configdict['KafkaConsumerGroupID']+" "+self.configdict['KafkaZookeeperPath'] # For using largest offset of Kafka
vals = commands.getoutput(tot)
time.sleep(2)
def continous_mongo_socialmedia(self):
topic=self.configdict['KafkaTopicSocialMedia']
path1=self.path+topic+"/"
sm = VicoSocialMediaStream.SocialMediaToMongo(path1,self.configdict,topic)
sm.MongoData()
def continous_mongo_zattoosub(self):
topic=self.configdict['KafkaTopicSubtitles']
path1=self.path+topic+"/"
sm = SubtitlesProcessing.PushToMongoSubtitles(path1,self.configdict,topic)
sm.MongoData()
def continous_mongo_zattooepg(self):
topic=self.configdict['KafkaTopicTVMetadata']
path1=self.path+topic+"/"
metadata = ZattooTvMetadata.ZattooToMongo(path1,self.configdict,topic)
metadata.MongoData()
def continous_mongo_zattooasr(self):
topic=self.configdict['KafkaTopicASR']
path1=self.path+topic+"/"
asrdata = AdvancedSpeechKafkaProcessing.PushToMongoSpeech(path1,self.configdict,topic)
asrdata.MongoData()
def continous_mongo_news(self):
topic=self.configdict['KafkaTopicNews']
path1=self.path+topic+"/"
news = JsiNewsStream.Producer(path1,self.configdict,topic)
news.run()
##### Add more here to support different types of data #######################
def main():
home = expanduser("~")
path = home+'/storedata/'
configdict={}
config = '../config/Config.conf'
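# The loop below expects simple key=value lines; for example (hypothetical value):
#   KafkaConsumerGroupID=xlime-consumer-group
# Lines without an '=' sign are ignored.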
with open(config) as config_file:
for lines in config_file:
if re.search(r'=',lines):
key = lines.strip('\n').strip().split('=')
configdict[key[0]]=key[1]
generatedata = AutomatePushToMongo(path,configdict)
try:
t1 = Thread(target=generatedata.continous_java_run, args=(configdict['KafkaTopicSocialMedia'],))
t1.start()
t2 = Thread(target=generatedata.continous_java_run, args=(configdict['KafkaTopicTVMetadata'],))
t2.start()
t0 = Thread(target=generatedata.continous_java_run, args=(configdict['KafkaTopicNews'],))
t0.start()
t6 = Thread(target=generatedata.continous_java_run, args=(configdict['KafkaTopicSubtitles'],))
t6.start()
t8 = Thread(target=generatedata.continous_java_run, args=(configdict['KafkaTopicASR'],))
t8.start()
t3 = Thread(target=generatedata.continous_mongo_socialmedia)
t3.start()
t4=Thread(target=generatedata.continous_mongo_zattooepg)
t4.start()
t5=Thread(target=generatedata.continous_mongo_news)
t5.start()
t7=Thread(target=generatedata.continous_mongo_zattoosub)
t7.start()
t9=Thread(target=generatedata.continous_mongo_zattooasr)
t9.start()
##### Add more here to support different types of data #######################
except:
pass
if __name__ =='__main__':
main()
|
kanagasabapathi/python-for-android
|
refs/heads/master
|
python3-alpha/python3-src/Lib/test/inspect_fodder2.py
|
179
|
# line 1
def wrap(foo=None):
def wrapper(func):
return func
return wrapper
# line 7
def replace(func):
def insteadfunc():
print('hello')
return insteadfunc
# line 13
@wrap()
@wrap(wrap)
def wrapped():
pass
# line 19
@replace
def gone():
pass
# line 24
oll = lambda m: m
# line 27
tll = lambda g: g and \
g and \
g
# line 32
tlli = lambda d: d and \
d
# line 36
def onelinefunc(): pass
# line 39
def manyargs(arg1, arg2,
arg3, arg4): pass
# line 43
def twolinefunc(m): return m and \
m
# line 47
a = [None,
lambda x: x,
None]
# line 52
def setfunc(func):
globals()["anonymous"] = func
setfunc(lambda x, y: x*y)
# line 57
def with_comment(): # hello
world
# line 61
multiline_sig = [
lambda x, \
y: x+y,
None,
]
# line 68
def func69():
class cls70:
def func71():
pass
return cls70
extra74 = 74
# line 76
def func77(): pass
(extra78, stuff78) = 'xy'
extra79 = 'stop'
# line 81
class cls82:
def func83(): pass
(extra84, stuff84) = 'xy'
extra85 = 'stop'
# line 87
def func88():
# comment
return 90
# line 92
def f():
class X:
def g():
"doc"
return 42
return X
method_in_dynamic_class = f().g
#line 101
def keyworded(*arg1, arg2=1):
pass
#line 105
def annotated(arg1: list):
pass
#line 109
def keyword_only_arg(*, arg):
pass
|
nyu-devops-echo/shopcarts
|
refs/heads/master
|
db_create.py
|
1
|
#!/usr/bin/python
"""
Database Creation Script
This Python script will create the database based on the
environment variables DATABASE_URI or SQLALCHEMY_DATABASE_URI
in that order. (DATABASE_URI overrides SQLALCHEMY_DATABASE_URI)
You can also override the database name in the URI by passing
in a new name.
Environment Variables:
---------------------
- SQLALCHEMY_DATABASE_URI : connection string from config
- DATABASE_URI: override config string
Arguments:
----------
- database_name : String the name of the database
"""
import os
import sys
import pymysql
from app import app, db
# DATABASE_URI = 'mysql+pymysql://root:passw0rd@localhost:3306/development'
DATABASE_URI = os.getenv('DATABASE_URI', None)
if DATABASE_URI:
app.config['SQLALCHEMY_DATABASE_URI'] = DATABASE_URI
# check to see if there is a database name override
if len(sys.argv) > 1:
dbname = sys.argv[1]
app.config['SQLALCHEMY_DATABASE_URI'] = '{}/{}'.format(
app.config['SQLALCHEMY_DATABASE_URI'].rsplit('/', 1)[0],
dbname
)
print('Database URI {}'.format(app.config['SQLALCHEMY_DATABASE_URI']))
try:
print("Creating database tables")
db.create_all()
except Exception as error:
print('Oops, got error {}'.format(error))
# Parse the URI for user, password, host
data = app.config['SQLALCHEMY_DATABASE_URI'].split('//')[1]
dbname = data.split('/')[1]
host = data.split('@')[1].split(':')[0]
creds = data.split('@')[0]
user = creds.split(':')[0]
password = creds.split(':')[1]
# Connect and create the database
conn = pymysql.connect(host=host, user=user, password=password)
conn.cursor().execute('create database IF NOT EXISTS {}'.format(dbname))
print("Creating database tables")
db.create_all()
|
montanapr/Plugin.Video.Mercy
|
refs/heads/master
|
servers/watchfreeinhd.py
|
44
|
# -*- coding: utf-8 -*-
#------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# Connector for watchfreeinhd
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os
from core import scrapertools
from core import logger
from core import config
def get_video_url( page_url , premium = False , user="" , password="", video_password="" ):
logger.info("[watchfreeinhd.py] get_video_url(page_url='%s')" % page_url)
video_urls = []
# Download the page; the user has two buttons, "Download" or "Watch"
data = scrapertools.cache_page(page_url)
# Download it again as if the "Watch" button had been pressed
# http://srv.hdplay.org/storage/flv/xGylz8.flv?token=703acade4b51aa6b26ad264327c4a4cf
data = scrapertools.cache_page(page_url,post="agree=")
patron = '<div id="playerHolder">[^<]+'
patron += '<a href="([^"]+)"'
matches = re.compile(patron,re.DOTALL).findall(data)
if len(matches)>0:
video_urls.append( ["[watchfreeinhd]",matches[0] ] )
for video_url in video_urls:
logger.info("[watchfreeinhd.py] %s - %s" % (video_url[0],video_url[1]))
return video_urls
# Find this server's videos in the given text
def find_videos(data):
encontrados = set()
devuelve = []
# http://www.watchfreeinhd.com/r0GUbN
patronvideos = '(http://www.watchfreeinhd.com/[A-Za-z0-9]+)'
logger.info("[watchfreeinhd.py] find_videos #"+patronvideos+"#")
matches = re.compile(patronvideos,re.DOTALL).findall(data)
for match in matches:
titulo = "[watchfreeinhd]"
url = match
if url not in encontrados:
logger.info(" url="+url)
devuelve.append( [ titulo , url , 'watchfreeinhd' ] )
encontrados.add(url)
else:
logger.info(" url duplicada="+url)
return devuelve
|
yzl0083/orange
|
refs/heads/master
|
Orange/OrangeCanvas/help/intersphinx.py
|
6
|
"""
Parsers for intersphinx inventory files
Taken from `sphinx.ext.intersphinx`
"""
import re
import codecs
import zlib
b = str
UTF8StreamReader = codecs.lookup('utf-8')[2]
def read_inventory_v1(f, uri, join):
f = UTF8StreamReader(f)
invdata = {}
line = f.next()
projname = line.rstrip()[11:]
line = f.next()
version = line.rstrip()[11:]
for line in f:
name, type, location = line.rstrip().split(None, 2)
location = join(uri, location)
# version 1 did not add anchors to the location
if type == 'mod':
type = 'py:module'
location += '#module-' + name
else:
type = 'py:' + type
location += '#' + name
invdata.setdefault(type, {})[name] = (projname, version, location, '-')
return invdata
def read_inventory_v2(f, uri, join, bufsize=16*1024):
invdata = {}
line = f.readline()
projname = line.rstrip()[11:].decode('utf-8')
line = f.readline()
version = line.rstrip()[11:].decode('utf-8')
line = f.readline().decode('utf-8')
if 'zlib' not in line:
raise ValueError
def read_chunks():
decompressor = zlib.decompressobj()
for chunk in iter(lambda: f.read(bufsize), b('')):
yield decompressor.decompress(chunk)
yield decompressor.flush()
def split_lines(iter):
buf = b('')
for chunk in iter:
buf += chunk
lineend = buf.find(b('\n'))
while lineend != -1:
yield buf[:lineend].decode('utf-8')
buf = buf[lineend+1:]
lineend = buf.find(b('\n'))
assert not buf
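# Sketch of the inventory v2 data format this loop parses (an assumed typical
# objects.inv entry, not taken from this repository):
#   copy.deepcopy py:function 1 library/copy.html#$ -
# A trailing '$' in the location is shorthand for the entry name, and a
# dispname of '-' means it is the same as the name.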
for line in split_lines(read_chunks()):
# be careful to handle names with embedded spaces correctly
m = re.match(r'(?x)(.+?)\s+(\S*:\S*)\s+(\S+)\s+(\S+)\s+(.*)',
line.rstrip())
if not m:
continue
name, type, prio, location, dispname = m.groups()
if location.endswith(u'$'):
location = location[:-1] + name
location = join(uri, location)
invdata.setdefault(type, {})[name] = (projname, version,
location, dispname)
return invdata
|
ArtsiomCh/tensorflow
|
refs/heads/master
|
tensorflow/contrib/distributions/python/__init__.py
|
959
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""ops module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
|
Juniper/python-neutronclient
|
refs/heads/master
|
neutronclient/tests/unit/test_cli20_securitygroup.py
|
1
|
#!/usr/bin/env python
# Copyright 2012 Red Hat
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
import uuid
from mox3 import mox
import six
from neutronclient.common import exceptions
from neutronclient.common import utils
from neutronclient.neutron.v2_0 import securitygroup
from neutronclient.tests.unit import test_cli20
class CLITestV20SecurityGroupsJSON(test_cli20.CLITestV20Base):
non_admin_status_resources = ['security_group', 'security_group_rule']
def test_create_security_group(self):
# Create security group: webservers.
resource = 'security_group'
cmd = securitygroup.CreateSecurityGroup(
test_cli20.MyApp(sys.stdout), None)
name = 'webservers'
myid = 'myid'
args = [name, ]
position_names = ['name']
position_values = [name]
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values)
def test_create_security_group_tenant(self):
# Create security group: webservers.
resource = 'security_group'
cmd = securitygroup.CreateSecurityGroup(
test_cli20.MyApp(sys.stdout), None)
name = 'webservers'
description = 'my webservers'
myid = 'myid'
args = ['--tenant_id', 'tenant_id', '--description', description, name]
position_names = ['name', 'description']
position_values = [name, description]
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values,
tenant_id='tenant_id')
def test_create_security_group_with_description(self):
# Create security group: webservers.
resource = 'security_group'
cmd = securitygroup.CreateSecurityGroup(
test_cli20.MyApp(sys.stdout), None)
name = 'webservers'
description = 'my webservers'
myid = 'myid'
args = [name, '--description', description]
position_names = ['name', 'description']
position_values = [name, description]
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values)
def test_list_security_groups(self):
resources = "security_groups"
cmd = securitygroup.ListSecurityGroup(
test_cli20.MyApp(sys.stdout), None)
self._test_list_resources(resources, cmd, True)
def test_list_security_groups_pagination(self):
resources = "security_groups"
cmd = securitygroup.ListSecurityGroup(
test_cli20.MyApp(sys.stdout), None)
self._test_list_resources_with_pagination(resources, cmd)
def test_list_security_groups_sort(self):
resources = "security_groups"
cmd = securitygroup.ListSecurityGroup(
test_cli20.MyApp(sys.stdout), None)
self._test_list_resources(resources, cmd,
sort_key=["name", "id"],
sort_dir=["asc", "desc"])
def test_list_security_groups_limit(self):
resources = "security_groups"
cmd = securitygroup.ListSecurityGroup(
test_cli20.MyApp(sys.stdout), None)
self._test_list_resources(resources, cmd, page_size=1000)
def test_show_security_group_id(self):
resource = 'security_group'
cmd = securitygroup.ShowSecurityGroup(
test_cli20.MyApp(sys.stdout), None)
args = ['--fields', 'id', self.test_id]
self._test_show_resource(resource, cmd, self.test_id,
args, ['id'])
def test_show_security_group_id_name(self):
resource = 'security_group'
cmd = securitygroup.ShowSecurityGroup(
test_cli20.MyApp(sys.stdout), None)
args = ['--fields', 'id', '--fields', 'name', self.test_id]
self._test_show_resource(resource, cmd, self.test_id,
args, ['id', 'name'])
def test_delete_security_group(self):
# Delete security group: myid.
resource = 'security_group'
cmd = securitygroup.DeleteSecurityGroup(
test_cli20.MyApp(sys.stdout), None)
myid = 'myid'
args = [myid]
self._test_delete_resource(resource, cmd, myid, args)
def test_update_security_group(self):
# Update security group: myid --name myname --description desc.
resource = 'security_group'
cmd = securitygroup.UpdateSecurityGroup(
test_cli20.MyApp(sys.stdout), None)
self._test_update_resource(resource, cmd, 'myid',
['myid', '--name', 'myname',
'--description', 'mydescription'],
{'name': 'myname',
'description': 'mydescription'}
)
def test_update_security_group_with_unicode(self):
resource = 'security_group'
cmd = securitygroup.UpdateSecurityGroup(
test_cli20.MyApp(sys.stdout), None)
self._test_update_resource(resource, cmd, 'myid',
['myid', '--name', u'\u7f51\u7edc',
'--description', u'\u7f51\u7edc'],
{'name': u'\u7f51\u7edc',
'description': u'\u7f51\u7edc'}
)
def test_create_security_group_rule_full(self):
# Create security group rule.
resource = 'security_group_rule'
cmd = securitygroup.CreateSecurityGroupRule(
test_cli20.MyApp(sys.stdout), None)
myid = 'myid'
direction = 'ingress'
ethertype = 'IPv4'
protocol = 'tcp'
port_range_min = '22'
port_range_max = '22'
remote_ip_prefix = '10.0.0.0/24'
security_group_id = '1'
remote_group_id = '1'
args = ['--remote_ip_prefix', remote_ip_prefix, '--direction',
direction, '--ethertype', ethertype, '--protocol', protocol,
'--port_range_min', port_range_min, '--port_range_max',
port_range_max, '--remote_group_id', remote_group_id,
security_group_id, '--description', 'PCI policy 1421912']
position_names = ['remote_ip_prefix', 'direction', 'ethertype',
'protocol', 'port_range_min', 'port_range_max',
'remote_group_id', 'security_group_id']
position_values = [remote_ip_prefix, direction, ethertype, protocol,
port_range_min, port_range_max, remote_group_id,
security_group_id]
self._test_create_resource(resource, cmd, None, myid, args,
position_names, position_values,
description='PCI policy 1421912')
def test_create_security_group_rule_with_integer_protocol_value(self):
resource = 'security_group_rule'
cmd = securitygroup.CreateSecurityGroupRule(
test_cli20.MyApp(sys.stdout), None)
myid = 'myid'
direction = 'ingress'
ethertype = 'IPv4'
protocol = '2'
port_range_min = '22'
port_range_max = '22'
remote_ip_prefix = '10.0.0.0/24'
security_group_id = '1'
remote_group_id = '1'
args = ['--remote_ip_prefix', remote_ip_prefix, '--direction',
direction, '--ethertype', ethertype, '--protocol', protocol,
'--port_range_min', port_range_min, '--port_range_max',
port_range_max, '--remote_group_id', remote_group_id,
security_group_id]
position_names = ['remote_ip_prefix', 'direction', 'ethertype',
'protocol', 'port_range_min', 'port_range_max',
'remote_group_id', 'security_group_id']
position_values = [remote_ip_prefix, direction, ethertype, protocol,
port_range_min, port_range_max, remote_group_id,
security_group_id]
self._test_create_resource(resource, cmd, None, myid, args,
position_names, position_values)
def test_delete_security_group_rule(self):
# Delete security group rule: myid.
resource = 'security_group_rule'
cmd = securitygroup.DeleteSecurityGroupRule(
test_cli20.MyApp(sys.stdout), None)
myid = 'myid'
args = [myid]
self._test_delete_resource(resource, cmd, myid, args)
def test_list_security_group_rules(self):
resources = "security_group_rules"
cmd = securitygroup.ListSecurityGroupRule(
test_cli20.MyApp(sys.stdout), None)
self.mox.StubOutWithMock(securitygroup.ListSecurityGroupRule,
"extend_list")
securitygroup.ListSecurityGroupRule.extend_list(mox.IsA(list),
mox.IgnoreArg())
self._test_list_resources(resources, cmd, True)
def _test_extend_list(self, mox_calls, data):
resources = "security_groups"
cmd = securitygroup.ListSecurityGroupRule(
test_cli20.MyApp(sys.stdout), None)
self.mox.StubOutWithMock(cmd, "get_client")
self.mox.StubOutWithMock(self.client.httpclient, "request")
cmd.get_client().MultipleTimes().AndReturn(self.client)
path = getattr(self.client, resources + '_path')
mox_calls(path, data)
self.mox.ReplayAll()
known_args, _vs = cmd.get_parser(
'list' + resources).parse_known_args()
cmd.extend_list(data, known_args)
self.mox.VerifyAll()
self.mox.UnsetStubs()
def _build_test_data(self, data, excess=0):
# Length of a query filter on security group rule id
# in these testcases, id='secgroupid%02d' (with len(id)=12)
sec_group_id_filter_len = 12
response = []
replace_rules = {'security_group_id': 'security_group',
'remote_group_id': 'remote_group'}
search_opts = {'fields': ['id', 'name']}
sec_group_ids = set()
for rule in data:
for key in replace_rules:
if rule.get(key):
sec_group_ids.add(rule[key])
response.append({'id': rule[key], 'name': 'default'})
sec_group_ids = list(sec_group_ids)
result = []
sec_group_count = len(sec_group_ids)
max_size = ((sec_group_id_filter_len * sec_group_count) - excess)
chunk_size = max_size // sec_group_id_filter_len
for i in range(0, sec_group_count, chunk_size):
search_opts['id'] = sec_group_ids[i: i + chunk_size]
params = utils.safe_encode_dict(search_opts)
resp_str = self.client.serialize({'security_groups': response})
result.append({
'filter': six.moves.urllib.parse.urlencode(params, doseq=1),
'response': (test_cli20.MyResp(200), resp_str),
})
return result
def test_extend_list(self):
def mox_calls(path, data):
responses = self._build_test_data(data)
self.client.httpclient.request(
test_cli20.MyUrlComparator(test_cli20.end_url(
path, responses[0]['filter']), self.client),
'GET',
body=None,
headers=mox.ContainsKeyValue(
'X-Auth-Token', test_cli20.TOKEN)).AndReturn(
responses[0]['response'])
data = [{'name': 'default',
'remote_group_id': 'remgroupid%02d' % i}
for i in range(10)]
data.append({'name': 'default', 'remote_group_id': None})
self._test_extend_list(mox_calls, data)
def test_extend_list_exceed_max_uri_len(self):
def mox_calls(path, data):
# 1 char of extra URI len will cause a split in 2 requests
self.mox.StubOutWithMock(self.client.httpclient,
'_check_uri_length')
self.client.httpclient._check_uri_length(mox.IgnoreArg()).AndRaise(
exceptions.RequestURITooLong(excess=1))
responses = self._build_test_data(data, excess=1)
for item in responses:
self.client.httpclient._check_uri_length(
mox.IgnoreArg()).AndReturn(None)
self.client.httpclient.request(
test_cli20.MyUrlComparator(
test_cli20.end_url(path, item['filter']), self.client),
'GET',
body=None,
headers=mox.ContainsKeyValue(
'X-Auth-Token', test_cli20.TOKEN)).AndReturn(
item['response'])
data = [{'name': 'default',
'security_group_id': 'secgroupid%02d' % i,
'remote_group_id': 'remgroupid%02d' % i}
for i in range(10)]
data.append({'name': 'default',
'security_group_id': 'secgroupid10',
'remote_group_id': None})
self._test_extend_list(mox_calls, data)
def test_list_security_group_rules_pagination(self):
resources = "security_group_rules"
cmd = securitygroup.ListSecurityGroupRule(
test_cli20.MyApp(sys.stdout), None)
self.mox.StubOutWithMock(securitygroup.ListSecurityGroupRule,
"extend_list")
securitygroup.ListSecurityGroupRule.extend_list(mox.IsA(list),
mox.IgnoreArg())
self._test_list_resources_with_pagination(resources, cmd)
def test_list_security_group_rules_sort(self):
resources = "security_group_rules"
cmd = securitygroup.ListSecurityGroupRule(
test_cli20.MyApp(sys.stdout), None)
self.mox.StubOutWithMock(securitygroup.ListSecurityGroupRule,
"extend_list")
securitygroup.ListSecurityGroupRule.extend_list(mox.IsA(list),
mox.IgnoreArg())
self._test_list_resources(resources, cmd,
sort_key=["name", "id"],
sort_dir=["asc", "desc"])
def test_list_security_group_rules_limit(self):
resources = "security_group_rules"
cmd = securitygroup.ListSecurityGroupRule(
test_cli20.MyApp(sys.stdout), None)
self.mox.StubOutWithMock(securitygroup.ListSecurityGroupRule,
"extend_list")
securitygroup.ListSecurityGroupRule.extend_list(mox.IsA(list),
mox.IgnoreArg())
self._test_list_resources(resources, cmd, page_size=1000)
def test_show_security_group_rule(self):
resource = 'security_group_rule'
cmd = securitygroup.ShowSecurityGroupRule(
test_cli20.MyApp(sys.stdout), None)
args = ['--fields', 'id', self.test_id]
self._test_show_resource(resource, cmd, self.test_id,
args, ['id'])
def _test_list_security_group_rules_extend(self, api_data, expected,
args=(), conv=True,
query_fields=None):
def setup_list_stub(resources, data, query):
reses = {resources: data}
resstr = self.client.serialize(reses)
resp = (test_cli20.MyResp(200), resstr)
path = getattr(self.client, resources + '_path')
self.client.httpclient.request(
test_cli20.MyUrlComparator(
test_cli20.end_url(path, query),
self.client),
'GET',
body=None,
headers=mox.ContainsKeyValue(
'X-Auth-Token', test_cli20.TOKEN)).AndReturn(resp)
cmd = securitygroup.ListSecurityGroupRule(
test_cli20.MyApp(sys.stdout), None)
self.mox.StubOutWithMock(cmd, 'get_client')
self.mox.StubOutWithMock(self.client.httpclient, 'request')
cmd.get_client().MultipleTimes().AndReturn(self.client)
query = ''
if query_fields:
query = '&'.join(['fields=' + f for f in query_fields])
setup_list_stub('security_group_rules', api_data, query)
if conv:
sec_ids = set()
for n in api_data:
sec_ids.add(n['security_group_id'])
if n.get('remote_group_id'):
sec_ids.add(n['remote_group_id'])
filters = ''
for id in sec_ids:
filters = filters + "&id=%s" % id
setup_list_stub('security_groups',
[{'id': 'myid1', 'name': 'group1'},
{'id': 'myid2', 'name': 'group2'},
{'id': 'myid3', 'name': 'group3'}],
query='fields=id&fields=name' + filters)
self.mox.ReplayAll()
cmd_parser = cmd.get_parser('list_security_group_rules')
parsed_args = cmd_parser.parse_args(args)
result = cmd.take_action(parsed_args)
self.mox.VerifyAll()
self.mox.UnsetStubs()
# Check columns
self.assertEqual(expected['cols'], result[0])
# Check data
_result = [x for x in result[1]]
self.assertEqual(len(expected['data']), len(_result))
for res, exp in zip(_result, expected['data']):
self.assertEqual(len(exp), len(res))
self.assertEqual(exp, res)
def _test_list_security_group_rules_extend_sg_name(
self, expected_mode=None, args=(), conv=True, query_field=False):
if query_field:
field_filters = ['id', 'security_group_id',
'remote_ip_prefix', 'remote_group_id']
else:
field_filters = None
data = [self._prepare_rule(rule_id='ruleid1', sg_id='myid1',
remote_group_id='myid1',
filters=field_filters),
self._prepare_rule(rule_id='ruleid2', sg_id='myid2',
remote_group_id='myid3',
filters=field_filters),
self._prepare_rule(rule_id='ruleid3', sg_id='myid2',
remote_group_id='myid2',
filters=field_filters),
]
if expected_mode == 'noconv':
expected = {'cols': ['id', 'security_group_id', 'remote_group_id'],
'data': [('ruleid1', 'myid1', 'myid1'),
('ruleid2', 'myid2', 'myid3'),
('ruleid3', 'myid2', 'myid2')]}
elif expected_mode == 'remote_group_id':
expected = {'cols': ['id', 'security_group', 'remote_group'],
'data': [('ruleid1', 'group1', 'group1'),
('ruleid2', 'group2', 'group3'),
('ruleid3', 'group2', 'group2')]}
else:
expected = {'cols': ['id', 'security_group', 'remote'],
'data': [('ruleid1', 'group1', 'group1 (group)'),
('ruleid2', 'group2', 'group3 (group)'),
('ruleid3', 'group2', 'group2 (group)')]}
self._test_list_security_group_rules_extend(
data, expected, args=args, conv=conv, query_fields=field_filters)
def test_list_security_group_rules_extend_remote_sg_name(self):
args = '-c id -c security_group -c remote'.split()
self._test_list_security_group_rules_extend_sg_name(args=args)
def test_list_security_group_rules_extend_sg_name_noconv(self):
args = '--no-nameconv -c id -c security_group_id -c remote_group_id'
args = args.split()
self._test_list_security_group_rules_extend_sg_name(
expected_mode='noconv', args=args, conv=False)
def test_list_security_group_rules_extend_sg_name_with_columns(self):
args = '-c id -c security_group_id -c remote_group_id'.split()
self._test_list_security_group_rules_extend_sg_name(
expected_mode='remote_group_id', args=args)
def test_list_security_group_rules_extend_sg_name_with_columns_no_id(self):
args = '-c id -c security_group -c remote_group'.split()
self._test_list_security_group_rules_extend_sg_name(
expected_mode='remote_group_id', args=args)
def test_list_security_group_rules_extend_sg_name_with_fields(self):
# NOTE: remote_ip_prefix is required to show "remote" column
args = ('-F id -F security_group_id '
'-F remote_ip_prefix -F remote_group_id').split()
self._test_list_security_group_rules_extend_sg_name(
args=args, query_field=True)
def test_list_security_group_rules_extend_sg_name_with_fields_no_id(self):
# NOTE: remote_ip_prefix is required to show "remote" column
args = ('-F id -F security_group '
'-F remote_ip_prefix -F remote_group').split()
self._test_list_security_group_rules_extend_sg_name(args=args,
query_field=True)
def test_list_security_group_rules_extend_remote(self):
args = '-c id -c security_group -c remote'.split()
data = [self._prepare_rule(rule_id='ruleid1', sg_id='myid1',
remote_ip_prefix='172.16.18.0/24'),
self._prepare_rule(rule_id='ruleid2', sg_id='myid2',
remote_ip_prefix='172.16.20.0/24'),
self._prepare_rule(rule_id='ruleid3', sg_id='myid2',
remote_group_id='myid3')]
expected = {'cols': ['id', 'security_group', 'remote'],
'data': [('ruleid1', 'group1', '172.16.18.0/24 (CIDR)'),
('ruleid2', 'group2', '172.16.20.0/24 (CIDR)'),
('ruleid3', 'group2', 'group3 (group)')]}
self._test_list_security_group_rules_extend(data, expected, args)
def test_list_security_group_rules_extend_proto_port(self):
data = [self._prepare_rule(rule_id='ruleid1', sg_id='myid1',
protocol='tcp',
port_range_min=22, port_range_max=22),
self._prepare_rule(rule_id='ruleid2', sg_id='myid2',
direction='egress', ethertype='IPv6',
protocol='udp',
port_range_min=80, port_range_max=81),
self._prepare_rule(rule_id='ruleid3', sg_id='myid2',
protocol='icmp',
remote_ip_prefix='10.2.0.0/16')]
expected = {
'cols': ['id', 'security_group', 'direction', 'ethertype',
'port/protocol', 'remote'],
'data': [
('ruleid1', 'group1', 'ingress', 'IPv4', '22/tcp', 'any'),
('ruleid2', 'group2', 'egress', 'IPv6', '80-81/udp', 'any'),
('ruleid3', 'group2', 'ingress', 'IPv4', 'icmp',
'10.2.0.0/16 (CIDR)')
]}
self._test_list_security_group_rules_extend(data, expected)
def _prepare_rule(self, rule_id=None, sg_id=None, tenant_id=None,
direction=None, ethertype=None,
protocol=None, port_range_min=None, port_range_max=None,
remote_ip_prefix=None, remote_group_id=None,
filters=None):
rule = {'id': rule_id or str(uuid.uuid4()),
'tenant_id': tenant_id or str(uuid.uuid4()),
'security_group_id': sg_id or str(uuid.uuid4()),
'direction': direction or 'ingress',
'ethertype': ethertype or 'IPv4',
'protocol': protocol,
'port_range_min': port_range_min,
'port_range_max': port_range_max,
'remote_ip_prefix': remote_ip_prefix,
'remote_group_id': remote_group_id}
if filters:
return dict([(k, v) for k, v in rule.items() if k in filters])
else:
return rule
def test__get_remote_both_unspecified(self):
sg_rule = self._prepare_rule(remote_ip_prefix=None,
remote_group_id=None)
self.assertIsNone(securitygroup._get_remote(sg_rule))
def test__get_remote_remote_ip_prefix_specified(self):
sg_rule = self._prepare_rule(remote_ip_prefix='172.16.18.0/24')
self.assertEqual('172.16.18.0/24 (CIDR)',
securitygroup._get_remote(sg_rule))
def test__get_remote_remote_group_specified(self):
sg_rule = self._prepare_rule(remote_group_id='sg_id1')
self.assertEqual('sg_id1 (group)', securitygroup._get_remote(sg_rule))
def test__get_protocol_port_all_none(self):
sg_rule = self._prepare_rule()
self.assertIsNone(securitygroup._get_protocol_port(sg_rule))
def test__get_protocol_port_tcp_all_port(self):
sg_rule = self._prepare_rule(protocol='tcp')
self.assertEqual('tcp', securitygroup._get_protocol_port(sg_rule))
def test__get_protocol_port_tcp_one_port(self):
sg_rule = self._prepare_rule(protocol='tcp',
port_range_min=22, port_range_max=22)
self.assertEqual('22/tcp', securitygroup._get_protocol_port(sg_rule))
def test__get_protocol_port_tcp_port_range(self):
sg_rule = self._prepare_rule(protocol='tcp',
port_range_min=5000, port_range_max=5010)
self.assertEqual('5000-5010/tcp',
securitygroup._get_protocol_port(sg_rule))
def test__get_protocol_port_udp_all_port(self):
sg_rule = self._prepare_rule(protocol='udp')
self.assertEqual('udp', securitygroup._get_protocol_port(sg_rule))
def test__get_protocol_port_udp_one_port(self):
sg_rule = self._prepare_rule(protocol='udp',
port_range_min=22, port_range_max=22)
self.assertEqual('22/udp', securitygroup._get_protocol_port(sg_rule))
def test__get_protocol_port_udp_port_range(self):
sg_rule = self._prepare_rule(protocol='udp',
port_range_min=5000, port_range_max=5010)
self.assertEqual('5000-5010/udp',
securitygroup._get_protocol_port(sg_rule))
def test__get_protocol_port_icmp_all(self):
sg_rule = self._prepare_rule(protocol='icmp')
self.assertEqual('icmp', securitygroup._get_protocol_port(sg_rule))
def test_get_ethertype_for_protocol_icmpv6(self):
self.assertEqual('IPv6',
securitygroup.generate_default_ethertype('icmpv6'))
def test_get_ethertype_for_protocol_icmp(self):
self.assertEqual('IPv4',
securitygroup.generate_default_ethertype('icmp'))
def test__get_protocol_port_udp_code_type(self):
sg_rule = self._prepare_rule(protocol='icmp',
port_range_min=1, port_range_max=8)
self.assertEqual('icmp (type:1, code:8)',
securitygroup._get_protocol_port(sg_rule))
def test__format_sg_rules(self):
rules = [self._prepare_rule(),
self._prepare_rule(protocol='tcp', port_range_min=80,
port_range_max=80),
self._prepare_rule(remote_ip_prefix='192.168.1.0/24'),
self._prepare_rule(remote_group_id='group1'),
self._prepare_rule(protocol='tcp',
remote_ip_prefix='10.1.1.0/24'),
self._prepare_rule(direction='egress'),
self._prepare_rule(direction='egress', ethertype='IPv6'),
]
sg = {'security_group_rules': rules}
expected_data = ['ingress, IPv4',
'ingress, IPv4, 80/tcp',
'ingress, IPv4, remote_ip_prefix: 192.168.1.0/24',
'ingress, IPv4, remote_group_id: group1',
'ingress, IPv4, tcp, remote_ip_prefix: 10.1.1.0/24',
'egress, IPv4',
'egress, IPv6',
]
expected = '\n'.join(sorted(expected_data))
self.assertEqual(expected, securitygroup._format_sg_rules(sg))
|
currychou/1
|
refs/heads/master
|
static/Brython3.1.3-20150514-095342/Lib/copy.py
|
628
|
"""Generic (shallow and deep) copying operations.
Interface summary:
import copy
x = copy.copy(y) # make a shallow copy of y
x = copy.deepcopy(y) # make a deep copy of y
For module specific errors, copy.Error is raised.
The difference between shallow and deep copying is only relevant for
compound objects (objects that contain other objects, like lists or
class instances).
- A shallow copy constructs a new compound object and then (to the
extent possible) inserts *the same objects* into it that the
original contains.
- A deep copy constructs a new compound object and then, recursively,
inserts *copies* into it of the objects found in the original.
Two problems often exist with deep copy operations that don't exist
with shallow copy operations:
a) recursive objects (compound objects that, directly or indirectly,
contain a reference to themselves) may cause a recursive loop
b) because deep copy copies *everything* it may copy too much, e.g.
administrative data structures that should be shared even between
copies
Python's deep copy operation avoids these problems by:
a) keeping a table of objects already copied during the current
copying pass
b) letting user-defined classes override the copying operation or the
set of components copied
This version does not copy types like module, class, function, method,
nor stack trace, stack frame, nor file, socket, window, nor array, nor
any similar types.
Classes can use the same interfaces to control copying that they use
to control pickling: they can define methods called __getinitargs__(),
__getstate__() and __setstate__(). See the documentation for module
"pickle" for information on these methods.
"""
import types
import weakref
from copyreg import dispatch_table
import builtins
class Error(Exception):
pass
error = Error # backward compatibility
# module org.python.core does not exist in Brython, so lets just ignore
# this import request.
#try:
# from org.python.core import PyStringMap
#except ImportError:
# PyStringMap = None
PyStringMap = None
__all__ = ["Error", "copy", "deepcopy"]
def copy(x):
"""Shallow copy operation on arbitrary Python objects.
See the module's __doc__ string for more info.
"""
cls = type(x)
copier = _copy_dispatch.get(cls)
if copier:
return copier(x)
copier = getattr(cls, "__copy__", None)
if copier:
return copier(x)
reductor = dispatch_table.get(cls)
if reductor:
rv = reductor(x)
else:
reductor = getattr(x, "__reduce_ex__", None)
if reductor:
rv = reductor(2)
else:
reductor = getattr(x, "__reduce__", None)
if reductor:
rv = reductor()
else:
raise Error("un(shallow)copyable object of type %s" % cls)
return _reconstruct(x, rv, 0)
_copy_dispatch = d = {}
def _copy_immutable(x):
return x
for t in (type(None), int, float, bool, str, tuple,
frozenset, type, range,
types.BuiltinFunctionType, type(Ellipsis),
types.FunctionType, weakref.ref):
d[t] = _copy_immutable
t = getattr(types, "CodeType", None)
if t is not None:
d[t] = _copy_immutable
for name in ("complex", "unicode"):
t = getattr(builtins, name, None)
if t is not None:
d[t] = _copy_immutable
def _copy_with_constructor(x):
return type(x)(x)
for t in (list, dict, set):
d[t] = _copy_with_constructor
def _copy_with_copy_method(x):
return x.copy()
if PyStringMap is not None:
d[PyStringMap] = _copy_with_copy_method
del d
def deepcopy(x, memo=None, _nil=[]):
"""Deep copy operation on arbitrary Python objects.
See the module's __doc__ string for more info.
"""
if memo is None:
memo = {}
d = id(x)
y = memo.get(d, _nil)
if y is not _nil:
return y
cls = type(x)
copier = _deepcopy_dispatch.get(cls)
if copier:
y = copier(x, memo)
else:
try:
issc = issubclass(cls, type)
except TypeError: # cls is not a class (old Boost; see SF #502085)
issc = 0
if issc:
y = _deepcopy_atomic(x, memo)
else:
copier = getattr(x, "__deepcopy__", None)
if copier:
y = copier(memo)
else:
reductor = dispatch_table.get(cls)
if reductor:
rv = reductor(x)
else:
reductor = getattr(x, "__reduce_ex__", None)
if reductor:
rv = reductor(2)
else:
reductor = getattr(x, "__reduce__", None)
if reductor:
rv = reductor()
else:
raise Error(
"un(deep)copyable object of type %s" % cls)
y = _reconstruct(x, rv, 1, memo)
    # If the copy turned out to be the original object itself, don't memoize.
if y is not x:
memo[d] = y
_keep_alive(x, memo) # Make sure x lives at least as long as d
return y
_deepcopy_dispatch = d = {}
def _deepcopy_atomic(x, memo):
return x
d[type(None)] = _deepcopy_atomic
d[type(Ellipsis)] = _deepcopy_atomic
d[int] = _deepcopy_atomic
d[float] = _deepcopy_atomic
d[bool] = _deepcopy_atomic
try:
d[complex] = _deepcopy_atomic
except NameError:
pass
d[bytes] = _deepcopy_atomic
d[str] = _deepcopy_atomic
try:
d[types.CodeType] = _deepcopy_atomic
except AttributeError:
pass
d[type] = _deepcopy_atomic
d[range] = _deepcopy_atomic
d[types.BuiltinFunctionType] = _deepcopy_atomic
d[types.FunctionType] = _deepcopy_atomic
d[weakref.ref] = _deepcopy_atomic
def _deepcopy_list(x, memo):
y = []
memo[id(x)] = y
for a in x:
y.append(deepcopy(a, memo))
return y
d[list] = _deepcopy_list
def _deepcopy_tuple(x, memo):
y = []
for a in x:
y.append(deepcopy(a, memo))
# We're not going to put the tuple in the memo, but it's still important we
# check for it, in case the tuple contains recursive mutable structures.
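    # (Editor's note) Example of why this matters: for lst = []; t = (lst,);
    # lst.append(t), deep-copying t first deep-copies lst, which recursively
    # deep-copies t again; that inner result is memoized by deepcopy(), and the
    # lookup below returns it instead of building a second, divergent tuple.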
try:
return memo[id(x)]
except KeyError:
pass
for i in range(len(x)):
if x[i] is not y[i]:
y = tuple(y)
break
else:
y = x
return y
d[tuple] = _deepcopy_tuple
def _deepcopy_dict(x, memo):
y = {}
memo[id(x)] = y
for key, value in x.items():
y[deepcopy(key, memo)] = deepcopy(value, memo)
return y
d[dict] = _deepcopy_dict
if PyStringMap is not None:
d[PyStringMap] = _deepcopy_dict
def _deepcopy_method(x, memo): # Copy instance methods
return type(x)(x.__func__, deepcopy(x.__self__, memo))
_deepcopy_dispatch[types.MethodType] = _deepcopy_method
def _keep_alive(x, memo):
"""Keeps a reference to the object x in the memo.
Because we remember objects by their id, we have
to assure that possibly temporary objects are kept
alive by referencing them.
We store a reference at the id of the memo, which should
normally not be used unless someone tries to deepcopy
the memo itself...
"""
try:
memo[id(memo)].append(x)
except KeyError:
# aha, this is the first one :-)
memo[id(memo)]=[x]
def _reconstruct(x, info, deep, memo=None):
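    # (Editor's note) `info` is the value produced by __reduce_ex__()/
    # __reduce__(): either a plain string, meaning the object should be
    # returned unchanged, or a tuple of two to five elements of the form
    # (callable, args[, state[, listiter[, dictiter]]]).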
if isinstance(info, str):
return x
assert isinstance(info, tuple)
if memo is None:
memo = {}
n = len(info)
assert n in (2, 3, 4, 5)
callable, args = info[:2]
if n > 2:
state = info[2]
else:
state = {}
if n > 3:
listiter = info[3]
else:
listiter = None
if n > 4:
dictiter = info[4]
else:
dictiter = None
if deep:
args = deepcopy(args, memo)
y = callable(*args)
memo[id(x)] = y
if state:
if deep:
state = deepcopy(state, memo)
if hasattr(y, '__setstate__'):
y.__setstate__(state)
else:
if isinstance(state, tuple) and len(state) == 2:
state, slotstate = state
else:
slotstate = None
if state is not None:
y.__dict__.update(state)
if slotstate is not None:
for key, value in slotstate.items():
setattr(y, key, value)
if listiter is not None:
for item in listiter:
if deep:
item = deepcopy(item, memo)
y.append(item)
if dictiter is not None:
for key, value in dictiter:
if deep:
key = deepcopy(key, memo)
value = deepcopy(value, memo)
y[key] = value
return y
del d
del types
# Helper for instance creation without calling __init__
class _EmptyClass:
pass
|
ufal/neuralmonkey
|
refs/heads/master
|
scripts/decompound_truecased.py
|
3
|
#!/usr/bin/env python3
import sys
import codecs
import javabridge
from tokenize_data import get_decompounder
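# (Editor's note) Sketch of the I/O contract inferred from the code below: the
# script reads whitespace-tokenized lines on stdin and, for every token that
# starts with an uppercase letter, asks the Java-based decompounder to split
# it; split parts are re-joined with the ">><<" marker and empty parts become
# "-". For example, "Donaudampfschiff faehrt" could come out as
# "Donau>><<dampf>><<schiff faehrt", depending on the decompounder model.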
def main():
    sys.stdin = codecs.getreader('utf-8')(sys.stdin.buffer)
    sys.stdout = codecs.getwriter('utf-8')(sys.stdout.buffer)
    sys.stderr = codecs.getwriter('utf-8')(sys.stderr.buffer)
try:
decompounder = get_decompounder()
for line in sys.stdin:
tokens = []
for token in line.rstrip().split(" "):
if not token:
continue
if token[0].isupper():
decompounded = decompounder.splitWord(token)
if decompounded.size() >= 2:
parts = [decompounded.get(j)
for j in range(decompounded.size())]
parts_with_hyphens = ['-' if not p else p
for p in parts]
tokens.append(">><<".join(parts_with_hyphens))
del decompounded
else:
tokens.append(token)
else:
tokens.append(token)
print(" ".join(tokens))
# except:
# javabridge.kill_vm()
# exit(1)
finally:
javabridge.kill_vm()
if __name__ == "__main__":
main()
|
HossainKhademian/XBMC
|
refs/heads/master
|
lib/gtest/test/gtest_xml_outfiles_test.py
|
2526
|
#!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for the gtest_xml_output module."""
__author__ = "keith.ray@gmail.com (Keith Ray)"
import os
from xml.dom import minidom, Node
import gtest_test_utils
import gtest_xml_test_utils
GTEST_OUTPUT_SUBDIR = "xml_outfiles"
GTEST_OUTPUT_1_TEST = "gtest_xml_outfile1_test_"
GTEST_OUTPUT_2_TEST = "gtest_xml_outfile2_test_"
EXPECTED_XML_1 = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="1" failures="0" disabled="0" errors="0" time="*" timestamp="*" name="AllTests">
<testsuite name="PropertyOne" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="TestSomeProperties" status="run" time="*" classname="PropertyOne" SetUpProp="1" TestSomeProperty="1" TearDownProp="1" />
</testsuite>
</testsuites>
"""
EXPECTED_XML_2 = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="1" failures="0" disabled="0" errors="0" time="*" timestamp="*" name="AllTests">
<testsuite name="PropertyTwo" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="TestSomeProperties" status="run" time="*" classname="PropertyTwo" SetUpProp="2" TestSomeProperty="2" TearDownProp="2" />
</testsuite>
</testsuites>
"""
class GTestXMLOutFilesTest(gtest_xml_test_utils.GTestXMLTestCase):
"""Unit test for Google Test's XML output functionality."""
def setUp(self):
# We want the trailing '/' that the last "" provides in os.path.join, for
# telling Google Test to create an output directory instead of a single file
# for xml output.
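    # (Editor's note) For example, os.path.join("/tmp/gtest", "xml_outfiles", "")
    # evaluates to "/tmp/gtest/xml_outfiles/"; the trailing separator is what
    # makes --gtest_output=xml: treat the path as a directory.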
self.output_dir_ = os.path.join(gtest_test_utils.GetTempDir(),
GTEST_OUTPUT_SUBDIR, "")
self.DeleteFilesAndDir()
def tearDown(self):
self.DeleteFilesAndDir()
def DeleteFilesAndDir(self):
try:
os.remove(os.path.join(self.output_dir_, GTEST_OUTPUT_1_TEST + ".xml"))
except os.error:
pass
try:
os.remove(os.path.join(self.output_dir_, GTEST_OUTPUT_2_TEST + ".xml"))
except os.error:
pass
try:
os.rmdir(self.output_dir_)
except os.error:
pass
def testOutfile1(self):
self._TestOutFile(GTEST_OUTPUT_1_TEST, EXPECTED_XML_1)
def testOutfile2(self):
self._TestOutFile(GTEST_OUTPUT_2_TEST, EXPECTED_XML_2)
def _TestOutFile(self, test_name, expected_xml):
gtest_prog_path = gtest_test_utils.GetTestExecutablePath(test_name)
command = [gtest_prog_path, "--gtest_output=xml:%s" % self.output_dir_]
p = gtest_test_utils.Subprocess(command,
working_dir=gtest_test_utils.GetTempDir())
self.assert_(p.exited)
self.assertEquals(0, p.exit_code)
# TODO(wan@google.com): libtool causes the built test binary to be
# named lt-gtest_xml_outfiles_test_ instead of
    # gtest_xml_outfiles_test_.  To account for this possibility, we
# allow both names in the following code. We should remove this
# hack when Chandler Carruth's libtool replacement tool is ready.
output_file_name1 = test_name + ".xml"
output_file1 = os.path.join(self.output_dir_, output_file_name1)
output_file_name2 = 'lt-' + output_file_name1
output_file2 = os.path.join(self.output_dir_, output_file_name2)
self.assert_(os.path.isfile(output_file1) or os.path.isfile(output_file2),
output_file1)
expected = minidom.parseString(expected_xml)
if os.path.isfile(output_file1):
actual = minidom.parse(output_file1)
else:
actual = minidom.parse(output_file2)
self.NormalizeXml(actual.documentElement)
self.AssertEquivalentNodes(expected.documentElement,
actual.documentElement)
expected.unlink()
actual.unlink()
if __name__ == "__main__":
os.environ["GTEST_STACK_TRACE_DEPTH"] = "0"
gtest_test_utils.Main()
|
GeoNode/geonode
|
refs/heads/master
|
geonode/documents/migrations/0028_auto_20170801_1228_squashed_0035_auto_20190404_0820.py
|
6
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.20 on 2019-04-04 08:24
from django.db import migrations, models
import django.db.models.manager
class Migration(migrations.Migration):
replaces = [('documents', '0028_auto_20170801_1228'), ('documents', '0029_auto_20180301_1947'), ('documents', '0030_auto_20180302_0430'), ('documents', '0031_auto_20180409_1238'), ('documents', '0032_auto_20180412_0822'), ('documents', '0033_auto_20180414_2120'), ('documents', '0034_auto_20190329_1652'), ('documents', '0035_auto_20190404_0820')]
dependencies = [
('documents', '27_drop_resource_columns_from_document_table'),
]
operations = [
migrations.AlterField(
model_name='document',
name='abstract_en',
field=models.TextField(blank=True, help_text='brief narrative summary of the content of the resource(s)', max_length=2000, null=True, verbose_name='abstract'),
),
migrations.AlterField(
model_name='document',
name='data_quality_statement_en',
field=models.TextField(blank=True, help_text="general explanation of the data producer's knowledge about the lineage of a dataset", max_length=2000, null=True, verbose_name='data quality statement'),
),
migrations.AlterField(
model_name='document',
name='purpose_en',
field=models.TextField(blank=True, help_text='summary of the intentions with which the resource(s) was developed', max_length=500, null=True, verbose_name='purpose'),
),
migrations.AlterField(
model_name='document',
name='supplemental_information_en',
field=models.TextField(default='No information provided', help_text='any other descriptive information about the dataset', max_length=2000, null=True, verbose_name='supplemental information'),
),
migrations.AlterModelManagers(
name='document',
managers=[
('objects', django.db.models.manager.Manager()),
('base_objects', django.db.models.manager.Manager()),
],
),
migrations.AlterModelOptions(
name='document',
options={'base_manager_name': 'objects'},
),
migrations.AlterModelManagers(
name='document',
managers=[
],
),
migrations.AlterModelOptions(
name='document',
options={},
),
migrations.AlterModelManagers(
name='document',
managers=[
('objects', django.db.models.manager.Manager()),
('base_objects', django.db.models.manager.Manager()),
],
),
]
|
friendly-of-python/flask-online-store
|
refs/heads/master
|
flask_online_store/forms/admin/security.py
|
1
|
from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField, PasswordField
from wtforms.validators import DataRequired, Email, EqualTo, Length
class LoginForm(FlaskForm):
username = StringField('username',
validators=[
DataRequired()
])
password = PasswordField('password',
validators=[
DataRequired()
])
class RegisterForm(FlaskForm):
username = StringField('username',
validators=[
                               DataRequired(message=u'Username must not be empty')
],
render_kw={
                               'placeholder': u'Username'
})
email = StringField('email',
validators=[
                            DataRequired(message=u'Email must not be empty'),
                            Email(message=u'Please enter a valid email address')
],
render_kw={
'placeholder': u'Email'
})
password = PasswordField('password',
validators=[
                                 DataRequired(message=u'Password must not be empty'),
EqualTo('password_confirm',
message='Passwords must match'),
Length(6, 20)
],
render_kw={
'placeholder': u'Password'
})
password_confirm = PasswordField('repeat password',
render_kw={
'placeholder': u'Password Again'
})
validate_code = StringField('validate code',
validators=[
                                    DataRequired(message=u'Verification code must not be empty')
],
render_kw={
                                    'placeholder': u'Please enter the verification code'
})
|
devdelay/home-assistant
|
refs/heads/dev
|
homeassistant/components/light/demo.py
|
3
|
"""
Demo light platform that implements lights.
For more details about this platform, please refer to the documentation
https://home-assistant.io/components/demo/
"""
import random
from homeassistant.components.light import (
ATTR_BRIGHTNESS, ATTR_COLOR_TEMP, ATTR_RGB_COLOR, Light)
LIGHT_COLORS = [
[237, 224, 33],
[255, 63, 111],
]
LIGHT_TEMPS = [240, 380]
def setup_platform(hass, config, add_devices_callback, discovery_info=None):
"""Setup the demo light platform."""
add_devices_callback([
DemoLight("Bed Light", False),
DemoLight("Ceiling Lights", True, LIGHT_COLORS[0], LIGHT_TEMPS[1]),
DemoLight("Kitchen Lights", True, LIGHT_COLORS[1], LIGHT_TEMPS[0])
])
class DemoLight(Light):
"""Represenation of a demo light."""
# pylint: disable=too-many-arguments
def __init__(self, name, state, rgb=None, ct=None, brightness=180):
"""Initialize the light."""
self._name = name
self._state = state
self._rgb = rgb or random.choice(LIGHT_COLORS)
self._ct = ct or random.choice(LIGHT_TEMPS)
self._brightness = brightness
@property
def should_poll(self):
"""No polling needed for a demo light."""
return False
@property
def name(self):
"""Return the name of the light if any."""
return self._name
@property
def brightness(self):
"""Return the brightness of this light between 0..255."""
return self._brightness
@property
def rgb_color(self):
"""Return the RBG color value."""
return self._rgb
@property
def color_temp(self):
"""Return the CT color temperature."""
return self._ct
@property
def is_on(self):
"""Return true if light is on."""
return self._state
def turn_on(self, **kwargs):
"""Turn the light on."""
self._state = True
if ATTR_RGB_COLOR in kwargs:
self._rgb = kwargs[ATTR_RGB_COLOR]
if ATTR_COLOR_TEMP in kwargs:
self._ct = kwargs[ATTR_COLOR_TEMP]
if ATTR_BRIGHTNESS in kwargs:
self._brightness = kwargs[ATTR_BRIGHTNESS]
self.update_ha_state()
def turn_off(self, **kwargs):
"""Turn the light off."""
self._state = False
self.update_ha_state()
|
trishnaguha/ansible
|
refs/heads/devel
|
test/integration/targets/script/files/no_shebang.py
|
97
|
import sys
sys.stdout.write("Script with shebang omitted")
|
zzzeek/sqlalchemy
|
refs/heads/master
|
lib/sqlalchemy/sql/sqltypes.py
|
2
|
# sql/sqltypes.py
# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""SQL specific types.
"""
import codecs
import datetime as dt
import decimal
import json
from . import coercions
from . import elements
from . import operators
from . import roles
from . import type_api
from .base import _bind_or_error
from .base import NO_ARG
from .base import SchemaEventTarget
from .elements import _NONE_NAME
from .elements import quoted_name
from .elements import Slice
from .elements import TypeCoerce as type_coerce # noqa
from .traversals import HasCacheKey
from .traversals import InternalTraversal
from .type_api import Emulated
from .type_api import NativeForEmulated # noqa
from .type_api import to_instance
from .type_api import TypeDecorator
from .type_api import TypeEngine
from .type_api import Variant
from .. import event
from .. import exc
from .. import inspection
from .. import processors
from .. import util
from ..util import compat
from ..util import langhelpers
from ..util import OrderedDict
from ..util import pickle
class _LookupExpressionAdapter(object):
"""Mixin expression adaptations based on lookup tables.
These rules are currently used by the numeric, integer and date types
which have detailed cross-expression coercion rules.
"""
@property
def _expression_adaptations(self):
raise NotImplementedError()
class Comparator(TypeEngine.Comparator):
_blank_dict = util.immutabledict()
def _adapt_expression(self, op, other_comparator):
othertype = other_comparator.type._type_affinity
lookup = self.type._expression_adaptations.get(
op, self._blank_dict
).get(othertype, self.type)
if lookup is othertype:
return (op, other_comparator.type)
elif lookup is self.type._type_affinity:
return (op, self.type)
else:
return (op, to_instance(lookup))
comparator_factory = Comparator
class Concatenable(object):
"""A mixin that marks a type as supporting 'concatenation',
typically strings."""
class Comparator(TypeEngine.Comparator):
def _adapt_expression(self, op, other_comparator):
if op is operators.add and isinstance(
other_comparator,
(Concatenable.Comparator, NullType.Comparator),
):
return operators.concat_op, self.expr.type
else:
return super(Concatenable.Comparator, self)._adapt_expression(
op, other_comparator
)
comparator_factory = Comparator
class Indexable(object):
"""A mixin that marks a type as supporting indexing operations,
such as array or JSON structures.
.. versionadded:: 1.1.0
"""
class Comparator(TypeEngine.Comparator):
def _setup_getitem(self, index):
raise NotImplementedError()
def __getitem__(self, index):
(
adjusted_op,
adjusted_right_expr,
result_type,
) = self._setup_getitem(index)
return self.operate(
adjusted_op, adjusted_right_expr, result_type=result_type
)
comparator_factory = Comparator
class String(Concatenable, TypeEngine):
"""The base for all string and character types.
In SQL, corresponds to VARCHAR. Can also take Python unicode objects
and encode to the database's encoding in bind params (and the reverse for
result sets.)
The `length` field is usually required when the `String` type is
used within a CREATE TABLE statement, as VARCHAR requires a length
on most databases.
"""
__visit_name__ = "string"
RETURNS_UNICODE = util.symbol(
"RETURNS_UNICODE",
"""Indicates that the DBAPI returns Python Unicode for VARCHAR,
NVARCHAR, and other character-based datatypes in all cases.
This is the default value for
:attr:`.DefaultDialect.returns_unicode_strings` under Python 3.
.. versionadded:: 1.4
""",
)
RETURNS_BYTES = util.symbol(
"RETURNS_BYTES",
"""Indicates that the DBAPI returns byte objects under Python 3
or non-Unicode string objects under Python 2 for VARCHAR, NVARCHAR,
and other character-based datatypes in all cases.
This may be applied to the
:attr:`.DefaultDialect.returns_unicode_strings` attribute.
.. versionadded:: 1.4
""",
)
RETURNS_CONDITIONAL = util.symbol(
"RETURNS_CONDITIONAL",
"""Indicates that the DBAPI may return Unicode or bytestrings for
VARCHAR, NVARCHAR, and other character-based datatypes, and that
SQLAlchemy's default String datatype will need to test on a per-row
basis for Unicode or bytes.
This may be applied to the
:attr:`.DefaultDialect.returns_unicode_strings` attribute.
.. versionadded:: 1.4
""",
)
RETURNS_UNKNOWN = util.symbol(
"RETURNS_UNKNOWN",
"""Indicates that the dialect should test on first connect what the
string-returning behavior of character-based datatypes is.
This is the default value for DefaultDialect.unicode_returns under
Python 2.
This may be applied to the
:attr:`.DefaultDialect.returns_unicode_strings` attribute under
Python 2 only. The value is disallowed under Python 3.
.. versionadded:: 1.4
.. deprecated:: 1.4 This value will be removed in SQLAlchemy 2.0.
""",
)
@util.deprecated_params(
convert_unicode=(
"1.3",
"The :paramref:`.String.convert_unicode` parameter is deprecated "
"and will be removed in a future release. All modern DBAPIs "
"now support Python Unicode directly and this parameter is "
"unnecessary.",
),
unicode_error=(
"1.3",
"The :paramref:`.String.unicode_errors` parameter is deprecated "
"and will be removed in a future release. This parameter is "
"unnecessary for modern Python DBAPIs and degrades performance "
"significantly.",
),
)
def __init__(
self,
length=None,
collation=None,
convert_unicode=False,
unicode_error=None,
_warn_on_bytestring=False,
_expect_unicode=False,
):
"""
Create a string-holding type.
:param length: optional, a length for the column for use in
DDL and CAST expressions. May be safely omitted if no ``CREATE
TABLE`` will be issued. Certain databases may require a
``length`` for use in DDL, and will raise an exception when
the ``CREATE TABLE`` DDL is issued if a ``VARCHAR``
with no length is included. Whether the value is
interpreted as bytes or characters is database specific.
:param collation: Optional, a column-level collation for
use in DDL and CAST expressions. Renders using the
COLLATE keyword supported by SQLite, MySQL, and PostgreSQL.
E.g.::
>>> from sqlalchemy import cast, select, String
>>> print(select(cast('some string', String(collation='utf8'))))
SELECT CAST(:param_1 AS VARCHAR COLLATE utf8) AS anon_1
:param convert_unicode: When set to ``True``, the
:class:`.String` type will assume that
input is to be passed as Python Unicode objects under Python 2,
and results returned as Python Unicode objects.
In the rare circumstance that the DBAPI does not support
Python unicode under Python 2, SQLAlchemy will use its own
encoder/decoder functionality on strings, referring to the
value of the :paramref:`_sa.create_engine.encoding` parameter
parameter passed to :func:`_sa.create_engine` as the encoding.
For the extremely rare case that Python Unicode
is to be encoded/decoded by SQLAlchemy on a backend
that *does* natively support Python Unicode,
the string value ``"force"`` can be passed here which will
cause SQLAlchemy's encode/decode services to be
used unconditionally.
.. note::
SQLAlchemy's unicode-conversion flags and features only apply
to Python 2; in Python 3, all string objects are Unicode objects.
For this reason, as well as the fact that virtually all modern
DBAPIs now support Unicode natively even under Python 2,
the :paramref:`.String.convert_unicode` flag is inherently a
legacy feature.
.. note::
In the vast majority of cases, the :class:`.Unicode` or
:class:`.UnicodeText` datatypes should be used for a
:class:`_schema.Column` that expects to store non-ascii data.
These
datatypes will ensure that the correct types are used on the
database side as well as set up the correct Unicode behaviors
under Python 2.
.. seealso::
:paramref:`_sa.create_engine.convert_unicode` -
:class:`_engine.Engine`-wide parameter
:param unicode_error: Optional, a method to use to handle Unicode
conversion errors. Behaves like the ``errors`` keyword argument to
the standard library's ``string.decode()`` functions, requires
that :paramref:`.String.convert_unicode` is set to
``"force"``
"""
if unicode_error is not None and convert_unicode != "force":
raise exc.ArgumentError(
"convert_unicode must be 'force' " "when unicode_error is set."
)
self.length = length
self.collation = collation
self._expect_unicode = convert_unicode or _expect_unicode
self._expect_unicode_error = unicode_error
self._warn_on_bytestring = _warn_on_bytestring
def literal_processor(self, dialect):
def process(value):
value = value.replace("'", "''")
if dialect.identifier_preparer._double_percents:
value = value.replace("%", "%%")
return "'%s'" % value
return process
def bind_processor(self, dialect):
if self._expect_unicode or dialect.convert_unicode:
if (
dialect.supports_unicode_binds
and self._expect_unicode != "force"
):
if self._warn_on_bytestring:
def process(value):
if isinstance(value, util.binary_type):
util.warn_limited(
"Unicode type received non-unicode "
"bind param value %r.",
(util.ellipses_string(value),),
)
return value
return process
else:
return None
else:
encoder = codecs.getencoder(dialect.encoding)
warn_on_bytestring = self._warn_on_bytestring
def process(value):
if isinstance(value, util.text_type):
return encoder(value, self._expect_unicode_error)[0]
elif warn_on_bytestring and value is not None:
util.warn_limited(
"Unicode type received non-unicode bind "
"param value %r.",
(util.ellipses_string(value),),
)
return value
return process
else:
return None
def result_processor(self, dialect, coltype):
wants_unicode = self._expect_unicode or dialect.convert_unicode
needs_convert = wants_unicode and (
dialect.returns_unicode_strings is not String.RETURNS_UNICODE
or self._expect_unicode in ("force", "force_nocheck")
)
needs_isinstance = (
needs_convert
and dialect.returns_unicode_strings
in (
String.RETURNS_CONDITIONAL,
String.RETURNS_UNICODE,
)
and self._expect_unicode != "force_nocheck"
)
if needs_convert:
if needs_isinstance:
return processors.to_conditional_unicode_processor_factory(
dialect.encoding, self._expect_unicode_error
)
else:
return processors.to_unicode_processor_factory(
dialect.encoding, self._expect_unicode_error
)
else:
return None
@property
def python_type(self):
if self._expect_unicode:
return util.text_type
else:
return str
def get_dbapi_type(self, dbapi):
return dbapi.STRING
@classmethod
def _warn_deprecated_unicode(cls):
util.warn_deprecated(
"The convert_unicode on Engine and String as well as the "
"unicode_error flag on String are deprecated. All modern "
"DBAPIs now support Python Unicode natively under Python 2, and "
"under Python 3 all strings are inherently Unicode. These flags "
"will be removed in a future release.",
version="1.3",
)
class Text(String):
"""A variably sized string type.
In SQL, usually corresponds to CLOB or TEXT. Can also take Python
unicode objects and encode to the database's encoding in bind
params (and the reverse for result sets.) In general, TEXT objects
do not have a length; while some databases will accept a length
argument here, it will be rejected by others.
"""
__visit_name__ = "text"
class Unicode(String):
"""A variable length Unicode string type.
The :class:`.Unicode` type is a :class:`.String` subclass that assumes
input and output strings that may contain non-ASCII characters, and for
    some backends implies an underlying column type that explicitly
    supports non-ASCII data, such as ``NVARCHAR`` on Oracle and SQL
Server. This will impact the output of ``CREATE TABLE`` statements and
``CAST`` functions at the dialect level, and also in some cases will
indicate different behavior in the DBAPI itself in how it handles bound
parameters.
The character encoding used by the :class:`.Unicode` type that is used to
transmit and receive data to the database is usually determined by the
DBAPI itself. All modern DBAPIs accommodate non-ASCII strings but may have
different methods of managing database encodings; if necessary, this
encoding should be configured as detailed in the notes for the target DBAPI
in the :ref:`dialect_toplevel` section.
In modern SQLAlchemy, use of the :class:`.Unicode` datatype does not
typically imply any encoding/decoding behavior within SQLAlchemy itself.
Historically, when DBAPIs did not support Python ``unicode`` objects under
Python 2, SQLAlchemy handled unicode encoding/decoding services itself
which would be controlled by the flag :paramref:`.String.convert_unicode`;
this flag is deprecated as it is no longer needed for Python 3.
When using Python 2, data that is passed to columns that use the
:class:`.Unicode` datatype must be of type ``unicode``, and not ``str``
which in Python 2 is equivalent to ``bytes``. In Python 3, all data
passed to columns that use the :class:`.Unicode` datatype should be
of type ``str``. See the flag :paramref:`.String.convert_unicode` for
more discussion of unicode encode/decode behavior under Python 2.
.. warning:: Some database backends, particularly SQL Server with pyodbc,
are known to have undesirable behaviors regarding data that is noted
as being of ``NVARCHAR`` type as opposed to ``VARCHAR``, including
datatype mismatch errors and non-use of indexes. See the section
on :meth:`.DialectEvents.do_setinputsizes` for background on working
around unicode character issues for backends like SQL Server with
pyodbc as well as cx_Oracle.
.. seealso::
:class:`.UnicodeText` - unlengthed textual counterpart
to :class:`.Unicode`.
:paramref:`.String.convert_unicode`
:meth:`.DialectEvents.do_setinputsizes`
"""
__visit_name__ = "unicode"
def __init__(self, length=None, **kwargs):
"""
Create a :class:`.Unicode` object.
Parameters are the same as that of :class:`.String`,
with the exception that ``convert_unicode``
defaults to ``True``.
"""
kwargs.setdefault("_expect_unicode", True)
kwargs.setdefault("_warn_on_bytestring", True)
super(Unicode, self).__init__(length=length, **kwargs)
class UnicodeText(Text):
"""An unbounded-length Unicode string type.
See :class:`.Unicode` for details on the unicode
behavior of this object.
    Like :class:`.Unicode`, use of the :class:`.UnicodeText` type implies a
unicode-capable type being used on the backend, such as
``NCLOB``, ``NTEXT``.
"""
__visit_name__ = "unicode_text"
def __init__(self, length=None, **kwargs):
"""
Create a Unicode-converting Text type.
Parameters are the same as that of :class:`_expression.TextClause`,
with the exception that ``convert_unicode``
defaults to ``True``.
"""
kwargs.setdefault("_expect_unicode", True)
kwargs.setdefault("_warn_on_bytestring", True)
super(UnicodeText, self).__init__(length=length, **kwargs)
def _warn_deprecated_unicode(self):
pass
class Integer(_LookupExpressionAdapter, TypeEngine):
"""A type for ``int`` integers."""
__visit_name__ = "integer"
def get_dbapi_type(self, dbapi):
return dbapi.NUMBER
@property
def python_type(self):
return int
def literal_processor(self, dialect):
def process(value):
return str(int(value))
return process
@util.memoized_property
def _expression_adaptations(self):
# TODO: need a dictionary object that will
# handle operators generically here, this is incomplete
return {
operators.add: {
Date: Date,
Integer: self.__class__,
Numeric: Numeric,
},
operators.mul: {
Interval: Interval,
Integer: self.__class__,
Numeric: Numeric,
},
operators.div: {Integer: self.__class__, Numeric: Numeric},
operators.truediv: {Integer: self.__class__, Numeric: Numeric},
operators.sub: {Integer: self.__class__, Numeric: Numeric},
}
class SmallInteger(Integer):
"""A type for smaller ``int`` integers.
Typically generates a ``SMALLINT`` in DDL, and otherwise acts like
a normal :class:`.Integer` on the Python side.
"""
__visit_name__ = "small_integer"
class BigInteger(Integer):
"""A type for bigger ``int`` integers.
Typically generates a ``BIGINT`` in DDL, and otherwise acts like
a normal :class:`.Integer` on the Python side.
"""
__visit_name__ = "big_integer"
class Numeric(_LookupExpressionAdapter, TypeEngine):
"""A type for fixed precision numbers, such as ``NUMERIC`` or ``DECIMAL``.
This type returns Python ``decimal.Decimal`` objects by default, unless
the :paramref:`.Numeric.asdecimal` flag is set to False, in which case
they are coerced to Python ``float`` objects.
.. note::
The :class:`.Numeric` type is designed to receive data from a database
type that is explicitly known to be a decimal type
(e.g. ``DECIMAL``, ``NUMERIC``, others) and not a floating point
type (e.g. ``FLOAT``, ``REAL``, others).
If the database column on the server is in fact a floating-point
type, such as ``FLOAT`` or ``REAL``, use the :class:`.Float`
type or a subclass, otherwise numeric coercion between
``float``/``Decimal`` may or may not function as expected.
.. note::
The Python ``decimal.Decimal`` class is generally slow
performing; cPython 3.3 has now switched to use the `cdecimal
<http://pypi.python.org/pypi/cdecimal/>`_ library natively. For
older Python versions, the ``cdecimal`` library can be patched
into any application where it will replace the ``decimal``
library fully, however this needs to be applied globally and
before any other modules have been imported, as follows::
import sys
import cdecimal
sys.modules["decimal"] = cdecimal
Note that the ``cdecimal`` and ``decimal`` libraries are **not
compatible with each other**, so patching ``cdecimal`` at the
global level is the only way it can be used effectively with
various DBAPIs that hardcode to import the ``decimal`` library.
"""
__visit_name__ = "numeric"
_default_decimal_return_scale = 10
def __init__(
self,
precision=None,
scale=None,
decimal_return_scale=None,
asdecimal=True,
):
"""
Construct a Numeric.
:param precision: the numeric precision for use in DDL ``CREATE
TABLE``.
:param scale: the numeric scale for use in DDL ``CREATE TABLE``.
:param asdecimal: default True. Return whether or not
values should be sent as Python Decimal objects, or
as floats. Different DBAPIs send one or the other based on
datatypes - the Numeric type will ensure that return values
are one or the other across DBAPIs consistently.
:param decimal_return_scale: Default scale to use when converting
from floats to Python decimals. Floating point values will typically
be much longer due to decimal inaccuracy, and most floating point
database types don't have a notion of "scale", so by default the
float type looks for the first ten decimal places when converting.
Specifying this value will override that length. Types which
do include an explicit ".scale" value, such as the base
:class:`.Numeric` as well as the MySQL float types, will use the
value of ".scale" as the default for decimal_return_scale, if not
otherwise specified.
.. versionadded:: 0.9.0
When using the ``Numeric`` type, care should be taken to ensure
that the asdecimal setting is appropriate for the DBAPI in use -
when Numeric applies a conversion from Decimal->float or float->
Decimal, this conversion incurs an additional performance overhead
for all result columns received.
DBAPIs that return Decimal natively (e.g. psycopg2) will have
better accuracy and higher performance with a setting of ``True``,
as the native translation to Decimal reduces the amount of floating-
point issues at play, and the Numeric type itself doesn't need
to apply any further conversions. However, another DBAPI which
returns floats natively *will* incur an additional conversion
overhead, and is still subject to floating point data loss - in
which case ``asdecimal=False`` will at least remove the extra
conversion overhead.
"""
self.precision = precision
self.scale = scale
self.decimal_return_scale = decimal_return_scale
self.asdecimal = asdecimal
@property
def _effective_decimal_return_scale(self):
if self.decimal_return_scale is not None:
return self.decimal_return_scale
elif getattr(self, "scale", None) is not None:
return self.scale
else:
return self._default_decimal_return_scale
def get_dbapi_type(self, dbapi):
return dbapi.NUMBER
def literal_processor(self, dialect):
def process(value):
return str(value)
return process
@property
def python_type(self):
if self.asdecimal:
return decimal.Decimal
else:
return float
def bind_processor(self, dialect):
if dialect.supports_native_decimal:
return None
else:
return processors.to_float
def result_processor(self, dialect, coltype):
if self.asdecimal:
if dialect.supports_native_decimal:
# we're a "numeric", DBAPI will give us Decimal directly
return None
else:
util.warn(
"Dialect %s+%s does *not* support Decimal "
"objects natively, and SQLAlchemy must "
"convert from floating point - rounding "
"errors and other issues may occur. Please "
"consider storing Decimal numbers as strings "
"or integers on this platform for lossless "
"storage." % (dialect.name, dialect.driver)
)
# we're a "numeric", DBAPI returns floats, convert.
return processors.to_decimal_processor_factory(
decimal.Decimal,
self.scale
if self.scale is not None
else self._default_decimal_return_scale,
)
else:
if dialect.supports_native_decimal:
return processors.to_float
else:
return None
@util.memoized_property
def _expression_adaptations(self):
return {
operators.mul: {
Interval: Interval,
Numeric: self.__class__,
Integer: self.__class__,
},
operators.div: {Numeric: self.__class__, Integer: self.__class__},
operators.truediv: {
Numeric: self.__class__,
Integer: self.__class__,
},
operators.add: {Numeric: self.__class__, Integer: self.__class__},
operators.sub: {Numeric: self.__class__, Integer: self.__class__},
}
class Float(Numeric):
"""Type representing floating point types, such as ``FLOAT`` or ``REAL``.
This type returns Python ``float`` objects by default, unless the
:paramref:`.Float.asdecimal` flag is set to True, in which case they
are coerced to ``decimal.Decimal`` objects.
.. note::
The :class:`.Float` type is designed to receive data from a database
type that is explicitly known to be a floating point type
(e.g. ``FLOAT``, ``REAL``, others)
and not a decimal type (e.g. ``DECIMAL``, ``NUMERIC``, others).
If the database column on the server is in fact a Numeric
type, such as ``DECIMAL`` or ``NUMERIC``, use the :class:`.Numeric`
type or a subclass, otherwise numeric coercion between
``float``/``Decimal`` may or may not function as expected.
"""
__visit_name__ = "float"
scale = None
def __init__(
self, precision=None, asdecimal=False, decimal_return_scale=None
):
r"""
Construct a Float.
:param precision: the numeric precision for use in DDL ``CREATE
TABLE``.
:param asdecimal: the same flag as that of :class:`.Numeric`, but
defaults to ``False``. Note that setting this flag to ``True``
results in floating point conversion.
:param decimal_return_scale: Default scale to use when converting
from floats to Python decimals. Floating point values will typically
be much longer due to decimal inaccuracy, and most floating point
database types don't have a notion of "scale", so by default the
float type looks for the first ten decimal places when converting.
Specifying this value will override that length. Note that the
MySQL float types, which do include "scale", will use "scale"
as the default for decimal_return_scale, if not otherwise specified.
.. versionadded:: 0.9.0
"""
self.precision = precision
self.asdecimal = asdecimal
self.decimal_return_scale = decimal_return_scale
def result_processor(self, dialect, coltype):
if self.asdecimal:
return processors.to_decimal_processor_factory(
decimal.Decimal, self._effective_decimal_return_scale
)
elif dialect.supports_native_decimal:
return processors.to_float
else:
return None
class DateTime(_LookupExpressionAdapter, TypeEngine):
"""A type for ``datetime.datetime()`` objects.
Date and time types return objects from the Python ``datetime``
module. Most DBAPIs have built in support for the datetime
module, with the noted exception of SQLite. In the case of
SQLite, date and time types are stored as strings which are then
converted back to datetime objects when rows are returned.
For the time representation within the datetime type, some
backends include additional options, such as timezone support and
fractional seconds support. For fractional seconds, use the
dialect-specific datatype, such as :class:`.mysql.TIME`. For
timezone support, use at least the :class:`_types.TIMESTAMP` datatype,
if not the dialect-specific datatype object.
"""
__visit_name__ = "datetime"
def __init__(self, timezone=False):
"""Construct a new :class:`.DateTime`.
:param timezone: boolean. Indicates that the datetime type should
enable timezone support, if available on the
**base date/time-holding type only**. It is recommended
to make use of the :class:`_types.TIMESTAMP` datatype directly when
using this flag, as some databases include separate generic
date/time-holding types distinct from the timezone-capable
TIMESTAMP datatype, such as Oracle.
"""
self.timezone = timezone
def get_dbapi_type(self, dbapi):
return dbapi.DATETIME
@property
def python_type(self):
return dt.datetime
@util.memoized_property
def _expression_adaptations(self):
# Based on http://www.postgresql.org/docs/current/\
# static/functions-datetime.html.
return {
operators.add: {Interval: self.__class__},
operators.sub: {Interval: self.__class__, DateTime: Interval},
}
class Date(_LookupExpressionAdapter, TypeEngine):
"""A type for ``datetime.date()`` objects."""
__visit_name__ = "date"
def get_dbapi_type(self, dbapi):
return dbapi.DATETIME
@property
def python_type(self):
return dt.date
@util.memoized_property
def _expression_adaptations(self):
# Based on http://www.postgresql.org/docs/current/\
# static/functions-datetime.html.
return {
operators.add: {
Integer: self.__class__,
Interval: DateTime,
Time: DateTime,
},
operators.sub: {
# date - integer = date
Integer: self.__class__,
# date - date = integer.
Date: Integer,
Interval: DateTime,
# date - datetime = interval,
# this one is not in the PG docs
# but works
DateTime: Interval,
},
}
class Time(_LookupExpressionAdapter, TypeEngine):
"""A type for ``datetime.time()`` objects."""
__visit_name__ = "time"
def __init__(self, timezone=False):
self.timezone = timezone
def get_dbapi_type(self, dbapi):
return dbapi.DATETIME
@property
def python_type(self):
return dt.time
@util.memoized_property
def _expression_adaptations(self):
# Based on http://www.postgresql.org/docs/current/\
# static/functions-datetime.html.
return {
operators.add: {Date: DateTime, Interval: self.__class__},
operators.sub: {Time: Interval, Interval: self.__class__},
}
class _Binary(TypeEngine):
"""Define base behavior for binary types."""
def __init__(self, length=None):
self.length = length
def literal_processor(self, dialect):
def process(value):
value = value.decode(dialect.encoding).replace("'", "''")
return "'%s'" % value
return process
@property
def python_type(self):
return util.binary_type
# Python 3 - sqlite3 doesn't need the `Binary` conversion
# here, though pg8000 does to indicate "bytea"
def bind_processor(self, dialect):
if dialect.dbapi is None:
return None
DBAPIBinary = dialect.dbapi.Binary
def process(value):
if value is not None:
return DBAPIBinary(value)
else:
return None
return process
# Python 3 has native bytes() type
# both sqlite3 and pg8000 seem to return it,
# psycopg2 as of 2.5 returns 'memoryview'
if util.py2k:
def result_processor(self, dialect, coltype):
return processors.to_str
else:
def result_processor(self, dialect, coltype):
def process(value):
if value is not None:
value = bytes(value)
return value
return process
def coerce_compared_value(self, op, value):
"""See :meth:`.TypeEngine.coerce_compared_value` for a description."""
if isinstance(value, util.string_types):
return self
else:
return super(_Binary, self).coerce_compared_value(op, value)
def get_dbapi_type(self, dbapi):
return dbapi.BINARY
class LargeBinary(_Binary):
"""A type for large binary byte data.
The :class:`.LargeBinary` type corresponds to a large and/or unlengthed
binary type for the target platform, such as BLOB on MySQL and BYTEA for
PostgreSQL. It also handles the necessary conversions for the DBAPI.
"""
__visit_name__ = "large_binary"
def __init__(self, length=None):
"""
Construct a LargeBinary type.
:param length: optional, a length for the column for use in
DDL statements, for those binary types that accept a length,
such as the MySQL BLOB type.
"""
_Binary.__init__(self, length=length)
class SchemaType(SchemaEventTarget):
"""Mark a type as possibly requiring schema-level DDL for usage.
Supports types that must be explicitly created/dropped (i.e. PG ENUM type)
    as well as types that are complemented by table or schema level
constraints, triggers, and other rules.
:class:`.SchemaType` classes can also be targets for the
:meth:`.DDLEvents.before_parent_attach` and
:meth:`.DDLEvents.after_parent_attach` events, where the events fire off
surrounding the association of the type object with a parent
:class:`_schema.Column`.
.. seealso::
:class:`.Enum`
:class:`.Boolean`
"""
_use_schema_map = True
def __init__(
self,
name=None,
schema=None,
metadata=None,
inherit_schema=False,
quote=None,
_create_events=True,
):
if name is not None:
self.name = quoted_name(name, quote)
else:
self.name = None
self.schema = schema
self.metadata = metadata
self.inherit_schema = inherit_schema
self._create_events = _create_events
if _create_events and self.metadata:
event.listen(
self.metadata,
"before_create",
util.portable_instancemethod(self._on_metadata_create),
)
event.listen(
self.metadata,
"after_drop",
util.portable_instancemethod(self._on_metadata_drop),
)
def _set_parent(self, column, **kw):
column._on_table_attach(util.portable_instancemethod(self._set_table))
def _variant_mapping_for_set_table(self, column):
if isinstance(column.type, Variant):
variant_mapping = column.type.mapping.copy()
variant_mapping["_default"] = column.type.impl
else:
variant_mapping = None
return variant_mapping
def _set_table(self, column, table):
if self.inherit_schema:
self.schema = table.schema
elif self.metadata and self.schema is None and self.metadata.schema:
self.schema = self.metadata.schema
if not self._create_events:
return
variant_mapping = self._variant_mapping_for_set_table(column)
event.listen(
table,
"before_create",
util.portable_instancemethod(
self._on_table_create, {"variant_mapping": variant_mapping}
),
)
event.listen(
table,
"after_drop",
util.portable_instancemethod(
self._on_table_drop, {"variant_mapping": variant_mapping}
),
)
if self.metadata is None:
# TODO: what's the difference between self.metadata
# and table.metadata here ?
event.listen(
table.metadata,
"before_create",
util.portable_instancemethod(
self._on_metadata_create,
{"variant_mapping": variant_mapping},
),
)
event.listen(
table.metadata,
"after_drop",
util.portable_instancemethod(
self._on_metadata_drop,
{"variant_mapping": variant_mapping},
),
)
def copy(self, **kw):
return self.adapt(self.__class__, _create_events=True)
def adapt(self, impltype, **kw):
schema = kw.pop("schema", self.schema)
metadata = kw.pop("metadata", self.metadata)
_create_events = kw.pop("_create_events", False)
return impltype(
name=self.name,
schema=schema,
inherit_schema=self.inherit_schema,
metadata=metadata,
_create_events=_create_events,
**kw
)
@property
def bind(self):
return self.metadata and self.metadata.bind or None
def create(self, bind=None, checkfirst=False):
"""Issue CREATE DDL for this type, if applicable."""
if bind is None:
bind = _bind_or_error(self)
t = self.dialect_impl(bind.dialect)
if t.__class__ is not self.__class__ and isinstance(t, SchemaType):
t.create(bind=bind, checkfirst=checkfirst)
def drop(self, bind=None, checkfirst=False):
"""Issue DROP DDL for this type, if applicable."""
if bind is None:
bind = _bind_or_error(self)
t = self.dialect_impl(bind.dialect)
if t.__class__ is not self.__class__ and isinstance(t, SchemaType):
t.drop(bind=bind, checkfirst=checkfirst)
def _on_table_create(self, target, bind, **kw):
if not self._is_impl_for_variant(bind.dialect, kw):
return
t = self.dialect_impl(bind.dialect)
if t.__class__ is not self.__class__ and isinstance(t, SchemaType):
t._on_table_create(target, bind, **kw)
def _on_table_drop(self, target, bind, **kw):
if not self._is_impl_for_variant(bind.dialect, kw):
return
t = self.dialect_impl(bind.dialect)
if t.__class__ is not self.__class__ and isinstance(t, SchemaType):
t._on_table_drop(target, bind, **kw)
def _on_metadata_create(self, target, bind, **kw):
if not self._is_impl_for_variant(bind.dialect, kw):
return
t = self.dialect_impl(bind.dialect)
if t.__class__ is not self.__class__ and isinstance(t, SchemaType):
t._on_metadata_create(target, bind, **kw)
def _on_metadata_drop(self, target, bind, **kw):
if not self._is_impl_for_variant(bind.dialect, kw):
return
t = self.dialect_impl(bind.dialect)
if t.__class__ is not self.__class__ and isinstance(t, SchemaType):
t._on_metadata_drop(target, bind, **kw)
def _is_impl_for_variant(self, dialect, kw):
variant_mapping = kw.pop("variant_mapping", None)
if variant_mapping is None:
return True
# since PostgreSQL is the only DB that has ARRAY this can only
# be integration tested by PG-specific tests
def _we_are_the_impl(typ):
return (
typ is self or isinstance(typ, ARRAY) and typ.item_type is self
)
if dialect.name in variant_mapping and _we_are_the_impl(
variant_mapping[dialect.name]
):
return True
elif dialect.name not in variant_mapping:
return _we_are_the_impl(variant_mapping["_default"])
class Enum(Emulated, String, SchemaType):
"""Generic Enum Type.
The :class:`.Enum` type provides a set of possible string values
which the column is constrained towards.
The :class:`.Enum` type will make use of the backend's native "ENUM"
type if one is available; otherwise, it uses a VARCHAR datatype.
An option also exists to automatically produce a CHECK constraint
when the VARCHAR (so called "non-native") variant is produced;
see the :paramref:`.Enum.create_constraint` flag.
The :class:`.Enum` type also provides in-Python validation of string
values during both read and write operations. When reading a value
from the database in a result set, the string value is always checked
against the list of possible values and a ``LookupError`` is raised
if no match is found. When passing a value to the database as a
plain string within a SQL statement, if the
:paramref:`.Enum.validate_strings` parameter is
set to True, a ``LookupError`` is raised for any string value that's
not located in the given list of possible values; note that this
impacts usage of LIKE expressions with enumerated values (an unusual
use case).
.. versionchanged:: 1.1 the :class:`.Enum` type now provides in-Python
validation of input values as well as on data being returned by
the database.
The source of enumerated values may be a list of string values, or
alternatively a PEP-435-compliant enumerated class. For the purposes
of the :class:`.Enum` datatype, this class need only provide a
``__members__`` method.
When using an enumerated class, the enumerated objects are used
both for input and output, rather than strings as is the case with
a plain-string enumerated type::
import enum
class MyEnum(enum.Enum):
one = 1
two = 2
three = 3
t = Table(
'data', MetaData(),
Column('value', Enum(MyEnum))
)
connection.execute(t.insert(), {"value": MyEnum.two})
assert connection.scalar(t.select()) is MyEnum.two
Above, the string names of each element, e.g. "one", "two", "three",
are persisted to the database; the values of the Python Enum, here
indicated as integers, are **not** used; the value of each enum can
therefore be any kind of Python object whether or not it is persistable.
In order to persist the values and not the names, the
:paramref:`.Enum.values_callable` parameter may be used. The value of
this parameter is a user-supplied callable, which is intended to be used
with a PEP-435-compliant enumerated class and returns a list of string
values to be persisted. For a simple enumeration that uses string values,
a callable such as ``lambda x: [e.value for e in x]`` is sufficient.
.. versionadded:: 1.1 - support for PEP-435-style enumerated
classes.
.. seealso::
:class:`_postgresql.ENUM` - PostgreSQL-specific type,
which has additional functionality.
:class:`.mysql.ENUM` - MySQL-specific type
"""
__visit_name__ = "enum"
@util.deprecated_params(
convert_unicode=(
"1.3",
"The :paramref:`.Enum.convert_unicode` parameter is deprecated "
"and will be removed in a future release. All modern DBAPIs "
"now support Python Unicode directly and this parameter is "
"unnecessary.",
)
)
def __init__(self, *enums, **kw):
r"""Construct an enum.
Keyword arguments which don't apply to a specific backend are ignored
by that backend.
:param \*enums: either exactly one PEP-435 compliant enumerated type
or one or more string labels.
.. versionadded:: 1.1 a PEP-435 style enumerated class may be
passed.
:param convert_unicode: Enable unicode-aware bind parameter and
result-set processing for this Enum's data under Python 2 only.
Under Python 2, this is set automatically based on the presence of
unicode label strings. This flag will be removed in SQLAlchemy 2.0.
:param create_constraint: defaults to False. When creating a
non-native enumerated type, also build a CHECK constraint on the
database against the valid values.
.. note:: it is strongly recommended that the CHECK constraint
have an explicit name in order to support schema-management
concerns. This can be established either by setting the
:paramref:`.Enum.name` parameter or by setting up an
appropriate naming convention; see
:ref:`constraint_naming_conventions` for background.
.. versionchanged:: 1.4 - this flag now defaults to False, meaning
no CHECK constraint is generated for a non-native enumerated
type.
:param metadata: Associate this type directly with a ``MetaData``
object. For types that exist on the target database as an
independent schema construct (PostgreSQL), this type will be
created and dropped within ``create_all()`` and ``drop_all()``
operations. If the type is not associated with any ``MetaData``
object, it will associate itself with each ``Table`` in which it is
used, and will be created when any of those individual tables are
created, after a check is performed for its existence. The type is
only dropped when ``drop_all()`` is called for that ``Table``
object's metadata, however.
The value of the :paramref:`_schema.MetaData.schema` parameter of
the :class:`_schema.MetaData` object, if set, will be used as the
default value of the :paramref:`_types.Enum.schema` on this object
if an explicit value is not otherwise supplied.
.. versionchanged:: 1.4.12 :class:`_types.Enum` inherits the
:paramref:`_schema.MetaData.schema` parameter of the
:class:`_schema.MetaData` object if present, when passed using
the :paramref:`_types.Enum.metadata` parameter.
:param name: The name of this type. This is required for PostgreSQL
and any future supported database which requires an explicitly
named type, or an explicitly named constraint in order to generate
the type and/or a table that uses it. If a PEP-435 enumerated
class was used, its name (converted to lower case) is used by
default.
:param native_enum: Use the database's native ENUM type when
available. Defaults to True. When False, uses VARCHAR + check
constraint for all backends. The VARCHAR length can be controlled
with :paramref:`.Enum.length`
:param length: Allows specifying a custom length for the VARCHAR
when :paramref:`.Enum.native_enum` is False. By default it uses the
length of the longest value.
.. versionadded:: 1.3.16
:param schema: Schema name of this type. For types that exist on the
target database as an independent schema construct (PostgreSQL),
this parameter specifies the named schema in which the type is
present.
If not present, the schema name will be taken from the
:class:`_schema.MetaData` collection if passed as
:paramref:`_types.Enum.metadata`, for a :class:`_schema.MetaData`
that includes the :paramref:`_schema.MetaData.schema` parameter.
.. versionchanged:: 1.4.12 :class:`_types.Enum` inherits the
:paramref:`_schema.MetaData.schema` parameter of the
:class:`_schema.MetaData` object if present, when passed using
the :paramref:`_types.Enum.metadata` parameter.
Otherwise, if the :paramref:`_types.Enum.inherit_schema` flag is set
to ``True``, the schema will be inherited from the associated
:class:`_schema.Table` object if any; when
:paramref:`_types.Enum.inherit_schema` is at its default of
``False``, the owning table's schema is **not** used.
:param quote: Set explicit quoting preferences for the type's name.
:param inherit_schema: When ``True``, the "schema" from the owning
:class:`_schema.Table`
will be copied to the "schema" attribute of this
:class:`.Enum`, replacing whatever value was passed for the
``schema`` attribute. This also takes effect when using the
:meth:`_schema.Table.to_metadata` operation.
:param validate_strings: when True, string values that are being
passed to the database in a SQL statement will be checked
for validity against the list of enumerated values. Unrecognized
values will result in a ``LookupError`` being raised.
.. versionadded:: 1.1.0b2
:param values_callable: A callable which will be passed the PEP-435
compliant enumerated type, which should then return a list of string
values to be persisted. This allows for alternate usages such as
using the string value of an enum to be persisted to the database
instead of its name.
.. versionadded:: 1.2.3
:param sort_key_function: a Python callable which may be used as the
"key" argument in the Python ``sorted()`` built-in. The SQLAlchemy
ORM requires that primary key columns which are mapped must
be sortable in some way. When using an unsortable enumeration
object such as a Python 3 ``Enum`` object, this parameter may be
used to set a default sort key function for the objects. By
default, the database value of the enumeration is used as the
sorting function.
.. versionadded:: 1.3.8
:param omit_aliases: A boolean that, when true, will remove aliases
from PEP 435 enums. For backward compatibility it defaults to ``False``.
A deprecation warning is raised if the enum has aliases and this
flag was not set.
.. versionadded:: 1.4.5
.. deprecated:: 1.4 The default will be changed to ``True`` in
SQLAlchemy 2.0.
"""
self._enum_init(enums, kw)
@property
def _enums_argument(self):
if self.enum_class is not None:
return [self.enum_class]
else:
return self.enums
def _enum_init(self, enums, kw):
"""internal init for :class:`.Enum` and subclasses.
friendly init helper used by subclasses to remove
all the Enum-specific keyword arguments from kw. Allows all
other arguments in kw to pass through.
"""
self.native_enum = kw.pop("native_enum", True)
self.create_constraint = kw.pop("create_constraint", False)
self.values_callable = kw.pop("values_callable", None)
self._sort_key_function = kw.pop("sort_key_function", NO_ARG)
length_arg = kw.pop("length", NO_ARG)
self._omit_aliases = kw.pop("omit_aliases", NO_ARG)
values, objects = self._parse_into_values(enums, kw)
self._setup_for_values(values, objects, kw)
convert_unicode = kw.pop("convert_unicode", None)
self.validate_strings = kw.pop("validate_strings", False)
if convert_unicode is None:
for e in self.enums:
# this is all py2k logic that can go away for py3k only,
# "expect unicode" will always be implicitly true
if isinstance(e, util.text_type):
_expect_unicode = True
break
else:
_expect_unicode = False
else:
_expect_unicode = convert_unicode
if self.enums:
length = max(len(x) for x in self.enums)
else:
length = 0
if not self.native_enum and length_arg is not NO_ARG:
if length_arg < length:
raise ValueError(
"When provided, length must be larger or equal"
" than the length of the longest enum value. %s < %s"
% (length_arg, length)
)
length = length_arg
self._valid_lookup[None] = self._object_lookup[None] = None
super(Enum, self).__init__(
length=length, _expect_unicode=_expect_unicode
)
if self.enum_class:
kw.setdefault("name", self.enum_class.__name__.lower())
SchemaType.__init__(
self,
name=kw.pop("name", None),
schema=kw.pop("schema", None),
metadata=kw.pop("metadata", None),
inherit_schema=kw.pop("inherit_schema", False),
quote=kw.pop("quote", None),
_create_events=kw.pop("_create_events", True),
)
def _parse_into_values(self, enums, kw):
if not enums and "_enums" in kw:
enums = kw.pop("_enums")
if len(enums) == 1 and hasattr(enums[0], "__members__"):
self.enum_class = enums[0]
_members = self.enum_class.__members__
aliases = [n for n, v in _members.items() if v.name != n]
if self._omit_aliases is NO_ARG and aliases:
util.warn_deprecated_20(
"The provided enum %s contains the aliases %s. The "
"``omit_aliases`` will default to ``True`` in SQLAlchemy "
"2.0. Specify a value to silence this warning."
% (self.enum_class.__name__, aliases)
)
if self._omit_aliases is True:
# remove aliases
members = OrderedDict(
(n, v) for n, v in _members.items() if v.name == n
)
else:
members = _members
if self.values_callable:
values = self.values_callable(self.enum_class)
else:
values = list(members)
objects = [members[k] for k in members]
return values, objects
else:
self.enum_class = None
return enums, enums
def _setup_for_values(self, values, objects, kw):
self.enums = list(values)
self._valid_lookup = dict(zip(reversed(objects), reversed(values)))
self._object_lookup = dict(zip(values, objects))
self._valid_lookup.update(
[
(value, self._valid_lookup[self._object_lookup[value]])
for value in values
]
)
@property
def sort_key_function(self):
if self._sort_key_function is NO_ARG:
return self._db_value_for_elem
else:
return self._sort_key_function
@property
def native(self):
return self.native_enum
def _db_value_for_elem(self, elem):
try:
return self._valid_lookup[elem]
except KeyError as err:
# for unknown string values, we return as is. While we can
# validate these if we wanted, that does not allow for lesser-used
# end-user use cases, such as using a LIKE comparison with an enum,
# or for an application that wishes to apply string tests to an
# ENUM (see [ticket:3725]). While we can decide to differentiate
# here between an INSERT statement and a criteria used in a SELECT,
# for now we're staying conservative w/ behavioral changes (perhaps
# someone has a trigger that handles strings on INSERT)
if not self.validate_strings and isinstance(
elem, compat.string_types
):
return elem
else:
util.raise_(
LookupError(
"'%s' is not among the defined enum values. "
"Enum name: %s. Possible values: %s"
% (
elem,
self.name,
langhelpers.repr_tuple_names(self.enums),
)
),
replace_context=err,
)
class Comparator(String.Comparator):
def _adapt_expression(self, op, other_comparator):
op, typ = super(Enum.Comparator, self)._adapt_expression(
op, other_comparator
)
if op is operators.concat_op:
typ = String(
self.type.length, _expect_unicode=self.type._expect_unicode
)
return op, typ
comparator_factory = Comparator
def _object_value_for_elem(self, elem):
try:
return self._object_lookup[elem]
except KeyError as err:
util.raise_(
LookupError(
"'%s' is not among the defined enum values. "
"Enum name: %s. Possible values: %s"
% (
elem,
self.name,
langhelpers.repr_tuple_names(self.enums),
)
),
replace_context=err,
)
def __repr__(self):
return util.generic_repr(
self,
additional_kw=[("native_enum", True)],
to_inspect=[Enum, SchemaType],
)
def as_generic(self, allow_nulltype=False):
if hasattr(self, "enums"):
args = self.enums
else:
raise NotImplementedError(
"TypeEngine.as_generic() heuristic "
"is undefined for types that inherit Enum but do not have "
"an `enums` attribute."
)
return util.constructor_copy(self, self._generic_type_affinity, *args)
def adapt_to_emulated(self, impltype, **kw):
kw.setdefault("_expect_unicode", self._expect_unicode)
kw.setdefault("validate_strings", self.validate_strings)
kw.setdefault("name", self.name)
kw.setdefault("schema", self.schema)
kw.setdefault("inherit_schema", self.inherit_schema)
kw.setdefault("metadata", self.metadata)
kw.setdefault("_create_events", False)
kw.setdefault("native_enum", self.native_enum)
kw.setdefault("values_callable", self.values_callable)
kw.setdefault("create_constraint", self.create_constraint)
kw.setdefault("length", self.length)
kw.setdefault("omit_aliases", self._omit_aliases)
assert "_enums" in kw
return impltype(**kw)
def adapt(self, impltype, **kw):
kw["_enums"] = self._enums_argument
return super(Enum, self).adapt(impltype, **kw)
def _should_create_constraint(self, compiler, **kw):
if not self._is_impl_for_variant(compiler.dialect, kw):
return False
return (
not self.native_enum or not compiler.dialect.supports_native_enum
)
@util.preload_module("sqlalchemy.sql.schema")
def _set_table(self, column, table):
schema = util.preloaded.sql_schema
SchemaType._set_table(self, column, table)
if not self.create_constraint:
return
variant_mapping = self._variant_mapping_for_set_table(column)
e = schema.CheckConstraint(
type_coerce(column, self).in_(self.enums),
name=_NONE_NAME if self.name is None else self.name,
_create_rule=util.portable_instancemethod(
self._should_create_constraint,
{"variant_mapping": variant_mapping},
),
_type_bound=True,
)
assert e.table is table
def literal_processor(self, dialect):
parent_processor = super(Enum, self).literal_processor(dialect)
def process(value):
value = self._db_value_for_elem(value)
if parent_processor:
value = parent_processor(value)
return value
return process
def bind_processor(self, dialect):
def process(value):
value = self._db_value_for_elem(value)
if parent_processor:
value = parent_processor(value)
return value
parent_processor = super(Enum, self).bind_processor(dialect)
return process
def result_processor(self, dialect, coltype):
parent_processor = super(Enum, self).result_processor(dialect, coltype)
def process(value):
if parent_processor:
value = parent_processor(value)
value = self._object_value_for_elem(value)
return value
return process
def copy(self, **kw):
return SchemaType.copy(self, **kw)
@property
def python_type(self):
if self.enum_class:
return self.enum_class
else:
return super(Enum, self).python_type
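# --- Editor's illustration (not part of the original module) ---
# A minimal sketch of how the Enum type documented above is typically used,
# assuming the public ``sqlalchemy`` package is importable; the table,
# constraint, and helper names below are hypothetical.
def _example_enum_usage():
    import enum

    from sqlalchemy import Column, Integer, MetaData, Table
    from sqlalchemy import Enum as SAEnum

    class Status(enum.Enum):
        pending = "pending"
        done = "done"

    metadata = MetaData()
    # native_enum=False falls back to VARCHAR (plus an optional CHECK
    # constraint); values_callable persists each member's .value rather
    # than its name.
    jobs = Table(
        "jobs",
        metadata,
        Column("id", Integer, primary_key=True),
        Column(
            "status",
            SAEnum(
                Status,
                native_enum=False,
                create_constraint=True,
                name="ck_jobs_status",
                values_callable=lambda cls: [m.value for m in cls],
            ),
        ),
    )
    return jobs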
class PickleType(TypeDecorator):
"""Holds Python objects, which are serialized using pickle.
PickleType builds upon the Binary type to apply Python's
``pickle.dumps()`` to incoming objects, and ``pickle.loads()`` on
the way out, allowing any pickleable Python object to be stored as
a serialized binary field.
To allow ORM change events to propagate for elements associated
with :class:`.PickleType`, see :ref:`mutable_toplevel`.
"""
impl = LargeBinary
cache_ok = True
def __init__(
self,
protocol=pickle.HIGHEST_PROTOCOL,
pickler=None,
comparator=None,
impl=None,
):
"""
Construct a PickleType.
:param protocol: defaults to ``pickle.HIGHEST_PROTOCOL``.
:param pickler: defaults to the ``cPickle`` module, or the ``pickle``
module if ``cPickle`` is not available. May be any object with
pickle-compatible ``dumps`` and ``loads`` methods.
:param comparator: a 2-arg callable predicate used
to compare values of this type. If left as ``None``,
the Python "equals" operator is used to compare values.
:param impl: A binary-storing :class:`_types.TypeEngine` class or
instance to use in place of the default :class:`_types.LargeBinary`.
For example the :class:`_mysql.LONGBLOB` class may be more effective
when using MySQL.
.. versionadded:: 1.4.20
"""
self.protocol = protocol
self.pickler = pickler or pickle
self.comparator = comparator
super(PickleType, self).__init__()
if impl:
self.impl = to_instance(impl)
def __reduce__(self):
return PickleType, (self.protocol, None, self.comparator)
def bind_processor(self, dialect):
impl_processor = self.impl.bind_processor(dialect)
dumps = self.pickler.dumps
protocol = self.protocol
if impl_processor:
def process(value):
if value is not None:
value = dumps(value, protocol)
return impl_processor(value)
else:
def process(value):
if value is not None:
value = dumps(value, protocol)
return value
return process
def result_processor(self, dialect, coltype):
impl_processor = self.impl.result_processor(dialect, coltype)
loads = self.pickler.loads
if impl_processor:
def process(value):
value = impl_processor(value)
if value is None:
return None
return loads(value)
else:
def process(value):
if value is None:
return None
return loads(value)
return process
def compare_values(self, x, y):
if self.comparator:
return self.comparator(x, y)
else:
return x == y
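# --- Editor's illustration (not part of the original module) ---
# A short sketch of PickleType as described above: arbitrary picklable
# Python objects round-trip through a binary column.  Names below are
# hypothetical.
def _example_pickletype_usage():
    from sqlalchemy import Column, Integer, MetaData, PickleType, Table

    metadata = MetaData()
    blobs = Table(
        "blobs",
        metadata,
        Column("id", Integer, primary_key=True),
        # stored via pickle.dumps() on the way in, pickle.loads() on the way out
        Column("payload", PickleType()),
    )
    return blobs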
class Boolean(Emulated, TypeEngine, SchemaType):
"""A bool datatype.
:class:`.Boolean` typically uses BOOLEAN or SMALLINT on the DDL side,
and on the Python side deals in ``True`` or ``False``.
The :class:`.Boolean` datatype currently has two levels of assertion
that the values persisted are simple true/false values. For all
backends, only the Python values ``None``, ``True``, ``False``, ``1``
or ``0`` are accepted as parameter values. For those backends that
don't support a "native boolean" datatype, an option exists to
also create a CHECK constraint on the target column.
.. versionchanged:: 1.2 the :class:`.Boolean` datatype now asserts that
incoming Python values are already in pure boolean form.
"""
__visit_name__ = "boolean"
native = True
def __init__(
self, create_constraint=False, name=None, _create_events=True
):
"""Construct a Boolean.
:param create_constraint: defaults to False. If the boolean
is generated as an int/smallint, also create a CHECK constraint
on the table that ensures 1 or 0 as a value.
.. note:: it is strongly recommended that the CHECK constraint
have an explicit name in order to support schema-management
concerns. This can be established either by setting the
:paramref:`.Boolean.name` parameter or by setting up an
appropriate naming convention; see
:ref:`constraint_naming_conventions` for background.
.. versionchanged:: 1.4 - this flag now defaults to False, meaning
no CHECK constraint is generated for a non-native boolean type.
:param name: if a CHECK constraint is generated, specify
the name of the constraint.
"""
self.create_constraint = create_constraint
self.name = name
self._create_events = _create_events
def _should_create_constraint(self, compiler, **kw):
if not self._is_impl_for_variant(compiler.dialect, kw):
return False
return (
not compiler.dialect.supports_native_boolean
and compiler.dialect.non_native_boolean_check_constraint
)
@util.preload_module("sqlalchemy.sql.schema")
def _set_table(self, column, table):
schema = util.preloaded.sql_schema
if not self.create_constraint:
return
variant_mapping = self._variant_mapping_for_set_table(column)
e = schema.CheckConstraint(
type_coerce(column, self).in_([0, 1]),
name=_NONE_NAME if self.name is None else self.name,
_create_rule=util.portable_instancemethod(
self._should_create_constraint,
{"variant_mapping": variant_mapping},
),
_type_bound=True,
)
assert e.table is table
@property
def python_type(self):
return bool
_strict_bools = frozenset([None, True, False])
def _strict_as_bool(self, value):
if value not in self._strict_bools:
if not isinstance(value, int):
raise TypeError("Not a boolean value: %r" % value)
else:
raise ValueError(
"Value %r is not None, True, or False" % value
)
return value
def literal_processor(self, dialect):
compiler = dialect.statement_compiler(dialect, None)
true = compiler.visit_true(None)
false = compiler.visit_false(None)
def process(value):
return true if self._strict_as_bool(value) else false
return process
def bind_processor(self, dialect):
_strict_as_bool = self._strict_as_bool
if dialect.supports_native_boolean:
_coerce = bool
else:
_coerce = int
def process(value):
value = _strict_as_bool(value)
if value is not None:
value = _coerce(value)
return value
return process
def result_processor(self, dialect, coltype):
if dialect.supports_native_boolean:
return None
else:
return processors.int_to_boolean
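# --- Editor's illustration (not part of the original module) ---
# A minimal sketch of the Boolean type with a named CHECK constraint for
# backends without a native boolean; the table and constraint names are
# hypothetical.
def _example_boolean_usage():
    from sqlalchemy import Boolean, Column, Integer, MetaData, Table

    metadata = MetaData()
    flags = Table(
        "flags",
        metadata,
        Column("id", Integer, primary_key=True),
        # on non-native-boolean backends this emits an integer column plus
        # CHECK (enabled IN (0, 1)) named "ck_flags_enabled"
        Column(
            "enabled",
            Boolean(create_constraint=True, name="ck_flags_enabled"),
        ),
    )
    return flags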
class _AbstractInterval(_LookupExpressionAdapter, TypeEngine):
@util.memoized_property
def _expression_adaptations(self):
# Based on http://www.postgresql.org/docs/current/\
# static/functions-datetime.html.
return {
operators.add: {
Date: DateTime,
Interval: self.__class__,
DateTime: DateTime,
Time: Time,
},
operators.sub: {Interval: self.__class__},
operators.mul: {Numeric: self.__class__},
operators.truediv: {Numeric: self.__class__},
operators.div: {Numeric: self.__class__},
}
@property
def _type_affinity(self):
return Interval
def coerce_compared_value(self, op, value):
"""See :meth:`.TypeEngine.coerce_compared_value` for a description."""
return self.impl.coerce_compared_value(op, value)
class Interval(Emulated, _AbstractInterval, TypeDecorator):
"""A type for ``datetime.timedelta()`` objects.
The Interval type deals with ``datetime.timedelta`` objects. In
PostgreSQL, the native ``INTERVAL`` type is used; for others, the
value is stored as a datetime which is relative to the "epoch"
(Jan. 1, 1970).
Note that the ``Interval`` type does not currently provide date arithmetic
operations on platforms which do not support interval types natively. Such
operations usually require transformation of both sides of the expression
(such as, conversion of both sides into integer epoch values first) which
currently is a manual procedure (such as via
:attr:`~sqlalchemy.sql.expression.func`).
"""
impl = DateTime
epoch = dt.datetime.utcfromtimestamp(0)
cache_ok = True
def __init__(self, native=True, second_precision=None, day_precision=None):
"""Construct an Interval object.
:param native: when True, use the actual
INTERVAL type provided by the database, if
supported (currently PostgreSQL, Oracle).
Otherwise, represent the interval data as
an epoch value regardless.
:param second_precision: For native interval types
which support a "fractional seconds precision" parameter,
i.e. Oracle and PostgreSQL
:param day_precision: for native interval types which
support a "day precision" parameter, i.e. Oracle.
"""
super(Interval, self).__init__()
self.native = native
self.second_precision = second_precision
self.day_precision = day_precision
@property
def python_type(self):
return dt.timedelta
def adapt_to_emulated(self, impltype, **kw):
return _AbstractInterval.adapt(self, impltype, **kw)
def bind_processor(self, dialect):
impl_processor = self.impl.bind_processor(dialect)
epoch = self.epoch
if impl_processor:
def process(value):
if value is not None:
value = epoch + value
return impl_processor(value)
else:
def process(value):
if value is not None:
value = epoch + value
return value
return process
def result_processor(self, dialect, coltype):
impl_processor = self.impl.result_processor(dialect, coltype)
epoch = self.epoch
if impl_processor:
def process(value):
value = impl_processor(value)
if value is None:
return None
return value - epoch
else:
def process(value):
if value is None:
return None
return value - epoch
return process
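# --- Editor's illustration (not part of the original module) ---
# A brief sketch of the Interval type described above: timedelta values use
# the native INTERVAL type on PostgreSQL/Oracle, otherwise an epoch-relative
# DATETIME.  Names below are hypothetical.
def _example_interval_usage():
    import datetime

    from sqlalchemy import Column, Integer, Interval, MetaData, Table

    metadata = MetaData()
    tasks = Table(
        "tasks",
        metadata,
        Column("id", Integer, primary_key=True),
        Column("duration", Interval(native=True)),
    )
    # a bound parameter value for an INSERT against "duration"
    one_hour = datetime.timedelta(hours=1)
    return tasks, one_hour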
class JSON(Indexable, TypeEngine):
"""Represent a SQL JSON type.
.. note:: :class:`_types.JSON`
is provided as a facade for vendor-specific
JSON types. Since it supports JSON SQL operations, it only
works on backends that have an actual JSON type, currently:
* PostgreSQL - see :class:`sqlalchemy.dialects.postgresql.JSON` and
:class:`sqlalchemy.dialects.postgresql.JSONB` for backend-specific
notes
* MySQL - see
:class:`sqlalchemy.dialects.mysql.JSON` for backend-specific notes
* SQLite as of version 3.9 - see
:class:`sqlalchemy.dialects.sqlite.JSON` for backend-specific notes
* Microsoft SQL Server 2016 and later - see
:class:`sqlalchemy.dialects.mssql.JSON` for backend-specific notes
:class:`_types.JSON` is part of the Core in support of the growing
popularity of native JSON datatypes.
The :class:`_types.JSON` type stores arbitrary JSON format data, e.g.::
data_table = Table('data_table', metadata,
Column('id', Integer, primary_key=True),
Column('data', JSON)
)
with engine.connect() as conn:
conn.execute(
data_table.insert(),
data = {"key1": "value1", "key2": "value2"}
)
**JSON-Specific Expression Operators**
The :class:`_types.JSON`
datatype provides these additional SQL operations:
* Keyed index operations::
data_table.c.data['some key']
* Integer index operations::
data_table.c.data[3]
* Path index operations::
data_table.c.data[('key_1', 'key_2', 5, ..., 'key_n')]
* Data casters for specific JSON element types, subsequent to an index
or path operation being invoked::
data_table.c.data["some key"].as_integer()
.. versionadded:: 1.3.11
Additional operations may be available from the dialect-specific versions
of :class:`_types.JSON`, such as
:class:`sqlalchemy.dialects.postgresql.JSON` and
:class:`sqlalchemy.dialects.postgresql.JSONB` which both offer additional
PostgreSQL-specific operations.
**Casting JSON Elements to Other Types**
Index operations, i.e. those invoked by calling upon the expression using
the Python bracket operator as in ``some_column['some key']``, return an
expression object whose type defaults to :class:`_types.JSON`, so that
further JSON-oriented instructions may be called upon the result type.
However, it is likely more common that an index operation is expected
to return a specific scalar element, such as a string or integer. In
order to provide access to these elements in a backend-agnostic way,
a series of data casters are provided:
* :meth:`.JSON.Comparator.as_string` - return the element as a string
* :meth:`.JSON.Comparator.as_boolean` - return the element as a boolean
* :meth:`.JSON.Comparator.as_float` - return the element as a float
* :meth:`.JSON.Comparator.as_integer` - return the element as an integer
These data casters are implemented by supporting dialects in order to
assure that comparisons to the above types will work as expected, such as::
# integer comparison
data_table.c.data["some_integer_key"].as_integer() == 5
# boolean comparison
data_table.c.data["some_boolean"].as_boolean() == True
.. versionadded:: 1.3.11 Added type-specific casters for the basic JSON
data element types.
.. note::
The data caster functions are new in version 1.3.11, and supersede
the previous documented approaches of using CAST; for reference,
this looked like::
from sqlalchemy import cast, type_coerce
from sqlalchemy import String, JSON
cast(
data_table.c.data['some_key'], String
) == type_coerce(55, JSON)
The above case now works directly as::
data_table.c.data['some_key'].as_integer() == 5
For details on the previous comparison approach within the 1.3.x
series, see the documentation for SQLAlchemy 1.2 or the included HTML
files in the doc/ directory of the version's distribution.
**Detecting Changes in JSON columns when using the ORM**
The :class:`_types.JSON` type, when used with the SQLAlchemy ORM, does not
detect in-place mutations to the structure. In order to detect these, the
:mod:`sqlalchemy.ext.mutable` extension must be used. This extension will
allow "in-place" changes to the datastructure to produce events which
will be detected by the unit of work. See the example at :class:`.HSTORE`
for a simple example involving a dictionary.
**Support for JSON null vs. SQL NULL**
When working with NULL values, the :class:`_types.JSON`
type recommends the
use of two specific constants in order to differentiate between a column
that evaluates to SQL NULL, e.g. no value, vs. the JSON-encoded string
of ``"null"``. To insert or select against a value that is SQL NULL,
use the constant :func:`.null`::
from sqlalchemy import null
conn.execute(table.insert(), json_value=null())
To insert or select against a value that is JSON ``"null"``, use the
constant :attr:`_types.JSON.NULL`::
conn.execute(table.insert(), json_value=JSON.NULL)
The :class:`_types.JSON` type supports a flag
:paramref:`_types.JSON.none_as_null` which when set to True will result
in the Python constant ``None`` evaluating to the value of SQL
NULL, and when set to False results in the Python constant
``None`` evaluating to the value of JSON ``"null"``. The Python
value ``None`` may be used in conjunction with either
:attr:`_types.JSON.NULL` and :func:`.null` in order to indicate NULL
values, but care must be taken as to the value of the
:paramref:`_types.JSON.none_as_null` in these cases.
**Customizing the JSON Serializer**
The JSON serializer and deserializer used by :class:`_types.JSON`
defaults to
Python's ``json.dumps`` and ``json.loads`` functions; in the case of the
psycopg2 dialect, psycopg2 may be using its own custom loader function.
In order to affect the serializer / deserializer, they are currently
configurable at the :func:`_sa.create_engine` level via the
:paramref:`_sa.create_engine.json_serializer` and
:paramref:`_sa.create_engine.json_deserializer` parameters. For example,
to turn off ``ensure_ascii``::
engine = create_engine(
"sqlite://",
json_serializer=lambda obj: json.dumps(obj, ensure_ascii=False))
.. versionchanged:: 1.3.7
SQLite dialect's ``json_serializer`` and ``json_deserializer``
parameters renamed from ``_json_serializer`` and
``_json_deserializer``.
.. seealso::
:class:`sqlalchemy.dialects.postgresql.JSON`
:class:`sqlalchemy.dialects.postgresql.JSONB`
:class:`sqlalchemy.dialects.mysql.JSON`
:class:`sqlalchemy.dialects.sqlite.JSON`
.. versionadded:: 1.1
"""
__visit_name__ = "JSON"
hashable = False
NULL = util.symbol("JSON_NULL")
"""Describe the json value of NULL.
This value is used to force the JSON value of ``"null"`` to be
used as the value. A value of Python ``None`` will be recognized
either as SQL NULL or JSON ``"null"``, based on the setting
of the :paramref:`_types.JSON.none_as_null` flag; the
:attr:`_types.JSON.NULL`
constant can be used to always resolve to JSON ``"null"`` regardless
of this setting. This is in contrast to the :func:`_expression.null`
construct,
which always resolves to SQL NULL. E.g.::
from sqlalchemy import null
from sqlalchemy.dialects.postgresql import JSON
# will *always* insert SQL NULL
obj1 = MyObject(json_value=null())
# will *always* insert JSON string "null"
obj2 = MyObject(json_value=JSON.NULL)
session.add_all([obj1, obj2])
session.commit()
In order to set JSON NULL as a default value for a column, the most
transparent method is to use :func:`_expression.text`::
Table(
'my_table', metadata,
Column('json_data', JSON, default=text("'null'"))
)
While it is possible to use :attr:`_types.JSON.NULL` in this context, the
:attr:`_types.JSON.NULL` value will be returned as the value of the
column,
which in the context of the ORM or other repurposing of the default
value, may not be desirable. Using a SQL expression means the value
will be re-fetched from the database within the context of retrieving
generated defaults.
"""
def __init__(self, none_as_null=False):
"""Construct a :class:`_types.JSON` type.
:param none_as_null=False: if True, persist the value ``None`` as a
SQL NULL value, not the JSON encoding of ``null``. Note that
when this flag is False, the :func:`.null` construct can still
be used to persist a NULL value::
from sqlalchemy import null
conn.execute(table.insert(), data=null())
.. note::
:paramref:`_types.JSON.none_as_null` does **not** apply to the
values passed to :paramref:`_schema.Column.default` and
:paramref:`_schema.Column.server_default`; a value of ``None``
passed for these parameters means "no default present".
.. seealso::
:attr:`.types.JSON.NULL`
"""
self.none_as_null = none_as_null
class JSONElementType(TypeEngine):
"""Common function for index / path elements in a JSON expression."""
_integer = Integer()
_string = String()
def string_bind_processor(self, dialect):
return self._string._cached_bind_processor(dialect)
def string_literal_processor(self, dialect):
return self._string._cached_literal_processor(dialect)
def bind_processor(self, dialect):
int_processor = self._integer._cached_bind_processor(dialect)
string_processor = self.string_bind_processor(dialect)
def process(value):
if int_processor and isinstance(value, int):
value = int_processor(value)
elif string_processor and isinstance(value, util.string_types):
value = string_processor(value)
return value
return process
def literal_processor(self, dialect):
int_processor = self._integer._cached_literal_processor(dialect)
string_processor = self.string_literal_processor(dialect)
def process(value):
if int_processor and isinstance(value, int):
value = int_processor(value)
elif string_processor and isinstance(value, util.string_types):
value = string_processor(value)
return value
return process
class JSONIndexType(JSONElementType):
"""Placeholder for the datatype of a JSON index value.
This allows execution-time processing of JSON index values
for special syntaxes.
"""
class JSONIntIndexType(JSONIndexType):
"""Placeholder for the datatype of a JSON index value.
This allows execution-time processing of JSON index values
for special syntaxes.
"""
class JSONStrIndexType(JSONIndexType):
"""Placeholder for the datatype of a JSON index value.
This allows execution-time processing of JSON index values
for special syntaxes.
"""
class JSONPathType(JSONElementType):
"""Placeholder type for JSON path operations.
This allows execution-time processing of a path-based
index value into a specific SQL syntax.
"""
class Comparator(Indexable.Comparator, Concatenable.Comparator):
"""Define comparison operations for :class:`_types.JSON`."""
def _setup_getitem(self, index):
if not isinstance(index, util.string_types) and isinstance(
index, compat.collections_abc.Sequence
):
index = coercions.expect(
roles.BinaryElementRole,
index,
expr=self.expr,
operator=operators.json_path_getitem_op,
bindparam_type=JSON.JSONPathType,
)
operator = operators.json_path_getitem_op
else:
index = coercions.expect(
roles.BinaryElementRole,
index,
expr=self.expr,
operator=operators.json_getitem_op,
bindparam_type=JSON.JSONIntIndexType
if isinstance(index, int)
else JSON.JSONStrIndexType,
)
operator = operators.json_getitem_op
return operator, index, self.type
def as_boolean(self):
"""Cast an indexed value as boolean.
e.g.::
stmt = select(
mytable.c.json_column['some_data'].as_boolean()
).where(
mytable.c.json_column['some_data'].as_boolean() == True
)
.. versionadded:: 1.3.11
"""
return self._binary_w_type(Boolean(), "as_boolean")
def as_string(self):
"""Cast an indexed value as string.
e.g.::
stmt = select(
mytable.c.json_column['some_data'].as_string()
).where(
mytable.c.json_column['some_data'].as_string() ==
'some string'
)
.. versionadded:: 1.3.11
"""
return self._binary_w_type(String(), "as_string")
def as_integer(self):
"""Cast an indexed value as integer.
e.g.::
stmt = select(
mytable.c.json_column['some_data'].as_integer()
).where(
mytable.c.json_column['some_data'].as_integer() == 5
)
.. versionadded:: 1.3.11
"""
return self._binary_w_type(Integer(), "as_integer")
def as_float(self):
"""Cast an indexed value as float.
e.g.::
stmt = select(
mytable.c.json_column['some_data'].as_float()
).where(
mytable.c.json_column['some_data'].as_float() == 29.75
)
.. versionadded:: 1.3.11
"""
return self._binary_w_type(Float(), "as_float")
def as_numeric(self, precision, scale, asdecimal=True):
"""Cast an indexed value as numeric/decimal.
e.g.::
stmt = select(
mytable.c.json_column['some_data'].as_numeric(10, 6)
).where(
mytable.c.json_column['some_data'].as_numeric(10, 6) == 29.75
)
.. versionadded:: 1.4.0b2
"""
return self._binary_w_type(
Numeric(precision, scale, asdecimal=asdecimal), "as_numeric"
)
def as_json(self):
"""Cast an indexed value as JSON.
e.g.::
stmt = select(mytable.c.json_column['some_data'].as_json())
This is typically the default behavior of indexed elements in any
case.
Note that comparison of full JSON structures may not be
supported by all backends.
.. versionadded:: 1.3.11
"""
return self.expr
def _binary_w_type(self, typ, method_name):
if not isinstance(
self.expr, elements.BinaryExpression
) or self.expr.operator not in (
operators.json_getitem_op,
operators.json_path_getitem_op,
):
raise exc.InvalidRequestError(
"The JSON cast operator JSON.%s() only works with a JSON "
"index expression e.g. col['q'].%s()"
% (method_name, method_name)
)
expr = self.expr._clone()
expr.type = typ
return expr
comparator_factory = Comparator
@property
def python_type(self):
return dict
@property
def should_evaluate_none(self):
"""Alias of :attr:`_types.JSON.none_as_null`"""
return not self.none_as_null
@should_evaluate_none.setter
def should_evaluate_none(self, value):
self.none_as_null = not value
@util.memoized_property
def _str_impl(self):
return String(_expect_unicode=True)
def bind_processor(self, dialect):
string_process = self._str_impl.bind_processor(dialect)
json_serializer = dialect._json_serializer or json.dumps
def process(value):
if value is self.NULL:
value = None
elif isinstance(value, elements.Null) or (
value is None and self.none_as_null
):
return None
serialized = json_serializer(value)
if string_process:
serialized = string_process(serialized)
return serialized
return process
def result_processor(self, dialect, coltype):
string_process = self._str_impl.result_processor(dialect, coltype)
json_deserializer = dialect._json_deserializer or json.loads
def process(value):
if value is None:
return None
if string_process:
value = string_process(value)
return json_deserializer(value)
return process
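# --- Editor's illustration (not part of the original module) ---
# A compact sketch tying together the JSON features documented above:
# indexed access, the as_integer() caster, and the SQL NULL vs. JSON "null"
# distinction.  Table and column names are hypothetical.
def _example_json_usage():
    from sqlalchemy import JSON, Column, Integer, MetaData, Table, null, select

    metadata = MetaData()
    data_table = Table(
        "data_table",
        metadata,
        Column("id", Integer, primary_key=True),
        Column("data", JSON(none_as_null=True)),
    )
    # index + caster: compares the "count" element as an integer, rendered
    # with backend-specific JSON syntax
    stmt = select(data_table).where(
        data_table.c.data["count"].as_integer() == 5
    )
    # SQL NULL vs. JSON "null" as INSERT values
    sql_null_value = null()
    json_null_value = JSON.NULL
    return stmt, sql_null_value, json_null_value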
class ARRAY(SchemaEventTarget, Indexable, Concatenable, TypeEngine):
"""Represent a SQL Array type.
.. note:: This type serves as the basis for all ARRAY operations.
However, currently **only the PostgreSQL backend has support for SQL
arrays in SQLAlchemy**. It is recommended to use the PostgreSQL-specific
:class:`sqlalchemy.dialects.postgresql.ARRAY` type directly when using
ARRAY types with PostgreSQL, as it provides additional operators
specific to that backend.
:class:`_types.ARRAY` is part of the Core in support of various SQL
standard functions such as :class:`_functions.array_agg`
which explicitly involve
arrays; however, with the exception of the PostgreSQL backend and possibly
some third-party dialects, no other SQLAlchemy built-in dialect has support
for this type.
An :class:`_types.ARRAY` type is constructed given the "type"
of element::
mytable = Table("mytable", metadata,
Column("data", ARRAY(Integer))
)
The above type represents an N-dimensional array,
meaning a supporting backend such as PostgreSQL will interpret values
with any number of dimensions automatically. To produce an INSERT
construct that passes in a 1-dimensional array of integers::
connection.execute(
mytable.insert(),
data=[1,2,3]
)
The :class:`_types.ARRAY` type can be constructed given a fixed number
of dimensions::
mytable = Table("mytable", metadata,
Column("data", ARRAY(Integer, dimensions=2))
)
Sending a number of dimensions is optional, but recommended if the
datatype is to represent arrays of more than one dimension. This number
is used:
* When emitting the type declaration itself to the database, e.g.
``INTEGER[][]``
* When translating Python values to database values, and vice versa, e.g.
an ARRAY of :class:`.Unicode` objects uses this number to efficiently
access the string values inside of array structures without resorting
to per-row type inspection
* When used with the Python ``getitem`` accessor, the number of dimensions
serves to define the kind of type that the ``[]`` operator should
return, e.g. for an ARRAY of INTEGER with two dimensions::
>>> expr = table.c.column[5] # returns ARRAY(Integer, dimensions=1)
>>> expr = expr[6] # returns Integer
For 1-dimensional arrays, an :class:`_types.ARRAY` instance with no
dimension parameter will generally assume single-dimensional behaviors.
SQL expressions of type :class:`_types.ARRAY` have support for "index" and
"slice" behavior. The Python ``[]`` operator works normally here, given
integer indexes or slices. Arrays default to 1-based indexing.
The operator produces binary expression
constructs which will produce the appropriate SQL, both for
SELECT statements::
select(mytable.c.data[5], mytable.c.data[2:7])
as well as UPDATE statements when the :meth:`_expression.Update.values`
method
is used::
mytable.update().values({
mytable.c.data[5]: 7,
mytable.c.data[2:7]: [1, 2, 3]
})
The :class:`_types.ARRAY` type also provides for the operators
:meth:`.types.ARRAY.Comparator.any` and
:meth:`.types.ARRAY.Comparator.all`. The PostgreSQL-specific version of
:class:`_types.ARRAY` also provides additional operators.
.. versionadded:: 1.1.0
.. seealso::
:class:`sqlalchemy.dialects.postgresql.ARRAY`
"""
__visit_name__ = "ARRAY"
_is_array = True
zero_indexes = False
"""If True, Python zero-based indexes should be interpreted as one-based
on the SQL expression side."""
class Comparator(Indexable.Comparator, Concatenable.Comparator):
"""Define comparison operations for :class:`_types.ARRAY`.
More operators are available on the dialect-specific form
of this type. See :class:`.postgresql.ARRAY.Comparator`.
"""
def _setup_getitem(self, index):
if isinstance(index, slice):
return_type = self.type
if self.type.zero_indexes:
index = slice(index.start + 1, index.stop + 1, index.step)
slice_ = Slice(
index.start, index.stop, index.step, _name=self.expr.key
)
return operators.getitem, slice_, return_type
else:
if self.type.zero_indexes:
index += 1
if self.type.dimensions is None or self.type.dimensions == 1:
return_type = self.type.item_type
else:
adapt_kw = {"dimensions": self.type.dimensions - 1}
return_type = self.type.adapt(
self.type.__class__, **adapt_kw
)
return operators.getitem, index, return_type
def contains(self, *arg, **kw):
raise NotImplementedError(
"ARRAY.contains() not implemented for the base "
"ARRAY type; please use the dialect-specific ARRAY type"
)
@util.preload_module("sqlalchemy.sql.elements")
def any(self, other, operator=None):
"""Return ``other operator ANY (array)`` clause.
Argument places are switched, because ANY requires the array
expression to be on the right-hand side.
E.g.::
from sqlalchemy.sql import operators
conn.execute(
select(table.c.data).where(
table.c.data.any(7, operator=operators.lt)
)
)
:param other: expression to be compared
:param operator: an operator object from the
:mod:`sqlalchemy.sql.operators`
package, defaults to :func:`.operators.eq`.
.. seealso::
:func:`_expression.any_`
:meth:`.types.ARRAY.Comparator.all`
"""
elements = util.preloaded.sql_elements
operator = operator if operator else operators.eq
# send plain BinaryExpression so that negate remains at None,
# leading to NOT expr for negation.
return elements.BinaryExpression(
coercions.expect(roles.ExpressionElementRole, other),
elements.CollectionAggregate._create_any(self.expr),
operator,
)
@util.preload_module("sqlalchemy.sql.elements")
def all(self, other, operator=None):
"""Return ``other operator ALL (array)`` clause.
Argument places are switched, because ALL requires the array
expression to be on the right-hand side.
E.g.::
from sqlalchemy.sql import operators
conn.execute(
select(table.c.data).where(
table.c.data.all(7, operator=operators.lt)
)
)
:param other: expression to be compared
:param operator: an operator object from the
:mod:`sqlalchemy.sql.operators`
package, defaults to :func:`.operators.eq`.
.. seealso::
:func:`_expression.all_`
:meth:`.types.ARRAY.Comparator.any`
"""
elements = util.preloaded.sql_elements
operator = operator if operator else operators.eq
# send plain BinaryExpression so that negate remains at None,
# leading to NOT expr for negation.
return elements.BinaryExpression(
coercions.expect(roles.ExpressionElementRole, other),
elements.CollectionAggregate._create_all(self.expr),
operator,
)
comparator_factory = Comparator
def __init__(
self, item_type, as_tuple=False, dimensions=None, zero_indexes=False
):
"""Construct an :class:`_types.ARRAY`.
E.g.::
Column('myarray', ARRAY(Integer))
Arguments are:
:param item_type: The data type of items of this array. Note that
dimensionality is irrelevant here, so multi-dimensional arrays like
``INTEGER[][]``, are constructed as ``ARRAY(Integer)``, not as
``ARRAY(ARRAY(Integer))`` or such.
:param as_tuple=False: Specify whether return results
should be converted to tuples from lists. This parameter is
not generally needed as a Python list corresponds well
to a SQL array.
:param dimensions: if non-None, the ARRAY will assume a fixed
number of dimensions. This impacts how the array is declared
on the database, how it goes about interpreting Python and
result values, as well as how expression behavior in conjunction
with the "getitem" operator works. See the description at
:class:`_types.ARRAY` for additional detail.
:param zero_indexes=False: when True, index values will be converted
between Python zero-based and SQL one-based indexes, e.g.
a value of one will be added to all index values before passing
to the database.
"""
if isinstance(item_type, ARRAY):
raise ValueError(
"Do not nest ARRAY types; ARRAY(basetype) "
"handles multi-dimensional arrays of basetype"
)
if isinstance(item_type, type):
item_type = item_type()
self.item_type = item_type
self.as_tuple = as_tuple
self.dimensions = dimensions
self.zero_indexes = zero_indexes
@property
def hashable(self):
return self.as_tuple
@property
def python_type(self):
return list
def compare_values(self, x, y):
return x == y
def _set_parent(self, column, outer=False, **kw):
"""Support SchemaEventTarget"""
if not outer and isinstance(self.item_type, SchemaEventTarget):
self.item_type._set_parent(column, **kw)
def _set_parent_with_dispatch(self, parent):
"""Support SchemaEventTarget"""
super(ARRAY, self)._set_parent_with_dispatch(parent, outer=True)
if isinstance(self.item_type, SchemaEventTarget):
self.item_type._set_parent_with_dispatch(parent)
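# --- Editor's illustration (not part of the original module) ---
# A sketch of the ARRAY type and its index / any() operators described
# above.  As noted, only the PostgreSQL backend supports this type in
# SQLAlchemy itself; the names below are hypothetical.
def _example_array_usage():
    from sqlalchemy import ARRAY, Column, Integer, MetaData, Table, select

    metadata = MetaData()
    mytable = Table(
        "mytable",
        metadata,
        Column("id", Integer, primary_key=True),
        Column("data", ARRAY(Integer, dimensions=1)),
    )
    # index access (1-based on the SQL side unless zero_indexes=True)
    first_element = mytable.c.data[1]
    # renders "7 = ANY (data)"
    stmt = select(mytable).where(mytable.c.data.any(7))
    return first_element, stmt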
class TupleType(TypeEngine):
"""represent the composite type of a Tuple."""
_is_tuple_type = True
def __init__(self, *types):
self._fully_typed = NULLTYPE not in types
self.types = types
def _resolve_values_to_types(self, value):
if self._fully_typed:
return self
else:
return TupleType(
*[
_resolve_value_to_type(elem) if typ is NULLTYPE else typ
for typ, elem in zip(self.types, value)
]
)
def result_processor(self, dialect, coltype):
raise NotImplementedError(
"The tuple type does not support being fetched "
"as a column in a result row."
)
class REAL(Float):
"""The SQL REAL type."""
__visit_name__ = "REAL"
class FLOAT(Float):
"""The SQL FLOAT type."""
__visit_name__ = "FLOAT"
class NUMERIC(Numeric):
"""The SQL NUMERIC type."""
__visit_name__ = "NUMERIC"
class DECIMAL(Numeric):
"""The SQL DECIMAL type."""
__visit_name__ = "DECIMAL"
class INTEGER(Integer):
"""The SQL INT or INTEGER type."""
__visit_name__ = "INTEGER"
INT = INTEGER
class SMALLINT(SmallInteger):
"""The SQL SMALLINT type."""
__visit_name__ = "SMALLINT"
class BIGINT(BigInteger):
"""The SQL BIGINT type."""
__visit_name__ = "BIGINT"
class TIMESTAMP(DateTime):
"""The SQL TIMESTAMP type.
:class:`_types.TIMESTAMP` datatypes have support for timezone
storage on some backends, such as PostgreSQL and Oracle. Use the
:paramref:`~types.TIMESTAMP.timezone` argument in order to enable
"TIMESTAMP WITH TIMEZONE" for these backends.
"""
__visit_name__ = "TIMESTAMP"
def __init__(self, timezone=False):
"""Construct a new :class:`_types.TIMESTAMP`.
:param timezone: boolean. Indicates that the TIMESTAMP type should
enable timezone support, if available on the target database.
On a per-dialect basis, this is similar to "TIMESTAMP WITH TIMEZONE".
If the target database does not support timezones, this flag is
ignored.
"""
super(TIMESTAMP, self).__init__(timezone=timezone)
def get_dbapi_type(self, dbapi):
return dbapi.TIMESTAMP
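# --- Editor's illustration (not part of the original module) ---
# TIMESTAMP with timezone support as documented above; the table and column
# names are hypothetical.
def _example_timestamp_usage():
    from sqlalchemy import TIMESTAMP, Column, MetaData, Table

    metadata = MetaData()
    events = Table(
        "events",
        metadata,
        # renders "TIMESTAMP WITH TIME ZONE" on backends that support it
        Column("created_at", TIMESTAMP(timezone=True)),
    )
    return events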
class DATETIME(DateTime):
"""The SQL DATETIME type."""
__visit_name__ = "DATETIME"
class DATE(Date):
"""The SQL DATE type."""
__visit_name__ = "DATE"
class TIME(Time):
"""The SQL TIME type."""
__visit_name__ = "TIME"
class TEXT(Text):
"""The SQL TEXT type."""
__visit_name__ = "TEXT"
class CLOB(Text):
"""The CLOB type.
This type is found in Oracle and Informix.
"""
__visit_name__ = "CLOB"
class VARCHAR(String):
"""The SQL VARCHAR type."""
__visit_name__ = "VARCHAR"
class NVARCHAR(Unicode):
"""The SQL NVARCHAR type."""
__visit_name__ = "NVARCHAR"
class CHAR(String):
"""The SQL CHAR type."""
__visit_name__ = "CHAR"
class NCHAR(Unicode):
"""The SQL NCHAR type."""
__visit_name__ = "NCHAR"
class BLOB(LargeBinary):
"""The SQL BLOB type."""
__visit_name__ = "BLOB"
class BINARY(_Binary):
"""The SQL BINARY type."""
__visit_name__ = "BINARY"
class VARBINARY(_Binary):
"""The SQL VARBINARY type."""
__visit_name__ = "VARBINARY"
class BOOLEAN(Boolean):
"""The SQL BOOLEAN type."""
__visit_name__ = "BOOLEAN"
class NullType(TypeEngine):
"""An unknown type.
:class:`.NullType` is used as a default type for those cases where
a type cannot be determined, including:
* During table reflection, when the type of a column is not recognized
by the :class:`.Dialect`
* When constructing SQL expressions using plain Python objects of
unknown types (e.g. ``somecolumn == my_special_object``)
* When a new :class:`_schema.Column` is created,
and the given type is passed
as ``None`` or is not passed at all.
The :class:`.NullType` can be used within SQL expression invocation
without issue; it just has no behavior either at the expression
construction level or at the bind-parameter/result processing level.
:class:`.NullType` will result in a :exc:`.CompileError` if the compiler
is asked to render the type itself, such as if it is used in a
:func:`.cast` operation or within a schema creation operation such as that
invoked by :meth:`_schema.MetaData.create_all` or the
:class:`.CreateTable`
construct.
"""
__visit_name__ = "null"
_isnull = True
hashable = False
def literal_processor(self, dialect):
def process(value):
raise exc.CompileError(
"Don't know how to render literal SQL value: %r" % value
)
return process
class Comparator(TypeEngine.Comparator):
def _adapt_expression(self, op, other_comparator):
if isinstance(
other_comparator, NullType.Comparator
) or not operators.is_commutative(op):
return op, self.expr.type
else:
return other_comparator._adapt_expression(op, self)
comparator_factory = Comparator
class TableValueType(HasCacheKey, TypeEngine):
"""Refers to a table value type."""
_is_table_value = True
_traverse_internals = [
("_elements", InternalTraversal.dp_clauseelement_list),
]
def __init__(self, *elements):
self._elements = [
coercions.expect(roles.StrAsPlainColumnRole, elem)
for elem in elements
]
class MatchType(Boolean):
"""Refers to the return type of the MATCH operator.
As the :meth:`.ColumnOperators.match` is probably the most open-ended
operator in generic SQLAlchemy Core, we can't assume the return type
at SQL evaluation time, as MySQL returns a floating point, not a boolean,
and other backends might do something different. So this type
acts as a placeholder, currently subclassing :class:`.Boolean`.
The type allows dialects to inject result-processing functionality
if needed, and on MySQL will return floating-point values.
.. versionadded:: 1.0.0
"""
NULLTYPE = NullType()
BOOLEANTYPE = Boolean()
STRINGTYPE = String()
INTEGERTYPE = Integer()
MATCHTYPE = MatchType()
TABLEVALUE = TableValueType()
_type_map = {
int: Integer(),
float: Float(),
bool: BOOLEANTYPE,
decimal.Decimal: Numeric(),
dt.date: Date(),
dt.datetime: DateTime(),
dt.time: Time(),
dt.timedelta: Interval(),
util.NoneType: NULLTYPE,
}
if util.py3k:
_type_map[bytes] = LargeBinary() # noqa
_type_map[str] = Unicode()
else:
_type_map[unicode] = Unicode() # noqa
_type_map[str] = String()
_type_map_get = _type_map.get
def _resolve_value_to_type(value):
_result_type = _type_map_get(type(value), False)
if _result_type is False:
# use inspect() to detect SQLAlchemy built-in
# objects.
insp = inspection.inspect(value, False)
if (
insp is not None
and
# foil mock.Mock() and other impostors by ensuring
# the inspection target itself self-inspects
insp.__class__ in inspection._registrars
):
raise exc.ArgumentError(
"Object %r is not legal as a SQL literal value" % value
)
return NULLTYPE
else:
return _result_type
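# --- Editor's illustration (not part of the original module) ---
# A sketch of how the private _resolve_value_to_type() helper above maps
# plain Python literals to default SQL types; this is internal API and the
# helper name below is hypothetical.
def _example_literal_type_resolution():
    import datetime
    import decimal

    # each plain Python literal maps to a default type via _type_map;
    # unrecognized objects fall through to NULLTYPE
    resolved = {
        "int": _resolve_value_to_type(5),
        "decimal": _resolve_value_to_type(decimal.Decimal("1.5")),
        "date": _resolve_value_to_type(datetime.date(2021, 1, 1)),
        "unknown": _resolve_value_to_type(object()),
    }
    return resolved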
# back-assign to type_api
type_api.BOOLEANTYPE = BOOLEANTYPE
type_api.STRINGTYPE = STRINGTYPE
type_api.INTEGERTYPE = INTEGERTYPE
type_api.NULLTYPE = NULLTYPE
type_api.MATCHTYPE = MATCHTYPE
type_api.INDEXABLE = Indexable
type_api.TABLEVALUE = TABLEVALUE
type_api._resolve_value_to_type = _resolve_value_to_type
TypeEngine.Comparator.BOOLEANTYPE = BOOLEANTYPE
|
ericem/sprintkit
|
refs/heads/master
|
setup.py
|
1
|
from os import path
from setuptools import setup, find_packages
here = path.abspath(path.dirname(__file__))
README = open(path.join(here, 'README.rst')).read()
CHANGES = open(path.join(here, 'CHANGES')).read()
version = '0.2.0'
setup(
name='sprintkit',
version=version,
description="Access Sprint's Network APIs Through Python",
long_description=README + '\n\n' + CHANGES,
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Communications',
'Topic :: Software Development :: Libraries :: Python Modules'
],
keywords='communications',
author='Eric Miller',
author_email='eric.miller@sprint.com',
url='https://github.com/ericem/sprintkit',
license='MIT',
packages=find_packages('src'),
package_dir = {'': 'src'},
zip_safe=False,
setup_requires=['nose'],
install_requires=['restkit>=3.2']
)
|
proffalken/edison
|
refs/heads/master
|
piston/admin.py
|
1
|
# This file is part of the Edison Project.
# Please refer to the LICENSE document that was supplied with this software for information on how it can be used.
from django.contrib import admin
from piston.models import Nonce, Consumer, Token
admin.site.register(Nonce)
admin.site.register(Consumer)
admin.site.register(Token)
|
nzavagli/UnrealPy
|
refs/heads/master
|
UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/Python-2.7.10/Lib/tempfile.py
|
7
|
"""Temporary files.
This module provides generic, low- and high-level interfaces for
creating temporary files and directories. All of the interfaces
provided by this module can be used without fear of race conditions
except for 'mktemp'. 'mktemp' is subject to race conditions and
should not be used; it is provided for backward compatibility only.
This module also provides some data items to the user:
TMP_MAX - maximum number of names that will be tried before
giving up.
template - the default prefix for all temporary names.
You may change this to control the default prefix.
tempdir - If this is set to a string before the first use of
any routine from this module, it will be considered as
another candidate location to store temporary files.
"""
__all__ = [
"NamedTemporaryFile", "TemporaryFile", # high level safe interfaces
"SpooledTemporaryFile",
"mkstemp", "mkdtemp", # low level safe interfaces
"mktemp", # deprecated unsafe interface
"TMP_MAX", "gettempprefix", # constants
"tempdir", "gettempdir"
]
# Imports.
import io as _io
import os as _os
import errno as _errno
from random import Random as _Random
try:
from cStringIO import StringIO as _StringIO
except ImportError:
from StringIO import StringIO as _StringIO
try:
import fcntl as _fcntl
except ImportError:
def _set_cloexec(fd):
pass
else:
def _set_cloexec(fd):
try:
flags = _fcntl.fcntl(fd, _fcntl.F_GETFD, 0)
except IOError:
pass
else:
# flags read successfully, modify
flags |= _fcntl.FD_CLOEXEC
_fcntl.fcntl(fd, _fcntl.F_SETFD, flags)
try:
import thread as _thread
except ImportError:
import dummy_thread as _thread
_allocate_lock = _thread.allocate_lock
_text_openflags = _os.O_RDWR | _os.O_CREAT | _os.O_EXCL
if hasattr(_os, 'O_NOINHERIT'):
_text_openflags |= _os.O_NOINHERIT
if hasattr(_os, 'O_NOFOLLOW'):
_text_openflags |= _os.O_NOFOLLOW
_bin_openflags = _text_openflags
if hasattr(_os, 'O_BINARY'):
_bin_openflags |= _os.O_BINARY
if hasattr(_os, 'TMP_MAX'):
TMP_MAX = _os.TMP_MAX
else:
TMP_MAX = 10000
template = "tmp"
# Internal routines.
_once_lock = _allocate_lock()
if hasattr(_os, "lstat"):
_stat = _os.lstat
elif hasattr(_os, "stat"):
_stat = _os.stat
else:
# Fallback. All we need is something that raises os.error if the
# file doesn't exist.
def _stat(fn):
try:
f = open(fn)
except IOError:
raise _os.error
f.close()
def _exists(fn):
try:
_stat(fn)
except _os.error:
return False
else:
return True
class _RandomNameSequence:
"""An instance of _RandomNameSequence generates an endless
sequence of unpredictable strings which can safely be incorporated
into file names. Each string is six characters long. Multiple
threads can safely use the same instance at the same time.
_RandomNameSequence is an iterator."""
characters = ("abcdefghijklmnopqrstuvwxyz" +
"ABCDEFGHIJKLMNOPQRSTUVWXYZ" +
"0123456789_")
def __init__(self):
self.mutex = _allocate_lock()
self.normcase = _os.path.normcase
@property
def rng(self):
cur_pid = _os.getpid()
if cur_pid != getattr(self, '_rng_pid', None):
self._rng = _Random()
self._rng_pid = cur_pid
return self._rng
def __iter__(self):
return self
def next(self):
m = self.mutex
c = self.characters
choose = self.rng.choice
m.acquire()
try:
letters = [choose(c) for dummy in "123456"]
finally:
m.release()
return self.normcase(''.join(letters))
def _candidate_tempdir_list():
"""Generate a list of candidate temporary directories which
_get_default_tempdir will try."""
dirlist = []
# First, try the environment.
for envname in 'TMPDIR', 'TEMP', 'TMP':
dirname = _os.getenv(envname)
if dirname: dirlist.append(dirname)
# Failing that, try OS-specific locations.
if _os.name == 'riscos':
dirname = _os.getenv('Wimp$ScrapDir')
if dirname: dirlist.append(dirname)
elif _os.name == 'nt':
dirlist.extend([ r'c:\temp', r'c:\tmp', r'\temp', r'\tmp' ])
else:
dirlist.extend([ '/tmp', '/var/tmp', '/usr/tmp' ])
# As a last resort, the current directory.
try:
dirlist.append(_os.getcwd())
except (AttributeError, _os.error):
dirlist.append(_os.curdir)
return dirlist
def _get_default_tempdir():
"""Calculate the default directory to use for temporary files.
This routine should be called exactly once.
We determine whether or not a candidate temp dir is usable by
trying to create and write to a file in that directory. If this
is successful, the test file is deleted. To prevent denial of
service, the name of the test file must be randomized."""
namer = _RandomNameSequence()
dirlist = _candidate_tempdir_list()
flags = _text_openflags
for dir in dirlist:
if dir != _os.curdir:
dir = _os.path.normcase(_os.path.abspath(dir))
# Try only a few names per directory.
for seq in xrange(100):
name = namer.next()
filename = _os.path.join(dir, name)
try:
fd = _os.open(filename, flags, 0o600)
try:
try:
with _io.open(fd, 'wb', closefd=False) as fp:
fp.write(b'blat')
finally:
_os.close(fd)
finally:
_os.unlink(filename)
return dir
except (OSError, IOError) as e:
if e.args[0] != _errno.EEXIST:
break # no point trying more names in this directory
pass
raise IOError, (_errno.ENOENT,
("No usable temporary directory found in %s" % dirlist))
_name_sequence = None
def _get_candidate_names():
"""Common setup sequence for all user-callable interfaces."""
global _name_sequence
if _name_sequence is None:
_once_lock.acquire()
try:
if _name_sequence is None:
_name_sequence = _RandomNameSequence()
finally:
_once_lock.release()
return _name_sequence
def _mkstemp_inner(dir, pre, suf, flags):
"""Code common to mkstemp, TemporaryFile, and NamedTemporaryFile."""
names = _get_candidate_names()
for seq in xrange(TMP_MAX):
name = names.next()
file = _os.path.join(dir, pre + name + suf)
try:
fd = _os.open(file, flags, 0600)
_set_cloexec(fd)
return (fd, _os.path.abspath(file))
except OSError, e:
if e.errno == _errno.EEXIST:
continue # try again
if _os.name == 'nt' and e.errno == _errno.EACCES:
# On windows, when a directory with the chosen name already
# exists, EACCES error code is returned instead of EEXIST.
continue
raise
raise IOError, (_errno.EEXIST, "No usable temporary file name found")
# User visible interfaces.
def gettempprefix():
"""Accessor for tempdir.template."""
return template
tempdir = None
def gettempdir():
"""Accessor for tempfile.tempdir."""
global tempdir
if tempdir is None:
_once_lock.acquire()
try:
if tempdir is None:
tempdir = _get_default_tempdir()
finally:
_once_lock.release()
return tempdir
def mkstemp(suffix="", prefix=template, dir=None, text=False):
"""User-callable function to create and return a unique temporary
file. The return value is a pair (fd, name) where fd is the
file descriptor returned by os.open, and name is the filename.
If 'suffix' is specified, the file name will end with that suffix,
otherwise there will be no suffix.
If 'prefix' is specified, the file name will begin with that prefix,
otherwise a default prefix is used.
If 'dir' is specified, the file will be created in that directory,
otherwise a default directory is used.
If 'text' is specified and true, the file is opened in text
mode. Else (the default) the file is opened in binary mode. On
some operating systems, this makes no difference.
The file is readable and writable only by the creating user ID.
If the operating system uses permission bits to indicate whether a
file is executable, the file is executable by no one. The file
descriptor is not inherited by children of this process.
Caller is responsible for deleting the file when done with it.
"""
if dir is None:
dir = gettempdir()
if text:
flags = _text_openflags
else:
flags = _bin_openflags
return _mkstemp_inner(dir, prefix, suffix, flags)
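# Editor's note: a minimal usage sketch for mkstemp(), added for illustration;
# it is not part of the original module and the names below are made up.
def _example_mkstemp_usage():
    # mkstemp() returns an open descriptor plus the path and leaves cleanup
    # to the caller, so we close and unlink explicitly.
    fd, path = mkstemp(suffix=".txt", prefix="demo_")
    try:
        _os.write(fd, b"hello")
    finally:
        _os.close(fd)
        _os.unlink(path)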
def mkdtemp(suffix="", prefix=template, dir=None):
"""User-callable function to create and return a unique temporary
directory. The return value is the pathname of the directory.
Arguments are as for mkstemp, except that the 'text' argument is
not accepted.
The directory is readable, writable, and searchable only by the
creating user.
Caller is responsible for deleting the directory when done with it.
"""
if dir is None:
dir = gettempdir()
names = _get_candidate_names()
for seq in xrange(TMP_MAX):
name = names.next()
file = _os.path.join(dir, prefix + name + suffix)
try:
_os.mkdir(file, 0700)
return file
except OSError, e:
if e.errno == _errno.EEXIST:
continue # try again
raise
raise IOError, (_errno.EEXIST, "No usable temporary directory name found")
def mktemp(suffix="", prefix=template, dir=None):
"""User-callable function to return a unique temporary file name. The
file is not created.
Arguments are as for mkstemp, except that the 'text' argument is
not accepted.
This function is unsafe and should not be used. The file name
refers to a file that did not exist at some point, but by the time
you get around to creating it, someone else may have beaten you to
the punch.
"""
## from warnings import warn as _warn
## _warn("mktemp is a potential security risk to your program",
## RuntimeWarning, stacklevel=2)
if dir is None:
dir = gettempdir()
names = _get_candidate_names()
for seq in xrange(TMP_MAX):
name = names.next()
file = _os.path.join(dir, prefix + name + suffix)
if not _exists(file):
return file
raise IOError, (_errno.EEXIST, "No usable temporary filename found")
class _TemporaryFileWrapper:
"""Temporary file wrapper
This class provides a wrapper around files opened for
temporary use. In particular, it seeks to automatically
remove the file when it is no longer needed.
"""
def __init__(self, file, name, delete=True):
self.file = file
self.name = name
self.close_called = False
self.delete = delete
def __getattr__(self, name):
# Attribute lookups are delegated to the underlying file
# and cached for non-numeric results
# (i.e. methods are cached, closed and friends are not)
file = self.__dict__['file']
a = getattr(file, name)
if not issubclass(type(a), type(0)):
setattr(self, name, a)
return a
# The underlying __enter__ method returns the wrong object
# (self.file) so override it to return the wrapper
def __enter__(self):
self.file.__enter__()
return self
# NT provides delete-on-close as a primitive, so we don't need
# the wrapper to do anything special. We still use it so that
# file.name is useful (i.e. not "(fdopen)") with NamedTemporaryFile.
if _os.name != 'nt':
# Cache the unlinker so we don't get spurious errors at
# shutdown when the module-level "os" is None'd out. Note
# that this must be referenced as self.unlink, because the
# name TemporaryFileWrapper may also get None'd out before
# __del__ is called.
unlink = _os.unlink
def close(self):
if not self.close_called:
self.close_called = True
try:
self.file.close()
finally:
if self.delete:
self.unlink(self.name)
def __del__(self):
self.close()
# Need to trap __exit__ as well to ensure the file gets
# deleted when used in a with statement
def __exit__(self, exc, value, tb):
result = self.file.__exit__(exc, value, tb)
self.close()
return result
else:
def __exit__(self, exc, value, tb):
self.file.__exit__(exc, value, tb)
def NamedTemporaryFile(mode='w+b', bufsize=-1, suffix="",
prefix=template, dir=None, delete=True):
"""Create and return a temporary file.
Arguments:
'prefix', 'suffix', 'dir' -- as for mkstemp.
'mode' -- the mode argument to os.fdopen (default "w+b").
'bufsize' -- the buffer size argument to os.fdopen (default -1).
'delete' -- whether the file is deleted on close (default True).
The file is created as mkstemp() would do it.
Returns an object with a file-like interface; the name of the file
is accessible as file.name. The file will be automatically deleted
when it is closed unless the 'delete' argument is set to False.
"""
if dir is None:
dir = gettempdir()
if 'b' in mode:
flags = _bin_openflags
else:
flags = _text_openflags
# Setting O_TEMPORARY in the flags causes the OS to delete
# the file when it is closed. This is only supported by Windows.
if _os.name == 'nt' and delete:
flags |= _os.O_TEMPORARY
(fd, name) = _mkstemp_inner(dir, prefix, suffix, flags)
try:
file = _os.fdopen(fd, mode, bufsize)
return _TemporaryFileWrapper(file, name, delete)
except:
_os.close(fd)
raise
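# Editor's note: a usage sketch for NamedTemporaryFile(), added for
# illustration only. With delete=True (the default) the file is removed as
# soon as it is closed, so the returned name is only valid inside the block.
def _example_named_tempfile_usage():
    with NamedTemporaryFile(suffix=".log") as tmp:
        tmp.write(b"scratch data")
        tmp.flush()
        name = tmp.name  # a real filesystem path while the file is open
    return name          # the file no longer exists at this point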
if _os.name != 'posix' or _os.sys.platform == 'cygwin':
# On non-POSIX and Cygwin systems, assume that we cannot unlink a file
# while it is open.
TemporaryFile = NamedTemporaryFile
else:
def TemporaryFile(mode='w+b', bufsize=-1, suffix="",
prefix=template, dir=None):
"""Create and return a temporary file.
Arguments:
'prefix', 'suffix', 'dir' -- as for mkstemp.
'mode' -- the mode argument to os.fdopen (default "w+b").
'bufsize' -- the buffer size argument to os.fdopen (default -1).
The file is created as mkstemp() would do it.
Returns an object with a file-like interface. The file has no
name, and will cease to exist when it is closed.
"""
if dir is None:
dir = gettempdir()
if 'b' in mode:
flags = _bin_openflags
else:
flags = _text_openflags
(fd, name) = _mkstemp_inner(dir, prefix, suffix, flags)
try:
_os.unlink(name)
return _os.fdopen(fd, mode, bufsize)
except:
_os.close(fd)
raise
class SpooledTemporaryFile:
"""Temporary file wrapper, specialized to switch from
StringIO to a real file when it exceeds a certain size or
when a fileno is needed.
"""
_rolled = False
def __init__(self, max_size=0, mode='w+b', bufsize=-1,
suffix="", prefix=template, dir=None):
self._file = _StringIO()
self._max_size = max_size
self._rolled = False
self._TemporaryFileArgs = (mode, bufsize, suffix, prefix, dir)
def _check(self, file):
if self._rolled: return
max_size = self._max_size
if max_size and file.tell() > max_size:
self.rollover()
def rollover(self):
if self._rolled: return
file = self._file
newfile = self._file = TemporaryFile(*self._TemporaryFileArgs)
del self._TemporaryFileArgs
newfile.write(file.getvalue())
newfile.seek(file.tell(), 0)
self._rolled = True
# The method caching trick from NamedTemporaryFile
# won't work here, because _file may change from a
# _StringIO instance to a real file. So we list
# all the methods directly.
# Context management protocol
def __enter__(self):
if self._file.closed:
raise ValueError("Cannot enter context with closed file")
return self
def __exit__(self, exc, value, tb):
self._file.close()
# file protocol
def __iter__(self):
return self._file.__iter__()
def close(self):
self._file.close()
@property
def closed(self):
return self._file.closed
def fileno(self):
self.rollover()
return self._file.fileno()
def flush(self):
self._file.flush()
def isatty(self):
return self._file.isatty()
@property
def mode(self):
try:
return self._file.mode
except AttributeError:
return self._TemporaryFileArgs[0]
@property
def name(self):
try:
return self._file.name
except AttributeError:
return None
def next(self):
        return self._file.next()
def read(self, *args):
return self._file.read(*args)
def readline(self, *args):
return self._file.readline(*args)
def readlines(self, *args):
return self._file.readlines(*args)
def seek(self, *args):
self._file.seek(*args)
@property
def softspace(self):
return self._file.softspace
def tell(self):
return self._file.tell()
def truncate(self):
self._file.truncate()
def write(self, s):
file = self._file
rv = file.write(s)
self._check(file)
return rv
def writelines(self, iterable):
file = self._file
rv = file.writelines(iterable)
self._check(file)
return rv
def xreadlines(self, *args):
if hasattr(self._file, 'xreadlines'): # real file
return iter(self._file)
else: # StringIO()
return iter(self._file.readlines(*args))
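# Editor's note: an illustration of the spooling behaviour described in the
# class docstring above; not part of the original module. Writing past
# max_size triggers rollover(), replacing the in-memory buffer with a real
# temporary file.
def _example_spooled_tempfile_usage():
    spool = SpooledTemporaryFile(max_size=16)
    spool.write(b"0123456789")   # 10 bytes: still held in memory
    before = spool._rolled       # False
    spool.write(b"0123456789")   # now past max_size: rolled over to disk
    after = spool._rolled        # True
    spool.close()
    return before, after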
|
eteq/ginga
|
refs/heads/staging
|
ginga/LayerImage.py
|
3
|
#
# LayerImage.py -- Abstraction of a generic layered image.
#
# Eric Jeschke (eric@naoj.org)
#
# Copyright (c) Eric R. Jeschke. All rights reserved.
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
import numpy
import time
from ginga import BaseImage
from ginga.misc import Bunch
class LayerImage(object):
"""Mixin class for BaseImage subclasses. Adds layers and alpha/rgb
compositing.
"""
def __init__(self):
self._layer = []
self.cnt = 0
self.compose_types = ('alpha', 'rgb')
self.compose = 'alpha'
def _insert_layer(self, idx, image, alpha=None, name=None):
if alpha is None:
alpha = 1.0
if name is None:
name = "layer%d" % (self.cnt)
self.cnt += 1
bnch = Bunch.Bunch(image=image, alpha=alpha, name=name)
self._layer.insert(idx, bnch)
def insert_layer(self, idx, image, alpha=None, name=None,
compose=True):
self._insert_layer(idx, image, alpha=alpha, name=name)
if compose:
self.compose_layers()
def set_layer(self, idx, image, alpha=None, name=None,
compose=True):
self.delete_layer(idx, compose=False)
self._insert_layer(idx, image, alpha=alpha, name=name)
if compose:
self.compose_layers()
def delete_layer(self, idx, compose=True):
self._layer.pop(idx)
if compose:
self.compose_layers()
def get_layer(self, idx):
return self._layer[idx]
def num_layers(self):
return len(self._layer)
def get_max_shape(self, entity='image'):
maxdim = -1
maxshape = ()
for layer in self._layer:
if entity == 'image':
shape = layer[entity].get_shape()
elif entity == 'alpha':
item = layer.alpha
# If alpha is an image, get the array
if isinstance(item, BaseImage.BaseImage):
item = layer.alpha.get_data()
shape = numpy.shape(item)
else:
raise BaseImage.ImageError("entity '%s' not in (image, alpha)" % (
entity))
if len(shape) > maxdim:
maxdim = len(shape)
maxshape = shape
return maxshape
## def alpha_combine(self, src, alpha, dst):
## return (src * alpha) + (dst * (1.0 - alpha))
def mono2color(self, data):
return numpy.dstack((data, data, data))
def alpha_multiply(self, alpha, data, shape=None):
"""(alpha) can be a scalar or an array.
"""
# alpha can be a scalar or an array
if shape is None:
shape = data.shape
if len(data.shape) == 2:
res = alpha * data
# If desired shape is monochrome then return a mono image
# otherwise broadcast to a grey color image.
if len(shape) == 2:
return res
# note: in timing tests, dstack was not as efficient here...
#data = numpy.dstack((res, res, res))
data = numpy.empty(shape)
data[:, :, 0] = res[:, :]
data[:, :, 1] = res[:, :]
data[:, :, 2] = res[:, :]
return data
else:
# note: in timing tests, dstack was not as efficient here...
#res = numpy.dstack((data[:, :, 0] * alpha,
# data[:, :, 1] * alpha,
# data[:, :, 2] * alpha))
res = numpy.empty(shape)
res[:, :, 0] = data[:, :, 0] * alpha
res[:, :, 1] = data[:, :, 1] * alpha
res[:, :, 2] = data[:, :, 2] * alpha
return res
def alpha_compose(self):
start_time = time.time()
shape = self.get_max_shape()
## ht, wd = shape[:2]
## # alpha can be a scalar or an array, prepare for the appropriate kind
## ashape = self.get_max_shape(entity='alpha')
## if len(ashape) == 0:
## alpha_used = 0.0
## else:
## alpha_used = numpy.zeros((ht, wd))
# result holds the result of the composition
result = numpy.zeros(shape)
cnt = 0
for layer in self._layer:
alpha = layer.alpha
if isinstance(alpha, BaseImage.BaseImage):
alpha = alpha.get_data()
#alpha = numpy.clip((1.0 - alpha_used) * alpha, 0.0, 1.0)
#mina = numpy.min(alpha)
#print "cnt=%d mina=%f" % (cnt, mina)
data = layer.image.get_data()
result += self.alpha_multiply(alpha, data, shape=shape)
## alpha_used += layer.alpha
#numpy.clip(alpha_used, 0.0, 1.0)
cnt += 1
self.set_data(result)
end_time = time.time()
self.logger.debug("alpha compose=%.4f sec" % (end_time - start_time))
# def rgb_compose(self):
# slices = []
# start_time = time.time()
# for i in xrange(len(self._layer)):
# layer = self.get_layer(i)
# data = self.alpha_multiply(layer.alpha, layer.image.get_data())
# slices.append(data)
# split_time = time.time()
# result = numpy.dstack(slices)
# end_time = time.time()
# self.set_data(result)
# print "rgb_compose alpha multiply=%.4f sec dstack=%.4f sec sec total=%.4f sec" % (
# split_time - start_time, end_time - split_time,
# end_time - start_time)
def rgb_compose(self):
#num = self.num_layers()
num = 3
layer = self.get_layer(0)
wd, ht = layer.image.get_size()
result = numpy.empty((ht, wd, num))
start_time = time.time()
for i in range(len(self._layer)):
layer = self.get_layer(i)
alpha = layer.alpha
if isinstance(alpha, BaseImage.BaseImage):
alpha = alpha.get_data()
data = self.alpha_multiply(alpha, layer.image.get_data())
result[:, :, i] = data[:, :]
end_time = time.time()
self.set_data(result)
self.logger.debug("rgb_compose total=%.4f sec" % (
end_time - start_time))
def rgb_decompose(self, image):
data = image.get_data()
shape = data.shape
if len(shape) == 2:
self._insert_layer(0, image)
else:
names = ("Red", "Green", "Blue")
alphas = (0.292, 0.594, 0.114)
for i in range(shape[2]):
imgslice = data[:, :, i]
#img = BaseImage.BaseImage(data_np=imgslice, logger=self.logger)
# Create the same type of image as we are decomposing
img = image.__class__(data_np=imgslice, logger=self.logger)
if i < 3:
name = names[i]
alpha = alphas[i]
else:
name = "layer%d" % i
alpha = 0.0
self._insert_layer(i, img, name=name, alpha=alpha)
self.compose_layers()
def set_compose_type(self, ctype):
assert ctype in self.compose_types, \
BaseImage.ImageError("Bad compose type '%s': must be one of %s" % (
ctype, str(self.compose_types)))
self.compose = ctype
self.compose_layers()
def set_alpha(self, lidx, val):
layer = self._layer[lidx]
layer.alpha = val
self.compose_layers()
def set_alphas(self, vals):
for lidx in range(len(vals)):
layer = self._layer[lidx]
layer.alpha = vals[lidx]
self.compose_layers()
def compose_layers(self):
if self.compose == 'rgb':
self.rgb_compose()
else:
self.alpha_compose()
#END
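# Editor's sketch (not part of ginga): a standalone numpy illustration of the
# weighted-sum compositing that alpha_multiply()/alpha_compose() perform
# above. The array values and alpha weights are made up for demonstration.
def _example_alpha_sum():
    layer_a = numpy.full((2, 2, 3), 0.8)
    layer_b = numpy.full((2, 2, 3), 0.2)
    # each layer contributes alpha * data and the contributions are summed
    return 0.6 * layer_a + 0.4 * layer_b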
|
MattsFleaMarket/python-for-android
|
refs/heads/master
|
python-modules/twisted/twisted/trial/test/weird.py
|
82
|
from twisted.trial import unittest
from twisted.internet import defer
# Used in test_tests.TestUnhandledDeferred
class TestBleeding(unittest.TestCase):
"""This test creates an unhandled Deferred and leaves it in a cycle.
The Deferred is left in a cycle so that the garbage collector won't pick it
up immediately. We were having some problems where unhandled Deferreds in
one test were failing random other tests. (See #1507, #1213)
"""
def test_unhandledDeferred(self):
try:
1/0
except ZeroDivisionError:
f = defer.fail()
# these two lines create the cycle. don't remove them
l = [f]
l.append(l)
|
MackZxh/OCA-Choice
|
refs/heads/8.0
|
hr/hr_worked_days_from_timesheet/__openerp__.py
|
19
|
# -*- coding:utf-8 -*-
##############################################################################
#
# Copyright (C) 2014 Odoo Canada. All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Worked Days From Timesheet',
'version': '1.0',
'license': 'AGPL-3',
'category': 'Generic Modules/Human Resources',
'description': """
Worked Days From Timesheet
==========================
* Adds a button to import worked days from timesheet
Contributors
------------
* David Dufresne <david.dufresne@savoirfairelinux.com>
* Pierre Lamarche <pierre.lamarche@savoirfairelinux.com>
""",
'author': "Savoir-faire Linux,Odoo Community Association (OCA)",
'website': 'https://www.savoirfairelinux.com/',
'depends': [
'hr_payroll',
'hr_timesheet_sheet',
],
'data': [
'hr_payslip_view.xml'
],
'test': [
'test/worked_days_from_timesheet_test.yml'
],
'demo': [],
'installable': False,
}
|
tux-00/ansible
|
refs/heads/devel
|
test/units/plugins/action/test_synchronize.py
|
8
|
'''
(Epdb) pprint(DeepDiff(self.final_task_vars, out_task_vars), indent=2)
{ 'dic_item_added': set([u"root['ansible_python_interpreter']"]),
'dic_item_removed': set([ u"root['hostvars']['127.0.0.1']",
u"root['hostvars']['::1']",
u"root['hostvars']['localhost']"]),
'iterable_item_added': { u"root['hostvars']['el6host']['groups']['all'][1]": u'::1',
u"root['hostvars']['el6host']['groups']['ungrouped'][1]": u'::1',
u"root['vars']['hostvars']['el6host']['groups']['all'][1]": u'::1',
u"root['vars']['hostvars']['el6host']['groups']['ungrouped'][1]": u'::1'}}
'''
import json
import os
import sys
import unittest
import yaml
from pprint import pprint
import ansible.plugins
from ansible.compat.tests.mock import patch, MagicMock
from ansible.plugins.action.synchronize import ActionModule
# Getting the incoming and outgoing task vars from the plugin's run method
'''
import copy
safe_vars = {}
for k,v in task_vars.items():
if k not in ['vars', 'hostvars']:
safe_vars[k] = copy.deepcopy(v)
else:
sdata = str(v)
newv = eval(sdata)
safe_vars[k] = newv
import json
with open('task_vars.json', 'wb') as f:
f.write(json.dumps(safe_vars, indent=2))
'''
class TaskMock(object):
args = {'src': u'/tmp/deleteme',
'dest': '/tmp/deleteme',
'rsync_path': 'rsync'}
async = None
become = None
become_user = None
become_method = None
class StdinMock(object):
shell = None
class ConnectionMock(object):
ismock = True
_play_context = None
# transport = 'ssh'
transport = None
_new_stdin = StdinMock()
class PlayContextMock(object):
shell = None
private_key_file = None
become = False
become_user = 'root'
become_method = None
check_mode = False
no_log = None
diff = None
remote_addr = None
remote_user = None
password = None
class ModuleLoaderMock(object):
def find_plugin(self, module_name, mod_type):
pass
class SharedLoaderMock(object):
module_loader = ModuleLoaderMock()
class SynchronizeTester(object):
''' A wrapper for mocking out synchronize environments '''
task = TaskMock()
connection = ConnectionMock()
_play_context = PlayContextMock()
loader = None
templar = None
shared_loader_obj = SharedLoaderMock()
final_task_vars = None
execute_called = False
def _execute_module(self, module_name, module_args=None, task_vars=None):
self.execute_called = True
self.final_module_args = module_args
self.final_task_vars = task_vars
return {}
def runtest(self, fixturepath='fixtures/synchronize/basic'):
metapath = os.path.join(fixturepath, 'meta.yaml')
with open(metapath, 'rb') as f:
fdata = f.read()
test_meta = yaml.load(fdata)
        # load initial play context vars
if '_play_context' in test_meta:
if test_meta['_play_context']:
self.task.args = {}
for (k, v) in test_meta['_play_context'].items():
if v == 'None':
v = None
setattr(self._play_context, k, v)
        # load initial task context vars
if '_task' in test_meta:
if test_meta['_task']:
self.task.args = {}
for (k, v) in test_meta['_task'].items():
# import epdb; epdb.st()
if v == 'None':
v = None
setattr(self.task, k, v)
        # load initial task args
if 'task_args' in test_meta:
if test_meta['task_args']:
self.task.args = {}
for (k, v) in test_meta['task_args'].items():
self.task.args[k] = v
        # load initial task vars from the fixture file
invarspath = os.path.join(fixturepath, test_meta.get('fixtures', {}).get('taskvars_in', 'taskvars_in.json'))
with open(invarspath, 'rb') as f:
fdata = f.read()
fdata = fdata.decode("utf-8")
in_task_vars = json.loads(fdata)
# load expected final task vars
outvarspath = os.path.join(fixturepath, test_meta.get('fixtures', {}).get('taskvars_out', 'taskvars_out.json'))
with open(outvarspath, 'rb') as f:
fdata = f.read()
fdata = fdata.decode("utf-8")
out_task_vars = json.loads(fdata)
# fixup the connection
for (k, v) in test_meta['connection'].items():
setattr(self.connection, k, v)
# fixup the hostvars
if test_meta['hostvars']:
for (k, v) in test_meta['hostvars'].items():
in_task_vars['hostvars'][k] = v
        # initialize and run the module
SAM = ActionModule(self.task, self.connection, self._play_context,
self.loader, self.templar, self.shared_loader_obj)
SAM._execute_module = self._execute_module
result = SAM.run(task_vars=in_task_vars)
# run assertions
for check in test_meta['asserts']:
value = eval(check)
# if not value:
# print(check, value)
# import epdb; epdb.st()
assert value, check
class FakePluginLoader(object):
mocked = True
@staticmethod
def get(transport, play_context, new_stdin):
conn = ConnectionMock()
conn.transport = transport
conn._play_context = play_context
conn._new_stdin = new_stdin
return conn
class TestSynchronizeAction(unittest.TestCase):
fixturedir = os.path.dirname(__file__)
fixturedir = os.path.join(fixturedir, 'fixtures', 'synchronize')
# print(basedir)
@patch('ansible.plugins.action.synchronize.connection_loader', FakePluginLoader)
def test_basic(self):
x = SynchronizeTester()
x.runtest(fixturepath=os.path.join(self.fixturedir, 'basic'))
@patch('ansible.plugins.action.synchronize.connection_loader', FakePluginLoader)
def test_basic_become(self):
x = SynchronizeTester()
x.runtest(fixturepath=os.path.join(self.fixturedir, 'basic_become'))
@patch('ansible.plugins.action.synchronize.connection_loader', FakePluginLoader)
def test_basic_become_cli(self):
# --become on the cli sets _play_context.become
x = SynchronizeTester()
x.runtest(fixturepath=os.path.join(self.fixturedir, 'basic_become_cli'))
@patch('ansible.plugins.action.synchronize.connection_loader', FakePluginLoader)
def test_basic_vagrant(self):
# simple vagrant example
x = SynchronizeTester()
x.runtest(fixturepath=os.path.join(self.fixturedir, 'basic_vagrant'))
@patch('ansible.plugins.action.synchronize.connection_loader', FakePluginLoader)
def test_basic_vagrant_sudo(self):
# vagrant plus sudo
x = SynchronizeTester()
x.runtest(fixturepath=os.path.join(self.fixturedir, 'basic_vagrant_sudo'))
@patch('ansible.plugins.action.synchronize.connection_loader', FakePluginLoader)
def test_basic_vagrant_become_cli(self):
# vagrant plus sudo
x = SynchronizeTester()
x.runtest(fixturepath=os.path.join(self.fixturedir, 'basic_vagrant_become_cli'))
@patch('ansible.plugins.action.synchronize.connection_loader', FakePluginLoader)
def test_delegate_remote(self):
# delegate to other remote host
x = SynchronizeTester()
x.runtest(fixturepath=os.path.join(self.fixturedir, 'delegate_remote'))
@patch('ansible.plugins.action.synchronize.connection_loader', FakePluginLoader)
def test_delegate_remote_su(self):
# delegate to other remote host with su enabled
x = SynchronizeTester()
x.runtest(fixturepath=os.path.join(self.fixturedir, 'delegate_remote_su'))
|
2014c2g4/2015cda_g7
|
refs/heads/master
|
static/Brython3.1.0-20150301-090019/Lib/webbrowser.py
|
735
|
from browser import window
__all__ = ["Error", "open", "open_new", "open_new_tab"]
class Error(Exception):
pass
_target = { 0: '', 1: '_blank', 2: '_new' } # hack...
def open(url, new=0, autoraise=True):
"""
    A new window or tab is not controllable
    on the client side; autoraise is not available.
"""
if window.open(url, _target[new]):
return True
return False
def open_new(url):
return open(url, 1)
def open_new_tab(url):
return open(url, 2)
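# Editor's sketch (assumes a Brython/browser environment where window.open
# is available); illustrative only, not part of the original module.
def _example_open_tab():
    # open() returns True when the browser allows the new tab to be opened
    return open_new_tab('https://www.example.org/')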
|
hamishwillee/ardupilot
|
refs/heads/master
|
libraries/SITL/examples/Morse/rover_follow.py
|
35
|
'''
This is an example builder script that sets up a set of rovers to
be driven by ArduPilot for demonstrating follow mode.
Each rover has the basic set of sensors that ArduPilot needs.
To start the simulation, use this:
morse run rover_follow.py
'''
from morse.builder import *
num_vehicles = 3
for i in range(num_vehicles):
vehicle = ATRV('Vehicle%u' % i)
vehicle.properties(Object = True, Graspable = False, Label = "Vehicle")
# set rovers 3 meters apart
vehicle.translate(x=0.0, y=3*i, z=0.0)
# add sensors needed for ArduPilot operation to a vehicle
pose = Pose()
vehicle.append(pose)
imu = IMU()
vehicle.append(imu)
gps = GPS()
gps.alter('UTM')
vehicle.append(gps)
velocity = Velocity()
vehicle.append(velocity)
# create a compound sensor of all of the individual sensors and stream it
all_sensors = CompoundSensor([imu, gps, velocity, pose])
all_sensors.add_stream('socket')
vehicle.append(all_sensors)
# make the vehicle controllable with speed and angular velocity
motion = MotionVW()
vehicle.append(motion)
motion.add_stream('socket')
# Environment
env = Environment('land-1/trees', fastmode=False)
env.set_camera_location([10.0, -10.0, 10.0])
env.set_camera_rotation([1.0470, 0, 0.7854])
env.set_camera_clip(clip_end=1000)
# startup at CMAC. A location is needed for the magnetometer
env.properties(longitude = 149.165230, latitude = -35.363261, altitude = 584.0)
|
jdowner/qtile
|
refs/heads/develop
|
libqtile/widget/graph.py
|
6
|
# Copyright (c) 2010 Aldo Cortesi
# Copyright (c) 2010-2011 Paul Colomiets
# Copyright (c) 2010, 2014 roger
# Copyright (c) 2011 Mounier Florian
# Copyright (c) 2011 Kenji_Takahashi
# Copyright (c) 2012 Mika Fischer
# Copyright (c) 2012, 2014-2015 Tycho Andersen
# Copyright (c) 2012-2013 Craig Barnes
# Copyright (c) 2013 dequis
# Copyright (c) 2013 Tao Sauvage
# Copyright (c) 2013 Mickael FALCK
# Copyright (c) 2014 Sean Vig
# Copyright (c) 2014 Adi Sieker
# Copyright (c) 2014 Florian Scherf
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import cairocffi
from . import base
from libqtile.log_utils import logger
from os import statvfs
import time
import platform
__all__ = [
'CPUGraph',
'MemoryGraph',
'SwapGraph',
'NetGraph',
'HDDGraph',
'HDDBusyGraph',
]
class _Graph(base._Widget):
fixed_upper_bound = False
defaults = [
("graph_color", "18BAEB", "Graph color"),
("fill_color", "1667EB.3", "Fill color for linefill graph"),
("border_color", "215578", "Widget border color"),
("border_width", 2, "Widget border width"),
("margin_x", 3, "Margin X"),
("margin_y", 3, "Margin Y"),
("samples", 100, "Count of graph samples."),
("frequency", 1, "Update frequency in seconds"),
("type", "linefill", "'box', 'line', 'linefill'"),
("line_width", 3, "Line width"),
("start_pos", "bottom", "Drawer starting position ('bottom'/'top')"),
]
def __init__(self, width=100, **config):
base._Widget.__init__(self, width, **config)
self.add_defaults(_Graph.defaults)
self.values = [0] * self.samples
self.maxvalue = 0
self.oldtime = time.time()
self.lag_cycles = 0
def timer_setup(self):
self.timeout_add(self.frequency, self.update)
@property
def graphwidth(self):
return self.width - self.border_width * 2 - self.margin_x * 2
@property
def graphheight(self):
return self.bar.height - self.margin_y * 2 - self.border_width * 2
def draw_box(self, x, y, values):
step = self.graphwidth / float(self.samples)
self.drawer.set_source_rgb(self.graph_color)
for val in values:
val = self.val(val)
self.drawer.fillrect(x, y - val, step, val)
x += step
def draw_line(self, x, y, values):
step = self.graphwidth / float(self.samples - 1)
self.drawer.ctx.set_line_join(cairocffi.LINE_JOIN_ROUND)
self.drawer.set_source_rgb(self.graph_color)
self.drawer.ctx.set_line_width(self.line_width)
for val in values:
self.drawer.ctx.line_to(x, y - self.val(val))
x += step
self.drawer.ctx.stroke()
def draw_linefill(self, x, y, values):
step = self.graphwidth / float(self.samples - 2)
self.drawer.ctx.set_line_join(cairocffi.LINE_JOIN_ROUND)
self.drawer.set_source_rgb(self.graph_color)
self.drawer.ctx.set_line_width(self.line_width)
for index, val in enumerate(values):
self.drawer.ctx.line_to(x + index * step, y - self.val(val))
self.drawer.ctx.stroke_preserve()
self.drawer.ctx.line_to(
x + (len(values) - 1) * step,
y - 1 + self.line_width / 2.0
)
self.drawer.ctx.line_to(x, y - 1 + self.line_width / 2.0)
self.drawer.set_source_rgb(self.fill_color)
self.drawer.ctx.fill()
def val(self, val):
if self.start_pos == 'bottom':
return val
elif self.start_pos == 'top':
return -val
else:
raise ValueError("Unknown starting position: %s." % self.start_pos)
def draw(self):
self.drawer.clear(self.background or self.bar.background)
if self.border_width:
self.drawer.set_source_rgb(self.border_color)
self.drawer.ctx.set_line_width(self.border_width)
self.drawer.ctx.rectangle(
self.margin_x + self.border_width / 2.0,
self.margin_y + self.border_width / 2.0,
self.graphwidth + self.border_width,
self.bar.height - self.margin_y * 2 - self.border_width,
)
self.drawer.ctx.stroke()
x = self.margin_x + self.border_width
y = self.margin_y + self.border_width
if self.start_pos == 'bottom':
y += self.graphheight
elif not self.start_pos == 'top':
raise ValueError("Unknown starting position: %s." % self.start_pos)
k = 1.0 / (self.maxvalue or 1)
scaled = [self.graphheight * val * k for val in reversed(self.values)]
if self.type == "box":
self.draw_box(x, y, scaled)
elif self.type == "line":
self.draw_line(x, y, scaled)
elif self.type == "linefill":
self.draw_linefill(x, y, scaled)
else:
raise ValueError("Unknown graph type: %s." % self.type)
self.drawer.draw(offsetx=self.offset, width=self.width)
def push(self, value):
if self.lag_cycles > self.samples:
# compensate lag by sending the same value up to
# the graph samples limit
self.lag_cycles = 1
self.values = ([value] * min(self.samples, self.lag_cycles)) + self.values
self.values = self.values[:self.samples]
if not self.fixed_upper_bound:
self.maxvalue = max(self.values)
self.draw()
def update(self):
# lag detection
newtime = time.time()
self.lag_cycles = int((newtime - self.oldtime) / self.frequency)
self.oldtime = newtime
self.update_graph()
self.timeout_add(self.frequency, self.update)
def fulfill(self, value):
self.values = [value] * len(self.values)
class CPUGraph(_Graph):
"""Display CPU usage graph"""
orientations = base.ORIENTATION_HORIZONTAL
defaults = [
("core", "all", "Which core to show (all/0/1/2/...)"),
]
fixed_upper_bound = True
def __init__(self, **config):
_Graph.__init__(self, **config)
self.add_defaults(CPUGraph.defaults)
self.maxvalue = 100
self.oldvalues = self._getvalues()
def _getvalues(self):
proc = '/proc/stat'
if platform.system() == "FreeBSD":
proc = '/compat/linux' + proc
with open(proc) as file:
lines = file.readlines()
# default to all cores (first line)
line = lines.pop(0)
# core specified, grab the corresponding line
if isinstance(self.core, int):
# we already removed the first line from the list,
# so it's 0 indexed now :D
line = lines[self.core]
if not line.startswith("cpu%s" % self.core):
raise ValueError("No such core: %s" % self.core)
if platform.system() == 'FreeBSD':
name, user, nice, sys, idle = line.split(None, 4)
else:
name, user, nice, sys, idle, iowait, tail = line.split(None, 6)
return (int(user), int(nice), int(sys), int(idle))
def update_graph(self):
nval = self._getvalues()
oval = self.oldvalues
busy = nval[0] + nval[1] + nval[2] - oval[0] - oval[1] - oval[2]
total = busy + nval[3] - oval[3]
        # sometimes this value is zero for an unknown reason (time shift?)
        # we just send the previous value, because it gives us no info about
        # cpu load if it's zero.
if total:
push_value = busy * 100.0 / total
self.push(push_value)
else:
self.push(self.values[0])
self.oldvalues = nval
def get_meminfo():
val = {}
proc = '/proc/meminfo'
if platform.system() == "FreeBSD":
proc = "/compat/linux" + proc
with open(proc) as file:
for line in file:
if line.lstrip().startswith("total"):
pass
else:
key, tail = line.strip().split(':')
uv = tail.split()
val[key] = int(uv[0])
val['MemUsed'] = val['MemTotal'] - val['MemFree']
return val
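# Editor's sketch (assumes Linux with /proc/meminfo, just like get_meminfo()
# itself); shows how the returned dict might be consumed. Not part of the
# original widget code.
def _example_meminfo_usage():
    info = get_meminfo()
    # values are kB figures parsed straight from /proc/meminfo
    return info['MemUsed'], info['MemTotal']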
class MemoryGraph(_Graph):
"""Displays a memory usage graph"""
orientations = base.ORIENTATION_HORIZONTAL
fixed_upper_bound = True
def __init__(self, **config):
_Graph.__init__(self, **config)
val = self._getvalues()
self.maxvalue = val['MemTotal']
mem = val['MemTotal'] - val['MemFree'] - val['Buffers'] - val['Cached']
self.fulfill(mem)
def _getvalues(self):
return get_meminfo()
def update_graph(self):
val = self._getvalues()
self.push(
val['MemTotal'] - val['MemFree'] - val['Buffers'] - val['Cached']
)
class SwapGraph(_Graph):
"""Display a swap info graph"""
orientations = base.ORIENTATION_HORIZONTAL
fixed_upper_bound = True
def __init__(self, **config):
_Graph.__init__(self, **config)
val = self._getvalues()
self.maxvalue = val['SwapTotal']
swap = val['SwapTotal'] - val['SwapFree'] - val.get('SwapCached', 0)
self.fulfill(swap)
def _getvalues(self):
return get_meminfo()
def update_graph(self):
val = self._getvalues()
swap = val['SwapTotal'] - val['SwapFree'] - val.get('SwapCached', 0)
# can change, swapon/off
if self.maxvalue != val['SwapTotal']:
self.maxvalue = val['SwapTotal']
self.fulfill(swap)
self.push(swap)
class NetGraph(_Graph):
"""Display a network usage graph"""
orientations = base.ORIENTATION_HORIZONTAL
defaults = [
(
"interface",
"auto",
"Interface to display info for ('auto' for detection)"
),
("bandwidth_type", "down", "down(load)/up(load)"),
]
def __init__(self, **config):
_Graph.__init__(self, **config)
self.add_defaults(NetGraph.defaults)
if self.interface == "auto":
try:
self.interface = self.get_main_iface()
except RuntimeError:
logger.warning(
"NetGraph - Automatic interface detection failed, "
"falling back to 'eth0'"
)
self.interface = "eth0"
self.filename = '/sys/class/net/{interface}/statistics/{type}'.format(
interface=self.interface,
type=self.bandwidth_type == 'down' and 'rx_bytes' or 'tx_bytes'
)
self.bytes = 0
self.bytes = self._getValues()
def _getValues(self):
try:
with open(self.filename) as file:
val = int(file.read())
rval = val - self.bytes
self.bytes = val
return rval
except IOError:
return 0
def update_graph(self):
val = self._getValues()
self.push(val)
@staticmethod
def get_main_iface():
def make_route(line):
return dict(zip(['iface', 'dest'], line.split()))
with open('/proc/net/route', 'r') as fp:
lines = fp.readlines()
routes = [make_route(line) for line in lines[1:]]
try:
return next(
(r for r in routes if not int(r['dest'], 16)),
routes[0]
)['iface']
except (KeyError, IndexError, ValueError):
raise RuntimeError('No valid interfaces available')
class HDDGraph(_Graph):
"""Display HDD free or used space graph"""
fixed_upper_bound = True
orientations = base.ORIENTATION_HORIZONTAL
defaults = [
("path", "/", "Partition mount point."),
("space_type", "used", "free/used")
]
def __init__(self, **config):
_Graph.__init__(self, **config)
self.add_defaults(HDDGraph.defaults)
stats = statvfs(self.path)
self.maxvalue = stats.f_blocks * stats.f_frsize
values = self._getValues()
self.fulfill(values)
def _getValues(self):
stats = statvfs(self.path)
if self.space_type == 'used':
return (stats.f_blocks - stats.f_bfree) * stats.f_frsize
else:
return stats.f_bavail * stats.f_frsize
def update_graph(self):
val = self._getValues()
self.push(val)
class HDDBusyGraph(_Graph):
"""Display HDD busy time graph
Parses /sys/block/<dev>/stat file and extracts overall device IO usage,
based on ``io_ticks``'s value. See
https://www.kernel.org/doc/Documentation/block/stat.txt
"""
orientations = base.ORIENTATION_HORIZONTAL
defaults = [
("device", "sda", "Block device to display info for")
]
def __init__(self, **config):
_Graph.__init__(self, **config)
self.add_defaults(HDDBusyGraph.defaults)
self.path = '/sys/block/{dev}/stat'.format(
dev=self.device
)
self._prev = 0
def _getActivity(self):
try:
# io_ticks is field number 9
with open(self.path) as f:
io_ticks = int(f.read().split()[9])
except IOError:
return 0
activity = io_ticks - self._prev
self._prev = io_ticks
return activity
def update_graph(self):
self.push(self._getActivity())
|
vmax-feihu/hue
|
refs/heads/master
|
desktop/core/ext-py/Django-1.6.10/tests/template_tests/templatetags/broken_tag.py
|
240
|
from django import Xtemplate
|
bbansalWolfPack/servo
|
refs/heads/master
|
tests/wpt/css-tests/tools/serve/__init__.py
|
458
|
import serve
|
tbombach/autorest
|
refs/heads/master
|
src/generator/AutoRest.Python.Tests/Expected/AcceptanceTests/Header/autorestswaggerbatheaderservice/exceptions.py
|
687
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.exceptions import (
ClientException,
SerializationError,
DeserializationError,
TokenExpiredError,
ClientRequestError,
AuthenticationError,
HttpOperationError,
ValidationError,
)
|
Nowheresly/account-financial-tools
|
refs/heads/8.0
|
account_constraints/tests/test_account_constraints.py
|
24
|
# -*- coding: utf-8 -*-
#
#
# Authors: Adrien Peiffer
# Copyright (c) 2014 Acsone SA/NV (http://www.acsone.eu)
# All Rights Reserved
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs.
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contact a Free Software
# Service Company.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
import openerp.tests.common as common
from datetime import datetime
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT
from openerp import workflow, exceptions
def create_simple_invoice(self):
partner_id = self.ref('base.res_partner_2')
product_id = self.ref('product.product_product_4')
today = datetime.now()
journal_id = self.ref('account.sales_journal')
date = today.strftime(DEFAULT_SERVER_DATE_FORMAT)
return self.env['account.invoice']\
.create({'partner_id': partner_id,
'account_id':
self.ref('account.a_recv'),
'journal_id':
journal_id,
'date_invoice': date,
'invoice_line': [(0, 0, {'name': 'test',
'account_id':
self.ref('account.a_sale'),
'price_unit': 2000.00,
'quantity': 1,
'product_id': product_id,
}
)
],
})
class TestAccountConstraints(common.TransactionCase):
def setUp(self):
super(TestAccountConstraints, self).setUp()
def test_draft_move_invoice(self):
invoice = create_simple_invoice(self)
workflow.trg_validate(self.uid, 'account.invoice', invoice.id,
'invoice_open', self.cr)
move = invoice.move_id
move_lines = move.line_id
move.with_context({'from_parent_object': True})\
.write({'state': 'draft'})
self.assertRaises(exceptions.Warning, move_lines.write,
{'credit': 0.0})
def test_post_move_invoice_ref(self):
invoice = create_simple_invoice(self)
workflow.trg_validate(self.uid, 'account.invoice', invoice.id,
'invoice_open', self.cr)
move_lines = invoice.move_id.line_id
        # Here, normally no exception is raised in standard code.
        # This just verifies that it is possible to modify the ref field
        # on a posted account_move_line.
move_lines.with_context({'from_parent_object': True})\
.write({'ref': 'test'})
def test_post_move_invoice(self):
invoice = create_simple_invoice(self)
workflow.trg_validate(self.uid, 'account.invoice', invoice.id,
'invoice_open', self.cr)
move_lines = invoice.move_id.line_id
self.assertRaises(exceptions.Warning, move_lines.write,
{'ref': 'test'})
|
GageGaskins/osf.io
|
refs/heads/develop
|
scripts/osfstorage/migrate_metadata.py
|
28
|
# -*- coding: utf-8 -*-
"""Script which ensures that every file version's
content_type, size, and date_modified fields are consistent
with the metadata from waterbutler.
"""
from modularodm import Q
import logging
import sys
from website.addons.osfstorage.model import OsfStorageFileVersion
from website.app import init_app
from scripts import utils as scripts_utils
logger = logging.getLogger(__name__)
def main():
for each in OsfStorageFileVersion.find(
Q('size', 'eq', None) &
Q('status', 'ne', 'cached') &
Q('location.object', 'exists', True)
):
logger.info('Updating metadata for OsfStorageFileVersion {}'.format(each._id))
if 'dry' not in sys.argv:
each.update_metadata(each.metadata)
each.save()
if __name__ == '__main__':
# Set up storage backends
init_app(set_backends=True, routes=False)
if 'dry' not in sys.argv:
scripts_utils.add_file_logger(logger, __file__)
main()
|
iModels/mbuild
|
refs/heads/master
|
mbuild/formats/par_writer.py
|
2
|
"""CHARMM Par format."""
import warnings
__all__ = ["write_par"]
def write_par(structure, filename):
"""Write CHARMM Par file given a parametrized structure.
Notes
-----
Follows format according to
https://www.ks.uiuc.edu/Training/Tutorials/namd/namd-tutorial-unix-html/
node25.html
Furthermore, ParmEd should support writing CHARMM par, rtf, str files
    by converting the parmed.Structure into parmed.CharmmParameterSet.
    ParmEd stores rmin/2 in "rmin".
"""
# ATOMS
with open(filename, "w") as f:
f.write("ATOMS\n")
unique_atoms = set()
for atom in structure.atoms:
unique_atoms.add((atom.atom_type.name, atom.atom_type.mass))
for atom in unique_atoms:
f.write("MASS -1 {:8s} {:8.4f}\n".format(atom[0], atom[1]))
f.write("\nBONDS\n")
unique_bonds = set()
for bond in structure.bonds:
unique_bonds.add(
(
bond.atom1.atom_type.name,
bond.atom2.atom_type.name,
bond.type,
)
)
for bond in unique_bonds:
f.write(
"{:8s} {:8s} {:.5f} {:.5f}\n".format(
bond[0], bond[1], bond[2].k, bond[2].req
)
)
f.write("\nANGLES\n")
unique_angles = set()
unique_ubs = set()
for angle in structure.angles:
associated_ub = False
for ub in structure.urey_bradleys:
if ((angle.atom1, angle.atom3) == (ub.atom1, ub.atom2)) or (
angle.atom3,
angle.atom1,
) == (ub.atom1, ub.atom2):
unique_ubs.add(
(
angle.atom1.atom_type.name,
angle.atom2.atom_type.name,
angle.atom3.atom_type.name,
angle.type,
ub.type,
)
)
associated_ub = True
if not associated_ub:
unique_angles.add(
(
angle.atom1.atom_type.name,
angle.atom2.atom_type.name,
angle.atom3.atom_type.name,
angle.type,
)
)
for ub in unique_ubs:
f.write(
"{:8s} {:8s} {:8s} {:.5f} {:.5f} {:.5f} {:.5f}\n".format(
ub[0],
ub[1],
ub[2],
ub[3].k,
ub[3].theteq,
ub[4].k,
ub[4].req,
)
)
for angle in unique_angles:
f.write(
"{:8s} {:8s} {:8s} {:.5f} {:.5f}\n".format(
angle[0], angle[1], angle[2], angle[3].k, angle[3].theteq
)
)
# These dihedrals need to be PeriodicTorsion Style (Charmm style)
if len(structure.rb_torsions) > 0:
warnings.warn("RB Torsions detected, but unsupported in par writer")
f.write("\nDIHEDRALS\n")
unique_dihedrals = set()
scnb = set()
for dihedral in structure.dihedrals:
if not dihedral.improper:
unique_dihedrals.add(
(
dihedral.atom1.atom_type.name,
dihedral.atom2.atom_type.name,
dihedral.atom3.atom_type.name,
dihedral.atom4.atom_type.name,
dihedral.type,
)
)
scnb.add(dihedral.type.scnb)
else:
msg = (
"AMBER-style improper detected between "
+ "{} {} {} {}".format(
dihedral.atom1,
dihedral.atom2,
dihedral.atom3,
dihedral.atom4,
)
+ ", but unsupported in par writer"
)
warnings.warn(msg)
for dihedral in unique_dihedrals:
f.write(
"{:8s} {:8s} {:8s} {:8s} {:.5f} {:5d} {:.5f}\n".format(
dihedral[0],
dihedral[1],
dihedral[2],
dihedral[3],
dihedral[4].phi_k,
dihedral[4].per,
dihedral[4].phase,
)
)
f.write("\nIMPROPER\n")
unique_impropers = set()
for improper in structure.impropers:
unique_impropers.add(
(
improper.atom1.atom_type.name,
improper.atom2.atom_type.name,
improper.atom3.atom_type.name,
improper.atom4.atom_type.name,
improper.type,
)
)
for improper in unique_impropers:
f.write(
"{:8s} {:8s} {:8s} {:8s} {:.5f} {:5d} {:.5f}\n".format(
improper[2],
improper[0],
improper[1],
improper[3],
improper[4].psi_k,
0,
improper[4].psi_eq,
)
)
sc_nb = [a for a in scnb]
if len(sc_nb) > 1:
warnings.warn(
"Multiple 1-4 LJ scalings were detected, "
"defaulting to first LJ scaling detected, {}".format(sc_nb[0])
)
sc_nb = sc_nb[0]
elif len(sc_nb) == 1:
sc_nb = sc_nb[0]
elif len(sc_nb) == 0:
warnings.warn("No 1-4 LJ scaling was detected, defaulting 1")
sc_nb = 1.0
f.write("\nNONBONDED\n")
unique_atypes = set()
for atom in structure.atoms:
unique_atypes.add(atom.atom_type)
for atype in unique_atypes:
# atype, 0.0, epsilon, rmin/2, 0.0, epsilon(1-4), rmin/2 (1-4)
f.write(
"{:8s} {:8.3f} {:8.3f} {:8.3f} {:8.3f} {:8.3f} {:8.3f}\n".format(
atype.name,
0.0,
-1 * atype.epsilon,
atype.rmin,
0.0,
-1 * sc_nb * atype.epsilon,
atype.rmin,
)
)
if structure.has_NBFIX():
warnings.warn("NBFixes detected but unsupported in par writer")
f.write("\nEND")
|
qateam123/eq
|
refs/heads/master
|
tests/integration/questionnaire/test_questionnaire_save_sign_out.py
|
1
|
from tests.integration.create_token import create_token
from tests.integration.integration_test_case import IntegrationTestCase
class TestSaveSignOut(IntegrationTestCase):
def test_save_sign_out_with_mandatory_question_not_answered(self):
        # We can save and go to the sign-out page without having to fill in a mandatory answer
base_url = '/questionnaire/1/0205/789/'
# Given
token = create_token('0205', '1')
self.client.get('/session?token=' + token.decode(), follow_redirects=False)
# When
post_data = {
'action[start_questionnaire]': 'Start Questionnaire'
}
resp = self.client.post(base_url + 'introduction', data=post_data, follow_redirects=False)
self.assertEquals(resp.status_code, 302)
block_one_url = resp.headers['Location']
post_data = {
"total-retail-turnover": " 1000",
"action[save_sign_out]": "Save and sign out"
}
resp = self.client.post(block_one_url, data=post_data, follow_redirects=False)
self.assertEquals(resp.status_code, 302)
# Then
# we are presented with the sign out page
self.assertTrue("signed-out" in resp.headers['Location'])
self.assertEquals(resp.status_code, 302)
resp = self.client.get(resp.headers['Location'], follow_redirects=False)
self.assertEquals(resp.status_code, 200)
def test_save_sign_out_with_non_mandatory_validation_error(self):
        # We can't save if a validation error is caused; this doesn't include missing a mandatory question
base_url = '/questionnaire/1/0205/789/'
# Given
token = create_token('0205', '1')
self.client.get('/session?token=' + token.decode(), follow_redirects=False)
# When
post_data = {
'action[start_questionnaire]': 'Start Questionnaire'
}
resp = self.client.post(base_url + 'introduction', data=post_data, follow_redirects=False)
self.assertEquals(resp.status_code, 302)
block_one_url = resp.headers['Location']
post_data = {
"total-retail-turnover": "error",
"action[save_sign_out]": "Save and sign out"
}
resp = self.client.post(block_one_url, data=post_data, follow_redirects=False)
self.assertEquals(resp.status_code, 200)
# Then
# we are presented with an error message
content = resp.get_data(True)
self.assertRegexpMatches(content, 'Please only enter whole numbers into the field.')
def test_save_sign_out_complete_a_block_then_revist_it(self):
        # If a user completes a block, but then goes back and uses 'save and come back' on that block, that block
        # should no longer be considered complete, and on re-authentication it should return to it
base_url = '/questionnaire/1/0102/789/'
token = create_token('0102', '1')
self.client.get('/session?token=' + token.decode(), follow_redirects=False)
post_data = {
'action[start_questionnaire]': 'Start Questionnaire'
}
resp = self.client.post(base_url + 'introduction', data=post_data, follow_redirects=False)
self.assertEquals(resp.status_code, 302)
block_one_url = resp.headers['Location']
post_data = {
"period-from-day": "01",
"period-from-month": "4",
"period-from-year": "2016",
"period-to-day": "30",
"period-to-month": "04",
"period-to-year": "2016",
"action[save_continue]": "Save & Continue"
}
resp = self.client.post(block_one_url, data=post_data, follow_redirects=False)
self.assertEquals(resp.status_code, 302)
# We go back to the first page and save and complete later
self.client.get(block_one_url, follow_redirects=False)
post_data = {
"action[save_sign_out]": "Save and sign out"
}
self.client.post(block_one_url, data=post_data, follow_redirects=False)
# We re-authenticate and check we are on the first page
resp = self.client.get('/session?token=' + token.decode(), follow_redirects=False)
block_one_url = resp.headers['Location']
self.assertRegexpMatches(block_one_url, 'reporting-period')
|
nugget/home-assistant
|
refs/heads/dev
|
tests/components/media_player/test_blackbird.py
|
3
|
"""The tests for the Monoprice Blackbird media player platform."""
import unittest
from unittest import mock
import voluptuous as vol
from collections import defaultdict
from homeassistant.components.media_player.const import (
DOMAIN, SUPPORT_TURN_ON, SUPPORT_TURN_OFF, SUPPORT_SELECT_SOURCE)
from homeassistant.const import STATE_ON, STATE_OFF
import tests.common
from homeassistant.components.media_player.blackbird import (
DATA_BLACKBIRD, PLATFORM_SCHEMA, SERVICE_SETALLZONES, setup_platform)
import pytest
class AttrDict(dict):
"""Helper class for mocking attributes."""
def __setattr__(self, name, value):
"""Set attribute."""
self[name] = value
def __getattr__(self, item):
"""Get attribute."""
return self[item]
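# Editor's sketch: how AttrDict lets the mocks below expose dict keys as
# attributes (illustrative only, not part of the original test module).
def _example_attrdict():
    d = AttrDict()
    d.power = False             # stored as d['power'] via __setattr__
    return d.power, d['power']  # both read the same underlying entry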
class MockBlackbird:
"""Mock for pyblackbird object."""
def __init__(self):
"""Init mock object."""
self.zones = defaultdict(lambda: AttrDict(power=True,
av=1))
def zone_status(self, zone_id):
"""Get zone status."""
status = self.zones[zone_id]
status.zone = zone_id
return AttrDict(status)
def set_zone_source(self, zone_id, source_idx):
"""Set source for zone."""
self.zones[zone_id].av = source_idx
def set_zone_power(self, zone_id, power):
"""Turn zone on/off."""
self.zones[zone_id].power = power
def set_all_zone_source(self, source_idx):
"""Set source for all zones."""
self.zones[3].av = source_idx
class TestBlackbirdSchema(unittest.TestCase):
"""Test Blackbird schema."""
def test_valid_serial_schema(self):
"""Test valid schema."""
valid_schema = {
'platform': 'blackbird',
'port': '/dev/ttyUSB0',
'zones': {1: {'name': 'a'},
2: {'name': 'a'},
3: {'name': 'a'},
4: {'name': 'a'},
5: {'name': 'a'},
6: {'name': 'a'},
7: {'name': 'a'},
8: {'name': 'a'},
},
'sources': {
1: {'name': 'a'},
2: {'name': 'a'},
3: {'name': 'a'},
4: {'name': 'a'},
5: {'name': 'a'},
6: {'name': 'a'},
7: {'name': 'a'},
8: {'name': 'a'},
}
}
PLATFORM_SCHEMA(valid_schema)
def test_valid_socket_schema(self):
"""Test valid schema."""
valid_schema = {
'platform': 'blackbird',
'host': '192.168.1.50',
'zones': {1: {'name': 'a'},
2: {'name': 'a'},
3: {'name': 'a'},
4: {'name': 'a'},
5: {'name': 'a'},
},
'sources': {
1: {'name': 'a'},
2: {'name': 'a'},
3: {'name': 'a'},
4: {'name': 'a'},
}
}
PLATFORM_SCHEMA(valid_schema)
def test_invalid_schemas(self):
"""Test invalid schemas."""
schemas = (
{}, # Empty
None, # None
# Port and host used concurrently
{
'platform': 'blackbird',
'port': '/dev/ttyUSB0',
'host': '192.168.1.50',
'name': 'Name',
'zones': {1: {'name': 'a'}},
'sources': {1: {'name': 'b'}},
},
# Port or host missing
{
'platform': 'blackbird',
'name': 'Name',
'zones': {1: {'name': 'a'}},
'sources': {1: {'name': 'b'}},
},
# Invalid zone number
{
'platform': 'blackbird',
'port': '/dev/ttyUSB0',
'name': 'Name',
'zones': {11: {'name': 'a'}},
'sources': {1: {'name': 'b'}},
},
# Invalid source number
{
'platform': 'blackbird',
'port': '/dev/ttyUSB0',
'name': 'Name',
'zones': {1: {'name': 'a'}},
'sources': {9: {'name': 'b'}},
},
# Zone missing name
{
'platform': 'blackbird',
'port': '/dev/ttyUSB0',
'name': 'Name',
'zones': {1: {}},
'sources': {1: {'name': 'b'}},
},
# Source missing name
{
'platform': 'blackbird',
'port': '/dev/ttyUSB0',
'name': 'Name',
'zones': {1: {'name': 'a'}},
'sources': {1: {}},
},
)
for value in schemas:
with pytest.raises(vol.MultipleInvalid):
PLATFORM_SCHEMA(value)
class TestBlackbirdMediaPlayer(unittest.TestCase):
"""Test the media_player module."""
def setUp(self):
"""Set up the test case."""
self.blackbird = MockBlackbird()
self.hass = tests.common.get_test_home_assistant()
self.hass.start()
# Note, source dictionary is unsorted!
with mock.patch('pyblackbird.get_blackbird',
new=lambda *a: self.blackbird):
setup_platform(self.hass, {
'platform': 'blackbird',
'port': '/dev/ttyUSB0',
'zones': {3: {'name': 'Zone name'}},
'sources': {1: {'name': 'one'},
3: {'name': 'three'},
2: {'name': 'two'}},
}, lambda *args, **kwargs: None, {})
self.hass.block_till_done()
self.media_player = self.hass.data[DATA_BLACKBIRD]['/dev/ttyUSB0-3']
self.media_player.hass = self.hass
self.media_player.entity_id = 'media_player.zone_3'
def tearDown(self):
"""Tear down the test case."""
self.hass.stop()
def test_setup_platform(self, *args):
"""Test setting up platform."""
# One service must be registered
assert self.hass.services.has_service(DOMAIN, SERVICE_SETALLZONES)
assert len(self.hass.data[DATA_BLACKBIRD]) == 1
assert self.hass.data[DATA_BLACKBIRD]['/dev/ttyUSB0-3'].name == \
'Zone name'
def test_setallzones_service_call_with_entity_id(self):
"""Test set all zone source service call with entity id."""
self.media_player.update()
assert 'Zone name' == self.media_player.name
assert STATE_ON == self.media_player.state
assert 'one' == self.media_player.source
# Call set all zones service
self.hass.services.call(DOMAIN, SERVICE_SETALLZONES,
{'entity_id': 'media_player.zone_3',
'source': 'three'},
blocking=True)
# Check that source was changed
assert 3 == self.blackbird.zones[3].av
self.media_player.update()
assert 'three' == self.media_player.source
def test_setallzones_service_call_without_entity_id(self):
"""Test set all zone source service call without entity id."""
self.media_player.update()
assert 'Zone name' == self.media_player.name
assert STATE_ON == self.media_player.state
assert 'one' == self.media_player.source
# Call set all zones service
self.hass.services.call(DOMAIN, SERVICE_SETALLZONES,
{'source': 'three'}, blocking=True)
# Check that source was changed
assert 3 == self.blackbird.zones[3].av
self.media_player.update()
assert 'three' == self.media_player.source
def test_update(self):
"""Test updating values from blackbird."""
assert self.media_player.state is None
assert self.media_player.source is None
self.media_player.update()
assert STATE_ON == self.media_player.state
assert 'one' == self.media_player.source
def test_name(self):
"""Test name property."""
assert 'Zone name' == self.media_player.name
def test_state(self):
"""Test state property."""
assert self.media_player.state is None
self.media_player.update()
assert STATE_ON == self.media_player.state
self.blackbird.zones[3].power = False
self.media_player.update()
assert STATE_OFF == self.media_player.state
def test_supported_features(self):
"""Test supported features property."""
assert SUPPORT_TURN_ON | SUPPORT_TURN_OFF | \
SUPPORT_SELECT_SOURCE == \
self.media_player.supported_features
def test_source(self):
"""Test source property."""
assert self.media_player.source is None
self.media_player.update()
assert 'one' == self.media_player.source
def test_media_title(self):
"""Test media title property."""
assert self.media_player.media_title is None
self.media_player.update()
assert 'one' == self.media_player.media_title
def test_source_list(self):
"""Test source list property."""
# Note, the list is sorted!
assert ['one', 'two', 'three'] == \
self.media_player.source_list
def test_select_source(self):
"""Test source selection methods."""
self.media_player.update()
assert 'one' == self.media_player.source
self.media_player.select_source('two')
assert 2 == self.blackbird.zones[3].av
self.media_player.update()
assert 'two' == self.media_player.source
# Trying to set unknown source.
self.media_player.select_source('no name')
assert 2 == self.blackbird.zones[3].av
self.media_player.update()
assert 'two' == self.media_player.source
def test_turn_on(self):
"""Testing turning on the zone."""
self.blackbird.zones[3].power = False
self.media_player.update()
assert STATE_OFF == self.media_player.state
self.media_player.turn_on()
assert self.blackbird.zones[3].power
self.media_player.update()
assert STATE_ON == self.media_player.state
def test_turn_off(self):
"""Testing turning off the zone."""
self.blackbird.zones[3].power = True
self.media_player.update()
assert STATE_ON == self.media_player.state
self.media_player.turn_off()
assert not self.blackbird.zones[3].power
self.media_player.update()
assert STATE_OFF == self.media_player.state
|
leekchan/django_test
|
refs/heads/master
|
django/contrib/gis/utils/layermapping.py
|
23
|
# LayerMapping -- A Django Model/OGR Layer Mapping Utility
"""
The LayerMapping class provides a way to map the contents of OGR
vector files (e.g. SHP files) to Geographic-enabled Django models.
For more information, please consult the GeoDjango documentation:
http://geodjango.org/docs/layermapping.html
"""
import sys
from decimal import Decimal, InvalidOperation as DecimalInvalidOperation
from django.core.exceptions import ObjectDoesNotExist
from django.db import connections, router
from django.contrib.gis.db.models import GeometryField
from django.contrib.gis.gdal import (CoordTransform, DataSource,
OGRException, OGRGeometry, OGRGeomType, SpatialReference)
from django.contrib.gis.gdal.field import (
OFTDate, OFTDateTime, OFTInteger, OFTReal, OFTString, OFTTime)
from django.db import models, transaction
from django.utils import six
from django.utils.encoding import force_text
# LayerMapping exceptions.
class LayerMapError(Exception):
pass
class InvalidString(LayerMapError):
pass
class InvalidDecimal(LayerMapError):
pass
class InvalidInteger(LayerMapError):
pass
class MissingForeignKey(LayerMapError):
pass
class LayerMapping(object):
"A class that maps OGR Layers to GeoDjango Models."
# Acceptable 'base' types for a multi-geometry type.
MULTI_TYPES = {1: OGRGeomType('MultiPoint'),
2: OGRGeomType('MultiLineString'),
3: OGRGeomType('MultiPolygon'),
OGRGeomType('Point25D').num: OGRGeomType('MultiPoint25D'),
OGRGeomType('LineString25D').num: OGRGeomType('MultiLineString25D'),
OGRGeomType('Polygon25D').num: OGRGeomType('MultiPolygon25D'),
}
# Acceptable Django field types and corresponding acceptable OGR
# counterparts.
FIELD_TYPES = {
models.AutoField: OFTInteger,
models.IntegerField: (OFTInteger, OFTReal, OFTString),
models.FloatField: (OFTInteger, OFTReal),
models.DateField: OFTDate,
models.DateTimeField: OFTDateTime,
models.EmailField: OFTString,
models.TimeField: OFTTime,
models.DecimalField: (OFTInteger, OFTReal),
models.CharField: OFTString,
models.SlugField: OFTString,
models.TextField: OFTString,
models.URLField: OFTString,
models.BigIntegerField: (OFTInteger, OFTReal, OFTString),
models.SmallIntegerField: (OFTInteger, OFTReal, OFTString),
models.PositiveSmallIntegerField: (OFTInteger, OFTReal, OFTString),
}
def __init__(self, model, data, mapping, layer=0,
source_srs=None, encoding='utf-8',
transaction_mode='commit_on_success',
transform=True, unique=None, using=None):
"""
A LayerMapping object is initialized using the given Model (not an instance),
a DataSource (or string path to an OGR-supported data file), and a mapping
dictionary. See the module level docstring for more details and keyword
argument usage.
"""
# Getting the DataSource and the associated Layer.
if isinstance(data, six.string_types):
self.ds = DataSource(data, encoding=encoding)
else:
self.ds = data
self.layer = self.ds[layer]
self.using = using if using is not None else router.db_for_write(model)
self.spatial_backend = connections[self.using].ops
# Setting the mapping & model attributes.
self.mapping = mapping
self.model = model
# Checking the layer -- initialization of the object will fail if
        # things don't check out beforehand.
self.check_layer()
# Getting the geometry column associated with the model (an
# exception will be raised if there is no geometry column).
if connections[self.using].features.supports_transform:
self.geo_field = self.geometry_field()
else:
transform = False
# Checking the source spatial reference system, and getting
# the coordinate transformation object (unless the `transform`
# keyword is set to False)
if transform:
self.source_srs = self.check_srs(source_srs)
self.transform = self.coord_transform()
else:
self.transform = transform
# Setting the encoding for OFTString fields, if specified.
if encoding:
# Making sure the encoding exists, if not a LookupError
# exception will be thrown.
from codecs import lookup
lookup(encoding)
self.encoding = encoding
else:
self.encoding = None
if unique:
self.check_unique(unique)
transaction_mode = 'autocommit' # Has to be set to autocommit.
self.unique = unique
else:
self.unique = None
# Setting the transaction decorator with the function in the
# transaction modes dictionary.
self.transaction_mode = transaction_mode
if transaction_mode == 'autocommit':
self.transaction_decorator = None
elif transaction_mode == 'commit_on_success':
self.transaction_decorator = transaction.atomic
else:
raise LayerMapError('Unrecognized transaction mode: %s' % transaction_mode)
#### Checking routines used during initialization ####
def check_fid_range(self, fid_range):
"This checks the `fid_range` keyword."
if fid_range:
if isinstance(fid_range, (tuple, list)):
return slice(*fid_range)
elif isinstance(fid_range, slice):
return fid_range
else:
raise TypeError
else:
return None
def check_layer(self):
"""
This checks the Layer metadata, and ensures that it is compatible
with the mapping information and model. Unlike previous revisions,
there is no need to increment through each feature in the Layer.
"""
# The geometry field of the model is set here.
# TODO: Support more than one geometry field / model. However, this
# depends on the GDAL Driver in use.
self.geom_field = False
self.fields = {}
# Getting lists of the field names and the field types available in
# the OGR Layer.
ogr_fields = self.layer.fields
ogr_field_types = self.layer.field_types
# Function for determining if the OGR mapping field is in the Layer.
def check_ogr_fld(ogr_map_fld):
try:
idx = ogr_fields.index(ogr_map_fld)
except ValueError:
raise LayerMapError('Given mapping OGR field "%s" not found in OGR Layer.' % ogr_map_fld)
return idx
# No need to increment through each feature in the model, simply check
# the Layer metadata against what was given in the mapping dictionary.
for field_name, ogr_name in self.mapping.items():
# Ensuring that a corresponding field exists in the model
# for the given field name in the mapping.
try:
model_field = self.model._meta.get_field(field_name)
except models.fields.FieldDoesNotExist:
raise LayerMapError('Given mapping field "%s" not in given Model fields.' % field_name)
# Getting the string name for the Django field class (e.g., 'PointField').
fld_name = model_field.__class__.__name__
if isinstance(model_field, GeometryField):
if self.geom_field:
raise LayerMapError('LayerMapping does not support more than one GeometryField per model.')
# Getting the coordinate dimension of the geometry field.
coord_dim = model_field.dim
try:
if coord_dim == 3:
gtype = OGRGeomType(ogr_name + '25D')
else:
gtype = OGRGeomType(ogr_name)
except OGRException:
raise LayerMapError('Invalid mapping for GeometryField "%s".' % field_name)
# Making sure that the OGR Layer's Geometry is compatible.
ltype = self.layer.geom_type
if not (ltype.name.startswith(gtype.name) or self.make_multi(ltype, model_field)):
raise LayerMapError('Invalid mapping geometry; model has %s%s, '
'layer geometry type is %s.' %
(fld_name, '(dim=3)' if coord_dim == 3 else '', ltype))
# Setting the `geom_field` attribute w/the name of the model field
# that is a Geometry. Also setting the coordinate dimension
# attribute.
self.geom_field = field_name
self.coord_dim = coord_dim
fields_val = model_field
elif isinstance(model_field, models.ForeignKey):
if isinstance(ogr_name, dict):
# Is every given related model mapping field in the Layer?
rel_model = model_field.rel.to
for rel_name, ogr_field in ogr_name.items():
idx = check_ogr_fld(ogr_field)
try:
rel_model._meta.get_field(rel_name)
except models.fields.FieldDoesNotExist:
raise LayerMapError('ForeignKey mapping field "%s" not in %s fields.' %
                                                (rel_name, rel_model.__name__))
fields_val = rel_model
else:
raise TypeError('ForeignKey mapping must be of dictionary type.')
else:
# Is the model field type supported by LayerMapping?
if model_field.__class__ not in self.FIELD_TYPES:
raise LayerMapError('Django field type "%s" has no OGR mapping (yet).' % fld_name)
# Is the OGR field in the Layer?
idx = check_ogr_fld(ogr_name)
ogr_field = ogr_field_types[idx]
# Can the OGR field type be mapped to the Django field type?
if not issubclass(ogr_field, self.FIELD_TYPES[model_field.__class__]):
raise LayerMapError('OGR field "%s" (of type %s) cannot be mapped to Django %s.' %
(ogr_field, ogr_field.__name__, fld_name))
fields_val = model_field
self.fields[field_name] = fields_val
def check_srs(self, source_srs):
"Checks the compatibility of the given spatial reference object."
if isinstance(source_srs, SpatialReference):
sr = source_srs
elif isinstance(source_srs, self.spatial_backend.spatial_ref_sys()):
sr = source_srs.srs
elif isinstance(source_srs, (int, six.string_types)):
sr = SpatialReference(source_srs)
else:
# Otherwise just pulling the SpatialReference from the layer
sr = self.layer.srs
if not sr:
raise LayerMapError('No source reference system defined.')
else:
return sr
def check_unique(self, unique):
"Checks the `unique` keyword parameter -- may be a sequence or string."
if isinstance(unique, (list, tuple)):
# List of fields to determine uniqueness with
for attr in unique:
if attr not in self.mapping:
raise ValueError
elif isinstance(unique, six.string_types):
# Only a single field passed in.
if unique not in self.mapping:
raise ValueError
else:
raise TypeError('Unique keyword argument must be set with a tuple, list, or string.')
# Keyword argument retrieval routines ####
def feature_kwargs(self, feat):
"""
Given an OGR Feature, this will return a dictionary of keyword arguments
for constructing the mapped model.
"""
# The keyword arguments for model construction.
kwargs = {}
# Incrementing through each model field and OGR field in the
# dictionary mapping.
for field_name, ogr_name in self.mapping.items():
model_field = self.fields[field_name]
if isinstance(model_field, GeometryField):
# Verify OGR geometry.
try:
val = self.verify_geom(feat.geom, model_field)
except OGRException:
raise LayerMapError('Could not retrieve geometry from feature.')
elif isinstance(model_field, models.base.ModelBase):
                # The related _model_, not a field, was passed in -- indicating
# another mapping for the related Model.
val = self.verify_fk(feat, model_field, ogr_name)
else:
# Otherwise, verify OGR Field type.
val = self.verify_ogr_field(feat[ogr_name], model_field)
# Setting the keyword arguments for the field name with the
# value obtained above.
kwargs[field_name] = val
return kwargs
def unique_kwargs(self, kwargs):
"""
Given the feature keyword arguments (from `feature_kwargs`) this routine
will construct and return the uniqueness keyword arguments -- a subset
of the feature kwargs.
"""
if isinstance(self.unique, six.string_types):
return {self.unique: kwargs[self.unique]}
else:
return dict((fld, kwargs[fld]) for fld in self.unique)
#### Verification routines used in constructing model keyword arguments. ####
def verify_ogr_field(self, ogr_field, model_field):
"""
Verifies if the OGR Field contents are acceptable to the Django
model field. If they are, the verified value is returned,
otherwise the proper exception is raised.
"""
if (isinstance(ogr_field, OFTString) and
isinstance(model_field, (models.CharField, models.TextField))):
if self.encoding:
# The encoding for OGR data sources may be specified here
# (e.g., 'cp437' for Census Bureau boundary files).
val = force_text(ogr_field.value, self.encoding)
else:
val = ogr_field.value
if model_field.max_length and len(val) > model_field.max_length:
raise InvalidString('%s model field maximum string length is %s, given %s characters.' %
(model_field.name, model_field.max_length, len(val)))
elif isinstance(ogr_field, OFTReal) and isinstance(model_field, models.DecimalField):
try:
# Creating an instance of the Decimal value to use.
d = Decimal(str(ogr_field.value))
except DecimalInvalidOperation:
raise InvalidDecimal('Could not construct decimal from: %s' % ogr_field.value)
# Getting the decimal value as a tuple.
dtup = d.as_tuple()
digits = dtup[1]
d_idx = dtup[2] # index where the decimal is
# Maximum amount of precision, or digits to the left of the decimal.
max_prec = model_field.max_digits - model_field.decimal_places
# Getting the digits to the left of the decimal place for the
# given decimal.
if d_idx < 0:
n_prec = len(digits[:d_idx])
else:
n_prec = len(digits) + d_idx
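            # Editor's worked example: Decimal('123.45').as_tuple() yields
            # digits=(1, 2, 3, 4, 5) and exponent -2, so d_idx < 0 and
            # n_prec = len(digits[:-2]) = 3 digits left of the decimal point.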
# If we have more than the maximum digits allowed, then throw an
# InvalidDecimal exception.
if n_prec > max_prec:
raise InvalidDecimal(
'A DecimalField with max_digits %d, decimal_places %d must '
'round to an absolute value less than 10^%d.' %
(model_field.max_digits, model_field.decimal_places, max_prec)
)
val = d
elif isinstance(ogr_field, (OFTReal, OFTString)) and isinstance(model_field, models.IntegerField):
# Attempt to convert any OFTReal and OFTString value to an OFTInteger.
try:
val = int(ogr_field.value)
except ValueError:
raise InvalidInteger('Could not construct integer from: %s' % ogr_field.value)
else:
val = ogr_field.value
return val
def verify_fk(self, feat, rel_model, rel_mapping):
"""
Given an OGR Feature, the related model and its dictionary mapping,
this routine will retrieve the related model for the ForeignKey
mapping.
"""
# TODO: It is expensive to retrieve a model for every record --
# explore if an efficient mechanism exists for caching related
# ForeignKey models.
# Constructing and verifying the related model keyword arguments.
fk_kwargs = {}
for field_name, ogr_name in rel_mapping.items():
fk_kwargs[field_name] = self.verify_ogr_field(feat[ogr_name], rel_model._meta.get_field(field_name))
# Attempting to retrieve and return the related model.
try:
return rel_model.objects.using(self.using).get(**fk_kwargs)
except ObjectDoesNotExist:
raise MissingForeignKey(
'No ForeignKey %s model found with keyword arguments: %s' %
(rel_model.__name__, fk_kwargs)
)
def verify_geom(self, geom, model_field):
"""
Verifies the geometry -- will construct and return a GeometryCollection
if necessary (for example if the model field is MultiPolygonField while
the mapped shapefile only contains Polygons).
"""
# Downgrade a 3D geom to a 2D one, if necessary.
if self.coord_dim != geom.coord_dim:
geom.coord_dim = self.coord_dim
if self.make_multi(geom.geom_type, model_field):
# Constructing a multi-geometry type to contain the single geometry
multi_type = self.MULTI_TYPES[geom.geom_type.num]
g = OGRGeometry(multi_type)
g.add(geom)
else:
g = geom
# Transforming the geometry with our Coordinate Transformation object,
# but only if the class variable `transform` is set w/a CoordTransform
# object.
if self.transform:
g.transform(self.transform)
# Returning the WKT of the geometry.
return g.wkt
#### Other model methods ####
def coord_transform(self):
"Returns the coordinate transformation object."
SpatialRefSys = self.spatial_backend.spatial_ref_sys()
try:
# Getting the target spatial reference system
target_srs = SpatialRefSys.objects.using(self.using).get(srid=self.geo_field.srid).srs
# Creating the CoordTransform object
return CoordTransform(self.source_srs, target_srs)
except Exception as msg:
new_msg = 'Could not translate between the data source and model geometry: %s' % msg
six.reraise(LayerMapError, LayerMapError(new_msg), sys.exc_info()[2])
def geometry_field(self):
"Returns the GeometryField instance associated with the geographic column."
# Use the `get_field_by_name` on the model's options so that we
# get the correct field instance if there's model inheritance.
opts = self.model._meta
fld, model, direct, m2m = opts.get_field_by_name(self.geom_field)
return fld
def make_multi(self, geom_type, model_field):
"""
Given the OGRGeomType for a geometry and its associated GeometryField,
determine whether the geometry should be turned into a GeometryCollection.
"""
return (geom_type.num in self.MULTI_TYPES and
model_field.__class__.__name__ == 'Multi%s' % geom_type.django)
def save(self, verbose=False, fid_range=False, step=False,
progress=False, silent=False, stream=sys.stdout, strict=False):
"""
Saves the contents from the OGR DataSource Layer into the database
according to the mapping dictionary given at initialization.
Keyword Parameters:
verbose:
If set, information will be printed subsequent to each model save
executed on the database.
fid_range:
           May be set with a slice or tuple of (begin, end) feature IDs to map
from the data source. In other words, this keyword enables the user
to selectively import a subset range of features in the geographic
data source.
step:
If set with an integer, transactions will occur at every step
interval. For example, if step=1000, a commit would occur after
the 1,000th feature, the 2,000th feature etc.
progress:
When this keyword is set, status information will be printed giving
the number of features processed and successfully saved. By default,
           progress information will be printed every 1000 features processed;
however, this default may be overridden by setting this keyword with an
integer for the desired interval.
stream:
Status information will be written to this file handle. Defaults to
using `sys.stdout`, but any object with a `write` method is supported.
silent:
By default, non-fatal error notifications are printed to stdout, but
this keyword may be set to disable these notifications.
strict:
Execution of the model mapping will cease upon the first error
encountered. The default behavior is to attempt to continue.
"""
# Getting the default Feature ID range.
default_range = self.check_fid_range(fid_range)
# Setting the progress interval, if requested.
if progress:
if progress is True or not isinstance(progress, int):
progress_interval = 1000
else:
progress_interval = progress
def _save(feat_range=default_range, num_feat=0, num_saved=0):
if feat_range:
layer_iter = self.layer[feat_range]
else:
layer_iter = self.layer
for feat in layer_iter:
num_feat += 1
# Getting the keyword arguments
try:
kwargs = self.feature_kwargs(feat)
except LayerMapError as msg:
# Something borked the validation
if strict:
raise
elif not silent:
stream.write('Ignoring Feature ID %s because: %s\n' % (feat.fid, msg))
else:
# Constructing the model using the keyword args
is_update = False
if self.unique:
# If we want unique models on a particular field, handle the
# geometry appropriately.
try:
# Getting the keyword arguments and retrieving
# the unique model.
u_kwargs = self.unique_kwargs(kwargs)
m = self.model.objects.using(self.using).get(**u_kwargs)
is_update = True
# Getting the geometry (in OGR form), creating
# one from the kwargs WKT, adding in additional
                            # geometries, and updating the attribute with the
# just-updated geometry WKT.
geom = getattr(m, self.geom_field).ogr
new = OGRGeometry(kwargs[self.geom_field])
for g in new:
geom.add(g)
setattr(m, self.geom_field, geom.wkt)
except ObjectDoesNotExist:
# No unique model exists yet, create.
m = self.model(**kwargs)
else:
m = self.model(**kwargs)
try:
# Attempting to save.
m.save(using=self.using)
num_saved += 1
if verbose:
stream.write('%s: %s\n' % ('Updated' if is_update else 'Saved', m))
except Exception as msg:
if strict:
# Bailing out if the `strict` keyword is set.
if not silent:
stream.write(
'Failed to save the feature (id: %s) into the '
'model with the keyword arguments:\n' % feat.fid
)
stream.write('%s\n' % kwargs)
raise
elif not silent:
stream.write('Failed to save %s:\n %s\nContinuing\n' % (kwargs, msg))
# Printing progress information, if requested.
if progress and num_feat % progress_interval == 0:
stream.write('Processed %d features, saved %d ...\n' % (num_feat, num_saved))
# Only used for status output purposes -- incremental saving uses the
# values returned here.
return num_saved, num_feat
if self.transaction_decorator is not None:
_save = self.transaction_decorator(_save)
nfeat = self.layer.num_feat
if step and isinstance(step, int) and step < nfeat:
# Incremental saving is requested at the given interval (step)
if default_range:
raise LayerMapError('The `step` keyword may not be used in conjunction with the `fid_range` keyword.')
beg, num_feat, num_saved = (0, 0, 0)
indices = range(step, nfeat, step)
n_i = len(indices)
for i, end in enumerate(indices):
# Constructing the slice to use for this step; the last slice is
                # special (e.g., [100:] instead of [90:100]).
if i + 1 == n_i:
step_slice = slice(beg, None)
else:
step_slice = slice(beg, end)
try:
num_feat, num_saved = _save(step_slice, num_feat, num_saved)
beg = end
except: # Deliberately catch everything
stream.write('%s\nFailed to save slice: %s\n' % ('=-' * 20, step_slice))
raise
else:
# Otherwise, just calling the previously defined _save() function.
_save()
|
csieg/ardupilot
|
refs/heads/master
|
Tools/mavproxy_modules/lib/geodesic_grid.py
|
108
|
# Copyright (C) 2016 Intel Corporation. All rights reserved.
#
# This file is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This file is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
'''
This module takes libraries/AP_Math/AP_GeodesicGrid.h reference for defining
the geodesic sections.
'''
import math
from scipy.constants import golden as g
_first_half = (
((-g, 1, 0), (-1, 0,-g), (-g,-1, 0)),
((-1, 0,-g), (-g,-1, 0), ( 0,-g,-1)),
((-g,-1, 0), ( 0,-g,-1), ( 0,-g, 1)),
((-1, 0,-g), ( 0,-g,-1), ( 1, 0,-g)),
(( 0,-g,-1), ( 0,-g, 1), ( g,-1, 0)),
(( 0,-g,-1), ( 1, 0,-g), ( g,-1, 0)),
(( g,-1, 0), ( 1, 0,-g), ( g, 1, 0)),
(( 1, 0,-g), ( g, 1, 0), ( 0, g,-1)),
(( 1, 0,-g), ( 0, g,-1), (-1, 0,-g)),
(( 0, g,-1), (-g, 1, 0), (-1, 0,-g)),
)
_second_half = tuple(
((-xa, -ya, -za), (-xb, -yb, -zb), (-xc, -yc, -zc))
for (xa, ya, za), (xb, yb, zb), (xc, yc, zc) in _first_half
)
triangles = _first_half + _second_half
def _midpoint_projection(a, b):
xa, ya, za = a
xb, yb, zb = b
s = _midpoint_projection.scale
return s * (xa + xb), s * (ya + yb), s * (za + zb)
radius = math.sqrt(1 + g**2)
# radius / |a + b| for two adjacent icosahedron vertices a and b (that norm
# is 2 * g), so each projected midpoint lands back on the circumscribed sphere
_midpoint_projection.scale = radius / (2 * g)
sections_triangles = ()
for a, b, c in triangles:
ma = _midpoint_projection(a, b)
mb = _midpoint_projection(b, c)
mc = _midpoint_projection(c, a)
sections_triangles += (
(ma, mb, mc),
( a, ma, mc),
(ma, b, mb),
(mc, mb, c),
)
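# Editor's sanity sketch (illustrative, not part of the module): every point in
# `sections_triangles` -- original vertices and projected midpoints alike --
# should have norm equal to `radius`:
#
#     for tri in sections_triangles:
#         for x, y, z in tri:
#             assert abs(math.sqrt(x * x + y * y + z * z) - radius) < 1e-9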
|
tixxdz/ahaggar
|
refs/heads/master
|
scripts/pahaggar/gcc.py
|
1
|
"""GCC Utils."""
#
# Copyright (C) 2012-2013 Djalal Harouni <tixxdz@opendz.org>
# Copyright (C) 2012-2013 LIRE Laboratory.
# University Constantine 2, Algeria.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 2
# of the License.
#
from pahaggar import gcc_tree
from pahaggar.gcc_tree import TREE
gtree = gcc_tree
OP_SYMBOL = {
gtree.MODIFY_EXPR: "=",
gtree.TRUTH_OR_EXPR: "||",
gtree.TRUTH_ORIF_EXPR: "||",
gtree.TRUTH_AND_EXPR: "&&",
gtree.TRUTH_ANDIF_EXPR: "&&",
gtree.BIT_IOR_EXPR: "|",
gtree.TRUTH_XOR_EXPR: "^",
gtree.BIT_XOR_EXPR: "^",
gtree.ADDR_EXPR: "&",
gtree.BIT_AND_EXPR: "&",
gtree.ORDERED_EXPR: "ord",
gtree.UNORDERED_EXPR: "unord",
gtree.EQ_EXPR: "==",
gtree.UNEQ_EXPR: "u==",
gtree.NE_EXPR: "!=",
gtree.LT_EXPR: "<",
gtree.UNLT_EXPR: "u<",
gtree.LE_EXPR: "<=",
gtree.UNLE_EXPR: "u<=",
gtree.GT_EXPR: ">",
gtree.UNGT_EXPR: "u>",
gtree.GE_EXPR: ">=",
gtree.UNGE_EXPR: "u>=",
gtree.LTGT_EXPR: "<>",
gtree.LSHIFT_EXPR: "<<",
gtree.RSHIFT_EXPR: ">>",
gtree.LROTATE_EXPR: "r<<",
gtree.RROTATE_EXPR: "r>>",
gtree.VEC_LSHIFT_EXPR: "v<<",
gtree.VEC_RSHIFT_EXPR: "v>>",
gtree.WIDEN_LSHIFT_EXPR: "w<<",
gtree.POINTER_PLUS_EXPR: "+",
gtree.PLUS_EXPR: "+",
gtree.REDUC_PLUS_EXPR: "r+",
gtree.WIDEN_SUM_EXPR: "w+",
gtree.WIDEN_MULT_EXPR: "w*",
gtree.MULT_HIGHPART_EXPR: "h*",
gtree.NEGATE_EXPR: "-",
gtree.MINUS_EXPR: "-",
gtree.BIT_NOT_EXPR: "~",
gtree.TRUTH_NOT_EXPR: "!",
gtree.MULT_EXPR: "*",
gtree.INDIRECT_REF: "*",
gtree.TRUNC_DIV_EXPR: "/",
gtree.RDIV_EXPR: "/",
gtree.CEIL_DIV_EXPR: "/[cl]",
gtree.FLOOR_DIV_EXPR: "/[fl]",
gtree.ROUND_DIV_EXPR: "/[rd]",
gtree.EXACT_DIV_EXPR: "/[ex]",
gtree.TRUNC_MOD_EXPR: "%",
gtree.CEIL_MOD_EXPR: "%[cl]",
gtree.FLOOR_MOD_EXPR: "%[fl]",
gtree.ROUND_MOD_EXPR: "%[rd]",
gtree.PREDECREMENT_EXPR: " --",
gtree.PREINCREMENT_EXPR: " ++",
gtree.POSTDECREMENT_EXPR: "-- ",
gtree.POSTINCREMENT_EXPR: "++ ",
gtree.MAX_EXPR: "max",
gtree.MIN_EXPR: "min",
}
def op_symbol(code):
if is_code_valid_gcc_tree(code):
idx = TREE[code]
if idx in OP_SYMBOL:
return OP_SYMBOL[idx]
return ""
def get_op_operands(operations):
    # Not implemented yet: always returns an empty list regardless of input.
    results = []
    return results
def is_error_mark(code):
return TREE[code] == gtree.ERROR_MARK
def is_identifier_node(code):
return TREE[code] == gtree.IDENTIFIER_NODE
def is_tree_list(code):
return TREE[code] == gtree.TREE_LIST
def is_tree_binfo(code):
return TREE[code] == gtree.TREE_BINFO
def is_tree_vec(code):
return TREE[code] == gtree.TREE_VEC
def is_block(code):
return TREE[code] == gtree.BLOCK
def is_void_type(code):
return TREE[code] == gtree.VOID_TYPE
def is_integer_type(code):
return TREE[code] == gtree.INTEGER_TYPE
def is_real_type(code):
return TREE[code] == gtree.REAL_TYPE
def is_fixed_point_type(code):
return TREE[code] == gtree.FIXED_POINT_TYPE
def is_complex_type(code):
return TREE[code] == gtree.COMPLEX_TYPE
def is_vector_type(code):
return TREE[code] == gtree.VECTOR_TYPE
def is_enumeral_type(code):
return TREE[code] == gtree.ENUMERAL_TYPE
def is_boolean_type(code):
return TREE[code] == gtree.BOOLEAN_TYPE
def is_pointer_type(code):
return TREE[code] == gtree.POINTER_TYPE
def is_reference_type(code):
return TREE[code] == gtree.REFERENCE_TYPE
def is_offset_type(code):
return TREE[code] == gtree.OFFSET_TYPE
def is_mem_ref(code):
return TREE[code] == gtree.MEM_REF
def is_target_mem_ref(code):
return TREE[code] == gtree.TARGET_MEM_REF
def is_array_type(code):
return TREE[code] == gtree.ARRAY_TYPE
def is_record_type(code):
return TREE[code] == gtree.RECORD_TYPE
def is_union_type(code):
return TREE[code] == gtree.UNION_TYPE
def is_qual_union_type(code):
return TREE[code] == gtree.QUAL_UNION_TYPE
def is_lang_type(code):
return TREE[code] == gtree.LANG_TYPE
def is_statement_list(code):
return TREE[code] == gtree.STATEMENT_LIST
def is_integer_cst(code):
return TREE[code] == gtree.INTEGER_CST
def is_real_cst(code):
return TREE[code] == gtree.REAL_CST
def is_fixed_cst(code):
return TREE[code] == gtree.FIXED_CST
def is_complex_cst(code):
return TREE[code] == gtree.COMPLEX_CST
def is_string_cst(code):
return TREE[code] == gtree.STRING_CST
def is_vector_cst(code):
return TREE[code] == gtree.VECTOR_CST
def is_function_type(code):
return TREE[code] == gtree.FUNCTION_TYPE
def is_methode_type(code):
return TREE[code] == gtree.METHODE_TYPE
def is_function_decl(code):
return TREE[code] == gtree.FUNCTION_DECL
def is_const_decl(code):
return TREE[code] == gtree.CONST_DECL
def is_label_decl(code):
return TREE[code] == gtree.LABEL_DECL
def is_result_decl(code):
return TREE[code] == gtree.RESULT_DECL
def is_type_decl(code):
return TREE[code] == gtree.TYPE_DECL
def is_var_decl(code):
return TREE[code] == gtree.VAR_DECL
def is_parm_decl(code):
return TREE[code] == gtree.PARM_DECL
def is_debug_expr_decl(code):
return TREE[code] == gtree.DEBUG_EXPR_DECL
def is_namespace_decl(code):
return TREE[code] == gtree.NAMESPACE_DECL
def is_field_decl(code):
return TREE[code] == gtree.FIELD_DECL
def is_component_ref(code):
return TREE[code] == gtree.COMPONENT_REF
def is_bit_field_ref(code):
return TREE[code] == gtree.BIT_FIELD_REF
def is_array_ref(code):
return TREE[code] == gtree.ARRAY_REF
def is_array_range_ref(code):
return TREE[code] == gtree.ARRAY_RANGE_REF
# TODO: continue
def is_aggr_init_expr(code):
return TREE[code] == gtree.AGGR_INIT_EXPR
def is_call_expr(code):
return TREE[code] == gtree.CALL_EXPR
def is_code_valid_gcc_tree(code):
return code in TREE
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
|
bovesan/mistika-hyperspeed
|
refs/heads/master
|
Afterscripts/Rewrap-to-mov/rewrap-to-mov.py
|
1
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import hyperspeed.afterscript
title = 'Rewrap to mov'
cmd = '-vcodec copy -acodec copy'
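# Editor's note: these ffmpeg options stream-copy the video and audio tracks,
# so the rewrap into a .mov container happens without re-encoding.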
# Path relative to primary output folder of render:
# default_output = '[project]_[render_name].[codec].mov'
# Absolute path:
default_output = '/Volumes/SAN3/Masters/[project]/[project]_[rendername]/[project]_[rendername].[codec].mov'
hyperspeed.afterscript.AfterscriptFfmpeg(__file__, cmd, default_output, title)
hyperspeed.afterscript.gtk.main()
|
QuLogic/meson
|
refs/heads/master
|
mesonbuild/mesonlib/__init__.py
|
2
|
# SPDX-license-identifier: Apache-2.0
# Copyright 2012-2021 The Meson development team
# Copyright © 2021 Intel Corporation
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper functions and classes."""
import os
from .universal import *
# Here we import either the posix implementations, the windows implementations,
# or a generic no-op implementation
if os.name == 'posix':
from .posix import *
elif os.name == 'nt':
from .win32 import *
else:
from .platform import *
|
repotvsupertuga/tvsupertuga.repository
|
refs/heads/master
|
script.module.schism.common/lib/requests/packages/chardet/utf8prober.py
|
2918
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from . import constants
from .charsetprober import CharSetProber
from .codingstatemachine import CodingStateMachine
from .mbcssm import UTF8SMModel
ONE_CHAR_PROB = 0.5
class UTF8Prober(CharSetProber):
def __init__(self):
CharSetProber.__init__(self)
self._mCodingSM = CodingStateMachine(UTF8SMModel)
self.reset()
def reset(self):
CharSetProber.reset(self)
self._mCodingSM.reset()
self._mNumOfMBChar = 0
def get_charset_name(self):
return "utf-8"
def feed(self, aBuf):
for c in aBuf:
codingState = self._mCodingSM.next_state(c)
if codingState == constants.eError:
self._mState = constants.eNotMe
break
elif codingState == constants.eItsMe:
self._mState = constants.eFoundIt
break
elif codingState == constants.eStart:
if self._mCodingSM.get_current_charlen() >= 2:
self._mNumOfMBChar += 1
if self.get_state() == constants.eDetecting:
if self.get_confidence() > constants.SHORTCUT_THRESHOLD:
self._mState = constants.eFoundIt
return self.get_state()
def get_confidence(self):
unlike = 0.99
if self._mNumOfMBChar < 6:
for i in range(0, self._mNumOfMBChar):
unlike = unlike * ONE_CHAR_PROB
return 1.0 - unlike
else:
return unlike
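    # Editor's worked example: after three multi-byte sequences have been fed,
    # get_confidence() returns 1 - 0.99 * 0.5**3 = 0.87625; once six or more
    # have been seen it returns the capped value 0.99.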
|
mengxn/tensorflow
|
refs/heads/master
|
tensorflow/python/ops/check_ops.py
|
9
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=g-short-docstring-punctuation
"""Asserts and Boolean Checks.
See the @{$python/check_ops} guide.
@@assert_negative
@@assert_positive
@@assert_non_negative
@@assert_non_positive
@@assert_equal
@@assert_none_equal
@@assert_less
@@assert_less_equal
@@assert_greater
@@assert_greater_equal
@@assert_rank
@@assert_rank_at_least
@@assert_type
@@assert_integer
@@assert_proper_iterable
@@assert_same_float_dtype
@@assert_scalar
@@is_non_decreasing
@@is_numeric_tensor
@@is_strictly_increasing
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.util import compat
NUMERIC_TYPES = frozenset(
[dtypes.float32, dtypes.float64, dtypes.int8, dtypes.int16, dtypes.int32,
dtypes.int64, dtypes.uint8, dtypes.qint8, dtypes.qint32, dtypes.quint8,
dtypes.complex64])
__all__ = [
'assert_negative',
'assert_positive',
'assert_proper_iterable',
'assert_non_negative',
'assert_non_positive',
'assert_equal',
'assert_none_equal',
'assert_integer',
'assert_less',
'assert_less_equal',
'assert_greater',
'assert_greater_equal',
'assert_rank',
'assert_rank_at_least',
'assert_rank_in',
'assert_same_float_dtype',
'assert_scalar',
'assert_type',
'is_non_decreasing',
'is_numeric_tensor',
'is_strictly_increasing',
]
def assert_proper_iterable(values):
"""Static assert that values is a "proper" iterable.
`Ops` that expect iterables of `Tensor` can call this to validate input.
  Useful since `Tensor`, `ndarray`, byte/text types are all iterables themselves.
Args:
values: Object to be checked.
Raises:
TypeError: If `values` is not iterable or is one of
`Tensor`, `SparseTensor`, `np.array`, `tf.compat.bytes_or_text_types`.
"""
unintentional_iterables = (
(ops.Tensor, sparse_tensor.SparseTensor, np.ndarray)
+ compat.bytes_or_text_types
)
if isinstance(values, unintentional_iterables):
raise TypeError(
'Expected argument "values" to be a "proper" iterable. Found: %s' %
type(values))
if not hasattr(values, '__iter__'):
raise TypeError(
'Expected argument "values" to be iterable. Found: %s' % type(values))
def assert_negative(x, data=None, summarize=None, message=None, name=None):
"""Assert the condition `x < 0` holds element-wise.
Example of adding a dependency to an operation:
```python
with tf.control_dependencies([tf.assert_negative(x)]):
output = tf.reduce_sum(x)
```
Negative means, for every element `x[i]` of `x`, we have `x[i] < 0`.
If `x` is empty this is trivially satisfied.
Args:
x: Numeric `Tensor`.
data: The tensors to print out if the condition is False. Defaults to
error message and first few entries of `x`.
summarize: Print this many entries of each tensor.
message: A string to prefix to the default message.
name: A name for this operation (optional). Defaults to "assert_negative".
Returns:
Op raising `InvalidArgumentError` unless `x` is all negative.
"""
message = message or ''
with ops.name_scope(name, 'assert_negative', [x, data]):
x = ops.convert_to_tensor(x, name='x')
if data is None:
data = [
message, 'Condition x < 0 did not hold element-wise: x = ', x.name, x]
zero = ops.convert_to_tensor(0, dtype=x.dtype)
return assert_less(x, zero, data=data, summarize=summarize)
def assert_positive(x, data=None, summarize=None, message=None, name=None):
"""Assert the condition `x > 0` holds element-wise.
Example of adding a dependency to an operation:
```python
with tf.control_dependencies([tf.assert_positive(x)]):
output = tf.reduce_sum(x)
```
Positive means, for every element `x[i]` of `x`, we have `x[i] > 0`.
If `x` is empty this is trivially satisfied.
Args:
x: Numeric `Tensor`.
data: The tensors to print out if the condition is False. Defaults to
error message and first few entries of `x`.
summarize: Print this many entries of each tensor.
message: A string to prefix to the default message.
name: A name for this operation (optional). Defaults to "assert_positive".
Returns:
Op raising `InvalidArgumentError` unless `x` is all positive.
"""
message = message or ''
with ops.name_scope(name, 'assert_positive', [x, data]):
x = ops.convert_to_tensor(x, name='x')
if data is None:
data = [
message, 'Condition x > 0 did not hold element-wise: x = ', x.name, x]
zero = ops.convert_to_tensor(0, dtype=x.dtype)
return assert_less(zero, x, data=data, summarize=summarize)
def assert_non_negative(x, data=None, summarize=None, message=None, name=None):
"""Assert the condition `x >= 0` holds element-wise.
Example of adding a dependency to an operation:
```python
with tf.control_dependencies([tf.assert_non_negative(x)]):
output = tf.reduce_sum(x)
```
Non-negative means, for every element `x[i]` of `x`, we have `x[i] >= 0`.
If `x` is empty this is trivially satisfied.
Args:
x: Numeric `Tensor`.
data: The tensors to print out if the condition is False. Defaults to
error message and first few entries of `x`.
summarize: Print this many entries of each tensor.
message: A string to prefix to the default message.
name: A name for this operation (optional).
Defaults to "assert_non_negative".
Returns:
Op raising `InvalidArgumentError` unless `x` is all non-negative.
"""
message = message or ''
with ops.name_scope(name, 'assert_non_negative', [x, data]):
x = ops.convert_to_tensor(x, name='x')
if data is None:
data = [
message,
'Condition x >= 0 did not hold element-wise: x = ', x.name, x]
zero = ops.convert_to_tensor(0, dtype=x.dtype)
return assert_less_equal(zero, x, data=data, summarize=summarize)
def assert_non_positive(x, data=None, summarize=None, message=None, name=None):
"""Assert the condition `x <= 0` holds element-wise.
Example of adding a dependency to an operation:
```python
with tf.control_dependencies([tf.assert_non_positive(x)]):
output = tf.reduce_sum(x)
```
Non-positive means, for every element `x[i]` of `x`, we have `x[i] <= 0`.
If `x` is empty this is trivially satisfied.
Args:
x: Numeric `Tensor`.
data: The tensors to print out if the condition is False. Defaults to
error message and first few entries of `x`.
summarize: Print this many entries of each tensor.
message: A string to prefix to the default message.
name: A name for this operation (optional).
Defaults to "assert_non_positive".
Returns:
Op raising `InvalidArgumentError` unless `x` is all non-positive.
"""
message = message or ''
with ops.name_scope(name, 'assert_non_positive', [x, data]):
x = ops.convert_to_tensor(x, name='x')
if data is None:
data = [
message,
'Condition x <= 0 did not hold element-wise: x = ', x.name, x]
zero = ops.convert_to_tensor(0, dtype=x.dtype)
return assert_less_equal(x, zero, data=data, summarize=summarize)
def assert_equal(x, y, data=None, summarize=None, message=None, name=None):
"""Assert the condition `x == y` holds element-wise.
Example of adding a dependency to an operation:
```python
with tf.control_dependencies([tf.assert_equal(x, y)]):
output = tf.reduce_sum(x)
```
This condition holds if for every pair of (possibly broadcast) elements
`x[i]`, `y[i]`, we have `x[i] == y[i]`.
If both `x` and `y` are empty, this is trivially satisfied.
Args:
x: Numeric `Tensor`.
y: Numeric `Tensor`, same dtype as and broadcastable to `x`.
data: The tensors to print out if the condition is False. Defaults to
error message and first few entries of `x`, `y`.
summarize: Print this many entries of each tensor.
message: A string to prefix to the default message.
name: A name for this operation (optional). Defaults to "assert_equal".
Returns:
Op that raises `InvalidArgumentError` if `x == y` is False.
"""
message = message or ''
with ops.name_scope(name, 'assert_equal', [x, y, data]):
x = ops.convert_to_tensor(x, name='x')
y = ops.convert_to_tensor(y, name='y')
if data is None:
data = [
message,
'Condition x == y did not hold element-wise: x = ', x.name, x, 'y = ',
y.name, y
]
condition = math_ops.reduce_all(math_ops.equal(x, y))
return control_flow_ops.Assert(condition, data, summarize=summarize)
def assert_none_equal(
x, y, data=None, summarize=None, message=None, name=None):
"""Assert the condition `x != y` holds for all elements.
Example of adding a dependency to an operation:
```python
with tf.control_dependencies([tf.assert_none_equal(x, y)]):
output = tf.reduce_sum(x)
```
This condition holds if for every pair of (possibly broadcast) elements
`x[i]`, `y[i]`, we have `x[i] != y[i]`.
If both `x` and `y` are empty, this is trivially satisfied.
Args:
x: Numeric `Tensor`.
y: Numeric `Tensor`, same dtype as and broadcastable to `x`.
data: The tensors to print out if the condition is False. Defaults to
error message and first few entries of `x`, `y`.
summarize: Print this many entries of each tensor.
message: A string to prefix to the default message.
name: A name for this operation (optional).
Defaults to "assert_none_equal".
Returns:
Op that raises `InvalidArgumentError` if `x != y` is ever False.
"""
message = message or ''
with ops.name_scope(name, 'assert_none_equal', [x, y, data]):
x = ops.convert_to_tensor(x, name='x')
y = ops.convert_to_tensor(y, name='y')
if data is None:
data = [
message,
'Condition x != y did not hold for every single element: x = ',
x.name, x,
'y = ', y.name, y
]
condition = math_ops.reduce_all(math_ops.not_equal(x, y))
return control_flow_ops.Assert(condition, data, summarize=summarize)
def assert_less(x, y, data=None, summarize=None, message=None, name=None):
"""Assert the condition `x < y` holds element-wise.
Example of adding a dependency to an operation:
```python
with tf.control_dependencies([tf.assert_less(x, y)]):
output = tf.reduce_sum(x)
```
This condition holds if for every pair of (possibly broadcast) elements
`x[i]`, `y[i]`, we have `x[i] < y[i]`.
If both `x` and `y` are empty, this is trivially satisfied.
Args:
x: Numeric `Tensor`.
y: Numeric `Tensor`, same dtype as and broadcastable to `x`.
data: The tensors to print out if the condition is False. Defaults to
error message and first few entries of `x`, `y`.
summarize: Print this many entries of each tensor.
message: A string to prefix to the default message.
name: A name for this operation (optional). Defaults to "assert_less".
Returns:
Op that raises `InvalidArgumentError` if `x < y` is False.
"""
message = message or ''
with ops.name_scope(name, 'assert_less', [x, y, data]):
x = ops.convert_to_tensor(x, name='x')
y = ops.convert_to_tensor(y, name='y')
if data is None:
data = [
message,
'Condition x < y did not hold element-wise: x = ', x.name, x, 'y = ',
y.name, y
]
condition = math_ops.reduce_all(math_ops.less(x, y))
return control_flow_ops.Assert(condition, data, summarize=summarize)
def assert_less_equal(x, y, data=None, summarize=None, message=None, name=None):
"""Assert the condition `x <= y` holds element-wise.
Example of adding a dependency to an operation:
```python
with tf.control_dependencies([tf.assert_less_equal(x, y)]):
output = tf.reduce_sum(x)
```
This condition holds if for every pair of (possibly broadcast) elements
`x[i]`, `y[i]`, we have `x[i] <= y[i]`.
If both `x` and `y` are empty, this is trivially satisfied.
Args:
x: Numeric `Tensor`.
y: Numeric `Tensor`, same dtype as and broadcastable to `x`.
data: The tensors to print out if the condition is False. Defaults to
error message and first few entries of `x`, `y`.
summarize: Print this many entries of each tensor.
message: A string to prefix to the default message.
name: A name for this operation (optional). Defaults to "assert_less_equal"
Returns:
Op that raises `InvalidArgumentError` if `x <= y` is False.
"""
message = message or ''
with ops.name_scope(name, 'assert_less_equal', [x, y, data]):
x = ops.convert_to_tensor(x, name='x')
y = ops.convert_to_tensor(y, name='y')
if data is None:
data = [
message,
'Condition x <= y did not hold element-wise: x = ', x.name, x, 'y = ',
y.name, y
]
condition = math_ops.reduce_all(math_ops.less_equal(x, y))
return control_flow_ops.Assert(condition, data, summarize=summarize)
def assert_greater(x, y, data=None, summarize=None, message=None, name=None):
"""Assert the condition `x > y` holds element-wise.
Example of adding a dependency to an operation:
```python
with tf.control_dependencies([tf.assert_greater(x, y)]):
output = tf.reduce_sum(x)
```
This condition holds if for every pair of (possibly broadcast) elements
`x[i]`, `y[i]`, we have `x[i] > y[i]`.
If both `x` and `y` are empty, this is trivially satisfied.
Args:
x: Numeric `Tensor`.
y: Numeric `Tensor`, same dtype as and broadcastable to `x`.
data: The tensors to print out if the condition is False. Defaults to
error message and first few entries of `x`, `y`.
summarize: Print this many entries of each tensor.
message: A string to prefix to the default message.
name: A name for this operation (optional). Defaults to "assert_greater".
Returns:
Op that raises `InvalidArgumentError` if `x > y` is False.
"""
message = message or ''
with ops.name_scope(name, 'assert_greater', [x, y, data]):
x = ops.convert_to_tensor(x, name='x')
y = ops.convert_to_tensor(y, name='y')
if data is None:
data = [
message,
'Condition x > y did not hold element-wise: x = ', x.name, x, 'y = ',
y.name, y
]
condition = math_ops.reduce_all(math_ops.greater(x, y))
return control_flow_ops.Assert(condition, data, summarize=summarize)
def assert_greater_equal(x, y, data=None, summarize=None, message=None,
name=None):
"""Assert the condition `x >= y` holds element-wise.
Example of adding a dependency to an operation:
```python
with tf.control_dependencies([tf.assert_greater_equal(x, y)]):
output = tf.reduce_sum(x)
```
This condition holds if for every pair of (possibly broadcast) elements
`x[i]`, `y[i]`, we have `x[i] >= y[i]`.
If both `x` and `y` are empty, this is trivially satisfied.
Args:
x: Numeric `Tensor`.
y: Numeric `Tensor`, same dtype as and broadcastable to `x`.
data: The tensors to print out if the condition is False. Defaults to
error message and first few entries of `x`, `y`.
summarize: Print this many entries of each tensor.
message: A string to prefix to the default message.
name: A name for this operation (optional). Defaults to
"assert_greater_equal"
Returns:
Op that raises `InvalidArgumentError` if `x >= y` is False.
"""
message = message or ''
with ops.name_scope(name, 'assert_greater_equal', [x, y, data]):
x = ops.convert_to_tensor(x, name='x')
y = ops.convert_to_tensor(y, name='y')
if data is None:
data = [
message,
'Condition x >= y did not hold element-wise: x = ', x.name, x, 'y = ',
y.name, y
]
condition = math_ops.reduce_all(math_ops.greater_equal(x, y))
return control_flow_ops.Assert(condition, data, summarize=summarize)
def _assert_rank_condition(
x, rank, static_condition, dynamic_condition, data, summarize):
"""Assert `x` has a rank that satisfies a given condition.
Args:
x: Numeric `Tensor`.
rank: Scalar `Tensor`.
static_condition: A python function that takes `[actual_rank, given_rank]`
and returns `True` if the condition is satisfied, `False` otherwise.
dynamic_condition: An `op` that takes [actual_rank, given_rank]
and return `True` if the condition is satisfied, `False` otherwise.
data: The tensors to print out if the condition is false. Defaults to
error message and first few entries of `x`.
summarize: Print this many entries of each tensor.
Returns:
Op raising `InvalidArgumentError` if `x` fails dynamic_condition.
Raises:
ValueError: If static checks determine `x` fails static_condition.
"""
assert_type(rank, dtypes.int32)
  # Attempt to statically determine the rank.
rank_static = tensor_util.constant_value(rank)
if rank_static is not None:
if rank_static.ndim != 0:
raise ValueError('Rank must be a scalar.')
x_rank_static = x.get_shape().ndims
if x_rank_static is not None:
if not static_condition(x_rank_static, rank_static):
raise ValueError(
'Static rank condition failed', x_rank_static, rank_static)
return control_flow_ops.no_op(name='static_checks_determined_all_ok')
condition = dynamic_condition(array_ops.rank(x), rank)
# Add the condition that `rank` must have rank zero. Prevents the bug where
# someone does assert_rank(x, [n]), rather than assert_rank(x, n).
if rank_static is None:
this_data = ['Rank must be a scalar. Received rank: ', rank]
rank_check = assert_rank(rank, 0, data=this_data)
condition = control_flow_ops.with_dependencies([rank_check], condition)
return control_flow_ops.Assert(condition, data, summarize=summarize)
def assert_rank(x, rank, data=None, summarize=None, message=None, name=None):
"""Assert `x` has rank equal to `rank`.
Example of adding a dependency to an operation:
```python
with tf.control_dependencies([tf.assert_rank(x, 2)]):
output = tf.reduce_sum(x)
```
Args:
x: Numeric `Tensor`.
rank: Scalar integer `Tensor`.
data: The tensors to print out if the condition is False. Defaults to
error message and first few entries of `x`.
summarize: Print this many entries of each tensor.
message: A string to prefix to the default message.
name: A name for this operation (optional). Defaults to "assert_rank".
Returns:
Op raising `InvalidArgumentError` unless `x` has specified rank.
If static checks determine `x` has correct rank, a `no_op` is returned.
Raises:
ValueError: If static checks determine `x` has wrong rank.
"""
with ops.name_scope(name, 'assert_rank', (x, rank) + tuple(data or [])):
x = ops.convert_to_tensor(x, name='x')
rank = ops.convert_to_tensor(rank, name='rank')
message = message or ''
static_condition = lambda actual_rank, given_rank: actual_rank == given_rank
dynamic_condition = math_ops.equal
if data is None:
data = [
message,
'Tensor %s must have rank' % x.name, rank, 'Received shape: ',
array_ops.shape(x)
]
try:
assert_op = _assert_rank_condition(x, rank, static_condition,
dynamic_condition, data, summarize)
except ValueError as e:
if e.args[0] == 'Static rank condition failed':
raise ValueError(
'%s. Tensor %s must have rank %d. Received rank %d, shape %s' %
(message, x.name, e.args[2], e.args[1], x.get_shape()))
else:
raise
return assert_op
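# Editor's illustrative note: assert_rank(tf.ones([2, 3]), 2) is resolved
# statically to a no_op, whereas a tensor of unknown shape (e.g. a
# tf.placeholder(tf.float32) in this TF 1.x era API) yields an Assert op that
# is only evaluated at run time.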
def assert_rank_at_least(
x, rank, data=None, summarize=None, message=None, name=None):
"""Assert `x` has rank equal to `rank` or higher.
Example of adding a dependency to an operation:
```python
with tf.control_dependencies([tf.assert_rank_at_least(x, 2)]):
output = tf.reduce_sum(x)
```
Args:
x: Numeric `Tensor`.
rank: Scalar `Tensor`.
data: The tensors to print out if the condition is False. Defaults to
error message and first few entries of `x`.
summarize: Print this many entries of each tensor.
message: A string to prefix to the default message.
name: A name for this operation (optional).
Defaults to "assert_rank_at_least".
Returns:
Op raising `InvalidArgumentError` unless `x` has specified rank or higher.
If static checks determine `x` has correct rank, a `no_op` is returned.
Raises:
ValueError: If static checks determine `x` has wrong rank.
"""
with ops.name_scope(
name, 'assert_rank_at_least', (x, rank) + tuple(data or [])):
x = ops.convert_to_tensor(x, name='x')
rank = ops.convert_to_tensor(rank, name='rank')
message = message or ''
static_condition = lambda actual_rank, given_rank: actual_rank >= given_rank
dynamic_condition = math_ops.greater_equal
if data is None:
data = [
message,
'Tensor %s must have rank at least' % x.name, rank,
'Received shape: ', array_ops.shape(x)
]
try:
assert_op = _assert_rank_condition(x, rank, static_condition,
dynamic_condition, data, summarize)
except ValueError as e:
if e.args[0] == 'Static rank condition failed':
raise ValueError(
'%s. Tensor %s must have rank at least %d. Received rank %d, '
'shape %s' % (message, x.name, e.args[2], e.args[1], x.get_shape()))
else:
raise
return assert_op
def _static_rank_in(actual_rank, given_ranks):
return actual_rank in given_ranks
def _dynamic_rank_in(actual_rank, given_ranks):
if len(given_ranks) < 1:
return ops.convert_to_tensor(False)
result = math_ops.equal(given_ranks[0], actual_rank)
for given_rank in given_ranks[1:]:
result = math_ops.logical_or(
result, math_ops.equal(given_rank, actual_rank))
return result
def _assert_ranks_condition(
x, ranks, static_condition, dynamic_condition, data, summarize):
"""Assert `x` has a rank that satisfies a given condition.
Args:
x: Numeric `Tensor`.
ranks: Scalar `Tensor`.
static_condition: A python function that takes
`[actual_rank, given_ranks]` and returns `True` if the condition is
satisfied, `False` otherwise.
dynamic_condition: An `op` that takes [actual_rank, given_ranks]
and return `True` if the condition is satisfied, `False` otherwise.
data: The tensors to print out if the condition is false. Defaults to
error message and first few entries of `x`.
summarize: Print this many entries of each tensor.
Returns:
Op raising `InvalidArgumentError` if `x` fails dynamic_condition.
Raises:
ValueError: If static checks determine `x` fails static_condition.
"""
for rank in ranks:
assert_type(rank, dtypes.int32)
  # Attempt to statically determine the ranks.
ranks_static = tuple([tensor_util.constant_value(rank) for rank in ranks])
if None not in ranks_static:
for rank_static in ranks_static:
if rank_static.ndim != 0:
raise ValueError('Rank must be a scalar.')
x_rank_static = x.get_shape().ndims
if x_rank_static is not None:
if not static_condition(x_rank_static, ranks_static):
raise ValueError(
'Static rank condition failed', x_rank_static, ranks_static)
return control_flow_ops.no_op(name='static_checks_determined_all_ok')
condition = dynamic_condition(array_ops.rank(x), ranks)
# Add the condition that `rank` must have rank zero. Prevents the bug where
# someone does assert_rank(x, [n]), rather than assert_rank(x, n).
for rank, rank_static in zip(ranks, ranks_static):
if rank_static is None:
this_data = ['Rank must be a scalar. Received rank: ', rank]
rank_check = assert_rank(rank, 0, data=this_data)
condition = control_flow_ops.with_dependencies([rank_check], condition)
return control_flow_ops.Assert(condition, data, summarize=summarize)
def assert_rank_in(
x, ranks, data=None, summarize=None, message=None, name=None):
"""Assert `x` has rank in `ranks`.
Example of adding a dependency to an operation:
```python
with tf.control_dependencies([tf.assert_rank_in(x, (2, 4))]):
output = tf.reduce_sum(x)
```
Args:
x: Numeric `Tensor`.
ranks: Iterable of scalar `Tensor` objects.
data: The tensors to print out if the condition is False. Defaults to
error message and first few entries of `x`.
summarize: Print this many entries of each tensor.
message: A string to prefix to the default message.
name: A name for this operation (optional).
Defaults to "assert_rank_in".
Returns:
Op raising `InvalidArgumentError` unless rank of `x` is in `ranks`.
If static checks determine `x` has matching rank, a `no_op` is returned.
Raises:
ValueError: If static checks determine `x` has mismatched rank.
"""
with ops.name_scope(
name, 'assert_rank_in', (x,) + tuple(ranks) + tuple(data or [])):
x = ops.convert_to_tensor(x, name='x')
ranks = tuple([ops.convert_to_tensor(rank, name='rank') for rank in ranks])
message = message or ''
if data is None:
data = [
message, 'Tensor %s must have rank in' % x.name
] + list(ranks) + [
'Received shape: ', array_ops.shape(x)
]
try:
assert_op = _assert_ranks_condition(x, ranks, _static_rank_in,
_dynamic_rank_in, data, summarize)
except ValueError as e:
if e.args[0] == 'Static rank condition failed':
raise ValueError(
'%s. Tensor %s must have rank in %s. Received rank %d, '
'shape %s' % (message, x.name, e.args[2], e.args[1], x.get_shape()))
else:
raise
return assert_op
def assert_integer(x, message=None, name=None):
"""Assert that `x` is of integer dtype.
Example of adding a dependency to an operation:
```python
with tf.control_dependencies([tf.assert_integer(x)]):
output = tf.reduce_sum(x)
```
Args:
x: `Tensor` whose basetype is integer and is not quantized.
message: A string to prefix to the default message.
name: A name for this operation (optional). Defaults to "assert_integer".
Raises:
TypeError: If `x.dtype` is anything other than non-quantized integer.
Returns:
A `no_op` that does nothing. Type can be determined statically.
"""
message = message or ''
with ops.name_scope(name, 'assert_integer', [x]):
x = ops.convert_to_tensor(x, name='x')
if not x.dtype.is_integer:
err_msg = (
'%s Expected "x" to be integer type. Found: %s of dtype %s'
% (message, x.name, x.dtype))
raise TypeError(err_msg)
return control_flow_ops.no_op('statically_determined_was_integer')
def assert_type(tensor, tf_type, message=None, name=None):
"""Statically asserts that the given `Tensor` is of the specified type.
Args:
tensor: A tensorflow `Tensor`.
tf_type: A tensorflow type (`dtypes.float32`, `tf.int64`, `dtypes.bool`,
etc).
message: A string to prefix to the default message.
name: A name to give this `Op`. Defaults to "assert_type"
Raises:
TypeError: If the tensor's data type doesn't match `tf_type`.
Returns:
A `no_op` that does nothing. Type can be determined statically.
"""
message = message or ''
with ops.name_scope(name, 'assert_type', [tensor]):
tensor = ops.convert_to_tensor(tensor, name='tensor')
if tensor.dtype != tf_type:
raise TypeError(
'%s %s must be of type %s' % (message, tensor.op.name, tf_type))
return control_flow_ops.no_op('statically_determined_correct_type')
# pylint: disable=line-too-long
def _get_diff_for_monotonic_comparison(x):
"""Gets the difference x[1:] - x[:-1]."""
x = array_ops.reshape(x, [-1])
if not is_numeric_tensor(x):
raise TypeError('Expected x to be numeric, instead found: %s' % x)
# If x has less than 2 elements, there is nothing to compare. So return [].
is_shorter_than_two = math_ops.less(array_ops.size(x), 2)
short_result = lambda: ops.convert_to_tensor([], dtype=x.dtype)
# With 2 or more elements, return x[1:] - x[:-1]
s_len = array_ops.shape(x) - 1
diff = lambda: array_ops.strided_slice(x, [1], [1] + s_len) - array_ops.strided_slice(x, [0], s_len)
return control_flow_ops.cond(is_shorter_than_two, short_result, diff)
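# Illustrative sketch (not part of the original module): for a flattened input the
# helper above returns adjacent differences, while inputs with fewer than two
# elements yield an empty tensor. For example:
#
#   diff = _get_diff_for_monotonic_comparison(
#       ops.convert_to_tensor([1, 2, 2, 5]))  # evaluates to [1, 0, 3]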
def is_numeric_tensor(tensor):
return isinstance(tensor, ops.Tensor) and tensor.dtype in NUMERIC_TYPES
def is_non_decreasing(x, name=None):
"""Returns `True` if `x` is non-decreasing.
Elements of `x` are compared in row-major order. The tensor `[x[0],...]`
is non-decreasing if for every adjacent pair we have `x[i] <= x[i+1]`.
If `x` has less than two elements, it is trivially non-decreasing.
See also: `is_strictly_increasing`
Args:
x: Numeric `Tensor`.
name: A name for this operation (optional). Defaults to "is_non_decreasing"
Returns:
Boolean `Tensor`, equal to `True` iff `x` is non-decreasing.
Raises:
TypeError: if `x` is not a numeric tensor.
"""
with ops.name_scope(name, 'is_non_decreasing', [x]):
diff = _get_diff_for_monotonic_comparison(x)
# When len(x) = 1, diff = [], less_equal = [], and reduce_all([]) = True.
zero = ops.convert_to_tensor(0, dtype=diff.dtype)
return math_ops.reduce_all(math_ops.less_equal(zero, diff))
def is_strictly_increasing(x, name=None):
"""Returns `True` if `x` is strictly increasing.
Elements of `x` are compared in row-major order. The tensor `[x[0],...]`
is strictly increasing if for every adjacent pair we have `x[i] < x[i+1]`.
If `x` has less than two elements, it is trivially strictly increasing.
See also: `is_non_decreasing`
Args:
x: Numeric `Tensor`.
name: A name for this operation (optional).
Defaults to "is_strictly_increasing"
Returns:
Boolean `Tensor`, equal to `True` iff `x` is strictly increasing.
Raises:
TypeError: if `x` is not a numeric tensor.
"""
with ops.name_scope(name, 'is_strictly_increasing', [x]):
diff = _get_diff_for_monotonic_comparison(x)
# When len(x) = 1, diff = [], less = [], and reduce_all([]) = True.
zero = ops.convert_to_tensor(0, dtype=diff.dtype)
return math_ops.reduce_all(math_ops.less(zero, diff))
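# Illustrative sketch (not part of the original module): the two monotonicity
# checks above differ only in whether ties are allowed. With x = [1, 1, 2]:
#
#   is_non_decreasing(x)       # evaluates to True  (ties allowed)
#   is_strictly_increasing(x)  # evaluates to False (tie at the first pair)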
def _assert_same_base_type(items, expected_type=None):
r"""Asserts all items are of the same base type.
Args:
items: List of graph items (e.g., `Variable`, `Tensor`, `SparseTensor`,
`Operation`, or `IndexedSlices`). Can include `None` elements, which
will be ignored.
expected_type: Expected type. If not specified, assert all items are
of the same base type.
Returns:
Validated type, or none if neither expected_type nor items provided.
Raises:
ValueError: If any types do not match.
"""
original_item_str = None
for item in items:
if item is not None:
item_type = item.dtype.base_dtype
if not expected_type:
expected_type = item_type
original_item_str = item.name if hasattr(item, 'name') else str(item)
elif expected_type != item_type:
raise ValueError('%s, type=%s, must be of the same type (%s)%s.' % (
item.name if hasattr(item, 'name') else str(item),
item_type, expected_type,
(' as %s' % original_item_str) if original_item_str else ''))
return expected_type
def assert_same_float_dtype(tensors=None, dtype=None):
"""Validate and return float type based on `tensors` and `dtype`.
For ops such as matrix multiplication, inputs and weights must be of the
same float type. This function validates that all `tensors` are the same type,
validates that type is `dtype` (if supplied), and returns the type. Type must
be a floating point type. If neither `tensors` nor `dtype` is supplied,
the function will return `dtypes.float32`.
Args:
tensors: Tensors of input values. Can include `None` elements, which will be
ignored.
dtype: Expected type.
Returns:
Validated type.
Raises:
ValueError: if neither `tensors` nor `dtype` is supplied, or the common
type of the inputs is not a floating point type.
"""
if tensors:
dtype = _assert_same_base_type(tensors, dtype)
if not dtype:
dtype = dtypes.float32
elif not dtype.is_floating:
raise ValueError('Expected floating point type, got %s.' % dtype)
return dtype
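# Illustrative sketch (not part of the original module): typical validation flow,
# assuming `a` and `b` are hypothetical float32 tensors:
#
#   assert_same_float_dtype([a, b])            # returns dtypes.float32
#   assert_same_float_dtype()                  # no inputs: defaults to dtypes.float32
#   assert_same_float_dtype([a], dtypes.int32) # raises ValueError (type mismatch)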
def assert_scalar(tensor, name=None):
with ops.name_scope(name, 'assert_scalar', [tensor]) as name_scope:
tensor = ops.convert_to_tensor(tensor, name=name_scope)
shape = tensor.get_shape()
if shape.ndims != 0:
raise ValueError('Expected scalar shape for %s, saw shape: %s.'
% (tensor.name, shape))
return tensor
|
mblondel/scikit-learn
|
refs/heads/master
|
sklearn/svm/tests/test_bounds.py
|
280
|
import nose
from nose.tools import assert_equal, assert_true
from sklearn.utils.testing import clean_warning_registry
import warnings
import numpy as np
from scipy import sparse as sp
from sklearn.svm.bounds import l1_min_c
from sklearn.svm import LinearSVC
from sklearn.linear_model.logistic import LogisticRegression
dense_X = [[-1, 0], [0, 1], [1, 1], [1, 1]]
sparse_X = sp.csr_matrix(dense_X)
Y1 = [0, 1, 1, 1]
Y2 = [2, 1, 0, 0]
def test_l1_min_c():
losses = ['squared_hinge', 'log']
Xs = {'sparse': sparse_X, 'dense': dense_X}
Ys = {'two-classes': Y1, 'multi-class': Y2}
intercepts = {'no-intercept': {'fit_intercept': False},
'fit-intercept': {'fit_intercept': True,
'intercept_scaling': 10}}
for loss in losses:
for X_label, X in Xs.items():
for Y_label, Y in Ys.items():
for intercept_label, intercept_params in intercepts.items():
check = lambda: check_l1_min_c(X, Y, loss,
**intercept_params)
check.description = ('Test l1_min_c loss=%r %s %s %s' %
(loss, X_label, Y_label,
intercept_label))
yield check
def test_l2_deprecation():
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
assert_equal(l1_min_c(dense_X, Y1, "l2"),
l1_min_c(dense_X, Y1, "squared_hinge"))
assert_equal(w[0].category, DeprecationWarning)
def check_l1_min_c(X, y, loss, fit_intercept=True, intercept_scaling=None):
min_c = l1_min_c(X, y, loss, fit_intercept, intercept_scaling)
clf = {
'log': LogisticRegression(penalty='l1'),
'squared_hinge': LinearSVC(loss='squared_hinge',
penalty='l1', dual=False),
}[loss]
clf.fit_intercept = fit_intercept
clf.intercept_scaling = intercept_scaling
clf.C = min_c
clf.fit(X, y)
assert_true((np.asarray(clf.coef_) == 0).all())
assert_true((np.asarray(clf.intercept_) == 0).all())
clf.C = min_c * 1.01
clf.fit(X, y)
assert_true((np.asarray(clf.coef_) != 0).any() or
(np.asarray(clf.intercept_) != 0).any())
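# Illustrative sketch (not part of the original test module): l1_min_c returns the
# threshold value of C at or below which an L1-penalised model keeps all
# coefficients at zero, which is exactly what the helper above verifies by fitting
# at C = min_c and again at C = 1.01 * min_c. For example:
#
#   from sklearn.svm.bounds import l1_min_c
#   min_c = l1_min_c(dense_X, Y1, 'squared_hinge')  # a small positive float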
@nose.tools.raises(ValueError)
def test_ill_posed_min_c():
X = [[0, 0], [0, 0]]
y = [0, 1]
l1_min_c(X, y)
@nose.tools.raises(ValueError)
def test_unsupported_loss():
l1_min_c(dense_X, Y1, 'l1')
|
blablack/beatslash-lv2
|
refs/heads/master
|
waflib/Tools/compiler_fc.py
|
56
|
#!/usr/bin/env python
# encoding: utf-8
import re
from waflib import Utils, Logs
from waflib.Tools import fc
fc_compiler = {
'win32' : ['gfortran','ifort'],
'darwin' : ['gfortran', 'g95', 'ifort'],
'linux' : ['gfortran', 'g95', 'ifort'],
'java' : ['gfortran', 'g95', 'ifort'],
'default': ['gfortran'],
'aix' : ['gfortran']
}
"""
Dict mapping the platform names to lists of names of Fortran compilers to try, in order of preference::
from waflib.Tools.compiler_fc import fc_compiler
fc_compiler['linux'] = ['gfortran', 'g95', 'ifort']
"""
def default_compilers():
build_platform = Utils.unversioned_sys_platform()
possible_compiler_list = fc_compiler.get(build_platform, fc_compiler['default'])
return ' '.join(possible_compiler_list)
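# Illustrative sketch (not part of the original tool): on a Linux build host the
# helper above returns the space-separated preference list 'gfortran g95 ifort',
# which configure() below splits on spaces and commas before probing each entry.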
def configure(conf):
"""
Detects a suitable Fortran compiler
:raises: :py:class:`waflib.Errors.ConfigurationError` when no suitable compiler is found
"""
try:
test_for_compiler = conf.options.check_fortran_compiler or default_compilers()
except AttributeError:
conf.fatal("Add options(opt): opt.load('compiler_fc')")
for compiler in re.split('[ ,]+', test_for_compiler):
conf.env.stash()
conf.start_msg('Checking for %r (Fortran compiler)' % compiler)
try:
conf.load(compiler)
except conf.errors.ConfigurationError as e:
conf.env.revert()
conf.end_msg(False)
Logs.debug('compiler_fortran: %r', e)
else:
if conf.env.FC:
conf.end_msg(conf.env.get_flat('FC'))
conf.env.COMPILER_FORTRAN = compiler
conf.env.commit()
break
conf.env.revert()
conf.end_msg(False)
else:
conf.fatal('could not configure a Fortran compiler!')
def options(opt):
"""
This is how to provide compiler preferences on the command-line::
$ waf configure --check-fortran-compiler=ifort
"""
test_for_compiler = default_compilers()
opt.load_special_tools('fc_*.py')
fortran_compiler_opts = opt.add_option_group('Configuration options')
fortran_compiler_opts.add_option('--check-fortran-compiler', default=None,
help='list of Fortran compilers to try [%s]' % test_for_compiler,
dest="check_fortran_compiler")
for x in test_for_compiler.split():
opt.load('%s' % x)
|
ShassAro/ShassAro
|
refs/heads/master
|
DockerAdmin/dockerVirtualEnv/lib/python2.7/site-packages/django/core/mail/backends/dummy.py
|
835
|
"""
Dummy email backend that does nothing.
"""
from django.core.mail.backends.base import BaseEmailBackend
class EmailBackend(BaseEmailBackend):
def send_messages(self, email_messages):
return len(list(email_messages))
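# Illustrative sketch (not part of the original module): the dummy backend is
# normally selected via the EMAIL_BACKEND setting, e.g. in settings.py:
#
#   EMAIL_BACKEND = 'django.core.mail.backends.dummy.EmailBackend'
#
# django.core.mail.send_mail() then reports messages as sent without any I/O.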
|
terbolous/SickRage
|
refs/heads/master
|
lib/pgi/cffilib/gir/giunioninfo.py
|
20
|
# Copyright 2013 Christoph Reiter
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
from .._compat import xrange
from ._ffi import lib
from .gibaseinfo import GIBaseInfo, GIInfoType
from .gitypeinfo import GITypeInfo
from .giregisteredtypeinfo import GIRegisteredTypeInfo
@GIBaseInfo._register(GIInfoType.UNION)
class GIUnionInfo(GIRegisteredTypeInfo):
@property
def n_fields(self):
return lib.g_union_info_get_n_fields(self._ptr)
def get_field(self, n):
return lib.g_union_info_get_field(self._ptr, n)
def get_fields(self):
for i in xrange(self.n_fields):
yield self.get_field(i)
@property
def n_methods(self):
return lib.g_union_info_get_n_methods(self._ptr)
def get_method(self, n):
return lib.g_union_info_get_method(self._ptr, n)
def get_methods(self):
for i in xrange(self.n_methods):
yield self.get_method(i)
@property
def is_discriminated(self):
return bool(lib.g_union_info_is_discriminated(self._ptr))
@property
def discriminator_offset(self):
return lib.g_union_info_get_discriminator_offset(self._ptr)
@property
def discriminator_type(self):
return GITypeInfo(lib.g_union_info_get_discriminator_type(self._ptr))
def get_discriminator(self, n):
# FIXME
return lib.g_union_info_get_discriminator(self._ptr, n)
def find_method(self, name):
# FIXME
return lib.g_union_info_find_method(self._ptr, name)
@property
def size(self):
return lib.g_union_info_get_size(self._ptr)
@property
def alignment(self):
return lib.g_union_info_get_alignment(self._ptr)
|
bgris/ODL_bgris
|
refs/heads/master
|
lib/python3.5/site-packages/skimage/transform/pyramids.py
|
20
|
import math
import numpy as np
from scipy import ndimage as ndi
from ..transform import resize
from ..util import img_as_float
def _smooth(image, sigma, mode, cval):
"""Return image with each channel smoothed by the Gaussian filter."""
smoothed = np.empty(image.shape, dtype=np.double)
# apply Gaussian filter to all dimensions independently
if image.ndim == 3:
for dim in range(image.shape[2]):
ndi.gaussian_filter(image[..., dim], sigma,
output=smoothed[..., dim],
mode=mode, cval=cval)
else:
ndi.gaussian_filter(image, sigma, output=smoothed,
mode=mode, cval=cval)
return smoothed
def _check_factor(factor):
if factor <= 1:
raise ValueError('scale factor must be greater than 1')
def pyramid_reduce(image, downscale=2, sigma=None, order=1,
mode='reflect', cval=0):
"""Smooth and then downsample image.
Parameters
----------
image : array
Input image.
downscale : float, optional
Downscale factor.
sigma : float, optional
Sigma for Gaussian filter. Default is `2 * downscale / 6.0` which
corresponds to a filter mask twice the size of the scale factor that
covers more than 99% of the Gaussian distribution.
order : int, optional
Order of splines used in interpolation of downsampling. See
`skimage.transform.warp` for detail.
mode : {'reflect', 'constant', 'edge', 'symmetric', 'wrap'}, optional
The mode parameter determines how the array borders are handled, where
cval is the value when mode is equal to 'constant'.
cval : float, optional
Value to fill past edges of input if mode is 'constant'.
Returns
-------
out : array
Smoothed and downsampled float image.
References
----------
.. [1] http://web.mit.edu/persci/people/adelson/pub_pdfs/pyramid83.pdf
"""
_check_factor(downscale)
image = img_as_float(image)
rows = image.shape[0]
cols = image.shape[1]
out_rows = math.ceil(rows / float(downscale))
out_cols = math.ceil(cols / float(downscale))
if sigma is None:
# automatically determine sigma which covers > 99% of distribution
sigma = 2 * downscale / 6.0
smoothed = _smooth(image, sigma, mode, cval)
out = resize(smoothed, (out_rows, out_cols), order=order,
mode=mode, cval=cval)
return out
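# Illustrative sketch (not part of the original module): with the default
# downscale of 2 each call roughly halves both spatial dimensions, e.g.
#
#   from skimage import data
#   small = pyramid_reduce(data.camera(), downscale=2)
#   # data.camera() is 512x512, so small.shape == (256, 256)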
def pyramid_expand(image, upscale=2, sigma=None, order=1,
mode='reflect', cval=0):
"""Upsample and then smooth image.
Parameters
----------
image : array
Input image.
upscale : float, optional
Upscale factor.
sigma : float, optional
Sigma for Gaussian filter. Default is `2 * upscale / 6.0` which
corresponds to a filter mask twice the size of the scale factor that
covers more than 99% of the Gaussian distribution.
order : int, optional
Order of splines used in interpolation of upsampling. See
`skimage.transform.warp` for detail.
mode : {'reflect', 'constant', 'edge', 'symmetric', 'wrap'}, optional
The mode parameter determines how the array borders are handled, where
cval is the value when mode is equal to 'constant'.
cval : float, optional
Value to fill past edges of input if mode is 'constant'.
Returns
-------
out : array
Upsampled and smoothed float image.
References
----------
.. [1] http://web.mit.edu/persci/people/adelson/pub_pdfs/pyramid83.pdf
"""
_check_factor(upscale)
image = img_as_float(image)
rows = image.shape[0]
cols = image.shape[1]
out_rows = math.ceil(upscale * rows)
out_cols = math.ceil(upscale * cols)
if sigma is None:
# automatically determine sigma which covers > 99% of distribution
sigma = 2 * upscale / 6.0
resized = resize(image, (out_rows, out_cols), order=order,
mode=mode, cval=cval)
out = _smooth(resized, sigma, mode, cval)
return out
def pyramid_gaussian(image, max_layer=-1, downscale=2, sigma=None, order=1,
mode='reflect', cval=0):
"""Yield images of the Gaussian pyramid formed by the input image.
Recursively applies the `pyramid_reduce` function to the image, and yields
the downscaled images.
Note that the first image of the pyramid will be the original, unscaled
image. The total number of images is `max_layer + 1`. In case all layers
are computed, the last image is either a one-pixel image or the image where
the reduction does not change its shape.
Parameters
----------
image : array
Input image.
max_layer : int
Number of layers for the pyramid. 0th layer is the original image.
Default is -1 which builds all possible layers.
downscale : float, optional
Downscale factor.
sigma : float, optional
Sigma for Gaussian filter. Default is `2 * downscale / 6.0` which
corresponds to a filter mask twice the size of the scale factor that
covers more than 99% of the Gaussian distribution.
order : int, optional
Order of splines used in interpolation of downsampling. See
`skimage.transform.warp` for detail.
mode : {'reflect', 'constant', 'edge', 'symmetric', 'wrap'}, optional
The mode parameter determines how the array borders are handled, where
cval is the value when mode is equal to 'constant'.
cval : float, optional
Value to fill past edges of input if mode is 'constant'.
Returns
-------
pyramid : generator
Generator yielding pyramid layers as float images.
References
----------
.. [1] http://web.mit.edu/persci/people/adelson/pub_pdfs/pyramid83.pdf
"""
_check_factor(downscale)
# cast to float for consistent data type in pyramid
image = img_as_float(image)
layer = 0
rows = image.shape[0]
cols = image.shape[1]
prev_layer_image = image
yield image
# build downsampled images until max_layer is reached or downscale process
# does not change image size
while layer != max_layer:
layer += 1
layer_image = pyramid_reduce(prev_layer_image, downscale, sigma, order,
mode, cval)
prev_rows = rows
prev_cols = cols
prev_layer_image = layer_image
rows = layer_image.shape[0]
cols = layer_image.shape[1]
# no change to previous pyramid layer
if prev_rows == rows and prev_cols == cols:
break
yield layer_image
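# Illustrative sketch (not part of the original module): the generator yields the
# original image first and then successively smaller layers, e.g.
#
#   from skimage import data
#   layers = list(pyramid_gaussian(data.camera(), max_layer=2, downscale=2))
#   # shapes: (512, 512), (256, 256), (128, 128)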
def pyramid_laplacian(image, max_layer=-1, downscale=2, sigma=None, order=1,
mode='reflect', cval=0):
"""Yield images of the laplacian pyramid formed by the input image.
Each layer contains the difference between the downsampled and the
downsampled, smoothed image::
layer = resize(prev_layer) - smooth(resize(prev_layer))
Note that the first image of the pyramid will be the difference between the
original, unscaled image and its smoothed version. The total number of
images is `max_layer + 1`. In case all layers are computed, the last image
is either a one-pixel image or the image where the reduction does not
change its shape.
Parameters
----------
image : array
Input image.
max_layer : int
Number of layers for the pyramid. 0th layer is the original image.
Default is -1 which builds all possible layers.
downscale : float, optional
Downscale factor.
sigma : float, optional
Sigma for Gaussian filter. Default is `2 * downscale / 6.0` which
corresponds to a filter mask twice the size of the scale factor that
covers more than 99% of the Gaussian distribution.
order : int, optional
Order of splines used in interpolation of downsampling. See
`skimage.transform.warp` for detail.
mode : {'reflect', 'constant', 'edge', 'symmetric', 'wrap'}, optional
The mode parameter determines how the array borders are handled, where
cval is the value when mode is equal to 'constant'.
cval : float, optional
Value to fill past edges of input if mode is 'constant'.
Returns
-------
pyramid : generator
Generator yielding pyramid layers as float images.
References
----------
.. [1] http://web.mit.edu/persci/people/adelson/pub_pdfs/pyramid83.pdf
.. [2] http://sepwww.stanford.edu/~morgan/texturematch/paper_html/node3.html
"""
_check_factor(downscale)
# cast to float for consistent data type in pyramid
image = img_as_float(image)
if sigma is None:
# automatically determine sigma which covers > 99% of distribution
sigma = 2 * downscale / 6.0
layer = 0
rows = image.shape[0]
cols = image.shape[1]
smoothed_image = _smooth(image, sigma, mode, cval)
yield image - smoothed_image
# build downsampled images until max_layer is reached or downscale process
# does not change image size
while layer != max_layer:
layer += 1
out_rows = math.ceil(rows / float(downscale))
out_cols = math.ceil(cols / float(downscale))
resized_image = resize(smoothed_image, (out_rows, out_cols),
order=order, mode=mode, cval=cval)
smoothed_image = _smooth(resized_image, sigma, mode, cval)
prev_rows = rows
prev_cols = cols
rows = resized_image.shape[0]
cols = resized_image.shape[1]
# no change to previous pyramid layer
if prev_rows == rows and prev_cols == cols:
break
yield resized_image - smoothed_image
|
aloksinha2001/rk3066-kernel
|
refs/heads/master
|
mm/tools/perf/scripts/python/sctop.py
|
11180
|
# system call top
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Periodically displays system-wide system call totals, broken down by
# syscall. If a [comm] arg is specified, only syscalls called by
# [comm] are displayed. If an [interval] arg is specified, the display
# will be refreshed every [interval] seconds. The default interval is
# 3 seconds.
import os, sys, thread, time
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s sctop.py [comm] [interval]\n";
for_comm = None
default_interval = 3
interval = default_interval
if len(sys.argv) > 3:
sys.exit(usage)
if len(sys.argv) > 2:
for_comm = sys.argv[1]
interval = int(sys.argv[2])
elif len(sys.argv) > 1:
try:
interval = int(sys.argv[1])
except ValueError:
for_comm = sys.argv[1]
interval = default_interval
syscalls = autodict()
def trace_begin():
thread.start_new_thread(print_syscall_totals, (interval,))
pass
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if for_comm is not None:
if common_comm != for_comm:
return
try:
syscalls[id] += 1
except TypeError:
syscalls[id] = 1
def print_syscall_totals(interval):
while 1:
clear_term()
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"----------"),
for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
reverse = True):
try:
print "%-40s %10d\n" % (syscall_name(id), val),
except TypeError:
pass
syscalls.clear()
time.sleep(interval)
|
molebot/vnpy
|
refs/heads/master
|
vn.demo/ctpdemo/mdtest.py
|
96
|
# encoding: UTF-8
import sys
from time import sleep
from PyQt4 import QtGui
from vnctpmd import *
#----------------------------------------------------------------------
def print_dict(d):
"""按照键值打印一个字典"""
for key,value in d.items():
print key + ':' + str(value)
#----------------------------------------------------------------------
def simple_log(func):
"""简单装饰器用于输出函数名"""
def wrapper(*args, **kw):
print ""
print str(func.__name__)
return func(*args, **kw)
return wrapper
########################################################################
class TestMdApi(MdApi):
"""测试用实例"""
#----------------------------------------------------------------------
def __init__(self):
"""Constructor"""
super(TestMdApi, self).__init__()
#----------------------------------------------------------------------
@simple_log
def onFrontConnected(self):
"""服务器连接"""
pass
#----------------------------------------------------------------------
@simple_log
def onFrontDisconnected(self, n):
"""服务器断开"""
print n
#----------------------------------------------------------------------
@simple_log
def onHeartBeatWarning(self, n):
"""心跳报警"""
print n
#----------------------------------------------------------------------
@simple_log
def onRspError(self, error, n, last):
"""错误"""
print_dict(error)
#----------------------------------------------------------------------
@simple_log
def onRspUserLogin(self, data, error, n, last):
"""Login response."""
print_dict(data)
print_dict(error)
#----------------------------------------------------------------------
@simple_log
def onRspUserLogout(self, data, error, n, last):
"""登出回报"""
print_dict(data)
print_dict(error)
#----------------------------------------------------------------------
@simple_log
def onRspSubMarketData(self, data, error, n, last):
"""订阅合约回报"""
print_dict(data)
print_dict(error)
#----------------------------------------------------------------------
@simple_log
def onRspUnSubMarketData(self, data, error, n, last):
"""退订合约回报"""
print_dict(data)
print_dict(error)
#----------------------------------------------------------------------
@simple_log
def onRtnDepthMarketData(self, data):
"""行情推送"""
print_dict(data)
#----------------------------------------------------------------------
@simple_log
def onRspSubForQuoteRsp(self, data, error, n, last):
"""订阅合约回报"""
print_dict(data)
print_dict(error)
#----------------------------------------------------------------------
@simple_log
def onRspUnSubForQuoteRsp(self, data, error, n, last):
"""退订合约回报"""
print_dict(data)
print_dict(error)
#----------------------------------------------------------------------
@simple_log
def onRtnForQuoteRsp(self, data):
"""行情推送"""
print_dict(data)
#----------------------------------------------------------------------
def main():
"""主测试函数,出现堵塞时可以考虑使用sleep"""
reqid = 0
# Create the Qt application object for the event loop
app = QtGui.QApplication(sys.argv)
# Create the API object
api = TestMdApi()
# Create the MdApi object on the C++ side; the argument is the directory for saving .con files
api.createFtdcMdApi('')
# Register the front server address
api.registerFront("tcp://qqfz-md1.ctp.shcifco.com:32313")
# Initialize the API and connect to the front server
api.init()
sleep(0.5)
# Log in
loginReq = {}  # Create an empty dict
loginReq['UserID'] = ''  # Parameters are passed as dict key-value pairs
loginReq['Password'] = ''  # Key names correspond to the C++ struct member names
loginReq['BrokerID'] = ''
reqid = reqid + 1  # Request ids must stay unique
i = api.reqUserLogin(loginReq, 1)
sleep(0.5)
## Log out; test failed (feature not available)
#reqid = reqid + 1
#i = api.reqUserLogout({}, 1)
#sleep(0.5)
## Safe exit; test passed
#i = api.exit()
## Get the trading day; currently returns empty
#day = api.getTradingDay()
#print 'Trading Day is:' + str(day)
#sleep(0.5)
## Subscribe to a contract; test passed
#i = api.subscribeMarketData('IF1505')
## Unsubscribe from a contract; test passed
#i = api.unSubscribeMarketData('IF1505')
# Subscribe to quote requests; test passed
i = api.subscribeForQuoteRsp('IO1504-C-3900')
# Unsubscribe from quote requests; test passed
i = api.unSubscribeForQuoteRsp('IO1504-C-3900')
# Keep running to print market data
app.exec_()
if __name__ == '__main__':
main()
|
haomiao/monster
|
refs/heads/master
|
setup.py
|
1
|
'''
monster
-----
monster is a general-purpose Python template framework for new projects,
so you can quickly build your own projects. And before you ask: it's MIT licensed!
some tips:
1. set up the setuptools tool
2. run pip with requirements.txt to install the required modules
3. others
'''
import codecs
from setuptools import setup,find_packages
from setuptools.command.test import test as TestCommand
import os
import sys
import re
import ast
HERE = os.path.abspath(os.path.dirname(__file__))
_version_re = re.compile(r'__version__\s+=\s+(.*)')
class RunTest(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = ['--strict', '--verbose', '--tb=long', 'tests']
self.test_suite = True
def run_tests(self):
import pytest
errno = pytest.main(self.test_args)
sys.exit(errno)
def read(*parts):
return codecs.open(os.path.join(HERE, *parts), 'r').read()
monster_version = str(ast.literal_eval(_version_re.search(read("monster/__init__.py")).group(1)))
'''str(read('README.md'))'''
long_description = 'faf'
setup(
name='monster',
version=monster_version,
url='https://github.com/haomiao/monster',
author='colper',
author_email='635541412@qq.com',
description='a python general template framework for new project',
long_description=long_description,
license='MIT',
packages=['monster'],
include_package_data=True,
#package_data={}
zip_safe=False,
platforms='any',
cmdclass={'test': RunTest},
tests_require=['pytest','nose'],
#install_requires=['pytest'],
#entry_points={}
extras_require={
'testing': ['pytest'],
},
classifiers = [
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Natural Language :: English',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Software Development :: Libraries :: Application Frameworks',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
],
)
|
alexmorozov/django
|
refs/heads/master
|
tests/migrations/test_migrations_unmigdep/0001_initial.py
|
282
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("auth", "__first__"),
]
operations = [
migrations.CreateModel(
"Book",
[
("id", models.AutoField(primary_key=True)),
("user", models.ForeignKey("auth.User", models.SET_NULL, null=True)),
],
)
]
|
40223249-1/-w16b_test
|
refs/heads/master
|
static/Brython3.1.3-20150514-095342/Lib/site-packages/pygame/base.py
|
603
|
#!/usr/bin/env python
## https://bitbucket.org/pygame/pygame/raw/2383b8ab0e2273bc83c545ab9c18fee1f3459c64/pygame/base.py
'''Pygame core routines
Contains the core routines that are used by the rest of the
pygame modules. Its routines are merged directly into the pygame
namespace. This mainly includes the auto-initialization `init` and
`quit` routines.
There is a small module named `locals` that also gets merged into
this namespace. This contains all the constants needed by pygame.
Object constructors also get placed into this namespace, you can
call functions like `Rect` and `Surface` to create objects of
that type. As a convenience, you can import the members of
pygame.locals directly into your module's namespace with::
from pygame.locals import *
Most of the pygame examples do this if you'd like to take a look.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
import atexit
import sys
#import SDL
_quitfunctions = []
class error(RuntimeError):
pass
def init():
'''Autoinitialize all imported pygame modules.
Initialize all imported pygame modules. Includes pygame modules
that are not part of the base modules (like font and image).
It does not raise exceptions, but instead silently counts which
modules have failed to init. The return argument contains a count
of the number of modules initialized, and the number of modules
that failed to initialize.
You can always initialize the modules you want by hand. The
modules that need it have an `init` and `quit` routine built in,
which you can call directly. They also have a `get_init` routine
which you can use to doublecheck the initialization. Note that
the manual `init` routines will raise an exception on error. Be
aware that most platforms require the display module to be
initialized before others. This `init` will handle that for you,
but if you initialize by hand, be aware of this constraint.
As with the manual `init` routines. It is safe to call this
`init` as often as you like.
:rtype: int, int
:return: (count_passed, count_failed)
'''
success = 0
fail = 0
#SDL.SDL_Init(SDL.SDL_INIT_EVENTTHREAD | SDL.SDL_INIT_TIMER)
if _video_autoinit():
success += 1
else:
fail += 1
for mod in sys.modules.values():
if hasattr(mod, '__PYGAMEinit__') and callable(mod.__PYGAMEinit__):
try:
mod.__PYGAMEinit__()
success += 1
except:
fail += 1
return success, fail
def register_quit(func):
'''Routine to call when pygame quits.
The given callback routine will be called when pygame is
quitting. Quit callbacks are served on a 'last in, first out'
basis.
'''
_quitfunctions.append(func)
def _video_autoquit():
if SDL.SDL_WasInit(SDL.SDL_INIT_VIDEO):
SDL.SDL_QuitSubSystem(SDL.SDL_INIT_VIDEO)
def _video_autoinit():
return 1
#if not SDL.SDL_WasInit(SDL.SDL_INIT_VIDEO):
# SDL.SDL_InitSubSystem(SDL.SDL_INIT_VIDEO)
# SDL.SDL_EnableUNICODE(1)
#return 1
def _atexit_quit():
while _quitfunctions:
func = _quitfunctions.pop()
func()
_video_autoquit()
#SDL.SDL_Quit()
def get_sdl_version():
'''Get the version of the linked SDL runtime.
:rtype: int, int, int
:return: major, minor, patch
'''
#v = SDL.SDL_Linked_Version()
#return v.major, v.minor, v.patch
return None, None, None
def quit():
'''Uninitialize all pygame modules.
Uninitialize all pygame modules that have been initialized. Even
if you initialized the module by hand, this `quit` will
uninitialize it for you.
All the pygame modules are uninitialized automatically when your
program exits, so you will usually not need this routine. If you
program plans to keep running after it is done with pygame, then
would be a good time to make this call.
'''
_atexit_quit()
def get_error():
'''Get current error message.
SDL maintains an internal current error message. This message is
usually given to you when an SDL related exception occurs, but
sometimes you may want to call this directly yourself.
:rtype: str
'''
#return SDL.SDL_GetError()
return ''
def _rgba_from_obj(obj):
if not type(obj) in (tuple, list):
return None
if len(obj) == 1:
return _rgba_from_obj(obj[0])
elif len(obj) == 3:
return (int(obj[0]), int(obj[1]), int(obj[2]), 255)
elif len(obj) == 4:
return obj
else:
return None
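# Illustrative sketch (not part of the original module): _rgba_from_obj normalises
# colour arguments into 4-tuples, e.g.
#
#   _rgba_from_obj((255, 0, 0))       # -> (255, 0, 0, 255)
#   _rgba_from_obj([(0, 0, 0, 128)])  # -> (0, 0, 0, 128)  (single nested value)
#   _rgba_from_obj('red')             # -> None (not a tuple or list)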
atexit.register(_atexit_quit)
|
vsajip/django
|
refs/heads/django3
|
tests/regressiontests/dates/models.py
|
93
|
from django.db import models
class Article(models.Model):
title = models.CharField(max_length=100)
pub_date = models.DateField()
categories = models.ManyToManyField("Category", related_name="articles")
def __unicode__(self):
return self.title
class Comment(models.Model):
article = models.ForeignKey(Article, related_name="comments")
text = models.TextField()
pub_date = models.DateField()
approval_date = models.DateField(null=True)
def __unicode__(self):
return 'Comment to %s (%s)' % (self.article.title, self.pub_date)
class Category(models.Model):
name = models.CharField(max_length=255)
|
mnahm5/django-estore
|
refs/heads/master
|
Lib/site-packages/django/contrib/gis/geos/prepared.py
|
137
|
from .base import GEOSBase
from .error import GEOSException
from .geometry import GEOSGeometry
from .libgeos import geos_version_info
from .prototypes import prepared as capi
class PreparedGeometry(GEOSBase):
"""
A geometry that is prepared for performing certain operations.
At the moment this includes the contains, covers, and intersects
operations.
"""
ptr_type = capi.PREPGEOM_PTR
def __init__(self, geom):
# Keeping a reference to the original geometry object to prevent it
# from being garbage collected which could then crash the prepared one
# See #21662
self._base_geom = geom
if not isinstance(geom, GEOSGeometry):
raise TypeError
self.ptr = capi.geos_prepare(geom.ptr)
def __del__(self):
if self._ptr and capi:
capi.prepared_destroy(self._ptr)
def contains(self, other):
return capi.prepared_contains(self.ptr, other.ptr)
def contains_properly(self, other):
return capi.prepared_contains_properly(self.ptr, other.ptr)
def covers(self, other):
return capi.prepared_covers(self.ptr, other.ptr)
def intersects(self, other):
return capi.prepared_intersects(self.ptr, other.ptr)
# Added in GEOS 3.3:
def crosses(self, other):
if geos_version_info()['version'] < '3.3.0':
raise GEOSException("crosses on prepared geometries requires GEOS >= 3.3.0")
return capi.prepared_crosses(self.ptr, other.ptr)
def disjoint(self, other):
if geos_version_info()['version'] < '3.3.0':
raise GEOSException("disjoint on prepared geometries requires GEOS >= 3.3.0")
return capi.prepared_disjoint(self.ptr, other.ptr)
def overlaps(self, other):
if geos_version_info()['version'] < '3.3.0':
raise GEOSException("overlaps on prepared geometries requires GEOS >= 3.3.0")
return capi.prepared_overlaps(self.ptr, other.ptr)
def touches(self, other):
if geos_version_info()['version'] < '3.3.0':
raise GEOSException("touches on prepared geometries requires GEOS >= 3.3.0")
return capi.prepared_touches(self.ptr, other.ptr)
def within(self, other):
if geos_version_info()['version'] < '3.3.0':
raise GEOSException("within on prepared geometries requires GEOS >= 3.3.0")
return capi.prepared_within(self.ptr, other.ptr)
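# Illustrative sketch (not part of the original module): prepared geometries are
# normally obtained via a GEOSGeometry's `prepared` property and reused for many
# cheap predicate tests, e.g.
#
#   from django.contrib.gis.geos import Point, Polygon
#   prep = Polygon(((0, 0), (0, 5), (5, 5), (5, 0), (0, 0))).prepared
#   prep.contains(Point(2, 2))    # True
#   prep.intersects(Point(9, 9))  # False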
|
anhstudios/swganh
|
refs/heads/develop
|
data/scripts/templates/object/tangible/ship/components/booster/shared_bst_mandal_jbj_mk2.py
|
2
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/ship/components/booster/shared_bst_mandal_jbj_mk2.iff"
result.attribute_template_id = 8
result.stfName("space/space_item","bst_mandal_jbj_mk2_n")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
MalloyPower/parsing-python
|
refs/heads/master
|
front-end/testsuite-python-lib/Python-2.2/Lib/test/test_mimetypes.py
|
10
|
import mimetypes
import StringIO
import unittest
import test_support
# Tell it we don't know about external files:
mimetypes.knownfiles = []
class MimeTypesTestCase(unittest.TestCase):
def setUp(self):
self.db = mimetypes.MimeTypes()
def test_default_data(self):
self.assertEqual(self.db.guess_type("foo.html"),
("text/html", None))
self.assertEqual(self.db.guess_type("foo.tgz"),
("application/x-tar", "gzip"))
self.assertEqual(self.db.guess_type("foo.tar.gz"),
("application/x-tar", "gzip"))
self.assertEqual(self.db.guess_type("foo.tar.Z"),
("application/x-tar", "compress"))
def test_data_urls(self):
self.assertEqual(self.db.guess_type("data:,thisIsTextPlain"),
("text/plain", None))
self.assertEqual(self.db.guess_type("data:;base64,thisIsTextPlain"),
("text/plain", None))
self.assertEqual(self.db.guess_type("data:text/x-foo,thisIsTextXFoo"),
("text/x-foo", None))
def test_file_parsing(self):
sio = StringIO.StringIO("x-application/x-unittest pyunit\n")
self.db.readfp(sio)
self.assertEqual(self.db.guess_type("foo.pyunit"),
("x-application/x-unittest", None))
self.assertEqual(self.db.guess_extension("x-application/x-unittest"),
".pyunit")
def test_non_standard_types(self):
# First try strict
self.assertEqual(self.db.guess_type('foo.xul', strict=1),
(None, None))
self.assertEqual(self.db.guess_extension('image/jpg', strict=1),
None)
# And then non-strict
self.assertEqual(self.db.guess_type('foo.xul', strict=0),
('text/xul', None))
self.assertEqual(self.db.guess_extension('image/jpg', strict=0),
'.jpg')
def test_main():
test_support.run_unittest(MimeTypesTestCase)
if __name__ == "__main__":
test_main()
|
tliber/scrapy
|
refs/heads/master
|
scrapy/squeue.py
|
144
|
import warnings
from scrapy.exceptions import ScrapyDeprecationWarning
warnings.warn("Module `scrapy.squeue` is deprecated, "
"use `scrapy.squeues` instead",
ScrapyDeprecationWarning, stacklevel=2)
from scrapy.squeues import *
|
valkjsaaa/sl4a
|
refs/heads/master
|
python-build/python-libs/gdata/src/atom/token_store.py
|
280
|
#!/usr/bin/python
#
# Copyright (C) 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module provides a TokenStore class which is designed to manage
auth tokens required for different services.
Each token is valid for a set of scopes which is the start of a URL. An HTTP
client will use a token store to find a valid Authorization header to send
in requests to the specified URL. If the HTTP client determines that a token
has expired or been revoked, it can remove the token from the store so that
it will not be used in future requests.
"""
__author__ = 'api.jscudder (Jeff Scudder)'
import atom.http_interface
import atom.url
SCOPE_ALL = 'http'
class TokenStore(object):
"""Manages Authorization tokens which will be sent in HTTP headers."""
def __init__(self, scoped_tokens=None):
self._tokens = scoped_tokens or {}
def add_token(self, token):
"""Adds a new token to the store (replaces tokens with the same scope).
Args:
token: A subclass of http_interface.GenericToken. The token object is
responsible for adding the Authorization header to the HTTP request.
The scopes defined in the token are used to determine if the token
is valid for a requested scope when find_token is called.
Returns:
True if the token was added, False if the token was not added because
no scopes were provided.
"""
if not hasattr(token, 'scopes') or not token.scopes:
return False
for scope in token.scopes:
self._tokens[str(scope)] = token
return True
def find_token(self, url):
"""Selects an Authorization header token which can be used for the URL.
Args:
url: str or atom.url.Url or a list containing the same.
The URL which is going to be requested. All
tokens are examined to see if any of their scopes match the beginning
of the URL. The first match found is returned.
Returns:
The token object which should execute the HTTP request. If there was
no token for the url (the url did not begin with any of the token
scopes available), then the atom.http_interface.GenericToken will be
returned because the GenericToken calls through to the http client
without adding an Authorization header.
"""
if url is None:
return None
if isinstance(url, (str, unicode)):
url = atom.url.parse_url(url)
if url in self._tokens:
token = self._tokens[url]
if token.valid_for_scope(url):
return token
else:
del self._tokens[url]
for scope, token in self._tokens.iteritems():
if token.valid_for_scope(url):
return token
return atom.http_interface.GenericToken()
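# Illustrative sketch (not part of the original module): a typical lookup flow,
# using hypothetical token and scope values:
#
#   store = TokenStore()
#   store.add_token(some_token)  # e.g. some_token.scopes == ['http://example.com/feeds']
#   store.find_token('http://example.com/feeds/posts')  # returns some_token
#   store.find_token('http://other.example.org/')       # falls back to GenericToken()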
def remove_token(self, token):
"""Removes the token from the token_store.
This method is used when a token is determined to be invalid. If the
token was found by find_token, but resulted in a 401 or 403 error stating
that the token was invalid, then the token should be removed to prevent
future use.
Returns:
True if a token was found and then removed from the token
store. False if the token was not in the TokenStore.
"""
token_found = False
scopes_to_delete = []
for scope, stored_token in self._tokens.iteritems():
if stored_token == token:
scopes_to_delete.append(scope)
token_found = True
for scope in scopes_to_delete:
del self._tokens[scope]
return token_found
def remove_all_tokens(self):
self._tokens = {}
|
xingwu1/autorest
|
refs/heads/master
|
AutoRest/Generators/Python/Python.Tests/Expected/AcceptanceTests/BodyDateTimeRfc1123/autorestrfc1123datetimetestservice/exceptions.py
|
687
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.exceptions import (
ClientException,
SerializationError,
DeserializationError,
TokenExpiredError,
ClientRequestError,
AuthenticationError,
HttpOperationError,
ValidationError,
)
|
Samuc/Proyecto-IV
|
refs/heads/master
|
lib/python2.7/site-packages/setuptools/tests/test_sdist.py
|
332
|
# -*- coding: utf-8 -*-
"""sdist tests"""
import locale
import os
import shutil
import sys
import tempfile
import unittest
import unicodedata
import re
from setuptools.tests import environment, test_svn
from setuptools.tests.py26compat import skipIf
from setuptools.compat import StringIO, unicode
from setuptools.command.sdist import sdist, walk_revctrl
from setuptools.command.egg_info import manifest_maker
from setuptools.dist import Distribution
from setuptools import svn_utils
SETUP_ATTRS = {
'name': 'sdist_test',
'version': '0.0',
'packages': ['sdist_test'],
'package_data': {'sdist_test': ['*.txt']}
}
SETUP_PY = """\
from setuptools import setup
setup(**%r)
""" % SETUP_ATTRS
if sys.version_info >= (3,):
LATIN1_FILENAME = 'smörbröd.py'.encode('latin-1')
else:
LATIN1_FILENAME = 'sm\xf6rbr\xf6d.py'
# Cannot use context manager because of Python 2.4
def quiet():
global old_stdout, old_stderr
old_stdout, old_stderr = sys.stdout, sys.stderr
sys.stdout, sys.stderr = StringIO(), StringIO()
def unquiet():
sys.stdout, sys.stderr = old_stdout, old_stderr
# Fake byte literals for Python <= 2.5
def b(s, encoding='utf-8'):
if sys.version_info >= (3,):
return s.encode(encoding)
return s
# Convert to POSIX path
def posix(path):
if sys.version_info >= (3,) and not isinstance(path, str):
return path.replace(os.sep.encode('ascii'), b('/'))
else:
return path.replace(os.sep, '/')
# HFS Plus uses decomposed UTF-8
def decompose(path):
if isinstance(path, unicode):
return unicodedata.normalize('NFD', path)
try:
path = path.decode('utf-8')
path = unicodedata.normalize('NFD', path)
path = path.encode('utf-8')
except UnicodeError:
pass # Not UTF-8
return path
class TestSdistTest(unittest.TestCase):
def setUp(self):
self.temp_dir = tempfile.mkdtemp()
f = open(os.path.join(self.temp_dir, 'setup.py'), 'w')
f.write(SETUP_PY)
f.close()
# Set up the rest of the test package
test_pkg = os.path.join(self.temp_dir, 'sdist_test')
os.mkdir(test_pkg)
# *.rst was not included in package_data, so c.rst should not be
# automatically added to the manifest when not under version control
for fname in ['__init__.py', 'a.txt', 'b.txt', 'c.rst']:
# Just touch the files; their contents are irrelevant
open(os.path.join(test_pkg, fname), 'w').close()
self.old_cwd = os.getcwd()
os.chdir(self.temp_dir)
def tearDown(self):
os.chdir(self.old_cwd)
shutil.rmtree(self.temp_dir)
def test_package_data_in_sdist(self):
"""Regression test for pull request #4: ensures that files listed in
package_data are included in the manifest even if they're not added to
version control.
"""
dist = Distribution(SETUP_ATTRS)
dist.script_name = 'setup.py'
cmd = sdist(dist)
cmd.ensure_finalized()
# squelch output
quiet()
try:
cmd.run()
finally:
unquiet()
manifest = cmd.filelist.files
self.assertTrue(os.path.join('sdist_test', 'a.txt') in manifest)
self.assertTrue(os.path.join('sdist_test', 'b.txt') in manifest)
self.assertTrue(os.path.join('sdist_test', 'c.rst') not in manifest)
def test_manifest_is_written_with_utf8_encoding(self):
# Test for #303.
dist = Distribution(SETUP_ATTRS)
dist.script_name = 'setup.py'
mm = manifest_maker(dist)
mm.manifest = os.path.join('sdist_test.egg-info', 'SOURCES.txt')
os.mkdir('sdist_test.egg-info')
# UTF-8 filename
filename = os.path.join('sdist_test', 'smörbröd.py')
# Add UTF-8 filename and write manifest
quiet()
try:
mm.run()
mm.filelist.files.append(filename)
mm.write_manifest()
finally:
unquiet()
manifest = open(mm.manifest, 'rbU')
contents = manifest.read()
manifest.close()
# The manifest should be UTF-8 encoded
try:
u_contents = contents.decode('UTF-8')
except UnicodeDecodeError:
e = sys.exc_info()[1]
self.fail(e)
# The manifest should contain the UTF-8 filename
if sys.version_info >= (3,):
self.assertTrue(posix(filename) in u_contents)
else:
self.assertTrue(posix(filename) in contents)
# Python 3 only
if sys.version_info >= (3,):
def test_write_manifest_allows_utf8_filenames(self):
# Test for #303.
dist = Distribution(SETUP_ATTRS)
dist.script_name = 'setup.py'
mm = manifest_maker(dist)
mm.manifest = os.path.join('sdist_test.egg-info', 'SOURCES.txt')
os.mkdir('sdist_test.egg-info')
# UTF-8 filename
filename = os.path.join(b('sdist_test'), b('smörbröd.py'))
# Add filename and write manifest
quiet()
try:
mm.run()
u_filename = filename.decode('utf-8')
mm.filelist.files.append(u_filename)
# Re-write manifest
mm.write_manifest()
finally:
unquiet()
manifest = open(mm.manifest, 'rbU')
contents = manifest.read()
manifest.close()
# The manifest should be UTF-8 encoded
try:
contents.decode('UTF-8')
except UnicodeDecodeError:
e = sys.exc_info()[1]
self.fail(e)
# The manifest should contain the UTF-8 filename
self.assertTrue(posix(filename) in contents)
# The filelist should have been updated as well
self.assertTrue(u_filename in mm.filelist.files)
def test_write_manifest_skips_non_utf8_filenames(self):
# Test for #303.
dist = Distribution(SETUP_ATTRS)
dist.script_name = 'setup.py'
mm = manifest_maker(dist)
mm.manifest = os.path.join('sdist_test.egg-info', 'SOURCES.txt')
os.mkdir('sdist_test.egg-info')
# Latin-1 filename
filename = os.path.join(b('sdist_test'), LATIN1_FILENAME)
# Add filename with surrogates and write manifest
quiet()
try:
mm.run()
u_filename = filename.decode('utf-8', 'surrogateescape')
mm.filelist.files.append(u_filename)
# Re-write manifest
mm.write_manifest()
finally:
unquiet()
manifest = open(mm.manifest, 'rbU')
contents = manifest.read()
manifest.close()
# The manifest should be UTF-8 encoded
try:
contents.decode('UTF-8')
except UnicodeDecodeError:
e = sys.exc_info()[1]
self.fail(e)
# The Latin-1 filename should have been skipped
self.assertFalse(posix(filename) in contents)
# The filelist should have been updated as well
self.assertFalse(u_filename in mm.filelist.files)
def test_manifest_is_read_with_utf8_encoding(self):
# Test for #303.
dist = Distribution(SETUP_ATTRS)
dist.script_name = 'setup.py'
cmd = sdist(dist)
cmd.ensure_finalized()
# Create manifest
quiet()
try:
cmd.run()
finally:
unquiet()
# Add UTF-8 filename to manifest
filename = os.path.join(b('sdist_test'), b('smörbröd.py'))
cmd.manifest = os.path.join('sdist_test.egg-info', 'SOURCES.txt')
manifest = open(cmd.manifest, 'ab')
manifest.write(b('\n')+filename)
manifest.close()
# The file must exist to be included in the filelist
open(filename, 'w').close()
# Re-read manifest
cmd.filelist.files = []
quiet()
try:
cmd.read_manifest()
finally:
unquiet()
# The filelist should contain the UTF-8 filename
if sys.version_info >= (3,):
filename = filename.decode('utf-8')
self.assertTrue(filename in cmd.filelist.files)
# Python 3 only
if sys.version_info >= (3,):
def test_read_manifest_skips_non_utf8_filenames(self):
# Test for #303.
dist = Distribution(SETUP_ATTRS)
dist.script_name = 'setup.py'
cmd = sdist(dist)
cmd.ensure_finalized()
# Create manifest
quiet()
try:
cmd.run()
finally:
unquiet()
# Add Latin-1 filename to manifest
filename = os.path.join(b('sdist_test'), LATIN1_FILENAME)
cmd.manifest = os.path.join('sdist_test.egg-info', 'SOURCES.txt')
manifest = open(cmd.manifest, 'ab')
manifest.write(b('\n')+filename)
manifest.close()
# The file must exist to be included in the filelist
open(filename, 'w').close()
# Re-read manifest
cmd.filelist.files = []
quiet()
try:
try:
cmd.read_manifest()
except UnicodeDecodeError:
e = sys.exc_info()[1]
self.fail(e)
finally:
unquiet()
# The Latin-1 filename should have been skipped
filename = filename.decode('latin-1')
self.assertFalse(filename in cmd.filelist.files)
@skipIf(sys.version_info >= (3,) and locale.getpreferredencoding() != 'UTF-8',
'Unittest fails if locale is not utf-8 but the manifest is recorded correctly')
def test_sdist_with_utf8_encoded_filename(self):
# Test for #303.
dist = Distribution(SETUP_ATTRS)
dist.script_name = 'setup.py'
cmd = sdist(dist)
cmd.ensure_finalized()
# UTF-8 filename
filename = os.path.join(b('sdist_test'), b('smörbröd.py'))
open(filename, 'w').close()
quiet()
try:
cmd.run()
finally:
unquiet()
if sys.platform == 'darwin':
filename = decompose(filename)
if sys.version_info >= (3,):
fs_enc = sys.getfilesystemencoding()
if sys.platform == 'win32':
if fs_enc == 'cp1252':
# Python 3 mangles the UTF-8 filename
filename = filename.decode('cp1252')
self.assertTrue(filename in cmd.filelist.files)
else:
filename = filename.decode('mbcs')
self.assertTrue(filename in cmd.filelist.files)
else:
filename = filename.decode('utf-8')
self.assertTrue(filename in cmd.filelist.files)
else:
self.assertTrue(filename in cmd.filelist.files)
def test_sdist_with_latin1_encoded_filename(self):
# Test for #303.
dist = Distribution(SETUP_ATTRS)
dist.script_name = 'setup.py'
cmd = sdist(dist)
cmd.ensure_finalized()
# Latin-1 filename
filename = os.path.join(b('sdist_test'), LATIN1_FILENAME)
open(filename, 'w').close()
self.assertTrue(os.path.isfile(filename))
quiet()
try:
cmd.run()
finally:
unquiet()
if sys.version_info >= (3,):
#not all windows systems have a default FS encoding of cp1252
if sys.platform == 'win32':
# Latin-1 is similar to Windows-1252 however
# on mbcs filesys it is not in latin-1 encoding
fs_enc = sys.getfilesystemencoding()
if fs_enc == 'mbcs':
filename = filename.decode('mbcs')
else:
filename = filename.decode('latin-1')
self.assertTrue(filename in cmd.filelist.files)
else:
# The Latin-1 filename should have been skipped
filename = filename.decode('latin-1')
self.assertFalse(filename in cmd.filelist.files)
else:
# No conversion takes place under Python 2 and the file
# is included. We shall keep it that way for BBB.
self.assertTrue(filename in cmd.filelist.files)
class TestDummyOutput(environment.ZippedEnvironment):
def setUp(self):
self.datafile = os.path.join('setuptools', 'tests',
'svn_data', "dummy.zip")
self.dataname = "dummy"
super(TestDummyOutput, self).setUp()
def _run(self):
code, data = environment.run_setup_py(["sdist"],
pypath=self.old_cwd,
data_stream=0)
if code:
info = "DIR: " + os.path.abspath('.')
info += "\n SDIST RETURNED: %i\n\n" % code
info += data
raise AssertionError(info)
datalines = data.splitlines()
possible = (
"running sdist",
"running egg_info",
"creating dummy\.egg-info",
"writing dummy\.egg-info",
"writing top-level names to dummy\.egg-info",
"writing dependency_links to dummy\.egg-info",
"writing manifest file 'dummy\.egg-info",
"reading manifest file 'dummy\.egg-info",
"reading manifest template 'MANIFEST\.in'",
"writing manifest file 'dummy\.egg-info",
"creating dummy-0.1.1",
"making hard links in dummy-0\.1\.1",
"copying files to dummy-0\.1\.1",
"copying \S+ -> dummy-0\.1\.1",
"copying dummy",
"copying dummy\.egg-info",
"hard linking \S+ -> dummy-0\.1\.1",
"hard linking dummy",
"hard linking dummy\.egg-info",
"Writing dummy-0\.1\.1",
"creating dist",
"creating 'dist",
"Creating tar archive",
"running check",
"adding 'dummy-0\.1\.1",
"tar .+ dist/dummy-0\.1\.1\.tar dummy-0\.1\.1",
"gzip .+ dist/dummy-0\.1\.1\.tar",
"removing 'dummy-0\.1\.1' \\(and everything under it\\)",
)
print(" DIR: " + os.path.abspath('.'))
for line in datalines:
found = False
for pattern in possible:
if re.match(pattern, line):
print(" READ: " + line)
found = True
break
if not found:
raise AssertionError("Unexpexected: %s\n-in-\n%s"
% (line, data))
return data
def test_sources(self):
self._run()
class TestSvn(environment.ZippedEnvironment):
def setUp(self):
version = svn_utils.SvnInfo.get_svn_version()
if not version: # None or Empty
return
self.base_version = tuple([int(x) for x in version.split('.')][:2])
if not self.base_version:
raise ValueError('No SVN tools installed')
elif self.base_version < (1, 3):
raise ValueError('Insufficient SVN Version %s' % version)
elif self.base_version >= (1, 9):
#trying the latest version
self.base_version = (1, 8)
self.dataname = "svn%i%i_example" % self.base_version
self.datafile = os.path.join('setuptools', 'tests',
'svn_data', self.dataname + ".zip")
super(TestSvn, self).setUp()
@skipIf(not test_svn._svn_check, "No SVN to test, in the first place")
def test_walksvn(self):
if self.base_version >= (1, 6):
folder2 = 'third party2'
folder3 = 'third party3'
else:
folder2 = 'third_party2'
folder3 = 'third_party3'
#TODO is this right
expected = set([
os.path.join('a file'),
os.path.join(folder2, 'Changes.txt'),
os.path.join(folder2, 'MD5SUMS'),
os.path.join(folder2, 'README.txt'),
os.path.join(folder3, 'Changes.txt'),
os.path.join(folder3, 'MD5SUMS'),
os.path.join(folder3, 'README.txt'),
os.path.join(folder3, 'TODO.txt'),
os.path.join(folder3, 'fin'),
os.path.join('third_party', 'README.txt'),
os.path.join('folder', folder2, 'Changes.txt'),
os.path.join('folder', folder2, 'MD5SUMS'),
os.path.join('folder', folder2, 'WatashiNiYomimasu.txt'),
os.path.join('folder', folder3, 'Changes.txt'),
os.path.join('folder', folder3, 'fin'),
os.path.join('folder', folder3, 'MD5SUMS'),
os.path.join('folder', folder3, 'oops'),
os.path.join('folder', folder3, 'WatashiNiYomimasu.txt'),
os.path.join('folder', folder3, 'ZuMachen.txt'),
os.path.join('folder', 'third_party', 'WatashiNiYomimasu.txt'),
os.path.join('folder', 'lalala.txt'),
os.path.join('folder', 'quest.txt'),
# The example will have a deleted file
# (or should) but shouldn't return it
])
self.assertEqual(set(x for x in walk_revctrl()), expected)
def test_suite():
return unittest.defaultTestLoader.loadTestsFromName(__name__)
|
tcwicklund/django
|
refs/heads/master
|
tests/template_tests/filter_tests/test_stringformat.py
|
345
|
from django.template.defaultfilters import stringformat
from django.test import SimpleTestCase
from django.utils.safestring import mark_safe
from ..utils import setup
class StringformatTests(SimpleTestCase):
"""
Notice that escaping is applied *after* any filters, so the string
formatting here only needs to deal with pre-escaped characters.
"""
@setup({'stringformat01':
'{% autoescape off %}.{{ a|stringformat:"5s" }}. .{{ b|stringformat:"5s" }}.{% endautoescape %}'})
def test_stringformat01(self):
output = self.engine.render_to_string('stringformat01', {'a': 'a<b', 'b': mark_safe('a<b')})
self.assertEqual(output, '. a<b. . a<b.')
@setup({'stringformat02': '.{{ a|stringformat:"5s" }}. .{{ b|stringformat:"5s" }}.'})
def test_stringformat02(self):
output = self.engine.render_to_string('stringformat02', {'a': 'a<b', 'b': mark_safe('a<b')})
self.assertEqual(output, '. a<b. . a<b.')
class FunctionTests(SimpleTestCase):
def test_format(self):
self.assertEqual(stringformat(1, '03d'), '001')
def test_invalid(self):
self.assertEqual(stringformat(1, 'z'), '')
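# Note added for clarity (not part of the original tests): `stringformat` is
# understood to prepend '%' to its argument and apply printf-style
# interpolation, returning '' if the conversion fails; that is why '03d'
# above yields '001' while the invalid specifier 'z' yields ''.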
|
lisa-lab/pylearn2
|
refs/heads/master
|
pylearn2/models/tests/test_maxout.py
|
44
|
"""
Tests of the maxout functionality.
So far these don't test correctness, just that you can
run the objects.
"""
__authors__ = "Ian Goodfellow"
__copyright__ = "Copyright 2013, Universite de Montreal"
__credits__ = ["Ian Goodfellow"]
__license__ = "3-clause BSD"
__maintainer__ = "LISA Lab"
import numpy as np
import unittest
# Skip test if cuda_ndarray is not available.
from nose.plugins.skip import SkipTest
from theano import config
from theano import function
from theano.sandbox import cuda
from theano import tensor as T
from pylearn2.config import yaml_parse
from pylearn2.datasets.exc import NoDataPathError
from pylearn2.models.mlp import MLP
from pylearn2.models.maxout import Maxout
from pylearn2.space import VectorSpace
def test_min_zero():
"""
This test guards against a bug where the size of the zero buffer used with
the min_zero flag was specified to have the wrong size. The bug only
manifested when compiled with optimizations off, because the optimizations
discard information about the size of the zero buffer.
"""
mlp = MLP(input_space=VectorSpace(1),
layers= [Maxout(layer_name="test_layer", num_units=1,
num_pieces = 2,
irange=.05, min_zero=True)])
X = T.matrix()
output = mlp.fprop(X)
# Compile in debug mode so we don't optimize out the size of the buffer
# of zeros
f = function([X], output, mode="DEBUG_MODE")
f(np.zeros((1, 1)).astype(X.dtype))
def test_maxout_basic():
# Tests that we can load a densely connected maxout model
# and train it for a few epochs (without saving) on a dummy
# dataset-- tiny model and dataset
yaml_string = """
!obj:pylearn2.train.Train {
dataset: &train !obj:pylearn2.testing.datasets.random_one_hot_dense_d\
esign_matrix {
rng: !obj:numpy.random.RandomState { seed: [2013, 3, 16] },
num_examples: 12,
dim: 2,
num_classes: 10
},
model: !obj:pylearn2.models.mlp.MLP {
layers: [
!obj:pylearn2.models.maxout.Maxout {
layer_name: 'h0',
num_units: 3,
num_pieces: 2,
irange: .005,
max_col_norm: 1.9365,
},
!obj:pylearn2.models.maxout.Maxout {
layer_name: 'h1',
num_units: 2,
num_pieces: 3,
irange: .005,
max_col_norm: 1.9365,
},
!obj:pylearn2.models.mlp.Softmax {
max_col_norm: 1.9365,
layer_name: 'y',
n_classes: 10,
irange: .005
}
],
nvis: 2,
},
algorithm: !obj:pylearn2.training_algorithms.sgd.SGD {
learning_rule: !obj:pylearn2.training_algorithms.learning_rule.Mo\
mentum { init_momentum: 0.5 },
batch_size: 6,
learning_rate: .1,
monitoring_dataset:
{
'train' : *train
},
cost: !obj:pylearn2.costs.mlp.dropout.Dropout {
input_include_probs: { 'h0' : .8 },
input_scales: { 'h0': 1. }
},
termination_criterion: !obj:pylearn2.termination_criteria.EpochCo\
unter {
max_epochs: 3,
},
update_callbacks: !obj:pylearn2.training_algorithms.sgd.Exponenti\
alDecay {
decay_factor: 1.000004,
min_lr: .000001
}
},
extensions: [
!obj:pylearn2.training_algorithms.learning_rule.MomentumAdjustor {
start: 1,
saturate: 250,
final_momentum: .7
}
],
}
"""
train = yaml_parse.load(yaml_string)
train.main_loop()
yaml_string_maxout_conv_c01b_basic = """
!obj:pylearn2.train.Train {
dataset: &train !obj:pylearn2.testing.datasets.random_one_hot_topolog\
ical_dense_design_matrix {
rng: !obj:numpy.random.RandomState { seed: [2013, 3, 16] },
shape: &input_shape [10, 10],
channels: 1,
axes: ['c', 0, 1, 'b'],
num_examples: 12,
num_classes: 10
},
model: !obj:pylearn2.models.mlp.MLP {
batch_size: 2,
layers: [
!obj:pylearn2.models.maxout.MaxoutConvC01B {
layer_name: 'h0',
pad: 0,
num_channels: 8,
num_pieces: 2,
kernel_shape: [2, 2],
pool_shape: [2, 2],
pool_stride: [2, 2],
irange: .005,
max_kernel_norm: .9,
},
# The following layers are commented out to make this
# test pass on a GTX 285.
# cuda-convnet isn't really meant to run on such an old
# graphics card but that's what we use for the buildbot.
# In the long run, we should move the buildbot to a newer
# graphics card and uncomment the remaining layers.
# !obj:pylearn2.models.maxout.MaxoutConvC01B {
# layer_name: 'h1',
# pad: 3,
# num_channels: 4,
# num_pieces: 4,
# kernel_shape: [3, 3],
# pool_shape: [2, 2],
# pool_stride: [2, 2],
# irange: .005,
# max_kernel_norm: 1.9365,
# },
#!obj:pylearn2.models.maxout.MaxoutConvC01B {
# pad: 3,
# layer_name: 'h2',
# num_channels: 16,
# num_pieces: 2,
# kernel_shape: [2, 2],
# pool_shape: [2, 2],
# pool_stride: [2, 2],
# irange: .005,
# max_kernel_norm: 1.9365,
# },
!obj:pylearn2.models.mlp.Softmax {
max_col_norm: 1.9365,
layer_name: 'y',
n_classes: 10,
irange: .005
}
],
input_space: !obj:pylearn2.space.Conv2DSpace {
shape: *input_shape,
num_channels: 1,
axes: ['c', 0, 1, 'b'],
},
},
algorithm: !obj:pylearn2.training_algorithms.sgd.SGD {
learning_rate: .05,
learning_rule: !obj:pylearn2.training_algorithms.learning_rule.Mo\
mentum { init_momentum: 0.9 },
monitoring_dataset:
{
'train': *train
},
cost: !obj:pylearn2.costs.mlp.dropout.Dropout {
input_include_probs: { 'h0' : .8 },
input_scales: { 'h0': 1. }
},
termination_criterion: !obj:pylearn2.termination_criteria.EpochCo\
unter {
max_epochs: 3
},
update_callbacks: !obj:pylearn2.training_algorithms.sgd.Exponenti\
alDecay {
decay_factor: 1.00004,
min_lr: .000001
}
},
extensions: [
!obj:pylearn2.training_algorithms.learning_rule.MomentumAdjustor {
start: 1,
saturate: 250,
final_momentum: .7
}
]
}
"""
yaml_string_maxout_conv_c01b_cifar10 = """
!obj:pylearn2.train.Train {
dataset: &train !obj:pylearn2.datasets.cifar10.CIFAR10 {
toronto_prepro: True,
which_set: 'train',
axes: ['c', 0, 1, 'b'],
start: 0,
stop: 50000
},
model: !obj:pylearn2.models.mlp.MLP {
batch_size: 100,
input_space: !obj:pylearn2.space.Conv2DSpace {
shape: [32, 32],
num_channels: 3,
axes: ['c', 0, 1, 'b'],
},
layers: [
!obj:pylearn2.models.maxout.MaxoutConvC01B {
layer_name: 'conv1',
pad: 0,
num_channels: 32,
num_pieces: 1,
kernel_shape: [5, 5],
pool_shape: [3, 3],
pool_stride: [2, 2],
irange: .01,
min_zero: True,
W_lr_scale: 1.,
b_lr_scale: 2.,
tied_b: True,
max_kernel_norm: 9.9,
},
!obj:pylearn2.models.mlp.Softmax {
layer_name: 'y',
n_classes: 10,
istdev: .01,
W_lr_scale: 1.,
b_lr_scale: 2.,
max_col_norm: 9.9365
}
],
},
algorithm: !obj:pylearn2.training_algorithms.sgd.SGD {
learning_rule: !obj:pylearn2.training_algorithms.learning_rule.Mo\
mentum { init_momentum: 0.9 },
batch_size: 100,
learning_rate: .01,
monitoring_dataset:
{
'valid' : !obj:pylearn2.datasets.cifar10.CIFAR10 {
toronto_prepro: True,
axes: ['c', 0, 1, 'b'],
which_set: 'train',
start: 40000,
stop: 50000
},
'test' : !obj:pylearn2.datasets.cifar10.CIFAR10 {
toronto_prepro: True,
axes: ['c', 0, 1, 'b'],
which_set: 'test',
}
},
termination_criterion: !obj:pylearn2.termination_criteria.EpochCo\
unter {
max_epochs: 5
}
}
}
"""
yaml_string_maxout_conv_c01b_cifar10_fast = """
!obj:pylearn2.train.Train {
dataset: &train !obj:pylearn2.datasets.cifar10.CIFAR10 {
toronto_prepro: True,
which_set: 'train',
axes: ['c', 0, 1, 'b'],
start: 0,
stop: 100
},
model: !obj:pylearn2.models.mlp.MLP {
batch_size: 100,
input_space: !obj:pylearn2.space.Conv2DSpace {
shape: [32, 32],
num_channels: 3,
axes: ['c', 0, 1, 'b'],
},
layers: [
!obj:pylearn2.models.maxout.MaxoutConvC01B {
layer_name: 'conv1',
pad: 0,
num_channels: 16,
num_pieces: 1,
kernel_shape: [5, 5],
pool_shape: [3, 3],
pool_stride: [2, 2],
irange: .01,
min_zero: False,
W_lr_scale: 1.,
b_lr_scale: 2.,
tied_b: True,
max_kernel_norm: 9.9,
},
!obj:pylearn2.models.mlp.Softmax {
layer_name: 'y',
n_classes: 10,
istdev: .03,
W_lr_scale: 1.,
b_lr_scale: 2.,
max_col_norm: 8.5
}
],
},
algorithm: !obj:pylearn2.training_algorithms.sgd.SGD {
learning_rule: !obj:pylearn2.training_algorithms.learning_rule.Mo\
mentum { init_momentum: 0.9 },
batch_size: 100,
learning_rate: .01,
monitoring_dataset:
{
'valid' : !obj:pylearn2.datasets.cifar10.CIFAR10 {
toronto_prepro: True,
axes: ['c', 0, 1, 'b'],
which_set: 'train',
start: 40000,
stop: 40100
},
'test' : !obj:pylearn2.datasets.cifar10.CIFAR10 {
toronto_prepro: True,
axes: ['c', 0, 1, 'b'],
which_set: 'test',
}
},
termination_criterion: !obj:pylearn2.termination_criteria.EpochCo\
unter {
max_epochs: 5
}
}
}
"""
class TestMaxout(unittest.TestCase):
def test_maxout_conv_c01b_basic_err(self):
assert cuda.cuda_enabled is False
self.assertRaises(RuntimeError,
yaml_parse.load,
yaml_string_maxout_conv_c01b_basic)
def test_maxout_conv_c01b_basic(self):
if cuda.cuda_available is False:
raise SkipTest('Optional package cuda disabled')
if not hasattr(cuda, 'unuse'):
raise Exception("Theano version too old to run this test!")
# Tests that we can run a small convolutional model on GPU.
assert cuda.cuda_enabled is False
# Even if there is a GPU, if the user didn't specify device=gpu
# we still want to run this test.
try:
old_floatX = config.floatX
cuda.use('gpu')
config.floatX = 'float32'
train = yaml_parse.load(yaml_string_maxout_conv_c01b_basic)
train.main_loop()
finally:
config.floatX = old_floatX
cuda.unuse()
assert cuda.cuda_enabled is False
def test_maxout_conv_c01b_cifar10(self):
if cuda.cuda_available is False:
raise SkipTest('Optional package cuda disabled')
if not hasattr(cuda, 'unuse'):
raise Exception("Theano version too old to run this test!")
# Tests that we can run a small convolutional model on GPU.
assert cuda.cuda_enabled is False
# Even if there is a GPU, if the user didn't specify device=gpu
# we still want to run this test.
try:
old_floatX = config.floatX
cuda.use('gpu')
config.floatX = 'float32'
try:
if config.mode in ['DEBUG_MODE', 'DebugMode']:
train = yaml_parse.load(yaml_string_maxout_conv_c01b_cifar10_fast)
else:
train = yaml_parse.load(yaml_string_maxout_conv_c01b_cifar10)
except NoDataPathError:
raise SkipTest("PYLEARN2_DATA_PATH environment variable "
"not defined")
train.main_loop()
# Check that the performance is close to the expected one:
# test_y_misclass: 0.3777000308036804
misclass_chan = train.algorithm.monitor.channels['test_y_misclass']
if not config.mode in ['DEBUG_MODE', 'DebugMode']:
assert misclass_chan.val_record[-1] < 0.38, \
("misclass_chan.val_record[-1] = %g" %
misclass_chan.val_record[-1])
# test_y_nll: 1.0978516340255737
nll_chan = train.algorithm.monitor.channels['test_y_nll']
if not config.mode in ['DEBUG_MODE', 'DebugMode']:
assert nll_chan.val_record[-1] < 1.1
finally:
config.floatX = old_floatX
cuda.unuse()
assert cuda.cuda_enabled is False
if __name__ == '__main__':
t = TestMaxout('setUp')
t.setUp()
t.test_maxout_conv_c01b_basic()
if 0:
unittest.main()
|
google-research/valan
|
refs/heads/master
|
r2r/house_parser.py
|
1
|
# coding=utf-8
# Copyright 2019 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility for parsing artifacts about a R2R house specification."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import csv
import json
import math
import os
import re
import numpy as np
import tensorflow.compat.v2 as tf
from valan.r2r import house_utils
_BANNED_MP40_CAT_INDEX = {0, 40, 41}
# Note: `oriented_bbox` is another namedtuple of ('axis0', 'axis1', 'radii')
RoomObject = collections.namedtuple('RoomObject', [
'center', 'distance', 'name', 'category', 'clean_category', 'oriented_bbox'
])
class Region(object):
"""Region data in the house segmentation file."""
NUM_TOKENS = 20 # Number of tokens to expect in line.
INDEX_LOC = 1 # Location of region index.
LEVEL_LOC = 2 # Location of level index.
LABEL_LOC = 5 # Location of label.
PX_LOC = 6 # Location of px.
PY_LOC = 7 # Location of py.
PZ_LOC = 8 # Location of pz.
def __init__(self, region_line):
parts = region_line.strip().split()
assert 'R' == parts[0]
assert self.NUM_TOKENS == len(parts)
self.index = int(parts[self.INDEX_LOC])
self.level_index = int(parts[self.LEVEL_LOC])
self.label = parts[self.LABEL_LOC]
self.center = (float(parts[self.PX_LOC]), float(parts[self.PY_LOC]),
float(parts[self.PZ_LOC]))
class Pano(object):
"""Pano data in the house segmentation file."""
NUM_TOKENS = 13 # Number of tokens to expect in line.
NAME_LOC = 1 # Location of name.
INDEX_LOC = 2 # Location of index.
REGION_LOC = 3 # Location of region index.
PX_LOC = 5 # Location of px.
PY_LOC = 6 # Location of py.
PZ_LOC = 7 # Location of pz.
def __init__(self, pano_line):
parts = pano_line.strip().split()
assert 'P' == parts[0]
assert self.NUM_TOKENS == len(parts)
self.name = parts[self.NAME_LOC]
self.index = int(parts[self.INDEX_LOC])
self.region_index = int(parts[self.REGION_LOC])
self.center = (float(parts[self.PX_LOC]), float(parts[self.PY_LOC]),
float(parts[self.PZ_LOC]))
# List of images (`Image`) of the pano.
self.images = []
def get_available_headings(self):
# Only need to consider the heading angles of one of the 3 cameras.
return [image.heading for image in self.images if image.camera_index == 0]
def get_images_at(self, heading, threshold=math.pi / 12):
"""Returns the images within threshold arc distance from given heading."""
angle_distances = [
house_utils.compute_arc_distance(heading, image.heading)
for image in self.images
]
matches = [
self.images[idx]
for idx, dist in enumerate(angle_distances)
if dist <= threshold
]
# Sort matches by pitch angle for consistency.
sorted_images = sorted(matches, key=lambda image: image.pitch)
return sorted_images
class Image(object):
"""Image data in the house segmentation file."""
NUM_TOKENS = 41 # Number of tokens to expect in line.
INDEX_LOC = 1 # Location of image index.
PANO_INDEX_LOC = 2 # Location of panorama index.
NAME_LOC = 3 # Location of the name of the image.
CAMERA_INDEX_LOC = 4 # Location of the camera index.
HEADING_INDEX_LOC = 5 # Location of the heading (yaw) index.
EXTRINSIC_MATRIX_START_LOC = 6 # Start location of extrinsic camera matrix.
EXTRINSIC_MATRIX_END_LOC = 18 # End location of extrinsic camera matrix.
def __init__(self, image_line, scan_id):
self.scan_id = scan_id
parts = image_line.strip().split()
assert 'I' == parts[0]
assert self.NUM_TOKENS == len(parts)
self.index = int(parts[self.INDEX_LOC])
self.pano_index = int(parts[self.PANO_INDEX_LOC])
self.name = parts[self.NAME_LOC]
self.camera_index = int(parts[self.CAMERA_INDEX_LOC])
self.heading_index = int(parts[self.HEADING_INDEX_LOC])
# Compute heading and pitch from the extrinsic camera matrix.
extrinsic_coordinates = [
float(coord) for coord in
parts[self.EXTRINSIC_MATRIX_START_LOC:self.EXTRINSIC_MATRIX_END_LOC]
]
rotation_matrix = [
extrinsic_coordinates[i:i + 3]
for i in range(0, len(extrinsic_coordinates), 4)
]
self.heading, self.pitch, _ = house_utils.get_euler_angles(rotation_matrix)
class Category(object):
"""Category data in the house segmentation file."""
NUM_TOKENS = 11 # Number of tokens to expect in line.
INDEX_LOC = 1 # Location of index.
CAT_MAP_INDEX_LOC = 2 # Location of category mapping index.
CAT_MAP_NAME_LOC = 3 # Location of category mapping name.
MPCAT_INDEX_LOC = 4 # Location of mpcat index.
MPCAT_NAME_LOC = 5 # Location of mpcat name.
def __init__(self, category_line, category_map=None):
"""Extract object index and category name for a line in the .house file."""
parts = category_line.strip().split()
assert 'C' == parts[0]
assert self.NUM_TOKENS == len(parts)
self.index = int(parts[self.INDEX_LOC])
self.category_mapping_index = int(parts[self.CAT_MAP_INDEX_LOC])
# Raw category name
self.category_mapping_name = ' '.join(
parts[self.CAT_MAP_NAME_LOC].split('#'))
self.mpcat40_index = int(parts[self.MPCAT_INDEX_LOC])
self.mpcat40_name = parts[self.MPCAT_NAME_LOC]
# Cleaned category name
if category_map:
self.clean_category_name = self._get_clean_cat_name(
self.category_mapping_index, category_map)
def _get_clean_cat_name(self,
category_index,
category_map,
count_cutoff_threshold=5):
"""Map category index to a clean category name instead of raw categeory.
The clean categories are from the R2R `category_mapping.tsv` file. It
corrects typos and standardizes the raw categories, which is much more fine
grained than the mpcat40 categories (which only has 40 categories).
For more information see:
https://github.com/niessner/Matterport/blob/master/metadata/category_mapping.tsv
Args:
category_index: int; the category mapping index extracted from the
category line from the .house file.
category_map: a dict returned by `_load_cat_map()`, containing mappings
from category index to category names, mpcat40 name, and count.
count_cutoff_threshold: categories with counts below the threshold are
replaced with their corresponding mpcat40 names. This is used to
truncate the long tail of rarely used category names.
Returns:
A unicode string for the clean category name.
"""
cat_map = category_map[category_index]
if cat_map['count'] >= count_cutoff_threshold:
clean_name = cat_map['clean_category']
else:
clean_name = cat_map['mpcat40']
return clean_name
class Object(object):
"""Object data in the house segmentation file."""
NUM_TOKENS = 24 # Number of tokens to expect in line.
INDEX_LOC = 1 # Location of index.
REGION_LOC = 2 # Location of region index.
CAT_LOC = 3 # Location of category index.
PX_LOC = 4 # Location of px.
PY_LOC = 5 # Location of py.
PZ_LOC = 6 # Location of pz.
AXIS0_X_LOC = 7 # Location of X axis min of the oriented bbox
AXIS0_Y_LOC = 8 # Location of Y axis min of the oriented bbox
AXIS0_Z_LOC = 9 # Location of Z axis min of the oriented bbox
AXIS1_X_LOC = 10 # Location of X axis max of the oriented bbox
AXIS1_Y_LOC = 11 # Location of Y axis max of the oriented bbox
AXIS1_Z_LOC = 12 # Location of Z axis max of the oriented bbox
RADIUS_X_LOC = 13 # Location of X radius of the oriented bbox
RADIUS_Y_LOC = 14 # Location of Y radius of the oriented bbox
RADIUS_Z_LOC = 15 # Location of Z radius of the oriented bbox
def __init__(self, object_line):
parts = object_line.strip().split()
assert 'O' == parts[0]
assert self.NUM_TOKENS == len(parts)
self.index = int(parts[self.INDEX_LOC])
self.region_index = int(parts[self.REGION_LOC])
self.category_index = int(parts[self.CAT_LOC])
self.center = (float(parts[self.PX_LOC]), float(parts[self.PY_LOC]),
float(parts[self.PZ_LOC]))
# Oriented bounding box (obbox)
oriented_bbox = collections.namedtuple('OrientedBbox',
['axis0', 'axis1', 'radii'])
axis0 = (float(parts[self.AXIS0_X_LOC]), float(parts[self.AXIS0_Y_LOC]),
float(parts[self.AXIS0_Z_LOC]))
axis1 = (float(parts[self.AXIS1_X_LOC]), float(parts[self.AXIS1_Y_LOC]),
float(parts[self.AXIS1_Z_LOC]))
radii = (float(parts[self.RADIUS_X_LOC]), float(parts[self.RADIUS_Y_LOC]),
float(parts[self.RADIUS_Z_LOC]))
self.obbox = oriented_bbox(axis0, axis1, radii)
def is_well_formed(self):
return (self.index >= 0 and self.region_index >= 0 and
self.category_index >= 0)
class R2RHouseParser(object):
"""Parser to extract various annotations in a house to assist perception."""
def __init__(self,
house_file_path,
category_map_dir=None,
category_map_file='category_mapping.tsv',
banned_mp40_cat_index=None):
"""Parses regions, panos, categories and objects from house spec file.
For more information see:
https://github.com/niessner/Matterport/blob/master/data_organization.md
Args:
house_file_path: Path to scan id house specification file.
category_map_dir: Dir of category mapping file 'category_mapping.tsv'.
If not provided, then the `clean_category` will be omitted.
category_map_file: str; file name for the category mapping table. Use
default unless the mapping table is different.
banned_mp40_cat_index: A set of mpcat40 category indices, e.g., {0, 41}
for (void, unlabeled). If provided, these categories will be ignored
when extracting objects for each pano.
"""
# Load category map and banned mp40 categories.
if category_map_dir:
assert tf.io.gfile.isdir(
category_map_dir), '{} must be an existing dir.'.format(
category_map_dir)
category_map = _load_cat_map(
category_map_file, category_map_dir, delimiter='\t')
else:
# Default to None and omit `clean_category_name` if dir is not given.
category_map = None
if not banned_mp40_cat_index:
self.banned_mp40_cat_index = _BANNED_MP40_CAT_INDEX
else:
self.banned_mp40_cat_index = banned_mp40_cat_index
self.scan_id = os.path.splitext(os.path.basename(house_file_path))[0]
with tf.io.gfile.GFile(house_file_path, 'r') as input_file:
# Skip but check header line.
assert re.match('^ASCII .*', input_file.readline().strip()) is not None
house_info = input_file.readline().strip().split()
assert 29 == len(house_info)
self.num_images = int(house_info[3])
self.num_panos = int(house_info[4])
self.num_objects = int(house_info[8])
self.num_categories = int(house_info[9])
self.num_regions = int(house_info[10])
self.regions = {}
self.panos = {}
self.categories = {}
self.objects = {}
self.images = {}
for line in input_file:
if line[0] == 'R':
r = Region(line)
assert r.index not in self.regions
self.regions[r.index] = r
elif line[0] == 'P':
p = Pano(line)
assert p.index not in self.panos
self.panos[p.index] = p
elif line[0] == 'C':
c = Category(line, category_map)
assert c.index not in self.categories
self.categories[c.index] = c
elif line[0] == 'O':
o = Object(line)
assert o.index not in self.objects
self.objects[o.index] = o
elif line[0] == 'I':
i = Image(line, self.scan_id)
assert i.index not in self.images
self.images[i.index] = i
assert self.num_regions == len(self.regions)
assert self.num_panos == len(self.panos)
assert self.num_categories == len(self.categories)
assert self.num_objects == len(self.objects)
assert self.num_images == len(self.images)
# Organize objects into regions for easy retrieval later.
self.pano_name_map = {}
for p in self.panos.values():
self.pano_name_map[p.name] = p.index
self.region_object_map = collections.defaultdict(list)
for o in self.objects.values():
self.region_object_map[o.region_index] += [o.index]
# Add images to the associated panos.
for image in self.images.values():
pano = self.get_pano_by_name(image.name)
pano.images.append(image)
def __repr__(self):
return 'Regions: {}, Panos: {}, Cats: {}, Objs: {}'.format(
self.num_regions, self.num_panos, self.num_categories, self.num_objects)
def get_pano_objects(self, pano_name):
"""Extract the set of objects given a pano.
Only returns the closest object of the same mp40 category and skips any
objects with mp40 category in `self.banned_mp40_cat_index` (e.g. misc,
void, unlabeled categories).
Args:
pano_name: panoramic hash id.
Returns:
Dictionary where key is the center of the `RoomObject` and the
value is the `RoomObject` named tuple. In particular,
`RoomObject.oriented_bbox` is another named tuple (axis0, axis1, radii)
containing the axis orientation of the bounding box and the radii of the
object along each axis.
"""
pano_id = self.pano_name_map.get(pano_name, None)
if pano_id is None: # Note that `pano_id` is int and thus can be 0.
return {}
room_objects = {}
region_index = self.panos[pano_id].region_index
pano_center = self.panos[pano_id].center
for object_index in self.region_object_map[region_index]:
try:
category = self.categories[self.objects[object_index].category_index]
# NOTE: 'unknown' objects are sometimes labeled as mpcat40=40 (misc)
# instead of mpcat40=41 (unlabeled). So we specifically exclude it here.
if ('unknown' not in category.category_mapping_name.lower()) and (
category.mpcat40_index not in self.banned_mp40_cat_index):
object_center = self.objects[object_index].center
assert object_center not in room_objects, self.objects[object_index]
room_objects[object_center] = RoomObject(
object_center,
np.linalg.norm(np.array(object_center) - np.array(pano_center)),
category.category_mapping_name,
category.mpcat40_name,
(category.clean_category_name if hasattr(
category, 'clean_category_name') else None),
self.objects[object_index].obbox)
except KeyError:
# Note that this happens because some objects have been marked with -1
# categories. We can safely ignore these objects.
assert self.objects[object_index].category_index == -1
return room_objects
def get_pano_by_name(self, pano_name):
return self.panos[self.pano_name_map[pano_name]]
def get_panos_graph(self,
connections_file,
cache_best_paths_and_distances=False):
"""Returns a house_utils.Graph object with the panos as nodes.
The connectivity file should be a json with a single line, containing one
dictionary for every pano in the house scan. The fields of this dictionary
used in this method are 'image_id', a unique string that identifies the
pano, and 'unobstructed', a list of booleans with length equal to the number
of panos in the scan, representing whether there is an unobstructed direct
path from the current pano to the pano with that index.
A more detailed description of the format of the file can be found at
https://github.com/peteanderson80/Matterport3DSimulator/tree/master/connectivity.
Args:
connections_file: Path to the file containing the connections.
cache_best_paths_and_distances: Whether or not to cache the best path and
distance for every pair of nodes in the graph.
"""
assert tf.io.gfile.exists(connections_file), ('Missing required file: %s' %
connections_file)
with tf.io.gfile.GFile(connections_file, 'r') as f:
connections_info = json.loads(f.readline())
pano_idx_map = {} # Dictionary mapping idx in connection file to pano name.
excluded_pano_names = set() # Set of names of excluded panos.
for idx, pano_info in enumerate(connections_info):
pano_name = pano_info['image_id']
pano_idx_map[idx] = pano_name
if not pano_info['included']:
excluded_pano_names.add(pano_name)
# We build a dictionary indexed by the id of the panos, with values
# being a dictionary mapping each connected pano to a house_utils.ConnectionInfo
# storing the distance and heading_angle between them.
nodes_dict = collections.defaultdict(house_utils.NodeInfo)
for pano_info in connections_info:
current_pano = self.get_pano_by_name(pano_info['image_id'])
if current_pano.name in excluded_pano_names:
continue # Don't add excluded panos to the graph.
for index, is_unobstructed in enumerate(pano_info['unobstructed']):
connected_pano = self.get_pano_by_name(pano_idx_map[index])
# Path should be unobstructed and target point should be included in the
# simulator.
if is_unobstructed and connected_pano.name not in excluded_pano_names:
distance = house_utils.compute_distance(current_pano.center,
connected_pano.center)
heading_angle = house_utils.compute_heading_angle(
current_pano.center,
connected_pano.center,
radians=True,
apply_r2r_correction=True)
pitch_angle = house_utils.compute_pitch_angle(
current_pano.center, connected_pano.center, radians=True)
connection_info = house_utils.ConnectionInfo(distance, heading_angle,
pitch_angle)
nodes_dict[current_pano.name].connections[connected_pano.name] = (
connection_info)
graph = house_utils.Graph(nodes_dict, cache_best_paths_and_distances)
for node in graph.nodes.keys():
graph.nodes[node].coords = self.get_pano_by_name(node).center
return graph
def _load_cat_map(category_map_file='category_mapping.tsv',
file_dir='',
delimiter='\t'):
"""Load category mapping table from file.
The mapping table is available at:
https://github.com/niessner/Matterport/blob/master/metadata/category_mapping.tsv
Args:
category_map_file: str; category mapping table file.
file_dir: str; the dir to `category_map_file`.
delimiter: str; optional delimiter for the input table.
Returns:
A dict that maps category index to category names and counts.
"""
filepath = os.path.join(file_dir, category_map_file)
assert tf.io.gfile.exists(filepath), (
'Missing category mapping file: {}'.format(filepath))
data = {}
with tf.io.gfile.GFile(filepath, 'r') as f:
reader = csv.reader(f, delimiter=delimiter)
for i, row in enumerate(reader):
assert len(row) == 18, 'Num columns must be 18.'
if i == 0:
header = row
assert header[0:4] == ['index', 'raw_category', 'category', 'count']
assert header[-2:] == ['mpcat40index', 'mpcat40']
else:
entries = [r.lower() for r in row]
# entries[0] is the index of each line.
data[int(entries[0])] = {
'raw_category': entries[1],
'clean_category': # Only take the first part if the name contains '/'.
entries[2].split('/')[0].strip(),
'count': int(entries[3]),
'mpcat40index': int(entries[-2]),
'mpcat40': entries[-1],
}
return data
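# Illustrative usage sketch (added; not part of the original module). The
# paths and the pano id below are hypothetical placeholders.
#
# parser = R2RHouseParser(
#     '/path/to/scans/SCAN_ID/house_segmentations/SCAN_ID.house',
#     category_map_dir='/path/to/metadata')
# objects = parser.get_pano_objects('PANO_HASH_ID')  # {center: RoomObject}
# graph = parser.get_panos_graph(
#     '/path/to/connectivity/SCAN_ID_connectivity.json',
#     cache_best_paths_and_distances=False)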
|
antoviaque/edx-platform
|
refs/heads/master
|
common/test/acceptance/pages/studio/asset_index.py
|
53
|
"""
The Files and Uploads page for a course in Studio
"""
import urllib
import os
from opaque_keys.edx.locator import CourseLocator
from . import BASE_URL
from .course_page import CoursePage
from bok_choy.javascript import wait_for_js, requirejs
@requirejs('js/views/assets')
class AssetIndexPage(CoursePage):
"""
The Files and Uploads page for a course in Studio
"""
url_path = "assets"
type_filter_element = '#js-asset-type-col'
@property
def url(self):
"""
Construct a URL to the page within the course.
"""
# TODO - is there a better way to make this agnostic to the underlying default module store?
default_store = os.environ.get('DEFAULT_STORE', 'draft')
course_key = CourseLocator(
self.course_info['course_org'],
self.course_info['course_num'],
self.course_info['course_run'],
deprecated=(default_store == 'draft')
)
url = "/".join([BASE_URL, self.url_path, urllib.quote_plus(unicode(course_key))])
return url if url[-1] == '/' else url + '/'
@wait_for_js
def is_browser_on_page(self):
return all([
self.q(css='body.view-uploads').present,
self.q(css='.page-header').present,
not self.q(css='div.ui-loading').visible,
])
@wait_for_js
def type_filter_on_page(self):
"""
Checks that type filter is in table header.
"""
return self.q(css=self.type_filter_element).present
@wait_for_js
def type_filter_header_label_visible(self):
"""
Checks type filter label is added and visible in the pagination header.
"""
return self.q(css='span.filter-column').visible
@wait_for_js
def click_type_filter(self):
"""
Clicks type filter menu.
"""
self.q(css=".filterable-column .nav-item").click()
@wait_for_js
def select_type_filter(self, filter_number):
"""
Selects Type filter from dropdown which filters the results.
Returns False if no filter.
"""
self.wait_for_ajax()
if self.q(css=".filterable-column .nav-item").is_present():
if not self.q(css=self.type_filter_element + " .wrapper-nav-sub").visible:
self.q(css=".filterable-column > .nav-item").first.click()
self.wait_for_element_visibility(
self.type_filter_element + " .wrapper-nav-sub", "Type Filter promise satisfied.")
self.q(css=self.type_filter_element + " .column-filter-link").nth(filter_number).click()
self.wait_for_ajax()
return True
return False
def return_results_set(self):
"""
Returns the asset set from the page
"""
return self.q(css="#asset-table-body tr").results
|
meego-tablet-ux/meego-app-browser
|
refs/heads/master
|
third_party/mesa/MesaLib/src/mapi/glapi/gen/gl_procs.py
|
33
|
#!/usr/bin/env python
# (C) Copyright IBM Corporation 2004, 2005
# All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# on the rights to use, copy, modify, merge, publish, distribute, sub
# license, and/or sell copies of the Software, and to permit persons to whom
# the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice (including the next
# paragraph) shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
# IBM AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# Authors:
# Ian Romanick <idr@us.ibm.com>
import license
import gl_XML, glX_XML
import sys, getopt
class PrintGlProcs(gl_XML.gl_print_base):
def __init__(self, long_strings, es=False):
gl_XML.gl_print_base.__init__(self)
self.es = es
self.long_strings = long_strings
self.name = "gl_procs.py (from Mesa)"
self.license = license.bsd_license_template % ( \
"""Copyright (C) 1999-2001 Brian Paul All Rights Reserved.
(C) Copyright IBM Corporation 2004, 2006""", "BRIAN PAUL, IBM")
def printRealHeader(self):
print """
/* This file is only included by glapi.c and is used for
* the GetProcAddress() function
*/
typedef struct {
GLint Name_offset;
#if defined(NEED_FUNCTION_POINTER) || defined(GLX_INDIRECT_RENDERING)
_glapi_proc Address;
#endif
GLuint Offset;
} glprocs_table_t;
#if !defined(NEED_FUNCTION_POINTER) && !defined(GLX_INDIRECT_RENDERING)
# define NAME_FUNC_OFFSET(n,f1,f2,f3,o) { n , o }
#elif defined(NEED_FUNCTION_POINTER) && !defined(GLX_INDIRECT_RENDERING)
# define NAME_FUNC_OFFSET(n,f1,f2,f3,o) { n , (_glapi_proc) f1 , o }
#elif defined(NEED_FUNCTION_POINTER) && defined(GLX_INDIRECT_RENDERING)
# define NAME_FUNC_OFFSET(n,f1,f2,f3,o) { n , (_glapi_proc) f2 , o }
#elif !defined(NEED_FUNCTION_POINTER) && defined(GLX_INDIRECT_RENDERING)
# define NAME_FUNC_OFFSET(n,f1,f2,f3,o) { n , (_glapi_proc) f3 , o }
#endif
"""
return
def printRealFooter(self):
print ''
print '#undef NAME_FUNC_OFFSET'
return
def printFunctionString(self, name):
if self.long_strings:
print ' "gl%s\\0"' % (name)
else:
print " 'g','l',",
for c in name:
print "'%s'," % (c),
print "'\\0',"
def printBody(self, api):
print ''
if self.long_strings:
print 'static const char gl_string_table[] ='
else:
print 'static const char gl_string_table[] = {'
base_offset = 0
table = []
for func in api.functionIterateByOffset():
name = func.dispatch_name()
self.printFunctionString(func.name)
table.append((base_offset, "gl" + name, "gl" + name, "NULL", func.name))
# The length of the function's name, plus 2 for "gl",
# plus 1 for the NUL.
base_offset += len(func.name) + 3
for func in api.functionIterateByOffset():
for n in func.entry_points:
if n != func.name:
name = func.dispatch_name()
self.printFunctionString( n )
if func.has_different_protocol(n):
alt_name = "gl" + func.static_glx_name(n)
table.append((base_offset, "gl" + name, alt_name, alt_name, func.name))
else:
table.append((base_offset, "gl" + name, "gl" + name, "NULL", func.name))
base_offset += len(n) + 3
if self.long_strings:
print ' ;'
else:
print '};'
print ''
print ''
print "#ifdef USE_MGL_NAMESPACE"
for func in api.functionIterateByOffset():
for n in func.entry_points:
if (not func.is_static_entry_point(func.name)) or (func.has_different_protocol(n) and not func.is_static_entry_point(n)):
print '#define gl_dispatch_stub_%u mgl_dispatch_stub_%u' % (func.offset, func.offset)
break
print "#endif /* USE_MGL_NAMESPACE */"
print ''
print ''
print '#if defined(NEED_FUNCTION_POINTER) || defined(GLX_INDIRECT_RENDERING)'
for func in api.functionIterateByOffset():
for n in func.entry_points:
if (not func.is_static_entry_point(func.name)) or (func.has_different_protocol(n) and not func.is_static_entry_point(n)):
print '%s GLAPIENTRY gl_dispatch_stub_%u(%s);' % (func.return_type, func.offset, func.get_parameter_string())
break
if self.es:
categories = {}
for func in api.functionIterateByOffset():
for n in func.entry_points:
cat, num = api.get_category_for_name(n)
if (cat.startswith("es") or cat.startswith("GL_OES")):
if not categories.has_key(cat):
categories[cat] = []
proto = 'GLAPI %s GLAPIENTRY %s(%s);' \
% (func.return_type, "gl" + n, func.get_parameter_string(n))
categories[cat].append(proto)
if categories:
print ''
print '/* OpenGL ES specific prototypes */'
print ''
keys = categories.keys()
keys.sort()
for key in keys:
print '/* category %s */' % key
print "\n".join(categories[key])
print ''
print '#endif /* defined(NEED_FUNCTION_POINTER) || defined(GLX_INDIRECT_RENDERING) */'
print ''
print 'static const glprocs_table_t static_functions[] = {'
for info in table:
print ' NAME_FUNC_OFFSET(%5u, %s, %s, %s, _gloffset_%s),' % info
print ' NAME_FUNC_OFFSET(-1, NULL, NULL, NULL, 0)'
print '};'
return
def show_usage():
print "Usage: %s [-f input_file_name] [-m mode] [-c]" % sys.argv[0]
print "-c Enable compatibility with OpenGL ES."
print "-m mode mode can be one of:"
print " long - Create code for compilers that can handle very"
print " long string constants. (default)"
print " short - Create code for compilers that can only handle"
print " ANSI C89 string constants."
sys.exit(1)
if __name__ == '__main__':
file_name = "gl_API.xml"
try:
(args, trail) = getopt.getopt(sys.argv[1:], "f:m:c")
except Exception,e:
show_usage()
long_string = 1
es = False
for (arg,val) in args:
if arg == "-f":
file_name = val
elif arg == "-m":
if val == "short":
long_string = 0
elif val == "long":
long_string = 1
else:
show_usage()
elif arg == "-c":
es = True
api = gl_XML.parse_GL_API(file_name, glX_XML.glx_item_factory())
printer = PrintGlProcs(long_string, es)
printer.Print(api)
|
edulramirez/nova
|
refs/heads/master
|
nova/db/sqlalchemy/migrate_repo/versions/219_placeholder.py
|
810
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# This is a placeholder for Havana backports.
# Do not use this number for new Icehouse work. New Icehouse work starts after
# all the placeholders.
#
# See blueprint backportable-db-migrations-icehouse
# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
def upgrade(migrate_engine):
pass
|
MrKiven/gunicorn
|
refs/heads/master
|
scripts/update_thanks.py
|
22
|
#!/usr/bin/env python
# Usage: git log --format="%an <%ae>" | python update_thanks.py
# You will get a result.txt file, which you can then edit (update, remove, ...)
#
# Install
# =======
# pip install validate_email pyDNS
#
from __future__ import print_function
import os
import sys
from validate_email import validate_email
from email.utils import parseaddr
import DNS.Base
addresses = set()
bad_addresses = set()
collection = []
lines = list(reversed(sys.stdin.readlines()))
for author in map(str.strip, lines):
realname, email_address = parseaddr(author)
if email_address not in addresses:
if email_address in bad_addresses:
continue
else:
try:
value = validate_email(email_address)
if value:
addresses.add(email_address)
collection.append(author)
else:
bad_addresses.add(email_address)
except DNS.Base.TimeoutError:
bad_addresses.add(email_address)
with open('result.txt', 'w') as output:
output.write('\n'.join(collection))
|
Zhongqilong/mykbengineer
|
refs/heads/master
|
kbe/src/lib/python/Lib/test/final_b.py
|
103
|
"""
Fodder for module finalization tests in test_module.
"""
import shutil
import test.final_a
x = 'b'
class C:
def __del__(self):
# Inspect module globals and builtins
print("x =", x)
print("final_a.x =", test.final_a.x)
print("shutil.rmtree =", getattr(shutil.rmtree, '__name__', None))
print("len =", getattr(len, '__name__', None))
c = C()
_underscored = C()
|
theflofly/tensorflow
|
refs/heads/master
|
tensorflow/compiler/tests/self_adjoint_eig_op_test.py
|
5
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.self_adjoint_eig."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
from absl.testing import parameterized
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.platform import test
class SelfAdjointEigOpTest(xla_test.XLATestCase, parameterized.TestCase):
def _test(self, dtype, shape):
np.random.seed(1)
x_np = np.random.uniform(
low=-1.0, high=1.0, size=np.prod(shape)).reshape(shape).astype(dtype)
x_np = x_np + np.swapaxes(x_np, -1, -2)
n = shape[-1]
e_np, _ = np.linalg.eigh(x_np)
with self.cached_session() as sess:
x_tf = array_ops.placeholder(dtype)
with self.test_scope():
e, v = linalg_ops.self_adjoint_eig(x_tf)
e_val, v_val = sess.run([e, v], feed_dict={x_tf: x_np})
v_diff = np.matmul(v_val, np.swapaxes(v_val, -1, -2)) - np.eye(n)
self.assertAlmostEqual(np.mean(v_diff**2), 0.0, delta=1e-6)
self.assertAlmostEqual(np.mean((e_val - e_np)**2), 0.0, delta=1e-6)
SIZES = [1, 2, 5, 10, 32]
DTYPES = [np.float32]
PARAMS = itertools.product(SIZES, DTYPES)
@parameterized.parameters(*PARAMS)
def testSelfAdjointEig(self, n, dtype):
for batch_dims in [(), (3,)] + [(3, 2)] * (n < 10):
self._test(dtype, batch_dims + (n, n))
if __name__ == "__main__":
test.main()
|
kenshay/ImageScripter
|
refs/heads/master
|
ProgramData/SystemFiles/Python/Lib/ensurepip/__main__.py
|
171
|
import ensurepip
if __name__ == "__main__":
ensurepip._main()
|
ibis-project/ibis
|
refs/heads/master
|
ibis/backends/dask/execution/strings.py
|
1
|
import itertools
import dask.dataframe as dd
import dask.dataframe.groupby as ddgb
import numpy as np
import pandas
import toolz
from pandas import isnull
import ibis
import ibis.expr.operations as ops
from ibis.backends.pandas.core import integer_types, scalar_types
from ibis.backends.pandas.execution.strings import (
execute_series_join_scalar_sep,
execute_series_regex_extract,
execute_series_regex_replace,
execute_series_regex_search,
execute_series_right,
execute_series_translate_scalar_scalar,
execute_series_translate_scalar_series,
execute_series_translate_series_scalar,
execute_series_translate_series_series,
execute_string_capitalize,
execute_string_contains,
execute_string_length_series,
execute_string_like_series_string,
execute_string_lower,
execute_string_lpad,
execute_string_lstrip,
execute_string_repeat,
execute_string_reverse,
execute_string_rpad,
execute_string_rstrip,
execute_string_strip,
execute_string_upper,
execute_substring_int_int,
haystack_to_series_of_lists,
)
from ..dispatch import execute_node
from .util import (
TypeRegistrationDict,
make_selected_obj,
register_types_to_dispatcher,
)
DASK_DISPATCH_TYPES: TypeRegistrationDict = {
ops.StringLength: [((dd.Series,), execute_string_length_series)],
ops.Substring: [
(
(dd.Series, integer_types, integer_types,),
execute_substring_int_int,
),
],
ops.Strip: [((dd.Series,), execute_string_strip)],
ops.LStrip: [((dd.Series,), execute_string_lstrip)],
ops.RStrip: [((dd.Series,), execute_string_rstrip)],
ops.LPad: [
(
(dd.Series, (dd.Series,) + integer_types, (dd.Series, str),),
execute_string_lpad,
),
],
ops.RPad: [
(
(dd.Series, (dd.Series,) + integer_types, (dd.Series, str),),
execute_string_rpad,
),
],
ops.Reverse: [((dd.Series,), execute_string_reverse)],
ops.Lowercase: [((dd.Series,), execute_string_lower)],
ops.Uppercase: [((dd.Series,), execute_string_upper)],
ops.Capitalize: [((dd.Series,), execute_string_capitalize)],
ops.Repeat: [
((dd.Series, (dd.Series,) + integer_types), execute_string_repeat),
],
ops.StringFind: [
(
(
dd.Series,
(dd.Series, str),
(dd.Series, type(None)) + integer_types,
(dd.Series, type(None)) + integer_types,
),
execute_string_contains,
)
],
ops.StringSQLLike: [
(
(dd.Series, str, (str, type(None)),),
execute_string_like_series_string,
),
],
ops.RegexSearch: [((dd.Series, str,), execute_series_regex_search)],
ops.RegexExtract: [
(
(dd.Series, (dd.Series, str), integer_types),
execute_series_regex_extract,
),
],
ops.RegexReplace: [
((dd.Series, str, str,), execute_series_regex_replace),
],
ops.Translate: [
(
(dd.Series, dd.Series, dd.Series),
execute_series_translate_series_series,
),
((dd.Series, dd.Series, str), execute_series_translate_series_scalar),
((dd.Series, str, dd.Series), execute_series_translate_scalar_series),
((dd.Series, str, str), execute_series_translate_scalar_scalar),
],
ops.StrRight: [((dd.Series, integer_types), execute_series_right)],
ops.StringJoin: [
(((dd.Series, str), list), execute_series_join_scalar_sep),
],
}
register_types_to_dispatcher(execute_node, DASK_DISPATCH_TYPES)
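# Note added for clarity: each entry in DASK_DISPATCH_TYPES maps an ibis
# operation class to a list of ((argument-type tuple), handler) pairs, and
# register_types_to_dispatcher is assumed to register every handler on
# execute_node for its signature, mirroring the explicit
# @execute_node.register(...) decorators used below.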
@execute_node.register(ops.Substring, dd.Series, dd.Series, integer_types)
def execute_substring_series_int(op, data, start, length, **kwargs):
return execute_substring_series_series(
op, data, start, dd.from_array(np.repeat(length, len(start))), **kwargs
)
@execute_node.register(ops.Substring, dd.Series, integer_types, dd.Series)
def execute_string_substring_int_series(op, data, start, length, **kwargs):
return execute_substring_series_series(
op,
data,
dd.from_array(np.repeat(start, len(length))),
length,
**kwargs,
)
# TODO - substring - #2553
@execute_node.register(ops.Substring, dd.Series, dd.Series, dd.Series)
def execute_substring_series_series(op, data, start, length, **kwargs):
end = start + length
# TODO - this is broken
def iterate(
value, start_iter=start.iteritems(), end_iter=end.iteritems(),
):
_, begin = next(start_iter)
_, end = next(end_iter)
if (begin is not None and isnull(begin)) or (
end is not None and isnull(end)
):
return None
return value[begin:end]
return data.map(iterate)
@execute_node.register(ops.StringSQLLike, ddgb.SeriesGroupBy, str, str)
def execute_string_like_series_groupby_string(
op, data, pattern, escape, **kwargs
):
return execute_string_like_series_string(
op, make_selected_obj(data), pattern, escape, **kwargs
).groupby(data.grouper.groupings)
# TODO - aggregations - #2553
@execute_node.register(
ops.GroupConcat, dd.Series, str, (dd.Series, type(None))
)
def execute_group_concat_series_mask(
op, data, sep, mask, aggcontext=None, **kwargs
):
return aggcontext.agg(
data[mask] if mask is not None else data,
lambda series, sep=sep: sep.join(series.values),
)
@execute_node.register(ops.GroupConcat, ddgb.SeriesGroupBy, str, type(None))
def execute_group_concat_series_gb(
op, data, sep, _, aggcontext=None, **kwargs
):
custom_group_concat = dd.Aggregation(
name='custom_group_concat',
chunk=lambda s: s.apply(list),
agg=lambda s0: s0.apply(
lambda chunks: sep.join(
str(s) for s in itertools.chain.from_iterable(chunks)
)
),
)
return data.agg(custom_group_concat)
# TODO - aggregations - #2553
@execute_node.register(
ops.GroupConcat, ddgb.SeriesGroupBy, str, ddgb.SeriesGroupBy
)
def execute_group_concat_series_gb_mask(
op, data, sep, mask, aggcontext=None, **kwargs
):
def method(series, sep=sep):
return sep.join(series.values.astype(str))
return aggcontext.agg(
data,
lambda data, mask=mask.obj, method=method: method(
data[mask[data.index]]
),
)
@execute_node.register(ops.StringAscii, dd.Series)
def execute_string_ascii(op, data, **kwargs):
output_meta = pandas.Series([], dtype=np.dtype('int32'), name=data.name)
return data.map(ord, meta=output_meta)
@execute_node.register(ops.StringAscii, ddgb.SeriesGroupBy)
def execute_string_ascii_group_by(op, data, **kwargs):
return execute_string_ascii(op, make_selected_obj(data), **kwargs).groupby(
data.index
)
@execute_node.register(ops.RegexSearch, ddgb.SeriesGroupBy, str)
def execute_series_regex_search_gb(op, data, pattern, **kwargs):
return execute_series_regex_search(
op,
make_selected_obj(data),
getattr(pattern, 'obj', pattern),
**kwargs,
).groupby(data.index)
@execute_node.register(
ops.RegexExtract, ddgb.SeriesGroupBy, str, integer_types
)
def execute_series_regex_extract_gb(op, data, pattern, index, **kwargs):
return execute_series_regex_extract(
op, make_selected_obj(data), pattern, index, **kwargs
).groupby(data.index)
@execute_node.register(ops.RegexReplace, ddgb.SeriesGroupBy, str, str)
def execute_series_regex_replace_gb(op, data, pattern, replacement, **kwargs):
return execute_series_regex_replace(
make_selected_obj(data), pattern, replacement, **kwargs
).groupby(data.index)
@execute_node.register(ops.StrRight, ddgb.SeriesGroupBy, integer_types)
def execute_series_right_gb(op, data, nchars, **kwargs):
return execute_series_right(op, make_selected_obj(data), nchars).groupby(
data.index
)
def haystack_to_dask_series_of_lists(haystack, index=None):
pieces = haystack_to_series_of_lists(haystack, index)
return dd.from_pandas(pieces, npartitions=1)
@execute_node.register(ops.FindInSet, dd.Series, list)
def execute_series_find_in_set(op, needle, haystack, **kwargs):
def find_in_set(index, elements):
return ibis.util.safe_index(elements, index)
return needle.apply(find_in_set, args=(haystack,))
@execute_node.register(ops.FindInSet, ddgb.SeriesGroupBy, list)
def execute_series_group_by_find_in_set(op, needle, haystack, **kwargs):
pieces = [getattr(piece, 'obj', piece) for piece in haystack]
return execute_series_find_in_set(
op, make_selected_obj(needle), pieces, **kwargs
).groupby(needle.index)
# TODO - we need this version, not the pandas one
@execute_node.register(ops.FindInSet, scalar_types, list)
def execute_string_group_by_find_in_set(op, needle, haystack, **kwargs):
# `list` could contain series, series groupbys, or scalars.
# Mixing series and series groupbys is not allowed.
series_in_haystack = [
type(piece)
for piece in haystack
if isinstance(piece, (dd.Series, ddgb.SeriesGroupBy))
]
if not series_in_haystack:
return ibis.util.safe_index(haystack, needle)
try:
(collection_type,) = frozenset(map(type, series_in_haystack))
except ValueError:
raise ValueError('Mixing Series and ddgb.SeriesGroupBy is not allowed')
pieces = haystack_to_dask_series_of_lists(
[getattr(piece, 'obj', piece) for piece in haystack]
)
result = pieces.map(toolz.flip(ibis.util.safe_index)(needle))
if issubclass(collection_type, dd.Series):
return result
assert issubclass(collection_type, ddgb.SeriesGroupBy)
return result.groupby(
toolz.first(
piece.grouper.groupings
for piece in haystack
if hasattr(piece, 'grouper')
)
)
|
wronk/mne-python
|
refs/heads/master
|
mne/time_frequency/tfr.py
|
1
|
"""A module which implements the time frequency estimation.
Morlet code inspired by Matlab code from Sheraz Khan & Brainstorm & SPM
"""
# Authors : Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Hari Bharadwaj <hari@nmr.mgh.harvard.edu>
# Clement Moutard <clement.moutard@polytechnique.org>
#
# License : BSD (3-clause)
from copy import deepcopy
from math import sqrt
import numpy as np
from scipy import linalg
from scipy.fftpack import fftn, ifftn
from ..fixes import partial
from ..baseline import rescale
from ..parallel import parallel_func
from ..utils import (logger, verbose, _time_mask, warn, check_fname,
_check_copy_dep)
from ..channels.channels import ContainsMixin, UpdateChannelsMixin
from ..io.pick import pick_info, pick_types
from ..io.meas_info import Info
from .multitaper import dpss_windows
from ..viz.utils import figure_nobar, plt_show
from ..externals.h5io import write_hdf5, read_hdf5
from ..externals.six import string_types
def _get_data(inst, return_itc):
"""Get data from Epochs or Evoked instance as epochs x ch x time"""
from ..epochs import _BaseEpochs
from ..evoked import Evoked
if not isinstance(inst, (_BaseEpochs, Evoked)):
raise TypeError('inst must be Epochs or Evoked')
if isinstance(inst, _BaseEpochs):
data = inst.get_data()
else:
if return_itc:
raise ValueError('return_itc must be False for evoked data')
data = inst.data[np.newaxis, ...].copy()
return data
def morlet(sfreq, freqs, n_cycles=7, sigma=None, zero_mean=False):
"""Compute Wavelets for the given frequency range
Parameters
----------
sfreq : float
Sampling Frequency
freqs : array
frequency range of interest (1 x Frequencies)
n_cycles : float | array of float
Number of cycles. Fixed number or one per frequency.
sigma : float, (optional)
It controls the width of the wavelet, i.e., its temporal
resolution. If sigma is None the temporal resolution
is adapted to the frequency, as for any wavelet transform:
the higher the frequency, the shorter the wavelet.
If sigma is fixed, the temporal resolution is fixed,
as for the short-time Fourier transform, and the number
of oscillations increases with the frequency.
zero_mean : bool
Make sure the wavelet is zero mean
Returns
-------
Ws : list of array
Wavelets time series
See Also
--------
mne.time_frequency.cwt_morlet : Compute time-frequency decomposition
with Morlet wavelets
"""
Ws = list()
n_cycles = np.atleast_1d(n_cycles)
if (n_cycles.size != 1) and (n_cycles.size != len(freqs)):
raise ValueError("n_cycles should be fixed or defined for "
"each frequency.")
for k, f in enumerate(freqs):
if len(n_cycles) != 1:
this_n_cycles = n_cycles[k]
else:
this_n_cycles = n_cycles[0]
# fixed or scale-dependent window
if sigma is None:
sigma_t = this_n_cycles / (2.0 * np.pi * f)
else:
sigma_t = this_n_cycles / (2.0 * np.pi * sigma)
# this scaling factor is proportional to (Tallon-Baudry 98):
# (sigma_t*sqrt(pi))^(-1/2);
t = np.arange(0., 5. * sigma_t, 1.0 / sfreq)
t = np.r_[-t[::-1], t[1:]]
oscillation = np.exp(2.0 * 1j * np.pi * f * t)
gaussian_envelope = np.exp(-t ** 2 / (2.0 * sigma_t ** 2))
if zero_mean: # to make it zero mean
real_offset = np.exp(- 2 * (np.pi * f * sigma_t) ** 2)
oscillation -= real_offset
W = oscillation * gaussian_envelope
W /= sqrt(0.5) * linalg.norm(W.ravel())
Ws.append(W)
return Ws
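# Illustrative example (added; not in the original source): with numpy
# imported as np, `morlet` returns one complex-valued wavelet per requested
# frequency, e.g.
#
# >>> Ws = morlet(sfreq=1000., freqs=np.array([10., 20., 40.]), n_cycles=7)
# >>> len(Ws)
# 3
# >>> all(np.iscomplexobj(W) for W in Ws)
# True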
def _dpss_wavelet(sfreq, freqs, n_cycles=7, time_bandwidth=4.0,
zero_mean=False):
"""Compute Wavelets for the given frequency range
Parameters
----------
sfreq : float
Sampling Frequency.
freqs : ndarray, shape (n_freqs,)
The frequencies in Hz.
n_cycles : float | ndarray, shape (n_freqs,)
The number of cycles globally or for each frequency.
Defaults to 7.
    time_bandwidth : float, (optional)
        Time x Bandwidth product.
        The number of good tapers (low-bias) is chosen automatically based on
        this to equal floor(time_bandwidth - 1).
        Default is 4.0, giving 3 good tapers.
    zero_mean : bool
        Make sure the wavelets are zero mean. Defaults to False.
Returns
-------
Ws : list of array
Wavelets time series
"""
Ws = list()
if time_bandwidth < 2.0:
raise ValueError("time_bandwidth should be >= 2.0 for good tapers")
n_taps = int(np.floor(time_bandwidth - 1))
n_cycles = np.atleast_1d(n_cycles)
if n_cycles.size != 1 and n_cycles.size != len(freqs):
raise ValueError("n_cycles should be fixed or defined for "
"each frequency.")
for m in range(n_taps):
Wm = list()
for k, f in enumerate(freqs):
if len(n_cycles) != 1:
this_n_cycles = n_cycles[k]
else:
this_n_cycles = n_cycles[0]
t_win = this_n_cycles / float(f)
t = np.arange(0., t_win, 1.0 / sfreq)
# Making sure wavelets are centered before tapering
oscillation = np.exp(2.0 * 1j * np.pi * f * (t - t_win / 2.))
# Get dpss tapers
tapers, conc = dpss_windows(t.shape[0], time_bandwidth / 2.,
n_taps)
Wk = oscillation * tapers[m]
if zero_mean: # to make it zero mean
real_offset = Wk.mean()
Wk -= real_offset
Wk /= sqrt(0.5) * linalg.norm(Wk.ravel())
Wm.append(Wk)
Ws.append(Wm)
return Ws
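# A small sketch of how `_dpss_wavelet` organizes its output (illustrative
# only; the sampling rate and frequencies are assumed values). The outer list
# runs over tapers -- floor(time_bandwidth - 1) of them -- and each entry
# holds one wavelet per frequency.
def _demo_dpss_wavelets():  # hypothetical helper, safe to remove
    freqs = np.array([10., 20.])
    Ws = _dpss_wavelet(sfreq=250., freqs=freqs, n_cycles=5.,
                       time_bandwidth=4.0, zero_mean=True)
    print('n_tapers = %d (expected %d)' % (len(Ws), int(np.floor(4.0 - 1))))
    print('wavelets per taper = %d' % len(Ws[0]))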
def _centered(arr, newsize):
"""Aux Function to center data"""
# Return the center newsize portion of the array.
newsize = np.asarray(newsize)
currsize = np.array(arr.shape)
startind = (currsize - newsize) // 2
endind = startind + newsize
myslice = [slice(startind[k], endind[k]) for k in range(len(endind))]
return arr[tuple(myslice)]
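# Quick illustration of `_centered` (assumed values): it trims a "full"
# convolution result back to the central `newsize` samples along each axis.
def _demo_centered():  # hypothetical helper, safe to remove
    full = np.arange(10)           # stand-in for a length-10 convolution output
    print(_centered(full, (6,)))   # -> the 6 central samples: [2 3 4 5 6 7]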
def _cwt(X, Ws, mode="same", decim=1, use_fft=True):
"""Compute cwt with fft based convolutions or temporal convolutions.
Return a generator over signals.
"""
if mode not in ['same', 'valid', 'full']:
raise ValueError("`mode` must be 'same', 'valid' or 'full', "
"got %s instead." % mode)
if mode == 'full' and (not use_fft):
# XXX JRK: full wavelet decomposition needs to be implemented
raise ValueError('`full` decomposition with convolution is currently' +
' not supported.')
decim = _check_decim(decim)
X = np.asarray(X)
# Precompute wavelets for given frequency range to save time
n_signals, n_times = X.shape
n_times_out = X[:, decim].shape[1]
n_freqs = len(Ws)
Ws_max_size = max(W.size for W in Ws)
size = n_times + Ws_max_size - 1
# Always use 2**n-sized FFT
fsize = 2 ** int(np.ceil(np.log2(size)))
# precompute FFTs of Ws
if use_fft:
fft_Ws = np.empty((n_freqs, fsize), dtype=np.complex128)
for i, W in enumerate(Ws):
if len(W) > n_times:
raise ValueError('Wavelet is too long for such a short signal. '
'Reduce the number of cycles.')
if use_fft:
fft_Ws[i] = fftn(W, [fsize])
# Make generator looping across signals
tfr = np.zeros((n_freqs, n_times_out), dtype=np.complex128)
for x in X:
if use_fft:
fft_x = fftn(x, [fsize])
# Loop across wavelets
for ii, W in enumerate(Ws):
if use_fft:
ret = ifftn(fft_x * fft_Ws[ii])[:n_times + W.size - 1]
else:
ret = np.convolve(x, W, mode=mode)
# Center and decimate decomposition
if mode == "valid":
sz = abs(W.size - n_times) + 1
                offset = (n_times - sz) // 2  # integer division keeps slice indices ints
this_slice = slice(offset // decim.step,
(offset + sz) // decim.step)
if use_fft:
ret = _centered(ret, sz)
tfr[ii, this_slice] = ret[decim]
else:
if use_fft:
ret = _centered(ret, n_times)
tfr[ii, :] = ret[decim]
yield tfr
def cwt_morlet(X, sfreq, freqs, use_fft=True, n_cycles=7.0, zero_mean=False,
decim=1):
"""Compute time freq decomposition with Morlet wavelets
This function operates directly on numpy arrays. Consider using
`tfr_morlet` to process `Epochs` or `Evoked` instances.
Parameters
----------
X : array, shape (n_signals, n_times)
Signals (one per line)
sfreq : float
Sampling frequency.
freqs : array
Array of frequencies of interest
    use_fft : bool
        Compute convolution with FFT or temporal convolution.
    n_cycles : float | array of float
        Number of cycles. Fixed number or one per frequency.
zero_mean : bool
Make sure the wavelets are zero mean.
decim : int | slice
To reduce memory usage, decimation factor after time-frequency
decomposition.
If `int`, returns tfr[..., ::decim].
If `slice` returns tfr[..., decim].
Note that decimation may create aliasing artifacts.
Defaults to 1.
Returns
-------
tfr : 3D array
Time Frequency Decompositions (n_signals x n_frequencies x n_times)
See Also
--------
tfr.cwt : Compute time-frequency decomposition with user-provided wavelets
"""
mode = 'same'
# mode = "valid"
decim = _check_decim(decim)
n_signals, n_times = X[:, decim].shape
# Precompute wavelets for given frequency range to save time
Ws = morlet(sfreq, freqs, n_cycles=n_cycles, zero_mean=zero_mean)
coefs = cwt(X, Ws, use_fft=use_fft, mode=mode, decim=decim)
tfrs = np.empty((n_signals, len(freqs), n_times), dtype=np.complex)
for k, tfr in enumerate(coefs):
tfrs[k] = tfr
return tfrs
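# A hedged end-to-end sketch for `cwt_morlet` on synthetic data (all values
# below are assumptions chosen for illustration). The output is complex;
# take np.abs(...) ** 2 to obtain power.
def _demo_cwt_morlet():  # hypothetical helper, safe to remove
    sfreq = 200.
    t = np.arange(0, 2., 1. / sfreq)
    X = np.sin(2 * np.pi * 10. * t)[np.newaxis, :]  # one 10 Hz signal
    freqs = np.arange(5., 20., 5.)
    tfr = cwt_morlet(X, sfreq, freqs, n_cycles=5., decim=2)
    power = np.abs(tfr) ** 2
    print(power.shape)  # (n_signals, n_freqs, n_times after decimation by 2)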
def cwt(X, Ws, use_fft=True, mode='same', decim=1):
"""Compute time freq decomposition with continuous wavelet transform
Parameters
----------
X : array, shape (n_signals, n_times)
The signals.
Ws : list of array
Wavelets time series.
use_fft : bool
Use FFT for convolutions. Defaults to True.
mode : 'same' | 'valid' | 'full'
Convention for convolution. 'full' is currently not implemented with
`use_fft=False`. Defaults to 'same'.
decim : int | slice
To reduce memory usage, decimation factor after time-frequency
decomposition.
If `int`, returns tfr[..., ::decim].
If `slice` returns tfr[..., decim].
Note that decimation may create aliasing artifacts.
Defaults to 1.
Returns
-------
tfr : array, shape (n_signals, n_frequencies, n_times)
The time frequency decompositions.
See Also
--------
mne.time_frequency.cwt_morlet : Compute time-frequency decomposition
with Morlet wavelets
"""
decim = _check_decim(decim)
n_signals, n_times = X[:, decim].shape
coefs = _cwt(X, Ws, mode, decim=decim, use_fft=use_fft)
tfrs = np.empty((n_signals, len(Ws), n_times), dtype=np.complex)
for k, tfr in enumerate(coefs):
tfrs[k] = tfr
return tfrs
def _time_frequency(X, Ws, use_fft, decim):
"""Aux of time_frequency for parallel computing over channels
"""
decim = _check_decim(decim)
n_epochs, n_times = X[:, decim].shape
n_frequencies = len(Ws)
psd = np.zeros((n_frequencies, n_times)) # PSD
plf = np.zeros((n_frequencies, n_times), np.complex) # phase lock
mode = 'same'
tfrs = _cwt(X, Ws, mode, decim=decim, use_fft=use_fft)
for tfr in tfrs:
tfr_abs = np.abs(tfr)
psd += tfr_abs ** 2
plf += tfr / tfr_abs
psd /= n_epochs
plf = np.abs(plf) / n_epochs
return psd, plf
@verbose
def single_trial_power(data, sfreq, frequencies, use_fft=True, n_cycles=7,
baseline=None, baseline_mode='ratio', times=None,
decim=1, n_jobs=1, zero_mean=False, verbose=None):
"""Compute time-frequency power on single epochs
Parameters
----------
data : array of shape [n_epochs, n_channels, n_times]
The epochs
sfreq : float
Sampling rate
frequencies : array-like
The frequencies
use_fft : bool
Use the FFT for convolutions or not.
n_cycles : float | array of float
Number of cycles in the Morlet wavelet. Fixed number
or one per frequency.
baseline : None (default) or tuple of length 2
The time interval to apply baseline correction.
If None do not apply it. If baseline is (a, b)
the interval is between "a (s)" and "b (s)".
If a is None the beginning of the data is used
and if b is None then b is set to the end of the interval.
        If baseline is equal to (None, None) all the time
interval is used.
baseline_mode : None | 'ratio' | 'zscore'
Do baseline correction with ratio (power is divided by mean
power during baseline) or zscore (power is divided by standard
deviation of power during baseline after subtracting the mean,
power = [power - mean(power_baseline)] / std(power_baseline))
times : array
Required to define baseline
decim : int | slice
To reduce memory usage, decimation factor after time-frequency
decomposition.
If `int`, returns tfr[..., ::decim].
If `slice` returns tfr[..., decim].
Note that decimation may create aliasing artifacts.
Defaults to 1.
n_jobs : int
The number of epochs to process at the same time
zero_mean : bool
Make sure the wavelets are zero mean.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
power : 4D array
Power estimate (Epochs x Channels x Frequencies x Timepoints).
"""
decim = _check_decim(decim)
mode = 'same'
n_frequencies = len(frequencies)
n_epochs, n_channels, n_times = data[:, :, decim].shape
# Precompute wavelets for given frequency range to save time
Ws = morlet(sfreq, frequencies, n_cycles=n_cycles, zero_mean=zero_mean)
parallel, my_cwt, _ = parallel_func(cwt, n_jobs)
logger.info("Computing time-frequency power on single epochs...")
power = np.empty((n_epochs, n_channels, n_frequencies, n_times),
dtype=np.float)
# Package arguments for `cwt` here to minimize omissions where only one of
# the two calls below is updated with new function arguments.
cwt_kw = dict(Ws=Ws, use_fft=use_fft, mode=mode, decim=decim)
if n_jobs == 1:
for k, e in enumerate(data):
x = cwt(e, **cwt_kw)
power[k] = (x * x.conj()).real
else:
# Precompute tf decompositions in parallel
tfrs = parallel(my_cwt(e, **cwt_kw) for e in data)
for k, tfr in enumerate(tfrs):
power[k] = (tfr * tfr.conj()).real
# Run baseline correction. Be sure to decimate the times array as well if
# needed.
if times is not None:
times = times[decim]
power = rescale(power, times, baseline, baseline_mode, copy=False)
return power
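# Sketch of `single_trial_power` on random epochs (assumed shapes and values,
# for illustration only). Baseline correction requires the `times` vector.
def _demo_single_trial_power():  # hypothetical helper, safe to remove
    rng = np.random.RandomState(0)
    sfreq, n_epochs, n_channels, n_times = 100., 3, 2, 200
    data = rng.randn(n_epochs, n_channels, n_times)
    times = np.arange(n_times) / sfreq - 0.5  # epoch running from -0.5 s to 1.5 s
    freqs = np.array([8., 12.])
    power = single_trial_power(data, sfreq, freqs, n_cycles=4.,
                               baseline=(-0.5, 0.), baseline_mode='ratio',
                               times=times, decim=2)
    print(power.shape)  # (n_epochs, n_channels, n_freqs, n_times // 2)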
def _induced_power_cwt(data, sfreq, frequencies, use_fft=True, n_cycles=7,
decim=1, n_jobs=1, zero_mean=False):
"""Compute time induced power and inter-trial phase-locking factor
The time frequency decomposition is done with Morlet wavelets
Parameters
----------
data : array
3D array of shape [n_epochs, n_channels, n_times]
sfreq : float
Sampling frequency.
frequencies : array
Array of frequencies of interest
use_fft : bool
Compute transform with fft based convolutions or temporal
convolutions.
n_cycles : float | array of float
Number of cycles. Fixed number or one per frequency.
decim : int | slice
To reduce memory usage, decimation factor after time-frequency
decomposition.
If `int`, returns tfr[..., ::decim].
If `slice` returns tfr[..., decim].
Note that decimation may create aliasing artifacts.
Defaults to 1.
n_jobs : int
        The number of CPUs used in parallel. If -1, all CPUs are used.
Requires joblib package.
zero_mean : bool
Make sure the wavelets are zero mean.
Returns
-------
    power : 3D array
        Induced power (Channels x Frequencies x Timepoints).
        Squared amplitude of time-frequency coefficients.
    phase_lock : 3D array
        Phase locking factor in [0, 1] (Channels x Frequencies x Timepoints).
"""
decim = _check_decim(decim)
n_frequencies = len(frequencies)
n_epochs, n_channels, n_times = data[:, :, decim].shape
# Precompute wavelets for given frequency range to save time
Ws = morlet(sfreq, frequencies, n_cycles=n_cycles, zero_mean=zero_mean)
psd = np.empty((n_channels, n_frequencies, n_times))
plf = np.empty((n_channels, n_frequencies, n_times))
# Separate to save memory for n_jobs=1
parallel, my_time_frequency, _ = parallel_func(_time_frequency, n_jobs)
psd_plf = parallel(my_time_frequency(data[:, c, :], Ws, use_fft, decim)
for c in range(n_channels))
for c, (psd_c, plf_c) in enumerate(psd_plf):
psd[c, :, :], plf[c, :, :] = psd_c, plf_c
return psd, plf
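# Conceptual sketch of the inter-trial phase-locking factor computed above
# (plain NumPy, assumed values): each trial contributes a unit phasor
# tfr / |tfr|, and the PLF is the magnitude of their average, so it lies in
# [0, 1] -- 1 for perfectly aligned phases, roughly 1 / sqrt(n_trials) for
# random phases.
def _demo_phase_locking_factor():  # hypothetical helper, safe to remove
    rng = np.random.RandomState(0)
    aligned = np.exp(1j * np.full(100, 0.3))                 # identical phases
    random_phase = np.exp(1j * rng.uniform(0, 2 * np.pi, 100))
    print(np.abs(aligned.mean()))       # ~1.0
    print(np.abs(random_phase.mean()))  # small, on the order of 0.1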
def _preproc_tfr(data, times, freqs, tmin, tmax, fmin, fmax, mode,
baseline, vmin, vmax, dB, sfreq):
"""Aux Function to prepare tfr computation"""
from ..viz.utils import _setup_vmin_vmax
copy = baseline is not None
data = rescale(data, times, baseline, mode, copy=copy)
# crop time
itmin, itmax = None, None
idx = np.where(_time_mask(times, tmin, tmax, sfreq=sfreq))[0]
if tmin is not None:
itmin = idx[0]
if tmax is not None:
itmax = idx[-1] + 1
times = times[itmin:itmax]
# crop freqs
ifmin, ifmax = None, None
idx = np.where(_time_mask(freqs, fmin, fmax, sfreq=sfreq))[0]
if fmin is not None:
ifmin = idx[0]
if fmax is not None:
ifmax = idx[-1] + 1
freqs = freqs[ifmin:ifmax]
# crop data
data = data[:, ifmin:ifmax, itmin:itmax]
times *= 1e3
if dB:
data = 10 * np.log10((data * data.conj()).real)
vmin, vmax = _setup_vmin_vmax(data, vmin, vmax)
return data, times, freqs, vmin, vmax
class AverageTFR(ContainsMixin, UpdateChannelsMixin):
"""Container for Time-Frequency data
    Can, for example, store induced power at the sensor level or inter-trial
    coherence.
Parameters
----------
info : Info
The measurement info.
data : ndarray, shape (n_channels, n_freqs, n_times)
The data.
times : ndarray, shape (n_times,)
The time values in seconds.
freqs : ndarray, shape (n_freqs,)
The frequencies in Hz.
nave : int
The number of averaged TFRs.
comment : str | None
Comment on the data, e.g., the experimental condition.
Defaults to None.
method : str | None
Comment on the method used to compute the data, e.g., morlet wavelet.
Defaults to None.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Attributes
----------
ch_names : list
The names of the channels.
"""
@verbose
def __init__(self, info, data, times, freqs, nave, comment=None,
method=None, verbose=None):
self.info = info
if data.ndim != 3:
raise ValueError('data should be 3d. Got %d.' % data.ndim)
n_channels, n_freqs, n_times = data.shape
if n_channels != len(info['chs']):
raise ValueError("Number of channels and data size don't match"
" (%d != %d)." % (n_channels, len(info['chs'])))
if n_freqs != len(freqs):
raise ValueError("Number of frequencies and data size don't match"
" (%d != %d)." % (n_freqs, len(freqs)))
if n_times != len(times):
raise ValueError("Number of times and data size don't match"
" (%d != %d)." % (n_times, len(times)))
self.data = data
self.times = np.asarray(times)
self.freqs = np.asarray(freqs)
self.nave = nave
self.comment = comment
self.method = method
@property
def ch_names(self):
return self.info['ch_names']
def crop(self, tmin=None, tmax=None, copy=None):
"""Crop data to a given time interval
Parameters
----------
tmin : float | None
Start time of selection in seconds.
tmax : float | None
End time of selection in seconds.
copy : bool
This parameter has been deprecated and will be removed in 0.13.
Use inst.copy() instead.
Whether to return a new instance or modify in place.
Returns
-------
inst : instance of AverageTFR
The modified instance.
"""
inst = _check_copy_dep(self, copy)
mask = _time_mask(inst.times, tmin, tmax, sfreq=self.info['sfreq'])
inst.times = inst.times[mask]
inst.data = inst.data[:, :, mask]
return inst
@verbose
def plot(self, picks=None, baseline=None, mode='mean', tmin=None,
tmax=None, fmin=None, fmax=None, vmin=None, vmax=None,
cmap='RdBu_r', dB=False, colorbar=True, show=True,
title=None, axes=None, layout=None, verbose=None):
"""Plot TFRs in a topography with images
Parameters
----------
picks : array-like of int | None
The indices of the channels to plot.
baseline : None (default) or tuple of length 2
The time interval to apply baseline correction.
If None do not apply it. If baseline is (a, b)
the interval is between "a (s)" and "b (s)".
If a is None the beginning of the data is used
and if b is None then b is set to the end of the interval.
            If baseline is equal to (None, None) all the time
interval is used.
mode : None | 'logratio' | 'ratio' | 'zscore' | 'mean' | 'percent'
Do baseline correction with ratio (power is divided by mean
power during baseline) or zscore (power is divided by standard
deviation of power during baseline after subtracting the mean,
power = [power - mean(power_baseline)] / std(power_baseline)).
If None no baseline correction is applied.
tmin : None | float
The first time instant to display. If None the first time point
available is used.
tmax : None | float
The last time instant to display. If None the last time point
available is used.
fmin : None | float
The first frequency to display. If None the first frequency
available is used.
fmax : None | float
The last frequency to display. If None the last frequency
available is used.
        vmin : float | None
            The minimum value on the color scale. If vmin is None, the data
            minimum value is used.
        vmax : float | None
            The maximum value on the color scale. If vmax is None, the data
            maximum value is used.
cmap : matplotlib colormap | str
The colormap to use. Defaults to 'RdBu_r'.
dB : bool
If True, 20*log10 is applied to the data to get dB.
        colorbar : bool
            If True, a colorbar will be added to the plot. For user-defined
            axes, the colorbar cannot be drawn. Defaults to True.
show : bool
Call pyplot.show() at the end.
title : str | None
String for title. Defaults to None (blank/no title).
axes : instance of Axes | list | None
The axes to plot to. If list, the list must be a list of Axes of
the same length as the number of channels. If instance of Axes,
there must be only one channel plotted.
layout : Layout | None
Layout instance specifying sensor positions. Used for interactive
plotting of topographies on rectangle selection. If possible, the
correct layout is inferred from the data.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
fig : matplotlib.figure.Figure
The figure containing the topography.
"""
from ..viz.topo import _imshow_tfr
import matplotlib.pyplot as plt
times, freqs = self.times.copy(), self.freqs.copy()
info = self.info
data = self.data
        info, data, picks = _prepare_picks(info, data, picks)
        data = data[picks]
        n_picks = len(data)  # number of channels actually plotted
data, times, freqs, vmin, vmax = \
_preproc_tfr(data, times, freqs, tmin, tmax, fmin, fmax, mode,
baseline, vmin, vmax, dB, info['sfreq'])
tmin, tmax = times[0], times[-1]
if isinstance(axes, plt.Axes):
axes = [axes]
        if isinstance(axes, (list, np.ndarray)):
if len(axes) != n_picks:
raise RuntimeError('There must be an axes for each picked '
'channel.')
for idx in range(len(data)):
if axes is None:
fig = plt.figure()
ax = fig.add_subplot(111)
else:
ax = axes[idx]
fig = ax.get_figure()
onselect_callback = partial(self._onselect, baseline=baseline,
mode=mode, layout=layout)
_imshow_tfr(ax, 0, tmin, tmax, vmin, vmax, onselect_callback,
ylim=None, tfr=data[idx: idx + 1], freq=freqs,
x_label='Time (ms)', y_label='Frequency (Hz)',
colorbar=colorbar, picker=False, cmap=cmap)
if title:
fig.suptitle(title)
colorbar = False # only one colorbar for multiple axes
plt_show(show)
return fig
def _onselect(self, eclick, erelease, baseline, mode, layout):
"""Callback function called by rubber band selector in channel tfr."""
import matplotlib.pyplot as plt
from ..viz import plot_tfr_topomap
if abs(eclick.x - erelease.x) < .1 or abs(eclick.y - erelease.y) < .1:
return
plt.ion() # turn interactive mode on
tmin = round(min(eclick.xdata, erelease.xdata) / 1000., 5) # ms to s
tmax = round(max(eclick.xdata, erelease.xdata) / 1000., 5)
fmin = round(min(eclick.ydata, erelease.ydata), 5) # Hz
fmax = round(max(eclick.ydata, erelease.ydata), 5)
tmin = min(self.times, key=lambda x: abs(x - tmin)) # find closest
tmax = min(self.times, key=lambda x: abs(x - tmax))
fmin = min(self.freqs, key=lambda x: abs(x - fmin))
fmax = min(self.freqs, key=lambda x: abs(x - fmax))
if tmin == tmax or fmin == fmax:
logger.info('The selected area is too small. '
'Select a larger time-frequency window.')
return
types = list()
if 'eeg' in self:
types.append('eeg')
if 'mag' in self:
types.append('mag')
if 'grad' in self:
types.append('grad')
fig = figure_nobar()
fig.suptitle('{:.2f} s - {:.2f} s, {:.2f} Hz - {:.2f} Hz'.format(tmin,
tmax,
fmin,
fmax),
y=0.04)
for idx, ch_type in enumerate(types):
ax = plt.subplot(1, len(types), idx + 1)
plot_tfr_topomap(self, ch_type=ch_type, tmin=tmin, tmax=tmax,
fmin=fmin, fmax=fmax, layout=layout,
baseline=baseline, mode=mode, cmap=None,
title=ch_type, vmin=None, vmax=None,
axes=ax)
def plot_topo(self, picks=None, baseline=None, mode='mean', tmin=None,
tmax=None, fmin=None, fmax=None, vmin=None, vmax=None,
layout=None, cmap='RdBu_r', title=None, dB=False,
colorbar=True, layout_scale=0.945, show=True,
border='none', fig_facecolor='k', font_color='w'):
"""Plot TFRs in a topography with images
Parameters
----------
picks : array-like of int | None
The indices of the channels to plot. If None all available
channels are displayed.
baseline : None (default) or tuple of length 2
The time interval to apply baseline correction.
If None do not apply it. If baseline is (a, b)
the interval is between "a (s)" and "b (s)".
If a is None the beginning of the data is used
and if b is None then b is set to the end of the interval.
            If baseline is equal to (None, None) all the time
interval is used.
mode : None | 'logratio' | 'ratio' | 'zscore' | 'mean' | 'percent'
Do baseline correction with ratio (power is divided by mean
power during baseline) or zscore (power is divided by standard
deviation of power during baseline after subtracting the mean,
power = [power - mean(power_baseline)] / std(power_baseline)).
If None no baseline correction is applied.
tmin : None | float
The first time instant to display. If None the first time point
available is used.
tmax : None | float
The last time instant to display. If None the last time point
available is used.
fmin : None | float
The first frequency to display. If None the first frequency
available is used.
fmax : None | float
The last frequency to display. If None the last frequency
available is used.
        vmin : float | None
            The minimum value on the color scale. If vmin is None, the data
            minimum value is used.
        vmax : float | None
            The maximum value on the color scale. If vmax is None, the data
            maximum value is used.
layout : Layout | None
Layout instance specifying sensor positions. If possible, the
correct layout is inferred from the data.
cmap : matplotlib colormap | str
The colormap to use. Defaults to 'RdBu_r'.
title : str
Title of the figure.
dB : bool
If True, 20*log10 is applied to the data to get dB.
        colorbar : bool
            If True, a colorbar will be added to the plot.
layout_scale : float
Scaling factor for adjusting the relative size of the layout
on the canvas.
show : bool
Call pyplot.show() at the end.
border : str
matplotlib borders style to be used for each sensor plot.
fig_facecolor : str | obj
The figure face color. Defaults to black.
font_color: str | obj
The color of tick labels in the colorbar. Defaults to white.
Returns
-------
fig : matplotlib.figure.Figure
The figure containing the topography.
"""
from ..viz.topo import _imshow_tfr, _plot_topo, _imshow_tfr_unified
times = self.times.copy()
freqs = self.freqs
data = self.data
info = self.info
info, data, picks = _prepare_picks(info, data, picks)
data = data[picks]
data, times, freqs, vmin, vmax = \
_preproc_tfr(data, times, freqs, tmin, tmax, fmin, fmax,
mode, baseline, vmin, vmax, dB, info['sfreq'])
if layout is None:
from mne import find_layout
layout = find_layout(self.info)
onselect_callback = partial(self._onselect, baseline=baseline,
mode=mode, layout=layout)
click_fun = partial(_imshow_tfr, tfr=data, freq=freqs, cmap=cmap,
onselect=onselect_callback)
imshow = partial(_imshow_tfr_unified, tfr=data, freq=freqs, cmap=cmap,
onselect=onselect_callback)
fig = _plot_topo(info=info, times=times, show_func=imshow,
click_func=click_fun, layout=layout,
colorbar=colorbar, vmin=vmin, vmax=vmax, cmap=cmap,
layout_scale=layout_scale, title=title, border=border,
x_label='Time (ms)', y_label='Frequency (Hz)',
fig_facecolor=fig_facecolor, font_color=font_color,
unified=True, img=True)
plt_show(show)
return fig
def _check_compat(self, tfr):
"""checks that self and tfr have the same time-frequency ranges"""
assert np.all(tfr.times == self.times)
assert np.all(tfr.freqs == self.freqs)
def __add__(self, tfr):
self._check_compat(tfr)
out = self.copy()
out.data += tfr.data
return out
def __iadd__(self, tfr):
self._check_compat(tfr)
self.data += tfr.data
return self
def __sub__(self, tfr):
self._check_compat(tfr)
out = self.copy()
out.data -= tfr.data
return out
def __isub__(self, tfr):
self._check_compat(tfr)
self.data -= tfr.data
return self
def copy(self):
"""Return a copy of the instance."""
return deepcopy(self)
def __repr__(self):
s = "time : [%f, %f]" % (self.times[0], self.times[-1])
s += ", freq : [%f, %f]" % (self.freqs[0], self.freqs[-1])
s += ", nave : %d" % self.nave
s += ', channels : %d' % self.data.shape[0]
return "<AverageTFR | %s>" % s
@verbose
def apply_baseline(self, baseline, mode='mean', verbose=None):
"""Baseline correct the data
Parameters
----------
baseline : tuple or list of length 2
The time interval to apply rescaling / baseline correction.
If None do not apply it. If baseline is (a, b)
the interval is between "a (s)" and "b (s)".
If a is None the beginning of the data is used
and if b is None then b is set to the end of the interval.
If baseline is equal to (None, None) all the time
interval is used.
mode : 'logratio' | 'ratio' | 'zscore' | 'mean' | 'percent'
Do baseline correction with ratio (power is divided by mean
power during baseline) or z-score (power is divided by standard
deviation of power during baseline after subtracting the mean,
power = [power - mean(power_baseline)] / std(power_baseline))
            If None, no baseline correction will be performed.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
"""
self.data = rescale(self.data, self.times, baseline, mode,
copy=False)
def plot_topomap(self, tmin=None, tmax=None, fmin=None, fmax=None,
ch_type=None, baseline=None, mode='mean',
layout=None, vmin=None, vmax=None, cmap=None,
sensors=True, colorbar=True, unit=None, res=64, size=2,
cbar_fmt='%1.1e', show_names=False, title=None,
axes=None, show=True, outlines='head', head_pos=None):
"""Plot topographic maps of time-frequency intervals of TFR data
Parameters
----------
tmin : None | float
The first time instant to display. If None the first time point
available is used.
tmax : None | float
The last time instant to display. If None the last time point
available is used.
fmin : None | float
The first frequency to display. If None the first frequency
available is used.
fmax : None | float
The last frequency to display. If None the last frequency
available is used.
ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg' | None
The channel type to plot. For 'grad', the gradiometers are
collected in pairs and the RMS for each pair is plotted.
If None, then first available channel type from order given
above is used. Defaults to None.
baseline : tuple or list of length 2
The time interval to apply rescaling / baseline correction.
If None do not apply it. If baseline is (a, b)
the interval is between "a (s)" and "b (s)".
If a is None the beginning of the data is used
and if b is None then b is set to the end of the interval.
If baseline is equal to (None, None) all the time
interval is used.
mode : 'logratio' | 'ratio' | 'zscore' | 'mean' | 'percent'
Do baseline correction with ratio (power is divided by mean
power during baseline) or z-score (power is divided by standard
deviation of power during baseline after subtracting the mean,
power = [power - mean(power_baseline)] / std(power_baseline))
            If None, no baseline correction will be performed.
layout : None | Layout
Layout instance specifying sensor positions (does not need to
be specified for Neuromag data). If possible, the correct layout
file is inferred from the data; if no appropriate layout file was
found, the layout is automatically generated from the sensor
locations.
vmin : float | callable | None
The value specifying the lower bound of the color range. If None,
and vmax is None, -vmax is used. Else np.min(data) or in case
data contains only positive values 0. If callable, the output
equals vmin(data). Defaults to None.
vmax : float | callable | None
The value specifying the upper bound of the color range. If None,
the maximum value is used. If callable, the output equals
vmax(data). Defaults to None.
cmap : matplotlib colormap | None
Colormap. If None and the plotted data is all positive, defaults to
'Reds'. If None and data contains also negative values, defaults to
'RdBu_r'. Defaults to None.
sensors : bool | str
Add markers for sensor locations to the plot. Accepts matplotlib
plot format string (e.g., 'r+' for red plusses). If True, a circle
will be used (via .add_artist). Defaults to True.
colorbar : bool
Plot a colorbar.
unit : dict | str | None
The unit of the channel type used for colorbar label. If
scale is None the unit is automatically determined.
res : int
The resolution of the topomap image (n pixels along each side).
size : float
Side length per topomap in inches.
cbar_fmt : str
String format for colorbar values.
show_names : bool | callable
If True, show channel names on top of the map. If a callable is
passed, channel names will be formatted using the callable; e.g.,
to delete the prefix 'MEG ' from all channel names, pass the
function lambda x: x.replace('MEG ', ''). If `mask` is not None,
only significant sensors will be shown.
title : str | None
Title. If None (default), no title is displayed.
axes : instance of Axes | None
The axes to plot to. If None the axes is defined automatically.
show : bool
Call pyplot.show() at the end.
outlines : 'head' | 'skirt' | dict | None
The outlines to be drawn. If 'head', the default head scheme will
be drawn. If 'skirt' the head scheme will be drawn, but sensors are
allowed to be plotted outside of the head circle. If dict, each key
refers to a tuple of x and y positions, the values in 'mask_pos'
will serve as image mask, and the 'autoshrink' (bool) field will
trigger automated shrinking of the positions due to points outside
the outline. Alternatively, a matplotlib patch object can be passed
for advanced masking options, either directly or as a function that
returns patches (required for multi-axis plots). If None, nothing
will be drawn. Defaults to 'head'.
head_pos : dict | None
If None (default), the sensors are positioned such that they span
the head circle. If dict, can have entries 'center' (tuple) and
'scale' (tuple) for what the center and scale of the head should be
relative to the electrode locations.
Returns
-------
fig : matplotlib.figure.Figure
The figure containing the topography.
"""
from ..viz import plot_tfr_topomap
return plot_tfr_topomap(self, tmin=tmin, tmax=tmax, fmin=fmin,
fmax=fmax, ch_type=ch_type, baseline=baseline,
mode=mode, layout=layout, vmin=vmin, vmax=vmax,
cmap=cmap, sensors=sensors, colorbar=colorbar,
unit=unit, res=res, size=size,
cbar_fmt=cbar_fmt, show_names=show_names,
title=title, axes=axes, show=show,
outlines=outlines, head_pos=head_pos)
def save(self, fname, overwrite=False):
"""Save TFR object to hdf5 file
Parameters
----------
fname : str
The file name, which should end with -tfr.h5 .
overwrite : bool
            If True, overwrite file (if it exists). Defaults to False.
"""
write_tfrs(fname, self, overwrite=overwrite)
def _prepare_write_tfr(tfr, condition):
"""Aux function"""
return (condition, dict(times=tfr.times, freqs=tfr.freqs,
data=tfr.data, info=tfr.info,
nave=tfr.nave, comment=tfr.comment,
method=tfr.method))
def write_tfrs(fname, tfr, overwrite=False):
"""Write a TFR dataset to hdf5.
Parameters
----------
fname : string
The file name, which should end with -tfr.h5
tfr : AverageTFR instance, or list of AverageTFR instances
The TFR dataset, or list of TFR datasets, to save in one file.
        Note. If .comment is None, a name will be generated on the fly,
        based on the order in which the TFR objects are passed.
overwrite : bool
If True, overwrite file (if it exists). Defaults to False.
See Also
--------
read_tfrs
Notes
-----
.. versionadded:: 0.9.0
"""
out = []
if not isinstance(tfr, (list, tuple)):
tfr = [tfr]
for ii, tfr_ in enumerate(tfr):
comment = ii if tfr_.comment is None else tfr_.comment
out.append(_prepare_write_tfr(tfr_, condition=comment))
write_hdf5(fname, out, overwrite=overwrite, title='mnepython')
def read_tfrs(fname, condition=None):
"""
Read TFR datasets from hdf5 file.
Parameters
----------
fname : string
The file name, which should end with -tfr.h5 .
condition : int or str | list of int or str | None
The condition to load. If None, all conditions will be returned.
Defaults to None.
See Also
--------
write_tfrs
Returns
-------
tfrs : list of instances of AverageTFR | instance of AverageTFR
Depending on `condition` either the TFR object or a list of multiple
TFR objects.
Notes
-----
.. versionadded:: 0.9.0
"""
check_fname(fname, 'tfr', ('-tfr.h5',))
logger.info('Reading %s ...' % fname)
tfr_data = read_hdf5(fname, title='mnepython')
for k, tfr in tfr_data:
tfr['info'] = Info(tfr['info'])
if condition is not None:
tfr_dict = dict(tfr_data)
if condition not in tfr_dict:
keys = ['%s' % k for k in tfr_dict]
            raise ValueError('Cannot find condition ("{0}") in this file. '
                             'I can give you "{1}".'
                             .format(condition, " or ".join(keys)))
out = AverageTFR(**tfr_dict[condition])
else:
out = [AverageTFR(**d) for d in list(zip(*tfr_data))[1]]
return out
@verbose
def tfr_morlet(inst, freqs, n_cycles, use_fft=False, return_itc=True, decim=1,
n_jobs=1, picks=None, verbose=None):
"""Compute Time-Frequency Representation (TFR) using Morlet wavelets
Parameters
----------
inst : Epochs | Evoked
The epochs or evoked object.
freqs : ndarray, shape (n_freqs,)
The frequencies in Hz.
n_cycles : float | ndarray, shape (n_freqs,)
The number of cycles globally or for each frequency.
    use_fft : bool
        Use FFT-based convolution or not.
return_itc : bool
Return intertrial coherence (ITC) as well as averaged power.
Must be ``False`` for evoked data.
decim : int | slice
To reduce memory usage, decimation factor after time-frequency
decomposition.
If `int`, returns tfr[..., ::decim].
If `slice` returns tfr[..., decim].
Note that decimation may create aliasing artifacts.
Defaults to 1.
n_jobs : int
The number of jobs to run in parallel.
picks : array-like of int | None
The indices of the channels to plot. If None all available
channels are displayed.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
power : instance of AverageTFR
The averaged power.
itc : instance of AverageTFR
The intertrial coherence (ITC). Only returned if return_itc
is True.
See Also
--------
tfr_multitaper, tfr_stockwell
"""
decim = _check_decim(decim)
data = _get_data(inst, return_itc)
info = inst.info
info, data, picks = _prepare_picks(info, data, picks)
data = data[:, picks, :]
power, itc = _induced_power_cwt(data, sfreq=info['sfreq'],
frequencies=freqs,
n_cycles=n_cycles, n_jobs=n_jobs,
use_fft=use_fft, decim=decim,
zero_mean=True)
times = inst.times[decim].copy()
nave = len(data)
out = AverageTFR(info, power, times, freqs, nave, method='morlet-power')
if return_itc:
out = (out, AverageTFR(info, itc, times, freqs, nave,
method='morlet-itc'))
return out
def _prepare_picks(info, data, picks):
if picks is None:
picks = pick_types(info, meg=True, eeg=True, ref_meg=False,
exclude='bads')
if np.array_equal(picks, np.arange(len(data))):
picks = slice(None)
else:
info = pick_info(info, picks)
return info, data, picks
@verbose
def _induced_power_mtm(data, sfreq, frequencies, time_bandwidth=4.0,
use_fft=True, n_cycles=7, decim=1, n_jobs=1,
zero_mean=True, verbose=None):
"""Compute time induced power and inter-trial phase-locking factor
The time frequency decomposition is done with DPSS wavelets
Parameters
----------
data : np.ndarray, shape (n_epochs, n_channels, n_times)
The input data.
sfreq : float
Sampling frequency.
frequencies : np.ndarray, shape (n_frequencies,)
Array of frequencies of interest
time_bandwidth : float
Time x (Full) Bandwidth product.
The number of good tapers (low-bias) is chosen automatically based on
this to equal floor(time_bandwidth - 1). Default is 4.0 (3 tapers).
use_fft : bool
Compute transform with fft based convolutions or temporal
convolutions. Defaults to True.
n_cycles : float | np.ndarray shape (n_frequencies,)
Number of cycles. Fixed number or one per frequency. Defaults to 7.
decim : int | slice
To reduce memory usage, decimation factor after time-frequency
decomposition.
If `int`, returns tfr[..., ::decim].
If `slice` returns tfr[..., decim].
Note that decimation may create aliasing artifacts.
Defaults to 1.
n_jobs : int
The number of CPUs used in parallel. All CPUs are used in -1.
Requires joblib package. Defaults to 1.
zero_mean : bool
Make sure the wavelets are zero mean. Defaults to True.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
power : np.ndarray, shape (n_channels, n_frequencies, n_times)
Induced power. Squared amplitude of time-frequency coefficients.
itc : np.ndarray, shape (n_channels, n_frequencies, n_times)
Phase locking value.
"""
decim = _check_decim(decim)
n_epochs, n_channels, n_times = data[:, :, decim].shape
logger.info('Data is %d trials and %d channels', n_epochs, n_channels)
n_frequencies = len(frequencies)
logger.info('Multitaper time-frequency analysis for %d frequencies',
n_frequencies)
# Precompute wavelets for given frequency range to save time
Ws = _dpss_wavelet(sfreq, frequencies, n_cycles=n_cycles,
time_bandwidth=time_bandwidth, zero_mean=zero_mean)
n_taps = len(Ws)
logger.info('Using %d tapers', n_taps)
n_times_wavelets = Ws[0][0].shape[0]
if data.shape[2] <= n_times_wavelets:
warn('Time windows are as long or longer than the epoch. Consider '
'reducing n_cycles.')
psd = np.zeros((n_channels, n_frequencies, n_times))
itc = np.zeros((n_channels, n_frequencies, n_times))
parallel, my_time_frequency, _ = parallel_func(_time_frequency,
n_jobs)
for m in range(n_taps):
psd_itc = parallel(my_time_frequency(data[:, c, :], Ws[m], use_fft,
decim)
for c in range(n_channels))
for c, (psd_c, itc_c) in enumerate(psd_itc):
psd[c, :, :] += psd_c
itc[c, :, :] += itc_c
psd /= n_taps
itc /= n_taps
return psd, itc
@verbose
def tfr_multitaper(inst, freqs, n_cycles, time_bandwidth=4.0,
use_fft=True, return_itc=True, decim=1,
n_jobs=1, picks=None, verbose=None):
"""Compute Time-Frequency Representation (TFR) using DPSS wavelets
Parameters
----------
inst : Epochs | Evoked
The epochs or evoked object.
freqs : ndarray, shape (n_freqs,)
The frequencies in Hz.
n_cycles : float | ndarray, shape (n_freqs,)
The number of cycles globally or for each frequency.
The time-window length is thus T = n_cycles / freq.
time_bandwidth : float, (optional)
Time x (Full) Bandwidth product. Should be >= 2.0.
Choose this along with n_cycles to get desired frequency resolution.
The number of good tapers (least leakage from far away frequencies)
is chosen automatically based on this to floor(time_bandwidth - 1).
Default is 4.0 (3 good tapers).
E.g., With freq = 20 Hz and n_cycles = 10, we get time = 0.5 s.
If time_bandwidth = 4., then frequency smoothing is (4 / time) = 8 Hz.
    use_fft : bool
        Use FFT-based convolution or not.
        Defaults to True.
return_itc : bool
Return intertrial coherence (ITC) as well as averaged power.
Defaults to True.
decim : int | slice
To reduce memory usage, decimation factor after time-frequency
decomposition.
If `int`, returns tfr[..., ::decim].
If `slice` returns tfr[..., decim].
Note that decimation may create aliasing artifacts.
Defaults to 1.
n_jobs : int
The number of jobs to run in parallel. Defaults to 1.
picks : array-like of int | None
The indices of the channels to plot. If None all available
channels are displayed.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
power : AverageTFR
The averaged power.
itc : AverageTFR
The intertrial coherence (ITC). Only returned if return_itc
is True.
See Also
--------
    tfr_morlet, tfr_stockwell
Notes
-----
.. versionadded:: 0.9.0
"""
decim = _check_decim(decim)
data = _get_data(inst, return_itc)
info = inst.info
info, data, picks = _prepare_picks(info, data, picks)
    data = data[:, picks, :]
power, itc = _induced_power_mtm(data, sfreq=info['sfreq'],
frequencies=freqs, n_cycles=n_cycles,
time_bandwidth=time_bandwidth,
use_fft=use_fft, decim=decim,
n_jobs=n_jobs, zero_mean=True,
verbose='INFO')
times = inst.times[decim].copy()
nave = len(data)
    out = AverageTFR(info, power, times, freqs, nave,
                     method='multitaper-power')
if return_itc:
        out = (out, AverageTFR(info, itc, times, freqs, nave,
                               method='multitaper-itc'))
return out
def combine_tfr(all_tfr, weights='nave'):
"""Merge AverageTFR data by weighted addition
Create a new AverageTFR instance, using a combination of the supplied
instances as its data. By default, the mean (weighted by trials) is used.
Subtraction can be performed by passing negative weights (e.g., [1, -1]).
Data must have the same channels and the same time instants.
Parameters
----------
all_tfr : list of AverageTFR
The tfr datasets.
weights : list of float | str
The weights to apply to the data of each AverageTFR instance.
Can also be ``'nave'`` to weight according to tfr.nave,
or ``'equal'`` to use equal weighting (each weighted as ``1/N``).
Returns
-------
tfr : AverageTFR
The new TFR data.
Notes
-----
.. versionadded:: 0.11.0
"""
tfr = all_tfr[0].copy()
if isinstance(weights, string_types):
if weights not in ('nave', 'equal'):
raise ValueError('Weights must be a list of float, or "nave" or '
'"equal"')
if weights == 'nave':
weights = np.array([e.nave for e in all_tfr], float)
weights /= weights.sum()
else: # == 'equal'
weights = [1. / len(all_tfr)] * len(all_tfr)
weights = np.array(weights, float)
if weights.ndim != 1 or weights.size != len(all_tfr):
raise ValueError('Weights must be the same size as all_tfr')
ch_names = tfr.ch_names
for t_ in all_tfr[1:]:
assert t_.ch_names == ch_names, ValueError("%s and %s do not contain "
"the same channels"
% (tfr, t_))
assert np.max(np.abs(t_.times - tfr.times)) < 1e-7, \
ValueError("%s and %s do not contain the same time instants"
% (tfr, t_))
# use union of bad channels
bads = list(set(tfr.info['bads']).union(*(t_.info['bads']
for t_ in all_tfr[1:])))
tfr.info['bads'] = bads
tfr.data = sum(w * t_.data for w, t_ in zip(weights, all_tfr))
tfr.nave = max(int(1. / sum(w ** 2 / e.nave
for w, e in zip(weights, all_tfr))), 1)
return tfr
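# Worked sketch of the 'nave' weighting used by `combine_tfr` (plain numbers,
# illustrative only): weights are proportional to each instance's nave, and
# the effective nave of the combination is 1 / sum(w_i ** 2 / nave_i).
def _demo_combine_weights():  # hypothetical helper, safe to remove
    naves = np.array([1., 3.])
    weights = naves / naves.sum()                        # -> [0.25, 0.75]
    new_nave = max(int(1. / np.sum(weights ** 2 / naves)), 1)
    print(weights, new_nave)  # nave-weighting recovers the total trial count (4)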
def _check_decim(decim):
""" aux function checking the decim parameter """
if isinstance(decim, int):
decim = slice(None, None, decim)
elif not isinstance(decim, slice):
        raise TypeError('`decim` must be int or slice, got %s instead'
                        % type(decim))
return decim
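# Brief sketch of the `decim` convention used throughout this module
# (illustrative): an int n is normalized to slice(None, None, n), so
# indexing with it keeps every n-th time sample; an explicit slice passes
# through unchanged.
def _demo_check_decim():  # hypothetical helper, safe to remove
    times = np.arange(10)
    print(times[_check_decim(3)])         # -> [0 3 6 9]
    print(times[_check_decim(slice(2))])  # -> [0 1]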
|
phupn1510/ELK
|
refs/heads/master
|
kibana/kibana/node/lib/node_modules/npm/node_modules/node-gyp/gyp/gyp_main.py
|
1452
|
#!/usr/bin/env python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import sys
# Make sure we're using the version of pylib in this repo, not one installed
# elsewhere on the system.
sys.path.insert(0, os.path.join(os.path.dirname(sys.argv[0]), 'pylib'))
import gyp
if __name__ == '__main__':
sys.exit(gyp.script_main())
|
raymondxyang/tensorflow
|
refs/heads/master
|
tensorflow/contrib/learn/python/learn/ops/ops_test.py
|
94
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.layers import conv2d
from tensorflow.contrib.learn.python.learn import ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import variables
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class OpsTest(test.TestCase):
"""Ops tests."""
def test_softmax_classifier(self):
with self.test_session() as session:
features = array_ops.placeholder(dtypes.float32, [None, 3])
labels = array_ops.placeholder(dtypes.float32, [None, 2])
weights = constant_op.constant([[0.1, 0.1], [0.1, 0.1], [0.1, 0.1]])
biases = constant_op.constant([0.2, 0.3])
class_weight = constant_op.constant([0.1, 0.9])
prediction, loss = ops.softmax_classifier(features, labels, weights,
biases, class_weight)
self.assertEqual(prediction.get_shape()[1], 2)
self.assertEqual(loss.get_shape(), [])
value = session.run(loss, {features: [[0.2, 0.3, 0.2]], labels: [[0, 1]]})
self.assertAllClose(value, 0.55180627)
def test_embedding_lookup(self):
d_embed = 5
n_embed = 10
ids_shape = (2, 3, 4)
embeds = np.random.randn(n_embed, d_embed)
ids = np.random.randint(0, n_embed, ids_shape)
with self.test_session():
embed_np = embeds[ids]
embed_tf = ops.embedding_lookup(embeds, ids).eval()
self.assertEqual(embed_np.shape, embed_tf.shape)
self.assertAllClose(embed_np, embed_tf)
def test_categorical_variable(self):
random_seed.set_random_seed(42)
with self.test_session() as sess:
cat_var_idx = array_ops.placeholder(dtypes.int64, [2, 2])
embeddings = ops.categorical_variable(
cat_var_idx, n_classes=5, embedding_size=10, name="my_cat_var")
sess.run(variables.global_variables_initializer())
emb1 = sess.run(embeddings,
feed_dict={cat_var_idx.name: [[0, 1], [2, 3]]})
emb2 = sess.run(embeddings,
feed_dict={cat_var_idx.name: [[0, 2], [1, 3]]})
self.assertEqual(emb1.shape, emb2.shape)
self.assertAllEqual(np.transpose(emb2, axes=[1, 0, 2]), emb1)
if __name__ == "__main__":
test.main()
|
beernarrd/gramps
|
refs/heads/sl-master
|
gramps/gen/datehandler/_date_el.py
|
2
|
# -*- coding: utf-8 -*-
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2004-2006 Donald N. Allingham
# Copyright (C) 2013 Zissis Papadopoulos <zissis@mail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Greek-specific classes for parsing and displaying dates.
"""
#-------------------------------------------------------------------------
#
# Python modules
#
#-------------------------------------------------------------------------
import re
#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from ..lib.date import Date
from ._dateparser import DateParser
from ._datedisplay import DateDisplay
from ._datehandler import register_datehandler
#-------------------------------------------------------------------------
#
# Greek parser class
#
#-------------------------------------------------------------------------
class DateParserEL(DateParser):
"""
Convert a text string into a Date object. If the date cannot be
converted, the text string is assigned.
"""
# modifiers before the date
modifier_to_int = {
'προ του' : Date.MOD_BEFORE,
'πριν το' : Date.MOD_BEFORE,
'πριν από τις' : Date.MOD_BEFORE,
'πριν από την' : Date.MOD_BEFORE,
'πριν από το' : Date.MOD_BEFORE,
'πριν από τον' : Date.MOD_BEFORE,
'προ' : Date.MOD_BEFORE,
'πρ.' : Date.MOD_BEFORE,
'μετά το' : Date.MOD_AFTER,
'μετά από τις' : Date.MOD_AFTER,
'μετά από την' : Date.MOD_AFTER,
'μετά από το' : Date.MOD_AFTER,
'μετά από τον' : Date.MOD_AFTER,
'μετά' : Date.MOD_AFTER,
'μετ.' : Date.MOD_AFTER,
'γύρω στο' : Date.MOD_ABOUT,
'γύρω στον' : Date.MOD_ABOUT,
'γύρω στις' : Date.MOD_ABOUT,
'περίπου το' : Date.MOD_ABOUT,
'περ.' : Date.MOD_ABOUT,
'γυρ.' : Date.MOD_ABOUT,
'~' : Date.MOD_ABOUT,
}
bce = ["π.Χ.", "π.Κ.Χ.", "π.Κ.Ε.", "π.Χ" ]
calendar_to_int = {
'γρηγοριανό' : Date.CAL_GREGORIAN,
'γ' : Date.CAL_GREGORIAN,
'ιουλιανό' : Date.CAL_JULIAN,
'ι' : Date.CAL_JULIAN,
'εβραϊκό' : Date.CAL_HEBREW,
'ε' : Date.CAL_HEBREW,
'ισλαμικό' : Date.CAL_ISLAMIC,
'ισλ' : Date.CAL_ISLAMIC,
'γαλλικό' : Date.CAL_FRENCH,
'γαλλικής δημοκρατίας': Date.CAL_FRENCH,
'γ' : Date.CAL_FRENCH,
'περσικό' : Date.CAL_PERSIAN,
'π' : Date.CAL_PERSIAN,
'σουηδικό' : Date.CAL_SWEDISH,
'σ' : Date.CAL_SWEDISH,
}
quality_to_int = {
'κατʼ εκτίμηση' : Date.QUAL_ESTIMATED,
'εκτιμώμενη' : Date.QUAL_ESTIMATED,
'εκτ.' : Date.QUAL_ESTIMATED,
'εκτ' : Date.QUAL_ESTIMATED,
'υπολογ' : Date.QUAL_CALCULATED,
'υπολογ.' : Date.QUAL_CALCULATED,
'υπολογισμένη' : Date.QUAL_CALCULATED,
'με υπολογισμό' : Date.QUAL_CALCULATED,
}
def init_strings(self):
"""
This method compiles regular expression strings for matching dates.
"""
DateParser.init_strings(self)
_span_1 = ['από']
_span_2 = ['έως']
_range_1 = ['μετ', 'μετ\.', 'μεταξύ']
_range_2 = ['και']
self._span = re.compile("(%s)\s+(?P<start>.+)\s+(%s)\s+(?P<stop>.+)" %
('|'.join(_span_1), '|'.join(_span_2)),
re.IGNORECASE)
self._range = re.compile("(%s)\s+(?P<start>.+)\s+(%s)\s+(?P<stop>.+)" %
('|'.join(_range_1), '|'.join(_range_2)),
re.IGNORECASE)
#-------------------------------------------------------------------------
#
# Greek display
#
#-------------------------------------------------------------------------
class DateDisplayEL(DateDisplay):
"""
Greek language date display class.
"""
# this is used to display the 12 gregorian months
long_months = ( "", "Ιανουάριος", "Φεβρουάριος", "Μάρτιος",
"Απρίλιος", "Μάιος", "Ιούνιος",
"Ιούλιος", "Αύγουστος", "Σεπτέμβριος",
"Οκτώβριος", "Νοέμβριος", "Δεκέμβριος" )
short_months = ( "", "Ιαν", "Φεβ", "Μαρ", "Απρ", "Μάι", "Ιουν",
"Ιουλ", "Αύγ", "Σεπ", "Οκτ", "Νοε", "Δεκ" )
_mod_str = ("", "προ του ", "μετά το ", "γύρω στο ", "", "", "")
_qual_str = ("", "εκτιμώμενη ", "υπολογισμένη ")
_bce_str = "%s π.Χ."
formats = (
"ΕΕΕΕ-ΜΜ-ΗΗ (ISO)", "ΗΗ-ΜΜ-ΕΕΕΕ", "ΗΗ/ΜΜ/ΕΕΕΕ",
"ΗΗ Μήνας ΕΕΕΕ", "ΗΗ Μήν ΕΕΕΕ"
)
# this definition must agree with its "_display_gregorian" method
def _display_gregorian(self, date_val):
"""
display gregorian calendar date in different format
"""
# this must agree with its locale-specific "formats" definition
year = self._slash_year(date_val[2], date_val[3])
if self.format == 0:
return self.display_iso(date_val)
elif self.format == 1:
# day-month_number-year
if date_val[0] == 0:
if date_val[1] == 0:
value = year
else:
value = "%s-%s" % (date_val[1], year)
else:
value = "%d-%s-%s" % (date_val[0], date_val[1], year)
elif self.format == 2:
# day/month_number/year
if date_val[0] == 0:
if date_val[1] == 0:
value = year
else:
value = "%s/%s" % (date_val[1], year)
else:
value = "%d/%s/%s" % (date_val[0], date_val[1], year)
elif self.format == 3:
# day month_name year
if date_val[0] == 0:
if date_val[1] == 0:
value = year
else:
value = "%s %s" % (self.long_months[date_val[1]], year)
else:
value = "%d %s %s" % (date_val[0],
self.long_months[date_val[1]], year)
else:
# day month_abbreviation year
if date_val[0] == 0:
if date_val[1] == 0:
value = year
else:
value = "%s %s" % (self.short_months[date_val[1]], year)
else:
value = "%d %s %s" % (date_val[0],
self.short_months[date_val[1]], year)
if date_val[2] < 0:
return self._bce_str % value
else:
return value
def display(self, date):
"""
Return a text string representing the date.
"""
mod = date.get_modifier()
cal = date.get_calendar()
qual = date.get_quality()
start = date.get_start_date()
newyear = date.get_new_year()
qual_str = self._qual_str[qual]
if mod == Date.MOD_TEXTONLY:
return date.get_text()
elif start == Date.EMPTY:
return ""
elif mod == Date.MOD_SPAN:
d1 = self.display_cal[cal](start)
d2 = self.display_cal[cal](date.get_stop_date())
scal = self.format_extras(cal, newyear)
return "%s%s %s %s %s%s" % (qual_str, 'από', d1, 'έως', d2, scal)
elif mod == Date.MOD_RANGE:
d1 = self.display_cal[cal](start)
d2 = self.display_cal[cal](date.get_stop_date())
scal = self.format_extras(cal, newyear)
return "%s%s %s %s %s%s" % (qual_str, 'μεταξύ', d1, 'και', d2, scal)
else:
text = self.display_cal[date.get_calendar()](start)
scal = self.format_extras(cal, newyear)
return "%s%s%s%s" % (qual_str, self._mod_str[mod], text, scal)
#-------------------------------------------------------------------------
#
# Register classes
#
#-------------------------------------------------------------------------
register_datehandler(('el_GR', 'el_CY', 'el', 'Greek', 'greek'),
DateParserEL, DateDisplayEL)
|
wangmiao1981/spark
|
refs/heads/master
|
python/pyspark/sql/group.py
|
23
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
from pyspark.sql.column import Column, _to_seq
from pyspark.sql.dataframe import DataFrame
from pyspark.sql.pandas.group_ops import PandasGroupedOpsMixin
from pyspark.sql.types import StructType, StructField, IntegerType, StringType
__all__ = ["GroupedData"]
def dfapi(f):
def _api(self):
name = f.__name__
jdf = getattr(self._jgd, name)()
return DataFrame(jdf, self.sql_ctx)
_api.__name__ = f.__name__
_api.__doc__ = f.__doc__
return _api
def df_varargs_api(f):
def _api(self, *cols):
name = f.__name__
jdf = getattr(self._jgd, name)(_to_seq(self.sql_ctx._sc, cols))
return DataFrame(jdf, self.sql_ctx)
_api.__name__ = f.__name__
_api.__doc__ = f.__doc__
return _api
class GroupedData(PandasGroupedOpsMixin):
"""
A set of methods for aggregations on a :class:`DataFrame`,
created by :func:`DataFrame.groupBy`.
.. versionadded:: 1.3
"""
def __init__(self, jgd, df):
self._jgd = jgd
self._df = df
self.sql_ctx = df.sql_ctx
def agg(self, *exprs):
"""Compute aggregates and returns the result as a :class:`DataFrame`.
The available aggregate functions can be:
1. built-in aggregation functions, such as `avg`, `max`, `min`, `sum`, `count`
2. group aggregate pandas UDFs, created with :func:`pyspark.sql.functions.pandas_udf`
.. note:: There is no partial aggregation with group aggregate UDFs, i.e.,
a full shuffle is required. Also, all the data of a group will be loaded into
memory, so the user should be aware of the potential OOM risk if data is skewed
and certain groups are too large to fit in memory.
.. seealso:: :func:`pyspark.sql.functions.pandas_udf`
If ``exprs`` is a single :class:`dict` mapping from string to string, then the key
is the column to perform aggregation on, and the value is the aggregate function.
Alternatively, ``exprs`` can also be a list of aggregate :class:`Column` expressions.
.. versionadded:: 1.3.0
Parameters
----------
exprs : dict
a dict mapping from column name (string) to aggregate functions (string),
or a list of :class:`Column`.
Notes
-----
Built-in aggregation functions and group aggregate pandas UDFs cannot be mixed
in a single call to this function.
Examples
--------
>>> gdf = df.groupBy(df.name)
>>> sorted(gdf.agg({"*": "count"}).collect())
[Row(name='Alice', count(1)=1), Row(name='Bob', count(1)=1)]
>>> from pyspark.sql import functions as F
>>> sorted(gdf.agg(F.min(df.age)).collect())
[Row(name='Alice', min(age)=2), Row(name='Bob', min(age)=5)]
>>> from pyspark.sql.functions import pandas_udf, PandasUDFType
>>> @pandas_udf('int', PandasUDFType.GROUPED_AGG) # doctest: +SKIP
... def min_udf(v):
... return v.min()
>>> sorted(gdf.agg(min_udf(df.age)).collect()) # doctest: +SKIP
[Row(name='Alice', min_udf(age)=2), Row(name='Bob', min_udf(age)=5)]
"""
assert exprs, "exprs should not be empty"
if len(exprs) == 1 and isinstance(exprs[0], dict):
jdf = self._jgd.agg(exprs[0])
else:
# Columns
assert all(isinstance(c, Column) for c in exprs), "all exprs should be Column"
jdf = self._jgd.agg(exprs[0]._jc,
_to_seq(self.sql_ctx._sc, [c._jc for c in exprs[1:]]))
return DataFrame(jdf, self.sql_ctx)
@dfapi
def count(self):
"""Counts the number of records for each group.
.. versionadded:: 1.3.0
Examples
--------
>>> sorted(df.groupBy(df.age).count().collect())
[Row(age=2, count=1), Row(age=5, count=1)]
"""
@df_varargs_api
def mean(self, *cols):
"""Computes average values for each numeric columns for each group.
:func:`mean` is an alias for :func:`avg`.
.. versionadded:: 1.3.0
Parameters
----------
cols : str
column names. Non-numeric columns are ignored.
Examples
--------
>>> df.groupBy().mean('age').collect()
[Row(avg(age)=3.5)]
>>> df3.groupBy().mean('age', 'height').collect()
[Row(avg(age)=3.5, avg(height)=82.5)]
"""
@df_varargs_api
def avg(self, *cols):
"""Computes average values for each numeric columns for each group.
:func:`mean` is an alias for :func:`avg`.
.. versionadded:: 1.3.0
Parameters
----------
cols : str
column names. Non-numeric columns are ignored.
Examples
--------
>>> df.groupBy().avg('age').collect()
[Row(avg(age)=3.5)]
>>> df3.groupBy().avg('age', 'height').collect()
[Row(avg(age)=3.5, avg(height)=82.5)]
"""
@df_varargs_api
def max(self, *cols):
"""Computes the max value for each numeric columns for each group.
.. versionadded:: 1.3.0
Examples
--------
>>> df.groupBy().max('age').collect()
[Row(max(age)=5)]
>>> df3.groupBy().max('age', 'height').collect()
[Row(max(age)=5, max(height)=85)]
"""
@df_varargs_api
def min(self, *cols):
"""Computes the min value for each numeric column for each group.
.. versionadded:: 1.3.0
Parameters
----------
cols : str
column names. Non-numeric columns are ignored.
Examples
--------
>>> df.groupBy().min('age').collect()
[Row(min(age)=2)]
>>> df3.groupBy().min('age', 'height').collect()
[Row(min(age)=2, min(height)=80)]
"""
@df_varargs_api
def sum(self, *cols):
"""Compute the sum for each numeric columns for each group.
.. versionadded:: 1.3.0
Parameters
----------
cols : str
column names. Non-numeric columns are ignored.
Examples
--------
>>> df.groupBy().sum('age').collect()
[Row(sum(age)=7)]
>>> df3.groupBy().sum('age', 'height').collect()
[Row(sum(age)=7, sum(height)=165)]
"""
def pivot(self, pivot_col, values=None):
"""
        Pivots a column of the current :class:`DataFrame` and performs the specified aggregation.
        There are two versions of the pivot function: one that requires the caller to specify the list
of distinct values to pivot on, and one that does not. The latter is more concise but less
efficient, because Spark needs to first compute the list of distinct values internally.
.. versionadded:: 1.6.0
Parameters
----------
pivot_col : str
Name of the column to pivot.
        values : list, optional
List of values that will be translated to columns in the output DataFrame.
Examples
--------
# Compute the sum of earnings for each year by course with each course as a separate column
>>> df4.groupBy("year").pivot("course", ["dotNET", "Java"]).sum("earnings").collect()
[Row(year=2012, dotNET=15000, Java=20000), Row(year=2013, dotNET=48000, Java=30000)]
# Or without specifying column values (less efficient)
>>> df4.groupBy("year").pivot("course").sum("earnings").collect()
[Row(year=2012, Java=20000, dotNET=15000), Row(year=2013, Java=30000, dotNET=48000)]
>>> df5.groupBy("sales.year").pivot("sales.course").sum("sales.earnings").collect()
[Row(year=2012, Java=20000, dotNET=15000), Row(year=2013, Java=30000, dotNET=48000)]
"""
if values is None:
jgd = self._jgd.pivot(pivot_col)
else:
jgd = self._jgd.pivot(pivot_col, values)
return GroupedData(jgd, self._df)
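    # Editor's sketch (assumption based on the docstring above, not authoritative):
    # omitting ``values`` makes Spark compute the distinct pivot values first,
    # which is roughly what one would do by hand with:
    #
    #   courses = [r[0] for r in df4.select("course").distinct().collect()]
    #   df4.groupBy("year").pivot("course", courses).sum("earnings")
    #
    # Passing the list explicitly avoids that extra pass over the data.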
def _test():
import doctest
from pyspark.sql import Row, SparkSession
import pyspark.sql.group
globs = pyspark.sql.group.__dict__.copy()
spark = SparkSession.builder\
.master("local[4]")\
.appName("sql.group tests")\
.getOrCreate()
sc = spark.sparkContext
globs['sc'] = sc
globs['spark'] = spark
globs['df'] = sc.parallelize([(2, 'Alice'), (5, 'Bob')]) \
.toDF(StructType([StructField('age', IntegerType()),
StructField('name', StringType())]))
globs['df3'] = sc.parallelize([Row(name='Alice', age=2, height=80),
Row(name='Bob', age=5, height=85)]).toDF()
globs['df4'] = sc.parallelize([Row(course="dotNET", year=2012, earnings=10000),
Row(course="Java", year=2012, earnings=20000),
Row(course="dotNET", year=2012, earnings=5000),
Row(course="dotNET", year=2013, earnings=48000),
Row(course="Java", year=2013, earnings=30000)]).toDF()
globs['df5'] = sc.parallelize([
Row(training="expert", sales=Row(course="dotNET", year=2012, earnings=10000)),
Row(training="junior", sales=Row(course="Java", year=2012, earnings=20000)),
Row(training="expert", sales=Row(course="dotNET", year=2012, earnings=5000)),
Row(training="junior", sales=Row(course="dotNET", year=2013, earnings=48000)),
Row(training="expert", sales=Row(course="Java", year=2013, earnings=30000))]).toDF()
(failure_count, test_count) = doctest.testmod(
pyspark.sql.group, globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE | doctest.REPORT_NDIFF)
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
|
tovrstra/horton
|
refs/heads/master
|
horton/scripts/test/test_espfit.py
|
4
|
# -*- coding: utf-8 -*-
# HORTON: Helpful Open-source Research TOol for N-fermion systems.
# Copyright (C) 2011-2017 The HORTON Development Team
#
# This file is part of HORTON.
#
# HORTON is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# HORTON is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>
#
# --
import numpy as np
import os
from nose.plugins.attrib import attr
from horton import * # pylint: disable=wildcard-import,unused-wildcard-import
from horton.test.common import check_script, tmpdir
from horton.scripts.test.common import check_files
from horton.scripts.espfit import parse_wdens, parse_wnear, parse_wfar, max_at_edge
def test_wdens():
assert parse_wdens('fubar.cube') == ('fubar.cube', -9, 0.8)
assert parse_wdens('fubar.cube:-6') == ('fubar.cube', -6, 0.8)
assert parse_wdens('fubar.cube:-6:0.5') == ('fubar.cube', -6, 0.5)
def test_wnear():
assert parse_wnear('1:1.0') == {1: (1.0*angstrom, 0.5*angstrom)}
assert parse_wnear('1:1.0:0.3') == {1: (1.0*angstrom, 0.3*angstrom)}
assert parse_wnear(['1:1.0', '2:1.2']) == {1: (1.0*angstrom, 0.5*angstrom), 2: (1.2*angstrom, 0.6*angstrom)}
assert parse_wnear(['1:1.0:0.3', '2:1.2:0.2']) == {1: (1.0*angstrom, 0.3*angstrom), 2: (1.2*angstrom, 0.2*angstrom)}
def test_wfar():
assert parse_wfar('4.3') == (4.3*angstrom, 1.0*angstrom)
assert parse_wfar('4.2:0.3') == (4.2*angstrom, 0.3*angstrom)
@attr('slow')
def test_scripts():
# Generate some random system with random esp data
natom = 5
numbers = np.random.randint(1, 20, natom)
coordinates = np.random.uniform(0, 10, (natom, 3))
origin = np.zeros(3, float)
grid_rvecs = np.identity(3, float)*1.0
shape = np.array([10, 10, 10])
pbc = np.ones(3, int)
ugrid = UniformGrid(origin, grid_rvecs, shape, pbc)
esp_cube_data = np.random.uniform(-1, 1, shape)
rho_cube_data = np.random.uniform(-1, 1, shape)
mol_esp = IOData(coordinates=coordinates, numbers=numbers, grid=ugrid, cube_data=esp_cube_data)
mol_rho = mol_esp.copy()
mol_rho.cube_data = rho_cube_data
# Write the cube file to the tmpdir and run scripts (run 1)
with tmpdir('horton.scripts.test.test_espfit.test_scripts') as dn:
mol_esp.to_file(os.path.join(dn, 'esp.cube'))
check_script('horton-esp-cost.py esp.cube esp.h5 --wnear=0:1.0:0.5', dn)
check_script('horton-esp-fit.py esp.h5 other.h5', dn)
check_script('horton-esp-test.py esp.h5 other.h5:charges foo.h5', dn)
check_script('horton-esp-gen.py other.h5:charges esp.cube gen.h5', dn)
check_files(dn, ['esp.h5', 'other.h5', 'foo.h5', 'gen.h5'])
# Write the cube file to the tmpdir and run scripts (run 2)
with tmpdir('horton.scripts.test.test_espfit.test_scripts2') as dn:
mol_esp.to_file(os.path.join(dn, 'esp.cube'))
mol_rho.to_file(os.path.join(dn, 'rho.cube'))
check_script('horton-esp-cost.py esp.cube esp.h5 --wnear=0:1.0:0.5 --wdens=rho.cube --wsave=weight.cube', dn)
check_files(dn, ['esp.h5', 'weight.cube'])
check_script('horton-esp-fit.py esp.h5 other.h5', dn)
check_script('horton-esp-test.py esp.h5 other.h5:charges foo.h5', dn)
check_script('horton-esp-gen.py other.h5:charges esp.cube gen.h5', dn)
check_files(dn, ['esp.h5', 'other.h5', 'foo.h5', 'gen.h5'])
def test_max_at_edge():
weights = np.array([[[0.0, 1.0], [2.0, 3.0]], [[4.0, 5.0], [6.0, 7.0]]])
assert max_at_edge(weights, [1,1,1]) == 0.0
assert max_at_edge(weights, [1,1,0]) == 7.0
weights = np.array([[[0.0, 1.0, 2.0], [2.0, 3.0, 4.0]], [[4.0, 5.0, 6.0], [6.0, 9.0, 8.0]]])
assert max_at_edge(weights, [1,1,1]) == 0.0
assert max_at_edge(weights, [1,1,0]) == 8.0
assert max_at_edge(weights, [1,0,1]) == 9.0
assert max_at_edge(weights, [0,1,1]) == 9.0
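# Editor's note, inferred from the assertions above rather than from HORTON docs:
# the weight-option strings appear to follow this grammar (distances in angstrom):
#
#   --wdens=<cube>[:<lnrho0>[:<sigma>]]   defaults look like lnrho0=-9, sigma=0.8
#   --wnear=<Z>:<r0>[:<gamma>]            gamma seems to default to r0/2
#   --wfar=<r0>[:<gamma>]                 gamma seems to default to 1.0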
|
davisein/jitsudone
|
refs/heads/master
|
django/templatetags/tz.py
|
80
|
from __future__ import with_statement
from datetime import datetime, tzinfo
try:
import pytz
except ImportError:
pytz = None
from django.template import Node
from django.template import TemplateSyntaxError, Library
from django.utils import timezone
register = Library()
# HACK: datetime is an old-style class, create a new-style equivalent
# so we can define additional attributes.
class datetimeobject(datetime, object):
pass
# Template filters
@register.filter
def localtime(value):
"""
Converts a datetime to local time in the active time zone.
This only makes sense within a {% localtime off %} block.
"""
return do_timezone(value, timezone.get_current_timezone())
@register.filter
def utc(value):
"""
Converts a datetime to UTC.
"""
return do_timezone(value, timezone.utc)
@register.filter('timezone')
def do_timezone(value, arg):
"""
Converts a datetime to local time in a given time zone.
The argument must be an instance of a tzinfo subclass or a time zone name.
If it is a time zone name, pytz is required.
Naive datetimes are assumed to be in local time in the default time zone.
"""
if not isinstance(value, datetime):
return ''
# Obtain a timezone-aware datetime
try:
if timezone.is_naive(value):
default_timezone = timezone.get_default_timezone()
value = timezone.make_aware(value, default_timezone)
# Filters must never raise exceptions, and pytz' exceptions inherit
# Exception directly, not a specific subclass. So catch everything.
except Exception:
return ''
# Obtain a tzinfo instance
if isinstance(arg, tzinfo):
tz = arg
elif isinstance(arg, basestring) and pytz is not None:
try:
tz = pytz.timezone(arg)
except pytz.UnknownTimeZoneError:
return ''
else:
return ''
# Convert and prevent further conversion
result = value.astimezone(tz)
if hasattr(tz, 'normalize'):
# available for pytz time zones
result = tz.normalize(result)
# HACK: the convert_to_local_time flag will prevent
# automatic conversion of the value to local time.
result = datetimeobject(result.year, result.month, result.day,
result.hour, result.minute, result.second,
result.microsecond, result.tzinfo)
result.convert_to_local_time = False
return result
# Template tags
class LocalTimeNode(Node):
"""
Template node class used by ``localtime_tag``.
"""
def __init__(self, nodelist, use_tz):
self.nodelist = nodelist
self.use_tz = use_tz
def render(self, context):
old_setting = context.use_tz
context.use_tz = self.use_tz
output = self.nodelist.render(context)
context.use_tz = old_setting
return output
class TimezoneNode(Node):
"""
Template node class used by ``timezone_tag``.
"""
def __init__(self, nodelist, tz):
self.nodelist = nodelist
self.tz = tz
def render(self, context):
with timezone.override(self.tz.resolve(context)):
output = self.nodelist.render(context)
return output
class GetCurrentTimezoneNode(Node):
"""
Template node class used by ``get_current_timezone_tag``.
"""
def __init__(self, variable):
self.variable = variable
def render(self, context):
context[self.variable] = timezone.get_current_timezone_name()
return ''
@register.tag('localtime')
def localtime_tag(parser, token):
"""
Forces or prevents conversion of datetime objects to local time,
regardless of the value of ``settings.USE_TZ``.
Sample usage::
{% localtime off %}{{ value_in_utc }}{% endlocaltime %}
"""
bits = token.split_contents()
if len(bits) == 1:
use_tz = True
elif len(bits) > 2 or bits[1] not in ('on', 'off'):
raise TemplateSyntaxError("%r argument should be 'on' or 'off'" %
bits[0])
else:
use_tz = bits[1] == 'on'
nodelist = parser.parse(('endlocaltime',))
parser.delete_first_token()
return LocalTimeNode(nodelist, use_tz)
@register.tag('timezone')
def timezone_tag(parser, token):
"""
Enables a given time zone just for this block.
The ``timezone`` argument must be an instance of a ``tzinfo`` subclass, a
    time zone name, or ``None``. If it is a time zone name, pytz is required.
If it is ``None``, the default time zone is used within the block.
Sample usage::
{% timezone "Europe/Paris" %}
It is {{ now }} in Paris.
{% endtimezone %}
"""
bits = token.split_contents()
if len(bits) != 2:
raise TemplateSyntaxError("'%s' takes one argument (timezone)" %
bits[0])
tz = parser.compile_filter(bits[1])
nodelist = parser.parse(('endtimezone',))
parser.delete_first_token()
return TimezoneNode(nodelist, tz)
@register.tag("get_current_timezone")
def get_current_timezone_tag(parser, token):
"""
Stores the name of the current time zone in the context.
Usage::
{% get_current_timezone as TIME_ZONE %}
This will fetch the currently active time zone and put its name
into the ``TIME_ZONE`` context variable.
"""
args = token.contents.split()
if len(args) != 3 or args[1] != 'as':
raise TemplateSyntaxError("'get_current_timezone' requires "
"'as variable' (got %r)" % args)
return GetCurrentTimezoneNode(args[2])
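# Editor's sketch (illustrative template only; the zone name is arbitrary):
# the filters and tags registered above are typically combined like this:
#
#   {% load tz %}
#   {% timezone "Europe/Paris" %}
#       {{ value }} is rendered in Paris time here.
#   {% endtimezone %}
#   {% get_current_timezone as TIME_ZONE %}
#   {% localtime off %}{{ value|utc }}{% endlocaltime %}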
|
Endika/c2c-rd-addons
|
refs/heads/8.0
|
chricar_stock_dispo_production_V1/__openerp__.py
|
4
|
{ 'sequence': 500,
"name" : "Dispo Production"
, "version" : "1.0"
, "author" : "ChriCar Beteiligungs- und Beratungs- GmbH"
, "website" : "http://www.chricar.at"
, "description" : """Dispo Production
generated 2010-04-02 15:01:02+02"""
, "category" : "Client Modules/Farm"
, "depends" : ["sale","stock", "one2many_sorted","c2c_stock_accounting", "c2c_product_price_unit"]
, "init_xml" : ["mig_stock_dispo_production_init.xml"]
, "demo" : []
, "data" :
[ "stock_dispo_production_view.xml"
, "stock_prod_lot_update.xml"
]
, "auto_install" : False
, 'installable': False
, 'application' : False
}
|
antoinecarme/pyaf
|
refs/heads/master
|
tests/model_control/detailed/transf_Logit/model_control_one_enabled_Logit_ConstantTrend_NoCycle_LSTM.py
|
1
|
import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['Logit'] , ['ConstantTrend'] , ['NoCycle'] , ['LSTM'] );
|
vdloo/raptiformica
|
refs/heads/master
|
raptiformica/actions/update.py
|
1
|
from logging import getLogger
from raptiformica.actions.slave import provision_machine
from raptiformica.settings.types import get_first_server_type
log = getLogger(__name__)
def update_machine(server_type=None):
"""
Update the local machine by running the configured
commands from the installed provisioning modules
:param str server_type: name of the server type to provision the machine as
:return:
"""
log.info(
"Updating local as server type {}".format(server_type)
)
server_type = server_type or get_first_server_type()
provision_machine(server_type=server_type)
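# Editor's sketch (hypothetical caller, not part of this module): because
# update_machine falls back to get_first_server_type(), both calls below are
# valid; the 'headless' type name is made up for illustration.
#
#   from raptiformica.actions.update import update_machine
#   update_machine()
#   update_machine(server_type='headless')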
|
alxgu/ansible
|
refs/heads/devel
|
lib/ansible/modules/messaging/rabbitmq/rabbitmq_queue.py
|
10
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2015, Manuel Sousa <manuel.sousa@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: rabbitmq_queue
author: Manuel Sousa (@manuel-sousa)
version_added: "2.0"
short_description: Manage rabbitMQ queues
description:
    - This module uses the RabbitMQ REST API to create/delete queues
requirements: [ "requests >= 1.0.0" ]
options:
name:
description:
- Name of the queue to create
required: true
state:
description:
- Whether the queue should be present or absent
choices: [ "present", "absent" ]
default: present
durable:
description:
            - Whether the queue is durable or not
type: bool
default: 'yes'
auto_delete:
description:
            - Whether the queue should delete itself after all consumers have unsubscribed from it
type: bool
default: 'no'
message_ttl:
description:
            - How long a message can live in the queue before it is discarded (milliseconds)
default: forever
auto_expires:
description:
- How long a queue can be unused before it is automatically deleted (milliseconds)
default: forever
max_length:
description:
            - How many messages the queue can contain before it starts rejecting
default: no limit
dead_letter_exchange:
description:
- Optional name of an exchange to which messages will be republished if they
- are rejected or expire
dead_letter_routing_key:
description:
- Optional replacement routing key to use when a message is dead-lettered.
- Original routing key will be used if unset
max_priority:
description:
- Maximum number of priority levels for the queue to support.
- If not set, the queue will not support message priorities.
- Larger numbers indicate higher priority.
version_added: "2.4"
arguments:
description:
            - Extra arguments for the queue. If defined, this argument is a key/value dictionary
default: {}
extends_documentation_fragment:
- rabbitmq
'''
EXAMPLES = '''
# Create a queue
- rabbitmq_queue:
name: myQueue
# Create a queue on remote host
- rabbitmq_queue:
name: myRemoteQueue
login_user: user
login_password: secret
login_host: remote.example.org
'''
import json
import traceback
REQUESTS_IMP_ERR = None
try:
import requests
HAS_REQUESTS = True
except ImportError:
REQUESTS_IMP_ERR = traceback.format_exc()
HAS_REQUESTS = False
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils.six.moves.urllib import parse as urllib_parse
from ansible.module_utils.rabbitmq import rabbitmq_argument_spec
def main():
argument_spec = rabbitmq_argument_spec()
argument_spec.update(
dict(
state=dict(default='present', choices=['present', 'absent'], type='str'),
name=dict(required=True, type='str'),
durable=dict(default=True, type='bool'),
auto_delete=dict(default=False, type='bool'),
message_ttl=dict(default=None, type='int'),
auto_expires=dict(default=None, type='int'),
max_length=dict(default=None, type='int'),
dead_letter_exchange=dict(default=None, type='str'),
dead_letter_routing_key=dict(default=None, type='str'),
arguments=dict(default=dict(), type='dict'),
max_priority=dict(default=None, type='int')
)
)
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
url = "%s://%s:%s/api/queues/%s/%s" % (
module.params['login_protocol'],
module.params['login_host'],
module.params['login_port'],
urllib_parse.quote(module.params['vhost'], ''),
module.params['name']
)
if not HAS_REQUESTS:
module.fail_json(msg=missing_required_lib("requests"), exception=REQUESTS_IMP_ERR)
result = dict(changed=False, name=module.params['name'])
# Check if queue already exists
r = requests.get(url, auth=(module.params['login_user'], module.params['login_password']),
verify=module.params['ca_cert'], cert=(module.params['client_cert'], module.params['client_key']))
if r.status_code == 200:
queue_exists = True
response = r.json()
elif r.status_code == 404:
queue_exists = False
response = r.text
else:
module.fail_json(
msg="Invalid response from RESTAPI when trying to check if queue exists",
details=r.text
)
if module.params['state'] == 'present':
change_required = not queue_exists
else:
change_required = queue_exists
# Check if attributes change on existing queue
if not change_required and r.status_code == 200 and module.params['state'] == 'present':
if not (
response['durable'] == module.params['durable'] and
response['auto_delete'] == module.params['auto_delete'] and
(
('x-message-ttl' in response['arguments'] and response['arguments']['x-message-ttl'] == module.params['message_ttl']) or
('x-message-ttl' not in response['arguments'] and module.params['message_ttl'] is None)
) and
(
('x-expires' in response['arguments'] and response['arguments']['x-expires'] == module.params['auto_expires']) or
('x-expires' not in response['arguments'] and module.params['auto_expires'] is None)
) and
(
('x-max-length' in response['arguments'] and response['arguments']['x-max-length'] == module.params['max_length']) or
('x-max-length' not in response['arguments'] and module.params['max_length'] is None)
) and
(
('x-dead-letter-exchange' in response['arguments'] and
response['arguments']['x-dead-letter-exchange'] == module.params['dead_letter_exchange']) or
('x-dead-letter-exchange' not in response['arguments'] and module.params['dead_letter_exchange'] is None)
) and
(
('x-dead-letter-routing-key' in response['arguments'] and
response['arguments']['x-dead-letter-routing-key'] == module.params['dead_letter_routing_key']) or
('x-dead-letter-routing-key' not in response['arguments'] and module.params['dead_letter_routing_key'] is None)
) and
(
('x-max-priority' in response['arguments'] and
response['arguments']['x-max-priority'] == module.params['max_priority']) or
('x-max-priority' not in response['arguments'] and module.params['max_priority'] is None)
)
):
module.fail_json(
msg="RabbitMQ RESTAPI doesn't support attribute changes for existing queues",
)
# Copy parameters to arguments as used by RabbitMQ
for k, v in {
'message_ttl': 'x-message-ttl',
'auto_expires': 'x-expires',
'max_length': 'x-max-length',
'dead_letter_exchange': 'x-dead-letter-exchange',
'dead_letter_routing_key': 'x-dead-letter-routing-key',
'max_priority': 'x-max-priority'
}.items():
if module.params[k] is not None:
module.params['arguments'][v] = module.params[k]
# Exit if check_mode
if module.check_mode:
result['changed'] = change_required
result['details'] = response
result['arguments'] = module.params['arguments']
module.exit_json(**result)
# Do changes
if change_required:
if module.params['state'] == 'present':
r = requests.put(
url,
auth=(module.params['login_user'], module.params['login_password']),
headers={"content-type": "application/json"},
data=json.dumps({
"durable": module.params['durable'],
"auto_delete": module.params['auto_delete'],
"arguments": module.params['arguments']
}),
verify=module.params['ca_cert'],
cert=(module.params['client_cert'], module.params['client_key'])
)
elif module.params['state'] == 'absent':
r = requests.delete(url, auth=(module.params['login_user'], module.params['login_password']),
verify=module.params['ca_cert'], cert=(module.params['client_cert'], module.params['client_key']))
# RabbitMQ 3.6.7 changed this response code from 204 to 201
if r.status_code == 204 or r.status_code == 201:
result['changed'] = True
module.exit_json(**result)
else:
module.fail_json(
msg="Error creating queue",
status=r.status_code,
details=r.text
)
else:
module.exit_json(
changed=False,
name=module.params['name']
)
if __name__ == '__main__':
main()
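# Editor's sketch (hypothetical playbook values): main() copies the optional
# parameters into the queue's x-* arguments before issuing the PUT, so a task like
#
#   - rabbitmq_queue:
#       name: work
#       message_ttl: 60000            # sent as arguments['x-message-ttl']
#       dead_letter_exchange: dlx     # sent as arguments['x-dead-letter-exchange']
#       max_priority: 10              # sent as arguments['x-max-priority']
#
# ends up with those values inside the "arguments" field of the JSON request body.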
|