gt stringclasses 1 value | context stringlengths 2.49k 119k |
|---|---|
from __future__ import unicode_literals
import base64
import calendar
import datetime
import re
import sys
import unicodedata
from binascii import Error as BinasciiError
from email.utils import formatdate
from django.utils import six
from django.utils.datastructures import MultiValueDict
from django.utils.encoding import force_bytes, force_str, force_text
from django.utils.functional import keep_lazy_text
from django.utils.six.moves.urllib.parse import (
quote, quote_plus, unquote, unquote_plus, urlencode as original_urlencode,
urlparse,
)
# Matches one (optionally weak, "W/"-prefixed) quoted etag, capturing the
# contents with backslash escapes intact.
ETAG_MATCH = re.compile(r'(?:W/)?"((?:\\.|[^"])*)"')

# Lower-case month abbreviations, indexed 0-11, for HTTP date parsing.
MONTHS = 'jan feb mar apr may jun jul aug sep oct nov dec'.split()

# Building blocks for the three HTTP date formats of RFC2616 section 3.3.1.
__D = r'(?P<day>\d{2})'
__D2 = r'(?P<day>[ \d]\d)'
__M = r'(?P<mon>\w{3})'
__Y = r'(?P<year>\d{4})'
__Y2 = r'(?P<year>\d{2})'
__T = r'(?P<hour>\d{2}):(?P<min>\d{2}):(?P<sec>\d{2})'
RFC1123_DATE = re.compile(r'^\w{3}, %s %s %s %s GMT$' % (__D, __M, __Y, __T))
RFC850_DATE = re.compile(r'^\w{6,9}, %s-%s-%s %s GMT$' % (__D, __M, __Y2, __T))
ASCTIME_DATE = re.compile(r'^\w{3} %s %s %s %s$' % (__M, __D2, __T, __Y))

# Reserved characters per RFC 3986 section 2.2.
RFC3986_GENDELIMS = str(":/?#[]@")
RFC3986_SUBDELIMS = str("!$&'()*+,;=")
@keep_lazy_text
def urlquote(url, safe='/'):
    """
    Percent-encode ``url`` like urllib.quote(), but accept unicode input.

    The value is UTF-8 encoded before quoting, so the result can safely be
    passed to a subsequent iri_to_uri() call without double-quoting.
    """
    quoted = quote(force_str(url), force_str(safe))
    return force_text(quoted)
@keep_lazy_text
def urlquote_plus(url, safe=''):
    """
    Percent-encode ``url`` like urllib.quote_plus(), but accept unicode
    input.

    The value is UTF-8 encoded before quoting, so the result can safely be
    passed to a subsequent iri_to_uri() call without double-quoting.
    """
    quoted = quote_plus(force_str(url), force_str(safe))
    return force_text(quoted)
@keep_lazy_text
def urlunquote(quoted_url):
    """
    Undo django.utils.http.urlquote(): a unicode-aware wrapper around
    urllib.unquote().
    """
    unquoted = unquote(force_str(quoted_url))
    return force_text(unquoted)
@keep_lazy_text
def urlunquote_plus(quoted_url):
    """
    Undo django.utils.http.urlquote_plus(): a unicode-aware wrapper around
    urllib.unquote_plus().
    """
    unquoted = unquote_plus(force_str(quoted_url))
    return force_text(unquoted)
def urlencode(query, doseq=0):
    """
    Version of urllib.urlencode() that can operate on unicode strings:
    every key and value is coerced to a UTF-8 bytestring before encoding.
    """
    if isinstance(query, MultiValueDict):
        pairs = query.lists()
    elif hasattr(query, 'items'):
        pairs = query.items()
    else:
        pairs = query
    encoded = []
    for key, value in pairs:
        if isinstance(value, (list, tuple)):
            value = [force_str(item) for item in value]
        else:
            value = force_str(value)
        encoded.append((force_str(key), value))
    return original_urlencode(encoded, doseq)
def cookie_date(epoch_seconds=None):
    """
    Format a time for Netscape's cookie standard:
    'Wdy, DD-Mon-YYYY HH:MM:SS GMT'.

    ``epoch_seconds`` is a float of seconds since the epoch in UTC (as from
    time.time()); None means the current time.
    """
    rfcdate = formatdate(epoch_seconds)
    # Splice dashes into the RFC 1123 form ('Wdy, DD Mon YYYY ...') and
    # force the GMT suffix, as the cookie standard requires.
    return '{0}-{1}-{2} GMT'.format(rfcdate[:7], rfcdate[8:11], rfcdate[12:25])
def http_date(epoch_seconds=None):
    """
    Format a time per the RFC1123 date format from HTTP RFC2616 section
    3.3.1: 'Wdy, DD Mon YYYY HH:MM:SS GMT'.

    ``epoch_seconds`` is a float of seconds since the epoch in UTC (as from
    time.time()); None means the current time.
    """
    # usegmt makes formatdate emit the literal 'GMT' zone required by HTTP.
    return formatdate(epoch_seconds, usegmt=True)
def parse_http_date(date):
    """
    Parse a date in any of the three formats allowed by HTTP RFC2616
    section 3.3.1 and return it as an integer of seconds since the epoch,
    in UTC. Raises ValueError for unrecognised or invalid input.
    """
    # email.utils.parsedate covers RFC1123, but RFC2616 also mandates
    # RFC850 and asctime support, hence the hand-rolled patterns.
    for pattern in (RFC1123_DATE, RFC850_DATE, ASCTIME_DATE):
        match = pattern.match(date)
        if match is not None:
            break
    else:
        raise ValueError("%r is not in a valid HTTP date format" % date)
    try:
        year = int(match.group('year'))
        if year < 100:
            # Two-digit years: 70-99 mean 19xx, 00-69 mean 20xx.
            year += 2000 if year < 70 else 1900
        parsed = datetime.datetime(
            year,
            MONTHS.index(match.group('mon').lower()) + 1,
            int(match.group('day')),
            int(match.group('hour')),
            int(match.group('min')),
            int(match.group('sec')),
        )
        return calendar.timegm(parsed.utctimetuple())
    except Exception:
        six.reraise(ValueError, ValueError("%r is not a valid date" % date), sys.exc_info()[2])
def parse_http_date_safe(date):
    """
    Like parse_http_date(), but return None instead of raising when the
    input is invalid.
    """
    try:
        return parse_http_date(date)
    except Exception:
        return None
# Base 36 functions: useful for generating compact URLs
def base36_to_int(s):
    """
    Convert a base 36 string to an ``int``. Raises ``ValueError`` if the
    input won't fit into an int.
    """
    # 13 base36 digits are enough to encode any 64-bit integer; reject
    # longer input up front to avoid burning server resources.
    if len(s) > 13:
        raise ValueError("Base36 input too large")
    converted = int(s, 36)
    # On Python 2, also refuse values that would become a long (#15067);
    # Python 3 has no separate long type.
    if six.PY2 and converted > sys.maxint:
        raise ValueError("Base36 input too large")
    return converted
def int_to_base36(i):
    """
    Convert a non-negative integer to a base36 string.
    """
    digits = '0123456789abcdefghijklmnopqrstuvwxyz'
    if i < 0:
        raise ValueError("Negative base36 conversion input.")
    if six.PY2:
        if not isinstance(i, six.integer_types):
            raise TypeError("Non-integer base36 conversion input.")
        if i > sys.maxint:
            raise ValueError("Base36 conversion input too large.")
    if i < 36:
        # Single-digit fast path.
        return digits[i]
    encoded = ''
    while i:
        i, remainder = divmod(i, 36)
        encoded = digits[remainder] + encoded
    return encoded
def urlsafe_base64_encode(s):
    """
    Encode a bytestring in URL-safe base64, stripping any trailing newline
    and '=' padding characters.
    """
    encoded = base64.urlsafe_b64encode(s)
    return encoded.rstrip(b'\n=')
def urlsafe_base64_decode(s):
    """
    Decode a URL-safe base64 string, restoring any '=' padding that was
    stripped. Raises ValueError on malformed input.
    """
    s = force_bytes(s)
    padded = s.ljust(len(s) + len(s) % 4, b'=')
    try:
        return base64.urlsafe_b64decode(padded)
    except (LookupError, BinasciiError) as e:
        raise ValueError(e)
def parse_etags(etag_str):
    """
    Parse a string of one or more etags as found in If-None-Match and
    If-Match headers per RFC 2616. Return a list of etags with the
    surrounding double quotes removed and \\<CHAR> escapes undone.
    """
    found = ETAG_MATCH.findall(etag_str)
    if not found:
        # Wrong format; fall back to treating the whole header value as a
        # single opaque etag.
        return [etag_str]
    return [etag.encode('ascii').decode('unicode_escape') for etag in found]
def quote_etag(etag):
    """
    Wrap a string in double quotes, escaping any embedded backslashes and
    quotes first.
    """
    escaped = etag.replace('\\', '\\\\').replace('"', '\\"')
    return '"{0}"'.format(escaped)
def unquote_etag(etag):
    """
    Undo quote_etag(): strip the surrounding double quotes and unescape
    the contents. Falsy input is returned unchanged.
    """
    if not etag:
        return etag
    return etag.strip('"').replace('\\"', '"').replace('\\\\', '\\')
def is_same_domain(host, pattern):
    """
    Return True when ``host`` matches ``pattern`` exactly, or when the
    pattern starts with a period and ``host`` is that domain or any of its
    subdomains (e.g. ``.example.com`` matches ``example.com`` and
    ``foo.example.com``). An empty pattern never matches.
    """
    if not pattern:
        return False
    pattern = pattern.lower()
    if pattern == host:
        return True
    return pattern.startswith('.') and (
        host.endswith(pattern) or host == pattern[1:]
    )
def is_safe_url(url, host=None):
    """
    Return True if ``url`` is a safe redirection target: it stays on
    ``host`` and uses a safe scheme. An empty url is never safe.
    """
    if url is not None:
        url = url.strip()
    if not url:
        return False
    if six.PY2:
        try:
            url = force_text(url)
        except UnicodeDecodeError:
            return False
    # Chrome treats '\' exactly like '/' in paths, but a backslash could
    # also be part of basic-auth credentials, so both spellings must check
    # out as safe.
    if not _is_safe_url(url, host):
        return False
    return _is_safe_url(url.replace('\\', '/'), host)
def _is_safe_url(url, host):
# Chrome considers any URL with more than two slashes to be absolute, but
# urlparse is not so flexible. Treat any url with three slashes as unsafe.
if url.startswith('///'):
return False
url_info = urlparse(url)
# Forbid URLs like http:///example.com - with a scheme, but without a hostname.
# In that URL, example.com is not the hostname but, a path component. However,
# Chrome will still consider example.com to be the hostname, so we must not
# allow this syntax.
if not url_info.netloc and url_info.scheme:
return False
# Forbid URLs that start with control characters. Some browsers (like
# Chrome) ignore quite a few control characters at the start of a
# URL and might consider the URL as scheme relative.
if unicodedata.category(url[0])[0] == 'C':
return False
return ((not url_info.netloc or url_info.netloc == host) and
(not url_info.scheme or url_info.scheme in ['http', 'https']))
| |
# -*- coding: utf-8 -*-
from __future__ import division
from datetime import datetime
from logging import getLogger
from os.path import abspath, basename
from sys import exit
import work_queue as wq
from yerba.core import TASK_DONE
from yerba.services import Service
logger = getLogger('yerba.workqueue')

# Service registry name for this module.
name = "yerba"

# Cap on how many bytes of a task's captured output are kept in task info.
MAX_OUTPUT = 65536
def get_task_info(task):
    '''Summarise a completed work_queue task as a plain dict.'''
    # work_queue reports its timestamps in microseconds since the epoch.
    microseconds = 1000000.0
    fmt = "%d/%m/%y at %I:%M:%S%p"
    started = datetime.fromtimestamp(task.submit_time / microseconds).strftime(fmt)
    ended = datetime.fromtimestamp(task.finish_time / microseconds).strftime(fmt)
    return {
        'cmd': task.command,
        'started': started,
        'ended': ended,
        'elapsed': task.cmd_execution_time / microseconds,
        'taskid': task.id,
        'returned': task.return_status,
        # Truncate potentially huge task output before keeping it.
        'output': repr(task.output[:MAX_OUTPUT]),
    }
class WorkQueueService(Service):
    '''
    Scheduler service that submits jobs to a CCTools work_queue and
    reports completed tasks back through the notifier.
    '''
    name = "workqueue"
    group = "scheduler"

    def __init__(self, config, notifier):
        # Maps work_queue task id -> ([workflow names sharing the job], job)
        self.tasks = {}
        self.notifier = notifier
        try:
            self.project = config['project']
            self.catalog_server = config['catalog_server']
            self.catalog_port = int(config['catalog_port'])
            # NOTE(review): self.port is read from config but initialize()
            # creates the queue with port=-1 (random port) -- confirm intended.
            self.port = int(config['port'])
            self.log = config['log']
            if config['debug']:
                wq.set_debug_flag('all')
        except KeyError:
            logger.exception("Invalid workqueue configuration")
            exit(1)

    def initialize(self):
        '''
        Initializes work_queue for scheduling workers.

        A new work_queue will be created if an existing queue is not connected
        on the port. If an existing queue is running then the process will exit.
        '''
        try:
            self.queue = wq.WorkQueue(name=self.project, catalog=True, port=-1)
            self.queue.specify_catalog_server(self.catalog_server,
                                              self.catalog_port)
            self.queue.specify_log(self.log)
            logger.info('WORKQUEUE %s: Starting work queue on port %s',
                        self.project, self.queue.port)
        except Exception:
            logger.exception("The work queue could not be started")
            exit(1)

    def stop(self):
        '''
        Removes all jobs from the queue and stops the work queue.
        '''
        logger.info('WORKQUEUE %s: Stopping work queue on port %s',
                    self.project, self.queue.port)
        self.queue.shutdown_workers(0)

    def schedule(self, iterable, name, priority=None):
        '''
        Schedules jobs into work_queue.

        Jobs whose inputs are not ready are skipped. A job identical to one
        already queued is not resubmitted; the workflow name is attached to
        the existing task instead.
        '''
        logger.info("######### WORKQUEUE SCHEDULING ##########")
        for new_job in iterable:
            logger.info('WORKQUEUE %s: The workflow %s is scheduling job %s', self.project, name, new_job)
            if not new_job.ready():
                logger.info('WORKFLOW %s: Job %s was not scheduled waiting on inputs', name, new_job)
                continue

            # Deduplicate: attach this workflow to an existing task when an
            # identical job is already queued.
            skip = False
            for (taskid, item) in self.tasks.items():
                (names, job) = item
                if new_job == job:
                    if name not in names:
                        names.append(name)
                    # BUG FIX: the implicit string concatenation was missing
                    # a space ("beenassigned").
                    logger.info(('WORKQUEUE %s: This job has already been '
                                 'assigned to task %s'), self.project, taskid)
                    self.tasks[taskid] = (names, job)
                    skip = True
                    break
            if skip:
                continue

            cmd = str(new_job)
            task = wq.Task(cmd)

            # Attach inputs; a list entry with a truthy second element is
            # transferred as a directory.
            for input_file in new_job.inputs:
                if isinstance(input_file, list) and input_file[1]:
                    remote_input = basename(abspath(input_file[0]))
                    task.specify_directory(str(input_file[0]), str(remote_input),
                                           wq.WORK_QUEUE_INPUT, recursive=1)
                else:
                    remote_input = basename(abspath(input_file))
                    task.specify_input_file(str(input_file), str(remote_input),
                                            wq.WORK_QUEUE_INPUT)

            # Attach outputs; list entries are directories, never cached.
            for output_file in new_job.outputs:
                if isinstance(output_file, list):
                    remote_output = basename(abspath(output_file[0]))
                    task.specify_directory(str(output_file[0]), str(remote_output),
                                           wq.WORK_QUEUE_OUTPUT, recursive=1, cache=False)
                else:
                    remote_output = basename(abspath(output_file))
                    task.specify_file(str(output_file), str(remote_output),
                                      wq.WORK_QUEUE_OUTPUT, cache=False)

            new_id = self.queue.submit(task)
            logger.info('WORKQUEUE %s: Task has been submited and assigned [id %s]', self.project, new_id)
            self.tasks[new_id] = ([name], new_job)
        logger.info("######### WORKQUEUE END SCHEDULING ##########")

    def update(self):
        '''
        Updates the scheduled workflow.

        If a task is completed new tasks from the workflow will be scheduled.
        '''
        task = self.queue.wait(0)
        if not task:
            return

        logger.info("######### WORKQUEUE UPDATING ##########")
        logger.info("WORKQUEUE %s: Fetching task from the work queue",
                    self.project)
        try:
            logger.debug("INSPECTING TASK: %s", str(task))
            logger.debug(('WORKQUEUE %s: Recieved task %s from work_queue with'
                          ' return_status %s'), self.project, task.id, task.return_status)
        except Exception:
            # Best effort only: a task missing attributes must not stop the
            # update cycle.
            logger.debug("Couldn't inspect the task")

        if task.id not in self.tasks:
            # BUG FIX: a stray trailing comma made the log format string a
            # tuple, which logging cannot interpolate.
            logger.info(('WORKQUEUE %s: The job for id %s could '
                         'not be found.'), self.project, task.id)
            return

        (names, job) = self.tasks[task.id]
        info = get_task_info(task)
        del self.tasks[task.id]

        # One task may satisfy several workflows; notify each of them.
        for workflow in names:
            self.notifier.notify(TASK_DONE, workflow, job, info)
        logger.info("######### WORKQUEUE END UPDATING ##########")

    def cancel(self, name):
        '''
        Removes the named workflow from every task; a task that no other
        workflow still depends on is cancelled in the queue.
        '''
        # BUG FIX: iterate over a snapshot -- the dict is mutated inside the
        # loop, which raises RuntimeError on Python 3.
        for (taskid, item) in list(self.tasks.items()):
            (names, job) = item
            logger.info('WORKFLOW %s: Requesting task %s to be cancelled',
                        name, taskid)
            if name in names:
                names.remove(name)
            else:
                continue
            if not names:
                task = self.queue.cancel_by_taskid(taskid)
                if task:
                    del self.tasks[taskid]
                    logger.info("WORKQUEUE %s: The task %s was cancelled",
                                self.project, task.taskid)
                else:
                    logger.error("WORKQUEUE %s: failed to cancel %s",
                                 self.project, taskid)
            else:
                msg = ('WORKQUEUE %s: The task %s was not cancelled '
                       'workflows %s depend on the task')
                logger.info(msg, self.project, taskid, ', '.join(names))
                self.tasks[taskid] = (names, job)
| |
'''
PMDL Export Blender Addon
By Jack Andersen <jackoalan@gmail.com>
This file defines the main `pmdl` class used to generate PMDL file instances.
The class is initialised with a MESH or ARMATURE object (ARMATUREs generate `PAR1`).
The provided MESH or ARMATURE may hierarchically contain other MESH objects,
resulting in an appropriately sub-divided PMDL. The MATERIAL name assigned to
MESH objects is hashed with SHA1 and used to reference a PSPLC implementing
the appropriate shader.
Once the object is assigned, the appropriate draw-generator is initialised
(with optional indexing parameters set) and loaded into the PMDL instance.
The actual PMDL generation may then happen with these objects gathered together.
'''
from . import pmdl_par2_octree
from . import pmdl_par1_rigging
import struct
import bpy
import hashlib
import posixpath
# Lookup PSPL material of MESH object
def lookup_pspl_material(mesh_obj):
    """
    Walk up the parent chain from `mesh_obj` and, for the first ancestor
    (or the object itself) with a `pspl_material` set, derive the .pspl
    shader path next to the current .blend file.

    Returns the shader path string, or None when no ancestor defines a
    material.
    """
    cur_mesh = mesh_obj
    while cur_mesh is not None:
        if cur_mesh.pspl_material:
            # BUG FIX: the original called `os.path.*` and the Python-2-only
            # `string.replace` without importing either module, raising
            # NameError on every hit; use posixpath (already imported) and
            # str.replace instead.
            base_name = posixpath.splitext(posixpath.basename(bpy.data.filepath))[0]
            mesh_name = mesh_obj.name.replace("/", "_").replace("\\", "_")
            material_name = cur_mesh.pspl_material.replace("/", "_").replace("\\", "_")
            shader_name = posixpath.normpath(
                posixpath.dirname(bpy.data.filepath) + "/" + base_name +
                ".psplblend/" + mesh_name + "/" + material_name + ".pspl")
            return shader_name
        cur_mesh = cur_mesh.parent
    return None
def add_vec3(a, b):
    """Component-wise sum of two 3-component vectors, as a tuple."""
    x, y, z = a[0] + b[0], a[1] + b[1], a[2] + b[2]
    return (x, y, z)
# Round up to nearest 32 multiple
def ROUND_UP_32(num):
    """Round ``num`` up to the next multiple of 32 (exact multiples pass through)."""
    remainder = num % 32
    if not remainder:
        return num
    return num - remainder + 32
class pmdl:
    # Expand master bounding box to fit added mesh object
    def _accumulate_bound_box(self, obj):
        """Grow self.bound_box_min/max so they enclose obj's bounding box."""
        # Blender bound_box corners 0 and 6 are the opposite extremes.
        lo = obj.bound_box[0]
        hi = obj.bound_box[6]
        if self.bound_box_accounted:
            for axis in range(3):
                self.bound_box_min[axis] = min(self.bound_box_min[axis], lo[axis])
                self.bound_box_max[axis] = max(self.bound_box_max[axis], hi[axis])
        else:
            # First mesh seen: adopt its box wholesale.
            self.bound_box_accounted = True
            for axis in range(3):
                self.bound_box_min[axis] = lo[axis]
                self.bound_box_max[axis] = hi[axis]
    # Recursive routine to add all MESH objects as individual PMDL collections
    def _recursive_add_mesh(self, draw_gen, object, root_rel_loc, rigger):
        """
        Depth-first walk over `object` and its children, feeding every MESH
        with at least one polygon to `draw_gen` as a triangulated,
        root-relative copy.

        `root_rel_loc` accumulates the offset from the root object;
        `rigger`, when given, receives each mesh's vertex groups.
        """
        # Start with root object (make sure it's a MESH)
        if object.type == 'MESH':
            # Only add if there is at least *one* polygon in mesh
            if len(object.data.polygons):
                # Copy mesh
                copy_name = object.name + "_pmdltri"
                copy_mesh = bpy.data.meshes.new(copy_name)
                copy_obj = bpy.data.objects.new(copy_name, copy_mesh)
                # The object's data is replaced with a render-settings
                # evaluated snapshot of the original mesh.
                copy_obj.data = object.to_mesh(bpy.context.scene, True, 'RENDER')
                copy_obj.scale = object.scale
                copy_obj.location = root_rel_loc # This is set to be root-object-relative
                bpy.context.scene.objects.link(copy_obj)
                # If rigging, set original mesh's vertex groups as context
                if rigger:
                    rigger.mesh_vertex_groups = object.vertex_groups
                # Triangulate mesh (operators act on the active object)
                bpy.context.scene.objects.active = copy_obj
                bpy.ops.object.mode_set(mode='EDIT')
                bpy.ops.mesh.select_all(action='SELECT')
                bpy.ops.mesh.quads_convert_to_tris()
                bpy.ops.object.mode_set(mode='OBJECT')
                # Set mesh's coordinates to be root-relative
                # NOTE(review): this iterates copy_mesh (the freshly created,
                # still-empty mesh), not copy_obj.data (the to_mesh snapshot)
                # -- confirm which mesh should be offset.
                for vert in copy_mesh.vertices:
                    for i in range(3):
                        vert.co[i] += root_rel_loc[i]
                # Accumulate polygon total (for progress meter)
                self.prog_poly_total += len(copy_obj.data.polygons)
                # Recursively reach out to children here (to build up polygon count for progress meter)
                for child in object.children:
                    self._recursive_add_mesh(draw_gen, child, add_vec3(root_rel_loc, child.location), rigger)
                # Add mesh to draw generator
                draw_gen.add_mesh(self, copy_obj, rigger)
                # Delete copied mesh from scene (and add to set to be deleted later)
                bpy.context.scene.objects.unlink(copy_obj)
                self.all_objs.add(copy_obj)
                # Both the to_mesh() snapshot and the placeholder mesh are
                # tracked so __del__ can free them.
                self.all_meshes.add(copy_obj.data)
                self.all_meshes.add(copy_mesh)
                # Account for mesh bounding box
                self._accumulate_bound_box(object)
            else:
                # Recursively reach out to children here (if parent has no polygons)
                for child in object.children:
                    self._recursive_add_mesh(draw_gen, child, add_vec3(root_rel_loc, child.location), rigger)
        else:
            # Recursively reach out to children here (if parent not a MESH)
            for child in object.children:
                self._recursive_add_mesh(draw_gen, child, add_vec3(root_rel_loc, child.location), rigger)
    # Update progress meter with another polygon (called from draw generator)
    def prog_add_polygon(self):
        self.prog_poly_completed += 1
        # The window-manager progress UI only exists when Blender runs
        # interactively, not in background (batch) mode.
        if not bpy.app.background:
            bpy.context.window_manager.progress_update(self.prog_poly_completed / self.prog_poly_total)
    # PMDL Constructor, given a blender object for initialisation
    def __init__(self, object, draw_gen):
        """
        Initialise a PMDL from a blender MESH or ARMATURE `object`, walking
        its hierarchy and feeding every mesh to `draw_gen`. An ARMATURE
        selects the PAR1 sub-type (rigged); plain MESH trees start as PAR0.
        """
        self.object = object
        self.draw_gen = draw_gen
        # Set of *all* included objects and their meshes
        self.all_objs = set()
        self.all_meshes = set()
        # Array of shader PSPLC hashes
        self.shader_hashes = []
        # Array of bone names
        self.bone_names = []
        # Bound box of all meshes; `accounted` flips once the first mesh
        # seeds the min/max values.
        self.bound_box_accounted = False
        self.bound_box_min = [0,0,0]
        self.bound_box_max = [0,0,0]
        self.sub_type = 'PAR0'
        self.rigging = None
        if object.type == 'ARMATURE':
            # Create a PAR1 file if given an ARMATURE
            self.sub_type = 'PAR1'
            self.rigging = pmdl_par1_rigging.pmdl_par1_rigging(8, object)
        # Start tracking polygon progress (UI only when interactive)
        self.prog_poly_total = 0
        self.prog_poly_completed = 0
        if not bpy.app.background:
            bpy.context.window_manager.progress_begin(0.0, 1.0)
        # Break down blender object into collections
        # NOTE(review): self.collections is initialised here but the walk
        # registers meshes with draw_gen -- confirm this set is still used.
        self.collections = set()
        self._recursive_add_mesh(draw_gen, object, (0,0,0), self.rigging)
        # Remove progress meter
        if not bpy.app.background:
            bpy.context.window_manager.progress_end()
# Get shader index
def get_shader_index(self, shader_name):
name_hash = hashlib.sha1(shader_name.encode('utf-8')).digest()
for i in range(len(self.shader_hashes)):
if name_hash == exist_hash[i]:
return i
self.shader_hashes.append(name_hash)
return len(self.shader_hashes) - 1
# Generate hash ref table
def gen_shader_refs(self, endian_char):
table = bytearray()
table += struct.pack(endian_char + 'I', len(self.shader_hashes))
for hash in self.shader_hashes:
table += hash
return table
# Get bone offset
def get_bone_offset(self, new_name, psize):
offset = 0
for bone_name in self.bone_names:
if bone_name == new_name:
return offset
offset += len(bone_name) + 1
self.bone_names.append(new_name)
return offset
# Generate bone string table
def gen_bone_table(self, endian_char, psize):
table = bytearray()
for bone in self.bone_names:
bone_str = bone.encode('utf-8')
table += bone_str
table.append(0)
return table
# When initialised with a MESH hierarchy,
# a PMDL may be made as a `PAR2` when this is called.
# `levels` is the count of non-leaf octree levels that will be produced;
# it must be at least 1.
def add_octree(self, levels):
if levels < 1:
return "Unable to make PAR2; there must be at least 1 octree level requested"
if self.sub_type == 'PAR1':
return "Unable to make PAR2; the PMDL was initialised as a `PAR1` (ARMATURE)"
# Set self type
self.sub_type = 'PAR2'
# Add octree and perform subdivision
self.octree = pmdl_par2_octree.pmdl_par2_octree(self, levels)
return None
    # Finally, generate complete collection buffer
    def generate_collection_buffer(self, endianness, psize):
        """
        Serialise every draw-generator collection into one bytearray:
        fixed-size per-collection headers, then vertex buffers, element
        buffers, and index buffers, with 32-byte alignment padding between
        groups.

        `endianness` is 'LITTLE' or 'BIG'; `psize` is the target pointer
        size in bytes.
        """
        # NOTE(review): endian_char stays None for any other endianness
        # value, which would make struct.pack raise below -- confirm callers
        # only ever pass 'LITTLE'/'BIG'.
        endian_char = None
        if endianness == 'LITTLE':
            endian_char = '<'
        elif endianness == 'BIG':
            endian_char = '>'
        # Bytearray for collection bytes
        collection_bytes = bytearray()
        # Arrays to hold bytes objects for generated collection buffers
        collection_header_buffers = []
        collection_vertex_buffers = []
        collection_element_buffers = []
        collection_index_buffers = []
        # Calculate size of header and padding bits reserved per mesh for
        # runtime shader pointers.
        shader_pointer_space = psize + ((28+psize)%psize)
        # Begin generating individual collection buffers
        for i in range(len(self.draw_gen.collections)):
            header = bytearray()
            # Generate platform-specific portion of vertex buffer
            uv_count, max_bone_count, vert_bytes = self.draw_gen.generate_vertex_buffer(i, endian_char, psize)
            collection_vertex_buffers.append(vert_bytes)
            # Generate platform-specific portion of element buffer
            primitive_meshes, element_bytes = self.draw_gen.generate_element_buffer(i, endian_char, psize)
            collection_element_buffers.append(element_bytes)
            idx_buf = bytearray()
            # Collect mesh headers
            mesh_headers = bytearray()
            for mesh_primitives in primitive_meshes:
                # Individual mesh bounding box (corners 0 and 6 as floats)
                for comp in mesh_primitives['mesh'].bound_box[0]:
                    mesh_headers += struct.pack(endian_char + 'f', comp)
                for comp in mesh_primitives['mesh'].bound_box[6]:
                    mesh_headers += struct.pack(endian_char + 'f', comp)
                # Individual mesh shader index (-1 when no material found)
                material_name = lookup_pspl_material(mesh_primitives['mesh'])
                if material_name is not None:
                    shader_idx = self.get_shader_index(material_name)
                else:
                    shader_idx = -1
                mesh_headers += struct.pack(endian_char + 'i', shader_idx)
                # Individual mesh shader pointer space
                # NOTE(review): this loop reuses `i` from the outer
                # collection loop; harmless today since the outer `i` is not
                # read again afterwards, but fragile to reordering.
                for i in range(shader_pointer_space):
                    mesh_headers.append(0)
            # Insert padding before index buffers (8 accounts for the
            # count+offset words written just below)
            mesh_headers_len = len(mesh_headers) + 8
            mesh_headers_len_round = ROUND_UP_32(mesh_headers_len)
            mesh_headers_len_pad = mesh_headers_len_round - mesh_headers_len
            for i in range(mesh_headers_len_pad):
                mesh_headers.append(0)
            # Count of meshes and offset to index buffers (after this value)
            idx_buf += struct.pack(endian_char + 'I', len(primitive_meshes))
            idx_buf += struct.pack(endian_char + 'I', len(mesh_headers) + 8)
            idx_buf += mesh_headers
            # Generate platform-specific portion of index buffer
            idx_buf += self.draw_gen.generate_index_buffer(primitive_meshes, endian_char, psize, self.rigging)
            collection_index_buffers.append(idx_buf)
            header += struct.pack(endian_char + 'H', uv_count)
            header += struct.pack(endian_char + 'H', max_bone_count)
            collection_header_buffers.append(header)
        # Add together header and buffer sizes to get offsets; each final
        # collection header is 24 bytes once the offset words are appended.
        headers_len = len(collection_header_buffers) * 24
        headers_len_round = ROUND_UP_32(headers_len)
        headers_len_pad = headers_len_round - headers_len
        # Vertex buffer offsets (appended to each header as offset + length)
        cur_buf_offset = headers_len_round
        for i in range(len(collection_header_buffers)):
            collection_header_buffers[i] += struct.pack(endian_char + 'I', cur_buf_offset)
            collection_header_buffers[i] += struct.pack(endian_char + 'I', len(collection_vertex_buffers[i]))
            cur_buf_offset += len(collection_vertex_buffers[i])
        # Element buffer offsets
        for i in range(len(collection_header_buffers)):
            collection_header_buffers[i] += struct.pack(endian_char + 'I', cur_buf_offset)
            collection_header_buffers[i] += struct.pack(endian_char + 'I', len(collection_element_buffers[i]))
            cur_buf_offset += len(collection_element_buffers[i])
        # Index buffer offsets (index buffers start 32-byte aligned)
        vert_end_round = ROUND_UP_32(cur_buf_offset)
        vert_end_pad = vert_end_round - cur_buf_offset
        cur_buf_offset = vert_end_round
        for i in range(len(collection_header_buffers)):
            collection_header_buffers[i] += struct.pack(endian_char + 'I', cur_buf_offset)
            cur_buf_offset += len(collection_index_buffers[i])
        # Begin generating master buffer: headers, pad, vertex buffers,
        # element buffers, pad, index buffers.
        for header in collection_header_buffers:
            collection_bytes += header
        for i in range(headers_len_pad):
            collection_bytes.append(0)
        for vert_buf in collection_vertex_buffers:
            collection_bytes += vert_buf
        for elem_buf in collection_element_buffers:
            collection_bytes += elem_buf
        for i in range(vert_end_pad):
            collection_bytes.append(0)
        for idx_buf in collection_index_buffers:
            collection_bytes += idx_buf
        # Add padding 0s so the whole buffer ends 32-byte aligned
        buf_end_len = len(collection_bytes)
        buf_end_round = ROUND_UP_32(buf_end_len)
        buf_end_pad = buf_end_round - buf_end_len
        for i in range(buf_end_pad):
            collection_bytes.append(0)
        # Done
        return collection_bytes
    # This routine will generate a PMDL file with the requested
    # endianness ['LITTLE', 'BIG'] and pointer-size at the requested path
    def generate_file(self, endianness, psize, file_path):
        """
        Write the complete PMDL file to `file_path`: a 64-byte header,
        optional rigging sub-buffers, the collection buffer, the shader
        hash table and the bone name table, each 32-byte aligned.
        """
        endian_char = None
        if endianness == 'LITTLE':
            endian_char = '<'
        elif endianness == 'BIG':
            endian_char = '>'
        # First, calculate various offsets into PMDL file
        header_size = 64
        collection_buffer = self.generate_collection_buffer(endianness, psize)
        skeleton_info_buffer = bytes()
        rigging_info_buffer = bytes()
        animation_info_buffer = bytes()
        octree_buffer = bytes()
        if self.rigging:
            skeleton_info_buffer = self.rigging.generate_skeleton_info(self, endian_char, psize)
            # NOTE(review): grouping reconstructed from a whitespace-mangled
            # dump -- confirm whether animation info is also '_GEN'-only.
            if self.draw_gen.file_identifier == '_GEN':
                rigging_info_buffer = self.rigging.generate_rigging_info(self, endian_char, psize)
                animation_info_buffer = self.rigging.generate_animation_info(self, endian_char, psize)
        collection_offset = header_size + len(skeleton_info_buffer) + len(rigging_info_buffer) + len(animation_info_buffer) + len(octree_buffer)
        collection_pre_pad = ROUND_UP_32(collection_offset) - collection_offset
        collection_offset += collection_pre_pad
        shader_refs_offset = collection_offset + len(collection_buffer)
        shader_refs_buffer = self.gen_shader_refs(endian_char)
        shader_refs_end = shader_refs_offset + len(shader_refs_buffer)
        shader_refs_padding = ROUND_UP_32(shader_refs_end) - shader_refs_end
        bone_names_offset = shader_refs_end + shader_refs_padding
        bone_names_buffer = self.gen_bone_table(endian_char, psize)
        total_size = bone_names_offset + len(bone_names_buffer)
        total_size_round = ROUND_UP_32(total_size)
        total_size_pad = total_size_round - total_size
        # Open file and write in header
        # NOTE(review): the file handle is never explicitly closed -- it is
        # released only when garbage collected; consider a `with` block.
        pmdl_file = open(file_path, 'wb')
        pmdl_header = bytearray()
        pmdl_header += b'PMDL'
        if endianness == 'LITTLE':
            pmdl_header += b'_LIT'
        elif endianness == 'BIG':
            pmdl_header += b'_BIG'
        pmdl_header += struct.pack(endian_char + 'I', psize)
        pmdl_header += self.sub_type.encode('utf-8')
        pmdl_header += self.draw_gen.file_identifier.encode('utf-8')
        # Master bounding box (min then max, as floats)
        for comp in self.bound_box_min:
            pmdl_header += struct.pack(endian_char + 'f', comp)
        for comp in self.bound_box_max:
            pmdl_header += struct.pack(endian_char + 'f', comp)
        pmdl_header += struct.pack(endian_char + 'I', collection_offset)
        pmdl_header += struct.pack(endian_char + 'I', len(self.draw_gen.collections))
        pmdl_header += struct.pack(endian_char + 'I', shader_refs_offset)
        pmdl_header += struct.pack(endian_char + 'I', bone_names_offset)
        # Pad the header out to its fixed 64-byte size
        for i in range(4):
            pmdl_header.append(0)
        pmdl_file.write(pmdl_header)
        # Now write sub-buffers
        pmdl_file.write(skeleton_info_buffer)
        pmdl_file.write(rigging_info_buffer)
        pmdl_file.write(animation_info_buffer)
        pmdl_file.write(octree_buffer)
        for i in range(collection_pre_pad):
            pmdl_file.write(b'\x00')
        pmdl_file.write(collection_buffer)
        pmdl_file.write(shader_refs_buffer)
        for i in range(shader_refs_padding):
            pmdl_file.write(b'\x00')
        pmdl_file.write(bone_names_buffer)
        # Trailing pad uses 0xff, unlike the 0x00 alignment padding above
        for i in range(total_size_pad):
            pmdl_file.write(b'\xff')
    # Delete copied meshes from blender data
    def __del__(self):
        # Free the temporary triangulated objects/meshes created during
        # export so they don't accumulate in the .blend data.
        for obj in self.all_objs:
            bpy.data.objects.remove(obj)
        for mesh in self.all_meshes:
            bpy.data.meshes.remove(mesh)
| |
#!/usr/bin/env python
"""tilestache-clean.py will flush your cache.
This script is intended to be run directly. This example cleans the area around
West Oakland (http://sta.mn/ck) in the "osm" layer, for zoom levels 12-15:
tilestache-clean.py -c ./config.json -l osm -b 37.79 -122.35 37.83 -122.25 -e png 12 13 14 15
See `tilestache-clean.py --help` for more information.
"""
from __future__ import print_function
from sys import stderr, path
from optparse import OptionParser
try:
from json import dump as json_dump
except ImportError:
from simplejson import dump as json_dump
#
# Most imports can be found below, after the --include-path option is known.
#
# Command-line interface. Most heavy imports happen in __main__, after the
# --include-path option has had a chance to extend sys.path.
parser = OptionParser(usage="""%prog [options] [zoom...]
Cleans a single layer in your TileStache configuration - no images are returned,
and TileStache ends up with an empty in selected areas cache. Bounding box is
given as a pair of lat/lon coordinates, e.g. "37.788 -122.349 37.833 -122.246".
Output is a list of tile paths as they are created.
Configuration, bbox, and layer options are required; see `%prog --help` for info.""")

# Defaults mirror the West Oakland example from the module docstring.
defaults = dict(extension='png', padding=0, verbose=True, bbox=(37.777, -122.352, 37.839, -122.226))
parser.set_defaults(**defaults)

parser.add_option('-c', '--config', dest='config',
                  help='Path to configuration file.')

parser.add_option('-l', '--layer', dest='layer',
                  help='Layer name from configuration. "ALL" is a special value that will clean all layers in turn. If you have an actual layer named "ALL", use "ALL LAYERS" instead.')

parser.add_option('-b', '--bbox', dest='bbox',
                  help='Bounding box in floating point geographic coordinates: south west north east.',
                  type='float', nargs=4)

parser.add_option('-p', '--padding', dest='padding',
                  help='Extra margin of tiles to add around bounded area. Default value is %s (no extra tiles).' % repr(defaults['padding']),
                  type='int')

parser.add_option('-e', '--extension', dest='extension',
                  help='Optional file type for rendered tiles. Default value is %s.' % repr(defaults['extension']))

parser.add_option('-f', '--progress-file', dest='progressfile',
                  help="Optional JSON progress file that gets written on each iteration, so you don't have to pay close attention.")

parser.add_option('-q', action='store_false', dest='verbose',
                  help='Suppress chatty output, --progress-file works well with this.')

parser.add_option('-i', '--include-path', dest='include',
                  help="Add the following colon-separated list of paths to Python's include path (aka sys.path)")

parser.add_option('--tile-list', dest='tile_list',
                  help='Optional file of tile coordinates, a simple text list of Z/X/Y coordinates. Overrides --bbox and --padding.')
def generateCoordinates(ul, lr, zooms, padding):
    """ Generate a stream of (offset, count, coordinate) tuples for seeding.

        Flood-fill coordinates based on two corners, a list of zooms and padding.
    """
    def padded_corners(zoom):
        # Upper-left / lower-right tile corners at this zoom, grown by padding.
        upper_left = ul.zoomTo(zoom).container().left(padding).up(padding)
        lower_right = lr.zoomTo(zoom).container().right(padding).down(padding)
        return upper_left, lower_right

    # First pass: total tile count across every requested zoom level.
    count = 0

    for zoom in zooms:
        upper_left, lower_right = padded_corners(zoom)
        rows = lower_right.row + 1 - upper_left.row
        cols = lower_right.column + 1 - upper_left.column
        count += int(rows * cols)

    # Second pass: emit each coordinate alongside a running offset.
    offset = 0

    for zoom in zooms:
        upper_left, lower_right = padded_corners(zoom)

        for row in range(int(upper_left.row), int(lower_right.row + 1)):
            for column in range(int(upper_left.column), int(lower_right.column + 1)):
                yield (offset, count, Coordinate(row, column, zoom))
                offset += 1
def listCoordinates(filename):
    """ Generate a stream of (offset, count, coordinate) tuples for seeding.

        Read coordinates from a file with one Z/X/Y coordinate per line.
    """
    # Parse the whole file up front so the total count is known and the
    # file handle is closed promptly (the previous version never closed it).
    with open(filename, 'r') as tiles_file:
        coords = []
        for line in tiles_file:
            zoom, column, row = line.strip().split('/')
            coords.append(Coordinate(int(row), int(column), int(zoom)))

    count = len(coords)

    for (offset, coord) in enumerate(coords):
        yield (offset, count, coord)
if __name__ == '__main__':
    options, zooms = parser.parse_args()

    if options.include:
        # Prepend user-supplied directories to sys.path so TileStache and
        # any custom providers can be imported from non-standard locations.
        for p in options.include.split(':'):
            path.insert(0, p)

    from TileStache import parseConfig, getTile
    from TileStache.Core import KnownUnknown
    from TileStache.Caches import Disk, Multi

    from ModestMaps.Core import Coordinate
    from ModestMaps.Geo import Location

    try:
        # Validate options; any KnownUnknown is reported via parser.error().
        if options.config is None:
            raise KnownUnknown('Missing required configuration (--config) parameter.')

        if options.layer is None:
            raise KnownUnknown('Missing required layer (--layer) parameter.')

        config = parseConfig(options.config)

        if options.layer in ('ALL', 'ALL LAYERS') and options.layer not in config.layers:
            # clean every layer in the config
            layers = config.layers.values()

        elif options.layer not in config.layers:
            raise KnownUnknown('"%s" is not a layer I know about. Here are some that I do know about: %s.' % (options.layer, ', '.join(sorted(config.layers.keys()))))

        else:
            # clean just one layer in the config
            layers = [config.layers[options.layer]]

        verbose = options.verbose
        extension = options.extension
        progressfile = options.progressfile

        # Normalize the bbox so south <= north and west <= east regardless
        # of the order the user typed the two corners in.
        lat1, lon1, lat2, lon2 = options.bbox
        south, west = min(lat1, lat2), min(lon1, lon2)
        north, east = max(lat1, lat2), max(lon1, lon2)

        northwest = Location(north, west)
        southeast = Location(south, east)

        # Positional args are zoom levels; convert them to ints in place.
        for (i, zoom) in enumerate(zooms):
            if not zoom.isdigit():
                raise KnownUnknown('"%s" is not a valid numeric zoom level.' % zoom)

            zooms[i] = int(zoom)

        if options.padding < 0:
            raise KnownUnknown('A negative padding will not work.')

        padding = options.padding
        tile_list = options.tile_list

    except KnownUnknown as e:
        parser.error(str(e))

    for layer in layers:
        # --tile-list wins over --bbox/--padding when both are given.
        if tile_list:
            coordinates = listCoordinates(tile_list)
        else:
            ul = layer.projection.locationCoordinate(northwest)
            lr = layer.projection.locationCoordinate(southeast)

            coordinates = generateCoordinates(ul, lr, zooms, padding)

        for (offset, count, coord) in coordinates:
            # NOTE(review): this rebinds `path` (imported from sys above);
            # harmless here since sys.path is no longer needed at this point.
            path = '%s/%d/%d/%d.%s' % (layer.name(), coord.zoom, coord.column, coord.row, extension)

            progress = {"tile": path,
                        "offset": offset + 1,
                        "total": count}

            if options.verbose:
                print('%(offset)d of %(total)d...' % progress, end=' ', file=stderr)

            try:
                mimetype, format = layer.getTypeByExtension(extension)
            except:
                #
                # It's not uncommon for layers to lack support for certain
                # extensions, so just don't attempt to remove a cached tile
                # for an unsupported format.
                #
                pass
            else:
                config.cache.remove(layer, coord, format)

            if options.verbose:
                print('%(tile)s' % progress, file=stderr)

            if progressfile:
                fp = open(progressfile, 'w')
                json_dump(progress, fp)
                fp.close()
| |
import os
import re
from smtplib import SMTPException
from django import forms
from django.conf import settings
from django.core.files.storage import default_storage as storage
from django.contrib.auth import forms as auth_forms
from django.contrib.auth.tokens import default_token_generator
from django.forms.util import ErrorList
from django.utils.safestring import mark_safe
import captcha.fields
import commonware.log
import happyforms
from tower import ugettext as _, ugettext_lazy as _lazy
import amo
import users.notifications as email
from amo.urlresolvers import reverse
from amo.utils import clean_nl, has_links, log_cef, slug_validator
from translations import LOCALES
from . import tasks
from .models import (UserProfile, UserNotification, BlacklistedName,
BlacklistedEmailDomain, BlacklistedPassword)
from .widgets import (NotificationsSelectMultiple, RequiredEmailInput,
RequiredInputMixin, RequiredTextInput)
log = commonware.log.getLogger('z.users')

# Requires at least one digit and one letter somewhere in the password.
# Raw string so the \d escape reaches the regex engine unchanged (a non-raw
# '\d' is an invalid string escape and warns on modern Pythons).
admin_re = re.compile(r'(?=.*\d)(?=.*[a-zA-Z])')
class PasswordMixin:
    """Shared minimum-length / strength rules for the password forms below.

    Used as a mixin alongside a Django form class; exposes the class-level
    `min_length`/`error_msg` constants that the forms reference directly.
    """
    # Minimum password length enforced by every password field in this module.
    min_length = 8
    # Lazily translated so the message renders in the request's locale.
    error_msg = {
        'min_length': _lazy('Must be %s characters or more.') % min_length}

    @classmethod
    def widget(cls, **kw):
        """Build a PasswordInput wired up for the client-side strength meter."""
        attrs = {
            'class': 'password-strength',
            'data-min-length': cls.min_length,
        }
        if kw.pop('required', False):
            attrs.update(RequiredInputMixin.required_attrs)
        return forms.PasswordInput(attrs=attrs, **kw)

    def clean_password(self, field='password', instance='instance'):
        """Validate a password field against the blacklist and, for users
        flagged `needs_tougher_password`, the letters+digits rule.

        `field` names the cleaned_data key to check; `instance` names the
        attribute holding the user object (e.g. 'instance' or 'user').
        """
        data = self.cleaned_data[field]
        if not data:
            return data

        user = getattr(self, instance, None)
        if user and user.pk and user.needs_tougher_password:
            # Privileged accounts must mix letters and digits.
            if not admin_re.search(data):
                raise forms.ValidationError(_('Letters and numbers required.'))

        if BlacklistedPassword.blocked(data):
            raise forms.ValidationError(_('That password is not allowed.'))
        return data
class AuthenticationForm(auth_forms.AuthenticationForm):
    """Login form: username/password, optional "remember me", and a
    recaptcha that is only kept when the caller asks for it."""
    username = forms.CharField(max_length=75, widget=RequiredTextInput)
    password = forms.CharField(max_length=255,
                               min_length=PasswordMixin.min_length,
                               error_messages=PasswordMixin.error_msg,
                               widget=PasswordMixin.widget(render_value=False,
                                                           required=True))
    rememberme = forms.BooleanField(required=False)
    recaptcha = captcha.fields.ReCaptchaField()
    recaptcha_shown = forms.BooleanField(widget=forms.HiddenInput,
                                         required=False)

    def __init__(self, request=None, use_recaptcha=False, *args, **kw):
        super(AuthenticationForm, self).__init__(*args, **kw)
        # Drop the captcha when unused or unconfigured (e.g. local dev).
        if not use_recaptcha or not settings.RECAPTCHA_PRIVATE_KEY:
            del self.fields['recaptcha']

    def clean(self):
        # We want an explicit error message for old accounts with a too
        # short password, see bug 1067673 for details.
        if ('password' in self.errors and 'password' in self.data and
                1 < len(self.data['password']) < PasswordMixin.min_length):
            msg = _('As part of our new password policy, your password must '
                    'be %s characters or more. Please update your password by '
                    '<a href="%s">issuing a password reset</a>.'
                    ) % (PasswordMixin.min_length,
                         reverse('password_reset_form'))
            self._errors['password'] = ErrorList([mark_safe(msg)])
        # Only clean the form (username and password) if recaptcha is ok.
        if 'recaptcha' in self.errors:
            return {}
        return super(AuthenticationForm, self).clean()
class PasswordResetForm(auth_forms.PasswordResetForm):
    """Password-reset request form.

    Looks users up case-insensitively by email and sends one reset mail per
    matching account, logging (and CEF-logging) each attempt.
    """
    email = forms.EmailField(widget=RequiredEmailInput)

    def __init__(self, *args, **kwargs):
        self.request = kwargs.pop('request', None)
        super(PasswordResetForm, self).__init__(*args, **kwargs)

    def clean_email(self):
        """Cache matching users; an unknown email is deliberately not an
        error so we don't leak which addresses have accounts."""
        email = self.cleaned_data['email']
        self.users_cache = UserProfile.objects.filter(email__iexact=email)
        return email

    def save(self, **kw):
        """Send a reset email to every cached user, swallowing SMTP errors."""
        if not self.users_cache:
            log.info("Unknown email used for password reset: {email}".format(
                **self.cleaned_data))
            return
        for user in self.users_cache:
            log.info(u'Password reset email sent for user (%s)' % user)
            if user.needs_tougher_password:
                log_cef('Password Reset', 5, self.request,
                        username=user,
                        signature='PASSWORDRESET',
                        msg='Privileged user requested password reset')
            else:
                log_cef('Password Reset', 5, self.request,
                        username=user,
                        signature='PASSWORDRESET',
                        msg='User requested password reset')
            try:
                # Django calls send_mail() directly and has no option to pass
                # in fail_silently, so we have to catch the SMTP error ourselves
                self.base_save(**kw)
            except SMTPException as e:
                # `except X, e` was Python-2-only syntax; `as` works on
                # Python 2.6+ and keeps the module importable on Python 3.
                log.error("Failed to send mail for (%s): %s" % (user, e))

    # Copypaste from superclass.
    def base_save(
            self, domain_override=None,
            subject_template_name='registration/password_reset_subject.txt',
            email_template_name='registration/password_reset_email.html',
            use_https=False, token_generator=default_token_generator,
            from_email=None, request=None):
        """
        Generates a one-use only link for resetting password and sends to the
        user.
        """
        from django.contrib.auth import get_user_model
        from django.contrib.sites.models import get_current_site
        from django.core.mail import send_mail
        from django.template import loader
        from django.utils.encoding import force_bytes
        from django.utils.http import urlsafe_base64_encode

        UserModel = get_user_model()
        email = self.cleaned_data["email"]
        active_users = UserModel._default_manager.filter(
            email__iexact=email,
            # we use "deleted" instead of "is_active"
            deleted=False)
        for user in active_users:
            # Make sure that no email is sent to a user that actually has
            # a password marked as unusable
            if not user.has_usable_password():
                continue
            if not domain_override:
                current_site = get_current_site(request)
                site_name = current_site.name
                domain = current_site.domain
            else:
                site_name = domain = domain_override
            c = {
                'email': user.email,
                'domain': domain,
                'site_name': site_name,
                'uid': urlsafe_base64_encode(force_bytes(user.pk)),
                'user': user,
                'token': token_generator.make_token(user),
                'protocol': 'https' if use_https else 'http',
            }
            subject = loader.render_to_string(subject_template_name, c)
            # Email subject *must not* contain newlines
            subject = ''.join(subject.splitlines())
            # Renamed from `email` so the address isn't shadowed by the body.
            body = loader.render_to_string(email_template_name, c)
            send_mail(subject, body, from_email, [user.email])
class SetPasswordForm(auth_forms.SetPasswordForm, PasswordMixin):
    """Form used from a password-reset link to choose a new password."""
    new_password1 = forms.CharField(label=_lazy(u'New password'),
                                    min_length=PasswordMixin.min_length,
                                    error_messages=PasswordMixin.error_msg,
                                    widget=PasswordMixin.widget(required=True))

    def __init__(self, *args, **kwargs):
        self.request = kwargs.pop('request', None)
        super(SetPasswordForm, self).__init__(*args, **kwargs)

    def clean_new_password1(self):
        # Delegate to PasswordMixin; `user` is set by the parent form.
        return self.clean_password(field='new_password1', instance='user')

    def save(self, **kw):
        # Three different loggers? :(
        amo.log(amo.LOG.CHANGE_PASSWORD, user=self.user)
        log.info(u'User (%s) changed password with reset form' % self.user)
        log_cef('Password Changed', 5, self.request,
                username=self.user.username, signature='PASSWORDCHANGED',
                msg='User changed password')
        super(SetPasswordForm, self).save(**kw)
class UserDeleteForm(forms.Form):
    """Asks for the current password plus an explicit confirmation before a
    user deletes their own account. Developers may not self-delete."""
    password = forms.CharField(max_length=255, required=True,
                               widget=forms.PasswordInput(render_value=False))
    confirm = forms.BooleanField(required=True)

    def __init__(self, *args, **kwargs):
        self.request = kwargs.pop('request', None)
        super(UserDeleteForm, self).__init__(*args, **kwargs)

    def clean_password(self):
        data = self.cleaned_data
        amouser = self.request.user
        if not amouser.check_password(data["password"]):
            raise forms.ValidationError(_("Wrong password entered!"))
        # Django's clean_<field> contract is to return the cleaned value;
        # falling through silently set cleaned_data['password'] to None.
        return data["password"]

    def clean(self):
        amouser = self.request.user
        if amouser.is_developer:
            # This is tampering because the form isn't shown on the page if the
            # user is a developer
            log.warning(u'[Tampering] Attempt to delete developer account (%s)'
                        % self.request.user)
            raise forms.ValidationError("")
class UsernameMixin:
    """Shared username validation for the registration/edit forms."""

    def clean_username(self):
        """Validate the requested username and return it unchanged."""
        username = self.cleaned_data['username']

        # All-digits usernames are disallowed since they can be
        # confused for user IDs in URLs. (See bug 862121.)
        if username.isdigit():
            raise forms.ValidationError(
                _('Usernames cannot contain only digits.'))

        slug_validator(
            username, lower=False,
            message=_('Enter a valid username consisting of letters, numbers, '
                      'underscores or hyphens.'))

        if BlacklistedName.blocked(username):
            raise forms.ValidationError(_('This username cannot be used.'))

        # FIXME: Bug 858452. Remove this check when collation of the username
        # column is changed to case insensitive.
        others = UserProfile.objects.exclude(id=self.instance.id)
        if others.filter(username__iexact=username).exists():
            raise forms.ValidationError(_('This username is already in use.'))

        return username
class UserRegisterForm(happyforms.ModelForm, UsernameMixin, PasswordMixin):
    """
    For registering users. We're not building off
    d.contrib.auth.forms.UserCreationForm because it doesn't do a lot of the
    details here, so we'd have to rewrite most of it anyway.
    """
    username = forms.CharField(max_length=50, widget=RequiredTextInput)
    email = forms.EmailField(widget=RequiredEmailInput)
    display_name = forms.CharField(label=_lazy(u'Display Name'), max_length=50,
                                   required=False)
    location = forms.CharField(label=_lazy(u'Location'), max_length=100,
                               required=False)
    occupation = forms.CharField(label=_lazy(u'Occupation'), max_length=100,
                                 required=False)
    password = forms.CharField(max_length=255,
                               min_length=PasswordMixin.min_length,
                               error_messages=PasswordMixin.error_msg,
                               widget=PasswordMixin.widget(render_value=False,
                                                           required=True))
    password2 = forms.CharField(max_length=255,
                                widget=PasswordMixin.widget(render_value=False,
                                                            required=True))
    recaptcha = captcha.fields.ReCaptchaField()
    homepage = forms.URLField(label=_lazy(u'Homepage'), required=False)

    class Meta:
        model = UserProfile
        fields = ('username', 'display_name', 'location', 'occupation',
                  'password', 'password2', 'recaptcha', 'homepage', 'email',
                  'emailhidden')

    def __init__(self, *args, **kwargs):
        super(UserRegisterForm, self).__init__(*args, **kwargs)
        # Recaptcha is optional in environments without a configured key.
        if not settings.RECAPTCHA_PRIVATE_KEY:
            del self.fields['recaptcha']
        errors = {'invalid': _('This URL has an invalid format. '
                               'Valid URLs look like '
                               'http://example.com/my_page.')}
        self.fields['homepage'].error_messages = errors

    def clean_email(self):
        # Reject sign-ups whose email domain is on the blacklist.
        d = self.cleaned_data['email'].split('@')[-1]
        if BlacklistedEmailDomain.blocked(d):
            raise forms.ValidationError(_('Please use an email address from a '
                                          'different provider to complete '
                                          'your registration.'))
        return self.cleaned_data['email']

    def clean_display_name(self):
        name = self.cleaned_data['display_name']
        if BlacklistedName.blocked(name):
            raise forms.ValidationError(_('This display name cannot be used.'))
        return name

    def clean(self):
        super(UserRegisterForm, self).clean()
        data = self.cleaned_data

        # Passwords
        p1 = data.get('password')
        p2 = data.get('password2')

        # If p1 is invalid because its blocked, this message is non sensical.
        if p1 and p1 != p2:
            msg = _('The passwords did not match.')
            self._errors['password2'] = ErrorList([msg])
            if p2:
                del data['password2']

        return data
class UserEditForm(UserRegisterForm, PasswordMixin):
    """Profile edit form: optional password change, photo upload and
    email-notification preferences."""
    oldpassword = forms.CharField(
        max_length=255, required=False,
        widget=forms.PasswordInput(render_value=False))
    password = forms.CharField(max_length=255, required=False,
                               min_length=PasswordMixin.min_length,
                               error_messages=PasswordMixin.error_msg,
                               widget=PasswordMixin.widget(render_value=False))
    password2 = forms.CharField(max_length=255, required=False,
                                widget=forms.PasswordInput(render_value=False))
    photo = forms.FileField(label=_lazy(u'Profile Photo'), required=False)

    notifications = forms.MultipleChoiceField(
        choices=[],
        widget=NotificationsSelectMultiple,
        initial=email.NOTIFICATIONS_DEFAULT,
        required=False)

    lang = forms.TypedChoiceField(label=_lazy(u'Default locale'),
                                  choices=LOCALES)

    def __init__(self, *args, **kwargs):
        self.request = kwargs.pop('request', None)
        super(UserEditForm, self).__init__(*args, **kwargs)

        if not self.instance.lang and self.request:
            # Default the locale picker to the request's detected language.
            self.initial['lang'] = self.request.LANG

        if self.instance:
            # Per-notification defaults, overridden by the user's saved prefs.
            default = dict((i, n.default_checked) for i, n
                           in email.NOTIFICATIONS_BY_ID.items())
            user = dict((n.notification_id, n.enabled) for n
                        in self.instance.notifications.all())
            default.update(user)

            # Add choices to Notification.
            choices = email.NOTIFICATIONS_CHOICES
            if not self.instance.is_developer:
                choices = email.NOTIFICATIONS_CHOICES_NOT_DEV

            # Append a "NEW" message to new notification options.
            saved = self.instance.notifications.values_list('notification_id',
                                                            flat=True)
            self.choices_status = {}
            for idx, label in choices:
                self.choices_status[idx] = idx not in saved

            self.fields['notifications'].choices = choices
            self.fields['notifications'].initial = [i for i, v
                                                    in default.items() if v]
            self.fields['notifications'].widget.form_instance = self

        # TODO: We should inherit from a base form not UserRegisterForm
        if self.fields.get('recaptcha'):
            del self.fields['recaptcha']

    class Meta:
        model = UserProfile
        exclude = ('password', 'picture_type', 'last_login')

    def clean(self):
        data = self.cleaned_data
        amouser = self.request.user

        # Passwords
        p1 = data.get("password")
        p2 = data.get("password2")

        if p1 or p2:
            # Changing the password requires the current one to be correct.
            if not amouser.check_password(data["oldpassword"]):
                msg = _("Wrong password entered!")
                self._errors["oldpassword"] = ErrorList([msg])
                del data["oldpassword"]

        super(UserEditForm, self).clean()
        return data

    def clean_photo(self):
        photo = self.cleaned_data['photo']

        if not photo:
            return

        if photo.content_type not in ('image/png', 'image/jpeg'):
            raise forms.ValidationError(
                _('Images must be either PNG or JPG.'))

        if photo.size > settings.MAX_PHOTO_UPLOAD_SIZE:
            raise forms.ValidationError(
                _('Please use images smaller than %dMB.' %
                  (settings.MAX_PHOTO_UPLOAD_SIZE / 1024 / 1024 - 1)))

        return photo

    def clean_bio(self):
        bio = self.cleaned_data['bio']
        normalized = clean_nl(unicode(bio))
        if has_links(normalized):
            # There's some links, we don't want them.
            raise forms.ValidationError(_('No links are allowed.'))
        return bio

    def save(self, log_for_developer=True):
        u = super(UserEditForm, self).save(commit=False)
        data = self.cleaned_data
        photo = data['photo']

        if photo:
            # Store the raw upload next to the final path; a celery task
            # converts/resizes it into place asynchronously.
            u.picture_type = 'image/png'
            tmp_destination = u.picture_path + '__unconverted'

            with storage.open(tmp_destination, 'wb') as fh:
                for chunk in photo.chunks():
                    fh.write(chunk)

            tasks.resize_photo.delay(tmp_destination, u.picture_path,
                                     set_modified_on=[u])

        if data['password']:
            u.set_password(data['password'])
            log_cef('Password Changed', 5, self.request, username=u.username,
                    signature='PASSWORDCHANGED', msg='User changed password')
            if log_for_developer:
                amo.log(amo.LOG.CHANGE_PASSWORD)
                log.info(u'User (%s) changed their password' % u)

        for (i, n) in email.NOTIFICATIONS_BY_ID.items():
            # Mandatory notifications stay enabled regardless of the form.
            enabled = n.mandatory or (str(i) in data['notifications'])
            UserNotification.update_or_create(
                user=u, notification_id=i, update={'enabled': enabled})

        log.debug(u'User (%s) updated their profile' % u)

        u.save()
        return u
class BaseAdminUserEditForm(object):
    """Shared audit-trail helpers for the admin user-edit forms."""

    def changed_fields(self):
        """Returns changed_data ignoring these fields."""
        ignored = set(['admin_log', 'notifications', 'photo',
                       'password', 'password2', 'oldpassword'])
        return set(self.changed_data) - ignored

    def changes(self):
        """A dictionary of changed fields, old, new. Hides password."""
        details = {}
        for field in self.changed_fields():
            details[field] = (self.initial[field], self.cleaned_data[field])
        if 'password' in self.changed_data:
            details['password'] = ['****', '****']
        return details

    def clean_anonymize(self):
        only_anonymize = self.changed_fields() == set(['anonymize'])
        if self.cleaned_data['anonymize'] and not only_anonymize:
            raise forms.ValidationError(_('To anonymize, enter a reason for'
                                          ' the change but do not change any'
                                          ' other field.'))
        return self.cleaned_data['anonymize']
class AdminUserEditForm(BaseAdminUserEditForm, UserEditForm):
    """This is the form used by admins to edit users' info."""
    admin_log = forms.CharField(required=True, label='Reason for change',
                                widget=forms.Textarea(attrs={'rows': 4}))
    confirmationcode = forms.CharField(required=False, max_length=255,
                                       label='Confirmation code')
    notes = forms.CharField(required=False, label='Notes',
                            widget=forms.Textarea(attrs={'rows': 4}))
    anonymize = forms.BooleanField(required=False)

    def save(self, *args, **kw):
        # Developer logging is handled here instead of in UserEditForm.save.
        profile = super(AdminUserEditForm, self).save(log_for_developer=False)
        if self.cleaned_data['anonymize']:
            amo.log(amo.LOG.ADMIN_USER_ANONYMIZED, self.instance,
                    self.cleaned_data['admin_log'])
            profile.anonymize()  # This also logs
        else:
            amo.log(amo.LOG.ADMIN_USER_EDITED, self.instance,
                    self.cleaned_data['admin_log'], details=self.changes())
            log.info('Admin edit user: %s changed fields: %s' %
                     (self.instance, self.changed_fields()))
            if 'password' in self.changes():
                # Admin-initiated password changes get a CEF audit record
                # that also names the acting admin.
                log_cef('Password Changed', 5, self.request,
                        username=self.instance.username,
                        signature='PASSWORDRESET',
                        msg='Admin requested password reset',
                        cs1=self.request.amo_user.username,
                        cs1Label='AdminName')
        return profile
class BlacklistedNameAddForm(forms.Form):
    """Form for adding blacklisted names in bulk fashion."""
    names = forms.CharField(widget=forms.Textarea(
        attrs={'cols': 40, 'rows': 16}))

    def clean_names(self):
        """Normalize the textarea to one trimmed, non-empty name per line."""
        raw = self.cleaned_data['names'].strip()
        if not raw:
            raise forms.ValidationError(
                _('Please enter at least one name to blacklist.'))
        cleaned = [line.strip() for line in raw.splitlines() if line.strip()]
        return os.linesep.join(cleaned)
class BlacklistedEmailDomainAddForm(forms.Form):
    """Form for adding blacklisted user e-mail domains in bulk fashion."""
    domains = forms.CharField(
        widget=forms.Textarea(attrs={'cols': 40, 'rows': 16}))

    def clean(self):
        super(BlacklistedEmailDomainAddForm, self).clean()
        data = self.cleaned_data

        if 'domains' in data:
            # Keep one trimmed, non-empty domain per line.
            domains = [line.strip()
                       for line in data['domains'].splitlines()
                       if line.strip()]
            data['domains'] = os.linesep.join(domains)

        if not data.get('domains', ''):
            msg = 'Please enter at least one e-mail domain to blacklist.'
            self._errors['domains'] = ErrorList([msg])

        return data
| |
"""
Support for Flux lights.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/light.flux_led/
"""
import logging
import socket
import random
import voluptuous as vol
from homeassistant.const import CONF_DEVICES, CONF_NAME, CONF_PROTOCOL
from homeassistant.components.light import (
ATTR_BRIGHTNESS, ATTR_HS_COLOR, ATTR_EFFECT, ATTR_WHITE_VALUE,
EFFECT_COLORLOOP, EFFECT_RANDOM, SUPPORT_BRIGHTNESS, SUPPORT_EFFECT,
SUPPORT_COLOR, SUPPORT_WHITE_VALUE, Light, PLATFORM_SCHEMA)
import homeassistant.helpers.config_validation as cv
import homeassistant.util.color as color_util
REQUIREMENTS = ['flux_led==0.22']

_LOGGER = logging.getLogger(__name__)

CONF_AUTOMATIC_ADD = 'automatic_add'
ATTR_MODE = 'mode'

DOMAIN = 'flux_led'

# Features reported for RGB/RGBW bulbs; MODE_WHITE reports brightness only.
SUPPORT_FLUX_LED = (SUPPORT_BRIGHTNESS | SUPPORT_EFFECT |
                    SUPPORT_COLOR)

MODE_RGB = 'rgb'
MODE_RGBW = 'rgbw'

# This mode enables white value to be controlled by brightness.
# RGB value is ignored when this mode is specified.
MODE_WHITE = 'w'

# List of supported effects which aren't already declared in LIGHT
EFFECT_RED_FADE = 'red_fade'
EFFECT_GREEN_FADE = 'green_fade'
EFFECT_BLUE_FADE = 'blue_fade'
EFFECT_YELLOW_FADE = 'yellow_fade'
EFFECT_CYAN_FADE = 'cyan_fade'
EFFECT_PURPLE_FADE = 'purple_fade'
EFFECT_WHITE_FADE = 'white_fade'
EFFECT_RED_GREEN_CROSS_FADE = 'rg_cross_fade'
EFFECT_RED_BLUE_CROSS_FADE = 'rb_cross_fade'
EFFECT_GREEN_BLUE_CROSS_FADE = 'gb_cross_fade'
EFFECT_COLORSTROBE = 'colorstrobe'
EFFECT_RED_STROBE = 'red_strobe'
EFFECT_GREEN_STROBE = 'green_strobe'
EFFECT_BLUE_STROBE = 'blue_strobe'
EFFECT_YELLOW_STROBE = 'yellow_strobe'
EFFECT_CYAN_STROBE = 'cyan_strobe'
EFFECT_PURPLE_STROBE = 'purple_strobe'
EFFECT_WHITE_STROBE = 'white_strobe'
EFFECT_COLORJUMP = 'colorjump'

# Effect name -> preset pattern code passed to the bulb's setPresetPattern().
EFFECT_MAP = {
    EFFECT_COLORLOOP: 0x25,
    EFFECT_RED_FADE: 0x26,
    EFFECT_GREEN_FADE: 0x27,
    EFFECT_BLUE_FADE: 0x28,
    EFFECT_YELLOW_FADE: 0x29,
    EFFECT_CYAN_FADE: 0x2a,
    EFFECT_PURPLE_FADE: 0x2b,
    EFFECT_WHITE_FADE: 0x2c,
    EFFECT_RED_GREEN_CROSS_FADE: 0x2d,
    EFFECT_RED_BLUE_CROSS_FADE: 0x2e,
    EFFECT_GREEN_BLUE_CROSS_FADE: 0x2f,
    EFFECT_COLORSTROBE: 0x30,
    EFFECT_RED_STROBE: 0x31,
    EFFECT_GREEN_STROBE: 0x32,
    EFFECT_BLUE_STROBE: 0x33,
    EFFECT_YELLOW_STROBE: 0x34,
    EFFECT_CYAN_STROBE: 0x35,
    EFFECT_PURPLE_STROBE: 0x36,
    EFFECT_WHITE_STROBE: 0x37,
    EFFECT_COLORJUMP: 0x38
}

# Effects exposed to HA: the random-color pseudo-effect plus the map above.
FLUX_EFFECT_LIST = [
    EFFECT_RANDOM,
] + list(EFFECT_MAP)

# Per-device options under the `devices:` mapping of the platform config.
DEVICE_SCHEMA = vol.Schema({
    vol.Optional(CONF_NAME): cv.string,
    vol.Optional(ATTR_MODE, default=MODE_RGBW):
        vol.All(cv.string, vol.In([MODE_RGBW, MODE_RGB, MODE_WHITE])),
    vol.Optional(CONF_PROTOCOL):
        vol.All(cv.string, vol.In(['ledenet'])),
})

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Optional(CONF_DEVICES, default={}): {cv.string: DEVICE_SCHEMA},
    vol.Optional(CONF_AUTOMATIC_ADD, default=False): cv.boolean,
})
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the Flux lights."""
    import flux_led

    lights = []
    configured_ips = []

    # Explicitly configured bulbs first.
    for ipaddr, device_config in config.get(CONF_DEVICES, {}).items():
        device = {
            'name': device_config[CONF_NAME],
            'ipaddr': ipaddr,
            CONF_PROTOCOL: device_config.get(CONF_PROTOCOL),
            ATTR_MODE: device_config[ATTR_MODE],
        }
        lights.append(FluxLight(device))
        configured_ips.append(ipaddr)

    if not config.get(CONF_AUTOMATIC_ADD, False):
        add_entities(lights, True)
        return

    # Discover bulbs on the LAN and add any that weren't configured above.
    scanner = flux_led.BulbScanner()
    scanner.scan(timeout=10)
    for device in scanner.getBulbInfo():
        ipaddr = device['ipaddr']
        if ipaddr in configured_ips:
            continue
        device['name'] = '{} {}'.format(device['id'], ipaddr)
        device[ATTR_MODE] = MODE_RGBW
        device[CONF_PROTOCOL] = None
        lights.append(FluxLight(device))

    add_entities(lights, True)
class FluxLight(Light):
    """Representation of a Flux light."""

    def __init__(self, device):
        """Initialize the light."""
        self._name = device['name']
        self._ipaddr = device['ipaddr']
        self._protocol = device[CONF_PROTOCOL]
        self._mode = device[ATTR_MODE]
        # Lazily connected in update(); None means the bulb is unavailable.
        self._bulb = None
        # Throttles the connect-failure warning to once per outage.
        self._error_reported = False

    def _connect(self):
        """Connect to Flux light."""
        import flux_led

        self._bulb = flux_led.WifiLedBulb(self._ipaddr, timeout=5)
        if self._protocol:
            self._bulb.setProtocol(self._protocol)

        # After bulb object is created the status is updated. We can
        # now set the correct mode if it was not explicitly defined.
        if not self._mode:
            if self._bulb.rgbwcapable:
                self._mode = MODE_RGBW
            else:
                self._mode = MODE_RGB

    def _disconnect(self):
        """Disconnect from Flux light."""
        self._bulb = None

    @property
    def available(self) -> bool:
        """Return True if entity is available."""
        return self._bulb is not None

    @property
    def name(self):
        """Return the name of the device if any."""
        return self._name

    @property
    def is_on(self):
        """Return true if device is on."""
        return self._bulb.isOn()

    @property
    def brightness(self):
        """Return the brightness of this light between 0..255."""
        if self._mode == MODE_WHITE:
            # White-only bulbs express brightness via the white channel.
            return self.white_value

        return self._bulb.brightness

    @property
    def hs_color(self):
        """Return the color property."""
        return color_util.color_RGB_to_hs(*self._bulb.getRgb())

    @property
    def supported_features(self):
        """Flag supported features."""
        if self._mode == MODE_RGBW:
            return SUPPORT_FLUX_LED | SUPPORT_WHITE_VALUE

        if self._mode == MODE_WHITE:
            return SUPPORT_BRIGHTNESS

        return SUPPORT_FLUX_LED

    @property
    def white_value(self):
        """Return the white value of this light between 0..255."""
        return self._bulb.getRgbw()[3]

    @property
    def effect_list(self):
        """Return the list of supported effects."""
        return FLUX_EFFECT_LIST

    def turn_on(self, **kwargs):
        """Turn the specified or all lights on."""
        if not self.is_on:
            self._bulb.turnOn()

        hs_color = kwargs.get(ATTR_HS_COLOR)

        if hs_color:
            rgb = color_util.color_hs_to_RGB(*hs_color)
        else:
            rgb = None

        brightness = kwargs.get(ATTR_BRIGHTNESS)
        effect = kwargs.get(ATTR_EFFECT)
        white = kwargs.get(ATTR_WHITE_VALUE)

        # Show warning if effect set with rgb, brightness, or white level
        if effect and (brightness or white or rgb):
            _LOGGER.warning("RGB, brightness and white level are ignored when"
                            " an effect is specified for a flux bulb")

        # Random color effect
        if effect == EFFECT_RANDOM:
            self._bulb.setRgb(random.randint(0, 255),
                              random.randint(0, 255),
                              random.randint(0, 255))
            return

        # Effect selection
        if effect in EFFECT_MAP:
            self._bulb.setPresetPattern(EFFECT_MAP[effect], 50)
            return

        # Preserve current brightness on color/white level change
        if brightness is None:
            brightness = self.brightness

        # Preserve color on brightness/white level change
        if rgb is None:
            rgb = self._bulb.getRgb()

        if white is None and self._mode == MODE_RGBW:
            white = self.white_value

        # handle W only mode (use brightness instead of white value)
        if self._mode == MODE_WHITE:
            self._bulb.setRgbw(0, 0, 0, w=brightness)

        # handle RGBW mode
        elif self._mode == MODE_RGBW:
            self._bulb.setRgbw(*tuple(rgb), w=white, brightness=brightness)

        # handle RGB mode
        else:
            self._bulb.setRgb(*tuple(rgb), brightness=brightness)

    def turn_off(self, **kwargs):
        """Turn the specified or all lights off."""
        self._bulb.turnOff()

    def update(self):
        """Synchronize state with bulb."""
        if not self.available:
            # Try to (re)connect; warn only once until a connect succeeds.
            try:
                self._connect()
                self._error_reported = False
            except socket.error:
                self._disconnect()
                if not self._error_reported:
                    _LOGGER.warning("Failed to connect to bulb %s, %s",
                                    self._ipaddr, self._name)
                    self._error_reported = True
                return

        self._bulb.update_state(retry=2)
| |
"""Parser for bvlc caffe googlenet."""
# Authors: Michael Eickenberg
# Kyle Kastner
# License: BSD 3 Clause
from sklearn.externals import joblib
from ...datasets import get_dataset_dir, download
from caffemodel import _parse_caffe_model, parse_caffe_model
import os
from ...utils import check_tensor
from ..overfeat import get_overfeat_class_label
from sklearn.base import BaseEstimator, TransformerMixin
import theano
import theano.tensor as T
import numpy as np
# On-disk cache directory for the downloaded/parsed GoogLeNet model files.
GOOGLENET_PATH = get_dataset_dir("caffe/bvlc_googlenet")
def fetch_googlenet_protobuffer_file(caffemodel_file=None):
    """Checks for existence of caffemodel protobuffer.
    Downloads it if it cannot be found."""
    default_filename = os.path.join(GOOGLENET_PATH,
                                    "bvlc_googlenet.caffemodel")

    # Prefer an explicitly requested file that already exists on disk.
    if caffemodel_file is not None and os.path.exists(caffemodel_file):
        return caffemodel_file

    # Otherwise fall back to the cached default, warning if the caller
    # asked for a different path.
    if os.path.exists(default_filename):
        if caffemodel_file is not None:
            import warnings
            warnings.warn('Did not find %s, but found and returned %s.' %
                          (caffemodel_file, default_filename))
        return default_filename

    # Nothing on disk: download to the requested location, or the default.
    target = default_filename if caffemodel_file is None else caffemodel_file

    if not os.path.exists(GOOGLENET_PATH):
        os.makedirs(GOOGLENET_PATH)

    url = "https://dl.dropboxusercontent.com/u/15378192/bvlc_googlenet.caffemodel"
    download(url, target, progress_update_percentage=1)
    return target
def fetch_googlenet_architecture(caffemodel_parsed=None,
                                 caffemodel_protobuffer=None):
    """Fetch a pickled version of the caffe model, represented as list of
    dictionaries."""
    default_filename = os.path.join(GOOGLENET_PATH, 'bvlc_googlenet.pickle')

    # Prefer an explicitly requested pickle that already exists on disk.
    if caffemodel_parsed is not None and os.path.exists(caffemodel_parsed):
        return joblib.load(caffemodel_parsed)

    # Otherwise fall back to the cached default, warning if the caller
    # asked for a different path.
    if os.path.exists(default_filename):
        if caffemodel_parsed is not None:
            import warnings
            warnings.warn('Did not find %s, but found %s. Loading it.' %
                          (caffemodel_parsed, default_filename))
        return joblib.load(default_filename)

    # No pickle anywhere: parse the raw protobuffer and cache the result.
    protobuf_file = fetch_googlenet_protobuffer_file(caffemodel_protobuffer)
    model = _parse_caffe_model(protobuf_file)

    if caffemodel_parsed is not None:
        joblib.dump(model, caffemodel_parsed)
    else:
        joblib.dump(model, default_filename)

    return model
def create_theano_expressions(model=None, verbose=0):
    """Build theano blob expressions and the data input variable for the
    (possibly default) GoogLeNet model."""
    if model is None:
        model = fetch_googlenet_architecture()

    layers, blobs, inputs = parse_caffe_model(model, verbose=verbose)
    return blobs, inputs['data']
def _get_fprop(output_layers=('loss3/loss3',), model=None, verbose=0):
    """Compile a theano function mapping input data to the given layers."""
    if model is None:
        model = fetch_googlenet_architecture(model)

    expressions, input_data = create_theano_expressions(model,
                                                        verbose=verbose)
    outputs = [expressions[layer_name] for layer_name in output_layers]
    return theano.function([input_data], outputs)
class GoogLeNetTransformer(BaseEstimator, TransformerMixin):
    """
    A transformer/feature extractor for images using GoogLeNet.
    Parameters
    ----------
    output_layers : iterable, optional (default=('loss3/classifier',))
        Which layers to return. Can be used to retrieve multiple levels of
        output with a single call to transform.
    force_reshape : boolean, optional (default=True)
        Whether or not to force the output to be two dimensional. If true,
        this class can be used as part of a scikit-learn pipeline.
        force_reshape currently only supports len(output_layers) == 1!
    """
    def __init__(self, output_layers=('loss3/classifier',),
                 force_reshape=True, transpose_order=(0, 3, 1, 2)):
        self.output_layers = output_layers
        self.force_reshape = force_reshape
        self.transpose_order = transpose_order
        self.transform_function = _get_fprop(output_layers)

    def fit(self, X, y=None):
        """Passthrough function for sklearn compatibility.

        Fix: the original returned None and required `y`; scikit-learn's
        estimator contract expects `fit` to accept y=None and return self
        (e.g. for `est.fit(X).transform(X)` and Pipeline use).
        """
        return self

    def transform(self, X):
        """
        Transform a set of images.
        Returns the features from each layer.
        Parameters
        ----------
        X : array-like, shape = [n_images, height, width, color]
            or
            shape = [height, width, color]
        Returns
        -------
        T : array-like, shape = [n_images, n_features]
            If force_reshape = False,
            list of array-like, length output_layers,
            each shape = [n_images, n_channels,
                          n_features]
            Returns the features extracted for each of the n_images in X.
        """
        X = check_tensor(X, dtype=np.float32, n_dim=4)
        transposed = X.transpose(*self.transpose_order)
        if self.force_reshape:
            # Only the first requested layer is returned, flattened to 2D.
            return self.transform_function(transposed)[0].reshape((len(X), -1))
        return self.transform_function(transposed)
class GoogLeNetClassifier(BaseEstimator):
    """
    A classifier for cropped images using the GoogLeNet neural network.
    (Docstring fixed: it previously said "OverFeat", but this class compiles
    the GoogLeNet 'loss3/loss3' output.)
    Image will be cropped to center 224x224 pixels
    Parameters
    ----------
    top_n : integer, optional (default=5)
        How many classes to return, based on sorted class probabilities.
    output_strings : boolean, optional (default=True)
        Whether to return class strings or integer classes. Returns class
        strings by default.
    Attributes
    ----------
    crop_bounds_ : tuple, (x_left, x_right, y_lower, y_upper)
        The coordinate boundaries of the cropping box used.
    """
    # Network input size; images are center-cropped to this before inference.
    min_size = (224, 224)

    def __init__(self, top_n=5, large_network=False, output_strings=True,
                 transpose_order=(0, 3, 1, 2)):
        self.top_n = top_n
        # NOTE(review): large_network is stored but never read in this class.
        self.large_network = large_network
        self.output_strings = output_strings
        self.transpose_order = transpose_order
        self.transform_function = _get_fprop(('loss3/loss3',))

    def fit(self, X, y=None):
        """Passthrough for scikit-learn pipeline compatibility."""
        return self

    def _predict_proba(self, X):
        """Center-crop X to min_size, run the network, return its raw output.

        Side effect: records the crop box used in self.crop_bounds_.
        """
        x_midpoint = X.shape[2] // 2
        y_midpoint = X.shape[1] // 2
        x_lower_bound = x_midpoint - self.min_size[0] // 2
        if x_lower_bound <= 0:
            x_lower_bound = 0
        x_upper_bound = x_lower_bound + self.min_size[0]
        y_lower_bound = y_midpoint - self.min_size[1] // 2
        if y_lower_bound <= 0:
            y_lower_bound = 0
        y_upper_bound = y_lower_bound + self.min_size[1]
        self.crop_bounds_ = (x_lower_bound, x_upper_bound, y_lower_bound,
                             y_upper_bound)
        res = self.transform_function(
            X[:, y_lower_bound:y_upper_bound,
              x_lower_bound:x_upper_bound, :].transpose(
                *self.transpose_order))[0]
        return res

    def predict(self, X):
        """
        Classify a set of cropped input images.
        Returns the top_n classes.
        Parameters
        ----------
        X : array-like, shape = [n_images, height, width, color]
            or
            shape = [height, width, color]
        Returns
        -------
        T : array-like, shape = [n_images, top_n]
            Returns the top_n classes for each of the n_images in X.
            If output_strings is True, then the result will be string
            description of the class label.
            Otherwise, the returned values will be the integer class label.
        """
        X = check_tensor(X, dtype=np.float32, n_dim=4)
        res = self._predict_proba(X)[:, :, 0, 0]
        indices = np.argsort(res, axis=1)
        # Keep the top_n class indices (ascending score order).
        indices = indices[:, -self.top_n:]
        if self.output_strings:
            class_strings = np.empty_like(indices,
                                          dtype=object)
            # NOTE(review): reuses the OverFeat label table for GoogLeNet
            # output -- presumably both index the same ImageNet labels;
            # confirm.
            for index, value in enumerate(indices.flat):
                class_strings.flat[index] = get_overfeat_class_label(value)
            return class_strings
        else:
            return indices

    def predict_proba(self, X):
        """
        Prediction probability for a set of cropped input images.
        Returns the top_n probabilities.
        Parameters
        ----------
        X : array-like, shape = [n_images, height, width, color]
            or
            shape = [height, width, color]
        Returns
        -------
        T : array-like, shape = [n_images, top_n]
            Returns the top_n probabilities for each of the n_images in X.
        """
        X = check_tensor(X, dtype=np.float32, n_dim=4)
        res = self._predict_proba(X)[:, :, 0, 0]
        return np.sort(res, axis=1)[:, -self.top_n:]
| |
# encoding: utf-8
"""Unit-test suite for `pptx.text.layout` module."""
import pytest
from pptx.text.layout import _BinarySearchTree, _Line, _LineSource, TextFitter
from ..unitutil.mock import (
ANY,
call,
class_mock,
function_mock,
initializer_mock,
instance_mock,
method_mock,
property_mock,
)
class DescribeTextFitter(object):
    """Unit-test suite for `pptx.text.layout.TextFitter` object."""

    def it_can_determine_the_best_fit_font_size(self, request, line_source_):
        # Mock out the collaborators so only the classmethod's wiring is
        # exercised.
        _LineSource_ = class_mock(
            request, "pptx.text.layout._LineSource", return_value=line_source_
        )
        _init_ = initializer_mock(request, TextFitter)
        _best_fit_font_size_ = method_mock(
            request, TextFitter, "_best_fit_font_size", return_value=36
        )
        extents, max_size = (19, 20), 42
        font_size = TextFitter.best_fit_font_size(
            "Foobar", extents, max_size, "foobar.ttf"
        )
        _LineSource_.assert_called_once_with("Foobar")
        _init_.assert_called_once_with(line_source_, extents, "foobar.ttf")
        _best_fit_font_size_.assert_called_once_with(ANY, max_size)
        assert font_size == 36

    def it_finds_best_fit_font_size_to_help_best_fit(self, _best_fit_fixture):
        text_fitter, max_size, _BinarySearchTree_ = _best_fit_fixture[:3]
        sizes_, predicate_, font_size_ = _best_fit_fixture[3:]
        font_size = text_fitter._best_fit_font_size(max_size)
        # The candidate sizes 1..max_size are organized into a BST and
        # searched with the fits-inside predicate.
        _BinarySearchTree_.from_ordered_sequence.assert_called_once_with(
            range(1, max_size + 1)
        )
        sizes_.find_max.assert_called_once_with(predicate_)
        assert font_size is font_size_

    @pytest.mark.parametrize(
        "extents, point_size, text_lines, expected_value",
        (
            # Two 50-unit-high lines fit when the available height >= 100.
            ((66, 99), 6, ("foo", "bar"), False),
            ((66, 100), 6, ("foo", "bar"), True),
            ((66, 101), 6, ("foo", "bar"), True),
        ),
    )
    def it_provides_a_fits_inside_predicate_fn(
        self,
        request,
        line_source_,
        _rendered_size_,
        extents,
        point_size,
        text_lines,
        expected_value,
    ):
        _wrap_lines_ = method_mock(
            request, TextFitter, "_wrap_lines", return_value=text_lines
        )
        _rendered_size_.return_value = (None, 50)
        text_fitter = TextFitter(line_source_, extents, "foobar.ttf")
        predicate = text_fitter._fits_inside_predicate
        result = predicate(point_size)
        _wrap_lines_.assert_called_once_with(text_fitter, line_source_, point_size)
        # Line height is measured from a representative "Ty" extent sample.
        _rendered_size_.assert_called_once_with(
            "Ty", point_size, text_fitter._font_file
        )
        assert result is expected_value

    def it_provides_a_fits_in_width_predicate_fn(self, fits_cx_pred_fixture):
        text_fitter, point_size, line = fits_cx_pred_fixture[:3]
        _rendered_size_, expected_value = fits_cx_pred_fixture[3:]
        predicate = text_fitter._fits_in_width_predicate(point_size)
        result = predicate(line)
        _rendered_size_.assert_called_once_with(
            line.text, point_size, text_fitter._font_file
        )
        assert result is expected_value

    def it_wraps_lines_to_help_best_fit(self, request):
        line_source, remainder = _LineSource("foo bar"), _LineSource("bar")
        _break_line_ = method_mock(
            request,
            TextFitter,
            "_break_line",
            side_effect=[("foo", remainder), ("bar", _LineSource(""))],
        )
        text_fitter = TextFitter(None, (None, None), None)
        text_fitter._wrap_lines(line_source, 21)
        # _break_line is called once per produced line, on the remainder each
        # time, until the source is exhausted.
        assert _break_line_.call_args_list == [
            call(text_fitter, line_source, 21),
            call(text_fitter, remainder, 21),
        ]

    def it_breaks_off_a_line_to_help_wrap(
        self, request, line_source_, _BinarySearchTree_
    ):
        bst_ = instance_mock(request, _BinarySearchTree)
        _fits_in_width_predicate_ = method_mock(
            request, TextFitter, "_fits_in_width_predicate"
        )
        _BinarySearchTree_.from_ordered_sequence.return_value = bst_
        predicate_ = _fits_in_width_predicate_.return_value
        max_value_ = bst_.find_max.return_value
        text_fitter = TextFitter(None, (None, None), None)
        value = text_fitter._break_line(line_source_, 21)
        _BinarySearchTree_.from_ordered_sequence.assert_called_once_with(line_source_)
        text_fitter._fits_in_width_predicate.assert_called_once_with(text_fitter, 21)
        bst_.find_max.assert_called_once_with(predicate_)
        assert value is max_value_

    # fixtures ---------------------------------------------

    @pytest.fixture
    def _best_fit_fixture(self, _BinarySearchTree_, _fits_inside_predicate_):
        text_fitter = TextFitter(None, (None, None), None)
        max_size = 42
        sizes_ = _BinarySearchTree_.from_ordered_sequence.return_value
        predicate_ = _fits_inside_predicate_.return_value
        font_size_ = sizes_.find_max.return_value
        return (
            text_fitter,
            max_size,
            _BinarySearchTree_,
            sizes_,
            predicate_,
            font_size_,
        )

    @pytest.fixture(params=[(49, True), (50, True), (51, False)])
    def fits_cx_pred_fixture(self, request, _rendered_size_):
        # (rendered width, whether it fits in the 50-unit-wide fitter).
        rendered_width, expected_value = request.param
        text_fitter = TextFitter(None, (50, None), "foobar.ttf")
        point_size, line = 12, _Line("foobar", None)
        _rendered_size_.return_value = (rendered_width, None)
        return (text_fitter, point_size, line, _rendered_size_, expected_value)

    # fixture components -----------------------------------

    @pytest.fixture
    def _BinarySearchTree_(self, request):
        return class_mock(request, "pptx.text.layout._BinarySearchTree")

    @pytest.fixture
    def _fits_inside_predicate_(self, request):
        return property_mock(request, TextFitter, "_fits_inside_predicate")

    @pytest.fixture
    def line_source_(self, request):
        return instance_mock(request, _LineSource)

    @pytest.fixture
    def _rendered_size_(self, request):
        return function_mock(request, "pptx.text.layout._rendered_size")
class Describe_BinarySearchTree(object):
    """Unit-test suite for `pptx.text.layout._BinarySearchTree` object."""

    def it_can_construct_from_an_ordered_sequence(self):
        bst = _BinarySearchTree.from_ordered_sequence(range(10))

        def in_order(node):
            """
            Traverse the tree depth first to produce a list of its values,
            in order.
            """
            result = []
            if node is None:
                return result
            result.extend(in_order(node._lesser))
            result.append(node.value)
            result.extend(in_order(node._greater))
            return result

        # The construction strategy makes the last item the root (all other
        # values hang off its lesser subtree) while preserving BST order.
        assert bst.value == 9
        assert bst._lesser.value == 4
        assert bst._greater is None
        assert in_order(bst) == list(range(10))

    def it_can_find_the_max_value_satisfying_a_predicate(self, max_fixture):
        bst, predicate, expected_value = max_fixture
        assert bst.find_max(predicate) == expected_value

    # fixtures ---------------------------------------------

    @pytest.fixture(
        params=[
            # (values, predicate, expected max satisfying value or None).
            (range(10), lambda n: n < 6.5, 6),
            (range(10), lambda n: n > 9.9, None),
            (range(10), lambda n: n < 0.0, None),
        ]
    )
    def max_fixture(self, request):
        seq, predicate, expected_value = request.param
        bst = _BinarySearchTree.from_ordered_sequence(seq)
        return bst, predicate, expected_value
class Describe_LineSource(object):
    """Unit-test suite for `pptx.text.layout._LineSource` object."""

    def it_generates_text_remainder_pairs(self):
        expected_pairs = [
            ("foo", _LineSource("bar baz")),
            ("foo bar", _LineSource("baz")),
            ("foo bar baz", _LineSource("")),
        ]
        # Iterating a _LineSource yields successively longer (text, remainder)
        # pairs; each must match the expected pair at the same position.
        for expected, actual in zip(expected_pairs, _LineSource("foo bar baz")):
            assert expected == actual
# produces different results on Linux, fails Travis-CI
# from pptx.text.layout import _rendered_size
# from ..unitutil.file import testfile
# class Describe_rendered_size(object):
# def it_calculates_the_rendered_size_of_text_at_point_size(self, fixture):
# text, point_size, font_file, expected_value = fixture
# extents = _rendered_size(text, point_size, font_file)
# assert extents == expected_value
# # fixtures ---------------------------------------------
# @pytest.fixture(params=[
# ('Typical', 18, (673100, 254000)),
# ('foo bar baz', 12, (698500, 165100)),
# ])
# def fixture(self, request):
# text, point_size, expected_value = request.param
# font_file = testfile('calibriz.ttf')
# return text, point_size, font_file, expected_value
| |
import sublime
import sublime_plugin
import sys
import os
# We use several commands implemented in Vintange, so make it available here.
sys.path.append(os.path.join(sublime.packages_path(), 'Vintage'))
import re
import subprocess
# TODO: This is not available. Integrate with Vintageous instead.
# from Vintage.vintage import g_registers
from VintageousEx.plat.windows import get_oem_cp
from VintageousEx.plat.windows import get_startup_info
from VintageousEx.vex import ex_error
from VintageousEx.vex import ex_range
from VintageousEx.vex import shell
from VintageousEx.vex import parsers
# Line regions collected by ExGlobal for the command it triggers right after;
# consumed (and cleared) by get_region_by_range().
GLOBAL_RANGES = []

# Default ex line range: the current line ('.') with no offsets or searches.
CURRENT_LINE_RANGE = {'left_ref': '.', 'left_offset': 0, 'left_search_offsets': [],
                      'right_ref': None, 'right_offset': 0, 'right_search_offsets': []}
class VintageousExState(object):
    """Module-wide state shared by the ex commands."""
    # When repeating searches, determines which search term to use: the current
    # word or the latest search term.
    # Values: find_under, search_pattern
    search_buffer_type = 'find_under'
def is_any_buffer_dirty(window):
    """Return True if any view in `window` has unsaved changes, else False.

    Fix: the original fell off the end and returned None for the all-clean
    case; `any()` makes the False result explicit (callers only rely on
    truthiness, so this is backward-compatible).
    """
    return any(v.is_dirty() for v in window.views())
# TODO: this code must be shared with Vintageous, not reimplemented here.
def set_register(text, register):
    """Store `text` into `register`.

    Currently a deliberate no-op stub: the real implementation (disabled
    below) depends on Vintage's g_registers, which is not imported here.
    """
    return None
    # global g_registers
    # if register == '*' or register == '+':
    #     sublime.set_clipboard(text)
    # elif register == '%':
    #     pass
    # else:
    #     reg = register.lower()
    #     append = (reg != register)
    #     if append and reg in g_registers:
    #         g_registers[reg] += text
    #     else:
    #         g_registers[reg] = text
def gather_buffer_info(v):
    """Gather data to be displayed by :ls or :buffers.

    Returns [leaf, path]: a display name (annotated with status flags when
    applicable) and a shortened parent/leaf path for the view `v`.
    """
    full_path = v.file_name()
    if full_path:
        directory, leaf = os.path.split(full_path)
        path = os.path.join(os.path.basename(directory), leaf)
    else:
        # Unsaved buffer: fall back to its name or numeric buffer id.
        path = v.name() or str(v.buffer_id())
        leaf = v.name() or 'untitled'
    # Status flags: t = transient (no file), * = unsaved, r = read-only.
    flags = [flag for flag, present in (("t", not v.file_name()),
                                        ("*", v.is_dirty()),
                                        ("r", v.is_read_only()))
             if present]
    if flags:
        leaf = '%s (%s)' % (leaf, ', '.join(flags))
    return [leaf, path]
def get_region_by_range(view, line_range=None, as_lines=False):
    """Return the regions addressed by `line_range` (as lines or blocks).

    Regions queued by a preceding ExGlobal take precedence and are consumed
    here.  Returns None when neither pending global ranges nor a line_range
    are available.
    """
    # If GLOBAL_RANGES exists, the ExGlobal command has been run right before
    # the current command, and we know we must process these lines.
    global GLOBAL_RANGES
    if GLOBAL_RANGES:
        rv = GLOBAL_RANGES[:]
        # Consume the pending ranges so they only affect this one command.
        GLOBAL_RANGES = []
        return rv
    if line_range:
        vim_range = ex_range.VimRange(view, line_range)
        if as_lines:
            return vim_range.lines()
        else:
            return vim_range.blocks()
class ExGoto(sublime_plugin.TextCommand):
    """Ex command: :{address} -- move the caret to the addressed line."""
    def run(self, edit, line_range=None):
        # A bare ":" yields an empty text_range; nothing to do then.
        if not line_range['text_range']:
            return
        computed_ranges, _ = ex_range.new_calculate_range(self.view, line_range)
        _, target_row = computed_ranges[0]
        self.view.run_command('vi_goto_line', {'repeat': target_row})
        self.view.show(self.view.sel()[0])
class ExShellOut(sublime_plugin.TextCommand):
    """Ex command(s): :!cmd, :'<,>'!cmd
    Run cmd in a system's shell or filter selected regions through external
    command.
    """
    def run(self, edit, line_range=None, shell_cmd=''):
        try:
            if not line_range['text_range']:
                # No range given: just run the command.
                shell.run_and_wait(self.view, shell_cmd)
                return
            # Range given: pipe the addressed regions through the command.
            target_regions = get_region_by_range(self.view, line_range=line_range)
            shell.filter_thru_shell(view=self.view,
                                    regions=target_regions,
                                    cmd=shell_cmd)
        except NotImplementedError:
            ex_error.handle_not_implemented()
class ExShell(sublime_plugin.TextCommand):
    """Ex command(s): :shell
    Opens a shell at the current view's directory. Sublime Text keeps a virtual
    current directory that most of the time will be out of sync with the actual
    current directory. The virtual current directory is always set to the
    current view's directory, but it isn't accessible through the API.
    """
    def open_shell(self, command):
        """Spawn `command` with the current view's directory as cwd."""
        view_dir = os.path.dirname(self.view.file_name())
        return subprocess.Popen(command, cwd=view_dir)

    def _open_unix_terminal(self, settings_key):
        """Open a terminal emulator running bash.

        The terminal name is taken from the settings key, falling back to the
        COLORTERM/TERM environment variables.  (Fix: the original duplicated
        this code verbatim for linux and osx, and displayed the garbled
        message "Not terminal name found.")
        """
        term = self.view.settings().get(settings_key)
        term = term or os.environ.get('COLORTERM') or os.environ.get("TERM")
        if not term:
            sublime.status_message("VintageousEx: No terminal name found.")
            return
        try:
            self.open_shell([term, '-e', 'bash']).wait()
        except Exception as e:
            print(e)
            sublime.status_message("VintageousEx: Error while executing command through shell.")

    def run(self, edit):
        platform = sublime.platform()
        if platform == 'linux':
            self._open_unix_terminal('VintageousEx_linux_terminal')
        elif platform == 'osx':
            self._open_unix_terminal('VintageousEx_osx_terminal')
        elif platform == 'windows':
            self.open_shell(['cmd.exe', '/k']).wait()
        else:
            ex_error.handle_not_implemented()
class ExReadShellOut(sublime_plugin.TextCommand):
    """Ex command: :r[ead]
    With `forced` (:r!cmd), inserts the output of a shell command at each
    selection.  Reading a file into the buffer is not implemented yet.
    """
    def run(self, edit, line_range=None, name='', plusplus_args='', forced=False):
        target_line = self.view.line(self.view.sel()[0].begin())
        if line_range['text_range']:
            # Fix: the original bound this to `range`, shadowing the builtin.
            target_row = max(ex_range.calculate_range(self.view, line_range=line_range)[0])
            target_line = self.view.line(self.view.text_point(target_row, 0))
        # NOTE(review): target_point is computed but never used below.
        target_point = min(target_line.b + 1, self.view.size())
        # cheat a little bit to get the parsing right:
        # - forced == True means we need to execute a command
        if forced:
            if sublime.platform() == 'linux':
                for s in self.view.sel():
                    # TODO: make shell command configurable.
                    the_shell = self.view.settings().get('linux_shell')
                    the_shell = the_shell or os.path.expandvars("$SHELL")
                    if not the_shell:
                        sublime.status_message("VintageousEx: No shell name found.")
                        return
                    try:
                        p = subprocess.Popen([the_shell, '-c', name],
                                             stdout=subprocess.PIPE)
                    except Exception as e:
                        print(e)
                        sublime.status_message("VintageousEx: Error while executing command through shell.")
                        return
                    # NOTE(review): communicate()[0] is bytes on Python 3;
                    # confirm whether decoding is needed here as on windows.
                    self.view.insert(edit, s.begin(), p.communicate()[0][:-1])
            elif sublime.platform() == 'windows':
                for s in self.view.sel():
                    p = subprocess.Popen(['cmd.exe', '/C', name],
                                         stdout=subprocess.PIPE,
                                         startupinfo=get_startup_info()
                                         )
                    # Decode using the console's OEM code page.
                    cp = 'cp' + get_oem_cp()
                    rv = p.communicate()[0].decode(cp)[:-2].strip()
                    self.view.insert(edit, s.begin(), rv)
            else:
                ex_error.handle_not_implemented()
        # Read a file into the current view.
        else:
            # According to Vim's help, :r should read the current file's content
            # if no file name is given, but Vim doesn't do that.
            # TODO: implement reading a file into the buffer.
            ex_error.handle_not_implemented()
            return
class ExPromptSelectOpenFile(sublime_plugin.TextCommand):
    """Ex command(s): :ls, :files
    Shows a quick panel listing the open files only. Provides concise
    information about the buffers's state: 'transient', 'unsaved'.
    """
    def run(self, edit):
        # One [leaf, path] entry per open view (see gather_buffer_info).
        self.file_names = [gather_buffer_info(v)
                           for v in self.view.window().views()]
        self.view.window().show_quick_panel(self.file_names, self.on_done)

    def on_done(self, idx):
        """Focus the view the user picked; idx == -1 means cancelled."""
        if idx == -1: return
        sought_fname = self.file_names[idx]
        for v in self.view.window().views():
            # Match named buffers by file-name suffix...
            if v.file_name() and v.file_name().endswith(sought_fname[1]):
                self.view.window().focus_view(v)
            # XXX Base all checks on buffer id?
            # ...and unnamed buffers by their numeric buffer id.
            elif sought_fname[1].isdigit() and \
                    v.buffer_id() == int(sought_fname[1]):
                self.view.window().focus_view(v)
class ExMap(sublime_plugin.TextCommand):
    """Ex command: :map
    Not a real mapping implementation; does at least something moderately
    useful by opening the user's platform .sublime-keymap file.
    """
    def run(self, edit):
        platform_labels = {'windows': 'Windows', 'linux': 'Linux'}
        platf = platform_labels.get(sublime.platform(), 'OSX')
        keymap = '${packages}/User/Default (%s).sublime-keymap' % platf
        self.view.window().run_command('open_file', {'file': keymap})
class ExAbbreviate(sublime_plugin.TextCommand):
    """Ex command: :ab[breviate]
    For the moment, just opens a completions file, creating an empty
    skeleton on first use.
    """
    def run(self, edit):
        abbs_file_name = 'VintageousEx Abbreviations.sublime-completions'
        full_path = os.path.join(sublime.packages_path(),
                                 'User/' + abbs_file_name)
        if not os.path.exists(full_path):
            # Seed an empty .sublime-completions skeleton.
            with open(full_path, 'w') as f:
                f.write('{\n\t"scope": "",\n\t"completions": [\n\t\n\t]\n}\n')
        self.view.window().run_command(
            'open_file', {'file': "${packages}/User/%s" % abbs_file_name})
class ExPrintWorkingDir(sublime_plugin.TextCommand):
    """Ex command: :pw[d] -- show the process' working directory."""
    def run(self, edit):
        current_dir = os.getcwd()
        sublime.status_message(current_dir)
class ExWriteFile(sublime_plugin.TextCommand):
    """Ex command(s): :w[rite] and its variants.

    Writes the addressed lines (or the whole buffer) either to disk, into a
    new buffer named `file_name`/`target_redirect`, or appends them
    (operator '>>').
    """
    def run(self, edit,
            line_range=None,
            forced=False,
            file_name='',
            plusplus_args='',
            operator='',
            target_redirect='',
            subcmd=''):
        if file_name and target_redirect:
            sublime.status_message('VintageousEx: Too many arguments.')
            return
        appending = operator == '>>'
        # FIXME: reversed? -- what's going on here!!
        a_range = line_range['text_range']
        # With a range write only those lines; otherwise the whole buffer.
        content = get_region_by_range(self.view, line_range=line_range) if a_range else \
                        [sublime.Region(0, self.view.size())]
        if target_redirect or file_name:
            # Write into a brand-new buffer carrying the requested name.
            target = self.view.window().new_file()
            target.set_name(target_redirect or file_name)
        else:
            target = self.view
        start = 0 if not appending else target.size()
        prefix = '\n' if appending and target.size() > 0 else ''
        if appending or target_redirect or file_name:
            for frag in reversed(content):
                target.insert(edit, start, prefix + self.view.substr(frag) + '\n')
        elif a_range:
            # :<range>w -- keep only the addressed lines: insert them at the
            # top, then delete everything after them.
            start_deleting = 0
            for frag in content:
                text = self.view.substr(frag) + '\n'
                self.view.insert(edit, 0, text)
                start_deleting += len(text)
            self.view.replace(edit, sublime.Region(start_deleting,
                                                   self.view.size()), '')
        else:
            # Plain :w -- save only if there is something to save.
            if self.view.is_dirty():
                self.view.run_command('save')
class ExWriteAll(sublime_plugin.TextCommand):
    """Ex command: :wa[ll] -- save every dirty buffer in the window."""
    def run(self, edit, forced=False):
        dirty_views = (v for v in self.view.window().views() if v.is_dirty())
        for view in dirty_views:
            view.run_command('save')
class ExNewFile(sublime_plugin.TextCommand):
    """Ex command: open an empty new buffer."""
    def run(self, edit, forced=False):
        window = self.view.window()
        window.run_command('new_file')
class ExFile(sublime_plugin.TextCommand):
    """Ex command: :f[ile]
    Shows the buffer's name, status attributes, line count and an
    approximate scroll percentage in the status bar.
    """
    def run(self, edit, forced=False):
        # XXX figure out what the right params are. vim's help seems to be
        # wrong
        fname = self.view.file_name() or 'untitled'
        attrs = ''
        if self.view.is_read_only():
            attrs = 'readonly'
        if self.view.is_scratch():
            attrs = 'modified'
        # `lines` is either a message string or the integer line count.
        lines = 'no lines in the buffer'
        if self.view.rowcol(self.view.size())[0]:
            lines = self.view.rowcol(self.view.size())[0] + 1
        # fixme: doesn't calculate the buffer's % correctly
        # Fix: `basestring` does not exist on Python 3 (ST3's runtime) and
        # raised NameError here; `str` is the correct check on both branches.
        if not isinstance(lines, str):
            vr = self.view.visible_region()
            start_row, end_row = self.view.rowcol(vr.begin())[0], \
                self.view.rowcol(vr.end())[0]
            # Fix: // keeps the integer midpoint under Python 3 as well.
            mid = (start_row + end_row + 2) // 2
            percent = float(mid) / lines * 100.0
        msg = fname
        if attrs:
            msg += " [%s]" % attrs
        if isinstance(lines, str):
            msg += " -- %s --" % lines
        else:
            msg += " %d line(s) --%d%%--" % (lines, int(percent))
        sublime.status_message('VintageousEx: %s' % msg)
class ExMove(sublime_plugin.TextCommand):
    """Ex command: :m[ove] {address}
    Moves the addressed lines to below {address}.
    """
    def run(self, edit, line_range=None, forced=False, address=''):
        # make sure we have a default range
        if not line_range['text_range']:
            line_range['text_range'] = '.'
        address_parser = parsers.cmd_line.AddressParser(address)
        parsed_address = address_parser.parse()
        address = ex_range.calculate_address(self.view, parsed_address)
        if address is None:
            ex_error.display_error(ex_error.ERR_INVALID_ADDRESS)
            return
        # Collect the text of the lines being moved.
        line_block = get_region_by_range(self.view, line_range=line_range)
        line_block = [self.view.substr(r) for r in line_block]
        text = '\n'.join(line_block) + '\n'
        if address != 0:
            dest = self.view.line(self.view.text_point(address, 0)).end() + 1
        else:
            dest = 0
        # Don't move lines onto themselves.
        for sel in self.view.sel():
            if sel.contains(dest):
                ex_error.display_error(ex_error.ERR_CANT_MOVE_LINES_ONTO_THEMSELVES)
                return
        # Destination past EOF: append instead (leading newline, drop the
        # trailing one).
        if dest > self.view.size():
            dest = self.view.size()
            text = '\n' + text[:-1]
        self.view.insert(edit, dest, text)
        # Erase the originals only after the copy is in place.
        for r in reversed(get_region_by_range(self.view, line_range)):
            self.view.erase(edit, self.view.full_line(r))
class ExCopy(sublime_plugin.TextCommand):
    """Ex command: :co[py] {address}
    Copies the addressed lines to below {address}.
    """
    # todo: do null ranges always default to '.'?
    def run(self, edit, line_range=None, forced=False, address=''):
        # Fix: the original used the module-level CURRENT_LINE_RANGE dict as
        # the default argument; other commands mutate their line_range in
        # place, so sharing the mutable default could corrupt it for every
        # later call. Use a per-call copy instead.
        if line_range is None:
            line_range = dict(CURRENT_LINE_RANGE)
        address_parser = parsers.cmd_line.AddressParser(address)
        parsed_address = address_parser.parse()
        address = ex_range.calculate_address(self.view, parsed_address)
        if address is None:
            ex_error.display_error(ex_error.ERR_INVALID_ADDRESS)
            return
        # Collect the text of the lines being copied.
        line_block = get_region_by_range(self.view, line_range=line_range)
        line_block = [self.view.substr(r) for r in line_block]
        text = '\n'.join(line_block) + '\n'
        if address != 0:
            dest = self.view.line(self.view.text_point(address, 0)).end() + 1
        else:
            dest = address
        # Destination past EOF: append instead (leading newline, drop the
        # trailing one).
        if dest > self.view.size():
            dest = self.view.size()
            text = '\n' + text[:-1]
        self.view.insert(edit, dest, text)
        # Leave the caret at the start of the last copied line.
        self.view.sel().clear()
        cursor_dest = self.view.line(dest + len(text) - 1).begin()
        self.view.sel().add(sublime.Region(cursor_dest, cursor_dest))
class ExOnly(sublime_plugin.TextCommand):
    """ Command: :only
    Closes every view in the window except the active one.
    """
    def run(self, edit, forced=False):
        window = self.view.window()
        # Refuse to discard other buffers' changes unless :only! was used.
        if not forced and is_any_buffer_dirty(window):
            ex_error.display_error(ex_error.ERR_OTHER_BUFFER_HAS_CHANGES)
            return
        keep_id = self.view.id()
        for view in window.views():
            if view.id() == keep_id:
                continue
            if forced and view.is_dirty():
                view.set_scratch(True)
            window.focus_view(view)
            window.run_command('close')
class ExDoubleAmpersand(sublime_plugin.TextCommand):
    """ Command :&&
    Repeats the last substitution with new flags/count by delegating to
    ex_substitute.
    """
    def run(self, edit, line_range=None, flags='', count=''):
        args = {'line_range': line_range, 'pattern': flags + count}
        self.view.run_command('ex_substitute', args)
class ExSubstitute(sublime_plugin.TextCommand):
    """Ex command: :s[ubstitute]
    Performs a regex substitution over the addressed lines; with no
    arguments, repeats the most recent substitution.
    """
    # Most recent pattern/replacement/flags, reused by a bare :s.
    most_recent_pat = None
    most_recent_flags = ''
    most_recent_replacement = ''

    def run(self, edit, line_range=None, pattern=''):
        # :s
        if not pattern:
            pattern = ExSubstitute.most_recent_pat
            replacement = ExSubstitute.most_recent_replacement
            flags = ''
            count = 0
        # :s g 100 | :s/ | :s// | s:/foo/bar/g 100 | etc.
        else:
            try:
                parts = parsers.s_cmd.split(pattern)
            except SyntaxError as e:
                sublime.status_message("VintageousEx: (substitute) %s" % e)
                print("VintageousEx: (substitute) %s" % e)
                return
            else:
                if len(parts) == 4:
                    # This is a full command in the form :s/foo/bar/g 100 or a
                    # partial version of it.
                    (pattern, replacement, flags, count) = parts
                else:
                    # This is a short command in the form :s g 100 or a partial
                    # version of it.
                    (flags, count) = parts
                    pattern = ExSubstitute.most_recent_pat
                    replacement = ExSubstitute.most_recent_replacement
        if not pattern:
            pattern = ExSubstitute.most_recent_pat
        else:
            ExSubstitute.most_recent_pat = pattern
            ExSubstitute.most_recent_replacement = replacement
            ExSubstitute.most_recent_flags = flags
        computed_flags = 0
        computed_flags |= re.IGNORECASE if (flags and 'i' in flags) else 0
        try:
            pattern = re.compile(pattern, flags=computed_flags)
        except Exception as e:
            # Fix: BaseException.message was removed in Python 3, so e.message
            # raised AttributeError here; str(e) works everywhere and matches
            # ExGlobal's error reporting.
            sublime.status_message("VintageousEx [regex error]: %s ... in pattern '%s'" % (str(e), pattern))
            print("VintageousEx [regex error]: %s ... in pattern '%s'" % (str(e), pattern))
            return
        # Vim semantics: only the first match per line unless the 'g' flag.
        replace_count = 0 if (flags and 'g' in flags) else 1
        target_region = get_region_by_range(self.view, line_range=line_range, as_lines=True)
        # Replace bottom-up so earlier regions stay valid while editing.
        for r in reversed(target_region):
            line_text = self.view.substr(self.view.line(r))
            rv = re.sub(pattern, replacement, line_text, count=replace_count)
            self.view.replace(edit, self.view.line(r), rv)
class ExDelete(sublime_plugin.TextCommand):
    """Ex command: :d[elete]
    Deletes the addressed lines, optionally yanking them into `register`.
    """
    def run(self, edit, line_range=None, register='', count=''):
        # XXX somewhat different to vim's behavior
        rs = get_region_by_range(self.view, line_range=line_range)
        # Select the target regions so the stock delete macro acts on them.
        self.view.sel().clear()
        to_store = []
        for r in rs:
            self.view.sel().add(r)
            if register:
                to_store.append(self.view.substr(self.view.full_line(r)))
        if register:
            text = ''.join(to_store)
            # needed for lines without a newline character
            if not text.endswith('\n'):
                text = text + '\n'
            set_register(text, register)
        self.view.run_command('split_selection_into_lines')
        self.view.run_command('run_macro_file',
                              {'file': 'Packages/Default/Delete Line.sublime-macro'})
class ExGlobal(sublime_plugin.TextCommand):
    """Ex command(s): :global
    :global filters lines where a pattern matches and then applies the supplied
    action to all those lines.
    Examples:
        :10,20g/FOO/delete
    This command deletes all lines between line 10 and line 20 where 'FOO'
    matches.
        :g:XXX:s!old!NEW!g
    This command replaces all instances of 'old' with 'NEW' in every line
    where 'XXX' matches.
    By default, :global searches all lines in the buffer.
    If you want to filter lines where a pattern does NOT match, add an
    exclamation point:
        :g!/DON'T TOUCH THIS/delete
    """
    # Pattern to reuse when :g// is given with an empty pattern.
    most_recent_pat = None

    def run(self, edit, line_range=None, forced=False, pattern=''):
        # Default to the whole buffer, like Vim.
        if not line_range['text_range']:
            line_range['text_range'] = '%'
            line_range['left_ref'] = '%'
        try:
            global_pattern, subcmd = parsers.g_cmd.split(pattern)
        except ValueError:
            msg = "VintageousEx: Bad :global pattern. (%s)" % pattern
            sublime.status_message(msg)
            print(msg)
            return
        if global_pattern:
            ExGlobal.most_recent_pat = global_pattern
        else:
            global_pattern = ExGlobal.most_recent_pat
        # Make sure we always have a subcommand to execute. This is what
        # Vim does too.
        subcmd = subcmd or 'print'
        rs = get_region_by_range(self.view, line_range=line_range, as_lines=True)
        for r in rs:
            try:
                match = re.search(global_pattern, self.view.substr(r))
            except Exception as e:
                msg = "VintageousEx (global): %s ... in pattern '%s'" % (str(e), global_pattern)
                sublime.status_message(msg)
                print(msg)
                return
            # `forced` (:g!) inverts the filter.
            if (match and not forced) or (not match and forced):
                GLOBAL_RANGES.append(r)
        # don't do anything if we didn't find any target ranges
        if not GLOBAL_RANGES:
            return
        # NOTE(review): `r` here is the last region from the loop above; the
        # subcommand is issued once, addressed at that line, and consumes
        # GLOBAL_RANGES via get_region_by_range().
        self.view.window().run_command('vi_colon_input',
                                       {'cmd_line': ':' +
                                        str(self.view.rowcol(r.a)[0] + 1) +
                                        subcmd})
class ExPrint(sublime_plugin.TextCommand):
    """Ex command: :p[rint]
    Dumps the addressed lines into a new scratch buffer, optionally with
    line numbers (flag '#') and visible whitespace (flag 'l').
    """
    def run(self, edit, line_range=None, count='1', flags=''):
        # A non-numeric `count` argument is really the flags argument.
        if not count.isdigit():
            flags, count = count, ''
        # NOTE(review): `count` is parsed above but never applied below.
        rs = get_region_by_range(self.view, line_range=line_range)
        to_display = []
        for r in rs:
            for line in self.view.lines(r):
                text = self.view.substr(line)
                if '#' in flags:
                    # 1-based row number for the '#' flag.
                    row = self.view.rowcol(line.begin())[0] + 1
                else:
                    row = ''
                to_display.append((text, row))
        v = self.view.window().new_file()
        v.set_scratch(True)
        if 'l' in flags:
            v.settings().set('draw_white_space', 'all')
        for t, r in to_display:
            v.insert(edit, v.size(), (str(r) + ' ' + t + '\n').lstrip())
# TODO: General note for all :q variants:
# ST has a notion of hot_exit, whereby it preserves all buffers so that they
# can be restored next time you open ST. With this option on, all :q
# commands should probably execute silently even if there are unsaved buffers.
# Sticking to Vim's behavior closely here makes for a worse experience
# because typically you don't start ST as many times.
class ExQuitCommand(sublime_plugin.WindowCommand):
    """Ex command(s): :quit
    Closes the window.
    * Don't close the window if there are dirty buffers
    TODO:
    (Doesn't make too much sense if hot_exit is on, though.)
    Although ST's window command 'exit' would take care of this, it
    displays a modal dialog, so spare ourselves that.
    """
    def run(self, forced=False, count=1, flags=''):
        view = self.window.active_view()
        if forced:
            # :q! discards unsaved changes by marking the buffer scratch.
            view.set_scratch(True)
        if view.is_dirty():
            sublime.status_message("There are unsaved changes!")
            return
        self.window.run_command('close')
        # If that was the last view, close the (now empty) window as well.
        if not self.window.views():
            self.window.run_command('close')
class ExQuitAllCommand(sublime_plugin.WindowCommand):
    """Ex command(s): :qall
    Close all windows and then exit Sublime Text.
    If there are dirty buffers, exit only if :qall!.
    """
    def run(self, forced=False):
        if forced:
            # Mark dirty buffers scratch so 'close_all' won't prompt.
            for view in self.window.views():
                if view.is_dirty():
                    view.set_scratch(True)
        elif is_any_buffer_dirty(self.window):
            sublime.status_message("There are unsaved changes!")
            return
        self.window.run_command('close_all')
        self.window.run_command('exit')
class ExWriteAndQuitCommand(sublime_plugin.TextCommand):
    """Ex command(s): :wq
    Write and then close the active buffer.
    """
    def run(self, edit, line_range=None, forced=False):
        # TODO: implement this
        if forced:
            ex_error.handle_not_implemented()
            return
        view = self.view
        if view.is_read_only():
            sublime.status_message("Can't write a read-only buffer.")
            return
        if not view.file_name():
            sublime.status_message("Can't save a file without name.")
            return
        view.run_command('save')
        view.window().run_command('ex_quit')
class ExBrowse(sublime_plugin.TextCommand):
    """Ex command: :bro[wse] -- show Sublime's open-file prompt."""
    def run(self, edit):
        window = self.view.window()
        window.run_command('prompt_open_file')
class ExEdit(sublime_plugin.TextCommand):
    """Ex command: :e[dit]
    Reverts the buffer to its saved state; :e! also discards changes.
    """
    def run_(self, args):
        # NOTE(review): TextCommand.run_ normally receives an args dict;
        # passing it positionally makes `forced` truthy whenever args is
        # non-empty -- confirm this is intended.
        self.run(args)

    def run(self, forced=False):
        # todo: restore active line_nr too
        if forced or not self.view.is_dirty():
            self.view.run_command('revert')
            return
        elif self.view.is_dirty():
            ex_error.display_error(ex_error.ERR_UNSAVED_CHANGES)
            return
        # NOTE(review): unreachable -- the two branches above cover all cases.
        ex_error.handle_not_implemented()
class ExCquit(sublime_plugin.TextCommand):
    """Exits Sublime Text via the window-level 'exit' command."""
    def run(self, edit):
        window = self.view.window()
        window.run_command('exit')
class ExExit(sublime_plugin.TextCommand):
    """Ex command(s): :x[it], :exi[t]
    Like :wq, but write only when changes have been made.
    TODO: Support ranges, like :w.
    """
    def run(self, edit, line_range=None):
        window = self.view.window()
        if window.active_view().is_dirty():
            window.run_command('save')
        window.run_command('close')
        if not window.views():
            window.run_command('close')
class ExListRegisters(sublime_plugin.TextCommand):
    """Lists registers in quick panel and saves selected to `"` register."""
    def run(self, edit):
        if not g_registers:
            sublime.status_message('VintageousEx: no registers.')
            # FIX: previously fell through and opened an empty quick panel.
            return
        self.view.window().show_quick_panel(
            ['"{0} {1}'.format(k, v) for k, v in g_registers.items()],
            self.on_done)
    def on_done(self, idx):
        """Save selected value to `"` register."""
        if idx == -1:
            return
        # FIX: dict views are not indexable on Python 3; materialize first.
        # Iteration order matches the items() order used to build the panel.
        g_registers['"'] = list(g_registers.values())[idx]
class ExNew(sublime_plugin.TextCommand):
    """Ex command(s): :new
    Create a new buffer.
    TODO: Create new buffer by splitting the screen.
    """
    def run(self, edit, line_range=None):
        window = self.view.window()
        window.run_command('new_file')
class ExYank(sublime_plugin.TextCommand):
    """Ex command(s): :y[ank]
    Copies the addressed lines into a register (unnamed register by default).
    """
    def run(self, edit, line_range, register=None, count=None):
        register = register or '"'
        regions = get_region_by_range(self.view, line_range)
        text = '\n'.join(self.view.substr(region) for region in regions)
        g_registers[register] = text
        if register == '"':
            # Yanks into the unnamed register also populate register 0.
            g_registers['0'] = text
class TabControlCommand(sublime_plugin.WindowCommand):
    """Dispatcher for the tab-related ex commands.
    :param command: one of 'open', 'next', 'prev', 'last', 'first', 'only'.
    :param file_name: for 'open', a path resolved relative to the current
                      file's directory; None opens the goto overlay.
    :param forced: for 'only', forwarded to ex_quit for each closed tab.
    """
    def run(self, command, file_name=None, forced=False):
        window = self.window
        selfview = window.active_view()
        max_index = len(window.views())
        (group, index) = window.get_view_index(selfview)
        if command == "open":
            if file_name is None:  # TODO: file completion
                window.run_command("show_overlay", {"overlay": "goto", "show_files": True})
            else:
                # FIX: file_name() is None for unsaved buffers; fall back to
                # '' (resolve relative to the working directory) instead of
                # crashing inside os.path.dirname(None).
                cur_dir = os.path.dirname(selfview.file_name() or '')
                window.open_file(os.path.join(cur_dir, file_name))
        elif command == "next":
            window.run_command("select_by_index", {"index": (index + 1) % max_index})
        elif command == "prev":
            window.run_command("select_by_index", {"index": (index + max_index - 1) % max_index})
        elif command == "last":
            window.run_command("select_by_index", {"index": max_index - 1})
        elif command == "first":
            window.run_command("select_by_index", {"index": 0})
        elif command == "only":
            # Focus each other view in the group so ex_quit acts on it, then
            # restore focus to the original view.
            for view in window.views_in_group(group):
                if view.id() != selfview.id():
                    window.focus_view(view)
                    window.run_command("ex_quit", {"forced": forced})
            window.focus_view(selfview)
        else:
            sublime.status_message("Unknown TabControl Command")
class ExTabOpenCommand(sublime_plugin.WindowCommand):
    """Delegates to tab_control with the 'open' command."""
    def run(self, file_name=None):
        args = {"command": "open", "file_name": file_name}
        self.window.run_command("tab_control", args)
class ExTabNextCommand(sublime_plugin.WindowCommand):
    """Delegates to tab_control with the 'next' command."""
    def run(self):
        self.window.run_command("tab_control", {"command": "next"})
class ExTabPrevCommand(sublime_plugin.WindowCommand):
    """Delegates to tab_control with the 'prev' command."""
    def run(self):
        self.window.run_command("tab_control", {"command": "prev"})
class ExTabLastCommand(sublime_plugin.WindowCommand):
    """Delegates to tab_control with the 'last' command."""
    def run(self):
        self.window.run_command("tab_control", {"command": "last"})
class ExTabFirstCommand(sublime_plugin.WindowCommand):
    """Delegates to tab_control with the 'first' command."""
    def run(self):
        self.window.run_command("tab_control", {"command": "first"})
class ExTabOnlyCommand(sublime_plugin.WindowCommand):
    """Delegates to tab_control with the 'only' command."""
    def run(self, forced=False):
        args = {"command": "only", "forced": forced}
        self.window.run_command("tab_control", args)
| |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pyspark import since
from pyspark.rdd import ignore_unicode_prefix, PythonEvalType
from pyspark.sql.column import Column, _to_seq, _to_java_column, _create_column_from_literal
from pyspark.sql.dataframe import DataFrame
from pyspark.sql.udf import UserDefinedFunction
from pyspark.sql.types import *
# Public API of this module.
__all__ = ["GroupedData"]
def dfapi(f):
    """Decorator for no-argument GroupedData methods.

    Replaces the decorated method with a wrapper that calls the JVM method
    of the same name on ``self._jgd`` and wraps the result in a DataFrame.
    The wrapper inherits the original name and docstring.
    """
    def _api(self):
        jdf = getattr(self._jgd, f.__name__)()
        return DataFrame(jdf, self.sql_ctx)
    _api.__name__ = f.__name__
    _api.__doc__ = f.__doc__
    return _api
def df_varargs_api(f):
    """Decorator for *cols GroupedData methods.

    Replaces the decorated method with a wrapper that forwards the column
    names (as a JVM Seq) to the JVM method of the same name on ``self._jgd``
    and wraps the result in a DataFrame. The wrapper inherits the original
    name and docstring.
    """
    def _api(self, *cols):
        jdf = getattr(self._jgd, f.__name__)(_to_seq(self.sql_ctx._sc, cols))
        return DataFrame(jdf, self.sql_ctx)
    _api.__name__ = f.__name__
    _api.__doc__ = f.__doc__
    return _api
class GroupedData(object):
    """
    A set of methods for aggregations on a :class:`DataFrame`,
    created by :func:`DataFrame.groupBy`.
    .. note:: Experimental
    .. versionadded:: 1.3
    """
    def __init__(self, jgd, df):
        # Underlying JVM grouped-data object; every aggregation delegates to it.
        self._jgd = jgd
        # Parent DataFrame, kept so pivot()/apply() can reference it.
        self._df = df
        self.sql_ctx = df.sql_ctx
    @ignore_unicode_prefix
    @since(1.3)
    def agg(self, *exprs):
        """Compute aggregates and returns the result as a :class:`DataFrame`.
        The available aggregate functions are `avg`, `max`, `min`, `sum`, `count`.
        If ``exprs`` is a single :class:`dict` mapping from string to string, then the key
        is the column to perform aggregation on, and the value is the aggregate function.
        Alternatively, ``exprs`` can also be a list of aggregate :class:`Column` expressions.
        :param exprs: a dict mapping from column name (string) to aggregate functions (string),
            or a list of :class:`Column`.
        >>> gdf = df.groupBy(df.name)
        >>> sorted(gdf.agg({"*": "count"}).collect())
        [Row(name=u'Alice', count(1)=1), Row(name=u'Bob', count(1)=1)]
        >>> from pyspark.sql import functions as F
        >>> sorted(gdf.agg(F.min(df.age)).collect())
        [Row(name=u'Alice', min(age)=2), Row(name=u'Bob', min(age)=5)]
        """
        assert exprs, "exprs should not be empty"
        if len(exprs) == 1 and isinstance(exprs[0], dict):
            # Dict form: {column name: aggregate function name}.
            jdf = self._jgd.agg(exprs[0])
        else:
            # Columns
            assert all(isinstance(c, Column) for c in exprs), "all exprs should be Column"
            # The JVM overload takes the first column plus a Seq of the rest.
            jdf = self._jgd.agg(exprs[0]._jc,
                                _to_seq(self.sql_ctx._sc, [c._jc for c in exprs[1:]]))
        return DataFrame(jdf, self.sql_ctx)
    @dfapi
    @since(1.3)
    def count(self):
        """Counts the number of records for each group.
        >>> sorted(df.groupBy(df.age).count().collect())
        [Row(age=2, count=1), Row(age=5, count=1)]
        """
    @df_varargs_api
    @since(1.3)
    def mean(self, *cols):
        """Computes average values for each numeric columns for each group.
        :func:`mean` is an alias for :func:`avg`.
        :param cols: list of column names (string). Non-numeric columns are ignored.
        >>> df.groupBy().mean('age').collect()
        [Row(avg(age)=3.5)]
        >>> df3.groupBy().mean('age', 'height').collect()
        [Row(avg(age)=3.5, avg(height)=82.5)]
        """
    @df_varargs_api
    @since(1.3)
    def avg(self, *cols):
        """Computes average values for each numeric columns for each group.
        :func:`mean` is an alias for :func:`avg`.
        :param cols: list of column names (string). Non-numeric columns are ignored.
        >>> df.groupBy().avg('age').collect()
        [Row(avg(age)=3.5)]
        >>> df3.groupBy().avg('age', 'height').collect()
        [Row(avg(age)=3.5, avg(height)=82.5)]
        """
    @df_varargs_api
    @since(1.3)
    def max(self, *cols):
        """Computes the max value for each numeric columns for each group.
        >>> df.groupBy().max('age').collect()
        [Row(max(age)=5)]
        >>> df3.groupBy().max('age', 'height').collect()
        [Row(max(age)=5, max(height)=85)]
        """
    @df_varargs_api
    @since(1.3)
    def min(self, *cols):
        """Computes the min value for each numeric column for each group.
        :param cols: list of column names (string). Non-numeric columns are ignored.
        >>> df.groupBy().min('age').collect()
        [Row(min(age)=2)]
        >>> df3.groupBy().min('age', 'height').collect()
        [Row(min(age)=2, min(height)=80)]
        """
    @df_varargs_api
    @since(1.3)
    def sum(self, *cols):
        """Compute the sum for each numeric columns for each group.
        :param cols: list of column names (string). Non-numeric columns are ignored.
        >>> df.groupBy().sum('age').collect()
        [Row(sum(age)=7)]
        >>> df3.groupBy().sum('age', 'height').collect()
        [Row(sum(age)=7, sum(height)=165)]
        """
    @since(1.6)
    def pivot(self, pivot_col, values=None):
        """
        Pivots a column of the current :class:`DataFrame` and perform the specified aggregation.
        There are two versions of pivot function: one that requires the caller to specify the list
        of distinct values to pivot on, and one that does not. The latter is more concise but less
        efficient, because Spark needs to first compute the list of distinct values internally.
        :param pivot_col: Name of the column to pivot.
        :param values: List of values that will be translated to columns in the output DataFrame.
        # Compute the sum of earnings for each year by course with each course as a separate column
        >>> df4.groupBy("year").pivot("course", ["dotNET", "Java"]).sum("earnings").collect()
        [Row(year=2012, dotNET=15000, Java=20000), Row(year=2013, dotNET=48000, Java=30000)]
        # Or without specifying column values (less efficient)
        >>> df4.groupBy("year").pivot("course").sum("earnings").collect()
        [Row(year=2012, Java=20000, dotNET=15000), Row(year=2013, Java=30000, dotNET=48000)]
        """
        if values is None:
            # No explicit values: Spark computes the distinct pivot values
            # itself (an extra internal job).
            jgd = self._jgd.pivot(pivot_col)
        else:
            jgd = self._jgd.pivot(pivot_col, values)
        return GroupedData(jgd, self._df)
    @since(2.3)
    def apply(self, udf):
        """
        Maps each group of the current :class:`DataFrame` using a pandas udf and returns the result
        as a `DataFrame`.
        The user-defined function should take a `pandas.DataFrame` and return another
        `pandas.DataFrame`. For each group, all columns are passed together as a `pandas.DataFrame`
        to the user-function and the returned `pandas.DataFrame`s are combined as a
        :class:`DataFrame`.
        The returned `pandas.DataFrame` can be of arbitrary length and its schema must match the
        returnType of the pandas udf.
        This function does not support partial aggregation, and requires shuffling all the data in
        the :class:`DataFrame`.
        :param udf: A function object returned by :meth:`pyspark.sql.functions.pandas_udf`
        >>> from pyspark.sql.functions import pandas_udf, PandasUDFType
        >>> df = spark.createDataFrame(
        ...     [(1, 1.0), (1, 2.0), (2, 3.0), (2, 5.0), (2, 10.0)],
        ...     ("id", "v"))
        >>> @pandas_udf("id long, v double", PandasUDFType.GROUP_MAP)  # doctest: +SKIP
        ... def normalize(pdf):
        ...     v = pdf.v
        ...     return pdf.assign(v=(v - v.mean()) / v.std())
        >>> df.groupby("id").apply(normalize).show()  # doctest: +SKIP
        +---+-------------------+
        | id|                  v|
        +---+-------------------+
        |  1|-0.7071067811865475|
        |  1| 0.7071067811865475|
        |  2|-0.8320502943378437|
        |  2|-0.2773500981126146|
        |  2| 1.1094003924504583|
        +---+-------------------+
        .. seealso:: :meth:`pyspark.sql.functions.pandas_udf`
        """
        # Columns are special because hasattr always return True
        if isinstance(udf, Column) or not hasattr(udf, 'func') \
                or udf.evalType != PythonEvalType.SQL_PANDAS_GROUP_MAP_UDF:
            raise ValueError("Invalid udf: the udf argument must be a pandas_udf of type "
                             "GROUP_MAP.")
        df = self._df
        # Every column of the parent DataFrame is handed to the wrapped udf.
        udf_column = udf(*[df[col] for col in df.columns])
        jdf = self._jgd.flatMapGroupsInPandas(udf_column._jc.expr())
        return DataFrame(jdf, self.sql_ctx)
def _test():
    """Run this module's doctests against a throwaway local SparkSession."""
    import doctest
    from pyspark.sql import Row, SparkSession
    import pyspark.sql.group
    globs = pyspark.sql.group.__dict__.copy()
    spark = (SparkSession.builder
             .master("local[4]")
             .appName("sql.group tests")
             .getOrCreate())
    sc = spark.sparkContext
    globs['sc'] = sc
    globs['spark'] = spark
    # Fixtures referenced by the doctests above.
    schema = StructType([StructField('age', IntegerType()),
                         StructField('name', StringType())])
    globs['df'] = sc.parallelize([(2, 'Alice'), (5, 'Bob')]).toDF(schema)
    rows3 = [Row(name='Alice', age=2, height=80),
             Row(name='Bob', age=5, height=85)]
    globs['df3'] = sc.parallelize(rows3).toDF()
    rows4 = [Row(course="dotNET", year=2012, earnings=10000),
             Row(course="Java", year=2012, earnings=20000),
             Row(course="dotNET", year=2012, earnings=5000),
             Row(course="dotNET", year=2013, earnings=48000),
             Row(course="Java", year=2013, earnings=30000)]
    globs['df4'] = sc.parallelize(rows4).toDF()
    flags = doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE | doctest.REPORT_NDIFF
    (failure_count, test_count) = doctest.testmod(
        pyspark.sql.group, globs=globs, optionflags=flags)
    spark.stop()
    if failure_count:
        exit(-1)
if __name__ == "__main__":
    # Running this file directly executes the doctest suite.
    _test()
| |
#!/usr/bin/env python
# Custom module
import lightCommon as lc
# For communicating with GPIO on Raspberry Pi
import RPi.GPIO as gpio
import socket
import sys,signal,os,time
import struct
from threading import Thread
# Global
# List of this node's lights, populated from lc.lightList in the __main__
# block below and mutated (status field) by turnOn/turnOff.
lights = []
def handler(signum, frame):
    # SIGINT handler: report the signal and close the module-level listening
    # socket `s` (created in __main__), which unblocks serverLoop()'s accept().
    print 'Signal handler called with signal', signum
    s.close()
def findRelayActive(pin):
    """Return the 'relay_active' value for the relay wired to `pin`.

    Called by turnOn/turnOff with just a GPIO pin number; looks the pin up
    in this node's relay list from lightCommon. Implicitly returns None when
    no relay matches (no error handling yet).
    """
    for relay in lc.getNodeProps()['relays']:
        if relay['relay_pin'] == pin:
            return relay['relay_active']
def turnOn(num):
    '''
    This function turns on a locally-connected light based
    on the light number. This SHOULD BE the only place where
    lc.on/off gets mapped to a T/F write to GPIO
    '''
    # First look up the relay_active property
    # (the GPIO level that energizes this light's relay).
    relay_on = findRelayActive(lights[num][lc.l_pin])
    try:
        gpio.output(lights[num][lc.l_pin],relay_on)
        # Record the new logical state alongside the physical write.
        lights[num][lc.l_stat] = lc.on
    except KeyError as e:
        print 'KeyError in turnOn: ' + str(e)
def turnOff(num):
    '''
    This function turns off a locally-connected light based
    on the light number. This SHOULD BE the only place where
    lc.on/off gets mapped to a T/F write to GPIO
    '''
    # First look up the relay_active property
    # ("off" is the inverse of the relay's active level).
    relay_off = not findRelayActive(lights[num][lc.l_pin])
    try:
        gpio.output(lights[num][lc.l_pin],relay_off)
        # Record the new logical state alongside the physical write.
        lights[num][lc.l_stat] = lc.off
    except KeyError as e:
        print 'KeyError in turnOff: ' + str(e)
def setLight(num, status, tif=0):
    """Actuate light `num` to `status`, then notify all linked lights.

    `tif` ("time in future") is a delay in seconds applied before actuating;
    because this call blocks for the whole delay, callers should run it in a
    thread whenever tif > 0.
    """
    if tif > 0:
        time.sleep(tif)
    action = turnOn if status == lc.on else turnOff
    action(num)
    # Propagate the same status (with no further delay) to linked lights.
    for link in lights[num][lc.l_links]:
        lc.sendSetMsg(link[lc.link_node], link[lc.link_num], status, 0)
def getStatus(num):
    '''
    Returns a light number's name and status (on, off)
    '''
    try:
        status = lights[num][lc.l_stat]
        name = lights[num][lc.l_name]
        return status, name
    except IndexError as e:
        # NOTE(review): on a bad index this returns None implicitly; callers
        # unpack the result into two names, so the failure surfaces upstream.
        print 'IndexError in getStatus: ' + str(e)
def serverLoop(s):
    # Accept-and-serve loop over the listening socket `s`. Each client
    # connection carries exactly one fixed-size request (lc.packString);
    # the server replies as needed and always finishes with a lc.msg_done
    # record before closing the connection.
    # Never end... This is a server after all
    while s:
        # Accept a connection from any client
        conn, addr = s.accept()
        print str(time.time()) + ' Connection accepted from: ', addr
        # Clients to the server immediately send a message
        recv_msg = conn.recv(struct.calcsize(lc.packString))
        try:
            # Unpack the message
            req_type, light_num, light_status, time_in_future = struct.unpack(lc.packString,recv_msg)
            # For debugging
            print('req_type: %d light_num: %d light_status: %d time_in_future = %d' % (req_type, light_num, light_status, time_in_future) )
            # Query that state of one light
            if ( req_type == lc.msg_info ):
                print 'msg_info'
                light_status, light_name = getStatus(light_num)
                print light_num,light_status,light_name
                send_msg = struct.pack(lc.queryPackString,req_type,light_num,light_status,light_name)
                conn.send(send_msg)
            # Set the state of one light
            elif ( req_type == lc.msg_set ):
                print 'msg_set'
                print light_num, light_status, time_in_future
                # Threaded so a delayed set (time_in_future > 0) does not
                # block the accept loop while setLight sleeps.
                Thread(target=setLight,args=(light_num, light_status, time_in_future,)).start()
            # Query all the lights available
            elif ( req_type == lc.msg_dump ):
                print 'msg_dump'
                # One reply record per light; client reads until msg_done.
                for light in lights:
                    light_num = lights.index(light)
                    light_status, light_name = getStatus(light_num)
                    print req_type,light_num, light_status, light_name
                    send_msg = struct.pack(lc.queryPackString,req_type,light_num,light_status,light_name)
                    conn.send(send_msg)
            else:
                print('Incorrect request type: %d' % (req_type) )
        # If a light is requested that doesn't exist
        except KeyError:
            print "Error: Light number", light_num, "does not exist."
        # If an error happens in packing / unpacking
        except struct.error as e:
            print "Error:",e
        # Only one message processed at a time. Dump the client
        conn.send(struct.pack(lc.queryPackString,lc.msg_done,0,0,''))
        conn.close()
if __name__ == '__main__':
    # Set the signal handler
    signal.signal(signal.SIGINT, handler)
    print('Process ID is: ' + str(os.getpid()))
    nodeInfo = lc.getNodeProps()
    try:
        nodeName = nodeInfo['node']
    except KeyError:
        print('This node not found')
    # Set up GPIO on the raspberry pi
    gpio.setmode(gpio.BCM)
    # A dictionary containing info about every light connected to the RPi
    # In the form of: 'lightNum':[pinNum,onOrOffBool,name]
    try:
        lights = lc.lightList[nodeName]
    except KeyError:
        # NOTE(review): `myIp` is undefined here, so this branch would itself
        # raise NameError; `nodeName` was presumably intended -- confirm.
        print('No lights found for node ' + myIp)
    # Set up every light in the dictionary
    for light in lights:
        # Translate the light's desired logical state into the relay's
        # active level before configuring the pin, so setup() does not
        # glitch the relay.
        initial_state = findRelayActive(light[lc.l_pin])
        if light[lc.l_stat] == lc.off:
            initial_state = not initial_state
        gpio.setup(light[lc.l_pin],gpio.OUT,initial=initial_state)
        stat_msg = 'LightNum: ' + str(light) + ' Pin: '
        stat_msg = stat_msg + str(light[lc.l_pin]) + ' State: '
        stat_msg = stat_msg + str(light[lc.l_stat]) + ' Name: '
        stat_msg = stat_msg + str(light[lc.l_name])
        print(stat_msg)
    print 'GPIO set up'
    try:
        # Socket listening on any interface, socket port set in lightCommon
        listen_ip = '0.0.0.0'
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.bind((listen_ip, lc.socketPort))
        s.listen(5)
        serverLoop(s)
    except socket.error as e:
        print 'Socket error: ',e
    # Release GPIO pins and the socket on the way out (including the SIGINT
    # path, where handler() closes `s` and accept() raises socket.error).
    gpio.cleanup()
    s.close()
    print('Exiting server')
| |
import os
import sys
import shlex
import inspect
import traceback
from cmd import Cmd
from functools import partial
from qshell.exceptions import CommandNotFound, log_err
class _Cmd(Cmd):
    """Thin Cmd subclass that proxies everything to a Context.

    Unknown 'do_*' attribute lookups resolve to wrappers around the commands
    registered on the Context, so the Context stays the single registry.
    """
    def __init__(self, context, *args, **kwargs):
        Cmd.__init__(self, *args, **kwargs)
        self._context = context
    def __getattr__(self, name):
        """Resolve 'do_<cmd>' lookups against the Context registry."""
        if name.startswith('do_'):
            try:
                return self._context.get_command_wrapper(name[3:].lower())
            except CommandNotFound:
                log_err('Unknown command: %s' % name[3:])
        return getattr(Cmd, name)
    def completenames(self, text, *ignored):
        """Autocomplete command names from the Context registry."""
        prefix = text.lower()
        return [name for name in self._context.get_names()
                if name.startswith(prefix)]
    def do_help(self, arg):
        """Print help for one command, or list every command."""
        if not arg:
            sys.stdout.write(
                "(help) Available commands (type 'help <command>'):\n")
            sys.stdout.write(' '.join(sorted(self._context.get_names())))
            sys.stdout.write('\n')
            return
        try:
            cmd = self._context.get_command(arg.lower())
        except CommandNotFound:
            log_err('Unknown command: %s' % arg)
        else:
            sys.stdout.write('(help) %r\n' % cmd)
    def default(self, line):
        """Unknown input: ignore it."""
        pass
    def emptyline(self):
        """Blank line: do nothing (don't repeat the last command)."""
        pass
class Command(object):
    """A single registered command: a name bound to a callable."""
    def __init__(self, name, func):
        self.name = name
        self.func = func
        self.help = inspect.getdoc(func) or ''
        self.arg_string = self._inspect_func(func)
    def __call__(self, *args, **kwargs):
        return self.func(*args, **kwargs)
    def __str__(self):
        return self.name
    def __repr__(self):
        text = 'Syntax: %s %s \n%s' % (self.name, self.arg_string, self.help)
        return text.strip()
    def _inspect_func(self, func):
        """Build a usage string from the function signature, e.g.
        'a b [c=1] [*args] [**kwargs]'."""
        spec = inspect.getargspec(func)
        parts = []
        if spec.keywords:
            parts.append('[**%s]' % spec.keywords)
        if spec.varargs:
            parts.append('[*%s]' % spec.varargs)
        # Pair defaults with the trailing positional args, right to left.
        remaining = list(spec.args)
        for default in reversed(spec.defaults or ()):
            parts.append('[%s=%s]' % (remaining.pop(), default))
        parts.extend(reversed(remaining))
        return ' '.join(reversed(parts))
class Context(object):
"""
The main context. A regsitry of commands.
Serves as a proxy between the Cmd class to the actual commands (functions).
"""
def __init__(self, prompt='>>> '):
self._registry = {}
self.cmd = _Cmd(self)
def register(self, name, func):
name = name.lower().rstrip('_')
self._registry[name] = Command(name, func)
def start_loop(self, intro=''):
try:
self.cmd.cmdloop(intro)
except KeyboardInterrupt:
sys.stdout.write('\nBye\n')
def get_names(self):
return self._registry.keys()
def get_command(self, name):
"""Returns the Command instance representing 'name' command"""
try:
return self._registry[name]
except KeyError:
raise CommandNotFound(name)
def get_command_wrapper(self, name):
"""
Returns a execute_command() 'partial' callable, referring the
command to be executed as received in `name`.
If not found, will raise CommandNotFound.
"""
self.get_command(name)
return partial(self.execute_command, name)
def execute_command(self, name, line):
"""
Called by the _Cmd class with the following arguments:
`name` - name of the command to execute.
`line` - a single string of args received from the Cmd super class.
"""
cmd = self._registry[name]
args, kwargs = self._parse_args(line)
try:
try:
result = cmd(*args, **kwargs)
sys.stdout.write('%s\n' % (str(result) or '(ok)'))
except TypeError as e:
# In case of TypeError, we need to check if it was due to
# bad call to the command function, or in the function itself.
exc_info = sys.exc_info()
if (os.path.dirname(__file__) ==
os.path.dirname(
traceback.extract_tb(exc_info[2])[-1][0])):
# Bad arguments in the call to the command function
s = str(e).split('()', 1)
s = s[len(s)-1].strip()
log_err("Bad arguments: '%s' %s" % (name, s))
else:
# TypeError raised inside the command function, so re-raise
# This will be catched by the below 'except' block
raise exc_info[0], exc_info[1], exc_info[2]
except Exception as e:
# Command execution throws an exception
log_err("Exception in '%s':\n%s" % (name, traceback.format_exc()))
def _parse_args(self, line):
"""
Parses the arguments from a string 'line'.
Turns args like "name=john" into {'name': 'john'}.
Also, identify ints/floats/booleans and casts as needed.
"""
args = []
kwargs = {}
arg_list = shlex.split(line)
while arg_list:
arg = arg_list.pop(0)
if '=' in arg:
k, v = arg.split('=', 1)
kwargs[k] = self._cast(v)
else:
args.append(self._cast(arg))
return args, kwargs
def _cast(self, v):
# Cast boolean
if v in ('True', 'true'):
return True
if v in ('False', 'false'):
return False
# Cast int/float
try:
fv = float(v)
except ValueError:
return v
if not '.' in v:
fv = int(fv)
return fv
@property
def prompt(self):
return self.cmd.prompt
@prompt.setter
def prompt(self, value):
self.cmd.prompt = value
# A global registry context
# Shared module-level Context; importers register their commands on this.
ctx = Context()
| |
#!/usr/bin/env python
"""system: syscalls for system management
This module includes a number of helpful functions for managing and maintaining
a system. These were originally created to support a Linux containers solution
written in python but may be generally useful and have been broken off and
integrated into butter in this module
* mount: Mount filesystems using the `man 2 mount` syscall (similar to
/sbin/mount)
* umount: Unmount filesystems in the system
* pivot_root: Exchange the filesystem at 'new' with '/' and mount the old
filesystem at 'old'
* sethostname: Set the hostname of the system
* gethostname: Retrieve the current system hostname (identical to
:py:func:`socket.gethostname`)
* getpid: Call the syscall `getpid` directly, bypassing glibc and any caching
it performs
* getppid: Call the syscall `getppid` directly, bypassing glibc and any caching
it performs
"""
from __future__ import print_function
from .utils import PermissionError, InternalError, UnknownError
from os.path import isdir as _isdir
from cffi import FFI as _FFI
import errno as _errno
_ffi = _FFI()
_ffi.cdef("""
# define MS_BIND ...
# define MS_DIRSYNC ...
# define MS_MANDLOCK ...
# define MS_MOVE ...
# define MS_NOATIME ...
# define MS_NODEV ...
# define MS_NODIRATIME ...
# define MS_NOEXEC ...
# define MS_NOSUID ...
# define MS_RDONLY ...
# define MS_RELATIME ...
# define MS_REMOUNT ...
# define MS_SILENT ...
# define MS_STRICTATIME ...
# define MS_SYNCHRONOUS ...
# define MNT_FORCE ...
# define MNT_DETACH ...
# define MNT_EXPIRE ...
# define UMOUNT_NOFOLLOW ...
# define HOST_NAME_MAX ...
int mount(const char *source, const char *target,
const char *filesystemtype, unsigned long mountflags,
const void *data);
int umount2(const char *target, int flags);
extern int pivot_root(const char * new_root, const char * put_old);
int gethostname(char *name, size_t len);
int sethostname(const char *name, size_t len);
// Muck with the types so cffi understands it
// normmaly pid_t (defined as int32_t in
// /usr/include/arm-linux-gnueabihf/bits/typesizes.h
int32_t getpid(void);
int32_t getppid(void);
""")
_C = _ffi.verify("""
//#include <sched.h>
#include <sys/mount.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/syscall.h>
#include <sys/mount.h>
int32_t getpid(void){
return syscall(SYS_getpid);
};
int32_t getppid(void){
return syscall(SYS_getppid);
};
""", libraries=[], ext_package="butter")
# mount(2) flags, re-exported from the compiled C module so callers can use
# module-level names.
MS_BIND = _C.MS_BIND
MS_DIRSYNC = _C.MS_DIRSYNC
MS_MANDLOCK = _C.MS_MANDLOCK
MS_MOVE = _C.MS_MOVE
MS_NOATIME = _C.MS_NOATIME
MS_NODEV = _C.MS_NODEV
MS_NODIRATIME = _C.MS_NODIRATIME
MS_NOEXEC = _C.MS_NOEXEC
MS_NOSUID = _C.MS_NOSUID
MS_RDONLY = _C.MS_RDONLY
MS_RELATIME = _C.MS_RELATIME
MS_REMOUNT = _C.MS_REMOUNT
MS_SILENT = _C.MS_SILENT
MS_STRICTATIME = _C.MS_STRICTATIME
MS_SYNCHRONOUS = _C.MS_SYNCHRONOUS
# FIX: umount(2) flags were declared in the cdef and referenced by umount()'s
# docstring, but never exported at module level -- callers had no way to pass
# them. Export them alongside the MS_* constants.
MNT_FORCE = _C.MNT_FORCE
MNT_DETACH = _C.MNT_DETACH
MNT_EXPIRE = _C.MNT_EXPIRE
UMOUNT_NOFOLLOW = _C.UMOUNT_NOFOLLOW
HOST_NAME_MAX = _C.HOST_NAME_MAX
# seems reasonable
MAXPATHLEN = 256
# Direct syscall wrappers (bypass glibc) defined in the C shim above.
getpid = _C.getpid
getppid = _C.getppid
class Retry(Exception):
    """Filesystem now marked as expired"""
    # Raised by umount() when the kernel reports EAGAIN after an MNT_EXPIRE
    # request; calling umount() again then unmounts the expired filesystem.
def mount(src, target, fs, flags=0, data=""):
    """Mount the specified filesystem at `target`
    Arguments
    ----------
    :param str src: Filesystem dependent string specifying the source of the mount
                    eg for nfs this would be <ip>:/remote/path or a block device
                    dev for a normal filesystem
    :param str target: The path to mount the filesystem on
    :param str fs: The type of filesystem to mount on `target`
    :param int flags: Extra conditions on the mount (see flags below)
    :param str data: Additional data to pass to the filesystem driver
    Flags
    ------
    :py:const:`MS_BIND`: Make mount a bind mount, `fs` is ignored with this option
    :py:const:`MS_DIRSYNC`: Perform all directory operations synchronously
    :py:const:`MS_MANDLOCK`: Enable Mandatory locking for the filesystem
    :py:const:`MS_MOVE`: Move a mountpoint to a new location atomically without unmounting
    :py:const:`MS_NOATIME`: Don't update atime on file access
    :py:const:`MS_NODEV`: Prevent device nodes from being created on the filesystem
    :py:const:`MS_NODIRATIME`: Don't update Directory atime on file access
    :py:const:`MS_NOEXEC`: Prevent files from being executed on the filesystem via `exec()`
    :py:const:`MS_NOSUID`: Disable SUID flag on files in this filesystem
    :py:const:`MS_RDONLY`: Mount filesystem Read Only
    :py:const:`MS_RELATIME`: only update atime if ctime or mtime have been updated
    :py:const:`MS_REMOUNT`: Remount the filesystem in place
    :py:const:`MS_SILENT`: Disable printing messages to dmesg for the mount
    :py:const:`MS_STRICTATIME`: Always update atime on file access
    :py:const:`MS_SYNCHRONOUS`: Mount the filesystem in synchronous mode (same as passing
                                O_SYNC to :py:func:`os.open()`)
    Returns
    --------
    No return value
    Exceptions
    -----------
    :raises ValueError: Attempt to mount a Read only filesystem without specifying MS_RDONLY as a flag
    :raises ValueError: `src` or `target` contain a component that does not exist or was not searchable
    :raises ValueError: `src` is already mounted
    :raises ValueError: Filesystem cannot be mounted read only as it still holds files open for writing
    :raises ValueError: Target is busy (it is the working directory of some thread, the mount point of another device, has open files, etc.)
    :raises OSError: `src` has an invalid superblock
    :raises OSError: MS_REMOUNT was attempted but `src` is not mounted on `target`
    :raises OSError: MS_MOVE attempted but `src` is not a mount point or is '/'
    :raises OSError: Filesystem not available in the kernel
    :raises ValueError: Too many links encountered during pathname resolution
    :raises ValueError: MS_MOVE attempted while `target` is a descendent of `src`
    :raises ValueError: `src` or `target` longer than MAXPATHLEN
    :raises ValueError: `src` or `target` contains an empty or non existent component
    :raises ValueError: `src` is not a valid block device and a block device is required by this filesystem
    :raises ValueError: `target` or prefix of `src` is not a directory
    :raises OSError: The major number of `src` is out of the range for valid block devices
    :raises MemoryError: Kernel could not allocate enough memory to handle the request
    :raises PermissionError: No permission to mount filesystem
    """
    # Validate before encoding so errors are raised with clear messages.
    assert 0 < len(src) < MAXPATHLEN, "src is too long in length"
    assert 0 < len(target) < MAXPATHLEN, "target is too long in length"
    assert isinstance(src, (str, bytes)), "src must be a string"
    assert isinstance(target, (str, bytes)), "target must be a string"
    assert isinstance(fs, (str, bytes)), "fs must be a string"
    assert isinstance(flags, int ), "flags must be a integer"
    assert isinstance(data, (str, bytes)), "data must be a string"
    # The C side needs bytes; encode any str arguments.
    if isinstance(src, str):
        src = src.encode()
    if isinstance(target, str):
        target = target.encode()
    if isinstance(fs, str):
        fs = fs.encode()
    if isinstance(data, str):
        data = data.encode()
    err = _C.mount(src, target, fs, flags, data)
    if err < 0:
        # mount(2) returns -1 on failure; map errno to a Python exception.
        err = _ffi.errno
        if err == _errno.EACCES:
            raise PermissionError("A component of a path was not searchable. (See also path_resolution(7).) Or, mounting a read-only filesystem was attempted without giving the MS_RDONLYflag. Or, the block device source is located on a filesystem mounted with the MS_NODEV option")
        elif err == _errno.EBUSY:
            raise ValueError("source is already mounted. Or, it cannot be remounted read-only, because it still holds files open for writing. Or, it cannot be mounted on target because target is still busy (it is the working directory of some thread, the mount point of another device, has open files, etc.)")
        elif err == _errno.EFAULT:
            # In practice this should not be raised as it means this lib has passed in invalid
            # data, this is a bug so report it if you can
            raise ValueError("One of the pointer arguments points outside the user address space")
        elif err == _errno.EINVAL:
            raise ValueError("source had an invalid superblock. Or, a remount (MS_REMOUNT) was attempted, but source was not already mounted on target. Or, a move (MS_MOVE) was attempted, but source was not a mount point, or was '/'")
        elif err == _errno.ELOOP:
            raise ValueError("Too many links encountered during pathname resolution. Or, a move was attempted, while target is a descendant of source")
        elif err == _errno.EMFILE:
            raise OSError("Table of dummy devices is full")
        elif err == _errno.ENAMETOOLONG:
            # This is checked in the assert above but check for it and report it correctly anyway
            raise ValueError("A pathname was longer than MAXPATHLEN ({})".format(MAXPATHLEN))
        elif err == _errno.ENODEV:
            raise OSError("filesystemtype not configured in the kernel")
        elif err == _errno.ENOENT:
            raise ValueError("A pathname was empty or had a nonexistent component")
        elif err == _errno.ENOMEM:
            raise MemoryError("The kernel could not allocate a free page to copy filenames or data into")
        elif err == _errno.ENOTBLK:
            raise ValueError("source is not a block device and a device was required")
        elif err == _errno.ENOTDIR:
            raise ValueError("target, or a prefix of source, is not a directory")
        elif err == _errno.ENXIO:
            raise OSError("The major number of the block device source is out of range")
        elif err == _errno.EPERM:
            raise PermissionError("Permission denied, CAP_SYS_ADMIN not in capability bits")
        else:
            # If you are here, its a bug. send us the traceback
            raise UnknownError(err)
def umount(target, flags=0):
    """Unmount the specified filesystem

    Arguments
    ----------
    :param str target: The path to the filesystem to unmount
    :param int flags: Extra options to use to unmount the filesystem

    Flags
    ------
    :py:const:`MNT_FORCE`: Forcibly detach the filesystem, even if busy (NFS only)
    :py:const:`MNT_DETACH`: Lazily detach the filesystem (filesystem will be detached
                            when there are no more consumers of the filesystem). This
                            will cause the mount to appear unmounted to processes that
                            are not using the detached mount point
    :py:const:`MNT_EXPIRE`: Mark the mountpoint as expired and trigger an EAGAIN. Any
                            access by a program will mark the filesystem as active
                            again. If a filesystem is marked as expired, then another
                            umount call will unmount the filesystem normally
    :py:const:`UMOUNT_NOFOLLOW`: Do not dereference any symlinks when unmounting the
                                 filesystem

    Returns
    --------
    No return value

    Exceptions
    -----------
    :raises Retry: Filesystem now marked as expired, call again to unmount
    :raises ValueError: Could not unmount filesystem as it is currently in use
    :raises OSError: Target is not a mount point
    :raises OSError: umount called with MNT_EXPIRE and either MNT_DETACH or MNT_FORCE
    :raises ValueError: Supplied path is too long
    :raises ValueError: Supplied path has an empty or non-existent component
    :raises MemoryError: Kernel could not allocate enough memory to handle the request
    :raises PermissionError: No permission to unmount the filesystem (CAP_SYS_ADMIN required)
    """
    assert 0 < len(target) < MAXPATHLEN, "target is too long in length"
    assert isinstance(target, (str, bytes)), "target must be a string"
    assert isinstance(flags, int), "flags must be a integer"
    # The syscall expects a byte string; encode text paths before handing them to C.
    if isinstance(target, str):
        target = target.encode()
    err = _C.umount2(target, flags)
    if err < 0:
        # Map the C errno to a descriptive Python exception.
        err = _ffi.errno
        if err == _errno.EAGAIN:
            raise Retry("Filesystem marked as expired, call again to unmount filesystem")
        elif err == _errno.EBUSY:
            raise ValueError("target could not be unmounted because it is busy")
        elif err == _errno.EFAULT:
            raise ValueError("target points outside the user address space")
        elif err == _errno.EINVAL:
            raise ValueError("target is not a mount point. Or, umount2() was called with MNT_EXPIRE and either MNT_DETACH or MNT_FORCE")
        elif err == _errno.ENAMETOOLONG:
            raise ValueError("A pathname was longer than MAXPATHLEN")
        elif err == _errno.ENOENT:
            raise ValueError("A pathname was empty or had a nonexistent component")
        elif err == _errno.ENOMEM:
            raise MemoryError("The kernel could not allocate a free page to copy filenames or data into")
        elif err == _errno.EPERM:
            raise PermissionError("Permission denied, CAP_SYS_ADMIN not in capability bits")
        else:
            # If you are here, its a bug. send us the traceback
            raise UnknownError(err)
def pivot_root(new, old):
    """Move the filesystem specified by `new` and mount it at '/' and move the old '/' to `old`

    Arguments
    ----------
    :param str new: Path to a mounted filesystem to make the new '/'
    :param str old: Location where current '/' should be mounted

    Returns
    --------
    No return value

    Exceptions
    -----------
    :raises ValueError: `new` or `old` does not refer to a directory
    :raises ValueError: `new` or `old` are on the current root filesystem or filesystem already mounted on `old`
    :raises ValueError: `old` is not a folder underneath `new`
    :raises PermissionError: No permission to pivot_root to new location
    """
    assert len(new) > 0
    assert len(old) > 0
    assert isinstance(new, (str, bytes)), "new must be a string"
    assert isinstance(old, (str, bytes)), "old must be a string"
    # The syscall expects byte strings; encode text paths before handing them to C.
    if isinstance(new, str):
        new = new.encode()
    if isinstance(old, str):
        old = old.encode()
    err = _C.pivot_root(new, old)
    if err < 0:
        err = _ffi.errno
        if err == _errno.EINVAL:
            raise ValueError("{} is not a sub-directory of {}".format(old, new))
        elif err == _errno.EBUSY:
            raise ValueError("old or new are on the current root filesystem or filesystem already mounted on {}".format(old))
        elif err == _errno.ENOTDIR:
            # BUGFIX: the checks were inverted — we must complain about the path
            # that is NOT a directory, not the one that is.
            if not _isdir(new):
                raise ValueError("{} is not a Directory".format(new))
            elif not _isdir(old):
                raise ValueError("{} is not a Directory".format(old))
            else:
                # this is a bug but testing for this case just in case, let us know if you
                # hit it
                raise ValueError("old or new is not a dir but could not work out which one")
        elif err == _errno.EPERM:
            raise PermissionError("Permission denied, CAP_SYS_ADMIN not in capability bits")
        else:
            # If you are here, its a bug. send us the traceback
            raise UnknownError(err)
def sethostname(hostname):
    """Set the system hostname via the sethostname(2) syscall.

    Arguments
    ----------
    :param str hostname: The hostname to set

    Returns
    --------
    No return value

    Exceptions
    -----------
    :raises ValueError: Hostname too long
    :raises PermissionError: No permission to set hostname
    """
    assert len(hostname) < HOST_NAME_MAX, "Specified hostname too long"
    assert isinstance(hostname, (str, bytes)), "Hostname must be a string"
    # The syscall takes a byte buffer; encode text input before the call.
    name = hostname.encode() if isinstance(hostname, str) else hostname
    if _C.sethostname(name, len(name)) >= 0:
        return
    # The call failed: translate errno into an appropriate exception.
    code = _ffi.errno
    if code == _errno.EFAULT:
        # in practice this should never be raised as it means this function is broken
        raise ValueError("Name is an invalid address")
    if code == _errno.EINVAL:
        # same as above, we check values and supply the right ones but just in case we
        # handle the error case
        raise ValueError("length is negative or hostname is longer than allowed value")
    if code == _errno.EPERM:
        raise PermissionError("Permission denied, CAP_SYS_ADMIN not in capability bits")
    # If you are here, its a bug. send us the traceback
    raise UnknownError(code)
def gethostname():
    """Retrive the specified hostname of the system

    Returns
    --------
    :return: The hostname of the system
    :rtype: bytes

    NOTE(review): the original docstring said ``str``, but the value comes
    straight from ``_ffi.string`` without decoding, which yields ``bytes`` on
    Python 3 — confirm the intended return type with callers.
    """
    # Allocate a C buffer large enough for any legal hostname.
    hostname = _ffi.new('char[]', HOST_NAME_MAX)
    err = _C.gethostname(hostname, len(hostname))
    if err < 0:
        err = _ffi.errno
        if err == _errno.EFAULT:
            # in practice this should never be raised as it means this function is broken
            raise ValueError("Name is an invalid address")
        elif err == _errno.EINVAL:
            # same as above, we check values and supply the right ones but just in case we
            # handle the error case
            raise ValueError("length is negative or hostname is longer than allowed value")
        elif err == _errno.ENAMETOOLONG:
            # great, for some reason we did not allocate a long enough buffer
            # this is internal and is a bug in our code if reached
            # we allocated HOST_NAME_MAX for the length above so this should
            # be impossible to hit, using InternalError rather than ValueError as the
            # caller of this code did not provide an incorrect value but instead
            # the platform/OS provided an invalid value
            raise InternalError("Supplied buffer not long enough")
        elif err == _errno.EPERM:
            raise PermissionError("Permission denied, CAP_SYS_ADMIN not in capability bits")
        else:
            # If you are here, its a bug. send us the traceback
            raise UnknownError(err)
    # Convert the C buffer to a Python byte string (stops at the first NUL).
    hostname = _ffi.string(hostname, HOST_NAME_MAX)
    return hostname
# ---- end of mount/hostname wrapper module; Django OAuth2 provider tests follow ----
from __future__ import unicode_literals
import base64
import json
import datetime
import mock
from django.test import TestCase, RequestFactory
from django.core.urlresolvers import reverse
from django.utils import timezone
from ..compat import urlparse, parse_qs, urlencode, get_user_model
from ..models import get_application_model, Grant, AccessToken
from ..settings import oauth2_settings
from ..oauth2_validators import OAuth2Validator
from ..views import ProtectedResourceView
from .test_utils import TestCaseUtils
# Resolve the swappable application and user models once at import time so
# every test class below shares the same concrete classes.
Application = get_application_model()
UserModel = get_user_model()
# mocking a protected resource view
class ResourceView(ProtectedResourceView):
    """Minimal protected resource used as a stand-in view in these tests."""

    def get(self, request, *args, **kwargs):
        # The tests only need a sentinel value, not a real HttpResponse.
        return "This is a protected resource"
class BaseTest(TestCaseUtils, TestCase):
    """
    Shared fixture for the authorization-code tests: a resource-owner user,
    a developer user, and a confidential authorization-code application.
    """

    def setUp(self):
        self.factory = RequestFactory()
        # Resource owner who grants access, and the developer who owns the app.
        self.test_user = UserModel.objects.create_user("test_user", "test@user.com", "123456")
        self.dev_user = UserModel.objects.create_user("dev_user", "dev@user.com", "123456")
        # Allow the non-standard scheme exercised by the custom redirect_uri tests.
        oauth2_settings.ALLOWED_REDIRECT_URI_SCHEMES = ['http', 'custom-scheme']
        self.application = Application(
            name="Test Application",
            redirect_uris="http://localhost http://example.com http://example.it custom-scheme://example.com",
            user=self.dev_user,
            client_type=Application.CLIENT_CONFIDENTIAL,
            authorization_grant_type=Application.GRANT_AUTHORIZATION_CODE,
        )
        self.application.save()
        oauth2_settings._SCOPES = ['read', 'write']

    def tearDown(self):
        self.application.delete()
        self.test_user.delete()
        self.dev_user.delete()
class TestAuthorizationCodeView(BaseTest):
    """Tests for the authorization endpoint (response_type=code)."""

    def test_skip_authorization_completely(self):
        """
        If application.skip_authorization = True, should skip the authorization page.
        """
        self.client.login(username="test_user", password="123456")
        self.application.skip_authorization = True
        self.application.save()
        query_string = urlencode({
            'client_id': self.application.client_id,
            'response_type': 'code',
            'state': 'random_state_string',
            'scope': 'read write',
            'redirect_uri': 'http://example.it',
        })
        url = "{url}?{qs}".format(url=reverse('oauth2_provider:authorize'), qs=query_string)
        response = self.client.get(url)
        self.assertEqual(response.status_code, 302)

    def test_pre_auth_invalid_client(self):
        """
        Test error for an invalid client_id with response_type: code
        """
        self.client.login(username="test_user", password="123456")
        query_string = urlencode({
            'client_id': 'fakeclientid',
            'response_type': 'code',
        })
        url = "{url}?{qs}".format(url=reverse('oauth2_provider:authorize'), qs=query_string)
        response = self.client.get(url)
        self.assertEqual(response.status_code, 400)

    def test_pre_auth_valid_client(self):
        """
        Test response for a valid client_id with response_type: code
        """
        self.client.login(username="test_user", password="123456")
        query_string = urlencode({
            'client_id': self.application.client_id,
            'response_type': 'code',
            'state': 'random_state_string',
            'scope': 'read write',
            'redirect_uri': 'http://example.it',
        })
        url = "{url}?{qs}".format(url=reverse('oauth2_provider:authorize'), qs=query_string)
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        # check form is in context and form params are valid
        self.assertIn("form", response.context)
        form = response.context["form"]
        self.assertEqual(form['redirect_uri'].value(), "http://example.it")
        self.assertEqual(form['state'].value(), "random_state_string")
        self.assertEqual(form['scope'].value(), "read write")
        self.assertEqual(form['client_id'].value(), self.application.client_id)

    def test_pre_auth_valid_client_custom_redirect_uri_scheme(self):
        """
        Test response for a valid client_id with response_type: code
        using a non-standard, but allowed, redirect_uri scheme.
        """
        self.client.login(username="test_user", password="123456")
        query_string = urlencode({
            'client_id': self.application.client_id,
            'response_type': 'code',
            'state': 'random_state_string',
            'scope': 'read write',
            'redirect_uri': 'custom-scheme://example.com',
        })
        url = "{url}?{qs}".format(url=reverse('oauth2_provider:authorize'), qs=query_string)
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        # check form is in context and form params are valid
        self.assertIn("form", response.context)
        form = response.context["form"]
        self.assertEqual(form['redirect_uri'].value(), "custom-scheme://example.com")
        self.assertEqual(form['state'].value(), "random_state_string")
        self.assertEqual(form['scope'].value(), "read write")
        self.assertEqual(form['client_id'].value(), self.application.client_id)

    def test_pre_auth_approval_prompt(self):
        """
        With approval_prompt=auto, an existing token with matching scopes skips
        the authorization form (302); differing scopes re-prompt the user (200).
        """
        tok = AccessToken.objects.create(user=self.test_user, token='1234567890',
                                         application=self.application,
                                         expires=timezone.now()+datetime.timedelta(days=1),
                                         scope='read write')
        self.client.login(username="test_user", password="123456")
        query_string = urlencode({
            'client_id': self.application.client_id,
            'response_type': 'code',
            'state': 'random_state_string',
            'scope': 'read write',
            'redirect_uri': 'http://example.it',
            'approval_prompt': 'auto',
        })
        url = "{url}?{qs}".format(url=reverse('oauth2_provider:authorize'), qs=query_string)
        response = self.client.get(url)
        self.assertEqual(response.status_code, 302)
        # user already authorized the application, but with different scopes: prompt them.
        tok.scope = 'read'
        tok.save()
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)

    def test_pre_auth_approval_prompt_default(self):
        """
        The default REQUEST_APPROVAL_PROMPT is 'force', so the user is prompted
        (200) even though a token with matching scopes already exists.
        """
        self.assertEqual(oauth2_settings.REQUEST_APPROVAL_PROMPT, 'force')
        AccessToken.objects.create(user=self.test_user, token='1234567890',
                                   application=self.application,
                                   expires=timezone.now()+datetime.timedelta(days=1),
                                   scope='read write')
        self.client.login(username="test_user", password="123456")
        query_string = urlencode({
            'client_id': self.application.client_id,
            'response_type': 'code',
            'state': 'random_state_string',
            'scope': 'read write',
            'redirect_uri': 'http://example.it',
        })
        url = "{url}?{qs}".format(url=reverse('oauth2_provider:authorize'), qs=query_string)
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)

    def test_pre_auth_approval_prompt_default_override(self):
        """
        Overriding REQUEST_APPROVAL_PROMPT to 'auto' skips the prompt (302)
        when a token with matching scopes already exists.
        """
        oauth2_settings.REQUEST_APPROVAL_PROMPT = 'auto'
        AccessToken.objects.create(user=self.test_user, token='1234567890',
                                   application=self.application,
                                   expires=timezone.now()+datetime.timedelta(days=1),
                                   scope='read write')
        self.client.login(username="test_user", password="123456")
        query_string = urlencode({
            'client_id': self.application.client_id,
            'response_type': 'code',
            'state': 'random_state_string',
            'scope': 'read write',
            'redirect_uri': 'http://example.it',
        })
        url = "{url}?{qs}".format(url=reverse('oauth2_provider:authorize'), qs=query_string)
        response = self.client.get(url)
        self.assertEqual(response.status_code, 302)

    def test_pre_auth_default_redirect(self):
        """
        Test for default redirect uri if omitted from query string with response_type: code
        """
        self.client.login(username="test_user", password="123456")
        query_string = urlencode({
            'client_id': self.application.client_id,
            'response_type': 'code',
        })
        url = "{url}?{qs}".format(url=reverse('oauth2_provider:authorize'), qs=query_string)
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        form = response.context["form"]
        self.assertEqual(form['redirect_uri'].value(), "http://localhost")

    # NOTE(review): "forbibben" below is a typo for "forbidden"; the method is
    # only discovered by the test runner, so renaming it would be safe.
    def test_pre_auth_forbibben_redirect(self):
        """
        Test error when passing a forbidden redirect_uri in query string with response_type: code
        """
        self.client.login(username="test_user", password="123456")
        query_string = urlencode({
            'client_id': self.application.client_id,
            'response_type': 'code',
            'redirect_uri': 'http://forbidden.it',
        })
        url = "{url}?{qs}".format(url=reverse('oauth2_provider:authorize'), qs=query_string)
        response = self.client.get(url)
        self.assertEqual(response.status_code, 400)

    def test_pre_auth_wrong_response_type(self):
        """
        Test error when passing a wrong response_type in query string
        """
        self.client.login(username="test_user", password="123456")
        query_string = urlencode({
            'client_id': self.application.client_id,
            'response_type': 'WRONG',
        })
        url = "{url}?{qs}".format(url=reverse('oauth2_provider:authorize'), qs=query_string)
        response = self.client.get(url)
        self.assertEqual(response.status_code, 302)
        self.assertIn("error=unauthorized_client", response['Location'])

    def test_code_post_auth_allow(self):
        """
        Test authorization code is given for an allowed request with response_type: code
        """
        self.client.login(username="test_user", password="123456")
        form_data = {
            'client_id': self.application.client_id,
            'state': 'random_state_string',
            'scope': 'read write',
            'redirect_uri': 'http://example.it',
            'response_type': 'code',
            'allow': True,
        }
        response = self.client.post(reverse('oauth2_provider:authorize'), data=form_data)
        self.assertEqual(response.status_code, 302)
        self.assertIn('http://example.it?', response['Location'])
        self.assertIn('state=random_state_string', response['Location'])
        self.assertIn('code=', response['Location'])

    def test_code_post_auth_deny(self):
        """
        Test error when resource owner deny access
        """
        self.client.login(username="test_user", password="123456")
        form_data = {
            'client_id': self.application.client_id,
            'state': 'random_state_string',
            'scope': 'read write',
            'redirect_uri': 'http://example.it',
            'response_type': 'code',
            'allow': False,
        }
        response = self.client.post(reverse('oauth2_provider:authorize'), data=form_data)
        self.assertEqual(response.status_code, 302)
        self.assertIn("error=access_denied", response['Location'])

    def test_code_post_auth_bad_responsetype(self):
        """
        Test authorization code is given for an allowed request with a response_type not supported
        """
        self.client.login(username="test_user", password="123456")
        form_data = {
            'client_id': self.application.client_id,
            'state': 'random_state_string',
            'scope': 'read write',
            'redirect_uri': 'http://example.it',
            'response_type': 'UNKNOWN',
            'allow': True,
        }
        response = self.client.post(reverse('oauth2_provider:authorize'), data=form_data)
        self.assertEqual(response.status_code, 302)
        self.assertIn('http://example.it?error', response['Location'])

    def test_code_post_auth_forbidden_redirect_uri(self):
        """
        Test authorization code is given for an allowed request with a forbidden redirect_uri
        """
        self.client.login(username="test_user", password="123456")
        form_data = {
            'client_id': self.application.client_id,
            'state': 'random_state_string',
            'scope': 'read write',
            'redirect_uri': 'http://forbidden.it',
            'response_type': 'code',
            'allow': True,
        }
        response = self.client.post(reverse('oauth2_provider:authorize'), data=form_data)
        self.assertEqual(response.status_code, 400)

    def test_code_post_auth_malicious_redirect_uri(self):
        """
        Test validation of a malicious redirect_uri
        """
        self.client.login(username="test_user", password="123456")
        form_data = {
            'client_id': self.application.client_id,
            'state': 'random_state_string',
            'scope': 'read write',
            'redirect_uri': '/../',
            'response_type': 'code',
            'allow': True,
        }
        response = self.client.post(reverse('oauth2_provider:authorize'), data=form_data)
        self.assertEqual(response.status_code, 400)

    def test_code_post_auth_allow_custom_redirect_uri_scheme(self):
        """
        Test authorization code is given for an allowed request with response_type: code
        using a non-standard, but allowed, redirect_uri scheme.
        """
        self.client.login(username="test_user", password="123456")
        form_data = {
            'client_id': self.application.client_id,
            'state': 'random_state_string',
            'scope': 'read write',
            'redirect_uri': 'custom-scheme://example.com',
            'response_type': 'code',
            'allow': True,
        }
        response = self.client.post(reverse('oauth2_provider:authorize'), data=form_data)
        self.assertEqual(response.status_code, 302)
        self.assertIn('custom-scheme://example.com?', response['Location'])
        self.assertIn('state=random_state_string', response['Location'])
        self.assertIn('code=', response['Location'])

    def test_code_post_auth_deny_custom_redirect_uri_scheme(self):
        """
        Test error when resource owner deny access
        using a non-standard, but allowed, redirect_uri scheme.
        """
        self.client.login(username="test_user", password="123456")
        form_data = {
            'client_id': self.application.client_id,
            'state': 'random_state_string',
            'scope': 'read write',
            'redirect_uri': 'custom-scheme://example.com',
            'response_type': 'code',
            'allow': False,
        }
        response = self.client.post(reverse('oauth2_provider:authorize'), data=form_data)
        self.assertEqual(response.status_code, 302)
        self.assertIn('custom-scheme://example.com?', response['Location'])
        self.assertIn("error=access_denied", response['Location'])

    def test_code_post_auth_redirection_uri_with_querystring(self):
        """
        Tests that a redirection uri with query string is allowed
        and query string is retained on redirection.
        See http://tools.ietf.org/html/rfc6749#section-3.1.2
        """
        self.client.login(username="test_user", password="123456")
        form_data = {
            'client_id': self.application.client_id,
            'state': 'random_state_string',
            'scope': 'read write',
            'redirect_uri': 'http://example.com?foo=bar',
            'response_type': 'code',
            'allow': True,
        }
        response = self.client.post(reverse('oauth2_provider:authorize'), data=form_data)
        self.assertEqual(response.status_code, 302)
        self.assertIn("http://example.com?foo=bar", response['Location'])
        self.assertIn("code=", response['Location'])

    def test_code_post_auth_fails_when_redirect_uri_path_is_invalid(self):
        """
        Tests that a redirection uri is matched using scheme + netloc + path
        """
        self.client.login(username="test_user", password="123456")
        form_data = {
            'client_id': self.application.client_id,
            'state': 'random_state_string',
            'scope': 'read write',
            'redirect_uri': 'http://example.com/a?foo=bar',
            'response_type': 'code',
            'allow': True,
        }
        response = self.client.post(reverse('oauth2_provider:authorize'), data=form_data)
        self.assertEqual(response.status_code, 400)
class TestAuthorizationCodeTokenView(BaseTest):
def get_auth(self):
    """
    Helper method to retrieve a valid authorization code
    """
    payload = {
        'allow': True,
        'response_type': 'code',
        'client_id': self.application.client_id,
        'state': 'random_state_string',
        'scope': 'read write',
        'redirect_uri': 'http://example.it',
    }
    auth_response = self.client.post(reverse('oauth2_provider:authorize'), data=payload)
    # The code is handed back as a query parameter on the redirect Location.
    redirect_query = urlparse(auth_response['Location']).query
    return parse_qs(redirect_query)['code'].pop()
def test_basic_auth(self):
    """
    Request an access token using basic authentication for client authentication
    """
    self.client.login(username="test_user", password="123456")
    authorization_code = self.get_auth()
    token_request_data = {
        'grant_type': 'authorization_code',
        'code': authorization_code,
        'redirect_uri': 'http://example.it'
    }
    auth_headers = self.get_basic_auth_header(self.application.client_id, self.application.client_secret)
    response = self.client.post(reverse('oauth2_provider:token'), data=token_request_data, **auth_headers)
    self.assertEqual(response.status_code, 200)
    # The token payload must carry type, scope and the configured lifetime.
    content = json.loads(response.content.decode("utf-8"))
    self.assertEqual(content['token_type'], "Bearer")
    self.assertEqual(content['scope'], "read write")
    self.assertEqual(content['expires_in'], oauth2_settings.ACCESS_TOKEN_EXPIRE_SECONDS)
def test_refresh(self):
    """
    Request an access token using a refresh token
    """
    self.client.login(username="test_user", password="123456")
    authorization_code = self.get_auth()
    token_request_data = {
        'grant_type': 'authorization_code',
        'code': authorization_code,
        'redirect_uri': 'http://example.it'
    }
    auth_headers = self.get_basic_auth_header(self.application.client_id, self.application.client_secret)
    response = self.client.post(reverse('oauth2_provider:token'), data=token_request_data, **auth_headers)
    content = json.loads(response.content.decode("utf-8"))
    self.assertTrue('refresh_token' in content)
    # make a second token request to be sure the previous refresh token remains valid, see #65
    authorization_code = self.get_auth()
    token_request_data = {
        'grant_type': 'authorization_code',
        'code': authorization_code,
        'redirect_uri': 'http://example.it'
    }
    response = self.client.post(reverse('oauth2_provider:token'), data=token_request_data, **auth_headers)
    # Exchange the FIRST refresh token for a new access token.
    token_request_data = {
        'grant_type': 'refresh_token',
        'refresh_token': content['refresh_token'],
        'scope': content['scope'],
    }
    response = self.client.post(reverse('oauth2_provider:token'), data=token_request_data, **auth_headers)
    self.assertEqual(response.status_code, 200)
    content = json.loads(response.content.decode("utf-8"))
    self.assertTrue('access_token' in content)
    # check refresh token cannot be used twice
    response = self.client.post(reverse('oauth2_provider:token'), data=token_request_data, **auth_headers)
    self.assertEqual(response.status_code, 401)
    content = json.loads(response.content.decode("utf-8"))
    self.assertTrue('invalid_grant' in content.values())
def test_refresh_no_scopes(self):
    """
    Request an access token using a refresh token without passing any scope
    """
    self.client.login(username="test_user", password="123456")
    authorization_code = self.get_auth()
    token_request_data = {
        'grant_type': 'authorization_code',
        'code': authorization_code,
        'redirect_uri': 'http://example.it'
    }
    auth_headers = self.get_basic_auth_header(self.application.client_id, self.application.client_secret)
    response = self.client.post(reverse('oauth2_provider:token'), data=token_request_data, **auth_headers)
    content = json.loads(response.content.decode("utf-8"))
    self.assertTrue('refresh_token' in content)
    # No 'scope' key: the server should fall back to the original grant's scopes.
    token_request_data = {
        'grant_type': 'refresh_token',
        'refresh_token': content['refresh_token'],
    }
    response = self.client.post(reverse('oauth2_provider:token'), data=token_request_data, **auth_headers)
    self.assertEqual(response.status_code, 200)
    content = json.loads(response.content.decode("utf-8"))
    self.assertTrue('access_token' in content)
def test_refresh_bad_scopes(self):
    """
    Request an access token using a refresh token and wrong scopes
    """
    self.client.login(username="test_user", password="123456")
    authorization_code = self.get_auth()
    token_request_data = {
        'grant_type': 'authorization_code',
        'code': authorization_code,
        'redirect_uri': 'http://example.it'
    }
    auth_headers = self.get_basic_auth_header(self.application.client_id, self.application.client_secret)
    response = self.client.post(reverse('oauth2_provider:token'), data=token_request_data, **auth_headers)
    content = json.loads(response.content.decode("utf-8"))
    self.assertTrue('refresh_token' in content)
    # 'nuke' was never granted, so the refresh must be rejected.
    token_request_data = {
        'grant_type': 'refresh_token',
        'refresh_token': content['refresh_token'],
        'scope': 'read write nuke',
    }
    response = self.client.post(reverse('oauth2_provider:token'), data=token_request_data, **auth_headers)
    self.assertEqual(response.status_code, 401)
def test_refresh_fail_repeating_requests(self):
    """
    Try refreshing an access token with the same refresh token more than once
    """
    self.client.login(username="test_user", password="123456")
    authorization_code = self.get_auth()
    token_request_data = {
        'grant_type': 'authorization_code',
        'code': authorization_code,
        'redirect_uri': 'http://example.it'
    }
    auth_headers = self.get_basic_auth_header(self.application.client_id, self.application.client_secret)
    response = self.client.post(reverse('oauth2_provider:token'), data=token_request_data, **auth_headers)
    content = json.loads(response.content.decode("utf-8"))
    self.assertTrue('refresh_token' in content)
    token_request_data = {
        'grant_type': 'refresh_token',
        'refresh_token': content['refresh_token'],
        'scope': content['scope'],
    }
    # First refresh succeeds; the second reuse of the same token must fail.
    response = self.client.post(reverse('oauth2_provider:token'), data=token_request_data, **auth_headers)
    self.assertEqual(response.status_code, 200)
    response = self.client.post(reverse('oauth2_provider:token'), data=token_request_data, **auth_headers)
    self.assertEqual(response.status_code, 401)
def test_refresh_repeating_requests_non_rotating_tokens(self):
    """
    Try refreshing an access token with the same refresh token more than once when not rotating tokens.
    """
    self.client.login(username="test_user", password="123456")
    authorization_code = self.get_auth()
    token_request_data = {
        'grant_type': 'authorization_code',
        'code': authorization_code,
        'redirect_uri': 'http://example.it'
    }
    auth_headers = self.get_basic_auth_header(self.application.client_id, self.application.client_secret)
    response = self.client.post(reverse('oauth2_provider:token'), data=token_request_data, **auth_headers)
    content = json.loads(response.content.decode("utf-8"))
    self.assertTrue('refresh_token' in content)
    token_request_data = {
        'grant_type': 'refresh_token',
        'refresh_token': content['refresh_token'],
        'scope': content['scope'],
    }
    # With rotation disabled, reusing the same refresh token keeps succeeding.
    with mock.patch('oauthlib.oauth2.rfc6749.request_validator.RequestValidator.rotate_refresh_token', return_value=False):
        response = self.client.post(reverse('oauth2_provider:token'), data=token_request_data, **auth_headers)
        self.assertEqual(response.status_code, 200)
        response = self.client.post(reverse('oauth2_provider:token'), data=token_request_data, **auth_headers)
        self.assertEqual(response.status_code, 200)
def test_basic_auth_bad_authcode(self):
    """
    Request an access token using a bad authorization code
    """
    self.client.login(username="test_user", password="123456")
    headers = self.get_basic_auth_header(self.application.client_id, self.application.client_secret)
    # 'BLAH' was never issued as a grant, so the exchange must be rejected.
    request_body = {
        'grant_type': 'authorization_code',
        'code': 'BLAH',
        'redirect_uri': 'http://example.it',
    }
    token_response = self.client.post(reverse('oauth2_provider:token'), data=request_body, **headers)
    self.assertEqual(401, token_response.status_code)
def test_basic_auth_bad_granttype(self):
    """
    Request an access token using a bad grant_type string
    """
    self.client.login(username="test_user", password="123456")
    headers = self.get_basic_auth_header(self.application.client_id, self.application.client_secret)
    # An unsupported grant_type is a malformed request -> 400, not 401.
    request_body = {
        'grant_type': 'UNKNOWN',
        'code': 'BLAH',
        'redirect_uri': 'http://example.it',
    }
    token_response = self.client.post(reverse('oauth2_provider:token'), data=request_body, **headers)
    self.assertEqual(400, token_response.status_code)
def test_basic_auth_grant_expired(self):
    """
    Request an access token using an expired grant token
    """
    self.client.login(username="test_user", password="123456")
    # expires=now() makes the grant already expired when it is exchanged.
    g = Grant(application=self.application, user=self.test_user, code='BLAH', expires=timezone.now(),
              redirect_uri='', scope='')
    g.save()
    token_request_data = {
        'grant_type': 'authorization_code',
        'code': 'BLAH',
        'redirect_uri': 'http://example.it'
    }
    auth_headers = self.get_basic_auth_header(self.application.client_id, self.application.client_secret)
    response = self.client.post(reverse('oauth2_provider:token'), data=token_request_data, **auth_headers)
    self.assertEqual(response.status_code, 401)
def test_basic_auth_bad_secret(self):
    """
    Request an access token using basic authentication with a wrong client
    secret: the token endpoint must reject the request (401).
    """
    self.client.login(username="test_user", password="123456")
    authorization_code = self.get_auth()
    token_request_data = {
        'grant_type': 'authorization_code',
        'code': authorization_code,
        'redirect_uri': 'http://example.it'
    }
    # 'BOOM!' is not the application's secret.
    auth_headers = self.get_basic_auth_header(self.application.client_id, 'BOOM!')
    response = self.client.post(reverse('oauth2_provider:token'), data=token_request_data, **auth_headers)
    self.assertEqual(response.status_code, 401)
def test_basic_auth_wrong_auth_type(self):
    """
    Request an access token using a non-Basic Authorization scheme: the token
    endpoint must reject the request (401).
    """
    self.client.login(username="test_user", password="123456")
    authorization_code = self.get_auth()
    token_request_data = {
        'grant_type': 'authorization_code',
        'code': authorization_code,
        'redirect_uri': 'http://example.it'
    }
    # Correct credentials, but sent with scheme "Wrong" instead of "Basic".
    user_pass = '{0}:{1}'.format(self.application.client_id, self.application.client_secret)
    auth_string = base64.b64encode(user_pass.encode('utf-8'))
    auth_headers = {
        'HTTP_AUTHORIZATION': 'Wrong ' + auth_string.decode("utf-8"),
    }
    response = self.client.post(reverse('oauth2_provider:token'), data=token_request_data, **auth_headers)
    self.assertEqual(response.status_code, 401)
def test_request_body_params(self):
    """
    Request an access token passing the client credentials in the request
    body instead of an Authorization header.
    """
    self.client.login(username="test_user", password="123456")
    authorization_code = self.get_auth()
    token_request_data = {
        'grant_type': 'authorization_code',
        'code': authorization_code,
        'redirect_uri': 'http://example.it',
        'client_id': self.application.client_id,
        'client_secret': self.application.client_secret,
    }
    response = self.client.post(reverse('oauth2_provider:token'), data=token_request_data)
    self.assertEqual(response.status_code, 200)
    content = json.loads(response.content.decode("utf-8"))
    self.assertEqual(content['token_type'], "Bearer")
    self.assertEqual(content['scope'], "read write")
    self.assertEqual(content['expires_in'], oauth2_settings.ACCESS_TOKEN_EXPIRE_SECONDS)
def test_public(self):
    """
    Request an access token using client_type: public
    """
    self.client.login(username="test_user", password="123456")
    # Public clients authenticate with client_id only — no secret required.
    self.application.client_type = Application.CLIENT_PUBLIC
    self.application.save()
    authorization_code = self.get_auth()
    token_request_data = {
        'grant_type': 'authorization_code',
        'code': authorization_code,
        'redirect_uri': 'http://example.it',
        'client_id': self.application.client_id
    }
    response = self.client.post(reverse('oauth2_provider:token'), data=token_request_data)
    self.assertEqual(response.status_code, 200)
    content = json.loads(response.content.decode("utf-8"))
    self.assertEqual(content['token_type'], "Bearer")
    self.assertEqual(content['scope'], "read write")
    self.assertEqual(content['expires_in'], oauth2_settings.ACCESS_TOKEN_EXPIRE_SECONDS)
def test_malicious_redirect_uri(self):
    """
    Request an access token using client_type: public and ensure redirect_uri is
    properly validated.
    """
    self.client.login(username="test_user", password="123456")
    self.application.client_type = Application.CLIENT_PUBLIC
    self.application.save()

    code = self.get_auth()
    # A relative / path-traversal redirect_uri must be rejected.
    request_body = {
        'grant_type': 'authorization_code',
        'code': code,
        'redirect_uri': '/../',
        'client_id': self.application.client_id,
    }
    token_response = self.client.post(reverse('oauth2_provider:token'), data=request_body)
    self.assertEqual(token_response.status_code, 401)
def test_code_exchange_succeed_when_redirect_uri_match(self):
    """
    Tests code exchange succeed when redirect uri matches the one used for code request
    """
    self.client.login(username="test_user", password="123456")

    # Obtain an authorization code using a redirect_uri with a query string.
    authorize_data = {
        'client_id': self.application.client_id,
        'state': 'random_state_string',
        'scope': 'read write',
        'redirect_uri': 'http://example.it?foo=bar',
        'response_type': 'code',
        'allow': True,
    }
    authorize_response = self.client.post(reverse('oauth2_provider:authorize'), data=authorize_data)
    code = parse_qs(urlparse(authorize_response['Location']).query)['code'].pop()

    # Exchange the code, repeating the exact same redirect_uri.
    exchange_data = {
        'grant_type': 'authorization_code',
        'code': code,
        'redirect_uri': 'http://example.it?foo=bar',
    }
    headers = self.get_basic_auth_header(self.application.client_id, self.application.client_secret)
    token_response = self.client.post(reverse('oauth2_provider:token'), data=exchange_data, **headers)
    self.assertEqual(token_response.status_code, 200)

    payload = json.loads(token_response.content.decode("utf-8"))
    self.assertEqual(payload['token_type'], "Bearer")
    self.assertEqual(payload['scope'], "read write")
    self.assertEqual(payload['expires_in'], oauth2_settings.ACCESS_TOKEN_EXPIRE_SECONDS)
def test_code_exchange_fails_when_redirect_uri_does_not_match(self):
    """
    Tests code exchange fails when redirect uri does not match the one used for code request
    """
    self.client.login(username="test_user", password="123456")

    # retrieve a valid authorization code
    authcode_data = {
        'client_id': self.application.client_id,
        'state': 'random_state_string',
        'scope': 'read write',
        'redirect_uri': 'http://example.it?foo=bar',
        'response_type': 'code',
        'allow': True,
    }
    response = self.client.post(reverse('oauth2_provider:authorize'), data=authcode_data)
    query_dict = parse_qs(urlparse(response['Location']).query)
    authorization_code = query_dict['code'].pop()

    # exchange the code using a redirect_uri whose query string differs
    # from the one the code was issued for: the provider must reject it
    token_request_data = {
        'grant_type': 'authorization_code',
        'code': authorization_code,
        'redirect_uri': 'http://example.it?foo=baraa'
    }
    auth_headers = self.get_basic_auth_header(self.application.client_id, self.application.client_secret)
    response = self.client.post(reverse('oauth2_provider:token'), data=token_request_data, **auth_headers)
    self.assertEqual(response.status_code, 401)
def test_code_exchange_succeed_when_redirect_uri_match_with_multiple_query_params(self):
    """
    Tests code exchange succeeds when a redirect uri carrying multiple
    query parameters matches the one used for the code request.
    """
    self.client.login(username="test_user", password="123456")
    self.application.redirect_uris = "http://localhost http://example.com?foo=bar"
    self.application.save()

    # retrieve a valid authorization code
    authcode_data = {
        'client_id': self.application.client_id,
        'state': 'random_state_string',
        'scope': 'read write',
        'redirect_uri': 'http://example.com?bar=baz&foo=bar',
        'response_type': 'code',
        'allow': True,
    }
    response = self.client.post(reverse('oauth2_provider:authorize'), data=authcode_data)
    query_dict = parse_qs(urlparse(response['Location']).query)
    authorization_code = query_dict['code'].pop()

    # exchange authorization code for a valid access token
    token_request_data = {
        'grant_type': 'authorization_code',
        'code': authorization_code,
        'redirect_uri': 'http://example.com?bar=baz&foo=bar'
    }
    auth_headers = self.get_basic_auth_header(self.application.client_id, self.application.client_secret)
    response = self.client.post(reverse('oauth2_provider:token'), data=token_request_data, **auth_headers)
    self.assertEqual(response.status_code, 200)
    content = json.loads(response.content.decode("utf-8"))
    self.assertEqual(content['token_type'], "Bearer")
    self.assertEqual(content['scope'], "read write")
    self.assertEqual(content['expires_in'], oauth2_settings.ACCESS_TOKEN_EXPIRE_SECONDS)
class TestAuthorizationCodeProtectedResource(BaseTest):
    """Access a protected resource with tokens from the auth-code flow."""

    def test_resource_access_allowed(self):
        """A token obtained via the full flow grants access to the resource."""
        self.client.login(username="test_user", password="123456")

        # retrieve a valid authorization code
        authcode_data = {
            'client_id': self.application.client_id,
            'state': 'random_state_string',
            'scope': 'read write',
            'redirect_uri': 'http://example.it',
            'response_type': 'code',
            'allow': True,
        }
        response = self.client.post(reverse('oauth2_provider:authorize'), data=authcode_data)
        query_dict = parse_qs(urlparse(response['Location']).query)
        authorization_code = query_dict['code'].pop()

        # exchange authorization code for a valid access token
        token_request_data = {
            'grant_type': 'authorization_code',
            'code': authorization_code,
            'redirect_uri': 'http://example.it'
        }
        auth_headers = self.get_basic_auth_header(self.application.client_id, self.application.client_secret)
        response = self.client.post(reverse('oauth2_provider:token'), data=token_request_data, **auth_headers)
        content = json.loads(response.content.decode("utf-8"))
        access_token = content['access_token']

        # use token to access the resource
        auth_headers = {
            'HTTP_AUTHORIZATION': 'Bearer ' + access_token,
        }
        request = self.factory.get("/fake-resource", **auth_headers)
        request.user = self.test_user

        view = ResourceView.as_view()
        response = view(request)
        # ResourceView is expected to return this exact string body when
        # the bearer token is valid.
        self.assertEqual(response, "This is a protected resource")

    def test_resource_access_deny(self):
        """A made-up bearer token must be rejected with 403."""
        auth_headers = {
            'HTTP_AUTHORIZATION': 'Bearer ' + "faketoken",
        }
        request = self.factory.get("/fake-resource", **auth_headers)
        request.user = self.test_user

        view = ResourceView.as_view()
        response = view(request)
        self.assertEqual(response.status_code, 403)
| |
# Quick to write and slow to run Doxygen to XML Comment converter.
# John Hardy 2011
def endComment():
    """
    @brief Reset the parser state ready for the next comment block.
    """
    global sEType, sEVar, sEData, iIndent
    iIndent = -1
    sEData = ""
    sEVar = None
    sEType = BRIEF
def handleExistingData(iIndent):
    """
    @brief Write out any buffered directive data as an XML comment block.
    @param iIndent The indent level (number of 4-space tab stops).
    """
    global sEType, sEVar, sEData
    # If no directive is active, quit.
    if not sEType:
        return

    # Skip if we have no data.
    if not sEData:
        return

    # Insert tab level and comment markers into a header.
    sHead = (" " * iIndent) + "/// "

    # Sanitise data: drop trailing whitespace.
    # BUG FIX: the original called sEData.rstrip() and discarded the
    # result (str.rstrip returns a new string; it does not mutate).
    sEData = sEData.rstrip()

    # Swap break markers for newline + header so continuations line up.
    sEData = sEData.replace(BREAK, "\n" + sHead)

    # Write out the respective blocks.
    # (A duplicate, unreachable 'elif sEType == RETURN' branch was removed.)
    if sEType == BRIEF:
        pOutFile.write(sHead + "<summary>\n")
        pOutFile.write(sHead + sEData + "\n")
        pOutFile.write(sHead + "</summary>\n")
    elif sEType == PARAM:
        pOutFile.write(sHead + "<param name=\"" + str(sEVar) + "\">" + str(sEData) + "</param>\n")
    elif sEType == RETURN:
        pOutFile.write(sHead + "<returns>" + str(sEData) + "</returns>\n")
    elif sEType == AUTHOR:
        pOutFile.write(sHead + "<author>" + str(sEData) + "</author>\n")
    elif sEType == DATE:
        pOutFile.write(sHead + "<date>" + str(sEData) + "</date>\n")
    elif sEType == REMARK:
        pOutFile.write(sHead + str(sEData) + "\n")

    # Zap any leftover data.
    sEType = None
    sEVar = None
    sEData = ""
def dataFromString(sString, iStart = 0):
    """
    @brief Extract data from a line which may or may not end in '*/'.
    @param sString The string to parse.
    @param iStart The index to start parsing from (default 0, the start).
    @return The extracted data, excluding any trailing '*/' if present.
    """
    if CLOSE_COMMENT in sString:
        iEnd = sString.find(CLOSE_COMMENT)
    else:
        iEnd = len(sString)
    return sString[iStart : iEnd].rstrip()
def dataFromLine(sLine):
    """
    @brief Extract data from a doxygen comment line.
    @param sLine The comment line to parse.
    @return The rstrip'ed text after the '* ' marker, or "" if absent.
    """
    iMarker = sLine.find("* ")
    if iMarker == -1:
        return ""
    return dataFromString(sLine, iMarker + 2)
def handleCommentLine(sLine, iLine):
    """
    @brief Consume one line that lies inside a doxygen comment block.
    @param sLine The line data.
    @param iLine The line number.
    @return True while still inside the comment block, False once the
            closing '*/' has been handled on this line.
    """
    global sEType, sEVar, sEData, iIndent
    # Work out the indentation level to operate at.
    # This is only done once for each comment block.
    # NOTE(review): '/ 4' assumes 4-space indents and Python 2 integer
    # division -- under Python 3 this would produce a float.
    if iIndent < 0:
        iIndent = (len(sLine) - len(sLine.lstrip())) / 4

    # If there is no '@' symbol, save as much data as we can from the commentline.
    if START_SYMBOL not in sLine:

        # If we are a directive which only accepts single line values then anything extra is a remark.
        if sEType in (PARAM, RETURN, AUTHOR, DATE):
            handleExistingData(iIndent)
            sEType = REMARK
            sEData = ""

        # Get the data from the line and append it if it is exists.
        sData = dataFromLine(sLine)
        if len(sData) > 0:
            # If we already have data, insert a breakline.
            if sEData:
                sEData += BREAK + sData
            # Otherwise do not.
            else:
                sEData = sData

        # If we have an end comment on this line, exit the comment by returning false.
        if CLOSE_COMMENT in sLine:
            handleExistingData(iIndent)
            endComment()
            return False
        return True

    # Since the line does contain an '@' symbol, push any existing data.
    handleExistingData(iIndent)

    # If this line contains an '@' symbol then work out what is after it.
    sEType = sLine.split(START_SYMBOL)[1].split(" ")[0]

    # If the comment data type is BRIEF
    if sEType == BRIEF:
        sEData = dataFromString(sLine, sLine.find(BRIEF) + len(BRIEF) + 1)
    elif sEType == PARAM:
        # The first word after '@param' is the parameter name; the rest is data.
        sTemp = dataFromString(sLine, sLine.find(PARAM) + len(PARAM) + 1)
        iChop = sTemp.find(" ") + 1
        sEData = sTemp[iChop:]
        sEVar = sTemp[:iChop].rstrip()
    elif sEType == RETURN:
        sEData = dataFromString(sLine, sLine.find(RETURN) + len(RETURN) + 1)
    elif sEType == DATE:
        sEData = dataFromString(sLine, sLine.find(DATE) + len(DATE) + 1)
    elif sEType == AUTHOR:
        sEData = dataFromString(sLine, sLine.find(AUTHOR) + len(AUTHOR) + 1)

    # If we have an end comment on this line, exit the comment by returning false.
    if CLOSE_COMMENT in sLine:
        handleExistingData(iIndent)
        endComment()
        return False
    return True
## Modules
import time
import shutil
import os
## Constants
# Marker that introduces a doxygen directive, e.g. '@brief'.
START_SYMBOL = "@"
# Doxygen block comment delimiters.
OPEN_COMMENT = "/**"
CLOSE_COMMENT = "*/"
# Directive names recognised after START_SYMBOL.
BRIEF = "brief"
PARAM = "param"
RETURN = "return"
AUTHOR = "author"
DATE = "date"
REMARK = "remark"
# Internal marker used to join continuation lines before output.
BREAK = "!BREAK!"

## Module state shared by the parser functions and bound via 'global'
## inside the functions that assign it: sEType, sEVar, sEData, iIndent,
## pOutFile. (The original module-level 'global' statement was removed:
## 'global' is a no-op at module scope.)
## Main function.
def convert(sInFile, sOutFile = None, bReport = True):
"""
@brief A function which will convert the contents of one file and write them to an output file.
@param sInFile The file to convert from doxycomments to xml comments.
@param sOutFile OPTIONAL The file to save them in. Default is a _d appended version of the old one.
@param bReport Report the number of comments and time it took etc.
"""
# Globals
global pOutFile
# File jiggery.
if not sOutFile:
sOutFile = sInFile.replace(".", "_dtemp.")
# Some initial state and a line counter.
endComment()
bInComment = False
iLine = 0
iComments = 0
iStartTime = time.clock()
# Open the files.
pOutFile = open(sOutFile, "w")
with open(sInFile) as pIn:
# For each line in the file.
for sLine in pIn:
# Increment counter.
iLine += 1
# If we are in a comment, handle the line.
if bInComment:
bInComment = handleCommentLine(sLine, iLine)
# Check the new line to see if it opens a comment line.
elif OPEN_COMMENT in sLine:
iComments += 1
bInComment = handleCommentLine(sLine, iLine)
# We are neither a comment so write the line back to the source.
else:
pOutFile.write(sLine)
# Close the output file.
pOutFile.close()
# Backup the old file.
#shutil.copy(sInFile, sInFile + "_dbackup")
# Copy the new file over the old file.
shutil.copy(sOutFile, sInFile)
os.remove(sOutFile)
# Report.
if bReport:
print sInFile
print str(iComments) + " comment blocks converted within "+str(iLine)+" lines in approx "+str(round(time.clock() - iStartTime, 2))+" seconds."
# Command-line entry point: convert every file named on the command line.
if __name__ == "__main__":
    import sys
    if len(sys.argv) == 1:
        print "Please specify an input file."
    else:
        lFiles = sys.argv[1:]
        # Convert each file in turn, printing a separator between them.
        for sFile in lFiles:
            convert(sFile)
            print "-----"
    # Keep the console window open until acknowledged (Python 2 raw_input).
    raw_input("Done")
| |
# Copyright (c) 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_utils import excutils
from sqlalchemy import and_
from sqlalchemy import event
from sqlalchemy import orm
from sqlalchemy.orm import exc
from neutron.api.v2 import attributes
from neutron.common import constants
from neutron.common import exceptions as n_exc
from neutron.common import ipv6_utils
from neutron.common import utils
from neutron import context as ctx
from neutron.db import common_db_mixin
from neutron.db import models_v2
from neutron.db import sqlalchemyutils
from neutron.extensions import l3
from neutron.i18n import _LE, _LI
from neutron import manager
from neutron import neutron_plugin_base_v2
from neutron.openstack.common import log as logging
from neutron.openstack.common import uuidutils
from neutron.plugins.common import constants as service_constants
# Module-level logger for this plugin.
LOG = logging.getLogger(__name__)

# Ports with the following 'device_owner' values will not prevent
# network deletion. If delete_network() finds that all ports on a
# network have these owners, it will explicitly delete each port
# and allow network deletion to continue. Similarly, if delete_subnet()
# finds out that all existing IP Allocations are associated with ports
# with these owners, it will allow subnet deletion to proceed with the
# IP allocations being cleaned up by cascade.
AUTO_DELETE_PORT_OWNERS = [constants.DEVICE_OWNER_DHCP]
class NeutronDbPluginV2(neutron_plugin_base_v2.NeutronPluginBaseV2,
                        common_db_mixin.CommonDbMixin):
    """V2 Neutron plugin interface implementation using SQLAlchemy models.

    Whenever a non-read call happens the plugin will call an event handler
    class method (e.g., network_created()). The result is that this class
    can be sub-classed by other classes that add custom behaviors on certain
    events.
    """

    # These attributes specify whether the plugin supports or not
    # bulk/pagination/sorting operations. Name mangling is used in
    # order to ensure it is qualified by class.
    __native_bulk_support = True
    __native_pagination_support = True
    __native_sorting_support = True
def __init__(self):
    """Hook up nova notifications on port status changes when enabled."""
    # Only pay the import/listener cost if the deployment asked for
    # nova notifications.
    if cfg.CONF.notify_nova_on_port_status_changes:
        from neutron.notifiers import nova
        # NOTE(arosen): These event listeners are here to hook into when
        # port status changes and notify nova about their change.
        self.nova_notifier = nova.Notifier()
        event.listen(models_v2.Port, 'after_insert',
                     self.nova_notifier.send_port_status)
        event.listen(models_v2.Port, 'after_update',
                     self.nova_notifier.send_port_status)
        event.listen(models_v2.Port.status, 'set',
                     self.nova_notifier.record_port_status_changed)
def _get_network(self, context, id):
    """Fetch a network model by id, raising NetworkNotFound if missing."""
    try:
        return self._get_by_id(context, models_v2.Network, id)
    except exc.NoResultFound:
        raise n_exc.NetworkNotFound(net_id=id)
def _get_subnet(self, context, id):
    """Fetch a subnet model by id, raising SubnetNotFound if missing."""
    try:
        return self._get_by_id(context, models_v2.Subnet, id)
    except exc.NoResultFound:
        raise n_exc.SubnetNotFound(subnet_id=id)
def _get_port(self, context, id):
    """Fetch a port model by id, raising PortNotFound if missing."""
    try:
        return self._get_by_id(context, models_v2.Port, id)
    except exc.NoResultFound:
        raise n_exc.PortNotFound(port_id=id)
def _get_dns_by_subnet(self, context, subnet_id):
    """Return all DNS name server rows configured for a subnet."""
    query = context.session.query(models_v2.DNSNameServer)
    return query.filter_by(subnet_id=subnet_id).all()
def _get_route_by_subnet(self, context, subnet_id):
    """Return all host route rows configured for a subnet."""
    query = context.session.query(models_v2.SubnetRoute)
    return query.filter_by(subnet_id=subnet_id).all()
def _get_subnets_by_network(self, context, network_id):
    """Return all subnet rows belonging to the given network."""
    return (context.session.query(models_v2.Subnet)
            .filter_by(network_id=network_id).all())
def _get_all_subnets(self, context):
    """Return every subnet row in the database.

    NOTE(salvatore-orlando): This query might end up putting
    a lot of stress on the db. Consider adding a cache layer.
    """
    return context.session.query(models_v2.Subnet).all()
@staticmethod
def _generate_mac():
    """Produce a random MAC derived from the configured base_mac."""
    base = cfg.CONF.base_mac.split(':')
    return utils.get_random_mac(base)
@staticmethod
def _delete_ip_allocation(context, network_id, subnet_id, ip_address):
    """Remove a single row from the IPAllocation table."""
    LOG.debug("Delete allocated IP %(ip_address)s "
              "(%(network_id)s/%(subnet_id)s)",
              {'ip_address': ip_address,
               'network_id': network_id,
               'subnet_id': subnet_id})
    query = context.session.query(models_v2.IPAllocation)
    query.filter_by(network_id=network_id,
                    subnet_id=subnet_id,
                    ip_address=ip_address).delete()
@staticmethod
def _store_ip_allocation(context, ip_address, network_id, subnet_id,
                         port_id):
    """Persist a new IPAllocation row binding an IP address to a port."""
    LOG.debug("Allocated IP %(ip_address)s "
              "(%(network_id)s/%(subnet_id)s/%(port_id)s)",
              {'ip_address': ip_address,
               'network_id': network_id,
               'subnet_id': subnet_id,
               'port_id': port_id})
    row = models_v2.IPAllocation(network_id=network_id,
                                 port_id=port_id,
                                 ip_address=ip_address,
                                 subnet_id=subnet_id)
    context.session.add(row)
@staticmethod
def _generate_ip(context, subnets):
    """Allocate an IP, rebuilding availability ranges once on exhaustion."""
    try:
        return NeutronDbPluginV2._try_generate_ip(context, subnets)
    except n_exc.IpAddressGenerationFailure:
        # The ranges may simply be stale; rebuild them and retry once.
        NeutronDbPluginV2._rebuild_availability_ranges(context, subnets)
    return NeutronDbPluginV2._try_generate_ip(context, subnets)
@staticmethod
def _try_generate_ip(context, subnets):
    """Generate an IP address.

    The IP address will be generated from one of the subnets defined on
    the network. Raises IpAddressGenerationFailure when every subnet's
    availability ranges are exhausted.
    """
    # Lock the availability ranges for update so concurrent allocations
    # cannot hand out the same address.
    range_qry = context.session.query(
        models_v2.IPAvailabilityRange).join(
            models_v2.IPAllocationPool).with_lockmode('update')
    for subnet in subnets:
        ip_range = range_qry.filter_by(subnet_id=subnet['id']).first()
        if not ip_range:
            LOG.debug("All IPs from subnet %(subnet_id)s (%(cidr)s) "
                      "allocated",
                      {'subnet_id': subnet['id'],
                       'cidr': subnet['cidr']})
            continue
        # Hand out the first address of the range found.
        ip_address = ip_range['first_ip']
        if ip_range['first_ip'] == ip_range['last_ip']:
            # No more free indices on subnet => delete
            LOG.debug("No more free IP's in slice. Deleting "
                      "allocation pool.")
            context.session.delete(ip_range)
        else:
            # increment the first free
            new_first_ip = str(netaddr.IPAddress(ip_address) + 1)
            ip_range['first_ip'] = new_first_ip
        LOG.debug("Allocated IP - %(ip_address)s from %(first_ip)s "
                  "to %(last_ip)s",
                  {'ip_address': ip_address,
                   'first_ip': ip_address,
                   'last_ip': ip_range['last_ip']})
        return {'ip_address': ip_address,
                'subnet_id': subnet['id']}
    raise n_exc.IpAddressGenerationFailure(net_id=subnets[0]['network_id'])
@staticmethod
def _rebuild_availability_ranges(context, subnets):
    """Rebuild availability ranges.

    This method is called only when there's no more IP available or by
    _update_subnet_allocation_pools. Calling
    _update_subnet_allocation_pools before calling this function deletes
    the IPAllocationPools associated with the subnet that is updating,
    which will result in deleting the IPAvailabilityRange too.
    """
    ip_qry = context.session.query(
        models_v2.IPAllocation).with_lockmode('update')
    # PostgreSQL does not support select...for update with an outer join.
    # No join is needed here.
    pool_qry = context.session.query(
        models_v2.IPAllocationPool).options(
            orm.noload('available_ranges')).with_lockmode('update')
    for subnet in sorted(subnets):
        LOG.debug("Rebuilding availability ranges for subnet %s",
                  subnet)

        # Create a set of all currently allocated addresses
        ip_qry_results = ip_qry.filter_by(subnet_id=subnet['id'])
        allocations = netaddr.IPSet([netaddr.IPAddress(i['ip_address'])
                                     for i in ip_qry_results])

        for pool in pool_qry.filter_by(subnet_id=subnet['id']):
            # Create a set of all addresses in the pool
            poolset = netaddr.IPSet(netaddr.IPRange(pool['first_ip'],
                                                    pool['last_ip']))

            # Use set difference to find free addresses in the pool
            available = poolset - allocations

            # Generator compacts an ip set into contiguous ranges
            def ipset_to_ranges(ipset):
                # first/last track the current contiguous run of
                # integer address values across the set's CIDRs.
                first, last = None, None
                for cidr in ipset.iter_cidrs():
                    if last and last + 1 != cidr.first:
                        # Gap found: emit the finished run.
                        yield netaddr.IPRange(first, last)
                        first = None
                    first, last = first if first else cidr.first, cidr.last
                if first:
                    yield netaddr.IPRange(first, last)

            # Write the ranges to the db
            for ip_range in ipset_to_ranges(available):
                available_range = models_v2.IPAvailabilityRange(
                    allocation_pool_id=pool['id'],
                    first_ip=str(netaddr.IPAddress(ip_range.first)),
                    last_ip=str(netaddr.IPAddress(ip_range.last)))
                context.session.add(available_range)
@staticmethod
def _allocate_specific_ip(context, subnet_id, ip_address):
    """Allocate a specific IP address on the subnet.

    Finds the availability range containing the address and shrinks or
    splits it so the address is no longer available. Does nothing when
    the address lies inside no range.
    """
    ip = int(netaddr.IPAddress(ip_address))
    range_qry = context.session.query(
        models_v2.IPAvailabilityRange).join(
            models_v2.IPAllocationPool).with_lockmode('update')
    results = range_qry.filter_by(subnet_id=subnet_id)
    for ip_range in results:
        first = int(netaddr.IPAddress(ip_range['first_ip']))
        last = int(netaddr.IPAddress(ip_range['last_ip']))
        if first <= ip <= last:
            if first == last:
                # Range held exactly this address: remove it entirely.
                context.session.delete(ip_range)
                return
            elif first == ip:
                # Address is the range start: advance the start by one.
                new_first_ip = str(netaddr.IPAddress(ip_address) + 1)
                ip_range['first_ip'] = new_first_ip
                return
            elif last == ip:
                # Address is the range end: pull the end back by one.
                new_last_ip = str(netaddr.IPAddress(ip_address) - 1)
                ip_range['last_ip'] = new_last_ip
                return
            else:
                # Address is mid-range: split into two ranges.
                # Adjust the original range to end before ip_address
                old_last_ip = ip_range['last_ip']
                new_last_ip = str(netaddr.IPAddress(ip_address) - 1)
                ip_range['last_ip'] = new_last_ip

                # Create a new second range for after ip_address
                new_first_ip = str(netaddr.IPAddress(ip_address) + 1)
                new_ip_range = models_v2.IPAvailabilityRange(
                    allocation_pool_id=ip_range['allocation_pool_id'],
                    first_ip=new_first_ip,
                    last_ip=old_last_ip)
                context.session.add(new_ip_range)
                return
@staticmethod
def _check_unique_ip(context, network_id, subnet_id, ip_address):
    """Return True when the IP address is not yet allocated on the subnet."""
    query = context.session.query(models_v2.IPAllocation).filter_by(
        network_id=network_id,
        subnet_id=subnet_id,
        ip_address=ip_address)
    try:
        query.one()
    except exc.NoResultFound:
        # No allocation row exists: the address is free.
        return True
    return False
@classmethod
def _check_gateway_in_subnet(cls, cidr, gateway):
    """Validate that the gateway is on the subnet.

    IPv6 link-local gateways are always accepted without a subnet check.
    """
    ip = netaddr.IPAddress(gateway)
    if ip.version == 6 and ip.is_link_local():
        return True
    return cls._check_subnet_ip(cidr, gateway)
@classmethod
def _check_subnet_ip(cls, cidr, ip_address):
    """Validate that ip_address is a usable host address within cidr.

    The network address and the broadcast address are rejected.
    """
    ip = netaddr.IPAddress(ip_address)
    net = netaddr.IPNetwork(cidr)
    on_subnet = (net.netmask & ip) == net.network
    is_reserved = ip == net.network or ip == net.broadcast
    return bool(on_subnet and not is_reserved)
@staticmethod
def _check_ip_in_allocation_pool(context, subnet_id, gateway_ip,
                                 ip_address):
    """Return True when ip_address falls within one of the subnet's pools.

    The gateway address is explicitly excluded: it is never considered
    part of an allocation pool.
    """
    if ip_address == gateway_ip:
        return False

    ip = netaddr.IPAddress(ip_address)
    pools = context.session.query(
        models_v2.IPAllocationPool).filter_by(subnet_id=subnet_id)
    for pool in pools:
        pool_range = netaddr.IPRange(pool['first_ip'], pool['last_ip'])
        if ip in pool_range:
            return True
    return False
def _test_fixed_ips_for_port(self, context, network_id, fixed_ips,
                             device_owner):
    """Test fixed IPs for port.

    Check that configured subnets are valid prior to allocating any
    IPs. Include the subnet_id in the result if only an IP address is
    configured.

    :raises: InvalidInput, IpAddressInUse
    """
    fixed_ip_set = []
    for fixed in fixed_ips:
        found = False
        if 'subnet_id' not in fixed:
            if 'ip_address' not in fixed:
                msg = _('IP allocation requires subnet_id or ip_address')
                raise n_exc.InvalidInput(error_message=msg)

            # Only an IP was given: find which of the network's subnets
            # contains it.
            filter = {'network_id': [network_id]}
            subnets = self.get_subnets(context, filters=filter)
            for subnet in subnets:
                if self._check_subnet_ip(subnet['cidr'],
                                         fixed['ip_address']):
                    found = True
                    subnet_id = subnet['id']
                    break
            if not found:
                msg = _('IP address %s is not a valid IP for the defined '
                        'networks subnets') % fixed['ip_address']
                raise n_exc.InvalidInput(error_message=msg)
        else:
            subnet = self._get_subnet(context, fixed['subnet_id'])
            if subnet['network_id'] != network_id:
                msg = (_("Failed to create port on network %(network_id)s"
                         ", because fixed_ips included invalid subnet "
                         "%(subnet_id)s") %
                       {'network_id': network_id,
                        'subnet_id': fixed['subnet_id']})
                raise n_exc.InvalidInput(error_message=msg)
            subnet_id = subnet['id']

        if 'ip_address' in fixed:
            # Ensure that the IP's are unique
            if not NeutronDbPluginV2._check_unique_ip(context, network_id,
                                                      subnet_id,
                                                      fixed['ip_address']):
                raise n_exc.IpAddressInUse(net_id=network_id,
                                           ip_address=fixed['ip_address'])

            # Ensure that the IP is valid on the subnet
            if (not found and
                not self._check_subnet_ip(subnet['cidr'],
                                          fixed['ip_address'])):
                msg = _('IP address %s is not a valid IP for the defined '
                        'subnet') % fixed['ip_address']
                raise n_exc.InvalidInput(error_message=msg)
            # Addresses on auto-address (SLAAC) subnets are derived from
            # the MAC; they may not be assigned explicitly, except on
            # router interfaces.
            if (ipv6_utils.is_auto_address_subnet(subnet) and
                device_owner not in
                constants.ROUTER_INTERFACE_OWNERS):
                msg = (_("IPv6 address %(address)s can not be directly "
                         "assigned to a port on subnet %(id)s since the "
                         "subnet is configured for automatic addresses") %
                       {'address': fixed['ip_address'],
                        'id': subnet_id})
                raise n_exc.InvalidInput(error_message=msg)
            fixed_ip_set.append({'subnet_id': subnet_id,
                                 'ip_address': fixed['ip_address']})
        else:
            fixed_ip_set.append({'subnet_id': subnet_id})
    if len(fixed_ip_set) > cfg.CONF.max_fixed_ips_per_port:
        # BUG FIX: the error message previously misspelled 'maximum'
        # as 'maximim'.
        msg = _('Exceeded maximum amount of fixed ips per port')
        raise n_exc.InvalidInput(error_message=msg)
    return fixed_ip_set
def _allocate_fixed_ips(self, context, fixed_ips, mac_address):
    """Allocate IP addresses according to the configured fixed_ips.

    :param fixed_ips: validated entries, each with a 'subnet_id' and
        optionally an 'ip_address'.
    :param mac_address: the port MAC, used to derive EUI-64 addresses
        on auto-address (SLAAC) subnets.
    :returns: list of {'ip_address': ..., 'subnet_id': ...} dicts.
    """
    ips = []
    for fixed in fixed_ips:
        if 'ip_address' in fixed:
            # Remove the IP address from the allocation pool
            NeutronDbPluginV2._allocate_specific_ip(
                context, fixed['subnet_id'], fixed['ip_address'])
            ips.append({'ip_address': fixed['ip_address'],
                        'subnet_id': fixed['subnet_id']})
        # Only subnet ID is specified => need to generate IP
        # from subnet
        else:
            subnet = self._get_subnet(context, fixed['subnet_id'])
            if (subnet['ip_version'] == 6 and
                ipv6_utils.is_auto_address_subnet(subnet)):
                # SLAAC subnet: derive the address from the MAC.
                prefix = subnet['cidr']
                ip_address = ipv6_utils.get_ipv6_addr_by_EUI64(
                    prefix, mac_address)
                ips.append({'ip_address': ip_address.format(),
                            'subnet_id': subnet['id']})
            else:
                subnets = [subnet]
                # IP address allocation
                result = self._generate_ip(context, subnets)
                ips.append({'ip_address': result['ip_address'],
                            'subnet_id': result['subnet_id']})
    return ips
def _update_ips_for_port(self, context, network_id, port_id, original_ips,
                         new_ips, mac_address, device_owner):
    """Add or remove IPs from the port.

    :param original_ips: fixed_ips currently on the port; entries that
        survive the update are moved into the returned prev_ips list.
    :param new_ips: the requested fixed_ips, validated and allocated.
    :returns: (ips, prev_ips) where ips are newly allocated entries and
        prev_ips are the entries kept from original_ips.
    :raises: InvalidInput when more entries than
        max_fixed_ips_per_port are requested.
    """
    ips = []
    # These ips are still on the port and haven't been removed
    prev_ips = []

    # the new_ips contain all of the fixed_ips that are to be updated
    if len(new_ips) > cfg.CONF.max_fixed_ips_per_port:
        # BUG FIX: the error message previously misspelled 'maximum'
        # as 'maximim'.
        msg = _('Exceeded maximum amount of fixed ips per port')
        raise n_exc.InvalidInput(error_message=msg)

    # Remove all of the intersecting elements
    for original_ip in original_ips[:]:
        for new_ip in new_ips[:]:
            if ('ip_address' in new_ip and
                original_ip['ip_address'] == new_ip['ip_address']):
                original_ips.remove(original_ip)
                new_ips.remove(new_ip)
                prev_ips.append(original_ip)

    # Check if the IP's to add are OK
    to_add = self._test_fixed_ips_for_port(context, network_id, new_ips,
                                           device_owner)
    # Whatever remains in original_ips was dropped by the update.
    for ip in original_ips:
        LOG.debug("Port update. Hold %s", ip)
        NeutronDbPluginV2._delete_ip_allocation(context,
                                                network_id,
                                                ip['subnet_id'],
                                                ip['ip_address'])

    if to_add:
        LOG.debug("Port update. Adding %s", to_add)
        ips = self._allocate_fixed_ips(context, to_add, mac_address)
    return ips, prev_ips
def _allocate_ips_for_port(self, context, port):
    """Allocate IP addresses for the port.

    If port['fixed_ips'] is set to 'ATTR_NOT_SPECIFIED', allocate IP
    addresses for the port. If port['fixed_ips'] contains an IP address or
    a subnet_id then allocate an IP address accordingly.
    """
    p = port['port']
    ips = []
    fixed_configured = p['fixed_ips'] is not attributes.ATTR_NOT_SPECIFIED
    if fixed_configured:
        # Caller supplied fixed_ips: validate then allocate exactly those.
        configured_ips = self._test_fixed_ips_for_port(context,
                                                       p["network_id"],
                                                       p['fixed_ips'],
                                                       p['device_owner'])
        ips = self._allocate_fixed_ips(context,
                                       configured_ips,
                                       p['mac_address'])
    else:
        # No fixed_ips given: pick addresses from the network's subnets.
        filter = {'network_id': [p['network_id']]}
        subnets = self.get_subnets(context, filters=filter)
        # Split into v4 and v6 subnets
        v4 = []
        v6_stateful = []
        v6_stateless = []
        for subnet in subnets:
            if subnet['ip_version'] == 4:
                v4.append(subnet)
            else:
                if ipv6_utils.is_auto_address_subnet(subnet):
                    v6_stateless.append(subnet)
                else:
                    v6_stateful.append(subnet)
        for subnet in v6_stateless:
            # Auto-address (SLAAC) subnets derive the address from the
            # port MAC via EUI-64 rather than from allocation pools.
            prefix = subnet['cidr']
            ip_address = ipv6_utils.get_ipv6_addr_by_EUI64(
                prefix, p['mac_address'])
            if not self._check_unique_ip(
                context, p['network_id'],
                subnet['id'], ip_address.format()):
                raise n_exc.IpAddressInUse(
                    net_id=p['network_id'],
                    ip_address=ip_address.format())
            ips.append({'ip_address': ip_address.format(),
                        'subnet_id': subnet['id']})
        # One generated address per address family that has subnets.
        version_subnets = [v4, v6_stateful]
        for subnets in version_subnets:
            if subnets:
                result = NeutronDbPluginV2._generate_ip(context, subnets)
                ips.append({'ip_address': result['ip_address'],
                            'subnet_id': result['subnet_id']})
    return ips
def _validate_subnet_cidr(self, context, network, new_subnet_cidr):
    """Validate the CIDR for a subnet.

    Verifies the specified CIDR does not overlap with the ones defined
    for the other subnets specified for this network, or with any other
    CIDR if overlapping IPs are disabled.

    :raises: InvalidInput on a /0 prefix or an overlap.
    """
    new_subnet_ipset = netaddr.IPSet([new_subnet_cidr])
    # Disallow subnets with prefix length 0 as they will lead to
    # dnsmasq failures (see bug 1362651).
    # This is not a discrimination against /0 subnets.
    # A /0 subnet is conceptually possible but hardly a practical
    # scenario for neutron's use cases.
    for cidr in new_subnet_ipset.iter_cidrs():
        if cidr.prefixlen == 0:
            err_msg = _("0 is not allowed as CIDR prefix length")
            raise n_exc.InvalidInput(error_message=err_msg)

    if cfg.CONF.allow_overlapping_ips:
        # Overlaps only matter within this one network.
        subnet_list = network.subnets
    else:
        # Overlaps are checked globally across every network.
        subnet_list = self._get_all_subnets(context)
    for subnet in subnet_list:
        if (netaddr.IPSet([subnet.cidr]) & new_subnet_ipset):
            # don't give out details of the overlapping subnet
            err_msg = (_("Requested subnet with cidr: %(cidr)s for "
                         "network: %(network_id)s overlaps with another "
                         "subnet") %
                       {'cidr': new_subnet_cidr,
                        'network_id': network.id})
            LOG.info(_LI("Validation for CIDR: %(new_cidr)s failed - "
                         "overlaps with subnet %(subnet_id)s "
                         "(CIDR: %(cidr)s)"),
                     {'new_cidr': new_subnet_cidr,
                      'subnet_id': subnet.id,
                      'cidr': subnet.cidr})
            raise n_exc.InvalidInput(error_message=err_msg)
    def _validate_allocation_pools(self, ip_pools, subnet_cidr):
        """Validate IP allocation pools.

        Verify start and end address for each allocation pool are valid,
        ie: constituted by valid and appropriately ordered IP addresses.
        Also, verify pools do not overlap among themselves.
        Finally, verify that each range fall within the subnet's CIDR.

        :raises n_exc.InvalidAllocationPool: malformed, wrong-version, or
            misordered pool.
        :raises n_exc.OutOfBoundsAllocationPool: pool outside the usable
            range of the CIDR.
        :raises n_exc.OverlappingAllocationPools: two pools intersect.
        """
        subnet = netaddr.IPNetwork(subnet_cidr)
        # Usable range excludes the network and broadcast addresses.
        subnet_first_ip = netaddr.IPAddress(subnet.first + 1)
        subnet_last_ip = netaddr.IPAddress(subnet.last - 1)
        LOG.debug("Performing IP validity checks on allocation pools")
        ip_sets = []
        for ip_pool in ip_pools:
            try:
                start_ip = netaddr.IPAddress(ip_pool['start'])
                end_ip = netaddr.IPAddress(ip_pool['end'])
            except netaddr.AddrFormatError:
                LOG.info(_LI("Found invalid IP address in pool: "
                             "%(start)s - %(end)s:"),
                         {'start': ip_pool['start'],
                          'end': ip_pool['end']})
                raise n_exc.InvalidAllocationPool(pool=ip_pool)
            if (start_ip.version != subnet.version or
                    end_ip.version != subnet.version):
                LOG.info(_LI("Specified IP addresses do not match "
                             "the subnet IP version"))
                raise n_exc.InvalidAllocationPool(pool=ip_pool)
            if end_ip < start_ip:
                LOG.info(_LI("Start IP (%(start)s) is greater than end IP "
                             "(%(end)s)"),
                         {'start': ip_pool['start'], 'end': ip_pool['end']})
                raise n_exc.InvalidAllocationPool(pool=ip_pool)
            if start_ip < subnet_first_ip or end_ip > subnet_last_ip:
                LOG.info(_LI("Found pool larger than subnet "
                             "CIDR:%(start)s - %(end)s"),
                         {'start': ip_pool['start'],
                          'end': ip_pool['end']})
                raise n_exc.OutOfBoundsAllocationPool(
                    pool=ip_pool,
                    subnet_cidr=subnet_cidr)
            # Valid allocation pool
            # Create an IPSet for it for easily verifying overlaps
            ip_sets.append(netaddr.IPSet(netaddr.IPRange(
                ip_pool['start'],
                ip_pool['end']).cidrs()))
        LOG.debug("Checking for overlaps among allocation pools "
                  "and gateway ip")
        # Keep the original pool dicts (parallel to ip_sets) for error
        # reporting.
        ip_ranges = ip_pools[:]
        # Use integer cursors as an efficient way for implementing
        # comparison and avoiding comparing the same pair twice
        for l_cursor in range(len(ip_sets)):
            for r_cursor in range(l_cursor + 1, len(ip_sets)):
                if ip_sets[l_cursor] & ip_sets[r_cursor]:
                    l_range = ip_ranges[l_cursor]
                    r_range = ip_ranges[r_cursor]
                    LOG.info(_LI("Found overlapping ranges: %(l_range)s and "
                                 "%(r_range)s"),
                             {'l_range': l_range, 'r_range': r_range})
                    raise n_exc.OverlappingAllocationPools(
                        pool_1=l_range,
                        pool_2=r_range,
                        subnet_cidr=subnet_cidr)
def _validate_host_route(self, route, ip_version):
try:
netaddr.IPNetwork(route['destination'])
netaddr.IPAddress(route['nexthop'])
except netaddr.core.AddrFormatError:
err_msg = _("Invalid route: %s") % route
raise n_exc.InvalidInput(error_message=err_msg)
except ValueError:
# netaddr.IPAddress would raise this
err_msg = _("Invalid route: %s") % route
raise n_exc.InvalidInput(error_message=err_msg)
self._validate_ip_version(ip_version, route['nexthop'], 'nexthop')
self._validate_ip_version(ip_version, route['destination'],
'destination')
def _allocate_pools_for_subnet(self, context, subnet):
"""Create IP allocation pools for a given subnet
Pools are defined by the 'allocation_pools' attribute,
a list of dict objects with 'start' and 'end' keys for
defining the pool range.
"""
pools = []
# Auto allocate the pool around gateway_ip
net = netaddr.IPNetwork(subnet['cidr'])
first_ip = net.first + 1
last_ip = net.last - 1
gw_ip = int(netaddr.IPAddress(subnet['gateway_ip'] or net.last))
# Use the gw_ip to find a point for splitting allocation pools
# for this subnet
split_ip = min(max(gw_ip, net.first), net.last)
if split_ip > first_ip:
pools.append({'start': str(netaddr.IPAddress(first_ip)),
'end': str(netaddr.IPAddress(split_ip - 1))})
if split_ip < last_ip:
pools.append({'start': str(netaddr.IPAddress(split_ip + 1)),
'end': str(netaddr.IPAddress(last_ip))})
# return auto-generated pools
# no need to check for their validity
return pools
    def _validate_shared_update(self, context, id, original, updated):
        """Validate turning off a network's 'shared' flag.

        Rejected when tenants other than the network owner still have
        ports or subnets on the network.

        :raises n_exc.InvalidSharedSetting: if other tenants use the network.
        """
        # The only case that needs to be validated is when 'shared'
        # goes from True to False
        if updated['shared'] == original.shared or updated['shared']:
            return
        # Router-gateway and floating-ip ports are infrastructure-owned
        # and do not count as tenant usage.
        ports = self._model_query(
            context, models_v2.Port).filter(
                and_(
                    models_v2.Port.network_id == id,
                    models_v2.Port.device_owner !=
                    constants.DEVICE_OWNER_ROUTER_GW,
                    models_v2.Port.device_owner !=
                    constants.DEVICE_OWNER_FLOATINGIP))
        subnets = self._model_query(
            context, models_v2.Subnet).filter(
                models_v2.Subnet.network_id == id)
        tenant_ids = set([port['tenant_id'] for port in ports] +
                         [subnet['tenant_id'] for subnet in subnets])
        # raise if multiple tenants found or if the only tenant found
        # is not the owner of the network
        if (len(tenant_ids) > 1 or len(tenant_ids) == 1 and
                tenant_ids.pop() != original.tenant_id):
            raise n_exc.InvalidSharedSetting(network=original.name)
def _validate_ipv6_attributes(self, subnet, cur_subnet):
if cur_subnet:
self._validate_ipv6_update_dhcp(subnet, cur_subnet)
return
ra_mode_set = attributes.is_attr_set(subnet.get('ipv6_ra_mode'))
address_mode_set = attributes.is_attr_set(
subnet.get('ipv6_address_mode'))
self._validate_ipv6_dhcp(ra_mode_set, address_mode_set,
subnet['enable_dhcp'])
if ra_mode_set and address_mode_set:
self._validate_ipv6_combination(subnet['ipv6_ra_mode'],
subnet['ipv6_address_mode'])
if address_mode_set or ra_mode_set:
self._validate_eui64_applicable(subnet)
def _validate_eui64_applicable(self, subnet):
# Per RFC 4862, section 5.5.3, prefix length and interface
# id together should be equal to 128. Currently neutron supports
# EUI64 interface id only, thus limiting the prefix
# length to be 64 only.
if ipv6_utils.is_auto_address_subnet(subnet):
if netaddr.IPNetwork(subnet['cidr']).prefixlen != 64:
msg = _('Invalid CIDR %s for IPv6 address mode. '
'OpenStack uses the EUI-64 address format, '
'which requires the prefix to be /64.')
raise n_exc.InvalidInput(
error_message=(msg % subnet['cidr']))
def _validate_ipv6_combination(self, ra_mode, address_mode):
if ra_mode != address_mode:
msg = _("ipv6_ra_mode set to '%(ra_mode)s' with ipv6_address_mode "
"set to '%(addr_mode)s' is not valid. "
"If both attributes are set, they must be the same value"
) % {'ra_mode': ra_mode, 'addr_mode': address_mode}
raise n_exc.InvalidInput(error_message=msg)
def _validate_ipv6_dhcp(self, ra_mode_set, address_mode_set, enable_dhcp):
if (ra_mode_set or address_mode_set) and not enable_dhcp:
msg = _("ipv6_ra_mode or ipv6_address_mode cannot be set when "
"enable_dhcp is set to False.")
raise n_exc.InvalidInput(error_message=msg)
def _validate_ipv6_update_dhcp(self, subnet, cur_subnet):
if ('enable_dhcp' in subnet and not subnet['enable_dhcp']):
msg = _("Cannot disable enable_dhcp with "
"ipv6 attributes set")
ra_mode_set = attributes.is_attr_set(subnet.get('ipv6_ra_mode'))
address_mode_set = attributes.is_attr_set(
subnet.get('ipv6_address_mode'))
if ra_mode_set or address_mode_set:
raise n_exc.InvalidInput(error_message=msg)
old_ra_mode_set = attributes.is_attr_set(
cur_subnet.get('ipv6_ra_mode'))
old_address_mode_set = attributes.is_attr_set(
cur_subnet.get('ipv6_address_mode'))
if old_ra_mode_set or old_address_mode_set:
raise n_exc.InvalidInput(error_message=msg)
def _make_network_dict(self, network, fields=None,
process_extensions=True):
res = {'id': network['id'],
'name': network['name'],
'tenant_id': network['tenant_id'],
'admin_state_up': network['admin_state_up'],
'status': network['status'],
'shared': network['shared'],
'subnets': [subnet['id']
for subnet in network['subnets']]}
# Call auxiliary extend functions, if any
if process_extensions:
self._apply_dict_extend_functions(
attributes.NETWORKS, res, network)
return self._fields(res, fields)
def _make_subnet_dict(self, subnet, fields=None):
res = {'id': subnet['id'],
'name': subnet['name'],
'tenant_id': subnet['tenant_id'],
'network_id': subnet['network_id'],
'ip_version': subnet['ip_version'],
'cidr': subnet['cidr'],
'allocation_pools': [{'start': pool['first_ip'],
'end': pool['last_ip']}
for pool in subnet['allocation_pools']],
'gateway_ip': subnet['gateway_ip'],
'enable_dhcp': subnet['enable_dhcp'],
'ipv6_ra_mode': subnet['ipv6_ra_mode'],
'ipv6_address_mode': subnet['ipv6_address_mode'],
'dns_nameservers': [dns['address']
for dns in subnet['dns_nameservers']],
'host_routes': [{'destination': route['destination'],
'nexthop': route['nexthop']}
for route in subnet['routes']],
'shared': subnet['shared']
}
# Call auxiliary extend functions, if any
self._apply_dict_extend_functions(attributes.SUBNETS, res, subnet)
return self._fields(res, fields)
def _make_port_dict(self, port, fields=None,
process_extensions=True):
res = {"id": port["id"],
'name': port['name'],
"network_id": port["network_id"],
'tenant_id': port['tenant_id'],
"mac_address": port["mac_address"],
"admin_state_up": port["admin_state_up"],
"status": port["status"],
"fixed_ips": [{'subnet_id': ip["subnet_id"],
'ip_address': ip["ip_address"]}
for ip in port["fixed_ips"]],
"device_id": port["device_id"],
"device_owner": port["device_owner"]}
# Call auxiliary extend functions, if any
if process_extensions:
self._apply_dict_extend_functions(
attributes.PORTS, res, port)
return self._fields(res, fields)
    def _create_bulk(self, resource, context, request_items):
        """Create several resources of one type in a single transaction.

        Calls ``create_<resource>`` for each item under the '<resource>s'
        key of request_items; on any failure the transaction is rolled
        back and the exception re-raised.
        """
        objects = []
        collection = "%ss" % resource
        items = request_items[collection]
        # Manual begin/commit (not a 'with' block) so the rollback can be
        # paired with logging of the failing item.
        context.session.begin(subtransactions=True)
        try:
            for item in items:
                obj_creator = getattr(self, 'create_%s' % resource)
                objects.append(obj_creator(context, item))
            context.session.commit()
        except Exception:
            context.session.rollback()
            with excutils.save_and_reraise_exception():
                # NOTE(review): 'item' is the last item processed; if
                # 'items' is empty and commit() itself fails this would
                # NameError — confirm whether that path is reachable.
                LOG.error(_LE("An exception occurred while creating "
                              "the %(resource)s:%(item)s"),
                          {'resource': resource, 'item': item})
        return objects
    def create_network_bulk(self, context, networks):
        """Create several networks in one transactional bulk request."""
        return self._create_bulk('network', context, networks)
    def create_network(self, context, network):
        """Handle creation of a single network."""
        # single request processing
        n = network['network']
        # NOTE(jkoelker) Get the tenant_id outside of the session to avoid
        # unneeded db action if the operation raises
        tenant_id = self._get_tenant_id_for_create(context, n)
        with context.session.begin(subtransactions=True):
            args = {'tenant_id': tenant_id,
                    # Honor a caller-supplied id; otherwise generate one.
                    'id': n.get('id') or uuidutils.generate_uuid(),
                    'name': n['name'],
                    'admin_state_up': n['admin_state_up'],
                    'shared': n['shared'],
                    'status': n.get('status', constants.NET_STATUS_ACTIVE)}
            network = models_v2.Network(**args)
            context.session.add(network)
        # Extensions are not processed here; callers extend as needed.
        return self._make_network_dict(network, process_extensions=False)
    def update_network(self, context, id, network):
        """Update a network, propagating 'shared' to its subnets."""
        n = network['network']
        with context.session.begin(subtransactions=True):
            network = self._get_network(context, id)
            # validate 'shared' parameter
            if 'shared' in n:
                self._validate_shared_update(context, id, network, n)
            network.update(n)
            # also update shared in all the subnets for this network
            subnets = self._get_subnets_by_network(context, id)
            for subnet in subnets:
                subnet['shared'] = network['shared']
        return self._make_network_dict(network)
    def delete_network(self, context, id):
        """Delete a network after removing auto-deletable ports.

        :raises n_exc.NetworkInUse: if any non-auto-delete port remains.
        """
        with context.session.begin(subtransactions=True):
            network = self._get_network(context, id)
            # Bulk-delete infrastructure ports (DHCP etc.) that are safe
            # to remove automatically.
            context.session.query(models_v2.Port).filter_by(
                network_id=id).filter(
                    models_v2.Port.device_owner.
                    in_(AUTO_DELETE_PORT_OWNERS)).delete(synchronize_session=False)
            port_in_use = context.session.query(models_v2.Port).filter_by(
                network_id=id).first()
            if port_in_use:
                raise n_exc.NetworkInUse(net_id=id)
            # clean up subnets
            subnets = self._get_subnets_by_network(context, id)
            for subnet in subnets:
                self.delete_subnet(context, subnet['id'])
            context.session.delete(network)
def get_network(self, context, id, fields=None):
network = self._get_network(context, id)
return self._make_network_dict(network, fields)
def get_networks(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None,
page_reverse=False):
marker_obj = self._get_marker_obj(context, 'network', limit, marker)
return self._get_collection(context, models_v2.Network,
self._make_network_dict,
filters=filters, fields=fields,
sorts=sorts,
limit=limit,
marker_obj=marker_obj,
page_reverse=page_reverse)
    def get_networks_count(self, context, filters=None):
        """Return the number of networks matching the filters."""
        return self._get_collection_count(context, models_v2.Network,
                                          filters=filters)
    def create_subnet_bulk(self, context, subnets):
        """Create several subnets in one transactional bulk request."""
        return self._create_bulk('subnet', context, subnets)
def _validate_ip_version(self, ip_version, addr, name):
"""Check IP field of a subnet match specified ip version."""
ip = netaddr.IPNetwork(addr)
if ip.version != ip_version:
data = {'name': name,
'addr': addr,
'ip_version': ip_version}
msg = _("%(name)s '%(addr)s' does not match "
"the ip_version '%(ip_version)s'") % data
raise n_exc.InvalidInput(error_message=msg)
    def _validate_subnet(self, context, s, cur_subnet=None):
        """Validate a subnet spec.

        Checks attributes which may change during create_subnet() and
        update_subnet(): cidr, gateway_ip, dns_nameservers, host_routes
        and IPv6 mode attributes.

        :param s: subnet spec dict; MUST contain 'ip_version'.
        :param cur_subnet: existing subnet row on update; None on create.
        """
        # This method will validate attributes which may change during
        # create_subnet() and update_subnet().
        # The method requires the subnet spec 's' has 'ip_version' field.
        # If 's' dict does not have 'ip_version' field in an API call
        # (e.g., update_subnet()), you need to set 'ip_version' field
        # before calling this method.
        ip_ver = s['ip_version']
        if 'cidr' in s:
            self._validate_ip_version(ip_ver, s['cidr'], 'cidr')
        if attributes.is_attr_set(s.get('gateway_ip')):
            self._validate_ip_version(ip_ver, s['gateway_ip'], 'gateway_ip')
            if (cfg.CONF.force_gateway_on_subnet and
                not self._check_gateway_in_subnet(
                    s['cidr'], s['gateway_ip'])):
                error_message = _("Gateway is not valid on subnet")
                raise n_exc.InvalidInput(error_message=error_message)
            # Ensure the gateway IP is not assigned to any port
            # skip this check in case of create (s parameter won't have id)
            # NOTE(salv-orlando): There is slight chance of a race, when
            # a subnet-update and a router-interface-add operation are
            # executed concurrently
            if cur_subnet:
                alloc_qry = context.session.query(models_v2.IPAllocation)
                allocated = alloc_qry.filter_by(
                    ip_address=cur_subnet['gateway_ip'],
                    subnet_id=cur_subnet['id']).first()
                if allocated and allocated['port_id']:
                    raise n_exc.GatewayIpInUse(
                        ip_address=cur_subnet['gateway_ip'],
                        port_id=allocated['port_id'])
        if attributes.is_attr_set(s.get('dns_nameservers')):
            if len(s['dns_nameservers']) > cfg.CONF.max_dns_nameservers:
                raise n_exc.DNSNameServersExhausted(
                    subnet_id=s.get('id', _('new subnet')),
                    quota=cfg.CONF.max_dns_nameservers)
            for dns in s['dns_nameservers']:
                try:
                    netaddr.IPAddress(dns)
                except Exception:
                    raise n_exc.InvalidInput(
                        error_message=(_("Error parsing dns address %s") %
                                       dns))
                self._validate_ip_version(ip_ver, dns, 'dns_nameserver')
        if attributes.is_attr_set(s.get('host_routes')):
            if len(s['host_routes']) > cfg.CONF.max_subnet_host_routes:
                raise n_exc.HostRoutesExhausted(
                    subnet_id=s.get('id', _('new subnet')),
                    quota=cfg.CONF.max_subnet_host_routes)
            # check if the routes are all valid
            for rt in s['host_routes']:
                self._validate_host_route(rt, ip_ver)
        if ip_ver == 4:
            # IPv6 mode attributes are meaningless on IPv4 subnets.
            if attributes.is_attr_set(s.get('ipv6_ra_mode')):
                raise n_exc.InvalidInput(
                    error_message=(_("ipv6_ra_mode is not valid when "
                                     "ip_version is 4")))
            if attributes.is_attr_set(s.get('ipv6_address_mode')):
                raise n_exc.InvalidInput(
                    error_message=(_("ipv6_address_mode is not valid when "
                                     "ip_version is 4")))
        if ip_ver == 6:
            self._validate_ipv6_attributes(s, cur_subnet)
def _validate_gw_out_of_pools(self, gateway_ip, pools):
for allocation_pool in pools:
pool_range = netaddr.IPRange(
allocation_pool['start'],
allocation_pool['end'])
if netaddr.IPAddress(gateway_ip) in pool_range:
raise n_exc.GatewayConflictWithAllocationPools(
pool=pool_range,
ip_address=gateway_ip)
    def create_subnet(self, context, subnet):
        """Create a subnet, its DNS/route rows and allocation pools."""
        net = netaddr.IPNetwork(subnet['subnet']['cidr'])
        # turn the CIDR into a proper subnet
        subnet['subnet']['cidr'] = '%s/%s' % (net.network, net.prefixlen)
        s = subnet['subnet']
        # Default the gateway to the first usable address of the CIDR.
        if s['gateway_ip'] is attributes.ATTR_NOT_SPECIFIED:
            s['gateway_ip'] = str(netaddr.IPAddress(net.first + 1))
        if s['allocation_pools'] == attributes.ATTR_NOT_SPECIFIED:
            s['allocation_pools'] = self._allocate_pools_for_subnet(context, s)
        else:
            # Caller-supplied pools must be valid and exclude the gateway.
            self._validate_allocation_pools(s['allocation_pools'], s['cidr'])
            if s['gateway_ip'] is not None:
                self._validate_gw_out_of_pools(s['gateway_ip'],
                                               s['allocation_pools'])
        self._validate_subnet(context, s)
        tenant_id = self._get_tenant_id_for_create(context, s)
        with context.session.begin(subtransactions=True):
            network = self._get_network(context, s["network_id"])
            self._validate_subnet_cidr(context, network, s['cidr'])
            # The 'shared' attribute for subnets is for internal plugin
            # use only. It is not exposed through the API
            args = {'tenant_id': tenant_id,
                    'id': s.get('id') or uuidutils.generate_uuid(),
                    'name': s['name'],
                    'network_id': s['network_id'],
                    'ip_version': s['ip_version'],
                    'cidr': s['cidr'],
                    'enable_dhcp': s['enable_dhcp'],
                    'gateway_ip': s['gateway_ip'],
                    'shared': network.shared}
            # IPv6 mode columns are only persisted when DHCP is on.
            if s['ip_version'] == 6 and s['enable_dhcp']:
                if attributes.is_attr_set(s['ipv6_ra_mode']):
                    args['ipv6_ra_mode'] = s['ipv6_ra_mode']
                if attributes.is_attr_set(s['ipv6_address_mode']):
                    args['ipv6_address_mode'] = s['ipv6_address_mode']
            subnet = models_v2.Subnet(**args)
            context.session.add(subnet)
            if s['dns_nameservers'] is not attributes.ATTR_NOT_SPECIFIED:
                for addr in s['dns_nameservers']:
                    ns = models_v2.DNSNameServer(address=addr,
                                                 subnet_id=subnet.id)
                    context.session.add(ns)
            if s['host_routes'] is not attributes.ATTR_NOT_SPECIFIED:
                for rt in s['host_routes']:
                    route = models_v2.SubnetRoute(
                        subnet_id=subnet.id,
                        destination=rt['destination'],
                        nexthop=rt['nexthop'])
                    context.session.add(route)
            # Each pool also seeds an availability range covering it.
            for pool in s['allocation_pools']:
                ip_pool = models_v2.IPAllocationPool(subnet=subnet,
                                                     first_ip=pool['start'],
                                                     last_ip=pool['end'])
                context.session.add(ip_pool)
                ip_range = models_v2.IPAvailabilityRange(
                    ipallocationpool=ip_pool,
                    first_ip=pool['start'],
                    last_ip=pool['end'])
                context.session.add(ip_range)
        return self._make_subnet_dict(subnet)
    def _update_subnet_dns_nameservers(self, context, id, s):
        """Sync the subnet's DNS nameserver rows with the requested list.

        Returns the new list of addresses and removes 'dns_nameservers'
        from 's' so the generic subnet.update() skips it.
        """
        old_dns_list = self._get_dns_by_subnet(context, id)
        new_dns_addr_set = set(s["dns_nameservers"])
        old_dns_addr_set = set([dns['address']
                                for dns in old_dns_list])
        new_dns = list(new_dns_addr_set)
        # Delete rows whose address is no longer requested.
        for dns_addr in old_dns_addr_set - new_dns_addr_set:
            for dns in old_dns_list:
                if dns['address'] == dns_addr:
                    context.session.delete(dns)
        # Add rows for newly requested addresses.
        for dns_addr in new_dns_addr_set - old_dns_addr_set:
            dns = models_v2.DNSNameServer(
                address=dns_addr,
                subnet_id=id)
            context.session.add(dns)
        del s["dns_nameservers"]
        return new_dns
    def _update_subnet_host_routes(self, context, id, s):
        """Sync the subnet's host-route rows with the requested list.

        Returns the new list of route dicts and removes 'host_routes'
        from 's' so the generic subnet.update() skips it.
        """

        def _combine(ht):
            # Encode a route as one comparable string; '_' cannot appear
            # inside a CIDR or an IP address, so the join is unambiguous.
            return ht['destination'] + "_" + ht['nexthop']

        old_route_list = self._get_route_by_subnet(context, id)
        new_route_set = set([_combine(route)
                             for route in s['host_routes']])
        old_route_set = set([_combine(route)
                             for route in old_route_list])
        # Delete rows no longer requested.
        for route_str in old_route_set - new_route_set:
            for route in old_route_list:
                if _combine(route) == route_str:
                    context.session.delete(route)
        # Add rows for newly requested routes.
        for route_str in new_route_set - old_route_set:
            route = models_v2.SubnetRoute(
                destination=route_str.partition("_")[0],
                nexthop=route_str.partition("_")[2],
                subnet_id=id)
            context.session.add(route)
        # Gather host routes for result
        new_routes = []
        for route_str in new_route_set:
            new_routes.append(
                {'destination': route_str.partition("_")[0],
                 'nexthop': route_str.partition("_")[2]})
        del s["host_routes"]
        return new_routes
    def _update_subnet_allocation_pools(self, context, id, s):
        """Replace the subnet's allocation pools with the requested set.

        Returns the new pool dicts and removes 'allocation_pools' from 's'
        so the generic subnet.update() skips it.
        """
        # Drop old pools wholesale; availability ranges are rebuilt below.
        context.session.query(models_v2.IPAllocationPool).filter_by(
            subnet_id=id).delete()
        new_pools = [models_v2.IPAllocationPool(
            first_ip=p['start'], last_ip=p['end'],
            subnet_id=id) for p in s['allocation_pools']]
        context.session.add_all(new_pools)
        NeutronDbPluginV2._rebuild_availability_ranges(context, [s])
        # Gather new pools for result:
        result_pools = [{'start': pool['start'],
                         'end': pool['end']}
                        for pool in s['allocation_pools']]
        del s['allocation_pools']
        return result_pools
    def update_subnet(self, context, id, subnet):
        """Update the subnet with new info.

        The change however will not be realized until the client renew the
        dns lease or we support gratuitous DHCP offers
        """
        s = subnet['subnet']
        changed_host_routes = False
        changed_dns = False
        changed_allocation_pools = False
        db_subnet = self._get_subnet(context, id)
        # Fill 'ip_version' and 'allocation_pools' fields with the current
        # value since _validate_subnet() expects subnet spec has 'ip_version'
        # and 'allocation_pools' fields.
        s['ip_version'] = db_subnet.ip_version
        s['cidr'] = db_subnet.cidr
        s['id'] = db_subnet.id
        self._validate_subnet(context, s, cur_subnet=db_subnet)
        # A new gateway must stay outside the existing pools.
        if s.get('gateway_ip') is not None:
            allocation_pools = [{'start': p['first_ip'], 'end': p['last_ip']}
                                for p in db_subnet.allocation_pools]
            self._validate_gw_out_of_pools(s["gateway_ip"], allocation_pools)
        with context.session.begin(subtransactions=True):
            # The helpers below also pop their key from 's' so the generic
            # subnet.update(s) does not try to write the relation columns.
            if "dns_nameservers" in s:
                changed_dns = True
                new_dns = self._update_subnet_dns_nameservers(context, id, s)
            if "host_routes" in s:
                changed_host_routes = True
                new_routes = self._update_subnet_host_routes(context, id, s)
            if "allocation_pools" in s:
                self._validate_allocation_pools(s['allocation_pools'],
                                                s['cidr'])
                changed_allocation_pools = True
                new_pools = self._update_subnet_allocation_pools(context,
                                                                 id, s)
            subnet = self._get_subnet(context, id)
            subnet.update(s)
        result = self._make_subnet_dict(subnet)
        # Keep up with fields that changed
        if changed_dns:
            result['dns_nameservers'] = new_dns
        if changed_host_routes:
            result['host_routes'] = new_routes
        if changed_allocation_pools:
            result['allocation_pools'] = new_pools
        return result
def _subnet_check_ip_allocations(self, context, subnet_id):
return context.session.query(
models_v2.IPAllocation).filter_by(
subnet_id=subnet_id).join(models_v2.Port).first()
def delete_subnet(self, context, id):
with context.session.begin(subtransactions=True):
subnet = self._get_subnet(context, id)
# Delete all network owned ports
qry_network_ports = (
context.session.query(models_v2.IPAllocation).
filter_by(subnet_id=subnet['id']).
join(models_v2.Port))
# Remove network owned ports, and delete IP allocations
# for IPv6 addresses which were automatically generated
# via SLAAC
is_auto_addr_subnet = ipv6_utils.is_auto_address_subnet(subnet)
if not is_auto_addr_subnet:
qry_network_ports = (
qry_network_ports.filter(models_v2.Port.device_owner.
in_(AUTO_DELETE_PORT_OWNERS)))
network_ports = qry_network_ports.all()
if network_ports:
map(context.session.delete, network_ports)
# Check if there are more IP allocations, unless
# is_auto_address_subnet is True. In that case the check is
# unnecessary. This additional check not only would be wasteful
# for this class of subnet, but is also error-prone since when
# the isolation level is set to READ COMMITTED allocations made
# concurrently will be returned by this query
if not is_auto_addr_subnet:
if self._subnet_check_ip_allocations(context, id):
LOG.debug("Found IP allocations on subnet %s, "
"cannot delete", id)
raise n_exc.SubnetInUse(subnet_id=id)
context.session.delete(subnet)
def get_subnet(self, context, id, fields=None):
subnet = self._get_subnet(context, id)
return self._make_subnet_dict(subnet, fields)
def get_subnets(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None,
page_reverse=False):
marker_obj = self._get_marker_obj(context, 'subnet', limit, marker)
return self._get_collection(context, models_v2.Subnet,
self._make_subnet_dict,
filters=filters, fields=fields,
sorts=sorts,
limit=limit,
marker_obj=marker_obj,
page_reverse=page_reverse)
    def get_subnets_count(self, context, filters=None):
        """Return the number of subnets matching the filters."""
        return self._get_collection_count(context, models_v2.Subnet,
                                          filters=filters)
def _check_mac_addr_update(self, context, port, new_mac, device_owner):
if (device_owner and device_owner.startswith('network:')):
raise n_exc.UnsupportedPortDeviceOwner(
op=_("mac address update"), port_id=id,
device_owner=device_owner)
    def create_port_bulk(self, context, ports):
        """Create several ports in one transactional bulk request."""
        return self._create_bulk('port', context, ports)
    def _create_port_with_mac(self, context, network_id, port_data,
                              mac_address, nested=False):
        """Insert a port row with the given MAC address.

        :param nested: open a nested (SAVEPOINT) transaction so a duplicate
            MAC can be rolled back without aborting the enclosing one.
        :raises n_exc.MacAddressInUse: on a DB duplicate-entry error.
        """
        try:
            with context.session.begin(subtransactions=True, nested=nested):
                db_port = models_v2.Port(mac_address=mac_address, **port_data)
                context.session.add(db_port)
                return db_port
        except db_exc.DBDuplicateEntry:
            raise n_exc.MacAddressInUse(net_id=network_id, mac=mac_address)
    def _create_port(self, context, network_id, port_data):
        """Create a port with a generated MAC, retrying on collisions.

        :raises n_exc.MacAddressGenerationFailure: after exhausting
            cfg.CONF.mac_generation_retries attempts.
        """
        max_retries = cfg.CONF.mac_generation_retries
        for i in range(max_retries):
            mac = self._generate_mac()
            try:
                # nested = True frames an operation that may potentially fail
                # within a transaction, so that it can be rolled back to the
                # point before its failure while maintaining the enclosing
                # transaction
                return self._create_port_with_mac(
                    context, network_id, port_data, mac, nested=True)
            except n_exc.MacAddressInUse:
                # Collision: log and try another random MAC.
                LOG.debug('Generated mac %(mac_address)s exists on '
                          'network %(network_id)s',
                          {'mac_address': mac, 'network_id': network_id})
        LOG.error(_LE("Unable to generate mac address after %s attempts"),
                  max_retries)
        raise n_exc.MacAddressGenerationFailure(net_id=network_id)
    def create_port(self, context, port):
        """Create a port, allocating its MAC address and fixed IPs."""
        p = port['port']
        port_id = p.get('id') or uuidutils.generate_uuid()
        network_id = p['network_id']
        # NOTE(jkoelker) Get the tenant_id outside of the session to avoid
        # unneeded db action if the operation raises
        tenant_id = self._get_tenant_id_for_create(context, p)
        if p.get('device_owner'):
            # Tenants may not hijack another tenant's router via device_id.
            self._enforce_device_owner_not_router_intf_or_device_id(
                context, p.get('device_owner'), p.get('device_id'), tenant_id)
        port_data = dict(tenant_id=tenant_id,
                         name=p['name'],
                         id=port_id,
                         network_id=network_id,
                         admin_state_up=p['admin_state_up'],
                         status=p.get('status', constants.PORT_STATUS_ACTIVE),
                         device_id=p['device_id'],
                         device_owner=p['device_owner'])
        with context.session.begin(subtransactions=True):
            # Ensure that the network exists.
            self._get_network(context, network_id)
            # Create the port
            if p['mac_address'] is attributes.ATTR_NOT_SPECIFIED:
                db_port = self._create_port(context, network_id, port_data)
                p['mac_address'] = db_port['mac_address']
            else:
                db_port = self._create_port_with_mac(
                    context, network_id, port_data, p['mac_address'])
            # Update the IP's for the port
            ips = self._allocate_ips_for_port(context, port)
            if ips:
                for ip in ips:
                    ip_address = ip['ip_address']
                    subnet_id = ip['subnet_id']
                    NeutronDbPluginV2._store_ip_allocation(
                        context, ip_address, network_id, subnet_id, port_id)
        return self._make_port_dict(db_port, process_extensions=False)
    def update_port(self, context, id, port):
        """Update a port's attributes, MAC address and fixed IPs."""
        p = port['port']
        changed_ips = False
        with context.session.begin(subtransactions=True):
            port = self._get_port(context, id)
            changed_owner = 'device_owner' in p
            current_owner = p.get('device_owner') or port['device_owner']
            changed_device_id = p.get('device_id') != port['device_id']
            current_device_id = p.get('device_id') or port['device_id']
            # Re-run ownership enforcement when owner or device changed.
            if current_owner and changed_device_id or changed_owner:
                self._enforce_device_owner_not_router_intf_or_device_id(
                    context, current_owner, current_device_id,
                    port['tenant_id'])
            new_mac = p.get('mac_address')
            if new_mac and new_mac != port['mac_address']:
                self._check_mac_addr_update(
                    context, port, new_mac, current_owner)
            # Check if the IPs need to be updated
            network_id = port['network_id']
            if 'fixed_ips' in p:
                changed_ips = True
                original = self._make_port_dict(port, process_extensions=False)
                added_ips, prev_ips = self._update_ips_for_port(
                    context, network_id, id,
                    original["fixed_ips"], p['fixed_ips'],
                    original['mac_address'], port['device_owner'])
                # Update ips if necessary
                for ip in added_ips:
                    NeutronDbPluginV2._store_ip_allocation(
                        context, ip['ip_address'], network_id,
                        ip['subnet_id'], port.id)
            # Remove all attributes in p which are not in the port DB model
            # and then update the port
            try:
                port.update(self._filter_non_model_columns(p, models_v2.Port))
                context.session.flush()
            except db_exc.DBDuplicateEntry:
                # Flushing a changed MAC can hit the uniqueness constraint.
                raise n_exc.MacAddressInUse(net_id=network_id, mac=new_mac)
        result = self._make_port_dict(port)
        # Keep up with fields that changed
        if changed_ips:
            result['fixed_ips'] = prev_ips + added_ips
        return result
    def delete_port(self, context, id):
        """Delete a port inside a (sub)transaction."""
        with context.session.begin(subtransactions=True):
            self._delete_port(context, id)
def delete_ports_by_device_id(self, context, device_id, network_id=None):
query = (context.session.query(models_v2.Port.id)
.enable_eagerloads(False)
.filter(models_v2.Port.device_id == device_id))
if network_id:
query = query.filter(models_v2.Port.network_id == network_id)
port_ids = [p[0] for p in query]
for port_id in port_ids:
try:
self.delete_port(context, port_id)
except n_exc.PortNotFound:
# Don't raise if something else concurrently deleted the port
LOG.debug("Ignoring PortNotFound when deleting port '%s'. "
"The port has already been deleted.",
port_id)
    def _delete_port(self, context, id):
        """Bulk-delete the port row, tenant-scoped for non-admin callers."""
        query = (context.session.query(models_v2.Port).
                 enable_eagerloads(False).filter_by(id=id))
        if not context.is_admin:
            # Non-admins may only delete their own ports.
            query = query.filter_by(tenant_id=context.tenant_id)
        query.delete()
def get_port(self, context, id, fields=None):
port = self._get_port(context, id)
return self._make_port_dict(port, fields)
    def _get_ports_query(self, context, filters=None, sorts=None, limit=None,
                         marker_obj=None, page_reverse=False):
        """Build a port query honoring fixed_ips filters and pagination."""
        Port = models_v2.Port
        IPAllocation = models_v2.IPAllocation

        if not filters:
            filters = {}

        query = self._model_query(context, Port)

        # 'fixed_ips' is a nested filter; pop it so the generic filter
        # helper below does not see it.
        fixed_ips = filters.pop('fixed_ips', {})
        ip_addresses = fixed_ips.get('ip_address')
        subnet_ids = fixed_ips.get('subnet_id')
        if ip_addresses or subnet_ids:
            query = query.join(Port.fixed_ips)
            if ip_addresses:
                query = query.filter(IPAllocation.ip_address.in_(ip_addresses))
            if subnet_ids:
                query = query.filter(IPAllocation.subnet_id.in_(subnet_ids))

        query = self._apply_filters_to_query(query, Port, filters)
        if limit and page_reverse and sorts:
            # Reverse sort directions; caller re-reverses the results.
            sorts = [(s[0], not s[1]) for s in sorts]
        query = sqlalchemyutils.paginate_query(query, Port, limit,
                                               sorts, marker_obj)
        return query
def get_ports(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None,
page_reverse=False):
marker_obj = self._get_marker_obj(context, 'port', limit, marker)
query = self._get_ports_query(context, filters=filters,
sorts=sorts, limit=limit,
marker_obj=marker_obj,
page_reverse=page_reverse)
items = [self._make_port_dict(c, fields) for c in query]
if limit and page_reverse:
items.reverse()
return items
    def get_ports_count(self, context, filters=None):
        """Return the number of ports matching the filters."""
        return self._get_ports_query(context, filters).count()
    def _enforce_device_owner_not_router_intf_or_device_id(self, context,
                                                           device_owner,
                                                           device_id,
                                                           tenant_id):
        """Prevent tenants from replacing the device id of router ports with
        a router uuid belonging to another tenant.

        :raises n_exc.DeviceIDNotOwnedByTenant: when the router referenced
            by device_id belongs to a different tenant, or no L3 support
            exists to verify ownership.
        """
        # Only router-interface ports are sensitive here.
        if device_owner not in constants.ROUTER_INTERFACE_OWNERS:
            return
        if not context.is_admin:
            # check to make sure device_id does not match another tenants
            # router.
            if device_id:
                if hasattr(self, 'get_router'):
                    # Plugin implements L3 itself; look the router up with
                    # admin rights so cross-tenant routers are visible.
                    try:
                        ctx_admin = ctx.get_admin_context()
                        router = self.get_router(ctx_admin, device_id)
                    except l3.RouterNotFound:
                        return
                else:
                    # Otherwise delegate to the registered L3 service plugin.
                    l3plugin = (
                        manager.NeutronManager.get_service_plugins().get(
                            service_constants.L3_ROUTER_NAT))
                    if l3plugin:
                        try:
                            ctx_admin = ctx.get_admin_context()
                            router = l3plugin.get_router(ctx_admin,
                                                         device_id)
                        except l3.RouterNotFound:
                            return
                    else:
                        # raise as extension doesn't support L3 anyways.
                        raise n_exc.DeviceIDNotOwnedByTenant(
                            device_id=device_id)
                if tenant_id != router['tenant_id']:
                    raise n_exc.DeviceIDNotOwnedByTenant(device_id=device_id)
| |
# -*- coding: utf-8 -*-
# Django settings for the mozillians project.
import logging
import os.path
import sys
from funfactory.manage import path
from funfactory.settings_base import *
from urlparse import urljoin
from mozillians.users.helpers import calculate_username
from django.utils.functional import lazy
## Log settings
SYSLOG_TAG = "http_app_mozillians"
LOGGING = {
    'loggers': {
        'landing': {'level': logging.INFO},
        'phonebook': {'level': logging.INFO},
    },
}
## Database settings
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql',
        'HOST': '',
        'PORT': '',
        'OPTIONS': {
            'init_command': 'SET storage_engine=InnoDB',
            'charset': 'utf8',
            'use_unicode': True,
        },
        'TEST_CHARSET': 'utf8',
        'TEST_COLLATION': 'utf8_general_ci',
    },
}
## L10n
LOCALE_PATHS = [path('locale')]
# Accepted locales
PROD_LANGUAGES = ('ca', 'cs', 'de', 'en-US', 'es', 'hu', 'fr', 'it', 'ko',
                  'nl', 'pl', 'pt-BR', 'ru', 'sk', 'sl', 'sq', 'zh-TW',
                  'zh-CN', 'lt', 'ja')
# List of RTL locales known to this project. Subset of LANGUAGES.
RTL_LANGUAGES = ()  # ('ar', 'fa', 'fa-IR', 'he')
# For absolute urls
PROTOCOL = "https://"
PORT = 443
## Media and templates.
STATIC_ROOT = path('media/static')
STATIC_URL = MEDIA_URL + 'static/'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'jingo.Loader',
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
    # 'django.template.loaders.eggs.Loader',
)
TEMPLATE_CONTEXT_PROCESSORS = get_template_context_processors(
    append=['django_browserid.context_processors.browserid',
            'mozillians.common.context_processors.current_year'])
JINGO_EXCLUDE_APPS = [
    'bootstrapform',
    'admin',
    'autocomplete_light',
    'browserid'
]
# Asset bundles consumed by jingo-minify; keys are referenced from templates.
MINIFY_BUNDLES = {
    'css': {
        'common': (
            'css/main.less',
            'css/jquery-ui-1.8.16.custom.css',
            'js/libs/tag-it/css/jquery.tagit.css',
        ),
        'common-old': (
            'css/bootstrap.min.css',
        ),
        'api': (
            'css/prettify.css',
        ),
        'test': (
            'css/qunit.css',
        ),
    },
    'js': {
        'common': (
            'js/libs/jquery-1.8.3.min.js',
            'js/libs/jquery-ui-1.8.7.custom.min.js',
            'js/main.js',
            'js/libs/validation/validation.js',
        ),
        'common-old': (
            'js/libs/bootstrap/bootstrap-transition.js',
            'js/libs/bootstrap/bootstrap-alert.js',
            'js/libs/bootstrap/bootstrap-modal.js',
            'js/libs/bootstrap/bootstrap-dropdown.js',
            'js/libs/bootstrap/bootstrap-tooltip.js',
            'js/libs/bootstrap/bootstrap-popover.js',
            'js/libs/bootstrap/bootstrap-button.js',
            'js/libs/bootstrap/bootstrap-collapse.js',
            'js/libs/bootstrap/bootstrap-carousel.js',
            'js/libs/bootstrap/bootstrap-typeahead.js',
            'js/libs/bootstrap/bootstrap-tab.js',
            'js/groups.js',
        ),
        'homepage': (
            'js/libs/modernizr.custom.26887.js',
            'js/libs/jquery.transit.min.js',
            'js/libs/jquery.gridrotator.js',
            'js/libs/jquery.smooth-scroll.min.js',
            'js/homepage.js'
        ),
        'api': (
            'js/libs/prettify.js',
            'js/api.js',
        ),
        'edit_profile': (
            'js/libs/tag-it/js/tag-it.js',
            'js/profile_edit.js'
        ),
        'register': (
            'js/libs/tag-it/js/tag-it.js',
            'js/register.js',
        ),
        'search': (
            'js/libs/jquery.endless-scroll.js',
            'js/infinite.js',
            'js/expand.js',
        ),
        'backbone': (
            'js/libs/underscore.js',
            'js/libs/backbone.js',
            'js/libs/backbone.localStorage.js',
            'js/profiles.js',
        ),
        'test': (
            'js/libs/qunit.js',
            'js/tests/test.js',
        ),
        'profile_view': (
            'js/libs/tag-it/js/tag-it.js',
            'js/profile_view.js',
        ),
        'google_analytics': (
            'js/google-analytics.js',
        ),
    }
}
LESS_PREPROCESS = False
LESS_BIN = 'lessc'
MIDDLEWARE_CLASSES = get_middleware(append=[
    'commonware.response.middleware.StrictTransportMiddleware',
    'django_statsd.middleware.GraphiteMiddleware',
    'django_statsd.middleware.GraphiteRequestTimingMiddleware',
    'django_statsd.middleware.TastyPieRequestTimingMiddleware',
    'mozillians.common.middleware.StrongholdMiddleware',
    'mozillians.phonebook.middleware.RegisterMiddleware',
    'mozillians.phonebook.middleware.UsernameRedirectionMiddleware',
    'mozillians.groups.middleware.OldGroupRedirectionMiddleware'])
# StrictTransport
STS_SUBDOMAINS = True
# Not all URLs need locale.
SUPPORTED_NONLOCALES = list(SUPPORTED_NONLOCALES) + [
    'csp',
    'api',
    'browserid',
    'admin',
    'autocomplete',
]
AUTHENTICATION_BACKENDS = ('django_browserid.auth.BrowserIDBackend',)
# BrowserID creates a user if one doesn't exist.
BROWSERID_CREATE_USER = True
BROWSERID_USERNAME_ALGO = calculate_username
# On Login, we redirect through register.
# NOTE(review): this value is shadowed by LOGIN_REDIRECT_URL = '/' assigned
# further down in this file — confirm which redirect target is intended.
LOGIN_REDIRECT_URL = '/register'
INSTALLED_APPS = get_apps(append=[
    # These need to go in order of migration.
    'jingo_minify',
    'mozillians.users',
    'mozillians.phonebook',
    'mozillians.groups',
    'mozillians.common',
    'mozillians.api',
    'mozillians.mozspaces',
    'mozillians.funfacts',
    'mozillians.announcements',
    'sorl.thumbnail',
    'autocomplete_light',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.staticfiles',
    'django_browserid',
    'bootstrapform',
    # DB migrations
    'south',
])
## Auth
PWD_ALGORITHM = 'bcrypt'
HMAC_KEYS = {
    '2011-01-01': 'cheesecake',
}
SESSION_COOKIE_HTTPONLY = True
SESSION_ENGINE = 'django.contrib.sessions.backends.signed_cookies'
SESSION_COOKIE_NAME = 'mozillians_sessionid'
ANON_ALWAYS = True
# Email
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
FROM_NOREPLY = u'Mozillians <no-reply@mozillians.org>'
# Auth
LOGIN_URL = '/'
LOGIN_REDIRECT_URL = '/'
CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
        'LOCATION': '127.0.0.1:11211',
    }
}
AUTH_PROFILE_MODULE = 'users.UserProfile'
MAX_PHOTO_UPLOAD_SIZE = 8 * (1024 ** 2)
AUTO_VOUCH_DOMAINS = ('mozilla.com', 'mozilla.org', 'mozillafoundation.org')
SOUTH_TESTS_MIGRATE = False
# Django-CSP
CSP_DEFAULT_SRC = ("'self'",)
CSP_FONT_SRC = ("'self'",
                'https://*.mozilla.org',
                'https://*.mozilla.net')
CSP_FRAME_SRC = ("'self'",
                 'https://login.persona.org',)
CSP_IMG_SRC = ("'self'",
               'data:',
               'https://*.mozilla.net',
               'https://*.google-analytics.com',
               'https://*.gravatar.com',
               'https://i1.wp.com')
CSP_SCRIPT_SRC = ("'self'",
                  'https://*.mozilla.org',
                  'https://*.mozilla.net',
                  'https://*.google-analytics.com',
                  'https://login.persona.org',)
CSP_STYLE_SRC = ("'self'",
                 "'unsafe-inline'",
                 'https://*.mozilla.org',
                 'https://*.mozilla.net',)
# Elasticutils settings
ES_DISABLED = True
ES_HOSTS = ['127.0.0.1:9200']
ES_INDEXES = {'default': 'mozillians',
              'public': 'mozillians-public'}
ES_INDEXING_TIMEOUT = 10
# Sorl settings
THUMBNAIL_DUMMY = True
THUMBNAIL_PREFIX = 'uploads/sorl-cache/'
# Statsd Graphite
STATSD_CLIENT = 'django_statsd.clients.normal'
# Basket
# If we're running tests, don't hit the real basket server.
if 'test' in sys.argv:
    BASKET_URL = 'http://127.0.0.1'
else:
    BASKET_URL = 'http://basket.mozilla.com'
BASKET_NEWSLETTER = 'mozilla-phone'
USER_AVATAR_DIR = 'uploads/userprofile'
MOZSPACE_PHOTO_DIR = 'uploads/mozspaces'
ANNOUNCEMENTS_PHOTO_DIR = 'uploads/announcements'
# Google Analytics
GA_ACCOUNT_CODE = 'UA-35433268-19'
# Set ALLOWED_HOSTS based on SITE_URL.
def _allowed_hosts():
    """Derive the ALLOWED_HOSTS list lazily from the configured SITE_URL."""
    from django.conf import settings
    from urlparse import urlparse
    netloc = urlparse(settings.SITE_URL).netloc  # strip scheme and path
    hostname = netloc.rsplit(':', 1)[0]  # strip any trailing :port
    return [hostname]
# Evaluated lazily so SITE_URL can be overridden by local settings first.
ALLOWED_HOSTS = lazy(_allowed_hosts, list)()
# URL prefixes exempt from the login-required Stronghold middleware.
STRONGHOLD_EXCEPTIONS = ['^%s' % MEDIA_URL,
                         '^/csp/',
                         '^/admin/',
                         '^/browserid/',
                         '^/api/']
# Set default avatar for user profiles
DEFAULT_AVATAR = 'img/unknown.png'
DEFAULT_AVATAR_URL = urljoin(MEDIA_URL, DEFAULT_AVATAR)
DEFAULT_AVATAR_PATH = os.path.join(MEDIA_ROOT, DEFAULT_AVATAR)
CELERYBEAT_SCHEDULER = "djcelery.schedulers.DatabaseScheduler"
# Placeholder; expected to be overridden with a real secret in local settings.
SECRET_KEY = ''
JINGO_MINIFY_USE_STATIC = False
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import errno
import functools
import os
import shutil
import tempfile
import time
import weakref
from eventlet import semaphore
from cloudbaseinit.openstack.common import cfg
from cloudbaseinit.openstack.common.gettextutils import _
from cloudbaseinit.openstack.common import fileutils
from cloudbaseinit.openstack.common import log as logging
# Module-level logger used for lock acquisition/release diagnostics.
LOG = logging.getLogger(__name__)
# Configuration knobs for the locking machinery; registered on the global
# CONF object below so every importer of this module shares the settings.
util_opts = [
    cfg.BoolOpt('disable_process_locking', default=False,
                help='Whether to disable inter-process locks'),
    cfg.StrOpt('lock_path',
               default=os.path.abspath(os.path.join(os.path.dirname(__file__),
                                       '../')),
               help='Directory to use for lock files')
]
CONF = cfg.CONF
CONF.register_opts(util_opts)
class _InterProcessLock(object):
    """Lock implementation which allows multiple locks, working around
    issues like bugs.debian.org/cgi-bin/bugreport.cgi?bug=632857 and does
    not require any cleanup. Since the lock is always held on a file
    descriptor rather than outside of the process, the lock gets dropped
    automatically if the process crashes, even if __exit__ is not executed.
    There are no guarantees regarding usage by multiple green threads in a
    single process here. This lock works only between processes. Exclusive
    access between local threads should be achieved using the semaphores
    in the @synchronized decorator.
    Note these locks are released when the descriptor is closed, so it's not
    safe to close the file descriptor while another green thread holds the
    lock. Just opening and closing the lock file can break synchronisation,
    so lock files must be accessed only using this abstraction.
    """
    def __init__(self, name):
        # The lock file object is opened lazily in __enter__; ``name`` is
        # the on-disk path of the lock file.
        self.lockfile = None
        self.fname = name
    def __enter__(self):
        self.lockfile = open(self.fname, 'w')
        # Poll until the subclass-provided trylock() succeeds; an IOError
        # with EACCES/EAGAIN means another process currently holds the lock.
        while True:
            try:
                # Using non-blocking locks since green threads are not
                # patched to deal with blocking locking calls.
                # Also upon reading the MSDN docs for locking(), it seems
                # to have a laughable 10 attempts "blocking" mechanism.
                self.trylock()
                return self
            except IOError, e:
                if e.errno in (errno.EACCES, errno.EAGAIN):
                    # external locks synchronise things like iptables
                    # updates - give it some time to prevent busy spinning
                    time.sleep(0.01)
                else:
                    raise
    def __exit__(self, exc_type, exc_val, exc_tb):
        # Best-effort release: a failure to unlock/close is logged rather
        # than raised so it cannot mask an exception from the with-body.
        try:
            self.unlock()
            self.lockfile.close()
        except IOError:
            LOG.exception(_("Could not release the acquired lock `%s`"),
                          self.fname)
    def trylock(self):
        # Platform-specific non-blocking acquire; implemented by subclasses.
        raise NotImplementedError()
    def unlock(self):
        # Platform-specific release; implemented by subclasses.
        raise NotImplementedError()
class _WindowsLock(_InterProcessLock):
    """Inter-process lock backed by msvcrt.locking() (Windows only)."""
    def trylock(self):
        # msvcrt.locking() takes a C runtime file descriptor (an int), not
        # a Python file object, so the open lockfile's fileno() is required;
        # passing the file object itself raises TypeError.
        msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_NBLCK, 1)

    def unlock(self):
        msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_UNLCK, 1)
class _PosixLock(_InterProcessLock):
    """Inter-process lock backed by POSIX fcntl.lockf() advisory locking."""
    def trylock(self):
        # LOCK_NB makes lockf raise IOError (EACCES/EAGAIN) instead of
        # blocking when another process holds the lock.
        fcntl.lockf(self.lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB)
    def unlock(self):
        fcntl.lockf(self.lockfile, fcntl.LOCK_UN)
# Select the platform-appropriate lock class; the backing module (msvcrt or
# fcntl) is imported lazily here so only the relevant one is ever loaded.
if os.name == 'nt':
    import msvcrt
    InterProcessLock = _WindowsLock
else:
    import fcntl
    InterProcessLock = _PosixLock
# Weak values let unused per-name semaphores be garbage collected.
_semaphores = weakref.WeakValueDictionary()
def synchronized(name, lock_file_prefix, external=False, lock_path=None):
    """Synchronization decorator.
    Decorating a method like so::
        @synchronized('mylock')
        def foo(self, *args):
           ...
    ensures that only one thread will execute the bar method at a time.
    Different methods can share the same lock::
        @synchronized('mylock')
        def foo(self, *args):
           ...
        @synchronized('mylock')
        def bar(self, *args):
           ...
    This way only one of either foo or bar can be executing at a time.
    The lock_file_prefix argument is used to provide lock files on disk with a
    meaningful prefix. The prefix should end with a hyphen ('-') if specified.
    The external keyword argument denotes whether this lock should work across
    multiple processes. This means that if two different workers both run a
    a method decorated with @synchronized('mylock', external=True), only one
    of them will execute at a time.
    The lock_path keyword argument is used to specify a special location for
    external lock files to live. If nothing is set, then CONF.lock_path is
    used as a default.
    """
    def wrap(f):
        @functools.wraps(f)
        def inner(*args, **kwargs):
            # NOTE(soren): If we ever go natively threaded, this will be racy.
            #              See http://stackoverflow.com/questions/5390569/dyn
            #              amically-allocating-and-destroying-mutexes
            sem = _semaphores.get(name, semaphore.Semaphore())
            if name not in _semaphores:
                # this check is not racy - we're already holding ref locally
                # so GC won't remove the item and there was no IO switch
                # (only valid in greenthreads)
                _semaphores[name] = sem
            with sem:
                LOG.debug(_('Got semaphore "%(lock)s" for method '
                            '"%(method)s"...'), {'lock': name,
                                                 'method': f.__name__})
                if external and not CONF.disable_process_locking:
                    LOG.debug(_('Attempting to grab file lock "%(lock)s" for '
                                'method "%(method)s"...'),
                              {'lock': name, 'method': f.__name__})
                    cleanup_dir = False
                    # We need a copy of lock_path because it is non-local
                    local_lock_path = lock_path
                    if not local_lock_path:
                        local_lock_path = CONF.lock_path
                    if not local_lock_path:
                        # No configured path at all: use a throwaway tempdir
                        # that is removed again after the call completes.
                        cleanup_dir = True
                        local_lock_path = tempfile.mkdtemp()
                    if not os.path.exists(local_lock_path):
                        cleanup_dir = True
                        fileutils.ensure_tree(local_lock_path)
                    # NOTE(mikal): the lock name cannot contain directory
                    # separators
                    safe_name = name.replace(os.sep, '_')
                    lock_file_name = '%s%s' % (lock_file_prefix, safe_name)
                    lock_file_path = os.path.join(local_lock_path,
                                                  lock_file_name)
                    try:
                        lock = InterProcessLock(lock_file_path)
                        with lock:
                            LOG.debug(_('Got file lock "%(lock)s" at %(path)s '
                                        'for method "%(method)s"...'),
                                      {'lock': name,
                                       'path': lock_file_path,
                                       'method': f.__name__})
                            retval = f(*args, **kwargs)
                    finally:
                        # NOTE(vish): This removes the tempdir if we needed
                        #             to create one. This is used to cleanup
                        #             the locks left behind by unit tests.
                        if cleanup_dir:
                            shutil.rmtree(local_lock_path)
                else:
                    # Process-local locking only: the semaphore held above
                    # is sufficient.
                    retval = f(*args, **kwargs)
            return retval
        return inner
    return wrap
| |
#!/usr/bin/env python
# coding: utf-8
#
# Copyright (c) Greenplum Inc 2008. All Rights Reserved.
#
import sys
import unittest2 as unittest
import tempfile, os, shutil
from gppylib.commands.base import CommandResult, Command, ExecutionError
from gppylib.operations.backup_utils import *
from gppylib.operations.restore import *
from gppylib.operations.restore import _build_gpdbrestore_cmd_line
from gppylib.mainUtils import ExceptionNoStackTraceNeeded
from mock import patch, MagicMock, Mock, mock_open, call, ANY
class RestoreTestCase(unittest.TestCase):
    def setUp(self):
        # Shared fixture: a canonical restore Context (timestamp
        # 20160101010101, port 5432) plus the RestoreDatabase and
        # ValidateTimestamp operations built from it. Individual tests
        # mutate context fields (backup_dir, compress, ...) as needed.
        context = Context()
        context.restore_db='testdb'
        context.include_dump_tables_file='/tmp/table_list.txt'
        context.master_datadir='/data/master/p1'
        context.batch_default=None
        context.timestamp = '20160101010101'
        context.no_analyze = True
        context.drop_db = True
        context.master_port = 5432
        self.context = context
        self.restore = RestoreDatabase(self.context)
        self.validate_timestamp = ValidateTimestamp(self.context)
    def test_GetDbName_default(self):
        """ Basic test """
        # GetDbName should extract the database name from a cdatabase file's
        # CREATE DATABASE statement.
        with tempfile.NamedTemporaryFile() as f:
            f.write("""
--
-- Database creation
--
CREATE DATABASE monkey WITH TEMPLATE = template0 ENCODING = 'UTF8' OWNER = thisguy;
""")
            f.flush()
            self.assertTrue(GetDbName(f.name).run() == "monkey")
    def test_GetDbName_line_check(self):
        """ Verify that GetDbName looks no further than 50 lines. """
        # 50 junk lines precede CREATE DATABASE, so the scan must give up
        # with DbNameGiveUp rather than find the name.
        with tempfile.NamedTemporaryFile() as f:
            for i in range(0, 50):
                f.write("crap\n")
            f.write("CREATE DATABASE monkey")
            f.flush()
            try:
                GetDbName(f.name).run()
            except GetDbName.DbNameGiveUp, e:
                return
            self.fail("DbNameGiveUp should have been raised.")
    def test_GetDbName_no_name(self):
        """ Verify that GetDbName fails when cdatabase file ends prematurely. """
        # No CREATE DATABASE statement at all -> DbNameNotFound.
        with tempfile.NamedTemporaryFile() as f:
            f.write("this is the whole file")
            f.flush()
            try:
                GetDbName(f.name).run()
            except GetDbName.DbNameNotFound, e:
                return
            self.fail("DbNameNotFound should have been raised.")
    # _process_createdb always failing should exhaust the retries and
    # surface ExceptionNoStackTraceNeeded (time.sleep is stubbed out so
    # the retry loop does not actually wait).
    @patch('gppylib.operations.restore.RestoreDatabase._process_createdb', side_effect=ExceptionNoStackTraceNeeded('Failed to create database'))
    @patch('time.sleep')
    def test_multitry_createdb_create_fails(self, mock1, mock2):
        self.assertRaises(ExceptionNoStackTraceNeeded, self.restore._multitry_createdb)
    # Happy path: createdb succeeds on the first attempt; nothing raised.
    @patch('gppylib.operations.restore.RestoreDatabase._process_createdb')
    def test_multitry_createdb_default(self, mock):
        self.restore._multitry_createdb()
    # Dirty tables t1/t2 are mapped to an incremental timestamp while t3,
    # never dirtied, falls back to the full dump timestamp; the plan file
    # must be written one "timestamp:tables" line per increment.
    @patch('gppylib.operations.restore.get_partition_list', return_value=[('public', 't1'), ('public', 't2'), ('public', 't3')])
    @patch('gppylib.operations.restore.get_incremental_restore_timestamps', return_value=['20160101010101', '20160101010111'])
    @patch('gppylib.operations.restore.get_dirty_table_file_contents', return_value=['public.t1', 'public.t2'])
    def test_create_restore_plan_default(self, mock1, mock2, mock3):
        expected = ["20160101010111:", "20160101010101:public.t1,public.t2", "20160101000000:public.t3"]
        self.context.full_dump_timestamp = '20160101000000'
        m = mock_open()
        with patch('__builtin__.open', m, create=True):
            plan_file = create_restore_plan(self.context)
        result = m()
        self.assertEqual(len(expected), len(result.write.call_args_list))
        for i in range(len(expected)):
            self.assertEqual(call(expected[i]+'\n'), result.write.call_args_list[i])
    # With no partition list patched in, every plan line carries an empty
    # table list but all three timestamps are still written in order.
    @patch('gppylib.operations.restore.get_incremental_restore_timestamps', return_value=['20160101010101', '20160101010111'])
    @patch('gppylib.operations.restore.get_dirty_table_file_contents', return_value=['public.t1', 'public.t2'])
    def test_create_restore_plan_empty_list(self, mock1, mock2):
        expected = ["20160101010111:", "20160101010101:", "20160101000000:"]
        self.context.full_dump_timestamp = '20160101000000'
        m = mock_open()
        with patch('__builtin__.open', m, create=True):
            plan_file = create_restore_plan(self.context)
        result = m()
        self.assertEqual(len(expected), len(result.write.call_args_list))
        for i in range(len(expected)):
            self.assertEqual(call(expected[i]+'\n'), result.write.call_args_list[i])
    # NetBackup path: create_plan_file_contents is stubbed out and the
    # partition list is empty, so nothing is written to the plan file.
    @patch('gppylib.operations.restore.get_partition_list', return_value=[])
    @patch('gppylib.operations.restore.get_full_timestamp_for_incremental', return_value='20120101000000')
    @patch('gppylib.operations.restore.get_incremental_restore_timestamps', return_value=['20160101010101', '20160101010111'])
    @patch('gppylib.operations.restore.get_dirty_table_file_contents', return_value=['public.t1', 'public.t2'])
    @patch('gppylib.operations.restore.create_plan_file_contents')
    def test_create_restore_plan_empty_list_with_nbu(self, mock1, mock2, mock3, mock4, mock5):
        self.context.netbackup_service_host = 'mdw'
        self.context.netbackup_block_size = '1024'
        m = mock_open()
        with patch('__builtin__.open', m, create=True):
            plan_file = create_restore_plan(self.context)
        result = m()
        self.assertEqual(len(result.write.call_args_list), 0)
    # Only increments at or before the requested restore timestamp are kept.
    @patch('gppylib.operations.restore.get_lines_from_file', return_value=['20160101010110', '20160101010109', '20160101010108', '20160101010107', '20160101010106', '20160101010105', '20160101010104', '20160101010103', '20160101010102', '20160101010101'])
    def test_get_incremental_restore_timestamps_midway(self, mock):
        self.context.full_dump_timestamp = '20160101010101'
        self.context.timestamp = '20160101010105'
        increments = get_incremental_restore_timestamps(self.context)
        self.assertEqual(increments, ['20160101010105', '20160101010104', '20160101010103', '20160101010102', '20160101010101'])
    # Requesting the newest increment returns the full list of increments.
    @patch('gppylib.operations.restore.get_lines_from_file', return_value=['20160101010110', '20160101010109', '20160101010108', '20160101010107', '20160101010106', '20160101010105', '20160101010104', '20160101010103', '20160101010102', '20160101010101'])
    def test_get_incremental_restore_timestamps_latest(self, mock):
        self.context.full_dump_timestamp = '20160101010101'
        self.context.timestamp = '20160101010110'
        increments = get_incremental_restore_timestamps(self.context)
        self.assertEqual(increments, ['20160101010110', '20160101010109', '20160101010108', '20160101010107', '20160101010106', '20160101010105', '20160101010104', '20160101010103', '20160101010102', '20160101010101'])
    # A restore timestamp older than every increment yields an empty list.
    @patch('gppylib.operations.restore.get_lines_from_file', return_value=[])
    def test_get_incremental_restore_timestamps_earliest(self, mock):
        self.context.full_dump_timestamp = '20160101010101'
        self.context.timestamp = '20160101010100'
        increments = get_incremental_restore_timestamps(self.context)
        self.assertEqual(increments, [])
    # Each table is assigned to the newest increment whose dirty list
    # contains it; leftovers land on the full-dump timestamp entry.
    @patch('gppylib.operations.restore.get_lines_from_file', side_effect=[['public.t1'], ['public.t1', 'public.t2', 'public.t3'], ['public.t2', 'public.t4']])
    def test_create_plan_file_contents_with_file(self, mock):
        table_set_from_metadata_file = ['public.t1', 'public.t2', 'public.t3', 'public.t4']
        incremental_restore_timestamps = ['20160101010113', '20160101010101', '20160101010111']
        latest_full_timestamp = '20160101010110'
        expected_output = {'20160101010113': ['public.t1'], '20160101010101': ['public.t2', 'public.t3'], '20160101010111': ['public.t4'], '20160101010110': []}
        file_contents = create_plan_file_contents(self.context, table_set_from_metadata_file, incremental_restore_timestamps, latest_full_timestamp)
        self.assertEqual(file_contents, expected_output)
    def test_create_plan_file_contents_no_file(self):
        # No increments at all: every table maps to the latest full dump.
        table_set_from_metadata_file = ['public.t1', 'public.t2', 'public.t3', 'public.t4']
        incremental_restore_timestamps = []
        latest_full_timestamp = '20160101010110'
        expected_output = {'20160101010110': ['public.t1', 'public.t2', 'public.t3', 'public.t4']}
        file_contents = create_plan_file_contents(self.context, table_set_from_metadata_file, incremental_restore_timestamps, latest_full_timestamp)
        self.assertEqual(file_contents, expected_output)
    # Empty metadata table set: all timestamps appear with empty lists.
    @patch('gppylib.operations.restore.get_lines_from_file', side_effect=[['public.t1'], ['public.t1', 'public.t2', 'public.t3'], ['public.t2', 'public.t4']])
    def test_create_plan_file_contents_no_metadata(self, mock):
        table_set_from_metadata_file = []
        incremental_restore_timestamps = ['20160101010113', '20160101010101', '20160101010111']
        latest_full_timestamp = '20160101010110'
        expected_output = {'20160101010101': [], '20160101010113': [], '20160101010111': [], '20160101010110': []}
        file_contents = create_plan_file_contents(self.context, table_set_from_metadata_file, incremental_restore_timestamps, latest_full_timestamp)
        self.assertEqual(file_contents, expected_output)
    # Same as the no-metadata case but with NetBackup configured; the dirty
    # list files are fetched via the (stubbed) restore_file_with_nbu.
    @patch('gppylib.operations.restore.get_lines_from_file', side_effect=[['public.t1'], ['public.t1', 'public.t2', 'public.t3'], ['public.t2', 'public.t4']])
    @patch('gppylib.operations.restore.restore_file_with_nbu')
    def test_create_plan_file_contents_with_nbu(self, mock1, mock2):
        self.context.netbackup_service_host = 'mdw'
        self.context.netbackup_block_size = '1024'
        table_set_from_metadata_file = []
        incremental_restore_timestamps = ['20160101010113', '20160101010101', '20160101010111']
        latest_full_timestamp = '20160101010110'
        expected_output = {'20160101010101': [], '20160101010113': [], '20160101010111': [], '20160101010110': []}
        file_contents = create_plan_file_contents(self.context, table_set_from_metadata_file, incremental_restore_timestamps, latest_full_timestamp)
        self.assertEqual(file_contents, expected_output)
    # Plan lines come out ordered newest timestamp first, with tables
    # comma-joined after each "timestamp:" prefix.
    @patch('gppylib.operations.restore.write_lines_to_file')
    def test_write_to_plan_file_default(self, mock1):
        plan_file = 'blah'
        plan_file_contents = {'20160101010113': ['public.t1'],
                              '20160101010101': ['public.t2', 'public.t3'],
                              '20160101010111': ['public.t4']}
        expected_output = ['20160101010113:public.t1',
                           '20160101010111:public.t4',
                           '20160101010101:public.t2,public.t3']
        file_contents = write_to_plan_file(plan_file_contents, plan_file)
        self.assertEqual(expected_output, file_contents)
    # Empty contents dict produces an empty plan with no error.
    @patch('gppylib.operations.restore.write_lines_to_file')
    def test_write_to_plan_file_empty_list(self, mock1):
        plan_file = 'blah'
        plan_file_contents = {}
        expected_output = []
        file_contents = write_to_plan_file(plan_file_contents, plan_file)
        self.assertEqual(expected_output, file_contents)
    # A None plan-file path must be rejected with an explicit exception.
    @patch('gppylib.operations.restore.write_lines_to_file')
    def test_write_to_plan_file_no_plan_file(self, mock1):
        plan_file = None
        plan_file_contents = {}
        with self.assertRaisesRegexp(Exception, 'Invalid plan file .*'):
            write_to_plan_file(plan_file_contents, plan_file)
    # 'schema.table' lines are split into (schema, table) tuples.
    @patch('gppylib.operations.restore.get_lines_from_file', return_value=['public.t1', 'public.t2'])
    def test_get_partition_list_default(self, mock):
        partition_list = get_partition_list(self.context)
        self.assertEqual(partition_list, [('public', 't1'), ('public', 't2')])
    # Empty partition file -> empty list, no error.
    @patch('gppylib.operations.restore.get_lines_from_file', return_value=[])
    def test_get_partition_list_no_partitions(self, mock):
        partition_list = get_partition_list(self.context)
        self.assertEqual(partition_list, [])
    # Report file present and marked 'Backup Type: Incremental' -> True.
    @patch('gppylib.operations.restore.get_lines_from_file', return_value=['Backup Type: Incremental'])
    @patch('os.path.isfile', return_value=True)
    def test_is_incremental_restore_default(self, mock1, mock2):
        self.assertTrue(is_incremental_restore(self.context))
    # When check_backup_type says incremental, the report contents are moot.
    @patch('gppylib.operations.restore.get_lines_from_file')
    @patch('gppylib.operations.restore.check_backup_type', return_value=True)
    @patch('os.path.isfile', return_value=True)
    def test_is_incremental_restore_bypass_file_incremental(self, mock1, mock2, mock3):
        self.assertTrue(is_incremental_restore(self.context))
    # Report says 'Backup Type: Full' -> not an incremental restore.
    @patch('os.path.isfile', return_value=True)
    @patch('gppylib.operations.restore.get_lines_from_file', return_value=['Backup Type: Full'])
    def test_is_incremental_restore_full_backup(self, mock1, mock2):
        self.assertFalse(is_incremental_restore(self.context))
    # check_backup_type reporting full overrides any report-file contents.
    @patch('os.path.isfile', return_value=True)
    @patch('gppylib.operations.restore.get_lines_from_file')
    @patch('gppylib.operations.restore.check_backup_type', return_value=False)
    def test_is_incremental_restore_bypass_file_full(self, mock1, mock2, mock3):
        self.assertFalse(is_incremental_restore(self.context))
    # Missing report file -> treated as not incremental (no exception).
    @patch('os.path.isfile', return_value=False)
    def test_is_incremental_restore_no_file(self, mock1):
        self.assertFalse(is_incremental_restore(self.context))
@patch('os.path.isfile', return_value=True)
@patch('gppylib.operations.restore.get_lines_from_file', return_value=['Backup Type: Full'])
@patch('os.path.isfile', return_value=True)
def test_is_full_restore_default(self, mock1, mock2, mock3):
self.assertTrue(is_full_restore(self.context))
    # check_backup_type reporting full short-circuits the report parsing.
    @patch('gppylib.operations.restore.get_lines_from_file')
    @patch('gppylib.operations.restore.check_backup_type', return_value=True)
    @patch('os.path.isfile', return_value=True)
    def test_is_full_restore_bypass_file_full(self, mock1, mock2, mock3):
        self.assertTrue(is_full_restore(self.context))
    # Report says 'Backup Type: Incremental' -> not a full restore.
    @patch('os.path.isfile', return_value=True)
    @patch('gppylib.operations.restore.get_lines_from_file', return_value=['Backup Type: Incremental'])
    def test_is_full_restore_incremental(self, mock1, mock2):
        self.assertFalse(is_full_restore(self.context))
    # check_backup_type reporting incremental overrides the report file.
    @patch('os.path.isfile', return_value=True)
    @patch('gppylib.operations.restore.get_lines_from_file')
    @patch('gppylib.operations.restore.check_backup_type', return_value=False)
    def test_is_full_restore_bypass_file_incremental(self, mock1, mock2, mock3):
        self.assertFalse(is_full_restore(self.context))
    # Unlike is_incremental_restore, a missing report file here is fatal
    # and must raise with the expected message.
    @patch('os.path.isfile', return_value=False)
    def test_is_full_restore_no_file(self, mock1):
        filename = self.context.generate_filename("report")
        with self.assertRaisesRegexp(Exception, 'Report file %s does not exist' % filename):
            is_full_restore(self.context)
    # Baseline gp_restore command line: compression on, default dump dir.
    @patch('gppylib.operations.restore.socket.gethostname', return_value='host')
    @patch('gppylib.operations.restore.getpass.getuser', return_value='user')
    def test_create_schema_only_restore_string_default(self, mock1, mock2):
        self.context.backup_dir = None
        table_filter_file = None
        full_restore_with_filter = False
        metadata_file = self.context.generate_filename("metadata")
        expected_output = 'gp_restore -i -h host -p 5432 -U user --gp-i --gp-k=20160101010101 --gp-l=p -s %s --gp-d=db_dumps/20160101 --gp-c -d "testdb"' % metadata_file
        restore_line = self.restore.create_schema_only_restore_string(table_filter_file, full_restore_with_filter)
        self.assertEqual(restore_line, expected_output)
    # compress=False must drop the --gp-c flag from the command line.
    @patch('gppylib.operations.restore.socket.gethostname', return_value='host')
    @patch('gppylib.operations.restore.getpass.getuser', return_value='user')
    def test_create_schema_only_restore_string_no_compression(self, mock1, mock2):
        self.context.backup_dir = None
        self.context.compress = False
        table_filter_file = None
        full_restore_with_filter = False
        metadata_file = self.context.generate_filename("metadata")
        expected_output = 'gp_restore -i -h host -p 5432 -U user --gp-i --gp-k=20160101010101 --gp-l=p -s %s --gp-d=db_dumps/20160101 -d "testdb"' % metadata_file
        restore_line = self.restore.create_schema_only_restore_string(table_filter_file, full_restore_with_filter)
        self.assertEqual(restore_line, expected_output)
    # A writable report/status dir adds --gp-r and --status options.
    @patch('gppylib.operations.restore.socket.gethostname', return_value='host')
    @patch('gppylib.operations.restore.getpass.getuser', return_value='user')
    @patch('gppylib.operations.backup_utils.Context.backup_dir_is_writable', return_value=True)
    def test_create_schema_only_restore_string_backup_dir(self, mock1, mock2, mock3):
        table_filter_file = None
        full_restore_with_filter = False
        self.context.report_status_dir = "/data/master/p1/db_dumps/20160101"
        metadata_file = self.context.generate_filename("metadata")
        expected_output = 'gp_restore -i -h host -p 5432 -U user --gp-i --gp-k=20160101010101 --gp-l=p -s %s --gp-r=/data/master/p1/db_dumps/20160101 --status=/data/master/p1/db_dumps/20160101 --gp-d=db_dumps/20160101 --gp-c -d "testdb"' % metadata_file
        restore_line = self.restore.create_schema_only_restore_string(table_filter_file, full_restore_with_filter)
        self.assertEqual(restore_line, expected_output)
    # dump_prefix and a filter file add --prefix and --gp-f options.
    @patch('gppylib.operations.restore.socket.gethostname', return_value='host')
    @patch('gppylib.operations.restore.getpass.getuser', return_value='user')
    @patch('gppylib.operations.backup_utils.Context.backup_dir_is_writable', return_value=False)
    def test_create_schema_only_restore_string_prefix(self, mock1, mock2, mock3):
        self.context.dump_prefix = 'bar_'
        table_filter_file = 'filter_file1'
        metadata_file = self.context.generate_filename("metadata")
        full_restore_with_filter = False
        expected_output = 'gp_restore -i -h host -p 5432 -U user --gp-i --gp-k=20160101010101 --gp-l=p -s %s --gp-d=db_dumps/20160101 --prefix=bar_ --gp-f=%s --gp-c -d "testdb"' % (metadata_file, table_filter_file)
        restore_line = self.restore.create_schema_only_restore_string(table_filter_file, full_restore_with_filter)
        self.assertEqual(restore_line, expected_output)
    # No filter file: the --gp-f option must be absent.
    @patch('gppylib.operations.restore.socket.gethostname', return_value='host')
    @patch('gppylib.operations.restore.getpass.getuser', return_value='user')
    @patch('gppylib.operations.backup_utils.Context.backup_dir_is_writable', return_value=False)
    def test_create_schema_only_restore_string_no_filter_file(self, mock1, mock2, mock3):
        table_filter_file = None
        metadata_file = self.context.generate_filename("metadata")
        full_restore_with_filter = False
        expected_output = 'gp_restore -i -h host -p 5432 -U user --gp-i --gp-k=20160101010101 --gp-l=p -s %s --gp-d=db_dumps/20160101 --gp-c -d "testdb"' % metadata_file
        restore_line = self.restore.create_schema_only_restore_string(table_filter_file, full_restore_with_filter)
        self.assertEqual(restore_line, expected_output)
    # A custom report_status_dir is reflected in --gp-r/--status.
    @patch('gppylib.operations.restore.socket.gethostname', return_value='host')
    @patch('gppylib.operations.restore.getpass.getuser', return_value='user')
    def test_create_schema_only_restore_string_different_status_dir(self, mock1, mock2):
        self.context.report_status_dir = '/tmp'
        table_filter_file = None
        full_restore_with_filter = False
        metadata_file = self.context.generate_filename("metadata")
        expected_output = 'gp_restore -i -h host -p 5432 -U user --gp-i --gp-k=20160101010101 --gp-l=p -s %s --gp-r=/tmp --status=/tmp --gp-d=db_dumps/20160101 --gp-c -d "testdb"' % metadata_file
        restore_line = self.restore.create_schema_only_restore_string(table_filter_file, full_restore_with_filter)
        self.assertEqual(restore_line, expected_output)
    # full_restore_with_filter=True adds the -P flag before the dir options.
    @patch('gppylib.operations.restore.socket.gethostname', return_value='host')
    @patch('gppylib.operations.restore.getpass.getuser', return_value='user')
    def test_create_schema_only_restore_string_status_dir_with_filter(self, mock1, mock2):
        self.context.report_status_dir = '/tmp'
        table_filter_file = None
        full_restore_with_filter = True
        metadata_file = self.context.generate_filename("metadata")
        expected_output = 'gp_restore -i -h host -p 5432 -U user --gp-i --gp-k=20160101010101 --gp-l=p -s %s -P --gp-r=/tmp --status=/tmp --gp-d=db_dumps/20160101 --gp-c -d "testdb"' % metadata_file
        restore_line = self.restore.create_schema_only_restore_string(table_filter_file, full_restore_with_filter)
        self.assertEqual(restore_line, expected_output)
@patch('gppylib.operations.restore.socket.gethostname', return_value='host')
@patch('gppylib.operations.restore.getpass.getuser', return_value='user')
def test_create_schema_only_restore_string_with_nbu(self, mock1, mock2):
table_filter_file = None
full_restore_with_filter = False
self.context.netbackup_service_host = "mdw"
metadata_file = self.context.generate_filename("metadata")
expected_output = 'gp_restore -i -h host -p 5432 -U user --gp-i --gp-k=20160101010101 --gp-l=p -s %s --gp-d=db_dumps/20160101 --gp-c -d "testdb" --netbackup-service-host=mdw' % metadata_file
restore_line = self.restore.create_schema_only_restore_string(table_filter_file, full_restore_with_filter)
self.assertEqual(restore_line, expected_output)
@patch('gppylib.operations.restore.socket.gethostname', return_value='host')
@patch('gppylib.operations.restore.getpass.getuser', return_value='user')
def test_create_schema_only_restore_string_with_ddboost(self, mock1, mock2):
self.context.report_status_dir = '/tmp'
table_filter_file = None
full_restore_with_filter = True
self.context.ddboost = True
self.context.dump_dir = '/backup/DCA-35'
metadata_file = self.context.generate_filename("metadata")
expected_output = 'gp_restore -i -h host -p 5432 -U user --gp-i --gp-k=20160101010101 --gp-l=p -s %s -P --gp-r=/tmp --status=/tmp --gp-d=/backup/DCA-35/20160101 --gp-c -d "testdb" --ddboost' % metadata_file
restore_line = self.restore.create_schema_only_restore_string(table_filter_file, full_restore_with_filter)
self.assertEqual(restore_line, expected_output)
@patch('gppylib.operations.restore.socket.gethostname', return_value='host')
@patch('gppylib.operations.restore.getpass.getuser', return_value='user')
def test_create_post_data_schema_only_restore_string_default(self, *mocks):
    # A full restore with filter adds -P to the post-data command.
    expected = 'gp_restore -i -h host -p 5432 -U user --gp-d=db_dumps/20160101 --gp-i --gp-k=20160101010101 --gp-l=p -P --gp-c -d "testdb"'
    self.assertEqual(self.restore.create_post_data_schema_only_restore_string(None, True), expected)

@patch('gppylib.operations.restore.socket.gethostname', return_value='host')
@patch('gppylib.operations.restore.getpass.getuser', return_value='user')
@patch('gppylib.operations.backup_utils.Context.backup_dir_is_writable', return_value=True)
def test_create_post_data_schema_only_restore_string_no_filter(self, *mocks):
    # The report/status directory is forwarded as --gp-r and --status.
    self.context.report_status_dir = "/data/master/p1/db_dumps/20160101"
    expected = 'gp_restore -i -h host -p 5432 -U user --gp-d=db_dumps/20160101 --gp-i --gp-k=20160101010101 --gp-l=p --gp-r=/data/master/p1/db_dumps/20160101 --status=/data/master/p1/db_dumps/20160101 --gp-c -d "testdb"'
    self.assertEqual(self.restore.create_post_data_schema_only_restore_string(None, False), expected)

@patch('gppylib.operations.restore.socket.gethostname', return_value='host')
@patch('gppylib.operations.restore.getpass.getuser', return_value='user')
@patch('gppylib.operations.backup_utils.Context.backup_dir_is_writable', return_value=False)
def test_create_post_data_schema_only_restore_string_with_prefix(self, *mocks):
    # A dump prefix is forwarded via --prefix.
    self.context.dump_prefix = 'bar_'
    expected = 'gp_restore -i -h host -p 5432 -U user --gp-d=db_dumps/20160101 --gp-i --gp-k=20160101010101 --gp-l=p --prefix=bar_ --gp-c -d "testdb"'
    self.assertEqual(self.restore.create_post_data_schema_only_restore_string(None, False), expected)

@patch('gppylib.operations.restore.socket.gethostname', return_value='host')
@patch('gppylib.operations.restore.getpass.getuser', return_value='user')
@patch('gppylib.operations.backup_utils.Context.backup_dir_is_writable', return_value=False)
def test_create_post_data_schema_only_restore_string_with_prefix_and_filter(self, *mocks):
    # Both --prefix and --gp-f appear when a prefix and a filter file are set.
    self.context.dump_prefix = 'bar_'
    expected = 'gp_restore -i -h host -p 5432 -U user --gp-d=db_dumps/20160101 --gp-i --gp-k=20160101010101 --gp-l=p --prefix=bar_ --gp-f=filter_file1 --gp-c -d "testdb"'
    self.assertEqual(self.restore.create_post_data_schema_only_restore_string('filter_file1', False), expected)

@patch('gppylib.operations.restore.socket.gethostname', return_value='host')
@patch('gppylib.operations.restore.getpass.getuser', return_value='user')
@patch('gppylib.operations.backup_utils.Context.backup_dir_is_writable', return_value=False)
def test_create_post_data_schema_only_restore_string_no_backup_dir(self, *mocks):
    # With no writable backup dir there are no report/status options.
    expected = 'gp_restore -i -h host -p 5432 -U user --gp-d=db_dumps/20160101 --gp-i --gp-k=20160101010101 --gp-l=p --gp-c -d "testdb"'
    self.assertEqual(self.restore.create_post_data_schema_only_restore_string(None, False), expected)
@patch('gppylib.operations.restore.socket.gethostname', return_value='host')
@patch('gppylib.operations.restore.getpass.getuser', return_value='user')
def test_create_post_data_schema_only_restore_string_different_status_dir(self, *mocks):
    # A custom report/status directory is forwarded as --gp-r and --status.
    self.context.report_status_dir = '/tmp'
    expected = 'gp_restore -i -h host -p 5432 -U user --gp-d=db_dumps/20160101 --gp-i --gp-k=20160101010101 --gp-l=p --gp-r=/tmp --status=/tmp --gp-c -d "testdb"'
    self.assertEqual(self.restore.create_post_data_schema_only_restore_string(None, False), expected)

@patch('gppylib.operations.restore.socket.gethostname', return_value='host')
@patch('gppylib.operations.restore.getpass.getuser', return_value='user')
def test_create_post_data_schema_only_restore_string_status_dir_and_filter(self, *mocks):
    # Full restore with filter adds -P alongside the status options.
    self.context.report_status_dir = '/tmp'
    expected = 'gp_restore -i -h host -p 5432 -U user --gp-d=db_dumps/20160101 --gp-i --gp-k=20160101010101 --gp-l=p -P --gp-r=/tmp --status=/tmp --gp-c -d "testdb"'
    self.assertEqual(self.restore.create_post_data_schema_only_restore_string(None, True), expected)

@patch('gppylib.operations.restore.socket.gethostname', return_value='host')
@patch('gppylib.operations.restore.getpass.getuser', return_value='user')
def test_create_post_data_schema_only_restore_string_with_ddboost(self, *mocks):
    # DDBoost restores append --ddboost to the command line.
    self.context.report_status_dir = '/tmp'
    self.context.ddboost = True
    expected = 'gp_restore -i -h host -p 5432 -U user --gp-d=db_dumps/20160101 --gp-i --gp-k=20160101010101 --gp-l=p -P --gp-r=/tmp --status=/tmp --gp-c -d "testdb" --ddboost'
    self.assertEqual(self.restore.create_post_data_schema_only_restore_string(None, True), expected)

@patch('gppylib.operations.restore.socket.gethostname', return_value='host')
@patch('gppylib.operations.restore.getpass.getuser', return_value='user')
def test_create_post_data_schema_only_restore_string_with_nbu(self, *mocks):
    # NetBackup settings are forwarded as service-host and block-size options.
    self.context.backup_dir = None
    self.context.netbackup_service_host = "mdw"
    self.context.netbackup_block_size = 1024
    expected = 'gp_restore -i -h host -p 5432 -U user --gp-d=db_dumps/20160101 --gp-i --gp-k=20160101010101 --gp-l=p -P --gp-c -d "testdb" --netbackup-service-host=mdw --netbackup-block-size=1024'
    self.assertEqual(self.restore.create_post_data_schema_only_restore_string(None, True), expected)
@patch('gppylib.operations.restore.socket.gethostname', return_value='host')
@patch('gppylib.operations.restore.getpass.getuser', return_value='user')
def test_build_gpdbrestore_cmd_line_default(self, *mocks):
    # Baseline gpdbrestore command with no extra context settings.
    self.context.backup_dir = None
    expected = 'gpdbrestore -t 20160101010101 --table-file foo -a -v --noplan --noanalyze --noaostats --no-validate-table-name'
    self.assertEqual(_build_gpdbrestore_cmd_line(self.context, '20160101010101', 'foo'), expected)

@patch('gppylib.operations.restore.socket.gethostname', return_value='host')
@patch('gppylib.operations.restore.getpass.getuser', return_value='user')
def test_build_gpdbrestore_cmd_line_backup_dir(self, *mocks):
    # A backup directory is forwarded via -u.
    self.context.backup_dir = '/tmp'
    expected = 'gpdbrestore -t 20160101010101 --table-file foo -a -v --noplan --noanalyze --noaostats --no-validate-table-name -u /tmp'
    self.assertEqual(_build_gpdbrestore_cmd_line(self.context, '20160101010101', 'foo'), expected)

@patch('gppylib.operations.restore.socket.gethostname', return_value='host')
@patch('gppylib.operations.restore.getpass.getuser', return_value='user')
def test_build_gpdbrestore_cmd_line_report_status_dir(self, *mocks):
    # A report/status directory is forwarded via --report-status-dir.
    self.context.backup_dir = None
    self.context.report_status_dir = '/tmp'
    expected = 'gpdbrestore -t 20160101010101 --table-file foo -a -v --noplan --noanalyze --noaostats --no-validate-table-name --report-status-dir=/tmp'
    self.assertEqual(_build_gpdbrestore_cmd_line(self.context, '20160101010101', 'foo'), expected)

@patch('gppylib.operations.restore.socket.gethostname', return_value='host')
@patch('gppylib.operations.restore.getpass.getuser', return_value='user')
def test_build_gpdbrestore_cmd_line_redirected_restore(self, *mocks):
    # A redirected restore database is forwarded via --redirect.
    self.context.backup_dir = None
    self.context.redirected_restore_db = "redb"
    expected = 'gpdbrestore -t 20160101010101 --table-file foo -a -v --noplan --noanalyze --noaostats --no-validate-table-name --redirect=redb'
    self.assertEqual(_build_gpdbrestore_cmd_line(self.context, '20160101010101', 'foo'), expected)
@patch('gppylib.operations.restore.socket.gethostname', return_value='host')
@patch('gppylib.operations.restore.getpass.getuser', return_value='user')
def test_build_gpdbrestore_cmd_line_with_ddboost(self, mock1, mock2):
    """--ddboost must be appended when the context has ddboost enabled."""
    ts = '20160101010101'
    self.context.backup_dir = None
    self.context.ddboost = True
    self.context.report_status_dir = '/tmp'
    expected_output = 'gpdbrestore -t 20160101010101 --table-file foo -a -v --noplan --noanalyze --noaostats --no-validate-table-name --report-status-dir=/tmp --ddboost'
    # NOTE: the dead local `ddboost = True` was removed; the command builder
    # reads the flag from self.context, not from a local variable.
    restore_line = _build_gpdbrestore_cmd_line(self.context, ts, 'foo')
    self.assertEqual(restore_line, expected_output)
@patch('gppylib.operations.restore.dbconn.DbURL')
@patch('gppylib.operations.restore.dbconn.connect')
@patch('gppylib.operations.restore.execSQL')
@patch('gppylib.operations.restore.RestoreDatabase.get_full_tables_in_schema', return_value=['"public"."tablename1"', '"public"."tablename2"', '"public"."tablename3"'])
def test_truncate_restore_tables_restore_schemas(self, mock_tables, mock_execsql, mock_connect, mock_dburl):
    # Restoring whole schemas truncates every table the schema reports.
    self.context.restore_schemas = ['public']
    self.restore.truncate_restore_tables()
    expected_calls = [call(ANY, 'Truncate "public"."tablename1"'),
                      call(ANY, 'Truncate "public"."tablename2"'),
                      call(ANY, 'Truncate "public"."tablename3"')]
    mock_execsql.assert_has_calls(expected_calls)

@patch('gppylib.operations.restore.dbconn.DbURL')
@patch('gppylib.operations.restore.dbconn.connect')
@patch('gppylib.operations.restore.execSQL')
@patch('gppylib.operations.restore.execSQLForSingleton', return_value='t')
def test_truncate_restore_tables_restore_tables(self, mock_singleton, mock_execsql, mock_connect, mock_dburl):
    # Restoring individual tables truncates each listed table.
    self.context.restore_tables = ['public.ao1', 'testschema.heap1']
    self.restore.truncate_restore_tables()
    expected_calls = [call(ANY, 'Truncate "public"."ao1"'),
                      call(ANY, 'Truncate "testschema"."heap1"')]
    mock_execsql.assert_has_calls(expected_calls)
@patch('gppylib.operations.restore.socket.gethostname', return_value='host')
@patch('gppylib.operations.restore.getpass.getuser', return_value='user')
def test_create_restore_string_no_filter_file(self, *mocks):
    # Without a filter file the command carries no --gp-f option.
    self.context.no_plan = True
    expected = 'gp_restore -i -h host -p 5432 -U user --gp-i --gp-k=20160101010101 --gp-l=p --gp-d=db_dumps/20160101 --gp-c -d "testdb" -a'
    self.assertEqual(self.restore.create_restore_string(None, False), expected)

@patch('gppylib.operations.restore.socket.gethostname', return_value='host')
@patch('gppylib.operations.restore.getpass.getuser', return_value='user')
def test_create_restore_string_default(self, *mocks):
    # A filter file is forwarded via --gp-f.
    self.context.no_plan = True
    expected = 'gp_restore -i -h host -p 5432 -U user --gp-i --gp-k=20160101010101 --gp-l=p --gp-d=db_dumps/20160101 --gp-f=/tmp/foo --gp-c -d "testdb" -a'
    self.assertEqual(self.restore.create_restore_string('/tmp/foo', False), expected)

@patch('gppylib.operations.restore.socket.gethostname', return_value='host')
@patch('gppylib.operations.restore.getpass.getuser', return_value='user')
def test_create_restore_string_with_ddboost(self, *mocks):
    # DDBoost restores append --ddboost to the command line.
    self.context.no_plan = True
    self.context.ddboost = True
    expected = 'gp_restore -i -h host -p 5432 -U user --gp-i --gp-k=20160101010101 --gp-l=p --gp-d=db_dumps/20160101 --gp-c -d "testdb" -a --ddboost'
    self.assertEqual(self.restore.create_restore_string(None, False), expected)

@patch('gppylib.operations.restore.socket.gethostname', return_value='host')
@patch('gppylib.operations.restore.getpass.getuser', return_value='user')
def test_create_restore_string_different_status_dir(self, *mocks):
    # A custom report/status directory is forwarded as --gp-r and --status.
    self.context.no_plan = True
    self.context.report_status_dir = '/tmp'
    expected = 'gp_restore -i -h host -p 5432 -U user --gp-i --gp-k=20160101010101 --gp-l=p --gp-d=db_dumps/20160101 --gp-r=/tmp --status=/tmp --gp-c -d "testdb" -a'
    self.assertEqual(self.restore.create_restore_string(None, False), expected)

@patch('gppylib.operations.restore.socket.gethostname', return_value='host')
@patch('gppylib.operations.restore.getpass.getuser', return_value='user')
def test_create_restore_string_no_filter(self, *mocks):
    # Same baseline command as the no-filter-file case.
    self.context.no_plan = True
    expected = 'gp_restore -i -h host -p 5432 -U user --gp-i --gp-k=20160101010101 --gp-l=p --gp-d=db_dumps/20160101 --gp-c -d "testdb" -a'
    self.assertEqual(self.restore.create_restore_string(None, False), expected)
@patch('gppylib.operations.restore.socket.gethostname', return_value='host')
@patch('gppylib.operations.restore.getpass.getuser', return_value='user')
def test_create_restore_string_with_filter_file(self, mock1, mock2):
    """--gp-f must point at the table filter file when one is supplied.

    Renamed from test_create_restore_string_no_filter_file: the old name
    both misdescribed the scenario (a filter file IS passed) and duplicated
    an earlier test method of the same name, which Python class bodies
    resolve by letting the later definition shadow the earlier one — so
    one of the two tests silently never ran.
    """
    self.context.no_plan = True
    table_filter_file = '/tmp/foo'
    full_restore_with_filter = False
    expected_output = 'gp_restore -i -h host -p 5432 -U user --gp-i --gp-k=20160101010101 --gp-l=p --gp-d=db_dumps/20160101 --gp-f=/tmp/foo --gp-c -d "testdb" -a'
    restore_line = self.restore.create_restore_string(table_filter_file, full_restore_with_filter)
    self.assertEqual(restore_line, expected_output)
@patch('gppylib.operations.restore.socket.gethostname', return_value='host')
@patch('gppylib.operations.restore.getpass.getuser', return_value='user')
def test_create_restore_string_ddboost_and_prefix(self, *mocks):
    # With both DDBoost and a dump prefix, --prefix precedes --gp-k and
    # --ddboost is appended last.
    self.context.no_plan = True
    self.context.dump_prefix = 'bar_'
    self.context.ddboost = True
    expected = 'gp_restore -i -h host -p 5432 -U user --gp-i --prefix=bar_ --gp-k=20160101010101 --gp-l=p --gp-d=db_dumps/20160101 --gp-c -d "testdb" -a --ddboost'
    self.assertEqual(self.restore.create_restore_string(None, False), expected)

@patch('gppylib.operations.restore.socket.gethostname', return_value='host')
@patch('gppylib.operations.restore.getpass.getuser', return_value='user')
@patch('gppylib.operations.backup_utils.Context.backup_dir_is_writable', return_value=True)
def test_create_restore_string_backup_dir(self, *mocks):
    # A writable backup dir doubles as dump, report and status directory.
    self.context.no_plan = True
    self.context.backup_dir = '/tmp'
    expected = 'gp_restore -i -h host -p 5432 -U user --gp-i --gp-k=20160101010101 --gp-l=p --gp-d=/tmp/db_dumps/20160101 --gp-r=/tmp/db_dumps/20160101 --status=/tmp/db_dumps/20160101 --gp-c -d "testdb" -a'
    self.assertEqual(self.restore.create_restore_string(None, False), expected)
@patch('gppylib.operations.restore.socket.gethostname', return_value='host')
@patch('gppylib.operations.restore.getpass.getuser', return_value='user')
def test_create_restore_string_no_ao_stats(self, *mocks):
    # Disabling AO stats appends --gp-nostats.
    self.context.no_plan = True
    self.context.no_ao_stats = True
    self.context.report_status_dir = '/tmp'
    self.context.backup_dir = '/foo'
    expected = 'gp_restore -i -h host -p 5432 -U user --gp-i --gp-k=20160101010101 --gp-l=p --gp-d=/foo/db_dumps/20160101 --gp-r=/tmp --status=/tmp --gp-c -d "testdb" -a --gp-nostats'
    self.assertEqual(self.restore.create_restore_string(None, False), expected)

@patch('gppylib.operations.restore.socket.gethostname', return_value='host')
@patch('gppylib.operations.restore.getpass.getuser', return_value='user')
def test_create_restore_string_with_plan(self, *mocks):
    # With a plan (no_plan unset) the baseline options are unchanged.
    self.context.report_status_dir = '/tmp'
    self.context.backup_dir = '/foo'
    expected = 'gp_restore -i -h host -p 5432 -U user --gp-i --gp-k=20160101010101 --gp-l=p --gp-d=/foo/db_dumps/20160101 --gp-r=/tmp --status=/tmp --gp-c -d "testdb" -a'
    self.assertEqual(self.restore.create_restore_string(None, True), expected)

@patch('gppylib.operations.restore.socket.gethostname', return_value='host')
@patch('gppylib.operations.restore.getpass.getuser', return_value='user')
def test_create_restore_string_with_nbu(self, *mocks):
    # A NetBackup service host is appended via --netbackup-service-host.
    self.context.no_plan = True
    self.context.report_status_dir = '/tmp'
    self.context.backup_dir = '/foo'
    self.context.netbackup_service_host = "mdw"
    expected = 'gp_restore -i -h host -p 5432 -U user --gp-i --gp-k=20160101010101 --gp-l=p --gp-d=/foo/db_dumps/20160101 --gp-r=/tmp --status=/tmp --gp-c -d "testdb" -a --netbackup-service-host=mdw'
    self.assertEqual(self.restore.create_restore_string(None, False), expected)
@patch('gppylib.operations.restore.socket.gethostname', return_value='host')
@patch('gppylib.operations.restore.getpass.getuser', return_value='user')
def test_create_restore_string_change_schema(self, *mocks):
    # A change-schema file is forwarded via --change-schema-file on the
    # generated gp_restore command line.
    self.context.no_plan = True
    expected = 'gp_restore -i -h host -p 5432 -U user --gp-i --gp-k=20160101010101 --gp-l=p --gp-d=db_dumps/20160101 --gp-c -d "testdb" -a --change-schema-file=newschema'
    self.assertEqual(self.restore.create_restore_string(None, False, 'newschema'), expected)
@patch('gppylib.operations.backup_utils.Context.generate_filename', return_value='foo')
def test_get_plan_file_contents_no_file(self, *mocks):
    # A missing plan file is an error.
    with self.assertRaisesRegexp(Exception, 'Plan file foo does not exist'):
        get_plan_file_contents(self.context)

@patch('gppylib.operations.backup_utils.Context.generate_filename', return_value='foo')
@patch('gppylib.operations.restore.get_lines_from_file', return_value=[])
@patch('os.path.isfile', return_value=True)
def test_get_plan_file_contents_empty_file(self, *mocks):
    # An empty plan file is an error.
    with self.assertRaisesRegexp(Exception, 'Plan file foo has no contents'):
        get_plan_file_contents(self.context)

@patch('gppylib.operations.backup_utils.Context.generate_filename', return_value='foo')
@patch('gppylib.operations.restore.get_lines_from_file', return_value=['20160101010101:t1,t2', '20160101010111:t3,t4', '20160101121210:t5,t6,t7'])
@patch('os.path.isfile', return_value=True)
def test_get_plan_file_contents_default(self, *mocks):
    # Each "timestamp:tables" line becomes a (timestamp, tables) pair.
    expected = [('20160101010101', 't1,t2'), ('20160101010111', 't3,t4'), ('20160101121210', 't5,t6,t7')]
    self.assertEqual(get_plan_file_contents(self.context), expected)

@patch('gppylib.operations.backup_utils.Context.generate_filename', return_value='foo')
@patch('gppylib.operations.restore.get_lines_from_file', return_value=['20160101010101:', '20160101010111', '20160101121210:'])
@patch('os.path.isfile', return_value=True)
def test_get_plan_file_contents_invalid_format(self, *mocks):
    # A line without the "timestamp:tables" shape is rejected.
    with self.assertRaisesRegexp(Exception, 'Invalid plan file format'):
        get_plan_file_contents(self.context)
@patch('gppylib.operations.restore.get_plan_file_contents', return_value=[('20160101010101', 't1,t2'), ('20160101010111', 't3,t4'), ('20160101121210', 't5,t6,t7')])
@patch('gppylib.operations.restore.Command.run')
@patch('gppylib.operations.restore.update_ao_statistics')
def test_restore_incremental_data_only_default(self, *mocks):
    # A well-formed plan with tables restores successfully.
    self.assertTrue(self.restore.restore_incremental_data_only())

@patch('gppylib.operations.restore.get_plan_file_contents', return_value=[('20160101010101', ''), ('20160101010111', ''), ('20160101121210', '')])
@patch('os.path.isfile', return_value=True)
@patch('gppylib.operations.restore.update_ao_statistics')
def test_restore_incremental_data_only_no_tables(self, *mocks):
    # A plan whose entries list no tables is an error.
    with self.assertRaisesRegexp(Exception, 'There were no tables to restore. Check the plan file contents for restore timestamp 20160101010101'):
        self.restore.restore_incremental_data_only()

@patch('gppylib.operations.restore.get_plan_file_contents', return_value=[('20160101010101', 't1,t2'), ('20160101010111', 't3,t4'), ('20160101121210', 't5,t6,t7')])
@patch('gppylib.operations.restore.Command.run', side_effect=Exception('Error executing gpdbrestore'))
@patch('gppylib.operations.restore.update_ao_statistics')
def test_restore_incremental_data_only_error(self, *mocks):
    # A failing gpdbrestore invocation propagates its exception.
    with self.assertRaisesRegexp(Exception, 'Error executing gpdbrestore'):
        self.restore.restore_incremental_data_only()
def test_create_filter_file_no_tables(self):
    """create_filter_file() returns None when there are no tables to restore."""
    self.context.restore_tables = None
    # assertEquals is a deprecated alias of assertEqual.
    self.assertEqual(self.restore.create_filter_file(), None)
@patch('gppylib.operations.restore.get_all_segment_addresses', return_value=['host1'])
@patch('gppylib.operations.restore.scp_file_to_hosts')
def test_create_filter_file_default(self, m1, m2):
    """Each restore table must be written to the filter file, one per line."""
    self.context.restore_tables = ['public.ao1', 'testschema.heap1']
    m = mock_open()
    with patch('tempfile.NamedTemporaryFile', m, create=True):
        # The returned filename is irrelevant to this assertion, so the
        # unused `fname` binding was dropped.
        self.restore.create_filter_file()
    result = m()
    self.assertEqual(len(self.context.restore_tables), len(result.write.call_args_list))
    for i in range(len(self.context.restore_tables)):
        self.assertEqual(call(self.context.restore_tables[i]+'\n'), result.write.call_args_list[i])
@patch('gppylib.operations.restore.get_lines_from_file', return_value=['public.t1', 'public.t2', 'public.t3'])
@patch('os.path.isfile', return_value=True)
def test_get_restore_tables_from_table_file_default(self, *mocks):
    # The table file contents are returned verbatim, one table per line.
    tables = get_restore_tables_from_table_file('/foo')
    self.assertEqual(['public.t1', 'public.t2', 'public.t3'], tables)
@patch('os.path.isfile', return_value=False)
def test_get_restore_tables_from_table_file_no_file(self, mock):
    """A missing table file must raise instead of returning a table list."""
    table_file = '/foo'
    # The unused `expected_result` list and the dead `result =` binding
    # inside the assertRaises context were removed.
    with self.assertRaisesRegexp(Exception, 'Table file does not exist'):
        get_restore_tables_from_table_file(table_file)
def test_check_table_name_format_and_duplicate_missing_schema(self):
    # A bare table name without a schema qualifier must be rejected.
    with self.assertRaisesRegexp(Exception, 'No schema name supplied'):
        check_table_name_format_and_duplicate(['publicao1', 'public.ao2'], None)

def test_check_table_name_format_and_duplicate_default(self):
    # Well-formed schema-qualified names pass without error.
    check_table_name_format_and_duplicate(['public.ao1', 'public.ao2'], [])

def test_check_table_name_format_and_duplicate_no_tables(self):
    # Empty table and schema lists are accepted.
    check_table_name_format_and_duplicate([], [])

def test_check_table_name_format_and_duplicate_duplicate_tables(self):
    # Duplicate table names collapse to a single entry.
    deduped, _ = check_table_name_format_and_duplicate(['public.ao1', 'public.ao1'], [])
    self.assertEqual(deduped, ['public.ao1'])

def test_check_table_name_format_and_duplicate_funny_chars(self):
    # Names with special characters survive intact; a table whose schema
    # appears in the schema list is dropped from the resolved table list.
    funny = ' `"@#$%^&( )_|:;<>?/-+={}[]*1Aa . `"@#$%^&( )_|:;<>?/-+={}[]*1Aa '
    tables, schemas = check_table_name_format_and_duplicate([funny, 'schema.ao1'], ['schema'])
    self.assertEqual(tables, [funny])
    self.assertEqual(schemas, ['schema'])
def test_validate_tablenames_exist_in_dump_file_no_tables(self):
    # An empty dump set cannot satisfy any restore request.
    with self.assertRaisesRegexp(Exception, 'No dumped tables to restore.'):
        validate_tablenames_exist_in_dump_file(['schema.ao'], [])

def test_validate_tablenames_exist_in_dump_file_one_table(self):
    # A requested table present in the dump passes validation.
    validate_tablenames_exist_in_dump_file(['schema.ao'], [('schema', 'ao', 'gpadmin')])

def test_validate_tablenames_exist_in_dump_file_nonexistent_table(self):
    # Requesting a table absent from the dump must raise and name it.
    with self.assertRaisesRegexp(Exception, "Tables \['schema.co'\] not found in backup"):
        validate_tablenames_exist_in_dump_file(['schema.ao', 'schema.co'], [('schema', 'ao', 'gpadmin')])
def test_get_restore_table_list_default(self):
    """Only tables present in both lists are written to the filter file."""
    table_list = ['public.ao_table', 'public.ao_table2', 'public.co_table', 'public.heap_table']
    restore_tables = ['public.ao_table2', 'public.co_table']
    m = mock_open()
    with patch('tempfile.NamedTemporaryFile', m, create=True):
        # The return value (a temp filename) is not asserted on; the
        # original dead `result =` store, immediately overwritten by
        # `result = m()`, was removed.
        get_restore_table_list(table_list, restore_tables)
    result = m()
    self.assertEqual(len(restore_tables), len(result.write.call_args_list))
    for i in range(len(restore_tables)):
        self.assertEqual(call(restore_tables[i]+'\n'), result.write.call_args_list[i])

def test_get_restore_table_list_no_restore_tables(self):
    """With no restore list, every dumped table is written out."""
    table_list = ['public.ao_table', 'public.ao_table2', 'public.co_table', 'public.heap_table']
    restore_tables = None
    m = mock_open()
    with patch('tempfile.NamedTemporaryFile', m, create=True):
        # Dead `result =` store removed (see default case above).
        get_restore_table_list(table_list, restore_tables)
    result = m()
    self.assertEqual(len(table_list), len(result.write.call_args_list))
    for i in range(len(table_list)):
        self.assertEqual(call(table_list[i]+'\n'), result.write.call_args_list[i])

def test_get_restore_table_list_extra_restore_tables(self):
    """Restore tables absent from the dumped table list are filtered out."""
    table_list = ['public.ao_table', 'public.ao_table2', 'public.co_table', 'public.heap_table']
    restore_tables = ['public.ao_table2', 'public.co_table', 'public.ao_table3']
    expected = ['public.ao_table2', 'public.co_table']
    m = mock_open()
    with patch('tempfile.NamedTemporaryFile', m, create=True):
        # Dead `result =` store removed (see default case above).
        get_restore_table_list(table_list, restore_tables)
    result = m()
    self.assertEqual(len(expected), len(result.write.call_args_list))
    for i in range(len(expected)):
        self.assertEqual(call(expected[i]+'\n'), result.write.call_args_list[i])
def test_validate_restore_tables_list_default(self):
    # Tables present somewhere in the plan file validate successfully.
    plan = [('20160101121213', 'public.t1'), ('20160101010101', 'public.t2,public.t3'), ('20160101010101', 'public.t4')]
    validate_restore_tables_list(plan, ['public.t1', 'public.t2'])

def test_validate_restore_tables_list_invalid_tables(self):
    # Requesting a table absent from the plan file must raise.
    plan = [('20160101121213', 'public.t1'), ('20160101010101', 'public.t2,public.t3'), ('20160101010101', 'public.t4')]
    with self.assertRaisesRegexp(Exception, 'Invalid tables for -T option: The following tables were not found in plan file'):
        validate_restore_tables_list(plan, ['public.t5', 'public.t2'])
@patch('os.path.exists', return_value=False)
def test_restore_global_no_file(self, *mocks):
    # A missing global dump file is an error naming the expected path.
    with self.assertRaisesRegexp(Exception, 'Unable to locate global file /data/master/p1/db_dumps/20160101/gp_global_1_1_20160101010101 in dump set'):
        self.restore._restore_global(self.context)

@patch('os.path.exists', return_value=True)
@patch('gppylib.commands.gp.Psql.run')
def test_restore_global_default(self, *mocks):
    # With the global file present the restore must complete without raising.
    self.restore._restore_global(self.context)
@patch('gppylib.operations.restore.execSQLForSingleton')
@patch('pygresql.pgdb.pgdbCnx.commit')
def test_update_ao_stat_func_default(self, *mocks):
    # Well below the batch size: no commit happens, so conn=None is harmless.
    update_ao_stat_func(None, 'schema', 'table', 1, 1000)

@patch('pygresql.pgdb.pgdbCnx.commit')
@patch('gppylib.operations.restore.execSQLForSingleton')
def test_update_ao_stat_func_near_batch_size(self, *mocks):
    # One short of the batch size: still no commit.
    update_ao_stat_func(None, 'schema', 'table', 999, 1000)

@patch('gppylib.operations.restore.execSQLForSingleton')
@patch('pygresql.pgdb.pgdbCnx.commit')
def test_update_ao_stat_func_equal_batch_size(self, *mocks):
    # Hitting the batch size triggers conn.commit(), which blows up on None.
    with self.assertRaisesRegexp(AttributeError, "'NoneType' object has no attribute 'commit'"):
        update_ao_stat_func(None, 'schema', 'table', 1000, 1000)

@patch('gppylib.operations.restore.execSQLForSingleton')
@patch('pygresql.pgdb.pgdbCnx.commit')
def test_update_ao_stat_func_over_batch_size(self, *mocks):
    # Just past a batch boundary: no commit on this call.
    update_ao_stat_func(None, 'schema', 'table', 1001, 1000)

@patch('gppylib.operations.restore.execSQLForSingleton')
@patch('pygresql.pgdb.pgdbCnx.commit')
def test_update_ao_stat_func_double_batch_size(self, *mocks):
    # An exact multiple of the batch size commits again.
    with self.assertRaisesRegexp(AttributeError, "'NoneType' object has no attribute 'commit'"):
        update_ao_stat_func(None, 'schema', 'table', 2000, 1000)
@patch('gppylib.operations.restore.execute_sql', return_value=[['t1', 'public']])
@patch('gppylib.operations.restore.dbconn.connect')
@patch('gppylib.operations.restore.update_ao_stat_func')
def test_update_ao_statistics_default(self, *mocks):
    # Smoke-test the supported argument combinations; none should raise.
    update_ao_statistics(self.context, [])
    update_ao_statistics(self.context, restored_tables=['public.t1'], restored_schema=[], restore_all=False)
    update_ao_statistics(self.context, restored_tables=[], restored_schema=['public'], restore_all=False)
    update_ao_statistics(self.context, restored_tables=[], restored_schema=[], restore_all=True)
def test_generate_restored_tables_no_table(self):
    # Nothing selected and no full restore: no tables should match.
    rows = [['t1', 'public'], ['t2', 'public'], ['foo', 'bar']]
    restored = generate_restored_tables(rows, restored_tables=[], restored_schema=[], restore_all=False)
    self.assertEqual(restored, set())
def test_generate_restored_tables_specified_table(self):
    # Selecting one qualified table yields exactly that (schema, table) pair.
    rows = [['t1', 'public'], ['t2', 'public'], ['foo', 'bar']]
    restored = generate_restored_tables(rows, restored_tables=['public.t1'], restored_schema=[], restore_all=False)
    self.assertEqual(restored, set([('public', 't1')]))
def test_generate_restored_tables_specified_schema(self):
    # Selecting a schema yields every table in that schema.
    rows = [['t1', 'public'], ['t2', 'public'], ['foo', 'bar']]
    restored = generate_restored_tables(rows, restored_tables=[], restored_schema=['public'], restore_all=False)
    self.assertEqual(restored, set([('public', 't1'), ('public', 't2')]))
def test_generate_restored_tables_full_restore(self):
    # A full restore yields every (schema, table) pair from the result rows.
    rows = [['t1', 'public'], ['t2', 'public'], ['foo', 'bar']]
    restored = generate_restored_tables(rows, restored_tables=[], restored_schema=[], restore_all=True)
    self.assertEqual(restored, set([('public', 't1'), ('public', 't2'), ('bar', 'foo')]))
@patch('gppylib.operations.restore.dbconn.connect')
@patch('gppylib.db.dbconn.execSQLForSingleton', return_value=5)
def test_check_gp_toolkit_true(self, m1, m2):
    # A non-zero singleton result means gp_toolkit is present.
    self.assertTrue(self.restore.check_gp_toolkit())
@patch('gppylib.operations.restore.dbconn.connect')
@patch('gppylib.db.dbconn.execSQLForSingleton', return_value=0)
def test_check_gp_toolkit_false(self, m1, m2):
    # A zero singleton result means gp_toolkit is absent.
    self.assertFalse(self.restore.check_gp_toolkit())
@patch('gppylib.operations.backup_utils.dbconn.DbURL')
@patch('gppylib.operations.backup_utils.dbconn.connect')
@patch('gppylib.operations.restore.execSQL')
def test_analyze_restore_tables_default(self, mock1, mock2, mock3):
    # Smoke test: ANALYZE of two restored tables runs without error.
    self.context.restore_tables = ['public.t1', 'public.t2']
    self.restore._analyze_restore_tables()
@patch('gppylib.operations.restore.execSQL', side_effect=Exception('analyze failed'))
@patch('gppylib.operations.backup_utils.dbconn.DbURL')
@patch('gppylib.operations.backup_utils.dbconn.connect')
def test_analyze_restore_tables_analyze_failed(self, mock1, mock2, mock3):
    # A failing ANALYZE statement must propagate as an exception.
    self.context.restore_tables = ['public.t1', 'public.t2']
    self.assertRaises(Exception, self.restore._analyze_restore_tables)
@patch('gppylib.operations.backup_utils.execSQL')
@patch('gppylib.operations.backup_utils.dbconn.DbURL', side_effect=Exception('Failed'))
@patch('gppylib.operations.backup_utils.dbconn.connect')
def test_analyze_restore_tables_connection_failed(self, mock1, mock2, mock3):
    # A failure building the connection URL must propagate as an exception.
    self.context.restore_tables = ['public.t1', 'public.t2']
    self.assertRaises(Exception, self.restore._analyze_restore_tables)
@patch('gppylib.operations.backup_utils.dbconn.DbURL')
@patch('gppylib.operations.backup_utils.dbconn.connect')
@patch('gppylib.operations.restore.execSQL')
def test_analyze_restore_tables_three_batches(self, mock1, mock2, mock3):
    # 3002 tables should be analyzed in 3 batches (implied batch size 1000+).
    self.context.restore_tables = ['public.t%d' % i for i in range(3002)]
    self.assertEqual(self.restore._analyze_restore_tables(), 3)
@patch('gppylib.operations.backup_utils.dbconn.DbURL')
@patch('gppylib.operations.backup_utils.dbconn.connect')
@patch('gppylib.operations.backup_utils.dbconn.execSQL')
def test_analyze_restore_tables_change_schema(self, mock1, mock2, mock3):
    # Smoke test: a schema remapping must not break the ANALYZE pass.
    self.context.restore_tables = ['public.t1', 'public.t2']
    self.context.change_schema = 'newschema'
    self.restore._analyze_restore_tables()
@patch('gppylib.operations.restore.execSQL', side_effect=Exception())
@patch('gppylib.operations.backup_utils.dbconn.DbURL')
@patch('gppylib.operations.backup_utils.dbconn.connect')
def test_analyze_restore_tables_execSQL_failed(self, mock1, mock2, mock3):
    # The wrapped error message must name the failing table and database.
    self.context.restore_db = 'db1'
    self.context.restore_tables = ['public.t1', 'public.t2']
    self.assertRaisesRegexp(Exception, 'Issue with \'ANALYZE\' of restored table \'"public"."t1"\' in \'db1\' database', self.restore._analyze_restore_tables)
@patch('os.path.exists', side_effect=[True, False])
def test_validate_metadata_file_with_compression_exists(self, mock):
    # First existence probe (the .gz path) succeeds -> True (compressed).
    compressed_file = 'compressed_file.gz'
    self.assertTrue(self.validate_timestamp.validate_metadata_file(compressed_file))
@patch('os.path.exists', side_effect=[False, False])
def test_validate_metadata_file_with_compression_doesnt_exists(self, mock):
    # Neither the compressed nor the plain file exists -> error.
    compressed_file = 'compressed_file.gz'
    with self.assertRaisesRegexp(ExceptionNoStackTraceNeeded, 'Unable to find compressed_file or compressed_file.gz'):
        self.validate_timestamp.validate_metadata_file(compressed_file)
@patch('os.path.exists', side_effect=[False, True])
def test_validate_metadata_file_without_compression_exists(self, mock):
    # Only the uncompressed path exists -> False (not compressed).
    compressed_file = 'compressed_file.gz'
    self.assertFalse(self.validate_timestamp.validate_metadata_file(compressed_file))
@patch('os.path.exists', side_effect=[False, False])
def test_validate_metadata_file_without_compression_doesnt_exist(self, mock):
    # No variant of the file exists -> error naming both candidates.
    compressed_file = 'compressed_file.gz'
    with self.assertRaisesRegexp(ExceptionNoStackTraceNeeded, 'Unable to find compressed_file or compressed_file.gz'):
        self.validate_timestamp.validate_metadata_file(compressed_file)
@patch('gppylib.operations.restore.restore_file_with_nbu')
def test_restore_state_files_with_nbu_default(self, mock1):
    """restore_state_files_with_nbu must restore the ao, co and
    last_operation state files, in that order, via restore_file_with_nbu."""
    self.context.netbackup_service_host = "mdw"
    restore_state_files_with_nbu(self.context)
    self.assertEqual(mock1.call_count, 3)
    expected_types = ["ao", "co", "last_operation"]
    # Idiom fix: zip over the recorded calls instead of indexing by range().
    for actual, filetype in zip(mock1.call_args_list, expected_types):
        self.assertEqual(actual, call(self.context, filetype))
@patch('gppylib.operations.backup_utils.Context.generate_filename', return_value='/tmp/foo_schema')
@patch('gppylib.commands.base.Command.run')
def test_restore_file_with_nbu_default(self, mock1, mock2):
    # With only a service host configured, the agent command carries just
    # the host and filename flags, and exactly one filename is generated.
    self.context.netbackup_service_host = "mdw"
    expected = ("gp_bsa_restore_agent --netbackup-service-host mdw "
                "--netbackup-filename /tmp/foo_schema > /tmp/foo_schema")
    with patch.object(Command, '__init__', return_value=None) as cmd:
        restore_file_with_nbu(self.context, "schema")
        cmd.assert_called_with("restoring metadata files to master", expected)
    self.assertEqual(mock2.call_count, 1)
@patch('gppylib.operations.backup_utils.Context.generate_filename', return_value='')
@patch('gppylib.commands.base.Command.run')
def test_restore_file_with_nbu_no_filetype(self, mock1, mock2):
    # When only an explicit path is given, that path (not a generated
    # filename) must appear in the agent command.
    self.context.netbackup_service_host = "mdw"
    self.context.netbackup_block_size = 100
    expected = ("gp_bsa_restore_agent --netbackup-service-host mdw "
                "--netbackup-block-size 100 "
                "--netbackup-filename /tmp/foo_schema > /tmp/foo_schema")
    with patch.object(Command, '__init__', return_value=None) as cmd:
        restore_file_with_nbu(self.context, path="/tmp/foo_schema")
        cmd.assert_called_with("restoring metadata files to master", expected)
@patch('gppylib.operations.backup_utils.Context.generate_filename', return_value='/tmp/foo_schema')
@patch('gppylib.commands.base.Command.run')
def test_restore_file_with_nbu_no_path(self, mock1, mock2):
    # When only a file type is given, the generated filename is used.
    self.context.netbackup_service_host = "mdw"
    self.context.netbackup_block_size = 100
    expected = ("gp_bsa_restore_agent --netbackup-service-host mdw "
                "--netbackup-block-size 100 "
                "--netbackup-filename /tmp/foo_schema > /tmp/foo_schema")
    with patch.object(Command, '__init__', return_value=None) as cmd:
        restore_file_with_nbu(self.context, "schema")
        cmd.assert_called_with("restoring metadata files to master", expected)
@patch('gppylib.operations.backup_utils.Context.generate_filename', return_value='foo_schema')
@patch('gppylib.commands.base.Command.run')
def test_restore_file_with_nbu_both_args(self, mock1, mock2):
    # Supplying a file type AND an explicit path is ambiguous -> error.
    with self.assertRaisesRegexp(Exception, 'Cannot supply both a file type and a file path to restore_file_with_nbu'):
        restore_file_with_nbu(self.context, "schema", "/tmp/foo_schema")
@patch('gppylib.operations.backup_utils.Context.generate_filename', return_value='foo_schema')
@patch('gppylib.commands.base.Command.run')
def test_restore_file_with_nbu_neither_arg(self, mock1, mock2):
    # Supplying neither a file type nor a path -> error.
    with self.assertRaisesRegexp(Exception, 'Cannot call restore_file_with_nbu with no type or path argument'):
        restore_file_with_nbu(self.context)
@patch('gppylib.operations.backup_utils.Context.generate_filename', return_value='/tmp/foo_schema')
@patch('gppylib.commands.base.Command.run')
def test_restore_file_with_nbu_block_size(self, mock1, mock2):
    # A configured block size must be forwarded on the agent command line.
    self.context.netbackup_service_host = "mdw"
    self.context.netbackup_block_size = 1024
    expected = ("gp_bsa_restore_agent --netbackup-service-host mdw "
                "--netbackup-block-size 1024 "
                "--netbackup-filename /tmp/foo_schema > /tmp/foo_schema")
    with patch.object(Command, '__init__', return_value=None) as cmd:
        restore_file_with_nbu(self.context, "schema")
        cmd.assert_called_with("restoring metadata files to master", expected)
@patch('gppylib.operations.backup_utils.Context.generate_filename', return_value='/tmp/foo_schema')
@patch('gppylib.commands.base.Command.run')
def test_restore_file_with_nbu_keyword(self, mock1, mock2):
    # NOTE(review): netbackup_keyword is set but the expected command has no
    # keyword flag — presumably keywords apply only to backup. Verify.
    self.context.netbackup_service_host = "mdw"
    self.context.netbackup_keyword = "foo"
    expected = ("gp_bsa_restore_agent --netbackup-service-host mdw "
                "--netbackup-filename /tmp/foo_schema > /tmp/foo_schema")
    with patch.object(Command, '__init__', return_value=None) as cmd:
        restore_file_with_nbu(self.context, "schema")
        cmd.assert_called_with("restoring metadata files to master", expected)
@patch('gppylib.operations.backup_utils.Context.generate_filename', return_value='/tmp/foo_schema')
@patch('gppylib.commands.base.Command.run')
def test_restore_file_with_nbu_segment(self, mock1, mock2):
    # When a hostname is supplied the command must be dispatched remotely.
    self.context.netbackup_service_host = "mdw"
    expected = ("gp_bsa_restore_agent --netbackup-service-host mdw "
                "--netbackup-filename /tmp/foo_schema > /tmp/foo_schema")
    with patch.object(Command, '__init__', return_value=None) as cmd:
        restore_file_with_nbu(self.context, "schema", hostname="sdw")
        from gppylib.commands.base import REMOTE
        cmd.assert_called_with("restoring metadata files to segment", expected, ctxt=REMOTE, remoteHost="sdw")
class MyMock(MagicMock):
    """A MagicMock standing in for a GpArray, holding a fixed number of
    per-segment Mock objects in self.mock_segs."""

    def __init__(self, num_segs):
        # Bug fix: the original called super(MagicMock, self).__init__(),
        # which skips MagicMock's own initialization in the MRO; the first
        # argument to super() must be the defining class.
        super(MyMock, self).__init__()
        self.mock_segs = [Mock() for _ in range(num_segs)]

    def getSegmentList(self):
        """Configure host/dbid lookups on each segment mock and return them.

        Primary dbids are assigned sequentially starting at 2 (dbid 1 is
        conventionally the master).
        """
        for index, seg in enumerate(self.mock_segs):
            seg.get_active_primary.getSegmentHostName.return_value = Mock()
            seg.get_primary_dbid.return_value = index + 2
        return self.mock_segs
@patch('gppylib.operations.dump.GpArray.initFromCatalog', return_value=MyMock(1))
@patch('gppylib.gparray.GpDB.getSegmentHostName', return_value='sdw')
def test_restore_config_files_with_nbu_single_segment(self, mock1, mock2):
    # Decorators apply bottom-up: mock1 patches getSegmentHostName,
    # mock2 patches initFromCatalog.
    with patch('gppylib.operations.restore.restore_file_with_nbu', side_effect=my_counter) as nbu_mock:
        # my_counter increments the module-global `i` on every call.
        global i
        i = 0
        self.context.netbackup_service_host = "mdw"
        self.context.netbackup_policy = "test_policy"
        self.context.netbackup_schedule = "test_schedule"
        restore_config_files_with_nbu(self.context)
        # First delegated call must restore the master config.
        args, _ = nbu_mock.call_args_list[0]
        self.assertEqual(args[1], "master_config")
        # NOTE(review): mock2 is the initFromCatalog patch object itself,
        # not its MyMock(1) return_value; `mock2.mock_segs` is therefore an
        # auto-created mock attribute, so this loop may never execute.
        # Confirm whether `mock2.return_value.mock_segs` was intended.
        for id, seg in enumerate(mock2.mock_segs):
            self.assertEqual(seg.get_active_primary.call_count, 1)
            self.assertEqual(seg.get_primary_dbid.call_count, 1)
            args, _ = nbu_mock.call_args_list[id]
            self.assertEqual(args, ("segment_config", id+2, "sdw"))
        # Master config + one segment config = 2 restore calls in total.
        self.assertEqual(i, 2)
@patch('gppylib.operations.dump.GpArray.initFromCatalog', return_value=MyMock(3))
@patch('gppylib.gparray.GpDB.getSegmentHostName', return_value='sdw')
def test_restore_config_files_with_nbu_multiple_segments(self, mock1, mock2):
    # Decorators apply bottom-up: mock1 patches getSegmentHostName,
    # mock2 patches initFromCatalog.
    with patch('gppylib.operations.restore.restore_file_with_nbu', side_effect=my_counter) as nbu_mock:
        # my_counter increments the module-global `i` on every call.
        global i
        i = 0
        self.context.netbackup_service_host = "mdw"
        self.context.netbackup_policy = "test_policy"
        self.context.netbackup_schedule = "test_schedule"
        restore_config_files_with_nbu(self.context)
        args, _ = nbu_mock.call_args_list[0]
        self.assertEqual(args[1], "master_config")
        # NOTE(review): as in the single-segment test, `mock2.mock_segs` is
        # an auto-created mock attribute (mock2 is the patch object, not the
        # MyMock return value), so this loop may never run; also `args` is
        # fetched below but never asserted. Verify intent.
        for id, seg in enumerate(mock2.mock_segs):
            self.assertEqual(seg.get_active_primary.call_count, 1)
            self.assertEqual(seg.get_primary_dbid.call_count, 1)
            args, _ = nbu_mock.call_args_list[id]
        # Master config + three segment configs = 4 restore calls in total.
        self.assertEqual(i, 4)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
# Module-level invocation counter shared with the NBU config-file tests
# above via `global i`.
i=0

def my_counter(*args, **kwargs):
    """Mock side_effect helper: count invocations and return a fresh Mock."""
    global i
    i += 1
    return Mock()
| |
# Copyright (c) 2014-2015, NVIDIA CORPORATION. All rights reserved.
import json
import os
import shutil
import tempfile
import time
import unittest
import itertools
import urllib
from gevent import monkey
monkey.patch_all()
from bs4 import BeautifulSoup
import PIL.Image
from urlparse import urlparse
from cStringIO import StringIO
import digits.test_views
from test_imageset_creator import create_classification_imageset, IMAGE_SIZE as DUMMY_IMAGE_SIZE, IMAGE_COUNT as DUMMY_IMAGE_COUNT
# May be too short on a slow system
TIMEOUT_DATASET = 15
################################################################################
# Base classes (they don't start with "Test" so nose won't run them)
################################################################################
class BaseViewsTest(digits.test_views.BaseViewsTest):
    """
    Provides some functions

    Thin wrappers around the generic job helpers, pinned to the
    'datasets' job type.
    """

    @classmethod
    def dataset_exists(cls, job_id):
        return cls.job_exists(job_id, 'datasets')

    @classmethod
    def dataset_status(cls, job_id):
        return cls.job_status(job_id, 'datasets')

    @classmethod
    def dataset_info(cls, job_id):
        return cls.job_info(job_id, 'datasets')

    @classmethod
    def abort_dataset(cls, job_id):
        return cls.abort_job(job_id, job_type='datasets')

    @classmethod
    def dataset_wait_completion(cls, job_id, **kwargs):
        kwargs['job_type'] = 'datasets'
        # Fall back to the module-level timeout unless the caller gave one.
        kwargs.setdefault('timeout', TIMEOUT_DATASET)
        return cls.job_wait_completion(job_id, **kwargs)

    @classmethod
    def delete_dataset(cls, job_id):
        return cls.delete_job(job_id, job_type='datasets')
class BaseViewsTestWithImageset(BaseViewsTest):
    """
    Provides an imageset and some functions
    """
    # Inherited classes may want to override these attributes
    IMAGE_HEIGHT = 10
    IMAGE_WIDTH = 10
    IMAGE_CHANNELS = 3
    UNBALANCED_CATEGORY = False

    @classmethod
    def setUpClass(cls):
        # Build a throwaway image folder shared by every test in the class.
        super(BaseViewsTestWithImageset, cls).setUpClass()
        cls.imageset_folder = tempfile.mkdtemp()
        # create imageset
        cls.imageset_paths = create_classification_imageset(cls.imageset_folder,
                add_unbalanced_category=cls.UNBALANCED_CATEGORY)
        cls.created_datasets = []

    @classmethod
    def tearDownClass(cls):
        # delete any created datasets
        for job_id in cls.created_datasets:
            cls.delete_dataset(job_id)
        # delete imageset
        shutil.rmtree(cls.imageset_folder)
        super(BaseViewsTestWithImageset, cls).tearDownClass()

    @classmethod
    def create_dataset(cls, **kwargs):
        """
        Create a dataset
        Returns the job_id
        Raises RuntimeError if job fails to create
        Keyword arguments:
        **kwargs -- data to be sent with POST request
        """
        data = {
            'dataset_name': 'test_dataset',
            'method': 'folder',
            'folder_train': cls.imageset_folder,
            'resize_channels': cls.IMAGE_CHANNELS,
            'resize_width': cls.IMAGE_WIDTH,
            'resize_height': cls.IMAGE_HEIGHT,
        }
        data.update(kwargs)
        # 'json' is a flag for this helper, not a form field to POST.
        request_json = data.pop('json', False)
        url = '/datasets/images/classification'
        if request_json:
            url += '.json'
        rv = cls.app.post(url, data=data)
        if request_json:
            if rv.status_code != 200:
                print json.loads(rv.data)
                raise RuntimeError('Model creation failed with %s' % rv.status_code)
            return json.loads(rv.data)['id']
        # expect a redirect
        if not 300 <= rv.status_code <= 310:
            # Surface the page's error banner if there is one.
            s = BeautifulSoup(rv.data)
            div = s.select('div.alert-danger')
            if div:
                raise RuntimeError(div[0])
            else:
                raise RuntimeError('Failed to create dataset')
        job_id = cls.job_id_from_response(rv)
        assert cls.dataset_exists(job_id), 'dataset not found after successful creation'
        # Remember the job so tearDownClass can delete it.
        cls.created_datasets.append(job_id)
        return job_id

    @classmethod
    def categoryCount(cls):
        # Number of labels in the generated imageset.
        return len(cls.imageset_paths.keys())
class BaseViewsTestWithDataset(BaseViewsTestWithImageset):
    """
    Provides a dataset and some functions
    """
    @classmethod
    def setUpClass(cls):
        # Create one dataset up front and wait for it to finish, so tests
        # in subclasses can assume a completed dataset in cls.dataset_id.
        super(BaseViewsTestWithDataset, cls).setUpClass()
        cls.dataset_id = cls.create_dataset(json=True)
        assert cls.dataset_wait_completion(cls.dataset_id) == 'Done', 'create failed'
################################################################################
# Test classes
################################################################################
class TestViews(BaseViewsTest):
    """
    Tests which don't require an imageset or a dataset
    """

    def test_page_dataset_new(self):
        # The "new dataset" form page must load and look like itself.
        response = self.app.get('/datasets/images/classification/new')
        assert response.status_code == 200, 'page load failed with %s' % response.status_code
        assert 'New Image Classification Dataset' in response.data, 'unexpected page format'

    def test_nonexistent_dataset(self):
        # A made-up job id must not be reported as an existing dataset.
        assert not self.dataset_exists('foo'), "dataset shouldn't exist"
class TestCreation(BaseViewsTestWithImageset):
    """
    Dataset creation tests
    """
    def test_nonexistent_folder(self):
        # Creation from a bogus folder must surface as a RuntimeError.
        try:
            job_id = self.create_dataset(
                folder_train = '/not-a-directory'
            )
        except RuntimeError:
            return
        raise AssertionError('Should have failed')

    def test_create_json(self):
        job_id = self.create_dataset(json=True)
        self.abort_dataset(job_id)

    def test_create_delete(self):
        job_id = self.create_dataset()
        assert self.delete_dataset(job_id) == 200, 'delete failed'
        assert not self.dataset_exists(job_id), 'dataset exists after delete'

    def test_create_abort_delete(self):
        job_id = self.create_dataset()
        assert self.abort_dataset(job_id) == 200, 'abort failed'
        assert self.delete_dataset(job_id) == 200, 'delete failed'
        assert not self.dataset_exists(job_id), 'dataset exists after delete'

    def test_create_wait_delete(self):
        job_id = self.create_dataset()
        assert self.dataset_wait_completion(job_id) == 'Done', 'create failed'
        assert self.delete_dataset(job_id) == 200, 'delete failed'
        assert not self.dataset_exists(job_id), 'dataset exists after delete'

    def test_textfiles(self):
        # nose generator test: every combination of path style x upload style.
        for absolute_path in (True, False):
            for local_path in (True, False):
                yield self.check_textfiles, absolute_path, local_path

    def check_textfiles(self, absolute_path=True, local_path=True):
        """
        Create a dataset from textfiles
        Arguments:
        absolute_path -- if False, give relative paths and image folders
        """
        # Build the "image-path label-id" training list and the label list.
        textfile_train_images = ''
        textfile_labels_file = ''
        label_id = 0
        for label, images in self.imageset_paths.iteritems():
            textfile_labels_file += '%s\n' % label
            for image in images:
                image_path = image
                if absolute_path:
                    image_path = os.path.join(self.imageset_folder, image_path)
                textfile_train_images += '%s %d\n' % (image_path, label_id)
            label_id += 1
        data = {
            'method': 'textfile',
            'textfile_use_val': 'y',
        }
        if local_path:
            train_file = os.path.join(self.imageset_folder, "local_train.txt")
            labels_file = os.path.join(self.imageset_folder, "local_labels.txt")
            # create files in local filesystem - these will be removed in tearDownClass() function
            with open(train_file, "w") as outfile:
                outfile.write(textfile_train_images)
            with open(labels_file, "w") as outfile:
                outfile.write(textfile_labels_file)
            data['textfile_use_local_files'] = 'True'
            data['textfile_local_train_images'] = train_file
            # Use the same file for training and validation.
            data['textfile_local_val_images'] = train_file
            data['textfile_local_labels_file'] = labels_file
        else:
            # StringIO wrapping is needed to simulate POST file upload.
            train_upload = (StringIO(textfile_train_images), "train.txt")
            # Use the same list for training and validation.
            val_upload = (StringIO(textfile_train_images), "val.txt")
            labels_upload = (StringIO(textfile_labels_file), "labels.txt")
            data['textfile_train_images'] = train_upload
            data['textfile_val_images'] = val_upload
            data['textfile_labels_file'] = labels_upload
        if not absolute_path:
            # Relative image paths need the base folders supplied separately.
            data['textfile_train_folder'] = self.imageset_folder
            data['textfile_val_folder'] = self.imageset_folder
        job_id = self.create_dataset(**data)
        assert self.dataset_wait_completion(job_id) == 'Done', 'create failed'
class TestImageCount(BaseViewsTestWithImageset):
    """Checks train/val/test image counts for folder-based dataset creation."""

    def test_image_count(self):
        # nose generator test: one check per split type.
        for type in ['train','val','test']:
            yield self.check_image_count, type

    def check_image_count(self, type):
        data = {'folder_pct_val': 20,
                'folder_pct_test': 10}
        if type == 'val':
            data['has_val_folder'] = 'True'
            data['folder_val'] = self.imageset_folder
        elif type == 'test':
            data['has_test_folder'] = 'True'
            data['folder_test'] = self.imageset_folder
        job_id = self.create_dataset(**data)
        assert self.dataset_wait_completion(job_id) == 'Done', 'create failed'
        info = self.dataset_info(job_id)
        if type == 'train':
            assert len(info['ParseFolderTasks']) == 1, 'expected exactly one ParseFolderTasks'
            parse_info = info['ParseFolderTasks'][0]
            image_count = parse_info['train_count'] + parse_info['val_count'] + parse_info['test_count']
            # The splits must honor the requested percentages.
            assert parse_info['val_count'] == 0.2 * image_count
            assert parse_info['test_count'] == 0.1 * image_count
        else:
            # Fixed copy-paste bug: the message said "one" while the
            # assertion requires two tasks (train + explicit val/test folder).
            assert len(info['ParseFolderTasks']) == 2, 'expected exactly two ParseFolderTasks'
            parse_info = info['ParseFolderTasks'][1]
            if type == 'val':
                assert parse_info['train_count'] == 0
                assert parse_info['test_count'] == 0
                image_count = parse_info['val_count']
            else:
                assert parse_info['train_count'] == 0
                assert parse_info['val_count'] == 0
                image_count = parse_info['test_count']
        assert self.categoryCount() == parse_info['label_count']
        assert image_count == DUMMY_IMAGE_COUNT * parse_info['label_count'], 'image count mismatch'
        assert self.delete_dataset(job_id) == 200, 'delete failed'
        assert not self.dataset_exists(job_id), 'dataset exists after delete'
class TestMaxPerClass(BaseViewsTestWithImageset):
    """Checks that folder_*_max_per_class caps the per-label image count."""

    def test_max_per_class(self):
        # nose generator test: one check per split type.
        for type in ['train','val','test']:
            yield self.check_max_per_class, type

    def check_max_per_class(self, type):
        # create dataset, asking for at most DUMMY_IMAGE_COUNT/2 images per class
        assert DUMMY_IMAGE_COUNT%2 == 0
        max_per_class = DUMMY_IMAGE_COUNT/2
        data = {'folder_pct_val': 0}
        if type == 'train':
            data['folder_train_max_per_class'] = max_per_class
        if type == 'val':
            data['has_val_folder'] = 'True'
            data['folder_val'] = self.imageset_folder
            data['folder_val_max_per_class'] = max_per_class
        elif type == 'test':
            data['has_test_folder'] = 'True'
            data['folder_test'] = self.imageset_folder
            data['folder_test_max_per_class'] = max_per_class
        job_id = self.create_dataset(**data)
        assert self.dataset_wait_completion(job_id) == 'Done', 'create failed'
        info = self.dataset_info(job_id)
        if type == 'train':
            assert len(info['ParseFolderTasks']) == 1, 'expected exactly one ParseFolderTasks'
            parse_info = info['ParseFolderTasks'][0]
        else:
            # Fixed copy-paste bug: the message said "one" while the
            # assertion requires two tasks (train + explicit val/test folder).
            assert len(info['ParseFolderTasks']) == 2, 'expected exactly two ParseFolderTasks'
            parse_info = info['ParseFolderTasks'][1]
        image_count = parse_info['train_count'] + parse_info['val_count'] + parse_info['test_count']
        assert image_count == max_per_class * parse_info['label_count'], 'image count mismatch'
        assert self.delete_dataset(job_id) == 200, 'delete failed'
        assert not self.dataset_exists(job_id), 'dataset exists after delete'
class TestMinPerClass(BaseViewsTestWithImageset):
    """Checks folder_*_min_per_class against an unbalanced imageset."""
    # One category is created with fewer images than the others.
    UNBALANCED_CATEGORY = True

    def test_min_per_class(self):
        # nose generator test: one check per split type.
        for type in ['train','val','test']:
            yield self.check_min_per_class, type

    def check_min_per_class(self, type):
        # create dataset, asking for one more image per class
        # than available in the "unbalanced" category
        min_per_class = DUMMY_IMAGE_COUNT/2+1
        data = {'folder_pct_val': 0}
        if type == 'train':
            data['folder_train_min_per_class'] = min_per_class
        if type == 'val':
            data['has_val_folder'] = 'True'
            data['folder_val'] = self.imageset_folder
            data['folder_val_min_per_class'] = min_per_class
        elif type == 'test':
            data['has_test_folder'] = 'True'
            data['folder_test'] = self.imageset_folder
            data['folder_test_min_per_class'] = min_per_class
        job_id = self.create_dataset(**data)
        assert self.dataset_wait_completion(job_id) == 'Done', 'create failed'
        info = self.dataset_info(job_id)
        if type == 'train':
            assert len(info['ParseFolderTasks']) == 1, 'expected exactly one ParseFolderTasks'
            parse_info = info['ParseFolderTasks'][0]
        else:
            assert len(info['ParseFolderTasks']) == 2, 'expected exactly two ParseFolderTasks'
            parse_info = info['ParseFolderTasks'][1]
        # One fewer label than categories — presumably the under-populated
        # category was dropped for not meeting min_per_class.
        assert self.categoryCount() == parse_info['label_count']+1
        assert self.delete_dataset(job_id) == 200, 'delete failed'
        assert not self.dataset_exists(job_id), 'dataset exists after delete'
class TestCreated(BaseViewsTestWithDataset):
    """
    Tests on a dataset that has already been created
    """

    def test_index_json(self):
        # The dataset must be listed on the JSON index page.
        response = self.app.get('/index.json')
        assert response.status_code == 200, 'page load failed with %s' % response.status_code
        listed = json.loads(response.data)
        found = any(entry['id'] == self.dataset_id for entry in listed['datasets'])
        assert found, 'dataset not found in list'

    def test_dataset_json(self):
        # The dataset's own JSON page must report the right job id.
        response = self.app.get('/datasets/%s.json' % self.dataset_id)
        assert response.status_code == 200, 'page load failed with %s' % response.status_code
        assert json.loads(response.data)['id'] == self.dataset_id, 'expected different job_id'

    def test_mean_dimensions(self):
        # The generated mean image must match the requested dimensions.
        img_url = '/files/%s/mean.jpg' % self.dataset_id
        response = self.app.get(img_url)
        assert response.status_code == 200, 'GET on %s returned %s' % (img_url, response.status_code)
        raw = StringIO(response.data)
        raw.seek(0)
        mean_image = PIL.Image.open(raw)
        assert mean_image.size == (self.IMAGE_WIDTH, self.IMAGE_HEIGHT), 'image size is %s' % (mean_image.size,)
class TestCreatedGrayscale(TestCreated):
    # Re-run the TestCreated suite with a single-channel (grayscale) dataset.
    IMAGE_CHANNELS = 1
class TestCreatedWide(TestCreated):
    # Re-run the TestCreated suite with a non-square (wide) image size.
    IMAGE_WIDTH = 20
class TestCreatedTall(TestCreated):
    # Re-run the TestCreated suite with a non-square (tall) image size.
    IMAGE_HEIGHT = 20
| |
"""vobject module for reading vCard and vCalendar files."""
import copy
import re
import sys
import logging
import StringIO, cStringIO
import string
import exceptions
import codecs
#------------------------------------ Logging ----------------------------------
logger = logging.getLogger(__name__)
# Only attach a handler if the application hasn't configured one on the
# root logger. NOTE(review): the check inspects the ROOT logger's handlers
# but the handler/level are set on this module's logger — confirm intent.
if not logging.getLogger().handlers:
    handler = logging.StreamHandler()
    formatter = logging.Formatter('%(name)s %(levelname)s %(message)s')
    handler.setFormatter(formatter)
    logger.addHandler(handler)
    logger.setLevel(logging.ERROR) # Log errors
DEBUG = False # Don't waste time on debug calls
#----------------------------------- Constants ---------------------------------
# Line-ending and whitespace characters used when parsing/serializing
# vCard and vCalendar text.
CR = '\r'
LF = '\n'
CRLF = CR + LF
SPACE = ' '
TAB = '\t'
SPACEORTAB = SPACE + TAB
#-------------------------------- Useful modules -------------------------------
# use doctest, it kills two birds with one stone and docstrings often become
# more readable to boot (see parseLine's docstring).
# use logging, then when debugging we can just set our verbosity.
# use epydoc syntax for documenting code, please document every class and non-
# trivial method (see http://epydoc.sourceforge.net/epytext.html
# and http://epydoc.sourceforge.net/fields.html). Also, please
# follow http://www.python.org/peps/pep-0257.html for docstrings.
#-------------------------------------------------------------------------------
#--------------------------------- Main classes --------------------------------
class VBase(object):
    """Base class for ContentLine and Component.
    @ivar behavior:
        The Behavior class associated with this object, which controls
        validation, transformations, and encoding.
    @ivar parentBehavior:
        The object's parent's behavior, or None if no behaviored parent exists.
    @ivar isNative:
        Boolean describing whether this component is a Native instance.
    @ivar group:
        An optional group prefix, should be used only to indicate sort order in
        vCards, according to RFC2426
    """
    def __init__(self, group=None, *args, **kwds):
        super(VBase, self).__init__(*args, **kwds)
        self.group = group
        self.behavior = None
        self.parentBehavior = None
        self.isNative = False

    def copy(self, copyit):
        # Copy the behavior-related attributes from another VBase instance.
        self.group = copyit.group
        self.behavior = copyit.behavior
        self.parentBehavior = copyit.parentBehavior
        self.isNative = copyit.isNative

    def validate(self, *args, **kwds):
        """Call the behavior's validate method, or return True."""
        if self.behavior:
            return self.behavior.validate(self, *args, **kwds)
        else: return True

    def getChildren(self):
        """Return an iterable containing the contents of the object."""
        return []

    def clearBehavior(self, cascade=True):
        """Set behavior to None. Do for all descendants if cascading."""
        self.behavior=None
        if cascade: self.transformChildrenFromNative()

    def autoBehavior(self, cascade=False):
        """Set behavior if name is in self.parentBehavior.knownChildren.
        If cascade is True, unset behavior and parentBehavior for all
        descendants, then recalculate behavior and parentBehavior.
        """
        parentBehavior = self.parentBehavior
        if parentBehavior is not None:
            # Only index [2] of the knownChildren tuple is used here —
            # presumably a behavior-version key for getBehavior. TODO confirm.
            knownChildTup = parentBehavior.knownChildren.get(self.name, None)
            if knownChildTup is not None:
                behavior = getBehavior(self.name, knownChildTup[2])
                if behavior is not None:
                    self.setBehavior(behavior, cascade)
                    # Decode still-encoded content as soon as a behavior
                    # is known.
                    if isinstance(self, ContentLine) and self.encoded:
                        self.behavior.decode(self)
            elif isinstance(self, ContentLine):
                # Unknown child: fall back to the parent's default behavior.
                self.behavior = parentBehavior.defaultBehavior
                if self.encoded and self.behavior:
                    self.behavior.decode(self)

    def setBehavior(self, behavior, cascade=True):
        """Set behavior. If cascade is True, autoBehavior all descendants."""
        self.behavior=behavior
        if cascade:
            for obj in self.getChildren():
                obj.parentBehavior=behavior
                obj.autoBehavior(True)

    def transformToNative(self):
        """Transform this object into a custom VBase subclass.
        transformToNative should always return a representation of this object.
        It may do so by modifying self in place then returning self, or by
        creating a new object.
        """
        if self.isNative or not self.behavior or not self.behavior.hasNative:
            return self
        else:
            try:
                return self.behavior.transformToNative(self)
            except Exception, e:
                # wrap errors in transformation in a ParseError
                lineNumber = getattr(self, 'lineNumber', None)
                if isinstance(e, ParseError):
                    # Already a ParseError: just attach the line number.
                    if lineNumber is not None:
                        e.lineNumber = lineNumber
                    raise
                else:
                    msg = "In transformToNative, unhandled exception: %s: %s"
                    msg = msg % (sys.exc_info()[0], sys.exc_info()[1])
                    new_error = ParseError(msg, lineNumber)
                    # Python 2 three-argument raise: keep original traceback.
                    raise ParseError, new_error, sys.exc_info()[2]

    def transformFromNative(self):
        """Return self transformed into a ContentLine or Component if needed.
        May have side effects. If it does, transformFromNative and
        transformToNative MUST have perfectly inverse side effects. Allowing
        such side effects is convenient for objects whose transformations only
        change a few attributes.
        Note that it isn't always possible for transformFromNative to be a
        perfect inverse of transformToNative, in such cases transformFromNative
        should return a new object, not self after modifications.
        """
        if self.isNative and self.behavior and self.behavior.hasNative:
            try:
                return self.behavior.transformFromNative(self)
            except Exception, e:
                # wrap errors in transformation in a NativeError
                lineNumber = getattr(self, 'lineNumber', None)
                if isinstance(e, NativeError):
                    if lineNumber is not None:
                        e.lineNumber = lineNumber
                    raise
                else:
                    msg = "In transformFromNative, unhandled exception: %s: %s"
                    msg = msg % (sys.exc_info()[0], sys.exc_info()[1])
                    new_error = NativeError(msg, lineNumber)
                    # Python 2 three-argument raise: keep original traceback.
                    raise NativeError, new_error, sys.exc_info()[2]
        else: return self

    def transformChildrenToNative(self):
        """Recursively replace children with their native representation."""
        pass

    def transformChildrenFromNative(self, clearBehavior=True):
        """Recursively transform native children to vanilla representations."""
        pass

    def serialize(self, buf=None, lineLength=75, validate=True, behavior=None):
        """Serialize to buf if it exists, otherwise return a string.
        Use self.behavior.serialize if behavior exists.
        """
        if not behavior:
            behavior = self.behavior
        if behavior:
            if DEBUG: logger.debug("serializing %s with behavior" % self.name)
            return behavior.serialize(self, buf, lineLength, validate)
        else:
            if DEBUG: logger.debug("serializing %s without behavior" % self.name)
            return defaultSerialize(self, buf, lineLength)
def ascii(s):
    """Turn s into a printable string. Won't work for 8-bit ASCII."""
    # Python 2 only: `unicode` does not exist on Python 3. Non-ASCII
    # characters are replaced with '?'.
    return unicode(s).encode('ascii', 'replace')
def toVName(name, stripNum=0, upper=False):
    """
    Turn a Python name into an iCalendar style name, optionally uppercase and
    with characters stripped off.
    """
    if upper:
        name = name.upper()
    if stripNum:
        # drop a trailing suffix such as '_param' or '_list'
        name = name[:-stripNum]
    return name.replace('_', '-')
class ContentLine(VBase):
"""Holds one content line for formats like vCard and vCalendar.
For example::
<SUMMARY{u'param1' : [u'val1'], u'param2' : [u'val2']}Bastille Day Party>
@ivar name:
The uppercased name of the contentline.
@ivar params:
A dictionary of parameters and associated lists of values (the list may
be empty for empty parameters).
@ivar value:
The value of the contentline.
@ivar singletonparams:
A list of parameters for which it's unclear if the string represents the
parameter name or the parameter value. In vCard 2.1, "The value string
can be specified alone in those cases where the value is unambiguous".
This is crazy, but we have to deal with it.
@ivar encoded:
A boolean describing whether the data in the content line is encoded.
Generally, text read from a serialized vCard or vCalendar should be
considered encoded. Data added programmatically should not be encoded.
@ivar lineNumber:
An optional line number associated with the contentline.
"""
def __init__(self, name, params, value, group=None,
encoded=False, isNative=False,
lineNumber = None, *args, **kwds):
"""Take output from parseLine, convert params list to dictionary."""
# group is used as a positional argument to match parseLine's return
super(ContentLine, self).__init__(group, *args, **kwds)
self.name = name.upper()
self.value = value
self.encoded = encoded
self.params = {}
self.singletonparams = []
self.isNative = isNative
self.lineNumber = lineNumber
def updateTable(x):
if len(x) == 1:
self.singletonparams += x
else:
paramlist = self.params.setdefault(x[0].upper(), [])
paramlist.extend(x[1:])
map(updateTable, params)
qp = False
if 'ENCODING' in self.params:
if 'QUOTED-PRINTABLE' in self.params['ENCODING']:
qp = True
self.params['ENCODING'].remove('QUOTED-PRINTABLE')
if 0==len(self.params['ENCODING']):
del self.params['ENCODING']
if 'QUOTED-PRINTABLE' in self.singletonparams:
qp = True
self.singletonparams.remove('QUOTED-PRINTABLE')
if qp:
self.value = str(self.value).decode('quoted-printable')
# self.value should be unicode for iCalendar, but if quoted-printable
# is used, or if the quoted-printable state machine is used, text may be
# encoded
if type(self.value) is str:
charset = 'iso-8859-1'
if 'CHARSET' in self.params:
charsets = self.params.pop('CHARSET')
if charsets:
charset = charsets[0]
self.value = unicode(self.value, charset)
@classmethod
def duplicate(clz, copyit):
newcopy = clz('', {}, '')
newcopy.copy(copyit)
return newcopy
def copy(self, copyit):
super(ContentLine, self).copy(copyit)
self.name = copyit.name
self.value = copy.copy(copyit.value)
self.encoded = self.encoded
self.params = copy.copy(copyit.params)
self.singletonparams = copy.copy(copyit.singletonparams)
self.lineNumber = copyit.lineNumber
def __eq__(self, other):
try:
return (self.name == other.name) and (self.params == other.params) and (self.value == other.value)
except:
return False
def _getAttributeNames(self):
"""Return a list of attributes of the object.
Python 2.6 will add __dir__ to customize what attributes are returned
by dir, for now copy PyCrust so that IPython can accurately do
completion.
"""
keys = self.params.keys()
params = [param + '_param' for param in keys]
params.extend(param + '_paramlist' for param in keys)
return params
def __getattr__(self, name):
"""Make params accessible via self.foo_param or self.foo_paramlist.
Underscores, legal in python variable names, are converted to dashes,
which are legal in IANA tokens.
"""
try:
if name.endswith('_param'):
return self.params[toVName(name, 6, True)][0]
elif name.endswith('_paramlist'):
return self.params[toVName(name, 10, True)]
else:
raise exceptions.AttributeError, name
except KeyError:
raise exceptions.AttributeError, name
def __setattr__(self, name, value):
"""Make params accessible via self.foo_param or self.foo_paramlist.
Underscores, legal in python variable names, are converted to dashes,
which are legal in IANA tokens.
"""
if name.endswith('_param'):
if type(value) == list:
self.params[toVName(name, 6, True)] = value
else:
self.params[toVName(name, 6, True)] = [value]
elif name.endswith('_paramlist'):
if type(value) == list:
self.params[toVName(name, 10, True)] = value
else:
raise VObjectError("Parameter list set to a non-list")
else:
prop = getattr(self.__class__, name, None)
if isinstance(prop, property):
prop.fset(self, value)
else:
object.__setattr__(self, name, value)
def __delattr__(self, name):
try:
if name.endswith('_param'):
del self.params[toVName(name, 6, True)]
elif name.endswith('_paramlist'):
del self.params[toVName(name, 10, True)]
else:
object.__delattr__(self, name)
except KeyError:
raise exceptions.AttributeError, name
def valueRepr( self ):
"""transform the representation of the value according to the behavior,
if any"""
v = self.value
if self.behavior:
v = self.behavior.valueRepr( self )
return ascii( v )
def __str__(self):
return "<"+ascii(self.name)+ascii(self.params)+self.valueRepr()+">"
def __repr__(self):
return self.__str__().replace('\n', '\\n')
def prettyPrint(self, level = 0, tabwidth=3):
pre = ' ' * level * tabwidth
print pre, self.name + ":", self.valueRepr()
if self.params:
lineKeys= self.params.keys()
print pre, "params for ", self.name +':'
for aKey in lineKeys:
print pre + ' ' * tabwidth, aKey, ascii(self.params[aKey])
class Component(VBase):
    """A complex property that can contain multiple ContentLines.

    For our purposes, a component must start with a BEGIN:xxxx line and end with
    END:xxxx, or have a PROFILE:xxx line if a top-level component.

    @ivar contents:
        A dictionary of lists of Component or ContentLine instances. The keys
        are the lowercased names of child ContentLines or Components.
        Note that BEGIN and END ContentLines are not included in contents.
    @ivar name:
        Uppercase string used to represent this Component, i.e VCARD if the
        serialized object starts with BEGIN:VCARD.
    @ivar useBegin:
        A boolean flag determining whether BEGIN: and END: lines should
        be serialized.
    """
    def __init__(self, name=None, *args, **kwds):
        super(Component, self).__init__(*args, **kwds)
        self.contents = {}
        if name:
            self.name=name.upper()
            self.useBegin = True
        else:
            # unnamed components expect a later setProfile() call (vCard)
            self.name = ''
            self.useBegin = False
        self.autoBehavior()

    @classmethod
    def duplicate(clz, copyit):
        # build an empty instance, then copy state from copyit
        newcopy = clz()
        newcopy.copy(copyit)
        return newcopy

    def copy(self, copyit):
        super(Component, self).copy(copyit)
        # deep copy of contents
        self.contents = {}
        for key, lvalue in copyit.contents.items():
            newvalue = []
            for value in lvalue:
                newitem = value.duplicate(value)
                newvalue.append(newitem)
            self.contents[key] = newvalue
        self.name = copyit.name
        self.useBegin = copyit.useBegin

    def setProfile(self, name):
        """Assign a PROFILE to this unnamed component.

        Used by vCard, not by vCalendar.
        """
        if self.name or self.useBegin:
            if self.name == name: return
            raise VObjectError("This component already has a PROFILE or uses BEGIN.")
        self.name = name.upper()

    def _getAttributeNames(self):
        """Return a list of attributes of the object.

        Python 2.6 will add __dir__ to customize what attributes are returned
        by dir, for now copy PyCrust so that IPython can accurately do
        completion.
        """
        names = self.contents.keys()
        names.extend(name + '_list' for name in self.contents.keys())
        return names

    def __getattr__(self, name):
        """For convenience, make self.contents directly accessible.

        Underscores, legal in python variable names, are converted to dashes,
        which are legal in IANA tokens.
        """
        # if the object is being re-created by pickle, self.contents may not
        # be set, don't get into an infinite loop over the issue
        if name == 'contents':
            return object.__getattribute__(self, name)
        try:
            if name.endswith('_list'):
                return self.contents[toVName(name, 5)]
            else:
                return self.contents[toVName(name)][0]
        except KeyError:
            raise exceptions.AttributeError, name

    # attributes stored normally instead of being routed into self.contents
    normal_attributes = ['contents','name','behavior','parentBehavior','group']

    def __setattr__(self, name, value):
        """For convenience, make self.contents directly accessible.

        Underscores, legal in python variable names, are converted to dashes,
        which are legal in IANA tokens.
        """
        # only all-lowercase, non-reserved names are treated as child content
        if name not in self.normal_attributes and name.lower()==name:
            if type(value) == list:
                if name.endswith('_list'):
                    name = name[:-5]
                self.contents[toVName(name)] = value
            elif name.endswith('_list'):
                raise VObjectError("Component list set to a non-list")
            else:
                self.contents[toVName(name)] = [value]
        else:
            # honor property setters defined on subclasses
            prop = getattr(self.__class__, name, None)
            if isinstance(prop, property):
                prop.fset(self, value)
            else:
                object.__setattr__(self, name, value)

    def __delattr__(self, name):
        try:
            if name not in self.normal_attributes and name.lower()==name:
                if name.endswith('_list'):
                    del self.contents[toVName(name, 5)]
                else:
                    del self.contents[toVName(name)]
            else:
                object.__delattr__(self, name)
        except KeyError:
            raise exceptions.AttributeError, name

    def getChildValue(self, childName, default = None, childNumber = 0):
        """Return a child's value (the first, by default), or None."""
        child = self.contents.get(toVName(childName))
        if child is None:
            return default
        else:
            return child[childNumber].value

    def add(self, objOrName, group = None):
        """Add objOrName to contents, set behavior if it can be inferred.

        If objOrName is a string, create an empty component or line based on
        behavior. If no behavior is found for the object, add a ContentLine.

        group is an optional prefix to the name of the object (see
        RFC 2425).
        """
        if isinstance(objOrName, VBase):
            obj = objOrName
            if self.behavior:
                obj.parentBehavior = self.behavior
                obj.autoBehavior(True)
        else:
            name = objOrName.upper()
            try:
                # knownChildren maps name -> (..., ..., version id)
                id=self.behavior.knownChildren[name][2]
                behavior = getBehavior(name, id)
                if behavior.isComponent:
                    obj = Component(name)
                else:
                    obj = ContentLine(name, [], '', group)
                obj.parentBehavior = self.behavior
                obj.behavior = behavior
                obj = obj.transformToNative()
            except (KeyError, AttributeError):
                # no behavior information available; fall back to a bare line
                obj = ContentLine(objOrName, [], '', group)
        if obj.behavior is None and self.behavior is not None:
            if isinstance(obj, ContentLine):
                obj.behavior = self.behavior.defaultBehavior
        self.contents.setdefault(obj.name.lower(), []).append(obj)
        return obj

    def remove(self, obj):
        """Remove obj from contents."""
        named = self.contents.get(obj.name.lower())
        if named:
            try:
                named.remove(obj)
                if len(named) == 0:
                    del self.contents[obj.name.lower()]
            except ValueError:
                pass;

    def getChildren(self):
        """Return an iterable of all children."""
        for objList in self.contents.values():
            for obj in objList: yield obj

    def components(self):
        """Return an iterable of all Component children."""
        return (i for i in self.getChildren() if isinstance(i, Component))

    def lines(self):
        """Return an iterable of all ContentLine children."""
        return (i for i in self.getChildren() if isinstance(i, ContentLine))

    def sortChildKeys(self):
        # keys listed in behavior.sortFirst come first; the rest are sorted
        try:
            first = [s for s in self.behavior.sortFirst if s in self.contents]
        except:
            first = []
        return first + sorted(k for k in self.contents.keys() if k not in first)

    def getSortedChildren(self):
        return [obj for k in self.sortChildKeys() for obj in self.contents[k]]

    def setBehaviorFromVersionLine(self, versionLine):
        """Set behavior if one matches name, versionLine.value."""
        v=getBehavior(self.name, versionLine.value)
        if v: self.setBehavior(v)

    def transformChildrenToNative(self):
        """Recursively replace children with their native representation."""
        #sort to get dependency order right, like vtimezone before vevent
        for childArray in (self.contents[k] for k in self.sortChildKeys()):
            for i in xrange(len(childArray)):
                childArray[i]=childArray[i].transformToNative()
                childArray[i].transformChildrenToNative()

    def transformChildrenFromNative(self, clearBehavior=True):
        """Recursively transform native children to vanilla representations."""
        for childArray in self.contents.values():
            for i in xrange(len(childArray)):
                childArray[i]=childArray[i].transformFromNative()
                childArray[i].transformChildrenFromNative(clearBehavior)
                if clearBehavior:
                    childArray[i].behavior = None
                    childArray[i].parentBehavior = None

    def __str__(self):
        if self.name:
            return "<" + self.name + "| " + str(self.getSortedChildren()) + ">"
        else:
            return '<' + '*unnamed*' + '| ' + str(self.getSortedChildren()) + '>'

    def __repr__(self):
        return self.__str__()

    def prettyPrint(self, level = 0, tabwidth=3):
        # indented recursive dump of this component and its children
        pre = ' ' * level * tabwidth
        print pre, self.name
        if isinstance(self, Component):
            for line in self.getChildren():
                line.prettyPrint(level + 1, tabwidth)
        print
class VObjectError(Exception):
    """Base error for this module; optionally carries a source line number."""

    def __init__(self, message, lineNumber=None):
        self.message = message
        # Only set the attribute when known; __str__ checks via hasattr.
        if lineNumber is not None:
            self.lineNumber = lineNumber

    def __str__(self):
        if hasattr(self, 'lineNumber'):
            return "At line %s: %s" % (self.lineNumber, self.message)
        return repr(self.message)
class ParseError(VObjectError):
    """Raised when a serialized vCard/vCalendar stream cannot be parsed."""
    pass
class ValidateError(VObjectError):
    """Raised when a component fails behavior validation."""
    pass
class NativeError(VObjectError):
    """Raised when transforming to or from a native representation fails."""
    pass
#-------------------------- Parsing functions ----------------------------------

# parseLine regular expressions
patterns = {}

# Note that underscore is not legal for names, it's included because
# Lotus Notes uses it
patterns['name'] = '[a-zA-Z0-9\-_]+'
patterns['safe_char'] = '[^";:,]'
patterns['qsafe_char'] = '[^"]'

# the combined Python string replacement and regex syntax is a little confusing;
# remember that %(foobar)s is replaced with patterns['foobar'], so for instance
# param_value is any number of safe_chars or any number of qsaf_chars surrounded
# by double quotes.
patterns['param_value'] = ' "%(qsafe_char)s * " | %(safe_char)s * ' % patterns

# get a tuple of two elements, one will be empty, the other will have the value
patterns['param_value_grouped'] = """
" ( %(qsafe_char)s * )" | ( %(safe_char)s + )
""" % patterns

# get a parameter and its values, without any saved groups
patterns['param'] = r"""
; (?: %(name)s )                     # parameter name
(?:
    (?: = (?: %(param_value)s ) )?   # 0 or more parameter values, multiple
    (?: , (?: %(param_value)s ) )*   # parameters are comma separated
)*
""" % patterns

# get a parameter, saving groups for name and value (value still needs parsing)
patterns['params_grouped'] = r"""
; ( %(name)s )
(?: =
    (
        (?: (?: %(param_value)s ) )?   # 0 or more parameter values, multiple
        (?: , (?: %(param_value)s ) )* # parameters are comma separated
    )
)?
""" % patterns

# get a full content line, break it up into group, name, parameters, and value
patterns['line'] = r"""
^ ((?P<group> %(name)s)\.)?(?P<name> %(name)s) # name group
  (?P<params> (?: %(param)s )* )               # params group (may be empty)
: (?P<value> .* )$                             # value group
""" % patterns

# NOTE: a stray expression-statement that rebuilt the param_value pattern and
# discarded the result (dead code) was removed here.

param_values_re = re.compile(patterns['param_value_grouped'], re.VERBOSE)
params_re = re.compile(patterns['params_grouped'], re.VERBOSE)
line_re = re.compile(patterns['line'], re.DOTALL | re.VERBOSE)
begin_re = re.compile('BEGIN', re.IGNORECASE)
def parseParams(string):
    """
    >>> parseParams(';ALTREP="http://www.wiz.org"')
    [['ALTREP', 'http://www.wiz.org']]
    >>> parseParams('')
    []
    >>> parseParams(';ALTREP="http://www.wiz.org;;",Blah,Foo;NEXT=Nope;BAR')
    [['ALTREP', 'http://www.wiz.org;;', 'Blah', 'Foo'], ['NEXT', 'Nope'], ['BAR']]
    """
    allParameters = []
    # each findall tuple is (name, unparsed-values-string)
    for name, valuesString in params_re.findall(string):
        paramList = [name]
        for quoted, unquoted in param_values_re.findall(valuesString):
            # exactly one of the pair is non-empty: quoted wins when present
            paramList.append(quoted or unquoted)
        allParameters.append(paramList)
    return allParameters
def parseLine(line, lineNumber=None):
    """
    >>> parseLine("BLAH:")
    ('BLAH', [], '', None)
    >>> parseLine("RDATE:VALUE=DATE:19970304,19970504,19970704,19970904")
    ('RDATE', [], 'VALUE=DATE:19970304,19970504,19970704,19970904', None)
    >>> parseLine('DESCRIPTION;ALTREP="http://www.wiz.org":The Fall 98 Wild Wizards Conference - - Las Vegas, NV, USA')
    ('DESCRIPTION', [['ALTREP', 'http://www.wiz.org']], 'The Fall 98 Wild Wizards Conference - - Las Vegas, NV, USA', None)
    >>> parseLine("EMAIL;PREF;INTERNET:john@nowhere.com")
    ('EMAIL', [['PREF'], ['INTERNET']], 'john@nowhere.com', None)
    >>> parseLine('EMAIL;TYPE="blah",hah;INTERNET="DIGI",DERIDOO:john@nowhere.com')
    ('EMAIL', [['TYPE', 'blah', 'hah'], ['INTERNET', 'DIGI', 'DERIDOO']], 'john@nowhere.com', None)
    >>> parseLine('item1.ADR;type=HOME;type=pref:;;Reeperbahn 116;Hamburg;;20359;')
    ('ADR', [['type', 'HOME'], ['type', 'pref']], ';;Reeperbahn 116;Hamburg;;20359;', 'item1')
    >>> parseLine(":")
    Traceback (most recent call last):
    ...
    ParseError: 'Failed to parse line: :'
    """
    match = line_re.match(line)
    if match is None:
        raise ParseError("Failed to parse line: %s" % line, lineNumber)
    # Underscores are replaced with dash to work around Lotus Notes
    name = match.group('name').replace('_', '-')
    params = parseParams(match.group('params'))
    return (name, params, match.group('value'), match.group('group'))
# logical line regular expressions
patterns['lineend'] = r'(?:\r\n|\r|\n|$)'
patterns['wrap'] = r'%(lineend)s [\t ]' % patterns
patterns['logicallines'] = r"""
(
(?: [^\r\n] | %(wrap)s )*
%(lineend)s
)
""" % patterns
patterns['wraporend'] = r'(%(wrap)s | %(lineend)s )' % patterns
wrap_re = re.compile(patterns['wraporend'], re.VERBOSE)
logical_lines_re = re.compile(patterns['logicallines'], re.VERBOSE)
testLines="""
Line 0 text
, Line 0 continued.
Line 1;encoding=quoted-printable:this is an evil=
evil=
format.
Line 2 is a new line, it does not start with whitespace.
"""
def getLogicalLines(fp, allowQP=True, findBegin=False):
    """Iterate through a stream, yielding one logical line at a time.

    Because many applications still use vCard 2.1, we have to deal with the
    quoted-printable encoding for long lines, as well as the vCard 3.0 and
    vCalendar line folding technique, a whitespace character at the start
    of the line.

    Quoted-printable data will be decoded in the Behavior decoding phase.

    >>> import StringIO
    >>> f=StringIO.StringIO(testLines)
    >>> for n, l in enumerate(getLogicalLines(f)):
    ...     print "Line %s: %s" % (n, l[0])
    ...
    Line 0: Line 0 text, Line 0 continued.
    Line 1: Line 1;encoding=quoted-printable:this is an evil=
    evil=
    format.
    Line 2: Line 2 is a new line, it does not start with whitespace.
    """
    if not allowQP:
        # Fast path: slurp the whole stream and unfold with regexes.
        bytes = fp.read(-1)
        if len(bytes) > 0:
            if type(bytes[0]) == unicode:
                val = bytes
            elif not findBegin:
                val = bytes.decode('utf-8')
            else:
                # try encodings until one decodes AND contains BEGIN
                for encoding in 'utf-8', 'utf-16-LE', 'utf-16-BE', 'iso-8859-1':
                    try:
                        val = bytes.decode(encoding)
                        if begin_re.search(val) is not None:
                            break
                    except UnicodeDecodeError:
                        pass
                else:
                    raise ParseError, 'Could not find BEGIN when trying to determine encoding'
        else:
            val = bytes
        # strip off any UTF8 BOMs which Python's UTF8 decoder leaves
        val = val.lstrip( unicode( codecs.BOM_UTF8, "utf8" ) )
        lineNumber = 1
        for match in logical_lines_re.finditer(val):
            # remove the fold sequences inside the logical line
            line, n = wrap_re.subn('', match.group())
            if line != '':
                yield line, lineNumber
            lineNumber += n
    else:
        # Slow path: read line by line, with a state machine that also
        # recognizes quoted-printable soft line breaks (vCard 2.1).
        quotedPrintable=False
        newbuffer = StringIO.StringIO
        logicalLine = newbuffer()
        lineNumber = 0
        lineStartNumber = 0
        while True:
            line = fp.readline()
            if line == '':
                break
            else:
                line = line.rstrip(CRLF)
                lineNumber += 1
            if line.rstrip() == '':
                # blank line terminates the current logical line, if any
                if logicalLine.pos > 0:
                    yield logicalLine.getvalue(), lineStartNumber
                lineStartNumber = lineNumber
                logicalLine = newbuffer()
                quotedPrintable=False
                continue

            if quotedPrintable and allowQP:
                # continuation of a quoted-printable soft-broken line
                logicalLine.write('\n')
                logicalLine.write(line)
                quotedPrintable=False
            elif line[0] in SPACEORTAB:
                # folded continuation: drop the leading whitespace character
                logicalLine.write(line[1:])
            elif logicalLine.pos > 0:
                # new logical line begins; emit the previous one
                yield logicalLine.getvalue(), lineStartNumber
                lineStartNumber = lineNumber
                logicalLine = newbuffer()
                logicalLine.write(line)
            else:
                logicalLine = newbuffer()
                logicalLine.write(line)

            # hack to deal with the fact that vCard 2.1 allows parameters to be
            # encoded without a parameter name. False positives are unlikely, but
            # possible.
            val = logicalLine.getvalue()
            if val[-1]=='=' and val.lower().find('quoted-printable') >= 0:
                quotedPrintable=True

        if logicalLine.pos > 0:
            yield logicalLine.getvalue(), lineStartNumber
def textLineToContentLine(text, n=None):
    """Parse one logical line into an encoded ContentLine (n = line number)."""
    name, params, value, group = parseLine(text, n)
    return ContentLine(name, params, value, group,
                       encoded=True, lineNumber=n)
def dquoteEscape(param):
    """Return param, or "param" if ',' or ';' or ':' is in param."""
    if '"' in param:
        raise VObjectError("Double quotes aren't allowed in parameter values.")
    for reserved in ',;:':
        if reserved in param:
            return '"%s"' % param
    return param
def foldOneLine(outbuf, input, lineLength = 75):
    """Write input to outbuf folded to lineLength, terminated with CRLF.

    Folding inserts CRLF plus a single space, and takes care not to break a
    multi-byte utf-8 sequence across lines.
    """
    if len(input) < lineLength:
        # Optimize for unfolded line case
        outbuf.write(input)
    else:
        # Look for valid utf8 range and write that out
        start = 0
        written = 0
        while written < len(input):
            # Start max length -1 chars on from where we are
            offset = start + lineLength - 1
            if offset >= len(input):
                line = input[start:]
                outbuf.write(line)
                written = len(input)
            else:
                # BUG FIX: this used to test "input[offset] > 0x7F", comparing
                # a one-character string against an int -- always True under
                # Python 2's cross-type ordering and a TypeError on Python 3.
                # Compare the ordinal, mirroring the second test.
                # Check whether next char is a utf-8 continuation byte; if so,
                # step back until we sit on a valid character boundary.
                while (ord(input[offset]) > 0x7F) and ((ord(input[offset]) & 0xC0) == 0x80):
                    # Step back until we have a valid char
                    offset -= 1
                line = input[start:offset]
                outbuf.write(line)
                outbuf.write("\r\n ")
                written += offset - start
                start = offset
    # every serialized line ends with CRLF
    outbuf.write("\r\n")
def defaultSerialize(obj, buf, lineLength):
    """Encode and fold obj and its children, write to buf or return a string."""
    outbuf = buf or cStringIO.StringIO()
    if isinstance(obj, Component):
        if obj.group is None:
            groupString = ''
        else:
            groupString = obj.group + '.'
        if obj.useBegin:
            foldOneLine(outbuf, str(groupString + u"BEGIN:" + obj.name), lineLength)
        for child in obj.getSortedChildren():
            #validate is recursive, we only need to validate once
            child.serialize(outbuf, lineLength, validate=False)
        if obj.useBegin:
            foldOneLine(outbuf, str(groupString + u"END:" + obj.name), lineLength)
    elif isinstance(obj, ContentLine):
        startedEncoded = obj.encoded
        # encode in place, serialize, then decode back so obj is unchanged
        if obj.behavior and not startedEncoded: obj.behavior.encode(obj)
        s=codecs.getwriter('utf-8')(cStringIO.StringIO()) #unfolded buffer
        if obj.group is not None:
            s.write(obj.group + '.')
        s.write(obj.name.upper())
        for key, paramvals in obj.params.iteritems():
            s.write(';' + key + '=' + ','.join(dquoteEscape(p) for p in paramvals))
        s.write(':' + obj.value)
        if obj.behavior and not startedEncoded: obj.behavior.decode(obj)
        foldOneLine(outbuf, s.getvalue(), lineLength)
    # when a buffer was supplied, return it; otherwise return the string
    return buf or outbuf.getvalue()
# fixture used by the readComponents doctest below
testVCalendar="""
BEGIN:VCALENDAR
BEGIN:VEVENT
SUMMARY;blah=hi!:Bastille Day Party
END:VEVENT
END:VCALENDAR"""
class Stack:
    """LIFO stack of Components built up while parsing a stream."""

    def __init__(self):
        self.stack = []

    def __len__(self):
        return len(self.stack)

    def top(self):
        """Return the top item without removing it, or None when empty."""
        if self.stack:
            return self.stack[-1]
        return None

    def topName(self):
        """Return the name of the top item, or None when empty."""
        if self.stack:
            return self.stack[-1].name
        return None

    def modifyTop(self, item):
        """Add item to the top component, creating one if the stack is empty."""
        current = self.top()
        if current:
            current.add(item)
        else:
            container = Component()
            self.push(container)
            container.add(item)  # add sets behavior for item and children

    def push(self, obj):
        self.stack.append(obj)

    def pop(self):
        return self.stack.pop()
def readComponents(streamOrString, validate=False, transform=True,
                   findBegin=True, ignoreUnreadable=False,
                   allowQP=False):
    """Generate one Component at a time from a stream.

    >>> import StringIO
    >>> f = StringIO.StringIO(testVCalendar)
    >>> cal=readComponents(f).next()
    >>> cal
    <VCALENDAR| [<VEVENT| [<SUMMARY{u'BLAH': [u'hi!']}Bastille Day Party>]>]>
    >>> cal.vevent.summary
    <SUMMARY{u'BLAH': [u'hi!']}Bastille Day Party>
    """
    # accept either a string or a file-like stream
    if isinstance(streamOrString, basestring):
        stream = StringIO.StringIO(streamOrString)
    else:
        stream = streamOrString
    try:
        stack = Stack()
        versionLine = None
        n = 0
        for line, n in getLogicalLines(stream, allowQP, findBegin):
            if ignoreUnreadable:
                try:
                    vline = textLineToContentLine(line, n)
                except VObjectError, e:
                    # log and skip lines that fail to parse
                    if e.lineNumber is not None:
                        msg = "Skipped line %(lineNumber)s, message: %(msg)s"
                    else:
                        msg = "Skipped a line, message: %(msg)s"
                    logger.error(msg % {'lineNumber' : e.lineNumber,
                                        'msg' : e.message})
                    continue
            else:
                vline = textLineToContentLine(line, n)
            if vline.name == "VERSION":
                # remember VERSION so the finished component's behavior can
                # be chosen from it
                versionLine = vline
                stack.modifyTop(vline)
            elif vline.name == "BEGIN":
                stack.push(Component(vline.value, group=vline.group))
            elif vline.name == "PROFILE":
                if not stack.top(): stack.push(Component())
                stack.top().setProfile(vline.value)
            elif vline.name == "END":
                if len(stack) == 0:
                    err = "Attempted to end the %s component, \
but it was never opened" % vline.value
                    raise ParseError(err, n)
                if vline.value.upper() == stack.topName(): #START matches END
                    if len(stack) == 1:
                        # a top-level component is complete; finish it up
                        component=stack.pop()
                        if versionLine is not None:
                            component.setBehaviorFromVersionLine(versionLine)
                        else:
                            behavior = getBehavior(component.name)
                            if behavior:
                                component.setBehavior(behavior)
                        if validate: component.validate(raiseException=True)
                        if transform: component.transformChildrenToNative()
                        yield component #EXIT POINT
                    else: stack.modifyTop(stack.pop())
                else:
                    err = "%s component wasn't closed"
                    raise ParseError(err % stack.topName(), n)
            else: stack.modifyTop(vline) #not a START or END line
        if stack.top():
            # leftover top-level content (e.g. a PROFILE-style vCard)
            if stack.topName() is None:
                logger.warning("Top level component was never named")
            elif stack.top().useBegin:
                raise ParseError("Component %s was never closed" % (stack.topName()), n)
            yield stack.pop()
    except ParseError, e:
        # attach the original input to aid debugging, then re-raise
        e.input = streamOrString
        raise
def readOne(stream, validate=False, transform=True, findBegin=True,
            ignoreUnreadable=False, allowQP=False):
    """Return the first component from stream."""
    components = readComponents(stream, validate, transform, findBegin,
                                ignoreUnreadable, allowQP)
    return components.next()
#--------------------------- version registry ----------------------------------
# Maps uppercased names to lists of (id, behavior) pairs; the first entry in
# each list is the default behavior for that name.
__behaviorRegistry={}
def registerBehavior(behavior, name=None, default=False, id=None):
    """Register the given behavior.

    If default is True (or if this is the first version registered with this
    name), the version will be the default if no id is given.
    """
    if not name:
        name = behavior.name.upper()
    if id is None:
        id = behavior.versionString
    entry = (id, behavior)
    existing = __behaviorRegistry.get(name)
    if existing is None:
        __behaviorRegistry[name] = [entry]
    elif default:
        # defaults go to the front so getBehavior picks them first
        existing.insert(0, entry)
    else:
        existing.append(entry)
def getBehavior(name, id=None):
    """Return a matching behavior if it exists, or None.

    If id is None, return the default for name.
    """
    versions = __behaviorRegistry.get(name.upper())
    if versions is None:
        return None
    if id:
        for versionId, candidate in versions:
            if versionId == id:
                return candidate
    # no id given, or no exact match: fall back to the default (first) entry
    return versions[0][1]
def newFromBehavior(name, id=None):
    """Given a name, return a behaviored ContentLine or Component."""
    name = name.upper()
    behavior = getBehavior(name, id)
    if behavior is None:
        raise VObjectError("No behavior found named %s" % name)
    # behaviors know whether they describe a container or a single line
    if behavior.isComponent:
        result = Component(name)
    else:
        result = ContentLine(name, [], '')
    result.behavior = behavior
    result.isNative = False
    return result
#--------------------------- Helper function -----------------------------------
def backslashEscape(s):
    """Escape backslashes, semicolons, commas and newlines for serialization."""
    # Use explicit double-backslash escapes.  The original "\;" and "\,"
    # relied on Python keeping invalid escape sequences verbatim, which
    # emits SyntaxWarning/DeprecationWarning on modern interpreters; the
    # produced strings are byte-identical.
    s = s.replace("\\", "\\\\").replace(";", "\\;").replace(",", "\\,")
    return s.replace("\r\n", "\\n").replace("\n", "\\n").replace("\r", "\\n")
#------------------- Testing and running functions -----------------------------
# Running this module directly executes the package's doctest suite.
if __name__ == '__main__':
    import tests
    tests._test()
# ---------------------------------------------------------------------------
"""
@package mi.instrument.uw.bars.ooicore.driver
@file mi/instrument/uw/bars/ooicore/driver.py
@author Steve Foley
@brief Driver for the ooicore
Release notes:
This supports the UW BARS instrument from the Marv Tilley lab
"""
import re
import time
from mi.core.common import BaseEnum, Units
from mi.core.exceptions import SampleException
from mi.core.exceptions import InstrumentProtocolException
from mi.core.exceptions import InstrumentParameterException
from mi.core.exceptions import InstrumentTimeoutException
from mi.core.instrument.data_particle import DataParticle, DataParticleKey, CommonDataParticleType
from mi.core.instrument.driver_dict import DriverDictKey
from mi.core.instrument.chunker import StringChunker
from mi.core.instrument.instrument_fsm import ThreadSafeFSM
from mi.core.instrument.instrument_driver import SingleConnectionInstrumentDriver
from mi.core.instrument.instrument_driver import DriverEvent
from mi.core.instrument.instrument_driver import DriverAsyncEvent
from mi.core.instrument.instrument_driver import DriverProtocolState
from mi.core.instrument.instrument_driver import DriverParameter
from mi.core.instrument.instrument_driver import DriverConfigKey
from mi.core.driver_scheduler import DriverSchedulerConfigKey
from mi.core.driver_scheduler import TriggerType
from mi.core.instrument.instrument_protocol import MenuInstrumentProtocol, InitializationType
from mi.core.instrument.protocol_param_dict import ProtocolParameterDict, ParameterDictVisibility, ParameterDictType
from mi.core.log import get_logger
from mi.core.log import get_logging_metaclass
__author__ = 'Steve Foley'
__license__ = 'Apache 2.0'

# Reusable regex fragments for building instrument response parsers.
common_matches = {
    'float': r'-?\d*\.?\d+',
    'int': r'-?\d+',
    'str': r'\w+',
    'fn': r'\S+',
    'rest': r'.*\r\n',
    'tod': r'\d{8}T\d{6}',
    'data': r'[^\*]+',
    'crc': r'[0-9a-fA-F]{4}'
}

log = get_logger()

Directions = MenuInstrumentProtocol.MenuTree.Directions

# A sample is 12 whitespace-separated signed decimal numbers ending in CRLF.
# NOTE: raw strings here; the original non-raw '\s+' / '\d' literals relied on
# invalid escape sequences (SyntaxWarning on modern Python) -- the resulting
# pattern text is identical.
SAMPLE_PATTERN = r'\s+'.join([r'(-?\d+\.\d+)'] * 12) + '\r\n'
SAMPLE_REGEX = re.compile(SAMPLE_PATTERN)

# newline.
NEWLINE = '\r'

# default timeout.
TIMEOUT = 10
class ScheduledJob(BaseEnum):
    """Identifiers for jobs run by the driver scheduler."""
    ACQUIRE_STATUS = 'acquire_status'
class DataParticleType(BaseEnum):
    """Names of the data particle streams published by this driver."""
    RAW = CommonDataParticleType.RAW
    TRHPH_PARSED = 'trhph_sample'
    TRHPH_STATUS = 'trhph_status'
class Command(BaseEnum):
    """Instrument commands.

    COMMAND_CHAR below maps most of these names to the single menu
    character actually written to the instrument.
    """
    DIRECT_SET = "SET"
    BACK_MENU = "BACK_MENU"
    BLANK = "BLANK"
    BREAK = "BREAK"
    START_AUTOSAMPLE = "START_AUTOSAMPLE"
    CHANGE_PARAM = "CHANGE_PARAM"
    SHOW_PARAM = "SHOW_PARAM"
    SHOW_STATUS = "SHOW_STATUS"
    SENSOR_POWER = "SENSOR_POWER"
    CHANGE_CYCLE_TIME = "CHANGE_CYCLE_TIME"
    CHANGE_VERBOSE = "CHANGE_VERBOSE"
    CHANGE_METADATA_POWERUP = "CHANGE_METADATA_POWERUP"
    CHANGE_METADATA_RESTART = "CHANGE_METADATA_RESTART"
    CHANGE_RES_SENSOR_POWER = "CHANGE_RES_SENSOR_POWER"
    CHANGE_INST_AMP_POWER = "CHANGE_INST_AMP_POWER"
    CHANGE_EH_ISOLATION_AMP_POWER = "CHANGE_EH_ISOLATION_AMP_POWER"
    CHANGE_HYDROGEN_POWER = "CHANGE_HYDROGEN_POWER"
    CHANGE_REFERENCE_TEMP_POWER = "CHANGE_REFERENCE_TEMP_POWER"
# Strings should line up with Command class
# Menu character sent for each command; note the same digit can mean different
# things depending on which submenu the protocol is currently in.
COMMAND_CHAR = {
    'BACK_MENU': '9',
    'BLANK': '\r',
    'BREAK': chr(0x13),  # Ctrl-S
    'START_AUTOSAMPLE': '1',
    'CHANGE_PARAM': '2',
    'SHOW_PARAM': '6',
    'SHOW_STATUS': '5',
    'SENSOR_POWER': '4',
    'CHANGE_CYCLE_TIME': '1',
    'CHANGE_VERBOSE': '2',
    'CHANGE_METADATA_POWERUP': '3',
    'CHANGE_METADATA_RESTART': '4',
    'CHANGE_RES_SENSOR_POWER': '1',
    'CHANGE_INST_AMP_POWER': '2',
    'CHANGE_EH_ISOLATION_AMP_POWER': '3',
    'CHANGE_HYDROGEN_POWER': '4',
    'CHANGE_REFERENCE_TEMP_POWER': '5',
}
class SubMenu(BaseEnum):
    """Identifiers for the instrument's menu tree nodes (see MENU below)."""
    MAIN = "SUBMENU_MAIN"
    CHANGE_PARAM = "SUBMENU_CHANGE_PARAM"
    SHOW_PARAM = "SUBMENU_SHOW_PARAM"
    SHOW_STATUS = "SUBMENU_SHOW_STATUS"
    SENSOR_POWER = "SUBMENU_SENSOR_POWER"
    CYCLE_TIME = "SUBMENU_CYCLE_TIME"
    VERBOSE = "SUBMENU_VERBOSE"
    METADATA_POWERUP = "SUBMENU_METADATA_POWERUP"
    METADATA_RESTART = "SUBMENU_METADATA_RESTART"
    RES_SENSOR_POWER = "SUBMENU_RES_SENSOR_POWER"
    INST_AMP_POWER = "SUBMENU_INST_AMP_POWER"
    EH_ISOLATION_AMP_POWER = "SUBMENU_EH_ISOLATION_AMP_POWER"
    HYDROGEN_POWER = "SUBMENU_HYDROGEN_POWER"
    REFERENCE_TEMP_POWER = "SUBMENU_REFERENCE_TEMP_POWER"
class ProtocolState(BaseEnum):
    """
    Protocol states
    enum.
    """
    UNKNOWN = DriverProtocolState.UNKNOWN
    COMMAND = DriverProtocolState.COMMAND
    DIRECT_ACCESS = DriverProtocolState.DIRECT_ACCESS
    AUTOSAMPLE = DriverProtocolState.AUTOSAMPLE
class ProtocolEvent(BaseEnum):
    """
    Protocol events
    """
    ENTER = DriverEvent.ENTER
    EXIT = DriverEvent.EXIT
    GET = DriverEvent.GET
    SET = DriverEvent.SET
    DISCOVER = DriverEvent.DISCOVER
    START_DIRECT = DriverEvent.START_DIRECT
    STOP_DIRECT = DriverEvent.STOP_DIRECT
    START_AUTOSAMPLE = DriverEvent.START_AUTOSAMPLE
    STOP_AUTOSAMPLE = DriverEvent.STOP_AUTOSAMPLE
    EXECUTE_DIRECT = DriverEvent.EXECUTE_DIRECT
    ACQUIRE_STATUS = DriverEvent.ACQUIRE_STATUS
    # scheduler-triggered variant of ACQUIRE_STATUS
    SCHEDULED_ACQUIRE_STATUS = 'DRIVER_EVENT_SCHEDULED_ACQUIRE_STATUS'
class Capability(BaseEnum):
    """
    Capabilities exposed to the user; the subset of ProtocolEvent values
    that _filter_capabilities allows through.
    """
    GET = ProtocolEvent.GET
    SET = ProtocolEvent.SET
    START_AUTOSAMPLE = ProtocolEvent.START_AUTOSAMPLE
    STOP_AUTOSAMPLE = ProtocolEvent.STOP_AUTOSAMPLE
    ACQUIRE_STATUS = ProtocolEvent.ACQUIRE_STATUS
    START_DIRECT = DriverEvent.START_DIRECT
    STOP_DIRECT = DriverEvent.STOP_DIRECT
    DISCOVER = DriverEvent.DISCOVER
# Device specific parameters.
class Parameter(DriverParameter):
    """
    Device parameters (values are the externally visible parameter names).
    """
    CYCLE_TIME = "trhph_cycle_time"
    VERBOSE = "verbose"
    METADATA_POWERUP = "trhph_metadata_on_powerup"
    METADATA_RESTART = "trhph_metadata_on_restart"
    RES_SENSOR_POWER = "trhph_res_power_status"
    INST_AMP_POWER = "trhph_thermo_hydro_amp_power_status"
    EH_ISOLATION_AMP_POWER = "trhph_eh_amp_power_status"
    HYDROGEN_POWER = "trhph_hydro_sensor_power_status"
    REFERENCE_TEMP_POWER = "trhph_ref_temp_power_status"
    # Driver-side scheduler interval as "HH:MM:SS" (see start_scheduled_job);
    # '00:00:00' disables the scheduled status acquisition.
    RUN_ACQUIRE_STATUS_INTERVAL = 'status_interval'
# Device prompts.
class Prompt(BaseEnum):
    """
    io prompts: literal strings the instrument emits that the protocol
    waits for after sending a command.
    """
    CMD_PROMPT = "-->"
    BREAK_ACK = "\r\n"
    NONE = ""
    DEAD_END_PROMPT = "Press Enter to return to the Main Menu. -->"
    CONTINUE_PROMPT = "Press ENTER to continue."
    MAIN_MENU = "Enter 0, 1, 2, 3, 4, 5, or 6 here -->"
    # NOTE: CHANGE_PARAM_MENU and SENSOR_POWER_MENU are the same literal
    # text; only the menu context distinguishes them.
    CHANGE_PARAM_MENU = "Enter 0 through 9 here -->"
    SENSOR_POWER_MENU = "Enter 0 through 9 here -->"
    CYCLE_TIME_PROMPT = "Enter 1 for Seconds, 2 for Minutes -->"
    CYCLE_TIME_SEC_VALUE_PROMPT = "Enter a new value between 15 and 59 here -->"
    CYCLE_TIME_MIN_VALUE_PROMPT = "Enter a new value between 1 and 60 here -->"
    VERBOSE_PROMPT = "Enter 2 for Verbose, 1 for just Data. -->"
    METADATA_PROMPT = "Enter 2 for Yes, 1 for No. -->"
# Prompts that indicate we have landed in *some* menu screen; used when
# backing out toward the root menu (see Protocol._go_to_root_menu).
MENU_PROMPTS = [Prompt.MAIN_MENU, Prompt.CHANGE_PARAM_MENU,
                Prompt.SENSOR_POWER_MENU, Prompt.CYCLE_TIME_PROMPT,
                Prompt.DEAD_END_PROMPT, Prompt.CONTINUE_PROMPT]
# Navigation tree: maps each SubMenu destination to the ordered list of
# Directions (command to send, prompt expected) that reaches it. Deeper
# destinations first reference their parent submenu as the initial step.
MENU = MenuInstrumentProtocol.MenuTree({
    SubMenu.MAIN: [Directions(command=Command.BLANK, response=Prompt.MAIN_MENU)],
    SubMenu.CHANGE_PARAM: [Directions(command=Command.CHANGE_PARAM,
                                      response=Prompt.CHANGE_PARAM_MENU)],
    SubMenu.SHOW_PARAM: [Directions(SubMenu.CHANGE_PARAM),
                         Directions(command=Command.SHOW_PARAM,
                                    response=Prompt.CONTINUE_PROMPT)],
    SubMenu.SHOW_STATUS: [Directions(command=Command.SHOW_STATUS,
                                     response=Prompt.DEAD_END_PROMPT)],
    SubMenu.SENSOR_POWER: [Directions(command=Command.SENSOR_POWER,
                                      response=Prompt.SENSOR_POWER_MENU)],
    SubMenu.CYCLE_TIME: [Directions(SubMenu.CHANGE_PARAM),
                         Directions(command=Command.CHANGE_CYCLE_TIME,
                                    response=Prompt.CYCLE_TIME_PROMPT)],
    SubMenu.VERBOSE: [Directions(SubMenu.CHANGE_PARAM),
                      Directions(command=Command.CHANGE_VERBOSE,
                                 response=Prompt.VERBOSE_PROMPT)],
    SubMenu.METADATA_POWERUP: [Directions(SubMenu.CHANGE_PARAM),
                               Directions(command=Command.CHANGE_METADATA_POWERUP,
                                          response=Prompt.METADATA_PROMPT)],
    SubMenu.METADATA_RESTART: [Directions(SubMenu.CHANGE_PARAM),
                               Directions(command=Command.CHANGE_METADATA_RESTART,
                                          response=Prompt.METADATA_PROMPT)],
    SubMenu.RES_SENSOR_POWER: [Directions(SubMenu.SENSOR_POWER),
                               Directions(command=Command.CHANGE_RES_SENSOR_POWER,
                                          response=Prompt.SENSOR_POWER_MENU)],
    SubMenu.INST_AMP_POWER: [Directions(SubMenu.SENSOR_POWER),
                             Directions(command=Command.CHANGE_INST_AMP_POWER,
                                        response=Prompt.SENSOR_POWER_MENU)],
    SubMenu.EH_ISOLATION_AMP_POWER: [Directions(SubMenu.SENSOR_POWER),
                                     Directions(command=Command.CHANGE_EH_ISOLATION_AMP_POWER,
                                                response=Prompt.SENSOR_POWER_MENU)],
    SubMenu.HYDROGEN_POWER: [Directions(SubMenu.SENSOR_POWER),
                             Directions(command=Command.CHANGE_HYDROGEN_POWER,
                                        response=Prompt.SENSOR_POWER_MENU)],
    SubMenu.REFERENCE_TEMP_POWER: [Directions(SubMenu.SENSOR_POWER),
                                   Directions(command=Command.CHANGE_REFERENCE_TEMP_POWER,
                                              response=Prompt.SENSOR_POWER_MENU)],
})
class BarsStatusParticleKey(BaseEnum):
    """
    Value IDs published in a BarsStatusParticle; one per field parsed from
    the instrument's status screen.
    """
    SYSTEM_INFO = "trhph_system_info"
    EPROM_STATUS = "trhph_eprom_status"
    CYCLE_TIME = "trhph_cycle_time"
    CYCLE_TIME_UNIT = "trhph_cycle_time_units"
    POWER_CONTROL_WORD = "trhph_power_control_word"
    RES_POWER = "trhph_res_power_status"
    THERMO_HYDRO_AMP_POWER = "trhph_thermo_hydro_amp_power_status"
    EH_AMP_POWER = "trhph_eh_amp_power_status"
    HYDRO_SENSOR_POWER = "trhph_hydro_sensor_power_status"
    REF_TEMP_POWER = "trhph_ref_temp_power_status"
    METADATA_ON_POWERUP = "trhph_metadata_on_powerup"
    METADATA_ON_RESTART = "trhph_metadata_on_restart"
class BarsStatusParticle(DataParticle):
    """
    Routines for parsing the raw status screen emitted by the instrument
    into a status data particle. Overrides the building of values; the rest
    of the behavior comes from the DataParticle base class.
    """
    _data_particle_type = DataParticleType.TRHPH_STATUS

    @staticmethod
    def regex():
        """
        Regular expression to match a status pattern.
        Verbose-mode ((?x)) pattern interpolated with the module-level
        common_matches dictionary (%(rest)s, %(int)s sub-patterns).
        @return: regex string
        """
        pattern = r"""
        (?x)
        (?P<system_info> System \s Name: (%(rest)s){7}) %(rest)s %(rest)s
        (?P<eprom_status> %(int)s) \s+ = \s+ Eprom .*\r\n
        (?P<cycle_time> %(int)s) \s+ = \s+ Cycle \s Time .*\r\n
        (?P<unit> %(int)s) \s+ = \s+ Minutes \s or \s Seconds .*\r\n
        (?P<power_control> %(int)s) \s+ = \s+ Power \s Control .*\r\n
        (?P<res_power> %(int)s) \s+ = \s+ Res \s Power .*\r\n
        (?P<thermo_hydro_amp> %(int)s) \s+ = \s+ Thermocouple \s \& \s Hydrogen .*\r\n
        (?P<eh_amp_power> %(int)s) \s+ = \s+ eh \s Amp .*\r\n
        (?P<hydro_sensor_power>%(int)s) \s+ = \s+ Hydrogen \s Sensor .*\r\n
        (?P<ref_temp_power> %(int)s) \s+ = \s+ Reference \s Temperature .*\r\n
        (?P<print_on_powerup> %(int)s) \s+ = \s+ .* Power \s up.*\r\n
        (?P<print_on_restart> %(int)s) \s+ = \s+ .* Restart \s Data.*\r\n
        """ % common_matches
        return pattern

    @staticmethod
    def regex_compiled():
        """
        Get the compiled regex pattern.
        @return: compiled re
        """
        return re.compile(BarsStatusParticle.regex())

    def _build_parsed_values(self):
        """
        Take something in the status format and split it into status values
        (with an appropriate tag).
        @return: list of {VALUE_ID, VALUE} dicts, one per status field
        @throw SampleException If there is a problem with status creation
        """
        match = BarsStatusParticle.regex_compiled().match(self.raw_data)
        if not match:
            raise SampleException("No regex match of status data: [%s]" % self.raw_data)

        log.trace("Matching sample %r", match.groups())

        system_info = match.group('system_info')
        eprom_status = int(match.group('eprom_status'))
        cycle_time = int(match.group('cycle_time'))
        unit = int(match.group('unit'))
        power_control = int(match.group('power_control'))
        res_power = int(match.group('res_power'))
        thermo_hydro_amp = int(match.group('thermo_hydro_amp'))
        eh_amp_power = int(match.group('eh_amp_power'))
        hydro_sensor_power = int(match.group('hydro_sensor_power'))
        ref_temp_power = int(match.group('ref_temp_power'))
        print_on_powerup = int(match.group('print_on_powerup'))
        # BUGFIX: this previously read the 'print_on_powerup' group, so the
        # restart flag always mirrored the powerup flag.
        print_on_restart = int(match.group('print_on_restart'))

        result = [{DataParticleKey.VALUE_ID: BarsStatusParticleKey.SYSTEM_INFO,
                   DataParticleKey.VALUE: system_info},
                  {DataParticleKey.VALUE_ID: BarsStatusParticleKey.EPROM_STATUS,
                   DataParticleKey.VALUE: eprom_status},
                  {DataParticleKey.VALUE_ID: BarsStatusParticleKey.CYCLE_TIME,
                   DataParticleKey.VALUE: cycle_time},
                  {DataParticleKey.VALUE_ID: BarsStatusParticleKey.CYCLE_TIME_UNIT,
                   DataParticleKey.VALUE: unit},
                  {DataParticleKey.VALUE_ID: BarsStatusParticleKey.POWER_CONTROL_WORD,
                   DataParticleKey.VALUE: power_control},
                  {DataParticleKey.VALUE_ID: BarsStatusParticleKey.RES_POWER,
                   DataParticleKey.VALUE: res_power},
                  {DataParticleKey.VALUE_ID: BarsStatusParticleKey.THERMO_HYDRO_AMP_POWER,
                   DataParticleKey.VALUE: thermo_hydro_amp},
                  {DataParticleKey.VALUE_ID: BarsStatusParticleKey.EH_AMP_POWER,
                   DataParticleKey.VALUE: eh_amp_power},
                  {DataParticleKey.VALUE_ID: BarsStatusParticleKey.HYDRO_SENSOR_POWER,
                   DataParticleKey.VALUE: hydro_sensor_power},
                  {DataParticleKey.VALUE_ID: BarsStatusParticleKey.REF_TEMP_POWER,
                   DataParticleKey.VALUE: ref_temp_power},
                  {DataParticleKey.VALUE_ID: BarsStatusParticleKey.METADATA_ON_POWERUP,
                   DataParticleKey.VALUE: print_on_powerup},
                  {DataParticleKey.VALUE_ID: BarsStatusParticleKey.METADATA_ON_RESTART,
                   DataParticleKey.VALUE: print_on_restart}]
        return result
class BarsDataParticleKey(BaseEnum):
    """
    Value IDs published in a BarsDataParticle; one per field parsed from
    an autosample data record.
    """
    RESISTIVITY_5 = "resistivity_5"
    RESISTIVITY_X1 = "resistivity_x1"
    RESISTIVITY_X5 = "resistivity_x5"
    # At the moment, none of the BARS/TRHPH instruments have hydrogen sensors attached and are therefore not
    # producing such data. Commenting this out to prevent useless data from being produced in the data particle,
    # but keeping it around in case we need it again in the future
    # HYDROGEN_5 = "hydrogen_5"
    # HYDROGEN_X1 = "hydrogen_x1"
    # HYDROGEN_X5 = "hydrogen_x5"
    # EH_SENSOR = "eh_sensor"
    REFERENCE_TEMP_VOLTS = "ref_temp_volts"
    REFERENCE_TEMP_DEG_C = "ref_temp_degc"
    RESISTIVITY_TEMP_VOLTS = "resistivity_temp_volts"
    RESISTIVITY_TEMP_DEG_C = "resistivity_temp_degc"
    BATTERY_VOLTAGE = "battery_voltage"
class BarsDataParticle(DataParticle):
    """
    Routines for parsing a raw autosample record into a data particle.
    Overrides the building of values; the rest of the behavior comes from
    the DataParticle base class.
    """
    _data_particle_type = DataParticleType.TRHPH_PARSED

    @staticmethod
    def regex():
        """
        Regular expression to match a sample pattern
        @return: regex string
        """
        return SAMPLE_PATTERN

    @staticmethod
    def regex_compiled():
        """
        get the compiled regex pattern
        @return: compiled re
        """
        return re.compile(BarsDataParticle.regex())

    def _build_parsed_values(self):
        """
        Take something in the sample format and split it into individual
        values (with an appropriate tag)
        @return: list of {VALUE_ID, VALUE} dicts, one per sample field
        @throw SampleException If there is a problem with sample creation
        """
        match = BarsDataParticle.regex_compiled().match(self.raw_data)
        if not match:
            raise SampleException("No regex match of parsed sample data: [%s]" %
                                  self.raw_data)

        log.debug("Matching Sample Data Particle %r", match.groups())

        # Map each published key to its regex group index. Groups 4-7
        # (hydrogen_5/x1/x5 and eh_sensor) are deliberately skipped: none of
        # the BARS/TRHPH instruments currently have hydrogen sensors attached,
        # so those fields would only produce useless data. Re-enable them here
        # (and in BarsDataParticleKey) if sensors are ever fitted again.
        field_map = [
            (BarsDataParticleKey.RESISTIVITY_5, 1),
            (BarsDataParticleKey.RESISTIVITY_X1, 2),
            (BarsDataParticleKey.RESISTIVITY_X5, 3),
            (BarsDataParticleKey.REFERENCE_TEMP_VOLTS, 8),
            (BarsDataParticleKey.REFERENCE_TEMP_DEG_C, 9),
            (BarsDataParticleKey.RESISTIVITY_TEMP_VOLTS, 10),
            (BarsDataParticleKey.RESISTIVITY_TEMP_DEG_C, 11),
            (BarsDataParticleKey.BATTERY_VOLTAGE, 12),
        ]
        return [{DataParticleKey.VALUE_ID: value_id,
                 DataParticleKey.VALUE: float(match.group(group_index))}
                for (value_id, group_index) in field_map]
###############################################################################
# Driver
###############################################################################
class InstrumentDriver(SingleConnectionInstrumentDriver):
    """
    InstrumentDriver subclass.
    Subclasses SingleConnectionInstrumentDriver with connection state
    machine.
    """

    ########################################################################
    # Protocol builder.
    ########################################################################

    def _build_protocol(self):
        """
        Construct the driver protocol state machine.
        """
        # Wire the module-level MENU navigation tree, prompt set, and newline
        # into the protocol; self._driver_event is the callback the protocol
        # uses to raise asynchronous driver events.
        self._protocol = Protocol(MENU, Prompt, NEWLINE, self._driver_event)
###############################################################################
# Protocol
################################################################################
class Protocol(MenuInstrumentProtocol):
"""
Instrument protocol class
Subclasses MenuInstrumentProtocol
"""
__metaclass__ = get_logging_metaclass(log_level='debug')
    def __init__(self, menu, prompts, newline, driver_event):
        """
        Protocol constructor.
        @param menu The MenuTree describing how to reach each submenu.
        @param prompts A BaseEnum class containing instrument prompts.
        @param newline The newline.
        @param driver_event Driver process event callback.
        """
        # Construct protocol superclass.
        MenuInstrumentProtocol.__init__(self, menu, prompts, newline, driver_event)

        # Build protocol state machine.
        self._protocol_fsm = ThreadSafeFSM(ProtocolState, ProtocolEvent,
                                           ProtocolEvent.ENTER, ProtocolEvent.EXIT)

        # Add event handlers for protocol state machine.
        self._protocol_fsm.add_handler(ProtocolState.UNKNOWN, ProtocolEvent.ENTER, self._handler_unknown_enter)
        self._protocol_fsm.add_handler(ProtocolState.UNKNOWN, ProtocolEvent.DISCOVER, self._handler_discover)
        self._protocol_fsm.add_handler(ProtocolState.COMMAND, ProtocolEvent.ENTER, self._handler_command_enter)
        self._protocol_fsm.add_handler(ProtocolState.COMMAND, ProtocolEvent.GET, self._handler_command_get)
        self._protocol_fsm.add_handler(ProtocolState.COMMAND, ProtocolEvent.SET, self._handler_command_set)
        self._protocol_fsm.add_handler(ProtocolState.COMMAND, ProtocolEvent.START_AUTOSAMPLE,
                                       self._handler_command_autosample)
        self._protocol_fsm.add_handler(ProtocolState.COMMAND, ProtocolEvent.START_DIRECT,
                                       self._handler_command_start_direct)
        self._protocol_fsm.add_handler(ProtocolState.COMMAND, ProtocolEvent.ACQUIRE_STATUS,
                                       self._handler_command_acquire_status)
        self._protocol_fsm.add_handler(ProtocolState.AUTOSAMPLE, ProtocolEvent.STOP_AUTOSAMPLE,
                                       self._handler_autosample_stop)
        self._protocol_fsm.add_handler(ProtocolState.AUTOSAMPLE, ProtocolEvent.SCHEDULED_ACQUIRE_STATUS,
                                       self._handler_autosample_acquire_status)
        self._protocol_fsm.add_handler(ProtocolState.DIRECT_ACCESS, ProtocolEvent.ENTER,
                                       self._handler_direct_access_enter)
        self._protocol_fsm.add_handler(ProtocolState.DIRECT_ACCESS, ProtocolEvent.EXIT,
                                       self._handler_direct_access_exit)
        self._protocol_fsm.add_handler(ProtocolState.DIRECT_ACCESS, ProtocolEvent.STOP_DIRECT,
                                       self._handler_direct_access_stop_direct)
        self._protocol_fsm.add_handler(ProtocolState.DIRECT_ACCESS, ProtocolEvent.EXECUTE_DIRECT,
                                       self._handler_direct_access_execute_direct)
        self._protocol_fsm.add_handler(ProtocolState.DIRECT_ACCESS, ProtocolEvent.SCHEDULED_ACQUIRE_STATUS,
                                       self._handler_direct_access_scheduled_acquire_status)

        # Construct the parameter dictionary containing device parameters,
        # current parameter values, and set formatting functions.
        self._build_driver_dict()
        self._build_command_dict()
        self._build_param_dict()

        # Add build handlers for device commands.
        self._add_build_handler(Command.BACK_MENU, self._build_menu_command)
        self._add_build_handler(Command.BLANK, self._build_solo_command)
        self._add_build_handler(Command.START_AUTOSAMPLE, self._build_menu_command)
        self._add_build_handler(Command.CHANGE_PARAM, self._build_menu_command)
        self._add_build_handler(Command.SHOW_PARAM, self._build_menu_command)
        self._add_build_handler(Command.SHOW_STATUS, self._build_menu_command)
        self._add_build_handler(Command.SENSOR_POWER, self._build_menu_command)
        self._add_build_handler(Command.DIRECT_SET, self._build_direct_command)
        self._add_build_handler(Command.CHANGE_CYCLE_TIME, self._build_menu_command)
        self._add_build_handler(Command.CHANGE_VERBOSE, self._build_menu_command)
        self._add_build_handler(Command.CHANGE_METADATA_RESTART, self._build_menu_command)
        self._add_build_handler(Command.CHANGE_METADATA_POWERUP, self._build_menu_command)
        self._add_build_handler(Command.CHANGE_RES_SENSOR_POWER, self._build_menu_command)
        self._add_build_handler(Command.CHANGE_INST_AMP_POWER, self._build_menu_command)
        self._add_build_handler(Command.CHANGE_EH_ISOLATION_AMP_POWER, self._build_menu_command)
        self._add_build_handler(Command.CHANGE_HYDROGEN_POWER, self._build_menu_command)
        self._add_build_handler(Command.CHANGE_REFERENCE_TEMP_POWER, self._build_menu_command)

        # Add response handlers for device commands.
        self._add_response_handler(Command.BACK_MENU, self._parse_menu_change_response)
        self._add_response_handler(Command.BLANK, self._parse_menu_change_response)
        self._add_response_handler(Command.SHOW_PARAM, self._parse_show_param_response)
        self._add_response_handler(Command.SHOW_STATUS, self._parse_show_param_response)
        self._add_response_handler(Command.CHANGE_CYCLE_TIME, self._parse_menu_change_response)
        self._add_response_handler(Command.CHANGE_VERBOSE, self._parse_menu_change_response)
        self._add_response_handler(Command.CHANGE_METADATA_RESTART, self._parse_menu_change_response)
        self._add_response_handler(Command.CHANGE_METADATA_POWERUP, self._parse_menu_change_response)
        self._add_response_handler(Command.CHANGE_RES_SENSOR_POWER, self._parse_menu_change_response)
        self._add_response_handler(Command.CHANGE_INST_AMP_POWER, self._parse_menu_change_response)
        self._add_response_handler(Command.CHANGE_EH_ISOLATION_AMP_POWER, self._parse_menu_change_response)
        self._add_response_handler(Command.CHANGE_HYDROGEN_POWER, self._parse_menu_change_response)
        self._add_response_handler(Command.CHANGE_REFERENCE_TEMP_POWER, self._parse_menu_change_response)
        self._add_response_handler(Command.DIRECT_SET, self._parse_menu_change_response)

        # Route the scheduled status-acquisition job into the state machine.
        self._add_scheduler_event(ScheduledJob.ACQUIRE_STATUS, ProtocolEvent.SCHEDULED_ACQUIRE_STATUS)

        # Start state machine in UNKNOWN state.
        self._protocol_fsm.start(ProtocolState.UNKNOWN)

        # commands sent to device to be filtered in responses for telnet DA
        self._sent_cmds = []

        # The chunker extracts status/data records from the raw byte stream.
        self._chunker = StringChunker(self.sieve_function)
@staticmethod
def sieve_function(raw_data):
"""
The method that splits samples
"""
return_list = []
matchers = []
matchers.append(BarsStatusParticle.regex_compiled())
matchers.append(BarsDataParticle.regex_compiled())
for matcher in matchers:
for match in matcher.finditer(raw_data):
return_list.append((match.start(), match.end()))
return return_list
    def _go_to_root_menu(self):
        """
        Get back to the root menu.
        @throw InstrumentProtocolException if a command prompt cannot be
            obtained (instrument likely not in command mode)
        """
        # Issue an enter or two off the bat to get out of any display screens
        # and confirm command mode
        try:
            response = self._do_cmd_resp(Command.BLANK, expected_prompt=Prompt.CMD_PROMPT)
            while not str(response).endswith(Prompt.CMD_PROMPT):
                response = self._do_cmd_resp(Command.BLANK,
                                             expected_prompt=Prompt.CMD_PROMPT)
                time.sleep(1)
        except InstrumentTimeoutException:
            raise InstrumentProtocolException("Not able to get valid command prompt. Is instrument in command mode?")

        # When you get a --> prompt, do 9's until you get back to the root
        response = self._do_cmd_resp(Command.BACK_MENU,
                                     expected_prompt=MENU_PROMPTS)
        while not str(response).endswith(Prompt.MAIN_MENU):
            response = self._do_cmd_resp(Command.BACK_MENU,
                                         expected_prompt=MENU_PROMPTS)
def _filter_capabilities(self, events):
"""
Define a small filter of the capabilities
@param events list of events to consider as capabilities
@retval A list of events that are actually capabilities
"""
events_out = [x for x in events if Capability.has(x)]
return events_out
########################################################################
# Unknown handlers.
########################################################################
    def _handler_unknown_enter(self, *args, **kwargs):
        """
        Enter unknown state.
        """
        # Tell driver superclass to send a state change event.
        # Superclass will query the state.
        self._driver_event(DriverAsyncEvent.STATE_CHANGE)
    def _handler_discover(self, *args, **kwargs):
        """
        Discover current state by breaking out of any active autosampling;
        always transitions to COMMAND.
        @retval (next_state, (next_state, result))
        """
        # Try to break in case we are in auto sample
        next_state = ProtocolState.COMMAND
        result = []
        self._send_break()
        return next_state, (next_state, result)
########################################################################
# Command handlers.
########################################################################
    def _handler_command_enter(self, *args, **kwargs):
        """
        Enter command state. On first initialization, refresh parameters
        from the device and apply startup values, then announce the state
        change.
        """
        # Command device to update parameters and send a config change event.
        # Tell driver superclass to send a state change event.
        # Superclass will query the state.
        if self._init_type != InitializationType.NONE:
            self._update_params()
        self._init_params()
        self._driver_event(DriverAsyncEvent.STATE_CHANGE)
def _handler_command_get(self, params=None, *args, **kwargs):
"""
Get parameters while in the command state.
@param params List of the parameters to pass to the state
@throw InstrumentParameterException for invalid parameter
"""
next_state = None
result_vals = {}
if params is None:
raise InstrumentParameterException("GET parameter list empty!")
if Parameter.ALL in params:
params = Parameter.list()
params.remove(Parameter.ALL)
if not isinstance(params, list):
raise InstrumentParameterException("GET parameter list not a list!")
# Do a bulk update from the instrument since they are all on one page
self._update_params()
# fill the return values from the update
for param in params:
if not Parameter.has(param):
raise InstrumentParameterException("Invalid parameter!")
result_vals[param] = self._param_dict.get(param)
result = result_vals
return next_state, result
    def _set_trhph_params(self, params):
        """
        Issue commands to the instrument to set various parameters. Only
        parameters whose formatted value differs from the current one are
        written.
        @param params dict of parameter name -> desired value
        @throw InstrumentParameterException for an unknown parameter or a
            failed set
        @throw InstrumentProtocolException if the cycle time cannot be set
        """
        self._go_to_root_menu()
        # NOTE(review): iteritems() is Python 2 only, consistent with the
        # py2-style __metaclass__ usage elsewhere in this file.
        for (key, val) in params.iteritems():
            if not Parameter.has(key):
                raise InstrumentParameterException()
            old_val = self._param_dict.format(key)
            new_val = self._param_dict.format(key, params[key])
            log.debug('KEY = %r, old = %r new %r', key, old_val, new_val)
            if old_val != new_val:
                # restrict operations to just the read/write parameters
                if key == Parameter.CYCLE_TIME:
                    self._navigate(SubMenu.CYCLE_TIME)
                    # Cycle time is entered in two steps: the unit (seconds or
                    # minutes), then the value in that unit.
                    (unit, value) = self._from_seconds(val)
                    try:
                        self._do_cmd_resp(Command.DIRECT_SET, unit,
                                          expected_prompt=[Prompt.CYCLE_TIME_SEC_VALUE_PROMPT,
                                                           Prompt.CYCLE_TIME_MIN_VALUE_PROMPT])
                        self._do_cmd_resp(Command.DIRECT_SET, value,
                                          expected_prompt=Prompt.CHANGE_PARAM_MENU)
                    except InstrumentParameterException:
                        self._go_to_root_menu()
                        raise InstrumentProtocolException("Could not set cycle time")
                    self._go_to_root_menu()
                elif key == Parameter.METADATA_POWERUP:
                    # NOTE(review): this (and the next two branches) sends
                    # get_init_value(key) rather than the requested val —
                    # confirm this is intended and not a bug.
                    self._navigate(SubMenu.METADATA_POWERUP)
                    result = self._do_cmd_resp(Command.DIRECT_SET, self._param_dict.get_init_value(key),
                                               expected_prompt=Prompt.CHANGE_PARAM_MENU)
                    if not result:
                        raise InstrumentParameterException("Could not set param %s" % key)
                    self._go_to_root_menu()
                elif key == Parameter.METADATA_RESTART:
                    self._navigate(SubMenu.METADATA_RESTART)
                    result = self._do_cmd_resp(Command.DIRECT_SET, self._param_dict.get_init_value(key),
                                               expected_prompt=Prompt.CHANGE_PARAM_MENU)
                    if not result:
                        raise InstrumentParameterException("Could not set param %s" % key)
                    self._go_to_root_menu()
                elif key == Parameter.VERBOSE:
                    self._navigate(SubMenu.VERBOSE)
                    result = self._do_cmd_resp(Command.DIRECT_SET, self._param_dict.get_init_value(key),
                                               expected_prompt=Prompt.CHANGE_PARAM_MENU)
                    if not result:
                        raise InstrumentParameterException("Could not set param %s" % key)
                    # need to set value direct because the instrument does not indicate whether it was successful
                    # as long as the instrument returns from 'setting' with the command prompt, we assume success
                    self._param_dict.set_value(key, self._param_dict.get_init_value(key))
                    self._go_to_root_menu()
                elif key == Parameter.RUN_ACQUIRE_STATUS_INTERVAL:
                    # Driver-side parameter: reconfigure the scheduler instead
                    # of talking to the instrument.
                    self._param_dict.set_value(key, val)
                    self.stop_scheduled_job(ScheduledJob.ACQUIRE_STATUS)
                    log.debug("Configuring the scheduler to acquire status %s",
                              self._param_dict.get(Parameter.RUN_ACQUIRE_STATUS_INTERVAL))
                    if self._param_dict.get(Parameter.RUN_ACQUIRE_STATUS_INTERVAL) != '00:00:00':
                        self.start_scheduled_job(Parameter.RUN_ACQUIRE_STATUS_INTERVAL, ScheduledJob.ACQUIRE_STATUS,
                                                 ProtocolEvent.SCHEDULED_ACQUIRE_STATUS)
    def _set_params(self, *args, **kwargs):
        """
        Verify not readonly params and call _set_trhph_params to issue
        commands to the instrument to set various parameters.
        @param args[0] dict of parameter name -> value
        @throw InstrumentParameterException if no parameter dict was supplied
        @throw InstrumentProtocolException if not in the COMMAND state
        """
        try:
            params = args[0]
        except IndexError:
            raise InstrumentParameterException('Set command requires a parameter dict.')

        # set parameters are only allowed in COMMAND state
        if self.get_current_state() != ProtocolState.COMMAND:
            raise InstrumentProtocolException("Not in command state. Unable to set params")

        self._verify_not_readonly(*args, **kwargs)
        self._set_trhph_params(params)

        # re-sync with param dict
        self._go_to_root_menu()
        self._update_params()
    def _handler_command_set(self, *args, **kwargs):
        """
        Handle setting data from command mode.
        @param args[0] Dict of the parameters and values to set
        @retval (next state, result)
        @throw InstrumentParameterException For a missing or non-dict
            parameter argument
        """
        next_state = None
        result = []

        # Retrieve required parameter.
        # Raise if no parameter provided, or not a dict.
        try:
            params = args[0]
        except IndexError:
            raise InstrumentParameterException('_handler_command_set requires a parameter dict.')
        if params is None or (not isinstance(params, dict)):
            raise InstrumentParameterException()

        # Delegate the actual work (readonly check + instrument writes).
        self._set_params(*args, **kwargs)
        return next_state, result
    def _handler_command_autosample(self, *args, **kwargs):
        """
        Start autosample mode from the main menu.
        @retval (next_state, (next_state, result))
        """
        next_state = ProtocolState.AUTOSAMPLE
        result = []
        self._navigate(SubMenu.MAIN)
        # No response expected: the instrument begins streaming samples.
        self._do_cmd_no_resp(Command.START_AUTOSAMPLE)
        return next_state, (next_state, result)
    def _handler_command_acquire_status(self, *args, **kwargs):
        """
        Acquire Instrument Status. The status screen output is picked up by
        the chunker/sieve and published as a BarsStatusParticle.
        @retval (next_state, (next_state, result))
        """
        next_state = None
        result = []
        self._navigate(SubMenu.MAIN)
        self._do_cmd_no_resp(Command.SHOW_STATUS)
        return next_state, (next_state, result)
    def _handler_command_start_direct(self):
        """
        Transition into direct access mode.
        @retval (next_state, (next_state, result))
        """
        next_state = ProtocolState.DIRECT_ACCESS
        result = []
        return next_state, (next_state, result)
########################################################################
# Direct access handlers.
########################################################################
    def _handler_direct_access_enter(self, *args, **kwargs):
        """
        Enter direct access state.
        """
        # Tell driver superclass to send a state change event.
        # Superclass will query the state.
        self._driver_event(DriverAsyncEvent.STATE_CHANGE)
        # Reset the echo-filter list for this direct-access session.
        self._sent_cmds = []
    def _handler_direct_access_exit(self, *args, **kwargs):
        """
        Exit direct access state. No cleanup required.
        """
        pass
    def _handler_direct_access_execute_direct(self, data):
        """
        Pass user-supplied bytes straight through to the instrument.
        @param data raw bytes to be sent in direct access
        @retval (next_state, (next_state, result))
        """
        next_state = None
        result = []
        self._do_cmd_direct(data)

        # add sent command to list for 'echo' filtering in callback
        self._sent_cmds.append(data)
        return next_state, (next_state, result)
    def _handler_direct_access_scheduled_acquire_status(self, data):
        """
        Swallow scheduled ACQUIRE_STATUS events while in direct access.
        @param data unused (matches the scheduled-event handler signature)
        """
        # method does nothing (implicitly returns None).
        # i.e. Ignore running ACQUIRE_STATUS commands while in direct access
    def _handler_direct_access_stop_direct(self):
        """
        Leave direct access and return to COMMAND.
        @retval (next_state, (next_state, result))
        """
        next_state = ProtocolState.COMMAND
        result = []
        return next_state, (next_state, result)
########################################################################
# Autosample handlers
########################################################################
def stop_scheduled_job(self, schedule_job):
"""
Remove the scheduled job
"""
if self._scheduler is not None:
try:
self._remove_scheduler(schedule_job)
except KeyError:
log.debug("_remove_scheduler could not find %s", schedule_job)
    def start_scheduled_job(self, param, schedule_job, protocol_event):
        """
        Add a scheduled job firing protocol_event at a fixed interval.
        @param param the parameter holding the interval string; assumed to
            be "HH:MM:SS" (splits on ':' — malformed values will raise)
        @param schedule_job the ScheduledJob identifier to register
        @param protocol_event the ProtocolEvent to fire on each trigger
        """
        interval = self._param_dict.get(param).split(':')
        hours = interval[0]
        minutes = interval[1]
        seconds = interval[2]
        log.debug("Setting scheduled interval to: %s %s %s", hours, minutes, seconds)

        config = {DriverConfigKey.SCHEDULER: {
            schedule_job: {
                DriverSchedulerConfigKey.TRIGGER: {
                    DriverSchedulerConfigKey.TRIGGER_TYPE: TriggerType.INTERVAL,
                    DriverSchedulerConfigKey.HOURS: int(hours),
                    DriverSchedulerConfigKey.MINUTES: int(minutes),
                    DriverSchedulerConfigKey.SECONDS: int(seconds)
                }
            }
        }
        }
        self.set_init_params(config)
        self._add_scheduler_event(schedule_job, protocol_event)
    def _handler_autosample_enter(self, *args, **kwargs):
        """
        Enter autosample mode; announce the state change to the driver.
        """
        self._driver_event(DriverAsyncEvent.STATE_CHANGE)
    def _handler_autosample_acquire_status(self, *args, **kwargs):
        """
        Acquire instrument's status in autosample state: break out of
        sampling, request the status screen, then resume sampling.
        @retval (next_state, (next_state, result))
        """
        next_state = None
        result = []

        # Break out of auto sample mode by sending control S to the instrument
        self._send_break()

        # Send the show parameter command to collect instrument's status
        self._navigate(SubMenu.MAIN)
        self._do_cmd_no_resp(Command.SHOW_STATUS)

        # Send the start autosample command to get back to autosample mode once
        # the instrument's status has been collected.
        self._navigate(SubMenu.MAIN)
        self._do_cmd_no_resp(Command.START_AUTOSAMPLE)
        return next_state, (next_state, result)
def _handler_autosample_stop(self):
"""
Stop autosample mode
"""
next_state = None
result = []
if self._send_break():
next_state = ProtocolState.COMMAND
return next_state, (next_state, result)
########################################################################
# Command builders
########################################################################
    def _build_solo_command(self, cmd):
        """ Issue a simple command that does NOT require a newline at the end to
        execute. Likely used for control characters or special characters.
        @param cmd the Command to look up
        @retval the raw character(s) to send
        """
        return COMMAND_CHAR[cmd]
def _build_menu_command(self, cmd):
""" Pick the right character and add a newline """
if COMMAND_CHAR[cmd]:
return COMMAND_CHAR[cmd] + self._newline
else:
raise InstrumentProtocolException("Unknown command character for %s" % cmd)
    def _build_direct_command(self, cmd, arg):
        """ Build a command where we just send the argument to the instrument.
        Ignore the command part, we don't need it here as we are already in
        a submenu.
        @param cmd the Command (unused)
        @param arg the value to send, newline-terminated
        @retval the string to send
        """
        return "%s%s" % (arg, self._newline)
########################################################################
# Command parsers
########################################################################
    def _parse_menu_change_response(self, response, prompt):
        """ Parse a response to a menu change
        @param response What was sent back from the command that was sent
        @param prompt The prompt that was returned from the device
        @retval The prompt that was encountered after the change
        """
        log.trace("Parsing menu change response with prompt: %s", prompt)
        return prompt
    def _parse_show_param_response(self, response, prompt):
        """ Parse the show parameter response screen, feeding the whole
        response into the parameter dictionary's multi-line updater.
        @param response the full parameter screen text
        @param prompt the prompt that terminated the response (unused)
        """
        log.trace("Parsing show parameter screen")
        self._param_dict.update_many(response)
########################################################################
# Utilities
########################################################################
    def _wakeup(self, timeout, delay=1):
        # No-op: this instrument never sleeps, so no wakeup sequence needed.
        pass
    def _got_chunk(self, chunk, timestamp):
        """
        extract samples from a chunk of data
        @param chunk: bytes to parse into a sample.
        @param timestamp: timestamp associated with the chunk
        @throw InstrumentProtocolException if the chunk matches neither the
            data-sample nor the status pattern
        """
        # Try the data particle first, then the status particle; the sieve
        # should only deliver chunks matching one of these two patterns.
        if not (self._extract_sample(BarsDataParticle, SAMPLE_REGEX, chunk, timestamp) or
                self._extract_sample(BarsStatusParticle, BarsStatusParticle.regex_compiled(), chunk, timestamp)):
            raise InstrumentProtocolException("Unhandled chunk")
def _update_params(self):
"""
Fetch the parameters from the device, and update the param dict.
"""
old_config = self._param_dict.get_config()
self._get_config()
new_config = self._param_dict.get_config()
if new_config != old_config:
self._driver_event(DriverAsyncEvent.CONFIG_CHANGE)
    def _get_config(self, *args, **kwargs):
        """ Refresh the entire configuration from the instrument.
        Navigating to the parameter screen triggers the SHOW_PARAM response
        handler, whose update_many() call refreshes the param dict; no
        arguments are used.
        @throw InstrumentProtocolException On a deeper issue
        """
        # Just need to show the parameter screen...the parser for the command
        # does the update_many()
        self._go_to_root_menu()
        self._navigate(SubMenu.SHOW_PARAM)
        self._go_to_root_menu()
def _send_break(self, timeout=4):
    """
    Execute an attempt to break out of auto sample (a few tries in case
    things get garbled).
    For this instrument, it is done with a ^S, a wait for a \r\n, then
    another ^S within 1/2 a second
    @param timeout Seconds to wait for each response from the instrument
    @retval True if 2 ^S chars were sent with a prompt in the middle, False
    if not.
    """
    log.debug("Sending break sequence to instrument...")
    # Timing is an issue, so keep it simple, work directly with the
    # couple chars instead of command/response. Could be done that way
    # though. Just more steps, logic, and delay for such a simple
    # exchange
    for count in range(0, 3):
        self._promptbuf = ""
        try:
            # First ^S; the instrument either acknowledges it (it was
            # autosampling) or we see a command prompt (already stopped).
            self._connection.send(COMMAND_CHAR[Command.BREAK])
            time.sleep(1)
            (prompt, result) = self._get_raw_response(timeout, expected_prompt=[Prompt.BREAK_ACK,
                                                                               Prompt.CMD_PROMPT])
            if prompt == Prompt.BREAK_ACK:
                # Second ^S completes the break handshake; expect the
                # command prompt afterwards.
                self._connection.send(COMMAND_CHAR[Command.BREAK])
                time.sleep(1)
                self._get_response(timeout, expected_prompt=Prompt.CMD_PROMPT)
                return True
            elif prompt == Prompt.CMD_PROMPT:
                # Already at the command prompt; nothing more to do.
                return True
        except InstrumentTimeoutException:
            # Garbled exchange -- retry the whole sequence.
            continue
    log.trace("_send_break failing after several attempts")
    return False
def _build_driver_dict(self):
    """Populate the driver dictionary with driver-level options.

    Currently only declares vendor-software compatibility.
    """
    self._driver_dict.add(DriverDictKey.VENDOR_SW_COMPATIBLE, True)
def _build_command_dict(self):
    """Populate the command dictionary with the driver's capabilities."""
    labelled_capabilities = (
        (Capability.START_AUTOSAMPLE, "Start Autosample"),
        (Capability.STOP_AUTOSAMPLE, "Stop Autosample"),
        (Capability.ACQUIRE_STATUS, "Acquire Status"),
        (Capability.DISCOVER, "Discover"),
    )
    for capability, label in labelled_capabilities:
        self._cmd_dict.add(capability, display_name=label)
def _build_param_dict(self):
    """
    Populate the parameter dictionary with parameters.
    For each parameter key, add match string, match lambda function,
    and value formatting function for set commands.
    """
    # Add parameter handlers to parameter dict.
    self._param_dict = ProtocolParameterDict()
    # Sample interval. The instrument reports a (value, units-flag) pair;
    # _to_seconds folds both regex captures into a single seconds value.
    self._param_dict.add(Parameter.CYCLE_TIME,
                         r'(\d+)\s+= Cycle Time \(.*\)\r\n(0|1)\s+= Minutes or Seconds Cycle Time',
                         lambda match: self._to_seconds(int(match.group(1)),
                                                        int(match.group(2))),
                         self._int_to_string,
                         type=ParameterDictType.INT,
                         display_name="Cycle Time",
                         visibility=ParameterDictVisibility.READ_WRITE,
                         startup_param=True,
                         direct_access=True,
                         default_value=20,
                         menu_path_read=SubMenu.SHOW_PARAM,
                         submenu_read=[],
                         menu_path_write=SubMenu.CHANGE_PARAM,
                         units=Units.SECOND,
                         description='Sample interval (15 - 3600), where time greater than 59 is rounded down to '
                                     'the nearest minute.',
                         submenu_write=[["1", Prompt.CYCLE_TIME_PROMPT]])
    # Verbosity is write-only: the regex deliberately never matches, so
    # the current value can never be read back from the instrument.
    self._param_dict.add(Parameter.VERBOSE,
                         r'bogusdatadontmatch',  # Write-only
                         lambda match: None,
                         self._int_to_string,
                         type=ParameterDictType.INT,
                         display_name="Verbose",
                         visibility=ParameterDictVisibility.IMMUTABLE,
                         startup_param=True,
                         direct_access=True,
                         init_value=0,
                         value=0,
                         # TODO - HAD PROBLEMS COMPARING VALUES BEFORE SETTING DURING INIT BECAUSE VALUE WASN'T SET IN UPDATE PARAMS (NO WAY TO GET VALUE FROM INSTRUMENT)
                         description="Enable verbosity with data points (1:on | 0:off)",
                         menu_path_write=SubMenu.CHANGE_PARAM,
                         submenu_write=[["2", Prompt.VERBOSE_PROMPT]])
    # Whether metadata is printed when the instrument powers up.
    self._param_dict.add(Parameter.METADATA_POWERUP,
                         r'(0|1)\s+= Metadata Print Status on Power up',
                         lambda match: int(match.group(1)),
                         self._int_to_string,
                         type=ParameterDictType.INT,
                         display_name="Metadata on Powerup",
                         visibility=ParameterDictVisibility.IMMUTABLE,
                         startup_param=True,
                         direct_access=True,
                         init_value=0,
                         description="Enable display of metadata at startup (1:on | 0:off)",
                         menu_path_write=SubMenu.CHANGE_PARAM,
                         submenu_write=[["3", Prompt.METADATA_PROMPT]])
    # Whether metadata is printed when data collection restarts.
    self._param_dict.add(Parameter.METADATA_RESTART,
                         r'(0|1)\s+= Metadata Print Status on Restart Data Collection',
                         lambda match: int(match.group(1)),
                         self._int_to_string,
                         type=ParameterDictType.INT,
                         display_name="Metadata on Restart",
                         visibility=ParameterDictVisibility.IMMUTABLE,
                         startup_param=True,
                         direct_access=True,
                         init_value=0,
                         description="Enable display of metadata at restart (1:on | 0:off)",
                         menu_path_write=SubMenu.CHANGE_PARAM,
                         submenu_write=[["4", Prompt.METADATA_PROMPT]])
    # The following sensor-power parameters are read-only from the driver's
    # perspective; writes go through the SENSOR_POWER submenu.
    self._param_dict.add(Parameter.RES_SENSOR_POWER,
                         r'(0|1)\s+= Res Power',
                         lambda match: int(match.group(1)),
                         self._int_to_string,
                         type=ParameterDictType.INT,
                         display_name="Res Sensor Power",
                         visibility=ParameterDictVisibility.READ_ONLY,
                         startup_param=False,
                         direct_access=False,
                         menu_path_read=SubMenu.SHOW_PARAM,
                         submenu_read=[],
                         description="Enable res sensor power (1:on | 0:off)",
                         menu_path_write=SubMenu.SENSOR_POWER,
                         submenu_write=[["1"]])
    self._param_dict.add(Parameter.INST_AMP_POWER,
                         r'(0|1)\s+= Thermocouple & Hydrogen Amp Power',
                         lambda match: int(match.group(1)),
                         self._int_to_string,
                         type=ParameterDictType.INT,
                         display_name="Instrumentation Amp Power",
                         visibility=ParameterDictVisibility.READ_ONLY,
                         startup_param=False,
                         direct_access=False,
                         description="Enable instrumentation amp power (1:on | 0:off)",
                         menu_path_read=SubMenu.SHOW_PARAM,
                         submenu_read=[],
                         menu_path_write=SubMenu.SENSOR_POWER,
                         submenu_write=[["2"]])
    self._param_dict.add(Parameter.EH_ISOLATION_AMP_POWER,
                         r'(0|1)\s+= eh Amp Power',
                         lambda match: int(match.group(1)),
                         self._int_to_string,
                         type=ParameterDictType.INT,
                         display_name="eH Isolation Amp Power",
                         visibility=ParameterDictVisibility.READ_ONLY,
                         startup_param=False,
                         direct_access=False,
                         description="Enable eH isolation amp power (1:on | 0:off)",
                         menu_path_read=SubMenu.SHOW_PARAM,
                         submenu_read=[],
                         menu_path_write=SubMenu.SENSOR_POWER,
                         submenu_write=[["3"]])
    self._param_dict.add(Parameter.HYDROGEN_POWER,
                         r'(0|1)\s+= Hydrogen Sensor Power',
                         lambda match: int(match.group(1)),
                         self._int_to_string,
                         type=ParameterDictType.INT,
                         display_name="Hydrogen Sensor Power",
                         visibility=ParameterDictVisibility.READ_ONLY,
                         startup_param=False,
                         direct_access=False,
                         description="Enable hydrogen sensor power (1:on | 0:off)",
                         menu_path_read=SubMenu.SHOW_PARAM,
                         submenu_read=[],
                         menu_path_write=SubMenu.SENSOR_POWER,
                         submenu_write=[["4"]])
    self._param_dict.add(Parameter.REFERENCE_TEMP_POWER,
                         r'(0|1)\s+= Reference Temperature Power',
                         lambda match: int(match.group(1)),
                         self._int_to_string,
                         type=ParameterDictType.INT,
                         display_name="Reference Temp Power",
                         visibility=ParameterDictVisibility.READ_ONLY,
                         startup_param=False,
                         direct_access=False,
                         description="Enable reference temperature power (1:on | 0:off)",
                         menu_path_read=SubMenu.SHOW_PARAM,
                         submenu_read=[],
                         menu_path_write=SubMenu.SENSOR_POWER,
                         submenu_write=[["5"]])
    # Driver-side scheduler interval; not a real instrument parameter
    # (regex never matches instrument output).
    self._param_dict.add(Parameter.RUN_ACQUIRE_STATUS_INTERVAL,
                         "fakeregexdontmatch",
                         lambda match: match.group(0),
                         str,
                         type=ParameterDictType.STRING,
                         expiration=None,
                         visibility=ParameterDictVisibility.READ_WRITE,
                         display_name="Acquire Status Interval",
                         description='Time interval for running acquiring status.',
                         default_value='00:00:00',
                         units='HH:MM:SS',
                         startup_param=True,
                         direct_access=False)
@staticmethod
def _to_seconds(value, unit):
"""
Converts a number and a unit into seconds. Ie if "4" and "1"
comes in, it spits out 240
@param value The int value for some number of minutes or seconds
@param unit int of 0 or 1 where 0 is seconds, 1 is minutes
@return Number of seconds.
"""
if (not isinstance(value, int)) or (not isinstance(unit, int)):
raise InstrumentProtocolException("Invalid second arguments!")
if unit == 1:
return value * 60
elif unit == 0:
return value
else:
raise InstrumentProtocolException("Invalid Units!")
@staticmethod
def _from_seconds(value):
"""
Converts a number of seconds into a (unit, value) tuple.
@param value The number of seconds to convert
@retval A tuple of unit and value where the unit is 1 for seconds and 2
for minutes. If the value is 15-59, units should be returned in
seconds. If the value is over 59, the units will be returned in
a number of minutes where the seconds are rounded down to the
nearest minute.
"""
if (value < 15) or (value > 3600):
raise InstrumentParameterException("Invalid seconds value: %s" % value)
if value < 60:
return 1, value
else:
return 2, value // 60
def create_playback_protocol(callback):
    """Build a Protocol wired only with the given driver-event callback.

    Prompt, newline and menu arguments are left as None -- presumably
    unused during playback (no live instrument connection).
    """
    return Protocol(None, None, None, callback)
| |
"""Bridge to sublime functionality."""
import sublime
import logging
import re
from .row_col import ZeroIndexedRowCol
_log = logging.getLogger("ECC")
class SublBridge:
    """A small help class that bridges with sublime (maybe will grow)."""

    NO_DEFAULT_COMPLETIONS = sublime.INHIBIT_WORD_COMPLETIONS \
        | sublime.INHIBIT_EXPLICIT_COMPLETIONS
    SHOW_DEFAULT_COMPLETIONS = None
    HIDE_DEFAULT_COMPLETIONS = ([], sublime.INHIBIT_WORD_COMPLETIONS |
                                sublime.INHIBIT_EXPLICIT_COMPLETIONS)

    @staticmethod
    def set_status(message):
        """Set status message for the current view."""
        view = SublBridge.active_view()
        if not view:
            # do nothing if there is no view (consistent with erase_status)
            return
        view.set_status("000_ECC", message)

    @staticmethod
    def erase_status():
        """Erase status message for the current view."""
        view = SublBridge.active_view()
        if not view:
            # do nothing if there is no view
            return
        view.erase_status("000_ECC")

    @staticmethod
    def erase_phantoms(tag):
        """Erase phantoms for the current view."""
        SublBridge.active_view().erase_phantoms(tag)

    @staticmethod
    def active_view():
        """Get the active view.

        Returns:
            View: Active view
        """
        return sublime.active_window().active_view()

    @staticmethod
    def active_view_id():
        """Get the id of the active view.

        Returns:
            int: buffer id of the active view
        """
        return SublBridge.active_view().buffer_id()

    @staticmethod
    def get_line(view, pos=None):
        """Get the line at a given position as text.

        Args:
            view (sublime.View): current view
            pos (int): 1d text position; forwarded to
                ZeroIndexedRowCol.from_1d_location

        Returns:
            str: text of the line containing that position
        """
        row_col = ZeroIndexedRowCol.from_1d_location(view, pos)
        point_on_line = view.text_point(row_col.row, 0)
        line = view.line(point_on_line)
        return view.substr(line)

    @staticmethod
    def next_line(view):
        """Get next line (below the cursor) as text.

        Args:
            view (sublime.View): current view

        Returns:
            str: text that the next line contains
        """
        row_col = ZeroIndexedRowCol.from_current_cursor_pos(view)
        point_on_next_line = view.text_point(row_col.row + 1, 0)
        line = view.line(point_on_next_line)
        return view.substr(line)

    @staticmethod
    def format_completions(completions, hide_default_completions):
        """Get completions. Manage hiding default ones.

        Args:
            completions (list): completion candidates to show
            hide_default_completions (bool): True if we hide default ones

        Returns:
            (completions, flags) tuple when default completions must be
            inhibited; otherwise the bare completions list, which lets
            sublime merge in its own word completions.
        """
        if completions and hide_default_completions:
            _log.debug("Hiding default completions")
            return (completions, SublBridge.NO_DEFAULT_COMPLETIONS)
        else:
            _log.debug("Adding clang completions to default ones")
            return completions

    @staticmethod
    def show_auto_complete(view):
        """Reopen completion popup.

        It therefore subsequently calls
        EasyClangComplete.on_query_completions(...)

        Args:
            view (sublime.View): view to open completion window in
        """
        _log.debug("reload completion tooltip")
        view.run_command('hide_auto_complete')
        view.run_command('auto_complete', {
            'disable_auto_insert': True,
            'api_completions_only': False,
            # FIX: key was misspelled 'next_competion_if_showing' and was
            # silently ignored by sublime's auto_complete command.
            'next_completion_if_showing': False})

    @staticmethod
    def show_error_dialog(message):
        """Show an error message dialog."""
        sublime.error_message("EasyClangComplete:\n\n" + message)

    SYNTAX_REGEX = re.compile(r"\/([^\/]+)\.(?:tmLanguage|sublime-syntax)")

    LANG_TAG = "lang"
    SYNTAXES_TAG = "syntaxes"

    LANG_C_TAG = "C"
    LANG_CPP_TAG = "CPP"
    LANG_OBJECTIVE_C_TAG = "OBJECTIVE_C"
    LANG_OBJECTIVE_CPP_TAG = "OBJECTIVE_CPP"
    LANG_TAGS = [LANG_C_TAG, LANG_CPP_TAG,
                 LANG_OBJECTIVE_C_TAG, LANG_OBJECTIVE_CPP_TAG]

    LANG_NAMES = {
        LANG_C_TAG: 'c',
        LANG_CPP_TAG: 'c++',
        LANG_OBJECTIVE_CPP_TAG: 'objective-c++',
        LANG_OBJECTIVE_C_TAG: 'objective-c'
    }

    @staticmethod
    def get_view_lang(view, settings_storage):
        """Get language from view description.

        Args:
            view (sublime.View): Current view
            settings_storage (SettingsStorage): ECC settings for the view

        Returns:
            (str, str): (lang_tag, lang_name) pair, one of LANG_TAGS and
                its LANG_NAMES value, or (None, None) if nothing matched
        """
        syntax = SublBridge.get_view_syntax(view)
        for lang_tag, syntaxes in settings_storage.valid_lang_syntaxes.items():
            if syntax in syntaxes and lang_tag in SublBridge.LANG_NAMES:
                return lang_tag, SublBridge.LANG_NAMES[lang_tag]
        _log.debug("ECC does nothing for language syntax: '%s'", syntax)
        return None, None

    @staticmethod
    def get_view_syntax(view):
        """Get syntax from view description.

        Args:
            view (sublime.View): Current view

        Returns:
            str: syntax, e.g. "C", "C++", or None on failure
        """
        try:
            syntax = re.findall(SublBridge.SYNTAX_REGEX,
                                view.settings().get('syntax'))
            if len(syntax) > 0:
                return syntax[0]
        except TypeError as e:
            # if the view is killed while this is being run, an exception is
            # thrown. Let's deal with it gracefully.
            _log.error("error while getting current language: '%s'", e)
        return None

    @staticmethod
    def has_valid_syntax(view, settings_storage):
        """Check if syntax is valid for this plugin.

        Args:
            view (sublime.View): current view
            settings_storage (SettingsStorage): ECC settings for this view

        Returns:
            bool: True if valid, False otherwise
        """
        _, lang = SublBridge.get_view_lang(view, settings_storage)
        # If we could not determine the language from the syntax, the
        # syntax is not valid for us.
        return lang is not None

    @staticmethod
    def is_valid_view(view):
        """Check whether the given view is one we can and want to handle.

        Args:
            view (sublime.View): view to check

        Returns:
            bool: True if we want to handle this view, False otherwise
        """
        from os import path
        if not view:
            _log.debug("view is None")
            return False
        if not view.file_name():
            _log.debug("view file_name is None")
            return False
        if view.is_scratch():
            _log.debug("view is scratch view")
            return False
        if view.buffer_id() == 0:
            _log.debug("view buffer id is 0")
            return False
        if not path.exists(view.file_name()):
            _log.debug("view file_name does not exist in system")
            return False
        return True

    @staticmethod
    def get_pos_status(point, view, settings):
        """Check if the cursor focuses a valid trigger.

        Args:
            point (int): position of the cursor in the file as defined by subl
            view (sublime.View): current view
            settings: plugin settings; must provide `triggers` and
                `autocomplete_all`

        Returns:
            PosStatus: status for this position
        """
        trigger_length = 1

        word_on_the_left = view.substr(view.word(point - trigger_length))
        if word_on_the_left.isdigit():
            # don't autocomplete digits
            _log.debug("trying to autocomplete digit, are we? Not allowed.")
            return PosStatus.WRONG_TRIGGER

        # slightly counterintuitive `view.substr` returns ONE character
        # to the right of given point.
        curr_char = view.substr(point - trigger_length)
        wrong_trigger_found = False
        for trigger in settings.triggers:
            # compare to the last char of a trigger
            if curr_char == trigger[-1]:
                trigger_length = len(trigger)
                prev_char = view.substr(point - trigger_length)
                if prev_char == trigger[0]:
                    _log.debug("matched trigger '%s'.", trigger)
                    return PosStatus.COMPLETION_NEEDED
                else:
                    _log.debug("wrong trigger '%s%s'.", prev_char, curr_char)
                    wrong_trigger_found = True
        if wrong_trigger_found:
            # no correct trigger found, but a wrong one fired instead
            _log.debug("wrong trigger fired")
            return PosStatus.WRONG_TRIGGER

        if settings.autocomplete_all:
            return PosStatus.COMPLETION_NEEDED

        this_line = SublBridge.get_line(view, point)
        if this_line.startswith('#include'):
            _log.debug("completing an include")
            return PosStatus.COMPLETE_INCLUDES

        # if nothing fired we don't need to do anything
        _log.debug("no completions needed")
        return PosStatus.COMPLETION_NOT_NEEDED
class PosStatus:
    """Enum class with values for completion status."""

    # Completion should be triggered at this position.
    COMPLETION_NEEDED = 0
    # Nothing fired; no completion is required here.
    COMPLETION_NOT_NEEDED = 1
    # A trigger-like character was typed but it does not match any
    # configured trigger (or the user is typing a digit).
    WRONG_TRIGGER = 2
    # The cursor is on an '#include' line; complete include paths instead.
    COMPLETE_INCLUDES = 3
| |
import sqlite3
import ddt
import mock
from openstackinabox.tests.base import TestBase
from openstackinabox.models.swift import exceptions
from openstackinabox.models.swift import model
@ddt.ddt
class TestSwiftModel(TestBase):
    """Unit tests for the sqlite-backed Swift service model."""

    def setUp(self):
        """Prepare tenant/container/object names and Swift-style paths.

        The model itself is created per-test so the `initialize` flag can
        vary; the base class is told not to initialize.
        """
        super(TestSwiftModel, self).setUp(initialize=False)
        self.tenant_id = '123456'
        self.tenant_path = '/{0}'.format(self.tenant_id)
        self.container_name = 'foobar'
        self.container_path = '{0}/{1}'.format(
            self.tenant_path,
            self.container_name
        )
        self.object_name = 'raboof'
        self.object_path = '{0}/{1}'.format(
            self.container_path,
            self.object_name
        )

    def tearDown(self):
        super(TestSwiftModel, self).tearDown()

    def test_initialize_db_schema(self):
        """Schema setup executes every statement in model.schema and commits."""
        db_cursor = mock.MagicMock()
        db_execute = mock.MagicMock()
        db_commit = mock.MagicMock()

        db_instance = mock.MagicMock()
        db_instance.cursor.return_value = db_cursor
        db_instance.commit = db_commit
        db_cursor.execute = db_execute

        model.SwiftServiceModel.initialize_db_schema(db_instance)

        self.assertTrue(db_instance.cursor.called)
        self.assertTrue(db_execute.called)
        self.assertTrue(db_commit.called)
        # one execute() per schema statement, each called with that statement
        self.assertEqual(db_execute.call_count, len(model.schema))
        for s in model.schema:
            db_execute.assert_any_call(s)

    @ddt.data(
        False,
        True
    )
    def test_initialize(self, auto_initialize):
        """Without initialize=True the tables are absent and inserts fail;
        with it, tenant/container/object rows can be added and found."""
        instance = model.SwiftServiceModel(initialize=auto_initialize)
        if not auto_initialize:
            # error raised if we try to put anything into the database
            with self.assertRaises(sqlite3.OperationalError):
                instance.add_tenant(self.tenant_id, self.tenant_path)
        else:
            # put something into each table
            internal_tenant_id = instance.add_tenant(
                self.tenant_id,
                self.tenant_path
            )
            internal_container_id = instance.add_container(
                internal_tenant_id,
                self.container_name,
                self.container_path
            )
            instance.add_object(
                internal_tenant_id,
                internal_container_id,
                self.object_name,
                self.object_path
            )
            self.assertTrue(
                instance.has_tenant(
                    self.tenant_id
                )
            )
            self.assertTrue(
                instance.has_container(
                    internal_tenant_id,
                    self.container_name
                )
            )
            self.assertTrue(
                instance.has_object(
                    internal_tenant_id,
                    internal_container_id,
                    self.object_name
                )
            )

    def test_database(self):
        """The model exposes its backing sqlite3 connection."""
        instance = model.SwiftServiceModel()
        self.assertIsInstance(instance.database, sqlite3.Connection)

    @ddt.data(
        'has', 'get'
    )
    def test_tenant_failure(self, method):
        """Looking up an unknown tenant raises SwiftUnknownTenantError."""
        instance = model.SwiftServiceModel()
        with self.assertRaises(exceptions.SwiftUnknownTenantError):
            if method == 'has':
                instance.has_tenant(self.tenant_id)
            elif method == 'get':
                instance.get_tenant(123456)

    def test_tenant_success(self):
        """add_tenant returns an internal id; has/get round-trip the data."""
        instance = model.SwiftServiceModel()
        internal_tenant_id = instance.add_tenant(
            self.tenant_id,
            self.tenant_path
        )
        self.assertEqual(
            instance.has_tenant(self.tenant_id),
            internal_tenant_id
        )
        tenant_data = instance.get_tenant(internal_tenant_id)
        self.assertEqual(
            tenant_data['id'],
            internal_tenant_id
        )
        self.assertEqual(
            tenant_data['tenantid'],
            self.tenant_id
        )
        self.assertEqual(
            tenant_data['path'],
            self.tenant_path
        )

    @ddt.data(
        'has', 'get'
    )
    def test_container_failure(self, method):
        """Looking up an unknown container raises SwiftUnknownContainerError."""
        instance = model.SwiftServiceModel()
        internal_tenant_id = instance.add_tenant(
            self.tenant_id,
            self.tenant_path
        )
        with self.assertRaises(exceptions.SwiftUnknownContainerError):
            if method == 'has':
                instance.has_container(internal_tenant_id, self.container_name)
            elif method == 'get':
                instance.get_container(internal_tenant_id, 123456)

    def test_container_success(self):
        """add_container returns an internal id; has/get round-trip the data."""
        instance = model.SwiftServiceModel()
        internal_tenant_id = instance.add_tenant(
            self.tenant_id,
            self.tenant_path
        )
        internal_container_id = instance.add_container(
            internal_tenant_id,
            self.container_name,
            self.container_path
        )
        self.assertEqual(
            instance.has_container(
                internal_tenant_id,
                self.container_name
            ),
            internal_container_id
        )
        container_data = instance.get_container(
            internal_tenant_id,
            internal_container_id
        )
        self.assertEqual(
            container_data['tenantid'],
            internal_tenant_id
        )
        self.assertEqual(
            container_data['containerid'],
            internal_container_id
        )
        self.assertEqual(
            container_data['container_name'],
            self.container_name
        )
        self.assertEqual(
            container_data['path'],
            self.container_path
        )

    @ddt.data(
        'has', 'get'
    )
    def test_object_failure(self, method):
        """Looking up an unknown object raises SwiftUnknownObjectError."""
        instance = model.SwiftServiceModel()
        internal_tenant_id = instance.add_tenant(
            self.tenant_id,
            self.tenant_path
        )
        internal_container_id = instance.add_container(
            internal_tenant_id,
            self.container_name,
            self.container_path
        )
        with self.assertRaises(exceptions.SwiftUnknownObjectError):
            if method == 'has':
                instance.has_object(
                    internal_tenant_id,
                    internal_container_id,
                    self.object_name
                )
            elif method == 'get':
                instance.get_object(
                    internal_tenant_id,
                    internal_container_id,
                    123456
                )

    def test_object_success(self):
        """add_object returns an internal id; has/get round-trip the data."""
        instance = model.SwiftServiceModel()
        internal_tenant_id = instance.add_tenant(
            self.tenant_id,
            self.tenant_path
        )
        internal_container_id = instance.add_container(
            internal_tenant_id,
            self.container_name,
            self.container_path
        )
        internal_object_id = instance.add_object(
            internal_tenant_id,
            internal_container_id,
            self.object_name,
            self.object_path
        )
        self.assertEqual(
            instance.has_object(
                internal_tenant_id,
                internal_container_id,
                self.object_name
            ),
            internal_object_id
        )
        object_data = instance.get_object(
            internal_tenant_id,
            internal_container_id,
            internal_object_id
        )
        self.assertEqual(
            object_data['tenantid'],
            internal_tenant_id
        )
        self.assertEqual(
            object_data['containerid'],
            internal_container_id
        )
        self.assertEqual(
            object_data['objectid'],
            internal_object_id
        )
        self.assertEqual(
            object_data['object_name'],
            self.object_name
        )
        self.assertEqual(
            object_data['path'],
            self.object_path
        )

    def test_remove_object(self):
        """remove_object deletes the row so has_object raises afterwards."""
        instance = model.SwiftServiceModel()
        internal_tenant_id = instance.add_tenant(
            self.tenant_id,
            self.tenant_path
        )
        internal_container_id = instance.add_container(
            internal_tenant_id,
            self.container_name,
            self.container_path
        )
        internal_object_id = instance.add_object(
            internal_tenant_id,
            internal_container_id,
            self.object_name,
            self.object_path
        )
        self.assertEqual(
            instance.has_object(
                internal_tenant_id,
                internal_container_id,
                self.object_name
            ),
            internal_object_id
        )
        instance.remove_object(
            internal_tenant_id,
            internal_container_id,
            internal_object_id
        )
        with self.assertRaises(exceptions.SwiftUnknownObjectError):
            instance.has_object(
                internal_tenant_id,
                internal_container_id,
                self.object_name
            )
| |
import Queue
import StringIO
import gzip
import hashlib
import random
import socket
import struct
import thread
import time
import traceback
import steam
from steam import Buffer, Connection, encrypt, steamd
from steam.mapping import PROTO_MAPPING, WANTS_HEADER, EMSGS
from steam.steamd import (
EClientPersonaStateFlag, EAccountType, EUniverse, EMsg,
EPersonaState, EResult, EFriendRelationship, proto_mask,
MsgClientLogon, EOSType,
)
from steam.protobuf import steam_server, steam_server2, steam_base
class SteamClient:
def __init__(self):
self.steam_id = None
self.session_id = None
self.in_game = None
self.friends = {}
self.users = {}
self.chat_rooms = {}
self.connect_tokens = []
self.login_key = None
self.web_login = None
self.ready = False
self.handlers = {
EMsg.ChannelEncryptRequest: [self.on_encrypt_request],
EMsg.ChannelEncryptResult: [self.on_encrypt_result],
EMsg.ClientAccountInfo: [self.on_account_info],
EMsg.ClientCMList: [self.on_cm_list],
EMsg.ClientEmailAddrInfo: [self.on_addrinfo],
EMsg.ClientFriendMsgIncoming: [self.on_friend_msg],
EMsg.ClientFriendsList: [self.on_friend_list],
EMsg.ClientFromGC: [self.on_from_gc],
EMsg.ClientGameConnectTokens: [self.on_connect_token],
EMsg.ClientLogOnResponse: [self.on_login],
EMsg.ClientNewLoginKey: [self.on_login_key],
EMsg.ClientPersonaState: [self.on_friend_state],
EMsg.ClientRequestValidationMailResponse: [self.on_validate_email],
EMsg.ClientUpdateMachineAuth: [self.on_sentry],
}
self.persona_name = ''
self.persona_state = EPersonaState.Offline
def dispatch(self, emsg, hdr, data):
handlers = self.handlers.get(emsg, [])
for handler in handlers:
if emsg in WANTS_HEADER:
handler(hdr, data)
else:
handler(data)
def login(self, username, password, sentry_hash=None, code=None):
self.steam_id = steam.SteamID(0)
self.steam_id.universe = EUniverse.Public
self.steam_id.account_type = EAccountType.Individual
self.session_id = None
self.username = username
self.password = password
self.sentry_hash = sentry_hash
self.code = code
self.jobs = {}
self.msg_queue = Queue.Queue()
self.current_job = 0
for i in xrange(3):
for server in random.sample(steam.servers, len(steam.servers)):
print 'connecting to:', server
try:
self.connection = Connection(*server)
return
except Exception:
print 'timeout'
raise socket.error('could not connect to server')
def set_persona(self, name=None, state=None):
# FIXME: online state doesn't seem to work
msg = steam_server.CMsgClientChangeStatus()
self.persona_name = name or self.persona_name
self.persona_state = state or self.persona_state
msg.player_name = self.persona_name
msg.persona_state = self.persona_state
self.send(EMsg.ClientChangeStatus | proto_mask, msg)
def add_friend(self, friend):
msg = steam_server.CMsgClientAddFriend()
msg.steamid_to_add = friend
self.send(EMsg.ClientAddFriend | proto_mask, msg)
def remove_friend(self, friend):
msg = steam_server.CMsgClientRemoveFriend()
msg.friendid = friend
self.send(EMsg.ClientRemoveFriend | proto_mask, msg)
def get_friend(self, steamid):
friend = self.friends.get(steamid) or steam.Friend(self, steamid)
self.friends[steamid] = friend
return friend
def play(self, app_id):
game = steam_server.CMsgClientGamesPlayed()
played = steam_server.CMsgClientGamesPlayed.GamePlayed()
played.game_id = app_id
played.game_extra_info = steam.games[app_id]
played.process_id = 1234
played.token = self.connect_tokens.pop(-1)
game.games_played.extend([played])
self.send(EMsg.ClientGamesPlayedWithDataBlob | proto_mask, game)
self.in_game = app_id
def pump(self):
thread.start_new_thread(self.msg_pump, ())
while True:
try:
msg = self.msg_queue.get(False)
if msg:
msgs = self.on_net_msg(msg)
for emsg, hdr, msg in msgs:
yield emsg, msg
except Queue.Empty:
time.sleep(0.1)
except Exception:
print '-' * 20
traceback.print_exc()
print '-' * 20
def on_net_msg(self, data):
d = data.read('<I')
emsg = d & ~proto_mask
is_proto = d & proto_mask
if not EMSGS.get(emsg):
print 'WARNING: skipping', emsg
return []
# TODO: use steamd on these headers
elif emsg in (EMsg.ChannelEncryptRequest,
EMsg.ChannelEncryptResult):
# TODO: not going to actual implement a deserializer for a type that
# only gets used for 2 message types, RIGHT HERE
target = data.read('Q')
source = data.read('Q')
hdr = (target, source)
rest = data.truncate()
self.dispatch(emsg, hdr, rest)
return [(emsg, hdr, rest)]
elif is_proto:
length = data.read('I')
proto = data.read_len(length)
hdr = steam_base.CMsgProtoBufHeader()
hdr.ParseFromString(proto)
if not self.session_id:
self.session_id = hdr.client_sessionid
if emsg in PROTO_MAPPING:
msg = PROTO_MAPPING[emsg]()
msg.ParseFromString(data.read_rest())
if emsg == EMsg.Multi:
return self.on_multi(msg)
else:
self.dispatch(emsg, hdr, msg)
return [(emsg, hdr, msg)]
else:
rest = data.truncate()
self.dispatch(emsg, hdr, rest)
return [(emsg, hdr, rest)]
return []
def on_encrypt_request(self, data):
key, crypted_key, crc32 = encrypt.make_session_key()
resp = steamd.MsgChannelEncryptResponse.dumps({})
resp += crypted_key
resp += struct.pack('<i', crc32)
self.send(EMsg.ChannelEncryptResponse, resp)
self._tmpkey = key
def on_encrypt_result(self, data):
result = data.read('<I')
if result == EResult.OK:
self.connection.key = self._tmpkey
logon = steam_server.CMsgClientLogon()
logon.obfustucated_private_ip = 0
logon.account_name = self.username
logon.password = self.password
logon.should_remember_password = 0
logon.protocol_version = MsgClientLogon.CurrentProtocol
logon.client_os_type = EOSType.Win311
if self.code:
logon.auth_code = self.code
# latest package version is required to get a sentry file
logon.client_package_version = 1771
if self.sentry_hash:
logon.sha_sentryfile = self.sentry_hash
logon.eresult_sentryfile = EResult.OK
else:
logon.eresult_sentryfile = EResult.FileNotFound
self.send(EMsg.ClientLogon | proto_mask, logon)
def on_multi(self, msg):
if msg.size_unzipped:
gf = StringIO.StringIO(msg.message_body)
gp = gzip.GzipFile(fileobj=gf)
payload = gp.read()
payload = Buffer(payload)
else:
payload = Buffer(msg.message_body)
msgs = []
while payload.data:
l = payload.read('<I')
msgs += self.on_net_msg(Buffer(payload.read_len(l)))
payload = payload.truncate()
return msgs
def on_login(self, d):
if d.eresult == EResult.OK:
self.steam_id.id = d.client_supplied_steamid
thread.start_new_thread(self.heartbeat, (d.out_of_game_heartbeat_seconds,))
self.set_persona(state=EPersonaState.Online)
elif d.eresult == EResult.TryAnotherCM:
self.login(self.username, self.password, self.sentry_hash, self.code)
def on_login_key(self, key):
self.ready = True
resp = steam_server.CMsgClientNewLoginKeyAccepted()
resp.unique_id = key.unique_id
self.unique_id = str(key.unique_id)
self.send(EMsg.ClientNewLoginKeyAccepted | proto_mask, resp)
key = self.login_key = key.login_key
self.web_login = steam.web.login(self.steam_id, key)
def on_sentry(self, hdr, msg):
sha1 = hashlib.sha1(msg.bytes).digest()
resp = steam_server2.CMsgClientUpdateMachineAuthResponse()
resp.filename = msg.filename
resp.eresult = EResult.OK
resp.filesize = len(msg.bytes)
resp.sha_file = sha1
resp.getlasterror = 0
resp.offset = msg.offset
resp.cubwrote = msg.cubtowrite
resp.otp_type = msg.otp_type
resp.otp_value = 0
resp.otp_identifier = msg.otp_identifier
self.send(EMsg.ClientUpdateMachineAuthResponse | proto_mask, resp, job=hdr.jobid_source)
def on_account_info(self, msg):
self.persona_name = msg.persona_name
def on_cm_list(self, msg):
# TODO: do something with this server list
addrs = []
for ip, port in zip(msg.cm_addresses, msg.cm_ports):
addrs.append(['.'.join(map(str, reversed(struct.unpack('<BBBB', struct.pack('<I', ip))))), port])
print 'Server list:', addrs
def on_friend_list(self, friends):
more_info = steam_server.CMsgClientRequestFriendData()
more_info.persona_state_requested = EClientPersonaStateFlag.PlayerName | EClientPersonaStateFlag.Presence
if not friends.bincremental:
self.friends = {}
for friend in friends.friends:
fid = friend.ulfriendid
if friend.efriendrelationship == EFriendRelationship.none:
if fid in self.friends:
del self.friends[fid]
else:
self.friends[fid] = steam.Friend(self, fid)
self.friends[fid].relationship = friend.efriendrelationship
more_info.friends.append(fid)
self.send(EMsg.ClientRequestFriendData | proto_mask, more_info)
def on_friend_state(self, msg):
for friend in msg.friends:
fid = friend.friendid
if fid not in self.friends:
self.friends[fid] = steam.Friend(self, fid)
if msg.status_flags & EClientPersonaStateFlag.PlayerName:
self.friends[fid].name = friend.player_name
def on_friend_msg(self, msg):
print msg
def on_addrinfo(self, msg):
# ask steam to send us a validation email if we don't have a verified email address
if not msg.email_is_validated:
source = steam.GID(0)
source.process = 0
source.box = 0
source.sequence = self.sequence + 1
source.start_time = int(time.time())
self.send(EMsg.ClientRequestValidationMail, '', job=long(source), target=0xFFFFFFFFFFFFFFFF)
def on_validate_email(self, msg):
# TODO: check our email for the validation message and hit the verify URL
pass
def on_from_gc(self, msg):
print 'From GC:', repr(msg.payload)
def on_connect_token(self, msg):
print 'Connect tokens:', len(msg.tokens)
for t in msg.tokens:
self.connect_tokens.insert(0, t)
self.connect_tokens = self.connect_tokens[:msg.max_tokens_to_keep]
def heartbeat(self, wait=9):
while 1:
self.send(
EMsg.ClientHeartBeat | proto_mask, steam_server.CMsgClientHeartBeat())
time.sleep(wait)
def msg_pump(self):
while True:
try:
for msg in self.connection.pump():
self.msg_queue.put(msg)
except socket.error:
pass
except Exception:
print '-' * 20
print traceback.format_exc()
print '-' * 20
def send(self, emsg, body, job=None, target=None):
    """Serialize a header for `emsg` and send header+body on the connection.

    emsg   -- message id, possibly OR'd with proto_mask for protobuf framing
    body   -- str payload, or a protobuf message (SerializeToString is used)
    job    -- a callable to register as a response handler (a fresh job id
              is allocated and recorded in self.jobs), or an explicit job id
    target -- when given, overrides the target job id
    """
    if not isinstance(body, str):
        body = body.SerializeToString()
    if isinstance(job, type(lambda: 0)):
        # `job` is a callback: allocate a new job id and remember it.
        self.current_job += 1
        src_id = self.current_job
        self.jobs[src_id] = job
        target_id = src_id
    else:
        src_id = job
        target_id = job
    if target:
        target_id = target
    if emsg == EMsg.ChannelEncryptResponse:
        # Plain header used before encryption is established.
        # NOTE(review): sourceJobID/targetJobID look swapped relative to the
        # other branches -- confirm this is intentional.
        hdr = {
            'msg': emsg,
            'sourceJobID': target_id,
            'targetJobID': src_id,
        }
        hdr = {k: v for (k, v) in hdr.items() if v is not None}
        header = steamd.MsgHdr.dumps(hdr)
    elif emsg & proto_mask:
        # Protobuf framing: <u32 emsg><u32 header-length><header bytes>.
        proto = {
            'steamid': long(self.steam_id),
            'client_sessionid': self.session_id,
            'jobid_target': target_id,
            'jobid_source': src_id,
        }
        proto = {k: v for (k, v) in proto.items() if v is not None}
        proto = steam_base.CMsgProtoBufHeader(**proto).SerializeToString()
        # '\t\x00\x00\x00\x00\x01\x00\x10\x01' # proto
        header = struct.pack('<II', emsg, len(proto)) + proto
    else:
        # Extended binary header for non-protobuf messages.
        hdr = {
            # 'msg': emsg,
            # 'targetJobID': target_id,
            # 'sourceJobID': src_id,
            'steamID': long(self.steam_id),
            'sessionID': self.session_id,
        }
        header = steamd.ExtendedClientMsgHdr.dumps(hdr)
    self.connection.send(header + body)
| |
#!/usr/bin/env python
"""
Gathers results from simulation experiments. Run this from the
simulated_reads subdirectory of the qtip-experiments repo. It descends into
the various experimental subdirectories and parses the Makefiles it finds.
Outputs:
- "overall.csv" with one big table of summary measures
- "summary" subdirectory with lots of raw results compiled into a directory
structure
+ Subdirectories correspond to simulation experiments and contain:
- for each sampling rate:
+ for each trial:
- featimport_*.csv -- feature importances for each alignment type
- params.csv -- feature importances for each model
- Subdirectories for training/test, each with:
+ roc.csv -- ROC table
+ summary.csv -- summarizes data, model fit
"""
from __future__ import print_function
import sys
import os
import re
import logging
from os.path import join
# We identify the experiments by fishing through the Makefiles for output
# targets. When this regex matches a line of a Makefile, we know the
# following lines are giving us the names of targets.
target_re = re.compile('^outs_[_a-zA-Z01-9]*:.*')
def parse_aligner_local(target):
    """Parse (aligner, local-mode flag) out of a Makefile target name.

    The second underscore token names the aligner; bt2 targets encode
    local mode with an 'l' in that token.
    """
    tok = target.split('_')[1]
    if 'snap' in tok:
        return 'snap', True
    if 'bwamem' in tok:
        return 'bwamem', True
    return 'bt2', 'l' in tok
def parse_species(target):
    """Parse the reference genome from a Makefile target name.

    Anything not explicitly recognized is normalized to hg38.
    """
    tok = target.split('_')[2]
    return tok if tok in ('mm', 'zm', 'hg19') else 'hg38'
def parse_sim(target):
    """Parse which read simulator produced this target's reads.

    The third underscore token selects wgsim or art; mason is the default.
    """
    tok = target.split('_')[2]
    return tok if tok in ('wgsim', 'art') else 'mason'
def parse_sensitivity(target, aligner):
    """Parse the sensitivity level; only bt2 targets encode one.

    For bt2 the level is whatever follows 'bt2' in the aligner token;
    every other aligner reports 's'.
    """
    if aligner != 'bt2':
        return 's'
    return target.split('_')[1][3:]
def parse_paired(target):
    """Return True when the target encodes paired-end reads ('r12' prefix)."""
    return target[:3] == 'r12'
def parse_readlen(target):
    """Parse the read length from the target's final underscore token.

    The special token '50to500' (variable-length reads) maps to 500.
    """
    tok = target.rsplit('_', 1)[-1]
    if tok == '50to500':
        return 500
    return int(tok)
def parse_name_and_target(combined):
    """Split a combined name into (experiment name, Makefile target).

    The target part begins at the 'r0'/'r12' token, which sits at token
    index 2 for some experiments and index 3 for the rest.
    """
    toks = combined.split('_')
    roff = 2 if toks[2] in ('r0', 'r12') else 3
    assert toks[roff] in ('r0', 'r12')
    return '_'.join(toks[:roff]), '_'.join(toks[roff:])
def mkdir_quiet(dr):
    """Create the directory path `dr` (and parents) if absent, quietly.

    A concurrent creation race (EEXIST) is tolerated; other OS errors
    propagate.
    """
    import errno
    if os.path.isdir(dr):
        return
    try:
        os.makedirs(dr)
    except OSError as exception:
        if exception.errno != errno.EEXIST:
            raise
def has_done(dr):
    """Report whether qtip finished in `dr` (a DONE marker file exists)."""
    return os.path.exists(os.path.join(dr, 'DONE'))
def roc_file_to_string(roc_fn, inner_sep=':', outer_sep=';'):
    """Flatten a 10-column ROC csv into one string.

    Each data row becomes 'mapq<inner_sep>cor<inner_sep>incor'; rows are
    joined with `outer_sep`.  The header row (mapq column == 'mapq') is
    skipped.  Rows must have exactly ten comma-separated fields.
    """
    rows = []
    with open(roc_fn) as fh:
        for ln in fh:
            cor, _, _, incor, mapq, _, _, _, _, _ = ln.rstrip().split(',')
            if mapq != "mapq":
                rows.append(inner_sep.join((mapq, cor, incor)))
    return outer_sep.join(rows)
def feat_files_to_string(fns):
    """Collapse per-alignment-type feature-importance csvs into one string.

    Files are paired positionally with the type codes 'b', 'c', 'd', 'u';
    missing files are skipped.  Each data row (feature, importance, rank)
    becomes 'type:feature:importance'; rows are joined with ';'.
    """
    pieces = []
    for fn, typ in zip(fns, 'bcdu'):
        if not os.path.exists(fn):
            continue
        with open(fn) as fh:
            for ln in fh:
                if ln.startswith('feature,importance,rank'):
                    continue  # header row
                toks = ln.rstrip().split(',')
                assert len(toks) == 3
                pieces.append(':'.join([typ, toks[0], toks[1]]))
    return ';'.join(pieces)
def compile_line(ofh, combined_target_name, variant, mapq_incl, tt, trial,
                 params_fn, summ_fn, roc_round_fn, roc_orig_fn, feat_fns,
                 first):
    """ Put together one line of output and write to ofh (overall.csv)

    Columns: metadata parsed out of the target name, then the single
    header/value record of params_fn and summ_fn, then the flattened ROC
    tables and feature importances packed into one string field each.
    When `first` is True the header line is written before the values.
    """
    name, target = parse_name_and_target(combined_target_name)
    aligner, local = parse_aligner_local(target)
    paired = parse_paired(target)
    sim = parse_sim(target)
    readlen = parse_readlen(target)
    sensitivity = parse_sensitivity(target, aligner)
    species = parse_species(target)
    headers = ['name', 'combined_name', 'variant', 'mapq_included', 'training', 'trial_no', 'aligner', 'local',
               'paired', 'sim', 'readlen', 'sensitivity', 'species']
    values = [name, combined_target_name, variant, 'T' if mapq_incl else 'F', 'T' if tt == 'train' else 'F',
              trial, aligner, 'T' if local else 'F', 'T' if paired else 'F', sim,
              str(readlen), sensitivity, species]
    # params.csv and summary.csv are single-record CSVs: one header line
    # followed by one value line.
    for fn in [params_fn, summ_fn]:
        with open(fn, 'r') as fh:
            header = fh.readline().rstrip()
            headers += header.split(',')
            body = fh.readline().rstrip()
            values += body.split(',')
    # Add ROCs; these are big long strings
    headers.extend(['roc_round', 'roc_orig'])
    values.extend([roc_file_to_string(roc_round_fn),
                   roc_file_to_string(roc_orig_fn)])
    # Add feature importances; also big long strings
    headers.append('feat')
    values.append(feat_files_to_string(feat_fns))
    if first:
        ofh.write(','.join(map(str, headers)) + '\n')
    ofh.write(','.join(map(str, values)) + '\n')
def get_immediate_subdirectories(a_dir):
    """Return the names of the directories directly inside `a_dir`."""
    subdirs = []
    for entry in os.listdir(a_dir):
        if os.path.isdir(os.path.join(a_dir, entry)):
            subdirs.append(entry)
    return subdirs
def targets_from_makefile(dirname, fn):
    """Yield (target, full_target_path) for each finished target listed in
    Makefile `fn` under `dirname`.

    Target names are taken from the lines following an `outs_...:` rule
    header (first whitespace token of each line), up to the next blank
    line.  A trailing '/DONE' is stripped; only targets whose directory
    actually contains a DONE file are yielded.
    """
    name = os.path.basename(dirname)
    # NOTE(review): this joins the *basename*, not `dirname`; equivalent
    # only when walking depth-1 dirs from the parent directory -- confirm.
    if os.path.exists(join(name, 'IGNORE')):
        return
    with open(join(dirname, fn)) as fh:
        in_target = False
        for ln in fh:
            if target_re.match(ln):
                in_target = True
            elif in_target:
                if len(ln.rstrip()) == 0:
                    in_target = False
                else:
                    # Parsing a target from the list of targets in the Makefile
                    target = ln.split()[0]
                    if target.endswith('/DONE'):
                        target = target[:-5]
                    target_full = join(dirname, target)
                    if has_done(target_full):
                        yield target, target_full
                    else:
                        print("%s does not have DONE file" % target_full, file=sys.stderr)
def handle_dir(dirname, combined_target_name, variant, dest_dirname, ofh, first):
    """Walk one target directory and write one overall.csv line per
    (sampling rate) x (mapq inclusion) x (trial) x (test/train) combo.

    Layout handled:
    dirname[/sampleRATE][/mapq_{included,excluded}]/trialN/{test,train}.
    `dest_dirname` is not used inside this function.
    """
    for dir_samp in get_immediate_subdirectories(dirname):
        print('dir_samp=' + dir_samp, file=sys.stderr)
        if dir_samp.startswith('sample'):
            rate = dir_samp[6:]
            logging.info('  Found sampling rate: %s' % rate)
            target_full_s = join(dirname, 'sample' + rate)
            if not os.path.isdir(target_full_s):
                logging.warn('*** Directory "%s" does not exist!' % target_full_s)
            # NOTE(review): when the isdir check above fails we still list
            # the missing path on the next line -- confirm intended.
            next_subdirs1 = get_immediate_subdirectories(target_full_s)
        else:
            # No sampling-rate level; stay in dirname.
            target_full_s = dirname
            next_subdirs1 = [dir_samp]
        for dir_mapq in next_subdirs1:
            if dir_mapq in ['mapq_excluded', 'mapq_included']:
                mapq_included = dir_mapq == 'mapq_included'
                logging.info('  Found %s' % dir_mapq)
                target_full_sm = join(target_full_s, dir_mapq)
                if not os.path.isdir(target_full_sm):
                    logging.warn('*** Directory "%s" does not exist!' % target_full_sm)
                    continue
                next_subdirs2 = get_immediate_subdirectories(target_full_sm)
            else:
                # No mapq level; expect trial directories directly.
                assert dir_mapq.startswith('trial'), dir_mapq
                target_full_sm = target_full_s
                mapq_included = False
                next_subdirs2 = [dir_mapq]
            for dir_trial in next_subdirs2:
                assert dir_trial.startswith('trial')
                trial = dir_trial[5:]
                logging.info('  Found trial: %s' % trial)
                target_full_smt = join(target_full_sm, 'trial' + trial)
                if not os.path.isdir(target_full_smt):
                    logging.warn('*** Directory "%s" does not exist!' % target_full_smt)
                    continue
                params_fn = join(target_full_smt, 'params.csv')
                feat_fns = list(map(lambda x: join(target_full_smt, 'featimport_%s.csv' % x), 'bcdu'))
                for tt in ['test', 'train']:
                    target_full_smtt = join(target_full_smt, tt)
                    if not os.path.isdir(target_full_smtt):
                        logging.warn('*** Directory "%s" does not exist!' % target_full_smtt)
                        continue
                    summ_fn = join(target_full_smtt, 'summary.csv')
                    roc_round_fn = join(target_full_smtt, 'roc_round.csv')
                    roc_orig_fn = join(target_full_smtt, 'roc_orig.csv')
                    compile_line(ofh, combined_target_name, variant,
                                 mapq_included, tt, trial, params_fn,
                                 summ_fn, roc_round_fn, roc_orig_fn,
                                 feat_fns, first)
                    first = False
def go():
    """Top-level driver: find experiment output directories, compile all
    results into summary*/overall*.csv, then tar up the summary directory.

    Honors `--experiment NAME` on sys.argv: scopes the Makefile name, the
    output csv name and the summary directory to that experiment.
    """
    # Set up logger
    logging.basicConfig(format='%(asctime)s:%(levelname)s:%(message)s',
                        datefmt='%m/%d/%y-%H:%M:%S', level=logging.DEBUG)

    # Open output overall.csv file
    first = True
    makefile_fn = 'Makefile'
    out_fn = 'overall.csv'
    summary_fn = 'summary'
    exp_name = None
    if '--experiment' in sys.argv:
        exp_name = sys.argv[sys.argv.index('--experiment')+1]
        makefile_fn = 'Makefile.' + exp_name
        out_fn = 'overall.' + exp_name + '.csv'
        summary_fn = 'summary_%s' % exp_name

    # Set up output directory
    if os.path.exists(summary_fn):
        raise RuntimeError('%s directory exists' % summary_fn)
    mkdir_quiet(summary_fn)

    with open(join(summary_fn, out_fn), 'w') as fh:
        if '--experiment' in sys.argv:
            # Descend into subdirectories looking for Makefiles
            for dirname, dirs, files in os.walk('.'):
                for dr in dirs:
                    # Output dirs look like <target>.<experiment>.<variant>.out
                    ma = re.match('^.*\.%s\.([^.]*)\.out$' % exp_name, dr)
                    if ma is not None:
                        variant = ma.group(1)
                        target_dir = join(dirname, dr)
                        combined_target_name = os.path.basename(dirname) + '_' + dr[:dr.index('.')]
                        logging.info('Found target dir: %s (variant=%s)' % (target_dir, variant))
                        handle_dir(target_dir, combined_target_name, variant, summary_fn, fh, first)
                        first = False
        else:
            # Descend into subdirectories looking for Makefiles
            for dirname, dirs, files in os.walk('.'):
                name = os.path.basename(dirname)
                for fn in files:
                    if fn == makefile_fn:
                        logging.info('Found a Makefile: %s' % join(dirname, fn))
                        for target, target_full in targets_from_makefile(dirname, makefile_fn):
                            # target[:-4] strips the last 4 chars (presumably
                            # a '.out' suffix) -- confirm.
                            combined_target_name = name + '_' + target[:-4]
                            logging.info('  Found target dir: %s (normal)' % join(dirname, target))
                            handle_dir(join(dirname, target), combined_target_name, 'normal', summary_fn, fh, first)
                            first = False

    # Compress the output directory, which is large because of the CID and CSE curves
    os.system('tar -cvzf %s.tar.gz %s' % (summary_fn, summary_fn))
# Script entry point.  With --slurm, write a batch script that re-invokes
# this program on the cluster and print the sbatch command for the user to
# submit; otherwise gather results right here.
if '--slurm' in sys.argv:
    script_fn = '.gather.sh'
    gather_args = ''
    if '--experiment' in sys.argv:
        exp_name = sys.argv[sys.argv.index('--experiment')+1]
        script_fn = '.gather_%s.sh' % exp_name
        gather_args = '--experiment ' + exp_name
    my_hours = 4
    with open(script_fn, 'w') as ofh:
        print("#!/bin/bash -l", file=ofh)
        print("#SBATCH", file=ofh)
        print("#SBATCH --nodes=1", file=ofh)
        print("#SBATCH --mem=4G", file=ofh)
        if '--scavenger' in sys.argv:
            print('#SBATCH --partition=scavenger', file=ofh)
            print('#SBATCH --qos=scavenger', file=ofh)
        else:
            print('#SBATCH --partition=shared', file=ofh)
        print('#SBATCH --time=%d:00:00' % my_hours, file=ofh)
        print('#SBATCH --output=%s.o' % script_fn, file=ofh)
        print('#SBATCH --error=%s.e' % script_fn, file=ofh)
        print('python gather.py %s' % gather_args, file=ofh)
    print('sbatch ' + script_fn)
else:
    go()
| |
__source__ = 'https://leetcode.com/problems/number-of-islands-ii/'
# Time: O(klog*k) ~= O(k), k is the length of the positions
# Space: O(k)
#
# Description: Leetcode # 305. Number of Islands II
#
# A 2d grid map of m rows and n columns is initially filled with water.
# We may perform an addLand operation which turns the water at position (row, col) into a land.
# Given a list of positions to operate, count the number of islands after each addLand operation.
# An island is surrounded by water and is formed by connecting adjacent lands horizontally or vertically.
# You may assume all four edges of the grid are all surrounded by water.
#
# Example:
#
# Given m = 3, n = 3, positions = [[0,0], [0,1], [1,2], [2,1]].
# Initially, the 2d grid grid is filled with water. (Assume 0 represents water and 1 represents land).
#
# 0 0 0
# 0 0 0
# 0 0 0
# Operation #1: addLand(0, 0) turns the water at grid[0][0] into a land.
#
# 1 0 0
# 0 0 0 Number of islands = 1
# 0 0 0
# Operation #2: addLand(0, 1) turns the water at grid[0][1] into a land.
#
# 1 1 0
# 0 0 0 Number of islands = 1
# 0 0 0
# Operation #3: addLand(1, 2) turns the water at grid[1][2] into a land.
#
# 1 1 0
# 0 0 1 Number of islands = 2
# 0 0 0
# Operation #4: addLand(2, 1) turns the water at grid[2][1] into a land.
#
# 1 1 0
# 0 0 1 Number of islands = 3
# 0 1 0
# We return the result as an array: [1, 1, 2, 3]
#
# Challenge:
#
# Can you do it in time complexity O(k log mn), where k is the length of the positions?
#
# Companies
# Google
# Related Topics
# Union Find
# Similar Questions
# Number of Islands
# Time: O(klog*k) ~= O(k), k is the length of the positions
# Space: O(k)
import unittest
class Solution(object):
    def numIslands2(self, m, n, positions):
        """Count islands after each addLand operation, via union-find.

        Fix over the original: the parent dictionary was named `set`,
        shadowing the builtin; it is renamed `roots`.  Behavior unchanged.

        :type m: int
        :type n: int
        :type positions: List[List[int]]
        :rtype: List[int]  -- island count after each position is added
        """
        def node_id(node, n):
            # Flatten (row, col) to a single integer key.
            return node[0] * n + node[1]

        def find_set(x):
            if roots[x] != x:
                roots[x] = find_set(roots[x])  # path compression.
            return roots[x]

        def union_set(x, y):
            x_root, y_root = find_set(x), find_set(y)
            roots[min(x_root, y_root)] = max(x_root, y_root)

        numbers = []
        number = 0
        directions = [(0, -1), (0, 1), (-1, 0), (1, 0)]
        roots = {}  # node_id -> parent node_id (was named `set`)
        for position in positions:
            node = (position[0], position[1])
            roots[node_id(node, n)] = node_id(node, n)
            number += 1
            for d in directions:
                neighbor = (position[0] + d[0], position[1] + d[1])
                if 0 <= neighbor[0] < m and 0 <= neighbor[1] < n and \
                        node_id(neighbor, n) in roots:
                    if find_set(node_id(node, n)) != find_set(node_id(neighbor, n)):
                        # Merge different islands, amortised time: O(log*k) ~= O(1)
                        union_set(node_id(node, n), node_id(neighbor, n))
                        number -= 1
            numbers.append(number)
        return numbers
class Solution2(object):
    def numIslands2(self, m, n, positions):
        """Island counts after each addLand, delegating to the Union helper.

        :type m: int
        :type n: int
        :type positions: List[List[int]]
        :rtype: List[int]
        """
        counts = []
        islands = Union()
        for p in map(tuple, positions):
            islands.add(p)
            # Try to merge with each of the four orthogonal neighbors.
            for dr, dc in ((0, 1), (0, -1), (1, 0), (-1, 0)):
                q = (p[0] + dr, p[1] + dc)
                if q in islands.id:
                    islands.unite(p, q)
            counts.append(islands.count)
        return counts
class Union(object):
    """Weighted quick-union with path halving over arbitrary hashable keys."""

    def __init__(self):
        self.id = {}    # element -> parent element
        self.sz = {}    # root element -> component size
        self.count = 0  # number of disjoint components

    def add(self, p):
        """Register `p` as a fresh singleton component."""
        self.id[p] = p
        self.sz[p] = 1
        self.count += 1

    def root(self, i):
        """Return the representative of `i`, halving the path on the way up."""
        while i != self.id[i]:
            self.id[i] = self.id[self.id[i]]
            i = self.id[i]
        return i

    def unite(self, p, q):
        """Merge the components of `p` and `q` (no-op when already joined)."""
        ri, rj = self.root(p), self.root(q)
        if ri == rj:
            return
        # Attach the smaller tree beneath the larger (ties keep p's root low).
        small, big = (ri, rj) if self.sz[ri] <= self.sz[rj] else (rj, ri)
        self.id[small] = big
        self.sz[big] += self.sz[small]
        self.count -= 1
class TestMethods(unittest.TestCase):
    # Placeholder suite so `unittest.main()` has something to run.
    def test_Local(self):
        self.assertEqual(1, 1)
if __name__ == '__main__':
    # Run the placeholder unittest suite when executed directly.
    unittest.main()
Java = '''
# Thought: https://leetcode.com/problems/number-of-islands-ii/solution/
https://discuss.leetcode.com/topic/29613/easiest-java-solution-with-explanations
# Union-Find
# 17ms 91.20%
class Solution {
int[][] dirs = {{0, 1}, {1, 0}, {-1, 0}, {0, -1}};
public List<Integer> numIslands2(int m, int n, int[][] positions) {
List<Integer> result = new ArrayList<>();
if(m <= 0 || n <= 0) return result;
int count = 0; // number of islands
int[] roots = new int[m * n]; // one island = one tree
Arrays.fill(roots, -1);
for(int[] p : positions) {
int root = n * p[0] + p[1]; // assume new point is isolated island
roots[root] = root; // add new island
count++;
for(int[] dir : dirs) {
int x = p[0] + dir[0];
int y = p[1] + dir[1];
int nb = n * x + y;
if(x < 0 || x >= m || y < 0 || y >= n || roots[nb] == -1) continue;
int rootNb = findIsland(roots, nb);
if(root != rootNb) { // if neighbor is in another island
roots[root] = rootNb; // union two islands
root = rootNb; // current tree root = joined tree root
count--;
}
}
result.add(count);
}
return result;
}
public int findIsland(int[] roots, int id) {
while(id != roots[id]) {
roots[id] = roots[roots[id]]; // only one line added
id = roots[id];
}
return id;
}
}
# https://discuss.leetcode.com/topic/29518/java-python-clear-solution-with-unionfind-class-weighting-and-path-compression
Java/Python clear solution with UnionFind Class (Weighting and Path compression)
Union Find
is an abstract data structure supporting find and unite on disjointed sets of objects,
typically used to solve the network connectivity problem.
The two operations are defined like this:
find(a,b) : are a and b belong to the same set?
unite(a,b) : if a and b are not in the same set, unite the sets they belong to.
With this data structure, it is very fast for solving our problem. Every position is an new land,
if the new land connect two islands a and b, we combine them to form a whole.
The answer is then the number of the disjointed sets.
The following algorithm is derived from Princeton's lecture note on Union Find in Algorithms and Data Structures
It is a well organized note with clear illustration describing from the naive QuickFind to the one with Weighting
and Path compression.
With Weighting and Path compression, The algorithm runs in O((M+N) log* N) where M is the number of operations
( unite and find ), N is the number of objects, log* is iterated logarithm while the naive runs in O(MN).
For our problem, If there are N positions, then there are O(N) operations and N objects then total is O(N log*N),
when we don't consider the O(mn) for array initialization.
Note that log*N is almost constant (for N = 265536, log*N = 5) in this universe, so the algorithm is almost linear
with N.
However, if the map is very big, then the initialization of the arrays can cost a lot of time when mn is much larger
than N. In this case we should consider using a hashmap/dictionary for the underlying data structure to avoid this
overhead.
Of course, we can put all the functionality into the Solution class which will make the code a lot shorter.
But from a design point of view a separate class dedicated to the data structure is more readable and reusable.
I implemented the idea with 2D interface to better fit the problem.
# Union-Find 2D
# 81.60% 18ms
class Solution {
private int[][] dir = {{0, 1}, {0, -1}, {-1, 0}, {1, 0}};
public List<Integer> numIslands2(int m, int n, int[][] positions) {
UnionFind2D islands = new UnionFind2D(m, n);
List<Integer> ans = new ArrayList<>();
for (int[] position : positions) {
int x = position[0], y = position[1];
int p = islands.add(x, y);
for (int[] d : dir) {
int q = islands.getID(x + d[0], y + d[1]);
if (q > 0 && !islands.find(p, q))
islands.unite(p, q);
}
ans.add(islands.size());
}
return ans;
}
}
class UnionFind2D {
private int[] id;
private int[] sz;
private int m, n, count;
public UnionFind2D(int m, int n) {
this.count = 0;
this.n = n;
this.m = m;
this.id = new int[m * n + 1];
this.sz = new int[m * n + 1];
}
public int index(int x, int y) { return x * n + y + 1; }
public int size() { return this.count; }
public int getID(int x, int y) {
if (0 <= x && x < m && 0<= y && y < n)
return id[index(x, y)];
return 0;
}
public int add(int x, int y) {
int i = index(x, y);
id[i] = i; sz[i] = 1;
++count;
return i;
}
public boolean find(int p, int q) {
return root(p) == root(q);
}
public void unite(int p, int q) {
int i = root(p), j = root(q);
if (sz[i] < sz[j]) { //weighted quick union
id[i] = j; sz[j] += sz[i];
} else {
id[j] = i; sz[i] += sz[j];
}
--count;
}
private int root(int i) {
for (;i != id[i]; i = id[i])
id[i] = id[id[i]]; //path compression
return i;
}
}
# 11ms 88.04%
class Solution {
int[] dx = {0, 1, -1, 0};
int[] dy = {1, 0, 0, -1};
public List<Integer> numIslands2(int m, int n, int[][] positions) {
List<Integer> result = new ArrayList<>();
if (m <= 0 || n <= 0 ) return result;
int count = 0;
int[] roots = new int[m*n];
Arrays.fill(roots, -1);
for (int[] p : positions) {
int root = p[0] * n + p[1];
roots[root] = root;
count ++;
for (int dir = 0 ; dir < 4 ; dir ++) {
int nx = p[0] + dx[dir];
int ny = p[1] + dy[dir];
int nid = nx * n + ny;
if (nx < 0 || ny < 0 || nx >= m || ny >= n || roots[nid] == -1)
continue;
int rootNP = findRoot (nid, roots);
if (rootNP != root) {
roots[root] = rootNP;
root = rootNP;
count --;
}
}
result.add(count);
}
return result;
}
public int findRoot (int id, int[] roots) {
if (roots[id] == id) {
return id;
}
return roots[id] = findRoot(roots[id], roots);
}
}
# Non Union Find
Approach #2: (Ad hoc) [Accepted]
Complexity Analysis
Time complexity : O(L^2), for each operation,
we have to traverse the entire HashMap to update island id, and the number of operations is L.
Space complexity : O(L) for the HashMap.
# 1618ms 4.89%
class Solution {
public List<Integer> numIslands2(int m, int n, int[][] positions) {
List<Integer> ans = new ArrayList<>();
HashMap<Integer, Integer> land2id = new HashMap<Integer, Integer>();
int num_islands = 0;
int island_id = 0;
for (int[] pos: positions) {
int r = pos[0], c = pos[1];
Set<Integer> overlap = new HashSet<Integer>();
if (r - 1 >= 0 && land2id.containsKey((r - 1) * n + c)) {
overlap.add(land2id.get((r-1) * n + c));
}
if (r + 1 < m && land2id.containsKey((r+1) * n + c)) {
overlap.add(land2id.get((r+1) * n + c));
}
if (c - 1 >= 0 && land2id.containsKey(r * n + c - 1)) {
overlap.add(land2id.get(r * n + c - 1));
}
if (c + 1 < n && land2id.containsKey(r * n + c + 1)) {
overlap.add(land2id.get(r * n + c + 1));
}
if (overlap.isEmpty()) {
++num_islands;
land2id.put(r * n + c, island_id++);
} else if (overlap.size() == 1) {
land2id.put(r * n + c, overlap.iterator().next());
} else {
int root_id = overlap.iterator().next();
for (Map.Entry<Integer, Integer> entry : land2id.entrySet()) {
int k = entry.getKey();
int id = entry.getValue();
if (overlap.contains(id)) {
land2id.put(k, root_id);
}
}
land2id.put(r * n + c, root_id);
num_islands -= (overlap.size() - 1);
}
ans.add(num_islands);
}
return ans;
}
}
Approach #3: Union Find (aka Disjoint Set) [Accepted]
Complexity Analysis
Time complexity : O(m x n + L) where L is the number of operations,
m is the number of rows and n is the number of columns.
it takes O(m x n) to initialize UnionFind, and O(L) to process positions.
Note that Union operation takes essentially constant time^1
when UnionFind is implemented with both path compression and union by rank.
Space complexity : O(m x n) as required by UnionFind data structure.
# 32ms 39.85%
class Solution {
public List<Integer> numIslands2(int m, int n, int[][] positions) {
List<Integer> ans = new ArrayList<>();
UnionFind uf = new UnionFind(m * n);
for (int[] pos : positions) {
int r = pos[0], c = pos[1];
List<Integer> overlap = new ArrayList<>();
if (r - 1 >= 0 && uf.isValid((r-1) * n + c)) overlap.add((r-1) * n + c);
if (r + 1 < m && uf.isValid((r+1) * n + c)) overlap.add((r+1) * n + c);
if (c - 1 >= 0 && uf.isValid(r * n + c - 1)) overlap.add(r * n + c - 1);
if (c + 1 < n && uf.isValid(r * n + c + 1)) overlap.add(r * n + c + 1);
int index = r * n + c;
uf.setParent(index);
for (int i : overlap) uf.union(i, index);
ans.add(uf.getCount());
}
return ans;
}
class UnionFind {
int count;
int[] parent;
int[] rank;
public UnionFind(char[][] grid) { // for problem 200
count = 0;
int m = grid.length;
int n = grid[0].length;
parent = new int[m * n];
rank = new int[m * n];
for (int i = 0; i < m; i++) {
for (int j = 0; j < n ; j++) {
if (grid[i][j] == '1') {
parent[i * n + j] = i * n + j;
++count;
}
rank[i * n + j] = 0;
}
}
}
public UnionFind(int N) { // for problem 305 and others
count = 0;
parent = new int[N];
rank = new int[N];
for (int i = 0; i < N; ++i) {
parent[i] = -1;
rank[i] = 0;
}
}
public boolean isValid(int i) { // for problem 305
return parent[i] >= 0;
}
public void setParent(int i) {
parent[i] = i;
++count;
}
public int getCount() {
return count;
}
public int find(int i) { // path compression
if (i != parent[i]) {
parent[i] = find(parent[i]);
}
return parent[i];
}
public void union(int x, int y) {
int rootx = find(x);
int rooty = find(y);
if (rootx != rooty) {
if (rank[rootx] > rank[rooty]) {
parent[rooty] = rootx;
} else if (rank[rootx] < rank[rooty]) {
parent[rootx] = rooty;
} else {
parent[rooty] = rootx;
rank[rootx]++;
}
--count;
}
}
}
}
'''
| |
# -*- coding: utf-8 -*-
"""
pygments.lexers.grammar_notation
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Lexers for grammar notations like BNF.
:copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, bygroups, include, this, using, words
from pygments.token import Comment, Keyword, Literal, Name, Number, \
Operator, Punctuation, String, Text
__all__ = ['BnfLexer', 'AbnfLexer', 'JsgfLexer']
class BnfLexer(RegexLexer):
    """
    This lexer is for grammar notations which are similar to
    original BNF.

    In order to maximize the number of targets of this lexer,
    some design decisions were made:

    * We don't distinguish `Terminal Symbol`.

    * We do assume that `NonTerminal Symbol` are always enclosed
      with arrow brackets.

    * We do assume that `NonTerminal Symbol` may include
      any printable characters except arrow brackets and ASCII 0x20.
      This assumption is for `RBNF <http://www.rfc-base.org/txt/rfc-5511.txt>`_.

    * We do assume that target notation doesn't support comment.

    * We don't distinguish any operators and punctuation except
      `::=`.

    Though this decision making might cause too minimal highlighting
    and you might be disappointed, it is reasonable for us.

    .. versionadded:: 2.1
    """

    name = 'BNF'
    aliases = ['bnf']
    filenames = ['*.bnf']
    mimetypes = ['text/x-bnf']

    tokens = {
        'root': [
            # Nonterminal: any printable characters between angle brackets.
            (r'(<)([ -;=?-~]+)(>)',
             bygroups(Punctuation, Name.Class, Punctuation)),

            # the only operator recognized
            (r'::=', Operator),

            # fallback
            (r'[^<>:]+', Text),  # for performance
            (r'.', Text),
        ],
    }
class AbnfLexer(RegexLexer):
    """
    Lexer for `IETF 7405 ABNF
    <http://www.ietf.org/rfc/rfc7405.txt>`_
    (Updates `5234 <http://www.ietf.org/rfc/rfc5234.txt>`_)
    grammars.

    .. versionadded:: 2.1
    """

    name = 'ABNF'
    aliases = ['abnf']
    filenames = ['*.abnf']
    mimetypes = ['text/x-abnf']

    # RFC 5234's "Core Rules", highlighted below as keywords.
    _core_rules = (
        'ALPHA', 'BIT', 'CHAR', 'CR', 'CRLF', 'CTL', 'DIGIT',
        'DQUOTE', 'HEXDIG', 'HTAB', 'LF', 'LWSP', 'OCTET',
        'SP', 'VCHAR', 'WSP')

    tokens = {
        'root': [
            # comment
            (r';.*$', Comment.Single),

            # quoted
            # double quote itself in this state, it is as '%x22'.
            (r'(%[si])?"[^"]*"', Literal),

            # binary (but i have never seen...)
            (r'%b[01]+\-[01]+\b', Literal),  # range
            (r'%b[01]+(\.[01]+)*\b', Literal),  # concat

            # decimal
            (r'%d[0-9]+\-[0-9]+\b', Literal),  # range
            (r'%d[0-9]+(\.[0-9]+)*\b', Literal),  # concat

            # hexadecimal
            (r'%x[0-9a-fA-F]+\-[0-9a-fA-F]+\b', Literal),  # range
            (r'%x[0-9a-fA-F]+(\.[0-9a-fA-F]+)*\b', Literal),  # concat

            # repetition (<a>*<b>element) including nRule
            (r'\b[0-9]+\*[0-9]+', Operator),
            (r'\b[0-9]+\*', Operator),
            (r'\b[0-9]+', Operator),
            (r'\*', Operator),

            # Strictly speaking, these are not keywords but
            # are called `Core Rules'.
            (words(_core_rules, suffix=r'\b'), Keyword),

            # nonterminals (ALPHA *(ALPHA / DIGIT / "-"))
            (r'[a-zA-Z][a-zA-Z0-9-]+\b', Name.Class),

            # operators
            (r'(=/|=|/)', Operator),

            # punctuation
            (r'[\[\]()]', Punctuation),

            # fallback
            (r'\s+', Text),
            (r'.', Text),
        ],
    }
class JsgfLexer(RegexLexer):
    """
    For `JSpeech Grammar Format <https://www.w3.org/TR/jsgf/>`_
    grammars.

    .. versionadded:: 2.2
    """
    name = 'JSGF'
    aliases = ['jsgf']
    filenames = ['*.jsgf']
    mimetypes = ['application/jsgf', 'application/x-jsgf', 'text/jsgf']

    flags = re.MULTILINE | re.UNICODE

    tokens = {
        'root': [
            include('comments'),
            include('non-comments'),
        ],
        'comments': [
            # Javadoc-style documentation comment gets its own state.
            (r'/\*\*(?!/)', Comment.Multiline, 'documentation comment'),
            (r'/\*[\w\W]*?\*/', Comment.Multiline),
            (r'//.*', Comment.Single),
        ],
        'non-comments': [
            # Self-identifying header at the start of the file.
            (r'\A#JSGF[^;]*', Comment.Preproc),
            (r'\s+', Text),
            (r';', Punctuation),
            (r'[=|()\[\]*+]', Operator),
            # Slash-delimited number, presumably a rule weight -- lexed as a float.
            (r'/[^/]+/', Number.Float),
            (r'"', String.Double, 'string'),
            (r'\{', String.Other, 'tag'),
            (words(('import', 'public'), suffix=r'\b'), Keyword.Reserved),
            (r'grammar\b', Keyword.Reserved, 'grammar name'),
            (r'(<)(NULL|VOID)(>)',
             bygroups(Punctuation, Name.Builtin, Punctuation)),
            (r'<', Punctuation, 'rulename'),
            (r'\w+|[^\s;=|()\[\]*+/"{<\w]+', Text),
        ],
        'string': [
            (r'"', String.Double, '#pop'),
            (r'\\.', String.Escape),
            (r'[^\\"]+', String.Double),
        ],
        'tag': [
            (r'\}', String.Other, '#pop'),
            (r'\\.', String.Escape),
            (r'[^\\}]+', String.Other),
        ],
        'grammar name': [
            (r';', Punctuation, '#pop'),
            (r'\s+', Text),
            (r'\.', Punctuation),
            (r'[^;\s.]+', Name.Namespace),
        ],
        'rulename': [
            (r'>', Punctuation, '#pop'),
            (r'\*', Punctuation),
            (r'\s+', Text),
            # Dotted qualifier parts before the final rule name.
            (r'([^.>]+)(\s*)(\.)', bygroups(Name.Namespace, Text, Punctuation)),
            (r'[^.>]+', Name.Constant),
        ],
        'documentation comment': [
            (r'\*/', Comment.Multiline, '#pop'),
            # @example/@see bodies are re-lexed with the 'example' state.
            (r'(^\s*\*?\s*)(@(?:example|see)\s+)'
             r'([\w\W]*?(?=(?:^\s*\*?\s*@|\*/)))',
             bygroups(Comment.Multiline, Comment.Special,
                      using(this, state='example'))),
            (r'(^\s*\*?\s*)(@\S*)',
             bygroups(Comment.Multiline, Comment.Special)),
            (r'[^*\n@]+|\w|\W', Comment.Multiline),
        ],
        'example': [
            (r'\n\s*\*', Comment.Multiline),
            include('non-comments'),
            (r'.', Comment.Multiline),
        ],
    }
| |
# coding: utf-8
"""
ORCID Member
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: Latest
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from orcid_api_v3.models.created_date_v20 import CreatedDateV20 # noqa: F401,E501
from orcid_api_v3.models.external_i_ds_v20 import ExternalIDsV20 # noqa: F401,E501
from orcid_api_v3.models.last_modified_date_v20 import LastModifiedDateV20 # noqa: F401,E501
from orcid_api_v3.models.publication_date_v20 import PublicationDateV20 # noqa: F401,E501
from orcid_api_v3.models.source_v20 import SourceV20 # noqa: F401,E501
from orcid_api_v3.models.work_title_v20 import WorkTitleV20 # noqa: F401,E501
class WorkSummaryV20(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'put_code': 'int',
'created_date': 'CreatedDateV20',
'last_modified_date': 'LastModifiedDateV20',
'source': 'SourceV20',
'title': 'WorkTitleV20',
'external_ids': 'ExternalIDsV20',
'type': 'str',
'publication_date': 'PublicationDateV20',
'visibility': 'str',
'path': 'str',
'display_index': 'str'
}
attribute_map = {
'put_code': 'put-code',
'created_date': 'created-date',
'last_modified_date': 'last-modified-date',
'source': 'source',
'title': 'title',
'external_ids': 'external-ids',
'type': 'type',
'publication_date': 'publication-date',
'visibility': 'visibility',
'path': 'path',
'display_index': 'display-index'
}
def __init__(self, put_code=None, created_date=None, last_modified_date=None, source=None, title=None, external_ids=None, type=None, publication_date=None, visibility=None, path=None, display_index=None): # noqa: E501
"""WorkSummaryV20 - a model defined in Swagger""" # noqa: E501
self._put_code = None
self._created_date = None
self._last_modified_date = None
self._source = None
self._title = None
self._external_ids = None
self._type = None
self._publication_date = None
self._visibility = None
self._path = None
self._display_index = None
self.discriminator = None
if put_code is not None:
self.put_code = put_code
if created_date is not None:
self.created_date = created_date
if last_modified_date is not None:
self.last_modified_date = last_modified_date
if source is not None:
self.source = source
if title is not None:
self.title = title
if external_ids is not None:
self.external_ids = external_ids
if type is not None:
self.type = type
if publication_date is not None:
self.publication_date = publication_date
if visibility is not None:
self.visibility = visibility
if path is not None:
self.path = path
if display_index is not None:
self.display_index = display_index
@property
def put_code(self):
"""Gets the put_code of this WorkSummaryV20. # noqa: E501
:return: The put_code of this WorkSummaryV20. # noqa: E501
:rtype: int
"""
return self._put_code
@put_code.setter
def put_code(self, put_code):
"""Sets the put_code of this WorkSummaryV20.
:param put_code: The put_code of this WorkSummaryV20. # noqa: E501
:type: int
"""
self._put_code = put_code
@property
def created_date(self):
"""Gets the created_date of this WorkSummaryV20. # noqa: E501
:return: The created_date of this WorkSummaryV20. # noqa: E501
:rtype: CreatedDateV20
"""
return self._created_date
@created_date.setter
def created_date(self, created_date):
"""Sets the created_date of this WorkSummaryV20.
:param created_date: The created_date of this WorkSummaryV20. # noqa: E501
:type: CreatedDateV20
"""
self._created_date = created_date
@property
def last_modified_date(self):
"""Gets the last_modified_date of this WorkSummaryV20. # noqa: E501
:return: The last_modified_date of this WorkSummaryV20. # noqa: E501
:rtype: LastModifiedDateV20
"""
return self._last_modified_date
@last_modified_date.setter
def last_modified_date(self, last_modified_date):
"""Sets the last_modified_date of this WorkSummaryV20.
:param last_modified_date: The last_modified_date of this WorkSummaryV20. # noqa: E501
:type: LastModifiedDateV20
"""
self._last_modified_date = last_modified_date
@property
def source(self):
"""Gets the source of this WorkSummaryV20. # noqa: E501
:return: The source of this WorkSummaryV20. # noqa: E501
:rtype: SourceV20
"""
return self._source
@source.setter
def source(self, source):
"""Sets the source of this WorkSummaryV20.
:param source: The source of this WorkSummaryV20. # noqa: E501
:type: SourceV20
"""
self._source = source
@property
def title(self):
"""Gets the title of this WorkSummaryV20. # noqa: E501
:return: The title of this WorkSummaryV20. # noqa: E501
:rtype: WorkTitleV20
"""
return self._title
@title.setter
def title(self, title):
"""Sets the title of this WorkSummaryV20.
:param title: The title of this WorkSummaryV20. # noqa: E501
:type: WorkTitleV20
"""
self._title = title
@property
def external_ids(self):
"""Gets the external_ids of this WorkSummaryV20. # noqa: E501
:return: The external_ids of this WorkSummaryV20. # noqa: E501
:rtype: ExternalIDsV20
"""
return self._external_ids
@external_ids.setter
def external_ids(self, external_ids):
"""Sets the external_ids of this WorkSummaryV20.
:param external_ids: The external_ids of this WorkSummaryV20. # noqa: E501
:type: ExternalIDsV20
"""
self._external_ids = external_ids
@property
def type(self):
"""Gets the type of this WorkSummaryV20. # noqa: E501
:return: The type of this WorkSummaryV20. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this WorkSummaryV20.
:param type: The type of this WorkSummaryV20. # noqa: E501
:type: str
"""
allowed_values = ["ARTISTIC_PERFORMANCE", "BOOK_CHAPTER", "BOOK_REVIEW", "BOOK", "CONFERENCE_ABSTRACT", "CONFERENCE_PAPER", "CONFERENCE_POSTER", "DATA_SET", "DICTIONARY_ENTRY", "DISCLOSURE", "DISSERTATION", "EDITED_BOOK", "ENCYCLOPEDIA_ENTRY", "INVENTION", "JOURNAL_ARTICLE", "JOURNAL_ISSUE", "LECTURE_SPEECH", "LICENSE", "MAGAZINE_ARTICLE", "MANUAL", "NEWSLETTER_ARTICLE", "NEWSPAPER_ARTICLE", "ONLINE_RESOURCE", "OTHER", "PATENT", "REGISTERED_COPYRIGHT", "REPORT", "RESEARCH_TECHNIQUE", "RESEARCH_TOOL", "SPIN_OFF_COMPANY", "STANDARDS_AND_POLICY", "SUPERVISED_STUDENT_PUBLICATION", "TECHNICAL_STANDARD", "TEST", "TRADEMARK", "TRANSLATION", "WEBSITE", "WORKING_PAPER", "UNDEFINED"] # noqa: E501
if type not in allowed_values:
raise ValueError(
"Invalid value for `type` ({0}), must be one of {1}" # noqa: E501
.format(type, allowed_values)
)
self._type = type
@property
def publication_date(self):
"""Gets the publication_date of this WorkSummaryV20. # noqa: E501
:return: The publication_date of this WorkSummaryV20. # noqa: E501
:rtype: PublicationDateV20
"""
return self._publication_date
@publication_date.setter
def publication_date(self, publication_date):
"""Sets the publication_date of this WorkSummaryV20.
:param publication_date: The publication_date of this WorkSummaryV20. # noqa: E501
:type: PublicationDateV20
"""
self._publication_date = publication_date
@property
def visibility(self):
"""Gets the visibility of this WorkSummaryV20. # noqa: E501
:return: The visibility of this WorkSummaryV20. # noqa: E501
:rtype: str
"""
return self._visibility
@visibility.setter
def visibility(self, visibility):
"""Sets the visibility of this WorkSummaryV20.
:param visibility: The visibility of this WorkSummaryV20. # noqa: E501
:type: str
"""
allowed_values = ["LIMITED", "REGISTERED_ONLY", "PUBLIC", "PRIVATE"] # noqa: E501
if visibility not in allowed_values:
raise ValueError(
"Invalid value for `visibility` ({0}), must be one of {1}" # noqa: E501
.format(visibility, allowed_values)
)
self._visibility = visibility
@property
def path(self):
"""Gets the path of this WorkSummaryV20. # noqa: E501
:return: The path of this WorkSummaryV20. # noqa: E501
:rtype: str
"""
return self._path
@path.setter
def path(self, path):
"""Sets the path of this WorkSummaryV20.
:param path: The path of this WorkSummaryV20. # noqa: E501
:type: str
"""
self._path = path
@property
def display_index(self):
"""Gets the display_index of this WorkSummaryV20. # noqa: E501
:return: The display_index of this WorkSummaryV20. # noqa: E501
:rtype: str
"""
return self._display_index
@display_index.setter
def display_index(self, display_index):
"""Sets the display_index of this WorkSummaryV20.
:param display_index: The display_index of this WorkSummaryV20. # noqa: E501
:type: str
"""
self._display_index = display_index
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(WorkSummaryV20, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
    """Return a pretty-printed string of the model's dict form."""
    return pprint.pformat(self.to_dict())
def __repr__(self):
    """Debug representation for `print`/`pprint` — delegates to to_str()."""
    return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, WorkSummaryV20):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| |
# type: ignore
import gzip
from socket import socket
from typing import Any
from unittest import mock
import pytest
from multidict import CIMultiDict, CIMultiDictProxy
from yarl import URL
import aiohttp
from aiohttp import web
from aiohttp.test_utils import (
AioHTTPTestCase,
RawTestServer as _RawTestServer,
TestClient as _TestClient,
TestServer as _TestServer,
get_port_socket,
loop_context,
make_mocked_request,
)
# Canonical payload used by every handler and assertion below, in str,
# bytes, and gzip-compressed form.
_hello_world_str = "Hello, world"
_hello_world_bytes = _hello_world_str.encode("utf-8")
_hello_world_gz = gzip.compress(_hello_world_bytes)
def _create_example_app():
    """Build the small demo aiohttp application exercised by these tests."""

    async def hello(request):
        return web.Response(body=_hello_world_bytes)

    async def websocket_handler(request):
        # Echo one text message back with a "/answer" suffix, or close on
        # the literal message "close".
        ws = web.WebSocketResponse()
        await ws.prepare(request)
        msg = await ws.receive()
        if msg.type == aiohttp.WSMsgType.TEXT:
            if msg.data == "close":
                await ws.close()
            else:
                await ws.send_str(msg.data + "/answer")
        return ws

    async def cookie_handler(request):
        resp = web.Response(body=_hello_world_bytes)
        resp.set_cookie("cookie", "val")
        return resp

    app = web.Application()
    for path, handler in (
        ("/", hello),
        ("/websocket", websocket_handler),
        ("/cookie", cookie_handler),
    ):
        app.router.add_route("*", path, handler)
    return app
# these exist to test the pytest scenario
@pytest.fixture
def loop():
    """Yield a fresh event loop per test.

    Fix: the original annotated this ``-> None``, which is wrong for a
    yield-fixture — the fixture provides the loop, not None. The
    annotation is dropped rather than guessing an exact Iterator type.
    """
    with loop_context() as event_loop:
        yield event_loop
@pytest.fixture
def app():
    """Fixture: a fresh instance of the example application."""
    return _create_example_app()
@pytest.fixture
def test_client(loop: Any, app: Any):
    """Start a _TestClient for ``app``; tear it down after the test.

    Fix: the original annotated this ``-> None``, which is wrong for a
    yield-fixture — it provides a _TestClient. Annotation dropped.
    """
    async def make_client():
        return _TestClient(_TestServer(app))

    client = loop.run_until_complete(make_client())
    loop.run_until_complete(client.start_server())
    yield client
    loop.run_until_complete(client.close())
async def test_aiohttp_client_close_is_idempotent() -> None:
    """Closing a test client repeatedly must not re-close the server."""
    client = _TestClient(_TestServer(_create_example_app()))
    await client.close()
    await client.close()
class TestAioHTTPTestCase(AioHTTPTestCase):
    """Exercise AioHTTPTestCase with both sync and async test bodies."""

    def get_app(self):
        return _create_example_app()

    async def test_example_with_loop(self) -> None:
        resp = await self.client.request("GET", "/")
        assert resp.status == 200
        assert await resp.text() == _hello_world_str

    def test_inner_example(self) -> None:
        # Sync test driving an async check through the case's loop.
        async def check() -> None:
            resp = await self.client.request("GET", "/")
            assert resp.status == 200
            assert await resp.text() == _hello_world_str

        self.loop.run_until_complete(check())

    async def test_example_without_explicit_loop(self) -> None:
        resp = await self.client.request("GET", "/")
        assert resp.status == 200
        assert await resp.text() == _hello_world_str

    async def test_inner_example_without_explicit_loop(self) -> None:
        async def check() -> None:
            resp = await self.client.request("GET", "/")
            assert resp.status == 200
            assert await resp.text() == _hello_world_str

        await check()
def test_get_route(loop: Any, test_client: Any) -> None:
    """Sync wrapper running one GET through the fixture client."""
    async def check() -> None:
        resp = await test_client.request("GET", "/")
        assert resp.status == 200
        assert await resp.text() == _hello_world_str

    loop.run_until_complete(check())
async def test_client_websocket(loop: Any, test_client: Any) -> None:
    """Round-trip one websocket message, then close via the "close" message.

    The send/receive sequence mirrors the single-message handler in
    _create_example_app, so statement order here is significant.
    """
    resp = await test_client.ws_connect("/websocket")
    await resp.send_str("foo")
    msg = await resp.receive()
    assert msg.type == aiohttp.WSMsgType.TEXT
    assert "foo" in msg.data
    # "close" makes the handler close the socket; we then see CLOSE.
    await resp.send_str("close")
    msg = await resp.receive()
    assert msg.type == aiohttp.WSMsgType.CLOSE
async def test_client_cookie(loop: Any, test_client: Any) -> None:
    """The /cookie route must populate the session's cookie jar."""
    assert not test_client.session.cookie_jar
    await test_client.get("/cookie")
    jar = list(test_client.session.cookie_jar)
    assert jar[0].key == "cookie"
    assert jar[0].value == "val"
@pytest.mark.parametrize(
    # Fix: "post" was listed twice, running the same case twice; the
    # duplicate is removed ("head" is covered by a dedicated test).
    "method", ["get", "post", "options", "put", "patch", "delete"]
)
async def test_test_client_methods(method: Any, loop: Any, test_client: Any) -> None:
    """Each HTTP verb helper on the client must reach the catch-all route."""
    resp = await getattr(test_client, method)("/")
    assert resp.status == 200
    text = await resp.text()
    assert _hello_world_str == text
async def test_test_client_head(loop: Any, test_client: Any) -> None:
    """HEAD has no body to read, so only the status is checked."""
    head_resp = await test_client.head("/")
    assert head_resp.status == 200
@pytest.mark.parametrize("headers", [{"token": "x"}, CIMultiDict({"token": "x"}), {}])
def test_make_mocked_request(headers: Any) -> None:
    """Mocked requests accept dict, CIMultiDict, and empty headers."""
    mocked = make_mocked_request("GET", "/", headers=headers)
    assert mocked.method == "GET"
    assert mocked.path == "/"
    assert isinstance(mocked, web.Request)
    assert isinstance(mocked.headers, CIMultiDictProxy)
def test_make_mocked_request_sslcontext() -> None:
    """The mocked transport exposes no sslcontext."""
    mocked = make_mocked_request("GET", "/")
    assert mocked.transport.get_extra_info("sslcontext") is None
def test_make_mocked_request_unknown_extra_info() -> None:
    """Unknown extra-info keys resolve to None on the mocked transport."""
    mocked = make_mocked_request("GET", "/")
    assert mocked.transport.get_extra_info("unknown_extra_info") is None
def test_make_mocked_request_app() -> None:
    """A supplied app object is attached to the mocked request as-is."""
    fake_app = mock.Mock()
    assert make_mocked_request("GET", "/", app=fake_app).app is fake_app
def test_make_mocked_request_app_can_store_values() -> None:
    """The default mocked app supports mapping-style storage."""
    mocked = make_mocked_request("GET", "/")
    mocked.app["a_field"] = "a_value"
    assert mocked.app["a_field"] == "a_value"
def test_make_mocked_request_match_info() -> None:
    """match_info passed in is exposed unchanged on the mocked request."""
    info = {"a": "1", "b": "2"}
    assert make_mocked_request("GET", "/", match_info=info).match_info == info
def test_make_mocked_request_content() -> None:
    """The supplied payload object becomes the request's content."""
    fake_payload = mock.Mock()
    assert make_mocked_request("GET", "/", payload=fake_payload).content is fake_payload
def test_make_mocked_request_transport() -> None:
    """The supplied transport object is attached to the mocked request."""
    fake_transport = mock.Mock()
    assert make_mocked_request("GET", "/", transport=fake_transport).transport is fake_transport
async def test_test_client_props() -> None:
    """Client properties before, inside, and after its async context."""
    server = _TestServer(_create_example_app(), scheme="http", host="127.0.0.1")
    client = _TestClient(server)
    assert client.scheme == "http"
    assert client.host == "127.0.0.1"
    assert client.port is None
    async with client:
        assert isinstance(client.port, int)
        assert client.server is not None
        assert client.app is not None
    # The port resets once the client context exits.
    assert client.port is None
async def test_test_client_raw_server_props() -> None:
    """Same property checks against a raw (handler-only) test server."""
    async def hello(request):
        return web.Response(body=_hello_world_bytes)

    client = _TestClient(_RawTestServer(hello, scheme="http", host="127.0.0.1"))
    assert client.scheme == "http"
    assert client.host == "127.0.0.1"
    assert client.port is None
    async with client:
        assert isinstance(client.port, int)
        assert client.server is not None
        # Raw servers have no Application, so .app stays None.
        assert client.app is None
    assert client.port is None
async def test_test_server_context_manager(loop: Any) -> None:
    """A _TestServer can be driven by a plain ClientSession."""
    demo_app = _create_example_app()
    async with _TestServer(demo_app) as server:
        session = aiohttp.ClientSession()
        resp = await session.head(server.make_url("/"))
        assert resp.status == 200
        resp.close()
        await session.close()
def test_client_unsupported_arg() -> None:
    """_TestClient rejects anything that is not a TestServer instance."""
    with pytest.raises(TypeError) as excinfo:
        _TestClient("string")
    expected = "server must be TestServer instance, found type: <class 'str'>"
    assert str(excinfo.value) == expected
async def test_server_make_url_yarl_compatibility(loop: Any) -> None:
    """make_url accepts str and URL equally, but only relative paths."""
    async with _TestServer(_create_example_app()) as server:
        assert server.make_url(URL("/foo")) == server.make_url("/foo")
        for absolute in ("http://foo.com", URL("http://foo.com")):
            with pytest.raises(AssertionError):
                server.make_url(absolute)
def test_testcase_no_app(testdir: Any, loop: Any) -> None:
    """An AioHTTPTestCase whose get_app() is missing must fail at runtime.

    The embedded source is written verbatim to a temporary test file by
    the ``testdir`` plugin fixture and executed in a pytest subprocess;
    its output is expected to contain a RuntimeError.
    """
    testdir.makepyfile(
        """
        from aiohttp.test_utils import AioHTTPTestCase

        class InvalidTestCase(AioHTTPTestCase):
            def test_noop(self) -> None:
                pass
        """
    )
    result = testdir.runpytest()
    result.stdout.fnmatch_lines(["*RuntimeError*"])
async def test_server_context_manager(app: Any, loop: Any) -> None:
    """Fully context-managed server, session and response round-trip."""
    async with _TestServer(app) as server:
        async with aiohttp.ClientSession() as session:
            async with session.head(server.make_url("/")) as resp:
                assert resp.status == 200
@pytest.mark.parametrize(
    # Fix: "post" was listed twice in the original, running the same
    # case twice; the duplicate is removed.
    "method", ["head", "get", "post", "options", "put", "patch", "delete"]
)
async def test_client_context_manager_response(
    method: Any, app: Any, loop: Any
) -> None:
    """Every verb helper yields a usable async context-managed response."""
    async with _TestClient(_TestServer(app)) as client:
        async with getattr(client, method)("/") as resp:
            assert resp.status == 200
            if method != "head":
                text = await resp.text()
                assert "Hello, world" in text
async def test_custom_port(loop: Any, app: Any, aiohttp_unused_port: Any) -> None:
    """A server pinned to an explicit port must actually bind to it."""
    port = aiohttp_unused_port()
    client = _TestClient(_TestServer(app, port=port))
    await client.start_server()
    assert client.server.port == port
    resp = await client.get("/")
    assert resp.status == 200
    assert await resp.text() == _hello_world_str
    await client.close()
@pytest.mark.parametrize(
    ("hostname", "expected_host"),
    [("127.0.0.1", "127.0.0.1"), ("localhost", "127.0.0.1"), ("::1", "::1")],
)
async def test_test_server_hostnames(
    hostname: Any, expected_host: Any, loop: Any
) -> None:
    """Hostnames resolve to the expected bound host (localhost -> 127.0.0.1)."""
    server = _TestServer(_create_example_app(), host=hostname, loop=loop)
    async with server:
        pass
    assert server.host == expected_host
@pytest.mark.parametrize("test_server_cls", [_TestServer, _RawTestServer])
async def test_base_test_server_socket_factory(
    test_server_cls: type, app: Any, loop: Any
) -> None:
    """A custom socket_factory must be invoked by both server flavours."""
    calls = []

    def factory(*args, **kwargs) -> socket:
        # Record the invocation, then delegate to the real factory.
        calls.append(args)
        return get_port_socket(*args, **kwargs)

    async with test_server_cls(app, loop=loop, socket_factory=factory):
        pass
    assert calls
| |
#!/usr/bin/env python
###############################################################################
# $Id: gdal2xyz_geocentricSpace.py 2014-10-21
#
# Project: GDAL
# Purpose: Script to translate GDAL supported raster (specifically a elevation DEM)
# into a geocentric (body-fixed) XYZ ASCII table.
# Defaults to Moon radius and DEM elevation values should in meters
# Author: Frank Warmerdam, warmerdam@pobox.com, original gdal2xyz version
# update: Trent Hare, Jan 31, 2011 to convert to space coordinates - only use degs
# update: Trent Hare, Feb 1, 2011 now supports any GDAL meter projection as input
# update: Trent Hare, Oct 21, 2014 now supports Lat/Lon from Bands
#
###############################################################################
# Copyright (c) 2002, Frank Warmerdam <warmerdam@pobox.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
try:
from osgeo import gdal
from osgeo import osr
from osgeo.gdalconst import *
except ImportError:
import gdal
import osr
from gdalconst import *
import sys
import math
try:
import numpy as Numeric
except ImportError:
import Numeric
# =============================================================================
def Usage():
    """Print the command-line help text and exit with status 1.

    Python 2 print statements; the exact output lines are part of the
    tool's CLI contract and are left untouched.
    """
    print 'Usage: gdal2xyz_geocentricSpace.py [-skip factor] [-printLatLon] [-addheader] [-srcwin xoff yoff width height]'
    print '       [-radius value_m or -radiusBand n] [-latBand n] [-lonBand n] [-band b] srcfile [dstfile]'
    print 'Note: Was written for digital elevation files (DEMs), thus band 1 or -band b, should be elevation in meters'
    print 'Note: if no radius is sent, the radius will default to the Moon = 1737400.0'
    print 'Note: if variable radius is available as a band, then you can send -radiusBand b'
    print
    sys.exit( 1 )
# =============================================================================
#
# Program mainline.
#
if __name__ == '__main__':
srcwin = None
skip = 1
srcfile = None
dstfile = None
band_nums = []
addheader=False
LatLon=False
printLatLon=False
latBand_num = None
lonBand_num = None
radiusBand_num = None
#Moon's radius
theRadius = 1737400.0
gdal.AllRegister()
argv = gdal.GeneralCmdLineProcessor( sys.argv )
if argv is None:
sys.exit( 0 )
# Parse command line arguments.
i = 1
while i < len(argv):
arg = argv[i]
if arg == '-srcwin':
srcwin = (int(argv[i+1]),int(argv[i+2]),
int(argv[i+3]),int(argv[i+4]))
i = i + 4
elif arg == '-skip':
skip = int(argv[i+1])
i = i + 1
elif arg == '-radius':
theRadius = float(argv[i+1])
i = i + 1
elif arg == '-latBand':
latBand_num = int(argv[i+1])
i = i + 1
elif arg == '-lonBand':
lonBand_num = int(argv[i+1])
i = i + 1
elif arg == '-radiusBand':
radiusBand_num = int(argv[i+1])
i = i + 1
elif arg == '-band':
band_nums.append( int(argv[i+1]) )
i = i + 1
elif arg == '-addheader':
addheader = True
elif arg == '-printLatLon':
printLatLon = True
elif arg[0] == '-':
Usage()
elif srcfile is None:
srcfile = arg
elif dstfile is None:
dstfile = arg
else:
Usage()
i = i + 1
if srcfile is None:
Usage()
if band_nums == []: band_nums = [1]
# Open source file.
indataset = gdal.Open( srcfile )
if indataset is None:
print 'Could not open %s.' % srcfile
sys.exit( 1 )
bands = []
for band_num in band_nums:
band = indataset.GetRasterBand(band_num)
if band is None:
print 'Could not get band %d' % band_num
sys.exit( 1 )
bands.append(band)
if ((latBand_num is None) and (lonBand_num is not None)) or \
((latBand_num is not None) and (lonBand_num is None)):
print '\nError: Only one Lat or one Lon Band sent. You should have bands for each.\n'
Usage ()
if latBand_num is not None:
latBand = indataset.GetRasterBand(latBand_num)
if latBand is None:
print 'Could not get Latitude band %d' % latBand_num
sys.exit( 1 )
if lonBand_num is not None:
lonBand = indataset.GetRasterBand(lonBand_num)
if lonBand is None:
print 'Could not get Longitude band %d' % lonBand_num
sys.exit( 1 )
if radiusBand_num is not None:
radiusBand = indataset.GetRasterBand(radiusBand_num)
if radiusBand is None:
print 'Could not get Radius band %d' % radiusBand_num
sys.exit( 1 )
geomatrix = indataset.GetGeoTransform()
# Build Spatial Reference object based on coordinate system, fetched from the
# opened dataset
srs = osr.SpatialReference()
srs.ImportFromWkt(indataset.GetProjection())
#print srs
srsLatLong = srs.CloneGeogCS()
coordtransform = osr.CoordinateTransformation(srs, srsLatLong)
# Collect information on all the source files.
if srcwin is None:
srcwin = (0,0,indataset.RasterXSize,indataset.RasterYSize)
# Open the output file.
if dstfile is not None:
dst_fh = open(dstfile,'wt')
else:
dst_fh = sys.stdout
if addheader:
if printLatLon:
dst_fh.write( "Lon,Lat,Band\n" )
else:
dst_fh.write( "X,Y,Radius\n" )
band_format = ("%g " * len(bands)).rstrip() + '\n'
format = '%.3f,%.3f,%.3f\n'
# double check if the input is LatLon
if abs(geomatrix[0]) <= 360 and abs(geomatrix[3]) <= 360 \
and abs(indataset.RasterXSize * geomatrix[1]) <= 360 \
and abs(indataset.RasterYSize * geomatrix[5]) <= 360:
format = '%.6f,%.6f,%.3f\n'
LatLon = True
# Loop emitting data.
for y in range(srcwin[1],srcwin[1]+srcwin[3],skip):
data = []
for band in bands:
band_data = band.ReadAsArray( srcwin[0], y, srcwin[2], 1 )
band_data = Numeric.reshape( band_data, (srcwin[2],) )
data.append(band_data)
latData = []
if latBand_num is not None:
band_data = latBand.ReadAsArray( srcwin[0], y, srcwin[2], 1 )
band_data = Numeric.reshape( band_data, (srcwin[2],) )
latData = band_data
lonData = []
if lonBand_num is not None:
band_data = lonBand.ReadAsArray( srcwin[0], y, srcwin[2], 1 )
band_data = Numeric.reshape( band_data, (srcwin[2],) )
lonData = band_data
radiusData = []
if radiusBand_num is not None:
band_data = radiusBand.ReadAsArray( srcwin[0], y, srcwin[2], 1 )
band_data = Numeric.reshape( band_data, (srcwin[2],) )
radiusData = band_data
for x_i in range(0,srcwin[2],skip):
x = x_i + srcwin[0]
geo_x = geomatrix[0] + (x+0.5) * geomatrix[1] + (y+0.5) * geomatrix[2]
geo_y = geomatrix[3] + (x+0.5) * geomatrix[4] + (y+0.5) * geomatrix[5]
x_i_data = []
for i in range(len(bands)):
x_i_data.append(data[i][x_i])
band_str = band_format % tuple(x_i_data)
#convert Y/X meters from image projection to lat/on
if not LatLon:
(geo_x, geo_y, height) = coordtransform.TransformPoint(geo_x, geo_y)
#override - get lat from band
if latBand_num is not None:
geo_y = float(latData[x])
#override - get lon from band
if lonBand_num is not None:
geo_x = float(lonData[x])
#override - get radius from band
if radiusBand_num is not None:
theRadius = float(radiusData[x])
#simple sphere method. Needs to be changed for ellipse
if (abs(x_i_data[0]) < 1.0E12):
if printLatLon: #only support a single band
line = format % (float(geo_x),float(geo_y), x_i_data[0])
else:
#print body-fixed coordinates
if radiusBand_num is not None:
#just use radius as provided for in band
geoC_x = (theRadius) * math.cos(math.radians(geo_y)) * math.cos(math.radians(geo_x))
geoC_y = (theRadius) * math.cos(math.radians(geo_y)) * math.sin(math.radians(geo_x))
geoC_z = (theRadius) * math.sin(math.radians(geo_y))
else:
#radius plus elevation band
geoC_x = (theRadius + x_i_data[0]) * math.cos(math.radians(geo_y)) * math.cos(math.radians(geo_x))
geoC_y = (theRadius + x_i_data[0]) * math.cos(math.radians(geo_y)) * math.sin(math.radians(geo_x))
geoC_z = (theRadius + x_i_data[0]) * math.sin(math.radians(geo_y))
line = format % (float(geoC_x),float(geoC_y), float(geoC_z))
dst_fh.write( line )
| |
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
"""Interface documentation.
API Stability: stable, other than IReactorUDP (semi-stable) and
IReactorMulticast (unstable).
Maintainer: U{Itamar Shtull-Trauring<mailto:twisted@itamarst.org>}
"""
from zope.interface import Interface
class IAddress(Interface):
    """An address, e.g. a TCP (host, port).

    This is a marker interface: it declares no methods of its own.
    Default implementations are in L{twisted.internet.address}.
    """
### Reactor Interfaces
class IConnector(Interface):
    """Object used to interface between connections and protocols.

    Each IConnector manages one connection.
    """

    def stopConnecting():
        """Stop attempting to connect."""

    def disconnect():
        """Disconnect regardless of the connection state.

        If we are connected, disconnect, if we are trying to connect,
        stop trying.
        """

    def connect():
        """Try to connect to remote address."""

    def getDestination():
        """Return destination this will try to connect to.

        @return: An object which provides L{IAddress}.
        """
class IResolverSimple(Interface):
    """Minimal name-resolution interface: one name to one IP address."""

    def getHostByName(name, timeout = (1, 3, 11, 45)):
        """Resolve the domain name C{name} into an IP address.

        @type name: C{str}
        @type timeout: C{tuple}
            (NOTE(review): presumably per-retry timeouts in seconds,
            judging by the default — confirm against the implementation.)
        @rtype: L{twisted.internet.defer.Deferred}
        @return: The callback of the Deferred that is returned will be
        passed a string that represents the IP address of the specified
        name, or the errback will be called if the lookup times out.  If
        multiple types of address records are associated with the name,
        A6 records will be returned in preference to AAAA records, which
        will be returned in preference to A records.  If there are multiple
        records of the type to be returned, one will be selected at random.

        @raise twisted.internet.defer.TimeoutError: Raised (asynchronously)
        if the name cannot be resolved within the specified timeout period.
        """
class IResolver(IResolverSimple):
    """Full DNS resolver interface: one lookup method per record type."""

    def lookupRecord(name, cls, type, timeout = 10):
        """Lookup the records associated with the given name
        that are of the given type and in the given class.
        """

    def query(query, timeout = 10):
        """Interpret and dispatch a query object to the appropriate
        lookup* method.
        """

    def lookupAddress(name, timeout = 10):
        """Lookup the A records associated with C{name}."""

    def lookupAddress6(name, timeout = 10):
        """Lookup all the A6 records associated with C{name}."""

    def lookupIPV6Address(name, timeout = 10):
        """Lookup all the AAAA records associated with C{name}."""

    def lookupMailExchange(name, timeout = 10):
        """Lookup the MX records associated with C{name}."""

    def lookupNameservers(name, timeout = 10):
        """Lookup the NS records associated with C{name}."""

    def lookupCanonicalName(name, timeout = 10):
        """Lookup the CNAME records associated with C{name}."""

    def lookupMailBox(name, timeout = 10):
        """Lookup the MB records associated with C{name}."""

    def lookupMailGroup(name, timeout = 10):
        """Lookup the MG records associated with C{name}."""

    def lookupMailRename(name, timeout = 10):
        """Lookup the MR records associated with C{name}."""

    def lookupPointer(name, timeout = 10):
        """Lookup the PTR records associated with C{name}."""

    def lookupAuthority(name, timeout = 10):
        """Lookup the SOA records associated with C{name}."""

    def lookupNull(name, timeout = 10):
        """Lookup the NULL records associated with C{name}."""

    def lookupWellKnownServices(name, timeout = 10):
        """Lookup the WKS records associated with C{name}."""

    def lookupHostInfo(name, timeout = 10):
        """Lookup the HINFO records associated with C{name}."""

    def lookupMailboxInfo(name, timeout = 10):
        """Lookup the MINFO records associated with C{name}."""

    def lookupText(name, timeout = 10):
        """Lookup the TXT records associated with C{name}."""

    def lookupResponsibility(name, timeout = 10):
        """Lookup the RP records associated with C{name}."""

    def lookupAFSDatabase(name, timeout = 10):
        """Lookup the AFSDB records associated with C{name}."""

    def lookupService(name, timeout = 10):
        """Lookup the SRV records associated with C{name}."""

    def lookupAllRecords(name, timeout = 10):
        """Lookup all records associated with C{name}."""

    def lookupZone(name, timeout = 10):
        """Perform a zone transfer for the given C{name}."""
class IReactorArbitrary(Interface):
    """Reactor methods for listening/connecting with caller-supplied types."""

    def listenWith(portType, *args, **kw):
        """Start an instance of the given C{portType} listening.

        @type portType: type which implements L{IListeningPort}

        @param portType: The object given by C{portType(*args, **kw)} will be
        started listening.

        @return: an object which provides L{IListeningPort}.
        """

    def connectWith(connectorType, *args, **kw):
        """
        Start an instance of the given C{connectorType} connecting.

        @type connectorType: type which implements L{IConnector}

        @param connectorType: The object given by C{connectorType(*args, **kw)}
        will be started connecting.

        @return:  An object which provides L{IConnector}.
        """
class IReactorTCP(Interface):
    """Reactor methods for TCP servers and clients."""

    def listenTCP(port, factory, backlog=50, interface=''):
        """Connects a given protocol factory to the given numeric TCP/IP port.

        @param port: a port number on which to listen

        @param factory: a L{twisted.internet.protocol.ServerFactory} instance

        @param backlog: size of the listen queue

        @param interface: the hostname to bind to, defaults to '' (all)

        @return: an object that provides L{IListeningPort}.

        @raise CannotListenError: as defined here
                                  L{twisted.internet.error.CannotListenError},
                                  if it cannot listen on this port (e.g., it
                                  cannot bind to the required port number)
        """

    def connectTCP(host, port, factory, timeout=30, bindAddress=None):
        """Connect a TCP client.

        @param host: a host name

        @param port: a port number

        @param factory: a L{twisted.internet.protocol.ClientFactory} instance

        @param timeout: number of seconds to wait before assuming the
                        connection has failed.

        @param bindAddress: a (host, port) tuple of local address to bind
                            to, or None.

        @return: An object which provides L{IConnector}. This connector will
                 call various callbacks on the factory when a connection is
                 made, failed, or lost - see
                 L{ClientFactory<twisted.internet.protocol.ClientFactory>}
                 docs for details.
        """
class IReactorSSL(Interface):
    """Reactor methods for SSL servers and clients."""

    def connectSSL(host, port, factory, contextFactory, timeout=30, bindAddress=None):
        """Connect a client Protocol to a remote SSL socket.

        @param host: a host name

        @param port: a port number

        @param factory: a L{twisted.internet.protocol.ClientFactory} instance

        @param contextFactory: a L{twisted.internet.ssl.ClientContextFactory} object.

        @param timeout: number of seconds to wait before assuming the
                        connection has failed.

        @param bindAddress: a (host, port) tuple of local address to bind to,
                            or C{None}.

        @return: An object which provides L{IConnector}.
        """

    def listenSSL(port, factory, contextFactory, backlog=50, interface=''):
        """
        Connects a given protocol factory to the given numeric TCP/IP port.
        The connection is a SSL one, using contexts created by the context
        factory.

        @param port: a port number on which to listen

        @param factory: a L{twisted.internet.protocol.ServerFactory} instance

        @param contextFactory: a L{twisted.internet.ssl.ContextFactory} instance

        @param backlog: size of the listen queue

        @param interface: the hostname to bind to, defaults to '' (all)

        NOTE(review): no @return is documented here; by analogy with
        listenTCP this presumably returns an L{IListeningPort} provider —
        confirm against the implementation.
        """
class IReactorUNIX(Interface):
    """UNIX socket methods."""

    def connectUNIX(address, factory, timeout=30, checkPID=0):
        """Connect a client protocol to a UNIX socket.

        @param address: a path to a unix socket on the filesystem.

        @param factory: a L{twisted.internet.protocol.ClientFactory} instance

        @param timeout: number of seconds to wait before assuming the connection
            has failed.

        @param checkPID: if True, check for a pid file to verify that a server
            is listening.

        @return: An object which provides L{IConnector}.
        """

    # NOTE: 0666 below is Python 2 octal syntax (would be 0o666 in Python 3).
    def listenUNIX(address, factory, backlog=50, mode=0666, wantPID=0):
        """Listen on a UNIX socket.

        @param address: a path to a unix socket on the filesystem.

        @param factory: a L{twisted.internet.protocol.Factory} instance.

        @param backlog: number of connections to allow in backlog.

        @param mode: mode to set on the unix socket.

        @param wantPID: if True, create a pidfile for the socket.

        @return: An object which provides L{IListeningPort}.
        """
class IReactorUNIXDatagram(Interface):
    """Datagram UNIX socket methods."""

    def connectUNIXDatagram(address, protocol, maxPacketSize=8192, mode=0666, bindAddress=None):
        """Connect a client protocol to a datagram UNIX socket.

        @param address: a path to a unix socket on the filesystem.
        @param protocol: a L{twisted.internet.protocol.ConnectedDatagramProtocol} instance
        @param maxPacketSize: maximum packet size to accept
        @param mode: mode to set on the unix socket.
        @param bindAddress: address to bind to
        @return: An object which provides L{IConnector}.
        """

    def listenUNIXDatagram(address, protocol, maxPacketSize=8192, mode=0666):
        """Listen on a datagram UNIX socket.

        @param address: a path to a unix socket on the filesystem.
        @param protocol: a L{twisted.internet.protocol.DatagramProtocol} instance.
        @param maxPacketSize: maximum packet size to accept
        @param mode: mode to set on the unix socket.
        @return: An object which provides L{IListeningPort}.
        """
class IReactorUDP(Interface):
    """UDP socket methods.

    IMPORTANT: This is an experimental new interface. It may change
    without backwards compatibility. Suggestions are welcome.
    """

    def listenUDP(port, protocol, interface='', maxPacketSize=8192):
        """Connects a given DatagramProtocol to the given numeric UDP port.

        @return: object which provides L{IListeningPort}.
        """

    def connectUDP(remotehost, remoteport, protocol, localport=0,
                   interface='', maxPacketSize=8192):
        """DEPRECATED.

        Connects a L{twisted.internet.protocol.ConnectedDatagramProtocol}
        instance to a UDP port.
        """
class IReactorMulticast(Interface):
    """UDP socket methods that support multicast.

    IMPORTANT: This is an experimental new interface. It may change
    without backwards compatibility. Suggestions are welcome.
    """

    def listenMulticast(port, protocol, interface='', maxPacketSize=8192,
                        listenMultiple=False):
        """
        Connects a given
        L{DatagramProtocol<twisted.internet.protocol.DatagramProtocol>} to the
        given numeric UDP port.

        @param listenMultiple: boolean indicating whether multiple sockets can
            bind to same UDP port.
        @returns: An object which provides L{IListeningPort}.
        """
class IReactorProcess(Interface):
    """Reactor methods for launching child processes."""

    def spawnProcess(processProtocol, executable, args=(), env={}, path=None,
                     uid=None, gid=None, usePTY=0, childFDs=None):
        """Spawn a process, with a process protocol.

        @param processProtocol: a L{twisted.internet.protocol.ProcessProtocol} instance
        @param executable: the file name to spawn - the full path should be
            used.
        @param args: the command line arguments to pass to the process; a
            sequence of strings. The first string should be the
            executable's name.
        @param env: the environment variables to pass to the process; a
            dictionary of strings. If 'None', use os.environ.
        @param path: the path to run the subprocess in - defaults to the
            current directory.
        @param uid: user ID to run the subprocess as. (Only available on
            POSIX systems.)
        @param gid: group ID to run the subprocess as. (Only available on
            POSIX systems.)
        @param usePTY: if true, run this process in a pseudo-terminal.
            optionally a tuple of (masterfd, slavefd, ttyname),
            in which case use those file descriptors.
            (Not available on all systems.)
        @param childFDs: A dictionary mapping file descriptors in the new child
            process to an integer or to the string 'r' or 'w'.

            If the value is an integer, it specifies a file
            descriptor in the parent process which will be mapped
            to a file descriptor (specified by the key) in the
            child process. This is useful for things like inetd
            and shell-like file redirection.

            If it is the string 'r', a pipe will be created and
            attached to the child at that file descriptor: the
            child will be able to write to that file descriptor
            and the parent will receive read notification via the
            L{IProcessTransport.childDataReceived} callback. This
            is useful for the child's stdout and stderr.

            If it is the string 'w', similar setup to the previous
            case will occur, with the pipe being readable by the
            child instead of writeable. The parent process can
            write to that file descriptor using
            L{IProcessTransport.writeToChild}. This is useful for
            the child's stdin.

            If childFDs is not passed, the default behaviour is to
            use a mapping that opens the usual stdin/stdout/stderr
            pipes.
        @see: L{twisted.internet.protocol.ProcessProtocol}
        @return: An object which provides L{IProcessTransport}.
        @raise OSError: Raised with errno EAGAIN or ENOMEM if there are
            insufficient system resources to create a new process.
        """
class IReactorTime(Interface):
    """Time methods that a Reactor should implement."""

    def callLater(delay, callable, *args, **kw):
        """Call a function later.

        @type delay: C{float}
        @param delay: the number of seconds to wait.
        @param callable: the callable object to call later.
        @param args: the arguments to call it with.
        @param kw: the keyword arguments to call it with.
        @return: An object which provides L{IDelayedCall} and can be used to
            cancel the scheduled call, by calling its C{cancel()} method.
            It also may be rescheduled by calling its C{delay()} or
            C{reset()} methods.
        """

    def cancelCallLater(callID):
        """This method is deprecated.

        Cancel a call that would happen later.

        @param callID: this is an opaque identifier returned from C{callLater}
            that will be used to cancel a specific call.
        @raise ValueError: if the callID is not recognized.
        """

    def getDelayedCalls():
        """Retrieve all currently scheduled delayed calls.

        @return: A tuple of all L{IDelayedCall} providers representing all
            currently scheduled calls. This is everything that has been
            returned by C{callLater} but not yet called or canceled.
        """
class IDelayedCall(Interface):
    """A scheduled call.

    There are probably other useful methods we can add to this interface;
    suggestions are welcome.
    """

    def getTime():
        """Get time when delayed call will happen.

        @return: time in seconds since epoch (a float).
        """

    def cancel():
        """Cancel the scheduled call.

        @raises twisted.internet.error.AlreadyCalled: if the call has already
            happened.
        @raises twisted.internet.error.AlreadyCancelled: if the call has already
            been cancelled.
        """

    def delay(secondsLater):
        """Delay the scheduled call.

        @param secondsLater: how many seconds from its current firing time to delay
        @raises twisted.internet.error.AlreadyCalled: if the call has already
            happened.
        @raises twisted.internet.error.AlreadyCancelled: if the call has already
            been cancelled.
        """

    def reset(secondsFromNow):
        """Reset the scheduled call's timer.

        @param secondsFromNow: how many seconds from now it should fire,
            equivalent to C{.cancel()} and then doing another
            C{reactor.callLater(secondsLater, ...)}
        @raises twisted.internet.error.AlreadyCalled: if the call has already
            happened.
        @raises twisted.internet.error.AlreadyCancelled: if the call has already
            been cancelled.
        """

    def active():
        """
        @return: True if this call is still active, False if it has been
            called or cancelled.
        """
class IReactorThreads(Interface):
    """Dispatch methods to be run in threads.

    Internally, this should use a thread pool and dispatch methods to them.
    """

    def callInThread(callable, *args, **kwargs):
        """Run the callable object in a separate thread."""

    def callFromThread(callable, *args, **kw):
        """Cause a function to be executed by the reactor thread.

        Use this method when you want to run a function in the reactor's thread
        from another thread. Calling callFromThread should wake up the main
        thread (where reactor.run() is executing) and run the given callable in
        that thread.

        Obviously, the callable must be thread safe. (If you want to call a
        function in the next mainloop iteration, but you're in the same thread,
        use callLater with a delay of 0.)
        """

    def suggestThreadPoolSize(size):
        """
        Suggest the size of the internal threadpool used to dispatch functions
        passed to L{callInThread}.
        """
class IReactorCore(Interface):
    """Core methods that a Reactor must implement."""

    def resolve(name, timeout=10):
        """Return a L{twisted.internet.defer.Deferred} that will resolve a hostname."""

    def run():
        """Fire 'startup' System Events, move the reactor to the 'running'
        state, then run the main loop until it is stopped with stop() or
        crash().
        """

    def stop():
        """Fire 'shutdown' System Events, which will move the reactor to the
        'stopped' state and cause reactor.run() to exit. """

    def crash():
        """Stop the main loop *immediately*, without firing any system events.

        This is named as it is because this is an extremely "rude" thing to do;
        it is possible to lose data and put your system in an inconsistent
        state by calling this. However, it is necessary, as sometimes a system
        can become wedged in a pre-shutdown call.
        """

    def iterate(delay=0):
        """Run the main loop's I/O polling function for a period of time.

        This is most useful in applications where the UI is being drawn "as
        fast as possible", such as games. All pending L{IDelayedCall}s will
        be called.

        The reactor must have been started (via the run() method) prior to
        any invocations of this method. It must also be stopped manually
        after the last call to this method (via the stop() method). This
        method is not re-entrant: you must not call it recursively; in
        particular, you must not call it while the reactor is running.
        """

    def fireSystemEvent(eventType):
        """Fire a system-wide event.

        System-wide events are things like 'startup', 'shutdown', and
        'persist'.
        """

    def addSystemEventTrigger(phase, eventType, callable, *args, **kw):
        """Add a function to be called when a system event occurs.

        Each "system event" in Twisted, such as 'startup', 'shutdown', and
        'persist', has 3 phases: 'before', 'during', and 'after' (in that
        order, of course). These events will be fired internally by the
        Reactor.

        An implementor of this interface must only implement those events
        described here.

        Callbacks registered for the "before" phase may return either None or a
        Deferred. The "during" phase will not execute until all of the
        Deferreds from the "before" phase have fired.

        Once the "during" phase is running, all of the remaining triggers must
        execute; their return values must be ignored.

        @param phase: a time to call the event -- either the string 'before',
            'after', or 'during', describing when to call it
            relative to the event's execution.
        @param eventType: this is a string describing the type of event.
        @param callable: the object to call before shutdown.
        @param args: the arguments to call it with.
        @param kw: the keyword arguments to call it with.
        @return: an ID that can be used to remove this call with
            removeSystemEventTrigger.
        """

    def removeSystemEventTrigger(triggerID):
        """Removes a trigger added with addSystemEventTrigger.

        @param triggerID: a value returned from addSystemEventTrigger.
        """

    def callWhenRunning(callable, *args, **kw):
        """Call a function when the reactor is running.

        If the reactor has not started, the callable will be scheduled
        to run when it does start. Otherwise, the callable will be invoked
        immediately.

        @param callable: the callable object to call later.
        @param args: the arguments to call it with.
        @param kw: the keyword arguments to call it with.
        @return: None if the callable was invoked, otherwise a system
            event id for the scheduled call.
        """
class IReactorPluggableResolver(Interface):
    """A reactor with a pluggable name resolver interface."""

    def installResolver(resolver):
        """Set the internal resolver to use for name lookups.

        @type resolver: An object implementing the L{IResolverSimple} interface
        @param resolver: The new resolver to use.
        @return: The previously installed resolver.
        """
class IReactorFDSet(Interface):
    """
    Implement me to be able to use
    L{FileDescriptor<twisted.internet.abstract.FileDescriptor>} type resources.

    This assumes that your main-loop uses UNIX-style numeric file descriptors
    (or at least similarly opaque IDs returned from a .fileno() method)
    """

    def addReader(reader):
        """I add reader to the set of file descriptors to get read events for.

        @param reader: An L{IReadDescriptor} provider that will be checked for
            read events until it is removed from the reactor with
            L{removeReader}.
        @return: C{None}.
        """

    def addWriter(writer):
        """I add writer to the set of file descriptors to get write events for.

        @param writer: An L{IWriteDescriptor} provider that will be checked for
            write events until it is removed from the reactor with
            L{removeWriter}.
        @return: C{None}.
        """

    def removeReader(reader):
        """Removes an object previously added with L{addReader}.

        @return: C{None}.
        """

    def removeWriter(writer):
        """Removes an object previously added with L{addWriter}.

        @return: C{None}.
        """

    def removeAll():
        """Remove all readers and writers.

        Should not remove reactor internal reactor connections (like a waker).

        @return: A list of L{IReadDescriptor} and L{IWriteDescriptor} providers
            which were removed.
        """
class IListeningPort(Interface):
    """A listening port."""

    def startListening():
        """Start listening on this port.

        @raise CannotListenError: If it cannot listen on this port (e.g., it is
            a TCP port and it cannot bind to the required
            port number).
        """

    def stopListening():
        """Stop listening on this port.

        If it does not complete immediately, will return Deferred that fires
        upon completion.
        """

    def getHost():
        """Get the host that this port is listening for.

        @return: An L{IAddress} provider.
        """
class IFileDescriptor(Interface):
    """A file descriptor."""

    def fileno():
        """
        @return: The platform-specified representation of a file-descriptor
            number.
        """

    def connectionLost(reason):
        """Called when the connection was lost.

        This is called when the connection on a selectable object has been
        lost. It will be called whether the connection was closed explicitly,
        an exception occurred in an event handler, or the other end of the
        connection closed it first.

        See also L{IHalfCloseableDescriptor} if your descriptor wants to be
        notified separately of the two halves of the connection being closed.

        @param reason: A failure instance indicating the reason why the
            connection was lost. L{error.ConnectionLost} and
            L{error.ConnectionDone} are of special note, but the
            failure may be of other classes as well.
        """
class IReadDescriptor(IFileDescriptor):
    """A file descriptor that the reactor monitors for readability."""

    def doRead():
        """Some data is available for reading on your descriptor."""
class IWriteDescriptor(IFileDescriptor):
    """A file descriptor that the reactor monitors for writability."""

    def doWrite():
        """Some data can be written to your descriptor."""
class IReadWriteDescriptor(IReadDescriptor, IWriteDescriptor):
    """I am a L{FileDescriptor<twisted.internet.abstract.FileDescriptor>} that can both read and write."""
class IHalfCloseableDescriptor(Interface):
    """A descriptor that can be half-closed."""

    def writeConnectionLost(reason):
        """Indicates write connection was lost."""

    def readConnectionLost(reason):
        """Indicates read connection was lost."""
class ISystemHandle(Interface):
    """An object that wraps a networking OS-specific handle."""

    def getHandle():
        """Return a system- and reactor-specific handle.

        This might be a socket.socket() object, or some other type of
        object, depending on which reactor is being used. Use and
        manipulate at your own risk.

        This might be used in cases where you want to set specific
        options not exposed by the Twisted APIs.
        """
class IConsumer(Interface):
    """A consumer consumes data from a producer."""

    def registerProducer(producer, streaming):
        """
        Register to receive data from a producer.

        This sets self to be a consumer for a producer. When this object runs
        out of data (as when a send(2) call on a socket succeeds in moving the
        last data from a userspace buffer into a kernelspace buffer), it will
        ask the producer to resumeProducing().

        For L{IPullProducer} providers, C{resumeProducing} will be called once
        each time data is required.

        For L{IPushProducer} providers, C{pauseProducing} will be called
        whenever the write buffer fills up and C{resumeProducing} will only be
        called when it empties.

        @type producer: L{IProducer} provider
        @type streaming: C{bool}
        @param streaming: C{True} if C{producer} provides L{IPushProducer},
            C{False} if C{producer} provides L{IPullProducer}.
        @return: C{None}
        """

    def unregisterProducer():
        """Stop consuming data from a producer, without disconnecting."""

    def write(data):
        """The producer will write data by calling this method."""
class IFinishableConsumer(IConsumer):
    """A Consumer for producers that finish.

    This interface is semi-stable.
    """

    def finish():
        """The producer has finished producing."""
class IProducer(Interface):
    """A producer produces data for a consumer.

    Typically producing is done by calling the write method of a class
    implementing L{IConsumer}.
    """

    def stopProducing():
        """Stop producing data.

        This tells a producer that its consumer has died, so it must stop
        producing data for good.
        """
class IPushProducer(IProducer):
    """
    A push producer, also known as a streaming producer, is expected to
    produce (write to this consumer) data on a continuous basis, unless
    it has been paused. A paused push producer will resume producing
    after its resumeProducing() method is called. For a push producer
    which is not pausable, these functions may be noops.

    This interface is semi-stable.
    """

    def pauseProducing():
        """Pause producing data.

        Tells a producer that it has produced too much data to process for
        the time being, and to stop until resumeProducing() is called.
        """

    def resumeProducing():
        """Resume producing data.

        This tells a producer to re-add itself to the main loop and produce
        more data for its consumer.
        """
class IPullProducer(IProducer):
    """
    A pull producer, also known as a non-streaming producer, is
    expected to produce data each time resumeProducing() is called.

    This interface is semi-stable.
    """

    def resumeProducing():
        """Produce data for the consumer a single time.

        This tells a producer to produce data for the consumer once
        (not repeatedly, once only). Typically this will be done
        by calling the consumer's write() method a single time with
        produced data.
        """
class IProtocol(Interface):
    """A network protocol: receives bytes and connection lifecycle events."""

    def dataReceived(data):
        """Called whenever data is received.

        Use this method to translate to a higher-level message. Usually, some
        callback will be made upon the receipt of each complete protocol
        message.

        @param data: a string of indeterminate length. Please keep in mind
            that you will probably need to buffer some data, as partial
            (or multiple) protocol messages may be received! I recommend
            that unit tests for protocols call through to this method with
            differing chunk sizes, down to one byte at a time.
        """

    def connectionLost(reason):
        """Called when the connection is shut down.

        Clear any circular references here, and any external references
        to this Protocol. The connection has been closed. The C{reason}
        Failure wraps a L{twisted.internet.error.ConnectionDone} or
        L{twisted.internet.error.ConnectionLost} instance (or a subclass
        of one of those).

        @type reason: L{twisted.python.failure.Failure}
        """

    def makeConnection(transport):
        """Make a connection to a transport and a server."""

    def connectionMade():
        """Called when a connection is made.

        This may be considered the initializer of the protocol, because
        it is called when the connection is completed. For clients,
        this is called once the connection to the server has been
        established; for servers, this is called after an accept() call
        stops blocking and a socket has been received. If you need to
        send any greeting or initial message, do it here.
        """
class IHalfCloseableProtocol(Interface):
    """Implemented to indicate they want notification of half-closes.

    TCP supports the notion of half-closing the connection, e.g.
    closing the write side but still not stopping reading. A protocol
    that implements this interface will be notified of such events,
    instead of having connectionLost called.
    """

    def readConnectionLost():
        """Notification of the read connection being closed.

        This indicates peer did half-close of write side. It is now
        the responsibility of this protocol to call
        loseConnection(). In addition, the protocol MUST make sure a
        reference to it still exists (i.e. by doing a callLater with
        one of its methods, etc.) as the reactor will only have a
        reference to it if it is writing.

        If the protocol does not do so, it might get garbage collected
        without the connectionLost method ever being called.
        """

    def writeConnectionLost():
        """Notification of the write connection being closed.

        This will never be called for TCP connections as TCP does not
        support notification of this type of half-close.
        """
class IProtocolFactory(Interface):
    """Interface for protocol factories."""

    def buildProtocol(addr):
        """Called when a connection has been established to addr.

        If None is returned, the connection is assumed to have been refused,
        and the Port will close the connection.

        @type addr: (host, port)
        @param addr: The address of the newly-established connection
        @return: None if the connection was refused, otherwise an object
            providing L{IProtocol}.
        """

    def doStart():
        """Called every time this is connected to a Port or Connector."""

    def doStop():
        """Called every time this is unconnected from a Port or Connector."""
class ITransport(Interface):
    """I am a transport for bytes.

    I represent (and wrap) the physical connection and synchronicity
    of the framework which is talking to the network. I make no
    representations about whether calls to me will happen immediately
    or require returning to a control loop, or whether they will happen
    in the same or another thread. Consider methods of this class
    (aside from getPeer) to be 'thrown over the wall', to happen at some
    indeterminate time.
    """

    def write(data):
        """Write some data to the physical connection, in sequence, in a
        non-blocking fashion.

        If possible, make sure that it is all written. No data will
        ever be lost, although (obviously) the connection may be closed
        before it all gets through.
        """

    def writeSequence(data):
        """Write a list of strings to the physical connection.

        If possible, make sure that all of the data is written to
        the socket at once, without first copying it all into a
        single string.
        """

    def loseConnection():
        """Close my connection, after writing all pending data.

        Note that if there is a registered producer on a transport it
        will not be closed until the producer has been unregistered.
        """

    def getPeer():
        """Get the remote address of this connection.

        Treat this method with caution. It is the unfortunate result of the
        CGI and Jabber standards, but should not be considered reliable for
        the usual host of reasons; port forwarding, proxying, firewalls, IP
        masquerading, etc.

        @return: An L{IAddress} provider.
        """

    def getHost():
        """
        Similar to getPeer, but returns an address describing this side of the
        connection.

        @return: An L{IAddress} provider.
        """
class ITCPTransport(ITransport):
    """A TCP based transport."""

    def loseWriteConnection():
        """Half-close the write side of a TCP connection.

        If the protocol instance this is attached to provides
        IHalfCloseableProtocol, it will get notified when the operation is
        done. When closing write connection, as with loseConnection this will
        only happen when buffer has emptied and there is no registered
        producer.
        """

    def getTcpNoDelay():
        """Return if TCP_NODELAY is enabled."""

    def setTcpNoDelay(enabled):
        """Enable/disable TCP_NODELAY.

        Enabling TCP_NODELAY turns off Nagle's algorithm. Small packets are
        sent sooner, possibly at the expense of overall throughput."""

    def getTcpKeepAlive():
        """Return if SO_KEEPALIVE enabled."""

    def setTcpKeepAlive(enabled):
        """Enable/disable SO_KEEPALIVE.

        Enabling SO_KEEPALIVE sends packets periodically when the connection
        is otherwise idle, usually once every two hours. They are intended
        to allow detection of lost peers in a non-infinite amount of time."""

    def getHost():
        """Returns L{IPv4Address}."""

    def getPeer():
        """Returns L{IPv4Address}."""
class ITLSTransport(ITCPTransport):
    """A TCP transport that supports switching to TLS midstream.

    Once TLS mode is started the transport will implement L{ISSLTransport}.
    """

    def startTLS(contextFactory):
        """Initiate TLS negotiation.

        @param contextFactory: A context factory (see L{ssl.py<twisted.internet.ssl>})
        """
class ISSLTransport(ITCPTransport):
    """An SSL/TLS based transport."""

    def getPeerCertificate():
        """Return an object with the peer's certificate info."""
class IProcessTransport(ITransport):
    """A process transport.

    @ivar pid: The Process-ID of this process.
    """

    def closeStdin():
        """Close stdin after all data has been written out."""

    def closeStdout():
        """Close stdout."""

    def closeStderr():
        """Close stderr."""

    def closeChildFD(descriptor):
        """
        Close a file descriptor which is connected to the child process, identified
        by its FD in the child process.
        """

    def writeToChild(childFD, data):
        """
        Similar to L{ITransport.write} but also allows the file descriptor in
        the child process which will receive the bytes to be specified.

        This is not available on all platforms.

        @type childFD: C{int}
        @param childFD: The file descriptor to which to write.
        @type data: C{str}
        @param data: The bytes to write.
        @return: C{None}
        """

    def loseConnection():
        """Close stdin, stderr and stdout."""

    def signalProcess(signalID):
        """Send a signal to the process.

        @param signalID: can be
            - one of C{\"HUP\"}, C{\"KILL\"}, C{\"STOP\"}, or C{\"INT\"}.
              These will be implemented in a
              cross-platform manner, and so should be used
              if possible.
            - an integer, where it represents a POSIX
              signal ID.
        @raise twisted.internet.error.ProcessExitedAlready: The process has
            already exited.
        """
class IServiceCollection(Interface):
    """An object which provides access to a collection of services."""

    def getServiceNamed(serviceName):
        """Retrieve the named service from this application.

        Raise a KeyError if there is no such service name.
        """

    def addService(service):
        """Add a service to this collection."""

    def removeService(service):
        """Remove a service from this collection."""
class IUDPTransport(Interface):
    """Transport for UDP DatagramProtocols."""

    def write(packet, addr=None):
        """Write packet to given address.

        @param addr: a tuple of (ip, port). For connected transports must
            be the address the transport is connected to, or None.
            In non-connected mode this is mandatory.
        @raise twisted.internet.error.MessageLengthError: C{packet} was too
            long.
        """

    def connect(host, port):
        """Connect the transport to an address.

        This changes it to connected mode. Datagrams can only be sent to
        this address, and will only be received from this address. In addition
        the protocol's connectionRefused method might get called if destination
        is not receiving datagrams.

        @param host: an IP address, not a domain name ('127.0.0.1', not 'localhost')
        @param port: port to connect to.
        """

    def getHost():
        """Returns IPv4Address."""

    def stopListening():
        """Stop listening on this port.

        If it does not complete immediately, will return Deferred that fires
        upon completion.
        """
class IUDPConnectedTransport(Interface):
    """DEPRECATED. Transport for UDP ConnectedPacketProtocols."""

    def write(packet):
        """Write packet to address we are connected to."""

    def getHost():
        # NOTE(review): docstring claims UNIXAddress, but this is a UDP
        # transport -- likely should read IPv4Address; confirm against
        # implementations before changing.
        """Returns UNIXAddress."""
class IUNIXDatagramTransport(Interface):
    """Transport for UNIX datagram PacketProtocols."""

    def write(packet, address):
        """Write packet to given address."""

    def getHost():
        """Returns UNIXAddress."""
class IUNIXDatagramConnectedTransport(Interface):
    """Transport for connected UNIX datagram PacketProtocols."""

    def write(packet):
        """Write packet to address we are connected to."""

    def getHost():
        """Returns UNIXAddress."""

    def getPeer():
        """Returns UNIXAddress."""
class IMulticastTransport(Interface):
    """Additional functionality for multicast UDP."""

    def getOutgoingInterface():
        """Return interface of outgoing multicast packets."""

    def setOutgoingInterface(addr):
        """Set interface for outgoing multicast packets.

        Returns Deferred of success.
        """

    def getLoopbackMode():
        """Return if loopback mode is enabled."""

    def setLoopbackMode(mode):
        """Set if loopback mode is enabled."""

    def getTTL():
        """Get time to live for multicast packets."""

    def setTTL(ttl):
        """Set time to live on multicast packets."""

    def joinGroup(addr, interface=""):
        """Join a multicast group. Returns Deferred of success or failure.

        If an error occurs, the returned Deferred will fail with
        L{error.MulticastJoinError}.
        """

    def leaveGroup(addr, interface=""):
        """Leave multicast group, return Deferred of success."""
| |
"""
CONCENTRATION GAME API
api.py -- Contains ConcentrationApi, with numerous functions
for communicating User, Game, and Score related information
to and from users.
"""
# Imports and Setup
import endpoints
from protorpc import remote, messages, message_types
from google.appengine.api import memcache
from models import User, UserForm, UserForms
from models import Game, NewGameForm, GameForm
from models import MiniGameForms, HistoryForm
from models import CardForm, MakeGuessForm, HintForm
from models import Score, ScoreForms
from models import StringMessage
from utils import get_by_urlsafe
# UNCOMMENT THE LINES 25-27 FOR APP ENGINE DEPLOY IF SETTINGS.PY IS PRESENT,
# ALSO UNCOMMENT THE allowed_client_ids AND scopes FROM API SETUP (LINE 59-60)
# from settings import WEB_CLIENT_ID
# EMAIL_SCOPE = endpoints.EMAIL_SCOPE
# API_EXPLORER_CLIENT_ID = endpoints.API_EXPLORER_CLIENT_ID
# Game Logic
import game as gm
# Various Request Containers
# Request body only: the NewGameForm carries user_name and card count.
NEW_GAME_REQUEST = endpoints.ResourceContainer(NewGameForm)
# Path/query parameter: websafe key identifying an existing Game entity.
GET_GAME_REQUEST = endpoints.ResourceContainer(
    urlsafe_game_key=messages.StringField(1))
# Card index to reveal, plus the target game's websafe key.
FLIP_CARD_REQUEST = endpoints.ResourceContainer(
    queryCard=messages.IntegerField(1),
    urlsafe_game_key=messages.StringField(2))
# Guess payload (MakeGuessForm body) plus the target game's websafe key.
MAKE_MOVE_REQUEST = endpoints.ResourceContainer(
    MakeGuessForm,
    urlsafe_game_key=messages.StringField(1))
# User creation parameters: unique username and contact email.
USER_REQUEST = endpoints.ResourceContainer(
    user_name=messages.StringField(1),
    email=messages.StringField(2))
# Lookup of an existing user by name only.
USER_INFO_REQUEST = endpoints.ResourceContainer(
    user_name=messages.StringField(1))
# Memcache key under which the current top score is cached.
MEMCACHE_HIGH_SCORE = 'TOP_SCORE'
# ### CONCENTRATION API ###
@endpoints.api(name='concentration',
version='v1',
# allowed_client_ids=[WEB_CLIENT_ID, API_EXPLORER_CLIENT_ID],
# scopes=[EMAIL_SCOPE]
)
class ConcentrationApi(remote.Service):
"""Concentration Game API v0.1"""
# USER METHODS
@endpoints.method(request_message=USER_REQUEST,
                  response_message=StringMessage,
                  path='user',
                  name='create_user',
                  http_method='POST')
def create_user(self, request):
    """Create a User. Requires a unique username"""
    # Guard: reject the request outright when the name is taken.
    existing = User.query(User.name == request.user_name).get()
    if existing:
        raise endpoints.ConflictException(
            'A User with that name already exists!')
    # Persist the new user and send back a confirmation message.
    new_user = User(name=request.user_name, email=request.email)
    new_user.put()
    return StringMessage(
        message='User {} created!'.format(request.user_name))
@endpoints.method(request_message=USER_INFO_REQUEST,
                  response_message=UserForm,
                  path='user/info',
                  name='user_info',
                  http_method='GET')
def user_info(self, request):
    """Get stats about a user"""
    # Guard clause instead of if/else: bail out early when unknown.
    found = User.query(User.name == request.user_name).get()
    if not found:
        raise endpoints.NotFoundException('No such user.')
    # Summary form carrying the user's stats.
    return found.to_form()
@endpoints.method(request_message=USER_INFO_REQUEST,
                  response_message=MiniGameForms,
                  path='user/all',
                  name='get_all_games',
                  http_method='GET')
def get_all_games(self, request):
    """Return a list of all of a User's games"""
    # Guard clause: the user must exist before we can list games.
    owner = User.query(User.name == request.user_name).get()
    if not owner:
        raise endpoints.NotFoundException('No such user.')
    # Every game keyed to this user, rendered as compact summary forms.
    owned_games = Game.query(Game.user == owner.key).fetch()
    return MiniGameForms(
        games=[game.to_mini_form() for game in owned_games]
    )
@endpoints.method(request_message=USER_INFO_REQUEST,
response_message=MiniGameForms,
path='user/current',
name='get_user_games',
http_method='GET')
def get_user_games(self, request):
"""Return a list of all of a User's active (in-progress) games"""
user = User.query(User.name == request.user_name).get()
# Check that user exists
if not user:
raise endpoints.NotFoundException('No such user.')
else:
# Fetch all games
q = Game.query(Game.user == user.key)
q.filter(Game.status == 'In Progress')
games = q.fetch()
# Return a set of simplified game info forms
return MiniGameForms(
games=[g.to_mini_form() for g in games]
)
# GAME METHODS
@endpoints.method(request_message=GET_GAME_REQUEST,
response_message=StringMessage,
path='game/{urlsafe_game_key}/cancel',
name='cancel_game',
http_method='PUT')
def cancel_game(self, request):
"""Cancel an in-progress (but not completed) game"""
game = get_by_urlsafe(request.urlsafe_game_key, Game)
# Make sure we can cancel the specified game
if not game:
raise endpoints.NotFoundException(
"Can't cancel! Game doesn't exist!")
elif game.status == 'Won':
raise endpoints.BadRequestException(
"Can't cancel a game that's been won!")
elif game.status == 'Canceled':
raise endpoints.BadRequestException(
"You've already cancelled that game.")
else:
# Cancel the game and return a confirmation
game.status = 'Canceled'
game.put()
return StringMessage(message='Game canceled.')
@endpoints.method(request_message=NEW_GAME_REQUEST,
response_message=GameForm,
path='game',
name='new_game',
http_method='POST')
def new_game(self, request):
"""Creates new game"""
user = User.query(User.name == request.user_name).get()
# Make sure user exists
if not user:
raise endpoints.NotFoundException(
'A User with that name does not exist!')
try:
# Create the new Game
game = Game.new_game(user.key, request.cards)
except:
raise endpoints.BadRequestException('Request Failed')
# Increment total games by 1, but if it's initially zero, deal
# with the error that gets thrown
try:
user.total_games += 1
except TypeError:
user.total_games = 1
user.put()
# Send the new game back to the user, ready to play
return game.to_form('Let the Guessing Begin!')
@endpoints.method(request_message=GET_GAME_REQUEST,
response_message=GameForm,
path='game/{urlsafe_game_key}',
http_method='GET',
name='show_game')
def show_game(self, request):
"""Return the board state for the specified game"""
game = get_by_urlsafe(request.urlsafe_game_key, Game)
# Check that the game exists
if not game:
raise endpoints.NotFoundException('No game found!')
else:
# Return the game information, prompting user to make a move
return game.to_form('Make your move!')
@endpoints.method(request_message=GET_GAME_REQUEST,
response_message=HistoryForm,
path='game/{urlsafe_game_key}/history',
name='get_game_history',
http_method='GET')
def get_game_history(self, request):
"""Show the history of moves for a game"""
game = get_by_urlsafe(request.urlsafe_game_key, Game)
# Check that the game exists
if not game:
raise endpoints.NotFoundException('No such game!')
else:
# Return a game summary and history of moves
return game.to_history_form()
# GAME METHODS -- CARD ACTIONS
@endpoints.method(request_message=FLIP_CARD_REQUEST,
response_message=CardForm,
path='game/{urlsafe_game_key}/flip',
http_method='GET',
name='flip_card')
def flip_card(self, request):
"""Responds to a guessed card by revealing a card's value"""
game = get_by_urlsafe(request.urlsafe_game_key, Game)
# Check that the game exists
if not game:
raise endpoints.NotFoundException('No game found!')
elif game.status != 'In Progress':
raise endpoints.BadRequestException(
'Not an active game, guesses no longer allowed')
else:
# Retrieve the board and return the specified card's value
board = game.board
guessedCard = getattr(request, 'queryCard')
result = gm.turnCard(guessedCard, board)
return CardForm(cardValue=result)
@endpoints.method(request_message=MAKE_MOVE_REQUEST,
response_message=GameForm,
path='game/{urlsafe_game_key}/move',
http_method='PUT',
name='make_move')
def make_move(self, request):
"""Accepts two cards and reveals whether they match"""
game = get_by_urlsafe(request.urlsafe_game_key, Game)
# Make sure the game exists and is in progress
if not game:
raise endpoints.NotFoundException('No game found!')
elif game.status != 'In Progress':
raise endpoints.BadRequestException(
'Not an active game, moves no longer allowed')
else:
# Retrieve the board and played cards
board = game.board
displayBoard = game.boardState
card1 = getattr(request, 'card1')
card2 = getattr(request, 'card2')
if card1 == card2:
# The user is guessing the same card twice
raise endpoints.BadRequestException(
"You can't pick the same card twice!")
else:
# Evaluate the result of the move and update game information
message, resultBoard = gm.compareCards(
card1, card2, board, displayBoard)
game.guesses += 1
game.boardState = resultBoard
# Check to see if the game has now been won
if gm.isGameWon(game.boardState):
message += ' Congratulations - You win! All cards matched!'
game.status = 'Won'
game.win_game()
# Append the current move to the game history
game.history.append(
'guess: {0} result: {1}'.format([card1, card2], message))
game.put()
return game.to_form(message=message)
@endpoints.method(request_message=FLIP_CARD_REQUEST,
response_message=HintForm,
path='game/{urlsafe_game_key}/hint',
http_method='GET',
name='get_hint')
def get_hint(self, request):
"""Gives a hint for a card that matches a selected card"""
game = get_by_urlsafe(request.urlsafe_game_key, Game)
# Check that the game exists:
if not game:
raise endpoints.NotFoundException('No game found!')
elif game.status != 'In Progress':
raise endpoints.BadRequestException(
'Not an active game, no hints or moves permitted')
else:
# Get the card and generate a hint
selectedCard = getattr(request, 'queryCard')
hint = gm.giveHint(selectedCard, game.board)
return HintForm(hint=hint)
# SCORE METHODS
@endpoints.method(request_message=message_types.VoidMessage,
response_message=ScoreForms,
path='scores',
name='get_scores',
http_method='GET')
def get_scores(self, request):
"""Return all scores"""
return ScoreForms(items=[score.to_form() for score in Score.query()])
@endpoints.method(request_message=USER_INFO_REQUEST,
response_message=ScoreForms,
path='scores/user/{user_name}',
name='get_user_scores',
http_method='GET')
def get_user_scores(self, request):
"""Returns all of an individual User's scores"""
user = User.query(User.name == request.user_name).get()
# Make sure user exists
if not user:
raise endpoints.NotFoundException(
'A User with that name does not exist!')
# Retrieve and return all relevant scores
scores = Score.query(Score.user == user.key)
return ScoreForms(items=[score.to_form() for score in scores])
@endpoints.method(request_message=message_types.VoidMessage,
response_message=ScoreForms,
path='scores/high',
name='get_high_scores',
http_method='GET')
def get_high_scores(self, request):
"""Generate a list of high scores"""
q = Score.query().order(-Score.score)
# Just take the top ten scores
q.fetch(10)
return ScoreForms(items=[score.to_form() for score in q])
@endpoints.method(request_message=message_types.VoidMessage,
response_message=UserForms,
path='users/rankings',
name='get_user_rankings',
http_method='GET')
def get_user_rankings(self, request):
"""Return the players, ranked by average score"""
q = User.query().order(-User.avg_score)
# Return all players, ranked
q.fetch()
return UserForms(users=[user.to_form() for user in q])
@endpoints.method(request_message=message_types.VoidMessage,
response_message=StringMessage,
path='/scores/top',
name='get_top_score',
http_method='GET')
def get_top_score(self, request):
"""Get the cached highest score"""
return StringMessage(message=memcache.get(MEMCACHE_HIGH_SCORE) or '')
@staticmethod
def _cache_high_score():
"""Populates memcache with a high score announcement"""
q = Score.query().order(-Score.score)
q.get()
if q:
# Retrieve the high score information, if available
user = q.user_name
score = q.score
date = q.date
memcache.set(MEMCACHE_HIGH_SCORE,
'''Congratulations to {0}, with the high score
of {1}, set on {2}!'''.format(user, score, date))
# Register the service so App Engine Endpoints can route requests to it.
api = endpoints.api_server([ConcentrationApi])
| |
# Implementation of:
# Title: Optimal Rebalancing Strategy Using Dynamic Programming for Institutional Portfolios / MIT Working Paper
# Authors: Walter Sun, Ayres Fan, Li-Wei Chen, Tom Schouwenaars, Marius A. Albota
# INSTRUCTIONS: Keep the Data.csv file in the same directory and do NOT change the column names. Uncomment the '#'-prefixed plotting lines to see the graphs if required.
# Import Required Libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import math
from operator import itemgetter
import time
# Project Information banner (Python 2 print statements)
print '############################################################################################'
print 'Project: Optimal Rebalancing Strategy Using Dynamic Programming for Institutional Portfolios'
print '############################################################################################'
print ' '
# Start Timer to measure Total Run Time of Script
# NOTE(review): time.clock() is deprecated (removed in Python 3.8); fine
# here only because this script targets Python 2.
t0 = time.clock()
####################################################################################################################
# CALCULATE OPTIMAL PORTFOLIO WEIGHT - EFFICIENT FRONTIER MEAN-VARIANCE OPTIMIZATION
# Widen pandas' console output so wide frames print on one line.
# NOTE(review): 'display.line_width' is a legacy option name; newer pandas
# spells it 'display.width' -- confirm against the pinned pandas version.
pd.set_option('display.line_width', 300)
# Load the closing-price series of the two assets (columns Close_A, Close_B)
Data = pd.read_csv('Data.csv')
# Calculate Daily Returns for Assets A & B using Vectoring
Data['Returns_A'] = Data['Close_A']/Data['Close_A'].shift(1)-1
Data['Returns_B'] = Data['Close_B']/Data['Close_B'].shift(1)-1
# Replace NANs with zeros in first line (since there are no returns in the first period)
Data['Returns_A'].fillna(0, inplace=True)
Data['Returns_B'].fillna(0, inplace=True)
# Calculate the 2x2 Covariance Matrix of the daily returns
Covariance_Matrix = np.cov(Data['Returns_A'],Data['Returns_B'])
# Allocate Initial weight factor and precision
wA=0.0
# Define detail of weight increments (grid step of the weight search)
detail = 0.0005
# Define Amount Invested in Period 0
initial_amount_invested = 1000000000
# Define Trading Costs of Asset A (in basis points)
CA = 60
# Define Trading Costs of Asset B (in basis points)
CB = 40
# Create a DataFrame holding the portfolio variance at each candidate weight
index = np.arange(1 / detail)
columns = ['WeightA','Variance']
A = pd.DataFrame(columns=columns, index = index)
# Sweep the weight grid, computing the portfolio variance at each point and
# remembering the weight at which the variance stops decreasing (its minimum)
for i in range(0,int(1 / detail)):
    A['WeightA'][i] = wA
    # Portfolio variance w' * Cov * w expanded for the 2-asset case
    A['Variance'][i] = wA * (wA * Covariance_Matrix[0,0] + \
    (1 - wA) * Covariance_Matrix[0,1]) + (1 - wA) * (wA * Covariance_Matrix[1,0] + (1 - wA) * Covariance_Matrix[1,1])
    # Calculate minimum Efficient Frontier Function value along with respective weight
    try:
        # Check if next point Variance is lower than previous one (if it is keep the weight at which this occurs)
        # NOTE(review): relies on the variance curve being convex in wA; the
        # bare except below exists to skip the invalid i-1 lookup at i == 0
        # but will also hide any other error -- confirm intent.
        if A['Variance'][i] < A['Variance'][i-1]:
            Optimal_WeightA = A['WeightA'][i]
    except:
        pass
    # Increase weight by increment previously specified
    wA = wA + detail
# Print the calculated Optimal Portfolio Weight
print 'Optimal Portfolio Weight According to Efficient Frontier Using Mean-Variance Optimization: ' + str(Optimal_WeightA)
# Plot Efficient Frontier using Mean Variance Optimization
#plt.plot(A['WeightA'] ,A['Variance'])
#plt.title('Efficient Frontier using Mean-Variance Optimization')
#plt.xlabel('Weight of Asset A (Developed Market Index)')
#plt.ylabel('Variance')
#plt.show()
####################################################################################################################
# NO REBALANCING - Calculate Investment Parameters of Portfolio if Optimal Weight is selected in period 0 and then weights are left to drift according to market prices
# Alias the DataFrame with the read data
# NOTE(review): this is a *reference*, not a copy -- every column written to
# CData below is also added to Data. Use Data.copy() if isolation is intended.
CData = Data
# Define Additional Columns in the DataFrame
CData['WeightA'] = CData['InvestmentA'] = CData['InvestmentB'] = CData['Total_Returns'] = CData['Variance'] = CData['Expected_Utility_Current'] = CData['Variance_Optimal'] = CData['Expected_Utility_Optimal'] = CData['TC'] = CData['CEC'] = 0.0
# Define Period 0 parameters
CData['WeightA'][0] = Optimal_WeightA
CData['InvestmentA'][0] = CData['WeightA'][0]*initial_amount_invested
CData['InvestmentB'][0] = (1-Optimal_WeightA)*initial_amount_invested
# Calculate Portfolio Parameters in the Subsequent Time Periods
# NOTE(review): chained indexing (frame[col][row] = ...) is the fragile
# SettingWithCopy pattern; .loc[row, col] would be the safe spelling.
for i in range(1,len(Data)):
    # Calculate Value of Investments in Asset A & B
    CData['InvestmentA'][i] = CData['InvestmentA'][i-1]*(CData['Returns_A'][i]+1)
    CData['InvestmentB'][i] = CData['InvestmentB'][i-1]*(CData['Returns_B'][i]+1)
    # Calculate the New Weight of Asset A (this changes because of the change in price of the assets over time)
    CData['WeightA'][i] = CData['InvestmentA'][i] / (CData['InvestmentA'][i]+CData['InvestmentB'][i])
    # Calculate the Total Return of the Portfolio in the period
    CData['Total_Returns'][i] = (CData['InvestmentA'][i]+CData['InvestmentB'][i])/(CData['InvestmentA'][i-1]+CData['InvestmentB'][i-1])-1
    # Calculate the Expected Utility
    # NOTE(review): CData['Variance'][i] is still 0.0 at this point, so the
    # subtracted term vanishes -- confirm whether the drifting portfolio's
    # variance was meant to be computed before this line.
    CData['Expected_Utility_Current'][i] = math.log10(1+np.mean(CData['Total_Returns'][0:i]))- \
    CData['Variance'][i]/(2*((1+np.mean(CData['Total_Returns'][0:i]))**2))
    # Calculate the Variance of the Portfolio if the weight was Optimal (i.e. calculated using Mean Variance before)
    CData['Variance_Optimal'][i] = ((CData['Returns_A'][i]-CData['Total_Returns'][i])**2)*(Optimal_WeightA**2) + \
    ((CData['Returns_B'][i]-CData['Total_Returns'][i])**2)*((1-Optimal_WeightA)**2) + \
    2*(1-Optimal_WeightA)*CData['WeightA'][i]*Covariance_Matrix[1,0]
    # Calculate Optimal Expected Utility (i.e. Utility if the weight was Optimal)
    CData['Expected_Utility_Optimal'][i] = math.log10(1+np.mean(CData['Total_Returns'][0:i]))- \
    CData['Variance_Optimal'][i]/(2*((1+np.mean(CData['Total_Returns'][0:i]))**2))
    # Calculate Certainty Equivalent Costs (i.e. cost of not being at the optimal utility)
    # NOTE(review): the *10 scaling factor is undocumented -- confirm units.
    CData['CEC'][i] = (math.exp(CData['Expected_Utility_Optimal'][i]) - math.exp(CData['Expected_Utility_Current'][i]))*10*initial_amount_invested
# Print the cost of not rebalancing the portfolio to the Optimal Weight and letting it drift according to market prices
print 'Cost of not rebalancing to Optimal Portfolio Weight: ' + str(np.abs(np.sum(CData['CEC'])))
print ' '
# Plot WeightA Change over time
#CData['WeightA'].plot()
#plt.title('No Rebalancing')
#plt.xlabel('Days')
#plt.ylabel('Weight of Asset A (Developed Market Index)')
#plt.show()
####################################################################################################################
# DYNAMIC PROGRAMMING REBALANCING
# Determine Additional Parameters in the DataFrame
Data['Min_Cost_Weight'] = Data['TC'] = Data['CEC'] = Data['Total_Costs'] = Data['Low_Bound'] = Data['High_Bound'] = Data['Rebalance'] = 0.0
# Declare new DataFrame which will contain the time series
index = np.arange(1 / detail + 1)
columns = ['Close_A', 'Close_B','Returns_A', 'Returns_B', 'WeightA', 'Investment_A', 'Investment_B', 'Total_Returns', 'Variance_Current', 'Expected_Utility_Current', 'Variance_Optimal', 'Expected_Utility_Optimal','CEC', 'TC','Costs']
Cost_Min = pd.DataFrame(columns=columns, index = index)
# Cost_Min DataFrame contains all the possible weights of the portfolio (3 decimal accuracy) for every period. The weight minimising costs (TC+CEC) is imported
# to the main dataframe where Total Costs are then calculated
# Define Period 0 Parameters
Data['Min_Cost_Weight'][0] = Optimal_WeightA
for line1 in range(1,len(Data)):
    # Define Initial parameters of the Cost_Min DataFrame
    Cost_Min['Costs'][0] = Cost_Min['WeightA'][0] = Cost_Min['Investment_A'][0] = 0
    Cost_Min['Close_A'] = Data['Close_A'][line1]
    Cost_Min['Close_B'] = Data['Close_B'][line1]
    Cost_Min['Returns_A'] = Data['Returns_A'][line1]
    Cost_Min['Returns_B'] = Data['Returns_B'][line1]
    Cost_Min['Investment_B'][0] = initial_amount_invested
    Cost_Min['Total_Returns'] = CData['Total_Returns'][line1]
    # Calculate the Investments, Expected Utility, Optimal_Expected_Utility and Costs for every possible weight
    for line2 in range(1,len(Cost_Min)-1):
        # Define weight in the current period (this is equal to the weight in the previous period plus 0.0005)
        Cost_Min['WeightA'][line2] = Cost_Min['WeightA'][line2-1] + detail
        # Calculate the Value of the two Investments
        Cost_Min['Investment_A'][line2] = Cost_Min['WeightA'][line2] * initial_amount_invested
        Cost_Min['Investment_B'][line2] = (1-Cost_Min['WeightA'][line2]) * initial_amount_invested
        # Calculate the Variance with the Current Portfolio
        Cost_Min['Variance_Current'][line2] = ((Cost_Min['Returns_A'][line2]-Cost_Min['Total_Returns'][line2])**2)* \
        (Cost_Min['WeightA'][line2]**2) + ((Cost_Min['Returns_B'][line2]-Cost_Min['Total_Returns'][line2])**2)* \
        ((1-Cost_Min['WeightA'][line2])**2) + 2*(1-Cost_Min['WeightA'][line2])*Cost_Min['WeightA'][line2]*Covariance_Matrix[1,0]
        # Calculate the Expected Utility with the Current Portfolio Weight
        Cost_Min['Expected_Utility_Current'][line2] = math.log10(1+np.mean(CData['Total_Returns'][0:line1]))- \
        Cost_Min['Variance_Current'][line2]/(2*((1+np.mean(CData['Total_Returns'][0:line1]))**2))
        # Calculate the Variance if the Optimal Portfolio Weight was Selected
        Cost_Min['Variance_Optimal'][line2] = ((Cost_Min['Returns_A'][line2]-Cost_Min['Total_Returns'][line2])**2)*(Optimal_WeightA**2) + \
        ((Cost_Min['Returns_B'][line2]-Cost_Min['Total_Returns'][line2])**2)*((1-Optimal_WeightA)**2) + \
        2*(1-Optimal_WeightA)*Cost_Min['WeightA'][line2]*Covariance_Matrix[1,0]
        # Calculate the Expected Utility if the Optimal Portfolio Weight was Selected
        Cost_Min['Expected_Utility_Optimal'][line2] = math.log10(1+np.mean(CData['Total_Returns'][0:line1]))- \
        Cost_Min['Variance_Optimal'][line2]/(2*((1+np.mean(CData['Total_Returns'][0:line1]))**2))
        # Calculate the Certainty Equivalent Costs in the particular period (i.e. cost of not rebalancing to Optimal Portfolio)
        Cost_Min['CEC'][line2] = (math.exp(Cost_Min['Expected_Utility_Optimal'][line2]) - math.exp(Cost_Min['Expected_Utility_Current'][line2]))*initial_amount_invested
        # Calculate the Transaction Costs to be incurred if rebalancing is to take place
        Cost_Min['TC'][line2] = (CA*math.fabs(Optimal_WeightA-(Cost_Min['WeightA'][line2])) + CB*math.fabs((1-Optimal_WeightA)-(1-(Cost_Min['WeightA'][line2]))))
        # Calculate Total Costs to be incurred if rebalancing is to take place
        Cost_Min['Costs'][line2] = Cost_Min['CEC'][line2] + Cost_Min['TC'][line2]
    #plt.plot(Cost_Min['WeightA'],Cost_Min['CEC'])
    #plt.plot(Cost_Min['WeightA'],Cost_Min['TC'])
    #plt.title('Rebalancing Selection Band')
    #plt.xlabel('Portfolio Weight of Asset A (Developed Market Index)')
    #plt.ylabel('Basis Points')
    #plt.legend(['CEC','TC'])
    #plt.show()
    # Calculate the low and high threshold above and below which total costs of rebalancing are negative (i.e. profit from rebalancing)
    for line2 in range(min(enumerate(Cost_Min['Costs']), key=itemgetter(1))[0],len(Cost_Min)-1):
        if Cost_Min['Costs'][line2] < 0:
            Data['High_Bound'][line1] = Cost_Min['WeightA'][line2]
        else:
            break
    for line2 in range(1,len(Cost_Min)-1):
        if Cost_Min['Costs'][line2] > 0:
            Data['Low_Bound'][line1] = Cost_Min['WeightA'][line2]
        else:
            break
    # Populate main timeseries DataFrame with Weight, TC and CEC that minimise total costs of rebalancing
    # NOTE(review): the argmin over Cost_Min['Costs'] is recomputed three
    # times here; hoisting it into one variable would save two full scans.
    Data['Min_Cost_Weight'][line1] = Cost_Min['WeightA'][min(enumerate(Cost_Min['Costs'][1:len(Cost_Min)]), key=itemgetter(1))[0]]
    Data['TC'][line1] = Cost_Min['TC'][min(enumerate(Cost_Min['Costs'][1:len(Cost_Min)]), key=itemgetter(1))[0]]
    Data['CEC'][line1] = Cost_Min['CEC'][min(enumerate(Cost_Min['Costs'][1:len(Cost_Min)]), key=itemgetter(1))[0]]
    # Print a string showing where in time we currently are (how many datapoints have been calculated)
    print 'Finished with ' + str(line1) + ' out of ' + str(len(Data)-1) + ' Datapoints'
Data['New_WeightA'] = Data['WeightA']
# Account for Future Costs in Rebalancing Decision
Data['Total_Costs'][len(Data)-1] = Data['TC'][len(Data)-1] + Data['CEC'][len(Data)-1]
for line1 in range(len(Data)-2,1,-1):
    Data['Total_Costs'][line1] = Data['TC'][line1] + Data['CEC'][line1] + Data['Total_Costs'][line1-1]
# If the Current Weight is Below the Lower Threshold / Above the Higher Threshold, Rebalance taking into account the Costs of the Next Period
for line1 in range(1,len(Data)):
    if Data['New_WeightA'][line1] > Data['Low_Bound'][line1] and Data['New_WeightA'][line1] < Data['High_Bound'][line1]:
        Data['Rebalance'][line1] = 0
    else:
        Data['Rebalance'][line1] = 1
        # NOTE(review): at line1 == len(Data)-1 the [line1+1] writes below
        # target a row index past the end of the frame -- confirm behaviour.
        Data['New_WeightA'][line1+1] = Data['Min_Cost_Weight'][line1]
        Data['InvestmentA'][line1+1] = Data['New_WeightA'][line1+1]*(Data['InvestmentA'][line1]+Data['InvestmentB'][line1])
        Data['InvestmentB'][line1+1] = (1-Data['New_WeightA'][line1+1])*(Data['InvestmentA'][line1]+Data['InvestmentB'][line1])
        # Re-drift the weights forward from the rebalancing point
        for line in range(line1+2,len(Data)):
            Data['InvestmentA'][line] = Data['InvestmentA'][line-1]*(Data['Returns_A'][line]+1)
            Data['InvestmentB'][line] = Data['InvestmentB'][line-1]*(Data['Returns_B'][line]+1)
            Data['New_WeightA'][line] = Data['InvestmentA'][line] / (Data['InvestmentA'][line]+Data['InvestmentB'][line])
# Plot Weight Minimising Costs vs. Weight without Rebalancing
#Data['WeightA'].plot()
#Data['Min_Cost_Weight'].plot()
#plt.title('No Rebalancing Weight vs Minimum Cost Weight')
#plt.xlabel('Days')
#plt.ylabel('Weight')
#plt.legend(['No-Rebalancing Weight','Minimum Cost Weight'], loc='upper left')
#plt.show()
# Plot Weight with Dynamic Programming Rebalancing
#Data['New_WeightA'].plot()
#plt.title('PD Rebalancing')
#plt.xlabel('Days')
#plt.ylabel('Weight of Asset A (Developed Market Index)')
#plt.show()
# Calculate Total Costs of Rebalancing using Dynamic Programming
# (TC is in basis points; *1000 converts per the script's cost convention)
TC_DM = 0.0
for i in range(1,len(Data)):
    if Data['Rebalance'][i] == 1:
        TC_DM = TC_DM + Data['TC'][i]*1000
print ' '
print 'Total Costs of Rebalancing using Dynamic Programming ' + str(TC_DM)
# Save output file to current directory
Data.to_csv('Optimal.csv')
# Print Time it Took to Run Code
print 'Total Run Time: ' + str(time.clock() - t0,)
| |
"""
Multiclass and multilabel classification strategies
===================================================
This module implements multiclass learning algorithms:
- one-vs-the-rest / one-vs-all
- one-vs-one
- error correcting output codes
The estimators provided in this module are meta-estimators: they require a base
estimator to be provided in their constructor. For example, it is possible to
use these estimators to turn a binary classifier or a regressor into a
multiclass classifier. It is also possible to use these estimators with
multiclass estimators in the hope that their accuracy or runtime performance
improves.
All classifiers in scikit-learn implement multiclass classification; you
only need to use this module if you want to experiment with custom multiclass
strategies.
The one-vs-the-rest meta-classifier also implements a `predict_proba` method,
so long as such a method is implemented by the base classifier. This method
returns probabilities of class membership in both the single label and
multilabel case. Note that in the multilabel case, probabilities are the
marginal probability that a given sample falls in the given class. As such, in
the multilabel case the sum of these probabilities over all possible labels
for a given sample *will not* sum to unity, as they do in the single label
case.
"""
# Author: Mathieu Blondel <mathieu@mblondel.org>
# Author: Hamzeh Alsalhi <93hamsal@gmail.com>
#
# License: BSD 3 clause
import array
import numpy as np
import warnings
import scipy.sparse as sp
from .base import BaseEstimator, ClassifierMixin, clone, is_classifier
from .base import MetaEstimatorMixin
from .preprocessing import LabelBinarizer
from .metrics.pairwise import euclidean_distances
from .utils import check_random_state
from .utils.validation import _num_samples
from .utils import deprecated
from .externals.joblib import Parallel
from .externals.joblib import delayed
# Public API of this module.
__all__ = [
    "OneVsRestClassifier",
    "OneVsOneClassifier",
    "OutputCodeClassifier",
]
def _fit_binary(estimator, X, y, classes=None):
    """Fit a clone of `estimator` on (X, y), or a constant fallback.

    When `y` holds a single label value, training a real classifier is
    pointless, so a `_ConstantPredictor` fitted on that value is returned
    instead (with a warning naming the omnipresent label when `classes`
    is supplied).
    """
    unique_y = np.unique(y)
    if len(unique_y) != 1:
        fitted = clone(estimator)
        fitted.fit(X, y)
        return fitted
    # Degenerate column: every sample carries the same label.
    if classes is not None:
        c = 0 if y[0] == -1 else y[0]
        warnings.warn("Label %s is present in all training examples." %
                      str(classes[c]))
    return _ConstantPredictor().fit(X, unique_y)
def _predict_binary(estimator, X):
"""Make predictions using a single binary estimator."""
try:
score = np.ravel(estimator.decision_function(X))
except (AttributeError, NotImplementedError):
# probabilities of the positive class
score = estimator.predict_proba(X)[:, 1]
return score
def _check_estimator(estimator):
"""Make sure that an estimator implements the necessary methods."""
if (not hasattr(estimator, "decision_function") and
not hasattr(estimator, "predict_proba")):
raise ValueError("The base estimator should implement "
"decision_function or predict_proba!")
@deprecated("fit_ovr is deprecated and will be removed in 0.18."
"Use the OneVsRestClassifier instead.")
def fit_ovr(estimator, X, y, n_jobs=1):
"""Fit a one-vs-the-rest strategy.
Parameters
----------
estimator : estimator object
An estimator object implementing `fit` and one of `decision_function`
or `predict_proba`.
X : (sparse) array-like, shape = [n_samples, n_features]
Data.
y : (sparse) array-like, shape = [n_samples] or [n_samples, n_classes]
Multi-class targets. An indicator matrix turns on multilabel
classification.
Returns
-------
estimators : list of estimators object
The list of fitted estimator.
lb : fitted LabelBinarizer
"""
ovr = OneVsRestClassifier(estimator, n_jobs=n_jobs).fit(X, y)
return ovr.estimators_, ovr.label_binarizer_
@deprecated("predict_ovr is deprecated and will be removed in 0.18."
"Use the OneVsRestClassifier instead.")
def predict_ovr(estimators, label_binarizer, X):
"""Predict multi-class targets using the one vs rest strategy.
Parameters
----------
estimators : list of `n_classes` estimators, Estimators used for
predictions. The list must be homogeneous with respect to the type of
estimators. fit_ovr supplies this list as part of its output.
label_binarizer : LabelBinarizer object, Object used to transform
multiclass labels to binary labels and vice-versa. fit_ovr supplies
this object as part of its output.
X : (sparse) array-like, shape = [n_samples, n_features]
Data.
Returns
-------
y : (sparse) array-like, shape = [n_samples] or [n_samples, n_classes].
Predicted multi-class targets.
"""
e_types = set([type(e) for e in estimators if not
isinstance(e, _ConstantPredictor)])
if len(e_types) > 1:
raise ValueError("List of estimators must contain estimators of the"
" same type but contains types {0}".format(e_types))
ovr = OneVsRestClassifier(clone(estimators[0]))
ovr.estimators_ = estimators
ovr.label_binarizer_ = label_binarizer
return ovr.predict(X)
@deprecated("predict_proba_ovr is deprecated and will be removed in 0.18."
"Use the OneVsRestClassifier instead.")
def predict_proba_ovr(estimators, X, is_multilabel):
e_types = set([type(e) for e in estimators if not
isinstance(e, _ConstantPredictor)])
if len(e_types) > 1:
raise ValueError("List of estimators must contain estimators of the"
" same type but contains types {0}".format(e_types))
Y = np.array([e.predict_proba(X)[:, 1] for e in estimators]).T
if not is_multilabel:
# Then, probabilities should be normalized to 1.
Y /= np.sum(Y, axis=1)[:, np.newaxis]
return Y
class _ConstantPredictor(BaseEstimator):
    """Degenerate estimator that always answers with the label seen in fit.

    Used by `_fit_binary` when a binary problem contains only one class.
    """

    def fit(self, X, y):
        # Remember the lone training label; X is deliberately ignored.
        self.y_ = y
        return self

    def predict(self, X):
        # One copy of the stored label per input sample.
        return np.repeat(self.y_, X.shape[0])

    def decision_function(self, X):
        # Same constant answer serves as the "margin".
        return np.repeat(self.y_, X.shape[0])

    def predict_proba(self, X):
        # A single (1 - y, y) row, tiled once per sample.
        row = np.hstack([1 - self.y_, self.y_])
        return np.repeat(row[np.newaxis, :], X.shape[0], axis=0)
class OneVsRestClassifier(BaseEstimator, ClassifierMixin, MetaEstimatorMixin):
"""One-vs-the-rest (OvR) multiclass/multilabel strategy
Also known as one-vs-all, this strategy consists in fitting one classifier
per class. For each classifier, the class is fitted against all the other
classes. In addition to its computational efficiency (only `n_classes`
classifiers are needed), one advantage of this approach is its
interpretability. Since each class is represented by one and one classifier
only, it is possible to gain knowledge about the class by inspecting its
corresponding classifier. This is the most commonly used strategy for
multiclass classification and is a fair default choice.
This strategy can also be used for multilabel learning, where a classifier
is used to predict multiple labels for instance, by fitting on a 2-d matrix
in which cell [i, j] is 1 if sample i has label j and 0 otherwise.
In the multilabel learning literature, OvR is also known as the binary
relevance method.
Parameters
----------
estimator : estimator object
An estimator object implementing `fit` and one of `decision_function`
or `predict_proba`.
n_jobs : int, optional, default: 1
The number of jobs to use for the computation. If -1 all CPUs are used.
If 1 is given, no parallel computing code is used at all, which is
useful for debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are
used. Thus for n_jobs = -2, all CPUs but one are used.
Attributes
----------
`estimators_` : list of `n_classes` estimators
Estimators used for predictions.
`classes_` : array, shape = [`n_classes`]
Class labels.
`label_binarizer_` : LabelBinarizer object
Object used to transform multiclass labels to binary labels and
vice-versa.
`multilabel_` : boolean
Whether a OneVsRestClassifier is a multilabel classifier.
"""
def __init__(self, estimator, n_jobs=1):
self.estimator = estimator
self.n_jobs = n_jobs
def fit(self, X, y):
"""Fit underlying estimators.
Parameters
----------
X : (sparse) array-like, shape = [n_samples, n_features]
Data.
y : (sparse) array-like, shape = [n_samples] or [n_samples, n_classes]
Multi-class targets. An indicator matrix turns on multilabel
classification.
Returns
-------
self
"""
_check_estimator(self.estimator)
# A sparse LabelBinarizer, with sparse_output=True, has been shown to
# outpreform or match a dense label binarizer in all cases and has also
# resulted in less or equal memory consumption in the fit_ovr function
# overall.
self.label_binarizer_ = LabelBinarizer(sparse_output=True)
Y = self.label_binarizer_.fit_transform(y)
Y = Y.tocsc()
columns = (col.toarray().ravel() for col in Y.T)
# In cases where individual estimators are very fast to train setting
# n_jobs > 1 in can results in slower performance due to the overhead
# of spawning threads. See joblib issue #112.
self.estimators_ = Parallel(n_jobs=self.n_jobs)(delayed(_fit_binary)
(self.estimator, X, column,
classes=["not %s" % self.label_binarizer_.classes_[i],
self.label_binarizer_.classes_[i]])
for i, column in enumerate(columns))
return self
def _check_is_fitted(self):
if not hasattr(self, "estimators_"):
raise ValueError("The object hasn't been fitted yet!")
    def predict(self, X):
        """Predict multi-class targets using underlying estimators.

        Parameters
        ----------
        X : (sparse) array-like, shape = [n_samples, n_features]
            Data.

        Returns
        -------
        y : (sparse) array-like, shape = [n_samples] or [n_samples, n_classes].
            Predicted multi-class targets.
        """
        self._check_is_fitted()
        # Decision threshold: 0 for decision_function margins, 0.5 for
        # probabilities (the fallback path in _predict_binary).
        if (hasattr(self.estimators_[0], "decision_function") and
                is_classifier(self.estimators_[0])):
            thresh = 0
        else:
            thresh = .5
        if self.label_binarizer_.y_type_ == "multiclass":
            # Single-label case: per sample, pick the class whose binary
            # estimator produced the highest score.
            maxima = np.empty(X.shape[0], dtype=float)
            maxima.fill(-np.inf)
            argmaxima = np.zeros(X.shape[0], dtype=int)
            for i, e in enumerate(self.estimators_):
                pred = _predict_binary(e, X)
                np.maximum(maxima, pred, out=maxima)
                # Record this class wherever it set a new running maximum.
                argmaxima[maxima == pred] = i
            return self.label_binarizer_.classes_[np.array(argmaxima.T)]
        else:
            # Multilabel/binary case: assemble a sparse indicator matrix
            # column by column (one CSC column per estimator).
            n_samples = _num_samples(X)
            indices = array.array('i')
            indptr = array.array('i', [0])
            for e in self.estimators_:
                indices.extend(np.where(_predict_binary(e, X) > thresh)[0])
                indptr.append(len(indices))
            data = np.ones(len(indices), dtype=int)
            indicator = sp.csc_matrix((data, indices, indptr),
                                      shape=(n_samples, len(self.estimators_)))
            return self.label_binarizer_.inverse_transform(indicator)
def predict_proba(self, X):
    """Probability estimates.

    The returned estimates for all classes are ordered by label of classes.
    In the multilabel case each sample can have any number of labels, so
    each column is the marginal probability of its label (two labels may
    both be at 90% for one sample).  In the single-label multiclass case
    the rows of the returned matrix sum to 1.

    Parameters
    ----------
    X : array-like, shape = [n_samples, n_features]

    Returns
    -------
    T : (sparse) array-like, shape = [n_samples, n_classes]
        Returns the probability of the sample for each class in the model,
        where classes are ordered as they are in `self.classes_`.
    """
    # Column j holds P(label j | sample) from the j-th binary estimator
    # (its positive-class column).  Not disjoint in the multilabel case.
    per_class = [clf.predict_proba(X)[:, 1] for clf in self.estimators_]
    probs = np.array(per_class).T
    if not self.multilabel_:
        # Single-label case: renormalize each row to sum to 1.
        row_totals = np.sum(probs, axis=1)[:, np.newaxis]
        probs /= row_totals
    return probs
def decision_function(self, X):
    """Returns the distance of each sample from the decision boundary for
    each class. This can only be used with estimators which implement the
    decision_function method.

    Parameters
    ----------
    X : array-like, shape = [n_samples, n_features]

    Returns
    -------
    T : array-like, shape = [n_samples, n_classes]
    """
    first = self.estimators_[0]
    if not hasattr(first, "decision_function"):
        raise AttributeError(
            "Base estimator doesn't have a decision_function attribute.")
    # One column of signed distances per underlying binary estimator.
    columns = [clf.decision_function(X).ravel() for clf in self.estimators_]
    return np.array(columns).T
@property
def multilabel_(self):
    """Whether this is a multilabel classifier"""
    # The label binarizer records the target type seen during fit;
    # multilabel types are all prefixed with 'multilabel'.
    y_type = self.label_binarizer_.y_type_
    return y_type.startswith('multilabel')
@property
def classes_(self):
    # Class labels, in the order learned by the internal label binarizer.
    binarizer = self.label_binarizer_
    return binarizer.classes_
@property
def coef_(self):
    # Stacked coefficient rows, one per underlying binary estimator.
    self._check_is_fitted()
    first = self.estimators_[0]
    if not hasattr(first, "coef_"):
        raise AttributeError(
            "Base estimator doesn't have a coef_ attribute.")
    rows = [clf.coef_.ravel() for clf in self.estimators_]
    return np.array(rows)
@property
def intercept_(self):
    # Stacked intercepts, one row per underlying binary estimator.
    self._check_is_fitted()
    first = self.estimators_[0]
    if not hasattr(first, "intercept_"):
        raise AttributeError(
            "Base estimator doesn't have an intercept_ attribute.")
    return np.array([clf.intercept_.ravel() for clf in self.estimators_])
def _fit_ovo_binary(estimator, X, y, i, j):
    """Fit a single binary estimator (one-vs-one).

    Restricts the training set to samples labelled ``i`` or ``j`` and fits
    a clone of ``estimator`` on the binary problem ``i`` (0) vs ``j`` (1).
    """
    cond = np.logical_or(y == i, y == j)
    y = y[cond]
    # BUG FIX: ``np.int`` is a long-deprecated (and now removed) alias of
    # the builtin ``int``; use the builtin so modern NumPy works.
    y_binary = np.empty(y.shape, int)
    y_binary[y == i] = 0
    y_binary[y == j] = 1
    # Index through an integer arange so fancy indexing also works for
    # scipy.sparse matrices (which do not support boolean-mask rows here).
    ind = np.arange(X.shape[0])
    return _fit_binary(estimator, X[ind[cond]], y_binary, classes=[i, j])
# BUG FIX: the old message told users to switch to OneVsRestClassifier,
# but this helper wraps OneVsOneClassifier; also restore the space lost
# in the implicit string concatenation ("0.18.Use").
@deprecated("fit_ovo is deprecated and will be removed in 0.18. "
            "Use the OneVsOneClassifier instead.")
def fit_ovo(estimator, X, y, n_jobs=1):
    """Fit a one-vs-one strategy (deprecated functional wrapper).

    Returns the fitted ``(estimators_, classes_)`` of a
    :class:`OneVsOneClassifier`.
    """
    ovo = OneVsOneClassifier(estimator, n_jobs=n_jobs).fit(X, y)
    return ovo.estimators_, ovo.classes_
# BUG FIX: message named the wrong replacement class (OneVsRestClassifier)
# and lost a space in the implicit concatenation ("0.18.Use").
@deprecated("predict_ovo is deprecated and will be removed in 0.18. "
            "Use the OneVsOneClassifier instead.")
def predict_ovo(estimators, classes, X):
    """Make predictions using the one-vs-one strategy."""
    # All real estimators must share one concrete type; _ConstantPredictor
    # placeholders (used for degenerate binary problems) are exempt.
    e_types = {type(e) for e in estimators
               if not isinstance(e, _ConstantPredictor)}
    if len(e_types) > 1:
        raise ValueError("List of estimators must contain estimators of the"
                         " same type but contains types {0}".format(e_types))
    # Rebuild a OneVsOneClassifier shell around the pre-fitted estimators.
    ovo = OneVsOneClassifier(clone(estimators[0]))
    ovo.estimators_ = estimators
    ovo.classes_ = classes
    return ovo.predict(X)
class OneVsOneClassifier(BaseEstimator, ClassifierMixin, MetaEstimatorMixin):
    """One-vs-one multiclass strategy

    This strategy consists in fitting one classifier per class pair.
    At prediction time, the class which received the most votes is selected.
    Since it requires to fit `n_classes * (n_classes - 1) / 2` classifiers,
    this method is usually slower than one-vs-the-rest, due to its
    O(n_classes^2) complexity. However, this method may be advantageous for
    algorithms such as kernel algorithms which don't scale well with
    `n_samples`. This is because each individual learning problem only involves
    a small subset of the data whereas, with one-vs-the-rest, the complete
    dataset is used `n_classes` times.

    Parameters
    ----------
    estimator : estimator object
        An estimator object implementing `fit` and one of `decision_function`
        or `predict_proba`.

    n_jobs : int, optional, default: 1
        The number of jobs to use for the computation. If -1 all CPUs are used.
        If 1 is given, no parallel computing code is used at all, which is
        useful for debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are
        used. Thus for n_jobs = -2, all CPUs but one are used.

    Attributes
    ----------
    `estimators_` : list of `n_classes * (n_classes - 1) / 2` estimators
        Estimators used for predictions.

    `classes_` : numpy array of shape [n_classes]
        Array containing labels.
    """

    def __init__(self, estimator, n_jobs=1):
        self.estimator = estimator
        self.n_jobs = n_jobs

    def fit(self, X, y):
        """Fit underlying estimators.

        Parameters
        ----------
        X : (sparse) array-like, shape = [n_samples, n_features]
            Data.

        y : numpy array of shape [n_samples]
            Multi-class targets.

        Returns
        -------
        self
        """
        self.classes_ = np.unique(y)
        n_classes = self.classes_.shape[0]
        # One binary problem per unordered class pair (i, j) with i < j.
        # The generation order here defines the estimator ordering that
        # predict() relies on via its running counter ``k``.
        self.estimators_ = Parallel(n_jobs=self.n_jobs)(
            delayed(_fit_ovo_binary)(
                self.estimator, X, y, self.classes_[i], self.classes_[j])
            for i in range(n_classes) for j in range(i + 1, n_classes))
        return self

    def predict(self, X):
        """Predict multi-class targets using underlying estimators.

        Parameters
        ----------
        X : (sparse) array-like, shape = [n_samples, n_features]
            Data.

        Returns
        -------
        y : numpy array of shape [n_samples]
            Predicted multi-class targets.
        """
        if not hasattr(self, "estimators_"):
            raise ValueError("The object hasn't been fitted yet!")
        n_samples = X.shape[0]
        n_classes = self.classes_.shape[0]
        votes = np.zeros((n_samples, n_classes))
        scores = np.zeros((n_samples, n_classes))
        # ``k`` walks the estimators in the same (i, j) order used in fit().
        k = 0
        for i in range(n_classes):
            for j in range(i + 1, n_classes):
                # Estimator k was trained with class j as the positive
                # class (label 1) and class i as the negative (label 0).
                pred = self.estimators_[k].predict(X)
                score = _predict_binary(self.estimators_[k], X)
                # Accumulate a signed confidence per class for tie-breaking.
                scores[:, i] -= score
                scores[:, j] += score
                votes[pred == 0, i] += 1
                votes[pred == 1, j] += 1
                k += 1
        # find all places with maximum votes per sample
        maxima = votes == np.max(votes, axis=1)[:, np.newaxis]
        # if there are ties, use scores to break them
        if np.any(maxima.sum(axis=1) > 1):
            # Only tied classes stay eligible; everything else is masked out.
            scores[~maxima] = -np.inf
            prediction = scores.argmax(axis=1)
        else:
            prediction = votes.argmax(axis=1)
        return self.classes_[prediction]
# Missing space restored in the concatenated deprecation message.
@deprecated("fit_ecoc is deprecated and will be removed in 0.18. "
            "Use the OutputCodeClassifier instead.")
def fit_ecoc(estimator, X, y, code_size=1.5, random_state=None, n_jobs=1):
    """Fit an error-correcting output-code strategy.

    Parameters
    ----------
    estimator : estimator object
        An estimator object implementing `fit` and one of `decision_function`
        or `predict_proba`.

    code_size : float, optional
        Percentage of the number of classes to be used to create the code book.

    random_state : numpy.RandomState, optional
        The generator used to initialize the codebook. Defaults to
        numpy.random.

    Returns
    --------
    estimators : list of `int(n_classes * code_size)` estimators
        Estimators used for predictions.

    classes : numpy array of shape [n_classes]
        Array containing labels.

    `code_book_`: numpy array of shape [n_classes, code_size]
        Binary array containing the code of each class.
    """
    # BUG FIX: ``code_size`` was accepted but never forwarded, so any
    # non-default value was silently ignored.
    ecoc = OutputCodeClassifier(estimator, code_size=code_size,
                                random_state=random_state,
                                n_jobs=n_jobs).fit(X, y)
    return ecoc.estimators_, ecoc.classes_, ecoc.code_book_
# Missing space restored in the concatenated deprecation message.
@deprecated("predict_ecoc is deprecated and will be removed in 0.18. "
            "Use the OutputCodeClassifier instead.")
def predict_ecoc(estimators, classes, code_book, X):
    """Make predictions using the error-correcting output-code strategy."""
    # Rebuild an OutputCodeClassifier shell around the pre-fitted parts.
    ecoc = OutputCodeClassifier(clone(estimators[0]))
    ecoc.classes_ = classes
    ecoc.estimators_ = estimators
    ecoc.code_book_ = code_book
    return ecoc.predict(X)
class OutputCodeClassifier(BaseEstimator, ClassifierMixin, MetaEstimatorMixin):
    """(Error-Correcting) Output-Code multiclass strategy

    Output-code based strategies consist in representing each class with a
    binary code (an array of 0s and 1s). At fitting time, one binary
    classifier per bit in the code book is fitted. At prediction time, the
    classifiers are used to project new points in the class space and the class
    closest to the points is chosen. The main advantage of these strategies is
    that the number of classifiers used can be controlled by the user, either
    for compressing the model (0 < code_size < 1) or for making the model more
    robust to errors (code_size > 1). See the documentation for more details.

    Parameters
    ----------
    estimator : estimator object
        An estimator object implementing `fit` and one of `decision_function`
        or `predict_proba`.

    code_size : float
        Percentage of the number of classes to be used to create the code book.
        A number between 0 and 1 will require fewer classifiers than
        one-vs-the-rest. A number greater than 1 will require more classifiers
        than one-vs-the-rest.

    random_state : numpy.RandomState, optional
        The generator used to initialize the codebook. Defaults to
        numpy.random.

    n_jobs : int, optional, default: 1
        The number of jobs to use for the computation. If -1 all CPUs are used.
        If 1 is given, no parallel computing code is used at all, which is
        useful for debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are
        used. Thus for n_jobs = -2, all CPUs but one are used.

    Attributes
    ----------
    `estimators_` : list of `int(n_classes * code_size)` estimators
        Estimators used for predictions.

    `classes_` : numpy array of shape [n_classes]
        Array containing labels.

    `code_book_` : numpy array of shape [n_classes, code_size]
        Binary array containing the code of each class.

    References
    ----------

    .. [1] "Solving multiclass learning problems via error-correcting output
       codes",
       Dietterich T., Bakiri G.,
       Journal of Artificial Intelligence Research 2,
       1995.

    .. [2] "The error coding method and PICTs",
       James G., Hastie T.,
       Journal of Computational and Graphical statistics 7,
       1998.

    .. [3] "The Elements of Statistical Learning",
       Hastie T., Tibshirani R., Friedman J., page 606 (second-edition)
       2008.
    """

    def __init__(self, estimator, code_size=1.5, random_state=None, n_jobs=1):
        self.estimator = estimator
        self.code_size = code_size
        self.random_state = random_state
        self.n_jobs = n_jobs

    def fit(self, X, y):
        """Fit underlying estimators.

        Parameters
        ----------
        X : (sparse) array-like, shape = [n_samples, n_features]
            Data.

        y : numpy array of shape [n_samples]
            Multi-class targets.

        Returns
        -------
        self
        """
        if self.code_size <= 0:
            # BUG FIX: the format string used index {1} with a single
            # argument, so this line raised IndexError instead of the
            # intended ValueError.
            raise ValueError("code_size should be greater than 0, got {0}"
                             "".format(self.code_size))

        _check_estimator(self.estimator)
        random_state = check_random_state(self.random_state)

        self.classes_ = np.unique(y)
        n_classes = self.classes_.shape[0]
        code_size_ = int(n_classes * self.code_size)

        # FIXME: there are more elaborate methods than generating the codebook
        # randomly.
        self.code_book_ = random_state.random_sample((n_classes, code_size_))
        self.code_book_[self.code_book_ > 0.5] = 1

        # Bits are encoded as {-1, 1} when the estimator exposes signed
        # decision_function scores, and as {0, 1} otherwise.
        if hasattr(self.estimator, "decision_function"):
            self.code_book_[self.code_book_ != 1] = -1
        else:
            self.code_book_[self.code_book_ != 1] = 0

        # Expand each sample's class into its code-book row.
        classes_index = dict((c, i) for i, c in enumerate(self.classes_))

        # BUG FIX: np.int is a removed alias of the builtin int.
        Y = np.array([self.code_book_[classes_index[y[i]]]
                      for i in range(X.shape[0])], dtype=int)

        # One binary estimator per code-book bit (column of Y).
        self.estimators_ = Parallel(n_jobs=self.n_jobs)(
            delayed(_fit_binary)(self.estimator, X, Y[:, i])
            for i in range(Y.shape[1]))

        return self

    def predict(self, X):
        """Predict multi-class targets using underlying estimators.

        Parameters
        ----------
        X : (sparse) array-like, shape = [n_samples, n_features]
            Data.

        Returns
        -------
        y : numpy array of shape [n_samples]
            Predicted multi-class targets.
        """
        if not hasattr(self, "estimators_"):
            raise ValueError("The object hasn't been fitted yet!")
        # Project samples into code space, then pick the class whose code
        # word is closest in Euclidean distance.
        Y = np.array([_predict_binary(e, X) for e in self.estimators_]).T
        pred = euclidean_distances(Y, self.code_book_).argmin(axis=1)
        return self.classes_[pred]
| |
import asyncio
import aiohttp
import collections
import logging
import pytest
import re
import sys
import warnings
from aiohttp import web
from aiohttp.test_utils import (
loop_context, unused_port
)
class _AssertWarnsContext:
    """A context manager used to implement TestCase.assertWarns* methods."""

    def __init__(self, expected, expected_regex=None):
        # Warning category instances must be of (a subclass of) ``expected``.
        self.expected = expected
        if expected_regex is not None:
            expected_regex = re.compile(expected_regex)
        # Optional compiled pattern the warning message must match.
        self.expected_regex = expected_regex
        self.obj_name = None

    def __enter__(self):
        # The __warningregistry__'s need to be in a pristine state for tests
        # to work properly.
        for v in sys.modules.values():
            if getattr(v, '__warningregistry__', None):
                v.__warningregistry__ = {}
        # Record every warning raised inside the block; force the expected
        # category to always be emitted (never deduplicated).
        self.warnings_manager = warnings.catch_warnings(record=True)
        self.warnings = self.warnings_manager.__enter__()
        warnings.simplefilter("always", self.expected)
        return self

    def __exit__(self, exc_type, exc_value, tb):
        self.warnings_manager.__exit__(exc_type, exc_value, tb)
        if exc_type is not None:
            # let unexpected exceptions pass through
            return
        try:
            exc_name = self.expected.__name__
        except AttributeError:
            exc_name = str(self.expected)
        # First warning of the right category, remembered so the failure
        # message can show what *almost* matched the regex.
        first_matching = None
        for m in self.warnings:
            w = m.message
            if not isinstance(w, self.expected):
                continue
            if first_matching is None:
                first_matching = w
            if (self.expected_regex is not None and
                    not self.expected_regex.search(str(w))):
                continue
            # store warning for later retrieval
            self.warning = w
            self.filename = m.filename
            self.lineno = m.lineno
            # Success: a warning of the right type (and message) was seen.
            return
        # Now we simply try to choose a helpful failure message
        if first_matching is not None:
            __tracebackhide__ = True
            assert 0, '"{}" does not match "{}"'.format(
                self.expected_regex.pattern, str(first_matching))
        if self.obj_name:
            __tracebackhide__ = True
            assert 0, "{} not triggered by {}".format(exc_name,
                                                      self.obj_name)
        else:
            __tracebackhide__ = True
            assert 0, "{} not triggered".format(exc_name)
# Record of everything a _CapturingHandler saw: ``records`` holds the raw
# LogRecord objects, ``output`` the formatted message strings.
_LoggingWatcher = collections.namedtuple("_LoggingWatcher",
                                         ["records", "output"])
class _CapturingHandler(logging.Handler):
    """
    A logging handler capturing all (raw and formatted) logging output.
    """

    def __init__(self):
        logging.Handler.__init__(self)
        # Both lists are shared with whoever holds the watcher.
        self.watcher = _LoggingWatcher([], [])

    def flush(self):
        # Nothing is buffered; records are stored as they arrive.
        pass

    def emit(self, record):
        watcher = self.watcher
        watcher.records.append(record)
        watcher.output.append(self.format(record))
class _AssertLogsContext:
    """A context manager used to implement TestCase.assertLogs()."""

    LOGGING_FORMAT = "%(levelname)s:%(name)s:%(message)s"

    def __init__(self, logger_name=None, level=None):
        self.logger_name = logger_name
        if level:
            # Accept either a level name ("INFO") or a numeric level.
            # NOTE(review): logging._nameToLevel is a private CPython detail
            # (3.4+); confirm the supported Python range or switch to a
            # public mapping.
            self.level = logging._nameToLevel.get(level, level)
        else:
            self.level = logging.INFO
        self.msg = None

    def __enter__(self):
        # A Logger instance may be passed directly instead of a name.
        if isinstance(self.logger_name, logging.Logger):
            logger = self.logger = self.logger_name
        else:
            logger = self.logger = logging.getLogger(self.logger_name)
        formatter = logging.Formatter(self.LOGGING_FORMAT)
        handler = _CapturingHandler()
        handler.setFormatter(formatter)
        self.watcher = handler.watcher
        # Save logger state so __exit__ can restore it exactly.
        self.old_handlers = logger.handlers[:]
        self.old_level = logger.level
        self.old_propagate = logger.propagate
        # Capture exclusively: replace all handlers and stop propagation.
        logger.handlers = [handler]
        logger.setLevel(self.level)
        logger.propagate = False
        return handler.watcher

    def __exit__(self, exc_type, exc_value, tb):
        # Restore the logger exactly as we found it.
        self.logger.handlers = self.old_handlers
        self.logger.propagate = self.old_propagate
        self.logger.setLevel(self.old_level)
        if exc_type is not None:
            # let unexpected exceptions pass through
            return False
        if len(self.watcher.records) == 0:
            __tracebackhide__ = True
            assert 0, ("no logs of level {} or higher triggered on {}"
                       .format(logging.getLevelName(self.level),
                               self.logger.name))
@pytest.yield_fixture
def warning():
    # Expose the assertWarns-style context manager class as a fixture so
    # tests can write ``with warning(ExpectedWarning): ...``.
    # NOTE(review): pytest.yield_fixture is deprecated/removed in modern
    # pytest — plain @pytest.fixture supports yield; check the pinned
    # pytest version before upgrading.
    yield _AssertWarnsContext
@pytest.yield_fixture
def log():
    # Expose the assertLogs-style context manager class as a fixture so
    # tests can write ``with log('logger.name', 'INFO') as watcher: ...``.
    yield _AssertLogsContext
# add the unused_port and loop fixtures
# BUG FIX: the decorated function returned by pytest.fixture() was being
# discarded, so no ``unused_port`` fixture was ever registered; it must be
# bound to a module-level name for pytest to collect it.
unused_port = pytest.fixture(unused_port)
@pytest.yield_fixture
def loop():
    # Fresh event loop per test; loop_context() closes it on teardown.
    with loop_context() as loop:
        yield loop
@pytest.yield_fixture
def create_server(loop):
    """Yield a coroutine factory that starts an aiohttp server on an unused
    local port and returns ``(app, url)``; the server is torn down after
    the test."""
    app = handler = srv = None

    @asyncio.coroutine
    def create(*, debug=False, ssl_ctx=None, proto='http'):
        # Keep references for the teardown phase below.
        nonlocal app, handler, srv
        app = web.Application(loop=loop)
        port = unused_port()
        handler = app.make_handler(debug=debug, keep_alive_on=False)
        srv = yield from loop.create_server(handler, '127.0.0.1', port,
                                            ssl=ssl_ctx)
        if ssl_ctx:
            # e.g. http -> https when TLS is enabled
            proto += 's'
        url = "{}://127.0.0.1:{}".format(proto, port)
        return app, url

    yield create

    @asyncio.coroutine
    def finish():
        # Drain live connections, run app cleanup hooks, close the socket.
        # NOTE(review): if the factory is never invoked, handler/srv are
        # still None and this teardown raises — confirm every user of the
        # fixture actually calls create().
        yield from handler.finish_connections()
        yield from app.finish()
        srv.close()
        yield from srv.wait_closed()

    loop.run_until_complete(finish())
class Client:
    """Thin convenience wrapper around an HTTP client session bound to a
    base URL: relative request paths are resolved against the server root.
    """

    def __init__(self, session, url):
        self._session = session
        # Normalize so _absolute() can simply concatenate.
        if not url.endswith('/'):
            url += '/'
        self._url = url

    def close(self):
        # Close the underlying session (and its connector).
        self._session.close()

    def _absolute(self, path):
        # Strip all leading slashes so the path is appended to the base URL
        # (self._url always ends with exactly one '/').
        return self._url + path.lstrip('/')

    def get(self, path, **kwargs):
        return self._session.get(self._absolute(path), **kwargs)

    def post(self, path, **kwargs):
        return self._session.post(self._absolute(path), **kwargs)

    def delete(self, path, **kwargs):
        # BUG FIX: kwargs were accepted but silently dropped here, unlike
        # every other verb; forward them to the session.
        return self._session.delete(self._absolute(path), **kwargs)

    def ws_connect(self, path, **kwargs):
        return self._session.ws_connect(self._absolute(path), **kwargs)
@pytest.yield_fixture
def create_app_and_client(create_server, loop):
    """Yield a coroutine factory that starts a test server (via the
    ``create_server`` fixture) and returns an ``(app, Client)`` pair; the
    client session is closed on teardown."""
    client = None

    @asyncio.coroutine
    def maker(*, server_params=None, client_params=None):
        nonlocal client
        if server_params is None:
            server_params = {}
        server_params.setdefault('debug', False)
        server_params.setdefault('ssl_ctx', None)
        app, url = yield from create_server(**server_params)
        if client_params is None:
            client_params = {}
        client = Client(aiohttp.ClientSession(loop=loop, **client_params), url)
        return app, client

    yield maker

    # ROBUSTNESS FIX: if the test never called the factory, ``client`` is
    # still None and an unconditional close() would raise AttributeError
    # during teardown, masking the test's own outcome.
    if client is not None:
        client.close()
@pytest.mark.tryfirst
def pytest_pycollect_makeitem(collector, name, obj):
    # Collect 'run_loop'-marked coroutine functions as ordinary test
    # functions (default collection would not run them through the loop).
    if collector.funcnamefilter(name):
        if not callable(obj):
            return
        # Build a throwaway Function item just to inspect its markers.
        item = pytest.Function(name, parent=collector)
        if 'run_loop' in item.keywords:
            # Let pytest generate the (possibly parametrized) test items.
            return list(collector._genfunctions(name, obj))
@pytest.mark.tryfirst
def pytest_pyfunc_call(pyfuncitem):
    """
    Run asyncio marked test functions in an event loop instead of a normal
    function call.
    """
    if 'run_loop' not in pyfuncitem.keywords:
        # Not ours: let pytest's default call protocol handle it.
        return None
    funcargs = pyfuncitem.funcargs
    loop = funcargs['loop']
    # Forward only the fixtures the test function actually declares.
    testargs = dict((name, funcargs[name])
                    for name in pyfuncitem._fixtureinfo.argnames)
    loop.run_until_complete(pyfuncitem.obj(**testargs))
    return True
def pytest_runtest_setup(item):
    # Async tests implicitly depend on the event loop fixture; inject it
    # when the test did not request it explicitly.
    needs_loop = 'run_loop' in item.keywords
    if needs_loop and 'loop' not in item.fixturenames:
        item.fixturenames.append('loop')
def pytest_ignore_collect(path, config):
    # Skip py3.5-only test modules (async/await syntax) when the running
    # interpreter cannot parse them; returning None lets collection proceed.
    if 'test_py35' in str(path) and sys.version_info < (3, 5, 0):
        return True
| |
# -*- coding: utf-8 -*-
import json
import os
from django.contrib.auth.models import User
from django.core.files.uploadedfile import SimpleUploadedFile
from django.test import TestCase, Client, mock
from django.shortcuts import reverse
from ...models import Category, Language, Author, TheUser, Book, AddedBook
from ...views.add_book_views import add_book, generate_authors, generate_books, add_book_successful
TEST_DIR = os.path.dirname(os.path.abspath(__file__))
TEST_DATA_DIR = os.path.join(TEST_DIR, '../fixtures')
# ----------------------------------------------------------------------------------------------------------------------
class AddBookViewsTest(TestCase):
# ------------------------------------------------------------------------------------------------------------------
@classmethod
def setUpTestData(cls):
    """Create the shared fixtures once for the whole test class.

    Builds an anonymous and a logged-in test client, one user, two
    categories, two languages, four authors (one containing markup-special
    characters) and three PDF-backed books.
    """
    # Header value used to mark requests as AJAX (X-Requested-With).
    cls.xhr = 'XMLHttpRequest'
    cls.user = User.objects.create(username='test_user1')
    # The TheUser profile already exists for the new User — presumably
    # created by a post_save signal; verify against the models module.
    cls.the_user = TheUser.objects.get(id_user=cls.user)
    cls.user.set_password('Dummy#password')
    cls.user.save()
    # 'a first category' sorts before 'last category'; the ordering is
    # asserted in test_add_book_logged_user.
    cls.category1 = Category.objects.create(category_name='last category')
    cls.category2 = Category.objects.create(category_name='a first category')
    cls.language_en = Language.objects.create(language='English')
    cls.language_ru = Language.objects.create(language='Russian')
    cls.author1 = Author.objects.create(author_name='New Author Name')
    cls.author2 = Author.objects.create(author_name='A best one')
    cls.author3 = Author.objects.create(author_name='The new author')
    # Author name with markup-special characters, used to check escaping.
    cls.author4 = Author.objects.create(author_name='<AuthorSpecialSymbols>&"')
    cls.anonymous_client = Client()
    cls.logged_client = Client()
    cls.logged_client.login(username='test_user1', password='Dummy#password')
    cls.generate_books()
# ------------------------------------------------------------------------------------------------------------------
@classmethod
def generate_books(cls):
    """Create three PDF-backed Book fixtures owned by ``cls.the_user``."""
    test_book_path = os.path.join(TEST_DATA_DIR, 'test_book.pdf')
    # BUG FIX: the fixture file was opened (and never closed) once per
    # book inside the loop, leaking file handles; read it once with a
    # context manager and reuse the bytes.
    with open(test_book_path, 'rb') as test_book:
        book_bytes = test_book.read()
    for book_name in ['First Book', 'Second Book', 'Third Book']:
        Book.objects.create(
            book_name=book_name,
            id_author=cls.author1,
            id_category=cls.category1,
            language=cls.language_en,
            book_file=SimpleUploadedFile('test_book.pdf', book_bytes),
            who_added=cls.the_user
        )
# ------------------------------------------------------------------------------------------------------------------
def tearDown(self):
    """Remove uploaded files from disk so one test's artifacts never leak
    into the next."""
    for book in Book.objects.all():
        book_path = book.book_file.path
        if os.path.exists(book_path):
            os.remove(book_path)
        # Photos are optional; only unlink when one was set and exists.
        if book.photo and os.path.exists(book.photo.path):
            os.remove(book.photo.path)
# ------------------------------------------------------------------------------------------------------------------
def test_add_book_not_logged_user(self):
    """Anonymous users are redirected from the add-book page to the index."""
    resp = self.anonymous_client.get(reverse('add_book'))
    self.assertEqual(resp.resolver_match.func, add_book)
    self.assertEqual(resp.status_code, 302)
    self.assertRedirects(resp, reverse('index'), status_code=302, target_status_code=200)
# ------------------------------------------------------------------------------------------------------------------
def test_add_book_logged_user(self):
    # A logged-in user gets the add-book form with every category and
    # language in the template context.
    response = self.logged_client.get(reverse('add_book'))
    all_categories = Category.objects.all()
    all_languages = Language.objects.all()
    self.assertEqual(response.resolver_match.func, add_book)
    self.assertEqual(response.status_code, 200)
    self.assertTemplateUsed(response, 'add_book.html')
    self.assertEqual(response.context['categories'].count(), all_categories.count())
    # Categories are sorted by name: 'a first category' (category2) first.
    self.assertEqual(response.context['categories'][0], self.category2)
    self.assertEqual(response.context['languages'].count(), all_languages.count())
# ------------------------------------------------------------------------------------------------------------------
def test_generate_authors_not_ajax(self):
    """A plain (non-XHR) request to the author autocomplete returns 404."""
    resp = self.logged_client.get(reverse('generate_authors'))
    self.assertEqual(resp.resolver_match.func, generate_authors)
    self.assertEqual(resp.status_code, 404)
# ------------------------------------------------------------------------------------------------------------------
def test_generate_authors_missing_params(self):
    """An AJAX request without the 'part' parameter returns 404."""
    resp = self.logged_client.get(reverse('generate_authors'), {}, HTTP_X_REQUESTED_WITH=self.xhr)
    self.assertEqual(resp.resolver_match.func, generate_authors)
    self.assertEqual(resp.status_code, 404)
# ------------------------------------------------------------------------------------------------------------------
def test_generate_authors_invalid_params(self):
    """Empty and over-long 'part' values are both rejected with 404."""
    for part in ['', 'x' * 75]:
        resp = self.logged_client.get(reverse('generate_authors'), {'part': part},
                                      HTTP_X_REQUESTED_WITH=self.xhr)
        self.assertEqual(resp.resolver_match.func, generate_authors)
        self.assertEqual(resp.status_code, 404)
# ------------------------------------------------------------------------------------------------------------------
def test_generate_authors_success_parts(self):
    # Substring matching against author names, case-insensitive; responses
    # are JSON lists of matching author names.
    response = self.logged_client.get(reverse('generate_authors'), {'part': 'the'}, HTTP_X_REQUESTED_WITH=self.xhr)
    response_content = json.loads(response.content.decode('utf-8'))
    self.assertEqual(response.resolver_match.func, generate_authors)
    self.assertEqual(response.status_code, 200)
    self.assertTrue(isinstance(response_content, list))
    self.assertEqual(len(response_content), 1)
    self.assertEqual(response_content, ['The new author'])
    # 'new' appears in two of the fixture authors.
    response = self.logged_client.get(reverse('generate_authors'), {'part': 'new'}, HTTP_X_REQUESTED_WITH=self.xhr)
    response_content = json.loads(response.content.decode('utf-8'))
    self.assertEqual(response.resolver_match.func, generate_authors)
    self.assertEqual(response.status_code, 200)
    self.assertTrue(isinstance(response_content, list))
    self.assertEqual(len(response_content), 2)
    self.assertEqual(response_content, ['New Author Name', 'The new author'])
    # 'e' matches all four authors, including the one with special symbols.
    response = self.logged_client.get(reverse('generate_authors'), {'part': 'e'}, HTTP_X_REQUESTED_WITH=self.xhr)
    response_content = json.loads(response.content.decode('utf-8'))
    self.assertEqual(response.resolver_match.func, generate_authors)
    self.assertEqual(response.status_code, 200)
    self.assertTrue(isinstance(response_content, list))
    self.assertEqual(len(response_content), 4)
    self.assertEqual(
        response_content,
        ['New Author Name', 'A best one', 'The new author', '<AuthorSpecialSymbols>&"']
    )
# ------------------------------------------------------------------------------------------------------------------
def test_generate_authors_success_full_name(self):
    # Passing a complete author name must yield exactly that single author.
    response = self.logged_client.get(reverse('generate_authors'), {'part': 'New Author Name'},
                                      HTTP_X_REQUESTED_WITH=self.xhr)
    response_content = json.loads(response.content.decode('utf-8'))
    self.assertEqual(response.resolver_match.func, generate_authors)
    self.assertEqual(response.status_code, 200)
    self.assertTrue(isinstance(response_content, list))
    self.assertEqual(len(response_content), 1)
    self.assertEqual(response_content, ['New Author Name'])
    response = self.logged_client.get(reverse('generate_authors'), {'part': 'A best one'},
                                      HTTP_X_REQUESTED_WITH=self.xhr)
    response_content = json.loads(response.content.decode('utf-8'))
    self.assertEqual(response.resolver_match.func, generate_authors)
    self.assertEqual(response.status_code, 200)
    self.assertTrue(isinstance(response_content, list))
    self.assertEqual(len(response_content), 1)
    self.assertEqual(response_content, ['A best one'])
# ------------------------------------------------------------------------------------------------------------------
def test_generate_authors_success_different_case(self):
    # Matching is case-insensitive: upper, lower and mixed case queries all
    # return the same three authors containing 'author'.
    response = self.logged_client.get(reverse('generate_authors'), {'part': 'AUTHOR'},
                                      HTTP_X_REQUESTED_WITH=self.xhr)
    response_content = json.loads(response.content.decode('utf-8'))
    self.assertEqual(response.resolver_match.func, generate_authors)
    self.assertEqual(response.status_code, 200)
    self.assertTrue(isinstance(response_content, list))
    self.assertEqual(len(response_content), 3)
    self.assertEqual(
        response_content, ['New Author Name', 'The new author', '<AuthorSpecialSymbols>&"']
    )
    response = self.logged_client.get(reverse('generate_authors'), {'part': 'author'},
                                      HTTP_X_REQUESTED_WITH=self.xhr)
    response_content = json.loads(response.content.decode('utf-8'))
    self.assertEqual(response.resolver_match.func, generate_authors)
    self.assertEqual(response.status_code, 200)
    self.assertTrue(isinstance(response_content, list))
    self.assertEqual(len(response_content), 3)
    self.assertEqual(
        response_content, ['New Author Name', 'The new author', '<AuthorSpecialSymbols>&"']
    )
    response = self.logged_client.get(reverse('generate_authors'), {'part': 'aUthOR'},
                                      HTTP_X_REQUESTED_WITH=self.xhr)
    response_content = json.loads(response.content.decode('utf-8'))
    self.assertEqual(response.resolver_match.func, generate_authors)
    self.assertEqual(response.status_code, 200)
    self.assertTrue(isinstance(response_content, list))
    self.assertEqual(len(response_content), 3)
    self.assertEqual(
        response_content, ['New Author Name', 'The new author', '<AuthorSpecialSymbols>&"']
    )
# ------------------------------------------------------------------------------------------------------------------
def test_generate_books_not_ajax(self):
    """A plain (non-XHR) request to the book autocomplete returns 404."""
    resp = self.logged_client.get(reverse('generate_books'))
    self.assertEqual(resp.resolver_match.func, generate_books)
    self.assertEqual(resp.status_code, 404)
# ------------------------------------------------------------------------------------------------------------------
def test_generate_books_missing_params(self):
    """An AJAX request without the 'part' parameter returns 404."""
    resp = self.logged_client.get(reverse('generate_books'), {}, HTTP_X_REQUESTED_WITH=self.xhr)
    self.assertEqual(resp.resolver_match.func, generate_books)
    self.assertEqual(resp.status_code, 404)
# ------------------------------------------------------------------------------------------------------------------
def test_generate_books_invalid_params(self):
    """Empty and over-long 'part' values are both rejected with 404."""
    for part in ['', 'y' * 170]:
        resp = self.logged_client.get(reverse('generate_books'), {'part': part},
                                      HTTP_X_REQUESTED_WITH=self.xhr)
        self.assertEqual(resp.resolver_match.func, generate_books)
        self.assertEqual(resp.status_code, 404)
# ------------------------------------------------------------------------------------------------------------------
def test_generate_books_success_parts(self):
    # Substring matching against book names; each hit is a dict with the
    # book's 'name' and its detail-page 'url'.
    response = self.logged_client.get(reverse('generate_books'), {'part': 'book'},
                                      HTTP_X_REQUESTED_WITH=self.xhr)
    response_content = json.loads(response.content.decode('utf-8'))
    self.assertEqual(response.resolver_match.func, generate_books)
    self.assertEqual(response.status_code, 200)
    self.assertTrue(isinstance(response_content, list))
    self.assertEqual(len(response_content), 3)
    self.assertTrue(response_content[0].get('name', False))
    self.assertTrue(response_content[0].get('url', False))
    self.assertEqual(response_content[0]['name'], 'First Book')
    self.assertEqual(response_content[0]['url'], '/book/{}/'.format(Book.objects.get(book_name='First Book').id))
    # 'i' matches 'First Book' and 'Third Book' only.
    response = self.logged_client.get(reverse('generate_books'), {'part': 'i'},
                                      HTTP_X_REQUESTED_WITH=self.xhr)
    response_content = json.loads(response.content.decode('utf-8'))
    self.assertEqual(response.resolver_match.func, generate_books)
    self.assertEqual(response.status_code, 200)
    self.assertTrue(isinstance(response_content, list))
    self.assertEqual(len(response_content), 2)
    self.assertTrue(response_content[1].get('name', False))
    self.assertTrue(response_content[1].get('url', False))
    self.assertEqual(response_content[1]['name'], 'Third Book')
    self.assertEqual(response_content[1]['url'], '/book/{}/'.format(Book.objects.get(book_name='Third Book').id))
# ------------------------------------------------------------------------------------------------------------------
def test_generate_books_success_full_name(self):
    # A complete book name yields exactly that single book with its URL.
    response = self.logged_client.get(reverse('generate_books'), {'part': 'Second Book'},
                                      HTTP_X_REQUESTED_WITH=self.xhr)
    response_content = json.loads(response.content.decode('utf-8'))
    self.assertEqual(response.resolver_match.func, generate_books)
    self.assertEqual(response.status_code, 200)
    self.assertTrue(isinstance(response_content, list))
    self.assertEqual(len(response_content), 1)
    self.assertTrue(response_content[0].get('name', False))
    self.assertTrue(response_content[0].get('url', False))
    self.assertEqual(response_content[0]['name'], 'Second Book')
    self.assertEqual(response_content[0]['url'], '/book/{}/'.format(Book.objects.get(book_name='Second Book').id))
# ------------------------------------------------------------------------------------------------------------------
def test_generate_books_success_different_case(self):
response = self.logged_client.get(reverse('generate_books'), {'part': 'second book'},
HTTP_X_REQUESTED_WITH=self.xhr)
response_content = json.loads(response.content.decode('utf-8'))
self.assertEqual(response.resolver_match.func, generate_books)
self.assertEqual(response.status_code, 200)
self.assertTrue(isinstance(response_content, list))
self.assertEqual(len(response_content), 1)
self.assertTrue(response_content[0].get('name', False))
self.assertTrue(response_content[0].get('url', False))
self.assertEqual(response_content[0]['name'], 'Second Book')
self.assertEqual(response_content[0]['url'], '/book/{}/'.format(Book.objects.get(book_name='Second Book').id))
response = self.logged_client.get(reverse('generate_books'), {'part': 'SECOND BOOK'},
HTTP_X_REQUESTED_WITH=self.xhr)
response_content = json.loads(response.content.decode('utf-8'))
self.assertEqual(response.resolver_match.func, generate_books)
self.assertEqual(response.status_code, 200)
self.assertTrue(isinstance(response_content, list))
self.assertEqual(len(response_content), 1)
self.assertTrue(response_content[0].get('name', False))
self.assertTrue(response_content[0].get('url', False))
self.assertEqual(response_content[0]['name'], 'Second Book')
self.assertEqual(response_content[0]['url'], '/book/{}/'.format(Book.objects.get(book_name='Second Book').id))
response = self.logged_client.get(reverse('generate_books'), {'part': 'seCoND bOOk'},
HTTP_X_REQUESTED_WITH=self.xhr)
response_content = json.loads(response.content.decode('utf-8'))
self.assertEqual(response.resolver_match.func, generate_books)
self.assertEqual(response.status_code, 200)
self.assertTrue(isinstance(response_content, list))
self.assertEqual(len(response_content), 1)
self.assertTrue(response_content[0].get('name', False))
self.assertTrue(response_content[0].get('url', False))
self.assertEqual(response_content[0]['name'], 'Second Book')
self.assertEqual(response_content[0]['url'], '/book/{}/'.format(Book.objects.get(book_name='Second Book').id))
# ------------------------------------------------------------------------------------------------------------------
def test_add_book_successful_not_post(self):
response = self.logged_client.get(reverse('book_successful'))
self.assertEqual(response.resolver_match.func, add_book_successful)
self.assertEqual(response.status_code, 404)
# ------------------------------------------------------------------------------------------------------------------
def test_add_book_successful_missing_all_params(self):
response = self.logged_client.post(reverse('book_successful'), {})
self.assertEqual(response.resolver_match.func, add_book_successful)
self.assertEqual(response.status_code, 404)
# ------------------------------------------------------------------------------------------------------------------
def test_add_book_successful_missing_some_params(self):
response = self.logged_client.post(
reverse('book_successful'), {'book_name': 'Extra new book', 'author': 'A. Pushkin', 'about': 'blah blah'}
)
self.assertEqual(response.resolver_match.func, add_book_successful)
self.assertEqual(response.status_code, 404)
# ------------------------------------------------------------------------------------------------------------------
def test_add_book_successful_invalid_params(self):
test_book_path = os.path.join(TEST_DATA_DIR, 'test_book.pdf')
response = self.logged_client.post(
reverse('book_successful'),
{
'bookname': 'new_name' * 100,
'author': 'new_author' * 100,
'category': 'a' * 35,
'language': 'l' * 35,
'about': 'Some text about book',
'bookfile': SimpleUploadedFile('test_book.pdf', open(test_book_path, 'rb').read())
}
)
self.assertEqual(response.resolver_match.func, add_book_successful)
self.assertEqual(response.status_code, 404)
# ------------------------------------------------------------------------------------------------------------------
def test_add_book_successful_invalid_book_type(self):
test_book_path = os.path.join(TEST_DATA_DIR, 'test_book_image.png')
response = self.logged_client.post(
reverse('book_successful'),
{
'bookname': 'new_name',
'author': 'new_author',
'category': 'a first category',
'language': 'English',
'about': 'Some text about book',
'bookfile': SimpleUploadedFile('test_book_image.png', open(test_book_path, 'rb').read())
}
)
self.assertEqual(response.resolver_match.func, add_book_successful)
self.assertEqual(response.status_code, 404)
# ------------------------------------------------------------------------------------------------------------------
def test_add_book_successful_with_creating_new_author(self):
test_book_path = os.path.join(TEST_DATA_DIR, 'test_book.pdf')
books_count = Book.objects.all().count()
authors_count = Author.objects.all().count()
added_book_count = AddedBook.objects.all().count()
response = self.logged_client.post(
reverse('book_successful'),
{
'bookname': 'book_with_new_author',
'author': 'new_author_for_book',
'category': 'a first category',
'language': 'English',
'about': 'Some text about book',
'bookfile': SimpleUploadedFile('test_book.pdf', open(test_book_path, 'rb').read())
}
)
response_content = response.content.decode('utf-8')
created_book = Book.objects.get(book_name='book_with_new_author')
self.assertEqual(response.resolver_match.func, add_book_successful)
self.assertEqual(response.status_code, 200)
self.assertEqual(response_content, reverse('book', kwargs={'book_id': created_book.id}))
self.assertEqual(Book.objects.all().count(), books_count+1)
self.assertEqual(Author.objects.all().count(), authors_count+1)
self.assertEqual(AddedBook.objects.all().count(), added_book_count+1)
self.assertTrue(Author.objects.filter(author_name='new_author_for_book').exists())
self.assertTrue(Book.objects.filter(book_name='book_with_new_author').exists())
self.assertEqual(Book.objects.filter(book_name='book_with_new_author').count(), 1)
# ------------------------------------------------------------------------------------------------------------------
def test_add_book_successful_existing_author(self):
test_book_path = os.path.join(TEST_DATA_DIR, 'test_book.pdf')
books_count = Book.objects.all().count()
authors_count = Author.objects.all().count()
added_book_count = AddedBook.objects.all().count()
response = self.logged_client.post(
reverse('book_successful'),
{
'bookname': 'book_existing_author',
'author': 'A best one',
'category': 'a first category',
'language': 'English',
'about': 'Some text about book',
'bookfile': SimpleUploadedFile('test_book.pdf', open(test_book_path, 'rb').read())
}
)
response_content = response.content.decode('utf-8')
created_book = Book.objects.get(book_name='book_existing_author')
self.assertEqual(response.resolver_match.func, add_book_successful)
self.assertEqual(response.status_code, 200)
self.assertEqual(response_content, reverse('book', kwargs={'book_id': created_book.id}))
self.assertEqual(Book.objects.all().count(), books_count+1)
self.assertEqual(Author.objects.all().count(), authors_count)
self.assertEqual(AddedBook.objects.all().count(), added_book_count + 1)
self.assertTrue(Book.objects.filter(book_name='book_existing_author').exists())
self.assertEqual(Book.objects.filter(book_name='book_existing_author').count(), 1)
| |
# Copyright 2014 Objectif Libre
# Copyright 2015 DotHill Systems
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Unit tests for OpenStack Cinder DotHill driver."""
from lxml import etree
import mock
import requests
from cinder import exception
from cinder import test
from cinder.volume.drivers.dothill import dothill_client as dothill
from cinder.volume.drivers.dothill import dothill_common
from cinder.volume.drivers.dothill import dothill_fc
from cinder.volume.drivers.dothill import dothill_iscsi
from cinder.zonemanager import utils as fczm_utils
# Session key the fake array hands back on a successful login.
session_key = '12a1626754554a21d85040760c81b'
# Canned XML replies mimicking the DotHill management API: login variants.
resp_login = '''<RESPONSE><OBJECT basetype="status" name="status" oid="1">
<PROPERTY name="response-type">success</PROPERTY>
<PROPERTY name="response-type-numeric">0</PROPERTY>
<PROPERTY name="response">12a1626754554a21d85040760c81b</PROPERTY>
<PROPERTY name="return-code">1</PROPERTY></OBJECT></RESPONSE>'''
resp_badlogin = '''<RESPONSE><OBJECT basetype="status" name="status" oid="1">
<PROPERTY name="response-type">error</PROPERTY>
<PROPERTY name="response-type-numeric">1</PROPERTY>
<PROPERTY name="response">Authentication failure</PROPERTY>
<PROPERTY name="return-code">1</PROPERTY></OBJECT></RESPONSE>'''
# Generic command replies: return-code 0 means success, non-zero is an error.
response_ok = '''<RESPONSE><OBJECT basetype="status" name="status" oid="1">
<PROPERTY name="response">some data</PROPERTY>
<PROPERTY name="return-code">0</PROPERTY>
</OBJECT></RESPONSE>'''
response_not_ok = '''<RESPONSE><OBJECT basetype="status" name="status" oid="1">
<PROPERTY name="response">Error Message</PROPERTY>
<PROPERTY name="return-code">1</PROPERTY>
</OBJECT></RESPONSE>'''
# Capacity replies for linear (virtual-disks) and virtual (pools) backends.
response_stats_linear = '''<RESPONSE><OBJECT basetype="virtual-disks">
<PROPERTY name="size-numeric">3863830528</PROPERTY>
<PROPERTY name="freespace-numeric">3863830528</PROPERTY>
</OBJECT></RESPONSE>'''
response_stats_virtual = '''<RESPONSE><OBJECT basetype="pools">
<PROPERTY name="total-size-numeric">3863830528</PROPERTY>
<PROPERTY name="total-avail-numeric">3863830528</PROPERTY>
</OBJECT></RESPONSE>'''
# LUN mapping replies: none in use vs. LUNs 1 and 4 already taken.
response_no_lun = '''<RESPONSE></RESPONSE>'''
response_lun = '''<RESPONSE><OBJECT basetype="host-view-mappings">
<PROPERTY name="lun">1</PROPERTY></OBJECT>
<OBJECT basetype="host-view-mappings">
<PROPERTY name="lun">4</PROPERTY></OBJECT></RESPONSE>'''
# Port listing: a %(ip)s placeholder is substituted below because linear and
# virtual firmware report the iSCSI address under different property names.
response_ports = '''<RESPONSE>
<OBJECT basetype="port">
<PROPERTY name="port-type">FC</PROPERTY>
<PROPERTY name="target-id">id1</PROPERTY>
<PROPERTY name="status">Disconnected</PROPERTY></OBJECT>
<OBJECT basetype="port">
<PROPERTY name="port-type">FC</PROPERTY>
<PROPERTY name="target-id">id2</PROPERTY>
<PROPERTY name="status">Up</PROPERTY></OBJECT>
<OBJECT basetype="port">
<PROPERTY name="port-type">iSCSI</PROPERTY>
<PROPERTY name="target-id">id3</PROPERTY>
<PROPERTY name="%(ip)s" >10.0.0.10</PROPERTY>
<PROPERTY name="status">Disconnected</PROPERTY></OBJECT>
<OBJECT basetype="port">
<PROPERTY name="port-type">iSCSI</PROPERTY>
<PROPERTY name="target-id">id4</PROPERTY>
<PROPERTY name="%(ip)s" >10.0.0.11</PROPERTY>
<PROPERTY name="status">Up</PROPERTY></OBJECT>
<OBJECT basetype="port">
<PROPERTY name="port-type">iSCSI</PROPERTY>
<PROPERTY name="target-id">id5</PROPERTY>
<PROPERTY name="%(ip)s" >10.0.0.12</PROPERTY>
<PROPERTY name="status">Up</PROPERTY></OBJECT>
</RESPONSE>'''
response_ports_linear = response_ports % {'ip': 'primary-ip-address'}
response_ports_virtual = response_ports % {'ip': 'ip-address'}
# Degenerate XML inputs for the parser error paths.
invalid_xml = '''<RESPONSE></RESPONSE>'''
malformed_xml = '''<RESPONSE>'''
fake_xml = '''<fakexml></fakexml>'''
# Backend capacity fixtures (GiB) for the not-enough-space checks.
stats_low_space = {'free_capacity_gb': 10, 'total_capacity_gb': 100}
stats_large_space = {'free_capacity_gb': 90, 'total_capacity_gb': 100}
# Volume / snapshot fixtures shared across the driver tests.
vol_id = 'fceec30e-98bc-4ce5-85ff-d7309cc17cc2'
test_volume = {'id': vol_id, 'name_id': None,
               'display_name': 'test volume', 'name': 'volume', 'size': 10}
test_retype_volume = {'attach_status': 'available', 'id': vol_id,
                      'name_id': None, 'display_name': 'test volume',
                      'name': 'volume', 'size': 10}
test_host = {'capabilities': {'location_info':
                              'DotHillVolumeDriver:xxxxx:dg02:A'}}
test_snap = {'id': 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
             'volume': {'name_id': None},
             'volume_id': vol_id,
             'display_name': 'test volume', 'name': 'volume', 'size': 10}
# Array-side names derived from vol_id ('v'/'s' prefix + encoded UUID).
encoded_volid = 'v_O7DDpi8TOWF_9cwnMF'
encoded_snapid = 's_O7DDpi8TOWF_9cwnMF'
dest_volume = {'id': 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
               'source_volid': vol_id,
               'display_name': 'test volume', 'name': 'volume', 'size': 10}
# Volume fixtures in various attach states.
attached_volume = {'id': vol_id,
                   'display_name': 'test volume', 'name': 'volume',
                   'size': 10, 'status': 'in-use',
                   'attach_status': 'attached'}
attaching_volume = {'id': vol_id,
                    'display_name': 'test volume', 'name': 'volume',
                    'size': 10, 'status': 'attaching',
                    'attach_status': 'attached'}
detached_volume = {'id': vol_id, 'name_id': None,
                   'display_name': 'test volume', 'name': 'volume',
                   'size': 10, 'status': 'available',
                   'attach_status': 'detached'}
# Host connector dicts: a complete one and one with empty initiator/WWNs.
connector = {'ip': '10.0.0.2',
             'initiator': 'iqn.1993-08.org.debian:01:222',
             'wwpns': ["111111111111111", "111111111111112"],
             'wwnns': ["211111111111111", "211111111111112"],
             'host': 'fakehost'}
invalid_connector = {'ip': '10.0.0.2',
                     'initiator': '',
                     'wwpns': [],
                     'wwnns': [],
                     'host': 'fakehost'}
class TestDotHillClient(test.TestCase):
    """Unit tests for the DotHill HTTP/XML management-API client."""

    def setUp(self):
        super(TestDotHillClient, self).setUp()
        self.login = 'manage'
        self.passwd = '!manage'
        self.ip = '10.0.0.1'
        self.protocol = 'http'
        self.ssl_verify = False
        self.client = dothill.DotHillClient(self.ip, self.login, self.passwd,
                                            self.protocol, self.ssl_verify)

    @mock.patch('requests.get')
    def test_login(self, mock_requests_get):
        """A good login stores the session key; a bad one raises."""
        m = mock.Mock()
        m.text.encode.side_effect = [resp_login]
        mock_requests_get.return_value = m
        self.client.login()
        self.assertEqual(session_key, self.client._session_key)
        m.text.encode.side_effect = [resp_badlogin]
        self.assertRaises(exception.DotHillAuthenticationError,
                          self.client.login)

    def test_build_request_url(self):
        """URL builder: positional args are appended, kwargs become /key/val
        pairs, and underscores in keyword names map to dashes."""
        url = self.client._build_request_url('/path')
        self.assertEqual('http://10.0.0.1/api/path', url)
        url = self.client._build_request_url('/path', arg1='val1')
        self.assertEqual('http://10.0.0.1/api/path/arg1/val1', url)
        url = self.client._build_request_url('/path', arg_1='val1')
        self.assertEqual('http://10.0.0.1/api/path/arg-1/val1', url)
        url = self.client._build_request_url('/path', 'arg1')
        self.assertEqual('http://10.0.0.1/api/path/arg1', url)
        url = self.client._build_request_url('/path', 'arg1', arg2='val2')
        self.assertEqual('http://10.0.0.1/api/path/arg2/val2/arg1', url)
        url = self.client._build_request_url('/path', 'arg1', 'arg3',
                                             arg2='val2')
        self.assertEqual('http://10.0.0.1/api/path/arg2/val2/arg1/arg3', url)

    @mock.patch('requests.get')
    def test_request(self, mock_requests_get):
        """_request parses good XML; malformed XML and transport errors are
        wrapped in DotHillConnectionError."""
        self.client._session_key = session_key
        m = mock.Mock()
        # Three consecutive calls: good XML, malformed XML, transport error.
        m.text.encode.side_effect = [response_ok, malformed_xml,
                                     requests.exceptions.
                                     RequestException("error")]
        mock_requests_get.return_value = m
        ret = self.client._request('/path')
        # assertIsInstance reads better and reports better than
        # assertTrue(type(x) == T).
        self.assertIsInstance(ret, etree._Element)
        self.assertRaises(exception.DotHillConnectionError,
                          self.client._request,
                          '/path')
        self.assertRaises(exception.DotHillConnectionError,
                          self.client._request,
                          '/path')

    def test_assert_response_ok(self):
        """return-code 0 passes silently; non-zero or missing raises."""
        ok_tree = etree.XML(response_ok)
        not_ok_tree = etree.XML(response_not_ok)
        invalid_tree = etree.XML(invalid_xml)
        ret = self.client._assert_response_ok(ok_tree)
        self.assertIsNone(ret)
        self.assertRaises(exception.DotHillRequestError,
                          self.client._assert_response_ok,
                          not_ok_tree)
        self.assertRaises(exception.DotHillRequestError,
                          self.client._assert_response_ok, invalid_tree)

    @mock.patch.object(dothill.DotHillClient, '_request')
    def test_backend_exists(self, mock_request):
        """A request error means the backend is absent; any XML means found."""
        mock_request.side_effect = [exception.DotHillRequestError,
                                    fake_xml]
        self.assertFalse(self.client.backend_exists('backend_name',
                                                    'linear'))
        self.assertTrue(self.client.backend_exists('backend_name',
                                                   'linear'))

    @mock.patch.object(dothill.DotHillClient, '_request')
    def test_backend_stats(self, mock_request):
        """Both linear and virtual stat replies reduce to the same GiB dict."""
        stats = {'free_capacity_gb': 1979,
                 'total_capacity_gb': 1979}
        linear = etree.XML(response_stats_linear)
        virtual = etree.XML(response_stats_virtual)
        mock_request.side_effect = [linear, virtual]
        self.assertEqual(stats, self.client.backend_stats('OpenStack',
                                                          'linear'))
        self.assertEqual(stats, self.client.backend_stats('A',
                                                          'virtual'))

    @mock.patch.object(dothill.DotHillClient, '_request')
    def test_get_lun(self, mock_request):
        """First free LUN is 1 with no mappings; 2 when 1 and 4 are taken."""
        mock_request.side_effect = [etree.XML(response_no_lun),
                                    etree.XML(response_lun)]
        ret = self.client._get_first_available_lun_for_host("fakehost")
        self.assertEqual(1, ret)
        ret = self.client._get_first_available_lun_for_host("fakehost")
        self.assertEqual(2, ret)

    @mock.patch.object(dothill.DotHillClient, '_request')
    def test_get_ports(self, mock_request):
        """Only ports with status 'Up' are returned."""
        mock_request.side_effect = [etree.XML(response_ports)]
        ret = self.client.get_active_target_ports()
        self.assertEqual([{'port-type': 'FC',
                           'target-id': 'id2',
                           'status': 'Up'},
                          {'port-type': 'iSCSI',
                           'target-id': 'id4',
                           'status': 'Up'},
                          {'port-type': 'iSCSI',
                           'target-id': 'id5',
                           'status': 'Up'}], ret)

    @mock.patch.object(dothill.DotHillClient, '_request')
    def test_get_fc_ports(self, mock_request):
        """Only the active FC target id is returned."""
        mock_request.side_effect = [etree.XML(response_ports)]
        ret = self.client.get_active_fc_target_ports()
        self.assertEqual(['id2'], ret)

    @mock.patch.object(dothill.DotHillClient, '_request')
    def test_get_iscsi_iqns(self, mock_request):
        """Only the active iSCSI target ids are returned."""
        mock_request.side_effect = [etree.XML(response_ports)]
        ret = self.client.get_active_iscsi_target_iqns()
        self.assertEqual(['id4', 'id5'], ret)

    @mock.patch.object(dothill.DotHillClient, '_request')
    def test_get_iscsi_portals(self, mock_request):
        """Linear and virtual firmware replies yield the same portal map."""
        portals = {'10.0.0.12': 'Up', '10.0.0.11': 'Up'}
        mock_request.side_effect = [etree.XML(response_ports_linear),
                                    etree.XML(response_ports_virtual)]
        ret = self.client.get_active_iscsi_target_portals()
        self.assertEqual(portals, ret)
        ret = self.client.get_active_iscsi_target_portals()
        self.assertEqual(portals, ret)
class FakeConfiguration1(object):
    """Minimal stand-in for the driver configuration (linear backend)."""
    dothill_backend_name = 'OpenStack'
    dothill_backend_type = 'linear'
    san_ip = '10.0.0.1'
    san_login = 'manage'
    san_password = '!manage'
    dothill_api_protocol = 'http'

    def safe_get(self, key):
        """Mimic oslo.config safe_get(): every option resolves to a stub."""
        return 'fakevalue'
class FakeConfiguration2(FakeConfiguration1):
    """iSCSI variant of the fake config: adds portal IPs, disables CHAP."""
    dothill_iscsi_ips = ['10.0.0.11']
    use_chap_auth = None
class TestFCDotHillCommon(test.TestCase):
    """Tests for the protocol-independent driver logic, FC flavour.

    Each test typically feeds a mock a side_effect list of
    [error, success] so the first call exercises the failure path and the
    second the success path; that ordering is significant.
    """

    def setUp(self):
        super(TestFCDotHillCommon, self).setUp()
        self.config = FakeConfiguration1()
        self.common = dothill_common.DotHillCommon(self.config)
        self.common.client_login = mock.MagicMock()
        self.common.client_logout = mock.MagicMock()
        self.common.serialNumber = "xxxxx"
        self.common.owner = "A"
        self.connector_element = "wwpns"

    @mock.patch.object(dothill.DotHillClient, 'get_serial_number')
    @mock.patch.object(dothill.DotHillClient, 'get_owner_info')
    @mock.patch.object(dothill.DotHillClient, 'backend_exists')
    def test_do_setup(self, mock_backend_exists,
                      mock_owner_info, mock_serial_number):
        """Missing backend raises; existing backend sets up cleanly."""
        mock_backend_exists.side_effect = [False, True]
        mock_owner_info.return_value = "A"
        mock_serial_number.return_value = "xxxxx"
        self.assertRaises(exception.DotHillInvalidBackend,
                          self.common.do_setup, None)
        self.assertIsNone(self.common.do_setup(None))
        mock_backend_exists.assert_called_with(self.common.backend_name,
                                               self.common.backend_type)
        mock_owner_info.assert_called_with(self.common.backend_name,
                                           self.common.backend_type)

    def test_vol_name(self):
        """UUIDs are encoded to fixed-width array names with v/s prefixes."""
        self.assertEqual(encoded_volid, self.common._get_vol_name(vol_id))
        self.assertEqual(encoded_snapid, self.common._get_snap_name(vol_id))

    def test_check_flags(self):
        """All required flags present passes; a missing flag raises."""
        class FakeOptions(object):
            def __init__(self, d):
                for k, v in d.items():
                    self.__dict__[k] = v
        options = FakeOptions({'opt1': 'val1', 'opt2': 'val2'})
        required_flags = ['opt1', 'opt2']
        ret = self.common.check_flags(options, required_flags)
        self.assertIsNone(ret)
        options = FakeOptions({'opt1': 'val1', 'opt2': 'val2'})
        required_flags = ['opt1', 'opt2', 'opt3']
        self.assertRaises(exception.Invalid, self.common.check_flags,
                          options, required_flags)

    def test_assert_connector_ok(self):
        """Empty initiator/WWPN data is rejected; a full connector passes."""
        self.assertRaises(exception.InvalidInput,
                          self.common._assert_connector_ok, invalid_connector,
                          self.connector_element)
        self.assertIsNone(self.common._assert_connector_ok(
            connector,
            self.connector_element))

    @mock.patch.object(dothill.DotHillClient, 'backend_stats')
    def test_update_volume_stats(self, mock_stats):
        """Array errors surface as Invalid; success fills self.common.stats."""
        mock_stats.side_effect = [exception.DotHillRequestError,
                                  stats_large_space]
        self.assertRaises(exception.Invalid, self.common._update_volume_stats)
        mock_stats.assert_called_with(self.common.backend_name,
                                      self.common.backend_type)
        ret = self.common._update_volume_stats()
        self.assertIsNone(ret)
        self.assertEqual({'driver_version': self.common.VERSION,
                          'pools': [{'QoS_support': False,
                                     'free_capacity_gb': 90,
                                     'location_info':
                                     'DotHillVolumeDriver:xxxxx:OpenStack:A',
                                     'pool_name': 'OpenStack',
                                     'total_capacity_gb': 100}],
                          'storage_protocol': None,
                          'vendor_name': 'DotHill',
                          'volume_backend_name': None}, self.common.stats)

    @mock.patch.object(dothill.DotHillClient, 'create_volume')
    def test_create_volume(self, mock_create):
        mock_create.side_effect = [exception.DotHillRequestError, None]
        self.assertRaises(exception.Invalid, self.common.create_volume,
                          test_volume)
        ret = self.common.create_volume(test_volume)
        self.assertIsNone(ret)
        mock_create.assert_called_with(encoded_volid,
                                       "%sGB" % test_volume['size'],
                                       self.common.backend_name,
                                       self.common.backend_type)

    @mock.patch.object(dothill.DotHillClient, 'delete_volume')
    def test_delete_volume(self, mock_delete):
        """A 'not found' error is swallowed; other errors raise Invalid."""
        not_found_e = exception.DotHillRequestError(
            'The volume was not found on this system.')
        mock_delete.side_effect = [not_found_e, exception.DotHillRequestError,
                                   None]
        self.assertIsNone(self.common.delete_volume(test_volume))
        self.assertRaises(exception.Invalid, self.common.delete_volume,
                          test_volume)
        self.assertIsNone(self.common.delete_volume(test_volume))
        mock_delete.assert_called_with(encoded_volid)

    @mock.patch.object(dothill.DotHillClient, 'copy_volume')
    @mock.patch.object(dothill.DotHillClient, 'backend_stats')
    def test_create_cloned_volume(self, mock_stats, mock_copy):
        """Low space aborts before copying; copy errors raise Invalid."""
        mock_stats.side_effect = [stats_low_space, stats_large_space,
                                  stats_large_space]
        self.assertRaises(exception.DotHillNotEnoughSpace,
                          self.common.create_cloned_volume,
                          dest_volume, detached_volume)
        self.assertFalse(mock_copy.called)
        mock_copy.side_effect = [exception.DotHillRequestError, None]
        self.assertRaises(exception.Invalid,
                          self.common.create_cloned_volume,
                          dest_volume, detached_volume)
        ret = self.common.create_cloned_volume(dest_volume, detached_volume)
        self.assertIsNone(ret)
        mock_copy.assert_called_with(encoded_volid,
                                     'vqqqqqqqqqqqqqqqqqqq',
                                     self.common.backend_name,
                                     self.common.backend_type)

    @mock.patch.object(dothill.DotHillClient, 'copy_volume')
    @mock.patch.object(dothill.DotHillClient, 'backend_stats')
    def test_create_volume_from_snapshot(self, mock_stats, mock_copy):
        """Low space aborts; copy errors raise; success copies snap->vol."""
        mock_stats.side_effect = [stats_low_space, stats_large_space,
                                  stats_large_space]
        self.assertRaises(exception.DotHillNotEnoughSpace,
                          self.common.create_volume_from_snapshot,
                          dest_volume, test_snap)
        mock_copy.side_effect = [exception.DotHillRequestError, None]
        self.assertRaises(exception.Invalid,
                          self.common.create_volume_from_snapshot,
                          dest_volume, test_snap)
        ret = self.common.create_volume_from_snapshot(dest_volume, test_snap)
        self.assertIsNone(ret)
        mock_copy.assert_called_with('sqqqqqqqqqqqqqqqqqqq',
                                     'vqqqqqqqqqqqqqqqqqqq',
                                     self.common.backend_name,
                                     self.common.backend_type)

    @mock.patch.object(dothill.DotHillClient, 'extend_volume')
    def test_extend_volume(self, mock_extend):
        """Extending 10GB->20GB asks the array for the 10GB delta."""
        mock_extend.side_effect = [exception.DotHillRequestError, None]
        self.assertRaises(exception.Invalid, self.common.extend_volume,
                          test_volume, 20)
        ret = self.common.extend_volume(test_volume, 20)
        self.assertIsNone(ret)
        mock_extend.assert_called_with(encoded_volid, '10GB')

    @mock.patch.object(dothill.DotHillClient, 'create_snapshot')
    def test_create_snapshot(self, mock_create):
        mock_create.side_effect = [exception.DotHillRequestError, None]
        self.assertRaises(exception.Invalid, self.common.create_snapshot,
                          test_snap)
        ret = self.common.create_snapshot(test_snap)
        self.assertIsNone(ret)
        mock_create.assert_called_with(encoded_volid, 'sqqqqqqqqqqqqqqqqqqq')

    @mock.patch.object(dothill.DotHillClient, 'delete_snapshot')
    def test_delete_snapshot(self, mock_delete):
        """A 'not found' error is swallowed; other errors raise Invalid."""
        not_found_e = exception.DotHillRequestError(
            'The volume was not found on this system.')
        mock_delete.side_effect = [not_found_e, exception.DotHillRequestError,
                                   None]
        self.assertIsNone(self.common.delete_snapshot(test_snap))
        self.assertRaises(exception.Invalid, self.common.delete_snapshot,
                          test_snap)
        self.assertIsNone(self.common.delete_snapshot(test_snap))
        mock_delete.assert_called_with('sqqqqqqqqqqqqqqqqqqq')

    @mock.patch.object(dothill.DotHillClient, 'map_volume')
    def test_map_volume(self, mock_map):
        mock_map.side_effect = [exception.DotHillRequestError, 10]
        self.assertRaises(exception.Invalid, self.common.map_volume,
                          test_volume, connector, self.connector_element)
        lun = self.common.map_volume(test_volume, connector,
                                     self.connector_element)
        self.assertEqual(10, lun)
        mock_map.assert_called_with(encoded_volid,
                                    connector, self.connector_element)

    @mock.patch.object(dothill.DotHillClient, 'unmap_volume')
    def test_unmap_volume(self, mock_unmap):
        mock_unmap.side_effect = [exception.DotHillRequestError, None]
        self.assertRaises(exception.Invalid, self.common.unmap_volume,
                          test_volume, connector, self.connector_element)
        ret = self.common.unmap_volume(test_volume, connector,
                                       self.connector_element)
        self.assertIsNone(ret)
        mock_unmap.assert_called_with(encoded_volid, connector,
                                      self.connector_element)

    @mock.patch.object(dothill.DotHillClient, 'copy_volume')
    @mock.patch.object(dothill.DotHillClient, 'delete_volume')
    @mock.patch.object(dothill.DotHillClient, 'modify_volume_name')
    def test_retype(self, mock_modify, mock_delete, mock_copy):
        """migrate_volume succeeds with matching location_info, declines
        (False, None) without capabilities."""
        mock_copy.side_effect = [exception.DotHillRequestError, None]
        self.assertRaises(exception.Invalid, self.common.migrate_volume,
                          test_retype_volume, test_host)
        ret = self.common.migrate_volume(test_retype_volume, test_host)
        self.assertEqual((True, None), ret)
        ret = self.common.migrate_volume(test_retype_volume,
                                         {'capabilities': {}})
        self.assertEqual((False, None), ret)

    @mock.patch.object(dothill_common.DotHillCommon, '_get_vol_name')
    @mock.patch.object(dothill.DotHillClient, 'modify_volume_name')
    def test_manage_existing(self, mock_modify, mock_volume):
        existing_ref = {'source-name': 'xxxx'}
        mock_modify.side_effect = [exception.DotHillRequestError, None]
        self.assertRaises(exception.Invalid, self.common.manage_existing,
                          test_volume, existing_ref)
        ret = self.common.manage_existing(test_volume, existing_ref)
        self.assertIsNone(ret)

    @mock.patch.object(dothill.DotHillClient, 'get_volume_size')
    def test_manage_existing_get_size(self, mock_volume):
        existing_ref = {'source-name': 'xxxx'}
        mock_volume.side_effect = [exception.DotHillRequestError, 1]
        self.assertRaises(exception.Invalid,
                          self.common.manage_existing_get_size,
                          None, existing_ref)
        ret = self.common.manage_existing_get_size(None, existing_ref)
        self.assertEqual(1, ret)
class TestISCSIDotHillCommon(TestFCDotHillCommon):
    """Re-run all the common-driver tests with the iSCSI connector key."""

    def setUp(self):
        super(TestISCSIDotHillCommon, self).setUp()
        # 'initiator' selects the iSCSI IQN from the connector dict.
        self.connector_element = 'initiator'
class TestDotHillFC(test.TestCase):
    """Tests for the FC driver facade over DotHillCommon."""

    @mock.patch.object(dothill_common.DotHillCommon, 'do_setup')
    def setUp(self, mock_setup):
        super(TestDotHillFC, self).setUp()
        self.vendor_name = 'DotHill'
        mock_setup.return_value = True

        def fake_init(self, *args, **kwargs):
            super(dothill_fc.DotHillFCDriver, self).__init__()
            self.common = None
            self.configuration = FakeConfiguration1()
            self.lookup_service = fczm_utils.create_lookup_service()

        # NOTE: replaces __init__ on the driver class itself (not just this
        # instance) so construction skips real configuration handling.
        dothill_fc.DotHillFCDriver.__init__ = fake_init
        self.driver = dothill_fc.DotHillFCDriver()
        self.driver.do_setup(None)

    def _test_with_mock(self, mock, method, args, expected=None):
        """Drive a delegating driver method through a failing then a
        succeeding call of the mocked common-layer method."""
        func = getattr(self.driver, method)
        mock.side_effect = [exception.Invalid(), None]
        self.assertRaises(exception.Invalid, func, *args)
        self.assertEqual(expected, func(*args))

    @mock.patch.object(dothill_common.DotHillCommon, 'create_volume')
    def test_create_volume(self, mock_create):
        self._test_with_mock(mock_create, 'create_volume', [None])

    @mock.patch.object(dothill_common.DotHillCommon,
                       'create_cloned_volume')
    def test_create_cloned_volume(self, mock_create):
        self._test_with_mock(mock_create, 'create_cloned_volume', [None, None])

    @mock.patch.object(dothill_common.DotHillCommon,
                       'create_volume_from_snapshot')
    def test_create_volume_from_snapshot(self, mock_create):
        self._test_with_mock(mock_create, 'create_volume_from_snapshot',
                             [None, None])

    @mock.patch.object(dothill_common.DotHillCommon, 'delete_volume')
    def test_delete_volume(self, mock_delete):
        self._test_with_mock(mock_delete, 'delete_volume', [None])

    @mock.patch.object(dothill_common.DotHillCommon, 'create_snapshot')
    def test_create_snapshot(self, mock_create):
        self._test_with_mock(mock_create, 'create_snapshot', [None])

    @mock.patch.object(dothill_common.DotHillCommon, 'delete_snapshot')
    def test_delete_snapshot(self, mock_delete):
        self._test_with_mock(mock_delete, 'delete_snapshot', [None])

    @mock.patch.object(dothill_common.DotHillCommon, 'extend_volume')
    def test_extend_volume(self, mock_extend):
        self._test_with_mock(mock_extend, 'extend_volume', [None, 10])

    @mock.patch.object(dothill_common.DotHillCommon, 'client_logout')
    @mock.patch.object(dothill_common.DotHillCommon,
                       'get_active_fc_target_ports')
    @mock.patch.object(dothill_common.DotHillCommon, 'map_volume')
    @mock.patch.object(dothill_common.DotHillCommon, 'client_login')
    def test_initialize_connection(self, mock_login, mock_map, mock_ports,
                                   mock_logout):
        """Mapping failure propagates; success returns the FC target map."""
        mock_login.return_value = None
        mock_logout.return_value = None
        mock_map.side_effect = [exception.Invalid, 1]
        mock_ports.side_effect = [['id1']]
        self.assertRaises(exception.Invalid,
                          self.driver.initialize_connection, test_volume,
                          connector)
        mock_map.assert_called_with(test_volume, connector, 'wwpns')
        ret = self.driver.initialize_connection(test_volume, connector)
        self.assertEqual({'driver_volume_type': 'fibre_channel',
                          'data': {'initiator_target_map': {
                                   '111111111111111': ['id1'],
                                   '111111111111112': ['id1']},
                                   'target_wwn': ['id1'],
                                   'target_lun': 1,
                                   'target_discovered': True}}, ret)

    @mock.patch.object(dothill_common.DotHillCommon, 'unmap_volume')
    @mock.patch.object(dothill.DotHillClient, 'list_luns_for_host')
    def test_terminate_connection(self, mock_list, mock_unmap):
        mock_unmap.side_effect = [exception.Invalid, 1]
        mock_list.side_effect = ['yes']
        actual = {'driver_volume_type': 'fibre_channel', 'data': {}}
        self.assertRaises(exception.Invalid,
                          self.driver.terminate_connection, test_volume,
                          connector)
        mock_unmap.assert_called_with(test_volume, connector, 'wwpns')
        ret = self.driver.terminate_connection(test_volume, connector)
        self.assertEqual(actual, ret)

    @mock.patch.object(dothill_common.DotHillCommon, 'get_volume_stats')
    def test_get_volume_stats(self, mock_stats):
        """Stats errors propagate; cached and refreshed paths both work."""
        stats = {'storage_protocol': None,
                 'driver_version': self.driver.VERSION,
                 'volume_backend_name': None,
                 'vendor_name': self.vendor_name,
                 'pools': [{'free_capacity_gb': 90,
                            'reserved_percentage': 0,
                            'total_capacity_gb': 100,
                            'QoS_support': False,
                            'location_info': 'xx:xx:xx:xx',
                            'pool_name': 'x'}]}
        mock_stats.side_effect = [exception.Invalid, stats, stats]
        self.assertRaises(exception.Invalid, self.driver.get_volume_stats,
                          False)
        ret = self.driver.get_volume_stats(False)
        self.assertEqual(stats, ret)
        ret = self.driver.get_volume_stats(True)
        self.assertEqual(stats, ret)
        mock_stats.assert_called_with(True)

    @mock.patch.object(dothill_common.DotHillCommon, 'retype')
    def test_retype(self, mock_retype):
        mock_retype.side_effect = [exception.Invalid, True, False]
        args = [None, None, None, None, None]
        self.assertRaises(exception.Invalid, self.driver.retype, *args)
        # assertTrue/assertFalse are the idiomatic forms of
        # assertEqual(True, ...) / assertEqual(False, ...).
        self.assertTrue(self.driver.retype(*args))
        self.assertFalse(self.driver.retype(*args))

    @mock.patch.object(dothill_common.DotHillCommon, 'manage_existing')
    def test_manage_existing(self, mock_manage_existing):
        self._test_with_mock(mock_manage_existing, 'manage_existing',
                             [None, None])

    @mock.patch.object(dothill_common.DotHillCommon,
                       'manage_existing_get_size')
    def test_manage_size(self, mock_manage_size):
        mock_manage_size.side_effect = [exception.Invalid, 1]
        self.assertRaises(exception.Invalid,
                          self.driver.manage_existing_get_size,
                          None, None)
        self.assertEqual(1, self.driver.manage_existing_get_size(None, None))
class TestDotHillISCSI(TestDotHillFC):
    """iSCSI variant of the DotHill driver tests.

    Inherits the FC test cases and overrides setup plus the
    connection-related tests with iSCSI-specific expectations.
    """

    @mock.patch.object(dothill_common.DotHillCommon, 'do_setup')
    def setUp(self, mock_setup):
        super(TestDotHillISCSI, self).setUp()
        self.vendor_name = 'DotHill'
        mock_setup.return_value = True

        # Replace the driver's __init__ so no real backend is contacted.
        def fake_init(self, *args, **kwargs):
            super(dothill_iscsi.DotHillISCSIDriver, self).__init__()
            self.common = None
            self.configuration = FakeConfiguration2()
            self.iscsi_ips = ['10.0.0.11']
        dothill_iscsi.DotHillISCSIDriver.__init__ = fake_init

        self.driver = dothill_iscsi.DotHillISCSIDriver()
        self.driver.do_setup(None)

    @mock.patch.object(dothill_common.DotHillCommon, 'client_logout')
    @mock.patch.object(dothill_common.DotHillCommon,
                       'get_active_iscsi_target_portals')
    @mock.patch.object(dothill_common.DotHillCommon,
                       'get_active_iscsi_target_iqns')
    @mock.patch.object(dothill_common.DotHillCommon, 'map_volume')
    @mock.patch.object(dothill_common.DotHillCommon, 'client_login')
    def test_initialize_connection(self, mock_login, mock_map, mock_iqns,
                                   mock_portals, mock_logout):
        """initialize_connection propagates map errors, then returns iSCSI
        connection info assembled from the active IQNs and portals."""
        mock_login.return_value = None
        mock_logout.return_value = None
        # First map attempt raises, the second returns LUN 1.
        mock_map.side_effect = [exception.Invalid, 1]
        self.driver.iscsi_ips = ['10.0.0.11']
        self.driver.initialize_iscsi_ports()
        mock_iqns.side_effect = [['id2']]
        mock_portals.return_value = {'10.0.0.11': 'Up', '10.0.0.12': 'Up'}
        self.assertRaises(exception.Invalid,
                          self.driver.initialize_connection, test_volume,
                          connector)
        # For iSCSI the driver maps by the connector's initiator name.
        mock_map.assert_called_with(test_volume, connector, 'initiator')
        ret = self.driver.initialize_connection(test_volume, connector)
        self.assertEqual({'driver_volume_type': 'iscsi',
                          'data': {'target_iqn': 'id2',
                                   'target_lun': 1,
                                   'target_discovered': True,
                                   'target_portal': '10.0.0.11:3260'}}, ret)

    @mock.patch.object(dothill_common.DotHillCommon, 'unmap_volume')
    def test_terminate_connection(self, mock_unmap):
        """terminate_connection surfaces unmap errors, then returns None."""
        mock_unmap.side_effect = [exception.Invalid, 1]
        self.assertRaises(exception.Invalid,
                          self.driver.terminate_connection, test_volume,
                          connector)
        mock_unmap.assert_called_with(test_volume, connector, 'initiator')
        ret = self.driver.terminate_connection(test_volume, connector)
        self.assertEqual(None, ret)
| |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.layers.base."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.layers import base as base_layers
from tensorflow.python.layers import core as core_layers
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import test
class BaseLayerTest(test.TestCase):
  """Tests for the base `Layer` class: properties, variables, input specs,
  scope naming, and update/loss bookkeeping in graph and eager modes."""

  @test_util.run_in_graph_and_eager_modes()
  def testLayerProperties(self):
    # A fresh layer starts with empty variable collections and built=False.
    layer = base_layers.Layer(name='my_layer')
    self.assertEqual(layer.variables, [])
    self.assertEqual(layer.trainable_variables, [])
    self.assertEqual(layer.non_trainable_variables, [])
    if not context.executing_eagerly():
      # updates, losses only supported in GRAPH mode
      self.assertEqual(layer.updates, [])
      self.assertEqual(layer.losses, [])
    self.assertEqual(layer.built, False)
    layer = base_layers.Layer(name='my_layer', trainable=False)
    self.assertEqual(layer.trainable, False)

  @test_util.run_in_graph_and_eager_modes()
  def testInt64Layer(self):
    # A non-float dtype must not break layer/variable construction.
    layer = base_layers.Layer(name='my_layer', dtype='int64')
    layer.add_variable('my_var', [2, 2])
    self.assertEqual(layer.name, 'my_layer')

  @test_util.run_in_graph_and_eager_modes()
  def testAddWeight(self):
    layer = base_layers.Layer(name='my_layer')
    # Test basic variable creation.
    variable = layer.add_variable(
        'my_var', [2, 2], initializer=init_ops.zeros_initializer())
    self.assertEqual(variable.name, 'my_layer/my_var:0')
    self.assertEqual(layer.variables, [variable])
    self.assertEqual(layer.trainable_variables, [variable])
    self.assertEqual(layer.non_trainable_variables, [])
    if not context.executing_eagerly():
      self.assertEqual(
          layer.variables,
          ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES))
    # Test non-trainable variable creation.
    # layer.add_variable should work even outside `build` and `call`.
    variable_2 = layer.add_variable(
        'non_trainable_var', [2, 2],
        initializer=init_ops.zeros_initializer(),
        trainable=False)
    self.assertEqual(layer.variables, [variable, variable_2])
    self.assertEqual(layer.trainable_variables, [variable])
    self.assertEqual(layer.non_trainable_variables, [variable_2])
    if not context.executing_eagerly():
      self.assertEqual(
          len(ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)), 1)
      # regularizers only supported in GRAPH mode.
      regularizer = lambda x: math_ops.reduce_sum(x) * 1e-3
      variable = layer.add_variable(
          'reg_var', [2, 2],
          initializer=init_ops.zeros_initializer(),
          regularizer=regularizer)
      self.assertEqual(len(layer.losses), 1)

  def testReusePartitionedVaraiblesAndRegularizers(self):
    # Graph-mode only: a regularized variable split into 3 partitions adds
    # 3 regularization losses on creation, and reuse must not duplicate them.
    regularizer = lambda x: math_ops.reduce_sum(x) * 1e-3
    partitioner = partitioned_variables.fixed_size_partitioner(3)
    for reuse in [False, True]:
      with variable_scope.variable_scope(variable_scope.get_variable_scope(),
                                         partitioner=partitioner,
                                         reuse=reuse):
        layer = base_layers.Layer(name='my_layer')
        variable = layer.add_variable(
            'reg_part_var', [4, 4],
            initializer=init_ops.zeros_initializer(),
            regularizer=regularizer)
    self.assertEqual(
        len(ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)), 3)

  def testNoEagerActivityRegularizer(self):
    # activity_regularizer is rejected when constructing a layer eagerly.
    with context.eager_mode():
      with self.assertRaisesRegexp(ValueError, 'activity_regularizer'):
        core_layers.Dense(1, activity_regularizer=lambda *args, **kwargs: 0.)

  @test_util.run_in_graph_and_eager_modes()
  def testCall(self):

    class MyLayer(base_layers.Layer):

      def call(self, inputs):
        return math_ops.square(inputs)

    # apply() builds the layer on first use and names ops under its scope.
    layer = MyLayer(name='my_layer')
    inputs = random_ops.random_uniform((5,), seed=1)
    outputs = layer.apply(inputs)
    self.assertEqual(layer.built, True)
    if not context.executing_eagerly():
      # op is only supported in GRAPH mode
      self.assertEqual(outputs.op.name, 'my_layer/Square')

  @test_util.run_in_graph_and_eager_modes()
  def testDeepCopy(self):

    class MyLayer(base_layers.Layer):

      def call(self, inputs):
        return math_ops.square(inputs)

    layer = MyLayer(name='my_layer')
    layer._private_tensor = random_ops.random_uniform(())
    inputs = random_ops.random_uniform((5,), seed=1)
    outputs = layer.apply(inputs)
    self.assertEqual(layer.built, True)
    if not context.executing_eagerly():
      # op only supported in GRAPH mode.
      self.assertEqual(outputs.op.name, 'my_layer/Square')
    # A deep copy must preserve name, scope, graph and private attributes.
    layer_copy = copy.deepcopy(layer)
    self.assertEqual(layer_copy.name, layer.name)
    self.assertEqual(layer_copy._scope.name, layer._scope.name)
    self.assertEqual(layer_copy._graph, layer._graph)
    self.assertEqual(layer_copy._private_tensor, layer._private_tensor)

  @test_util.run_in_graph_and_eager_modes()
  def testScopeNaming(self):

    class PrivateLayer(base_layers.Layer):

      def call(self, inputs):
        return inputs

    inputs = random_ops.random_uniform((5,))
    # Unnamed layers get the snake_cased class name, then _1, _2, ...
    default_layer = PrivateLayer()
    _ = default_layer.apply(inputs)
    self.assertEqual(default_layer._scope.name, 'private_layer')
    default_layer1 = PrivateLayer()
    default_layer1.apply(inputs)
    self.assertEqual(default_layer1._scope.name, 'private_layer_1')
    my_layer = PrivateLayer(name='my_layer')
    my_layer.apply(inputs)
    self.assertEqual(my_layer._scope.name, 'my_layer')
    my_layer1 = PrivateLayer(name='my_layer')
    my_layer1.apply(inputs)
    self.assertEqual(my_layer1._scope.name, 'my_layer_1')
    my_layer2 = PrivateLayer(name='my_layer')
    my_layer2.apply(inputs)
    self.assertEqual(my_layer2._scope.name, 'my_layer_2')
    # Name scope shouldn't affect names.
    with ops.name_scope('some_name_scope'):
      default_layer2 = PrivateLayer()
      default_layer2.apply(inputs)
      self.assertEqual(default_layer2._scope.name, 'private_layer_2')
      my_layer3 = PrivateLayer(name='my_layer')
      my_layer3.apply(inputs)
      self.assertEqual(my_layer3._scope.name, 'my_layer_3')
      other_layer = PrivateLayer(name='other_layer')
      other_layer.apply(inputs)
      self.assertEqual(other_layer._scope.name, 'other_layer')
    # Variable scope gets added to scope names.
    with variable_scope.variable_scope('var_scope'):
      default_layer_scoped = PrivateLayer()
      default_layer_scoped.apply(inputs)
      self.assertEqual(default_layer_scoped._scope.name,
                       'var_scope/private_layer')
      my_layer_scoped = PrivateLayer(name='my_layer')
      my_layer_scoped.apply(inputs)
      self.assertEqual(my_layer_scoped._scope.name, 'var_scope/my_layer')
      my_layer_scoped1 = PrivateLayer(name='my_layer')
      my_layer_scoped1.apply(inputs)
      self.assertEqual(my_layer_scoped1._scope.name, 'var_scope/my_layer_1')

  @test_util.run_in_graph_and_eager_modes()
  def testInputSpecNdimCheck(self):

    class CustomerLayer(base_layers.Layer):

      def __init__(self):
        super(CustomerLayer, self).__init__()
        self.input_spec = base_layers.InputSpec(ndim=2)

      def call(self, inputs):
        return inputs

    if not context.executing_eagerly():
      # Unknown-rank placeholders are only constructible in graph mode.
      layer = CustomerLayer()
      with self.assertRaisesRegexp(ValueError, r'requires a defined rank'):
        layer.apply(array_ops.placeholder('int32'))
    layer = CustomerLayer()
    with self.assertRaisesRegexp(ValueError, r'expected ndim=2'):
      layer.apply(constant_op.constant([1]))
    # Note that we re-create the layer since in Eager mode, input spec checks
    # only happen on first call.
    # Works
    layer = CustomerLayer()
    layer.apply(constant_op.constant([[1], [2]]))

  @test_util.run_in_graph_and_eager_modes()
  def testInputSpecMinNdimCheck(self):

    class CustomerLayer(base_layers.Layer):

      def __init__(self):
        super(CustomerLayer, self).__init__()
        self.input_spec = base_layers.InputSpec(min_ndim=2)

      def call(self, inputs):
        return inputs

    if not context.executing_eagerly():
      layer = CustomerLayer()
      with self.assertRaisesRegexp(ValueError, r'requires a defined rank'):
        layer.apply(array_ops.placeholder('int32'))
    layer = CustomerLayer()
    with self.assertRaisesRegexp(ValueError, r'expected min_ndim=2'):
      layer.apply(constant_op.constant([1]))
    # Works
    layer = CustomerLayer()
    layer.apply(constant_op.constant([[1], [2]]))
    layer = CustomerLayer()
    layer.apply(constant_op.constant([[[1], [2]]]))

  @test_util.run_in_graph_and_eager_modes()
  def testInputSpecMaxNdimCheck(self):

    class CustomerLayer(base_layers.Layer):

      def __init__(self):
        super(CustomerLayer, self).__init__()
        self.input_spec = base_layers.InputSpec(max_ndim=2)

      def call(self, inputs):
        return inputs

    if not context.executing_eagerly():
      layer = CustomerLayer()
      with self.assertRaisesRegexp(ValueError, r'requires a defined rank'):
        layer.apply(array_ops.placeholder('int32'))
    layer = CustomerLayer()
    with self.assertRaisesRegexp(ValueError, r'expected max_ndim=2'):
      layer.apply(constant_op.constant([[[1], [2]]]))
    # Works
    layer = CustomerLayer()
    layer.apply(constant_op.constant([1]))
    layer = CustomerLayer()
    layer.apply(constant_op.constant([[1], [2]]))

  @test_util.run_in_graph_and_eager_modes()
  def testInputSpecDtypeCheck(self):

    class CustomerLayer(base_layers.Layer):

      def __init__(self):
        super(CustomerLayer, self).__init__()
        self.input_spec = base_layers.InputSpec(dtype='float32')

      def call(self, inputs):
        return inputs

    layer = CustomerLayer()
    with self.assertRaisesRegexp(ValueError, r'expected dtype=float32'):
      layer.apply(constant_op.constant(1, dtype=dtypes.int32))
    # Works
    layer = CustomerLayer()
    layer.apply(constant_op.constant(1.0, dtype=dtypes.float32))

  @test_util.run_in_graph_and_eager_modes()
  def testInputSpecAxesCheck(self):

    class CustomerLayer(base_layers.Layer):

      def __init__(self):
        super(CustomerLayer, self).__init__()
        self.input_spec = base_layers.InputSpec(axes={-1: 2})

      def call(self, inputs):
        return inputs

    layer = CustomerLayer()
    with self.assertRaisesRegexp(ValueError, r'expected axis'):
      layer.apply(constant_op.constant([1, 2, 3]))
    # Works
    layer = CustomerLayer()
    layer.apply(constant_op.constant([1, 2]))
    layer = CustomerLayer()
    layer.apply(constant_op.constant([[1, 2], [3, 4], [5, 6]]))

  @test_util.run_in_graph_and_eager_modes()
  def testInputSpecShapeCheck(self):

    class CustomerLayer(base_layers.Layer):

      def __init__(self):
        super(CustomerLayer, self).__init__()
        self.input_spec = base_layers.InputSpec(shape=(None, 3))

      def call(self, inputs):
        return inputs

    layer = CustomerLayer()
    with self.assertRaisesRegexp(ValueError, r'expected shape'):
      layer.apply(constant_op.constant([[1, 2]]))
    # Works
    layer = CustomerLayer()
    layer.apply(constant_op.constant([[1, 2, 3], [4, 5, 6]]))

  @test_util.run_in_graph_and_eager_modes()
  def testNoInputSpec(self):

    class CustomerLayer(base_layers.Layer):

      def __init__(self):
        super(CustomerLayer, self).__init__()
        self.input_spec = None

      def call(self, inputs):
        return inputs

    # With no input_spec, any input rank/dtype must be accepted.
    layer = CustomerLayer()
    layer.apply(constant_op.constant(1))
    # Works
    if not context.executing_eagerly():
      layer.apply(array_ops.placeholder('int32'))
      layer.apply(array_ops.placeholder('int32', shape=(2, 3)))

  @test_util.run_in_graph_and_eager_modes()
  def test_count_params(self):
    # Built dense layer: kernel (4 * 16) + bias (16).
    dense = core_layers.Dense(16)
    dense.build((None, 4))
    self.assertEqual(dense.count_params(), 16 * 4 + 16)
    # An unbuilt layer cannot report a parameter count.
    dense = core_layers.Dense(16)
    with self.assertRaises(ValueError):
      dense.count_params()

  @test_util.run_in_graph_and_eager_modes()
  def testDictInputOutput(self):

    class DictLayer(base_layers.Layer):

      def call(self, inputs):
        return {'l' + key: inputs[key] for key in inputs}

    # Dict-structured inputs/outputs must pass through apply() unchanged
    # in structure, with the per-key transformation applied.
    layer = DictLayer()
    if context.executing_eagerly():
      i1 = constant_op.constant(3)
      i2 = constant_op.constant(4.0)
      result = layer.apply({'abel': i1, 'ogits': i2})
      self.assertTrue(isinstance(result, dict))
      self.assertEqual(set(['label', 'logits']), set(result.keys()))
      self.assertEqual(3, result['label'].numpy())
      self.assertEqual(4.0, result['logits'].numpy())
    else:
      i1 = array_ops.placeholder('int32')
      i2 = array_ops.placeholder('float32')
      result = layer.apply({'abel': i1, 'ogits': i2})
      self.assertTrue(isinstance(result, dict))
      self.assertEqual(set(['label', 'logits']), set(result.keys()))

  def testActivityRegularizer(self):
    # Graph-mode only: applying the layer records one activity-reg loss
    # attributable to the layer's input.
    regularizer = math_ops.reduce_sum
    layer = base_layers.Layer(activity_regularizer=regularizer)
    x = array_ops.placeholder('int32')
    layer.apply(x)
    self.assertEqual(len(layer.get_losses_for(x)), 1)

  def testNameScopeIsConsistentWithVariableScope(self):
    # Github issue 13429.

    class MyLayer(base_layers.Layer):

      def build(self, input_shape):
        self.my_var = self.add_variable('my_var', (), dtypes.float32)
        self.built = True

      def call(self, inputs):
        return math_ops.multiply(inputs, self.my_var, name='my_op')

    def _gen_layer(x, name=None):
      layer = MyLayer(name=name)
      out = layer.apply(x)
      return layer, out

    # unnamed layer
    with ops.Graph().as_default():
      x = array_ops.placeholder(dtypes.float32, (), 'x')
      layer, op = _gen_layer(x)
      layer1, op1 = _gen_layer(op)
      layer2, op2 = _gen_layer(op1)
      self.assertEqual(layer.my_var.name, 'my_layer/my_var:0')
      self.assertEqual(op.name, 'my_layer/my_op:0')
      self.assertEqual(layer1.my_var.name, 'my_layer_1/my_var:0')
      self.assertEqual(op1.name, 'my_layer_1/my_op:0')
      self.assertEqual(layer2.my_var.name, 'my_layer_2/my_var:0')
      self.assertEqual(op2.name, 'my_layer_2/my_op:0')
    # name starts from zero
    with ops.Graph().as_default():
      x = array_ops.placeholder(dtypes.float32, (), 'x')
      layer, op = _gen_layer(x, name='name')
      layer1, op1 = _gen_layer(op, name='name_1')
      layer2, op2 = _gen_layer(op1, name='name_2')
      self.assertEqual(layer.my_var.name, 'name/my_var:0')
      self.assertEqual(op.name, 'name/my_op:0')
      self.assertEqual(layer1.my_var.name, 'name_1/my_var:0')
      self.assertEqual(op1.name, 'name_1/my_op:0')
      self.assertEqual(layer2.my_var.name, 'name_2/my_var:0')
      self.assertEqual(op2.name, 'name_2/my_op:0')
    # name starts from one
    with ops.Graph().as_default():
      x = array_ops.placeholder(dtypes.float32, (), 'x')
      layer, op = _gen_layer(x, name='name_1')
      layer1, op1 = _gen_layer(op, name='name_2')
      layer2, op2 = _gen_layer(op1, name='name_3')
      self.assertEqual(layer.my_var.name, 'name_1/my_var:0')
      self.assertEqual(op.name, 'name_1/my_op:0')
      self.assertEqual(layer1.my_var.name, 'name_2/my_var:0')
      self.assertEqual(op1.name, 'name_2/my_op:0')
      self.assertEqual(layer2.my_var.name, 'name_3/my_var:0')
      self.assertEqual(op2.name, 'name_3/my_op:0')

  def testVariablesAreLiftedFromFunctionBuildingGraphs(self):

    class MyLayer(base_layers.Layer):

      def build(self, input_shape):
        self.my_var = self.add_variable('my_var', (), dtypes.float32)
        self.built = True

      def call(self, inputs):
        return inputs

    outer_graph = ops.get_default_graph()
    function_building_graph = ops.Graph()
    function_building_graph._building_function = True
    with outer_graph.as_default():
      with function_building_graph.as_default():
        layer = MyLayer()
        # Create a variable by invoking build through __call__ and assert that
        # it is both tracked and lifted into the outer graph.
        inputs = array_ops.placeholder(dtypes.float32, (), 'inputs')
        layer.apply(inputs)
        self.assertEqual(len(layer.variables), 1)
        self.assertEqual(len(layer.trainable_variables), 1)
        self.assertEqual(layer.variables[0].graph, outer_graph)

  def testGetUpdateFor(self):

    class MyLayer(base_layers.Layer):

      def build(self, input_shape):
        self.a = self.add_variable('a',
                                   (),
                                   dtypes.float32,
                                   trainable=False)
        self.b = self.add_variable('b',
                                   (),
                                   dtypes.float32,
                                   trainable=False)
        # Unconditional update (bucketed under inputs=None).
        self.add_update(state_ops.assign_add(self.a, 1., name='b_update'))
        self.built = True

      def call(self, inputs):
        # Conditional update, tied to this call's inputs.
        self.add_update(state_ops.assign_add(self.a, inputs, name='a_update'),
                        inputs=True)
        return inputs + 1

    layer = MyLayer()
    inputs = array_ops.placeholder(dtypes.float32, (), 'inputs')
    intermediate_inputs = inputs + 1
    outputs = layer.apply(intermediate_inputs)
    self.assertEqual(len(layer.updates), 2)
    self.assertEqual(len(layer.get_updates_for(None)), 1)
    self.assertEqual(len(layer.get_updates_for([inputs])), 1)
    self.assertEqual(len(layer.get_updates_for([intermediate_inputs])), 1)
    self.assertEqual(len(layer.get_updates_for([outputs])), 0)
    # Call same layer on new input, creating one more conditional update
    inputs = array_ops.placeholder(dtypes.float32, (), 'inputs')
    intermediate_inputs = inputs + 1
    outputs = layer.apply(intermediate_inputs)
    self.assertEqual(len(layer.updates), 3)
    self.assertEqual(len(layer.get_updates_for(None)), 1)
    # Check that we are successfully filtering out irrelevant updates
    self.assertEqual(len(layer.get_updates_for([inputs])), 1)
    self.assertEqual(len(layer.get_updates_for([intermediate_inputs])), 1)
    self.assertEqual(len(layer.get_updates_for([outputs])), 0)

  def testGetLossesFor(self):

    class MyLayer(base_layers.Layer):

      def build(self, input_shape):
        self.a = self.add_variable('a',
                                   (),
                                   dtypes.float32,
                                   trainable=False)
        self.b = self.add_variable('b',
                                   (),
                                   dtypes.float32,
                                   trainable=False)
        # Unconditional loss (bucketed under inputs=None).
        self.add_loss(self.a)
        self.built = True

      def call(self, inputs):
        # Conditional loss, tied to this call's inputs.
        self.add_loss(inputs, inputs=True)
        return inputs + 1

    layer = MyLayer()
    inputs = array_ops.placeholder(dtypes.float32, (), 'inputs')
    intermediate_inputs = inputs + 1
    outputs = layer.apply(intermediate_inputs)
    self.assertEqual(len(layer.losses), 2)
    self.assertEqual(len(layer.get_losses_for(None)), 1)
    self.assertEqual(len(layer.get_losses_for([inputs])), 1)
    self.assertEqual(len(layer.get_losses_for([intermediate_inputs])), 1)
    self.assertEqual(len(layer.get_losses_for([outputs])), 0)
    # Call same layer on new input, creating one more conditional loss
    inputs = array_ops.placeholder(dtypes.float32, (), 'inputs')
    intermediate_inputs = inputs + 1
    outputs = layer.apply(intermediate_inputs)
    self.assertEqual(len(layer.losses), 3)
    self.assertEqual(len(layer.get_losses_for(None)), 1)
    # Check that we are successfully filtering out irrelevant losses
    self.assertEqual(len(layer.get_losses_for([inputs])), 1)
    self.assertEqual(len(layer.get_losses_for([intermediate_inputs])), 1)
    self.assertEqual(len(layer.get_losses_for([outputs])), 0)

  def testLayerGraphSetInFirstApply(self):
    with ops.Graph().as_default():
      # Graph at construction time is ignored
      layer = core_layers.Dense(1)
    with ops.Graph().as_default():
      layer.apply(constant_op.constant([[1.]]))
      # layer is now bound to second Graph
    with ops.Graph().as_default(), self.assertRaisesRegexp(
        ValueError, 'Input graph and Layer graph are not the same'):
      layer.apply(constant_op.constant([[1.]]))
# Standard TensorFlow test-runner entry point.
if __name__ == '__main__':
  test.main()
| |
import re
import random
from random import randint
import time
import logging
import os
import urllib2
from bs4 import BeautifulSoup
from packages.google import search
from actions import *
from settings import *
"""
Jarvy : Python Intelligent Assistant for Humans
=================
Jarvy aims to help humans by trying to understand them and figuring out the best ways to respond to them.
"""
__title__ = 'jarvy'
__version__ = '1.3.0'
__build__ = 0x010300 # in the format of 00-00-00
__author__ = 'Semih Yagcioglu'
__license__ = 'MIT'
__copyright__ = 'Copyright 2015 Semih Yagcioglu'
class Jarvy:
""" Jarvy : Python Intelligent Assistant for Humans
"""
__LOG_LOCATION__ = "log/jarvy.log"
def __init__(self):
self.__setupLogger__()
self.actions = Actions()
self.settings = Settings()
self.status = True
self.wakeup()
def __setupLogger__(self):
self.logger = logging.getLogger('Jarvy')
self.logger.setLevel(logging.INFO)
# Log formatting
formatter = logging.Formatter('%(asctime)s\t%(name)s\t%(levelname)s\t%(message)s')
# Console handler
# ch = logging.StreamHandler()
# ch.setFormatter(formatter)
# self.logger.addHandler(ch)
# Logfile handler
directory = os.path.dirname(self.__LOG_LOCATION__)
if not os.path.exists(directory):
os.makedirs(directory)
fh = logging.FileHandler(self.__LOG_LOCATION__)
fh.setFormatter(formatter)
self.logger.addHandler(fh)
# starts jarvy
def wakeup(self):
welcome = self.hi()
print '\n\n' + welcome + "."
self.logger.info("Woke up.")
while self.status:
try:
message = str(raw_input("What can I help you with?\n\n")).lower()
if any(m in message for m in self.settings.farewell_messages) and any(m in message for m in self.settings.jarvy_name):
self.sleep()
else:
answer = self.answer(message)
print answer
except KeyboardInterrupt:
self.sleep()
# terminates jarvy
def sleep(self):
farewell = self.farewell()
print farewell
self.status = False
self.logger.info("Slept.")
# produce salutation message
def hi(self):
chance = random.uniform(0, 100)
if chance <= 20:
greeting = 'Hello ' + self.settings.master_name
elif chance <= 30:
greeting = 'Hi ' + self.settings.master_name
elif chance <= 40:
greeting = 'Hey'
else:
hour = int(time.strftime('%H', time.localtime()))
if 0 <= hour < 5:
greeting = 'Good night ' + self.settings.master_formal_address
elif 5 <= hour < 12:
greeting = 'Good morning ' + self.settings.master_formal_address
elif 12 <= hour < 17:
greeting = 'Good afternoon ' + self.settings.master_formal_address
else:
greeting = 'Good evening ' + self.settings.master_formal_address
return greeting
# produce farewell message
def farewell(self):
chance = random.uniform(0, 100)
if chance <= 20:
farewell = 'Good bye ' + self.settings.master_name
elif chance <= 40:
farewell = 'Farewell my friend'
else:
hour = int(time.strftime('%H', time.localtime()))
if 0 <= hour < 5:
farewell = 'Good night ' + self.settings.master_formal_address
elif 5 <= hour < 12:
farewell = 'Have a good day ' + self.settings.master_formal_address
elif 12 <= hour < 17:
farewell = 'Good afternoon ' + self.settings.master_formal_address
else:
farewell = 'Good evening ' + self.settings.master_formal_address
return farewell
# respond to the query message
def answer(self, message):
action = self.understand(message)
responses = self.think(action, message)
answer = self.explain(responses)
return answer
def understand(self, message):
if any(m in message.split() for m in self.settings.personal_message_for_jarvy): # it is something about the jarvy
action = self.actions.about_jarvy
elif message == self.settings.jarvy_name.lower(): # direct address to jarvy
action = self.actions.direct_address
elif any(m.lower() in message.split() for m in self.settings.personal_message_for_master): # it is something about the master
action = self.actions.about_master
elif any(m.lower() in message.split() for m in self.settings.rudimentary_question_tags): # rewrite message as a search query
action = self.actions.search_google
else:
action = self.actions.say_sorry
return action
def think(self, action, message):
if action == self.actions.about_jarvy:
responses = self.respond('jarvy')
elif action == self.actions.direct_address:
responses = self.respond('direct_address')
elif action == self.actions.about_master:
responses = self.respond('master')
elif action == self.actions.search_google: # get results from google
responses = self.make_search(message, 'google')
elif action == self.actions.search_wolfram: # get results from wolfram
responses = self.make_search(message, 'wolfram')
elif action == self.actions.search_wikipedia: # get results from wikipedia
responses = self.make_search(message, 'wikipedia')
else:
responses = []
if not responses:
rand = randint(0, len(self.settings.sorry_messages) - 1)
responses = [self.settings.sorry_messages[rand]]
return responses
def make_search(self, query, source):
query = self.rewrite_question(source, query)
search_results = []
trusted_results = []
try:
urls = search(query, stop=self.settings.number_of_search_results) # make a search
for i, url in enumerate(urls):
search_results.append(url)
if url.find('wikipedia') > 0 and i < self.settings.trusted_source_treshold: # trusted source, skip if not in there
trusted_results.append(url)
try:
if len(trusted_results) > 0:
html = urllib2.urlopen(trusted_results[0]).read()
soup = BeautifulSoup(html, 'html.parser')
p = soup.find('div', id="bodyContent").p
soup_p = BeautifulSoup(str(p), 'html.parser')
text = soup_p.get_text()
else:
html = urllib2.urlopen(search_results[0]).read()
soup = BeautifulSoup(html, 'html.parser')
p = soup.find_all('p')
soup_p = BeautifulSoup(str(p), 'html.parser')
text = soup_p.get_text()
pat = re.compile(r'([A-Z][^\.!?]*[\.!?])', re.M) # pattern to detect sentences
matches = pat.findall(text)
if len(matches) > self.settings.number_of_minimum_sentences:
text = ''.join(matches[0:self.settings.number_of_minimum_sentences])
else:
text = ''.join(matches[0:]) # TODO: what if the answer is = ''
responses = [text]
except:
responses = [] # there is no proper response, but need to reconsider this
except:
responses = [] # no internet access
return responses
def explain(self, responses):
if len(responses) >= 1: # if there is only one response
answer = responses[0]
else:
pass # TODO: scoring might be done here!
return answer
def respond(self, about):
if about == 'jarvy':
responses = ['I am your friend.']
elif about == 'direct_address':
responses = ['Yes ' + self.settings.master_formal_address]
elif about == 'master':
responses = ['I\'m afraid, I can\'t answer that.']
else:
responses = ['']
return responses
def rewrite_question(self, source, message):
query = message # by default, set it to message
if source == 'google':
for r in self.settings.rudimentary_question_tags:
query = query.replace(r, '')
query = query.replace(' is ', ' ')
query = query.replace(' are ', ' ')
query = query.strip()
return query
def start():
    """Instantiate and return a running Jarvy assistant."""
    return Jarvy()
if __name__ == '__main__':
    # Launch the interactive assistant when executed as a script.
    start()
| |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import django
from django.core.urlresolvers import reverse
from django.db import models, transaction, DatabaseError
from django.utils.encoding import python_2_unicode_compatible
from django.utils.html import strip_tags
from django.utils.translation import ugettext_lazy as _
from django.utils.timezone import now as tznow
from pybb.compat import get_user_model_path, get_username_field, get_atomic_func
from pybb import defaults
from pybb.profiles import PybbProfile
from pybb.util import unescape, FilePathGenerator, _get_markup_formatter
from annoying.fields import AutoOneToOneField
try:
from south.modelsinspector import add_introspection_rules
add_introspection_rules([], ["^annoying\.fields\.JSONField"])
add_introspection_rules([], ["^annoying\.fields\.AutoOneToOneField"])
except ImportError:
pass
@python_2_unicode_compatible
class Category(models.Model):
    """Top-level grouping of forums, ordered by ``position``."""
    name = models.CharField(_('Name'), max_length=80)
    position = models.IntegerField(_('Position'), blank=True, default=0)
    # Hidden categories are presumably filtered to staff-only elsewhere
    # (see the help_text); the model itself only stores the flag.
    hidden = models.BooleanField(_('Hidden'), blank=False, null=False, default=False,
                                 help_text=_('If checked, this category will be visible only for staff'))

    class Meta(object):
        ordering = ['position']
        verbose_name = _('Category')
        verbose_name_plural = _('Categories')

    def __str__(self):
        return self.name

    def forum_count(self):
        # Number of forums directly attached to this category.
        return self.forums.all().count()

    def get_absolute_url(self):
        return reverse('pybb:category', kwargs={'pk': self.id})

    @property
    def topics(self):
        # All topics in any forum of this category.
        return Topic.objects.filter(forum__category=self).select_related()

    @property
    def posts(self):
        # All posts in any topic of this category's forums.
        return Post.objects.filter(topic__forum__category=self).select_related()
@python_2_unicode_compatible
class Forum(models.Model):
    """A forum inside a category; may be nested via ``parent``."""
    category = models.ForeignKey(Category, related_name='forums', verbose_name=_('Category'))
    parent = models.ForeignKey('self', related_name='child_forums', verbose_name=_('Parent forum'),
                               blank=True, null=True)
    name = models.CharField(_('Name'), max_length=80)
    position = models.IntegerField(_('Position'), blank=True, default=0)
    description = models.TextField(_('Description'), blank=True)
    # Fix: ``null=True`` has no effect on ManyToManyField (Django ignores it
    # and newer versions emit system-check warning fields.W340), so it was
    # dropped; behavior and schema are unchanged.
    moderators = models.ManyToManyField(get_user_model_path(), blank=True, verbose_name=_('Moderators'))
    updated = models.DateTimeField(_('Updated'), blank=True, null=True)
    # Denormalized counters maintained by update_counters().
    post_count = models.IntegerField(_('Post count'), blank=True, default=0)
    topic_count = models.IntegerField(_('Topic count'), blank=True, default=0)
    hidden = models.BooleanField(_('Hidden'), blank=False, null=False, default=False)
    readed_by = models.ManyToManyField(get_user_model_path(), through='ForumReadTracker', related_name='readed_forums')
    headline = models.TextField(_('Headline'), blank=True, null=True)

    class Meta(object):
        ordering = ['position']
        verbose_name = _('Forum')
        verbose_name_plural = _('Forums')

    def __str__(self):
        return self.name

    def update_counters(self):
        """Recompute the denormalized post/topic counters and ``updated``."""
        posts = Post.objects.filter(topic__forum_id=self.id)
        self.post_count = posts.count()
        self.topic_count = Topic.objects.filter(forum=self).count()
        try:
            last_post = posts.order_by('-created', '-id')[0]
            self.updated = last_post.updated or last_post.created
        except IndexError:
            # No posts yet; leave ``updated`` untouched.
            pass
        self.save()

    def get_absolute_url(self):
        return reverse('pybb:forum', kwargs={'pk': self.id})

    @property
    def posts(self):
        # All posts belonging to this forum's topics.
        return Post.objects.filter(topic__forum=self).select_related()

    @property
    def last_post(self):
        """Most recent post in this forum, or None when it is empty."""
        try:
            return self.posts.order_by('-created', '-id')[0]
        except IndexError:
            return None

    def get_parents(self):
        """
        Used in templates for breadcrumb building
        """
        # Walk up the parent chain; the category is always first.
        parents = [self.category]
        parent = self.parent
        while parent is not None:
            parents.insert(1, parent)
            parent = parent.parent
        return parents
@python_2_unicode_compatible
class Topic(models.Model):
    """A discussion thread inside a :class:`Forum`.

    Caches a denormalized ``post_count``/``updated`` pair refreshed by
    :meth:`update_counters`, and may optionally carry a poll.
    """
    POLL_TYPE_NONE = 0
    POLL_TYPE_SINGLE = 1
    POLL_TYPE_MULTIPLE = 2

    POLL_TYPE_CHOICES = (
        (POLL_TYPE_NONE, _('None')),
        (POLL_TYPE_SINGLE, _('Single answer')),
        (POLL_TYPE_MULTIPLE, _('Multiple answers')),
    )

    forum = models.ForeignKey(Forum, related_name='topics', verbose_name=_('Forum'))
    name = models.CharField(_('Subject'), max_length=255)
    created = models.DateTimeField(_('Created'), null=True)
    updated = models.DateTimeField(_('Updated'), null=True)
    user = models.ForeignKey(get_user_model_path(), verbose_name=_('User'))
    views = models.IntegerField(_('Views count'), blank=True, default=0)
    sticky = models.BooleanField(_('Sticky'), blank=True, default=False)
    closed = models.BooleanField(_('Closed'), blank=True, default=False)
    subscribers = models.ManyToManyField(get_user_model_path(), related_name='subscriptions',
                                         verbose_name=_('Subscribers'), blank=True)
    post_count = models.IntegerField(_('Post count'), blank=True, default=0)
    readed_by = models.ManyToManyField(get_user_model_path(), through='TopicReadTracker', related_name='readed_topics')
    on_moderation = models.BooleanField(_('On moderation'), default=False)
    poll_type = models.IntegerField(_('Poll type'), choices=POLL_TYPE_CHOICES, default=POLL_TYPE_NONE)
    poll_question = models.TextField(_('Poll question'), blank=True, null=True)

    class Meta(object):
        ordering = ['-created']
        verbose_name = _('Topic')
        verbose_name_plural = _('Topics')

    def __str__(self):
        return self.name

    @property
    def head(self):
        """
        Get first post and cache it for request
        """
        if not hasattr(self, "_head"):
            self._head = self.posts.all().order_by('created', 'id')
        if not len(self._head):
            return None
        return self._head[0]

    @property
    def last_post(self):
        """Newest post in the topic (cached per instance)."""
        if not getattr(self, '_last_post', None):
            self._last_post = self.posts.order_by('-created', '-id').select_related('user')[0]
        return self._last_post

    def get_absolute_url(self):
        return reverse('pybb:topic', kwargs={'pk': self.id})

    def save(self, *args, **kwargs):
        if self.id is None:
            self.created = tznow()

        forum_changed = False
        old_topic = None
        if self.id is not None:
            old_topic = Topic.objects.get(id=self.id)
            if self.forum != old_topic.forum:
                forum_changed = True

        super(Topic, self).save(*args, **kwargs)

        # When the topic moves between forums, both forums' cached
        # counters must be refreshed.
        if forum_changed:
            old_topic.forum.update_counters()
            self.forum.update_counters()

    def delete(self, using=None):
        super(Topic, self).delete(using)
        self.forum.update_counters()

    def update_counters(self):
        """Refresh the cached post count and last-activity timestamp."""
        self.post_count = self.posts.count()
        try:
            last_post = Post.objects.filter(topic_id=self.id).order_by('-created', '-id')[0]
            self.updated = last_post.updated or last_post.created
        except IndexError:
            # Topic has no posts (e.g. the last one was just moved away):
            # keep the previous timestamp instead of raising, mirroring
            # Forum.update_counters.
            pass
        self.save()

    def get_parents(self):
        """
        Used in templates for breadcrumb building
        """
        parents = self.forum.get_parents()
        parents.append(self.forum)
        return parents

    def poll_votes(self):
        """Total number of poll votes cast, or None when there is no poll."""
        if self.poll_type != self.POLL_TYPE_NONE:
            return PollAnswerUser.objects.filter(poll_answer__topic=self).count()
        else:
            return None
class RenderableItem(models.Model):
    """
    Base class for models that has markup, body, body_text and body_html fields.
    """
    class Meta(object):
        abstract = True

    # Raw markup as entered by the user.
    body = models.TextField(_('Message'))
    # Rendered HTML, derived from ``body`` by render().
    body_html = models.TextField(_('HTML version'))
    # Plain-text rendering, derived from ``body_html`` by render().
    body_text = models.TextField(_('Text version'))

    def render(self):
        """Regenerate ``body_html`` and ``body_text`` from ``body``."""
        self.body_html = _get_markup_formatter()(self.body)

        # Remove tags which was generated with the markup processor
        text = strip_tags(self.body_html)

        # Unescape entities which was generated with the markup processor
        self.body_text = unescape(text)
@python_2_unicode_compatible
class Post(RenderableItem):
    """A single message inside a :class:`Topic`."""
    topic = models.ForeignKey(Topic, related_name='posts', verbose_name=_('Topic'))
    user = models.ForeignKey(get_user_model_path(), related_name='posts', verbose_name=_('User'))
    created = models.DateTimeField(_('Created'), blank=True, db_index=True)
    updated = models.DateTimeField(_('Updated'), blank=True, null=True)
    user_ip = models.IPAddressField(_('User IP'), blank=True, default='0.0.0.0')
    on_moderation = models.BooleanField(_('On moderation'), default=False)

    class Meta(object):
        ordering = ['created']
        verbose_name = _('Post')
        verbose_name_plural = _('Posts')

    def summary(self):
        """First 50 characters of the body, with an ellipsis when truncated."""
        limit = 50
        tail = len(self.body) > limit and '...' or ''
        return self.body[:limit] + tail

    def __str__(self):
        return self.summary()

    def save(self, *args, **kwargs):
        # Stamp creation time and render markup before persisting.
        created_at = tznow()
        if self.created is None:
            self.created = created_at
        self.render()

        new = self.pk is None
        topic_changed = False
        old_post = None
        if not new:
            old_post = Post.objects.get(pk=self.pk)
            if old_post.topic != self.topic:
                topic_changed = True

        super(Post, self).save(*args, **kwargs)

        # If post is topic head and moderated, moderate topic too
        if self.topic.head == self and not self.on_moderation and self.topic.on_moderation:
            self.topic.on_moderation = False

        # Keep cached counters on the (new) topic and forum up to date;
        # when the post moved between topics, refresh the old ones too.
        self.topic.update_counters()
        self.topic.forum.update_counters()

        if topic_changed:
            old_post.topic.update_counters()
            old_post.topic.forum.update_counters()

    def get_absolute_url(self):
        return reverse('pybb:post', kwargs={'pk': self.id})

    def delete(self, *args, **kwargs):
        # Deleting the head post deletes the whole topic; otherwise just
        # remove this post and refresh the cached counters.
        self_id = self.id
        head_post_id = self.topic.posts.order_by('created', 'id')[0].id

        if self_id == head_post_id:
            self.topic.delete()
        else:
            super(Post, self).delete(*args, **kwargs)
            self.topic.update_counters()
            self.topic.forum.update_counters()

    def get_parents(self):
        """
        Used in templates for breadcrumb building
        """
        return self.topic.forum.category, self.topic.forum, self.topic,
class Profile(PybbProfile):
    """
    Profile class that can be used if you doesn't have
    your site profile.
    """
    # AutoOneToOneField creates the profile lazily on first access.
    user = AutoOneToOneField(get_user_model_path(), related_name='pybb_profile', verbose_name=_('User'))

    class Meta(object):
        verbose_name = _('Profile')
        verbose_name_plural = _('Profiles')

    def get_absolute_url(self):
        return reverse('pybb:user', kwargs={'username': getattr(self.user, get_username_field())})

    def get_display_name(self):
        """Name shown in the UI for this profile's user."""
        return self.user.get_username()
class Attachment(models.Model):
    """A file attached to a :class:`Post`."""
    class Meta(object):
        verbose_name = _('Attachment')
        verbose_name_plural = _('Attachments')

    post = models.ForeignKey(Post, verbose_name=_('Post'), related_name='attachments')
    # Size in bytes; filled automatically from the file on save().
    size = models.IntegerField(_('Size'))
    file = models.FileField(_('File'),
                            upload_to=FilePathGenerator(to=defaults.PYBB_ATTACHMENT_UPLOAD_TO))

    def save(self, *args, **kwargs):
        self.size = self.file.size
        super(Attachment, self).save(*args, **kwargs)

    def size_display(self):
        """Human-readable size: bytes, Kb or Mb depending on magnitude."""
        size = self.size
        if size < 1024:
            return '%db' % size
        elif size < 1024 * 1024:
            return '%dKb' % int(size / 1024)
        else:
            return '%.2fMb' % (size / float(1024 * 1024))
class TopicReadTrackerManager(models.Manager):
    def get_or_create_tracker(self, user, topic):
        """
        Correctly create tracker in mysql db on default REPEATABLE READ transaction mode

        It's known problem when standrard get_or_create method return can raise exception
        with correct data in mysql database.
        See http://stackoverflow.com/questions/2235318/how-do-i-deal-with-this-race-condition-in-django/2235624
        """
        is_new = True
        # Savepoint lets us roll back only the failed INSERT, then fall
        # back to fetching the row another transaction already created.
        sid = transaction.savepoint(using=self.db)
        try:
            with get_atomic_func()():
                obj = TopicReadTracker.objects.create(user=user, topic=topic)
            transaction.savepoint_commit(sid)
        except DatabaseError:
            transaction.savepoint_rollback(sid)
            obj = TopicReadTracker.objects.get(user=user, topic=topic)
            is_new = False
        return obj, is_new
class TopicReadTracker(models.Model):
    """
    Save per user topic read tracking
    """
    user = models.ForeignKey(get_user_model_path(), blank=False, null=False)
    topic = models.ForeignKey(Topic, blank=True, null=True)
    # auto_now: bumped on every save, i.e. each time the topic is re-read.
    time_stamp = models.DateTimeField(auto_now=True)

    objects = TopicReadTrackerManager()

    class Meta(object):
        verbose_name = _('Topic read tracker')
        verbose_name_plural = _('Topic read trackers')
        unique_together = ('user', 'topic')
class ForumReadTrackerManager(models.Manager):
    def get_or_create_tracker(self, user, forum):
        """
        Correctly create tracker in mysql db on default REPEATABLE READ transaction mode

        It's known problem when standrard get_or_create method return can raise exception
        with correct data in mysql database.
        See http://stackoverflow.com/questions/2235318/how-do-i-deal-with-this-race-condition-in-django/2235624
        """
        is_new = True
        # Savepoint lets us roll back only the failed INSERT, then fall
        # back to fetching the row another transaction already created.
        sid = transaction.savepoint(using=self.db)
        try:
            with get_atomic_func()():
                obj = ForumReadTracker.objects.create(user=user, forum=forum)
            transaction.savepoint_commit(sid)
        except DatabaseError:
            transaction.savepoint_rollback(sid)
            is_new = False
            obj = ForumReadTracker.objects.get(user=user, forum=forum)
        return obj, is_new
class ForumReadTracker(models.Model):
    """
    Save per user forum read tracking
    """
    user = models.ForeignKey(get_user_model_path(), blank=False, null=False)
    forum = models.ForeignKey(Forum, blank=True, null=True)
    # auto_now: bumped on every save, i.e. each time the forum is re-read.
    time_stamp = models.DateTimeField(auto_now=True)

    objects = ForumReadTrackerManager()

    class Meta(object):
        verbose_name = _('Forum read tracker')
        verbose_name_plural = _('Forum read trackers')
        unique_together = ('user', 'forum')
@python_2_unicode_compatible
class PollAnswer(models.Model):
    """One selectable answer of a topic's poll."""
    topic = models.ForeignKey(Topic, related_name='poll_answers', verbose_name=_('Topic'))
    text = models.CharField(max_length=255, verbose_name=_('Text'))

    class Meta:
        verbose_name = _('Poll answer')
        verbose_name_plural = _('Polls answers')

    def __str__(self):
        return self.text

    def votes(self):
        """Number of users that picked this answer."""
        return self.users.count()

    def votes_percent(self):
        """Share of the topic's total poll votes, as a percentage (0 when no votes)."""
        topic_votes = self.topic.poll_votes()
        if topic_votes > 0:
            return 1.0 * self.votes() / topic_votes * 100
        else:
            return 0
@python_2_unicode_compatible
class PollAnswerUser(models.Model):
    """A single user's vote for a :class:`PollAnswer` (one vote per answer)."""
    poll_answer = models.ForeignKey(PollAnswer, related_name='users', verbose_name=_('Poll answer'))
    user = models.ForeignKey(get_user_model_path(), related_name='poll_answers', verbose_name=_('User'))
    timestamp = models.DateTimeField(auto_now_add=True)

    class Meta:
        verbose_name = _('Poll answer user')
        verbose_name_plural = _('Polls answers users')
        unique_together = (('poll_answer', 'user', ), )

    def __str__(self):
        return '%s - %s' % (self.poll_answer.topic, self.user)
# Django < 1.7 has no AppConfig.ready() hook, so signal handlers must be
# wired up at import time instead.
if django.VERSION[:2] < (1, 7):
    from pybb import signals
    signals.setup()
| |
import numpy as np
import pandas as pd
import networkx as nx
from indra.statements import *
from indra.assemblers.indranet.net import default_sign_dict
from indra.assemblers.indranet import IndraNetAssembler, IndraNet
# Shared fixture statements used by the tests below.
ev1 = Evidence(pmid='1')
ev2 = Evidence(pmid='2')
ev3 = Evidence(pmid='3')
st1 = Activation(Agent('a', db_refs={'HGNC': '1'}), Agent('b'), evidence=[ev1])
st2 = Inhibition(Agent('a', db_refs={'HGNC': '1'}), Agent('c'),
                 evidence=[ev1, ev2, ev3])
st2.belief = 0.76
st3 = Activation(Agent('b'), Agent('d'))
st4 = ActiveForm(Agent('e'), None, True)  # 1 agent
st5 = Complex([Agent('c'), Agent('f'), Agent('g')])
st6 = Complex([Agent('h'), Agent('i'), Agent('j'), Agent('b')])
st7 = Phosphorylation(None, Agent('x'))
st8 = Conversion(Agent('PI3K'), [Agent('PIP2')], [Agent('PIP3')])
# Test assembly from assembler side
def test_simple_assembly():
    """make_model builds a multigraph with node/edge attrs from statements."""
    ia = IndraNetAssembler([st1, st2, st3, st4, st5, st6, st7])
    g = ia.make_model()
    assert len(g.nodes) == 6
    assert len(g.edges) == 9
    # Stmt with 1 agent should not be added
    assert 'e' not in g.nodes
    # Complex with more than 3 agents should not be added
    assert ('f', 'g', 0) in g.edges
    assert ('h', 'i', 0) not in g.edges
    # Test node attributes
    assert g.nodes['a']['ns'] == 'HGNC', g.nodes['a']['ns']
    assert g.nodes['a']['id'] == '1'
    # Test edge attributes
    e = g['a']['c'][0]
    assert e['stmt_type'] == 'Inhibition'
    assert e['belief'] == 0.76
    assert e['evidence_count'] == 3
    assert g['b']['d'][0]['evidence_count'] == 0
def test_exclude_stmts():
    """Statement types listed in exclude_stmts must not contribute edges."""
    assembler = IndraNetAssembler([st1, st2, st3])
    model = assembler.make_model(exclude_stmts=['Inhibition'])
    assert len(model.nodes) == 3
    assert len(model.edges) == 2
    # The excluded Inhibition (a->c) was the only statement mentioning 'c'.
    assert 'c' not in model.nodes
    assert ('a', 'c', 0) not in model.edges
def test_complex_members():
    """Raising complex_members to 4 keeps the 4-agent Complex st6."""
    assembler = IndraNetAssembler([st1, st6])
    model = assembler.make_model(complex_members=4)
    assert len(model.nodes) == 5
    assert len(model.edges) == 13, len(model.edges)
    # Complex members are connected pairwise in both directions.
    assert ('h', 'i', 0) in model.edges
    assert ('i', 'h', 0) in model.edges
def test_make_df():
    """make_df produces one row per edge with the expected column set."""
    ia = IndraNetAssembler([st1, st2, st3, st4, st5, st6])
    df = ia.make_df()
    assert isinstance(df, pd.DataFrame)
    assert len(df) == 9
    assert set(df.columns) == {
        'agA_name', 'agB_name', 'agA_ns', 'agA_id', 'agB_ns', 'agB_id',
        'stmt_type', 'evidence_count', 'stmt_hash', 'belief', 'source_counts',
        'initial_sign'}
# Test assembly from IndraNet directly
def test_from_df():
    """IndraNet.from_df rebuilds the same graph the assembler produces."""
    ia = IndraNetAssembler([st1, st2, st3, st4, st5, st6, st7])
    df = ia.make_df()
    net = IndraNet.from_df(df)
    assert len(net.nodes) == 6
    assert len(net.edges) == 9
    # Stmt with 1 agent should not be added
    assert 'e' not in net.nodes
    # Complex with more than 3 agents should not be added
    assert ('f', 'g', 0) in net.edges
    assert ('h', 'i', 0) not in net.edges
    # Test node attributes
    assert net.nodes['a']['ns'] == 'HGNC', net.nodes['a']['ns']
    assert net.nodes['a']['id'] == '1'
    # Test edge attributes
    e = net['a']['c'][0]
    assert e['stmt_type'] == 'Inhibition'
    assert e['belief'] == 0.76
    assert e['evidence_count'] == 3
    assert net['b']['d'][0]['evidence_count'] == 0
# Fixture statements for the digraph/signed-graph conversion tests:
# four a->b statements and four b->c statements with mixed polarities.
ab1 = Activation(Agent('a'), Agent('b'), evidence=[
    Evidence(source_api='sparser')])
ab2 = Phosphorylation(Agent('a'), Agent('b'),evidence=[
    Evidence(source_api='sparser'), Evidence(source_api='reach')])
ab3 = Inhibition(Agent('a'), Agent('b'), evidence=[
    Evidence(source_api='sparser'), Evidence(source_api='reach')])
ab4 = IncreaseAmount(Agent('a'), Agent('b'), evidence=[
    Evidence(source_api='trips')])
bc1 = Activation(Agent('b'), Agent('c'), evidence=[
    Evidence(source_api='trips')])
bc2 = Inhibition(Agent('b'), Agent('c'), evidence=[
    Evidence(source_api='trips'), Evidence(source_api='reach')])
bc3 = IncreaseAmount(Agent('b'), Agent('c'), evidence=[
    Evidence(source_api='sparser'), Evidence(source_api='reach')])
bc4 = DecreaseAmount(Agent('b'), Agent('c'), evidence=[
    Evidence(source_api='reach'), Evidence(source_api='trips')])
def test_to_digraph():
    """to_digraph collapses parallel edges and attaches belief/weight."""
    ia = IndraNetAssembler([ab1, ab2, ab3, ab4, bc1, bc2, bc3, bc4])
    df = ia.make_df()
    net = IndraNet.from_df(df)
    assert len(net.nodes) == 3
    assert len(net.edges) == 8
    digraph = net.to_digraph(weight_mapping=_weight_mapping)
    # Eight multi-edges collapse into two directed edges: a->b and b->c.
    assert len(digraph.nodes) == 3
    assert len(digraph.edges) == 2
    assert set([
        stmt['stmt_type'] for stmt in digraph['a']['b']['statements']]) == {
        'Activation', 'Phosphorylation', 'Inhibition', 'IncreaseAmount'}
    assert all(digraph.edges[e].get('belief', False) for e in digraph.edges)
    assert all(isinstance(digraph.edges[e]['belief'],
                          (float, np.longfloat)) for e in digraph.edges)
    assert all(digraph.edges[e].get('weight', False) for e in digraph.edges)
    assert all(isinstance(digraph.edges[e]['weight'],
                          (float, np.longfloat)) for e in digraph.edges)
    # The one-shot helper must produce the same graph.
    digraph_from_df = IndraNet.digraph_from_df(df)
    assert nx.is_isomorphic(digraph, digraph_from_df)
def test_to_signed_graph():
    """to_signed_graph splits each node pair into positive/negative edges."""
    ia = IndraNetAssembler([ab1, ab2, ab3, ab4, bc1, bc2, bc3, bc4])
    df = ia.make_df()
    net = IndraNet.from_df(df)
    signed_graph = net.to_signed_graph(
        sign_dict=default_sign_dict,
        weight_mapping=_weight_mapping)
    assert len(signed_graph.nodes) == 3
    # Sign 0 = positive, sign 1 = negative: two signed edges per node pair.
    assert len(signed_graph.edges) == 4
    assert set([stmt['stmt_type'] for stmt in
                signed_graph['a']['b'][0]['statements']]) == {
        'Activation', 'IncreaseAmount'}
    assert set([stmt['stmt_type'] for stmt in
                signed_graph['a']['b'][1]['statements']]) == {'Inhibition'}
    assert set([stmt['stmt_type'] for stmt in
                signed_graph['b']['c'][0]['statements']]) == {
        'Activation', 'IncreaseAmount'}
    assert set([stmt['stmt_type'] for stmt in
                signed_graph['b']['c'][1]['statements']]) == {
        'Inhibition', 'DecreaseAmount'}
    assert all(signed_graph.edges[e].get('belief', False) for e in
               signed_graph.edges)
    assert all(isinstance(signed_graph.edges[e]['belief'],
                          (float, np.longfloat)) for e in signed_graph.edges)
    assert all(signed_graph.edges[e].get('weight', False) for e in
               signed_graph.edges)
    assert all(isinstance(signed_graph.edges[e]['weight'],
                          (float, np.longfloat)) for e in signed_graph.edges)
def _weight_mapping(G):
for edge in G.edges:
G.edges[edge]['weight'] = 1 - G.edges[edge]['belief']
return G
def test_initial_signs():
    """Event polarities determine the sign of edges in a signed model."""
    a = Event(Concept('a'), QualitativeDelta(polarity=1))
    b = Event(Concept('b'), QualitativeDelta(polarity=1))
    c = Event(Concept('c'), QualitativeDelta(polarity=-1))
    d = Event(Concept('d'), QualitativeDelta(polarity=-1))
    st1 = Influence(a, b)
    st2 = Influence(b, c)
    st3 = Influence(c, d)
    st4 = Influence(b, d)
    ia = IndraNetAssembler([st1, st2, st3, st4])
    sg = ia.make_model(graph_type='signed')
    assert len(sg.nodes) == 4
    assert len(sg.edges) == 4
    # Key 0 = positive edge, key 1 = negative edge.
    assert ('a', 'b', 0) in sg.edges
    assert ('b', 'c', 0) not in sg.edges
    assert ('b', 'c', 1) in sg.edges
    assert ('c', 'd', 0) in sg.edges
    assert ('c', 'd', 1) not in sg.edges
    assert ('b', 'd', 0) not in sg.edges
    assert ('b', 'd', 1) in sg.edges
def test_conversion():
    """Conversion statements yield edges from subject to both products."""
    ia = IndraNetAssembler([st8])
    ug = ia.make_model(graph_type='multi_graph')
    assert len(ug.nodes) == 3
    assert len(ug.edges) == 2, ug.edges
    sg = ia.make_model(graph_type='signed')
    assert len(sg.nodes) == 3
    assert len(sg.edges) == 2, sg.edges
    # Producing PIP3 is positive (key 0); consuming PIP2 is negative (key 1).
    assert ('PI3K', 'PIP3', 0) in sg.edges, sg.edges
    assert ('PI3K', 'PIP2', 1) in sg.edges, sg.edges
| |
# Copyright 2015 Bloomberg Finance L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
============
Traits Types
============
.. currentmodule:: bqplot.traits
.. autosummary::
:toctree: _generate/
Date
"""
from traitlets import TraitError, TraitType
import numpy as np
import pandas as pd
import warnings
import datetime as dt
import six
# Date
def date_to_json(value, obj):
    """Serialize a datetime to an ISO-8601 UTC string with ms precision."""
    if value is None:
        return value
    # Dropping microseconds and only keeping milliseconds to conform
    # with JavaScript's Data.toJSON's behavior - and prevent bouncing
    # back updates from the front-end.
    iso = value.strftime('%Y-%m-%dT%H:%M:%S.%f')
    return iso[:-3] + 'Z'
def date_from_json(value, obj):
    """Parse a millisecond ISO-8601 string back into a datetime.

    Falsy input (None, '') is passed through unchanged.
    """
    if not value:
        return value
    return dt.datetime.strptime(value.rstrip('Z'), '%Y-%m-%dT%H:%M:%S.%f')
# to_json/from_json pair applied to Date traits via self.tag(**date_serialization).
date_serialization = dict(to_json=date_to_json, from_json=date_from_json)
class Date(TraitType):
    """
    A datetime trait type.

    Converts the passed date into a string format that can be used to
    construct a JavaScript datetime.
    """

    def validate(self, obj, value):
        # Accept datetime as-is, promote date to midnight datetime, and
        # convert numpy datetime64 values; anything else is an error.
        try:
            if isinstance(value, dt.datetime):
                return value
            if isinstance(value, dt.date):
                return dt.datetime(value.year, value.month, value.day)
            if np.issubdtype(np.dtype(value), np.datetime64):
                # TODO: Fix this. Right now, we have to limit the precision
                # of time to microseconds because np.datetime64.astype(datetime)
                # returns date values only for precision <= 'us'
                value_truncated = np.datetime64(value, 'us')
                return value_truncated.astype(dt.datetime)
        except Exception:
            self.error(obj, value)
        self.error(obj, value)

    def __init__(self, default_value=dt.datetime.today(), **kwargs):
        # NOTE(review): the default is evaluated once at import time, so
        # every Date trait shares the interpreter start-up timestamp —
        # confirm this is intended.
        super(Date, self).__init__(default_value=default_value, **kwargs)
        self.tag(**date_serialization)
def convert_to_date(array, fmt='%m-%d-%Y'):
    # If array is a np.ndarray with type == np.datetime64, the array can be
    # returned as such. If it is an np.ndarray of dtype 'object' then conversion
    # to string is tried according to the fmt parameter.
    # NOTE(review): inputs that match none of the branches below fall
    # through and return None implicitly — confirm callers expect that.

    if(isinstance(array, np.ndarray) and np.issubdtype(array.dtype, np.datetime64)):
        # no need to perform any conversion in this case
        return array
    elif(isinstance(array, list) or (isinstance(array, np.ndarray) and array.dtype == 'object')):
        return_value = []
        # Pandas to_datetime handles all the cases where the passed in
        # data could be any of the combinations of
        #            [list, nparray] X [python_datetime, np.datetime]
        # Because of the coerce=True flag, any non-compatible datetime type
        # will be converted to pd.NaT. By this comparison, we can figure
        # out if it is date castable or not.
        if(len(np.shape(array)) == 2):
            # 2-D input: convert each row independently.
            for elem in array:
                temp_val = pd.to_datetime(
                    elem, errors='coerce', infer_datetime_format=True)
                temp_val = elem if isinstance(temp_val[0], type(pd.NaT)) else temp_val
                return_value.append(temp_val)
        elif(isinstance(array, list)):
            temp_val = pd.to_datetime(
                array, errors='coerce', infer_datetime_format=True)
            return_value = array if isinstance(temp_val[0], type(pd.NaT)) else temp_val
        else:
            temp_val = pd.to_datetime(
                array, errors='coerce', infer_datetime_format=True)
            return_value = array if isinstance(temp_val[0], type(pd.NaT)) else temp_val
        return return_value
    elif(isinstance(array, np.ndarray)):
        # Non-object, non-datetime ndarray: cannot be interpreted as dates.
        warnings.warn("Array could not be converted into a date")
        return array
def array_from_json(value, obj=None):
    """Deserialize the wire format produced by array_to_json into an ndarray.

    Accepts either plain JSON lists (possibly nested, possibly holding
    {'value': ...} sub-arrays) or a binary-buffer dict with 'value',
    'dtype', 'shape' and an optional 'type' == 'date' marker.
    """
    if value is not None:
        # this will accept regular json data, like an array of values, which can be useful it you want
        # to link bqplot to other libraries that use that
        if isinstance(value, list):
            if len(value) > 0 and (isinstance(value[0], dict) and 'value' in value[0]):
                # List of serialized sub-arrays: decode each recursively.
                subarrays = [array_from_json(k) for k in value]
                if len(subarrays) > 0:
                    expected_length = len(subarrays[0])
                    # if a 'ragged' array, we should explicitly pass dtype=object
                    if any(len(k) != expected_length for k in subarrays[1:]):
                        return np.array(subarrays, dtype=object)
                return np.array(subarrays)
            elif len(value) > 0 and isinstance(value[0], list):
                return np.array(value, dtype=object)
            else:
                return np.array(value)
        elif 'value' in value:
            try:
                ar = np.frombuffer(value['value'], dtype=value['dtype']).reshape(value['shape'])
            except AttributeError:
                # in some python27/numpy versions it does not like the memoryview
                # we go the .tobytes() route, but since i'm not 100% sure memory copying
                # is happening or not, we one take this path if the above fails.
                ar = np.frombuffer(value['value'].tobytes(), dtype=value['dtype']).reshape(value['shape'])
            if value.get('type') == 'date':
                # Dates travel as float64 milliseconds; restore datetime64[ms].
                assert value['dtype'] == 'float64'
                ar = ar.astype('datetime64[ms]')
            return ar
def array_to_json(ar, obj=None, force_contiguous=True):
    """Serialize an ndarray for the front-end.

    Strings become plain JSON lists; datetimes become float64 milliseconds
    tagged type='date'; numeric arrays become a zero-copy buffer dict with
    'value', 'dtype' and 'shape'. Raises ValueError for unsupported dtypes.
    """
    if ar is None:
        return None
    array_type = None
    if ar.dtype.kind == 'O':
        # Try to serialize the array of objects
        is_string = np.vectorize(lambda x: isinstance(x, six.string_types))
        is_timestamp = np.vectorize(lambda x: isinstance(x, pd.Timestamp))
        is_array_like = np.vectorize(lambda x: isinstance(x, (list, np.ndarray)))
        if np.all(is_timestamp(ar)):
            ar = ar.astype('datetime64[ms]').astype(np.float64)
            array_type = 'date'
        elif np.all(is_string(ar)):
            ar = ar.astype('U')
        elif np.all(is_array_like(ar)):
            # Ragged/nested arrays: serialize row by row.
            return [array_to_json(np.array(row), obj, force_contiguous) for row in ar]
        else:
            raise ValueError("Unsupported dtype object")
    if ar.dtype.kind in ['S', 'U']:  # strings to as plain json
        return ar.tolist()
    if ar.dtype.kind == 'M':
        # since there is no support for int64, we'll use float64 but as ms
        # resolution, since that is the resolution the js Date object understands
        ar = ar.astype('datetime64[ms]').astype(np.float64)
        array_type = 'date'
    if ar.dtype.kind not in ['u', 'i', 'f']:  # ints and floats, and datetime
        raise ValueError("Unsupported dtype: %s" % (ar.dtype))
    if ar.dtype == np.int64:  # JS does not support int64
        ar = ar.astype(np.int32)
    if force_contiguous and not ar.flags["C_CONTIGUOUS"]:  # make sure it's contiguous
        ar = np.ascontiguousarray(ar)
    if not ar.dtype.isnative:
        # JS expects native byte order.
        dtype = ar.dtype.newbyteorder()
        ar = ar.astype(dtype)
    return {'value': memoryview(ar), 'dtype': str(ar.dtype), 'shape': ar.shape, 'type': array_type}
# to_json/from_json pair for ndarray-valued traits.
array_serialization = dict(to_json=array_to_json, from_json=array_from_json)
def array_squeeze(trait, value):
    """Validator: drop singleton axes from multi-dim arrays; 1-D passes through."""
    if len(value.shape) <= 1:
        return value
    return np.squeeze(value)
def array_dimension_bounds(mindim=0, maxdim=np.inf):
    """Build a validator rejecting arrays whose ndim lies outside [mindim, maxdim]."""
    def validator(trait, value):
        ndim = len(value.shape)
        if not (mindim <= ndim <= maxdim):
            raise TraitError('Dimension mismatch for trait %s of class %s: expected an \
array of dimension comprised in interval [%s, %s] and got an array of shape %s' % (
                trait.name, trait.this_class, mindim, maxdim, value.shape))
        return value
    return validator
def array_supported_kinds(kinds='biufMSUO'):
    """Build a validator accepting only arrays whose dtype kind is in ``kinds``."""
    def validator(trait, value):
        if value.dtype.kind in kinds:
            return value
        raise TraitError('Array type not supported for trait %s of class %s: expected a \
array of kind in list %r and got an array of type %s (kind %s)' % (
            trait.name, trait.this_class, list(kinds), value.dtype, value.dtype.kind))
    return validator
# DataFrame
def dataframe_from_json(value, obj):
    """Deserialize a list of records (or None) into a DataFrame."""
    return None if value is None else pd.DataFrame(value)
def dataframe_to_json(df, obj):
    """Serialize a DataFrame to a list of record dicts, mapping NaN to None."""
    if df is None:
        return None
    # Replacing NaNs with None as it's not valid JSON
    sanitized = df.fillna(np.nan).replace([np.nan], [None])
    return sanitized.to_dict(orient='records')
# to_json/from_json pair for DataFrame-valued traits.
dataframe_serialization = dict(to_json=dataframe_to_json, from_json=dataframe_from_json)
# dataframe validators
def dataframe_warn_indexname(trait, value):
    """Validator: a named index is not supported — warn and flatten it."""
    if value.index.name is None:
        return value
    warnings.warn("The '%s' dataframe trait of the %s instance disregards the index name" % (trait.name, trait.this_class))
    return value.reset_index()
# Series
def series_from_json(value, obj):
    """Deserialize a plain dict/list back into a pandas Series."""
    series = pd.Series(value)
    return series
def series_to_json(value, obj):
    """Serialize a pandas Series as a plain index->value dict."""
    as_dict = value.to_dict()
    return as_dict
# to_json/from_json pair for Series-valued traits.
series_serialization = dict(to_json=series_to_json, from_json=series_from_json)
def _array_equal(a, b):
"""Really tests if arrays are equal, where nan == nan == True"""
try:
return np.allclose(a, b, 0, 0, equal_nan=True)
except (TypeError, ValueError):
return False
| |
# Copyright 2011 Andrew Bogott for the Wikimedia Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import urllib
from oslo_utils import netutils
import webob
from nova.api.openstack import common
from nova.api.openstack.compute.schemas import floating_ip_dns
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api import validation
from nova import exception
from nova.i18n import _
from nova import network
ALIAS = "os-floating-ip-dns"
# Policy authorizer shared by every endpoint of this extension.
authorize = extensions.os_compute_authorizer(ALIAS)
def _translate_dns_entry_view(dns_entry):
result = {}
result['ip'] = dns_entry.get('ip')
result['id'] = dns_entry.get('id')
result['type'] = dns_entry.get('type')
result['domain'] = dns_entry.get('domain')
result['name'] = dns_entry.get('name')
return {'dns_entry': result}
def _translate_dns_entries_view(dns_entries):
    """Project a list of raw DNS entries onto the API list view."""
    views = [_translate_dns_entry_view(entry)['dns_entry']
             for entry in dns_entries]
    return {'dns_entries': views}
def _translate_domain_entry_view(domain_entry):
result = {}
result['domain'] = domain_entry.get('domain')
result['scope'] = domain_entry.get('scope')
result['project'] = domain_entry.get('project')
result['availability_zone'] = domain_entry.get('availability_zone')
return {'domain_entry': result}
def _translate_domain_entries_view(domain_entries):
    """Project a list of raw DNS domain entries onto the API list view."""
    views = [_translate_domain_entry_view(entry)['domain_entry']
             for entry in domain_entries]
    return {'domain_entries': views}
def _unquote_domain(domain):
    """Unquoting function for receiving a domain name in a URL.

    Domain names tend to have .'s in them.  Urllib doesn't quote dots,
    but Routes tends to choke on them, so we need an extra level of
    by-hand quoting here.
    """
    # NOTE(review): urllib.unquote exists only on Python 2; Python 3
    # would need urllib.parse.unquote — confirm target interpreter.
    return urllib.unquote(domain).replace('%2E', '.')
def _create_dns_entry(ip, name, domain):
return {'ip': ip, 'name': name, 'domain': domain}
def _create_domain_entry(domain, scope=None, project=None, av_zone=None):
return {'domain': domain, 'scope': scope, 'project': project,
'availability_zone': av_zone}
class FloatingIPDNSDomainController(wsgi.Controller):
    """DNS domain controller for OpenStack API."""

    def __init__(self):
        super(FloatingIPDNSDomainController, self).__init__()
        self.network_api = network.API(skip_policy_check=True)

    @extensions.expected_errors(501)
    def index(self, req):
        """Return a list of available DNS domains."""
        context = req.environ['nova.context']
        authorize(context)

        try:
            domains = self.network_api.get_dns_domains(context)
        except NotImplementedError:
            # Backend network driver has no DNS support.
            common.raise_feature_not_supported()

        domainlist = [_create_domain_entry(domain['domain'],
                                           domain.get('scope'),
                                           domain.get('project'),
                                           domain.get('availability_zone'))
                      for domain in domains]

        return _translate_domain_entries_view(domainlist)

    @extensions.expected_errors((400, 501))
    @validation.schema(floating_ip_dns.domain_entry_update)
    def update(self, req, id, body):
        """Add or modify domain entry."""
        context = req.environ['nova.context']
        authorize(context, action="domain:update")
        # The domain arrives URL-quoted (see _unquote_domain docstring).
        fqdomain = _unquote_domain(id)
        entry = body['domain_entry']
        scope = entry['scope']
        project = entry.get('project', None)
        av_zone = entry.get('availability_zone', None)

        # 'private' domains are scoped by availability zone,
        # 'public' domains by project — the other field must be absent.
        if scope == 'private' and project:
            msg = _("you can not pass project if the scope is private")
            raise webob.exc.HTTPBadRequest(explanation=msg)
        if scope == 'public' and av_zone:
            msg = _("you can not pass av_zone if the scope is public")
            raise webob.exc.HTTPBadRequest(explanation=msg)

        if scope == 'private':
            create_dns_domain = self.network_api.create_private_dns_domain
            area_name, area = 'availability_zone', av_zone
        else:
            create_dns_domain = self.network_api.create_public_dns_domain
            area_name, area = 'project', project
        try:
            create_dns_domain(context, fqdomain, area)
        except NotImplementedError:
            common.raise_feature_not_supported()

        return _translate_domain_entry_view({'domain': fqdomain,
                                             'scope': scope,
                                             area_name: area})

    @extensions.expected_errors((404, 501))
    @wsgi.response(202)
    def delete(self, req, id):
        """Delete the domain identified by id."""
        context = req.environ['nova.context']
        authorize(context, action="domain:delete")
        domain = _unquote_domain(id)

        # Delete the whole domain
        try:
            self.network_api.delete_dns_domain(context, domain)
        except NotImplementedError:
            common.raise_feature_not_supported()
        except exception.NotFound as e:
            raise webob.exc.HTTPNotFound(explanation=e.format_message())
class FloatingIPDNSEntryController(wsgi.Controller):
    """DNS Entry controller for OpenStack API."""

    def __init__(self):
        super(FloatingIPDNSEntryController, self).__init__()
        self.network_api = network.API(skip_policy_check=True)

    @extensions.expected_errors((404, 501))
    def show(self, req, domain_id, id):
        """Return the DNS entry that corresponds to domain_id and id."""
        context = req.environ['nova.context']
        authorize(context)
        domain = _unquote_domain(domain_id)

        floating_ip = None
        # Check whether id is a valid ipv4/ipv6 address.
        if netutils.is_valid_ip(id):
            floating_ip = id

        # Lookup either by address (id was an IP) or by entry name.
        try:
            if floating_ip:
                entries = self.network_api.get_dns_entries_by_address(context,
                                                              floating_ip,
                                                              domain)
            else:
                entries = self.network_api.get_dns_entries_by_name(context,
                                                                   id,
                                                                   domain)
        except NotImplementedError:
            common.raise_feature_not_supported()

        if not entries:
            explanation = _("DNS entries not found.")
            raise webob.exc.HTTPNotFound(explanation=explanation)

        if floating_ip:
            # Address lookup may match several names: return a list view.
            entrylist = [_create_dns_entry(floating_ip, entry, domain)
                         for entry in entries]
            dns_entries = _translate_dns_entries_view(entrylist)
            return wsgi.ResponseObject(dns_entries)

        entry = _create_dns_entry(entries[0], id, domain)
        return _translate_dns_entry_view(entry)

    @extensions.expected_errors(501)
    @validation.schema(floating_ip_dns.dns_entry_update)
    def update(self, req, domain_id, id, body):
        """Add or modify dns entry."""
        context = req.environ['nova.context']
        authorize(context)
        domain = _unquote_domain(domain_id)

        name = id
        entry = body['dns_entry']
        address = entry['ip']
        dns_type = entry['dns_type']

        try:
            entries = self.network_api.get_dns_entries_by_name(context,
                                                               name, domain)
            if not entries:
                # create!
                self.network_api.add_dns_entry(context, address, name,
                                               dns_type, domain)
            else:
                # modify!
                self.network_api.modify_dns_entry(context, name,
                                                  address, domain)
        except NotImplementedError:
            common.raise_feature_not_supported()

        return _translate_dns_entry_view({'ip': address,
                                          'name': name,
                                          'type': dns_type,
                                          'domain': domain})

    @extensions.expected_errors((404, 501))
    @wsgi.response(202)
    def delete(self, req, domain_id, id):
        """Delete the entry identified by req and id."""
        context = req.environ['nova.context']
        authorize(context)
        domain = _unquote_domain(domain_id)
        name = id

        try:
            self.network_api.delete_dns_entry(context, name, domain)
        except NotImplementedError:
            common.raise_feature_not_supported()
        except exception.NotFound as e:
            raise webob.exc.HTTPNotFound(explanation=e.format_message())
class FloatingIpDns(extensions.V21APIExtensionBase):
    """Floating IP DNS support."""

    name = "FloatingIpDns"
    alias = ALIAS
    version = 1

    def get_resources(self):
        """Register the domain resource and its nested entry resource."""
        resources = []
        res = extensions.ResourceExtension(ALIAS,
                                           controller=FloatingIPDNSDomainController())
        resources.append(res)
        # 'entries' is nested under a domain:
        # /os-floating-ip-dns/{domain_id}/entries/{id}
        res = extensions.ResourceExtension('entries',
                                           controller=FloatingIPDNSEntryController(),
                                           parent={'member_name': 'domain',
                                                   'collection_name': 'os-floating-ip-dns'})
        resources.append(res)
        return resources

    def get_controller_extensions(self):
        """It's an abstract function V21APIExtensionBase and the extension
        will not be loaded without it.
        """
        return []
| |
import argparse
import crypt
import datetime
import json
import sqlite3
import time
import os
from collections import namedtuple
from os import path
import lib.bottle as bottle
from lib.bottle import (
abort,
default_app,
hook,
request,
response,
route,
run,
static_file,
template,
)
from util import namedtuple_factory
# Module-level configuration and database bootstrap.
SCRIPT_PATH = os.path.dirname(os.path.realpath(__file__))
PORT = 8080
# SECURITY: this secret signs the auth cookie -- it must be changed for
# every real deployment or sessions can be forged.
COOKIE_SECRET = "changeme1"
# Ensure the data directory exists.  exist_ok=True replaces the previous
# bare `try/except: pass`, which silently swallowed *every* error; any
# genuine failure (e.g. permissions) now surfaces immediately instead of
# crashing later inside sqlite3.connect().
os.makedirs("/tmp/jabawiki", exist_ok=True)
conn = sqlite3.connect('/tmp/jabawiki/jabawiki.db', detect_types=sqlite3.PARSE_DECLTYPES)
# namedtuple_factory lets queries return rows with attribute access
# (row.title) instead of positional indexing.
conn.row_factory = namedtuple_factory
c = conn.cursor()
# Routes
###################################
@route('/')
def index():
    """Serve the single-page-app shell at the wiki root."""
    # f.read() returns the entire file; the original "".join(f.readlines())
    # built an intermediate list just to re-concatenate it.
    with open(path.join(SCRIPT_PATH, "templates", "base.html")) as f:
        return f.read()
@route('/w/<rest:re:.+>')
def wiki(rest):
    """Serve the SPA shell for any /w/... article URL (client-side routing)."""
    # f.read() returns the entire file; no need to join readlines().
    with open(path.join(SCRIPT_PATH, "templates", "base.html")) as f:
        return f.read()
@route('/article/get/<name>')
def get_article(name):
    """Return the named article as JSON, or a 401/404 error body."""
    if not can_access(name):
        response.status = 401
        return "Not authorized"
    article = get_article_by_title(name)
    if article is None:
        response.status = 404
        return "Article missing"
    # Keys are capitalized to match what the front end expects.
    return json.dumps({
        "Title": article.title,
        "Body": article.text,
        "Permission": article.publicity,
    })
@route('/article/put/<name>', 'PUT')
def put_article(name):
    """Create or update an article from a JSON body.

    Expects JSON keys: title, body, permission, summary.  Responds 401
    when the article is private to another user or nobody is logged in.
    """
    if not can_access(name):
        response.status = 401
        return "Not authorized"
    body = request.json
    article = get_article_by_title(name)
    user_id = get_cookie_user_id()
    if user_id is None:
        response.status = 401
        return "Not authorized"
    if article is None:
        # create!
        create_article(body['title'], body['body'], body['permission'], body['summary'])
    else:
        # modify!
        # NOTE(review): the update is keyed on body['title'], not the <name>
        # URL segment -- if the two differ a different article is modified.
        # Confirm the client always sends title == name.
        update_article(body['title'], body['body'], body['permission'], body['summary'])
    update_history(body['title'], body['summary'])
    return "Good"
@route('/history/get/<name>', 'GET')
def get_history(name):
    """Return the article's edit history as JSON, newest entry first."""
    if not can_access(name):
        response.status = 401
        return "Not authorized"
    # History rows come back oldest-first; reverse for display order.
    payload = [
        {
            "time": time.mktime(entry.time.timetuple()),
            "user": get_user_by_id(entry.user_id).name,
            "summary": entry.summary,
        }
        for entry in reversed(get_history_by_article(name))
    ]
    return json.dumps(payload)
# Auth
@route('/user/login', 'POST')
def user_login():
    """Authenticate a user and set the signed "auth" cookie.

    BUG FIX: the original read the submitted password but never checked
    it -- any password logged in a known email address.  The password is
    now verified against the stored crypt(3) hash.
    """
    body = request.json
    email = body["email"]
    password = body["password"]
    user = get_user_by_email(email)
    # Same failure message whether the email or the password is wrong,
    # so the endpoint cannot be used to probe for registered emails.
    if user is not None and is_matching_pass(password, user.password):
        # 7776000 s = 90 days.
        response.set_cookie("auth", user.id, secret=COOKIE_SECRET, path="/", max_age=7776000)
    else:
        response.status = 400
        return "Invalid login"
@route('/user/get', 'GET')
def user_get():
    """Return the logged-in user's profile as JSON, or 401 if not logged in."""
    user = get_user_by_id(get_cookie_user_id())
    if user is None:
        response.status = 401
        return "Not logged in"
    return json.dumps({
        "Name": user.name,
        "Email": user.email,
        "Role": user.role,
    })
@route('/user/logout', 'POST')
def user_logout():
    """Log the user out by deleting the signed auth cookie."""
    response.delete_cookie("auth", path="/")
    return "Good"
@route('/user/register', 'POST')
def user_register():
    """Create a new user account from a JSON body (email, name, password).

    The user table declares UNIQUE(email); registering an existing email
    previously escaped as an unhandled sqlite3.IntegrityError (HTTP 500).
    It is now reported as a client error.
    """
    body = request.json
    email = body["email"]
    name = body["name"]
    password = body["password"]
    try:
        create_user(email, name, password)
    except sqlite3.IntegrityError:
        response.status = 400
        return "Email already registered"
    return "Good"
# Files
###################################
@route('/static/<pth:path>')
def static(pth):
    """Serve files from the static/ directory (CSS, JS, images)."""
    return static_file(pth, root=path.join(SCRIPT_PATH, "static"))
@route('/partials/<pth:path>')
def partials(pth):
    """Serve Angular-style HTML partial templates from partials/."""
    return static_file(pth, root=path.join(SCRIPT_PATH, "partials"))
# Auth
###################################
def create_hash_password(plain_pass):
    """Hash a plaintext password with crypt(3) using a freshly generated salt."""
    salt = crypt.mksalt()
    return crypt.crypt(plain_pass, salt)
def is_matching_pass(plain_pass, crypted_pass):
    """Return True when plain_pass re-hashes (salted by crypted_pass) to crypted_pass."""
    rehashed = crypt.crypt(plain_pass, crypted_pass)
    return rehashed == crypted_pass
# Model
###################################
def create_tables():
    """Create the user, article and article_history tables if missing.

    TIMESTAMP columns round-trip as datetime objects because the
    connection was opened with detect_types=sqlite3.PARSE_DECLTYPES.
    """
    c.execute("""CREATE TABLE IF NOT EXISTS user(
              id INTEGER PRIMARY KEY,
              name TEXT NOT NULL,
              email TEXT NOT NULL,
              role TEXT NOT NULL,
              password TEXT NOT NULL,
              creation_date TIMESTAMP NOT NULL,
              UNIQUE(email))""")
    c.execute("""CREATE TABLE IF NOT EXISTS article(
              id INTEGER PRIMARY KEY,
              title TEXT NOT NULL,
              text TEXT NOT NULL,
              publicity TEXT NOT NULL,
              creation_date TIMESTAMP NOT NULL,
              UNIQUE (title))""")
    c.execute("""CREATE TABLE IF NOT EXISTS article_history(
              id INTEGER PRIMARY KEY,
              article_id INTEGER NOT NULL,
              user_id INTEGER NOT NULL,
              summary TEXT NOT NULL,
              time TIMESTAMP NOT NULL,
              FOREIGN KEY(article_id) REFERENCES article(id),
              FOREIGN KEY(user_id) REFERENCES user(id))""")
    conn.commit()
# user
def get_user_by_email(email):
    """Look up a single user row by email address; None when no match."""
    query = """SELECT * from user
              WHERE user.email=?"""
    c.execute(query, (email,))
    return c.fetchone()
def get_user_by_id(uid):
    """Look up a single user row by primary key; None when no match."""
    query = """SELECT * from user
              WHERE user.id=?"""
    c.execute(query, (uid,))
    return c.fetchone()
def create_user(email, name, password):
    """Insert a new user with role "user" and a crypt()-hashed password.

    Raises sqlite3.IntegrityError if the email already exists
    (user table declares UNIQUE(email)).
    """
    user_data = (name, email, "user", create_hash_password(password), datetime.datetime.utcnow())
    c.execute("""INSERT INTO user
              (name, email, role, password, creation_date)
              VALUES (?, ?, ?, ?, ?)""", user_data)
    conn.commit()
# article
def get_article_by_title(title):
    """Look up a single article row by its (unique) title; None when absent."""
    query = """SELECT * from article
              WHERE article.title=?"""
    c.execute(query, (title,))
    return c.fetchone()
def create_article(title, text, publicity, summary):
    """Insert a new article row.

    `summary` is accepted for signature parity with update_article but is
    not stored here -- history entries are written by update_history().
    """
    c.execute("""INSERT INTO article
              (title, text, publicity, creation_date)
              VALUES (?, ?, ?, ?)""", (title, text, publicity, datetime.datetime.utcnow()))
    conn.commit()
def update_article(title, text, publicity, summary):
    """Update an existing article's text and publicity, keyed by title.

    The title itself is never changed, and `summary` is unused here
    (history entries are written separately by update_history()).
    """
    c.execute("""UPDATE article
              SET text=?, publicity=?
              WHERE article.title=?""", (text, publicity, title))
    conn.commit()
def get_history_by_article(article_title):
    """Return every history row for the named article, oldest first.

    NOTE(review): raises AttributeError when the article does not exist,
    because get_article_by_title() returns None -- callers only gate on
    can_access(), which does not guarantee existence.  Confirm intended.
    """
    article_id = get_article_by_title(article_title).id
    c.execute("""SELECT * from article_history
              WHERE article_history.article_id=?""", (article_id,))
    return c.fetchall()
def update_history(article_title, summary):
    """Append a history row (editing user, summary, UTC timestamp) for an article.

    NOTE(review): same missing-article AttributeError risk as
    get_history_by_article().
    """
    user_id = get_cookie_user_id()
    article_id = get_article_by_title(article_title).id
    c.execute("""INSERT INTO article_history
              (article_id, user_id, summary, time)
              VALUES (?, ?, ?, ?)""", (article_id, user_id, summary, datetime.datetime.utcnow()))
    conn.commit()
# Util
###################################
# remove ending slash from requests
@hook('before_request')
def strip_path():
    """Normalize every request path so /foo/ and /foo hit the same route."""
    request.environ['PATH_INFO'] = request.environ['PATH_INFO'].rstrip('/')
def setup():
    """One-time startup: ensure tables exist; dev seeding is opt-in."""
    create_tables()
    # dev_setup(conn, c)
def get_cookie_user_id():
    """Return the user id stored in the signed "auth" cookie, or None
    when the cookie is missing or its signature does not verify."""
    return request.get_cookie("auth", secret=COOKIE_SECRET)
# TODO: the permission system is messy and
# needs to be revamped
def can_access(article_title):
    """Return True when the current requester may read the named article.

    Public and missing articles are open to everyone; private articles
    are visible only to user id 1.  (The original condition
    "(user and user.id != 1) or user is None" collapses to exactly this.)
    """
    article = get_article_by_title(article_title)
    user = get_user_by_id(get_cookie_user_id())
    if article is not None and article.publicity == "private":
        return user is not None and user.id == 1
    return True
def dev_setup(conn, cursor):
    """Seed the database with a default admin user and Home article (dev only).

    BUG FIX: the function accepted a `cursor` argument but executed its
    statements on the module-global cursor `c`, silently ignoring the
    cursor it was given.  It now uses the connection/cursor passed in.
    """
    user_data = ("ben",
                 "ben@bendoan.me",
                 "admin",
                 create_hash_password("pass"),
                 datetime.datetime.utcnow())
    cursor.execute("""INSERT INTO user
              (name, email, role, password, creation_date)
              VALUES (?, ?, ?, ?, ?)""", user_data)
    article_data = ("Home",
                    "this is the home page",
                    "public",
                    datetime.datetime.utcnow())
    cursor.execute("""INSERT INTO article
              (title, text, publicity, creation_date)
              VALUES (?, ?, ?, ?)""", article_data)
    conn.commit()
# Run
###################################
setup()
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='starts the jabawiki server')
    # NOTE(review): --config is parsed but never read; PORT and
    # COOKIE_SECRET above are hard-coded.  Confirm whether config-file
    # loading was intentionally dropped.
    parser.add_argument('--config', help='specifies the config file location (default: ./config.json)',
                        default="./config.json")
    args = parser.parse_args()
    run(host='0.0.0.0', port=PORT)
# Exposed for WSGI servers that import this module instead of running it.
app = default_app()
| |
from datetime import datetime
from collections import OrderedDict
from django.core.urlresolvers import resolve, reverse
import furl
import pytz
import jsonschema
from framework.auth.core import Auth
from osf.models import BaseFileNode, DraftNode, OSFUser, Comment, Preprint, AbstractNode
from rest_framework import serializers as ser
from rest_framework.fields import SkipField
from website import settings
from website.util import api_v2_url
from addons.base.utils import get_mfr_url
from api.base.serializers import (
FileRelationshipField,
format_relationship_links,
IDField,
JSONAPIListField,
JSONAPISerializer,
Link,
LinksField,
NodeFileHyperLinkField,
RelationshipField,
TypeField,
WaterbutlerLink,
VersionedDateTimeField,
TargetField,
HideIfPreprint,
ShowIfVersion,
)
from api.base.utils import absolute_reverse, get_user_auth
from api.base.exceptions import Conflict, InvalidModelValueError
from api.base.schemas.utils import from_json
from api.base.versioning import get_kebab_snake_case_field
class CheckoutField(ser.HyperlinkedRelatedField):
    """Relationship field for a file's `checkout` user.

    Serializes to a JSON-API links object pointing at the user who has
    the file checked out; on write it accepts only the current user's
    id (check-out) or null (check-in).
    """

    default_error_messages = {'invalid_data': 'Checkout must be either the current user or null'}
    json_api_link = True  # serializes to a links object

    def __init__(self, **kwargs):
        # queryset=True is a placeholder so the field is treated as
        # writable; the real queryset comes from get_queryset() --
        # TODO confirm against DRF's RelatedField requirements.
        kwargs['queryset'] = True
        kwargs['read_only'] = False
        kwargs['allow_null'] = True
        kwargs['lookup_field'] = '_id'
        kwargs['lookup_url_kwarg'] = 'user_id'
        self.meta = {'id': 'user_id'}
        self.link_type = 'related'
        self.always_embed = kwargs.pop('always_embed', False)
        super(CheckoutField, self).__init__('users:user-detail', **kwargs)

    def resolve(self, resource, field_name, request):
        """
        Resolves the view when embedding.
        """
        embed_value = resource.checkout._id
        return resolve(
            reverse(
                self.view_name,
                kwargs={
                    self.lookup_url_kwarg: embed_value,
                    'version': request.parser_context['kwargs']['version'],
                },
            ),
        )

    def get_choices(self, cutoff=None):
        """Most of this was copied and pasted from rest_framework's RelatedField -- we needed to pass the
        correct value of a user's pk as a choice, while avoiding our custom implementation of `to_representation`
        which returns a dict for JSON API purposes.
        """
        queryset = self.get_queryset()
        if queryset is None:
            # Ensure that field.choices returns something sensible
            # even when accessed with a read-only field.
            return {}
        if cutoff is not None:
            queryset = queryset[:cutoff]
        return OrderedDict([
            (
                item.pk,
                self.display_value(item),
            )
            for item in queryset
        ])

    def get_queryset(self):
        # Only the requesting user is ever a valid checkout target.
        return OSFUser.objects.filter(guids___id=self.context['request'].user._id, guids___id__isnull=False)

    def get_url(self, obj, view_name, request, format):
        # Returns an empty links object when nobody has the file checked out.
        if obj is None:
            return {}
        lookup_value = getattr(obj, self.lookup_field)
        return absolute_reverse(
            self.view_name, kwargs={
                self.lookup_url_kwarg: lookup_value,
                'version': self.context['request'].parser_context['kwargs']['version'],
            },
        )

    def to_internal_value(self, data):
        # null means "check the file back in".
        if data is None:
            return None
        try:
            # Scanning get_queryset() enforces that only the current
            # user's own id is accepted.
            return next(
                user for user in
                self.get_queryset()
                if user._id == data
            )
        except StopIteration:
            self.fail('invalid_data')

    def to_representation(self, value):
        url = super(CheckoutField, self).to_representation(value)
        rel_meta = None
        if value and hasattr(value, '_id'):
            rel_meta = {'id': value._id}
        ret = format_relationship_links(related_link=url, rel_meta=rel_meta)
        return ret
class FileTagField(ser.Field):
    """Serializes a tag object to and from its plain string name."""

    def to_representation(self, obj):
        # A missing tag renders as None rather than raising.
        return None if obj is None else obj.name

    def to_internal_value(self, data):
        # Pass the raw string through unchanged.
        return data
class FileNodeRelationshipField(RelationshipField):
    """Relationship that only renders when the file's target is a node.

    Raises SkipField (omitting the relationship) for non-node targets
    such as preprints.
    """

    def to_representation(self, value):
        if not isinstance(value.target, AbstractNode):
            raise SkipField
        return super(FileNodeRelationshipField, self).to_representation(value)
class BaseFileSerializer(JSONAPISerializer):
    """Common JSON-API serializer for file nodes (files and folders)."""

    filterable_fields = frozenset([
        'id',
        'name',
        'kind',
        'path',
        'materialized_path',
        'size',
        'provider',
        'last_touched',
        'tags',
    ])

    # Attributes
    id = IDField(source='_id', read_only=True)
    type = TypeField()
    guid = ser.SerializerMethodField(
        read_only=True,
        method_name='get_file_guid',
        help_text='OSF GUID for this file (if one has been assigned)',
    )
    checkout = CheckoutField()
    name = ser.CharField(read_only=True, help_text='Display name used in the general user interface')
    kind = ser.CharField(read_only=True, help_text='Either folder or file')
    path = ser.CharField(read_only=True, help_text='The unique path used to reference this object')
    size = ser.SerializerMethodField(read_only=True, help_text='The size of this file at this version')
    provider = ser.CharField(read_only=True, help_text='The Add-on service this file originates from')
    materialized_path = ser.CharField(
        read_only=True, help_text='The Unix-style path of this object relative to the provider root',
    )
    last_touched = VersionedDateTimeField(read_only=True, help_text='The last time this file had information fetched about it via the OSF')
    date_modified = ser.SerializerMethodField(read_only=True, help_text='Timestamp when the file was last modified')
    date_created = ser.SerializerMethodField(read_only=True, help_text='Timestamp when the file was created')
    extra = ser.SerializerMethodField(read_only=True, help_text='Additional metadata about this file')
    tags = JSONAPIListField(child=FileTagField(), required=False)
    current_user_can_comment = ser.SerializerMethodField(help_text='Whether the current user is allowed to post comments')
    current_version = ser.IntegerField(help_text='Latest file version', read_only=True, source='current_version_number')
    delete_allowed = ser.BooleanField(read_only=True, required=False)

    # Relationships
    parent_folder = RelationshipField(
        related_view='files:file-detail',
        related_view_kwargs={'file_id': '<parent._id>'},
        help_text='The folder in which this file exists',
    )
    files = NodeFileHyperLinkField(
        # Draft nodes have their own file-list view; route accordingly.
        related_view=lambda node: 'draft_nodes:node-files' if getattr(node, 'type', False) == 'osf.draftnode' else 'nodes:node-files',
        view_lambda_argument='target',
        related_view_kwargs={'node_id': '<target._id>', 'path': '<path>', 'provider': '<provider>'},
        kind='folder',
    )
    versions = NodeFileHyperLinkField(
        related_view='files:file-versions',
        related_view_kwargs={'file_id': '<_id>'},
        kind='file',
    )
    comments = HideIfPreprint(FileRelationshipField(
        related_view='nodes:node-comments',
        related_view_kwargs={'node_id': '<target._id>'},
        related_meta={'unread': 'get_unread_comments_count'},
        filter={'target': 'get_file_guid'},
    ))
    metadata_records = FileRelationshipField(
        related_view='files:metadata-records',
        related_view_kwargs={'file_id': '<_id>'},
    )

    links = LinksField({
        'info': Link('files:file-detail', kwargs={'file_id': '<_id>'}),
        'move': WaterbutlerLink(),
        'upload': WaterbutlerLink(),
        'delete': WaterbutlerLink(),
        'download': 'get_download_link',
        'render': 'get_render_link',
        'html': 'absolute_url',
        'new_folder': WaterbutlerLink(must_be_folder=True, kind='folder'),
    })

    def absolute_url(self, obj):
        # Only files have a standalone web page; folders return None here.
        if obj.is_file:
            return furl.furl(settings.DOMAIN).set(
                path=(obj.target._id, 'files', obj.provider, obj.path.lstrip('/')),
            ).url

    def get_download_link(self, obj):
        if obj.is_file:
            return get_file_download_link(obj, view_only=self.context['request'].query_params.get('view_only'))

    def get_render_link(self, obj):
        if obj.is_file:
            mfr_url = get_mfr_url(obj.target, obj.provider)
            download_url = self.get_download_link(obj)
            return get_file_render_link(mfr_url, download_url)

    class Meta:
        type_ = 'files'

    def get_size(self, obj):
        """Size of the latest version, or None for folders/version-less files."""
        if obj.versions.exists():
            # NOTE(review): this caches onto self.size, shadowing the
            # declared serializer field attribute on this instance --
            # confirm this is intentional rather than a leftover.
            self.size = obj.versions.first().size
            return self.size
        return None

    def get_date_modified(self, obj):
        mod_dt = None
        if obj.provider == 'osfstorage' and obj.versions.exists():
            # Each time an osfstorage file is added or uploaded, a new version object is created with its
            # date_created equal to the time of the update. The external_modified is the modified date
            # from the backend the file is stored on. This field refers to the modified date on osfstorage,
            # so prefer to use the created of the latest version.
            mod_dt = obj.versions.first().created
        elif obj.provider != 'osfstorage' and obj.history:
            mod_dt = obj.history[-1].get('modified', None)
        # API >= 2.2 renders an explicit ISO-8601 string for files.
        if self.context['request'].version >= '2.2' and obj.is_file and mod_dt:
            return datetime.strftime(mod_dt, '%Y-%m-%dT%H:%M:%S.%fZ')
        return mod_dt and mod_dt.replace(tzinfo=pytz.utc)

    def get_date_created(self, obj):
        creat_dt = None
        if obj.provider == 'osfstorage' and obj.versions.exists():
            creat_dt = obj.versions.last().created
        elif obj.provider != 'osfstorage' and obj.history:
            # Non-osfstorage files don't store a created date, so instead get the modified date of the
            # earliest entry in the file history.
            creat_dt = obj.history[0].get('modified', None)
        if self.context['request'].version >= '2.2' and obj.is_file and creat_dt:
            return datetime.strftime(creat_dt, '%Y-%m-%dT%H:%M:%S.%fZ')
        return creat_dt and creat_dt.replace(tzinfo=pytz.utc)

    def get_extra(self, obj):
        """Checksum hashes (and osfstorage download counts) for the latest version."""
        metadata = {}
        if obj.provider == 'osfstorage' and obj.versions.exists():
            metadata = obj.versions.first().metadata
        elif obj.provider != 'osfstorage' and obj.history:
            metadata = obj.history[-1].get('extra', {})
        extras = {}
        extras['hashes'] = {  # mimic waterbutler response
            'md5': metadata.get('md5', None),
            'sha256': metadata.get('sha256', None),
        }
        if obj.provider == 'osfstorage' and obj.is_file:
            extras['downloads'] = obj.get_download_count()
        return extras

    def get_current_user_can_comment(self, obj):
        user = self.context['request'].user
        auth = Auth(user if not user.is_anonymous else None)
        # Commenting is only defined for node targets.
        if isinstance(obj.target, AbstractNode):
            return obj.target.can_comment(auth)
        return False

    def get_unread_comments_count(self, obj):
        user = self.context['request'].user
        if user.is_anonymous:
            return 0
        return Comment.find_n_unread(user=user, node=obj.target, page='files', root_id=obj.get_guid()._id)

    def user_id(self, obj):
        # NOTE: obj is the user here, the meta field for
        # Hyperlinks is weird
        if obj:
            return obj._id
        return None

    def update(self, instance, validated_data):
        """Apply tag changes and checkout state; other attrs are set directly."""
        assert isinstance(instance, BaseFileNode), 'Instance must be a BaseFileNode'
        if instance.provider != 'osfstorage' and 'tags' in validated_data:
            raise Conflict('File service provider {} does not support tags on the OSF.'.format(instance.provider))
        auth = get_user_auth(self.context['request'])
        old_tags = set(instance.tags.values_list('name', flat=True))
        if 'tags' in validated_data:
            current_tags = set(validated_data.pop('tags', []))
        else:
            current_tags = set(old_tags)
        # Diff old vs. new tag sets rather than replacing wholesale.
        for new_tag in (current_tags - old_tags):
            instance.add_tag(new_tag, auth=auth)
        for deleted_tag in (old_tags - current_tags):
            instance.remove_tag(deleted_tag, auth=auth)
        for attr, value in validated_data.items():
            if attr == 'checkout':
                user = self.context['request'].user
                instance.check_in_or_out(user, value)
            else:
                setattr(instance, attr, value)
        instance.save()
        return instance

    def is_valid(self, **kwargs):
        # File names may legitimately contain markup-like characters.
        return super(BaseFileSerializer, self).is_valid(clean_html=False, **kwargs)

    def get_file_guid(self, obj):
        if obj:
            guid = obj.get_guid()
            if guid:
                return guid._id
        return None

    def get_absolute_url(self, obj):
        return api_v2_url('files/{}/'.format(obj._id))
class FileSerializer(BaseFileSerializer):
    """File serializer exposing the generic `target` relationship plus a
    legacy `node` relationship for API versions 2.0 through 2.7."""

    node = ShowIfVersion(
        FileNodeRelationshipField(
            related_view='nodes:node-detail',
            related_view_kwargs={'node_id': '<target._id>'},
            help_text='The project that this file belongs to',
        ),
        min_version='2.0', max_version='2.7',
    )
    target = TargetField(link_type='related', meta={'type': 'get_target_type'})

    def get_target_type(self, obj):
        # Checked in the same order as the original if-chain; a later
        # match overrides an earlier one, so DraftNode wins over Preprint.
        label = 'node'
        for klass, name in ((Preprint, 'preprint'), (DraftNode, 'draft_node')):
            if isinstance(obj, klass):
                label = name
        return label
class OsfStorageFileSerializer(FileSerializer):
    """ Overrides `filterable_fields` to make `last_touched` non-filterable
    """
    # `materialized_path` and `last_touched` are intentionally absent
    # compared with BaseFileSerializer.filterable_fields.
    filterable_fields = frozenset([
        'id',
        'name',
        'kind',
        'path',
        'size',
        'provider',
        'tags',
    ])

    def create(self, validated_data):
        # Delegates directly to FileSerializer.create; kept as an explicit
        # override point for osfstorage-specific creation.
        return super(OsfStorageFileSerializer, self).create(validated_data)
class FileDetailSerializer(FileSerializer):
    """
    Overrides FileSerializer to make id required.
    """
    # Detail (single-resource) requests must carry the file id.
    id = IDField(source='_id', required=True)
class QuickFilesSerializer(BaseFileSerializer):
    """File serializer for quickfiles; relates each file to its uploader."""

    user = RelationshipField(
        related_view='users:user-detail',
        related_view_kwargs={'user_id': '<target.creator._id>'},
        help_text='The user who uploaded this file',
    )
class QuickFilesDetailSerializer(QuickFilesSerializer):
    """Detail variant of QuickFilesSerializer: the file id is required."""
    id = IDField(source='_id', required=True)
class FileVersionSerializer(JSONAPISerializer):
    """Serializer for a single version of a file.

    Expects the owning file in self.context['file'].
    """

    filterable_fields = frozenset([
        'id',
        'size',
        'identifier',
        'content_type',
    ])
    # A version's id is its provider-assigned version identifier.
    id = ser.CharField(read_only=True, source='identifier')
    size = ser.IntegerField(read_only=True, help_text='The size of this file at this version')
    content_type = ser.CharField(read_only=True, help_text='The mime type of this file at this verison')
    date_created = VersionedDateTimeField(source='created', read_only=True, help_text='The date that this version was created')
    name = ser.SerializerMethodField()
    links = LinksField({
        'self': 'self_url',
        'html': 'absolute_url',
        'download': 'get_download_link',
        'render': 'get_render_link',
    })

    def get_name(self, obj):
        # The file may have been renamed between versions; report the
        # name it had at this version.
        file = self.context['file']
        return obj.get_basefilenode_version(file).version_name

    class Meta:
        @staticmethod
        def get_type(request):
            return get_kebab_snake_case_field(request.version, 'file-versions')

    def self_url(self, obj):
        return absolute_reverse(
            'files:version-detail', kwargs={
                'version_id': obj.identifier,
                'file_id': self.context['view'].kwargs['file_id'],
                'version': self.context['request'].parser_context['kwargs']['version'],
            },
        )

    def absolute_url(self, obj):
        fobj = self.context['file']
        return furl.furl(settings.DOMAIN).set(
            path=(fobj.target._id, 'files', fobj.provider, fobj.path.lstrip('/')),
            query={fobj.version_identifier: obj.identifier},  # TODO this can probably just be changed to revision or version
        ).url

    def get_absolute_url(self, obj):
        return self.self_url(obj)

    def get_download_link(self, obj):
        return get_file_download_link(
            self.context['file'], version=obj.identifier,
            view_only=self.context['request'].query_params.get('view_only'),
        )

    def get_render_link(self, obj):
        file = self.context['file']
        mfr_url = get_mfr_url(file.target, file.provider)
        download_url = self.get_download_link(obj)
        return get_file_render_link(mfr_url, download_url, version=obj.identifier)
class FileMetadataRecordSerializer(JSONAPISerializer):
    """Serializer for a file's metadata record, validated against its schema."""

    id = IDField(source='_id', required=True)
    type = TypeField()
    metadata = ser.DictField()
    file = RelationshipField(
        related_view='files:file-detail',
        related_view_kwargs={'file_id': '<file._id>'},
    )
    schema = RelationshipField(
        related_view='schemas:file-metadata-schema-detail',
        related_view_kwargs={'schema_id': '<schema._id>'},
    )
    links = LinksField({
        'download': 'get_download_link',
        'self': 'get_absolute_url',
    })

    def validate_metadata(self, value):
        """Validate the submitted metadata dict against the record's JSON schema.

        Raises InvalidModelValueError (with the schema attached as meta)
        on validation failure.
        """
        schema = from_json(self.instance.serializer.osf_schema)
        try:
            jsonschema.validate(value, schema)
        except jsonschema.ValidationError as e:
            # additionalProperties errors have no useful path; use the
            # library's message verbatim in that case.
            if e.relative_schema_path[0] == 'additionalProperties':
                error_message = e.message
            else:
                error_message = 'Your response of {} for the field {} was invalid.'.format(
                    e.instance,
                    e.absolute_path[0],
                )
            raise InvalidModelValueError(detail=error_message, meta={'metadata_schema': schema})
        return value

    def update(self, record, validated_data):
        if validated_data:
            user = self.context['request'].user
            proposed_metadata = validated_data.pop('metadata')
            record.update(proposed_metadata, user)
        return record

    def get_download_link(self, obj):
        return absolute_reverse(
            'files:metadata-record-download', kwargs={
                'file_id': obj.file._id,
                'record_id': obj._id,
                'version': self.context['request'].parser_context['kwargs']['version'],
            },
        )

    def get_absolute_url(self, obj):
        return obj.absolute_api_v2_url

    class Meta:
        @staticmethod
        def get_type(request):
            return get_kebab_snake_case_field(request.version, 'metadata-records')
def get_file_download_link(obj, version=None, view_only=None):
    """Build the download URL for a file, preferring its GUID when one exists."""
    guid = obj.get_guid()
    identifier = guid._id if guid else obj._id
    # The '' path segment ensures a trailing slash, which avoids a 301.
    url = furl.furl(settings.DOMAIN).set(path=('download', identifier, ''))
    if version:
        url.args[obj.version_identifier] = version
    if view_only:
        url.args['view_only'] = view_only
    return url.url
def get_file_render_link(mfr_url, download_url, version=None):
    """Build the MFR render URL wrapping a direct download URL."""
    # Insertion order is preserved in the rendered query string:
    # revision (when given), then direct, then mode.
    extra_args = {}
    if version:
        extra_args['revision'] = version
    extra_args['direct'] = None
    extra_args['mode'] = 'render'
    wrapped_download = furl.furl(download_url).set(args=extra_args)
    return furl.furl(mfr_url).set(
        path=['render'],
        args={'url': wrapped_download},
    ).url
| |
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_db import exception as db_exc
from oslo_log import log
from sqlalchemy import or_
from sqlalchemy.orm import exc
from neutron.common import constants as n_const
from neutron.db import api as db_api
from neutron.db import models_v2
from neutron.db import securitygroups_db as sg_db
from neutron.extensions import portbindings
from neutron.i18n import _LE, _LI
from neutron import manager
from neutron.openstack.common import uuidutils
from neutron.plugins.ml2 import driver_api as api
from neutron.plugins.ml2 import models
LOG = log.getLogger(__name__)
# limit the number of port OR LIKE statements in one query
MAX_PORTS_PER_QUERY = 500
def _make_segment_dict(record):
    """Make a segment dictionary out of a DB record."""
    pairs = ((api.ID, record.id),
             (api.NETWORK_TYPE, record.network_type),
             (api.PHYSICAL_NETWORK, record.physical_network),
             (api.SEGMENTATION_ID, record.segmentation_id))
    return dict(pairs)
def add_network_segment(session, network_id, segment, segment_index=0,
                        is_dynamic=False):
    """Persist a new NetworkSegment row for a network.

    Mutates the passed-in `segment` dict, writing the generated UUID
    back under api.ID so the caller sees the new segment's id.
    """
    with session.begin(subtransactions=True):
        record = models.NetworkSegment(
            id=uuidutils.generate_uuid(),
            network_id=network_id,
            network_type=segment.get(api.NETWORK_TYPE),
            physical_network=segment.get(api.PHYSICAL_NETWORK),
            segmentation_id=segment.get(api.SEGMENTATION_ID),
            segment_index=segment_index,
            is_dynamic=is_dynamic
        )
        session.add(record)
        segment[api.ID] = record.id
    LOG.info(_LI("Added segment %(id)s of type %(network_type)s for network"
                 " %(network_id)s"),
             {'id': record.id,
              'network_type': record.network_type,
              'network_id': record.network_id})
def get_network_segments(session, network_id, filter_dynamic=False):
    """Return segment dicts for a network, ordered by segment_index.

    filter_dynamic selects which segments are returned: False (default)
    for static only, True for dynamic only, None for both.
    """
    with session.begin(subtransactions=True):
        query = (session.query(models.NetworkSegment).
                 filter_by(network_id=network_id).
                 order_by(models.NetworkSegment.segment_index))
        if filter_dynamic is not None:
            query = query.filter_by(is_dynamic=filter_dynamic)
        return [_make_segment_dict(record) for record in query.all()]
def get_segment_by_id(session, segment_id):
    """Return the segment dict for segment_id, or None when it does not exist."""
    with session.begin(subtransactions=True):
        try:
            record = (session.query(models.NetworkSegment).
                      filter_by(id=segment_id).
                      one())
        except exc.NoResultFound:
            return None
        return _make_segment_dict(record)
def get_dynamic_segment(session, network_id, physical_network=None,
                        segmentation_id=None):
    """Return a dynamic segment for the filters provided if one exists."""
    with session.begin(subtransactions=True):
        query = (session.query(models.NetworkSegment).
                 filter_by(network_id=network_id, is_dynamic=True))
        # NOTE(review): truthiness tests mean a segmentation_id of 0
        # (or empty physical_network) skips the filter -- confirm 0 is
        # never a valid segmentation id here.
        if physical_network:
            query = query.filter_by(physical_network=physical_network)
        if segmentation_id:
            query = query.filter_by(segmentation_id=segmentation_id)
        record = query.first()

    if record:
        return _make_segment_dict(record)
    else:
        LOG.debug("No dynamic segment found for "
                  "Network:%(network_id)s, "
                  "Physical network:%(physnet)s, "
                  "segmentation_id:%(segmentation_id)s",
                  {'network_id': network_id,
                   'physnet': physical_network,
                   'segmentation_id': segmentation_id})
        return None
def delete_network_segment(session, segment_id):
    """Delete the network segment with the given id, if it exists.

    (Despite historically saying "release a dynamic segment", this
    deletes any segment matching segment_id, dynamic or not.)
    """
    with session.begin(subtransactions=True):
        (session.query(models.NetworkSegment).
         filter_by(id=segment_id).delete())
def add_port_binding(session, port_id):
    """Create and return an unbound PortBinding row for the given port."""
    with session.begin(subtransactions=True):
        binding = models.PortBinding(
            port_id=port_id,
            vif_type=portbindings.VIF_TYPE_UNBOUND)
        session.add(binding)
    return binding
def get_locked_port_and_binding(session, port_id):
    """Get port and port binding records for update within transaction.

    Returns (port, binding), or (None, None) when either row is missing.
    Both rows are fetched SELECT ... FOR UPDATE via with_lockmode.
    """
    try:
        # REVISIT(rkukura): We need the Port and PortBinding records
        # to both be added to the session and locked for update. A
        # single joined query should work, but the combination of left
        # outer joins and postgresql doesn't seem to work.
        port = (session.query(models_v2.Port).
                enable_eagerloads(False).
                filter_by(id=port_id).
                with_lockmode('update').
                one())
        binding = (session.query(models.PortBinding).
                   enable_eagerloads(False).
                   filter_by(port_id=port_id).
                   with_lockmode('update').
                   one())
        return port, binding
    except exc.NoResultFound:
        return None, None
def set_binding_levels(session, levels):
    """Persist the given PortBindingLevel rows; no-op for an empty list."""
    if not levels:
        LOG.debug("Attempted to set empty binding levels")
        return
    for level in levels:
        session.add(level)
    # All levels share a port and host; log using the first one.
    LOG.debug("For port %(port_id)s, host %(host)s, "
              "set binding levels %(levels)s",
              {'port_id': levels[0].port_id,
               'host': levels[0].host,
               'levels': levels})
def get_binding_levels(session, port_id, host):
    """Return PortBindingLevel rows for (port, host), ordered by level.

    Returns None (not an empty list) when host is empty/None.
    """
    if host:
        result = (session.query(models.PortBindingLevel).
                  filter_by(port_id=port_id, host=host).
                  order_by(models.PortBindingLevel.level).
                  all())
        LOG.debug("For port %(port_id)s, host %(host)s, "
                  "got binding levels %(levels)s",
                  {'port_id': port_id,
                   'host': host,
                   'levels': result})
        return result
def clear_binding_levels(session, port_id, host):
    """Delete all PortBindingLevel rows for (port, host); no-op if host is falsy."""
    if host:
        (session.query(models.PortBindingLevel).
         filter_by(port_id=port_id, host=host).
         delete())
        LOG.debug("For port %(port_id)s, host %(host)s, "
                  "cleared binding levels",
                  {'port_id': port_id,
                   'host': host})
def ensure_dvr_port_binding(session, port_id, host, router_id=None):
    """Return the DVRPortBinding for (port, host), creating it if absent.

    Safe against concurrent creation: a DBDuplicateEntry from a racing
    insert falls back to re-reading the winner's row.
    """
    record = (session.query(models.DVRPortBinding).
              filter_by(port_id=port_id, host=host).first())
    if record:
        return record

    try:
        with session.begin(subtransactions=True):
            record = models.DVRPortBinding(
                port_id=port_id,
                host=host,
                router_id=router_id,
                vif_type=portbindings.VIF_TYPE_UNBOUND,
                vnic_type=portbindings.VNIC_NORMAL,
                status=n_const.PORT_STATUS_DOWN)
            session.add(record)
            return record
    except db_exc.DBDuplicateEntry:
        LOG.debug("DVR Port %s already bound", port_id)
        return (session.query(models.DVRPortBinding).
                filter_by(port_id=port_id, host=host).one())
def delete_dvr_port_binding(session, port_id, host):
    """Delete the DVRPortBinding rows matching (port, host), if any."""
    with session.begin(subtransactions=True):
        query = session.query(models.DVRPortBinding)
        query = query.filter_by(port_id=port_id, host=host)
        query.delete(synchronize_session=False)
def delete_dvr_port_binding_if_stale(session, binding):
    """Remove a DVR binding that has no router and is DOWN (i.e. stale)."""
    stale = (not binding.router_id
             and binding.status == n_const.PORT_STATUS_DOWN)
    if stale:
        with session.begin(subtransactions=True):
            LOG.debug("DVR: Deleting binding %s", binding)
            session.delete(binding)
def get_port(session, port_id):
    """Get port record for update within transaction.

    `port_id` may be a UUID prefix; returns None when no port matches
    or when the prefix is ambiguous (multiple matches are logged).
    """
    with session.begin(subtransactions=True):
        try:
            record = (session.query(models_v2.Port).
                      enable_eagerloads(False).
                      filter(models_v2.Port.id.startswith(port_id)).
                      one())
            return record
        except exc.NoResultFound:
            return
        except exc.MultipleResultsFound:
            LOG.error(_LE("Multiple ports have port_id starting with %s"),
                      port_id)
            return
def get_port_from_device_mac(device_mac):
    """Return the first port whose MAC address equals device_mac, or None."""
    LOG.debug("get_port_from_device_mac() called for mac %s", device_mac)
    session = db_api.get_session()
    return (session.query(models_v2.Port).
            filter_by(mac_address=device_mac).
            first())
def get_ports_and_sgs(port_ids):
    """Get ports from database with security group info.

    :param port_ids: list of port ids (full or partial UUIDs)
    :returns: list of port dicts, each carrying its security group ids

    Queries larger than MAX_PORTS_PER_QUERY are split recursively so no
    single SQL statement grows unboundedly.
    """
    # break large queries into smaller parts
    if len(port_ids) > MAX_PORTS_PER_QUERY:
        LOG.debug("Number of ports %(pcount)s exceeds the maximum per "
                  "query %(maxp)s. Partitioning queries.",
                  {'pcount': len(port_ids), 'maxp': MAX_PORTS_PER_QUERY})
        return (get_ports_and_sgs(port_ids[:MAX_PORTS_PER_QUERY]) +
                get_ports_and_sgs(port_ids[MAX_PORTS_PER_QUERY:]))

    LOG.debug("get_ports_and_sgs() called for port_ids %s", port_ids)

    if not port_ids:
        # if port_ids is empty, avoid querying to DB to ask it for nothing
        return []
    ports_to_sg_ids = get_sg_ids_grouped_by_port(port_ids)
    # items() instead of the Python-2-only iteritems(): identical results
    # here, and compatible with Python 3.
    return [make_port_dict_with_security_groups(port, sec_groups)
            for port, sec_groups in ports_to_sg_ids.items()]
def get_sg_ids_grouped_by_port(port_ids):
    """Map each port matching port_ids to the list of its security group ids.

    port_ids may mix full UUIDs and UUID prefixes; ports without any
    security group map to an empty list.
    """
    session = db_api.get_session()
    sg_binding_port = sg_db.SecurityGroupPortBinding.port_id
    grouped = {}
    with session.begin(subtransactions=True):
        # partial UUIDs must be individually matched with startswith.
        # full UUIDs may be matched directly in an IN statement
        partial_uuids = set(pid for pid in port_ids
                            if not uuidutils.is_uuid_like(pid))
        full_uuids = set(port_ids) - partial_uuids
        criteria = [models_v2.Port.id.startswith(pid)
                    for pid in partial_uuids]
        if full_uuids:
            criteria.append(models_v2.Port.id.in_(full_uuids))
        query = session.query(
            models_v2.Port,
            sg_db.SecurityGroupPortBinding.security_group_id)
        query = query.outerjoin(sg_db.SecurityGroupPortBinding,
                                models_v2.Port.id == sg_binding_port)
        for port, sg_id in query.filter(or_(*criteria)):
            sg_list = grouped.setdefault(port, [])
            if sg_id:
                sg_list.append(sg_id)
        return grouped
def make_port_dict_with_security_groups(port, sec_groups):
    """Build a plugin port dict and attach security group information.

    fixed_ips is flattened to a plain list of IP address strings.
    """
    plugin = manager.NeutronManager.get_plugin()
    port_dict = plugin._make_port_dict(port)
    port_dict['security_groups'] = sec_groups
    port_dict['security_group_rules'] = []
    port_dict['security_group_source_groups'] = []
    addresses = [ip['ip_address'] for ip in port['fixed_ips']]
    port_dict['fixed_ips'] = addresses
    return port_dict
def get_port_binding_host(session, port_id):
    """Return the bound host for a (possibly partial) port id, or None.

    None is returned both when no binding matches and when the prefix is
    ambiguous (the latter is logged as an error).
    """
    try:
        with session.begin(subtransactions=True):
            binding = (session.query(models.PortBinding).
                       filter(models.PortBinding.port_id.startswith(port_id)).
                       one())
    except exc.NoResultFound:
        LOG.debug("No binding found for port %(port_id)s",
                  {'port_id': port_id})
        return
    except exc.MultipleResultsFound:
        LOG.error(_LE("Multiple ports have port_id starting with %s"),
                  port_id)
        return
    return binding.host
def generate_dvr_port_status(session, port_id):
    # an OR'ed value of status assigned to parent port from the
    # dvrportbinding bucket: ACTIVE wins outright, DOWN beats BUILD,
    # BUILD is the default when no binding says otherwise.
    final_status = n_const.PORT_STATUS_BUILD
    bindings = session.query(models.DVRPortBinding).filter(
        models.DVRPortBinding.port_id == port_id)
    for binding in bindings:
        status = binding.status
        if status == n_const.PORT_STATUS_ACTIVE:
            return status
        if status == n_const.PORT_STATUS_DOWN:
            final_status = status
    return final_status
def get_dvr_port_binding_by_host(session, port_id, host):
    """Return the first DVR binding for (port_id prefix, host), or None."""
    with session.begin(subtransactions=True):
        binding = (session.query(models.DVRPortBinding).
                   filter(models.DVRPortBinding.port_id.startswith(port_id),
                          models.DVRPortBinding.host == host).
                   first())
        if not binding:
            LOG.debug("No binding for DVR port %(port_id)s with host "
                      "%(host)s", {'port_id': port_id, 'host': host})
        return binding
def get_dvr_port_bindings(session, port_id):
    """Return every DVR binding whose port id starts with port_id."""
    with session.begin(subtransactions=True):
        bindings = (session.query(models.DVRPortBinding).
                    filter(models.DVRPortBinding.port_id.startswith(port_id)).
                    all())
        if not bindings:
            LOG.debug("No bindings for DVR port %s", port_id)
        return bindings
| |
######################
# (c) 2012 Andreas Mueller <amueller@ais.uni-bonn.de>
# License: BSD 3-clause
#
# Implements structured SVM as described in Joachims et. al.
# Cutting-Plane Training of Structural SVMs
from time import time
import numpy as np
import cvxopt
import cvxopt.solvers
from sklearn.externals.joblib import Parallel, delayed
from .ssvm import BaseSSVM
from ..utils import loss_augmented_inference
class NoConstraint(Exception):
    """Raised when no (violated) constraint can be constructed from cache."""
class OneSlackSSVM(BaseSSVM):
    """Structured SVM solver for the 1-slack QP with l1 slack penalty.

    Implements margin rescaled structural SVM using
    the 1-slack formulation and cutting plane method, solved using CVXOPT.
    The optimization is restarted in each iteration.

    Parameters
    ----------
    model : StructuredModel
        Object containing the model structure. Has to implement
        `loss`, `inference` and `loss_augmented_inference`.

    max_iter : int, default=10000
        Maximum number of passes over dataset to find constraints.

    C : float, default=1
        Regularization parameter.

    check_constraints : bool
        Whether to check if the new "most violated constraint" is
        more violated than previous constraints. Helpful for stopping
        and debugging, but costly.

    verbose : int
        Verbosity.

    negativity_constraint : list of ints
        Indices of parameters that are constrained to be negative.
        This is useful for learning submodular CRFs (inference is formulated
        as maximization in SSVMs, flipping some signs).

    break_on_bad : bool default=False
        Whether to break (start debug mode) when inference was approximate.

    n_jobs : int, default=1
        Number of parallel jobs for inference. -1 means as many as cpus.

    show_loss_every : int, default=0
        Controls how often the hamming loss is computed (for monitoring
        purposes). Zero means never, otherwise it will be computed every
        show_loss_every'th epoch.

    tol : float, default=1e-3
        Convergence tolerance. If dual objective decreases less than tol,
        learning is stopped. The default corresponds to ignoring the behavior
        of the dual objective and stop only if no more constraints can be
        found.

    inference_cache : int, default=0
        How many results of loss_augmented_inference to cache per sample.
        If > 0 the most violating of the cached examples will be used to
        construct a global constraint. Only if this constraint is not violated,
        inference will be run again. This parameter poses a memory /
        computation tradeoff. Storing more constraints might lead to RAM being
        exhausted. Using inference_cache > 0 is only advisable if computation
        time is dominated by inference.

    cache_tol : float, None or 'auto' default='auto'
        Tolerance when to reject a constraint from cache (and do inference).
        If None, ``tol`` will be used. Higher values might lead to faster
        learning. 'auto' uses a heuristic to determine the cache tolerance
        based on the duality gap, as described in [3].

    inactive_threshold : float, default=1e-5
        Threshold for dual variable of a constraint to be considered inactive.

    inactive_window : float, default=50
        Window for measuring inactivity. If a constraint is inactive for
        ``inactive_window`` iterations, it will be pruned from the QP.
        If set to 0, no constraints will be removed.

    switch_to : None or string, default=None
        Switch to the given inference method if the previous method does not
        find any more constraints.

    logger : logger object, default=None
        Pystruct logger for storing the model or extracting additional
        information.

    Attributes
    ----------
    w : nd-array, shape=(model.size_joint_feature,)
        The learned weights of the SVM.

    old_solution : dict
        The last solution found by the qp solver.

    ``loss_curve_`` : list of float
        List of loss values if show_loss_every > 0.

    ``objective_curve_`` : list of float
        Cutting plane objective after each pass through the dataset.

    ``primal_objective_curve_`` : list of float
        Primal objective after each pass through the dataset.

    ``timestamps_`` : list of int
        Total training time stored before each iteration.

    References
    ----------
    [1] Thorsten Joachims, and Thomas Finley and Chun-Nam John Yu:
        Cutting-plane training of structural SVMs, JMLR 2009

    [2] Andreas Mueller: Methods for Learning Structured Prediction in
        Semantic Segmentation of Natural Images, PhD Thesis. 2014

    [3] Andreas Mueller and Sven Behnke: Learning a Loopy Model For Semantic
        Segmentation Exactly, VISAPP 2014
    """

    def __init__(self, model, max_iter=10000, C=1.0, check_constraints=False,
                 verbose=0, negativity_constraint=None, n_jobs=1,
                 break_on_bad=False, show_loss_every=0, tol=1e-3,
                 inference_cache=0, inactive_threshold=1e-5,
                 inactive_window=50, logger=None, cache_tol='auto',
                 switch_to=None):
        BaseSSVM.__init__(self, model, max_iter, C, verbose=verbose,
                          n_jobs=n_jobs, show_loss_every=show_loss_every,
                          logger=logger)

        self.negativity_constraint = negativity_constraint
        self.check_constraints = check_constraints
        self.break_on_bad = break_on_bad
        self.tol = tol
        self.cache_tol = cache_tol
        self.inference_cache = inference_cache
        self.inactive_threshold = inactive_threshold
        self.inactive_window = inactive_window
        self.switch_to = switch_to

    def _solve_1_slack_qp(self, constraints, n_samples):
        """Solve the dual QP over the current working set of constraints.

        Updates ``self.w`` from the dual solution and returns the
        (sign-flipped) dual objective value.
        """
        # np.float was deprecated in NumPy 1.20 and removed in 1.24; it was
        # an alias for the builtin float, so this is behavior-identical.
        C = float(self.C) * n_samples  # this is how libsvm/svmstruct do it
        joint_features = [c[0] for c in constraints]
        losses = [c[1] for c in constraints]

        joint_feature_matrix = np.vstack(joint_features)
        n_constraints = len(joint_features)
        P = cvxopt.matrix(np.dot(joint_feature_matrix, joint_feature_matrix.T))
        # q contains loss from margin-rescaling
        q = cvxopt.matrix(-np.array(losses, dtype=float))

        # constraints: all alpha must be >zero
        idy = np.identity(n_constraints)
        tmp1 = np.zeros(n_constraints)
        # positivity constraints:
        if self.negativity_constraint is None:
            # empty constraints
            zero_constr = np.zeros(0)
            joint_features_constr = np.zeros((0, n_constraints))
        else:
            joint_features_constr = joint_feature_matrix.T[self.negativity_constraint]
            zero_constr = np.zeros(len(self.negativity_constraint))

        # put together
        G = cvxopt.sparse(cvxopt.matrix(np.vstack((-idy, joint_features_constr))))
        h = cvxopt.matrix(np.hstack((tmp1, zero_constr)))

        # equality constraint: sum of all alpha must be = C
        A = cvxopt.matrix(np.ones((1, n_constraints)))
        b = cvxopt.matrix([C])

        # solve QP model
        cvxopt.solvers.options['feastol'] = 1e-5
        try:
            solution = cvxopt.solvers.qp(P, q, G, h, A, b)
        except ValueError:
            solution = {'status': 'error'}
        if solution['status'] != "optimal":
            print("regularizing QP!")
            # add a tiny ridge to P to make the QP strictly convex and retry
            P = cvxopt.matrix(np.dot(joint_feature_matrix, joint_feature_matrix.T)
                              + 1e-8 * np.eye(joint_feature_matrix.shape[0]))
            solution = cvxopt.solvers.qp(P, q, G, h, A, b)
            if solution['status'] != "optimal":
                raise ValueError("QP solver failed. Try regularizing your QP.")

        # Lagrange multipliers
        a = np.ravel(solution['x'])
        self.old_solution = solution
        self.prune_constraints(constraints, a)

        # Support vectors have non zero lagrange multipliers
        sv = a > self.inactive_threshold * C
        if self.verbose > 1:
            print("%d support vectors out of %d points" % (np.sum(sv),
                                                           n_constraints))
        self.w = np.dot(a, joint_feature_matrix)
        # we needed to flip the sign to make the dual into a minimization
        # model
        return -solution['primal objective']

    def prune_constraints(self, constraints, a):
        """Record dual values and drop constraints inactive over the window."""
        # append list for new constraint
        self.alphas.append([])
        assert(len(self.alphas) == len(constraints))
        for constraint, alpha in zip(self.alphas, a):
            constraint.append(alpha)
            # NOTE(review): this rebinds the loop variable only; the stored
            # alpha history is never actually truncated. Harmless, since all
            # readers slice with [-inactive_window:], but likely unintended.
            constraint = constraint[-self.inactive_window:]

        # prune unused constraints:
        # if the max of alpha in last 50 iterations was small, throw away
        if self.inactive_window != 0:
            max_active = [np.max(constr[-self.inactive_window:])
                          for constr in self.alphas]
            # find strongest constraint that is not ground truth constraint
            strongest = np.max(max_active[1:])
            inactive = np.where(max_active
                                < self.inactive_threshold * strongest)[0]

            for idx in reversed(inactive):
                # if we don't reverse, we'll mess the indices up
                del constraints[idx]
                del self.alphas[idx]

    def _check_bad_constraint(self, violation, djoint_feature_mean, loss,
                              old_constraints, break_on_bad, tol=None):
        """Return True if the candidate constraint should NOT be added.

        A constraint is rejected when it is weaker than ``tol``, duplicates
        an existing one, or (with check_constraints) is dominated by an old
        constraint — the latter indicating approximate inference.
        """
        violation_difference = violation - self.last_slack_
        if self.verbose > 1:
            print("New violation: %f difference to last: %f"
                  % (violation, violation_difference))
        if violation_difference < 0 and violation > 0 and break_on_bad:
            raise ValueError("Bad inference: new violation is smaller than"
                             " old.")
        if tol is None:
            tol = self.tol
        if violation_difference < tol:
            if self.verbose:
                print("new constraint too weak.")
            return True
        equals = [True for djoint_feature_, loss_ in old_constraints
                  if (np.all(djoint_feature_ == djoint_feature_mean) and loss == loss_)]

        if np.any(equals):
            return True

        if self.check_constraints:
            for con in old_constraints:
                # compute violation for old constraint
                violation_tmp = max(con[1] - np.dot(self.w, con[0]), 0)
                if self.verbose > 5:
                    print("violation old constraint: %f" % violation_tmp)
                # if violation of new constraint is smaller or not
                # significantly larger, don't add constraint.
                # if smaller, complain about approximate inference.
                if violation - violation_tmp < -1e-5:
                    if self.verbose:
                        print("bad inference: %f" % (violation_tmp - violation))
                    if break_on_bad:
                        raise ValueError("Bad inference: new violation is"
                                         " weaker than previous constraint.")
                    return True
        return False

    def _update_cache(self, X, Y, Y_hat):
        """Updated cached constraints."""
        if self.inference_cache == 0:
            return
        if (not hasattr(self, "inference_cache_")
                or self.inference_cache_ is None):
            self.inference_cache_ = [[] for y in Y_hat]

        def constraint_equal(y_1, y_2):
            # relaxed labelings are (unary, pairwise) tuples
            if isinstance(y_1, tuple):
                return np.all(y_1[0] == y_2[0]) and np.all(y_1[1] == y_2[1])
            return np.all(y_1 == y_2)

        for sample, x, y, y_hat in zip(self.inference_cache_, X, Y, Y_hat):
            already_there = [constraint_equal(y_hat, cache[2])
                             for cache in sample]
            if np.any(already_there):
                continue
            if len(sample) > self.inference_cache:
                sample.pop(0)
            # we computed both of these before, but summed them up immediately
            # this makes it a little less efficient in the caching case.
            # the idea is that if we cache, inference is way more expensive
            # and this doesn't matter much.
            sample.append((self.model.joint_feature(x, y_hat),
                           self.model.loss(y, y_hat), y_hat))

    def _constraint_from_cache(self, X, Y, joint_feature_gt, constraints):
        """Build a global constraint from cached per-sample inferences.

        Raises NoConstraint when the cache is empty, the duality gap is
        already below the cache tolerance, or the cached constraint is too
        weak to add.
        """
        if (not getattr(self, 'inference_cache_', False) or
                self.inference_cache_ is False):
            if self.verbose > 10:
                print("Empty cache.")
            raise NoConstraint
        gap = self.primal_objective_curve_[-1] - self.objective_curve_[-1]
        if (self.cache_tol == 'auto' and gap < self.cache_tol_):
            # do inference if gap has become to small
            if self.verbose > 1:
                print("Last gap too small (%f < %f), not loading constraint"
                      " from cache."
                      % (gap, self.cache_tol_))
            raise NoConstraint

        Y_hat = []
        joint_feature_acc = np.zeros(self.model.size_joint_feature)
        loss_mean = 0
        for cached in self.inference_cache_:
            # cached has entries of form (joint_feature, loss, y_hat)
            violations = [np.dot(joint_feature, self.w) + loss
                          for joint_feature, loss, _ in cached]
            joint_feature, loss, y_hat = cached[np.argmax(violations)]
            Y_hat.append(y_hat)
            joint_feature_acc += joint_feature
            loss_mean += loss

        djoint_feature = (joint_feature_gt - joint_feature_acc) / len(X)
        loss_mean = loss_mean / len(X)
        violation = loss_mean - np.dot(self.w, djoint_feature)
        if self._check_bad_constraint(violation, djoint_feature, loss_mean, constraints,
                                      break_on_bad=False):
            if self.verbose > 1:
                print("No constraint from cache.")
            raise NoConstraint
        return Y_hat, djoint_feature, loss_mean

    def _find_new_constraint(self, X, Y, joint_feature_gt, constraints, check=True):
        """Run loss-augmented inference and build the new global constraint.

        Raises NoConstraint when the found constraint is not (sufficiently)
        violated.
        """
        if self.n_jobs != 1:
            # do inference in parallel
            verbose = max(0, self.verbose - 3)
            Y_hat = Parallel(n_jobs=self.n_jobs, verbose=verbose)(
                delayed(loss_augmented_inference)(
                    self.model, x, y, self.w, relaxed=True)
                for x, y in zip(X, Y))
        else:
            Y_hat = self.model.batch_loss_augmented_inference(
                X, Y, self.w, relaxed=True)
        # compute the mean over joint_features and losses
        if getattr(self.model, 'rescale_C', False):
            djoint_feature = (joint_feature_gt
                              - self.model.batch_joint_feature(X, Y_hat, Y)) / len(X)
        else:
            djoint_feature = (joint_feature_gt
                              - self.model.batch_joint_feature(X, Y_hat)) / len(X)

        loss_mean = np.mean(self.model.batch_loss(Y, Y_hat))

        violation = loss_mean - np.dot(self.w, djoint_feature)
        if check and self._check_bad_constraint(
                violation, djoint_feature, loss_mean, constraints,
                break_on_bad=self.break_on_bad):
            raise NoConstraint
        return Y_hat, djoint_feature, loss_mean

    def fit(self, X, Y, constraints=None, warm_start=False, initialize=True):
        """Learn parameters using cutting plane method.

        Parameters
        ----------
        X : iterable
            Training instances. Contains the structured input objects.
            No requirement on the particular form of entries of X is made.

        Y : iterable
            Training labels. Contains the structured labels for inputs in X.
            Needs to have the same length as X.

        constraints : ignored

        warm_start : bool, default=False
            Whether we are warmstarting from a previous fit.

        initialize : boolean, default=True
            Whether to initialize the model for the data.
            Leave this true except if you really know what you are doing.
        """
        if self.verbose:
            print("Training 1-slack dual structural SVM")
        cvxopt.solvers.options['show_progress'] = self.verbose > 3
        if initialize:
            self.model.initialize(X, Y)

        # parse cache_tol parameter
        if self.cache_tol is None or self.cache_tol == 'auto':
            self.cache_tol_ = self.tol
        else:
            self.cache_tol_ = self.cache_tol

        if not warm_start:
            self.w = np.zeros(self.model.size_joint_feature)
            constraints = []
            self.objective_curve_, self.primal_objective_curve_ = [], []
            self.cached_constraint_ = []
            self.alphas = []  # dual solutions
            # append constraint given by ground truth to make our life easier
            constraints.append((np.zeros(self.model.size_joint_feature), 0))
            self.alphas.append([self.C])
            self.inference_cache_ = None
            self.timestamps_ = [time()]
        elif warm_start == "soft":
            self.w = np.zeros(self.model.size_joint_feature)
            constraints = []
            self.alphas = []  # dual solutions
            # append constraint given by ground truth to make our life easier
            constraints.append((np.zeros(self.model.size_joint_feature), 0))
            self.alphas.append([self.C])
        else:
            constraints = self.constraints_

        self.last_slack_ = -1

        # get the joint_feature of the ground truth
        if getattr(self.model, 'rescale_C', False):
            joint_feature_gt = self.model.batch_joint_feature(X, Y, Y)
        else:
            joint_feature_gt = self.model.batch_joint_feature(X, Y)

        try:
            # catch ctrl+c to stop training
            for iteration in range(self.max_iter):
                # main loop
                cached_constraint = False
                if self.verbose > 0:
                    print("iteration %d" % iteration)
                if self.verbose > 2:
                    print(self)
                try:
                    Y_hat, djoint_feature, loss_mean = self._constraint_from_cache(
                        X, Y, joint_feature_gt, constraints)
                    cached_constraint = True
                except NoConstraint:
                    try:
                        Y_hat, djoint_feature, loss_mean = self._find_new_constraint(
                            X, Y, joint_feature_gt, constraints)
                        self._update_cache(X, Y, Y_hat)
                    except NoConstraint:
                        if self.verbose:
                            print("no additional constraints")
                        if (self.switch_to is not None
                                and self.model.inference_method !=
                                self.switch_to):
                            if self.verbose:
                                print("Switching to %s inference" %
                                      str(self.switch_to))
                            self.model.inference_method_ = \
                                self.model.inference_method
                            self.model.inference_method = self.switch_to
                            continue
                        else:
                            break

                self.timestamps_.append(time() - self.timestamps_[0])
                self._compute_training_loss(X, Y, iteration)
                constraints.append((djoint_feature, loss_mean))

                # compute primal objective
                last_slack = -np.dot(self.w, djoint_feature) + loss_mean
                primal_objective = (self.C * len(X)
                                    * max(last_slack, 0)
                                    + np.sum(self.w ** 2) / 2)
                self.primal_objective_curve_.append(primal_objective)
                self.cached_constraint_.append(cached_constraint)

                objective = self._solve_1_slack_qp(constraints,
                                                   n_samples=len(X))

                # update cache tolerance if cache_tol is auto:
                if self.cache_tol == "auto" and not cached_constraint:
                    self.cache_tol_ = (primal_objective - objective) / 4

                self.last_slack_ = np.max([(-np.dot(self.w, djoint_feature) + loss_mean)
                                           for djoint_feature, loss_mean in constraints])
                self.last_slack_ = max(self.last_slack_, 0)

                if self.verbose > 0:
                    # the cutting plane objective can also be computed as
                    # self.C * len(X) * self.last_slack_ + np.sum(self.w**2)/2
                    print("cutting plane objective: %f, primal objective %f"
                          % (objective, primal_objective))
                # we only do this here because we didn't add the gt to the
                # constraints, which makes the dual behave a bit oddly
                self.objective_curve_.append(objective)

                self.constraints_ = constraints
                if self.logger is not None:
                    self.logger(self, iteration)

                if self.verbose > 5:
                    print(self.w)
        except KeyboardInterrupt:
            pass
        if self.verbose and self.n_jobs == 1:
            print("calls to inference: %d" % self.model.inference_calls)
        # compute final objective:
        self.timestamps_.append(time() - self.timestamps_[0])
        primal_objective = self._objective(X, Y)
        self.primal_objective_curve_.append(primal_objective)

        self.objective_curve_.append(objective)
        self.cached_constraint_.append(False)

        if self.logger is not None:
            self.logger(self, 'final')

        if self.verbose > 0:
            print("final primal objective: %f gap: %f"
                  % (primal_objective, primal_objective - objective))
        return self
| |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
from antlr3 import *
from antlr3.compat import set, frozenset
# The five ordinals ("first" .. "fifth") a Groc spec may mention; used by the
# grammar actions to expand "every <weekday>"-style specs to all ordinals.
allOrdinals = set([1, 2, 3, 4, 5])
numOrdinals = len(allOrdinals)

# Token type constants generated by ANTLR from the Groc.g grammar.
# Do not edit by hand; regenerate from the grammar instead.
HIDDEN = BaseRecognizer.HIDDEN
MONTH=27
THURSDAY=23
FOURTH_OR_FIFTH=16
THIRD=13
DECEMBER=39
FROM=41
EVERY=6
WEDNESDAY=22
QUARTER=40
SATURDAY=25
SYNCHRONIZED=9
JANUARY=28
SUNDAY=26
TUESDAY=21
SEPTEMBER=36
UNKNOWN_TOKEN=45
AUGUST=35
JULY=34
MAY=32
FRIDAY=24
DIGITS=8
FEBRUARY=29
TWO_DIGIT_HOUR_TIME=43
OF=4
WS=44
EOF=-1
APRIL=31
COMMA=10
JUNE=33
OCTOBER=37
TIME=5
FIFTH=15
NOVEMBER=38
FIRST=11
DIGIT=7
FOURTH=14
MONDAY=20
HOURS=17
MARCH=30
SECOND=12
MINUTES=18
TO=42
DAY=19

# Human-readable names for the token types above, indexed by token type.
tokenNames = [
    "<invalid>", "<EOR>", "<DOWN>", "<UP>",
    "OF", "TIME", "EVERY", "DIGIT", "DIGITS", "SYNCHRONIZED", "COMMA", "FIRST",
    "SECOND", "THIRD", "FOURTH", "FIFTH", "FOURTH_OR_FIFTH", "HOURS", "MINUTES",
    "DAY", "MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY", "SATURDAY",
    "SUNDAY", "MONTH", "JANUARY", "FEBRUARY", "MARCH", "APRIL", "MAY", "JUNE",
    "JULY", "AUGUST", "SEPTEMBER", "OCTOBER", "NOVEMBER", "DECEMBER", "QUARTER",
    "FROM", "TO", "TWO_DIGIT_HOUR_TIME", "WS", "UNKNOWN_TOKEN"
]
class GrocParser(Parser):
# ANTLR bookkeeping: source grammar and the runtime version this parser
# was generated against.
grammarFileName = "Groc.g"
antlr_version = version_str_to_tuple("3.1.1")
antlr_version_str = "3.1.1"
tokenNames = tokenNames
def __init__(self, input, state=None):
    # Generated ANTLR parser constructor: sets up the DFA used to predict
    # the `specifictime` rule, then initializes the semantic accumulators
    # that the grammar actions fill in while parsing a Groc schedule spec.
    if state is None:
        state = RecognizerSharedState()
    Parser.__init__(self, input, state)
    self.dfa4 = self.DFA4(
        self, 4,
        eot = self.DFA4_eot,
        eof = self.DFA4_eof,
        min = self.DFA4_min,
        max = self.DFA4_max,
        accept = self.DFA4_accept,
        special = self.DFA4_special,
        transition = self.DFA4_transition
        )
    # Results of the parse, read by callers after timespec() returns.
    self.ordinal_set = set()        # ordinals (1..5) mentioned in the spec
    self.weekday_set = set()        # weekdays, 0 (Sunday) .. 6 (Saturday)
    self.month_set = set()          # months, 1..12
    self.monthday_set = set()       # days of the month
    self.time_string = ''           # "HH:MM" for specific-time specs
    self.interval_mins = 0          # N for "every N hours/minutes"
    self.period_string = ''         # "hours" or "minutes"
    self.synchronized = False       # True for "... synchronized"
    self.start_time_string = ''
    self.end_time_string = ''
# Numeric value of each meaningful token type: weekdays map to 0-6
# (Sunday=0), ordinals to 1-5, months to 1-12. Looked up via ValueOf().
valuesDict = {
    SUNDAY: 0,
    FIRST: 1,
    MONDAY: 1,
    JANUARY: 1,
    TUESDAY: 2,
    SECOND: 2,
    FEBRUARY: 2,
    WEDNESDAY: 3,
    THIRD: 3,
    MARCH: 3,
    THURSDAY: 4,
    FOURTH: 4,
    APRIL: 4,
    FRIDAY: 5,
    FIFTH: 5,
    MAY: 5,
    SATURDAY: 6,
    JUNE: 6,
    JULY: 7,
    AUGUST: 8,
    SEPTEMBER: 9,
    OCTOBER: 10,
    NOVEMBER: 11,
    DECEMBER: 12,
}
def ValueOf(self, token_type):
    # Translate a token type to its numeric value (see valuesDict);
    # -1 for token types with no numeric meaning.
    return self.valuesDict.get(token_type, -1)
def timespec(self, ):
    # Entry rule (generated by ANTLR): a spec is either a specific time
    # ("first monday of march 09:00") or an interval ("every 5 minutes"),
    # followed by end of input. LA(1)/LA(2) lookahead picks the alternative.
    try:
        try:
            pass
            alt1 = 2
            LA1_0 = self.input.LA(1)
            if (LA1_0 == EVERY) :
                LA1_1 = self.input.LA(2)
                if ((DIGIT <= LA1_1 <= DIGITS)) :
                    # "every <number> ..." -> interval
                    alt1 = 2
                elif ((DAY <= LA1_1 <= SUNDAY)) :
                    # "every <weekday/day> ..." -> specific time
                    alt1 = 1
                else:
                    nvae = NoViableAltException("", 1, 1, self.input)
                    raise nvae
            elif ((DIGIT <= LA1_0 <= DIGITS) or (FIRST <= LA1_0 <= FOURTH_OR_FIFTH)) :
                alt1 = 1
            else:
                nvae = NoViableAltException("", 1, 0, self.input)
                raise nvae
            if alt1 == 1:
                pass
                self._state.following.append(self.FOLLOW_specifictime_in_timespec44)
                self.specifictime()
                self._state.following.pop()
            elif alt1 == 2:
                pass
                self._state.following.append(self.FOLLOW_interval_in_timespec48)
                self.interval()
                self._state.following.pop()
            self.match(self.input, EOF, self.FOLLOW_EOF_in_timespec52)
        except RecognitionException, re:
            self.reportError(re)
            self.recover(self.input, re)
    finally:
        pass
    return
def specifictime(self, ):
    # Rule (generated): parses "<ordinals> <weekdays> [of <month/quarter>]
    # <TIME>", storing the matched clock time in self.time_string. dfa4
    # decides whether an "of <monthspec|quarterspec>" clause is present.
    TIME1 = None

    try:
        try:
            pass
            pass
            alt4 = 2
            alt4 = self.dfa4.predict(self.input)
            if alt4 == 1:
                # Variant with an "of ..." clause.
                pass
                pass
                alt2 = 2
                LA2_0 = self.input.LA(1)
                if (LA2_0 == EVERY or (FIRST <= LA2_0 <= FOURTH_OR_FIFTH)) :
                    alt2 = 1
                elif ((DIGIT <= LA2_0 <= DIGITS)) :
                    alt2 = 2
                else:
                    nvae = NoViableAltException("", 2, 0, self.input)
                    raise nvae
                if alt2 == 1:
                    # ordinals + weekdays ("first monday ...")
                    pass
                    pass
                    self._state.following.append(self.FOLLOW_ordinals_in_specifictime72)
                    self.ordinals()
                    self._state.following.pop()
                    self._state.following.append(self.FOLLOW_weekdays_in_specifictime74)
                    self.weekdays()
                    self._state.following.pop()
                elif alt2 == 2:
                    # numeric days of the month ("1,15 ...")
                    pass
                    self._state.following.append(self.FOLLOW_monthdays_in_specifictime77)
                    self.monthdays()
                    self._state.following.pop()
                self.match(self.input, OF, self.FOLLOW_OF_in_specifictime80)
                alt3 = 2
                LA3_0 = self.input.LA(1)
                if ((MONTH <= LA3_0 <= DECEMBER)) :
                    alt3 = 1
                elif ((FIRST <= LA3_0 <= THIRD) or LA3_0 == QUARTER) :
                    alt3 = 2
                else:
                    nvae = NoViableAltException("", 3, 0, self.input)
                    raise nvae
                if alt3 == 1:
                    pass
                    self._state.following.append(self.FOLLOW_monthspec_in_specifictime83)
                    self.monthspec()
                    self._state.following.pop()
                elif alt3 == 2:
                    pass
                    self._state.following.append(self.FOLLOW_quarterspec_in_specifictime85)
                    self.quarterspec()
                    self._state.following.pop()
            elif alt4 == 2:
                # No "of ..." clause: applies to every month.
                pass
                pass
                self._state.following.append(self.FOLLOW_ordinals_in_specifictime101)
                self.ordinals()
                self._state.following.pop()
                self._state.following.append(self.FOLLOW_weekdays_in_specifictime103)
                self.weekdays()
                self._state.following.pop()
                self.month_set = set(range(1,13))
            TIME1=self.match(self.input, TIME, self.FOLLOW_TIME_in_specifictime117)
            self.time_string = TIME1.text
        except RecognitionException, re:
            self.reportError(re)
            self.recover(self.input, re)
    finally:
        pass
    return
def interval(self, ):
    # Rule (generated): "every N (hours|minutes) [from ... | synchronized]".
    # Stores N in self.interval_mins, the unit in self.period_string, and
    # optionally parses a time range or sets self.synchronized.
    intervalnum = None
    period2 = None

    try:
        try:
            pass
            pass
            self.match(self.input, EVERY, self.FOLLOW_EVERY_in_interval136)
            intervalnum = self.input.LT(1)
            if (DIGIT <= self.input.LA(1) <= DIGITS):
                self.input.consume()
                self._state.errorRecovery = False
            else:
                mse = MismatchedSetException(None, self.input)
                raise mse
            self.interval_mins = int(intervalnum.text)
            self._state.following.append(self.FOLLOW_period_in_interval164)
            period2 = self.period()
            self._state.following.pop()
            # Python-2 "and/or" conditional: text of the matched period rule.
            if ((period2 is not None) and [self.input.toString(period2.start,period2.stop)] or [None])[0] == "hours":
                self.period_string = "hours"
            else:
                self.period_string = "minutes"
            # Optional tail: a FROM time range, or the SYNCHRONIZED keyword.
            alt5 = 3
            LA5_0 = self.input.LA(1)
            if (LA5_0 == FROM) :
                alt5 = 1
            elif (LA5_0 == SYNCHRONIZED) :
                alt5 = 2
            if alt5 == 1:
                pass
                self._state.following.append(self.FOLLOW_time_range_in_interval176)
                self.time_range()
                self._state.following.pop()
            elif alt5 == 2:
                pass
                pass
                self.match(self.input, SYNCHRONIZED, self.FOLLOW_SYNCHRONIZED_in_interval189)
                self.synchronized = True
        except RecognitionException, re:
            self.reportError(re)
            self.recover(self.input, re)
    finally:
        pass
    return
def ordinals(self, ):
    # Rule (generated): either the keyword EVERY (all ordinals implied
    # elsewhere) or a comma-separated list of ordinal words
    # ("first,third ..."), each accumulated by ordinal().
    try:
        try:
            pass
            alt7 = 2
            LA7_0 = self.input.LA(1)
            if (LA7_0 == EVERY) :
                alt7 = 1
            elif ((FIRST <= LA7_0 <= FOURTH_OR_FIFTH)) :
                alt7 = 2
            else:
                nvae = NoViableAltException("", 7, 0, self.input)
                raise nvae
            if alt7 == 1:
                pass
                self.match(self.input, EVERY, self.FOLLOW_EVERY_in_ordinals218)
            elif alt7 == 2:
                pass
                pass
                self._state.following.append(self.FOLLOW_ordinal_in_ordinals226)
                self.ordinal()
                self._state.following.pop()
                # ( COMMA ordinal )* loop
                while True:
                    alt6 = 2
                    LA6_0 = self.input.LA(1)
                    if (LA6_0 == COMMA) :
                        alt6 = 1
                    if alt6 == 1:
                        pass
                        self.match(self.input, COMMA, self.FOLLOW_COMMA_in_ordinals229)
                        self._state.following.append(self.FOLLOW_ordinal_in_ordinals231)
                        self.ordinal()
                        self._state.following.pop()
                    else:
                        break
        except RecognitionException, re:
            self.reportError(re)
            self.recover(self.input, re)
    finally:
        pass
    return
def ordinal(self, ):
    # Rule (generated): matches one ordinal token (FIRST..FOURTH_OR_FIFTH)
    # and records its numeric value in self.ordinal_set.
    ord = None

    try:
        try:
            pass
            ord = self.input.LT(1)
            if (FIRST <= self.input.LA(1) <= FOURTH_OR_FIFTH):
                self.input.consume()
                self._state.errorRecovery = False
            else:
                mse = MismatchedSetException(None, self.input)
                raise mse
            self.ordinal_set.add(self.ValueOf(ord.type));
        except RecognitionException, re:
            self.reportError(re)
            self.recover(self.input, re)
    finally:
        pass
    return
class period_return(ParserRuleReturnScope):
    # Return scope for the `period` rule: carries .start/.stop tokens so the
    # caller can recover the matched text ("hours" or "minutes").
    def __init__(self):
        ParserRuleReturnScope.__init__(self)
def period(self, ):
    # Rule (generated): matches HOURS or MINUTES and returns a scope whose
    # start/stop tokens delimit the matched text.
    retval = self.period_return()
    retval.start = self.input.LT(1)

    try:
        try:
            pass
            if (HOURS <= self.input.LA(1) <= MINUTES):
                self.input.consume()
                self._state.errorRecovery = False
            else:
                mse = MismatchedSetException(None, self.input)
                raise mse
            retval.stop = self.input.LT(-1)
        except RecognitionException, re:
            self.reportError(re)
            self.recover(self.input, re)
    finally:
        pass
    return retval
def monthdays(self, ):
    # Rule (generated): comma-separated list of day-of-month numbers,
    # each accumulated by monthday().
    try:
        try:
            pass
            pass
            self._state.following.append(self.FOLLOW_monthday_in_monthdays314)
            self.monthday()
            self._state.following.pop()
            # ( COMMA monthday )* loop
            while True:
                alt8 = 2
                LA8_0 = self.input.LA(1)
                if (LA8_0 == COMMA) :
                    alt8 = 1
                if alt8 == 1:
                    pass
                    self.match(self.input, COMMA, self.FOLLOW_COMMA_in_monthdays318)
                    self._state.following.append(self.FOLLOW_monthday_in_monthdays320)
                    self.monthday()
                    self._state.following.pop()
                else:
                    break
        except RecognitionException, re:
            self.reportError(re)
            self.recover(self.input, re)
    finally:
        pass
    return
def monthday(self, ):
    # Rule (generated): matches one 1-2 digit number and records it in
    # self.monthday_set.
    day = None

    try:
        try:
            pass
            day = self.input.LT(1)
            if (DIGIT <= self.input.LA(1) <= DIGITS):
                self.input.consume()
                self._state.errorRecovery = False
            else:
                mse = MismatchedSetException(None, self.input)
                raise mse
            self.monthday_set.add(int(day.text));
        except RecognitionException, re:
            self.reportError(re)
            self.recover(self.input, re)
    finally:
        pass
    return
def weekdays(self, ):
    # Rule (generated): either the DAY keyword or a comma-separated list of
    # weekday names. Grammar actions translate "ordinal day" into days of
    # the month, and default to all ordinals when none were given.
    try:
        try:
            pass
            alt10 = 2
            LA10_0 = self.input.LA(1)
            if (LA10_0 == DAY) :
                alt10 = 1
            elif ((MONDAY <= LA10_0 <= SUNDAY)) :
                alt10 = 2
            else:
                nvae = NoViableAltException("", 10, 0, self.input)
                raise nvae
            if alt10 == 1:
                pass
                self.match(self.input, DAY, self.FOLLOW_DAY_in_weekdays365)
                if self.ordinal_set:
                    # "<ordinal> day" means that day of the month.
                    self.monthday_set = self.ordinal_set
                    self.ordinal_set = set()
                else:
                    # bare "day": every ordinal, every weekday.
                    self.ordinal_set = self.ordinal_set.union(allOrdinals)
                    self.weekday_set = set([self.ValueOf(SUNDAY), self.ValueOf(MONDAY),
                        self.ValueOf(TUESDAY), self.ValueOf(WEDNESDAY),
                        self.ValueOf(THURSDAY), self.ValueOf(FRIDAY),
                        self.ValueOf(SATURDAY), self.ValueOf(SUNDAY)])
            elif alt10 == 2:
                pass
                pass
                self._state.following.append(self.FOLLOW_weekday_in_weekdays373)
                self.weekday()
                self._state.following.pop()
                # ( COMMA weekday )* loop
                while True:
                    alt9 = 2
                    LA9_0 = self.input.LA(1)
                    if (LA9_0 == COMMA) :
                        alt9 = 1
                    if alt9 == 1:
                        pass
                        self.match(self.input, COMMA, self.FOLLOW_COMMA_in_weekdays376)
                        self._state.following.append(self.FOLLOW_weekday_in_weekdays378)
                        self.weekday()
                        self._state.following.pop()
                    else:
                        break
                if not self.ordinal_set:
                    self.ordinal_set = self.ordinal_set.union(allOrdinals)
        except RecognitionException, re:
            self.reportError(re)
            self.recover(self.input, re)
    finally:
        pass
    return
def weekday(self, ):
    # Rule (generated): matches one weekday name token and records its
    # numeric value (Sunday=0..Saturday=6) in self.weekday_set.
    dayname = None

    try:
        try:
            pass
            dayname = self.input.LT(1)
            if (MONDAY <= self.input.LA(1) <= SUNDAY):
                self.input.consume()
                self._state.errorRecovery = False
            else:
                mse = MismatchedSetException(None, self.input)
                raise mse
            self.weekday_set.add(self.ValueOf(dayname.type))
        except RecognitionException, re:
            self.reportError(re)
            self.recover(self.input, re)
    finally:
        pass
    return
def monthspec(self, ):
    """Parse a month specification: either the MONTH keyword (meaning every
    month) or an explicit list of month names.

    ANTLR-generated rule method for the Groc grammar (Python 2 target);
    do not edit by hand.
    """
    try:
        try:
            pass
            # Decision #11: MONTH keyword vs. explicit month list.
            alt11 = 2
            LA11_0 = self.input.LA(1)
            if (LA11_0 == MONTH) :
                alt11 = 1
            elif ((JANUARY <= LA11_0 <= DECEMBER)) :
                alt11 = 2
            else:
                nvae = NoViableAltException("", 11, 0, self.input)
                raise nvae
            if alt11 == 1:
                pass
                self.match(self.input, MONTH, self.FOLLOW_MONTH_in_monthspec459)
                # Bare "month" selects all twelve months.
                self.month_set = self.month_set.union(set([
                    self.ValueOf(JANUARY), self.ValueOf(FEBRUARY), self.ValueOf(MARCH),
                    self.ValueOf(APRIL), self.ValueOf(MAY), self.ValueOf(JUNE),
                    self.ValueOf(JULY), self.ValueOf(AUGUST), self.ValueOf(SEPTEMBER),
                    self.ValueOf(OCTOBER), self.ValueOf(NOVEMBER),
                    self.ValueOf(DECEMBER)]))
            elif alt11 == 2:
                pass
                self._state.following.append(self.FOLLOW_months_in_monthspec469)
                self.months()
                self._state.following.pop()
        # Python 2 except syntax; `re` shadows the re module locally.
        except RecognitionException, re:
            self.reportError(re)
            self.recover(self.input, re)
    finally:
        pass
    return
def months(self, ):
    """Parse a comma-separated list of month names (at least one).

    ANTLR-generated rule method for the Groc grammar (Python 2 target);
    do not edit by hand.
    """
    try:
        try:
            pass
            pass
            self._state.following.append(self.FOLLOW_month_in_months486)
            self.month()
            self._state.following.pop()
            # Zero or more ", month" continuations.
            while True:
                alt12 = 2
                LA12_0 = self.input.LA(1)
                if (LA12_0 == COMMA) :
                    alt12 = 1
                if alt12 == 1:
                    pass
                    self.match(self.input, COMMA, self.FOLLOW_COMMA_in_months489)
                    self._state.following.append(self.FOLLOW_month_in_months491)
                    self.month()
                    self._state.following.pop()
                else:
                    break
        # Python 2 except syntax; `re` shadows the re module locally.
        except RecognitionException, re:
            self.reportError(re)
            self.recover(self.input, re)
    finally:
        pass
    return
def month(self, ):
    """Parse a single month-name token and add its value to month_set.

    ANTLR-generated rule method for the Groc grammar (Python 2 target);
    do not edit by hand.
    """
    monthname = None
    try:
        try:
            pass
            monthname = self.input.LT(1)
            # Accept any token whose type lies in the JANUARY..DECEMBER range.
            if (JANUARY <= self.input.LA(1) <= DECEMBER):
                self.input.consume()
                self._state.errorRecovery = False
            else:
                mse = MismatchedSetException(None, self.input)
                raise mse
            self.month_set.add(self.ValueOf(monthname.type));
        # Python 2 except syntax; `re` shadows the re module locally.
        except RecognitionException, re:
            self.reportError(re)
            self.recover(self.input, re)
    finally:
        pass
    return
def quarterspec(self, ):
    """Parse a quarter specification: either the QUARTER keyword (first month
    of every quarter) or '<ordinals> month of quarter'.

    ANTLR-generated rule method for the Groc grammar (Python 2 target);
    do not edit by hand.
    """
    try:
        try:
            pass
            # Decision #13: QUARTER keyword vs. ordinal form.
            alt13 = 2
            LA13_0 = self.input.LA(1)
            if (LA13_0 == QUARTER) :
                alt13 = 1
            elif ((FIRST <= LA13_0 <= THIRD)) :
                alt13 = 2
            else:
                nvae = NoViableAltException("", 13, 0, self.input)
                raise nvae
            if alt13 == 1:
                pass
                self.match(self.input, QUARTER, self.FOLLOW_QUARTER_in_quarterspec583)
                # Bare "quarter" selects the first month of each quarter.
                self.month_set = self.month_set.union(set([
                    self.ValueOf(JANUARY), self.ValueOf(APRIL), self.ValueOf(JULY),
                    self.ValueOf(OCTOBER)]))
            elif alt13 == 2:
                pass
                pass
                self._state.following.append(self.FOLLOW_quarter_ordinals_in_quarterspec595)
                self.quarter_ordinals()
                self._state.following.pop()
                self.match(self.input, MONTH, self.FOLLOW_MONTH_in_quarterspec597)
                self.match(self.input, OF, self.FOLLOW_OF_in_quarterspec599)
                self.match(self.input, QUARTER, self.FOLLOW_QUARTER_in_quarterspec601)
        # Python 2 except syntax; `re` shadows the re module locally.
        except RecognitionException, re:
            self.reportError(re)
            self.recover(self.input, re)
    finally:
        pass
    return
def quarter_ordinals(self, ):
    """Parse a comma-separated list of month-of-quarter ordinals.

    ANTLR-generated rule method for the Groc grammar (Python 2 target);
    do not edit by hand.
    """
    try:
        try:
            pass
            pass
            self._state.following.append(self.FOLLOW_month_of_quarter_ordinal_in_quarter_ordinals620)
            self.month_of_quarter_ordinal()
            self._state.following.pop()
            # Zero or more ", ordinal" continuations.
            while True:
                alt14 = 2
                LA14_0 = self.input.LA(1)
                if (LA14_0 == COMMA) :
                    alt14 = 1
                if alt14 == 1:
                    pass
                    self.match(self.input, COMMA, self.FOLLOW_COMMA_in_quarter_ordinals623)
                    self._state.following.append(self.FOLLOW_month_of_quarter_ordinal_in_quarter_ordinals625)
                    self.month_of_quarter_ordinal()
                    self._state.following.pop()
                else:
                    break
        # Python 2 except syntax; `re` shadows the re module locally.
        except RecognitionException, re:
            self.reportError(re)
            self.recover(self.input, re)
    finally:
        pass
    return
def month_of_quarter_ordinal(self, ):
    """Parse one FIRST/SECOND/THIRD token and add the corresponding month of
    each quarter to month_set.

    ANTLR-generated rule method for the Groc grammar (Python 2 target);
    do not edit by hand.
    """
    offset = None
    try:
        try:
            pass
            offset = self.input.LT(1)
            # Accept any token whose type lies in the FIRST..THIRD range.
            if (FIRST <= self.input.LA(1) <= THIRD):
                self.input.consume()
                self._state.errorRecovery = False
            else:
                mse = MismatchedSetException(None, self.input)
                raise mse
            # Offset 0..2 from the first month of each quarter.
            jOffset = self.ValueOf(offset.type) - 1
            self.month_set = self.month_set.union(set([
                jOffset + self.ValueOf(JANUARY), jOffset + self.ValueOf(APRIL),
                jOffset + self.ValueOf(JULY), jOffset + self.ValueOf(OCTOBER)]))
        # Python 2 except syntax; `re` shadows the re module locally.
        except RecognitionException, re:
            self.reportError(re)
            self.recover(self.input, re)
    finally:
        pass
    return
def time_range(self, ):
    """Parse 'from <time> to <time>' and record both times as raw strings in
    start_time_string / end_time_string.

    ANTLR-generated rule method for the Groc grammar (Python 2 target);
    do not edit by hand.
    """
    start_time = None
    end_time = None
    try:
        try:
            pass
            pass
            self.match(self.input, FROM, self.FOLLOW_FROM_in_time_range673)
            pass
            start_time=self.match(self.input, TIME, self.FOLLOW_TIME_in_time_range680)
            self.start_time_string = start_time.text
            self.match(self.input, TO, self.FOLLOW_TO_in_time_range691)
            pass
            end_time=self.match(self.input, TIME, self.FOLLOW_TIME_in_time_range698)
            self.end_time_string = end_time.text
        # Python 2 except syntax; `re` shadows the re module locally.
        except RecognitionException, re:
            self.reportError(re)
            self.recover(self.input, re)
    finally:
        pass
    return
# --- ANTLR-generated lookahead tables (do not edit by hand). ---
# Serialized DFA for grammar decision #4, packed as escaped unicode strings
# and decoded by DFA.unpack at class-definition time.
DFA4_eot = DFA.unpack(
    u"\13\uffff"
    )

DFA4_eof = DFA.unpack(
    u"\13\uffff"
    )

DFA4_min = DFA.unpack(
    u"\1\6\1\23\1\12\1\uffff\2\4\1\13\1\uffff\1\24\1\12\1\4"
    )

DFA4_max = DFA.unpack(
    u"\1\20\2\32\1\uffff\1\5\1\12\1\20\1\uffff\2\32\1\12"
    )

DFA4_accept = DFA.unpack(
    u"\3\uffff\1\1\3\uffff\1\2\3\uffff"
    )

DFA4_special = DFA.unpack(
    u"\13\uffff"
    )

# State-transition rows, one packed string per DFA state.
DFA4_transition = [
    DFA.unpack(u"\1\1\2\3\2\uffff\6\2"),
    DFA.unpack(u"\1\4\7\5"),
    DFA.unpack(u"\1\6\10\uffff\1\4\7\5"),
    DFA.unpack(u""),
    DFA.unpack(u"\1\3\1\7"),
    DFA.unpack(u"\1\3\1\7\4\uffff\1\10"),
    DFA.unpack(u"\6\11"),
    DFA.unpack(u""),
    DFA.unpack(u"\7\12"),
    DFA.unpack(u"\1\6\10\uffff\1\4\7\5"),
    DFA.unpack(u"\1\3\1\7\4\uffff\1\10")
]

# Class alias used by the generated recognizer for decision #4.
DFA4 = DFA

# FOLLOW sets: the token types that may legally follow each rule reference,
# used by the ANTLR runtime for error recovery. Names encode
# <rule>_in_<calling rule><grammar position>.
FOLLOW_specifictime_in_timespec44 = frozenset([])
FOLLOW_interval_in_timespec48 = frozenset([])
FOLLOW_EOF_in_timespec52 = frozenset([1])
FOLLOW_ordinals_in_specifictime72 = frozenset([19, 20, 21, 22, 23, 24, 25, 26])
FOLLOW_weekdays_in_specifictime74 = frozenset([4])
FOLLOW_monthdays_in_specifictime77 = frozenset([4])
FOLLOW_OF_in_specifictime80 = frozenset([11, 12, 13, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40])
FOLLOW_monthspec_in_specifictime83 = frozenset([5])
FOLLOW_quarterspec_in_specifictime85 = frozenset([5])
FOLLOW_ordinals_in_specifictime101 = frozenset([19, 20, 21, 22, 23, 24, 25, 26])
FOLLOW_weekdays_in_specifictime103 = frozenset([5])
FOLLOW_TIME_in_specifictime117 = frozenset([1])
FOLLOW_EVERY_in_interval136 = frozenset([7, 8])
FOLLOW_set_in_interval146 = frozenset([17, 18])
FOLLOW_period_in_interval164 = frozenset([1, 9, 41])
FOLLOW_time_range_in_interval176 = frozenset([1])
FOLLOW_SYNCHRONIZED_in_interval189 = frozenset([1])
FOLLOW_EVERY_in_ordinals218 = frozenset([1])
FOLLOW_ordinal_in_ordinals226 = frozenset([1, 10])
FOLLOW_COMMA_in_ordinals229 = frozenset([11, 12, 13, 14, 15, 16])
FOLLOW_ordinal_in_ordinals231 = frozenset([1, 10])
FOLLOW_set_in_ordinal252 = frozenset([1])
FOLLOW_set_in_period291 = frozenset([1])
FOLLOW_monthday_in_monthdays314 = frozenset([1, 10])
FOLLOW_COMMA_in_monthdays318 = frozenset([7, 8])
FOLLOW_monthday_in_monthdays320 = frozenset([1, 10])
FOLLOW_set_in_monthday340 = frozenset([1])
FOLLOW_DAY_in_weekdays365 = frozenset([1])
FOLLOW_weekday_in_weekdays373 = frozenset([1, 10])
FOLLOW_COMMA_in_weekdays376 = frozenset([19, 20, 21, 22, 23, 24, 25, 26])
FOLLOW_weekday_in_weekdays378 = frozenset([1, 10])
FOLLOW_set_in_weekday400 = frozenset([1])
FOLLOW_MONTH_in_monthspec459 = frozenset([1])
FOLLOW_months_in_monthspec469 = frozenset([1])
FOLLOW_month_in_months486 = frozenset([1, 10])
FOLLOW_COMMA_in_months489 = frozenset([27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39])
FOLLOW_month_in_months491 = frozenset([1, 10])
FOLLOW_set_in_month510 = frozenset([1])
FOLLOW_QUARTER_in_quarterspec583 = frozenset([1])
FOLLOW_quarter_ordinals_in_quarterspec595 = frozenset([27])
FOLLOW_MONTH_in_quarterspec597 = frozenset([4])
FOLLOW_OF_in_quarterspec599 = frozenset([40])
FOLLOW_QUARTER_in_quarterspec601 = frozenset([1])
FOLLOW_month_of_quarter_ordinal_in_quarter_ordinals620 = frozenset([1, 10])
FOLLOW_COMMA_in_quarter_ordinals623 = frozenset([11, 12, 13, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40])
FOLLOW_month_of_quarter_ordinal_in_quarter_ordinals625 = frozenset([1, 10])
FOLLOW_set_in_month_of_quarter_ordinal644 = frozenset([1])
FOLLOW_FROM_in_time_range673 = frozenset([5])
FOLLOW_TIME_in_time_range680 = frozenset([42])
FOLLOW_TO_in_time_range691 = frozenset([5])
FOLLOW_TIME_in_time_range698 = frozenset([1])
def main(argv, stdin=sys.stdin, stdout=sys.stdout, stderr=sys.stderr):
    """Command-line entry point: run the generated parser through the ANTLR
    ParserMain driver, wiring it to the given standard streams.

    The local name `main` intentionally shadows this function (generated
    code); it holds the ParserMain driver instance.
    """
    from antlr3.main import ParserMain
    main = ParserMain("GrocLexer", GrocParser)
    main.stdin = stdin
    main.stdout = stdout
    main.stderr = stderr
    main.execute(argv)


if __name__ == '__main__':
    main(sys.argv)
| |
from coralillo.fields import Field, Relation, MultipleRelation, SingleRelation
from coralillo.datamodel import debyte_hash, debyte_string
from coralillo.errors import ValidationErrors, UnboundModelError, BadField, ModelNotFoundError
from coralillo.utils import snake_case, parse_embed
from coralillo.auth import PermissionHolder
from coralillo.queryset import QuerySet
from coralillo import Engine
from itertools import starmap
import json
import re
def get_fields(cls):
    ''' Yields (name, field) pairs for every public attribute of `cls` that is
    a coralillo Field instance. Evaluation is lazy, like the original
    filter/map chain. '''
    for name in dir(cls):
        if name.startswith('_'):
            continue

        attr = getattr(cls, name)

        if isinstance(attr, Field):
            yield (name, attr)
def get_no_relation_fields(cls):
    ''' Like get_fields() but with Relation fields excluded; yields lazily. '''
    return (
        (name, field)
        for name, field in get_fields(cls)
        if not isinstance(field, Relation)
    )
class Form:
    ''' Parent class of the Model class, defines validation and other useful
    functions. '''

    def __init__(self):
        # This allows fast queries for set relations
        self._old = dict()

        # Initialize every declared field to None; real values are assigned by
        # validate() or by subclass constructors.
        for fieldname, field in get_fields(type(self)):
            setattr(
                self,
                fieldname,
                None
            )

    @classmethod
    def validate(cls, **kwargs):
        ''' Validates the data received as keyword arguments whose name match
        this class attributes.

        Returns a new, populated (but not persisted) instance on success.
        Raises ValidationErrors carrying every BadField collected. '''
        # errors can store multiple errors
        # obj is an instance in case validation succeeds
        # redis is needed for database validation
        errors = ValidationErrors()
        obj = cls()
        redis = cls.get_redis()

        # Check the fields
        for fieldname, field in get_fields(cls):
            if not field.fillable:
                # Non-fillable fields ignore user input and take the default.
                value = field.default
            else:
                try:
                    value = field.validate(obj, kwargs.get(fieldname), redis)
                except BadField as e:
                    # Collect the error and keep validating remaining fields.
                    errors.append(e)
                    continue

            setattr(
                obj,
                fieldname,
                value
            )

        # Check for custom validation rules
        for fieldname in dir(cls):
            rule = getattr(cls, fieldname)

            # Rules are marked by a decorator setting _is_validation_rule.
            if hasattr(rule, '_is_validation_rule') and rule._is_validation_rule:
                try:
                    rule(obj)
                except BadField as e:
                    errors.append(e)

        # Trigger errors if any
        if errors.has_errors():
            raise errors

        # Return the object with the new data set
        return obj

    def __str__(self):
        # e.g. "<User name='john' age=20>" — every field with its repr.
        return '<{} {}>'.format(type(self).__name__, ' '.join(starmap(
            lambda fn, f: '{}={}'.format(fn, repr(getattr(self, fn))),
            get_fields(type(self)),
        )))

    @classmethod
    def get_engine(cls):
        ''' Returns the Engine bound to this model via Meta.engine; raises
        UnboundModelError when no engine has been set. '''
        try:
            return cls.Meta.engine
        except AttributeError:
            raise UnboundModelError('The model {} is not bound to any engine'.format(cls))

    @classmethod
    def set_engine(cls, neweng):
        ''' Sets the given coralillo engine so the model uses it to communicate
        with the redis database '''
        assert isinstance(neweng, Engine), 'Provided object must be of class Engine'

        if hasattr(cls, 'Meta'):
            cls.Meta.engine = neweng
        else:
            # No Meta container yet: create one on the fly.
            class Meta:
                engine = neweng

            cls.Meta = Meta

    @classmethod
    def get_redis(cls):
        ''' Shortcut for the redis connection of the bound engine. '''
        return cls.get_engine().redis
class Model(Form):
    '''
    Defines a model that communicates with the Redis database.
    '''
    # When True, create/update/delete events are published on this model's
    # redis pub/sub channels (class channel and per-object channel).
    notify = False

    def __init__(self, id=None, **kwargs):
        super().__init__()
        # Generate this object's id using the provided id function
        self.id = id if id else self.get_engine().id_function()
        self._persisted = False

        for fieldname, field in get_no_relation_fields(type(self)):
            value = field.init(kwargs.get(fieldname))
            setattr(self, fieldname, value)

    def save(self):
        ''' Persists this object to the database. Each field knows how to store
        itself so we don't have to worry about it '''
        redis = type(self).get_redis()
        pipe = redis.pipeline()

        pipe.hset(self.key(), 'id', self.id)

        for fieldname, field in get_no_relation_fields(type(self)):
            field.save(self, getattr(self, fieldname), pipe)

        pipe.sadd(type(self).members_key(), self.id)
        pipe.execute()

        if self.notify:
            data = json.dumps({
                'event': 'create' if not self._persisted else 'update',
                'data': self.to_json(),
            })

            redis.publish(type(self).cls_key(), data)
            redis.publish(self.key(), data)

        self._persisted = True

        return self

    def update(self, **kwargs):
        ''' validates the given data against this object's rules and then
        updates '''
        redis = type(self).get_redis()
        errors = ValidationErrors()

        for fieldname, field in get_fields(type(self)):
            if not field.fillable:
                continue

            given = kwargs.get(fieldname)

            # Absent or None keys mean "leave this field untouched".
            if given is None:
                continue

            try:
                value = field.validate(self, given, redis)
            except BadField as e:
                errors.append(e)
                continue

            setattr(self, fieldname, value)

        if errors.has_errors():
            raise errors

        return self.save()

    @staticmethod
    def is_object_key(key):
        ''' checks if the given key belongs to an object. Its easy since it
        depends on the key ending like: ':obj'. Returns a truthy match object
        or None. '''
        return re.match(r'^.*:obj$', key)

    @classmethod
    def get(cls, id):
        ''' Retrieves an object by id. Returns None in case of failure '''
        if not id:
            return None

        redis = cls.get_redis()
        key = '{}:{}:obj'.format(cls.cls_key(), id)

        if not redis.exists(key):
            return None

        obj = cls(id=id)
        obj._persisted = True

        data = debyte_hash(redis.hgetall(key))

        for fieldname, field in get_fields(cls):
            value = field.recover(obj, data, redis)
            setattr(obj, fieldname, value)

        return obj

    @classmethod
    def q(cls, **kwargs):
        ''' Creates an iterator over the members of this class that applies the
        given filters and returns only the elements matching them '''
        # kwargs is accepted for interface compatibility; filters are applied
        # via the returned QuerySet.
        redis = cls.get_redis()

        return QuerySet(cls, redis.sscan_iter(cls.members_key()))

    @classmethod
    def count(cls):
        ''' returns object count for this model '''
        redis = cls.get_redis()

        return redis.scard(cls.members_key())

    def reload(self):
        ''' reloads this object so if it was updated in the database it now
        contains the new values. Raises ModelNotFoundError if the object was
        deleted in the meantime. '''
        key = self.key()
        redis = type(self).get_redis()

        if not redis.exists(key):
            raise ModelNotFoundError('This object has been deleted')

        data = debyte_hash(redis.hgetall(key))

        for fieldname, field in get_fields(type(self)):
            # Bug fix: recover() takes (obj, data, redis) as in get(); the
            # object argument was previously missing here.
            value = field.recover(self, data, redis)
            setattr(self, fieldname, value)

        return self

    @classmethod
    def get_or_exception(cls, id):
        ''' Tries to retrieve an instance of this model from the database or
        raises an exception in case of failure '''
        obj = cls.get(id)

        if obj is None:
            raise ModelNotFoundError('This object does not exist in database')

        return obj

    @classmethod
    def get_by(cls, field, value):
        ''' Tries to retrieve an instance of this model from the database
        given a value for a defined index. Return None in case of failure '''
        redis = cls.get_redis()
        key = cls.cls_key() + ':index_' + field

        id = redis.hget(key, value)

        if id:
            return cls.get(debyte_string(id))

        return None

    @classmethod
    def get_by_or_exception(cls, field, value):
        ''' Like get_by() but raises ModelNotFoundError instead of returning
        None. '''
        obj = cls.get_by(field, value)

        if obj is None:
            raise ModelNotFoundError('This object does not exist in database')

        return obj

    @classmethod
    def all(cls):
        ''' Gets all available instances of this model from the database '''
        redis = cls.get_redis()

        return [
            cls.get(debyte_string(member))
            for member in redis.smembers(cls.members_key())
        ]

    @classmethod
    def tree_match(cls, field, string):
        ''' Given a tree index, retrieves the ids attached to the given prefix;
        think of it as a mechanism for pattern subscription, where two models
        attached to `a` and `a:b` respectively are found by the `a:b` string,
        because both models' subscription keys match the string.

        Returns the matched objects sorted by id (or an empty set for a falsy
        string, preserving the historical interface). '''
        if not string:
            return set()

        redis = cls.get_redis()
        prefix = '{}:tree_{}'.format(cls.cls_key(), field)
        pieces = string.split(':')

        # Union of the sets stored under every prefix of the string:
        # a, a:b, a:b:c, ...
        ans = redis.sunion(
            prefix + ':' + ':'.join(pieces[0:i + 1])
            for i in range(len(pieces))
        )

        return sorted(
            (cls.get(debyte_string(member)) for member in ans),
            key=lambda x: x.id,
        )

    @classmethod
    def cls_key(cls):
        ''' Returns the redis key prefix assigned to this model '''
        return snake_case(cls.__name__)

    @classmethod
    def members_key(cls):
        ''' This key holds a set whose members are the ids that exist of objects
        from this class '''
        return cls.cls_key() + ':members'

    def key(self):
        ''' Returns the redis key to access this object's values '''
        prefix = type(self).cls_key()

        return '{}:{}:obj'.format(prefix, self.id)

    def fqn(self):
        ''' Returns a fully qualified name for this object '''
        prefix = type(self).cls_key()

        return '{}:{}'.format(prefix, self.id)

    def permission(self, restrict=None):
        ''' Returns a fully qualified key name to a permission over this object
        '''
        if restrict is None:
            return self.fqn()

        return self.fqn() + '/' + restrict

    def to_json(self, *, include=None):
        ''' Serializes this model to a JSON representation so it can be sent
        via an HTTP REST API.

        `include` optionally restricts the output to the listed field names;
        it supports '*' as a wildcard and 'relation.subfield' paths for
        embedding related objects. '''
        # Renamed from `json` to avoid shadowing the json module.
        data = dict()

        if include is None or 'id' in include or '*' in include:
            data['id'] = self.id

        if include is None or '_type' in include or '*' in include:
            data['_type'] = type(self).cls_key()

        def fieldfilter(fieldtuple):
            # Public, non-relation fields allowed by the include list.
            return \
                not fieldtuple[1].private and \
                not isinstance(fieldtuple[1], Relation) and (
                    include is None or fieldtuple[0] in include or '*' in include
                )

        data.update(dict(starmap(
            lambda fn, f: (fn, f.to_json(getattr(self, fn))),
            filter(
                fieldfilter,
                get_fields(type(self))
            )
        )))

        # Embed requested relations, recursing with their sub-includes.
        for relation_name, subfields in parse_embed(include):
            if not hasattr(type(self), relation_name):
                continue

            descriptor = getattr(type(self), relation_name)

            if not isinstance(descriptor, Relation):
                continue

            relation = getattr(self, relation_name)

            if isinstance(descriptor, MultipleRelation):
                data[relation_name] = [
                    obj.to_json(include=subfields) for obj in relation.all()
                ]
            elif isinstance(descriptor, SingleRelation):
                related = relation.get()
                data[relation_name] = related.to_json(include=subfields) if related is not None else None

        return data

    def __eq__(self, other):
        ''' Compares this object to another. Returns true if both are of the
        same class and have the same properties. Returns false otherwise.
        A plain string compares against this object's id. '''
        if isinstance(other, str):
            return self.id == other

        if type(self) != type(other):
            return False

        return self.id == other.id

    def delete(self):
        ''' Deletes this model from the database, calling delete in each field
        to properly delete special cases '''
        redis = type(self).get_redis()

        for fieldname, field in get_fields(type(self)):
            field._delete(self, redis)

        redis.delete(self.key())
        redis.srem(type(self).members_key(), self.id)

        if isinstance(self, PermissionHolder):
            redis.delete(self.allow_key())

        if self.notify:
            data = json.dumps({
                'event': 'delete',
                'data': self.to_json(),
            })

            redis.publish(type(self).cls_key(), data)
            redis.publish(self.key(), data)

        return self
class BoundedModel(Model):
    ''' A bounded model is bounded to a prefix in the database '''

    @classmethod
    def prefix(cls):
        ''' Subclasses must override this with the namespace prefix that
        scopes their redis keys. '''
        raise NotImplementedError('Bounded models must implement the prefix function')

    @classmethod
    def cls_key(cls):
        ''' The model's key prefix, qualified by the bounding prefix. '''
        return '{}:{}'.format(cls.prefix(), snake_case(cls.__name__))
| |
# Copyright 2020 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Test TBR Matched Markets Diagnostics.
"""
from matched_markets.methodology import tbrmmdesignparameters
from matched_markets.methodology import tbrmmdiagnostics
import numpy as np
import unittest
TBRMMDesignParameters = tbrmmdesignparameters.TBRMMDesignParameters
TBRMMDiagnostics = tbrmmdiagnostics.TBRMMDiagnostics
class TBRMMDiagnosticsTest(unittest.TestCase):
  """Tests for TBRMMDiagnostics initialization and x/y/corr properties."""

  def setUp(self):
    """Set up design parameters and a correlated (x, y) pretest series."""
    super().setUp()
    self.par = TBRMMDesignParameters(n_test=14, iroas=3.0, min_corr=0.8,
                                     sig_level=0.9, power_level=0.5,
                                     flevel=0.9)
    # Corr(x, y) = 0.900.
    y = (-1.4, -0.8, -0.8, -0.7, -0.6, -0.5, -0.5, -0.5, -0.2,
         -0.2, -0.0, 0.3, 0.4, 0.5, 0.6, 0.6, 1.0, 1.3, 1.6, 1.9, 2.2)
    x = (-0.8, -1.1, -1.2, -0.9, -1.0, -0.3, -0.4, -1.2, 0.5, 0.0,
         0.1, -0.5, 0.4, 0.6, 1.1, -0.1, 1.3, 1.4, 1.8, 1.4, 1.6)
    self.x = np.array(x)
    self.y = np.array(y)
    self.x_error_msg = 'x must be a one-dimensional vector'
    self.y_error_msg = 'y must be a one-dimensional vector'
    self.twodim_array = np.array([list(range(10)), list(range(10))])
    self.corr = np.corrcoef(x, y)[0, 1]
    self.xy_short = (1, 2)  # Minimum length = 3.

  def testInit(self):
    """The object must be properly initialized."""
    obj = TBRMMDiagnostics(self.y, self.par)
    self.assertTrue(all(obj.y == self.y))
    self.assertIsNone(obj.x)
    self.assertIsNone(obj.corr)
    self.assertIsNone(obj.required_impact)

  def testYPropertySetter(self):
    """The y property setter works."""
    obj = TBRMMDiagnostics(self.y, self.par)
    obj.y = self.x  # Change value.
    self.assertTrue(all(obj.y == self.x))

  def testYPropertyDimension(self):
    """The y property requires a (1-dimensional) vector."""
    with self.assertRaisesRegex(ValueError, self.y_error_msg):
      TBRMMDiagnostics(self.twodim_array, self.par)
    obj = TBRMMDiagnostics(self.y, self.par)
    with self.assertRaisesRegex(ValueError, self.y_error_msg):
      obj.y = self.twodim_array

  def testYPropertyLength(self):
    """The y property must satisfy a minimum length requirement."""
    with self.assertRaisesRegex(ValueError, 'y must have length >= 3'):
      TBRMMDiagnostics(self.xy_short, self.par)

  def testYPropertyNoneValue(self):
    """The y property disallows None."""
    with self.assertRaisesRegex(ValueError, self.y_error_msg):
      TBRMMDiagnostics(None, self.par)
    obj = TBRMMDiagnostics(self.y, self.par)
    with self.assertRaisesRegex(ValueError, self.y_error_msg):
      obj.y = None

  def testXProperty(self):
    """The x property setter/getter works."""
    obj = TBRMMDiagnostics(self.y, self.par)
    obj.x = self.x
    self.assertTrue(all(obj.x == self.x))

  def testXPropertyLength(self):
    """The x property must have length = length of y."""
    with self.assertRaisesRegex(
        ValueError,
        r'x must have the same length as y \(21\)'):
      obj = TBRMMDiagnostics(self.y, self.par)
      obj.x = self.xy_short

  def testXPropertyBadValue(self):
    """The x property requires a (1-dimensional) vector."""
    obj = TBRMMDiagnostics(self.y, self.par)
    with self.assertRaisesRegex(ValueError, self.x_error_msg):
      obj.x = self.twodim_array

  def testXPropertyNoneOk(self):
    """The x property can be set to None."""
    obj = TBRMMDiagnostics(self.y, self.par)
    obj.x = self.x
    self.assertIsNotNone(obj.x)
    obj.x = None
    self.assertIsNone(obj.x)

  def testCorrProperty(self):
    """The corr property returns the correlation if x is set."""
    obj = TBRMMDiagnostics(self.y, self.par)
    obj.x = self.x
    self.assertAlmostEqual(obj.corr, 0.9, places=3)

  def testCorrPropertyResetsToNone(self):
    """The corr property returns None if x is reset to None."""
    obj = TBRMMDiagnostics(self.y, self.par)
    obj.x = self.x
    self.assertIsNotNone(obj.corr)
    obj.x = None
    self.assertIsNone(obj.corr)

  def testEstimateImpactCorrOutOfBound(self):
    """Bad values of corr are caught."""
    obj = TBRMMDiagnostics(self.y, self.par)
    for bad_corr in (-1, 1):
      with self.assertRaisesRegex(ValueError, 'corr must be between -1 and 1'):
        obj.estimate_required_impact(bad_corr)
class ImpactEstimateTest(TBRMMDiagnosticsTest):
  """Tests for the required-impact estimate of TBRMMDiagnostics."""

  # tuples = (n_test, n, flevel, sig_level, power_level)
  # for the estimate without the sigma term.
  parameters = {1: (14, 21, 0.90, 0.90, 0.80),
                2: (28, 21, 0.90, 0.90, 0.80),
                3: (14, 14, 0.90, 0.90, 0.80),
                4: (14, 21, 0.99, 0.90, 0.80),
                5: (14, 21, 0.90, 0.95, 0.80),
                6: (14, 21, 0.90, 0.90, 0.90)}
  # Known-good values of f() for the parameter sets above.
  correct_values = {1: 11.055, 2: 18.272, 3: 12.533,
                    4: 11.841, 5: 13.083, 6: 13.413}

  def setUp(self):
    """Set up the diagnostics object under test."""
    super().setUp()
    self.obj = TBRMMDiagnostics(self.y, self.par)

  def testRequiredImpactGivenCorr(self):
    """The impact estimates are correctly calculated.

    The formula can be decomposed into 2 components:

      impact = sigma(corr, y) * f(n_test, n, flevel, sig_level, power_level)

    where

      sigma(corr, y) = sqrt(y, ddof=2) * sqrt(1 - corr ** 2)

    We'll test the function f() against known values, and check further that the
    impact function varies as expected based on different values of y and corr.
    """
    for corr in (0.5, 0.9):
      for key, params in self.parameters.items():
        n_test, n, flevel, sig_level, power_level = params
        y = self.y[:n]
        par = TBRMMDesignParameters(n_test=n_test, iroas=3.0, flevel=flevel,
                                    sig_level=sig_level,
                                    power_level=power_level)
        diag = TBRMMDiagnostics(y, par)
        sigma = np.std(y, ddof=2) * np.sqrt(1 - corr ** 2)
        correct_estimate = self.correct_values[key] * sigma
        self.assertAlmostEqual(diag.estimate_required_impact(corr),
                               correct_estimate,
                               places=3)

  def testRequiredImpactProperty(self):
    """Required impact must be correctly calculated, given 'x'."""
    diag = self.obj
    diag.x = self.x
    self.assertEqual(
        diag.required_impact,
        diag.estimate_required_impact(diag.corr))

  def testRequiredImpactPropertyResets(self):
    """Required impact resets if x is reset."""
    diag = self.obj
    diag.x = self.x
    diag.x = None
    self.assertIsNone(diag.required_impact)
class PretestFitTest(TBRMMDiagnosticsTest):
  """Tests for the pretestfit property (pretest linear regression fit)."""

  def setUp(self):
    """Set up a diagnostics object and the expected regression estimates."""
    super().setUp()
    obj = TBRMMDiagnostics(self.y, self.par)
    obj.x = self.x
    self.obj = obj
    # The expected linear regression estimates.
    #
    # The linear regression y = a + bx + epsilon yields:
    # b = corr(x, y) * sqrt(var(y) / var(x))
    # a = mean(y) - b * mean(x)
    # sigma = sd(y, ddof=2) * sqrt(1 - rho ** 2)
    self.sigma = np.std(obj.y, ddof=2) * np.sqrt(1 - self.corr ** 2)
    self.b = 0.900 * np.std(obj.y) / np.std(obj.x)
    self.a = np.mean(obj.y) - self.b * np.mean(obj.x)
    self.resid = obj.y - self.a - self.b * obj.x

  def testNoneIfXIsNone(self):
    """The pretestfit attribute must return None if x is None."""
    obj = self.obj
    obj.x = None
    self.assertIsNone(self.obj.pretestfit)

  def testResultIsNamedTuple(self):
    """The pretestfit result is a named tuple."""
    result = self.obj.pretestfit
    a, b, sigma, resid = result
    self.assertIsInstance(result, tuple)
    self.assertIs(result.a, a)
    self.assertIs(result.b, b)
    self.assertIs(result.sigma, sigma)
    self.assertIs(result.resid, resid)

  def testAB(self):
    """The pretestfit values (a, b) must be correctly estimated."""
    a, b, sigma, resid = self.obj.pretestfit
    self.assertAlmostEqual(a, self.a, places=3)
    self.assertAlmostEqual(b, self.b, places=3)
    self.assertAlmostEqual(sigma, self.sigma, places=3)
    self.assertTrue(len(resid) == len(self.x))  # pylint: disable=g-generic-assert
    # The function assert_allclose returns None iff the numbers match.
    self.assertIsNone(np.testing.assert_allclose(resid, self.resid, atol=1e-3))
class BrownianBridgeBTest(TBRMMDiagnosticsTest):
  """Tests for the bbtest (Brownian bridge residual) property."""

  def setUp(self):
    """Set up a diagnostics object with x assigned."""
    super().setUp()
    obj = TBRMMDiagnostics(self.y, self.par)
    obj.x = self.x
    self.obj = obj

  def testResultIsANamedTuple(self):
    """The bbtest result is a named tuple."""
    result = self.obj.bbtest
    test_ok, abscumresid, bounds = result
    self.assertIsInstance(result, tuple)
    self.assertIs(result.test_ok, test_ok)
    self.assertIs(result.abscumresid, abscumresid)
    self.assertIs(result.bounds, bounds)

  def testGoodResult(self):
    """The default result is True."""
    result = self.obj.bbtest
    self.assertTrue(result.test_ok)

  def testBadResult(self):
    """An outlier causes the test to go False."""
    obj = TBRMMDiagnostics(self.y, self.par)
    x = list(self.x)
    x[10] = 10.0  # Make an outlier.
    obj.x = x
    result = obj.bbtest
    self.assertFalse(result.test_ok)
class TBRFitTest(TBRMMDiagnosticsTest):
  """Method tbrfit."""

  def setUp(self):
    """Set up the diagnostics object under test."""
    super().setUp()
    obj = TBRMMDiagnostics(self.y, self.par)
    self.obj = obj

  def testMandatoryArguments(self):
    """The two arguments xt, yt are mandatory."""
    with self.assertRaisesRegex(
        TypeError,
        r'missing 2 required positional arguments: \'xt\' and \'yt\''):
      self.obj.tbrfit()

  def testNoX(self):
    """The value is none if 'x' is not set."""
    self.assertIsNone(self.obj.tbrfit(0, 0))

  def testValue(self):
    """The value is a named tuple of length 4, led by (estimate, cihw)."""
    obj = self.obj
    obj.x = self.x
    result = obj.tbrfit(0, 0)
    self.assertTrue(len(result) == 4)  # pylint: disable=g-generic-assert
    self.assertEqual(result.estimate, result[0])
    self.assertEqual(result.cihw, result[1])

  def testCalculation(self):
    """The Credible interval half-width and the estimate are correct."""
    # Structure: ((n_test, sig_level, xt, yt), true_cihw, true_estimate).
    actual_results = {1: ((14, 0.9, 0, 0), 2.8047, -1.225),
                      2: ((28, 0.9, 0, 0), 4.7001, -2.45),
                      3: ((14, 0.8, 0, 0), 1.8187, -1.225),
                      4: ((14, 0.9, 10, 0), 18.0534, -123.725),
                      5: ((14, 0.9, 0, 10), 2.8047, 138.775)}
    for value in actual_results.values():
      args, cihw, estimate = value
      n_test, sig_level, xt, yt = args
      par = TBRMMDesignParameters(n_test=n_test, iroas=1.0,
                                  sig_level=sig_level)
      obj = TBRMMDiagnostics(self.y, par)
      obj.x = self.x
      self.assertAlmostEqual(obj.tbrfit(xt, yt).cihw, cihw, places=3)
      self.assertAlmostEqual(obj.tbrfit(xt, yt).estimate, estimate, places=3)
class DWTestTest(TBRMMDiagnosticsTest):
  """Property dwtest."""

  def setUp(self):
    """Set up a simple regressor series for the D-W test cases."""
    super().setUp()
    self.x2 = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]

  def testNoX(self):
    """The value is None if 'x' is not set."""
    diag = TBRMMDiagnostics(self.y, self.par)
    self.assertIsNone(diag.dwtest)

  def testDWStatistic(self):
    """The result is a DWTestResult object."""
    diag = TBRMMDiagnostics(self.y, self.par)
    diag.x = self.x
    self.assertIsInstance(diag.dwtest, tbrmmdiagnostics.DWTestResult)

  def testDWTestFailsBelow(self):
    """The D-W test fails for dwstat < 1.5."""
    y = [-1, 0, 1, 2, 3, 12.8, 3, 2, 1, 0]
    diag = TBRMMDiagnostics(y, self.par)
    diag.x = self.x2
    self.assertAlmostEqual(diag.dwtest.dwstat, 1.4996, places=4)
    self.assertFalse(diag.dwtest.test_ok)

  def testDWTestSucceedsLowerBound(self):
    """The D-W test succeeds for dwstat >= 1.5 and <= 2.5."""
    # A data set with autocorrelation, dw statistic 0.43.
    y = [-1, 0, 1, 2, 3, 12.9, 3, 2, 1, 0]
    diag = TBRMMDiagnostics(y, self.par)
    diag.x = self.x2
    self.assertAlmostEqual(diag.dwtest.dwstat, 1.5058, places=4)
    self.assertTrue(diag.dwtest.test_ok)

  def testDWTestSucceedsUpperBound(self):
    """The D-W test succeeds for dwstat >= 1.5 and <= 2.5."""
    y = [-1, 7.3, 1, 2, 3, 4, 3, 2, 1, 0]
    diag = TBRMMDiagnostics(y, self.par)
    diag.x = self.x2
    self.assertAlmostEqual(diag.dwtest.dwstat, 2.4974, places=4)
    self.assertTrue(diag.dwtest.test_ok)

  def testDWTestFailsAbove(self):
    """The D-W test fails for > 2.5."""
    y = [-1, 7.4, 1, 2, 3, 4, 3, 2, 1, 0]
    diag = TBRMMDiagnostics(y, self.par)
    diag.x = self.x2
    self.assertAlmostEqual(diag.dwtest.dwstat, 2.5119, places=4)
    self.assertFalse(diag.dwtest.test_ok)
class AATestTest(TBRMMDiagnosticsTest):
  """Property aatest."""

  def setUp(self):
    """Set up a correlated (x2, y2) pair with no built-in effect."""
    super().setUp()
    # Correlated time series without effect.
    self.x2 = np.array([132.5, 87.8, 89.4, 78.5, 117.3, 54.0, 134.9, 84.8,
                        106.4, 95.0, 129.2, 58.8, 93.6, 92.3, 122.7, 78.0, 96.6,
                        82.4, 100.8, 111.7, 78.0])
    self.y2 = np.array([487.9, 393.6, 388.8, 375.0, 420.9, 305.5, 451.1, 364.2,
                        423.4, 376.2, 450.5, 303.9, 370.3, 371.2, 445.1, 333.7,
                        397.9, 398.0, 416.4, 419.6, 338.2])
    self.n_test = 7
    self.par = TBRMMDesignParameters(n_test=self.n_test, iroas=1.0,
                                     sig_level=0.9)

  def testNoX(self):
    """The value is None if 'x' is not set."""
    diag = TBRMMDiagnostics(self.y, self.par)
    self.assertIsNone(diag.aatest)

  def testTooFewPretestDatapoints(self):
    """test_ok is None if the number of time points is < 2 + n_test."""
    n_pretest = 2
    n_test = 7
    y = list(range(n_pretest + n_test))
    x = y
    par = TBRMMDesignParameters(n_test=n_test, iroas=1.0)
    obj = TBRMMDiagnostics(y, par)
    obj.x = x
    self.assertIsNone(obj.aatest.test_ok)

  def testBounds(self):
    """The lower-upper bounds are correctly calculated."""
    diag = TBRMMDiagnostics(self.y2, self.par)
    diag.x = self.x2
    self.assertAlmostEqual(diag.aatest.bounds[0], -45.6, places=1)
    self.assertAlmostEqual(diag.aatest.bounds[1], 89.0, places=1)

  def testOkAndProbIsNoneIfBoundsEncloseZero(self):
    """Test succeeds & prob is not calculated if bounds enclose zero."""
    diag = TBRMMDiagnostics(self.y2, self.par)
    diag.x = self.x2
    self.assertLess(diag.aatest.bounds[0], 0)
    self.assertGreater(diag.aatest.bounds[1], 0)
    self.assertTrue(diag.aatest.test_ok)
    self.assertIsNone(diag.aatest.prob)

  def testProbIsCalculated(self):
    """Prob is calculated if lower bound is above zero."""
    n_test = self.n_test
    y = self.y2
    y[-n_test:] = y[-n_test:] + 9.0  # Add an effect.
    diag = TBRMMDiagnostics(y, self.par)
    diag.x = self.x2
    self.assertGreater(diag.aatest.bounds[0], 0)
    self.assertAlmostEqual(diag.aatest.prob, 0.038, places=3)

  def testTestOkPositiveEffectLowProb(self):
    """Test succeeds if effect is > 0 and prob <= 0.2."""
    n_test = self.n_test
    y = self.y2
    y[-n_test:] = y[-n_test:] + 10.08  # Add an effect.
    diag = TBRMMDiagnostics(y, self.par)
    diag.x = self.x2
    self.assertGreater(diag.aatest.bounds[0], 0)
    self.assertAlmostEqual(diag.aatest.prob, 0.199, places=3)
    self.assertTrue(diag.aatest.test_ok)

  def testTestFailsPositiveEffectHighProb(self):
    """Test fails if effect is > 0 and prob > 0.2."""
    n_test = self.n_test
    y = self.y2
    y[-n_test:] = y[-n_test:] + 10.09  # Add an effect.
    diag = TBRMMDiagnostics(y, self.par)
    diag.x = self.x2
    self.assertGreater(diag.aatest.bounds[0], 0)
    self.assertAlmostEqual(diag.aatest.prob, 0.202, places=3)
    self.assertFalse(diag.aatest.test_ok)

  def testTestOkNegativeEffectLowProb(self):
    """Test succeeds if effect is < 0 and prob <= 0.2."""
    n_test = self.n_test
    y = self.y2
    y[-n_test:] = y[-n_test:] - 16.275  # Add a negative effect.
    diag = TBRMMDiagnostics(y, self.par)
    diag.x = self.x2
    self.assertLess(diag.aatest.bounds[1], 0)
    self.assertAlmostEqual(diag.aatest.prob, 0.199, places=3)
    self.assertTrue(diag.aatest.test_ok)

  def testTestNotOkNegativeEffectHighProb(self):
    """Test fails if effect is < 0 and prob > 0.2."""
    n_test = self.n_test
    y = self.y2
    y[-n_test:] = y[-n_test:] - 16.29  # Add a negative effect.
    diag = TBRMMDiagnostics(y, self.par)
    diag.x = self.x2
    self.assertLess(diag.aatest.bounds[1], 0)
    self.assertAlmostEqual(diag.aatest.prob, 0.202, places=3)
    self.assertFalse(diag.aatest.test_ok)
class CorrTestTest(TBRMMDiagnosticsTest):
  """Tests for the correlation test property `corr_test`."""
  def testNoX(self):
    """The value is none if 'x' is not set."""
    diag = TBRMMDiagnostics(self.y, self.par)
    self.assertIsNone(diag.corr_test)
  def testDefaultPass(self):
    """The default correlation threshold is 0.80, higher correlations pass."""
    diag = TBRMMDiagnostics(self.y, self.par)
    # x is tuned so that corr(x, y) is just above the 0.80 threshold.
    diag.x = (-0.8, -1.1, -1.2, -0.9, -1.0, -0.3, -0.4, -1.2, 0.5, 0.35,
              1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0)
    self.assertAlmostEqual(diag.corr, 0.8002, places=4)
    self.assertTrue(diag.corr_test)
  def testDefaultFail(self):
    """The default correlation threshold is 0.80, lower correlations fail."""
    diag = TBRMMDiagnostics(self.y, self.par)
    # Same series with one value nudged so corr drops just below 0.80.
    diag.x = (-0.8, -1.1, -1.2, -0.9, -1.0, -0.3, -0.4, -1.2, 0.5, 0.36,
              1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0)
    self.assertAlmostEqual(diag.corr, 0.7999, places=4)
    self.assertFalse(diag.corr_test)
  def testChangeThreshold(self):
    """Changing correlation threshold changes the behavior of the test."""
    # Change the correlation threshold to 0.9. Lower correlations fail the test.
    par = TBRMMDesignParameters(n_test=14, iroas=3.0, min_corr=0.9)
    diag = TBRMMDiagnostics(self.y, par)
    diag.x = (-0.8, -1.1, -1.2, -0.9, -1.0, -0.3, -0.4, -1.2, 0.5, 0.0,
              0.1, -0.5, 0.4, 0.6, 1.1, -0.1, 1.3, 1.4, 1.8, 1.4, 1.59)
    self.assertAlmostEqual(diag.corr, 0.8997, places=4)
    self.assertFalse(diag.corr_test)
class TestsOkTest(TBRMMDiagnosticsTest):
  """Tests for the aggregate property `tests_ok`."""
  def setUp(self):
    super().setUp()
    # Correlated time series without effect. All tests pass.
    self.y0 = np.array([255.2, 165.8, 186.0, 160.9, 218.1, 165.7, 212.9, 207.7,
                        224.4, 205.0, 247.2, 145.1, 191.1, 173.6])
    self.x0 = np.array([132.5, 87.8, 89.4, 78.5, 117.3, 54.0, 134.9, 84.8,
                        106.4, 95.0, 129.2, 58.8, 93.6, 92.3])
    self.n_test = 7
    self.par = TBRMMDesignParameters(n_test=self.n_test, iroas=1.0)
  def testNoX(self):
    """The value is none if 'x' is not set."""
    diag = TBRMMDiagnostics(self.y, self.par)
    self.assertIsNone(diag.tests_ok)
  def testAllOkTestsOk(self):
    """If all tests pass, the result is True."""
    diag = TBRMMDiagnostics(self.y0, self.par)
    diag.x = self.x0
    self.assertTrue(diag.corr_test)
    self.assertTrue(diag.bbtest.test_ok)
    self.assertTrue(diag.dwtest.test_ok)
    self.assertTrue(diag.aatest.test_ok)
    self.assertTrue(diag.tests_ok)
  def testCorrelationTestFailsTestsFail(self):
    """If the correlation test fails, the result is False."""
    # With min_corr raised to 0.90, (self.x0, self.y0) fail only the
    # correlation test; the other three diagnostics still pass.
    par = TBRMMDesignParameters(n_test=self.n_test, iroas=1.0, min_corr=0.90)
    diag = TBRMMDiagnostics(self.y0, par)
    diag.x = self.x0
    self.assertFalse(diag.corr_test)  # Fails as corr. = 0.86 < 0.90.
    self.assertTrue(diag.bbtest.test_ok)
    self.assertTrue(diag.dwtest.test_ok)
    self.assertTrue(diag.aatest.test_ok)
    self.assertFalse(diag.tests_ok)
  def testDWTestFailsTestsFail(self):
    """If the D-W test fails, the result is False."""
    # The pair (x, y) below implies a failure for the D-W test only.
    x = np.array([132.5, 87.8, 89.4, 78.5, 117.3, 54.0, 134.9, 84.8, 106.4,
                  95.0, 129.2, 58.8, 93.6, 92.3, 122.7, 78.0, 96.6, 82.4,
                  100.8, 111.7, 78.0])
    y = np.array([487.9, 393.6, 388.8, 375.0, 420.9, 305.5, 451.1, 364.2, 423.4,
                  376.2, 450.5, 303.9, 370.3, 371.2, 445.1, 333.7, 397.9,
                  398.0, 416.4, 419.6, 338.2])
    diag = TBRMMDiagnostics(y, self.par)
    diag.x = x
    self.assertTrue(diag.corr_test)
    self.assertTrue(diag.bbtest.test_ok)
    self.assertFalse(diag.dwtest.test_ok)
    self.assertTrue(diag.aatest.test_ok)
    self.assertFalse(diag.tests_ok)
  def testBBTestFailsImpliesFail(self):
    """If only the Brownian Bridge test fails, the result is False."""
    # y is a copy of x with two early values perturbed, enough to break the
    # Brownian Bridge test but no other diagnostic.
    x = np.array([181., 69., 74., 46., 143., -15., 187., 62., 116., 88., 173.,
                  -3., 84., 81., 157., 45.])
    y = x.copy()
    y[0] = y[0] - 100.0
    y[1] = y[1] + 20.0
    par = TBRMMDesignParameters(n_test=self.n_test, iroas=1.0)
    diag = TBRMMDiagnostics(y, par)
    diag.x = x
    self.assertTrue(diag.corr_test)
    self.assertFalse(diag.bbtest.test_ok)
    self.assertTrue(diag.dwtest.test_ok)
    self.assertTrue(diag.aatest.test_ok)
    self.assertFalse(diag.tests_ok)
  def testAATestFailsTestsFail(self):
    """If the A/A Test fails, the result is False."""
    # The pair (self.x, self.y) implies a failure for the A/A test only.
    diag = TBRMMDiagnostics(self.y, self.par)
    diag.x = self.x
    self.assertTrue(diag.corr_test)
    self.assertTrue(diag.bbtest.test_ok)
    self.assertTrue(diag.dwtest.test_ok)
    self.assertFalse(diag.aatest.test_ok)
    self.assertFalse(diag.tests_ok)
  def testAATestNoneImpliesNone(self):
    """If the A/A Test returns None, the result is None."""
    # A/A Test returns None if there are not enough time points.
    x = np.array([181., 69., 74., 46., 143., -15., 187., 62.])
    y = x.copy()
    y[0] = y[0] + 1
    y[7] = y[7] + 1
    diag = TBRMMDiagnostics(y, self.par)
    diag.x = x
    self.assertTrue(diag.corr_test)
    self.assertTrue(diag.bbtest.test_ok)
    self.assertTrue(diag.dwtest.test_ok)
    self.assertIsNone(diag.aatest.test_ok)
    self.assertIsNone(diag.tests_ok)
if __name__ == '__main__':
  # Run the diagnostics test suite when this module is executed directly.
  unittest.main()
| |
import boto
import datetime
import matplotlib.pyplot as plt
import pandas as pd
from pyspark.sql import SparkSession
import smtplib
def timestamp(date, time):
    """Return seconds since the Unix epoch for the given date and time.

    `date` is a 'YYYY-MM-DD' string and `time` an 'HH:MM' string; both are
    treated as naive (timezone-less) wall-clock values.
    """
    parsed = datetime.datetime.strptime(date + time, '%Y-%m-%d%H:%M')
    epoch = datetime.datetime(1970, 1, 1)
    return (parsed - epoch).total_seconds()
def timestamp_hour(date, time):
    """Return epoch seconds for the start of the hour containing date/time.

    Minutes are discarded, so '...05:59' and '...05:00' map to the same
    value. Inputs are naive 'YYYY-MM-DD' / 'HH:MM' strings.
    """
    parsed = datetime.datetime.strptime(date + time, '%Y-%m-%d%H:%M')
    # strptime with this format never sets seconds/microseconds, so zeroing
    # the minute alone truncates to the hour boundary.
    truncated = parsed.replace(minute=0)
    return (truncated - datetime.datetime(1970, 1, 1)).total_seconds()
def hour_dif(date, time, fdate, ftime):
    """Return whole hours from date/time to the future fdate/ftime.

    Both instants are truncated to their hour before differencing, so the
    result is an exact multiple-of-one float (negative if fdate/ftime is
    earlier).
    """
    fmt = '%Y-%m-%d%H:%M'
    start = datetime.datetime.strptime(date + time, fmt).replace(minute=0)
    end = datetime.datetime.strptime(fdate + ftime, fmt).replace(minute=0)
    return (end - start).total_seconds() / 3600
def load():
    """Load the persisted Parquet dataframes and register their SQL views.

    Relies on the module-level `spark` session created in __main__. Each
    dataframe is cached because it is queried repeatedly per city.

    Returns:
        Tuple (cityDF, cityDayDF, weatherDF, forecastDF, statsDF).
    """
    cityDF = spark.read.parquet("cityDF.parquet").cache()
    cityDayDF = spark.read.parquet("cityDayDF.parquet").cache()
    weatherDF = spark.read.parquet("weatherDF.parquet").cache()
    forecastDF = spark.read.parquet("forecastDF.parquet").cache()
    statsDF = spark.read.parquet("statsDF.parquet").cache()
    # Sql Views
    cityDF.createOrReplaceTempView('cityV')
    cityDayDF.createOrReplaceTempView('cityDayV')
    weatherDF.createOrReplaceTempView('weatherV')
    forecastDF.createOrReplaceTempView('forecastV')
    statsDF.createOrReplaceTempView('statsV')
    return cityDF, cityDayDF, weatherDF, forecastDF, statsDF
def preprocess():
    """Preprocess dataframes for use with all cities.

    Uses the SQL views registered by load() plus the `timestamp`/`hour_dif`
    UDFs registered in __main__.

    Returns:
        Tuple (cityDay1DF, weather1DF, forecast2DF), all cached since they
        are re-filtered once per city in city_pages().
    """
    # Keep only most recent day for each city
    cityDay1DF = spark.sql(
        "select * \
        from \
            (select * \
            , row_number() over(partition by city \
                                order by date desc) as rk \
            from cityDayV) \
        where rk=1").drop('rk')
    # Keep only most recent day/time for each city
    weather1DF = spark.sql(
        "select * \
        from \
            (select * \
            , row_number() over(partition by city \
                order by cast(timestamp(date, time) as int) desc) as rk \
            from weatherV) \
        where rk=1").drop('rk')
    # Keep the 240 most recent forecast rows (10 days of hourly forecasts)
    # per city, and derive hours-ahead and the forecast-time epoch seconds.
    forecast1DF = spark.sql(
        "select * \
            ,cast(hour_dif(date, time, fdate, ftime) as int) as fhours \
            ,cast(timestamp(fdate, ftime) as int) as fts \
        from \
            (select * \
            , row_number() over(partition by city \
                order by cast(timestamp(date, time) as int) desc) as rk \
            from forecastV) \
        where rk<=240").drop('rk')
    forecast1DF.createOrReplaceTempView('forecast1V')
    # Join forecasts with the per-horizon error statistics (SD columns)
    # keyed on the number of forecast hours ahead.
    forecast2DF = spark.sql(
        "select \
            f.city \
            ,f.fts \
            ,cast(f.temp as int) as temp \
            ,cast(f.humidity as int) as humidity \
            ,cast(f.wind_mph as int) as wind_mph \
            ,cast(f.feelslike as int) as feelslike \
            ,s.tempSD \
            ,s.humiditySD \
            ,s.wind_mphSD \
            ,s.feelslikeSD \
        from forecast1V f, statsV s \
        where f.fhours = s.fhours")
    # Cache since you will reuse with each city
    cityDay1DF.cache()
    weather1DF.cache()
    forecast2DF.cache()
    return cityDay1DF, weather1DF, forecast2DF
def city_stats(city):
    """Build the static city-stats HTML table for *city*.

    Queries the `cityV` Spark SQL view, transposes the single-row result
    and renders it with prettified row labels.

    Returns:
        The table HTML as a single line (newlines stripped).
    """
    # The query is assembled by string concatenation; escape single quotes
    # so names like "Coeur d'Alene" cannot break the SQL literal (and to
    # blunt injection through the city value).
    safe_city = city.replace("'", "''")
    cityP = spark.sql(
        "select \
            city \
            ,state \
            ,country \
            ,elevation \
            ,latitude \
            ,longitude \
            ,local_tz_offset \
        from cityV \
        where city = '" + safe_city + "'").toPandas().T
    # Prettify labels: 'local_tz_offset' -> 'Local Tz Offset'.
    cityP.index = [i.replace('_', ' ').title() for i in cityP.index]
    cityHTML = cityP.to_html(header=False).replace('\n', '')
    return cityHTML
def cityDay_stats(city):
    """Obtain City/Day Stats HTML.

    Returns:
        Tuple (histHTML, sunMoonHTML): the historical low/high table and
        the sun/moon rise/set schedule for *city*'s most recent day row.
    """
    # Unpack the single most-recent cityDay row into local scalars; the
    # select order below must match the unpacking order exactly.
    normal_high, \
    record_high, \
    record_high_year, \
    normal_low, \
    record_low, \
    record_low_year, \
    sunrise_hour, \
    sunrise_minute, \
    sunset_hour, \
    sunset_minute, \
    moonrise_hour, \
    moonrise_minute, \
    moonset_hour, \
    moonset_minute = cityDay1DF.filter(cityDay1DF['city'] == city).select(
        'normal_high',
        'record_high',
        'record_high_year',
        'normal_low',
        'record_low',
        'record_low_year',
        'sunrise_hour',
        'sunrise_minute',
        'sunset_hour',
        'sunset_minute',
        'moonrise_hour',
        'moonrise_minute',
        'moonset_hour',
        'moonset_minute').take(1)[0]
    histP = pd.DataFrame(
        {'Normal': [normal_low, normal_high],
         'Record': [record_low, record_high],
         'Record Year': [record_low_year, record_high_year]})
    histP.index = ['Low', 'High']
    histHTML = histP.to_html(header=True).replace('\n', '')
    # A missing moonset leaves both components empty (':' after joining);
    # render that as an empty cell instead.
    moonset = '' if (moonset_hour + ':' + moonset_minute) == ':' else \
        (moonset_hour + ':' + moonset_minute)
    # Transpose and sort descending so 'Sun' appears above 'Moon'.
    sunMoonP = pd.DataFrame(
        {'Sun': [sunrise_hour + ':' + sunrise_minute,
                 sunset_hour + ':' + sunset_minute],
         'Moon': [moonrise_hour + ':' + moonrise_minute,
                  moonset]}) \
        .T.sort_index(ascending=False)
    sunMoonP.columns = ['Rise', 'Set']
    sunMoonHTML = sunMoonP.to_html(header=True).replace('\n', '')
    return histHTML, sunMoonHTML
def _plot_to_s3(forecastP, columns, title, filepath):
    """Render a lower/point/upper forecast plot and publish the PNG to S3.

    Dashed black lines are the 95% bounds; solid blue is the point
    forecast. The figure is saved locally at *filepath* and uploaded to the
    module-level `bucket` under the same key.
    """
    fimage = forecastP.plot(x='Date',
                            y=columns,
                            legend=False,
                            title=title,
                            figsize=(12, 6),
                            style=['k--', 'b-', 'k--']).get_figure()
    fimage.savefig(filepath)
    index_key = bucket.new_key(filepath)
    index_key.set_contents_from_filename(filepath, policy='public-read')
def forecast_stats(city):
    """Create the four forecast plots for *city* and return the HTML table.

    Pulls the city's rows from the module-level `forecast2DF`, derives 95%
    normal-approximation bounds from the per-horizon SD columns, uploads
    one PNG per metric to S3, and returns the point-estimate table HTML.
    """
    # Forecast to Pandas dataframe, ordered by forecast timestamp.
    forecastP = forecast2DF.filter(forecast2DF['city'] == city) \
        .orderBy('fts').toPandas()
    forecastP['Date'] = forecastP.fts.apply(
        lambda x: datetime.datetime.fromtimestamp(x))
    forecastP['fts2'] = forecastP.fts.apply(
        lambda x: datetime.datetime.strftime(
            datetime.datetime.fromtimestamp(x), "%m/%d %H:00"))
    # 95% bounds (point +/- 1.96 * SD) for every forecast metric.
    for col in ('temp', 'humidity', 'wind_mph', 'feelslike'):
        margin = 1.96 * forecastP[col + 'SD']
        forecastP[col + '_lower'] = forecastP[col] - margin
        forecastP[col + '_upper'] = forecastP[col] + margin
    # Clamp physically bounded quantities.
    forecastP['humidity_lower'] = forecastP['humidity_lower'].clip(lower=0)
    forecastP['humidity_upper'] = forecastP['humidity_upper'].clip(upper=100)
    forecastP['wind_mph_lower'] = forecastP['wind_mph_lower'].clip(lower=0)
    # Create plots (png files) and save to S3, one per metric.
    _plot_to_s3(forecastP, ['temp_lower', 'temp', 'temp_upper'],
                'Forecast Temperature', 'images/' + city + 'Temp.png')
    _plot_to_s3(forecastP, ['humidity_lower', 'humidity', 'humidity_upper'],
                'Forecast Humidity', 'images/' + city + 'Hum.png')
    _plot_to_s3(forecastP, ['wind_mph_lower', 'wind_mph', 'wind_mph_upper'],
                'Forecast Wind Speed', 'images/' + city + 'Wind.png')
    _plot_to_s3(forecastP, ['feelslike_lower', 'feelslike',
                            'feelslike_upper'],
                'Forecast "Feels Like"', 'images/' + city + 'Feel.png')
    # Close all figures so you don't have too many open at once!
    plt.close('all')
    # Create Forecast Table
    forecast1P = forecastP[['fts2', 'temp', 'humidity', 'wind_mph',
                            'feelslike']].set_index('fts2')
    forecast1P.index.name = None  # `del df.index.name` is deprecated
    forecast1P.columns = ['Temperature', 'Humidity', 'Wind Speed',
                          'Feels Like']
    forecastHTML = forecast1P.to_html(header=True).replace('\n', '')
    return forecastHTML
def city_html(city, cityHTML, histHTML, sunMoonHTML, current_temp,
              forecastHTML):
    """Assemble the per-city page and publish it to the S3 bucket.

    The page is written to the bucket key '<city>.html' (lowercased,
    spaces removed) as public-read text/html.
    """
    # Bug fix: image sources previously used backslashes ("images\\..."),
    # which never match the forward-slash S3 keys the PNGs are uploaded
    # under (URLs always use forward slashes), so the images were broken.
    images = ''.join(
        '<img src="images/' + city + suffix + '.png" '
        'style="width:800px;height:400px;">'
        for suffix in ('Temp', 'Hum', 'Wind', 'Feel'))
    html = '<!DOCTYPE html><html><h1>' +\
        city + '</h1><h2>' +\
        str(current_temp) + '°F' +\
        '</h2><h3>City Stats</h3><p>' +\
        cityHTML +\
        '</p><h3>Sun & Moon Schedule</h3><p>' +\
        sunMoonHTML +\
        '</p><h3>Today\'s Historical Temperatures</h3><p>' +\
        histHTML +\
        '</p><h3>10-Day Forecast</h3><p>' +\
        '</p><h4>95% Confidence Interval Plots</h4><p>' +\
        images +\
        '</p><h4>Table of Point Estimates</h4><p>' +\
        forecastHTML +\
        '</p></html>'
    city_key = bucket.new_key(city.lower().replace(' ', '') + '.html')
    city_key.content_type = 'text/html'
    city_key.set_contents_from_string(html, policy='public-read')
    return
def city_pages():
    """Create webpages for each city.

    Iterates every city in `cityDF`, builds its stats/plots/forecast HTML
    and publishes the page to S3.

    Returns:
        Dict mapping city name -> current temperature, used by index_html().
    """
    current_weather = []
    for c in sorted(cityDF.select('city').collect()):
        city = c['city']
        # City Stats
        cityHTML = city_stats(city)
        # Day Stats
        histHTML, sunMoonHTML = cityDay_stats(city)
        # Current Weather
        current_temp = weather1DF.filter(weather1DF['city'] == city)\
            .take(1)[0]['temp']
        current_weather.append((city, current_temp))
        # Forecast Weather
        forecastHTML = forecast_stats(city)
        # Create Web-Page on S3
        city_html(city, cityHTML, histHTML, sunMoonHTML, current_temp,
                  forecastHTML)
    current_weather = {row[0]: row[1] for row in current_weather}
    return current_weather
def index_html(current_weather):
    """Build and publish the index page linking every city page.

    Args:
        current_weather: dict mapping city name -> current temperature,
            as returned by city_pages().
    """
    html = '<!DOCTYPE html><html><head><h1>WEATHER</h1></head>' +\
        '<body><p>Please select a city:<br><br>'
    for c in sorted(cityDF.select('city').collect()):
        city = c['city']
        # Each entry: temperature, then a link to '<city>.html'.
        html += '&nbsp;&nbsp;' + str(current_weather[city]) +\
            '°F&nbsp;&nbsp;<a href="' + city.lower().replace(' ', '') +\
            '.html">' +\
            city + '</a><br>'
    html += '</p></body></html>'
    index_key = bucket.new_key('index.html')
    index_key.content_type = 'text/html'
    index_key.set_contents_from_string(html, policy='public-read')
    return
def error_html():
    """Build and publish the 404/error page to the S3 bucket.

    Bug fix: the original concatenated fragments without separating spaces,
    rendering "looking fordoes not exist" and "the<a href...".
    """
    html = '<!DOCTYPE html><html><head><h3>The page you are looking for ' +\
        'does not exist.</h3></head><body><p>Please return to the ' +\
        '<a href="index.html">Weather Homepage</a></p></body></html>'
    error_key = bucket.new_key('error.html')
    error_key.content_type = 'text/html'
    error_key.set_contents_from_string(html, policy='public-read')
    return
def send_email():
    """Send a job-completion notification e-mail.

    NOTE(review): sender/recipient and the SMTP host ('localhost') are
    hard-coded; confirm the machine running this job has a local SMTP
    listener.
    """
    sender = 'student.seeber@galvanize.it'
    receivers = ['mseeber101@gmail.com']
    # Raw RFC-822 style header declaring an HTML body.
    header = 'To: ' + receivers[0] + '\n' + 'From: ' + sender + '\n' + \
        'MIME-Version: 1.0\nContent-type: text/html \nSubject: Report Job\n'
    message = header + """
    <!DOCTYPE html>
    <html>
    Report Job Complete
    </html>
    """
    smtpObj = smtplib.SMTP('localhost')
    smtpObj.sendmail(sender, receivers, message)
    return
if __name__ == "__main__":
    # Start Spark (module-level so the functions above can reference it)
    spark = SparkSession.builder.getOrCreate()
    spark.sparkContext.setLogLevel('ERROR')
    # Connect to the S3 bucket that hosts the generated site
    conn = boto.connect_s3(host='s3.amazonaws.com')
    bucket_name = 'myweatherproject'
    bucket = conn.get_bucket(bucket_name)
    # Register UDFs used inside the spark.sql() queries
    spark.udf.register("timestamp", timestamp)
    spark.udf.register("timestamp_hour", timestamp_hour)
    spark.udf.register("hour_dif", hour_dif)
    # Load DataFrames
    cityDF, cityDayDF, weatherDF, forecastDF, statsDF = load()
    # Preprocess DataFrames for Use with All Cities
    cityDay1DF, weather1DF, forecast2DF = preprocess()
    # Create Web-Pages for Each City and
    # Return Current Weather Dictionary to Use in Creating the Index
    current_weather = city_pages()
    # Create Index Web-Page
    index_html(current_weather)
    # Create Error Web-Page
    error_html()
    # Send Completion E-mail
    send_email()
| |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.Dataset.cache()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from os import path
import shutil
import tempfile
from absl.testing import parameterized
import numpy as np
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import context
from tensorflow.python.framework import combinations
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training.tracking import util as trackable_utils
class FileCacheTest(test_base.DatasetTestBase, parameterized.TestCase):
  """Tests for `Dataset.cache(filename)` backed by on-disk cache files."""
  def setUp(self):
    super(FileCacheTest, self).setUp()
    # Fresh temp dir per test; cache files live under this prefix.
    self.tmp_dir = tempfile.mkdtemp()
    self.cache_prefix = path.join(self.tmp_dir, "cache")
  def tearDown(self):
    if self.tmp_dir:
      shutil.rmtree(self.tmp_dir, ignore_errors=True)
    super(FileCacheTest, self).tearDown()
  @combinations.generate(test_base.default_test_combinations())
  def testCacheDatasetPassthrough(self):
    """Caching neither alters elements nor shapes, and replays from disk."""
    components = (np.array([1, 2, 3, 4]), np.array([5, 6, 7, 8]),
                  np.array([9.0, 10.0, 11.0, 12.0]))
    def dataset_fn(count=5, filename=None):
      repeat_dataset = (
          dataset_ops.Dataset.from_tensor_slices(components).repeat(count))
      if filename:
        return repeat_dataset.cache(filename)
      else:
        return repeat_dataset
    self.assertEqual(
        tuple([c.shape[1:] for c in components]),
        dataset_ops.get_legacy_output_shapes(dataset_fn()))
    get_next = self.getNext(dataset_fn())
    # First run without caching to collect the "ground truth".
    elements = []
    for _ in range(20):
      elements.append(self.evaluate(get_next()))
    with self.assertRaises(errors.OutOfRangeError):
      self.evaluate(get_next())
    # Assert that the cached dataset has the same elements as the
    # "ground truth".
    get_next = self.getNext(dataset_fn(filename=self.cache_prefix))
    cached_elements = []
    for _ in range(20):
      cached_elements.append(self.evaluate(get_next()))
    with self.assertRaises(errors.OutOfRangeError):
      self.evaluate(get_next())
    self.assertAllEqual(elements, cached_elements)
    # Re-initialize with an empty upstream (to throw errors.OutOfRangeError
    # if we didn't use the cache).
    get_next = self.getNext(dataset_fn(count=0, filename=self.cache_prefix))
    replayed_elements = []
    for _ in range(20):
      replayed_elements.append(self.evaluate(get_next()))
    with self.assertRaises(errors.OutOfRangeError):
      self.evaluate(get_next())
    self.assertEqual(cached_elements, replayed_elements)
    # Re-initialize with an empty upstream and a missing cache file (should
    # throw errors.OutOfRangeError immediately).
    get_next = self.getNext(
        dataset_fn(count=0, filename=self.cache_prefix + "nonsense"))
    with self.assertRaises(errors.OutOfRangeError):
      self.evaluate(get_next())
  @combinations.generate(test_base.default_test_combinations())
  def testConcurrentWriters(self):
    """Two datasets sharing a cache file: only one writer is allowed."""
    components = (np.array([1, 2, 3, 4]), np.array([5, 6, 7, 8]),
                  np.array([9.0, 10.0, 11.0, 12.0]))
    cache_dataset1 = (
        dataset_ops.Dataset.from_tensor_slices(components).cache(
            self.cache_prefix))
    cache_dataset2 = (
        dataset_ops.Dataset.from_tensor_slices(components).cache(
            self.cache_prefix))
    get_next1 = self.getNext(cache_dataset1)
    get_next2 = self.getNext(cache_dataset2)
    self.evaluate(get_next1())  # this should succeed
    with self.assertRaises(errors.AlreadyExistsError):
      self.evaluate(get_next2())
    self.evaluate(get_next1())  # this should continue to succeed
  @combinations.generate(test_base.default_test_combinations())
  def testConcurrentReaders(self):
    """After the cache is fully written, multiple readers may interleave."""
    components = (np.array([1, 2, 3, 4]), np.array([5, 6, 7, 8]),
                  np.array([9.0, 10.0, 11.0, 12.0]))
    cache_dataset1 = (
        dataset_ops.Dataset.from_tensor_slices(components).cache(
            self.cache_prefix))
    cache_dataset2 = (
        dataset_ops.Dataset.from_tensor_slices(components).cache(
            self.cache_prefix))
    get_next1 = self.getNext(cache_dataset1)
    get_next2 = self.getNext(cache_dataset2)
    elements = []
    for _ in range(4):
      elements.append(self.evaluate(get_next1()))
    with self.assertRaises(errors.OutOfRangeError):
      self.evaluate(get_next1())
    # Re-initialize
    get_next1 = self.getNext(cache_dataset1, requires_initialization=True)
    get_next2 = self.getNext(cache_dataset2, requires_initialization=True)
    # Reading concurrently should succeed.
    elements_itr1 = []
    elements_itr2 = []
    elements_itr2.append(self.evaluate(get_next2()))
    elements_itr1.append(self.evaluate(get_next1()))
    elements_itr2.append(self.evaluate(get_next2()))
    elements_itr1.append(self.evaluate(get_next1()))
    # Intentionally reversing the order
    elements_itr1.append(self.evaluate(get_next1()))
    elements_itr2.append(self.evaluate(get_next2()))
    elements_itr1.append(self.evaluate(get_next1()))
    elements_itr2.append(self.evaluate(get_next2()))
    with self.assertRaises(errors.OutOfRangeError):
      self.evaluate(get_next2())
    with self.assertRaises(errors.OutOfRangeError):
      self.evaluate(get_next1())
    self.assertAllEqual(elements, elements_itr1)
    self.assertAllEqual(elements, elements_itr2)
  @combinations.generate(test_base.default_test_combinations())
  def testReadingPastEndOfSequence(self):
    """Repeating a cached dataset replays it correctly past the first pass."""
    dataset = dataset_ops.Dataset.range(10).cache(self.cache_prefix)
    dataset = dataset.map(lambda a: a).batch(4).repeat(2)
    expected_output = [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]] * 2
    self.assertDatasetProduces(dataset, expected_output)
  @combinations.generate(test_base.default_test_combinations())
  def testCleaningUpCacheFiles(self):
    """Partially consumed iterators must clean up their cache files."""
    def do_test(i):
      # Consume i elements (possibly past the end), then drop the iterator.
      dataset = dataset_ops.Dataset.range(10).cache(self.cache_prefix)
      get_next = self.getNext(dataset)
      for _ in range(i):
        try:
          self.evaluate(get_next())
        except errors.OutOfRangeError:
          break
    if not context.executing_eagerly():
      self.skipTest(
          "Test requires eager mode for iterators to be deconstructed")
    for i in [0, 3, 10, 12, 15]:
      do_test(i)
class MemoryCacheTest(test_base.DatasetTestBase, parameterized.TestCase):
  """Tests for in-memory `Dataset.cache()` (no filename)."""
  @combinations.generate(test_base.default_test_combinations())
  def testCacheDatasetPassthrough(self):
    """A cached dataset replays old elements even after upstream changes."""
    with ops.device("cpu:0"):
      repeat_count = variables.Variable(constant_op.constant(10, dtypes.int64))
      dataset = dataset_ops.Dataset.range(3).flat_map(
          lambda x: dataset_ops.Dataset.from_tensors(x).repeat(repeat_count))
      cached_dataset = dataset.cache().repeat(2)
      uncached_dataset = dataset.repeat(2)
      self.evaluate(repeat_count.initializer)
      # Needs to be initializable to capture the variable.
      cached_next = self.getNext(cached_dataset, requires_initialization=True)
      uncached_next = self.getNext(
          uncached_dataset, requires_initialization=True)
      for i in range(3):
        for _ in range(10):
          self.assertEqual(self.evaluate(cached_next()), i)
          self.assertEqual(self.evaluate(uncached_next()), i)
      # Empty the upstream dataset by zeroing the captured repeat count.
      self.evaluate(repeat_count.assign(0))
      # The uncached iterator should now be empty.
      with self.assertRaises(errors.OutOfRangeError):
        self.evaluate(uncached_next())
      # The cached iterator replays from cache.
      for i in range(3):
        for _ in range(10):
          self.assertEqual(self.evaluate(cached_next()), i)
      # The cached iterator should now be empty.
      with self.assertRaises(errors.OutOfRangeError):
        self.evaluate(cached_next())
  @combinations.generate(test_base.default_test_combinations())
  def testEmptyCacheReading(self):
    """Caching an empty dataset yields an empty dataset."""
    components = (np.array([1, 2, 3, 4]), np.array([5, 6, 7, 8]),
                  np.array([9.0, 10.0, 11.0, 12.0]))
    repeat_dataset = (
        dataset_ops.Dataset.from_tensor_slices(components).repeat(0))
    cache_dataset = repeat_dataset.cache()
    self.assertDatasetProduces(cache_dataset, expected_output=[])
  @combinations.generate(test_base.default_test_combinations())
  def testConcurrentReaders(self):
    """Two derived datasets can read one memory cache concurrently."""
    dataset_fn = lambda: dataset_ops.Dataset.range(5).cache()
    d1 = dataset_fn().map(lambda x: x + 1)
    d2 = dataset_fn().map(lambda x: x + 6)
    get_next1 = self.getNext(d1)
    self.assertEqual(1, self.evaluate(get_next1()))
    self.assertEqual(2, self.evaluate(get_next1()))
    self.assertEqual(3, self.evaluate(get_next1()))
    get_next2 = self.getNext(d2)
    self.assertEqual(6, self.evaluate(get_next2()))
    self.assertEqual(7, self.evaluate(get_next2()))
    self.assertEqual(4, self.evaluate(get_next1()))  # interleave execution
    self.assertEqual([8, 5],
                     [self.evaluate(get_next2()),
                      self.evaluate(get_next1())])
    self.assertEqual(9, self.evaluate(get_next2()))
    self.assertEqual(10, self.evaluate(get_next2()))
    with self.assertRaises(errors.OutOfRangeError):
      self.evaluate(get_next2())
    with self.assertRaises(errors.OutOfRangeError):
      self.evaluate(get_next1())
  @combinations.generate(test_base.default_test_combinations())
  def testCacheTakeRepeat(self):
    """take() after cache() limits each repeat epoch."""
    dataset = dataset_ops.Dataset.range(10).cache().take(5).repeat(2)
    expected_output = [0, 1, 2, 3, 4, 0, 1, 2, 3, 4]
    self.assertDatasetProduces(dataset, expected_output=expected_output)
  @combinations.generate(test_base.default_test_combinations())
  def testCacheRepeatEpochs(self):
    """map() side effects run only during the first (caching) epoch."""
    counter = variables.Variable(0)
    self.evaluate(counter.initializer)
    def increment_fn(x):
      counter.assign_add(1)
      return x
    dataset = dataset_ops.Dataset.range(10).map(increment_fn).cache().repeat(2)
    get_next = self.getNext(dataset, requires_initialization=True)
    # first epoch
    for i in range(10):
      self.assertEqual(i, self.evaluate(counter))
      self.assertEqual(i, self.evaluate(get_next()))
    # second epoch
    for i in range(10):
      self.assertEqual(10, self.evaluate(counter))
      self.assertEqual(i, self.evaluate(get_next()))
    with self.assertRaises(errors.OutOfRangeError):
      self.evaluate(get_next())
  @combinations.generate(combinations.combine(tf_api_version=2, mode="eager"))
  def testCacheIterationEpochs(self):
    """Eager for-loop iteration also populates and then reuses the cache."""
    counter = variables.Variable(0)
    self.evaluate(counter.initializer)
    def increment_fn(x):
      counter.assign_add(1)
      return x
    dataset = dataset_ops.Dataset.range(10).map(increment_fn).cache()
    # first epoch
    i = 0
    for elem in dataset:
      self.assertEqual(i, self.evaluate(elem))
      i += 1
    self.assertEqual(i, self.evaluate(counter))
    # second epoch
    i = 0
    for elem in dataset:
      self.assertEqual(10, self.evaluate(counter))
      self.assertEqual(i, self.evaluate(elem))
      i += 1
  @combinations.generate(combinations.combine(tf_api_version=2, mode="eager"))
  def testCacheV2ResourceCapture(self):
    """A cached dataset captured inside interleave keeps its resource alive."""
    def make_dataset():
      ids = dataset_ops.Dataset.range(10)
      ids = ids.cache()
      def interleave_fn(dataset, _):
        return dataset
      dataset = dataset_ops.Dataset.range(1)
      dataset = dataset.interleave(functools.partial(interleave_fn, ids))
      return dataset
    results = []
    for elem in make_dataset():
      results.append(elem.numpy())
    self.assertAllEqual(results, range(10))
  @combinations.generate(combinations.combine(tf_api_version=2, mode="eager"))
  def testCacheV2ConcurrentIterators(self):
    """Two concurrent eager iterators over one cached dataset stay in sync."""
    dataset = dataset_ops.Dataset.range(10).cache()
    it1 = iter(dataset)
    it2 = iter(dataset)
    for i in range(10):
      self.assertEqual(next(it1), i)
      self.assertEqual(next(it2), i)
  @combinations.generate(combinations.combine(tf_api_version=2, mode="eager"))
  def testCacheKnownCardinality(self):
    # Check that a dataset which produces random permutation of range(10) ends
    # up being cached when we read all of its element but do not reach EOF.
    dataset = dataset_ops.Dataset.range(10)
    dataset = dataset.shuffle(10, reshuffle_each_iteration=True).cache()
    it = iter(dataset)
    results = []
    for _ in range(10):
      results.append(next(it))
    # A second pass must replay the exact same (cached) permutation.
    it = iter(dataset)
    for i in range(10):
      self.assertEqual(next(it), results[i])
  @combinations.generate(test_base.eager_only_combinations())
  def testCheckpointFinishedCache(self):
    """Restoring a checkpointed, fully-consumed iterator stays exhausted."""
    num_elements = 10
    ds = dataset_ops.Dataset.range(num_elements)
    ds = ds.cache()
    iterator = iter(ds)
    for i in range(num_elements):
      self.assertEqual(next(iterator).numpy(), i)
    ckpt = trackable_utils.Checkpoint(iterator=iterator)
    manager = checkpoint_management.CheckpointManager(
        ckpt, self.get_temp_dir(), max_to_keep=1)
    manager.save()
    manager.restore_or_initialize()
    with self.assertRaises(StopIteration):
      next(iterator)
  @combinations.generate(test_base.eager_only_combinations())
  def testCheckpointLargeCache(self):
    """Checkpointing succeeds even when the cache exceeds 2GB."""
    # Tensor of size 100M
    dataset = dataset_ops.Dataset.from_tensors(
        array_ops.ones((25, 1000, 1000), dtype=dtypes.float32))
    # Repeat 25 times to exceed the 2G proto limit
    dataset = dataset.repeat(25)
    dataset = dataset.cache()
    # Iterate to fill the cache.
    iterator = iter(dataset)
    for _ in range(23):
      next(iterator)
    ckpt = trackable_utils.Checkpoint(iterator=iterator)
    manager = checkpoint_management.CheckpointManager(
        ckpt, self.get_temp_dir(), max_to_keep=1)
    manager.save()
if __name__ == "__main__":
  # Run the cache-dataset test suite when executed directly.
  test.main()
| |
import os, os.path, shutil
from django.test import TestCase
from scrapy import log, signals
from scrapy.utils.project import get_project_settings
settings = get_project_settings()
from twisted.internet import reactor
from scrapy.crawler import Crawler
from scrapy.xlib.pydispatch import dispatcher
from dynamic_scraper.spiders.django_spider import DjangoSpider
from dynamic_scraper.spiders.django_checker import DjangoChecker
from dynamic_scraper.spiders.checker_test import CheckerTest
from dynamic_scraper.models import *
from scraper.models import EventWebsite, Event, EventItem
# Tests need webserver for serving test pages: python manage.py runserver 0.0.0.0:8010
class EventSpider(DjangoSpider):
    """Spider scraping Event items for a single EventWebsite reference object."""
    name = 'event_spider'
    def __init__(self, *args, **kwargs):
        # kwargs must carry the EventWebsite id; _set_ref_object loads the
        # model instance into self.ref_object.
        self._set_ref_object(EventWebsite, **kwargs)
        self.scraper = self.ref_object.scraper
        self.scrape_url = self.ref_object.url
        self.scheduler_runtime = self.ref_object.scraper_runtime
        self.scraped_obj_class = Event
        self.scraped_obj_item_class = EventItem
        # NOTE(review): `self` is passed twice here (bound call plus explicit
        # positional). This mirrors the django-dynamic-scraper examples, but
        # confirm DjangoSpider.__init__ really expects the extra argument.
        super(EventSpider, self).__init__(self, *args, **kwargs)
class DjangoWriterPipeline(object):
    """Scrapy pipeline persisting scraped items as Django model objects."""

    def process_item(self, item, spider):
        """Attach reference/runtime objects, normalize and save the item.

        Returns the saved item so downstream pipelines keep processing it.
        """
        item['event_website'] = spider.ref_object
        # Every persisted event gets its own checker scheduler runtime.
        checker_rt = SchedulerRuntime()
        checker_rt.save()
        item['checker_runtime'] = checker_rt
        # Normalize a missing or NULL description to the empty string.
        # (Idiom fix: `'x' not in item` / `is None` instead of
        # `not 'x' in item` / `== None`.)
        if 'description' not in item or item['description'] is None:
            item['description'] = u''
        item.save()
        return item
class EventChecker(DjangoChecker):
    """Checker validating a single scraped Event's detail page."""
    name = 'event_checker'
    def __init__(self, *args, **kwargs):
        # kwargs must carry the Event id; _set_ref_object loads it.
        self._set_ref_object(Event, **kwargs)
        self.scraper = self.ref_object.event_website.scraper
        self.scrape_url = self.ref_object.url
        self.scheduler_runtime = self.ref_object.checker_runtime
        # NOTE(review): `self` is passed twice (bound call plus explicit
        # positional), same pattern as EventSpider -- confirm the upstream
        # DjangoChecker.__init__ signature actually expects this.
        super(EventChecker, self).__init__(self, *args, **kwargs)
class ScraperTest(TestCase):
    """Base TestCase wiring a django-dynamic-scraper fixture to Scrapy.

    ``__init__`` selects a Scrapy settings module based on the test method
    name (the image-storage tests each need dedicated settings) and copies
    the relevant values into ``self.dds_settings``. ``setUp`` pushes those
    values into the live Scrapy settings, builds the crawler and creates
    the whole Scraper/ScrapedObjClass/EventWebsite database fixture.
    """
    SERVER_URL = 'http://localhost:8010/static/'
    PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__))
    IMG_DIR = './scraper/imgs/'
    def __init__(self, *args, **kwargs):
        # args[0] is the name of the test method about to run; the image
        # store format tests each need their own settings module.
        if args[0] == 'test_img_store_format_flat_with_thumbs' or args[0] == 'test_delete_with_img_flat_with_thumbs':
            os.environ['SCRAPY_SETTINGS_MODULE'] = 'settings.images_store_format_flat_with_thumbs';
            from settings import images_store_format_flat_with_thumbs as file_settings
        elif args[0] == 'test_img_store_format_all_no_thumbs' or args[0] == 'test_delete_with_img_all_no_thumbs':
            os.environ['SCRAPY_SETTINGS_MODULE'] = 'settings.images_store_format_all_no_thumbs';
            from settings import images_store_format_all_no_thumbs as file_settings
        elif args[0] == 'test_img_store_format_all_with_thumbs' or args[0] == 'test_delete_with_img_all_with_thumbs':
            os.environ['SCRAPY_SETTINGS_MODULE'] = 'settings.images_store_format_all_with_thumbs';
            from settings import images_store_format_all_with_thumbs as file_settings
        elif args[0] == 'test_img_store_format_thumbs_with_thumbs' or args[0] == 'test_delete_with_img_thumbs_with_thumbs':
            os.environ['SCRAPY_SETTINGS_MODULE'] = 'settings.images_store_format_thumbs_with_thumbs';
            from settings import images_store_format_thumbs_with_thumbs as file_settings
        else:
            os.environ['SCRAPY_SETTINGS_MODULE'] = 'settings.base_settings';
            from settings import base_settings as file_settings
        # Copy the values setUp will push into the live Scrapy settings.
        self.dds_settings = {}
        self.dds_settings['ITEM_PIPELINES'] = file_settings.ITEM_PIPELINES
        self.dds_settings['SPLASH_URL'] = file_settings.SPLASH_URL
        self.dds_settings['DUPEFILTER_CLASS'] = file_settings.DUPEFILTER_CLASS
        self.dds_settings['DOWNLOADER_MIDDLEWARES'] = file_settings.DOWNLOADER_MIDDLEWARES
        self.dds_settings['DSCRAPER_SPLASH_ARGS'] = file_settings.DSCRAPER_SPLASH_ARGS
        self.dds_settings['IMAGES_STORE'] = file_settings.IMAGES_STORE
        # Optional settings, only present in some settings modules.
        if 'IMAGES_THUMBS' in file_settings.__dict__:
            self.dds_settings['IMAGES_THUMBS'] = file_settings.IMAGES_THUMBS
        if 'DSCRAPER_IMAGES_STORE_FORMAT' in file_settings.__dict__:
            self.dds_settings['DSCRAPER_IMAGES_STORE_FORMAT'] = file_settings.DSCRAPER_IMAGES_STORE_FORMAT
        super(ScraperTest, self).__init__(*args, **kwargs)
    def record_signal(self, *args, **kwargs):
        # No-op receiver so dispatcher.connect in setUp has a harmless sink.
        pass
        #print kwargs
    def run_event_spider(self, id, do_action='yes'):
        # Run EventSpider for the EventWebsite with the given pk and block
        # until the reactor stops (spider_closed is wired to reactor.stop).
        kwargs = {
            'id': id,
            'do_action': do_action,
        }
        self.spider = EventSpider(**kwargs)
        self.crawler.crawl(self.spider)
        self.crawler.start()
        #log.start()
        reactor.run()
    def run_event_checker(self, id):
        # Run EventChecker for the Event with the given pk.
        kwargs = {
            'id': id,
            'do_action': 'yes'
        }
        self.checker = EventChecker(**kwargs)
        self.crawler.crawl(self.checker)
        self.crawler.start()
        #log.start()
        reactor.run()
    def run_checker_test(self, id):
        # Run the checker self-test spider for the scraper with the given pk.
        kwargs = {
            'id': id,
        }
        self.checker_test = CheckerTest(**kwargs)
        self.checker_test.conf['RUN_TYPE'] = 'TASK'
        self.checker_test.conf['DO_ACTION'] = True
        self.checker_test.conf['LOG_ENABLED'] = True
        self.checker_test.conf['LOG_LEVEL'] = 'DEBUG'
        self.crawler.crawl(self.checker_test)
        self.crawler.start()
        reactor.run()
    def setUp(self):
        # Start from an empty image directory for every test.
        if os.path.exists(self.IMG_DIR):
            shutil.rmtree(self.IMG_DIR)
        os.mkdir(self.IMG_DIR)
        # Push the per-test settings chosen in __init__ into the shared
        # Scrapy settings object with cmdline priority so they win.
        settings.set('ITEM_PIPELINES', self.dds_settings['ITEM_PIPELINES'], priority='cmdline')
        settings.set('SPLASH_URL', self.dds_settings['SPLASH_URL'], priority='cmdline')
        settings.set('DUPEFILTER_CLASS', self.dds_settings['DUPEFILTER_CLASS'], priority='cmdline')
        settings.set('DOWNLOADER_MIDDLEWARES', self.dds_settings['DOWNLOADER_MIDDLEWARES'], priority='cmdline')
        settings.set('IMAGES_STORE', self.dds_settings['IMAGES_STORE'], priority='cmdline')
        if 'IMAGES_THUMBS' in self.dds_settings:
            settings.set('IMAGES_THUMBS', self.dds_settings['IMAGES_THUMBS'], priority='cmdline')
        if 'DSCRAPER_IMAGES_STORE_FORMAT' in self.dds_settings:
            settings.set('DSCRAPER_IMAGES_STORE_FORMAT', self.dds_settings['DSCRAPER_IMAGES_STORE_FORMAT'], priority='cmdline')
        self.crawler = Crawler(settings)
        # Stop the Twisted reactor as soon as the spider closes so the
        # run_* helpers above can return.
        self.crawler.signals.connect(reactor.stop, signal=signals.spider_closed)
        self.crawler.configure()
        # Scraped object class with its four attributes (base/title/url/desc).
        self.sc = ScrapedObjClass(name='Event')
        self.sc.save()
        self.soa_base = ScrapedObjAttr(name=u'base', attr_type='B', obj_class=self.sc)
        self.soa_base.save()
        self.soa_title = ScrapedObjAttr(name=u'title', attr_type='S', obj_class=self.sc)
        self.soa_title.save()
        self.soa_url = ScrapedObjAttr(name=u'url', attr_type='U', obj_class=self.sc, id_field=True)
        self.soa_url.save()
        self.soa_desc = ScrapedObjAttr(name=u'description', attr_type='S', obj_class=self.sc)
        self.soa_desc.save()
        # Scraper with one XPath scraper element per attribute.
        self.scraper = Scraper(name=u'Event Scraper', scraped_obj_class=self.sc, status='A',)
        self.scraper.save()
        self.se_base = ScraperElem(scraped_obj_attr=self.soa_base, scraper=self.scraper,
            x_path=u'//ul/li', from_detail_page=False)
        self.se_base.save()
        self.se_title = ScraperElem(scraped_obj_attr=self.soa_title, scraper=self.scraper,
            x_path=u'a/text()', from_detail_page=False)
        self.se_title.save()
        self.se_url = ScraperElem(scraped_obj_attr=self.soa_url, scraper=self.scraper,
            x_path=u'a/@href', from_detail_page=False)
        self.se_url.save()
        self.se_desc = ScraperElem(scraped_obj_attr=self.soa_desc, scraper=self.scraper,
            x_path=u'//div/div[@class="description"]/text()', from_detail_page=True, mandatory=False)
        self.se_desc.save()
        self.sched_rt = SchedulerRuntime()
        self.sched_rt.save()
        self.event_website = EventWebsite(pk=1, name=u'Event Website', scraper=self.scraper,
            url=os.path.join(self.SERVER_URL, 'site_generic/event_main.html'), scraper_runtime=self.sched_rt,)
        self.event_website.save()
        # Connect every public scrapy signal to the no-op recorder.
        for name, signal in vars(signals).items():
            if not name.startswith('_'):
                dispatcher.connect(self.record_signal, signal)
    def tearDown(self):
        pass
| |
# coding=utf-8
""" Reconsider Package
Used to clone DBs and Tables from one RethinkDB instance to another
"""
__author__ = "Chris Nasr"
__copyright__ = "OuroborosCoding"
__license__ = "Apache"
__version__ = "1.0.0"
__maintainer__ = "Chris Nasr"
__email__ = "ouroboroscode@gmail.com"
# Import python core modules
import math
import re
import sys
# Import pip modules
import rethinkdb as r
# Compile index regex
_INDEX_REGEX = re.compile(r'(?:\.getField\("([^"]+)"\)|r\.row\("([^"]+)"\))')
# Number of "=" in progress bar
_PROGRESS_TICKS = 25
# Clone
def clone(source, destination, dbs=None, verbose=False):
    """Clone

    Clone is used to clone one or many DBs/Tables from one host to another

    Args:
        source (dict): Data specifying the source instance
            A dictionary with the following possible elements: host, port, user,
            password, timeout, ssl (see rethinkdb python api)
        destination (dict): Data specifying the destination instance
            Works the same as source, but for the destination host
        dbs (list|dict): A list of DBs
            This is the list of Databases that will be cloned. If the value is a
            list, all tables in each DB will be cloned. If the value is a dict,
            it is assumed the keys are the names of the DBs, and the value (a
            list) is the tables that will be cloned from each DB
        verbose (bool): Optional verbose flag
            If true the function will print out details about what it's doing.
            Defaults to False

    Returns:
        bool: Returns true on success

    Raises:
        ValueError: If any arguments are incorrect a ValueError will be raised
    """
    # If the source is not a valid dict
    if not isinstance(source, dict):
        raise ValueError('source must be a dict')

    # Open a connection to the source instance
    try:
        oSource = r.connect(**source)
    except r.errors.RqlDriverError:
        sys.stderr.write('Can not connect to source host: ' + str(source) + '\n')
        return False

    # If the destination is not a valid dict
    if not isinstance(destination, dict):
        raise ValueError('destination must be a dict')

    # Open a connection to the destination instance
    try:
        oDest = r.connect(**destination)
    except r.errors.RqlDriverError:
        sys.stderr.write('Can not connect to destination host: ' + str(destination) + '\n')
        return False

    # Get all the DBs on the source
    lSourceDBs = r.db_list().run(oSource)

    # If no DBs were specified, clone them all
    if not dbs:
        dbs = lSourceDBs

    # If the DBs were sent as a list (no tables specified)
    if isinstance(dbs, (list, tuple)):
        dbs = {s: None for s in dbs}

    # Go through each DB listed.
    # Bug fix: iteritems() is Python 2 only; items() works on both 2 and 3.
    for sDB, lTables in dbs.items():

        # DBs can't have spaces, so a space means "source_name dest_name".
        # Split at most once so extra spaces don't raise an unpack error.
        if ' ' in sDB:
            sDB, sCopyDB = sDB.split(' ', 1)
        else:
            sCopyDB = sDB

        # If the DB doesn't exist in the source
        if sDB not in lSourceDBs:
            sys.stderr.write('No such DB "%s" on the source host\n' % sDB)
            continue

        # Skip DBs that already exist on the destination
        if r.db_list().contains(sCopyDB).run(oDest):
            sys.stderr.write('DB "%s" already exists on the destination host\n' % sDB)
            continue

        if verbose:
            sys.stdout.write('Processing DB "%s"\n' % sDB)

        # Create the DB on the destination host
        r.db_create(sCopyDB).run(oDest)

        # Get all the tables in the DB
        lSourceTables = r.db(sDB).table_list().run(oSource)

        # If no tables were specified, clone them all
        if not lTables:
            lTables = lSourceTables

        # Go through each Table
        for sTable in lTables:

            # If the Table doesn't exist in the source
            if sTable not in lSourceTables:
                sys.stderr.write('No such Table "%s.%s" on the source host\n' % (sDB, sTable))
                continue

            if verbose:
                sys.stdout.write(' Processing Table "%s": [%s] 0%%' % (sTable, (' ' * _PROGRESS_TICKS)))

            # Number of documents in the table (float for tick arithmetic)
            fTotal = float(r.db(sDB).table(sTable).count().run(oSource))

            # Size of one progress-bar tick, in documents
            fBlock = fTotal / _PROGRESS_TICKS

            # Init the count and the ticks
            iCount = 0
            iTicks = 0

            # Create the table with the same primary key as the source
            sKeyField = r.db(sDB).table(sTable).info().run(oSource)['primary_key']
            r.db(sCopyDB).table_create(sTable, primary_key=sKeyField).run(oDest)

            # Re-create every secondary index
            for dIndex in r.db(sDB).table(sTable).index_status().run(oSource):

                sName = dIndex['index']

                # Each regex match is a (getField, r.row) tuple and exactly one
                # group is filled depending on which form the original index
                # query used. Bug fix: the old code read only one fixed group,
                # producing '' for indexes written in the other form.
                lFieldNames = [tMatch[0] or tMatch[1]
                               for tMatch in _INDEX_REGEX.findall(dIndex['query'])]

                if len(lFieldNames) == 1:
                    # Single-field index. Bug fix: keep the original index
                    # name even when it differs from the field name.
                    if lFieldNames[0] == sName:
                        r.db(sCopyDB).table(sTable).index_create(sName).run(oDest)
                    else:
                        r.db(sCopyDB).table(sTable).index_create(sName, r.row[lFieldNames[0]]).run(oDest)
                else:
                    # Compound index: build the list of row selectors
                    lFields = [r.row[sField] for sField in lFieldNames]
                    r.db(sCopyDB).table(sTable).index_create(sName, lFields).run(oDest)

            # Copy the data one document at a time
            for dDoc in r.db(sDB).table(sTable).run(oSource):

                r.db(sCopyDB).table(sTable).insert(dDoc).run(oDest)

                if verbose:
                    iCount += 1
                    iTemp = int(round(float(iCount) / fBlock))
                    # Redraw the bar only when it gains at least one tick
                    if iTemp > iTicks:
                        iTicks = iTemp
                        # Percentage derived from the tick count instead of a
                        # hardcoded *4, so _PROGRESS_TICKS can be changed.
                        sys.stdout.write('\r Processing Table "%s": [%s%s] %d%%' % (
                            sTable,
                            ('=' * iTicks),
                            (' ' * (_PROGRESS_TICKS - iTicks)),
                            (iTicks * 100 // _PROGRESS_TICKS)
                        ))
                        sys.stdout.flush()

            if verbose:
                sys.stdout.write('\r Processing Table "%s": [%s] 100%%\n' % (sTable, ('=' * _PROGRESS_TICKS)))

    # Bug fix: the docstring promises True on success but nothing was returned.
    return True
| |
import flask; from flask import request
import os
import subprocess
import traceback
import urllib.parse
from voussoirkit import cacheclass
from voussoirkit import flasktools
from voussoirkit import pathclass
from voussoirkit import stringtools
from voussoirkit import vlogging
log = vlogging.get_logger(__name__)
import etiquette
from .. import common
from .. import helpers
site = common.site
session_manager = common.session_manager
photo_download_zip_tokens = cacheclass.Cache(maxlen=100)
# Individual photos ################################################################################
@site.route('/photo/<photo_id>')
def get_photo_html(photo_id):
    '''Render the HTML page for a single photo.'''
    requested_photo = common.P_photo(photo_id, response_type='html')
    return common.render_template(request, 'photo.html', photo=requested_photo)
@site.route('/photo/<photo_id>.json')
def get_photo_json(photo_id):
    '''Return one photo's data as a JSON response.'''
    requested_photo = common.P_photo(photo_id, response_type='json')
    return flasktools.json_response(requested_photo.jsonify())
@site.route('/file/<photo_id>')
@site.route('/file/<photo_id>/<basename>')
def get_file(photo_id, basename=None):
    '''Serve the photo's file, optionally as a forced download.'''
    photo_id = photo_id.split('.')[0]
    photo = common.P.get_photo(photo_id)

    do_download = stringtools.truthystring(request.args.get('download', False))
    use_original_filename = stringtools.truthystring(
        request.args.get('original_filename', False)
    )

    if not do_download:
        # Plain view: stream the file with its real mimetype.
        return common.send_file(photo.real_path.absolute_path, override_mimetype=photo.mimetype)

    # Download: choose a filename, sanitize it and attach Content-Disposition.
    if use_original_filename:
        download_as = photo.basename
    else:
        download_as = photo.id + photo.dot_extension
    download_as = etiquette.helpers.remove_path_badchars(download_as)
    download_as = urllib.parse.quote(download_as)
    response = flask.make_response(common.send_file(photo.real_path.absolute_path))
    response.headers['Content-Disposition'] = 'attachment; filename*=UTF-8\'\'%s' % download_as
    return response
@site.route('/thumbnail/<photo_id>')
def get_thumbnail(photo_id):
    '''Serve the photo's thumbnail image; 404 if it has none.'''
    photo_id = photo_id.split('.')[0]
    photo = common.P_photo(photo_id, response_type='html')
    if not photo.thumbnail:
        # abort() raises, so execution never continues past this point.
        # (Typo fix in the user-facing message: "doesnt" -> "doesn't".)
        flask.abort(404, 'That file doesn\'t have a thumbnail')
    return common.send_file(photo.thumbnail)
# Photo create and delete ##########################################################################
@site.route('/photo/<photo_id>/delete', methods=['POST'])
def post_photo_delete(photo_id):
    '''Delete the photo record, optionally removing the file on disk too.'''
    # Fix: a stray debug print() was left in; use the module logger instead.
    log.debug('Deleting photo %s.', photo_id)
    photo = common.P_photo(photo_id, response_type='json')
    delete_file = stringtools.truthystring(request.form.get('delete_file', False))
    photo.delete(delete_file=delete_file, commit=True)
    return flasktools.json_response({})
# Photo tag operations #############################################################################
def post_photo_add_remove_tag_core(photo_ids, tagname, add_or_remove):
    '''Apply or strip one tag across one or many photos, then commit.'''
    if isinstance(photo_ids, str):
        photo_ids = stringtools.comma_space_split(photo_ids)

    # Validate the photos first, then the tag, same as before.
    photos = list(common.P_photos(photo_ids, response_type='json'))
    tag = common.P_tag(tagname, response_type='json')

    for photo in photos:
        if add_or_remove == 'add':
            photo.add_tag(tag)
        elif add_or_remove == 'remove':
            photo.remove_tag(tag)
    common.P.commit('photo add remove tag core')

    return flasktools.json_response({'action': add_or_remove, 'tagname': tag.name})
@site.route('/photo/<photo_id>/add_tag', methods=['POST'])
@flasktools.required_fields(['tagname'], forbid_whitespace=True)
def post_photo_add_tag(photo_id):
    '''
    Add a tag to this photo.
    '''
    return post_photo_add_remove_tag_core(
        photo_ids=photo_id,
        tagname=request.form['tagname'],
        add_or_remove='add',
    )
@site.route('/photo/<photo_id>/copy_tags', methods=['POST'])
@flasktools.required_fields(['other_photo'], forbid_whitespace=True)
def post_photo_copy_tags(photo_id):
    '''
    Copy the tags from another photo.
    '''
    recipient = common.P_photo(photo_id, response_type='json')
    donor = common.P_photo(request.form['other_photo'], response_type='json')
    recipient.copy_tags(donor)
    common.P.commit('photo copy tags')
    tags = [tag.jsonify(minimal=True) for tag in recipient.get_tags()]
    return flasktools.json_response(tags)
@site.route('/photo/<photo_id>/remove_tag', methods=['POST'])
@flasktools.required_fields(['tagname'], forbid_whitespace=True)
def post_photo_remove_tag(photo_id):
    '''
    Remove a tag from this photo.
    '''
    return post_photo_add_remove_tag_core(
        photo_ids=photo_id,
        tagname=request.form['tagname'],
        add_or_remove='remove',
    )
@site.route('/batch/photos/add_tag', methods=['POST'])
@flasktools.required_fields(['photo_ids', 'tagname'], forbid_whitespace=True)
def post_batch_photos_add_tag():
    '''Add one tag to every photo in the submitted batch.'''
    return post_photo_add_remove_tag_core(
        photo_ids=request.form['photo_ids'],
        tagname=request.form['tagname'],
        add_or_remove='add',
    )
@site.route('/batch/photos/remove_tag', methods=['POST'])
@flasktools.required_fields(['photo_ids', 'tagname'], forbid_whitespace=True)
def post_batch_photos_remove_tag():
    '''Remove one tag from every photo in the submitted batch.'''
    return post_photo_add_remove_tag_core(
        photo_ids=request.form['photo_ids'],
        tagname=request.form['tagname'],
        add_or_remove='remove',
    )
# Photo metadata operations ########################################################################
@site.route('/photo/<photo_id>/generate_thumbnail', methods=['POST'])
def post_photo_generate_thumbnail(photo_id):
    '''Regenerate the photo's thumbnail; extra form fields become kwargs.'''
    special = request.form.to_dict()
    # 'commit' is controlled here, never by the caller.
    special.pop('commit', None)
    photo = common.P_photo(photo_id, response_type='json')
    photo.generate_thumbnail(commit=True, **special)
    return flasktools.json_response({})
def post_photo_refresh_metadata_core(photo_ids):
    '''Reload the file metadata of each given photo, then commit.

    Aborts 404 if any photo's file no longer exists on disk; thumbnail
    regeneration failures are only logged, not fatal.
    '''
    if isinstance(photo_ids, str):
        photo_ids = stringtools.comma_space_split(photo_ids)
    photos = list(common.P_photos(photo_ids, response_type='json'))
    for photo in photos:
        # Drop the cached object and re-fetch so we operate on fresh state.
        photo._uncache()
        photo = common.P_photo(photo.id, response_type='json')
        try:
            photo.reload_metadata()
        except pathclass.NotFile:
            flask.abort(404)
        if photo.thumbnail is None:
            # Best-effort: a failed thumbnail should not fail the request.
            try:
                photo.generate_thumbnail()
            except Exception:
                log.warning(traceback.format_exc())
    common.P.commit('photo refresh metadata core')
    return flasktools.json_response({})
@site.route('/photo/<photo_id>/refresh_metadata', methods=['POST'])
def post_photo_refresh_metadata(photo_id):
    '''Reload file metadata for a single photo.'''
    return post_photo_refresh_metadata_core(photo_ids=photo_id)
@site.route('/batch/photos/refresh_metadata', methods=['POST'])
@flasktools.required_fields(['photo_ids'], forbid_whitespace=True)
def post_batch_photos_refresh_metadata():
    '''Reload file metadata for every photo in the submitted batch.'''
    return post_photo_refresh_metadata_core(photo_ids=request.form['photo_ids'])
@site.route('/photo/<photo_id>/set_searchhidden', methods=['POST'])
def post_photo_set_searchhidden(photo_id):
    '''Mark this photo as hidden from search results.'''
    target = common.P_photo(photo_id, response_type='json')
    target.set_searchhidden(True)
    return flasktools.json_response({})
@site.route('/photo/<photo_id>/unset_searchhidden', methods=['POST'])
def post_photo_unset_searchhidden(photo_id):
    '''Make this photo visible in search results again.'''
    target = common.P_photo(photo_id, response_type='json')
    target.set_searchhidden(False)
    return flasktools.json_response({})
def post_batch_photos_searchhidden_core(photo_ids, searchhidden):
    '''Set the searchhidden flag on every photo in the batch, then commit.'''
    if isinstance(photo_ids, str):
        photo_ids = stringtools.comma_space_split(photo_ids)
    batch = list(common.P_photos(photo_ids, response_type='json'))
    for photo in batch:
        photo.set_searchhidden(searchhidden)
    common.P.commit('photo set searchhidden core')
    return flasktools.json_response({})
@site.route('/photo/<photo_id>/show_in_folder', methods=['POST'])
def post_photo_show_in_folder(photo_id):
    '''
    Open the OS file browser showing the photo's file / containing folder.
    Only allowed from localhost, since it touches the server's own desktop.
    '''
    if not request.is_localhost:
        flask.abort(403)
    photo = common.P_photo(photo_id, response_type='json')
    if os.name == 'nt':
        # explorer's /select switch needs the quoted path inline, so this one
        # really is a shell command string.
        command = f'explorer.exe /select,"{photo.real_path.absolute_path}"'
        subprocess.Popen(command, shell=True)
    else:
        # Bug fix: the list was previously passed with shell=True, which on
        # POSIX runs `sh -c xdg-open` with the path bound as $0 -- xdg-open
        # never received the directory argument. An argv list must run
        # without the shell.
        command = ['xdg-open', photo.real_path.parent.absolute_path]
        subprocess.Popen(command)
    return flasktools.json_response({})
@site.route('/batch/photos/set_searchhidden', methods=['POST'])
@flasktools.required_fields(['photo_ids'], forbid_whitespace=True)
def post_batch_photos_set_searchhidden():
    '''Hide every photo in the batch from search results.'''
    return post_batch_photos_searchhidden_core(
        photo_ids=request.form['photo_ids'],
        searchhidden=True,
    )
@site.route('/batch/photos/unset_searchhidden', methods=['POST'])
@flasktools.required_fields(['photo_ids'], forbid_whitespace=True)
def post_batch_photos_unset_searchhidden():
    '''Make every photo in the batch visible in search results again.'''
    return post_batch_photos_searchhidden_core(
        photo_ids=request.form['photo_ids'],
        searchhidden=False,
    )
# Clipboard ########################################################################################
@site.route('/clipboard')
def get_clipboard_page():
    '''Render the clipboard management page.'''
    return common.render_template(request, 'clipboard.html')
@site.route('/batch/photos', methods=['POST'])
@flasktools.required_fields(['photo_ids'], forbid_whitespace=True)
def post_batch_photos():
    '''
    Return a list of photo.jsonify() for each requested photo id.
    '''
    requested_ids = stringtools.comma_space_split(request.form['photo_ids'])
    found = list(common.P_photos(requested_ids, response_type='json'))
    return flasktools.json_response([photo.jsonify() for photo in found])
@site.route('/batch/photos/photo_card', methods=['POST'])
@flasktools.required_fields(['photo_ids'], forbid_whitespace=True)
def post_batch_photos_photo_cards():
    '''Render the photo-card HTML snippet for each requested photo id.

    Returns a JSON object mapping photo id -> rendered card markup.
    '''
    photo_ids = request.form['photo_ids']
    photo_ids = stringtools.comma_space_split(photo_ids)
    photos = list(common.P_photos(photo_ids, response_type='json'))
    # Photo filenames are prevented from having colons, so using it as a split
    # delimiter should be safe.
    template = '''
    {% import "cards.html" as cards %}
    {% for photo in photos %}
    {{photo.id}}:
    {{cards.create_photo_card(photo)}}
    :SPLITME:
    {% endfor %}
    '''
    html = flask.render_template_string(template, photos=photos)
    # Split the rendered blob into per-photo chunks, then split each chunk
    # once on ':' to separate the id from the card markup.
    divs = [div.strip() for div in html.split(':SPLITME:')]
    divs = [div for div in divs if div]
    divs = [div.split(':', 1) for div in divs]
    divs = {photo_id.strip(): photo_card.strip() for (photo_id, photo_card) in divs}
    response = flasktools.json_response(divs)
    return response
# Zipping ##########################################################################################
@site.route('/batch/photos/download_zip/<zip_token>', methods=['GET'])
def get_batch_photos_download_zip(zip_token):
    '''
    After the user has generated their zip token, they can retrieve
    that zip file.
    '''
    zip_token = zip_token.split('.')[0]
    try:
        photo_ids = photo_download_zip_tokens[zip_token]
    except KeyError:
        flask.abort(404)

    # Let's re-validate those IDs just in case anything has changed.
    photos = list(common.P_photos(photo_ids, response_type='json'))
    if not photos:
        flask.abort(400)

    streamed_zip = etiquette.helpers.zip_photos(photos)
    download_as = urllib.parse.quote(zip_token + '.zip')
    outgoing_headers = {
        'Content-Type': 'application/octet-stream',
        'Content-Disposition': f'attachment; filename*=UTF-8\'\'{download_as}',
    }
    return flask.Response(streamed_zip, headers=outgoing_headers)
@site.route('/batch/photos/download_zip', methods=['POST'])
@flasktools.required_fields(['photo_ids'], forbid_whitespace=True)
def post_batch_photos_download_zip():
    '''
    Initiating file downloads via POST requests is a bit clunky and unreliable,
    so the way this works is we generate a token representing the photoset
    that they want, and then they can retrieve the zip itself via GET.
    '''
    requested_ids = stringtools.comma_space_split(request.form['photo_ids'])
    photos = list(common.P_photos(requested_ids, response_type='json'))
    if not photos:
        flask.abort(400)

    zip_token = 'etiquette_' + etiquette.helpers.hash_photoset(photos)
    # Remember only the validated ids under this token.
    photo_download_zip_tokens[zip_token] = [p.id for p in photos]

    return flasktools.json_response({'zip_token': zip_token})
# Search ###########################################################################################
def get_search_core():
    '''Run a search from request.args and package everything the pages need.

    Returns a dict with the result objects, the tags present on this page,
    prev/next pagination URLs, accumulated warnings and the cleaned-up
    search kwargs (for re-populating the search form).
    '''
    warning_bag = etiquette.objects.WarningBag()
    has_tags = request.args.get('has_tags')
    tag_musts = request.args.get('tag_musts')
    tag_mays = request.args.get('tag_mays')
    tag_forbids = request.args.get('tag_forbids')
    tag_expression = request.args.get('tag_expression')
    filename_terms = request.args.get('filename')
    extension = request.args.get('extension')
    extension_not = request.args.get('extension_not')
    mimetype = request.args.get('mimetype')
    is_searchhidden = request.args.get('is_searchhidden', False)
    yield_albums = request.args.get('yield_albums', True)
    yield_photos = request.args.get('yield_photos', True)
    limit = request.args.get('limit')
    # This is being pre-processed because the site enforces a maximum value
    # which the PhotoDB api does not.
    limit = etiquette.searchhelpers.normalize_limit(limit, warning_bag=warning_bag)
    if limit is None:
        limit = 50
    else:
        limit = min(limit, 1000)
    offset = request.args.get('offset')
    author = request.args.get('author')
    orderby = request.args.get('orderby')
    area = request.args.get('area')
    width = request.args.get('width')
    height = request.args.get('height')
    ratio = request.args.get('ratio')
    bytes = request.args.get('bytes')
    has_thumbnail = request.args.get('has_thumbnail')
    duration = request.args.get('duration')
    created = request.args.get('created')
    # These are in a dictionary so I can pass them to the page template.
    search_kwargs = {
        'area': area,
        'width': width,
        'height': height,
        'ratio': ratio,
        'bytes': bytes,
        'duration': duration,
        'author': author,
        'created': created,
        'extension': extension,
        'extension_not': extension_not,
        'filename': filename_terms,
        'has_tags': has_tags,
        'has_thumbnail': has_thumbnail,
        'is_searchhidden': is_searchhidden,
        'mimetype': mimetype,
        'tag_musts': tag_musts,
        'tag_mays': tag_mays,
        'tag_forbids': tag_forbids,
        'tag_expression': tag_expression,
        'limit': limit,
        'offset': offset,
        'orderby': orderby,
        'warning_bag': warning_bag,
        'give_back_parameters': True,
        'yield_albums': yield_albums,
        'yield_photos': yield_photos,
    }
    # print(search_kwargs)
    search_generator = common.P.search(**search_kwargs)
    # Because of the giveback, first element is cleaned up kwargs
    search_kwargs = next(search_generator)
    # Web UI users aren't allowed to use within_directory anyway, so don't
    # show it to them.
    search_kwargs.pop('within_directory', None)
    # print(search_kwargs)
    # Collect results, siphoning any WarningBags the generator yields into a
    # flat set of warnings.
    warnings = set()
    search_results = []
    for item in search_generator:
        if isinstance(item, etiquette.objects.WarningBag):
            warnings.update(item.warnings)
            continue
        search_results.append(item)
    warnings = [
        w.error_message if hasattr(w, 'error_message') else str(w)
        for w in warnings
    ]
    # TAGS ON THIS PAGE
    total_tags = set()
    for result in search_results:
        if isinstance(result, etiquette.objects.Photo):
            total_tags.update(result.get_tags())
    total_tags = sorted(total_tags, key=lambda t: t.name)
    # PREV-NEXT PAGE URLS
    offset = search_kwargs['offset'] or 0
    original_params = request.args.to_dict()
    original_params['limit'] = limit
    # A full page of results implies there may be a next page.
    if limit and len(search_results) >= limit:
        next_params = original_params.copy()
        next_params['offset'] = offset + limit
        next_params = helpers.dict_to_params(next_params)
        next_page_url = '/search' + next_params
    else:
        next_page_url = None
    if limit and offset > 0:
        prev_params = original_params.copy()
        prev_offset = max(0, offset - limit)
        if prev_offset > 0:
            prev_params['offset'] = prev_offset
        else:
            prev_params.pop('offset', None)
        prev_params = helpers.dict_to_params(prev_params)
        prev_page_url = '/search' + prev_params
    else:
        prev_page_url = None
    search_kwargs['view'] = request.args.get('view', 'grid')
    final_results = {
        'next_page_url': next_page_url,
        'prev_page_url': prev_page_url,
        'results': search_results,
        'total_tags': total_tags,
        'warnings': list(warnings),
        'search_kwargs': search_kwargs,
    }
    return final_results
@site.route('/search_embed')
def get_search_embed():
    '''Render the minimal, embeddable version of the search results page.'''
    bundle = get_search_core()
    return common.render_template(
        request,
        'search_embed.html',
        results=bundle['results'],
        search_kwargs=bundle['search_kwargs'],
    )
@site.route('/search')
def get_search_html():
    '''Render the full search results page.'''
    bundle = get_search_core()
    return common.render_template(
        request,
        'search.html',
        next_page_url=bundle['next_page_url'],
        prev_page_url=bundle['prev_page_url'],
        results=bundle['results'],
        search_kwargs=bundle['search_kwargs'],
        total_tags=bundle['total_tags'],
        warnings=bundle['warnings'],
    )
@site.route('/search.json')
def get_search_json():
    '''Run the search and return the results as JSON.'''
    search_results = get_search_core()
    search_kwargs = search_results['search_kwargs']

    # The search has converted many arguments into sets or other types.
    # Convert them back into something that will display nicely on the search form.
    def join_helper(x):
        return ', '.join(x) if x else None

    def author_helper(users):
        return ', '.join(user.username for user in users) if users else None

    def tagname_helper(tags):
        return [tag.name for tag in tags] if tags else None

    for key in ('extension', 'extension_not', 'mimetype'):
        search_kwargs[key] = join_helper(search_kwargs[key])
    search_kwargs['author'] = author_helper(search_kwargs['author'])
    for key in ('tag_musts', 'tag_mays', 'tag_forbids'):
        search_kwargs[key] = tagname_helper(search_kwargs[key])

    jsonified = []
    for result in search_results['results']:
        if isinstance(result, etiquette.objects.Photo):
            jsonified.append(result.jsonify(include_albums=False))
        else:
            jsonified.append(result.jsonify(minimal=True))
    search_results['results'] = jsonified

    search_results['total_tags'] = [
        tag.jsonify(minimal=True) for tag in search_results['total_tags']
    ]
    return flasktools.json_response(search_results)
# Swipe ############################################################################################
@site.route('/swipe')
def get_swipe():
    '''Render the swipe browsing page.'''
    return common.render_template(request, 'swipe.html')
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2012-2021 SoftBank Robotics. All rights reserved.
# Use of this source code is governed by a BSD-style license (see the COPYING file).
""" Collection of parser fonctions for qibuild actions. """
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
import qibuild.deps
import qibuild.worktree
import qibuild.cmake_builder
import qisys.parsers
from qisys import ui
def cmake_build_parser(parser, group=None, with_build_parser=True):
    """ CMake Build Parser

    Adds the generic build arguments plus CMake-specific build options
    (parallelism, --verbose-make) to the given parser.
    """
    if with_build_parser:
        # NOTE(review): the caller-supplied `group` is not forwarded here --
        # build_parser is always called with group=None. Confirm whether
        # that is intentional.
        qisys.parsers.build_parser(parser, group=None)
    if not group:
        group = parser.add_argument_group("Build options")
    qisys.parsers.parallel_parser(group, default=None)
    group.add_argument("--verbose-make", action="store_true", default=False,
                       help="Print the executed commands while building")
def cmake_configure_parser(parser):
    """ CMake Configure Parser

    Adds every flag that influences the CMake configure step and sets
    their defaults (Debug build type, clean the CMake cache first).
    """
    group = parser.add_argument_group("configure options")
    group.add_argument("-G", "--cmake-generator", action="store",
                       help="Specify the CMake generator")
    group.add_argument("-D", dest="cmake_flags",
                       action="append",
                       help="additional cmake flags")
    group.add_argument("--no-clean-first", dest="clean_first",
                       action="store_false",
                       help="do not clean CMake cache")
    group.add_argument("--debug-trycompile", dest="debug_trycompile",
                       action="store_true",
                       help="pass --debug-trycompile to CMake call")
    group.add_argument("--eff-c++", dest="effective_cplusplus",
                       action="store_true",
                       help="activate warnings from the 'Effective C++' book (gcc only)")
    group.add_argument("--werror", dest="werror",
                       action="store_true",
                       help="treat warnings as error")
    group.add_argument("--profiling", dest="profiling", action="store_true",
                       help="profile cmake execution")
    group.add_argument("--summarize-options", dest="summarize_options",
                       action="store_true",
                       help="summarize build options at the end")
    group.add_argument("--trace-cmake", dest="trace_cmake",
                       action="store_true",
                       help="run cmake in trace mode")
    group.add_argument("--coverage", dest="coverage",
                       action="store_true",
                       help="activate coverage support (gcc only)")
    group.add_argument("--32-bits", dest="force_32_bits",
                       action="store_true", help="force 32 bits build")
    # --with/--without-debug-info share one tri-state dest (see
    # convert_cmake_args_to_flags): None = unset, True, False.
    group.add_argument("--with-debug-info", action="store_true", dest="debug_info",
                       help="include debug information in binaries, even when used with --release. "
                            "Note that you can also use --build-type=RelWithDebInfo "
                            "for the same effect")
    group.add_argument("--without-debug-info", action="store_false", dest="debug_info",
                       help="remove debug information from binaries, even when used with --debug")
    group.add_argument("--release", action="store_const", const="Release",
                       dest="build_type",
                       help="Build in release")
    group.add_argument("--debug", action="store_const", const="Debug",
                       dest="build_type",
                       help="Build in debug, default")
    group.add_argument("--build-type", dest="build_type",
                       help="Set CMAKE_BUILD_TYPE")
    parser.set_defaults(clean_first=True, effective_cplusplus=False,
                        werror=False, profiling=False,
                        trace_cmake=False, debug_info=None,
                        build_type="Debug")
def convert_cmake_args_to_flags(args):
    """ Translate the 'helper' command line options into raw cmake flags,
    appending them to ``args.cmake_flags``. """
    if not args.cmake_flags:
        args.cmake_flags = []
    # Plain boolean option -> cmake flag translations, in a fixed order:
    for enabled, flag in (
            (args.effective_cplusplus, "QI_EFFECTIVE_CPP=ON"),
            (args.werror, "QI_WERROR=ON"),
            (args.coverage, "QI_WITH_COVERAGE=ON")):
        if enabled:
            args.cmake_flags.append(flag)
    # args.debug_info is tri-state: None (not set at all), True, False
    if args.debug_info is True:
        args.cmake_flags.append("QI_WITH_DEBUG_INFO=ON")
    elif args.debug_info is False:
        args.cmake_flags.append("QI_WITH_DEBUG_INFO=OFF")
    if args.force_32_bits:
        args.cmake_flags.append("QI_FORCE_32_BITS=ON")
def project_parser(parser, positional=True):
    """ Add the arguments shared by every action working on several
    build projects. """
    grp = qisys.parsers.project_parser(parser, positional=positional)
    # --use-deps only matters for actions where NOT solving the deps
    # is the sensible default behavior (for instance `qibuild test`)
    grp.add_argument("--use-deps", action="store_true", dest="use_deps",
                     help="Force deps resolution")
    grp.add_argument("--build-deps-only", action="store_const",
                     const=["build"], dest="dep_types",
                     help="Work on specified projects by ignoring "
                          "the runtime deps.")
    parser.set_defaults(dep_types="default")
def get_build_worktree(args, verbose=True):
    """ Get a build worktree to use from a argparse.Namespace object.

    :param args: parsed command line arguments
    :param verbose: when True, print the worktree root and the chosen
                    build configuration details to the console
    :returns: a BuildWorkTree whose ``build_config`` has been set
              from the command line arguments
    """
    worktree = qisys.parsers.get_worktree(args)
    build_worktree = qibuild.worktree.BuildWorkTree(worktree)
    if verbose:
        ui.info(ui.green, "Current build worktree:", ui.reset, ui.bold,
                build_worktree.root)
    # Apply command line overrides (config, build type, cmake flags ...)
    build_config = get_build_config(build_worktree, args)
    build_worktree.build_config = build_config
    if verbose:
        if build_config.local_cmake:
            ui.info(ui.green, "Using additional cmake file", ui.blue,
                    build_config.local_cmake)
        if build_config.toolchain:
            ui.info(ui.green, "Using toolchain:", ui.blue,
                    build_config.toolchain.name)
        for profile in build_config.profiles:
            ui.info(ui.green, "Using profile:", ui.blue, profile)
    return build_worktree
def get_build_projects(build_worktree, args, solve_deps=True, default_all=False):
    """
    Get a list of build projects to use from an argparse.Namespace
    object. Useful when you do not need a CMakeBuilder.
    You can choose whether or not to solve the dependencies.
    """
    projects = BuildProjectParser(build_worktree).parse_args(
        args, default_all=default_all)
    # --single short-circuits dependency resolution entirely
    if not solve_deps or args.single:
        return projects
    solver = qibuild.deps.DepsSolver(build_worktree)
    return solver.get_dep_projects(projects, get_dep_types(args))
def get_one_build_project(build_worktree, args):
    """
    Get one build project from the command line.
    (zero or one project name may be specified).
    """
    projects = BuildProjectParser(build_worktree).parse_args(args)
    if len(projects) != 1:
        raise Exception("This action can only work on one project")
    return projects[0]
def get_dep_types(args, default=None):
    """ Get a list of dep types from the command line. """
    if not default:
        default = ["build", "runtime", "test"]
    # --single means: no dependency resolution at all
    if args.single:
        return []
    dep_types = getattr(args, "dep_types", "default")
    return default if dep_types == "default" else dep_types
def get_cmake_builder(args, default_dep_types=None):
    """ Get a :py:class:`.CMakeBuilder` object from the command line. """
    build_worktree = get_build_worktree(args)
    # The CMakeBuilder solves the deps itself later on,
    # so do not solve them here
    projects = get_build_projects(build_worktree, args, solve_deps=False)
    builder = qibuild.cmake_builder.CMakeBuilder(build_worktree, projects)
    builder.dep_types = get_dep_types(args, default=default_dep_types)
    return builder
def get_host_tools_builder(args):
    """
    Get a :py:class:`.CMakeBuilder` object from the
    command line suitable to build host dependencies.

    :raises Exception: when ``args.config`` is set but does not match
                       the config marked as host config in the global
                       qibuild configuration
    """
    qibuild_cfg = qibuild.config.QiBuildConfig()
    qibuild_cfg.read(create_if_missing=True)
    # The host config is declared once in the global qibuild config file
    host_config = qibuild_cfg.get_host_config()
    if args.config and args.config != host_config:
        raise Exception("""\
Trying to get a host tools builder with the following
build config: {config}, but the given config is not
marked as a host config\
""".format(config=args.config))
    build_worktree = get_build_worktree(args)
    if host_config:
        build_worktree.set_active_config(host_config)
    # Only the host deps of the projects given on the command line
    host_projects = get_host_projects(build_worktree, args)
    cmake_builder = qibuild.cmake_builder.CMakeBuilder(build_worktree, host_projects)
    return cmake_builder
def get_host_projects(build_worktree, args):
    """ Get the host projects for the projects given on the command line. """
    if args.all:
        projects = build_worktree.build_projects
    elif args.projects:
        projects = [build_worktree.get_build_project(name, raises=True)
                    for name in args.projects]
    else:
        # No explicit project: guess one from the working directory
        projects = [get_one_build_project(build_worktree, args)]
    solver = qibuild.deps.DepsSolver(build_worktree)
    return solver.get_host_projects(projects)
def get_build_config(build_worktree, args):
    """ Get a CMakeBuildConfig object from an argparse.Namespace object """
    build_config = build_worktree.build_config
    # Every setting below is optional: only apply what the action's
    # parser actually defined, keeping the worktree defaults otherwise.
    if getattr(args, "config", None):
        build_config.set_active_config(args.config)
    if hasattr(args, "build_type"):
        build_config.build_type = args.build_type
    if hasattr(args, "cmake_generator"):
        build_config.cmake_generator = args.cmake_generator
    if hasattr(args, "verbose_make"):
        build_config.verbose_make = args.verbose_make
    if getattr(args, "cmake_flags", None):
        # Each flag is expected to look like key=value
        user_flags = []
        for flag_string in args.cmake_flags:
            if "=" not in flag_string:
                raise Exception("Expecting a flag looking like -Dkey=value")
            key, value = flag_string.split("=", 1)
            user_flags.append((key, value))
        build_config.user_flags = user_flags
    if hasattr(args, "num_jobs"):
        build_config.num_jobs = args.num_jobs
    if getattr(args, "build_prefix", None):
        build_config.build_prefix = args.build_prefix
    return build_config
class BuildProjectParser(qisys.parsers.AbstractProjectParser):
    """ Implements AbstractProjectParser for a BuildWorkTree """
    def __init__(self, build_worktree):
        """ BuildProjectParser Init

        :param build_worktree: the BuildWorkTree to look up projects in
        """
        super(BuildProjectParser, self).__init__()
        self.build_worktree = build_worktree
    def all_projects(self, args):
        """ Return every build project of the worktree. """
        return self.build_worktree.build_projects
    def parse_no_project(self, args):
        """ Try to find the closest worktree project that matches the current directory. """
        # step 1: find the closest buildable project
        parser = qisys.parsers.WorkTreeProjectParser(self.build_worktree.worktree)
        worktree_projects = parser.parse_no_project(args)
        if not worktree_projects:
            raise CouldNotGuessProjectName()
        # WorkTreeProjectParser returns None or a list of one element
        worktree_proj = worktree_projects[0]
        build_proj = qisys.parsers.find_parent_project(self.build_worktree.build_projects,
                                                       worktree_proj.path)
        if not build_proj:
            # step 2: if we can't find, still look for a qiproject.xml not
            # registered yet and add it to the worktree:
            build_proj = qibuild.worktree.new_build_project(self.build_worktree,
                                                            worktree_proj)
        if not build_proj:
            # give up:
            raise CouldNotGuessProjectName()
        return self.parse_one_project(args, build_proj.name)
    def parse_one_project(self, args, project_arg):
        """ Accept both an absolute path matching a worktree project, or a project src. """
        project = self.build_worktree.get_build_project(project_arg, raises=True)
        return [project]
class CouldNotGuessProjectName(Exception):
    """ Raised when no build project could be deduced from the
    command line or the working directory. """
    def __str__(self):
        """ Human-readable error message. """
        return """
Could not guess qibuild project name from current working directory
Please go inside a project, or specify the project name
on the command line
"""
| |
# The following comment should be removed at some point in the future.
# mypy: strict-optional=False
from __future__ import absolute_import
import logging
from collections import OrderedDict
from pipenv.patched.notpip._vendor.packaging.utils import canonicalize_name
from pipenv.patched.notpip._internal import pep425tags
from pipenv.patched.notpip._internal.exceptions import InstallationError
from pipenv.patched.notpip._internal.models.wheel import Wheel
from pipenv.patched.notpip._internal.utils.logging import indent_log
from pipenv.patched.notpip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from typing import Dict, Iterable, List, Optional, Tuple
from pipenv.patched.notpip._internal.req.req_install import InstallRequirement
logger = logging.getLogger(__name__)
class RequirementSet(object):
    """Container tracking the requirements being resolved for install.

    NOTE(review): this is the pipenv-patched variant of pip's
    RequirementSet -- ``ignore_compatibility`` defaults to True, which
    disables the wheel platform-support check below.
    """
    def __init__(self, check_supported_wheels=True, ignore_compatibility=True):
        # type: (bool, bool) -> None
        """Create a RequirementSet.
        """
        # Maps canonicalized project name -> InstallRequirement
        self.requirements = OrderedDict()  # type: Dict[str, InstallRequirement]  # noqa: E501
        self.check_supported_wheels = check_supported_wheels
        # Requirements with no name yet (e.g. bare paths/archives); they
        # get re-scanned and moved to `requirements` once named.
        self.unnamed_requirements = []  # type: List[InstallRequirement]
        self.successfully_downloaded = []  # type: List[InstallRequirement]
        self.reqs_to_cleanup = []  # type: List[InstallRequirement]
        # pipenv patch: ignoring compatibility implies skipping the
        # "is this wheel supported on this platform" check entirely.
        if ignore_compatibility:
            self.check_supported_wheels = False
        self.ignore_compatibility = (check_supported_wheels is False or ignore_compatibility is True)
    def __str__(self):
        # type: () -> str
        # Only show user-supplied (top level) requirements
        requirements = sorted(
            (req for req in self.requirements.values() if not req.comes_from),
            key=lambda req: canonicalize_name(req.name),
        )
        return ' '.join(str(req.req) for req in requirements)
    def __repr__(self):
        # type: () -> str
        requirements = sorted(
            self.requirements.values(),
            key=lambda req: canonicalize_name(req.name),
        )
        format_string = '<{classname} object; {count} requirement(s): {reqs}>'
        return format_string.format(
            classname=self.__class__.__name__,
            count=len(requirements),
            reqs=', '.join(str(req.req) for req in requirements),
        )
    def add_unnamed_requirement(self, install_req):
        # type: (InstallRequirement) -> None
        """Track a requirement that does not have a project name yet."""
        assert not install_req.name
        self.unnamed_requirements.append(install_req)
    def add_named_requirement(self, install_req):
        # type: (InstallRequirement) -> None
        """Register a named requirement under its canonicalized name."""
        assert install_req.name
        project_name = canonicalize_name(install_req.name)
        self.requirements[project_name] = install_req
    def add_requirement(
        self,
        install_req,  # type: InstallRequirement
        parent_req_name=None,  # type: Optional[str]
        extras_requested=None  # type: Optional[Iterable[str]]
    ):
        # type: (...) -> Tuple[List[InstallRequirement], Optional[InstallRequirement]] # noqa: E501
        """Add install_req as a requirement to install.
        :param parent_req_name: The name of the requirement that needed this
            added. The name is used because when multiple unnamed requirements
            resolve to the same name, we could otherwise end up with dependency
            links that point outside the Requirements set. parent_req must
            already be added. Note that None implies that this is a user
            supplied requirement, vs an inferred one.
        :param extras_requested: an iterable of extras used to evaluate the
            environment markers.
        :return: Additional requirements to scan. That is either [] if
            the requirement is not applicable, or [install_req] if the
            requirement is applicable and has just been added.
        """
        # If the markers do not match, ignore this requirement.
        if not install_req.match_markers(extras_requested):
            logger.info(
                "Ignoring %s: markers '%s' don't match your environment",
                install_req.name, install_req.markers,
            )
            return [], None
        # If the wheel is not supported, raise an error.
        # Should check this after filtering out based on environment markers to
        # allow specifying different wheels based on the environment/OS, in a
        # single requirements file.
        if install_req.link and install_req.link.is_wheel:
            wheel = Wheel(install_req.link.filename)
            tags = pep425tags.get_supported()
            # check_supported_wheels is False when ignore_compatibility
            # was requested (see __init__), so this never fires then.
            if (self.check_supported_wheels and not wheel.supported(tags)):
                raise InstallationError(
                    "%s is not a supported wheel on this platform." %
                    wheel.filename
                )
        # This next bit is really a sanity check.
        assert install_req.is_direct == (parent_req_name is None), (
            "a direct req shouldn't have a parent and also, "
            "a non direct req should have a parent"
        )
        # Unnamed requirements are scanned again and the requirement won't be
        # added as a dependency until after scanning.
        if not install_req.name:
            self.add_unnamed_requirement(install_req)
            return [install_req], None
        try:
            existing_req = self.get_requirement(install_req.name)
        except KeyError:
            existing_req = None
        # Two *user supplied* requirements for the same project with the
        # same extras but different specifiers cannot both be honored.
        has_conflicting_requirement = (
            parent_req_name is None and
            existing_req and
            not existing_req.constraint and
            existing_req.extras == install_req.extras and
            existing_req.req.specifier != install_req.req.specifier
        )
        if has_conflicting_requirement:
            raise InstallationError(
                "Double requirement given: %s (already in %s, name=%r)"
                % (install_req, existing_req, install_req.name)
            )
        # When no existing requirement exists, add the requirement as a
        # dependency and it will be scanned again after.
        if not existing_req:
            self.add_named_requirement(install_req)
            # We'd want to rescan this requirement later
            return [install_req], install_req
        # Assume there's no need to scan, and that we've already
        # encountered this for scanning.
        if install_req.constraint or not existing_req.constraint:
            return [], existing_req
        # From here on, existing_req is a constraint being upgraded to a
        # real requirement by install_req.
        does_not_satisfy_constraint = (
            install_req.link and
            not (
                existing_req.link and
                install_req.link.path == existing_req.link.path
            )
        )
        if does_not_satisfy_constraint:
            self.reqs_to_cleanup.append(install_req)
            raise InstallationError(
                "Could not satisfy constraints for '%s': "
                "installation from path or url cannot be "
                "constrained to a version" % install_req.name,
            )
        # If we're now installing a constraint, mark the existing
        # object for real installation.
        existing_req.constraint = False
        existing_req.extras = tuple(sorted(
            set(existing_req.extras) | set(install_req.extras)
        ))
        logger.debug(
            "Setting %s extras to: %s",
            existing_req, existing_req.extras,
        )
        # Return the existing requirement for addition to the parent and
        # scanning again.
        return [existing_req], existing_req
    def has_requirement(self, name):
        # type: (str) -> bool
        """Return True when *name* is present as a real (non-constraint)
        requirement."""
        project_name = canonicalize_name(name)
        return (
            project_name in self.requirements and
            not self.requirements[project_name].constraint
        )
    def get_requirement(self, name):
        # type: (str) -> InstallRequirement
        """Look up a requirement by (canonicalized) name.

        NOTE(review): falls through and returns None when the name is
        unknown -- upstream pip raises KeyError here, and add_requirement
        above still guards for KeyError. Confirm the None return is the
        intended pipenv patch before relying on it.
        """
        project_name = canonicalize_name(name)
        if project_name in self.requirements:
            return self.requirements[project_name]
        pass
    def cleanup_files(self):
        # type: () -> None
        """Clean up files, remove builds."""
        logger.debug('Cleaning up...')
        with indent_log():
            for req in self.reqs_to_cleanup:
                req.remove_temporary_source()
| |
#-------------------------------------------------------------------------
# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
import ast
import base64
import hashlib
import hmac
import sys
import types
import warnings
import inspect
if sys.version_info < (3,):
from urllib2 import quote as url_quote
from urllib2 import unquote as url_unquote
_strtype = basestring
else:
from urllib.parse import quote as url_quote
from urllib.parse import unquote as url_unquote
_strtype = str
from datetime import datetime
from xml.dom import minidom
from xml.sax.saxutils import escape as xml_escape
#--------------------------------------------------------------------------
# constants
__author__ = 'Microsoft Corp. <ptvshelp@microsoft.com>'
__version__ = '0.8.4'
# Live ServiceClient URLs
BLOB_SERVICE_HOST_BASE = '.blob.core.windows.net'
QUEUE_SERVICE_HOST_BASE = '.queue.core.windows.net'
TABLE_SERVICE_HOST_BASE = '.table.core.windows.net'
SERVICE_BUS_HOST_BASE = '.servicebus.windows.net'
MANAGEMENT_HOST = 'management.core.windows.net'
# Development ServiceClient URLs (local storage emulator endpoints)
DEV_BLOB_HOST = '127.0.0.1:10000'
DEV_QUEUE_HOST = '127.0.0.1:10001'
DEV_TABLE_HOST = '127.0.0.1:10002'
# Default credentials for Development Storage Service
# (the well-known, publicly documented emulator account -- not a secret)
DEV_ACCOUNT_NAME = 'devstoreaccount1'
DEV_ACCOUNT_KEY = 'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw=='
# All of our error messages
_ERROR_CANNOT_FIND_PARTITION_KEY = 'Cannot find partition key in request.'
_ERROR_CANNOT_FIND_ROW_KEY = 'Cannot find row key in request.'
_ERROR_INCORRECT_TABLE_IN_BATCH = \
    'Table should be the same in a batch operations'
_ERROR_INCORRECT_PARTITION_KEY_IN_BATCH = \
    'Partition Key should be the same in a batch operations'
_ERROR_DUPLICATE_ROW_KEY_IN_BATCH = \
    'Row Keys should not be the same in a batch operations'
_ERROR_BATCH_COMMIT_FAIL = 'Batch Commit Fail'
_ERROR_MESSAGE_NOT_PEEK_LOCKED_ON_DELETE = \
    'Message is not peek locked and cannot be deleted.'
_ERROR_MESSAGE_NOT_PEEK_LOCKED_ON_UNLOCK = \
    'Message is not peek locked and cannot be unlocked.'
_ERROR_QUEUE_NOT_FOUND = 'Queue was not found'
_ERROR_TOPIC_NOT_FOUND = 'Topic was not found'
_ERROR_CONFLICT = 'Conflict ({0})'
_ERROR_NOT_FOUND = 'Not found ({0})'
_ERROR_UNKNOWN = 'Unknown error ({0})'
_ERROR_SERVICEBUS_MISSING_INFO = \
    'You need to provide servicebus namespace, access key and Issuer'
_ERROR_STORAGE_MISSING_INFO = \
    'You need to provide both account name and access key'
_ERROR_ACCESS_POLICY = \
    'share_access_policy must be either SignedIdentifier or AccessPolicy ' + \
    'instance'
_WARNING_VALUE_SHOULD_BE_BYTES = \
    'Warning: {0} must be bytes data type. It will be converted ' + \
    'automatically, with utf-8 text encoding.'
_ERROR_VALUE_SHOULD_BE_BYTES = '{0} should be of type bytes.'
_ERROR_VALUE_NONE = '{0} should not be None.'
_ERROR_VALUE_NEGATIVE = '{0} should not be negative.'
_ERROR_CANNOT_SERIALIZE_VALUE_TO_ENTITY = \
    'Cannot serialize the specified value ({0}) to an entity. Please use ' + \
    'an EntityProperty (which can specify custom types), int, str, bool, ' + \
    'or datetime.'
_ERROR_PAGE_BLOB_SIZE_ALIGNMENT = \
    'Invalid page blob size: {0}. ' + \
    'The size must be aligned to a 512-byte boundary.'
_USER_AGENT_STRING = 'pyazure/' + __version__
# XML namespace used by OData/atom metadata attributes (e.g. m:etag)
METADATA_NS = 'http://schemas.microsoft.com/ado/2007/08/dataservices/metadata'
class WindowsAzureData(object):
    '''Marker base class for every Azure data class.
    Only used for isinstance checks during (de)serialization.'''
    pass
class WindowsAzureError(Exception):
    '''Base class of all Windows Azure exceptions.'''
    def __init__(self, message):
        super(WindowsAzureError, self).__init__(message)
class WindowsAzureConflictError(WindowsAzureError):
    '''Raised when a resource could not be created because it
    already exists.'''
    def __init__(self, message):
        super(WindowsAzureConflictError, self).__init__(message)
class WindowsAzureMissingResourceError(WindowsAzureError):
    '''Raised when a request for a resource (queue, table, container,
    etc...) failed because the specified resource does not exist.'''
    def __init__(self, message):
        super(WindowsAzureMissingResourceError, self).__init__(message)
class WindowsAzureBatchOperationError(WindowsAzureError):
    '''Raised when one operation of a batch fails; *code* holds the
    error code reported by the service.'''
    def __init__(self, message, code):
        super(WindowsAzureBatchOperationError, self).__init__(message)
        self.code = code
class Feed(object):
    '''Marker base class for objects deserialized from atom feeds.'''
    pass
class _Base64String(str):
    '''str subclass marking values that travel base64-encoded on the wire.'''
    pass
class HeaderDict(dict):
    '''dict that lower-cases keys on lookup (HTTP header names are
    case-insensitive). Note: keys are stored exactly as inserted.'''
    def __getitem__(self, key):
        return super(HeaderDict, self).__getitem__(key.lower())
def _encode_base64(data):
    '''Base64-encode *data* (text or bytes) and return the result as text.'''
    if isinstance(data, _unicode_type):
        data = data.encode('utf-8')
    return base64.b64encode(data).decode('utf-8')
def _decode_base64_to_bytes(data):
    '''Decode a base64 value (text or bytes) into raw bytes.'''
    if isinstance(data, _unicode_type):
        data = data.encode('utf-8')
    return base64.b64decode(data)
def _decode_base64_to_text(data):
    '''Decode a base64 value and interpret the resulting bytes as UTF-8 text.'''
    return _decode_base64_to_bytes(data).decode('utf-8')
def _get_readable_id(id_name, id_prefix_to_skip):
    """simplified an id to be more friendly for us people"""
    # id_name is in the form 'https://namespace.host.suffix/name'
    # where name may contain a forward slash!
    scheme_end = id_name.find('//')
    if scheme_end == -1:
        return id_name
    start = scheme_end + 2
    if id_prefix_to_skip:
        start = id_name.find(id_prefix_to_skip, start)
        if start != -1:
            start += len(id_prefix_to_skip)
        # NOTE: when the prefix is absent, start stays -1 and find()
        # below treats it as an index from the end (kept from original)
    slash = id_name.find('/', start)
    if slash != -1:
        return id_name[slash + 1:]
    return id_name
def _get_entry_properties_from_node(entry, include_id, id_prefix_to_skip=None, use_title_as_id=False):
    ''' get properties from entry xml

    Extracts etag, updated timestamp, author and (optionally) a name
    from an atom <entry> DOM node into a plain dict.
    '''
    properties = {}
    etag = entry.getAttributeNS(METADATA_NS, 'etag')
    if etag:
        properties['etag'] = etag
    # For repeated elements, the last occurrence wins
    for updated in _get_child_nodes(entry, 'updated'):
        properties['updated'] = updated.firstChild.nodeValue
    for name in _get_children_from_path(entry, 'author', 'name'):
        if name.firstChild is not None:
            properties['author'] = name.firstChild.nodeValue
    if include_id:
        if use_title_as_id:
            for title in _get_child_nodes(entry, 'title'):
                properties['name'] = title.firstChild.nodeValue
        else:
            for id in _get_child_nodes(entry, 'id'):
                # The <id> is a full URL; strip scheme/host (and the
                # optional prefix) to get a human-friendly name
                properties['name'] = _get_readable_id(
                    id.firstChild.nodeValue, id_prefix_to_skip)
    return properties
def _get_entry_properties(xmlstr, include_id, id_prefix_to_skip=None):
    ''' get properties from entry xml (string form) '''
    xmldoc = minidom.parseString(xmlstr)
    properties = {}
    for entry in _get_child_nodes(xmldoc, 'entry'):
        entry_props = _get_entry_properties_from_node(
            entry, include_id, id_prefix_to_skip)
        properties.update(entry_props)
    return properties
def _get_first_child_node_value(parent_node, node_name):
    '''Return the text of the first child named *node_name*, or None.'''
    elements = _get_child_nodes(parent_node, node_name)
    if not elements:
        return None
    first = elements[0]
    if first.firstChild:
        return first.firstChild.nodeValue
    return None
def _get_child_nodes(node, tagName):
    '''Return the *direct* children of *node* named *tagName*
    (getElementsByTagName alone would also return grandchildren).'''
    matches = node.getElementsByTagName(tagName)
    return [child for child in matches if child.parentNode == node]
def _get_children_from_path(node, *path):
    '''descends through a hierarchy of nodes returning the list of children
    at the inner most level. Only returns children who share a common parent,
    not cousins.'''
    cur = node
    last = len(path) - 1
    for depth, step in enumerate(path):
        # A plain string selects by tag name, anything else is
        # unpacked as (namespace, tag name)
        if isinstance(step, _strtype):
            matches = _get_child_nodes(cur, step)
        else:
            matches = _get_child_nodesNS(cur, *step)
        if depth == last:
            return matches
        if not matches:
            break
        # Keep descending through the first match only
        cur = matches[0]
    return []
def _get_child_nodesNS(node, ns, tagName):
    '''Return the direct children of *node* named *tagName* within
    namespace *ns*.'''
    matches = node.getElementsByTagNameNS(ns, tagName)
    return [child for child in matches if child.parentNode == node]
def _create_entry(entry_body):
    ''' Adds common part of entry to a given entry body and return the whole
    xml. '''
    updated_str = datetime.utcnow().isoformat()
    # utcnow() returns a naive datetime, so utcoffset() is always None
    # and the explicit '+00:00' UTC offset is always appended.
    if datetime.utcnow().utcoffset() is None:
        updated_str += '+00:00'
    entry_start = '''<?xml version="1.0" encoding="utf-8" standalone="yes"?>
<entry xmlns:d="http://schemas.microsoft.com/ado/2007/08/dataservices" xmlns:m="http://schemas.microsoft.com/ado/2007/08/dataservices/metadata" xmlns="http://www.w3.org/2005/Atom" >
<title /><updated>{updated}</updated><author><name /></author><id />
<content type="application/xml">
    {body}</content></entry>'''
    return entry_start.format(updated=updated_str, body=entry_body)
def _to_datetime(strtime):
    '''Parse an ISO-8601 style timestamp (with microseconds) into a
    naive datetime.'''
    return datetime.strptime(strtime, "%Y-%m-%dT%H:%M:%S.%f")
# Special-case Python-name -> wire-name mappings that the generic
# capitalization rule in _get_serialization_name() cannot derive.
_KNOWN_SERIALIZATION_XFORMS = {
    'include_apis': 'IncludeAPIs',
    'message_id': 'MessageId',
    'content_md5': 'Content-MD5',
    'last_modified': 'Last-Modified',
    'cache_control': 'Cache-Control',
    'account_admin_live_email_id': 'AccountAdminLiveEmailId',
    'service_admin_live_email_id': 'ServiceAdminLiveEmailId',
    'subscription_id': 'SubscriptionID',
    'fqdn': 'FQDN',
    'private_id': 'PrivateID',
    'os_virtual_hard_disk': 'OSVirtualHardDisk',
    'logical_disk_size_in_gb': 'LogicalDiskSizeInGB',
    'logical_size_in_gb': 'LogicalSizeInGB',
    'os': 'OS',
    'persistent_vm_downtime_info': 'PersistentVMDowntimeInfo',
    'copy_id': 'CopyId',
}
def _get_serialization_name(element_name):
    """converts a Python name into a serializable name"""
    # Irregular names (e.g. Content-MD5) come from the lookup table
    known = _KNOWN_SERIALIZATION_XFORMS.get(element_name)
    if known is not None:
        return known
    if element_name.startswith('x_ms_'):
        # extension headers keep their lowercase, dash-separated form
        return element_name.replace('_', '-')
    if element_name.endswith('_id'):
        element_name = element_name.replace('_id', 'ID')
    for name in ['content_', 'last_modified', 'if_', 'cache_control']:
        if element_name.startswith(name):
            # '-_' inserts the dash of header-style names while keeping
            # an '_' split point for the capitalize() pass below
            element_name = element_name.replace('_', '-_')
    return ''.join(name.capitalize() for name in element_name.split('_'))
# Text-type compatibility shims: _unicode_type is the native text type,
# _str() converts any value to a (byte) string on Python 2.
if sys.version_info < (3,):
    _unicode_type = unicode
    def _str(value):
        # Encode unicode to UTF-8 bytes; everything else goes via str()
        if isinstance(value, unicode):
            return value.encode('utf-8')
        return str(value)
else:
    _str = str
    _unicode_type = str
def _str_or_none(value):
    '''Stringify *value* with _str(), passing None through unchanged.'''
    return None if value is None else _str(value)
def _int_or_none(value):
    '''Return the decimal string form of *value* (coerced via int),
    or None when *value* is None.'''
    return None if value is None else str(int(value))
def _bool_or_none(value):
    '''Serialize booleans as 'true'/'false', other values via str(),
    and pass None through unchanged.'''
    if value is None:
        return None
    if value is True:
        return 'true'
    if value is False:
        return 'false'
    return str(value)
def _convert_class_to_xml(source, xml_prefix=True):
    '''Recursively serialize a WindowsAzureData instance (or a list of
    them) into an XML string.

    :param xml_prefix: when True, prepend the <?xml ...?> declaration.
    '''
    if source is None:
        return ''
    xmlstr = ''
    if xml_prefix:
        xmlstr = '<?xml version="1.0" encoding="utf-8"?>'
    if isinstance(source, list):
        # Lists have no wrapping element; each item serializes in turn
        for value in source:
            xmlstr += _convert_class_to_xml(value, False)
    elif isinstance(source, WindowsAzureData):
        # The element name is the Python class name
        class_name = source.__class__.__name__
        xmlstr += '<' + class_name + '>'
        for name, value in vars(source).items():
            if value is not None:
                if isinstance(value, list) or \
                    isinstance(value, WindowsAzureData):
                    xmlstr += _convert_class_to_xml(value, False)
                else:
                    # Scalar attribute: single element with escaped text
                    xmlstr += ('<' + _get_serialization_name(name) + '>' +
                               xml_escape(str(value)) + '</' +
                               _get_serialization_name(name) + '>')
        xmlstr += '</' + class_name + '>'
    return xmlstr
def _find_namespaces_from_child(parent, child, namespaces):
    """Recursively searches from the parent to the child,
    gathering all the applicable namespaces along the way"""
    for cur_child in parent.childNodes:
        if cur_child is child:
            # Target found directly below us; tell the caller
            return True
        if _find_namespaces_from_child(cur_child, child, namespaces):
            # we are the parent node: record its xmlns declarations
            for key in cur_child.attributes.keys():
                if key.startswith('xmlns:') or key == 'xmlns':
                    namespaces[key] = cur_child.attributes[key]
            # NOTE(review): False is returned after the break, so
            # declarations on ancestors further up are not collected
            # here -- _find_namespaces() adds the document element's
            # declarations separately. Confirm this is intended.
            break
    return False
def _find_namespaces(parent, child):
    '''Collect the xmlns declarations applying to *child* within the
    document *parent*.'''
    namespaces = {}
    # Declarations on the document element itself ...
    root = parent.documentElement
    for key in root.attributes.keys():
        if key == 'xmlns' or key.startswith('xmlns:'):
            namespaces[key] = root.attributes[key]
    # ... plus whatever is declared on the path down to the child
    _find_namespaces_from_child(parent, child, namespaces)
    return namespaces
def _clone_node_with_namespaces(node_to_clone, original_doc):
    '''Deep-copy a node, re-attaching the xmlns declarations it relied
    on so the clone can be serialized standalone.'''
    clone = node_to_clone.cloneNode(True)
    namespaces = _find_namespaces(original_doc, node_to_clone)
    for key, value in namespaces.items():
        clone.attributes[key] = value
    return clone
def _convert_response_to_feeds(response, convert_callback):
    '''Parse an atom feed response body into a list of entry objects.

    ``convert_callback`` is either a WindowsAzureData subclass (entries
    are deserialized into fresh instances) or a callable receiving the
    raw XML of each entry.
    '''
    if response is None:
        return None
    feeds = _list_of(Feed)
    # Collect paging continuation headers, keyed by their suffix
    # (e.g. 'x-ms-continuation-NextTableName' -> 'NextTableName')
    x_ms_continuation = HeaderDict()
    for name, value in response.headers:
        if 'x-ms-continuation' in name:
            x_ms_continuation[name[len('x-ms-continuation') + 1:]] = value
    if x_ms_continuation:
        setattr(feeds, 'x_ms_continuation', x_ms_continuation)
    xmldoc = minidom.parseString(response.body)
    xml_entries = _get_children_from_path(xmldoc, 'feed', 'entry')
    if not xml_entries:
        # in some cases, response contains only entry but no feed
        xml_entries = _get_children_from_path(xmldoc, 'entry')
    if inspect.isclass(convert_callback) and issubclass(convert_callback, WindowsAzureData):
        for xml_entry in xml_entries:
            return_obj = convert_callback()
            # The payload lives under <content><TypeName>...</TypeName>
            for node in _get_children_from_path(xml_entry,
                                                'content',
                                                convert_callback.__name__):
                _fill_data_to_return_object(node, return_obj)
            # Also attach entry metadata (etag, updated, name, ...)
            for name, value in _get_entry_properties_from_node(xml_entry,
                                                               include_id=True,
                                                               use_title_as_id=True).items():
                setattr(return_obj, name, value)
            feeds.append(return_obj)
    else:
        for xml_entry in xml_entries:
            # Re-attach namespaces so the fragment parses standalone
            new_node = _clone_node_with_namespaces(xml_entry, xmldoc)
            feeds.append(convert_callback(new_node.toxml('utf-8')))
    return feeds
def _validate_type_bytes(param_name, param):
    '''Raise TypeError unless *param* is a bytes object.'''
    if isinstance(param, bytes):
        return
    raise TypeError(_ERROR_VALUE_SHOULD_BE_BYTES.format(param_name))
def _validate_not_none(param_name, param):
    '''Raise TypeError when *param* is None (falsy values are fine).'''
    if param is not None:
        return
    raise TypeError(_ERROR_VALUE_NONE.format(param_name))
def _fill_list_of(xmldoc, element_type, xml_element_name):
    '''Deserialize every direct child named *xml_element_name* into an
    *element_type* instance.'''
    children = _get_child_nodes(xmldoc, xml_element_name)
    return [_parse_response_body_from_xml_node(child, element_type)
            for child in children]
def _fill_scalar_list_of(xmldoc, element_type, parent_xml_element_name,
                         xml_element_name):
    '''Converts an xml fragment into a list of scalar types. The parent xml
    element contains a flat list of xml elements which are converted into the
    specified scalar type and added to the list.
    Example:
    xmldoc=
<Endpoints>
    <Endpoint>http://{storage-service-name}.blob.core.windows.net/</Endpoint>
    <Endpoint>http://{storage-service-name}.queue.core.windows.net/</Endpoint>
    <Endpoint>http://{storage-service-name}.table.core.windows.net/</Endpoint>
</Endpoints>
    element_type=str
    parent_xml_element_name='Endpoints'
    xml_element_name='Endpoint'
    '''
    parents = _get_child_nodes(xmldoc, parent_xml_element_name)
    if not parents:
        # No container element: implicit None, as before
        return None
    children = _get_child_nodes(parents[0], xml_element_name)
    return [_get_node_value(child, element_type) for child in children]
def _fill_dict(xmldoc, element_name):
    '''Build a {tag: text} dict from the children of the first element
    named *element_name*; None when no such element exists.'''
    elements = _get_child_nodes(xmldoc, element_name)
    if not elements:
        return None
    result = {}
    for child in elements[0].childNodes:
        if child.firstChild:
            result[child.nodeName] = child.firstChild.nodeValue
    return result
def _fill_dict_of(xmldoc, parent_xml_element_name, pair_xml_element_name,
                  key_xml_element_name, value_xml_element_name):
    '''Converts an xml fragment into a dictionary. The parent xml element
    contains a list of xml elements where each element has a child element for
    the key, and another for the value.
    Example:
    xmldoc=
<ExtendedProperties>
    <ExtendedProperty>
        <Name>Ext1</Name>
        <Value>Val1</Value>
    </ExtendedProperty>
    <ExtendedProperty>
        <Name>Ext2</Name>
        <Value>Val2</Value>
    </ExtendedProperty>
</ExtendedProperties>
    element_type=str
    parent_xml_element_name='ExtendedProperties'
    pair_xml_element_name='ExtendedProperty'
    key_xml_element_name='Name'
    value_xml_element_name='Value'
    '''
    result = {}
    parents = _get_child_nodes(xmldoc, parent_xml_element_name)
    if parents:
        for pair in _get_child_nodes(parents[0], pair_xml_element_name):
            keys = _get_child_nodes(pair, key_xml_element_name)
            values = _get_child_nodes(pair, value_xml_element_name)
            # Pairs missing either side are silently skipped
            if keys and values:
                result[keys[0].firstChild.nodeValue] = \
                    values[0].firstChild.nodeValue
    return result
def _fill_instance_child(xmldoc, element_name, return_type):
    '''Converts a child of the current dom element to the specified type.
    Returns None when no matching child exists.'''
    matches = _get_child_nodes(
        xmldoc, _get_serialization_name(element_name))
    if not matches:
        return None
    instance = return_type()
    _fill_data_to_return_object(matches[0], instance)
    return instance
def _fill_instance_element(element, return_type):
    """Converts a DOM element into the specified object (thin wrapper
    over _parse_response_body_from_xml_node)."""
    return _parse_response_body_from_xml_node(element, return_type)
def _fill_data_minidom(xmldoc, element_name, data_member):
    '''Read the text of the child named *element_name*, coerced to the
    type of *data_member* (None means: keep the raw string).'''
    matches = _get_child_nodes(
        xmldoc, _get_serialization_name(element_name))
    if not matches or not matches[0].childNodes:
        return None
    value = matches[0].firstChild.nodeValue
    if data_member is None:
        return value
    if isinstance(data_member, datetime):
        return _to_datetime(value)
    if type(data_member) is bool:
        # Anything but the literal 'false' is treated as True
        return value.lower() != 'false'
    return type(data_member)(value)
def _get_node_value(xmlelement, data_type):
    '''Return the element's text content converted to *data_type*.'''
    value = xmlelement.firstChild.nodeValue
    if data_type is datetime:
        return _to_datetime(value)
    if data_type is bool:
        # Anything but the literal 'false' is treated as True
        return value.lower() != 'false'
    return data_type(value)
def _get_request_body_bytes_only(param_name, param_value):
    '''Validates the request body passed in and converts it to bytes
    if our policy allows it.'''
    if param_value is None:
        return b''
    if isinstance(param_value, bytes):
        return param_value
    # Previous versions of the SDK auto-converted non-bytes data.
    # That behavior is preserved on Python 2 (with a warning);
    # Python 3 support is new, so anything that's not bytes is rejected.
    if sys.version_info < (3,):
        warnings.warn(_WARNING_VALUE_SHOULD_BE_BYTES.format(param_name))
        return _get_request_body(param_value)
    raise TypeError(_ERROR_VALUE_SHOULD_BE_BYTES.format(param_name))
def _get_request_body(request_body):
    '''Converts an object into a request body (bytes). If it's None
    we'll return an empty byte string, if it's one of our objects it'll
    convert it to XML first. Text is UTF-8 encoded; anything else is
    stringified and then encoded.'''
    if request_body is None:
        return b''
    if isinstance(request_body, WindowsAzureData):
        request_body = _convert_class_to_xml(request_body)
    if isinstance(request_body, bytes):
        return request_body
    if isinstance(request_body, _unicode_type):
        return request_body.encode('utf-8')
    request_body = str(request_body)
    # NOTE(review): presumably this second check guards Python 2 cases
    # where str() can still hand back unicode text — confirm before
    # simplifying
    if isinstance(request_body, _unicode_type):
        return request_body.encode('utf-8')
    return request_body
def _parse_enum_results_list(response, return_type, resp_type, item_type):
    """Parse an enumeration response into a *return_type* instance.

    resp_type is a string, such as 'Containers'; return_type is the type
    being constructed, such as ContainerEnumResults; item_type is the
    type object of each item, such as Container.  The items are stored
    on the result under the lowercased resp_type attribute; every other
    attribute is filled from sibling elements of the list.
    """
    # parsing something like:
    # <EnumerationResults ... >
    #   <Queues>
    #       <Queue>
    #           <Something />
    #           <SomethingElse />
    #       </Queue>
    #   </Queues>
    # </EnumerationResults>
    respbody = response.body
    return_obj = return_type()
    doc = minidom.parseString(respbody)
    items = []
    for enum_results in _get_child_nodes(doc, 'EnumerationResults'):
        # path is something like Queues, Queue (plural wrapper, then the
        # singular element name derived by dropping the trailing 's')
        for child in _get_children_from_path(enum_results,
                                             resp_type,
                                             resp_type[:-1]):
            items.append(_fill_instance_element(child, item_type))
        for name, value in vars(return_obj).items():
            # queues, Queues, this is the list its self which we populated
            # above
            if name == resp_type.lower():
                # the list its self.
                continue
            value = _fill_data_minidom(enum_results, name, value)
            if value is not None:
                setattr(return_obj, name, value)
    setattr(return_obj, resp_type.lower(), items)
    return return_obj
def _parse_simple_list(response, type, item_type, list_name):
    '''Parse an XML response whose root element (named after *type*)
    contains a flat list of *item_type* elements; return a *type*
    instance with the parsed items stored under *list_name*.'''
    document = minidom.parseString(response.body)
    result = type()
    parsed_items = [
        _fill_instance_element(node, item_type)
        for node in _get_children_from_path(
            document, type.__name__, item_type.__name__)
    ]
    setattr(result, list_name, parsed_items)
    return result
def _parse_response(response, return_type):
    '''
    Parse the HTTPResponse's body and fill all the data into a class of
    return_type.  Convenience wrapper over
    _parse_response_body_from_xml_text.
    '''
    return _parse_response_body_from_xml_text(response.body, return_type)
def _parse_service_resources_response(response, return_type):
    '''
    Parse the HTTPResponse's body (a <ServiceResources> document) into a
    list of return_type instances, one per <ServiceResource> element.
    '''
    return _parse_response_body_from_service_resources_xml_text(response.body, return_type)
def _fill_data_to_return_object(node, return_obj):
    """Populate *return_obj*'s attributes from DOM *node*.

    Dispatches on the declared (placeholder) type of each attribute:
    _list_of / _scalar_list_of / _dict_of markers, _xml_attribute
    markers, nested WindowsAzureData objects, plain dicts, base64
    strings, or simple scalars handled by _fill_data_minidom.
    """
    members = dict(vars(return_obj))
    for name, value in members.items():
        if isinstance(value, _list_of):
            setattr(return_obj,
                    name,
                    _fill_list_of(node,
                                  value.list_type,
                                  value.xml_element_name))
        elif isinstance(value, _scalar_list_of):
            setattr(return_obj,
                    name,
                    _fill_scalar_list_of(node,
                                         value.list_type,
                                         _get_serialization_name(name),
                                         value.xml_element_name))
        elif isinstance(value, _dict_of):
            setattr(return_obj,
                    name,
                    _fill_dict_of(node,
                                  _get_serialization_name(name),
                                  value.pair_xml_element_name,
                                  value.key_xml_element_name,
                                  value.value_xml_element_name))
        elif isinstance(value, _xml_attribute):
            # value comes from an XML attribute on the node itself, not
            # from a child element
            real_value = None
            if node.hasAttribute(value.xml_element_name):
                real_value = node.getAttribute(value.xml_element_name)
            if real_value is not None:
                setattr(return_obj, name, real_value)
        elif isinstance(value, WindowsAzureData):
            setattr(return_obj,
                    name,
                    _fill_instance_child(node, name, value.__class__))
        elif isinstance(value, dict):
            setattr(return_obj,
                    name,
                    _fill_dict(node, _get_serialization_name(name)))
        elif isinstance(value, _Base64String):
            value = _fill_data_minidom(node, name, '')
            if value is not None:
                value = _decode_base64_to_text(value)
            # always set the attribute, so we don't end up returning an object
            # with type _Base64String
            setattr(return_obj, name, value)
        else:
            value = _fill_data_minidom(node, name, value)
            if value is not None:
                setattr(return_obj, name, value)
def _parse_response_body_from_xml_node(node, return_type):
    '''
    Deserialize a single XML DOM node into a new *return_type* instance.
    '''
    instance = return_type()
    _fill_data_to_return_object(node, instance)
    return instance
def _parse_response_body_from_xml_text(respbody, return_type):
    '''
    Parse an XML document and deserialize its root element into a new
    *return_type* instance.  The root element name is taken from
    return_type._xml_name when present, else from the class name.
    '''
    document = minidom.parseString(respbody)
    instance = return_type()
    root_name = getattr(return_type, '_xml_name', return_type.__name__)
    for node in _get_child_nodes(document, root_name):
        _fill_data_to_return_object(node, instance)
    return instance
def _parse_response_body_from_service_resources_xml_text(respbody, return_type):
    '''
    Parse a <ServiceResources> XML document into a _list_of(return_type)
    holding one deserialized instance per <ServiceResource> element.
    '''
    document = minidom.parseString(respbody)
    resources = _list_of(return_type)
    for node in _get_children_from_path(
            document, "ServiceResources", "ServiceResource"):
        item = return_type()
        _fill_data_to_return_object(node, item)
        resources.append(item)
    return resources
class _dict_of(dict):
"""a dict which carries with it the xml element names for key,val.
Used for deserializaion and construction of the lists"""
def __init__(self, pair_xml_element_name, key_xml_element_name,
value_xml_element_name):
self.pair_xml_element_name = pair_xml_element_name
self.key_xml_element_name = key_xml_element_name
self.value_xml_element_name = value_xml_element_name
super(_dict_of, self).__init__()
class _list_of(list):
"""a list which carries with it the type that's expected to go in it.
Used for deserializaion and construction of the lists"""
def __init__(self, list_type, xml_element_name=None):
self.list_type = list_type
if xml_element_name is None:
self.xml_element_name = list_type.__name__
else:
self.xml_element_name = xml_element_name
super(_list_of, self).__init__()
class _scalar_list_of(list):
"""a list of scalar types which carries with it the type that's
expected to go in it along with its xml element name.
Used for deserializaion and construction of the lists"""
def __init__(self, list_type, xml_element_name):
self.list_type = list_type
self.xml_element_name = xml_element_name
super(_scalar_list_of, self).__init__()
class _xml_attribute:
"""a accessor to XML attributes
expected to go in it along with its xml element name.
Used for deserialization and construction"""
def __init__(self, xml_element_name):
self.xml_element_name = xml_element_name
def _update_request_uri_query_local_storage(request, use_local_storage):
    ''' create correct uri and query for the request, prefixing the path
    with the development storage account name when *use_local_storage*
    is set '''
    uri, query = _update_request_uri_query(request)
    if not use_local_storage:
        return uri, query
    return '/' + DEV_ACCOUNT_NAME + uri, query
def _update_request_uri_query(request):
    '''pulls the query string out of the URI and moves it into
    the query portion of the request object. If there are already
    query parameters on the request the parameters in the URI will
    appear after the existing parameters.  Returns the re-encoded
    (path, query) pair and mutates request in place.'''
    if '?' in request.path:
        request.path, _, query_string = request.path.partition('?')
        if query_string:
            query_params = query_string.split('&')
            for query in query_params:
                if '=' in query:
                    name, _, value = query.partition('=')
                    request.query.append((name, value))
    request.path = url_quote(request.path, '/()$=\',')
    # add encoded queries to request.path.
    if request.query:
        request.path += '?'
        for name, value in request.query:
            if value is not None:
                request.path += name + '=' + url_quote(value, '/()$=\',') + '&'
        # drop the trailing '&' left by the loop above
        request.path = request.path[:-1]
    return request.path, request.query
def _dont_fail_on_exist(error):
    ''' don't throw exception if the resource exists.
    This is called by create_* APIs with fail_on_exist=False; returns
    False (nothing created) for conflicts and re-raises anything else.'''
    if not isinstance(error, WindowsAzureConflictError):
        raise error
    return False
def _dont_fail_not_exist(error):
    ''' don't throw exception if the resource doesn't exist.
    This is called by delete_* APIs with fail_not_exist=False; returns
    False (nothing deleted) for missing resources and re-raises anything
    else.'''
    if not isinstance(error, WindowsAzureMissingResourceError):
        raise error
    return False
def _general_error_handler(http_error):
    ''' Simple error handler for azure: maps an HTTP error onto the
    matching WindowsAzure* exception (409 -> conflict, 404 -> missing
    resource, anything else -> generic error, including the response
    body when one is available).'''
    status = http_error.status
    if status == 409:
        raise WindowsAzureConflictError(
            _ERROR_CONFLICT.format(str(http_error)))
    if status == 404:
        raise WindowsAzureMissingResourceError(
            _ERROR_NOT_FOUND.format(str(http_error)))
    message = _ERROR_UNKNOWN.format(str(http_error))
    if http_error.respbody is not None:
        message += '\n' + http_error.respbody.decode('utf-8')
    raise WindowsAzureError(message)
def _parse_response_for_dict(response):
    ''' Extracts name-values from response headers into a HeaderDict,
    filtering out the standard transport headers that carry no
    service-specific information.'''
    if response is None:
        return None
    skipped = frozenset(['server', 'date', 'location', 'host',
                         'via', 'proxy-connection', 'connection'])
    headers = HeaderDict()
    for name, value in (response.headers or []):
        if name.lower() not in skipped:
            headers[name] = value
    return headers
def _parse_response_for_dict_prefix(response, prefixes):
    ''' Extracts the non-standard response headers whose names start
    with any of *prefixes* (case-insensitive); returns None when there
    are no headers to filter.'''
    if response is None:
        return None
    headers = _parse_response_for_dict(response)
    if not headers:
        return None
    lowered = [prefix.lower() for prefix in prefixes]
    return_dict = {}
    for name, value in headers.items():
        if any(name.lower().startswith(p) for p in lowered):
            return_dict[name] = value
    return return_dict
def _parse_response_for_dict_filter(response, filter):
    ''' Extracts the non-standard response headers whose lowercased
    names appear in *filter*; returns None when there are no headers to
    filter.'''
    if response is None:
        return None
    headers = _parse_response_for_dict(response)
    if not headers:
        return None
    return {name: value
            for name, value in headers.items()
            if name.lower() in filter}
def _sign_string(key, string_to_sign, key_is_base64=True):
    '''HMAC-SHA256 sign *string_to_sign* with *key* and return the
    signature base64-encoded.  The key is base64-decoded first unless
    key_is_base64 is False, in which case it is only UTF-8 encoded.'''
    if key_is_base64:
        key = _decode_base64_to_bytes(key)
    elif isinstance(key, _unicode_type):
        key = key.encode('utf-8')
    if isinstance(string_to_sign, _unicode_type):
        string_to_sign = string_to_sign.encode('utf-8')
    mac = hmac.HMAC(key, string_to_sign, hashlib.sha256)
    return _encode_base64(mac.digest())
| |
import itertools
import functools
import operator
import warnings
from distutils.version import LooseVersion
import numpy as np
from pandas import compat
from pandas._libs import tslibs, lib
from pandas.core.dtypes.common import (
_get_dtype,
is_float, is_scalar,
is_integer, is_complex, is_float_dtype,
is_complex_dtype, is_integer_dtype,
is_bool_dtype, is_object_dtype,
is_numeric_dtype,
is_datetime64_dtype, is_timedelta64_dtype,
is_datetime_or_timedelta_dtype,
is_int_or_datetime_dtype, is_any_int_dtype)
from pandas.core.dtypes.cast import _int64_max, maybe_upcast_putmask
from pandas.core.dtypes.missing import isna, notna, na_value_for_dtype
from pandas.core.config import get_option
import pandas.core.common as com
# Optional bottleneck acceleration: detect the library and require at
# least _MIN_BOTTLENECK_VERSION before the nan* reductions will use it.
_BOTTLENECK_INSTALLED = False
_MIN_BOTTLENECK_VERSION = '1.0.0'
try:
    import bottleneck as bn
    ver = bn.__version__
    _BOTTLENECK_INSTALLED = (LooseVersion(ver) >=
                             LooseVersion(_MIN_BOTTLENECK_VERSION))
    if not _BOTTLENECK_INSTALLED:
        # present but too old: warn once at import time and fall back to
        # the pure numpy implementations
        warnings.warn(
            "The installed version of bottleneck {ver} is not supported "
            "in pandas and will be not be used\nThe minimum supported "
            "version is {min_ver}\n".format(
                ver=ver, min_ver=_MIN_BOTTLENECK_VERSION), UserWarning)
except ImportError:  # pragma: no cover
    pass
# actual use is additionally gated by the compute.use_bottleneck option
# via set_use_bottleneck below
_USE_BOTTLENECK = False
def set_use_bottleneck(v=True):
    """Enable/disable use of bottleneck (honored only when a supported
    version is installed)."""
    # set/unset to use bottleneck
    global _USE_BOTTLENECK
    if _BOTTLENECK_INSTALLED:
        _USE_BOTTLENECK = v
# initialize from the pandas option at import time
set_use_bottleneck(get_option('compute.use_bottleneck'))
class disallow(object):
    """Decorator that rejects reduction arguments whose dtype matches
    one of *dtypes* (numpy dtype strings such as 'M8', 'm8', 'O'),
    raising TypeError before the wrapped function runs."""
    def __init__(self, *dtypes):
        super(disallow, self).__init__()
        self.dtypes = tuple(np.dtype(dtype).type for dtype in dtypes)
    def check(self, obj):
        # only objects exposing a .dtype (ndarray-likes) can match
        return hasattr(obj, 'dtype') and issubclass(obj.dtype.type,
                                                    self.dtypes)
    def __call__(self, f):
        @functools.wraps(f)
        def _f(*args, **kwargs):
            obj_iter = itertools.chain(args, compat.itervalues(kwargs))
            if any(self.check(obj) for obj in obj_iter):
                msg = 'reduction operation {name!r} not allowed for this dtype'
                raise TypeError(msg.format(name=f.__name__.replace('nan', '')))
            try:
                with np.errstate(invalid='ignore'):
                    return f(*args, **kwargs)
            except ValueError as e:
                # we want to transform an object array
                # ValueError message to the more typical TypeError
                # e.g. this is normally a disallowed function on
                # object arrays that contain strings
                if is_object_dtype(args[0]):
                    raise TypeError(e)
                raise
        return _f
class bottleneck_switch(object):
    """Decorator that routes a nan-reduction through bottleneck when it
    is enabled and safe for the dtype, falling back to the decorated
    pure-numpy implementation otherwise.  Keyword arguments given to the
    decorator become default kwargs of the wrapped function."""
    def __init__(self, **kwargs):
        self.kwargs = kwargs
    def __call__(self, alt):
        # bottleneck exposes its functions under the same names as ours
        bn_name = alt.__name__
        try:
            bn_func = getattr(bn, bn_name)
        except (AttributeError, NameError):  # pragma: no cover
            bn_func = None
        @functools.wraps(alt)
        def f(values, axis=None, skipna=True, **kwds):
            if len(self.kwargs) > 0:
                for k, v in compat.iteritems(self.kwargs):
                    if k not in kwds:
                        kwds[k] = v
            try:
                if values.size == 0 and kwds.get('min_count') is None:
                    # We are empty, returning NA for our type
                    # Only applies for the default `min_count` of None
                    # since that affects how empty arrays are handled.
                    # TODO(GH-18976) update all the nanops methods to
                    # correctly handle empty inputs and remove this check.
                    # It *may* just be `var`
                    return _na_for_min_count(values, axis)
                if (_USE_BOTTLENECK and skipna and
                        _bn_ok_dtype(values.dtype, bn_name)):
                    result = bn_func(values, axis=axis, **kwds)
                    # prefer to treat inf/-inf as NA, but must compute the func
                    # twice :(
                    if _has_infs(result):
                        result = alt(values, axis=axis, skipna=skipna, **kwds)
                else:
                    result = alt(values, axis=axis, skipna=skipna, **kwds)
            except Exception:
                try:
                    result = alt(values, axis=axis, skipna=skipna, **kwds)
                except ValueError as e:
                    # we want to transform an object array
                    # ValueError message to the more typical TypeError
                    # e.g. this is normally a disallowed function on
                    # object arrays that contain strings
                    if is_object_dtype(values):
                        raise TypeError(e)
                    raise
            return result
        return f
def _bn_ok_dtype(dt, name):
# Bottleneck chokes on datetime64
if (not is_object_dtype(dt) and not is_datetime_or_timedelta_dtype(dt)):
# GH 15507
# bottleneck does not properly upcast during the sum
# so can overflow
# GH 9422
# further we also want to preserve NaN when all elements
# are NaN, unlinke bottleneck/numpy which consider this
# to be 0
if name in ['nansum', 'nanprod']:
return False
return True
return False
def _has_infs(result):
if isinstance(result, np.ndarray):
if result.dtype == 'f8':
return lib.has_infs_f8(result.ravel())
elif result.dtype == 'f4':
return lib.has_infs_f4(result.ravel())
try:
return np.isinf(result).any()
except (TypeError, NotImplementedError):
# if it doesn't support infs, then it can't have infs
return False
def _get_fill_value(dtype, fill_value=None, fill_value_typ=None):
    """ return the correct fill value for the dtype of the values:
    an explicit *fill_value* wins; otherwise NaN-capable dtypes get
    NaN or +/-inf per *fill_value_typ*, while int/datetime-like dtypes
    get iNaT (or the max int64 as a '+inf' stand-in). """
    if fill_value is not None:
        return fill_value
    if _na_ok_dtype(dtype):
        if fill_value_typ is None:
            return np.nan
        return np.inf if fill_value_typ == '+inf' else -np.inf
    # dtype cannot hold NaN: use integer sentinels instead
    if fill_value_typ == '+inf':
        # need the max int here
        return _int64_max
    return tslibs.iNaT
def _get_values(values, skipna, fill_value=None, fill_value_typ=None,
                isfinite=False, copy=True):
    """ utility to get the values view, mask, dtype
    if necessary copy and mask using the specified fill_value
    copy = True will force the copy

    Returns (values, mask, dtype, dtype_max): the (possibly filled)
    values, the NA mask (non-finite mask when isfinite=True), the
    original dtype, and a platform independent upcast target
    (int64 for ints/bools, float64 for floats).
    """
    values = com._values_from_object(values)
    if isfinite:
        mask = _isfinite(values)
    else:
        mask = isna(values)
    dtype = values.dtype
    dtype_ok = _na_ok_dtype(dtype)
    # get our fill value (in case we need to provide an alternative
    # dtype for it)
    fill_value = _get_fill_value(dtype, fill_value=fill_value,
                                 fill_value_typ=fill_value_typ)
    if skipna:
        if copy:
            values = values.copy()
        if dtype_ok:
            np.putmask(values, mask, fill_value)
        # promote if needed
        else:
            values, changed = maybe_upcast_putmask(values, mask, fill_value)
    elif copy:
        values = values.copy()
    # datetimelike values are viewed as int64 for computation
    values = _view_if_needed(values)
    # return a platform independent precision dtype
    dtype_max = dtype
    if is_integer_dtype(dtype) or is_bool_dtype(dtype):
        dtype_max = np.int64
    elif is_float_dtype(dtype):
        dtype_max = np.float64
    return values, mask, dtype, dtype_max
def _isfinite(values):
if is_datetime_or_timedelta_dtype(values):
return isna(values)
if (is_complex_dtype(values) or is_float_dtype(values) or
is_integer_dtype(values) or is_bool_dtype(values)):
return ~np.isfinite(values)
return ~np.isfinite(values.astype('float64'))
def _na_ok_dtype(dtype):
    # ints and datetimelikes have no native NaN representation, so they
    # cannot hold missing values directly
    return not is_int_or_datetime_dtype(dtype)
def _view_if_needed(values):
    # datetimelike values are computed on via their int64 representation
    if is_datetime_or_timedelta_dtype(values):
        return values.view(np.int64)
    return values
def _wrap_results(result, dtype):
    """ wrap our results if needed: view/box numeric results back to the
    original datetime64/timedelta64 dtype (scalars become Timestamp /
    Timedelta); everything else passes through unchanged """
    if is_datetime64_dtype(dtype):
        if not isinstance(result, np.ndarray):
            result = tslibs.Timestamp(result)
        else:
            result = result.view(dtype)
    elif is_timedelta64_dtype(dtype):
        if not isinstance(result, np.ndarray):
            # raise if we have a timedelta64[ns] which is too large
            if np.fabs(result) > _int64_max:
                raise ValueError("overflow in timedelta operation")
            result = tslibs.Timedelta(result, unit='ns')
        else:
            result = result.astype('i8').view(dtype)
    return result
def _na_for_min_count(values, axis):
    """Return the missing value for `values`.

    Parameters
    ----------
    values : ndarray
    axis : int or None
        axis for the reduction

    Returns
    -------
    result : scalar or ndarray
        For 1-D values, a scalar of the correct missing type.
        For 2-D values, a 1-D array where every element is missing.
    """
    # we either return np.nan or pd.NaT depending on the dtype
    if is_numeric_dtype(values):
        values = values.astype('float64')
    fill_value = na_value_for_dtype(values.dtype)
    if values.ndim == 1:
        return fill_value
    # drop the reduced axis from the shape and fill with the NA value
    shape = values.shape[:axis] + values.shape[axis + 1:]
    result = np.empty(shape, dtype=values.dtype)
    result.fill(fill_value)
    return result
def nanany(values, axis=None, skipna=True):
    """Check whether any element along *axis* is truthy; NA entries are
    filled with False so they can never make the result True."""
    values, mask, dtype, _ = _get_values(values, skipna, False, copy=skipna)
    return values.any(axis)
def nanall(values, axis=None, skipna=True):
    """Check whether all elements along *axis* are truthy; NA entries are
    filled with True so they can never make the result False."""
    values, mask, dtype, _ = _get_values(values, skipna, True, copy=skipna)
    return values.all(axis)
@disallow('M8')
def nansum(values, axis=None, skipna=True, min_count=0):
    """Sum along *axis*, filling NA with 0 when skipna.

    Integer/bool inputs accumulate in int64; floats keep their own
    precision; timedeltas accumulate as float64.  Results built from
    fewer than *min_count* observations are nulled out.
    """
    values, mask, dtype, dtype_max = _get_values(values, skipna, 0)
    dtype_sum = dtype_max
    if is_float_dtype(dtype):
        dtype_sum = dtype
    elif is_timedelta64_dtype(dtype):
        dtype_sum = np.float64
    the_sum = values.sum(axis, dtype=dtype_sum)
    the_sum = _maybe_null_out(the_sum, axis, mask, min_count=min_count)
    return _wrap_results(the_sum, dtype)
@disallow('M8')
@bottleneck_switch()
def nanmean(values, axis=None, skipna=True):
    """Mean along *axis*: NA entries are filled with 0 in the sum and
    excluded from the count; slots with zero observations become NaN."""
    values, mask, dtype, dtype_max = _get_values(values, skipna, 0)
    dtype_sum = dtype_max
    dtype_count = np.float64
    # ints and timedeltas accumulate as float64; floats keep their own
    # precision for both the sum and the count
    if is_integer_dtype(dtype) or is_timedelta64_dtype(dtype):
        dtype_sum = np.float64
    elif is_float_dtype(dtype):
        dtype_sum = dtype
        dtype_count = dtype
    count = _get_counts(mask, axis, dtype=dtype_count)
    the_sum = _ensure_numeric(values.sum(axis, dtype=dtype_sum))
    if axis is not None and getattr(the_sum, 'ndim', False):
        # array result: divide elementwise, NaN where count == 0
        the_mean = the_sum / count
        ct_mask = count == 0
        if ct_mask.any():
            the_mean[ct_mask] = np.nan
    else:
        the_mean = the_sum / count if count > 0 else np.nan
    return _wrap_results(the_mean, dtype)
@disallow('M8')
@bottleneck_switch()
def nanmedian(values, axis=None, skipna=True):
    """Median along *axis*, ignoring NA when skipna (NaN when not
    skipping and any NA is present in the slice)."""
    def get_median(x):
        # 1-D helper: NaN unless all values are present (or skipna)
        mask = notna(x)
        if not skipna and not mask.all():
            return np.nan
        return np.nanmedian(x[mask])
    values, mask, dtype, dtype_max = _get_values(values, skipna)
    if not is_float_dtype(values):
        values = values.astype('f8')
        values[mask] = np.nan
    if axis is None:
        values = values.ravel()
    notempty = values.size
    # an array from a frame
    if values.ndim > 1:
        # there's a non-empty array to apply over otherwise numpy raises
        if notempty:
            if not skipna:
                return _wrap_results(
                    np.apply_along_axis(get_median, axis, values), dtype)
            # fastpath for the skipna case
            return _wrap_results(np.nanmedian(values, axis), dtype)
        # must return the correct shape, but median is not defined for the
        # empty set so return nans of shape "everything but the passed axis"
        # since "axis" is where the reduction would occur if we had a nonempty
        # array
        shp = np.array(values.shape)
        dims = np.arange(values.ndim)
        ret = np.empty(shp[dims != axis])
        ret.fill(np.nan)
        return _wrap_results(ret, dtype)
    # otherwise return a scalar value
    return _wrap_results(get_median(values) if notempty else np.nan, dtype)
def _get_counts_nanvar(mask, axis, ddof, dtype=float):
    """Return (count, d) for a variance computation: count is the number
    of non-NA observations along *axis* and d = count - ddof are the
    degrees of freedom.  Slots with count <= ddof become NaN (never
    inf)."""
    dtype = _get_dtype(dtype)
    count = _get_counts(mask, axis, dtype=dtype)
    d = count - dtype.type(ddof)
    # always return NaN, never inf
    if is_scalar(count):
        if count <= ddof:
            return np.nan, np.nan
        return count, d
    invalid = count <= ddof
    if invalid.any():
        np.putmask(d, invalid, np.nan)
        np.putmask(count, invalid, np.nan)
    return count, d
@disallow('M8')
@bottleneck_switch(ddof=1)
def nanstd(values, axis=None, skipna=True, ddof=1):
    """Standard deviation along *axis*: the square root of nanvar with
    the same arguments, wrapped back to the input's dtype family."""
    result = np.sqrt(nanvar(values, axis=axis, skipna=skipna, ddof=ddof))
    return _wrap_results(result, values.dtype)
@disallow('M8')
@bottleneck_switch(ddof=1)
def nanvar(values, axis=None, skipna=True, ddof=1):
    """Variance along *axis* with *ddof* delta degrees of freedom,
    skipping NA when requested; slots with count <= ddof become NaN."""
    values = com._values_from_object(values)
    dtype = values.dtype
    mask = isna(values)
    if is_any_int_dtype(values):
        values = values.astype('f8')
        values[mask] = np.nan
    if is_float_dtype(values):
        count, d = _get_counts_nanvar(mask, axis, ddof, values.dtype)
    else:
        count, d = _get_counts_nanvar(mask, axis, ddof)
    if skipna:
        values = values.copy()
        np.putmask(values, mask, 0)
    # xref GH10242
    # Compute variance via two-pass algorithm, which is stable against
    # cancellation errors and relatively accurate for small numbers of
    # observations.
    #
    # See https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
    avg = _ensure_numeric(values.sum(axis=axis, dtype=np.float64)) / count
    if axis is not None:
        avg = np.expand_dims(avg, axis)
    sqr = _ensure_numeric((avg - values)**2)
    np.putmask(sqr, mask, 0)
    result = sqr.sum(axis=axis, dtype=np.float64) / d
    # Return variance as np.float64 (the datatype used in the accumulator),
    # unless we were dealing with a float array, in which case use the same
    # precision as the original values array.
    if is_float_dtype(dtype):
        result = result.astype(dtype)
    return _wrap_results(result, values.dtype)
@disallow('M8', 'm8')
def nansem(values, axis=None, skipna=True, ddof=1):
    """Standard error of the mean along *axis*: sqrt(var) / sqrt(count).

    NA entries are excluded from the count; *ddof* is forwarded to the
    variance computation.
    """
    # The original code assigned this first nanvar result to `var` and
    # then immediately discarded it.  The call itself is kept: it runs
    # nanvar's dtype validation / error behavior on the raw input before
    # we convert to float below.  Only the dead assignment is removed.
    nanvar(values, axis, skipna, ddof=ddof)
    mask = isna(values)
    if not is_float_dtype(values.dtype):
        values = values.astype('f8')
    count, _ = _get_counts_nanvar(mask, axis, ddof, values.dtype)
    var = nanvar(values, axis, skipna, ddof=ddof)
    return np.sqrt(var) / np.sqrt(count)
def _nanminmax(meth, fill_value_typ):
    """Build a nan-aware reduction (nanmin/nanmax) around the ndarray
    method *meth*, filling NA slots with *fill_value_typ* (+inf for min,
    -inf for max) so they can never win the comparison."""
    @bottleneck_switch()
    def reduction(values, axis=None, skipna=True):
        values, mask, dtype, dtype_max = _get_values(
            values, skipna, fill_value_typ=fill_value_typ, )
        if ((axis is not None and values.shape[axis] == 0) or
                values.size == 0):
            # empty reduction: numpy would raise, so emulate an all-NA
            # result instead
            try:
                result = getattr(values, meth)(axis, dtype=dtype_max)
                result.fill(np.nan)
            except Exception:
                # narrowed from a bare except so SystemExit /
                # KeyboardInterrupt still propagate
                result = np.nan
        else:
            result = getattr(values, meth)(axis)
        result = _wrap_results(result, dtype)
        return _maybe_null_out(result, axis, mask)
    reduction.__name__ = 'nan' + meth
    return reduction


nanmin = _nanminmax('min', fill_value_typ='+inf')
nanmax = _nanminmax('max', fill_value_typ='-inf')
@disallow('O')
def nanargmax(values, axis=None, skipna=True):
    """
    Index of the maximum along *axis*; NA entries are filled with -inf
    so they never win.  Returns -1 in the NA case (all-NA slice when
    skipna, any-NA otherwise).
    """
    values, mask, dtype, _ = _get_values(values, skipna, fill_value_typ='-inf')
    result = values.argmax(axis)
    result = _maybe_arg_null_out(result, axis, mask, skipna)
    return result
@disallow('O')
def nanargmin(values, axis=None, skipna=True):
    """
    Index of the minimum along *axis*; NA entries are filled with +inf
    so they never win.  Returns -1 in the NA case (all-NA slice when
    skipna, any-NA otherwise).
    """
    values, mask, dtype, _ = _get_values(values, skipna, fill_value_typ='+inf')
    result = values.argmin(axis)
    result = _maybe_arg_null_out(result, axis, mask, skipna)
    return result
@disallow('M8', 'm8')
def nanskew(values, axis=None, skipna=True):
    """ Compute the sample skewness.
    The statistic computed here is the adjusted Fisher-Pearson standardized
    moment coefficient G1. The algorithm computes this coefficient directly
    from the second and third central moment.  Returns 0 for constant
    input (m2 == 0) and NaN for fewer than 3 observations.
    """
    values = com._values_from_object(values)
    mask = isna(values)
    if not is_float_dtype(values.dtype):
        values = values.astype('f8')
        count = _get_counts(mask, axis)
    else:
        count = _get_counts(mask, axis, dtype=values.dtype)
    if skipna:
        values = values.copy()
        np.putmask(values, mask, 0)
    mean = values.sum(axis, dtype=np.float64) / count
    if axis is not None:
        mean = np.expand_dims(mean, axis)
    adjusted = values - mean
    if skipna:
        # re-zero the NA slots so they do not contribute to the moments
        np.putmask(adjusted, mask, 0)
    adjusted2 = adjusted ** 2
    adjusted3 = adjusted2 * adjusted
    m2 = adjusted2.sum(axis, dtype=np.float64)
    m3 = adjusted3.sum(axis, dtype=np.float64)
    # floating point error
    #
    # #18044 in _libs/windows.pyx calc_skew follow this behavior
    # to fix the fperr to treat m2 <1e-14 as zero
    m2 = _zero_out_fperr(m2)
    m3 = _zero_out_fperr(m3)
    with np.errstate(invalid='ignore', divide='ignore'):
        result = (count * (count - 1) ** 0.5 / (count - 2)) * (m3 / m2 ** 1.5)
    dtype = values.dtype
    if is_float_dtype(dtype):
        result = result.astype(dtype)
    if isinstance(result, np.ndarray):
        result = np.where(m2 == 0, 0, result)
        result[count < 3] = np.nan
        return result
    else:
        result = 0 if m2 == 0 else result
        if count < 3:
            return np.nan
        return result
@disallow('M8', 'm8')
def nankurt(values, axis=None, skipna=True):
    """ Compute the sample excess kurtosis.
    The statistic computed here is the adjusted Fisher-Pearson standardized
    moment coefficient G2, computed directly from the second and fourth
    central moment.  Returns 0 for constant input (denom == 0) and NaN
    for fewer than 4 observations.
    """
    values = com._values_from_object(values)
    mask = isna(values)
    if not is_float_dtype(values.dtype):
        values = values.astype('f8')
        count = _get_counts(mask, axis)
    else:
        count = _get_counts(mask, axis, dtype=values.dtype)
    if skipna:
        values = values.copy()
        np.putmask(values, mask, 0)
    mean = values.sum(axis, dtype=np.float64) / count
    if axis is not None:
        mean = np.expand_dims(mean, axis)
    adjusted = values - mean
    if skipna:
        # re-zero the NA slots so they do not contribute to the moments
        np.putmask(adjusted, mask, 0)
    adjusted2 = adjusted ** 2
    adjusted4 = adjusted2 ** 2
    m2 = adjusted2.sum(axis, dtype=np.float64)
    m4 = adjusted4.sum(axis, dtype=np.float64)
    with np.errstate(invalid='ignore', divide='ignore'):
        adj = 3 * (count - 1) ** 2 / ((count - 2) * (count - 3))
        numer = count * (count + 1) * (count - 1) * m4
        denom = (count - 2) * (count - 3) * m2**2
        result = numer / denom - adj
    # floating point error
    #
    # #18044 in _libs/windows.pyx calc_kurt follow this behavior
    # to fix the fperr to treat denom <1e-14 as zero
    numer = _zero_out_fperr(numer)
    denom = _zero_out_fperr(denom)
    if not isinstance(denom, np.ndarray):
        # if ``denom`` is a scalar, check these corner cases first before
        # doing division
        if count < 4:
            return np.nan
        if denom == 0:
            return 0
    # recompute with the fperr-cleaned numerator/denominator
    with np.errstate(invalid='ignore', divide='ignore'):
        result = numer / denom - adj
    dtype = values.dtype
    if is_float_dtype(dtype):
        result = result.astype(dtype)
    if isinstance(result, np.ndarray):
        result = np.where(denom == 0, 0, result)
        result[count < 4] = np.nan
    return result
@disallow('M8', 'm8')
def nanprod(values, axis=None, skipna=True, min_count=0):
    """Product along *axis*, filling NA with the identity (1) when
    skipna; results from fewer than *min_count* observations are nulled
    out."""
    mask = isna(values)
    # int arrays cannot hold NaN, so no masking is needed for them
    if skipna and not is_any_int_dtype(values):
        values = values.copy()
        values[mask] = 1
    result = values.prod(axis)
    return _maybe_null_out(result, axis, mask, min_count=min_count)
def _maybe_arg_null_out(result, axis, mask, skipna):
# helper function for nanargmin/nanargmax
if axis is None or not getattr(result, 'ndim', False):
if skipna:
if mask.all():
result = -1
else:
if mask.any():
result = -1
else:
if skipna:
na_mask = mask.all(axis)
else:
na_mask = mask.any(axis)
if na_mask.any():
result[na_mask] = -1
return result
def _get_counts(mask, axis, dtype=float):
    """Count of non-masked (non-NA) observations along *axis*, coerced
    to *dtype*; a scalar when axis is None or the count collapses to
    one value."""
    dtype = _get_dtype(dtype)
    if axis is None:
        return dtype.type(mask.size - mask.sum())
    count = mask.shape[axis] - mask.sum(axis)
    if is_scalar(count):
        return dtype.type(count)
    try:
        return count.astype(dtype)
    except AttributeError:
        # count is a plain Python/number sequence without .astype
        return np.array(count, dtype=dtype)
def _maybe_null_out(result, axis, mask, min_count=1):
    """Null out reduction results computed from fewer than *min_count*
    non-NA observations: NaN for numeric results (upcasting to float or
    complex as needed), None otherwise."""
    if axis is not None and getattr(result, 'ndim', False):
        null_mask = (mask.shape[axis] - mask.sum(axis) - min_count) < 0
        if np.any(null_mask):
            if is_numeric_dtype(result):
                if np.iscomplexobj(result):
                    result = result.astype('c16')
                else:
                    result = result.astype('f8')
                result[null_mask] = np.nan
            else:
                # GH12941, use None to auto cast null
                result[null_mask] = None
    elif result is not tslibs.NaT:
        # scalar result: total non-NA count decides
        null_mask = mask.size - mask.sum()
        if null_mask < min_count:
            result = np.nan
    return result
def _zero_out_fperr(arg):
# #18044 reference this behavior to fix rolling skew/kurt issue
if isinstance(arg, np.ndarray):
with np.errstate(invalid='ignore'):
return np.where(np.abs(arg) < 1e-14, 0, arg)
else:
return arg.dtype.type(0) if np.abs(arg) < 1e-14 else arg
@disallow('M8', 'm8')
def nancorr(a, b, method='pearson', min_periods=None):
    """Correlation of the 1-D ndarrays *a* and *b*, dropping positions
    where either side is NA; NaN when fewer than *min_periods* pairs
    remain (default 1)."""
    if len(a) != len(b):
        raise AssertionError('Operands to nancorr must have same size')
    if min_periods is None:
        min_periods = 1
    keep = notna(a) & notna(b)
    if not keep.all():
        a, b = a[keep], b[keep]
    if len(a) < min_periods:
        return np.nan
    return get_corr_func(method)(a, b)
def get_corr_func(method):
    """Return the pairwise correlation callable for *method* ('pearson',
    'kendall', or 'spearman'); the latter two require scipy."""
    if method in ['kendall', 'spearman']:
        from scipy.stats import kendalltau, spearmanr

    def _pearson(a, b):
        return np.corrcoef(a, b)[0, 1]

    def _kendall(a, b):
        # older scipy returned a bare statistic instead of a tuple
        rs = kendalltau(a, b)
        if isinstance(rs, tuple):
            return rs[0]
        return rs

    def _spearman(a, b):
        return spearmanr(a, b)[0]

    return {
        'pearson': _pearson,
        'kendall': _kendall,
        'spearman': _spearman,
    }[method]
@disallow('M8', 'm8')
def nancov(a, b, min_periods=None):
    """Covariance of the 1-D ndarrays *a* and *b*, dropping positions
    where either side is NA; NaN when fewer than *min_periods* pairs
    remain (default 1)."""
    if len(a) != len(b):
        raise AssertionError('Operands to nancov must have same size')
    if min_periods is None:
        min_periods = 1
    keep = notna(a) & notna(b)
    if not keep.all():
        a, b = a[keep], b[keep]
    if len(a) < min_periods:
        return np.nan
    return np.cov(a, b)[0, 1]
def _ensure_numeric(x):
if isinstance(x, np.ndarray):
if is_integer_dtype(x) or is_bool_dtype(x):
x = x.astype(np.float64)
elif is_object_dtype(x):
try:
x = x.astype(np.complex128)
except:
x = x.astype(np.float64)
else:
if not np.any(x.imag):
x = x.real
elif not (is_float(x) or is_integer(x) or is_complex(x)):
try:
x = float(x)
except Exception:
try:
x = complex(x)
except Exception:
raise TypeError('Could not convert {value!s} to numeric'
.format(value=x))
return x
# NA-friendly array comparisons
def make_nancomp(op):
    """Wrap comparison *op* so positions where either operand is NA
    yield NaN, upcasting boolean results to object dtype to hold it."""
    def f(x, y):
        na_positions = isna(x) | isna(y)

        with np.errstate(all='ignore'):
            result = op(x, y)

        if na_positions.any():
            if is_bool_dtype(result):
                result = result.astype('O')
            np.putmask(result, na_positions, np.nan)

        return result

    return f


nangt = make_nancomp(operator.gt)
nange = make_nancomp(operator.ge)
nanlt = make_nancomp(operator.lt)
nanle = make_nancomp(operator.le)
naneq = make_nancomp(operator.eq)
nanne = make_nancomp(operator.ne)
| |
import sqlalchemy as sa
from sqlalchemy import BigInteger
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy import Sequence
from sqlalchemy import String
from sqlalchemy import testing
from sqlalchemy.dialects import sqlite
from sqlalchemy.schema import CreateSequence
from sqlalchemy.schema import DropSequence
from sqlalchemy.sql import select
from sqlalchemy.testing import assert_raises_message
from sqlalchemy.testing import eq_
from sqlalchemy.testing import fixtures
from sqlalchemy.testing import is_false
from sqlalchemy.testing import is_true
from sqlalchemy.testing.assertsql import AllOf
from sqlalchemy.testing.assertsql import CompiledSQL
from sqlalchemy.testing.assertsql import EachOf
from sqlalchemy.testing.schema import Column
from sqlalchemy.testing.schema import Table
class SequenceDDLTest(fixtures.TestBase, testing.AssertsCompiledSQL):
    """Compilation tests for CREATE SEQUENCE / DROP SEQUENCE DDL across
    the Sequence options (start, increment, min/max, cycle, cache,
    order) on the default dialect."""
    __dialect__ = "default"
    __backend__ = True
    def test_create_drop_ddl(self):
        """Each Sequence option combination compiles to the expected
        DDL string; START WITH 1 is emitted by default."""
        self.assert_compile(
            CreateSequence(Sequence("foo_seq")),
            "CREATE SEQUENCE foo_seq START WITH 1",
        )
        self.assert_compile(
            CreateSequence(Sequence("foo_seq", start=5)),
            "CREATE SEQUENCE foo_seq START WITH 5",
        )
        self.assert_compile(
            CreateSequence(Sequence("foo_seq", increment=2)),
            "CREATE SEQUENCE foo_seq INCREMENT BY 2 START WITH 1",
        )
        self.assert_compile(
            CreateSequence(Sequence("foo_seq", increment=2, start=5)),
            "CREATE SEQUENCE foo_seq INCREMENT BY 2 START WITH 5",
        )
        self.assert_compile(
            CreateSequence(
                Sequence("foo_seq", increment=2, start=0, minvalue=0)
            ),
            "CREATE SEQUENCE foo_seq INCREMENT BY 2 START WITH 0 MINVALUE 0",
        )
        self.assert_compile(
            CreateSequence(
                Sequence("foo_seq", increment=2, start=1, maxvalue=5)
            ),
            "CREATE SEQUENCE foo_seq INCREMENT BY 2 START WITH 1 MAXVALUE 5",
        )
        self.assert_compile(
            CreateSequence(
                Sequence("foo_seq", increment=2, start=1, nomaxvalue=True)
            ),
            "CREATE SEQUENCE foo_seq INCREMENT BY 2 START WITH 1 NO MAXVALUE",
        )
        self.assert_compile(
            CreateSequence(
                Sequence("foo_seq", increment=2, start=0, nominvalue=True)
            ),
            "CREATE SEQUENCE foo_seq INCREMENT BY 2 START WITH 0 NO MINVALUE",
        )
        self.assert_compile(
            CreateSequence(
                Sequence("foo_seq", start=1, maxvalue=10, cycle=True)
            ),
            "CREATE SEQUENCE foo_seq START WITH 1 MAXVALUE 10 CYCLE",
        )
        self.assert_compile(
            CreateSequence(Sequence("foo_seq", cache=1000, order=True)),
            "CREATE SEQUENCE foo_seq START WITH 1 CACHE 1000 ORDER",
        )
        self.assert_compile(
            CreateSequence(Sequence("foo_seq", order=True)),
            "CREATE SEQUENCE foo_seq START WITH 1 ORDER",
        )
        self.assert_compile(
            DropSequence(Sequence("foo_seq")), "DROP SEQUENCE foo_seq"
        )
class SequenceExecTest(fixtures.TestBase):
    """Execute real sequences against a backend that supports them and
    verify the values returned by execute()/scalar()/RETURNING."""
    __requires__ = ("sequences",)
    __backend__ = True
    @classmethod
    def setup_test_class(cls):
        # one shared server-side sequence for all tests in this class
        cls.seq = Sequence("my_sequence")
        cls.seq.create(testing.db)
    @classmethod
    def teardown_test_class(cls):
        cls.seq.drop(testing.db)
    def _assert_seq_result(self, ret):
        """asserts return of next_value is an int"""
        assert isinstance(ret, int)
        assert ret >= testing.db.dialect.default_sequence_base
    def test_execute(self, connection):
        s = Sequence("my_sequence")
        self._assert_seq_result(connection.execute(s))
    def test_execute_optional(self, connection):
        """test dialect executes a Sequence, returns nextval, whether
        or not "optional" is set"""
        s = Sequence("my_sequence", optional=True)
        self._assert_seq_result(connection.execute(s))
    def test_execute_next_value(self, connection):
        """test func.next_value().execute()/.scalar() works
        with connectionless execution."""
        s = Sequence("my_sequence")
        self._assert_seq_result(connection.scalar(s.next_value()))
    def test_execute_optional_next_value(self, connection):
        """test func.next_value().execute()/.scalar() works
        with connectionless execution."""
        s = Sequence("my_sequence", optional=True)
        self._assert_seq_result(connection.scalar(s.next_value()))
    def test_func_embedded_select(self, connection):
        """test can use next_value() in select column expr"""
        s = Sequence("my_sequence")
        self._assert_seq_result(connection.scalar(select(s.next_value())))
    @testing.requires.sequences_in_other_clauses
    @testing.provide_metadata
    def test_func_embedded_whereclause(self, connection):
        """test can use next_value() in whereclause"""
        metadata = self.metadata
        t1 = Table("t", metadata, Column("x", Integer))
        t1.create(testing.db)
        connection.execute(t1.insert(), [{"x": 1}, {"x": 300}, {"x": 301}])
        s = Sequence("my_sequence")
        eq_(
            list(
                connection.execute(t1.select().where(t1.c.x > s.next_value()))
            ),
            [(300,), (301,)],
        )
    @testing.provide_metadata
    def test_func_embedded_valuesbase(self, connection):
        """test can use next_value() in values() of _ValuesBase"""
        metadata = self.metadata
        t1 = Table(
            "t",
            metadata,
            Column("x", Integer),
        )
        t1.create(testing.db)
        s = Sequence("my_sequence")
        connection.execute(t1.insert().values(x=s.next_value()))
        self._assert_seq_result(connection.scalar(t1.select()))
    def test_inserted_pk_no_returning(self, metadata, connection):
        """test inserted_primary_key contains [None] when
        pk_col=next_value(), implicit returning is not used."""
        # I'm not really sure what this test wants to accomlish.
        t1 = Table(
            "t",
            metadata,
            Column("x", Integer, primary_key=True),
            implicit_returning=False,
        )
        s = Sequence("my_sequence_here", metadata=metadata)
        conn = connection
        t1.create(conn)
        s.create(conn)
        r = conn.execute(t1.insert().values(x=s.next_value()))
        if testing.requires.emulated_lastrowid_even_with_sequences.enabled:
            eq_(r.inserted_primary_key, (1,))
        else:
            eq_(r.inserted_primary_key, (None,))
    @testing.combinations(
        ("implicit_returning",),
        ("no_implicit_returning",),
        ("explicit_returning", testing.requires.returning),
        ("return_defaults_no_implicit_returning", testing.requires.returning),
        ("return_defaults_implicit_returning", testing.requires.returning),
        argnames="returning",
    )
    @testing.requires.multivalues_inserts
    def test_seq_multivalues_inline(self, metadata, connection, returning):
        # note: "no_implicit_returning" is a substring of the
        # "return_defaults_no_implicit_returning" combination as well
        _implicit_returning = "no_implicit_returning" not in returning
        t1 = Table(
            "t",
            metadata,
            Column("x", Integer, Sequence("my_seq"), primary_key=True),
            Column("data", String(50)),
            implicit_returning=_implicit_returning,
        )
        metadata.create_all(connection)
        conn = connection
        stmt = t1.insert().values(
            [{"data": "d1"}, {"data": "d2"}, {"data": "d3"}]
        )
        if returning == "explicit_returning":
            stmt = stmt.returning(t1.c.x)
        elif "return_defaults" in returning:
            stmt = stmt.return_defaults()
        r = conn.execute(stmt)
        if returning == "explicit_returning":
            eq_(r.all(), [(1,), (2,), (3,)])
        elif "return_defaults" in returning:
            eq_(r.returned_defaults_rows, None)
            # TODO: not sure what this is
            eq_(r.inserted_primary_key_rows, [(None,)])
        eq_(
            conn.execute(t1.select().order_by(t1.c.x)).all(),
            [(1, "d1"), (2, "d2"), (3, "d3")],
        )
    @testing.combinations(
        ("implicit_returning",),
        ("no_implicit_returning",),
        (
            "explicit_returning",
            testing.requires.returning
            + testing.requires.insert_executemany_returning,
        ),
        (
            "return_defaults_no_implicit_returning",
            testing.requires.returning
            + testing.requires.insert_executemany_returning,
        ),
        (
            "return_defaults_implicit_returning",
            testing.requires.returning
            + testing.requires.insert_executemany_returning,
        ),
        argnames="returning",
    )
    def test_seq_multivalues_executemany(
        self, connection, metadata, returning
    ):
        _implicit_returning = "no_implicit_returning" not in returning
        t1 = Table(
            "t",
            metadata,
            Column("x", Integer, Sequence("my_seq"), primary_key=True),
            Column("data", String(50)),
            implicit_returning=_implicit_returning,
        )
        metadata.create_all(connection)
        conn = connection
        stmt = t1.insert()
        if returning == "explicit_returning":
            stmt = stmt.returning(t1.c.x)
        elif "return_defaults" in returning:
            stmt = stmt.return_defaults()
        r = conn.execute(
            stmt, [{"data": "d1"}, {"data": "d2"}, {"data": "d3"}]
        )
        if returning == "explicit_returning":
            eq_(r.all(), [(1,), (2,), (3,)])
        elif "return_defaults" in returning:
            if "no_implicit_returning" in returning:
                eq_(r.returned_defaults_rows, None)
                eq_(r.inserted_primary_key_rows, [(1,), (2,), (3,)])
            else:
                eq_(r.returned_defaults_rows, [(1,), (2,), (3,)])
                eq_(r.inserted_primary_key_rows, [(1,), (2,), (3,)])
        eq_(
            conn.execute(t1.select().order_by(t1.c.x)).all(),
            [(1, "d1"), (2, "d2"), (3, "d3")],
        )
    @testing.requires.returning
    def test_inserted_pk_implicit_returning(self, connection, metadata):
        """test inserted_primary_key contains the result when
        pk_col=next_value(), when implicit returning is used."""
        s = Sequence("my_sequence")
        t1 = Table(
            "t",
            metadata,
            Column(
                "x",
                Integer,
                primary_key=True,
            ),
            implicit_returning=True,
        )
        t1.create(connection)
        r = connection.execute(t1.insert().values(x=s.next_value()))
        self._assert_seq_result(r.inserted_primary_key[0])
class SequenceTest(fixtures.TestBase, testing.AssertsCompiledSQL):
    """Create/drop semantics, checkfirst behavior, and interaction of
    standalone sequences with MetaData- and Table-level DDL."""
    __requires__ = ("sequences",)
    __backend__ = True
    @testing.combinations(
        (Sequence("foo_seq"),),
        (Sequence("foo_seq", start=8),),
        (Sequence("foo_seq", increment=5),),
    )
    def test_start_increment(self, seq):
        seq.create(testing.db)
        try:
            with testing.db.connect() as conn:
                values = [conn.execute(seq) for i in range(3)]
                start = seq.start or testing.db.dialect.default_sequence_base
                inc = seq.increment or 1
                eq_(values, list(range(start, start + inc * 3, inc)))
        finally:
            # sequence is dropped even when the assertions fail
            seq.drop(testing.db)
    def _has_sequence(self, connection, name):
        return testing.db.dialect.has_sequence(connection, name)
    def test_nextval_unsupported(self):
        """test next_value() used on non-sequence platform
        raises NotImplementedError."""
        s = Sequence("my_seq")
        d = sqlite.dialect()
        assert_raises_message(
            NotImplementedError,
            "Dialect 'sqlite' does not support sequence increments.",
            s.next_value().compile,
            dialect=d,
        )
    def test_checkfirst_sequence(self, connection):
        s = Sequence("my_sequence")
        s.create(connection, checkfirst=False)
        assert self._has_sequence(connection, "my_sequence")
        s.create(connection, checkfirst=True)
        s.drop(connection, checkfirst=False)
        assert not self._has_sequence(connection, "my_sequence")
        s.drop(connection, checkfirst=True)
    def test_checkfirst_metadata(self, connection):
        m = MetaData()
        Sequence("my_sequence", metadata=m)
        m.create_all(connection, checkfirst=False)
        assert self._has_sequence(connection, "my_sequence")
        m.create_all(connection, checkfirst=True)
        m.drop_all(connection, checkfirst=False)
        assert not self._has_sequence(connection, "my_sequence")
        m.drop_all(connection, checkfirst=True)
    def test_checkfirst_table(self, connection):
        m = MetaData()
        s = Sequence("my_sequence")
        t = Table("t", m, Column("c", Integer, s, primary_key=True))
        t.create(connection, checkfirst=False)
        assert self._has_sequence(connection, "my_sequence")
        t.create(connection, checkfirst=True)
        t.drop(connection, checkfirst=False)
        assert not self._has_sequence(connection, "my_sequence")
        t.drop(connection, checkfirst=True)
    @testing.provide_metadata
    def test_table_overrides_metadata_create(self, connection):
        metadata = self.metadata
        Sequence("s1", metadata=metadata)
        s2 = Sequence("s2", metadata=metadata)
        s3 = Sequence("s3")
        t = Table("t", metadata, Column("c", Integer, s3, primary_key=True))
        assert s3.metadata is metadata
        t.create(connection, checkfirst=True)
        s3.drop(connection)
        # 't' is created, and 's3' won't be
        # re-created since it's linked to 't'.
        # 's1' and 's2' are, however.
        metadata.create_all(connection)
        assert self._has_sequence(connection, "s1")
        assert self._has_sequence(connection, "s2")
        assert not self._has_sequence(connection, "s3")
        s2.drop(connection)
        assert self._has_sequence(connection, "s1")
        assert not self._has_sequence(connection, "s2")
        metadata.drop_all(connection)
        assert not self._has_sequence(connection, "s1")
        assert not self._has_sequence(connection, "s2")
    @testing.requires.returning
    @testing.requires.supports_sequence_for_autoincrement_column
    @testing.provide_metadata
    def test_freestanding_sequence_via_autoinc(self, connection):
        t = Table(
            "some_table",
            self.metadata,
            Column(
                "id",
                Integer,
                autoincrement=True,
                primary_key=True,
                default=Sequence(
                    "my_sequence", metadata=self.metadata
                ).next_value(),
            ),
        )
        self.metadata.create_all(connection)
        result = connection.execute(t.insert())
        eq_(result.inserted_primary_key, (1,))
    @testing.requires.sequences_as_server_defaults
    @testing.provide_metadata
    def test_shared_sequence(self, connection):
        # test case for #6071
        common_seq = Sequence("common_sequence", metadata=self.metadata)
        Table(
            "table_1",
            self.metadata,
            Column(
                "id",
                Integer,
                common_seq,
                server_default=common_seq.next_value(),
                primary_key=True,
            ),
        )
        Table(
            "table_2",
            self.metadata,
            Column(
                "id",
                Integer,
                common_seq,
                server_default=common_seq.next_value(),
                primary_key=True,
            ),
        )
        self.metadata.create_all(connection)
        is_true(self._has_sequence(connection, "common_sequence"))
        is_true(testing.db.dialect.has_table(connection, "table_1"))
        is_true(testing.db.dialect.has_table(connection, "table_2"))
        self.metadata.drop_all(connection)
        is_false(self._has_sequence(connection, "common_sequence"))
        is_false(testing.db.dialect.has_table(connection, "table_1"))
        is_false(testing.db.dialect.has_table(connection, "table_2"))
    def test_next_value_type(self):
        seq = Sequence("my_sequence", data_type=BigInteger)
        assert isinstance(seq.next_value().type, BigInteger)
class TableBoundSequenceTest(fixtures.TablesTest):
    """Sequences bound to Column objects, both as primary-key and as
    non-primary-key defaults."""
    __requires__ = ("sequences",)
    __backend__ = True
    @testing.fixture
    def table_fixture(self, metadata, connection, implicit_returning):
        # returns a factory so the combination parameter can decide
        # implicit_returning before tables are created
        def go(implicit_returning):
            cartitems = Table(
                "cartitems",
                metadata,
                Column(
                    "cart_id",
                    Integer,
                    Sequence("cart_id_seq"),
                    primary_key=True,
                    autoincrement=False,
                ),
                Column("description", String(40)),
                Column("createdate", sa.DateTime()),
                implicit_returning=implicit_returning,
            )
            # a little bit of implicit case sensitive naming test going on here
            Manager = Table(
                "Manager",
                metadata,
                Column(
                    "obj_id",
                    Integer,
                    Sequence("obj_id_seq"),
                ),
                Column("name", String(128)),
                Column(
                    "id",
                    Integer,
                    Sequence("Manager_id_seq", optional=True),
                    primary_key=True,
                ),
                implicit_returning=implicit_returning,
            )
            metadata.create_all(connection)
            return Manager, cartitems
        return go
    @testing.combinations(
        (True, testing.requires.returning),
        (False,),
        argnames="implicit_returning",
    )
    def test_insert_via_seq(
        self, table_fixture, connection, implicit_returning
    ):
        Manager, cartitems = table_fixture(implicit_returning)
        connection.execute(cartitems.insert(), dict(description="hi"))
        connection.execute(cartitems.insert(), dict(description="there"))
        r = connection.execute(cartitems.insert(), dict(description="lala"))
        expected = 2 + testing.db.dialect.default_sequence_base
        eq_(r.inserted_primary_key[0], expected)
        eq_(
            connection.scalar(
                sa.select(cartitems.c.cart_id).where(
                    cartitems.c.description == "lala"
                ),
            ),
            expected,
        )
    @testing.combinations(
        (True, testing.requires.returning),
        (False,),
        argnames="implicit_returning",
    )
    def test_seq_nonpk(self, connection, table_fixture, implicit_returning):
        """test sequences fire off as defaults on non-pk columns"""
        sometable, cartitems = table_fixture(implicit_returning)
        conn = connection
        result = conn.execute(sometable.insert(), dict(name="somename"))
        eq_(result.postfetch_cols(), [sometable.c.obj_id])
        result = conn.execute(sometable.insert(), dict(name="someother"))
        conn.execute(
            sometable.insert(), [{"name": "name3"}, {"name": "name4"}]
        )
        dsb = testing.db.dialect.default_sequence_base
        eq_(
            list(conn.execute(sometable.select().order_by(sometable.c.id))),
            [
                (
                    dsb,
                    "somename",
                    dsb,
                ),
                (
                    dsb + 1,
                    "someother",
                    dsb + 1,
                ),
                (
                    dsb + 2,
                    "name3",
                    dsb + 2,
                ),
                (
                    dsb + 3,
                    "name4",
                    dsb + 3,
                ),
            ],
        )
class SequenceAsServerDefaultTest(
    testing.AssertsExecutionResults, fixtures.TablesTest
):
    """Sequences used through ``server_default=seq.next_value()`` and the
    DDL drop ordering between tables and their sequences."""
    __requires__ = ("sequences_as_server_defaults",)
    __backend__ = True
    run_create_tables = "each"
    @classmethod
    def define_tables(cls, metadata):
        m = metadata
        s = Sequence("t_seq", metadata=m)
        Table(
            "t_seq_test",
            m,
            Column("id", Integer, s, server_default=s.next_value()),
            Column("data", String(50)),
        )
        s2 = Sequence("t_seq_2", metadata=m)
        Table(
            "t_seq_test_2",
            m,
            # server_default only; the sequence is not column-attached
            Column("id", Integer, server_default=s2.next_value()),
            Column("data", String(50)),
        )
    def test_default_textual_w_default(self, connection):
        connection.exec_driver_sql(
            "insert into t_seq_test (data) values ('some data')"
        )
        eq_(
            connection.exec_driver_sql("select id from t_seq_test").scalar(), 1
        )
    def test_default_core_w_default(self, connection):
        t_seq_test = self.tables.t_seq_test
        connection.execute(t_seq_test.insert().values(data="some data"))
        eq_(connection.scalar(select(t_seq_test.c.id)), 1)
    def test_default_textual_server_only(self, connection):
        connection.exec_driver_sql(
            "insert into t_seq_test_2 (data) values ('some data')"
        )
        eq_(
            connection.exec_driver_sql("select id from t_seq_test_2").scalar(),
            1,
        )
    def test_default_core_server_only(self, connection):
        t_seq_test = self.tables.t_seq_test_2
        connection.execute(t_seq_test.insert().values(data="some data"))
        eq_(connection.scalar(select(t_seq_test.c.id)), 1)
    def test_drop_ordering(self):
        with self.sql_execution_asserter(testing.db) as asserter:
            self.tables_test_metadata.drop_all(testing.db, checkfirst=False)
        asserter.assert_(
            AllOf(
                CompiledSQL("DROP TABLE t_seq_test_2", {}),
                CompiledSQL("DROP TABLE t_seq_test", {}),
            ),
            AllOf(
                # dropped as part of metadata level
                CompiledSQL("DROP SEQUENCE t_seq", {}),
                CompiledSQL("DROP SEQUENCE t_seq_2", {}),
            ),
        )
    def test_drop_ordering_single_table(self):
        with self.sql_execution_asserter(testing.db) as asserter:
            for table in self.tables_test_metadata.tables.values():
                table.drop(testing.db, checkfirst=False)
        asserter.assert_(
            AllOf(
                CompiledSQL("DROP TABLE t_seq_test_2", {}),
                EachOf(
                    CompiledSQL("DROP TABLE t_seq_test", {}),
                    CompiledSQL("DROP SEQUENCE t_seq", {}),
                ),
            )
        )
| |
#!/usr/bin/env python
"""
Written by Vadim Aleksandrov
Github: https://github.com/verdel
Email: valeksandrov@me.com
This code has been released under the terms of the Apache-2.0 license
http://opensource.org/licenses/Apache-2.0
Example of using Storage Policy
Based Management (SPBM) API to set VM Home
and Virtual Disk Storage Policies
Thanks to William Lam (https://github.com/lamw) for ideas from
the script list_vm_storage_policy.py
"""
import re
from tools import cli, service_instance
from pyVmomi import pbm, vim, VmomiSupport, SoapStubAdapter
class BColors(object):
    """A class used to represent ANSI escape sequences
    for console color output.
    """
    HEADER = '\033[95m'     # bright magenta
    OKBLUE = '\033[94m'     # bright blue
    OKGREEN = '\033[92m'    # bright green
    WARNING = '\033[93m'    # bright yellow
    FAIL = '\033[91m'       # bright red
    ENDC = '\033[0m'        # reset all attributes
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
def pbm_connect(stub_adapter, disable_ssl_verification=False):
    """Connect to the VMware Storage Policy (PBM) endpoint.

    :param stub_adapter: The ServiceInstance stub adapter
    :type stub_adapter: SoapStubAdapter
    :param disable_ssl_verification: A flag used to skip ssl certificate
        verification (default is False)
    :type disable_ssl_verification: bool
    :returns: A VMware Storage Policy Service content object
    :rtype: ServiceContent
    """
    ssl_context = None
    if disable_ssl_verification:
        import ssl
        if hasattr(ssl, '_create_unverified_context'):
            ssl_context = ssl._create_unverified_context()
    # Reuse the authenticated vCenter session cookie for the PBM stub.
    VmomiSupport.GetRequestContext()["vcSessionCookie"] = \
        stub_adapter.cookie.split('"')[1]
    pbm_stub = SoapStubAdapter(
        host=stub_adapter.host.split(":")[0],
        version="pbm.version.version1",
        path="/pbm/sdk",
        poolSize=0,
        sslContext=ssl_context)
    service_instance = pbm.ServiceInstance("ServiceInstance", pbm_stub)
    return service_instance.RetrieveContent()
def check_storage_profile_associated(profile_manager, ref, name):
    """Report whether the storage policy *name* is associated with *ref*.

    :param profile_manager: A VMware Storage Policy Service manager object
    :type profile_manager: pbm.profile.ProfileManager
    :param ref: A server reference to a virtual machine, virtual disk,
        or datastore
    :type ref: pbm.ServerObjectRef
    :param name: A VMware Storage Policy profile name
    :type name: str
    :returns: True if a VMware Storage Policy profile with the specified
        name is associated with the specified entity
    :rtype: bool
    """
    associated_ids = profile_manager.PbmQueryAssociatedProfile(ref)
    if len(associated_ids) == 0:
        return False
    profiles = profile_manager.PbmRetrieveContent(profileIds=associated_ids)
    return any(profile.name == name for profile in profiles)
def search_storage_profile_by_name(profile_manager, name):
    """Search VMware storage policy profile by name.

    :param profile_manager: A VMware Storage Policy Service manager object
    :type profile_manager: pbm.profile.ProfileManager
    :param name: A VMware Storage Policy profile name
    :type name: str
    :returns: The VMware Storage Policy profile with the given name, or
        None when no such profile exists
    :rtype: pbm.profile.Profile or None
    """
    profile_ids = profile_manager.PbmQueryProfile(
        resourceType=pbm.profile.ResourceType(resourceType="STORAGE"),
        profileCategory="REQUIREMENT"
    )
    if len(profile_ids) > 0:
        storage_profiles = profile_manager.PbmRetrieveContent(
            profileIds=profile_ids)
        for storage_profile in storage_profiles:
            if storage_profile.name == name:
                return storage_profile
    # BUG FIX: this function used to return -1 on failure.  -1 is truthy,
    # so the caller's ``if not storage_profile:`` guard never fired and a
    # missing policy went undetected.  None is the correct falsy sentinel.
    return None
def set_vm_storage_profile(vm, profile):
    """Apply a VMware Storage Policy profile to VM Home.

    :param vm: A virtual machine object
    :type vm: VirtualMachine
    :param profile: A VMware Storage Policy profile
    :type profile: pbm.profile.Profile
    :returns: None
    """
    home_profile = vim.vm.DefinedProfileSpec()
    home_profile.profileId = profile.profileId.uniqueId
    config_spec = vim.vm.ConfigSpec()
    config_spec.vmProfile = [home_profile]
    # fire-and-forget: the returned task is not awaited
    vm.ReconfigVM_Task(config_spec)
def set_virtual_disk_storage_profile(vm, hardware_device, profile):
    """Apply a VMware Storage Policy profile to a Virtual Disk.

    :param vm: A virtual machine object
    :type vm: VirtualMachine
    :param hardware_device: A virtual disk object
    :type hardware_device: VirtualDevice
    :param profile: A VMware Storage Policy profile
    :type profile: pbm.profile.Profile
    :returns: None
    """
    disk_profile = vim.vm.DefinedProfileSpec()
    disk_profile.profileId = profile.profileId.uniqueId
    device_change = vim.vm.device.VirtualDeviceSpec()
    device_change.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit
    device_change.device = hardware_device
    device_change.profile = [disk_profile]
    config_spec = vim.vm.ConfigSpec()
    config_spec.deviceChange = [device_change]
    # fire-and-forget: the returned task is not awaited
    vm.ReconfigVM_Task(config_spec)
def search_vm_by_name(si, name, strict=False):
    """Search virtual machines by name.

    :param si: A ServiceInstance managed object
    :type si: ServiceInstance
    :param name: A virtual machine name, or a substring pattern when
        strict is False
    :type name: str
    :param strict: A flag used to set strict search method
        (default is False)
    :type strict: bool
    :returns: The matching virtual machine objects
    :rtype: list
    """
    content = si.content
    container_view = content.viewManager.CreateContainerView(
        content.rootFolder, [vim.VirtualMachine], True)
    machines = container_view.view
    container_view.Destroy()
    matches = []
    pattern = ".*{}.*".format(name)
    for machine in machines:
        if strict:
            # strict mode returns as soon as the first exact match is found
            if machine.name == name:
                matches.append(machine)
                return matches
        elif re.match(pattern, machine.name):
            matches.append(machine)
    return matches
def main():
    """Main program.
    """
    parser = cli.Parser()
    parser.add_required_arguments(cli.Argument.VM_NAME, cli.Argument.STORAGE_POLICY_NAME)
    parser.add_custom_argument('--strict', required=False, action='store_true',
                               help='Search strict virtual machine name matches')
    parser.add_custom_argument('--set_vm_home', required=False, action='store_true',
                               help='Set the specified policy for vm home.')
    parser.add_custom_argument('--virtual_disk_number', required=False, nargs='+', metavar='int',
                               help='The sequence numbers of the virtual disks for which '
                                    'the specified policy should be set. Space as delimiter.')
    args = parser.get_args()
    si = service_instance.connect(args)
    vd_number = args.virtual_disk_number
    policy_name = args.storage_policy_name
    # PBM endpoint reuses the authenticated vCenter session (see pbm_connect)
    pbm_content = pbm_connect(si._stub, args.disable_ssl_verification)
    pm = pbm_content.profileManager
    storage_profile = search_storage_profile_by_name(pm, policy_name)
    # NOTE(review): confirm the not-found sentinel returned by
    # search_storage_profile_by_name is actually falsy; otherwise this
    # guard never fires.
    if not storage_profile:
        raise SystemExit('Unable to find storage profile with name '
                         '{}{}{}.'.format(BColors.FAIL, policy_name, BColors.ENDC))
    vm_list = search_vm_by_name(si, args.vm_name, args.strict)
    for vm in vm_list:
        pm_object_type = pbm.ServerObjectRef.ObjectType("virtualMachine")
        pm_ref = pbm.ServerObjectRef(key=vm._moId, objectType=pm_object_type)
        print('\r\nVirtual machine name: {}{}{}'.format(BColors.OKGREEN,
                                                        vm.name,
                                                        BColors.ENDC))
        # The implementation of idempotency for the operation of the storage
        # policy assignment for VM Home
        if args.set_vm_home:
            if not check_storage_profile_associated(pm, pm_ref, policy_name):
                print('Set VM Home policy: '
                      '{}{}{}'.format(BColors.OKGREEN,
                                      policy_name,
                                      BColors.ENDC))
                try:
                    set_vm_storage_profile(vm, storage_profile)
                except Exception as exc:
                    print('VM reconfiguration task error: '
                          '{}{}{}'.format(BColors.FAIL,
                                          exc,
                                          BColors.ENDC))
            else:
                print('Set VM Home policy: Nothing to do')
        if vd_number:
            # match requested disk numbers against the "Hard disk N" labels
            for device in vm.config.hardware.device:
                device_type = type(device).__name__
                if device_type == "vim.vm.device.VirtualDisk" and \
                        re.search('Hard disk (.+)',
                                  device.deviceInfo.label).group(1) in vd_number:
                    pm_object_type = \
                        pbm.ServerObjectRef.ObjectType("virtualDiskId")
                    pm_ref = pbm.ServerObjectRef(
                        key="{}:{}".format(vm._moId, device.key), objectType=pm_object_type)
                    # The implementation of idempotency for the operation
                    # of the storage policy assignment for virtual disk
                    if not check_storage_profile_associated(pm, pm_ref, policy_name):
                        print('Set {} policy: '
                              '{}{}{}'.format(device.deviceInfo.label,
                                              BColors.OKGREEN,
                                              policy_name,
                                              BColors.ENDC))
                        try:
                            set_virtual_disk_storage_profile(vm, device, storage_profile)
                        except Exception as exc:
                            print('Virtual disk reconfiguration task error: '
                                  '{}{}{}'.format(BColors.FAIL,
                                                  exc,
                                                  BColors.ENDC))
                    else:
                        print('Set {} policy: Nothing to do'.format(
                            device.deviceInfo.label))
if __name__ == "__main__":
    main()
| |
"""
Spatial diagnostics module
"""
__author__ = "Luc Anselin luc.anselin@asu.edu, Daniel Arribas-Bel darribas@asu.edu"
from utils import spdot
from scipy.stats.stats import chisqprob
from scipy.stats import norm
import numpy as np
import numpy.linalg as la
__all__ = ['LMtests', 'MoranRes', 'AKtest']
class LMtests:
    """
    Lagrange Multiplier tests. Implemented as presented in Anselin et al.
    (1996) [1]_
    ...

    Parameters
    ----------
    ols         : OLS
                  OLS regression object
    w           : W
                  Spatial weights instance
    tests       : list
                  Lists of strings with the tests desired to be performed.
                  Values may be:
                  * 'all': runs all the options (default)
                  * 'lme': LM error test
                  * 'rlme': Robust LM error test
                  * 'lml' : LM lag test
                  * 'rlml': Robust LM lag test

    Attributes
    ----------
    lme         : tuple
                  (Only if 'lme' or 'all' was in tests). Pair of statistic and
                  p-value for the LM error test.
    lml         : tuple
                  (Only if 'lml' or 'all' was in tests). Pair of statistic and
                  p-value for the LM lag test.
    rlme        : tuple
                  (Only if 'rlme' or 'all' was in tests). Pair of statistic
                  and p-value for the Robust LM error test.
    rlml        : tuple
                  (Only if 'rlml' or 'all' was in tests). Pair of statistic
                  and p-value for the Robust LM lag test.
    sarma       : tuple
                  (Only if 'sarma' or 'all' was in tests). Pair of statistic
                  and p-value for the SARMA test.

    References
    ----------
    .. [1] Anselin, L., Bera, A. K., Florax, R., Yoon, M. J. (1996) "Simple
       diagnostic tests for spatial dependence". Regional Science and Urban
       Economics, 26, 77-104.

    Examples
    --------
    >>> import numpy as np
    >>> import pysal
    >>> from ols import OLS

    Open the csv file to access the data for analysis

    >>> csv = pysal.open(pysal.examples.get_path('columbus.dbf'),'r')

    Pull out from the csv the files we need ('HOVAL' as dependent as well as
    'INC' and 'CRIME' as independent) and directly transform them into nx1 and
    nx2 arrays, respectively

    >>> y = np.array([csv.by_col('HOVAL')]).T
    >>> x = np.array([csv.by_col('INC'), csv.by_col('CRIME')]).T

    Create the weights object from existing .gal file

    >>> w = pysal.open(pysal.examples.get_path('columbus.gal'), 'r').read()

    Row-standardize the weight object (not required although desirable in some
    cases)

    >>> w.transform='r'

    Run an OLS regression

    >>> ols = OLS(y, x)

    Run all the LM tests in the residuals. These diagnostics test for the
    presence of remaining spatial autocorrelation in the residuals of an OLS
    model and give indication about the type of spatial model. There are five
    types: presence of a spatial lag model (simple and robust version),
    presence of a spatial error model (simple and robust version) and joint presence
    of both a spatial lag as well as a spatial error model.

    >>> lms = pysal.spreg.diagnostics_sp.LMtests(ols, w)

    LM error test:

    >>> print round(lms.lme[0],4), round(lms.lme[1],4)
    3.0971 0.0784

    LM lag test:

    >>> print round(lms.lml[0],4), round(lms.lml[1],4)
    0.9816 0.3218

    Robust LM error test:

    >>> print round(lms.rlme[0],4), round(lms.rlme[1],4)
    3.2092 0.0732

    Robust LM lag test:

    >>> print round(lms.rlml[0],4), round(lms.rlml[1],4)
    1.0936 0.2957

    LM SARMA test:

    >>> print round(lms.sarma[0],4), round(lms.sarma[1],4)
    4.1907 0.123
    """
    def __init__(self, ols, w, tests=None):
        # None replaces the former mutable default argument ['all'];
        # behavior is unchanged for all callers.
        if tests is None:
            tests = ['all']
        cache = spDcache(ols, w)
        if tests == ['all']:
            tests = ['lme', 'lml', 'rlme', 'rlml', 'sarma']
        if 'lme' in tests:
            self.lme = lmErr(ols, w, cache)
        if 'lml' in tests:
            self.lml = lmLag(ols, w, cache)
        if 'rlme' in tests:
            self.rlme = rlmErr(ols, w, cache)
        if 'rlml' in tests:
            self.rlml = rlmLag(ols, w, cache)
        if 'sarma' in tests:
            self.sarma = lmSarma(ols, w, cache)
class MoranRes:
    """
    Moran's I for spatial autocorrelation in residuals from OLS regression
    ...
    Parameters
    ----------
    ols         : OLS
                  OLS regression object
    w           : W
                  Spatial weights instance
    z           : boolean
                  If set to True computes attributes eI, vI and zI. Due to computational burden of vI, defaults to False.
    Attributes
    ----------
    I           : float
                  Moran's I statistic
    eI          : float
                  Moran's I expectation (only when z=True)
    vI          : float
                  Moran's I variance (only when z=True)
    zI          : float
                  Moran's I standardized value (only when z=True)
    Examples
    --------
    >>> import numpy as np
    >>> import pysal
    >>> from ols import OLS
    Open the csv file to access the data for analysis
    >>> csv = pysal.open(pysal.examples.get_path('columbus.dbf'),'r')
    Pull out from the csv the files we need ('HOVAL' as dependent as well as
    'INC' and 'CRIME' as independent) and directly transform them into nx1 and
    nx2 arrays, respectively
    >>> y = np.array([csv.by_col('HOVAL')]).T
    >>> x = np.array([csv.by_col('INC'), csv.by_col('CRIME')]).T
    Create the weights object from existing .gal file
    >>> w = pysal.open(pysal.examples.get_path('columbus.gal'), 'r').read()
    Row-standardize the weight object (not required although desirable in some
    cases)
    >>> w.transform='r'
    Run an OLS regression
    >>> ols = OLS(y, x)
    Run Moran's I test for residual spatial autocorrelation in an OLS model.
    This computes the traditional statistic applying a correction in the
    expectation and variance to account for the fact it comes from residuals
    instead of an independent variable
    >>> m = pysal.spreg.diagnostics_sp.MoranRes(ols, w, z=True)
    Value of the Moran's I statistic:
    >>> print round(m.I,4)
    0.1713
    Value of the Moran's I expectation:
    >>> print round(m.eI,4)
    -0.0345
    Value of the Moran's I variance:
    >>> print round(m.vI,4)
    0.0081
    Value of the Moran's I standardized value. This is
    distributed as a standard Normal(0, 1)
    >>> print round(m.zI,4)
    2.2827
    P-value of the standardized Moran's I value (z):
    >>> print round(m.p_norm,4)
    0.0224
    """
    def __init__(self, ols, w, z=False):
        cache = spDcache(ols, w)
        self.I = get_mI(ols, w, cache)
        # vI is expensive, so the standardized statistic and its building
        # blocks are only derived on request
        if z:
            self.eI = get_eI(ols, w, cache)
            self.vI = get_vI(ols, w, self.eI, cache)
            self.zI, self.p_norm = get_zI(self.I, self.eI, self.vI)
class AKtest:
"""
Moran's I test of spatial autocorrelation for IV estimation.
Implemented following the original reference Anselin and Kelejian
(1997) [AK97]_
...
Parameters
----------
iv : TSLS
Regression object from TSLS class
w : W
Spatial weights instance
case : string
Flag for special cases (default to 'nosp'):
* 'nosp': Only NO spatial end. reg.
* 'gen': General case (spatial lag + end. reg.)
Attributes
----------
mi : float
Moran's I statistic for IV residuals
ak : float
Square of corrected Moran's I for residuals::
.. math::
ak = \dfrac{N \times I^*}{\phi^2}
Note: if case='nosp' then it simplifies to the LMerror
p : float
P-value of the test
References
----------
.. [AK97] Anselin, L., Kelejian, H. (1997) "Testing for spatial error
autocorrelation in the presence of endogenous regressors".
Interregional Regional Science Review, 20, 1.
.. [2] Kelejian, H.H., Prucha, I.R. and Yuzefovich, Y. (2004)
"Instrumental variable estimation of a spatial autorgressive model with
autoregressive disturbances: large and small sample results".
Advances in Econometrics, 18, 163-198.
Examples
--------
We first need to import the needed modules. Numpy is needed to convert the
data we read into arrays that ``spreg`` understands and ``pysal`` to
perform all the analysis. The TSLS is required to run the model on
which we will perform the tests.
>>> import numpy as np
>>> import pysal
>>> from twosls import TSLS
>>> from twosls_sp import GM_Lag
Open data on Columbus neighborhood crime (49 areas) using pysal.open().
This is the DBF associated with the Columbus shapefile. Note that
pysal.open() also reads data in CSV format; since the actual class
requires data to be passed in as numpy arrays, the user can read their
data in using any method.
>>> db = pysal.open(pysal.examples.get_path("columbus.dbf"),'r')
Before being able to apply the diagnostics, we have to run a model and,
for that, we need the input variables. Extract the CRIME column (crime
rates) from the DBF file and make it the dependent variable for the
regression. Note that PySAL requires this to be an numpy array of shape
(n, 1) as opposed to the also common shape of (n, ) that other packages
accept.
>>> y = np.array(db.by_col("CRIME"))
>>> y = np.reshape(y, (49,1))
Extract INC (income) vector from the DBF to be used as
independent variables in the regression. Note that PySAL requires this to
be an nxj numpy array, where j is the number of independent variables (not
including a constant). By default this model adds a vector of ones to the
independent variables passed in, but this can be overridden by passing
constant=False.
>>> X = []
>>> X.append(db.by_col("INC"))
>>> X = np.array(X).T
In this case, we consider HOVAL (home value) as an endogenous regressor,
so we acknowledge that by reading it in a different category.
>>> yd = []
>>> yd.append(db.by_col("HOVAL"))
>>> yd = np.array(yd).T
In order to properly account for the endogeneity, we have to pass in the
instruments. Let us consider DISCBD (distance to the CBD) is a good one:
>>> q = []
>>> q.append(db.by_col("DISCBD"))
>>> q = np.array(q).T
Now we are good to run the model. It is an easy one line task.
>>> reg = TSLS(y, X, yd, q=q)
Now we are concerned with whether our non-spatial model presents spatial
autocorrelation in the residuals. To assess this possibility, we can run
the Anselin-Kelejian test, which is a version of the classical LM error
test adapted for the case of residuals from an instrumental variables (IV)
regression. First we need an extra object, the weights matrix, which
includes the spatial configuration of the observations
into the error component of the model. To do that, we can open an already
existing gal file or create a new one. In this case, we will create one
from ``columbus.shp``.
>>> w = pysal.rook_from_shapefile(pysal.examples.get_path("columbus.shp"))
Unless there is a good reason not to do it, the weights have to be
row-standardized so every row of the matrix sums to one. Among other
things, this allows to interpret the spatial lag of a variable as the
average value of the neighboring observations. In PySAL, this can be
easily performed in the following way:
>>> w.transform = 'r'
We are good to run the test. It is a very simple task:
>>> ak = AKtest(reg, w)
And explore the information obtained:
>>> print('AK test: %f\tP-value: %f'%(ak.ak, ak.p))
AK test: 4.642895 P-value: 0.031182
The test also accommodates the case when the residuals come from an IV
regression that includes a spatial lag of the dependent variable. The only
requirement needed is to modify the ``case`` parameter when we call
``AKtest``. First, let us run a spatial lag model:
>>> reg_lag = GM_Lag(y, X, yd, q=q, w=w)
And now we can run the AK test and obtain similar information as in the
non-spatial model.
>>> ak_sp = AKtest(reg, w, case='gen')
>>> print('AK test: %f\tP-value: %f'%(ak_sp.ak, ak_sp.p))
AK test: 1.157593 P-value: 0.281965
"""
    def __init__(self, iv, w, case='nosp'):
        # iv   : fitted IV regression (e.g. TSLS / GM_Lag instance)
        # w    : PySAL spatial weights object
        # case : which variant of the test to run ('gen' or 'nosp')
        if case == 'gen':
            # General case (spatial lag + endogenous regressors): use the
            # corrected AK statistic from akTest().
            cache = spDcache(iv, w)
            self.mi, self.ak, self.p = akTest(iv, w, cache)
        elif case == 'nosp':
            # No spatial lag: plain LM-error statistic on the IV residuals.
            cache = spDcache(iv, w)
            self.mi = get_mI(iv, w, cache)
            self.ak, self.p = lmErr(iv, w, cache)
        else:
            # NOTE(review): an invalid 'case' only prints a message (Python 2
            # print statement) and leaves mi/ak/p unset; callers will then hit
            # AttributeError. Consider raising ValueError instead.
            print """\n
            Fix the optional argument 'case' to match the requirements:
                * 'gen': General case (spatial lag + end. reg.)
                * 'nosp': No spatial end. reg.
            \n"""
class spDcache:
    r"""
    Helper class to compute reusable pieces in the spatial diagnostics module
    ...
    Parameters
    ----------
    reg : OLS_dev, TSLS_dev, STSLS_dev
    Instance from a regression class
    w : W
    Spatial weights instance
    Attributes
    ----------
    j : array
    1x1 array with the result from:
    .. math::
    J = \dfrac{1}{[(WX\beta)' M (WX\beta) + T \sigma^2]}
    wu : array
    nx1 array with spatial lag of the residuals
    utwuDs : array
    1x1 array with the result from:
    .. math::
    utwuDs = \dfrac{u' W u}{\tilde{\sigma^2}}
    utwyDs : array
    1x1 array with the result from:
    .. math::
    utwyDs = \dfrac{u' W y}{\tilde{\sigma^2}}
    t : array
    1x1 array with the result from :
    .. math::
    T = tr[(W' + W) W]
    trA : float
    Trace of A as in Cliff & Ord (1981)
    """
    def __init__(self, reg, w):
        self.reg = reg
        self.w = w
        # Memo store for the lazily-computed properties below; each term is
        # computed at most once per instance.
        self._cache = {}
    @property
    def j(self):
        # J term used in the denominator of the LM-lag family of statistics.
        if 'j' not in self._cache:
            wxb = self.w.sparse * self.reg.predy
            wxb2 = np.dot(wxb.T, wxb)
            xwxb = spdot(self.reg.x.T, wxb)
            # num1 = (WXb)' M (WXb), the projection-adjusted cross product.
            num1 = wxb2 - np.dot(xwxb.T, np.dot(self.reg.xtxi, xwxb))
            num = num1 + (self.t * self.reg.sig2n)
            den = self.reg.n * self.reg.sig2n
            self._cache['j'] = num / den
        return self._cache['j']
    @property
    def wu(self):
        # Spatial lag of the residuals, W u (nx1).
        if 'wu' not in self._cache:
            self._cache['wu'] = self.w.sparse * self.reg.u
        return self._cache['wu']
    @property
    def utwuDs(self):
        # u' W u / sigma^2 (1x1).
        if 'utwuDs' not in self._cache:
            res = np.dot(self.reg.u.T, self.wu) / self.reg.sig2n
            self._cache['utwuDs'] = res
        return self._cache['utwuDs']
    @property
    def utwyDs(self):
        # u' W y / sigma^2 (1x1).
        if 'utwyDs' not in self._cache:
            res = np.dot(self.reg.u.T, self.w.sparse * self.reg.y)
            self._cache['utwyDs'] = res / self.reg.sig2n
        return self._cache['utwyDs']
    @property
    def t(self):
        # T = tr[(W' + W) W], scalar trace term.
        if 't' not in self._cache:
            prod = (self.w.sparse.T + self.w.sparse) * self.w.sparse
            self._cache['t'] = np.sum(prod.diagonal())
        return self._cache['t']
    @property
    def trA(self):
        # Trace of A = (X'X)^-1 X' W X, as in Cliff & Ord (1981).
        if 'trA' not in self._cache:
            xtwx = spdot(self.reg.x.T, spdot(self.w.sparse, self.reg.x))
            mw = np.dot(self.reg.xtxi, xtwx)
            self._cache['trA'] = np.sum(mw.diagonal())
        return self._cache['trA']
    @property
    def AB(self):
        """
        Computes A and B matrices as in Cliff-Ord 1981, p. 203
        """
        if 'AB' not in self._cache:
            # U is the symmetrized weights matrix (W + W') / 2.
            U = (self.w.sparse + self.w.sparse.T) / 2.
            z = spdot(U, self.reg.x, array_out=False)
            c1 = spdot(self.reg.x.T, z, array_out=False)
            c2 = spdot(z.T, z, array_out=False)
            G = self.reg.xtxi
            A = spdot(G, c1)
            B = spdot(G, c2)
            self._cache['AB'] = [A, B]
        return self._cache['AB']
def lmErr(reg, w, spDcache):
    """
    LM test for spatial error dependence, eq. (9) in Anselin et al. (1996).

    Parameters
    ----------
    reg      : OLS_dev, TSLS_dev, STSLS_dev
               Fitted regression object
    w        : W
               Spatial weights instance
    spDcache : spDcache
               Cache of reusable spatial diagnostic terms

    Returns
    -------
    lme : tuple
          (statistic, p-value) pair for the LM error test.

    References
    ----------
    .. [1] Anselin, L., Bera, A. K., Florax, R., Yoon, M. J. (1996) "Simple
       diagnostic tests for spatial dependence". Regional Science and Urban
       Economics, 26, 77-104.
    """
    stat = spDcache.utwuDs ** 2 / spDcache.t
    pvalue = chisqprob(stat, 1)
    # Both terms are 1x1 arrays; unwrap to plain scalars.
    return (stat[0][0], pvalue[0][0])
def lmLag(ols, w, spDcache):
    """
    LM test for a spatially lagged dependent variable, eq. (13) in
    Anselin et al. (1996).

    Parameters
    ----------
    ols      : OLS_dev
               Fitted OLS regression object
    w        : W
               Spatial weights instance
    spDcache : spDcache
               Cache of reusable spatial diagnostic terms

    Returns
    -------
    lml : tuple
          (statistic, p-value) pair for the LM lag test.

    References
    ----------
    .. [1] Anselin, L., Bera, A. K., Florax, R., Yoon, M. J. (1996) "Simple
       diagnostic tests for spatial dependence". Regional Science and Urban
       Economics, 26, 77-104.
    """
    stat = spDcache.utwyDs ** 2 / (ols.n * spDcache.j)
    pvalue = chisqprob(stat, 1)
    # Unwrap the 1x1 arrays into plain scalars.
    return (stat[0][0], pvalue[0][0])
def rlmErr(ols, w, spDcache):
    """
    Robust LM error test, eq. (8) in Anselin et al. (1996).

    NOTE: eq. (8) in the paper has an errata; the power -1 in the
    denominator belongs inside the square bracket, which is what is
    implemented here.

    Parameters
    ----------
    ols      : OLS_dev
               Fitted OLS regression object
    w        : W
               Spatial weights instance
    spDcache : spDcache
               Cache of reusable spatial diagnostic terms

    Returns
    -------
    rlme : tuple
           (statistic, p-value) pair for the robust LM error test.

    References
    ----------
    .. [1] Anselin, L., Bera, A. K., Florax, R., Yoon, M. J. (1996) "Simple
       diagnostic tests for spatial dependence". Regional Science and Urban
       Economics, 26, 77-104.
    """
    n_times_j = ols.n * spDcache.j
    numerator = (spDcache.utwuDs - (spDcache.t * spDcache.utwyDs) / n_times_j) ** 2
    denominator = spDcache.t * (1. - (spDcache.t / n_times_j))
    stat = numerator / denominator
    pvalue = chisqprob(stat, 1)
    # Unwrap the 1x1 arrays into plain scalars.
    return (stat[0][0], pvalue[0][0])
def rlmLag(ols, w, spDcache):
    """
    Robust LM lag test, eq. (12) in Anselin et al. (1996).

    Parameters
    ----------
    ols      : OLS_dev
               Fitted OLS regression object
    w        : W
               Spatial weights instance
    spDcache : spDcache
               Cache of reusable spatial diagnostic terms

    Returns
    -------
    rlml : tuple
           (statistic, p-value) pair for the robust LM lag test.

    References
    ----------
    .. [1] Anselin, L., Bera, A. K., Florax, R., Yoon, M. J. (1996) "Simple
       diagnostic tests for spatial dependence". Regional Science and Urban
       Economics, 26, 77-104.
    """
    stat = (spDcache.utwyDs - spDcache.utwuDs) ** 2 / ((ols.n * spDcache.j) - spDcache.t)
    pvalue = chisqprob(stat, 1)
    # Unwrap the 1x1 arrays into plain scalars.
    return (stat[0][0], pvalue[0][0])
def lmSarma(ols, w, spDcache):
    """
    LM SARMA test (joint lag + error), eq. (15) in Anselin et al. (1996).

    Parameters
    ----------
    ols      : OLS_dev
               Fitted OLS regression object
    w        : W
               Spatial weights instance
    spDcache : spDcache
               Cache of reusable spatial diagnostic terms

    Returns
    -------
    sarma : tuple
            (statistic, p-value) pair; the statistic is chi-square with
            2 degrees of freedom.

    References
    ----------
    .. [1] Anselin, L., Bera, A. K., Florax, R., Yoon, M. J. (1996) "Simple
       diagnostic tests for spatial dependence". Regional Science and Urban
       Economics, 26, 77-104.
    """
    lag_part = (spDcache.utwyDs - spDcache.utwuDs) ** 2 / (w.n * spDcache.j - spDcache.t)
    err_part = spDcache.utwuDs ** 2 / spDcache.t
    stat = lag_part + err_part
    pvalue = chisqprob(stat, 2)
    # Unwrap the 1x1 arrays into plain scalars.
    return (stat[0][0], pvalue[0][0])
def get_mI(reg, w, spDcache):
    """
    Moran's I statistic of spatial autocorrelation for regression residuals,
    as in Cliff & Ord (1981) [CO81]_, pp. 201-203.

    Parameters
    ----------
    reg      : OLS_dev, TSLS_dev, STSLS_dev
               Fitted regression object
    w        : W
               Spatial weights instance
    spDcache : spDcache
               Cache of reusable spatial diagnostic terms

    Returns
    -------
    moran : float
            Moran's I statistic.

    References
    ----------
    .. [CO81] Cliff, AD., Ord, JK. (1981) "Spatial processes: models &
       applications". Pion, London.
    """
    moran = (w.n * np.dot(reg.u.T, spDcache.wu)) / (w.s0 * reg.utu)
    # moran is a 1x1 array; return the scalar inside it.
    return moran[0][0]
def get_vI(ols, w, ei, spDcache):
    """
    Variance of Moran's I, coded as in Cliff & Ord (1981, pp. 201-203)
    and in R's spdep package.  The ``ei`` argument is accepted for
    signature compatibility but is not used in the computation.
    """
    A, B = spDcache.AB[0], spDcache.AB[1]
    tr_AA = np.sum(np.dot(A, A).diagonal())
    tr_B = np.sum(B.diagonal()) * 4.
    # Residual degrees of freedom, n - k.
    dof = w.n - ols.k
    scale = w.n ** 2 / (w.s0 ** 2 * dof * (dof + 2.))
    return scale * (w.s1 + 2. * tr_AA - tr_B - ((2. * (spDcache.trA ** 2)) / dof))
def get_eI(ols, w, spDcache):
    """
    Expected value of Moran's I under the null, computed via the trace of
    the projection-adjusted weights matrix (matrix M formulation).
    """
    dof = w.n - ols.k
    return - (w.n * spDcache.trA) / (w.s0 * dof)
def get_zI(I, ei, vi):
    """
    Standardized Moran's I.

    Returns the absolute z-value and a two-sided p-value (normal tail
    probability doubled), matching the convention of the GeoDa family.
    """
    z_abs = abs((I - ei) / np.sqrt(vi))
    two_sided_p = norm.sf(z_abs) * 2.
    return (z_abs, two_sided_p)
def akTest(iv, w, spDcache):
    r"""
    Computes AK-test for the general case (end. reg. + sp. lag)
    ...
    Parameters
    ----------
    iv : STSLS_dev
    Instance from spatial 2SLS regression
    w : W
    Spatial weights instance
    spDcache : spDcache
    Instance of spDcache class
    Attributes
    ----------
    mi : float
    Moran's I statistic for IV residuals
    ak : float
    Square of corrected Moran's I for residuals::
    .. math::
    ak = \dfrac{N \times I^*}{\phi^2}
    p : float
    P-value of the test
    ToDo:
    * Code in as Nancy
    * Compare both
    """
    mi = get_mI(iv, w, spDcache)
    # Phi2
    # Variance correction term accounting for the instruments used in the
    # IV regression (e' W Z and the coefficient covariance varb).
    etwz = spdot(iv.u.T, spdot(w.sparse, iv.z))
    a = np.dot(etwz, np.dot(iv.varb, etwz.T))
    s12 = (w.s0 / w.n) ** 2
    phi2 = (spDcache.t + (4.0 / iv.sig2n) * a) / (s12 * w.n)
    # Corrected statistic; distributed chi-square with 1 degree of freedom.
    ak = w.n * mi ** 2 / phi2
    pval = chisqprob(ak, 1)
    # ak/pval are 1x1 arrays; unwrap them to scalars.
    return (mi, ak[0][0], pval[0][0])
def _test():
    """Run the doctests embedded in this module."""
    import doctest
    doctest.testmod()
# Execute the doctests when the module is run as a script.
if __name__ == '__main__':
    _test()
| |
import io
import os
import sys
import pickle
import subprocess
import unittest
from unittest.case import _Outcome
from unittest.test.support import (LoggingResult,
ResultWithNoStartTestRunStopTestRun)
class TestCleanUp(unittest.TestCase):
    """Tests for TestCase.addCleanup / doCleanups behaviour."""
    def testCleanUp(self):
        """Cleanups are recorded with their args and run in LIFO order."""
        class TestableTest(unittest.TestCase):
            def testNothing(self):
                pass
        test = TestableTest('testNothing')
        self.assertEqual(test._cleanups, [])
        cleanups = []
        def cleanup1(*args, **kwargs):
            cleanups.append((1, args, kwargs))
        def cleanup2(*args, **kwargs):
            cleanups.append((2, args, kwargs))
        test.addCleanup(cleanup1, 1, 2, 3, four='hello', five='goodbye')
        test.addCleanup(cleanup2)
        self.assertEqual(test._cleanups,
            [(cleanup1, (1, 2, 3), dict(four='hello', five='goodbye')),
             (cleanup2, (), {})])
        # doCleanups pops and runs the stack last-in-first-out.
        self.assertTrue(test.doCleanups())
        self.assertEqual(cleanups, [(2, (), {}), (1, (1, 2, 3), dict(four='hello', five='goodbye'))])
    def testCleanUpWithErrors(self):
        """Exceptions raised by cleanups are collected on the outcome."""
        class TestableTest(unittest.TestCase):
            def testNothing(self):
                pass
        test = TestableTest('testNothing')
        outcome = test._outcome = _Outcome()
        exc1 = Exception('foo')
        exc2 = Exception('bar')
        def cleanup1():
            raise exc1
        def cleanup2():
            raise exc2
        test.addCleanup(cleanup1)
        test.addCleanup(cleanup2)
        # Both cleanups still run; doCleanups reports overall failure.
        self.assertFalse(test.doCleanups())
        self.assertFalse(outcome.success)
        # Errors were appended in execution (LIFO) order: cleanup2 first.
        ((_, (Type1, instance1, _)),
         (_, (Type2, instance2, _))) = reversed(outcome.errors)
        self.assertEqual((Type1, instance1), (Exception, exc1))
        self.assertEqual((Type2, instance2), (Exception, exc2))
    def testCleanupInRun(self):
        """Cleanups run after tearDown in run(); also when setUp raises."""
        blowUp = False
        ordering = []
        class TestableTest(unittest.TestCase):
            def setUp(self):
                ordering.append('setUp')
                if blowUp:
                    raise Exception('foo')
            def testNothing(self):
                ordering.append('test')
            def tearDown(self):
                ordering.append('tearDown')
        test = TestableTest('testNothing')
        def cleanup1():
            ordering.append('cleanup1')
        def cleanup2():
            ordering.append('cleanup2')
        test.addCleanup(cleanup1)
        test.addCleanup(cleanup2)
        def success(some_test):
            self.assertEqual(some_test, test)
            ordering.append('success')
        result = unittest.TestResult()
        result.addSuccess = success
        test.run(result)
        self.assertEqual(ordering, ['setUp', 'test', 'tearDown',
                                    'cleanup2', 'cleanup1', 'success'])
        blowUp = True
        ordering = []
        test = TestableTest('testNothing')
        test.addCleanup(cleanup1)
        test.run(result)
        # setUp raised: the test body and tearDown are skipped, but the
        # registered cleanup still runs.
        self.assertEqual(ordering, ['setUp', 'cleanup1'])
    def testTestCaseDebugExecutesCleanups(self):
        """debug() runs cleanups too, including ones added by a cleanup."""
        ordering = []
        class TestableTest(unittest.TestCase):
            def setUp(self):
                ordering.append('setUp')
                self.addCleanup(cleanup1)
            def testNothing(self):
                ordering.append('test')
            def tearDown(self):
                ordering.append('tearDown')
        test = TestableTest('testNothing')
        def cleanup1():
            ordering.append('cleanup1')
            test.addCleanup(cleanup2)
        def cleanup2():
            ordering.append('cleanup2')
        test.debug()
        self.assertEqual(ordering, ['setUp', 'test', 'tearDown', 'cleanup1', 'cleanup2'])
class Test_TextTestRunner(unittest.TestCase):
    """Tests for TextTestRunner."""
    def setUp(self):
        # clean the environment from pre-existing PYTHONWARNINGS to make
        # test_warnings results consistent
        self.pythonwarnings = os.environ.get('PYTHONWARNINGS')
        if self.pythonwarnings:
            del os.environ['PYTHONWARNINGS']
    def tearDown(self):
        # bring back pre-existing PYTHONWARNINGS if present
        if self.pythonwarnings:
            os.environ['PYTHONWARNINGS'] = self.pythonwarnings
    def test_init(self):
        """A default-constructed runner exposes the documented defaults."""
        runner = unittest.TextTestRunner()
        self.assertFalse(runner.failfast)
        self.assertFalse(runner.buffer)
        self.assertEqual(runner.verbosity, 1)
        self.assertEqual(runner.warnings, None)
        self.assertTrue(runner.descriptions)
        self.assertEqual(runner.resultclass, unittest.TextTestResult)
        self.assertFalse(runner.tb_locals)
    def test_multiple_inheritance(self):
        """TextTestResult must forward constructor args up the MRO."""
        class AResult(unittest.TestResult):
            def __init__(self, stream, descriptions, verbosity):
                super(AResult, self).__init__(stream, descriptions, verbosity)
        class ATextResult(unittest.TextTestResult, AResult):
            pass
        # This used to raise an exception due to TextTestResult not passing
        # on arguments in its __init__ super call
        ATextResult(None, None, 1)
    def testBufferAndFailfast(self):
        """failfast/buffer runner flags propagate onto the result object."""
        class Test(unittest.TestCase):
            def testFoo(self):
                pass
        result = unittest.TestResult()
        runner = unittest.TextTestRunner(stream=io.StringIO(), failfast=True,
                                         buffer=True)
        # Use our result object
        runner._makeResult = lambda: result
        runner.run(Test('testFoo'))
        self.assertTrue(result.failfast)
        self.assertTrue(result.buffer)
    def test_locals(self):
        """tb_locals is forwarded from the runner to the result."""
        runner = unittest.TextTestRunner(stream=io.StringIO(), tb_locals=True)
        result = runner.run(unittest.TestSuite())
        self.assertEqual(True, result.tb_locals)
    def testRunnerRegistersResult(self):
        """run() registers its result exactly once (for signal handling)."""
        class Test(unittest.TestCase):
            def testFoo(self):
                pass
        originalRegisterResult = unittest.runner.registerResult
        def cleanup():
            unittest.runner.registerResult = originalRegisterResult
        self.addCleanup(cleanup)
        result = unittest.TestResult()
        runner = unittest.TextTestRunner(stream=io.StringIO())
        # Use our result object
        runner._makeResult = lambda: result
        self.wasRegistered = 0
        def fakeRegisterResult(thisResult):
            self.wasRegistered += 1
            self.assertEqual(thisResult, result)
        unittest.runner.registerResult = fakeRegisterResult
        runner.run(unittest.TestSuite())
        self.assertEqual(self.wasRegistered, 1)
    def test_works_with_result_without_startTestRun_stopTestRun(self):
        """Old-style results lacking start/stopTestRun still work."""
        class OldTextResult(ResultWithNoStartTestRunStopTestRun):
            separator2 = ''
            def printErrors(self):
                pass
        class Runner(unittest.TextTestRunner):
            def __init__(self):
                super(Runner, self).__init__(io.StringIO())
            def _makeResult(self):
                return OldTextResult()
        runner = Runner()
        runner.run(unittest.TestSuite())
    def test_startTestRun_stopTestRun_called(self):
        """startTestRun and stopTestRun are invoked around the run."""
        class LoggingTextResult(LoggingResult):
            separator2 = ''
            def printErrors(self):
                pass
        class LoggingRunner(unittest.TextTestRunner):
            def __init__(self, events):
                super(LoggingRunner, self).__init__(io.StringIO())
                self._events = events
            def _makeResult(self):
                return LoggingTextResult(self._events)
        events = []
        runner = LoggingRunner(events)
        runner.run(unittest.TestSuite())
        expected = ['startTestRun', 'stopTestRun']
        self.assertEqual(events, expected)
    def test_pickle_unpickle(self):
        # Issue #7197: a TextTestRunner should be (un)pickleable. This is
        # required by test_multiprocessing under Windows (in verbose mode).
        stream = io.StringIO("foo")
        runner = unittest.TextTestRunner(stream)
        for protocol in range(2, pickle.HIGHEST_PROTOCOL + 1):
            s = pickle.dumps(runner, protocol)
            obj = pickle.loads(s)
            # StringIO objects never compare equal, a cheap test instead.
            self.assertEqual(obj.stream.getvalue(), stream.getvalue())
    def test_resultclass(self):
        """A custom resultclass is stored and used by _makeResult()."""
        def MockResultClass(*args):
            return args
        STREAM = object()
        DESCRIPTIONS = object()
        VERBOSITY = object()
        runner = unittest.TextTestRunner(STREAM, DESCRIPTIONS, VERBOSITY,
                                         resultclass=MockResultClass)
        self.assertEqual(runner.resultclass, MockResultClass)
        expectedresult = (runner.stream, DESCRIPTIONS, VERBOSITY)
        self.assertEqual(runner._makeResult(), expectedresult)
    def test_warnings(self):
        """
        Check that warnings argument of TextTestRunner correctly affects the
        behavior of the warnings.
        """
        # see #10535 and the _test_warnings file for more information
        def get_parse_out_err(p):
            return [b.splitlines() for b in p.communicate()]
        opts = dict(stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                    cwd=os.path.dirname(__file__))
        ae_msg = b'Please use assertEqual instead.'
        at_msg = b'Please use assertTrue instead.'
        # no args -> all the warnings are printed, unittest warnings only once
        p = subprocess.Popen([sys.executable, '_test_warnings.py'], **opts)
        with p:
            out, err = get_parse_out_err(p)
        self.assertIn(b'OK', err)
        # check that the total number of warnings in the output is correct
        self.assertEqual(len(out), 12)
        # check that the numbers of the different kind of warnings is correct
        for msg in [b'dw', b'iw', b'uw']:
            self.assertEqual(out.count(msg), 3)
        for msg in [ae_msg, at_msg, b'rw']:
            self.assertEqual(out.count(msg), 1)
        args_list = (
            # passing 'ignore' as warnings arg -> no warnings
            [sys.executable, '_test_warnings.py', 'ignore'],
            # -W doesn't affect the result if the arg is passed
            [sys.executable, '-Wa', '_test_warnings.py', 'ignore'],
            # -W affects the result if the arg is not passed
            [sys.executable, '-Wi', '_test_warnings.py']
        )
        # in all these cases no warnings are printed
        for args in args_list:
            p = subprocess.Popen(args, **opts)
            with p:
                out, err = get_parse_out_err(p)
            self.assertIn(b'OK', err)
            self.assertEqual(len(out), 0)
        # passing 'always' as warnings arg -> all the warnings printed,
        # unittest warnings only once
        p = subprocess.Popen([sys.executable, '_test_warnings.py', 'always'],
                             **opts)
        with p:
            out, err = get_parse_out_err(p)
        self.assertIn(b'OK', err)
        self.assertEqual(len(out), 14)
        for msg in [b'dw', b'iw', b'uw', b'rw']:
            self.assertEqual(out.count(msg), 3)
        for msg in [ae_msg, at_msg]:
            self.assertEqual(out.count(msg), 1)
    def testStdErrLookedUpAtInstantiationTime(self):
        # see issue 10786
        old_stderr = sys.stderr
        f = io.StringIO()
        sys.stderr = f
        try:
            runner = unittest.TextTestRunner()
            self.assertTrue(runner.stream.stream is f)
        finally:
            sys.stderr = old_stderr
    def testSpecifiedStreamUsed(self):
        # see issue 10786
        f = io.StringIO()
        runner = unittest.TextTestRunner(f)
        self.assertTrue(runner.stream.stream is f)
# Run this module's tests when executed directly.
if __name__ == "__main__":
    unittest.main()
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Autor: xielequan
import config
import logging
from rest_adapter import Rest_Adapter
logger = logging.getLogger("ops.sshEngine")
from process_adapter import Process_Adapter
import os
import paramiko
import json
import inspect
from dao.urlmapping_mgmt import urlmapping_mgmt
try:
from lxml import etree
except ImportError:
import xml.etree.ElementTree as etree
from sshClient import SSHClient
##-------------------------------------------------------------------------------
##-- Class Name : Process_Rest_Api
##-- Date Created : 2015-3-24
##-- Author : x00302603
##-- Description : Class contains
##-- a. Methods to open and close ssh session
##-- b. Methods to handle REST APIs (POST, PUT, DELETE, GET)
##-- c. Methods to handle example string
##-- d. Methods to get the Status of ssh session
##-- e. Methods to get the file of sqlite db
##-- Caution :
##------------------------------------------------------------------------------
class Process_Rest_Api(Rest_Adapter):
    """REST adapter that executes requests over an SSH session.

    Maps REST calls (GET/POST/PUT/DELETE) onto pluggable ``Rest_Process``
    modules looked up through a sqlite-backed URL mapping table, and runs
    them against a device reached via :class:`SSHClient`.
    """
    # Class-level counter used to hand out a unique creation id (cid) per
    # instance, for log correlation.
    createid = 0
    # Cache of already-imported process modules, keyed by module name.
    instanceMap = { }

    def __init__(self, ip, port=22, username="root123", password="Root@123", dbfilename=None):
        """Open the SSH session and record connection state.

        On connection failure the constructor does not raise: it sets
        ``status = False`` and keeps the exception in ``errorinfo``.
        """
        self.ip = ip
        self.port = port
        self.username = username
        self.password = password
        # BUG FIX: the original did `self.createid = self.createid + 1`,
        # which reads the class attribute but writes an instance attribute,
        # so every instance ended up with cid == 1.  Increment the shared
        # class-level counter so each instance gets a unique id.
        Process_Rest_Api.createid += 1
        self.cid = Process_Rest_Api.createid
        self.status = True
        self.errorinfo = ""
        if dbfilename is None:
            self.dbfile = self.getUrlmappingClass()
        else:
            self.dbfile = dbfilename
        self.sshEngine = SSHClient(ip, port, username, password)
        try:
            self.sshEngine.connect()
            self.status = self.sshEngine.getStatus()
            logger.debug('client session creation is successful ')
        except Exception as e:
            self.status = False
            self.errorinfo = e
            logger.error('client session creation is failed. error: %s ', self.errorinfo)
        logger.info('create sshEngine %s, ip:%s, port:%s, username:%s ' % (self.cid, self.ip, self.port, self.username))

    def getUrlmappingClass(self):
        """Fetch the caller's module-level ``dbfile`` via stack inspection.

        HACK: walks four frames up the call stack and reads ``dbfile`` from
        that frame's globals; it only works when invoked through the
        expected entry path.
        """
        stack = inspect.stack()
        stacklocal = stack[4][0]
        return stacklocal.f_globals['dbfile']

    def getInstance(self, filename):
        """Import (with caching) the process module named ``filename`` and
        return its ``Rest_Process`` object bound to this SSH engine."""
        # Py3 fix: `dict.has_key` is Python-2-only; `in` works on both.
        if filename in self.instanceMap:
            mod = self.instanceMap[filename]
        else:
            mod = __import__(filename)
            self.instanceMap[filename] = mod
            logger.info(' renew process class %s' % (filename))
        if mod is None:
            return None
        # __import__ returns the top-level package; walk down to the leaf
        # module when the name is dotted.
        components = filename.split('.')
        for comp in components[1:]:
            mod = getattr(mod, comp)
        classObj = mod.Rest_Process(self.sshEngine)
        assert isinstance(classObj, Process_Adapter)
        return classObj

    def getFileName(self, schemapath):
        """Return the process module name whose URL regex matches
        ``schemapath``, or None when nothing matches."""
        import re
        processUrlfileName = None
        urlmappingClass = urlmapping_mgmt(None, self.dbfile)
        templateMap = urlmappingClass.find_urlmappingbygroup("sshengine")
        if templateMap is None:
            return None
        for elem in templateMap:
            urlpath = elem.get("uriregular")
            fileName = elem.get("modulename")
            if re.match(urlpath, schemapath) is not None:
                processUrlfileName = fileName
                break
        return processUrlfileName

    def __exit__(self):
        # NOTE: not a standard context-manager __exit__ (no exception
        # arguments); callers invoke it directly to tear the session down.
        self.close()
        logger.debug('close cid:%s, ip:%s, port:%s, username:%s ' % (self.cid, self.ip, self.port, self.username))

    def getStatus(self):
        """Refresh and return the underlying SSH session status."""
        logger.debug('getStatus() cid:%s ' % (self.cid))
        self.status = self.sshEngine.getStatus()
        return self.status

    def close(self):
        """Disconnect the SSH session."""
        # BUG FIX: the original evaluated `self.sshEngine.disconnect`
        # without calling it, so the session was never actually closed.
        self.sshEngine.disconnect()

    def reconnect(self):
        """Re-establish the SSH session; always returns True."""
        self.sshEngine.reconnect()
        return True

    def getBodyType(self, dInputBody):
        """Return (is_json, is_xml, is_text) flags for the request body."""
        # dict.get returns None for missing keys, which matches the original
        # "key present and value is not None" semantics.
        isIpBodyJsonType = dInputBody.get(config.ISDK_JSON_IP_BODY) is not None
        isIpBodyXMLType = dInputBody.get(config.ISDK_XML_IP_BODY) is not None
        isIpBodyTextType = dInputBody.get(config.ISDK_TEXT_IP_BODY) is not None
        return isIpBodyJsonType, isIpBodyXMLType, isIpBodyTextType

    def getBody(self, dInputBody):
        """Return ``(bodytype, text)``; precedence is json > xml > text."""
        isIpBodyJsonType, isIpBodyXMLType, isIpBodyTextType = self.getBodyType(dInputBody)
        if isIpBodyJsonType:
            return "json", dInputBody[config.ISDK_JSON_IP_BODY]
        if isIpBodyXMLType:
            return "xml", dInputBody[config.ISDK_XML_IP_BODY]
        if isIpBodyTextType:
            return "text", dInputBody[config.ISDK_TEXT_IP_BODY]
        # No recognised body key: keep the original defaults.
        return "text", ''

    def rest_api_handle(self, method, dInputBody, schemapath):
        """Dispatch one REST request to the mapped process module.

        Returns the error text when the process reports an error, otherwise
        its output.  Raises NotImplementedError when no process module is
        mapped for the path.
        """
        logger.debug('post: cid:%d, XPATH:%s, body:%s' % (self.cid, schemapath, dInputBody))
        lrestApiPath = schemapath.split('/')
        lrestApiPath.pop(0)
        # BUG FIX: the original popped first and compared the result to None
        # (never true; an empty list raised IndexError instead of the
        # intended error message).  Validate before popping.
        if not lrestApiPath:
            raise Exception('only accept URL path /apiname .')
        strschemapath = '/'.join(lrestApiPath)
        buildApiPath = lrestApiPath.pop(0)
        if buildApiPath in ("example", "help"):
            return self.get_example_str()
        bodytype, executetext = self.getBody(dInputBody)
        instanceClass = None
        processFileName = self.getFileName(strschemapath)
        if processFileName is not None:
            instanceClass = self.getInstance(processFileName)
        # execute process from filename
        err = ""
        output = ""
        if instanceClass is not None:
            output, err = instanceClass.process(method, strschemapath, bodytype, executetext)
        else:
            # Raw command execution without a mapped process module is
            # deliberately not supported.
            raise NotImplementedError
        logger.debug('cid: %d run end' % (self.cid))
        if err != "":
            return err
        return output

    def post_rest_api_handle(self, dInputBody, schemapath):
        """Handle a POST request."""
        return self.rest_api_handle("POST", dInputBody, schemapath)

    def put_rest_api_handle(self, dInputBody, schemapath):
        """Handle a PUT request."""
        return self.rest_api_handle("PUT", dInputBody, schemapath)

    def delete_rest_api_handle(self, dInputBody, schemapath):
        """Handle a DELETE request."""
        return self.rest_api_handle("DELETE", dInputBody, schemapath)

    def get_rest_api_handle(self, dInputBody, schemapath):
        """Handle a GET request."""
        return self.rest_api_handle("GET", dInputBody, schemapath)

    def set_main_device(self, ipaddress, port, username, password):
        """Not used by this adapter; kept for interface compatibility."""
        pass

    def get_esn(self):
        """This adapter exposes no ESN; return an empty string."""
        return ''

    def set_multi_ipaddress(self, deviceList):
        """Not used by this adapter; kept for interface compatibility."""
        pass

    def get_example_str(self):
        """Build the XML usage examples returned for /example and /help."""
        examples = etree.Element('examples')
        # example of tsu add
        cfgElement = etree.SubElement(examples, 'example')
        childCfgElement = etree.SubElement(cfgElement, 'URL')
        childCfgElement.text = "/exec"
        childCfgElement = etree.SubElement(cfgElement, 'Method')
        childCfgElement.text = "POST|PUT"
        childCfgElement = etree.SubElement(cfgElement, 'Body')
        childCfgElement.text = 'ovs-vsctl show'
        childCfgElement = etree.SubElement(cfgElement, 'Description')
        childCfgElement.text = "execute ovs-vsctl show with username and password "
        # example of Connect tsu
        cfgElement = etree.SubElement(examples, 'example')
        childCfgElement = etree.SubElement(cfgElement, 'URL')
        childCfgElement.text = "/example"
        childCfgElement = etree.SubElement(cfgElement, 'Method')
        childCfgElement.text = "GET"
        childCfgElement = etree.SubElement(cfgElement, 'Body')
        childCfgElement.text = ''
        childCfgElement = etree.SubElement(cfgElement, 'Description')
        childCfgElement.text = "get a examples !!! "
        # NOTE(review): pretty_print is an lxml-only keyword; this call fails
        # when the stdlib ElementTree fallback import is in use -- confirm
        # lxml is a hard requirement.
        return etree.tostring(examples, pretty_print=True)
if __name__ == '__main__':
    # Ad-hoc manual test: open a session against a lab device and issue one
    # GET request.  NOTE(review): this block is Python 2 only (print
    # statement, os.getcwdu) and hard-codes lab credentials.
    server_ip = '10.110.139.189'
    server_user = 'root'
    server_passwd = 'passw0rd'
    server_port = 22
    DBPath=os.getcwdu()
    from plugin.bootle_sqlite import SQLitePlugin
    dbfile='%s%sOPS2.db' % (DBPath, os.sep)
    sqlite_plugin=SQLitePlugin(dbfile)
    classObj = Process_Rest_Api(server_ip, server_port, server_user, server_passwd, dbfile)
    #schemapath = '6/TgTsuAdd?fixed_params=LAN-1002,10.137.222.217'
    schemapath = '6/inventory/interfaces/interface?portID=tapf89b9d66-8e'
    dInputBody ={
        'text_input':"dir"
    }
    jsonObj = classObj.get_rest_api_handle(dInputBody, schemapath)
    #jsonObj = classObj.get_rest_api_handle(dInputBody, schemapath)
    # jsonObj = classObj._m.poweroff_machine()
    print '\n POST Output JSON object:\n',jsonObj
    classObj.__exit__()
| |
"""Starts a service to scan in intervals for new devices."""
from datetime import timedelta
import json
import logging
from netdisco.discovery import NetworkDiscovery
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.components import zeroconf
from homeassistant.const import EVENT_HOMEASSISTANT_STARTED
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.discovery import async_discover, async_load_platform
from homeassistant.helpers.event import async_track_point_in_utc_time
from homeassistant.loader import async_get_zeroconf
import homeassistant.util.dt as dt_util
DOMAIN = "discovery"
# Interval between periodic network scans.
SCAN_INTERVAL = timedelta(seconds=300)
SERVICE_APPLE_TV = "apple_tv"
SERVICE_DAIKIN = "daikin"
SERVICE_DLNA_DMR = "dlna_dmr"
SERVICE_ENIGMA2 = "enigma2"
SERVICE_HASS_IOS_APP = "hass_ios"
SERVICE_HASSIO = "hassio"
SERVICE_HEOS = "heos"
SERVICE_KONNECTED = "konnected"
SERVICE_MOBILE_APP = "hass_mobile_app"
SERVICE_NETGEAR = "netgear_router"
SERVICE_OCTOPRINT = "octoprint"
SERVICE_SABNZBD = "sabnzbd"
SERVICE_SAMSUNG_PRINTER = "samsung_printer"
SERVICE_TELLDUSLIVE = "tellstick"
SERVICE_YEELIGHT = "yeelight"
SERVICE_WEMO = "belkin_wemo"
SERVICE_WINK = "wink"
SERVICE_XIAOMI_GW = "xiaomi_gw"
# These have custom protocols
# Maps netdisco service name -> config-entry handler domain.
CONFIG_ENTRY_HANDLERS = {
    SERVICE_TELLDUSLIVE: "tellduslive",
    "logitech_mediaserver": "squeezebox",
}
# These have no config flows
# Maps netdisco service name -> (component, platform); a None platform
# means the generic discovery helper is used instead of a platform load.
SERVICE_HANDLERS = {
    SERVICE_NETGEAR: ("device_tracker", None),
    SERVICE_ENIGMA2: ("media_player", "enigma2"),
    SERVICE_SABNZBD: ("sabnzbd", None),
    "yamaha": ("media_player", "yamaha"),
    "frontier_silicon": ("media_player", "frontier_silicon"),
    "openhome": ("media_player", "openhome"),
    "bose_soundtouch": ("media_player", "soundtouch"),
    "bluesound": ("media_player", "bluesound"),
    "lg_smart_device": ("media_player", "lg_soundbar"),
}
# Handlers only activated when listed under discovery.enable in config.
OPTIONAL_SERVICE_HANDLERS = {SERVICE_DLNA_DMR: ("media_player", "dlna_dmr")}
# Integrations that migrated to their own config flows; discovery results
# for these are ignored here.
MIGRATED_SERVICE_HANDLERS = [
    SERVICE_APPLE_TV,
    "axis",
    "deconz",
    SERVICE_DAIKIN,
    "denonavr",
    "esphome",
    "google_cast",
    SERVICE_HASS_IOS_APP,
    SERVICE_HASSIO,
    SERVICE_HEOS,
    "harmony",
    "homekit",
    "ikea_tradfri",
    "kodi",
    SERVICE_KONNECTED,
    SERVICE_MOBILE_APP,
    SERVICE_OCTOPRINT,
    "philips_hue",
    SERVICE_SAMSUNG_PRINTER,
    "sonos",
    "songpal",
    SERVICE_WEMO,
    SERVICE_WINK,
    SERVICE_XIAOMI_GW,
    "volumio",
    SERVICE_YEELIGHT,
    "nanoleaf_aurora",
]
DEFAULT_ENABLED = (
    list(CONFIG_ENTRY_HANDLERS) + list(SERVICE_HANDLERS) + MIGRATED_SERVICE_HANDLERS
)
DEFAULT_DISABLED = list(OPTIONAL_SERVICE_HANDLERS) + MIGRATED_SERVICE_HANDLERS
CONF_IGNORE = "ignore"
CONF_ENABLE = "enable"
CONFIG_SCHEMA = vol.Schema(
    {
        vol.Optional(DOMAIN): vol.Schema(
            {
                vol.Optional(CONF_IGNORE, default=[]): vol.All(
                    cv.ensure_list, [vol.In(DEFAULT_ENABLED)]
                ),
                vol.Optional(CONF_ENABLE, default=[]): vol.All(
                    cv.ensure_list, [vol.In(DEFAULT_DISABLED + DEFAULT_ENABLED)]
                ),
            }
        )
    },
    extra=vol.ALLOW_EXTRA,
)
async def async_setup(hass, config):
"""Start a discovery service."""
logger = logging.getLogger(__name__)
netdisco = NetworkDiscovery()
already_discovered = set()
if DOMAIN in config:
# Platforms ignore by config
ignored_platforms = config[DOMAIN][CONF_IGNORE]
# Optional platforms enabled by config
enabled_platforms = config[DOMAIN][CONF_ENABLE]
else:
ignored_platforms = []
enabled_platforms = []
for platform in enabled_platforms:
if platform in DEFAULT_ENABLED:
logger.warning(
"Please remove %s from your discovery.enable configuration "
"as it is now enabled by default",
platform,
)
zeroconf_instance = await zeroconf.async_get_instance(hass)
# Do not scan for types that have already been converted
# as it will generate excess network traffic for questions
# the zeroconf instance already knows the answers
zeroconf_types = list(await async_get_zeroconf(hass))
async def new_service_found(service, info):
"""Handle a new service if one is found."""
if service in MIGRATED_SERVICE_HANDLERS:
return
if service in ignored_platforms:
logger.info("Ignoring service: %s %s", service, info)
return
discovery_hash = json.dumps([service, info], sort_keys=True)
if discovery_hash in already_discovered:
logger.debug("Already discovered service %s %s.", service, info)
return
already_discovered.add(discovery_hash)
if service in CONFIG_ENTRY_HANDLERS:
await hass.config_entries.flow.async_init(
CONFIG_ENTRY_HANDLERS[service],
context={"source": config_entries.SOURCE_DISCOVERY},
data=info,
)
return
comp_plat = SERVICE_HANDLERS.get(service)
if not comp_plat and service in enabled_platforms:
comp_plat = OPTIONAL_SERVICE_HANDLERS[service]
# We do not know how to handle this service.
if not comp_plat:
logger.debug("Unknown service discovered: %s %s", service, info)
return
logger.info("Found new service: %s %s", service, info)
component, platform = comp_plat
if platform is None:
await async_discover(hass, service, info, component, config)
else:
await async_load_platform(hass, component, platform, info, config)
async def scan_devices(now):
"""Scan for devices."""
try:
results = await hass.async_add_executor_job(
_discover, netdisco, zeroconf_instance, zeroconf_types
)
for result in results:
hass.async_create_task(new_service_found(*result))
except OSError:
logger.error("Network is unreachable")
async_track_point_in_utc_time(
hass, scan_devices, dt_util.utcnow() + SCAN_INTERVAL
)
@callback
def schedule_first(event):
    """Schedule the first discovery when Home Assistant starts up."""
    # Fire the first scan immediately; scan_devices reschedules itself.
    async_track_point_in_utc_time(hass, scan_devices, dt_util.utcnow())
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STARTED, schedule_first)
return True
def _discover(netdisco, zeroconf_instance, zeroconf_types):
"""Discover devices."""
results = []
try:
netdisco.scan(
zeroconf_instance=zeroconf_instance, suppress_mdns_types=zeroconf_types
)
for disc in netdisco.discover():
for service in netdisco.get_info(disc):
results.append((disc, service))
finally:
netdisco.stop()
return results
| |
# -*- coding: utf-8 -*-
from sqlalchemy.ext.declarative import declared_attr
from coaster.utils import LabeledEnum
from baseframe import __
from ..registry import OrderedDict
from . import db, BaseMixin, BaseScopedNameMixin
from .user import User, UserEmail, UserPhone
from .client import Client
__all__ = ['SMSMessage', 'SMS_STATUS']
# --- Flags -------------------------------------------------------------------
class SMS_STATUS(LabeledEnum):
    """Delivery status of an outbound SMS (stored in SMSMessage.status)."""
    QUEUED = (0, __(u"Queued"))
    PENDING = (1, __(u"Pending"))
    DELIVERED = (2, __(u"Delivered"))
    FAILED = (3, __(u"Failed"))
    UNKNOWN = (4, __(u"Unknown"))
class NOTIFICATION_FLAGS(LabeledEnum):
    """Feedback events a transport channel can report for a notification."""
    DELIVERY = (0, __(u"Delivery"))
    READ = (1, __(u"Read"))
    BOUNCE = (2, __(u"Bounce"))
class NOTIFICATION_TYPE(LabeledEnum):
    """Category of a notification, from mandatory down to mass mail."""
    MANDATORY = (0, u'mandatory', __(u"Mandatory"))  # Mandatory service announcement
    TRANSACTIONAL = (1, u'transactional', __(u"Transactional"))  # Result of user activity
    ALERT = (2, u'alert', __(u"Alert"))  # Periodic alert based on set criteria
    MASS = (3, u'mass', __(u"Mass"))  # Mass mail from the service provider
# A note on frequency: scheduling/batching is done by Lastuser, not by the client app
class NOTIFICATION_FREQUENCY(LabeledEnum):
    """How often a notification is delivered (scheduling/batching is done
    by Lastuser, not by the client app)."""
    IMMEDIATE = (0, u'immed', __(u"Immediately"))  # Alert user immediately
    DELAYED = (1, u'delay', __(u"Delayed"))  # Send after a timeout, allowing app to cancel (tentative)
    DAILY = (2, u'daily', __(u"Batched daily"))  # Send a daily digest
    WEEKLY = (3, u'weekly', __(u"Batched weekly"))  # Send a weekly digest
    MONTHLY = (4, u'monthly', __(u"Batched monthly"))  # Send a monthly digest
# --- Transport Channels ------------------------------------------------------
# Move these into a registry like the LoginRegistry
class Channel(object):
    """Base class for a notification transport channel.

    Subclasses declare a machine-readable name, a display title, the
    NOTIFICATION_FLAGS they can report, and boolean capabilities for
    delivery, bounce and read receipts.
    """
    name = u''      # machine-readable channel identifier
    title = u""     # user-facing channel title
    flags = []      # NOTIFICATION_FLAGS this channel can report
    delivery_flag = False  # can report successful delivery
    bounce_flag = False    # can report a bounce/failure
    read_flag = False      # can report that the user read the message
class ChannelBrowser(Channel):
    """In-app (browser) channel: delivery and read receipts, no bounces."""
    name = u'browser'
    title = __(u"In app")
    flags = [NOTIFICATION_FLAGS.DELIVERY, NOTIFICATION_FLAGS.READ]
    delivery_flag = True
    bounce_flag = False
    read_flag = True
class ChannelEmail(Channel):
    """Email channel: bounces and read receipts, no delivery confirmation."""
    name = u'email'
    title = __(u"Email")
    flags = [NOTIFICATION_FLAGS.BOUNCE, NOTIFICATION_FLAGS.READ]
    delivery_flag = False
    bounce_flag = True
    read_flag = True
class ChannelTwitter(Channel):
    """Twitter channel: delivery and bounce reports, no read receipts."""
    name = u'twitter'
    title = __(u"Twitter")
    flags = [NOTIFICATION_FLAGS.DELIVERY, NOTIFICATION_FLAGS.BOUNCE]
    delivery_flag = True
    bounce_flag = True
    read_flag = False
class ChannelSMS(Channel):
    """SMS channel: delivery and bounce reports, no read receipts."""
    name = u'sms'
    title = __(u"SMS")
    flags = [NOTIFICATION_FLAGS.DELIVERY, NOTIFICATION_FLAGS.BOUNCE]
    delivery_flag = True
    bounce_flag = True
    read_flag = False
# Ordered mapping of channel name -> display title, in presentation order.
# NOTE(review): only name and title are retained here; the Channel classes
# themselves are not registered — confirm nothing needs the class objects.
channel_registry = OrderedDict([(c.name, c.title) for c in [
    ChannelBrowser, ChannelEmail, ChannelSMS, ChannelTwitter
]])
# --- Models ------------------------------------------------------------------
class SMSMessage(BaseMixin, db.Model):
    """An outbound SMS message with delivery-status tracking."""
    __tablename__ = 'smsmessage'
    __bind_key__ = 'lastuser'
    # Phone number that the message was sent to
    phone_number = db.Column(db.String(15), nullable=False)
    # Provider-assigned id used to match delivery callbacks; unique when set
    transaction_id = db.Column(db.Unicode(40), unique=True, nullable=True)
    # The message itself
    message = db.Column(db.UnicodeText, nullable=False)
    # Flags
    # Delivery status; values from SMS_STATUS (default 0 = QUEUED)
    status = db.Column(db.Integer, default=0, nullable=False)
    # When the status was last updated
    status_at = db.Column(db.DateTime, nullable=True)
    # Short failure reason from the gateway, if delivery failed
    fail_reason = db.Column(db.Unicode(25), nullable=True)
# class ChannelMixin(object):
# @declared_attr
# def _channels(self):
# """
# Preferred channels for sending this notification class (in order of preference).
# Only listed channels are available for delivery of this notification.
# """
# return db.Column('channels', db.Unicode(250), default=u'', nullable=False)
# def _channels_get(self):
# return [c.strip() for c in self._channels.replace(u'\r', u' ').replace(u'\n', u' ').split(u' ') if c]
# def _channels_set(self, value):
# if isinstance(value, basestring):
# value = [value]
# self._channels = u' '.join([c.strip() for c in value if c])
# @declared_attr
# def channels(self):
# return db.synonym('_channels', descriptor=property(self._channels_get, self._channels_set))
# class NotificationClass(ChannelMixin, BaseScopedNameMixin, db.Model):
# """
# A NotificationClass is a type of notification
# """
# __tablename__ = 'notification_class'
# __bind_key__ = 'lastuser'
# #: Client app that will send these notifications
# client_id = db.Column(None, db.ForeignKey('client.id'), nullable=False)
# client = db.relationship(Client, backref=db.backref('notifications', cascade='all, delete-orphan'))
# parent = db.synonym('client')
# #: User-unique notification class. The name is now a random unique string that is saved in the app
# #: and is used to send these notifications
# user_id = db.Column(None, db.ForeignKey('user.id'), nullable=True)
# user = db.relationship(User, foreign_keys=[user_id],
# backref=db.backref('notifications', cascade='all, delete-orphan'))
# #: Type of notification (as per NOTIFICATION_TYPE), currently for informational purposes only
# type = db.Column(db.SmallInteger, nullable=False)
# #: Default delivery frequency
# freq = db.Column(db.SmallInteger, nullable=False, default=NOTIFICATION_FREQUENCY.IMMEDIATE)
# __table_args__ = (db.UniqueConstraint('client_id', 'name'),)
# class UserNotificationPreference(ChannelMixin, BaseMixin, db.Model):
# __tablename__ = 'user_notification_preference'
# __bind_key__ = 'lastuser'
# #: The notification class these preferences are for
# notification_class_id = db.Column(None, db.ForeignKey('notification_class.id'), nullable=False)
# notification_class = db.relationship(NotificationClass,
# backref=db.backref('user_preferences', lazy='dynamic', cascade='all, delete-orphan'))
# #: The user these preferences are for
# user_id = db.Column(None, db.ForeignKey('user.id'), nullable=False)
# user = db.relationship(User, foreign_keys=[user_id],
# backref=db.backref('notification_preferences', cascade='all, delete-orphan'))
# #: Context for user's preferences (default user's userid, else org's userid)
# #: If we migrate User/Organization/Team into a Principal model (ticket #91)
# #: this should become a foreign key to Principal.
# context = db.Column(db.Unicode(22))
# #: Preferred email address for delivering these notifications. If blank,
# #: implies default email (user.email) or no email, depending on whether
# #: 'email' is in the channels
# email_id = db.Column(None, db.ForeignKey('useremail.id'), nullable=True)
# email = db.relationship(UserEmail)
# #: Preferred phone number for delivering these notifications. If blank,
# #: implies default phone (user.phone) or no SMS, depending on whether
# #: 'sms' is in the channels
# phone_id = db.Column(None, db.ForeignKey('userphone.id'), nullable=True)
# phone = db.relationship(UserPhone)
# #: User's preferred delivery frequency (null = default)
# _freq = db.Column('freq', db.SmallInteger, nullable=True)
# __table_args__ = (db.UniqueConstraint('user_id', 'notification_class_id', 'context'),)
# def _freq_get(self):
# return self._freq if self._freq is not None else self.notification_class.freq
# def _freq_set(self, value):
# self._freq = value
# freq = db.synonym('_freq', descriptor=property(_freq_get, _freq_set))
# def _channels_set(self, value):
# available_channels = self.notification_class.channels
# if isinstance(value, basestring):
# value = [value]
# self._channels = u' '.join([c.strip() for c in value if c and c in available_channels])
# channels = db.synonym('_channels', descriptor=property(ChannelMixin._channels_get, _channels_set))
| |
# coding: utf-8
import time
from collections import OrderedDict
from itertools import chain
from django.conf import settings
from django.utils.six import string_types
from pandas.core.frame import DataFrame
# an immediate fix to an error with the installation of pandas v0.15
try:
from pandas.io.parsers import ExcelWriter
except ImportError:
from pandas import ExcelWriter
from pyxform.constants import SELECT_ALL_THAT_APPLY
from pyxform.survey_element import SurveyElement
from pyxform.section import Section, RepeatingSection
from pyxform.question import Question
from onadata.apps.viewer.models.data_dictionary import DataDictionary
from onadata.apps.viewer.models.parsed_instance import ParsedInstance
from onadata.libs.exceptions import NoRecordsFoundError
from onadata.libs.utils.common_tags import (
ID,
XFORM_ID_STRING,
STATUS,
ATTACHMENTS,
GEOLOCATION, UUID,
SUBMISSION_TIME,
NA_REP,
DELETEDAT,
TAGS,
NOTES,
SUBMITTED_BY,
VALIDATION_STATUS
)
from onadata.libs.utils.export_tools import question_types_to_exclude
# this is Mongo Collection where we will store the parsed submissions
xform_instances = settings.MONGO_DB.instances

# XForm bind type that marks geopoint questions, which get split into
# latitude/longitude/altitude/precision columns on export
GEOPOINT_BIND_TYPE = "geopoint"

# column group delimiters
GROUP_DELIMITER_SLASH = '/'
GROUP_DELIMITER_DOT = '.'
DEFAULT_GROUP_DELIMITER = GROUP_DELIMITER_SLASH
GROUP_DELIMITERS = [GROUP_DELIMITER_SLASH, GROUP_DELIMITER_DOT]
def get_valid_sheet_name(sheet_name, existing_name_list):
    """Return a sheet name truncated to the XLS limit and unique in the list.

    The name is truncated to XLSDataFrameBuilder.SHEET_NAME_MAX_CHARS; if it
    collides with an existing name, a numeric suffix is appended (further
    truncating the base so the total stays within the limit).

    :param sheet_name: proposed sheet name
    :param existing_name_list: names already in use
    :return: a unique, length-limited sheet name
    """
    # truncate sheet_name to XLSDataFrameBuilder.SHEET_NAME_MAX_CHARS
    base_name = sheet_name[:XLSDataFrameBuilder.SHEET_NAME_MAX_CHARS]
    # make sure its unique within the list
    generated_name = base_name
    i = 1
    while generated_name in existing_name_list:
        digit_length = len(str(i))
        allowed_name_len = XLSDataFrameBuilder.SHEET_NAME_MAX_CHARS - \
            digit_length
        # Always suffix the ORIGINAL truncated base. The previous version
        # re-suffixed the already-suffixed name, so repeated collisions
        # produced "name1", "name12", "name123" instead of "name2", "name3".
        generated_name = "{0}{1}".format(base_name[:allowed_name_len], i)
        i += 1
    return generated_name
def remove_dups_from_list_maintain_order(l):
    """Return a copy of `l` with duplicates removed, keeping first-seen order."""
    seen = set()
    unique_items = []
    for item in l:
        if item not in seen:
            seen.add(item)
            unique_items.append(item)
    return unique_items
def get_prefix_from_xpath(xpath):
    """Return the parent xpath with a trailing slash, or None if top-level.

    e.g. "group/q1" -> "group/", "q1" -> None.
    """
    xpath = str(xpath)
    parts = xpath.rsplit('/', 1)
    # Two parts: everything before the last slash is the prefix.
    if len(parts) == 2:
        return '%s/' % parts[0]
    # One part: no slash, so there is no prefix.
    if len(parts) == 1:
        return None
    # rsplit with maxsplit=1 cannot yield anything else; kept defensively.
    raise ValueError(
        '%s cannot be prefixed, it returns %s' % (xpath, str(parts)))
class AbstractDataFrameBuilder:
    """Shared base for the XLS/CSV/KML dataframe builders.

    Queries Mongo for a form's submissions and performs common record
    munging: splitting select-multiple answers into per-choice columns and
    splitting geopoint values into component columns.
    """
    # Metadata columns that are stripped from exports.
    IGNORED_COLUMNS = [
        XFORM_ID_STRING,
        STATUS,
        ID,
        ATTACHMENTS,
        GEOLOCATION,
        DELETEDAT,  # no longer used but may persist in old submissions
        SUBMITTED_BY,
    ]
    # fields NOT within the form def that we want to include
    ADDITIONAL_COLUMNS = [UUID, SUBMISSION_TIME, TAGS, NOTES, VALIDATION_STATUS]
    # Default select-multiple encoding; overridden per-instance in __init__.
    BINARY_SELECT_MULTIPLES = False
    # NOTE(review): the string below is a no-op expression statement, not the
    # class docstring, because it does not appear first in the class body.
    """
    Group functionality used by any DataFrameBuilder i.e. XLS, CSV and KML
    """

    def __init__(self, username, id_string, filter_query=None,
                 group_delimiter=DEFAULT_GROUP_DELIMITER,
                 split_select_multiples=True, binary_select_multiples=False):
        self.username = username
        self.id_string = id_string
        self.filter_query = filter_query
        self.group_delimiter = group_delimiter
        self.split_select_multiples = split_select_multiples
        self.BINARY_SELECT_MULTIPLES = binary_select_multiples
        self._setup()

    def _setup(self):
        # Load the form definition and cache the xpaths of select-multiple
        # and geopoint questions for record munging.
        self.dd = DataDictionary.objects.get(user__username=self.username,
                                             id_string=self.id_string)
        self.select_multiples = self._collect_select_multiples(self.dd)
        self.gps_fields = self._collect_gps_fields(self.dd)

    @classmethod
    def _fields_to_select(cls, dd):
        # Abbreviated xpaths of all questions (sections excluded).
        return [c.get_abbreviated_xpath()
                for c in dd.get_survey_elements() if isinstance(c, Question)]

    @classmethod
    def _collect_select_multiples(cls, dd):
        # Map each select-multiple question xpath -> list of its choice xpaths.
        return dict([(e.get_abbreviated_xpath(), [c.get_abbreviated_xpath()
                                                  for c in e.children])
                     for e in dd.get_survey_elements()
                     if e.type == SELECT_ALL_THAT_APPLY])

    @classmethod
    def _split_select_multiples(cls, record, select_multiples,
                                binary_select_multiples=False):
        """ Prefix contains the xpath and slash if we are within a repeat so
        that we can figure out which select multiples belong to which repeat
        """
        for key, choices in select_multiples.items():
            # the select multiple might be blank or not exist in the record,
            # need to make those False
            selections = []
            if key in record:
                # split selected choices by spaces and join by / to the
                # element's xpath
                selections = ["%s/%s" % (key, r)
                              for r in record[key].split(" ")]
                # remove the column since we are adding separate columns
                # for each choice
                record.pop(key)
            if not binary_select_multiples:
                # add columns to record for every choice, with default
                # False and set to True for items in selections
                record.update(dict([(choice, choice in selections)
                                    for choice in choices]))
            else:
                # 1/0 encoding instead of True/False
                YES = 1
                NO = 0
                record.update(
                    dict([(choice, YES if choice in selections else NO)
                          for choice in choices]))
        # recurs into repeats
        for record_key, record_item in record.items():
            if type(record_item) == list:
                for list_item in record_item:
                    if type(list_item) == dict:
                        cls._split_select_multiples(
                            list_item, select_multiples)
        return record

    @classmethod
    def _collect_gps_fields(cls, dd):
        # Abbreviated xpaths of all geopoint questions.
        return [e.get_abbreviated_xpath() for e in dd.get_survey_elements()
                if e.bind.get("type") == "geopoint"]

    @classmethod
    def _tag_edit_string(cls, record):
        """
        Turns a list of tags into a string representation.
        """
        if '_tags' in record:
            tags = []
            for tag in record['_tags']:
                if ',' in tag and ' ' in tag:
                    # quote tags that contain both a comma and a space so the
                    # joined string stays unambiguous
                    tags.append('"%s"' % tag)
                else:
                    tags.append(tag)
            record.update({'_tags': ', '.join(sorted(tags))})

    @classmethod
    def _split_gps_fields(cls, record, gps_fields):
        # Mutates `record` in place, adding lat/lng/alt/precision columns
        # for each geopoint value; recurses into repeats.
        updated_gps_fields = {}
        for key, value in record.items():
            if key in gps_fields and isinstance(value, string_types):
                gps_xpaths = DataDictionary.get_additional_geopoint_xpaths(key)
                gps_parts = dict([(xpath, None) for xpath in gps_xpaths])
                # hack, check if its a list and grab the object within that
                parts = value.split(' ')
                # TODO: check whether or not we can have a gps recording
                # from ODKCollect that has less than four components,
                # for now we are assuming that this is not the case.
                if len(parts) == 4:
                    gps_parts = dict(zip(gps_xpaths, parts))
                updated_gps_fields.update(gps_parts)
            # check for repeats within record i.e. in value
            elif type(value) == list:
                for list_item in value:
                    if type(list_item) == dict:
                        cls._split_gps_fields(list_item, gps_fields)
        record.update(updated_gps_fields)

    def _query_mongo(self, query='{}', start=0,
                     limit=ParsedInstance.DEFAULT_LIMIT,
                     fields='[]', count=False):
        """Query Mongo for this form's submissions.

        Raises NoRecordsFoundError when the query matches nothing. When
        `count` is True, returns the record count; otherwise a cursor.
        """
        # ParsedInstance.query_mongo takes params as json strings
        # so we dumps the fields dictionary
        count_args = {
            'username': self.username,
            'id_string': self.id_string,
            'query': query,
            'fields': '[]',
            'sort': '{}',
            'count': True
        }
        count_object = ParsedInstance.query_mongo(**count_args)
        record_count = count_object[0]["count"]
        if record_count == 0:
            raise NoRecordsFoundError("No records found for your query")
        # if count was requested, return the count
        if count:
            return record_count
        else:
            query_args = {
                'username': self.username,
                'id_string': self.id_string,
                'query': query,
                'fields': fields,
                # TODO: we might want to add this in for the user
                # to specify a sort order
                'sort': '{}',
                'start': start,
                'limit': limit,
                'count': False
            }
            # use ParsedInstance.query_mongo
            cursor = ParsedInstance.query_mongo(**query_args)
            return cursor
class XLSDataFrameBuilder(AbstractDataFrameBuilder):
    """
    Generate structures from mongo and DataDictionary for a DataFrameXLSWriter

    This builder can choose to query the data in batches and write to a single
    ExcelWriter object using multiple instances of DataFrameXLSWriter
    """
    # Columns added to every sheet to link repeat rows back to their parent.
    INDEX_COLUMN = "_index"
    PARENT_TABLE_NAME_COLUMN = "_parent_table_name"
    PARENT_INDEX_COLUMN = "_parent_index"
    EXTRA_COLUMNS = [INDEX_COLUMN, PARENT_TABLE_NAME_COLUMN,
                     PARENT_INDEX_COLUMN]
    # Hard limits of the XLS format that get_exceeds_xls_limits checks.
    SHEET_NAME_MAX_CHARS = 30
    XLS_SHEET_COUNT_LIMIT = 255
    XLS_COLUMN_COUNT_MAX = 255
    # Per-section key that tracks the running row index across batches.
    CURRENT_INDEX_META = 'current_index'

    def __init__(self, username, id_string, filter_query=None,
                 group_delimiter=DEFAULT_GROUP_DELIMITER,
                 split_select_multiples=True, binary_select_multiples=False):
        super().__init__(
            username, id_string, filter_query, group_delimiter,
            split_select_multiples, binary_select_multiples)

    def _setup(self):
        super()._setup()
        # need to split columns, with repeats in individual sheets and
        # everything else on the default sheet
        self._generate_sections()

    def export_to(self, file_path, batchsize=1000):
        """Write all submissions to an XLS workbook at `file_path`.

        Queries Mongo in batches of `batchsize` and appends each batch to
        the per-section sheets of a single ExcelWriter.
        """
        self.xls_writer = ExcelWriter(file_path)
        # get record count
        record_count = self._query_mongo(count=True)
        # query in batches and for each batch create an XLSDataFrameWriter and
        # write to existing xls_writer object
        start = 0
        header = True
        while start < record_count:
            cursor = self._query_mongo(self.filter_query, start=start,
                                       limit=batchsize)
            data = self._format_for_dataframe(cursor)
            # write all cursor's data to their respective sheets
            for section_name, section in self.sections.items():
                records = data[section_name]
                # TODO: currently ignoring nested repeats
                # so ignore sections that have 0 records
                if len(records) > 0:
                    # use a different group delimiter if needed
                    columns = section["columns"]
                    if self.group_delimiter != DEFAULT_GROUP_DELIMITER:
                        columns = [self.group_delimiter.join(col.split("/"))
                                   for col in columns]
                    columns = columns + self.EXTRA_COLUMNS
                    writer = XLSDataFrameWriter(records, columns)
                    writer.write_to_excel(self.xls_writer, section_name,
                                          header=header, index=False)
            # only the first batch writes column headers
            header = False
            # increment counter(s)
            start += batchsize
            time.sleep(0.1)
        self.xls_writer.save()

    def _format_for_dataframe(self, cursor):
        """
        Format each record for consumption by a dataframe

        returns a dictionary with the key being the name of the sheet,
        and values a list of dicts to feed into a DataFrame
        """
        data = dict((section_name, []) for section_name in self.sections.keys())
        main_section = self.sections[self.survey_name]
        main_sections_columns = main_section["columns"]
        for record in cursor:
            # from record, we'll end up with multiple records, one for each
            # section we have
            # add records for the default section
            self._add_data_for_section(data[self.survey_name],
                                       record, main_sections_columns,
                                       self.survey_name)
            parent_index = main_section[self.CURRENT_INDEX_META]
            for sheet_name, section in self.sections.items():
                # skip default section i.e survey name
                if sheet_name != self.survey_name:
                    xpath = section["xpath"]
                    columns = section["columns"]
                    # TODO: handle nested repeats -ignoring nested repeats for
                    # now which will not be in the top level record, perhaps
                    # nest sections as well so we can recurs in and get them
                    if xpath in record:
                        repeat_records = record[xpath]
                        # num_repeat_records = len(repeat_records)
                        for repeat_record in repeat_records:
                            self._add_data_for_section(
                                data[sheet_name],
                                repeat_record, columns, sheet_name,
                                parent_index, self.survey_name)
        return data

    def _add_data_for_section(self, data_section, record, columns,
                              section_name, parent_index=-1,
                              parent_table_name=None):
        """Append one row dict built from `record` to `data_section`.

        Also advances the section's running row index and stamps the
        _index/_parent_index/_parent_table_name linkage columns.
        """
        data_section.append({})
        self.sections[section_name][self.CURRENT_INDEX_META] += 1
        index = self.sections[section_name][self.CURRENT_INDEX_META]
        # data_section[len(data_section)-1].update(record) # we could simply do
        # this but end up with duplicate data from repeats
        if self.split_select_multiples:
            # find any select multiple(s) and add additional columns to record
            record = self._split_select_multiples(
                record, self.select_multiples)
        # alt, precision
        self._split_gps_fields(record, self.gps_fields)
        for column in columns:
            data_value = None
            try:
                data_value = record[column]
            except KeyError:
                # a record may not have responses for some elements simply
                # because they were not captured
                pass
            data_section[
                len(data_section) - 1].update({
                    self.group_delimiter.join(column.split('/'))
                    if self.group_delimiter != DEFAULT_GROUP_DELIMITER
                    else column: data_value})
        data_section[len(data_section) - 1].update({
            XLSDataFrameBuilder.INDEX_COLUMN: index,
            XLSDataFrameBuilder.PARENT_INDEX_COLUMN: parent_index,
            XLSDataFrameBuilder.PARENT_TABLE_NAME_COLUMN: parent_table_name})
        # add ADDITIONAL_COLUMNS
        data_section[len(data_section) - 1].update(
            dict([(column, record[column] if column in record else None)
                  for column in self.ADDITIONAL_COLUMNS]))

    def _generate_sections(self):
        """
        Split survey questions into separate sections for each xls sheet and
        columns for each section
        """
        # clear list
        self.sections = OrderedDict()
        # dict of select multiple elements
        self.select_multiples = {}
        survey_element = self.dd.survey
        self.survey_name = get_valid_sheet_name(
            survey_element.name, self.sections.keys())
        self._create_section(
            self.survey_name, survey_element.get_abbreviated_xpath(), False)
        # build sections
        self._build_sections_recursive(self.survey_name, self.dd.get_survey())
        for section_name in self.sections:
            self.sections[section_name]['columns'] += self.ADDITIONAL_COLUMNS
        self.get_exceeds_xls_limits()

    def _build_sections_recursive(self, section_name, element,
                                  is_repeating=False):
        """Builds a section's children and recurses any repeating sections
        to build those as a separate section
        """
        for child in element.children:
            # if a section, recurse
            if isinstance(child, Section):
                new_is_repeating = isinstance(child, RepeatingSection)
                new_section_name = section_name
                # if its repeating, build a new section
                if new_is_repeating:
                    new_section_name = get_valid_sheet_name(
                        child.name, list(self.sections))
                    self._create_section(
                        new_section_name, child.get_abbreviated_xpath(), True)
                self._build_sections_recursive(
                    new_section_name, child, new_is_repeating)
            else:
                # add to survey_sections
                child_bind_type = child.bind.get("type")
                if isinstance(child, Question) and not \
                        question_types_to_exclude(child.type)\
                        and not child.type == SELECT_ALL_THAT_APPLY:
                    self._add_column_to_section(section_name, child)
                elif child.type == SELECT_ALL_THAT_APPLY:
                    self.select_multiples[child.get_abbreviated_xpath()] = \
                        [option.get_abbreviated_xpath()
                         for option in child.children]
                    # if select multiple, get its choices and make them
                    # columns
                    if self.split_select_multiples:
                        for option in child.children:
                            self._add_column_to_section(section_name, option)
                    else:
                        self._add_column_to_section(section_name, child)
                # split gps fields within this section
                if child_bind_type == GEOPOINT_BIND_TYPE:
                    # add columns for geopoint components
                    for xpath in self.dd.get_additional_geopoint_xpaths(
                            child.get_abbreviated_xpath()):
                        self._add_column_to_section(section_name, xpath)

    def get_exceeds_xls_limits(self):
        # Cached check against the XLS sheet/column count limits above.
        if not hasattr(self, "exceeds_xls_limits"):
            self.exceeds_xls_limits = False
            if len(self.sections) > self.XLS_SHEET_COUNT_LIMIT:
                self.exceeds_xls_limits = True
            else:
                for section in self.sections.values():
                    if len(section["columns"]) > self.XLS_COLUMN_COUNT_MAX:
                        self.exceeds_xls_limits = True
                        break
        return self.exceeds_xls_limits

    def _create_section(self, section_name, xpath, is_repeat):
        # index = len(self.sections)
        self.sections[section_name] = {
            "name": section_name, "xpath": xpath, "columns": [],
            "is_repeat": is_repeat, self.CURRENT_INDEX_META: 0}

    def _add_column_to_section(self, sheet_name, column):
        # `column` may be a SurveyElement or an xpath string.
        section = self.sections[sheet_name]
        xpath = None
        if isinstance(column, SurveyElement):
            xpath = column.get_abbreviated_xpath()
        elif isinstance(column, string_types):
            xpath = column
        assert(xpath)
        # make sure column is not already in list
        if xpath not in section["columns"]:
            section["columns"].append(xpath)
class CSVDataFrameBuilder(AbstractDataFrameBuilder):
    """Builds flat CSV exports, flattening repeats into indexed columns."""

    def __init__(self, username, id_string, filter_query=None,
                 group_delimiter=DEFAULT_GROUP_DELIMITER,
                 split_select_multiples=True, binary_select_multiples=False):
        super().__init__(
            username, id_string, filter_query, group_delimiter,
            split_select_multiples, binary_select_multiples)
        self.ordered_columns = OrderedDict()

    def _setup(self):
        super()._setup()

    @classmethod
    def _reindex(cls, key, value, ordered_columns, parent_prefix=None):
        """
        Flatten list columns by appending an index, otherwise return as is
        """
        d = {}
        # check for lists
        if type(value) is list and len(value) > 0 \
                and key != NOTES and key != ATTACHMENTS:
            for index, item in enumerate(value):
                # start at 1
                index += 1
                # for each list check for dict, we want to transform the key of
                # this dict
                if type(item) is dict:
                    for nested_key, nested_val in item.items():
                        # given the key "children/details" and nested_key/
                        # abbreviated xpath
                        # "children/details/immunization/polio_1",
                        # generate ["children", index, "immunization/polio_1"]
                        xpaths = [
                            "%s[%s]" % (
                                nested_key[:nested_key.index(key) + len(key)],
                                index),
                            nested_key[nested_key.index(key) + len(key) + 1:]]
                        # re-create xpath the split on /
                        xpaths = "/".join(xpaths).split("/")
                        new_prefix = xpaths[:-1]
                        if type(nested_val) is list:
                            # if nested_value is a list, rinse and repeat
                            d.update(cls._reindex(
                                nested_key, nested_val,
                                ordered_columns, new_prefix))
                        else:
                            # it can only be a scalar
                            # collapse xpath
                            if parent_prefix:
                                xpaths[0:len(parent_prefix)] = parent_prefix
                            new_xpath = "/".join(xpaths)
                            # check if this key exists in our ordered columns
                            if key in ordered_columns.keys():
                                if new_xpath not in ordered_columns[key]:
                                    ordered_columns[key].append(new_xpath)
                            d[new_xpath] = nested_val
                else:
                    d[key] = value
        else:
            # anything that's not a list will be in the top level dict so its
            # safe to simply assign
            if key == NOTES:
                # Match behavior of
                # onadata.libs.utils.export_tools.dict_to_joined_export()
                d[key] = "\r\n".join([v['note'] for v in value])
            elif key == ATTACHMENTS:
                d[key] = []
            else:
                d[key] = value
        return d

    @classmethod
    def _build_ordered_columns(cls, survey_element, ordered_columns,
                               is_repeating_section=False):
        """
        Build a flat ordered dict of column groups

        is_repeating_section ensures that child questions of repeating sections
        are not considered columns
        """
        for child in survey_element.children:
            # child_xpath = child.get_abbreviated_xpath()
            if isinstance(child, Section):
                child_is_repeating = False
                if isinstance(child, RepeatingSection):
                    ordered_columns[child.get_abbreviated_xpath()] = []
                    child_is_repeating = True
                cls._build_ordered_columns(child, ordered_columns,
                                           child_is_repeating)
            elif isinstance(child, Question) and not \
                    question_types_to_exclude(child.type) and not\
                    is_repeating_section:  # if is_repeating_section,
                # its parent already initiliased an empty list
                # so we dont add it to our list of columns,
                # the repeating columns list will be
                # generated when we reindex
                ordered_columns[child.get_abbreviated_xpath()] = None

    def _format_for_dataframe(self, cursor):
        """Munge each record from `cursor` into a flat dict ready for a
        DataFrame: split select-multiples/gps, stringify tags, and reindex
        repeats into per-row columns."""
        # TODO: check for and handle empty results
        # add ordered columns for select multiples
        if self.split_select_multiples:
            for key, choices in self.select_multiples.items():
                # HACK to ensure choices are NOT duplicated
                self.ordered_columns[key] = \
                    remove_dups_from_list_maintain_order(choices)
        # add ordered columns for gps fields
        for key in self.gps_fields:
            gps_xpaths = self.dd.get_additional_geopoint_xpaths(key)
            self.ordered_columns[key] = [key] + gps_xpaths
        data = []
        for record in cursor:
            # split select multiples
            if self.split_select_multiples:
                record = self._split_select_multiples(
                    record, self.select_multiples,
                    self.BINARY_SELECT_MULTIPLES)
            # check for gps and split into components i.e. latitude, longitude,
            # altitude, precision
            self._split_gps_fields(record, self.gps_fields)
            self._tag_edit_string(record)
            flat_dict = {}
            # re index repeats
            for key, value in record.items():
                reindexed = self._reindex(key, value, self.ordered_columns)
                flat_dict.update(reindexed)
            # if delimiter is different, replace within record as well
            if self.group_delimiter != DEFAULT_GROUP_DELIMITER:
                flat_dict = dict((self.group_delimiter.join(k.split('/')), v)
                                 for k, v in flat_dict.items())
            data.append(flat_dict)
        return data

    def export_to(self, file_or_path, data_frame_max_size=30000):
        """Write all submissions as CSV to a path or open file object."""
        from math import ceil
        # get record count
        record_count = self._query_mongo(query=self.filter_query, count=True)
        self.ordered_columns = OrderedDict()
        self._build_ordered_columns(self.dd.survey, self.ordered_columns)
        # pandas will only export 30k records in a dataframe to a csv
        # - we need to create multiple 30k dataframes if required,
        # we need to go through all the records though so that
        # we can figure out the columns we need for repeats
        datas = []
        num_data_frames = \
            int(ceil(float(record_count) / float(data_frame_max_size)))
        for i in range(num_data_frames):
            cursor = self._query_mongo(
                self.filter_query, start=(i * data_frame_max_size),
                limit=data_frame_max_size)
            data = self._format_for_dataframe(cursor)
            datas.append(data)
        columns = list(chain.from_iterable(
            [[xpath] if cols is None else cols
             for xpath, cols in self.ordered_columns.items()]))
        # use a different group delimiter if needed
        if self.group_delimiter != DEFAULT_GROUP_DELIMITER:
            columns = [self.group_delimiter.join(col.split("/"))
                       for col in columns]
        # add extra columns
        columns += [col for col in self.ADDITIONAL_COLUMNS]
        header = True
        # accept either an open file-like object or a filesystem path
        if hasattr(file_or_path, 'read'):
            csv_file = file_or_path
            close = False
        else:
            csv_file = open(file_or_path, "w")
            close = True
        for data in datas:
            writer = CSVDataFrameWriter(data, columns)
            writer.write_to_csv(csv_file, header=header)
            # only the first chunk writes the header row
            header = False
        if close:
            csv_file.close()
class XLSDataFrameWriter:
    """Writes one batch of records to a sheet of an Excel workbook.

    Thin wrapper that materializes `records` into a pandas DataFrame with
    a fixed column order and delegates to `DataFrame.to_excel`.
    """

    def __init__(self, records, columns):
        # records: list of row dicts; columns fixes the column order
        self.dataframe = DataFrame(records, columns=columns)

    def write_to_excel(self, excel_writer, sheet_name, header=False,
                       index=False):
        """Write this writer's DataFrame to `excel_writer` as `sheet_name`.

        `sheet_name` is passed by keyword: recent pandas versions make all
        `to_excel` arguments after `excel_writer` keyword-only, so the old
        positional call breaks there.
        """
        self.dataframe.to_excel(excel_writer, sheet_name=sheet_name,
                                header=header, index=index)
class CSVDataFrameWriter:
    """Writes records to CSV via a DataFrame, dropping internal columns."""

    def __init__(self, records, columns):
        # TODO: if records is empty, raise a known exception
        # catch it in the view and handle
        assert(len(records) > 0)
        self.dataframe = DataFrame(records, columns=columns)
        # remove columns we don't want
        for ignored_column in AbstractDataFrameBuilder.IGNORED_COLUMNS:
            if ignored_column in self.dataframe.columns:
                del self.dataframe[ignored_column]

    def write_to_csv(self, csv_file, header=True, index=False):
        """Append the DataFrame to `csv_file` as UTF-8 CSV."""
        na_rep = getattr(settings, 'NA_REP', NA_REP)
        self.dataframe.to_csv(csv_file, header=header, index=index,
                              na_rep=na_rep, encoding='utf-8')
| |
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of a selector model using convolutional encoders.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import flags
from absl import logging
from keras.layers import Convolution1D
from keras.layers import Dense
from keras.layers import Dropout
from keras.layers import Embedding
from keras.layers import GlobalMaxPooling1D
from keras.layers import Input
from keras.layers.merge import Concatenate
from keras.models import Model
from keras.models import model_from_json
from keras.preprocessing.sequence import pad_sequences
from keras_preprocessing import text
import numpy as np
from tensorflow import gfile
# Command-line flags controlling embedding loading, input shape, and
# where model checkpoints are read from / written to.
flags.DEFINE_string('glove_path', '', 'Path to pretrained Glove embeddings.')
flags.DEFINE_integer('embedding_dim', 100, 'Embedding dimension.')
flags.DEFINE_integer('max_sequence_length', 10, 'Maximum sequence length.')
flags.DEFINE_string('save_path', '',
                    'Directory where models will be saved to/loaded from.')

FLAGS = flags.FLAGS
class Selector(object):
"""A selector model that selects the best question/answer out of a set."""
def __init__(self):
    """Constructor for the selector.

    Loads Glove embeddings, builds a tokenizer aligned with the embedding
    matrix, and constructs the Keras model.
    """
    logging.info('Initializing tokenizer..')
    words, embedding_matrix = self._build_embedding_matrix()
    self.tokenizer = text.Tokenizer(num_words=len(words), lower=False)
    # Tokenizer treats each item in a nested list as a token.
    self.tokenizer.fit_on_texts([[word] for word in words])
    # Prepend an array of zeros to the embeddings matrix that will be used
    # by out-of-vocabulary words (tokenizer index 0).
    embedding_matrix = np.concatenate(
        [np.zeros((1, embedding_matrix.shape[1])), embedding_matrix])
    # Sanity checks: tokenizer indices are 1-based, so word i in `words`
    # must map to index i + 1, matching row i + 1 of the padded matrix.
    assert len(words) == len(self.tokenizer.word_index), (
        'embeddings_matrix and tokenizer.word_index do not have the same size:'
        ' {} and {}, respectively'.format(
            len(words), len(self.tokenizer.word_index)))
    assert all([
        self.tokenizer.word_index[word] == i + 1 for i, word in enumerate(words)
    ]), ('embeddings_matrix and tokenizer.word_index are not aligned.')
    self.model = self._build_model(embedding_matrix)
def load(self, name):
checkpoint_path_json, checkpoint_path_h5 = self._get_checkpoint_paths(name)
with gfile.Open(checkpoint_path_json, 'r') as json_file:
loaded_model_json = json_file.read()
model = model_from_json(loaded_model_json)
gfile.Copy(checkpoint_path_h5, '/tmp/tmp_model_weights.h5')
model.load_weights('/tmp/tmp_model_weights.h5')
logging.info('Loaded model from disk.')
return model
def save(self, name):
checkpoint_path_json, checkpoint_path_h5 = self._get_checkpoint_paths(name)
model_json = self.model.to_json()
with gfile.Open(checkpoint_path_json, 'w') as json_file:
json_file.write(model_json)
self.model.save_weights('/tmp/tmp_model_weights.h5')
gfile.Copy('/tmp/tmp_model_weights.h5', checkpoint_path_h5)
def _get_checkpoint_paths(self, name):
checkpoint_path_json = os.path.join(FLAGS.save_path,
'model_' + name + '.json')
checkpoint_path_h5 = os.path.join(FLAGS.save_path, 'model_' + name + '.h5')
return checkpoint_path_json, checkpoint_path_h5
def _build_embedding_matrix(self):
"""Builds the embedding matrix for the model.
Returns:
words: a list of strings representing the words in the vocabulary.
embeddings: a float32 array of shape [vocab_size, embeddings_dim].
"""
logging.info('Loading Glove embeddings.')
words = []
embeddings = []
with gfile.GFile(FLAGS.glove_path) as f:
for line in f:
values = line.split()
words.append(values[0])
embeddings.append(np.asarray(values[1:], dtype='float32'))
logging.info('Found %s word vectors.', len(embeddings))
return words, np.array(embeddings)
def _build_model(self, embedding_matrix):
"""Builds the model.
Args:
embedding_matrix: A float32 array of shape [vocab_size, embedding_dim].
Returns:
The model.
"""
max_feature_length = FLAGS.max_sequence_length
model_inputs = []
encoder_outputs = []
for _ in range(3):
model_input = Input(shape=(max_feature_length,))
model_inputs.append(model_input)
embed = Embedding(
output_dim=100,
input_dim=len(embedding_matrix),
input_length=max_feature_length,
weights=[embedding_matrix],
trainable=False)(
model_input)
conv = Convolution1D(
filters=100,
kernel_size=3,
padding='valid',
activation='relu',
strides=1)(
embed)
conv = Dropout(0.4)(conv)
conv = GlobalMaxPooling1D()(conv)
encoder_outputs.append(conv)
merge = Concatenate()(encoder_outputs)
model_output = Dense(1, activation='sigmoid')(merge)
model = Model(model_inputs, model_output)
model.compile(
loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
logging.info('Model successfully built. Summary: %s', model.summary())
return model
def encode_labels(self, labels):
return np.asarray(labels).astype(np.float)
def encode_texts(self, texts):
sequences = self.tokenizer.texts_to_sequences(texts)
return pad_sequences(sequences, maxlen=FLAGS.max_sequence_length)
def encode_data(self, questions, original_questions, answers, labels):
return (self.encode_texts(questions), self.encode_texts(original_questions),
self.encode_texts(answers), self.encode_labels(labels))
def encode_train(self, question_lists, answer_lists, score_lists):
"""Encodes the input for training purposes.
The data points consist of:
- question (original or rewrite)
- original question
- answer
- label
where the label is the difference between the F1 score of the question and
the average F1 score of all the questions with the same source.
Args:
question_lists: A list of lists of questions. The first question is the
original question and the others are generated by a
Reformulator model.
answer_lists: A list of lists of answers to the questions given by the
BiDAF model.
score_lists: A list of lists of F1 scores for the answers given by the
BiDAF model.
Returns:
* A numpy array with dimensions [len(questions), max_sequence_length]
containing the tokenized questions.
* A numpy array with dimensions
[len(original_questions), max_sequence_length] containing the tokenized
original questions.
* A numpy array with dimensions [len(answers), max_sequence_length]
containing the tokenized answers.
* A numpy array with dimensions [len(answers)] containing the differences
of the F1 score from the average of all rewrites with the same source.
"""
rewritten_questions = []
original_questions = []
ans = []
labels = []
for questions, answers, scores in zip(question_lists, answer_lists,
score_lists):
mean_score = np.mean(scores)
original_question = questions[0]
for question, answer, score in zip(questions, answers, scores):
if score == mean_score:
# Ignore all examples where the F1 score is equal to the mean. This
# helps filter out examples that we cannot learn from; e.g. if all
# rewrites in a set give the same F1 score, all of the set is ignored.
continue
rewritten_questions.append(question)
original_questions.append(original_question)
ans.append(answer)
labels.append(score - mean_score)
return self.encode_data(rewritten_questions, original_questions, ans,
labels)
def train(self, questions, answers, scores):
"""Train the model with the given data.
Args:
questions: A list of lists of questions. The first question is the
original question and the others are generated by a
Reformulator model.
answers: A list of lists of answers to the questions given by the BiDAF
model.
scores: A list of lists of F1 scores for the answers given by the BiDAF
model.
Returns:
A tuple containing the training loss and accuracy of the batch.
"""
(question_array, original_question_array, answer_array,
train_labels) = self.encode_train(questions, answers, scores)
train_labels_binary = (np.sign(train_labels) + 1) / 2
train_labels_array_binary = np.array(train_labels_binary)
return self.model.train_on_batch(
x=[question_array, original_question_array, answer_array],
y=train_labels_array_binary)
def eval(self, question_lists, answer_lists, score_lists):
"""Run an eval with the given data.
Args:
question_lists: A list of lists of questions. The first question is the
original question and the others are generated by a
Reformulator model.
answer_lists: A list of lists of answers to the questions given by the
BiDAF model.
score_lists: A list of lists of F1 scores for the answers given by the
BiDAF model.
Returns:
Average F1 score achieved with the model.
"""
f1s = []
for questions, answers, scores in zip(question_lists, answer_lists,
score_lists):
original_questions = [questions[0]] * len(questions)
xs1, xs2, xs3, ys = self.encode_data(questions, original_questions,
answers, scores)
prediction = np.argmax(self.model.predict([xs1, xs2, xs3]))
f1s.append(ys[prediction])
return np.mean(f1s)
| |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Note: Any AirflowException raised is expected to cause the TaskInstance
# to be marked in an ERROR state
"""Exceptions used by Airflow"""
import datetime
import warnings
from typing import Any, Dict, List, NamedTuple, Optional, Sized
class AirflowException(Exception):
    """
    Base class for all Airflow's errors.
    Each custom exception should be derived from this class.
    """

    # Default HTTP status code used when the exception is surfaced via the API.
    status_code = 500


class AirflowBadRequest(AirflowException):
    """Raise when the application or server cannot handle the request."""

    status_code = 400


class AirflowNotFoundException(AirflowException):
    """Raise when the requested object/resource is not available in the system."""

    status_code = 404


class AirflowConfigException(AirflowException):
    """Raise when there is configuration problem."""


class AirflowSensorTimeout(AirflowException):
    """Raise when there is a timeout on sensor polling."""


class AirflowRescheduleException(AirflowException):
    """
    Raise when the task should be re-scheduled at a later time.

    :param reschedule_date: The date when the task should be rescheduled
    """

    def __init__(self, reschedule_date):
        super().__init__()
        # Stored so the scheduler can pick up the requested reschedule time.
        self.reschedule_date = reschedule_date


class AirflowSmartSensorException(AirflowException):
    """
    Raise after the task register itself in the smart sensor service.

    It should exit without failing a task.
    """


class InvalidStatsNameException(AirflowException):
    """Raise when name of the stats is invalid."""


class AirflowTaskTimeout(AirflowException):
    """Raise when the task execution times-out."""


class AirflowWebServerTimeout(AirflowException):
    """Raise when the web server times out."""


class AirflowSkipException(AirflowException):
    """Raise when the task should be skipped"""


class AirflowFailException(AirflowException):
    """Raise when the task should be failed without retrying."""


class AirflowOptionalProviderFeatureException(AirflowException):
    """Raise by providers when imports are missing for optional provider features."""
class UnmappableXComTypePushed(AirflowException):
    """Raise when an unmappable type is pushed as a mapped downstream's dependency."""

    def __init__(self, value: Any) -> None:
        super().__init__(value)
        # Keep the offending object around so __str__ can describe it.
        self.value = value

    def __str__(self) -> str:
        offending_type = type(self.value).__qualname__
        return "unmappable return type {!r}".format(offending_type)
class UnmappableXComLengthPushed(AirflowException):
    """Raise when the pushed value is too large to map as a downstream's dependency."""

    def __init__(self, value: Sized, max_length: int) -> None:
        super().__init__(value)
        # Keep both the value and the limit for the error message.
        self.value = value
        self.max_length = max_length

    def __str__(self) -> str:
        return "unmappable return value length: {} > {}".format(
            len(self.value), self.max_length
        )
class AirflowDagCycleException(AirflowException):
    """Raise when there is a cycle in DAG definition."""


class AirflowDagDuplicatedIdException(AirflowException):
    """Raise when a DAG's ID is already used by another DAG."""

    def __init__(self, dag_id: str, incoming: str, existing: str) -> None:
        super().__init__(dag_id, incoming, existing)
        # dag_id: the duplicated ID; incoming/existing: the two source files.
        self.dag_id = dag_id
        self.incoming = incoming
        self.existing = existing

    def __str__(self) -> str:
        return f"Ignoring DAG {self.dag_id} from {self.incoming} - also found in {self.existing}"
class AirflowClusterPolicyViolation(AirflowException):
    """Raise when there is a violation of a Cluster Policy in DAG definition."""


class AirflowTimetableInvalid(AirflowException):
    """Raise when a DAG has an invalid timetable."""


class DagNotFound(AirflowNotFoundException):
    """Raise when a DAG is not available in the system."""


class DagCodeNotFound(AirflowNotFoundException):
    """Raise when a DAG code is not available in the system."""


class DagRunNotFound(AirflowNotFoundException):
    """Raise when a DAG Run is not available in the system."""


class DagRunAlreadyExists(AirflowBadRequest):
    """Raise when creating a DAG run for DAG which already has DAG run entry."""


class DagFileExists(AirflowBadRequest):
    """Raise when a DAG ID is still in DagBag i.e., DAG file is in DAG folder."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Deprecated class: warn at construction time, pointing at the caller.
        warnings.warn("DagFileExists is deprecated and will be removed.", DeprecationWarning, stacklevel=2)
class DuplicateTaskIdFound(AirflowException):
    """Raise when a Task with duplicate task_id is defined in the same DAG."""


class SerializationError(AirflowException):
    """A problem occurred when trying to serialize a DAG."""


class ParamValidationError(AirflowException):
    """Raise when DAG params is invalid"""


class TaskNotFound(AirflowNotFoundException):
    """Raise when a Task is not available in the system."""


class TaskInstanceNotFound(AirflowNotFoundException):
    """Raise when a task instance is not available in the system."""


class PoolNotFound(AirflowNotFoundException):
    """Raise when a Pool is not available in the system."""


class NoAvailablePoolSlot(AirflowException):
    """Raise when there is not enough slots in pool."""


class DagConcurrencyLimitReached(AirflowException):
    """Raise when DAG max_active_tasks limit is reached."""


class TaskConcurrencyLimitReached(AirflowException):
    """Raise when task max_active_tasks limit is reached."""


class BackfillUnfinished(AirflowException):
    """
    Raises when not all tasks succeed in backfill.

    :param message: The human-readable description of the exception
    :param ti_status: The information about all task statuses
    """

    def __init__(self, message, ti_status):
        super().__init__(message)
        # Task-instance status summary, kept for callers to inspect.
        self.ti_status = ti_status
class FileSyntaxError(NamedTuple):
    """Information about a single error in a file."""

    # Line on which the error occurred, or None when the location is unknown.
    line_no: Optional[int]
    # Human-readable description of the problem.
    message: str

    def __str__(self):
        # The previous version rendered as "...Line number: s<no>," -- a
        # leftover "s" from an earlier str() interpolation plus a trailing
        # comma.  Produce a clean sentence instead.
        return f"{self.message}. Line number: {self.line_no}."
class AirflowFileParseException(AirflowException):
    """
    Raises when connection or variable file can not be parsed.

    :param msg: The human-readable description of the exception
    :param file_path: A processed file that contains errors
    :param parse_errors: File syntax errors
    """

    def __init__(self, msg: str, file_path: str, parse_errors: List[FileSyntaxError]) -> None:
        super().__init__(msg)
        self.msg = msg
        self.file_path = file_path
        self.parse_errors = parse_errors

    def __str__(self):
        # NOTE(review): imported lazily here -- presumably to avoid an import
        # cycle with airflow.utils; confirm before hoisting to module level.
        from airflow.utils.code_utils import prepare_code_snippet
        from airflow.utils.platform import is_tty

        result = f"{self.msg}\nFilename: {self.file_path}\n\n"
        # Render each syntax error as a banner, message, and (optionally) a
        # code snippet when attached to a terminal.
        for error_no, parse_error in enumerate(self.parse_errors, 1):
            result += "=" * 20 + f" Parse error {error_no:3} " + "=" * 20 + "\n"
            result += f"{parse_error.message}\n"
            if parse_error.line_no:
                result += f"Line number: {parse_error.line_no}\n"
            if parse_error.line_no and is_tty():
                result += "\n" + prepare_code_snippet(self.file_path, parse_error.line_no) + "\n"
        return result


class ConnectionNotUnique(AirflowException):
    """Raise when multiple values are found for the same connection ID."""
class TaskDeferred(BaseException):
    """
    Special exception raised to signal that the operator it was raised from
    wishes to defer until a trigger fires.

    Derives from BaseException (not AirflowException) so ordinary
    ``except Exception`` handlers do not swallow the deferral.
    """

    def __init__(
        self,
        *,
        trigger,
        method_name: str,
        kwargs: Optional[Dict[str, Any]] = None,
        timeout: Optional[datetime.timedelta] = None,
    ):
        # Reject anything that does not quack like a timedelta up front;
        # None means "no timeout" and is allowed.
        if timeout is not None and not hasattr(timeout, "total_seconds"):
            raise ValueError("Timeout value must be a timedelta")
        super().__init__()
        self.trigger = trigger
        self.method_name = method_name
        self.kwargs = kwargs
        self.timeout = timeout

    def __repr__(self) -> str:
        return "<TaskDeferred trigger={} method={}>".format(
            self.trigger, self.method_name
        )
class TaskDeferralError(AirflowException):
    """Raised when a task failed during deferral for some reason."""
| |
"""
Collection of utility functions for wrapping-textures.
Written by Zachary Ferguson
"""
from __future__ import print_function
import sys
import time
import math
import itertools
import numpy
from recordclass import recordclass
######################################
# Record classes for necessary data  #
######################################
# Mutable, named-field containers (third-party `recordclass` package) used
# throughout the texture-wrapping utilities.
UV = recordclass('UV', ['u', 'v'])         # GPU-space texture coordinates.
Pixel = recordclass('Pixel', ['x', 'y'])   # Integer pixel coordinates.
XY = recordclass('XY', ['x', 'y'])         # Continuous texture-space point.
XYZ = recordclass('XYZ', ['x', 'y', 'z'])  # 3D point.
# Quadratic energy: x.T @ Q @ x + 2 * x.T @ L + C = 0
QuadEnergy = recordclass('QuadraticEnergy', ['Q', 'L', 'C'])
def pairwise(iterable):
    """ Returns: s -> (s0,s1), (s1,s2), (s2, s3), ... """
    first, second = itertools.tee(iterable)
    # Advance the second iterator by one so the streams are offset.
    next(second, None)
    return zip(first, second)
def pairwise_loop(iterable):
    """
    Create pair wise list of the iterable given with the last element being the
    first. Returns: s -> (s0,s1), (s1,s2), (s2, s3), ..., (sN, s0)
    """
    # Close the loop by appending the (last, first) wrap-around pair.
    closing_pair = (iterable[-1], iterable[0])
    return tuple(pairwise(iterable)) + (closing_pair,)
def rowcol_to_index(row, col, width):
    """ Convert row major coordinates to 1-D index. """
    return col + row * width
def lerp(t, x0, x1):
    """ Linearly interpolate between x0 and x1 by fraction t. """
    delta = x1 - x0
    return x0 + t * delta
def lerpPair(t, p0, p1):
    """ Linearly interpolate two indexed pairs, component by component. """
    first = lerp(t, p0[0], p1[0])
    second = lerp(t, p0[1], p1[1])
    return [first, second]
def lerp_UV(t, uv0, uv1):
    """
    Linearly interpolate between (u0,v0) and (u1,v1).
    Returns a UV object.
    """
    interpolated = lerpPair(t, uv0, uv1)
    return UV(*interpolated)
def lerp_XY(t, xy0, xy1):
    """
    Linearly interpolate between (x0,y0) and (x1,y1).
    Returns a XY object.
    """
    interpolated = lerpPair(t, xy0, xy1)
    return XY(*interpolated)
def UV_to_XY(uv, width, height, is_clamped = False):
    """
    Converts the given UV to XY coordinates.
    uv is defined in terms of GPU UV space.

    NOTE(review): returns an XY record normally but a plain tuple when
    is_clamped is True; existing callers unpack either form, so this
    inconsistency is deliberately preserved.
    """
    # s*width - 0.5; t*height - 0.5
    x = uv.u * width - 0.5
    y = uv.v * height - 0.5
    if not is_clamped:
        return XY(x = x, y = y)
    return (numpy.clip(x, 0, max(0, width - 1)),
            numpy.clip(y, 0, max(0, height - 1)))
def UVs_to_XYs(uvEdges, width, height):
    """ Convert a UV edge to XY space in the texture """
    converted = []
    for edge in uvEdges:
        for vert in edge:
            converted.append(UV_to_XY(vert, width, height))
    return converted
def globalUV_to_local(uv, minX, minY, width, height):
    """
    Convert from a texture's global UV to local UV.
    Local pixel values defined by the minimum x and y values.
    uv is defined in terms of GPU UV space.
    """
    # Clamp to the texture bounds, then translate by the patch origin.
    clamped_x, clamped_y = UV_to_XY(uv, width, height, True)
    return UV(u = clamped_x - minX, v = clamped_y - minY)
def globalEdge_to_local(uv0, uv1, minI, width, height):
    """
    Convert a edge from a texture's global UV to local UV.
    Local pixel values defined by the minimum x and y values.
    uv is defined in terms of GPU UV space.
    """
    # Recover the patch origin from the flattened (row-major) index minI.
    min_x = minI % width
    min_y = minI // width
    local0 = globalUV_to_local(uv0, min_x, min_y, width, height)
    local1 = globalUV_to_local(uv1, min_x, min_y, width, height)
    return [local0, local1]
def surrounding_pixels(uv, w, h, as_index = False, as_tuple = False):
    """
    Determine the surrounding pixels of the given point at (u,v).
    uv is defined in terms of GPU UV space.
    Returns a Tuple of surrounding four Pixel objects.
    Pixels are ordered as: (Lower Left, Lower Right, Upper Left, Upper Right)

    as_index: return flat row-major indices instead of Pixel objects.
    as_tuple: return plain (x, y) tuples instead of Pixel objects.
    (At most one of as_index/as_tuple may be set.)
    """
    assert not(as_index and as_tuple)
    # Convert from GPU UV coordinates to XY coordinates
    (x, y) = UV_to_XY(uv, w, h, is_clamped = True)
    # Convert from XY to Pixel coordinates.  The lower-left corner is clamped
    # to [0, w-2] x [0, h-2] so that the +1 neighbour below stays in bounds.
    px = int(min(max(0, math.floor(x)), w - 2)) # X in range [0, w-2]
    py = int(min(max(0, math.floor(y)), h - 2)) # Y in range [0, h-2]
    p00 = Pixel(x = px, y = py)
    px = int(min(max(0, math.floor(x) + 1), w - 1)) # X in range [0, w-1]
    py = int(min(max(0, math.floor(y) + 1), h - 1)) # Y in range [0, h-1]
    p11 = Pixel(x = px, y = py)
    # Create tuple of surrounding pixels in Pixel space.
    ps = (p00, Pixel(x = p11.x, y = p00.y), Pixel(x = p00.x, y = p11.y), p11)
    # If requested, convert from Pixel space to 1D index space
    if as_index:
        return [rowcol_to_index(p.y, p.x, w) for p in ps]
    if as_tuple:
        return tuple(tuple(p) for p in ps)
    return ps
def range_min_max(a, b):
    """ Creates a range from the min value to the max value (exclusive). """
    lo = int(min(a, b))
    hi = int(max(a, b))
    return range(lo, hi)
def print_dots(time_delta = 1.0):
    """
    Print out a dot every time_delta seconds.
    Loop after three dots.

    WARNING: never returns (infinite loop).
    """
    dot_count = 0
    while True:
        dot_count = (dot_count % 3) + 1
        # Trailing spaces erase dots left over from the previous cycle;
        # "\r" rewinds so the next iteration overwrites the line.
        print(("." * dot_count) + (" " * 3), end = "\r")
        sys.stdout.flush()
        time.sleep(time_delta)
def print_progress(percent):
    """
    Prints the given percentage at the start of the current line.
    Given value should be a decimal in range [0, 1].
    """
    # "\r" rewinds to the line start so successive calls overwrite in place.
    print("\r%.2f%%" % (percent * 100), end = "")
    sys.stdout.flush()
def print_clear_line(line_length = 80):
    """ Clear the current line with spaces followed by a carriage return. """
    print("\r" + (" " * line_length) + "\r", end = "")
# !!! These functions are not useful !!!
# def texUV_to_gpuUV(uv, width, height):
# """ Convert from the Texture UV space to GPU/OpenGL UV space. """
# u = uv.u - (uv.u / float(width)) + 0.5 / width
# v = uv.v - (uv.v / float(height)) + 0.5 / height
# return UV(u = u, v = v)
#
#
# def texSeam_to_gpuUV(seam, width, height):
# """ Convert a texture seam to GPU/OpenGL UV space. """
# gpu_seam = list()
# for edgePair in seam:
# gpu_edgePair = list()
# for edge in edgePair:
# gpu_edgePair.append(
# [texUV_to_gpuUV(uv, width, height) for uv in edge])
# gpu_seam.append(gpu_edgePair)
# return gpu_seam
def verts_equal(v0, v1, epsilon = 1e-8):
    """
    Test if two given vertices are equal within a certain epsilon.
    WARNING:
        This is slower than ==, but it allows for a tolerance level of
        equality.
    """
    assert epsilon >= 0.0
    # Different arity can never be equal.
    if len(v0) != len(v1):
        return False
    return all(abs(a - b) <= epsilon for a, b in zip(v0, v1))
def normalize_array(arr):
    """ Normalize the given array to be in range [0,1]. """
    lo = numpy.amin(arr)
    hi = numpy.amax(arr)
    # Shift to zero, then scale by the (float) span.
    return (arr - lo) / float(hi - lo)
def is_counterclockwise(v0, v1, v2):
    """
    Is the triangle defined by the given vertices in counter-clockwise order?
    Input:
        v0, v1, v2 - 2D coordinates for the vertices of the triangle
    Output:
        Returns True if the triangle is counter-clockwise order.
    """
    # Sign of the determinant of the homogeneous vertex matrix gives the
    # triangle's winding (positive area => counter-clockwise).
    mat = numpy.array([[1, v0[0], v0[1]],
                       [1, v1[0], v1[1]],
                       [1, v2[0], v2[1]]])
    return numpy.linalg.det(mat) > 0
# Convert back to image format
def to_uint8(data, normalize = False):
    """ Convert the data in a floating-point vector to unsigned bytes.

    NOTE: clips each row of ``data`` in place to [0, 1] before converting.
    """
    if normalize:
        data = normalize_array(data)
    # Clamp row-by-row (in place) to the valid [0, 1] range.
    for row_index in range(data.shape[0]):
        data[row_index] = data[row_index].clip(0.0, 1.0)
    return (data * 255).round().astype("uint8")
def save_ijvs(A, fname):
    """ Save a sparse matrix as a list of ijv pairings.

    Output format: first line is "height width", then one "i j value" line
    per stored entry (value printed with 17 decimal digits).

    Args:
        A: sparse matrix exposing .tocoo() (e.g. a scipy.sparse matrix).
        fname: Path of the text file to write.
    """
    A = A.tocoo()
    height, width = A.shape
    # Stack (row, col, value) triples into a single [nnz, 3] array.
    M = numpy.empty((A.row.shape[0], 3))
    M[:, 0] = A.row
    M[:, 1] = A.col
    M[:, 2] = A.data
    lines = ["%d %d %.17f\n" % (ijv[0], ijv[1], ijv[2]) for ijv in M]
    with open(fname, "w") as f:
        f.write("%d %d\n" % (height, width))
        for line in lines:
            f.write(line)
def save_dense(A, fname):
    """ Saves an array as a text file, one line per row.

    Each value is written with 17 decimal digits followed by a space.

    Args:
        A: 2D array of floats -- anything iterable row-by-row (the previous
           version unpacked A.shape into unused locals, needlessly requiring
           a numpy-like input).
        fname: Path of the text file to write.
    """
    with open(fname, "w") as f:
        for row in A:
            for val in row:
                f.write("%.17f " % val)
            f.write("\n")
| |
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 Metaswitch Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
felix.devices
~~~~~~~~~~~~
Utility functions for managing devices in Felix.
"""
import logging
from calico.felix.actor import Actor, actor_message
import os
import socket
import struct
from calico import common
from calico.felix import futils
# Logger
from calico.felix.futils import FailedSystemCall
_log = logging.getLogger(__name__)
def configure_global_kernel_config():
    """
    Configures the global kernel config. In particular, sets the flags
    that we rely on to ensure security, such as the kernel's RPF check.

    :raises BadKernelConfig if a problem is detected.
       (BadKernelConfig is defined elsewhere in this module.)
    """
    # For IPv4, we rely on the kernel's reverse path filtering to prevent
    # workloads from spoofing their IP addresses.
    #
    # The RPF check for a particular interface is controlled by several
    # sysctls:
    #
    # - ipv4.conf.all.rp_filter is a global override
    # - ipv4.conf.default.rp_filter controls the value that is set on a newly
    #   created interface
    # - ipv4.conf.<interface>.rp_filter controls a particular interface.
    #
    # The algorithm for combining the global override and per-interface values
    # is to take the *numeric* maximum between the two.  The values are:
    # 0=off, 1=strict, 2=loose.  "loose" is not suitable for Calico since it
    # would allow workloads to spoof packets from other workloads on the same
    # host.  Hence, we need the global override to be <=1 or it would override
    # the per-interface setting to "strict" that we require.
    #
    # We bail out rather than simply setting it because setting 2, "loose",
    # is unusual and it is likely to have been set deliberately.
    ps_name = "/proc/sys/net/ipv4/conf/all/rp_filter"
    rp_filter = int(_read_proc_sys(ps_name))
    if rp_filter > 1:
        _log.critical("Kernel's RPF check is set to 'loose'.  This would "
                      "allow endpoints to spoof their IP address.  Calico "
                      "requires net.ipv4.conf.all.rp_filter to be set to "
                      "0 or 1.")
        raise BadKernelConfig("net.ipv4.conf.all.rp_filter set to 'loose'")

    # Make sure the default for new interfaces is set to strict checking so
    # that there's no race when a new interface is added and felix hasn't
    # configured it yet.
    _write_proc_sys("/proc/sys/net/ipv4/conf/default/rp_filter", "1")

    # We use sysfs for inspecting devices.
    if not os.path.exists("/sys/class/net"):
        raise BadKernelConfig("Felix requires sysfs to be mounted at /sys")
def interface_exists(interface):
    """
    Checks if an interface exists.
    :param str interface: Interface name
    :returns: True if interface device exists

    Note: this checks that the interface exists at a particular point in time
    but the caller needs to be defensive to the interface disappearing before
    it has a chance to access it.
    """
    # Every network device appears as a directory under /sys/class/net.
    sysfs_path = "/sys/class/net/%s" % interface
    return os.path.exists(sysfs_path)
def list_interface_ips(ip_type, interface):
    """
    List IP addresses for which there are routes to a given interface.
    :param str ip_type: IP type, either futils.IPV4 or futils.IPV6
    :param str interface: Interface name
    :returns: a set of all addresses for which there is a route to the device.
    """
    ips = set()
    # "ip route list dev <if>" prints one route per line; the first word of
    # each relevant line is the destination address.
    if ip_type == futils.IPV4:
        data = futils.check_call(
            ["ip", "route", "list", "dev", interface]).stdout
    else:
        data = futils.check_call(
            ["ip", "-6", "route", "list", "dev", interface]).stdout
    lines = data.split("\n")
    _log.debug("Existing routes to %s : %s" % (interface, ",".join(lines)))
    for line in lines:
        # Example of the lines we care about is (having specified the
        # device above):  "10.11.2.66 proto static scope link"
        words = line.split()
        if len(words) > 1:
            ip = words[0]
            if common.validate_ip_addr(ip, futils.IP_TYPE_TO_VERSION[ip_type]):
                # Looks like an IP address. Note that we here are ignoring
                # routes to networks configured when the interface is created.
                ips.add(words[0])
    _log.debug("Found existing IP addresses : %s", ips)
    return ips
def configure_interface_ipv4(if_name):
    """
    Configure the various proc file system parameters for the interface for
    IPv4.

    Specifically,
      - Allow packets from controlled interfaces to be directed to localhost
      - Enable proxy ARP
      - Enable the kernel's RPF check.

    :param if_name: The name of the interface to configure.
    :returns: None
    """
    # Enable the kernel's RPF check, which ensures that a VM cannot spoof
    # its IP address.
    _write_proc_sys('/proc/sys/net/ipv4/conf/%s/rp_filter' % if_name, 1)
    # Allow traffic on this interface to be routed to localhost addresses.
    _write_proc_sys('/proc/sys/net/ipv4/conf/%s/route_localnet' % if_name, 1)
    # Answer ARP requests on behalf of the endpoint (proxy ARP).
    _write_proc_sys("/proc/sys/net/ipv4/conf/%s/proxy_arp" % if_name, 1)
    # proxy_delay=0: answer proxied ARP immediately rather than after the
    # default delay.
    _write_proc_sys("/proc/sys/net/ipv4/neigh/%s/proxy_delay" % if_name, 0)
def configure_interface_ipv6(if_name, proxy_target):
    """
    Configure an interface to support IPv6 traffic from an endpoint.
      - Enable proxy NDP on the interface.
      - Program the given proxy target (gateway the endpoint will use).

    :param if_name: The name of the interface to configure.
    :param proxy_target: IPv6 address which is proxied on this interface for
    NDP.  May be None if no IPv6 proxy target is required.
    :returns: None
    :raises: FailedSystemCall
    """
    _write_proc_sys("/proc/sys/net/ipv6/conf/%s/proxy_ndp" % if_name, 1)
    # Allows None if no IPv6 proxy target is required.
    if proxy_target:
        futils.check_call(["ip", "-6", "neigh", "add",
                           "proxy", str(proxy_target), "dev", if_name])
def _read_proc_sys(name):
with open(name, "rb") as f:
return f.read().strip()
def _write_proc_sys(name, value):
with open(name, "wb") as f:
f.write(str(value))
def add_route(ip_type, ip, interface, mac):
    """
    Add a route to a given interface (including arp config).
    Errors lead to exceptions that are not handled here.

    Note that we use "ip route replace", since that overrides any imported
    routes to the same IP, which might exist in the middle of a migration.

    :param ip_type: Type of IP (IPV4 or IPV6)
    :param str ip: IP address
    :param str interface: Interface name
    :param str mac: MAC address. May not be None unless ip is None.
    :raises FailedSystemCall
    """
    if ip and mac is None:
        raise ValueError("mac must be supplied if ip is provided")

    if ip_type == futils.IPV4:
        # IPv4: program a static ARP entry alongside the route.
        futils.check_call(["arp", "-s", ip, mac, "-i", interface])
        futils.check_call(["ip", "route", "replace", ip, "dev", interface])
    else:
        futils.check_call(
            ["ip", "-6", "route", "replace", ip, "dev", interface])
def del_route(ip_type, ip, interface):
    """
    Delete a route to a given interface (including arp config).

    :param ip_type: Type of IP (IPV4 or IPV6)
    :param str ip: IP address
    :param str interface: Interface name
    :raises FailedSystemCall
    """
    is_ipv4 = (ip_type == futils.IPV4)
    if is_ipv4:
        # IPv4: also drop the static ARP entry programmed by add_route().
        futils.check_call(["arp", "-d", ip, "-i", interface])
        futils.check_call(["ip", "route", "del", ip, "dev", interface])
    else:
        futils.check_call(["ip", "-6", "route", "del", ip, "dev", interface])
def set_routes(ip_type, ips, interface, mac=None, reset_arp=False):
    """
    Set the routes on the interface to be the specified set.

    :param ip_type: Type of IP (IPV4 or IPV6)
    :param set ips: IPs to set up (any not in the set are removed)
    :param str interface: Interface name
    :param str mac|NoneType: MAC address. May not be none unless ips is empty.
    :param bool reset_arp: Reset arp. Only valid if IPv4.
    """
    if mac is None and ips:
        raise ValueError("mac must be supplied if ips is not empty")
    if reset_arp and ip_type != futils.IPV4:
        raise ValueError("reset_arp may only be supplied for IPv4")

    current_ips = list_interface_ips(ip_type, interface)
    # Remove routes (and their conntrack state) for IPs no longer wanted.
    removed_ips = (current_ips - ips)
    for ip in removed_ips:
        del_route(ip_type, ip, interface)
    remove_conntrack_flows(removed_ips, 4 if ip_type == futils.IPV4 else 6)
    # Add routes for IPs that are new in the requested set.
    for ip in (ips - current_ips):
        add_route(ip_type, ip, interface, mac)
    if reset_arp:
        # Refresh the static ARP entries of IPs that remain in place.
        for ip in (ips & current_ips):
            futils.check_call(['arp', '-s', ip, mac, '-i', interface])
def interface_up(if_name):
    """
    Checks whether a given interface is up.

    Check this by examining the operstate of the interface, which is the
    highest level "is it ready to work with" flag.

    :param str if_name: Interface name
    :returns: True if interface up, False if down or cannot detect
    """
    operstate_filename = '/sys/class/net/%s/operstate' % if_name
    try:
        with open(operstate_filename, 'r') as f:
            oper_state = f.read().strip()
    except IOError as e:
        # If we fail to check that the interface is up, then it has probably
        # gone under our feet or is flapping.
        _log.warning("Failed to read state of interface %s (%s) - assume "
                     "down/absent: %r.", if_name, operstate_filename, e)
        return False
    else:
        _log.debug("Interface %s has state %s", if_name, oper_state)
    # Any state other than the literal "up" is treated as down.
    return oper_state == "up"
def remove_conntrack_flows(ip_addresses, ip_version):
    """
    Removes any conntrack entries that use any of the given IP
    addresses in their source/destination.

    :param ip_addresses: iterable of IP address strings.
    :param ip_version: 4 or 6.
    """
    assert ip_version in (4, 6)
    for ip in ip_addresses:
        _log.debug("Removing conntrack rules for %s", ip)
        # A flow can reference the IP in any of these four positions.
        for direction in ["--orig-src", "--orig-dst",
                          "--reply-src", "--reply-dst"]:
            try:
                futils.check_call(["conntrack", "--family",
                                   "ipv%s" % ip_version, "--delete",
                                   direction, ip])
            except FailedSystemCall as e:
                if e.retcode == 1 and "0 flow entries" in e.stderr:
                    # Expected if there are no flows.
                    _log.debug("No conntrack entries found for %s/%s.",
                               ip, direction)
                else:
                    # Suppress the exception, conntrack entries will timeout
                    # and it's hard to think of an example where killing and
                    # restarting felix would help.
                    _log.exception("Failed to remove conntrack flows for %s. "
                                   "Ignoring.", ip)
# These constants map to constants in the Linux kernel. This is a bit poor, but
# the kernel can never change them, so live with it for now.
RTMGRP_LINK = 1      # rtnetlink multicast group: link (interface) changes.
NLMSG_NOOP = 1       # Netlink message type: no-op, skip.
NLMSG_ERROR = 2      # Netlink message type: error report.
RTM_NEWLINK = 16     # rtnetlink: interface created/changed.
RTM_DELLINK = 17     # rtnetlink: interface removed.
IFLA_IFNAME = 3      # Link attribute: interface name (string).
IFLA_OPERSTATE = 16  # Link attribute: operational state.
IF_OPER_UP = 6       # Operational-state value meaning "up".
class RTNetlinkError(Exception):
    """Raised when a netlink error message is received from the kernel."""
class InterfaceWatcher(Actor):
    """Actor that listens on an rtnetlink socket for link messages and
    reports interface up/down transitions to the update splitter."""
    def __init__(self, update_splitter):
        super(InterfaceWatcher, self).__init__()
        # Component notified via on_interface_update() when link state
        # changes.
        self.update_splitter = update_splitter
        # NOTE(review): not referenced elsewhere in this class - confirm
        # whether it is still needed.
        self.interfaces = {}
    @actor_message()
    def watch_interfaces(self):
        """
        Detects when interfaces appear, sending notifications to the update
        splitter.
        :returns: Never returns.
        """
        # Create the netlink socket and bind to RTMGRP_LINK,
        s = socket.socket(socket.AF_NETLINK,
                          socket.SOCK_RAW,
                          socket.NETLINK_ROUTE)
        s.bind((os.getpid(), RTMGRP_LINK))
        # A dict that remembers the detailed flags of an interface
        # when we last signalled it as being up. We use this to avoid
        # sending duplicate interface_update signals.
        if_last_flags = {}
        while True:
            # Get the next set of data.
            data = s.recv(65535)
            # First 16 bytes is the message header; unpack it.
            hdr = data[:16]
            data = data[16:]
            msg_len, msg_type, flags, seq, pid = struct.unpack("=LHHLL", hdr)
            if msg_type == NLMSG_NOOP:
                # Noop - get some more data.
                continue
            elif msg_type == NLMSG_ERROR:
                # We have got an error. Raise an exception which brings the
                # process down.
                raise RTNetlinkError("Netlink error message, header : %s",
                                     futils.hex(hdr))
            _log.debug("Netlink message type %s len %s", msg_type, msg_len)
            if msg_type in [RTM_NEWLINK, RTM_DELLINK]:
                # A new or removed interface. Read the struct
                # ifinfomsg, which is 16 bytes.
                hdr = data[:16]
                data = data[16:]
                _, _, _, index, flags, _ = struct.unpack("=BBHiII", hdr)
                _log.debug("Interface index %s flags %x", index, flags)
                # Bytes left is the message length minus the two headers of 16
                # bytes each.
                remaining = msg_len - 32
                # Loop through attributes, looking for the pieces of
                # information that we need.
                ifname = None
                operstate = None
                while remaining:
                    # The data content is an array of RTA objects, each of
                    # which has a 4 byte header and some data.
                    rta_len, rta_type = struct.unpack("=HH", data[:4])
                    # This check comes from RTA_OK, and terminates a string of
                    # routing attributes.
                    if rta_len < 4:
                        break
                    rta_data = data[4:rta_len]
                    # Remove the RTA object from the data. The length to jump
                    # is the rta_len rounded up to the nearest 4 byte boundary.
                    increment = int((rta_len + 3) / 4) * 4
                    data = data[increment:]
                    remaining -= increment
                    if rta_type == IFLA_IFNAME:
                        # Attribute value is NUL-terminated; drop the
                        # terminator byte.
                        ifname = rta_data[:-1]
                        _log.debug("IFLA_IFNAME: %s", ifname)
                    elif rta_type == IFLA_OPERSTATE:
                        # Single byte holding the RFC 2863 operstate.
                        operstate, = struct.unpack("=B", rta_data[:1])
                        _log.debug("IFLA_OPERSTATE: %s", operstate)
                if (ifname and
                        (msg_type == RTM_DELLINK or operstate != IF_OPER_UP)):
                    # The interface is down; make sure the other actors know
                    # about it.
                    self.update_splitter.on_interface_update(ifname,
                                                             iface_up=False,
                                                             async=True)
                    # Remove any record we had of the interface so that, when
                    # it goes back up, we'll report that.
                    if_last_flags.pop(ifname, None)
                if (ifname and
                        msg_type == RTM_NEWLINK and
                        operstate == IF_OPER_UP and
                        (ifname not in if_last_flags or
                         if_last_flags[ifname] != flags)):
                    # We only care about notifying when a new
                    # interface is usable, which - according to
                    # https://www.kernel.org/doc/Documentation/networking/
                    # operstates.txt - is fully conveyed by the
                    # operstate. (When an interface goes away, it
                    # automatically takes its routes with it.)
                    _log.debug("New network interface : %s %x", ifname, flags)
                    if_last_flags[ifname] = flags
                    self.update_splitter.on_interface_update(ifname,
                                                             iface_up=True,
                                                             async=True)
class BadKernelConfig(Exception):
    """Exception indicating a problem with the kernel's configuration."""
| |
#
# Copyright (c) 2017 Red Hat
# Licensed under The MIT License (MIT)
# http://opensource.org/licenses/MIT
#
from datetime import date, datetime, timedelta
from django.core.urlresolvers import reverse
from rest_framework.test import APITestCase
from rest_framework import status
from pdc.apps.release.models import Release
from pdc.apps.componentbranch.models import SLA
from pdc.apps.releaseschedule.models import ReleaseSchedule
def backend_url(viewname, *args):
    """Return the absolute URL for *viewname* as seen by the test client."""
    path = reverse(viewname, args=args)
    return 'http://testserver' + path
class ReleaseScheduleAPITestCase(APITestCase):
    """End-to-end tests of the release-schedule REST API.

    The fixtures provide one release (test-release-0.1), three SLAs
    (development, bug_fixes, security_fixes) and one existing schedule
    (pk=1: test-release-0.1 / development / 2017-01-01).
    """
    fixtures = [
        'pdc/apps/releaseschedule/fixtures/tests/release.json',
        'pdc/apps/releaseschedule/fixtures/tests/sla.json',
        'pdc/apps/releaseschedule/fixtures/tests/releaseschedule.json',
    ]
    def test_create(self):
        """POSTing a valid schedule creates it and returns the full record."""
        url = reverse('releaseschedule-list')
        data = {
            'release': 'test-release-0.1',
            'sla': 'bug_fixes',
            'date': '2017-01-01',
        }
        response = self.client.post(url, data, format='json')
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        expected = {
            # The 2017 date is in the past, so the schedule is not active.
            'active': False,
            'date': '2017-01-01',
            'release': 'test-release-0.1',
            'sla': 'bug_fixes',
            'release_url': backend_url('release-detail', 'test-release-0.1'),
            'sla_url': backend_url('sla-detail', 2),
            'id': 2,
        }
        self.assertEqual(response.data, expected)
    def test_create_duplicate(self):
        """Creating a schedule that duplicates a fixture row is rejected."""
        # This release schedule already exists.
        url = reverse('releaseschedule-list')
        data = {
            'release': 'test-release-0.1',
            'sla': 'development',
            'date': '2017-01-01',
        }
        response = self.client.post(url, data, format='json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
    def test_patch(self):
        """Each schedule field can be updated independently via PATCH."""
        url = reverse('releaseschedule-detail', args=[1])
        changes = [
            ('date', '2018-01-01'),
            ('release', 'test-release-0.2'),
            ('sla', 'bug_fixes'),
        ]
        for change in changes:
            response = self.client.patch(url, dict([change]), format='json')
            self.assertEqual(response.status_code, status.HTTP_200_OK)
            self.assertEqual(response.data['id'], 1)
            self.assertEqual(response.data[change[0]], change[1])
    def test_get(self):
        """Listing returns the single fixture schedule in full."""
        url = reverse('releaseschedule-list')
        response = self.client.get(url)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data['count'], 1)
        self.assertEqual(response.data['results'][0], {
            'active': False,
            'date': '2017-01-01',
            'release': 'test-release-0.1',
            'sla': 'development',
            'release_url': backend_url('release-detail', 'test-release-0.1'),
            'sla_url': backend_url('sla-detail', 1),
            'id': 1,
        })
    def test_get_filter(self):
        """Exercise list filtering by release, sla, active state and date."""
        url = reverse('releaseschedule-list')
        # Define some dates
        today = datetime.utcnow().date()
        tomorrow = today + timedelta(days=1)
        day_after = today + timedelta(days=2)
        yesterday = today - timedelta(days=1)
        # Create test data
        release_1 = Release.objects.get(pk=1)
        release_2 = Release.objects.get(pk=2)
        sla_dev = SLA.objects.get(pk=1)
        sla_bug = SLA.objects.get(pk=2)
        sla_sec = SLA.objects.get(pk=3)
        # pk=1 comes from the fixture and is dated 2017-01-01 (expired).
        expired_schedule_1 = ReleaseSchedule.objects.get(pk=1)
        active_schedule_1 = ReleaseSchedule.objects.create(
            release=release_1, sla=sla_bug, date=tomorrow)
        future_schedule_1 = ReleaseSchedule.objects.create(
            release=release_1, sla=sla_sec, date=day_after)
        expired_schedule_2 = ReleaseSchedule.objects.create(
            release=release_2, sla=sla_dev, date=yesterday)
        active_schedule_2 = ReleaseSchedule.objects.create(
            release=release_2, sla=sla_bug, date=tomorrow)
        future_schedule_2 = ReleaseSchedule.objects.create(
            release=release_2, sla=sla_sec, date=day_after)
        # Assert that we get all release schedules by default.
        response = self.client.get(url)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data['count'], 6)
        # Filter on release
        response = self.client.get("{}?ordering=id&release=test-release-0.1".format(url))
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data['count'], 3)
        self.assertEqual(
            [result["id"] for result in response.data['results']],
            [expired_schedule_1.id, active_schedule_1.id, future_schedule_1.id]
        )
        # Filter on sla
        response = self.client.get("{}?ordering=id&sla=bug_fixes".format(url))
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data['count'], 2)
        self.assertEqual(
            [result["id"] for result in response.data['results']],
            [active_schedule_1.id, active_schedule_2.id])
        # Filter on active state
        response = self.client.get("{}?ordering=id&active=1".format(url))
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data['count'], 4)
        self.assertEqual(
            [result["id"] for result in response.data['results']],
            [
                active_schedule_1.id, future_schedule_1.id,
                active_schedule_2.id, future_schedule_2.id,
            ]
        )
        # Filter on date
        # date_after is inclusive: tomorrow's schedules are returned too.
        response = self.client.get(
            "{}?ordering=id&date_after={}".format(url, tomorrow.isoformat()))
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data['count'], 4)
        self.assertEqual(
            [result["id"] for result in response.data['results']],
            [active_schedule_1.id, future_schedule_1.id,
             active_schedule_2.id, future_schedule_2.id]
        )
        # date_before is also inclusive of the given day.
        response = self.client.get(
            "{}?ordering=id&date_before={}".format(url, tomorrow.isoformat()))
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data['count'], 4)
        self.assertEqual(
            [
                result["id"] for result in response.data['results']
            ],
            [
                expired_schedule_1.id, active_schedule_1.id,
                expired_schedule_2.id, active_schedule_2.id,
            ]
        )
    def test_use_case_current_releases(self):
        """Filtering active=1&sla=bug_fixes yields only current releases."""
        # Get the current releases, defined by "releases that have an active
        # `bug_fixes` SLA".
        url = reverse('releaseschedule-list')
        # Define some dates
        today = datetime.utcnow().date()
        tomorrow = today + timedelta(days=1)
        yesterday = today - timedelta(days=1)
        # Create test data
        release_1 = Release.objects.get(pk=1)
        release_2 = Release.objects.get(pk=2)
        release_3 = Release.objects.get(pk=3)
        sla_dev = SLA.objects.get(pk=1)
        sla_bug = SLA.objects.get(pk=2)
        # Release 1 is old
        # NOTE(review): return value unused; presumably just checks the
        # fixture row exists - confirm.
        ReleaseSchedule.objects.get(pk=1)
        ReleaseSchedule.objects.create(
            release=release_1, sla=sla_bug, date=yesterday)
        # Release 2 is current
        ReleaseSchedule.objects.create(
            release=release_2, sla=sla_dev, date=yesterday)
        ReleaseSchedule.objects.create(
            release=release_2, sla=sla_bug, date=tomorrow)
        # Release 3 is still in dev
        ReleaseSchedule.objects.create(
            release=release_3, sla=sla_dev, date=tomorrow)
        # Assert that we get all release schedules by default.
        response = self.client.get(url)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data['count'], 5)
        # Filter current releases
        response = self.client.get("{}?active=1&sla=bug_fixes".format(url))
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data['count'], 1)
        self.assertEqual(
            response.data['results'][0]["release"], "test-release-0.2")
    def test_use_case_default_slas(self):
        """Ordering by date returns a release's SLA timeline in sequence."""
        # Get the default SLAs for a release.
        url = reverse('releaseschedule-list')
        # Define some dates
        day1 = date(2018, 1, 1)
        day2 = date(2019, 1, 1)
        day3 = date(2020, 1, 1)
        # Create test data
        release = Release.objects.get(pk=1)
        sla_bug = SLA.objects.get(pk=2)
        sla_sec = SLA.objects.get(pk=3)
        sla_api = SLA.objects.create(name="stable_api")
        ReleaseSchedule.objects.create(
            release=release, sla=sla_bug, date=day1)
        ReleaseSchedule.objects.create(
            release=release, sla=sla_sec, date=day2)
        ReleaseSchedule.objects.create(
            release=release, sla=sla_api, date=day3)
        # Assert that we get all release schedules by default.
        response = self.client.get(
            "{}?ordering=date&release=test-release-0.1".format(url))
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data['count'], 4)
        self.assertEqual(
            [
                (result["sla"], result["date"])
                for result in response.data['results']
            ], [
                ("development", "2017-01-01"),
                ("bug_fixes", "2018-01-01"),
                ("security_fixes", "2019-01-01"),
                ("stable_api", "2020-01-01")
            ])
class ReleaseScheduleModelTestCase(APITestCase):
    """Checks the computed ``active`` flag of the ReleaseSchedule model."""
    fixtures = [
        'pdc/apps/releaseschedule/fixtures/tests/release.json',
        'pdc/apps/releaseschedule/fixtures/tests/sla.json',
    ]
    def test_active(self):
        """A future-dated schedule is active; a past-dated one is not."""
        now = datetime.utcnow().date()
        one_day = timedelta(days=1)
        release = Release.objects.get(pk=1)
        development = SLA.objects.get(pk=1)
        bug_fixes = SLA.objects.get(pk=2)
        past_schedule = ReleaseSchedule.objects.create(
            release=release, sla=development, date=(now - one_day)
        )
        future_schedule = ReleaseSchedule.objects.create(
            release=release, sla=bug_fixes, date=(now + one_day)
        )
        self.assertFalse(past_schedule.active)
        self.assertTrue(future_schedule.active)
| |
#! /usr/bin/env python
########################################################################
# SimpleFIX
# Copyright (C) 2017-2022, David Arnold.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
########################################################################
"""FIX protocol constants."""
import sys
if sys.version_info[0] == 2:
    # Python 2: indexing a bytes (str) value yields a one-character
    # string, so the field delimiter constants stay as byte strings.
    EQUALS_BYTE = b'='
    SOH_BYTE = b'\x01'
    SOH_STR = SOH_BYTE
else:
    # Python 3: indexing a bytes value yields an int, so the per-byte
    # constants are the integer code points (61 == ord('='), 1 == SOH).
    # SOH_STR keeps the bytes form for building/searching buffers.
    EQUALS_BYTE = 61
    SOH_BYTE = 1
    SOH_STR = b'\x01'
# Tag 1
TAG_ACCOUNT = b'1'
# Tag 2
TAG_ADVID = b'2'
# Tag 3
TAG_ADVREFID = b'3'
# Tag 4
TAG_ADVSIDE = b'4'
ADVSIDE_BUY = b'B'
ADVSIDE_CROSS = b'X'
ADVSIDE_SELL = b'S'
ADVSIDE_TRADE = b'T'
# Tag 5
TAG_ADVTRANSTYPE = b'5'
ADVTRANSTYPE_CANCEL = b'C'
ADVTRANSTYPE_NEW = b'N'
ADVTRANSTYPE_REPLACE = b'R'
# Tag 6
TAG_AVGPX = b'6'
# Tag 7
TAG_BEGINSEQNO = b'7'
# Tag 8
TAG_BEGINSTRING = b'8'
# Tag 9
TAG_BODYLENGTH = b'9'
# Tag 10
TAG_CHECKSUM = b'10'
# Tag 11
TAG_CLORDID = b'11'
# Tag 12
TAG_COMMISSION = b'12'
# Tag 13
TAG_COMMTYPE = b'13'
COMMTYPE_PER_UNIT = b'1'
COMMTYPE_PERCENT = b'2'
COMMTYPE_ABSOLUTE = b'3'
COMMTYPE_PERCENT_WAIVED_CASH = b'4'
COMMTYPE_PERCENT_WAIVED_ENHANCED = b'5'
COMMTYPE_POINTS = b'6'
# Tag 14
TAG_CUMQTY = b'14'
# Tag 15
TAG_CURRENCY = b'15'
CURRENCY_AFGHANI = b'AFA'
CURRENCY_ALGERIAN_DINAR = b'DZD'
CURRENCY_ANDORRAN_PESETA = b'ADP'
CURRENCY_ARGENTINE_PESO = b'ARS'
CURRENCY_ARMENIAN_DRAM = b'AMD'
CURRENCY_ARUBAN_GUILDER = b'AWG'
CURRENCY_AUSTRALIAN_DOLLAR = b'AUD'
CURRENCY_AZERBAIJANIAN_MANAT = b'AZM'
CURRENCY_BAHAMIAN_DOLLAR = b'BSD'
# FIXME: many, many, more.
# Tag 16
TAG_ENDSEQNO = b'16'
# Tag 17
TAG_EXECID = b'17'
# Tag 18
TAG_EXECINST = b'18'
EXECINST_NOT_HELD = b'1'
EXECINST_WORK = b'2'
EXECINST_GO_ALONG = b'3'
EXECINST_OVER_THE_DAY = b'4'
EXECINST_HELD = b'5'
EXECINST_PARTICIPATE_DONT_INITIATE = b'6'
EXECINST_STRICT_SCALE = b'7'
EXECINST_TRY_TO_SCALE = b'8'
EXECINST_STAY_ON_BID_SIDE = b'9'
EXECINST_STAY_ON_OFFER_SIDE = b'0'
EXECINST_NO_CROSS = b'A'
EXECINST_OK_TO_CROSS = b'B'
EXECINST_CALL_FIRST = b'C'
EXECINST_PERCENT_OF_VOLUME = b'D'
EXECINST_DO_NOT_INCREASE = b'E'
EXECINST_DO_NOT_REDUCE = b'F'
EXECINST_ALL_OR_NONE = b'G'
EXECINST_REINSTATE_ON_SYSTEM_FAILURE = b'H'
EXECINST_INSTITUTIONS_ONLY = b'I'
EXECINST_REINSTATE_ON_TRADING_HALT = b'J'
EXECINST_CANCEL_ON_TRADING_HALT = b'K'
EXECINST_LAST_PEG = b'L'
EXECINST_MID_PRICE_PEG = b'M'
EXECINST_NON_NEGOTIABLE = b'N'
EXECINST_OPENING_PEG = b'O'
EXECINST_MARKET_PEG = b'P'
EXECINST_CANCEL_ON_SYSTEM_FAILURE = b'Q'
EXECINST_PRIMARY_PEG = b'R'
EXECINST_SUSPEND = b'S'
EXECINST_CUSTOMER_DISPLAY_INSTRUCTION = b'U'
EXECINST_NETTING = b'V'
EXECINST_PEG_TO_VWAP = b'W'
EXECINST_TRADE_ALONG = b'X'
EXECINST_TRY_TO_STOP = b'Y'
EXECINST_CANCEL_IF_NOT_BEST = b'Z'
EXECINST_TRAILING_STOP_PEG = b'a'
EXECINST_STRICT_LIMIT = b'b'
EXECINST_IGNORE_PRICE_VALIDITY_CHECKS = b'c'
EXECINST_PEG_TO_LIMIT_PRICE = b'd'
EXECINST_WORK_TO_TARGET_STRATEGY = b'e'
# Tag 19
TAG_EXECREFID = b'19'
# Tag 20
TAG_EXECTRANSTYPE = b'20'
EXECTRANSTYPE_NEW = b'0'
EXECTRANSTYPE_CANCEL = b'1'
EXECTRANSTYPE_CORRECT = b'2'
EXECTRANSTYPE_STATUS = b'3'
# Tag 21
TAG_HANDLINST = b'21'
HANDLINST_AUTO_PRIVATE = b'1'
HANDLINST_AUTO_PUBLIC = b'2'
HANDLINST_MANUAL = b'3'
# Tag 22
TAG_SECURITYIDSOURCE = b'22'
SECURITYIDSOURCE_CUSIP = b'1'
SECURITYIDSOURCE_SEDOL = b'2'
SECURITYIDSOURCE_QUIK = b'3'
SECURITYIDSOURCE_ISIN = b'4'
SECURITYIDSOURCE_RIC = b'5'
SECURITYIDSOURCE_ISO_CURRENCY = b'6'
SECURITYIDSOURCE_ISO_COUNTRY = b'7'
SECURITYIDSOURCE_EXCHANGE = b'8'
SECURITYIDSOURCE_CTA = b'9'
SECURITYIDSOURCE_BLOOMBERG = b'A'
SECURITYIDSOURCE_WERTPAPIER = b'B'
SECURITYIDSOURCE_BUTCH = b'C'
SECURITYIDSOURCE_VALOREN = b'D'
SECURITYIDSOURCE_SICOVAM = b'E'
SECURITYIDSOURCE_BELGIAN = b'F'
SECURITYIDSOURCE_COMMON = b'G'
SECURITYIDSOURCE_CLEARING = b'H'
SECURITYIDSOURCE_IDSA = b'I'
SECURITYIDSOURCE_OPRA = b'J'
SECURITYIDSOURCE_IDSA_URL = b'K'
SECURITYIDSOURCE_LETTER_OF_CREDIT = b'L'
SECURITYIDSOURCE_MARKETPLACE = b'M'
SECURITYIDSOURCE_MARKIT_RED_ENTITY_CLIP = b'N'
SECURITYIDSOURCE_MARKIT_RED_PAIR_CLIP = b'P'
SECURITYIDSOURCE_CFTC = b'Q'
SECURITYIDSOURCE_IDSA_COMMODITY_REFERENCE_PRICE = b'R'
SECURITYIDSOURCE_FINANCIAL_INSTRUMENT_GLOBAL_IDENTIFIER = b'S'
SECURITYIDSOURCE_LEGAL_ENTITY_IDENTIFIER = b'T'
SECURITYIDSOURCE_SYNTHETIC = b'U'
SECURITYIDSOURCE_FIM = b'V'
SECURITYIDSOURCE_INDEX_NAME = b'W'
# Tag 23
TAG_IOIID = b'23'
# Tag 24
TAG_IOIOTHSVC = b'24'
IOIOTHSVC_AUTEX = b'A'
IOIOTHSVC_BRIDGE = b'B'
IOIOTHSVC_AUTEX_AND_BRIDGE = b'AB'
IOIOTHSVC_BRIDGE_AND_AUTEX = b'BA'
# Tag 25
TAG_IOIQLTYIND = b'25'
# this is to avoid breaking code using the old typo
TAG_IOIQlTYIND = TAG_IOIQLTYIND
IOIQLTYIND_HIGH = b'H'
IOIQLTYIND_MEDIUM = b'M'
IOIQLTYIND_LOW = b'L'
# Tag 26
TAG_IOIREFID = b'26'
# Tag 27
TAG_IOIQTY = b'27'
IOIQTY_SMALL = b'S'
IOIQTY_MEDIUM = b'M'
IOIQTY_LARGE = b'L'
IOIQTY_UNDISCLOSED = b'U'
# Tag 28
TAG_IOITRANSTYPE = b'28'
IOITRANSTYPE_NEW = b'N'
IOITRANSTYPE_CANCEL = b'C'
IOITRANSTYPE_REPLACE = b'R'
# Tag 29
TAG_LASTCAPACITY = b'29'
LASTCAPACITY_AGENT = b'1'
LASTCAPACITY_CROSS_AS_AGENT = b'2'
LASTCAPACITY_CROSS_AS_PRINCIPAL = b'3'
LASTCAPACITY_PRINCIPAL = b'4'
LASTCAPACITY_RISKLESS_PRINCIPAL = b'5'
# Tag 30
TAG_LASTMKT = b'30'
LASTMKT_BLOOMBERG_TRADEBOOK = b'31'
LASTMKT_BONDBOOK = b'32'
LASTMKT_BONDCLICK = b'35'
LASTMKT_BONDHUB = b'36'
LASTMKT_LIMITRADER = b'37'
LASTMKT_MARKETAXESS = b'33'
LASTMKT_MUNICENTER = b'34'
LASTMKT_NONE = b'0'
LASTMKT_OTC = b'11'
LASTMKT_NYFIX_MILLENIUM = b'13'
LASTMKT_NYSE_BBSS = b'10'
LASTMKT_POSIT = b'4'
LASTMKT_STOCKHOLM_OPTIONS_MARKET = b'17'
LASTMKT_VANCOUVER_OPTIONS_EXCHANGE = b'9'
LASTMKT_VISIBLE_MARKETS = b'38'
LASTMKT_TRADEWEB = b'30'
LASTMKT_ARCHIPELAGO = b'39'
LASTMKT_ATTAIN = b'40'
LASTMKT_BRUT = b'41'
LASTMKT_GLOBENET = b'42'
LASTMKT_INSTINET = b'43'
LASTMKT_ISLAND = b'44'
LASTMKT_MARKETXT = b'45'
LASTMKT_NEXTRADE = b'46'
LASTMKT_REDIBOOK = b'47'
LASTMKT_NQLX = b'49'
LASTMKT_ONECHICAGO = b'50'
LASTMKT_TRACK_DATA = b'51'
LASTMKT_TRACK_TRAC = b'52'
LASTMKT_PIPELINE = b'53'
LASTMKT_BATS = b'54'
LASTMKT_BIDS = b'55'
LASTMKT_DIRECT_EDGE_X = b'56'
LASTMKT_DIRECT_EDGE = b'57'
LASTMKT_LEVELATS = b'58'
LASTMKT_LAVA_TRADING = b'59'
LASTMKT_BOSTON_OPTIONS_EXCHANGE = b'60'
LASTMKT_NATIONAL_STOCK_EXCHANGE = b'61'
LASTMKT_LIQUIDNET = b'62'
LASTMKT_NYFIX_EURO_MILLENIUM = b'63'
LASTMKT_NASDAQ_OPTIONS_MARKET = b'64'
LASTMKT_BLOCKCROSS_ATS = b'66'
LASTMKT_MATCH_ATS = b'67'
LASTMKT_ATHENS_STOCK_EXCHANGE_REUTERS = b'AT'
LASTMKT_ATHENS_STOCK_EXCHANGE_MARKET = b'ASE'
LASTMKT_LATIBEX = b'LA'
LASTMKT_MADRID_STOCK_EXCHANGE = b'MC'
LASTMKT_OCCIDENTS_STOCK_EXCHANGE = b'OD'
LASTMKT_SBI_STOCK_EXCHANGE = b'SBI'
LASTMKT_DOHA_SECURITIES_MARKET = b'DSMD'
LASTMKT_INTERCONTINENTAL_EXCHANGE = b'IEPA'
LASTMKT_PINKSHEETS = b'PINX'
LASTMKT_THE_THIRD_MARKET_CORPORATION = b'THRD'
LASTMKT_TRADEWEB_LLC = b'TRWB'
# Tag 31
TAG_LASTPX = b'31'
# Tag 32
TAG_LASTQTY = b'32'
# Tag 33
TAG_NOLINESOFTEXT = b'33'
# Tag 34
TAG_MSGSEQNUM = b'34'
# Tag 35
TAG_MSGTYPE = b'35'
MSGTYPE_HEARTBEAT = b'0'
MSGTYPE_TEST_REQUEST = b'1'
MSGTYPE_RESEND_REQUEST = b'2'
MSGTYPE_REJECT = b'3'
MSGTYPE_SEQUENCE_RESET = b'4'
MSGTYPE_LOGOUT = b'5'
MSGTYPE_INDICATION_OF_INTEREST = b'6'
MSGTYPE_ADVERTISEMENT = b'7'
MSGTYPE_EXECUTION_REPORT = b'8'
MSGTYPE_ORDER_CANCEL_REJECT = b'9'
MSGTYPE_LOGON = b'A'
MSGTYPE_NEWS = b'B'
MSGTYPE_EMAIL = b'C'
MSGTYPE_NEW_ORDER_SINGLE = b'D'
MSGTYPE_NEW_ORDER_LIST = b'E'
MSGTYPE_ORDER_CANCEL_REQUEST = b'F'
MSGTYPE_ORDER_CANCEL_REPLACE_REQUEST = b'G'
MSGTYPE_ORDER_STATUS_REQUEST = b'H'
MSGTYPE_ALLOCATION = b'J'
MSGTYPE_LIST_CANCEL_REQUEST = b'K'
MSGTYPE_LIST_EXECUTE = b'L'
MSGTYPE_LIST_STATUS_REQUEST = b'M'
MSGTYPE_LIST_STATUS = b'N'
MSGTYPE_ALLOCATION_ACK = b'P'
MSGTYPE_DONT_KNOW_TRADE = b'Q'
MSGTYPE_QUOTE_REQUEST = b'R'
MSGTYPE_QUOTE = b'S'
MSGTYPE_SETTLEMENT_INSTRUCTIONS = b'T'
MSGTYPE_MARKET_DATA_REQUEST = b'V'
MSGTYPE_MARKET_DATA_SNAPSHOT_FULL_REFRESH = b'W'
MSGTYPE_MARKET_DATA_INCREMENTAL_REFRESH = b'X'
MSGTYPE_MARKET_DATA_REQUEST_REJECT = b'Y'
MSGTYPE_QUOTE_CANCEL = b'Z'
MSGTYPE_QUOTE_STATUS_REQUEST = b'a'
MSGTYPE_QUOTE_ACKNOWLEDGEMENT = b'b'
MSGTYPE_SECURITY_DEFINITION_REQUEST = b'c'
MSGTYPE_SECURITY_DEFINITION = b'd'
MSGTYPE_SECURITY_STATUS_REQUEST = b'e'
MSGTYPE_SECURITY_STATUS = b'f'
MSGTYPE_TRADING_SESSION_STATUS_REQUEST = b'g'
MSGTYPE_TRADING_SESSION_STATUS = b'h'
MSGTYPE_MASS_QUOTE = b'i'
MSGTYPE_BUSINESS_MESSAGE_REJECT = b'j'
MSGTYPE_BID_REQUEST = b'k'
MSGTYPE_BID_RESPONSE = b'l'
MSGTYPE_LIST_STRIKE_PRICE = b'm'
MSGTYPE_XML_MESSAGE = b'n'
MSGTYPE_REGISTRATION_INSTRUCTIONS = b'o'
MSGTYPE_REGISTRATION_INSTRUCTIONS_RESPONSE = b'p'
MSGTYPE_ORDER_MASS_CANCEL_REQUEST = b'q'
MSGTYPE_ORDER_MASS_CANCEL_REPORT = b'r'
MSGTYPE_NEW_ORDER_CROSS = b's'
MSGTYPE_CROSS_ORDER_CANCEL_REPLACE_REQUEST = b't'
MSGTYPE_CROSS_ORDER_CANCEL_REQUEST = b'u'
MSGTYPE_SECURITY_TYPE_REQUEST = b'v'
MSGTYPE_SECURITY_TYPES = b'w'
MSGTYPE_SECURITY_LIST_REQUEST = b'x'
MSGTYPE_SECURITY_LIST = b'y'
MSGTYPE_DERIVATIVE_SECURITY_LIST_REQUEST = b'z'
MSGTYPE_DERIVATIVE_SECURITY_LIST = b'AA'
MSGTYPE_NEW_ORDER_MULTILEG = b'AB'
MSGTYPE_MULTILEG_ORDER_CANCEL_REPLACE_REQUEST = b'AC'
MSGTYPE_TRADE_CAPTURE_REPORT_REQUEST = b'AD'
MSGTYPE_TRADE_CAPTURE_REPORT = b'AE'
MSGTYPE_ORDER_MASS_STATUS_REQUEST = b'AF'
MSGTYPE_QUOTE_REQUEST_REJECT = b'AG'
MSGTYPE_RFQ_REQUEST = b'AH'
MSGTYPE_QUOTE_STATUS_REPORT = b'AI'
MSGTYPE_QUOTE_RESPONSE = b'AJ'
MSGTYPE_CONFIRMATION = b'AK'
MSGTYPE_POSITION_MAINTENANCE_REQUEST = b'AL'
MSGTYPE_POSITION_MAINTENANCE_REPORT = b'AM'
MSGTYPE_REQUEST_FOR_POSITIONS = b'AN'
MSGTYPE_REQUEST_FOR_POSITIONS_ACK = b'AO'
MSGTYPE_POSITION_REPORT = b'AP'
MSGTYPE_TRADE_CAPTURE_REPORT_REQUEST_ACK = b'AQ'
MSGTYPE_TRADE_CAPTURE_REPORT_ACK = b'AR'
MSGTYPE_ALLOCATION_REPORT = b'AS'
MSGTYPE_ALLOCATION_REPORT_ACK = b'AT'
MSGTYPE_CONFIRMATION_ACK = b'AU'
MSGTYPE_SETTLEMENT_INSTRUCTION_REQUEST = b'AV'
MSGTYPE_ASSIGNMENT_REPORT = b'AW'
MSGTYPE_COLLATERAL_REQUEST = b'AX'
MSGTYPE_COLLATERAL_ASSIGNMENT = b'AY'
MSGTYPE_COLLATERAL_RESPONSE = b'AZ'
MSGTYPE_COLLATERAL_REPORT = b'BA'
MSGTYPE_COLLATERAL_INQUIRY = b'BB'
MSGTYPE_NETWORK_STATUS_REQUEST = b'BC'
MSGTYPE_NETWORK_STATUS_RESPONSE = b'BD'
MSGTYPE_USER_REQUEST = b'BE'
MSGTYPE_USER_RESPONSE = b'BF'
MSGTYPE_COLLATERAL_INQUIRY_ACK = b'BG'
MSGTYPE_CONFIRMATION_REQUEST = b'BH'
# Tag 36
TAG_NEWSEQNO = b'36'
# Tag 37
TAG_ORDERID = b'37'
# Tag 38
TAG_ORDERQTY = b'38'
# Tag 39
TAG_ORDSTATUS = b'39'
ORDSTATUS_NEW = b'0'
ORDSTATUS_PARTIALLY_FILLED = b'1'
ORDSTATUS_FILLED = b'2'
ORDSTATUS_DONE_FOR_DAY = b'3'
ORDSTATUS_CANCELED = b'4'
ORDSTATUS_REPLACED = b'5'
ORDSTATUS_PENDING_CANCEL = b'6'
ORDSTATUS_STOPPED = b'7'
ORDSTATUS_REJECTED = b'8'
ORDSTATUS_SUSPENDED = b'9'
ORDSTATUS_PENDING_NEW = b'A'
ORDSTATUS_CALCULATED = b'B'
ORDSTATUS_EXPIRED = b'C'
ORDSTATUS_ACCEPTED_FOR_BIDDING = b'D'
ORDSTATUS_PENDING_REPLACE = b'E'
# Tag 40
TAG_ORDTYPE = b'40'
ORDTYPE_MARKET = b'1'
ORDTYPE_LIMIT = b'2'
ORDTYPE_STOP = b'3'
ORDTYPE_STOP_LIMIT = b'4'
ORDTYPE_MARKET_ON_CLOSE = b'5'
ORDTYPE_WITH_OR_WITHOUT = b'6'
ORDTYPE_LIMIT_OR_BETTER = b'7'
ORDTYPE_LIMIT_WITH_OR_WITHOUT = b'8'
ORDTYPE_ON_BASIS = b'9'
ORDTYPE_ON_CLOSE = b'A'
ORDTYPE_LIMIT_ON_CLOSE = b'B'
ORDTYPE_FOREX_MARKET = b'C'
ORDTYPE_PREVIOUSLY_QUOTED = b'D'
ORDTYPE_PREVIOUSLY_INDICATED = b'E'
ORDTYPE_FOREX_LIMIT = b'F'
ORDTYPE_FOREX_SWAP = b'G'
ORDTYPE_FOREX_PREVIOUSLY_QUOTED = b'H'
ORDTYPE_FUNARI = b'I'
ORDTYPE_MARKET_IF_TOUCHED = b'J'
ORDTYPE_MARKET_WITH_LEFTOVER_AS_LIMIT = b'K'
ORDTYPE_PREVIOUS_FUND_VALUATION_POINT = b'L'
ORDTYPE_NEXT_FUND_VALUATION_POINT = b'M'
ORDTYPE_PEGGED = b'P'
ORDTYPE_COUNTER_ORDER_SELECTION = b'Q'
ORDTYPE_STOP_ON_BID_OR_OFFER = b'R'
ORDTYPE_STOP_LIMIT_ON_BID_OR_OFFER = b'S'
# Tag 41
TAG_ORIGCLORDID = b'41'
# Tag 42
TAG_ORIGTIME = b'42'
# Tag 43
TAG_POSSDUPFLAG = b'43'
POSSDUPFLAG_NO = b'N'
POSSDUPFLAG_YES = b'Y'
# Tag 44
TAG_PRICE = b'44'
# Tag 45
TAG_REFSEQNUM = b'45'
# Tag 46
TAG_RELATDSYM = b'46'
# Tag 47
TAG_RULE80A = b'47'
RULE80A_AGENCY_SINGLE_ORDER = b'A'
RULE80A_ALL_OTHER_ORDERS_AS_AGENT_FOR_OTHER_MEMBER = b'W'
RULE80A_COMPETING_DEALER_TRADES = b'T'
RULE80A_INDIVIDUAL_INVESTOR_SINGLE_ORDER = b'I'
RULE80A_PRINCIPAL = b'P'
RULE80A_PROGRAM_ORDER_INDEX_ARB_FOR_INDIVIDUAL_CUSTOMER = b'J'
RULE80A_PROGRAM_ORDER_INDEX_ARB_FOR_MEMBER_FIRM_ORG = b'D'
RULE80A_PROGRAM_ORDER_INDEX_ARB_FOR_OTHER_AGENCY = b'U'
RULE80A_PROGRAM_ORDER_INDEX_ARB_FOR_OTHER_MEMBER = b'M'
RULE80A_PROGRAM_ORDER_NON_INDEX_ARB_FOR_INDIVIDUAL_CUSTOMER = b'K'
RULE80A_PROGRAM_ORDER_NON_INDEX_ARB_FOR_MEMBER_FIRM_ORG = b'C'
RULE80A_PROGRAM_ORDER_NON_INDEX_ARB_FOR_OTHER_AGENCY = b'Y'
RULE80A_PROGRAM_ORDER_NON_INDEX_ARB_FOR_OTHER_MEMBER = b'N'
RULE80A_ROPRIETARY_TRANSACTIONS_FOR_COMPETING_MARKET_MAKER_THAT_IS_AFFILIATED_WITH_THE_CLEARING_MEMBER = b'O'
RULE80A_SHORT_EXEMPT_TRANSACTION_B = b'B'
RULE80A_SHORT_EXEMPT_TRANSACTION_F = b'F'
RULE80A_SHORT_EXEMPT_TRANSACTION_FOR_MEMBER_COMPETING_MARKET_MAKER_AFFILIATED_WITH_THE_FIRM_CLEARING_THE_TRADE = b'L'
RULE80A_SHORT_EXEMPT_TRANSACTION_FOR_MEMBER_COMPETING_MARKET_MAKER_NOT_AFFILIATED_WITH_THE_FIRM_CLEARING_THE_THREAD = b'X'
RULE80A_SHORT_EXEMPT_TRANSACTION_FOR_NON_MEMBER_COMPETING_MARKET_MAKER = b'Z'
RULE80A_SHORT_EXEMPT_TRANSACTION_FOR_PRINCIPAL = b'E'
RULE80A_SHORT_EXEMPT_TRANSACTION_H = b'H'
RULE80A_SPECIALIST_TRADES = b'S'
RULE80A_TRANSACTIONS_FOR_THE_ACCOUNT_OF_A_NON_MEMBER_COMPETING_MARKET_MAKER = b'R'
# Tag 48
TAG_SECURITY_ID = b'48'
# Tag 49
TAG_SENDER_COMPID = b'49'
# Tag 50
TAG_SENDER_SUBID = b'50'
# Tag 51
TAG_SENDING_DATE = b'51'
# Tag 52
TAG_SENDING_TIME = b'52'
# Tag 53
TAG_QUANTITY = b'53'
# Tag 54
TAG_SIDE = b'54'
SIDE_BUY = b'1'
SIDE_SELL = b'2'
SIDE_BUY_MINUS = b'3'
SIDE_SELL_PLUS = b'4'
SIDE_SELL_SHORT = b'5'
SIDE_SELL_SHORT_EXEMPT = b'6'
SIDE_UNDISCLOSED = b'7'
SIDE_CROSS = b'8'
SIDE_CROSS_SHORT = b'9'
SIDE_CROSS_SHORT_EXEMPT = b'A'
SIDE_AS_DEFINED = b'B'
SIDE_OPPOSITE = b'C'
SIDE_SUBSCRIBE = b'D'
SIDE_REDEEM = b'E'
SIDE_LEND = b'F'
SIDE_BORROW = b'G'
SIDE_SELL_UNDISCLOSED = b'H'
# Tag 55
TAG_SYMBOL = b'55'
# Tag 56
TAG_TARGET_COMPID = b'56'
# Tag 57
TAG_TARGET_SUBID = b'57'
# Tag 58
TAG_TEXT = b'58'
# Tag 59
TAG_TIMEINFORCE = b'59'
TIMEINFORCE_DAY = b'0'
TIMEINFORCE_GOOD_TILL_CANCEL = b'1'
TIMEINFORCE_AT_THE_OPENING = b'2'
TIMEINFORCE_IMMEDIATE_OR_CANCEL = b'3'
TIMEINFORCE_FILL_OR_KILL = b'4'
TIMEINFORCE_GOOD_TILL_CROSSING = b'5'
TIMEINFORCE_GOOD_TILL_DATE = b'6'
TIMEINFORCE_AT_THE_CLOSE = b'7'
TIMEINFORCE_GOOD_THROUGH_CROSSING = b'8'
TIMEINFORCE_AT_CROSSING = b'9'
TIMEINFORCE_GOOD_FOR_TIME = b'A'
TIMEINFORCE_GOOD_FOR_AUCTION = b'B'
# Tag 60
TAG_TRANSACTTIME = b'60'
# Tag 61
TAG_URGENCY = b'61'
URGENCY_NORMAL = b'0'
URGENCY_FLASH = b'1'
URGENCY_BACKGROUND = b'2'
# Tag 62
TAG_VALIDUNTILTIME = b'62'
# Tag 63
TAG_SETTLTYPE = b'63'
SETTLTYPE_REGULAR = b'0'
SETTLTYPE_CASH = b'1'
SETTLTYPE_NEXT_DAY = b'2'
SETTLTYPE_T2 = b'3'
SETTLTYPE_T3 = b'4'
SETTLTYPE_T4 = b'5'
SETTLTYPE_FUTURE = b'6'
SETTLTYPE_WHEN_AND_IF_ISSUED = b'7'
SETTLTYPE_SELLERS_OPTION = b'8'
SETTLTYPE_T5 = b'9'
SETTLTYPE_T1 = b'A'
SETTLTYPE_BROKEN_DATE = b'B'
SETTLTYPE_SPOT1 = b'C'
# Tag 64
TAG_SETTLDATE = b'64'
# Tag 65
TAG_SYMBOLSFX = b'65'
# Tag 66
TAG_LISTID = b'66'
# Tag 67
TAG_LISTSEQNO = b'67'
# Tag 68
TAG_TOTNOORDERS = b'68'
# Tag 69
TAG_LISTEXECINST = b'69'
# Tag 70
TAG_ALLOCID = b'70'
# Tag 71
TAG_ALLOCTRANSTYPE = b'71'
ALLOCTRANSTYPE_NEW = b'0'
ALLOCTRANSTYPE_REPLACE = b'1'
ALLOCTRANSTYPE_CANCEL = b'2'
ALLOCTRANSTYPE_PRELIMINARY = b'3'
ALLOCTRANSTYPE_CALCULATED = b'4'
ALLOCTRANSTYPE_CALCULATED_WITHOUT_PRELIMINARY = b'5'
ALLOCTRANSTYPE_REVERSAL = b'6'
# Tag 72
TAG_REFALLOCID = b'72'
# Tag 73
TAG_NOORDERS = b'73'
# Tag 74
TAG_AVGPRXPRECISION = b'74'
# Tag 75
TAG_TRADEDATE = b'75'
# Tag 76
TAG_EXECBROKER = b'76'
# Tag 77
TAG_OPENCLOSE = b'77'
OPENCLOSE_OPEN = b'O'
OPENCLOSE_CLOSER = b'C'
# Tag 78
TAG_NOALLOCS = b'78'
# Tag 79
TAG_ALLOCACCOUNT = b'79'
# Tag 80
TAG_ALLOCSHARES = b'80'
# Tag 81
TAG_PROCESSCODE = b'81'
PROCESSCODE_REGULAR = b'0'
PROCESSCODE_SOFT_DOLLAR = b'1'
PROCESSCODE_STEP_IN = b'2'
PROCESSCODE_STEP_OUT = b'3'
PROCESSCODE_SOFT_DOLLAR_STEP_IN = b'4'
PROCESSCODE_SOFT_DOLLAR_STEP_OUT = b'5'
PROCESSCODE_PLAN_SPONSOR = b'6'
# Tag 82
TAG_NORPTS = b'82'
# Tag 83
TAG_RPTSEQ = b'83'
# Tag 84
TAG_CXLQTY = b'84'
# Tag 85
# Tag 86
# Tag 87
TAG_ALLOCSTATUS = b'87'
ALLOCSTATUS_ACCEPTED = b'0'
ALLOCSTATUS_REJECTED = b'1'
ALLOCSTATUS_PARTIAL_ACCEPT = b'2'
ALLOCSTATUS_RECEIVED = b'3'
# Tag 88
TAG_ALLOCREJCODE = b'88'
ALLOCREJCODE_UNKNOWN_ACCOUNT = b'0'
ALLOCREJCODE_INCORRECT_QUANTITY = b'1'
ALLOCREJCODE_INCORRECT_AVERAGE_PRICE = b'2'
ALLOCREJCODE_UNKNOWN_EXECUTING_BROKER_MNEMONIC = b'3'
ALLOCREJCODE_COMMISSION_DIFFERENCE = b'4'
ALLOCREJCODE_UNKNOWN_ORDERID = b'5'
ALLOCREJCODE_UNKNOWN_LISTID = b'6'
ALLOCREJCODE_OTHER = b'7'
# Tag 89
TAG_SIGNATURE = b'89'
# Tag 90
TAG_SECUREDATALEN = b'90'
# Tag 91
TAG_SECUREDATA = b'91'
# Tag 92
TAG_BROKEROFCREDIT = b'92'
# Tag 93
TAG_SIGNATURELENGTH = b'93'
# Tag 94
TAG_EMAILTYPE = b'94'
EMAILTYPE_NEW = b'0'
EMAILTYPE_REPLY = b'1'
EMAILTYPE_ADMIN_REPLY = b'2'
# Tag 95
TAG_RAWDATALENGTH = b'95'
# Tag 96
TAG_RAWDATA = b'96'
# Tag 97
TAG_POSSRESEND = b'97'
# Tag 98
TAG_ENCRYPTMETHOD = b'98'
ENCRYPTMETHOD_NONE = b'0'
ENCRYPTMETHOD_PKCS = b'1'
ENCRYPTMETHOD_DES = b'2'
ENCRYPTMETHOD_PKCS_DES = b'3'
ENCRYPTMETHOD_PGP_DES = b'4'
ENCRYPTMETHOD_PGP_DES_MD5 = b'5'
ENCRYPTMETHOD_PEM_DES_MD5 = b'6'
# Tag 99
TAG_STOPPX = b'99'
# Tag 100
TAG_EXDESTINATION = b'100'
# Tag 101
# Tag 102
TAG_CXLREJREASON = b'102'
# FIX protocol tag numbers and enumerated field values, kept as byte strings
# so they can be spliced directly into SOH-delimited FIX messages.
CXLREJREASON_TOO_LATE_TO_CANCEL = b'0'
CXLREJREASON_UNKNOWN_ORDER = b'1'
CXLREJREASON_BROKER_OPTION = b'2'
CXLREJREASON_ORDER_ALREADY_PENDING_CANCEL = b'3'
# Tag 103
TAG_ORDERREJREASON = b'103'
ORDERREJREASON_BROKER_OPTION = b'0'
ORDERREJREASON_UNKNOWN_SYMBOL = b'1'
ORDERREJREASON_EXCHANGE_CLOSED = b'2'
ORDERREJREASON_ORDER_EXCEEDS_LIMIT = b'3'
ORDERREJREASON_TOO_LATE_TO_ENTER = b'4'
ORDERREJREASON_UNKNOWN_ORDER = b'5'
ORDERREJREASON_DUPLICATE_ORDER = b'6'
ORDERREJREASON_DUPLICATE_OF_VERBALLY_COMMUNICATED_ORDER = b'7'
ORDERREJREASON_STALE_ORDER = b'8'
# Tag 104
TAG_IOIQUALIFIER = b'104'
IOIQUALIFIER_ALL_OR_NONE = b'A'
IOIQUALIFIER_AT_THE_CLOSE = b'C'
IOIQUALIFIER_IN_TOUCH_WITH = b'I'
IOIQUALIFIER_LIMIT = b'L'
IOIQUALIFIER_MORE_BEHIND = b'M'
IOIQUALIFIER_AT_THE_OPEN = b'O'
IOIQUALIFIER_TAKING_A_POSITION = b'P'
IOIQUALIFIER_AT_THE_MARKET = b'Q'
IOIQUALIFIER_PORTFOLIO_SHOWN = b'S'
IOIQUALIFIER_THROUGH_THE_DAY = b'T'
IOIQUALIFIER_VERSUS = b'V'
IOIQUALIFIER_INDICATION_WORKING_AWAY = b'W'
IOIQUALIFIER_CROSSING_OPPORTUNITY = b'X'
IOIQUALIFIER_AT_THE_MIDPOINT = b'Y'
IOIQUALIFIER_PRE_OPEN = b'Z'
# Tag 105
TAG_WAVENO = b'105'
# Tag 106
TAG_ISSUER = b'106'
# Tag 107
TAG_SECURITYDESC = b'107'
# Tag 108
TAG_HEARTBTINT = b'108'
# Tag 109
TAG_CLIENTID = b'109'
# Tag 110
TAG_MINQTY = b'110'
# Tag 111
TAG_MAXFLOOR = b'111'
# Tag 112
TAG_TESTREQID = b'112'
# Tag 113
TAG_REPORTTOEXCH = b'113'
# Tag 114
TAG_LOCATEREQD = b'114'
# Tag 115
TAG_ONBEHALFOFCOMPID = b'115'
# Tag 116
TAG_ONBEHALFOFSUBID = b'116'
# Tag 117
TAG_QUOTEID = b'117'
# Tag 118
TAG_NETMONEY = b'118'
# Tag 119
TAG_SETTLCURRAMT = b'119'
# Tag 120
TAG_SETTLCURRENCY = b'120'
# Tag 122
TAG_ORIGSENDINGTIME = b'122'
# Tag 123
TAG_GAPFILLFLAG = b'123'
GAPFILLFLAG_NO = b'N'
GAPFILLFLAG_YES = b'Y'
# Tag 131
TAG_QUOTEREQID = b'131'
# Tag 132
TAG_BIDPX = b'132'
# Tag 133
# NOTE: FIX tag 133 is named "AskPx" in the FIX specification; TAG_ASKBX is a
# historical misspelling kept for backward compatibility.  New code should use
# the correctly spelled alias TAG_ASKPX below.
TAG_ASKBX = b'133'
TAG_ASKPX = TAG_ASKBX
# Tag 141
TAG_RESETSEQNUMFLAG = b'141'
RESETSEQNUMFLAG_NO = b'N'
RESETSEQNUMFLAG_YES = b'Y'
# Tag 150
TAG_EXECTYPE = b'150'
EXECTYPE_NEW = b'0'
EXECTYPE_PARTIAL_FILL = b'1'
EXECTYPE_FILL = b'2'
EXECTYPE_DONE_FOR_DAY = b'3'
EXECTYPE_CANCELED = b'4'
EXECTYPE_REPLACE = b'5'
EXECTYPE_PENDING_CANCEL = b'6'
EXECTYPE_STOPPED = b'7'
EXECTYPE_REJECTED = b'8'
EXECTYPE_SUSPENDED = b'9'
EXECTYPE_PENDING_NEW = b'A'
EXECTYPE_CALCULATED = b'B'
EXECTYPE_EXPIRED = b'C'
EXECTYPE_RESTATED = b'D'
EXECTYPE_PENDING_REPLACE = b'E'
# Tag 151
TAG_LEAVESQTY = b'151'
# Tag 152
TAG_CASHORDERQTY = b'152'
# Tag 167
TAG_SECURITYTYPE = b'167'
SECURITYTYPE_WILDCARD_ENTRY = b'?'
SECURITYTYPE_BANKERS_ACCEPTANCE = b'BA'
SECURITYTYPE_CONVERTIBLE_BOND = b'CB'
SECURITYTYPE_CERTIFICATE_OF_DEPOSIT = b'CD'
SECURITYTYPE_COLLATERALIZE_MORTGAGE_OBLIGATION = b'CMO'
SECURITYTYPE_CORPORATE_BOND = b'CORP'
SECURITYTYPE_COMMERCIAL_PAPER = b'CP'
SECURITYTYPE_CORPORATE_PRIVATE_PLACEMENT = b'CPP'
SECURITYTYPE_COMMON_STOCK = b'CS'
SECURITYTYPE_FEDERAL_HOUSING_AUTHORITY = b'FHA'
SECURITYTYPE_FEDERAL_HOME_LOAN = b'FHL'
SECURITYTYPE_FEDERAL_NATIONAL_MORTGAGE_ASSOCIATION = b'FN'
SECURITYTYPE_FOREIGN_EXCHANGE_CONTRACT = b'FOR'
SECURITYTYPE_FUTURE = b'FUT'
SECURITYTYPE_GOVERNMENT_NATIONAL_MORTGAGE_ASSOCIATION = b'GN'
SECURITYTYPE_TREASURIES_PLUS_AGENCY_DEBENTURE = b'GOVT'
SECURITYTYPE_MORTGAGE_IOETTE = b'IET'
SECURITYTYPE_MUTUAL_FUND = b'MF'
SECURITYTYPE_MORTGAGE_INTEREST_ONLY = b'MIO'
SECURITYTYPE_MORTGAGE_PRINCIPAL_ONLY = b'MPO'
SECURITYTYPE_MORTGAGE_PRIVATE_PLACEMENT = b'MPP'
SECURITYTYPE_MISCELLANEOUS_PASS_THRU = b'MPT'
SECURITYTYPE_MUNICIPAL_BOND = b'MUNI'
SECURITYTYPE_NO_ISITC_SECURITY_TYPE = b'NONE'
SECURITYTYPE_OPTION = b'OPT'
SECURITYTYPE_PREFERRED_STOCK = b'PS'
SECURITYTYPE_REPURCHASE_AGREEMENT = b'RP'
SECURITYTYPE_REVERSE_REPURCHASE_AGREEMENT = b'RVRP'
SECURITYTYPE_STUDENT_LOAN_MARKETING_ASSOCIATION = b'SL'
SECURITYTYPE_TIME_DEPOSIT = b'TD'
SECURITYTYPE_US_TREASURY_BILL = b'USTB'
SECURITYTYPE_WARRANT = b'WAR'
SECURITYTYPE_CATS_TIGERS_LIONS = b'ZOO'
# Tag 297
TAG_QUOTESTATUS = b'297'
QUOTESTATUS_ACCEPTED = b'0'
QUOTESTATUS_CANCELED_FOR_SYMBOL = b'1'
QUOTESTATUS_CANCELED_FOR_SECURITY_TYPE = b'2'
QUOTESTATUS_CANCELED_FOR_UNDERLYING = b'3'
QUOTESTATUS_CANCELED_ALL = b'4'
QUOTESTATUS_REJECTED = b'5'
QUOTESTATUS_REMOVED_FROM_MARKET = b'6'
QUOTESTATUS_EXPIRED = b'7'
QUOTESTATUS_QUERY = b'8'
QUOTESTATUS_QUOTE_NOT_FOUND = b'9'
QUOTESTATUS_PENDING = b'10'
QUOTESTATUS_PASS = b'11'
QUOTESTATUS_LOCKED_MARKET_WARNING = b'12'
QUOTESTATUS_CROSS_MARKET_WARNING = b'13'
QUOTESTATUS_CANCELED_DUE_TO_LOCK_MARKET = b'14'
QUOTESTATUS_CANCELED_DUE_TO_CROSS_MARKET = b'15'
# Tag 373
TAG_SESSIONREJECTREASON = b'373'
SESSIONREJECTREASON_INVALID_TAG_NUMBER = b'0'
SESSIONREJECTREASON_REQUIRED_TAG_MISSING = b'1'
SESSIONREJECTREASON_TAG_NOT_DEFINED_FOR_THIS_MESSAGE_TYPE = b'2'
SESSIONREJECTREASON_UNDEFINED_TAG = b'3'
SESSIONREJECTREASON_TAG_SPECIFIED_WITHOUT_A_VALUE = b'4'
SESSIONREJECTREASON_VALUE_INCORRECT_FOR_THIS_TAG = b'5'
SESSIONREJECTREASON_INCORRECT_DATA_FORMAT_FOR_VALUE = b'6'
SESSIONREJECTREASON_DECRYPTION_PROBLEM = b'7'
SESSIONREJECTREASON_SIGNATURE_PROBLEM = b'8'
SESSIONREJECTREASON_COMPID_PROBLEM = b'9'
SESSIONREJECTREASON_SENDINGTIME_ACCURACY_PROBLEM = b'10'
SESSIONREJECTREASON_INVALID_MSGTYPE = b'11'
SESSIONREJECTREASON_XML_VALIDATION_ERROR = b'12'
SESSIONREJECTREASON_TAG_APPEARS_MORE_THAN_ONCE = b'13'
SESSIONREJECTREASON_TAG_SPECIFIED_OUT_OF_REQUIRED_ORDER = b'14'
SESSIONREJECTREASON_REPEATING_GROUP_FIELDS_OUT_OF_ORDER = b'15'
SESSIONREJECTREASON_INCORRECT_NUMINGROUP_COUNT = b'16'
SESSIONREJECTREASON_NON_DATA_VALUE_INCLUDES_FIELD_DELIMITER = b'17'
SESSIONREJECTREASON_OTHER = b'99'
# Tag 423
TAG_PRICETYPE = b'423'
PRICETYPE_PERCENTAGE = b'1'
PRICETYPE_PER_UNIT = b'2'
PRICETYPE_FIXED_AMOUNT = b'3'
# Tag 434
TAG_CXLREJRESPONSETO = b'434'
CXLREJRESPONSETO_ORDER_CANCEL_REQUEST = b'1'
CXLREJRESPONSETO_ORDER_CANCEL_REPLACE_REQUEST = b'2'
# Tag 658
TAG_QUOTEREQUESTREJECTREASON = b'658'
QUOTEREQUESTREJECTREASON_UNKNOWN_SYMBOL = b'1'
QUOTEREQUESTREJECTREASON_EXCHANGE = b'2'
QUOTEREQUESTREJECTREASON_QUOTE_REQUEST_EXCEEDS_LIMIT = b'3'
QUOTEREQUESTREJECTREASON_TOO_LATE_TO_ENTER = b'4'
QUOTEREQUESTREJECTREASON_INVALID_PRICE = b'5'
QUOTEREQUESTREJECTREASON_NOT_AUTHORIZED_TO_REQUEST_QUOTE = b'6'
QUOTEREQUESTREJECTREASON_NO_MATCH_FOR_INQUIRY = b'7'
QUOTEREQUESTREJECTREASON_NO_MARKET_FOR_INSTRUMENT = b'8'
QUOTEREQUESTREJECTREASON_NO_INVENTORY = b'9'
QUOTEREQUESTREJECTREASON_PASS = b'10'
QUOTEREQUESTREJECTREASON_OTHER = b'99'
# Tag 693
TAG_QUOTERESPID = b'693'
# Tag 2759
TAG_GROUPAMOUNT = b'2759'
# Tag 2760
TAG_GROUP_REMAINING_AMOUNT = b'2760'
# Tag 2761
TAG_ALLOCGROUPAMOUNT = b'2761'
| |
__author__ = 'Zhang Shaojun'
import struct
import cfg
import msg_proto
from cfg import LOG
"""
# scapy form of message
# align to 64 bits
class TFLCPHeader(Packet):
name = "tflc header"
fields_desc = [
XByteField("version", cfg.TFLC_VERSION_1),
ByteEnumField("type", msg_proto.TFLCT_HELLO_UP, msg_proto.tflcp_type),
ShortField("length", msg_proto.TFLCP_HEADER_SIZE),
IntField("xid", 1)
]
# HELLO_UP --> up
class TFLCPHelloUp(Packet):
name = "tflc hello up"
fields_desc = []
bind_layers(TFLCPHeader, TFLCPHelloUp, type=msg_proto.TFLCT_HELLO_UP)
# HELLO_DOWN --> down
class TFLCPHelloDown(Packet):
name = "tflc hello down"
fields_desc = [
BitField('lcid', None, 32),
IntField('pad', 0)
]
bind_layers(TFLCPHeader, TFLCPHelloDown, type=msg_proto.TFLCT_HELLO_DOWN)
# DATAPATH_CONNECTED --> up
class TFLCPDPConnected(Packet):
name = "tflc datapath connected"
fields_desc = [
BitField('dpid', None, 64)
]
bind_layers(TFLCPHeader, TFLCPDPConnected, type=msg_proto.TFLCT_DATAPATH_CONNECTED)
# ROLE_ASSIGN --> down
class TFLCPRoleAssign(Packet):
name = "tflc role assign"
fields_desc = [
BitField('dpid', None, 64),
BitField('lcid', None, 32),
IntEnumField('role', msg_proto.OFPCR_ROLE_EQUAL, msg_proto.ofp_controller_role),
BitField('gid', 0, 64)
]
bind_layers(TFLCPHeader, TFLCPRoleAssign, type=msg_proto.TFLCT_ROLE_ASSIGN)
# GID_REQUEST --> up
class TFLCPGidRequest(Packet):
name = "tflc gid request"
fields_desc = [
BitField('dpid', None, 64)
]
bind_layers(TFLCPHeader, TFLCPGidRequest, type=msg_proto.TFLCT_GID_REQUEST)
# GID_REPLY --> down
class TFLCPGidReply(Packet):
name = "tflc gid reply"
fields_desc = [
BitField('dpid', None, 64),
BitField('gid', 0, 64)
]
bind_layers(TFLCPHeader, TFLCPGidReply, type=msg_proto.TFLCT_GID_REPLY)
# PACKET_IN --> up
class TFLCPPackerIn(Packet):
name = "tflc packet in"
fields_desc = [
BitField('in_dpid', None, 64),
ShortField('total_len', 0),
ShortEnumField('reason', msg_proto.TFLCR_PIN_NO_MATCH, msg_proto.tflc_packet_in_reason),
IntField('pad', 0)
]
bind_layers(TFLCPHeader, TFLCPPackerIn, type=msg_proto.TFLCT_PACKET_IN)
# FLOW_MOD --> down
class TFLCPFlowMod(Packet):
name = "tflc flow mod"
fields_desc = []
bind_layers(TFLCPHeader, TFLCPFlowMod, type=msg_proto.TFLCT_FLOW_MOD)
# LOAD_REPORT --> up
class TFLCPLoadReport(Packet):
name = "tflc load report"
fields_desc = [
BitField('dpid', None, 64),
BitField('pkt_in_cnt', 0, 64)
]
bind_layers(TFLCPHeader, TFLCPLoadReport, type=msg_proto.TFLCT_LOAD_REPORT)
# DATAPATH_MIGRATION --> down
class TFLCPDpMigration(Packet):
name = "tflc datapath migration"
fields_desc = [
BitField('src_lcid', None, 32),
BitField('dst_lcid', None, 32),
BitField('m_dpid', None, 64)
]
bind_layers(TFLCPHeader, TFLCPDpMigration, type=msg_proto.TFLCT_DATAPATH_MIGRATION)
# CONTRL_POOL_CHANGE --> down
class TFLCPCtrlPoolChange(Packet):
name = "tflc controller pool change"
fields_desc = []
bind_layers(TFLCPHeader, TFLCPCtrlPoolChange, type=msg_proto.TFLCT_CONTRL_POOL_CHANGE)
# ROLE_NOTIFY --> up
class TFLCPRoleNotify(Packet):
name = "tflc role notify"
fields_desc = [
BitField('dpid', None, 64),
BitField('lcid', None, 32),
IntEnumField('role', msg_proto.OFPCR_ROLE_EQUAL, msg_proto.ofp_controller_role)
]
bind_layers(TFLCPHeader, TFLCPRoleNotify, type=msg_proto.TFLCT_ROLE_NOTIFY)
# ECHO_REQUEST --> down
class TFLCPEchoRequest(Packet):
name = "tflc echo request"
fields_desc = [
BitField('time_stamp', 0, 64)
]
bind_layers(TFLCPHeader, TFLCPEchoRequest, type=msg_proto.TFLCT_ECHO_REQUEST)
# ECHO_REPLY --> down
class TFLCPEchoReply(Packet):
name = "tflc echo reply"
fields_desc = [
BitField('time_stamp', 0, 64)
]
bind_layers(TFLCPHeader, TFLCPEchoReply, type=msg_proto.TFLCT_ECHO_REPLY)
# ERROR --> up
class TFLCPError(Packet):
name = "tflc error"
fields_desc = [
IntEnumField('type', 0, msg_proto.tflc_error_type),
IntField('code', 0)
]
"""
# downlink message base class
class MsgDownBase(object):
    """Base class for controller-to-switch (downlink) TFLCP messages.

    Subclasses set ``self.msg_type`` and override ``_serialize_body`` to
    append their payload after the common fixed-size header.
    """
    def __init__(self, local_ctrl):
        super(MsgDownBase, self).__init__()
        self.local_ctrl = local_ctrl
        self.version = None
        self.msg_type = None
        self.msg_len = None
        self.xid = None
        # Serialized wire representation; filled in by serialize().
        self.buf = None
    def set_xid(self, xid):
        # Transaction id echoed back by the peer; defaults to 0 when unset.
        self.xid = xid
    def _serialize_pre(self):
        # Reserve space for the fixed-size header; body bytes are appended
        # after it by _serialize_body().
        self.version = cfg.TFLC_VERSION_1
        self.buf = bytearray(msg_proto.TFLCP_HEADER_SIZE)
    def _serialize_header(self):
        # Written last so msg_len reflects the final (header + body) size.
        self.msg_len = len(self.buf)
        if self.xid is None:
            self.xid = 0
        struct.pack_into(msg_proto.TFLCP_HEADER_PACK_STR, self.buf, 0,
                         self.version, self.msg_type, self.msg_len, self.xid)
    def _serialize_body(self):
        # Default: header-only message. Subclasses append their payload here.
        pass
    def serialize(self):
        """Build self.buf; order matters: body before header (for msg_len)."""
        self._serialize_pre()
        self._serialize_body()
        self._serialize_header()
#uplink message base class
class MsgUpBase(object):
    """Base class for switch-to-controller (uplink) TFLCP messages.

    The header fields are parsed by the caller and simply stored here
    together with the raw buffer they came from.
    """
    def __init__(self, local_ctrl, version = None, msg_type = None, msg_len = None, xid = None, buf = None):
        super(MsgUpBase, self).__init__()
        self.local_ctrl = local_ctrl
        # Header fields as decoded from the wire (all optional).
        self.buf = buf
        self.xid = xid
        self.msg_len = msg_len
        self.msg_type = msg_type
        self.version = version
# pack function
def msg_pack_into(fmt, buf, offset, *args):
    """Pack *args with struct format ``fmt`` into ``buf`` at ``offset``.

    ``buf`` (a bytearray) is grown in place with zero padding whenever it is
    too short, so packing past the current end is safe.
    """
    # Zero-pad up to the requested offset if the buffer is shorter.
    if len(buf) < offset:
        buf.extend(bytearray(offset - len(buf)))
    # Appending exactly at the end: a plain pack-and-extend suffices.
    if len(buf) == offset:
        buf.extend(struct.pack(fmt, *args))
        return
    # Overwriting inside the buffer: grow to fit, then pack in place.
    end = offset + struct.calcsize(fmt)
    if len(buf) < end:
        buf.extend(bytearray(end - len(buf)))
    struct.pack_into(fmt, buf, offset, *args)
# header parser
def header(buf):
    """Unpack the common TFLCP header from ``buf``.

    Returns the tuple described by TFLCP_HEADER_PACK_STR — per the pack
    sites in this module: (version, msg_type, msg_len, xid).
    NOTE: buffer() is Python 2 only; this module is Python 2 code.
    """
    assert len(buf) >= msg_proto.TFLCP_HEADER_SIZE
    return struct.unpack_from(msg_proto.TFLCP_HEADER_PACK_STR, buffer(buf))
# the controller address for controller pool change
class TFLCPLocalCtrlAddr(object):
    """Address record for one local controller.

    Used when listing controllers in a CONTRL_POOL_CHANGE message.
    """
    def __init__(self, type, ip_addr, port):
        super(TFLCPLocalCtrlAddr, self).__init__()
        # NOTE: parameter name `type` shadows the builtin but is kept for
        # backward compatibility with existing callers.
        self.port = port
        self.ip_addr = ip_addr
        self.type = type
# messages
# align to 64 bits?
# HELLO_UP --> up
class TFLCPHelloUp(MsgUpBase):
    """HELLO_UP (uplink): initial handshake message; header only, no body."""
    def __init__(self, local_ctrl):
        super(TFLCPHelloUp, self).__init__(local_ctrl)
# HELLO_DOWN --> down
class TFLCPHelloDown(MsgDownBase):
    """HELLO_DOWN (downlink): handshake reply carrying the assigned lcid."""
    def __init__(self, local_ctrl, lcid = None):
        super(TFLCPHelloDown, self).__init__(local_ctrl)
        self.msg_type = msg_proto.TFLCT_HELLO_DOWN
        self.lcid = lcid
    def _serialize_body(self):
        msg_pack_into(msg_proto.TFLCP_HELLO_DOWN_PACK_STR, self.buf, msg_proto.TFLCP_HEADER_SIZE, self.lcid)
# DATAPATH_CONNECTED --> up
class TFLCPDPConnected(MsgUpBase):
    """DATAPATH_CONNECTED (uplink): reports that datapath ``dpid`` attached."""
    def __init__(self, local_ctrl, dpid = None, is_window = False):
        super(TFLCPDPConnected, self).__init__(local_ctrl)
        self.dpid = dpid
        # is_window is local bookkeeping; it is not part of the wire format
        # (the scapy reference above carries only dpid).
        self.is_window = is_window
# ROLE_ASSIGN --> down
class TFLCPRoleAssign(MsgDownBase):
    """ROLE_ASSIGN (downlink): assign controller ``lcid`` a role for ``dpid``."""
    def __init__(self, local_ctrl, dpid = None, lcid = None, role = None, gid = None):
        super(TFLCPRoleAssign, self).__init__(local_ctrl)
        self.msg_type = msg_proto.TFLCT_ROLE_ASSIGN
        self.dpid = dpid
        self.lcid = lcid
        self.role = role
        self.gid = gid
    def _serialize_body(self):
        msg_pack_into(msg_proto.TFLCP_ROLE_ASSIGN_PACK_STR, self.buf, msg_proto.TFLCP_HEADER_SIZE,
                      self.dpid, self.lcid, self.role, self.gid)
# GID_REQUEST --> up
class TFLCPGidRequest(MsgUpBase):
    """GID_REQUEST (uplink): ask for the group id of datapath ``dpid``."""
    def __init__(self, local_ctrl, dpid = None):
        super(TFLCPGidRequest, self).__init__(local_ctrl)
        self.dpid = dpid
# GID_REPLY --> down
class TFLCPGidReply(MsgDownBase):
    """GID_REPLY (downlink): answer a GID_REQUEST with (dpid, gid)."""
    def __init__(self, local_ctrl, dpid = None, gid = None):
        super(TFLCPGidReply, self).__init__(local_ctrl)
        self.msg_type = msg_proto.TFLCT_GID_REPLY
        self.dpid = dpid
        self.gid = gid
    def _serialize_body(self):
        msg_pack_into(msg_proto.TFLCP_GID_REPLY_PACK_STR, self.buf, msg_proto.TFLCP_HEADER_SIZE, self.dpid, self.gid)
# PACKET_IN --> up
"""
class TFLCPPackerIn(MsgUpBase):
def __init__(self, local_ctrl, in_dpid = None, total_len = None, reason = None, package = None):
super(TFLCPPackerIn, self).__init__(local_ctrl)
self.in_dpid = in_dpid
self.total_len = total_len
self.reason = reason
self.package = package
"""
# currently only the second layer is considered.
# so the Packet in msg only contains the in_dpid/src mac/dst mac
class TFLCPPacketIn(MsgUpBase):
    """PACKET_IN (uplink), simplified to layer 2: ingress dpid + MAC pair."""
    def __init__(self, local_ctrl, in_dpid = None, src_mac = None, dst_mac = None):
        super(TFLCPPacketIn, self).__init__(local_ctrl)
        self.in_dpid = in_dpid
        self.src_mac = src_mac # 48 bits
        self.dst_mac = dst_mac # 48 bits
# FLOW_MOD --> down
# flow_mod = header/wildcards/match/flow_mod/action
# simplified flow_mod only considers the second layer
# so only src/dst mac and the out_dpid is included
class TFLCPFlowMod(MsgDownBase):
    """FLOW_MOD (downlink), simplified to layer 2.

    Carries only the ingress/egress dpids, the destination MAC and the
    wildcards mask (see the module comment above for the rationale).
    """
    def __init__(self, local_ctrl, in_dpid = None, out_dpid = None, dst_mac = None, wildcards = None):
        super(TFLCPFlowMod, self).__init__(local_ctrl)
        self.msg_type = msg_proto.TFLCT_FLOW_MOD
        self.in_dpid = in_dpid
        self.out_dpid = out_dpid
        self.dst_mac = dst_mac
        self.wildcards = wildcards
    def _serialize_body(self):
        msg_pack_into(msg_proto.TFLCP_FLOW_MOD_PACK_STR, self.buf, msg_proto.TFLCP_HEADER_SIZE,
                      self.in_dpid, self.out_dpid, self.dst_mac, self.wildcards)
# LOAD_REPORT --> up
class TFLCPLoadReport(MsgUpBase):
    """LOAD_REPORT (uplink): packet-in counter for ``dpid`` (load metric)."""
    def __init__(self, local_ctrl, dpid = None, pkt_in_cnt = None):
        super(TFLCPLoadReport, self).__init__(local_ctrl)
        self.dpid = dpid
        self.pkt_in_cnt = pkt_in_cnt
# DATAPATH_MIGRATION --> down
class TFLCPDpMigration(MsgDownBase):
    """DATAPATH_MIGRATION (downlink): move ``m_dpid`` from ``src_lcid`` to ``dst_lcid``."""
    def __init__(self, local_ctrl, src_lcid = None, dst_lcid = None, m_dpid = None):
        super(TFLCPDpMigration, self).__init__(local_ctrl)
        self.msg_type = msg_proto.TFLCT_DATAPATH_MIGRATION
        self.src_lcid = src_lcid
        self.dst_lcid = dst_lcid
        self.m_dpid = m_dpid
    def _serialize_body(self):
        # BUG FIX: this previously packed with TFLCP_DATAPATH_CONNECTED_PACK_STR,
        # which (per the scapy reference at the top of this module) describes a
        # single 64-bit dpid — not the (src_lcid:32, dst_lcid:32, m_dpid:64)
        # layout of a migration message. Use the migration pack string.
        # TODO(review): confirm msg_proto defines TFLCP_DATAPATH_MIGRATION_PACK_STR.
        msg_pack_into(msg_proto.TFLCP_DATAPATH_MIGRATION_PACK_STR, self.buf, msg_proto.TFLCP_HEADER_SIZE,
                      self.src_lcid, self.dst_lcid, self.m_dpid)
# CONTRL_POOL_CHANGE --> down
class TFLCPCtrlPoolChange(MsgDownBase):
    """CONTRL_POOL_CHANGE (downlink): updated list of local controllers.

    Body = (dpid, lc_cnt) followed by one packed address record per entry
    of ``lc_list`` (TFLCPLocalCtrlAddr instances).
    """
    def __init__(self, local_ctrl, dpid = None, lc_cnt = None, lc_list = None):
        super(TFLCPCtrlPoolChange, self).__init__(local_ctrl)
        self.msg_type = msg_proto.TFLCT_CONTRL_POOL_CHANGE
        self.dpid = dpid
        self.lc_cnt = lc_cnt
        self.lc_list = lc_list
    def _serialize_body(self):
        msg_pack_into(msg_proto.TFLCP_CONTRL_POOL_CHANGE_PACK_STR, self.buf, msg_proto.TFLCP_HEADER_SIZE,
                      self.dpid, self.lc_cnt)
        for lc in self.lc_list:
            # BUG FIX: struct.pack cannot serialize a TFLCPLocalCtrlAddr object
            # directly (it raises struct.error); pack its fields explicitly.
            self.buf += struct.pack(msg_proto.TFLCP_LOCAL_CTRL_ADDRESS_PACK_STR,
                                    lc.type, lc.ip_addr, lc.port)
# ROLE_NOTIFY --> up
class TFLCPRoleNotify(MsgUpBase):
    """ROLE_NOTIFY (uplink): report the role ``lcid`` now holds for ``dpid``."""
    def __init__(self, local_ctrl, dpid = None, lcid = None, role = None):
        super(TFLCPRoleNotify, self).__init__(local_ctrl)
        self.dpid = dpid
        self.lcid = lcid
        self.role = role
# ECHO_REQUEST --> down
class TFLCPEchoRequest(MsgDownBase):
    """ECHO_REQUEST (downlink): liveness probe carrying a timestamp."""
    def __init__(self, local_ctrl, timestamp = None):
        super(TFLCPEchoRequest, self).__init__(local_ctrl)
        self.msg_type = msg_proto.TFLCT_ECHO_REQUEST
        self.timestamp = timestamp
    def _serialize_body(self):
        msg_pack_into(msg_proto.TFLCP_ECHO_REQUEST_PACK_STR, self.buf, msg_proto.TFLCP_HEADER_SIZE, self.timestamp)
# ECHO_REPLY --> up
class TFLCPEchoReply(MsgUpBase):
    """ECHO_REPLY (uplink): echoes the timestamp from an ECHO_REQUEST."""
    def __init__(self, local_ctrl, timestamp = None):
        super(TFLCPEchoReply, self).__init__(local_ctrl)
        self.timestamp = timestamp
# ERROR --> up
class TFLCPError(MsgUpBase):
    """ERROR (uplink): error type/code plus optional opaque data."""
    def __init__(self, local_ctrl, type = None, code = None, data = None):
        super(TFLCPError, self).__init__(local_ctrl)
        # NOTE: `type` shadows the builtin; kept for caller compatibility.
        self.type = type
        self.code = code
        self.data = data
# BARRIER_REQUEST --> down
class TFLCPBarrierRequest(MsgDownBase):
    """BARRIER_REQUEST (downlink): header-only synchronization request."""
    def __init__(self, local_ctrl):
        super(TFLCPBarrierRequest, self).__init__(local_ctrl)
        self.msg_type = msg_proto.TFLCT_BARRIER_REQUEST
# BARRIER_REPLY --> up
class TFLCPBarrierReply(MsgUpBase):
    """BARRIER_REPLY (uplink): header-only answer to a BARRIER_REQUEST."""
    def __init__(self, local_ctrl):
        super(TFLCPBarrierReply, self).__init__(local_ctrl)
# HOST_CONNECTED --> up
class TFLCPHostConnected(MsgUpBase):
    """HOST_CONNECTED (uplink): host ``mac`` appeared on datapath ``dpid``."""
    def __init__(self, local_ctrl, dpid = None, mac = None):
        super(TFLCPHostConnected, self).__init__(local_ctrl)
        self.dpid = dpid
        self.mac = mac
# DATAPATH_LEAVE --> up
class TFLCPDatapathLeave(MsgUpBase):
    """DATAPATH_LEAVE (uplink): datapath ``dpid`` disconnected."""
    def __init__(self, local_ctrl, dpid = None):
        super(TFLCPDatapathLeave, self).__init__(local_ctrl)
        self.dpid = dpid
# HOST_LEAVE --> up
class TFLCPHostLeave(MsgUpBase):
    """HOST_LEAVE (uplink): host ``mac`` left datapath ``dpid``.

    NOTE(review): this sets ``msg_type`` although MsgUpBase never serializes
    it (only MsgDownBase does) — harmless, but inconsistent with the other
    uplink classes; confirm whether it is relied upon anywhere.
    """
    def __init__(self, cc_agent, dpid = None, mac = None):
        super(TFLCPHostLeave, self).__init__(cc_agent)
        self.msg_type = msg_proto.TFLCT_HOST_LEAVE
        self.dpid = dpid
        self.mac = mac
#************************************************************************************************************
# class TFLCPSimpleFlowMod(MsgUpBase):
# def __init__(self, cc_agent, dst_ip=None, dst_mask=None, dst_mac=None):
# super(TFLCPSimpleFlowMod, self).__init__(cc_agent)
# self.msg_type = msg_proto.TFLCT_SIMPLE_FLOW_MOD
# self.dst_ip = dst_ip # Q - 8 bytes
# self.dst_mask = dst_mask # I - 4 bytes
# # 3x - 3 bytes
# self.dst_mac = dst_mac # 17s - 17 bytes
# # print "TFLCPSimpleFlowMod initialized: ", self.in_dpid, self.out_dpid
# def _serialize_body(self):
# msg_pack_into(msg_proto.TFLCP_SIMPLE_FLOW_MOD_PACK_STR,
# self.buf, msg_proto.TFLCP_HEADER_SIZE,
# self.dst_ip,
# self.dst_mask,
# self.dst_mac)
class TFLCPSimpleFlowMod(MsgDownBase):
    """SIMPLE_FLOW_MOD (downlink): install a route dst_ip/dst_mask -> dst_mac.

    BUG FIX: this message defines ``msg_type`` and ``_serialize_body`` and is
    sent downstream, but previously inherited MsgUpBase, which has no
    ``serialize()`` machinery at all — so ``_serialize_body`` was unreachable
    and the message could never be encoded. It now inherits MsgDownBase.
    The constructor signature is unchanged (``dst_mask`` is accepted but,
    as before, not serialized by the current pack format).
    """
    def __init__(self, cc_agent, dst_ip=None, dst_mask=None, dst_mac=None):
        super(TFLCPSimpleFlowMod, self).__init__(cc_agent)
        self.msg_type = msg_proto.TFLCT_SIMPLE_FLOW_MOD
        self.dst_ip = dst_ip  # 18s - 18 bytes
        # 5x - 5 bytes of padding in the pack format
        self.dst_mac = dst_mac  # 17s - 17 bytes
        # total body: 40 bytes
    def _serialize_body(self):
        msg_pack_into(msg_proto.TFLCP_SIMPLE_FLOW_MOD_PACK_STR,
                      self.buf, msg_proto.TFLCP_HEADER_SIZE,
                      self.dst_ip,
                      self.dst_mac)
#************************************************************************************************************
| |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
from __future__ import annotations
import logging
from datetime import datetime, timedelta
from typing import Any, Dict, List, NamedTuple, Optional, TYPE_CHECKING
from flask_babel import gettext as _
from pandas import DataFrame
from superset.common.chart_data import ChartDataResultType
from superset.exceptions import (
QueryClauseValidationException,
QueryObjectValidationError,
)
from superset.sql_parse import validate_filter_clause
from superset.typing import Column, Metric, OrderBy
from superset.utils import pandas_postprocessing
from superset.utils.core import (
DTTM_ALIAS,
find_duplicates,
get_column_names,
get_metric_names,
is_adhoc_metric,
json_int_dttm_ser,
QueryObjectFilterClause,
)
from superset.utils.date_parser import parse_human_timedelta
from superset.utils.hashing import md5_sha_from_dict
if TYPE_CHECKING:
from superset.connectors.base.models import BaseDatasource
logger = logging.getLogger(__name__)
# TODO: Type Metrics dictionary with TypedDict when it becomes a vanilla python type
# https://github.com/python/mypy/issues/5288
class DeprecatedField(NamedTuple):
    """Maps a deprecated query-object field name to its replacement."""
    old_name: str
    new_name: str
# Top-level query-object fields that have been renamed; QueryObject copies
# their values over to the new attribute names (with a warning).
DEPRECATED_FIELDS = (
    DeprecatedField(old_name="granularity_sqla", new_name="granularity"),
    DeprecatedField(old_name="groupby", new_name="columns"),
    DeprecatedField(old_name="timeseries_limit", new_name="series_limit"),
    DeprecatedField(old_name="timeseries_limit_metric", new_name="series_limit_metric"),
)
# Fields that used to be top-level but now live inside `extras`; QueryObject
# moves them there (with a warning). Note some keep the same name.
DEPRECATED_EXTRAS_FIELDS = (
    DeprecatedField(old_name="where", new_name="where"),
    DeprecatedField(old_name="having", new_name="having"),
    DeprecatedField(old_name="having_filters", new_name="having_druid"),
    DeprecatedField(old_name="druid_time_origin", new_name="druid_time_origin"),
)
class QueryObject:  # pylint: disable=too-many-instance-attributes
    """
    The query object's schema matches the interfaces of DB connectors like sqla
    and druid. The query objects are constructed on the client.
    """

    annotation_layers: List[Dict[str, Any]]
    applied_time_extras: Dict[str, str]
    apply_fetch_values_predicate: bool
    columns: List[Column]
    datasource: Optional[BaseDatasource]
    extras: Dict[str, Any]
    filter: List[QueryObjectFilterClause]
    from_dttm: Optional[datetime]
    granularity: Optional[str]
    inner_from_dttm: Optional[datetime]
    inner_to_dttm: Optional[datetime]
    is_rowcount: bool
    is_timeseries: bool
    metrics: Optional[List[Metric]]
    order_desc: bool
    orderby: List[OrderBy]
    post_processing: List[Dict[str, Any]]
    result_type: Optional[ChartDataResultType]
    row_limit: int
    row_offset: int
    series_columns: List[Column]
    series_limit: int
    series_limit_metric: Optional[Metric]
    time_offsets: List[str]
    time_shift: Optional[timedelta]
    time_range: Optional[str]
    to_dttm: Optional[datetime]

    def __init__(  # pylint: disable=too-many-locals
        self,
        *,
        annotation_layers: Optional[List[Dict[str, Any]]] = None,
        applied_time_extras: Optional[Dict[str, str]] = None,
        apply_fetch_values_predicate: bool = False,
        columns: Optional[List[Column]] = None,
        datasource: Optional[BaseDatasource] = None,
        extras: Optional[Dict[str, Any]] = None,
        filters: Optional[List[QueryObjectFilterClause]] = None,
        granularity: Optional[str] = None,
        is_rowcount: bool = False,
        is_timeseries: Optional[bool] = None,
        metrics: Optional[List[Metric]] = None,
        order_desc: bool = True,
        orderby: Optional[List[OrderBy]] = None,
        post_processing: Optional[List[Optional[Dict[str, Any]]]] = None,
        row_limit: int,
        row_offset: Optional[int] = None,
        series_columns: Optional[List[Column]] = None,
        series_limit: int = 0,
        series_limit_metric: Optional[Metric] = None,
        time_range: Optional[str] = None,
        time_shift: Optional[str] = None,
        **kwargs: Any,
    ):
        self._set_annotation_layers(annotation_layers)
        self.applied_time_extras = applied_time_extras or {}
        self.apply_fetch_values_predicate = apply_fetch_values_predicate or False
        self.columns = columns or []
        self.datasource = datasource
        self.extras = extras or {}
        self.filter = filters or []
        self.granularity = granularity
        self.is_rowcount = is_rowcount
        self._set_is_timeseries(is_timeseries)
        self._set_metrics(metrics)
        self.order_desc = order_desc
        self.orderby = orderby or []
        self._set_post_processing(post_processing)
        self.row_limit = row_limit
        self.row_offset = row_offset or 0
        self._init_series_columns(series_columns, metrics, is_timeseries)
        self.series_limit = series_limit
        self.series_limit_metric = series_limit_metric
        self.time_range = time_range
        self.time_shift = parse_human_timedelta(time_shift)
        self.from_dttm = kwargs.get("from_dttm")
        self.to_dttm = kwargs.get("to_dttm")
        self.result_type = kwargs.get("result_type")
        self.time_offsets = kwargs.get("time_offsets", [])
        self.inner_from_dttm = kwargs.get("inner_from_dttm")
        self.inner_to_dttm = kwargs.get("inner_to_dttm")
        self._rename_deprecated_fields(kwargs)
        self._move_deprecated_extra_fields(kwargs)

    def _set_annotation_layers(
        self, annotation_layers: Optional[List[Dict[str, Any]]]
    ) -> None:
        """Store annotation layers, dropping FORMULA layers."""
        self.annotation_layers = [
            layer
            for layer in (annotation_layers or [])
            # formula annotations don't affect the payload, hence can be dropped
            if layer["annotationType"] != "FORMULA"
        ]

    def _set_is_timeseries(self, is_timeseries: Optional[bool]) -> None:
        """Derive is_timeseries when not given explicitly."""
        # is_timeseries is True if time column is in either columns or groupby
        # (both are dimensions)
        self.is_timeseries = (
            is_timeseries if is_timeseries is not None else DTTM_ALIAS in self.columns
        )

    def _set_metrics(self, metrics: Optional[List[Metric]] = None) -> None:
        """Normalize the metrics list; legacy dicts are coerced to labels."""
        # Support metric reference/definition in the format of
        #   1. 'metric_name'   - name of predefined metric
        #   2. { label: 'label_name' }  - legacy format for a predefined metric
        #   3. { expressionType: 'SIMPLE' | 'SQL', ... } - adhoc metric
        def is_str_or_adhoc(metric: Metric) -> bool:
            return isinstance(metric, str) or is_adhoc_metric(metric)

        self.metrics = metrics and [
            x if is_str_or_adhoc(x) else x["label"] for x in metrics  # type: ignore
        ]

    def _set_post_processing(
        self, post_processing: Optional[List[Optional[Dict[str, Any]]]]
    ) -> None:
        """Store post-processing operations, dropping falsy entries."""
        post_processing = post_processing or []
        self.post_processing = [post_proc for post_proc in post_processing if post_proc]

    def _init_series_columns(
        self,
        series_columns: Optional[List[Column]],
        metrics: Optional[List[Metric]],
        is_timeseries: Optional[bool],
    ) -> None:
        """Initialize series columns, defaulting to columns for timeseries."""
        if series_columns:
            self.series_columns = series_columns
        elif is_timeseries and metrics:
            self.series_columns = self.columns
        else:
            self.series_columns = []

    def _rename_deprecated_fields(self, kwargs: Dict[str, Any]) -> None:
        """Copy deprecated top-level kwargs over to their new attribute names."""
        # rename deprecated fields
        for field in DEPRECATED_FIELDS:
            if field.old_name in kwargs:
                logger.warning(
                    "The field `%s` is deprecated, please use `%s` instead.",
                    field.old_name,
                    field.new_name,
                )
                value = kwargs[field.old_name]
                if value:
                    if hasattr(self, field.new_name):
                        logger.warning(
                            "The field `%s` is already populated, "
                            "replacing value with contents from `%s`.",
                            field.new_name,
                            field.old_name,
                        )
                    setattr(self, field.new_name, value)

    def _move_deprecated_extra_fields(self, kwargs: Dict[str, Any]) -> None:
        """Move deprecated top-level kwargs into the `extras` dict."""
        # move deprecated extras fields to extras
        for field in DEPRECATED_EXTRAS_FIELDS:
            if field.old_name in kwargs:
                logger.warning(
                    "The field `%s` is deprecated and should "
                    "be passed to `extras` via the `%s` property.",
                    field.old_name,
                    field.new_name,
                )
                value = kwargs[field.old_name]
                if value:
                    if hasattr(self.extras, field.new_name):
                        logger.warning(
                            "The field `%s` is already populated in "
                            "`extras`, replacing value with contents "
                            "from `%s`.",
                            field.new_name,
                            field.old_name,
                        )
                    self.extras[field.new_name] = value

    @property
    def metric_names(self) -> List[str]:
        """Return metrics names (labels), coerce adhoc metrics to strings."""
        return get_metric_names(self.metrics or [])

    @property
    def column_names(self) -> List[str]:
        """Return column names (labels). Gives priority to groupbys if both groupbys
        and metrics are non-empty, otherwise returns column labels."""
        return get_column_names(self.columns)

    def validate(
        self, raise_exceptions: Optional[bool] = True
    ) -> Optional[QueryObjectValidationError]:
        """Validate query object"""
        try:
            self._validate_there_are_no_missing_series()
            self._validate_no_have_duplicate_labels()
            self._validate_filters()
            return None
        except QueryObjectValidationError as ex:
            if raise_exceptions:
                raise ex
            return ex

    def _validate_no_have_duplicate_labels(self) -> None:
        """Raise if any metric/column label appears more than once."""
        all_labels = self.metric_names + self.column_names
        if len(set(all_labels)) < len(all_labels):
            dup_labels = find_duplicates(all_labels)
            raise QueryObjectValidationError(
                _(
                    "Duplicate column/metric labels: %(labels)s. Please make "
                    "sure all columns and metrics have a unique label.",
                    labels=", ".join(f'"{x}"' for x in dup_labels),
                )
            )

    def _validate_filters(self) -> None:
        """Raise if the free-form `where`/`having` clauses fail validation."""
        for param in ("where", "having"):
            clause = self.extras.get(param)
            if clause:
                try:
                    validate_filter_clause(clause)
                except QueryClauseValidationException as ex:
                    raise QueryObjectValidationError(ex.message) from ex

    def _validate_there_are_no_missing_series(self) -> None:
        """Raise if `series_columns` references columns absent from `columns`."""
        missing_series = [col for col in self.series_columns if col not in self.columns]
        if missing_series:
            raise QueryObjectValidationError(
                _(
                    "The following entries in `series_columns` are missing "
                    "in `columns`: %(columns)s. ",
                    columns=", ".join(f'"{x}"' for x in missing_series),
                )
            )

    def to_dict(self) -> Dict[str, Any]:
        """Return the query object as a plain dict (for connectors/caching)."""
        query_object_dict = {
            "apply_fetch_values_predicate": self.apply_fetch_values_predicate,
            "columns": self.columns,
            "extras": self.extras,
            "filter": self.filter,
            "from_dttm": self.from_dttm,
            "granularity": self.granularity,
            "inner_from_dttm": self.inner_from_dttm,
            "inner_to_dttm": self.inner_to_dttm,
            "is_rowcount": self.is_rowcount,
            "is_timeseries": self.is_timeseries,
            "metrics": self.metrics,
            "order_desc": self.order_desc,
            "orderby": self.orderby,
            "row_limit": self.row_limit,
            "row_offset": self.row_offset,
            "series_columns": self.series_columns,
            "series_limit": self.series_limit,
            "series_limit_metric": self.series_limit_metric,
            "to_dttm": self.to_dttm,
        }
        return query_object_dict

    def cache_key(self, **extra: Any) -> str:
        """
        The cache key is made out of the key/values from to_dict(), plus any
        other key/values in `extra`
        We remove datetime bounds that are hard values, and replace them with
        the user-provided inputs to bounds, which may be time-relative (as in
        "5 days ago" or "now").
        """
        cache_dict = self.to_dict()
        cache_dict.update(extra)
        # TODO: the below KVs can all be cleaned up and moved to `to_dict()` at some
        #  predetermined point in time when orgs are aware that the previously
        #  cached results will be invalidated.
        if not self.apply_fetch_values_predicate:
            del cache_dict["apply_fetch_values_predicate"]
        if self.datasource:
            cache_dict["datasource"] = self.datasource.uid
        if self.result_type:
            cache_dict["result_type"] = self.result_type
        if self.time_range:
            cache_dict["time_range"] = self.time_range
        if self.post_processing:
            cache_dict["post_processing"] = self.post_processing
        if self.time_offsets:
            cache_dict["time_offsets"] = self.time_offsets
        # hard datetime bounds are excluded so relative ranges stay cacheable
        for k in ["from_dttm", "to_dttm"]:
            del cache_dict[k]
        annotation_fields = [
            "annotationType",
            "descriptionColumns",
            "intervalEndColumn",
            "name",
            "overrides",
            "sourceType",
            "timeColumn",
            "titleColumn",
            "value",
        ]
        annotation_layers = [
            {field: layer[field] for field in annotation_fields if field in layer}
            for layer in self.annotation_layers
        ]
        # only add to key if there are annotations present that affect the payload
        if annotation_layers:
            cache_dict["annotation_layers"] = annotation_layers
        return md5_sha_from_dict(cache_dict, default=json_int_dttm_ser, ignore_nan=True)

    def exec_post_processing(self, df: DataFrame) -> DataFrame:
        """
        Perform post processing operations on DataFrame.

        :param df: DataFrame returned from database model.
        :return: new DataFrame to which all post processing operations have been
                 applied
        :raises QueryObjectValidationError: If the post processing operation
                 is incorrect
        """
        for post_process in self.post_processing:
            operation = post_process.get("operation")
            if not operation:
                raise QueryObjectValidationError(
                    _("`operation` property of post processing object undefined")
                )
            if not hasattr(pandas_postprocessing, operation):
                raise QueryObjectValidationError(
                    _(
                        "Unsupported post processing operation: %(operation)s",
                        # BUG FIX: the kwarg was `type=operation`, but the
                        # format string interpolates %(operation)s — the
                        # placeholder was never filled in.
                        operation=operation,
                    )
                )
            options = post_process.get("options", {})
            df = getattr(pandas_postprocessing, operation)(df, **options)
        return df
| |
#!/usr/bin/env python
# -*- mode: python; encoding: utf-8 -*-
#
"""This is the interface for managing hunts."""
import collections as py_collections
import StringIO
import urllib
import logging
from grr.gui import plot_lib
from grr.gui import renderers
from grr.gui.plugins import fileview
from grr.gui.plugins import foreman
from grr.gui.plugins import forms
from grr.gui.plugins import searchclient
from grr.gui.plugins import semantic
from grr.lib import access_control
from grr.lib import aff4
from grr.lib import data_store
from grr.lib import flow
from grr.lib import rdfvalue
from grr.lib.hunts import standard as hunts_standard
from grr.lib.rdfvalues import client as rdf_client
from grr.lib.rdfvalues import flows as rdf_flows
class ManageHunts(renderers.AngularDirectiveRenderer):
  """Top-level hunt manager view, rendered by the grr-hunts-view directive."""
  description = "Hunt Manager"
  behaviours = frozenset(["General"])
  directive = "grr-hunts-view"
class RunHuntConfirmationDialog(renderers.ConfirmationDialogRenderer):
  """Dialog that asks confirmation to run a hunt and actually runs it."""
  post_parameters = ["hunt_id"]
  inner_dialog_only = True
  header = "Run a hunt?"
  content_template = renderers.Template("""
<p>Are you sure you want to <strong>run</strong> this hunt?</p>
""")
  ajax_template = renderers.Template("""
<p class="text-info">Hunt started successfully!</p>
""")
  def Layout(self, request, response):
    """Record the hunt URN for access checking, then render the dialog."""
    self.check_access_subject = rdfvalue.RDFURN(request.REQ.get("hunt_id"))
    return super(RunHuntConfirmationDialog, self).Layout(request, response)
  def RenderAjax(self, request, response):
    """Kick off StartHuntFlow for the selected hunt and show confirmation."""
    flow.GRRFlow.StartFlow(flow_name="StartHuntFlow", token=request.token,
                           hunt_urn=rdfvalue.RDFURN(request.REQ.get("hunt_id")))
    return self.RenderFromTemplate(self.ajax_template, response,
                                   unique=self.unique)
class StopHuntConfirmationDialog(renderers.ConfirmationDialogRenderer):
  """Dialog that asks confirmation to stop a hunt."""
  post_parameters = ["hunt_id"]
  inner_dialog_only = True
  header = "Stop a hunt?"
  content_template = renderers.Template("""
<p>Are you sure you want to <strong>stop</strong> this hunt? Once a hunt is
stopped, restarting it will run it on all clients again.</p>
""")
  ajax_template = renderers.Template("""
<p class="text-info">Hunt stopped successfully!</p>
""")
  def Layout(self, request, response):
    # Require an access check against the hunt URN before showing the dialog.
    self.check_access_subject = rdfvalue.RDFURN(request.REQ.get("hunt_id"))
    return super(StopHuntConfirmationDialog, self).Layout(request, response)
  def RenderAjax(self, request, response):
    # The actual state change is performed by the server-side StopHuntFlow.
    flow.GRRFlow.StartFlow(flow_name="StopHuntFlow", token=request.token,
                           hunt_urn=rdfvalue.RDFURN(request.REQ.get("hunt_id")))
    return self.RenderFromTemplate(self.ajax_template, response,
                                   unique=self.unique)
class ModifyHuntDialog(renderers.ConfirmationDialogRenderer):
  """Dialog that allows user to modify certain hunt parameters."""
  post_parameters = ["hunt_id"]
  inner_dialog_only = True
  header = "Modify a hunt"
  proceed_button_title = "Modify!"
  # (seconds, suffix) pairs; presumably used to render an expiry duration as
  # "NdNhNmNs" — the consumer is not visible in this file, confirm.
  expiry_time_dividers = (
      (60 * 60 * 24, "d"), (60 * 60, "h"), (60, "m"), (1, "s"))
  content_template = renderers.Template("""
{{this.hunt_params_form|safe}}
""")
  ajax_template = renderers.Template("""
<p class="text-info">Hunt modified successfully!</p>
""")
  def Layout(self, request, response):
    """Layout handler."""
    # Prefill the form with the hunt's current client limit and expiry time.
    hunt_urn = rdfvalue.RDFURN(request.REQ.get("hunt_id"))
    with aff4.FACTORY.Open(hunt_urn, aff4_type="GRRHunt",
                           token=request.token) as hunt:
      runner = hunt.GetRunner()
      hunt_args = hunts_standard.ModifyHuntFlowArgs(
          client_limit=runner.args.client_limit,
          expiry_time=runner.context.expires,
          )
      # NOTE(review): "supressions" (sic) is the keyword the forms API
      # exposes — do not "correct" the spelling here.
      self.hunt_params_form = forms.SemanticProtoFormRenderer(
          hunt_args, supressions=["hunt_urn"]).RawHTML(request)
    self.check_access_subject = hunt_urn
    return super(ModifyHuntDialog, self).Layout(request, response)
  def RenderAjax(self, request, response):
    """Starts ModifyHuntFlow that actually modifies a hunt."""
    hunt_urn = rdfvalue.RDFURN(request.REQ.get("hunt_id"))
    args = forms.SemanticProtoFormRenderer(
        hunts_standard.ModifyHuntFlowArgs()).ParseArgs(request)
    flow.GRRFlow.StartFlow(flow_name="ModifyHuntFlow", token=request.token,
                           hunt_urn=hunt_urn, args=args)
    return self.RenderFromTemplate(self.ajax_template, response,
                                   unique=self.unique)
class DeleteHuntDialog(renderers.ConfirmationDialogRenderer):
  """Dialog that confirms deletion of a hunt."""
  post_parameters = ["hunt_id"]
  inner_dialog_only = True
  header = "Delete a hunt"
  proceed_button_title = "Delete!"
  content_template = renderers.Template("""
<p>Are you sure you want to <strong>delete</strong> this hunt? Note that
hunts can only be deleted if there are no results. </p>
""")
  ajax_template = renderers.Template("""
<p class="text-info">Hunt Deleted!</p>
""")
  def Layout(self, request, response):
    """Layout handler."""
    # TODO(user) Switch from requiring approval to requiring ownership.
    self.check_access_subject = rdfvalue.RDFURN(request.REQ.get("hunt_id"))
    return super(DeleteHuntDialog, self).Layout(request, response)
  def RenderAjax(self, request, response):
    """Starts DeleteHuntFlow that actually modifies a hunt."""
    # Deletion is delegated to a server-side flow so access control applies.
    flow.GRRFlow.StartFlow(flow_name="DeleteHuntFlow", token=request.token,
                           hunt_urn=rdfvalue.RDFURN(request.REQ.get("hunt_id")))
    return self.RenderFromTemplate(self.ajax_template, response,
                                   unique=self.unique)
class ManageHuntsClientView(renderers.Splitter2Way):
  """Manages the clients involved in a hunt."""
  description = "Hunt Client View"
  # Client table on top, per-client detail tabs below.
  top_renderer = "HuntClientTableRenderer"
  bottom_renderer = "HuntClientViewTabs"
class ResourceRenderer(semantic.RDFValueRenderer):
  """Renders resource usage as meters."""
  cls = "vertical_aligned"
  # The proxied value (a usage ratio relative to the hunt-wide maximum, see
  # HuntClientTableRenderer.BuildTable) is shown as an HTML5 <meter> gauge.
  layout_template = renderers.Template(
      "<div>"
      "<meter value=\"{{this.proxy|escape}}\"></meter>"
      "</div>")
class FloatRenderer(semantic.RDFValueRenderer):
  """Renders a numeric proxy value with two decimal places ("0.0" if unset)."""
  layout_template = renderers.Template("{{this.value|escape}}")
  def Layout(self, request, response):
    # A missing proxy displays as the literal "0.0"; anything else is
    # formatted with two digits after the decimal point.
    raw = self.proxy
    self.value = "0.0" if raw is None else "%.2f" % raw
    super(FloatRenderer, self).Layout(request, response)
class HuntClientTableRenderer(fileview.AbstractFileTable):
  """Displays the clients of a hunt with status and resource usage columns."""
  selection_publish_queue = "hunt_client_select"
  layout_template = """
{{this.title|escape}}
<a id="backlink_{{unique|escape}}" href='#{{this.hash|escape}}'>
back to hunt view</a>
<span class='pull-right'> Filter by State
<select id='{{unique|escape}}_select'>
  <option>ALL</option>
  <option>OUTSTANDING</option>
  <option>COMPLETED</option>
  <option>BAD</option>
</select>
</span>
""" + fileview.AbstractFileTable.layout_template
  post_parameters = ["hunt_id"]
  def __init__(self, **kwargs):
    super(HuntClientTableRenderer, self).__init__(**kwargs)
    self.AddColumn(semantic.RDFValueColumn(
        "Client ID", width="20%", renderer=semantic.SubjectRenderer))
    self.AddColumn(semantic.RDFValueColumn("Hostname", width="10%"))
    self.AddColumn(semantic.RDFValueColumn("Status", width="10%"))
    self.AddColumn(semantic.RDFValueColumn("User CPU seconds", width="10%",
                                           renderer=FloatRenderer))
    self.AddColumn(semantic.RDFValueColumn("System CPU seconds", width="10%",
                                           renderer=FloatRenderer))
    self.AddColumn(semantic.RDFValueColumn("CPU",
                                           renderer=ResourceRenderer,
                                           width="10%"))
    self.AddColumn(semantic.RDFValueColumn("Network bytes sent", width="10%"))
    self.AddColumn(semantic.RDFValueColumn("Network",
                                           renderer=ResourceRenderer,
                                           width="10%"))
    self.AddColumn(semantic.RDFValueColumn("Last Checkin", width="10%"))
  def Layout(self, request, response):
    """Ensure our hunt is in our state for HTML layout."""
    hunt_id = request.REQ.get("hunt_id")
    self.title = "Viewing Hunt %s" % hunt_id
    h = dict(main="ManageHunts", hunt_id=hunt_id)
    # NOTE(review): the template above reads {{this.hash}} while only
    # hunt_hash is assigned here — confirm the base class fills in "hash".
    self.hunt_hash = urllib.urlencode(sorted(h.items()))
    response = super(HuntClientTableRenderer, self).Layout(request, response)
    return self.CallJavascript(response, "HuntClientTableRenderer.Layout",
                               hunt_hash=self.hunt_hash)
  def BuildTable(self, start_row, end_row, request):
    """Called to fill in the data in the table."""
    hunt_id = request.REQ.get("hunt_id")
    completion_status_filter = request.REQ.get("completion_status", "ALL")
    if hunt_id is None:
      return
    try:
      self.hunt = aff4.FACTORY.Open(hunt_id, token=request.token,
                                    aff4_type="GRRHunt")
    except IOError:
      logging.error("Invalid hunt %s", hunt_id)
      return
    # TODO(user): enable per-client resource usage display.
    # resource_usage is always empty until the TODO above is done, so the
    # loop below and the per-client usage branch are currently no-ops.
    resource_usage = {}
    resource_max = [0, 0, 0]
    for resource in resource_usage.values():
      for i in range(3):
        if resource_max[i] < resource[i]:
          resource_max[i] = resource[i]
    results = {}
    for status, client_list in self.hunt.GetClientsByStatus().items():
      if (completion_status_filter == "ALL" or
          status == completion_status_filter):
        for client in client_list:
          results[client] = status
    # Get the list of clients and sort so that we can page accurately.
    # sorted() replaces the old keys()+sort() pair: dict views have no
    # sort() on Python 3 and sorted() is behaviorally identical here.
    client_list = sorted(results)
    client_list = client_list[start_row:end_row]
    row_index = start_row
    for c_urn, cdict in self.hunt.GetClientStates(client_list):
      row = {"Client ID": c_urn,
             "Hostname": cdict.get("hostname"),
             "Status": results[c_urn],
             "Last Checkin": searchclient.FormatLastSeenTime(
                 cdict.get("age") or 0)}
      client_id = c_urn.Basename()
      if client_id in resource_usage:
        usage = resource_usage[client_id]
        row["User CPU seconds"] = usage[0]
        row["System CPU seconds"] = usage[1]
        row["Network bytes sent"] = usage[2]
        # Express each resource as a fraction of the hunt-wide maximum so
        # the ResourceRenderer meter has a bounded range.
        usage_percent = []
        for i in range(3):
          if resource_max[i]:
            usage_percent.append(round(usage[i], 2) / resource_max[i])
          else:
            usage_percent.append(0.0)
        row["CPU"] = usage_percent[0]
        row["Network"] = usage_percent[2]
      else:
        row["User CPU seconds"] = 0
        row["System CPU seconds"] = 0
        row["Network bytes sent"] = 0
        row["CPU"] = 0
        row["Network"] = 0
      self.AddRow(row, row_index)
      row_index += 1
    self.size = len(results)
class AbstractLogRenderer(renderers.TemplateRenderer):
  """Render a page for view a Log file.
  Implements a very simple view. That will be extended with filtering
  capabilities.
  Implementations should implement the GetLog function.
  """
  show_total_count = False
  # Fixed template markup: the "No entries" row closed </tr> and </td> in the
  # wrong order, and the final tag was an opening <table> instead of </table>.
  layout_template = renderers.Template("""
<table class="proto_table">
  {% if this.log|length > 0 %}
    {% if this.show_total_count %}
      <h5>{{this.log|length}} Entries</h5>
    {% endif %}
  {% endif %}
  {% for line in this.log %}
    <tr>
    {% for val in line %}
      <td class="proto_key">{{ val|safe }}</td>
    {% endfor %}
    </tr>
  {% empty %}
    <tr><td>No entries</td></tr>
  {% endfor %}
</table>
""")
  def GetLog(self, request):
    """Take a request and return a list of tuples for a log."""
    _ = request
    return []
  def Layout(self, request, response):
    """Fill in the form with the specific fields for the flow requested."""
    # Each log row is a tuple of values; render every value with whatever
    # semantic renderer matches its type.
    self.log = []
    for row in self.GetLog(request):
      rendered_row = []
      for item in row:
        item_renderer = semantic.FindRendererForObject(item)
        rendered_row.append(item_renderer.RawHTML(request))
      self.log.append(rendered_row)
    return super(AbstractLogRenderer, self).Layout(request, response)
class HuntOverviewRenderer(AbstractLogRenderer):
  """Renders the overview tab."""
  # Will be retrieved from request.REQ if not set.
  hunt_id = None
  layout_template = renderers.Template("""
<a id="ViewHuntDetails_{{unique}}" href='#{{this.hash|escape}}'
  onclick='grr.loadFromHash("{{this.hash|escape}}");'
  class="btn btn-info">
  View hunt details
</a>
<br/>
<dl class="dl-horizontal dl-hunt">
  <dt>Name</dt><dd>{{ this.hunt_name|escape }}</dd>
  <dt>Hunt ID</dt>
  <dd>{{ this.hunt.urn.Basename|escape }}</dd>
  <dt>Hunt URN</dt>
  <dd>{{ this.hunt.urn|escape }}</dd>
  <dt>Creator</dt>
  <dd>{{ this.hunt_creator|escape }}</dd>
  <dt>Client Limit</dt>
  {% if this.client_limit == 0 %}
    <dd>None</dd>
  {% else %}
    <dd>{{ this.client_limit|escape }}</dd>
  {% endif %}
  <dt>Client Rate (clients/min)</dt>
  {% if this.client_rate == 0.0 %}
    <dd>No rate limit</dd>
  {% else %}
    <dd>{{ this.client_rate|escape }}</dd>
  {% endif %}
  <dt>Clients Scheduled</dt>
  <dd>{{ this.all_clients_count|escape }}</dd>
  <dt>Outstanding</dt>
  <dd>{{ this.outstanding_clients_count|escape }}</dd>
  <dt>Completed</dt>
  <dd>{{ this.completed_clients_count|escape }}</dd>
  <dt>Total CPU seconds used</dt>
  <dd>{{ this.cpu_sum|escape }}</dd>
  <dt>Total network traffic</dt>
  <dd>{{ this.net_sum|filesizeformat }}</dd>
  <dt>Regex Rules</dt>
  <dd>{{ this.regex_rules|safe }}</dd>
  <dt>Integer Rules</dt>
  <dd>{{ this.integer_rules|safe }}</dd>
  <dt>Arguments</dt><dd>{{ this.args_str|safe }}</dd>
  {% for key, val in this.data.items %}
    <dt>{{ key|escape }}</dt><dd>{{ val|escape }}</dd>
  {% endfor %}
</dl>
""")
  error_template = renderers.Template(
      "No information available for this Hunt.")
  ajax_template = renderers.Template("""
<div id="RunHuntResult_{{unique|escape}}"></div>
""")
  def RenderAjax(self, request, response):
    # Renders a placeholder div; the client-side JS fills it in.
    self.hunt_id = request.REQ.get("hunt_id")
    self.subject = rdfvalue.RDFURN(self.hunt_id)
    response = renderers.TemplateRenderer.Layout(
        self, request, response, apply_template=self.ajax_template)
    return self.CallJavascript(response, "HuntOverviewRenderer.RenderAjax",
                               subject=self.subject, hunt_id=self.hunt_id)
  def Layout(self, request, response):
    """Display the overview."""
    if not self.hunt_id:
      self.hunt_id = request.REQ.get("hunt_id")
    # Hash fragment used by the "View hunt details" link.
    h = dict(main="ManageHuntsClientView", hunt_id=self.hunt_id)
    self.hash = urllib.urlencode(sorted(h.items()))
    self.data = {}
    self.args_str = ""
    if self.hunt_id:
      try:
        self.hunt = aff4.FACTORY.Open(self.hunt_id, aff4_type="GRRHunt",
                                      token=request.token)
        if self.hunt.state.Empty():
          raise IOError("No valid state could be found.")
        # Aggregate resource usage accumulated by the hunt so far.
        hunt_stats = self.hunt.state.context.usage_stats
        self.cpu_sum = "%.2f" % hunt_stats.user_cpu_stats.sum
        self.net_sum = hunt_stats.network_bytes_sent_stats.sum
        (self.all_clients_count,
         self.completed_clients_count, _) = self.hunt.GetClientsCounts()
        self.outstanding_clients_count = (self.all_clients_count -
                                          self.completed_clients_count)
        runner = self.hunt.GetRunner()
        self.hunt_name = runner.args.hunt_name
        self.hunt_creator = runner.context.creator
        # OrderedDict keeps the display order of these extra rows stable.
        self.data = py_collections.OrderedDict()
        self.data["Start Time"] = runner.context.start_time
        self.data["Expiry Time"] = runner.context.expires
        self.data["Status"] = self.hunt.Get(self.hunt.Schema.STATE)
        self.client_limit = runner.args.client_limit
        self.client_rate = runner.args.client_rate
        self.args_str = renderers.DictRenderer(
            self.hunt.state, filter_keys=["context"]).RawHTML(request)
        if runner.args.regex_rules:
          self.regex_rules = foreman.RegexRuleArray(
              runner.args.regex_rules).RawHTML(request)
        else:
          self.regex_rules = "None"
        if runner.args.integer_rules:
          self.integer_rules = foreman.IntegerRuleArray(
              runner.args.integer_rules).RawHTML(request)
        else:
          self.integer_rules = "None"
      except IOError:
        self.layout_template = self.error_template
    # NOTE(review): super() is deliberately given AbstractLogRenderer, which
    # skips AbstractLogRenderer.Layout (the log-building step) and goes
    # straight to TemplateRenderer.Layout — confirm this is intentional.
    return super(AbstractLogRenderer, self).Layout(request, response)
class HuntContextView(renderers.TemplateRenderer):
  """Render the hunt context."""
  layout_template = renderers.Template("""
{{this.args_str|safe}}
""")
  def Layout(self, request, response):
    """Display hunt's context presented as dict."""
    if not hasattr(self, "hunt_id"):
      self.hunt_id = request.REQ.get("hunt_id")
    self.hunt = aff4.FACTORY.Open(self.hunt_id, aff4_type="GRRHunt",
                                  token=request.token)
    if self.hunt.state.Empty():
      raise IOError("No valid state could be found.")
    # Render the whole context as an HTML dict view.
    self.args_str = renderers.DictRenderer(
        self.hunt.state.context).RawHTML(request)
    return super(HuntContextView, self).Layout(request, response)
class HuntLogRenderer(renderers.AngularDirectiveRenderer):
  """Shows the hunt log via the grr-hunt-log Angular directive."""
  directive = "grr-hunt-log"
  def Layout(self, request, response):
    # Hand the hunt URN to the directive as an element attribute.
    self.directive_args = {"hunt-urn": request.REQ.get("hunt_id")}
    return super(HuntLogRenderer, self).Layout(request, response)
class HuntErrorRenderer(renderers.AngularDirectiveRenderer):
  """Shows hunt errors via the grr-hunt-errors Angular directive."""
  directive = "grr-hunt-errors"
  def Layout(self, request, response):
    # Hand the hunt URN to the directive as an element attribute.
    self.directive_args = {"hunt-urn": request.REQ.get("hunt_id")}
    return super(HuntErrorRenderer, self).Layout(request, response)
class HuntClientViewTabs(renderers.TabLayout):
  """Show a tabset to inspect the selected client of the selected hunt."""
  names = ["Status", "Hunt Log", "Hunt Errors", "Client Detail"]
  # One delegated renderer per tab name, in the same order as `names`.
  delegated_renderers = ["HuntClientOverviewRenderer", "HuntLogRenderer",
                         "HuntErrorRenderer", "HuntHostInformationRenderer"]
  post_parameters = ["hunt_id", "hunt_client"]
  def Layout(self, request, response):
    # Tell the client-side code which hunt the tabs belong to so tab
    # switches keep the current hunt selection.
    response = super(HuntClientViewTabs, self).Layout(request, response)
    return self.CallJavascript(response, "HuntClientViewTabs.Layout",
                               hunt_id=self.state["hunt_id"])
class HuntClientOverviewRenderer(renderers.TemplateRenderer):
  """Renders the Client Hunt Overview tab."""
  # Fixed template markup: removed a stray '"' that left the <a> tag with an
  # unterminated attribute, and closed the table row with </tr>.
  layout_template = renderers.Template("""
<a href='#{{this.hash|escape}}' onclick='grr.loadFromHash(
  "{{this.hash|escape}}");'>
  Go to client {{ this.client.urn|escape }}
</a>
<table class="proto_table">
  <tr><td class="proto_key">Last Checkin</td>
      <td>{{ this.last_checkin|escape }}</td>
  </tr>
</table>
""")
  def Layout(self, request, response):
    """Display the overview."""
    hunt_id = request.REQ.get("hunt_id")
    hunt_client = request.REQ.get("hunt_client")
    if hunt_id is not None and hunt_client is not None:
      try:
        self.client = aff4.FACTORY.Open(hunt_client, token=request.token,
                                        aff4_type="VFSGRRClient")
        self.last_checkin = rdfvalue.RDFDatetime(
            self.client.Get(self.client.Schema.PING))
        # Hash fragment that jumps to the standard host-information view.
        h = dict(main="HostInformation", c=self.client.client_id)
        self.hash = urllib.urlencode(sorted(h.items()))
      except IOError as e:
        # Best effort: log and render whatever attributes were set.
        logging.error("Attempt to open client %s. Err %s", hunt_client, e)
    return super(HuntClientOverviewRenderer, self).Layout(request, response)
class HuntClientGraphRenderer(renderers.TemplateRenderer):
  """Renders the button to download a hunt graph."""
  layout_template = renderers.Template("""
{% if this.clients %}
<button id="{{ unique|escape }}">
 Generate
</button>
{% else %}
No data to graph yet.
{% endif %}
""")
  def Layout(self, request, response):
    # Only offer the "Generate" button when at least one client has been
    # scheduled on the hunt (first element of GetClientsCounts()).
    self.hunt_id = request.REQ.get("hunt_id")
    hunt = aff4.FACTORY.Open(self.hunt_id, token=request.token)
    all_count, _, _ = hunt.GetClientsCounts()
    self.clients = bool(all_count)
    response = super(HuntClientGraphRenderer, self).Layout(request, response)
    return self.CallJavascript(response, "HuntClientGraphRenderer.Layout",
                               hunt_id=self.hunt_id)
class HuntClientCompletionGraphRenderer(renderers.ImageDownloadRenderer):
  """Plots cumulative started vs. completed client counts as a PNG image."""
  def Content(self, request, _):
    """Generates the actual image to display."""
    hunt_id = request.REQ.get("hunt_id")
    hunt = aff4.FACTORY.Open(hunt_id, aff4_type="GRRHunt", token=request.token)
    clients_by_status = hunt.GetClientsByStatus()
    cl = clients_by_status["STARTED"]
    fi = clients_by_status["COMPLETED"]
    # Collect every observed age per client URN.
    cdict = {}
    for c in cl:
      cdict.setdefault(c, []).append(c.age)
    fdict = {}
    for c in fi:
      fdict.setdefault(c, []).append(c.age)
    # Earliest age per client, converted from microseconds to whole seconds.
    cl_age = [int(min(x) / 1e6) for x in cdict.values()]
    fi_age = [int(min(x) / 1e6) for x in fdict.values()]
    # Per-second histograms: how many clients started/completed each second.
    cl_hist = {}
    fi_hist = {}
    for age in cl_age:
      cl_hist.setdefault(age, 0)
      cl_hist[age] += 1
    for age in fi_age:
      fi_hist.setdefault(age, 0)
      fi_hist[age] += 1
    # NOTE(review): min() raises ValueError when no client has started yet —
    # confirm callers only request this graph for non-empty hunts.
    t0 = min(cl_age) - 1
    times = [t0]
    cl = [0]
    fi = [0]
    all_times = set(cl_age) | set(fi_age)
    cl_count = 0
    fi_count = 0
    # Build cumulative step curves over all event timestamps.
    for time in sorted(all_times):
      # Check if there is a datapoint one second earlier, add one if not.
      if times[-1] != time - 1:
        times.append(time)
        cl.append(cl_count)
        fi.append(fi_count)
      cl_count += cl_hist.get(time, 0)
      fi_count += fi_hist.get(time, 0)
      times.append(time)
      cl.append(cl_count)
      fi.append(fi_count)
    # Convert to hours, starting from 0.
    times = [(t - t0) / 3600.0 for t in times]
    params = {"backend": "png"}
    plot_lib.plt.rcParams.update(params)
    plot_lib.plt.figure(1)
    plot_lib.plt.clf()
    plot_lib.plt.plot(times, cl, label="Agents issued.")
    plot_lib.plt.plot(times, fi, label="Agents completed.")
    plot_lib.plt.title("Agent Coverage")
    plot_lib.plt.xlabel("Time (h)")
    plot_lib.plt.ylabel(r"Agents")
    plot_lib.plt.grid(True)
    plot_lib.plt.legend(loc=4)
    # Serialize the figure to PNG bytes in memory and return them.
    buf = StringIO.StringIO()
    plot_lib.plt.savefig(buf)
    buf.seek(0)
    return buf.read()
class HuntHostInformationRenderer(fileview.AFF4Stats):
  """Modified HostInformation that reads from hunt_client variable."""
  description = "Hunt Client Host Information"
  css_class = "TableBody"
  # Client attributes shown in the stats table.
  attributes_to_show = ["USERNAMES", "HOSTNAME", "MAC_ADDRESS", "INSTALL_DATE",
                        "SYSTEM", "CLOCK", "CLIENT_INFO"]
  def Layout(self, request, response):
    """Produce a summary of the client information."""
    client_id = request.REQ.get("hunt_client")
    # Renders nothing when no client is selected in the hunt client table.
    if client_id:
      super(HuntHostInformationRenderer, self).Layout(
          request, response, client_id=client_id,
          aff4_path=rdf_client.ClientURN(client_id),
          age=aff4.ALL_TIMES)
class HuntStatsRenderer(renderers.TemplateRenderer):
  """Display hunt's resources usage stats."""
  # "Hisogram" typo in the network section fixed to "Histogram".
  layout_template = renderers.Template("""
<h3>Total number of clients: {{this.stats.user_cpu_stats.num|escape}}</h3>
<h3>User CPU</h3>
<dl class="dl-horizontal">
  <dt>User CPU mean</dt>
  <dd>{{this.stats.user_cpu_stats.mean|floatformat}}</dd>
  <dt>User CPU stdev</dt>
  <dd>{{this.stats.user_cpu_stats.std|floatformat}}</dd>
  <dt>Clients Histogram</dt>
  <dd class="histogram">
    <div id="user_cpu_{{unique|escape}}"></div>
  </dd>
</dl>
<h3>System CPU</h3>
<dl class="dl-horizontal">
  <dt>System CPU mean</dt>
  <dd>{{this.stats.system_cpu_stats.mean|floatformat}}</dd>
  <dt>System CPU stdev</dt>
  <dd>{{this.stats.system_cpu_stats.std|floatformat}}</dd>
  <dt>Clients Histogram</dt>
  <dd class="histogram">
    <div id="system_cpu_{{unique|escape}}"></div>
  </dd>
</dl>
<h3>Network bytes sent</h3>
<dl class="dl-horizontal">
  <dt>Network bytes sent mean</dt>
  <dd>{{this.stats.network_bytes_sent_stats.mean|floatformat}}</dd>
  <dt>Network bytes sent stdev</dt>
  <dd>{{this.stats.network_bytes_sent_stats.std|floatformat}}</dd>
  <dt>Clients Histogram</dt>
  <dd class="histogram">
    <div id="network_bytes_sent_{{unique|escape}}"></div>
  </dd>
</dl>
<h3>Worst performers</h3>
<div class="row">
<div class="col-md-8">
<table id="performers_{{unique|escape}}"
  class="table table-condensed table-striped table-bordered">
  <thead>
    <th>Client Id</th>
    <th>User CPU</th>
    <th>System CPU</th>
    <th>Network bytes sent</th>
  </thead>
  <tbody>
  {% for r in this.stats.worst_performers %}
    <tr>
      <td><a client_id="{{r.client_id|escape}}">{{r.client_id|escape}}</a></td>
      <td>{{r.cpu_usage.user_cpu_time|floatformat}}</td>
      <td>{{r.cpu_usage.system_cpu_time|floatformat}}</td>
      <td>{{r.network_bytes_sent|escape}}</td>
    </tr>
  {% endfor %}
  </tbody>
</table>
</div>
</div>
""")
  error_template = renderers.Template(
      "No information available for this Hunt.")
  def _HistogramToJSON(self, histogram):
    # Serialize (bucket upper bound, client count) pairs for the JS plots.
    hist_data = [(b.range_max_value, b.num) for b in histogram.bins]
    return renderers.JsonDumpForScriptContext(hist_data)
  def Layout(self, request, response):
    """Layout the HuntStatsRenderer data."""
    hunt_id = request.REQ.get("hunt_id")
    if hunt_id:
      try:
        hunt = aff4.FACTORY.Open(hunt_id,
                                 aff4_type="GRRHunt",
                                 token=request.token)
        if hunt.state.Empty():
          raise IOError("No valid state could be found.")
        self.stats = hunt.state.context.usage_stats
        self.user_cpu_json_data = self._HistogramToJSON(
            self.stats.user_cpu_stats.histogram)
        # Bug fix: this previously serialized the *user* CPU histogram
        # again, so the "System CPU" plot showed user-CPU data.
        self.system_cpu_json_data = self._HistogramToJSON(
            self.stats.system_cpu_stats.histogram)
        self.network_bytes_sent_json_data = self._HistogramToJSON(
            self.stats.network_bytes_sent_stats.histogram)
        response = super(HuntStatsRenderer, self).Layout(request, response)
        return self.CallJavascript(
            response, "HuntStatsRenderer.Layout",
            user_cpu_json_data=self.user_cpu_json_data,
            system_cpu_json_data=self.system_cpu_json_data,
            network_bytes_sent_json_data=self.network_bytes_sent_json_data)
      except IOError:
        # Fall through to render the error template instead.
        self.layout_template = self.error_template
    return super(HuntStatsRenderer, self).Layout(request, response)
class HuntOutstandingRenderer(renderers.TableRenderer):
  """A renderer that shows debug information for outstanding clients."""
  post_parameters = ["hunt_id"]
  def __init__(self, **kwargs):
    super(HuntOutstandingRenderer, self).__init__(**kwargs)
    self.AddColumn(semantic.RDFValueColumn("Client"))
    self.AddColumn(semantic.RDFValueColumn("Flow"))
    self.AddColumn(semantic.RDFValueColumn("Incomplete Request #"))
    self.AddColumn(semantic.RDFValueColumn("State"))
    self.AddColumn(semantic.RDFValueColumn("Args Expected"))
    self.AddColumn(semantic.RDFValueColumn("Available Responses"))
    self.AddColumn(semantic.RDFValueColumn("Status"))
    self.AddColumn(semantic.RDFValueColumn("Expected Responses"))
    self.AddColumn(semantic.RDFValueColumn("Client Requests Pending"))
  def GetClientRequests(self, client_urns, token):
    """Returns all client requests for the given client urns."""
    task_urns = [urn.Add("tasks") for urn in client_urns]
    client_requests_raw = data_store.DB.MultiResolveRegex(task_urns, "task:.*",
                                                          token=token)
    client_requests = {}
    for client_urn, requests in client_requests_raw:
      # Slice the client id out of the URN string: skip the 6-char scheme
      # prefix and take the next 18 characters — presumably "C.<16 hex>";
      # confirm against the URN format.
      client_id = str(client_urn)[6:6 + 18]
      client_requests.setdefault(client_id, [])
      for _, serialized, _ in requests:
        client_requests[client_id].append(rdf_flows.GrrMessage(serialized))
    return client_requests
  def GetAllSubflows(self, hunt_urn, client_urns, token):
    """Lists all subflows for a given hunt for all clients in client_urns."""
    client_ids = [urn.Split()[0] for urn in client_urns]
    client_bases = [hunt_urn.Add(client_id) for client_id in client_ids]
    all_flows = []
    act_flows = client_bases
    # Breadth-first traversal of the flow tree under each client base URN.
    while act_flows:
      next_flows = []
      for _, children in aff4.FACTORY.MultiListChildren(act_flows, token=token):
        for flow_urn in children:
          next_flows.append(flow_urn)
      all_flows.extend(next_flows)
      act_flows = next_flows
    return all_flows
  def GetFlowRequests(self, flow_urns, token):
    """Returns all outstanding requests for the flows in flow_urns."""
    flow_requests = {}
    flow_request_urns = [flow_urn.Add("state") for flow_urn in flow_urns]
    for flow_urn, values in data_store.DB.MultiResolveRegex(
        flow_request_urns, "flow:.*", token=token):
      for subject, serialized, _ in values:
        try:
          # Status attributes deserialize to GrrMessage; everything else is
          # a RequestState.
          if "status" in subject:
            msg = rdf_flows.GrrMessage(serialized)
          else:
            msg = rdf_flows.RequestState(serialized)
        except Exception as e:  # pylint: disable=broad-except
          logging.warn("Error while parsing: %s", e)
          continue
        flow_requests.setdefault(flow_urn, []).append(msg)
    return flow_requests
  def BuildTable(self, start_row, end_row, request):
    """Renders the table."""
    hunt_id = request.REQ.get("hunt_id")
    token = request.token
    if hunt_id is None:
      return
    hunt_id = rdfvalue.RDFURN(hunt_id)
    hunt = aff4.FACTORY.Open(hunt_id, aff4_type="GRRHunt", age=aff4.ALL_TIMES,
                             token=token)
    clients_by_status = hunt.GetClientsByStatus()
    outstanding = clients_by_status["OUTSTANDING"]
    self.size = len(outstanding)
    # Sort for stable paging, then restrict to the requested page.
    outstanding = sorted(outstanding)[start_row:end_row]
    all_flow_urns = self.GetAllSubflows(hunt_id, outstanding, token)
    flow_requests = self.GetFlowRequests(all_flow_urns, token)
    try:
      client_requests = self.GetClientRequests(outstanding, token)
    except access_control.UnauthorizedAccess:
      # Raw task-queue access may be restricted; degrade gracefully below.
      client_requests = None
    waitingfor = {}
    status_by_request = {}
    for flow_urn in flow_requests:
      for obj in flow_requests[flow_urn]:
        if isinstance(obj, rdf_flows.RequestState):
          # Keep the lowest-id (oldest outstanding) request per flow.
          waitingfor.setdefault(flow_urn, obj)
          if waitingfor[flow_urn].id > obj.id:
            waitingfor[flow_urn] = obj
        elif isinstance(obj, rdf_flows.GrrMessage):
          status_by_request.setdefault(flow_urn, {})[obj.request_id] = obj
    # Fetch all responses stored for each flow's oldest outstanding request.
    # (iteritems is Python 2 only, consistent with the rest of this file.)
    response_urns = []
    for request_base_urn, request in waitingfor.iteritems():
      response_urns.append(rdfvalue.RDFURN(request_base_urn).Add(
          "request:%08X" % request.id))
    response_dict = dict(data_store.DB.MultiResolveRegex(
        response_urns, "flow:.*", token=token))
    row_index = start_row
    for flow_urn in sorted(all_flow_urns):
      request_urn = flow_urn.Add("state")
      client_id = flow_urn.Split()[2]
      try:
        request_obj = waitingfor[request_urn]
        response_urn = rdfvalue.RDFURN(request_urn).Add(
            "request:%08X" % request_obj.id)
        responses_available = len(response_dict.setdefault(response_urn, []))
        status_available = "No"
        responses_expected = "Unknown"
        if request_obj.id in status_by_request.setdefault(request_urn, {}):
          status_available = "Yes"
          status = status_by_request[request_urn][request_obj.id]
          responses_expected = status.response_id
        if client_requests is None:
          client_requests_available = "Must use raw access."
        else:
          # Count queued client tasks belonging to this flow's session.
          client_requests_available = 0
          for client_req in client_requests.setdefault(client_id, []):
            if request_obj.request.session_id == client_req.session_id:
              client_requests_available += 1
        row_data = {
            "Client": client_id,
            "Flow": flow_urn,
            "Incomplete Request #": request_obj.id,
            "State": request_obj.next_state,
            "Args Expected": request_obj.request.args_rdf_name,
            "Available Responses": responses_available,
            "Status": status_available,
            "Expected Responses": responses_expected,
            "Client Requests Pending": client_requests_available}
      except KeyError:
        # No outstanding request found for this flow.
        row_data = {
            "Client": client_id,
            "Flow": flow_urn,
            "Incomplete Request #": "No request found"}
      self.AddRow(row_data, row_index=row_index)
      row_index += 1
| |
# Copyright (C) 2016 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Handles all requests relating to groups.
"""
import functools
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import timeutils
from oslo_utils import uuidutils
from cinder import db
from cinder.db import base
from cinder import exception
from cinder.i18n import _
from cinder import objects
from cinder.objects import base as objects_base
from cinder.objects import fields as c_fields
import cinder.policy
from cinder import quota
from cinder import quota_utils
from cinder.scheduler import rpcapi as scheduler_rpcapi
from cinder.volume import api as volume_api
from cinder.volume import rpcapi as volume_rpcapi
from cinder.volume import utils as vol_utils
from cinder.volume import volume_types
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
GROUP_QUOTAS = quota.GROUP_QUOTAS
# Volume statuses that permit removing the volume from a group.
VALID_REMOVE_VOL_FROM_GROUP_STATUS = (
    'available',
    'in-use',
    'error',
    'error_deleting')
# Volume statuses that permit adding the volume to a group.
VALID_ADD_VOL_TO_GROUP_STATUS = (
    'available',
    'in-use')
def wrap_check_policy(func):
    """Check policy corresponding to the wrapped methods prior to execution.

    The wrapped callable must take (self, context, target_obj) as its first
    three positional arguments; the policy action name is derived from the
    wrapped function's own name.
    """
    @functools.wraps(func)
    def _policy_checked(self, context, target_obj, *args, **kwargs):
        # Raises if the caller may not perform func.__name__ on target_obj;
        # only then is the real method invoked.
        check_policy(context, func.__name__, target_obj)
        return func(self, context, target_obj, *args, **kwargs)
    return _policy_checked
def check_policy(context, action, target_obj=None):
    """Enforce the 'group:<action>' policy rule for this request context.

    target_obj may be a CinderObject (flattened to its primitive data dict),
    a plain dict of extra target attributes, or None.
    """
    target = {'project_id': context.project_id,
              'user_id': context.user_id}
    if isinstance(target_obj, objects_base.CinderObject):
        # Versioned objects must be flattened to a plain dict before they
        # can be merged into the policy target.
        primitive = target_obj.obj_to_primitive()['versioned_object.data']
        target.update(primitive or {})
    else:
        target.update(target_obj or {})
    cinder.policy.enforce(context, 'group:%s' % action, target)
class API(base.Base):
"""API for interacting with the volume manager for groups."""
    def __init__(self, db_driver=None):
        """Set up RPC clients for the scheduler and volume services."""
        self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI()
        self.volume_rpcapi = volume_rpcapi.VolumeAPI()
        # volume_api is used for availability-zone listing/validation.
        self.volume_api = volume_api.API()
        super(API, self).__init__(db_driver)
    def _extract_availability_zone(self, availability_zone):
        """Validate and normalize the requested availability zone.

        Defaults to the configured default (or storage) zone when no AZ is
        given. If the requested zone is unknown, either falls back to the
        configured zone (when allow_availability_zone_fallback is set) or
        raises InvalidInput.
        """
        raw_zones = self.volume_api.list_availability_zones(enable_cache=True)
        availability_zones = set([az['name'] for az in raw_zones])
        if CONF.storage_availability_zone:
            availability_zones.add(CONF.storage_availability_zone)
        if availability_zone is None:
            if CONF.default_availability_zone:
                availability_zone = CONF.default_availability_zone
            else:
                # For backwards compatibility use the storage_availability_zone
                availability_zone = CONF.storage_availability_zone
        if availability_zone not in availability_zones:
            if CONF.allow_availability_zone_fallback:
                original_az = availability_zone
                availability_zone = (
                    CONF.default_availability_zone or
                    CONF.storage_availability_zone)
                LOG.warning("Availability zone '%(s_az)s' not found, falling "
                            "back to '%(s_fallback_az)s'.",
                            {'s_az': original_az,
                             's_fallback_az': availability_zone})
            else:
                msg = _("Availability zone '%(s_az)s' is invalid.")
                msg = msg % {'s_az': availability_zone}
                raise exception.InvalidInput(reason=msg)
        return availability_zone
def create(self, context, name, description, group_type,
volume_types, availability_zone=None):
check_policy(context, 'create')
req_volume_types = []
# NOTE: Admin context is required to get extra_specs of volume_types.
req_volume_types = (self.db.volume_types_get_by_name_or_id(
context.elevated(), volume_types))
if not uuidutils.is_uuid_like(group_type):
req_group_type = self.db.group_type_get_by_name(context,
group_type)
else:
req_group_type = self.db.group_type_get(context, group_type)
availability_zone = self._extract_availability_zone(availability_zone)
kwargs = {'user_id': context.user_id,
'project_id': context.project_id,
'availability_zone': availability_zone,
'status': c_fields.GroupStatus.CREATING,
'name': name,
'description': description,
'volume_type_ids': [t['id'] for t in req_volume_types],
'group_type_id': req_group_type['id'],
'replication_status': c_fields.ReplicationStatus.DISABLED}
try:
reservations = GROUP_QUOTAS.reserve(context,
project_id=context.project_id,
groups=1)
except exception.OverQuota as e:
quota_utils.process_reserve_over_quota(context, e,
resource='groups')
group = None
try:
group = objects.Group(context=context, **kwargs)
group.create()
except Exception:
with excutils.save_and_reraise_exception():
LOG.error("Error occurred when creating group"
" %s.", name)
GROUP_QUOTAS.rollback(context, reservations)
request_spec_list = []
filter_properties_list = []
for req_volume_type in req_volume_types:
request_spec = {'volume_type': req_volume_type.copy(),
'group_id': group.id}
filter_properties = {}
request_spec_list.append(request_spec)
filter_properties_list.append(filter_properties)
group_spec = {'group_type': req_group_type.copy(),
'group_id': group.id}
group_filter_properties = {}
# Update quota for groups
GROUP_QUOTAS.commit(context, reservations)
self._cast_create_group(context, group,
group_spec,
request_spec_list,
group_filter_properties,
filter_properties_list)
return group
    def create_from_src(self, context, name, description=None,
                        group_snapshot_id=None, source_group_id=None):
        """Create a group from a group snapshot or from another group.

        Exactly one of *group_snapshot_id* or *source_group_id* is expected;
        the group type and volume types are inherited from that source.

        :param context: the request context
        :param name: display name for the new group
        :param description: optional display description
        :param group_snapshot_id: id of the group snapshot to clone from
        :param source_group_id: id of the source group to clone from
        :returns: the created Group object
        :raises InvalidGroup: if the created group ends up with no host
        """
        check_policy(context, 'create')

        # Populate group_type_id and volume_type_ids
        group_type_id = None
        volume_type_ids = []
        if group_snapshot_id:
            grp_snap = self.get_group_snapshot(context, group_snapshot_id)
            group_type_id = grp_snap.group_type_id
            grp_snap_src_grp = self.get(context, grp_snap.group_id)
            volume_type_ids = [vt.id for vt in grp_snap_src_grp.volume_types]
        elif source_group_id:
            source_group = self.get(context, source_group_id)
            group_type_id = source_group.group_type_id
            volume_type_ids = [vt.id for vt in source_group.volume_types]

        kwargs = {
            'user_id': context.user_id,
            'project_id': context.project_id,
            'status': c_fields.GroupStatus.CREATING,
            'name': name,
            'description': description,
            'group_snapshot_id': group_snapshot_id,
            'source_group_id': source_group_id,
            'group_type_id': group_type_id,
            'volume_type_ids': volume_type_ids,
            'replication_status': c_fields.ReplicationStatus.DISABLED
        }

        try:
            reservations = GROUP_QUOTAS.reserve(context,
                                                project_id=context.project_id,
                                                groups=1)
        except exception.OverQuota as e:
            # Raises the appropriate user-facing quota exception.
            quota_utils.process_reserve_over_quota(context, e,
                                                   resource='groups')
        group = None
        try:
            group = objects.Group(context=context, **kwargs)
            group.create(group_snapshot_id=group_snapshot_id,
                         source_group_id=source_group_id)
        except exception.GroupNotFound:
            with excutils.save_and_reraise_exception():
                LOG.error("Source Group %(source_group)s not found when "
                          "creating group %(group)s from source.",
                          {'group': name, 'source_group': source_group_id})
                GROUP_QUOTAS.rollback(context, reservations)
        except exception.GroupSnapshotNotFound:
            with excutils.save_and_reraise_exception():
                LOG.error("Group snapshot %(group_snap)s not found when "
                          "creating group %(group)s from source.",
                          {'group': name, 'group_snap': group_snapshot_id})
                GROUP_QUOTAS.rollback(context, reservations)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error("Error occurred when creating group"
                          " %(group)s from group_snapshot %(grp_snap)s.",
                          {'group': name, 'grp_snap': group_snapshot_id})
                GROUP_QUOTAS.rollback(context, reservations)

        # Update quota for groups
        GROUP_QUOTAS.commit(context, reservations)

        # The group inherits its host from the source; no host means it
        # cannot be built on any backend.
        if not group.host:
            msg = _("No host to create group %s.") % group.id
            LOG.error(msg)
            raise exception.InvalidGroup(reason=msg)

        group.assert_not_frozen()

        if group_snapshot_id:
            self._create_group_from_group_snapshot(context, group,
                                                   group_snapshot_id)
        elif source_group_id:
            self._create_group_from_source_group(context, group,
                                                 source_group_id)

        return group
    def _create_group_from_group_snapshot(self, context, group,
                                          group_snapshot_id):
        """Create DB volume entries for *group* from a group snapshot.

        One volume entry is created per snapshot in the group snapshot (the
        real backend volumes are created later by the volume manager), then
        create_group_from_src is cast to the volume RPC API. On any failure
        the group record is destroyed and the exception re-raised.
        """
        try:
            group_snapshot = objects.GroupSnapshot.get_by_id(
                context, group_snapshot_id)
            snapshots = objects.SnapshotList.get_all_for_group_snapshot(
                context, group_snapshot.id)
            if not snapshots:
                msg = _("Group snapshot is empty. No group will be created.")
                raise exception.InvalidGroup(reason=msg)
            for snapshot in snapshots:
                kwargs = {}
                kwargs['availability_zone'] = group.availability_zone
                kwargs['group_snapshot'] = group_snapshot
                kwargs['group'] = group
                kwargs['snapshot'] = snapshot
                volume_type_id = snapshot.volume_type_id
                if volume_type_id:
                    kwargs['volume_type'] = (
                        objects.VolumeType.get_by_name_or_id(
                            context, volume_type_id))
                    # Create group volume_type mapping entries
                    try:
                        db.group_volume_type_mapping_create(context, group.id,
                                                            volume_type_id)
                    except exception.GroupVolumeTypeMappingExists:
                        # Only need to create one group volume_type mapping
                        # entry for the same combination, skipping.
                        LOG.info("A mapping entry already exists for group"
                                 " %(grp)s and volume type %(vol_type)s. "
                                 "Do not need to create again.",
                                 {'grp': group.id,
                                  'vol_type': volume_type_id})
                        pass

                # Since group snapshot is passed in, the following call will
                # create a db entry for the volume, but will not call the
                # volume manager to create a real volume in the backend yet.
                # If error happens, taskflow will handle rollback of quota
                # and removal of volume entry in the db.
                try:
                    self.volume_api.create(context,
                                           snapshot.volume_size,
                                           None,
                                           None,
                                           **kwargs)
                except exception.CinderException:
                    with excutils.save_and_reraise_exception():
                        LOG.error("Error occurred when creating volume "
                                  "entry from snapshot in the process of "
                                  "creating group %(group)s "
                                  "from group snapshot %(group_snap)s.",
                                  {'group': group.id,
                                   'group_snap': group_snapshot.id})
        except Exception:
            with excutils.save_and_reraise_exception():
                try:
                    # Remove the half-created group before re-raising.
                    group.destroy()
                finally:
                    LOG.error("Error occurred when creating group "
                              "%(group)s from group snapshot %(group_snap)s.",
                              {'group': group.id,
                               'group_snap': group_snapshot.id})

        volumes = objects.VolumeList.get_all_by_generic_group(context,
                                                              group.id)
        for vol in volumes:
            # Update the host field for the volume.
            vol.host = group.host
            vol.save()

        self.volume_rpcapi.create_group_from_src(
            context, group, group_snapshot)
    def _create_group_from_source_group(self, context, group,
                                        source_group_id):
        """Create DB volume entries for *group* cloned from another group.

        One volume entry is created per volume in the source group (the real
        backend volumes are created later by the volume manager), then
        create_group_from_src is cast to the volume RPC API. On any failure
        the group record is destroyed and the exception re-raised.
        """
        try:
            source_group = objects.Group.get_by_id(context,
                                                   source_group_id)
            source_vols = objects.VolumeList.get_all_by_generic_group(
                context, source_group.id)

            if not source_vols:
                msg = _("Source Group is empty. No group "
                        "will be created.")
                raise exception.InvalidGroup(reason=msg)

            for source_vol in source_vols:
                kwargs = {}
                kwargs['availability_zone'] = group.availability_zone
                kwargs['source_group'] = source_group
                kwargs['group'] = group
                kwargs['source_volume'] = source_vol
                volume_type_id = source_vol.volume_type_id
                if volume_type_id:
                    kwargs['volume_type'] = (
                        objects.VolumeType.get_by_name_or_id(
                            context, volume_type_id))
                    # Create group volume_type mapping entries
                    try:
                        db.group_volume_type_mapping_create(context, group.id,
                                                            volume_type_id)
                    except exception.GroupVolumeTypeMappingExists:
                        # Only need to create one group volume_type mapping
                        # entry for the same combination, skipping.
                        LOG.info("A mapping entry already exists for group"
                                 " %(grp)s and volume type %(vol_type)s. "
                                 "Do not need to create again.",
                                 {'grp': group.id,
                                  'vol_type': volume_type_id})
                        pass

                # Since source_group is passed in, the following call will
                # create a db entry for the volume, but will not call the
                # volume manager to create a real volume in the backend yet.
                # If error happens, taskflow will handle rollback of quota
                # and removal of volume entry in the db.
                try:
                    self.volume_api.create(context,
                                           source_vol.size,
                                           None,
                                           None,
                                           **kwargs)
                except exception.CinderException:
                    with excutils.save_and_reraise_exception():
                        LOG.error("Error occurred when creating cloned "
                                  "volume in the process of creating "
                                  "group %(group)s from "
                                  "source group %(source_group)s.",
                                  {'group': group.id,
                                   'source_group': source_group.id})
        except Exception:
            with excutils.save_and_reraise_exception():
                try:
                    # Remove the half-created group before re-raising.
                    group.destroy()
                finally:
                    LOG.error("Error occurred when creating "
                              "group %(group)s from source group "
                              "%(source_group)s.",
                              {'group': group.id,
                               'source_group': source_group.id})

        volumes = objects.VolumeList.get_all_by_generic_group(context,
                                                              group.id)
        for vol in volumes:
            # Update the host field for the volume.
            vol.host = group.host
            vol.save()

        self.volume_rpcapi.create_group_from_src(context, group,
                                                 None, source_group)
    def _cast_create_group(self, context, group,
                           group_spec,
                           request_spec_list,
                           group_filter_properties,
                           filter_properties_list):
        """Finish the request/group specs and cast creation to the scheduler.

        Fills every per-volume-type request spec with volume properties and
        qos specs, adds group-level properties to *group_spec*, then asks the
        scheduler to select a backend. The group is destroyed if spec
        building fails.
        """
        try:
            for request_spec in request_spec_list:
                volume_type = request_spec.get('volume_type')
                volume_type_id = None
                if volume_type:
                    volume_type_id = volume_type.get('id')

                specs = {}
                if volume_type_id:
                    qos_specs = volume_types.get_volume_type_qos_specs(
                        volume_type_id)
                    specs = qos_specs['qos_specs']
                if not specs:
                    # to make sure we don't pass empty dict
                    specs = None

                volume_properties = {
                    'size': 0,  # Need to populate size for the scheduler
                    'user_id': context.user_id,
                    'project_id': context.project_id,
                    'status': 'creating',
                    'attach_status': 'detached',
                    'encryption_key_id': request_spec.get('encryption_key_id'),
                    'display_description': request_spec.get('description'),
                    'display_name': request_spec.get('name'),
                    'volume_type_id': volume_type_id,
                    'group_type_id': group.group_type_id,
                }

                request_spec['volume_properties'] = volume_properties
                request_spec['qos_specs'] = specs

            group_properties = {
                'size': 0,  # Need to populate size for the scheduler
                'user_id': context.user_id,
                'project_id': context.project_id,
                'status': 'creating',
                'display_description': group_spec.get('description'),
                'display_name': group_spec.get('name'),
                'group_type_id': group.group_type_id,
            }

            group_spec['volume_properties'] = group_properties
            group_spec['qos_specs'] = None

        except Exception:
            with excutils.save_and_reraise_exception():
                try:
                    group.destroy()
                finally:
                    LOG.error("Error occurred when building request spec "
                              "list for group %s.", group.id)

        # Cast to the scheduler and let it handle whatever is needed
        # to select the target host for this group.
        self.scheduler_rpcapi.create_group(
            context,
            group,
            group_spec=group_spec,
            request_spec_list=request_spec_list,
            group_filter_properties=group_filter_properties,
            filter_properties_list=filter_properties_list)
    def update_quota(self, context, group, num, project_id=None):
        """Adjust group quota usage by *num* (may be negative).

        On failure the group is destroyed before re-raising; OverQuota is
        converted into the user-facing quota exception.
        """
        reserve_opts = {'groups': num}
        try:
            reservations = GROUP_QUOTAS.reserve(context,
                                                project_id=project_id,
                                                **reserve_opts)
            if reservations:
                GROUP_QUOTAS.commit(context, reservations)
        except Exception as e:
            with excutils.save_and_reraise_exception():
                try:
                    group.destroy()
                    if isinstance(e, exception.OverQuota):
                        quota_utils.process_reserve_over_quota(
                            context, e, resource='groups')
                finally:
                    LOG.error("Failed to update quota for group %s.", group.id)
    @wrap_check_policy
    def delete(self, context, group, delete_volumes=False):
        """Delete a group, optionally deleting its volumes as well.

        :param context: the request context
        :param group: the Group object to delete
        :param delete_volumes: when True, also delete member volumes and skip
            the group status precondition
        :raises InvalidGroup: when preconditions are not met
        """
        if not group.host:
            # Group never reached a backend: release quota and remove the DB
            # record directly, no RPC needed.
            self.update_quota(context, group, -1, group.project_id)

            LOG.debug("No host for group %s. Deleting from "
                      "the database.", group.id)
            group.destroy()

            return

        group.assert_not_frozen()

        if not delete_volumes and group.status not in (
                [c_fields.GroupStatus.AVAILABLE,
                 c_fields.GroupStatus.ERROR]):
            msg = _("Group status must be available or error, "
                    "but current status is: %s") % group.status
            raise exception.InvalidGroup(reason=msg)

        # NOTE(tommylikehu): Admin context is required to load group snapshots.
        with group.obj_as_admin():
            if group.group_snapshots:
                raise exception.InvalidGroup(
                    reason=_("Group has existing snapshots."))

        # TODO(smcginnis): Add conditional update handling for volumes
        # Should probably utilize the volume_api.delete code to handle
        # cascade snapshot deletion and force delete.
        volumes = self.db.volume_get_all_by_generic_group(context.elevated(),
                                                          group.id)
        if volumes and not delete_volumes:
            msg = (_("Group %s still contains volumes. "
                     "The delete-volumes flag is required to delete it.")
                   % group.id)
            LOG.error(msg)
            raise exception.InvalidGroup(reason=msg)

        volumes_model_update = []
        for volume in volumes:
            if volume['attach_status'] == "attached":
                msg = _("Volume in group %s is attached. "
                        "Need to detach first.") % group.id
                LOG.error(msg)
                raise exception.InvalidGroup(reason=msg)

            snapshots = objects.SnapshotList.get_all_for_volume(context,
                                                                volume['id'])
            if snapshots:
                msg = _("Volume in group still has "
                        "dependent snapshots.")
                LOG.error(msg)
                raise exception.InvalidGroup(reason=msg)

            volumes_model_update.append({'id': volume['id'],
                                         'status': 'deleting'})

        self.db.volumes_update(context, volumes_model_update)

        if delete_volumes:
            # We're overloading the term "delete_volumes" somewhat to also
            # mean to delete the group regardless of the state.
            expected = {}
        else:
            expected = {'status': (c_fields.GroupStatus.AVAILABLE,
                                   c_fields.GroupStatus.ERROR)}
        filters = [~db.group_has_group_snapshot_filter(),
                   ~db.group_has_volumes_filter(
                       attached_or_with_snapshots=delete_volumes),
                   ~db.group_creating_from_src(group_id=group.id)]
        values = {'status': c_fields.GroupStatus.DELETING}

        # Atomically flip to DELETING only while the preconditions still
        # hold, to avoid races with concurrent snapshot/group creation.
        if not group.conditional_update(values, expected, filters):
            if delete_volumes:
                reason = _('Group status must be available or error and must '
                           'not have dependent group snapshots')
            else:
                reason = _('Group must not have attached volumes, volumes '
                           'with snapshots, or dependent group snapshots')
            msg = _('Cannot delete group %(id)s. %(reason)s, and '
                    'it cannot be the source for an ongoing group or group '
                    'snapshot creation.') % {
                'id': group.id, 'reason': reason}
            raise exception.InvalidGroup(reason=msg)

        self.volume_rpcapi.delete_group(context, group)
    @wrap_check_policy
    def update(self, context, group, name, description,
               add_volumes, remove_volumes):
        """Update a group's name/description and/or its membership.

        *add_volumes* and *remove_volumes* are comma-separated UUID strings.
        Name/description are written directly to the DB; membership changes
        are validated here and then cast to the volume manager.
        """
        # Validate name.
        if name == group.name:
            name = None

        # Validate description.
        if description == group.description:
            description = None

        add_volumes_list = []
        remove_volumes_list = []
        if add_volumes:
            add_volumes = add_volumes.strip(',')
            add_volumes_list = add_volumes.split(',')
        if remove_volumes:
            remove_volumes = remove_volumes.strip(',')
            remove_volumes_list = remove_volumes.split(',')

        # A UUID may not appear in both lists at once.
        invalid_uuids = []
        for uuid in add_volumes_list:
            if uuid in remove_volumes_list:
                invalid_uuids.append(uuid)
        if invalid_uuids:
            msg = _("UUIDs %s are in both add and remove volume "
                    "list.") % invalid_uuids
            raise exception.InvalidVolume(reason=msg)

        volumes = self.db.volume_get_all_by_generic_group(context, group.id)

        # Validate volumes in add_volumes and remove_volumes.
        add_volumes_new = ""
        remove_volumes_new = ""
        if add_volumes_list:
            add_volumes_new = self._validate_add_volumes(
                context, volumes, add_volumes_list, group)
        if remove_volumes_list:
            remove_volumes_new = self._validate_remove_volumes(
                volumes, remove_volumes_list, group)

        if (name is None and description is None and not add_volumes_new and
                not remove_volumes_new):
            msg = (_("Cannot update group %(group_id)s "
                     "because no valid name, description, add_volumes, "
                     "or remove_volumes were provided.") %
                   {'group_id': group.id})
            raise exception.InvalidGroup(reason=msg)

        expected = {}
        fields = {'updated_at': timeutils.utcnow()}

        # Update name and description in db now. No need to
        # to send them over through an RPC call.
        if name is not None:
            fields['name'] = name
        if description is not None:
            fields['description'] = description
        if not add_volumes_new and not remove_volumes_new:
            # Only update name or description. Set status to available.
            fields['status'] = c_fields.GroupStatus.AVAILABLE
        else:
            expected['status'] = c_fields.GroupStatus.AVAILABLE
            fields['status'] = c_fields.GroupStatus.UPDATING

        if not group.conditional_update(fields, expected):
            msg = _("Group status must be available.")
            raise exception.InvalidGroup(reason=msg)

        # Do an RPC call only if the update request includes
        # adding/removing volumes. add_volumes_new and remove_volumes_new
        # are strings of volume UUIDs separated by commas with no spaces
        # in between.
        if add_volumes_new or remove_volumes_new:
            self.volume_rpcapi.update_group(
                context, group,
                add_volumes=add_volumes_new,
                remove_volumes=remove_volumes_new)
def _validate_remove_volumes(self, volumes, remove_volumes_list, group):
# Validate volumes in remove_volumes.
remove_volumes_new = ""
for volume in volumes:
if volume['id'] in remove_volumes_list:
if volume['status'] not in VALID_REMOVE_VOL_FROM_GROUP_STATUS:
msg = (_("Cannot remove volume %(volume_id)s from "
"group %(group_id)s because volume "
"is in an invalid state: %(status)s. Valid "
"states are: %(valid)s.") %
{'volume_id': volume['id'],
'group_id': group.id,
'status': volume['status'],
'valid': VALID_REMOVE_VOL_FROM_GROUP_STATUS})
raise exception.InvalidVolume(reason=msg)
# Volume currently in group. It will be removed from group.
if remove_volumes_new:
remove_volumes_new += ","
remove_volumes_new += volume['id']
for rem_vol in remove_volumes_list:
if rem_vol not in remove_volumes_new:
msg = (_("Cannot remove volume %(volume_id)s from "
"group %(group_id)s because it "
"is not in the group.") %
{'volume_id': rem_vol,
'group_id': group.id})
raise exception.InvalidVolume(reason=msg)
return remove_volumes_new
def _validate_add_volumes(self, context, volumes, add_volumes_list, group):
add_volumes_new = ""
for volume in volumes:
if volume['id'] in add_volumes_list:
# Volume already in group. Remove from add_volumes.
add_volumes_list.remove(volume['id'])
for add_vol in add_volumes_list:
try:
add_vol_ref = self.db.volume_get(context, add_vol)
except exception.VolumeNotFound:
msg = (_("Cannot add volume %(volume_id)s to "
"group %(group_id)s because volume cannot be "
"found.") %
{'volume_id': add_vol,
'group_id': group.id})
raise exception.InvalidVolume(reason=msg)
orig_group = add_vol_ref.get('group_id', None)
if orig_group:
# If volume to be added is already in the group to be updated,
# it should have been removed from the add_volumes_list in the
# beginning of this function. If we are here, it means it is
# in a different group.
msg = (_("Cannot add volume %(volume_id)s to group "
"%(group_id)s because it is already in "
"group %(orig_group)s.") %
{'volume_id': add_vol_ref['id'],
'group_id': group.id,
'orig_group': orig_group})
raise exception.InvalidVolume(reason=msg)
if add_vol_ref:
if add_vol_ref.project_id != group.project_id:
msg = (_("Cannot add volume %(volume_id)s to group "
"%(group_id)s as they belong to different "
"projects.") %
{'volume_id': add_vol_ref['id'],
'group_id': group.id})
raise exception.InvalidVolume(reason=msg)
add_vol_type_id = add_vol_ref.get('volume_type_id', None)
if not add_vol_type_id:
msg = (_("Cannot add volume %(volume_id)s to group "
"%(group_id)s because it has no volume "
"type.") %
{'volume_id': add_vol_ref['id'],
'group_id': group.id})
raise exception.InvalidVolume(reason=msg)
vol_type_ids = [v_type.id for v_type in group.volume_types]
if add_vol_type_id not in vol_type_ids:
msg = (_("Cannot add volume %(volume_id)s to group "
"%(group_id)s because volume type "
"%(volume_type)s is not supported by the "
"group.") %
{'volume_id': add_vol_ref['id'],
'group_id': group.id,
'volume_type': add_vol_type_id})
raise exception.InvalidVolume(reason=msg)
if (add_vol_ref['status'] not in
VALID_ADD_VOL_TO_GROUP_STATUS):
msg = (_("Cannot add volume %(volume_id)s to group "
"%(group_id)s because volume is in an "
"invalid state: %(status)s. Valid states are: "
"%(valid)s.") %
{'volume_id': add_vol_ref['id'],
'group_id': group.id,
'status': add_vol_ref['status'],
'valid': VALID_ADD_VOL_TO_GROUP_STATUS})
raise exception.InvalidVolume(reason=msg)
# group.host and add_vol_ref['host'] are in this format:
# 'host@backend#pool'. Extract host (host@backend) before
# doing comparison.
vol_host = vol_utils.extract_host(add_vol_ref['host'])
group_host = vol_utils.extract_host(group.host)
if group_host != vol_host:
raise exception.InvalidVolume(
reason=_("Volume is not local to this node."))
# Volume exists. It will be added to CG.
if add_volumes_new:
add_volumes_new += ","
add_volumes_new += add_vol_ref['id']
else:
msg = (_("Cannot add volume %(volume_id)s to group "
"%(group_id)s because volume does not exist.") %
{'volume_id': add_vol_ref['id'],
'group_id': group.id})
raise exception.InvalidVolume(reason=msg)
return add_volumes_new
def get(self, context, group_id):
group = objects.Group.get_by_id(context, group_id)
check_policy(context, 'get', group)
return group
def get_all(self, context, filters=None, marker=None, limit=None,
offset=None, sort_keys=None, sort_dirs=None):
check_policy(context, 'get_all')
if filters is None:
filters = {}
if filters:
LOG.debug("Searching by: %s", filters)
if (context.is_admin and 'all_tenants' in filters):
del filters['all_tenants']
groups = objects.GroupList.get_all(
context, filters=filters, marker=marker, limit=limit,
offset=offset, sort_keys=sort_keys, sort_dirs=sort_dirs)
else:
groups = objects.GroupList.get_all_by_project(
context, context.project_id, filters=filters, marker=marker,
limit=limit, offset=offset, sort_keys=sort_keys,
sort_dirs=sort_dirs)
return groups
@wrap_check_policy
def reset_status(self, context, group, status):
"""Reset status of generic group"""
if status not in c_fields.GroupStatus.ALL:
msg = _("Group status: %(status)s is invalid, valid status "
"are: %(valid)s.") % {'status': status,
'valid': c_fields.GroupStatus.ALL}
raise exception.InvalidGroupStatus(reason=msg)
field = {'updated_at': timeutils.utcnow(),
'status': status}
group.update(field)
group.save()
    @wrap_check_policy
    def create_group_snapshot(self, context, group, name, description):
        """Create a snapshot of every volume in *group*.

        Creates the group snapshot record and the per-volume snapshot DB
        entries, then casts the real snapshot creation to the volume manager.
        The group snapshot record is destroyed on failure.
        """
        group.assert_not_frozen()
        options = {'group_id': group.id,
                   'user_id': context.user_id,
                   'project_id': context.project_id,
                   'status': "creating",
                   'name': name,
                   'description': description,
                   'group_type_id': group.group_type_id}
        group_snapshot = None
        group_snapshot_id = None
        try:
            group_snapshot = objects.GroupSnapshot(context, **options)
            group_snapshot.create()
            group_snapshot_id = group_snapshot.id

            snap_name = group_snapshot.name
            snap_desc = group_snapshot.description
            # Admin context is needed to read the group's volumes here.
            with group.obj_as_admin():
                self.volume_api.create_snapshots_in_db(
                    context, group.volumes, snap_name, snap_desc,
                    None, group_snapshot_id)
        except Exception:
            with excutils.save_and_reraise_exception():
                try:
                    # If the group_snapshot has been created
                    if group_snapshot.obj_attr_is_set('id'):
                        group_snapshot.destroy()
                finally:
                    LOG.error("Error occurred when creating group_snapshot"
                              " %s.", group_snapshot_id)

        self.volume_rpcapi.create_group_snapshot(context, group_snapshot)

        return group_snapshot
    def delete_group_snapshot(self, context, group_snapshot, force=False):
        """Delete a group snapshot and all of its member snapshots.

        :param force: unused in this method body — deletion always requires
            'available' or 'error' status (NOTE(review): confirm whether
            *force* is consumed by a policy wrapper or caller).
        """
        check_policy(context, 'delete_group_snapshot')
        group_snapshot.assert_not_frozen()
        values = {'status': 'deleting'}
        expected = {'status': ('available', 'error')}
        # Refuse deletion while a group is being created from this snapshot.
        filters = [~db.group_creating_from_src(
            group_snapshot_id=group_snapshot.id)]
        res = group_snapshot.conditional_update(values, expected, filters)

        if not res:
            msg = _('GroupSnapshot status must be available or error, and no '
                    'Group can be currently using it as source for its '
                    'creation.')
            raise exception.InvalidGroupSnapshot(reason=msg)

        snapshots = objects.SnapshotList.get_all_for_group_snapshot(
            context, group_snapshot.id)

        # TODO(xyang): Add a new db API to update all snapshots statuses
        # in one db API call.
        for snap in snapshots:
            snap.status = c_fields.SnapshotStatus.DELETING
            snap.save()

        self.volume_rpcapi.delete_group_snapshot(context.elevated(),
                                                 group_snapshot)
def update_group_snapshot(self, context, group_snapshot, fields):
check_policy(context, 'update_group_snapshot')
group_snapshot.update(fields)
group_snapshot.save()
def get_group_snapshot(self, context, group_snapshot_id):
check_policy(context, 'get_group_snapshot')
group_snapshots = objects.GroupSnapshot.get_by_id(context,
group_snapshot_id)
return group_snapshots
def get_all_group_snapshots(self, context, filters=None, marker=None,
limit=None, offset=None, sort_keys=None,
sort_dirs=None):
check_policy(context, 'get_all_group_snapshots')
filters = filters or {}
if context.is_admin and 'all_tenants' in filters:
# Need to remove all_tenants to pass the filtering below.
del filters['all_tenants']
group_snapshots = objects.GroupSnapshotList.get_all(
context, filters=filters, marker=marker, limit=limit,
offset=offset, sort_keys=sort_keys, sort_dirs=sort_dirs)
else:
group_snapshots = objects.GroupSnapshotList.get_all_by_project(
context.elevated(), context.project_id, filters=filters,
marker=marker, limit=limit, offset=offset, sort_keys=sort_keys,
sort_dirs=sort_dirs)
return group_snapshots
def reset_group_snapshot_status(self, context, gsnapshot, status):
"""Reset status of group snapshot"""
check_policy(context, 'reset_group_snapshot_status')
if status not in c_fields.GroupSnapshotStatus.ALL:
msg = _("Group snapshot status: %(status)s is invalid, "
"valid statuses are: "
"%(valid)s.") % {'status': status,
'valid': c_fields.GroupSnapshotStatus.ALL}
raise exception.InvalidGroupSnapshotStatus(reason=msg)
field = {'updated_at': timeutils.utcnow(),
'status': status}
gsnapshot.update(field)
gsnapshot.save()
def _check_type(self, group):
if not group.is_replicated:
msg = _("Group %s is not a replication group type.") % group.id
LOG.error(msg)
raise exception.InvalidGroupType(reason=msg)
for vol_type in group.volume_types:
if not vol_utils.is_replicated_spec(vol_type.extra_specs):
msg = _("Volume type %s does not have 'replication_enabled' "
"spec key set to '<is> True'.") % vol_type.id
LOG.error(msg)
raise exception.InvalidVolumeType(reason=msg)
    # Replication group API (Tiramisu)
    @wrap_check_policy
    def enable_replication(self, context, group):
        """Enable replication for a replication-capable group.

        Validates the group's status/replication status and every member
        volume, marks them all ENABLING, then casts to the volume manager.
        """
        self._check_type(group)

        valid_status = [c_fields.GroupStatus.AVAILABLE]
        if group.status not in valid_status:
            params = {'valid': valid_status,
                      'current': group.status,
                      'id': group.id}
            msg = _("Group %(id)s status must be %(valid)s, "
                    "but current status is: %(current)s. "
                    "Cannot enable replication.") % params
            LOG.error(msg)
            raise exception.InvalidGroup(reason=msg)

        valid_rep_status = [c_fields.ReplicationStatus.DISABLED,
                            c_fields.ReplicationStatus.ENABLED]
        if group.replication_status not in valid_rep_status:
            params = {'valid': valid_rep_status,
                      'current': group.replication_status,
                      'id': group.id}
            msg = _("Group %(id)s replication status must be %(valid)s, "
                    "but current status is: %(current)s. "
                    "Cannot enable replication.") % params
            LOG.error(msg)
            raise exception.InvalidGroup(reason=msg)

        volumes = objects.VolumeList.get_all_by_generic_group(
            context.elevated(), group.id)

        valid_status = ['available', 'in-use']
        for vol in volumes:
            if vol.status not in valid_status:
                params = {'valid': valid_status,
                          'current': vol.status,
                          'id': vol.id}
                msg = _("Volume %(id)s status must be %(valid)s, "
                        "but current status is: %(current)s. "
                        "Cannot enable replication.") % params
                LOG.error(msg)
                raise exception.InvalidVolume(reason=msg)
            # replication_status could be set to enabled when volume is
            # created and the mirror is built.
            if vol.replication_status not in valid_rep_status:
                params = {'valid': valid_rep_status,
                          'current': vol.replication_status,
                          'id': vol.id}
                msg = _("Volume %(id)s replication status must be %(valid)s, "
                        "but current status is: %(current)s. "
                        "Cannot enable replication.") % params
                LOG.error(msg)
                raise exception.InvalidVolume(reason=msg)

            vol.replication_status = c_fields.ReplicationStatus.ENABLING
            vol.save()

        group.replication_status = c_fields.ReplicationStatus.ENABLING
        group.save()

        self.volume_rpcapi.enable_replication(context, group)
    @wrap_check_policy
    def disable_replication(self, context, group):
        """Disable replication on *group* and all of its volumes.

        Validates the group and member-volume replication states, marks them
        all DISABLING, then casts to the volume manager.
        """
        self._check_type(group)

        valid_status = [c_fields.GroupStatus.AVAILABLE,
                        c_fields.GroupStatus.ERROR]
        if group.status not in valid_status:
            params = {'valid': valid_status,
                      'current': group.status,
                      'id': group.id}
            msg = _("Group %(id)s status must be %(valid)s, "
                    "but current status is: %(current)s. "
                    "Cannot disable replication.") % params
            LOG.error(msg)
            raise exception.InvalidGroup(reason=msg)

        valid_rep_status = [c_fields.ReplicationStatus.ENABLED,
                            c_fields.ReplicationStatus.ERROR]
        if group.replication_status not in valid_rep_status:
            params = {'valid': valid_rep_status,
                      'current': group.replication_status,
                      'id': group.id}
            msg = _("Group %(id)s replication status must be %(valid)s, "
                    "but current status is: %(current)s. "
                    "Cannot disable replication.") % params
            LOG.error(msg)
            raise exception.InvalidGroup(reason=msg)

        volumes = objects.VolumeList.get_all_by_generic_group(
            context.elevated(), group.id)

        for vol in volumes:
            if vol.replication_status not in valid_rep_status:
                params = {'valid': valid_rep_status,
                          'current': vol.replication_status,
                          'id': vol.id}
                msg = _("Volume %(id)s replication status must be %(valid)s, "
                        "but current status is: %(current)s. "
                        "Cannot disable replication.") % params
                LOG.error(msg)
                raise exception.InvalidVolume(reason=msg)

            vol.replication_status = c_fields.ReplicationStatus.DISABLING
            vol.save()

        group.replication_status = c_fields.ReplicationStatus.DISABLING
        group.save()

        self.volume_rpcapi.disable_replication(context, group)
    @wrap_check_policy
    def failover_replication(self, context, group,
                             allow_attached_volume=False,
                             secondary_backend_id=None):
        """Fail replication over to a secondary backend for *group*.

        :param allow_attached_volume: when False, refuse failover if any
            member volume is 'in-use'
        :param secondary_backend_id: optional target backend id, passed
            through to the volume RPC API
        """
        self._check_type(group)

        valid_status = [c_fields.GroupStatus.AVAILABLE]
        if group.status not in valid_status:
            params = {'valid': valid_status,
                      'current': group.status,
                      'id': group.id}
            msg = _("Group %(id)s status must be %(valid)s, "
                    "but current status is: %(current)s. "
                    "Cannot failover replication.") % params
            LOG.error(msg)
            raise exception.InvalidGroup(reason=msg)

        valid_rep_status = [c_fields.ReplicationStatus.ENABLED,
                            c_fields.ReplicationStatus.FAILED_OVER]
        if group.replication_status not in valid_rep_status:
            params = {'valid': valid_rep_status,
                      'current': group.replication_status,
                      'id': group.id}
            msg = _("Group %(id)s replication status must be %(valid)s, "
                    "but current status is: %(current)s. "
                    "Cannot failover replication.") % params
            LOG.error(msg)
            raise exception.InvalidGroup(reason=msg)

        volumes = objects.VolumeList.get_all_by_generic_group(
            context.elevated(), group.id)

        valid_status = ['available', 'in-use']
        for vol in volumes:
            if vol.status not in valid_status:
                params = {'valid': valid_status,
                          'current': vol.status,
                          'id': vol.id}
                msg = _("Volume %(id)s status must be %(valid)s, "
                        "but current status is: %(current)s. "
                        "Cannot failover replication.") % params
                LOG.error(msg)
                raise exception.InvalidVolume(reason=msg)
            if vol.status == 'in-use' and not allow_attached_volume:
                msg = _("Volume %s is attached but allow_attached_volume flag "
                        "is False. Cannot failover replication.") % vol.id
                LOG.error(msg)
                raise exception.InvalidVolume(reason=msg)
            if vol.replication_status not in valid_rep_status:
                params = {'valid': valid_rep_status,
                          'current': vol.replication_status,
                          'id': vol.id}
                msg = _("Volume %(id)s replication status must be %(valid)s, "
                        "but current status is: %(current)s. "
                        "Cannot failover replication.") % params
                LOG.error(msg)
                raise exception.InvalidVolume(reason=msg)

            vol.replication_status = c_fields.ReplicationStatus.FAILING_OVER
            vol.save()

        group.replication_status = c_fields.ReplicationStatus.FAILING_OVER
        group.save()

        self.volume_rpcapi.failover_replication(context, group,
                                                allow_attached_volume,
                                                secondary_backend_id)
@wrap_check_policy
def list_replication_targets(self, context, group):
self._check_type(group)
return self.volume_rpcapi.list_replication_targets(context, group)
| |
import webapp2
import os
from google.appengine.ext.webapp import template
from google.appengine.api import users
from google.appengine.ext import db
from google.appengine.ext import ndb
from google.appengine.api import images
from src.model.WorkModels import Work,PhotoModel,WorkResourceAttribute
class WorkAdminCreate(webapp2.RequestHandler):
    """Admin handler: render the new-work form (GET), create a Work (POST)."""

    #create project page render
    def get(self):
        # Require login, then restrict to the hard-coded admin whitelist.
        user=users.get_current_user()
        if user is None:
            self.redirect(users.create_login_url(self.request.uri))
        else:
            self.response.headers["Content-Type"]="text/html"
            admins=['subhasisdutta300887','subhasistubai','test@example.com']
            if user.nickname() not in admins:
                self.response.out.write(callAccessDeniedPage())
            else:
                template_values={
                    'pageTitle':"Create New Work Project",
                }
                path=os.path.join(os.path.dirname(__file__),'../../template/createWork.html')
                page=template.render(path,template_values)
                self.response.out.write(page)

    #create project post
    def post(self):
        # NOTE(review): unlike get(), this POST has no login/admin check —
        # confirm the route is protected elsewhere (e.g. app.yaml).
        handle=self.request.get("handle")
        category=self.request.get("category")
        title=self.request.get("title")
        description=self.request.get("description")
        # Uploaded images are stored inline as raw blobs.
        icon=self.request.get("iconImage")
        largeImage=self.request.get("largeImage")
        iconImg=db.Blob(icon)
        largeImg=db.Blob(largeImage)
        # The form "handle" doubles as the datastore entity id.
        workItem=Work(id=handle)
        workItem.category=category
        workItem.title=title
        workItem.order=int(self.request.get("order"))
        workItem.description=description
        workItem.iconImage=iconImg
        workItem.bigImage=largeImg
        workItem.put()
        self.response.headers["Content-Type"]="text/html"
        self.response.out.write("""
            <html>
                <head>
                    <title>New WORK Created</title>
                </head>
                <body>
                    <h3> New WORK Created</h3>
            </body></html>
            """)
class WorkAdminEdit(webapp2.RequestHandler):
    """Admin handler: render the edit-work form (GET), update a Work (POST)."""

    #display the 3 form controller to get
    def get(self):
        # Require login, then restrict to the hard-coded admin whitelist.
        user=users.get_current_user()
        if user is None:
            self.redirect(users.create_login_url(self.request.uri))
        else:
            self.response.headers["Content-Type"]="text/html"
            admins=['subhasisdutta300887','subhasistubai','test@example.com']
            if user.nickname() not in admins:
                self.response.out.write(callAccessDeniedPage())
            else:
                # The Work entity id is the "name" query parameter.
                work_name=self.request.get("name")
                work_key=ndb.Key('Work',work_name)
                work=work_key.get()
                if work is None:
                    self.response.out.write(callNoSuchWorkPage())
                else:
                    # Batch-resolve referenced attribute and photo entities.
                    attrList=ndb.get_multi(work.attributes)
                    photoList=ndb.get_multi(work.photoGallery)
                    template_values={
                        'pageTitle':"Edit Work",
                        'work':work,
                        'attrList': attrList,
                        'photoList': photoList
                    }
                    path=os.path.join(os.path.dirname(__file__),'../../template/editWork.html')
                    page=template.render(path,template_values)
                    self.response.out.write(page)

    def post(self):
        user=users.get_current_user()
        if user is None:
            self.redirect(users.create_login_url(self.request.uri))
        else:
            self.response.headers["Content-Type"]="text/html"
            admins=['subhasisdutta300887','subhasistubai','test@example.com']
            if user.nickname() not in admins:
                self.response.out.write(callAccessDeniedPage())
            else:
                work_name=self.request.get("name")
                work_key=ndb.Key('Work',work_name)
                work=work_key.get()
                if work is None:
                    self.response.out.write(callNoSuchWorkPage())
                else:
                    title=self.request.get("title")
                    description=self.request.get("description")
                    work.title=title
                    work.description=description
                    work.order=int(self.request.get("order"))
                    # NOTE(review): bool() of any non-empty string is True,
                    # so a submitted value of "false"/"0" still publishes —
                    # confirm the form only sends this field when checked.
                    work.publish=bool(self.request.get("publish"))
                    work.put()
                    self.response.headers["Content-Type"]="text/html"
                    self.response.out.write("""
                        <html>
                            <head>
                                <title>Project main Updated</title>
                            </head>
                            <body>
                                <h3> Project main Updated</h3>
                        </body></html>
                        """)
class AddWorkAttribute(webapp2.RequestHandler):
    """Create a WorkResourceAttribute and attach it to a Work's attribute list.

    NOTE(review): this POST performs no login/admin check, unlike the other
    admin handlers -- confirm that is intended.
    """

    def post(self):
        attribute = WorkResourceAttribute(
            name=self.request.get("name"),
            htmlDescription=self.request.get("htmlDescription"),
            order=int(self.request.get("order")))
        attribute.put()
        work = ndb.Key('Work', self.request.get("workHandle")).get()
        work.attributes.append(attribute.key)
        work.put()
        self.response.headers["Content-Type"] = "text/html"
        self.response.out.write("""
        <html>
        <head>
        <title>New Attribute Added</title>
        </head>
        <body>
        <h3> New Attribute Added</h3>
        </body></html>
        """)
class AddPhotoTOCollection(webapp2.RequestHandler):
    """Admin page for uploading a standalone photo into the photo collection."""

    def get(self):
        """Render the photo-upload form, enforcing login and admin membership."""
        user = users.get_current_user()
        if user is None:
            self.redirect(users.create_login_url(self.request.uri))
            return
        self.response.headers["Content-Type"] = "text/html"
        allowed = ['subhasisdutta300887', 'subhasistubai', 'test@example.com']
        if user.nickname() not in allowed:
            self.response.out.write(callAccessDeniedPage())
            return
        context = {
            'pageTitle': "Create New Photo",
        }
        template_path = os.path.join(os.path.dirname(__file__), '../../template/createPhoto.html')
        self.response.out.write(template.render(template_path, context))

    def post(self):
        """Store the uploaded image plus a 250x170 thumbnail as a PhotoModel.

        NOTE(review): unlike get(), this POST performs no login/admin check --
        confirm that is intended.
        """
        raw_image = self.request.get("image")
        photo = PhotoModel(title=self.request.get("title"),
                           image=db.Blob(raw_image),
                           type=self.request.get("type"),
                           thumbnail=db.Blob(images.resize(raw_image, 250, 170)),
                           caption=self.request.get("caption"))
        photo.put()
        self.response.headers["Content-Type"] = "text/html"
        self.response.out.write("""
        <html>
        <head>
        <title>Photo Added</title>
        </head>
        <body>
        <h3> New Photo Added</h3>
        </body></html>
        """)
class MapPhotoToWork(webapp2.RequestHandler):
    """Admin page that attaches an existing PhotoModel to a Work's gallery."""

    def get(self):
        """Render the mapping form listing the work's photos and the whole collection."""
        user = users.get_current_user()
        if user is None:
            self.redirect(users.create_login_url(self.request.uri))
            return
        self.response.headers["Content-Type"] = "text/html"
        allowed = ['subhasisdutta300887', 'subhasistubai', 'test@example.com']
        if user.nickname() not in allowed:
            self.response.out.write(callAccessDeniedPage())
            return
        work = ndb.Key('Work', self.request.get("name")).get()
        if work is None:
            self.response.out.write(callNoSuchWorkPage())
            return
        context = {
            'pageTitle': "Map Photo To Work : ",
            'work': work,
            'attrList': ndb.get_multi(work.attributes),
            'photoList': ndb.get_multi(work.photoGallery),
            'photoCollection': PhotoModel.query()
        }
        template_path = os.path.join(os.path.dirname(__file__), '../../template/mapPhotoWork.html')
        self.response.out.write(template.render(template_path, context))

    def post(self):
        """Append the chosen photo's key to the work's photoGallery list."""
        photo_key = ndb.Key(urlsafe=self.request.get("photoKey"))
        work = ndb.Key('Work', self.request.get("name")).get()
        work.photoGallery.append(photo_key)
        work.put()
        self.response.headers["Content-Type"] = "text/html"
        self.response.out.write("""
        <html>
        <head>
        <title>New Photo Added</title>
        </head>
        <body>
        <h3> New Photo Added</h3>
        </body></html>
        """)
def callNoSuchWorkPage():
    """Render and return the shared error page for a missing Work entity."""
    context = {
        'pageTitle': 'No Such Work!!!!',
        'title': "ERROR! Requested Work cannot be found",
        'message': "ERROR!! The requested work was not found. Please check the name again."
    }
    error_template = os.path.join(os.path.dirname(__file__), '../../template/error.html')
    return template.render(error_template, context)
def callAccessDeniedPage():
    """Render and return the shared error page for a non-admin visitor."""
    context = {
        'pageTitle': 'Access Denied!!!!',
        'title': "ERROR! You don't have access to this page",
        'message': "ERROR!! You don't have access to this page."
    }
    error_template = os.path.join(os.path.dirname(__file__), '../../template/error.html')
    return template.render(error_template, context)
| |
#!/usr/bin/env python
from __future__ import print_function

import glob
import re
import shutil
import sys
import time
import unittest

from ruffus import pipeline_run, pipeline_printout, suffix, transform, split, merge, dbdict, Pipeline
from ruffus.ruffus_exceptions import RethrownJobError
from ruffus.ruffus_utility import RUFFUS_HISTORY_FILE, CHECKSUM_FILE_TIMESTAMPS, \
    CHECKSUM_HISTORY_TIMESTAMPS, CHECKSUM_FUNCTIONS, CHECKSUM_FUNCTIONS_AND_PARAMS
from ruffus.ruffus_utility import get_default_history_file_name
"""
test_job_completion_transform.py
test several cases where the dbdict should be updated
"""
import os
tempdir = os.path.relpath(os.path.abspath(os.path.splitext(__file__)[0])) + "/"
# add grandparent to search path for testing
grandparent_dir = os.path.abspath(
os.path.join(os.path.dirname(__file__), "..", ".."))
sys.path.insert(0, grandparent_dir)
# module name = script name without extension
module_name = os.path.splitext(os.path.basename(__file__))[0]
# funky code to import by file name
parent_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
# ___________________________________________________________________________
#
# imports
# ___________________________________________________________________________
try:
from StringIO import StringIO
except:
from io import StringIO
possible_chksms = list(range(CHECKSUM_FUNCTIONS_AND_PARAMS + 1))
tempdir = 'tmp_test_job_completion/'
input_file = os.path.join(tempdir, 'input.txt')
transform1_out = input_file.replace('.txt', '.output')
split1_outputs = [os.path.join(tempdir, 'split.out1.txt'),
os.path.join(tempdir, 'split.out2.txt')]
merge2_output = os.path.join(tempdir, 'merged.out')
runtime_data = []
@transform(input_file, suffix('.txt'), '.output', runtime_data)
def transform1(in_name, out_name, how_many):
    """Copy the input file verbatim to the output file."""
    # open the output first (as the original did) so it exists even if the
    # input read fails
    with open(out_name, 'w') as dst, open(in_name) as src:
        dst.write(src.read())
@transform(input_file, suffix('.txt'), '.output', runtime_data)
def transform_raise_error(in_name, out_name, how_many):
    """Copy input to output, then fail unless 'okay' is in runtime_data.

    Produces its output *before* raising, so tests can check the
    "left over from a failed run?" detection.
    """
    with open(out_name, 'w') as dst, open(in_name) as src:
        dst.write(src.read())
    if 'okay' not in how_many:
        raise RuntimeError("'okay' wasn't in runtime_data!")
@split(input_file, split1_outputs)
def split1(in_name, out_names):
    """Write the input contents (plus a trailing newline) to every output file."""
    for target in out_names:
        # re-read the input per target, exactly as the original did
        with open(target, 'w') as dst, open(in_name) as src:
            dst.write(src.read() + '\n')
@merge(split1, merge2_output)
def merge2(in_names, out_name):
    """Concatenate every input file, newline-separated, into the output file."""
    with open(out_name, 'w') as dst:
        for source in in_names:
            with open(source) as src:
                dst.write(src.read() + '\n')
# CHECKSUM_FILE_TIMESTAMPS = 0 # only rerun when the file timestamps are out of date (classic mode)
# CHECKSUM_HISTORY_TIMESTAMPS = 1 # also rerun when the history shows a job as being out of date
# CHECKSUM_FUNCTIONS = 2 # also rerun when function body has changed
# CHECKSUM_FUNCTIONS_AND_PARAMS = 3 # also rerun when function parameters have changed
def cleanup_tmpdir():
    """Remove everything inside tempdir plus the ruffus history file.

    Replaces the original ``os.system('rm -f ...')`` shell-out with
    glob + os.remove so the helper works without a POSIX shell (e.g. on
    Windows) and does not spawn a subprocess per call.
    """
    victims = glob.glob(os.path.join(tempdir, '*'))
    victims.append(get_default_history_file_name())
    for victim in victims:
        try:
            os.remove(victim)
        except OSError:
            # mirror ``rm -f``: silently ignore missing/unremovable entries
            pass


# number of Pipeline objects built so far; gives each a unique name
count_pipelines = 0
class TestJobCompletion(unittest.TestCase):
    """Exercise ruffus job-completion checksum behaviour at every checksum level.

    NOTE(review): the original file defined test_output_up_to_date_func_changed
    and test_newstyle_output_up_to_date_func_changed TWICE each.  The first
    definition of each pair was shadowed by the second (so unittest never ran
    it) and, worse, overwrote transform1.__code__ without restoring it.  The
    dead duplicates have been removed; the surviving versions swap the code
    objects and restore them afterwards.
    """

    def setUp(self):
        # tolerate a pre-existing temp directory left by an earlier run
        try:
            os.mkdir(tempdir)
        except OSError:
            pass

    def create_pipeline(self):
        """
        Create new pipeline on the fly without using decorators
        """
        global count_pipelines
        count_pipelines = count_pipelines + 1
        test_pipeline = Pipeline("test %d" % count_pipelines)

        test_pipeline.transform(task_func=transform1,
                                input=input_file,
                                filter=suffix('.txt'),
                                output='.output',
                                extras=[runtime_data])

        test_pipeline.transform(task_func=transform_raise_error,
                                input=input_file,
                                filter=suffix('.txt'),
                                output='.output',
                                extras=[runtime_data])

        test_pipeline.split(task_func=split1,
                            input=input_file,
                            output=split1_outputs)

        test_pipeline.merge(task_func=merge2,
                            input=split1,
                            output=merge2_output)
        return test_pipeline

    def test_output_doesnt_exist(self):
        """Input file exists, output doesn't exist"""
        # output doesn't exist-- should run for all levels
        # create a new input file
        cleanup_tmpdir()
        with open(input_file, 'w') as outfile:
            outfile.write('testme')
        for chksm in possible_chksms:
            s = StringIO()
            pipeline_printout(s, [transform1], verbose=6,
                              checksum_level=chksm, pipeline="main")
            self.assertTrue(re.search(
                r'Job needs update:.*Missing file.*\[tmp_test_job_completion/input.output\]', s.getvalue(), re.DOTALL))

    def test_output_out_of_date(self):
        """Input file exists, output out of date"""
        # output exists but is out of date-- should run for all levels
        cleanup_tmpdir()
        with open(transform1_out, 'w') as outfile:
            outfile.write('testme')
        time.sleep(0.1)
        with open(input_file, 'w') as outfile:
            outfile.write('testme')
        for chksm in possible_chksms:
            s = StringIO()
            pipeline_printout(s, [transform1], verbose=6,
                              checksum_level=chksm, pipeline="main")
            self.assertIn('Job needs update:', s.getvalue())
            if chksm == CHECKSUM_FILE_TIMESTAMPS:
                self.assertIn('Input files:', s.getvalue())
                self.assertIn('Output files:', s.getvalue())
            else:
                self.assertIn('left over from a failed run?', s.getvalue())

    def test_output_timestamp_okay(self):
        """Input file exists, output timestamp up to date"""
        # output exists and timestamp is up to date-- not run for lvl 0, run for all others
        cleanup_tmpdir()
        with open(input_file, 'w') as outfile:
            outfile.write('testme')
        time.sleep(0.1)
        with open(transform1_out, 'w') as outfile:
            outfile.write('testme')
        for chksm in possible_chksms:
            s = StringIO()
            pipeline_printout(s, [transform1], verbose=6,
                              checksum_level=chksm, pipeline="main")
            if chksm == CHECKSUM_FILE_TIMESTAMPS:
                #self.assertIn('Job up-to-date', s.getvalue())
                pass
            else:
                self.assertIn('Job needs update:', s.getvalue())
                self.assertIn('left over from a failed run?',
                              s.getvalue())

    def test_output_up_to_date(self):
        """Input file exists, output up to date"""
        # output is up to date-- not run for any levels
        cleanup_tmpdir()
        with open(input_file, 'w') as outfile:
            outfile.write('testme')
        pipeline_run([transform1], verbose=0,
                     checksum_level=CHECKSUM_HISTORY_TIMESTAMPS, pipeline="main")
        for chksm in possible_chksms:
            s = StringIO()
            pipeline_printout(s, [transform1], verbose=6,
                              checksum_level=chksm, pipeline="main")
            #self.assertIn('Job up-to-date', s.getvalue())
            pass

    def test_output_up_to_date_func_changed(self):
        """Input file exists, output up to date, function body changed"""
        # output is up to date, but function body changed (e.g., source different)
        cleanup_tmpdir()
        with open(input_file, 'w') as outfile:
            outfile.write('testme')
        pipeline_run([transform1], verbose=0,
                     checksum_level=CHECKSUM_HISTORY_TIMESTAMPS, pipeline="main")
        # simulate source change
        if sys.hexversion >= 0x03000000:
            split1.__code__, transform1.__code__ = transform1.__code__, split1.__code__
        else:
            split1.func_code, transform1.func_code = transform1.func_code, split1.func_code
        for chksm in possible_chksms:
            s = StringIO()
            pipeline_printout(s, [transform1], verbose=6,
                              checksum_level=chksm, pipeline="main")
            if chksm >= CHECKSUM_FUNCTIONS:
                self.assertIn('Job needs update:', s.getvalue())
                self.assertIn('Pipeline function has changed',
                              s.getvalue())
            else:
                #self.assertIn('Job up-to-date', s.getvalue())
                pass
        # clean up our function-changing mess!
        if sys.hexversion >= 0x03000000:
            split1.__code__, transform1.__code__ = transform1.__code__, split1.__code__
        else:
            split1.func_code, transform1.func_code = transform1.func_code, split1.func_code

    def test_output_up_to_date_param_changed(self):
        """Input file exists, output up to date, parameter to function changed"""
        # output is up to date, but function body changed (e.g., source different)
        cleanup_tmpdir()
        with open(input_file, 'w') as outfile:
            outfile.write('testme')
        pipeline_run([transform1], verbose=0,
                     checksum_level=CHECKSUM_HISTORY_TIMESTAMPS, pipeline="main")
        runtime_data.append('different')  # simulate change to config file
        for chksm in possible_chksms:
            s = StringIO()
            pipeline_printout(s, [transform1], verbose=6,
                              checksum_level=chksm, pipeline="main")
            if chksm >= CHECKSUM_FUNCTIONS_AND_PARAMS:
                self.assertIn('Job needs update:', s.getvalue())
                self.assertIn('Pipeline parameters have changed',
                              s.getvalue())
            else:
                #self.assertIn('Job up-to-date', s.getvalue())
                pass

    def test_split_output(self):
        """test multiple-output checksums"""
        # outputs out of date
        cleanup_tmpdir()
        with open(input_file, 'w') as outfile:
            outfile.write('testme')
        pipeline_run([split1], verbose=0,
                     checksum_level=CHECKSUM_HISTORY_TIMESTAMPS, pipeline="main")
        time.sleep(.5)
        with open(input_file, 'w') as outfile:
            outfile.write('testme')
        for chksm in possible_chksms:
            s = StringIO()
            pipeline_printout(s, [split1], verbose=6,
                              checksum_level=chksm, pipeline="main")
            self.assertIn('Job needs update:', s.getvalue())

        # all outputs incorrectly generated
        cleanup_tmpdir()
        with open(input_file, 'w') as outfile:
            outfile.write('testme')
        time.sleep(.5)
        for f in split1_outputs:
            with open(f, 'w') as outfile:
                outfile.write('testme')
        for chksm in possible_chksms:
            s = StringIO()
            pipeline_printout(s, [split1], verbose=6,
                              checksum_level=chksm, pipeline="main")
            if chksm >= CHECKSUM_HISTORY_TIMESTAMPS:
                self.assertIn('Job needs update:', s.getvalue())
                self.assertIn('left over from a failed run?',
                              s.getvalue())
            else:
                #self.assertIn('Job up-to-date', s.getvalue())
                pass

        # one output incorrectly generated
        cleanup_tmpdir()
        with open(input_file, 'w') as outfile:
            outfile.write('testme')
        pipeline_run([split1], verbose=0,
                     checksum_level=CHECKSUM_HISTORY_TIMESTAMPS, pipeline="main")
        job_history = dbdict.open(
            get_default_history_file_name(), picklevalues=True)
        del job_history[os.path.relpath(split1_outputs[0])]
        for chksm in possible_chksms:
            s = StringIO()
            pipeline_printout(s, [split1], verbose=6,
                              checksum_level=chksm, pipeline="main")
            if chksm >= CHECKSUM_HISTORY_TIMESTAMPS:
                self.assertIn('Job needs update:', s.getvalue())
                self.assertIn('left over from a failed run?',
                              s.getvalue())
            else:
                #self.assertIn('Job up-to-date', s.getvalue())
                pass

    def test_merge_output(self):
        """test multiple-input checksums"""
        # one output incorrectly generated
        cleanup_tmpdir()
        with open(input_file, 'w') as outfile:
            outfile.write('testme')
        pipeline_run([split1], verbose=0,
                     checksum_level=CHECKSUM_HISTORY_TIMESTAMPS, pipeline="main")
        job_history = dbdict.open(
            get_default_history_file_name(), picklevalues=True)
        del job_history[os.path.relpath(split1_outputs[0])]
        for chksm in possible_chksms:
            s = StringIO()
            pipeline_printout(s, [merge2], verbose=6,
                              checksum_level=chksm, pipeline="main")
            if chksm >= CHECKSUM_HISTORY_TIMESTAMPS:
                self.assertIn('Job needs update:', s.getvalue())
                self.assertIn('left over from a failed run?', s.getvalue())
            else:
                #self.assertIn('Job up-to-date', s.getvalue())
                pass

        # make sure the jobs run fine
        cleanup_tmpdir()
        with open(input_file, 'w') as outfile:
            outfile.write('testme')
        pipeline_run([merge2], verbose=0,
                     checksum_level=CHECKSUM_HISTORY_TIMESTAMPS, pipeline="main")
        for chksm in possible_chksms:
            s = StringIO()
            pipeline_printout(s, [merge2], verbose=6,
                              checksum_level=chksm, pipeline="main")
            #self.assertIn('Job up-to-date', s.getvalue())
            self.assertNotIn('Job needs update:', s.getvalue())
            self.assertNotIn('left over from a failed run?', s.getvalue())

    def test_newstyle_output_doesnt_exist(self):
        """Input file exists, output doesn't exist"""
        # output doesn't exist-- should run for all levels
        # create a new input file
        cleanup_tmpdir()
        with open(input_file, 'w') as outfile:
            outfile.write('testme')
        for chksm in possible_chksms:
            s = StringIO()
            self.create_pipeline().printout(
                s, [transform1], verbose=6, checksum_level=chksm)
            self.assertTrue(re.search(
                r'Job needs update:.*Missing file.*\[tmp_test_job_completion/input.output\]', s.getvalue(), re.DOTALL))

    def test_newstyle_output_out_of_date(self):
        """Input file exists, output out of date"""
        # output exists but is out of date-- should run for all levels
        cleanup_tmpdir()
        with open(transform1_out, 'w') as outfile:
            outfile.write('testme')
        time.sleep(0.1)
        with open(input_file, 'w') as outfile:
            outfile.write('testme')
        for chksm in possible_chksms:
            s = StringIO()
            self.create_pipeline().printout(
                s, [transform1], verbose=6, checksum_level=chksm)
            self.assertIn('Job needs update:', s.getvalue())
            if chksm == CHECKSUM_FILE_TIMESTAMPS:
                self.assertIn('Input files:', s.getvalue())
                self.assertIn('Output files:', s.getvalue())
            else:
                self.assertIn('left over from a failed run?', s.getvalue())

    def test_newstyle_output_timestamp_okay(self):
        """Input file exists, output timestamp up to date"""
        # output exists and timestamp is up to date-- not run for lvl 0, run for all others
        cleanup_tmpdir()
        with open(input_file, 'w') as outfile:
            outfile.write('testme')
        time.sleep(0.1)
        with open(transform1_out, 'w') as outfile:
            outfile.write('testme')
        for chksm in possible_chksms:
            s = StringIO()
            self.create_pipeline().printout(
                s, [transform1], verbose=6, checksum_level=chksm)
            if chksm == CHECKSUM_FILE_TIMESTAMPS:
                #self.assertIn('Job up-to-date', s.getvalue())
                pass
            else:
                self.assertIn('Job needs update:', s.getvalue())
                self.assertIn('left over from a failed run?',
                              s.getvalue())

    def test_newstyle_output_up_to_date(self):
        """Input file exists, output up to date"""
        test_pipeline = self.create_pipeline()
        # output is up to date-- not run for any levels
        cleanup_tmpdir()
        with open(input_file, 'w') as outfile:
            outfile.write('testme')
        test_pipeline.run([transform1], verbose=0,
                          checksum_level=CHECKSUM_HISTORY_TIMESTAMPS)
        for chksm in possible_chksms:
            s = StringIO()
            test_pipeline.printout(
                s, [transform1], verbose=6, checksum_level=chksm)
            #self.assertIn('Job up-to-date', s.getvalue())
            pass

    def test_newstyle_output_up_to_date_func_changed(self):
        """Input file exists, output up to date, function body changed"""
        test_pipeline = self.create_pipeline()
        # output is up to date, but function body changed (e.g., source different)
        cleanup_tmpdir()
        with open(input_file, 'w') as outfile:
            outfile.write('testme')
        test_pipeline.run([transform1], verbose=0,
                          checksum_level=CHECKSUM_HISTORY_TIMESTAMPS)
        # simulate source change
        if sys.hexversion >= 0x03000000:
            split1.__code__, transform1.__code__ = transform1.__code__, split1.__code__
        else:
            split1.func_code, transform1.func_code = transform1.func_code, split1.func_code
        for chksm in possible_chksms:
            s = StringIO()
            test_pipeline.printout(
                s, [transform1], verbose=6, checksum_level=chksm)
            if chksm >= CHECKSUM_FUNCTIONS:
                self.assertIn('Job needs update:', s.getvalue())
                self.assertIn('Pipeline function has changed',
                              s.getvalue())
            else:
                #self.assertIn('Job up-to-date', s.getvalue())
                pass
        # clean up our function-changing mess!
        if sys.hexversion >= 0x03000000:
            split1.__code__, transform1.__code__ = transform1.__code__, split1.__code__
        else:
            split1.func_code, transform1.func_code = transform1.func_code, split1.func_code

    def test_newstyle_output_up_to_date_param_changed(self):
        """Input file exists, output up to date, parameter to function changed"""
        test_pipeline = self.create_pipeline()
        # output is up to date, but function body changed (e.g., source different)
        cleanup_tmpdir()
        with open(input_file, 'w') as outfile:
            outfile.write('testme')
        test_pipeline.run([transform1], verbose=0,
                          checksum_level=CHECKSUM_HISTORY_TIMESTAMPS)
        runtime_data.append('different')  # simulate change to config file
        for chksm in possible_chksms:
            s = StringIO()
            test_pipeline.printout(
                s, [transform1], verbose=6, checksum_level=chksm)
            if chksm >= CHECKSUM_FUNCTIONS_AND_PARAMS:
                self.assertIn('Job needs update:', s.getvalue())
                self.assertIn('Pipeline parameters have changed',
                              s.getvalue())
            else:
                #self.assertIn('Job up-to-date', s.getvalue())
                pass

    def test_raises_error(self):
        """run a function that fails but creates output, then check what should run"""
        # output is up to date, but function body changed (e.g., source different)
        cleanup_tmpdir()
        with open(input_file, 'w') as outfile:
            outfile.write('testme')
        time.sleep(.5)
        del runtime_data[:]
        # poo. Shouldn't this be RuntimeError?
        with self.assertRaises(RethrownJobError):
            # generates output then fails
            pipeline_run([transform_raise_error], verbose=0,
                         checksum_level=CHECKSUM_HISTORY_TIMESTAMPS, pipeline="main")
        for chksm in possible_chksms:
            s = StringIO()
            pipeline_printout(s, [transform_raise_error],
                              verbose=6, checksum_level=chksm, pipeline="main")
            if chksm >= CHECKSUM_HISTORY_TIMESTAMPS:
                self.assertIn('Job needs update:', s.getvalue())
                self.assertIn('left over from a failed run?',
                              s.getvalue())
            else:
                #self.assertIn('Job up-to-date', s.getvalue())
                pass

    def test_newstyle_raises_error(self):
        """run a function that fails but creates output, then check what should run"""
        test_pipeline = self.create_pipeline()
        # output is up to date, but function body changed (e.g., source different)
        cleanup_tmpdir()
        with open(input_file, 'w') as outfile:
            outfile.write('testme')
        time.sleep(.5)
        del runtime_data[:]
        # poo. Shouldn't this be RuntimeError?
        with self.assertRaises(RethrownJobError):
            # generates output then fails
            test_pipeline.run([transform_raise_error], verbose=0,
                              checksum_level=CHECKSUM_HISTORY_TIMESTAMPS)
        for chksm in possible_chksms:
            s = StringIO()
            test_pipeline.printout(
                s, [transform_raise_error], verbose=6, checksum_level=chksm)
            if chksm >= CHECKSUM_HISTORY_TIMESTAMPS:
                self.assertIn('Job needs update:', s.getvalue())
                self.assertIn('left over from a failed run?',
                              s.getvalue())
            else:
                #self.assertIn('Job up-to-date', s.getvalue())
                pass

    def test_newstyle_split_output(self):
        """test multiple-output checksums"""
        test_pipeline = self.create_pipeline()
        # outputs out of date
        cleanup_tmpdir()
        with open(input_file, 'w') as outfile:
            outfile.write('testme')
        test_pipeline.run([split1], verbose=0,
                          checksum_level=CHECKSUM_HISTORY_TIMESTAMPS)
        time.sleep(.5)
        with open(input_file, 'w') as outfile:
            outfile.write('testme')
        for chksm in possible_chksms:
            s = StringIO()
            test_pipeline.printout(
                s, [split1], verbose=6, checksum_level=chksm)
            self.assertIn('Job needs update:', s.getvalue())

        # all outputs incorrectly generated
        cleanup_tmpdir()
        with open(input_file, 'w') as outfile:
            outfile.write('testme')
        time.sleep(.5)
        for f in split1_outputs:
            with open(f, 'w') as outfile:
                outfile.write('testme')
        for chksm in possible_chksms:
            s = StringIO()
            test_pipeline.printout(
                s, [split1], verbose=6, checksum_level=chksm)
            if chksm >= CHECKSUM_HISTORY_TIMESTAMPS:
                self.assertIn('Job needs update:', s.getvalue())
                self.assertIn('left over from a failed run?',
                              s.getvalue())
            else:
                #self.assertIn('Job up-to-date', s.getvalue())
                pass

        # one output incorrectly generated
        cleanup_tmpdir()
        with open(input_file, 'w') as outfile:
            outfile.write('testme')
        test_pipeline.run([split1], verbose=0,
                          checksum_level=CHECKSUM_HISTORY_TIMESTAMPS)
        job_history = dbdict.open(
            get_default_history_file_name(), picklevalues=True)
        del job_history[os.path.relpath(split1_outputs[0])]
        for chksm in possible_chksms:
            s = StringIO()
            test_pipeline.printout(
                s, [split1], verbose=6, checksum_level=chksm)
            if chksm >= CHECKSUM_HISTORY_TIMESTAMPS:
                self.assertIn('Job needs update:', s.getvalue())
                self.assertIn('left over from a failed run?',
                              s.getvalue())
            else:
                #self.assertIn('Job up-to-date', s.getvalue())
                pass

    def test_newstyle_merge_output(self):
        """test multiple-input checksums"""
        test_pipeline = self.create_pipeline()
        # one output incorrectly generated
        cleanup_tmpdir()
        with open(input_file, 'w') as outfile:
            outfile.write('testme')
        test_pipeline.run([split1], verbose=0,
                          checksum_level=CHECKSUM_HISTORY_TIMESTAMPS)
        job_history = dbdict.open(
            get_default_history_file_name(), picklevalues=True)
        del job_history[os.path.relpath(split1_outputs[0])]
        for chksm in possible_chksms:
            s = StringIO()
            test_pipeline.printout(
                s, [merge2], verbose=6, checksum_level=chksm)
            if chksm >= CHECKSUM_HISTORY_TIMESTAMPS:
                self.assertIn('Job needs update:', s.getvalue())
                self.assertIn('left over from a failed run?', s.getvalue())
            else:
                #self.assertIn('Job up-to-date', s.getvalue())
                pass

        # make sure the jobs run fine
        cleanup_tmpdir()
        with open(input_file, 'w') as outfile:
            outfile.write('testme')
        test_pipeline.run([merge2], verbose=0,
                          checksum_level=CHECKSUM_HISTORY_TIMESTAMPS)
        for chksm in possible_chksms:
            s = StringIO()
            test_pipeline.printout(
                s, [merge2], verbose=6, checksum_level=chksm)
            #self.assertIn('Job up-to-date', s.getvalue())
            self.assertNotIn('Job needs update:', s.getvalue())
            self.assertNotIn('left over from a failed run?', s.getvalue())

    def tearDown(self):
        shutil.rmtree(tempdir)
if __name__ == '__main__':
    unittest.main()

# Scratch code kept from manual debugging runs (never executed):
# try:
#     os.mkdir(tempdir)
# except OSError:
#     pass
# #os.system('rm %s/*' % tempdir)
# #open(input_file, 'w').close()
# s = StringIO()
# pipeline_run([transform1], checksum_level=CHECKSUM_HISTORY_TIMESTAMPS, pipeline= "main")
# pipeline_printout(s, [transform1], verbose=6, checksum_level=0, pipeline= "main")
# print s.getvalue()
# #open(transform1_out) # raise an exception if test fails
| |
"""Provides functionality to interact with fans."""
from __future__ import annotations
from dataclasses import dataclass
from datetime import timedelta
import functools as ft
import logging
import math
from typing import final
import voluptuous as vol
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
SERVICE_TOGGLE,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
STATE_ON,
)
from homeassistant.core import HomeAssistant
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.config_validation import ( # noqa: F401
PLATFORM_SCHEMA,
PLATFORM_SCHEMA_BASE,
)
from homeassistant.helpers.entity import ToggleEntity, ToggleEntityDescription
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.typing import ConfigType
from homeassistant.loader import bind_hass
from homeassistant.util.percentage import (
ordered_list_item_to_percentage,
percentage_to_ordered_list_item,
percentage_to_ranged_value,
ranged_value_to_percentage,
)
_LOGGER = logging.getLogger(__name__)

DOMAIN = "fan"
SCAN_INTERVAL = timedelta(seconds=30)
ENTITY_ID_FORMAT = DOMAIN + ".{}"

# Bitfield of features supported by the fan entity
SUPPORT_SET_SPEED = 1
SUPPORT_OSCILLATE = 2
SUPPORT_DIRECTION = 4
SUPPORT_PRESET_MODE = 8

# Service names registered by async_setup below
SERVICE_SET_SPEED = "set_speed"
SERVICE_INCREASE_SPEED = "increase_speed"
SERVICE_DECREASE_SPEED = "decrease_speed"
SERVICE_OSCILLATE = "oscillate"
SERVICE_SET_DIRECTION = "set_direction"
SERVICE_SET_PERCENTAGE = "set_percentage"
SERVICE_SET_PRESET_MODE = "set_preset_mode"

# Legacy named speeds (deprecated in favour of percentages/preset modes)
SPEED_OFF = "off"
SPEED_LOW = "low"
SPEED_MEDIUM = "medium"
SPEED_HIGH = "high"

DIRECTION_FORWARD = "forward"
DIRECTION_REVERSE = "reverse"

# State attribute names exposed on fan entities
ATTR_SPEED = "speed"
ATTR_PERCENTAGE = "percentage"
ATTR_PERCENTAGE_STEP = "percentage_step"
ATTR_SPEED_LIST = "speed_list"
ATTR_OSCILLATING = "oscillating"
ATTR_DIRECTION = "direction"
ATTR_PRESET_MODE = "preset_mode"
ATTR_PRESET_MODES = "preset_modes"

_NOT_SPEED_OFF = "off"

# Values of ATTR_SPEED that mean the fan is off
OFF_SPEED_VALUES = [SPEED_OFF, None]

LEGACY_SPEED_LIST = [SPEED_LOW, SPEED_MEDIUM, SPEED_HIGH]
class NoValidSpeedsError(ValueError):
    """Exception class when there are no valid speeds."""


class NotValidSpeedError(ValueError):
    """Exception class when the speed is not in the speed list."""


class NotValidPresetModeError(ValueError):
    """Exception class when the preset_mode is not in the preset_modes list."""
@bind_hass
def is_on(hass, entity_id: str) -> bool:
    """Return if the fans are on based on the statemachine.

    An unknown entity_id is treated as "off" instead of raising
    AttributeError on the None returned by hass.states.get().
    """
    state = hass.states.get(entity_id)
    if state is None:
        # entity not in the state machine -> it cannot be on
        return False
    if ATTR_SPEED in state.attributes:
        # legacy fans expose a speed attribute; any non-off speed means "on"
        return state.attributes[ATTR_SPEED] not in OFF_SPEED_VALUES
    return state.state == STATE_ON
async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool:
    """Expose fan control via statemachine and services.

    Creates the fan EntityComponent, stores it in hass.data[DOMAIN], and
    registers every fan service with its voluptuous schema.  The trailing
    list argument of async_register_entity_service is the set of supported
    features an entity must advertise for the service to apply.
    """
    component = hass.data[DOMAIN] = EntityComponent(
        _LOGGER, DOMAIN, hass, SCAN_INTERVAL
    )
    await component.async_setup(config)

    # After the transition to percentage and preset_modes concludes,
    # switch this back to async_turn_on and remove async_turn_on_compat
    component.async_register_entity_service(
        SERVICE_TURN_ON,
        {
            vol.Optional(ATTR_SPEED): cv.string,
            vol.Optional(ATTR_PERCENTAGE): vol.All(
                vol.Coerce(int), vol.Range(min=0, max=100)
            ),
            vol.Optional(ATTR_PRESET_MODE): cv.string,
        },
        "async_turn_on_compat",
    )

    component.async_register_entity_service(SERVICE_TURN_OFF, {}, "async_turn_off")
    component.async_register_entity_service(SERVICE_TOGGLE, {}, "async_toggle")

    # After the transition to percentage and preset_modes concludes,
    # remove this service
    component.async_register_entity_service(
        SERVICE_SET_SPEED,
        {vol.Required(ATTR_SPEED): cv.string},
        "async_set_speed_deprecated",
        [SUPPORT_SET_SPEED],
    )

    # increase/decrease step by an optional percentage step (default handled
    # by the entity method)
    component.async_register_entity_service(
        SERVICE_INCREASE_SPEED,
        {
            vol.Optional(ATTR_PERCENTAGE_STEP): vol.All(
                vol.Coerce(int), vol.Range(min=0, max=100)
            )
        },
        "async_increase_speed",
        [SUPPORT_SET_SPEED],
    )
    component.async_register_entity_service(
        SERVICE_DECREASE_SPEED,
        {
            vol.Optional(ATTR_PERCENTAGE_STEP): vol.All(
                vol.Coerce(int), vol.Range(min=0, max=100)
            )
        },
        "async_decrease_speed",
        [SUPPORT_SET_SPEED],
    )
    component.async_register_entity_service(
        SERVICE_OSCILLATE,
        {vol.Required(ATTR_OSCILLATING): cv.boolean},
        "async_oscillate",
        [SUPPORT_OSCILLATE],
    )
    component.async_register_entity_service(
        SERVICE_SET_DIRECTION,
        {vol.Optional(ATTR_DIRECTION): cv.string},
        "async_set_direction",
        [SUPPORT_DIRECTION],
    )
    component.async_register_entity_service(
        SERVICE_SET_PERCENTAGE,
        {
            vol.Required(ATTR_PERCENTAGE): vol.All(
                vol.Coerce(int), vol.Range(min=0, max=100)
            )
        },
        "async_set_percentage",
        [SUPPORT_SET_SPEED],
    )
    component.async_register_entity_service(
        SERVICE_SET_PRESET_MODE,
        {vol.Required(ATTR_PRESET_MODE): cv.string},
        "async_set_preset_mode",
        [SUPPORT_SET_SPEED, SUPPORT_PRESET_MODE],
    )

    return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
    """Set up a config entry by forwarding it to the shared fan component."""
    # The EntityComponent was stored in hass.data by async_setup.
    fan_component: EntityComponent = hass.data[DOMAIN]
    return await fan_component.async_setup_entry(entry)
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
    """Unload a config entry via the shared fan component."""
    # The EntityComponent was stored in hass.data by async_setup.
    fan_component: EntityComponent = hass.data[DOMAIN]
    return await fan_component.async_unload_entry(entry)
@dataclass
class FanEntityDescription(ToggleEntityDescription):
    """A class that describes fan entities."""
    # Adds no fields beyond ToggleEntityDescription; exists so fan platforms
    # have a dedicated description type to annotate and extend.
class FanEntity(ToggleEntity):
    """Base class for fan entities."""

    entity_description: FanEntityDescription
    # Backing fields for the read-only properties below; concrete entities
    # set these instead of overriding the properties.
    _attr_current_direction: str | None = None
    _attr_oscillating: bool | None = None
    _attr_percentage: int | None
    _attr_preset_mode: str | None
    _attr_preset_modes: list[str] | None
    _attr_speed_count: int
    _attr_supported_features: int = 0

    def set_speed(self, speed: str) -> None:
        """Set the speed of the fan."""
        raise NotImplementedError()

    async def async_set_speed_deprecated(self, speed: str) -> None:
        """Handle the deprecated fan.set_speed service.

        Logs an error, then delegates to async_set_speed for compatibility.
        """
        _LOGGER.error(
            "The fan.set_speed service is deprecated and will fail in 2022.3 and later, use fan.set_percentage or fan.set_preset_mode instead"
        )
        await self.async_set_speed(speed)

    async def async_set_speed(self, speed: str) -> None:
        """Set the speed of the fan.

        Maps the legacy speed onto the new model: SPEED_OFF turns the fan
        off, a known preset mode is forwarded to async_set_preset_mode, and
        anything else is converted to a percentage.
        """
        if speed == SPEED_OFF:
            await self.async_turn_off()
            return
        if self.preset_modes and speed in self.preset_modes:
            await self.async_set_preset_mode(speed)
            return
        await self.async_set_percentage(self.speed_to_percentage(speed))

    def set_percentage(self, percentage: int) -> None:
        """Set the speed of the fan, as a percentage."""
        raise NotImplementedError()

    async def async_set_percentage(self, percentage: int) -> None:
        """Set the speed of the fan, as a percentage."""
        # 0% also turns the fan off; set_percentage is still invoked
        # afterwards so implementations see the 0 value as well.
        if percentage == 0:
            await self.async_turn_off()
        await self.hass.async_add_executor_job(self.set_percentage, percentage)

    async def async_increase_speed(self, percentage_step: int | None = None) -> None:
        """Increase the speed of the fan."""
        await self._async_adjust_speed(1, percentage_step)

    async def async_decrease_speed(self, percentage_step: int | None = None) -> None:
        """Decrease the speed of the fan."""
        await self._async_adjust_speed(-1, percentage_step)

    async def _async_adjust_speed(
        self, modifier: int, percentage_step: int | None
    ) -> None:
        """Increase or decrease the speed of the fan.

        modifier is +1 to increase and -1 to decrease.
        """
        current_percentage = self.percentage or 0
        if percentage_step is not None:
            new_percentage = current_percentage + (percentage_step * modifier)
        else:
            # No explicit step: move one discrete speed up/down based on the
            # fan's supported speed count.
            speed_range = (1, self.speed_count)
            speed_index = math.ceil(
                percentage_to_ranged_value(speed_range, current_percentage)
            )
            new_percentage = ranged_value_to_percentage(
                speed_range, speed_index + modifier
            )
        # Clamp to the valid 0-100 range before applying.
        new_percentage = max(0, min(100, new_percentage))
        await self.async_set_percentage(new_percentage)

    def set_preset_mode(self, preset_mode: str) -> None:
        """Set new preset mode."""
        raise NotImplementedError()

    async def async_set_preset_mode(self, preset_mode: str) -> None:
        """Set new preset mode."""
        await self.hass.async_add_executor_job(self.set_preset_mode, preset_mode)

    def _valid_preset_mode_or_raise(self, preset_mode: str) -> None:
        """Raise NotValidPresetModeError on invalid preset_mode."""
        preset_modes = self.preset_modes
        if not preset_modes or preset_mode not in preset_modes:
            raise NotValidPresetModeError(
                f"The preset_mode {preset_mode} is not a valid preset_mode: {preset_modes}"
            )

    def set_direction(self, direction: str) -> None:
        """Set the direction of the fan."""
        raise NotImplementedError()

    async def async_set_direction(self, direction: str) -> None:
        """Set the direction of the fan."""
        await self.hass.async_add_executor_job(self.set_direction, direction)

    # pylint: disable=arguments-differ
    def turn_on(
        self,
        speed: str | None = None,
        percentage: int | None = None,
        preset_mode: str | None = None,
        **kwargs,
    ) -> None:
        """Turn on the fan."""
        raise NotImplementedError()

    async def async_turn_on_compat(
        self,
        speed: str | None = None,
        percentage: int | None = None,
        preset_mode: str | None = None,
        **kwargs,
    ) -> None:
        """Turn on the fan.

        This _compat version wraps async_turn_on with
        backwards and forward compatibility.
        This compatibility shim will be removed in 2022.3
        """
        # Normalize the three overlapping arguments so async_turn_on receives
        # a consistent speed/percentage/preset_mode triple regardless of which
        # one the caller supplied.
        if preset_mode is not None:
            self._valid_preset_mode_or_raise(preset_mode)
            speed = preset_mode
            percentage = None
        elif speed is not None:
            _LOGGER.error(
                "Calling fan.turn_on with the speed argument is deprecated and will fail in 2022.3 and later, use percentage or preset_mode instead"
            )
            # A legacy "speed" that matches a preset mode is treated as one.
            if self.preset_modes and speed in self.preset_modes:
                preset_mode = speed
                percentage = None
            else:
                percentage = self.speed_to_percentage(speed)
        elif percentage is not None:
            speed = self.percentage_to_speed(percentage)
        await self.async_turn_on(
            speed=speed,
            percentage=percentage,
            preset_mode=preset_mode,
            **kwargs,
        )

    # pylint: disable=arguments-differ
    async def async_turn_on(
        self,
        speed: str | None = None,
        percentage: int | None = None,
        preset_mode: str | None = None,
        **kwargs,
    ) -> None:
        """Turn on the fan."""
        # Legacy callers could request "off" through turn_on.
        if speed == SPEED_OFF:
            await self.async_turn_off()
        else:
            await self.hass.async_add_executor_job(
                ft.partial(
                    self.turn_on,
                    speed=speed,
                    percentage=percentage,
                    preset_mode=preset_mode,
                    **kwargs,
                )
            )

    def oscillate(self, oscillating: bool) -> None:
        """Oscillate the fan."""
        raise NotImplementedError()

    async def async_oscillate(self, oscillating: bool) -> None:
        """Oscillate the fan."""
        await self.hass.async_add_executor_job(self.oscillate, oscillating)

    @property
    def is_on(self):
        """Return true if the entity is on."""
        # Any speed value (including a preset mode) other than off counts as on.
        return self.speed not in [SPEED_OFF, None]

    @property
    def speed(self) -> str | None:
        """Return the current speed."""
        # A preset mode wins over a percentage-derived legacy speed.
        if preset_mode := self.preset_mode:
            return preset_mode
        if (percentage := self.percentage) is None:
            return None
        return self.percentage_to_speed(percentage)

    @property
    def percentage(self) -> int | None:
        """Return the current speed as a percentage."""
        if hasattr(self, "_attr_percentage"):
            return self._attr_percentage
        # Entities that never set the backing field report 0, not unknown.
        return 0

    @property
    def speed_count(self) -> int:
        """Return the number of speeds the fan supports."""
        if hasattr(self, "_attr_speed_count"):
            return self._attr_speed_count
        # Default: fully variable-speed fan (one step per percent).
        return 100

    @property
    def percentage_step(self) -> float:
        """Return the step size for percentage."""
        return 100 / self.speed_count

    @property
    def speed_list(self) -> list:
        """Get the list of available speeds."""
        # Legacy speeds plus any preset modes the entity exposes.
        speeds = [SPEED_OFF, *LEGACY_SPEED_LIST]
        if preset_modes := self.preset_modes:
            speeds.extend(preset_modes)
        return speeds

    @property
    def current_direction(self) -> str | None:
        """Return the current direction of the fan."""
        return self._attr_current_direction

    @property
    def oscillating(self) -> bool | None:
        """Return whether or not the fan is currently oscillating."""
        return self._attr_oscillating

    @property
    def capability_attributes(self):
        """Return capability attributes."""
        attrs = {}
        if self.supported_features & SUPPORT_SET_SPEED:
            attrs[ATTR_SPEED_LIST] = self.speed_list
        # Preset modes are advertised for SUPPORT_SET_SPEED too, because the
        # legacy speed API accepts preset modes as speeds.
        if (
            self.supported_features & SUPPORT_SET_SPEED
            or self.supported_features & SUPPORT_PRESET_MODE
        ):
            attrs[ATTR_PRESET_MODES] = self.preset_modes
        return attrs

    def speed_to_percentage(self, speed: str) -> int:  # pylint: disable=no-self-use
        """Map a legacy speed to a percentage.

        Raises NotValidSpeedError for speeds outside LEGACY_SPEED_LIST.
        """
        if speed in OFF_SPEED_VALUES:
            return 0
        if speed not in LEGACY_SPEED_LIST:
            raise NotValidSpeedError(f"The speed {speed} is not a valid speed.")
        return ordered_list_item_to_percentage(LEGACY_SPEED_LIST, speed)

    def percentage_to_speed(  # pylint: disable=no-self-use
        self, percentage: int
    ) -> str:
        """Map a percentage to a legacy speed."""
        if percentage == 0:
            return SPEED_OFF
        return percentage_to_ordered_list_item(LEGACY_SPEED_LIST, percentage)

    @final
    @property
    def state_attributes(self) -> dict:
        """Return optional state attributes."""
        data: dict[str, float | str | None] = {}
        supported_features = self.supported_features
        if supported_features & SUPPORT_DIRECTION:
            data[ATTR_DIRECTION] = self.current_direction
        if supported_features & SUPPORT_OSCILLATE:
            data[ATTR_OSCILLATING] = self.oscillating
        if supported_features & SUPPORT_SET_SPEED:
            data[ATTR_SPEED] = self.speed
            data[ATTR_PERCENTAGE] = self.percentage
            data[ATTR_PERCENTAGE_STEP] = self.percentage_step
        if (
            supported_features & SUPPORT_PRESET_MODE
            or supported_features & SUPPORT_SET_SPEED
        ):
            data[ATTR_PRESET_MODE] = self.preset_mode
        return data

    @property
    def supported_features(self) -> int:
        """Flag supported features."""
        return self._attr_supported_features

    @property
    def preset_mode(self) -> str | None:
        """Return the current preset mode, e.g., auto, smart, interval, favorite.

        Requires SUPPORT_SET_SPEED.
        """
        if hasattr(self, "_attr_preset_mode"):
            return self._attr_preset_mode
        return None

    @property
    def preset_modes(self) -> list[str] | None:
        """Return a list of available preset modes.

        Requires SUPPORT_SET_SPEED.
        """
        if hasattr(self, "_attr_preset_modes"):
            return self._attr_preset_modes
        return None
| |
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import urllib2
import httplib
import socket
import json
import re
import sys
from telemetry.core import util
from telemetry.core import exceptions
from telemetry.core import user_agent
from telemetry.core import wpr_modes
from telemetry.core import wpr_server
from telemetry.core.chrome import extension_dict_backend
from telemetry.core.chrome import tab_list_backend
from telemetry.core.chrome import tracing_backend
from telemetry.test import options_for_unittests
class ExtensionsNotSupportedException(Exception):
  """Raised when extension operations are attempted on a browser backend
  that does not support extensions."""
  pass
class BrowserBackend(object):
  """A base class for browser backends. Provides basic functionality
  once a remote-debugger port has been established."""

  WEBPAGEREPLAY_HOST = '127.0.0.1'

  def __init__(self, is_content_shell, supports_extensions, options):
    self.browser_type = options.browser_type
    self.is_content_shell = is_content_shell
    self._supports_extensions = supports_extensions
    self.options = options
    self._browser = None
    # Remote-debugging port; presumably assigned by a subclass once the
    # browser is launched (never set in this class) — Request() relies on it.
    self._port = None
    # Version information, filled in by _PostBrowserStartupInitialization.
    self._inspector_protocol_version = 0
    self._chrome_branch_number = 0
    self._webkit_base_revision = 0
    self._tracing_backend = None
    # Web Page Replay ports: the remote ports mirror the local ones here.
    self.webpagereplay_local_http_port = util.GetAvailableLocalPort()
    self.webpagereplay_local_https_port = util.GetAvailableLocalPort()
    self.webpagereplay_remote_http_port = self.webpagereplay_local_http_port
    self.webpagereplay_remote_https_port = self.webpagereplay_local_https_port
    if options.dont_override_profile and not options_for_unittests.AreSet():
      sys.stderr.write('Warning: Not overriding profile. This can cause '
                       'unexpected effects due to profile-specific settings, '
                       'such as about:flags settings, cookies, and '
                       'extensions.\n')
    self._tab_list_backend = tab_list_backend.TabListBackend(self)
    # Only built when the backend supports extensions; stays None otherwise.
    self._extension_dict_backend = None
    if supports_extensions:
      self._extension_dict_backend = \
          extension_dict_backend.ExtensionDictBackend(self)

  def SetBrowser(self, browser):
    # Late binding: the Browser object is created after the backend.
    self._browser = browser
    self._tab_list_backend.Init()

  @property
  def browser(self):
    return self._browser

  @property
  def supports_extensions(self):
    """True if this browser backend supports extensions."""
    return self._supports_extensions

  @property
  def tab_list_backend(self):
    return self._tab_list_backend

  @property
  def extension_dict_backend(self):
    return self._extension_dict_backend

  def GetBrowserStartupArgs(self):
    """Return the command-line arguments the browser should be started with."""
    args = []
    args.extend(self.options.extra_browser_args)
    args.append('--disable-background-networking')
    args.append('--metrics-recording-only')
    args.append('--no-first-run')
    # Web Page Replay flags are only needed when replay/record is active.
    if self.options.wpr_mode != wpr_modes.WPR_OFF:
      args.extend(wpr_server.GetChromeFlags(
          self.WEBPAGEREPLAY_HOST,
          self.webpagereplay_remote_http_port,
          self.webpagereplay_remote_https_port))
    args.extend(user_agent.GetChromeUserAgentArgumentFromType(
        self.options.browser_user_agent_type))
    # Regular and component extensions are loaded through separate flags,
    # each taking a comma-separated list of paths.
    extensions = [extension.local_path for extension in
                  self.options.extensions_to_load if not extension.is_component]
    extension_str = ','.join(extensions)
    if len(extensions) > 0:
      args.append('--load-extension=%s' % extension_str)
    component_extensions = [extension.local_path for extension in
                            self.options.extensions_to_load
                            if extension.is_component]
    component_extension_str = ','.join(component_extensions)
    if len(component_extensions) > 0:
      args.append('--load-component-extension=%s' % component_extension_str)
    return args

  @property
  def wpr_mode(self):
    return self.options.wpr_mode

  def _WaitForBrowserToComeUp(self, timeout=None):
    """Block until the debugger endpoint answers and extensions are ready.

    Note: `timeout` bounds each individual Request; the overall polling is
    capped separately at 30 seconds by util.WaitFor.
    """
    def IsBrowserUp():
      try:
        self.Request('', timeout=timeout)
      except (socket.error, httplib.BadStatusLine, urllib2.URLError):
        return False
      else:
        return True
    try:
      util.WaitFor(IsBrowserUp, timeout=30)
    except util.TimeoutException:
      raise exceptions.BrowserGoneException()

    def AllExtensionsLoaded():
      for e in self.options.extensions_to_load:
        if not e.extension_id in self._extension_dict_backend:
          return False
        extension_object = self._extension_dict_backend[e.extension_id]
        extension_object.WaitForDocumentReadyStateToBeInteractiveOrBetter()
      return True
    if self._supports_extensions:
      util.WaitFor(AllExtensionsLoaded, timeout=30)

  def _PostBrowserStartupInitialization(self):
    # Detect version information.
    data = self.Request('version')
    resp = json.loads(data)
    if 'Protocol-Version' in resp:
      self._inspector_protocol_version = resp['Protocol-Version']
      # Branch number comes from the 'Browser' field when present, otherwise
      # it is scraped out of the User-Agent string.
      if 'Browser' in resp:
        branch_number_match = re.search('Chrome/\d+\.\d+\.(\d+)\.\d+',
                                        resp['Browser'])
      else:
        branch_number_match = re.search(
            'Chrome/\d+\.\d+\.(\d+)\.\d+ (Mobile )?Safari',
            resp['User-Agent'])
      webkit_version_match = re.search('\((trunk)?\@(\d+)\)',
                                       resp['WebKit-Version'])
      if branch_number_match:
        self._chrome_branch_number = int(branch_number_match.group(1))
      else:
        # Content Shell returns '' for Browser, for now we have to
        # fall-back and assume branch 1025.
        self._chrome_branch_number = 1025
      if webkit_version_match:
        self._webkit_base_revision = int(webkit_version_match.group(2))
      return

    # Detection has failed: assume 18.0.1025.168 ~= Chrome Android.
    self._inspector_protocol_version = 1.0
    self._chrome_branch_number = 1025
    self._webkit_base_revision = 106313

  def Request(self, path, timeout=None):
    """Fetch the DevTools /json endpoint (plus optional subpath) and return
    the raw response body."""
    url = 'http://localhost:%i/json' % self._port
    if path:
      url += '/' + path
    req = urllib2.urlopen(url, timeout=timeout)
    return req.read()

  @property
  def chrome_branch_number(self):
    return self._chrome_branch_number

  @property
  def supports_tab_control(self):
    # Tab control requires a sufficiently recent branch.
    return self._chrome_branch_number >= 1303

  @property
  def supports_tracing(self):
    return self.is_content_shell or self._chrome_branch_number >= 1385

  def StartTracing(self):
    # Lazily create the tracing backend on first use.
    if self._tracing_backend is None:
      self._tracing_backend = tracing_backend.TracingBackend(self._port)
    self._tracing_backend.BeginTracing()

  def StopTracing(self):
    self._tracing_backend.EndTracing()

  def GetTraceResultAndReset(self):
    return self._tracing_backend.GetTraceResultAndReset()

  def GetRemotePort(self, _):
    # Base implementation ignores the requested port and hands out any free
    # local port; remote-device backends presumably override this.
    return util.GetAvailableLocalPort()

  def Close(self):
    if self._tracing_backend:
      self._tracing_backend.Close()
      self._tracing_backend = None

  def CreateForwarder(self, *port_pairs):
    raise NotImplementedError()

  def IsBrowserRunning(self):
    raise NotImplementedError()

  def GetStandardOutput(self):
    raise NotImplementedError()
class DoNothingForwarder(object):
  """Forwarder stand-in that performs no forwarding at all.

  It just remembers the local port of the first port pair and serves a
  loopback URL for it until Close() is called.
  """

  def __init__(self, *port_pairs):
    first_pair = port_pairs[0]
    self._host_port = first_pair.local_port

  @property
  def url(self):
    # Close() resets the port, after which the URL is no longer valid.
    assert self._host_port
    return 'http://127.0.0.1:%i' % self._host_port

  def Close(self):
    self._host_port = None
| |
# Stage ("allizom") deployment settings. Everything below overrides or extends
# olympia.lib.settings_base, which is star-imported here; assignment order
# therefore matters for any name defined in both places.
import logging
import os
import environ
import datetime

from olympia.lib.settings_base import *  # noqa

# Load the stage environment file before reading any env vars.
environ.Env.read_env(env_file='/etc/olympia/settings.env')
env = environ.Env()

CSP_BASE_URI += (
    # Required for the legacy discovery pane.
    'https://addons.allizom.org',
)
CDN_HOST = 'https://addons-stage-cdn.allizom.org'
# Allow assets served from the stage CDN in the relevant CSP directives.
CSP_FONT_SRC += (CDN_HOST,)
CSP_CHILD_SRC += ('https://www.sandbox.paypal.com',)
CSP_FRAME_SRC = CSP_CHILD_SRC
CSP_IMG_SRC += (CDN_HOST,)
CSP_SCRIPT_SRC += (
    CDN_HOST,
)
CSP_STYLE_SRC += (CDN_HOST,)

# Keep crawlers away from the stage site.
ENGAGE_ROBOTS = False

# Email transport is configured from a single EMAIL_URL env var.
EMAIL_URL = env.email_url('EMAIL_URL')
EMAIL_HOST = EMAIL_URL['EMAIL_HOST']
EMAIL_PORT = EMAIL_URL['EMAIL_PORT']
EMAIL_BACKEND = EMAIL_URL['EMAIL_BACKEND']
EMAIL_HOST_USER = EMAIL_URL['EMAIL_HOST_USER']
EMAIL_HOST_PASSWORD = EMAIL_URL['EMAIL_HOST_PASSWORD']
EMAIL_QA_ALLOW_LIST = env.list('EMAIL_QA_ALLOW_LIST')
EMAIL_DENY_LIST = env.list('EMAIL_DENY_LIST')

ENV = env('ENV')
DEBUG = False
DEBUG_PROPAGATE_EXCEPTIONS = False
SESSION_COOKIE_SECURE = True
# NOTE(review): DOMAIN is re-assigned from the environment a few lines below,
# so this prefix uses the DOMAIN value inherited from settings_base — confirm
# this ordering is intentional.
CRONJOB_LOCK_PREFIX = DOMAIN
API_THROTTLE = False
REDIRECT_SECRET_KEY = env('REDIRECT_SECRET_KEY')

DOMAIN = env('DOMAIN', default='addons.allizom.org')
SERVER_EMAIL = 'zstage@addons.mozilla.org'
SITE_URL = 'https://' + DOMAIN
SERVICES_URL = env('SERVICES_URL',
                   default='https://services.addons.allizom.org')
STATIC_URL = '%s/static/' % CDN_HOST
MEDIA_URL = '%s/user-media/' % CDN_HOST
SESSION_COOKIE_DOMAIN = ".%s" % DOMAIN

# Filter IP addresses of allowed clients that can post email through the API.
ALLOWED_CLIENTS_EMAIL_API = env.list('ALLOWED_CLIENTS_EMAIL_API', default=[])
# Auth token required to authorize inbound email.
INBOUND_EMAIL_SECRET_KEY = env('INBOUND_EMAIL_SECRET_KEY', default='')
# Validation key we need to send in POST response.
INBOUND_EMAIL_VALIDATION_KEY = env('INBOUND_EMAIL_VALIDATION_KEY', default='')
# Domain emails should be sent to.
INBOUND_EMAIL_DOMAIN = env('INBOUND_EMAIL_DOMAIN', default=DOMAIN)

# Tags used to route this app's syslog output.
SYSLOG_TAG = "http_app_addons_stage"
SYSLOG_TAG2 = "http_app_addons_stage_timer"
SYSLOG_CSP = "http_app_addons_stage_csp"
# Database configuration: a writable default (master) plus a read slave.
DATABASES = {}
DATABASES['default'] = env.db('DATABASES_DEFAULT_URL')
DATABASES['default']['ENGINE'] = 'django.db.backends.mysql'
# Run all views in a transaction (on master) unless they are decorated not to.
DATABASES['default']['ATOMIC_REQUESTS'] = True
# Pool our database connections up for 300 seconds
DATABASES['default']['CONN_MAX_AGE'] = 300

DATABASES['slave'] = env.db('DATABASES_SLAVE_URL')
# Do not open a transaction for every view on the slave DB.
DATABASES['slave']['ATOMIC_REQUESTS'] = False
DATABASES['slave']['ENGINE'] = 'django.db.backends.mysql'
# Pool our database connections up for 300 seconds
DATABASES['slave']['CONN_MAX_AGE'] = 300

SERVICES_DATABASE = env.db('SERVICES_DATABASE_URL')
SLAVE_DATABASES = ['slave']

# Memcached-backed default cache.
CACHE_MIDDLEWARE_KEY_PREFIX = CACHE_PREFIX
CACHES = {}
CACHES['default'] = env.cache('CACHES_DEFAULT')
CACHES['default']['TIMEOUT'] = 500
CACHES['default']['BACKEND'] = 'caching.backends.memcached.MemcachedCache'
CACHES['default']['KEY_PREFIX'] = CACHE_PREFIX

SECRET_KEY = env('SECRET_KEY')
# Verbose logging on stage.
LOG_LEVEL = logging.DEBUG

# Celery
BROKER_URL = env('BROKER_URL')
CELERY_IGNORE_RESULT = True
CELERY_DISABLE_RATE_LIMITS = True
CELERYD_PREFETCH_MULTIPLIER = 1
CELERY_RESULT_BACKEND = env('CELERY_RESULT_BACKEND')

# File storage layout rooted at the NetApp mount.
NETAPP_STORAGE_ROOT = env('NETAPP_STORAGE_ROOT')
NETAPP_STORAGE = NETAPP_STORAGE_ROOT + '/shared_storage'
GUARDED_ADDONS_PATH = NETAPP_STORAGE_ROOT + '/guarded-addons'
MEDIA_ROOT = NETAPP_STORAGE + '/uploads'
TMP_PATH = os.path.join(NETAPP_STORAGE, 'tmp')
PACKAGER_PATH = os.path.join(TMP_PATH, 'packager')
ADDONS_PATH = NETAPP_STORAGE_ROOT + '/files'

# Must be forced in settings because name => path can't be dynamically
# computed: reviewer_attachmentS VS reviewer_attachment.
# TODO: rename folder on file system.
# (One can also just rename the setting, but this will not be consistent
# with the naming scheme.)
REVIEWER_ATTACHMENTS_PATH = MEDIA_ROOT + '/reviewer_attachment'

LOGGING['loggers'].update({
    'z.task': {'level': logging.DEBUG},
    'z.redis': {'level': logging.DEBUG},
    'z.pool': {'level': logging.ERROR},
})

# This is used for `django-cache-machine`
REDIS_BACKEND = env('REDIS_BACKENDS_CACHE')
REDIS_BACKENDS = {
    'cache': get_redis_settings(env('REDIS_BACKENDS_CACHE')),
    'cache_slave': get_redis_settings(env('REDIS_BACKENDS_CACHE_SLAVE')),
    'master': get_redis_settings(env('REDIS_BACKENDS_MASTER')),
    'slave': get_redis_settings(env('REDIS_BACKENDS_SLAVE'))
}
CACHE_MACHINE_USE_REDIS = True

# Old recaptcha V1
RECAPTCHA_PUBLIC_KEY = env('RECAPTCHA_PUBLIC_KEY')
RECAPTCHA_PRIVATE_KEY = env('RECAPTCHA_PRIVATE_KEY')
# New Recaptcha V2
NOBOT_RECAPTCHA_PUBLIC_KEY = env('NOBOT_RECAPTCHA_PUBLIC_KEY')
NOBOT_RECAPTCHA_PRIVATE_KEY = env('NOBOT_RECAPTCHA_PRIVATE_KEY')

# Remove DetectMobileMiddleware from middleware in production.
# NOTE(review): these two names are assigned but not visibly used in this
# file — presumably consumed by middleware filtering elsewhere; confirm.
detect = 'mobility.middleware.DetectMobileMiddleware'
csp = 'csp.middleware.CSPMiddleware'

RESPONSYS_ID = env('RESPONSYS_ID')

# Elasticsearch: per-environment index names derived from ENV.
ES_TIMEOUT = 60
ES_HOSTS = env('ES_HOSTS')
ES_URLS = ['http://%s' % h for h in ES_HOSTS]
ES_INDEXES = dict((k, '%s_%s' % (v, ENV)) for k, v in ES_INDEXES.items())
# Metrics (statsd/graphite).
STATSD_HOST = env('STATSD_HOST')
STATSD_PREFIX = env('STATSD_PREFIX')
GRAPHITE_HOST = env('GRAPHITE_HOST')
GRAPHITE_PREFIX = env('GRAPHITE_PREFIX')
CEF_PRODUCT = STATSD_PREFIX

NEW_FEATURES = True

# Outgoing-link redirector used on stage.
REDIRECT_URL = 'https://outgoing.stage.mozaws.net/v1/'

# Asset pipeline binaries.
CLEANCSS_BIN = 'cleancss'
UGLIFY_BIN = 'uglifyjs'
ADDONS_LINTER_BIN = 'addons-linter'
LESS_PREPROCESS = True

# Serve files via nginx's X-Accel-Redirect instead of through Django.
XSENDFILE_HEADER = 'X-Accel-Redirect'

ALLOW_SELF_REVIEWS = True

GOOGLE_ANALYTICS_CREDENTIALS = env.dict('GOOGLE_ANALYTICS_CREDENTIALS')
GOOGLE_ANALYTICS_CREDENTIALS['user_agent'] = None
# Fixed (expired) token expiry forces a token refresh on first use.
GOOGLE_ANALYTICS_CREDENTIALS['token_expiry'] = datetime.datetime(2013, 1, 3, 1, 20, 16, 45465)  # noqa
GOOGLE_API_CREDENTIALS = env('GOOGLE_API_CREDENTIALS')

GEOIP_URL = 'https://geo.services.mozilla.com'

AES_KEYS = env.dict('AES_KEYS')

PERSONA_DEFAULT_PAGES = 5

# Signing
SIGNING_SERVER = env('SIGNING_SERVER')

# PayPal sandbox endpoints.
PAYPAL_PAY_URL = 'https://svcs.sandbox.paypal.com/AdaptivePayments/'
PAYPAL_FLOW_URL = (
    'https://www.sandbox.paypal.com/webapps/adaptivepayment/flow/pay')
PAYPAL_API_URL = 'https://api-3t.sandbox.paypal.com/nvp'
PAYPAL_EMAIL = env('PAYPAL_EMAIL')
PAYPAL_APP_ID = env('PAYPAL_APP_ID')
PAYPAL_PERMISSIONS_URL = 'https://svcs.sandbox.paypal.com/Permissions/'
PAYPAL_CGI_URL = 'https://www.sandbox.paypal.com/cgi-bin/webscr'
PAYPAL_EMBEDDED_AUTH = {
    'USER': env('PAYPAL_EMBEDDED_AUTH_USER'),
    'PASSWORD': env('PAYPAL_EMBEDDED_AUTH_PASSWORD'),
    'SIGNATURE': env('PAYPAL_EMBEDDED_AUTH_SIGNATURE'),
}
PAYPAL_CGI_AUTH = {
    'USER': env('PAYPAL_CGI_AUTH_USER'),
    'PASSWORD': env('PAYPAL_CGI_AUTH_PASSWORD'),
    'SIGNATURE': env('PAYPAL_CGI_AUTH_SIGNATURE'),
}
PAYPAL_CHAINS = (
    (30, env('PAYPAL_CHAINS_EMAIL')),
)

SENTRY_DSN = env('SENTRY_DSN')

# Add the debug locale on stage.
AMO_LANGUAGES = AMO_LANGUAGES + ('dbg',)
LANGUAGES = lazy(lazy_langs, dict)(AMO_LANGUAGES)
LANGUAGE_URL_MAP = dict([(i.lower(), i) for i in AMO_LANGUAGES])

GOOGLE_ANALYTICS_DOMAIN = 'addons.mozilla.org'

NEWRELIC_ENABLE = env.bool('NEWRELIC_ENABLE', default=False)
if NEWRELIC_ENABLE:
    NEWRELIC_INI = '/etc/newrelic.d/%s.ini' % DOMAIN
# Firefox Accounts OAuth configuration: 'default' redirects back to the public
# site, 'internal' to the admin tools host.
FXA_CONFIG = {
    'default': {
        'client_id': env('FXA_CLIENT_ID'),
        'client_secret': env('FXA_CLIENT_SECRET'),
        'content_host': 'https://accounts.firefox.com',
        'oauth_host': 'https://oauth.accounts.firefox.com/v1',
        'profile_host': 'https://profile.accounts.firefox.com/v1',
        'redirect_url':
            'https://addons.allizom.org/api/v3/accounts/authenticate/',
        'scope': 'profile',
    },
    'internal': {
        'client_id': env('INTERNAL_FXA_CLIENT_ID'),
        'client_secret': env('INTERNAL_FXA_CLIENT_SECRET'),
        'content_host': 'https://accounts.firefox.com',
        'oauth_host': 'https://oauth.accounts.firefox.com/v1',
        'profile_host': 'https://profile.accounts.firefox.com/v1',
        'redirect_url':
            'https://addons-admin.stage.mozaws.net/fxa-authenticate',
        'scope': 'profile',
    },
}
# 'amo' is an alias of the default config.
FXA_CONFIG['amo'] = FXA_CONFIG['default']
DEFAULT_FXA_CONFIG_NAME = 'default'
INTERNAL_FXA_CONFIG_NAME = 'internal'
ALLOWED_FXA_CONFIGS = ['default', 'amo']

CORS_ENDPOINT_OVERRIDES = cors_endpoint_overrides(
    public=['amo.addons.allizom.org'],
    internal=['addons-admin.stage.mozaws.net'],
)

READ_ONLY = env.bool('READ_ONLY', default=False)

RAVEN_DSN = (
    'https://e35602be5252460d97587478bcc642df@sentry.prod.mozaws.net/77')
RAVEN_ALLOW_LIST = ['addons.allizom.org', 'addons-cdn.allizom.org']

GITHUB_API_USER = env('GITHUB_API_USER')
GITHUB_API_TOKEN = env('GITHUB_API_TOKEN')
| |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2014 clowwindy
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import os
import time
import socket
import errno
import struct
import logging
info = sys.version_info
# This tool is Python 2 only (print statement here, xrange below); refuse to
# run on anything other than CPython 2.7+.
if not (info[0] == 2 and info[1] >= 7):
    print 'Python 2.7 required'
    sys.exit(1)
# NOTE(review): argparse appears unused in this chunk — presumably consumed by
# a main() later in the file; confirm before removing.
import argparse
from shadowsocks import eventloop, asyncdns, lru_cache

# Maximum number of bytes read per recv/recvfrom call.
BUF_SIZE = 16384
# Seconds a request-id -> client-address mapping is kept before sweeping.
CACHE_TIMEOUT = 10
# Seconds to hold back an empty DNS answer before relaying it to the client.
EMPTY_RESULT_DELAY = 4
# IPs treated as bogus: UDP responses whose answers contain any of these
# addresses are silently discarded (see UDPDNSRelay._handle_remote).
GFW_LIST = set(["74.125.127.102", "74.125.155.102", "74.125.39.102",
                "74.125.39.113", "209.85.229.138", "128.121.126.139",
                "159.106.121.75", "169.132.13.103", "192.67.198.6",
                "202.106.1.2", "202.181.7.85", "203.161.230.171",
                "203.98.7.65", "207.12.88.98", "208.56.31.43",
                "209.145.54.50", "209.220.30.174", "209.36.73.33",
                "211.94.66.147", "213.169.251.35", "216.221.188.182",
                "216.234.179.13", "243.185.187.39", "37.61.54.158",
                "4.36.66.178", "46.82.174.68", "59.24.3.173", "64.33.88.161",
                "64.33.99.47", "64.66.163.251", "65.104.202.252",
                "65.160.219.113", "66.45.252.237", "72.14.205.104",
                "72.14.205.99", "78.16.49.15", "8.7.198.45", "93.46.8.89"])
class DNSRelay(object):
    """Common state and helpers shared by the UDP and TCP DNS relays."""

    def __init__(self, config):
        self._loop = None
        self._config = config
        self._last_time = time.time()
        self._local_addr = (config['local_address'], config['local_port'])
        # Parse the comma-separated upstream list; ":port" is optional and
        # defaults to the standard DNS port 53.
        self._remote_addrs = []
        for addr in config['dns'].split(','):
            parts = addr.strip().rsplit(':', 1)
            host = parts[0]
            port = int(parts[1]) if len(parts) == 2 else 53
            self._remote_addrs.append((host, port))
        # The last upstream doubles as the single default used by the TCP
        # relay and as the getaddrinfo target for the UDP upstream socket.
        self._remote_addr = self._remote_addrs[-1]
        # hostname -> IP overrides loaded from the system hosts file.
        self._hosts = {}
        self._parse_hosts()

    def add_to_loop(self, loop):
        """Attach this relay's event handler to the event loop (once)."""
        if self._loop:
            raise Exception('already add to loop')
        self._loop = loop
        loop.add_handler(self.handle_events)

    def _parse_hosts(self):
        """Load hostname -> IP mappings from the platform hosts file."""
        etc_path = '/etc/hosts'
        if os.environ.__contains__('WINDIR'):
            etc_path = os.environ['WINDIR'] + '/system32/drivers/etc/hosts'
        try:
            with open(etc_path, 'rb') as f:
                for line in f.readlines():
                    line = line.strip()
                    parts = line.split()
                    if len(parts) >= 2:
                        ip = parts[0]
                        if asyncdns.is_ip(ip):
                            # Every token after the IP is a name for it.
                            for i in xrange(1, len(parts)):
                                hostname = parts[i]
                                if hostname:
                                    self._hosts[hostname] = ip
        except IOError:
            # No readable hosts file: run without local overrides.
            pass

    @staticmethod
    def build_response(request, ip):
        """Build a one-answer DNS response to `request` resolving to `ip`.

        Returns None when `ip` cannot be resolved to an address family.
        """
        addrs = socket.getaddrinfo(ip, 0, 0, 0, 0)
        if not addrs:
            return None
        af, socktype, proto, canonname, sa = addrs[0]
        # Reuse the request id, set the response bit on the flags byte, and
        # advertise exactly one question and one answer.
        header = struct.unpack('!HBBHHHH', request[:12])
        header = struct.pack('!HBBHHHH', header[0], 0x80 | header[1], 0x80, 1,
                             1, 0, 0)
        if af == socket.AF_INET:
            qtype = asyncdns.QTYPE_A
        else:
            qtype = asyncdns.QTYPE_AAAA
        addr = socket.inet_pton(af, ip)
        question = request[12:]
        # for hostname compression: the answer name is a compression pointer
        # (0xC00C) back to the question name at offset 12.
        answer = struct.pack('!H', ((128 + 64) << 8 | 12)) + \
            struct.pack('!HHiH', qtype, asyncdns.QCLASS_IN, 300,
                        len(addr)) + addr
        return header + question + answer

    def handle_events(self, events):
        # Overridden by subclasses; the base relay has nothing to poll.
        pass
class UDPDNSRelay(DNSRelay):
    """UDP relay: forwards each query to every configured upstream and relays
    back the first answer that is not filtered by GFW_LIST."""

    def __init__(self, config):
        DNSRelay.__init__(self, config)
        # Maps DNS request ids to the client address awaiting the answer;
        # entries expire after CACHE_TIMEOUT seconds.
        self._id_to_addr = lru_cache.LRUCache(CACHE_TIMEOUT)
        self._local_sock = None
        self._remote_sock = None
        self._create_sockets()
        # (timestamp, callback) pairs for empty answers being held back.
        self._pending_responses = []

    def _create_sockets(self):
        """(Re)create the client-facing and upstream UDP sockets."""
        sockets = []
        for addr in (self._local_addr, self._remote_addr):
            addrs = socket.getaddrinfo(addr[0], addr[1], 0,
                                       socket.SOCK_DGRAM, socket.SOL_UDP)
            if len(addrs) == 0:
                raise Exception("can't get addrinfo for %s:%d" % addr)
            af, socktype, proto, canonname, sa = addrs[0]
            sock = socket.socket(af, socktype, proto)
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            sock.setblocking(False)
            sockets.append(sock)
        self._local_sock, self._remote_sock = sockets
        # Only the client-facing socket is bound; the upstream socket just
        # sends to the configured resolver addresses.
        self._local_sock.bind(self._local_addr)

    def _rebuild_sockets(self):
        """Drop and recreate both sockets (used after ECONNRESET/EACCES)."""
        self._id_to_addr.clear()
        self._loop.remove(self._local_sock)
        self._loop.remove(self._remote_sock)
        self._local_sock.close()
        self._remote_sock.close()
        self._create_sockets()
        self._loop.add(self._local_sock, eventloop.POLL_IN)
        self._loop.add(self._remote_sock, eventloop.POLL_IN)

    def add_to_loop(self, loop):
        DNSRelay.add_to_loop(self, loop)
        loop.add(self._local_sock, eventloop.POLL_IN)
        loop.add(self._remote_sock, eventloop.POLL_IN)

    def _handle_local(self, sock):
        """Handle a query from a client: answer from the hosts file when
        possible, otherwise forward it to every upstream resolver."""
        try:
            data, addr = sock.recvfrom(BUF_SIZE)
        except (OSError, IOError) as e:
            logging.error(e)
            if eventloop.errno_from_exception(e) == errno.ECONNRESET:
                # just for Windows lol
                self._rebuild_sockets()
            return
        header = asyncdns.parse_header(data)
        if header:
            try:
                req_id = header[0]
                req = asyncdns.parse_response(data)
                logging.info('request %s', req.hostname)
                if req.hostname in self._hosts:
                    response = self.build_response(data,
                                                   self._hosts[req.hostname])
                    if response:
                        logging.info('%s hit /etc/hosts', req.hostname)
                        self._local_sock.sendto(response, addr)
                        return
                # Remember who asked so the answer can be routed back, then
                # fan the query out to every upstream.
                self._id_to_addr[req_id] = addr
                for remote_addr in self._remote_addrs:
                    self._remote_sock.sendto(data, remote_addr)
            except Exception as e:
                import traceback
                traceback.print_exc()
                logging.error(e)

    def _handle_remote(self, sock):
        """Handle an answer from an upstream: drop filtered answers, delay
        empty ones, and relay everything else back to the client."""
        try:
            data, addr = sock.recvfrom(BUF_SIZE)
        except (OSError, IOError) as e:
            logging.error(e)
            if eventloop.errno_from_exception(e) == errno.ECONNRESET:
                # just for Windows lol
                self._rebuild_sockets()
            return
        if data:
            try:
                header = asyncdns.parse_header(data)
                if header:
                    req_id = header[0]
                    res = asyncdns.parse_response(data)
                    logging.info('response from %s:%d %s', addr[0], addr[1],
                                 res)
                    # From here on `addr` is the waiting client, not the
                    # upstream the datagram came from.
                    addr = self._id_to_addr.get(req_id, None)
                    if addr:
                        # Discard answers containing blacklisted addresses.
                        for answer in res.answers:
                            if answer and answer[0] in GFW_LIST:
                                return
                        if not res.answers:
                            # delay empty results: a later non-empty answer
                            # from another upstream can still win.
                            def _send_later():
                                self._local_sock.sendto(data, addr)
                            self._pending_responses.append((time.time(),
                                                            _send_later))
                            return
                        self._local_sock.sendto(data, addr)
                        del self._id_to_addr[req_id]
            except Exception as e:
                import traceback
                traceback.print_exc()
                logging.error(e)
                if eventloop.errno_from_exception(e) == errno.EACCES:
                    # when we have changed our ip
                    self._rebuild_sockets()

    def handle_events(self, events):
        """Dispatch poll events, then run periodic sweep/flush housekeeping."""
        for sock, fd, event in events:
            if sock == self._local_sock:
                self._handle_local(sock)
            elif sock == self._remote_sock:
                self._handle_remote(sock)
        now = time.time()
        # NOTE(review): _last_time is never advanced after __init__, so once
        # the first CACHE_TIMEOUT/2 window passes this sweep runs on every
        # batch — confirm whether updating _last_time here was intended.
        if now - self._last_time > CACHE_TIMEOUT / 2:
            self._id_to_addr.sweep()
        # Flush delayed (empty) responses whose hold-back period has elapsed;
        # the list is ordered by insertion time, so stop at the first young one.
        i = 0
        for pending_response in self._pending_responses:
            ts, cb = pending_response
            if now - ts > EMPTY_RESULT_DELAY:
                cb()
                i += 1
            else:
                break
        self._pending_responses = self._pending_responses[i:]
class TCPDNSRelay(DNSRelay):
    def __init__(self, config):
        """Create the TCP relay: resolve the local address and open a
        non-blocking listening socket on it."""
        DNSRelay.__init__(self, config)
        # Bidirectional pairing between accepted client sockets and their
        # upstream connections.
        self._local_to_remote = {}
        self._remote_to_local = {}
        addrs = socket.getaddrinfo(self._local_addr[0], self._local_addr[1], 0,
                                   socket.SOCK_STREAM, socket.SOL_TCP)
        if len(addrs) == 0:
            raise Exception("can't get addrinfo for %s:%d" % self._local_addr)
        af, socktype, proto, canonname, sa = addrs[0]
        self._listen_sock = socket.socket(af, socktype, proto)
        self._listen_sock.setblocking(False)
        self._listen_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self._listen_sock.bind(self._local_addr)
        self._listen_sock.listen(1024)
def _handle_conn(self, sock):
try:
local, addr = sock.accept()
addrs = socket.getaddrinfo(self._remote_addr[0],
self._remote_addr[1], 0,
socket.SOCK_STREAM, socket.SOL_TCP)
if len(addrs) == 0:
raise Exception("can't get addrinfo for %s:%d" %
self._remote_addr)
af, socktype, proto, canonname, sa = addrs[0]
remote = socket.socket(af, socktype, proto)
local.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1)
remote.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1)
self._local_to_remote[local] = remote
self._remote_to_local[remote] = local
self._loop.add(local, 0)
self._loop.add(remote, eventloop.POLL_OUT)
try:
remote.connect(self._remote_addr)
except (OSError, IOError) as e:
if eventloop.errno_from_exception(e) in (errno.EINPROGRESS,
errno.EAGAIN):
pass
else:
raise
except (OSError, IOError) as e:
logging.error(e)
def _destroy(self, local, remote):
if local in self._local_to_remote:
self._loop.remove(local)
self._loop.remove(remote)
del self._local_to_remote[local]
del self._remote_to_local[remote]
local.close()
remote.close()
else:
logging.error('already destroyed')
def _handle_local(self, local, event):
remote = self._local_to_remote[local]
if event & (eventloop.POLL_ERR | eventloop.POLL_HUP):
self._destroy(local, remote)
elif event & eventloop.POLL_IN:
try:
data = local.recv(BUF_SIZE)
if not data:
self._destroy(local, remote)
else:
remote.send(data)
except (OSError, IOError) as e:
self._destroy(local, self._local_to_remote[local])
logging.error(e)
def _handle_remote(self, remote, event):
local = self._remote_to_local[remote]
if event & (eventloop.POLL_ERR | eventloop.POLL_HUP):
self._destroy(local, remote)
elif event & eventloop.POLL_OUT:
self._loop.modify(remote, eventloop.POLL_IN)
self._loop.modify(local, eventloop.POLL_IN)
elif event & eventloop.POLL_IN:
try:
data = remote.recv(BUF_SIZE)
if not data:
self._destroy(local, remote)
else:
try:
res = asyncdns.parse_response(data[2:])
if res:
logging.info('response %s', res)
except Exception as e:
logging.error(e)
local.send(data)
except (OSError, IOError) as e:
self._destroy(local, remote)
logging.error(e)
def add_to_loop(self, loop):
DNSRelay.add_to_loop(self, loop)
loop.add(self._listen_sock, eventloop.POLL_IN)
def handle_events(self, events):
for sock, fd, event in events:
if sock == self._listen_sock:
self._handle_conn(sock)
elif sock in self._local_to_remote:
self._handle_local(sock, event)
elif sock in self._remote_to_local:
self._handle_remote(sock, event)
# TODO implement timeout
def main():
    """Configure logging, parse CLI options, optionally load the IP
    blacklist, then build the UDP and TCP relays and run the event loop."""
    logging.basicConfig(level=logging.INFO,
                        format='%(asctime)s %(levelname)-8s %(message)s',
                        datefmt='%Y-%m-%d %H:%M:%S', filemode='a+')
    parser = argparse.ArgumentParser(description='Forward DNS requests.')
    parser.add_argument('-b', '--local_address', metavar='BIND_ADDR', type=str,
                        help='address that listens, default: 127.0.0.1',
                        default='127.0.0.1')
    parser.add_argument('-p', '--local_port', metavar='BIND_PORT', type=int,
                        help='port that listens, default: 53', default=53)
    parser.add_argument('-s', '--dns', metavar='DNS', type=str,
                        help='DNS server to use, default: '
                        '114.114.114.114,208.67.222.222,8.8.8.8',
                        default='114.114.114.114,208.67.222.222,8.8.8.8')
    parser.add_argument('-l', '--ip_list', metavar='IP_LIST_FILE', type=str,
                        default=None)
    config = vars(parser.parse_args())
    if config['ip_list']:
        logging.info('loading IP list from %s', config['ip_list'])
        with open(config['ip_list'], 'rb') as f:
            # Replace the module-level blacklist with the file's raw lines
            # (bytes, one IP per line).
            global GFW_LIST
            GFW_LIST = set(f.readlines())
    logging.info("starting dns at %s:%d",
                 config['local_address'], config['local_port'])
    loop = eventloop.EventLoop()
    try:
        udprelay = UDPDNSRelay(config)
        udprelay.add_to_loop(loop)
        tcprelay = TCPDNSRelay(config)
        tcprelay.add_to_loop(loop)
        loop.run()
    except (OSError, IOError) as e:
        logging.error(e)
        if eventloop.errno_from_exception(e) == errno.EACCES:
            # Binding port 53 needs elevated privileges.
            logging.info('please use sudo to run this program')
        sys.exit(1)
# Run the relay only when executed as a script (not on import).
if __name__ == '__main__':
    main()
| |
# Copyright (c) 2014 Intel Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# The contents of this file are mainly copied from cm_api sources,
# released by Cloudrea. Codes not used by Sahara CDH plugin are removed.
# You can find the original codes at
#
# https://github.com/cloudera/cm_api/tree/master/python/src/cm_api
#
# To satisfy the pep8 and python3 tests, we did some changes to the codes.
# We also change some importings to use Sahara inherited classes.
import cookielib
import posixpath
import types
import urllib
import urllib2
from oslo_log import log as logging
from oslo_serialization import jsonutils as json
import six
from sahara.i18n import _LW
from sahara.plugins.cdh import exceptions as ex
LOG = logging.getLogger(__name__)
class HttpClient(object):
    """Basic HTTP client tailored for rest APIs."""

    def __init__(self, base_url, exc_class=ex.CMApiException):
        """Init Method

        :param base_url: The base url to the API.
        :param exc_class: An exception class to handle non-200 results.

        Creates an HTTP(S) client to connect to the Cloudera Manager API.
        """
        self._base_url = base_url.rstrip('/')
        self._exc_class = exc_class
        self._headers = {}
        # Make a basic auth handler that does nothing. Set credentials later.
        self._passmgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
        authhandler = urllib2.HTTPBasicAuthHandler(self._passmgr)
        # Make a cookie processor
        cookiejar = cookielib.CookieJar()
        self._opener = urllib2.build_opener(
            urllib2.HTTPErrorProcessor(),
            urllib2.HTTPCookieProcessor(cookiejar),
            authhandler)

    def set_basic_auth(self, username, password, realm):
        """Set up basic auth for the client

        :param username: Login name.
        :param password: Login password.
        :param realm: The authentication realm.
        :return: The current object
        """
        self._passmgr.add_password(realm, self._base_url, username, password)
        return self

    def set_headers(self, headers):
        """Add headers to the request

        :param headers: A dictionary with the key value pairs for the headers
        :return: The current object
        """
        self._headers = headers
        return self

    @property
    def base_url(self):
        return self._base_url

    def _get_headers(self, headers):
        # Per-request headers override the client-wide defaults.
        res = self._headers.copy()
        if headers:
            res.update(headers)
        return res

    def execute(self, http_method, path, params=None, data=None, headers=None):
        """Submit an HTTP request

        :param http_method: GET, POST, PUT, DELETE
        :param path: The path of the resource.
        :param params: Key-value parameter data.
        :param data: The data to attach to the body of the request.
        :param headers: The headers to set for this request.
        :return: The result of urllib2.urlopen()
        """
        # Prepare URL and params
        url = self._make_url(path, params)
        if http_method in ("GET", "DELETE"):
            if data is not None:
                LOG.warning(_LW("{method} method does not pass any data. "
                                "Path {path}").format(method=http_method,
                                                      path=path))
                data = None
        # Setup the request
        request = urllib2.Request(url, data)
        # Hack/workaround because urllib2 only does GET and POST
        request.get_method = lambda: http_method
        headers = self._get_headers(headers)
        for k, v in headers.items():
            request.add_header(k, v)
        # Call it
        LOG.debug("Method: {method}, URL: {url}".format(method=http_method,
                                                        url=url))
        try:
            return self._opener.open(request)
        except urllib2.HTTPError as err:
            # Renamed from ``ex``: that name shadowed the module alias
            # ``ex`` (sahara.plugins.cdh.exceptions) imported above.
            message = six.text_type(err)
            try:
                # Server errors often carry a JSON body with a 'message' key.
                json_body = json.loads(message)
                message = json_body['message']
            except (ValueError, KeyError):
                pass  # Ignore json parsing error
            raise self._exc_class(message)

    def _make_url(self, path, params):
        """Join base_url, path and urlencoded params into an ASCII URI."""
        res = self._base_url
        if path:
            res += posixpath.normpath('/' + path.lstrip('/'))
        if params:
            # doseq=True: sequence values become repeated parameters.
            param_str = urllib.urlencode(params, True)
            res += '?' + param_str
        return iri_to_uri(res)
#
# Method copied from Django
#
def iri_to_uri(iri):
    """Convert IRI to URI

    Convert an Internationalized Resource Identifier (IRI) portion to a URI
    portion that is suitable for inclusion in a URL.
    This is the algorithm from section 3.1 of RFC 3987. However, since we are
    assuming input is either UTF-8 or unicode already, we can simplify things a
    little from the full method.

    :param iri: IRI text (unicode or UTF-8 bytestring), or None.
    Returns an ASCII string containing the encoded result (None passes
    through unchanged).
    """
    # The list of safe characters here is constructed from the "reserved" and
    # "unreserved" characters specified in sections 2.2 and 2.3 of RFC 3986:
    #     reserved    = gen-delims / sub-delims
    #     gen-delims  = ":" / "/" / "?" / "#" / "[" / "]" / "@"
    #     sub-delims  = "!" / "$" / "&" / "'" / "(" / ")"
    #                   / "*" / "+" / "," / ";" / "="
    #     unreserved  = ALPHA / DIGIT / "-" / "." / "_" / "~"
    # Of the unreserved characters, urllib.quote already considers all but
    # the ~ safe.
    # The % character is also added to the list of safe characters here, as the
    # end of section 3.1 of RFC 3987 specifically mentions that % must not be
    # converted.
    if iri is None:
        return iri
    # NOTE: urllib.quote is the Python 2 API (urllib.parse.quote in Python
    # 3); this module targets Python 2, like smart_str below.
    return urllib.quote(smart_str(iri), safe="/#%[]=:;$&()+,!?*@'~")
#
# Method copied from Django
#
def smart_str(s, encoding='utf-8', strings_only=False, errors='strict'):
    """Convert string into bytestring version

    Returns a bytestring version of 's', encoded as specified in 'encoding'.
    If strings_only is True, don't convert (some) non-string-like objects.

    :param s: The object to convert (uses Python 2 ``basestring``/``unicode``).
    :param encoding: Target byte encoding.
    :param strings_only: When True, pass None and ints through untouched.
    :param errors: Codec error-handling scheme (e.g. 'strict', 'replace').
    """
    if strings_only and isinstance(s, (types.NoneType, int)):
        return s
    elif not isinstance(s, basestring):
        # Not a string at all: stringify it first.
        try:
            return six.text_type(s)
        except UnicodeEncodeError:
            if isinstance(s, Exception):
                # An Exception subclass containing non-ASCII data that doesn't
                # know how to print itself properly. We shouldn't raise a
                # further exception.
                return ' '.join([smart_str(arg, encoding, strings_only,
                                 errors) for arg in s])
            return unicode(s).encode(encoding, errors)
    elif isinstance(s, unicode):
        return s.encode(encoding, errors)
    elif s and encoding != 'utf-8':
        # Already a bytestring but in the wrong encoding: transcode via UTF-8.
        return s.decode('utf-8', errors).encode(encoding, errors)
    else:
        return s
| |
import binascii
import datetime
import io
import operator
import sys
import pytest
import temporenc
PY2 = sys.version_info[0] == 2
def from_hex(s):
    """Compatibility helper like bytes.fromhex() in Python 3.

    Spaces in ``s`` are ignored so hex dumps can be written in groups.
    """
    cleaned = s.replace(' ', '')
    return binascii.unhexlify(cleaned.encode('ascii'))
# Round-trip for type D (date only): pack to known bytes, unpack back.
def test_type_d():
    actual = temporenc.packb(type='D', year=1983, month=1, day=15)
    expected = from_hex('8f 7e 0e')
    assert actual == expected
    v = temporenc.unpackb(expected)
    assert (v.year, v.month, v.day) == (1983, 1, 15)
    assert (v.hour, v.minute, v.second) == (None, None, None)
# Round-trip for type T (time only).
def test_type_t():
    actual = temporenc.packb(type='T', hour=18, minute=25, second=12)
    expected = from_hex('a1 26 4c')
    assert actual == expected
    v = temporenc.unpackb(expected)
    assert (v.year, v.month, v.day) == (None, None, None)
    assert (v.hour, v.minute, v.second) == (18, 25, 12)
# Round-trip for type DT (date and time).
def test_type_dt():
    actual = temporenc.packb(
        type='DT',
        year=1983, month=1, day=15,
        hour=18, minute=25, second=12)
    expected = from_hex('1e fc 1d 26 4c')
    assert actual == expected
    v = temporenc.unpackb(expected)
    assert (v.year, v.month, v.day) == (1983, 1, 15)
    assert (v.hour, v.minute, v.second) == (18, 25, 12)
# Round-trip for type DTZ (date, time and UTC offset in minutes).
def test_type_dtz():
    actual = temporenc.packb(
        type='DTZ',
        year=1983, month=1, day=15,
        hour=18, minute=25, second=12,
        tz_offset=60)
    expected = from_hex('cf 7e 0e 93 26 44')
    assert actual == expected
    v = temporenc.unpackb(expected)
    assert (v.year, v.month, v.day) == (1983, 1, 15)
    assert (v.hour, v.minute, v.second) == (18, 25, 12)
    assert v.tz_offset == 60
# Round-trip for type DTS at every sub-second precision (and with none).
def test_type_dts():
    # Millisecond precision.
    actual = temporenc.packb(
        type='DTS',
        year=1983, month=1, day=15,
        hour=18, minute=25, second=12, millisecond=123)
    dts_ms = from_hex('47 bf 07 49 93 07 b0')
    assert actual == dts_ms
    v = temporenc.unpackb(dts_ms)
    assert (v.year, v.month, v.day) == (1983, 1, 15)
    assert (v.hour, v.minute, v.second) == (18, 25, 12)
    assert v.millisecond == 123
    assert v.microsecond == 123000
    assert v.nanosecond == 123000000
    # Microsecond precision.
    actual = temporenc.packb(
        type='DTS',
        year=1983, month=1, day=15,
        hour=18, minute=25, second=12, microsecond=123456)
    dts_us = from_hex('57 bf 07 49 93 07 89 00')
    assert actual == dts_us
    v = temporenc.unpackb(dts_us)
    assert (v.year, v.month, v.day) == (1983, 1, 15)
    assert (v.hour, v.minute, v.second) == (18, 25, 12)
    assert v.millisecond == 123
    assert v.microsecond == 123456
    assert v.nanosecond == 123456000
    # Nanosecond precision.
    actual = temporenc.packb(
        type='DTS',
        year=1983, month=1, day=15,
        hour=18, minute=25, second=12, nanosecond=123456789)
    dts_ns = from_hex('67 bf 07 49 93 07 5b cd 15')
    assert actual == dts_ns
    v = temporenc.unpackb(dts_ns)
    assert (v.year, v.month, v.day) == (1983, 1, 15)
    assert (v.hour, v.minute, v.second) == (18, 25, 12)
    assert v.millisecond == 123
    assert v.microsecond == 123456
    assert v.nanosecond == 123456789
    # No sub-second component at all.
    actual = temporenc.packb(
        type='DTS',
        year=1983, month=1, day=15,
        hour=18, minute=25, second=12)
    dts_none = from_hex('77 bf 07 49 93 00')
    assert actual == dts_none
    v = temporenc.unpackb(dts_none)
    assert (v.year, v.month, v.day) == (1983, 1, 15)
    assert (v.hour, v.minute, v.second) == (18, 25, 12)
    assert v.millisecond is None
    assert v.microsecond is None
    assert v.nanosecond is None
def test_type_dtsz():
    """Round-trip type DTSZ at every sub-second precision (and with none)."""
    # Millisecond precision.
    actual = temporenc.packb(
        type='DTSZ',
        year=1983, month=1, day=15,
        hour=18, minute=25, second=12, millisecond=123,
        tz_offset=60)
    dtsz_ms = from_hex('e3 df 83 a4 c9 83 dc 40')
    assert actual == dtsz_ms
    v = temporenc.unpackb(dtsz_ms)
    assert (v.year, v.month, v.day) == (1983, 1, 15)
    assert (v.hour, v.minute, v.second) == (18, 25, 12)
    assert v.millisecond == 123
    assert v.microsecond == 123000
    assert v.nanosecond == 123000000
    assert v.tz_offset == 60
    # Microsecond precision.
    actual = temporenc.packb(
        type='DTSZ',
        year=1983, month=1, day=15,
        hour=18, minute=25, second=12, microsecond=123456,
        tz_offset=60)
    dtsz_us = from_hex('eb df 83 a4 c9 83 c4 81 10')
    assert actual == dtsz_us
    # BUG FIX: unpack the microsecond value before asserting on it; the
    # original asserted against the stale ``v`` from the millisecond case.
    v = temporenc.unpackb(dtsz_us)
    assert v.microsecond == 123456
    assert v.tz_offset == 60
    # Nanosecond precision.
    actual = temporenc.packb(
        type='DTSZ',
        year=1983, month=1, day=15,
        hour=18, minute=25, second=12, nanosecond=123456789,
        tz_offset=60)
    dtsz_ns = from_hex('f3 df 83 a4 c9 83 ad e6 8a c4')
    assert actual == dtsz_ns
    v = temporenc.unpackb(dtsz_ns)
    assert v.nanosecond == 123456789
    assert v.tz_offset == 60
    # No sub-second component at all.
    actual = temporenc.packb(
        type='DTSZ',
        year=1983, month=1, day=15,
        hour=18, minute=25, second=12,
        tz_offset=60)
    dtsz_none = from_hex('fb df 83 a4 c9 91 00')
    assert actual == dtsz_none
    v = temporenc.unpackb(dtsz_none)
    # BUG FIX: check all three sub-second attributes instead of asserting
    # ``millisecond`` three times (copy-paste error in the original).
    assert v.millisecond is None
    assert v.microsecond is None
    assert v.nanosecond is None
    assert v.tz_offset == 60
# The packer picks the smallest type able to represent the given fields;
# the encoded length identifies the chosen type.
def test_type_detection():
    # Empty value, so should result in the smallest type
    assert len(temporenc.packb()) == 3
    # Type D
    assert len(temporenc.packb(year=1983)) == 3
    assert temporenc.unpackb(temporenc.packb(year=1983)).year == 1983
    # Type T
    assert len(temporenc.packb(hour=18)) == 3
    assert temporenc.unpackb(temporenc.packb(hour=18)).hour == 18
    # Type DT
    assert len(temporenc.packb(year=1983, hour=18)) == 5
    # Type DTS
    assert len(temporenc.packb(millisecond=0)) == 7
    assert len(temporenc.packb(microsecond=0)) == 8
    assert len(temporenc.packb(nanosecond=0)) == 9
    # Type DTZ
    assert len(temporenc.packb(year=1983, hour=18, tz_offset=120)) == 6
    # Type DTSZ
    assert len(temporenc.packb(millisecond=0, tz_offset=120)) == 8
# A value packed with no fields set unpacks to all-None components.
def test_type_empty_values():
    v = temporenc.unpackb(temporenc.packb(type='DTS'))
    assert (v.year, v.month, v.day) == (None, None, None)
    assert (v.hour, v.minute, v.second) == (None, None, None)
    assert (v.millisecond, v.microsecond, v.nanosecond) == (None, None, None)
    assert v.tz_offset is None
# unpackb() must reject inputs longer or shorter than the fixed size
# implied by the type tag.
def test_incorrect_sizes():
    # Too long
    with pytest.raises(ValueError):
        temporenc.unpackb(temporenc.packb(year=1983) + b'foo')
    with pytest.raises(ValueError):
        temporenc.unpackb(temporenc.packb(millisecond=0) + b'foo')
    # Too short
    with pytest.raises(ValueError):
        temporenc.unpackb(temporenc.packb(year=1983)[:-1])
    with pytest.raises(ValueError):
        temporenc.unpackb(temporenc.packb(millisecond=0)[:-1])
# unpackb() accepts bytes-like input such as bytearray, not only bytes.
def test_unpack_bytearray():
    ba = bytearray((0x8f, 0x7e, 0x0e))
    assert temporenc.unpackb(ba) is not None
# unpack() reads exactly one value from a file-like object per call.
def test_stream_unpacking():
    # This stream contains two values and one byte of trailing data
    fp = io.BytesIO(from_hex('8f 7e 0e 8f 7e 0f ff'))
    assert temporenc.unpack(fp).day == 15
    assert fp.tell() == 3
    assert temporenc.unpack(fp).day == 16
    assert fp.tell() == 6
    assert fp.read() == b'\xff'
# pack() appends to a file-like object and returns the number of bytes written.
def test_stream_packing():
    fp = io.BytesIO()
    assert temporenc.pack(fp, year=1983) == 3
    assert temporenc.pack(fp, year=1984) == 3
    assert fp.tell() == 6
    assert len(fp.getvalue()) == 6
# Unknown type strings are rejected.
def test_wrong_type():
    with pytest.raises(ValueError):
        temporenc.packb(type="foo", year=1983)
# Every component has a bounded range; out-of-range inputs must raise.
def test_out_of_range_values():
    with pytest.raises(ValueError):
        temporenc.packb(year=123456)
    with pytest.raises(ValueError):
        temporenc.packb(month=-12)
    with pytest.raises(ValueError):
        temporenc.packb(day=1234)
    with pytest.raises(ValueError):
        temporenc.packb(hour=1234)
    with pytest.raises(ValueError):
        temporenc.packb(minute=1234)
    with pytest.raises(ValueError):
        temporenc.packb(second=1234)
    with pytest.raises(ValueError):
        temporenc.packb(millisecond=1000)
    with pytest.raises(ValueError):
        temporenc.packb(microsecond=1000000)
    with pytest.raises(ValueError):
        temporenc.packb(nanosecond=10000000000)
    with pytest.raises(ValueError):
        temporenc.packb(tz_offset=1050)
    with pytest.raises(ValueError):
        temporenc.packb(tz_offset=13)  # not a full quarter
# Byte strings that cannot be valid temporenc values must be rejected
# with an informative error message.
def test_unpacking_bogus_data():
    with pytest.raises(ValueError) as e:
        # First byte can never occur in valid values.
        temporenc.unpackb(from_hex('bb 12 34'))
    assert 'tag' in str(e.value)
    with pytest.raises(ValueError) as e:
        temporenc.unpackb(from_hex('47 bf 07 49 93 07 b2'))
    assert 'padding' in str(e.value)
# Structurally valid packets with out-of-range component values must be
# rejected, naming the offending component in the message.
def test_range_check_unpacking():
    # Type T with out of range hour
    with pytest.raises(ValueError) as e:
        temporenc.unpackb(bytearray((
            0b10100001, 0b11100000, 0b00000000)))
    assert 'hour' in str(e.value)
    # Type T with out of range minute
    with pytest.raises(ValueError) as e:
        temporenc.unpackb(bytearray((
            0b10100000, 0b00001111, 0b01000000)))
    assert 'minute' in str(e.value)
    # Type T with out of range second
    with pytest.raises(ValueError) as e:
        temporenc.unpackb(bytearray((
            0b10100000, 0b00000000, 0b00111110)))
    assert 'second' in str(e.value)
    # Type D with out of range month
    with pytest.raises(ValueError) as e:
        temporenc.unpackb(bytearray((
            0b10000000, 0b00000001, 0b11000000)))
    assert 'month' in str(e.value)
    # Type DTS with out of range millisecond
    with pytest.raises(ValueError) as e:
        temporenc.unpackb(bytearray((
            0b01000000, 0b00000000, 0b00000000, 0b00000000,
            0b00000000, 0b00111111, 0b11110000)))
    assert 'sub-second' in str(e.value)
    # Type DTS with out of range microsecond
    with pytest.raises(ValueError) as e:
        temporenc.unpackb(bytearray((
            0b01010000, 0b00000000, 0b00000000, 0b00000000,
            0b00000000, 0b00111111, 0b11111111, 0b11111100)))
    assert 'sub-second' in str(e.value)
    # Type DTS with out of range nanosecond
    with pytest.raises(ValueError) as e:
        temporenc.unpackb(bytearray((
            0b01100000, 0b00000000, 0b00000000, 0b00000000,
            0b00000000, 0b00111111, 0b11111111, 0b11111111,
            0b11111111)))
    assert 'sub-second' in str(e.value)
# packb() accepts native datetime/date/time objects directly.
def test_native_packing():
    with pytest.raises(ValueError):
        temporenc.packb(object())
    # datetime.date => D
    actual = temporenc.packb(datetime.date(1983, 1, 15))
    expected = from_hex('8f 7e 0e')
    assert actual == expected
    # datetime.datetime => DTS, unless told otherwise
    actual = temporenc.packb(datetime.datetime(
        1983, 1, 15, 18, 25, 12, 123456))
    expected = from_hex('57 bf 07 49 93 07 89 00')
    assert actual == expected
    actual = temporenc.packb(
        datetime.datetime(1983, 1, 15, 18, 25, 12),
        type='DT')
    expected = from_hex('1e fc 1d 26 4c')
    assert actual == expected
    # datetime.time => DTS, unless told otherwise
    assert len(temporenc.packb(datetime.datetime.now().time())) == 8
    actual = temporenc.packb(
        datetime.time(18, 25, 12),
        type='T')
    expected = from_hex('a1 26 4c')
    assert actual == expected
# Explicit keyword arguments override fields taken from a native value.
def test_native_packing_with_overrides():
    actual = temporenc.packb(
        datetime.datetime(1984, 1, 16, 18, 26, 12, 123456),
        year=1983, day=15, minute=25)
    expected = from_hex('57 bf 07 49 93 07 89 00')
    assert actual == expected
# Unpacked values convert back to native date/datetime/time objects;
# strict mode raises when components fall outside the native ranges.
def test_native_unpacking():
    value = temporenc.unpackb(temporenc.packb(
        year=1983, month=1, day=15))
    assert value.date() == datetime.date(1983, 1, 15)
    value = temporenc.unpackb(temporenc.packb(
        year=1983, month=1, day=15,
        hour=1, minute=2, second=3, microsecond=456))
    assert value.datetime() == datetime.datetime(1983, 1, 15, 1, 2, 3, 456)
    value = temporenc.unpackb(temporenc.packb(
        year=1983, month=1, day=15,  # will be ignored
        hour=1, minute=2, second=3, microsecond=456))
    assert value.time() == datetime.time(1, 2, 3, 456)
    value = temporenc.unpackb(temporenc.packb(year=1234))
    with pytest.raises(ValueError):
        value.date()
    assert value.date(strict=False).year == 1234
    assert value.datetime(strict=False).year == 1234
    value = temporenc.unpackb(temporenc.packb(hour=14))
    with pytest.raises(ValueError):
        value.time()
    assert value.time(strict=False).hour == 14
    assert value.datetime(strict=False).hour == 14
# A leap second (second == 60) cannot map to a native datetime: strict
# mode raises, lenient mode clamps to :59.
def test_native_unpacking_leap_second():
    value = temporenc.unpackb(temporenc.packb(
        year=2013, month=6, day=30,
        hour=23, minute=59, second=60))
    with pytest.raises(ValueError):
        value.datetime()  # second out of range
    dt = value.datetime(strict=False)
    assert dt == datetime.datetime(2013, 6, 30, 23, 59, 59)
# Conversions require the relevant components to be present.
def test_native_unpacking_incomplete():
    moment = temporenc.unpackb(temporenc.packb(type='DT', year=1983, hour=12))
    with pytest.raises(ValueError):
        moment.date()
    with pytest.raises(ValueError):
        moment.time()
    with pytest.raises(ValueError):
        moment.datetime()
    moment = temporenc.unpackb(temporenc.packb(datetime.datetime.now().date()))
    with pytest.raises(ValueError):
        moment.datetime()
def test_native_time_zone():
    """Time-zone-aware native values round-trip through DTZ/DTSZ."""
    # Python < 3.2 doesn't have concrete tzinfo implementations. This
    # test uses the internal helper class instead to avoid depending on
    # newer Python versions (or on pytz).
    from temporenc.temporenc import FixedOffset
    dutch_winter = FixedOffset(60)  # UTC +01:00
    zero_delta = datetime.timedelta(0)
    hour_delta = datetime.timedelta(minutes=60)
    expected_name = "UTC+01:00"
    assert dutch_winter.tzname(None) == expected_name
    assert expected_name in str(dutch_winter)
    assert expected_name in repr(dutch_winter)
    assert dutch_winter.dst(None) == zero_delta
    # DTZ
    actual = temporenc.packb(
        datetime.datetime(1983, 1, 15, 18, 25, 12, 0, tzinfo=dutch_winter),
        type='DTZ')
    expected = from_hex('cf 7e 0e 93 26 44')
    assert actual == expected
    moment = temporenc.unpackb(expected)
    assert moment.hour == 18
    assert moment.tz_offset == 60
    dt = moment.datetime()
    assert dt.hour == 18
    assert dt.utcoffset() == hour_delta
    # DTSZ (microsecond, since native types have that precision)
    actual = temporenc.packb(
        datetime.datetime(
            1983, 1, 15, 18, 25, 12, 123456,
            tzinfo=dutch_winter),
        type='DTSZ')
    dtsz_us = from_hex('eb df 83 a4 c9 83 c4 81 10')
    assert actual == dtsz_us
    # BUG FIX: unpack the DTSZ bytes just produced; the original unpacked
    # the stale DTZ ``expected`` value again.
    moment = temporenc.unpackb(dtsz_us)
    assert moment.datetime().hour == 18
    # Time only with time zone
    moment = temporenc.unpackb(temporenc.packb(
        datetime.time(0, 30, 0, 123456, tzinfo=dutch_winter),
        type='DTSZ'))
    assert moment.tz_offset == 60
    for obj in [moment.time(), moment.datetime(strict=False)]:
        assert (obj.hour, obj.minute, obj.microsecond) == (0, 30, 123456)
        assert obj.utcoffset() == hour_delta
# str()/repr() render the set components and use '??' for missing ones.
def test_string_conversion():
    # Date only
    value = temporenc.unpackb(temporenc.packb(year=1983, month=1, day=15))
    assert str(value) == "1983-01-15"
    value = temporenc.unpackb(temporenc.packb(year=1983, day=15))
    assert str(value) == "1983-??-15"
    # Time only
    value = temporenc.unpackb(temporenc.packb(hour=1, minute=2, second=3))
    assert str(value) == "01:02:03"
    value = temporenc.unpackb(temporenc.packb(
        hour=1, second=3, microsecond=12340))
    assert str(value) == "01:??:03.01234"
    # Date and time
    value = temporenc.unpackb(temporenc.packb(
        year=1983, month=1, day=15,
        hour=18, minute=25))
    assert str(value) == "1983-01-15 18:25:??"
    # If sub-second is set but equal to 0, the string should show it
    # properly anyway.
    value = temporenc.unpackb(temporenc.packb(
        hour=12, minute=34, second=56, microsecond=0))
    assert str(value) == "12:34:56.0"
    # Time zone info should be included
    moment = temporenc.unpackb(from_hex('cf 7e 0e 93 26 40'))
    assert str(moment) == '1983-01-15 18:25:12Z'
    moment = temporenc.unpackb(from_hex('cf 7e 0e 93 26 44'))
    assert str(moment) == '1983-01-15 18:25:12+01:00'
    # Very contrived example...
    value = temporenc.unpackb(temporenc.packb(microsecond=1250))
    assert str(value) == "??:??:??.00125"
    # And a basic one for repr()
    value = temporenc.unpackb(temporenc.packb(hour=12, minute=34, second=56))
    assert '12:34:56' in repr(value)
def test_comparison():
    """Unpacked values support equality and ordering among themselves."""
    now = datetime.datetime.now()
    later = now.replace(microsecond=0) + datetime.timedelta(hours=1)
    v1 = temporenc.unpackb(temporenc.packb(now))
    v2 = temporenc.unpackb(temporenc.packb(now))
    v3 = temporenc.unpackb(temporenc.packb(later))
    # Same
    assert v1 == v2
    assert v1 != v3
    assert v1 >= v2
    # BUG FIX: this line duplicated ``v1 >= v2``; it should check the
    # mirrored operator as well.
    assert v1 <= v2
    assert not (v1 > v2)
    assert not (v1 < v2)
    # Different
    assert v3 > v1
    assert v1 < v3
    assert v3 >= v1
    assert v1 <= v3
    # Equality tests against other types: not equal
    bogus = 'junk'
    assert not (v1 == bogus)
    assert v1 != bogus
    # Comparison against other types:
    # * fail on Python 3 (unorderable types)
    # * use fallback comparison on Python 2.
    for op in (operator.gt, operator.lt, operator.ge, operator.le):
        if PY2:
            op(v1, bogus)  # should not raise
        else:
            with pytest.raises(TypeError):
                op(v1, bogus)  # should raise
# Equal values hash equal (and collapse to one dict key); distinct
# values hash differently.
def test_hash():
    now = datetime.datetime.now()
    later = now.replace(microsecond=0) + datetime.timedelta(hours=1)
    v1 = temporenc.unpackb(temporenc.packb(now))
    v2 = temporenc.unpackb(temporenc.packb(now))
    v3 = temporenc.unpackb(temporenc.packb(later))
    assert hash(v1) == hash(v2)
    assert hash(v1) != hash(v3)
    d = {}
    d[v1] = 1
    d[v2] = 2
    d[v3] = 3
    # v1 and v2 share one slot; v2's value overwrote v1's.
    assert len(d) == 2
    assert d[v1] == 2
| |
"""HTML utilities suitable for global use."""
from __future__ import unicode_literals
import re
from django.utils import six
from django.utils.encoding import force_str, force_text
from django.utils.functional import keep_lazy, keep_lazy_text
from django.utils.http import RFC3986_GENDELIMS, RFC3986_SUBDELIMS
from django.utils.safestring import SafeData, SafeText, mark_safe
from django.utils.six.moves.urllib.parse import (
parse_qsl, quote, unquote, urlencode, urlsplit, urlunsplit,
)
from django.utils.text import normalize_newlines
from .html_parser import HTMLParseError, HTMLParser
# Configuration for urlize() function.
TRAILING_PUNCTUATION_CHARS = '.,:;!'
# Pairs of wrapping punctuation: a URL wrapped by the opening string may be
# closed by the matching closing string. The fourth pair handles URLs that
# appear inside already-HTML-escaped angle brackets; in this copy the HTML
# entities had been mangled into a duplicate of the literal ('<', '>') pair,
# which is restored here to Django's original ('&lt;', '&gt;').
WRAPPING_PUNCTUATION = [('(', ')'), ('<', '>'), ('[', ']'), ('&lt;', '&gt;'), ('"', '"'), ('\'', '\'')]
# List of possible strings used for bullets in bulleted lists.
# (Entity forms restored: the extraction had rendered them to literal chars.)
DOTS = ['&middot;', '*', '\u2022', '&#149;', '&bull;', '&#8226;']
unencoded_ampersands_re = re.compile(r'&(?!(\w+|#\d+);)')
word_split_re = re.compile(r'''([\s<>"']+)''')
simple_url_re = re.compile(r'^https?://\[?\w', re.IGNORECASE)
simple_url_2_re = re.compile(r'^www\.|^(?!http)\w[^@]+\.(com|edu|gov|int|mil|net|org)($|/.*)$', re.IGNORECASE)
@keep_lazy(six.text_type, SafeText)
def escape(text):
    """
    Returns the given text with ampersands, quotes and angle brackets encoded
    for use in HTML.

    This function always escapes its input, even if it's already escaped and
    marked as such. This may result in double-escaping. If this is a concern,
    use conditional_escape() instead.
    """
    # BUG FIX: the replacement targets had been entity-decoded into no-ops
    # (e.g. .replace('&', '&')). Restored to the HTML entities so the
    # function actually escapes. '&' must be replaced first to avoid
    # double-escaping the entities produced by the later replacements.
    return mark_safe(
        force_text(text).replace('&', '&amp;').replace('<', '&lt;')
        .replace('>', '&gt;').replace('"', '&quot;').replace("'", '&#39;')
    )
# Translation table (for str.translate) mapping characters that are unsafe
# inside JavaScript string literals to their \uXXXX escape sequences.
_js_escapes = {
    ord('\\'): '\\u005C',
    ord('\''): '\\u0027',
    ord('"'): '\\u0022',
    ord('>'): '\\u003E',
    ord('<'): '\\u003C',
    ord('&'): '\\u0026',
    ord('='): '\\u003D',
    ord('-'): '\\u002D',
    ord(';'): '\\u003B',
    ord('`'): '\\u0060',
    # Line/paragraph separators are valid JS line terminators and would
    # break string literals if left unescaped.
    ord('\u2028'): '\\u2028',
    ord('\u2029'): '\\u2029'
}
# Escape every ASCII character with a value less than 32.
_js_escapes.update((ord('%c' % z), '\\u%04X' % z) for z in range(32))
@keep_lazy(six.text_type, SafeText)
def escapejs(value):
    """Hex encodes characters for use in JavaScript strings."""
    # Single pass over the text using the _js_escapes table defined above.
    return mark_safe(force_text(value).translate(_js_escapes))
def conditional_escape(text):
    """
    Like escape(), but leaves pre-escaped strings untouched.

    Relies on the __html__ convention used both by Django's SafeData class
    and by third-party libraries like markupsafe: objects exposing
    __html__() are trusted to already be safe HTML.
    """
    if not hasattr(text, '__html__'):
        return escape(text)
    return text.__html__()
def format_html(format_string, *args, **kwargs):
    """
    Build a small HTML fragment safely.

    Works like str.format, except every positional and keyword argument is
    passed through conditional_escape first and the final result is marked
    safe. Use this instead of str.format or % interpolation for HTML.
    """
    escaped_args = [conditional_escape(arg) for arg in args]
    escaped_kwargs = {key: conditional_escape(value)
                      for key, value in six.iteritems(kwargs)}
    return mark_safe(format_string.format(*escaped_args, **escaped_kwargs))
def format_html_join(sep, format_string, args_generator):
    """
    Format a sequence of argument tuples with one shared format string and
    join the results with 'sep'.

    Each item yielded by 'args_generator' is the 'args' tuple for one
    format_html() call; 'sep' is passed through conditional_escape as well.

    Example:

        format_html_join('\n', "<li>{} {}</li>", ((u.first_name, u.last_name)
                                                  for u in users))
    """
    separator = conditional_escape(sep)
    fragments = (format_html(format_string, *tuple(chunk))
                 for chunk in args_generator)
    return mark_safe(separator.join(fragments))
@keep_lazy_text
def linebreaks(value, autoescape=False):
    """Converts newlines into <p> and <br />s."""
    normalized = normalize_newlines(force_text(value))

    def as_paragraph(block):
        # Escape first (when requested), then turn the remaining single
        # newlines into <br /> markers.
        if autoescape:
            block = escape(block)
        return '<p>%s</p>' % block.replace('\n', '<br />')

    # Two or more consecutive newlines separate paragraphs.
    return '\n\n'.join(as_paragraph(p)
                       for p in re.split('\n{2,}', normalized))
class MLStripper(HTMLParser):
    """HTMLParser subclass that collects everything except tags, so the fed
    document can be read back tag-free via get_data()."""
    def __init__(self):
        HTMLParser.__init__(self)
        self.reset()
        # Accumulated non-tag fragments, joined by get_data().
        self.fed = []
    def handle_data(self, d):
        self.fed.append(d)
    def handle_entityref(self, name):
        # Keep entity references (&name;) verbatim.
        self.fed.append('&%s;' % name)
    def handle_charref(self, name):
        # Keep numeric character references (&#NNN;) verbatim.
        self.fed.append('&#%s;' % name)
    def get_data(self):
        return ''.join(self.fed)
def _strip_once(value):
    """
    Internal tag stripping utility used by strip_tags.

    Runs one MLStripper pass over ``value``; on parser failure the input
    (or the best partial result) is returned rather than raising.
    """
    s = MLStripper()
    try:
        s.feed(value)
    except HTMLParseError:
        # Input the parser cannot handle at all: return it unchanged.
        return value
    try:
        s.close()
    except HTMLParseError:
        # close() choked on trailing unparsed data; keep what was parsed
        # plus the raw remainder so no text is lost.
        return s.get_data() + s.rawdata
    else:
        return s.get_data()
@keep_lazy_text
def strip_tags(value):
    """Returns the given HTML with all tags stripped."""
    value = force_text(value)
    # One _strip_once pass normally suffices; the loop guards against
    # partially-stripped output that still contains tag-like text
    # (http://bugs.python.org/issue20288, affects Python 2 < 2.7.7 and
    # Python 3 < 3.3.5).
    while '<' in value and '>' in value:
        stripped = _strip_once(value)
        no_progress = (len(stripped) >= len(value) or
                       value.count('<') == stripped.count('<'))
        if no_progress:
            # Nothing further detected (or the text grew) -- stop here.
            break
        value = stripped
    return value
@keep_lazy_text
def strip_spaces_between_tags(value):
    """Returns the given HTML with spaces between tags removed."""
    # Collapse any whitespace run that sits between a '>' and a '<'.
    text = force_text(value)
    return re.sub(r'>\s+<', '><', text)
def smart_urlquote(url):
    "Quotes a URL if it isn't already quoted."
    def unquote_quote(segment):
        # Unquote first so already-quoted input is not double-quoted.
        segment = unquote(force_str(segment))
        # Tilde is part of RFC3986 Unreserved Characters
        # http://tools.ietf.org/html/rfc3986#section-2.3
        # See also http://bugs.python.org/issue16285
        segment = quote(segment, safe=RFC3986_SUBDELIMS + RFC3986_GENDELIMS + str('~'))
        return force_text(segment)

    # Handle IDN before quoting.
    try:
        scheme, netloc, path, query, fragment = urlsplit(url)
    except ValueError:
        # invalid IPv6 URL (normally square brackets in hostname part).
        return unquote_quote(url)
    try:
        netloc = netloc.encode('idna').decode('ascii')  # IDN -> ACE
    except UnicodeError:  # invalid domain part
        return unquote_quote(url)
    if query:
        # Separately unquoting key/value, so as to not mix querystring separators
        # included in query values. See #22267.
        query_parts = [(unquote(force_str(q[0])), unquote(force_str(q[1])))
                       for q in parse_qsl(query, keep_blank_values=True)]
        # urlencode will take care of quoting
        query = urlencode(query_parts)
    path = unquote_quote(path)
    fragment = unquote_quote(fragment)
    # Reassemble from the individually re-quoted components.
    return urlunsplit((scheme, netloc, path, query, fragment))
@keep_lazy_text
def urlize(text, trim_url_limit=None, nofollow=False, autoescape=False):
    """
    Converts any URLs in text into clickable links.

    Works on http://, https://, www. links, and also on links ending in one of
    the original seven gTLDs (.com, .edu, .gov, .int, .mil, .net, and .org).
    Links can have trailing punctuation (periods, commas, close-parens) and
    leading punctuation (opening parens) and it'll still do the right thing.

    If trim_url_limit is not None, the URLs in the link text longer than this
    limit will be truncated to trim_url_limit-3 characters and appended with
    an ellipsis.

    If nofollow is True, the links will get a rel="nofollow" attribute.

    If autoescape is True, the link text and URLs will be autoescaped.
    """
    safe_input = isinstance(text, SafeData)

    def trim_url(x, limit=trim_url_limit):
        # Shorten overly long link text to `limit` characters plus "...".
        if limit is None or len(x) <= limit:
            return x
        return '%s...' % x[:max(0, limit - 3)]

    def unescape(text, trail):
        """
        If input URL is HTML-escaped, unescape it so as we can safely feed it to
        smart_urlquote. For example:
        http://example.com?x=1&amp;y=&lt;2&gt; => http://example.com?x=1&y=<2>
        """
        # BUG FIX: the entity strings below had been corrupted into no-op
        # replacements (e.g. .replace('&', '&')); restore the real
        # HTML-entity -> character substitutions.
        unescaped = (text + trail).replace(
            '&amp;', '&').replace('&lt;', '<').replace(
            '&gt;', '>').replace('&quot;', '"').replace('&#39;', "'")
        if trail and unescaped.endswith(trail):
            # Remove trail for unescaped if it was not consumed by unescape
            unescaped = unescaped[:-len(trail)]
        elif trail == ';':
            # Trail was consumed by unescape (as end-of-entity marker), move it to text
            text += trail
            trail = ''
        return text, unescaped, trail

    def trim_punctuation(lead, middle, trail):
        """
        Trim trailing and wrapping punctuation from `middle`. Return the items
        of the new state.
        """
        # Continue trimming until middle remains unchanged.
        trimmed_something = True
        while trimmed_something:
            trimmed_something = False
            # Trim trailing punctuation.
            stripped = middle.rstrip(TRAILING_PUNCTUATION_CHARS)
            if middle != stripped:
                trail = middle[len(stripped):] + trail
                middle = stripped
                trimmed_something = True
            # Trim wrapping punctuation.
            for opening, closing in WRAPPING_PUNCTUATION:
                if middle.startswith(opening):
                    middle = middle[len(opening):]
                    lead += opening
                    trimmed_something = True
                # Keep parentheses at the end only if they're balanced.
                if (middle.endswith(closing) and
                        middle.count(closing) == middle.count(opening) + 1):
                    middle = middle[:-len(closing)]
                    trail = closing + trail
                    trimmed_something = True
        return lead, middle, trail

    def is_email_simple(value):
        """Return True if value looks like an email address."""
        # An @ must be in the middle of the value.
        if '@' not in value or value.startswith('@') or value.endswith('@'):
            return False
        try:
            p1, p2 = value.split('@')
        except ValueError:
            # value contains more than one @.
            return False
        # Dot must be in p2 (e.g. example.com)
        if '.' not in p2 or p2.startswith('.'):
            return False
        return True

    words = word_split_re.split(force_text(text))
    for i, word in enumerate(words):
        if '.' in word or '@' in word or ':' in word:
            # lead: Current punctuation trimmed from the beginning of the word.
            # middle: Current state of the word.
            # trail: Current punctuation trimmed from the end of the word.
            lead, middle, trail = '', word, ''
            # Deal with punctuation.
            lead, middle, trail = trim_punctuation(lead, middle, trail)
            # Make URL we want to point to.
            url = None
            nofollow_attr = ' rel="nofollow"' if nofollow else ''
            if simple_url_re.match(middle):
                middle, middle_unescaped, trail = unescape(middle, trail)
                url = smart_urlquote(middle_unescaped)
            elif simple_url_2_re.match(middle):
                middle, middle_unescaped, trail = unescape(middle, trail)
                url = smart_urlquote('http://%s' % middle_unescaped)
            elif ':' not in middle and is_email_simple(middle):
                local, domain = middle.rsplit('@', 1)
                try:
                    domain = domain.encode('idna').decode('ascii')
                except UnicodeError:
                    continue
                url = 'mailto:%s@%s' % (local, domain)
                nofollow_attr = ''

            # Make link.
            if url:
                trimmed = trim_url(middle)
                if autoescape and not safe_input:
                    lead, trail = escape(lead), escape(trail)
                    trimmed = escape(trimmed)
                middle = '<a href="%s"%s>%s</a>' % (escape(url), nofollow_attr, trimmed)
                words[i] = mark_safe('%s%s%s' % (lead, middle, trail))
            else:
                if safe_input:
                    words[i] = mark_safe(word)
                elif autoescape:
                    words[i] = escape(word)
        elif safe_input:
            words[i] = mark_safe(word)
        elif autoescape:
            words[i] = escape(word)
    return ''.join(words)
def avoid_wrapping(value):
    """
    Avoid text wrapping in the middle of a phrase by adding non-breaking
    spaces where there previously were normal spaces.
    """
    # "\xa0" is U+00A0 NO-BREAK SPACE.
    return "\xa0".join(value.split(" "))
def html_safe(klass):
    """
    A decorator that defines the __html__ method. This helps non-Django
    templates to detect classes whose __str__ methods return SafeText.
    """
    # Refuse to double-decorate: an existing __html__ means the class
    # already declares its HTML-safety itself.
    if '__html__' in klass.__dict__:
        raise ValueError(
            "can't apply @html_safe to %s because it defines "
            "__html__()." % klass.__name__
        )
    if six.PY2:
        # On Python 2 the text protocol is __unicode__.
        if '__unicode__' not in klass.__dict__:
            raise ValueError(
                "can't apply @html_safe to %s because it doesn't "
                "define __unicode__()." % klass.__name__
            )
        original = klass.__unicode__
        klass.__unicode__ = lambda self: mark_safe(original(self))
        klass.__html__ = lambda self: unicode(self)  # NOQA: unicode undefined on PY3
    else:
        # On Python 3 the text protocol is __str__.
        if '__str__' not in klass.__dict__:
            raise ValueError(
                "can't apply @html_safe to %s because it doesn't "
                "define __str__()." % klass.__name__
            )
        original = klass.__str__
        klass.__str__ = lambda self: mark_safe(original(self))
        klass.__html__ = lambda self: str(self)
    return klass
| |
""" Threading logic.
The functionality here improves upon that in the standard L{threading} module.
"""
from __future__ import absolute_import

import functools
import os.path
import threading
import traceback

from srllib import util
from srllib.error import *
class ThreadError(SrlError):
    """ Encapsulation of an exception caught in a thread.
    @ivar name: Thread name.
    @ivar exc_type: Exception type.
    @ivar exc_value: Exception value.
    @ivar exc_traceback: Exception traceback.
    """
    def __init__(self, name, exc_info):
        # Render the full traceback into the error message up front, since
        # the handler may run long after the originating frame is gone.
        formatted = " ".join(traceback.format_exception(*exc_info))
        SrlError.__init__(self, "An exception was caught in thread '%s':\n%s" %
                (name, formatted))
        self.name = name
        self.exc_type, self.exc_value, self.exc_traceback = exc_info
class Cancellation(Exception):
    """ Raised inside a thread (by Thread.test_cancel) to unwind it when
    cancellation has been requested. """
    pass
class TimeoutError(Exception):
    """ Raised by Thread.cancel(wait=True, timeout=...) when the thread is
    still alive after the join times out. """
    pass
def _def_handler(thread_exc):
print thread_exc
# Current global exception handler and the previously registered one (used
# by restore_exceptionhandler); both start out as the default print handler.
_prev_handler = _exc_handler = _def_handler
def register_exceptionhandler(handler):
    """ Register a handler for exceptions happening in background thread.

    The exception handler will receive a L{ThreadError}. The handler that
    was active before this call is remembered, so that
    restore_exceptionhandler() can reinstate it.

    @param handler: Callable receiving a L{ThreadError}.
    """
    global _exc_handler, _prev_handler
    if _exc_handler != handler:
        # BUG FIX: remember the handler being *replaced*, not the new one.
        # The old code stored the new handler here, which made
        # restore_exceptionhandler() a no-op.
        _prev_handler = _exc_handler
    _exc_handler = handler
def restore_exceptionhandler():
    """ Restore global exception handler to previous one. """
    global _exc_handler, _prev_handler
    # Reinstate whatever handler register_exceptionhandler() replaced.
    _exc_handler = _prev_handler
def synchronized(func):
    """ Decorator for making functions thread-safe.

    All calls to the decorated function are serialized through a single
    lock stored on the original function object (func._sync_lock), so at
    most one thread executes it at a time.

    @param func: Function to wrap.
    @return: The wrapped, synchronized function.
    """
    func._sync_lock = threading.Lock()
    # functools.wraps preserves func's name/docstring, which the previous
    # version lost; the with-statement replaces the manual
    # acquire/try/finally-release sequence with identical semantics.
    @functools.wraps(func)
    def syncfunc(*args, **kwds):
        with func._sync_lock:
            return func(*args, **kwds)
    return syncfunc
# Per-thread bookkeeping keyed by Thread instance; each entry tracks the
# Lock objects the thread currently holds (see Lock and Thread._release_locks).
_thread_specific = {}
class Lock(object):
    """ threading.Lock replacement that supports forceful release.
    Acquisitions by a registered L{Thread} are recorded in the module-global
    _thread_specific registry so the thread can force-release them on exit;
    a waiter woken by such a forced release receives an exception from
    acquire().
    """
    def __init__(self, *args, **kwds):
        self._lk = threading.Lock()
        # Exception to deliver to the next acquirer after forceRelease()
        self.__inError = None
    def acquire(self, *args, **kwds):
        # Acquire the underlying lock; extra args (e.g. blocking flag) are
        # passed straight through to threading.Lock.acquire.
        ret = self._lk.acquire(*args, **kwds)
        if ret:
            global _thread_specific
            try:
                # Record this lock as held by the current thread. Threads not
                # managed by L{Thread} have no registry entry (KeyError) and
                # are simply not tracked.
                locks = _thread_specific[Thread.current_thread()]["locks"]
                locks.append(self)
            except KeyError:
                pass
        if self.__inError:
            # forceRelease() supplied an exception; propagate it to the
            # thread that just obtained the lock.
            raise self.__inError
        return ret
    def release(self, *args, **kwds):
        # Release only if actually held, then drop the bookkeeping entry for
        # the current thread (if any).
        if self._lk.locked():
            self._lk.release(*args, **kwds)
        global _thread_specific
        try:
            locks = _thread_specific[Thread.current_thread()]["locks"]
            if self in locks:
                locks.remove(self)
        except KeyError:
            pass
    def forceRelease(self, exception=None):
        """ Called by Thread upon in order to forcefully release locks upon exit.
        Since held locks are released in an abnormal manner, this will cause the waiting thread to
        receive an exception from acquire().
        """
        self.__inError = exception
        self.release()
class Condition(threading._Condition):
    """ Reimplement threading.Condition in order to provide own Lock implementation as default. This is
    because our own Lock supports forceful release. """
    # NOTE(review): threading._Condition is a private, Python 2-only name;
    # on Python 3 this base class would have to be threading.Condition.
    __super = threading._Condition
    def __init__(self, lock=None):
        # Default to the module's forcibly-releasable Lock rather than
        # threading.Lock.
        if lock is None:
            lock = Lock()
        Condition.__super.__init__(self, lock)
        # Exception set by notifyException(), re-raised in waiters
        self.__exc = None
    def wait(self, *args, **kwds):
        # Wait for a notification, then re-raise any exception delivered via
        # notifyException() in the waiting thread.
        Condition.__super.wait(self, *args, **kwds)
        if self.__exc is not None:
            raise self.__exc
    def notifyException(self, exception):
        # Store the exception, then wake a waiter (notify wakes one waiter,
        # not all) so it can re-raise it from wait().
        self.__exc = exception
        self.acquire()
        self.notify()
        self.release()
class Event(object):
    """ Reimplementation of threading.Event on top of this module's
    Condition class, so waiters can be woken forcefully. """
    def __init__(self):
        self._cond = Condition()
        self._flag = False
    def isSet(self):
        """ Return whether the event is currently set. """
        return self._flag
    def set(self):
        """ Set the flag and wake every thread blocked in wait(). """
        self._cond.acquire()
        try:
            self._flag = True
            self._cond.notifyAll()
        finally:
            self._cond.release()
    def clear(self):
        """ Reset the flag so subsequent wait() calls block again. """
        self._cond.acquire()
        try:
            self._flag = False
        finally:
            self._cond.release()
    def wait(self, timeout=None):
        """ Block until the flag is set, or until the optional timeout
        (in seconds) elapses. """
        self._cond.acquire()
        try:
            if not self._flag:
                self._cond.wait(timeout)
        finally:
            self._cond.release()
class SynchronousCondition(object):
    """ Synchronize two threads, by having one signal a condition and wait until the other receives
    it. """
    def __init__(self):
        # Paired events: one publishes the notification, the other confirms
        # that the waiter consumed it.
        self.__notified, self.__waited = Event(), Event()
        # Exception propagated to the waiter, set by notifyException()
        self.__exc = None
    def wait(self):
        """ Wait for condition to become true. """
        # NOTE(review): Thread defines `_thread_local` (whose attribute is
        # `current`), not `_threadLocal`, and no `eventCancel` is stored on
        # it -- this lookup looks like it would raise AttributeError at
        # runtime; confirm the intended attribute.
        if Thread._threadLocal.eventCancel.isSet():
            return
        self.__notified.wait()
        self.__notified.clear()
        # Acknowledge receipt so notify() can return.
        self.__waited.set()
        if self.__exc is not None:
            raise self.__exc
    def notify(self):
        """ Signal that condition holds true, wait until other thread gets the message. """
        self.__notified.set()
        # NOTE(review): same suspect Thread._threadLocal attribute as in
        # wait() above.
        if Thread._threadLocal.eventCancel.isSet():
            return
        self.__waited.wait()
        self.__waited.clear()
    def notifyException(self, exception):
        """ Notify waiting threads of exception. """
        self.__exc = exception
        # Release both sides so neither thread stays blocked.
        self.__notified.set()
        self.__waited.set()
def test_cancel():
    """ Convenience wrapper: raise L{Cancellation} in the calling thread if
    it has been asked to cancel. A no-op for threads not managed by
    L{Thread} (the dummy thread's test_cancel does nothing). """
    thrd = Thread.current_thread()
    thrd.test_cancel()
class Thread(object):
    """ Wrapper around threading.Thread adding cancellation support,
    per-thread exception handlers, a finished-callback and forced release
    of held L{Lock}s when the thread exits.
    """
    # Thread-local storage; .current holds the Thread object driving the
    # calling OS thread (None for unmanaged threads).
    _thread_local = threading.local()
    _thread_local.current = None

    class _DummyThread:
        """ Dummy class for objects that get returned by current_thread if no Thread is controlling the current thread. """
        def __init__(self):
            self.name = "Dummy"

        def test_cancel(self):
            # Unmanaged threads cannot be cancelled; do nothing.
            pass

    def __init__(self, target=None, args=(), kwds=None, name=None, daemon=False, start=False,
            slot_finished=util.no_op):
        """ @param target: function to execute in background thread
        @param args: arguments to target
        @param kwds: keywords to target
        @param name: thread's name
        @param daemon: die with the main thread?
        @param start: start at once?
        @param slot_finished: a function to invoke once the thread finishes
        """
        self._thrd = threading.Thread(target=self._run, name=name)
        # BUG FIX: the old mutable defaults (args=[], kwds={}) were shared
        # between all instances; use an immutable tuple and a None sentinel.
        self._trgt, self._args = target, args
        self._kwds = kwds if kwds is not None else {}
        self._slot_finished = slot_finished
        self.__eventCancel = Event()
        # Snapshot the current global handler; it can be overridden later
        # through register_exception_handler().
        global _exc_handler
        self.__exc_handler = _exc_handler
        if daemon:
            self.daemon = True
        if start:
            self.start()

    def __str__(self):
        return self.name

    @classmethod
    def current_thread(cls):
        """ Return the Thread driving the calling OS thread, or a
        L{_DummyThread} if this thread isn't managed by this class. """
        cur = cls._thread_local.current
        if cur is None:
            return Thread._DummyThread()
        return cur

    def __getName(self):
        return self._thrd.getName()

    def __setName(self, name):
        self._thrd.setName(name)

    # Thread name, delegated to the wrapped threading.Thread.
    name = property(__getName, __setName)

    def __isDaemon(self):
        return self._thrd.isDaemon()

    def __setDaemon(self, daemon):
        self._thrd.setDaemon(daemon)

    # Daemon flag, delegated to the wrapped threading.Thread.
    daemon = property(__isDaemon, __setDaemon)

    @property
    def alive(self):
        """ Is the underlying OS thread still running? """
        return self._thrd.isAlive()

    def start(self):
        """ Start executing run() in the background thread. """
        self._thrd.start()

    def join(self, timeout=None):
        """ Wait for the thread to terminate.

        @param timeout: Optional timeout in seconds. BUG FIX: join()
        previously accepted no timeout parameter although cancel() forwards
        one, so cancel(wait=True, timeout=...) always raised TypeError.
        """
        self._thrd.join(timeout)

    @synchronized
    def cancel(self, wait=False, timeout=None):
        """ Tell this thread to cancel itself. Will wait till the request is honoured.
        It is also possible that the thread finishes its execution independently of this request,
        this function will return anyway when it notices that the thread is no longer running. """
        assert Thread.current_thread() is not self
        self.__eventCancel.set()
        if wait:
            self.join(timeout=timeout)
            if self.alive:
                raise TimeoutError
        self._release_locks()

    def test_cancel(self):
        """ Raise L{Cancellation} if cancel() has been requested. Must be
        called from this thread itself. """
        assert Thread.current_thread() is self
        if self.__eventCancel.isSet():
            # Clear event, so that it can be reused
            self.__eventCancel.clear()
            raise Cancellation

    def run(self):
        """ The thread's activity; by default invokes the target passed to
        the constructor. Override in subclasses for custom behavior. """
        if self._trgt is None:
            raise NotImplementedError
        self._trgt(*self._args, **self._kwds)

    def register_exception_handler(self, handler):
        """ Set exception handler for this thread. """
        self.__exc_handler = handler

    def unregister_exception_handler(self):
        """ Unset exception handler for this thread (revert to the global
        handler). """
        global _exc_handler
        self.__exc_handler = _exc_handler

    def _run(self):
        # Entry point executed inside the new OS thread.
        Thread._thread_local.current = self
        global _thread_specific
        _thread_specific[self] = {"locks": []}
        thrd_exc = None
        try:
            self.run()
        except Cancellation:
            # Requested termination -- not an error.
            pass
        except:
            # Deliberately broad: any other exception is wrapped in a
            # ThreadError and handed to this thread's exception handler.
            import sys
            thrd_exc = ThreadError(self.name, sys.exc_info())
            self.__exc_handler(thrd_exc)
        else:
            self._slot_finished()
        finally:
            # Release all locks held by this thread
            self._release_locks(thrd_exc)

    def _release_locks(self, exception=None):
        # Force-release any locks still registered to this thread so other
        # threads blocked in acquire() are woken (receiving `exception`,
        # if one is given).
        global _thread_specific
        if not self in _thread_specific:
            return
        for lk in _thread_specific[self]["locks"]:
            lk.forceRelease(exception)
        _thread_specific[self]["locks"] = []
| |
# -*- coding: utf-8 -*-
# Copyright Brandon Stafford
#
# This file is part of Pysolar.
#
# Pysolar is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# Pysolar is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with Pysolar. If not, see <http://www.gnu.org/licenses/>.
"""Additional support functions for solar geometry, astronomy, radiation correlation
:Original author: Simeon Nwaogaidu
:Contact: SimeonObinna.Nwaogaidu AT lahmeyer DOT de
:Additional author: Holger Zebner
:Contact: holger.zebner AT lahmeyer DOT de
:Additional author: Brandon Stafford
"""
from datetime import \
datetime, \
timedelta
import math
from . import solar, constants
# Some default constants (used as keyword defaults throughout this module)
AM_default = 2.0 # Default air mass is 2.0
TL_default = 1.0 # Default Linke turbidity factor is 1.0
SC_default = 1367.0 # Solar constant in W/m^2 is 1367.0. Note that this value could vary by +/-4 W/m^2
TY_default = 365 # Total year number from 1 to 365 days
elevation_default = 0.0 # Default elevation is 0.0
# Useful equations for analysis
def get_sunrise_sunset(latitude_deg, longitude_deg, when):
    """Calculate the astronomical sunrise and sunset times in local time.

    Parameters
    ----------
    latitude_deg : float
        Latitude in decimal degrees (north/south angular location).
    longitude_deg : float
        Longitude in decimal degrees (east-west location relative to the
        Greenwich meridian).
    when : datetime.datetime
        Date and time in any valid timezone; answers are for the same day
        in the same timezone.

    Returns
    -------
    (datetime.datetime, datetime.datetime)
        Sunrise time and sunset time, both in local time.

    References
    ----------
    .. [1] http://www.skypowerinternational.com/pdf/Radiation/7.1415.01.121_cm121_bed-anleitung_engl.pdf
    .. [2] http://pysolar.org/
    """
    offset = when.utcoffset()
    offset_seconds = offset.total_seconds() if offset is not None else 0
    day = when.utctimetuple().tm_yday  # day of the year
    # Solar hour angle from the timezone offset and longitude.
    solar_hour_angle = offset_seconds / 3600 * 15.0 - longitude_deg
    # Time adjustment angle and the resulting adjustment in hours
    # (equation-of-time style Fourier fit).
    tt = math.radians(279.134 + 0.985647 * day)
    time_adst = (
        5.0323
        - 100.976 * math.sin(tt)
        + 595.275 * math.sin(2 * tt)
        + 3.6858 * math.sin(3 * tt)
        - 12.47 * math.sin(4 * tt)
        - 430.847 * math.cos(tt)
        + 12.5024 * math.cos(2 * tt)
        + 18.25 * math.cos(3 * tt)
    ) / 3600
    # Local clock time of solar noon.
    noon = 12 + solar_hour_angle / 15.0 - time_adst
    # Half the length of daylight, in hours.
    half_day = (
        math.pi / 2
        - math.radians(constants.earth_axis_inclination)
        * math.tan(math.radians(latitude_deg))
        * math.cos(2 * math.pi * day / 365.25)
    ) * (12 / math.pi)
    midnight = datetime(year=when.year, month=when.month, day=when.day,
                        tzinfo=when.tzinfo)
    sunrise_time = midnight + timedelta(hours=noon - half_day + time_adst)
    sunset_time = midnight + timedelta(hours=noon + half_day - time_adst)
    return sunrise_time, sunset_time
def get_sunrise_time(latitude_deg, longitude_deg, when):
    "Wrapper for get_sunrise_sunset that returns just the sunrise time."
    sunrise, _ = get_sunrise_sunset(latitude_deg, longitude_deg, when)
    return sunrise
def get_sunset_time(latitude_deg, longitude_deg, when):
    "Wrapper for get_sunrise_sunset that returns just the sunset time."
    _, sunset = get_sunrise_sunset(latitude_deg, longitude_deg, when)
    return sunset
def mean_earth_sun_distance(when):
    """Mean Earth-Sun distance is the arithmetical mean of the maximum and minimum distances
    between a planet (Earth) and the object about which it revolves (Sun). However,
    the function is used to calculate the Mean earth sun distance.

    Parameters
    ----------
    when : datetime.datetime
        date/time for which to do the calculation

    Returns
    -------
    KD : float
        Mean earth sun distance

    References
    ----------
    .. [1] http://sunbird.jrc.it/pvgis/solres/solmod3.htm#clear-sky%20radiation
    .. [2] R. aguiar and et al, "The ESRA user guidebook, vol. 2. database", models and exploitation software-Solar
        radiation models, p.113
    """
    # BUG FIX: the annual cycle belongs *inside* the sine -- the previous
    # expression computed sin(2*pi*(day - 94)) / 365, which collapsed the
    # yearly +/-0.0335 variation to a ~1e-4 wobble with the wrong period.
    day = when.utctimetuple().tm_yday
    return 1 - 0.0335 * math.sin(2 * math.pi * (day - 94) / 365)
def extraterrestrial_irrad(when, latitude_deg, longitude_deg,SC=SC_default):
    """Equation calculates Extratrestrial radiation. Solar radiation incident outside the earth's
    atmosphere is called extraterrestrial radiation. On average the extraterrestrial irradiance
    is 1367 Watts/meter2 (W/m2). This value varies by + or - 3 percent as the earth orbits the sun.
    The earth's closest approach to the sun occurs around January 4th and it is furthest
    from the sun around July 5th.

    Parameters
    ----------
    when : datetime.datetime
        date/time for which to do the calculation
    latitude_deg : float
        latitude in decimal degree. A geographical term denoting the north/south angular location
        of a place on a sphere.
    longitude_deg : float
        longitude in decimal degree. Longitude shows your location in an east-west direction,relative
        to the Greenwich meridian.
    SC : float
        The solar constant is the amount of incoming solar electromagnetic radiation per unit area, measured
        on the outer surface of Earth's atmosphere in a plane perpendicular to the rays.It is measured by
        satellite to be roughly 1366 watts per square meter (W/m^2)

    Returns
    -------
    EXTR1 : float
        Extraterrestrial irradiation

    References
    ----------
    .. [1] http://solardat.uoregon.edu/SolarRadiationBasics.html
    .. [2] Dr. J. Schumacher and et al,"INSEL LE(Integrated Simulation Environment Language)Block reference",p.68
    """
    day = when.utctimetuple().tm_yday
    # Fourier terms in the day-of-year phase for the earth-sun distance
    # correction factor applied to the solar constant below.
    ab = math.cos(2 * math.pi * (day - 1.0)/(365.0))
    bc = math.sin(2 * math.pi * (day - 1.0)/(365.0))
    cd = math.cos(2 * (2 * math.pi * (day - 1.0)/(365.0)))
    df = math.sin(2 * (2 * math.pi * (day - 1.0)/(365.0)))
    decl = solar.get_declination(day)
    ha = solar.get_hour_angle(when, longitude_deg)
    # NOTE(review): latitude_deg is documented as *degrees* but is passed to
    # math.sin/math.cos, which expect radians; likewise the units of `decl`
    # and `ha` returned by the solar module are not visible here. Verify the
    # unit handling before trusting this zenith-angle term.
    ZA = math.sin(latitude_deg) * math.sin(decl) + math.cos(latitude_deg) * math.cos(decl) * math.cos(ha)
    return SC * ZA * (1.00010 + 0.034221 * ab + 0.001280 * bc + 0.000719 * cd + 0.000077 * df)
def declination_degree(when, TY = TY_default ):
    """Return the declination of the sun, in degrees.

    The declination is the angle between Earth's equatorial plane and a
    line between the Earth and the sun. It varies between 23.45 and
    -23.45 degrees, hitting zero on the equinoxes and peaking on the
    solstices.

    Parameters
    ----------
    when : datetime.datetime
        date/time for which to do the calculation
    TY : float
        Total number of days in a year, e.g. 365 (no leap days).

    Returns
    -------
    DEC : float
        The declination of the Sun

    References
    ----------
    .. [1] http://pysolar.org/
    """
    day_of_year = when.utctimetuple().tm_yday
    # Sinusoid with a one-year period, zero-crossing near day 81 (March equinox).
    return constants.earth_axis_inclination * math.sin((2 * math.pi / (TY)) * (day_of_year - 81))
def solarelevation_function_clear(latitude_deg, longitude_deg, when,temperature = constants.standard_temperature,
    pressure = constants.standard_pressure, elevation = elevation_default):
    """Equation calculates Solar elevation function for clear sky type.

    Parameters
    ----------
    latitude_deg : float
        latitude in decimal degree. A geographical term denoting
        the north/south angular location of a place on a sphere.
    longitude_deg : float
        longitude in decimal degree. Longitude shows your location
        in an east-west direction,relative to the Greenwich meridian.
    when : datetime.datetime
        date/time for which to do the calculation
    temperature : float
        atmospheric temperature
    pressure : float
        pressure in pascals
    elevation : float
        The elevation of a geographic location is its height above a fixed reference point, often the mean
        sea level.

    Returns
    -------
    SOLALTC : float
        Solar elevation function clear sky

    References
    ----------
    .. [1] S. Younes, R.Claywell and el al,"Quality control of solar radiation data: present status
        and proposed new approaches", energy 30 (2005), pp 1533 - 1549.
    """
    altitude = solar.get_altitude(latitude_deg, longitude_deg,when, elevation, temperature,pressure)
    # NOTE(review): the unit of solar.get_altitude's return value is not
    # visible here, yet it is fed directly into math.sin/math.cos (which
    # expect radians). If it is in degrees, this correlation is evaluated
    # incorrectly -- confirm against the solar module.
    return (0.038175 + (1.5458 * (math.sin(altitude))) + ((-0.59980) * (0.5 * (1 - math.cos(2 * (altitude))))))
def solarelevation_function_overcast(latitude_deg, longitude_deg, when,
    elevation = elevation_default, temperature = constants.standard_temperature,
    pressure = constants.standard_pressure):
    """ The function calculates solar elevation function for overcast sky type.
    This associated hourly overcast radiation model is based on the estimation of the
    overcast sky transmittance with the sun directly overhead combined with the application
    of an over sky elavation function to estimate the overcast day global irradiation
    value at any solar elevation.

    Parameters
    ----------
    latitude_deg : float
        latitude in decimal degree. A geographical term denoting the north/south angular location of a place on a
        sphere.
    longitude_deg : float
        longitude in decimal degree. Longitude shows your location in an east-west direction,relative to the
        Greenwich meridian.
    when : datetime.datetime
        date/time for which to do the calculation
    elevation : float
        The elevation of a geographic location is its height above a fixed reference point, often the mean sea level.
    temperature : float
        atmospheric temperature
    pressure : float
        pressure in pascals

    Returns
    -------
    SOLALTO : float
        Solar elevation function overcast

    References
    ----------
    .. [1] Prof. Peter Tregenza,"Solar radiation and daylight models", p.89.
    .. [2] Also accessible through Google Books: http://tinyurl.com/5kdbwu
        Tariq Muneer, "Solar Radiation and Daylight Models, Second Edition: For the Energy Efficient
        Design of Buildings"
    """
    altitude = solar.get_altitude(latitude_deg, longitude_deg,when, elevation, temperature,pressure)
    # NOTE(review): as in solarelevation_function_clear, the altitude's unit
    # is not visible here but is passed straight to math.sin/math.cos, which
    # expect radians -- confirm against the solar module.
    return ((-0.0067133) + (0.78600 * (math.sin(altitude)))) + (0.22401 * (0.5 * (1 - math.cos(2 * altitude))))
def diffuse_transmittance(TL = TL_default):
    """Return the theoretical diffuse transmittance: the diffuse irradiance
    on a horizontal surface when the sun is at the zenith.

    Parameters
    ----------
    TL : float
        Linke turbidity factor

    Returns
    -------
    DT : float
        diffuse_transmittance

    References
    ----------
    .. [1] S. Younes, R.Claywell and el al,"Quality control of solar radiation data: present status and proposed
        new approaches", energy 30 (2005), pp 1533 - 1549.
    """
    # Second-order polynomial in the Linke turbidity factor.
    return -21.657 + 41.752 * TL + 0.51905 * TL * TL
def diffuse_underclear(latitude_deg, longitude_deg, when, elevation = elevation_default,
    temperature = constants.standard_temperature, pressure = constants.standard_pressure, TL=TL_default):
    """Equation calculates diffuse radiation under clear sky conditions.

    Parameters
    ----------
    latitude_deg : float
        latitude in decimal degree. A geographical term denoting the north/south angular location of a place on
        a sphere.
    longitude_deg : float
        longitude in decimal degree. Longitude shows your location in an east-west direction,relative to the
        Greenwich meridian.
    when : datetime.datetime
        date/time for which to do the calculation
    elevation : float
        The elevation of a geographic location is its height above a fixed reference point, often the mean sea level.
    temperature : float
        atmospheric temperature
    pressure : float
        pressure in pascals
    TL : float
        Linke turbidity factor

    Returns
    -------
    DIFFC : float
        Diffuse Irradiation under clear sky

    References
    ----------
    .. [1] S. Younes, R.Claywell and el al,"Quality control of solar radiation data: present status and proposed
        new approaches", energy 30 (2005), pp 1533 - 1549.
    """
    # Delegate to diffuse_transmittance() instead of duplicating its
    # polynomial inline, so the correlation lives in exactly one place.
    altitude = solar.get_altitude(latitude_deg, longitude_deg, when,
        elevation, temperature, pressure)
    return mean_earth_sun_distance(when) * diffuse_transmittance(TL) * altitude
def diffuse_underovercast(latitude_deg, longitude_deg, when, elevation = elevation_default,
    temperature = constants.standard_temperature, pressure = constants.standard_pressure,TL=TL_default):
    """Function calculates the diffuse radiation under overcast conditions.

    Parameters
    ----------
    latitude_deg : float
        latitude in decimal degree. A geographical term denoting the north/south angular location of a place on a
        sphere.
    longitude_deg : float
        longitude in decimal degree. Longitude shows your location in an east-west direction,relative to the
        Greenwich meridian.
    when : datetime.datetime
        date/time for which to do the calculation
    elevation : float
        The elevation of a geographic location is its height above a fixed reference point, often the mean sea level.
    temperature : float
        atmospheric temperature
    pressure : float
        pressure in pascals
    TL : float
        Linke turbidity factor

    Returns
    -------
    DIFOC : float
        Diffuse Irradiation under overcast

    References
    ----------
    .. [1] S. Younes, R.Claywell and el al,"Quality control of solar radiation data: present status and proposed
        new approaches", energy 30 (2005), pp 1533 - 1549.
    """
    # Reuse diffuse_transmittance() rather than repeating its polynomial.
    # (Numerically this matches the clear-sky variant above, as it did in
    # the original code.)
    altitude = solar.get_altitude(latitude_deg, longitude_deg, when,
        elevation, temperature, pressure)
    return mean_earth_sun_distance(when) * diffuse_transmittance(TL) * altitude
def direct_underclear(latitude_deg, longitude_deg, when,
    temperature = constants.standard_temperature, pressure = constants.standard_pressure, TY = TY_default,
    AM = AM_default, TL = TL_default,elevation = elevation_default):
    """Equation calculates direct radiation under clear sky conditions.

    Parameters
    ----------
    latitude_deg : float
        latitude in decimal degree. A geographical term denoting the north/south angular location of a
        place on a sphere.
    longitude_deg : float
        longitude in decimal degree. Longitude shows your location in an east-west direction,relative to the
        Greenwich meridian.
    when : datetime.datetime
        date/time for which to do the calculation
    temperature : float
        atmospheric temperature
    pressure : float
        pressure in pascals
    TY : float
        Total number of days in a year. eg. 365 days per year,(no leap days)
    AM : float
        Air mass. An Air Mass is a measure of how far light travels through the Earth's atmosphere. One air mass,
        or AM1, is the thickness of the Earth's atmosphere. Air mass zero (AM0) describes solar irradiance in space,
        where it is unaffected by the atmosphere. The power density of AM1 light is about 1,000 W/m^2
    TL : float
        Linke turbidity factor
    elevation : float
        The elevation of a geographic location is its height above a fixed reference point, often the mean
        sea level.

    Returns
    -------
    DIRC : float
        Direct Irradiation under clear

    References
    ----------
    .. [1] S. Younes, R.Claywell and el al,"Quality control of solar radiation data: present status and proposed
        new approaches", energy 30 (2005), pp 1533 - 1549.
    """
    # Earth-sun distance correction factor.
    KD = mean_earth_sun_distance(when)
    # Solar declination for this day of year.
    DEC = declination_degree(when,TY)
    # NOTE(review): the Beer-law exponent -0.8662*AM*TL*DEC multiplies the
    # solar *declination* (a function only of the date), where clear-sky
    # models of this family normally use an optical-thickness term; also the
    # solar constant is hard-coded as 1367 rather than SC_default, and the
    # altitude from solar.get_altitude is passed to math.sin with its unit
    # unverified. Confirm the formula against the cited reference before
    # relying on the result.
    DIRC = (1367 * KD * math.exp(-0.8662 * (AM) * (TL) * (DEC)
        ) * math.sin(solar.get_altitude(latitude_deg,longitude_deg,
        when,elevation ,
        temperature , pressure )))
    return DIRC
def global_irradiance_clear(DIRC, DIFFC, latitude_deg, longitude_deg, when,
    temperature = constants.standard_temperature, pressure = constants.standard_pressure, TY = TY_default,
    AM = AM_default, TL = TL_default, elevation = elevation_default):
    """Equation calculates global irradiance under clear sky conditions.

    Global clear-sky irradiance is the sum of the direct and the diffuse
    components, both of which are recomputed here.

    Parameters
    ----------
    DIRC, DIFFC : float
        Ignored; retained only for backward compatibility of the call
        signature. The direct and diffuse components are recomputed from
        the remaining arguments.
    latitude_deg : float
        latitude in decimal degree. A geographical term denoting the north/south angular location of a place
        on a sphere.
    longitude_deg : float
        longitude in decimal degree. Longitude shows your location in an east-west direction,relative to
        the Greenwich meridian.
    when : datetime.datetime
        date/time for which to do the calculation
    temperature : float
        atmospheric temperature
    pressure : float
        pressure in pascals
    TY : float
        Total number of days in a year. eg. 365 days per year,(no leap days)
    AM : float
        Air mass. An Air Mass is a measure of how far light travels through the Earth's atmosphere.
    TL : float
        Linke turbidity factor
    elevation : float
        The elevation of a geographic location is its height above a fixed reference point, often the mean sea
        level.

    Returns
    -------
    ghic : float
        Global Irradiation under clear sky

    References
    ----------
    .. [1] S. Younes, R.Claywell and el al,"Quality control of solar radiation data: present status and proposed
        new approaches", energy 30 (2005), pp 1533 - 1549.
    """
    # BUG FIX: the previous code passed TY, AM, TL and elevation
    # *positionally* into direct_underclear()'s temperature/pressure/TY/AM
    # parameter slots and then also passed temperature as a keyword, raising
    # "TypeError: got multiple values for argument 'temperature'" on every
    # call. Pass everything by keyword, and honour the caller-supplied
    # temperature/pressure/TL instead of always using the standard constants.
    direct = direct_underclear(latitude_deg, longitude_deg, when,
        temperature=temperature, pressure=pressure, TY=TY, AM=AM, TL=TL,
        elevation=elevation)
    diffuse = diffuse_underclear(latitude_deg, longitude_deg, when,
        elevation=elevation, temperature=temperature, pressure=pressure,
        TL=TL)
    return direct + diffuse
def global_irradiance_overcast(latitude_deg, longitude_deg, when,
    elevation = elevation_default, temperature = constants.standard_temperature,
    pressure = constants.standard_pressure):
    """Estimate global irradiance under an overcast sky.

    Used to compare against the diffuse component under overcast
    conditions: with no beam component, global and diffuse irradiance are
    expected to be equal.

    Parameters
    ----------
    latitude_deg : float
        latitude in decimal degrees (north/south angular location).
    longitude_deg : float
        longitude in decimal degrees (east-west location relative to the
        Greenwich meridian).
    when : datetime.datetime
        date/time for which to do the calculation
    elevation : float
        Height above a fixed reference point, often the mean sea level.
    temperature : float
        atmospheric temperature
    pressure : float
        pressure in pascals

    Returns
    -------
    ghioc : float
        Global Irradiation under overcast sky

    References
    ----------
    .. [1] S. Younes, R.Claywell and el al, "Quality
        control of solar radiation data: present status
        and proposed new approaches", energy 30
        (2005), pp 1533 - 1549.
    """
    altitude = solar.get_altitude(latitude_deg, longitude_deg, when,
        elevation, temperature, pressure)
    return 572 * altitude
def diffuse_ratio(DIFF_data,ghi_data):
    """Return the diffuse ratio: diffuse over global horizontal irradiation.

    Parameters
    ----------
    DIFF_data : array_like
        Diffuse horizontal irradiation data
    ghi_data : array_like
        global horizontal irradiation data array

    Returns
    -------
    K : float
        diffuse_ratio

    References
    ----------
    .. [1] S. Younes, R.Claywell and el al,"Quality control of solar radiation data: present status and proposed
        new approaches", energy 30 (2005), pp 1533 - 1549.
    """
    return DIFF_data / ghi_data
def clear_index(ghi_data, when, latitude_deg, longitude_deg):
    """Compute the clearness index KT = GHI / extraterrestrial irradiation.

    Parameters
    ----------
    ghi_data : array_like
        Global horizontal irradiation data.
    when : datetime.datetime
        Date/time for which to do the calculation.
    latitude_deg : float
        Latitude in decimal degrees (north/south angular location).
    longitude_deg : float
        Longitude in decimal degrees, relative to the Greenwich meridian.

    Returns
    -------
    KT : float
        Clearness index ratio.

    References
    ----------
    .. [1] S. Younes, R. Claywell et al., "Quality control of solar radiation
       data: present status and proposed new approaches", Energy 30 (2005),
       pp. 1533-1549.
    """
    extraterrestrial = extraterrestrial_irrad(when, latitude_deg, longitude_deg)
    return ghi_data / extraterrestrial
| |
#!/usr/bin/python
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import zipfile
import zlib
import hashlib
from struct import pack, unpack
POLY_SZ = 20
KEY = "FlagIsInsideThePNG!!" #
def sbin(v, sz):
    """Return the binary digits of v, left-padded with zeros to width sz."""
    raw = bin(v)[2:]
    return raw.rjust(sz, '0')
# Base for any of my ROPs.
def db(v):
    """Pack v as a little-endian unsigned 8-bit value."""
    return pack("<B", v)
def dw(v):
    """Pack v as a little-endian unsigned 16-bit value."""
    return pack("<H", v)
def dd(v):
    """Pack v as a little-endian unsigned 32-bit value."""
    return pack("<I", v)
def dq(v):
    """Pack v as a little-endian unsigned 64-bit value."""
    return pack("<Q", v)
def rb(v):
    """Read a little-endian unsigned 8-bit value from the start of v."""
    # Use v[:1], not v[0]: on Python 3 indexing bytes yields an int, which
    # struct.unpack rejects; a slice works on both str (py2) and bytes (py3)
    # and is consistent with rw/rd/rq below.
    return unpack("<B", v[:1])[0]
def rw(v):
    """Read a little-endian unsigned 16-bit value from the start of v."""
    return unpack("<H", v[:2])[0]
def rd(v):
    """Read a little-endian unsigned 32-bit value from the start of v."""
    return unpack("<I", v[:4])[0]
def rq(v):
    """Read a little-endian unsigned 64-bit value from the start of v."""
    return unpack("<Q", v[:8])[0]
class BitStream:
    """LSB-first bit reader over a byte string."""
    def __init__(self, data, sz=None):
        """Wrap `data`; `sz` optionally caps the number of readable bits
        (defaults to every bit of `data`)."""
        if sz is None:
            sz = len(data) * 8
        self.sz = sz
        self.data = bytearray(data)
        self.idx = 0  # index of the next bit to read
    def get_bit(self):
        """Return the next bit, least-significant bit of each byte first.

        Raises when the stream is exhausted.
        """
        if self.idx >= self.sz:
            raise Exception('All bits used. Go away.')
        # Floor division is required: plain '/' yields a float on Python 3,
        # which is not a valid bytearray index.
        i_byte = self.idx // 8
        i_bit = self.idx % 8
        bit = (self.data[i_byte] >> i_bit) & 1
        self.idx += 1
        return bit
    def get_bits(self, sz):
        """Read `sz` bits and reassemble them LSB-first into an integer."""
        v = 0
        for i in range(sz):  # range (not xrange) works on Python 2 and 3
            v |= self.get_bit() << i
        return v
class LFSR:
    """Linear feedback shift register of width `sz`.

    The output bit is the register MSB; the feedback bit shifted into the
    LSB is 1 XOR parity(register & poly).
    """
    def __init__(self, poly, iv, sz):
        """poly: feedback tap mask; iv: initial register value; sz: width in bits."""
        self.sz = sz
        self.poly = poly
        self.r = iv
        self.mask = (1 << sz) - 1
    def get_bit(self):
        """Emit the current MSB and clock the register once."""
        bit = (self.r >> (self.sz - 1)) & 1
        # Feedback = 1 XOR parity of the tapped bits. bin().count('1') folds
        # the same bits the original xrange loop XORed together, and runs on
        # both Python 2 and 3 (xrange does not exist on Python 3).
        masked = self.r & self.poly
        new_bit = 1 ^ (bin(masked).count('1') & 1)
        self.r = ((self.r << 1) | new_bit) & self.mask
        return bit
class LFSRCipher:
    """Stream cipher built from eight small LFSRs, one per keystream bit."""
    def __init__(self, key, poly_sz=8, key_iv=None, cipher_iv=None):
        """Derive 8 LFSR polynomials from `key` XOR a key IV, and seed the
        registers from a cipher IV. Fresh random IVs of `poly_sz` bytes are
        drawn whenever one is not supplied."""
        if len(key) < poly_sz:
            raise Exception('LFSRCipher key length must be at least %i' % poly_sz)
        key_bits = BitStream(key)
        if key_iv is None:
            key_iv = os.urandom(poly_sz)
        self.key_iv = key_iv
        if cipher_iv is None:
            cipher_iv = os.urandom(poly_sz)
        self.cipher_iv = cipher_iv
        key_iv_bits = BitStream(key_iv)
        cipher_iv_bits = BitStream(cipher_iv)
        self.lfsr = []
        for _ in range(8):
            poly = key_bits.get_bits(poly_sz) ^ key_iv_bits.get_bits(poly_sz)
            seed = cipher_iv_bits.get_bits(poly_sz)
            self.lfsr.append(LFSR(poly, seed, poly_sz))
    def get_keystream_byte(self):
        """Assemble one keystream byte; bit i comes from LFSR i."""
        b = 0
        for i, l in enumerate(self.lfsr):
            b |= l.get_bit() << i
        return b
    def get_headers(self):
        """Return the key IV followed by the cipher IV."""
        return self.key_iv + self.cipher_iv
    def crypt(self, s):
        """XOR `s` with the keystream; the operation is its own inverse."""
        buf = bytearray(s)
        for i in range(len(buf)):
            buf[i] ^= self.get_keystream_byte()
        return str(buf)
def SETBIT(n):
    """Return an integer with only bit `n` set (i.e. 1 << n)."""
    mask = 1 << n
    return mask
class BetterZipCreator:
    """Writes a ZIP archive whose entries are stored uncompressed and
    encrypted with LFSRCipher.

    Each entry's stored data is: key IV + cipher IV (from get_headers()),
    the encrypted file contents, then an encrypted SHA-256 of the plaintext.
    NOTE(review): headers are assembled as native strings joined with packed
    bytes, so this class assumes Python 2 (where str is bytes).
    """
    def __init__(self, arcname, key):
        # arcname: path of the .zip to create; key: shared LFSRCipher key.
        self.key = key
        self.arcname = arcname
        self.files = []  # list of (filename, raw contents) tuples
    def add_file(self, fname):
        """Read fname from disk and queue its contents for archiving."""
        with open(fname, 'rb') as f:
            data = f.read()
        self.files.append((fname, data))
    def write_lfh(self, arc, f):
        """Write one local file header followed by IVs, ciphertext and the
        encrypted SHA-256 of the plaintext."""
        fname, data = f
        crc = zlib.crc32(data) & 0xffffffff
        # A fresh cipher (fresh random IVs) is used for every entry.
        c = LFSRCipher(self.key, POLY_SZ)
        crypto_headers = c.get_headers()
        encrypted_data = c.crypt(data)
        sha256 = hashlib.sha256(data)
        encrypted_hash = c.crypt(sha256.digest())
        # On-disk entry size = IVs + ciphertext + trailing encrypted hash.
        actual_sz = len(crypto_headers) + len(data) + sha256.digest_size
        header_to_write = [
            "PK\3\4",
            dw(90), # The encryption is so good it's version 9.0 at least!
            dw(SETBIT(0) | SETBIT(15)), # Super strong encryption enabled!!!
            dw(0), # No compression.
            dw(0), dw(0), # Time/date, we don't care.
            dd(crc),
            dd(actual_sz),
            dd(len(data)),
            dw(len(fname)),
            dw(0), # Extra field length.
            fname
        ]
        arc.write(''.join(header_to_write))
        arc.write(crypto_headers)
        arc.write(encrypted_data)
        arc.write(encrypted_hash)
    def write_cdh(self, arc, f, offset):
        """Write the central directory header for entry f whose local file
        header starts at `offset`."""
        fname, data = f
        crc = zlib.crc32(data) & 0xffffffff
        # Cipher created only for its header length; keystream is unused here.
        c = LFSRCipher(self.key, POLY_SZ)
        sha256 = hashlib.sha256(data)
        actual_sz = len(c.get_headers()) + len(data) + sha256.digest_size
        header_to_write = [
            "PK\1\2",
            dw(90), # The encryption is so good it's version 9.0 at least!
            dw(90), # The encryption is so good it's version 9.0 at least!
            dw(SETBIT(0) | SETBIT(15)), # Super strong encryption enabled!!!
            dw(0), # No compression.
            dw(0), dw(0), # Time/date, we don't care.
            dd(crc),
            dd(actual_sz),
            dd(len(data)),
            dw(len(fname)),
            dw(0), # Extra field length.
            dw(0), # Comment field length.
            dw(0), # Disk number start.
            dw(0), # File attributes.
            dd(0), # External file attributes.
            dd(offset),
            fname
        ]
        arc.write(''.join(header_to_write))
    def write_eocdh(self, arc, ent_no, cdh_start, cdh_end):
        """Write the end-of-central-directory record."""
        header_to_write = [
            "PK\5\6",
            dw(0), # Disk no.
            dw(0), # Disk with CDH.
            dw(ent_no),
            dw(ent_no),
            dd(cdh_end - cdh_start),
            dd(cdh_start),
            dw(0), # Comment length.
        ]
        arc.write(''.join(header_to_write))
    def close(self):
        """Write the archive: every local file header + payload, then the
        central directory, then the end-of-central-directory record."""
        with open(self.arcname, "wb") as arc:
            offsets = []
            crcs = []  # NOTE(review): never populated or read; appears unused
            for f in self.files:
                offset = arc.tell()
                offsets.append(offset)
                self.write_lfh(arc, f)
            cdh_start = arc.tell()
            for f, offset in zip(self.files, offsets):
                self.write_cdh(arc, f, offset)
            cdh_end = arc.tell()
            self.write_eocdh(arc, len(self.files), cdh_start, cdh_end)
# Build the encrypted archive from flag.png.
z = BetterZipCreator("flag.zip", KEY)
z.add_file("flag.png")
z.close()
# Fun fact: this requires a hacked zipfile module which ignores the
# 'encrypted' flag and crc32 errors.
z = zipfile.ZipFile("flag.zip")
data = z.read("flag.png")
# Entry layout: key IV, cipher IV, ciphertext, encrypted SHA-256 (32 bytes).
key_iv = data[:POLY_SZ]
cipher_iv = data[POLY_SZ:POLY_SZ*2]
d = LFSRCipher(KEY, POLY_SZ, key_iv, cipher_iv)
dec = d.crypt(data[POLY_SZ*2:-32])
dec_hash = d.crypt(data[-32:])
# Verify the decryption by recomputing the plaintext hash.
act_hash = hashlib.sha256(dec).digest()
print "hash match:", act_hash == dec_hash
with open("dec_flag.png", "wb") as f:
    f.write(dec)
# Scratch round-trip demo of LFSRCipher, deliberately left disabled.
"""
c = LFSRCipher("alamakot", 8)
data = "A" * 4096
with open("dump.bin", "wb") as f:
    f.write(c.get_headers())
    f.write(c.crypt(data))
with open("dump.bin", "rb") as f:
    key_iv = f.read(8)
    cipher_iv = f.read(8)
    d = LFSRCipher("alamakot", 8, key_iv, cipher_iv)
    data = f.read()
with open("dump.dec", "wb") as f:
    f.write(d.crypt(data))
"""
| |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015, Alcatel-Lucent Inc, 2017 Nokia
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from .fetchers import NUMetadatasFetcher
from .fetchers import NUGlobalMetadatasFetcher
from .fetchers import NUMonitorscopesFetcher
from .fetchers import NUApplicationBindingsFetcher
from bambou import NURESTObject
class NUApplication(NURESTObject):
    """ Represents a Application in the VSD

        Notes:
            Represents an application with L4/L7 classification.
    """

    # REST names used by bambou to build resource URLs for this entity.
    __rest_name__ = "application"
    __resource_name__ = "applications"

    ## Constants
    CONST_POST_CLASSIFICATION_PATH_ANY = "ANY"
    CONST_PROTOCOL_NONE = "NONE"
    CONST_PERFORMANCE_MONITOR_TYPE_FIRST_PACKET = "FIRST_PACKET"
    CONST_PRE_CLASSIFICATION_PATH_PRIMARY = "PRIMARY"
    CONST_ENTITY_SCOPE_GLOBAL = "GLOBAL"
    CONST_PRE_CLASSIFICATION_PATH_SECONDARY = "SECONDARY"
    CONST_PERFORMANCE_MONITOR_TYPE_CONTINUOUS = "CONTINUOUS"
    CONST_OPTIMIZE_PATH_SELECTION_PACKETLOSS = "PACKETLOSS"
    CONST_ENTITY_SCOPE_ENTERPRISE = "ENTERPRISE"
    CONST_OPTIMIZE_PATH_SELECTION_LATENCY = "LATENCY"
    CONST_OPTIMIZE_PATH_SELECTION_JITTER = "JITTER"
    CONST_PROTOCOL_UDP = "UDP"
    CONST_POST_CLASSIFICATION_PATH_PRIMARY = "PRIMARY"
    CONST_POST_CLASSIFICATION_PATH_SECONDARY = "SECONDARY"
    CONST_PERFORMANCE_MONITOR_TYPE_FIRST_PACKET_AND_CONTINUOUS = "FIRST_PACKET_AND_CONTINUOUS"
    CONST_PROTOCOL_TCP = "TCP"
    CONST_PRE_CLASSIFICATION_PATH_DEFAULT = "DEFAULT"

    def __init__(self, **kwargs):
        """ Initializes a Application instance

            Notes:
                You can specify all parameters while calling this methods.
                A special argument named `data` will enable you to load the
                object from a Python dictionary

            Examples:
                >>> application = NUApplication(id=u'xxxx-xxx-xxx-xxx', name=u'Application')
                >>> application = NUApplication(data=my_dict)
        """

        super(NUApplication, self).__init__()

        # Read/Write Attributes
        self._dscp = None
        self._name = None
        self._bandwidth = None
        self._last_updated_by = None
        self._read_only = None
        self._performance_monitor_type = None
        self._certificate_common_name = None
        self._description = None
        self._destination_ip = None
        self._destination_port = None
        self._network_symmetry = None
        self._enable_pps = None
        self._one_way_delay = None
        self._one_way_jitter = None
        self._one_way_loss = None
        self._entity_scope = None
        self._post_classification_path = None
        self._source_ip = None
        self._source_port = None
        self._app_id = None
        self._optimize_path_selection = None
        self._pre_classification_path = None
        self._protocol = None
        self._associated_l7_application_signature_id = None
        self._ether_type = None
        self._external_id = None
        self._symmetry = None

        # Declare each attribute's REST mapping (local name <-> VSD API name,
        # type, and validation constraints) to the bambou framework.
        self.expose_attribute(local_name="dscp", remote_name="DSCP", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="name", remote_name="name", attribute_type=str, is_required=True, is_unique=False)
        self.expose_attribute(local_name="bandwidth", remote_name="bandwidth", attribute_type=int, is_required=False, is_unique=False)
        self.expose_attribute(local_name="last_updated_by", remote_name="lastUpdatedBy", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="read_only", remote_name="readOnly", attribute_type=bool, is_required=False, is_unique=False)
        self.expose_attribute(local_name="performance_monitor_type", remote_name="performanceMonitorType", attribute_type=str, is_required=False, is_unique=False, choices=[u'CONTINUOUS', u'FIRST_PACKET', u'FIRST_PACKET_AND_CONTINUOUS'])
        self.expose_attribute(local_name="certificate_common_name", remote_name="certificateCommonName", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="description", remote_name="description", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="destination_ip", remote_name="destinationIP", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="destination_port", remote_name="destinationPort", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="network_symmetry", remote_name="networkSymmetry", attribute_type=bool, is_required=False, is_unique=False)
        self.expose_attribute(local_name="enable_pps", remote_name="enablePPS", attribute_type=bool, is_required=False, is_unique=False)
        self.expose_attribute(local_name="one_way_delay", remote_name="oneWayDelay", attribute_type=int, is_required=False, is_unique=False)
        self.expose_attribute(local_name="one_way_jitter", remote_name="oneWayJitter", attribute_type=int, is_required=False, is_unique=False)
        self.expose_attribute(local_name="one_way_loss", remote_name="oneWayLoss", attribute_type=float, is_required=False, is_unique=False)
        self.expose_attribute(local_name="entity_scope", remote_name="entityScope", attribute_type=str, is_required=False, is_unique=False, choices=[u'ENTERPRISE', u'GLOBAL'])
        self.expose_attribute(local_name="post_classification_path", remote_name="postClassificationPath", attribute_type=str, is_required=False, is_unique=False, choices=[u'ANY', u'PRIMARY', u'SECONDARY'])
        self.expose_attribute(local_name="source_ip", remote_name="sourceIP", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="source_port", remote_name="sourcePort", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="app_id", remote_name="appId", attribute_type=int, is_required=False, is_unique=False)
        self.expose_attribute(local_name="optimize_path_selection", remote_name="optimizePathSelection", attribute_type=str, is_required=False, is_unique=False, choices=[u'JITTER', u'LATENCY', u'PACKETLOSS'])
        self.expose_attribute(local_name="pre_classification_path", remote_name="preClassificationPath", attribute_type=str, is_required=False, is_unique=False, choices=[u'DEFAULT', u'PRIMARY', u'SECONDARY'])
        self.expose_attribute(local_name="protocol", remote_name="protocol", attribute_type=str, is_required=False, is_unique=False, choices=[u'NONE', u'TCP', u'UDP'])
        self.expose_attribute(local_name="associated_l7_application_signature_id", remote_name="associatedL7ApplicationSignatureID", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="ether_type", remote_name="etherType", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="external_id", remote_name="externalID", attribute_type=str, is_required=False, is_unique=True)
        self.expose_attribute(local_name="symmetry", remote_name="symmetry", attribute_type=bool, is_required=False, is_unique=False)

        # Fetchers
        self.metadatas = NUMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
        self.global_metadatas = NUGlobalMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
        self.monitorscopes = NUMonitorscopesFetcher.fetcher_with_object(parent_object=self, relationship="child")
        self.application_bindings = NUApplicationBindingsFetcher.fetcher_with_object(parent_object=self, relationship="member")

        self._compute_args(**kwargs)

    # Properties
    # NOTE: this is generated SDK boilerplate — each exposed attribute gets a
    # plain getter/setter pair around its private backing field.

    @property
    def dscp(self):
        """ Get dscp value.
            Notes:
                DSCP match condition to be set in the rule. It is either * or from 0-63.
                This attribute is named `DSCP` in VSD API.
        """
        return self._dscp

    @dscp.setter
    def dscp(self, value):
        """ Set dscp value.
            Notes:
                DSCP match condition to be set in the rule. It is either * or from 0-63.
                This attribute is named `DSCP` in VSD API.
        """
        self._dscp = value

    @property
    def name(self):
        """ Get name value.
            Notes:
                name of the application
        """
        return self._name

    @name.setter
    def name(self, value):
        """ Set name value.
            Notes:
                name of the application
        """
        self._name = value

    @property
    def bandwidth(self):
        """ Get bandwidth value.
            Notes:
                Minimum Failover Bandwidth of the application.
        """
        return self._bandwidth

    @bandwidth.setter
    def bandwidth(self, value):
        """ Set bandwidth value.
            Notes:
                Minimum Failover Bandwidth of the application.
        """
        self._bandwidth = value

    @property
    def last_updated_by(self):
        """ Get last_updated_by value.
            Notes:
                ID of the user who last updated the object.
                This attribute is named `lastUpdatedBy` in VSD API.
        """
        return self._last_updated_by

    @last_updated_by.setter
    def last_updated_by(self, value):
        """ Set last_updated_by value.
            Notes:
                ID of the user who last updated the object.
                This attribute is named `lastUpdatedBy` in VSD API.
        """
        self._last_updated_by = value

    @property
    def read_only(self):
        """ Get read_only value.
            Notes:
                determines whether this entity is read only. Read only objects cannot be modified or deleted.
                This attribute is named `readOnly` in VSD API.
        """
        return self._read_only

    @read_only.setter
    def read_only(self, value):
        """ Set read_only value.
            Notes:
                determines whether this entity is read only. Read only objects cannot be modified or deleted.
                This attribute is named `readOnly` in VSD API.
        """
        self._read_only = value

    @property
    def performance_monitor_type(self):
        """ Get performance_monitor_type value.
            Notes:
                Describes the trigger for the application.
                This attribute is named `performanceMonitorType` in VSD API.
        """
        return self._performance_monitor_type

    @performance_monitor_type.setter
    def performance_monitor_type(self, value):
        """ Set performance_monitor_type value.
            Notes:
                Describes the trigger for the application.
                This attribute is named `performanceMonitorType` in VSD API.
        """
        self._performance_monitor_type = value

    @property
    def certificate_common_name(self):
        """ Get certificate_common_name value.
            Notes:
                Describes the certificate common name
                This attribute is named `certificateCommonName` in VSD API.
        """
        return self._certificate_common_name

    @certificate_common_name.setter
    def certificate_common_name(self, value):
        """ Set certificate_common_name value.
            Notes:
                Describes the certificate common name
                This attribute is named `certificateCommonName` in VSD API.
        """
        self._certificate_common_name = value

    @property
    def description(self):
        """ Get description value.
            Notes:
                description of Application
        """
        return self._description

    @description.setter
    def description(self, value):
        """ Set description value.
            Notes:
                description of Application
        """
        self._description = value

    @property
    def destination_ip(self):
        """ Get destination_ip value.
            Notes:
                destination IP in CIDR format
                This attribute is named `destinationIP` in VSD API.
        """
        return self._destination_ip

    @destination_ip.setter
    def destination_ip(self, value):
        """ Set destination_ip value.
            Notes:
                destination IP in CIDR format
                This attribute is named `destinationIP` in VSD API.
        """
        self._destination_ip = value

    @property
    def destination_port(self):
        """ Get destination_port value.
            Notes:
                value should be either * or single port number
                This attribute is named `destinationPort` in VSD API.
        """
        return self._destination_port

    @destination_port.setter
    def destination_port(self, value):
        """ Set destination_port value.
            Notes:
                value should be either * or single port number
                This attribute is named `destinationPort` in VSD API.
        """
        self._destination_port = value

    @property
    def network_symmetry(self):
        """ Get network_symmetry value.
            Notes:
                Network symmetry flag
                This attribute is named `networkSymmetry` in VSD API.
        """
        return self._network_symmetry

    @network_symmetry.setter
    def network_symmetry(self, value):
        """ Set network_symmetry value.
            Notes:
                Network symmetry flag
                This attribute is named `networkSymmetry` in VSD API.
        """
        self._network_symmetry = value

    @property
    def enable_pps(self):
        """ Get enable_pps value.
            Notes:
                Enable the performance probe for this application
                This attribute is named `enablePPS` in VSD API.
        """
        return self._enable_pps

    @enable_pps.setter
    def enable_pps(self, value):
        """ Set enable_pps value.
            Notes:
                Enable the performance probe for this application
                This attribute is named `enablePPS` in VSD API.
        """
        self._enable_pps = value

    @property
    def one_way_delay(self):
        """ Get one_way_delay value.
            Notes:
                one way Delay
                This attribute is named `oneWayDelay` in VSD API.
        """
        return self._one_way_delay

    @one_way_delay.setter
    def one_way_delay(self, value):
        """ Set one_way_delay value.
            Notes:
                one way Delay
                This attribute is named `oneWayDelay` in VSD API.
        """
        self._one_way_delay = value

    @property
    def one_way_jitter(self):
        """ Get one_way_jitter value.
            Notes:
                one way Jitter
                This attribute is named `oneWayJitter` in VSD API.
        """
        return self._one_way_jitter

    @one_way_jitter.setter
    def one_way_jitter(self, value):
        """ Set one_way_jitter value.
            Notes:
                one way Jitter
                This attribute is named `oneWayJitter` in VSD API.
        """
        self._one_way_jitter = value

    @property
    def one_way_loss(self):
        """ Get one_way_loss value.
            Notes:
                one way loss
                This attribute is named `oneWayLoss` in VSD API.
        """
        return self._one_way_loss

    @one_way_loss.setter
    def one_way_loss(self, value):
        """ Set one_way_loss value.
            Notes:
                one way loss
                This attribute is named `oneWayLoss` in VSD API.
        """
        self._one_way_loss = value

    @property
    def entity_scope(self):
        """ Get entity_scope value.
            Notes:
                Specify if scope of entity is Data center or Enterprise level
                This attribute is named `entityScope` in VSD API.
        """
        return self._entity_scope

    @entity_scope.setter
    def entity_scope(self, value):
        """ Set entity_scope value.
            Notes:
                Specify if scope of entity is Data center or Enterprise level
                This attribute is named `entityScope` in VSD API.
        """
        self._entity_scope = value

    @property
    def post_classification_path(self):
        """ Get post_classification_path value.
            Notes:
                default set to any , possible values primary/secondary/any
                This attribute is named `postClassificationPath` in VSD API.
        """
        return self._post_classification_path

    @post_classification_path.setter
    def post_classification_path(self, value):
        """ Set post_classification_path value.
            Notes:
                default set to any , possible values primary/secondary/any
                This attribute is named `postClassificationPath` in VSD API.
        """
        self._post_classification_path = value

    @property
    def source_ip(self):
        """ Get source_ip value.
            Notes:
                source IP address
                This attribute is named `sourceIP` in VSD API.
        """
        return self._source_ip

    @source_ip.setter
    def source_ip(self, value):
        """ Set source_ip value.
            Notes:
                source IP address
                This attribute is named `sourceIP` in VSD API.
        """
        self._source_ip = value

    @property
    def source_port(self):
        """ Get source_port value.
            Notes:
                source Port ,value should be either * or single port number
                This attribute is named `sourcePort` in VSD API.
        """
        return self._source_port

    @source_port.setter
    def source_port(self, value):
        """ Set source_port value.
            Notes:
                source Port ,value should be either * or single port number
                This attribute is named `sourcePort` in VSD API.
        """
        self._source_port = value

    @property
    def app_id(self):
        """ Get app_id value.
            Notes:
                a unique 2 byte id generated when a application is created and used by VRS for probing.
                This attribute is named `appId` in VSD API.
        """
        return self._app_id

    @app_id.setter
    def app_id(self, value):
        """ Set app_id value.
            Notes:
                a unique 2 byte id generated when a application is created and used by VRS for probing.
                This attribute is named `appId` in VSD API.
        """
        self._app_id = value

    @property
    def optimize_path_selection(self):
        """ Get optimize_path_selection value.
            Notes:
                with values being Latency, Jitter, PacketLoss
                This attribute is named `optimizePathSelection` in VSD API.
        """
        return self._optimize_path_selection

    @optimize_path_selection.setter
    def optimize_path_selection(self, value):
        """ Set optimize_path_selection value.
            Notes:
                with values being Latency, Jitter, PacketLoss
                This attribute is named `optimizePathSelection` in VSD API.
        """
        self._optimize_path_selection = value

    @property
    def pre_classification_path(self):
        """ Get pre_classification_path value.
            Notes:
                default set to primary , possible values primary/secondary
                This attribute is named `preClassificationPath` in VSD API.
        """
        return self._pre_classification_path

    @pre_classification_path.setter
    def pre_classification_path(self, value):
        """ Set pre_classification_path value.
            Notes:
                default set to primary , possible values primary/secondary
                This attribute is named `preClassificationPath` in VSD API.
        """
        self._pre_classification_path = value

    @property
    def protocol(self):
        """ Get protocol value.
            Notes:
                Protocol number that must be matched
        """
        return self._protocol

    @protocol.setter
    def protocol(self, value):
        """ Set protocol value.
            Notes:
                Protocol number that must be matched
        """
        self._protocol = value

    @property
    def associated_l7_application_signature_id(self):
        """ Get associated_l7_application_signature_id value.
            Notes:
                associated Layer7 Application Type ID
                This attribute is named `associatedL7ApplicationSignatureID` in VSD API.
        """
        return self._associated_l7_application_signature_id

    @associated_l7_application_signature_id.setter
    def associated_l7_application_signature_id(self, value):
        """ Set associated_l7_application_signature_id value.
            Notes:
                associated Layer7 Application Type ID
                This attribute is named `associatedL7ApplicationSignatureID` in VSD API.
        """
        self._associated_l7_application_signature_id = value

    @property
    def ether_type(self):
        """ Get ether_type value.
            Notes:
                Ether type of the packet to be matched. etherType can be * or a valid hexadecimal value
                This attribute is named `etherType` in VSD API.
        """
        return self._ether_type

    @ether_type.setter
    def ether_type(self, value):
        """ Set ether_type value.
            Notes:
                Ether type of the packet to be matched. etherType can be * or a valid hexadecimal value
                This attribute is named `etherType` in VSD API.
        """
        self._ether_type = value

    @property
    def external_id(self):
        """ Get external_id value.
            Notes:
                External object ID. Used for integration with third party systems
                This attribute is named `externalID` in VSD API.
        """
        return self._external_id

    @external_id.setter
    def external_id(self, value):
        """ Set external_id value.
            Notes:
                External object ID. Used for integration with third party systems
                This attribute is named `externalID` in VSD API.
        """
        self._external_id = value

    @property
    def symmetry(self):
        """ Get symmetry value.
            Notes:
                Maintain path symmetry during SLA violation
        """
        return self._symmetry

    @symmetry.setter
    def symmetry(self, value):
        """ Set symmetry value.
            Notes:
                Maintain path symmetry during SLA violation
        """
        self._symmetry = value
| |
"""
This is the code that needs to be integrated into collectd when run in
production. It contains the python code that integrates into the python module
for collectd. It connects to one or more vCenter Servers and gathers the
configured metrics from ESXi hosts and Virtual Machines.
The file is organized in multiple sections. The first section implements the
callback functions executed by collectd which is followed by a couple of helper
functions that separate out some code to make the rest more readable. The
helper classes section provides threads that are used to parallelize things and
make the plugin a lot faster.
"""
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
import datetime
import re
import ssl
import time
import tzlocal
from pyVim.connect import SmartConnect, Disconnect
from pyVmomi import vim
import collectd
################################################################################
# CONFIGURE ME
################################################################################
INTERVAL = 300
################################################################################
# DO NOT CHANGE BEYOND THIS POINT!
################################################################################
CONFIGS = [] # Stores the configuration as passed from collectd
ENVIRONMENT = {} # Runtime data and object cache
################################################################################
# IMPLEMENTATION OF COLLECTD CALLBACK FUNCTIONS
################################################################################
def convert_folder_tree_to_list(folder_tree):
    """Recursively flatten a vSphere folder hierarchy into a flat list of
    non-folder leaf entities."""
    flattened = []
    for node in folder_tree:
        if node._wsdlName != "Folder":
            flattened.append(node)
        else:
            # Descend into sub-folders and splice their leaves in.
            flattened.extend(convert_folder_tree_to_list(node.childEntity))
    return flattened
def configure_callback(conf):
    """Receive configuration block. This is called by collectd for every
    configuration block it finds for this module.

    Parses the block's child nodes into a plain dict and appends it to the
    module-global CONFIGS list; init_callback() later turns each entry into
    a runtime environment.
    """
    # Set some sensible default values
    name = None
    host = None
    port = 443
    verbose = None
    verify_cert = None
    use_friendly_name = None
    username = None
    password = None
    host_counters = []
    vm_counters = []
    inventory_refresh_interval = 600
    for node in conf.children:
        key = node.key.lower()
        val = node.values
        if key == 'name':
            name = val[0]
        elif key == 'host':
            host = val[0]
        elif key == 'port':
            port = int(val[0])
        elif key == 'verbose':
            # NOTE(review): collectd may deliver config values as strings, in
            # which case bool('false') is True — confirm the type of val[0].
            verbose = bool(val[0])
        elif key == 'verifycertificate':
            verify_cert = bool(val[0])
        elif key == 'usefriendlyname':
            use_friendly_name = bool(val[0])
        elif key == 'username':
            username = val[0]
        elif key == 'password':
            password = val[0]
        elif key == 'host_counters':
            # Comma-separated list of host metric names, or the literal "all".
            counters = val[0]
            if not counters == "all":
                values = counters.split(',')
                for value in values:
                    if len(value) > 0:
                        host_counters.append(value.strip())
            else :
                host_counters = "all"
        elif key == 'vm_counters':
            # Comma-separated list of VM metric names, or the literal "all".
            counters = val[0]
            if not counters == "all":
                values = counters.split(',')
                for value in values:
                    if len(value) > 0:
                        vm_counters.append(value.strip())
            else:
                vm_counters = "all"
        elif key == 'inventory_refresh_interval':
            inventory_refresh_interval = int(val[0])
        else:
            collectd.warning('collectsphere plugin: Unknown config key: %s.'
                             % key)
            continue
    # Password is masked in the log; counter lists are logged as lengths
    # (note: len("all") is 3 when the "all" shortcut was used).
    log_message = \
        'configure_callback: Loaded config: name=%s, host=%s, port=%s, ' \
        'verbose=%s, username=%s, password=%s, host_metrics=%s, ' \
        'vm_metrics=%s, inventory_refresh_interval=%s' % (
            name, host, port, verbose, username, "******", len(host_counters),
            len(vm_counters), inventory_refresh_interval
        )
    collectd.info(
        log_message
    )
    CONFIGS.append({
        'name': name,
        'host': host,
        'port': port,
        'verbose': verbose,
        'verify_cert': verify_cert,
        'use_friendly_name': use_friendly_name,
        'username': username,
        'password': password,
        'host_counters': host_counters,
        'vm_counters': vm_counters,
        'inventory_refresh_interval': inventory_refresh_interval
    })
def init_callback():
    """Collectd init hook: build one runtime environment per configured
    vCenter Server (connection plus counter-ID mapping tables) and cache it
    in ENVIRONMENT under the config block's name."""
    for cfg in CONFIGS:
        ENVIRONMENT[cfg.get("name")] = create_environment(cfg)
def read_callback():
    """ This function is regularly executed by collectd. It is important to
    minimize the execution time of the function which is why a lot of caching
    is performed using the environment objects. """
    # Walk through the existing environments
    for name in ENVIRONMENT:
        env = ENVIRONMENT[name]
        collectd.info("read_callback: entering environment: " + name)
        # Connects to vCenter Server. NOTE(review): a fresh connection is
        # opened on every read cycle, and the configured verify_cert setting
        # is not applied here — confirm this is intentional.
        service_instance = SmartConnect(
            host=env["host"], user=env["username"], pwd=env["password"]
        )
        performance_manager = service_instance \
            .RetrieveServiceContent() \
            .perfManager
        # Walk through all Clusters of Datacenter
        for datacenter in service_instance \
                .RetrieveServiceContent() \
                .rootFolder.childEntity:
            if datacenter._wsdlName == "Datacenter":
                for compute_resource in datacenter.hostFolder.childEntity:
                    if compute_resource._wsdlName == \
                            "ComputeResource" \
                            or compute_resource._wsdlName == \
                            "ClusterComputeResource":
                        # Label metrics with the friendly name or the managed
                        # object id, depending on configuration.
                        cluster_name = \
                            compute_resource.name if env['use_friendly_name'] \
                            else compute_resource._moId
                        # Walk through all hosts in cluster, collect their
                        # metrics and dispatch them
                        collectd.info(
                            "read_callback: found %d hosts in cluster %s" % (
                                len(compute_resource.host),
                                compute_resource.name
                            )
                        )
                        if len(env['host_counter_ids']) > 0:
                            # NOTE(review): helper name is misspelled
                            # ("collet..."), defined later in this module.
                            collet_metrics_for_entities(
                                service_instance,
                                performance_manager,
                                env['host_counter_ids'],
                                compute_resource.host,
                                cluster_name,
                                env
                            )
                        # Walk through all vms in host, collect their metrics
                        # and dispatch them
                        for host in compute_resource.host:
                            if host._wsdlName == "HostSystem":
                                collectd.info(
                                    "read_callback: found %d vms in host %s" % (
                                        len(host.vm), host.name
                                    )
                                )
                                if len(env['vm_counter_ids']) > 0:
                                    collet_metrics_for_entities(
                                        service_instance,
                                        performance_manager,
                                        env['vm_counter_ids'],
                                        host.vm,
                                        cluster_name,
                                        env
                                    )
        Disconnect(service_instance)
def shutdown_callback():
    """Collectd shutdown hook; this plugin has nothing to clean up."""
    return None
################################################################################
# HELPER FUNCTIONS
################################################################################
def collet_metrics_for_entities(service_instance, performance_manager,
                                filtered_metric_ids, entities, cluster_name,
                                env):
    """Query performance data for ``entities`` and dispatch each sample to
    collectd.

    NOTE(review): the function name is misspelled ("collet" instead of
    "collect"), but it is kept as-is because read_callback() invokes it
    under this name.

    :param service_instance: connected pyVmomi ServiceInstance
    :param performance_manager: the vCenter PerformanceManager
    :param filtered_metric_ids: list of PerfMetricId objects to query
    :param entities: managed entities (hosts or VMs) to collect stats for
    :param cluster_name: cluster label used as prefix of the type instance
    :param env: environment dict created by create_environment()
    """
    # Definition of the queries for getting performance data from vCenter
    query_specs = []
    # Define the default time range in which the data should be collected (from
    # now to INTERVAL seconds ago). CurrentTime() keeps the range in the
    # server's clock, avoiding skew with the local machine.
    end_time = service_instance.CurrentTime()
    start_time = end_time - datetime.timedelta(seconds=INTERVAL)
    # For any entity there has to be an own query.
    if len(entities) == 0:
        return
    for entity in entities:
        query_spec = vim.PerformanceManager.QuerySpec()
        query_spec.metricId = filtered_metric_ids
        query_spec.format = "normal"
        query_spec.endTime = end_time
        query_spec.startTime = start_time
        # Define the interval, in seconds, for the performance statistics. This
        # means for any entity and any metric there will be
        # INTERVAL / query_spec.intervalId values collected. Leave blank or use
        # performanceManager.historicalInterval[i].samplingPeriod for
        # aggregated values
        query_spec.intervalId = 20
        query_spec.entity = entity
        query_specs.append(query_spec)
    # Retrieves the performance metrics for the specified entity (or entities)
    # based on the properties specified in the query_specs
    collectd.info("GetMetricsForEntities: collecting its stats")
    perf_entity_metrics = performance_manager.QueryPerf(query_specs)
    cd_value = collectd.Values(plugin="collectsphere")
    # Walk through all entities of the query result. Indices are kept because
    # entities[perf_entity_metric_count] is needed below to label the series.
    for perf_entity_metric_count in range(len(perf_entity_metrics)):
        perf_entity_metric = perf_entity_metrics[perf_entity_metric_count]
        perf_sample_infos = perf_entity_metric.sampleInfo
        perf_metric_series_list = perf_entity_metric.value
        # For every queried metric per entity, get an array consisting of
        # performance counter information for the specified counterIds.
        queried_counter_ids_per_entity = []
        for perf_metric_series in perf_metric_series_list:
            queried_counter_ids_per_entity.append(
                perf_metric_series.id.counterId)
        # QueryPerfCounter returns counter info in the same order as the ids
        # passed in, so positional indexing below pairs series with info.
        perf_counter_info_list = performance_manager.QueryPerfCounter(
            queried_counter_ids_per_entity)
        # Used to suppress duplicate samples within this entity's result.
        dispatched_values = list()
        for perf_metric_series_count in range(len(perf_metric_series_list)):
            perf_metric_series = perf_metric_series_list[
                perf_metric_series_count]
            perf_counter_info = perf_counter_info_list[perf_metric_series_count]
            counter = perf_counter_info.nameInfo.key
            group = perf_counter_info.groupInfo.key
            unit = perf_counter_info.unitInfo.key
            rollup_type = perf_counter_info.rollupType
            instance = perf_metric_series.id.instance
            for perf_metric_count in range(len(perf_metric_series.value)):
                perf_metric = perf_metric_series.value[perf_metric_count]
                # sampleInfo entries parallel the value list; convert the
                # sample's timestamp to local time, then to a Unix epoch.
                timestamp = float(time.mktime(
                    perf_sample_infos[perf_metric_count]
                    .timestamp.astimezone(
                        tzlocal.get_localzone()
                    ).timetuple()
                ))
                entity = entities[perf_entity_metric_count]
                # When the instance value is empty, the vSphere API references a
                # total. Example: A host has multiple cores for each of which we
                # get a single stat object. An additional stat object will be
                # returned by the vSphere API with an empty string for
                # "instance".
                # This is the overall value across all logical CPUs.
                instance = "all" if instance == "" else instance
                # Sanitize the entity label to collectd-safe characters.
                type_instance_str = \
                    cluster_name + "." + re.sub(
                        pattern=r'[^A-Za-z0-9_]',
                        repl='_',
                        string=
                        (
                            entity.name
                            if env['use_friendly_name']
                            else
                            entity._moId
                        )
                    ) + "." + instance
                # Skip samples already dispatched for this entity.
                dispatched_value = \
                    str(timestamp) + type_instance_str + str(perf_metric)
                if (dispatched_value) in dispatched_values:
                    continue
                else:
                    dispatched_values.append(dispatched_value)
                # now dispatch to collectd
                collectd.info("dispatch " + str(
                    timestamp) + "\t" + type_instance_str + "\t" + str(
                    long(perf_metric)))
                cd_value.type = re.sub(
                    pattern=r'[^A-Za-z0-9_]',
                    repl='_',
                    string=
                    entity._wsdlName
                ) \
                    + "." + group \
                    + "." + rollup_type \
                    + "." + counter \
                    + "." + unit
                # Best-effort dispatch: a single failed sample must not
                # abort the whole collection cycle.
                try:
                    cd_value.dispatch(time=timestamp,
                                      type_instance=type_instance_str,
                                      values=[long(perf_metric)])
                except Exception:
                    continue
def create_environment(config):
    """
    Creates a runtime environment from a given configuration block. As the
    structure of an environment is a bit complicated, this is the time to
    document it:

    A single environment is a dictionary that stores runtime information about
    the connection, metrics, etc for a single vCenter Server. This is the
    structure pattern:

        {
            'host': <FQDN OR IP ADDRESS OF VCENTER SERVER>,
            'username': <USER FOR LOGIN IN VCENTER SERVER>,
            'password': <PASSWORD FOR LOGIN IN VCENTER SERVER>,
            # This is a dictionary that stores mappings of performance counter
            # names to their respective IDs in vCenter.
            'lookup_host': {
                'NAME': <ID>,   # Example: 'cpu.usage': 2
                ...
            },
            # The same lookup dictionary must be available for virtual machines:
            'lookup_vm': {
                'NAME': <ID>,
                ...
            },
            # This stores the IDs of the counter names passed via the
            # configuration block. We used the lookup tables above to fill in
            # the IDs.
            'host_counter_ids': [<ID>, <ID>, ...],
            'vm_counter_ids': [<ID>, <ID>, ...],
        }

    Returns the environment dict on success, None when no powered-on host or
    VM is available, and -1 when the connection fails (the -1 is kept for
    backward compatibility with existing callers).
    """
    if not config.get('verify_cert'):
        # Disable TLS certificate verification globally when requested.
        ssl._create_default_https_context = ssl._create_unverified_context
    # Connect to vCenter Server
    service_instance = SmartConnect(host=config.get("host"),
                                    user=config.get("username"),
                                    pwd=config.get("password"))
    # If we could not connect abort here
    if not service_instance:
        print("Could not connect to the specified host using specified "
              "username and password")
        return -1
    # Set up the environment. We fill in the rest afterwards.
    env = {}
    env["host"] = config.get("host")
    env["username"] = config.get("username")
    env["password"] = config.get("password")
    env['use_friendly_name'] = config.get('use_friendly_name')
    performance_manager = service_instance.RetrieveServiceContent().perfManager
    # We need at least one host and one virtual machine, which are poweredOn,
    # in the vCenter to be able to fetch the Counter IDs and establish the
    # lookup table. Build "group.name" -> counter-id mapping first.
    ids_counters_dict = {}
    for perf_counter in performance_manager.perfCounter:
        counter_key = \
            perf_counter.groupInfo.key + "." + perf_counter.nameInfo.key
        ids_counters_dict[perf_counter.key] = counter_key
    host = None
    virtual_machine = None
    for child in service_instance \
            .RetrieveServiceContent() \
            .rootFolder.childEntity:
        if child._wsdlName == "Datacenter":
            for host_folder_child in convert_folder_tree_to_list(
                    child.hostFolder.childEntity
            ):
                # Remember the first powered-on host we come across.
                host = host_folder_child.host[0] if (
                    (len(host_folder_child.host) != 0) and
                    host_folder_child
                    .host[0]
                    .summary
                    .runtime
                    .powerState
                    == vim.HostSystem.PowerState.poweredOn
                ) else host
            # NOTE(review): this breaks out while no host has been found yet,
            # which aborts the search early. Possibly "host is not None" was
            # intended -- confirm before changing; behavior kept as-is.
            if virtual_machine is not None and host is None:
                break
            # Breadth-first walk of the VM folder tree; folders and vApps are
            # expanded into the work list as we go.
            vm_list = child.vmFolder.childEntity
            for tmp in vm_list:
                if tmp._wsdlName == "VirtualMachine":
                    if tmp.summary.runtime.powerState == \
                            vim.VirtualMachine.PowerState.poweredOn:
                        virtual_machine = tmp
                    if virtual_machine is not None and host is not None:
                        break
                elif tmp._wsdlName == "Folder":
                    vm_list += tmp.childEntity
                elif tmp._wsdlName == "VirtualApp":
                    vm_list += tmp.vm
    if host is None:
        collectd.info("create_environment: vCenter " + config.get(
            "name") + " does not contain any hosts. Cannot continue")
        return
    if virtual_machine is None:
        collectd.info("create_environment: vCenter " + config.get(
            "name") + " does not contain any VMs. Cannot continue")
        return
    # Get all queryable aggregated and realtime metrics for an entity
    env['lookup_host'] = []
    env['lookup_vm'] = []
    # Comparing an int with "is" relies on CPython small-int caching; use a
    # proper equality test instead.
    if len(performance_manager.historicalInterval) != 0:
        performance_interval = performance_manager.historicalInterval[0]
        samplingPeriod = performance_interval.samplingPeriod
        performance_interval.level = 2
        # Update performance interval to get all rolluptypes
        performance_manager.UpdatePerfInterval(performance_interval)
    else:
        samplingPeriod = None
    # Query aggregated queryable metrics for host and virtual_machine
    env['lookup_host'] += performance_manager.QueryAvailablePerfMetric(
        host,
        None,
        None,
        samplingPeriod
    )
    env['lookup_vm'] += performance_manager.QueryAvailablePerfMetric(
        virtual_machine,
        None,
        None,
        samplingPeriod
    )
    # Query aggregated realtime metrics (20s sampling) for host and
    # virtual_machine
    env['lookup_host'] += performance_manager.QueryAvailablePerfMetric(
        host,
        None,
        None,
        20
    )
    env['lookup_vm'] += performance_manager.QueryAvailablePerfMetric(
        virtual_machine,
        None,
        None,
        20
    )
    # Now use the lookup tables to find out the IDs of the counter names given
    # via the configuration and store them as an array in the environment.
    # If host_counters or vm_counters is "all", select everything.
    env['host_counter_ids'] = []
    if config['host_counters'] == "all":
        collectd.info(
            "create_environment: configured to grab all host counters")
        env['host_counter_ids'] = env['lookup_host']
    else:
        for metric in env['lookup_host']:
            if ids_counters_dict[metric.counterId] in config['host_counters']:
                env['host_counter_ids'].append(metric)
    collectd.info("create_environment: configured to grab %d host counters" % (
        len(env['host_counter_ids'])))
    env['vm_counter_ids'] = []
    if config['vm_counters'] == "all":
        env['vm_counter_ids'] = env['lookup_vm']
    else:
        for metric in env['lookup_vm']:
            if ids_counters_dict[metric.counterId] in config['vm_counters']:
                env['vm_counter_ids'].append(metric)
    collectd.info(
        "create_environment: configured to grab %d virtual_machine counters" % (
            len(env['vm_counter_ids'])))
    Disconnect(service_instance)
    return env
################################################################################
# COLLECTD REGISTRATION
################################################################################
# Register the plugin's lifecycle hooks with the collectd daemon.
collectd.register_config(configure_callback)
collectd.register_init(init_callback)
# read_callback is polled by collectd every INTERVAL seconds.
collectd.register_read(callback=read_callback, interval=INTERVAL)
collectd.register_shutdown(shutdown_callback)
| |
from sqlalchemy.testing import eq_, assert_raises_message
from sqlalchemy import exc
from sqlalchemy.databases import firebird
from sqlalchemy.exc import ProgrammingError
from sqlalchemy.sql import table, column
from sqlalchemy import types as sqltypes
from sqlalchemy.testing import fixtures, AssertsExecutionResults, AssertsCompiledSQL
from sqlalchemy import testing
from sqlalchemy.testing import engines
from sqlalchemy import String, VARCHAR, NVARCHAR, Unicode, Integer,\
func, insert, update, MetaData, select, Table, Column, text,\
Sequence, Float
from sqlalchemy import schema
class DomainReflectionTest(fixtures.TestBase, AssertsExecutionResults):
    """Test Firebird domains.

    setup_class creates four domains plus a table, generator and trigger
    that use them; test_table_is_reflected verifies that SQLAlchemy
    reflects the underlying types and server defaults of the domains.
    """

    __only_on__ = 'firebird'

    @classmethod
    def setup_class(cls):
        con = testing.db.connect()
        try:
            con.execute('CREATE DOMAIN int_domain AS INTEGER DEFAULT '
                        '42 NOT NULL')
            con.execute('CREATE DOMAIN str_domain AS VARCHAR(255)')
            con.execute('CREATE DOMAIN rem_domain AS BLOB SUB_TYPE TEXT'
                        )
            con.execute('CREATE DOMAIN img_domain AS BLOB SUB_TYPE '
                        'BINARY')
        except ProgrammingError as e:
            # Domains can survive a previous run; swallow only the
            # "already exists" error and re-raise anything else with the
            # original traceback (bare raise, not "raise e").
            if 'attempt to store duplicate value' not in str(e):
                raise
        con.execute('''CREATE GENERATOR gen_testtable_id''')
        con.execute('''CREATE TABLE testtable (question int_domain,
                    answer str_domain DEFAULT 'no answer',
                    remark rem_domain DEFAULT '',
                    photo img_domain,
                    d date,
                    t time,
                    dt timestamp,
                    redundant str_domain DEFAULT NULL)''')
        con.execute("ALTER TABLE testtable "
                    "ADD CONSTRAINT testtable_pk PRIMARY KEY "
                    "(question)")
        con.execute("CREATE TRIGGER testtable_autoid FOR testtable "
                    " ACTIVE BEFORE INSERT AS"
                    " BEGIN"
                    "  IF (NEW.question IS NULL) THEN"
                    "    NEW.question = gen_id(gen_testtable_id, 1);"
                    " END")

    @classmethod
    def teardown_class(cls):
        con = testing.db.connect()
        con.execute('DROP TABLE testtable')
        con.execute('DROP DOMAIN int_domain')
        con.execute('DROP DOMAIN str_domain')
        con.execute('DROP DOMAIN rem_domain')
        con.execute('DROP DOMAIN img_domain')
        con.execute('DROP GENERATOR gen_testtable_id')

    def test_table_is_reflected(self):
        from sqlalchemy.types import Integer, Text, BLOB, String, Date, \
            Time, DateTime
        metadata = MetaData(testing.db)
        # Named "tbl" so the local does not shadow the module-level
        # sqlalchemy.sql.table import.
        tbl = Table('testtable', metadata, autoload=True)
        eq_(set(tbl.columns.keys()), set([
            'question',
            'answer',
            'remark',
            'photo',
            'd',
            't',
            'dt',
            'redundant',
        ]),
            "Columns of reflected table didn't equal expected "
            "columns")
        eq_(tbl.c.question.primary_key, True)

        # disabled per http://www.sqlalchemy.org/trac/ticket/1660
        # eq_(tbl.c.question.sequence.name, 'gen_testtable_id')

        assert isinstance(tbl.c.question.type, Integer)
        eq_(tbl.c.question.server_default.arg.text, '42')
        assert isinstance(tbl.c.answer.type, String)
        assert tbl.c.answer.type.length == 255
        eq_(tbl.c.answer.server_default.arg.text, "'no answer'")
        assert isinstance(tbl.c.remark.type, Text)
        eq_(tbl.c.remark.server_default.arg.text, "''")
        assert isinstance(tbl.c.photo.type, BLOB)
        assert tbl.c.redundant.server_default is None

        # The following assume a Dialect 3 database
        assert isinstance(tbl.c.d.type, Date)
        assert isinstance(tbl.c.t.type, Time)
        assert isinstance(tbl.c.dt.type, DateTime)
class BuggyDomainReflectionTest(fixtures.TestBase, AssertsExecutionResults):
    """Test Firebird domains (and some other reflection bumps),
    see [ticket:1663] and http://tracker.firebirdsql.org/browse/CORE-356"""

    __only_on__ = 'firebird'

    # NB: spacing and newlines are *significant* here!
    # PS: this test is superfluous on recent FB, where the issue 356 is probably fixed...

    # Domain DDL deliberately split across odd line boundaries to provoke
    # the default-reflection bug described in CORE-356.
    AUTOINC_DM = """\
CREATE DOMAIN AUTOINC_DM
AS
NUMERIC(18,0)
"""

    MONEY_DM = """\
CREATE DOMAIN MONEY_DM
AS
NUMERIC(15,2)
DEFAULT 0
CHECK (VALUE BETWEEN -
9999999999999.99 AND +9999999999999.99)
"""

    NOSI_DM = """\
CREATE DOMAIN
NOSI_DM AS
CHAR(1)
DEFAULT 'N'
NOT NULL
CHECK (VALUE IN
('S', 'N'))
"""

    RIT_TESORERIA_CAPITOLO_DM = """\
CREATE DOMAIN RIT_TESORERIA_CAPITOLO_DM
AS
VARCHAR(6)
CHECK ((VALUE IS NULL) OR (VALUE =
UPPER(VALUE)))
"""

    # Table using the domains above (reflection must see through them).
    DEF_ERROR_TB = """\
CREATE TABLE DEF_ERROR (
RITENUTAMOV_ID AUTOINC_DM
NOT NULL,
RITENUTA MONEY_DM,
INTERESSI MONEY_DM
DEFAULT
0,
STAMPATO_MODULO NOSI_DM DEFAULT 'S',
TESORERIA_CAPITOLO
RIT_TESORERIA_CAPITOLO_DM)
"""
    # Same table expressed with plain column types (no domains) as the
    # reference for test_tables_are_reflected_same_way.
    DEF_ERROR_NODOM_TB = """\
CREATE TABLE
DEF_ERROR_NODOM (
RITENUTAMOV_ID INTEGER NOT NULL,
RITENUTA NUMERIC(15,2) DEFAULT 0,
INTERESSI NUMERIC(15,2)
DEFAULT
0,
STAMPATO_MODULO CHAR(1) DEFAULT 'S',
TESORERIA_CAPITOLO
CHAR(1))
"""

    DOM_ID = """
CREATE DOMAIN DOM_ID INTEGER NOT NULL
"""
    TABLE_A = """\
CREATE TABLE A (
ID DOM_ID /* INTEGER NOT NULL */ DEFAULT 0 )
"""
    # the 'default' keyword is lower case here
    TABLE_B = """\
CREATE TABLE B (
ID DOM_ID /* INTEGER NOT NULL */ default 0 )
"""

    @classmethod
    def setup_class(cls):
        con = testing.db.connect()
        con.execute(cls.AUTOINC_DM)
        con.execute(cls.MONEY_DM)
        con.execute(cls.NOSI_DM)
        con.execute(cls.RIT_TESORERIA_CAPITOLO_DM)
        con.execute(cls.DEF_ERROR_TB)
        con.execute(cls.DEF_ERROR_NODOM_TB)
        con.execute(cls.DOM_ID)
        con.execute(cls.TABLE_A)
        con.execute(cls.TABLE_B)

    @classmethod
    def teardown_class(cls):
        # Drop order matters: tables first, then the domains they use.
        con = testing.db.connect()
        con.execute('DROP TABLE a')
        con.execute("DROP TABLE b")
        con.execute('DROP DOMAIN dom_id')
        con.execute('DROP TABLE def_error_nodom')
        con.execute('DROP TABLE def_error')
        con.execute('DROP DOMAIN rit_tesoreria_capitolo_dm')
        con.execute('DROP DOMAIN nosi_dm')
        con.execute('DROP DOMAIN money_dm')
        con.execute('DROP DOMAIN autoinc_dm')

    def test_tables_are_reflected_same_way(self):
        # Defaults reflected through a domain must equal the plain-typed
        # reference table's defaults.
        metadata = MetaData(testing.db)

        table_dom = Table('def_error', metadata, autoload=True)
        table_nodom = Table('def_error_nodom', metadata, autoload=True)

        eq_(table_dom.c.interessi.server_default.arg.text,
            table_nodom.c.interessi.server_default.arg.text)
        eq_(table_dom.c.ritenuta.server_default.arg.text,
            table_nodom.c.ritenuta.server_default.arg.text)
        eq_(table_dom.c.stampato_modulo.server_default.arg.text,
            table_nodom.c.stampato_modulo.server_default.arg.text)

    def test_intermixed_comment(self):
        # A comment between column type and DEFAULT must not break
        # default reflection.
        metadata = MetaData(testing.db)
        table_a = Table('a', metadata, autoload=True)
        eq_(table_a.c.id.server_default.arg.text, "0")

    def test_lowercase_default_name(self):
        # Lower-case "default" keyword must also be recognized.
        metadata = MetaData(testing.db)
        table_b = Table('b', metadata, autoload=True)
        eq_(table_b.c.id.server_default.arg.text, "0")
class CompileTest(fixtures.TestBase, AssertsCompiledSQL):
    # Compile-only checks: the generated SQL strings are compared against
    # the Firebird dialect without needing a live database.

    __dialect__ = firebird.FBDialect()

    def test_alias(self):
        # Firebird dialect 2 renders "AS" for table aliases...
        t = table('sometable', column('col1'), column('col2'))
        s = select([t.alias()])
        self.assert_compile(s,
                            'SELECT sometable_1.col1, sometable_1.col2 '
                            'FROM sometable AS sometable_1')
        # ...while pre-2 versions omit the AS keyword.
        dialect = firebird.FBDialect()
        dialect._version_two = False
        self.assert_compile(s,
                            'SELECT sometable_1.col1, sometable_1.col2 '
                            'FROM sometable sometable_1',
                            dialect=dialect)

    def test_varchar_raise(self):
        # (N)VARCHAR without an explicit length cannot be rendered in DDL
        # on Firebird; both the bare type compile and CREATE TABLE must
        # raise CompileError.
        for type_ in (
            String,
            VARCHAR,
            String(),
            VARCHAR(),
            Unicode,
            Unicode(),
        ):
            type_ = sqltypes.to_instance(type_)
            assert_raises_message(
                exc.CompileError,
                "VARCHAR requires a length on dialect firebird",
                type_.compile,
                dialect=firebird.dialect())

            t1 = Table('sometable', MetaData(),
                       Column('somecolumn', type_)
                       )
            assert_raises_message(
                exc.CompileError,
                r"\(in table 'sometable', column 'somecolumn'\)\: "
                r"(?:N)?VARCHAR requires a length on dialect firebird",
                schema.CreateTable(t1).compile,
                dialect=firebird.dialect()
            )

    def test_function(self):
        self.assert_compile(func.foo(1, 2), 'foo(:foo_1, :foo_2)')
        self.assert_compile(func.current_time(), 'CURRENT_TIME')
        self.assert_compile(func.foo(), 'foo')
        m = MetaData()
        t = Table('sometable', m, Column('col1', Integer), Column('col2'
                  , Integer))
        self.assert_compile(select([func.max(t.c.col1)]),
                            'SELECT max(sometable.col1) AS max_1 FROM '
                            'sometable')

    def test_substring(self):
        # Firebird uses the SQL-standard SUBSTRING(x FROM y FOR z) form.
        self.assert_compile(func.substring('abc', 1, 2),
                            'SUBSTRING(:substring_1 FROM :substring_2 '
                            'FOR :substring_3)')
        self.assert_compile(func.substring('abc', 1),
                            'SUBSTRING(:substring_1 FROM :substring_2)')

    def test_update_returning(self):
        table1 = table('mytable', column('myid', Integer), column('name'
                       , String(128)), column('description',
                       String(128)))
        u = update(table1, values=dict(name='foo'
                   )).returning(table1.c.myid, table1.c.name)
        self.assert_compile(u,
                            'UPDATE mytable SET name=:name RETURNING '
                            'mytable.myid, mytable.name')
        u = update(table1, values=dict(name='foo')).returning(table1)
        self.assert_compile(u,
                            'UPDATE mytable SET name=:name RETURNING '
                            'mytable.myid, mytable.name, '
                            'mytable.description')
        # length() is rendered as Firebird's char_length().
        u = update(table1, values=dict(name='foo'
                   )).returning(func.length(table1.c.name))
        self.assert_compile(u,
                            'UPDATE mytable SET name=:name RETURNING '
                            'char_length(mytable.name) AS length_1')

    def test_insert_returning(self):
        table1 = table('mytable', column('myid', Integer), column('name'
                       , String(128)), column('description',
                       String(128)))
        i = insert(table1, values=dict(name='foo'
                   )).returning(table1.c.myid, table1.c.name)
        self.assert_compile(i,
                            'INSERT INTO mytable (name) VALUES (:name) '
                            'RETURNING mytable.myid, mytable.name')
        i = insert(table1, values=dict(name='foo')).returning(table1)
        self.assert_compile(i,
                            'INSERT INTO mytable (name) VALUES (:name) '
                            'RETURNING mytable.myid, mytable.name, '
                            'mytable.description')
        i = insert(table1, values=dict(name='foo'
                   )).returning(func.length(table1.c.name))
        self.assert_compile(i,
                            'INSERT INTO mytable (name) VALUES (:name) '
                            'RETURNING char_length(mytable.name) AS '
                            'length_1')

    def test_charset(self):
        """Exercise CHARACTER SET options on string types."""

        columns = [(firebird.CHAR, [1], {}, 'CHAR(1)'), (firebird.CHAR,
                   [1], {'charset': 'OCTETS'},
                   'CHAR(1) CHARACTER SET OCTETS'), (firebird.VARCHAR,
                   [1], {}, 'VARCHAR(1)'), (firebird.VARCHAR, [1],
                   {'charset': 'OCTETS'},
                   'VARCHAR(1) CHARACTER SET OCTETS')]
        for type_, args, kw, res in columns:
            self.assert_compile(type_(*args, **kw), res)
class TypesTest(fixtures.TestBase):
    """Type round-trip checks against a live Firebird database."""

    __only_on__ = 'firebird'

    @testing.provide_metadata
    def test_infinite_float(self):
        """Positive infinity survives a FLOAT column round trip."""
        tbl = Table('t', self.metadata, Column('data', Float))
        self.metadata.create_all()
        tbl.insert().execute(data=float('inf'))
        eq_(tbl.select().execute().fetchall(),
            [(float('inf'),)])
class MiscTest(fixtures.TestBase):

    __only_on__ = 'firebird'

    @testing.provide_metadata
    def test_strlen(self):
        metadata = self.metadata

        # On FB the length() function is implemented by an external UDF,
        # strlen(). Various SA tests fail because they pass a parameter
        # to it, and that does not work (it always results the maximum
        # string length the UDF was declared to accept). This test
        # checks that at least it works ok in other cases.

        t = Table('t1', metadata, Column('id', Integer,
                  Sequence('t1idseq'), primary_key=True), Column('name'
                  , String(10)))
        metadata.create_all()
        t.insert(values=dict(name='dante')).execute()
        t.insert(values=dict(name='alighieri')).execute()
        # NOTE(review): the comparison below is a bare expression whose
        # result is discarded, so nothing is actually asserted here.
        # Presumably an eq_()/assert wrapper was intended -- confirm
        # before changing, since adding it could make this test fail.
        select([func.count(t.c.id)], func.length(t.c.name)
               == 5).execute().first()[0] == 1

    def test_version_parsing(self):
        # Server banner strings from Firebird and Interbase; expected
        # result tuple is (major, minor, build, vendor).
        for string, result in [
            ("WI-V1.5.0.1234 Firebird 1.5", (1, 5, 1234, 'firebird')),
            ("UI-V6.3.2.18118 Firebird 2.1", (2, 1, 18118, 'firebird')),
            ("LI-V6.3.3.12981 Firebird 2.0", (2, 0, 12981, 'firebird')),
            ("WI-V8.1.1.333", (8, 1, 1, 'interbase')),
            ("WI-V8.1.1.333 Firebird 1.5", (1, 5, 333, 'firebird')),
        ]:
            eq_(
                testing.db.dialect._parse_version_info(string),
                result
            )

    @testing.provide_metadata
    def test_rowcount_flag(self):
        metadata = self.metadata
        # With enable_rowcount=True the dialect reports real rowcounts.
        engine = engines.testing_engine(options={'enable_rowcount'
                                        : True})
        assert engine.dialect.supports_sane_rowcount
        metadata.bind = engine
        t = Table('t1', metadata, Column('data', String(10)))
        metadata.create_all()
        r = t.insert().execute({'data': 'd1'}, {'data': 'd2'}, {'data'
                               : 'd3'})
        r = t.update().where(t.c.data == 'd2').values(data='d3'
                ).execute()
        eq_(r.rowcount, 1)
        r = t.delete().where(t.c.data == 'd3').execute()
        eq_(r.rowcount, 2)
        # Per-statement execution option overrides the engine-wide flag.
        r = \
            t.delete().execution_options(enable_rowcount=False).execute()
        eq_(r.rowcount, -1)
        # With rowcount disabled engine-wide, -1 is reported everywhere...
        engine = engines.testing_engine(options={'enable_rowcount'
                                        : False})
        assert not engine.dialect.supports_sane_rowcount
        metadata.bind = engine
        r = t.insert().execute({'data': 'd1'}, {'data': 'd2'}, {'data'
                               : 'd3'})
        r = t.update().where(t.c.data == 'd2').values(data='d3'
                ).execute()
        eq_(r.rowcount, -1)
        r = t.delete().where(t.c.data == 'd3').execute()
        eq_(r.rowcount, -1)
        # ...unless re-enabled per statement.
        r = t.delete().execution_options(enable_rowcount=True).execute()
        eq_(r.rowcount, 1)

    def test_percents_in_text(self):
        # Literal percent signs must pass through text() untouched (no
        # paramstyle escaping) on this dialect.
        for expr, result in (text("select '%' from rdb$database"), '%'
                             ), (text("select '%%' from rdb$database"),
                                 '%%'), \
                (text("select '%%%' from rdb$database"), '%%%'), \
                (text("select 'hello % world' from rdb$database"),
                 'hello % world'):
            eq_(testing.db.scalar(expr), result)
| |
# -*- coding: utf-8 -*-
import importlib
import re
import six
from fixtures_mongoengine.exceptions import FixturesMongoengineException
# Global registry mapping fixture class names to fixture classes;
# populated automatically by MetaFixture, consumed by get_fixture_class().
_fixture_registry = {}
def get_fixture_class(name):
    """Resolve a fixture class registered under ``name``.

    Falls back to a legacy lookup: the bare class name, or any dotted
    registry key ending with ".<class name>", as long as the match is
    unambiguous. Raises FixturesMongoengineException when nothing is
    found.
    """
    fixture_cls = _fixture_registry.get(name)
    if fixture_cls is None:
        # Possible old style name: match bare class name or dotted suffix.
        short_name = name.split('.')[-1]
        dotted_suffix = '.%s' % short_name
        candidates = [key for key in _fixture_registry.keys()
                      if key.endswith(dotted_suffix) or key == short_name]
        if len(candidates) == 1:
            fixture_cls = _fixture_registry.get(candidates.pop())
    if fixture_cls is None:
        raise FixturesMongoengineException('"{}" has not been registered in the fixture registry.'.format(name))
    return fixture_cls
def getattr_recursive(o, path):
    """Follow a chain of attribute names starting at ``o``.

    :param o: object to start the lookup from
    :type path: list -- attribute names, outermost first
    :returns: the value found at the end of the attribute chain
    :raises AttributeError: if any attribute in the chain is missing

    Unlike the previous recursive implementation this does not mutate the
    caller's ``path`` list, and an empty path returns ``o`` itself instead
    of raising IndexError.
    """
    current = o
    for name in path:
        current = getattr(current, name)
    return current
class MetaFixture(type):
    """Metaclass that records every fixture class in the module registry."""

    def __new__(mcs, name, bases, attrs):
        # Build the class normally, then index it by class name so that
        # get_fixture_class() can look it up later.
        fixture_cls = super(MetaFixture, mcs).__new__(mcs, name, bases, attrs)
        _fixture_registry[fixture_cls.__name__] = fixture_cls
        return fixture_cls
class BaseFixture(object):
    """Common machinery for loading and unloading fixture data.

    Subclasses declare ``depends`` (mapping of reference names to fixture
    classes this fixture's data refers to via "{name.key[.attr...]}"
    strings) and either inline ``data`` or a ``data_file`` module path.
    """

    # Mapping of reference name -> fixture class this fixture depends on.
    depends = {}
    # Dotted module path providing the fixture data (used when data is None).
    data_file = None
    # Inline data dict; takes precedence over data_file when not None.
    data = None
    # Attribute looked up on the data_file module.
    attr_name = 'fixture_data'
    # Passed to Document.save() by concrete subclasses.
    validate = True
    # Name of the primary-key field; set by concrete subclasses.
    pk_field_name = None

    def __init__(self):
        super(BaseFixture, self).__init__()
        self._loaded = False
        self._data = {}
        self._depend_fixtures = {}
        # Matches "{fixture.model[.attr...]}" depend references.
        # Raw string avoids invalid-escape-sequence warnings on Python 3.
        self.depend_re = re.compile(r'\{([^\}]+)\}')

    def __getitem__(self, item):
        """Return the loaded model stored under key ``item``."""
        if isinstance(item, six.string_types) and item in self._data:
            return self._data[item]
        raise KeyError('There is no key "{}" in fixture {}'.format(item, self.__class__.__name__))

    def __contains__(self, item):
        return item in self._data

    @property
    def loaded(self):
        """True once load() has completed successfully."""
        return self._loaded

    def init_depended_fixtures(self, fixtures):
        """Wire up instances of this fixture's declared dependencies.

        :param fixtures: dict, {fixture_class: fixture}
        :raises FixturesMongoengineException: when a declared dependency
            is missing from ``fixtures``.
        """
        for name, fixture_class in six.iteritems(self.depends):
            if fixture_class not in fixtures:
                raise FixturesMongoengineException('Depended fixture "{}: {}" not found in fixtures dict.'
                                                  .format(name, fixture_class.__name__))
            self._depend_fixtures[name] = fixtures[fixture_class]

    # --- lifecycle hooks: no-ops here, overridden by subclasses ---

    def before_load(self):
        pass

    def load(self):
        pass

    def after_load(self):
        pass

    def before_unload(self):
        pass

    def unload(self):
        pass

    def after_unload(self):
        pass

    def _validate_depend_fixtures(self):
        """Raise unless every declared dependency was created and loaded."""
        if not self.depends:
            return
        for name, fixture_class in six.iteritems(self.depends):
            if name not in self._depend_fixtures:
                raise FixturesMongoengineException('Depended fixture "{}" wasn\'t created.'.format(name))
            fixture = self._depend_fixtures[name]
            if not fixture.loaded:
                raise FixturesMongoengineException('Depended fixture "{}" wasn\'t loaded.'.format(name))

    def _get_raw_data(self):
        """Return the fixture rows, from inline data or the data module.

        :rtype: dict
        """
        if self.data is not None:
            return self.data
        else:
            data_module = importlib.import_module(self.data_file)
            return getattr(data_module, self.attr_name)

    def _resolve_depends(self, value):
        """Recursively replace depend references found inside ``value``."""
        if isinstance(value, six.string_types):
            return self._get_resolved_value(value)
        elif isinstance(value, dict):
            return self._get_resolved_dict(value)
        elif isinstance(value, list):
            return self._get_resolved_list(value)
        return value

    def _get_resolved_dict(self, value):
        copy = dict(value)
        # Iterate under a distinct name so the loop does not shadow the
        # ``value`` parameter while rewriting the copy in place.
        for key, item in six.iteritems(copy):
            copy[key] = self._resolve_depends(item)
        return copy

    def _get_resolved_list(self, value):
        copy = []
        for item in value:
            copy.append(self._resolve_depends(item))
        return copy

    def _get_resolved_value(self, value):
        """Resolve a single "{fixture.model[.attr...]}" reference string.

        Non-reference strings are returned unchanged. Without a trailing
        attribute path the referenced model's primary key is returned;
        otherwise the attribute chain is followed.
        """
        match = self.depend_re.match(value)
        if not match:
            return value

        ref = match.group(1)
        parts = ref.split('.')
        if len(parts) < 2:
            msg = 'Wrong depend reference "{}" in fixture "{}"'.format(ref, self.__class__.__name__)
            raise FixturesMongoengineException(msg)

        ref_fixture = parts.pop(0)
        if ref_fixture not in self._depend_fixtures:
            msg = 'Fixture "{}" not found in depended fixtures.'.format(ref_fixture)
            raise FixturesMongoengineException(msg)
        fixture = self._depend_fixtures[ref_fixture]

        ref_model = parts.pop(0)
        if ref_model not in fixture:
            msg = 'Model "{}" not found in depended fixture "{}".'.format(ref_model, ref_fixture)
            raise FixturesMongoengineException(msg)

        if len(parts) == 0:
            return getattr(fixture[ref_model], fixture.pk_field_name)
        else:
            return getattr_recursive(fixture[ref_model], parts)
class Fixture(six.with_metaclass(MetaFixture, BaseFixture)):
    """Concrete fixture bound to a mongoengine document class."""

    # mongoengine Document subclass; must be set by subclasses.
    document_class = None
    pk_field_name = 'pk'

    def __init__(self):
        super(Fixture, self).__init__()
        if self.document_class is None:
            raise FixturesMongoengineException('"document_class" must be set.')

    def load(self):
        """Insert every raw row as a document, resolving depend references."""
        self._validate_depend_fixtures()
        for key, row in six.iteritems(self._get_raw_data()):
            resolved_row = self._resolve_depends(row) if self.depends else row
            document = self.document_class(**resolved_row)
            document.save(self.validate)
            self._data[key] = document
        self._loaded = True

    def unload(self):
        """Delete every document of the bound class and clear cached models."""
        self._data = {}
        self.document_class.objects().delete()
| |
# Quantity comparisons for Nugacious (https://nugacious.mpetroff.net/)
# (c) 2014-2016, Matthew Petroff (https://mpetroff.net/)
import pickle
import gzip
import re
import random
import string
import fractions
import os
import itertools
import bisect
import numpy as np
import pint
import simpleeval
class UnsupportedDimensionsError(Exception):
    """Raised when the comparator doesn't support the entered dimensions."""

    def __init__(self, value):
        # Keep the message on .value; __str__ reports it verbatim.
        self.value = value

    def __str__(self):
        return self.value
class NoDimensionsError(Exception):
    """Raised when the query is dimensionless."""

    def __init__(self):
        # Deliberately takes no arguments: a dimensionless query needs no
        # further detail.
        pass
class match(object):
    '''
    A quantity match returned by the comparator.
    '''
    def __init__(self, label, wiki, ratio, mag, unit, dimension, category):
        '''
        Sets label, Wikipedia URL, ratio, magnitude, unit, dimension,
        and category.
        '''
        self.label = label
        self.wiki = wiki
        self.ratio = ratio
        self.magnitude = mag
        self.unit = unit
        self.dimension = dimension
        self.category = category

    def __str__(self):
        '''
        Returns comma separated string of all of the match's values.
        '''
        # Bug fix: the old format string had five conversions for seven
        # arguments, which raised TypeError at runtime.
        return '%s, %s, %f, %s, %s, %s, %s' % (self.label, self.wiki,
            self.ratio, self.magnitude, self.unit, self.dimension,
            self.category)

    def natural_language(self):
        '''
        Returns natural language string describing match.
        '''
        # HTML phrase templates keyed by "<dimension>.<category>"; the %s
        # placeholder receives the linked label.
        properties = {
            'area.area': 'the area of %s',
            'area.floorArea': 'the floor area of %s',
            'area.surfaceArea': 'the surface area of %s',
            'area.areaLand': 'the land area of %s',
            'area.watershed': 'the watershed area of %s',
            'area.areaMetro': 'the metro area of %s',
            'area.areaOfCatchment': 'the area of catchment of %s',
            'area.areaTotal': 'the total area of %s',
            'area.areaWater': 'the water area of %s',
            'area.areaUrban': 'the urban area of %s',
            'area.campusSize': 'the campus size of %s',
            'area.areaRural': 'the rural area of %s',
            'density.density': 'the density of %s',
            'frequency.frequency': 'the frequency of %s',
            'length.maximumBoatLength': 'the maximum boat length of the %s',
            'length.waistSize': 'the waist size of %s',
            'length.wheelbase': 'the wheelbase of the %s',
            'length.course': 'the course length of the %s',
            'length.mouthElevation': 'the mouth elevation of the %s',
            'length.hipSize': 'the hip size of %s',
            'length.meanRadius': 'the mean radius of %s',
            'length.originalMaximumBoatBeam':
                'the original maximum boat beam of the %s',
            'length.height': 'the height of %s',
            'length.originalMaximumBoatLength':
                'the original maximum boat length of the %s',
            'length.periapsis': 'the periapsis of %s',
            'length.distanceTraveled': 'the distance traveled by the %s',
            'length.bustSize': 'the bust size of %s',
            'length.shipDraft': 'the ship draft of the %s',
            'length.pistonStroke': 'the area of the %s',
            'length.trackLength': 'the area of the %s',
            'length.capitalElevation': 'the capital elevation of %s',
            'length.prominence': 'the topographic prominence of %s',
            'length.minimumElevation': 'the minimum elevation of %s',
            'length.shoreLength': 'the shore length of %s',
            'length.elevation': 'the elevation of %s',
            'length.runwayLength': 'the length of a runway at %s',
            'length.sourceConfluenceElevation':
                'the source confluence elevation of the %s',
            'length.maximumElevation': 'the maximum elevation of %s',
            'length.cylinderBore': 'the cylinder bore of %s',
            'length.railGauge': 'the rail gauge of the %s',
            'length.diameter': 'the diameter of %s',
            'length.maximumBoatBeam': 'the maximum boat beam of the %s',
            'length.depth': 'the depth of %s',
            'length.length': 'the length of %s',
            'length.shipBeam': 'the ship beam of the %s',
            'length.wavelength': 'the wavelength of %s',
            'length.sourceElevation': 'the source elevation of the %s',
            'length.lineLength': 'the length of the %s',
            'length.apoapsis': 'the apoapsis of %s',
            'length.width': 'the width of %s',
            'length.distance': 'the distance of the %s',
            'length.heightAboveAverageTerrain':
                'the height above average terrain of %s\'s transmitter',
            'length.mainspan': 'the mainspan of the %s',
            'mass.mass': 'the mass of %s',
            'mass.loadLimit': 'the load limit of the %s',
            'mass.weight': 'the weight of %s',
            'mass.shipDisplacement': 'the displacement of the %s',
            'mass.lowerEarthOrbitPayload':
                'the low earth orbit payload capacity of the %s rocket',
            'power.effectiveRadiatedPower':
                'the effective radiated power of %s\'s transmitter',
            'power.powerOutput': 'the power output of the %s',
            'power.installedCapacity': 'the installed capacity of the %s',
            'voltage.voltageOfElectrification':
                'the voltage of electrification of the %s',
            'speed.topSpeed': 'the top speed of the %s',
            'speed.averageSpeed': 'the average speed of %s',
            'speed.escapeVelocity': 'the escape velocity of %s',
            'temperature.minimumTemperature': 'the minimum temperature of %s',
            'temperature.maximumTemperature': 'the maximum temperature of %s',
            'temperature.temperature': 'the temperature of %s',
            'temperature.meanTemperature': 'the mean temperature of %s',
            'time.missionDuration': 'the mission duration of the %s',
            'time.orbitalPeriod': 'the orbital period of %s',
            'time.rotationPeriod': 'the rotation period of %s',
            'time.timeInSpace': 'the time spent in space by %s',
            'time.runtime': 'the runtime of %s',
            'torque.torqueOutput': 'the torque output of the %s',
            'volume.volume': 'the volume of %s',
            'volume.fuelCapacity': 'the fuel capacity of the %s',
            'volume.displacement': 'the displacement of the %s'
        }

        # Round the ratio to three significant figures; turn any exponent
        # into HTML superscript markup.
        ratio = '{:.3g}'.format(self.ratio)
        if re.search('[e]', ratio):
            a = ratio[:ratio.index('e') + 2]  # Strip leading zeros on exponent
            b = ratio[ratio.index('e') + 2:]
            b = re.sub('^(0*)', '', b)
            ratio = '<span>' + a + b + '</sup></span>'
        if self.ratio < 1:
            # Append a vulgar-fraction rendering for ratios below one.
            frac = str(fractions.Fraction(self.ratio).limit_denominator(100))
            if frac != '0':
                f = frac.split('/')
                if len(f) == 2:
                    ratio += ' (<sup>' + f[0] + '</sup>⁄<sub>' \
                        + f[1] + '</sub>)'
        label = '<a href="' + '//en.wikipedia.org/wiki/' + self.wiki + \
            '">' + self.label + '</a>'
        # Use a typographic apostrophe in the phrase template.
        properties[self.dimension + '.' + self.category] = \
            properties[self.dimension + '.'
                       + self.category].replace("'", '’')
        if re.search('[e]', ratio):
            ratio = '(' + ratio + ')'
        # Raw strings for the regexes; '\+'/'\-' would be invalid escapes.
        ratio = re.sub(r'(e\+)', ' × 10<sup>', ratio)
        ratio = re.sub(r'(e\-)', ' × 10<sup>-', ratio)
        prefix = '≈ %s × '
        mag = '{:.3g}'.format(self.magnitude)
        if re.search('[e]', mag):
            a = mag[:mag.index('e') + 2]
            b = mag[mag.index('e') + 2:]
            b = re.sub('^(0*)', '', b)
            mag = '<span>' + a + b + '</sup></span>'
        mag = re.sub(r'(e\+)', ' × 10<sup>', mag)
        mag = re.sub(r'(e\-)', ' × 10<sup>-', mag)
        suffix = ' <small><span class="text-muted">(%s %s)</span></small>'
        # Render the unit dict (name -> power) as HTML spans.
        units = ''
        for k in self.unit.keys():
            units += '<span>' + k
            if self.unit[k] != 1:
                units += '<sup>' + str(int(self.unit[k])) + '</sup>'
            units += '</span> '
        units = units.strip().replace('_', ' ')
        return (prefix + properties[self.dimension + '.' + self.category]
                + suffix) % (ratio, label, mag, units)
class comparator(object):
    '''
    Compares quantities parsed from free-form text against a database of
    known quantities loaded from ``data.pkl.gz`` shipped next to this
    module.
    '''

    def __init__(self):
        '''
        Initializes the comparator with the pickled quantity database and
        a pint unit registry.
        '''
        # Context manager guarantees the archive is closed even if
        # pickle.load raises.
        with gzip.open(os.path.join(os.path.dirname(
                os.path.abspath(__file__)), 'data.pkl.gz'), 'rb') as pkl_file:
            self.data = pickle.load(pkl_file)
        self.ureg = pint.UnitRegistry()

    def compare(self, input_string, close_count=5, random_count=5):
        '''
        Parses quantity described by input string and compares it to other
        quantities. The number of close matches (closest 10% of database) and
        the number of random matches can be specified.

        Returns a tuple
        ``(input_interp, closest_match, close_matches, random_matches)``.

        Raises ``pint.UndefinedUnitError`` for unparseable units,
        ``NoDimensionsError`` for dimensionless input and
        ``UnsupportedDimensionsError`` for unknown dimensions.
        '''
        # Since Pint uses eval, whitelist certain characters for security
        alphabet = string.ascii_letters + string.digits + re.escape(' */^+-.')
        escaped_string = re.sub('[^' + alphabet + ']', '', input_string)
        # Parse number words
        nums = {
            'zero': 0,
            'one': 1,
            'two': 2,
            'three': 3,
            'four': 4,
            'five': 5,
            'six': 6,
            'seven': 7,
            'eight': 8,
            'nine': 9,
            'ten': 10,
            'eleven': 11,
            'twelve': 12,
            'thirteen': 13,
            'fourteen': 14,
            'fifteen': 15,
            'sixteen': 16,
            'seventeen': 17,
            'eighteen': 18,
            'nineteen': 19,
            'twenty': 20,
            'thirty': 30,
            'forty': 40,
            'fifty': 50,
            'sixty': 60,
            'seventy': 70,
            'eighty': 80,
            'ninety': 90,
            'hundred': 1e2,
            'thousand': 1e3,
            'million': 1e6,
            'billion': 1e9,
            'trillion': 1e12,
            'quadrillion': 1e15,
            'quintillion': 1e18,
            'sextillion': 1e21,
            'septillion': 1e24,
            'octillion': 1e27,
            'nonillion': 1e30,
            'decillion': 1e33,
            'dozen': 12,
            'gross': 144
        }
        # Replace number words with numerals
        s = escaped_string.replace('-', ' ')
        s = s.replace('stone', 'rock')  # Escape stone unit
        for n in nums:
            r = re.compile(re.escape(n), re.IGNORECASE)
            s = r.sub('*' + '{:f}'.format(nums[n]), s)
        s = s.replace('rock', 'stone')  # Unescape stone unit
        # FIX: startswith() tolerates an empty string; s[0] raised IndexError
        if s.startswith('*'):
            s = '1' + s
        # Allow caret power notation to work
        s = s.replace('^', '**')
        # Split at first remaining letter
        i = re.search('[^a-zA-Z]*', s).end()
        # Parse numerical expression
        if i != 0:
            # FIX: rewrite only the numeric prefix itself; the former
            # s.replace(s[:i], ...) also clobbered any later occurrence
            # of the same substring.
            s = str(simpleeval.simple_eval(s[:i])) + ' ' + s[i:]
        # Parse escaped input expression
        q = None
        try:
            q = self.ureg.parse_expression(s)
        except pint.UndefinedUnitError:
            # Retry all underscore/space joinings of the tokens (pint
            # spells multi-word units with underscores).
            split = s.split(' ')
            length = len(split) - 1
            for combo in itertools.product("_ ", repeat=length):
                joined = split[0]
                for j in range(length):
                    joined += combo[j] + split[j + 1]
                try:
                    q = self.ureg.parse_expression(joined)
                    break
                except pint.UndefinedUnitError:
                    pass
            if q is None:
                # Re-parse to propagate the original UndefinedUnitError
                self.ureg.parse_expression(s)
        if not hasattr(q, 'magnitude'):
            raise NoDimensionsError()
        # Input interpretation (HTML snippet; the stray '</sup>' below is
        # paired with the '<sup>' inserted by the exponent substitutions)
        mag = '{:.3g}'.format(q.magnitude)
        if re.search('[e]', mag):
            a = mag[:mag.index('e') + 2]  # Strip leading zeros on exponent
            b = mag[mag.index('e') + 2:]
            b = re.sub('^(0*)', '', b)
            mag = '<span>' + a + b + '</sup></span>'
        mag = re.sub(r'(e\+)', ' × 10<sup>', mag)
        mag = re.sub(r'(e\-)', ' × 10<sup>-', mag)
        units = ''
        for k in q.units.keys():
            units += '<span>' + k
            if q.units[k] != 1:
                units += '<sup>' + str(int(q.units[k])) + '</sup>'
            units += '</span> '
        units = units.strip().replace('_', ' ')
        input_interp = '%s %s' % (mag, units)
        base_units = {
            'area': (self.ureg.meter ** 2),
            'density': (self.ureg.kilogram / self.ureg.meter ** 3),
            'frequency': (self.ureg.hertz),
            'length': (self.ureg.meter),
            'mass': (self.ureg.gram),
            'power': (self.ureg.watt),
            'voltage': (self.ureg.volt),
            'speed': (self.ureg.kph),
            'temperature': (self.ureg.kelvin),
            'time': (self.ureg.second),
            'torque': (self.ureg.newton * self.ureg.meter),
            'volume': (self.ureg.meter ** 3)
        }
        dimension = None
        for u in base_units:
            if hasattr(q, 'dimensionality') \
                    and base_units[u].dimensionality == q.dimensionality:
                qb = q.to(base_units[u].units)
                dimension = u
                break
        if not dimension:
            raise UnsupportedDimensionsError(input_interp)
        base_unit = base_units[dimension]
        # Either closest or 2nd closest value (good enough and very fast)
        index = np.searchsorted(self.data[dimension][2], qb.magnitude)
        index = min(index, len(self.data[dimension][2]) - 1)
        index = max(index, 0)
        closest_match = match(
            self.data[dimension][0][index],
            self.data[dimension][1][index],
            qb.magnitude / self.data[dimension][2][index],
            (self.data[dimension][2][index]
             * base_unit).to(q.units).magnitude,
            q.units, dimension, self.data[dimension][3][index])
        # Close comparisons
        close_matches = []
        prev_close_matches = []
        length = len(self.data[dimension][2])
        dist = min(250, int(0.05 * length))
        # FIX: clamp the sampling window to valid indices; the previous
        # arithmetic (index + max(-dist, 0), index + min(dist, length))
        # only looked at larger neighbours and could index past the end
        # of the database arrays.
        indices = (max(index - dist, 0), min(index + dist, length - 1))
        popularity = self.data[dimension][4][indices[0]:indices[1]]
        pop_dist = list(itertools.accumulate(popularity))
        # Cap the number of draws at the window size to avoid an endless
        # dedup loop when the window has too few distinct entries.
        for i in range(min(close_count, indices[1] - indices[0] + 1)):
            while True:
                if i < close_count - 1 and pop_dist:
                    # Popularity-weighted draw
                    ri = random.random() * pop_dist[-1]
                    ri = indices[0] + bisect.bisect(pop_dist, ri)
                else:
                    ri = random.randint(*indices)
                if ri not in prev_close_matches:
                    break
            prev_close_matches.append(ri)
            close_matches.append(match(
                self.data[dimension][0][ri],
                self.data[dimension][1][ri],
                qb.magnitude / self.data[dimension][2][ri],
                (self.data[dimension][2][ri]
                 * base_unit).to(q.units).magnitude,
                q.units, dimension, self.data[dimension][3][ri]))
        # Random comparisons
        random_matches = []
        prev_random_matches = []
        indices = (0, len(self.data[dimension][2]) - 1)
        popularity = self.data[dimension][4]
        pop_dist = list(itertools.accumulate(popularity))
        for i in range(min(random_count, indices[1] - indices[0] + 1)):
            while True:
                # FIX: the bound previously compared against close_count
                if i < random_count - 1 and pop_dist:
                    ri = random.random() * pop_dist[-1]
                    ri = indices[0] + bisect.bisect(pop_dist, ri)
                else:
                    ri = random.randint(*indices)
                if ri not in prev_random_matches:
                    break
            # FIX: record the draw; previously the dedup list was never
            # updated, so duplicate random matches could be returned.
            prev_random_matches.append(ri)
            random_matches.append(match(
                self.data[dimension][0][ri],
                self.data[dimension][1][ri],
                qb.magnitude / self.data[dimension][2][ri],
                (self.data[dimension][2][ri]
                 * base_unit).to(q.units).magnitude,
                q.units, dimension, self.data[dimension][3][ri]))
        return input_interp, closest_match, close_matches, random_matches

    def statistics(self):
        '''
        Returns the total number of quantities in the database.
        '''
        return sum(len(self.data[t][2]) for t in self.data)
| |
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from datetime import timedelta, time
from airflow import DAG, configuration, settings
from airflow import exceptions
from airflow.exceptions import AirflowSensorTimeout
from airflow.models import TaskInstance, DagBag
from airflow.operators.bash_operator import BashOperator
from airflow.operators.dummy_operator import DummyOperator
from airflow.sensors.external_task_sensor import ExternalTaskSensor
from airflow.sensors.time_sensor import TimeSensor
from airflow.utils.state import State
from airflow.utils.timezone import datetime
# Run the whole module against Airflow's test configuration.
configuration.load_test_config()
# Fixed execution date shared by all tests below.
DEFAULT_DATE = datetime(2015, 1, 1)
TEST_DAG_ID = 'unit_test_dag'
TEST_TASK_ID = 'time_sensor_check'
# DagBag folder guaranteed to contain no DAG files.
DEV_NULL = '/dev/null'
class ExternalTaskSensorTests(unittest.TestCase):
    """Unit tests for ExternalTaskSensor (and the TimeSensor it waits on)."""

    def setUp(self):
        """Create fresh default args and a DAG for every test."""
        configuration.load_test_config()
        self.dagbag = DagBag(
            dag_folder=DEV_NULL,
            include_examples=True
        )
        self.args = {
            'owner': 'airflow',
            'start_date': DEFAULT_DATE
        }
        self.dag = DAG(TEST_DAG_ID, default_args=self.args)

    def test_time_sensor(self):
        """Run the TimeSensor task whose instance later sensors wait for."""
        t = TimeSensor(
            task_id=TEST_TASK_ID,
            target_time=time(0),
            dag=self.dag
        )
        t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)

    def test_external_task_sensor(self):
        """Sensor succeeds once the external task instance exists."""
        # Seed the task instance the sensor will wait for.
        self.test_time_sensor()
        t = ExternalTaskSensor(
            task_id='test_external_task_sensor_check',
            external_dag_id=TEST_DAG_ID,
            external_task_id=TEST_TASK_ID,
            dag=self.dag
        )
        t.run(
            start_date=DEFAULT_DATE,
            end_date=DEFAULT_DATE,
            ignore_ti_state=True
        )

    def test_external_dag_sensor(self):
        """external_task_id=None makes the sensor wait on a whole DAG run."""
        other_dag = DAG(
            'other_dag',
            default_args=self.args,
            end_date=DEFAULT_DATE,
            schedule_interval='@once')
        other_dag.create_dagrun(
            run_id='test',
            start_date=DEFAULT_DATE,
            execution_date=DEFAULT_DATE,
            state=State.SUCCESS)
        t = ExternalTaskSensor(
            task_id='test_external_dag_sensor_check',
            external_dag_id='other_dag',
            external_task_id=None,
            dag=self.dag
        )
        t.run(
            start_date=DEFAULT_DATE,
            end_date=DEFAULT_DATE,
            ignore_ti_state=True
        )

    def test_templated_sensor(self):
        """external_dag_id / external_task_id are template fields."""
        # FIX: default_args must be passed by keyword; passed positionally
        # it landed in the DAG constructor's `description` parameter and
        # the default args were silently dropped.
        dag = DAG(TEST_DAG_ID, default_args=self.args)
        with dag:
            sensor = ExternalTaskSensor(
                task_id='templated_task',
                external_dag_id='dag_{{ ds }}',
                external_task_id='task_{{ ds }}',
                start_date=DEFAULT_DATE
            )
        instance = TaskInstance(sensor, DEFAULT_DATE)
        instance.render_templates()
        self.assertEqual(sensor.external_dag_id,
                         "dag_{}".format(DEFAULT_DATE.date()))
        self.assertEqual(sensor.external_task_id,
                         "task_{}".format(DEFAULT_DATE.date()))

    def test_external_task_sensor_fn_multiple_execution_dates(self):
        """execution_date_fn may return a list of dates to wait for."""
        bash_command_code = """
        {% set s=execution_date.time().second %}
        echo "second is {{ s }}"
        if [[ $(( {{ s }} % 60 )) == 1 ]]
            then
                exit 1
        fi
        exit 0
        """
        dag_external_id = TEST_DAG_ID + '_external'
        dag_external = DAG(
            dag_external_id,
            default_args=self.args,
            schedule_interval=timedelta(seconds=1))
        task_external_with_failure = BashOperator(
            task_id="task_external_with_failure",
            bash_command=bash_command_code,
            retries=0,
            dag=dag_external)
        task_external_without_failure = DummyOperator(
            task_id="task_external_without_failure",
            retries=0,
            dag=dag_external)
        task_external_without_failure.run(
            start_date=DEFAULT_DATE,
            end_date=DEFAULT_DATE + timedelta(seconds=1),
            ignore_ti_state=True)
        session = settings.Session()
        TI = TaskInstance
        try:
            task_external_with_failure.run(
                start_date=DEFAULT_DATE,
                end_date=DEFAULT_DATE + timedelta(seconds=1),
                ignore_ti_state=True)
            # The test_with_failure task is expected to fail
            # once per minute (the run on the first second of
            # each minute).
        except Exception:
            failed_tis = session.query(TI).filter(
                TI.dag_id == dag_external_id,
                TI.state == State.FAILED,
                TI.execution_date == DEFAULT_DATE + timedelta(seconds=1)).all()
            if len(failed_tis) == 1 and \
                    failed_tis[0].task_id == 'task_external_with_failure':
                pass
            else:
                # FIX: bare `raise` keeps the original traceback
                # (previously `raise e` re-raised and truncated it).
                raise
        dag_id = TEST_DAG_ID
        dag = DAG(
            dag_id,
            default_args=self.args,
            schedule_interval=timedelta(minutes=1))
        task_without_failure = ExternalTaskSensor(
            task_id='task_without_failure',
            external_dag_id=dag_external_id,
            external_task_id='task_external_without_failure',
            execution_date_fn=lambda dt: [dt + timedelta(seconds=i)
                                          for i in range(2)],
            allowed_states=['success'],
            retries=0,
            timeout=1,
            poke_interval=1,
            dag=dag)
        task_with_failure = ExternalTaskSensor(
            task_id='task_with_failure',
            external_dag_id=dag_external_id,
            external_task_id='task_external_with_failure',
            execution_date_fn=lambda dt: [dt + timedelta(seconds=i)
                                          for i in range(2)],
            allowed_states=['success'],
            retries=0,
            timeout=1,
            poke_interval=1,
            dag=dag)
        task_without_failure.run(
            start_date=DEFAULT_DATE,
            end_date=DEFAULT_DATE,
            ignore_ti_state=True)
        with self.assertRaises(AirflowSensorTimeout):
            task_with_failure.run(
                start_date=DEFAULT_DATE,
                end_date=DEFAULT_DATE,
                ignore_ti_state=True)

    def test_external_task_sensor_delta(self):
        """execution_delta shifts the execution date being sensed."""
        self.test_time_sensor()
        t = ExternalTaskSensor(
            task_id='test_external_task_sensor_check_delta',
            external_dag_id=TEST_DAG_ID,
            external_task_id=TEST_TASK_ID,
            execution_delta=timedelta(0),
            allowed_states=['success'],
            dag=self.dag
        )
        t.run(
            start_date=DEFAULT_DATE,
            end_date=DEFAULT_DATE,
            ignore_ti_state=True
        )

    def test_external_task_sensor_fn(self):
        """execution_date_fn computes the execution date being sensed."""
        self.test_time_sensor()
        # check that the execution_fn works
        t = ExternalTaskSensor(
            task_id='test_external_task_sensor_check_delta',
            external_dag_id=TEST_DAG_ID,
            external_task_id=TEST_TASK_ID,
            execution_date_fn=lambda dt: dt + timedelta(0),
            allowed_states=['success'],
            dag=self.dag
        )
        t.run(
            start_date=DEFAULT_DATE,
            end_date=DEFAULT_DATE,
            ignore_ti_state=True
        )
        # double check that the execution is being called by failing the test
        # FIX: give the second sensor its own task_id; it previously reused
        # the first sensor's id and silently replaced it in the DAG.
        t2 = ExternalTaskSensor(
            task_id='test_external_task_sensor_check_delta_2',
            external_dag_id=TEST_DAG_ID,
            external_task_id=TEST_TASK_ID,
            execution_date_fn=lambda dt: dt + timedelta(days=1),
            allowed_states=['success'],
            timeout=1,
            poke_interval=1,
            dag=self.dag
        )
        with self.assertRaises(exceptions.AirflowSensorTimeout):
            t2.run(
                start_date=DEFAULT_DATE,
                end_date=DEFAULT_DATE,
                ignore_ti_state=True
            )

    def test_external_task_sensor_error_delta_and_fn(self):
        """Providing both execution_delta and execution_date_fn is invalid."""
        self.test_time_sensor()
        # Test that providing execution_delta and a function raises an error
        with self.assertRaises(ValueError):
            ExternalTaskSensor(
                task_id='test_external_task_sensor_check_delta',
                external_dag_id=TEST_DAG_ID,
                external_task_id=TEST_TASK_ID,
                execution_delta=timedelta(0),
                execution_date_fn=lambda dt: dt,
                allowed_states=['success'],
                dag=self.dag
            )
| |
# Copyright (c) 2013 Hortonworks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from oslo.config import cfg
import six
from sahara import exceptions as e
from sahara.plugins.general import exceptions as ex
from sahara.plugins.general import utils
from sahara.swift import swift_helper as h
from sahara.topology import topology_helper as th
# Global oslo.config options handle.
CONF = cfg.CONF
# core-site properties injected when data-locality awareness is enabled
# (consumed by HdfsService.finalize_configuration).
TOPOLOGY_CONFIG = {
    "topology.node.switch.mapping.impl":
        "org.apache.hadoop.net.ScriptBasedMapping",
    "topology.script.file.name":
        "/etc/hadoop/conf/topology.sh"
}
def create_service(name):
    """Instantiate the Service subclass registered under *name*.

    Falls back to a plain ``Service`` wrapper when no subclass
    advertises the requested service id.
    """
    matched = next(
        (sub for sub in Service.__subclasses__()
         if sub.get_service_id() == name),
        None)
    # no subclass found, return service base class
    return Service(name) if matched is None else matched()
class Service(object):
    """Base class for an Ambari-managed service.

    Subclasses override the hook methods below to validate cluster
    topology, finalize configuration and register service URLs.
    """

    def __init__(self, name):
        self.name = name
        # every service participates in these configuration sections
        self.configurations = set(['global', 'core-site'])
        self.components = []
        self.users = []
        self.deployed = False

    def add_component(self, component):
        self.components.append(component)

    def add_user(self, user):
        self.users.append(user)

    def validate(self, cluster_spec, cluster):
        # hook: raise on invalid cluster topology
        pass

    def finalize_configuration(self, cluster_spec):
        # hook: resolve host tokens once hosts are known
        pass

    def register_user_input_handlers(self, ui_handlers):
        # hook: map user-input property names to handler callables
        pass

    def register_service_urls(self, cluster_spec, url_info):
        return url_info

    def pre_service_start(self, cluster_spec, ambari_info, started_services):
        # hook: run right before the service is started
        pass

    def finalize_ng_components(self, cluster_spec):
        # hook: adjust node-group components before deployment
        pass

    def is_user_template_component(self, component):
        return True

    def is_mandatory(self):
        return False

    def _replace_config_token(self, cluster_spec, token, value, props):
        """Replace *token* with *value* in the listed properties.

        *props* maps configuration names to lists of property names.
        """
        # FIX: the loop variable used to shadow the *props* parameter it
        # iterates, which only worked by CPython iterator semantics.
        for config_name, prop_names in six.iteritems(props):
            config = cluster_spec.configurations[config_name]
            for prop in prop_names:
                config[prop] = config[prop].replace(token, value)

    def _update_config_values(self, configurations, value, props):
        # entries in *props* look like '<config-name>/<property-name>'
        for absolute_prop_name in props:
            tokens = absolute_prop_name.split('/')
            config_name = tokens[0]
            prop_name = tokens[1]
            config = configurations[config_name]
            config[prop_name] = value

    def _get_common_paths(self, node_groups):
        """Return the storage paths shared by all *node_groups*, preferring
        real devices over the default '/mnt' when alternatives exist."""
        if len(node_groups) == 1:
            paths = node_groups[0].storage_paths()
        else:
            sets = [set(ng.storage_paths()) for ng in node_groups]
            paths = list(set.intersection(*sets))
        if len(paths) > 1 and '/mnt' in paths:
            paths.remove('/mnt')
        return paths

    def _generate_storage_path(self, storage_paths, path):
        # ['/a', '/b'] + '/x'  ->  '/a/x,/b/x'
        return ",".join([p + path for p in storage_paths])

    def _get_port_from_cluster_spec(self, cluster_spec, service, prop_name):
        address = cluster_spec.configurations[service][prop_name]
        return utils.get_port_from_address(address)
class HdfsService(Service):
    """HDFS service: topology validation, configuration finalization and
    web/RPC URL registration for an Ambari-managed HDP cluster."""
    def __init__(self):
        super(HdfsService, self).__init__(HdfsService.get_service_id())
        self.configurations.add('hdfs-site')
    @classmethod
    def get_service_id(cls):
        # Ambari service identifier
        return 'HDFS'
    def validate(self, cluster_spec, cluster):
        # check for a single NAMENODE
        count = cluster_spec.get_deployed_node_group_count('NAMENODE')
        if count != 1:
            raise ex.InvalidComponentCountException('NAMENODE', 1, count)
    def finalize_configuration(self, cluster_spec):
        """Resolve host tokens, inject swift/topology props and derive
        storage directories from the node groups' storage paths."""
        nn_hosts = cluster_spec.determine_component_hosts('NAMENODE')
        if nn_hosts:
            props = {'core-site': ['fs.default.name'],
                     'hdfs-site': ['dfs.http.address', 'dfs.https.address']}
            self._replace_config_token(
                cluster_spec, '%NN_HOST%', nn_hosts.pop().fqdn(), props)
        snn_hosts = cluster_spec.determine_component_hosts(
            'SECONDARY_NAMENODE')
        if snn_hosts:
            props = {'hdfs-site': ['dfs.secondary.http.address']}
            self._replace_config_token(
                cluster_spec, '%SNN_HOST%', snn_hosts.pop().fqdn(), props)
        # add swift properties to configuration
        core_site_config = cluster_spec.configurations['core-site']
        for prop in self._get_swift_properties():
            core_site_config[prop['name']] = prop['value']
        # add topology properties to configuration, if enabled
        if CONF.enable_data_locality:
            for prop in th.vm_awareness_core_config():
                core_site_config[prop['name']] = prop['value']
            core_site_config.update(TOPOLOGY_CONFIG)
        # process storage paths to accommodate ephemeral or cinder storage
        nn_ng = cluster_spec.get_node_groups_containing_component(
            'NAMENODE')[0]
        dn_node_groups = cluster_spec.get_node_groups_containing_component(
            'DATANODE')
        common_paths = []
        if dn_node_groups:
            common_paths = self._get_common_paths(dn_node_groups)
        hdfs_site_config = cluster_spec.configurations['hdfs-site']
        global_config = cluster_spec.configurations['global']
        hdfs_site_config['dfs.name.dir'] = self._generate_storage_path(
            nn_ng.storage_paths(), '/hadoop/hdfs/namenode')
        global_config['dfs_name_dir'] = self._generate_storage_path(
            nn_ng.storage_paths(), '/hadoop/hdfs/namenode')
        if common_paths:
            hdfs_site_config['dfs.data.dir'] = self._generate_storage_path(
                common_paths, '/hadoop/hdfs/data')
            global_config['dfs_data_dir'] = self._generate_storage_path(
                common_paths, '/hadoop/hdfs/data')
    def register_service_urls(self, cluster_spec, url_info):
        """Publish the NameNode web UI and HDFS RPC endpoints."""
        namenode_ip = cluster_spec.determine_component_hosts(
            'NAMENODE').pop().management_ip
        ui_port = self._get_port_from_cluster_spec(cluster_spec, 'hdfs-site',
                                                   'dfs.http.address')
        nn_port = self._get_port_from_cluster_spec(cluster_spec, 'core-site',
                                                   'fs.default.name')
        url_info['HDFS'] = {
            'Web UI': 'http://%s:%s' % (namenode_ip, ui_port),
            'NameNode': 'hdfs://%s:%s' % (namenode_ip, nn_port)
        }
        return url_info
    def is_mandatory(self):
        # every cluster needs HDFS
        return True
    def _get_swift_properties(self):
        return h.get_swift_configs()
class MapReduceService(Service):
    """MapReduce (MRv1) service: validation, configuration finalization
    and JobTracker URL registration."""
    def __init__(self):
        super(MapReduceService, self).__init__(
            MapReduceService.get_service_id())
        self.configurations.add('mapred-site')
    @classmethod
    def get_service_id(cls):
        # Ambari service identifier
        return 'MAPREDUCE'
    def validate(self, cluster_spec, cluster):
        # exactly one JOBTRACKER and at least one TASKTRACKER required
        count = cluster_spec.get_deployed_node_group_count('JOBTRACKER')
        if count != 1:
            raise ex.InvalidComponentCountException('JOBTRACKER', 1, count)
        count = cluster_spec.get_deployed_node_group_count('TASKTRACKER')
        if not count:
            raise ex.InvalidComponentCountException(
                'TASKTRACKER', '> 0', count)
    def finalize_configuration(self, cluster_spec):
        """Resolve JT/HS host tokens, data-locality props and local dirs."""
        jt_hosts = cluster_spec.determine_component_hosts('JOBTRACKER')
        if jt_hosts:
            props = {'mapred-site': ['mapred.job.tracker',
                                     'mapred.job.tracker.http.address',
                                     'mapreduce.history.server.http.address']}
            self._replace_config_token(
                cluster_spec, '%JT_HOST%', jt_hosts.pop().fqdn(), props)
        # HISTORYSERVER component now a part of MapReduce 1 in Ambari 1.6.0
        hs_hosts = cluster_spec.determine_component_hosts('HISTORYSERVER')
        if hs_hosts:
            props = {'mapred-site': ['mapreduce.jobhistory.webapp.address']}
            self._replace_config_token(
                cluster_spec, '%HS_HOST%', hs_hosts.pop().fqdn(), props)
        # data locality/rack awareness prop processing
        mapred_site_config = cluster_spec.configurations['mapred-site']
        if CONF.enable_data_locality:
            for prop in th.vm_awareness_mapred_config():
                mapred_site_config[prop['name']] = prop['value']
        # process storage paths to accommodate ephemeral or cinder storage
        # NOTE: mapred.system.dir is an HDFS namespace path (not a filesystem
        # path) so the default path should suffice
        tt_node_groups = cluster_spec.get_node_groups_containing_component(
            'TASKTRACKER')
        if tt_node_groups:
            global_config = cluster_spec.configurations['global']
            common_paths = self._get_common_paths(tt_node_groups)
            mapred_site_config['mapred.local.dir'] = (
                self._generate_storage_path(common_paths, '/hadoop/mapred'))
            global_config['mapred_local_dir'] = self._generate_storage_path(
                common_paths, '/hadoop/mapred')
    def finalize_ng_components(self, cluster_spec):
        # add HISTORYSERVER, since HDP 1.3.2 stack was
        # modified in Ambari 1.5.1/1.6.0 to include this component
        # in the MAPREDUCE service
        ambari_server_ngs = (
            cluster_spec.get_node_groups_containing_component('JOBTRACKER'))
        for ng in ambari_server_ngs:
            if 'HISTORYSERVER' not in ng.components:
                ng.components.append('HISTORYSERVER')
    def register_service_urls(self, cluster_spec, url_info):
        """Publish the JobHistory web UI and JobTracker endpoints."""
        jobtracker_ip = cluster_spec.determine_component_hosts(
            'JOBTRACKER').pop().management_ip
        ui_port = self._get_port_from_cluster_spec(
            cluster_spec, 'mapred-site', 'mapreduce.jobhistory.webapp.address')
        jt_port = self._get_port_from_cluster_spec(
            cluster_spec, 'mapred-site', 'mapred.job.tracker')
        url_info['MapReduce'] = {
            'Web UI': 'http://%s:%s' % (jobtracker_ip, ui_port),
            'JobTracker': '%s:%s' % (jobtracker_ip, jt_port)
        }
        return url_info
    def is_mandatory(self):
        # every cluster needs MapReduce
        return True
class HiveService(Service):
    """Hive service: validation, host-token resolution, metastore user
    input handling and pre-start HDFS directory provisioning."""
    def __init__(self):
        super(HiveService, self).__init__(HiveService.get_service_id())
        self.configurations.add('hive-site')
    @classmethod
    def get_service_id(cls):
        # Ambari service identifier
        return 'HIVE'
    def validate(self, cluster_spec, cluster):
        # exactly one HIVE_SERVER required
        count = cluster_spec.get_deployed_node_group_count('HIVE_SERVER')
        if count != 1:
            raise ex.InvalidComponentCountException('HIVE_SERVER', 1, count)
    def finalize_configuration(self, cluster_spec):
        """Resolve Hive server/metastore/MySQL host tokens."""
        hive_servers = cluster_spec.determine_component_hosts('HIVE_SERVER')
        if hive_servers:
            props = {'global': ['hive_hostname'],
                     'core-site': ['hadoop.proxyuser.hive.hosts'],
                     'hive-site': ['javax.jdo.option.ConnectionURL']}
            self._replace_config_token(
                cluster_spec, '%HIVE_HOST%', hive_servers.pop().fqdn(), props)
        hive_ms = cluster_spec.determine_component_hosts('HIVE_METASTORE')
        if hive_ms:
            self._replace_config_token(
                cluster_spec, '%HIVE_METASTORE_HOST%', hive_ms.pop().fqdn(),
                {'hive-site': ['hive.metastore.uris']})
        hive_mysql = cluster_spec.determine_component_hosts('MYSQL_SERVER')
        if hive_mysql:
            self._replace_config_token(
                cluster_spec, '%HIVE_MYSQL_HOST%', hive_mysql.pop().fqdn(),
                {'global': ['hive_jdbc_connection_url']})
    def register_user_input_handlers(self, ui_handlers):
        # metastore credentials must be mirrored into the 'global' section
        ui_handlers['hive-site/javax.jdo.option.ConnectionUserName'] = (
            self._handle_user_property_metastore_user)
        ui_handlers['hive-site/javax.jdo.option.ConnectionPassword'] = (
            self._handle_user_property_metastore_pwd)
    def _handle_user_property_metastore_user(self, user_input, configurations):
        hive_site_config_map = configurations['hive-site']
        hive_site_config_map['javax.jdo.option.ConnectionUserName'] = (
            user_input.value)
        global_config_map = configurations['global']
        global_config_map['hive_metastore_user_name'] = user_input.value
    def _handle_user_property_metastore_pwd(self, user_input, configurations):
        hive_site_config_map = configurations['hive-site']
        hive_site_config_map['javax.jdo.option.ConnectionPassword'] = (
            user_input.value)
        global_config_map = configurations['global']
        global_config_map['hive_metastore_user_passwd'] = user_input.value
    def finalize_ng_components(self, cluster_spec):
        """Co-locate metastore/MySQL/ZooKeeper with the Hive server when
        they are not deployed elsewhere in the cluster."""
        hive_ng = cluster_spec.get_node_groups_containing_component(
            'HIVE_SERVER')[0]
        components = hive_ng.components
        if not cluster_spec.get_deployed_node_group_count('HIVE_METASTORE'):
            components.append('HIVE_METASTORE')
        if not cluster_spec.get_deployed_node_group_count('MYSQL_SERVER'):
            components.append('MYSQL_SERVER')
        if not cluster_spec.get_deployed_node_group_count('ZOOKEEPER_SERVER'):
            zk_service = next(service for service in cluster_spec.services
                              if service.name == 'ZOOKEEPER')
            zk_service.deployed = True
            components.append('ZOOKEEPER_SERVER')
    def pre_service_start(self, cluster_spec, ambari_info, started_services):
        # this code is needed because of a bug in Ambari where hdfs dir's
        # are only created at NN initial startup. Remove this code when
        # the bug is fixed in Ambari.
        if 'HDFS' not in started_services:
            return
        # get any instance
        with cluster_spec.servers[0].remote() as r:
            r.execute_command('su -c "hadoop fs -mkdir /user/hive" '
                              '-s /bin/sh hdfs')
            r.execute_command('su -c "hadoop fs -chown -R '
                              'hive:hdfs /user/hive" -s /bin/sh hdfs')
            r.execute_command('su -c "hadoop fs -mkdir /apps/hive" '
                              '-s /bin/sh hdfs')
            r.execute_command('su -c "hadoop fs -chmod -R 755 /apps/hive" '
                              '-s /bin/sh hdfs')
            r.execute_command('su -c "hadoop fs -mkdir /apps/hive/warehouse" '
                              '-s /bin/sh hdfs')
            r.execute_command('su -c "hadoop fs -chown -R hive:hdfs '
                              '/apps/hive/warehouse" -s /bin/sh hdfs')
            r.execute_command('su -c "hadoop fs -chmod -R 777 '
                              '/apps/hive/warehouse" -s /bin/sh hdfs')
class WebHCatService(Service):
    """WebHCat (Templeton) service: validation, host-token resolution,
    node-group finalization and pre-start HDFS directory provisioning."""
    def __init__(self):
        super(WebHCatService, self).__init__(WebHCatService.get_service_id())
        self.configurations.add('webhcat-site')
    @classmethod
    def get_service_id(cls):
        # Ambari service identifier
        return 'WEBHCAT'
    def validate(self, cluster_spec, cluster):
        # exactly one WEBHCAT_SERVER required
        count = cluster_spec.get_deployed_node_group_count('WEBHCAT_SERVER')
        if count != 1:
            raise ex.InvalidComponentCountException('WEBHCAT_SERVER', 1, count)
    def finalize_configuration(self, cluster_spec):
        """Resolve WebHCat/Hive-metastore/ZooKeeper host tokens."""
        webhcat_servers = cluster_spec.determine_component_hosts(
            'WEBHCAT_SERVER')
        if webhcat_servers:
            self._replace_config_token(
                cluster_spec, '%WEBHCAT_HOST%', webhcat_servers.pop().fqdn(),
                {'core-site': ['hadoop.proxyuser.hcat.hosts']})
        hive_ms_servers = cluster_spec.determine_component_hosts(
            'HIVE_METASTORE')
        if hive_ms_servers:
            self._replace_config_token(
                cluster_spec, '%HIVE_METASTORE_HOST%',
                hive_ms_servers.pop().fqdn(),
                {'webhcat-site': ['templeton.hive.properties']})
        zk_servers = cluster_spec.determine_component_hosts('ZOOKEEPER_SERVER')
        if zk_servers:
            self._replace_config_token(
                cluster_spec, '%ZOOKEEPER_HOST%', zk_servers.pop().fqdn(),
                {'webhcat-site': ['templeton.zookeeper.hosts']})
    def finalize_ng_components(self, cluster_spec):
        """Ensure the WebHCat node group carries the client components
        (and a ZooKeeper server if none is deployed elsewhere)."""
        webhcat_ng = cluster_spec.get_node_groups_containing_component(
            'WEBHCAT_SERVER')[0]
        components = webhcat_ng.components
        if 'HDFS_CLIENT' not in components:
            components.append('HDFS_CLIENT')
        if 'MAPREDUCE_CLIENT' not in components:
            components.append('MAPREDUCE_CLIENT')
        if 'ZOOKEEPER_CLIENT' not in components:
            # if zk server isn't in cluster, add to ng
            if not cluster_spec.get_deployed_node_group_count(
                    'ZOOKEEPER_SERVER'):
                zk_service = next(service for service in cluster_spec.services
                                  if service.name == 'ZOOKEEPER')
                zk_service.deployed = True
                components.append('ZOOKEEPER_SERVER')
            components.append('ZOOKEEPER_CLIENT')
    def pre_service_start(self, cluster_spec, ambari_info, started_services):
        # this code is needed because of a bug in Ambari where hdfs dir's
        # are only created at NN initial startup. Remove this code when
        # the bug is fixed in Ambari.
        if 'HDFS' not in started_services:
            return
        # get any instance
        with cluster_spec.servers[0].remote() as r:
            r.execute_command('su -c "hadoop fs -mkdir /user/hcat" '
                              '-s /bin/sh hdfs')
            r.execute_command('su -c "hadoop fs -chown -R hcat:hdfs '
                              '/user/hcat" -s /bin/sh hdfs')
            r.execute_command('su -c "hadoop fs -chmod -R 755 /user/hcat" '
                              '-s /bin/sh hdfs')
            r.execute_command('su -c "hadoop fs -mkdir /apps/webhcat" '
                              '-s /bin/sh hdfs')
            r.execute_command('su -c "hadoop fs -chown -R hcat:hdfs '
                              '/apps/webhcat" -s /bin/sh hdfs')
            r.execute_command('su -c "hadoop fs -chmod -R 755 /apps/webhcat" '
                              '-s /bin/sh hdfs')
class HBaseService(Service):
property_map = {
'hbase-site/hbase.tmp.dir': [
'hbase-site/hbase.tmp.dir', 'global/hbase_tmp_dir'],
'hbase-site/hbase.regionserver.global.memstore.upperLimit': [
'hbase-site/hbase.regionserver.global.memstore.upperLimit',
'global/regionserver_memstore_upperlimit'],
'hbase-site/hbase.hstore.blockingStoreFiles': [
'hbase-site/hbase.hstore.blockingStoreFiles',
'global/hstore_blockingstorefiles'],
'hbase-site/hbase.hstore.compactionThreshold': [
'hbase-site/hbase.hstore.compactionThreshold',
'global/hstore_compactionthreshold'],
'hbase-site/hfile.block.cache.size': [
'hbase-site/hfile.block.cache.size',
'global/hfile_blockcache_size'],
'hbase-site/hbase.hregion.max.filesize': [
'hbase-site/hbase.hregion.max.filesize',
'global/hstorefile_maxsize'],
'hbase-site/hbase.regionserver.handler.count': [
'hbase-site/hbase.regionserver.handler.count',
'global/regionserver_handlers'],
'hbase-site/hbase.hregion.majorcompaction': [
'hbase-site/hbase.hregion.majorcompaction',
'global/hregion_majorcompaction'],
'hbase-site/hbase.regionserver.global.memstore.lowerLimit': [
'hbase-site/hbase.regionserver.global.memstore.lowerLimit',
'global/regionserver_memstore_lowerlimit'],
'hbase-site/hbase.hregion.memstore.block.multiplier': [
'hbase-site/hbase.hregion.memstore.block.multiplier',
'global/hregion_blockmultiplier'],
'hbase-site/hbase.hregion.memstore.mslab.enabled': [
'hbase-site/hbase.hregion.memstore.mslab.enabled',
'global/regionserver_memstore_lab'],
'hbase-site/hbase.hregion.memstore.flush.size': [
'hbase-site/hbase.hregion.memstore.flush.size',
'global/hregion_memstoreflushsize'],
'hbase-site/hbase.client.scanner.caching': [
'hbase-site/hbase.client.scanner.caching',
'global/client_scannercaching'],
'hbase-site/zookeeper.session.timeout': [
'hbase-site/zookeeper.session.timeout',
'global/zookeeper_sessiontimeout'],
'hbase-site/hbase.client.keyvalue.maxsize': [
'hbase-site/hbase.client.keyvalue.maxsize',
'global/hfile_max_keyvalue_size'],
'hdfs-site/dfs.support.append': [
'hdfs-site/dfs.support.append',
'hbase-site/dfs.support.append',
'global/hdfs_support_append'],
'hbase-site/dfs.client.read.shortcircuit': [
'hbase-site/dfs.client.read.shortcircuit',
'global/hdfs_enable_shortcircuit_read']
}
def __init__(self):
super(HBaseService, self).__init__(
HBaseService.get_service_id())
self.configurations.add('hbase-site')
@classmethod
def get_service_id(cls):
return 'HBASE'
def validate(self, cluster_spec, cluster):
# check for a single HBASE_SERVER
count = cluster_spec.get_deployed_node_group_count('HBASE_MASTER')
if count != 1:
raise ex.InvalidComponentCountException('HBASE_MASTER', 1, count)
def register_service_urls(self, cluster_spec, url_info):
master_ip = cluster_spec.determine_component_hosts(
'HBASE_MASTER').pop().management_ip
hbase_config = cluster_spec.configurations['hbase-site']
info_port = hbase_config['hbase.master.info.port']
url_info['HBase'] = {
'Web UI': 'http://%s:%s/master-status' % (master_ip, info_port),
'Logs': 'http://%s:%s/logs' % (master_ip, info_port),
'Zookeeper Info': 'http://%s:%s/zk.jsp' % (master_ip, info_port),
'JMX': 'http://%s:%s/jmx' % (master_ip, info_port),
'Debug Dump': 'http://%s:%s/dump' % (master_ip, info_port),
'Thread Stacks': 'http://%s:%s/stacks' % (master_ip, info_port)
}
return url_info
def register_user_input_handlers(self, ui_handlers):
for prop_name in self.property_map:
ui_handlers[prop_name] = (
self._handle_config_property_update)
ui_handlers['hbase-site/hbase.rootdir'] = (
self._handle_user_property_root_dir)
def _handle_config_property_update(self, user_input, configurations):
self._update_config_values(configurations, user_input.value,
self.property_map[user_input.config.name])
def _handle_user_property_root_dir(self, user_input, configurations):
configurations['hbase-site']['hbase.rootdir'] = user_input.value
match = re.search('(^hdfs://)(.*?)(/.*)', user_input.value)
if match:
configurations['global']['hbase_hdfs_root_dir'] = match.group(3)
else:
raise e.InvalidDataException(
"Invalid value for property 'hbase-site/hbase.rootdir' : %s" %
user_input.value)
def finalize_configuration(self, cluster_spec):
nn_servers = cluster_spec.determine_component_hosts('NAMENODE')
if nn_servers:
self._replace_config_token(
cluster_spec, '%NN_HOST%', nn_servers.pop().fqdn(),
{'hbase-site': ['hbase.rootdir']})
zk_servers = cluster_spec.determine_component_hosts('ZOOKEEPER_SERVER')
if zk_servers:
self._replace_config_token(
cluster_spec, '%ZOOKEEPER_HOST%', zk_servers.pop().fqdn(),
{'hbase-site': ['hbase.zookeeper.quorum']})
    def finalize_ng_components(self, cluster_spec):
        """Ensure the HBase master node group has its required co-services.

        Adds an HDFS client to the master's node group and, when no
        ZooKeeper server is deployed anywhere in the cluster, deploys one
        alongside the HBase master.
        """
        hbase_ng = cluster_spec.get_node_groups_containing_component(
            'HBASE_MASTER')[0]
        components = hbase_ng.components
        if 'HDFS_CLIENT' not in components:
            components.append('HDFS_CLIENT')
        if not cluster_spec.get_deployed_node_group_count('ZOOKEEPER_SERVER'):
            # Mark the ZooKeeper service itself as deployed before adding
            # its server component to this node group.
            zk_service = next(service for service in cluster_spec.services
                              if service.name == 'ZOOKEEPER')
            zk_service.deployed = True
            components.append('ZOOKEEPER_SERVER')
class ZookeeperService(Service):
    """Service definition for ZooKeeper."""

    def __init__(self):
        super(ZookeeperService, self).__init__(
            ZookeeperService.get_service_id())

    @classmethod
    def get_service_id(cls):
        """Return the Ambari identifier for this service."""
        return 'ZOOKEEPER'

    def validate(self, cluster_spec, cluster):
        """Require exactly one deployed ZOOKEEPER_SERVER."""
        deployed = cluster_spec.get_deployed_node_group_count(
            'ZOOKEEPER_SERVER')
        if deployed == 1:
            return
        raise ex.InvalidComponentCountException(
            'ZOOKEEPER_SERVER', 1, deployed)
class OozieService(Service):
    """Service definition for the Oozie workflow scheduler."""

    def __init__(self):
        super(OozieService, self).__init__(OozieService.get_service_id())
        self.configurations.add('oozie-site')

    @classmethod
    def get_service_id(cls):
        """Return the Ambari identifier for this service."""
        return 'OOZIE'

    def validate(self, cluster_spec, cluster):
        """Require exactly one Oozie server and at least one client."""
        count = cluster_spec.get_deployed_node_group_count('OOZIE_SERVER')
        if count != 1:
            raise ex.InvalidComponentCountException(
                'OOZIE_SERVER', 1, count)
        count = cluster_spec.get_deployed_node_group_count('OOZIE_CLIENT')
        if not count:
            raise ex.InvalidComponentCountException(
                'OOZIE_CLIENT', '1+', count)

    def finalize_configuration(self, cluster_spec):
        """Replace %OOZIE_HOST% tokens with the actual server host."""
        oozie_servers = cluster_spec.determine_component_hosts('OOZIE_SERVER')
        if oozie_servers:
            oozie_server = oozie_servers.pop()
            name_list = [oozie_server.fqdn(), oozie_server.internal_ip,
                         oozie_server.management_ip]
            self._replace_config_token(
                cluster_spec, '%OOZIE_HOST%', oozie_server.fqdn(),
                {'global': ['oozie_hostname'],
                 'oozie-site': ['oozie.base.url']})
            # The proxyuser entry lists every known name/IP of the host.
            self._replace_config_token(
                cluster_spec, '%OOZIE_HOST%', ",".join(name_list),
                {'core-site': ['hadoop.proxyuser.oozie.hosts']})

    def finalize_ng_components(self, cluster_spec):
        """Colocate HDFS/MapReduce clients with the Oozie server/clients."""
        oozie_ng = cluster_spec.get_node_groups_containing_component(
            'OOZIE_SERVER')[0]
        components = oozie_ng.components
        if 'HDFS_CLIENT' not in components:
            components.append('HDFS_CLIENT')
        if 'MAPREDUCE_CLIENT' not in components:
            components.append('MAPREDUCE_CLIENT')
        # ensure that mr and hdfs clients are colocated with oozie client
        client_ngs = cluster_spec.get_node_groups_containing_component(
            'OOZIE_CLIENT')
        for ng in client_ngs:
            components = ng.components
            if 'HDFS_CLIENT' not in components:
                components.append('HDFS_CLIENT')
            if 'MAPREDUCE_CLIENT' not in components:
                components.append('MAPREDUCE_CLIENT')

    def register_service_urls(self, cluster_spec, url_info):
        """Expose the Oozie console URL in ``url_info``."""
        oozie_ip = cluster_spec.determine_component_hosts(
            'OOZIE_SERVER').pop().management_ip
        port = self._get_port_from_cluster_spec(cluster_spec, 'oozie-site',
                                                'oozie.base.url')
        url_info['JobFlow'] = {
            'Oozie': 'http://%s:%s' % (oozie_ip, port)
        }
        return url_info

    def register_user_input_handlers(self, ui_handlers):
        """Wire the JDBC credential properties to their handlers.

        Keys follow the '<config-file>/<property>' convention used by every
        other handler registration in this module.
        """
        ui_handlers['oozie-site/oozie.service.JPAService.jdbc.username'] = (
            self._handle_user_property_db_user)
        # BUG FIX: the password key previously lacked the 'oozie-site/'
        # prefix, so it was registered under a name no user input ever
        # matches and password updates were silently dropped.
        ui_handlers['oozie-site/oozie.service.JPAService.jdbc.password'] = (
            self._handle_user_property_db_pwd)

    def _handle_user_property_db_user(self, user_input, configurations):
        # Mirror the DB user into both oozie-site and the global config.
        oozie_site_config_map = configurations['oozie-site']
        oozie_site_config_map['oozie.service.JPAService.jdbc.username'] = (
            user_input.value)
        global_config_map = configurations['global']
        global_config_map['oozie_metastore_user_name'] = user_input.value

    def _handle_user_property_db_pwd(self, user_input, configurations):
        # Mirror the DB password into both oozie-site and the global config.
        oozie_site_config_map = configurations['oozie-site']
        oozie_site_config_map['oozie.service.JPAService.jdbc.password'] = (
            user_input.value)
        global_config_map = configurations['global']
        global_config_map['oozie_metastore_user_passwd'] = user_input.value
class GangliaService(Service):
    """Service definition for Ganglia monitoring."""

    def __init__(self):
        super(GangliaService, self).__init__(GangliaService.get_service_id())

    @classmethod
    def get_service_id(cls):
        """Return the Ambari identifier for this service."""
        return 'GANGLIA'

    def validate(self, cluster_spec, cluster):
        """Require exactly one deployed GANGLIA_SERVER."""
        server_count = cluster_spec.get_deployed_node_group_count(
            'GANGLIA_SERVER')
        if server_count != 1:
            raise ex.InvalidComponentCountException(
                'GANGLIA_SERVER', 1, server_count)

    def is_user_template_component(self, component):
        """Monitors are injected automatically, never user-specified."""
        return component.name != 'GANGLIA_MONITOR'

    def finalize_ng_components(self, cluster_spec):
        """Place a Ganglia monitor on every node group."""
        for ng in cluster_spec.node_groups.values():
            if 'GANGLIA_MONITOR' not in ng.components:
                ng.components.append('GANGLIA_MONITOR')
class AmbariService(Service):
    """Service definition for the Ambari management server/agents."""
    def __init__(self):
        super(AmbariService, self).__init__(AmbariService.get_service_id())
        self.configurations.add('ambari')
        # TODO(jspeidel): don't hard code default admin user
        self.admin_user_name = 'admin'
    @classmethod
    def get_service_id(cls):
        """Return the Ambari identifier for this service."""
        return 'AMBARI'
    def validate(self, cluster_spec, cluster):
        """Require exactly one deployed AMBARI_SERVER."""
        count = cluster_spec.get_deployed_node_group_count('AMBARI_SERVER')
        if count != 1:
            raise ex.InvalidComponentCountException('AMBARI_SERVER', 1, count)
    def register_service_urls(self, cluster_spec, url_info):
        """Expose the Ambari console URL in ``url_info``."""
        ambari_ip = cluster_spec.determine_component_hosts(
            'AMBARI_SERVER').pop().management_ip
        port = cluster_spec.configurations['ambari'].get(
            'server.port', '8080')
        url_info['Ambari Console'] = {
            'Web UI': 'http://{0}:{1}'.format(ambari_ip, port)
        }
        return url_info
    def is_user_template_component(self, component):
        """Agents are injected automatically, never user-specified."""
        return component.name != 'AMBARI_AGENT'
    def register_user_input_handlers(self, ui_handlers):
        """Wire the Ambari admin credential inputs to their handlers."""
        ui_handlers['ambari-stack/ambari.admin.user'] = (
            self._handle_user_property_admin_user)
        ui_handlers['ambari-stack/ambari.admin.password'] = (
            self._handle_user_property_admin_password)
    def is_mandatory(self):
        """Ambari must always be part of the cluster."""
        return True
    def _handle_user_property_admin_user(self, user_input, configurations):
        """Rename the default 'admin' user to the user-supplied name."""
        admin_user = next(user for user in self.users
                          if user.name == 'admin')
        admin_user.name = user_input.value
        # Remember the new name so the password handler can find the user.
        self.admin_user_name = user_input.value
    def _handle_user_property_admin_password(self, user_input, configurations):
        """Set the password on the (possibly renamed) admin user."""
        admin_user = next(user for user in self.users
                          if user.name == self.admin_user_name)
        admin_user.password = user_input.value
class SqoopService(Service):
    """Service definition for Sqoop."""

    def __init__(self):
        super(SqoopService, self).__init__(SqoopService.get_service_id())

    @classmethod
    def get_service_id(cls):
        """Return the Ambari identifier for this service."""
        return 'SQOOP'

    def finalize_ng_components(self, cluster_spec):
        """Ensure Sqoop node groups also carry HDFS/MapReduce clients."""
        required_clients = ('HDFS_CLIENT', 'MAPREDUCE_CLIENT')
        sqoop_ngs = cluster_spec.get_node_groups_containing_component('SQOOP')
        for ng in sqoop_ngs:
            for client in required_clients:
                if client not in ng.components:
                    ng.components.append(client)
| |
# Copyright 2016 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This package manipulates OCI image configuration metadata."""
from collections import namedtuple
import copy
import json
import os
import os.path
import sys
from tools.build_defs.docker import utils
from third_party.py import gflags
# Command-line flags describing how the generated config differs from the
# base image's config.
gflags.DEFINE_string('base', None, 'The parent image')
gflags.DEFINE_string('output', None, 'The output file to generate')
gflags.MarkFlagAsRequired('output')
gflags.DEFINE_multistring('layer', [],
                          'Layer sha256 hashes that make up this image')
gflags.DEFINE_list('entrypoint', None,
                   'Override the "Entrypoint" of the previous image')
gflags.DEFINE_list('command', None, 'Override the "Cmd" of the previous image')
gflags.DEFINE_string('user', None, 'The username to run commands under')
gflags.DEFINE_list('labels', None, 'Augment the "Label" of the previous image')
gflags.DEFINE_list('ports', None,
                   'Augment the "ExposedPorts" of the previous image')
gflags.DEFINE_list('volumes', None,
                   'Augment the "Volumes" of the previous image')
gflags.DEFINE_string('workdir', None, 'Set the working directory for the image')
gflags.DEFINE_list('env', None, 'Augment the "Env" of the previous image')
FLAGS = gflags.FLAGS
# Raw named tuple backing ConfigOptions; ConfigOptions adds keyword-only
# construction with None defaults.
_ConfigOptionsT = namedtuple('ConfigOptionsT', ['layers', 'entrypoint', 'cmd',
                                                'env', 'labels', 'ports',
                                                'volumes', 'workdir', 'user'])
class ConfigOptions(_ConfigOptionsT):
  """Docker image configuration options.

  Thin wrapper over the backing namedtuple that makes every field
  keyword-only and default to None.
  """
  def __new__(cls,
              layers=None,
              entrypoint=None,
              cmd=None,
              user=None,
              labels=None,
              env=None,
              ports=None,
              volumes=None,
              workdir=None):
    """Constructor.

    Args:
      layers: list of layer sha256 hashes that make up the image.
      entrypoint: overridden "Entrypoint" for the image.
      cmd: overridden "Cmd" for the image.
      user: user the commands run as.
      labels: dict of labels to merge into the image config.
      env: dict of environment variables to merge into the image config.
      ports: list of port specs to expose.
      volumes: list of volume paths.
      workdir: working directory for the image.

    Returns:
      A new ConfigOptions instance.
    """
    return super(ConfigOptions, cls).__new__(cls,
                                             layers=layers,
                                             entrypoint=entrypoint,
                                             cmd=cmd,
                                             user=user,
                                             labels=labels,
                                             env=env,
                                             ports=ports,
                                             volumes=volumes,
                                             workdir=workdir)
# Fixed platform values recorded in every generated image config.
_PROCESSOR_ARCHITECTURE = 'amd64'
_OPERATING_SYSTEM = 'linux'
def Resolve(value, environment):
  """Resolves environment variables embedded in the given value.

  Args:
    value: string possibly containing '$VAR' / '${VAR}' references.
    environment: dict used as the variable lookup table.

  Returns:
    The value with all resolvable references expanded.
  """
  # os.path.expandvars reads os.environ, so temporarily swap it out and
  # guarantee restoration even if expansion raises.
  saved_environ = os.environ
  os.environ = environment
  try:
    return os.path.expandvars(value)
  finally:
    os.environ = saved_environ
def DeepCopySkipNull(data):
  """Do a deep copy, skipping null entry.

  Dict values that are None are dropped (recursively); any non-dict value
  is deep-copied as-is.
  """
  if not isinstance(data, dict):
    return copy.deepcopy(data)
  return {DeepCopySkipNull(key): DeepCopySkipNull(value)
          for key, value in data.items()
          if value is not None}
def KeyValueToDict(pair):
  """Converts an iterable object of key=value pairs to dictionary.

  Args:
    pair: an iterable of 'key=value' strings, or None. Only the first '='
          splits; later '=' characters stay in the value.

  Returns:
    A dict mapping each key to its value. An empty dict when pair is None
    (list-valued flags default to None when not supplied on the command
    line), which previously raised TypeError.
  """
  d = dict()
  for kv in pair or []:
    (k, v) = kv.split('=', 1)
    d[k] = v
  return d
def CreateImageConfig(data, options):
  """Create an image config possibly based on an existing one.

  Args:
    data: A dict of Docker image config to base on top of.
    options: Options specific to this image which will be merged with any
      existing data

  Returns:
    Image config for the new image
  """
  defaults = DeepCopySkipNull(data)
  # dont propagate non-spec keys
  output = dict()
  # Deterministic timestamp/author keep image generation reproducible.
  output['created'] = '0001-01-01T00:00:00Z'
  output['author'] = 'Bazel'
  output['architecture'] = _PROCESSOR_ARCHITECTURE
  output['os'] = _OPERATING_SYSTEM
  # Start from the parent's 'config' section and overlay our options.
  output['config'] = defaults.get('config', {})
  if options.entrypoint:
    output['config']['Entrypoint'] = options.entrypoint
  if options.cmd:
    output['config']['Cmd'] = options.cmd
  if options.user:
    output['config']['User'] = options.user
  def Dict2ConfigValue(d):
    """Render a dict as the sorted 'k=v' string list the config expects."""
    return ['%s=%s' % (k, d[k]) for k in sorted(d.keys())]
  if options.env:
    # Build a dictionary of existing environment variables (used by Resolve).
    environ_dict = KeyValueToDict(output['config'].get('Env', []))
    # Merge in new environment variables, resolving references.
    for k, v in options.env.items():
      # Resolve handles scenarios like "PATH=$PATH:...".
      environ_dict[k] = Resolve(v, environ_dict)
    output['config']['Env'] = Dict2ConfigValue(environ_dict)
  # TODO(babel-team) Label is currently docker specific
  if options.labels:
    label_dict = KeyValueToDict(output['config'].get('Label', []))
    for k, v in options.labels.items():
      label_dict[k] = v
    output['config']['Label'] = Dict2ConfigValue(label_dict)
  if options.ports:
    if 'ExposedPorts' not in output['config']:
      output['config']['ExposedPorts'] = {}
    for p in options.ports:
      if '/' in p:
        # The port spec has the form 80/tcp, 1234/udp
        # so we simply use it as the key.
        output['config']['ExposedPorts'][p] = {}
      else:
        # Assume tcp
        output['config']['ExposedPorts'][p + '/tcp'] = {}
  if options.volumes:
    if 'Volumes' not in output['config']:
      output['config']['Volumes'] = {}
    for p in options.volumes:
      output['config']['Volumes'][p] = {}
  if options.workdir:
    output['config']['WorkingDir'] = options.workdir
  # diff_ids are ordered from bottom-most to top-most
  diff_ids = defaults.get('rootfs', {}).get('diff_ids', [])
  layers = options.layers if options.layers else []
  diff_ids += ['sha256:%s' % l for l in layers]
  output['rootfs'] = {
      'type': 'layers',
      'diff_ids': diff_ids,
  }
  # history is ordered from bottom-most layer to top-most layer
  history = defaults.get('history', [])
  # docker only allows the child to have one more history entry than the parent
  history += [{
      'created': '0001-01-01T00:00:00Z',
      'created_by': 'bazel build ...',
      'author': 'Bazel'}]
  output['history'] = history
  return output
def main(unused_argv):
  """Generate the image config JSON from the flags and the base image."""
  base_json = '{}'
  manifest = utils.GetLatestManifestFromTar(FLAGS.base)
  if manifest:
    config_file = manifest['Config']
    base_json = utils.GetTarFile(FLAGS.base, config_file)
  data = json.loads(base_json)
  layers = []
  for layer in FLAGS.layer:
    layers.append(utils.ExtractValue(layer))
  # NOTE(review): FLAGS.labels and FLAGS.env default to None while
  # KeyValueToDict iterates its argument -- confirm the build rule always
  # passes these flags.
  labels = KeyValueToDict(FLAGS.labels)
  for label, value in labels.items():
    # A label value of the form '@path' is read from the named file.
    if value.startswith('@'):
      with open(value[1:], 'r') as f:
        labels[label] = f.read()
  output = CreateImageConfig(data,
                             ConfigOptions(layers=layers,
                                           entrypoint=FLAGS.entrypoint,
                                           cmd=FLAGS.command,
                                           user=FLAGS.user,
                                           labels=labels,
                                           env=KeyValueToDict(FLAGS.env),
                                           ports=FLAGS.ports,
                                           volumes=FLAGS.volumes,
                                           workdir=FLAGS.workdir))
  with open(FLAGS.output, 'w') as fp:
    json.dump(output, fp, sort_keys=True)
    fp.write('\n')
if __name__ == '__main__':
  # FLAGS(...) parses the flags and returns the remaining positional args.
  main(FLAGS(sys.argv))
| |
"""Support for Nederlandse Spoorwegen public transport."""
from datetime import datetime, timedelta
import logging
import ns_api
from ns_api import RequestParametersError
import requests
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import ATTR_ATTRIBUTION, CONF_API_KEY, CONF_NAME
from homeassistant.exceptions import PlatformNotReady
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
_LOGGER = logging.getLogger(__name__)
ATTRIBUTION = "Data provided by NS"
# Configuration keys for a single configured route.
CONF_ROUTES = "routes"
CONF_FROM = "from"
CONF_TO = "to"
CONF_VIA = "via"
CONF_TIME = "time"
ICON = "mdi:train"
# Minimum interval between NS API polls (enforced via @Throttle).
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=120)
# Schema for one route entry; 'via' and a fixed departure 'time' are optional.
ROUTE_SCHEMA = vol.Schema(
    {
        vol.Required(CONF_NAME): cv.string,
        vol.Required(CONF_FROM): cv.string,
        vol.Required(CONF_TO): cv.string,
        vol.Optional(CONF_VIA): cv.string,
        vol.Optional(CONF_TIME): cv.time,
    }
)
ROUTES_SCHEMA = vol.All(cv.ensure_list, [ROUTE_SCHEMA])
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {vol.Required(CONF_API_KEY): cv.string, vol.Optional(CONF_ROUTES): ROUTES_SCHEMA}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the departure sensor."""
    nsapi = ns_api.NSAPI(config[CONF_API_KEY])

    try:
        stations = nsapi.get_stations()
    except (
        requests.exceptions.ConnectionError,
        requests.exceptions.HTTPError,
    ) as error:
        _LOGGER.error("Could not connect to the internet: %s", error)
        raise PlatformNotReady() from error
    except RequestParametersError as error:
        _LOGGER.error("Could not fetch stations, please check configuration: %s", error)
        return

    sensors = []
    # BUG FIX: CONF_ROUTES is Optional in PLATFORM_SCHEMA, so it may be
    # absent entirely; default to an empty list instead of iterating None.
    for departure in config.get(CONF_ROUTES, []):
        # Skip routes that reference unknown station codes.
        if not valid_stations(
            stations,
            [departure.get(CONF_FROM), departure.get(CONF_VIA), departure.get(CONF_TO)],
        ):
            continue
        sensors.append(
            NSDepartureSensor(
                nsapi,
                departure.get(CONF_NAME),
                departure.get(CONF_FROM),
                departure.get(CONF_TO),
                departure.get(CONF_VIA),
                departure.get(CONF_TIME),
            )
        )
    if sensors:
        add_entities(sensors, True)
def valid_stations(stations, given_stations):
    """Verify the existence of the given station codes."""
    known_codes = {station.code for station in stations}
    for code in given_stations:
        # 'via' may legitimately be None (it is optional).
        if code is None:
            continue
        if code.upper() not in known_codes:
            _LOGGER.warning("Station '%s' is not a valid station.", code)
            return False
    return True
class NSDepartureSensor(Entity):
    """Implementation of a NS Departure Sensor."""

    def __init__(self, nsapi, name, departure, heading, via, time):
        """Initialize the sensor."""
        self._nsapi = nsapi
        self._name = name
        self._departure = departure
        self._via = via
        self._heading = heading
        self._time = time
        self._state = None
        self._trips = None

    @property
    def name(self):
        """Return the name of the sensor."""
        return self._name

    @property
    def icon(self):
        """Return the icon for the frontend."""
        return ICON

    @property
    def state(self):
        """Return the next departure time."""
        return self._state

    @property
    def device_state_attributes(self):
        """Return the state attributes."""
        if not self._trips:
            return

        # BUG FIX: 'route' was only bound when the first trip had
        # trip_parts, but it is used unconditionally below, raising
        # NameError for direct trips; always initialize it.
        route = []
        if self._trips[0].trip_parts:
            route = [self._trips[0].departure]
            for k in self._trips[0].trip_parts:
                route.append(k.destination)

        # Static attributes
        attributes = {
            "going": self._trips[0].going,
            "departure_time_planned": None,
            "departure_time_actual": None,
            "departure_delay": False,
            "departure_platform_planned": self._trips[0].departure_platform_planned,
            "departure_platform_actual": self._trips[0].departure_platform_actual,
            "arrival_time_planned": None,
            "arrival_time_actual": None,
            "arrival_delay": False,
            "arrival_platform_planned": self._trips[0].arrival_platform_planned,
            "arrival_platform_actual": self._trips[0].arrival_platform_actual,
            "next": None,
            "status": self._trips[0].status.lower(),
            "transfers": self._trips[0].nr_transfers,
            "route": route,
            "remarks": None,
            ATTR_ATTRIBUTION: ATTRIBUTION,
        }

        # Planned departure attributes
        if self._trips[0].departure_time_planned is not None:
            attributes["departure_time_planned"] = self._trips[
                0
            ].departure_time_planned.strftime("%H:%M")

        # Actual departure attributes
        if self._trips[0].departure_time_actual is not None:
            attributes["departure_time_actual"] = self._trips[
                0
            ].departure_time_actual.strftime("%H:%M")

        # Delay departure attributes
        if (
            attributes["departure_time_planned"]
            and attributes["departure_time_actual"]
            and attributes["departure_time_planned"]
            != attributes["departure_time_actual"]
        ):
            attributes["departure_delay"] = True

        # Planned arrival attributes
        if self._trips[0].arrival_time_planned is not None:
            attributes["arrival_time_planned"] = self._trips[
                0
            ].arrival_time_planned.strftime("%H:%M")

        # Actual arrival attributes
        if self._trips[0].arrival_time_actual is not None:
            attributes["arrival_time_actual"] = self._trips[
                0
            ].arrival_time_actual.strftime("%H:%M")

        # Delay arrival attributes
        if (
            attributes["arrival_time_planned"]
            and attributes["arrival_time_actual"]
            and attributes["arrival_time_planned"] != attributes["arrival_time_actual"]
        ):
            attributes["arrival_delay"] = True

        # Next attributes
        if len(self._trips) > 1:
            if self._trips[1].departure_time_actual is not None:
                attributes["next"] = self._trips[1].departure_time_actual.strftime(
                    "%H:%M"
                )
            elif self._trips[1].departure_time_planned is not None:
                attributes["next"] = self._trips[1].departure_time_planned.strftime(
                    "%H:%M"
                )

        return attributes

    @Throttle(MIN_TIME_BETWEEN_UPDATES)
    def update(self):
        """Get the trip information."""

        # If looking for a specific trip time, update around that trip time only.
        if self._time and (
            (datetime.now() + timedelta(minutes=30)).time() < self._time
            or (datetime.now() - timedelta(minutes=30)).time() > self._time
        ):
            self._state = None
            self._trips = None
            return

        # Set the search parameter to search from a specific trip time or to just search for next trip.
        if self._time:
            trip_time = (
                datetime.today()
                .replace(hour=self._time.hour, minute=self._time.minute)
                .strftime("%d-%m-%Y %H:%M")
            )
        else:
            trip_time = datetime.now().strftime("%d-%m-%Y %H:%M")

        try:
            self._trips = self._nsapi.get_trips(
                trip_time, self._departure, self._via, self._heading, True, 0, 2
            )
            if self._trips:
                if self._trips[0].departure_time_actual is None:
                    planned_time = self._trips[0].departure_time_planned
                    self._state = planned_time.strftime("%H:%M")
                else:
                    actual_time = self._trips[0].departure_time_actual
                    self._state = actual_time.strftime("%H:%M")
        except (
            requests.exceptions.ConnectionError,
            requests.exceptions.HTTPError,
        ) as error:
            _LOGGER.error("Couldn't fetch trip info: %s", error)
| |
# Copyright (c) 2021, Frappe Technologies and Contributors
# License: MIT. See LICENSE
import unittest
import frappe
from frappe.test_runner import make_test_records
from frappe.utils import random_string
class TestAutoAssign(unittest.TestCase):
	"""Tests for Assignment Rule based auto-assignment of ToDos to Notes."""
	@classmethod
	def setUpClass(cls):
		frappe.db.delete("Assignment Rule")
	@classmethod
	def tearDownClass(cls):
		frappe.db.rollback()
	def setUp(self):
		# Every weekday enabled, so scheduling never filters a rule out here.
		make_test_records("User")
		days = [
			dict(day = 'Sunday'),
			dict(day = 'Monday'),
			dict(day = 'Tuesday'),
			dict(day = 'Wednesday'),
			dict(day = 'Thursday'),
			dict(day = 'Friday'),
			dict(day = 'Saturday'),
		]
		self.days = days
		self.assignment_rule = get_assignment_rule([days, days])
		clear_assignments()
	def test_round_robin(self):
		"""Assignments should cycle through the rule's users in order."""
		note = make_note(dict(public=1))
		# check if auto assigned to first user
		self.assertEqual(frappe.db.get_value('ToDo', dict(
			reference_type = 'Note',
			reference_name = note.name,
			status = 'Open'
		), 'allocated_to'), 'test@example.com')
		note = make_note(dict(public=1))
		# check if auto assigned to second user
		self.assertEqual(frappe.db.get_value('ToDo', dict(
			reference_type = 'Note',
			reference_name = note.name,
			status = 'Open'
		), 'allocated_to'), 'test1@example.com')
		clear_assignments()
		note = make_note(dict(public=1))
		# check if auto assigned to third user, even if
		# previous assignments where closed
		self.assertEqual(frappe.db.get_value('ToDo', dict(
			reference_type = 'Note',
			reference_name = note.name,
			status = 'Open'
		), 'allocated_to'), 'test2@example.com')
		# check loop back to first user
		note = make_note(dict(public=1))
		self.assertEqual(frappe.db.get_value('ToDo', dict(
			reference_type = 'Note',
			reference_name = note.name,
			status = 'Open'
		), 'allocated_to'), 'test@example.com')
	def test_load_balancing(self):
		"""Load balancing should keep each user's open-assignment count even."""
		self.assignment_rule.rule = 'Load Balancing'
		self.assignment_rule.save()
		for _ in range(30):
			note = make_note(dict(public=1))
		# check if each user has 10 assignments (?)
		for user in ('test@example.com', 'test1@example.com', 'test2@example.com'):
			self.assertEqual(len(frappe.get_all('ToDo', dict(allocated_to = user, reference_type = 'Note'))), 10)
		# clear 5 assignments for first user
		# can't do a limit in "delete" since postgres does not support it
		for d in frappe.get_all('ToDo', dict(reference_type = 'Note', allocated_to = 'test@example.com'), limit=5):
			frappe.db.delete("ToDo", {"name": d.name})
		# add 5 more assignments
		for i in range(5):
			make_note(dict(public=1))
		# check if each user still has 10 assignments
		for user in ('test@example.com', 'test1@example.com', 'test2@example.com'):
			self.assertEqual(len(frappe.get_all('ToDo', dict(allocated_to = user, reference_type = 'Note'))), 10)
	def test_based_on_field(self):
		"""'Based on Field' should assign to the user named in that field."""
		self.assignment_rule.rule = 'Based on Field'
		self.assignment_rule.field = 'owner'
		self.assignment_rule.save()
		frappe.set_user('test1@example.com')
		note = make_note(dict(public=1))
		# check if auto assigned to doc owner, test1@example.com
		self.assertEqual(frappe.db.get_value('ToDo', dict(
			reference_type = 'Note',
			reference_name = note.name,
			status = 'Open'
		), 'owner'), 'test1@example.com')
		frappe.set_user('test2@example.com')
		note = make_note(dict(public=1))
		# check if auto assigned to doc owner, test2@example.com
		self.assertEqual(frappe.db.get_value('ToDo', dict(
			reference_type = 'Note',
			reference_name = note.name,
			status = 'Open'
		), 'owner'), 'test2@example.com')
		frappe.set_user('Administrator')
	def test_assign_condition(self):
		"""A doc failing the assign condition should get no assignment."""
		# check condition
		note = make_note(dict(public=0))
		self.assertEqual(frappe.db.get_value('ToDo', dict(
			reference_type = 'Note',
			reference_name = note.name,
			status = 'Open'
		), 'allocated_to'), None)
	def test_clear_assignment(self):
		"""Meeting the unassign condition should cancel the open ToDo."""
		note = make_note(dict(public=1))
		# check if auto assigned to first user
		todo = frappe.get_list('ToDo', dict(
			reference_type = 'Note',
			reference_name = note.name,
			status = 'Open'
		), limit=1)[0]
		todo = frappe.get_doc('ToDo', todo['name'])
		self.assertEqual(todo.allocated_to, 'test@example.com')
		# test auto unassign
		note.public = 0
		note.save()
		todo.load_from_db()
		# check if todo is cancelled
		self.assertEqual(todo.status, 'Cancelled')
	def test_close_assignment(self):
		"""Meeting the close condition should close (not cancel) the ToDo."""
		note = make_note(dict(public=1, content="valid"))
		# check if auto assigned
		todo = frappe.get_list('ToDo', dict(
			reference_type = 'Note',
			reference_name = note.name,
			status = 'Open'
		), limit=1)[0]
		todo = frappe.get_doc('ToDo', todo['name'])
		self.assertEqual(todo.allocated_to, 'test@example.com')
		note.content="Closed"
		note.save()
		todo.load_from_db()
		# check if todo is closed
		self.assertEqual(todo.status, 'Closed')
		# check if closed todo retained assignment
		self.assertEqual(todo.allocated_to, 'test@example.com')
	# NOTE(review): no 'test_' prefix, so this method is never discovered by
	# the test runner -- confirm whether that is intentional.
	def check_multiple_rules(self):
		note = make_note(dict(public=1, notify_on_login=1))
		# check if auto assigned to test3 (2nd rule is applied, as it has higher priority)
		# NOTE(review): the comment above expects test3 but the assertion
		# expects test@example.com -- confirm which is intended.
		self.assertEqual(frappe.db.get_value('ToDo', dict(
			reference_type = 'Note',
			reference_name = note.name,
			status = 'Open'
		), 'allocated_to'), 'test@example.com')
	# NOTE(review): no 'test_' prefix, so this method is never discovered by
	# the test runner -- confirm whether that is intentional.
	def check_assignment_rule_scheduling(self):
		frappe.db.delete("Assignment Rule")
		days_1 = [dict(day = 'Sunday'), dict(day = 'Monday'), dict(day = 'Tuesday')]
		days_2 = [dict(day = 'Wednesday'), dict(day = 'Thursday'), dict(day = 'Friday'), dict(day = 'Saturday')]
		get_assignment_rule([days_1, days_2], ['public == 1', 'public == 1'])
		frappe.flags.assignment_day = "Monday"
		note = make_note(dict(public=1))
		self.assertIn(frappe.db.get_value('ToDo', dict(
			reference_type = 'Note',
			reference_name = note.name,
			status = 'Open'
		), 'allocated_to'), ['test@example.com', 'test1@example.com', 'test2@example.com'])
		frappe.flags.assignment_day = "Friday"
		note = make_note(dict(public=1))
		self.assertIn(frappe.db.get_value('ToDo', dict(
			reference_type = 'Note',
			reference_name = note.name,
			status = 'Open'
		), 'allocated_to'), ['test3@example.com'])
	def test_assignment_rule_condition(self):
		"""ToDo due dates should track the reference doc's date field."""
		frappe.db.delete("Assignment Rule")
		# Add expiry_date custom field
		from frappe.custom.doctype.custom_field.custom_field import create_custom_field
		df = dict(fieldname='expiry_date', label='Expiry Date', fieldtype='Date')
		create_custom_field('Note', df)
		assignment_rule = frappe.get_doc(dict(
			name = 'Assignment with Due Date',
			doctype = 'Assignment Rule',
			document_type = 'Note',
			assign_condition = 'public == 0',
			due_date_based_on = 'expiry_date',
			assignment_days = self.days,
			users = [
				dict(user = 'test@example.com'),
			]
		)).insert()
		expiry_date = frappe.utils.add_days(frappe.utils.nowdate(), 2)
		note1 = make_note({'expiry_date': expiry_date})
		note2 = make_note({'expiry_date': expiry_date})
		note1_todo = frappe.get_all('ToDo', filters=dict(
			reference_type = 'Note',
			reference_name = note1.name,
			status = 'Open'
		))[0]
		note1_todo_doc = frappe.get_doc('ToDo', note1_todo.name)
		self.assertEqual(frappe.utils.get_date_str(note1_todo_doc.date), expiry_date)
		# due date should be updated if the reference doc's date is updated.
		note1.expiry_date = frappe.utils.add_days(expiry_date, 2)
		note1.save()
		note1_todo_doc.reload()
		self.assertEqual(frappe.utils.get_date_str(note1_todo_doc.date), note1.expiry_date)
		# saving one note's expiry should not update other note todo's due date
		note2_todo = frappe.get_all('ToDo', filters=dict(
			reference_type = 'Note',
			reference_name = note2.name,
			status = 'Open'
		), fields=['name', 'date'])[0]
		self.assertNotEqual(frappe.utils.get_date_str(note2_todo.date), note1.expiry_date)
		self.assertEqual(frappe.utils.get_date_str(note2_todo.date), expiry_date)
		assignment_rule.delete()
def clear_assignments():
	"""Delete every ToDo that references a Note."""
	frappe.db.delete("ToDo", {"reference_type": "Note"})
def get_assignment_rule(days, assign=None):
	"""Create two Assignment Rules for Note and return the first one.

	days: list with two assignment-day lists, one per rule.
	assign: optional list with two assign conditions; defaults to
	['public == 1', 'notify_on_login == 1'].
	"""
	frappe.delete_doc_if_exists('Assignment Rule', 'For Note 1')
	if not assign:
		assign = ['public == 1', 'notify_on_login == 1']
	assignment_rule = frappe.get_doc(dict(
		name = 'For Note 1',
		doctype = 'Assignment Rule',
		priority = 0,
		document_type = 'Note',
		assign_condition = assign[0],
		unassign_condition = 'public == 0 or notify_on_login == 1',
		close_condition = '"Closed" in content',
		rule = 'Round Robin',
		assignment_days = days[0],
		users = [
			dict(user = 'test@example.com'),
			dict(user = 'test1@example.com'),
			dict(user = 'test2@example.com'),
		]
	)).insert()
	frappe.delete_doc_if_exists('Assignment Rule', 'For Note 2')
	# 2nd rule
	frappe.get_doc(dict(
		name = 'For Note 2',
		doctype = 'Assignment Rule',
		priority = 1,
		document_type = 'Note',
		assign_condition = assign[1],
		unassign_condition = 'notify_on_login == 0',
		rule = 'Round Robin',
		assignment_days = days[1],
		users = [
			dict(user = 'test3@example.com')
		]
	)).insert()
	return assignment_rule
def make_note(values=None):
	"""Insert and return a Note with random title/content.

	Entries in *values* override the generated fields before insert.
	"""
	note = frappe.get_doc(dict(
		doctype = 'Note',
		title = random_string(10),
		content = random_string(20)
	))
	if values:
		note.update(values)
	note.insert()
	return note
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.