| text stringlengths 4–1.02M | meta dict |
|---|---|
"""
This is a secret schema conversion function for the zon metrics migration.
There are two types of metrics to migrate, and both should follow our
new metric schema pattern.
"""
|
{
"content_hash": "4a3cee790bc3fb732cd5b3aa3ee08530",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 68,
"avg_line_length": 24.857142857142858,
"alnum_prop": 0.7701149425287356,
"repo_name": "yunstanford/metrics-migration",
"id": "3387bde5b600e469694aa4f857d044a0b9ae3f22",
"size": "174",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "migration/schema_func.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "24271"
}
],
"symlink_target": ""
}
|
from six import string_types
from pypif.obj.common.pio import Pio
class FileReference(Pio):
"""
Information about a file.
"""
def __init__(self, relative_path=None, mime_type=None, sha256=None, md5=None, tags=None, **kwargs):
"""
Constructor.
:param relative_path: String with the relative path (from the location of this file) of the file.
:param mime_type: String with the mime type of the file.
:param sha256: String with the SHA-256 hash of the file.
:param md5: String with the MD5 hash of the file.
:param tags: List of strings or numbers that are tags for this object.
:param kwargs: Dictionary of fields that are not supported.
"""
super(FileReference, self).__init__(tags=tags, **kwargs)
self._relative_path = None
self.relative_path = relative_path
self._mime_type = None
self.mime_type = mime_type
self._sha256 = None
self.sha256 = sha256
self._md5 = None
self.md5 = md5
@property
def relative_path(self):
return self._relative_path
@relative_path.setter
def relative_path(self, relative_path):
self._validate_type('relative_path', relative_path, string_types)
self._relative_path = relative_path
@relative_path.deleter
def relative_path(self):
self._relative_path = None
@property
def mime_type(self):
return self._mime_type
@mime_type.setter
def mime_type(self, mime_type):
self._validate_type('mime_type', mime_type, string_types)
self._mime_type = mime_type
@mime_type.deleter
def mime_type(self):
self._mime_type = None
@property
def sha256(self):
return self._sha256
@sha256.setter
def sha256(self, sha256):
self._validate_type('sha256', sha256, string_types)
self._sha256 = sha256
@sha256.deleter
def sha256(self):
self._sha256 = None
@property
def md5(self):
return self._md5
@md5.setter
def md5(self, md5):
self._validate_type('md5', md5, string_types)
self._md5 = md5
@md5.deleter
def md5(self):
self._md5 = None
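# Illustrative usage sketch (not asserted by the module itself); the path and
# hash below are placeholder values.
def _example_usage():  # pragma: no cover
    ref = FileReference(relative_path='data/sample.csv',
                        mime_type='text/csv',
                        md5='d41d8cd98f00b204e9800998ecf8427e')
    print(ref.relative_path, ref.mime_type, ref.md5)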
|
{
"content_hash": "0c04442a6bdab772cd94a518c0f11a30",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 105,
"avg_line_length": 27.567901234567902,
"alnum_prop": 0.6112852664576802,
"repo_name": "kjaym/pypif",
"id": "29fdb0ed0c90f5dfbbb1e5dceea1593676c91c6c",
"size": "2233",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pypif/obj/common/file_reference.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "96858"
}
],
"symlink_target": ""
}
|
"""
Volume interface (1.1 extension).
"""
try:
from urllib import urlencode
except ImportError:
from urllib.parse import urlencode
import six
from cinderclient import base
class Volume(base.Resource):
"""A volume is an extra block level storage to the OpenStack instances."""
def __repr__(self):
return "<Volume: %s>" % self.id
def delete(self):
"""Delete this volume."""
self.manager.delete(self)
def update(self, **kwargs):
"""Update the display_name or display_description for this volume."""
self.manager.update(self, **kwargs)
def attach(self, instance_uuid, mountpoint):
"""Set attachment metadata.
:param instance_uuid: uuid of the attaching instance.
:param mountpoint: mountpoint on the attaching instance.
"""
return self.manager.attach(self, instance_uuid, mountpoint)
def detach(self):
"""Clear attachment metadata."""
return self.manager.detach(self)
def reserve(self, volume):
"""Reserve this volume."""
return self.manager.reserve(self)
def unreserve(self, volume):
"""Unreserve this volume."""
return self.manager.unreserve(self)
def begin_detaching(self, volume):
"""Begin detaching volume."""
return self.manager.begin_detaching(self)
def roll_detaching(self, volume):
"""Roll detaching volume."""
return self.manager.roll_detaching(self)
def initialize_connection(self, volume, connector):
"""Initialize a volume connection.
:param connector: connector dict from nova.
"""
return self.manager.initialize_connection(self, connector)
def terminate_connection(self, volume, connector):
"""Terminate a volume connection.
:param connector: connector dict from nova.
"""
return self.manager.terminate_connection(self, connector)
def set_metadata(self, volume, metadata):
"""Set or Append metadata to a volume.
:param type : The :class: `Volume` to set metadata on
:param metadata: A dict of key/value pairs to set
"""
return self.manager.set_metadata(self, metadata)
def upload_to_image(self, force, image_name, container_format,
disk_format):
"""Upload a volume to image service as an image."""
return self.manager.upload_to_image(self, force, image_name,
container_format, disk_format)
def force_delete(self):
"""Delete the specified volume ignoring its current state.
:param volume: The UUID of the volume to force-delete.
"""
self.manager.force_delete(self)
def reset_state(self, state):
"""Update the volume with the provided state."""
self.manager.reset_state(self, state)
def extend(self, volume, new_size):
"""Extend the size of the specified volume.
:param volume: The UUID of the volume to extend
:param new_size: The desired size to extend volume to.
"""
self.manager.extend(self, new_size)
class VolumeManager(base.ManagerWithFind):
"""
Manage :class:`Volume` resources.
"""
resource_class = Volume
def create(self, size, snapshot_id=None, source_volid=None,
display_name=None, display_description=None,
volume_type=None, user_id=None,
project_id=None, availability_zone=None,
metadata=None, imageRef=None):
"""
Create a volume.
:param size: Size of volume in GB
:param snapshot_id: ID of the snapshot
:param display_name: Name of the volume
:param display_description: Description of the volume
:param volume_type: Type of volume
:rtype: :class:`Volume`
:param user_id: User id derived from context
:param project_id: Project id derived from context
:param availability_zone: Availability Zone to use
:param metadata: Optional metadata to set on volume creation
:param imageRef: reference to an image stored in glance
:param source_volid: ID of source volume to clone from
"""
if metadata is None:
volume_metadata = {}
else:
volume_metadata = metadata
body = {'volume': {'size': size,
'snapshot_id': snapshot_id,
'display_name': display_name,
'display_description': display_description,
'volume_type': volume_type,
'user_id': user_id,
'project_id': project_id,
'availability_zone': availability_zone,
'status': "creating",
'attach_status': "detached",
'metadata': volume_metadata,
'imageRef': imageRef,
'source_volid': source_volid,
}}
return self._create('/volumes', body, 'volume')
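# Illustrative example of the request body assembled above: create(size=1,
# display_name='example') posts
#   {"volume": {"size": 1, "display_name": "example", "metadata": {},
#               "status": "creating", "attach_status": "detached", ...}}
# to the /volumes endpoint (unspecified fields are sent as None).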
def get(self, volume_id):
"""
Get a volume.
:param volume_id: The ID of the volume to get.
:rtype: :class:`Volume`
"""
return self._get("/volumes/%s" % volume_id, "volume")
def list(self, detailed=True, search_opts=None):
"""
Get a list of all volumes.
:rtype: list of :class:`Volume`
"""
if search_opts is None:
search_opts = {}
qparams = {}
for opt, val in six.iteritems(search_opts):
if val:
qparams[opt] = val
query_string = "?%s" % urlencode(qparams) if qparams else ""
detail = ""
if detailed:
detail = "/detail"
return self._list("/volumes%s%s" % (detail, query_string),
"volumes")
def delete(self, volume):
"""
Delete a volume.
:param volume: The :class:`Volume` to delete.
"""
self._delete("/volumes/%s" % base.getid(volume))
def update(self, volume, **kwargs):
"""
Update the display_name or display_description for a volume.
:param volume: The :class:`Volume` to update.
"""
if not kwargs:
return
body = {"volume": kwargs}
self._update("/volumes/%s" % base.getid(volume), body)
def _action(self, action, volume, info=None, **kwargs):
"""
Perform a volume "action."
"""
body = {action: info}
self.run_hooks('modify_body_for_action', body, **kwargs)
url = '/volumes/%s/action' % base.getid(volume)
return self.api.client.post(url, body=body)
def attach(self, volume, instance_uuid, mountpoint):
"""
Set attachment metadata.
:param volume: The :class:`Volume` (or its ID)
you would like to attach.
:param instance_uuid: uuid of the attaching instance.
:param mountpoint: mountpoint on the attaching instance.
"""
return self._action('os-attach',
volume,
{'instance_uuid': instance_uuid,
'mountpoint': mountpoint})
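# Illustrative example: attach(vol, instance_uuid, '/dev/vdb') posts
#   {"os-attach": {"instance_uuid": instance_uuid, "mountpoint": "/dev/vdb"}}
# to /volumes/<volume-id>/action via _action() above.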
def detach(self, volume):
"""
Clear attachment metadata.
:param volume: The :class:`Volume` (or its ID)
you would like to detach.
"""
return self._action('os-detach', volume)
def reserve(self, volume):
"""
Reserve this volume.
:param volume: The :class:`Volume` (or its ID)
you would like to reserve.
"""
return self._action('os-reserve', volume)
def unreserve(self, volume):
"""
Unreserve this volume.
:param volume: The :class:`Volume` (or its ID)
you would like to unreserve.
"""
return self._action('os-unreserve', volume)
def begin_detaching(self, volume):
"""
Begin detaching this volume.
:param volume: The :class:`Volume` (or its ID)
you would like to detach.
"""
return self._action('os-begin_detaching', volume)
def roll_detaching(self, volume):
"""
Roll detaching this volume.
:param volume: The :class:`Volume` (or its ID)
you would like to roll detaching.
"""
return self._action('os-roll_detaching', volume)
def initialize_connection(self, volume, connector):
"""
Initialize a volume connection.
:param volume: The :class:`Volume` (or its ID).
:param connector: connector dict from nova.
"""
return self._action('os-initialize_connection', volume,
{'connector': connector})[1]['connection_info']
def terminate_connection(self, volume, connector):
"""
Terminate a volume connection.
:param volume: The :class:`Volume` (or its ID).
:param connector: connector dict from nova.
"""
self._action('os-terminate_connection', volume,
{'connector': connector})
def set_metadata(self, volume, metadata):
"""
Update/Set a volume's metadata.
:param volume: The :class:`Volume`.
:param metadata: A dict of key/value pairs to set.
"""
body = {'metadata': metadata}
return self._create("/volumes/%s/metadata" % base.getid(volume),
body, "metadata")
def delete_metadata(self, volume, keys):
"""
Delete the specified keys from a volume's metadata.
:param volume: The :class:`Volume`.
:param keys: A list of keys to be removed.
"""
for k in keys:
self._delete("/volumes/%s/metadata/%s" % (base.getid(volume), k))
def upload_to_image(self, volume, force, image_name, container_format,
disk_format):
"""
Upload volume to image service as image.
:param volume: The :class:`Volume` to upload.
"""
return self._action('os-volume_upload_image',
volume,
{'force': force,
'image_name': image_name,
'container_format': container_format,
'disk_format': disk_format})
def force_delete(self, volume):
return self._action('os-force_delete', base.getid(volume))
def reset_state(self, volume, state):
"""Update the provided volume with the provided state."""
return self._action('os-reset_status', volume, {'status': state})
def extend(self, volume, new_size):
return self._action('os-extend',
base.getid(volume),
{'new_size': new_size})
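# Illustrative usage sketch, assuming the standard v1 client wrapper
# (cinderclient.v1.client.Client); credentials, endpoint and UUID are
# placeholders.
def _example_usage():  # pragma: no cover
    from cinderclient.v1 import client
    cc = client.Client('user', 'password', 'project',
                       'http://keystone.example:5000/v2.0')
    vol = cc.volumes.create(size=1, display_name='example-volume')
    cc.volumes.attach(vol, instance_uuid='00000000-0000-0000-0000-000000000000',
                      mountpoint='/dev/vdb')
    cc.volumes.detach(vol)
    cc.volumes.delete(vol)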
|
{
"content_hash": "5eae3fdccf40784687715284ad568277",
"timestamp": "",
"source": "github",
"line_count": 339,
"max_line_length": 78,
"avg_line_length": 32.610619469026545,
"alnum_prop": 0.5536861148801447,
"repo_name": "tylertian/Openstack",
"id": "9c870cb30a1db778e017e5db56ab1725d1c41a9a",
"size": "11691",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "openstack F/python-cinderclient/cinderclient/v1/volumes.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "239919"
},
{
"name": "JavaScript",
"bytes": "156942"
},
{
"name": "Python",
"bytes": "16949418"
},
{
"name": "Shell",
"bytes": "96743"
}
],
"symlink_target": ""
}
|
from . import PelicanPluginTestCase
class Gh(PelicanPluginTestCase):
def __init__(self, *args, **kwargs):
super().__init__(__file__, '', *args, **kwargs)
def test(self):
self.run_pelican({
'PLUGINS': ['m.htmlsanity', 'm.gh']
})
self.assertEqual(*self.actual_expected_contents('page.html'))
|
{
"content_hash": "5ed1ec922c64ff903b87c6c2a778dc61",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 69,
"avg_line_length": 28.75,
"alnum_prop": 0.5739130434782609,
"repo_name": "mosra/m.css",
"id": "091ce3a36e34fe4f0b68d0cf69a62e7ae974f6b0",
"size": "1575",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plugins/m/test/test_gh.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "29"
},
{
"name": "CSS",
"bytes": "139738"
},
{
"name": "HTML",
"bytes": "39793"
},
{
"name": "Python",
"bytes": "340270"
},
{
"name": "Shell",
"bytes": "461"
}
],
"symlink_target": ""
}
|
from __future__ import (division, print_function, absolute_import,
unicode_literals)
__all__ = ["MPIPool"]
# On some systems mpi4py is available but broken, so we avoid crashes by
# importing it only when an MPI pool is explicitly created. Still make it a
# global to avoid messing up other things.
MPI = None
class _close_pool_message(object):
def __repr__(self):
return "<Close pool message>"
class _function_wrapper(object):
def __init__(self, function):
self.function = function
def _error_function(task):
raise RuntimeError("Pool was sent tasks before being told what "
"function to apply.")
class MPIPool(object):
"""
A pool that distributes tasks over a set of MPI processes. MPI is an
API for distributed memory parallelism. This pool will let you run
emcee without shared memory, letting you use much larger machines
with emcee.
The pool only supports the :func:`map` method at the moment because
this is the only functionality that emcee needs. That being said,
this pool is fairly general and it could be used for other purposes.
Contributed by `Joe Zuntz <https://github.com/joezuntz>`_.
:param comm: (optional)
The ``mpi4py`` communicator.
:param debug: (optional)
If ``True``, print out a lot of status updates at each step.
:param loadbalance: (optional)
If ``True`` and the number of tasks exceeds the number of workers,
tries to load balance by sending out one task to each CPU first and
then sending out the rest as the CPUs finish.
"""
def __init__(self, comm=None, debug=False, loadbalance=False):
global MPI
try:
import mpi4py.MPI
MPI = mpi4py.MPI
except ImportError:
# Re-raise with a more user-friendly error message
raise ImportError("Please install mpi4py")
self.comm = MPI.COMM_WORLD if comm is None else comm
self.rank = self.comm.Get_rank()
self.size = self.comm.Get_size() - 1
self.debug = debug
self.function = _error_function
self.loadbalance = loadbalance
if self.size == 0:
raise ValueError("Tried to create an MPI pool, but there "
"was only one MPI process available. "
"Need at least two.")
def is_master(self):
"""
Is the current process the master?
"""
return self.rank == 0
def wait(self):
"""
If this isn't the master process, wait for instructions.
"""
if self.is_master():
raise RuntimeError("Master node told to await jobs.")
status = MPI.Status()
while True:
# Event loop.
# Sit here and await instructions.
if self.debug:
print("Worker {0} waiting for task.".format(self.rank))
# Blocking receive to wait for instructions.
task = self.comm.recv(source=0, tag=MPI.ANY_TAG, status=status)
if self.debug:
print("Worker {0} got task {1} with tag {2}."
.format(self.rank, task, status.tag))
# Check if message is special sentinel signaling end.
# If so, stop.
if isinstance(task, _close_pool_message):
if self.debug:
print("Worker {0} told to quit.".format(self.rank))
break
# Check if message is special type containing new function
# to be applied
if isinstance(task, _function_wrapper):
self.function = task.function
if self.debug:
print("Worker {0} replaced its task function: {1}."
.format(self.rank, self.function))
continue
# If not a special message, just run the known function on
# the input and return it asynchronously.
result = self.function(task)
if self.debug:
print("Worker {0} sending answer {1} with tag {2}."
.format(self.rank, result, status.tag))
self.comm.isend(result, dest=0, tag=status.tag)
def map(self, function, tasks):
"""
Like the built-in :func:`map` function, apply a function to all
of the values in a list and return the list of results.
:param function:
The function to apply to the list.
:param tasks:
The list of elements.
"""
ntask = len(tasks)
# If not the master just wait for instructions.
if not self.is_master():
self.wait()
return
if function is not self.function:
if self.debug:
print("Master replacing pool function with {0}."
.format(function))
self.function = function
F = _function_wrapper(function)
# Tell all the workers what function to use.
requests = []
for i in range(self.size):
r = self.comm.isend(F, dest=i + 1)
requests.append(r)
# Wait until all of the workers have responded. See:
# https://gist.github.com/4176241
MPI.Request.waitall(requests)
if (not self.loadbalance) or (ntask <= self.size):
# Do not perform load-balancing - the default load-balancing
# scheme emcee uses.
# Send all the tasks off and wait for them to be received.
# Again, see the bug in the above gist.
requests = []
for i, task in enumerate(tasks):
worker = i % self.size + 1
if self.debug:
print("Sent task {0} to worker {1} with tag {2}."
.format(task, worker, i))
r = self.comm.isend(task, dest=worker, tag=i)
requests.append(r)
MPI.Request.waitall(requests)
# Now wait for the answers.
results = []
for i in range(ntask):
worker = i % self.size + 1
if self.debug:
print("Master waiting for worker {0} with tag {1}"
.format(worker, i))
result = self.comm.recv(source=worker, tag=i)
results.append(result)
return results
else:
# Perform load-balancing. The order of the results are likely to
# be different from the previous case.
for i, task in enumerate(tasks[0:self.size]):
worker = i+1
if self.debug:
print("Sent task {0} to worker {1} with tag {2}."
.format(task, worker, i))
# Send out the tasks asynchronously.
self.comm.isend(task, dest=worker, tag=i)
ntasks_dispatched = self.size
results = [None]*ntask
for itask in range(ntask):
status = MPI.Status()
# Receive input from workers.
result = self.comm.recv(source=MPI.ANY_SOURCE,
tag=MPI.ANY_TAG, status=status)
worker = status.source
i = status.tag
results[i] = result
if self.debug:
print("Master received from worker {0} with tag {1}"
.format(worker, i))
# Now send the next task to this idle worker (if there are any
# left).
if ntasks_dispatched < ntask:
task = tasks[ntasks_dispatched]
i = ntasks_dispatched
if self.debug:
print("Sent task {0} to worker {1} with tag {2}."
.format(task, worker, i))
# Send out the tasks asynchronously.
self.comm.isend(task, dest=worker, tag=i)
ntasks_dispatched += 1
return results
def bcast(self, *args, **kwargs):
"""
Equivalent to mpi4py :func:`bcast` collective operation.
"""
return self.comm.bcast(*args, **kwargs)
def close(self):
"""
Just send a message off to all the pool members which contains
the special :class:`_close_pool_message` sentinel.
"""
if self.is_master():
for i in range(self.size):
self.comm.isend(_close_pool_message(), dest=i + 1)
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
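# Illustrative usage sketch of the master/worker pattern described in the
# class docstring. It assumes the script is launched under MPI, e.g.
# "mpiexec -n 4 python script.py"; the task function lives at module level
# so it can be pickled and shipped to the workers.
def _square(x):
    # Trivial task function used only by the sketch below.
    return x * x

def _example_usage():  # pragma: no cover
    import sys
    with MPIPool() as pool:
        # Workers block in wait() until the master closes the pool.
        if not pool.is_master():
            pool.wait()
            sys.exit(0)
        # The master farms the tasks out and collects results in task order.
        results = pool.map(_square, list(range(100)))
        print(results)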
|
{
"content_hash": "0a95eaf82e3b8fee3008f75048296b36",
"timestamp": "",
"source": "github",
"line_count": 250,
"max_line_length": 78,
"avg_line_length": 35.008,
"alnum_prop": 0.5324497257769653,
"repo_name": "elizabethswann/RR_fitter",
"id": "021e13a7f72ee31c1711acb35a77aa22c3b1307f",
"size": "8799",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "codes/emcee_1/mpi_pool.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "6231"
},
{
"name": "Python",
"bytes": "177531"
},
{
"name": "TeX",
"bytes": "107733"
}
],
"symlink_target": ""
}
|
"""
head_tracker.py - Version 2.0 2013-08-23
Move the head to track an object pose published on the /target PoseStamped topic.
Works with either the dynamixel_motor or the arbotix package.
Created for the Pi Robot Project: http://www.pirobot.org
Copyright (c) 2013 Patrick Goebel. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details at:
http://www.gnu.org/licenses/gpl.html
"""
import rospy
from sensor_msgs.msg import JointState
from std_msgs.msg import Float64
from geometry_msgs.msg import PointStamped, Point, Pose, PoseStamped
from math import radians
import tf
import os, thread
# Initialize the node
rospy.init_node("head_tracker")
# Are we running in fake mode?
FAKE = rospy.get_param('~sim', False)
# For fake mode we use the arbotix controller package and position tracking
if FAKE:
CONTROLLER_TYPE = 'arbotix'
TRACKER_TYPE = 'position'
else:
# Specify either 'dynamixel_motor' or 'arbotix' for the controller package
CONTROLLER_TYPE = rospy.get_param('~controller_type', 'arbotix')
# Specify either 'speed' or 'position' for the type of tracking
TRACKER_TYPE = rospy.get_param('~tracker_type', 'speed')
# Import the appropriate services for the type of controller
if CONTROLLER_TYPE == 'arbotix':
from arbotix_msgs.srv import *
else:
from dynamixel_controllers.srv import *
class HeadTracker():
def __init__(self):
rospy.on_shutdown(self.shutdown)
self.rate = rospy.get_param('~rate', 10)
r = rospy.Rate(self.rate)
self.tick = 1.0 / self.rate
self.enable_target_search = rospy.get_param('~enable_target_search', False)
# How long (in seconds) are we willing to wait before searching for the target?
self.search_target_timeout = rospy.get_param('~search_target_timeout', 10)
# How long (in seconds) are we willing to wait before re-centering the servos?
self.recenter_timeout = rospy.get_param('~recenter_timeout', 10)
# What are the names of the pan and tilt joints in the list of dynamixels?
self.head_pan_joint = rospy.get_param('~head_pan_joint', 'head_pan_joint')
self.head_tilt_joint = rospy.get_param('~head_tilt_joint', 'head_tilt_joint')
self.joints = [self.head_pan_joint, self.head_tilt_joint]
# What is the name of the camera link?
self.camera_link = rospy.get_param('~camera_link', 'camera_link')
# Joint speeds are given in radians per second
self.default_joint_speed = rospy.get_param('~default_joint_speed', 0.3)
self.max_joint_speed = rospy.get_param('~max_joint_speed', 0.5)
self.min_joint_speed = rospy.get_param('~min_joint_speed', 0.01)
# Only update the speed if it differs by this much (rad/s) from the last update
# If this is set too low (i.e. the updates are too fast), the servo can behave
# unpredictably.
self.speed_update_threshold = rospy.get_param('~speed_update_threshold', 0.1)
# How far ahead or behind the target (in radians) should we aim for?
self.lead_target_angle = rospy.get_param('~lead_target_angle', 0.5)
# How far ahead or behind the target (in radians) should we aim for?
self.max_lead_target_angle = rospy.get_param('~max_lead_target_angle', 0.5)
# The pan/tilt thresholds indicate how far (in radians) the target needs to be off-center
# before we make a movement.
self.pan_threshold = rospy.get_param('~pan_threshold', 0.02)
self.tilt_threshold = rospy.get_param('~tilt_threshold', 0.02)
# The gain_pan and gain_tilt parameters determine how responsive the servo movements are.
# If these are set too high, oscillation can result.
self.gain_pan = rospy.get_param('~gain_pan', 1.5)
self.gain_tilt = rospy.get_param('~gain_tilt', 1.5)
# For simulated position tracking, setting the gain too high causes oscillations
if FAKE and TRACKER_TYPE == 'position':
self.gain_pan = 1.5
self.gain_tilt = 1.5
# Set limits on how far we can pan or tilt
self.max_pan = rospy.get_param('~max_pan', radians(125))
self.min_pan = rospy.get_param('~min_pan', radians(-125))
self.max_tilt = rospy.get_param('~max_tilt', radians(90))
self.min_tilt = rospy.get_param('~min_tilt', radians(-90))
# Initialize the servo services and publishers
self.init_servos()
# Initialize tf listener
self.tf = tf.TransformListener()
# Allow tf to catch up
rospy.sleep(2)
# Set a flag to indicate when the target has been lost
self.target_visible = False
# Set a timer to determine how long a target is no longer visible
self.target_lost_time = 0.0
# A flag to indicate whether we're in wait mode
self.waiting = False
# Get a lock for updating the self.move_cmd values
self.lock = thread.allocate_lock()
# Subscribe to the 'joint_states' topic so we know how the joints are positioned
rospy.loginfo("Subscribing to joint_states...")
self.joint_state = JointState()
rospy.Subscriber('joint_states', JointState, self.update_joint_state)
# Wait until we actually have joint state values
while self.joint_state == JointState():
rospy.sleep(1)
# Center the pan and tilt servos
self.center_head_servos()
# Initialize the pan and tilt speeds
if TRACKER_TYPE == 'position':
pan_speed = tilt_speed = self.max_joint_speed
self.set_servo_speed(self.head_pan_joint, pan_speed)
self.set_servo_speed(self.head_tilt_joint, tilt_speed)
else:
pan_speed = tilt_speed = 0.0
# Wait for the target topic to become alive
rospy.loginfo("Waiting for target topic...")
rospy.wait_for_message('target_topic', PoseStamped)
# Subscribe to the target topic, queuing no more than one message
if CONTROLLER_TYPE == 'arbotix' and TRACKER_TYPE == 'position':
rospy.Subscriber('target_topic', PoseStamped, self.update_joint_positions, queue_size=1)
else:
rospy.Subscriber('target_topic', PoseStamped, self.update_joint_speeds, queue_size=1)
rospy.loginfo("Target messages detected. Starting tracker...")
while not rospy.is_shutdown():
# Acquire the lock
self.lock.acquire()
try:
# If we have lost the target, stop the servos incrementally for smoother tracking
if not self.target_visible:
if not self.waiting:
pan_speed /= 1.1
tilt_speed /= 1.1
# Keep track of how long the target is lost
self.target_lost_time += self.tick
else:
pan_speed = self.pan_speed
tilt_speed = self.tilt_speed
self.target_visible = False
self.waiting = False
self.target_lost_time = 0.0
# If the target is lost long enough, re-center the servos
if self.target_lost_time > self.recenter_timeout:
rospy.loginfo("Cannot find target.")
self.center_head_servos()
self.waiting = True
self.target_lost_time = 0.0
rospy.loginfo("Waiting for target to reappear...")
# If the target is lost for a bit, search for it
elif self.enable_target_search and self.target_lost_time > self.search_target_timeout:
rospy.loginfo("Searching for target...")
self.search_for_target()
self.target_lost_time += self.recenter_timeout
else:
# Only update the pan speed if it differs enough from the last value
if TRACKER_TYPE == 'speed' and abs(self.last_pan_speed - pan_speed) > self.speed_update_threshold:
self.set_servo_speed(self.head_pan_joint, pan_speed)
self.last_pan_speed = pan_speed
# Update the pan position
self.set_servo_position(self.head_pan_joint, self.pan_position)
# Only update the tilt speed if it differs enough from the last value
if TRACKER_TYPE == 'speed' and abs(self.last_tilt_speed - tilt_speed) > self.speed_update_threshold:
self.set_servo_speed(self.head_tilt_joint, tilt_speed)
self.last_tilt_speed = tilt_speed
# Update the tilt position
self.set_servo_position(self.head_tilt_joint, self.tilt_position)
finally:
# Release the lock
self.lock.release()
r.sleep()
def init_servos(self):
# Create dictionaries to hold the speed, position and torque controllers
self.servo_speed = dict()
self.servo_position = dict()
self.torque_enable = dict()
# Connect to the set_speed services and define a position publisher for each servo
rospy.loginfo("Waiting for joint controllers services...")
for joint in sorted(self.joints):
# The set_speed services
set_speed_service = '/' + joint + '/set_speed'
rospy.wait_for_service(set_speed_service)
self.servo_speed[joint] = rospy.ServiceProxy(set_speed_service, SetSpeed, persistent=True)
# Initialize the servo speed to the default_joint_speed
self.servo_speed[joint](self.default_joint_speed)
# The position controllers
self.servo_position[joint] = rospy.Publisher('/' + joint + '/command', Float64)
# A service to relax (disable torque) a servo
if CONTROLLER_TYPE == 'arbotix':
torque_service = '/' + joint + '/relax'
rospy.wait_for_service(torque_service)
self.torque_enable[joint] = rospy.ServiceProxy(torque_service, Relax)
# Start the servo in the relaxed state
self.torque_enable[joint]()
else:
torque_service = '/' + joint + '/torque_enable'
rospy.wait_for_service(torque_service)
self.torque_enable[joint] = rospy.ServiceProxy(torque_service, TorqueEnable)
self.torque_enable[joint](False)
self.pan_position = 0
self.tilt_position = 0
self.pan_speed = 0.0
self.tilt_speed = 0.0
self.last_tilt_speed = 0
self.last_pan_speed = 0
def set_servo_speed(self, servo, speed):
self.servo_speed[servo](speed)
def set_servo_position(self, servo, position):
self.servo_position[servo].publish(position)
def update_joint_speeds(self, msg):
# Acquire the lock
self.lock.acquire()
try:
# If message is empty, return immediately
if msg == PoseStamped():
return
# If we get this far, the target is visible
self.target_visible = True
# Get position component of the message
target = PointStamped()
target.header.frame_id = msg.header.frame_id
target.point = msg.pose.position
# Project the target point onto the camera link
camera_target = self.tf.transformPoint(self.camera_link, target)
# The virtual camera image is in the y-z plane
pan = -camera_target.point.y
tilt = -camera_target.point.z
# Compute the distance to the target in the x direction
distance = float(abs(camera_target.point.x))
# Convert the pan and tilt values from meters to radians
try:
pan /= distance
tilt /= distance
except:
# Check for exceptions (NaNs) and use minimum range as fallback
pan /= 0.5
tilt /= 0.5
# Get the current pan and tilt position
try:
current_pan = self.joint_state.position[self.joint_state.name.index(self.head_pan_joint)]
current_tilt = self.joint_state.position[self.joint_state.name.index(self.head_tilt_joint)]
except:
return
# Pan the camera only if the displacement of the target point exceeds the threshold
if abs(pan) > self.pan_threshold:
# Set the pan speed proportional to the horizontal displacement of the target
self.pan_speed = trunc(min(self.max_joint_speed, max(self.min_joint_speed, self.gain_pan * abs(pan))), 2)
if pan > 0:
self.pan_position = max(self.min_pan, current_pan - self.lead_target_angle)
else:
self.pan_position = min(self.max_pan, current_pan + self.lead_target_angle)
else:
self.pan_position = current_pan
self.pan_speed = self.min_joint_speed
# Tilt the camera only if the displacement of the target point exceeds the threshold
if abs(tilt) > self.tilt_threshold:
# Set the tilt speed proportional to the vertical displacement of the target
self.tilt_speed = trunc(min(self.max_joint_speed, max(self.min_joint_speed, self.gain_tilt * abs(tilt))), 2)
if tilt < 0:
self.tilt_position = max(self.min_tilt, current_tilt - self.lead_target_angle)
else:
self.tilt_position = min(self.max_tilt, current_tilt + self.lead_target_angle)
else:
self.tilt_position = current_tilt
self.tilt_speed = self.min_joint_speed
finally:
# Release the lock
self.lock.release()
def update_joint_positions(self, msg):
# Acquire the lock
self.lock.acquire()
try:
# Some publishers will continue to publish an empty message even when there is no
# point present. In this case we want to return without setting the target_visible flag.
if msg == PoseStamped():
return
# If we get this far, the target is visible
self.target_visible = True
# We only need the position component of the target pose for tracking which can
# be stored as a PointStamped() message.
target = PointStamped()
target.header.frame_id = msg.header.frame_id
target.point = msg.pose.position
# Project the target point onto the camera link
camera_target = self.tf.transformPoint(self.camera_link, target)
# The virtual camera image is in the y-z plane
pan = -camera_target.point.y
tilt = -camera_target.point.z
# Compute the distance to the target in the x direction
distance = float(abs(camera_target.point.x))
# Convert the pan and tilt values from meters to radians by dividing by the distance to the target.
# Since the Kinect or Xtion is blind to distances within 0.5 meters, check for an exception and
# use 0.5 meters as a fallback.
try:
pan /= distance
tilt /= distance
except:
pan /= 0.5
tilt /= 0.5
# Pan the camera only if the displacement of the target point exceeds the threshold.
if abs(pan) > self.pan_threshold:
current_pan = self.joint_state.position[self.joint_state.name.index(self.head_pan_joint)]
delta_pan = min(self.max_lead_target_angle, 0.25 * self.gain_pan * abs(pan))
if pan > 0:
self.pan_position = max(self.min_pan, current_pan - delta_pan)
else:
self.pan_position = min(self.max_pan, current_pan + delta_pan)
else:
self.pan_position = max(self.min_pan, min(self.max_pan, pan))
# Tilt the camera only if the displacement of the target point exceeds the threshold.
if abs(tilt) > self.tilt_threshold:
current_tilt = self.joint_state.position[self.joint_state.name.index(self.head_tilt_joint)]
delta_tilt = min(self.max_lead_target_angle, 0.25 * self.gain_tilt * abs(tilt))
if tilt < 0:
self.tilt_position = max(self.min_tilt, current_tilt - delta_tilt)
else:
self.tilt_position = min(self.max_tilt, current_tilt + delta_tilt)
else:
self.tilt_position = max(self.min_tilt, min(self.max_tilt, tilt))
finally:
self.lock.release()
def search_for_target(self):
# First pan one way with the head down a bit
self.servo_speed[self.head_pan_joint](self.default_joint_speed)
self.servo_speed[self.head_tilt_joint](self.default_joint_speed)
self.servo_position[self.head_pan_joint].publish(self.max_pan)
self.servo_position[self.head_tilt_joint].publish(0.1)
current_pan = self.joint_state.position[self.joint_state.name.index(self.head_pan_joint)]
while not self.target_visible and not rospy.is_shutdown() and current_pan < 0.9 * self.max_pan:
self.servo_position[self.head_pan_joint].publish(self.max_pan)
rospy.sleep(1)
current_pan = self.joint_state.position[self.joint_state.name.index(self.head_pan_joint)]
# Now pan the other way
self.servo_position[self.head_pan_joint].publish(self.min_pan)
self.servo_position[self.head_tilt_joint].publish(0.1)
current_pan = self.joint_state.position[self.joint_state.name.index(self.head_pan_joint)]
while not self.target_visible and not rospy.is_shutdown() and current_pan > 0.9 * self.min_pan:
self.servo_position[self.head_pan_joint].publish(self.min_pan)
rospy.sleep(1)
current_pan = self.joint_state.position[self.joint_state.name.index(self.head_pan_joint)]
def center_head_servos(self):
rospy.loginfo("Centering servos...")
self.servo_speed[self.head_pan_joint](self.default_joint_speed)
self.servo_speed[self.head_tilt_joint](self.default_joint_speed)
current_tilt = self.joint_state.position[self.joint_state.name.index(self.head_tilt_joint)]
current_pan = self.joint_state.position[self.joint_state.name.index(self.head_pan_joint)]
while abs(current_tilt) > 0.05 or abs(current_pan) > 0.05:
self.servo_position[self.head_pan_joint].publish(0)
self.servo_position[self.head_tilt_joint].publish(0)
rospy.sleep(0.5)
current_tilt = self.joint_state.position[self.joint_state.name.index(self.head_tilt_joint)]
current_pan = self.joint_state.position[self.joint_state.name.index(self.head_pan_joint)]
self.set_servo_speed(self.head_pan_joint, 0)
self.set_servo_speed(self.head_tilt_joint, 0)
def stop_servos(self):
rospy.loginfo("Stopping servos...")
current_tilt = self.joint_state.position[self.joint_state.name.index(self.head_tilt_joint)]
current_pan = self.joint_state.position[self.joint_state.name.index(self.head_pan_joint)]
self.servo_position[self.head_pan_joint].publish(current_pan)
self.servo_position[self.head_tilt_joint].publish(current_tilt)
def update_joint_state(self, msg):
try:
test = msg.name.index(self.head_pan_joint)
self.joint_state = msg
except:
pass
def shutdown(self):
rospy.loginfo("Shutting down head tracking node...")
self.center_head_servos()
# Relax all servos to give them a rest.
rospy.loginfo("Relaxing pan and tilt servos.")
for servo in self.joints:
if CONTROLLER_TYPE == 'arbotix':
self.torque_enable[servo]()
else:
self.torque_enable[servo](False)
def trunc(f, n):
'''Truncates/pads a float f to n decimal places without rounding'''
slen = len('%.*f' % (n, f))
return float(str(f)[:slen])
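# Illustrative example: trunc(0.6789, 2) returns 0.67 (truncated, not rounded).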
if __name__ == '__main__':
try:
HeadTracker()
except rospy.ROSInterruptException:
rospy.loginfo("Head tracking node terminated.")
|
{
"content_hash": "edbe95e55ee9730698485b1e8cf84ecd",
"timestamp": "",
"source": "github",
"line_count": 522,
"max_line_length": 124,
"avg_line_length": 43.35057471264368,
"alnum_prop": 0.5660435724070882,
"repo_name": "fujy/ROS-Project",
"id": "cb05e1aba30b28cab6f9b5c7326ede909dfeefa9",
"size": "22652",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/rbx2/rbx2_dynamixels/nodes/head_tracker.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "14439"
},
{
"name": "C++",
"bytes": "1896853"
},
{
"name": "CMake",
"bytes": "12338"
},
{
"name": "CSS",
"bytes": "322251"
},
{
"name": "HTML",
"bytes": "49036"
},
{
"name": "JavaScript",
"bytes": "157070"
},
{
"name": "Makefile",
"bytes": "41"
},
{
"name": "Python",
"bytes": "841517"
},
{
"name": "Shell",
"bytes": "2523"
}
],
"symlink_target": ""
}
|
from os.path import splitext
import numpy as np
import matplotlib.pylab as plt
import seaborn as sbn
# enumerate results files
result_files = np.loadtxt('result_filenames.np', dtype=str)
model_tech_time = {}
# plot and save results to png
for result_filename in result_files:
# save ave k-fold cpu time to dictionary
model_tech_time[splitext(result_filename)[0]] = np.load(result_filename)[-1]
plt.bar(range(len(model_tech_time)),
model_tech_time.values(),
align='center')
plt.xticks(range(len(model_tech_time)),
model_tech_time.keys())
plt.savefig('../images/model_tech_time.png')
|
{
"content_hash": "21ea0dbb47134fe90487b26603c33409",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 77,
"avg_line_length": 26.91304347826087,
"alnum_prop": 0.7075928917609047,
"repo_name": "jvpoulos/cs289-project",
"id": "79c1123cbdce8a23d1a36be7a18e14ecc415636b",
"size": "619",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "code/plot_cpu_time.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "115653"
},
{
"name": "R",
"bytes": "11839"
},
{
"name": "TeX",
"bytes": "98759"
}
],
"symlink_target": ""
}
|
import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['None'] , ['Lag1Trend'] , ['Seasonal_DayOfWeek'] , ['NoAR'] );
|
{
"content_hash": "c15f84f7af843f2976daf8966b9ec1a7",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 84,
"avg_line_length": 39.25,
"alnum_prop": 0.7070063694267515,
"repo_name": "antoinecarme/pyaf",
"id": "f55d9f4a3089fe7c3b040c86d477de961d8a542e",
"size": "157",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/model_control/detailed/transf_None/model_control_one_enabled_None_Lag1Trend_Seasonal_DayOfWeek_NoAR.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "6773299"
},
{
"name": "Procfile",
"bytes": "24"
},
{
"name": "Python",
"bytes": "54209093"
},
{
"name": "R",
"bytes": "807"
},
{
"name": "Shell",
"bytes": "3619"
}
],
"symlink_target": ""
}
|
"""
Unified Volume driver for IBM XIV and DS8K Storage Systems.
"""
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import importutils
from cinder import exception
from cinder.volume import driver
from cinder.volume.drivers.san import san
xiv_ds8k_opts = [
cfg.StrOpt(
'xiv_ds8k_proxy',
default='xiv_ds8k_openstack.nova_proxy.XIVDS8KNovaProxy',
help='Proxy driver that connects to the IBM Storage Array'),
cfg.StrOpt(
'xiv_ds8k_connection_type',
default='iscsi',
choices=['fibre_channel', 'iscsi'],
help='Connection type to the IBM Storage Array'),
cfg.StrOpt(
'xiv_chap',
default='disabled',
choices=['disabled', 'enabled'],
help='CHAP authentication mode, effective only for iscsi'
' (disabled|enabled)'),
cfg.StrOpt(
'management_ips',
default='',
help='List of Management IP addresses (separated by commas)'),
]
CONF = cfg.CONF
CONF.register_opts(xiv_ds8k_opts)
LOG = logging.getLogger(__name__)
class XIVDS8KDriver(san.SanDriver,
driver.ManageableVD,
driver.ExtendVD,
driver.SnapshotVD,
driver.MigrateVD,
driver.ReplicaVD,
driver.ConsistencyGroupVD,
driver.CloneableVD,
driver.CloneableImageVD,
driver.RetypeVD,
driver.TransferVD):
"""Unified IBM XIV and DS8K volume driver."""
def __init__(self, *args, **kwargs):
"""Initialize the driver."""
super(XIVDS8KDriver, self).__init__(*args, **kwargs)
self.configuration.append_config_values(xiv_ds8k_opts)
proxy = importutils.import_class(self.configuration.xiv_ds8k_proxy)
# NOTE: All Array specific configurations are prefixed with:
# "xiv_ds8k_array_"
# These additional flags should be specified in the cinder.conf
# preferably in each backend configuration.
self.xiv_ds8k_proxy = proxy(
{
"xiv_ds8k_user": self.configuration.san_login,
"xiv_ds8k_pass": self.configuration.san_password,
"xiv_ds8k_address": self.configuration.san_ip,
"xiv_ds8k_vol_pool": self.configuration.san_clustername,
"xiv_ds8k_connection_type":
self.configuration.xiv_ds8k_connection_type,
"xiv_chap": self.configuration.xiv_chap,
"management_ips": self.configuration.management_ips
},
LOG,
exception,
driver=self)
def do_setup(self, context):
"""Setup and verify IBM XIV and DS8K Storage connection."""
self.xiv_ds8k_proxy.setup(context)
def ensure_export(self, context, volume):
"""Ensure an export."""
return self.xiv_ds8k_proxy.ensure_export(context, volume)
def create_export(self, context, volume, connector):
"""Create an export."""
return self.xiv_ds8k_proxy.create_export(context, volume)
def create_volume(self, volume):
"""Create a volume on the IBM XIV and DS8K Storage system."""
return self.xiv_ds8k_proxy.create_volume(volume)
def delete_volume(self, volume):
"""Delete a volume on the IBM XIV and DS8K Storage system."""
self.xiv_ds8k_proxy.delete_volume(volume)
def remove_export(self, context, volume):
"""Disconnect a volume from an attached instance."""
return self.xiv_ds8k_proxy.remove_export(context, volume)
def initialize_connection(self, volume, connector):
"""Map the created volume."""
return self.xiv_ds8k_proxy.initialize_connection(volume, connector)
def terminate_connection(self, volume, connector, **kwargs):
"""Terminate a connection to a volume."""
return self.xiv_ds8k_proxy.terminate_connection(volume, connector)
def create_volume_from_snapshot(self, volume, snapshot):
"""Create a volume from a snapshot."""
return self.xiv_ds8k_proxy.create_volume_from_snapshot(
volume,
snapshot)
def create_snapshot(self, snapshot):
"""Create a snapshot."""
return self.xiv_ds8k_proxy.create_snapshot(snapshot)
def delete_snapshot(self, snapshot):
"""Delete a snapshot."""
return self.xiv_ds8k_proxy.delete_snapshot(snapshot)
def get_volume_stats(self, refresh=False):
"""Get volume stats."""
return self.xiv_ds8k_proxy.get_volume_stats(refresh)
def create_cloned_volume(self, tgt_volume, src_volume):
"""Create Cloned Volume."""
return self.xiv_ds8k_proxy.create_cloned_volume(tgt_volume, src_volume)
def extend_volume(self, volume, new_size):
"""Extend Created Volume."""
self.xiv_ds8k_proxy.extend_volume(volume, new_size)
def migrate_volume(self, context, volume, host):
"""Migrate the volume to the specified host."""
return self.xiv_ds8k_proxy.migrate_volume(context, volume, host)
def manage_existing(self, volume, existing_ref):
"""Brings an existing backend storage object under Cinder management.
existing_ref is passed straight through from the API request's
manage_existing_ref value, and it is up to the driver how this should
be interpreted. It should be sufficient to identify a storage object
that the driver should somehow associate with the newly-created cinder
volume structure.
In the case of XIV, the existing_ref consists of a single field named
'existing_ref' representing the name of the volume on the storage.
There are two ways to do this:
1. Rename the backend storage object so that it matches
volume['name'], which is how drivers traditionally map between a
cinder volume and the associated backend storage object.
2. Place some metadata on the volume, or somewhere in the backend, that
allows other driver requests (e.g. delete, clone, attach, detach...)
to locate the backend storage object when required.
If the existing_ref doesn't make sense, or doesn't refer to an existing
backend storage object, raise a ManageExistingInvalidReference
exception.
The volume may have a volume_type, and the driver can inspect that and
compare against the properties of the referenced backend storage
object. If they are incompatible, raise a
ManageExistingVolumeTypeMismatch, specifying a reason for the failure.
"""
return self.xiv_ds8k_proxy.manage_volume(volume, existing_ref)
def manage_existing_get_size(self, volume, existing_ref):
"""Return size of volume to be managed by manage_existing."""
return self.xiv_ds8k_proxy.manage_volume_get_size(volume, existing_ref)
def unmanage(self, volume):
"""Removes the specified volume from Cinder management."""
return self.xiv_ds8k_proxy.unmanage_volume(volume)
def reenable_replication(self, context, volume):
"""Re-enable volume replication. """
return self.xiv_ds8k_proxy.reenable_replication(context, volume)
def get_replication_status(self, context, volume):
"""Return replication status."""
return self.xiv_ds8k_proxy.get_replication_status(context, volume)
def promote_replica(self, context, volume):
"""Promote the replica to be the primary volume."""
return self.xiv_ds8k_proxy.promote_replica(context, volume)
def create_replica_test_volume(self, volume, src_vref):
"""Creates a test replica clone of the specified replicated volume."""
return self.xiv_ds8k_proxy.create_replica_test_volume(volume, src_vref)
def retype(self, ctxt, volume, new_type, diff, host):
"""Convert the volume to be of the new type."""
return self.xiv_ds8k_proxy.retype(ctxt, volume, new_type, diff, host)
def create_consistencygroup(self, context, group):
"""Creates a consistency group."""
return self.xiv_ds8k_proxy.create_consistencygroup(context, group)
def delete_consistencygroup(self, context, group):
"""Deletes a consistency group."""
return self.xiv_ds8k_proxy.delete_consistencygroup(context, group)
def create_cgsnapshot(self, context, cgsnapshot):
"""Creates a consistency group snapshot."""
return self.xiv_ds8k_proxy.create_cgsnapshot(context, cgsnapshot)
def delete_cgsnapshot(self, context, cgsnapshot):
"""Deletes a consistency group snapshot."""
return self.xiv_ds8k_proxy.delete_cgsnapshot(context, cgsnapshot)
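# Illustrative cinder.conf backend section wiring up this driver; all values
# below are placeholders:
#   [ibm_xiv]
#   volume_driver = cinder.volume.drivers.ibm.xiv_ds8k.XIVDS8KDriver
#   san_ip = 10.0.0.10
#   san_login = admin
#   san_password = secret
#   san_clustername = storage_pool
#   xiv_ds8k_connection_type = iscsi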
|
{
"content_hash": "82152f002a59747c69a79d44a865aa75",
"timestamp": "",
"source": "github",
"line_count": 245,
"max_line_length": 79,
"avg_line_length": 35.93469387755102,
"alnum_prop": 0.6497046796910495,
"repo_name": "CloudServer/cinder",
"id": "93a24e206e46aa77cce034a121174092d233ceb8",
"size": "9559",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cinder/volume/drivers/ibm/xiv_ds8k.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "12078537"
},
{
"name": "Shell",
"bytes": "8111"
}
],
"symlink_target": ""
}
|
"""
Contains helper classes for conversion between different NF-FG representations.
"""
import json
import logging
import re
import sys
from escape.util.virtualizer_helper import _res_parser
try:
# Import for ESCAPEv2
from escape.nffg_lib.nffg import AbstractNFFG, NFFG, NodeSAP, NFFGToolBox, \
VERSION as N_VERSION
from escape.nffg_lib.nffg_elements import Constraints
from escape.util.misc import VERBOSE, unicode_to_str, remove_units
except (ImportError, AttributeError):
import os
for p in ("../nffg_lib/",
"../util/"):
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), p)))
# Import for standalone running
from nffg import AbstractNFFG, NFFG, NFFGToolBox, VERSION as N_VERSION
from nffg_elements import Constraints
from misc import VERBOSE, unicode_to_str, remove_units
try:
# Import for ESCAPEv2
import virtualizer as virt_lib
from virtualizer import __version__ as V_VERSION, Virtualizer
except (ImportError, AttributeError):
import os
sys.path.append(os.path.abspath(
os.path.join(os.path.dirname(__file__), "../../../unify_virtualizer/")))
# Import for standalone running
import virtualizer as virt_lib
from virtualizer import __version__ as V_VERSION, Virtualizer
# noinspection PyShadowingNames
class NFFGConverter(object):
"""
Convert different representation of NFFG in both ways.
"""
# port types in Virtualizer
TYPE_VIRTUALIZER_PORT_ABSTRACT = "port-abstract"
TYPE_VIRTUALIZER_PORT_SAP = "port-sap"
# General option names in mapped NFFG assembled by the Mapping algorithm
OP_TAG = 'TAG'
OP_UNTAG = 'UNTAG'
OP_INPORT = 'in_port'
OP_OUTPUT = 'output'
OP_FLOWCLASS = "flowclass"
GENERAL_OPERATIONS = (OP_INPORT, OP_OUTPUT, OP_TAG, OP_UNTAG, OP_FLOWCLASS)
# Specific tags
TAG_SG_HOP = "sg_hop"
# SAP id storing prefix
SAP_NAME_PREFIX = 'SAP'
# Operation formats in Virtualizer
MATCH_TAG = r"dl_tag"
ACTION_PUSH_TAG = r"push_tag"
ACTION_POP_TAG = r"pop_tag"
# Operand delimiters
LABEL_DELIMITER = '|'
OP_DELIMITER = ';'
KV_DELIMITER = '='
# Other delimiters
UNIQUE_ID_DELIMITER = '@'
# Field types
TYPE_MATCH = "MATCH"
TYPE_ACTION = "ACTION"
# Hard-coded constants
REQUIREMENT_PREFIX = "REQ"
def __init__ (self, domain=None, logger=None, unique_bb_id=False,
unique_nf_id=False):
"""
Init.
:param domain: domain name
:type domain: str
:param logger: optional logger
:type logger: str or :any:`logging.Logger`
:param unique_bb_id: generate unique id for infrastructure (BiSBiS) nodes
:type unique_bb_id: bool
:param unique_nf_id: generate unique id for NF nodes
:type unique_nf_id: bool
:return: None
"""
# Save domain name for define domain attribute in infras
self.domain = domain
# If unique_bb_id is True, append the domain name to node ids as a unique suffix
self.__unique_bb_id = unique_bb_id
self.__unique_nf_id = unique_nf_id
self.log = logger if logger is not None else logging.getLogger(__name__)
self.log.debug('Created NFFGConverter with domain name: %s' % self.domain)
def disable_unique_bb_id (self):
self.log.debug("Disable unique BiSBiS id recreation!")
self.__unique_bb_id = False
@classmethod
def field_splitter (cls, type, field):
"""
Split the match/action field into a dict-based format for flowrule creation.
:param type: the name of the field ('MATCH' or 'ACTION')
:type type: str
:param field: field data
:type field: str
:return: split data structure
:rtype: dict
"""
ret = {}
parts = field.split(cls.OP_DELIMITER)
if len(parts) < 1:
raise RuntimeError(
"Wrong format: %s! Separator (%s) not found!" % (
field, cls.OP_DELIMITER))
for part in parts:
kv = part.split(cls.KV_DELIMITER, 1)
if len(kv) != 2:
if kv[0] == cls.OP_UNTAG and type.upper() == cls.TYPE_ACTION:
ret['vlan_pop'] = True
continue
else:
raise RuntimeError("Not a key-value pair: %s" % part)
if kv[0] == cls.OP_INPORT:
try:
ret['in_port'] = int(kv[1])
except ValueError:
# self.log.warning(
# "in_port is not a valid port number: %s! Skip "
# "converting..." % kv[1])
ret['in_port'] = kv[1]
elif kv[0] == cls.OP_TAG:
if type.upper() == cls.TYPE_MATCH:
ret['vlan_id'] = kv[1].split(cls.LABEL_DELIMITER)[-1]
elif type.upper() == cls.TYPE_ACTION:
ret['vlan_push'] = kv[1].split(cls.LABEL_DELIMITER)[-1]
else:
raise RuntimeError('Not supported field type: %s!' % type)
elif kv[0] == cls.OP_OUTPUT:
ret['out'] = kv[1]
elif kv[0] == cls.OP_FLOWCLASS and type.upper() == cls.TYPE_MATCH:
ret['flowclass'] = kv[1]
else:
raise RuntimeError("Unrecognizable key: %s" % kv[0])
return ret
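# Worked example (illustrative): splitting an ACTION field produced by the
# mapping algorithm, e.g.
#   NFFGConverter.field_splitter("ACTION", "output=2;TAG=SAP1|comp|4")
# returns {'out': '2', 'vlan_push': '4'}, while a bare "UNTAG" operand maps
# to {'vlan_pop': True}.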
def _gen_unique_bb_id (self, v_node):
"""
Generate a unique identifier based on original ID, delimiter and marker.
:param v_node: virtualizer node object
:return: unique ID
:rtype: str
"""
if self.__unique_bb_id and self.domain:
return "%s%s%s" % (v_node.id.get_value(),
self.UNIQUE_ID_DELIMITER,
self.domain)
else:
return v_node.id.get_as_text()
def _gen_unique_nf_id (self, v_vnf, bb_id=None):
if self.__unique_nf_id:
if bb_id is None:
bb_id = self._gen_unique_bb_id(v_node=v_vnf.get_parent().get_parent())
return "%s%s%s" % (v_vnf.id.get_value(),
self.UNIQUE_ID_DELIMITER,
bb_id)
else:
return v_vnf.id.get_as_text()
def recreate_bb_id (self, id):
"""
Recreate the original ID by removing the trailing unique marker.
:param id: unique id
:type id: str
:return: original ID
:rtype: str
"""
if self.__unique_bb_id:
return str(id).rsplit(self.UNIQUE_ID_DELIMITER, 1)[0]
else:
return str(id)
def recreate_nf_id (self, id):
"""
Recreate the original ID by removing the trailing unique marker.
:param id: unique id
:type id: str
:return: original ID
:rtype: str
"""
if self.__unique_nf_id:
return str(id).split(self.UNIQUE_ID_DELIMITER, 1)[0]
else:
return str(id)
def _convert_flowrule_match (self, match):
"""
Convert Flowrule match field from NFFG format to a unified format used by
the Virtualizer.
Based on Open vSwitch syntax:
http://openvswitch.org/support/dist-docs/ovs-ofctl.8.txt
:param match: flowrule match field
:type match: str
:return: converted data
:rtype: str
"""
# E.g.: "match": "in_port=1;TAG=SAP1|comp|1" -->
# E.g.: "match": "in_port=SAP2|fwd|1;TAG=SAP1|comp|1" -->
# <match>(in_port=1)dl_tag=1</match>
ret = []
match_part = match.split(';')
if len(match_part) < 2:
if not match_part[0].startswith("in_port"):
self.log.warning("Invalid match field: %s" % match)
return
for kv in match_part:
op = kv.split('=', 1)
if op[0] not in self.GENERAL_OPERATIONS:
self.log.warning("Unsupported match operand: %s" % op[0])
continue
if op[0] == self.OP_TAG:
vlan_tag = op[1].split('|')[-1]
try:
vlan_tag = int(vlan_tag)
ret.append("%s=%s" % (self.MATCH_TAG, format(vlan_tag, '#06x')))
except ValueError:
# self.log.warning("Wrong VLAN format: %s!" % op[1])
ret.append("%s=%s" % (self.MATCH_TAG, vlan_tag))
# elif op[0] == self.OP_SGHOP:
# ret.append(kv)
elif op[0] == self.OP_FLOWCLASS:
ret.append(op[1])
return self.OP_DELIMITER.join(ret)
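# Worked example (illustrative): "in_port=1;TAG=SAP1|comp|4" converts to
# "dl_tag=0x0004"; the in_port operand does not contribute to the returned
# match string.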
def _convert_flowrule_action (self, action):
"""
Convert Flowrule action field from NFFG format to a unified format used by
the Virtualizer.
Based on Open vSwitch syntax:
http://openvswitch.org/support/dist-docs/ovs-ofctl.8.txt
:param action: flowrule action field
:type action: str
:return: converted data
:rtype: str
"""
# E.g.: "action": "output=2;UNTAG"
ret = []
action_part = action.split(';')
if len(action_part) < 2:
if not action_part[0].startswith("output"):
self.log.warning("Invalid action field: %s" % action)
return
for kv in action_part:
op = kv.split('=')
if op[0] not in self.GENERAL_OPERATIONS:
# self.log.warning("Unsupported action operand: %s" % op[0])
# return
self.log.debug("Explicit action operand detected: %s" % op[0])
ret.append(kv)
continue
if op[0] == self.OP_TAG:
# E.g.: <action>push_tag:0x0037</action>
vlan = op[1].split('|')[-1]
try:
vlan = int(vlan)
ret.append("%s:%s" % (self.ACTION_PUSH_TAG, format(vlan, '#06x')))
except ValueError:
# self.log.warning(
# "Wrong VLAN format: %s! Using raw flowrule id: %s" % (op[1], vlan))
ret.append("%s:%s" % (self.ACTION_PUSH_TAG, vlan))
elif op[0] == self.OP_UNTAG:
# E.g.: <action>strip_vlan</action>
ret.append(self.ACTION_POP_TAG)
return self.OP_DELIMITER.join(ret)
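# Worked example (illustrative): the action "output=2;UNTAG" converts to
# "pop_tag", and "output=2;TAG=SAP1|comp|4" converts to "push_tag:0x0004".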
def _parse_virtualizer_node_ports (self, nffg, infra, vnode):
"""
Parse ports from a Virtualizer node into an :any:`NodeInfra` node.
:param nffg: Container NFFG
:type nffg: :class:`NFFG`
:param infra: infrastructure node
:type infra: :any:`NodeInfra`
:param vnode: Virtualizer node
:type vnode: Infra_node
:return: None
"""
# Add ports to Infra Node
for vport in vnode.ports:
# If it is a port connected to a SAP: <port-type>port-sap</port-type>
if vport.port_type.get_value() == self.TYPE_VIRTUALIZER_PORT_SAP:
# If inter-domain SAP -> id = <sap> tag: <sap>SAP14</sap>
if vport.sap.is_initialized() and vport.sap.get_value():
# Use unique SAP tag as the id of the SAP
sap_id = vport.sap.get_value() # Optional port.sap
self.log.debug("Detected SAP id from sap field: %s" % sap_id)
# Regular SAP
elif vport.id.get_as_text().startswith(self.SAP_NAME_PREFIX):
# port.id is mandatory now
# Use port name as the SAP.id if it is set else generate one
# SAP.id <--> virtualizer.node.port.id
sap_id = vport.id.get_value()
self.log.debug("Detected SAP id as id field: %s" % sap_id)
# SAP.id <--> virtualizer.node.port.name
elif vport.name.is_initialized() and \
vport.name.get_as_text().upper().startswith(
self.SAP_NAME_PREFIX + ":"):
sap_id = vport.name.get_as_text()[len(self.SAP_NAME_PREFIX + ":"):]
self.log.debug("Detected SAP id from name field: %s" % sap_id)
elif vport.name.is_initialized() and \
vport.name.get_as_text().upper().startswith(self.SAP_NAME_PREFIX):
sap_id = vport.name.get_as_text()
self.log.debug("Detected SAP id as name field: %s" % sap_id)
else:
# Backup SAP id generation
# sap_id = "SAP%s" % len([s for s in nffg.saps])
sap_id = "%s" % vport.id.get_value()
self.log.debug(
"No explicit SAP id was detected! Generated: %s" % sap_id)
# Add port names
if vport.name.is_initialized():
sap_prefix = "%s:" % self.SAP_NAME_PREFIX
if vport.name.get_as_text().startswith(sap_prefix):
sap_name = vport.name.get_as_text()[len(sap_prefix):]
else:
sap_name = vport.name.get_as_text()
else:
sap_name = sap_id
# Create SAP and Add port to SAP
sap = nffg.add_sap(id=sap_id, name=sap_name)
self.log.debug("Created SAP node: %s" % sap)
try:
# Use port id of the Infra node as the SAP port id
# because sap port info was lost during NFFG->Virtualizer conversion
sap_port_id = int(vport.id.get_value()) # Mandatory port.id
except ValueError:
sap_port_id = vport.id.get_value()
sap_port = sap.add_port(id=sap_port_id)
self.log.debug("Added SAP port: %s" % sap_port)
# Add port properties as metadata to SAP port
if vport.sap.is_initialized():
# Add sap value to properties to be backward compatible for adaptation
# layer
sap_port.add_property("type", "inter-domain")
sap_port.add_property("sap", vport.sap.get_value())
sap_port.sap = str(vport.sap.get_value())
# Create and add the port of the opposite Infra node
try:
infra_port_id = int(vport.id.get_value())
except ValueError:
infra_port_id = vport.id.get_value()
# Add port to Infra
infra_port = infra.add_port(id=infra_port_id)
self.log.debug("Added infra port: %s" % infra_port)
if vport.sap.is_initialized():
# For internal use and backward compatibility
infra_port.add_property("sap", vport.sap.get_value())
infra_port.sap = vport.sap.get_value()
# Add port names
if vport.name.is_initialized():
sap_port.name = infra_port.name = vport.name.get_as_text()
# Fill SAP-specific data
# Add infra port capabilities
if vport.capability.is_initialized():
sap_port.capability = infra_port.capability = \
vport.capability.get_value()
self.log.debug("Added capability: %s" % sap_port.capability)
if vport.sap_data.is_initialized():
if vport.sap_data.technology.is_initialized():
sap_port.technology = infra_port.technology = \
vport.sap_data.technology.get_value()
self.log.debug("Added technology: %s" % sap_port.technology)
if vport.sap_data.role.is_initialized():
sap_port.role = infra_port.role = vport.sap_data.role.get_value()
self.log.debug("Added role: %s" % sap_port.role)
if vport.sap_data.resources.is_initialized():
if vport.sap_data.resources.delay.is_initialized():
try:
sap_port.delay = infra_port.delay = float(
vport.sap_data.resources.delay.get_value())
except ValueError:
sap_port.delay = infra_port.delay = \
vport.sap_data.resources.delay.get_value()
self.log.debug("Added delay: %s" % sap_port.delay)
if vport.sap_data.resources.bandwidth.is_initialized():
try:
sap_port.bandwidth = infra_port.bandwidth = float(
vport.sap_data.resources.bandwidth.get_value())
except ValueError:
sap_port.bandwidth = infra_port.bandwidth = \
vport.sap_data.resources.bandwidth.get_value()
self.log.debug("Added bandwidth: %s" % sap_port.bandwidth)
if vport.sap_data.resources.cost.is_initialized():
try:
sap_port.cost = infra_port.cost = float(
vport.sap_data.resources.cost.get_value())
except ValueError:
sap_port.cost = infra_port.cost = \
vport.sap_data.resources.cost.get_value()
self.log.debug("Added cost: %s" % sap_port.cost)
if vport.sap_data.resources.qos.is_initialized():
try:
sap_port.qos = infra_port.qos = \
vport.sap_data.resources.qos.get_value()
except ValueError:
sap_port.qos = infra_port.qos = \
vport.sap_data.resources.qos.get_value()
self.log.debug("Added qos: %s" % sap_port.qos)
if vport.control.is_initialized():
sap_port.controller = infra_port.controller = \
vport.control.controller.get_value()
self.log.debug("Added controller: %s" % sap_port.controller)
sap_port.orchestrator = infra_port.orchestrator = \
vport.control.orchestrator.get_value()
self.log.debug("Added orchestrator: %s" % sap_port.orchestrator)
if vport.addresses.is_initialized():
self.log.debug("Translate addresses...")
sap_port.l2 = infra_port.l2 = vport.addresses.l2.get_value()
sap_port.l4 = infra_port.l4 = vport.addresses.l4.get_value()
for l3 in vport.addresses.l3.itervalues():
sap_port.l3.add_l3address(id=l3.id.get_value(),
name=l3.name.get_value(),
configure=l3.configure.get_value(),
client=l3.client.get_value(),
requested=l3.requested.get_value(),
provided=l3.provided.get_value())
infra_port.l3.add_l3address(id=l3.id.get_value(),
name=l3.name.get_value(),
configure=l3.configure.get_value(),
client=l3.client.get_value(),
requested=l3.requested.get_value(),
provided=l3.provided.get_value())
# Add metadata from infra port metadata to sap metadata
for key in vport.metadata: # Optional - port.metadata
sap_port.add_metadata(name=key,
value=vport.metadata[key].value.get_value())
infra_port.add_metadata(name=key,
value=vport.metadata[key].value.get_value())
self.log.debug("Added port for SAP -> %s" % infra_port)
# Add connection between infra - SAP
        # SAP-Infra is a static link --> create links for both directions
l1, l2 = nffg.add_undirected_link(
p1p2id="%s-%s-link" % (sap_id, infra.id),
p2p1id="%s-%s-link-back" % (sap_id, infra.id),
port1=sap_port,
port2=infra_port,
delay=sap_port.delay,
bandwidth=sap_port.bandwidth,
cost=sap_port.cost, qos=sap_port.qos)
# Handle operation tag
if vport.get_operation() is not None:
self.log.debug("Found operation tag: %s for port: %s" % (
vport.get_operation(), vport.id.get_value()))
sap.operation = vport.get_operation()
sap_port.operation = vport.get_operation()
# l1.operation = vport.get_operation()
# l2.operation = vport.get_operation()
self.log.debug("Added SAP-Infra connection: %s" % l1)
self.log.debug("Added Infra-SAP connection: %s" % l2)
# If it is not SAP port and probably connected to another infra
elif vport.port_type.get_value() == self.TYPE_VIRTUALIZER_PORT_ABSTRACT:
# Add Infra port
try:
infra_port_id = int(vport.id.get_value())
except ValueError:
infra_port_id = vport.id.get_value()
        # Add the port to the Infra node and copy the port properties
infra_port = infra.add_port(id=infra_port_id)
if vport.sap.is_initialized():
self.log.debug("Detected port as merged inter-domain port")
infra_port.add_property("type", "inter-domain")
self.log.debug("Added infra port: %s" % infra_port)
self.__copy_vport_attrs(port=infra_port, vport=vport)
self.log.debug("Added static %s" % infra_port)
else:
self.log.warning("Port type is not defined for port: %s " % vport)
def _parse_virtualizer_node_nfs (self, nffg, infra, vnode):
"""
Parse VNFs from a Virtualizer nodes into :any:`NodeNF` list.
:param nffg: Container NFFG
:type nffg: :class:`NFFG`
:param infra: infrastructure node
:type infra: :any:`NodeInfra`
:param vnode: Virtualizer node
:type vnode: Infra_node
:return: None
"""
# Create NF instances
for v_vnf in vnode.NF_instances:
# Get NF params
nf_id = self._gen_unique_nf_id(v_vnf=v_vnf, bb_id=infra.id) # Mandatory
nf_name = v_vnf.name.get_value() # Optional - nf.name, default = None
nf_ftype = v_vnf.type.get_value() # Optional - nf.type, default = None
      # No deployment_type in Virtualizer; try to get it from metadata
if 'deployment_type' in v_vnf.metadata.keys():
nf_dep_type = v_vnf.metadata['deployment_type'].value.get_value()
else:
nf_dep_type = None
# Add NF resources, remove optional units
if v_vnf.resources.is_initialized():
if v_vnf.resources.cpu.is_initialized():
nf_cpu = v_vnf.resources.cpu.get_as_text().split(' ')[0]
else:
nf_cpu = None
if v_vnf.resources.mem.is_initialized():
nf_mem = v_vnf.resources.mem.get_as_text().split(' ')[0]
else:
nf_mem = None
if v_vnf.resources.storage.is_initialized():
nf_storage = v_vnf.resources.storage.get_as_text().split(' ')[0]
else:
nf_storage = None
if v_vnf.resources.cost.is_initialized():
nf_cost = v_vnf.resources.cost.get_as_text().split(' ')[0]
else:
nf_cost = None
try:
nf_cpu = _res_parser(nf_cpu) if nf_cpu is not None else None
except ValueError as e:
self.log.warning("Resource cpu value is not valid number: %s" % e)
try:
nf_mem = _res_parser(nf_mem) if nf_mem is not None else None
except ValueError as e:
self.log.warning("Resource mem value is not valid number: %s" % e)
try:
nf_storage = _res_parser(
nf_storage) if nf_storage is not None else None
except ValueError as e:
self.log.warning(
"Resource storage value is not valid number: %s" % e)
try:
nf_cost = _res_parser(nf_cost) if nf_cost is not None else None
except ValueError as e:
self.log.warning(
"Resource cost value is not valid number: %s" % e)
else:
nf_cpu = nf_mem = nf_storage = nf_cost = None
      # Get remaining NF resources from metadata
if 'delay' in v_vnf.metadata.keys():
nf_delay = v_vnf.metadata['delay'].value.get_value()
else:
nf_delay = None
if 'bandwidth' in v_vnf.metadata.keys():
nf_bandwidth = v_vnf.metadata['bandwidth'].value.get_value()
else:
nf_bandwidth = None
# Create NodeNF
nf = nffg.add_nf(id=nf_id, name=nf_name, func_type=nf_ftype,
dep_type=nf_dep_type, cpu=nf_cpu, mem=nf_mem,
storage=nf_storage, delay=nf_delay, cost=nf_cost,
bandwidth=nf_bandwidth)
if v_vnf.status.is_initialized():
nf.status = v_vnf.status.get_value()
self.log.debug("Created NF: %s" % nf)
self.log.debug("Parse NF constraints...")
if v_vnf.constraints.is_initialized():
# Add affinity list
if v_vnf.constraints.affinity.is_initialized():
for aff in v_vnf.constraints.affinity.values():
try:
aff_id = self._gen_unique_nf_id(v_vnf=aff.object.get_target())
aff = nf.constraints.add_affinity(id=aff.id.get_value(),
value=aff_id)
self.log.debug("Add affinity: %s to %s" % (aff, nf.id))
except StandardError as e:
self.log.exception(
"Skip affinity conversion due to error: %s" % e)
# Add antiaffinity list
if v_vnf.constraints.antiaffinity.is_initialized():
for naff in v_vnf.constraints.antiaffinity.values():
try:
naff_id = self._gen_unique_nf_id(v_vnf=naff.object.get_target(),
bb_id=infra.id)
naff = nf.constraints.add_antiaffinity(id=naff.id.get_value(),
value=naff_id)
self.log.debug("Add antiaffinity: %s to %s" % (naff, nf.id))
except StandardError as e:
self.log.exception(
"Skip anti-affinity conversion due to error: %s" % e)
# Add variables dict
if v_vnf.constraints.variable.is_initialized():
for var in v_vnf.constraints.variable.values():
try:
var_id = self._gen_unique_nf_id(v_vnf=var.object.get_target(),
bb_id=infra.id)
var = nf.constraints.add_variable(key=var.id.get_value(),
id=var_id)
self.log.debug("Add variable: %s to %s" % (var, nf.id))
except StandardError as e:
self.log.exception(
"Skip variable conversion due to error: %s" % e)
# Add constraint list
if v_vnf.constraints.constraint.is_initialized():
for constraint in v_vnf.constraints.constraint.values():
try:
formula = nf.constraints.add_constraint(
id=constraint.id.get_value(),
formula=constraint.formula.get_value())
self.log.debug("Add constraint: %s to %s" % (formula, nf.id))
except StandardError as e:
self.log.exception(
"Skip constraint conversion due to error: %s" % e)
if v_vnf.constraints.restorability.is_initialized():
nf.constraints.restorability = \
v_vnf.constraints.restorability.get_as_text()
self.log.debug("Add restorability: %s to %s"
% (nf.constraints.restorability, nf.id))
# Add NF metadata
for key in v_vnf.metadata:
if key not in ('delay', 'bandwidth'):
nf.add_metadata(name=key,
value=v_vnf.metadata[key].value.get_value())
# Handle operation tag
if v_vnf.get_operation() is not None:
self.log.debug("Found operation tag: %s for NF: %s" % (
v_vnf.get_operation(), v_vnf.id.get_value()))
nf.operation = v_vnf.get_operation()
# Create NF ports
for vport in v_vnf.ports:
# Add VNF port
try:
nf_port_id = int(vport.id.get_value())
except ValueError:
nf_port_id = vport.id.get_value()
# Create and Add port
nf_port = nf.add_port(id=nf_port_id)
# Fill SAP-specific data
# Add port properties
if vport.name.is_initialized():
nf_port.name = vport.name.get_value()
# Store specific SAP port in NFs transparently
if vport.port_type.is_initialized():
if vport.sap.is_initialized():
nf_port.sap = vport.sap.get_value()
else:
self.log.warning("Port type is missing from node: %s" % vport.id)
# Add infra port capabilities
if vport.capability.is_initialized():
nf_port.capability = vport.capability.get_value()
if vport.sap_data.is_initialized():
if vport.sap_data.technology.is_initialized():
nf_port.technology = vport.sap_data.technology.get_value()
if vport.sap_data.role.is_initialized():
nf_port.role = vport.sap_data.role.get_value()
if vport.sap_data.resources.is_initialized():
if vport.sap_data.resources.delay.is_initialized():
try:
nf_port.delay = float(
vport.sap_data.resources.delay.get_value())
except ValueError:
nf_port.delay = vport.sap_data.resources.delay.get_value()
if vport.sap_data.resources.bandwidth.is_initialized():
try:
nf_port.bandwidth = float(
vport.sap_data.resources.bandwidth.get_value())
except ValueError:
nf_port.bandwidth = \
vport.sap_data.resources.bandwidth.get_value()
if vport.sap_data.resources.cost.is_initialized():
try:
nf_port.cost = float(
vport.sap_data.resources.cost.get_value())
except ValueError:
nf_port.cost = vport.sap_data.resources.cost.get_value()
if vport.control.is_initialized():
if vport.control.controller.is_initialized():
nf_port.controller = vport.control.controller.get_value()
if vport.control.orchestrator.is_initialized():
nf_port.orchestrator = vport.control.orchestrator.get_value()
if vport.addresses.is_initialized():
nf_port.l2 = vport.addresses.l2.get_value()
nf_port.l4 = vport.addresses.l4.get_value()
for l3 in vport.addresses.l3.itervalues():
nf_port.l3.add_l3address(id=l3.id.get_value(),
name=l3.name.get_value(),
configure=l3.configure.get_value(),
client=l3.client.get_value(),
requested=l3.requested.get_value(),
provided=l3.provided.get_value())
# Add port metadata
for key in vport.metadata:
nf_port.add_metadata(name=key,
value=vport.metadata[key].value.get_value())
        # VNF port cannot be a SAP port -> skip <port_type> and <sap> saving
# Handle operation tag
if vport.get_operation() is not None:
self.log.debug("Found operation tag: %s for port: %s" % (
vport.get_operation(), vport.id.get_value()))
nf_port.operation = vport.get_operation()
self.log.debug("Added NF port: %s" % nf_port)
# Add connection between Infra - NF
# Infra - NF port on Infra side is always a dynamically generated port
dyn_port = self.LABEL_DELIMITER.join((infra.id,
nf_id,
vport.id.get_as_text()))
# Add Infra-side port
infra_port = infra.add_port(id=dyn_port)
self.log.debug("Added dynamic port for NF -> %s" % infra_port)
# NF-Infra is dynamic link --> create special undirected link
l1, l2 = nffg.add_undirected_link(port1=nf_port,
port2=infra_port,
dynamic=True)
self.log.debug("Added dynamic VNF-Infra connection: %s" % l1)
self.log.debug("Added dynamic Infra-VNF connection: %s" % l2)
@staticmethod
def __parse_external_port (flowentry):
"""
:param flowentry:
:return: (domain name, node id, port id)
"""
    res = re.match(r"(.*)://.*node\[id=(.*?)\].*port\[id=(.*?)\].*", flowentry)
    if res is None or len(res.groups()) != 3:
      log.error("Missing id from external flowrule: %s" % flowentry)
      return None, None, None
return res.groups()
def _parse_virtualizer_node_flowentries (self, nffg, infra, vnode):
"""
Parse FlowEntries from a Virtualizer Node into an :any:`InfraPort`.
:param nffg: Container NFFG
:type nffg: :class:`NFFG`
:param infra: infrastructure node
:type infra: :any:`NodeInfra`
:param vnode: Virtualizer node
:type vnode: Infra_node
:return: None
"""
# Create Flowrules
for flowentry in vnode.flowtable:
vport = vport_id = None
fr_external = False
fr_id = flowentry.id.get_value() # Mandatory flowentry.id
try:
fr_id = int(fr_id)
except ValueError:
self.log.error("Parsed flowentry id is not valid integer!")
continue
# e.g. in_port=1(;TAG=SAP1|comp|1)
fr_match = "in_port="
if not flowentry.port.is_initialized():
self.log.error("Port attribute is missing from flowrule:\n%s"
% flowentry.xml())
continue
# Check if the in port is an external port (that does not exist)
if "://" in flowentry.port.get_as_text():
self.log.debug("Detected external in port reference: %s"
% flowentry.port.get_as_text())
# Mark flowrule as external so SG recreation can skip it
fr_external = True
ext_domain, ext_node, ext_port = self.__parse_external_port(
flowentry.port.get_value())
vport_id = "EXTERNAL:%s" % ext_port
bb_node = nffg[self._gen_unique_bb_id(vnode)]
if vport_id in bb_node.ports:
self.log.debug("External port: %s already exits! Skip creating..."
% vport_id)
vport = bb_node.ports[vport_id]
else:
vport = bb_node.add_port(id=vport_id)
self.log.debug("Added external in port: %s" % vport)
# Mark dynamic port as external for later processing
try:
ext_port = int(ext_port)
except ValueError:
pass
vport.role = "EXTERNAL"
vport.add_property("domain", ext_domain)
vport.add_property("node", ext_node)
vport.add_property("port", ext_port)
vport.add_property("path", flowentry.port.get_as_text())
fr_match += vport_id
# Add SAP to request
if vport_id in nffg and vport_id in nffg[vport_id].ports:
# SAP with port already exist
if nffg[vport_id].ports[vport_id].role != "EXTERNAL":
self.log.error("SAP: %s already exists but it is not an external "
"SAP!" % nffg[vport_id].ports[vport_id])
else:
self.log.debug("External SAP: %s already exists! Skip creation..."
% nffg[vport_id].ports[vport_id])
else:
ext_sap = nffg.add_sap(id=vport_id)
ext_sap_port = ext_sap.add_port(id=vport_id)
ext_sap_port.role = "EXTERNAL"
ext_sap_port.add_property("path", flowentry.port.get_as_text())
nffg.add_undirected_link(port1=vport, port2=ext_sap_port)
self.log.debug("Created external SAP: %s" % ext_sap)
# Set v_fe_port for further use
v_fe_port = None
else:
try:
v_fe_port = flowentry.port.get_target()
except StandardError:
self.log.exception("Got unexpected exception during acquisition of "
"IN Port in Flowentry: %s!" % flowentry.xml())
continue
# Check if src port is a VNF port --> create the tagged port name
if "NF_instances" in flowentry.port.get_as_text():
v_src_nf = v_fe_port.get_parent().get_parent()
v_src_node = v_src_nf.get_parent().get_parent()
# Add domain name to the node id if unique_id is set
src_node = self._gen_unique_bb_id(v_src_node)
src_nf = self._gen_unique_nf_id(v_vnf=v_src_nf, bb_id=infra.id)
fr_match += self.LABEL_DELIMITER.join((src_node, src_nf,
v_fe_port.id.get_as_text()))
else:
# Else just Infra port --> add only the port number
fr_match += v_fe_port.id.get_as_text()
# Pre-check target-less dst port flowrule
fr_action = "output="
if not flowentry.out.is_initialized():
self.log.error("Out attribute is missing from flowrule:\n%s"
% flowentry.xml())
continue
if "://" in flowentry.out.get_as_text():
self.log.debug("Detected external out port reference: %s"
% flowentry.out.get_as_text())
# Mark flowrule as external so SG recreation can skip it
fr_external = True
ext_domain, ext_node, ext_port = self.__parse_external_port(
flowentry.out.get_value())
ext_port_id = "EXTERNAL:%s" % ext_port
bb_node = nffg[self._gen_unique_bb_id(vnode)]
if ext_port_id in bb_node.ports:
self.log.debug("External port: %s already exits! Skip creating..." %
ext_port_id)
ext_vport = bb_node.ports[ext_port_id]
else:
ext_vport = bb_node.add_port(id=ext_port_id)
self.log.debug("Added external out port: %s" % ext_vport)
# Mark dynamic port as external for later processing
try:
ext_port = int(ext_port)
except ValueError:
pass
ext_vport.role = "EXTERNAL"
# ext_vport.sap = ext_port
ext_vport.add_property("domain", ext_domain)
ext_vport.add_property("node", ext_node)
ext_vport.add_property("port", ext_port)
ext_vport.add_property("path", flowentry.out.get_as_text())
fr_action += ext_port_id
# Add SAP to request
if ext_port_id in nffg and ext_port_id in nffg[ext_port_id].ports:
# SAP with port already exist
if nffg[ext_port_id].ports[ext_port_id].role != "EXTERNAL":
self.log.error("SAP: %s already exists but it is not an external "
"SAP!" % nffg[ext_port_id].ports[ext_port_id])
else:
self.log.debug("External SAP: %s already exists! Skip creating..."
% nffg[ext_port_id].ports[ext_port_id])
else:
ext_sap = nffg.add_sap(id=ext_port_id)
ext_sap_port = ext_sap.add_port(id=ext_port_id)
ext_sap_port.role = "EXTERNAL"
ext_sap_port.add_property("path", flowentry.out.get_as_text())
nffg.add_undirected_link(port1=ext_vport, port2=ext_sap_port)
self.log.debug("Created external SAP: %s" % ext_sap)
# Set v_fe_out for further use
v_fe_out = None
else:
try:
v_fe_out = flowentry.out.get_target()
except StandardError:
self.log.exception(
"Got unexpected exception during acquisition of OUT "
"Port in Flowentry: %s!" % flowentry.xml())
continue
# Check if dst port is a VNF port --> create the tagged port name
if "NF_instances" in flowentry.out.get_as_text():
v_dst_nf = v_fe_out.get_parent().get_parent()
v_dst_node = v_dst_nf.get_parent().get_parent()
dst_node = self._gen_unique_bb_id(v_dst_node)
dst_nf = self._gen_unique_nf_id(v_vnf=v_dst_nf, bb_id=infra.id)
fr_action += self.LABEL_DELIMITER.join((dst_node, dst_nf,
v_fe_out.id.get_as_text()))
else:
# Else just Infra port --> add only the port number
fr_action += v_fe_out.id.get_as_text()
# Check if there is a matching operation -> currently just TAG is used
if flowentry.match.is_initialized() and flowentry.match.get_value():
for op in flowentry.match.get_as_text().split(self.OP_DELIMITER):
if op.startswith(self.OP_INPORT):
pass
# e.g. <match>dl_tag=0x0004</match> --> in_port=1;TAG=SAP2|fwd|4
elif op.startswith(self.MATCH_TAG):
# if src or dst was a SAP: SAP.id == port.name
            # if src or dst is a VNF port: use the id of the port's parent NF
if v_fe_port is None:
_src_name = "external"
elif v_fe_port.port_type.get_as_text() == \
self.TYPE_VIRTUALIZER_PORT_SAP:
# If port is an inter-domain SAP port --> port.sap
if v_fe_port.sap.is_initialized() and v_fe_port.sap.get_value():
_src_name = v_fe_port.sap.get_as_text()
# If port is local SAP --> SAP:<sap_name>
elif v_fe_port.name.is_initialized() and str(
v_fe_port.name.get_value()).startswith(self.SAP_NAME_PREFIX):
_src_name = v_fe_port.name.get_as_text()[
len(self.SAP_NAME_PREFIX + ":"):]
else:
_src_name = str(v_fe_port.name.get_value())
else:
_src_name = v_fe_port.get_parent().get_parent().id.get_as_text()
# If port is an inter-domain SAP port --> port.sap
if v_fe_out is None:
_dst_name = "external"
elif v_fe_out.port_type.get_as_text() == \
self.TYPE_VIRTUALIZER_PORT_SAP:
# If port is an inter-domain SAP port --> port.sap
if v_fe_out.sap.is_initialized() and v_fe_out.sap.get_value():
_dst_name = v_fe_out.sap.get_as_text()
# If port is local SAP --> SAP:<sap_name>
elif v_fe_out.name.is_initialized() and str(
v_fe_out.name.get_value()).startswith(self.SAP_NAME_PREFIX):
_dst_name = v_fe_out.name.get_as_text()[
len(self.SAP_NAME_PREFIX + ':'):]
else:
_dst_name = v_fe_out.name.get_as_text()
else:
_dst_name = v_fe_out.get_parent().get_parent().id.get_as_text()
# Convert from int/hex to int
_tag = int(op.split('=')[1], base=0)
fr_match += ";%s=%s" % (self.OP_TAG, self.LABEL_DELIMITER.join(
(_src_name, _dst_name, str(_tag))))
else:
            # Everything else must come from a flowclass match
fr_match += ";%s=%s" % (self.OP_FLOWCLASS, op)
# Check if there is an action operation
if flowentry.action.is_initialized() and flowentry.action.get_value():
for op in flowentry.action.get_as_text().split(self.OP_DELIMITER):
# e.g. <action>push_tag:0x0003</action> -->
# output=1;TAG=decomp|SAP2|3
if op.startswith(self.ACTION_PUSH_TAG):
# tag: src element name | dst element name | tag
# if src or dst was a SAP: SAP.id == port.name
            # if src or dst is a VNF port: use the id of the port's parent NF
if v_fe_port is None:
_src_name = "external"
elif v_fe_port.port_type.get_as_text() == \
self.TYPE_VIRTUALIZER_PORT_SAP:
# If port is an inter-domain SAP port --> port.sap
if v_fe_port.sap.is_initialized() and v_fe_port.sap.get_value():
_src_name = v_fe_port.sap.get_as_text()
# If port is local SAP --> SAP:<sap_name>
elif v_fe_port.name.is_initialized() and str(
v_fe_port.name.get_value()).startswith(self.SAP_NAME_PREFIX):
_src_name = v_fe_port.name.get_as_text()[
len(self.SAP_NAME_PREFIX + ':'):]
else:
_src_name = v_fe_port.name.get_as_text()
else:
_src_name = v_fe_port.get_parent().get_parent().id.get_as_text()
if v_fe_out is None:
_dst_name = "external"
elif v_fe_out.port_type.get_as_text() == \
self.TYPE_VIRTUALIZER_PORT_SAP:
# If port is an inter-domain SAP port --> port.sap
if v_fe_out.sap.is_initialized() and v_fe_out.sap.get_value():
_dst_name = v_fe_out.sap.get_as_text()
elif v_fe_out.name.is_initialized() and str(
v_fe_out.name.get_value()).startswith(self.SAP_NAME_PREFIX):
_dst_name = v_fe_out.name.get_as_text()[
len(self.SAP_NAME_PREFIX + ':'):]
else:
_dst_name = v_fe_out.name.get_as_text()
else:
_dst_name = v_fe_out.get_parent().get_parent().id.get_as_text()
# Convert from int/hex to int
_tag = int(op.split(':')[1], base=0)
fr_action += ";%s=%s" % (self.OP_TAG, self.LABEL_DELIMITER.join(
(_src_name, _dst_name, str(_tag))))
# e.g. <action>strip_vlan</action> --> output=EE2|fwd|1;UNTAG
elif op.startswith(self.ACTION_POP_TAG):
fr_action += ";%s" % self.OP_UNTAG
else:
fr_action += ";%s" % op
# Get the src (port where fr need to store) and dst port id
if vport_id is None:
try:
vport_id = int(v_fe_port.id.get_value())
except ValueError:
vport_id = v_fe_port.id.get_value()
# Get port from NFFG in which need to store the fr
if vport is None:
try:
# If the port is an Infra port
if "NF_instances" not in flowentry.port.get_as_text():
vport = nffg[infra.id].ports[vport_id]
# If the port is a VNF port -> get the dynamic port in the Infra
else:
_vnf_id = self._gen_unique_nf_id(
v_vnf=v_fe_port.get_parent().get_parent(), bb_id=infra.id)
_dyn_port = [l.dst.id for u, v, l in
nffg.network.edges_iter([_vnf_id], data=True) if
l.type == NFFG.TYPE_LINK_DYNAMIC and str(l.src.id) ==
str(vport_id)]
if len(_dyn_port) > 1:
self.log.warning("Multiple dynamic link detected for NF(id: %s) "
"Use first link ..." % _vnf_id)
elif len(_dyn_port) < 1:
raise RuntimeError("Missing infra-vnf dynamic link for vnf: %s" %
_vnf_id)
# Get dynamic port from infra
vport = nffg[infra.id].ports[_dyn_port[0]]
except RuntimeError as e:
self.log.error("Port: %s is not found in the NFFG: "
"%s from the flowrule:\n%s" %
(vport_id, e.message, flowentry.xml()))
continue
# Get resource values
self.log.debug("Parse flowrule resources...")
if flowentry.resources.is_initialized():
if flowentry.resources.bandwidth.is_initialized():
try:
fr_bw = float(flowentry.resources.bandwidth.get_value())
except ValueError:
fr_bw = flowentry.resources.bandwidth.get_value()
else:
fr_bw = None
if flowentry.resources.delay.is_initialized():
try:
fr_delay = float(flowentry.resources.delay.get_value())
except ValueError:
fr_delay = flowentry.resources.delay.get_value()
else:
fr_delay = None
if flowentry.resources.cost.is_initialized():
try:
fr_cost = float(flowentry.resources.cost.get_value())
except ValueError:
fr_cost = flowentry.resources.cost.get_value()
else:
fr_cost = None
if flowentry.resources.qos.is_initialized():
fr_qos = flowentry.resources.qos.get_value()
else:
fr_qos = None
else:
fr_bw = fr_delay = fr_cost = fr_qos = None
# Add constraints
self.log.debug("Parse flowrule constraints...")
fr_constraints = Constraints()
if flowentry.constraints.is_initialized():
# Add affinity list
if flowentry.constraints.affinity.is_initialized():
for aff in flowentry.constraints.affinity.values():
aff = fr_constraints.add_affinity(id=aff.id.get_value(),
value=aff.object.get_value())
self.log.debug("Add affinity: %s to %s" % (aff, fr_id))
# Add antiaffinity list
if flowentry.constraints.antiaffinity.is_initialized():
for naff in flowentry.constraints.antiaffinity.values():
naff = fr_constraints.add_antiaffinity(id=naff.id.get_value(),
value=naff.object.get_value())
self.log.debug("Add antiaffinity: %s to %s" % (naff, fr_id))
# Add variables dict
if flowentry.constraints.variable.is_initialized():
for var in flowentry.constraints.variable.values():
var = fr_constraints.add_variable(key=var.id.get_value(),
id=var.object.get_value())
self.log.debug("Add variable: %s to %s" % (var, fr_id))
# Add constraint list
if flowentry.constraints.constraint.is_initialized():
for constraint in flowentry.constraints.constraint.values():
formula = fr_constraints.add_constraint(
id=constraint.id.get_value(),
formula=constraint.formula.get_value())
self.log.debug("Add constraint: %s to %s" % (formula, fr_id))
if flowentry.constraints.restorability.is_initialized():
fr_constraints.restorability = \
flowentry.constraints.restorability.get_as_text()
self.log.debug("Add restorability: %s to %s"
% (fr_constraints.restorability, fr_id))
# Add flowrule to port
fr = vport.add_flowrule(id=fr_id, match=fr_match, action=fr_action,
bandwidth=fr_bw, delay=fr_delay, cost=fr_cost,
qos=fr_qos, external=fr_external,
constraints=fr_constraints)
# Handle operation tag
if flowentry.get_operation() is not None:
self.log.debug("Found operation tag: %s for flowentry: %s" % (
flowentry.get_operation(), flowentry.id.get_value()))
fr.operation = flowentry.get_operation()
self.log.debug("Added %s" % fr)
def _parse_virtualizer_nodes (self, nffg, virtualizer):
"""
Parse Infrastructure node from Virtualizer.
:param nffg: Container NFFG
:type nffg: :class:`NFFG`
:param virtualizer: Virtualizer object
:type virtualizer: Virtualizer
:return: None
"""
# Iterate over virtualizer/nodes --> node = Infra
for vnode in virtualizer.nodes:
# Node params
# Add domain name to the node id if unique_id is set
node_id = self._gen_unique_bb_id(vnode)
if vnode.name.is_initialized(): # Optional - node.name
node_name = vnode.name.get_value()
else:
node_name = None
node_domain = self.domain # Set domain as the domain of the Converter
node_type = vnode.type.get_value() # Mandatory - virtualizer.type
# Node-resources params
if vnode.resources.is_initialized():
# Remove units and store the value only
node_cpu = vnode.resources.cpu.get_as_text().split(' ')[0]
node_mem = vnode.resources.mem.get_as_text().split(' ')[0]
node_storage = vnode.resources.storage.get_as_text().split(' ')[0]
node_cost = vnode.resources.cost.get_value()
node_zone = vnode.resources.zone.get_value()
try:
node_cpu = _res_parser(node_cpu) if node_cpu is not None else None
except ValueError as e:
self.log.warning("Resource cpu value is not valid number: %s" % e)
try:
node_mem = _res_parser(node_mem) if node_mem is not None else None
except ValueError as e:
self.log.warning("Resource mem value is not valid number: %s" % e)
try:
node_storage = _res_parser(
node_storage) if node_storage is not None else None
except ValueError as e:
self.log.warning("Resource storage value is not valid number: %s" % e)
else:
# Default value for cpu,mem,storage: None
node_cpu = node_mem = node_storage = node_cost = node_zone = None
# Try to get bw value from metadata
if 'bandwidth' in vnode.metadata:
# Converted to float in Infra constructor
node_bw = vnode.metadata['bandwidth'].value.get_value()
else:
# Iterate over links to summarize bw value for infra node
node_bw = [
float(vlink.resources.bandwidth.get_value())
for vlink in vnode.links if vlink.resources.is_initialized() and
vlink.resources.bandwidth.is_initialized()]
# Default value: None
node_bw = min(node_bw) if node_bw else None
try:
if node_bw is not None:
node_bw = float(node_bw)
except ValueError as e:
self.log.warning(
"Resource bandwidth value is not valid number: %s" % e)
if 'delay' in vnode.metadata:
# Converted to float in Infra constructor
node_delay = vnode.metadata['delay'].value.get_value()
else:
# Iterate over links to summarize delay value for infra node
node_delay = [
float(vlink.resources.delay.get_value())
for vlink in vnode.links if vlink.resources.is_initialized() and
vlink.resources.delay.is_initialized()]
# Default value: None
node_delay = max(node_delay) if node_delay else None
try:
if node_delay is not None:
node_delay = float(node_delay)
except ValueError as e:
self.log.warning("Resource delay value is not valid number: %s" % e)
# Add Infra Node to NFFG
infra = nffg.add_infra(id=node_id, name=node_name, domain=node_domain,
infra_type=node_type, cpu=node_cpu, mem=node_mem,
cost=node_cost, zone=node_zone,
storage=node_storage, delay=node_delay,
bandwidth=node_bw)
self.log.debug("Created INFRA node: %s" % infra)
self.log.debug("Parsed resources: %s" % infra.resources)
for vlink in vnode.links:
if vlink.resources.is_initialized() and \
vlink.resources.delay.is_initialized():
try:
dm_src = vlink.src.get_target().id.get_value()
dm_dst = vlink.dst.get_target().id.get_value()
except StandardError:
self.log.exception(
"Got unexpected exception during acquisition of src/dst "
"Port in Link: %s!" % vlink.xml())
continue
dm_delay = float(vlink.resources.delay.get_value())
infra.delay_matrix.add_delay(src=dm_src, dst=dm_dst, delay=dm_delay)
self.log.debug("Added delay: %s to delay matrix [%s --> %s]"
% (dm_delay, dm_src, dm_dst))
      # Add supported types derived from the supported NF list
for sup_nf in vnode.capabilities.supported_NFs:
infra.add_supported_type(sup_nf.type.get_value())
# Handle operation tag
if vnode.get_operation() is not None:
self.log.debug("Found operation tag: %s for node: %s" % (
vnode.get_operation(), vnode.id.get_value()))
infra.operation = vnode.get_operation()
# Parse Ports
self._parse_virtualizer_node_ports(nffg=nffg, infra=infra, vnode=vnode)
# Parse NF_instances
self._parse_virtualizer_node_nfs(nffg=nffg, infra=infra, vnode=vnode)
# Parse Flowentries
self._parse_virtualizer_node_flowentries(nffg=nffg, infra=infra,
vnode=vnode)
self.log.debug("Parse INFRA node constraints...")
if vnode.constraints.is_initialized():
# Add affinity list
if vnode.constraints.affinity.is_initialized():
for aff in vnode.constraints.affinity.values():
aff = infra.constraints.add_affinity(
id=aff.id.get_value(),
value=aff.object.get_value())
self.log.debug("Add affinity: %s to %s" % (aff, infra.id))
# Add antiaffinity list
if vnode.constraints.antiaffinity.is_initialized():
for naff in vnode.constraints.antiaffinity.values():
naff = infra.constraints.add_antiaffinity(
id=naff.id.get_value(),
value=naff.object.get_value())
self.log.debug("Add antiaffinity: %s to %s" % (naff, infra.id))
# Add variables dict
if vnode.constraints.variable.is_initialized():
for var in vnode.constraints.variable.values():
var = infra.constraints.add_variable(
key=var.id.get_value(),
id=var.object.get_value())
self.log.debug("Add variable: %s to %s" % (var, infra.id))
# Add constraint list
if vnode.constraints.constraint.is_initialized():
for constraint in vnode.constraints.constraint.values():
formula = infra.constraints.add_constraint(
id=constraint.id.get_value(),
formula=constraint.formula.get_value())
self.log.debug("Add constraint: %s to %s" % (formula, infra.id))
# Copy metadata
self.log.debug("Parse Infra node metadata...")
for key in vnode.metadata: # Optional - node.metadata
if key in ('bandwidth', 'delay'):
# Internally used metadata --> already processed
pass
elif str(key).startswith("constraint"):
self.log.debug("Constraint entry detected!")
raw = vnode.metadata[key].value.get_value()
values = json.loads(raw.replace("'", '"'))
self.log.log(VERBOSE, "Parsed metadata:\n%s" % values)
bandwidth = path = delay = None
if "bandwidth" in values:
try:
bandwidth = float(values['bandwidth']['value'])
except ValueError:
self.log.warning("Bandwidth in requirement metadata: %s is not a "
"valid float value!" % values['bandwidth'])
path = values['bandwidth']['path']
if "delay" in values:
try:
delay = float(values['delay']['value'])
except ValueError:
self.log.warning("Delay in requirement metadata: %s is not a "
"valid float value!" % values['delay'])
if path != values['delay']['path']:
self.log.warning(
"Delay/bandwidth path entry is different in E2E requirement "
"metadata: %s!" % raw)
continue
src_port = dst_port = None
if path is None:
continue
sg_id = int(path[0])
for p in infra.ports:
for f in p.flowrules:
if f.id == sg_id:
src_port = p
self.log.debug("Found src port: %s" % p.id)
break
sg_id = int(path[-1])
for f in infra.flowrules():
if f.id == sg_id:
dst_port_id = f.action.split(';')[0].split('=')[1]
dst_port = infra.ports[dst_port_id]
self.log.debug("Found dst port: %s" % dst_port_id)
break
if src_port is None or dst_port is None:
self.log.warning(
"Port reference is missing for Requirement link!")
continue
req_id = str(key).split(':')[1]
req = nffg.add_req(id=req_id,
src_port=src_port,
dst_port=dst_port,
bandwidth=bandwidth,
delay=delay,
sg_path=path)
self.log.debug("Created Requirement link: %s" % req)
else:
infra.add_metadata(name=key,
value=vnode.metadata[key].value.get_value())
def _parse_virtualizer_links (self, nffg, virtualizer):
"""
Parse links from Virtualizer.
:param nffg: Container NFFG
:type nffg: :class:`NFFG`
:param virtualizer: Virtualizer object
:type virtualizer: Virtualizer
:return: None
"""
# Store added link in a separate structure for simplicity and speed
added_links = []
# Add links connecting infras
for vlink in virtualizer.links:
try:
src_port = vlink.src.get_target()
except StandardError:
self.log.exception(
"Got unexpected exception during acquisition of link's src Port!")
src_node = src_port.get_parent().get_parent()
# Add domain name to the node id if unique_id is set
src_node_id = self._gen_unique_bb_id(src_node)
try:
dst_port = vlink.dst.get_target()
except StandardError:
self.log.exception(
"Got unexpected exception during acquisition of link's dst Port!")
dst_node = dst_port.get_parent().get_parent()
# Add domain name to the node id if unique_id is set
dst_node_id = self._gen_unique_bb_id(dst_node)
try:
src_port_id = int(src_port.id.get_value())
except ValueError:
# self.log.warning("Source port id is not a valid number: %s" % e)
src_port_id = src_port.id.get_value()
try:
dst_port_id = int(dst_port.id.get_value())
except ValueError:
# self.log.warning("Destination port id is not a valid number: %s" % e)
dst_port_id = dst_port.id.get_value()
params = dict()
params['id'] = vlink.id.get_value() # Mandatory - link.id
if vlink.resources.is_initialized():
params['delay'] = float(vlink.resources.delay.get_value()) \
if vlink.resources.delay.is_initialized() else None
params['bandwidth'] = float(vlink.resources.bandwidth.get_value()) \
if vlink.resources.bandwidth.is_initialized() else None
params['cost'] = vlink.resources.cost.get_value()
params['qos'] = vlink.resources.qos.get_value()
# Check the link is a possible backward link
possible_backward = (
"%s:%s-%s:%s" % (dst_node_id, dst_port_id, src_node_id, src_port_id))
if possible_backward in added_links:
params['backward'] = True
# Add unidirectional link
l1 = nffg.add_link(src_port=nffg[src_node_id].ports[src_port_id],
dst_port=nffg[dst_node_id].ports[dst_port_id],
**params)
self.log.debug("Add static %slink: %s" % (
"backward " if "backward" in params else "", l1))
# Handle operation tag
if vlink.get_operation() is not None:
self.log.debug("Found operation tag: %s for link: %s" % (
          vlink.get_operation(), vlink.id.get_value()))
l1.operation = vlink.get_operation()
# Register the added link
added_links.append(
"%s:%s-%s:%s" % (src_node, src_port, dst_node, dst_port))
@staticmethod
def _parse_virtualizer_metadata (nffg, virtualizer):
"""
Parse metadata from Virtualizer.
Optionally can parse requirement links if they are stored in metadata.
:param nffg: Container NFFG
:type nffg: :class:`NFFG`
:param virtualizer: Virtualizer object
:type virtualizer: Virtualizer
:return: None
"""
for key in virtualizer.metadata:
nffg.add_metadata(name=key,
value=virtualizer.metadata[key].value.get_value())
  def __process_variables (self, infra, variables):
    """
    Collect flowrules of the given infra node whose delay or bandwidth field
    refers to one of the given formula variables.
    :param infra: infrastructure node
    :type infra: :any:`NodeInfra`
    :param variables: variable names parsed from a constraint formula
    :type variables: list
    :return: matched flowrules and the referred resource type
    :rtype: tuple
    """
    frs = []
type = set()
for var in variables:
var = var.strip()
for fr in infra.flowrules():
if fr.delay == var:
frs.append(fr)
type.add("delay")
if fr.bandwidth == var:
frs.append(fr)
type.add("bandwidth")
if len(type) != 1:
self.log.warning("Variables: %s refer to multiple type of fields: %s"
% (variables, type))
return None, None
type = type.pop()
return frs, type
  def _parse_virtualizer_requirement (self, nffg):
    """
    Recreate E2E requirement links from the delay/bandwidth constraint
    formulas stored in the infra nodes' constraints.
    :param nffg: Container NFFG
    :type nffg: :class:`NFFG`
    :return: None
    """
    self.log.debug("Process requirement formulas...")
reqs = {}
for infra in nffg.infras:
deletable_ids = []
for i, (id, formula) in enumerate(
infra.constraints.constraint.iteritems()):
self.log.debug("Detected formula: %s" % formula)
try:
splitted = formula.split('|')
variables = splitted[0].strip().split('+')
value = float(splitted[-1])
except:
self.log.warning("Wrong formula format: %s" % formula)
continue
frs, type = self.__process_variables(infra=infra, variables=variables)
if not (frs or type):
continue
# Recreate sg_path
sg_path = [fr.id for fr in frs]
self.log.debug("Recreated sg_hop list: %s" % sg_path)
try:
sport = NFFGToolBox.get_inport_of_flowrule(infra, frs[0].id)
dport = NFFGToolBox.get_output_port_of_flowrule(infra, frs[-1])
except RuntimeError as e:
self.log.error("Referred port is missing from infra node: %s" % e)
continue
if (sport, dport) not in reqs:
req_link = nffg.add_req(src_port=sport,
dst_port=dport,
id="req%s" % i,
sg_path=sg_path)
self.log.debug("Created requirement link: %s" % req_link)
reqs[(sport, dport)] = req_link
else:
req_link = reqs[(sport, dport)]
setattr(req_link, type, value)
self.log.debug("Set requirement value: %s --> %s" % (type, value))
# Remove variables from flowrules
for fr in frs:
setattr(fr, type, None)
# Mark formula for deletion
deletable_ids.append(id)
for formula_id in deletable_ids:
infra.constraints.del_constraint(id=formula_id)
def _parse_sghops_from_flowrules (self, nffg):
"""
Recreate the SG hop links based on the flowrules.
    Use the flowrule id as the id of the SG hop link.
:param nffg: Container NFFG
:type nffg: :class:`NFFG`
:return: None
"""
if not nffg.is_SBB():
return
self.log.debug(
"Detected SingleBiSBiS view! Recreate SG hop links based on flowrules...")
for sbb in nffg.infras:
for flowrule in sbb.flowrules():
# Get source port / in_port
in_port = None
flowclass = None
fr_id = flowrule.id
for item in flowrule.match.split(';'):
if item.startswith('in_port'):
in_port = item.split('=')[1]
elif item.startswith('TAG') or item.startswith('UNTAG'):
pass
elif item.startswith('flowclass'):
flowclass = item.split('=', 1)[1]
else:
flowclass = item
if in_port is not None:
# Detect the connected NF/SAP port for sg_hop
opposite_node = [l.dst for u, v, l in nffg.real_out_edges_iter(sbb.id)
if l.src.id == in_port]
if len(opposite_node) == 1:
in_port = opposite_node.pop()
self.log.debug("Detected src port for SG hop: %s" % in_port)
else:
self.log.warning(
"src port for SG hop: %s cannot be detected! Possible ports: %s" %
(fr_id, opposite_node))
continue
else:
self.log.warning(
"in_port for SG hop link cannot be determined from: %s. Skip SG "
"hop recreation..." % flowrule)
return
# Get destination port / output
output = None
for item in flowrule.action.split(';'):
if item.startswith('output'):
output = item.split('=')[1]
if output is not None:
# Detect the connected NF/SAP port for sg_hop
opposite_node = [l.dst for u, v, l in nffg.real_out_edges_iter(sbb.id)
if l.src.id == output]
if len(opposite_node) == 1:
output = opposite_node.pop()
self.log.debug("Detected dst port for SG hop: %s" % output)
else:
self.log.warning(
"dst port for SG hop: %s cannot be detected! Possible ports: %s" %
(fr_id, opposite_node))
continue
else:
self.log.warning(
"output for SG hop link cannot be determined from: %s. Skip SG "
"hop recreation..." % flowrule)
return
sg = nffg.add_sglink(id=fr_id,
src_port=in_port,
dst_port=output,
flowclass=flowclass,
delay=flowrule.delay,
bandwidth=flowrule.bandwidth,
constraints=flowrule.constraints)
self.log.debug("Recreated SG hop: %s" % sg)
def parse_from_Virtualizer (self, vdata, with_virt=False,
create_sg_hops=False):
"""
Convert Virtualizer3-based XML str --> NFFGModel based NFFG object
:param vdata: XML plain data or Virtualizer object
    :type vdata: str or Virtualizer
:param with_virt: return with the Virtualizer object too (default: False)
:type with_virt: bool
:param create_sg_hops: create the SG hops (default: False)
:type create_sg_hops: bool
:return: created NF-FG
:rtype: :class:`NFFG`
"""
self.log.debug(
"START conversion: Virtualizer(ver: %s) --> NFFG(ver: %s)" % (
V_VERSION, N_VERSION))
# Already in Virtualizer format
if isinstance(vdata, virt_lib.Virtualizer):
virtualizer = vdata
# Plain XML string
elif isinstance(vdata, basestring):
try:
self.log.debug("Converting data to graph-based NFFG structure...")
virtualizer = virt_lib.Virtualizer.parse_from_text(text=vdata)
except Exception as e:
self.log.error("Got ParseError during XML->Virtualizer conversion!")
raise RuntimeError('ParseError: %s' % e.message)
else:
self.log.error("Not supported type for vdata: %s" % type(vdata))
return
# Get NFFG init params
nffg_id = virtualizer.id.get_value() # Mandatory - virtualizer.id
if virtualizer.name.is_initialized(): # Optional - virtualizer.name
nffg_name = virtualizer.name.get_value()
else:
nffg_name = None
self.log.debug("Construct NFFG based on Virtualizer(id=%s, name=%s)" % (
nffg_id, nffg_name))
# Create NFFG
nffg = NFFG(id=nffg_id, name=nffg_name)
# Parse Infrastructure Nodes from Virtualizer
self._parse_virtualizer_nodes(nffg=nffg, virtualizer=virtualizer)
# Parse Infrastructure links from Virtualizer
self._parse_virtualizer_links(nffg=nffg, virtualizer=virtualizer)
    # Parse Metadata from Virtualizer
self._parse_virtualizer_metadata(nffg=nffg, virtualizer=virtualizer)
# Parse requirement links from Virtualizer
self._parse_virtualizer_requirement(nffg=nffg)
# If the received NFFG is a SingleBiSBiS, recreate the SG hop links
# which are in compliance with flowrules in SBB node
if create_sg_hops:
self._parse_sghops_from_flowrules(nffg=nffg)
else:
self.log.debug("Skip SG hop recreation...")
self.log.debug("END conversion: Virtualizer(ver: %s) --> NFFG(ver: %s)" % (
V_VERSION, N_VERSION))
return (nffg, virtualizer) if with_virt else nffg
def _convert_nffg_infras (self, nffg, virtualizer):
"""
Convert infras in the given :class:`NFFG` into the given Virtualizer.
:param nffg: NFFG object
:type nffg: :class:`NFFG`
:param virtualizer: Virtualizer object
:type virtualizer: Virtualizer
:return: None
"""
self.log.debug("Converting infras...")
for infra in nffg.infras:
      # Check if the domain needs to be removed from the end of the id
v_node_id = self.recreate_bb_id(id=infra.id)
v_node_name = infra.name # optional
v_node_type = infra.infra_type # Mandatory
v_node = virt_lib.Infra_node(id=v_node_id,
name=v_node_name,
type=v_node_type)
# Add resources nodes/node/resources
v_node.resources.cpu.set_value(infra.resources.cpu)
v_node.resources.mem.set_value(infra.resources.mem)
v_node.resources.storage.set_value(infra.resources.storage)
v_node.resources.cost.set_value(infra.resources.cost)
v_node.resources.zone.set_value(infra.resources.zone)
# Migrate metadata
for key, value in infra.metadata.iteritems():
v_node.metadata.add(virt_lib.MetadataMetadata(key=key, value=value))
      # Add remaining NFFG-related information into metadata
if infra.resources.delay is not None:
v_node.metadata.add(virt_lib.MetadataMetadata(
key="delay", value=infra.resources.delay))
if infra.resources.bandwidth is not None:
v_node.metadata.add(virt_lib.MetadataMetadata(
key="bandwidth", value=infra.resources.bandwidth))
if infra.operation is not None:
self.log.debug("Convert operation tag: %s for infra: %s" % (
infra.operation, infra.id))
v_node.set_operation(operation=infra.operation, recursive=False)
self.log.debug("Converted %s" % infra)
# Add ports to infra
for port in infra.ports:
# Check if the port is a dynamic port : 23412423523445 or sap1|comp|1
# If it is a dynamic port, skip conversion
try:
          if int(port.id) >= 65536:
# Dynamic port connected to a VNF - skip
continue
except ValueError:
# port is not a number
if '|' in str(port.id):
continue
if str(port.id).startswith("EXTERNAL"):
self.log.debug("Port: %s in infra %s is EXTERNAL. Skip adding..."
% (port.id, infra.id))
continue
v_port = virt_lib.Port(id=str(port.id))
# If SAP property is exist: this port connected to a SAP
if port.sap is not None:
v_port.sap.set_value(port.sap)
elif port.get_property('sap'):
v_port.sap.set_value(port.get_property('sap'))
# Set default port-type to port-abstract
# during SAP detection the SAP<->Node port-type will be overridden
v_port.port_type.set_value(self.TYPE_VIRTUALIZER_PORT_ABSTRACT)
# Additional values of SAP/NF will be set later
# Migrate port attributes
self.__copy_port_attrs(v_port=v_port, port=port)
# port_type: port-abstract & sap: - --> regular port
# port_type: port-abstract & sap: <SAP...> --> was connected to
# an inter-domain port - set this data in Virtualizer
if port.operation is not None:
self.log.debug("Convert operation tag: %s for port: %s" % (
port.operation, port.id))
v_port.set_operation(operation=port.operation, recursive=False)
v_node.ports.add(v_port)
self.log.debug("Added static %s" % port)
# Add minimalistic Nodes for supported NFs based on supported list of NFFG
for sup in infra.supported:
v_node.capabilities.supported_NFs.add(virt_lib.Node(id=sup, type=sup))
# Add infra to virtualizer
if v_node_id in virtualizer.nodes.node.keys():
self.log.warning("Virtualizer node: %s already exists in Virtualizer: "
"%s!" % (v_node_id, virtualizer.id.get_value()))
else:
virtualizer.nodes.add(v_node)
# Add intra-node link based on delay_matrix
for src, dst, delay in infra.delay_matrix:
if src in v_node.ports.port.keys():
v_link_src = v_node.ports[src]
else:
# self.log.warning("Missing port: %s from Virtualizer node: %s"
# % (src, v_node_id))
continue
if dst in v_node.ports.port.keys():
v_link_dst = v_node.ports[dst]
else:
# self.log.warning("Missing port: %s from Virtualizer node: %s"
# % (dst, v_node_id))
continue
v_link = virt_lib.Link(id="link-%s-%s" % (v_link_src.id.get_value(),
v_link_dst.id.get_value()),
src=v_link_src,
dst=v_link_dst,
resources=virt_lib.Link_resource(
delay=delay))
v_node.links.add(v_link)
self.log.debug("Added intra-BiSBiS resource link [%s --> %s] "
"with delay: %s" % (src, dst, delay))
  def __copy_port_attrs (self, v_port, port):
    """
    Copy attributes of an NFFG port into the given Virtualizer port.
    :param v_port: Virtualizer port object
    :param port: NFFG port object
    :return: None
    """
    # Set sap.name if it has not been used for storing SAP.id
if port.name is not None:
v_port.name.set_value(port.name)
self.log.debug("Added name: %s" % v_port.name.get_value())
# Convert other SAP-port-specific data
v_port.capability.set_value(port.capability)
v_port.sap_data.technology.set_value(port.technology)
v_port.sap_data.role.set_value(port.role)
v_port.sap_data.resources.delay.set_value(port.delay)
v_port.sap_data.resources.bandwidth.set_value(port.bandwidth)
v_port.sap_data.resources.cost.set_value(port.cost)
v_port.sap_data.resources.qos.set_value(port.qos)
v_port.control.controller.set_value(port.controller)
v_port.control.orchestrator.set_value(port.orchestrator)
v_port.addresses.l2.set_value(port.l2)
v_port.addresses.l4.set_value(port.l4)
for l3 in port.l3:
v_port.addresses.l3.add(virt_lib.L3_address(id=l3.id,
name=l3.name,
configure=l3.configure,
requested=l3.requested,
provided=l3.provided))
# Migrate metadata
for key, value in port.metadata.iteritems():
v_port.metadata.add(virt_lib.MetadataMetadata(key=key,
value=value))
if port.operation is not None:
self.log.debug("Convert operation tag: %s for port: %s" % (
port.operation, port.id))
v_port.set_operation(operation=port.operation,
recursive=False)
  def __copy_vport_attrs (self, port, vport):
    """
    Copy attributes of a Virtualizer port into the given NFFG port.
    :param port: NFFG port object
    :param vport: Virtualizer port object
    :return: None
    """
if vport.name.is_initialized():
# infra_port.add_property("name", vport.name.get_value())
port.name = vport.name.get_value()
self.log.debug("Added name: %s" % port.name)
# If sap is set and port_type is port-abstract -> this port
# connected to an inter-domain SAP before -> save this metadata
if vport.sap.is_initialized():
port.add_property("sap", vport.sap.get_value())
port.sap = vport.sap.get_value()
if vport.capability.is_initialized():
port.capability = vport.capability.get_value()
self.log.debug("Added capability: %s" % port.capability)
if vport.sap_data.is_initialized():
if vport.sap_data.technology.is_initialized():
port.technology = vport.sap_data.technology.get_value()
self.log.debug("Added technology: %s" % port.technology)
if vport.sap_data.role.is_initialized():
        port.role = vport.sap_data.role.get_value()
self.log.debug("Added role: %s" % port.role)
if vport.sap_data.resources.is_initialized():
if vport.sap_data.resources.delay.is_initialized():
port.delay = vport.sap_data.resources.delay.get_value()
self.log.debug("Added delay: %s" % port.delay)
if vport.sap_data.resources.bandwidth.is_initialized():
port.bandwidth = vport.sap_data.resources.bandwidth.get_value()
self.log.debug("Added bandwidth: %s" % port.bandwidth)
if vport.sap_data.resources.cost.is_initialized():
port.cost = vport.sap_data.resources.cost.get_value()
self.log.debug("Added cost: %s" % port.cost)
if vport.sap_data.resources.qos.is_initialized():
port.qos = vport.sap_data.resources.qos.get_value()
self.log.debug("Added qos: %s" % port.qos)
if vport.control.is_initialized():
if vport.control.controller.is_initialized():
        port.controller = vport.control.controller.get_value()
self.log.debug("Added controller: %s" % port.controller)
if vport.control.orchestrator.is_initialized():
        port.orchestrator = vport.control.orchestrator.get_value()
self.log.debug("Added orchestrator: %s" % port.orchestrator)
if vport.addresses.is_initialized():
self.log.debug("Translate addresses...")
port.l2 = vport.addresses.l2.get_value()
port.l4 = vport.addresses.l4.get_value()
for l3 in vport.addresses.l3.itervalues():
port.l3.add_l3address(id=l3.id.get_value(),
name=l3.name.get_value(),
configure=l3.configure.get_value(),
client=l3.client.get_value(),
requested=l3.requested.get_value(),
provided=l3.provided.get_value())
# Add metadata from non-sap port to infra port metadata
for key in vport.metadata:
port.add_metadata(name=key, value=vport.metadata[key].value.get_value())
# Handle operation tag
if vport.get_operation() is not None:
self.log.debug("Found operation tag: %s for port: %s" % (
vport.get_operation(), vport.id.get_value()))
port.operation = vport.get_operation()
def _convert_nffg_saps (self, nffg, virtualizer):
"""
Convert SAPs in the given :class:`NFFG` into the given Virtualizer.
:param nffg: NFFG object
:type nffg: :class:`NFFG`
:param virtualizer: Virtualizer object
:type virtualizer: Virtualizer
:return: None
"""
self.log.debug("Converting SAPs...")
# Rewrite SAP - Node ports to add SAP to Virtualizer
for sap in nffg.saps:
if str(sap.id).startswith("EXTERNAL"):
self.log.debug("SAP: %s is an EXTERNAL dynamic SAP. Skipping..."
% sap.id)
continue
for s, n, link in nffg.network.edges_iter([sap.id], data=True):
if link.type != NFFG.TYPE_LINK_STATIC:
continue
sap_port = link.src
# Rewrite port-type to port-sap
infra_id = self.recreate_bb_id(id=n)
v_sap_port = virtualizer.nodes[infra_id].ports[str(link.dst.id)]
if link.src.role == "EXTERNAL":
self.log.debug("SAP: %s is an EXTERNAL dynamic SAP. Removing..."
% sap.id)
virtualizer.nodes[infra_id].ports.remove(v_sap_port)
continue
v_sap_port.port_type.set_value(self.TYPE_VIRTUALIZER_PORT_SAP)
# Check if the SAP is an inter-domain SAP
if sap_port.sap is not None:
# Set static 'sap' value
v_sap_port.sap.set_value(sap_port.sap)
elif sap_port.has_property("type") == "inter-domain":
# If sap metadata is set by merge, use this value else the SAP.id
if sap_port.has_property('sap'):
v_sap_port.sap.set_value(sap_port.get_property('sap'))
else:
v_sap_port.sap.set_value(sap.id)
# Check if the SAP is a bound, inter-domain SAP (no sap and port
# property are set in this case)
elif sap.binding is not None:
v_sap_port.sap.set_value(s)
self.log.debug(
"Set port: %s in infra: %s as an inter-domain SAP with"
" 'sap' value: %s" % (link.dst.id, n,
v_sap_port.sap.get_value()))
else:
# If sap is not inter-domain SAP, use name field to store sap id and
v_sap_port.name.set_value("%s:%s" % (self.SAP_NAME_PREFIX, sap.id))
self.__copy_port_attrs(v_port=v_sap_port, port=sap_port)
self.log.debug(
"Converted %s to port: %s in infra: %s" % (sap, link.dst.id, n))
def _convert_nffg_edges (self, nffg, virtualizer):
"""
Convert edge links in the given :class:`NFFG` into the given Virtualizer.
:param nffg: NFFG object
:type nffg: :class:`NFFG`
:param virtualizer: Virtualizer object
:type virtualizer: Virtualizer
:return: None
"""
self.log.debug("Converting edges...")
# Add edge-link to Virtualizer
for link in nffg.links:
# Skip backward and non-static link conversion <-- Virtualizer links
# are bidirectional
if link.type != NFFG.TYPE_LINK_STATIC:
continue
# SAP - Infra links are not stored in Virtualizer format
if link.src.node.type == NFFG.TYPE_SAP or \
link.dst.node.type == NFFG.TYPE_SAP:
continue
self.log.debug(
"Added link: Node: %s, port: %s <--> Node: %s, port: %s" % (
link.src.node.id, link.src.id, link.dst.node.id, link.dst.id))
src_node_id = self.recreate_bb_id(id=link.src.node.id)
dst_node_id = self.recreate_bb_id(id=link.dst.node.id)
v_link = virt_lib.Link(
id=link.id,
src=virtualizer.nodes[src_node_id].ports[str(link.src.id)],
dst=virtualizer.nodes[dst_node_id].ports[str(link.dst.id)],
resources=virt_lib.Link_resource(delay=link.delay,
bandwidth=link.bandwidth,
cost=link.cost, qos=link.qos))
      # Handle operation tag
if link.operation is not None:
self.log.debug(
"Convert operation tag: %s for link: %s" % (link.operation, link.id))
v_link.set_operation(operation=link.operation, recursive=False)
# Call bind to resolve src,dst references to workaround a bug
# v_link.bind()
virtualizer.links.add(v_link)
def _convert_nffg_reqs (self, nffg, virtualizer):
"""
    Convert requirement links in the given :class:`NFFG` into the given
    Virtualizer using the infra node's constraint list.
:param nffg: NFFG object
:type nffg: :class:`NFFG`
:param virtualizer: Virtualizer object
:type virtualizer: Virtualizer
:return: None
"""
self.log.debug("Converting requirement links...")
for req in nffg.reqs:
self.log.debug('Converting requirement link: %s' % req)
# Get container node
if req.src.node.id != req.dst.node.id:
self.log.warning("Requirement link has wrong format: src/dst port is "
"not connected to the same BiSBiS node!")
continue
infra_id = self.recreate_bb_id(id=req.src.node.id)
self.log.debug("Detected infra node: %s for requirement link: %s" %
(infra_id, req))
      # Assemble delay req
if req.delay is not None:
self.log.debug("Creating formula for delay requirement...")
formula = []
for hop in req.sg_path:
try:
v_fe = virtualizer.nodes[infra_id].flowtable[str(hop)]
except:
self.log.warning("Flowrule: %s was not found in Virtualizer!" % hop)
continue
try:
var_delay = v_fe.resources.delay.get_value()
except:
var_delay = None
if not (var_delay and str(var_delay).startswith('$')):
dvar = "$d" + str(v_fe.id.get_value())
self.log.debug("Delay value: %s is not a variable! "
"Replacing with: %s" % (var_delay, dvar))
v_fe.resources.delay.set_value(dvar)
formula.append("$d" + str(v_fe.id.get_value()))
else:
formula.append(var_delay)
log.debug("Registered delay variable: %s" % var_delay)
formula = '+'.join(formula) + "|max|%s" % req.delay
self.log.debug("Generated delay formula: %s" % formula)
virtualizer.nodes[infra_id].constraints.constraint.add(
virt_lib.ConstraintsConstraint(id="delay-" + str(req.id),
formula=formula))
# Assemble bandwidth req
if req.bandwidth is not None:
self.log.debug("Creating formula for bandwidth requirement...")
formula = []
for hop in req.sg_path:
try:
v_fe = virtualizer.nodes[infra_id].flowtable[str(hop)]
except:
self.log.warning("Flowrule: %s was not found in Virtualizer!" % hop)
continue
try:
var_bw = v_fe.resources.bandwidth.get_value()
except:
var_bw = None
if not (var_bw and str(var_bw).startswith('$')):
bwvar = "$bw" + str(v_fe.id.get_value())
self.log.debug("Bandwidth value: %s is not a variable! "
"Replacing with: %s" % (var_bw, bwvar))
v_fe.resources.bandwidth.set_value(bwvar)
formula.append("$bw" + str(v_fe.id.get_value()))
else:
formula.append(var_bw)
self.log.debug("Registered bandwidth variable: %s" % var_bw)
formula = '+'.join(formula) + "||%s" % req.bandwidth
self.log.debug("Generated bandwidth formula: %s" % formula)
virtualizer.nodes[infra_id].constraints.constraint.add(
virt_lib.ConstraintsConstraint(id="bandwidth-" + str(req.id),
formula=formula))
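  # Illustrative sketch (not part of the original converter): for a requirement
  # link with delay=30 whose sg_path crosses flowentries with hypothetical ids
  # 1 and 2, the loop above rewrites each flowentry's resources/delay to a
  # "$d<id>" variable and attaches a node-level constraint of the form
  #   $d1+$d2|max|30
  # Bandwidth requirements are handled analogously with "$bw<id>" variables and
  # a "...||<bandwidth>" formula.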
def __assemble_virt_nf (self, nf):
v_nf_id = self.recreate_nf_id(nf.id)
# Create Node object for NF
v_nf = virt_lib.Node(id=v_nf_id,
name=nf.name,
type=nf.functional_type,
status=nf.status,
resources=virt_lib.Software_resource(
cpu=nf.resources.cpu,
mem=nf.resources.mem,
storage=nf.resources.storage,
cost=nf.resources.cost,
zone=nf.resources.zone))
# Set deployment type, delay, bandwidth as a metadata
if nf.deployment_type is not None:
v_nf.metadata.add(
virt_lib.MetadataMetadata(key='deployment_type',
value=nf.deployment_type))
if nf.resources.delay is not None:
v_nf.metadata.add(
virt_lib.MetadataMetadata(key='delay',
value=nf.resources.delay))
if nf.resources.bandwidth is not None:
v_nf.metadata.add(
virt_lib.MetadataMetadata(key='bandwidth',
value=nf.resources.bandwidth))
# Migrate metadata
for key, value in nf.metadata.iteritems():
if key not in ('deployment_type', 'delay', 'bandwidth'):
v_nf.metadata.add(
virt_lib.MetadataMetadata(key=key, value=value))
# Handle operation tag
if nf.operation is not None:
self.log.debug("Convert operation tag: %s for NF: %s" %
(nf.operation, nf.id))
v_nf.set_operation(operation=nf.operation, recursive=False)
return v_nf
def __assemble_virt_nf_port (self, port):
v_nf_port = virt_lib.Port(id=str(port.id),
port_type=self.TYPE_VIRTUALIZER_PORT_ABSTRACT)
# Convert other SAP-specific data
v_nf_port.name.set_value(port.name)
if 'port-type' in port.properties:
self.log.warning("Unexpected inter-domain port in NF: %s" % port)
if 'sap' in port.properties:
v_nf_port.sap.set_value(port.properties['sap'])
v_nf_port.port_type.set_value(self.TYPE_VIRTUALIZER_PORT_SAP)
elif port.sap is not None:
v_nf_port.sap.set_value(port.sap)
v_nf_port.port_type.set_value(self.TYPE_VIRTUALIZER_PORT_SAP)
if port.capability:
self.log.debug("Translate capability...")
v_nf_port.capability.set_value(port.capability)
if any((port.technology, port.role, port.delay, port.bandwidth,
port.cost)):
self.log.debug("Translate sap_data...")
v_nf_port.sap_data.technology.set_value(port.technology)
v_nf_port.sap_data.role.set_value(port.role)
v_nf_port.sap_data.resources.delay.set_value(port.delay)
v_nf_port.sap_data.resources.bandwidth.set_value(
port.bandwidth)
v_nf_port.sap_data.resources.cost.set_value(port.cost)
v_nf_port.sap_data.resources.qos.set_value(port.qos)
if any((port.controller, port.orchestrator)):
self.log.debug("Translate controller...")
v_nf_port.control.controller.set_value(port.controller)
v_nf_port.control.orchestrator.set_value(port.orchestrator)
if any((port.l2, port.l4, len(port.l3))):
self.log.debug("Translate addresses...")
v_nf_port.addresses.l2.set_value(port.l2)
v_nf_port.addresses.l4.set_value(port.l4)
for l3 in port.l3:
v_nf_port.addresses.l3.add(
virt_lib.L3_address(id=l3.id,
name=l3.name,
configure=l3.configure,
requested=l3.requested,
provided=l3.provided))
# Migrate metadata
if len(port.metadata):
self.log.debug("Translate metadata...")
for property, value in port.metadata.iteritems():
v_nf_port.metadata.add(virt_lib.MetadataMetadata(key=property,
value=value))
# Handle operation tag
if port.operation is not None:
self.log.debug("Convert operation tag: %s for port: %s" % (
port.operation, port.id))
v_nf_port.set_operation(operation=port.operation,
recursive=False)
return v_nf_port
def _convert_nffg_nfs (self, virtualizer, nffg):
"""
Convert NFs in the given :class:`NFFG` into the given Virtualizer.
:param virtualizer: Virtualizer object based on ETH's XML/Yang version.
    :param nffg: split NFFG (not necessarily in valid syntax)
    :return: None
"""
self.log.debug("Converting NFs...")
# Check every infra Node
for infra in nffg.infras:
# Cache discovered NF to avoid multiple detection of NF which has more
# than one port
discovered_nfs = []
# Recreate the original Node id
v_node_id = self.recreate_bb_id(id=infra.id)
      # Check if the Infra node exists in the Virtualizer
if v_node_id not in virtualizer.nodes.node.keys():
self.log.warning(
"InfraNode: %s is not in the Virtualizer(nodes: %s)! Skip related "
"initiations..." % (infra, virtualizer.nodes.node.keys()))
continue
# Get Infra node from Virtualizer
v_node = virtualizer.nodes[v_node_id]
# Check every outgoing edge and observe only the NF neighbours
for nf in nffg.running_nfs(infra.id):
v_nf_id = self.recreate_nf_id(nf.id)
# Skip already detected NFs
if v_nf_id in discovered_nfs:
continue
        # Check if the NF already exists in the Infra node
if v_nf_id not in v_node.NF_instances.node.keys():
self.log.debug("Found uninitiated NF: %s in mapped NFFG" % nf)
# Create Node object for NF
v_nf = self.__assemble_virt_nf(nf=nf)
# Add NF to Infra object
v_node.NF_instances.add(v_nf)
# Cache discovered NF
discovered_nfs.append(v_nf_id)
self.log.debug(
"Added NF: %s to Infra node(id=%s, name=%s, type=%s)" % (
nf, v_node.id.get_as_text(),
v_node.name.get_as_text(),
v_node.type.get_as_text()))
# Add NF ports
for port in nf.ports:
v_nf_port = self.__assemble_virt_nf_port(port=port)
v_node.NF_instances[v_nf.id.get_value()].ports.add(v_nf_port)
self.log.debug("Added Port: %s to NF node: %s" %
(port, v_nf.id.get_as_text()))
else:
self.log.debug("%s already exists in the Virtualizer(id=%s, "
"name=%s)" % (nf, virtualizer.id.get_as_text(),
virtualizer.name.get_as_text()))
# noinspection PyDefaultArgument
def _convert_nffg_flowrules (self, virtualizer, nffg):
"""
Convert flowrules in the given :class:`NFFG` into the given Virtualizer.
:param virtualizer: Virtualizer object based on ETH's XML/Yang version.
    :param nffg: split NFFG (not necessarily in valid syntax)
    :return: None
"""
self.log.debug("Converting flowrules...")
# Check every infra Node
for infra in nffg.infras:
# Recreate the original Node id
v_node_id = self.recreate_bb_id(id=infra.id)
      # Check if the Infra node exists in the Virtualizer
if v_node_id not in virtualizer.nodes.node.keys():
self.log.warning(
"InfraNode: %s is not in the Virtualizer(nodes: %s)! Skip related "
"initiations..." % (infra, virtualizer.nodes.node.keys()))
continue
# Get Infra node from Virtualizer
v_node = virtualizer.nodes[v_node_id]
# traverse every port in the Infra node
for port in infra.ports:
# Check every flowrule
for fr in port.flowrules:
self.log.debug("Converting flowrule: %s..." % fr)
# Mandatory id
fe_id = fr.id
# Define constant priority
# fe_pri = str(100)
fe_pri = None
# Check if match starts with in_port
fe = fr.match.split(';')
if fe[0].split('=')[0] != "in_port":
self.log.warning("Missing 'in_port' from match in %s. Skip "
"flowrule conversion..." % fr)
continue
# Check if the src port is a physical or virtual port
in_port = fe[0].split('=')[1]
if in_port in v_node.ports.port.keys():
# Flowrule in_port is a phy port in Infra Node
in_port = v_node.ports[in_port]
self.log.debug("Identify in_port: %s in match as a physical port "
"in the Virtualizer" % in_port.id.get_as_text())
else:
ext_sap = [l.dst for u, v, l in
nffg.network.out_edges_iter([infra.id], data=True)
if l.dst.node.type == "SAP" and
str(l.src.id) == in_port and l.dst.role == "EXTERNAL"]
if len(ext_sap) > 0:
self.log.debug("Identify in_port: %s in match as an EXTERNAL "
"port." % in_port)
in_port = ext_sap[0].get_property("path")
else:
self.log.debug("Identify in_port: %s in match as a dynamic port. "
"Tracking associated NF port in the "
"Virtualizer..." % in_port)
# in_port is a dynamic port --> search for connected NF's port
v_nf_port = [l.dst for u, v, l in
nffg.network.out_edges_iter([infra.id], data=True)
if l.type == NFFG.TYPE_LINK_DYNAMIC and
str(l.src.id) == in_port]
# There should be only one link between infra and NF
if len(v_nf_port) < 1:
self.log.warning("NF port is not found for dynamic Infra port: "
"%s defined in match field! Skip flowrule "
"conversion..." % in_port)
continue
v_nf_port = v_nf_port[0]
v_nf_id = self.recreate_nf_id(v_nf_port.node.id)
in_port = v_node.NF_instances[v_nf_id].ports[str(v_nf_port.id)]
self.log.debug("Found associated NF port: node=%s, port=%s" % (
in_port.get_parent().get_parent().id.get_as_text(),
in_port.id.get_as_text()))
# Process match field
match = self._convert_flowrule_match(fr.match)
# Check if action starts with outport
fe = fr.action.split(';')
if fe[0].split('=')[0] != "output":
self.log.warning("Missing 'output' from action in %s."
"Skip flowrule conversion..." % fr)
continue
# Check if the dst port is a physical or virtual port
out_port = fe[0].split('=')[1]
if out_port in v_node.ports.port.keys():
# Flowrule output is a phy port in Infra Node
out_port = v_node.ports[out_port]
self.log.debug("Identify outport: %s in action as a physical port "
"in the Virtualizer" % out_port.id.get_as_text())
else:
ext_sap = [l.dst for u, v, l in
nffg.network.out_edges_iter([infra.id], data=True)
if l.dst.node.type == "SAP" and
str(l.src.id) == out_port and l.dst.role == "EXTERNAL"]
if len(ext_sap) > 0:
self.log.debug("Identify out_port: %s in action as an EXTERNAL "
"port." % out_port)
out_port = ext_sap[0].get_property("path")
else:
self.log.debug(
"Identify outport: %s in action as a dynamic port. "
"Track associated NF port in the Virtualizer..." %
out_port)
# out_port is a dynamic port --> search for connected NF's port
v_nf_port = [l.dst for u, v, l in
nffg.network.out_edges_iter([infra.id], data=True)
if l.type == NFFG.TYPE_LINK_DYNAMIC and
str(l.src.id) == out_port]
if len(v_nf_port) < 1:
self.log.warning("NF port is not found for dynamic Infra port: "
"%s defined in action field! Skip flowrule "
"conversion..." % out_port)
continue
v_nf_port = v_nf_port[0]
v_nf_id = self.recreate_nf_id(v_nf_port.node.id)
out_port = v_node.NF_instances[v_nf_id].ports[str(v_nf_port.id)]
self.log.debug("Found associated NF port: node=%s, port=%s" % (
out_port.get_parent().get_parent().id.get_as_text(),
out_port.id.get_as_text()))
# Process action field
action = self._convert_flowrule_action(fr.action)
# Process resource fields
_resources = virt_lib.Link_resource(
delay=fr.delay if fr.delay else None,
bandwidth=fr.bandwidth if fr.bandwidth else None,
cost=fr.cost if fr.cost else None,
qos=fr.qos if fr.qos else None)
# Flowrule name is not used
v_fe_name = None
# Add Flowentry with converted params
virt_fe = virt_lib.Flowentry(id=fe_id, priority=fe_pri, port=in_port,
match=match, action=action, out=out_port,
resources=_resources, name=v_fe_name)
self.log.log(VERBOSE, "Generated Flowentry:\n%s" %
v_node.flowtable.add(virt_fe).xml())
          # Handle operation tag
if fr.operation is not None:
self.log.debug("Convert operation tag: %s for flowrule: %s" % (
fr.operation, fr.id))
virt_fe.set_operation(operation=str(fr.operation), recursive=False)
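  # Illustrative sketch (not part of the original converter): an NFFG flowrule
  # whose match starts with "in_port=1;..." and whose action starts with
  # "output=2;..." becomes a Flowentry whose port/out fields reference the
  # resolved physical, EXTERNAL SAP or NF port objects found above; the port
  # numbers here are hypothetical.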
def _get_vnode_by_id (self, virtualizer, id):
for vnode in virtualizer.nodes:
bb_node_id = self.recreate_bb_id(id)
if vnode.id.get_as_text() == bb_node_id:
return vnode
for vnf in vnode.NF_instances:
vnf_id = self.recreate_nf_id(id)
if vnf.id.get_value() == vnf_id:
return vnf
def __set_vnode_constraints (self, vnode, infra, virtualizer):
# Add affinity
for id, aff in infra.constraints.affinity.iteritems():
v_aff_node = self._get_vnode_by_id(virtualizer=virtualizer, id=aff)
if v_aff_node is None:
self.log.warning("Referenced Node: %s is not found for affinity!"
% aff)
continue
self.log.debug(
"Found reference for affinity: %s in Infra: %s" % (aff, infra.id))
vnode.constraints.affinity.add(
virt_lib.ConstraintsAffinity(id=str(id),
object=v_aff_node.get_path()))
# Add antiaffinity
for id, naff in infra.constraints.antiaffinity.iteritems():
v_naff_node = self._get_vnode_by_id(virtualizer=virtualizer, id=naff)
if v_naff_node is None:
self.log.warning("Referenced Node: %s is not found for anti-affinity!"
% naff)
continue
self.log.debug(
"Found reference for antiaffinity: %s in Infra: %s" % (
naff, infra.id))
vnode.constraints.antiaffinity.add(
virt_lib.ConstraintsAntiaffinity(id=str(id),
object=v_naff_node.get_path()))
# Add variable
for key, value in infra.constraints.variable.iteritems():
v_var_node = self._get_vnode_by_id(virtualizer=virtualizer, id=value)
if v_var_node is None:
self.log.warning("Referenced Node: %s is not found for variable: "
"%s!" % (value, key))
continue
self.log.debug(
"Found reference for variable: %s in Infra: %s" % (key, infra.id))
vnode.constraints.constraint.add(
virt_lib.ConstraintsVariable(id=str(key),
object=v_var_node.get_path()))
# Add constraint
for id, cons in infra.constraints.constraint.iteritems():
self.log.debug("Add formula: %s to Infra: %s" % (cons, infra.id))
vnode.constraints.constraint.add(
virt_lib.ConstraintsConstraint(id=str(id),
formula=cons))
# Add restorability
if infra.constraints.restorability is not None:
self.log.debug("Add restorability: %s to Infra: %s"
% (infra.constraints.restorability, infra.id))
vnode.constraints.restorability.set_value(infra.constraints.restorability)
def __set_vnf_constraints (self, vnode, nf, virtualizer):
v_nf_id = self.recreate_nf_id(nf.id)
vnf = vnode.NF_instances[v_nf_id]
# Add affinity
for id, aff in nf.constraints.affinity.iteritems():
v_aff_node = self._get_vnode_by_id(virtualizer=virtualizer, id=aff)
if v_aff_node is None:
self.log.warning("Referenced Node: %s is not found for affinity!"
% aff)
continue
self.log.debug(
"Found reference for affinity: %s in NF: %s" % (aff, nf.id))
vnf.constraints.affinity.add(
virt_lib.ConstraintsAffinity(id=str(id),
object=v_aff_node.get_path()))
# Add antiaffinity
for id, naff in nf.constraints.antiaffinity.iteritems():
v_naff_node = self._get_vnode_by_id(virtualizer=virtualizer, id=naff)
if v_naff_node is None:
self.log.warning(
"Referenced Node: %s is not found for anti-affinity!"
% naff)
continue
self.log.debug(
"Found reference for antiaffinity: %s in NF: %s" % (naff, nf.id))
vnf.constraints.antiaffinity.add(
virt_lib.ConstraintsAntiaffinity(id=str(id),
object=v_naff_node.get_path()))
# Add variable
for key, value in nf.constraints.variable.iteritems():
v_var_node = self._get_vnode_by_id(virtualizer=virtualizer, id=value)
if v_var_node is None:
self.log.warning("Referenced Node: %s is not found for variable: "
"%s!" % (value, key))
continue
self.log.debug(
"Found reference for variable: %s in NF: %s" % (key, nf.id))
vnf.constraints.constraint.add(
virt_lib.ConstraintsVariable(id=str(key),
object=v_var_node.get_path()))
# Add constraint
for id, cons in nf.constraints.constraint.iteritems():
self.log.debug("Add formula: %s to NF: %s" % (cons, nf.id))
vnf.constraints.constraint.add(
virt_lib.ConstraintsConstraint(id=str(id),
formula=cons))
# Add restorability
if nf.constraints.restorability is not None:
self.log.debug("Add restorability: %s to NF: %s"
% (nf.constraints.restorability, nf.id))
vnf.constraints.restorability.set_value(nf.constraints.restorability)
def __set_flowentry_constraints (self, vnode, flowrule, virtualizer):
v_fe = vnode.flowtable[str(flowrule.id)]
# Add affinity
for id, aff in flowrule.constraints.affinity.iteritems():
v_aff_node = self._get_vnode_by_id(virtualizer=virtualizer, id=aff)
if v_aff_node is None:
self.log.warning("Referenced Node: %s is not found for affinity!" % aff)
continue
self.log.debug("Found reference for affinity: %s in Flowrule: %s"
% (aff, flowrule.id))
v_fe.constraints.affinity.add(
virt_lib.ConstraintsAffinity(id=str(id),
object=v_aff_node.get_path()))
# Add antiaffinity
for id, naff in flowrule.constraints.antiaffinity.iteritems():
v_naff_node = self._get_vnode_by_id(virtualizer=virtualizer, id=naff)
if v_naff_node is None:
self.log.warning("Referenced Node: %s is not found for anti-affinity!"
% naff)
continue
self.log.debug("Found reference for antiaffinity: %s in Flowrule: %s"
% (naff, flowrule.id))
v_fe.constraints.antiaffinity.add(
virt_lib.ConstraintsAntiaffinity(id=str(id),
object=v_naff_node.get_path()))
# Add variable
for key, value in flowrule.constraints.variable.iteritems():
v_var_node = self._get_vnode_by_id(virtualizer=virtualizer, id=value)
if v_var_node is None:
self.log.warning("Referenced Node: %s is not found for variable: %s!"
% (value, key))
continue
self.log.debug("Found reference for variable: %s in Flowrule: %s"
% (key, flowrule.id))
v_fe.constraints.constraint.add(
virt_lib.ConstraintsVariable(id=str(key),
object=v_var_node.get_path()))
# Add constraint
for id, cons in flowrule.constraints.constraint.iteritems():
self.log.debug("Add constraint: %s:%s to Flowrule: %s"
% (id, cons, flowrule.id))
v_fe.constraints.constraint.add(
virt_lib.ConstraintsConstraint(id=str(id),
formula=cons))
# Add restorability
if flowrule.constraints.restorability is not None:
self.log.debug("Add restorability: %s to Flowrule: %s"
% (flowrule.constraints.restorability, flowrule.id))
v_fe.constraints.restorability.set_value(
flowrule.constraints.restorability)
def _convert_nffg_constraints (self, virtualizer, nffg):
self.log.debug("Convert constraints...")
for infra in nffg.infras:
# Recreate the original Node id
v_node_id = self.recreate_bb_id(id=infra.id)
# Check if Infra exists in the Virtualizer
if v_node_id not in virtualizer.nodes.node.keys():
self.log.warning(
"InfraNode: %s is not in the Virtualizer(nodes: %s)! Skip related "
"initiations..." % (infra, virtualizer.nodes.node.keys()))
continue
# Get Infra node from Virtualizer
vnode = virtualizer.nodes[v_node_id]
self.__set_vnode_constraints(vnode=vnode,
infra=infra,
virtualizer=virtualizer)
# Check connected NF constraints
for nf in nffg.running_nfs(infra.id):
self.__set_vnf_constraints(vnode=vnode,
nf=nf,
virtualizer=virtualizer)
for flowrule in infra.flowrules():
self.__set_flowentry_constraints(vnode=vnode,
flowrule=flowrule,
virtualizer=virtualizer)
def dump_to_Virtualizer (self, nffg):
"""
Convert given :class:`NFFG` to Virtualizer format.
:param nffg: topology description
:type nffg: :class:`NFFG`
:return: topology in Virtualizer format
:rtype: Virtualizer
"""
self.log.debug(
"START conversion: NFFG(ver: %s) --> Virtualizer(ver: %s)" % (
N_VERSION, V_VERSION))
self.log.debug("Converting data to XML-based Virtualizer structure...")
# Create Virtualizer with default id,name
v_id = str(nffg.id)
v_name = str(nffg.name) if nffg.name else None
virtualizer = virt_lib.Virtualizer(id=v_id, name=v_name)
self.log.debug("Creating Virtualizer based on %s" % nffg)
# Convert NFFG metadata
for key, value in nffg.metadata.iteritems():
meta_key = str(key)
meta_value = str(value) if value is not None else None
virtualizer.metadata.add(item=virt_lib.MetadataMetadata(key=meta_key,
value=meta_value))
# Convert Infras
self._convert_nffg_infras(nffg=nffg, virtualizer=virtualizer)
# Convert SAPs
self._convert_nffg_saps(nffg=nffg, virtualizer=virtualizer)
# Convert edge links
self._convert_nffg_edges(nffg=nffg, virtualizer=virtualizer)
# Convert NFs
self._convert_nffg_nfs(nffg=nffg, virtualizer=virtualizer)
# Convert Flowrules
self._convert_nffg_flowrules(nffg=nffg, virtualizer=virtualizer)
    # Convert requirement links into constraints
self._convert_nffg_reqs(nffg=nffg, virtualizer=virtualizer)
# Convert constraints
self._convert_nffg_constraints(nffg=nffg, virtualizer=virtualizer)
# explicitly call bind to resolve relative paths for safety reason
virtualizer.bind(relative=True)
self.log.debug(
"END conversion: NFFG(ver: %s) --> Virtualizer(ver: %s)" % (
N_VERSION, V_VERSION))
# Return with created Virtualizer
return virtualizer
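  # Minimal usage sketch (assumption; it mirrors the __main__ block at the
  # bottom of this module):
  #   converter = NFFGConverter(domain="TEST", logger=log)
  #   virt = converter.dump_to_Virtualizer(nffg=NFFG.parse_from_file(path))
  #   print virt.xml()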
@staticmethod
def clear_installed_elements (virtualizer):
"""
Remove NFs and flowrules from given Virtualizer.
    :param virtualizer: Virtualizer object to clear
:type virtualizer: Virtualizer
:return: cleared original virtualizer
:rtype: Virtualizer
"""
for vnode in virtualizer.nodes:
vnode.NF_instances.node.clear_data()
vnode.flowtable.flowentry.clear_data()
# explicitly call bind to resolve absolute paths for safety reason
# virtualizer.bind(relative=True)
return virtualizer
def adapt_mapping_into_Virtualizer (self, virtualizer, nffg, reinstall=False):
"""
Install the mapping related modification into a Virtualizer and return
with the new Virtualizer object.
:param virtualizer: Virtualizer object based on ETH's XML/Yang version.
    :param nffg: split NFFG (not necessarily in valid syntax)
    :param reinstall: whether to clear every NF/flowrule from the given
      virtualizer
:type reinstall: bool
:return: modified Virtualizer object
:rtype: :class:`Virtualizer`
"""
virt = virtualizer.yang_copy()
# Remove previously installed NFs and flowrules from Virtualizer for
# e.g. correct diff calculation
if reinstall:
self.log.debug("Remove pre-installed NFs/flowrules...")
self.clear_installed_elements(virtualizer=virt)
self.log.debug(
"START adapting modifications from %s into Virtualizer(id=%s, name=%s)"
% (nffg, virt.id.get_as_text(), virt.name.get_as_text()))
self._convert_nffg_nfs(virtualizer=virt, nffg=nffg)
self._convert_nffg_flowrules(virtualizer=virt, nffg=nffg)
self._convert_nffg_reqs(virtualizer=virt, nffg=nffg)
self._convert_nffg_constraints(virtualizer=virt, nffg=nffg)
# explicitly call bind to resolve absolute paths for safety reason
virt.bind(relative=True)
# virt.bind(relative=True)
self.log.debug(
"END adapting modifications from %s into Virtualizer(id=%s, name=%s)" % (
nffg, virt.id.get_as_text(), virt.name.get_as_text()))
# Return with modified Virtualizer
return virt
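  # Minimal usage sketch (assumption): adapt a mapped NFFG onto a previously
  # received domain view, clearing earlier NFs/flowrules first so a clean diff
  # can be calculated afterwards:
  #   adapted = converter.adapt_mapping_into_Virtualizer(
  #     virtualizer=domain_view, nffg=mapped_nffg, reinstall=True)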
@staticmethod
def unescape_output_hack (data):
    return data.replace("&lt;", "<").replace("&gt;", ">")
def _generate_sbb_base (self, request):
"""
    Generate a SingleBiSBiS node for service request conversion, utilizing the
    topology-specific data and SAPs from the given `request`.
:param request: utilized service request
:type request: :class:`NFFG`
:return: generated SBB
:rtype: :class:`Virtualizer`
"""
# Generate base SBB node
self.log.debug("Add main Virtualizer...")
base = Virtualizer(id="SingleBiSBiS", name="Single-BiSBiS-View")
self.log.debug("Add SBB node...")
sbb = base.nodes.add(item=virt_lib.Infra_node(id="SingleBiSBiS",
name="SingleBiSBiS",
type="BiSBiS"))
sbb.metadata.add(virt_lib.MetadataMetadata(key="generated", value=True))
self.log.debug("Add SAPs from request...")
# Add topology specific SAPs from request
for sap in request.saps:
v_sap_port = sbb.ports.add(
virt_lib.Port(id=sap.id,
name=sap.name,
port_type=self.TYPE_VIRTUALIZER_PORT_SAP))
if len(sap.ports) > 1:
self.log.warning("Multiple SAP port detected!")
sap_port = sap.ports.container[0]
self.__copy_port_attrs(v_port=v_sap_port, port=sap_port)
self.log.debug("Added SAP port: %s" % v_sap_port.id.get_value())
return base
def convert_service_request_init (self, request, base=None, reinstall=False):
"""
Convert service requests (given in NFFG format) into Virtualizer format
using the given `base` Virtualizer.
:param request: service request
:type request: :class:`NFFG`
:param base: base Virtualizer
:type base: :class:`Virtualizer`
    :param reinstall: whether to clear every NF/flowrule from the given
      virtualizer
:type reinstall: bool
:return: converted service request
:rtype: :class:`Virtualizer`
"""
if base is not None:
self.log.debug("Using given base Virtualizer: %s" % base.id.get_value())
base = base.yang_copy()
# Remove previously installed NFs and flowrules from Virtualizer for
# e.g. correct diff calculation
if reinstall:
self.log.debug("Remove pre-installed NFs/flowrules...")
self.clear_installed_elements(virtualizer=base)
else:
self.log.debug("No base Virtualizer is given! Generating SingleBiSBiS...")
base = self._generate_sbb_base(request=request)
self.log.debug("Transfer service request ID...")
base.id.set_value(request.id)
base.name.set_value(request.name)
if base.nodes.node.length() < 1:
self.log.warning("No BiSBiS node was detected!")
return base
elif base.nodes.node.length() > 1:
self.log.warning(
"Multiple BiSBiS nodes were detected in the Virtualizer!")
sbb = base.nodes.node[base.nodes.node.keys().pop()]
self.log.debug("Detected SBB node: %s" % sbb.id.get_value())
# Add NFs
self.log.debug("Converting NFs...")
for nf in request.nfs:
if str(nf.id) in sbb.NF_instances.node.keys():
self.log.error("%s already exists in the Virtualizer!" % nf.id)
continue
# Create Node object for NF
v_nf = self.__assemble_virt_nf(nf=nf)
# Add NF to Infra object
sbb.NF_instances.add(v_nf)
self.log.debug("Added NF: %s to Infra node(id=%s)"
% (nf.id, sbb.id.get_as_text()))
# Add NF ports
for port in nf.ports:
v_nf_port = self.__assemble_virt_nf_port(port=port)
sbb.NF_instances[str(nf.id)].ports.add(v_nf_port)
self.log.debug("Added Port: %s to NF node: %s" %
(port, v_nf.id.get_as_text()))
self.log.log(VERBOSE, "Created NF:\n%s" % v_nf.xml())
# Add flowrules
self.log.debug("Converting SG hops into flowrules...")
for hop in request.sg_hops:
# Get src port
if isinstance(hop.src.node, NodeSAP):
v_src = sbb.ports[str(hop.src.node.id)]
else:
v_src = sbb.NF_instances[str(hop.src.node.id)].ports[str(hop.src.id)]
# Get dst port
if isinstance(hop.dst.node, NodeSAP):
v_dst = sbb.ports[str(hop.dst.node.id)]
else:
v_dst = sbb.NF_instances[str(hop.dst.node.id)].ports[str(hop.dst.id)]
fe = sbb.flowtable.add(item=virt_lib.Flowentry(id=hop.id,
priority=100,
port=v_src,
out=v_dst,
match=hop.flowclass))
fe.resources.delay.set_value(hop.delay)
fe.resources.bandwidth.set_value(hop.bandwidth)
self.log.debug("Added flowrule: %s" % fe.id.get_value())
self.log.log(VERBOSE, "Created Flowrule:\n%s" % fe.xml())
# Add requirements
self._convert_nffg_reqs(nffg=request, virtualizer=base)
# Check connected NF constraints
self.log.debug("Converting constraints...")
for nf in request.nfs:
self.__set_vnf_constraints(vnode=sbb,
nf=nf,
virtualizer=base)
# Convert NFFG metadata
for key, value in request.metadata.iteritems():
meta_key = str(key)
meta_value = str(value) if value is not None else None
base.metadata.add(item=virt_lib.MetadataMetadata(key=meta_key,
value=meta_value))
base.bind(relative=True)
return base
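  # Minimal usage sketch (assumption): convert an NFFG service graph into a
  # Virtualizer-based request, either on top of a known domain view or on a
  # generated SingleBiSBiS node when no base is given:
  #   v_request = converter.convert_service_request_init(request=sg,
  #                                                      base=domain_view)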
def convert_service_request_del (self, request, base):
"""
Delete given service request from given virtualizer.
:param request: service request
:type request: :class:`NFFG`
:param base: base Virtualizer
:type base: :class:`Virtualizer`
:return: generated delete request
:rtype: :class:`Virtualizer`
"""
self.log.debug("Using given base Virtualizer: %s" % base.id.get_value())
base = base.full_copy()
self.log.debug("Transfer service request ID...")
base.id.set_value(request.id)
base.name.set_value(request.name)
if base.nodes.node.length() > 1:
self.log.warning("Multiple BiSBiS node detected in the Virtualizer!")
sbb = base.nodes.node[base.nodes.node.keys().pop()]
self.log.debug("Detected SBB node: %s" % sbb.id.get_value())
    # Remove NFs
self.log.debug("Removing NFs...")
for nf in request.nfs:
if str(nf.id) not in sbb.NF_instances.node.keys():
self.log.error("NF: %s is missing from Virtualizer!" % nf.id)
continue
deleted = sbb.NF_instances.remove(nf.id)
self.log.debug("Removed NF: %s" % deleted.id.get_value())
    # Remove flowrules
self.log.debug("Removing flowrules...")
for hop in request.sg_hops:
if str(hop.id) not in sbb.flowtable.flowentry.keys():
self.log.error("Flowrule: %s is missing from Virtualizer!" % hop.id)
continue
deleted = sbb.flowtable.remove(str(hop.id))
self.log.debug("Removed flowrule: %s" % deleted.id.get_value())
return base
# noinspection PyShadowingNames
class UC3MNFFGConverter(object):
"""
Convert JSON-based UC3M format to :class:`NFFG`.
Currently the format contains limited information therefore the conversion
is very rudimentary.
"""
def __init__ (self, domain=None, logger=None):
"""
Init.
:param domain: domain name
:type domain: str
:param logger: optional logger
:type logger: str or :any:`logging.Logger`
:return: None
"""
# Save domain name for define domain attribute in infras
self.domain = domain
self.log = logger if logger is not None else logging.getLogger(__name__)
self.log.debug("Created UC3MCNFFGConverter with domain name: %s" %
self.domain)
def parse_from_raw (self, raw_data, filter_empty_nodes=False,
level=logging.DEBUG):
"""
Convert JSON-based Virtualizer-like format to NFFG.
:param raw_data: raw data
:type raw_data: str
:param filter_empty_nodes: skip nodes which only exist for valid
inter-BiSBiS links (default: False)
:type filter_empty_nodes: bool
:param level: optional logging level (default: DEBUG)
:type level: int
:return: converted NFFG
:rtype: :class:`NFFG`
"""
try:
topo = json.loads(raw_data, object_hook=unicode_to_str)
return self.parse_from_json(data=topo,
filter_empty_nodes=filter_empty_nodes,
level=level)
except ValueError:
self.log.error("Received data is not valid JSON!")
def parse_from_json (self, data, filter_empty_nodes=False,
level=logging.DEBUG):
"""
Convert JSON-based Virtualizer-like format to NFFG.
:param data: parsed JSON
:type data: dict
:param filter_empty_nodes: skip nodes which only exist for valid
inter-BiSBiS links (default: False)
:type filter_empty_nodes: bool
:param level: logging level
:return: converted NFFG
:rtype: :class:`NFFG`
"""
self.log.log(level, "START conversion: TADS topology --> NFFG")
try:
# Create main NFFG
nffg = NFFG()
for node in data['nodes']['node']:
node_id = node['id']
self.log.log(level, 'Detected node: %s' % node_id)
if filter_empty_nodes:
if len(node['links']['link']) == 0 and \
len(node.get('resources', ())) == 0:
# There is no intra BisBis link --> empty BB
self.log.log(level, "Node is a bare BiSBiS! Jump to next node...")
continue
# Add Infra BiSBiS
infra = nffg.add_infra(id=node_id,
name="ExternalNode",
domain=self.domain,
infra_type=NFFG.TYPE_INFRA_BISBIS)
if 'resources' in node:
# Add resources if detected node is not a bare Node
infra.resources.cpu = _res_parser(node['resources'].get('cpu'))
infra.resources.mem = _res_parser(node['resources'].get('mem'))
infra.resources.storage = _res_parser(
node['resources'].get('storage'))
# Add Infra BiSBiS metadata
for meta in node['metadata']:
self.log.log(level, "Add metadata to Infra node: %s" % meta)
infra.add_metadata(name=meta['key'], value=meta['value'])
# Add ports
        for port in node['ports']['port']:
          port_id = port['id']
          self.log.log(level, "Add port to Infra node: %s" % port_id)
          infra_port = infra.add_port(id=port_id)
          for meta in port['metadata']:
            # Attach the port metadata to the created infra port object
            infra_port.add_metadata(name=meta['key'], value=meta['value'])
# Add main metadata
for meta in data['metadata']:
self.log.log(level, "Add metadata to NFFG: %s" % meta)
nffg.add_metadata(name=meta['key'], value=meta['value'])
for link in data['links']['link']:
src_node, src_port = self.__parse_abs_link(link_data=link['src'])
dst_node, dst_port = self.__parse_abs_link(link_data=link['dst'])
        # In the received Virtualizer-like format there are in most cases links
        # which contain references to nodes of external BGP domains that are
        # not in the topology --> broken links, skip processing
try:
nffg.add_link(src_port=nffg[src_node].ports[src_port],
dst_port=nffg[dst_node].ports[dst_port])
except KeyError as e:
self.log.log(VERBOSE, "Got broken link with non-existent element: %s!"
" Skip processing link..." % e)
pass
self.log.log(level, "END conversion: TADS topology --> NFFG")
return nffg
except ValueError:
self.log.error("Received data from BGP-LS speaker is not valid JSON!")
except KeyError as e:
self.log.exception("Missing required field from topology data: %s!" % e)
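  # Illustrative sketch (assumption, derived from the keys read above): the
  # minimal JSON shape this parser expects looks like
  #   {"nodes": {"node": [{"id": "...", "resources": {...}, "metadata": [...],
  #                        "ports": {"port": [...]}, "links": {"link": [...]}}]},
  #    "links": {"link": [{"src": "...", "dst": "..."}]},
  #    "metadata": [{"key": "...", "value": "..."}]}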
@staticmethod
def __parse_abs_link (link_data):
"""
Parse the link ports and return the (node_id, port_id) tuple
:param link_data: link port path
:type link_data: str
:return: tuple of node, port ids
:rtype: tuple
"""
node_id = None
port_id = None
for item in link_data.split('/'):
if item.startswith('node[id='):
node_id = item.split('=')[1].rstrip(']')
if item.startswith('port[id='):
port_id = item.split('=')[1].rstrip(']')
return node_id, port_id
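# Illustrative sketch (not part of the original converter): the private
# __parse_abs_link() helper above reduces a link endpoint reference such as
#   .../nodes/node[id=bgp-node-1]/ports/port[id=3]
# to the tuple ('bgp-node-1', '3'); the ids are hypothetical and any path
# segment without a node[id=...] or port[id=...] component is ignored.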
if __name__ == "__main__":
# test_NFFGConverter()
# test_UC3MNFFGConverter()
import argparse
logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger(__name__)
parser = argparse.ArgumentParser(
description="Converter NFFG <--> Virtualizer",
add_help=True)
parser.add_argument("-p", "--path", metavar="path", type=str,
help="file path")
parser.add_argument("-u", "--uc3m", action="store_true", default=False,
help="set to convert UC3M format vice-versa")
args = parser.parse_args()
if args.uc3m:
print "Not implemented this parameter!"
else:
if str(args.path).endswith('.nffg'):
c = NFFGConverter(domain="TEST",
# ensure_unique_id=True,
logger=log)
nffg = NFFG.parse_from_file(args.path)
log.info("Parsed NFFG:\n%s" % nffg.dump())
virt = c.dump_to_Virtualizer(nffg=nffg)
log.info("Reconverted Virtualizer:\n%s" % virt.xml())
elif str(args.path).endswith('.xml'):
c = NFFGConverter(domain="TEST",
# ensure_unique_id=True,
logger=log)
virt = Virtualizer.parse_from_file(args.path)
virt.bind()
log.info("Parsed Virtualizer:\n%s" % virt.xml())
nffg = c.parse_from_Virtualizer(vdata=virt.xml())
log.info("Reconverted NFFG:\n%s" % nffg.dump())
|
{
"content_hash": "0da33b43d518466e3f8b92360c3fedf2",
"timestamp": "",
"source": "github",
"line_count": 2956,
"max_line_length": 81,
"avg_line_length": 43.68335588633288,
"alnum_prop": 0.5763351093488631,
"repo_name": "hsnlab/escape",
"id": "36ed2ce679e3e827719617b1dd27897dadb05963",
"size": "129746",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "escape/escape/util/conversion.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "381"
},
{
"name": "C",
"bytes": "9773701"
},
{
"name": "C++",
"bytes": "1144774"
},
{
"name": "Dockerfile",
"bytes": "4497"
},
{
"name": "HTML",
"bytes": "423218"
},
{
"name": "JavaScript",
"bytes": "9048"
},
{
"name": "Makefile",
"bytes": "121260"
},
{
"name": "Objective-C",
"bytes": "2964"
},
{
"name": "Python",
"bytes": "2856844"
},
{
"name": "Roff",
"bytes": "80820"
},
{
"name": "Shell",
"bytes": "190566"
}
],
"symlink_target": ""
}
|
from django.db import IntegrityError, transaction
from django.test import TestCase, skipIfDBFeature, skipUnlessDBFeature
from .fields import MyWrapper
from .models import Bar, Business, CustomAutoFieldModel, Employee, Foo
class BasicCustomPKTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.dan = Employee.objects.create(
employee_code=123,
first_name="Dan",
last_name="Jones",
)
cls.fran = Employee.objects.create(
employee_code=456,
first_name="Fran",
last_name="Bones",
)
cls.business = Business.objects.create(name="Sears")
cls.business.employees.add(cls.dan, cls.fran)
def test_querysets(self):
"""
Both pk and custom attribute_name can be used in filter and friends
"""
self.assertSequenceEqual(Employee.objects.filter(pk=123), [self.dan])
self.assertSequenceEqual(Employee.objects.filter(employee_code=123), [self.dan])
self.assertSequenceEqual(
Employee.objects.filter(pk__in=[123, 456]),
[self.fran, self.dan],
)
self.assertSequenceEqual(Employee.objects.all(), [self.fran, self.dan])
self.assertQuerySetEqual(
Business.objects.filter(name="Sears"), ["Sears"], lambda b: b.name
)
self.assertQuerySetEqual(
Business.objects.filter(pk="Sears"),
[
"Sears",
],
lambda b: b.name,
)
def test_querysets_related_name(self):
"""
Custom pk doesn't affect related_name based lookups
"""
self.assertSequenceEqual(
self.business.employees.all(),
[self.fran, self.dan],
)
self.assertQuerySetEqual(
self.fran.business_set.all(),
[
"Sears",
],
lambda b: b.name,
)
def test_querysets_relational(self):
"""
Queries across tables, involving primary key
"""
self.assertSequenceEqual(
Employee.objects.filter(business__name="Sears"),
[self.fran, self.dan],
)
self.assertSequenceEqual(
Employee.objects.filter(business__pk="Sears"),
[self.fran, self.dan],
)
self.assertQuerySetEqual(
Business.objects.filter(employees__employee_code=123),
[
"Sears",
],
lambda b: b.name,
)
self.assertQuerySetEqual(
Business.objects.filter(employees__pk=123),
[
"Sears",
],
lambda b: b.name,
)
self.assertQuerySetEqual(
Business.objects.filter(employees__first_name__startswith="Fran"),
[
"Sears",
],
lambda b: b.name,
)
def test_get(self):
"""
Get can accept pk or the real attribute name
"""
self.assertEqual(Employee.objects.get(pk=123), self.dan)
self.assertEqual(Employee.objects.get(pk=456), self.fran)
with self.assertRaises(Employee.DoesNotExist):
Employee.objects.get(pk=42)
# Use the name of the primary key, rather than pk.
self.assertEqual(Employee.objects.get(employee_code=123), self.dan)
def test_pk_attributes(self):
"""
pk and attribute name are available on the model
No default id attribute is added
"""
# pk can be used as a substitute for the primary key.
# The primary key can be accessed via the pk property on the model.
e = Employee.objects.get(pk=123)
self.assertEqual(e.pk, 123)
# Or we can use the real attribute name for the primary key:
self.assertEqual(e.employee_code, 123)
with self.assertRaisesMessage(
AttributeError, "'Employee' object has no attribute 'id'"
):
e.id
def test_in_bulk(self):
"""
Custom pks work with in_bulk, both for integer and non-integer types
"""
emps = Employee.objects.in_bulk([123, 456])
self.assertEqual(emps[123], self.dan)
self.assertEqual(
Business.objects.in_bulk(["Sears"]),
{
"Sears": self.business,
},
)
def test_save(self):
"""
custom pks do not affect save
"""
fran = Employee.objects.get(pk=456)
fran.last_name = "Jones"
fran.save()
self.assertSequenceEqual(
Employee.objects.filter(last_name="Jones"),
[self.dan, fran],
)
class CustomPKTests(TestCase):
def test_custom_pk_create(self):
"""
New objects can be created both with pk and the custom name
"""
Employee.objects.create(employee_code=1234, first_name="Foo", last_name="Bar")
Employee.objects.create(pk=1235, first_name="Foo", last_name="Baz")
Business.objects.create(name="Bears")
Business.objects.create(pk="Tears")
def test_unicode_pk(self):
# Primary key may be Unicode string.
Business.objects.create(name="jaźń")
def test_unique_pk(self):
# The primary key must also be unique, so trying to create a new object
# with the same primary key will fail.
Employee.objects.create(
employee_code=123, first_name="Frank", last_name="Jones"
)
with self.assertRaises(IntegrityError):
with transaction.atomic():
Employee.objects.create(
employee_code=123, first_name="Fred", last_name="Jones"
)
def test_zero_non_autoincrement_pk(self):
Employee.objects.create(employee_code=0, first_name="Frank", last_name="Jones")
employee = Employee.objects.get(pk=0)
self.assertEqual(employee.employee_code, 0)
def test_custom_field_pk(self):
# Regression for #10785 -- Custom fields can be used for primary keys.
new_bar = Bar.objects.create()
new_foo = Foo.objects.create(bar=new_bar)
f = Foo.objects.get(bar=new_bar.pk)
self.assertEqual(f, new_foo)
self.assertEqual(f.bar, new_bar)
f = Foo.objects.get(bar=new_bar)
        self.assertEqual(f, new_foo)
self.assertEqual(f.bar, new_bar)
# SQLite lets objects be saved with an empty primary key, even though an
# integer is expected. So we can't check for an error being raised in that
# case for SQLite. Remove it from the suite for this next bit.
@skipIfDBFeature("supports_unspecified_pk")
def test_required_pk(self):
# The primary key must be specified, so an error is raised if you
# try to create an object without it.
with self.assertRaises(IntegrityError):
with transaction.atomic():
Employee.objects.create(first_name="Tom", last_name="Smith")
def test_auto_field_subclass_create(self):
obj = CustomAutoFieldModel.objects.create()
self.assertIsInstance(obj.id, MyWrapper)
@skipUnlessDBFeature("can_return_rows_from_bulk_insert")
def test_auto_field_subclass_bulk_create(self):
obj = CustomAutoFieldModel()
CustomAutoFieldModel.objects.bulk_create([obj])
self.assertIsInstance(obj.id, MyWrapper)
|
{
"content_hash": "26d3db21d56927166e17f08695631b2f",
"timestamp": "",
"source": "github",
"line_count": 220,
"max_line_length": 88,
"avg_line_length": 33.75454545454546,
"alnum_prop": 0.5861836789657958,
"repo_name": "manhhomienbienthuy/django",
"id": "47838714ca7a965e968300a7d74b9ac650dcbf65",
"size": "7428",
"binary": false,
"copies": "9",
"ref": "refs/heads/main",
"path": "tests/custom_pk/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "91648"
},
{
"name": "HTML",
"bytes": "238916"
},
{
"name": "JavaScript",
"bytes": "158214"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Procfile",
"bytes": "47"
},
{
"name": "Python",
"bytes": "16134531"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "392"
}
],
"symlink_target": ""
}
|
from splinter import Browser
class UreportApplication:
def __init__(self):
self.browser = Browser()
def close(self):
self.browser.quit()
def navigate_to_home_page(self):
return HomePage(self.browser).load()
class HomePage():
def __init__(self, browser):
self.browser = browser
def load(self):
self.browser.visit("http://localhost:8088/")
return self
def get_number_of_previous_polls(self):
list_of_polls = self.browser.find_by_id("list_of_polls")
return len(list_of_polls.find_by_name("poll"))
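# Minimal usage sketch (assumption, not part of the original fixture): it only
# exercises the page-object API defined above and needs a U-report instance
# listening on http://localhost:8088 to actually run.
def example_poll_count_check():
    app = UreportApplication()
    try:
        home = app.navigate_to_home_page()
        return home.get_number_of_previous_polls()
    finally:
        app.close()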
|
{
"content_hash": "07e9fe0b6b9ec54d033b007614eb4b64",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 64,
"avg_line_length": 21.925925925925927,
"alnum_prop": 0.6199324324324325,
"repo_name": "mbanje/ureport_uganda",
"id": "642ae785b424367e77b57edd92786aecb78055d6",
"size": "592",
"binary": false,
"copies": "2",
"ref": "refs/heads/branch_of_tests",
"path": "functional_test/fixtures/home_page.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "44849"
},
{
"name": "Shell",
"bytes": "16165"
}
],
"symlink_target": ""
}
|
from pyjamas.ui.Sink import Sink, SinkInfo
from pyjamas.ui.MenuBar import MenuBar
from pyjamas.ui.MenuItem import MenuItem
from pyjamas import Window
from pyjamas.ui.HTML import HTML
from pyjamas.ui.VerticalPanel import VerticalPanel
from pyjamas import DOM
from pyjamas.ui.ContextMenuPopupPanel import ContextMenuPopupPanel
class Menus(Sink):
def __init__(self):
Sink.__init__(self)
self.menu = MenuBar()
subMenu = MenuBar(True)
subMenu.addItem("<code>Code</code>", True, self)
subMenu.addItem("<strike>Strikethrough</strike>", True, self)
subMenu.addItem("<u>Underlined</u>", True, self)
menu0 = MenuBar(True)
menu0.addItem("<b>Bold</b>", True, self)
menu0.addItem("<i>Italicized</i>", True, self)
menu0.addItem("More »", True, subMenu)
menu1 = MenuBar(True)
menu1.addItem("<font color='#FF0000'><b>Apple</b></font>", True, self)
menu1.addItem("<font color='#FFFF00'><b>Banana</b></font>", True, self)
menu1.addItem("<font color='#FFFFFF'><b>Coconut</b></font>", True, self)
menu1.addItem("<font color='#8B4513'><b>Donut</b></font>", True, self)
menu2 = MenuBar(True)
menu2.addItem("Bling", self)
menu2.addItem("Ginormous", self)
menu2.addItem("<code>w00t!</code>", True, self)
self.menu.addItem(MenuItem("Style", menu0))
self.menu.addItem(MenuItem("Fruit", menu1))
self.menu.addItem(MenuItem("Term", menu2))
self.menu.setWidth("100%")
self.panel = VerticalPanel()
self.context = HTML("""Right-Click me<br/>to show a context menu.<br />
Left-click me<br />to do absolutely nothing.""")
self.panel.add(self.menu)
self.panel.add(self.context)
self.initWidget(self.panel)
self.context.setContextMenu(self)
def onContextMenu(self, sender):
event = DOM.eventGetCurrentEvent()
subMenu = MenuBar(True)
subMenu.addItem("<code>Code</code>", True, self)
subMenu.addItem("<strike>Strikethrough</strike>", True, self)
subMenu.addItem("<u>Underlined</u>", True, self)
x = DOM.eventGetClientX(event) + 2
y = DOM.eventGetClientY(event) + 2
popup = ContextMenuPopupPanel(subMenu)
popup.showAt(x, y)
def execute(self):
Window.alert("Thank you for selecting a menu item.")
def onShow(self):
pass
def init():
return SinkInfo("Menus", "The GWT <code>MenuBar</code> class makes it easy to build menus, including cascading sub-menus.", Menus)
|
{
"content_hash": "580e17a0eb5eb9128710877e5a4bb2a4",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 134,
"avg_line_length": 34.906666666666666,
"alnum_prop": 0.627196333078686,
"repo_name": "pombredanne/pyjs",
"id": "1d354575453140312373aaa815cd18a057572f81",
"size": "2618",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "examples/kitchensink/sink/Menus.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "4640"
},
{
"name": "Groff",
"bytes": "6633"
},
{
"name": "HTML",
"bytes": "10106"
},
{
"name": "JavaScript",
"bytes": "63385"
},
{
"name": "Makefile",
"bytes": "453"
},
{
"name": "Python",
"bytes": "5515375"
},
{
"name": "Shell",
"bytes": "4264"
}
],
"symlink_target": ""
}
|
from toontown.estate.DistributedClosetAI import DistributedClosetAI
from toontown.toon.ToonDNA import ToonDNA, HAT, GLASSES, BACKPACK, SHOES
from direct.distributed.ClockDelta import globalClockDelta
from direct.directnotify.DirectNotifyGlobal import directNotify
import ClosetGlobals
class DistributedTrunkAI(DistributedClosetAI):
notify = directNotify.newCategory('DistributedTrunkAI')
def __init__(self, air, furnitureMgr, itemType):
DistributedClosetAI.__init__(self, air, furnitureMgr, itemType)
self.hatList = []
self.glassesList = []
self.backpackList = []
self.shoesList = []
self.removedHats = []
self.removedGlasses = []
self.removedBackpacks = []
self.removedShoes = []
def generate(self):
if self.furnitureMgr.ownerId:
owner = self.air.doId2do.get(self.furnitureMgr.ownerId)
if owner:
self.hatList = owner.hatList
self.glassesList = owner.glassesList
self.backpackList = owner.backpackList
self.shoesList = owner.shoesList
self.gender = owner.dna.gender
else:
self.air.dbInterface.queryObject(self.air.dbId, self.furnitureMgr.ownerId, self.__gotOwner)
def __gotOwner(self, dclass, fields):
if dclass != self.air.dclassesByName['DistributedToonAI']:
self.notify.warning('Got object of wrong type!')
return
self.hatList = fields['setHatList'][0]
self.glassesList = fields['setGlassesList'][0]
self.backpackList = fields['setBackpackList'][0]
self.shoesList = fields['setShoesList'][0]
dna = ToonDNA(str=fields['setDNAString'][0])
self.gender = dna.gender
def __verifyAvatarInMyZone(self, av):
return av.getLocation() == self.getLocation()
def setState(self, mode, avId, ownerId, gender, hatList, glassesList, backpackList, shoesList):
self.sendUpdate('setState', [mode, avId, ownerId, gender, hatList, glassesList, backpackList, shoesList])
def removeItem(self, itemIdx, textureIdx, colorIdx, which):
avId = self.air.getAvatarIdFromSender()
if avId != self.furnitureMgr.ownerId:
self.air.writeServerEvent('suspicious', avId=avId, issue='Tried to remove item from someone else\'s closet!')
return
if avId != self.avId:
self.air.writeServerEvent('suspicious', avId=avId, issue='Tried to remove item while not interacting with closet!')
return
av = self.air.doId2do.get(avId)
if not av:
self.air.writeServerEvent('suspicious', avId=avId, issue='Tried to interact with a closet from another shard!')
return
if which == HAT:
self.removedHats.append((itemIdx, textureIdx, colorIdx))
elif which == GLASSES:
self.removedGlasses.append((itemIdx, textureIdx, colorIdx))
elif which == BACKPACK:
self.removedBackpacks.append((itemIdx, textureIdx, colorIdx))
elif which == SHOES:
self.removedShoes.append((itemIdx, textureIdx, colorIdx))
def setDNA(self, hatIdx, hatTexture, hatColor, glassesIdx, glassesTexture, glassesColor, backpackIdx, backpackTexture, backpackColor, shoesIdx, shoesTexture, shoesColor, finished, which):
avId = self.air.getAvatarIdFromSender()
if avId != self.avId:
self.air.writeServerEvent('suspicious', avId, 'Tried to set DNA from closet while not using it!')
return
av = self.air.doId2do.get(avId)
if not av:
self.air.writeServerEvent('suspicious', avId, 'Interacted with a closet from another shard!')
return
if not self.__verifyAvatarInMyZone(av):
self.air.writeServerEvent('suspicious', avId, 'Tried to setDNA while in another zone!')
return
if not finished:
# They changed one of their accessories.
if which == HAT:
av.b_setHat(hatIdx, hatTexture, hatColor)
if which == GLASSES:
av.b_setGlasses(glassesIdx, glassesTexture, glassesColor)
if which == BACKPACK:
av.b_setBackpack(backpackIdx, backpackTexture, backpackColor)
if which == SHOES:
av.b_setShoes(shoesIdx, shoesTexture, shoesColor)
elif finished == 1:
# The user pressed the cancel button. All we need to do is free him.
# Reset the removed items and our user.
av.b_setHat(hatIdx, hatTexture, hatColor)
av.b_setGlasses(glassesIdx, glassesTexture, glassesColor)
av.b_setBackpack(backpackIdx, backpackTexture, backpackColor)
av.b_setShoes(shoesIdx, shoesTexture, shoesColor)
self.removedHats = []
self.removedGlasses = []
self.removedBackpacks = []
self.removedShoes = []
self.avId = None
# Free the user.
self.d_setMovie(ClosetGlobals.CLOSET_MOVIE_COMPLETE, avId, globalClockDelta.getRealNetworkTime())
self.setState(ClosetGlobals.CLOSED, 0, self.furnitureMgr.ownerId, self.gender, self.hatList, self.glassesList, self.backpackList, self.shoesList)
elif finished == 2:
# They are done using the trunk. Update their removed items.
# Is the user actually the owner?
if avId != self.furnitureMgr.ownerId:
self.air.writeServerEvent('suspicious', avId, 'Tried to set their clothes from somebody else\'s closet!')
return
# Put on the accessories they want...
if which & HAT:
av.b_setHat(hatIdx, hatTexture, hatColor)
if which & GLASSES:
av.b_setGlasses(glassesIdx, glassesTexture, glassesColor)
if which & BACKPACK:
av.b_setBackpack(backpackIdx, backpackTexture, backpackColor)
if which & SHOES:
av.b_setShoes(shoesIdx, shoesTexture, shoesColor)
# Delete all their items they want to be deleted...
for hat in self.removedHats:
id, texture, color = hat
av.removeItemInAccessoriesList(HAT, id, texture, color)
for glasses in self.removedGlasses:
id, texture, color = glasses
av.removeItemInAccessoriesList(GLASSES, id, texture, color)
for backpack in self.removedBackpacks:
id, texture, color = backpack
av.removeItemInAccessoriesList(BACKPACK, id, texture, color)
for shoe in self.removedShoes:
id, texture, color = shoe
av.removeItemInAccessoriesList(SHOES, id, texture, color)
# Regenerate the available accessories...
self.removedHats = []
self.removedGlasses = []
self.removedBackpacks = []
self.removedShoes = []
self.generate()
self.avId = None
# We are done, free the user!
self.d_setMovie(ClosetGlobals.CLOSET_MOVIE_COMPLETE, avId, globalClockDelta.getRealNetworkTime())
self.setState(ClosetGlobals.CLOSED, 0, self.furnitureMgr.ownerId, self.gender, self.hatList, self.glassesList, self.backpackList, self.shoesList)
def enterAvatar(self):
avId = self.air.getAvatarIdFromSender()
if self.avId:
if self.avId == avId:
self.air.writeServerEvent('suspicious', avId=avId, issue='Tried to use closet twice!')
self.sendUpdateToAvatarId(avId, 'freeAvatar', [])
return
av = self.air.doId2do.get(avId)
if not av:
self.air.writeServerEvent('suspicious', avId=avId, issue='Not in same shard as closet!')
return
if not self.__verifyAvatarInMyZone(av):
self.air.writeServerEvent('suspicious', avId=avId, issue='Not in same zone as closet!')
return
self.avId = avId
self.setState(ClosetGlobals.OPEN, avId, self.furnitureMgr.ownerId, self.gender, self.hatList, self.glassesList, self.backpackList, self.shoesList)
|
{
"content_hash": "1058fc21d606869264d5b58057029965",
"timestamp": "",
"source": "github",
"line_count": 171,
"max_line_length": 191,
"avg_line_length": 47.71345029239766,
"alnum_prop": 0.6240960902071332,
"repo_name": "silly-wacky-3-town-toon/SOURCE-COD",
"id": "80e4beca5da83a4bb8872855e686ac9fb215a3d8",
"size": "8159",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "toontown/estate/DistributedTrunkAI.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "10249"
},
{
"name": "C",
"bytes": "1752256"
},
{
"name": "C#",
"bytes": "8440"
},
{
"name": "C++",
"bytes": "5485400"
},
{
"name": "Emacs Lisp",
"bytes": "210083"
},
{
"name": "F#",
"bytes": "2310"
},
{
"name": "Forth",
"bytes": "506"
},
{
"name": "GLSL",
"bytes": "1040"
},
{
"name": "JavaScript",
"bytes": "7003"
},
{
"name": "Makefile",
"bytes": "895"
},
{
"name": "Mask",
"bytes": "969"
},
{
"name": "NSIS",
"bytes": "1009050"
},
{
"name": "Objective-C",
"bytes": "21821"
},
{
"name": "PLSQL",
"bytes": "10200"
},
{
"name": "Pascal",
"bytes": "4986"
},
{
"name": "Perl6",
"bytes": "30612"
},
{
"name": "Puppet",
"bytes": "259"
},
{
"name": "Python",
"bytes": "33566014"
},
{
"name": "Shell",
"bytes": "14642"
},
{
"name": "Tcl",
"bytes": "2084458"
}
],
"symlink_target": ""
}
|
import re
import os
import six
from builtins import zip
from gevent import subprocess
def _pattern2re(pattern):
"""Transforms a GA pattern to a regular expression."""
i = 0
escaped = False
regex = ''
# Keep track of the index where the character class started, None if we're not
# currently parsing a character class.
charclass_start = None
while i < len(pattern):
to_skip = 1
to_add = pattern[i]
if escaped:
escaped = False
elif pattern[i] == '\\':
escaped = True
elif pattern[i] == '[':
charclass_start = i
elif pattern[i] == ']':
# When ']' is the first character after a character class starts, it
# doesn't end it, it just means ']' is part of the character class.
if charclass_start < i - 1:
charclass_start = None
elif pattern[i] == '?' and charclass_start is None:
# '?' shouldn't be replaced inside character classes.
to_add = '[^/]'
elif pattern[i] == '*' and charclass_start is None:
# '*' shouldn't be replaced inside character classes.
if pattern[i:i+3] == '**/':
to_add = '((.+/)?)'
to_skip = 3
elif pattern[i:i+2] == '**':
to_add = '.+'
to_skip = 2
else:
to_add = '[^/]*'
elif charclass_start is None:
to_add = re.escape(pattern[i])
regex += to_add
i += to_skip
if regex.startswith(r'\/'):
regex = '^' + regex
else:
regex = '/' + regex
return regex + '$'
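# Illustrative sketch (not part of the original module): a couple of
# hand-derived translations, to be treated as assumptions about the rules
# implemented above.
def _example_pattern2re():
  # '*.py'    -> r'/[^/]*\.py$'  ('*' becomes '[^/]*', '.' is escaped, and a
  #              leading '/' plus a trailing '$' are added)
  # 'docs/**' -> a trailing '**' becomes '.+', i.e. anything under docs/
  return [(p, _pattern2re(p)) for p in ('*.py', 'docs/**')]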
def _parse_gitattr_line(line):
"""Parses a line in a GA files.
Args:
line (str) - A line in a GA file.
Returns:
If the line is empty, a comment, or doesn't modify the 'recipes' attribute,
this function returns None.
Otherwise, it returns a pair with |pattern| and |has_recipes|, where
|pattern| is a regex encoding the pattern, and |has_recipes| is True if the
'recipes' attribute was set and False if it was unset (-) or unspecified (!)
"""
line = line.strip()
if not line or line.startswith('#'):
return None
if line.startswith((r'\#', r'\!')):
line = line[1:]
if not line.startswith('"'):
line = line.split()
pattern = line[0]
attributes = line[1:]
else:
is_escaped = False
pattern = ''
for i, c in enumerate(line[1:], 1):
if is_escaped:
pattern += c
is_escaped = False
elif c == '\\':
is_escaped = True
elif c == '"':
attributes = line[i+1:].strip().split()
break
else:
pattern += c
has_recipes = None
for attribute in reversed(attributes):
action = True
if attribute.startswith(('-', '!')):
action = False
attribute = attribute[1:]
if attribute == 'recipes':
has_recipes = action
break
if has_recipes is None:
return None
return _pattern2re(pattern), has_recipes
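# Illustrative examples (not part of the original module; added for clarity).
# Given the parsing above, typical .gitattributes lines map as follows:
#   '*.py recipes'      -> (r'/[^/]*\.py$', True)    'recipes' attribute set
#   '*.md -recipes'     -> (r'/[^/]*\.md$', False)   'recipes' attribute unset
#   '# a comment'       -> None                      ignored
#   '*.cc text eol=lf'  -> None                      does not touch 'recipes'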
class AttrChecker(object):
def __init__(self, repo, shortcircuit=True):
self._repo = repo
# Shortcircuit means we only care about whether any of the files we check
# has the 'recipes' attribute set (which is useful when checking if the
# revision is interesting), and not about the results for each individual
# file (which is useful for testing).
self._shortcircuit = shortcircuit
# A map from the git blob hash of a .gitattributes file to a list of the
# rules specified in that file that affect the 'recipes' attribute.
# Each rule is a pair of (pattern, action) where |pattern| is a compiled
# regex that matches the affected files, and action is True if the 'recipes'
# attributes is to be set or False otherwise.
# Rules are stored in the order they appear in the .gitattributes file.
self._gitattr_files_cache = {}
# Stores the gitattributes files for the current revision.
self._gitattr_files = None
def _git(self, cmd, stdin=None):
"""Executes a git command and returns the standard output."""
p = subprocess.Popen(['git'] + cmd, cwd=self._repo, stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
stdout, _ = p.communicate(six.ensure_binary(stdin) if stdin else None)
if p.returncode != 0:
raise subprocess.CalledProcessError(p.returncode, ['git'] + cmd, None)
return six.ensure_str(stdout).strip().splitlines()
def _get_directories(self, files):
"""Lists all the directories touched by any of the |files|."""
dirs = set([''])
for f in files:
f = os.path.dirname(f)
while f and f not in dirs:
dirs.add(f)
f = os.path.dirname(f)
return dirs
def _ensure_gitattributes_files_loaded(self, revision, files):
"""Loads and parses all the .gitattributes files in the given revision."""
self._gitattr_files = []
# We list all the directories that were touched by any of the files, and
# search for .gitattributes files in them.
touched_dirs = self._get_directories(files)
possible_gitattr_paths = '\n'.join(
'%s:%s' % (revision, os.path.join(d, '.gitattributes'))
for d in touched_dirs)
# We ask git to list the hashes for all the .gitattributes files we listed
# above. If the file doesn't exist, git returns '<object> missing', where
    # object is the revision and .gitattributes file we asked for.
possible_gitattr_blobs = self._git(
['cat-file', '--batch-check=%(objectname)'],
possible_gitattr_paths)
for line, d in zip(possible_gitattr_blobs, touched_dirs):
if line.endswith(' missing'):
continue
if d != '':
d += '/'
self._gitattr_files.append(('/' + d, self._parse_gitattr_file(line)))
    # Store the paths in descending order so that, when one path is a prefix
    # of another, the more specific (longer) path comes first.
self._gitattr_files.sort()
self._gitattr_files.reverse()
def _parse_gitattr_file(self, blob_hash):
"""Returns a list of patterns and actions parsed from the GA file.
Parses the .gitattributes file pointed at by |blob_hash|, and returns the
patterns that set, unset or unspecify the 'recipes' attribute.
Args:
blob_hash (sha1) - A hash that points to a .gitattributes file in the git
repository.
Returns:
A list of |(pattern, action)| where |pattern| is a compiled regular
expression encoding a pattern in the GA file, and |action| is True if
'recipes' was set, and False if it was unset (-) or unspecified (!).
"""
if blob_hash in self._gitattr_files_cache:
return self._gitattr_files_cache[blob_hash]
rules = []
for line in self._git(['cat-file', 'blob', blob_hash]):
parsed_line = _parse_gitattr_line(line)
if parsed_line is None:
continue
pattern, attr_value = parsed_line
if rules and rules[-1][1] == attr_value:
rules[-1][0] = '((%s)|(%s))' % (rules[-1][0], pattern)
else:
if rules:
rules[-1][0] = re.compile(rules[-1][0])
rules.append([pattern, attr_value])
if rules:
rules[-1][0] = re.compile(rules[-1][0])
self._gitattr_files_cache[blob_hash] = rules
return rules
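  # Illustrative only (added for clarity): consecutive lines with the same
  # 'recipes' action are merged into one alternation, so a .gitattributes file
  # containing
  #   *.py recipes
  #   *.cfg recipes
  #   *.md -recipes
  # produces rules roughly like
  #   [(re.compile(r'((/[^/]*\.py$)|(/[^/]*\.cfg$))'), True),
  #    (re.compile(r'/[^/]*\.md$'), False)]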
def _check_file(self, f):
"""Check whether |f| has the 'recipes' attribute set.
Returns True if the file |f| has the 'recipes' attribute set, and False
otherwise.
"""
    # If the file path starts with the GA path, then that path is a parent of
    # the file. Note that since the GA paths are sorted in descending order,
    # the first one we find will be the most specific one.
for path, rules in self._gitattr_files:
if not f.startswith(path):
continue
# Iterate over the rules in reverse, so the last rule comes first and we
# can return early.
result = None
for pattern, action in reversed(rules):
if pattern.search(f):
result = action
break
# If the result is not None, then the GA told us how to handle the file
# and we can stop looking.
if result is not None:
return result
# No GA specified a rule for the file, so the attribute is unspecified and
# not set.
return False
def check_files(self, revision, files):
"""Checks the 'recipes' attribute for the |files| at the given |revision|.
If |shortcircuit| was specified when creating this object, returns True if
any of the |files| has the 'recipes' attribute set.
Otherwise, returns a list with an entry for each file |f| specifying
whether it has the 'recipes' attribute set or not.
"""
# Make sure the gitattribute files are loaded at the right revision.
self._ensure_gitattributes_files_loaded(revision, files)
results = (self._check_file('/' + f) for f in files)
if self._shortcircuit:
return any(results)
return list(results)
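# Hedged usage sketch (illustrative only; the checkout path and revision below
# are placeholders, not part of the original module). AttrChecker is driven
# roughly like this against a real local git checkout:
#   checker = AttrChecker('/path/to/checkout', shortcircuit=True)
#   interesting = checker.check_files('deadbeef', ['recipes/foo.py', 'README.md'])
# With shortcircuit=True the result is a single bool (True if any file has the
# 'recipes' attribute set); with shortcircuit=False it is a per-file list of
# booleans in the same order as the input.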
|
{
"content_hash": "78e9e1d66c230bc7514dc0f51fa5e305",
"timestamp": "",
"source": "github",
"line_count": 252,
"max_line_length": 80,
"avg_line_length": 34.48412698412698,
"alnum_prop": 0.6271576524741082,
"repo_name": "luci/recipes-py",
"id": "298d05d84386fd456767ae76e454f275308a3eb5",
"size": "8864",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "recipe_engine/internal/gitattr_checker.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "26"
},
{
"name": "Python",
"bytes": "900422"
},
{
"name": "Shell",
"bytes": "5746"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from django.test import TestCase
from django.core.cache import cache
from django.core import mail
from model_mommy import mommy
from system.models import Configuration
from dbaas.tests.helpers import DatabaseHelper
class EmailBaseTest(TestCase):
action = 'update_ssl'
def setUp(self):
cache.clear()
mail.outbox = []
self.email_from = Configuration(
name='email_addr_from', value='dbaas@mail.com'
)
self.email_from.save()
self.email_adm = Configuration(
name='new_user_notify_email', value='adm@mail.com'
)
self.email_adm.save()
self.team = mommy.make(
'Team',
name='team_1',
email='team_1@email.test.com',
contacts='contact_1',
role__name='fake_role',
organization__name='fake_organization'
)
self.database = DatabaseHelper.create(
name='fake_db_name', team=self.team
)
self.task_schedule = mommy.make(
'TaskSchedule',
method_path=self.action,
database=self.database
)
|
{
"content_hash": "c64fa7c2b68fb240113736f722cd2895",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 62,
"avg_line_length": 27.41860465116279,
"alnum_prop": 0.5869380831212893,
"repo_name": "globocom/database-as-a-service",
"id": "85ccb44c9336c71f6dba05380189864467d5243e",
"size": "1179",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dbaas/util/tests/test_email_notification/base.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "243568"
},
{
"name": "Dockerfile",
"bytes": "1372"
},
{
"name": "HTML",
"bytes": "310401"
},
{
"name": "JavaScript",
"bytes": "988830"
},
{
"name": "Makefile",
"bytes": "5199"
},
{
"name": "Python",
"bytes": "9674426"
},
{
"name": "Shell",
"bytes": "215115"
}
],
"symlink_target": ""
}
|
"""
Entry point for raw requests. Can be used to query parts of the API for which specific methods have not yet been implemented, while still benefiting from the core methods.
For instance: client.raw()
"""
def raw(client, url, extra_params=None, verbose=False, multipage=None, page_limit=10, count_per_page=100):
if not multipage:
return client._get(url=url, extra_params=extra_params, verbose=verbose)
else:
return client._get_multipage(url=url, extra_params=extra_params, verbose=verbose, page_limit=page_limit, count=count_per_page)
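# Hedged usage sketch (illustrative only): assuming `client` is an already
# constructed navitia client exposing _get/_get_multipage, and that 'coverage'
# is a valid endpoint path for the account:
#   one_page = raw(client, 'coverage')
#   many = raw(client, 'coverage', multipage=True, page_limit=5, count_per_page=50)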
|
{
"content_hash": "d9575757556c076c306b689fda278d2c",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 172,
"avg_line_length": 47.083333333333336,
"alnum_prop": 0.7380530973451327,
"repo_name": "leonardbinet/navitia_client",
"id": "cf70face3c7a912f22254fb707ac7338c8f28bec",
"size": "565",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "navitia_client/raw.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "50783"
}
],
"symlink_target": ""
}
|
"""Support for the Philips Hue system."""
import ipaddress
import logging
from aiohue.util import normalize_bridge_id
import voluptuous as vol
from homeassistant import config_entries, core
from homeassistant.components import persistent_notification
from homeassistant.const import CONF_HOST
from homeassistant.helpers import config_validation as cv, device_registry as dr
from .bridge import HueBridge
from .const import (
CONF_ALLOW_HUE_GROUPS,
CONF_ALLOW_UNREACHABLE,
DEFAULT_ALLOW_HUE_GROUPS,
DEFAULT_ALLOW_UNREACHABLE,
DOMAIN,
)
_LOGGER = logging.getLogger(__name__)
CONF_BRIDGES = "bridges"
DATA_CONFIGS = "hue_configs"
PHUE_CONFIG_FILE = "phue.conf"
BRIDGE_CONFIG_SCHEMA = vol.Schema(
{
# Validate as IP address and then convert back to a string.
vol.Required(CONF_HOST): vol.All(ipaddress.ip_address, cv.string),
vol.Optional(CONF_ALLOW_UNREACHABLE): cv.boolean,
vol.Optional(CONF_ALLOW_HUE_GROUPS): cv.boolean,
vol.Optional("filename"): str,
}
)
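# Illustrative only (added for clarity; the address is a placeholder and the
# keys assume the CONF_* constants map to the names shown): a bridge entry
# that satisfies BRIDGE_CONFIG_SCHEMA would look like
#   host: 192.168.1.2
#   allow_unreachable: false
#   allow_hue_groups: true
#   filename: phue.conf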
CONFIG_SCHEMA = vol.Schema(
vol.All(
cv.deprecated(DOMAIN, invalidation_version="0.115.0"),
{
DOMAIN: vol.Schema(
{
vol.Optional(CONF_BRIDGES): vol.All(
cv.ensure_list, [BRIDGE_CONFIG_SCHEMA],
)
}
)
},
),
extra=vol.ALLOW_EXTRA,
)
async def async_setup(hass, config):
"""Set up the Hue platform."""
conf = config.get(DOMAIN)
if conf is None:
conf = {}
hass.data[DOMAIN] = {}
hass.data[DATA_CONFIGS] = {}
# User has not configured bridges
if CONF_BRIDGES not in conf:
return True
bridges = conf[CONF_BRIDGES]
configured_hosts = {
entry.data.get("host") for entry in hass.config_entries.async_entries(DOMAIN)
}
for bridge_conf in bridges:
host = bridge_conf[CONF_HOST]
# Store config in hass.data so the config entry can find it
hass.data[DATA_CONFIGS][host] = bridge_conf
if host in configured_hosts:
continue
        # No existing config entry found, trigger link config flow. Because we're
        # inside the setup of this component we'll have to use hass.async_create_task
        # to avoid a deadlock: creating a config entry will set up the component,
        # but the setup would block until the entry is created!
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_IMPORT},
data={"host": bridge_conf[CONF_HOST]},
)
)
return True
async def async_setup_entry(
hass: core.HomeAssistant, entry: config_entries.ConfigEntry
):
"""Set up a bridge from a config entry."""
host = entry.data["host"]
config = hass.data[DATA_CONFIGS].get(host)
# Migrate allow_unreachable from config entry data to config entry options
if (
CONF_ALLOW_UNREACHABLE not in entry.options
and CONF_ALLOW_UNREACHABLE in entry.data
and entry.data[CONF_ALLOW_UNREACHABLE] != DEFAULT_ALLOW_UNREACHABLE
):
options = {
**entry.options,
CONF_ALLOW_UNREACHABLE: entry.data[CONF_ALLOW_UNREACHABLE],
}
data = entry.data.copy()
data.pop(CONF_ALLOW_UNREACHABLE)
hass.config_entries.async_update_entry(entry, data=data, options=options)
# Migrate allow_hue_groups from config entry data to config entry options
if (
CONF_ALLOW_HUE_GROUPS not in entry.options
and CONF_ALLOW_HUE_GROUPS in entry.data
and entry.data[CONF_ALLOW_HUE_GROUPS] != DEFAULT_ALLOW_HUE_GROUPS
):
options = {
**entry.options,
CONF_ALLOW_HUE_GROUPS: entry.data[CONF_ALLOW_HUE_GROUPS],
}
data = entry.data.copy()
data.pop(CONF_ALLOW_HUE_GROUPS)
hass.config_entries.async_update_entry(entry, data=data, options=options)
# Overwrite from YAML configuration
if config is not None:
options = {}
if CONF_ALLOW_HUE_GROUPS in config and (
CONF_ALLOW_HUE_GROUPS not in entry.options
or config[CONF_ALLOW_HUE_GROUPS] != entry.options[CONF_ALLOW_HUE_GROUPS]
):
options[CONF_ALLOW_HUE_GROUPS] = config[CONF_ALLOW_HUE_GROUPS]
if CONF_ALLOW_UNREACHABLE in config and (
CONF_ALLOW_UNREACHABLE not in entry.options
or config[CONF_ALLOW_UNREACHABLE] != entry.options[CONF_ALLOW_UNREACHABLE]
):
options[CONF_ALLOW_UNREACHABLE] = config[CONF_ALLOW_UNREACHABLE]
if options:
hass.config_entries.async_update_entry(
entry, options={**entry.options, **options},
)
bridge = HueBridge(hass, entry)
if not await bridge.async_setup():
return False
hass.data[DOMAIN][entry.entry_id] = bridge
config = bridge.api.config
# For backwards compat
unique_id = normalize_bridge_id(config.bridgeid)
if entry.unique_id is None:
hass.config_entries.async_update_entry(entry, unique_id=unique_id)
    # For recovering from a bug where we incorrectly assumed homekit ID = bridge ID
elif entry.unique_id != unique_id:
# Find entries with this unique ID
other_entry = next(
(
entry
for entry in hass.config_entries.async_entries(DOMAIN)
if entry.unique_id == unique_id
),
None,
)
if other_entry is None:
# If no other entry, update unique ID of this entry ID.
hass.config_entries.async_update_entry(entry, unique_id=unique_id)
elif other_entry.source == config_entries.SOURCE_IGNORE:
# There is another entry but it is ignored, delete that one and update this one
hass.async_create_task(
hass.config_entries.async_remove(other_entry.entry_id)
)
hass.config_entries.async_update_entry(entry, unique_id=unique_id)
else:
# There is another entry that already has the right unique ID. Delete this entry
hass.async_create_task(hass.config_entries.async_remove(entry.entry_id))
return False
device_registry = await dr.async_get_registry(hass)
device_registry.async_get_or_create(
config_entry_id=entry.entry_id,
connections={(dr.CONNECTION_NETWORK_MAC, config.mac)},
identifiers={(DOMAIN, config.bridgeid)},
manufacturer="Signify",
name=config.name,
model=config.modelid,
sw_version=config.swversion,
)
if config.modelid == "BSB002" and config.swversion < "1935144040":
persistent_notification.async_create(
hass,
"Your Hue hub has a known security vulnerability ([CVE-2020-6007](https://cve.circl.lu/cve/CVE-2020-6007)). Go to the Hue app and check for software updates.",
"Signify Hue",
"hue_hub_firmware",
)
elif config.swupdate2_bridge_state == "readytoinstall":
        err = (
            "Please check for software updates of the bridge "
            "in the Philips Hue App."
        )
        _LOGGER.warning(err)
return True
async def async_unload_entry(hass, entry):
"""Unload a config entry."""
bridge = hass.data[DOMAIN].pop(entry.entry_id)
return await bridge.async_reset()
|
{
"content_hash": "bf3c1bbabc4604d5e83c0d2112941e7b",
"timestamp": "",
"source": "github",
"line_count": 228,
"max_line_length": 171,
"avg_line_length": 33.05701754385965,
"alnum_prop": 0.6221308212816771,
"repo_name": "titilambert/home-assistant",
"id": "1131d68baeccc8d9e569d9e412e5f5f4bac3884b",
"size": "7537",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "homeassistant/components/hue/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1488"
},
{
"name": "Python",
"bytes": "25849092"
},
{
"name": "Shell",
"bytes": "4410"
}
],
"symlink_target": ""
}
|
my_<caret>
from source import *
|
{
"content_hash": "dc6b567f9c81a85b3fbc093adf8f1960",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 20,
"avg_line_length": 11,
"alnum_prop": 0.696969696969697,
"repo_name": "smmribeiro/intellij-community",
"id": "f077e8e6c4e027e1175bbf2e0df81d5893d9d417",
"size": "33",
"binary": false,
"copies": "14",
"ref": "refs/heads/master",
"path": "python/testData/completion/beforeImport/beforeStarImport.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
import controller
# bool that specifies whether it is a development environment
DEBUG = True
# list of controller modules
CONTROLLERS = [controller]
|
{
"content_hash": "422795ffe4131f357e39b56388a6cbf2",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 60,
"avg_line_length": 21.428571428571427,
"alnum_prop": 0.7933333333333333,
"repo_name": "ral99/weppy",
"id": "1931ce10aa7de26248d50ab62a6f36e9267e7771",
"size": "150",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "scripts/weppy_project/settings.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "1671"
},
{
"name": "Python",
"bytes": "72716"
}
],
"symlink_target": ""
}
|
""" Event views for editorial app.
editorial/views/events.py
"""
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import json
from actstream import action
from braces.views import LoginRequiredMixin, FormMessagesMixin
from django.conf import settings
from django.shortcuts import redirect, get_object_or_404
from django.http import HttpResponse
from django.core.urlresolvers import reverse
from django.views.generic import UpdateView, CreateView, DeleteView, View
from editorial.forms import (
EventForm,
CommentForm,
NoteForm,
SimpleImageForm,
SimpleDocumentForm,
SimpleImageLibraryAssociateForm,
SimpleDocumentLibraryAssociateForm,
)
from editorial.models import (
Organization,
Project,
Story,
Event,
Discussion,
)
# ----------------------------------------------------------------------#
# Events Views
# ----------------------------------------------------------------------#
# ACCESS: Any org user, or user from an organization that is in collaborate_with
# should be able to create an event for P, Sr, St, F.
# Contractors should only be able to create events for P, Sr or St they are
# assigned to.
class EventCreateView(LoginRequiredMixin, FormMessagesMixin, CreateView):
"""A logged in user can create a event.
Events are used to manage information about events.
Events can either manage events that an organization are hosting
or events that an organization is reporting on.
Ex: Hosting = A townhall discussion hosted by an organization
Ex: Reporting = A press conference at city hall covered for a story.
Ex. Administrative = An internal event like an org or team meeting.
Events have a connection to either a Project, Story or Event.
"""
model = Event
form_class = EventForm
template_name = "editorial/events/event_form.html"
form_invalid_message = "Check the form."
form_valid_message = "Event created."
def get_form_kwargs(self):
"""Pass user organization to the form."""
kw = super(EventCreateView, self).get_form_kwargs()
kw.update({'organization': self.request.user.organization})
return kw
def form_valid(self, form):
"""Save -- but first adding owner and organization."""
self.object = event = form.save(commit=False)
# create and set discussion
discussion = Discussion.objects.create_discussion("EV")
event.discussion = discussion
# set user specific values
event.owner = self.request.user
event.organization = self.request.user.organization
event.save()
form.save_m2m()
# record action for activity stream
action.send(self.request.user, verb="created", action_object=event)
return redirect(self.get_success_url())
# ACCESS: Any org user, or user from an organization that is in collaborate_with
# should be able to edit an event for P, Sr, St, F.
# Contractors should only be able to edit events for P, Sr or St they are
# assigned to.
class EventUpdateView(LoginRequiredMixin, FormMessagesMixin, UpdateView):
""" The detail page for a event.
Displays the event information.
"""
model = Event
form_class = EventForm
template_name = "editorial/events/event_form.html"
form_invalid_message = "Something went wrong. Check the form."
form_valid_message = "Changes saved."
def get_form_kwargs(self):
"""Pass organization to form."""
kw = super(EventUpdateView, self).get_form_kwargs()
kw.update({'organization': self.request.user.organization, 'event': self.object})
return kw
def event_discussion(self):
"""Get discussion, comments and comment form for the event."""
self.object = self.get_object()
discussion = self.object.discussion
comments = discussion.comment_set.all().order_by('date')
form = CommentForm()
return {'discussion': discussion, 'comments': comments, 'form': form, }
def event_notes(self):
"""Get notes and note form for event."""
self.object = self.get_object()
notes = self.object.notes.all().order_by('-creation_date')
form = NoteForm()
return {'notes': notes, 'form': form}
def simple_images(self):
"""Return simple images."""
images = self.object.simple_image_assets.all()
form = SimpleImageForm()
addform = SimpleImageLibraryAssociateForm(organization=self.request.user.organization)
return {'images': images, 'form': form, 'addform': addform,}
def simple_documents(self):
"""Return simple documents."""
documents = self.object.simple_document_assets.all()
form = SimpleDocumentForm()
addform = SimpleDocumentLibraryAssociateForm(organization=self.request.user.organization)
return {'documents': documents, 'form': form, 'addform': addform,}
def get_success_url(self):
action.send(self.request.user, verb="edited", action_object=self.object)
return super(EventUpdateView, self).get_success_url()
# ACCESS: Any org user that is an admin or editor should be able to delete an
# event associated with their org, or an org PSS.
class EventDeleteView(LoginRequiredMixin, FormMessagesMixin, DeleteView):
"""View for handling deletion of an event.
In this project, we expect deletion to be done via a JS pop-up UI; we don't expect to
actually use the "do you want to delete this?" Django-generated page. However, this is
available if useful.
"""
model = Event
template_name = "editorial/events/event_delete.html"
form_valid_message = "Deleted."
form_invalid_message = "Please check form."
def get_success_url(self):
"""Post-deletion, return to the task parent URL."""
if self.object.project:
project = self.object.project
return reverse('project_event_list', kwargs={'pk': project.id})
if self.object.story:
story = self.object.story
return reverse('story_event_list', kwargs={'pk': story.id})
if self.object.evt_organization:
organization = self.object.evt_organization
return reverse('organization_event_list', kwargs={'pk': organization.id})
# ----------------------------------------------------------------------#
# Content Event Views
# ----------------------------------------------------------------------#
# ACCESS: Any org user should be able to create an event associated
# with their organization
class OrganizationEventView(LoginRequiredMixin, CreateView):
"""Display all the events associated with an organization.
"""
context_object_name = 'events'
template_name = 'editorial/events/event_list.html'
form_class = EventForm
def get_form_kwargs(self):
"""Pass organization to form."""
kw = super(OrganizationEventView, self).get_form_kwargs()
kw.update({'organization': self.request.user.organization})
return kw
def get_context_data(self, **kwargs):
"""Return events belonging to the organization."""
context = super(OrganizationEventView, self).get_context_data(**kwargs)
organization = get_object_or_404(Organization, id=self.kwargs['pk'])
events = organization.event_set.all()
reporting_ct = organization.event_set.filter(event_type="Reporting").count()
hosting_ct = organization.event_set.filter(event_type="Hosting").count()
administrative_ct = organization.event_set.filter(event_type="Administrative").count()
other_ct = organization.event_set.filter(event_type="Other").count()
context['organization'] = organization
context['events'] = events
context['reporting_ct'] = reporting_ct
context['hosting_ct'] = hosting_ct
context['administrative_ct'] = administrative_ct
context['other_ct'] = other_ct
return context
# TODO
# class OrganizationEventSchedule(View):
# """Return JSON of organization event schedule."""
#
# def get(self, request, *args, **kwargs):
# org_id = self.kwargs['pk']
# org = Organization.objects.get(id=org_id)
# org_event_cal = org.get_org_event_schedule()
#
# return HttpResponse(json.dumps(org_event_cal), content_type='application/json')
# ACCESS: Any org user should be able to view/create an event associated a project owned
# by their organization
# A user from an organization that is in collaborate_with on a project
# should be able to view/create an event for a project they have access to.
class ProjectEventView(LoginRequiredMixin, CreateView):
"""Display all the events associated with a project.
"""
context_object_name = 'events'
template_name = 'editorial/events/event_list.html'
form_class = EventForm
def get_form_kwargs(self):
"""Pass organization to form."""
kw = super(ProjectEventView, self).get_form_kwargs()
kw.update({'organization': self.request.user.organization})
return kw
def get_context_data(self, **kwargs):
"""Return events belonging to the project."""
context = super(ProjectEventView, self).get_context_data(**kwargs)
project = get_object_or_404(Project, id=self.kwargs['pk'])
events = project.event_set.all()
reporting_ct = project.event_set.filter(event_type="Reporting").count()
hosting_ct = project.event_set.filter(event_type="Hosting").count()
administrative_ct = project.event_set.filter(event_type="Administrative").count()
other_ct = project.event_set.filter(event_type="Other").count()
context['project'] = project
context['events'] = events
context['reporting_ct'] = reporting_ct
context['hosting_ct'] = hosting_ct
context['administrative_ct'] = administrative_ct
context['other_ct'] = other_ct
return context
class ProjectEventSchedule(View):
"""Return JSON of project event schedule.
displayed at /project/pk/events/
"""
def get(self, request, *args, **kwargs):
project_id = self.kwargs['pk']
project = Project.objects.get(id=project_id)
project_event_cal = project.get_project_event_schedule()
return HttpResponse(json.dumps(project_event_cal), content_type='application/json')
# ACCESS: Any org user should be able to view/create an event associated a story owned
# by their organization
# A user from an organization that is in collaborate_with on a story
# should be able to view/create an event for a story they have access to.
class StoryEventView(LoginRequiredMixin, CreateView):
"""Display all the events associated with a story."""
context_object_name = 'events'
template_name = 'editorial/events/event_list.html'
form_class = EventForm
def get_form_kwargs(self):
"""Pass organization to form."""
kw = super(StoryEventView, self).get_form_kwargs()
kw.update({'organization': self.request.user.organization})
return kw
def get_context_data(self, **kwargs):
"""Return events belonging to the project."""
context = super(StoryEventView, self).get_context_data(**kwargs)
story = get_object_or_404(Story, id=self.kwargs['pk'])
events = story.event_set.all()
reporting_ct = story.event_set.filter(event_type="Reporting").count()
hosting_ct = story.event_set.filter(event_type="Hosting").count()
administrative_ct = story.event_set.filter(event_type="Administrative").count()
other_ct = story.event_set.filter(event_type="Other").count()
context['story'] = story
context['events'] = events
context['reporting_ct'] = reporting_ct
context['hosting_ct'] = hosting_ct
context['administrative_ct'] = administrative_ct
context['other_ct'] = other_ct
return context
class StoryEventSchedule(View):
"""Return JSON of story event schedule.
displayed at /story/pk/events/
"""
def get(self, request, *args, **kwargs):
story_id = self.kwargs['pk']
story = Story.objects.get(id=story_id)
story_event_cal = story.get_story_event_schedule()
return HttpResponse(json.dumps(story_event_cal), content_type='application/json')
|
{
"content_hash": "12804af2eed47ba7f44cff47aee41cfc",
"timestamp": "",
"source": "github",
"line_count": 339,
"max_line_length": 97,
"avg_line_length": 36.51327433628319,
"alnum_prop": 0.6572144126676361,
"repo_name": "ProjectFacet/facet",
"id": "6120a4f308a0759a91db29726469fe5be4d040dc",
"size": "12378",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "project/editorial/views/events.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4350483"
},
{
"name": "HTML",
"bytes": "1677386"
},
{
"name": "JavaScript",
"bytes": "1120019"
},
{
"name": "Python",
"bytes": "804022"
},
{
"name": "Ruby",
"bytes": "225"
},
{
"name": "Shell",
"bytes": "889"
}
],
"symlink_target": ""
}
|
"""Test the DuckDNS component."""
import asyncio
from datetime import timedelta
import pytest
from homeassistant.setup import async_setup_component
from homeassistant.components import duckdns
from homeassistant.util.dt import utcnow
from tests.common import async_fire_time_changed
DOMAIN = 'bla'
TOKEN = 'abcdefgh'
@pytest.fixture
def setup_duckdns(hass, aioclient_mock):
"""Fixture that sets up DuckDNS."""
aioclient_mock.get(duckdns.UPDATE_URL, params={
'domains': DOMAIN,
'token': TOKEN
}, text='OK')
hass.loop.run_until_complete(async_setup_component(
hass, duckdns.DOMAIN, {
'duckdns': {
'domain': DOMAIN,
'access_token': TOKEN
}
}))
@asyncio.coroutine
def test_setup(hass, aioclient_mock):
"""Test setup works if update passes."""
aioclient_mock.get(duckdns.UPDATE_URL, params={
'domains': DOMAIN,
'token': TOKEN
}, text='OK')
result = yield from async_setup_component(hass, duckdns.DOMAIN, {
'duckdns': {
'domain': DOMAIN,
'access_token': TOKEN
}
})
assert result
assert aioclient_mock.call_count == 1
async_fire_time_changed(hass, utcnow() + timedelta(minutes=5))
yield from hass.async_block_till_done()
assert aioclient_mock.call_count == 2
@asyncio.coroutine
def test_setup_fails_if_update_fails(hass, aioclient_mock):
"""Test setup fails if first update fails."""
aioclient_mock.get(duckdns.UPDATE_URL, params={
'domains': DOMAIN,
'token': TOKEN
}, text='KO')
result = yield from async_setup_component(hass, duckdns.DOMAIN, {
'duckdns': {
'domain': DOMAIN,
'access_token': TOKEN
}
})
assert not result
assert aioclient_mock.call_count == 1
@asyncio.coroutine
def test_service_set_txt(hass, aioclient_mock, setup_duckdns):
"""Test set txt service call."""
# Empty the fixture mock requests
aioclient_mock.clear_requests()
aioclient_mock.get(duckdns.UPDATE_URL, params={
'domains': DOMAIN,
'token': TOKEN,
'txt': 'some-txt',
}, text='OK')
assert aioclient_mock.call_count == 0
yield from hass.components.duckdns.async_set_txt('some-txt')
assert aioclient_mock.call_count == 1
@asyncio.coroutine
def test_service_clear_txt(hass, aioclient_mock, setup_duckdns):
"""Test clear txt service call."""
# Empty the fixture mock requests
aioclient_mock.clear_requests()
aioclient_mock.get(duckdns.UPDATE_URL, params={
'domains': DOMAIN,
'token': TOKEN,
'txt': '',
'clear': 'true',
}, text='OK')
assert aioclient_mock.call_count == 0
yield from hass.components.duckdns.async_set_txt(None)
assert aioclient_mock.call_count == 1
|
{
"content_hash": "f2c3caaa8358df6252b071fe8b002388",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 69,
"avg_line_length": 26.92452830188679,
"alnum_prop": 0.6320953048353188,
"repo_name": "stefan-jonasson/home-assistant",
"id": "d64ffbca81f0da2024edd1ddffb6230d32a858a6",
"size": "2854",
"binary": false,
"copies": "10",
"ref": "refs/heads/dev",
"path": "tests/components/test_duckdns.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "4056"
},
{
"name": "Python",
"bytes": "8360711"
},
{
"name": "Ruby",
"bytes": "517"
},
{
"name": "Shell",
"bytes": "12658"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import argparse, datetime, json, multiprocessing, os, platform, shutil, subprocess, sys, traceback, tempfile
# Find physical core count of the machine.
if platform.system() == 'Linux':
lines = subprocess.check_output(['lscpu', '--parse=core'])
physical_cores = len(set(line for line in lines.strip().split('\n')
if not line.startswith('#')))
elif platform.system() == 'Darwin':
physical_cores = int(
subprocess.check_output(['sysctl', '-n', 'hw.physicalcpu']))
else:
raise Exception('Unknown platform: %s' % platform.system())
# Choose a reasonable number of application cores given the
# available physical cores.
app_cores = max(physical_cores - 2, 1)
legion_cxx_tests = [
# Tutorial
['tutorial/00_hello_world/hello_world', []],
['tutorial/01_tasks_and_futures/tasks_and_futures', []],
['tutorial/02_index_tasks/index_tasks', []],
['tutorial/03_global_vars/global_vars', []],
['tutorial/04_logical_regions/logical_regions', []],
['tutorial/05_physical_regions/physical_regions', []],
['tutorial/06_privileges/privileges', []],
['tutorial/07_partitioning/partitioning', []],
['tutorial/08_multiple_partitions/multiple_partitions', []],
['tutorial/09_custom_mapper/custom_mapper', []],
# Examples
['examples/circuit/circuit', []],
['examples/dynamic_registration/dynamic_registration', []],
['examples/ghost/ghost', ['-ll:cpu', '4']],
['examples/ghost_pull/ghost_pull', ['-ll:cpu', '4']],
['examples/realm_saxpy/realm_saxpy', []],
['examples/spmd_cgsolver/spmd_cgsolver', ['-ll:cpu', '4', '-perproc']],
# Tests
['test/attach_file_mini/attach_file_mini', []],
#['test/garbage_collection_mini/garbage_collection_mini', []], # FIXME: Broken: https://github.com/StanfordLegion/legion/issues/220
#['test/matrix_multiply/matrix_multiply', []], # FIXME: Broken: https://github.com/StanfordLegion/legion/issues/222
#['test/predspec/predspec', []], # FIXME: Broken: https://github.com/StanfordLegion/legion/issues/223
#['test/read_write/read_write', []], # FIXME: Broken: https://github.com/StanfordLegion/legion/issues/224
#['test/rendering/rendering', []], # FIXME: Broken: https://github.com/StanfordLegion/legion/issues/225
]
legion_hdf_cxx_tests = [
# Examples
['examples/attach_file/attach_file', []],
# Tests
#['test/hdf_attach/hdf_attach', []], # FIXME: Broken: https://github.com/StanfordLegion/legion/issues/221
]
legion_cxx_perf_tests = [
# Circuit: Heavy Compute
['examples/circuit/circuit',
['-l', '10', '-p', str(app_cores), '-npp', '2500', '-wpp', '10000', '-ll:cpu', str(app_cores)]],
# Circuit: Light Compute
['examples/circuit/circuit',
['-l', '10', '-p', '100', '-npp', '2', '-wpp', '4', '-ll:cpu', '2']],
]
regent_perf_tests = [
# Circuit: Heavy Compute
['language/examples/circuit_sparse.rg',
['-l', '10', '-p', str(app_cores), '-npp', '2500', '-wpp', '10000', '-ll:cpu', str(app_cores),
'-fflow-spmd-shardsize', str(app_cores)]],
# Circuit: Light Compute
['language/examples/circuit_sparse.rg',
['-l', '10', '-p', '100', '-npp', '2', '-wpp', '4', '-ll:cpu', '2',
'-fflow-spmd-shardsize', '2']],
# PENNANT: Heavy Compute
['language/examples/pennant_fast.rg',
['pennant.tests/sedovbig3x30/sedovbig.pnt',
'-seq_init', '0', '-par_init', '1', '-print_ts', '1', '-prune', '5',
'-npieces', str(app_cores), '-numpcx', '1', '-numpcy', str(app_cores),
'-ll:csize', '8192', '-ll:cpu', str(app_cores), '-fflow-spmd-shardsize', str(app_cores)]],
]
def cmd(command, env=None, cwd=None):
print(' '.join(command))
return subprocess.check_call(command, env=env, cwd=cwd)
def run_test_regent(launcher, root_dir, tmp_dir, bin_dir, env, thread_count):
cmd([os.path.join(root_dir, 'language/travis.py')], env=env)
def run_cxx(tests, flags, launcher, root_dir, bin_dir, env, thread_count):
for test_file, test_flags in tests:
test_dir = os.path.dirname(os.path.join(root_dir, test_file))
if bin_dir:
test_path = os.path.join(bin_dir, os.path.basename(test_file))
else:
test_path = os.path.join(root_dir, test_file)
cmd(['make', '-C', test_dir, '-j', str(thread_count)], env=env)
cmd(launcher + [test_path] + flags + test_flags, env=env, cwd=test_dir)
def run_regent(tests, flags, launcher, root_dir, env, thread_count):
for test_file, test_flags in tests:
test_dir = os.path.dirname(os.path.join(root_dir, test_file))
test_path = os.path.join(root_dir, test_file)
cmd(launcher + [test_path] + flags + test_flags, env=env, cwd=test_dir)
def run_test_legion_cxx(launcher, root_dir, tmp_dir, bin_dir, env, thread_count):
flags = ['-logfile', 'out_%.log']
run_cxx(legion_cxx_tests, flags, launcher, root_dir, bin_dir, env, thread_count)
def run_test_legion_hdf_cxx(launcher, root_dir, tmp_dir, bin_dir, env, thread_count):
flags = ['-logfile', 'out_%.log']
run_cxx(legion_hdf_cxx_tests, flags, launcher, root_dir, bin_dir, env, thread_count)
def run_test_fuzzer(launcher, root_dir, tmp_dir, bin_dir, env, thread_count):
env = dict(list(env.items()) + [('WARN_AS_ERROR', '0')])
fuzz_dir = os.path.join(tmp_dir, 'fuzz-tester')
cmd(['git', 'clone', 'https://github.com/StanfordLegion/fuzz-tester', fuzz_dir])
cmd(['python', 'main.py'], env=env, cwd=fuzz_dir)
def run_test_realm(launcher, root_dir, tmp_dir, bin_dir, env, thread_count):
test_dir = os.path.join(root_dir, 'test/realm')
cmd(['make', '-C', test_dir, 'DEBUG=0', 'SHARED_LOWLEVEL=0', 'USE_CUDA=0', 'USE_GASNET=0', 'clean'], env=env)
cmd(['make', '-C', test_dir, 'DEBUG=0', 'SHARED_LOWLEVEL=0', 'USE_CUDA=0', 'USE_GASNET=0', 'run_all'], env=env)
perf_dir = os.path.join(root_dir, 'test/performance/realm')
cmd(['make', '-C', perf_dir, 'DEBUG=0', 'SHARED_LOWLEVEL=0', 'clean_all'], env=env)
cmd(['make', '-C', perf_dir, 'DEBUG=0', 'SHARED_LOWLEVEL=0', 'run_all'], env=env)
def run_test_external(launcher, root_dir, tmp_dir, bin_dir, env, thread_count):
flags = ['-logfile', 'out_%.log']
# Fast Direct Solver
# Contact: Chao Chen <cchen10@stanford.edu>
solver_dir = os.path.join(tmp_dir, 'fastSolver2')
cmd(['git', 'clone', 'https://github.com/Charles-Chao-Chen/fastSolver2.git', solver_dir])
solver = [[os.path.join(solver_dir, 'spmd_benchMark/solver'),
['-machine', '1', '-core', '8', '-mtxlvl', '6', '-ll:cpu', '8']]]
run_cxx(solver, flags, launcher, root_dir, None, env, thread_count)
# Parallel Research Kernels: Stencil
# Contact: Wonchan Lee <wonchan@cs.stanford.edu>
prk_dir = os.path.join(tmp_dir, 'prk')
cmd(['git', 'clone', 'https://github.com/magnatelee/PRK.git', prk_dir])
# This uses a custom Makefile that requires additional
# configuration. Rather than go to that trouble it's easier to
# just use a copy of the standard Makefile template.
stencil_dir = os.path.join(prk_dir, 'LEGION', 'Stencil')
stencil_env = dict(list(env.items()) + [
('OUTFILE', 'stencil'),
('GEN_SRC', 'stencil.cc'),
('CC_FLAGS', (env['CC_FLAGS'] if 'CC_FLAGS' in env else '') +
' -DRADIUS=2 -DRESTRICT_KEYWORD -DDISABLE_BARRIER_MIGRATION'),
])
makefile = os.path.join(root_dir, 'apps/Makefile.template')
cmd(['make', '-f', makefile, '-C', stencil_dir, '-j', str(thread_count)], env=stencil_env)
stencil = os.path.join(stencil_dir, 'stencil')
cmd([stencil, '4', '10', '1000'])
# SNAP
# Contact: Mike Bauer <mbauer@nvidia.com>
snap_dir = os.path.join(tmp_dir, 'snap')
cmd(['git', 'clone', 'https://github.com/StanfordLegion/Legion-SNAP.git', snap_dir])
# This can't handle flags before application arguments, so place
# them after.
snap = [[os.path.join(snap_dir, 'src/snap'),
[os.path.join(snap_dir, 'input/mms.in')] + flags]]
run_cxx(snap, [], launcher, root_dir, None, env, thread_count)
def run_test_private(launcher, root_dir, tmp_dir, bin_dir, env, thread_count):
flags = ['-logfile', 'out_%.log']
# MiniAero
# Contact: Wonchan Lee <wonchan@cs.stanford.edu>
miniaero_dir = os.path.join(tmp_dir, 'miniaero-spmd')
cmd(['git', 'clone', '-b', 'spmd_flattened_superblocks',
'git@github.com:magnatelee/miniaero-spmd.git', miniaero_dir])
cmd(['make', '-C', miniaero_dir, '-j', str(thread_count)], env=env,
cwd=miniaero_dir)
for test in ['3D_Sod', '3D_Sod_2nd_Order'
# These tests take a long time so skip them by default.
# , 'FlatPlate', 'Ramp'
]:
test_dir = os.path.join(miniaero_dir, 'tests', test)
cmd([os.path.join(test_dir, 'test.sh')], env=env, cwd=test_dir)
def hostname():
return subprocess.check_output(['hostname']).strip()
def git_commit_id(repo_dir):
return subprocess.check_output(
['git', 'rev-parse', 'HEAD'], cwd=repo_dir).strip()
def git_branch_name(repo_dir):
proc = subprocess.Popen(
['git', 'symbolic-ref', '--short', 'HEAD'], cwd=repo_dir,
stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output, errors = proc.communicate()
if proc.returncode == 0:
return output.strip()
return None
def run_test_perf(launcher, root_dir, tmp_dir, bin_dir, env, thread_count):
flags = ['-logfile', 'out_%.log']
# Performance test configuration:
metadata = {
'host': (os.environ['CI_RUNNER_DESCRIPTION']
if 'CI_RUNNER_DESCRIPTION' in os.environ else hostname()),
'commit': (os.environ['CI_BUILD_REF'] if 'CI_BUILD_REF' in os.environ
else git_commit_id(root_dir)),
'branch': (os.environ['CI_BUILD_REF_NAME'] if 'CI_BUILD_REF_NAME' in os.environ
else git_branch_name(root_dir)),
}
cxx_measurements = {
# Hack: Use the command name as the benchmark name.
'benchmark': {
'type': 'argv',
'index': 0,
'filter': 'basename',
},
# Capture command line arguments following flags.
'argv': {
'type': 'argv',
'start': 1 + len(flags),
},
# Record running time in seconds.
'time_seconds': {
'type': 'regex',
'pattern': r'^ELAPSED TIME\s*=\s*(.*) s$',
'multiline': True,
}
}
regent_measurements = {
# Hack: Use the command name as the benchmark name.
'benchmark': {
'type': 'argv',
'index': 1,
'filter': 'basename',
},
# Capture command line arguments following flags.
'argv': {
'type': 'argv',
'start': 2,# + len(flags), # FIXME: Skipping flags, see below.
},
# Record running time in seconds.
'time_seconds': {
'type': 'command',
'args': [
os.path.join(root_dir, 'language/scripts/summarize.py'),
'--machine-readable', '-',
],
}
}
env = dict(list(env.items()) + [
('PERF_OWNER', 'StanfordLegion'),
('PERF_REPOSITORY', 'perf-data'),
('PERF_METADATA', json.dumps(metadata)),
])
cxx_env = dict(list(env.items()) + [
('PERF_MEASUREMENTS', json.dumps(cxx_measurements)),
# Launch through perf.py
('PERF_LAUNCHER', ' '.join(launcher)),
('LAUNCHER', ''),
])
regent_env = dict(list(env.items()) + [
('PERF_MEASUREMENTS', json.dumps(regent_measurements)),
# Launch through regent.py
('PERF_LAUNCHER', ''),
('LAUNCHER', ' '.join(launcher)),
])
# Build Regent first to avoid recompiling later.
cmd([os.path.join(root_dir, 'language/travis.py'), '--install-only'], env=env)
# Run Legion C++ performance tests.
runner = os.path.join(root_dir, 'perf.py')
launcher = [runner] # Note: LAUNCHER is still passed via the environment
run_cxx(legion_cxx_perf_tests, flags, launcher, root_dir, bin_dir, cxx_env, thread_count)
# Run Regent performance tests.
regent_path = os.path.join(root_dir, 'language/regent.py')
# FIXME: PENNANT can't handle the -logfile flag coming first, so just skip it.
run_regent(regent_perf_tests, [], [runner, regent_path], root_dir, regent_env, thread_count)
# Render the final charts.
subprocess.check_call(
[os.path.join(root_dir, 'tools', 'perf_chart.py'),
'https://github.com/StanfordLegion/perf-data.git'],
env=env)
def build_cmake(root_dir, tmp_dir, env, thread_count, test_legion_cxx, test_perf):
build_dir = os.path.join(tmp_dir, 'build')
install_dir = os.path.join(tmp_dir, 'install')
os.mkdir(build_dir)
os.mkdir(install_dir)
cmd(['cmake', '-DCMAKE_INSTALL_PREFIX=%s' % install_dir] +
(['-DLegion_BUILD_TUTORIAL=ON',
'-DLegion_BUILD_EXAMPLES=ON',
'-DLegion_BUILD_TESTS=ON',
] if test_legion_cxx or test_perf else []) +
[root_dir],
env=env, cwd=build_dir)
cmd(['make', '-C', build_dir, '-j', str(thread_count)], env=env)
cmd(['make', '-C', build_dir, 'install'], env=env)
return os.path.join(build_dir, 'bin')
def clean_cxx(tests, root_dir, env, thread_count):
for test_file, test_flags in tests:
test_dir = os.path.dirname(os.path.join(root_dir, test_file))
cmd(['make', '-C', test_dir, 'clean'], env=env)
def build_make_clean(root_dir, env, thread_count, test_legion_cxx, test_perf,
test_external, test_private):
# External and private also require cleaning, even though they get
# built separately.
if test_legion_cxx or test_perf or test_external or test_private:
clean_cxx(legion_cxx_tests, root_dir, env, thread_count)
def option_enabled(option, options, var_prefix='', default=True):
if options is not None: return option in options
option_var = '%s%s' % (var_prefix, option.upper())
if option_var in os.environ: return os.environ[option_var] == '1'
return default
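# Illustrative examples (not part of the original script; added for clarity).
# option_enabled resolves, in order: explicit option list, environment
# variable, then the default. For instance:
#   option_enabled('regent', ['regent', 'realm'], 'TEST_')  -> True   (explicit list wins)
#   option_enabled('regent', None, 'TEST_', True)           -> False  if TEST_REGENT=0 is set
#   option_enabled('regent', None, 'TEST_', False)          -> False  when neither is given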
class Stage(object):
__slots__ = ['name', 'begin_time']
def __init__(self, name):
self.name = name
def __enter__(self):
self.begin_time = datetime.datetime.now()
print()
print('#'*60)
print('### Entering Stage: %s' % self.name)
print('#'*60)
print()
sys.stdout.flush()
def __exit__(self, exc_type, exc_val, exc_tb):
end_time = datetime.datetime.now()
print()
print('#'*60)
print('### Exiting Stage: %s' % self.name)
print('### * Exception Type: %s' % exc_type)
print('### * Elapsed Time: %s' % (end_time - self.begin_time))
print('#'*60)
print()
sys.stdout.flush()
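# Illustrative only (added for clarity): Stage is a banner/timing context
# manager; a typical use, as in run_tests below, looks like
#   with Stage('build'):
#       cmd(['make', '-j', str(thread_count)], env=env)
# which prints entry/exit banners plus the elapsed time around the wrapped work.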
def report_mode(test_regent, test_legion_cxx, test_fuzzer, test_realm,
test_external, test_private, test_perf, use_gasnet,
use_cuda, use_llvm, use_hdf, use_spy, use_cmake, use_rdir):
print()
print('#'*60)
print('### Test Suite Configuration')
print('###')
print('### Running Tests:')
print('### * Regent: %s' % test_regent)
print('### * Legion C++: %s' % test_legion_cxx)
print('### * Fuzzer: %s' % test_fuzzer)
print('### * Realm: %s' % test_realm)
print('### * External: %s' % test_external)
print('### * Private: %s' % test_private)
print('### * Perf: %s' % test_perf)
print('###')
print('### Build Flags:')
print('### * GASNet: %s' % use_gasnet)
print('### * CUDA: %s' % use_cuda)
print('### * LLVM: %s' % use_llvm)
print('### * HDF5: %s' % use_hdf)
print('### * SPY: %s' % use_spy)
print('### * CMake: %s' % use_cmake)
print('### * RDIR: %s' % use_rdir)
print('#'*60)
print()
sys.stdout.flush()
def run_tests(test_modules=None,
debug=True,
use_features=None,
launcher=None,
thread_count=None,
root_dir=None,
keep_tmp_dir=False,
verbose=False):
if thread_count is None:
thread_count = multiprocessing.cpu_count()
if root_dir is None:
root_dir = os.path.dirname(os.path.realpath(__file__))
# Determine which test modules to run.
def module_enabled(module, default=True):
return option_enabled(module, test_modules, 'TEST_', default)
test_regent = module_enabled('regent')
test_legion_cxx = module_enabled('legion_cxx')
test_fuzzer = module_enabled('fuzzer', debug)
test_realm = module_enabled('realm', not debug)
test_external = module_enabled('external', False)
test_private = module_enabled('private', False)
test_perf = module_enabled('perf', False)
# Determine which features to build with.
def feature_enabled(feature, default=True):
return option_enabled(feature, use_features, 'USE_', default)
use_gasnet = feature_enabled('gasnet', False)
use_cuda = feature_enabled('cuda', False)
use_llvm = feature_enabled('llvm', False)
use_hdf = feature_enabled('hdf', False)
use_spy = feature_enabled('spy', False)
use_cmake = feature_enabled('cmake', False)
use_rdir = feature_enabled('rdir', True)
if test_perf and debug:
raise Exception('Performance tests requested but DEBUG is enabled')
if use_gasnet and launcher is None:
raise Exception('GASNet is enabled but launcher is not set (use --launcher or LAUNCHER)')
launcher = launcher.split() if launcher is not None else []
# Normalize the test environment.
env = dict(list(os.environ.items()) + [
('DEBUG', '1' if debug else '0'),
('LAUNCHER', ' '.join(launcher)),
('USE_GASNET', '1' if use_gasnet else '0'),
('USE_CUDA', '1' if use_cuda else '0'),
('USE_LLVM', '1' if use_llvm else '0'),
('USE_HDF', '1' if use_hdf else '0'),
('TEST_HDF', '1' if use_hdf else '0'),
('USE_SPY', '1' if use_spy else '0'),
('TEST_SPY', '1' if use_spy else '0'),
('USE_RDIR', '1' if use_rdir else '0'),
('LG_RT_DIR', os.path.join(root_dir, 'runtime')),
])
report_mode(test_regent, test_legion_cxx, test_fuzzer, test_realm,
test_external, test_private, test_perf, use_gasnet,
use_cuda, use_llvm, use_hdf, use_spy, use_cmake, use_rdir)
tmp_dir = tempfile.mkdtemp(dir=root_dir)
if verbose:
print('Using build directory: %s' % tmp_dir)
print()
try:
# Build tests.
with Stage('build'):
if use_cmake:
bin_dir = build_cmake(
root_dir, tmp_dir, env, thread_count, test_legion_cxx, test_perf)
else:
# With GNU Make, builds happen inline. But clean here.
build_make_clean(
root_dir, env, thread_count, test_legion_cxx, test_perf,
# These configurations also need to be cleaned first.
test_external, test_private)
bin_dir = None
# Run tests.
if test_regent:
with Stage('regent'):
run_test_regent(launcher, root_dir, tmp_dir, bin_dir, env, thread_count)
if test_legion_cxx:
with Stage('legion_cxx'):
run_test_legion_cxx(launcher, root_dir, tmp_dir, bin_dir, env, thread_count)
if use_hdf:
run_test_legion_hdf_cxx(launcher, root_dir, tmp_dir, bin_dir, env, thread_count)
if test_fuzzer:
with Stage('fuzzer'):
run_test_fuzzer(launcher, root_dir, tmp_dir, bin_dir, env, thread_count)
if test_realm:
with Stage('realm'):
run_test_realm(launcher, root_dir, tmp_dir, bin_dir, env, thread_count)
if test_external:
with Stage('external'):
run_test_external(launcher, root_dir, tmp_dir, bin_dir, env, thread_count)
if test_private:
with Stage('private'):
run_test_private(launcher, root_dir, tmp_dir, bin_dir, env, thread_count)
if test_perf:
with Stage('perf'):
run_test_perf(launcher, root_dir, tmp_dir, bin_dir, env, thread_count)
finally:
if keep_tmp_dir:
print('Leaving build directory:')
print(' %s' % tmp_dir)
else:
if verbose:
print('Removing build directory:')
print(' %s' % tmp_dir)
shutil.rmtree(tmp_dir)
report_mode(test_regent, test_legion_cxx, test_fuzzer, test_realm,
test_external, test_private, test_perf, use_gasnet,
use_cuda, use_llvm, use_hdf, use_spy, use_cmake, use_rdir)
def driver():
parser = argparse.ArgumentParser(
description = 'Legion test suite')
# What tests to run:
parser.add_argument(
'--test', dest='test_modules', action='append',
choices=['regent', 'legion_cxx', 'fuzzer', 'realm', 'external', 'private', 'perf'],
default=None,
help='Test modules to run (also via TEST_*).')
# Build options:
parser.add_argument(
'--debug', dest='debug', action='store_true',
default=os.environ['DEBUG'] == '1' if 'DEBUG' in os.environ else True,
help='Build Legion in debug mode (also via DEBUG).')
parser.add_argument(
'--use', dest='use_features', action='append',
choices=['gasnet', 'cuda', 'llvm', 'hdf', 'spy', 'cmake', 'rdir'],
default=None,
help='Build Legion with features (also via USE_*).')
parser.add_argument(
'--launcher', dest='launcher', action='store',
default=os.environ['LAUNCHER'] if 'LAUNCHER' in os.environ else None,
help='Launcher for Legion tests (also via LAUNCHER).')
parser.add_argument(
'-C', '--directory', dest='root_dir', metavar='DIR', action='store', required=False,
help='Legion root directory.')
parser.add_argument(
'-j', dest='thread_count', nargs='?', type=int,
help='Number threads used to compile.')
parser.add_argument(
'--keep', dest='keep_tmp_dir', action='store_true',
help='Keep temporary directory.')
parser.add_argument(
'-v', '--verbose', dest='verbose', action='store_true',
help='Print more debugging information.')
args = parser.parse_args()
run_tests(**vars(args))
if __name__ == '__main__':
driver()
|
{
"content_hash": "1f869b3b2e7c1f964eb44e1b578ea74b",
"timestamp": "",
"source": "github",
"line_count": 544,
"max_line_length": 135,
"avg_line_length": 41.76654411764706,
"alnum_prop": 0.5844373046960961,
"repo_name": "sdalton1/legion",
"id": "548e72f852729c5b8e8405899c258764bdd9ae1c",
"size": "23330",
"binary": false,
"copies": "1",
"ref": "refs/heads/stable",
"path": "test.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "162932"
},
{
"name": "C++",
"bytes": "9794902"
},
{
"name": "CMake",
"bytes": "134317"
},
{
"name": "Cuda",
"bytes": "9992"
},
{
"name": "Lua",
"bytes": "93790"
},
{
"name": "Makefile",
"bytes": "60540"
},
{
"name": "Objective-C",
"bytes": "5007"
},
{
"name": "Perl",
"bytes": "235404"
},
{
"name": "Perl6",
"bytes": "55956"
},
{
"name": "Python",
"bytes": "336322"
},
{
"name": "Rouge",
"bytes": "1400496"
},
{
"name": "Shell",
"bytes": "161"
},
{
"name": "Terra",
"bytes": "1139181"
}
],
"symlink_target": ""
}
|
from django.contrib import admin
from shipping.models import Country
class CountryAdmin(admin.ModelAdmin):
list_display = ('__str__', 'full_name', 'code', 'code_a2', 'code_a3')
search_fields = ['name', 'full_name', 'code', 'code_a2', 'code_a3']
admin.site.register(Country)
|
{
"content_hash": "444c5be43efde268f49ea0f83ff6569a",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 73,
"avg_line_length": 31.555555555555557,
"alnum_prop": 0.6795774647887324,
"repo_name": "juntatalor/qexx",
"id": "560c7dd910f7dcc2db3b0ad54c5fbaf78862fe51",
"size": "284",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "shipping/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "6277"
},
{
"name": "HTML",
"bytes": "54211"
},
{
"name": "JavaScript",
"bytes": "8331"
},
{
"name": "Python",
"bytes": "219660"
}
],
"symlink_target": ""
}
|
import os
import platform
import sys
from distutils import sysconfig
from distutils.command import build
from distutils.command.build_ext import build_ext
from distutils.spawn import spawn
from setuptools import Extension, find_packages, setup
import versioneer
_version_module = None
try:
from packaging import version as _version_module
except ImportError:
try:
from setuptools._vendor.packaging import version as _version_module
except ImportError:
pass
min_python_version = "3.7"
max_python_version = "3.11" # exclusive
min_numpy_build_version = "1.11"
min_numpy_run_version = "1.18"
min_llvmlite_version = "0.40.0dev0"
max_llvmlite_version = "0.41"
if sys.platform.startswith('linux'):
# Patch for #2555 to make wheels without libpython
sysconfig.get_config_vars()['Py_ENABLE_SHARED'] = 0
def _guard_py_ver():
if _version_module is None:
return
parse = _version_module.parse
min_py = parse(min_python_version)
max_py = parse(max_python_version)
cur_py = parse('.'.join(map(str, sys.version_info[:3])))
if not min_py <= cur_py < max_py:
msg = ('Cannot install on Python version {}; only versions >={},<{} '
'are supported.')
raise RuntimeError(msg.format(cur_py, min_py, max_py))
_guard_py_ver()
class build_doc(build.build):
description = "build documentation"
def run(self):
spawn(['make', '-C', 'docs', 'html'])
versioneer.VCS = 'git'
versioneer.versionfile_source = 'numba/_version.py'
versioneer.versionfile_build = 'numba/_version.py'
versioneer.tag_prefix = ''
versioneer.parentdir_prefix = 'numba-'
cmdclass = versioneer.get_cmdclass()
cmdclass['build_doc'] = build_doc
extra_link_args = []
install_name_tool_fixer = []
if sys.platform == 'darwin':
install_name_tool_fixer += ['-headerpad_max_install_names']
if platform.machine() == 'ppc64le':
extra_link_args += ['-pthread']
build_ext = cmdclass.get('build_ext', build_ext)
numba_be_user_options = [
('werror', None, 'Build extensions with -Werror'),
('wall', None, 'Build extensions with -Wall'),
('noopt', None, 'Build extensions without optimization'),
]
class NumbaBuildExt(build_ext):
user_options = build_ext.user_options + numba_be_user_options
boolean_options = build_ext.boolean_options + ['werror', 'wall', 'noopt']
def initialize_options(self):
super().initialize_options()
self.werror = 0
self.wall = 0
self.noopt = 0
def run(self):
extra_compile_args = []
if self.noopt:
if sys.platform == 'win32':
extra_compile_args.append('/Od')
else:
extra_compile_args.append('-O0')
if self.werror:
extra_compile_args.append('-Werror')
if self.wall:
extra_compile_args.append('-Wall')
for ext in self.extensions:
ext.extra_compile_args.extend(extra_compile_args)
super().run()
cmdclass['build_ext'] = NumbaBuildExt
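# Illustrative only (added for clarity): the extra build_ext options declared
# above are passed on the command line, e.g.
#   python setup.py build_ext --wall --werror
# which appends -Wall/-Werror to extra_compile_args for every extension
# (and --noopt adds /Od on MSVC or -O0 elsewhere).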
def is_building():
"""
Parse the setup.py command and return whether a build is requested.
If False is returned, only an informational command is run.
If True is returned, information about C extensions will have to
be passed to the setup() function.
"""
if len(sys.argv) < 2:
# User forgot to give an argument probably, let setuptools handle that.
return True
    build_commands = ['build', 'build_py', 'build_ext', 'build_clib',
'build_scripts', 'install', 'install_lib',
'install_headers', 'install_scripts', 'install_data',
'sdist', 'bdist', 'bdist_dumb', 'bdist_rpm',
'bdist_wininst', 'check', 'build_doc', 'bdist_wheel',
'bdist_egg', 'develop', 'easy_install', 'test']
return any(bc in sys.argv[1:] for bc in build_commands)
def get_ext_modules():
"""
Return a list of Extension instances for the setup() call.
"""
# Note we don't import Numpy at the toplevel, since setup.py
# should be able to run without Numpy for pip to discover the
# build dependencies
import numpy.distutils.misc_util as np_misc
# Inject required options for extensions compiled against the Numpy
# C API (include dirs, library dirs etc.)
np_compile_args = np_misc.get_info('npymath')
ext_devicearray = Extension(name='numba._devicearray',
sources=['numba/_devicearray.cpp'],
depends=['numba/_pymodule.h',
'numba/_devicearray.h'],
include_dirs=['numba'],
extra_compile_args=['-std=c++11'],
)
ext_dynfunc = Extension(name='numba._dynfunc',
sources=['numba/_dynfuncmod.c'],
depends=['numba/_pymodule.h',
'numba/_dynfunc.c'])
ext_dispatcher = Extension(name="numba._dispatcher",
sources=['numba/_dispatcher.cpp',
'numba/_typeof.cpp',
'numba/_hashtable.cpp',
'numba/core/typeconv/typeconv.cpp'],
depends=["numba/_pymodule.h",
"numba/_typeof.h",
"numba/_hashtable.h"],
extra_compile_args=['-std=c++11'],
**np_compile_args)
ext_helperlib = Extension(name="numba._helperlib",
sources=["numba/_helpermod.c",
"numba/cext/utils.c",
"numba/cext/dictobject.c",
"numba/cext/listobject.c",
],
# numba/_random.c needs pthreads
extra_link_args=install_name_tool_fixer +
extra_link_args,
depends=["numba/_pymodule.h",
"numba/_helperlib.c",
"numba/_lapack.c",
"numba/_npymath_exports.c",
"numba/_random.c",
"numba/mathnames.inc",
],
**np_compile_args)
ext_typeconv = Extension(name="numba.core.typeconv._typeconv",
sources=["numba/core/typeconv/typeconv.cpp",
"numba/core/typeconv/_typeconv.cpp"],
depends=["numba/_pymodule.h"],
extra_compile_args=['-std=c++11'],
)
ext_np_ufunc = Extension(name="numba.np.ufunc._internal",
sources=["numba/np/ufunc/_internal.c"],
depends=["numba/np/ufunc/_ufunc.c",
"numba/np/ufunc/_internal.h",
"numba/_pymodule.h"],
**np_compile_args)
ext_npyufunc_num_threads = Extension(name="numba.np.ufunc._num_threads",
sources=[
"numba/np/ufunc/_num_threads.c"],
depends=["numba/_pymodule.h"],
)
ext_np_ufunc_backends = []
def check_file_at_path(path2file):
"""
        Takes a list as a path; a single glob (*) is permitted as an entry,
        which indicates that expansion at this location is required (i.e. the
        version might not be known).
"""
found = None
path2check = [os.path.split(os.path.split(sys.executable)[0])[0]]
path2check += [os.getenv(n, '') for n in ['CONDA_PREFIX', 'PREFIX']]
if sys.platform.startswith('win'):
path2check += [os.path.join(p, 'Library') for p in path2check]
for p in path2check:
if p:
if '*' in path2file:
globloc = path2file.index('*')
searchroot = os.path.join(*path2file[:globloc])
try:
potential_locs = os.listdir(os.path.join(p, searchroot))
except BaseException:
continue
searchfor = path2file[globloc + 1:]
for x in potential_locs:
potpath = os.path.join(p, searchroot, x, *searchfor)
if os.path.isfile(potpath):
found = p # the latest is used
elif os.path.isfile(os.path.join(p, *path2file)):
found = p # the latest is used
return found
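    # Illustrative usage (assumption; these mirror the calls made further
    # below rather than adding new lookups):
    #
    #   check_file_at_path(['include', 'tbb', 'tbb.h'])
    #       looks for <prefix>/include/tbb/tbb.h (plus Library\ on Windows)
    #       under the interpreter prefix, CONDA_PREFIX and PREFIX.
    #   check_file_at_path(['lib', 'clang', '*', 'include', 'omp.h'])
    #       expands the single '*' against the installed clang versions and
    #       returns the last matching prefix, or None if nothing matches.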
# Set various flags for use in TBB and openmp. On OSX, also find OpenMP!
have_openmp = True
if sys.platform.startswith('win'):
if 'MSC' in sys.version:
cpp11flags = []
ompcompileflags = ['-openmp']
omplinkflags = []
else:
# For non-MSVC toolchain e.g. gcc and clang with mingw
cpp11flags = ['-std=c++11']
ompcompileflags = ['-fopenmp']
omplinkflags = ['-fopenmp']
elif sys.platform.startswith('darwin'):
cpp11flags = ['-std=c++11']
# This is a bit unusual but necessary...
# llvm (clang) OpenMP is used for headers etc at compile time
# Intel OpenMP (libiomp5) provides the link library.
        # They are binary compatible but may not safely coexist in the same
        # process; since libiomp5 is more prevalent and is often already
        # linked in for NumPy, it is the one used here.
ompcompileflags = ['-fopenmp']
omplinkflags = ['-fopenmp=libiomp5']
omppath = ['lib', 'clang', '*', 'include', 'omp.h']
have_openmp = check_file_at_path(omppath)
else:
cpp11flags = ['-std=c++11']
ompcompileflags = ['-fopenmp']
        omplinkflags = ['-fopenmp']
# Disable tbb if forced by user with NUMBA_DISABLE_TBB=1
if os.getenv("NUMBA_DISABLE_TBB"):
print("TBB disabled")
else:
# Search for Intel TBB, first check env var TBBROOT then conda locations
tbb_root = os.getenv('TBBROOT')
if not tbb_root:
tbb_root = check_file_at_path(['include', 'tbb', 'tbb.h'])
if tbb_root:
print("Using Intel TBB from:", tbb_root)
ext_np_ufunc_tbb_backend = Extension(
name='numba.np.ufunc.tbbpool',
sources=[
'numba/np/ufunc/tbbpool.cpp',
'numba/np/ufunc/gufunc_scheduler.cpp',
],
depends=['numba/np/ufunc/workqueue.h'],
include_dirs=[os.path.join(tbb_root, 'include')],
extra_compile_args=cpp11flags,
extra_link_args=extra_link_args,
libraries=['tbb'], # TODO: if --debug or -g, use 'tbb_debug'
library_dirs=[
# for Linux
os.path.join(tbb_root, 'lib', 'intel64', 'gcc4.4'),
# for MacOS
os.path.join(tbb_root, 'lib'),
# for Windows
os.path.join(tbb_root, 'lib', 'intel64', 'vc_mt'),
],
)
ext_np_ufunc_backends.append(ext_np_ufunc_tbb_backend)
else:
print("TBB not found")
# Disable OpenMP if forced by user with NUMBA_DISABLE_OPENMP=1
if os.getenv('NUMBA_DISABLE_OPENMP'):
print("OpenMP disabled")
elif have_openmp:
print("Using OpenMP from:", have_openmp)
# OpenMP backed work queue
ext_np_ufunc_omppool_backend = Extension(
name='numba.np.ufunc.omppool',
sources=[
'numba/np/ufunc/omppool.cpp',
'numba/np/ufunc/gufunc_scheduler.cpp',
],
depends=['numba/np/ufunc/workqueue.h'],
extra_compile_args=ompcompileflags + cpp11flags,
extra_link_args=omplinkflags,
)
ext_np_ufunc_backends.append(ext_np_ufunc_omppool_backend)
else:
print("OpenMP not found")
# Build the Numba workqueue implementation irrespective of whether the TBB
# version is built. Users can select a backend via env vars.
ext_np_ufunc_workqueue_backend = Extension(
name='numba.np.ufunc.workqueue',
sources=['numba/np/ufunc/workqueue.c',
'numba/np/ufunc/gufunc_scheduler.cpp'],
depends=['numba/np/ufunc/workqueue.h'],
extra_link_args=extra_link_args)
ext_np_ufunc_backends.append(ext_np_ufunc_workqueue_backend)
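    # Illustrative sketch of runtime selection (assumption; the selection is
    # done by Numba at run time, not by setup.py): with more than one backend
    # compiled, a user can pick one before the first parallel call, e.g.
    #
    #   import os
    #   os.environ['NUMBA_THREADING_LAYER'] = 'tbb'   # or 'omp', 'workqueue'
    #
    # Left unset, Numba falls back through the available layers on its own.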
ext_mviewbuf = Extension(name='numba.mviewbuf',
extra_link_args=install_name_tool_fixer,
sources=['numba/mviewbuf.c'])
ext_nrt_python = Extension(name='numba.core.runtime._nrt_python',
sources=['numba/core/runtime/_nrt_pythonmod.c',
'numba/core/runtime/nrt.cpp'],
depends=['numba/core/runtime/nrt.h',
'numba/_pymodule.h',
'numba/core/runtime/_nrt_python.c'],
**np_compile_args)
ext_jitclass_box = Extension(name='numba.experimental.jitclass._box',
sources=['numba/experimental/jitclass/_box.c'],
depends=['numba/experimental/_pymodule.h'],
)
ext_cuda_extras = Extension(name='numba.cuda.cudadrv._extras',
sources=['numba/cuda/cudadrv/_extras.c'],
depends=['numba/_pymodule.h'],
include_dirs=["numba"])
ext_modules = [ext_dynfunc, ext_dispatcher, ext_helperlib,
ext_typeconv, ext_np_ufunc, ext_npyufunc_num_threads,
ext_mviewbuf, ext_nrt_python, ext_jitclass_box,
ext_cuda_extras, ext_devicearray]
ext_modules += ext_np_ufunc_backends
return ext_modules
packages = find_packages(include=["numba", "numba.*"])
build_requires = ['numpy >={}'.format(min_numpy_build_version)]
install_requires = [
'llvmlite >={},<{}'.format(min_llvmlite_version, max_llvmlite_version),
'numpy >={}'.format(min_numpy_run_version),
'setuptools <60',
'importlib_metadata; python_version < "3.9"',
]
metadata = dict(
name='numba',
description="compiling Python code using LLVM",
version=versioneer.get_version(),
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Topic :: Software Development :: Compilers",
],
package_data={
# HTML templates for type annotations
"numba.core.annotations": ["*.html"],
# Various test data
"numba.cuda.tests.cudadrv.data": ["*.ptx", "*.cu"],
"numba.cuda.tests.doc_examples.ffi": ["*.cu"],
"numba.tests": ["pycc_distutils_usecase/*.py"],
# Some C files are needed by pycc
"numba": ["*.c", "*.h"],
"numba.pycc": ["*.c", "*.h"],
"numba.core.runtime": ["*.cpp", "*.c", "*.h"],
"numba.cext": ["*.c", "*.h"],
# numba gdb hook init command language file
"numba.misc": ["cmdlang.gdb"],
"numba.typed": ["py.typed"],
},
scripts=["bin/numba"],
url="https://numba.pydata.org",
packages=packages,
setup_requires=build_requires,
install_requires=install_requires,
python_requires=">={}".format(min_python_version),
license="BSD",
cmdclass=cmdclass,
)
with open('README.rst') as f:
metadata['long_description'] = f.read()
if is_building():
metadata['ext_modules'] = get_ext_modules()
setup(**metadata)
|
{
"content_hash": "50f31465ccbe60cfbedcd909ab9a8ba4",
"timestamp": "",
"source": "github",
"line_count": 431,
"max_line_length": 80,
"avg_line_length": 38.832946635730856,
"alnum_prop": 0.525840951185995,
"repo_name": "cpcloud/numba",
"id": "298b3695f6b49575fd3d30a7fa88914ecc64bb8e",
"size": "16737",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "setup.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "3699"
},
{
"name": "C",
"bytes": "573767"
},
{
"name": "C++",
"bytes": "166526"
},
{
"name": "Cuda",
"bytes": "1110"
},
{
"name": "GDB",
"bytes": "101"
},
{
"name": "HTML",
"bytes": "3464"
},
{
"name": "Python",
"bytes": "9320077"
},
{
"name": "Shell",
"bytes": "13454"
}
],
"symlink_target": ""
}
|
"""
Module morphdemo -- Demonstrations
-------------------------------------------------------------------
morphdemo is a set of Demonstrations for the pymorph package
-------------------------------------------------------------------
airport() -- Detecting runways in satellite airport imagery.
area() -- Remove objects with small areas in binary images.
asp() -- Detect the missing aspirin tablets in a card of aspirin
tablets.
beef() -- Detect the lean meat region in a beef steak image.
blob() -- Demonstrate blob measurements and display.
brain() -- Extract the lateral ventricle from an MRI image of the
brain.
calc() -- Extract the keys of a calculator.
cells() -- Extract blood cells and separate them.
chickparts() -- Classify chicken parts in breast, legs+tights and wings
concrete() -- Aggregate and anhydrous phase extraction from a concrete
section observed by a SEM image.
cookies() -- Detect broken rounded biscuits.
cornea() -- Cornea cells marking.
fabric() -- Detection of vertical weave in fabrics.
fila() -- Detect Filarial Worms.
flatzone() -- Flat-zone image simplification by connected filtering.
flow() -- Detect water in a static image of an oil-water flow
experiment.
gear() -- Detect the teeth of a gear
holecenter() -- Hole center misalignment in PCB.
labeltext() -- Segmenting letters, words and paragraphs.
leaf() -- Segment a leaf from the background
lith() -- Detect defects in a microelectronic circuit.
pcb() -- Decompose a printed circuit board in its main parts.
pieces() -- Classify two dimensional pieces.
potatoes() -- Grade potato quality by shape and skin spots.
robotop() -- Detect marks on a robot.
ruler() -- Detect defects in a ruler.
soil() -- Detect fractures in soil.
"""
from pymorph import *
import numpy
print '''\
*********************** WARNING ******************************
The demo is not as well maintained as the rest of the package.
*********************** WARNING ******************************
The demo has not been updated to the newer interfaces.
'''
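# Illustrative usage (assumption about how the demos are meant to be run):
#
#   >>> import pymorph.morphdemo as morphdemo
#   >>> morphdemo.airport()
#
# Each demo prints the code it is about to execute, runs it, and pauses with a
# "press return" prompt between steps.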
def readgray(imgname):
import pylab
return pylab.imread('pymorph/data/' + imgname)
def show(f, f1=None, f2=None, f3=None, f4=None, f5=None, f6=None):
import pylab
pylab.ion()
pylab.imshow(overlay(f,f1,f2,f3,f4,f5,f6))
pylab.draw()
# =========================================================================
#
# airport - Detecting runways in satellite airport imagery.
#
# =========================================================================
def airport():
print
print '''Detecting runways in satellite airport imagery.'''
print
#
print '========================================================================='
print '''
The satellite image of the airport is read.
'''
print '========================================================================='
#0
print '''
f = readgray('galeao.jpg')
show(f)'''
f = readgray('galeao.jpg')
show(f)
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
The disk of radius 5 (diameter 11) is chosen to detect features
smaller than this size. For visualization, the top-hat image is
brightened by 150 gray-levels.
'''
print '========================================================================='
#0
print '''
th=openth(f,sedisk(5))
show(addm(th, 150))'''
th=openth(f,sedisk(5))
show(addm(th, 150))
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
A thresholding is applied to detect the features enhanced by the
top-hat. This is a standard top-hat sequence.
'''
print '========================================================================='
#0
print '''
bin=threshad(th,30)
show(f,bin)'''
bin=threshad(th,30)
show(f,bin)
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
    The thinning (red) and pruning (green) detect closed structures
    which characterize the runway structure. The area open (blue)
selects only very long features, with more than 1000 pixels.
'''
print '========================================================================='
#0
print '''
m1=thin(bin)
m2=thin(m1,endpoints())
m=areaopen(m2,1000,sebox())
show(f,m1,m2,m)'''
m1=thin(bin)
m2=thin(m1,endpoints())
m=areaopen(m2,1000,sebox())
show(f,m1,m2,m)
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
The previous result is a sample of the runway pixels. It is used as
a marker for gray-scale morphological reconstruction. The runways
are enhanced in the reconstructed image.
'''
print '========================================================================='
#0
print '''
g=infrec(gray(m), th)
show(g)'''
g=infrec(gray(m), th)
show(g)
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
A thresholding is applied to the reconstructed image, detecting the
airport runways.
'''
print '========================================================================='
#0
print '''
final=threshad(g, 20)
show(f, final)'''
final=threshad(g, 20)
show(f, final)
print
raw_input(4*' '+'Please press return to continue...')
print
print
#
return
# =========================================================================
#
# area - Remove objects with small areas in binary images.
#
# =========================================================================
def area():
print
print '''Remove objects with small areas in binary images.'''
print
#
print '========================================================================='
print '''
The binary image to be processed is read.
'''
print '========================================================================='
#0
print '''
a = readgray('circuit_bw.tif')
show(a)'''
a = readgray('circuit_bw.tif')
show(a)
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
The procedure areaopen removes the objects with area less than the
specified parameter (i.e., 200).
'''
print '========================================================================='
#0
print '''
b = areaopen(a,200)
show(b)'''
b = areaopen(a,200)
show(b)
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
For displaying purposes the filtered image is superposed over the
original image.
'''
print '========================================================================='
#0
print '''
show(a,b)'''
show(a,b)
print
raw_input(4*' '+'Please press return to continue...')
print
print
#
return
# =========================================================================
#
# asp - Detect the missing aspirin tablets in a card of aspirin tablets.
#
# =========================================================================
def asp():
print
print '''Detect the missing aspirin tablets in a card of aspirin tablets.'''
print
#
print '========================================================================='
print '''
The aspirin tablet binary image is read.
'''
print '========================================================================='
#0
print '''
a = readgray('astablet.tif')
show(a)'''
a = readgray('astablet.tif')
show(a)
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
    The image can be modeled as a topographical surface where white
    regions correspond to high altitude and dark regions to lower
    altitude. The regional maxima of the image are normally very noisy,
    as can be seen below.
'''
print '========================================================================='
#0
print '''
b = surf(a)
show(b)
c = regmax(a,sebox())
show(b,c)'''
b = surf(a)
show(b)
c = regmax(a,sebox())
show(b,c)
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
Opening the original image by a disk a little smaller than the
tablets removes all the noisy regional maxima. The only regional
maxima in the opened image are the aspirin tablets as they are the
only regionally brighter regions of shape larger than the disk of
radius 20 pixels.
'''
print '========================================================================='
#0
print '''
d = open(a, sedisk(20))
e = surf(d)
show(e)
f = regmax(d,sebox())
show(e,f)'''
d = open(a, sedisk(20))
e = surf(d)
show(e)
f = regmax(d,sebox())
show(e,f)
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
    The input and output results are shown here. Note that the binary
    image of the aspirin tablets was obtained using just one parameter:
    the radius of the circular structuring element. The problem was
    solved by treating the image as one formed by circular bright regions.
'''
print '========================================================================='
#0
print '''
show(a)
show(f)'''
show(a)
show(f)
print
raw_input(4*' '+'Please press return to continue...')
print
print
#
return
# =========================================================================
#
# labeltext - Segmenting letters, words and paragraphs.
#
# =========================================================================
def labeltext():
print
print '''Segmenting letters, words and paragraphs.'''
print
#
print '========================================================================='
print '''
The text image is read.
'''
print '========================================================================='
#0
print '''
f = readgray('stext.tif')
show(f)'''
f = readgray('stext.tif')
show(f)
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
    The letters are the main connected components in the image, so we
    use the classical 8-connectivity criterion to identify each letter.
'''
print '========================================================================='
#0
print '''
fl=label(f,sebox())
lblshow(fl)'''
fl=label(f,sebox())
lblshow(fl)
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
    The words are made of letters that are close together. In this case we
    use a connectivity specified by a rectangular structuring element 7
    pixels high and 11 pixels wide, so any two pixels that can be hit
    by this rectangle belong to the same connected component. The
values 7 and 11 were chosen experimentally and depend on the font
size.
'''
print '========================================================================='
#0
print '''
from numpy.oldnumeric import ones
sew = img2se(binary(ones((7,11))))
seshow(sew)
fw=label(f,sew)
lblshow(fw)'''
from numpy.oldnumeric import ones
sew = img2se(binary(ones((7,11))))
seshow(sew)
fw=label(f,sew)
lblshow(fw)
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
    Similarly, paragraphs are made of words that are close together. In this
    case the connectivity is given by a rectangle of 35 by 20 pixels.
'''
print '========================================================================='
#0
print '''
sep = img2se(binary(ones((20,35))))
fp=label(f,sep)
lblshow(fp)'''
sep = img2se(binary(ones((20,35))))
fp=label(f,sep)
lblshow(fp)
print
raw_input(4*' '+'Please press return to continue...')
print
print
#
return
# =========================================================================
#
# beef - Detect the lean meat region in a beef steak image.
#
# =========================================================================
def beef():
print
print '''Detect the lean meat region in a beef steak image.'''
print
#
print '========================================================================='
print '''
The gray-scale image of the beef steak is read.
'''
print '========================================================================='
#0
print '''
a = readgray('beef.tif');
show(a);'''
a = readgray('beef.tif');
show(a);
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
    The input image is simplified by the application of a small
    closing. The dark area (inner lean part) is closed from the fat
white area.
'''
print '========================================================================='
#0
print '''
b=close(a,sedisk(2));
show(b);'''
b=close(a,sedisk(2));
show(b);
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
The external marker is built from the detection of the complete beef
region and the extraction of its internal edge. As the beef is dark,
it is detected by a low value threshold. After this threshold, small
residual regions are eliminated by the binary areaclose operator.
'''
print '========================================================================='
#0
print '''
c = threshad(a,uint8(10));
d = areaclose(c,200);
show(d);'''
c = threshad(a,uint8(10));
d = areaclose(c,200);
show(d);
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
The internal edge generated is 13 points thick. It is created by the
residues of an erosion by a large structuring element.
'''
print '========================================================================='
#0
print '''
e = gradm(d,secross(1),sebox(13));
show(e);'''
e = gradm(d,secross(1),sebox(13));
show(e);
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
The internal marker is a severe erosion of the steak. Both markers
are combined by union and displayed as overlay on the gradient image
'''
print '========================================================================='
#0
print '''
f= erode(d,secross(80));
g = union(e,f);
h = gradm(b);
show(h,g);'''
f= erode(d,secross(80));
g = union(e,f);
h = gradm(b);
show(h,g);
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
Constrained watershed of the gradient of the smoothed image,
restricted to the internal and external markers
'''
print '========================================================================='
#0
print '''
i=cwatershed(h,g);
show(i);'''
i=cwatershed(h,g);
show(i);
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
Superposition of the dilated detected contour on the original image.
'''
print '========================================================================='
#0
print '''
show(a,dilate(i));'''
show(a,dilate(i));
print
raw_input(4*' '+'Please press return to continue...')
print
print
#
return
# =========================================================================
#
# blob - Demonstrate blob measurements and display.
#
# =========================================================================
def blob():
print
print '''Demonstrate blob measurements and display.'''
print
#
print '========================================================================='
print '''
The binary image is read and then labeled. The number of blobs is
measured as the maximum label value. Both images are displayed.
'''
print '========================================================================='
#0
print '''
f = readgray('blob3.tif')
fr = label(f)
show(f)
lblshow(fr,'border')
nblobs=stats(fr,'max')
print nblobs'''
f = readgray('blob3.tif')
fr = label(f)
show(f)
lblshow(fr,'border')
nblobs=stats(fr,'max')
print nblobs
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
    The centroids are computed from the labeled image. Afterwards, the
    centroid image is labeled, so that each centroid point has a label
    value varying from 1 to the maximum number of blobs. For display
    illustration, the centroids are overlaid on the original blob image
on the left and the labeled centroids are enlarged and displayed on
the right.
'''
print '========================================================================='
#0
print '''
c = blob(fr,'centroid')
cr = label(c)
show(f,c)
lblshow(dilate(cr))'''
c = blob(fr,'centroid')
cr = label(c)
show(f,c)
lblshow(dilate(cr))
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
    To place a particular number on a particular blob, a number image is
    generated using the text() function and converted to a structuring element.
A particular centroid is selected by comparing the image with the
labeled number. This output image is a binary image with a single
point at that centroid. Dilating this image by the structuring
element will "stamp" the structuring element on the centroid.
'''
print '========================================================================='
#0
print '''
fbin = cmp(cr,'==',uint16(5))
f5 = text('5')
print f5
b5 = img2se(f5)
fb5 = dilate(fbin,b5)
show(dilate(fbin))
show(f,fb5)'''
fbin = cmp(cr,'==',uint16(5))
f5 = text('5')
print f5
b5 = img2se(f5)
fb5 = dilate(fbin,b5)
show(dilate(fbin))
show(f,fb5)
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
    To automate the process just described, a loop scans every label
    value and "stamps" its number in a final image. The stamps are
    accumulated with the union() function. The area is computed and plotted
    against each blob label number.
'''
print '========================================================================='
#0
print '''
facc=subm(f,f)
for i in range(1,nblobs+1):
fbin = cmp(cr,'==',uint16(i))
fi = text(str(i))
bi = img2se(fi)
fbi = dilate(fbin,bi)
facc = union(facc,fbi)
show(f,facc)
darea = blob(fr,'area','data')
plot([[darea]], [['style','impulses']])'''
facc=subm(f,f)
for i in range(1,nblobs+1):
fbin = cmp(cr,'==',uint16(i))
fi = text(str(i))
bi = img2se(fi)
fbi = dilate(fbin,bi)
facc = union(facc,fbi)
show(f,facc)
darea = blob(fr,'area','data')
plot([[darea]], [['style','impulses']])
print
raw_input(4*' '+'Please press return to continue...')
print
print
#
return
# =========================================================================
#
# brain - Extract the lateral ventricle from an MRI image of the brain.
#
# =========================================================================
def brain():
print
print '''Extract the lateral ventricle from an MRI image of the brain.'''
print
#
print '========================================================================='
print '''
The MRI image of a brain slice is read.
'''
print '========================================================================='
#0
print '''
a = readgray('mribrain.tif');
show(a);'''
a = readgray('mribrain.tif');
show(a);
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
The ventricle is enhanced using an opening with a disk of radius 10
followed by a reconstruction.
'''
print '========================================================================='
#0
print '''
b = open(a,sedisk(10));
c = infrec(b,a);
show(b);
show(c);'''
b = open(a,sedisk(10));
c = infrec(b,a);
show(b);
show(c);
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
    The result of the open by reconstruction is subtracted from the
    original image. Note that the three operations (open, reconstruction
    and subtraction) could be done at once using the open-by-reconstruction
    top-hat function. On the right, the enhanced
ventricle is thresholded.
'''
print '========================================================================='
#0
print '''
d = subm(a,c);
show(d);
e = cmp(d,'>=',uint8(50));
show(e);'''
d = subm(a,c);
show(d);
e = cmp(d,'>=',uint8(50));
show(e);
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
Finally, the ventricle is selected as the connected object with area
larger than 70 pixels. For visualization purposes, the result of the
segmentation is overlayed on the original brain image.
'''
print '========================================================================='
#0
print '''
f= areaopen(e,70);
show(f);
show(a,f);'''
f= areaopen(e,70);
show(f);
show(a,f);
print
raw_input(4*' '+'Please press return to continue...')
print
print
#
return
# =========================================================================
#
# calc - Extract the keys of a calculator.
#
# =========================================================================
def calc():
print
print '''Extract the keys of a calculator.'''
print
#
print '========================================================================='
print '''
The gray-scale image of the calculator is read.
'''
print '========================================================================='
#0
print '''
a = readgray('keyb.tif');
show(a);'''
a = readgray('keyb.tif');
show(a);
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
The image edges are enhanced by the gradient operator.
'''
print '========================================================================='
#0
print '''
b = gradm(a, sebox());
show(b);'''
b = gradm(a, sebox());
show(b);
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
    The opening top-hat procedure enhances small objects relative
    to their background. In the calculator image, the digits are enhanced.
'''
print '========================================================================='
#0
print '''
c = openth(a,sebox(5));
show(c);'''
c = openth(a,sebox(5));
show(c);
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
    The threshold operator is used to separate the enhanced objects.
This procedure is quite robust, since the background was reduced to
very low levels with the opening top-hat.
'''
print '========================================================================='
#0
print '''
d = threshad(c, uint8(150));
show(d);'''
d = threshad(c, uint8(150));
show(d);
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
In order to have just one object (i.e., connected component) inside
each key, a dilation is applied.
'''
print '========================================================================='
#0
print '''
e = dilate(d, sebox(3));
show(e);'''
e = dilate(d, sebox(3));
show(e);
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
The outside markers are built by taking the watershed (skiz) of the
complement of internal markers image.
'''
print '========================================================================='
#0
print '''
f = watershed(neg(e));
show(f);'''
f = watershed(neg(e));
show(f);
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
The markers used are the union of the internal and external markers
detected. They are displayed as overlay on the gradient image.
'''
print '========================================================================='
#0
print '''
g = union(e,f);
show(b,g);'''
g = union(e,f);
show(b,g);
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
The calculator keys are extracted by applying the watershed operator
on the gradient image, constrained by the markers detected.
'''
print '========================================================================='
#0
print '''
h = cwatershed(b,g,sebox());
show(h);'''
h = cwatershed(b,g,sebox());
show(h);
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
Superposition of the detected contours on the input image.
'''
print '========================================================================='
#0
print '''
show(a,h);'''
show(a,h);
print
raw_input(4*' '+'Please press return to continue...')
print
print
#
return
# =========================================================================
#
# cells - Extract blood cells and separate them.
#
# =========================================================================
def cells():
print
print '''Extract blood cells and separate them.'''
print
#
print '========================================================================='
print '''
First, the blood cells image is read. Then, the gray-scale area open
operator is applied for removing small white pores over the cells.
'''
print '========================================================================='
#0
print '''
a = readgray('bloodcells.tif');
show(a);
b = areaopen(a, 200);
show(b);'''
a = readgray('bloodcells.tif');
show(a);
b = areaopen(a, 200);
show(b);
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
The threshold of dark areas produces the segmented image (i.e., the
    region where there are cells). Then the opening by a small disk
    smooths the cell borders.
'''
print '========================================================================='
#0
print '''
c = cmp( uint8(0), '<=', b, '<=', uint8(140));
show(c);
d = open(c,sedisk(2,'2D','OCTAGON'));
show(d);'''
c = cmp( uint8(0), '<=', b, '<=', uint8(140));
show(c);
d = open(c,sedisk(2,'2D','OCTAGON'));
show(d);
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
    A marker for each cell is obtained by dilating the regional maxima
    of the distance transform. For visualization, the
    distance transform is viewed as a topographic surface shading on the
    left and the dilated regional maxima are displayed in red overlaid
    on the surface view.
'''
print '========================================================================='
#0
print '''
e1 = dist(d, sebox(),'EUCLIDEAN');
e2 = surf(e1);
show( e2);
e3 = regmax(e1);
e = dilate(e3);
show( e2, e);'''
e1 = dist(d, sebox(),'EUCLIDEAN');
e2 = surf(e1);
show( e2);
e3 = regmax(e1);
e = dilate(e3);
show( e2, e);
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
    The watershed, constrained by the markers image, is applied to the
    negation of the distance function. The result of this procedure is
    also called the geodesic SKIZ. For visualization, on the left the
    negated distance function is displayed as a topographic surface, and
    on the right this surface is superposed with the markers and the
    detected watershed lines.
'''
print '========================================================================='
#0
print '''
f = neg(e1);
fs = surf(f);
show(fs);
g = cwatershed( f, e, sebox());
show(fs, g, e);'''
f = neg(e1);
fs = surf(f);
show(fs);
g = cwatershed( f, e, sebox());
show(fs, g, e);
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
    The geodesic SKIZ (i.e., watershed division lines) is subtracted
    from the segmented image, separating the cells. On the left the
    detected watershed lines are overlaid on the cells binary image, and
    on the right, the cells image is shown separated by the watershed lines.
'''
print '========================================================================='
#0
print '''
show(c,g);
h = intersec(c,neg(g));
show(h);'''
show(c,g);
h = intersec(c,neg(g));
show(h);
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
The cells that touch the frame of the image are removed.
'''
print '========================================================================='
#0
print '''
i = edgeoff(h);
show(i);'''
i = edgeoff(h);
show(i);
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
Superposition of the contour of the detected cells on the original
image.
'''
print '========================================================================='
#0
print '''
j=gradm(i);
show(a,j);'''
j=gradm(i);
show(a,j);
print
raw_input(4*' '+'Please press return to continue...')
print
print
#
return
# =========================================================================
#
# chickparts - Classify chicken parts in breast, legs+tights and wings
#
# =========================================================================
def chickparts():
print
print '''Classify chicken parts in breast, legs+tights and wings'''
print
#
print '========================================================================='
print '''
The input image is read.
'''
print '========================================================================='
#0
print '''
a = readgray('chickparts.tif');
show(a);'''
a = readgray('chickparts.tif');
show(a);
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
Convert to binary objects by thresholding and then labeling the
objects.
'''
print '========================================================================='
#0
print '''
b = cmp(a,'>=', uint8(100));
show(b);
c = label(b);
lblshow(c,'border');'''
b = cmp(a,'>=', uint8(100));
show(b);
c = label(b);
lblshow(c,'border');
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
    Measure the area of each object and use this value as the pixel
    value of the object. For display purposes, overlay the background in red
    in the right image below.
'''
print '========================================================================='
#0
print '''
d = blob(c,'area');
show(d);
show(d, cmp(d,'==',0));'''
d = blob(c,'area');
show(d);
show(d, cmp(d,'==',0));
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
    The wings are detected by finding objects with area between 100 and 2500
    pixels. The tights are selected as connected objects with area
between 2500 and 5500 pixels.
'''
print '========================================================================='
#0
print '''
wings = cmp( uint16(100),'<=',d, '<=', uint16(2500));
show(wings);
tights = cmp( uint16(2500),'<',d, '<=', uint16(5500));
show(tights);'''
wings = cmp( uint16(100),'<=',d, '<=', uint16(2500));
show(wings);
tights = cmp( uint16(2500),'<',d, '<=', uint16(5500));
show(tights);
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
    The legs+tights have area larger than 5500 and at most 8500
    pixels, and the breast is the largest connected object, with area
    larger than 8500 pixels.
'''
print '========================================================================='
#0
print '''
legs = cmp( uint16(5500), '<', d, '<=', uint16(8500));
show(legs);
breast = cmp( d,'>', uint16(8500));
show(breast);'''
legs = cmp( uint16(5500), '<', d, '<=', uint16(8500));
show(legs);
breast = cmp( d,'>', uint16(8500));
show(breast);
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
Overlay the contour of the detected parts over the original image
'''
print '========================================================================='
#0
print '''
show(a, gradm(wings), gradm(tights), gradm(legs),gradm(breast));'''
show(a, gradm(wings), gradm(tights), gradm(legs),gradm(breast));
print
raw_input(4*' '+'Please press return to continue...')
print
print
#
return
# =========================================================================
#
# concrete - Aggregate and anhydrous phase extraction from a concrete section observed by a SEM image.
#
# =========================================================================
def concrete():
print
print '''Aggregate and anhydrous phase extraction from a concrete section
observed by a SEM image.'''
print
#
print '========================================================================='
print '''
    The SEM image of a polished concrete section is read. The anhydrous
    phase corresponds to the white regions, while the aggregate corresponds
    to the medium-gray homogeneous regions.
'''
print '========================================================================='
#0
print '''
f = readgray('csample.jpg')
show(f)'''
f = readgray('csample.jpg')
show(f)
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
The histogram has a small peak in the white region related to the
anhydrous phase.
'''
print '========================================================================='
#0
print '''
h = histogram(f)
plot([[h]])'''
h = histogram(f)
plot([[h]])
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
    The threshold value is extracted using the watershed technique. The
    aim is to detect the middle valley of the histogram. If the
    histogram is negated, we need to extract the middle peak of the 1D
    signal. This is accomplished by finding proper markers in the valleys.
    These markers are extracted by detecting the regional minima of the
    filtered signal (alternating sequential filtering, closing followed
    by opening of length 5 pixels). To discard the detection of peaks
    near the limits of the histogram, an intersection with the negated
    frame is taken using the intersec() function. For illustrative
    purposes, a plot of all the signals involved is displayed.
'''
print '========================================================================='
#0
print '''
hf = asf(neg(h),'co',seline(5))
ws = watershed(hf)
wsf = intersec(neg(frame(ws,20)),ws)
t = nonzero(wsf)
print t
ax = stats(h,'max')
hf_plot = neg(hf)
ws_plot = gray(ws, 'uint16', ax)
wsf_plot = gray(wsf, 'uint16', ax)
plot([[hf_plot],[ws_plot],[wsf_plot]])'''
hf = asf(neg(h),'co',seline(5))
ws = watershed(hf)
wsf = intersec(neg(frame(ws,20)),ws)
t = nonzero(wsf)
print t
ax = stats(h,'max')
hf_plot = neg(hf)
ws_plot = gray(ws, 'uint16', ax)
wsf_plot = gray(wsf, 'uint16', ax)
plot([[hf_plot],[ws_plot],[wsf_plot]])
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
    The threshold value found in the previous step is applied. Afterwards, a
    filter to remove blobs smaller than 20 pixels is applied. For
    illustration, the contours of the anhydrous grains are displayed as
    an overlay on the original image.
'''
print '========================================================================='
#0
print '''
aux = threshad( f, t, 255)
anidro = areaopen(aux, 20)
show( f, gradm(anidro))'''
aux = threshad( f, t, 255)
anidro = areaopen(aux, 20)
show( f, gradm(anidro))
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
    The watershed applied on the gradient, using markers from the
    filtered regional minima of the gradient, is a standard
    watershed-based technique. In this case the filter was chosen to be
    a contrast filter (h-minima of height 10).
'''
print '========================================================================='
#0
print '''
g=gradm(f)
m=regmin(hmin(g,10))
ws=cwatershed(g,m)
show(ws)'''
g=gradm(f)
m=regmin(hmin(g,10))
ws=cwatershed(g,m)
show(ws)
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
The result of the watershed in the previous step is the detection of
a large number of regions. The larger ones are the aggregate and the
    anhydrous. So first the small regions are filtered out using an area
criterion of 300 pixels. Small holes (area <= 50) are closed. The
aggregate is obtained by removing the anhydrous phase.
'''
print '========================================================================='
#0
print '''
aux1=areaopen(neg(ws),300)
aux2=areaclose(aux1,50)
aggr=subm(aux2,anidro)
show(f, gradm(aggr))'''
aux1=areaopen(neg(ws),300)
aux2=areaclose(aux1,50)
aggr=subm(aux2,anidro)
show(f, gradm(aggr))
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
Finally each phase is measured and an illustrative display is
constructed. The grains contoured by red are the aggregate and those
contoured by green, the anhydrous.
'''
print '========================================================================='
#0
print '''
n = product(shape(f))
anidro_phase = stats(anidro,'sum')/n
print 'anidro=',anidro_phase
aggr_phase = stats(aggr,'sum')/n;
print 'aggr=',aggr_phase
show( f, gradm(aggr), gradm(anidro))'''
n = product(shape(f))
anidro_phase = stats(anidro,'sum')/n
print 'anidro=',anidro_phase
aggr_phase = stats(aggr,'sum')/n;
print 'aggr=',aggr_phase
show( f, gradm(aggr), gradm(anidro))
print
raw_input(4*' '+'Please press return to continue...')
print
print
#
return
# =========================================================================
#
# cookies - Detect broken rounded biscuits.
#
# =========================================================================
def cookies():
print
print '''Detect broken rounded biscuits.'''
print
#
print '========================================================================='
print '''
The input image is read.
'''
print '========================================================================='
#0
print '''
a = readgray('cookies.tif');
show(a);'''
a = readgray('cookies.tif');
show(a);
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
Convert to binary objects by thresholding
'''
print '========================================================================='
#0
print '''
b = threshad(a, uint8(100));
show(b);'''
b = threshad(a, uint8(100));
show(b);
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
    The top-hat of the binary image by an octagonal disk, whose radius fits
    inside the good biscuits but not inside the broken one, detects
    the broken biscuit.
'''
print '========================================================================='
#0
print '''
c = openth(b,sedisk(55,'2D','OCTAGON'));
show(c);'''
c = openth(b,sedisk(55,'2D','OCTAGON'));
show(c);
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
    Clean the residues from the octagonal disk and the round-shaped
    biscuits by eliminating small connected regions.
'''
print '========================================================================='
#0
print '''
d = areaopen(c,400);
show(d);'''
d = areaopen(c,400);
show(d);
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
Display the detected broken biscuit
'''
print '========================================================================='
#0
print '''
show(a,d);'''
show(a,d);
print
raw_input(4*' '+'Please press return to continue...')
print
print
#
return
# =========================================================================
#
# cornea - Cornea cells marking.
#
# =========================================================================
def cornea():
print
print '''Cornea cells marking.'''
print
#
print '========================================================================='
print '''
The gray-scale image of the cornea is read and displayed. A
topographic model is also displayed. We can notice that the cells
are formed by small hills in the topographic model. We can also
notice that the image is very noisy.
'''
print '========================================================================='
#0
print '''
a = readgray('corneacells.tif');
show(a);
b = surf(a);
show(b);'''
a = readgray('corneacells.tif');
show(a);
b = surf(a);
show(b);
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
    The image is filtered by an alternating sequential filter of
    size 2. This filter is composed of openings and closings, removing
    small peaks and valleys. Next, the regional maxima are detected. For
    illustrative purposes, they are displayed overlaid on the
topographic image view. These regional maxima are the markers for
each cell. If anything goes wrong in this step, the error will be
propagated throughout the process.
'''
print '========================================================================='
#0
print '''
c = asf(a,'oc',secross(),2);
d = regmax( c);
show(surf(c));
show(surf(c), d);'''
c = asf(a,'oc',secross(),2);
d = regmax( c);
show(surf(c));
show(surf(c), d);
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
Following the paradigm of segmentation by watershed, the background
marker is detected by applying the constrained watershed on the
negation of the cells image using the markers detected in the last
    step. These watershed lines partition the image into regions of
    influence of each cell. For illustrative display, the negative of
    the cell image is displayed overlaid with the markers on the left,
    and also overlaid with the watershed lines on the right.
'''
print '========================================================================='
#0
print '''
e = neg(a);
f = cwatershed(e, d, sebox());
show(e,d);
show(e,f,d);'''
e = neg(a);
f = cwatershed(e, d, sebox());
show(e,d);
show(e,f,d);
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
As the internal and external markers can be touching, we combine the
    external marker, with value 1, with the labeling of the internal
    markers incremented by 1. The labeled marker image is shown on the left.
The final watershed will be applied on the gradient of the original
image, which is shown on the right.
'''
print '========================================================================='
#0
print '''
g = gray(f, 'uint16', 1);
h1 = addm(label(d), uint16(1));
h = intersec(gray(d,'uint16'), h1);
i = union( g, h);
lblshow(i);
j = gradm( a);
show(j);'''
g = gray(f, 'uint16', 1);
h1 = addm(label(d), uint16(1));
h = intersec(gray(d,'uint16'), h1);
i = union( g, h);
lblshow(i);
j = gradm( a);
show(j);
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
Apply the constrained watershed on the gradient from the labeled
internal and external markers. Show the watershed lines on the left
and the results overlayed on the original image, on the right.
'''
print '========================================================================='
#0
print '''
k = cwatershed(j, i);
show( k);
show(a, k, k);'''
k = cwatershed(j, i);
show( k);
show(a, k, k);
print
raw_input(4*' '+'Please press return to continue...')
print
print
#
return
# =========================================================================
#
# fabric - Detection of vertical weave in fabrics.
#
# =========================================================================
def fabric():
print
print '''Detection of vertical weave in fabrics.'''
print
#
print '========================================================================='
print '''
The image to be processed is read.
'''
print '========================================================================='
#0
print '''
a = readgray('fabric.tif');
show(a);'''
a = readgray('fabric.tif');
show(a);
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
A sequence of dilations (by a disk and two line segments) is applied
to enhance the white stripes
'''
print '========================================================================='
#0
print '''
b = dilate(a,sedisk(4));
c = dilate(b,seline(25,90));
d = dilate(c,seline(25,-90));
show(d);'''
b = dilate(a,sedisk(4));
c = dilate(b,seline(25,90));
d = dilate(c,seline(25,-90));
show(d);
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
The markers are the regional minima with contrast 25.
'''
print '========================================================================='
#0
print '''
e = hmin(d,25);
f = regmin(e);
show(f);'''
e = hmin(d,25);
f = regmin(e);
show(f);
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
Watershed of the original image from the dark stripes markers just
created. Show the result overlayed on the original image.
'''
print '========================================================================='
#0
print '''
g = cwatershed(a,f);
show(a,dilate(g));'''
g = cwatershed(a,f);
show(a,dilate(g));
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
Create a new marker by taking the union of the dark markers and the
watershed lines just created. The gradient of the original image is
computed.
'''
print '========================================================================='
#0
print '''
h = union(g,f);
i = gradm(a);
show(h);
show(i);'''
h = union(g,f);
i = gradm(a);
show(h);
show(i);
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
The watershed of the gradient of the original image, taking the
marker just created, gives the extent of the white regions.
'''
print '========================================================================='
#0
print '''
j = cwatershed(i,h,sebox());
show(a,j);'''
j = cwatershed(i,h,sebox());
show(a,j);
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
The watershed regions are computed. The area of each watershed
region is measured and displayed.
'''
print '========================================================================='
#0
print '''
k = cwatershed(i,h,sebox(),'REGIONS');
lblshow(k,'border');
l = blob(k,'area');
show(l);'''
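# With the 'REGIONS' option, cwatershed returns the labeled catchment basins instead
# of the watershed lines; blob(k,'area') then measures the area of each labeled region.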
k = cwatershed(i,h,sebox(),'REGIONS');
lblshow(k,'border');
l = blob(k,'area');
show(l);
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
To select only the fabric spacing, keep the regions with small
area (at most 2000 pixels). Label the narrow regions.
'''
print '========================================================================='
#0
print '''
m = cmp(l,'<=',2000);
show(m);
n = label(m,sebox());
lblshow(n,'border');'''
m = cmp(l,'<=',2000);
show(m);
n = label(m,sebox());
lblshow(n,'border');
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
Compute the area of each region and plot them. Also display the
original image for illustration. From the plot, we can notice that
the two rightmost weave spacings are significantly larger than the
others.
'''
print '========================================================================='
#0
print '''
show(a);
o = blob(n,'area','data');
plot([[o]],[['style','impulses']])'''
show(a);
o = blob(n,'area','data');
plot([[o]],[['style','impulses']])
print
raw_input(4*' '+'Please press return to continue...')
print
print
#
return
# =========================================================================
#
# fila - Detect Filarial Worms.
#
# =========================================================================
def fila():
print
print '''Detect Filarial Worms.'''
print
#
print '========================================================================='
print '''
A microscopic gray-scale image, with two filarial worms, is read.
'''
print '========================================================================='
#0
print '''
a = readgray('danaus.tif');
show(a);'''
a = readgray('danaus.tif');
show(a);
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
The Close by Reconstruction Top-Hat operator is applied to
regularize the image background.
'''
print '========================================================================='
#0
print '''
b = closerecth(a,sebox(5));
show(b);'''
b = closerecth(a,sebox(5));
show(b);
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
The gray-scale opening by the elementary cross is applied to remove
narrow objects.
'''
print '========================================================================='
#0
print '''
c = open(b);
show(c);'''
c = open(b);
show(c);
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
The gray-scale area open operator is applied to remove small
objects.
'''
print '========================================================================='
#0
print '''
d = areaopen(c,200);
show(d);'''
d = areaopen(c,200);
show(d);
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
The threshold operator is applied to extract a reduced set of
structures that include the two worms present in the image.
'''
print '========================================================================='
#0
print '''
e = threshad(d,50);
show(e);'''
e = threshad(d,50);
show(e);
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
The objective of the sequence of transformations, that begin with
the homotopic skeleton, is to eliminate the structures that are not
worms. The information used for the filtering is that the worms are
longer than any other structure found.
'''
print '========================================================================='
#0
print '''
f = thin(e);
show(f);'''
f = thin(e);
show(f);
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
The first 12 points of the skeleton branches, counting from their
extremities, are eliminated. The structures that were not eliminated
will be the markers for extracting the two worms.
'''
print '========================================================================='
#0
print '''
g = thin(f,endpoints(), 12);
show(g);'''
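# Pruning: 12 iterations of thinning with the endpoints() templates remove 12 pixels
# from every branch tip, so only the long (worm) skeleton segments survive as markers.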
g = thin(f,endpoints(), 12);
show(g);
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
The binary reconstruction operator is applied to reconstruct the
binary image produced by the threshold from the marker image.
'''
print '========================================================================='
#0
print '''
h = infrec(g,e);
show(h);'''
h = infrec(g,e);
show(h);
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
The structures extracted are overlaid to the input gray-scale image.
'''
print '========================================================================='
#0
print '''
show(a,h);'''
show(a,h);
print
raw_input(4*' '+'Please press return to continue...')
print
print
#
return
# =========================================================================
#
# flatzone - Flat-zone image simplification by connected filtering.
#
# =========================================================================
def flatzone():
print
print '''Flat-zone image simplification by connected filtering.'''
print
#
print '========================================================================='
print '''
The input image is read.
'''
print '========================================================================='
#0
print '''
a = readgray('cameraman.tif')
show(a)'''
a = readgray('cameraman.tif')
show(a)
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
Obtain the flat zones (8-connectivity) and compute their number. The
number of flat zones is determined by the maximum labeling value
(starting from flat zone one).
'''
print '========================================================================='
#0
print '''
b = labelflat(a,sebox())
nfz=stats(b,'max')
print nfz
show(a)
lblshow(b)'''
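# labelflat assigns one label per flat zone (starting at 1), so the maximum label
# value returned by stats(b,'max') equals the number of flat zones.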
b = labelflat(a,sebox())
nfz=stats(b,'max')
print nfz
show(a)
lblshow(b)
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
Apply the alternating sequential filter by reconstruction with
increasing sizes. They constitute a connected pyramid.
'''
print '========================================================================='
#0
print '''
c=asfrec(a,'CO',sebox(),sebox(),2)
d=asfrec(a,'CO',sebox(),sebox(),4)
e=asfrec(a,'CO',sebox(),sebox(),16)
show(c)
show(d)
show(e)'''
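# Alternating sequential filters (close-open, 'CO') by reconstruction with sizes
# 2, 4 and 16: increasing the size gives coarser levels of the connected pyramid.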
c=asfrec(a,'CO',sebox(),sebox(),2)
d=asfrec(a,'CO',sebox(),sebox(),4)
e=asfrec(a,'CO',sebox(),sebox(),16)
show(c)
show(d)
show(e)
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
These figures show the image simplification in a connected pyramid.
Notice how the shapes are well preserved along the scale space. The
number of flat zones at each level of the pyramid is computed to
illustrate the reduction in the number of flat zones.
'''
print '========================================================================='
#0
print '''
c_lab=labelflat(c,sebox())
d_lab=labelflat(d,sebox())
e_lab=labelflat(e,sebox())
print stats(c_lab,'max')
print stats(d_lab,'max')
print stats(e_lab,'max')
lblshow(c_lab)
lblshow(d_lab)
lblshow(e_lab)'''
c_lab=labelflat(c,sebox())
d_lab=labelflat(d,sebox())
e_lab=labelflat(e,sebox())
print stats(c_lab,'max')
print stats(d_lab,'max')
print stats(e_lab,'max')
lblshow(c_lab)
lblshow(d_lab)
lblshow(e_lab)
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
In this experiment we select a particular flat zone, the flat zone
to which the pixel (90,60) belongs, and display it at each level of
the connected pyramid. Notice the flat zone inclusion property.
'''
print '========================================================================='
#0
print '''
c_v=c_lab[89,59]
c_flat=cmp(c_lab,'==',c_v)
d_v=d_lab[89,59]
d_flat=cmp(d_lab,'==',d_v)
e_v=e_lab[89,59]
e_flat=cmp(e_lab,'==',e_v)
show(a,e_flat,d_flat,c_flat)'''
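# The pixel referred to as (90,60) in the text is addressed as [89,59] here,
# presumably because the image arrays are indexed from zero.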
c_v=c_lab[89,59]
c_flat=cmp(c_lab,'==',c_v)
d_v=d_lab[89,59]
d_flat=cmp(d_lab,'==',d_v)
e_v=e_lab[89,59]
e_flat=cmp(e_lab,'==',e_v)
show(a,e_flat,d_flat,c_flat)
print
raw_input(4*' '+'Please press return to continue...')
print
print
#
return
# =========================================================================
#
# flow - Detect water in a static image of an oil-water flow experiment.
#
# =========================================================================
def flow():
print
print '''Detect water in a static image of an oil-water flow experiment.'''
print
#
print '========================================================================='
print '''
The gray-scale image of the water-oil flow experiment is read.
'''
print '========================================================================='
#0
print '''
a = readgray('flow.tif')
show(a)'''
a = readgray('flow.tif')
show(a)
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
The dark region of the image is enhanced by the close top-hat
operator.
'''
print '========================================================================='
#0
print '''
b=closeth(a,seline(50,90))
show(b)'''
b=closeth(a,seline(50,90))
show(b)
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
A connected filtering is applied to remove small artifacts present
in the image.
'''
print '========================================================================='
#0
print '''
c=closerec(b,sebox(5))
show(c)'''
c=closerec(b,sebox(5))
show(c)
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
An alternating sequential filter is used for shape smoothing.
'''
print '========================================================================='
#0
print '''
d=asf(c,'co',secross())
show(d)'''
d=asf(c,'co',secross())
show(d)
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
The original image and the thresholded image overlayed on the
original are presented successively.
'''
print '========================================================================='
#0
print '''
e=threshad(d,100)
show(a)
show(a,e)'''
e=threshad(d,100)
show(a)
show(a,e)
print
raw_input(4*' '+'Please press return to continue...')
print
print
#
return
# =========================================================================
#
# gear - Detect the teeth of a gear
#
# =========================================================================
def gear():
print
print '''Detect the teeth of a gear'''
print
#
print '========================================================================='
print '''
The binary image of a gear is read.
'''
print '========================================================================='
#0
print '''
a = readgray('gear.tif');
show(a);'''
a = readgray('gear.tif');
show(a);
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
Opening of the input image by an Euclidean disk of radius 20. The
sequence opening-subtraction is called opening top-hat. The opening
top-hat could be executed in a single command: c = openth(a,sedisk(20));
'''
print '========================================================================='
#0
print '''
b = open(a,sedisk(20));
show(b);
c = subm(a,b);
show(c);'''
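# subm(a, b) subtracts the opening from the original, i.e. the opening top-hat:
# only structures narrower than the radius-20 disk (the teeth) remain.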
b = open(a,sedisk(20));
show(b);
c = subm(a,b);
show(c);
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
The teeth detected are labeled. The maximum pixel value in the
labeled image gives the number of connected objects (n. of teeth).
'''
print '========================================================================='
#0
print '''
d = label(c);
nteeth=stats(d,'max')
lblshow(d,'border');'''
d = label(c);
nteeth=stats(d,'max')
lblshow(d,'border');
print
raw_input(4*' '+'Please press return to continue...')
print
print
#
return
# =========================================================================
#
# holecenter - Hole center misalignment in PCB.
#
# =========================================================================
def holecenter():
print
print '''Hole center misalignment in PCB.'''
print
#
print '========================================================================='
print '''
The image of the PCB is read.
'''
print '========================================================================='
#0
print '''
a = readgray('pcbholes.tif')
show(a)'''
a = readgray('pcbholes.tif')
show(a)
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
Use the close hole function to remove the holes. Note that one hole
is open. This is not considered in this experiment. The regional
maxima of the distance transform gives the radius of the largest
disk inside the pad. We are interested only in radius larger than 20
pixels.
'''
print '========================================================================='
#0
print '''
b = clohole(a)
show(b)
d = dist(b,secross(),'EUCLIDEAN')
e = regmax(d,sebox())
f = threshad(d, uint16([20])) # radius larger than 20 pixels
g = intersec(e,f)
h = blob(label(g,sebox()),'CENTROID'); # pad center
show(b,dilate(h))'''
b = clohole(a)
show(b)
d = dist(b,secross(),'EUCLIDEAN')
e = regmax(d,sebox())
f = threshad(d, uint16([20])) # radius larger than 20 pixels
g = intersec(e,f)
h = blob(label(g,sebox()),'CENTROID'); # pad center
show(b,dilate(h))
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
The holes are given by the difference between the pad image and the
original image. Repeat the same procedure used to find the center of
the pads, now to find the center of the holes.
'''
print '========================================================================='
#0
print '''
i = subm(b,a)
show(i)
j = dist(i,secross(),'EUCLIDEAN')
k = regmax(j,sebox())
l = blob(label(k,sebox()),'CENTROID') # hole center
show(i,dilate(l))'''
i = subm(b,a)
show(i)
j = dist(i,secross(),'EUCLIDEAN')
k = regmax(j,sebox())
l = blob(label(k,sebox()),'CENTROID') # hole center
show(i,dilate(l))
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
First both centers (pads and holes) are displayed together. Then the
actual misalignment is computed using the distance from one point to
the other.
'''
print '========================================================================='
#0
print '''
from numpy.oldnumeric import nonzero
show(a,h,l)
m = dist(neg(l),secross(),'EUCLIDEAN')
n = intersec(gray(h),uint8(m))
show(n,a)
i = nonzero(n.ravel())
x = i / n.shape[1]
y = i % n.shape[1]
for k in range(len(i)):
print 'displacement of %d at (%d,%d)\n' %(n[x[k],y[k]],x[k],y[k])'''
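# nonzero on the raveled image gives flat indices; integer division and modulo by
# the image width (n.shape[1]) recover the row (x) and column (y) coordinates.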
from numpy.oldnumeric import nonzero
show(a,h,l)
m = dist(neg(l),secross(),'EUCLIDEAN')
n = intersec(gray(h),uint8(m))
show(n,a)
i = nonzero(n.ravel())
x = i / n.shape[1]
y = i % n.shape[1]
for k in range(len(i)):
print 'displacement of %d at (%d,%d)\n' %(n[x[k],y[k]],x[k],y[k])
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
First, thinning computes the skeleton of the PCB image; then all the
end points of the skeleton are removed iteratively so that only the
skeleton loops around the holes remain. Find the minimum distance of
these loops to the border and display their location.
'''
print '========================================================================='
#0
print '''
o=thin(a)
p=thin(o,endpoints())
show(a,p)
q = dist(a,secross(),'EUCLIDEAN')
r = grain(label(p,sebox()),q,'min') # minimum
s = grain(label(p,sebox()),q,'min','data') # minimum
from numpy.oldnumeric import ravel
for k in ravel(s):
print 'Minimum distance: %d pixels' %(2*k+1)
t = intersec(cmp(r,'==',q),a)
show(a,dilate(t))'''
o=thin(a)
p=thin(o,endpoints())
show(a,p)
q = dist(a,secross(),'EUCLIDEAN')
r = grain(label(p,sebox()),q,'min') # minimum
s = grain(label(p,sebox()),q,'min','data') # minimum
from numpy.oldnumeric import ravel
for k in ravel(s):
print 'Minimum distance: %d pixels' %(2*k+1)
t = intersec(cmp(r,'==',q),a)
show(a,dilate(t))
print
raw_input(4*' '+'Please press return to continue...')
print
print
#
return
# =========================================================================
#
# leaf - Segment a leaf from the background
#
# =========================================================================
def leaf():
print
print '''Segment a leaf from the background'''
print
#
print '========================================================================='
print '''
The gray scale image to be processed is read.
'''
print '========================================================================='
#0
print '''
f = readgray('leaf.tif')
show(f)'''
f = readgray('leaf.tif')
show(f)
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
Although the leaf was pictured on a light background, it is not
possible to fully segment the leaf using a simple thresholding
'''
print '========================================================================='
#0
print '''
f_low=threshad(f,100)
f_med=threshad(f,128)
f_high=threshad(f,160)
show(f_low)
show(f_med)
show(f_high)'''
f_low=threshad(f,100)
f_med=threshad(f,128)
f_high=threshad(f,160)
show(f_low)
show(f_med)
show(f_high)
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
The toggle is a non-linear image enhancement that changes each pixel
value to the maximum or the minimum in the neighborhood given by the
structuring element, depending on which one is nearer to the original
value. As a result of the toggle, the image is better defined near
the edges.
'''
print '========================================================================='
#0
print '''
f1=toggle(f,erode(f,sedisk(7)),dilate(f,sedisk(7)))
show(f1)'''
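# Toggle contrast enhancement: each pixel of f is replaced by its erosion or its
# dilation by a radius-7 disk, whichever is closer to the original value.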
f1=toggle(f,erode(f,sedisk(7)),dilate(f,sedisk(7)))
show(f1)
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
The thresholding can now be applied, resulting in a good definition
of the leaf border. Small white spots can be removed by the area open
filter.
'''
print '========================================================================='
#0
print '''
f2=threshad(f1,100)
f3=areaopen(f2,80)
show(f2)
show(f3)'''
f2=threshad(f1,100)
f3=areaopen(f2,80)
show(f2)
show(f3)
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
For illustration, the contour of the segmented leaf is overlayed in
red on the original image
'''
print '========================================================================='
#0
print '''
f4=gradm(f3)
show(f,f4)'''
f4=gradm(f3)
show(f,f4)
print
raw_input(4*' '+'Please press return to continue...')
print
print
#
return
# =========================================================================
#
# lith - Detect defects in a microelectronic circuit.
#
# =========================================================================
def lith():
print
print '''Detect defects in a microelectronic circuit.'''
print
#
print '========================================================================='
print '''
The input image is read. The image is also displayed as a surface
model.
'''
print '========================================================================='
#0
print '''
a = readgray('r4x2_256.tif');
show(a);
show(surf(a));'''
a = readgray('r4x2_256.tif');
show(a);
show(surf(a));
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
Closing of the image by a vertical line of length 25 pixels. Then
subtract it from the original. The sequence closing-subtraction is
called closing top-hat. (This could be executed in a single command:
c = closeth(a,seline(25,90));).
'''
print '========================================================================='
#0
print '''
b = close(a,seline(25,90));
show(b);
show(surf(b));'''
b = close(a,seline(25,90));
show(b);
show(surf(b));
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
Subtraction of the closing from the original is called closing
top-hat. It shows the discrepancies of the image where the
structuring element cannot fit the surface. In this case, it
highlights vertical depressions longer than 25 pixels.
'''
print '========================================================================='
#0
print '''
c = subm(b,a);
show(c);
show(surf(c));'''
c = subm(b,a);
show(c);
show(surf(c));
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
Threshold on the residues image. Elimination of the small objects by
area open.
'''
print '========================================================================='
#0
print '''
d = cmp(c,'>=',50);
e = areaopen(d,5);
show(d);
show(e);'''
d = cmp(c,'>=',50);
e = areaopen(d,5);
show(d);
show(e);
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
Overlay the detected defects over the original image, and over the
surface display.
'''
print '========================================================================='
#0
print '''
show(a,e);
show(surf(a),e);'''
show(a,e);
show(surf(a),e);
print
raw_input(4*' '+'Please press return to continue...')
print
print
#
return
# =========================================================================
#
# pcb - Decompose a printed circuit board in its main parts.
#
# =========================================================================
def pcb():
print
print '''Decompose a printed circuit board in its main parts.'''
print
#
print '========================================================================='
print '''
The binary image of a printed circuit board is read.
'''
print '========================================================================='
#0
print '''
a = readgray('pcb1bin.tif');
show(a);'''
a = readgray('pcb1bin.tif');
show(a);
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
A new image is created by filling the holes. The input image is
subtracted from this new hole-filled image. The resulting residues
are the holes.
'''
print '========================================================================='
#0
print '''
b = clohole(a);
holes = subm(b,a);
show(b);
show(a, holes);'''
b = clohole(a);
holes = subm(b,a);
show(b);
show(a, holes);
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
The square islands are detected using an opening by a square of size
17x17.
'''
print '========================================================================='
#0
print '''
c = open(b,sebox(8));
square = cdil(c, a);
show(b, c);
show(holes, square);'''
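# sebox(8) is the 17x17 square mentioned above; cdil dilates the opened image c
# conditioned on (bounded by) the original image a to mark the square islands.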
c = open(b,sebox(8));
square = cdil(c, a);
show(b, c);
show(holes, square);
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
The circle islands are detected using an opening by an Euclidean
disk on a residues image.
'''
print '========================================================================='
#0
print '''
f = subm(b, c);
g = open(f, sedisk(8));
circle = cdil(g,a);
show(f, g);
show(holes, square, circle);'''
f = subm(b, c);
g = open(f, sedisk(8));
circle = cdil(g,a);
show(f, g);
show(holes, square, circle);
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
The rectangular islands are detected using an opening by a rectangle
of size 25 x 8 on a residues image. The rectangle structuring
element is built from the composition of vertical and horizontal
lines.
'''
print '========================================================================='
#0
print '''
i = subm(f, g);
m = open(i,sedil(seline(8,90), seline(25)));
rect = cdil(m,a);
show(i, m);
show(holes, square, circle, rect);'''
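# sedil composes the two line segments (vertical of length 8, horizontal of length
# 25) into the 25 x 8 rectangular structuring element described above.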
i = subm(f, g);
m = open(i,sedil(seline(8,90), seline(25)));
rect = cdil(m,a);
show(i, m);
show(holes, square, circle, rect);
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
The thick connections are detected using an opening by a square on a
residues image.
'''
print '========================================================================='
#0
print '''
o = subm(i,m);
p = open(o, sebox(2));
thick = cdil(p,a);
show(o, p);
show(holes, square, circle, rect, thick);'''
o = subm(i,m);
p = open(o, sebox(2));
thick = cdil(p,a);
show(o, p);
show(holes, square, circle, rect, thick);
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
The thin connections are detected using an opening by a square on a
residues image.
'''
print '========================================================================='
#0
print '''
r = subm(o,p);
s = open(r, sebox());
thin = cdil(s,a);
show(r, s);
show(holes, square, circle, rect, thin, thick);'''
r = subm(o,p);
s = open(r, sebox());
thin = cdil(s,a);
show(r, s);
show(holes, square, circle, rect, thin, thick);
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
The main components of the circuit are overlayed and presented in a
single image.
'''
print '========================================================================='
#0
print '''
show(holes, square, circle, rect, thin, thick);'''
show(holes, square, circle, rect, thin, thick);
print
raw_input(4*' '+'Please press return to continue...')
print
print
#
return
# =========================================================================
#
# pieces - Classify two dimensional pieces.
#
# =========================================================================
def pieces():
print
print '''Classify two dimensional pieces.'''
print
#
print '========================================================================='
print '''
The binary image of the pieces is read.
'''
print '========================================================================='
#0
print '''
a = readgray('pieces_bw.tif');
show(a);'''
a = readgray('pieces_bw.tif');
show(a);
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
A homotopic thickening is applied to reduce contour noise.
'''
print '========================================================================='
#0
print '''
seA = img2se(binary([[0,1,0],[1,0,1],[0,0,0]]))
seB = img2se(binary([[0,0,0],[0,1,0],[0,0,0]]))
iAB = se2hmt(seA,seB);
print intershow(iAB)
b = thick(a, iAB);
show(b);'''
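# seA and seB define a hit-or-miss interval via se2hmt (seA the required foreground,
# seB the required background); thick() adds the matched pixels, reducing contour noise.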
seA = img2se(binary([[0,1,0],[1,0,1],[0,0,0]]))
seB = img2se(binary([[0,0,0],[0,1,0],[0,0,0]]))
iAB = se2hmt(seA,seB);
print intershow(iAB)
b = thick(a, iAB);
show(b);
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
The homotopic skeleton is created by thinning.
'''
print '========================================================================='
#0
print '''
c = thin(b);
show(c);'''
c = thin(b);
show(c);
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
The open lines of the skeleton are pruned by the end point thinning.
The remaining skeleton components will be loops, identifying the
rings.
'''
print '========================================================================='
#0
print '''
d = thin(c,endpoints());
show(c,d);'''
d = thin(c,endpoints());
show(c,d);
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
Extraction of the rings by reconstruction of the thickened image from
the filtered skeleton.
'''
print '========================================================================='
#0
print '''
e = infrec(d,b);
show(e);'''
e = infrec(d,b);
show(e);
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
Restriction of the objects detected to the input image.
'''
print '========================================================================='
#0
print '''
f = intersec(a,e);
show(f);'''
f = intersec(a,e);
show(f);
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
It eliminates the skeleton of the rings.
'''
print '========================================================================='
#0
print '''
g = subm(c,e);
show(g);'''
g = subm(c,e);
show(g);
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
It successively removes 4 end-points so that T-junctions remain
only on the T-pins.
'''
print '========================================================================='
#0
print '''
h = thin(g, endpoints(), 4);
show(h);'''
h = thin(g, endpoints(), 4);
show(h);
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
It detects triple points, applying the union of matchings with two
templates. These points will identify (mark) the T-pins.
'''
print '========================================================================='
#0
print '''
seA1 = img2se(binary([[0,1,0],[0,1,0],[1,0,1]]))
seB1 = img2se(binary([[0,0,0],[1,0,1],[0,1,0]]))
seA2 = img2se(binary([[0,1,0],[1,1,1],[0,0,0]]))
seB2 = img2se(binary([[1,0,1],[0,0,0],[0,1,0]]))
i1 = supcanon(h, se2hmt(seA1,seB1));
i2 = supcanon(h, se2hmt(seA2,seB2));
i = union(i1,i2);
show(h,dilate(i,sedisk(2)));'''
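# The two hit-or-miss templates match triple points (pixels where three skeleton
# branches meet); their union marks the T-junctions that identify the T-pins.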
seA1 = img2se(binary([[0,1,0],[0,1,0],[1,0,1]]))
seB1 = img2se(binary([[0,0,0],[1,0,1],[0,1,0]]))
seA2 = img2se(binary([[0,1,0],[1,1,1],[0,0,0]]))
seB2 = img2se(binary([[1,0,1],[0,0,0],[0,1,0]]))
i1 = supcanon(h, se2hmt(seA1,seB1));
i2 = supcanon(h, se2hmt(seA2,seB2));
i = union(i1,i2);
show(h,dilate(i,sedisk(2)));
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
Detection of the T-pins by reconstruction of the thickened image from
the T-pin markers.
'''
print '========================================================================='
#0
print '''
j = infrec(i,b,sebox());
show(j);'''
j = infrec(i,b,sebox());
show(j);
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
Restriction of the objects detected to the input image
'''
print '========================================================================='
#0
print '''
k = intersec(a,j);
show(k);'''
k = intersec(a,j);
show(k);
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
The nails are immediately detected by subtracting the images of the
rings and T-pins from the input image.
'''
print '========================================================================='
#0
print '''
l = subm(subm(a,f),k);
show(l);'''
l = subm(subm(a,f),k);
show(l);
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
The result of the classification is presented in a pseudo color
image.
'''
print '========================================================================='
#0
print '''
m = gray(f,'uint8',1);
n = gray(k,'uint8',2);
o = gray(l,'uint8',3);
p = union(m,n,o);
lblshow(p);'''
m = gray(f,'uint8',1);
n = gray(k,'uint8',2);
o = gray(l,'uint8',3);
p = union(m,n,o);
lblshow(p);
print
raw_input(4*' '+'Please press return to continue...')
print
print
#
return
# =========================================================================
#
# potatoes - Grade potato quality by shape and skin spots.
#
# =========================================================================
def potatoes():
print
print '''Grade potato quality by shape and skin spots.'''
print
#
print '========================================================================='
print '''
The input image is read.
'''
print '========================================================================='
#0
print '''
a = readgray('potatoes.tif');
show(a);'''
a = readgray('potatoes.tif');
show(a);
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
Convert to binary objects by thresholding
'''
print '========================================================================='
#0
print '''
b = threshad(a,90);
show(b);'''
b = threshad(a,90);
show(b);
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
The binary image is thinned and the result overlayed on the original
image
'''
print '========================================================================='
#0
print '''
c = thin(b);
show(a,c);'''
c = thin(b);
show(a,c);
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
To detect the skin spots, a closing tophat can enhance the dark
areas of the image
'''
print '========================================================================='
#0
print '''
d = closeth(a,sedisk(5));
show(d);'''
d = closeth(a,sedisk(5));
show(d);
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
The tophat is thresholded and the result is masked with the binary
image of the potatoes, as we are interested only in the spots inside
them
'''
print '========================================================================='
#0
print '''
e = threshad(d,20);
f = intersec(e,b);
show(f);'''
e = threshad(d,20);
f = intersec(e,b);
show(f);
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
Show both results: skeleton and skin spots overlayed on the original
image
'''
print '========================================================================='
#0
print '''
show(a);
show(a,f,c);'''
show(a);
show(a,f,c);
print
raw_input(4*' '+'Please press return to continue...')
print
print
#
return
# =========================================================================
#
# robotop - Detect marks on a robot.
#
# =========================================================================
def robotop():
print
print '''Detect marks on a robot.'''
print
#
print '========================================================================='
print '''
The gray-scale image of the robot top view is read.
'''
print '========================================================================='
#0
print '''
a = readgray('robotop.tif');
show(a);'''
a = readgray('robotop.tif');
show(a);
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
It detects white regions smaller than a square of radius 4.
'''
print '========================================================================='
#0
print '''
b = openth(a,sebox(4));
show(b);'''
b = openth(a,sebox(4));
show(b);
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
It removes white objects smaller than a square of radius 1.
'''
print '========================================================================='
#0
print '''
c = open(b,sebox());
show(c);'''
c = open(b,sebox());
show(c);
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
It detects the robot markers. This is a very robust thresholding
(i.e., the result is not sensitive to small changes in the value of
the threshold parameter). The original image is overlayed by the
detected robot markers.
'''
print '========================================================================='
#0
print '''
d = threshad(c,100);
show(a,d);'''
d = threshad(c,100);
show(a,d);
print
raw_input(4*' '+'Please press return to continue...')
print
print
#
return
# =========================================================================
#
# ruler - Detect defects in a ruler.
#
# =========================================================================
def ruler():
print
print '''Detect defects in a ruler.'''
print
#
print '========================================================================='
print '''
The gray-scale image of the ruler is read.
'''
print '========================================================================='
#0
print '''
a = readgray('3.tif');
show(a);'''
a = readgray('3.tif');
show(a);
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
The close top-hat operator followed by a thresholding is applied.
'''
print '========================================================================='
#0
print '''
b = threshad( closeth(a,sebox(5)),40);
show(b);'''
b = threshad( closeth(a,sebox(5)),40);
show(b);
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
The vertical lines longer than 50 pixels are detected.
'''
print '========================================================================='
#0
print '''
c = open(b,seline(50,90));
show(c);'''
c = open(b,seline(50,90));
show(c);
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
It closes the gaps between the ruler tick marks.
'''
print '========================================================================='
#0
print '''
d =close(c,seline(15));
show(d);'''
d =close(c,seline(15));
show(d);
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
It detects all objects connected to the ruler tick markers.
'''
print '========================================================================='
#0
print '''
e = infrec(d,b);
show(e);'''
e = infrec(d,b);
show(e);
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
It detects all objects vertically connected to the ruler tick mark.
Note that the 3x1 rectangle is used as structuring element in the
vertical reconstruction.
'''
print '========================================================================='
#0
print '''
f = infrec(d,b,seline(3,90));
show(f);'''
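# seline(3,90) is a short vertical segment, so the reconstruction propagates
# only vertically, as noted in the text above.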
f = infrec(d,b,seline(3,90));
show(f);
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
The residues obtained from the previous image.
'''
print '========================================================================='
#0
print '''
g = subm(e,f);
show(g);'''
g = subm(e,f);
show(g);
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
It uses an opening by an elementary cross structuring element to
eliminate the artifacts.
'''
print '========================================================================='
#0
print '''
h = open(g);
show(h);'''
h = open(g);
show(h);
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
It detects the objects connected to ruler tick marks. A
reconstruction from the ruler marks detected is applied.
'''
print '========================================================================='
#0
print '''
i = infrec(h, b);
show(i);'''
i = infrec(h, b);
show(i);
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
Overlay the detected defect over the original image
'''
print '========================================================================='
#0
print '''
show(a,i);'''
show(a,i);
print
raw_input(4*' '+'Please press return to continue...')
print
print
#
return
# =========================================================================
#
# soil - Detect fractures in soil.
#
# =========================================================================
def soil():
print
print '''Detect fractures in soil.'''
print
#
print '========================================================================='
print '''
The image of fractures in soil is read.
'''
print '========================================================================='
#0
print '''
a = readgray('soil.tif');
show(a);'''
a = readgray('soil.tif');
show(a);
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
The fracture lines are enhanced by the close top-hat operator.
'''
print '========================================================================='
#0
print '''
b = closeth(a,sebox(2));
show(b);'''
b = closeth(a,sebox(2));
show(b);
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
Small connected bright regions are removed by the gray-scale area
open operator. Note the connectivity used (sebox: 8-connected).
'''
print '========================================================================='
#0
print '''
c= areaopen(b,80,sebox());
show(c);'''
c= areaopen(b,80,sebox());
show(c);
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
The fracture lines are detected. This threshold is very robust.
'''
print '========================================================================='
#0
print '''
d = threshad(c,15);
show(d);'''
d = threshad(c,15);
show(d);
print
raw_input(4*' '+'Please press return to continue...')
print
print
##
print '========================================================================='
print '''
Overlay of the fracture lines over the original image.
'''
print '========================================================================='
#0
print '''
show(a,d);'''
show(a,d);
print
raw_input(4*' '+'Please press return to continue...')
print
print
#
return
# =====================================================================
#
# script execution
#
# =====================================================================
#all demonstrations - initialization
_alldemos = [
'airport',
'area',
'asp',
'labeltext',
'beef',
'blob',
'brain',
'calc',
'cells',
'chickparts',
'concrete',
'cookies',
'cornea',
'fabric',
'fila',
'flatzone',
'flow',
'gear',
'holecenter',
'leaf',
'lith',
'pcb',
'pieces',
'potatoes',
'robotop',
'ruler',
'soil',
]
def main():
import sys
print '\npymorph Demonstrations -- SDC Morphology Toolbox\n'
print 'Available Demonstrations: \n' + str(_alldemos) + '\n'
if len(sys.argv) > 1:
for demo in sys.argv[1:]:
if demo in _alldemos:
eval(demo + '()')
else:
print "Demonstration " + demo + " is not in this package. Please use help for details\n"
else:
print "\nUsage: python %s <demo_name>\n\n" % sys.argv[0]
if __name__ == '__main__':
main()
|
{
"content_hash": "3922bc5d95bfa2cec96714674f8d87a3",
"timestamp": "",
"source": "github",
"line_count": 3628,
"max_line_length": 104,
"avg_line_length": 30.318632855567806,
"alnum_prop": 0.41563329575620933,
"repo_name": "luispedro/pymorph",
"id": "c33028f4db3911ef30c892a5f05172a36cea711c",
"size": "109996",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "morphdemo.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "360042"
}
],
"symlink_target": ""
}
|
"""
Created on Wed May 01 11:44:49 2013
@author: Martin Siggel <martin.siggel@dlr.de>
"""
import sys, os, shutil, glob
filepath = os.path.dirname(os.path.realpath(__file__))
tixipath = filepath + '/../..'
sys.path.append(tixipath + '/bindings')
import bindings_generator.matlab_generator as MG
import bindings_generator.cheader_parser as CP
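# Functions excluded from the generated MATLAB wrapper (presumably unsupported
# by, or hand-written for, the MATLAB bindings).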
blacklist = ['tixiGetRawInterface', 'tixiAddDoubleListWithAttributes', 'tixiSetPrintMsgFunc']
if __name__ == '__main__':
# parse the file
#ann = CP.Annotation('#annotate out: 3, 4A(3)')
# copy handwritten *.m files into current directory
for mfile in glob.glob(filepath + r'/*.m'):
shutil.copy(mfile, '.')
parser = CP.CHeaderFileParser()
# set the handle string that the parser can identify the handles
parser.handle_str = 'TixiDocumentHandle'
parser.returncode_str ='ReturnCode'
parser.typedefs = {'TixiPrintMsgFnc': 'void'}
parser.parse_header_file(tixipath + '/src/tixi.h')
# create the wrapper
generator = MG.MatlabGenerator(parser, 'tixi3', 'tixi.h')
generator.blacklist = blacklist
generator.create_wrapper()
|
{
"content_hash": "bc8c65ba9d65c9c4793be0e4d600ab12",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 93,
"avg_line_length": 28.414634146341463,
"alnum_prop": 0.6686695278969957,
"repo_name": "melven/tixi",
"id": "298b32ff16587b34a14d9b1c3630115825c9d810",
"size": "1190",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bindings/matlab/make_tixi_matlab.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "312276"
},
{
"name": "C++",
"bytes": "347700"
},
{
"name": "CMake",
"bytes": "54000"
},
{
"name": "Fortran",
"bytes": "22477"
},
{
"name": "M",
"bytes": "1003"
},
{
"name": "MATLAB",
"bytes": "1019"
},
{
"name": "Makefile",
"bytes": "1517"
},
{
"name": "Python",
"bytes": "133917"
},
{
"name": "Shell",
"bytes": "465"
},
{
"name": "XSLT",
"bytes": "1713"
}
],
"symlink_target": ""
}
|
import os
import socket
import sys
import ssl
from mock import Mock, patch
import requests_mock
from libcloud.test import unittest
from libcloud.common.base import Connection, CertificateConnection
from libcloud.http import LibcloudBaseConnection
from libcloud.http import LibcloudConnection
from libcloud.http import SignedHTTPSAdapter
from libcloud.utils.misc import retry
class BaseConnectionClassTestCase(unittest.TestCase):
def test_parse_proxy_url(self):
conn = LibcloudBaseConnection()
proxy_url = 'http://127.0.0.1:3128'
result = conn._parse_proxy_url(proxy_url=proxy_url)
self.assertEqual(result[0], 'http')
self.assertEqual(result[1], '127.0.0.1')
self.assertEqual(result[2], 3128)
self.assertEqual(result[3], None)
self.assertEqual(result[4], None)
proxy_url = 'http://user1:pass1@127.0.0.1:3128'
result = conn._parse_proxy_url(proxy_url=proxy_url)
self.assertEqual(result[0], 'http')
self.assertEqual(result[1], '127.0.0.1')
self.assertEqual(result[2], 3128)
self.assertEqual(result[3], 'user1')
self.assertEqual(result[4], 'pass1')
proxy_url = 'https://127.0.0.1:3128'
expected_msg = 'Only http proxies are supported'
self.assertRaisesRegexp(ValueError, expected_msg,
conn._parse_proxy_url,
proxy_url=proxy_url)
proxy_url = 'http://127.0.0.1'
expected_msg = 'proxy_url must be in the following format'
self.assertRaisesRegexp(ValueError, expected_msg,
conn._parse_proxy_url,
proxy_url=proxy_url)
proxy_url = 'http://@127.0.0.1:3128'
expected_msg = 'URL is in an invalid format'
self.assertRaisesRegexp(ValueError, expected_msg,
conn._parse_proxy_url,
proxy_url=proxy_url)
proxy_url = 'http://user@127.0.0.1:3128'
expected_msg = 'URL is in an invalid format'
self.assertRaisesRegexp(ValueError, expected_msg,
conn._parse_proxy_url,
proxy_url=proxy_url)
def test_constructor(self):
conn = LibcloudConnection(host='localhost', port=80)
self.assertEqual(conn.proxy_scheme, None)
self.assertEqual(conn.proxy_host, None)
self.assertEqual(conn.proxy_port, None)
proxy_url = 'http://127.0.0.3:3128'
conn.set_http_proxy(proxy_url=proxy_url)
self.assertEqual(conn.proxy_scheme, 'http')
self.assertEqual(conn.proxy_host, '127.0.0.3')
self.assertEqual(conn.proxy_port, 3128)
proxy_url = 'http://127.0.0.4:3128'
conn = LibcloudConnection(host='localhost', port=80,
proxy_url=proxy_url)
self.assertEqual(conn.proxy_scheme, 'http')
self.assertEqual(conn.proxy_host, '127.0.0.4')
self.assertEqual(conn.proxy_port, 3128)
os.environ['http_proxy'] = proxy_url
proxy_url = 'http://127.0.0.5:3128'
conn = LibcloudConnection(host='localhost', port=80,
proxy_url=proxy_url)
self.assertEqual(conn.proxy_scheme, 'http')
self.assertEqual(conn.proxy_host, '127.0.0.5')
self.assertEqual(conn.proxy_port, 3128)
def test_connection_to_unusual_port(self):
conn = LibcloudConnection(host='localhost', port=8080)
self.assertEqual(conn.proxy_scheme, None)
self.assertEqual(conn.proxy_host, None)
self.assertEqual(conn.proxy_port, None)
self.assertEqual(conn.host, 'http://localhost:8080')
conn = LibcloudConnection(host='localhost', port=80)
self.assertEqual(conn.host, 'http://localhost')
def test_connection_url_merging(self):
"""
Test that the connection class will parse URLs correctly
"""
conn = Connection(url='http://test.com/')
conn.connect()
self.assertEqual(conn.connection.host, 'http://test.com')
with requests_mock.mock() as m:
m.get('http://test.com/test', text='data')
response = conn.request('/test')
self.assertEqual(response.body, 'data')
def test_morph_action_hook(self):
conn = Connection(url="http://test.com")
conn.request_path = ''
self.assertEqual(conn.morph_action_hook('/test'), '/test')
self.assertEqual(conn.morph_action_hook('test'), '/test')
conn.request_path = '/v1'
self.assertEqual(conn.morph_action_hook('/test'), '/v1/test')
self.assertEqual(conn.morph_action_hook('test'), '/v1/test')
conn.request_path = '/v1'
self.assertEqual(conn.morph_action_hook('/test'), '/v1/test')
self.assertEqual(conn.morph_action_hook('test'), '/v1/test')
conn.request_path = 'v1'
self.assertEqual(conn.morph_action_hook('/test'), '/v1/test')
self.assertEqual(conn.morph_action_hook('test'), '/v1/test')
conn.request_path = 'v1/'
self.assertEqual(conn.morph_action_hook('/test'), '/v1/test')
self.assertEqual(conn.morph_action_hook('test'), '/v1/test')
def test_connect_with_prefix(self):
"""
Test that a connection with a base path (e.g. /v1/) will
add the base path to requests
"""
conn = Connection(url='http://test.com/')
conn.connect()
conn.request_path = '/v1'
self.assertEqual(conn.connection.host, 'http://test.com')
with requests_mock.mock() as m:
m.get('http://test.com/v1/test', text='data')
response = conn.request('/test')
self.assertEqual(response.body, 'data')
def test_secure_connection_unusual_port(self):
"""
Test that the connection class will default to secure (https) even
when the port is an unusual (non 443, 80) number
"""
conn = Connection(secure=True, host='localhost', port=8081)
conn.connect()
self.assertEqual(conn.connection.host, 'https://localhost:8081')
conn2 = Connection(url='https://localhost:8081')
conn2.connect()
self.assertEqual(conn2.connection.host, 'https://localhost:8081')
def test_secure_by_default(self):
"""
Test that the connection class will default to secure (https)
"""
conn = Connection(host='localhost', port=8081)
conn.connect()
self.assertEqual(conn.connection.host, 'https://localhost:8081')
def test_implicit_port(self):
"""
Test that the port is not included in the URL if the protocol implies
the port, e.g. http implies 80
"""
conn = Connection(secure=True, host='localhost', port=443)
conn.connect()
self.assertEqual(conn.connection.host, 'https://localhost')
conn2 = Connection(secure=False, host='localhost', port=80)
conn2.connect()
self.assertEqual(conn2.connection.host, 'http://localhost')
def test_insecure_connection_unusual_port(self):
"""
Test that the connection will allow unusual ports and insecure
schemes
"""
conn = Connection(secure=False, host='localhost', port=8081)
conn.connect()
self.assertEqual(conn.connection.host, 'http://localhost:8081')
conn2 = Connection(url='http://localhost:8081')
conn2.connect()
self.assertEqual(conn2.connection.host, 'http://localhost:8081')
class ConnectionClassTestCase(unittest.TestCase):
def setUp(self):
self.originalConnect = Connection.connect
self.originalResponseCls = Connection.responseCls
Connection.connect = Mock()
Connection.responseCls = Mock()
Connection.allow_insecure = True
def tearDown(self):
Connection.connect = self.originalConnect
        Connection.responseCls = self.originalResponseCls
Connection.allow_insecure = True
def test_dont_allow_insecure(self):
Connection.allow_insecure = True
Connection(secure=False)
Connection.allow_insecure = False
        expected_msg = (r'Non https connections are not allowed \(use '
                        r'secure=True\)')
self.assertRaisesRegexp(ValueError, expected_msg, Connection,
secure=False)
def test_cache_busting(self):
params1 = {'foo1': 'bar1', 'foo2': 'bar2'}
params2 = [('foo1', 'bar1'), ('foo2', 'bar2')]
con = Connection()
con.connection = Mock()
con.pre_connect_hook = Mock()
con.pre_connect_hook.return_value = {}, {}
con.cache_busting = False
con.request(action='/path', params=params1)
args, kwargs = con.pre_connect_hook.call_args
self.assertFalse('cache-busting' in args[0])
self.assertEqual(args[0], params1)
con.request(action='/path', params=params2)
args, kwargs = con.pre_connect_hook.call_args
self.assertFalse('cache-busting' in args[0])
self.assertEqual(args[0], params2)
con.cache_busting = True
con.request(action='/path', params=params1)
args, kwargs = con.pre_connect_hook.call_args
self.assertTrue('cache-busting' in args[0])
con.request(action='/path', params=params2)
args, kwargs = con.pre_connect_hook.call_args
self.assertTrue('cache-busting' in args[0][len(params2)])
def test_context_is_reset_after_request_has_finished(self):
context = {'foo': 'bar'}
def responseCls(connection, response):
connection.called = True
self.assertEqual(connection.context, context)
con = Connection()
con.called = False
con.connection = Mock()
con.responseCls = responseCls
con.set_context(context)
self.assertEqual(con.context, context)
con.request('/')
# Context should have been reset
self.assertTrue(con.called)
self.assertEqual(con.context, {})
# Context should also be reset if a method inside request throws
con = Connection(timeout=1, retry_delay=0.1)
con.connection = Mock()
con.set_context(context)
self.assertEqual(con.context, context)
con.connection.request = Mock(side_effect=ssl.SSLError())
try:
con.request('/')
except ssl.SSLError:
pass
self.assertEqual(con.context, {})
con.connection = Mock()
con.set_context(context)
self.assertEqual(con.context, context)
con.responseCls = Mock(side_effect=ValueError())
try:
con.request('/')
except ValueError:
pass
self.assertEqual(con.context, {})
def _raise_socket_error(self):
raise socket.gaierror('')
def test_retry_with_sleep(self):
con = Connection()
con.connection = Mock()
connect_method = 'libcloud.common.base.Connection.request'
with patch(connect_method) as mock_connect:
mock_connect.__name__ = 'mock_connect'
with self.assertRaises(socket.gaierror):
mock_connect.side_effect = socket.gaierror('')
retry_request = retry(timeout=1, retry_delay=.1,
backoff=1)
retry_request(con.request)(action='/')
self.assertGreater(mock_connect.call_count, 1,
'Retry logic failed')
def test_retry_with_timeout(self):
con = Connection()
con.connection = Mock()
connect_method = 'libcloud.common.base.Connection.request'
with patch(connect_method) as mock_connect:
mock_connect.__name__ = 'mock_connect'
with self.assertRaises(socket.gaierror):
mock_connect.side_effect = socket.gaierror('')
retry_request = retry(timeout=2, retry_delay=.1,
backoff=1)
retry_request(con.request)(action='/')
self.assertGreater(mock_connect.call_count, 1,
'Retry logic failed')
def test_retry_with_backoff(self):
con = Connection()
con.connection = Mock()
connect_method = 'libcloud.common.base.Connection.request'
with patch(connect_method) as mock_connect:
mock_connect.__name__ = 'mock_connect'
with self.assertRaises(socket.gaierror):
mock_connect.side_effect = socket.gaierror('')
retry_request = retry(timeout=2, retry_delay=.1,
backoff=1)
retry_request(con.request)(action='/')
self.assertGreater(mock_connect.call_count, 1,
'Retry logic failed')
class CertificateConnectionClassTestCase(unittest.TestCase):
def setUp(self):
self.connection = CertificateConnection(cert_file='test.pem',
url='https://test.com/test')
self.connection.connect()
def test_adapter_internals(self):
adapter = self.connection.connection.session.adapters['https://']
self.assertTrue(isinstance(adapter, SignedHTTPSAdapter))
self.assertEqual(adapter.cert_file, 'test.pem')
if __name__ == '__main__':
sys.exit(unittest.main())
|
{
"content_hash": "01f0d746bd486e64a5657ad5c74d6f3d",
"timestamp": "",
"source": "github",
"line_count": 364,
"max_line_length": 77,
"avg_line_length": 37.08791208791209,
"alnum_prop": 0.5980740740740741,
"repo_name": "pquentin/libcloud",
"id": "147f5b4ad07bfac3c0ddb988d32cafcc2cf38548",
"size": "14308",
"binary": false,
"copies": "9",
"ref": "refs/heads/trunk",
"path": "libcloud/test/test_connection.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "2545"
},
{
"name": "PowerShell",
"bytes": "410"
},
{
"name": "Python",
"bytes": "6369895"
},
{
"name": "Shell",
"bytes": "5936"
}
],
"symlink_target": ""
}
|
import codecs
import csv
from datetime import date
from django.db import models
from django.utils.http import urlquote
from election.models import ElectionManager
from electoral_district.controllers import electoral_district_import_from_xml_data
from exception.models import handle_exception
from import_export_ctcl.controllers import create_candidate_selection_rows, retrieve_candidate_from_candidate_selection
import json
import magic
from organization.models import ORGANIZATION_TYPE_CHOICES, UNKNOWN, alphanumeric
from party.controllers import retrieve_all_party_names_and_ids_api, party_import_from_xml_data
from politician.models import GENDER_CHOICES, UNKNOWN
import urllib
from urllib.request import Request, urlopen
from voter_guide.models import ORGANIZATION_WORD
import wevote_functions.admin
from wevote_functions.functions import positive_value_exists, LANGUAGE_CODE_ENGLISH, LANGUAGE_CODE_SPANISH
import xml.etree.ElementTree as ElementTree
POSITION = 'POSITION'
ANY_STANCE = 'ANY_STANCE' # This is a way to indicate when we want to return any stance (support, oppose, no_stance)
SUPPORT = 'SUPPORT'
STILL_DECIDING = 'STILL_DECIDING'
NO_STANCE = 'NO_STANCE' # DALE 2016-8-29 We will want to deprecate NO_STANCE and replace with INFORMATION_ONLY
INFORMATION_ONLY = 'INFO_ONLY'
OPPOSE = 'OPPOSE'
PERCENT_RATING = 'PERCENT_RATING'
POSITION_CHOICES = (
# ('SUPPORT_STRONG', 'Strong Supports'), # I do not believe we will be offering 'SUPPORT_STRONG' as an option
(SUPPORT, 'Supports'),
(STILL_DECIDING, 'Still deciding'), # Still undecided
(NO_STANCE, 'No stance'), # We don't know the stance
(INFORMATION_ONLY, 'Information only'), # This entry is meant as food-for-thought and is not advocating
(OPPOSE, 'Opposes'),
(PERCENT_RATING, 'Percentage point rating'),
# ('OPPOSE_STRONG', 'Strongly Opposes'), # I do not believe we will be offering 'OPPOSE_STRONG' as an option
)
NO_STANCE = 'NO_STANCE'
CANDIDATE = 'CANDIDATE'
CONTEST_OFFICE = 'CONTEST_OFFICE'
ELECTED_OFFICE = 'ELECTED_OFFICE'
IMPORT_BALLOT_ITEM = 'IMPORT_BALLOT_ITEM'
IMPORT_VOTER = 'IMPORT_VOTER'
MEASURE = 'MEASURE'
POLITICIAN = 'POLITICIAN'
KIND_OF_BATCH_CHOICES = (
(MEASURE, 'Measure'),
(ELECTED_OFFICE, 'ElectedOffice'),
(CONTEST_OFFICE, 'ContestOffice'),
(CANDIDATE, 'Candidate'),
(ORGANIZATION_WORD, 'Organization'),
(POSITION, 'Position'),
(POLITICIAN, 'Politician'),
(IMPORT_BALLOT_ITEM, 'Ballot Returned'),
)
IMPORT_TO_BE_DETERMINED = 'IMPORT_TO_BE_DETERMINED'
DO_NOT_PROCESS = 'DO_NOT_PROCESS'
CLEAN_DATA_MANUALLY = 'CLEAN_DATA_MANUALLY'
IMPORT_CREATE = 'IMPORT_CREATE'
IMPORT_ADD_TO_EXISTING = 'IMPORT_ADD_TO_EXISTING'
IMPORT_DATA_ALREADY_MATCHING = 'IMPORT_DATA_ALREADY_MATCHING'
IMPORT_QUERY_ERROR = 'IMPORT_QUERY_ERROR'
KIND_OF_ACTION_CHOICES = (
(IMPORT_TO_BE_DETERMINED, 'To Be Determined'),
(DO_NOT_PROCESS, 'Do not process'),
(IMPORT_CREATE, 'Create'),
(IMPORT_ADD_TO_EXISTING, 'Add to Existing'),
)
BATCH_SET_SOURCE_CTCL = 'CTCL'
BATCH_SET_SOURCE_IMPORT_EXPORT_ENDORSEMENTS = 'IMPORT_EXPORT_ENDORSEMENTS'
BATCH_SET_SOURCE_IMPORT_BALLOTPEDIA_BALLOT_ITEMS = 'IMPORT_BALLOTPEDIA_BALLOT_ITEMS'
BATCH_IMPORT_KEYS_ACCEPTED_FOR_CANDIDATES = {
'ballotpedia_candidate_id': 'ballotpedia_candidate_id',
'ballotpedia_candidate_name': 'ballotpedia_candidate_name',
'ballotpedia_candidate_summary': 'ballotpedia_candidate_summary',
'ballotpedia_candidate_url': 'ballotpedia_candidate_url',
'ballotpedia_election_id': 'ballotpedia_election_id',
'ballotpedia_image_id': 'ballotpedia_image_id',
'ballotpedia_office_id': 'ballotpedia_office_id * (elected_office)', # For matching only
'ballotpedia_person_id': 'ballotpedia_person_id',
'ballotpedia_race_id': 'ballotpedia_race_id * (contest_office)', # For matching only
'birth_day_text': 'birth_day_text',
'candidate_batch_id': 'candidate_batch_id',
'candidate_ctcl_uuid': 'candidate_ctcl_uuid',
'candidate_ctcl_person_id': 'candidate_ctcl_person_id',
'candidate_email': 'candidate_email',
'candidate_gender': 'candidate_gender',
'candidate_is_top_ticket': 'candidate_is_top_ticket',
'candidate_is_incumbent': 'candidate_is_incumbent',
'candidate_name': 'candidate_name',
'candidate_participation_status': 'candidate_participation_status',
'candidate_party_name': 'candidate_party_name',
'candidate_profile_image_url': 'candidate_profile_image_url',
'candidate_twitter_handle': 'candidate_twitter_handle',
'candidate_url': 'candidate_url (website)',
'candidate_contact_form_url': 'candidate_contact_form_url',
'contest_office_name': 'contest_office_name *', # For matching only
'contest_office_we_vote_id': 'contest_office_we_vote_id *', # For matching only
'crowdpac_candidate_id': 'crowdpac_candidate_id',
'election_day': 'election_day',
'facebook_url': 'facebook_url',
'google_civic_election_id': 'google_civic_election_id',
'state_code': 'state_code',
}
# We Vote contest office key on the left, and Ballotpedia field name on right
# This gives us the option of putting the same field from a remote source into two We Vote fields
BATCH_HEADER_MAP_CANDIDATES_TO_BALLOTPEDIA_CANDIDATES = {
'ballotpedia_candidate_id': 'ballotpedia_candidate_id',
'ballotpedia_candidate_name': 'ballotpedia_candidate_name',
'ballotpedia_candidate_summary': 'ballotpedia_candidate_summary',
'ballotpedia_candidate_url': 'ballotpedia_candidate_url',
'ballotpedia_election_id': 'ballotpedia_election_id',
'ballotpedia_image_id': 'ballotpedia_image_id',
'ballotpedia_office_id': 'ballotpedia_office_id',
'ballotpedia_person_id': 'ballotpedia_person_id',
'ballotpedia_race_id': 'ballotpedia_race_id',
'birth_day_text': 'birth_day_text',
'candidate_email': 'candidate_email',
'candidate_gender': 'candidate_gender',
'candidate_is_incumbent': 'is_incumbent',
'candidate_participation_status': 'candidate_participation_status',
'candidate_party_name': 'candidate_party_name',
'candidate_twitter_handle': 'candidate_twitter_handle',
'candidate_url': 'candidate_url',
'candidate_contact_form_url': 'candidate_contact_form_url',
'crowdpac_candidate_id': 'crowdpac_candidate_id',
'facebook_url': 'facebook_url',
'state_code': 'state_code',
}
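# Illustrative sketch added for clarity -- not part of the original module. It shows how a
# header map like the one above can translate a single remote Ballotpedia record into
# We Vote field names; the sample record in the trailing comment is hypothetical.
def example_translate_ballotpedia_candidate(remote_record):
    """Return a dict keyed by We Vote field names, pulling values from one Ballotpedia record."""
    translated = {}
    for we_vote_key, ballotpedia_key in BATCH_HEADER_MAP_CANDIDATES_TO_BALLOTPEDIA_CANDIDATES.items():
        # Missing remote fields simply become empty strings, mirroring get_value_from_dict below
        translated[we_vote_key] = remote_record.get(ballotpedia_key, "")
    return translated
# Example (hypothetical data):
#   example_translate_ballotpedia_candidate({'ballotpedia_candidate_name': 'Jane Doe', 'is_incumbent': True})
#   returns {'ballotpedia_candidate_name': 'Jane Doe', 'candidate_is_incumbent': True, ...} with "" for the rest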
BATCH_IMPORT_KEYS_ACCEPTED_FOR_CONTEST_OFFICES = {
'ballotpedia_candidate_id': 'ballotpedia_candidate_id *', # For matching only
'ballotpedia_district_id': 'ballotpedia_district_id',
'ballotpedia_election_id': 'ballotpedia_election_id',
'ballotpedia_office_id': 'ballotpedia_office_id',
'ballotpedia_office_name': 'ballotpedia_office_name',
'ballotpedia_office_url': 'ballotpedia_office_url',
'ballotpedia_person_id': 'ballotpedia_person_id *', # For matching only
'ballotpedia_race_id': 'ballotpedia_race_id',
'ballotpedia_race_office_level': 'ballotpedia_race_office_level',
'candidate_name': 'candidate_name *', # For matching only
'candidate_selection_id1': 'candidate_selection_id1 *', # For matching only
'candidate_selection_id2': 'candidate_selection_id2 *', # For matching only
'candidate_selection_id3': 'candidate_selection_id3 *', # For matching only
'candidate_selection_id4': 'candidate_selection_id4 *', # For matching only
'candidate_selection_id5': 'candidate_selection_id5 *', # For matching only
'candidate_selection_id6': 'candidate_selection_id6 *', # For matching only
'candidate_selection_id7': 'candidate_selection_id7 *', # For matching only
'candidate_selection_id8': 'candidate_selection_id8 *', # For matching only
'candidate_selection_id9': 'candidate_selection_id9 *', # For matching only
'candidate_selection_id10': 'candidate_selection_id10 *', # For matching only
'contest_office_name': 'contest_office_name',
'contest_office_batch_id': 'contest_office_batch_id',
'contest_office_ctcl_uuid': 'contest_office_ctcl_uuid',
'contest_office_votes_allowed': 'contest_office_votes_allowed',
'contest_office_number_elected': 'contest_office_number_elected',
'contest_office_district_name': 'contest_office_district_name',
'elected_office_id': 'elected_office_id',
'election_day': 'election_day',
'electoral_district_id': 'electoral_district_id',
'google_civic_election_id': 'google_civic_election_id',
'is_ballotpedia_general_election': 'is_ballotpedia_general_election',
'is_ballotpedia_general_runoff_election': 'is_ballotpedia_general_runoff_election',
'is_ballotpedia_primary_election': 'is_ballotpedia_primary_election',
'is_ballotpedia_primary_runoff_election': 'is_ballotpedia_primary_runoff_election',
'state_code': 'state_code',
}
# We Vote contest office key on the left, and Ballotpedia field name on right
# This gives us the option of putting the same field from a remote source into two We Vote fields
BATCH_HEADER_MAP_CONTEST_OFFICES_TO_BALLOTPEDIA_RACES = {
'ballotpedia_district_id': 'ballotpedia_district_id',
'ballotpedia_election_id': 'ballotpedia_election_id',
'ballotpedia_office_id': 'ballotpedia_office_id',
'ballotpedia_office_name': 'office_name',
'ballotpedia_race_id': 'ballotpedia_race_id',
'ballotpedia_race_office_level': 'office_level',
'ballotpedia_office_url': 'url',
'contest_office_number_elected': 'number_of_seats',
'contest_office_district_name': 'office_district_name',
'election_day': 'election_date',
'is_ballotpedia_general_election': 'is_ballotpedia_general_election',
'is_ballotpedia_general_runoff_election': 'is_ballotpedia_general_runoff_election',
'is_ballotpedia_primary_election': 'is_ballotpedia_primary_election',
'is_ballotpedia_primary_runoff_election': 'is_ballotpedia_primary_runoff_election',
'state_code': 'office_district_state',
}
BATCH_IMPORT_KEYS_ACCEPTED_FOR_ELECTED_OFFICES = {
'elected_office_name': 'elected_office_name',
'electoral_district_id': 'electoral_district_id',
'state_code': 'state_code',
'elected_office_ctcl_uuid': 'elected_office_ctcl_uuid',
'elected_office_description': 'elected_office_description',
'elected_office_is_partisan': 'elected_office_is_partisan',
'elected_office_name_es': 'elected_office_name_es',
'elected_office_description_es': 'elected_office_description_es',
'elected_office_batch_id': 'elected_office_batch_id',
}
BATCH_IMPORT_KEYS_ACCEPTED_FOR_MEASURES = {
'ballotpedia_district_id': 'ballotpedia_district_id',
'ballotpedia_election_id': 'ballotpedia_election_id',
'ballotpedia_measure_id': 'ballotpedia_measure_id',
'ballotpedia_measure_name': 'ballotpedia_measure_name',
'ballotpedia_measure_status': 'ballotpedia_measure_status',
'ballotpedia_measure_summary': 'ballotpedia_measure_summary',
'ballotpedia_measure_text': 'ballotpedia_measure_text',
'ballotpedia_measure_url': 'ballotpedia_measure_url',
'ballotpedia_yes_vote_description': 'ballotpedia_yes_vote_description',
'ballotpedia_no_vote_description': 'ballotpedia_no_vote_description',
'ctcl_uuid': 'ctcl_uuid',
'election_day_text': 'election_day_text',
'electoral_district_id': 'electoral_district_id',
'measure_title': 'measure_title',
'measure_name': 'measure_name',
'measure_text': 'measure_text',
'measure_subtitle': 'measure_subtitle',
'state_code': 'state_code',
}
# We Vote contest office key on the left, and Ballotpedia field name on right
# This gives us the option of putting the same field from a remote source into two We Vote fields
BATCH_HEADER_MAP_MEASURES_TO_BALLOTPEDIA_MEASURES = {
'ballotpedia_district_id': 'ballotpedia_district_id',
'ballotpedia_election_id': 'ballotpedia_election_id',
'ballotpedia_measure_id': 'ballotpedia_measure_id',
'ballotpedia_measure_name': 'name',
'ballotpedia_measure_status': 'status',
'ballotpedia_measure_summary': 'summary',
'ballotpedia_measure_text': 'text',
'ballotpedia_measure_url': 'ballotpedia_measure_url',
'ballotpedia_yes_vote_description': 'ballotpedia_yes_vote_description',
'ballotpedia_no_vote_description': 'ballotpedia_no_vote_description',
'election_day_text': 'election_day_text',
'state_code': 'state_code',
}
BATCH_IMPORT_KEYS_ACCEPTED_FOR_ORGANIZATIONS = {
'organization_address': 'organization_address',
'organization_city': 'organization_city',
'organization_contact_name': 'organization_contact_name',
'organization_facebook': 'organization_facebook',
'organization_instagram': 'organization_instagram',
'organization_name': 'organization_name',
'organization_phone1': 'organization_phone1',
'organization_phone2': 'organization_phone2',
'organization_state': 'organization_state',
'organization_twitter_handle': 'organization_twitter_handle',
'organization_website': 'organization_website',
'organization_we_vote_id': 'organization_we_vote_id',
'organization_zip': 'organization_zip',
'organization_type': 'organization_type',
'state_served_code': 'state_served_code',
}
BATCH_IMPORT_KEYS_ACCEPTED_FOR_POLITICIANS = {
'politician_full_name': 'politician_full_name',
'politician_ctcl_uuid': 'politician_ctcl_uuid',
'politician_twitter_url': 'politician_twitter_url',
'politician_facebook_id': 'politician_facebook_id',
'politician_party_name': 'politician_party_name',
'politician_first_name': 'politician_first_name',
'politician_middle_name': 'politician_middle_name',
'politician_last_name': 'politician_last_name',
'politician_website_url': 'politician_website_url',
'politician_email_address': 'politician_email_address',
'politician_youtube_id': 'politician_youtube_id',
'politician_googleplus_id': 'politician_googleplus_id',
'politician_phone_number': 'politician_phone_number',
'politician_batch_id': 'politician_batch_id',
}
BATCH_IMPORT_KEYS_ACCEPTED_FOR_POSITIONS = {
'position_we_vote_id': 'position_we_vote_id',
'candidate_name': 'candidate_name',
'candidate_twitter_handle': 'candidate_twitter_handle',
'candidate_we_vote_id': 'candidate_we_vote_id',
'contest_office_name': 'contest_office_name',
'contest_measure_title': 'contest_measure_title',
'election_day': 'election_day',
'grade_rating': 'grade_rating',
'google_civic_election_id': 'google_civic_election_id',
'more_info_url': 'more_info_url',
'stance': 'stance (SUPPORT or OPPOSE)',
'support': 'support (TRUE or FALSE)',
'oppose': 'oppose (TRUE or FALSE)',
'percent_rating': 'percent_rating',
'statement_text': 'statement_text',
'state_code': 'state_code',
'organization_name': 'organization_name',
'organization_we_vote_id': 'organization_we_vote_id',
'organization_twitter_handle': 'organization_twitter_handle (position owner)',
}
BATCH_HEADER_MAP_FOR_POSITIONS = {
'position_we_vote_id': 'position_we_vote_id',
'candidate_name': 'candidate_name',
'candidate_twitter_handle': 'candidate_twitter_handle',
'candidate_we_vote_id': 'candidate_we_vote_id',
'contest_office_name': 'contest_office_name',
'contest_measure_title': 'contest_measure_title',
'election_day': 'election_day',
'grade_rating': 'grade_rating',
'google_civic_election_id': 'google_civic_election_id',
'measure_title': 'measure_title',
'measure_we_vote_id': 'measure_we_vote_id',
'more_info_url': 'more_info_url',
'stance': 'stance',
'support': 'support',
'oppose': 'oppose',
'percent_rating': 'percent_rating',
'statement_text': 'statement_text',
'state_code': 'state_code',
'organization_name': 'organization_name',
'organization_we_vote_id': 'organization_we_vote_id',
'organization_twitter_handle': 'organization_twitter_handle',
}
BATCH_IMPORT_KEYS_ACCEPTED_FOR_BALLOT_ITEMS = {
'contest_office_we_vote_id': 'contest_office_we_vote_id',
'contest_office_id': 'contest_office_id',
'contest_office_name': 'contest_office_name',
'candidate_name': 'candidate_name',
'candidate_twitter_handle': 'candidate_twitter_handle',
'contest_measure_we_vote_id': 'contest_measure_we_vote_id',
'contest_measure_id': 'contest_measure_id',
'contest_measure_name': 'contest_measure_name',
'contest_measure_text': 'contest_measure_text',
'contest_measure_url': 'contest_measure_url',
'election_day_text': 'election_day_text',
'local_ballot_order': 'local_ballot_order',
'no_vote_description': 'no_vote_description',
'yes_vote_description': 'yes_vote_description',
'polling_location_we_vote_id': 'polling_location_we_vote_id',
'state_code': 'state_code',
}
# We Vote contest office key on the left, and Ballotpedia field name on right
# This gives us the option of putting the same field from a remote source into two We Vote fields
BATCH_HEADER_MAP_BALLOT_ITEMS_TO_BALLOTPEDIA_VOTER_DISTRICTS = {
'ballotpedia_district_id': 'ballotpedia_district_id',
'ballotpedia_district_name': 'ballotpedia_district_name',
'contest_measure_id': 'contest_measure_id',
'contest_measure_we_vote_id': 'contest_measure_we_vote_id',
'contest_office_we_vote_id': 'contest_office_we_vote_id',
'contest_office_id': 'contest_office_id',
'election_day_text': 'election_day_text',
'local_ballot_order': 'local_ballot_order',
'polling_location_we_vote_id': 'polling_location_we_vote_id',
'state_code': 'state_code',
}
BATCH_IMPORT_KEYS_ACCEPTED_FOR_VOTERS = {
'first_name': 'first_name',
'middle_name': 'middle_name',
'last_name': 'last_name',
'email': 'email',
'we_vote_id': 'we_vote_id',
'twitter_screen_name': 'twitter_screen_name',
}
logger = wevote_functions.admin.get_logger(__name__)
def get_value_if_index_in_list(incoming_list, index):
try:
return incoming_list[index]
except IndexError:
return ""
def get_value_from_dict(structured_json, field_name):
try:
return structured_json[field_name]
except KeyError:
return ""
except IndexError:
return ""
def get_header_map_value_if_index_in_list(incoming_list, index, kind_of_batch=""):
try:
# The header_value is a value like "Organization Name" or "Street Address"
original_header_value = incoming_list[index]
original_header_value_str = str(original_header_value)
original_header_value_str = original_header_value_str.lower()
# We want to check to see if there is a suggested We Vote header for this value
batch_manager = BatchManager()
header_value_recognized_by_we_vote = batch_manager.fetch_batch_header_translation_suggestion(
kind_of_batch, original_header_value_str)
if positive_value_exists(header_value_recognized_by_we_vote):
return header_value_recognized_by_we_vote
else:
return original_header_value_str
except IndexError:
return ""
class BatchManager(models.Model):
def __unicode__(self):
return "BatchManager"
pass
def create_batch_from_uri(self, batch_uri, kind_of_batch, google_civic_election_id, organization_we_vote_id):
# Retrieve the CSV
response = urllib.request.urlopen(batch_uri)
csv_data = csv.reader(codecs.iterdecode(response, 'utf-8'))
batch_file_name = ""
return self.create_batch_from_csv_data(
batch_file_name, csv_data, kind_of_batch, google_civic_election_id, organization_we_vote_id)
def create_batch_from_local_file_upload(
self, batch_file, kind_of_batch, google_civic_election_id, organization_we_vote_id,
polling_location_we_vote_id=""):
if (batch_file.content_type == 'text/csv') or (batch_file.content_type == 'application/vnd.ms-excel'):
csv_data = csv.reader(codecs.iterdecode(batch_file, 'utf-8'), delimiter=',')
batch_file_name = batch_file.name
return self.create_batch_from_csv_data(
batch_file_name, csv_data, kind_of_batch, google_civic_election_id, organization_we_vote_id,
polling_location_we_vote_id)
status = "CREATE_BATCH_FILETYPE_NOT_RECOGNIZED"
results = {
'success': False,
'status': status,
'batch_header_id': 0,
'batch_saved': False,
'number_of_batch_rows': 0,
}
return results
def create_batch_from_voter_object_list(self, objects_list):
"""
Creates a batch from a list of voter objects
:param objects_list: list of voter objects
        :return: results dict with 'success', 'status', 'number_of_voters' and, when a batch is saved,
                 'batch_header_id'
"""
status = ''
success = False
number_of_voters = 0
        google_civic_election_id = 0
        # organization_we_vote_id isn't passed in for voter imports; default it to an
        # empty string so the BatchDescription below can still be saved
        organization_we_vote_id = ""
if not objects_list:
results = {
'success': False,
'status': "IMPORT_VOTERS_FAILED",
'number_of_voters': 0,
}
return results
first_line = True
batch_header_id = 0
batch_header_map_id = 0
for one_entry in objects_list:
first_name = one_entry.first_name
middle_name = one_entry.middle_name
last_name = one_entry.last_name
email = one_entry.email
we_vote_id = one_entry.we_vote_id
twitter_screen_name = one_entry.twitter_screen_name
if first_line:
first_line = False
try:
batch_header = BatchHeader.objects.create(
batch_header_column_000=BATCH_IMPORT_KEYS_ACCEPTED_FOR_VOTERS['first_name'],
batch_header_column_001=BATCH_IMPORT_KEYS_ACCEPTED_FOR_VOTERS['middle_name'],
batch_header_column_002=BATCH_IMPORT_KEYS_ACCEPTED_FOR_VOTERS['last_name'],
batch_header_column_003=BATCH_IMPORT_KEYS_ACCEPTED_FOR_VOTERS['email'],
batch_header_column_004=BATCH_IMPORT_KEYS_ACCEPTED_FOR_VOTERS['we_vote_id'],
batch_header_column_005=BATCH_IMPORT_KEYS_ACCEPTED_FOR_VOTERS['twitter_screen_name'],
)
batch_header_id = batch_header.id
if positive_value_exists(batch_header_id):
# Save an initial BatchHeaderMap
batch_header_map = BatchHeaderMap.objects.create(
batch_header_id=batch_header_id,
batch_header_map_000='first_name',
batch_header_map_001='middle_name',
batch_header_map_002='last_name',
batch_header_map_003='email',
batch_header_map_004='we_vote_id',
batch_header_map_005='twitter_screen_name',
)
batch_header_map_id = batch_header_map.id
status += " BATCH_HEADER_MAP_SAVED"
if positive_value_exists(batch_header_id) and positive_value_exists(batch_header_map_id):
# Now save the BatchDescription
batch_name = "IMPORT_VOTERS " + " batch_header_id: " + str(batch_header_id)
batch_description_text = ""
batch_description = BatchDescription.objects.create(
batch_header_id=batch_header_id,
batch_header_map_id=batch_header_map_id,
batch_name=batch_name,
batch_description_text=batch_description_text,
google_civic_election_id=google_civic_election_id,
kind_of_batch=IMPORT_VOTER,
organization_we_vote_id=organization_we_vote_id,
)
status += " BATCH_DESCRIPTION_SAVED"
success = True
except Exception as e:
batch_header_id = 0
status += " EXCEPTION_BATCH_HEADER"
handle_exception(e, logger=logger, exception_message=status)
break
if not positive_value_exists(batch_header_id):
break
try:
batch_row = BatchRow.objects.create(
batch_header_id=batch_header_id,
batch_row_000=first_name,
batch_row_001=middle_name,
batch_row_002=last_name,
batch_row_003=email,
batch_row_004=we_vote_id,
batch_row_005=twitter_screen_name,
)
number_of_voters += 1
except Exception as e:
# Stop trying to save rows -- break out of the for loop
status += " EXCEPTION_BATCH_ROW"
handle_exception(e, logger=logger, exception_message=status)
break
results = {
'success': success,
'status': status,
'batch_header_id': batch_header_id,
'batch_saved': success,
'number_of_voters': number_of_voters,
'google_civic_election_id': google_civic_election_id,
}
return results
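    # Illustrative call, with hypothetical data -- not part of the original module:
    #   results = BatchManager().create_batch_from_voter_object_list(voter_objects)
    #   if results['success']:
    #       print(results['number_of_voters'], "voters queued under batch", results['batch_header_id'])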
def create_batch_from_csv_data(self, file_name, csv_data, kind_of_batch, google_civic_election_id=0,
organization_we_vote_id="", polling_location_we_vote_id=""):
first_line = True
success = False
status = ""
        number_of_batch_rows = 0
        batch_name = ""
# limit_for_testing = 5
# Retrieve from JSON
# request = Request(batch_uri, headers={'User-Agent': 'Mozilla/5.0'})
# url_processor = urlopen(request)
# data = url_processor.read()
# incoming_data = data.decode('utf-8')
# structured_json = json.loads(incoming_data)
# for one_entry in structured_json:
batch_header_id = 0
batch_header_map_id = 0
for line in csv_data:
if first_line:
first_line = False
try:
batch_header = BatchHeader.objects.create(
batch_header_column_000=get_value_if_index_in_list(line, 0),
batch_header_column_001=get_value_if_index_in_list(line, 1),
batch_header_column_002=get_value_if_index_in_list(line, 2),
batch_header_column_003=get_value_if_index_in_list(line, 3),
batch_header_column_004=get_value_if_index_in_list(line, 4),
batch_header_column_005=get_value_if_index_in_list(line, 5),
batch_header_column_006=get_value_if_index_in_list(line, 6),
batch_header_column_007=get_value_if_index_in_list(line, 7),
batch_header_column_008=get_value_if_index_in_list(line, 8),
batch_header_column_009=get_value_if_index_in_list(line, 9),
batch_header_column_010=get_value_if_index_in_list(line, 10),
batch_header_column_011=get_value_if_index_in_list(line, 11),
batch_header_column_012=get_value_if_index_in_list(line, 12),
batch_header_column_013=get_value_if_index_in_list(line, 13),
batch_header_column_014=get_value_if_index_in_list(line, 14),
batch_header_column_015=get_value_if_index_in_list(line, 15),
batch_header_column_016=get_value_if_index_in_list(line, 16),
batch_header_column_017=get_value_if_index_in_list(line, 17),
batch_header_column_018=get_value_if_index_in_list(line, 18),
batch_header_column_019=get_value_if_index_in_list(line, 19),
batch_header_column_020=get_value_if_index_in_list(line, 20),
batch_header_column_021=get_value_if_index_in_list(line, 21),
batch_header_column_022=get_value_if_index_in_list(line, 22),
batch_header_column_023=get_value_if_index_in_list(line, 23),
batch_header_column_024=get_value_if_index_in_list(line, 24),
batch_header_column_025=get_value_if_index_in_list(line, 25),
batch_header_column_026=get_value_if_index_in_list(line, 26),
batch_header_column_027=get_value_if_index_in_list(line, 27),
batch_header_column_028=get_value_if_index_in_list(line, 28),
batch_header_column_029=get_value_if_index_in_list(line, 29),
batch_header_column_030=get_value_if_index_in_list(line, 30),
batch_header_column_031=get_value_if_index_in_list(line, 31),
batch_header_column_032=get_value_if_index_in_list(line, 32),
batch_header_column_033=get_value_if_index_in_list(line, 33),
batch_header_column_034=get_value_if_index_in_list(line, 34),
batch_header_column_035=get_value_if_index_in_list(line, 35),
batch_header_column_036=get_value_if_index_in_list(line, 36),
batch_header_column_037=get_value_if_index_in_list(line, 37),
batch_header_column_038=get_value_if_index_in_list(line, 38),
batch_header_column_039=get_value_if_index_in_list(line, 39),
batch_header_column_040=get_value_if_index_in_list(line, 40),
batch_header_column_041=get_value_if_index_in_list(line, 41),
batch_header_column_042=get_value_if_index_in_list(line, 42),
batch_header_column_043=get_value_if_index_in_list(line, 43),
batch_header_column_044=get_value_if_index_in_list(line, 44),
batch_header_column_045=get_value_if_index_in_list(line, 45),
batch_header_column_046=get_value_if_index_in_list(line, 46),
batch_header_column_047=get_value_if_index_in_list(line, 47),
batch_header_column_048=get_value_if_index_in_list(line, 48),
batch_header_column_049=get_value_if_index_in_list(line, 49),
batch_header_column_050=get_value_if_index_in_list(line, 50),
)
batch_header_id = batch_header.id
if positive_value_exists(batch_header_id):
# Save an initial BatchHeaderMap
# For each line, check for translation suggestions
batch_header_map = BatchHeaderMap.objects.create(
batch_header_id=batch_header_id,
batch_header_map_000=get_header_map_value_if_index_in_list(line, 0, kind_of_batch),
batch_header_map_001=get_header_map_value_if_index_in_list(line, 1, kind_of_batch),
batch_header_map_002=get_header_map_value_if_index_in_list(line, 2, kind_of_batch),
batch_header_map_003=get_header_map_value_if_index_in_list(line, 3, kind_of_batch),
batch_header_map_004=get_header_map_value_if_index_in_list(line, 4, kind_of_batch),
batch_header_map_005=get_header_map_value_if_index_in_list(line, 5, kind_of_batch),
batch_header_map_006=get_header_map_value_if_index_in_list(line, 6, kind_of_batch),
batch_header_map_007=get_header_map_value_if_index_in_list(line, 7, kind_of_batch),
batch_header_map_008=get_header_map_value_if_index_in_list(line, 8, kind_of_batch),
batch_header_map_009=get_header_map_value_if_index_in_list(line, 9, kind_of_batch),
batch_header_map_010=get_header_map_value_if_index_in_list(line, 10, kind_of_batch),
batch_header_map_011=get_header_map_value_if_index_in_list(line, 11, kind_of_batch),
batch_header_map_012=get_header_map_value_if_index_in_list(line, 12, kind_of_batch),
batch_header_map_013=get_header_map_value_if_index_in_list(line, 13, kind_of_batch),
batch_header_map_014=get_header_map_value_if_index_in_list(line, 14, kind_of_batch),
batch_header_map_015=get_header_map_value_if_index_in_list(line, 15, kind_of_batch),
batch_header_map_016=get_header_map_value_if_index_in_list(line, 16, kind_of_batch),
batch_header_map_017=get_header_map_value_if_index_in_list(line, 17, kind_of_batch),
batch_header_map_018=get_header_map_value_if_index_in_list(line, 18, kind_of_batch),
batch_header_map_019=get_header_map_value_if_index_in_list(line, 19, kind_of_batch),
batch_header_map_020=get_header_map_value_if_index_in_list(line, 20, kind_of_batch),
batch_header_map_021=get_header_map_value_if_index_in_list(line, 21, kind_of_batch),
batch_header_map_022=get_header_map_value_if_index_in_list(line, 22, kind_of_batch),
batch_header_map_023=get_header_map_value_if_index_in_list(line, 23, kind_of_batch),
batch_header_map_024=get_header_map_value_if_index_in_list(line, 24, kind_of_batch),
batch_header_map_025=get_header_map_value_if_index_in_list(line, 25, kind_of_batch),
batch_header_map_026=get_header_map_value_if_index_in_list(line, 26, kind_of_batch),
batch_header_map_027=get_header_map_value_if_index_in_list(line, 27, kind_of_batch),
batch_header_map_028=get_header_map_value_if_index_in_list(line, 28, kind_of_batch),
batch_header_map_029=get_header_map_value_if_index_in_list(line, 29, kind_of_batch),
batch_header_map_030=get_header_map_value_if_index_in_list(line, 30, kind_of_batch),
batch_header_map_031=get_header_map_value_if_index_in_list(line, 31, kind_of_batch),
batch_header_map_032=get_header_map_value_if_index_in_list(line, 32, kind_of_batch),
batch_header_map_033=get_header_map_value_if_index_in_list(line, 33, kind_of_batch),
batch_header_map_034=get_header_map_value_if_index_in_list(line, 34, kind_of_batch),
batch_header_map_035=get_header_map_value_if_index_in_list(line, 35, kind_of_batch),
batch_header_map_036=get_header_map_value_if_index_in_list(line, 36, kind_of_batch),
batch_header_map_037=get_header_map_value_if_index_in_list(line, 37, kind_of_batch),
batch_header_map_038=get_header_map_value_if_index_in_list(line, 38, kind_of_batch),
batch_header_map_039=get_header_map_value_if_index_in_list(line, 39, kind_of_batch),
batch_header_map_040=get_header_map_value_if_index_in_list(line, 40, kind_of_batch),
batch_header_map_041=get_header_map_value_if_index_in_list(line, 41, kind_of_batch),
batch_header_map_042=get_header_map_value_if_index_in_list(line, 42, kind_of_batch),
batch_header_map_043=get_header_map_value_if_index_in_list(line, 43, kind_of_batch),
batch_header_map_044=get_header_map_value_if_index_in_list(line, 44, kind_of_batch),
batch_header_map_045=get_header_map_value_if_index_in_list(line, 45, kind_of_batch),
batch_header_map_046=get_header_map_value_if_index_in_list(line, 46, kind_of_batch),
batch_header_map_047=get_header_map_value_if_index_in_list(line, 47, kind_of_batch),
batch_header_map_048=get_header_map_value_if_index_in_list(line, 48, kind_of_batch),
batch_header_map_049=get_header_map_value_if_index_in_list(line, 49, kind_of_batch),
batch_header_map_050=get_header_map_value_if_index_in_list(line, 50, kind_of_batch),
)
batch_header_map_id = batch_header_map.id
status += "BATCH_HEADER_MAP_SAVED "
if positive_value_exists(batch_header_id) and positive_value_exists(batch_header_map_id):
# Now save the BatchDescription
if positive_value_exists(file_name):
batch_name = str(batch_header_id) + ": " + file_name
if not positive_value_exists(batch_name):
batch_name = str(batch_header_id) + ": " + kind_of_batch
batch_description_text = ""
batch_description = BatchDescription.objects.create(
batch_header_id=batch_header_id,
batch_header_map_id=batch_header_map_id,
batch_name=batch_name,
batch_description_text=batch_description_text,
google_civic_election_id=google_civic_election_id,
kind_of_batch=kind_of_batch,
organization_we_vote_id=organization_we_vote_id,
polling_location_we_vote_id=polling_location_we_vote_id,
# source_uri=batch_uri,
)
status += "BATCH_DESCRIPTION_SAVED "
success = True
except Exception as e:
# Stop trying to save rows -- break out of the for loop
batch_header_id = 0
status += "EXCEPTION_BATCH_HEADER "
handle_exception(e, logger=logger, exception_message=status)
break
else:
# if number_of_batch_rows >= limit_for_testing:
# break
if positive_value_exists(batch_header_id):
try:
batch_row = BatchRow.objects.create(
batch_header_id=batch_header_id,
batch_row_000=get_value_if_index_in_list(line, 0),
batch_row_001=get_value_if_index_in_list(line, 1),
batch_row_002=get_value_if_index_in_list(line, 2),
batch_row_003=get_value_if_index_in_list(line, 3),
batch_row_004=get_value_if_index_in_list(line, 4),
batch_row_005=get_value_if_index_in_list(line, 5),
batch_row_006=get_value_if_index_in_list(line, 6),
batch_row_007=get_value_if_index_in_list(line, 7),
batch_row_008=get_value_if_index_in_list(line, 8),
batch_row_009=get_value_if_index_in_list(line, 9),
batch_row_010=get_value_if_index_in_list(line, 10),
batch_row_011=get_value_if_index_in_list(line, 11),
batch_row_012=get_value_if_index_in_list(line, 12),
batch_row_013=get_value_if_index_in_list(line, 13),
batch_row_014=get_value_if_index_in_list(line, 14),
batch_row_015=get_value_if_index_in_list(line, 15),
batch_row_016=get_value_if_index_in_list(line, 16),
batch_row_017=get_value_if_index_in_list(line, 17),
batch_row_018=get_value_if_index_in_list(line, 18),
batch_row_019=get_value_if_index_in_list(line, 19),
batch_row_020=get_value_if_index_in_list(line, 20),
batch_row_021=get_value_if_index_in_list(line, 21),
batch_row_022=get_value_if_index_in_list(line, 22),
batch_row_023=get_value_if_index_in_list(line, 23),
batch_row_024=get_value_if_index_in_list(line, 24),
batch_row_025=get_value_if_index_in_list(line, 25),
batch_row_026=get_value_if_index_in_list(line, 26),
batch_row_027=get_value_if_index_in_list(line, 27),
batch_row_028=get_value_if_index_in_list(line, 28),
batch_row_029=get_value_if_index_in_list(line, 29),
batch_row_030=get_value_if_index_in_list(line, 30),
batch_row_031=get_value_if_index_in_list(line, 31),
batch_row_032=get_value_if_index_in_list(line, 32),
batch_row_033=get_value_if_index_in_list(line, 33),
batch_row_034=get_value_if_index_in_list(line, 34),
batch_row_035=get_value_if_index_in_list(line, 35),
batch_row_036=get_value_if_index_in_list(line, 36),
batch_row_037=get_value_if_index_in_list(line, 37),
batch_row_038=get_value_if_index_in_list(line, 38),
batch_row_039=get_value_if_index_in_list(line, 39),
batch_row_040=get_value_if_index_in_list(line, 40),
batch_row_041=get_value_if_index_in_list(line, 41),
batch_row_042=get_value_if_index_in_list(line, 42),
batch_row_043=get_value_if_index_in_list(line, 43),
batch_row_044=get_value_if_index_in_list(line, 44),
batch_row_045=get_value_if_index_in_list(line, 45),
batch_row_046=get_value_if_index_in_list(line, 46),
batch_row_047=get_value_if_index_in_list(line, 47),
batch_row_048=get_value_if_index_in_list(line, 48),
batch_row_049=get_value_if_index_in_list(line, 49),
batch_row_050=get_value_if_index_in_list(line, 50),
)
number_of_batch_rows += 1
except Exception as e:
# Stop trying to save rows -- break out of the for loop
status += "EXCEPTION_BATCH_ROW "
break
results = {
'success': success,
'status': status,
'batch_header_id': batch_header_id,
'batch_saved': success,
'number_of_batch_rows': number_of_batch_rows,
}
return results
def create_batch_from_json(self, file_name, structured_json_list, mapping_dict, kind_of_batch,
google_civic_election_id=0, organization_we_vote_id="", polling_location_we_vote_id="",
batch_set_id=0, state_code=""):
success = False
status = ""
number_of_batch_rows = 0
# limit_for_testing = 5
batch_header_id = 0
batch_header_map_id = 0
batch_name = ""
if not len(structured_json_list):
# If there aren't any values, don't create a batch
results = {
'success': success,
'status': status,
'batch_header_id': batch_header_id,
'batch_saved': success,
'number_of_batch_rows': number_of_batch_rows,
}
return results
        # Build ordered lists (index 0 - n) of the We Vote field names and the matching remote source field names
we_vote_keys = list(mapping_dict.keys())
remote_source_keys = list(mapping_dict.values())
try:
batch_header = BatchHeader.objects.create(
batch_header_column_000=get_value_if_index_in_list(remote_source_keys, 0),
batch_header_column_001=get_value_if_index_in_list(remote_source_keys, 1),
batch_header_column_002=get_value_if_index_in_list(remote_source_keys, 2),
batch_header_column_003=get_value_if_index_in_list(remote_source_keys, 3),
batch_header_column_004=get_value_if_index_in_list(remote_source_keys, 4),
batch_header_column_005=get_value_if_index_in_list(remote_source_keys, 5),
batch_header_column_006=get_value_if_index_in_list(remote_source_keys, 6),
batch_header_column_007=get_value_if_index_in_list(remote_source_keys, 7),
batch_header_column_008=get_value_if_index_in_list(remote_source_keys, 8),
batch_header_column_009=get_value_if_index_in_list(remote_source_keys, 9),
batch_header_column_010=get_value_if_index_in_list(remote_source_keys, 10),
batch_header_column_011=get_value_if_index_in_list(remote_source_keys, 11),
batch_header_column_012=get_value_if_index_in_list(remote_source_keys, 12),
batch_header_column_013=get_value_if_index_in_list(remote_source_keys, 13),
batch_header_column_014=get_value_if_index_in_list(remote_source_keys, 14),
batch_header_column_015=get_value_if_index_in_list(remote_source_keys, 15),
batch_header_column_016=get_value_if_index_in_list(remote_source_keys, 16),
batch_header_column_017=get_value_if_index_in_list(remote_source_keys, 17),
batch_header_column_018=get_value_if_index_in_list(remote_source_keys, 18),
batch_header_column_019=get_value_if_index_in_list(remote_source_keys, 19),
batch_header_column_020=get_value_if_index_in_list(remote_source_keys, 20),
batch_header_column_021=get_value_if_index_in_list(remote_source_keys, 21),
batch_header_column_022=get_value_if_index_in_list(remote_source_keys, 22),
batch_header_column_023=get_value_if_index_in_list(remote_source_keys, 23),
batch_header_column_024=get_value_if_index_in_list(remote_source_keys, 24),
batch_header_column_025=get_value_if_index_in_list(remote_source_keys, 25),
batch_header_column_026=get_value_if_index_in_list(remote_source_keys, 26),
batch_header_column_027=get_value_if_index_in_list(remote_source_keys, 27),
batch_header_column_028=get_value_if_index_in_list(remote_source_keys, 28),
batch_header_column_029=get_value_if_index_in_list(remote_source_keys, 29),
batch_header_column_030=get_value_if_index_in_list(remote_source_keys, 30),
batch_header_column_031=get_value_if_index_in_list(remote_source_keys, 31),
batch_header_column_032=get_value_if_index_in_list(remote_source_keys, 32),
batch_header_column_033=get_value_if_index_in_list(remote_source_keys, 33),
batch_header_column_034=get_value_if_index_in_list(remote_source_keys, 34),
batch_header_column_035=get_value_if_index_in_list(remote_source_keys, 35),
batch_header_column_036=get_value_if_index_in_list(remote_source_keys, 36),
batch_header_column_037=get_value_if_index_in_list(remote_source_keys, 37),
batch_header_column_038=get_value_if_index_in_list(remote_source_keys, 38),
batch_header_column_039=get_value_if_index_in_list(remote_source_keys, 39),
batch_header_column_040=get_value_if_index_in_list(remote_source_keys, 40),
batch_header_column_041=get_value_if_index_in_list(remote_source_keys, 41),
batch_header_column_042=get_value_if_index_in_list(remote_source_keys, 42),
batch_header_column_043=get_value_if_index_in_list(remote_source_keys, 43),
batch_header_column_044=get_value_if_index_in_list(remote_source_keys, 44),
batch_header_column_045=get_value_if_index_in_list(remote_source_keys, 45),
batch_header_column_046=get_value_if_index_in_list(remote_source_keys, 46),
batch_header_column_047=get_value_if_index_in_list(remote_source_keys, 47),
batch_header_column_048=get_value_if_index_in_list(remote_source_keys, 48),
batch_header_column_049=get_value_if_index_in_list(remote_source_keys, 49),
batch_header_column_050=get_value_if_index_in_list(remote_source_keys, 50),
)
batch_header_id = batch_header.id
if positive_value_exists(batch_header_id):
# Save an initial BatchHeaderMap
# For each line, check for translation suggestions
batch_header_map = BatchHeaderMap.objects.create(
batch_header_id=batch_header_id,
batch_header_map_000=get_value_if_index_in_list(we_vote_keys, 0),
batch_header_map_001=get_value_if_index_in_list(we_vote_keys, 1),
batch_header_map_002=get_value_if_index_in_list(we_vote_keys, 2),
batch_header_map_003=get_value_if_index_in_list(we_vote_keys, 3),
batch_header_map_004=get_value_if_index_in_list(we_vote_keys, 4),
batch_header_map_005=get_value_if_index_in_list(we_vote_keys, 5),
batch_header_map_006=get_value_if_index_in_list(we_vote_keys, 6),
batch_header_map_007=get_value_if_index_in_list(we_vote_keys, 7),
batch_header_map_008=get_value_if_index_in_list(we_vote_keys, 8),
batch_header_map_009=get_value_if_index_in_list(we_vote_keys, 9),
batch_header_map_010=get_value_if_index_in_list(we_vote_keys, 10),
batch_header_map_011=get_value_if_index_in_list(we_vote_keys, 11),
batch_header_map_012=get_value_if_index_in_list(we_vote_keys, 12),
batch_header_map_013=get_value_if_index_in_list(we_vote_keys, 13),
batch_header_map_014=get_value_if_index_in_list(we_vote_keys, 14),
batch_header_map_015=get_value_if_index_in_list(we_vote_keys, 15),
batch_header_map_016=get_value_if_index_in_list(we_vote_keys, 16),
batch_header_map_017=get_value_if_index_in_list(we_vote_keys, 17),
batch_header_map_018=get_value_if_index_in_list(we_vote_keys, 18),
batch_header_map_019=get_value_if_index_in_list(we_vote_keys, 19),
batch_header_map_020=get_value_if_index_in_list(we_vote_keys, 20),
batch_header_map_021=get_value_if_index_in_list(we_vote_keys, 21),
batch_header_map_022=get_value_if_index_in_list(we_vote_keys, 22),
batch_header_map_023=get_value_if_index_in_list(we_vote_keys, 23),
batch_header_map_024=get_value_if_index_in_list(we_vote_keys, 24),
batch_header_map_025=get_value_if_index_in_list(we_vote_keys, 25),
batch_header_map_026=get_value_if_index_in_list(we_vote_keys, 26),
batch_header_map_027=get_value_if_index_in_list(we_vote_keys, 27),
batch_header_map_028=get_value_if_index_in_list(we_vote_keys, 28),
batch_header_map_029=get_value_if_index_in_list(we_vote_keys, 29),
batch_header_map_030=get_value_if_index_in_list(we_vote_keys, 30),
batch_header_map_031=get_value_if_index_in_list(we_vote_keys, 31),
batch_header_map_032=get_value_if_index_in_list(we_vote_keys, 32),
batch_header_map_033=get_value_if_index_in_list(we_vote_keys, 33),
batch_header_map_034=get_value_if_index_in_list(we_vote_keys, 34),
batch_header_map_035=get_value_if_index_in_list(we_vote_keys, 35),
batch_header_map_036=get_value_if_index_in_list(we_vote_keys, 36),
batch_header_map_037=get_value_if_index_in_list(we_vote_keys, 37),
batch_header_map_038=get_value_if_index_in_list(we_vote_keys, 38),
batch_header_map_039=get_value_if_index_in_list(we_vote_keys, 39),
batch_header_map_040=get_value_if_index_in_list(we_vote_keys, 40),
batch_header_map_041=get_value_if_index_in_list(we_vote_keys, 41),
batch_header_map_042=get_value_if_index_in_list(we_vote_keys, 42),
batch_header_map_043=get_value_if_index_in_list(we_vote_keys, 43),
batch_header_map_044=get_value_if_index_in_list(we_vote_keys, 44),
batch_header_map_045=get_value_if_index_in_list(we_vote_keys, 45),
batch_header_map_046=get_value_if_index_in_list(we_vote_keys, 46),
batch_header_map_047=get_value_if_index_in_list(we_vote_keys, 47),
batch_header_map_048=get_value_if_index_in_list(we_vote_keys, 48),
batch_header_map_049=get_value_if_index_in_list(we_vote_keys, 49),
batch_header_map_050=get_value_if_index_in_list(we_vote_keys, 50),
)
batch_header_map_id = batch_header_map.id
status += "BATCH_HEADER_MAP_SAVED_FOR_JSON "
if positive_value_exists(batch_header_id) and positive_value_exists(batch_header_map_id):
# Now save the BatchDescription
if positive_value_exists(file_name):
batch_name = str(batch_header_id) + ": " + file_name
if not positive_value_exists(batch_name):
batch_name = str(batch_header_id) + ": " + kind_of_batch
batch_description_text = ""
batch_description = BatchDescription.objects.create(
batch_header_id=batch_header_id,
batch_header_map_id=batch_header_map_id,
batch_name=batch_name,
batch_description_text=batch_description_text,
batch_set_id=batch_set_id,
google_civic_election_id=google_civic_election_id,
kind_of_batch=kind_of_batch,
organization_we_vote_id=organization_we_vote_id,
polling_location_we_vote_id=polling_location_we_vote_id,
# source_uri=batch_uri,
)
status += "BATCH_DESCRIPTION_SAVED_FOR_JSON "
success = True
except Exception as e:
# Stop trying to save rows -- break out of the for loop
batch_header_id = 0
status += "EXCEPTION_BATCH_HEADER_FOR_JSON "
handle_exception(e, logger=logger, exception_message=status)
if positive_value_exists(batch_header_id):
for one_dict in structured_json_list:
# if number_of_batch_rows >= limit_for_testing:
# break
try:
batch_row = BatchRow.objects.create(
batch_header_id=batch_header_id,
batch_row_000=get_value_from_dict(one_dict, get_value_if_index_in_list(remote_source_keys, 0)),
batch_row_001=get_value_from_dict(one_dict, get_value_if_index_in_list(remote_source_keys, 1)),
batch_row_002=get_value_from_dict(one_dict, get_value_if_index_in_list(remote_source_keys, 2)),
batch_row_003=get_value_from_dict(one_dict, get_value_if_index_in_list(remote_source_keys, 3)),
batch_row_004=get_value_from_dict(one_dict, get_value_if_index_in_list(remote_source_keys, 4)),
batch_row_005=get_value_from_dict(one_dict, get_value_if_index_in_list(remote_source_keys, 5)),
batch_row_006=get_value_from_dict(one_dict, get_value_if_index_in_list(remote_source_keys, 6)),
batch_row_007=get_value_from_dict(one_dict, get_value_if_index_in_list(remote_source_keys, 7)),
batch_row_008=get_value_from_dict(one_dict, get_value_if_index_in_list(remote_source_keys, 8)),
batch_row_009=get_value_from_dict(one_dict, get_value_if_index_in_list(remote_source_keys, 9)),
batch_row_010=get_value_from_dict(one_dict, get_value_if_index_in_list(remote_source_keys, 10)),
batch_row_011=get_value_from_dict(one_dict, get_value_if_index_in_list(remote_source_keys, 11)),
batch_row_012=get_value_from_dict(one_dict, get_value_if_index_in_list(remote_source_keys, 12)),
batch_row_013=get_value_from_dict(one_dict, get_value_if_index_in_list(remote_source_keys, 13)),
batch_row_014=get_value_from_dict(one_dict, get_value_if_index_in_list(remote_source_keys, 14)),
batch_row_015=get_value_from_dict(one_dict, get_value_if_index_in_list(remote_source_keys, 15)),
batch_row_016=get_value_from_dict(one_dict, get_value_if_index_in_list(remote_source_keys, 16)),
batch_row_017=get_value_from_dict(one_dict, get_value_if_index_in_list(remote_source_keys, 17)),
batch_row_018=get_value_from_dict(one_dict, get_value_if_index_in_list(remote_source_keys, 18)),
batch_row_019=get_value_from_dict(one_dict, get_value_if_index_in_list(remote_source_keys, 19)),
batch_row_020=get_value_from_dict(one_dict, get_value_if_index_in_list(remote_source_keys, 20)),
batch_row_021=get_value_from_dict(one_dict, get_value_if_index_in_list(remote_source_keys, 21)),
batch_row_022=get_value_from_dict(one_dict, get_value_if_index_in_list(remote_source_keys, 22)),
batch_row_023=get_value_from_dict(one_dict, get_value_if_index_in_list(remote_source_keys, 23)),
batch_row_024=get_value_from_dict(one_dict, get_value_if_index_in_list(remote_source_keys, 24)),
batch_row_025=get_value_from_dict(one_dict, get_value_if_index_in_list(remote_source_keys, 25)),
batch_row_026=get_value_from_dict(one_dict, get_value_if_index_in_list(remote_source_keys, 26)),
batch_row_027=get_value_from_dict(one_dict, get_value_if_index_in_list(remote_source_keys, 27)),
batch_row_028=get_value_from_dict(one_dict, get_value_if_index_in_list(remote_source_keys, 28)),
batch_row_029=get_value_from_dict(one_dict, get_value_if_index_in_list(remote_source_keys, 29)),
batch_row_030=get_value_from_dict(one_dict, get_value_if_index_in_list(remote_source_keys, 30)),
batch_row_031=get_value_from_dict(one_dict, get_value_if_index_in_list(remote_source_keys, 31)),
batch_row_032=get_value_from_dict(one_dict, get_value_if_index_in_list(remote_source_keys, 32)),
batch_row_033=get_value_from_dict(one_dict, get_value_if_index_in_list(remote_source_keys, 33)),
batch_row_034=get_value_from_dict(one_dict, get_value_if_index_in_list(remote_source_keys, 34)),
batch_row_035=get_value_from_dict(one_dict, get_value_if_index_in_list(remote_source_keys, 35)),
batch_row_036=get_value_from_dict(one_dict, get_value_if_index_in_list(remote_source_keys, 36)),
batch_row_037=get_value_from_dict(one_dict, get_value_if_index_in_list(remote_source_keys, 37)),
batch_row_038=get_value_from_dict(one_dict, get_value_if_index_in_list(remote_source_keys, 38)),
batch_row_039=get_value_from_dict(one_dict, get_value_if_index_in_list(remote_source_keys, 39)),
batch_row_040=get_value_from_dict(one_dict, get_value_if_index_in_list(remote_source_keys, 40)),
batch_row_041=get_value_from_dict(one_dict, get_value_if_index_in_list(remote_source_keys, 41)),
batch_row_042=get_value_from_dict(one_dict, get_value_if_index_in_list(remote_source_keys, 42)),
batch_row_043=get_value_from_dict(one_dict, get_value_if_index_in_list(remote_source_keys, 43)),
batch_row_044=get_value_from_dict(one_dict, get_value_if_index_in_list(remote_source_keys, 44)),
batch_row_045=get_value_from_dict(one_dict, get_value_if_index_in_list(remote_source_keys, 45)),
batch_row_046=get_value_from_dict(one_dict, get_value_if_index_in_list(remote_source_keys, 46)),
batch_row_047=get_value_from_dict(one_dict, get_value_if_index_in_list(remote_source_keys, 47)),
batch_row_048=get_value_from_dict(one_dict, get_value_if_index_in_list(remote_source_keys, 48)),
batch_row_049=get_value_from_dict(one_dict, get_value_if_index_in_list(remote_source_keys, 49)),
batch_row_050=get_value_from_dict(one_dict, get_value_if_index_in_list(remote_source_keys, 50)),
)
number_of_batch_rows += 1
except Exception as e:
# Stop trying to save rows -- break out of the for loop
status += "EXCEPTION_BATCH_ROW_FOR_JSON "
break
results = {
'success': success,
'status': status,
'batch_header_id': batch_header_id,
'batch_saved': success,
'number_of_batch_rows': number_of_batch_rows,
}
return results
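    # Illustrative call, with hypothetical data -- not part of the original module:
    #   structured_json_list = [{'ballotpedia_candidate_name': 'Jane Doe', 'candidate_twitter_handle': 'janedoe'}]
    #   results = BatchManager().create_batch_from_json(
    #       'ballotpedia_candidates.json', structured_json_list,
    #       BATCH_HEADER_MAP_CANDIDATES_TO_BALLOTPEDIA_CANDIDATES, CANDIDATE,
    #       google_civic_election_id=1000000)
    #   # results['batch_header_id'] identifies the saved BatchHeader, BatchHeaderMap and BatchRow entries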
# I don't believe this is currently in use. There is also a function of this same name in controllers.py
def create_batch_header_translation_suggestion(
self, kind_of_batch, header_value_recognized_by_we_vote, incoming_alternate_header_value):
"""
        Store a suggestion so an incoming header name can be translated to a header name We Vote recognizes.
        :param kind_of_batch: the kind of batch this suggestion applies to (CANDIDATE, MEASURE, etc.)
        :param header_value_recognized_by_we_vote: the We Vote header name the alternate value should map to
        :param incoming_alternate_header_value: the header name as it appears in the remote source
        :return: results dict with 'success', 'status', 'suggestion_created' and 'suggestion_updated'
"""
success = False
status = ""
suggestion_created = False
suggestion_updated = False
header_value_recognized_by_we_vote = header_value_recognized_by_we_vote.lower()
incoming_alternate_header_value = incoming_alternate_header_value.lower()
if not positive_value_exists(kind_of_batch) or not positive_value_exists(header_value_recognized_by_we_vote) \
or not positive_value_exists(incoming_alternate_header_value):
status += "CREATE_BATCH_HEADER_TRANSLATION_SUGGESTION-MISSING_REQUIRED_VARIABLE "
results = {
'success': success,
'status': status,
'suggestion_created': suggestion_created,
'suggestion_updated': suggestion_updated,
}
return results
if kind_of_batch == CANDIDATE:
batch_import_keys_accepted = BATCH_IMPORT_KEYS_ACCEPTED_FOR_CANDIDATES
elif kind_of_batch == CONTEST_OFFICE:
batch_import_keys_accepted = BATCH_IMPORT_KEYS_ACCEPTED_FOR_CONTEST_OFFICES
elif kind_of_batch == ELECTED_OFFICE:
batch_import_keys_accepted = BATCH_IMPORT_KEYS_ACCEPTED_FOR_ELECTED_OFFICES
elif kind_of_batch == MEASURE:
batch_import_keys_accepted = BATCH_IMPORT_KEYS_ACCEPTED_FOR_MEASURES
elif kind_of_batch == ORGANIZATION_WORD:
batch_import_keys_accepted = BATCH_IMPORT_KEYS_ACCEPTED_FOR_ORGANIZATIONS
elif kind_of_batch == POLITICIAN:
batch_import_keys_accepted = BATCH_IMPORT_KEYS_ACCEPTED_FOR_POLITICIANS
elif kind_of_batch == POSITION:
batch_import_keys_accepted = BATCH_IMPORT_KEYS_ACCEPTED_FOR_POSITIONS
elif kind_of_batch == IMPORT_BALLOT_ITEM:
batch_import_keys_accepted = BATCH_IMPORT_KEYS_ACCEPTED_FOR_BALLOT_ITEMS
elif kind_of_batch == IMPORT_VOTER:
batch_import_keys_accepted = BATCH_IMPORT_KEYS_ACCEPTED_FOR_VOTERS
else:
batch_import_keys_accepted = {}
if incoming_alternate_header_value in batch_import_keys_accepted:
success = True
status += "SUGGESTION_IS_BATCH_IMPORT_KEY "
results = {
'success': success,
'status': status,
'suggestion_created': suggestion_created,
'suggestion_updated': suggestion_updated,
}
return results
try:
batch_header_translation_suggestion, suggestion_created = \
BatchHeaderTranslationSuggestion.objects.update_or_create(
kind_of_batch=kind_of_batch,
header_value_recognized_by_we_vote=header_value_recognized_by_we_vote,
incoming_alternate_header_value=incoming_alternate_header_value)
success = True
status += "BATCH_HEADER_TRANSLATION_SUGGESTION_SAVED "
except Exception as e:
success = False
status += "BATCH_HEADER_TRANSLATION_SUGGESTION_SAVE_FAILED "
results = {
'success': success,
'status': status,
'suggestion_created': suggestion_created,
'suggestion_updated': suggestion_updated,
}
return results
def fetch_batch_row_count(self, batch_header_id):
"""
:param batch_header_id:
:return:
"""
try:
batch_row_query = BatchRow.objects.filter(batch_header_id=batch_header_id)
batch_row_count = batch_row_query.count()
except BatchRow.DoesNotExist:
batch_row_count = 0
except Exception as e:
batch_row_count = 0
return batch_row_count
def fetch_batch_row_action_count(self, batch_header_id, kind_of_batch, kind_of_action=''):
"""
:param batch_header_id:
:param kind_of_batch:
:param kind_of_action:
:return:
"""
batch_row_action_count = 0
try:
if kind_of_batch == CANDIDATE:
batch_row_action_query = BatchRowActionCandidate.objects.filter(batch_header_id=batch_header_id)
if positive_value_exists(kind_of_action):
batch_row_action_query = batch_row_action_query.filter(kind_of_action__iexact=kind_of_action)
batch_row_action_count = batch_row_action_query.count()
elif kind_of_batch == CONTEST_OFFICE:
batch_row_action_query = BatchRowActionContestOffice.objects.filter(batch_header_id=batch_header_id)
if positive_value_exists(kind_of_action):
batch_row_action_query = batch_row_action_query.filter(kind_of_action__iexact=kind_of_action)
batch_row_action_count = batch_row_action_query.count()
elif kind_of_batch == ELECTED_OFFICE:
batch_row_action_query = BatchRowActionElectedOffice.objects.filter(batch_header_id=batch_header_id)
if positive_value_exists(kind_of_action):
batch_row_action_query = batch_row_action_query.filter(kind_of_action__iexact=kind_of_action)
batch_row_action_count = batch_row_action_query.count()
elif kind_of_batch == IMPORT_BALLOT_ITEM:
batch_row_action_query = BatchRowActionBallotItem.objects.filter(batch_header_id=batch_header_id)
if positive_value_exists(kind_of_action):
batch_row_action_query = batch_row_action_query.filter(kind_of_action__iexact=kind_of_action)
batch_row_action_count = batch_row_action_query.count()
elif kind_of_batch == MEASURE:
batch_row_action_query = BatchRowActionMeasure.objects.filter(batch_header_id=batch_header_id)
if positive_value_exists(kind_of_action):
batch_row_action_query = batch_row_action_query.filter(kind_of_action__iexact=kind_of_action)
batch_row_action_count = batch_row_action_query.count()
elif kind_of_batch == ORGANIZATION_WORD:
batch_row_action_query = BatchRowActionOrganization.objects.filter(batch_header_id=batch_header_id)
if positive_value_exists(kind_of_action):
batch_row_action_query = batch_row_action_query.filter(kind_of_action__iexact=kind_of_action)
batch_row_action_count = batch_row_action_query.count()
elif kind_of_batch == POLITICIAN:
batch_row_action_query = BatchRowActionPolitician.objects.filter(batch_header_id=batch_header_id)
if positive_value_exists(kind_of_action):
batch_row_action_query = batch_row_action_query.filter(kind_of_action__iexact=kind_of_action)
batch_row_action_count = batch_row_action_query.count()
elif kind_of_batch == POSITION:
batch_row_action_query = BatchRowActionPosition.objects.filter(batch_header_id=batch_header_id)
if positive_value_exists(kind_of_action):
batch_row_action_query = batch_row_action_query.filter(kind_of_action__iexact=kind_of_action)
batch_row_action_count = batch_row_action_query.count()
except Exception as e:
batch_row_action_count = 0
return batch_row_action_count
def fetch_batch_row_action_count_in_batch_set(self, batch_set_id, kind_of_batch, kind_of_action=''):
"""
:param batch_set_id:
:param kind_of_batch:
:param kind_of_action:
:return:
"""
batch_row_action_count = 0
try:
if kind_of_batch == CANDIDATE:
batch_row_action_query = BatchRowActionCandidate.objects.filter(batch_set_id=batch_set_id)
if positive_value_exists(kind_of_action):
batch_row_action_query = batch_row_action_query.filter(kind_of_action__iexact=kind_of_action)
batch_row_action_count = batch_row_action_query.count()
elif kind_of_batch == CONTEST_OFFICE:
batch_row_action_query = BatchRowActionContestOffice.objects.filter(batch_set_id=batch_set_id)
if positive_value_exists(kind_of_action):
batch_row_action_query = batch_row_action_query.filter(kind_of_action__iexact=kind_of_action)
batch_row_action_count = batch_row_action_query.count()
elif kind_of_batch == ELECTED_OFFICE:
batch_row_action_query = BatchRowActionElectedOffice.objects.filter(batch_set_id=batch_set_id)
if positive_value_exists(kind_of_action):
batch_row_action_query = batch_row_action_query.filter(kind_of_action__iexact=kind_of_action)
batch_row_action_count = batch_row_action_query.count()
elif kind_of_batch == IMPORT_BALLOT_ITEM:
batch_row_action_query = BatchRowActionBallotItem.objects.filter(batch_set_id=batch_set_id)
if positive_value_exists(kind_of_action):
batch_row_action_query = batch_row_action_query.filter(kind_of_action__iexact=kind_of_action)
batch_row_action_count = batch_row_action_query.count()
elif kind_of_batch == MEASURE:
batch_row_action_query = BatchRowActionMeasure.objects.filter(batch_set_id=batch_set_id)
if positive_value_exists(kind_of_action):
batch_row_action_query = batch_row_action_query.filter(kind_of_action__iexact=kind_of_action)
batch_row_action_count = batch_row_action_query.count()
elif kind_of_batch == ORGANIZATION_WORD:
batch_row_action_query = BatchRowActionOrganization.objects.filter(batch_set_id=batch_set_id)
if positive_value_exists(kind_of_action):
batch_row_action_query = batch_row_action_query.filter(kind_of_action__iexact=kind_of_action)
batch_row_action_count = batch_row_action_query.count()
elif kind_of_batch == POLITICIAN:
batch_row_action_query = BatchRowActionPolitician.objects.filter(batch_set_id=batch_set_id)
if positive_value_exists(kind_of_action):
batch_row_action_query = batch_row_action_query.filter(kind_of_action__iexact=kind_of_action)
batch_row_action_count = batch_row_action_query.count()
elif kind_of_batch == POSITION:
batch_row_action_query = BatchRowActionPosition.objects.filter(batch_set_id=batch_set_id)
if positive_value_exists(kind_of_action):
batch_row_action_query = batch_row_action_query.filter(kind_of_action__iexact=kind_of_action)
batch_row_action_count = batch_row_action_query.count()
except Exception as e:
batch_row_action_count = 0
return batch_row_action_count
def retrieve_unprocessed_batch_set_info_by_election_and_set_source(
self, google_civic_election_id, batch_set_source):
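        # Find the most recent BatchSet for this election + batch_set_source, then count how many of its
        # BatchDescriptions have no IMPORT_ADD_TO_EXISTING BatchRowActionBallotItem entries yet
        # (those batches are considered "not processed").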
batch_set_query = BatchSet.objects.all()
batch_set_query = batch_set_query.filter(google_civic_election_id=google_civic_election_id)
batch_set_query = batch_set_query.filter(batch_set_source__iexact=batch_set_source)
batch_set_query = batch_set_query.order_by('-id')
batch_set_list = list(batch_set_query)
batch_of_ballot_items_not_processed = 0
batch_set_id = 0
total_ballot_locations_count = 0
if positive_value_exists(len(batch_set_list)):
one_batch_set = batch_set_list[0]
batch_set_id = one_batch_set.id
batch_description_query = BatchDescription.objects.all()
batch_description_query = batch_description_query.filter(batch_set_id=one_batch_set.id)
total_ballot_locations_count = batch_description_query.count()
batch_description_list = list(batch_description_query)
for one_batch_description in batch_description_list:
# For each Batch Description, see if there are BatchRowActionBallotItem entries
batch_row_action_ballot_item_query = BatchRowActionBallotItem.objects.all()
batch_row_action_ballot_item_query = batch_row_action_ballot_item_query.filter(
batch_header_id=one_batch_description.batch_header_id)
batch_row_action_ballot_item_query = batch_row_action_ballot_item_query.filter(
kind_of_action=IMPORT_ADD_TO_EXISTING)
# If there aren't any "update" entries, count as unprocessed
if not positive_value_exists(batch_row_action_ballot_item_query.count()):
batch_of_ballot_items_not_processed += 1
results = {
'batches_not_processed': batch_of_ballot_items_not_processed,
'batch_set_id': batch_set_id,
}
return results
def retrieve_batch_header_translation_suggestion(self, kind_of_batch, incoming_alternate_header_value):
"""
We are looking at one header value from a file imported by an admin or volunteer. We want to see if
there are any suggestions for headers already recognized by We Vote. Ex/ "Organization" -> "organization_name"
:param kind_of_batch:
:param incoming_alternate_header_value:
:return:
"""
success = False
status = ""
batch_header_translation_suggestion_found = False
if not positive_value_exists(kind_of_batch) or not positive_value_exists(incoming_alternate_header_value):
status += "RETRIEVE_BATCH_HEADER_TRANSLATION_SUGGESTION-MISSING_REQUIRED_VARIABLE "
results = {
'success': success,
'status': status,
'batch_header_translation_suggestion': BatchHeaderTranslationSuggestion(),
'batch_header_translation_suggestion_found': batch_header_translation_suggestion_found,
}
return results
try:
# Note that we don't care about case sensitivity when we search for the alternate value
batch_header_translation_suggestion = BatchHeaderTranslationSuggestion.objects.get(
kind_of_batch=kind_of_batch,
incoming_alternate_header_value__iexact=incoming_alternate_header_value)
batch_header_translation_suggestion_found = True
success = True
status += "BATCH_HEADER_TRANSLATION_SUGGESTION_SAVED "
except Exception as e:
batch_header_translation_suggestion = BatchHeaderTranslationSuggestion()
success = False
status += "BATCH_HEADER_TRANSLATION_SUGGESTION_SAVE_FAILED "
results = {
'success': success,
'status': status,
'batch_header_translation_suggestion': batch_header_translation_suggestion,
'batch_header_translation_suggestion_found': batch_header_translation_suggestion_found,
}
return results
def create_batch_row_translation_map( # TODO This hasn't been built
self, kind_of_batch, header_value_recognized_by_we_vote, incoming_alternate_header_value):
success = False
status = ""
if not positive_value_exists(kind_of_batch) or not positive_value_exists(header_value_recognized_by_we_vote) \
or not positive_value_exists(incoming_alternate_header_value):
status += "CREATE_BATCH_HEADER_TRANSLATION_SUGGESTION-MISSING_REQUIRED_VARIABLE "
results = {
'success': success,
'status': status,
}
return results
try:
header_value_recognized_by_we_vote = header_value_recognized_by_we_vote.lower()
incoming_alternate_header_value = incoming_alternate_header_value.lower()
batch_header_translation_suggestion, created = BatchHeaderTranslationSuggestion.objects.update_or_create(
kind_of_batch=kind_of_batch,
header_value_recognized_by_we_vote=header_value_recognized_by_we_vote,
incoming_alternate_header_value=incoming_alternate_header_value)
success = True
status += "BATCH_HEADER_TRANSLATION_SUGGESTION_SAVED "
except Exception as e:
success = False
status += "BATCH_HEADER_TRANSLATION_SUGGESTION_SAVE_FAILED "
results = {
'success': success,
'status': status,
}
return results
def retrieve_batch_row_translation_map(self, kind_of_batch, incoming_alternate_header_value):
# TODO This hasn't been built yet
success = False
status = ""
batch_header_translation_suggestion_found = False
if not positive_value_exists(kind_of_batch) or not positive_value_exists(incoming_alternate_header_value):
status += "RETRIEVE_BATCH_HEADER_TRANSLATION_SUGGESTION-MISSING_REQUIRED_VARIABLE "
results = {
'success': success,
'status': status,
'batch_header_translation_suggestion': BatchHeaderTranslationSuggestion(),
'batch_header_translation_suggestion_found': batch_header_translation_suggestion_found,
}
return results
try:
# Note that we don't care about case sensitivity when we search for the alternate value
batch_header_translation_suggestion = BatchHeaderTranslationSuggestion.objects.get(
kind_of_batch=kind_of_batch,
incoming_alternate_header_value__iexact=incoming_alternate_header_value)
batch_header_translation_suggestion_found = True
success = True
status += "BATCH_HEADER_TRANSLATION_SUGGESTION_SAVED "
except Exception as e:
batch_header_translation_suggestion = BatchHeaderTranslationSuggestion()
success = False
status += "BATCH_HEADER_TRANSLATION_SUGGESTION_SAVE_FAILED "
results = {
'success': success,
'status': status,
'batch_header_translation_suggestion': batch_header_translation_suggestion,
'batch_header_translation_suggestion_found': batch_header_translation_suggestion_found,
}
return results
def retrieve_batch_row_action_organization(self, batch_header_id, batch_row_id):
try:
batch_row_action_organization = BatchRowActionOrganization.objects.get(batch_header_id=batch_header_id,
batch_row_id=batch_row_id)
batch_row_action_found = True
success = True
status = "BATCH_ROW_ACTION_ORGANIZATION_RETRIEVED"
except BatchRowActionOrganization.DoesNotExist:
batch_row_action_organization = BatchRowActionOrganization()
batch_row_action_found = False
success = True
status = "BATCH_ROW_ACTION_ORGANIZATION_NOT_FOUND"
except Exception as e:
batch_row_action_organization = BatchRowActionOrganization()
batch_row_action_found = False
success = False
status = "BATCH_ROW_ACTION_ORGANIZATION_RETRIEVE_ERROR"
results = {
'success': success,
'status': status,
'batch_row_action_found': batch_row_action_found,
'batch_row_action_organization': batch_row_action_organization,
}
return results
def retrieve_batch_row_action_measure(self, batch_header_id, batch_row_id):
try:
batch_row_action_measure = BatchRowActionMeasure.objects.get(batch_header_id=batch_header_id,
batch_row_id=batch_row_id)
batch_row_action_found = True
success = True
status = "BATCH_ROW_ACTION_MEASURE_RETRIEVED "
except BatchRowActionMeasure.DoesNotExist:
batch_row_action_measure = BatchRowActionMeasure()
batch_row_action_found = False
success = True
status = "BATCH_ROW_ACTION_MEASURE_NOT_FOUND "
except Exception as e:
batch_row_action_measure = BatchRowActionMeasure()
batch_row_action_found = False
success = False
status = "BATCH_ROW_ACTION_MEASURE_RETRIEVE_ERROR "
results = {
'success': success,
'status': status,
'batch_row_action_found': batch_row_action_found,
'batch_row_action_measure': batch_row_action_measure,
}
return results
def retrieve_batch_row_action_elected_office(self, batch_header_id, batch_row_id):
"""
Retrieves data from BatchRowActionElectedOffice table
:param batch_header_id:
:param batch_row_id:
:return:
"""
try:
batch_row_action_elected_office = BatchRowActionElectedOffice.objects.get(batch_header_id=batch_header_id,
batch_row_id=batch_row_id)
batch_row_action_found = True
success = True
status = "BATCH_ROW_ACTION_ELECTED_OFFICE_RETRIEVED"
except BatchRowActionElectedOffice.DoesNotExist:
batch_row_action_elected_office = BatchRowActionElectedOffice()
batch_row_action_found = False
success = True
status = "BATCH_ROW_ACTION_ELECTED_OFFICE_NOT_FOUND"
except Exception as e:
batch_row_action_elected_office = BatchRowActionElectedOffice()
batch_row_action_found = False
success = False
status = "BATCH_ROW_ACTION_ELECTED_OFFICE_RETRIEVE_ERROR"
results = {
'success': success,
'status': status,
'batch_row_action_found': batch_row_action_found,
'batch_row_action_elected_office': batch_row_action_elected_office,
}
return results
def retrieve_batch_row_action_contest_office(self, batch_header_id, batch_row_id):
"""
Retrieves data from BatchRowActionContestOffice table
:param batch_header_id:
:param batch_row_id:
:return:
"""
try:
batch_row_action_contest_office = BatchRowActionContestOffice.objects.get(batch_header_id=batch_header_id,
batch_row_id=batch_row_id)
batch_row_action_found = True
success = True
status = "BATCH_ROW_ACTION_CONTEST_OFFICE_RETRIEVED"
except BatchRowActionContestOffice.DoesNotExist:
batch_row_action_contest_office = BatchRowActionContestOffice()
batch_row_action_found = False
success = True
status = "BATCH_ROW_ACTION_CONTEST_OFFICE_NOT_FOUND"
except Exception as e:
batch_row_action_contest_office = BatchRowActionContestOffice()
batch_row_action_found = False
success = False
status = "BATCH_ROW_ACTION_CONTEST_OFFICE_RETRIEVE_ERROR"
results = {
'success': success,
'status': status,
'batch_row_action_found': batch_row_action_found,
'batch_row_action_contest_office': batch_row_action_contest_office,
}
return results
def retrieve_batch_row_action_politician(self, batch_header_id, batch_row_id):
"""
Retrieves data from BatchRowActionPolitician table
:param batch_header_id:
:param batch_row_id:
:return:
"""
try:
batch_row_action_politician = BatchRowActionPolitician.objects.get(batch_header_id=batch_header_id,
batch_row_id=batch_row_id)
batch_row_action_found = True
success = True
status = "BATCH_ROW_ACTION_POLITICIAN_RETRIEVED"
except BatchRowActionPolitician.DoesNotExist:
batch_row_action_politician = BatchRowActionPolitician()
batch_row_action_found = False
success = True
status = "BATCH_ROW_ACTION_POLITICIAN_NOT_FOUND"
except Exception as e:
batch_row_action_politician = BatchRowActionPolitician()
batch_row_action_found = False
success = False
status = "BATCH_ROW_ACTION_POLITICIAN_RETRIEVE_ERROR"
results = {
'success': success,
'status': status,
'batch_row_action_found': batch_row_action_found,
'batch_row_action_politician': batch_row_action_politician,
}
return results
def retrieve_batch_row_action_position(self, batch_header_id, batch_row_id):
"""
Retrieves data from BatchRowActionPosition table
:param batch_header_id:
:param batch_row_id:
:return:
"""
try:
batch_row_action_position = BatchRowActionPosition.objects.get(batch_header_id=batch_header_id,
batch_row_id=batch_row_id)
batch_row_action_found = True
success = True
status = "BATCH_ROW_ACTION_POSITION_RETRIEVED"
except BatchRowActionPosition.DoesNotExist:
batch_row_action_position = BatchRowActionPosition()
batch_row_action_found = False
success = True
status = "BATCH_ROW_ACTION_POSITION_NOT_FOUND"
except Exception as e:
batch_row_action_position = BatchRowActionPosition()
batch_row_action_found = False
success = False
status = "BATCH_ROW_ACTION_POSITION_RETRIEVE_ERROR"
results = {
'success': success,
'status': status,
'batch_row_action_found': batch_row_action_found,
'batch_row_action_position': batch_row_action_position,
}
return results
def retrieve_batch_row_action_ballot_item(self, batch_header_id, batch_row_id):
"""
Retrieves data from BatchRowActionBallotItem table
:param batch_header_id:
:param batch_row_id:
:return:
"""
try:
batch_row_action_ballot_item = BatchRowActionBallotItem.objects.get(batch_header_id=batch_header_id,
batch_row_id=batch_row_id)
batch_row_action_found = True
success = True
status = "BATCH_ROW_ACTION_BALLOT_ITEM_RETRIEVED "
except BatchRowActionBallotItem.DoesNotExist:
batch_row_action_ballot_item = BatchRowActionBallotItem()
batch_row_action_found = False
success = True
status = "BATCH_ROW_ACTION_BALLOT_ITEM_NOT_FOUND "
except Exception as e:
batch_row_action_ballot_item = BatchRowActionBallotItem()
batch_row_action_found = False
success = False
status = "BATCH_ROW_ACTION_BALLOT_ITEM_RETRIEVE_ERROR "
results = {
'success': success,
'status': status,
'batch_row_action_found': batch_row_action_found,
'batch_row_action_ballot_item': batch_row_action_ballot_item,
}
return results
def retrieve_batch_row_action_candidate(self, batch_header_id, batch_row_id):
"""
Retrieves data from BatchRowActionCandidate table
:param batch_header_id:
:param batch_row_id:
:return:
"""
try:
batch_row_action_candidate = BatchRowActionCandidate.objects.get(batch_header_id=batch_header_id,
batch_row_id=batch_row_id)
batch_row_action_found = True
success = True
status = "BATCH_ROW_ACTION_CANDIDATE_RETRIEVED"
except BatchRowActionCandidate.DoesNotExist:
batch_row_action_candidate = BatchRowActionCandidate()
batch_row_action_found = False
success = True
status = "BATCH_ROW_ACTION_CANDIDATE_NOT_FOUND"
except Exception as e:
batch_row_action_candidate = BatchRowActionCandidate()
batch_row_action_found = False
success = False
status = "BATCH_ROW_ACTION_CANDIDATE_RETRIEVE_ERROR"
results = {
'success': success,
'status': status,
'batch_row_action_found': batch_row_action_found,
'batch_row_action_candidate': batch_row_action_candidate,
}
return results
def retrieve_value_from_batch_row(self, batch_header_name_we_want, batch_header_map, one_batch_row):
index_number = 0
batch_header_name_we_want = batch_header_name_we_want.lower().strip()
number_of_columns = 50
while index_number < number_of_columns:
index_number_string = "00" + str(index_number)
index_number_string = index_number_string[-3:]
batch_header_map_attribute_name = "batch_header_map_" + index_number_string
# If this position in the batch_header_map matches the batch_header_name_we_want, then we know what column
# to look in within one_batch_row for the value
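            # Example (assumed data): if batch_header_map.batch_header_map_003 == 'electoral_district_id'
            # and we want 'electoral_district_id', the value is read from one_batch_row.batch_row_003.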
value_from_batch_header_map = getattr(batch_header_map, batch_header_map_attribute_name)
if value_from_batch_header_map is None:
# Break out when we stop getting batch_header_map values
return ""
if batch_header_name_we_want == value_from_batch_header_map.lower().strip():
one_batch_row_attribute_name = "batch_row_" + index_number_string
value_from_batch_row = getattr(one_batch_row, one_batch_row_attribute_name)
if isinstance(value_from_batch_row, str):
return value_from_batch_row.strip()
else:
return value_from_batch_row
index_number += 1
return ""
def retrieve_column_name_from_batch_row(self, batch_header_name_we_want, batch_header_map):
"""
        Given a header name We Vote recognizes, look it up in batch_header_map and return the matching
        batch_row_### attribute name.
:param batch_header_name_we_want:
:param batch_header_map:
:return:
"""
index_number = 0
batch_header_name_we_want = batch_header_name_we_want.lower().strip()
number_of_columns = 50
while index_number < number_of_columns:
index_number_string = "00" + str(index_number)
index_number_string = index_number_string[-3:]
batch_header_map_attribute_name = "batch_header_map_" + index_number_string
# If this position in the batch_header_map matches the batch_header_name_we_want, then we know what column
# to look in within one_batch_row for the value, eg: batch_header_map_000 --> measure_batch_id
value_from_batch_header_map = getattr(batch_header_map, batch_header_map_attribute_name)
if value_from_batch_header_map is None:
# Break out when we stop getting batch_header_map values
return ""
if batch_header_name_we_want == value_from_batch_header_map.lower().strip():
one_batch_row_attribute_name = "batch_row_" + index_number_string
return one_batch_row_attribute_name
index_number += 1
return ""
def find_file_type(self, batch_uri):
"""
Determines the file type based on file extension. If no known extension, it gets the file type information from
file magic.
:param batch_uri:
        :return: filetype - 'xml', 'json', 'csv', or the description returned by file magic
"""
# check for file extension
batch_uri = batch_uri.lower()
file_extension = batch_uri.split('.')
if 'xml' in file_extension:
filetype = 'xml'
elif 'json' in file_extension:
filetype = 'json'
elif 'csv' in file_extension:
filetype = 'csv'
else:
# if the filetype is neither xml, json nor csv, get the file type info from magic
file = urllib.request.urlopen(batch_uri)
filetype = magic.from_buffer(file.read())
file.close()
return filetype
def find_possible_matches(self, kind_of_batch, batch_row_name, incoming_batch_row_value,
google_civic_election_id, state_code):
if kind_of_batch == CONTEST_OFFICE:
# TODO DALE
pass
possible_matches = {
'New York City Mayor': 'New York City Mayor'
}
results = {
'possible_matches_found': True,
'possible_matches': possible_matches
}
return results
def create_batch_vip_xml(self, batch_uri, kind_of_batch, google_civic_election_id, organization_we_vote_id):
"""
Retrieves CTCL data from an xml file - Measure, Office, Candidate, Politician
:param batch_uri:
:param kind_of_batch:
:param google_civic_election_id:
:param organization_we_vote_id:
:return:
"""
# Retrieve from XML
request = urllib.request.urlopen(batch_uri)
# xml_data = request.read()
# xml_data = xmltodict.parse(xml_data)
# # xml_data_list_json = list(xml_data)
# structured_json = json.dumps(xml_data)
xml_tree = ElementTree.parse(request)
request.close()
xml_root = xml_tree.getroot()
        if xml_root is not None:
if kind_of_batch == MEASURE:
return self.store_measure_xml(batch_uri, google_civic_election_id, organization_we_vote_id, xml_root)
elif kind_of_batch == ELECTED_OFFICE:
return self.store_elected_office_xml(batch_uri, google_civic_election_id, organization_we_vote_id,
xml_root)
elif kind_of_batch == CONTEST_OFFICE:
return self.store_contest_office_xml(batch_uri, google_civic_election_id, organization_we_vote_id,
xml_root)
elif kind_of_batch == CANDIDATE:
return self.store_candidate_xml(batch_uri, google_civic_election_id, organization_we_vote_id, xml_root)
elif kind_of_batch == POLITICIAN:
return self.store_politician_xml(batch_uri, google_civic_election_id, organization_we_vote_id, xml_root)
else:
results = {
'success': False,
                'status': 'CREATE_BATCH_VIP_XML-KIND_OF_BATCH_NOT_RECOGNIZED ',
'batch_header_id': 0,
'batch_saved': False,
'number_of_batch_rows': 0,
}
return results
def store_measure_xml(self, batch_uri, google_civic_election_id, organization_we_vote_id, xml_root, batch_set_id=0):
"""
Retrieves Measure data from CTCL xml file
:param batch_uri:
:param google_civic_election_id:
:param organization_we_vote_id:
:param xml_root:
:param batch_set_id:
:return:
"""
# Process BallotMeasureContest data
number_of_batch_rows = 0
first_line = True
success = True
status = ''
limit_for_testing = 0
batch_header_id = 0
# Look for BallotMeasureContest and create the batch_header first. BallotMeasureContest is the direct child node
# of VipObject
ballot_measure_xml_node = xml_root.findall('BallotMeasureContest')
# if ballot_measure_xml_node is not None:
for one_ballot_measure in ballot_measure_xml_node:
if positive_value_exists(limit_for_testing) and number_of_batch_rows >= limit_for_testing:
break
# look for relevant child nodes under BallotMeasureContest: id, BallotTitle, BallotSubTitle,
            # ElectoralDistrictId, other::ctcl-uuid
ballot_measure_id = one_ballot_measure.attrib['id']
ballot_measure_subtitle_node = one_ballot_measure.find('BallotSubTitle/Text')
if ballot_measure_subtitle_node is not None:
ballot_measure_subtitle = ballot_measure_subtitle_node.text
else:
ballot_measure_subtitle = ''
            ballot_measure_title_node = one_ballot_measure.find('BallotTitle/Text')
            if ballot_measure_title_node is not None:
                ballot_measure_title = ballot_measure_title_node.text
            else:
                ballot_measure_title = ''
electoral_district_id_node = one_ballot_measure.find('ElectoralDistrictId')
if electoral_district_id_node is not None:
electoral_district_id = electoral_district_id_node.text
else:
electoral_district_id = ''
ctcl_uuid_node = one_ballot_measure.find(
"./ExternalIdentifiers/ExternalIdentifier/[OtherType='ctcl-uuid']")
if ctcl_uuid_node is not None:
ctcl_uuid = one_ballot_measure.find(
"./ExternalIdentifiers/ExternalIdentifier/[OtherType='ctcl-uuid']/Value").text
else:
ctcl_uuid = ''
ballot_measure_name_node = one_ballot_measure.find('Name')
if ballot_measure_name_node is not None:
ballot_measure_name = ballot_measure_name_node.text
else:
ballot_measure_name = ''
if first_line:
first_line = False
try:
batch_header = BatchHeader.objects.create(
batch_header_column_000='id',
batch_header_column_001='BallotSubTitle',
batch_header_column_002='BallotTitle',
batch_header_column_003='ElectoralDistrictId',
batch_header_column_004='other::ctcl-uuid',
batch_header_column_005='Name',
)
batch_header_id = batch_header.id
if positive_value_exists(batch_header_id):
# Save an initial BatchHeaderMap
batch_header_map = BatchHeaderMap.objects.create(
batch_header_id=batch_header_id,
batch_header_map_000='measure_batch_id',
batch_header_map_001='measure_subtitle',
batch_header_map_002='measure_title',
batch_header_map_003='electoral_district_id',
batch_header_map_004='measure_ctcl_uuid',
batch_header_map_005='measure_name'
)
batch_header_map_id = batch_header_map.id
status += " BATCH_HEADER_MAP_SAVED"
if positive_value_exists(batch_header_id) and positive_value_exists(batch_header_map_id):
# Now save the BatchDescription
batch_name = "MEASURE " + " batch_header_id: " + str(batch_header_id)
batch_description_text = ""
batch_description = BatchDescription.objects.create(
batch_header_id=batch_header_id,
batch_header_map_id=batch_header_map_id,
batch_name=batch_name,
batch_description_text=batch_description_text,
google_civic_election_id=google_civic_election_id,
kind_of_batch='MEASURE',
organization_we_vote_id=organization_we_vote_id,
source_uri=batch_uri,
batch_set_id=batch_set_id,
)
status += " BATCH_DESCRIPTION_SAVED"
success = True
except Exception as e:
# Stop trying to save rows -- break out of the for loop
batch_header_id = 0
status += " EXCEPTION_BATCH_HEADER"
handle_exception(e, logger=logger, exception_message=status)
break
if not positive_value_exists(batch_header_id):
break
# check for measure_id, title OR subtitle or name AND ctcl_uuid
if (positive_value_exists(ballot_measure_id) and positive_value_exists(ctcl_uuid) and
(positive_value_exists(ballot_measure_subtitle) or positive_value_exists(ballot_measure_title) or
positive_value_exists(ballot_measure_name))):
try:
batch_row = BatchRow.objects.create(
batch_header_id=batch_header_id,
batch_row_000=ballot_measure_id,
batch_row_001=ballot_measure_subtitle,
batch_row_002=ballot_measure_title,
batch_row_003=electoral_district_id,
batch_row_004=ctcl_uuid,
batch_row_005=ballot_measure_name
)
number_of_batch_rows += 1
                except Exception as e:
                    # Stop trying to save rows -- break out of the for loop
                    status += " EXCEPTION_BATCH_ROW"
                    handle_exception(e, logger=logger, exception_message=status)
                    break
results = {
'success': success,
'status': status,
'batch_header_id': batch_header_id,
'batch_saved': success,
'number_of_batch_rows': number_of_batch_rows,
}
return results
def store_elected_office_xml(self, batch_uri, google_civic_election_id, organization_we_vote_id, xml_root,
batch_set_id=0):
"""
Retrieves Office data from CTCL xml file
:param batch_uri:
:param google_civic_election_id:
:param organization_we_vote_id:
:param xml_root:
:param batch_set_id
:return:
"""
# Process VIP Office data
number_of_batch_rows = 0
first_line = True
success = False
status = ''
limit_for_testing = 0
batch_header_id = 0
# Look for Office and create the batch_header first. Office is the direct child node
# of VipObject
elected_office_xml_node = xml_root.findall('Office')
# if ballot_measure_xml_node is not None:
for one_elected_office in elected_office_xml_node:
if positive_value_exists(limit_for_testing) and number_of_batch_rows >= limit_for_testing:
break
# look for relevant child nodes under Office: id, Name, Description, ElectoralDistrictId,
            # IsPartisan, other::ctcl-uuid
elected_office_id = one_elected_office.attrib['id']
elected_office_name_node = one_elected_office.find("./Name/Text/[@language='"+LANGUAGE_CODE_ENGLISH+"']")
if elected_office_name_node is not None:
elected_office_name = elected_office_name_node.text
else:
elected_office_name = ""
elected_office_name_es_node = one_elected_office.find("./Name/Text/[@language='"+LANGUAGE_CODE_SPANISH+"']")
if elected_office_name_es_node is not None:
elected_office_name_es = elected_office_name_es_node.text
else:
elected_office_name_es = ""
elected_office_description_node = one_elected_office.find(
"Description/Text/[@language='"+LANGUAGE_CODE_ENGLISH+"']")
if elected_office_description_node is not None:
elected_office_description = elected_office_description_node.text
else:
elected_office_description = ""
elected_office_description_es_node = one_elected_office.find(
"Description/Text/[@language='"+LANGUAGE_CODE_SPANISH+"']")
if elected_office_description_es_node is not None:
elected_office_description_es = elected_office_description_es_node.text
else:
elected_office_description_es = ""
electoral_district_id_node = one_elected_office.find('ElectoralDistrictId')
if electoral_district_id_node is not None:
electoral_district_id = electoral_district_id_node.text
else:
electoral_district_id = ""
elected_office_is_partisan_node = one_elected_office.find('IsPartisan')
if elected_office_is_partisan_node is not None:
elected_office_is_partisan = elected_office_is_partisan_node.text
else:
elected_office_is_partisan = ""
ctcl_uuid = ""
ctcl_uuid_node = one_elected_office.find(
"./ExternalIdentifiers/ExternalIdentifier/[OtherType='ctcl-uuid']")
if ctcl_uuid_node is not None:
ctcl_uuid = one_elected_office.find(
"./ExternalIdentifiers/ExternalIdentifier/[OtherType='ctcl-uuid']/Value").text
if first_line:
first_line = False
try:
batch_header = BatchHeader.objects.create(
batch_header_column_000='id',
batch_header_column_001='NameEnglish',
batch_header_column_002='NameSpanish',
batch_header_column_003='DescriptionEnglish',
batch_header_column_004='DescriptionSpanish',
batch_header_column_005='ElectoralDistrictId',
batch_header_column_006='IsPartisan',
batch_header_column_007='other::ctcl-uuid',
)
batch_header_id = batch_header.id
if positive_value_exists(batch_header_id):
# Save an initial BatchHeaderMap
batch_header_map = BatchHeaderMap.objects.create(
batch_header_id=batch_header_id,
batch_header_map_000='elected_office_batch_id',
batch_header_map_001='elected_office_name',
batch_header_map_002='elected_office_name_es',
batch_header_map_003='elected_office_description',
batch_header_map_004='elected_office_description_es',
batch_header_map_005='electoral_district_id',
batch_header_map_006='elected_office_is_partisan',
batch_header_map_007='elected_office_ctcl_uuid',
)
batch_header_map_id = batch_header_map.id
status += " BATCH_HEADER_MAP_SAVED"
if positive_value_exists(batch_header_id) and positive_value_exists(batch_header_map_id):
# Now save the BatchDescription
batch_name = "ELECTED_OFFICE " + " batch_header_id: " + str(batch_header_id)
batch_description_text = ""
batch_description = BatchDescription.objects.create(
batch_header_id=batch_header_id,
batch_header_map_id=batch_header_map_id,
batch_name=batch_name,
batch_description_text=batch_description_text,
google_civic_election_id=google_civic_election_id,
kind_of_batch='ELECTED_OFFICE',
organization_we_vote_id=organization_we_vote_id,
source_uri=batch_uri,
batch_set_id=batch_set_id,
)
status += " BATCH_DESCRIPTION_SAVED"
success = True
except Exception as e:
# Stop trying to save rows -- break out of the for loop
batch_header_id = 0
status += " EXCEPTION_BATCH_HEADER"
handle_exception(e, logger=logger, exception_message=status)
break
if not positive_value_exists(batch_header_id):
break
            # Save the row only when we have the office id, the ctcl_uuid, and at least one of
            # electoral_district_id, the English name, or the Spanish name
            if positive_value_exists(elected_office_id) and positive_value_exists(ctcl_uuid) and \
                    (positive_value_exists(electoral_district_id) or positive_value_exists(elected_office_name) or
                     positive_value_exists(elected_office_name_es)):
try:
batch_row = BatchRow.objects.create(
batch_header_id=batch_header_id,
batch_row_000=elected_office_id,
batch_row_001=elected_office_name,
batch_row_002=elected_office_name_es,
batch_row_003=elected_office_description,
batch_row_004=elected_office_description_es,
batch_row_005=electoral_district_id,
batch_row_006=elected_office_is_partisan,
batch_row_007=ctcl_uuid
)
number_of_batch_rows += 1
except Exception as e:
# Stop trying to save rows -- break out of the for loop
status += " EXCEPTION_BATCH_ROW"
handle_exception(e, logger=logger, exception_message=status)
break
results = {
'success': success,
'status': status,
'batch_header_id': batch_header_id,
'batch_saved': success,
'number_of_batch_rows': number_of_batch_rows,
}
return results
def store_contest_office_xml(self, batch_uri, google_civic_election_id, organization_we_vote_id, xml_root,
batch_set_id=0):
"""
Retrieves ContestOffice data from CTCL xml file
:param batch_uri:
:param google_civic_election_id:
:param organization_we_vote_id:
:param xml_root:
:param batch_set_id
:return:
"""
# Process VIP CandidateContest data
number_of_batch_rows = 0
first_line = True
success = True
status = ''
limit_for_testing = 0
batch_header_id = 0
candidate_selection_id_key_list = [
'candidate_selection_id_1', 'candidate_selection_id_2', 'candidate_selection_id_3',
'candidate_selection_id_4', 'candidate_selection_id_5', 'candidate_selection_id_6',
'candidate_selection_id_7', 'candidate_selection_id_8', 'candidate_selection_id_9',
'candidate_selection_id_10']
# Look for CandidateContest and create the batch_header first. CandidateContest is the direct child node
# of VipObject
contest_office_xml_node = xml_root.findall('CandidateContest')
# if contest_office_xml_node is not None:
for one_contest_office in contest_office_xml_node:
if positive_value_exists(limit_for_testing) and number_of_batch_rows >= limit_for_testing:
break
            # look for relevant child nodes under CandidateContest: id, Name, OfficeIds, ElectoralDistrictId,
            # other::ctcl-uuid, VotesAllowed, NumberElected
contest_office_id = one_contest_office.attrib['id']
contest_office_name_node = one_contest_office.find('Name')
if contest_office_name_node is not None:
contest_office_name = contest_office_name_node.text
else:
contest_office_name = ""
contest_office_number_elected_node = one_contest_office.find('NumberElected')
if contest_office_number_elected_node is not None:
contest_office_number_elected = contest_office_number_elected_node.text
else:
contest_office_number_elected = ""
electoral_district_id_node = one_contest_office.find('ElectoralDistrictId')
if electoral_district_id_node is not None:
electoral_district_id = electoral_district_id_node.text
else:
electoral_district_id = ""
contest_office_votes_allowed_node = one_contest_office.find('VotesAllowed')
if contest_office_votes_allowed_node is not None:
contest_office_votes_allowed = contest_office_votes_allowed_node.text
else:
contest_office_votes_allowed = ""
elected_office_id_node = one_contest_office.find('OfficeIds')
if elected_office_id_node is not None:
elected_office_id = elected_office_id_node.text
else:
elected_office_id = ""
ctcl_uuid = ""
ctcl_uuid_node = one_contest_office.find(
"./ExternalIdentifiers/ExternalIdentifier/[OtherType='ctcl-uuid']")
if ctcl_uuid_node is not None:
ctcl_uuid = one_contest_office.find(
"./ExternalIdentifiers/ExternalIdentifier/[OtherType='ctcl-uuid']/Value").text
candidate_selection_ids_dict = {}
ballot_selection_ids_node = one_contest_office.find('./BallotSelectionIds')
if ballot_selection_ids_node is not None:
ballot_selection_ids_str = ballot_selection_ids_node.text
if ballot_selection_ids_str:
ballot_selection_ids_value_list = ballot_selection_ids_str.split()
                    # Pair each ballot selection id with a candidate_selection_id_* key.
                    # Assumes there are at most 10 ballot selection ids for a given contest office.
ballot_selection_ids_dict = dict(
zip(candidate_selection_id_key_list, ballot_selection_ids_value_list))
                    # Move this to BatchRowActionContestOffice creation if we run into performance/load issues
candidate_selection_list = []
for key, value in ballot_selection_ids_dict.items():
results = retrieve_candidate_from_candidate_selection(value, batch_set_id)
if results['candidate_id_found']:
candidate_selection_item = results['candidate_selection']
candidate_value = candidate_selection_item.contest_office_id
candidate_selection_list.append(candidate_value)
candidate_selection_ids_dict = dict(zip(candidate_selection_id_key_list, candidate_selection_list))
if first_line:
first_line = False
try:
batch_header = BatchHeader.objects.create(
batch_header_column_000='id',
batch_header_column_001='Name',
batch_header_column_002='OfficeIds',
batch_header_column_003='ElectoralDistrictId',
batch_header_column_004='VotesAllowed',
batch_header_column_005='NumberElected',
batch_header_column_006='other::ctcl-uuid',
batch_header_column_007='CandidateSelectionId1',
batch_header_column_008='CandidateSelectionId2',
batch_header_column_009='CandidateSelectionId3',
batch_header_column_010='CandidateSelectionId4',
batch_header_column_011='CandidateSelectionId5',
batch_header_column_012='CandidateSelectionId6',
batch_header_column_013='CandidateSelectionId7',
batch_header_column_014='CandidateSelectionId8',
batch_header_column_015='CandidateSelectionId9',
batch_header_column_016='CandidateSelectionId10',
)
batch_header_id = batch_header.id
if positive_value_exists(batch_header_id):
# Save an initial BatchHeaderMap
batch_header_map = BatchHeaderMap.objects.create(
batch_header_id=batch_header_id,
batch_header_map_000='contest_office_batch_id',
batch_header_map_001='contest_office_name',
batch_header_map_002='elected_office_id',
batch_header_map_003='electoral_district_id',
batch_header_map_004='contest_office_votes_allowed',
batch_header_map_005='contest_office_number_elected',
batch_header_map_006='contest_office_ctcl_uuid',
batch_header_map_007='candidate_selection_id1',
batch_header_map_008='candidate_selection_id2',
batch_header_map_009='candidate_selection_id3',
batch_header_map_010='candidate_selection_id4',
batch_header_map_011='candidate_selection_id5',
batch_header_map_012='candidate_selection_id6',
batch_header_map_013='candidate_selection_id7',
batch_header_map_014='candidate_selection_id8',
batch_header_map_015='candidate_selection_id9',
batch_header_map_016='candidate_selection_id10',
)
batch_header_map_id = batch_header_map.id
status += " BATCH_HEADER_MAP_SAVED"
if positive_value_exists(batch_header_id) and positive_value_exists(batch_header_map_id):
# Now save the BatchDescription
batch_name = "CONTEST_OFFICE " + " batch_header_id: " + str(batch_header_id)
batch_description_text = ""
batch_description = BatchDescription.objects.create(
batch_header_id=batch_header_id,
batch_header_map_id=batch_header_map_id,
batch_name=batch_name,
batch_description_text=batch_description_text,
google_civic_election_id=google_civic_election_id,
kind_of_batch='CONTEST_OFFICE',
organization_we_vote_id=organization_we_vote_id,
source_uri=batch_uri,
batch_set_id=batch_set_id,
)
status += " BATCH_DESCRIPTION_SAVED"
success = True
except Exception as e:
# Stop trying to save rows -- break out of the for loop
batch_header_id = 0
status += " EXCEPTION_BATCH_HEADER"
handle_exception(e, logger=logger, exception_message=status)
break
if not positive_value_exists(batch_header_id):
break
# check for contest_office_batch_id or electoral_district or name AND ctcl_uuid
if positive_value_exists(contest_office_id) and positive_value_exists(ctcl_uuid) and \
(positive_value_exists(electoral_district_id) or positive_value_exists(contest_office_name)):
try:
batch_row = BatchRow.objects.create(
batch_header_id=batch_header_id,
batch_row_000=contest_office_id,
batch_row_001=contest_office_name,
batch_row_002=elected_office_id,
batch_row_003=electoral_district_id,
batch_row_004=contest_office_votes_allowed,
batch_row_005=contest_office_number_elected,
batch_row_006=ctcl_uuid,
batch_row_007=candidate_selection_ids_dict.get('candidate_selection_id_1', ''),
batch_row_008=candidate_selection_ids_dict.get('candidate_selection_id_2', ''),
batch_row_009=candidate_selection_ids_dict.get('candidate_selection_id_3', ''),
batch_row_010=candidate_selection_ids_dict.get('candidate_selection_id_4', ''),
batch_row_011=candidate_selection_ids_dict.get('candidate_selection_id_5', ''),
batch_row_012=candidate_selection_ids_dict.get('candidate_selection_id_6', ''),
batch_row_013=candidate_selection_ids_dict.get('candidate_selection_id_7', ''),
batch_row_014=candidate_selection_ids_dict.get('candidate_selection_id_8', ''),
batch_row_015=candidate_selection_ids_dict.get('candidate_selection_id_9', ''),
batch_row_016=candidate_selection_ids_dict.get('candidate_selection_id_10', ''),
)
number_of_batch_rows += 1
except Exception as e:
# Stop trying to save rows -- break out of the for loop
status += " EXCEPTION_BATCH_ROW"
handle_exception(e, logger=logger, exception_message=status)
break
results = {
'success': success,
'status': status,
'batch_header_id': batch_header_id,
'batch_saved': success,
'number_of_batch_rows': number_of_batch_rows,
}
return results
def store_politician_xml(self, batch_uri, google_civic_election_id, organization_we_vote_id, xml_root,
batch_set_id=0):
"""
Retrieves Politician data from CTCL xml file
:param batch_uri:
:param google_civic_election_id:
:param organization_we_vote_id:
:param xml_root:
:param batch_set_id
:return:
"""
# Process VIP Person data
number_of_batch_rows = 0
first_line = True
success = True
status = ''
limit_for_testing = 0
batch_header_id = 0
# Get party names and their corresponding party ids
party_details_list = retrieve_all_party_names_and_ids_api()
# Look for Person and create the batch_header first. Person is the direct child node
# of VipObject
person_xml_node = xml_root.findall('Person')
for one_person in person_xml_node:
if positive_value_exists(limit_for_testing) and number_of_batch_rows >= limit_for_testing:
break
# look for relevant child nodes under Person: id, FullName, FirstName, LastName, MiddleName, PartyId, Email,
# PhoneNumber, Website, Twitter, ctcl-uuid
person_id = one_person.attrib['id']
person_full_name_node = one_person.find("./FullName/Text/[@language='"+LANGUAGE_CODE_ENGLISH+"']")
if person_full_name_node is not None:
person_full_name = person_full_name_node.text
else:
person_full_name = ''
person_first_name_node = one_person.find('FirstName')
if person_first_name_node is not None:
person_first_name = person_first_name_node.text
else:
person_first_name = ''
person_middle_name_node = one_person.find('MiddleName')
if person_middle_name_node is not None:
person_middle_name = person_middle_name_node.text
else:
person_middle_name = ''
person_last_name_node = one_person.find('LastName')
if person_last_name_node is not None:
person_last_name = person_last_name_node.text
else:
person_last_name = ''
person_party_name = ''
person_party_id_node = one_person.find('PartyId')
if person_party_id_node is not None:
person_party_id = person_party_id_node.text
# get party name from candidate_party_id
if party_details_list is not None:
# party_details_dict = [entry for entry in party_details_list]
for one_party in party_details_list:
# get the party name matching person_party_id
try:
party_id_temp = one_party.get('party_id_temp')
if person_party_id == party_id_temp:
person_party_name = one_party.get('party_name')
break
except Exception as e:
pass
person_email_id_node = one_person.find('./ContactInformation/Email')
if person_email_id_node is not None:
person_email_id = person_email_id_node.text
else:
person_email_id = ''
person_phone_number_node = one_person.find('./ContactInformation/Phone')
if person_phone_number_node is not None:
person_phone_number = person_phone_number_node.text
else:
person_phone_number = ''
person_website_url_node = one_person.find("./ContactInformation/Uri/[@annotation='website']")
if person_website_url_node is not None:
person_website_url = person_website_url_node.text
else:
person_website_url = ''
person_facebook_id_node = one_person.find("./ContactInformation/Uri/[@annotation='facebook']")
if person_facebook_id_node is not None:
person_facebook_id = person_facebook_id_node.text
else:
person_facebook_id = ''
person_twitter_id_node = one_person.find("./ContactInformation/Uri/[@annotation='twitter']")
if person_twitter_id_node is not None:
person_twitter_id = person_twitter_id_node.text
else:
person_twitter_id = ''
person_youtube_id_node = one_person.find("./ContactInformation/Uri/[@annotation='youtube']")
if person_youtube_id_node is not None:
person_youtube_id = person_youtube_id_node.text
else:
person_youtube_id = ''
person_googleplus_id_node = one_person.find("./ContactInformation/Uri/[@annotation='googleplus']")
if person_googleplus_id_node is not None:
person_googleplus_id = person_googleplus_id_node.text
else:
person_googleplus_id = ''
ctcl_uuid = ""
ctcl_uuid_node = one_person.find(
"./ExternalIdentifiers/ExternalIdentifier/[OtherType='ctcl-uuid']")
if ctcl_uuid_node is not None:
ctcl_uuid = one_person.find(
"./ExternalIdentifiers/ExternalIdentifier/[OtherType='ctcl-uuid']/Value").text
if first_line:
first_line = False
try:
batch_header = BatchHeader.objects.create(
batch_header_column_000='id',
batch_header_column_001='FullName',
batch_header_column_002='FirstName',
batch_header_column_003='MiddleName',
batch_header_column_004='LastName',
batch_header_column_005='PartyName',
batch_header_column_006='Email',
batch_header_column_007='Phone',
batch_header_column_008='uri::website',
batch_header_column_009='uri::facebook',
batch_header_column_010='uri::twitter',
batch_header_column_011='uri::youtube',
batch_header_column_012='uri::googleplus',
batch_header_column_013='other::ctcl-uuid',
)
batch_header_id = batch_header.id
if positive_value_exists(batch_header_id):
# Save an initial BatchHeaderMap
batch_header_map = BatchHeaderMap.objects.create(
batch_header_id=batch_header_id,
batch_header_map_000='politician_batch_id',
batch_header_map_001='politician_full_name',
batch_header_map_002='politician_first_name',
batch_header_map_003='politician_middle_name',
batch_header_map_004='politician_last_name',
batch_header_map_005='politician_party_name',
batch_header_map_006='politician_email_address',
batch_header_map_007='politician_phone_number',
batch_header_map_008='politician_website_url',
batch_header_map_009='politician_facebook_id',
batch_header_map_010='politician_twitter_url',
batch_header_map_011='politician_youtube_id',
batch_header_map_012='politician_googleplus_id',
batch_header_map_013='politician_ctcl_uuid',
)
batch_header_map_id = batch_header_map.id
status += " BATCH_HEADER_MAP_SAVED"
if positive_value_exists(batch_header_id) and positive_value_exists(batch_header_map_id):
# Now save the BatchDescription
batch_name = "POLITICIAN " + " batch_header_id: " + str(batch_header_id)
batch_description_text = ""
batch_description = BatchDescription.objects.create(
batch_header_id=batch_header_id,
batch_header_map_id=batch_header_map_id,
batch_name=batch_name,
batch_description_text=batch_description_text,
google_civic_election_id=google_civic_election_id,
kind_of_batch='POLITICIAN',
organization_we_vote_id=organization_we_vote_id,
source_uri=batch_uri,
batch_set_id=batch_set_id,
)
status += " BATCH_DESCRIPTION_SAVED"
success = True
except Exception as e:
# Stop trying to save rows -- break out of the for loop
batch_header_id = 0
status += " EXCEPTION_BATCH_HEADER"
handle_exception(e, logger=logger, exception_message=status)
break
if not positive_value_exists(batch_header_id):
break
# check for office_batch_id or electoral_district or name AND ctcl_uuid
# if positive_value_exists(person_id) and ctcl_uuid is not None or person_full_name is not None or \
# person_first_name is not None:
if positive_value_exists(person_id) and positive_value_exists(ctcl_uuid) and \
(positive_value_exists(person_full_name) or positive_value_exists(person_first_name)):
try:
batch_row = BatchRow.objects.create(
batch_header_id=batch_header_id,
batch_row_000=person_id,
batch_row_001=person_full_name,
batch_row_002=person_first_name,
batch_row_003=person_middle_name,
batch_row_004=person_last_name,
batch_row_005=person_party_name,
batch_row_006=person_email_id,
batch_row_007=person_phone_number,
batch_row_008=person_website_url,
batch_row_009=person_facebook_id,
batch_row_010=person_twitter_id,
batch_row_011=person_youtube_id,
batch_row_012=person_googleplus_id,
batch_row_013=ctcl_uuid,
)
number_of_batch_rows += 1
except Exception as e:
# Stop trying to save rows -- break out of the for loop
status += " EXCEPTION_BATCH_ROW"
handle_exception(e, logger=logger, exception_message=status)
break
results = {
'success': success,
'status': status,
'batch_header_id': batch_header_id,
'batch_saved': success,
'number_of_batch_rows': number_of_batch_rows,
}
return results
def store_candidate_xml(self, batch_uri, google_civic_election_id, organization_we_vote_id, xml_root,
batch_set_id=0):
"""
Retrieves Candidate data from CTCL xml file
:param batch_uri:
:param google_civic_election_id:
:param organization_we_vote_id:
:param xml_root:
:param batch_set_id
:return:
"""
# Process VIP Candidate data
number_of_batch_rows = 0
first_line = True
success = True
status = ''
limit_for_testing = 0
batch_header_id = 0
# Call party api to get corresponding party name from party id
party_details_list = retrieve_all_party_names_and_ids_api()
# Look for Candidate and create the batch_header first. Candidate is the direct child node
# of VipObject
candidate_xml_node = xml_root.findall('Candidate')
for one_candidate in candidate_xml_node:
if positive_value_exists(limit_for_testing) and number_of_batch_rows >= limit_for_testing:
break
candidate_name_english = None
candidate_ctcl_person_id = ""
candidate_party_name = ""
ctcl_uuid = ""
# look for relevant child nodes under Candidate: id, BallotName, personId, PartyId, isTopTicket,
            # other::ctcl-uuid
candidate_id = one_candidate.attrib['id']
candidate_selection_id = one_candidate.find("./BallotSelectionIds")
candidate_name_node_english = one_candidate.find("./BallotName/Text/[@language='"+LANGUAGE_CODE_ENGLISH+"']")
if candidate_name_node_english is not None:
candidate_name_english = candidate_name_node_english.text
candidate_ctcl_person_id_node = one_candidate.find('./PersonId')
if candidate_ctcl_person_id_node is not None:
candidate_ctcl_person_id = candidate_ctcl_person_id_node.text
candidate_party_id_node = one_candidate.find('./PartyId')
if candidate_party_id_node is not None:
candidate_party_id = candidate_party_id_node.text
# get party name from candidate_party_id
if party_details_list is not None:
# party_details_dict = [entry for entry in party_details_list]
for one_party in party_details_list:
# get the candidate party name matching candidate_party_id
if candidate_party_id == one_party.get('party_id_temp'):
candidate_party_name = one_party.get('party_name')
break
else:
candidate_party_name = ''
candidate_is_top_ticket_node = one_candidate.find('IsTopTicket')
if candidate_is_top_ticket_node is not None:
candidate_is_top_ticket = candidate_is_top_ticket_node.text
else:
candidate_is_top_ticket = ''
ctcl_uuid_node = one_candidate.find(
"./ExternalIdentifiers/ExternalIdentifier/[OtherType='ctcl-uuid']")
if ctcl_uuid_node is not None:
ctcl_uuid = one_candidate.find(
"./ExternalIdentifiers/ExternalIdentifier/[OtherType='ctcl-uuid']/Value").text
if first_line:
first_line = False
try:
batch_header = BatchHeader.objects.create(
batch_header_column_000='id',
batch_header_column_001='PersonId',
batch_header_column_002='Name',
batch_header_column_003='PartyName',
batch_header_column_004='IsTopTicket',
batch_header_column_005='other::ctcl-uuid',
batch_header_column_006='other::CandidateSelectionId',
)
batch_header_id = batch_header.id
if positive_value_exists(batch_header_id):
# Save an initial BatchHeaderMap
batch_header_map = BatchHeaderMap.objects.create(
batch_header_id=batch_header_id,
batch_header_map_000='candidate_batch_id',
batch_header_map_001='candidate_ctcl_person_id',
batch_header_map_002='candidate_name',
batch_header_map_003='candidate_party_name',
batch_header_map_004='candidate_is_top_ticket',
batch_header_map_005='candidate_ctcl_uuid',
batch_header_map_006='candidate_selection_id',
)
batch_header_map_id = batch_header_map.id
status += " BATCH_HEADER_MAP_SAVED"
if positive_value_exists(batch_header_id) and positive_value_exists(batch_header_map_id):
# Now save the BatchDescription
batch_name = "CANDIDATE " + " batch_header_id: " + str(batch_header_id)
batch_description_text = ""
batch_description = BatchDescription.objects.create(
batch_header_id=batch_header_id,
batch_header_map_id=batch_header_map_id,
batch_name=batch_name,
batch_description_text=batch_description_text,
google_civic_election_id=google_civic_election_id,
kind_of_batch='CANDIDATE',
organization_we_vote_id=organization_we_vote_id,
source_uri=batch_uri,
batch_set_id=batch_set_id,
)
status += " BATCH_DESCRIPTION_SAVED"
success = True
except Exception as e:
# Stop trying to save rows -- break out of the for loop
batch_header_id = 0
status += " EXCEPTION_BATCH_HEADER"
handle_exception(e, logger=logger, exception_message=status)
break
if not positive_value_exists(batch_header_id):
break
# check for candidate_id or candidate_ctcl_person_id or name AND ctcl_uuid
if positive_value_exists(candidate_id) and positive_value_exists(ctcl_uuid) and \
(positive_value_exists(candidate_ctcl_person_id) or positive_value_exists(candidate_name_english)):
try:
batch_row = BatchRow.objects.create(
batch_header_id=batch_header_id,
batch_row_000=candidate_id,
batch_row_001=candidate_ctcl_person_id,
batch_row_002=candidate_name_english,
batch_row_003=candidate_party_name,
batch_row_004=candidate_is_top_ticket,
batch_row_005=ctcl_uuid,
batch_row_006=candidate_selection_id
)
number_of_batch_rows += 1
except Exception as e:
# Stop trying to save rows -- break out of the for loop
status += " EXCEPTION_BATCH_ROW"
handle_exception(e, logger=logger, exception_message=status)
success = False
break
results = {
'success': success,
'status': status,
'batch_header_id': batch_header_id,
'batch_saved': success,
'number_of_batch_rows': number_of_batch_rows,
}
return results
def store_state_data_from_xml(self, batch_uri, google_civic_election_id, organization_we_vote_id, xml_root,
batch_set_id=0):
"""
Retrieves state data from CTCL xml file
:param batch_uri:
:param google_civic_election_id:
:param organization_we_vote_id:
:param xml_root:
        :param batch_set_id:
:return:
"""
# This state is not used right now. Parsing it for future reference
# Process VIP State data
number_of_batch_rows = 0
first_line = True
success = True
status = ''
limit_for_testing = 0
batch_header_id = 0
# Look for State and create the batch_header first. State is the direct child node of VipObject
# TODO Will this be a single node object or will there be multiple state nodes in a CTCL XML?
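        # Illustrative sketch of the VIP "State" node this parser expects (element names are taken from the
        # lookups below; the id, name and ocd-id values shown here are hypothetical):
        #   <State id="st26">
        #     <Name>Virginia</Name>
        #     <ExternalIdentifiers>
        #       <ExternalIdentifier>
        #         <Type>ocd-id</Type>
        #         <Value>ocd-division/country:us/state:va</Value>
        #       </ExternalIdentifier>
        #     </ExternalIdentifiers>
        #   </State>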
state_xml_node = xml_root.findall('State')
for one_state in state_xml_node:
state_name = None
if positive_value_exists(limit_for_testing) and number_of_batch_rows >= limit_for_testing:
break
# look for relevant child nodes under State: id, ocd-id, Name
state_id = one_state.attrib['id']
state_name_node = one_state.find('./Name')
if state_name_node is not None:
state_name = state_name_node.text
ocd_id_node = one_state.find("./ExternalIdentifiers/ExternalIdentifier/[Type='ocd-id']")
if ocd_id_node is not None:
ocd_id = one_state.find("./ExternalIdentifiers/ExternalIdentifier/[Type='ocd-id']/Value").text
else:
ocd_id = ''
if first_line:
first_line = False
try:
batch_header = BatchHeader.objects.create(
batch_header_column_000='id',
batch_header_column_001='Name',
batch_header_column_002='other::ocd-id',
)
batch_header_id = batch_header.id
if positive_value_exists(batch_header_id):
# Save an initial BatchHeaderMap
batch_header_map = BatchHeaderMap.objects.create(
batch_header_id=batch_header_id,
batch_header_map_000='state_id',
batch_header_map_001='state_name',
batch_header_map_002='ocd_id',
)
batch_header_map_id = batch_header_map.id
status += " BATCH_HEADER_MAP_SAVED"
if positive_value_exists(batch_header_id) and positive_value_exists(batch_header_map_id):
# Now save the BatchDescription
batch_name = "STATE " + " batch_header_id: " + str(batch_header_id)
batch_description_text = ""
batch_description = BatchDescription.objects.create(
batch_header_id=batch_header_id,
batch_header_map_id=batch_header_map_id,
batch_name=batch_name,
batch_description_text=batch_description_text,
google_civic_election_id=google_civic_election_id,
kind_of_batch='STATE',
organization_we_vote_id=organization_we_vote_id,
source_uri=batch_uri,
batch_set_id=batch_set_id,
)
status += " BATCH_DESCRIPTION_SAVED"
success = True
except Exception as e:
# Stop trying to save rows -- break out of the for loop
batch_header_id = 0
status += " EXCEPTION_BATCH_HEADER"
handle_exception(e, logger=logger, exception_message=status)
break
if not positive_value_exists(batch_header_id):
break
# check for state_id or name AND ocd_id
if positive_value_exists(state_id) and (positive_value_exists(state_name)):
try:
batch_row = BatchRow.objects.create(
batch_header_id=batch_header_id,
batch_row_000=state_id,
batch_row_001=state_name,
batch_row_002=ocd_id,
)
number_of_batch_rows += 1
except Exception as e:
# Stop trying to save rows -- break out of the for loop
status += " EXCEPTION_BATCH_ROW"
handle_exception(e, logger=logger, exception_message=status)
success = False
break
results = {
'success': success,
'status': status,
'batch_header_id': batch_header_id,
'batch_saved': success,
'number_of_batch_rows': number_of_batch_rows,
}
return results
def store_election_metadata_from_xml(self, batch_uri, google_civic_election_id, organization_we_vote_id, xml_root,
batch_set_id=0):
"""
Retrieves election metadata from CTCL xml file
:param batch_uri:
:param google_civic_election_id:
:param organization_we_vote_id:
:param xml_root:
        :param batch_set_id:
:return:
"""
# This election metadata is not used right now. Parsing it for future reference
# Process VIP Election metadata
success = True
status = ''
batch_header_id = 0
# Look for Election and create the batch_header first. Election is the direct child node of VipObject
election_xml_node = xml_root.find('Election')
election_date_str = None
# look for relevant child nodes under Election: id, Date, StateId
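        # Illustrative sketch of the expected "Election" node (element names match the lookups below;
        # the values are hypothetical):
        #   <Election id="election01">
        #     <Date>2018-11-06</Date>
        #     <StateId>st26</StateId>
        #   </Election>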
        if election_xml_node is None:
results = {
'success': success,
'status': "STORE_ELECTION_METADATA_FROM_XML-ELECTION_NODE_NOT_FOUND",
'batch_header_id': batch_header_id,
'batch_saved': success,
}
return results
election_id = election_xml_node.attrib['id']
        election_date_xml_node = election_xml_node.find('./Date')
        if election_date_xml_node is not None:
            election_date = election_date_xml_node.text
        else:
            election_date = ''
state_id_node = election_xml_node.find("./StateId")
if state_id_node is not None:
state_id = state_id_node.text
else:
state_id = ''
try:
batch_header = BatchHeader.objects.create(
batch_header_column_000='id',
batch_header_column_001='Date',
batch_header_column_002='StateId',
)
batch_header_id = batch_header.id
if positive_value_exists(batch_header_id):
# Save an initial BatchHeaderMap
batch_header_map = BatchHeaderMap.objects.create(
batch_header_id=batch_header_id,
batch_header_map_000='election_id',
batch_header_map_001='election_date',
batch_header_map_002='state_id',
)
batch_header_map_id = batch_header_map.id
status += " BATCH_HEADER_MAP_SAVED"
if positive_value_exists(batch_header_id) and positive_value_exists(batch_header_map_id):
# Now save the BatchDescription
batch_name = "ELECTION " + " batch_header_id: " + str(batch_header_id)
batch_description_text = ""
batch_description = BatchDescription.objects.create(
batch_header_id=batch_header_id,
batch_header_map_id=batch_header_map_id,
batch_name=batch_name,
batch_description_text=batch_description_text,
google_civic_election_id=google_civic_election_id,
kind_of_batch='ELECTION',
organization_we_vote_id=organization_we_vote_id,
source_uri=batch_uri,
batch_set_id=batch_set_id,
)
status += " BATCH_DESCRIPTION_SAVED"
success = True
except Exception as e:
# Stop trying to save rows -- break out of the for loop
batch_header_id = 0
status += " EXCEPTION_BATCH_HEADER"
handle_exception(e, logger=logger, exception_message=status)
        # check for election_id AND election_date AND state_id
if positive_value_exists(election_id) and positive_value_exists(election_date) and \
positive_value_exists(state_id):
try:
batch_row = BatchRow.objects.create(
batch_header_id=batch_header_id,
batch_row_000=election_id,
batch_row_001=election_date,
batch_row_002=state_id,
)
except Exception as e:
# Stop trying to save rows -- break out of the for loop
status += " EXCEPTION_BATCH_ROW"
handle_exception(e, logger=logger, exception_message=status)
success = False
results = {
'success': success,
'status': status,
'batch_header_id': batch_header_id,
'batch_saved': success,
}
return results
def store_source_metadata_from_xml(self, batch_uri, google_civic_election_id, organization_we_vote_id, xml_root,
batch_set_id=0):
"""
Retrieves source metadata from CTCL xml file
:param batch_uri:
:param google_civic_election_id:
:param organization_we_vote_id:
:param xml_root:
        :param batch_set_id:
:return:
"""
# This source data is not used for now. Parsing it for future reference
# Process VIP Source metadata
success = False
status = ''
batch_header_id = 0
        # Look for Source and create the batch_header first. Source is a direct child node of VipObject
source_xml_node = xml_root.find('Source')
source_date_str = None
        if source_xml_node is None:
results = {
'success': success,
'status': "STORE_SOURCE_METADATA_FROM_XML-SOURCE_NODE_NOT_FOUND",
'batch_header_id': batch_header_id,
'batch_saved': success,
}
return results
# look for relevant child nodes under Source: id, DateTime, Name, OrganizationUri, VipId
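        # Illustrative sketch of the expected "Source" node (element names match the lookups below;
        # the values are hypothetical):
        #   <Source id="source01">
        #     <DateTime>2018-09-01T12:00:00</DateTime>
        #     <Name>Example Data Provider</Name>
        #     <OrganizationUri>https://example.com</OrganizationUri>
        #     <VipId>51</VipId>
        #   </Source>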
source_id = source_xml_node.attrib['id']
        source_datetime_xml_node = source_xml_node.find('./DateTime')
        if source_datetime_xml_node is not None:
            source_datetime = source_datetime_xml_node.text
        else:
            source_datetime = ''
        source_name_node = source_xml_node.find("./Name")
        if source_name_node is not None:
            source_name = source_name_node.text
        else:
            source_name = ''
        organization_uri_node = source_xml_node.find("./OrganizationUri")
        if organization_uri_node is not None:
            organization_uri = organization_uri_node.text
        else:
            organization_uri = ''
        vip_id_node = source_xml_node.find("./VipId")
        if vip_id_node is not None:
            vip_id = vip_id_node.text
        else:
            vip_id = ''
try:
batch_header = BatchHeader.objects.create(
batch_header_column_000='id',
batch_header_column_001='DateTime',
batch_header_column_002='Name',
batch_header_column_003='OrganizationUri',
batch_header_column_004='VipId',
)
batch_header_id = batch_header.id
if positive_value_exists(batch_header_id):
# Save an initial BatchHeaderMap
batch_header_map = BatchHeaderMap.objects.create(
batch_header_id=batch_header_id,
batch_header_map_000='source_id',
batch_header_map_001='source_datetime',
batch_header_map_002='source_name',
batch_header_map_003='organization_uri',
batch_header_map_004='vip_id'
)
batch_header_map_id = batch_header_map.id
status += " BATCH_HEADER_MAP_SAVED"
if positive_value_exists(batch_header_id) and positive_value_exists(batch_header_map_id):
# Now save the BatchDescription
batch_name = "SOURCE " + " batch_header_id: " + str(batch_header_id)
batch_description_text = ""
batch_description = BatchDescription.objects.create(
batch_header_id=batch_header_id,
batch_header_map_id=batch_header_map_id,
batch_name=batch_name,
batch_description_text=batch_description_text,
google_civic_election_id=google_civic_election_id,
kind_of_batch='SOURCE',
organization_we_vote_id=organization_we_vote_id,
source_uri=batch_uri,
batch_set_id=batch_set_id,
)
status += " BATCH_DESCRIPTION_SAVED"
success = True
except Exception as e:
# Stop trying to save rows -- break out of the for loop
batch_header_id = 0
status += " EXCEPTION_BATCH_HEADER"
handle_exception(e, logger=logger, exception_message=status)
        # check for source_id AND source_datetime AND source_name AND organization_uri
if positive_value_exists(source_id) and positive_value_exists(source_datetime) and \
positive_value_exists(source_name) and positive_value_exists(organization_uri):
try:
batch_row = BatchRow.objects.create(
batch_header_id=batch_header_id,
batch_row_000=source_id,
batch_row_001=source_datetime,
batch_row_002=source_name,
batch_row_003=organization_uri,
batch_row_004=vip_id
)
except Exception as e:
# Stop trying to save rows -- break out of the for loop
status += " EXCEPTION_BATCH_ROW"
handle_exception(e, logger=logger, exception_message=status)
success = False
results = {
'success': success,
'status': status,
'batch_header_id': batch_header_id,
'batch_saved': success,
}
return results
def create_batch_set_vip_xml(self, batch_file, batch_uri, google_civic_election_id, organization_we_vote_id):
"""
Retrieves CTCL Batch Set data from an xml file - Measure, Office, Candidate, Politician
:param batch_file:
:param batch_uri:
:param google_civic_election_id:
:param organization_we_vote_id:
:return:
"""
import_date = date.today()
# Retrieve from XML
if batch_file:
xml_tree = ElementTree.parse(batch_file)
batch_set_name = batch_file.name + " - " + str(import_date)
else:
request = urllib.request.urlopen(batch_uri)
# xml_data = request.read()
# xml_data = xmltodict.parse(xml_data)
# # xml_data_list_json = list(xml_data)
# structured_json = json.dumps(xml_data)
xml_tree = ElementTree.parse(request)
request.close()
# set batch_set_name as file_name
batch_set_name_list = batch_uri.split('/')
batch_set_name = batch_set_name_list[len(batch_set_name_list) - 1] + " - " + str(import_date)
xml_root = xml_tree.getroot()
status = ''
success = False
number_of_batch_rows = 0
batch_set_id = 0
continue_batch_set_processing = True # Set to False if we run into a problem that requires we stop processing
if xml_root:
# create batch_set object
try:
batch_set = BatchSet.objects.create(batch_set_description_text="", batch_set_name=batch_set_name,
batch_set_source=BATCH_SET_SOURCE_CTCL,
google_civic_election_id=google_civic_election_id,
source_uri=batch_uri, import_date=import_date)
batch_set_id = batch_set.id
if positive_value_exists(batch_set_id):
status += " BATCH_SET_SAVED"
success = True
except Exception as e:
# Stop trying to save rows -- break out of the for loop
continue_batch_set_processing = False
batch_set_id = 0
status += " EXCEPTION_BATCH_SET "
handle_exception(e, logger=logger, exception_message=status)
# import Electoral District
skip_electoral_district = False # We can set this to True during development to save time
if continue_batch_set_processing and not skip_electoral_district:
electoral_district_list_found = False
electoral_district_item_list = xml_root.findall('ElectoralDistrict')
if not len(electoral_district_item_list):
continue_batch_set_processing = False
else:
results = electoral_district_import_from_xml_data(electoral_district_item_list)
if results['success']:
status += "CREATE_BATCH_SET_ELECTORAL_DISTRICT_IMPORTED "
number_of_batch_rows += results['saved']
                    # TODO check whether this count should include only saved Electoral Districts, or updated ones as well
number_of_batch_rows += results['updated']
electoral_district_list_found = True
else:
continue_batch_set_processing = False
status += results['status']
status += " CREATE_BATCH_SET_ELECTORAL_DISTRICT_ERRORS "
# import Party
skip_party = False # We can set this to True during development to save time
if continue_batch_set_processing and not skip_party:
party_list_found = False
party_item_list = xml_root.findall('Party')
if not len(party_item_list):
continue_batch_set_processing = False
status += " CREATE_BATCH_SET-PARTY_IMPORT_ERRORS-NO_party_item_list "
else:
results = party_import_from_xml_data(party_item_list)
if results['success']:
status += "CREATE_BATCH_SET_PARTY_IMPORTED"
number_of_batch_rows += results['saved']
number_of_batch_rows += results['updated']
                    # TODO check whether this count should include only saved Party entries, or updated ones as well
party_list_found = True
# A given data source may not always have electoral district and/or party data,
# but the referenced electoral district id or party id might be already present
# in the master database tables, hence commenting out below code
# if not electoral_district_list_found or not party_list_found:
# results = {
# 'success': False,
# 'status': status,
# 'batch_header_id': 0,
# 'batch_saved': False,
# 'number_of_batch_rows': 0,
# }
# return results
else:
continue_batch_set_processing = False
status += results['status']
status += " CREATE_BATCH_SET-PARTY_IMPORT_ERRORS "
# look for different data sets in the XML - ElectedOffice, ContestOffice, Candidate, Politician, Measure
# Elected Office
skip_elected_office = False # We can set this to True during development to save time
if continue_batch_set_processing and not skip_elected_office:
results = self.store_elected_office_xml(batch_uri, google_civic_election_id, organization_we_vote_id,
xml_root, batch_set_id)
if results['success']:
# Elected Office data found
status += 'CREATE_BATCH_SET_ELECTED_OFFICE_DATA_FOUND'
number_of_batch_rows += results['number_of_batch_rows']
else:
continue_batch_set_processing = False
status += results['status']
status += " CREATE_BATCH_SET-PARTY_IMPORT_ERRORS "
# Candidate-to-office-mappings
skip_candidate_mapping = False # We can set this to True during development to save time
if continue_batch_set_processing and not skip_candidate_mapping:
results = create_candidate_selection_rows(xml_root, batch_set_id)
if results['success']:
                # Candidate selection (candidate-to-office mapping) data found
status += 'CREATE_BATCH_SET_CANDIDATE_SELECTION_DATA_FOUND'
number_of_batch_rows += results['number_of_batch_rows']
else:
continue_batch_set_processing = False
status += results['status']
status += " CREATE_BATCH_SET-CANDIDATE_SELECTION_ERRORS "
# ContestOffice entries
skip_contest_office = False # We can set this to True during development to save time
if continue_batch_set_processing and not skip_contest_office:
results = self.store_contest_office_xml(
batch_uri, google_civic_election_id, organization_we_vote_id, xml_root, batch_set_id)
if results['success']:
# Contest Office data found
status += 'CREATE_BATCH_SET_CONTEST_OFFICE_DATA_FOUND'
number_of_batch_rows += results['number_of_batch_rows']
else:
continue_batch_set_processing = False
status += results['status']
status += " CREATE_BATCH_SET-CONTEST_OFFICE_ERRORS "
# Politician entries
skip_politician = False # We can set this to True during development to save time
if continue_batch_set_processing and not skip_politician:
results = self.store_politician_xml(
batch_uri, google_civic_election_id, organization_we_vote_id, xml_root, batch_set_id)
if results['success']:
status += 'CREATE_BATCH_SET_POLITICIAN_DATA_FOUND'
number_of_batch_rows += results['number_of_batch_rows']
else:
continue_batch_set_processing = False
status += results['status']
status += " CREATE_BATCH_SET-POLITICIAN_ERRORS "
# Candidate entries
skip_candidate = False # We can set this to True during development to save time
if continue_batch_set_processing and not skip_candidate:
results = self.store_candidate_xml(
batch_uri, google_civic_election_id, organization_we_vote_id, xml_root, batch_set_id)
if results['success']:
status += 'CREATE_BATCH_SET_CANDIDATE_DATA_FOUND'
number_of_batch_rows += results['number_of_batch_rows']
else:
continue_batch_set_processing = False
status += results['status']
status += " CREATE_BATCH_SET-CANDIDATE_ERRORS "
# Measure entries
skip_measure = False # We can set this to True during development to save time
if continue_batch_set_processing and not skip_measure:
results = self.store_measure_xml(
batch_uri, google_civic_election_id, organization_we_vote_id, xml_root, batch_set_id)
if results['success']:
status += 'CREATE_BATCH_SET_MEASURE_DATA_FOUND'
number_of_batch_rows += results['number_of_batch_rows']
success = True
else:
continue_batch_set_processing = False
status += results['status']
status += " CREATE_BATCH_SET-MEASURE_ERRORS "
# State data entries
if continue_batch_set_processing:
results = self.store_state_data_from_xml(batch_uri, google_civic_election_id, organization_we_vote_id,
xml_root, batch_set_id)
if results['success']:
status += 'CREATE_BATCH_SET_STATE_DATA_FOUND'
number_of_batch_rows += results['number_of_batch_rows']
success = True
else:
continue_batch_set_processing = False
status += results['status']
status += " CREATE_BATCH_SET-STATE_DATA_ERRORS "
# Election metadata entries
if continue_batch_set_processing:
results = self.store_election_metadata_from_xml(
batch_uri, google_civic_election_id, organization_we_vote_id, xml_root, batch_set_id)
if results['success']:
status += ' CREATE_BATCH_SET_ELECTION_METADATA_FOUND '
number_of_batch_rows += 1
success = True
else:
continue_batch_set_processing = False
status += results['status']
status += " CREATE_BATCH_SET-ELECTION_METADATA_ERRORS "
# Source metadata entries
if continue_batch_set_processing:
results = self.store_source_metadata_from_xml(
batch_uri, google_civic_election_id, organization_we_vote_id, xml_root, batch_set_id)
if results['success']:
status += ' CREATE_BATCH_SET_SOURCE_METADATA_FOUND '
number_of_batch_rows += 1
success = True
else:
continue_batch_set_processing = False
status += results['status']
status += " CREATE_BATCH_SET-SOURCE_METADATA_ERRORS "
results = {
'success': success,
'status': status,
'batch_set_id': batch_set_id,
'batch_saved': success,
'number_of_batch_rows': number_of_batch_rows,
}
return results
def count_number_of_batch_action_rows(self, header_id, kind_of_batch):
"""
Return count of batch rows for a given header id
        :param header_id:
        :param kind_of_batch:
        :return:
"""
number_of_batch_action_rows = 0
if positive_value_exists(header_id):
if kind_of_batch == MEASURE:
number_of_batch_action_rows = BatchRowActionMeasure.objects.filter(batch_header_id=header_id).count()
elif kind_of_batch == ELECTED_OFFICE:
number_of_batch_action_rows = BatchRowActionElectedOffice.objects.filter(batch_header_id=header_id).\
count()
elif kind_of_batch == CONTEST_OFFICE:
number_of_batch_action_rows = BatchRowActionContestOffice.objects.filter(batch_header_id=header_id).\
count()
elif kind_of_batch == CANDIDATE:
number_of_batch_action_rows = BatchRowActionCandidate.objects.filter(batch_header_id=header_id).count()
elif kind_of_batch == POLITICIAN:
number_of_batch_action_rows = BatchRowActionPolitician.objects.filter(batch_header_id=header_id).count()
else:
number_of_batch_action_rows = 0
return number_of_batch_action_rows
def fetch_batch_header_translation_suggestion(self, kind_of_batch, alternate_header_value):
"""
We are looking at one header value from a file imported by an admin or volunteer. We want to see if
there are any suggestions for headers already recognized by We Vote.
:param kind_of_batch:
:param alternate_header_value:
:return:
"""
results = self.retrieve_batch_header_translation_suggestion(kind_of_batch, alternate_header_value)
if results['batch_header_translation_suggestion_found']:
batch_header_translation_suggestion = results['batch_header_translation_suggestion']
return batch_header_translation_suggestion.header_value_recognized_by_we_vote
return ""
# TODO This hasn't been built
def fetch_batch_row_translation_map(self, kind_of_batch, batch_row_name, incoming_alternate_row_value):
results = self.retrieve_batch_row_translation_map(kind_of_batch, incoming_alternate_row_value)
if results['batch_header_translation_suggestion_found']:
batch_header_translation_suggestion = results['batch_header_translation_suggestion']
return batch_header_translation_suggestion.header_value_recognized_by_we_vote
return ""
def fetch_elected_office_name_from_elected_office_ctcl_id(self, elected_office_ctcl_id, batch_set_id):
"""
Take in elected_office_ctcl_id and batch_set_id, look up BatchRow and return elected_office_name
:param elected_office_ctcl_id:
:param batch_set_id:
:return:
"""
elected_office_name = ''
batch_header_id = 0
# From batch_description, get the header_id using batch_set_id
# batch_header_id = get_batch_header_id_from_batch_description(batch_set_id, ELECTED_OFFICE)
try:
if positive_value_exists(batch_set_id):
batch_description_on_stage = BatchDescription.objects.get(batch_set_id=batch_set_id,
kind_of_batch=ELECTED_OFFICE)
if batch_description_on_stage:
batch_header_id = batch_description_on_stage.batch_header_id
        except BatchDescription.DoesNotExist:
            elected_office_name = ''
# Lookup BatchRow with given header_id and elected_office_ctcl_id. But before doing that, we need to get batch
# row column name that matches 'elected_office_batch_id'
try:
batch_manager = BatchManager()
if positive_value_exists(batch_header_id) and elected_office_ctcl_id:
batch_header_map = BatchHeaderMap.objects.get(batch_header_id=batch_header_id)
# Get the column name in BatchRow that stores elected_office_batch_id - id taken from batch_header_map
# eg: batch_row_000 -> elected_office_batch_id
elected_office_id_column_name = batch_manager.retrieve_column_name_from_batch_row(
"elected_office_batch_id", batch_header_map)
# we found batch row column name corresponding to elected_office_batch_id, now look up batch_row table
# with given batch_header_id and elected_office_batch_id (batch_row_00)
batch_row_on_stage = BatchRow.objects.get(batch_header_id=batch_header_id,
**{ elected_office_id_column_name: elected_office_ctcl_id})
# we know the batch row, next retrieve value for elected_office_name eg: off1 -> NC State Senator
elected_office_name = batch_manager.retrieve_value_from_batch_row('elected_office_name',
batch_header_map, batch_row_on_stage)
except BatchRow.DoesNotExist:
elected_office_name = ''
return elected_office_name
def fetch_state_code_from_person_id_in_candidate(self, person_id, batch_set_id):
"""
Take in person_id, batch_set_id, look up BatchRowActionCandidate and return state_code
:param person_id:
:param batch_set_id:
:return:
"""
state_code = ''
batch_header_id = 0
# From batch_description, get the header_id using batch_set_id
# batch_header_id = get_batch_header_id_from_batch_description(batch_set_id, CANDIDATE)
try:
if positive_value_exists(batch_set_id):
batch_description_on_stage = BatchDescription.objects.get(batch_set_id=batch_set_id,
kind_of_batch=CANDIDATE)
if batch_description_on_stage:
batch_header_id = batch_description_on_stage.batch_header_id
except BatchDescription.DoesNotExist:
pass
try:
if positive_value_exists(batch_header_id) and person_id is not None:
batchrowaction_candidate = BatchRowActionCandidate.objects.get(batch_header_id=batch_header_id,
candidate_ctcl_person_id=person_id)
if batchrowaction_candidate is not None:
state_code = batchrowaction_candidate.state_code
if state_code is None:
return ''
except BatchRowActionCandidate.DoesNotExist:
state_code = ''
return state_code
def retrieve_election_details_from_election_day_or_state_code(self, election_day='', state_code=''):
"""
Retrieve election_name and google_civic_election_id from election_day and/or state_code
:param election_day:
:param state_code:
:return:
"""
success = False
election_name = ''
google_civic_election_id = ''
# election lookup using state & election day, and fetch google_civic_election_id
election_manager = ElectionManager()
election_results = election_manager.retrieve_elections_by_election_date(election_day)
if election_results['success']:
election_list = election_results['election_list']
if len(election_list) == 1:
[election] = election_list
election_name = election.election_name
google_civic_election_id = election.google_civic_election_id
success = True
else:
# use state_code & election_date for lookup. If multiple entries found, do not set
# google_civic_election_id
election_results = election_manager.retrieve_elections_by_state_and_election_date(state_code,
election_day)
if election_results['success']:
election_list = election_results['election_list']
if len(election_list) == 1:
[election] = election_list
election_name = election.election_name
google_civic_election_id = election.google_civic_election_id
success = True
results = {
'success': success,
'election_name': election_name,
'google_civic_election_id': google_civic_election_id,
}
return results
def create_batch_set_organization_endorsements(self, organization):
"""
Create batch set for organization endorsements
:param organization:
:return:
"""
batch_set_id = 0
batch_saved = False
status = ''
success = False
number_of_batch_rows = 0
batch_set_id = 0
election_name = ''
structured_organization_endorsement_json = ''
google_civic_election_id = 0
organization_endorsements_api_url = organization.organization_endorsements_api_url
if not organization_endorsements_api_url:
results = {
'success': False,
'status': "CREATE_BATCH_SET_ORGANIZATION_ENDORSEMENTS-INVALID_URL",
'batch_saved': batch_saved,
'number_of_batch_rows': 0,
'election_name': election_name,
'batch_set_id': batch_set_id,
'google_civic_election_id': google_civic_election_id,
            }
            return results
import_date = date.today()
try:
endorsement_req = urllib.request.Request(organization_endorsements_api_url,
headers={'User-Agent': 'Mozilla/5.0'})
endorsement_url = urlopen(endorsement_req)
# endorsement_url.close()
# structured_organization_endorsement_json = json.loads(endorsement_url)
organization_endorsement_url = endorsement_url.read()
organization_endorsement_json = organization_endorsement_url.decode('utf-8')
structured_organization_endorsement_json = json.loads(organization_endorsement_json)
batch_set_name_url = urlquote(organization_endorsements_api_url)
except Exception as e:
batch_set_id = 0
status += " EXCEPTION_BATCH_SET "
handle_exception(e, logger=logger, exception_message=status)
if not structured_organization_endorsement_json:
results = {
'success': False,
'status': "CREATE_BATCH_SET_ORGANIZATION_ENDORSEMENT_FAILED",
'batch_saved': batch_saved,
'number_of_batch_rows': 0,
'election_name': election_name,
'batch_set_id': batch_set_id,
'google_civic_election_id': google_civic_election_id,
}
return results
# set batch_set_name as file_name
batch_set_name_list = batch_set_name_url.split('/')
batch_set_name = organization.organization_name + " - " + batch_set_name_list[len(batch_set_name_list) - 1] + \
" - " + str(import_date)
# create batch_set object
try:
batch_set = BatchSet.objects.create(batch_set_description_text="", batch_set_name=batch_set_name,
batch_set_source=BATCH_SET_SOURCE_IMPORT_EXPORT_ENDORSEMENTS,
source_uri=batch_set_name_url, import_date=import_date)
batch_set_id = batch_set.id
if positive_value_exists(batch_set_id):
status += " BATCH_SET_SAVED"
success = True
except Exception as e:
# Stop trying to save rows -- break out of the for loop
batch_set_id = 0
status += " EXCEPTION_BATCH_SET "
handle_exception(e, logger=logger, exception_message=status)
# import metadata like organization name, url, endorsement url, twitter url, org image url, email
# organization_name = structured_organization_endorsement_json['organization_name']
# organization_url = structured_organization_endorsement_json['organization_url']
# organization_endorsements_url = structured_organization_endorsement_json['organization_endorsements_url']
# organization_twitter_url = structured_organization_endorsement_json['organization_twitter_url']
# organization_image_url = structured_organization_endorsement_json['organization_image_url']
# organization_image_url_https = structured_organization_endorsement_json['organization_image_url_https']
# organization_email = structured_organization_endorsement_json['organization_email']
candidate_positions_list = structured_organization_endorsement_json['candidate_positions']
# measure_positions_list = structured_organization_endorsement_json['measure_positions']
organization_we_vote_id = organization.we_vote_id
organization_twitter_handle = organization.organization_twitter_handle
# import Offices from json
results = self.import_offices_from_endorsement_json(batch_set_name_url, batch_set_id, organization_we_vote_id,
candidate_positions_list)
if results['success']:
status += 'CREATE_BATCH_SET_OFFICE_DATA_IMPORTED'
number_of_batch_rows += results['number_of_offices']
else:
continue_batch_set_processing = False
status += results['status']
status += " CREATE_BATCH_SET-OFFICE_ERRORS "
# import Candidates from json
results = self.import_candidates_from_endorsement_json(batch_set_name_url, batch_set_id,
organization_we_vote_id, candidate_positions_list)
if results['success']:
status += 'CREATE_BATCH_SET_CANDIDATE_DATA_IMPORTED'
number_of_batch_rows += results['number_of_candidates']
else:
continue_batch_set_processing = False
status += results['status']
status += " CREATE_BATCH_SET-CANDIDATE_ERRORS "
results = self.import_candidate_positions_from_endorsement_json(batch_set_name_url, batch_set_id,
organization_we_vote_id,
organization_twitter_handle,
candidate_positions_list)
if results['success']:
success = True
status += "CREATE_BATCH_SET_CANDIDATE_POSITIONS_IMPORTED "
number_of_batch_rows += results['number_of_candidate_positions']
            # TODO check whether this count should include only saved Candidate positions, or updated ones as well
# number_of_batch_rows += results['updated']
batch_saved = True
election_name = results['election_name']
google_civic_election_id = results['google_civic_election_id']
else:
# continue_batch_set_processing = False
status += results['status']
status += " CREATE_BATCH_SET_CANDIDATE_POSITIONS_ERRORS "
results = {
'success': success,
'status': status,
'number_of_batch_rows': number_of_batch_rows,
'batch_saved': batch_saved,
'election_name': election_name,
'batch_set_id': batch_set_id,
'google_civic_election_id': google_civic_election_id,
}
return results
def import_offices_from_endorsement_json(self, batch_uri='', batch_set_id='', organization_we_vote_id='',
candidate_positions_list=''):
"""
Import Offices from organization endorsements json file
:param batch_uri:
:param batch_set_id:
:param organization_we_vote_id:
:param candidate_positions_list:
:return:
"""
status = ''
success = False
number_of_offices = 0
first_line = True
election_day = ''
google_civic_election_id = 0
if not candidate_positions_list:
results = {
'success': False,
'status': "IMPORT_OFFICES_FROM_ENDORSEMENT_JSON-INVALID_DATA",
'number_of_offices': 0,
'election_day': '',
'google_civic_election_id': google_civic_election_id,
}
return results
# else:
for one_entry in candidate_positions_list:
# read office details for each candidate position
office_name = one_entry['office_name']
state_code = one_entry['state_code']
candidate_name = one_entry['name']
election_day = one_entry['election_day']
google_civic_election_id = one_entry['google_civic_election_id']
party = one_entry['party']
office_ocd_division_id = one_entry['office_ocd_division_id']
if first_line:
# create batch_header and batch_header_map for candidate_positions
first_line = False
try:
batch_header = BatchHeader.objects.create(
batch_header_column_000='office_name',
batch_header_column_001='state_code',
batch_header_column_002='candidate_name',
batch_header_column_003='election_day',
batch_header_column_004='google_civic_election_id',
batch_header_column_005='party',
batch_header_column_006='office_ocd_division_id',
)
batch_header_id = batch_header.id
if positive_value_exists(batch_header_id):
# Save an initial BatchHeaderMap
batch_header_map = BatchHeaderMap.objects.create(
batch_header_id=batch_header_id,
batch_header_map_000='contest_office_name',
batch_header_map_001='state_code',
batch_header_map_002='candidate_name',
batch_header_map_003='election_day',
batch_header_map_004='google_civic_election_id',
batch_header_map_005='party',
batch_header_map_006='office_ocd_division_id',
)
batch_header_map_id = batch_header_map.id
status += " BATCH_HEADER_MAP_SAVED"
if positive_value_exists(batch_header_id) and positive_value_exists(batch_header_map_id):
# Now save the BatchDescription
batch_name = "ENDORSEMENTS_JSON_OFFICES " + " batch_header_id: " + str(batch_header_id)
batch_description_text = ""
batch_description = BatchDescription.objects.create(
batch_header_id=batch_header_id,
batch_header_map_id=batch_header_map_id,
batch_name=batch_name,
batch_description_text=batch_description_text,
google_civic_election_id=google_civic_election_id,
kind_of_batch='CONTEST_OFFICE',
organization_we_vote_id=organization_we_vote_id,
source_uri=batch_uri,
batch_set_id=batch_set_id,
)
status += " BATCH_DESCRIPTION_SAVED"
success = True
except Exception as e:
batch_header_id = 0
status += " EXCEPTION_BATCH_HEADER"
handle_exception(e, logger=logger, exception_message=status)
break
if not positive_value_exists(batch_header_id):
break
try:
batch_row = BatchRow.objects.create(
batch_header_id=batch_header_id,
batch_row_000=office_name,
batch_row_001=state_code,
batch_row_002=candidate_name,
batch_row_003=election_day,
batch_row_004=google_civic_election_id,
batch_row_005=party,
batch_row_006=office_ocd_division_id,
)
number_of_offices += 1
except Exception as e:
# Stop trying to save rows -- break out of the for loop
status += " EXCEPTION_BATCH_ROW"
handle_exception(e, logger=logger, exception_message=status)
break
results = {
'success': success,
'status': status,
'batch_header_id': batch_header_id,
'batch_saved': success,
'number_of_offices': number_of_offices,
'election_day': election_day,
'google_civic_election_id': google_civic_election_id,
}
return results
def import_candidates_from_endorsement_json(self, batch_uri='', batch_set_id='', organization_we_vote_id='',
candidate_positions_list=''):
"""
Import Candidates from organization endorsements json file
:param batch_uri:
:param batch_set_id:
:param organization_we_vote_id:
:param candidate_positions_list:
:return:
"""
status = ''
success = False
number_of_candidates = 0
first_line = True
election_day = ''
google_civic_election_id = 0
if not candidate_positions_list:
results = {
'success': False,
'status': "IMPORT_CANDIDATES_FROM_ENDORSEMENT_JSON-INVALID_DATA",
'number_of_candidates': 0,
'election_day': election_day,
'google_civic_election_id': google_civic_election_id,
}
return results
# else:
for one_entry in candidate_positions_list:
# read position details for each candidate
candidate_name = one_entry['name']
candidate_facebook_url = one_entry['facebook_url']
candidate_twitter_url = one_entry['twitter_url']
candidate_website_url = one_entry['website_url']
candidate_contact_form_url = one_entry['candidate_contact_form_url']
party = one_entry['party']
contest_office_name = one_entry['office_name']
candidate_profile_image_url_https = one_entry['profile_image_url_https']
state_code = one_entry['state_code']
election_day = one_entry['election_day']
google_civic_election_id = one_entry['google_civic_election_id']
candidate_ocd_division_id = one_entry['candidate_ocd_division_id']
if first_line:
# create batch_header and batch_header_map for candidate_positions
first_line = False
try:
batch_header = BatchHeader.objects.create(
batch_header_column_000='name',
batch_header_column_001='twitter_url',
batch_header_column_002='facebook_url',
batch_header_column_003='more_info_url',
batch_header_column_004='state_code',
batch_header_column_005='office_name',
batch_header_column_006='profile_image_url_https',
batch_header_column_007='party',
batch_header_column_008='election_day',
batch_header_column_009='google_civic_election_id',
batch_header_column_010='candidate_ocd_division_id',
batch_header_column_011='candidate_contact_form_url'
)
batch_header_id = batch_header.id
if positive_value_exists(batch_header_id):
# Save an initial BatchHeaderMap
batch_header_map = BatchHeaderMap.objects.create(
batch_header_id=batch_header_id,
batch_header_map_000='candidate_name',
batch_header_map_001='candidate_twitter_handle',
batch_header_map_002='facebook_url',
batch_header_map_003='candidate_url',
batch_header_map_004='state_code',
batch_header_map_005='contest_office_name',
batch_header_map_006='candidate_profile_image_url',
batch_header_map_007='candidate_party_name',
batch_header_map_008='election_day',
batch_header_map_009='google_civic_election_id',
batch_header_map_010='candidate_ocd_division_id',
batch_header_map_011='candidate_contact_form_url'
)
batch_header_map_id = batch_header_map.id
status += " BATCH_HEADER_MAP_SAVED"
if positive_value_exists(batch_header_id) and positive_value_exists(batch_header_map_id):
# Now save the BatchDescription
batch_name = "ENDORSEMENTS_JSON_CANDIDATES " + " batch_header_id: " + str(batch_header_id)
batch_description_text = ""
batch_description = BatchDescription.objects.create(
batch_header_id=batch_header_id,
batch_header_map_id=batch_header_map_id,
batch_name=batch_name,
batch_description_text=batch_description_text,
google_civic_election_id=google_civic_election_id,
kind_of_batch='CANDIDATE',
organization_we_vote_id=organization_we_vote_id,
source_uri=batch_uri,
batch_set_id=batch_set_id,
)
status += " BATCH_DESCRIPTION_SAVED"
success = True
except Exception as e:
batch_header_id = 0
status += " EXCEPTION_BATCH_HEADER"
handle_exception(e, logger=logger, exception_message=status)
break
if not positive_value_exists(batch_header_id):
break
try:
batch_row = BatchRow.objects.create(
batch_header_id=batch_header_id,
batch_row_000=candidate_name,
batch_row_001=candidate_twitter_url,
batch_row_002=candidate_facebook_url,
batch_row_003=candidate_website_url,
batch_row_004=state_code,
batch_row_005=contest_office_name,
batch_row_006=candidate_profile_image_url_https,
batch_row_007=party,
batch_row_008=election_day,
batch_row_009=google_civic_election_id,
batch_row_010=candidate_ocd_division_id,
)
number_of_candidates += 1
except Exception as e:
# Stop trying to save rows -- break out of the for loop
status += " EXCEPTION_BATCH_ROW"
handle_exception(e, logger=logger, exception_message=status)
break
results = {
'success': success,
'status': status,
'batch_header_id': batch_header_id,
'batch_saved': success,
'number_of_candidates': number_of_candidates,
'election_day': election_day,
'google_civic_election_id': google_civic_election_id,
}
return results
def import_candidate_positions_from_endorsement_json(self, batch_uri, batch_set_id, organization_we_vote_id,
organization_twitter_handle, candidate_positions_list):
"""
Import candidate positions from organization endorsements json file
:param batch_uri:
:param batch_set_id:
:param organization_we_vote_id:
:param organization_twitter_handle:
:param candidate_positions_list:
:return:
"""
status = ''
success = False
number_of_candidate_positions = 0
first_line = True
election_name = ''
google_civic_election_id = 0
if not candidate_positions_list:
results = {
'success': False,
'status': "IMPORT_CANDIDATE_POSITIONS_FROM_ENDORSEMENT_JSON-INVALID_DATA",
'candidate_positions_saved': False,
'number_of_candidate_positions': 0,
'election_name': election_name,
'google_civic_election_id': google_civic_election_id,
}
return results
# else:
for one_entry in candidate_positions_list:
# read position details for each candidate
candidate_name = one_entry['name']
stance = one_entry['stance']
percent_rating = one_entry['percent_rating']
grade_rating = one_entry['grade_rating']
candidate_twitter_url = one_entry['twitter_url']
candidate_website_url = one_entry['website_url']
candidate_contact_form_url = one_entry['candidate_contact_form_url']
candidate_position_description = one_entry['position_description']
office_name = one_entry['office_name']
state_code = one_entry['state_code']
election_day = one_entry['election_day']
google_civic_election_id = one_entry['google_civic_election_id']
organization_position_url = one_entry['organization_position_url']
if first_line:
# create batch_header and batch_header_map for candidate_positions
first_line = False
try:
batch_header = BatchHeader.objects.create(
batch_header_column_000='name',
batch_header_column_001='stance',
batch_header_column_002='percent_rating',
batch_header_column_003='grade_rating',
batch_header_column_004='organization_twitter_handle',
batch_header_column_005='twitter_url',
batch_header_column_006='more_info_url',
batch_header_column_007='position_description',
batch_header_column_008='office_name',
batch_header_column_009='state_code',
batch_header_column_010='election_day',
batch_header_column_011='google_civic_election_id',
batch_header_column_012='organization_position_url',
batch_header_column_013='candidate_contact_form_url',
)
batch_header_id = batch_header.id
if positive_value_exists(batch_header_id):
# Save an initial BatchHeaderMap
batch_header_map = BatchHeaderMap.objects.create(
batch_header_id=batch_header_id,
batch_header_map_000='candidate_name',
batch_header_map_001='stance',
batch_header_map_002='percent_rating',
batch_header_map_003='grade_rating',
batch_header_map_004='organization_twitter_handle',
batch_header_map_005='candidate_twitter_handle',
batch_header_map_006='more_info_url',
batch_header_map_007='statement_text',
batch_header_map_008='contest_office_name',
batch_header_map_009='state_code',
batch_header_map_010='election_day',
batch_header_map_011='google_civic_election_id',
batch_header_map_012='organization_position_url',
batch_header_map_013='candidate_contact_form_url',
)
batch_header_map_id = batch_header_map.id
status += " BATCH_HEADER_MAP_SAVED"
if positive_value_exists(batch_header_id) and positive_value_exists(batch_header_map_id):
# Now save the BatchDescription
batch_name = "ENDORSEMENTS_JSON_CANDIDATE_POSITIONS " + " batch_header_id: " + str(batch_header_id)
batch_description_text = ""
batch_description = BatchDescription.objects.create(
batch_header_id=batch_header_id,
batch_header_map_id=batch_header_map_id,
batch_name=batch_name,
batch_description_text=batch_description_text,
google_civic_election_id=google_civic_election_id,
kind_of_batch='POSITION',
organization_we_vote_id=organization_we_vote_id,
source_uri=batch_uri,
batch_set_id=batch_set_id,
)
status += " BATCH_DESCRIPTION_SAVED"
success = True
except Exception as e:
batch_header_id = 0
status += " EXCEPTION_BATCH_HEADER"
handle_exception(e, logger=logger, exception_message=status)
break
if not positive_value_exists(batch_header_id):
break
try:
batch_row = BatchRow.objects.create(
batch_header_id=batch_header_id,
batch_row_000=candidate_name,
batch_row_001=stance,
batch_row_002=percent_rating,
batch_row_003=grade_rating,
batch_row_004=organization_twitter_handle,
batch_row_005=candidate_twitter_url,
batch_row_006=candidate_website_url,
batch_row_007=candidate_position_description,
batch_row_008=office_name,
batch_row_009=state_code,
batch_row_010=election_day,
batch_row_011=google_civic_election_id,
batch_row_012=organization_position_url,
batch_row_013=candidate_contact_form_url,
)
number_of_candidate_positions += 1
except Exception as e:
# Stop trying to save rows -- break out of the for loop
status += " EXCEPTION_BATCH_ROW"
handle_exception(e, logger=logger, exception_message=status)
break
results = {
'success': success,
'status': status,
'batch_header_id': batch_header_id,
'batch_saved': success,
'number_of_candidate_positions': number_of_candidate_positions,
'election_name': election_name,
'google_civic_election_id': google_civic_election_id,
}
return results
def import_measure_positions_from_endorsement_json(self, batch_uri, batch_set_id, measure_positions_list):
"""
Import measure positions from organization endorsements json file
:param batch_uri:
:param batch_set_id:
:param measure_positions_list:
:return:
"""
status = ''
success = False
number_of_measure_positions = 0
first_line = True
election_name = ''
google_civic_election_id = 0
if not measure_positions_list:
results = {
'success': False,
'status': "IMPORT_MEASURE_POSITIONS_FROM_ENDORSEMENT_JSON-INVALID_DATA",
'measure_positions_saved': False,
'number_of_measure_positions': 0,
'election_name': election_name,
'google_civic_election_id': google_civic_election_id,
}
return results
# else:
for one_entry in measure_positions_list:
# read position details for each candidate
measure_name = one_entry['name']
stance = one_entry['stance']
measure_ocd_division_id = one_entry['measure_ocd_division_id']
organization_position_url = one_entry['organization_position_url']
measure_id = one_entry['id']
twitter_url = one_entry['twitter_url']
facebook_url = one_entry['facebook_url']
website_url = one_entry['website_url']
image_url = one_entry['image_url']
image_url_https = one_entry['image_url_https']
measure_position_description = one_entry['position_description']
state_code = one_entry['state_code']
election_day = one_entry['election_day']
            google_civic_election_id = one_entry['google_civic_election_id']
            # This key is not guaranteed for measure entries; default to '' so the BatchRow save below cannot
            # fail with a NameError
            candidate_contact_form_url = one_entry.get('candidate_contact_form_url', '')
if first_line:
# create batch_header and batch_header_map for candidate_positions
first_line = False
try:
batch_header = BatchHeader.objects.create(
batch_header_column_000='id',
batch_header_column_001='name',
batch_header_column_002='stance',
batch_header_column_003='measure_ocd_division_id',
batch_header_column_004='organization_position_url',
batch_header_column_005='twitter_url',
batch_header_column_006='facebook_url',
batch_header_column_007='website_url',
batch_header_column_008='image_url',
batch_header_column_009='image_url_https',
batch_header_column_010='position_description',
batch_header_column_011='state_code',
batch_header_column_012='election_day',
batch_header_column_013='google_civic_election_id',
batch_header_column_014='candidate_contact_form_url',
)
batch_header_id = batch_header.id
if positive_value_exists(batch_header_id):
# Save an initial BatchHeaderMap
batch_header_map = BatchHeaderMap.objects.create(
batch_header_id=batch_header_id,
batch_header_map_000='measure_id',
batch_header_map_001='measure_title',
batch_header_map_002='stance',
batch_header_map_003='measure_ocd_division_id',
batch_header_map_004='organization_position_url',
batch_header_map_005='measure_twitter_handle',
batch_header_map_006='facebook_url',
batch_header_map_007='more_info_url',
batch_header_map_008='image_url',
batch_header_map_009='image_url_https',
batch_header_map_010='statement_text',
batch_header_map_011='state_code',
batch_header_map_012='election_day',
batch_header_map_013='google_civic_election_id',
batch_header_map_014='candidate_contact_form_url',
)
batch_header_map_id = batch_header_map.id
status += " BATCH_HEADER_MAP_SAVED"
if positive_value_exists(batch_header_id) and positive_value_exists(batch_header_map_id):
# Now save the BatchDescription
batch_name = "ENDORSEMENTS_JSON_MEASURES " + " batch_header_id: " + str(batch_header_id)
batch_description_text = ""
batch_description = BatchDescription.objects.create(
batch_header_id=batch_header_id,
batch_header_map_id=batch_header_map_id,
batch_name=batch_name,
batch_description_text=batch_description_text,
# google_civic_election_id=google_civic_election_id,
kind_of_batch='POSITION',
# organization_we_vote_id=organization_we_vote_id,
source_uri=batch_uri,
batch_set_id=batch_set_id,
)
status += " BATCH_DESCRIPTION_SAVED"
success = True
except Exception as e:
batch_header_id = 0
status += " EXCEPTION_BATCH_HEADER"
handle_exception(e, logger=logger, exception_message=status)
break
if not positive_value_exists(batch_header_id):
break
try:
batch_row = BatchRow.objects.create(
batch_header_id=batch_header_id,
batch_row_000=measure_id,
batch_row_001=measure_name,
batch_row_002=stance,
batch_row_003=measure_ocd_division_id,
batch_row_004=organization_position_url,
batch_row_005=twitter_url,
batch_row_006=facebook_url,
batch_row_007=website_url,
batch_row_008=image_url,
batch_row_009=image_url_https,
batch_row_010=measure_position_description,
batch_row_011=state_code,
batch_row_012=election_day,
batch_row_013=google_civic_election_id,
batch_row_014=candidate_contact_form_url,
)
number_of_measure_positions += 1
except Exception as e:
# Stop trying to save rows -- break out of the for loop
status += " EXCEPTION_BATCH_ROW"
handle_exception(e, logger=logger, exception_message=status)
break
results = {
'success': success,
'status': status,
'batch_header_id': batch_header_id,
'batch_saved': success,
'number_of_measure_positions': number_of_measure_positions,
'election_name': election_name,
'google_civic_election_id': google_civic_election_id,
}
return results
class BatchSet(models.Model):
"""
We call each imported CSV or JSON a “batch set”, and store basic information about it in this table.
"""
google_civic_election_id = models.PositiveIntegerField(
verbose_name="google civic election id", default=0, null=True, blank=True)
batch_set_name = models.CharField(max_length=255)
batch_set_description_text = models.CharField(max_length=255)
batch_set_source = models.CharField(max_length=255)
source_uri = models.URLField(blank=True, null=True, verbose_name='uri where data is coming from')
import_date = models.DateTimeField(verbose_name="date when batch set was imported", null=True, auto_now=True)
class BatchDescription(models.Model):
"""
We call each imported CSV or JSON a “batch”, and store basic information about it in this table.
"""
batch_header_id = models.PositiveIntegerField(
verbose_name="unique id of header row", unique=True, null=False)
batch_set_id = models.PositiveIntegerField(
verbose_name="unique id of batch set row", unique=False, null=True)
batch_header_map_id = models.PositiveIntegerField(
verbose_name="unique id of header map", unique=True, null=False)
google_civic_election_id = models.PositiveIntegerField(
verbose_name="google civic election id", default=0, null=True, blank=True)
batch_name = models.CharField(max_length=255)
kind_of_batch = models.CharField(max_length=32, choices=KIND_OF_BATCH_CHOICES, default=MEASURE)
organization_we_vote_id = models.CharField(
verbose_name="if for positions, the organization's we vote id", max_length=255, null=True, blank=True)
polling_location_we_vote_id = models.CharField(
verbose_name="if for ballot items, the polling location we vote id", max_length=255, null=True, blank=True)
batch_description_text = models.CharField(max_length=255)
# Have the batch rows under this description been analyzed?
batch_description_analyzed = models.BooleanField(default=False)
source_uri = models.URLField(blank=True, null=True, verbose_name='uri where data is coming from')
date_created = models.DateTimeField(verbose_name='date first saved', null=True, auto_now=True)
class BatchHeader(models.Model):
"""
When we get data, it will come with column headers. This table stores the headers from the import file.
"""
batch_header_column_000 = models.TextField(null=True, blank=True)
batch_header_column_001 = models.TextField(null=True, blank=True)
batch_header_column_002 = models.TextField(null=True, blank=True)
batch_header_column_003 = models.TextField(null=True, blank=True)
batch_header_column_004 = models.TextField(null=True, blank=True)
batch_header_column_005 = models.TextField(null=True, blank=True)
batch_header_column_006 = models.TextField(null=True, blank=True)
batch_header_column_007 = models.TextField(null=True, blank=True)
batch_header_column_008 = models.TextField(null=True, blank=True)
batch_header_column_009 = models.TextField(null=True, blank=True)
batch_header_column_010 = models.TextField(null=True, blank=True)
batch_header_column_011 = models.TextField(null=True, blank=True)
batch_header_column_012 = models.TextField(null=True, blank=True)
batch_header_column_013 = models.TextField(null=True, blank=True)
batch_header_column_014 = models.TextField(null=True, blank=True)
batch_header_column_015 = models.TextField(null=True, blank=True)
batch_header_column_016 = models.TextField(null=True, blank=True)
batch_header_column_017 = models.TextField(null=True, blank=True)
batch_header_column_018 = models.TextField(null=True, blank=True)
batch_header_column_019 = models.TextField(null=True, blank=True)
batch_header_column_020 = models.TextField(null=True, blank=True)
batch_header_column_021 = models.TextField(null=True, blank=True)
batch_header_column_022 = models.TextField(null=True, blank=True)
batch_header_column_023 = models.TextField(null=True, blank=True)
batch_header_column_024 = models.TextField(null=True, blank=True)
batch_header_column_025 = models.TextField(null=True, blank=True)
batch_header_column_026 = models.TextField(null=True, blank=True)
batch_header_column_027 = models.TextField(null=True, blank=True)
batch_header_column_028 = models.TextField(null=True, blank=True)
batch_header_column_029 = models.TextField(null=True, blank=True)
batch_header_column_030 = models.TextField(null=True, blank=True)
batch_header_column_031 = models.TextField(null=True, blank=True)
batch_header_column_032 = models.TextField(null=True, blank=True)
batch_header_column_033 = models.TextField(null=True, blank=True)
batch_header_column_034 = models.TextField(null=True, blank=True)
batch_header_column_035 = models.TextField(null=True, blank=True)
batch_header_column_036 = models.TextField(null=True, blank=True)
batch_header_column_037 = models.TextField(null=True, blank=True)
batch_header_column_038 = models.TextField(null=True, blank=True)
batch_header_column_039 = models.TextField(null=True, blank=True)
batch_header_column_040 = models.TextField(null=True, blank=True)
batch_header_column_041 = models.TextField(null=True, blank=True)
batch_header_column_042 = models.TextField(null=True, blank=True)
batch_header_column_043 = models.TextField(null=True, blank=True)
batch_header_column_044 = models.TextField(null=True, blank=True)
batch_header_column_045 = models.TextField(null=True, blank=True)
batch_header_column_046 = models.TextField(null=True, blank=True)
batch_header_column_047 = models.TextField(null=True, blank=True)
batch_header_column_048 = models.TextField(null=True, blank=True)
batch_header_column_049 = models.TextField(null=True, blank=True)
batch_header_column_050 = models.TextField(null=True, blank=True)
class BatchHeaderMap(models.Model):
"""
When we get data, it will come with column headers. This table stores the replacement header that matches
the We Vote internal field names.
"""
batch_header_id = models.PositiveIntegerField(verbose_name="unique id of header row", unique=True, null=False)
batch_header_map_000 = models.TextField(null=True, blank=True)
batch_header_map_001 = models.TextField(null=True, blank=True)
batch_header_map_002 = models.TextField(null=True, blank=True)
batch_header_map_003 = models.TextField(null=True, blank=True)
batch_header_map_004 = models.TextField(null=True, blank=True)
batch_header_map_005 = models.TextField(null=True, blank=True)
batch_header_map_006 = models.TextField(null=True, blank=True)
batch_header_map_007 = models.TextField(null=True, blank=True)
batch_header_map_008 = models.TextField(null=True, blank=True)
batch_header_map_009 = models.TextField(null=True, blank=True)
batch_header_map_010 = models.TextField(null=True, blank=True)
batch_header_map_011 = models.TextField(null=True, blank=True)
batch_header_map_012 = models.TextField(null=True, blank=True)
batch_header_map_013 = models.TextField(null=True, blank=True)
batch_header_map_014 = models.TextField(null=True, blank=True)
batch_header_map_015 = models.TextField(null=True, blank=True)
batch_header_map_016 = models.TextField(null=True, blank=True)
batch_header_map_017 = models.TextField(null=True, blank=True)
batch_header_map_018 = models.TextField(null=True, blank=True)
batch_header_map_019 = models.TextField(null=True, blank=True)
batch_header_map_020 = models.TextField(null=True, blank=True)
batch_header_map_021 = models.TextField(null=True, blank=True)
batch_header_map_022 = models.TextField(null=True, blank=True)
batch_header_map_023 = models.TextField(null=True, blank=True)
batch_header_map_024 = models.TextField(null=True, blank=True)
batch_header_map_025 = models.TextField(null=True, blank=True)
batch_header_map_026 = models.TextField(null=True, blank=True)
batch_header_map_027 = models.TextField(null=True, blank=True)
batch_header_map_028 = models.TextField(null=True, blank=True)
batch_header_map_029 = models.TextField(null=True, blank=True)
batch_header_map_030 = models.TextField(null=True, blank=True)
batch_header_map_031 = models.TextField(null=True, blank=True)
batch_header_map_032 = models.TextField(null=True, blank=True)
batch_header_map_033 = models.TextField(null=True, blank=True)
batch_header_map_034 = models.TextField(null=True, blank=True)
batch_header_map_035 = models.TextField(null=True, blank=True)
batch_header_map_036 = models.TextField(null=True, blank=True)
batch_header_map_037 = models.TextField(null=True, blank=True)
batch_header_map_038 = models.TextField(null=True, blank=True)
batch_header_map_039 = models.TextField(null=True, blank=True)
batch_header_map_040 = models.TextField(null=True, blank=True)
batch_header_map_041 = models.TextField(null=True, blank=True)
batch_header_map_042 = models.TextField(null=True, blank=True)
batch_header_map_043 = models.TextField(null=True, blank=True)
batch_header_map_044 = models.TextField(null=True, blank=True)
batch_header_map_045 = models.TextField(null=True, blank=True)
batch_header_map_046 = models.TextField(null=True, blank=True)
batch_header_map_047 = models.TextField(null=True, blank=True)
batch_header_map_048 = models.TextField(null=True, blank=True)
batch_header_map_049 = models.TextField(null=True, blank=True)
batch_header_map_050 = models.TextField(null=True, blank=True)
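
# --- Illustrative sketch, not part of the original source ---------------------
# A BatchHeaderMap row is easiest to consume as a plain dict of
# {column_index: we_vote_field_name}, skipping empty slots. The helper name and
# the 0-50 range (mirroring the fields above) are assumptions for illustration.
def example_header_map_as_dict(batch_header_map):
    field_name_by_column = {}
    for column_index in range(0, 51):
        value = getattr(batch_header_map, "batch_header_map_{:03d}".format(column_index), None)
        if value:
            field_name_by_column[column_index] = value
    return field_name_by_column
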
class BatchRow(models.Model):
"""
Individual data rows
"""
batch_header_id = models.PositiveIntegerField(verbose_name="unique id of header row", unique=False, null=False)
# This is used when we have one batch_set that brings in election data for a variety of elections
google_civic_election_id = models.PositiveIntegerField(verbose_name="election id", default=0, null=True, blank=True)
# This is useful for filtering while we are processing batch_rows
state_code = models.CharField(verbose_name="state code for this data", max_length=2, null=True, blank=True)
batch_row_analyzed = models.BooleanField(default=False)
batch_row_created = models.BooleanField(default=False)
batch_row_000 = models.TextField(null=True, blank=True)
batch_row_001 = models.TextField(null=True, blank=True)
batch_row_002 = models.TextField(null=True, blank=True)
batch_row_003 = models.TextField(null=True, blank=True)
batch_row_004 = models.TextField(null=True, blank=True)
batch_row_005 = models.TextField(null=True, blank=True)
batch_row_006 = models.TextField(null=True, blank=True)
batch_row_007 = models.TextField(null=True, blank=True)
batch_row_008 = models.TextField(null=True, blank=True)
batch_row_009 = models.TextField(null=True, blank=True)
batch_row_010 = models.TextField(null=True, blank=True)
batch_row_011 = models.TextField(null=True, blank=True)
batch_row_012 = models.TextField(null=True, blank=True)
batch_row_013 = models.TextField(null=True, blank=True)
batch_row_014 = models.TextField(null=True, blank=True)
batch_row_015 = models.TextField(null=True, blank=True)
batch_row_016 = models.TextField(null=True, blank=True)
batch_row_017 = models.TextField(null=True, blank=True)
batch_row_018 = models.TextField(null=True, blank=True)
batch_row_019 = models.TextField(null=True, blank=True)
batch_row_020 = models.TextField(null=True, blank=True)
batch_row_021 = models.TextField(null=True, blank=True)
batch_row_022 = models.TextField(null=True, blank=True)
batch_row_023 = models.TextField(null=True, blank=True)
batch_row_024 = models.TextField(null=True, blank=True)
batch_row_025 = models.TextField(null=True, blank=True)
batch_row_026 = models.TextField(null=True, blank=True)
batch_row_027 = models.TextField(null=True, blank=True)
batch_row_028 = models.TextField(null=True, blank=True)
batch_row_029 = models.TextField(null=True, blank=True)
batch_row_030 = models.TextField(null=True, blank=True)
batch_row_031 = models.TextField(null=True, blank=True)
batch_row_032 = models.TextField(null=True, blank=True)
batch_row_033 = models.TextField(null=True, blank=True)
batch_row_034 = models.TextField(null=True, blank=True)
batch_row_035 = models.TextField(null=True, blank=True)
batch_row_036 = models.TextField(null=True, blank=True)
batch_row_037 = models.TextField(null=True, blank=True)
batch_row_038 = models.TextField(null=True, blank=True)
batch_row_039 = models.TextField(null=True, blank=True)
batch_row_040 = models.TextField(null=True, blank=True)
batch_row_041 = models.TextField(null=True, blank=True)
batch_row_042 = models.TextField(null=True, blank=True)
batch_row_043 = models.TextField(null=True, blank=True)
batch_row_044 = models.TextField(null=True, blank=True)
batch_row_045 = models.TextField(null=True, blank=True)
batch_row_046 = models.TextField(null=True, blank=True)
batch_row_047 = models.TextField(null=True, blank=True)
batch_row_048 = models.TextField(null=True, blank=True)
batch_row_049 = models.TextField(null=True, blank=True)
batch_row_050 = models.TextField(null=True, blank=True)
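
# --- Illustrative sketch, not part of the original source ---------------------
# Pairing a BatchRow with the {column_index: field_name} dict sketched above
# yields {we_vote_field_name: raw_cell_value} for that row, which is how the
# header, header-map and row tables are meant to be read together. The helper
# name is an assumption.
def example_row_values_by_field_name(batch_row, field_name_by_column):
    values = {}
    for column_index, field_name in field_name_by_column.items():
        values[field_name] = getattr(batch_row, "batch_row_{:03d}".format(column_index), None)
    return values
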
class BatchHeaderTranslationSuggestion(models.Model):
"""
When we bring in batches of data, we want to try to map non-standard headers to the We Vote recognized headers.
This table stores those mappings.
"""
kind_of_batch = models.CharField(max_length=32, choices=KIND_OF_BATCH_CHOICES, default=MEASURE)
header_value_recognized_by_we_vote = models.TextField(null=True, blank=True)
incoming_alternate_header_value = models.TextField(null=True, blank=True)
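
# --- Illustrative sketch, not part of the original source ---------------------
# Recording a header translation suggestion could be a simple get_or_create
# keyed on the incoming header and the kind of batch. The function name is an
# assumption; MEASURE is the same default used by the model above.
def example_suggest_header_translation(incoming_header, recognized_header, kind_of_batch=MEASURE):
    suggestion, created = BatchHeaderTranslationSuggestion.objects.get_or_create(
        kind_of_batch=kind_of_batch,
        incoming_alternate_header_value=incoming_header,
        defaults={'header_value_recognized_by_we_vote': recognized_header})
    return suggestion, created
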
class BatchRowTranslationMap(models.Model):
"""
When we bring in batches of data, we want to map different names (for measures, offices, candidates,
    or organizations) to the We Vote recognized names. This table stores those mappings. For example,
    if one batch uses "Prop A", we want to map it to "Proposition A".
"""
# Are we translating for a Measure, Office, Candidate, or Organization
kind_of_batch = models.CharField(max_length=32, choices=KIND_OF_BATCH_CHOICES, default=MEASURE)
# What is the name of the row? (ex/ contest_office_name)
batch_row_name = models.CharField(verbose_name="name of the the row", max_length=255, null=True, blank=True)
google_civic_election_id = models.PositiveIntegerField(
verbose_name="google civic election id", default=0, null=True, blank=True)
row_value_recognized_by_we_vote = models.TextField(null=True, blank=True)
incoming_alternate_row_value = models.TextField(null=True, blank=True)
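
# --- Illustrative sketch, not part of the original source ---------------------
# A lookup along these lines would translate an incoming value such as "Prop A"
# into the We Vote recognized value ("Proposition A") for a given row name and
# kind of batch, falling back to the incoming value. The helper name is assumed.
def example_translate_row_value(kind_of_batch, batch_row_name, incoming_value):
    translation = BatchRowTranslationMap.objects.filter(
        kind_of_batch=kind_of_batch,
        batch_row_name=batch_row_name,
        incoming_alternate_row_value=incoming_value).first()
    if translation:
        return translation.row_value_recognized_by_we_vote
    return incoming_value
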
class BatchRowActionMeasure(models.Model):
"""
The definition of the action for importing one Measure.
"""
batch_set_id = models.PositiveIntegerField(verbose_name="unique id of batch set", unique=False, null=True)
batch_header_id = models.PositiveIntegerField(
verbose_name="unique id of header row", unique=False, null=False, db_index=True)
batch_row_id = models.PositiveIntegerField(
verbose_name="unique id of batch row", unique=True, null=False, db_index=True)
kind_of_action = models.CharField(
max_length=40, choices=KIND_OF_ACTION_CHOICES, default=IMPORT_TO_BE_DETERMINED, db_index=True)
# Fields from ContestMeasure
measure_we_vote_id = models.CharField(
verbose_name="we vote permanent id", max_length=255, default=None, null=True, blank=True)
maplight_id = models.CharField(verbose_name="maplight unique identifier",
max_length=255, null=True, blank=True, unique=False)
vote_smart_id = models.CharField(verbose_name="votesmart unique identifier",
max_length=200, null=True, blank=True, unique=False)
# The title of the measure (e.g. 'Proposition 42').
measure_title = models.CharField(verbose_name="measure title", max_length=255, null=False, blank=False)
# The measure's title as passed over by Google Civic. We save this so we can match to this measure even
# if we edit the measure's name locally.
google_civic_measure_title = models.CharField(verbose_name="measure name exactly as received from google civic",
max_length=255, null=True, blank=True)
# A brief description of the referendum. This field is only populated for contests of type 'Referendum'.
measure_subtitle = models.TextField(verbose_name="google civic referendum subtitle",
null=True, blank=True, default="")
# The text of the measure. This field is only populated for contests of type 'Referendum'.
measure_text = models.TextField(verbose_name="measure text", null=True, blank=False)
# A link to the referendum. This field is only populated for contests of type 'Referendum'.
measure_url = models.CharField(verbose_name="measure details url", max_length=255, null=True, blank=False)
# The unique ID of the election containing this contest. (Provided by Google Civic)
google_civic_election_id = models.PositiveIntegerField(
verbose_name="google civic election id", default=0, null=False, blank=False, db_index=True)
ocd_division_id = models.CharField(verbose_name="ocd division id", max_length=255, null=True, blank=True)
# ballot_placement: We store ballot_placement in the BallotItem table instead because it is different for each voter
# If this is a partisan election, the name of the party it is for.
primary_party = models.CharField(verbose_name="primary party", max_length=255, null=True, blank=True)
# The name of the district.
district_name = models.CharField(verbose_name="district name", max_length=255, null=False, blank=False)
# The geographic scope of this district. If unspecified the district's geography is not known.
# One of: national, statewide, congressional, stateUpper, stateLower, countywide, judicial, schoolBoard,
# cityWide, township, countyCouncil, cityCouncil, ward, special
district_scope = models.CharField(verbose_name="district scope", max_length=255, null=False, blank=False)
# An identifier for this district, relative to its scope. For example, the 34th State Senate district
# would have id "34" and a scope of stateUpper.
district_id = models.CharField(verbose_name="google civic district id", max_length=255, null=True, blank=True)
# State code
state_code = models.CharField(verbose_name="state this measure affects", max_length=2, null=True, blank=True)
# Day of the election in YYYY-MM-DD format.
election_day_text = models.CharField(verbose_name="election day", max_length=255, null=True, blank=True)
wikipedia_page_id = models.BigIntegerField(verbose_name="pageid", null=True, blank=True)
wikipedia_page_title = models.CharField(
verbose_name="Page title on Wikipedia", max_length=255, null=True, blank=True)
wikipedia_photo_url = models.URLField(verbose_name='url of wikipedia logo', blank=True, null=True)
ballotpedia_district_id = models.PositiveIntegerField(
verbose_name="ballotpedia district id", default=0, null=False, blank=False)
ballotpedia_election_id = models.PositiveIntegerField(
verbose_name="ballotpedia election id", default=0, null=False, blank=False)
ballotpedia_measure_id = models.PositiveIntegerField(
verbose_name="ballotpedia measure id", default=0, null=False, blank=False)
ballotpedia_measure_name = models.CharField(
verbose_name="ballotpedia measure name", max_length=255, null=True, blank=True)
ballotpedia_measure_status = models.CharField(
verbose_name="ballotpedia measure status", max_length=255, null=True, blank=True)
ballotpedia_measure_summary = models.TextField(
verbose_name="ballotpedia measure summary", null=True, blank=True, default="")
ballotpedia_measure_text = models.TextField(
verbose_name="ballotpedia measure text", null=True, blank=True, default="")
ballotpedia_measure_url = models.URLField(verbose_name='ballotpedia url of measure', blank=True, null=True)
ballotpedia_page_title = models.CharField(
verbose_name="Page title on Ballotpedia", max_length=255, null=True, blank=True)
ballotpedia_photo_url = models.URLField(verbose_name='url of ballotpedia logo', blank=True, null=True)
ballotpedia_yes_vote_description = models.TextField(
verbose_name="what a yes vote means", null=True, blank=True, default=None)
ballotpedia_no_vote_description = models.TextField(
verbose_name="what a no vote means", null=True, blank=True, default=None)
ctcl_uuid = models.CharField(verbose_name="ctcl uuid", max_length=80, null=True, blank=True)
status = models.TextField(verbose_name="batch row action measure status", null=True, blank=True, default="")
class BatchRowActionContestOffice(models.Model):
"""
The definition of the action for importing one Office.
"""
batch_set_id = models.PositiveIntegerField(verbose_name="unique id of batch set", unique=False, null=True)
batch_header_id = models.PositiveIntegerField(
verbose_name="unique id of header row", unique=False, null=False, db_index=True)
batch_row_id = models.PositiveIntegerField(
verbose_name="unique id of batch row", unique=False, null=False, db_index=True)
kind_of_action = models.CharField(
max_length=40, choices=KIND_OF_ACTION_CHOICES, default=IMPORT_TO_BE_DETERMINED, db_index=True)
# Fields from ContestOffice
contest_office_we_vote_id = models.CharField(
verbose_name="we vote permanent id for this contest office", max_length=255, default=None, null=True,
blank=True)
# The name of the office for this contest.
contest_office_name = models.CharField(verbose_name="name of the contest office", max_length=255, null=False,
blank=False)
# TODO: Was the original contest_office_name replaced with a mapped value from BatchRowTranslationMap?
# contest_office_name_mapped = models.BooleanField(verbose_name='office name was replaced', default=False)
    # The office's name as passed over by Google Civic. We save this so we can match to this office even
# if we edit the office's name locally.
google_civic_office_name = models.CharField(verbose_name="office name exactly as received from google civic",
max_length=255, null=True, blank=True)
# The unique ID of the election containing this contest. (Provided by Google Civic)
google_civic_election_id = models.CharField(verbose_name="google civic election id",
max_length=255, null=False, blank=False, db_index=True)
google_civic_election_id_new = models.PositiveIntegerField(
verbose_name="google civic election id", default=0, null=False, blank=False)
ocd_division_id = models.CharField(verbose_name="ocd division id", max_length=255, null=True, blank=True)
maplight_id = models.CharField(
verbose_name="maplight unique identifier", max_length=255, null=True, blank=True)
# 2018-02-16 It is unclear if we want to keep this field
ballotpedia_id = models.CharField(
verbose_name="ballotpedia unique identifier", max_length=255, null=True, blank=True)
ballotpedia_district_id = models.PositiveIntegerField(
verbose_name="ballotpedia district id", null=True, blank=True)
ballotpedia_election_id = models.PositiveIntegerField(verbose_name="ballotpedia election id", null=True, blank=True)
is_ballotpedia_general_election = models.BooleanField(default=False)
is_ballotpedia_general_runoff_election = models.BooleanField(default=False)
is_ballotpedia_primary_election = models.BooleanField(default=False)
is_ballotpedia_primary_runoff_election = models.BooleanField(default=False)
# Equivalent of elected_office in We Vote
ballotpedia_office_id = models.PositiveIntegerField(
verbose_name="ballotpedia integer id", null=True, blank=True)
# The office's name as passed over by Ballotpedia. This helps us do exact matches when id is missing
ballotpedia_office_name = models.CharField(verbose_name="office name exactly as received from ballotpedia",
max_length=255, null=True, blank=True)
ballotpedia_office_url = models.URLField(verbose_name='url of office on ballotpedia', blank=True, null=True)
# Equivalent of contest_office in We Vote
ballotpedia_race_id = models.PositiveIntegerField(verbose_name="ballotpedia race integer id", null=True, blank=True)
# Federal, State, Local,
ballotpedia_race_office_level = models.CharField(verbose_name="race office level", max_length=255, null=True,
blank=True)
wikipedia_id = models.CharField(verbose_name="wikipedia unique identifier", max_length=255, null=True, blank=True)
# vote_type (ranked choice, majority)
# The number of candidates that a voter may vote for in this contest.
number_voting_for = models.CharField(verbose_name="google civic number of candidates to vote for",
max_length=255, null=True, blank=True)
# The number of candidates that will be elected to office in this contest.
number_elected = models.CharField(verbose_name="google civic number of candidates who will be elected",
max_length=255, null=True, blank=True)
# State code
state_code = models.CharField(verbose_name="state this office serves", max_length=2, null=True, blank=True)
# If this is a partisan election, the name of the party it is for.
primary_party = models.CharField(verbose_name="google civic primary party", max_length=255, null=True, blank=True)
# The name of the district.
district_name = models.CharField(verbose_name="district name", max_length=255, null=True, blank=True)
# The geographic scope of this district. If unspecified the district's geography is not known.
# One of: national, statewide, congressional, stateUpper, stateLower, countywide, judicial, schoolBoard,
# cityWide, township, countyCouncil, cityCouncil, ward, special
district_scope = models.CharField(verbose_name="google civic district scope",
max_length=255, null=True, blank=True)
# An identifier for this district, relative to its scope. For example, the 34th State Senate district
# would have id "34" and a scope of stateUpper.
district_id = models.CharField(verbose_name="google civic district id", max_length=255, null=True, blank=True)
# The levels of government of the office for this contest. There may be more than one in cases where a
# jurisdiction effectively acts at two different levels of government; for example, the mayor of the
# District of Columbia acts at "locality" level, but also effectively at both
# "administrative-area-2" and "administrative-area-1".
contest_level0 = models.CharField(verbose_name="google civic level, option 0",
max_length=255, null=True, blank=True)
contest_level1 = models.CharField(verbose_name="google civic level, option 1",
max_length=255, null=True, blank=True)
contest_level2 = models.CharField(verbose_name="google civic level, option 2",
max_length=255, null=True, blank=True)
# ballot_placement: We store ballot_placement in the BallotItem table instead because it is different for each voter
# A description of any additional eligibility requirements for voting in this contest.
    electorate_specifications = models.CharField(verbose_name="google civic electorate specifications",
                                                 max_length=255, null=True, blank=True)
    # "Yes" or "No" depending on whether this is a contest being held outside the normal election cycle.
    special = models.CharField(verbose_name="google civic special election", max_length=255, null=True, blank=True)
ctcl_uuid = models.CharField(verbose_name="ctcl uuid", max_length=80, null=True, blank=True)
elected_office_name = models.CharField(verbose_name="name of the elected office", max_length=255, null=True,
blank=True, default=None)
candidate_selection_id1 = models.CharField(verbose_name="temporary id of candidate selection 1", max_length=255,
null=True, blank=True, default=None)
candidate_selection_id2 = models.CharField(verbose_name="temporary id of candidate selection 2", max_length=255,
null=True, blank=True, default=None)
    candidate_selection_id3 = models.CharField(verbose_name="temporary id of candidate selection 3", max_length=255,
null=True, blank=True, default=None)
candidate_selection_id4 = models.CharField(verbose_name="temporary id of candidate selection 4", max_length=255,
null=True, blank=True, default=None)
candidate_selection_id5 = models.CharField(verbose_name="temporary id of candidate selection 5", max_length=255,
null=True, blank=True, default=None)
candidate_selection_id6 = models.CharField(verbose_name="temporary id of candidate selection 6", max_length=255,
null=True, blank=True, default=None)
candidate_selection_id7 = models.CharField(verbose_name="temporary id of candidate selection 7", max_length=255,
null=True, blank=True, default=None)
candidate_selection_id8 = models.CharField(verbose_name="temporary id of candidate selection 8", max_length=255,
null=True, blank=True, default=None)
candidate_selection_id9 = models.CharField(verbose_name="temporary id of candidate selection 9", max_length=255,
null=True, blank=True, default=None)
candidate_selection_id10 = models.CharField(verbose_name="temporary id of candidate selection 10", max_length=255,
null=True, blank=True, default=None)
status = models.TextField(verbose_name="batch row action contest office status", null=True, blank=True, default="")
class BatchRowActionElectedOffice(models.Model):
"""
The definition of the action for importing one Office.
"""
batch_set_id = models.PositiveIntegerField(verbose_name="unique id of batch set", unique=False, null=True)
batch_header_id = models.PositiveIntegerField(
verbose_name="unique id of header row", unique=False, null=False, db_index=True)
batch_row_id = models.PositiveIntegerField(
verbose_name="unique id of batch row", unique=False, null=False, db_index=True)
kind_of_action = models.CharField(
max_length=40, choices=KIND_OF_ACTION_CHOICES, default=IMPORT_TO_BE_DETERMINED, db_index=True)
# Fields from ElectedOffice
elected_office_we_vote_id = models.CharField(
verbose_name="we vote permanent id for this elected office", max_length=255, default=None, null=True,
blank=True)
# The name of the office for this contest.
elected_office_name = models.CharField(verbose_name="name of the elected office", max_length=255,
null=False, blank=False)
elected_office_name_es = models.CharField(verbose_name="name of the elected office in Spanish", max_length=255,
null=True, blank=True, default=None)
    # The office's name as passed over by Google Civic. We save this so we can match to this office even
# if we edit the office's name locally.
google_civic_office_name = models.CharField(verbose_name="office name exactly as received from google civic",
max_length=255, null=True, blank=True)
# The unique ID of the election containing this contest. (Provided by Google Civic)
google_civic_election_id = models.CharField(verbose_name="google civic election id",
max_length=255, null=False, blank=False, db_index=True)
google_civic_election_id_new = models.PositiveIntegerField(
verbose_name="google civic election id", default=0, null=False, blank=False)
ocd_division_id = models.CharField(verbose_name="ocd division id", max_length=255, null=True, blank=True)
maplight_id = models.CharField(
verbose_name="maplight unique identifier", max_length=255, null=True, blank=True)
ballotpedia_id = models.CharField(
verbose_name="ballotpedia unique identifier", max_length=255, null=True, blank=True)
wikipedia_id = models.CharField(verbose_name="wikipedia unique identifier", max_length=255, null=True, blank=True)
# vote_type (ranked choice, majority)
# The number of candidates that a voter may vote for in this contest.
# TODO for now comment out number_voting_for for elected_office table
# number_voting_for = models.CharField(verbose_name="google civic number of candidates to vote for",
# max_length=255, null=True, blank=True)
# The number of candidates that will be elected to office in this contest.
number_elected = models.CharField(verbose_name="google civic number of candidates who will be elected",
max_length=255, null=True, blank=True)
# State code
state_code = models.CharField(verbose_name="state this office serves", max_length=2, null=True, blank=True)
# If this is a partisan election, the name of the party it is for.
primary_party = models.CharField(verbose_name="google civic primary party", max_length=255, null=True, blank=True)
# The name of the district.
district_name = models.CharField(verbose_name="district name", max_length=255, null=True, blank=True)
# The geographic scope of this district. If unspecified the district's geography is not known.
# One of: national, statewide, congressional, stateUpper, stateLower, countywide, judicial, schoolBoard,
# cityWide, township, countyCouncil, cityCouncil, ward, special
district_scope = models.CharField(verbose_name="google civic district scope",
max_length=255, null=True, blank=True)
# An identifier for this district, relative to its scope. For example, the 34th State Senate district
# would have id "34" and a scope of stateUpper.
district_id = models.CharField(verbose_name="google civic district id", max_length=255, null=True, blank=True)
# The levels of government of the office for this contest. There may be more than one in cases where a
# jurisdiction effectively acts at two different levels of government; for example, the mayor of the
# District of Columbia acts at "locality" level, but also effectively at both
# "administrative-area-2" and "administrative-area-1".
contest_level0 = models.CharField(verbose_name="google civic level, option 0",
max_length=255, null=True, blank=True)
contest_level1 = models.CharField(verbose_name="google civic level, option 1",
max_length=255, null=True, blank=True)
contest_level2 = models.CharField(verbose_name="google civic level, option 2",
max_length=255, null=True, blank=True)
# ballot_placement: We store ballot_placement in the BallotItem table instead because it is different for each voter
# A description of any additional eligibility requirements for voting in this contest.
    electorate_specifications = models.CharField(verbose_name="google civic electorate specifications",
                                                 max_length=255, null=True, blank=True)
    # "Yes" or "No" depending on whether this is a contest being held outside the normal election cycle.
    special = models.CharField(verbose_name="google civic special election", max_length=255, null=True, blank=True)
ctcl_uuid = models.CharField(verbose_name="ctcl uuid", max_length=80, null=True, blank=True)
elected_office_description = models.CharField(verbose_name="office description", max_length=255,
null=True, blank=True)
elected_office_description_es = models.CharField(verbose_name="office description spanish", max_length=255,
null=True, blank=True)
elected_office_is_partisan = models.BooleanField(verbose_name='office is_partisan', default=False)
elected_office_ctcl_id = models.CharField(verbose_name="we vote permanent id for this elected office",
max_length=255, default=None, null=True, blank=True)
status = models.TextField(verbose_name="batch row action elected office status", null=True, blank=True, default="")
class BatchRowActionPolitician(models.Model):
"""
The definition of the action for importing one Politician.
"""
batch_set_id = models.PositiveIntegerField(verbose_name="unique id of batch set", unique=False, null=True)
batch_header_id = models.PositiveIntegerField(
verbose_name="unique id of header row", unique=False, null=False, db_index=True)
batch_row_id = models.PositiveIntegerField(
verbose_name="unique id of batch row", unique=False, null=False, db_index=True)
kind_of_action = models.CharField(
max_length=40, choices=KIND_OF_ACTION_CHOICES, default=IMPORT_TO_BE_DETERMINED, db_index=True)
# Fields from Politician
politician_we_vote_id = models.CharField(verbose_name="we vote permanent id of this politician", max_length=255,
default=None, null=True, blank=True, unique=False)
# See this url for properties: https://docs.python.org/2/library/functions.html#property
first_name = models.CharField(verbose_name="first name", max_length=255, default=None, null=True, blank=True)
middle_name = models.CharField(verbose_name="middle name", max_length=255, default=None, null=True, blank=True)
last_name = models.CharField(verbose_name="last name", max_length=255, default=None, null=True, blank=True)
politician_name = models.CharField(verbose_name="official full name", max_length=255, default=None, null=True,
blank=True)
# This is the politician's name from GoogleCivicCandidateCampaign
google_civic_candidate_name = models.CharField(verbose_name="full name from google civic", max_length=255,
default=None, null=True, blank=True)
# This is the politician's name assembled from TheUnitedStatesIo first_name + last_name for quick search
full_name_assembled = models.CharField(verbose_name="full name assembled from first_name + last_name",
max_length=255, default=None, null=True, blank=True)
gender = models.CharField("gender", max_length=1, choices=GENDER_CHOICES, default=UNKNOWN)
birth_date = models.DateField("birth date", default=None, null=True, blank=True)
# race = enum?
# official_image_id = ??
bioguide_id = models.CharField(verbose_name="bioguide unique identifier", max_length=200, null=True, unique=False)
thomas_id = models.CharField(verbose_name="thomas unique identifier", max_length=200, null=True, unique=False)
lis_id = models.CharField(verbose_name="lis unique identifier", max_length=200, null=True, blank=True, unique=False)
govtrack_id = models.CharField(verbose_name="govtrack unique identifier", max_length=200, null=True, unique=False)
opensecrets_id = models.CharField(verbose_name="opensecrets unique identifier", max_length=200, null=True,
unique=False)
vote_smart_id = models.CharField(verbose_name="votesmart unique identifier", max_length=200, null=True,
unique=False)
fec_id = models.CharField(verbose_name="fec unique identifier", max_length=200, null=True, unique=False, blank=True)
cspan_id = models.CharField(verbose_name="cspan unique identifier", max_length=200, null=True, blank=True,
unique=False)
wikipedia_id = models.CharField(verbose_name="wikipedia url", max_length=500, default=None, null=True, blank=True)
ballotpedia_id = models.CharField(verbose_name="ballotpedia unique id", max_length=500, default=None, null=True,
blank=True)
house_history_id = models.CharField(verbose_name="house history unique identifier", max_length=200, null=True,
blank=True)
maplight_id = models.CharField(verbose_name="maplight unique identifier", max_length=200, null=True, unique=False,
blank=True)
washington_post_id = models.CharField(verbose_name="washington post unique identifier", max_length=200, null=True,
unique=False)
icpsr_id = models.CharField(verbose_name="icpsr unique identifier", max_length=200, null=True, unique=False)
# The full name of the party the official belongs to.
political_party = models.CharField(verbose_name="politician political party", max_length=255, null=True)
state_code = models.CharField(verbose_name="politician home state", max_length=2, null=True)
politician_url = models.URLField(verbose_name='latest website url of politician', blank=True, null=True)
politician_twitter_handle = models.CharField(verbose_name='politician twitter screen_name', max_length=255,
null=True, unique=False)
we_vote_hosted_profile_image_url_large = models.URLField(verbose_name='we vote hosted large image url',
blank=True, null=True)
we_vote_hosted_profile_image_url_medium = models.URLField(verbose_name='we vote hosted medium image url',
blank=True, null=True)
we_vote_hosted_profile_image_url_tiny = models.URLField(verbose_name='we vote hosted tiny image url', blank=True,
null=True)
ctcl_uuid = models.CharField(verbose_name="ctcl uuid", max_length=80, null=True, blank=True)
politician_facebook_id = models.CharField(verbose_name='politician facebook user name', max_length=255, null=True,
unique=False)
politician_phone_number = models.CharField(verbose_name='politician phone number', max_length=255, null=True,
unique=False)
politician_googleplus_id = models.CharField(verbose_name='politician googleplus profile name', max_length=255,
null=True, unique=False)
politician_youtube_id = models.CharField(verbose_name='politician youtube profile name', max_length=255, null=True,
unique=False)
politician_email_address = models.CharField(verbose_name='politician email address', max_length=80, null=True,
unique=False)
status = models.TextField(verbose_name="batch row action politician status", null=True, blank=True, default="")
class BatchRowActionCandidate(models.Model):
"""
The definition of the action for importing one Candidate.
"""
batch_set_id = models.PositiveIntegerField(verbose_name="unique id of batch set", unique=False, null=True)
batch_header_id = models.PositiveIntegerField(
verbose_name="unique id of header row", unique=False, null=False, db_index=True)
batch_row_id = models.PositiveIntegerField(
verbose_name="unique id of batch row", unique=False, null=False, db_index=True)
kind_of_action = models.CharField(
max_length=40, choices=KIND_OF_ACTION_CHOICES, default=IMPORT_TO_BE_DETERMINED, db_index=True)
# Fields from Candidate
candidate_we_vote_id = models.CharField(
verbose_name="we vote permanent id of this candidate campaign", max_length=255, default=None, null=True,
blank=True)
maplight_id = models.CharField(
verbose_name="maplight candidate id", max_length=255, default=None, null=True, blank=True)
vote_smart_id = models.CharField(
verbose_name="vote smart candidate id", max_length=15, default=None, null=True, blank=True, unique=False)
# The internal We Vote id for the ContestOffice that this candidate is competing for. During setup we need to allow
# this to be null.
contest_office_id = models.CharField(
verbose_name="contest_office_id id", max_length=255, null=True, blank=True)
# We want to link the candidate to the contest with permanent ids so we can export and import
contest_office_we_vote_id = models.CharField(
verbose_name="we vote permanent id for the office this candidate is running for", max_length=255, default=None,
null=True, blank=True, unique=False)
contest_office_name = models.CharField(verbose_name="name of the office", max_length=255, null=True, blank=True)
# politician (internal) link to local We Vote Politician entry. During setup we need to allow this to be null.
politician_id = models.BigIntegerField(verbose_name="politician unique identifier", null=True, blank=True)
# The persistent We Vote unique ID of the Politician, so we can export and import into other databases.
politician_we_vote_id = models.CharField(
verbose_name="we vote politician id", max_length=255, null=True, blank=True)
# The candidate's name.
candidate_name = models.CharField(verbose_name="candidate name", max_length=255, null=False, blank=False)
# The candidate's name as passed over by Google Civic. We save this so we can match to this candidate even
# if we edit the candidate's name locally.
google_civic_candidate_name = models.CharField(verbose_name="candidate name exactly as received from google civic",
max_length=255, null=False, blank=False)
candidate_gender = models.CharField(verbose_name="candidate gender", max_length=255, null=True, blank=True)
# Birthday in YYYY-MM-DD format.
birth_day_text = models.CharField(verbose_name="birth day", max_length=10, null=True, blank=True)
# The full name of the party the candidate is a member of.
party = models.CharField(verbose_name="party", max_length=255, null=True, blank=True)
# A URL for a photo of the candidate.
photo_url = models.CharField(verbose_name="photoUrl", max_length=255, null=True, blank=True)
photo_url_from_maplight = models.URLField(
verbose_name='candidate portrait url of candidate from maplight', blank=True, null=True)
photo_url_from_vote_smart = models.URLField(
verbose_name='candidate portrait url of candidate from vote smart', blank=True, null=True)
# The order the candidate appears on the ballot relative to other candidates for this contest.
order_on_ballot = models.CharField(verbose_name="order on ballot", max_length=255, null=True, blank=True)
# The unique ID of the election containing this contest. (Provided by Google Civic)
google_civic_election_id = models.CharField(
verbose_name="google civic election id", max_length=255, null=True, blank=True, db_index=True)
google_civic_election_id_new = models.PositiveIntegerField(
verbose_name="google civic election id", default=0, null=True, blank=True)
ocd_division_id = models.CharField(verbose_name="ocd division id", max_length=255, null=True, blank=True)
# State code
state_code = models.CharField(verbose_name="state this candidate serves", max_length=2, null=True, blank=True)
# The URL for the candidate's campaign web site.
candidate_url = models.URLField(verbose_name='website url of candidate campaign', blank=True, null=True)
candidate_contact_form_url = models.URLField(verbose_name='website url of candidate contact form',
blank=True, null=True)
facebook_url = models.URLField(verbose_name='facebook url of candidate campaign', blank=True, null=True)
twitter_url = models.URLField(verbose_name='twitter url of candidate campaign', blank=True, null=True)
twitter_user_id = models.BigIntegerField(verbose_name="twitter id", null=True, blank=True)
candidate_twitter_handle = models.CharField(
verbose_name='candidate twitter screen_name', max_length=255, null=True, unique=False)
twitter_name = models.CharField(
verbose_name="org name from twitter", max_length=255, null=True, blank=True)
twitter_location = models.CharField(
verbose_name="org location from twitter", max_length=255, null=True, blank=True)
twitter_followers_count = models.IntegerField(verbose_name="number of twitter followers",
null=False, blank=True, default=0)
twitter_profile_image_url_https = models.URLField(verbose_name='url of logo from twitter', blank=True, null=True)
twitter_profile_background_image_url_https = models.URLField(verbose_name='tile-able background from twitter',
blank=True, null=True)
twitter_profile_banner_url_https = models.URLField(verbose_name='profile banner image from twitter',
blank=True, null=True)
twitter_description = models.CharField(verbose_name="Text description of this organization from twitter.",
max_length=255, null=True, blank=True)
google_plus_url = models.URLField(verbose_name='google plus url of candidate campaign', blank=True, null=True)
youtube_url = models.URLField(verbose_name='youtube url of candidate campaign', blank=True, null=True)
# The email address for the candidate's campaign.
candidate_email = models.CharField(verbose_name="candidate campaign email", max_length=255, null=True, blank=True)
# The voice phone number for the candidate's campaign office.
candidate_phone = models.CharField(verbose_name="candidate campaign phone", max_length=255, null=True, blank=True)
wikipedia_page_id = models.BigIntegerField(verbose_name="pageid", null=True, blank=True)
wikipedia_page_title = models.CharField(
verbose_name="Page title on Wikipedia", max_length=255, null=True, blank=True)
wikipedia_photo_url = models.URLField(verbose_name='url of wikipedia logo', blank=True, null=True)
ballotpedia_candidate_id = models.PositiveIntegerField(
verbose_name="ballotpedia integer id", null=True, blank=True)
# The candidate's name as passed over by Ballotpedia
ballotpedia_candidate_name = models.CharField(verbose_name="candidate name exactly as received from ballotpedia",
max_length=255, null=True, blank=True)
ballotpedia_candidate_summary = models.TextField(verbose_name="candidate summary from ballotpedia",
null=True, blank=True, default=None)
ballotpedia_candidate_url = models.URLField(verbose_name='url of candidate on ballotpedia', blank=True, null=True)
ballotpedia_election_id = models.PositiveIntegerField(verbose_name="ballotpedia election id", null=True, blank=True)
# The id of the image for retrieval from Ballotpedia API
ballotpedia_image_id = models.PositiveIntegerField(verbose_name="ballotpedia image id", null=True, blank=True)
# Equivalent of elected_office in We Vote
ballotpedia_office_id = models.PositiveIntegerField(
verbose_name="ballotpedia elected office integer id", null=True, blank=True)
# This is just the characters in the Ballotpedia URL
ballotpedia_page_title = models.CharField(
verbose_name="Page title on Ballotpedia", max_length=255, null=True, blank=True)
# Equivalent of politician in We Vote
ballotpedia_person_id = models.PositiveIntegerField(
verbose_name="ballotpedia person integer id", null=True, blank=True)
ballotpedia_photo_url = models.URLField(verbose_name='url of ballotpedia logo', blank=True, null=True)
# Equivalent of contest_office in We Vote
ballotpedia_race_id = models.PositiveIntegerField(verbose_name="ballotpedia race integer id", null=True, blank=True)
# Official Statement from Candidate in Ballot Guide
ballot_guide_official_statement = models.TextField(verbose_name="official candidate statement from ballot guide",
null=True, blank=True, default="")
batch_row_action_office_ctcl_uuid = models.CharField(verbose_name="ctcl uuid", max_length=80, null=True, blank=True)
crowdpac_candidate_id = models.PositiveIntegerField(verbose_name="crowdpac integer id", null=True, blank=True)
ctcl_uuid = models.CharField(verbose_name="ctcl uuid", max_length=80, null=True, blank=True)
candidate_is_top_ticket = models.BooleanField(verbose_name="candidate is top ticket", default=False)
candidate_is_incumbent = models.BooleanField(verbose_name="candidate is currently in the office", default=False)
candidate_participation_status = models.CharField(verbose_name="candidate participation status",
max_length=255, null=True, blank=True)
# From VIP standard format
candidate_ctcl_person_id = models.CharField(verbose_name="candidate person id", max_length=255, null=True, blank=True)
status = models.TextField(verbose_name="batch row action candidate status", null=True, blank=True, default="")
class BatchRowActionOrganization(models.Model):
"""
The definition of the action for importing one Organization.
"""
batch_set_id = models.PositiveIntegerField(verbose_name="unique id of batch set", unique=False, null=True)
batch_header_id = models.PositiveIntegerField(
verbose_name="unique id of header row", unique=False, null=False, db_index=True)
batch_row_id = models.PositiveIntegerField(
verbose_name="unique id of batch row", unique=False, null=False, db_index=True)
kind_of_action = models.CharField(
max_length=40, choices=KIND_OF_ACTION_CHOICES, default=IMPORT_TO_BE_DETERMINED, db_index=True)
# Fields from Organization
organization_we_vote_id = models.CharField(
verbose_name="we vote permanent id", max_length=255, null=True, blank=True)
organization_name = models.CharField(
verbose_name="organization name", max_length=255, null=False, blank=False)
organization_website = models.URLField(verbose_name='url of the endorsing organization', blank=True, null=True)
organization_email = models.EmailField(
verbose_name='organization contact email address', max_length=255, unique=False, null=True, blank=True)
organization_contact_name = models.CharField(max_length=255, null=True, unique=False)
organization_facebook = models.URLField(verbose_name='url of facebook page', blank=True, null=True)
organization_image = models.CharField(verbose_name='organization image', max_length=255, null=True, unique=False)
state_served_code = models.CharField(verbose_name="state this organization serves", max_length=2,
null=True, blank=True)
# The vote_smart special interest group sigId for this organization
vote_smart_id = models.BigIntegerField(
verbose_name="vote smart special interest group id", null=True, blank=True)
organization_description = models.TextField(
verbose_name="Text description of this organization.", null=True, blank=True)
organization_address = models.CharField(
verbose_name='organization street address', max_length=255, unique=False, null=True, blank=True)
organization_city = models.CharField(max_length=255, null=True, blank=True)
organization_state = models.CharField(max_length=2, null=True, blank=True)
organization_zip = models.CharField(max_length=255, null=True, blank=True)
organization_phone1 = models.CharField(max_length=255, null=True, blank=True)
organization_phone2 = models.CharField(max_length=255, null=True, blank=True)
organization_fax = models.CharField(max_length=255, null=True, blank=True)
# Facebook session information
facebook_id = models.BigIntegerField(verbose_name="facebook big integer id", null=True, blank=True)
facebook_email = models.EmailField(verbose_name='facebook email address', max_length=255, unique=False,
null=True, blank=True)
fb_username = models.CharField(max_length=50, validators=[alphanumeric], null=True)
facebook_profile_image_url_https = models.URLField(verbose_name='url of image from facebook', blank=True, null=True)
# Twitter information
twitter_user_id = models.BigIntegerField(verbose_name="twitter id", null=True, blank=True)
organization_twitter_handle = models.CharField(
verbose_name='organization twitter screen_name', max_length=255, null=True, unique=False)
twitter_name = models.CharField(
verbose_name="org name from twitter", max_length=255, null=True, blank=True)
twitter_location = models.CharField(
verbose_name="org location from twitter", max_length=255, null=True, blank=True)
twitter_followers_count = models.IntegerField(verbose_name="number of twitter followers",
null=False, blank=True, default=0)
twitter_profile_image_url_https = models.URLField(verbose_name='url of user logo from twitter',
blank=True, null=True)
twitter_profile_background_image_url_https = models.URLField(verbose_name='tile-able background from twitter',
blank=True, null=True)
twitter_profile_banner_url_https = models.URLField(verbose_name='profile banner image from twitter',
blank=True, null=True)
twitter_description = models.CharField(verbose_name="Text description of this organization from twitter.",
max_length=255, null=True, blank=True)
# Instagram
organization_instagram_handle = models.CharField(
verbose_name='organization instagram screen_name', max_length=255, null=True, unique=False)
wikipedia_page_id = models.BigIntegerField(verbose_name="pageid", null=True, blank=True)
wikipedia_page_title = models.CharField(
verbose_name="Page title on Wikipedia", max_length=255, null=True, blank=True)
wikipedia_thumbnail_url = models.URLField(verbose_name='url of wikipedia logo thumbnail', blank=True, null=True)
wikipedia_thumbnail_width = models.IntegerField(verbose_name="width of photo", null=True, blank=True)
wikipedia_thumbnail_height = models.IntegerField(verbose_name="height of photo", null=True, blank=True)
wikipedia_photo_url = models.URLField(verbose_name='url of wikipedia logo', blank=True, null=True)
ballotpedia_page_title = models.CharField(
verbose_name="Page title on Ballotpedia", max_length=255, null=True, blank=True)
ballotpedia_photo_url = models.URLField(verbose_name='url of ballotpedia logo', blank=True, null=True)
organization_type = models.CharField(
verbose_name="type of org", max_length=1, choices=ORGANIZATION_TYPE_CHOICES, default=UNKNOWN)
status = models.TextField(verbose_name="batch row action organization status", null=True, blank=True, default="")
class BatchRowActionPosition(models.Model):
"""
The definition of the action for importing one Position.
"""
batch_set_id = models.PositiveIntegerField(verbose_name="unique id of batch set", unique=False, null=True)
batch_header_id = models.PositiveIntegerField(
verbose_name="unique id of header row", unique=False, null=False, db_index=True)
batch_row_id = models.PositiveIntegerField(
verbose_name="unique id of batch row", unique=False, null=False, db_index=True)
kind_of_action = models.CharField(
max_length=40, choices=KIND_OF_ACTION_CHOICES, default=IMPORT_TO_BE_DETERMINED, db_index=True)
# Fields from Position
position_we_vote_id = models.CharField(
verbose_name="we vote permanent id", max_length=255, default=None, null=True, blank=True)
    # The display name of the ballot item (candidate, measure, or office) this position refers to
ballot_item_display_name = models.CharField(verbose_name="text name for ballot item",
max_length=255, null=True, blank=True)
# We cache the url to an image for the candidate, measure or office for rapid display
ballot_item_image_url_https = models.URLField(verbose_name='url of https image for candidate, measure or office',
blank=True, null=True)
ballot_item_twitter_handle = models.CharField(verbose_name='twitter screen_name for candidate, measure, or office',
max_length=255, null=True, unique=False)
# What is the organization name, voter name, or public figure name? We cache this here for rapid display
speaker_display_name = models.CharField(
verbose_name="name of the org or person with position", max_length=255, null=True, blank=True, unique=False)
# We cache the url to an image for the org, voter, or public_figure for rapid display
speaker_image_url_https = models.URLField(verbose_name='url of https image for org or person with position',
blank=True, null=True)
speaker_twitter_handle = models.CharField(verbose_name='twitter screen_name for org or person with position',
max_length=255, null=True, unique=False)
date_entered = models.DateTimeField(verbose_name='date entered', null=True, auto_now=True)
    # The date this position was last changed
date_last_changed = models.DateTimeField(verbose_name='date last changed', null=True, auto_now=True)
# The organization this position is for
organization_id = models.BigIntegerField(null=True, blank=True)
organization_we_vote_id = models.CharField(
verbose_name="we vote permanent id for the organization", max_length=255, null=True,
blank=True, unique=False)
# The voter expressing the opinion
# Note that for organizations who have friends, the voter_we_vote_id is what we use to link to the friends
# (in the PositionForFriends table).
# Public positions from an organization are shared via organization_we_vote_id (in PositionEntered table), while
# friend's-only positions are shared via voter_we_vote_id.
voter_id = models.BigIntegerField(null=True, blank=True)
voter_we_vote_id = models.CharField(
verbose_name="we vote permanent id for the voter expressing the opinion", max_length=255, null=True,
blank=True, unique=False)
# The unique id of the public figure expressing the opinion. May be null if position is from org or voter
# instead of public figure.
public_figure_we_vote_id = models.CharField(
verbose_name="public figure we vote id", max_length=255, null=True, blank=True, unique=False)
# The unique ID of the election containing this contest. (Provided by Google Civic)
google_civic_election_id = models.CharField(verbose_name="google civic election id",
max_length=255, null=True, blank=False, default=0, db_index=True)
google_civic_election_id_new = models.PositiveIntegerField(
verbose_name="google civic election id", default=0, null=True, blank=True)
# State code
state_code = models.CharField(verbose_name="us state of the ballot item position is for",
max_length=2, null=True, blank=True)
# ### Values from Vote Smart ###
vote_smart_rating_id = models.BigIntegerField(null=True, blank=True, unique=False)
    # Usually in one of these two formats: 2015, 2014-2015
vote_smart_time_span = models.CharField(
verbose_name="the period in which the organization stated this position", max_length=255, null=True,
blank=True, unique=False)
vote_smart_rating = models.CharField(
verbose_name="vote smart value between 0-100", max_length=255, null=True,
blank=True, unique=False)
vote_smart_rating_name = models.CharField(max_length=255, null=True, blank=True, unique=False)
# The unique We Vote id of the tweet that is the source of the position
tweet_source_id = models.BigIntegerField(null=True, blank=True)
# This is the office that the position refers to.
    # Either contest_office is filled, contest_measure OR candidate_campaign, but not all three
contest_office_id = models.BigIntegerField(verbose_name='id of contest_office', null=True, blank=True)
contest_office_we_vote_id = models.CharField(
verbose_name="we vote permanent id for the contest_office", max_length=255, null=True, blank=True, unique=False)
contest_office_name = models.CharField(verbose_name="name of the office", max_length=255, null=True, blank=True)
# This is the candidate/politician that the position refers to.
# Either candidate_campaign is filled, contest_office OR contest_measure, but not all three
candidate_campaign_id = models.BigIntegerField(verbose_name='id of candidate_campaign', null=True, blank=True)
candidate_campaign_we_vote_id = models.CharField(
verbose_name="we vote permanent id for the candidate_campaign", max_length=255, null=True,
blank=True, unique=False)
# The candidate's name as passed over by Google Civic. We save this so we can match to this candidate if an import
# doesn't include a we_vote_id we recognize.
google_civic_candidate_name = models.CharField(verbose_name="candidate name exactly as received from google civic",
max_length=255, null=True, blank=True)
# The measure's title as passed over by Google Civic. We save this so we can match to this measure if an import
# doesn't include a we_vote_id we recognize.
google_civic_measure_title = models.CharField(verbose_name="measure title exactly as received from google civic",
max_length=255, null=True, blank=True)
# Useful for queries based on Politicians -- not the main table we use for ballot display though
politician_id = models.BigIntegerField(verbose_name='', null=True, blank=True)
politician_we_vote_id = models.CharField(
verbose_name="we vote permanent id for politician", max_length=255, null=True,
blank=True, unique=False)
political_party = models.CharField(verbose_name="political party", max_length=255, null=True)
# This is the measure/initiative/proposition that the position refers to.
# Either contest_measure is filled, contest_office OR candidate_campaign, but not all three
contest_measure_id = models.BigIntegerField(verbose_name='id of contest_measure', null=True, blank=True)
contest_measure_we_vote_id = models.CharField(
verbose_name="we vote permanent id for the contest_measure", max_length=255, null=True,
blank=True, unique=False)
# Strategic denormalization - this is redundant but will make generating the voter guide easier.
# geo = models.ForeignKey(Geo, null=True, related_name='pos_geo')
# issue = models.ForeignKey(Issue, null=True, blank=True, related_name='')
stance = models.CharField(max_length=15, choices=POSITION_CHOICES, default=NO_STANCE) # supporting/opposing
    statement_text = models.TextField(null=True, blank=True)
    statement_html = models.TextField(null=True, blank=True)
# A link to any location with more information about this position
more_info_url = models.URLField(blank=True, null=True, verbose_name='url with more info about this position')
# Did this position come from a web scraper?
from_scraper = models.BooleanField(default=False)
# Was this position certified by an official with the organization?
organization_certified = models.BooleanField(default=False)
# Was this position certified by an official We Vote volunteer?
volunteer_certified = models.BooleanField(default=False)
status = models.TextField(verbose_name="batch row action position status", null=True, blank=True, default="")
class BatchRowActionBallotItem(models.Model):
"""
The definition of the action for importing one ballot item.
"""
batch_set_id = models.PositiveIntegerField(verbose_name="unique id of batch set", unique=False, null=True)
batch_header_id = models.PositiveIntegerField(
verbose_name="unique id of header row", unique=False, null=False, db_index=True)
batch_row_id = models.PositiveIntegerField(
verbose_name="unique id of batch row", unique=True, null=False, db_index=True)
kind_of_action = models.CharField(
max_length=40, choices=KIND_OF_ACTION_CHOICES, default=IMPORT_TO_BE_DETERMINED, db_index=True)
ballot_item_id = models.IntegerField(verbose_name="ballot item unique id", default=0, null=True, blank=True)
# Fields from BallotItem
# The unique id of the voter for which this ballot was retrieved
voter_id = models.IntegerField(verbose_name="the voter unique id", default=0, null=False, blank=False)
# The polling location for which this ballot was retrieved
polling_location_we_vote_id = models.CharField(
verbose_name="we vote permanent id of the polling location", max_length=255, default=None, null=True,
blank=True, unique=False)
# The unique ID of this election. (Provided by Google Civic)
google_civic_election_id = models.CharField(verbose_name="google civic election id",
max_length=20, null=False, db_index=True)
google_civic_election_id_new = models.PositiveIntegerField(
verbose_name="google civic election id", default=0, null=False)
state_code = models.CharField(verbose_name="state the ballot item is related to", max_length=2, null=True)
google_ballot_placement = models.BigIntegerField(
verbose_name="the order this item should appear on the ballot", null=True, blank=True, unique=False)
local_ballot_order = models.IntegerField(
verbose_name="locally calculated order this item should appear on the ballot", null=True, blank=True)
# The id for this contest office specific to this server.
contest_office_id = models.PositiveIntegerField(verbose_name="local id for this contest office", default=0,
null=True, blank=True)
    # The internal We Vote id for the ContestOffice that this campaign is taking a stance on
contest_office_we_vote_id = models.CharField(
verbose_name="we vote permanent id for this office", max_length=255, default=None, null=True,
blank=True, unique=False)
# The local database id for this measure, specific to this server.
contest_measure_id = models.PositiveIntegerField(
verbose_name="contest_measure unique id", default=0, null=True, blank=True)
    # The internal We Vote id for the ContestMeasure that this campaign is taking a stance on
contest_measure_we_vote_id = models.CharField(
verbose_name="we vote permanent id for this measure", max_length=255, default=None, null=True,
blank=True, unique=False)
# This is a sortable name, either the candidate name or the measure name
ballot_item_display_name = models.CharField(verbose_name="a label we can sort by", max_length=255, null=True,
blank=True)
measure_subtitle = models.TextField(verbose_name="google civic referendum subtitle",
null=True, blank=True, default="")
measure_text = models.TextField(verbose_name="measure text", null=True, blank=True, default="")
measure_url = models.URLField(verbose_name='url of measure', blank=True, null=True)
yes_vote_description = models.TextField(verbose_name="what a yes vote means", null=True, blank=True, default=None)
no_vote_description = models.TextField(verbose_name="what a no vote means", null=True, blank=True, default=None)
status = models.TextField(verbose_name="batch row action ballot item status", null=True, blank=True, default="")
def create_batch_from_json(file_name, structured_json_list, mapping_dict, kind_of_batch,
google_civic_election_id=0, organization_we_vote_id="", polling_location_we_vote_id="",
batch_set_id=0, state_code=""):
batch_manager = BatchManager()
return batch_manager.create_batch_from_json(
file_name, structured_json_list, mapping_dict, kind_of_batch,
google_civic_election_id, organization_we_vote_id, polling_location_we_vote_id, batch_set_id, state_code)
|
{
"content_hash": "32862beb3aad803c01a80ec014ffeb84",
"timestamp": "",
"source": "github",
"line_count": 5211,
"max_line_length": 123,
"avg_line_length": 56.09710228363078,
"alnum_prop": 0.5920936501529136,
"repo_name": "jainanisha90/WeVoteServer",
"id": "54765af5d9169cd310f3971f8d2dd581dea7514b",
"size": "292427",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "import_export_batches/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3612"
},
{
"name": "HTML",
"bytes": "1003027"
},
{
"name": "Python",
"bytes": "7489854"
},
{
"name": "Shell",
"bytes": "611"
}
],
"symlink_target": ""
}
|
import Network
from time import sleep
from threading import Thread
CALL_ROOMLIST = 0
CALL_WEAPLIST = 1
CALL_PLAYERLIST = 2
CALL_NEWPLAYER = 3
CALL_PLAYERLEFT = 4
CALL_CHAT = 5
CALL_PLAYERDAT = 6
CALL_ROOMSTAT = 7
CALL_LEAVEROOM = 8
CALL_SHOOT = 9
CALL_SCORE = 10
class GameClient(Network.Client):
CONNECTING = 0
JOINING_ROOM = 1
LEAVING_ROOM = 2
rooms = []
players = []
    weapList = []
scores = {}
response = {}
currRoomInfo = None
main = None
status = -1
charId = 0
roomState = -1
roomId = 0
roomName = ""
stateDict = {
"WAITING":0,
"PLAYING":1,
"DEAD":99
}
invStateDict = {
0:"WAITING",
1:"PLAYING",
99:"DEAD"
}
winnerId = -1
def __init__(self, main):
super(GameClient, self).__init__()
self.main = main
self.rooms = []
self.scores = {}
        self.players = []
self.weapList = []
self.response = {}
def connect(self, name, addr, evt=False): #Blocks
self.status = self.CONNECTING
super(GameClient, self).connect(name, addr)
if evt:
self.onConnect(self.complete(self.CONNECTING))
else:
return self.complete(self.CONNECTING)
def connect_async(self, name, addr): #Doesn't block
t = Thread(target=self.connect, args=[name, addr, True])
t.start()
# NETWORK FUNCTIONS
def complete(self, event, timeout = 2):
waited = 0
while event == self.status and waited <= timeout:
sleep(.1)
waited += .1
if waited >= timeout:
return False
return self.response[event]
def done(self, event, response):
self.response[event] = response
self.status = -1
def playerById(self, pId):
low = 0
high = len(self.players) - 1
while low <= high:
mid = (low + high) >> 1
midId = self.players[mid][0]
if midId < pId:
low = mid + 1
elif midId > pId:
high = mid - 1
else:
return mid
return None
def getPlayers(self):
return self.players
def getRooms(self):
return self.rooms
def clearScores(self):
self.scores = {}
# EVENT FUNCTIONS
def onConnect(self, result):
self.main.onConnect(result)
def onRoomList(self, data):
self.rooms = data
self.main.handleNetworkCall(CALL_ROOMLIST, (self.rooms,))
def onWeapList(self, data):
self.weapList = data
self.main.handleNetworkCall(CALL_WEAPLIST, (self.weapList,))
def onPlayerList(self, playerList, roomId, roomState, yourId):
self.players = playerList
self.playerId = yourId
self.players.sort()
self.roomId = roomId
self.roomState = roomState
if self.status in [self.CONNECTING, self.JOINING_ROOM, self.LEAVING_ROOM]:
self.done(self.status, True)
self.main.handleNetworkCall(CALL_PLAYERLIST, (self.players,))
def onNewPlayer(self, player):
#playername = player[0][:player[0].find('\00')]
self.players.append(player)
self.players.sort()
self.main.handleNetworkCall(CALL_NEWPLAYER, (player,))
def onPlayerLeft(self, data):
playerPos = self.playerById(data[0])
player = self.players[playerPos]
del self.players[playerPos]
if data[2] != -1:
self.players[self.playerById(data[2])] = self.changeTuple(self.players[self.playerById(data[2])], 4, True)
self.main.handleNetworkCall(CALL_PLAYERLEFT, (player,))
def changeTuple(self, tup, key, value):
flist = list(tup)
flist[key] = value
return tuple(flist)
def onChat(self, data):
self.main.handleNetworkCall(CALL_CHAT, (data,))
def onPlayerData(self, data):
self.main.handleNetworkCall(CALL_PLAYERDAT, (data,))
def onRoomStat(self, data):
self.winnerId = data[1]
self.main.handleNetworkCall(CALL_ROOMSTAT, (data,))
#if data[0] == 0:
# self.main.endGame()
#elif data[0] == 1:
# print "starting game"
# self.main.startGame()
def onRoomSwitch(self, action, result):
self.main.onRoomSwitch(action, result)
return result
def onLeaveRoom(self):
if self.status in [self.JOINING_ROOM]:
self.done(self.status, False)
def onShoot(self, bulletdata):
self.main.handleNetworkCall(CALL_SHOOT, (bulletdata,))
def onScore(self, score):
self.scores[score[0]] = score[1], score[2]
self.scores[score[3]] = score[4], score[5]
self.main.handleNetworkCall(CALL_SCORE, (score,))
def onChangeChar(self, charId, playerId):
playerPos = self.playerById(playerId)
player = self.players[playerPos]
self.players[playerPos] = self.changeTuple(self.players[playerPos], 3, charId)
def onDisconnect(self):
self.main.onDisconnect()
## SENDING FUNCTIONS
def joinRoom(self, roomid, roomName, block=False):
if block:
self.status = self.JOINING_ROOM
self.sendDataReliable(Network.Structs.joinRoom.dataType, Network.Structs.joinRoom.pack(roomid)).join()
# This function blocks...
return self.onRoomSwitch(self.JOINING_ROOM, self.complete(self.JOINING_ROOM))
else:
self.winnerId = -1
self.roomName = roomName
Thread(target=self.joinRoom, args=[roomid, roomName, True]).start()
def makeRoom(self, roomName, block=False):
if block:
self.status = self.JOINING_ROOM
self.sendDataReliable(Network.Structs.makeRoom.dataType, Network.Structs.makeRoom.pack(len(roomName))+roomName)
return self.onRoomSwitch(self.JOINING_ROOM, self.complete(self.JOINING_ROOM))
else:
self.winnerId = -1
self.roomName = roomName
Thread(target=self.makeRoom, args=[roomName, True]).start()
def leaveRoom(self, block=False):
if block:
self.status = self.LEAVING_ROOM
self.sendDataReliable(Network.Structs.leaveRoom.dataType, Network.Structs.leaveRoom.pack())
return self.onRoomSwitch(self.LEAVING_ROOM, self.complete(self.LEAVING_ROOM))
else:
self.winnerId = -1
Thread(target=self.leaveRoom, args=[True]).start()
def startGame(self):
self.sendDataReliable(Network.Structs.startGame.dataType, Network.Structs.startGame.pack(0))
def sendGameData(self, gameData):
self.sendData(Network.Structs.playerDat.dataType, gameData)
def sendShoot(self, bullet):
self.sendDataReliable(Network.Structs.shoot.dataType, Network.Structs.shoot.pack(-1, bullet.x, bullet.y, bullet.angle, bullet.type))
def setCharacter(self, charId):
self.sendDataReliable(Network.Structs.setCharacter.dataType, Network.Structs.setCharacter.pack(charId, 0))
self.charId = charId
def sendDeath(self, killerid):
self.sendDataReliable(Network.Structs.onDeath.dataType, Network.Structs.onDeath.pack(killerid))
def sendPicked(self, serverId):
self.sendDataReliable(Network.Structs.takeWeap.dataType, Network.Structs.takeWeap.pack(serverId))
def sendChat(self, data):
self.sendDataReliable(Network.Structs.preChat.dataType, Network.Structs.preChat.pack(len(data)) + data)
def __del__(self):
super(GameClient, self).__del__()
|
{
"content_hash": "3b2960327859e8f5f4c3e70a8d20564c",
"timestamp": "",
"source": "github",
"line_count": 242,
"max_line_length": 140,
"avg_line_length": 31.458677685950413,
"alnum_prop": 0.6105346118481545,
"repo_name": "nemothekid/Colosseum--Year-3XXX",
"id": "8ded432cc003bf959695cc37486f79259b3c92b0",
"size": "7613",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "GameClient.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "231185"
}
],
"symlink_target": ""
}
|
from corehq.apps.linked_domain.keywords import create_linked_keyword, update_keyword
from corehq.apps.app_manager.models import Module
from corehq.apps.linked_domain.tests.test_linked_apps import BaseLinkedAppsTest
from corehq.apps.reminders.models import METHOD_SMS
from corehq.apps.sms.models import Keyword, KeywordAction
class TestLinkedKeywords(BaseLinkedAppsTest):
def setUp(self):
super(TestLinkedKeywords, self).setUp()
module = self.master1.add_module(Module.new_module("M1", None))
master_form = module.new_form("f1", None, self.get_xml("very_simple_form").decode("utf-8"))
self.keyword = Keyword(
domain=self.domain_link.master_domain,
keyword="ping",
description="The description",
override_open_sessions=True,
)
self.keyword.save()
self.keyword.keywordaction_set.create(
recipient=KeywordAction.RECIPIENT_SENDER,
action=METHOD_SMS,
message_content="pong",
app_id=self.master1.get_id,
form_unique_id=master_form.unique_id,
)
def tearDown(self):
self.keyword.delete()
super(TestLinkedKeywords, self).tearDown()
def test_create_keyword_link(self):
new_keyword_id = create_linked_keyword(self.domain_link, self.keyword.id)
new_keyword = Keyword.objects.get(id=new_keyword_id)
self.assertEqual(new_keyword.keyword, self.keyword.keyword)
new_keyword_action = new_keyword.keywordaction_set.first()
self.assertEqual(
new_keyword_action.message_content,
self.keyword.keywordaction_set.first().message_content,
)
self.assertEqual(new_keyword_action.app_id, self.linked_app.get_id)
def test_update_keyword_link(self):
new_keyword_id = create_linked_keyword(self.domain_link, self.keyword.id)
self.keyword.keyword = "foo"
self.keyword.save()
keyword_action = self.keyword.keywordaction_set.first()
keyword_action.message_content = "bar"
keyword_action.save()
update_keyword(self.domain_link, new_keyword_id)
linked_keyword = Keyword.objects.get(id=new_keyword_id)
self.assertEqual(linked_keyword.keyword, "foo")
self.assertEqual(linked_keyword.keywordaction_set.first().message_content, "bar")
|
{
"content_hash": "f1f1cdb2e7193f4a26b730575d36d7ab",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 99,
"avg_line_length": 40.8448275862069,
"alnum_prop": 0.6699029126213593,
"repo_name": "dimagi/commcare-hq",
"id": "3ad1af7b15607a62235237d6e36ad6a84ec6a28f",
"size": "2369",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "corehq/apps/linked_domain/tests/test_linked_keywords.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "82928"
},
{
"name": "Dockerfile",
"bytes": "2341"
},
{
"name": "HTML",
"bytes": "2589268"
},
{
"name": "JavaScript",
"bytes": "5889543"
},
{
"name": "Jinja",
"bytes": "3693"
},
{
"name": "Less",
"bytes": "176180"
},
{
"name": "Makefile",
"bytes": "1622"
},
{
"name": "PHP",
"bytes": "2232"
},
{
"name": "PLpgSQL",
"bytes": "66704"
},
{
"name": "Python",
"bytes": "21779773"
},
{
"name": "Roff",
"bytes": "150"
},
{
"name": "Shell",
"bytes": "67473"
}
],
"symlink_target": ""
}
|
from blueice.test_helpers import *
from blueice.inference import *
from blueice.likelihood import UnbinnedLogLikelihood as LogLikelihood
def test_fit_minuit():
# Single rate parameter
lf = LogLikelihood(test_conf())
lf.add_rate_parameter('s0')
lf.set_data(lf.base_model.simulate())
fit_result, ll = bestfit_minuit(lf)
assert isinstance(fit_result, dict)
assert 's0_rate_multiplier' in fit_result
# Don't fit
res, ll = bestfit_minuit(lf, s0_rate_multiplier=1)
assert len(res) == 0
assert ll == lf(s0_rate_multiplier=1)
# Single shape parameter
lf = LogLikelihood(test_conf())
lf.add_shape_parameter('some_multiplier', (0.5, 1, 1.5, 2))
lf.prepare()
lf.set_data(lf.base_model.simulate())
fit_result, ll = bestfit_minuit(lf)
assert 'some_multiplier' in fit_result
# Shape and rate parameter
lf = LogLikelihood(test_conf())
lf.add_rate_parameter('s0')
lf.add_shape_parameter('some_multiplier', (0.5, 1, 1.5, 2))
lf.prepare()
lf.set_data(lf.base_model.simulate())
fit_result, ll = bestfit_minuit(lf)
assert 'some_multiplier' in fit_result
assert 's0_rate_multiplier' in fit_result
# Non-numeric shape parameter
lf = LogLikelihood(test_conf())
lf.add_shape_parameter('strlen_multiplier', {1: 'x', 2: 'hi', 3:'wha'}, base_value=1)
lf.prepare()
lf.set_data(lf.base_model.simulate())
fit_result, ll = bestfit_minuit(lf)
assert 'strlen_multiplier' in fit_result
def test_fit_scipy():
# Single rate parameter
lf = LogLikelihood(test_conf())
lf.add_rate_parameter('s0')
lf.set_data(lf.base_model.simulate())
fit_result, ll = bestfit_scipy(lf)
assert isinstance(fit_result, dict)
assert 's0_rate_multiplier' in fit_result
# Don't fit
res, ll = bestfit_scipy(lf, s0_rate_multiplier=1)
assert len(res) == 0
assert ll == lf(s0_rate_multiplier=1)
# Single shape parameter
lf = LogLikelihood(test_conf())
lf.add_shape_parameter('some_multiplier', (0.5, 1, 1.5, 2))
lf.prepare()
lf.set_data(lf.base_model.simulate())
fit_result, ll = bestfit_scipy(lf)
assert 'some_multiplier' in fit_result
# Shape and rate parameter
lf = LogLikelihood(test_conf())
lf.add_rate_parameter('s0')
lf.add_shape_parameter('some_multiplier', (0.5, 1, 1.5, 2))
lf.prepare()
lf.set_data(lf.base_model.simulate())
fit_result, ll = bestfit_scipy(lf)
assert 'some_multiplier' in fit_result
assert 's0_rate_multiplier' in fit_result
# Non-numeric shape parameter
lf = LogLikelihood(test_conf())
lf.add_shape_parameter('strlen_multiplier', {1: 'x', 2: 'hi', 3:'wha'}, base_value=1)
lf.prepare()
lf.set_data(lf.base_model.simulate())
fit_result, ll = bestfit_scipy(lf)
assert 'strlen_multiplier' in fit_result
# def test_plot():
# """Tests the plot_likelihood_space code.
# For now just test that it doesn't crash -- image comparison tests are tricky...
# """
# import matplotlib.pyplot as plt
# lf = LogLikelihood(test_conf())
# lf.add_rate_parameter('s0')
# lf.add_shape_parameter('some_multiplier', (0.5, 1, 1.5, 2))
# lf.prepare()
# lf.set_data(lf.base_model.simulate())
#
# plot_likelihood_ratio(lf, ('s0_rate_multiplier', np.linspace(0.5, 2, 3)))
# plt.close()
# plot_likelihood_ratio(lf,
# ('s0_rate_multiplier', np.linspace(0.5, 2, 3)),
# ('some_multiplier', np.linspace(0.5, 2, 3)))
# plt.close()
def test_limit():
"""Test the limit setting code
For now just tests if it runs, does not test whether the results are correct...
"""
lf = LogLikelihood(test_conf(n_sources=2))
lf.add_rate_parameter('s0')
lf.prepare()
lf.set_data(lf.base_model.simulate())
    # Test upper, lower and central intervals
one_parameter_interval(lf, target='s0_rate_multiplier', kind='upper', bound=40)
one_parameter_interval(lf, target='s0_rate_multiplier', kind='lower', bound=0.1)
one_parameter_interval(lf, target='s0_rate_multiplier', kind='central', bound=(0.1, 20))
    # It's a bit tricky to test multiple params: in these simple examples they can compensate
    # completely for each other, so all values in a subspace seem equally likely once two of
    # them are floating.
|
{
"content_hash": "4fa55a4608f8515114cf36567656352b",
"timestamp": "",
"source": "github",
"line_count": 121,
"max_line_length": 112,
"avg_line_length": 35.67768595041322,
"alnum_prop": 0.6492934908501274,
"repo_name": "JelleAalbers/blueice",
"id": "3859d8eb3bfc9d0a4c0a79c73e7a885ff62acdba",
"size": "4362",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_inference.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "166166"
},
{
"name": "Python",
"bytes": "122450"
}
],
"symlink_target": ""
}
|
import pdb
import math
import numpy as np
import time
import chainer
import chainer.functions as F
import chainer.links as L
from chainer import cuda
from util import gaussian_kl_divergence_standard
from util import gaussian_logp
from util import gaussian_logp0
from util import bernoulli_logp
class VAE(chainer.Chain):
def __init__(self, dim_in, dim_hidden, dim_latent, num_layers, temperature, num_zsamples=1):
super(VAE, self).__init__()
# initialise first encoder and decoder hidden layer separately because
# the input and output dims differ from the other hidden layers
self.qlin0 = L.Linear(dim_in, dim_hidden)
self.plin0 = L.Linear(dim_latent, dim_hidden)
self._children.append('qlin0')
self._children.append('plin0')
for i in range(num_layers-1):
# encoder
layer_name = 'qlin' + str(i+1)
setattr(self, layer_name, L.Linear(2*dim_hidden, dim_hidden))
self._children.append(layer_name)
# decoder
layer_name = 'plin' + str(i+1)
setattr(self, layer_name, L.Linear(2*dim_hidden, dim_hidden))
self._children.append(layer_name)
# initialise the encoder and decoder output layer separately because
# the input and output dims differ from the other hidden layers
self.qlin_mu = L.Linear(2*dim_hidden, dim_latent)
self.qlin_ln_var = L.Linear(2*dim_hidden, dim_latent)
self.plin_ber_prob = L.Linear(2*dim_hidden, dim_in)
self._children.append('qlin_mu')
self._children.append('qlin_ln_var')
self._children.append('plin_ber_prob')
self.num_layers = num_layers
self.temperature = temperature
self.num_zsamples = num_zsamples
self.epochs_seen = 0
def encode(self, x):
h = F.crelu(self.qlin0(x))
for i in range(self.num_layers-1):
layer_name = 'qlin' + str(i+1)
h = F.crelu(self[layer_name](h))
self.qmu = self.qlin_mu(h)
self.qln_var = self.qlin_ln_var(h)
def decode(self, z):
h = F.crelu(self.plin0(z))
for i in range(self.num_layers-1):
layer_name = 'plin' + str(i+1)
h = F.crelu(self[layer_name](h))
self.p_ber_prob_logit = self.plin_ber_prob(h)
def __call__(self, x):
# Obtain parameters for q(z|x)
encoding_time = time.time()
self.encode(x)
encoding_time = float(time.time() - encoding_time)
decoding_time_average = 0.
xp = cuda.cupy
self.importance_weights = 0
self.w_holder = []
self.kl = 0
self.logp = 0
for j in xrange(self.num_zsamples):
# Sample z ~ q(z|x)
z = F.gaussian(self.qmu, self.qln_var)
# Compute log q(z|x)
encoder_log = gaussian_logp(z, self.qmu, self.qln_var)
# Obtain parameters for p(x|z)
decoding_time = time.time()
self.decode(z)
decoding_time = time.time() - decoding_time
decoding_time_average += decoding_time
# Compute log p(x|z)
decoder_log = bernoulli_logp(x, self.p_ber_prob_logit)
# Compute log p(z).
prior_log = gaussian_logp0(z)
            # Store the latest log weight
current_temperature = min(self.temperature['value'],1.0)
self.w_holder.append(decoder_log + current_temperature*(prior_log - encoder_log))
# Store the KL and Logp equivalents. They are not used for computation but for recording and reporting.
self.kl += (encoder_log-prior_log)
self.logp += (decoder_log)
self.temperature['value'] += self.temperature['increment']
# Compute w' for this sample (batch)
logps = F.stack(self.w_holder)
self.obj_batch = F.logsumexp(logps, axis=0) - np.log(self.num_zsamples)
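        # Note: obj_batch is the per-example importance-weighted (IWAE-style) bound,
        # log((1/K) * sum_k exp(w'_k)) with K = num_zsamples, evaluated in log space
        # via logsumexp for numerical stability. With num_zsamples == 1 and
        # temperature == 1 this reduces to the usual single-sample ELBO estimate.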
self.kl /= self.num_zsamples
self.logp /= self.num_zsamples
decoding_time_average /= self.num_zsamples
batch_size = self.obj_batch.shape[0]
self.obj = -F.sum(self.obj_batch)/batch_size
self.timing_info = np.array([encoding_time,decoding_time_average])
return self.obj
|
{
"content_hash": "15554a8d136f01b3213804d1114b3387",
"timestamp": "",
"source": "github",
"line_count": 131,
"max_line_length": 116,
"avg_line_length": 33.534351145038165,
"alnum_prop": 0.5788754837241066,
"repo_name": "ashwindcruz/dgm",
"id": "0432ccd00c24420aed177a8ff53def36ad36bca1",
"size": "4393",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "iwae_mnist/model.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "143490"
},
{
"name": "Shell",
"bytes": "1101"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from typing import Optional
from typing import List
from collections import OrderedDict
from oslo_config import cfg
import six
from mongoengine import ValidationError
from st2common import log as logging
from st2common.constants.action import (
LIVEACTION_STATUSES,
LIVEACTION_STATUS_CANCELED,
LIVEACTION_STATUS_SUCCEEDED,
)
from st2common.exceptions.db import StackStormDBObjectNotFoundError
from st2common.persistence.action import Action
from st2common.persistence.liveaction import LiveAction
from st2common.persistence.runner import RunnerType
from st2common.metrics.base import get_driver
from st2common.util import output_schema
from st2common.util.jsonify import json_encode
LOG = logging.getLogger(__name__)
__all__ = [
"get_action_parameters_specs",
"get_runnertype_by_id",
"get_runnertype_by_name",
"get_action_by_id",
"get_action_by_ref",
"get_liveaction_by_id",
"update_liveaction_status",
"serialize_positional_argument",
"get_args",
]
def get_action_parameters_specs(action_ref):
"""
    Retrieve the parameter specification schema for the provided action reference.
Note: This function returns a union of action and action runner parameters.
:param action_ref: Action reference.
:type action_ref: ``str``
:rtype: ``dict``
"""
action_db = get_action_by_ref(ref=action_ref)
parameters = {}
if not action_db:
return parameters
runner_type_name = action_db.runner_type["name"]
runner_type_db = get_runnertype_by_name(runnertype_name=runner_type_name)
# Runner type parameters should be added first before the action parameters.
parameters.update(runner_type_db["runner_parameters"])
parameters.update(action_db.parameters)
return parameters
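# Note that because action_db.parameters is applied last, an action parameter with
# the same name as a runner parameter overrides it completely. For example (the
# names below are purely illustrative), if the runner declares
# {'timeout': {'default': 60}} and the action declares {'timeout': {'default': 600}},
# the returned spec contains the action's entry.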
def get_runnertype_by_id(runnertype_id):
"""
Get RunnerType by id.
On error, raise StackStormDBObjectNotFoundError
"""
try:
runnertype = RunnerType.get_by_id(runnertype_id)
except (ValueError, ValidationError) as e:
LOG.warning(
'Database lookup for runnertype with id="%s" resulted in ' "exception: %s",
runnertype_id,
e,
)
raise StackStormDBObjectNotFoundError(
"Unable to find runnertype with " 'id="%s"' % runnertype_id
)
return runnertype
def get_runnertype_by_name(runnertype_name):
"""
Get an runnertype by name.
On error, raise ST2ObjectNotFoundError.
"""
try:
runnertypes = RunnerType.query(name=runnertype_name)
except (ValueError, ValidationError) as e:
LOG.error(
'Database lookup for name="%s" resulted in exception: %s',
runnertype_name,
e,
)
raise StackStormDBObjectNotFoundError(
'Unable to find runnertype with name="%s"' % runnertype_name
)
if not runnertypes:
raise StackStormDBObjectNotFoundError(
'Unable to find RunnerType with name="%s"' % runnertype_name
)
if len(runnertypes) > 1:
LOG.warning(
"More than one RunnerType returned from DB lookup by name. "
"Result list is: %s",
runnertypes,
)
return runnertypes[0]
def get_action_by_id(action_id):
"""
Get Action by id.
On error, raise StackStormDBObjectNotFoundError
"""
action = None
try:
action = Action.get_by_id(action_id)
except (ValueError, ValidationError) as e:
LOG.warning(
'Database lookup for action with id="%s" resulted in ' "exception: %s",
action_id,
e,
)
raise StackStormDBObjectNotFoundError(
"Unable to find action with " 'id="%s"' % action_id
)
return action
def get_action_by_ref(ref, only_fields: Optional[List[str]] = None):
"""
Returns the action object from db given a string ref.
:param ref: Reference to the trigger type db object.
:type ref: ``str``
    :param only_fields: Optional list of fields to retrieve. If not specified, it defaults to all
fields.
:rtype action: ``object``
"""
try:
return Action.get_by_ref(ref, only_fields=only_fields)
except ValueError as e:
LOG.debug(
'Database lookup for ref="%s" resulted ' + "in exception : %s.",
ref,
e,
exc_info=True,
)
return None
def get_liveaction_by_id(liveaction_id):
"""
Get LiveAction by id.
On error, raise ST2DBObjectNotFoundError.
"""
liveaction = None
try:
liveaction = LiveAction.get_by_id(liveaction_id)
except (ValidationError, ValueError) as e:
LOG.error(
'Database lookup for LiveAction with id="%s" resulted in ' "exception: %s",
liveaction_id,
e,
)
raise StackStormDBObjectNotFoundError(
"Unable to find LiveAction with " 'id="%s"' % liveaction_id
)
return liveaction
def update_liveaction_status(
status=None,
result=None,
context=None,
end_timestamp=None,
liveaction_id=None,
runner_info=None,
liveaction_db=None,
publish=True,
):
"""
    Update the status of the specified LiveAction to the value provided in
    status.
    The LiveAction may be specified using either a liveaction_id or a
    liveaction_db instance.
"""
if (liveaction_id is None) and (liveaction_db is None):
raise ValueError(
"Must specify an liveaction_id or an liveaction_db when "
"calling update_LiveAction_status"
)
if liveaction_db is None:
liveaction_db = get_liveaction_by_id(liveaction_id)
if status not in LIVEACTION_STATUSES:
raise ValueError(
'Attempting to set status for LiveAction "%s" '
'to unknown status string. Unknown status is "%s"' % (liveaction_db, status)
)
if (
result
and cfg.CONF.system.validate_output_schema
and status == LIVEACTION_STATUS_SUCCEEDED
):
action_db = get_action_by_ref(liveaction_db.action)
runner_db = get_runnertype_by_name(action_db.runner_type["name"])
result, status = output_schema.validate_output(
runner_db.output_schema,
action_db.output_schema,
result,
status,
runner_db.output_key,
)
# If liveaction_db status is set then we need to decrement the counter
# because it is transitioning to a new state
if liveaction_db.status:
get_driver().dec_counter("action.executions.%s" % (liveaction_db.status))
    # If status is provided then we need to increment the counter because the action
# is transitioning into this new state
if status:
get_driver().inc_counter("action.executions.%s" % (status))
extra = {"liveaction_db": liveaction_db}
LOG.debug(
        'Updating ActionExecution: "%s" with status="%s"',
liveaction_db.id,
status,
extra=extra,
)
# If liveaction is already canceled, then do not allow status to be updated.
if (
liveaction_db.status == LIVEACTION_STATUS_CANCELED
and status != LIVEACTION_STATUS_CANCELED
):
LOG.info(
'Unable to update ActionExecution "%s" with status="%s". '
"ActionExecution is already canceled.",
liveaction_db.id,
status,
extra=extra,
)
return liveaction_db
old_status = liveaction_db.status
liveaction_db.status = status
if result:
liveaction_db.result = result
if context:
liveaction_db.context.update(context)
if end_timestamp:
liveaction_db.end_timestamp = end_timestamp
if runner_info:
liveaction_db.runner_info = runner_info
# TODO: This is not efficient. Perform direct partial update and only update
# manipulated fields
liveaction_db = LiveAction.add_or_update(liveaction_db)
LOG.debug("Updated status for LiveAction object.", extra=extra)
if publish and status != old_status:
LiveAction.publish_status(liveaction_db)
LOG.debug("Published status for LiveAction object.", extra=extra)
return liveaction_db
def serialize_positional_argument(argument_type, argument_value):
"""
Serialize the provided positional argument.
Note: Serialization is NOT performed recursively since it doesn't make much
    sense for shell script actions (only the outer / top level value is
serialized).
"""
if argument_type in ["string", "number", "float"]:
if argument_value is None:
argument_value = six.text_type("")
return argument_value
if isinstance(argument_value, (int, float)):
argument_value = str(argument_value)
if not isinstance(argument_value, six.text_type):
# cast string non-unicode values to unicode
argument_value = argument_value.decode("utf-8")
elif argument_type == "boolean":
# Booleans are serialized as string "1" and "0"
if argument_value is not None:
argument_value = "1" if bool(argument_value) else "0"
else:
argument_value = ""
elif argument_type in ["array", "list"]:
# Lists are serialized a comma delimited string (foo,bar,baz)
argument_value = ",".join(map(str, argument_value)) if argument_value else ""
elif argument_type == "object":
# Objects are serialized as JSON
argument_value = json_encode(argument_value) if argument_value else ""
elif argument_type == "null":
        # None / null is serialized as an empty string
argument_value = ""
else:
# Other values are simply cast to unicode string
argument_value = six.text_type(argument_value) if argument_value else ""
return argument_value
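# A rough sketch of the conversions performed above (return values follow directly
# from the branches in serialize_positional_argument):
#   serialize_positional_argument('boolean', True)        -> '1'
#   serialize_positional_argument('boolean', None)        -> ''
#   serialize_positional_argument('list', ['a', 'b', 3])  -> 'a,b,3'
#   serialize_positional_argument('object', {'k': 1})     -> json_encode({'k': 1})
#   serialize_positional_argument('string', None)         -> u''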
def get_args(action_parameters, action_db):
"""
Get and serialize positional and named arguments.
:return: (positional_args, named_args)
:rtype: (``str``, ``dict``)
"""
position_args_dict = _get_position_arg_dict(action_parameters, action_db)
action_db_parameters = action_db.parameters or {}
positional_args = []
positional_args_keys = set()
for _, arg in six.iteritems(position_args_dict):
arg_type = action_db_parameters.get(arg, {}).get("type", None)
# Perform serialization for positional arguments
arg_value = action_parameters.get(arg, None)
arg_value = serialize_positional_argument(
argument_type=arg_type, argument_value=arg_value
)
positional_args.append(arg_value)
positional_args_keys.add(arg)
named_args = {}
for param in action_parameters:
if param not in positional_args_keys:
named_args[param] = action_parameters.get(param)
return positional_args, named_args
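# Illustrative example (the parameter names are hypothetical): for an action whose
# parameters are {'cmd': {'type': 'string', 'position': 0}, 'cwd': {'type': 'string'}}
# and action_parameters {'cmd': 'ls', 'cwd': '/tmp'}, get_args returns
# (['ls'], {'cwd': '/tmp'}): 'cmd' is serialized positionally (position 0) while
# 'cwd', which has no 'position', is passed through as a named argument.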
def _get_position_arg_dict(action_parameters, action_db):
action_db_params = action_db.parameters
args_dict = {}
for param in action_db_params:
param_meta = action_db_params.get(param, None)
if param_meta is not None:
pos = param_meta.get("position")
if pos is not None:
args_dict[pos] = param
args_dict = OrderedDict(sorted(args_dict.items()))
return args_dict
|
{
"content_hash": "332503de584f0c9b0199c847d52d0cb7",
"timestamp": "",
"source": "github",
"line_count": 386,
"max_line_length": 98,
"avg_line_length": 29.72279792746114,
"alnum_prop": 0.6354920247537698,
"repo_name": "nzlosh/st2",
"id": "e6ae0fe4301977695aee78259a905f0691024aaa",
"size": "12101",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "st2common/st2common/util/action_db.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "198"
},
{
"name": "JavaScript",
"bytes": "444"
},
{
"name": "Jinja",
"bytes": "174532"
},
{
"name": "Makefile",
"bytes": "75242"
},
{
"name": "PowerShell",
"bytes": "856"
},
{
"name": "Python",
"bytes": "6453910"
},
{
"name": "Shell",
"bytes": "93607"
},
{
"name": "Starlark",
"bytes": "7236"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import imp
import os
import sys
import types
import gdb
import pwndbg
import pwndbg.commands
import pwndbg.events
import pwndbg.memoize
try:
from __builtins__ import reload as _reload
except:
from imp import reload as _reload
def rreload(module, mdict=None):
"""Recursively reload modules."""
name = module.__name__
if mdict is None:
mdict = []
for attribute_name in getattr(module, '__all__', []) or []:
attribute = getattr(module, attribute_name, None)
if isinstance(attribute, types.ModuleType) and attribute not in mdict:
mdict.append(attribute)
rreload(attribute, mdict)
try:
_reload(module)
except Exception as e:
pass
@pwndbg.commands.Command
def reload(*a):
pwndbg.events.on_reload()
rreload(pwndbg)
pwndbg.events.after_reload()
@pwndbg.commands.Command
def reinit_pwndbg():
"""
Makes pwndbg reinitialize all state.
"""
pwndbg.memoize.reset()
pwndbg.events.after_reload()
|
{
"content_hash": "3b65169b74d6674d6f34c945dbcc56ce",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 78,
"avg_line_length": 20.892857142857142,
"alnum_prop": 0.67008547008547,
"repo_name": "0xddaa/pwndbg",
"id": "9d397b7ffbf7c21d5f48549411d6102b3c6cb624",
"size": "1216",
"binary": false,
"copies": "4",
"ref": "refs/heads/stable",
"path": "pwndbg/commands/reload.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "584"
},
{
"name": "C",
"bytes": "113"
},
{
"name": "Makefile",
"bytes": "964"
},
{
"name": "Python",
"bytes": "1920581"
},
{
"name": "Shell",
"bytes": "5598"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import os
import argparse
import sys
import threading
import shutil
import glob
import tempfile
import platform
# make sure scripts/internal is on the pythonpath.
sys.path = [os.path.abspath(os.path.dirname(sys.argv[0])) + "/internal"] + sys.path
# for ExitProgram and RunCommand
from pocolm_common import ExitProgram
from pocolm_common import RunCommand
parser = argparse.ArgumentParser(description="Usage: "
                                 "get_counts.py [options] <source-int-dir> <ngram-order> <dest-count-dir> "
                                 "e.g.: get_counts.py data/int 3 data/counts_3. "
                                 "This script computes data-counts of the specified n-gram order "
                                 "for each data-source in <source-int-dir>, and puts them all in "
                                 "<dest-counts-dir>.",
                                 formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--dump-counts-parallel", type=str, default='true',
choices=['true', 'false'],
help="If true, while obtaining the original counts, process multiple data-sources "
"in parallel (configurable because 'sort' may use a fair amount of memory).")
parser.add_argument("--verbose", type=str, default='false',
choices=['true', 'false'],
help="If true, print commands as we execute them.")
parser.add_argument("--cleanup", type=str, default='true',
choices=['true', 'false'],
help="If true, remove intermediate files (only relevant if --min-counts option "
"is supplied).")
parser.add_argument("--min-counts", type=str, default='',
help="This string allows you to specify minimum counts to be applied "
"to the stats. You may in general specify min-counts per n-gram order "
"and per data-source, but they applied 'jointly' in a smart way so "
"that, for example, for some order if all data-sources have a min-count "
"of 2, an n-gram will be pruned from all data-sources if the total count "
"over all data-sources is 2. Min-counts may be specified for order 3 "
"and above, in a comma-separated list, with values that must be "
"non-decreasing. E.g. --min-counts=2,3. In case of mismatch with "
"the actual n-gram order, excess min-counts will be truncated and "
"an deficit will be remedied by repeating the last min-count. You "
"may specify different min-counts for different data-sources, e.g. "
"--min-counts='fisher=2,3 swbd1=1,1'. You may also set min-counts for "
"some data-sources and use a default for others, as in "
"--min-counts='fisher=2,3 default=1,1'. You may not set min-counts for "
"the dev set.")
parser.add_argument("--num-min-count-jobs", type=int, default=5,
help="The number of parallel jobs used for applying min-counts (only "
"relevant if --min-counts option is given")
parser.add_argument("--num-count-jobs", type=int, default=4,
help="The number of parallel processes per data source used for "
"getting initial counts")
parser.add_argument("--max-memory", type=str, default='',
help="Memory limitation for sort.")
parser.add_argument("--limit-unk-history", type=str, default='false',
choices=['true', 'false'],
help="Truncate the left n-gram of an <unk> in history.")
parser.add_argument("source_int_dir",
help="Specify <source_int_dir> the data-source")
parser.add_argument("ngram_order", type=int,
help="Specify the order of ngram")
parser.add_argument("dest_count_dir",
help="Specify <dest_count_dir> the destination to puts the counts")
args = parser.parse_args()
# this temporary directory will be used by "sort".
os.environ['TMPDIR'] = args.dest_count_dir
# this reads the 'names' file (which has lines like "1 switchboard", "2 fisher"
# and so on), and returns a dictionary from integer id to name.
def ReadNames(names_file):
try:
f = open(names_file, "r")
except:
sys.exit("get_counts.py: failed to open --names={0}"
" for reading".format(names_file))
number_to_name = {}
for line in f:
try:
[number, name] = line.split()
number = int(number)
except:
sys.exit("get_counts.py: Bad line '{0}' in names file {1}".format(
line[0:-1], names_file))
if number in number_to_name:
sys.exit("get_counts.py: duplicate number {0} in names file {1}".format(
number, names_file))
number_to_name[number] = name
f.close()
return number_to_name
def GetNumTrainSets(source_int_dir):
f = open(source_int_dir + '/num_train_sets')
# the following should not fail, since we validated source_int_dir.
num_train_sets = int(f.readline())
assert f.readline() == ''
f.close()
return num_train_sets
# copy over some meta-info into the 'counts' directory.
def CopyMetaInfo(source_int_dir, dest_count_dir):
for f in ['num_train_sets', 'num_words', 'names', 'words.txt']:
try:
src = source_int_dir + os.path.sep + f
dest = dest_count_dir + os.path.sep + f
shutil.copy(src, dest)
except:
ExitProgram('error copying {0} to {1}'.format(src, dest))
def IsCygwin():
return platform.system()[0:3].lower() == 'win' or platform.system()[0:3].lower() == 'cyg'
# This function, called from FormatMinCounts, takes an array of
# min-counts like [2,3], and normalizes its length to ngram_order - 2
# by either removing elements from the end, or duplicating the last
# element. If it makes any change, it prints a warning.
def NormalizeMinCountsLength(min_counts, ngram_order):
if len(min_counts) == 0:
# this point in the code should not be reached, actually.
sys.exit("get_counts.py: invalid --min-counts string or code error.")
ans = min_counts
# Check that the min-counts are non-decreasing and are >= 1.
for i in range(len(min_counts) - 1):
if min_counts[i] < 1:
sys.exit("get_counts.py: invalid --min-counts string, min-counts must "
"be >= 1.")
if min_counts[i] > min_counts[i+1]:
sys.exit("get_counts.py: invalid --min-counts string, min-counts must "
"not decrease from one n-gram order to the next.")
if len(ans) < ngram_order - 2:
while len(ans) < ngram_order - 2:
ans.append(ans[-1]) # duplicate the last element
print("get_counts.py: extending min-counts from {0} to {1} since "
"ngram order is {2}".format(','.join([str(x) for x in min_counts]),
','.join([str(x) for x in ans]), ngram_order))
if len(ans) > ngram_order - 2:
ans = ans[0:ngram_order-2]
print("get_counts.py: truncating min-counts from {0} to {1} since "
"ngram order is {2}".format(','.join([str(x) for x in min_counts]),
','.join([str(x) for x in ans]), ngram_order))
return ans
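# For example, with ngram_order=5 (min-counts cover orders 3..5, i.e. 3 values),
# [2, 3] is extended to [2, 3, 3]; with ngram_order=4 (2 values), [2, 3, 4] is
# truncated to [2, 3]. Both adjustments print a warning.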
# This function converts from the format of --min-counts string accepted by this
# program to the format that is accepted by int-counts-enforce-min-counts; it
# returns a string (such as --min-counts="2,3" -> "2 3", which would be a valid
# string for a 4-gram setup and arbitrary number of inputs; or, supposing
# --min-counts="fisher=2,3 swbd=1,2" and the "names" file maps "fisher" -> 1
# and "swbd" -> 2, this function would return the string "2,1 3,2".
# If the ngram-order is <3, this function will return the empty string, and
# in that case you shouldn't try to apply min-counts.
def FormatMinCounts(source_int_dir, num_train_sets, ngram_order, min_counts):
if len(min_counts) == 0:
sys.exit("get_counts.py: empty --min-counts string.")
if ngram_order < 3:
print("get_counts.py: ignoring --min-counts string since ngram "
"order is {0} and min-counts are only supported for orders "
"3 and greater.".format(ngram_order), file=sys.stderr)
return ''
pieces = min_counts.split()
# 'pieces' is the whitespace-separated pieces of the string.
if len(pieces) == 1 and len(pieces[0].split('=')) == 1:
# the user has specified something like --min-counts=2,3, and we have
# something like pieces = ['2,3']. So there is no attempt to have
# different min-counts for different data sources.
try:
min_counts_per_order = [float(x) for x in pieces[0].split(',')]
except:
sys.exit("get_counts.py: --min-counts={0} has unexpected format".format(
min_counts))
min_counts_per_order = NormalizeMinCountsLength(min_counts_per_order,
ngram_order)
ans = ' '.join([str(int(x)) if x == int(x) else str(x)
for x in min_counts_per_order])
else:
# we expect 'pieces' to be something like [ 'fisher=2,3' 'swbd=1,2' ].
# we'll set up a dictionary from name to min-count array, something
# like name_to_mincounts = [ 'fisher':[2,3], 'swbd':[1,2] ]
name_to_mincounts = dict()
for piece in pieces:
try:
[name, comma_separated_list] = piece.split('=')
this_mincounts = [float(x) for x in comma_separated_list.split(',')]
this_mincounts = NormalizeMinCountsLength(this_mincounts,
ngram_order)
except:
sys.exit("get_counts.py: could not parse --min-counts='{0}'.".format(
min_counts))
if name in name_to_mincounts:
sys.exit("get_counts.py: duplicate entry found in --min-counts='{0}'.".format(
min_counts))
name_to_mincounts[name] = this_mincounts
names_used = set() # the set of keys of 'name_to_mincounts' that have been used.
# names is a map from integer to name, e.g.
# names = [ 1:'fisher', 2:'swbd' ]
names = ReadNames(source_int_dir + "/names")
# min_counts_per_order will be an array (one per order from 2,...)
# of arrays, one per training set, of the respective min-counts per
# dataset, e.g. in our example it would be [ [ 2,1 ], [3,2] ]
min_counts_per_order = []
for o in range(ngram_order - 2):
min_counts_per_order.append([])
for n in range(1, num_train_sets + 1):
# the next line shouldn't fail since the data-dir did validate correctly.
name = names[n]
if name in name_to_mincounts:
this_mincounts = name_to_mincounts[name]
names_used.add(name)
elif 'default' in name_to_mincounts:
this_mincounts = name_to_mincounts['default']
names_used.add('default')
else:
sys.exit("get_counts.py: invalid min-counts --min-counts='{0}' since there "
"is no min-count specified for {1}.".format(min_counts, name))
for o in range(ngram_order - 2):
min_counts_per_order[o].append(this_mincounts[o])
ans = ' '.join([','.join([str(int(x)) if x == int(x) else str(x)
for x in array])
for array in min_counts_per_order])
for name in name_to_mincounts.keys():
if name not in names_used:
sys.exit("get_counts.py: invalid min-counts --min-counts='{0}' since the key "
"{1} is never used.".format(min_counts, name))
if args.verbose == 'true':
print("get_counts.py: converted min-counts from --min-counts='{0}' to '{1}'".format(
min_counts, ans))
# test whether ans is all ones, and warn if so.
a = ans.replace(',', ' ').split()
if a == ['1'] * len(a):
print("get_counts.py: **warning: --min-counts={0} is equivalent to not applying any "
"min-counts, it would be more efficient not to use the option at all, or "
"to set it to the empty string.".format(min_counts))
return ans
# save the n-gram order.
def SaveNgramOrder(dest_count_dir, ngram_order):
try:
f = open('{0}/ngram_order'.format(dest_count_dir), 'w')
except:
ExitProgram('error opening file {0}/ngram_order for writing'.format(dest_count_dir))
assert ngram_order >= 2
print(ngram_order, file=f)
f.close()
# this function dumps the counts to disk.
# if num_splits == 0 [relevant when we're not using min-counts], then it dumps
# its output to {dest_count_dir}/int.{n}.{o} with n = 1..num_train_sets,
# o=2..ngram_order. (note: n is supplied to this function).
#
# If num-splits >= 1 [relevant when we're using min-counts], then it dumps its output
# {dest_count_dir}/int.{n}.split{j} with n = 1..num_train_sets, j=1..num_splits.
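# For example, with ngram_order=3, n=1 and num_splits=0 this writes
# {dest_count_dir}/int.1.2 and {dest_count_dir}/int.1.3, while with num_splits=2 it
# instead writes {dest_count_dir}/int.1.split1 and {dest_count_dir}/int.1.split2.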
def GetCountsSingleProcess(source_int_dir, dest_count_dir, ngram_order, n,
max_mem, num_splits=0):
if num_splits == 0:
int_counts_output = "/dev/null " + " ".join(["{0}/int.{1}.{2}".format(dest_count_dir, n, o)
for o in range(2, ngram_order + 1)])
else:
assert num_splits >= 1
int_counts_output = '/dev/stdout | split-int-counts ' + \
' '.join(["{0}/int.{1}.split{2}".format(dest_count_dir, n, j)
for j in range(1, num_splits + 1)])
command = "bash -c 'set -o pipefail; export LC_ALL=C; gunzip -c {source_int_dir}/{n}.txt.gz | "\
"get-text-counts {limit_unk_history} {ngram_order} | sort {mem_opt}| uniq -c | "\
"get-int-counts {int_counts_output}'".format(source_int_dir=source_int_dir,
n=n, ngram_order=ngram_order,
limit_unk_history="--limit-unk-history" if args.limit_unk_history == 'true' else "",
mem_opt="--buffer-size={0}".format(max_mem) if max_mem != '' else '',
int_counts_output=int_counts_output)
log_file = "{dest_count_dir}/log/get_counts.{n}.log".format(
dest_count_dir=dest_count_dir, n=n)
RunCommand(command, log_file, args.verbose == 'true')
# This function uses multiple parallel processes to dumps the counts to files.
# if num_splits == 0 [relevant when we're not using min-counts], then it dumps its output to
# {dest_count_dir}/int.{n}.{o} with n = 1..num_train_sets, o=2..ngram_order.
# (note: n is supplied to this function).
#
# If num-splits >= 1 [relevant when we're using min-counts], then it dumps its output
# {dest_count_dir}/int.{n}.split{j} with n = 1..num_train_sets, j=1..num_splits.
# This function uses multiple processes (num_proc) in parallel to run
# 'get-text-counts' (this tends to be the bottleneck).
# It will use just one process if the amount of data is quite small or if
# the platform is Cygwin (where named pipes don't work)
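# Roughly, the multi-process pipeline assembled below looks like:
#   gunzip -c {n}.txt.gz | distribute-input-lines fifo_0 ... fifo_{P-1} &
#   sort -m <(get-text-counts ... <fifo_0 | sort) ... <(get-text-counts ... <fifo_{P-1} | sort) \
#       | uniq -c | get-int-counts {outputs}
# i.e. the input is fanned out over P named pipes, counted and sorted per pipe, and
# then merge-sorted back together before the final uniq/get-int-counts stage.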
def GetCountsMultiProcess(source_int_dir, dest_count_dir, ngram_order, n, num_proc,
max_mem, num_splits=0):
try:
file_size = os.path.getsize('{0}/{1}.txt.gz'.format(source_int_dir, n))
except:
ExitProgram('get_counts.py: error getting file size of '
'{0}/{1}.txt.gz'.format(source_int_dir, n))
if IsCygwin() or num_proc <= 1 or file_size < 1000000:
if num_proc > 1 and file_size >= 1000000:
# it's only because of Cygwin that we're not using multiple
            # processes; this merits a warning.
print("get_counts.py: cygwin platform detected so named pipes won't work; "
"using a single process (will be slower)")
return GetCountsSingleProcess(source_int_dir, dest_count_dir,
ngram_order, n, max_mem, num_splits)
if num_splits == 0:
int_counts_output = "/dev/null " + " ".join(["{0}/int.{1}.{2}".format(dest_count_dir, n, o)
for o in range(2, ngram_order + 1)])
else:
assert num_splits >= 1
int_counts_output = '/dev/stdout | split-int-counts ' + \
' '.join(["{0}/int.{1}.split{2}".format(dest_count_dir, n, j)
for j in range(1, num_splits + 1)])
try:
        # we want a temporary directory on a local file system
        # for the named pipes created below.
tempdir = tempfile.mkdtemp()
except Exception as e:
ExitProgram("Error creating temporary directory: " + str(e))
# This has several pipes for the internal processing that write to and read
    # from other internal pipes; we can't do this using '|' in the shell, so we
# need to use mkfifo. This does not work properly on cygwin.
log_dir = "{dest_count_dir}/log".format(dest_count_dir=dest_count_dir)
[os.remove(x) for x in glob.glob("{log_dir}/.{n}.*.error".format(
log_dir=log_dir, n=n))]
log_file = "{log_dir}/get_counts.{n}.log".format(log_dir=log_dir, n=n)
test_command = "bash -c 'set -o pipefail; (echo a; echo b) | "\
"distribute-input-lines /dev/null /dev/null'"
# We run the following command just to make sure distribute-input-lines is
# on the path and compiled, since we get hard-to-debug errors if it fails.
RunCommand(test_command, log_file)
if max_mem == '':
mem_opt = ''
else:
mem_opt = "--buffer-size={0}".format(DivideMemory(max_mem, num_proc + 1))
# we use "bash -c '...'" to make sure it gets run in bash, since
# for example 'set -o pipefail' would only work in bash.
command = ("bash -c 'set -o pipefail; set -e; export LC_ALL=C; mkdir -p {0}; ".format(tempdir) +
''.join(['mkfifo {0}/{1}; '.format(tempdir, p)
for p in range(num_proc)]) +
'trap "rm -r {0}" SIGINT SIGKILL SIGTERM EXIT; '.format(tempdir) +
'gunzip -c {0}/{1}.txt.gz | distribute-input-lines '.format(source_int_dir, n) +
' '.join(['{0}/{1}'.format(tempdir, p) for p in range(num_proc)]) + '& ' +
'sort -m {0} '.format(mem_opt) +
' '.join(['<(get-text-counts {4} {0} <{1}/{2} | sort {3} || touch {5}/.{6}.{2}.error)'.format(ngram_order, tempdir, p, mem_opt,
"--limit-unk-history" if args.limit_unk_history == 'true' else "", log_dir, n)
for p in range(num_proc)]) +
'| uniq -c | get-int-counts {0}'.format(int_counts_output) +
"'") # end the quote from the 'bash -c'.
RunCommand(command, log_file, args.verbose == 'true')
if len(glob.glob("{log_dir}/.{n}.*.error".format(log_dir=log_dir, n=n))) > 0:
ExitProgram("Something went wrong for the get-text-counts or sort command for training set {n}.".format(n=n))
# This function applies the min-counts (it is only called if you supplied the
# --min-counts option to this script). It reads in the data dumped by
# GetCounts. It dumps the files into {dest_count_dir}/int.{n}.split{j}.{o}
# for n = 1...num_train_sets j = 1..num_jobs, and o=2..ngram_order. [note: j is
# supplied to this function].
def EnforceMinCounts(dest_count_dir, formatted_min_counts, ngram_order, num_train_sets, j):
inputs = ' '.join(["{0}/int.{1}.split{2}".format(dest_count_dir, n, j)
for n in range(1, num_train_sets + 1)])
outputs = ' '.join([' '.join(['{0}/int.{1}.split{2}.{3}'.format(dest_count_dir, n, j, o)
for o in range(2, ngram_order + 1)])
for n in range(1, num_train_sets + 1)])
# e.g. suppose j is 2 and ngram_order is 4, outputs would be as follows
# [assuming brace expansion].:
# outputs = dir/int.1.split2.{2,3,4} dir/int.2.split2.{2,3,4} ...
# dir/int.{num_train_sets}.split2.{2,3,4}
command = "int-counts-enforce-min-counts {ngram_order} {formatted_min_counts} {inputs} "\
"{outputs}".format(
ngram_order=ngram_order, formatted_min_counts=formatted_min_counts,
inputs=inputs, outputs=outputs, j=j)
log_file = '{0}/log/enforce_min_counts.{1}.log'.format(dest_count_dir, j)
RunCommand(command, log_file, args.verbose == 'true')
# This function merges counts from multiple jobs, that have been split up by
# most recent history-word (it is only called if you supplied the --min-counts
# option to this script). It reads in the data dumped by EnforceMinCounts.
# it merges the files into {dest_count_dir}/int.{n}.{o}.
def MergeCounts(dest_count_dir, num_jobs, n, o):
if num_jobs > 1:
command = ('merge-int-counts ' +
' '.join(['{0}/int.{1}.split{2}.{3}'.format(dest_count_dir, n, j, o)
for j in range(1, num_jobs + 1)]) +
'>{0}/int.{1}.{2}'.format(dest_count_dir, n, o))
log_file = '{0}/log/merge_counts.{1}.{2}.log'.format(dest_count_dir, n, o)
RunCommand(command, log_file, args.verbose == 'true')
else:
assert num_jobs == 1
# we can just move the file if num-jobs == 1.
try:
os.remove('{0}/int.{1}.{2}'.format(dest_count_dir, n, o))
except:
pass
os.rename('{0}/int.{1}.split1.{2}'.format(dest_count_dir, n, o),
'{0}/int.{1}.{2}'.format(dest_count_dir, n, o))
# we also want to merge the files $dir/int.dev.{2,3,...} into a single file
# that contains all the dev-data's counts; this will be used in likelihood
# evaluation.
def MergeDevData(dest_count_dir, ngram_order):
command = ("merge-int-counts " + ' '.join([dest_count_dir + "/int.dev." + str(n)
for n in range(2, ngram_order + 1)]) +
">{0}/int.dev".format(dest_count_dir))
log_file = dest_count_dir + '/log/merge_dev_counts.log'
RunCommand(command, log_file, args.verbose == 'true')
# this function returns the value and unit of the max_memory
# if max_memory is in format of "integer + letter/%", like "10G", it returns (10, 'G')
# if max_memory contains no letter, like "10000", it returns (10000, '')
# we assume the input string is not empty since when it is empty we never call this function
def ParseMemoryString(s):
if not s[-1].isdigit():
return (int(s[:-1]), s[-1])
else:
return (int(s), '')
def DivideMemory(total, n):
(value, unit) = ParseMemoryString(total)
sub_memory = value / n
if sub_memory != float(value) / n:
if unit in ['K', 'k', '']:
sub_memory = value * 1024 / n
unit = 'b'
elif unit in ['M', 'm']:
sub_memory = value * 1024 / n
unit = 'K'
elif unit in ['G', 'g']:
sub_memory = value * 1024 / n
unit = 'M'
elif (unit in ['B', 'b', '%']) and (sub_memory == 0):
ExitProgram("max_memory for each of the {0} train sets is {1}{2}."
"Please reset a larger max_memory value".format(
n, float(value)/n, unit))
else:
ExitProgram("Invalid format for max_memory. "
"Please 'man sort' to see how to set buffer size.")
return str(int(sub_memory)) + unit
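# For example, DivideMemory('10G', 5) returns '2G'. Under Python 2 division
# semantics a non-exact split is re-expressed one unit down (e.g. '10G' over 4
# jobs becomes '2560M') so that sort still receives an integer buffer size.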
# make sure 'scripts' and 'src' directory are on the path
os.environ['PATH'] = (os.environ['PATH'] + os.pathsep +
os.path.abspath(os.path.dirname(sys.argv[0])) + os.pathsep +
os.path.abspath(os.path.dirname(sys.argv[0])) + "/../src")
if os.system("validate_int_dir.py " + args.source_int_dir) != 0:
ExitProgram("command validate_int_dir.py {0} failed".format(args.source_int_dir))
if args.ngram_order < 2:
ExitProgram("ngram-order is {0}; it must be at least 2. If you "
"want a unigram LM, do it by hand".format(args.ngram_order))
# read the variable 'num_train_sets'
# from the corresponding file in source_int_dir. This shouldn't fail
# because we just called validate_int_dir.py.
f = open(args.source_int_dir + "/num_train_sets")
num_train_sets = int(f.readline())
f.close()
if not os.path.isdir(args.dest_count_dir):
try:
os.makedirs(args.dest_count_dir+'/log')
except:
ExitProgram("error creating directory " + args.dest_count_dir)
CopyMetaInfo(args.source_int_dir, args.dest_count_dir)
SaveNgramOrder(args.dest_count_dir, args.ngram_order)
if args.min_counts == '':
# no min-counts specified: use normal pipeline.
print("get_counts.py: dumping counts", file=sys.stderr)
threads = []
if args.max_memory != '':
if args.dump_counts_parallel == 'true':
max_mem = DivideMemory(args.max_memory, num_train_sets + 1)
else:
max_mem = args.max_memory
else:
max_mem = ''
for n in ["dev"] + list(range(1, num_train_sets + 1)):
threads.append(threading.Thread(target=GetCountsMultiProcess,
args=[args.source_int_dir, args.dest_count_dir,
args.ngram_order, str(n), args.num_count_jobs, max_mem]))
threads[-1].start()
if args.dump_counts_parallel == 'false':
threads[-1].join()
if args.dump_counts_parallel == 'true':
for t in threads:
t.join()
MergeDevData(args.dest_count_dir, args.ngram_order)
print("get_counts.py: done", file=sys.stderr)
else:
# First process the dev data, the min-counts aren't relevant here.
GetCountsSingleProcess(args.source_int_dir, args.dest_count_dir,
args.ngram_order, 'dev', args.max_memory)
MergeDevData(args.dest_count_dir, args.ngram_order)
num_mc_jobs = args.num_min_count_jobs
if num_mc_jobs < 1:
ExitProgram("bad option --num-min-count-jobs={0}".format(num_mc_jobs))
formatted_min_counts = FormatMinCounts(args.source_int_dir,
num_train_sets,
args.ngram_order,
args.min_counts)
if not num_mc_jobs >= 1:
sys.exit("get_counts.py: invalid option --num-jobs={0}".format(num_mc_jobs))
# First, dump the counts split up by most-recent-history instead of ngram-order.
print("get_counts.py: dumping counts", file=sys.stderr)
if args.max_memory != '':
if args.dump_counts_parallel == 'true':
max_mem = DivideMemory(args.max_memory, num_train_sets)
else:
max_mem = args.max_memory
else:
max_mem = ''
threads = []
for n in range(1, num_train_sets + 1):
threads.append(threading.Thread(target=GetCountsMultiProcess,
args=[args.source_int_dir, args.dest_count_dir,
args.ngram_order, str(n), args.num_count_jobs, max_mem,
num_mc_jobs]))
threads[-1].start()
if args.dump_counts_parallel == 'false':
threads[-1].join()
if args.dump_counts_parallel == 'true':
for t in threads:
t.join()
# Next, apply the min-counts.
print("get_counts.py: applying min-counts", file=sys.stderr)
threads = []
for j in range(1, num_mc_jobs + 1):
threads.append(threading.Thread(target=EnforceMinCounts,
args=[args.dest_count_dir, formatted_min_counts,
args.ngram_order, num_train_sets, j]))
threads[-1].start()
for t in threads:
t.join()
if args.cleanup == 'true':
for n in range(1, num_train_sets + 1):
for j in range(1, num_mc_jobs + 1):
os.remove("{0}/int.{1}.split{2}".format(
args.dest_count_dir, n, j))
print("get_counts.py: merging counts", file=sys.stderr)
threads = []
for n in range(1, num_train_sets + 1):
for o in range(2, args.ngram_order + 1):
threads.append(threading.Thread(target=MergeCounts,
args=[args.dest_count_dir,
num_mc_jobs, n, o]))
threads[-1].start()
for t in threads:
t.join()
if args.cleanup == 'true':
for n in range(1, num_train_sets + 1):
for j in range(1, args.num_min_count_jobs + 1):
for o in range(2, args.ngram_order + 1):
try:
os.remove("{0}/int.{1}.split{2}.{3}".format(
args.dest_count_dir, n, j, o))
except:
pass
print("get_counts.py: finished.", file=sys.stderr)
if os.system("validate_count_dir.py " + args.dest_count_dir) != 0:
ExitProgram("command validate_count_dir.py {0} failed".format(args.dest_count_dir))
|
{
"content_hash": "0af42049ca04f139dcedca8be8a454e7",
"timestamp": "",
"source": "github",
"line_count": 616,
"max_line_length": 143,
"avg_line_length": 48.19805194805195,
"alnum_prop": 0.5660828561805321,
"repo_name": "wantee/pocolm",
"id": "ac0fe0aee7eee6b3eda4a89d4cff7fa1210e211a",
"size": "29785",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/get_counts.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "356912"
},
{
"name": "Makefile",
"bytes": "1294"
},
{
"name": "Perl",
"bytes": "4912"
},
{
"name": "Python",
"bytes": "274790"
},
{
"name": "Shell",
"bytes": "83576"
}
],
"symlink_target": ""
}
|
"""Copy number detection with CNVkit with specific support for targeted sequencing.
http://cnvkit.readthedocs.org
"""
import copy
import os
import sys
import tempfile
import pybedtools
import numpy as np
import toolz as tz
from bcbio import install, utils
from bcbio.bam import ref
from bcbio.distributed.multi import run_multicore, zeromq_aware_logging
from bcbio.distributed.transaction import file_transaction
from bcbio.heterogeneity import chromhacks
from bcbio.pipeline import datadict as dd
from bcbio.pipeline import config_utils
from bcbio.variation import bedutils, effects, vcfutils
from bcbio.provenance import do
from bcbio.structural import annotate, shared, regions, plot
def run(items, background=None):
"""Detect copy number variations from batched set of samples using CNVkit.
"""
if not background: background = []
return _cnvkit_by_type(items, background)
def _sv_workdir(data):
return utils.safe_makedir(os.path.join(data["dirs"]["work"], "structural",
dd.get_sample_name(data), "cnvkit"))
def _cnvkit_by_type(items, background):
"""Dispatch to specific CNVkit functionality based on input type.
"""
if len(items + background) == 1:
return _run_cnvkit_single(items[0])
elif vcfutils.get_paired_phenotype(items[0]):
return _run_cnvkit_cancer(items, background)
else:
return _run_cnvkit_population(items, background)
def _associate_cnvkit_out(ckouts, items):
"""Associate cnvkit output with individual items.
"""
assert len(ckouts) == len(items)
out = []
for ckout, data in zip(ckouts, items):
ckout = copy.deepcopy(ckout)
ckout["variantcaller"] = "cnvkit"
ckout = _add_seg_to_output(ckout, data)
ckout = _add_gainloss_to_output(ckout, data)
ckout = _add_segmetrics_to_output(ckout, data)
ckout = _add_variantcalls_to_output(ckout, data)
# ckout = _add_coverage_bedgraph_to_output(ckout, data)
ckout = _add_cnr_bedgraph_and_bed_to_output(ckout, data)
if "svplots" in dd.get_tools_on(data):
ckout = _add_plots_to_output(ckout, data)
if "sv" not in data:
data["sv"] = []
data["sv"].append(ckout)
out.append(data)
return out
def _run_cnvkit_single(data, background=None):
"""Process a single input file with BAM or uniform background.
"""
work_dir = _sv_workdir(data)
test_bams = [data["align_bam"]]
if background:
background_bams = [x["align_bam"] for x in background]
background_name = os.path.splitext(os.path.basename(background_bams[0]))[0]
else:
background_bams = []
background_name = None
ckouts = _run_cnvkit_shared([data], test_bams, background_bams, work_dir,
background_name=background_name)
if not ckouts:
return [data]
else:
assert len(ckouts) == 1
return _associate_cnvkit_out(ckouts, [data])
def _run_cnvkit_cancer(items, background):
"""Run CNVkit on a tumor/normal pair.
"""
paired = vcfutils.get_paired_bams([x["align_bam"] for x in items], items)
work_dir = _sv_workdir(paired.tumor_data)
ckouts = _run_cnvkit_shared([paired.tumor_data], [paired.tumor_bam], [paired.normal_bam],
work_dir, background_name=paired.normal_name)
if not ckouts:
return items
assert len(ckouts) == 1
tumor_data = _associate_cnvkit_out(ckouts, [paired.tumor_data])
normal_data = [x for x in items if dd.get_sample_name(x) != paired.tumor_name]
return tumor_data + normal_data
def _run_cnvkit_population(items, background):
"""Run CNVkit on a population of samples.
Tries to calculate background based on case/controls, otherwise uses
a flat background for each sample and calls independently.
"""
assert not background
inputs, background = shared.find_case_control(items)
work_dir = _sv_workdir(inputs[0])
ckouts = _run_cnvkit_shared(inputs, [x["align_bam"] for x in inputs],
[x["align_bam"] for x in background], work_dir,
background_name=dd.get_sample_name(background[0]) if len(background) > 0 else None)
return _associate_cnvkit_out(ckouts, inputs) + background
def _get_cmd():
return os.path.join(os.path.dirname(sys.executable), "cnvkit.py")
def _bam_to_outbase(bam_file, work_dir):
"""Convert an input BAM file into CNVkit expected output.
"""
out_base = os.path.splitext(os.path.basename(bam_file))[0].split(".")[0]
return os.path.join(work_dir, out_base)
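# For illustration (not part of the original module): a BAM named
# "sample1.sorted.bam" yields the base "<work_dir>/sample1", since everything
# after the first "." in the basename is dropped.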
def _run_cnvkit_shared(items, test_bams, background_bams, work_dir, background_name=None):
"""Shared functionality to run CNVkit, parallelizing over multiple BAM files.
"""
raw_work_dir = utils.safe_makedir(os.path.join(work_dir, "raw"))
background_cnn = os.path.join(raw_work_dir,
"%s_background.cnn" % (background_name if background_name else "flat"))
ckouts = []
for test_bam in test_bams:
out_base = _bam_to_outbase(test_bam, raw_work_dir)
ckouts.append({"cnr": "%s.cns" % out_base,
"cns": "%s.cns" % out_base,
"back_cnn": background_cnn})
if not utils.file_exists(ckouts[0]["cnr"]):
data = items[0]
cov_interval = dd.get_coverage_interval(data)
raw_target_bed, access_bed = _get_target_access_files(cov_interval, data, work_dir)
# bail out if we ended up with no regions
if not utils.file_exists(raw_target_bed):
return {}
raw_target_bed = annotate.add_genes(raw_target_bed, data)
parallel = {"type": "local", "cores": dd.get_cores(data), "progs": ["cnvkit"]}
target_bed, antitarget_bed = _cnvkit_targets(raw_target_bed, access_bed, cov_interval, raw_work_dir, data)
def _bam_to_itype(bam):
return "background" if bam in background_bams else "evaluate"
split_cnns = run_multicore(_cnvkit_coverage,
[(bam, bed, _bam_to_itype(bam), raw_work_dir, data)
for bam in test_bams + background_bams
for bed in _split_bed(target_bed, data) + _split_bed(antitarget_bed, data)],
data["config"], parallel)
coverage_cnns = _merge_coverage(split_cnns, data)
background_cnn = _cnvkit_background([x["file"] for x in coverage_cnns if x["itype"] == "background"],
background_cnn, target_bed, antitarget_bed, data)
fixed_cnrs = run_multicore(_cnvkit_fix,
[(cnns, background_cnn, data) for cnns in
tz.groupby("bam", [x for x in coverage_cnns
if x["itype"] == "evaluate"]).values()],
data["config"], parallel)
called_segs = run_multicore(_cnvkit_segment,
[(cnr, cov_interval, data) for cnr in fixed_cnrs],
data["config"], parallel)
return ckouts
@utils.map_wrap
@zeromq_aware_logging
def _cnvkit_segment(cnr_file, cov_interval, data):
"""Perform segmentation and copy number calling on normalized inputs
"""
out_file = "%s.cns" % os.path.splitext(cnr_file)[0]
if not utils.file_uptodate(out_file, cnr_file):
with file_transaction(data, out_file) as tx_out_file:
local_sitelib = os.path.join(install.get_defaults().get("tooldir", "/usr/local"),
"lib", "R", "site-library")
cmd = [_get_cmd(), "segment", "-o", tx_out_file, "--rlibpath", local_sitelib, cnr_file]
if cov_interval == "genome":
cmd += ["--threshold", "0.00001"]
# preferentially use conda installed Rscript
export_cmd = "unset R_HOME && export PATH=%s:$PATH && " % os.path.dirname(utils.Rscript_cmd())
do.run(export_cmd + " ".join(cmd), "CNVkit segment")
return out_file
@utils.map_wrap
@zeromq_aware_logging
def _cnvkit_fix(cnns, background_cnn, data):
"""Normalize samples, correcting sources of bias.
"""
assert len(cnns) == 2, "Expected target and antitarget CNNs: %s" % cnns
target_cnn = [x["file"] for x in cnns if x["cnntype"] == "target"][0]
antitarget_cnn = [x["file"] for x in cnns if x["cnntype"] == "antitarget"][0]
out_file = "%scnr" % os.path.commonprefix([target_cnn, antitarget_cnn])
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
cmd = [_get_cmd(), "fix", "-o", tx_out_file, target_cnn, antitarget_cnn, background_cnn]
do.run(cmd, "CNVkit fix")
return [out_file]
def _cnvkit_background(background_cnns, out_file, target_bed, antitarget_bed, data):
"""Calculate background reference, handling flat case with no normal sample.
"""
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
cmd = [_get_cmd(), "reference", "-f", dd.get_ref_file(data), "-o", tx_out_file]
if len(background_cnns) == 0:
cmd += ["-t", target_bed, "-a", antitarget_bed]
else:
cmd += background_cnns
do.run(cmd, "CNVkit background")
return out_file
def _split_bed(bed_input, data):
"""Split BED file into sections for processing, allowing better multicore usage.
"""
split_lines = 100000
split_info = []
base, ext = os.path.splitext(bed_input)
base, ext2 = os.path.splitext(base)
ext = ext2 + ext
with open(bed_input) as in_handle:
for cur_index, line_group in enumerate(tz.partition_all(split_lines, in_handle)):
cur_file = "%s-%s%s" % (base, cur_index, ext)
if not utils.file_uptodate(cur_file, bed_input):
with file_transaction(data, cur_file) as tx_out_file:
with open(tx_out_file, "w") as out_handle:
for line in line_group:
out_handle.write(line)
split_info.append({"i": cur_index, "orig": bed_input, "file": cur_file})
if not split_info: # empty input file
split_info.append({"file": bed_input, "orig": bed_input})
return split_info
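# As a worked example of the naming above (illustrative only): splitting
# "sample.target.bed" produces chunks "sample-0.target.bed",
# "sample-1.target.bed", ..., so the ".target.bed" suffix that
# _cnvkit_coverage keys on is preserved in every chunk.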
def _merge_coverage(cnns, data):
"""Merge split CNN outputs into final consolidated output.
"""
out = []
for (out_file, _), members in tz.groupby(lambda x: (x["final_out"], x["bed_orig"]), cnns).items():
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
with open(tx_out_file, "w") as out_handle:
for i, in_file in enumerate([x["file"] for x in sorted(members, key=lambda x: x["bed_i"])]):
with open(in_file) as in_handle:
header = in_handle.readline()
if i == 0:
out_handle.write(header)
for line in in_handle:
out_handle.write(line)
base = copy.deepcopy(members[0])
base = tz.dissoc(base, "final_out", "bed_i", "bed_orig")
base["file"] = out_file
out.append(base)
return out
@utils.map_wrap
@zeromq_aware_logging
def _cnvkit_coverage(bam_file, bed_info, input_type, work_dir, data):
"""Calculate coverage in a BED file for CNVkit.
"""
bed_file = bed_info["file"]
exts = {".target.bed": ("target", "targetcoverage.cnn"),
".antitarget.bed": ("antitarget", "antitargetcoverage.cnn")}
assert bed_file.endswith(tuple(exts.keys())), "Unexpected BED file extension for coverage %s" % bed_file
for orig, (cnntype, ext) in exts.items():
if bed_file.endswith(orig):
break
base = _bam_to_outbase(bam_file, work_dir)
merged_out_file = "%s.%s" % (base, ext)
out_file = "%s-%s.%s" % (base, bed_info["i"], ext) if "i" in bed_info else merged_out_file
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
cmd = [_get_cmd(), "coverage", bam_file, bed_file, "-o", tx_out_file]
do.run(cmd, "CNVkit coverage")
return [{"itype": input_type, "file": out_file, "bam": bam_file, "cnntype": cnntype,
"final_out": merged_out_file, "bed_i": bed_info.get("i"), "bed_orig": bed_info["orig"]}]
def _cnvkit_targets(raw_target_bed, access_bed, cov_interval, work_dir, data):
"""Create target and antitarget regions from target and access files.
"""
target_bed = os.path.join(work_dir, "%s.target.bed" % os.path.splitext(os.path.basename(raw_target_bed))[0])
if not utils.file_uptodate(target_bed, raw_target_bed):
with file_transaction(data, target_bed) as tx_out_file:
cmd = [_get_cmd(), "target", raw_target_bed, "--split", "-o", tx_out_file]
if cov_interval == "genome":
cmd += ["--avg-size", "500"]
do.run(cmd, "CNVkit target")
antitarget_bed = os.path.join(work_dir, "%s.antitarget.bed" % os.path.splitext(os.path.basename(raw_target_bed))[0])
if not os.path.exists(antitarget_bed):
with file_transaction(data, antitarget_bed) as tx_out_file:
cmd = [_get_cmd(), "antitarget", "-g", access_bed, target_bed, "-o", tx_out_file]
do.run(cmd, "CNVkit antitarget")
return target_bed, antitarget_bed
def _get_target_access_files(cov_interval, data, work_dir):
"""Retrieve target and access files based on the type of data to process.
pick targets, anti-targets and access files based on analysis type
http://cnvkit.readthedocs.org/en/latest/nonhybrid.html
"""
base_regions = regions.get_sv_bed(data)
    # if we don't have a configured BED or regions to use for SV calling
if not base_regions:
# For genome calls, subset to regions within 10kb of genes
if cov_interval == "genome":
base_regions = regions.get_sv_bed(data, "transcripts1e4", work_dir)
if base_regions:
base_regions = shared.remove_exclude_regions(base_regions, base_regions, [data])
# Finally, default to the defined variant regions
if not base_regions:
base_regions = dd.get_variant_regions(data)
target_bed = bedutils.merge_overlaps(base_regions, data, out_dir=work_dir)
if cov_interval == "amplicon":
return target_bed, target_bed
elif cov_interval == "genome":
return target_bed, target_bed
else:
access_file = _create_access_file(dd.get_ref_file(data), _sv_workdir(data), data)
return target_bed, access_file
def _add_seg_to_output(out, data):
"""Export outputs to 'seg' format compatible with IGV and GenePattern.
"""
out_file = "%s.seg" % os.path.splitext(out["cns"])[0]
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
cmd = [os.path.join(os.path.dirname(sys.executable), "cnvkit.py"), "export",
"seg", "-o", tx_out_file, out["cns"]]
do.run(cmd, "CNVkit export seg")
out["seg"] = out_file
return out
def _add_cnr_bedgraph_and_bed_to_output(out, data):
cnr_file = out["cnr"]
bedgraph_file = cnr_file + ".bedgraph"
if not utils.file_exists(bedgraph_file):
with file_transaction(data, bedgraph_file) as tx_out_file:
cmd = "sed 1d {cnr_file} | cut -f1,2,3,5 > {tx_out_file}"
do.run(cmd.format(**locals()), "Converting cnr to bedgraph format")
out["cnr_bedgraph"] = bedgraph_file
bed_file = cnr_file + ".bed"
if not utils.file_exists(bed_file):
with file_transaction(data, bed_file) as tx_out_file:
cmd = "sed 1d {cnr_file} | cut -f1,2,3,4,5 > {tx_out_file}"
do.run(cmd.format(**locals()), "Converting cnr to bed format")
out["cnr_bed"] = bed_file
return out
def _add_variantcalls_to_output(out, data):
"""Call ploidy and convert into VCF and BED representations.
"""
call_file = "%s-call%s" % os.path.splitext(out["cns"])
gender = dd.get_gender(data)
if not utils.file_exists(call_file):
with file_transaction(data, call_file) as tx_call_file:
cmd = [os.path.join(os.path.dirname(sys.executable), "cnvkit.py"), "call",
"--ploidy", str(dd.get_ploidy(data)),
"-o", tx_call_file, out["cns"]]
if gender:
cmd += ["--gender", gender]
if gender.lower() == "male":
cmd += ["--male-reference"]
do.run(cmd, "CNVkit call ploidy")
calls = {}
for outformat in ["bed", "vcf"]:
out_file = "%s.%s" % (os.path.splitext(call_file)[0], outformat)
calls[outformat] = out_file
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
cmd = [os.path.join(os.path.dirname(sys.executable), "cnvkit.py"), "export",
outformat, "--sample-id", dd.get_sample_name(data),
"--ploidy", str(dd.get_ploidy(data)),
"-o", tx_out_file, call_file]
if gender and gender.lower() == "male":
cmd += ["--male-reference"]
do.run(cmd, "CNVkit export %s" % outformat)
out["call_file"] = call_file
out["vrn_bed"] = annotate.add_genes(calls["bed"], data)
effects_vcf, _ = effects.add_to_vcf(calls["vcf"], data, "snpeff")
out["vrn_file"] = effects_vcf or calls["vcf"]
return out
def _add_segmetrics_to_output(out, data):
"""Add metrics for measuring reliability of CNV estimates.
"""
out_file = "%s-segmetrics.txt" % os.path.splitext(out["cns"])[0]
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
cmd = [os.path.join(os.path.dirname(sys.executable), "cnvkit.py"), "segmetrics",
"--iqr", "--ci", "--pi",
"-s", out["cns"], "-o", tx_out_file, out["cnr"]]
do.run(cmd, "CNVkit segmetrics")
out["segmetrics"] = out_file
return out
def _add_gainloss_to_output(out, data):
"""Add gainloss based on genes, helpful for identifying changes in smaller genes.
"""
out_file = "%s-gainloss.txt" % os.path.splitext(out["cns"])[0]
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
cmd = [os.path.join(os.path.dirname(sys.executable), "cnvkit.py"), "gainloss",
"-s", out["cns"], "-o", tx_out_file, out["cnr"]]
do.run(cmd, "CNVkit gainloss")
out["gainloss"] = out_file
return out
def _add_coverage_bedgraph_to_output(out, data):
"""Add BedGraph representation of coverage to the output
"""
out_file = "%s.coverage.bedgraph" % os.path.splitext(out["cns"])[0]
if utils.file_exists(out_file):
out["bedgraph"] = out_file
return out
bam_file = dd.get_align_bam(data)
bedtools = config_utils.get_program("bedtools", data["config"])
samtools = config_utils.get_program("samtools", data["config"])
cns_file = out["cns"]
bed_file = tempfile.NamedTemporaryFile(suffix=".bed", delete=False).name
with file_transaction(data, out_file) as tx_out_file:
cmd = ("sed 1d {cns_file} | cut -f1,2,3 > {bed_file}; "
"{samtools} view -b -L {bed_file} {bam_file} | "
"{bedtools} genomecov -bg -ibam - -g {bed_file} >"
"{tx_out_file}").format(**locals())
do.run(cmd, "CNVkit bedGraph conversion")
os.remove(bed_file)
out["bedgraph"] = out_file
return out
def _add_plots_to_output(out, data):
"""Add CNVkit plots summarizing called copy number values.
"""
out["plot"] = {}
diagram_plot = _add_diagram_plot(out, data)
if diagram_plot:
out["plot"]["diagram"] = diagram_plot
loh_plot = _add_loh_plot(out, data)
if loh_plot:
out["plot"]["loh"] = loh_plot
scatter_plot = _add_scatter_plot(out, data)
if scatter_plot:
out["plot"]["scatter"] = scatter_plot
return out
def _get_larger_chroms(ref_file):
"""Retrieve larger chromosomes, avoiding the smaller ones for plotting.
"""
from scipy.cluster.vq import kmeans, vq
all_sizes = []
for c in ref.file_contigs(ref_file):
all_sizes.append(float(c.size))
all_sizes.sort()
# separate out smaller chromosomes and haplotypes with kmeans
centroids, _ = kmeans(np.array(all_sizes), 2)
idx, _ = vq(np.array(all_sizes), centroids)
little_sizes = tz.first(tz.partitionby(lambda xs: xs[0], zip(idx, all_sizes)))
little_sizes = [x[1] for x in little_sizes]
# create one more cluster with the smaller, removing the haplotypes
centroids2, _ = kmeans(np.array(little_sizes), 2)
idx2, _ = vq(np.array(little_sizes), centroids2)
little_sizes2 = tz.first(tz.partitionby(lambda xs: xs[0], zip(idx2, little_sizes)))
little_sizes2 = [x[1] for x in little_sizes2]
# get any chromosomes not in haplotype/random bin
thresh = max(little_sizes2)
larger_chroms = []
for c in ref.file_contigs(ref_file):
if c.size > thresh:
larger_chroms.append(c.name)
return larger_chroms
def _remove_haplotype_chroms(in_file, data):
"""Remove shorter haplotype chromosomes from cns/cnr files for plotting.
"""
larger_chroms = set(_get_larger_chroms(dd.get_ref_file(data)))
out_file = "%s-chromfilter%s" % utils.splitext_plus(in_file)
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
with open(in_file) as in_handle:
with open(tx_out_file, "w") as out_handle:
for line in in_handle:
if line.startswith("chromosome") or line.split()[0] in larger_chroms:
out_handle.write(line)
return out_file
def _add_scatter_plot(out, data):
out_file = "%s-scatter.pdf" % os.path.splitext(out["cnr"])[0]
priority_regions = dd.get_priority_regions(data)
if not priority_regions:
return None
priority_bed = plot._prioritize_plot_regions(pybedtools.BedTool(priority_regions), data)
if utils.file_exists(out_file):
return out_file
cnr = _remove_haplotype_chroms(out["cnr"], data)
cns = _remove_haplotype_chroms(out["cns"], data)
with file_transaction(data, out_file) as tx_out_file:
cmd = [_get_cmd(), "scatter", "-s", cns, "-o", tx_out_file, "-l",
priority_bed, cnr]
do.run(cmd, "CNVkit scatter plot")
return out_file
def _cnx_is_empty(in_file):
"""Check if cnr or cns files are empty (only have a header)
"""
with open(in_file) as in_handle:
for i, line in enumerate(in_handle):
if i > 0:
return False
return True
def _add_diagram_plot(out, data):
out_file = "%s-diagram.pdf" % os.path.splitext(out["cnr"])[0]
cnr = _remove_haplotype_chroms(out["cnr"], data)
cns = _remove_haplotype_chroms(out["cns"], data)
if _cnx_is_empty(cnr) or _cnx_is_empty(cns):
return None
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
cmd = [_get_cmd(), "diagram", "-s", cns,
"-o", tx_out_file, cnr]
gender = dd.get_gender(data)
if gender and gender.lower() == "male":
cmd += ["--male-reference"]
do.run(cmd, "CNVkit diagram plot")
return out_file
def _add_loh_plot(out, data):
vrn_files = filter(lambda x: x is not None, [x.get("vrn_file") for x in data.get("variants", [])])
if len(vrn_files) > 0:
out_file = "%s-loh.pdf" % os.path.splitext(out["cnr"])[0]
cns = _remove_haplotype_chroms(out["cns"], data)
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
cmd = [_get_cmd(), "loh", "-t", "-s", cns,
"-o", tx_out_file, vrn_files[0]]
do.run(cmd, "CNVkit diagram plot")
return out_file
def _create_access_file(ref_file, out_dir, data):
"""Create genome access file for CNVlib to define available genomic regions.
XXX Can move to installation/upgrade process if too slow here.
"""
out_file = os.path.join(out_dir, "%s-access.bed" % os.path.splitext(os.path.basename(ref_file))[0])
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
cmd = [os.path.join(os.path.dirname(sys.executable), "genome2access.py"),
ref_file, "-s", "10000", "-o", tx_out_file]
do.run(cmd, "Create CNVkit access file")
return out_file
# ## Theta support
def export_theta(ckout, data):
"""Provide updated set of data with export information for TheTA2 input.
"""
cns_file = chromhacks.bed_to_standardonly(ckout["cns"], data, headers="chromosome")
cnr_file = chromhacks.bed_to_standardonly(ckout["cnr"], data, headers="chromosome")
out_file = "%s-theta.input" % utils.splitext_plus(cns_file)[0]
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
cmd = [_get_cmd(), "export", "theta", cns_file, cnr_file, "-o", tx_out_file]
do.run(cmd, "Export CNVkit calls as inputs for TheTA2")
ckout["theta_input"] = out_file
return ckout
|
{
"content_hash": "29db8ce56fd1e7caaf91445840f97c6a",
"timestamp": "",
"source": "github",
"line_count": 564,
"max_line_length": 120,
"avg_line_length": 45.705673758865245,
"alnum_prop": 0.5970983008767166,
"repo_name": "guillermo-carrasco/bcbio-nextgen",
"id": "85eac7d490279cb0b16ff4887d91b1999a331f62",
"size": "25778",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bcbio/structural/cnvkit.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1482215"
},
{
"name": "Ruby",
"bytes": "624"
},
{
"name": "Shell",
"bytes": "13852"
}
],
"symlink_target": ""
}
|
from django.template.loader import render_to_string
from courant.core.mailer.models import MessageJob
try:
from apps.email_subscriptions.models import EmailSubscription
except ImportError:
pass
from courant.core.profiles.models import UserProfile
from models import *
def send_email_update(subject, from_address, from_name, data, html_template=None, text_template=None):
raw_subscriptions = EmailSubscription.objects.all().values_list('email', flat=True)
subscribed_users = UserProfile.objects.filter(subscribed=True).values_list('user__email', flat=True)
recipient_list = list(set(raw_subscriptions) | set(subscribed_users)) # union
msg = MessageJob(from_address='%s <%s>' % (from_name, from_address),
subject=subject,
recipient_list=';'.join(recipient_list))
msg.message_body = render_to_string(text_template, {'data': data})
if html_template:
msg.message_body_html = render_to_string(html_template, {'data': data})
msg.save()
return len(recipient_list)
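# Hypothetical call for reference; the template paths and data keys below are
# assumptions, not part of this module:
#
#   send_email_update("Daily digest", "news@example.com", "Courant News",
#                     {"articles": articles},
#                     html_template="email/digest.html",
#                     text_template="email/digest.txt")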
|
{
"content_hash": "e5236ed20744b1ce3d7d64b3a38e80c6",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 104,
"avg_line_length": 42,
"alnum_prop": 0.6721611721611722,
"repo_name": "maxcutler/Courant-News",
"id": "371afa6ef42005d2511a13bd9efa18988662504f",
"size": "1092",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "courant/core/news/notifications.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "47452"
},
{
"name": "Python",
"bytes": "487441"
}
],
"symlink_target": ""
}
|
class Solution:
def searchMatrix(self, matrix: List[List[int]], target: int) -> bool:
if len(matrix) == 0 or len(matrix[0]) == 0:
return False
low = 0
high = len(matrix) * len(matrix[0])
while low < high:
mid = (low + high) // 2
row = mid // len(matrix[0])
col = mid % len(matrix[0])
if matrix[row][col] < target:
low = mid + 1
elif matrix[row][col] > target:
high = mid
else:
return True
return False
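# Quick illustrative check: Solution().searchMatrix([[1, 3, 5], [7, 9, 11]], 9)
# returns True, since flattened index 4 maps to row 4 // 3 = 1 and
# col 4 % 3 = 1, which holds the target.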
|
{
"content_hash": "19f85c7689233b13b394ba8b9a3163ad",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 73,
"avg_line_length": 30.94736842105263,
"alnum_prop": 0.44387755102040816,
"repo_name": "jiadaizhao/LeetCode",
"id": "dfb75259be7a56650be8978e3c09140dedc04875",
"size": "588",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "0001-0100/0074-Search a 2D Matrix/0074-Search a 2D Matrix.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "1140864"
},
{
"name": "Java",
"bytes": "34062"
},
{
"name": "Python",
"bytes": "758800"
},
{
"name": "Shell",
"bytes": "698"
},
{
"name": "TSQL",
"bytes": "774"
}
],
"symlink_target": ""
}
|
import black
import isort
def black_format_import_section(
contents: str, extension: str, config: isort.settings.Config
) -> str:
"""Formats the given import section using black."""
if extension.lower() not in ("pyi", "py"):
return contents
try:
return black.format_file_contents(
contents,
fast=True,
mode=black.FileMode(
is_pyi=extension.lower() == "pyi",
line_length=config.line_length,
),
)
except black.NothingChanged:
return contents
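# Minimal sketch of exercising the hook directly (the config values are
# arbitrary examples, not isort defaults):
#
#   import isort
#   formatted = black_format_import_section(
#       "import os\nimport sys\n", "py",
#       isort.settings.Config(line_length=88))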
|
{
"content_hash": "a5b2c641689a61a4c2911f7ffacd6196",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 64,
"avg_line_length": 25.043478260869566,
"alnum_prop": 0.5729166666666666,
"repo_name": "PyCQA/isort",
"id": "f63b817e8ddb2a56ad2d8afa3324430d25dfcb36",
"size": "576",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "example_isort_formatting_plugin/example_isort_formatting_plugin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "158"
},
{
"name": "Dockerfile",
"bytes": "682"
},
{
"name": "Python",
"bytes": "848358"
},
{
"name": "Shell",
"bytes": "1260"
}
],
"symlink_target": ""
}
|
default_app_config='voxel_globe.order.visualsfm.apps.VisualSfmConfig'
|
{
"content_hash": "be8f61791de252d3433b95b64ea70048",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 69,
"avg_line_length": 69,
"alnum_prop": 0.855072463768116,
"repo_name": "andyneff/voxel-globe",
"id": "52fc114c3306425c5e1a971e224a6e4895100228",
"size": "69",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "voxel_globe/order/visualsfm/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "21280"
},
{
"name": "Batchfile",
"bytes": "35781"
},
{
"name": "CSS",
"bytes": "1855"
},
{
"name": "HTML",
"bytes": "90597"
},
{
"name": "JavaScript",
"bytes": "131377"
},
{
"name": "Python",
"bytes": "302839"
},
{
"name": "Shell",
"bytes": "17009"
}
],
"symlink_target": ""
}
|
from flask import Flask, request, g, url_for
from flaskext.auth import Auth, AuthUser, logout, Permission, Role, \
permission_required
app = Flask(__name__)
auth = Auth(app, login_url_name='index')
user_create = Permission('user', 'create')
user_view = Permission('user', 'view')
roles = {
'admin': Role('admin', [user_create, user_view]),
'userview': Role('userview', [user_view]),
}
def load_role(role_name):
"""
Function that has to be defined to be able to retrieve the actual role
object from the user.role attribute. In this simple case, we could
    actually assign the role object directly to user.role, in which case this
    function would simply be the identity function (lambda x: x). This extra
    step becomes necessary, however, when the role object is more complex
    and can no longer simply be pickled.
"""
return roles.get(role_name)
auth.load_role = load_role
@app.before_request
def init_users():
"""
Initializing users by hardcoding password. Another use case is to read
usernames from an external file (like /etc/passwd).
"""
user = AuthUser(username='user')
# Setting and encrypting the hardcoded password.
user.set_and_encrypt_password('password', salt='123')
# Setting role of the user.
user.role = 'userview'
# Doing the same for the admin
admin = AuthUser(username='admin')
admin.set_and_encrypt_password('admin')
admin.role = 'admin'
# Persisting users for this request.
g.users = {'user': user, 'admin': admin, }
@permission_required(resource='user', action='view')
def user_view():
return 'Users are: {0}.'.format(g.users)
@permission_required(resource='user', action='create')
def user_create():
return 'I can create users!'
def index():
if request.method == 'POST':
username = request.form['username']
if username in g.users:
# Authenticate and log in!
if g.users[username].authenticate(request.form['password']):
return '''
<a href="{0}">View users</a><br/>
<a href="{1}">Create users</a><br/>
<a href="{2}">Logout</a>
'''.format(url_for('user_view'),
url_for('user_create'),
url_for('logout'),)
return 'Failure :('
return '''
<form method="POST">
Username: <input type="text" name="username"/><br/>
Password: <input type="password" name="password"/><br/>
<input type="submit" value="Log in"/>
</form>
'''
def logout_view():
user_data = logout()
if user_data is None:
return 'No user to log out.'
return 'Logged out user {0}.'.format(user_data['username'])
# URLs
app.add_url_rule('/', 'index', index, methods=['GET', 'POST'])
app.add_url_rule('/users/view/', 'user_view', user_view)
app.add_url_rule('/users/create/', 'user_create', user_create)
app.add_url_rule('/logout/', 'logout', logout_view)
# Secret key needed to use sessions.
app.secret_key = 'N4BUdSXUzHxNoO8g'
if __name__ == '__main__':
app.run(debug=True)
|
{
"content_hash": "0da322d7f767571e5141fd4982b70da8",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 76,
"avg_line_length": 33.863157894736844,
"alnum_prop": 0.5999378302766553,
"repo_name": "thedekel/flask-auth",
"id": "6b0923db4e206ba7777a5baa103d3045f1e41d8d",
"size": "3217",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/permissions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34244"
}
],
"symlink_target": ""
}
|
"""Encryption module for crypto-cookie package
"""
__author__ = "@philipkershaw"
__date__ = "09/07/15"
__copyright__ = "(C) 2015 Science and Technology Facilities Council"
__license__ = "BSD - see LICENSE file in top-level directory"
__contact__ = "Philip.Kershaw@stfc.ac.uk"
__revision__ = '$Id$'
import os
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
from cryptography.hazmat.backends import default_backend
class Encryption(object):
'''Class for handling encryption and decryption. It uses symmetric key
method'''
DEFAULT_MODE = modes.CBC
DEFAULT_ALGORITHM = algorithms.AES
DEFAULT_IV_LEN = 16
DEFAULT_PADDING_CHAR = ' '
DEFAULT_MSG_BLK_SIZE = 16
def __init__(self,
algorithm=DEFAULT_ALGORITHM,
mode=DEFAULT_MODE,
iv_len=DEFAULT_IV_LEN,
padding_char=DEFAULT_PADDING_CHAR,
msg_blk_size=DEFAULT_MSG_BLK_SIZE):
'''Set hash algorithm and encoding method'''
self.algorithm = algorithm
self.mode = mode
self.iv_len = iv_len
self.padding_char = padding_char
self.msg_blk_size = msg_blk_size
def encrypt(self, msg, key):
"""Encrypt the input message with the given key. Strings should be
8-bit and are cast this way by default. Unicode is not supported.
"""
# Ensure 8-bit string
msg_ = str(msg)
backend = default_backend()
iv = os.urandom(self.iv_len)
encryption_cipher = Cipher(self.algorithm(key), self.mode(iv),
backend=backend)
encryptor = encryption_cipher.encryptor()
# Ensure length is an even multiple of block size (default 16)
msg_len = len(msg)
if msg_len % self.msg_blk_size:
factor = msg_len // self.msg_blk_size
n_padding_chars = self.msg_blk_size * (factor + 1) - len(msg)
padded_msg = msg_ + self.padding_char * n_padding_chars
else:
padded_msg = msg_
cipher_text = encryptor.update(padded_msg) + encryptor.finalize()
return cipher_text, iv
def decrypt(self, cipher_text, key, iv):
backend = default_backend()
decryption_cipher = Cipher(self.algorithm(key), self.mode(iv),
backend=backend)
decryptor = decryption_cipher.decryptor()
padded_decrypted_msg = decryptor.update(cipher_text) + \
decryptor.finalize()
decrypted_msg = padded_decrypted_msg.rstrip(self.padding_char)
return decrypted_msg
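# Example round trip (sketch; the 16-byte key below is illustrative only):
#
#   enc = Encryption()
#   cipher_text, iv = enc.encrypt("some message", "0123456789abcdef")
#   assert enc.decrypt(cipher_text, "0123456789abcdef", iv) == "some message"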
|
{
"content_hash": "8495c362e92996dc42b0d5eeee9cacab",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 76,
"avg_line_length": 36.52,
"alnum_prop": 0.5819642205184374,
"repo_name": "philipkershaw/crypto-cookie",
"id": "0d3fab6f1a0ae3ba20fafae1e042f23954fbda10",
"size": "2739",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "crypto_cookie/encryption.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "15832"
}
],
"symlink_target": ""
}
|
""" Neo4j GraphDB flask connector """
import socket
import neo4j
from neomodel import db, config
from flask_ext import BaseExtension, get_logger
from rapydo.utils.logs import re_obscure_pattern
log = get_logger(__name__)
class NeomodelClient():
def __init__(self, db):
self.db = db
def cypher(self, query):
""" Execute normal neo4j queries """
from neomodel import db
try:
results, meta = db.cypher_query(query)
except Exception as e:
raise Exception(
"Failed to execute Cypher Query: %s\n%s" % (query, str(e)))
# log.debug("Graph query.\nResults: %s\nMeta: %s" % (results, meta))
return results
class NeoModel(BaseExtension):
def set_connection_exception(self):
return (
socket.gaierror,
neo4j.bolt.connection.ServiceUnavailable
)
def custom_connection(self, **kwargs):
if len(kwargs) > 0:
variables = kwargs
else:
variables = self.variables
self.uri = "bolt://%s:%s@%s:%s" % \
(
# User:Password
variables.get('user', 'neo4j'),
variables.get('password'),
# Host:Port
variables.get('host'),
variables.get('port'),
)
log.very_verbose("URI IS %s" % re_obscure_pattern(self.uri))
config.DATABASE_URL = self.uri
# Ensure all DateTimes are provided with a timezone
# before being serialised to UTC epoch
config.FORCE_TIMEZONE = True # default False
db.url = self.uri
db.set_connection(self.uri)
client = NeomodelClient(db)
return client
# return db
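# For reference (values are illustrative): variables such as
# {'user': 'neo4j', 'password': 'secret', 'host': 'graphdb', 'port': 7687}
# produce the connection URI "bolt://neo4j:secret@graphdb:7687".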
|
{
"content_hash": "ed63e535df05fa8a1d6e51422c11566f",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 76,
"avg_line_length": 27.045454545454547,
"alnum_prop": 0.5591036414565826,
"repo_name": "EUDAT-B2STAGE/http-api-base",
"id": "8b30d0dfd45dadcf5a3fd67d433191b86f458381",
"size": "1810",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flask_ext/flask_neo4j.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "539952"
},
{
"name": "HTML",
"bytes": "2613"
},
{
"name": "Python",
"bytes": "397468"
},
{
"name": "Shell",
"bytes": "1872"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.apps import AppConfig
class UserpermConfig(AppConfig):
name = 'userperm'
|
{
"content_hash": "9e88c6807d196f0613672b2e27c17f80",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 39,
"avg_line_length": 18.857142857142858,
"alnum_prop": 0.7575757575757576,
"repo_name": "wxmgcs/devops",
"id": "453667e9f8ee63fadfa1d8842f1d639adbc6be65",
"size": "132",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "userperm/apps.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "26631"
},
{
"name": "HTML",
"bytes": "330429"
},
{
"name": "JavaScript",
"bytes": "2154207"
},
{
"name": "Python",
"bytes": "333032"
},
{
"name": "Shell",
"bytes": "77"
}
],
"symlink_target": ""
}
|
import robocup
import constants
import play
import enum
import behavior
import main
import skills.move
import plays.testing.line_up
import time
# A simple binary clock in the form of a Soccer Play,
# using robots to display the current minute in binary
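# For example, at minute 37 format(37, '06b') gives '100101', so robots are
# sent to the 1st, 4th and 6th positions (left to right) and the rest line up.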
class BinaryClock(play.Play):
    # Class-level constants defining where on the field to place the binary clock.
LeftPoint = -constants.Field.Width / 6
RobotDist = (abs(LeftPoint) / 2)
# Perfect alignment
LeftPoint -= RobotDist / 2
class State(enum.Enum):
# We only need one state, and we'll transition to itself when we want to update.
waiting = 0
dummy = 1
def __init__(self):
super().__init__(continuous=True)
# This is a local variable of this class
# Refer to it with self.current_time
self.current_time = self.get_time()
# Register the states you defined using 'add_state'.
# eg: self.add_state(WhichHalf.State.<???>,
# behavior.Behavior.State.running)
# ----------------------------------------------------
self.add_state(BinaryClock.State.waiting,
behavior.Behavior.State.running)
self.add_state(BinaryClock.State.dummy,
behavior.Behavior.State.running)
# Add your state transitions using 'add_transition'.
# eg: self.add_transition(behavior.Behavior.State.start,
# self.State.<???>, lambda: True,
# 'immediately')
# eg: self.add_transition(self.State.<???>, self.State.<???>,
# lambda: <???>,
# 'state change message')
# ------------------------------------------------------------
# EXAMPLE TRANSITION, YOU MAY WANT TO REPLACE THIS
self.add_transition(behavior.Behavior.State.start,
self.State.waiting, lambda: True, 'immediately')
self.add_transition(
self.State.waiting,
self.State.dummy, lambda: self.current_time != self.get_time(),
'Time in minutes changed')
self.add_transition(self.State.dummy, self.State.waiting, lambda: True,
'immediately')
# Define your own 'on_enter' and 'execute' functions here.
# eg: def on_enter_<???>(self):
# print('Something?')
# eg: def execute_<???>(self):
# print('Something?')
# ---------------------------------------------------------
def get_time(self):
return time.localtime().tm_min
# Demo of moving to a point.
def on_enter_waiting(self):
self.current_time = self.get_time()
binary = format(self.current_time, '06b')
move_point = robocup.Point(BinaryClock.LeftPoint,
constants.Field.Length / 3)
for i in range(6):
if (binary[i] == '1'):
self.add_subbehavior(
skills.move.Move(move_point), 'Robot' + str(i))
move_point = robocup.Point(move_point.x + BinaryClock.RobotDist,
move_point.y)
# Swallow all unused robots
self.add_subbehavior(plays.testing.line_up.LineUp(), 'line up')
def on_exit_waiting(self):
self.remove_all_subbehaviors()
|
{
"content_hash": "daaa737f37ab22b2fc10ccd5f12ab090",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 88,
"avg_line_length": 37.56666666666667,
"alnum_prop": 0.5371191955042887,
"repo_name": "JNeiger/robocup-software",
"id": "b39efad549457256be696f46fd05660f5ecd371e",
"size": "3381",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "soccer/gameplay/plays/training/binary_clock.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "2990"
},
{
"name": "C++",
"bytes": "1083792"
},
{
"name": "CMake",
"bytes": "112437"
},
{
"name": "Dockerfile",
"bytes": "2872"
},
{
"name": "MATLAB",
"bytes": "31229"
},
{
"name": "Makefile",
"bytes": "5816"
},
{
"name": "Python",
"bytes": "735005"
},
{
"name": "Shell",
"bytes": "21468"
}
],
"symlink_target": ""
}
|
'''Generates Go source files from a mojom.Module.'''
from itertools import chain
import os
import re
from mojom.generate.template_expander import UseJinja
import mojom.generate.generator as generator
import mojom.generate.module as mojom
import mojom.generate.pack as pack
class KindInfo(object):
def __init__(self, go_type, encode_suffix, decode_suffix, bit_size):
self.go_type = go_type
self.encode_suffix = encode_suffix
self.decode_suffix = decode_suffix
self.bit_size = bit_size
_kind_infos = {
mojom.BOOL: KindInfo('bool', 'Bool', 'Bool', 1),
mojom.INT8: KindInfo('int8', 'Int8', 'Int8', 8),
mojom.UINT8: KindInfo('uint8', 'Uint8', 'Uint8', 8),
mojom.INT16: KindInfo('int16', 'Int16', 'Int16', 16),
mojom.UINT16: KindInfo('uint16', 'Uint16', 'Uint16', 16),
mojom.INT32: KindInfo('int32', 'Int32', 'Int32', 32),
mojom.UINT32: KindInfo('uint32', 'Uint32', 'Uint32', 32),
mojom.FLOAT: KindInfo('float32', 'Float32', 'Float32', 32),
mojom.HANDLE: KindInfo(
'system.Handle', 'Handle', 'Handle', 32),
mojom.DCPIPE: KindInfo(
'system.ConsumerHandle', 'Handle', 'ConsumerHandle', 32),
mojom.DPPIPE: KindInfo(
'system.ProducerHandle', 'Handle', 'ProducerHandle', 32),
mojom.MSGPIPE: KindInfo(
'system.MessagePipeHandle', 'Handle', 'MessagePipeHandle', 32),
mojom.SHAREDBUFFER: KindInfo(
'system.SharedBufferHandle', 'Handle', 'SharedBufferHandle', 32),
mojom.NULLABLE_HANDLE: KindInfo(
'system.Handle', 'Handle', 'Handle', 32),
mojom.NULLABLE_DCPIPE: KindInfo(
'system.ConsumerHandle', 'Handle', 'ConsumerHandle', 32),
mojom.NULLABLE_DPPIPE: KindInfo(
'system.ProducerHandle', 'Handle', 'ProducerHandle', 32),
mojom.NULLABLE_MSGPIPE: KindInfo(
'system.MessagePipeHandle', 'Handle', 'MessagePipeHandle', 32),
mojom.NULLABLE_SHAREDBUFFER: KindInfo(
'system.SharedBufferHandle', 'Handle', 'SharedBufferHandle', 32),
mojom.INT64: KindInfo('int64', 'Int64', 'Int64', 64),
mojom.UINT64: KindInfo('uint64', 'Uint64', 'Uint64', 64),
mojom.DOUBLE: KindInfo('float64', 'Float64', 'Float64', 64),
mojom.STRING: KindInfo('string', 'String', 'String', 64),
mojom.NULLABLE_STRING: KindInfo('string', 'String', 'String', 64),
}
_imports = {}
def GetBitSize(kind):
if isinstance(kind, (mojom.Array, mojom.Map, mojom.Struct)):
return 64
if isinstance(kind, (mojom.InterfaceRequest, mojom.Interface)):
kind = mojom.MSGPIPE
if isinstance(kind, mojom.Enum):
kind = mojom.INT32
return _kind_infos[kind].bit_size
# Returns go type corresponding to provided kind. If |nullable| is true
# and kind is nullable adds an '*' to type (example: ?string -> *string).
def GetGoType(kind, nullable = True):
if nullable and mojom.IsNullableKind(kind):
return '*%s' % GetNonNullableGoType(kind)
return GetNonNullableGoType(kind)
# Returns go type corresponding to provided kind. Ignores nullability of
# top-level kind.
def GetNonNullableGoType(kind):
if mojom.IsStructKind(kind):
return '%s' % GetFullName(kind)
if mojom.IsArrayKind(kind):
if kind.length:
return '[%s]%s' % (kind.length, GetGoType(kind.kind))
return '[]%s' % GetGoType(kind.kind)
if mojom.IsMapKind(kind):
return 'map[%s]%s' % (GetGoType(kind.key_kind), GetGoType(kind.value_kind))
if mojom.IsInterfaceKind(kind):
return '%sPointer' % GetFullName(kind)
if mojom.IsInterfaceRequestKind(kind):
return '%sRequest' % GetFullName(kind.kind)
if mojom.IsEnumKind(kind):
return GetNameForNestedElement(kind)
return _kind_infos[kind].go_type
# Splits name to lower-cased parts used for camel-casing
# (example: HTTPEntry2FooBar -> ['http', 'entry2', 'foo', 'bar']).
def NameToComponent(name):
# insert '_' between anything and a Title name (e.g, HTTPEntry2FooBar ->
# HTTP_Entry2_FooBar)
name = re.sub('([^_])([A-Z][^A-Z_]+)', r'\1_\2', name)
# insert '_' between non upper and start of upper blocks (e.g.,
# HTTP_Entry2_FooBar -> HTTP_Entry2_Foo_Bar)
name = re.sub('([^A-Z_])([A-Z])', r'\1_\2', name)
return [x.lower() for x in name.split('_')]
def UpperCamelCase(name):
return ''.join([x.capitalize() for x in NameToComponent(name)])
# Formats a name. If |exported| is true makes name camel-cased with first
# letter capital, otherwise does no camel-casing and makes first letter
# lower-cased (which is used for making internal names more readable).
def FormatName(name, exported=True):
if exported:
return UpperCamelCase(name)
# Leave '_' symbols for unexported names.
return name[0].lower() + name[1:]
# Returns full name of an imported element based on prebuilt dict |_imports|.
# If the |element| is not imported returns formatted name of it.
# |element| should have attr 'name'. |exported| argument is used to make
# |FormatName()| calls only.
def GetFullName(element, exported=True):
if not hasattr(element, 'imported_from') or not element.imported_from:
return FormatName(element.name, exported)
path = 'gen/mojom'
if element.imported_from['namespace']:
path = '/'.join([path] + element.imported_from['namespace'].split('.'))
if path in _imports:
return '%s.%s' % (_imports[path], FormatName(element.name, exported))
return FormatName(element.name, exported)
# Returns a name for nested elements like enum field or constant.
# The returned name consists of camel-cased parts separated by '_'.
def GetNameForNestedElement(element):
if element.parent_kind:
return "%s_%s" % (GetNameForElement(element.parent_kind),
FormatName(element.name))
return GetFullName(element)
def GetNameForElement(element, exported=True):
if (mojom.IsInterfaceKind(element) or mojom.IsStructKind(element)):
return GetFullName(element, exported)
if isinstance(element, (mojom.EnumField,
mojom.Field,
mojom.Method,
mojom.Parameter)):
return FormatName(element.name, exported)
if isinstance(element, (mojom.Enum,
mojom.Constant,
mojom.ConstantValue)):
return GetNameForNestedElement(element)
raise Exception('Unexpected element: %s' % element)
def ExpressionToText(token):
if isinstance(token, mojom.EnumValue):
return "%s_%s" % (GetNameForNestedElement(token.enum),
FormatName(token.name, True))
if isinstance(token, mojom.ConstantValue):
return GetNameForNestedElement(token)
if isinstance(token, mojom.Constant):
return ExpressionToText(token.value)
return token
def DecodeSuffix(kind):
if mojom.IsEnumKind(kind):
return DecodeSuffix(mojom.INT32)
if mojom.IsInterfaceKind(kind) or mojom.IsInterfaceRequestKind(kind):
return DecodeSuffix(mojom.MSGPIPE)
return _kind_infos[kind].decode_suffix
def EncodeSuffix(kind):
if mojom.IsEnumKind(kind):
return EncodeSuffix(mojom.INT32)
if mojom.IsInterfaceKind(kind) or mojom.IsInterfaceRequestKind(kind):
return EncodeSuffix(mojom.MSGPIPE)
return _kind_infos[kind].encode_suffix
def GetPackage(namespace):
if namespace:
return namespace.split('.')[-1]
return 'mojom'
def GetPackagePath(namespace):
path = 'mojom'
for i in namespace.split('.'):
path = os.path.join(path, i)
return path
def GetStructFromMethod(method):
params_class = "%s_%s_Params" % (GetNameForElement(method.interface),
GetNameForElement(method))
struct = mojom.Struct(params_class, module=method.interface.module)
for param in method.parameters:
struct.AddField("in%s" % GetNameForElement(param),
param.kind, param.ordinal)
struct.packed = pack.PackedStruct(struct)
struct.bytes = pack.GetByteLayout(struct.packed)
struct.versions = pack.GetVersionInfo(struct.packed)
return struct
def GetResponseStructFromMethod(method):
params_class = "%s_%s_ResponseParams" % (GetNameForElement(method.interface),
GetNameForElement(method))
struct = mojom.Struct(params_class, module=method.interface.module)
for param in method.response_parameters:
struct.AddField("out%s" % GetNameForElement(param),
param.kind, param.ordinal)
struct.packed = pack.PackedStruct(struct)
struct.bytes = pack.GetByteLayout(struct.packed)
struct.versions = pack.GetVersionInfo(struct.packed)
return struct
def GetAllConstants(module):
data = [module] + module.structs + module.interfaces
constants = [x.constants for x in data]
return [i for i in chain.from_iterable(constants)]
def GetAllEnums(module):
data = [module] + module.structs + module.interfaces
enums = [x.enums for x in data]
return [i for i in chain.from_iterable(enums)]
# Adds an import required to use the provided |element|.
# The required import is stored at '_imports'.
def AddImport(module, element):
if (isinstance(element, mojom.Kind) and
mojom.IsNonInterfaceHandleKind(element)):
_imports['mojo/public/go/system'] = 'system'
return
if isinstance(element, mojom.Kind) and mojom.IsInterfaceRequestKind(element):
AddImport(module, element.kind)
return
if not hasattr(element, 'imported_from') or not element.imported_from:
return
imported = element.imported_from
if imported['namespace'] == module.namespace:
return
path = 'gen/mojom'
name = 'mojom'
if imported['namespace']:
path = '/'.join([path] + imported['namespace'].split('.'))
name = '_'.join([name] + imported['namespace'].split('.'))
while (name in _imports.values() and _imports[path] != path):
name += '_'
_imports[path] = name
# Scans |module| for elements that require imports and adds all found imports
# to '_imports' dict. Returns a list of imports that the generated go file
# should include.
def GetImports(module):
# Imports can only be used in structs, constants, enums, interfaces.
all_structs = list(module.structs)
for i in module.interfaces:
for method in i.methods:
all_structs.append(GetStructFromMethod(method))
if method.response_parameters:
all_structs.append(GetResponseStructFromMethod(method))
if len(all_structs) > 0 or len(module.interfaces) > 0:
_imports['mojo/public/go/bindings'] = 'bindings'
for struct in all_structs:
for field in struct.fields:
AddImport(module, field.kind)
# TODO(rogulenko): add these after generating constants and struct defaults.
# if field.default:
# AddImport(module, field.default)
for enum in GetAllEnums(module):
for field in enum.fields:
if field.value:
AddImport(module, field.value)
# TODO(rogulenko): add these after generating constants and struct defaults.
# for constant in GetAllConstants(module):
# AddImport(module, constant.value)
imports_list = []
for i in _imports:
if i.split('/')[-1] == _imports[i]:
imports_list.append('"%s"' % i)
else:
imports_list.append('%s "%s"' % (_imports[i], i))
return sorted(imports_list)
class Generator(generator.Generator):
go_filters = {
'array': lambda kind: mojom.Array(kind),
'bit_size': GetBitSize,
'decode_suffix': DecodeSuffix,
'encode_suffix': EncodeSuffix,
'go_type': GetGoType,
'expression_to_text': ExpressionToText,
'is_array': mojom.IsArrayKind,
'is_enum': mojom.IsEnumKind,
'is_handle': mojom.IsAnyHandleKind,
'is_handle_owner': lambda kind:
mojom.IsInterfaceKind(kind) or mojom.IsInterfaceRequestKind(kind),
'is_map': mojom.IsMapKind,
    'is_none_or_empty': lambda array: array is None or len(array) == 0,
'is_nullable': mojom.IsNullableKind,
'is_pointer': mojom.IsObjectKind,
'is_struct': mojom.IsStructKind,
'name': GetNameForElement,
'response_struct_from_method': GetResponseStructFromMethod,
'struct_from_method': GetStructFromMethod,
'tab_indent': lambda s, size = 1: ('\n' + '\t' * size).join(s.splitlines())
}
def GetParameters(self):
return {
'enums': GetAllEnums(self.module),
'imports': GetImports(self.module),
'interfaces': self.module.interfaces,
'package': GetPackage(self.module.namespace),
'structs': self.GetStructs(),
}
@UseJinja('go_templates/source.tmpl', filters=go_filters)
def GenerateSource(self):
return self.GetParameters()
def GenerateFiles(self, args):
self.Write(self.GenerateSource(), os.path.join("go", "src", "gen",
GetPackagePath(self.module.namespace), '%s.go' % self.module.name))
def GetJinjaParameters(self):
return {
'lstrip_blocks': True,
'trim_blocks': True,
}
def GetGlobals(self):
return {
'namespace': self.module.namespace,
'module': self.module,
}
|
{
"content_hash": "6f1514a484e887e85d3f603fbde8e73d",
"timestamp": "",
"source": "github",
"line_count": 334,
"max_line_length": 79,
"avg_line_length": 38.550898203592816,
"alnum_prop": 0.6777726001863933,
"repo_name": "sgraham/nope",
"id": "cb5a16e7f2a847be87d0cea2d6fd34dc6ccc54df",
"size": "13039",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "third_party/mojo/src/mojo/public/tools/bindings/generators/mojom_go_generator.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "AppleScript",
"bytes": "6973"
},
{
"name": "Arduino",
"bytes": "464"
},
{
"name": "Assembly",
"bytes": "39967"
},
{
"name": "C",
"bytes": "4061434"
},
{
"name": "C++",
"bytes": "279546186"
},
{
"name": "CMake",
"bytes": "27212"
},
{
"name": "CSS",
"bytes": "919339"
},
{
"name": "Emacs Lisp",
"bytes": "988"
},
{
"name": "Go",
"bytes": "13628"
},
{
"name": "Groff",
"bytes": "5283"
},
{
"name": "HTML",
"bytes": "15989749"
},
{
"name": "Java",
"bytes": "7541683"
},
{
"name": "JavaScript",
"bytes": "32372588"
},
{
"name": "Lua",
"bytes": "16189"
},
{
"name": "Makefile",
"bytes": "40513"
},
{
"name": "Objective-C",
"bytes": "1584184"
},
{
"name": "Objective-C++",
"bytes": "8249988"
},
{
"name": "PHP",
"bytes": "97817"
},
{
"name": "PLpgSQL",
"bytes": "169060"
},
{
"name": "Perl",
"bytes": "63937"
},
{
"name": "Protocol Buffer",
"bytes": "427339"
},
{
"name": "Python",
"bytes": "8346306"
},
{
"name": "Scheme",
"bytes": "10604"
},
{
"name": "Shell",
"bytes": "844553"
},
{
"name": "Standard ML",
"bytes": "4965"
},
{
"name": "VimL",
"bytes": "4075"
},
{
"name": "XSLT",
"bytes": "418"
},
{
"name": "nesC",
"bytes": "18347"
}
],
"symlink_target": ""
}
|
from twisted.internet.protocol import Factory
from twisted.internet import reactor
from twisted.protocols import basic
from xcaplib.green import XCAPClient
from eventlet.twistedutil import deferToGreenThread
from eventlet.twistedutil import join_reactor
class LineOnlyReceiver(basic.LineOnlyReceiver):
def lineReceived(self, line):
print 'received: %r' % line
if not line:
return
app, context, node = (line + ' ').split(' ', 3)
context = {'u' : 'users', 'g': 'global'}.get(context, context)
d = deferToGreenThread(client._get, app, node, globaltree=context=='global')
def callback(result):
self.transport.write(str(result))
def errback(error):
self.transport.write(error.getTraceback())
d.addCallback(callback)
d.addErrback(errback)
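# Illustrative input: a client line such as "resource-lists u" is parsed as
# app='resource-lists' with the 'u' shorthand mapped to the 'users' tree and
# an empty node selector.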
class MyFactory(Factory):
protocol = LineOnlyReceiver
client = XCAPClient('https://xcap.sipthor.net/xcap-root', 'alice@example.com', '123')
reactor.listenTCP(8007, MyFactory())
reactor.run()
|
{
"content_hash": "03612d7d3084c60cb4e0504ab2e522f7",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 85,
"avg_line_length": 33.774193548387096,
"alnum_prop": 0.6838586437440306,
"repo_name": "2013Commons/HUE-SHARK",
"id": "77073d7f926e6fbb98e2094da04d6688f98db1b3",
"size": "1047",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "desktop/core/ext-py/eventlet-0.9.14/examples/twisted/twisted_xcap_proxy.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "207947"
},
{
"name": "C",
"bytes": "9992379"
},
{
"name": "C++",
"bytes": "199612"
},
{
"name": "CSS",
"bytes": "419753"
},
{
"name": "Emacs Lisp",
"bytes": "3171"
},
{
"name": "Java",
"bytes": "3683071"
},
{
"name": "JavaScript",
"bytes": "1076553"
},
{
"name": "Perl",
"bytes": "138710"
},
{
"name": "Python",
"bytes": "40522057"
},
{
"name": "SQL",
"bytes": "522"
},
{
"name": "Shell",
"bytes": "27739"
},
{
"name": "TeX",
"bytes": "126420"
},
{
"name": "XSLT",
"bytes": "190688"
}
],
"symlink_target": ""
}
|
import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['Quantization'] , ['LinearTrend'] , ['Seasonal_Minute'] , ['ARX'] );
|
{
"content_hash": "c2621daaee716ec790e8bee3431cbacf",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 90,
"avg_line_length": 40.75,
"alnum_prop": 0.7177914110429447,
"repo_name": "antoinecarme/pyaf",
"id": "388304d1df3f2e883082f9a2de3b32f0faffc8c2",
"size": "163",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/model_control/detailed/transf_Quantization/model_control_one_enabled_Quantization_LinearTrend_Seasonal_Minute_ARX.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "6773299"
},
{
"name": "Procfile",
"bytes": "24"
},
{
"name": "Python",
"bytes": "54209093"
},
{
"name": "R",
"bytes": "807"
},
{
"name": "Shell",
"bytes": "3619"
}
],
"symlink_target": ""
}
|
import paddle.trainer_config_helpers.networks as conf_nw
import inspect
from config_base import __convert_to_v2__
__all__ = []
def __initialize__():
for each_subnetwork in conf_nw.__all__:
if each_subnetwork in ['inputs', 'outputs']:
continue
func = getattr(conf_nw, each_subnetwork)
if hasattr(func, 'argspec'):
argspec = func.argspec
else:
argspec = inspect.getargspec(func)
if each_subnetwork == 'simple_attention':
parents = ['encoded_sequence', 'encoded_proj', 'decoder_state']
else:
parents = filter(lambda x: x.startswith('input'), argspec.args)
assert len(parents) != 0, each_subnetwork
v2_subnet = __convert_to_v2__(
each_subnetwork,
parent_names=parents,
is_default_name='name' in argspec.args)
globals()[each_subnetwork] = v2_subnet
globals()[each_subnetwork].__name__ = each_subnetwork
global __all__
__all__.append(each_subnetwork)
__initialize__()
|
{
"content_hash": "3a6e9f97432f02dae6f055f51d38fac7",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 75,
"avg_line_length": 33.21875,
"alnum_prop": 0.587958607714017,
"repo_name": "cxysteven/Paddle",
"id": "9e6644196c8242cc3fed7a4fb1503697e5b59ffb",
"size": "1673",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "python/paddle/v2/networks.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "226899"
},
{
"name": "C++",
"bytes": "3159636"
},
{
"name": "CMake",
"bytes": "123472"
},
{
"name": "CSS",
"bytes": "21730"
},
{
"name": "Cuda",
"bytes": "511529"
},
{
"name": "HTML",
"bytes": "8941"
},
{
"name": "JavaScript",
"bytes": "1025"
},
{
"name": "Perl",
"bytes": "11452"
},
{
"name": "Protocol Buffer",
"bytes": "43771"
},
{
"name": "Python",
"bytes": "1022473"
},
{
"name": "Shell",
"bytes": "109781"
}
],
"symlink_target": ""
}
|
import copy
from oslo_serialization import jsonutils
import six
import yaml
from heat.common import exception
from heat.common.i18n import _
from heat.engine import attributes
from heat.engine import constraints
from heat.engine import properties
from heat.engine import resource
from heat.engine.resources import signal_responder
from heat.engine import support
class Workflow(signal_responder.SignalResponder,
resource.Resource):
"""A resource that implements Mistral workflow.
Workflow represents a process that can be described in a various number of
ways and that can do some job interesting to the end user. Each workflow
consists of tasks (at least one) describing what exact steps should be made
during workflow execution.
For detailed description how to use Workflow, read Mistral documentation.
"""
support_status = support.SupportStatus(version='2015.1')
default_client_name = 'mistral'
entity = 'workflows'
PROPERTIES = (
NAME, TYPE, DESCRIPTION, INPUT, OUTPUT, TASKS, PARAMS,
TASK_DEFAULTS, USE_REQUEST_BODY_AS_INPUT
) = (
'name', 'type', 'description', 'input', 'output', 'tasks', 'params',
'task_defaults', 'use_request_body_as_input'
)
_TASKS_KEYS = (
TASK_NAME, TASK_DESCRIPTION, ON_ERROR, ON_COMPLETE, ON_SUCCESS,
POLICIES, ACTION, WORKFLOW, PUBLISH, TASK_INPUT, REQUIRES,
RETRY, WAIT_BEFORE, WAIT_AFTER, PAUSE_BEFORE, TIMEOUT,
WITH_ITEMS, KEEP_RESULT, TARGET, JOIN
) = (
'name', 'description', 'on_error', 'on_complete', 'on_success',
'policies', 'action', 'workflow', 'publish', 'input', 'requires',
'retry', 'wait_before', 'wait_after', 'pause_before', 'timeout',
'with_items', 'keep_result', 'target', 'join'
)
_TASKS_TASK_DEFAULTS = [
ON_ERROR, ON_COMPLETE, ON_SUCCESS,
REQUIRES, RETRY, WAIT_BEFORE, WAIT_AFTER, PAUSE_BEFORE, TIMEOUT
]
_SIGNAL_DATA_KEYS = (
SIGNAL_DATA_INPUT, SIGNAL_DATA_PARAMS
) = (
'input', 'params'
)
ATTRIBUTES = (
WORKFLOW_DATA, ALARM_URL, EXECUTIONS
) = (
'data', 'alarm_url', 'executions'
)
properties_schema = {
NAME: properties.Schema(
properties.Schema.STRING,
_('Workflow name.')
),
TYPE: properties.Schema(
properties.Schema.STRING,
_('Workflow type.'),
constraints=[
constraints.AllowedValues(['direct', 'reverse'])
],
required=True,
update_allowed=True
),
USE_REQUEST_BODY_AS_INPUT: properties.Schema(
properties.Schema.BOOLEAN,
            _('Defines the method in which the request body for signaling a '
              'workflow would be parsed. In case this property is set to '
              'True, the body would be parsed as a simple json where each '
              'key is a workflow input; in other cases the body would be '
              'parsed expecting a specific json format with two keys: '
              '"input" and "params".'),
update_allowed=True,
support_status=support.SupportStatus(version='6.0.0')
),
DESCRIPTION: properties.Schema(
properties.Schema.STRING,
_('Workflow description.'),
update_allowed=True
),
INPUT: properties.Schema(
properties.Schema.MAP,
_('Dictionary which contains input for workflow.'),
update_allowed=True
),
OUTPUT: properties.Schema(
properties.Schema.MAP,
_('Any data structure arbitrarily containing YAQL '
'expressions that defines workflow output. May be '
'nested.'),
update_allowed=True
),
PARAMS: properties.Schema(
properties.Schema.MAP,
_("Workflow additional parameters. If Workflow is reverse typed, "
"params requires 'task_name', which defines initial task."),
update_allowed=True
),
TASK_DEFAULTS: properties.Schema(
properties.Schema.MAP,
_("Default settings for some of task "
"attributes defined "
"at workflow level."),
support_status=support.SupportStatus(version='5.0.0'),
schema={
ON_SUCCESS: properties.Schema(
properties.Schema.LIST,
_('List of tasks which will run after '
'the task has completed successfully.')
),
ON_ERROR: properties.Schema(
properties.Schema.LIST,
_('List of tasks which will run after '
'the task has completed with an error.')
),
ON_COMPLETE: properties.Schema(
properties.Schema.LIST,
_('List of tasks which will run after '
'the task has completed regardless of whether '
'it is successful or not.')
),
REQUIRES: properties.Schema(
properties.Schema.LIST,
_('List of tasks which should be executed before '
'this task. Used only in reverse workflows.')
),
RETRY: properties.Schema(
properties.Schema.MAP,
_('Defines a pattern how task should be repeated in '
'case of an error.')
),
WAIT_BEFORE: properties.Schema(
properties.Schema.INTEGER,
_('Defines a delay in seconds that Mistral Engine '
'should wait before starting a task.')
),
WAIT_AFTER: properties.Schema(
properties.Schema.INTEGER,
_('Defines a delay in seconds that Mistral Engine '
'should wait after a task has completed before '
'starting next tasks defined in '
'on-success, on-error or on-complete.')
),
PAUSE_BEFORE: properties.Schema(
properties.Schema.BOOLEAN,
_('Defines whether Mistral Engine should put the '
'workflow on hold or not before starting a task.')
),
TIMEOUT: properties.Schema(
properties.Schema.INTEGER,
_('Defines a period of time in seconds after which '
'a task will be failed automatically '
'by engine if hasn\'t completed.')
),
},
update_allowed=True
),
TASKS: properties.Schema(
properties.Schema.LIST,
_('Dictionary containing workflow tasks.'),
schema=properties.Schema(
properties.Schema.MAP,
schema={
TASK_NAME: properties.Schema(
properties.Schema.STRING,
_('Task name.'),
required=True
),
TASK_DESCRIPTION: properties.Schema(
properties.Schema.STRING,
_('Task description.')
),
TASK_INPUT: properties.Schema(
properties.Schema.MAP,
_('Actual input parameter values of the task.')
),
ACTION: properties.Schema(
properties.Schema.STRING,
_('Name of the action associated with the task. '
'Either action or workflow may be defined in the '
'task.')
),
WORKFLOW: properties.Schema(
properties.Schema.STRING,
_('Name of the workflow associated with the task. '
'Can be defined by intrinsic function get_resource '
'or by name of the referenced workflow, i.e. '
'{ workflow: wf_name } or '
'{ workflow: { get_resource: wf_name }}. Either '
'action or workflow may be defined in the task.'),
constraints=[
constraints.CustomConstraint('mistral.workflow')
]
),
PUBLISH: properties.Schema(
properties.Schema.MAP,
_('Dictionary of variables to publish to '
'the workflow context.')
),
ON_SUCCESS: properties.Schema(
properties.Schema.LIST,
_('List of tasks which will run after '
'the task has completed successfully.')
),
ON_ERROR: properties.Schema(
properties.Schema.LIST,
_('List of tasks which will run after '
'the task has completed with an error.')
),
ON_COMPLETE: properties.Schema(
properties.Schema.LIST,
_('List of tasks which will run after '
'the task has completed regardless of whether '
'it is successful or not.')
),
POLICIES: properties.Schema(
properties.Schema.MAP,
_('Dictionary-like section defining task policies '
'that influence how Mistral Engine runs tasks. Must '
'satisfy Mistral DSL v2.'),
support_status=support.SupportStatus(
status=support.DEPRECATED,
version='5.0.0',
message=_('Add needed policies directly to '
'the task, Policy keyword is not '
'needed'),
previous_status=support.SupportStatus(
version='2015.1'))
),
REQUIRES: properties.Schema(
properties.Schema.LIST,
_('List of tasks which should be executed before '
'this task. Used only in reverse workflows.')
),
RETRY: properties.Schema(
properties.Schema.MAP,
_('Defines a pattern how task should be repeated in '
'case of an error.'),
support_status=support.SupportStatus(version='5.0.0')
),
WAIT_BEFORE: properties.Schema(
properties.Schema.INTEGER,
_('Defines a delay in seconds that Mistral Engine '
'should wait before starting a task.'),
support_status=support.SupportStatus(version='5.0.0')
),
WAIT_AFTER: properties.Schema(
properties.Schema.INTEGER,
_('Defines a delay in seconds that Mistral '
'Engine should wait after '
'a task has completed before starting next tasks '
'defined in on-success, on-error or on-complete.'),
support_status=support.SupportStatus(version='5.0.0')
),
PAUSE_BEFORE: properties.Schema(
properties.Schema.BOOLEAN,
_('Defines whether Mistral Engine should '
'put the workflow on hold '
'or not before starting a task.'),
support_status=support.SupportStatus(version='5.0.0')
),
TIMEOUT: properties.Schema(
properties.Schema.INTEGER,
_('Defines a period of time in seconds after which a '
'task will be failed automatically by engine '
'if hasn\'t completed.'),
support_status=support.SupportStatus(version='5.0.0')
),
WITH_ITEMS: properties.Schema(
properties.Schema.STRING,
                        _('If configured, it allows running the action or '
                          'workflow associated with a task multiple times '
                          'on a provided list of items.'),
support_status=support.SupportStatus(version='5.0.0')
),
KEEP_RESULT: properties.Schema(
properties.Schema.BOOLEAN,
                        _('Allows not storing action results '
                          'after task completion.'),
support_status=support.SupportStatus(version='5.0.0')
),
TARGET: properties.Schema(
properties.Schema.STRING,
                        _('It defines an executor to which the task action '
                          'should be sent.'),
support_status=support.SupportStatus(version='5.0.0')
),
JOIN: properties.Schema(
properties.Schema.STRING,
_('Allows to synchronize multiple parallel workflow '
'branches and aggregate their data. '
'Valid inputs: all - the task will run only if '
'all upstream tasks are completed. '
'Any numeric value - then the task will run once '
'at least this number of upstream tasks are '
'completed and corresponding conditions have '
'triggered.'),
support_status=support.SupportStatus(version='6.0.0')
),
},
),
required=True,
update_allowed=True,
constraints=[constraints.Length(min=1)]
)
}
attributes_schema = {
WORKFLOW_DATA: attributes.Schema(
_('A dictionary which contains name and input of the workflow.'),
type=attributes.Schema.MAP
),
ALARM_URL: attributes.Schema(
_("A signed url to create executions for workflows specified in "
"Workflow resource."),
type=attributes.Schema.STRING
),
EXECUTIONS: attributes.Schema(
_("List of workflows' executions, each of them is a dictionary "
"with information about execution. Each dictionary returns "
"values for next keys: id, workflow_name, created_at, "
"updated_at, state for current execution state, input, output."),
type=attributes.Schema.LIST
)
}
def get_reference_id(self):
return self._workflow_name()
def _get_inputs_and_params(self, data):
inputs = None
params = None
if self.properties.get(self.USE_REQUEST_BODY_AS_INPUT):
inputs = data
else:
if data is not None:
inputs = data.get(self.SIGNAL_DATA_INPUT)
params = data.get(self.SIGNAL_DATA_PARAMS)
return inputs, params
def _validate_signal_data(self, inputs, params):
if inputs is not None:
if not isinstance(inputs, dict):
                message = (_('Input in signal data must be a map, '
                             'found a %s') % type(inputs))
raise exception.StackValidationFailed(
error=_('Signal data error'),
message=message)
for key in inputs:
if (self.properties.get(self.INPUT) is None or
key not in self.properties.get(self.INPUT)):
message = _('Unknown input %s') % key
raise exception.StackValidationFailed(
error=_('Signal data error'),
message=message)
if params is not None and not isinstance(params, dict):
            message = (_('Params must be a map, found a '
                         '%s') % type(params))
raise exception.StackValidationFailed(
error=_('Signal data error'),
message=message)
def validate(self):
super(Workflow, self).validate()
if self.properties.get(self.TYPE) == 'reverse':
params = self.properties.get(self.PARAMS)
if params is None or not params.get('task_name'):
raise exception.StackValidationFailed(
error=_('Mistral resource validation error'),
path=[self.name,
('properties'
if self.stack.t.VERSION == 'heat_template_version'
else 'Properties'),
self.PARAMS],
message=_("'task_name' is not assigned in 'params' "
"in case of reverse type workflow.")
)
for task in self.properties.get(self.TASKS):
wf_value = task.get(self.WORKFLOW)
action_value = task.get(self.ACTION)
if wf_value and action_value:
raise exception.ResourcePropertyConflict(self.WORKFLOW,
self.ACTION)
if not wf_value and not action_value:
raise exception.PropertyUnspecifiedError(self.WORKFLOW,
self.ACTION)
            if (task.get(self.REQUIRES) is not None
                    and self.properties.get(self.TYPE) == 'direct'):
msg = _("task %(task)s contains property 'requires' "
"in case of direct workflow. Only reverse workflows "
"can contain property 'requires'.") % {
'name': self.name,
'task': task.get(self.TASK_NAME)
}
raise exception.StackValidationFailed(
error=_('Mistral resource validation error'),
path=[self.name,
('properties'
if self.stack.t.VERSION == 'heat_template_version'
else 'Properties'),
self.TASKS,
task.get(self.TASK_NAME),
self.REQUIRES],
message=msg)
if task.get(self.POLICIES) is not None:
for task_item in task.get(self.POLICIES):
if task.get(task_item) is not None:
msg = _('Property %(policies)s and %(item)s cannot be '
'used both at one time.') % {
'policies': self.POLICIES,
'item': task_item
}
raise exception.StackValidationFailed(message=msg)
def _workflow_name(self):
return self.properties.get(self.NAME) or self.physical_resource_name()
def build_tasks(self, props):
for task in props[self.TASKS]:
current_task = {}
wf_value = task.get(self.WORKFLOW)
if wf_value is not None:
if wf_value in [res.resource_id
for res in six.itervalues(self.stack)]:
current_task.update({self.WORKFLOW: wf_value})
else:
msg = _("No such workflow %s") % wf_value
raise ValueError(msg)
# backward support for kilo.
if task.get(self.POLICIES) is not None:
task.update(task.get(self.POLICIES))
task_keys = [key for key in self._TASKS_KEYS
if key not in [
self.WORKFLOW,
self.TASK_NAME,
self.POLICIES
]]
for task_prop in task_keys:
if task.get(task_prop) is not None:
current_task.update(
{task_prop.replace('_', '-'): task[task_prop]})
yield {task[self.TASK_NAME]: current_task}
def prepare_properties(self, props):
"""Prepare correct YAML-formatted definition for Mistral."""
defn_name = self._workflow_name()
definition = {'version': '2.0',
defn_name: {self.TYPE: props.get(self.TYPE),
self.DESCRIPTION: props.get(
self.DESCRIPTION),
self.OUTPUT: props.get(self.OUTPUT)}}
for key in list(definition[defn_name].keys()):
if definition[defn_name][key] is None:
del definition[defn_name][key]
if props.get(self.INPUT) is not None:
definition[defn_name][self.INPUT] = list(props.get(
self.INPUT).keys())
definition[defn_name][self.TASKS] = {}
for task in self.build_tasks(props):
definition.get(defn_name).get(self.TASKS).update(task)
if props.get(self.TASK_DEFAULTS) is not None:
definition[defn_name][self.TASK_DEFAULTS.replace('_', '-')] = {
k.replace('_', '-'): v for k, v in
six.iteritems(props.get(self.TASK_DEFAULTS)) if v}
return yaml.dump(definition, Dumper=yaml.CSafeDumper
if hasattr(yaml, 'CSafeDumper')
else yaml.SafeDumper)
def handle_create(self):
super(Workflow, self).handle_create()
props = self.prepare_properties(self.properties)
try:
workflow = self.client().workflows.create(props)
except Exception as ex:
raise exception.ResourceFailure(ex, self)
# NOTE(prazumovsky): Mistral uses unique names for resource
# identification.
self.resource_id_set(workflow[0].name)
def handle_signal(self, details=None):
inputs, params = self._get_inputs_and_params(details)
self._validate_signal_data(inputs, params)
inputs_result = copy.deepcopy(self.properties[self.INPUT])
params_result = copy.deepcopy(self.properties[self.PARAMS]) or {}
        # NOTE(prazumovsky): The signal can contain data of interest to the
        # workflow, e.g. inputs. If the signal data contains inputs, they
        # override the inputs defined in the template; the others keep their
        # template-defined values.
if inputs:
inputs_result.update(inputs)
if params:
params_result.update(params)
try:
execution = self.client().executions.create(
self._workflow_name(),
jsonutils.dumps(inputs_result),
**params_result)
except Exception as ex:
raise exception.ResourceFailure(ex, self)
executions = [execution.id]
if self.EXECUTIONS in self.data():
executions.extend(self.data().get(self.EXECUTIONS).split(','))
self.data_set(self.EXECUTIONS, ','.join(executions))
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
update_allowed = [self.INPUT, self.PARAMS, self.DESCRIPTION]
for prop in update_allowed:
if prop in prop_diff:
del prop_diff[prop]
if len(prop_diff) > 0:
props = json_snippet.properties(self.properties_schema,
self.context)
new_props = self.prepare_properties(props)
try:
workflow = self.client().workflows.update(new_props)
except Exception as ex:
raise exception.ResourceFailure(ex, self)
self.data_set(self.NAME, workflow[0].name)
self.resource_id_set(workflow[0].name)
def _delete_executions(self):
if self.data().get(self.EXECUTIONS):
for id in self.data().get(self.EXECUTIONS).split(','):
with self.client_plugin().ignore_not_found:
self.client().executions.delete(id)
self.data_delete('executions')
def handle_delete(self):
self._delete_executions()
return super(Workflow, self).handle_delete()
def _resolve_attribute(self, name):
if name == self.EXECUTIONS:
if self.EXECUTIONS not in self.data():
return []
def parse_execution_response(execution):
return {
'id': execution.id,
'workflow_name': execution.workflow_name,
'created_at': execution.created_at,
'updated_at': execution.updated_at,
'state': execution.state,
'input': jsonutils.loads(six.text_type(execution.input)),
'output': jsonutils.loads(six.text_type(execution.output))
}
return [parse_execution_response(
self.client().executions.get(exec_id))
for exec_id in
self.data().get(self.EXECUTIONS).split(',')]
elif name == self.WORKFLOW_DATA:
return {self.NAME: self.resource_id,
self.INPUT: self.properties.get(self.INPUT)}
elif name == self.ALARM_URL and self.resource_id is not None:
return six.text_type(self._get_ec2_signed_url())
def resource_mapping():
return {
'OS::Mistral::Workflow': Workflow
}
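# Illustrative sketch only (not part of Heat): a simplified standalone mimic
# of what prepare_properties()/build_tasks() above produce, i.e. a Mistral
# DSL v2 document with a 'version' key plus one workflow keyed by name, and
# with task property names hyphenated. The workflow name, task names and
# actions below are made up for demonstration.
if __name__ == '__main__':
    demo_tasks = [
        {'name': 'say_hello', 'action': 'std.echo output="Hello"',
         'on_success': ['say_bye']},
        {'name': 'say_bye', 'action': 'std.echo output="Bye"'},
    ]
    definition = {
        'version': '2.0',
        'demo_workflow': {
            'type': 'direct',
            'tasks': {
                t['name']: {k.replace('_', '-'): v
                            for k, v in t.items() if k != 'name'}
                for t in demo_tasks
            },
        },
    }
    print(yaml.safe_dump(definition, default_flow_style=False))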
|
{
"content_hash": "474c0bc94679e3ba7c6f57e96c4b97c8",
"timestamp": "",
"source": "github",
"line_count": 593,
"max_line_length": 79,
"avg_line_length": 44.36424957841484,
"alnum_prop": 0.5008742587805991,
"repo_name": "cwolferh/heat-scratch",
"id": "7821e6c50317266d25b85567f4bbed10d59a7b3c",
"size": "26883",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "heat/engine/resources/openstack/mistral/workflow.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "8338769"
},
{
"name": "Shell",
"bytes": "56516"
}
],
"symlink_target": ""
}
|
"""
=============================================================
Online Latent Dirichlet Allocation with variational inference
=============================================================
This implementation is modified from Matthew D. Hoffman's onlineldavb code
Link: http://www.cs.princeton.edu/~mdhoffma/code/onlineldavb.tar
"""
# Author: Chyi-Kwei Yau
# Author: Matthew D. Hoffman (original onlineldavb implementation)
import numpy as np
import scipy.sparse as sp
from scipy.special import gammaln
from ..base import BaseEstimator, TransformerMixin
from ..utils import (check_random_state, check_array,
gen_batches, gen_even_slices, _get_n_jobs)
from ..utils.validation import check_non_negative
from ..utils.extmath import logsumexp
from ..externals.joblib import Parallel, delayed
from ..externals.six.moves import xrange
from ..exceptions import NotFittedError
from ._online_lda import (mean_change, _dirichlet_expectation_1d,
_dirichlet_expectation_2d)
EPS = np.finfo(np.float).eps
def _update_doc_distribution(X, exp_topic_word_distr, doc_topic_prior,
max_iters,
mean_change_tol, cal_sstats, random_state):
"""E-step: update document-topic distribution.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Document word matrix.
exp_topic_word_distr : dense matrix, shape=(n_topics, n_features)
        Exponential value of expectation of log topic word distribution.
In the literature, this is `exp(E[log(beta)])`.
doc_topic_prior : float
Prior of document topic distribution `theta`.
max_iters : int
Max number of iterations for updating document topic distribution in
the E-step.
mean_change_tol : float
        Stopping tolerance for updating document topic distribution in E-step.
cal_sstats : boolean
        Parameter that indicates whether to calculate sufficient statistics
        or not. Set `cal_sstats` to `True` when we need to run the M-step.
random_state : RandomState instance or None
        Parameter that indicates how to initialize the document topic
        distribution. Setting `random_state` to None will initialize the
        document topic distribution to a constant number.
Returns
-------
(doc_topic_distr, suff_stats) :
`doc_topic_distr` is unnormalized topic distribution for each document.
        In the literature, this is `gamma`. We can calculate `E[log(theta)]`
from it.
`suff_stats` is expected sufficient statistics for the M-step.
When `cal_sstats == False`, this will be None.
"""
is_sparse_x = sp.issparse(X)
n_samples, n_features = X.shape
n_topics = exp_topic_word_distr.shape[0]
if random_state:
doc_topic_distr = random_state.gamma(100., 0.01, (n_samples, n_topics))
else:
doc_topic_distr = np.ones((n_samples, n_topics))
# In the literature, this is `exp(E[log(theta)])`
exp_doc_topic = np.exp(_dirichlet_expectation_2d(doc_topic_distr))
# diff on `component_` (only calculate it when `cal_diff` is True)
suff_stats = np.zeros(exp_topic_word_distr.shape) if cal_sstats else None
if is_sparse_x:
X_data = X.data
X_indices = X.indices
X_indptr = X.indptr
for idx_d in xrange(n_samples):
if is_sparse_x:
ids = X_indices[X_indptr[idx_d]:X_indptr[idx_d + 1]]
cnts = X_data[X_indptr[idx_d]:X_indptr[idx_d + 1]]
else:
ids = np.nonzero(X[idx_d, :])[0]
cnts = X[idx_d, ids]
doc_topic_d = doc_topic_distr[idx_d, :]
# The next one is a copy, since the inner loop overwrites it.
exp_doc_topic_d = exp_doc_topic[idx_d, :].copy()
exp_topic_word_d = exp_topic_word_distr[:, ids]
# Iterate between `doc_topic_d` and `norm_phi` until convergence
for _ in xrange(0, max_iters):
last_d = doc_topic_d
# The optimal phi_{dwk} is proportional to
# exp(E[log(theta_{dk})]) * exp(E[log(beta_{dw})]).
norm_phi = np.dot(exp_doc_topic_d, exp_topic_word_d) + EPS
doc_topic_d = (exp_doc_topic_d *
np.dot(cnts / norm_phi, exp_topic_word_d.T))
# Note: adds doc_topic_prior to doc_topic_d, in-place.
_dirichlet_expectation_1d(doc_topic_d, doc_topic_prior,
exp_doc_topic_d)
if mean_change(last_d, doc_topic_d) < mean_change_tol:
break
doc_topic_distr[idx_d, :] = doc_topic_d
# Contribution of document d to the expected sufficient
# statistics for the M step.
if cal_sstats:
norm_phi = np.dot(exp_doc_topic_d, exp_topic_word_d) + EPS
suff_stats[:, ids] += np.outer(exp_doc_topic_d, cnts / norm_phi)
return (doc_topic_distr, suff_stats)
class LatentDirichletAllocation(BaseEstimator, TransformerMixin):
"""Latent Dirichlet Allocation with online variational Bayes algorithm
.. versionadded:: 0.17
Read more in the :ref:`User Guide <LatentDirichletAllocation>`.
Parameters
----------
n_topics : int, optional (default=10)
Number of topics.
doc_topic_prior : float, optional (default=None)
Prior of document topic distribution `theta`. If the value is None,
defaults to `1 / n_topics`.
In the literature, this is called `alpha`.
topic_word_prior : float, optional (default=None)
Prior of topic word distribution `beta`. If the value is None, defaults
to `1 / n_topics`.
In the literature, this is called `eta`.
learning_method : 'batch' | 'online', default='online'
Method used to update `_component`. Only used in `fit` method.
In general, if the data size is large, the online update will be much
faster than the batch update.
Valid options::
'batch': Batch variational Bayes method. Use all training data in
each EM update.
Old `components_` will be overwritten in each iteration.
'online': Online variational Bayes method. In each EM update, use
mini-batch of training data to update the ``components_``
variable incrementally. The learning rate is controlled by the
``learning_decay`` and the ``learning_offset`` parameters.
learning_decay : float, optional (default=0.7)
        It is a parameter that controls the learning rate in the online
        learning method. The value should be set between (0.5, 1.0] to
        guarantee asymptotic convergence. When the value is 0.0 and
        batch_size is ``n_samples``, the update method is the same as batch
        learning. In the literature, this is called kappa.
learning_offset : float, optional (default=10.)
A (positive) parameter that downweights early iterations in online
learning. It should be greater than 1.0. In the literature, this is
called tau_0.
max_iter : integer, optional (default=10)
The maximum number of iterations.
total_samples : int, optional (default=1e6)
Total number of documents. Only used in the `partial_fit` method.
batch_size : int, optional (default=128)
Number of documents to use in each EM iteration. Only used in online
learning.
evaluate_every : int optional (default=0)
How often to evaluate perplexity. Only used in `fit` method.
        Set it to 0 or a negative number to not evaluate perplexity in
        training at all. Evaluating perplexity can help you check convergence
        in the training process, but it will also increase total training time.
Evaluating perplexity in every iteration might increase training time
up to two-fold.
perp_tol : float, optional (default=1e-1)
Perplexity tolerance in batch learning. Only used when
``evaluate_every`` is greater than 0.
mean_change_tol : float, optional (default=1e-3)
Stopping tolerance for updating document topic distribution in E-step.
max_doc_update_iter : int (default=100)
Max number of iterations for updating document topic distribution in
the E-step.
n_jobs : int, optional (default=1)
The number of jobs to use in the E-step. If -1, all CPUs are used. For
``n_jobs`` below -1, (n_cpus + 1 + n_jobs) are used.
verbose : int, optional (default=0)
Verbosity level.
random_state : int or RandomState instance or None, optional (default=None)
Pseudo-random number generator seed control.
Attributes
----------
components_ : array, [n_topics, n_features]
Topic word distribution. ``components_[i, j]`` represents word j in
topic `i`. In the literature, this is called lambda.
n_batch_iter_ : int
Number of iterations of the EM step.
n_iter_ : int
Number of passes over the dataset.
References
----------
[1] "Online Learning for Latent Dirichlet Allocation", Matthew D. Hoffman,
David M. Blei, Francis Bach, 2010
[2] "Stochastic Variational Inference", Matthew D. Hoffman, David M. Blei,
Chong Wang, John Paisley, 2013
[3] Matthew D. Hoffman's onlineldavb code. Link:
http://www.cs.princeton.edu/~mdhoffma/code/onlineldavb.tar
"""
def __init__(self, n_topics=10, doc_topic_prior=None,
topic_word_prior=None, learning_method='online',
learning_decay=.7, learning_offset=10., max_iter=10,
batch_size=128, evaluate_every=-1, total_samples=1e6,
perp_tol=1e-1, mean_change_tol=1e-3, max_doc_update_iter=100,
n_jobs=1, verbose=0, random_state=None):
self.n_topics = n_topics
self.doc_topic_prior = doc_topic_prior
self.topic_word_prior = topic_word_prior
self.learning_method = learning_method
self.learning_decay = learning_decay
self.learning_offset = learning_offset
self.max_iter = max_iter
self.batch_size = batch_size
self.evaluate_every = evaluate_every
self.total_samples = total_samples
self.perp_tol = perp_tol
self.mean_change_tol = mean_change_tol
self.max_doc_update_iter = max_doc_update_iter
self.n_jobs = n_jobs
self.verbose = verbose
self.random_state = random_state
def _check_params(self):
"""Check model parameters."""
if self.n_topics <= 0:
raise ValueError("Invalid 'n_topics' parameter: %r"
% self.n_topics)
if self.total_samples <= 0:
raise ValueError("Invalid 'total_samples' parameter: %r"
% self.total_samples)
if self.learning_offset < 0:
raise ValueError("Invalid 'learning_offset' parameter: %r"
% self.learning_offset)
if self.learning_method not in ("batch", "online"):
raise ValueError("Invalid 'learning_method' parameter: %r"
% self.learning_method)
def _init_latent_vars(self, n_features):
"""Initialize latent variables."""
self.random_state_ = check_random_state(self.random_state)
self.n_batch_iter_ = 1
self.n_iter_ = 0
if self.doc_topic_prior is None:
self.doc_topic_prior_ = 1. / self.n_topics
else:
self.doc_topic_prior_ = self.doc_topic_prior
if self.topic_word_prior is None:
self.topic_word_prior_ = 1. / self.n_topics
else:
self.topic_word_prior_ = self.topic_word_prior
init_gamma = 100.
init_var = 1. / init_gamma
# In the literature, this is called `lambda`
self.components_ = self.random_state_.gamma(
init_gamma, init_var, (self.n_topics, n_features))
# In the literature, this is `exp(E[log(beta)])`
self.exp_dirichlet_component_ = np.exp(
_dirichlet_expectation_2d(self.components_))
def _e_step(self, X, cal_sstats, random_init, parallel=None):
"""E-step in EM update.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Document word matrix.
cal_sstats : boolean
        Parameter that indicates whether to calculate sufficient statistics
        or not. Set ``cal_sstats`` to True when we need to run the M-step.
random_init : boolean
        Parameter that indicates whether to initialize the document topic
        distribution randomly in the E-step. Set it to True in training
steps.
parallel : joblib.Parallel (optional)
Pre-initialized instance of joblib.Parallel.
Returns
-------
(doc_topic_distr, suff_stats) :
        `doc_topic_distr` is unnormalized topic distribution for each
document. In the literature, this is called `gamma`.
`suff_stats` is expected sufficient statistics for the M-step.
When `cal_sstats == False`, it will be None.
"""
# Run e-step in parallel
random_state = self.random_state_ if random_init else None
# TODO: make Parallel._effective_n_jobs public instead?
n_jobs = _get_n_jobs(self.n_jobs)
if parallel is None:
parallel = Parallel(n_jobs=n_jobs, verbose=self.verbose)
results = parallel(
delayed(_update_doc_distribution)(X[idx_slice, :],
self.exp_dirichlet_component_,
self.doc_topic_prior_,
self.max_doc_update_iter,
self.mean_change_tol, cal_sstats,
random_state)
for idx_slice in gen_even_slices(X.shape[0], n_jobs))
# merge result
doc_topics, sstats_list = zip(*results)
doc_topic_distr = np.vstack(doc_topics)
if cal_sstats:
# This step finishes computing the sufficient statistics for the
# M-step.
suff_stats = np.zeros(self.components_.shape)
for sstats in sstats_list:
suff_stats += sstats
suff_stats *= self.exp_dirichlet_component_
else:
suff_stats = None
return (doc_topic_distr, suff_stats)
def _em_step(self, X, total_samples, batch_update, parallel=None):
"""EM update for 1 iteration.
update `_component` by batch VB or online VB.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Document word matrix.
total_samples : integer
        Total number of documents. It is only used when
batch_update is `False`.
batch_update : boolean
Parameter that controls updating method.
`True` for batch learning, `False` for online learning.
parallel : joblib.Parallel
Pre-initialized instance of joblib.Parallel
Returns
-------
doc_topic_distr : array, shape=(n_samples, n_topics)
Unnormalized document topic distribution.
"""
# E-step
_, suff_stats = self._e_step(X, cal_sstats=True, random_init=True,
parallel=parallel)
# M-step
if batch_update:
self.components_ = self.topic_word_prior_ + suff_stats
else:
# online update
# In the literature, the weight is `rho`
weight = np.power(self.learning_offset + self.n_batch_iter_,
-self.learning_decay)
doc_ratio = float(total_samples) / X.shape[0]
self.components_ *= (1 - weight)
self.components_ += (weight * (self.topic_word_prior_
+ doc_ratio * suff_stats))
# update `component_` related variables
self.exp_dirichlet_component_ = np.exp(
_dirichlet_expectation_2d(self.components_))
self.n_batch_iter_ += 1
return
def _check_non_neg_array(self, X, whom):
"""check X format
check X format and make sure no negative value in X.
Parameters
----------
X : array-like or sparse matrix
"""
X = check_array(X, accept_sparse='csr')
check_non_negative(X, whom)
return X
def partial_fit(self, X, y=None):
"""Online VB with Mini-Batch update.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Document word matrix.
Returns
-------
self
"""
self._check_params()
X = self._check_non_neg_array(X,
"LatentDirichletAllocation.partial_fit")
n_samples, n_features = X.shape
batch_size = self.batch_size
# initialize parameters or check
if not hasattr(self, 'components_'):
self._init_latent_vars(n_features)
if n_features != self.components_.shape[1]:
raise ValueError(
"The provided data has %d dimensions while "
"the model was trained with feature size %d." %
(n_features, self.components_.shape[1]))
n_jobs = _get_n_jobs(self.n_jobs)
with Parallel(n_jobs=n_jobs, verbose=self.verbose) as parallel:
for idx_slice in gen_batches(n_samples, batch_size):
self._em_step(X[idx_slice, :],
total_samples=self.total_samples,
batch_update=False,
parallel=parallel)
return self
def fit(self, X, y=None):
"""Learn model for the data X with variational Bayes method.
When `learning_method` is 'online', use mini-batch update.
Otherwise, use batch update.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Document word matrix.
Returns
-------
self
"""
self._check_params()
X = self._check_non_neg_array(X, "LatentDirichletAllocation.fit")
n_samples, n_features = X.shape
max_iter = self.max_iter
evaluate_every = self.evaluate_every
learning_method = self.learning_method
batch_size = self.batch_size
# initialize parameters
self._init_latent_vars(n_features)
# change to perplexity later
last_bound = None
n_jobs = _get_n_jobs(self.n_jobs)
with Parallel(n_jobs=n_jobs, verbose=self.verbose) as parallel:
for i in xrange(max_iter):
if learning_method == 'online':
for idx_slice in gen_batches(n_samples, batch_size):
self._em_step(X[idx_slice, :], total_samples=n_samples,
batch_update=False, parallel=parallel)
else:
# batch update
self._em_step(X, total_samples=n_samples,
batch_update=True, parallel=parallel)
# check perplexity
if evaluate_every > 0 and (i + 1) % evaluate_every == 0:
doc_topics_distr, _ = self._e_step(X, cal_sstats=False,
random_init=False)
bound = self.perplexity(X, doc_topics_distr,
sub_sampling=False)
if self.verbose:
print('iteration: %d, perplexity: %.4f'
% (i + 1, bound))
if last_bound and abs(last_bound - bound) < self.perp_tol:
break
last_bound = bound
self.n_iter_ += 1
return self
def transform(self, X):
"""Transform data X according to the fitted model.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Document word matrix.
Returns
-------
doc_topic_distr : shape=(n_samples, n_topics)
Document topic distribution for X.
"""
if not hasattr(self, 'components_'):
raise NotFittedError("no 'components_' attribute in model."
" Please fit model first.")
# make sure feature size is the same in fitted model and in X
X = self._check_non_neg_array(X, "LatentDirichletAllocation.transform")
n_samples, n_features = X.shape
if n_features != self.components_.shape[1]:
raise ValueError(
"The provided data has %d dimensions while "
"the model was trained with feature size %d." %
(n_features, self.components_.shape[1]))
doc_topic_distr, _ = self._e_step(X, cal_sstats=False,
random_init=False)
return doc_topic_distr
def _approx_bound(self, X, doc_topic_distr, sub_sampling):
"""Estimate the variational bound.
Estimate the variational bound over "all documents" using only the
documents passed in as X. Since log-likelihood of each word cannot
be computed directly, we use this bound to estimate it.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Document word matrix.
doc_topic_distr : array, shape=(n_samples, n_topics)
Document topic distribution. In the literature, this is called
gamma.
sub_sampling : boolean, optional, (default=False)
Compensate for subsampling of documents.
        It is used to calculate the bound in online learning.
Returns
-------
score : float
"""
def _loglikelihood(prior, distr, dirichlet_distr, size):
# calculate log-likelihood
score = np.sum((prior - distr) * dirichlet_distr)
score += np.sum(gammaln(distr) - gammaln(prior))
score += np.sum(gammaln(prior * size) - gammaln(np.sum(distr, 1)))
return score
is_sparse_x = sp.issparse(X)
n_samples, n_topics = doc_topic_distr.shape
n_features = self.components_.shape[1]
score = 0
dirichlet_doc_topic = _dirichlet_expectation_2d(doc_topic_distr)
dirichlet_component_ = _dirichlet_expectation_2d(self.components_)
doc_topic_prior = self.doc_topic_prior_
topic_word_prior = self.topic_word_prior_
if is_sparse_x:
X_data = X.data
X_indices = X.indices
X_indptr = X.indptr
# E[log p(docs | theta, beta)]
for idx_d in xrange(0, n_samples):
if is_sparse_x:
ids = X_indices[X_indptr[idx_d]:X_indptr[idx_d + 1]]
cnts = X_data[X_indptr[idx_d]:X_indptr[idx_d + 1]]
else:
ids = np.nonzero(X[idx_d, :])[0]
cnts = X[idx_d, ids]
temp = (dirichlet_doc_topic[idx_d, :, np.newaxis]
+ dirichlet_component_[:, ids])
norm_phi = logsumexp(temp)
score += np.dot(cnts, norm_phi)
# compute E[log p(theta | alpha) - log q(theta | gamma)]
score += _loglikelihood(doc_topic_prior, doc_topic_distr,
dirichlet_doc_topic, self.n_topics)
# Compensate for the subsampling of the population of documents
if sub_sampling:
doc_ratio = float(self.total_samples) / n_samples
score *= doc_ratio
# E[log p(beta | eta) - log q (beta | lambda)]
score += _loglikelihood(topic_word_prior, self.components_,
dirichlet_component_, n_features)
return score
def score(self, X, y=None):
"""Calculate approximate log-likelihood as score.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Document word matrix.
Returns
-------
score : float
Use approximate bound as score.
"""
X = self._check_non_neg_array(X, "LatentDirichletAllocation.score")
doc_topic_distr = self.transform(X)
score = self._approx_bound(X, doc_topic_distr, sub_sampling=False)
return score
def perplexity(self, X, doc_topic_distr=None, sub_sampling=False):
"""Calculate approximate perplexity for data X.
Perplexity is defined as exp(-1. * log-likelihood per word)
Parameters
----------
X : array-like or sparse matrix, [n_samples, n_features]
Document word matrix.
doc_topic_distr : None or array, shape=(n_samples, n_topics)
Document topic distribution.
If it is None, it will be generated by applying transform on X.
Returns
-------
score : float
Perplexity score.
"""
if not hasattr(self, 'components_'):
raise NotFittedError("no 'components_' attribute in model."
" Please fit model first.")
X = self._check_non_neg_array(X,
"LatentDirichletAllocation.perplexity")
if doc_topic_distr is None:
doc_topic_distr = self.transform(X)
else:
n_samples, n_topics = doc_topic_distr.shape
if n_samples != X.shape[0]:
raise ValueError("Number of samples in X and doc_topic_distr"
" do not match.")
if n_topics != self.n_topics:
raise ValueError("Number of topics does not match.")
current_samples = X.shape[0]
bound = self._approx_bound(X, doc_topic_distr, sub_sampling)
if sub_sampling:
word_cnt = X.sum() * (float(self.total_samples) / current_samples)
else:
word_cnt = X.sum()
perword_bound = bound / word_cnt
return np.exp(-1.0 * perword_bound)
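# Minimal usage sketch (illustrative only; the data below is random counts,
# not a real corpus): fit the estimator defined above, then look at the
# normalized document-topic distribution and the perplexity.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    X_demo = rng.randint(0, 5, size=(20, 30))
    lda = LatentDirichletAllocation(n_topics=5, learning_method='batch',
                                    max_iter=5, random_state=0)
    doc_topic = lda.fit(X_demo).transform(X_demo)
    doc_topic = doc_topic / doc_topic.sum(axis=1)[:, np.newaxis]
    print(doc_topic.shape)          # (20, 5)
    print(lda.perplexity(X_demo))   # approximate perplexity on the same data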
|
{
"content_hash": "b703977eb1d3deaeb6aceaa63ba677f1",
"timestamp": "",
"source": "github",
"line_count": 709,
"max_line_length": 79,
"avg_line_length": 37.44005641748942,
"alnum_prop": 0.571294029007346,
"repo_name": "nmayorov/scikit-learn",
"id": "42bef1968fd9023b1d3a95533f418e9e27c4651a",
"size": "26545",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "sklearn/decomposition/online_lda.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1786"
},
{
"name": "C",
"bytes": "385829"
},
{
"name": "C++",
"bytes": "139482"
},
{
"name": "Makefile",
"bytes": "1388"
},
{
"name": "PowerShell",
"bytes": "13427"
},
{
"name": "Python",
"bytes": "5344495"
},
{
"name": "Shell",
"bytes": "4031"
}
],
"symlink_target": ""
}
|
"Memcached cache backend"
import pickle
import re
import time
from django.core.cache.backends.base import DEFAULT_TIMEOUT, BaseCache
from django.utils.functional import cached_property
class BaseMemcachedCache(BaseCache):
def __init__(self, server, params, library, value_not_found_exception):
super().__init__(params)
if isinstance(server, str):
self._servers = re.split('[;,]', server)
else:
self._servers = server
# The exception type to catch from the underlying library for a key
# that was not found. This is a ValueError for python-memcache,
# pylibmc.NotFound for pylibmc, and cmemcache will return None without
# raising an exception.
self.LibraryValueNotFoundException = value_not_found_exception
self._lib = library
self._options = params.get('OPTIONS') or {}
@property
def _cache(self):
"""
Implement transparent thread-safe access to a memcached client.
"""
if getattr(self, '_client', None) is None:
self._client = self._lib.Client(self._servers, **self._options)
return self._client
def get_backend_timeout(self, timeout=DEFAULT_TIMEOUT):
"""
Memcached deals with long (> 30 days) timeouts in a special
way. Call this function to obtain a safe value for your timeout.
"""
if timeout == DEFAULT_TIMEOUT:
timeout = self.default_timeout
if timeout is None:
# Using 0 in memcache sets a non-expiring timeout.
return 0
elif int(timeout) == 0:
# Other cache backends treat 0 as set-and-expire. To achieve this
# in memcache backends, a negative timeout must be passed.
timeout = -1
if timeout > 2592000: # 60*60*24*30, 30 days
# See https://github.com/memcached/memcached/wiki/Programming#expiration
# "Expiration times can be set from 0, meaning "never expire", to
# 30 days. Any time higher than 30 days is interpreted as a Unix
# timestamp date. If you want to expire an object on January 1st of
# next year, this is how you do that."
#
# This means that we have to switch to absolute timestamps.
timeout += int(time.time())
return int(timeout)
def add(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
key = self.make_key(key, version=version)
return self._cache.add(key, value, self.get_backend_timeout(timeout))
def get(self, key, default=None, version=None):
key = self.make_key(key, version=version)
return self._cache.get(key, default)
def set(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
key = self.make_key(key, version=version)
if not self._cache.set(key, value, self.get_backend_timeout(timeout)):
# make sure the key doesn't keep its old value in case of failure to set (memcached's 1MB limit)
self._cache.delete(key)
def delete(self, key, version=None):
key = self.make_key(key, version=version)
return bool(self._cache.delete(key))
def get_many(self, keys, version=None):
key_map = {self.make_key(key, version=version): key for key in keys}
ret = self._cache.get_multi(key_map.keys())
return {key_map[k]: v for k, v in ret.items()}
def close(self, **kwargs):
# Many clients don't clean up connections properly.
self._cache.disconnect_all()
def incr(self, key, delta=1, version=None):
key = self.make_key(key, version=version)
# memcached doesn't support a negative delta
if delta < 0:
return self._cache.decr(key, -delta)
try:
val = self._cache.incr(key, delta)
# python-memcache responds to incr on nonexistent keys by
# raising a ValueError, pylibmc by raising a pylibmc.NotFound
# and Cmemcache returns None. In all cases,
# we should raise a ValueError though.
except self.LibraryValueNotFoundException:
val = None
if val is None:
raise ValueError("Key '%s' not found" % key)
return val
def decr(self, key, delta=1, version=None):
key = self.make_key(key, version=version)
# memcached doesn't support a negative delta
if delta < 0:
return self._cache.incr(key, -delta)
try:
val = self._cache.decr(key, delta)
# python-memcache responds to incr on nonexistent keys by
# raising a ValueError, pylibmc by raising a pylibmc.NotFound
# and Cmemcache returns None. In all cases,
# we should raise a ValueError though.
except self.LibraryValueNotFoundException:
val = None
if val is None:
raise ValueError("Key '%s' not found" % key)
return val
def set_many(self, data, timeout=DEFAULT_TIMEOUT, version=None):
safe_data = {}
original_keys = {}
for key, value in data.items():
safe_key = self.make_key(key, version=version)
safe_data[safe_key] = value
original_keys[safe_key] = key
failed_keys = self._cache.set_multi(safe_data, self.get_backend_timeout(timeout))
return [original_keys[k] for k in failed_keys]
def delete_many(self, keys, version=None):
self._cache.delete_multi(self.make_key(key, version=version) for key in keys)
def clear(self):
self._cache.flush_all()
class MemcachedCache(BaseMemcachedCache):
"An implementation of a cache binding using python-memcached"
def __init__(self, server, params):
import memcache
super().__init__(server, params, library=memcache, value_not_found_exception=ValueError)
@property
def _cache(self):
if getattr(self, '_client', None) is None:
client_kwargs = {'pickleProtocol': pickle.HIGHEST_PROTOCOL}
client_kwargs.update(self._options)
self._client = self._lib.Client(self._servers, **client_kwargs)
return self._client
def touch(self, key, timeout=DEFAULT_TIMEOUT, version=None):
key = self.make_key(key, version=version)
return self._cache.touch(key, self.get_backend_timeout(timeout)) != 0
def get(self, key, default=None, version=None):
key = self.make_key(key, version=version)
val = self._cache.get(key)
# python-memcached doesn't support default values in get().
# https://github.com/linsomniac/python-memcached/issues/159
# Remove this method if that issue is fixed.
if val is None:
return default
return val
def delete(self, key, version=None):
# python-memcached's delete() returns True when key doesn't exist.
# https://github.com/linsomniac/python-memcached/issues/170
# Call _deletetouch() without the NOT_FOUND in expected results.
key = self.make_key(key, version=version)
return bool(self._cache._deletetouch([b'DELETED'], 'delete', key))
class PyLibMCCache(BaseMemcachedCache):
"An implementation of a cache binding using pylibmc"
def __init__(self, server, params):
import pylibmc
super().__init__(server, params, library=pylibmc, value_not_found_exception=pylibmc.NotFound)
@cached_property
def _cache(self):
return self._lib.Client(self._servers, **self._options)
def touch(self, key, timeout=DEFAULT_TIMEOUT, version=None):
key = self.make_key(key, version=version)
if timeout == 0:
return self._cache.delete(key)
return self._cache.touch(key, self.get_backend_timeout(timeout))
def close(self, **kwargs):
# libmemcached manages its own connections. Don't call disconnect_all()
# as it resets the failover state and creates unnecessary reconnects.
pass
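# Illustrative sketch (not part of Django): demonstrates the timeout policy of
# BaseMemcachedCache.get_backend_timeout() above -- None becomes 0 ("never
# expire"), 0 becomes -1 (expire immediately), and values over 30 days become
# an absolute Unix timestamp. The dummy subclass and server string are
# placeholders; no memcached connection is made.
if __name__ == '__main__':
    class _TimeoutDemo(BaseMemcachedCache):
        def __init__(self):
            super().__init__('127.0.0.1:11211', {}, library=None,
                             value_not_found_exception=ValueError)

    demo = _TimeoutDemo()
    for value in (None, 0, 60, 2592000 + 1):
        print(value, '->', demo.get_backend_timeout(value))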
|
{
"content_hash": "1f8d0f56028e8682775b5353fcdc5764",
"timestamp": "",
"source": "github",
"line_count": 200,
"max_line_length": 108,
"avg_line_length": 39.885,
"alnum_prop": 0.629058543312022,
"repo_name": "simonw/django",
"id": "8202005045d359d3788fa86a443ff40d5e9b9f1f",
"size": "7977",
"binary": false,
"copies": "13",
"ref": "refs/heads/master",
"path": "django/core/cache/backends/memcached.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "85351"
},
{
"name": "HTML",
"bytes": "227641"
},
{
"name": "JavaScript",
"bytes": "258434"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "13501540"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "142"
}
],
"symlink_target": ""
}
|
"""
Sphinx plugins for Django documentation.
"""
import docutils.nodes
import docutils.transforms
import sphinx
import sphinx.addnodes
import sphinx.directives
import sphinx.environment
import sphinx.roles
from docutils import nodes
def setup(app):
app.add_crossref_type(
directivename="setting",
rolename="setting",
indextemplate="pair: %s; setting",
)
app.add_crossref_type(
directivename="templatetag",
rolename="ttag",
indextemplate="pair: %s; template tag",
)
app.add_crossref_type(
directivename="templatefilter",
rolename="tfilter",
indextemplate="pair: %s; template filter",
)
app.add_crossref_type(
directivename="fieldlookup",
rolename="lookup",
indextemplate="pair: %s, field lookup type",
)
app.add_description_unit(
directivename="django-admin",
rolename="djadmin",
indextemplate="pair: %s; django-admin command",
parse_node=parse_django_admin_node,
)
app.add_description_unit(
directivename="django-admin-option",
rolename="djadminopt",
indextemplate="pair: %s; django-admin command-line option",
parse_node=lambda env, sig, signode: \
sphinx.directives.parse_option_desc(signode, sig),
)
app.add_config_value('django_next_version', '0.0', True)
app.add_directive('versionadded', parse_version_directive, 1, (1, 1, 1))
app.add_directive('versionchanged', parse_version_directive, 1, (1, 1, 1))
app.add_transform(SuppressBlockquotes)
def parse_version_directive(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
env = state.document.settings.env
is_nextversion = env.config.django_next_version == arguments[0]
ret = []
node = sphinx.addnodes.versionmodified()
ret.append(node)
if not is_nextversion:
if len(arguments) == 1:
linktext = 'Please, see the release notes <releases-%s>' % (
arguments[0])
xrefs = sphinx.roles.xfileref_role('ref', linktext, linktext,
lineno, state)
node.extend(xrefs[0])
node['version'] = arguments[0]
else:
node['version'] = "Development version"
node['type'] = name
if len(arguments) == 2:
inodes, messages = state.inline_text(arguments[1], lineno + 1)
node.extend(inodes)
if content:
state.nested_parse(content, content_offset, node)
ret = ret + messages
env.note_versionchange(node['type'], node['version'], node, lineno)
return ret
class SuppressBlockquotes(docutils.transforms.Transform):
"""
Remove the default blockquotes that encase indented list, tables, etc.
"""
default_priority = 300
suppress_blockquote_child_nodes = (
docutils.nodes.bullet_list,
docutils.nodes.enumerated_list,
docutils.nodes.definition_list,
docutils.nodes.literal_block,
docutils.nodes.doctest_block,
docutils.nodes.line_block,
docutils.nodes.table,
)
def apply(self):
for node in self.document.traverse(docutils.nodes.block_quote):
if len(node.children) == 1 and \
isinstance(node.children[0],
self.suppress_blockquote_child_nodes):
node.replace_self(node.children[0])
def parse_django_admin_node(env, sig, signode):
command = sig.split(' ')[0]
env._django_curr_admin_command = command
title = "django-admin.py %s" % sig
signode += sphinx.addnodes.desc_name(title, title)
return sig
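# Example (illustrative, not executed) of how the cross-reference types
# registered in setup() above are used from the documentation's reST source:
#
#   .. setting:: DEBUG
#   .. templatetag:: for
#   .. django-admin:: runserver
#
#   See :setting:`DEBUG`, the :ttag:`for` tag and :djadmin:`runserver`.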
|
{
"content_hash": "ea3536fa78cce7113908ece2879db8d1",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 78,
"avg_line_length": 33.3125,
"alnum_prop": 0.6228893058161351,
"repo_name": "chrisspen/django-feeds",
"id": "b817a4f67793ccc95ef4d8c464c28f9eba8b4c96",
"size": "3731",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "docs/_ext/djangodocs.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "HTML",
"bytes": "725765"
},
{
"name": "Python",
"bytes": "250798"
}
],
"symlink_target": ""
}
|
import sys
import numpy
from numexpr import interpreter, expressions, use_vml, is_cpu_amd_intel
from numexpr.utils import CacheDict
# Declare a double type that does not exist in Python space
double = numpy.double
typecode_to_kind = {'b': 'bool', 'i': 'int', 'l': 'long', 'f': 'float',
'd': 'double', 'c': 'complex', 's': 'str', 'n' : 'none'}
kind_to_typecode = {'bool': 'b', 'int': 'i', 'long': 'l', 'float': 'f',
'double': 'd', 'complex': 'c', 'str': 's', 'none' : 'n'}
type_to_typecode = {bool: 'b', int: 'i', long:'l', float:'f',
double: 'd', complex: 'c', str: 's'}
type_to_kind = expressions.type_to_kind
kind_to_type = expressions.kind_to_type
default_type = kind_to_type[expressions.default_kind]
class ASTNode(object):
"""Abstract Syntax Tree node.
Members:
astType -- type of node (op, constant, variable, raw, or alias)
astKind -- the type of the result (bool, float, etc.)
value -- value associated with this node.
An opcode, numerical value, a variable name, etc.
children -- the children below this node
reg -- the register assigned to the result for this node.
"""
cmpnames = ['astType', 'astKind', 'value', 'children']
def __init__(self, astType='generic', astKind='unknown',
value=None, children=()):
object.__init__(self)
self.astType = astType
self.astKind = astKind
self.value = value
self.children = tuple(children)
self.reg = None
def __eq__(self, other):
if self.astType == 'alias':
self = self.value
if other.astType == 'alias':
other = other.value
if not isinstance(other, ASTNode):
return False
for name in self.cmpnames:
if getattr(self, name) != getattr(other, name):
return False
return True
def __hash__(self):
if self.astType == 'alias':
self = self.value
# Fast hash (see issue #43)
return ( hash(self.astType) ^ hash(self.astKind) ^
hash(self.value) ^ hash(self.children) )
def __str__(self):
return 'AST(%s, %s, %s, %s, %s)' % (self.astType, self.astKind,
self.value, self.children, self.reg)
def __repr__(self): return '<AST object at %s>' % id(self)
def key(self):
return (self.astType, self.astKind, self.value, self.children)
def typecode(self):
return kind_to_typecode[self.astKind]
def postorderWalk(self):
for c in self.children:
for w in c.postorderWalk():
yield w
yield self
def allOf(self, *astTypes):
astTypes = set(astTypes)
for w in self.postorderWalk():
if w.astType in astTypes:
yield w
def expressionToAST(ex):
"""Take an expression tree made out of expressions.ExpressionNode,
and convert to an AST tree.
This is necessary as ExpressionNode overrides many methods to act
like a number.
"""
this_ast = ASTNode(ex.astType, ex.astKind, ex.value,
[expressionToAST(c) for c in ex.children])
return this_ast
def sigPerms(s):
"""Generate all possible signatures derived by upcasting the given
signature.
"""
codes = 'bilfdc'
if not s:
yield ''
elif s[0] in codes:
start = codes.index(s[0])
for x in codes[start:]:
for y in sigPerms(s[1:]):
yield x + y
elif s[0] == 's': # numbers shall not be cast to strings
for y in sigPerms(s[1:]):
yield 's' + y
else:
yield s
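# Illustrative example (guarded so nothing runs at import time): upcasting
# walks the 'bilfdc' ladder independently for every argument, while 's'
# arguments are left untouched.
if __name__ == '__main__':
    print(list(sigPerms('bf')))
    # ['bf', 'bd', 'bc', 'if', 'id', 'ic', 'lf', 'ld', 'lc',
    #  'ff', 'fd', 'fc', 'df', 'dd', 'dc', 'cf', 'cd', 'cc']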
def typeCompileAst(ast):
"""Assign appropiate types to each node in the AST.
Will convert opcodes and functions to appropiate upcast version,
and add "cast" ops if needed.
"""
children = list(ast.children)
if ast.astType == 'op':
retsig = ast.typecode()
basesig = ''.join(x.typecode() for x in list(ast.children))
# Find some operation that will work on an acceptable casting of args.
for sig in sigPerms(basesig):
value = ast.value + '_' + retsig + sig
if value in interpreter.opcodes:
break
else:
for sig in sigPerms(basesig):
funcname = ast.value + '_' + retsig + sig
if funcname in interpreter.funccodes:
value = 'func_%sn' % (retsig+sig)
children += [ASTNode('raw', 'none',
interpreter.funccodes[funcname])]
break
else:
raise NotImplementedError(
"couldn't find matching opcode for '%s'"
% (ast.value + '_' + retsig+basesig))
# First just cast constants, then cast variables if necessary:
for i, (have, want) in enumerate(zip(basesig, sig)):
if have != want:
kind = typecode_to_kind[want]
if children[i].astType == 'constant':
children[i] = ASTNode('constant', kind, children[i].value)
else:
opname = "cast"
children[i] = ASTNode('op', kind, opname, [children[i]])
else:
value = ast.value
children = ast.children
new_ast = ASTNode(ast.astType, ast.astKind, value,
[typeCompileAst(c) for c in children])
return new_ast
class Register(object):
"""Abstraction for a register in the VM.
Members:
node -- the AST node this corresponds to
temporary -- True if this isn't an input or output
immediate -- not a register, but an immediate value
n -- the physical register number.
None if no number assigned yet.
"""
def __init__(self, astnode, temporary=False):
self.node = astnode
self.temporary = temporary
self.immediate = False
self.n = None
def __str__(self):
if self.temporary:
name = 'Temporary'
else:
name = 'Register'
return '%s(%s, %s, %s)' % (name, self.node.astType,
self.node.astKind, self.n,)
def __repr__(self):
return self.__str__()
class Immediate(Register):
"""Representation of an immediate (integer) operand, instead of
a register.
"""
def __init__(self, astnode):
Register.__init__(self, astnode)
self.immediate = True
def __str__(self):
return 'Immediate(%d)' % (self.node.value,)
def stringToExpression(s, types, context):
"""Given a string, convert it to a tree of ExpressionNode's.
"""
old_ctx = expressions._context.get_current_context()
try:
expressions._context.set_new_context(context)
# first compile to a code object to determine the names
c = compile(s, '<expr>', 'eval')
# make VariableNode's for the names
names = {}
for name in c.co_names:
if name == "None":
names[name] = None
elif name == "True":
names[name] = True
elif name == "False":
names[name] = False
else:
t = types.get(name, default_type)
names[name] = expressions.VariableNode(name, type_to_kind[t])
names.update(expressions.functions)
# now build the expression
ex = eval(c, names)
if expressions.isConstant(ex):
ex = expressions.ConstantNode(ex, expressions.getKind(ex))
elif not isinstance(ex, expressions.ExpressionNode):
raise TypeError("unsupported expression type: %s" % type(ex))
finally:
expressions._context.set_new_context(old_ctx)
return ex
def isReduction(ast):
return ast.value.startswith('sum_') or ast.value.startswith('prod_')
def getInputOrder(ast, input_order=None):
"""Derive the input order of the variables in an expression.
"""
variables = {}
for a in ast.allOf('variable'):
variables[a.value] = a
variable_names = set(variables.keys())
if input_order:
if variable_names != set(input_order):
raise ValueError("input names don't match those found in expression")
ordered_names = input_order
else:
ordered_names = list(variable_names)
ordered_names.sort()
ordered_variables = [variables[v] for v in ordered_names]
return ordered_variables
def convertConstantToKind(x, kind):
# Exception for 'float' types that will return the NumPy float32 type
if kind == 'float':
return numpy.float32(x)
return kind_to_type[kind](x)
def getConstants(ast):
const_map = {}
for a in ast.allOf('constant'):
const_map[(a.astKind, a.value)] = a
ordered_constants = const_map.keys()
ordered_constants.sort()
constants_order = [const_map[v] for v in ordered_constants]
constants = [convertConstantToKind(a.value, a.astKind)
for a in constants_order]
return constants_order, constants
def sortNodesByOrder(nodes, order):
order_map = {}
for i, (_, v, _) in enumerate(order):
order_map[v] = i
dec_nodes = [(order_map[n.value], n) for n in nodes]
dec_nodes.sort()
return [a[1] for a in dec_nodes]
def assignLeafRegisters(inodes, registerMaker):
"""Assign new registers to each of the leaf nodes.
"""
leafRegisters = {}
for node in inodes:
key = node.key()
if key in leafRegisters:
node.reg = leafRegisters[key]
else:
node.reg = leafRegisters[key] = registerMaker(node)
def assignBranchRegisters(inodes, registerMaker):
"""Assign temporary registers to each of the branch nodes.
"""
for node in inodes:
node.reg = registerMaker(node, temporary=True)
def collapseDuplicateSubtrees(ast):
"""Common subexpression elimination.
"""
seen = {}
aliases = []
for a in ast.allOf('op'):
if a in seen:
target = seen[a]
a.astType = 'alias'
a.value = target
a.children = ()
aliases.append(a)
else:
seen[a] = a
# Set values and registers so optimizeTemporariesAllocation
# doesn't get confused
for a in aliases:
while a.value.astType == 'alias':
a.value = a.value.value
a.reg = a.value.reg
def optimizeTemporariesAllocation(ast):
"""Attempt to minimize the number of temporaries needed, by
reusing old ones.
"""
nodes = list(x for x in ast.postorderWalk() if x.reg.temporary)
users_of = dict((n.reg, set()) for n in nodes)
if nodes and nodes[-1] is not ast:
for c in ast.children:
if c.reg.temporary:
users_of[c.reg].add(ast)
for n in reversed(nodes):
for c in n.children:
if c.reg.temporary:
users_of[c.reg].add(n)
unused = {'bool' : set(), 'int' : set(), 'long': set(), 'float' : set(),
'double': set(), 'complex' : set(), 'str': set()}
for n in nodes:
for reg, users in users_of.iteritems():
if n in users:
users.remove(n)
if not users:
unused[reg.node.astKind].add(reg)
if unused[n.astKind]:
reg = unused[n.astKind].pop()
users_of[reg] = users_of[n.reg]
n.reg = reg
def setOrderedRegisterNumbers(order, start):
"""Given an order of nodes, assign register numbers.
"""
for i, node in enumerate(order):
node.reg.n = start + i
return start + len(order)
def setRegisterNumbersForTemporaries(ast, start):
"""Assign register numbers for temporary registers, keeping track of
aliases and handling immediate operands.
"""
seen = 0
signature = ''
aliases = []
for node in ast.postorderWalk():
if node.astType == 'alias':
aliases.append(node)
node = node.value
if node.reg.immediate:
node.reg.n = node.value
continue
reg = node.reg
if reg.n < 0:
reg.n = start + seen
seen += 1
signature += reg.node.typecode()
for node in aliases:
node.reg = node.value.reg
return start + seen, signature
def convertASTtoThreeAddrForm(ast):
"""Convert an AST to a three address form.
Three address form is (op, reg1, reg2, reg3), where reg1 is the
destination of the result of the instruction.
I suppose this should be called three register form, but three
address form is found in compiler theory.
"""
program = []
for node in ast.allOf('op'):
children = node.children
instr = (node.value, node.reg) \
+ tuple([c.reg for c in children])
program.append(instr)
return program
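# --- Editorial sketch, not part of the original source: the shape of the
# three-address program built above.  For an expression like "2*a + 3*b"
# the list looks roughly like (register numbers purely illustrative):
#
#     [('mul_ddd', t3, c1[2.0], r1[a]),
#      ('mul_ddd', t4, c2[3.0], r2[b]),
#      ('add_ddd', r0, t3, t4)]
#
# i.e. each tuple is (opcode, destination register, source registers...),
# which compileThreeAddrForm() below packs into the byte string that the
# virtual machine executes.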
def compileThreeAddrForm(program):
"""Given a three address form of the program, compile it a string that
the VM understands.
"""
def nToChr(reg):
if reg is None:
return '\xff'
elif reg.n < 0:
raise ValueError("negative value for register number %s" % (reg.n,))
else:
return chr(reg.n)
def quadrupleToString(opcode, store, a1=None, a2=None):
cop = chr(interpreter.opcodes[opcode])
cs = nToChr(store)
ca1 = nToChr(a1)
ca2 = nToChr(a2)
return cop + cs + ca1 + ca2
def toString(*args):
while len(args) < 4:
args += (None,)
opcode, store, a1, a2 = args[0:4]
s = quadrupleToString(opcode, store, a1, a2)
l = [s]
args = args[4:]
while args:
s = quadrupleToString('noop', *args[:3])
l.append(s)
args = args[3:]
return ''.join(l)
prog_str = ''.join([toString(*t) for t in program])
return prog_str
context_info = [
('optimization', ('none', 'moderate', 'aggressive'), 'aggressive'),
]
def getContext(map):
context = {}
for name, allowed, default in context_info:
value = map.pop(name, default)
if value in allowed:
context[name] = value
else:
raise ValueError("'%s' must be one of %s" % (name, allowed))
if map:
raise ValueError("Unknown keyword argument '%s'" % map.popitem()[0])
return context
def precompile(ex, signature=(), copy_args=(), **kwargs):
"""Compile the expression to an intermediate form.
"""
types = dict(signature)
input_order = [name for (name, type_) in signature]
context = getContext(kwargs)
if isinstance(ex, str):
ex = stringToExpression(ex, types, context)
# the AST is like the expression, but the node objects don't have
# any odd interpretations
ast = expressionToAST(ex)
# Add a copy for strided or unaligned unidimensional arrays
for a in ast.postorderWalk():
if a.astType == "variable" and a.value in copy_args:
newVar = ASTNode(*a.key())
a.astType, a.value, a.children = ('op', 'copy', (newVar,))
    if ex.astType not in ('op',):
ast = ASTNode('op', value='copy', astKind=ex.astKind, children=(ast,))
ast = typeCompileAst(ast)
reg_num = [-1]
def registerMaker(node, temporary=False):
reg = Register(node, temporary=temporary)
reg.n = reg_num[0]
reg_num[0] -= 1
return reg
assignLeafRegisters(ast.allOf('raw'), Immediate)
assignLeafRegisters(ast.allOf('variable', 'constant'), registerMaker)
assignBranchRegisters(ast.allOf('op'), registerMaker)
collapseDuplicateSubtrees(ast)
input_order = getInputOrder(ast, input_order)
constants_order, constants = getConstants(ast)
if isReduction(ast):
ast.reg.temporary = False
optimizeTemporariesAllocation(ast)
ast.reg.temporary = False
r_output = 0
ast.reg.n = 0
r_inputs = r_output + 1
r_constants = setOrderedRegisterNumbers(input_order, r_inputs)
r_temps = setOrderedRegisterNumbers(constants_order, r_constants)
r_end, tempsig = setRegisterNumbersForTemporaries(ast, r_temps)
threeAddrProgram = convertASTtoThreeAddrForm(ast)
input_names = tuple([a.value for a in input_order])
signature = ''.join(type_to_typecode[types.get(x, default_type)]
for x in input_names)
return threeAddrProgram, signature, tempsig, constants, input_names
def NumExpr(ex, signature=(), copy_args=(), **kwargs):
"""
Compile an expression built using E.<variable> variables to a function.
ex can also be specified as a string "2*a+3*b".
The order of the input variables and their types can be specified using the
signature parameter, which is a list of (name, type) pairs.
Returns a `NumExpr` object containing the compiled function.
"""
threeAddrProgram, inputsig, tempsig, constants, input_names = \
precompile(ex, signature, copy_args, **kwargs)
program = compileThreeAddrForm(threeAddrProgram)
return interpreter.NumExpr(inputsig, tempsig, program, constants,
input_names)
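# --- Editorial sketch, not part of the original source: typical use of the
# NumExpr() factory above.  ``double`` is the module-level alias for 64-bit
# floats (see getType() below); the signature fixes argument order and types.
def _example_NumExpr():
    expr = NumExpr("2*a + 3*b", signature=[('a', double), ('b', double)])
    a = numpy.arange(5.0)
    b = numpy.ones(5)
    return expr(a, b)  # array([ 3.,  5.,  7.,  9., 11.])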
def disassemble(nex):
"""
Given a NumExpr object, return a list which is the program disassembled.
"""
rev_opcodes = {}
for op in interpreter.opcodes:
rev_opcodes[interpreter.opcodes[op]] = op
r_constants = 1 + len(nex.signature)
r_temps = r_constants + len(nex.constants)
def getArg(pc, offset):
arg = ord(nex.program[pc+offset])
op = rev_opcodes.get(ord(nex.program[pc]))
try:
code = op.split('_')[1][offset-1]
except IndexError:
return None
if arg == 255:
return None
if code != 'n':
if arg == 0:
return 'r0'
elif arg < r_constants:
return 'r%d[%s]' % (arg, nex.input_names[arg-1])
elif arg < r_temps:
return 'c%d[%s]' % (arg, nex.constants[arg - r_constants])
else:
return 't%d' % (arg,)
else:
return arg
source = []
for pc in range(0, len(nex.program), 4):
op = rev_opcodes.get(ord(nex.program[pc]))
dest = getArg(pc, 1)
arg1 = getArg(pc, 2)
arg2 = getArg(pc, 3)
source.append( (op, dest, arg1, arg2) )
return source
def getType(a):
kind = a.dtype.kind
if kind == 'b':
return bool
if kind in 'iu':
if a.dtype.itemsize > 4:
return long # ``long`` is for integers of more than 32 bits
if kind == 'u' and a.dtype.itemsize == 4:
return long # use ``long`` here as an ``int`` is not enough
return int
if kind == 'f':
if a.dtype.itemsize > 4:
return double # ``double`` is for floats of more than 32 bits
return float
if kind == 'c':
return complex
if kind == 'S':
return str
raise ValueError("unkown type %s" % a.dtype.name)
def getExprNames(text, context):
ex = stringToExpression(text, {}, context)
ast = expressionToAST(ex)
input_order = getInputOrder(ast, None)
    # try to figure out if VML operations are used by the expression
if not use_vml:
ex_uses_vml = False
else:
for node in ast.postorderWalk():
if node.astType == 'op' \
and node.value in ['sin', 'cos', 'exp', 'log',
'expm1', 'log1p',
'pow', 'div',
'sqrt', 'inv',
'sinh', 'cosh', 'tanh',
'arcsin', 'arccos', 'arctan',
'arccosh', 'arcsinh', 'arctanh',
'arctan2', 'abs']:
ex_uses_vml = True
break
else:
ex_uses_vml = False
return [a.value for a in input_order], ex_uses_vml
# Dictionaries for caching variable names and compiled expressions
_names_cache = CacheDict(256)
_numexpr_cache = CacheDict(256)
def evaluate(ex, local_dict=None, global_dict=None, **kwargs):
"""Evaluate a simple array expression element-wise.
ex is a string forming an expression, like "2*a+3*b". The values for "a"
and "b" will by default be taken from the calling function's frame
    (through use of sys._getframe()). Alternatively, they can be specified
using the 'local_dict' or 'global_dict' arguments.
"""
if not isinstance(ex, str):
raise ValueError("must specify expression as a string")
# Get the names for this expression
expr_key = (ex, tuple(sorted(kwargs.items())))
if expr_key not in _names_cache:
context = getContext(kwargs)
_names_cache[expr_key] = getExprNames(ex, context)
names, ex_uses_vml = _names_cache[expr_key]
# Get the arguments based on the names.
call_frame = sys._getframe(1)
if local_dict is None:
local_dict = call_frame.f_locals
if global_dict is None:
global_dict = call_frame.f_globals
arguments = []
copy_args = []
for name in names:
try:
a = local_dict[name]
except KeyError:
a = global_dict[name]
b = numpy.asarray(a)
# Byteswapped arrays are dealt with in the extension
# All the opcodes can deal with strided arrays directly as
        # long as they are unidimensional (strides in other
        # dimensions are dealt with within the extension), so we don't
# need a copy for the strided case.
if not b.flags.aligned:
            # Only take action if the CPU is neither AMD nor Intel, as
            # those can deal with unaligned arrays very efficiently.
            # If using VML, do the copy, as the VML functions work
            # much faster with aligned arrays.
if not is_cpu_amd_intel or use_vml:
# For the unaligned case, we have two cases:
if b.ndim == 1:
# For unidimensional arrays we can use the copy
# opcode because it can deal with unaligned arrays
                    # as long as they are unidimensional with a
                    # possible stride (a very common case for
# recarrays). This can be up to 2x faster than
# doing a copy using NumPy.
copy_args.append(name)
else:
                    # For multidimensional unaligned arrays do a plain
                    # copy. We could refine this further and do a plain
                    # copy only when strides don't exist in dimensions
                    # other than the last one (a case supported by the
                    # copy opcode).
b = b.copy()
        elif use_vml and ex_uses_vml:
            # only make a copy of strided arrays if VML is in use
if not b.flags.contiguous:
if b.ndim == 1:
copy_args.append(name)
else:
b = b.copy()
arguments.append(b)
# Create a signature
signature = [(name, getType(arg)) for (name, arg) in zip(names, arguments)]
# Look up numexpr if possible. copy_args *must* be added to the key,
# just in case a non-copy expression is already in cache.
numexpr_key = expr_key + (tuple(signature),) + tuple(copy_args)
try:
compiled_ex = _numexpr_cache[numexpr_key]
except KeyError:
compiled_ex = _numexpr_cache[numexpr_key] = \
NumExpr(ex, signature, copy_args, **kwargs)
return compiled_ex(*arguments)
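# --- Editorial sketch, not part of the original source: evaluate() is the
# user-facing entry point.  Variable values are looked up in the caller's
# frame unless local_dict/global_dict are passed explicitly.
def _example_evaluate():
    a = numpy.arange(1e4)
    b = numpy.arange(1e4)
    # "a" and "b" are resolved from this function's locals via sys._getframe().
    return evaluate("a*b + 2*a")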
|
{
"content_hash": "73e8c18578e7858ee2490da5ba561a2a",
"timestamp": "",
"source": "github",
"line_count": 690,
"max_line_length": 81,
"avg_line_length": 34.7463768115942,
"alnum_prop": 0.5675495307612096,
"repo_name": "jsalvatier/numexpr",
"id": "b077fcc9cdaa9b38c312b30f2445ac8cceda1820",
"size": "23975",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "numexpr/necompiler.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "88197"
},
{
"name": "Python",
"bytes": "116081"
}
],
"symlink_target": ""
}
|
from conans import ConanFile, CMake
import os
channel = os.getenv("CONAN_CHANNEL", "testing")
username = os.getenv("CONAN_USERNAME", "memsharded")
class ZMQTestConan(ConanFile):
settings = "os", "compiler", "build_type", "arch"
requires = "libzmq/4.2.0@%s/%s" % (username, channel)
generators = "cmake"
def build(self):
cmake = CMake(self.settings)
self.run('cmake "%s" %s' % (self.conanfile_directory, cmake.command_line))
self.run("cmake --build . %s" % cmake.build_config)
def imports(self):
self.copy("*.dll", "bin", "bin")
self.copy("*.dylib", "bin", "lib")
def test(self):
print ("Running test")
os.chdir("bin")
server = ".%sserver" % os.sep
import subprocess
pid = subprocess.Popen(server)
print ("Lets launch client for ", server)
self.run(".%sclient > null" % os.sep)
pid.terminate()
|
{
"content_hash": "da9ff787215edb86272792a086f4ace0",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 82,
"avg_line_length": 31.93103448275862,
"alnum_prop": 0.5907127429805615,
"repo_name": "memsharded/conan-zmq",
"id": "7489e1ca1ab14ba3067ea6e8f3a9946266279dea",
"size": "926",
"binary": false,
"copies": "1",
"ref": "refs/heads/4.2.0",
"path": "test_package/conanfile.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "1212"
},
{
"name": "CMake",
"bytes": "2272"
},
{
"name": "Python",
"bytes": "4304"
},
{
"name": "Shell",
"bytes": "646"
}
],
"symlink_target": ""
}
|
try:
import json
except ImportError:
import sys
sys.path.append('simplejson-2.3.3')
import simplejson as json
try:
import urllib2
from urllib2 import URLError, HTTPError
import httplib
import urlparse
import httplib2
from ConfigParser import ConfigParser
except ImportError:
pass # python 3
import random
import base64
import os
_CT = 'content-type'
_AJ = 'application/json'
_URL_SCHEME = frozenset(['http', 'https'])
def _get_token(user_id, password,
auth_svc='https://nexus.api.globusonline.org/goauth/token?' +
'grant_type=client_credentials'):
    # This is a bandaid helper function until we get a full
# KBase python auth client released
h = httplib2.Http(disable_ssl_certificate_validation=True)
auth = base64.encodestring(user_id + ':' + password)
headers = {'Authorization': 'Basic ' + auth}
h.add_credentials(user_id, password)
h.follow_all_redirects = True
url = auth_svc
resp, content = h.request(url, 'GET', headers=headers)
status = int(resp['status'])
if status >= 200 and status <= 299:
tok = json.loads(content)
elif status == 403:
raise Exception('Authentication failed: Bad user_id/password ' +
'combination %s:%s' % (user_id, password))
else:
raise Exception(str(resp))
return tok['access_token']
def _read_rcfile(file=os.environ['HOME'] + '/.authrc'): # @ReservedAssignment
# Another bandaid to read in the ~/.authrc file if one is present
authdata = None
if os.path.exists(file):
try:
with open(file) as authrc:
rawdata = json.load(authrc)
# strip down whatever we read to only what is legit
authdata = {x: rawdata.get(x) for x in (
'user_id', 'token', 'client_secret', 'keyfile',
'keyfile_passphrase', 'password')}
except Exception as e:
print("Error while reading authrc file %s: %s" % (file, e))
return authdata
def _read_inifile(file=os.environ.get( # @ReservedAssignment
'KB_DEPLOYMENT_CONFIG', os.environ['HOME'] +
'/.kbase_config')):
# Another bandaid to read in the ~/.kbase_config file if one is present
authdata = None
if os.path.exists(file):
try:
config = ConfigParser()
config.read(file)
# strip down whatever we read to only what is legit
authdata = {x: config.get('authentication', x)
if config.has_option('authentication', x)
else None for x in
('user_id', 'token', 'client_secret',
'keyfile', 'keyfile_passphrase', 'password')}
except Exception as e:
print("Error while reading INI file %s: %s" % (file, e))
return authdata
class ServerError(Exception):
def __init__(self, name, code, message, data=None, error=None):
self.name = name
self.code = code
self.message = '' if message is None else message
self.data = data or error or ''
# data = JSON RPC 2.0, error = 1.1
def __str__(self):
return self.name + ': ' + str(self.code) + '. ' + self.message + \
'\n' + self.data
class JSONObjectEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, set):
return list(obj)
if isinstance(obj, frozenset):
return list(obj)
return json.JSONEncoder.default(self, obj)
class AbstractHandle(object):
def __init__(self, url=None, timeout=30 * 60, user_id=None,
password=None, token=None, ignore_authrc=False):
if url is None:
url = 'http://localhost:7109'
scheme, _, _, _, _, _ = urlparse.urlparse(url)
if scheme not in _URL_SCHEME:
raise ValueError(url + " isn't a valid http url")
self.url = url
self.timeout = int(timeout)
self._headers = dict()
# token overrides user_id and password
if token is not None:
self._headers['AUTHORIZATION'] = token
elif user_id is not None and password is not None:
self._headers['AUTHORIZATION'] = _get_token(user_id, password)
elif 'KB_AUTH_TOKEN' in os.environ:
self._headers['AUTHORIZATION'] = os.environ.get('KB_AUTH_TOKEN')
elif not ignore_authrc:
authdata = _read_inifile()
if authdata is None:
authdata = _read_rcfile()
if authdata is not None:
if authdata.get('token') is not None:
self._headers['AUTHORIZATION'] = authdata['token']
elif(authdata.get('user_id') is not None
and authdata.get('password') is not None):
self._headers['AUTHORIZATION'] = _get_token(
authdata['user_id'], authdata['password'])
if self.timeout < 1:
raise ValueError('Timeout value must be at least 1 second')
def new_handle(self):
arg_hash = {'method': 'AbstractHandle.new_handle',
'params': [],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def localize_handle(self, h1, service_name):
arg_hash = {'method': 'AbstractHandle.localize_handle',
'params': [h1, service_name],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def initialize_handle(self, h1):
arg_hash = {'method': 'AbstractHandle.initialize_handle',
'params': [h1],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def persist_handle(self, h):
arg_hash = {'method': 'AbstractHandle.persist_handle',
'params': [h],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def upload(self, infile):
arg_hash = {'method': 'AbstractHandle.upload',
'params': [infile],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def download(self, h, outfile):
arg_hash = {'method': 'AbstractHandle.download',
'params': [h, outfile],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
pass # nothing to return
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def upload_metadata(self, h, infile):
arg_hash = {'method': 'AbstractHandle.upload_metadata',
'params': [h, infile],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
pass # nothing to return
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def download_metadata(self, h, outfile):
arg_hash = {'method': 'AbstractHandle.download_metadata',
'params': [h, outfile],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
pass # nothing to return
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def ids_to_handles(self, ids):
arg_hash = {'method': 'AbstractHandle.ids_to_handles',
'params': [ids],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def hids_to_handles(self, hids):
arg_hash = {'method': 'AbstractHandle.hids_to_handles',
'params': [hids],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def are_readable(self, arg_1):
arg_hash = {'method': 'AbstractHandle.are_readable',
'params': [arg_1],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def is_readable(self, id):
arg_hash = {'method': 'AbstractHandle.is_readable',
'params': [id],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def list_handles(self):
arg_hash = {'method': 'AbstractHandle.list_handles',
'params': [],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def delete_handles(self, l):
arg_hash = {'method': 'AbstractHandle.delete_handles',
'params': [l],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
pass # nothing to return
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def give(self, user, perm, h):
arg_hash = {'method': 'AbstractHandle.give',
'params': [user, perm, h],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
pass # nothing to return
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def ids_to_handles(self, ids):
arg_hash = {'method': 'AbstractHandle.ids_to_handles',
'params': [ids],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
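# --- Editorial sketch, not part of the original client: typical use of the
# JSON-RPC wrapper above.  The URL and token below are placeholders only.
def _example_usage():
    client = AbstractHandle(url='http://localhost:7109', token='<auth token>')
    h = client.new_handle()    # ask the handle service for a fresh record
    client.persist_handle(h)   # then make it permanent
    return client.list_handles()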
|
{
"content_hash": "58f8b7bb01c7a59c5ddd240dc6949075",
"timestamp": "",
"source": "github",
"line_count": 705,
"max_line_length": 79,
"avg_line_length": 37.91205673758865,
"alnum_prop": 0.48443579766536965,
"repo_name": "aitatanit/metatlas",
"id": "0b9b3a7c1f32e2bb71c9802c5b1d4d22b87872a5",
"size": "27122",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "metatlas/kbase/handle_service.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "19353738"
},
{
"name": "Makefile",
"bytes": "1333"
},
{
"name": "Python",
"bytes": "184467"
},
{
"name": "R",
"bytes": "78"
},
{
"name": "Shell",
"bytes": "2327"
}
],
"symlink_target": ""
}
|
"""
Defines classes related to date ranges.
"""
import datetime
try:
import pytz
except ImportError:
pytz = None
from django.db import models
from django.conf import settings
from django.utils.translation import gettext_lazy as _
from django.utils.translation import pgettext_lazy as pgettext
from django.core.exceptions import ValidationError
from django.utils.timezone import is_aware
from lino.utils import last_day_of_month
from lino.api import dd
from lino.core.model import Model
from lino.utils.format_date import fdl, fds
from lino.utils.ranges import isrange
from lino.core.utils import ParameterPanel
from lino.utils.quantities import Duration
def rangefmt(r):
return fds(r[0]) + '...' + fds(r[1])
class CombinedDateTime(dd.Model):
"""
Mixin for models which have at least one couple of date and time
fields which form a kind of editable timestamp field.
"""
class Meta:
abstract = True
def get_time_zone(self):
"""
The time zone for the date and time fields in this model.
Expected to always return an instance of
:class:`lino.modlib.about.choicelists.TimeZone`.
        May be overridden, e.g. to return the author's time zone.
"""
return settings.SITE.models.about.TimeZones.default
# return settings.TIME_ZONE
def set_datetime(self, name, value):
"""
Given a datetime `value`, update the two corresponding fields
`FOO_date` and `FOO_time` (where FOO is specified in `name` which
must be either "start" or "end").
"""
if settings.USE_TZ and is_aware(value):
# tz = pytz.timezone(self.get_time_zone())
# dd.logger.info("20151128 set_datetime(%r, %r)", value, tz)
# value = value.astimezone(tz)
# value = tz.localize(value)
value = value.astimezone(self.get_time_zone().tzinfo)
setattr(self, name + '_date', value.date())
t = value.time()
if not t:
t = None
setattr(self, name + '_time', t)
def get_datetime(self, name, altname=None):
"""
Return a `datetime` value from the two corresponding
date and time fields.
`name` can be 'start' or 'end'.
The optional `altname` can be used e.g. in a single-day calendar event
to support having `end_date` empty, meaning "same as `start_date`".
In that case you should ask ``get_datetime("end", "start")``.
"""
d = getattr(self, name + '_date')
t = getattr(self, name + '_time')
if not d and altname is not None:
d = getattr(self, altname + '_date')
if not t and altname is not None:
t = getattr(self, altname + '_time')
if not d:
return None
if t:
dt = datetime.datetime.combine(d, t)
else:
dt = datetime.datetime(d.year, d.month, d.day)
if settings.USE_TZ:
# tz = pytz.timezone(self.get_time_zone().tzinfo)
# dd.logger.info("20151128 get_datetime() %r %r", dt, tz)
# dt = tz.localize(dt)
dt = self.get_time_zone().tzinfo.localize(dt)
return dt
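# --- Editorial sketch, not part of the original module: how the two helpers
# above pair up on a model with FOO_date/FOO_time columns (``Started`` below
# is such a model; the values shown are purely illustrative):
#
#     obj = Started()
#     obj.set_datetime('start', datetime.datetime(2023, 5, 1, 14, 30))
#     obj.start_date             # -> datetime.date(2023, 5, 1)
#     obj.start_time             # -> datetime.time(14, 30)
#     obj.get_datetime('start')  # -> back to a (possibly tz-aware) datetime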
class Started(CombinedDateTime):
"""
Adds two fields :attr:`start_date` and :attr:`start_time`.
.. attribute:: start_date
.. attribute:: start_time
"""
class Meta:
abstract = True
start_date = models.DateField(
blank=True, null=True,
verbose_name=_("Start date")) # iCal:DTSTART
start_time = dd.TimeField(
blank=True, null=True,
verbose_name=_("Start time")) # iCal:DTSTART
#~ start = dd.FieldSet(_("Start"),'start_date start_time')
def save(self, *args, **kw):
"""
        Fill in the default value "today" for start_date.
"""
if not self.start_date:
self.start_date = settings.SITE.today()
super(Started, self).save(*args, **kw)
class Ended(CombinedDateTime):
"""
Mixin for models with two fields :attr:`end_date` and
:attr:`end_time`.
.. attribute:: end_date
.. attribute:: end_time
"""
class Meta:
abstract = True
end_date = models.DateField(
blank=True, null=True,
verbose_name=_("End Date"))
end_time = dd.TimeField(
blank=True, null=True,
verbose_name=_("End Time"))
def get_duration(self):
"""Return the duration in hours."""
if not self.start_date:
return None
if not self.start_time:
return None
if not self.end_time:
return None
ed = self.end_date or self.start_date
st = datetime.datetime.combine(self.start_date, self.start_time)
et = datetime.datetime.combine(ed, self.end_time)
if et < st:
return None # negative duration not supported
# print 20151127, repr(et), repr(st)
return Duration(et - st)
@dd.virtualfield(dd.QuantityField(_("Duration")))
def duration(self, ar):
return self.get_duration()
class DateRangeObservable(Model):
class Meta(object):
abstract = True
get_default_start_date = None
get_default_end_date = None
@classmethod
def setup_parameters(cls, fields):
fields.update(
start_date=models.DateField(
_("Period from"), blank=True, null=True,
default=cls.get_default_start_date,
help_text=_("Start date of observed period")))
fields.update(
end_date=models.DateField(
_("until"),
blank=True, null=True,
default=cls.get_default_end_date,
help_text=_("End date of observed period")))
super(DateRangeObservable, cls).setup_parameters(fields)
class DateRange(DateRangeObservable):
"""
Mixin for models which represent a period whose start and end are
date fields.
Designed for usage with
:class:`lino.modlib.system.PeriodEvents`.
"""
class Meta(object):
abstract = True
empty_period_text = ""
start_date = models.DateField(_("Start date"), blank=True, null=True)
end_date = models.DateField(_("End date"), blank=True, null=True)
def full_clean(self, *args, **kw):
if not isrange(self.start_date, self.end_date):
raise ValidationError(_("Date period ends before it started."))
super(DateRange, self).full_clean(*args, **kw)
def get_period(self):
return (self.start_date, self.end_date)
def is_past(self):
return (self.end_date and self.end_date <= dd.today())
def is_future(self):
return (self.start_date and self.start_date > settings.SITE.today())
def get_period_text(self):
if self.start_date and self.end_date:
if self.start_date == self.end_date:
# s = tostring(E.b(fdl(self.start_date)))
s = fdl(self.start_date)
return pgettext("date", "on %s") % s
else:
kw = dict()
kw.update(a=fdl(self.start_date))
kw.update(b=fdl(self.end_date))
return pgettext("date range", "between %(a)s and %(b)s") % kw
elif self.start_date:
s = fdl(self.start_date)
if self.is_future():
return pgettext("future date range", "from %s") % s
else:
return pgettext("date range", "from %s") % s
elif self.end_date:
s = fdl(self.end_date)
return pgettext("date range", "until %s") % s
return self.empty_period_text
DateRange.set_widget_options('start_date', width=10)
DateRange.set_widget_options('end_date', width=10)
class ObservedDateRange(ParameterPanel):
""":class:`lino.core.param_panel.ParameterPanel` with two fields
`start_date` and `end_date` which default to empty.
    Note that you must define a get_request_queryset method yourself in order
    to actually use these two parameter fields.
"""
get_default_start_date = None
get_default_end_date = None
def __init__(self,
verbose_name_start=_("Date from"),
verbose_name_end=_("until"), **kwargs):
kwargs.update(
start_date=models.DateField(
verbose_name_start, blank=True, null=True,
default=self.get_default_start_date,
help_text=_("Start of observed date range")),
end_date=models.DateField(
verbose_name_end,
blank=True, null=True,
default=self.get_default_end_date,
help_text=_("End of observed date range")),
)
super(ObservedDateRange, self).__init__(**kwargs)
@classmethod
def param_defaults(cls, ar, **kw):
# Theoretically this would cause default values to also be set when
# using Monthly or Yearly as the parameter panel of an action. Doesn't
# work in extjs because action parameters don't use their default
# values.
kw = super(ObservedDateRange, cls).param_defaults(ar, **kw)
kw.update(start_date=cls.get_default_start_date())
kw.update(end_date=cls.get_default_end_date())
return kw
class Yearly(ObservedDateRange):
"""An :class:`ObservedDateRange` for which `start_date` defaults to Jan
1st and `end_date` to Dec 31 of the current year.
"""
def get_default_start_date(self):
return dd.today().replace(month=1, day=1)
# D = datetime.date
# return D(D.today().year, 1, 1)
def get_default_end_date(self):
return dd.today().replace(month=12, day=31)
# D = datetime.date
# return D(D.today().year, 12, 31)
class Monthly(ObservedDateRange):
"""An :class:`ObservedDateRange` which defaults to the current month.
"""
def get_default_start_date(self):
return dd.today().replace(day=1)
def get_default_end_date(self):
return last_day_of_month(dd.today())
class Weekly(ObservedDateRange):
"""An :class:`ObservedDateRange` which defaults to the current week.
"""
def get_default_start_date(self):
d = dd.today()
return d - datetime.timedelta(days=d.weekday())
def get_default_end_date(self):
d = dd.today()
return d + datetime.timedelta(days=6-d.weekday())
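# --- Editorial sketch, not part of the original module: the three panels
# above differ only in their default observed range.  With dd.today() equal
# to 2023-05-17 (a Wednesday) the defaults would be:
#
#     Yearly  : 2023-01-01 ... 2023-12-31
#     Monthly : 2023-05-01 ... 2023-05-31
#     Weekly  : 2023-05-15 ... 2023-05-21   (Monday through Sunday)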
class Today(ParameterPanel):
"""A :class:`ParameterPanel <lino.core.param_panel.ParameterPanel>`
with a field `today` that defaults to today.
"""
def __init__(self, verbose_name=_("Situation on"), **kw):
kw.update(
today=models.DateField(
verbose_name, blank=True, null=True,
default=dd.today,
help_text=_("Date of observation")),
)
super(Today, self).__init__(**kw)
|
{
"content_hash": "57b826312998d9ad07256699345f9a6a",
"timestamp": "",
"source": "github",
"line_count": 351,
"max_line_length": 80,
"avg_line_length": 31.113960113960115,
"alnum_prop": 0.5925281567622013,
"repo_name": "lino-framework/lino",
"id": "26fbbbce868c4807144aa0e35a28e2b6f2c14106",
"size": "11038",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lino/mixins/periods.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "704"
},
{
"name": "CSS",
"bytes": "1281825"
},
{
"name": "Emacs Lisp",
"bytes": "277895"
},
{
"name": "HTML",
"bytes": "928037"
},
{
"name": "Hack",
"bytes": "3416"
},
{
"name": "JavaScript",
"bytes": "1128493"
},
{
"name": "PHP",
"bytes": "53997"
},
{
"name": "Python",
"bytes": "2601694"
},
{
"name": "Shell",
"bytes": "4469"
},
{
"name": "TSQL",
"bytes": "2427"
}
],
"symlink_target": ""
}
|
from runner.koan import *
class AboutDeletingObjects(Koan):
def test_del_can_remove_slices(self):
lottery_nums = [4, 8, 15, 16, 23, 42]
del lottery_nums[1]
del lottery_nums[2:4]
self.assertEqual([4, 15, 42], lottery_nums)
def test_del_can_remove_entire_lists(self):
lottery_nums = [4, 8, 15, 16, 23, 42]
del lottery_nums
with self.assertRaises(NameError): win = lottery_nums
# ====================================================================
class ClosingSale:
def __init__(self):
self.hamsters = 7
self.zebras = 84
def cameras(self):
return 34
def toilet_brushes(self):
return 48
def jellies(self):
return 5
def test_del_can_remove_attributes(self):
crazy_discounts = self.ClosingSale()
del self.ClosingSale.toilet_brushes
del crazy_discounts.hamsters
try:
still_available = crazy_discounts.toilet_brushes()
except AttributeError as e:
err_msg1 = e.args[0]
try:
still_available = crazy_discounts.hamsters
except AttributeError as e:
err_msg2 = e.args[0]
self.assertRegexpMatches(err_msg1, 'no attribute')
self.assertRegexpMatches(err_msg2, 'no attribute')
# ====================================================================
class ClintEastwood:
def __init__(self):
self._name = None
def get_name(self):
try:
return self._name
except:
return "The man with no name"
def set_name(self, name):
self._name = name
def del_name(self):
del self._name
name = property(get_name, set_name, del_name, \
"Mr Eastwood's current alias")
def test_del_works_with_properties(self):
cowboy = self.ClintEastwood()
cowboy.name = 'Senor Ninguno'
self.assertEqual('Senor Ninguno', cowboy.name)
del cowboy.name
self.assertEqual("The man with no name", cowboy.name)
# ====================================================================
class Prisoner:
def __init__(self):
self._name = None
@property
def name(self):
return self._name
@name.setter
def name(self, name):
self._name = name
@name.deleter
def name(self):
self._name = 'Number Six'
def test_another_way_to_make_a_deletable_property(self):
citizen = self.Prisoner()
citizen.name = "Patrick"
self.assertEqual('Patrick', citizen.name)
del citizen.name
self.assertEqual('Number Six', citizen.name)
# ====================================================================
class MoreOrganisedClosingSale(ClosingSale):
def __init__(self):
self.last_deletion = None
super().__init__()
def __delattr__(self, attr_name):
self.last_deletion = attr_name
def tests_del_can_be_overriden(self):
sale = self.MoreOrganisedClosingSale()
self.assertEqual(5, sale.jellies())
del sale.jellies
self.assertEqual('jellies', sale.last_deletion)
|
{
"content_hash": "39b903da9980c5e11677bb31d1b6b83d",
"timestamp": "",
"source": "github",
"line_count": 121,
"max_line_length": 74,
"avg_line_length": 27.537190082644628,
"alnum_prop": 0.5120048019207684,
"repo_name": "sourabhv/python-koans-solutions",
"id": "c92a39a5a8868c64af52ab9764e68e33b22cbfae",
"size": "3379",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python3/koans/about_deleting_objects.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "107742"
}
],
"symlink_target": ""
}
|
from .compiler import COLLECT_CARTESIAN_PRODUCTS # noqa
from .compiler import FROM_LINTING # noqa
from .compiler import NO_LINTING # noqa
from .compiler import WARN_LINTING # noqa
from .expression import Alias # noqa
from .expression import alias # noqa
from .expression import all_ # noqa
from .expression import and_ # noqa
from .expression import any_ # noqa
from .expression import asc # noqa
from .expression import between # noqa
from .expression import bindparam # noqa
from .expression import case # noqa
from .expression import cast # noqa
from .expression import ClauseElement # noqa
from .expression import collate # noqa
from .expression import column # noqa
from .expression import ColumnCollection # noqa
from .expression import ColumnElement # noqa
from .expression import CompoundSelect # noqa
from .expression import cte # noqa
from .expression import Delete # noqa
from .expression import delete # noqa
from .expression import desc # noqa
from .expression import distinct # noqa
from .expression import except_ # noqa
from .expression import except_all # noqa
from .expression import exists # noqa
from .expression import extract # noqa
from .expression import false # noqa
from .expression import False_ # noqa
from .expression import FromClause # noqa
from .expression import func # noqa
from .expression import funcfilter # noqa
from .expression import Insert # noqa
from .expression import insert # noqa
from .expression import intersect # noqa
from .expression import intersect_all # noqa
from .expression import Join # noqa
from .expression import join # noqa
from .expression import label # noqa
from .expression import lateral # noqa
from .expression import literal # noqa
from .expression import literal_column # noqa
from .expression import modifier # noqa
from .expression import not_ # noqa
from .expression import null # noqa
from .expression import nullsfirst # noqa
from .expression import nullslast # noqa
from .expression import or_ # noqa
from .expression import outerjoin # noqa
from .expression import outparam # noqa
from .expression import over # noqa
from .expression import quoted_name # noqa
from .expression import Select # noqa
from .expression import select # noqa
from .expression import Selectable # noqa
from .expression import Subquery # noqa
from .expression import subquery # noqa
from .expression import table # noqa
from .expression import TableClause # noqa
from .expression import TableSample # noqa
from .expression import tablesample # noqa
from .expression import text # noqa
from .expression import true # noqa
from .expression import True_ # noqa
from .expression import tuple_ # noqa
from .expression import type_coerce # noqa
from .expression import union # noqa
from .expression import union_all # noqa
from .expression import Update # noqa
from .expression import update # noqa
from .expression import within_group # noqa
from .visitors import ClauseVisitor # noqa
def __go(lcls):
global __all__
from .. import util as _sa_util
import inspect as _inspect
__all__ = sorted(
name
for name, obj in lcls.items()
if not (name.startswith("_") or _inspect.ismodule(obj))
)
from .annotation import _prepare_annotations
from .annotation import Annotated # noqa
from .elements import AnnotatedColumnElement
from .elements import ClauseList # noqa
from .selectable import AnnotatedFromClause # noqa
from . import base
from . import coercions
from . import elements
from . import events # noqa
from . import selectable
from . import schema
from . import sqltypes
from . import type_api
base.coercions = elements.coercions = coercions
base.elements = elements
base.type_api = type_api
coercions.elements = elements
coercions.schema = schema
coercions.selectable = selectable
coercions.sqltypes = sqltypes
_prepare_annotations(ColumnElement, AnnotatedColumnElement)
_prepare_annotations(FromClause, AnnotatedFromClause)
_prepare_annotations(ClauseList, Annotated)
_sa_util.dependencies.resolve_all("sqlalchemy.sql")
from . import naming # noqa
__go(locals())
|
{
"content_hash": "8af593e0783591d457837e60336b3c20",
"timestamp": "",
"source": "github",
"line_count": 121,
"max_line_length": 63,
"avg_line_length": 35.082644628099175,
"alnum_prop": 0.7451118963486455,
"repo_name": "jam-py/jam-py",
"id": "488717041dadb38b06317d41d6893b4785deac75",
"size": "4481",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jam/third_party/sqlalchemy/sql/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "37683"
},
{
"name": "HTML",
"bytes": "67579"
},
{
"name": "JavaScript",
"bytes": "2789171"
},
{
"name": "Python",
"bytes": "432048"
}
],
"symlink_target": ""
}
|
from twisted.internet import defer
import tests.unittest
import tests.utils
USER_ID = "@user:example.com"
class EventPushActionsStoreTestCase(tests.unittest.TestCase):
@defer.inlineCallbacks
def setUp(self):
hs = yield tests.utils.setup_test_homeserver()
self.store = hs.get_datastore()
@defer.inlineCallbacks
def test_get_unread_push_actions_for_user_in_range_for_http(self):
yield self.store.get_unread_push_actions_for_user_in_range_for_http(
USER_ID, 0, 1000, 20
)
@defer.inlineCallbacks
def test_get_unread_push_actions_for_user_in_range_for_email(self):
yield self.store.get_unread_push_actions_for_user_in_range_for_email(
USER_ID, 0, 1000, 20
)
|
{
"content_hash": "853c567e60c7e98755468c418082b163",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 77,
"avg_line_length": 29.076923076923077,
"alnum_prop": 0.6785714285714286,
"repo_name": "TribeMedia/synapse",
"id": "e9044afa2e195d94eb40919586e972202e0fca89",
"size": "1359",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/storage/test_event_push_actions.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "4376"
},
{
"name": "HTML",
"bytes": "9046"
},
{
"name": "JavaScript",
"bytes": "176441"
},
{
"name": "Perl",
"bytes": "31852"
},
{
"name": "Python",
"bytes": "2748398"
},
{
"name": "Shell",
"bytes": "7827"
}
],
"symlink_target": ""
}
|
from pg_requests.tokens import CommaValue
class Function(object):
"""Basic function implementation. Use this for aggregation functions"""
def __init__(self, name):
self.name = name
# NOTE: python 3 syntax only
# def __call__(self, *args, alias=None):
def __call__(self, *args, **kwargs):
value = CommaValue(args)
alias = kwargs.get('alias')
if alias:
fn_str = "{}({}) AS '{}'".format(self.name, value.eval(), alias)
else:
fn_str = "{}({})".format(self.name, value.eval())
return fn_str
def __repr__(self):
return "%s(name=%s(*args))" % (self.__class__.__name__, self.name)
class FunctionFactory(object):
"""Neat and natural function factory
Usage:
fn.COUNT('*') --> 'COUNT(*)'
    fn.COUNT('*', alias='count_all') --> "COUNT(*) AS 'count_all'"
"""
_FUNCTIONS = ('COUNT', 'AVG', 'MIN', 'MAX', 'SUM')
def __init__(self, rtype=Function):
self.rtype = rtype
def __getattr__(self, name):
# NOTE: probably make sense to restrict function names
# if name.upper() not in self.FUNCTIONS:
# raise AttributeError("Wrong function name '%s'" % name)
return self.rtype(name=name)
fn = FunctionFactory(rtype=Function)
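# --- Editorial sketch, not part of the original module: the factory builds
# Function objects lazily from attribute access, so any SQL function name
# works.  (Assumes CommaValue joins its arguments with commas, as the
# docstring above suggests.)
def _example_fn_usage():
    return [
        fn.COUNT('*'),                    # "COUNT(*)"
        fn.SUM('amount', alias='total'),  # "SUM(amount) AS 'total'"
    ]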
|
{
"content_hash": "c8667153a9a76b665171499848df30af",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 76,
"avg_line_length": 29.477272727272727,
"alnum_prop": 0.5643793369313801,
"repo_name": "prawn-cake/pg_query",
"id": "cc1af7aa33a2a32bdab31f714a0a4681ac61a356",
"size": "1321",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pg_requests/functions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "54770"
}
],
"symlink_target": ""
}
|
"""setuptools.command.egg_info
Create a distribution's .egg-info directory and contents"""
# This module should be kept compatible with Python 2.3
import os, re, sys
from setuptools import Command
from distutils.errors import *
from distutils import log
from setuptools.command.sdist import sdist
from setuptools.compat import basestring
from setuptools import svn_utils
from distutils.util import convert_path
from distutils.filelist import FileList as _FileList
from pkg_resources import parse_requirements, safe_name, parse_version, \
safe_version, yield_lines, EntryPoint, iter_entry_points, to_filename
from setuptools.command.sdist import walk_revctrl
class egg_info(Command):
description = "create a distribution's .egg-info directory"
user_options = [
('egg-base=', 'e', "directory containing .egg-info directories"
" (default: top of the source tree)"),
('tag-svn-revision', 'r',
"Add subversion revision ID to version number"),
('tag-date', 'd', "Add date stamp (e.g. 20050528) to version number"),
('tag-build=', 'b', "Specify explicit tag to add to version number"),
('no-svn-revision', 'R',
"Don't add subversion revision ID [default]"),
('no-date', 'D', "Don't include date stamp [default]"),
]
boolean_options = ['tag-date', 'tag-svn-revision']
negative_opt = {'no-svn-revision': 'tag-svn-revision',
'no-date': 'tag-date'}
def initialize_options(self):
self.egg_name = None
self.egg_version = None
self.egg_base = None
self.egg_info = None
self.tag_build = None
self.tag_svn_revision = 0
self.tag_date = 0
self.broken_egg_info = False
self.vtags = None
def save_version_info(self, filename):
from setuptools.command.setopt import edit_config
edit_config(
filename,
{'egg_info':
{'tag_svn_revision':0, 'tag_date': 0, 'tag_build': self.tags()}
}
)
def finalize_options (self):
self.egg_name = safe_name(self.distribution.get_name())
self.vtags = self.tags()
self.egg_version = self.tagged_version()
try:
list(
parse_requirements('%s==%s' % (self.egg_name,self.egg_version))
)
except ValueError:
raise DistutilsOptionError(
"Invalid distribution name or version syntax: %s-%s" %
(self.egg_name,self.egg_version)
)
if self.egg_base is None:
dirs = self.distribution.package_dir
self.egg_base = (dirs or {}).get('',os.curdir)
self.ensure_dirname('egg_base')
self.egg_info = to_filename(self.egg_name)+'.egg-info'
if self.egg_base != os.curdir:
self.egg_info = os.path.join(self.egg_base, self.egg_info)
if '-' in self.egg_name: self.check_broken_egg_info()
# Set package version for the benefit of dumber commands
# (e.g. sdist, bdist_wininst, etc.)
#
self.distribution.metadata.version = self.egg_version
# If we bootstrapped around the lack of a PKG-INFO, as might be the
# case in a fresh checkout, make sure that any special tags get added
# to the version info
#
pd = self.distribution._patched_dist
if pd is not None and pd.key==self.egg_name.lower():
pd._version = self.egg_version
pd._parsed_version = parse_version(self.egg_version)
self.distribution._patched_dist = None
def write_or_delete_file(self, what, filename, data, force=False):
"""Write `data` to `filename` or delete if empty
If `data` is non-empty, this routine is the same as ``write_file()``.
If `data` is empty but not ``None``, this is the same as calling
        ``delete_file(filename)``. If `data` is ``None``, then this is a no-op
unless `filename` exists, in which case a warning is issued about the
orphaned file (if `force` is false), or deleted (if `force` is true).
"""
if data:
self.write_file(what, filename, data)
elif os.path.exists(filename):
if data is None and not force:
log.warn(
"%s not set in setup(), but %s exists", what, filename
)
return
else:
self.delete_file(filename)
def write_file(self, what, filename, data):
"""Write `data` to `filename` (if not a dry run) after announcing it
`what` is used in a log message to identify what is being written
to the file.
"""
log.info("writing %s to %s", what, filename)
if sys.version_info >= (3,):
data = data.encode("utf-8")
if not self.dry_run:
f = open(filename, 'wb')
f.write(data)
f.close()
def delete_file(self, filename):
"""Delete `filename` (if not a dry run) after announcing it"""
log.info("deleting %s", filename)
if not self.dry_run:
os.unlink(filename)
def tagged_version(self):
version = self.distribution.get_version()
# egg_info may be called more than once for a distribution,
# in which case the version string already contains all tags.
if self.vtags and version.endswith(self.vtags):
return safe_version(version)
return safe_version(version + self.vtags)
def run(self):
self.mkpath(self.egg_info)
installer = self.distribution.fetch_build_egg
for ep in iter_entry_points('egg_info.writers'):
writer = ep.load(installer=installer)
writer(self, ep.name, os.path.join(self.egg_info,ep.name))
# Get rid of native_libs.txt if it was put there by older bdist_egg
nl = os.path.join(self.egg_info, "native_libs.txt")
if os.path.exists(nl):
self.delete_file(nl)
self.find_sources()
def tags(self):
version = ''
if self.tag_build:
version+=self.tag_build
if self.tag_svn_revision and (
os.path.exists('.svn') or os.path.exists('PKG-INFO')
): version += '-r%s' % self.get_svn_revision()
if self.tag_date:
import time; version += time.strftime("-%Y%m%d")
return version
@staticmethod
def get_svn_revision():
return str(svn_utils.SvnInfo.load(os.curdir).get_revision())
def find_sources(self):
"""Generate SOURCES.txt manifest file"""
manifest_filename = os.path.join(self.egg_info,"SOURCES.txt")
mm = manifest_maker(self.distribution)
mm.manifest = manifest_filename
mm.run()
self.filelist = mm.filelist
def check_broken_egg_info(self):
bei = self.egg_name+'.egg-info'
if self.egg_base != os.curdir:
bei = os.path.join(self.egg_base, bei)
if os.path.exists(bei):
log.warn(
"-"*78+'\n'
"Note: Your current .egg-info directory has a '-' in its name;"
'\nthis will not work correctly with "setup.py develop".\n\n'
'Please rename %s to %s to correct this problem.\n'+'-'*78,
bei, self.egg_info
)
self.broken_egg_info = self.egg_info
self.egg_info = bei # make it work for now
class FileList(_FileList):
"""File list that accepts only existing, platform-independent paths"""
def append(self, item):
if item.endswith('\r'): # Fix older sdists built on Windows
item = item[:-1]
path = convert_path(item)
if sys.version_info >= (3,):
try:
if os.path.exists(path) or os.path.exists(path.encode('utf-8')):
self.files.append(path)
except UnicodeEncodeError:
# Accept UTF-8 filenames even if LANG=C
if os.path.exists(path.encode('utf-8')):
self.files.append(path)
else:
log.warn("'%s' not %s encodable -- skipping", path,
sys.getfilesystemencoding())
else:
if os.path.exists(path):
self.files.append(path)
class manifest_maker(sdist):
template = "MANIFEST.in"
def initialize_options (self):
self.use_defaults = 1
self.prune = 1
self.manifest_only = 1
self.force_manifest = 1
def finalize_options(self):
pass
def run(self):
self.filelist = FileList()
if not os.path.exists(self.manifest):
self.write_manifest() # it must exist so it'll get in the list
self.filelist.findall()
self.add_defaults()
if os.path.exists(self.template):
self.read_template()
self.prune_file_list()
self.filelist.sort()
self.filelist.remove_duplicates()
self.write_manifest()
def write_manifest (self):
"""Write the file list in 'self.filelist' (presumably as filled in
by 'add_defaults()' and 'read_template()') to the manifest file
named by 'self.manifest'.
"""
# The manifest must be UTF-8 encodable. See #303.
if sys.version_info >= (3,):
files = []
for file in self.filelist.files:
try:
file.encode("utf-8")
except UnicodeEncodeError:
log.warn("'%s' not UTF-8 encodable -- skipping" % file)
else:
files.append(file)
self.filelist.files = files
files = self.filelist.files
if os.sep!='/':
files = [f.replace(os.sep,'/') for f in files]
self.execute(write_file, (self.manifest, files),
"writing manifest file '%s'" % self.manifest)
def warn(self, msg): # suppress missing-file warnings from sdist
if not msg.startswith("standard file not found:"):
sdist.warn(self, msg)
def add_defaults(self):
sdist.add_defaults(self)
self.filelist.append(self.template)
self.filelist.append(self.manifest)
rcfiles = list(walk_revctrl())
if rcfiles:
self.filelist.extend(rcfiles)
elif os.path.exists(self.manifest):
self.read_manifest()
ei_cmd = self.get_finalized_command('egg_info')
self.filelist.include_pattern("*", prefix=ei_cmd.egg_info)
def prune_file_list (self):
build = self.get_finalized_command('build')
base_dir = self.distribution.get_fullname()
self.filelist.exclude_pattern(None, prefix=build.build_base)
self.filelist.exclude_pattern(None, prefix=base_dir)
sep = re.escape(os.sep)
self.filelist.exclude_pattern(sep+r'(RCS|CVS|\.svn)'+sep, is_regex=1)
def write_file (filename, contents):
"""Create a file with the specified name and write 'contents' (a
sequence of strings without line terminators) to it.
"""
contents = "\n".join(contents)
if sys.version_info >= (3,):
contents = contents.encode("utf-8")
f = open(filename, "wb") # always write POSIX-style manifest
f.write(contents)
f.close()
def write_pkg_info(cmd, basename, filename):
log.info("writing %s", filename)
if not cmd.dry_run:
metadata = cmd.distribution.metadata
metadata.version, oldver = cmd.egg_version, metadata.version
metadata.name, oldname = cmd.egg_name, metadata.name
try:
# write unescaped data to PKG-INFO, so older pkg_resources
# can still parse it
metadata.write_pkg_info(cmd.egg_info)
finally:
metadata.name, metadata.version = oldname, oldver
safe = getattr(cmd.distribution,'zip_safe',None)
from setuptools.command import bdist_egg
bdist_egg.write_safety_flag(cmd.egg_info, safe)
def warn_depends_obsolete(cmd, basename, filename):
if os.path.exists(filename):
log.warn(
"WARNING: 'depends.txt' is not used by setuptools 0.6!\n"
"Use the install_requires/extras_require setup() args instead."
)
def write_requirements(cmd, basename, filename):
dist = cmd.distribution
data = ['\n'.join(yield_lines(dist.install_requires or ()))]
for extra,reqs in (dist.extras_require or {}).items():
data.append('\n\n[%s]\n%s' % (extra, '\n'.join(yield_lines(reqs))))
cmd.write_or_delete_file("requirements", filename, ''.join(data))
def write_toplevel_names(cmd, basename, filename):
pkgs = dict.fromkeys(
[k.split('.',1)[0]
for k in cmd.distribution.iter_distribution_names()
]
)
cmd.write_file("top-level names", filename, '\n'.join(pkgs)+'\n')
def overwrite_arg(cmd, basename, filename):
write_arg(cmd, basename, filename, True)
def write_arg(cmd, basename, filename, force=False):
argname = os.path.splitext(basename)[0]
value = getattr(cmd.distribution, argname, None)
if value is not None:
value = '\n'.join(value)+'\n'
cmd.write_or_delete_file(argname, filename, value, force)
def write_entries(cmd, basename, filename):
ep = cmd.distribution.entry_points
if isinstance(ep,basestring) or ep is None:
data = ep
elif ep is not None:
data = []
for section, contents in ep.items():
if not isinstance(contents,basestring):
contents = EntryPoint.parse_group(section, contents)
contents = '\n'.join(map(str,contents.values()))
data.append('[%s]\n%s\n\n' % (section,contents))
data = ''.join(data)
cmd.write_or_delete_file('entry points', filename, data, True)
def get_pkg_info_revision():
# See if we can get a -r### off of PKG-INFO, in case this is an sdist of
# a subversion revision
#
if os.path.exists('PKG-INFO'):
f = open('PKG-INFO','rU')
for line in f:
match = re.match(r"Version:.*-r(\d+)\s*$", line)
if match:
return int(match.group(1))
f.close()
return 0
#
|
{
"content_hash": "6ce1d5158bb66869f55ec4a64dc6effb",
"timestamp": "",
"source": "github",
"line_count": 458,
"max_line_length": 80,
"avg_line_length": 31.366812227074234,
"alnum_prop": 0.5821383822915216,
"repo_name": "bobeirasa/virtualenvs",
"id": "a0ba530590b5020bef9489c396a59f4d7d0754f7",
"size": "14366",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "pyzabbixhue/lib/python2.7/site-packages/setuptools/command/egg_info.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3541254"
},
{
"name": "Shell",
"bytes": "7394"
}
],
"symlink_target": ""
}
|
"""
Axislines includes a modified implementation of the Axes class. The
biggest difference is that the artists responsible for drawing the axis line,
ticks, ticklabels and axis labels are separated out from mpl's Axis
class, which does much more than just drawing in the original
mpl. Originally, this change was motivated to support curvilinear
grids. Here are a few reasons why I came up with a new axes class:
* "top" and "bottom" x-axes (or "left" and "right" y-axes) can have
different ticks (tick locations and labels). This is not possible
with the current mpl, although some twin-axes tricks can help.
* Curvilinear grids.
* Angled ticks.
In the new axes class, xaxis and yaxis are set to not visible by
default, and a new set of artists (AxisArtist) is defined to draw the axis
line, ticks, ticklabels and axis label. The Axes.axis attribute serves as
a dictionary of these artists, i.e., ax.axis["left"] is an AxisArtist
instance responsible for drawing the left y-axis. The default Axes.axis contains
"bottom", "left", "top" and "right".
AxisArtist can be considered as a container artist and
has the following child artists, which draw the ticks, labels, etc.:
* line
* major_ticks, major_ticklabels
* minor_ticks, minor_ticklabels
* offsetText
* label
Note that these are artists separate from the Axis class of the
original mpl, thus most tick-related commands in the original mpl
won't work, although some effort has been made to keep them working. For
example, the color and markerwidth of ax.axis["bottom"].major_ticks will
follow those of Axes.xaxis unless explicitly specified.
In addition to AxisArtist, the Axes will have a *gridlines* attribute,
which obviously draws grid lines. The gridlines need to be kept separate
from the axis because some gridlines can never pass through any axis.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import matplotlib.axes as maxes
import matplotlib.artist as martist
import matplotlib.text as mtext
import matplotlib.font_manager as font_manager
from matplotlib.path import Path
from matplotlib.transforms import Affine2D, ScaledTranslation, \
IdentityTransform, TransformedPath, Bbox
from matplotlib.collections import LineCollection
from matplotlib import rcParams
from matplotlib.artist import allow_rasterization
import warnings
import numpy as np
import matplotlib.lines as mlines
from .axisline_style import AxislineStyle
from .axis_artist import AxisArtist, GridlinesCollection
class AxisArtistHelper(object):
"""
AxisArtistHelper should define
following method with given APIs. Note that the first axes argument
will be axes attribute of the caller artist.
# LINE (spinal line?)
def get_line(self, axes):
# path : Path
return path
def get_line_transform(self, axes):
# ...
# trans : transform
return trans
# LABEL
def get_label_pos(self, axes):
# x, y : position
return (x, y), trans
def get_label_offset_transform(self, \
axes,
pad_points, fontprops, renderer,
bboxes,
):
# va : vertical alignment
# ha : horizontal alignment
# a : angle
return trans, va, ha, a
# TICK
def get_tick_transform(self, axes):
return trans
def get_tick_iterators(self, axes):
# iter : iterable object that yields (c, angle, l) where
# c, angle, l is position, tick angle, and label
return iter_major, iter_minor
"""
class _Base(object):
"""
Base class for axis helper.
"""
def __init__(self):
"""
"""
self.delta1, self.delta2 = 0.00001, 0.00001
def update_lim(self, axes):
pass
class Fixed(_Base):
"""
Helper class for a fixed (in the axes coordinate) axis.
"""
_default_passthru_pt = dict(left=(0, 0),
right=(1, 0),
bottom=(0, 0),
top=(0, 1))
def __init__(self,
loc, nth_coord=None,
):
"""
nth_coord = along which coordinate value varies
in 2d, nth_coord = 0 -> x axis, nth_coord = 1 -> y axis
"""
self._loc = loc
if loc not in ["left", "right", "bottom", "top"]:
raise ValueError("%s" % loc)
if nth_coord is None:
if loc in ["left", "right"]:
nth_coord = 1
elif loc in ["bottom", "top"]:
nth_coord = 0
self.nth_coord = nth_coord
super(AxisArtistHelper.Fixed, self).__init__()
self.passthru_pt = self._default_passthru_pt[loc]
_verts = np.array([[0., 0.],
[1., 1.]])
fixed_coord = 1-nth_coord
_verts[:,fixed_coord] = self.passthru_pt[fixed_coord]
# axis line in transAxes
self._path = Path(_verts)
def get_nth_coord(self):
return self.nth_coord
# LINE
def get_line(self, axes):
return self._path
def get_line_transform(self, axes):
return axes.transAxes
# LABEL
def get_axislabel_transform(self, axes):
return axes.transAxes
def get_axislabel_pos_angle(self, axes):
"""
label reference position in transAxes.
get_label_transform() returns a transform of (transAxes+offset)
"""
loc = self._loc
pos, angle_tangent = dict(left=((0., 0.5), 90),
right=((1., 0.5), 90),
bottom=((0.5, 0.), 0),
top=((0.5, 1.), 0))[loc]
return pos, angle_tangent
# TICK
def get_tick_transform(self, axes):
trans_tick = [axes.get_xaxis_transform(),
axes.get_yaxis_transform()][self.nth_coord]
return trans_tick
class Floating(_Base):
def __init__(self, nth_coord,
value):
self.nth_coord = nth_coord
self._value = value
super(AxisArtistHelper.Floating,
self).__init__()
def get_nth_coord(self):
return self.nth_coord
def get_line(self, axes):
raise RuntimeError("get_line method should be defined by the derived class")
class AxisArtistHelperRectlinear(object):
class Fixed(AxisArtistHelper.Fixed):
def __init__(self,
axes, loc, nth_coord=None,
):
"""
nth_coord = along which coordinate value varies
in 2d, nth_coord = 0 -> x axis, nth_coord = 1 -> y axis
"""
super(AxisArtistHelperRectlinear.Fixed, self).__init__( \
loc, nth_coord)
self.axis = [axes.xaxis, axes.yaxis][self.nth_coord]
# TICK
def get_tick_iterators(self, axes):
"""tick_loc, tick_angle, tick_label"""
loc = self._loc
if loc in ["bottom", "top"]:
angle_normal, angle_tangent = 90, 0
else:
angle_normal, angle_tangent = 0, 90
major = self.axis.major
majorLocs = major.locator()
major.formatter.set_locs(majorLocs)
majorLabels = [major.formatter(val, i) for i, val in enumerate(majorLocs)]
minor = self.axis.minor
minorLocs = minor.locator()
minor.formatter.set_locs(minorLocs)
minorLabels = [minor.formatter(val, i) for i, val in enumerate(minorLocs)]
trans_tick = self.get_tick_transform(axes)
tr2ax = trans_tick + axes.transAxes.inverted()
def _f(locs, labels):
for x, l in zip(locs, labels):
c = list(self.passthru_pt) # copy
c[self.nth_coord] = x
# check if the tick point is inside axes
c2 = tr2ax.transform_point(c)
#delta=0.00001
if 0. -self.delta1<= c2[self.nth_coord] <= 1.+self.delta2:
yield c, angle_normal, angle_tangent, l
return _f(majorLocs, majorLabels), _f(minorLocs, minorLabels)
class Floating(AxisArtistHelper.Floating):
def __init__(self, axes, nth_coord,
passingthrough_point, axis_direction="bottom"):
super(AxisArtistHelperRectlinear.Floating, self).__init__( \
nth_coord, passingthrough_point)
self._axis_direction = axis_direction
self.axis = [axes.xaxis, axes.yaxis][self.nth_coord]
def get_line(self, axes):
_verts = np.array([[0., 0.],
[1., 1.]])
fixed_coord = 1-self.nth_coord
trans_passingthrough_point = axes.transData + axes.transAxes.inverted()
p = trans_passingthrough_point.transform_point([self._value,
self._value])
_verts[:,fixed_coord] = p[fixed_coord]
return Path(_verts)
def get_line_transform(self, axes):
return axes.transAxes
def get_axislabel_transform(self, axes):
return axes.transAxes
def get_axislabel_pos_angle(self, axes):
"""
label reference position in transAxes.
get_label_transform() returns a transform of (transAxes+offset)
"""
loc = self._axis_direction
#angle = dict(left=0,
# right=0,
# bottom=.5*np.pi,
# top=.5*np.pi)[loc]
if self.nth_coord == 0:
angle = 0
else:
angle = 90
_verts = [0.5, 0.5]
fixed_coord = 1-self.nth_coord
trans_passingthrough_point = axes.transData + axes.transAxes.inverted()
p = trans_passingthrough_point.transform_point([self._value,
self._value])
_verts[fixed_coord] = p[fixed_coord]
if not (0. <= _verts[fixed_coord] <= 1.):
return None, None
else:
return _verts, angle
def get_tick_transform(self, axes):
return axes.transData
def get_tick_iterators(self, axes):
"""tick_loc, tick_angle, tick_label"""
loc = self._axis_direction
if loc in ["bottom", "top"]:
angle_normal, angle_tangent = 90, 0
else:
angle_normal, angle_tangent = 0, 90
if self.nth_coord == 0:
angle_normal, angle_tangent = 90, 0
else:
angle_normal, angle_tangent = 0, 90
#angle = 90 - 90 * self.nth_coord
major = self.axis.major
majorLocs = major.locator()
major.formatter.set_locs(majorLocs)
majorLabels = [major.formatter(val, i) for i, val in enumerate(majorLocs)]
minor = self.axis.minor
minorLocs = minor.locator()
minor.formatter.set_locs(minorLocs)
minorLabels = [minor.formatter(val, i) for i, val in enumerate(minorLocs)]
tr2ax = axes.transData + axes.transAxes.inverted()
def _f(locs, labels):
for x, l in zip(locs, labels):
c = [self._value, self._value]
c[self.nth_coord] = x
c1, c2 = tr2ax.transform_point(c)
if 0. <= c1 <= 1. and 0. <= c2 <= 1.:
if 0. - self.delta1 <= [c1, c2][self.nth_coord] <= 1. + self.delta2:
yield c, angle_normal, angle_tangent, l
return _f(majorLocs, majorLabels), _f(minorLocs, minorLabels)
class GridHelperBase(object):
def __init__(self):
self._force_update = True
self._old_limits = None
super(GridHelperBase, self).__init__()
def update_lim(self, axes):
x1, x2 = axes.get_xlim()
y1, y2 = axes.get_ylim()
if self._force_update or self._old_limits != (x1, x2, y1, y2):
self._update(x1, x2, y1, y2)
self._force_update = False
self._old_limits = (x1, x2, y1, y2)
def _update(self, x1, x2, y1, y2):
pass
def invalidate(self):
self._force_update = True
def valid(self):
return not self._force_update
def get_gridlines(self, which, axis):
"""
Return list of grid lines as a list of paths (list of points).
*which* : "major" or "minor"
*axis* : "both", "x" or "y"
"""
return []
def new_gridlines(self, ax):
"""
Create and return a new GridlineCollection instance.
*which* : "major" or "minor"
*axis* : "both", "x" or "y"
"""
gridlines = GridlinesCollection(None, transform=ax.transData,
colors=rcParams['grid.color'],
linestyles=rcParams['grid.linestyle'],
linewidths=rcParams['grid.linewidth'])
ax._set_artist_props(gridlines)
gridlines.set_grid_helper(self)
ax.axes._set_artist_props(gridlines)
# gridlines.set_clip_path(self.axes.patch)
        # set_clip_path needs to be deferred until Axes.cla is completed.
# It is done inside the cla.
return gridlines
class GridHelperRectlinear(GridHelperBase):
def __init__(self, axes):
super(GridHelperRectlinear, self).__init__()
self.axes = axes
def new_fixed_axis(self, loc,
nth_coord=None,
axis_direction=None,
offset=None,
axes=None,
):
if axes is None:
warnings.warn("'new_fixed_axis' explicitly requires the axes keyword.")
axes = self.axes
_helper = AxisArtistHelperRectlinear.Fixed(axes, loc, nth_coord)
if axis_direction is None:
axis_direction = loc
axisline = AxisArtist(axes, _helper, offset=offset,
axis_direction=axis_direction,
)
return axisline
def new_floating_axis(self, nth_coord, value,
axis_direction="bottom",
axes=None,
):
if axes is None:
warnings.warn("'new_floating_axis' explicitly requires the axes keyword.")
axes = self.axes
passthrough_point = (value, value)
transform = axes.transData
_helper = AxisArtistHelperRectlinear.Floating( \
axes, nth_coord, value, axis_direction)
axisline = AxisArtist(axes, _helper)
axisline.line.set_clip_on(True)
axisline.line.set_clip_box(axisline.axes.bbox)
return axisline
def get_gridlines(self, which="major", axis="both"):
"""
return list of gridline coordinates in data coordinates.
*which* : "major" or "minor"
*axis* : "both", "x" or "y"
"""
gridlines = []
if axis in ["both", "x"]:
locs = []
y1, y2 = self.axes.get_ylim()
#if self.axes.xaxis._gridOnMajor:
if which in ["both", "major"]:
locs.extend(self.axes.xaxis.major.locator())
#if self.axes.xaxis._gridOnMinor:
if which in ["both", "minor"]:
locs.extend(self.axes.xaxis.minor.locator())
for x in locs:
gridlines.append([[x, x], [y1, y2]])
if axis in ["both", "y"]:
x1, x2 = self.axes.get_xlim()
locs = []
if self.axes.yaxis._gridOnMajor:
#if which in ["both", "major"]:
locs.extend(self.axes.yaxis.major.locator())
if self.axes.yaxis._gridOnMinor:
#if which in ["both", "minor"]:
locs.extend(self.axes.yaxis.minor.locator())
for y in locs:
gridlines.append([[x1, x2], [y, y]])
return gridlines
class SimpleChainedObjects(object):
def __init__(self, objects):
self._objects = objects
def __getattr__(self, k):
_a = SimpleChainedObjects([getattr(a, k) for a in self._objects])
return _a
def __call__(self, *kl, **kwargs):
for m in self._objects:
m(*kl, **kwargs)
class Axes(maxes.Axes):
class AxisDict(dict):
def __init__(self, axes):
self.axes = axes
super(Axes.AxisDict, self).__init__()
def __getitem__(self, k):
if isinstance(k, tuple):
r = SimpleChainedObjects([dict.__getitem__(self, k1) for k1 in k])
return r
elif isinstance(k, slice):
                if k.start is None and k.stop is None and k.step is None:
r = SimpleChainedObjects(list(six.itervalues(self)))
return r
else:
raise ValueError("Unsupported slice")
else:
return dict.__getitem__(self, k)
def __call__(self, *v, **kwargs):
return maxes.Axes.axis(self.axes, *v, **kwargs)
def __init__(self, *kl, **kw):
helper = kw.pop("grid_helper", None)
self._axisline_on = True
if helper:
self._grid_helper = helper
else:
self._grid_helper = GridHelperRectlinear(self)
super(Axes, self).__init__(*kl, **kw)
self.toggle_axisline(True)
def toggle_axisline(self, b=None):
if b is None:
b = not self._axisline_on
if b:
self._axisline_on = True
for s in self.spines.values():
s.set_visible(False)
self.xaxis.set_visible(False)
self.yaxis.set_visible(False)
else:
self._axisline_on = False
for s in self.spines.values():
s.set_visible(True)
self.xaxis.set_visible(True)
self.yaxis.set_visible(True)
def _init_axis(self):
super(Axes, self)._init_axis()
def _init_axis_artists(self, axes=None):
if axes is None:
axes = self
self._axislines = self.AxisDict(self)
new_fixed_axis = self.get_grid_helper().new_fixed_axis
for loc in ["bottom", "top", "left", "right"]:
self._axislines[loc] = new_fixed_axis(loc=loc, axes=axes,
axis_direction=loc)
for axisline in [self._axislines["top"], self._axislines["right"]]:
axisline.label.set_visible(False)
axisline.major_ticklabels.set_visible(False)
axisline.minor_ticklabels.set_visible(False)
def _get_axislines(self):
return self._axislines
axis = property(_get_axislines)
def new_gridlines(self, grid_helper=None):
"""
Create and return a new GridlineCollection instance.
*which* : "major" or "minor"
*axis* : "both", "x" or "y"
"""
if grid_helper is None:
grid_helper = self.get_grid_helper()
gridlines = grid_helper.new_gridlines(self)
return gridlines
def _init_gridlines(self, grid_helper=None):
# It is done inside the cla.
gridlines = self.new_gridlines(grid_helper)
self.gridlines = gridlines
def cla(self):
        # gridlines need to be created before cla() since cla calls grid()
self._init_gridlines()
super(Axes, self).cla()
# the clip_path should be set after Axes.cla() since that's
# when a patch is created.
self.gridlines.set_clip_path(self.axes.patch)
self._init_axis_artists()
def get_grid_helper(self):
return self._grid_helper
def grid(self, b=None, which='major', axis="both", **kwargs):
"""
Toggle the gridlines, and optionally set the properties of the lines.
"""
        # There are some discrepancies between the behavior of grid in
        # axes_grid and the original mpl's grid, because axes_grid
        # explicitly sets the visibility of the gridlines.
super(Axes, self).grid(b, which=which, axis=axis, **kwargs)
if not self._axisline_on:
return
if b is None:
if self.axes.xaxis._gridOnMinor or self.axes.xaxis._gridOnMajor or \
self.axes.yaxis._gridOnMinor or self.axes.yaxis._gridOnMajor:
b=True
else:
b=False
self.gridlines.set_which(which)
self.gridlines.set_axis(axis)
self.gridlines.set_visible(b)
if len(kwargs):
martist.setp(self.gridlines, **kwargs)
def get_children(self):
if self._axisline_on:
children = list(six.itervalues(self._axislines)) + [self.gridlines]
else:
children = []
children.extend(super(Axes, self).get_children())
return children
def invalidate_grid_helper(self):
self._grid_helper.invalidate()
def new_fixed_axis(self, loc, offset=None):
gh = self.get_grid_helper()
axis = gh.new_fixed_axis(loc,
nth_coord=None,
axis_direction=None,
offset=offset,
axes=self,
)
return axis
def new_floating_axis(self, nth_coord, value,
axis_direction="bottom",
):
gh = self.get_grid_helper()
axis = gh.new_floating_axis(nth_coord, value,
axis_direction=axis_direction,
axes=self)
return axis
def draw(self, renderer, inframe=False):
if not self._axisline_on:
super(Axes, self).draw(renderer, inframe)
return
orig_artists = self.artists
self.artists = self.artists + list(self._axislines.values()) + [self.gridlines]
super(Axes, self).draw(renderer, inframe)
self.artists = orig_artists
def get_tightbbox(self, renderer, call_axes_locator=True):
bb0 = super(Axes, self).get_tightbbox(renderer, call_axes_locator)
if not self._axisline_on:
return bb0
bb = [bb0]
for axisline in list(six.itervalues(self._axislines)):
if not axisline.get_visible():
continue
bb.append(axisline.get_tightbbox(renderer))
# if axisline.label.get_visible():
# bb.append(axisline.label.get_window_extent(renderer))
# if axisline.major_ticklabels.get_visible():
# bb.extend(axisline.major_ticklabels.get_window_extents(renderer))
# if axisline.minor_ticklabels.get_visible():
# bb.extend(axisline.minor_ticklabels.get_window_extents(renderer))
# if axisline.major_ticklabels.get_visible() or \
# axisline.minor_ticklabels.get_visible():
# bb.append(axisline.offsetText.get_window_extent(renderer))
#bb.extend([c.get_window_extent(renderer) for c in artists \
# if c.get_visible()])
_bbox = Bbox.union([b for b in bb if b and (b.width!=0 or b.height!=0)])
return _bbox
Subplot = maxes.subplot_class_factory(Axes)
class AxesZero(Axes):
def __init__(self, *kl, **kw):
super(AxesZero, self).__init__(*kl, **kw)
def _init_axis_artists(self):
super(AxesZero, self)._init_axis_artists()
new_floating_axis = self._grid_helper.new_floating_axis
xaxis_zero = new_floating_axis(nth_coord=0,
value=0.,
axis_direction="bottom",
axes=self)
xaxis_zero.line.set_clip_path(self.patch)
xaxis_zero.set_visible(False)
self._axislines["xzero"] = xaxis_zero
yaxis_zero = new_floating_axis(nth_coord=1,
value=0.,
axis_direction="left",
axes=self)
yaxis_zero.line.set_clip_path(self.patch)
yaxis_zero.set_visible(False)
self._axislines["yzero"] = yaxis_zero
SubplotZero = maxes.subplot_class_factory(AxesZero)
if 0:
#if __name__ == "__main__":
import matplotlib.pyplot as plt
fig = plt.figure(1, (4,3))
ax = SubplotZero(fig, 1, 1, 1)
fig.add_subplot(ax)
ax.axis["xzero"].set_visible(True)
ax.axis["xzero"].label.set_text("Axis Zero")
for n in ["top", "right"]:
ax.axis[n].set_visible(False)
xx = np.arange(0, 2*np.pi, 0.01)
ax.plot(xx, np.sin(xx))
ax.set_ylabel("Test")
plt.draw()
plt.show()
if __name__ == "__main__":
#if 1:
import matplotlib.pyplot as plt
fig = plt.figure(1, (4,3))
ax = Subplot(fig, 1, 1, 1)
fig.add_subplot(ax)
xx = np.arange(0, 2*np.pi, 0.01)
ax.plot(xx, np.sin(xx))
ax.set_ylabel("Test")
ax.axis["top"].major_ticks.set_tick_out(True) #set_tick_direction("out")
ax.axis["bottom"].major_ticks.set_tick_out(True) #set_tick_direction("out")
#ax.axis["bottom"].set_tick_direction("in")
ax.axis["bottom"].set_label("Tk0")
plt.draw()
plt.show()
|
{
"content_hash": "a36e08ba6486f67d0841374acd5e499b",
"timestamp": "",
"source": "github",
"line_count": 895,
"max_line_length": 92,
"avg_line_length": 29.214525139664804,
"alnum_prop": 0.5340956897540827,
"repo_name": "andyraib/data-storage",
"id": "5b9ff70cd1bff27d47e241886c8c0e5063b9a7c1",
"size": "26147",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "python_scripts/env/lib/python3.6/site-packages/mpl_toolkits/axisartist/axislines.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "12403"
}
],
"symlink_target": ""
}
|
import sys
import socket
import serial
import serial.threaded
import time
import datetime
class SerialToNet(serial.threaded.Protocol):
"""serial->socket"""
def __init__(self):
        self.socket = None
        # default to no debug output; the __main__ block below overrides this
        # from the --debug command line flag
        self.debug = False
def __call__(self):
return self
# callback function
# this is called by the ReaderThread on receiving data from
# the serial device
def data_received(self, data):
if self.socket is not None:
# first send data over network
self.socket.sendall(data)
# optionally show it for debug
            if self.debug:
                # repr() keeps the concatenation safe for byte strings on Python 3
                print("r" + datetime.datetime.now().isoformat() + ":" + repr(data))
if __name__ == '__main__': # noqa
import argparse
parser = argparse.ArgumentParser(
description='Simple Serial to Network (TCP/IP) redirector.',
epilog="""\
NOTE: no security measures are implemented. Anyone can remotely connect
to this service over the network.
Only one connection at a time is supported. When the connection is terminated
it waits for the next connection.
""")
parser.add_argument(
'SERIALPORT',
help="serial port name")
parser.add_argument(
'BAUDRATE',
type=int,
nargs='?',
help='set baud rate, default: %(default)s',
default=9600)
parser.add_argument(
'-d', '--debug',
action='store_true',
help='debug',
default=False)
parser.add_argument(
'-q', '--quiet',
action='store_true',
help='suppress non error messages',
default=False)
parser.add_argument(
'--develop',
action='store_true',
help='Development mode, prints Python internals on errors',
default=False)
group = parser.add_argument_group('serial port')
group.add_argument(
"--parity",
choices=['N', 'E', 'O', 'S', 'M'],
type=lambda c: c.upper(),
help="set parity, one of {N E O S M}, default: N",
default='N')
group.add_argument(
'--rtscts',
action='store_true',
help='enable RTS/CTS flow control (default off)',
default=False)
group.add_argument(
'--xonxoff',
action='store_true',
help='enable software flow control (default off)',
default=False)
group.add_argument(
'--rts',
type=int,
help='set initial RTS line state (possible values: 0, 1)',
default=None)
group.add_argument(
'--dtr',
type=int,
help='set initial DTR line state (possible values: 0, 1)',
default=None)
group = parser.add_argument_group('network settings')
exclusive_group = group.add_mutually_exclusive_group()
exclusive_group.add_argument(
'-P', '--localport',
type=int,
help='local TCP port',
default=7777)
exclusive_group.add_argument(
'-c', '--client',
metavar='HOST:PORT',
help='make the connection as a client, instead of running a server',
default=False)
args = parser.parse_args()
# connect to serial port
ser = serial.serial_for_url(args.SERIALPORT, do_not_open=True)
ser.baudrate = args.BAUDRATE
ser.parity = args.parity
ser.rtscts = args.rtscts
ser.xonxoff = args.xonxoff
if args.rts is not None:
ser.rts = args.rts
if args.dtr is not None:
ser.dtr = args.dtr
if not args.quiet:
sys.stderr.write(
'--- TCP/IP to Serial redirect on {p.name} {p.baudrate},{p.bytesize},{p.parity},{p.stopbits} ---\n'
'--- type Ctrl-C / BREAK to quit\n'.format(p=ser))
try:
ser.open()
except serial.SerialException as e:
sys.stderr.write('Could not open serial port {}: {}\n'.format(ser.name, e))
sys.exit(1)
ser_to_net = SerialToNet()
ser_to_net.debug=args.debug
serial_worker = serial.threaded.ReaderThread(ser, ser_to_net)
serial_worker.start()
if not args.client:
# open the socket as a streaming socket
srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
srv.bind(('', args.localport))
srv.listen(1)
try:
intentional_exit = False
while True:
if args.client:
host, port = args.client.split(':')
sys.stderr.write("Opening connection to {}:{}...\n".format(host, port))
client_socket = socket.socket()
try:
client_socket.connect((host, int(port)))
except socket.error as msg:
sys.stderr.write('WARNING: {}\n'.format(msg))
time.sleep(5) # intentional delay on reconnection as client
continue
sys.stderr.write('Connected\n')
client_socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
#~ client_socket.settimeout(5)
else:
sys.stderr.write('Waiting for connection on {}...\n'.format(args.localport))
client_socket, addr = srv.accept()
sys.stderr.write('Connected by {}\n'.format(addr))
# More quickly detect bad clients who quit without closing the
# connection: After 1 second of idle, start sending TCP keep-alive
# packets every 1 second. If 3 consecutive keep-alive packets
# fail, assume the client is gone and close the connection.
try:
client_socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, 1)
client_socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPINTVL, 1)
client_socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPCNT, 3)
client_socket.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
except AttributeError:
pass # XXX not available on windows
client_socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
try:
ser_to_net.socket = client_socket
# enter network <-> serial loop
while True:
try:
                        # read data arriving from the network socket
data = client_socket.recv(2048)
if not data:
break
ser.write(data) # get a bunch of bytes and send them
                        if args.debug:
                            # repr() keeps the concatenation safe for byte strings on Python 3
                            print("s" + datetime.datetime.now().isoformat() + ":" + repr(data))
except socket.error as msg:
if args.develop:
raise
sys.stderr.write('ERROR: {}\n'.format(msg))
# probably got disconnected
break
except KeyboardInterrupt:
intentional_exit = True
raise
except socket.error as msg:
if args.develop:
raise
sys.stderr.write('ERROR: {}\n'.format(msg))
finally:
ser_to_net.socket = None
sys.stderr.write('Disconnected\n')
client_socket.close()
if args.client and not intentional_exit:
time.sleep(5) # intentional delay on reconnection as client
except KeyboardInterrupt:
pass
sys.stderr.write('\n--- exit ---\n')
serial_worker.stop()
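# A hedged client-side sketch (assumes the default server mode on --localport 7777):
# whatever bytes the client sends are written to the serial port, and bytes read
# from the serial port come back over the same TCP connection.
#
#     import socket
#     s = socket.create_connection(("localhost", 7777))
#     s.sendall(b"ATZ\r")
#     print(s.recv(1024))
#     s.close()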
|
{
"content_hash": "d05de8f0fe8ba1db3bb7050ccfda5d18",
"timestamp": "",
"source": "github",
"line_count": 224,
"max_line_length": 112,
"avg_line_length": 33.982142857142854,
"alnum_prop": 0.5478192327903311,
"repo_name": "BITPlan/can4eve",
"id": "5e18f067a71f2d0ff9f93ca48617f7a8be5f9811",
"size": "7807",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/tcp_serial_redirect.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "2687"
},
{
"name": "Java",
"bytes": "725448"
},
{
"name": "Python",
"bytes": "9863"
},
{
"name": "Shell",
"bytes": "5369"
}
],
"symlink_target": ""
}
|
import time
import traceback
import commands
import threading
import json
import pdb
from datetime import datetime
from collections import defaultdict
# Assumes that there are no empty dependencies
# in the graph. E.g. Foo -> []
def dfs(graph, visit):
nodes = graph.keys()
edge_nodes = set()
for n in nodes:
edge_nodes|=set(graph[n])
sinks = list(edge_nodes - set(nodes))
sources = list(set(nodes) - edge_nodes)
nodes.extend(sinks)
visited = set(sources)
stack = sources
while stack:
current = stack.pop()
visit(current)
        # .get() tolerates sink nodes that never appear as keys in the graph
        for node in graph.get(current, []):
if node not in visited:
stack.append(node)
visited.add(node)
return sources
# Topological sort
# Notes:
# - Uses a stack instead of recursion
# - Forfeits optimization involving tracking currently visited nodes
def toposort(g, steps=None):
# Get set of all nodes, including those without outgoing edges
keys = set(g.keys())
values = set({})
for v in g.values():
values=values | set(v)
all_nodes=list(keys|values)
if (not steps):
steps = all_nodes
# Final order
order = []
# DFS stack, not using recursion
stack = []
# Unmarked set
unmarked = all_nodes
# visiting = [] - skip, don't expect 1000s of nodes, |E|/|V| is small
while unmarked:
stack.insert(0,unmarked[0]) # push first unmarked
while (stack):
n = stack[0]
add = True
try:
for m in g[n]:
if (m in unmarked):
if (m not in stack):
add = False
stack.insert(0,m)
else:
# Should not happen, if so there's a loop
print 'Loop at %s'%m
except KeyError:
pass
if (add):
if (n in steps):
order.append(n)
item = stack.pop(0)
unmarked.remove(item)
noorder = list(set(steps) - set(order))
return order + noorder
def main():
graph_file=open('xos.deps').read()
g = json.loads(graph_file)
print toposort(g)
if (__name__=='__main__'):
main()
#print toposort({'a':'b','b':'c','c':'d','d':'c'},['d','c','b','a'])
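# Hedged worked examples (they assume list-valued edges, unlike the string-valued
# graph in the commented call above; an edge points at a prerequisite, so
# prerequisites come first in the returned order):
#   toposort({'a': ['b'], 'b': ['c']})             -> ['c', 'b', 'a']
#   toposort({'a': ['b'], 'b': ['c']}, ['a', 'b']) -> ['b', 'a']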
|
{
"content_hash": "4c0f569c0900935afbbd5f4caea756b2",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 70,
"avg_line_length": 20.24742268041237,
"alnum_prop": 0.6430753564154786,
"repo_name": "xmaruto/mcord",
"id": "c0ec779912d5a21476afcc4e322cf1449a0432f9",
"size": "1987",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "xos/synchronizers/ec2/toposort.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "847306"
},
{
"name": "HTML",
"bytes": "732024"
},
{
"name": "JavaScript",
"bytes": "5293940"
},
{
"name": "Makefile",
"bytes": "13901"
},
{
"name": "Python",
"bytes": "1937152"
},
{
"name": "Shell",
"bytes": "49250"
}
],
"symlink_target": ""
}
|
import webapp2
import datetime
import os
from google.appengine.ext.webapp import template
import alarm
import gae_util
class IndexPage(webapp2.RequestHandler):
def get(self):
utcnow = datetime.datetime.utcnow()
jstTime = gae_util.Utility.convert_jst_time(utcnow)
if jstTime.hour >= 7:
jstTime = jstTime + datetime.timedelta(days=1)
self.response.out.write(template.render('html/index.html',{'selectedYear': jstTime.year,
'selectedMonth': jstTime.month,
'selectedDate': jstTime.day,
'selectedHourOfDay': 7,
'selectedMinute': 0,
'years': self.get_years(),
'months': self.get_months(),
'dates': self.get_dates(),
'hours': self.get_hours(),
'minutes': self.get_minutes(),
}))
def post(self):
email = self.request.get("email")
nickname = self.request.get("nickname")
year = self.request.get("year")
month = self.request.get("month")
date = self.request.get("date")
hourOfDay = self.request.get("hourOfDay")
minute = self.request.get("minute")
        # Validate the input
hasError = False
emailError = False
nicknameError = False
wakeupDateError = False
if email is None or email== '':
emailError = True
hasError = True
if nickname is None or nickname == '':
nicknameError = True
hasError = True
if not self.is_datetime(year, month, date):
wakeupDateError = True
hasError = True
if hasError:
            # The template's ifequal tag only matches when the types agree, so convert to int here
self.response.out.write(template.render('html/index.html',{'selectedYear': int(year),
'selectedMonth': int(month),
'selectedDate': int(date),
'selectedHourOfDay': int(hourOfDay),
'selectedMinute': int(minute),
'years': self.get_years(),
'months': self.get_months(),
'dates': self.get_dates(),
'hours': self.get_hours(),
'minutes': self.get_minutes(),
'email': email,
'emailError': emailError,
'nickname': nickname,
'nicknameError': nicknameError,
'wakeupDateError': wakeupDateError,
}))
return
        # Register the entry in the datastore
wakeupDate = datetime.datetime(int(year), int(month), int(date), int(hourOfDay), int(minute))
        # Datastore times are timezone-naive UTC, so apply the timezone offset when storing
jstWakeupDate = gae_util.Utility.convert_jst_time(wakeupDate)
datastore = alarm.Alarm(key_name = email,
email = email,
nickname = nickname,
wakeupDate = jstWakeupDate,
count = 0)
datastore.put()
self.response.out.write(template.render('html/index_post.html',{'email': email,
'nickname': nickname,
'wakeupDate': wakeupDate,
}))
def get_years(self):
        # Offer two years' worth of choices
utcnow = datetime.datetime.utcnow()
years = []
years.append(utcnow.year)
years.append(utcnow.year + 1)
return years
def get_months(self):
months = []
for month in range(1, 13):
months.append(month)
return months
def get_dates(self):
dates = []
for date in range(1, 32):
dates.append(date)
return dates
def get_hours(self):
hours = []
for hour in range(0, 24):
hours.append(hour)
return hours
def get_minutes(self):
minutes = []
for minute in range(0, 59, 5):
minutes.append(minute)
return minutes
def is_datetime(self, year, month, date):
try:
datetime.date(int(year), int(month), int(date))
return True
except ValueError:
return False
debug = os.environ.get('SERVER_SOFTWARE', '').startswith('Dev')
app = webapp2.WSGIApplication([('/index.html', IndexPage),
('/', IndexPage)], debug=debug)
|
{
"content_hash": "8e1495aa86557a3577def78ce9cba1e9",
"timestamp": "",
"source": "github",
"line_count": 162,
"max_line_length": 107,
"avg_line_length": 39.47530864197531,
"alnum_prop": 0.3605942142298671,
"repo_name": "thinkAmi/9784798123028_GAE",
"id": "13ff9878364fb092d95bf5c7b9269318ae8c78ad",
"size": "6626",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "chap4/index.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "13215"
},
{
"name": "Python",
"bytes": "525558"
}
],
"symlink_target": ""
}
|
import os
import sys
import shutil
import os.path as osp
import json
import time
import datetime
import tempfile
LOG_OUTPUT_FORMATS = ['stdout', 'log', 'json']
DEBUG = 10
INFO = 20
WARN = 30
ERROR = 40
DISABLED = 50
class OutputFormat(object):
def writekvs(self, kvs):
"""
Write key-value pairs
"""
raise NotImplementedError
def writeseq(self, args):
"""
Write a sequence of other data (e.g. a logging message)
"""
pass
def close(self):
return
class HumanOutputFormat(OutputFormat):
def __init__(self, file):
self.file = file
def writekvs(self, kvs):
# Create strings for printing
key2str = {}
for (key, val) in sorted(kvs.items()):
if isinstance(val, float):
valstr = '%-8.3g' % (val,)
else:
valstr = str(val)
key2str[self._truncate(key)] = self._truncate(valstr)
# Find max widths
keywidth = max(map(len, key2str.keys()))
valwidth = max(map(len, key2str.values()))
# Write out the data
dashes = '-' * (keywidth + valwidth + 7)
lines = [dashes]
for (key, val) in sorted(key2str.items()):
lines.append('| %s%s | %s%s |' % (
key,
' ' * (keywidth - len(key)),
val,
' ' * (valwidth - len(val)),
))
lines.append(dashes)
self.file.write('\n'.join(lines) + '\n')
# Flush the output to the file
self.file.flush()
def _truncate(self, s):
return s[:20] + '...' if len(s) > 23 else s
def writeseq(self, args):
for arg in args:
self.file.write(arg)
self.file.write('\n')
self.file.flush()
class JSONOutputFormat(OutputFormat):
def __init__(self, file):
self.file = file
def writekvs(self, kvs):
for k, v in sorted(kvs.items()):
if hasattr(v, 'dtype'):
v = v.tolist()
kvs[k] = float(v)
self.file.write(json.dumps(kvs) + '\n')
self.file.flush()
class TensorBoardOutputFormat(OutputFormat):
"""
Dumps key/value pairs into TensorBoard's numeric format.
"""
def __init__(self, dir):
os.makedirs(dir, exist_ok=True)
self.dir = dir
self.step = 1
prefix = 'events'
path = osp.join(osp.abspath(dir), prefix)
import tensorflow as tf
from tensorflow.python import pywrap_tensorflow
from tensorflow.core.util import event_pb2
from tensorflow.python.util import compat
self.tf = tf
self.event_pb2 = event_pb2
self.pywrap_tensorflow = pywrap_tensorflow
self.writer = pywrap_tensorflow.EventsWriter(compat.as_bytes(path))
def writekvs(self, kvs):
def summary_val(k, v):
kwargs = {'tag': k, 'simple_value': float(v)}
return self.tf.Summary.Value(**kwargs)
summary = self.tf.Summary(value=[summary_val(k, v) for k, v in kvs.items()])
event = self.event_pb2.Event(wall_time=time.time(), summary=summary)
event.step = self.step # is there any reason why you'd want to specify the step?
self.writer.WriteEvent(event)
self.writer.Flush()
self.step += 1
def close(self):
if self.writer:
self.writer.Close()
self.writer = None
def make_output_format(format, ev_dir):
os.makedirs(ev_dir, exist_ok=True)
if format == 'stdout':
return HumanOutputFormat(sys.stdout)
elif format == 'log':
log_file = open(osp.join(ev_dir, 'log.txt'), 'wt')
return HumanOutputFormat(log_file)
elif format == 'json':
json_file = open(osp.join(ev_dir, 'progress.json'), 'wt')
return JSONOutputFormat(json_file)
elif format == 'tensorboard':
return TensorBoardOutputFormat(osp.join(ev_dir, 'tb'))
else:
raise ValueError('Unknown format specified: %s' % (format,))
# ================================================================
# API
# ================================================================
def logkv(key, val):
"""
Log a value of some diagnostic
Call this once for each diagnostic quantity, each iteration
"""
Logger.CURRENT.logkv(key, val)
def logkvs(d):
"""
Log a dictionary of key-value pairs
"""
for (k, v) in d.items():
logkv(k, v)
def dumpkvs():
"""
Write all of the diagnostics from the current iteration
level: int. (see logger.py docs) If the global logger level is higher than
the level argument here, don't print to stdout.
"""
Logger.CURRENT.dumpkvs()
def getkvs():
return Logger.CURRENT.name2val
def log(*args, level=INFO):
"""
Write the sequence of args, with no separators, to the console and output files (if you've configured an output file).
"""
Logger.CURRENT.log(*args, level=level)
def debug(*args):
log(*args, level=DEBUG)
def info(*args):
log(*args, level=INFO)
def warn(*args):
log(*args, level=WARN)
def error(*args):
log(*args, level=ERROR)
def set_level(level):
"""
Set logging threshold on current logger.
"""
Logger.CURRENT.set_level(level)
def get_dir():
"""
Get directory that log files are being written to.
will be None if there is no output directory (i.e., if you didn't call start)
"""
return Logger.CURRENT.get_dir()
record_tabular = logkv
dump_tabular = dumpkvs
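# Hedged usage sketch of the API above (the directory and key names are just
# example values):
#
#     configure(dir="/tmp/my_experiment", format_strs=["stdout", "json"])
#     logkv("episode_reward", 12.3)
#     logkv("timesteps", 10000)
#     dumpkvs()   # prints a table to stdout and appends a line to progress.json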
# ================================================================
# Backend
# ================================================================
class Logger(object):
DEFAULT = None # A logger with no output files. (See right below class definition)
# So that you can still log to the terminal without setting up any output files
CURRENT = None # Current logger being used by the free functions above
def __init__(self, dir, output_formats):
self.name2val = {} # values this iteration
self.level = INFO
self.dir = dir
self.output_formats = output_formats
# Logging API, forwarded
# ----------------------------------------
def logkv(self, key, val):
self.name2val[key] = val
def dumpkvs(self):
if self.level == DISABLED: return
for fmt in self.output_formats:
fmt.writekvs(self.name2val)
self.name2val.clear()
def log(self, *args, level=INFO):
if self.level <= level:
self._do_log(args)
# Configuration
# ----------------------------------------
def set_level(self, level):
self.level = level
def get_dir(self):
return self.dir
def close(self):
for fmt in self.output_formats:
fmt.close()
# Misc
# ----------------------------------------
def _do_log(self, args):
for fmt in self.output_formats:
fmt.writeseq(args)
Logger.DEFAULT = Logger.CURRENT = Logger(dir=None, output_formats=[HumanOutputFormat(sys.stdout)])
def configure(dir=None, format_strs=None):
assert Logger.CURRENT is Logger.DEFAULT,\
"Only call logger.configure() when it's in the default state. Try calling logger.reset() first."
prevlogger = Logger.CURRENT
if dir is None:
dir = os.getenv('OPENAI_LOGDIR')
if dir is None:
dir = osp.join(tempfile.gettempdir(),
datetime.datetime.now().strftime("openai-%Y-%m-%d-%H-%M-%S-%f"))
if format_strs is None:
format_strs = LOG_OUTPUT_FORMATS
output_formats = [make_output_format(f, dir) for f in format_strs]
Logger.CURRENT = Logger(dir=dir, output_formats=output_formats)
log('Logging to %s'%dir)
if os.getenv('OPENAI_LOGDIR'):
# if OPENAI_LOGDIR is set, configure the logger on import
    # this is kind of nasty (unexpected to the user), but I don't know how else to inject the logger
# to a script that's getting run in a subprocess
configure(dir=os.getenv('OPENAI_LOGDIR'))
def reset():
Logger.CURRENT = Logger.DEFAULT
log('Reset logger')
# ================================================================
def _demo():
info("hi")
debug("shouldn't appear")
set_level(DEBUG)
debug("should appear")
dir = "/tmp/testlogging"
if os.path.exists(dir):
shutil.rmtree(dir)
    # the old `session` context manager no longer exists in this module, so use
    # configure()/reset() instead to make the demo runnable
    configure(dir=dir)
    logkv("a", 3)
    logkv("b", 2.5)
    dumpkvs()
    logkv("b", -2.5)
    logkv("a", 5.5)
    dumpkvs()
    info("^^^ should see a = 5.5")
    logkv("b", -2.5)
    dumpkvs()
    logkv("a", "longasslongasslongasslongasslongasslongassvalue")
    dumpkvs()
    reset()
if __name__ == "__main__":
_demo()
|
{
"content_hash": "355cea1de7f728ca8009a9c5a9bbb857",
"timestamp": "",
"source": "github",
"line_count": 314,
"max_line_length": 122,
"avg_line_length": 27.996815286624205,
"alnum_prop": 0.5573882379706518,
"repo_name": "pcchenxi/baseline",
"id": "981c4b2a0625534e76b90efaae5c836988f2dea1",
"size": "8791",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "baselines/logger.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "2171"
},
{
"name": "C++",
"bytes": "242720"
},
{
"name": "Lua",
"bytes": "43847"
},
{
"name": "Python",
"bytes": "544643"
},
{
"name": "Shell",
"bytes": "2395"
}
],
"symlink_target": ""
}
|
"""autoresizelist.py - list wrapper that automatically expands the list when
indices outside its current range are accessed """
__author__ = 'caleb'
__date__ = "2015-02-17"
class AutoResizeList:
def __init__(self, initial_data=None, fill=None):
if initial_data is None:
self._data = []
else:
self._data = initial_data
self.fill = fill
def __setitem__(self, key, value):
if key >= len(self._data):
self._data += [self.fill] * (key - len(self._data) + 1)
self._data[key] = value
def __getitem__(self, key):
#if key >= len(self._data):
# self._data += [self.fill] * (key - len(self._data) + 1)
return self._data[key]
def __delitem__(self, key):
del self._data[key]
def __repr__(self):
return str(self._data)
def __eq__(self, other):
return self._data == other._data
def __len__(self):
return len(self._data)
def append(self, item):
self._data.append(item)
def prepend(self, item):
self._data = [item] + self._data
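# Hedged usage sketch (default fill=None):
#
#     lst = AutoResizeList()
#     lst[3] = 'x'       # auto-expands the backing list to [None, None, None, 'x']
#     len(lst)           # -> 4
#     lst.prepend('y')   # -> ['y', None, None, None, 'x']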
if __name__ == "__main__":
import unittest
testsuite = unittest.TestLoader().discover('test', pattern="*autoresizelist*")
unittest.TextTestRunner(verbosity=1).run(testsuite)
|
{
"content_hash": "da69f7039fc21c6e33d5a15731053869",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 82,
"avg_line_length": 27.574468085106382,
"alnum_prop": 0.5632716049382716,
"repo_name": "calebmadrigal/algorithms-in-python",
"id": "3697957b39844926357a064af8d6f8d4a81fd067",
"size": "1296",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "autoresizelist.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "47622"
}
],
"symlink_target": ""
}
|
"""Script for migrating the contents of celery_taskmeta into etl_task."""
import traceback
from sqlalchemy import create_engine
from sqlalchemy.exc import IntegrityError, ProgrammingError
from plenario.settings import DATABASE_CONN
engine = create_engine(DATABASE_CONN)
def main():
rp = engine.execute("""
select * from meta_shape as ms natural join celery_taskmeta as ct
where ms.celery_task_id = ct.task_id
""")
for row in rp.fetchall():
try:
engine.execute(
"""
insert into etl_task (dataset_name, date_done, status, error, type)
values ('{}', '{}', '{}', '{}', '{}')
""".format(row.dataset_name, row.date_done, row.status, row.traceback, 'shape')
)
except (IntegrityError, ProgrammingError):
traceback.print_exc()
pass
if __name__ == '__main__':
main()
|
{
"content_hash": "650bbfdb2f58163ed188808271d0c6a8",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 95,
"avg_line_length": 27.264705882352942,
"alnum_prop": 0.5868392664509169,
"repo_name": "UrbanCCD-UChicago/plenario",
"id": "b1f0809fef4d5d144a1dbe8c435368a2dc141ef7",
"size": "950",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plenario/utils/migrate_celery_tasks.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "22088"
},
{
"name": "Dockerfile",
"bytes": "514"
},
{
"name": "HTML",
"bytes": "100643"
},
{
"name": "JavaScript",
"bytes": "15770"
},
{
"name": "PLpgSQL",
"bytes": "594"
},
{
"name": "Python",
"bytes": "487024"
}
],
"symlink_target": ""
}
|
from flask import Blueprint
profile = Blueprint('profile', __name__)
from . import views
|
{
"content_hash": "d12e4f5b071c29b18896c3edc888b867",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 40,
"avg_line_length": 18.2,
"alnum_prop": 0.7252747252747253,
"repo_name": "ludolatin/ludolatin",
"id": "a66f70a35397aae40607fb70a35a140c5d5c0d5a",
"size": "91",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "app/profile/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "10085"
},
{
"name": "HTML",
"bytes": "147541"
},
{
"name": "JavaScript",
"bytes": "29045"
},
{
"name": "Python",
"bytes": "72684"
},
{
"name": "Shell",
"bytes": "155"
}
],
"symlink_target": ""
}
|
import logging
import numpy as np
from packaging import version
import ray.ray_constants as ray_constants
class RayParams:
"""A class used to store the parameters used by Ray.
Attributes:
redis_address (str): The address of the Redis server to connect to. If
this address is not provided, then this command will start Redis, a
raylet, a plasma store, a plasma manager, and some workers.
It will also kill these processes when Python exits.
redis_port (int): The port that the primary Redis shard should listen
to. If None, then a random port will be chosen.
redis_shard_ports: A list of the ports to use for the non-primary Redis
shards.
num_cpus (int): Number of CPUs to configure the raylet with.
num_gpus (int): Number of GPUs to configure the raylet with.
resources: A dictionary mapping the name of a resource to the quantity
of that resource available.
memory: Total available memory for workers requesting memory.
object_store_memory: The amount of memory (in bytes) to start the
object store with.
redis_max_memory: The max amount of memory (in bytes) to allow redis
to use, or None for no limit. Once the limit is exceeded, redis
will start LRU eviction of entries. This only applies to the
sharded redis tables (task and object tables).
        object_manager_port (int): The port to use for the object manager.
node_manager_port: The port to use for the node manager.
node_ip_address (str): The IP address of the node that we are on.
object_id_seed (int): Used to seed the deterministic generation of
object IDs. The same value can be used across multiple runs of the
same job in order to generate the object IDs in a consistent
manner. However, the same ID should not be used for different jobs.
local_mode (bool): True if the code should be executed serially
without Ray. This is useful for debugging.
redirect_worker_output: True if the stdout and stderr of worker
processes should be redirected to files.
redirect_output (bool): True if stdout and stderr for non-worker
processes should be redirected to files and false otherwise.
num_redis_shards: The number of Redis shards to start in addition to
the primary Redis shard.
redis_max_clients: If provided, attempt to configure Redis with this
maxclients number.
redis_password (str): Prevents external clients without the password
from connecting to Redis if provided.
plasma_directory: A directory where the Plasma memory mapped files will
be created.
worker_path (str): The path of the source code that will be run by the
worker.
huge_pages: Boolean flag indicating whether to start the Object
Store with hugetlbfs support. Requires plasma_directory.
include_webui: Boolean flag indicating whether to start the web
UI, which displays the status of the Ray cluster. If this value is
None, then the UI will be started if the relevant dependencies are
present.
webui_host: The host to bind the web UI server to. Can either be
localhost (127.0.0.1) or 0.0.0.0 (available from all interfaces).
By default, this is set to localhost to prevent access from
external machines.
logging_level: Logging level, default will be logging.INFO.
logging_format: Logging format, default contains a timestamp,
filename, line number, and message. See ray_constants.py.
plasma_store_socket_name (str): If provided, it will specify the socket
name used by the plasma store.
raylet_socket_name (str): If provided, it will specify the socket path
used by the raylet process.
temp_dir (str): If provided, it will specify the root temporary
directory for the Ray process.
include_log_monitor (bool): If True, then start a log monitor to
monitor the log files for all processes on this node and push their
contents to Redis.
autoscaling_config: path to autoscaling config file.
include_java (bool): If True, the raylet backend can also support
Java worker.
java_worker_options (str): The command options for Java worker.
load_code_from_local: Whether load code from local file or from GCS.
use_pickle: Whether data objects should be serialized with cloudpickle.
_internal_config (str): JSON configuration for overriding
RayConfig defaults. For testing purposes ONLY.
"""
def __init__(self,
redis_address=None,
num_cpus=None,
num_gpus=None,
resources=None,
memory=None,
object_store_memory=None,
redis_max_memory=None,
redis_port=None,
redis_shard_ports=None,
object_manager_port=None,
node_manager_port=None,
node_ip_address=None,
object_id_seed=None,
local_mode=False,
driver_mode=None,
redirect_worker_output=None,
redirect_output=None,
num_redis_shards=None,
redis_max_clients=None,
redis_password=ray_constants.REDIS_DEFAULT_PASSWORD,
plasma_directory=None,
worker_path=None,
huge_pages=False,
include_webui=None,
webui_host="localhost",
logging_level=logging.INFO,
logging_format=ray_constants.LOGGER_FORMAT,
plasma_store_socket_name=None,
raylet_socket_name=None,
temp_dir=None,
include_log_monitor=None,
autoscaling_config=None,
include_java=False,
java_worker_options=None,
load_code_from_local=False,
use_pickle=False,
_internal_config=None):
self.object_id_seed = object_id_seed
self.redis_address = redis_address
self.num_cpus = num_cpus
self.num_gpus = num_gpus
self.memory = memory
self.object_store_memory = object_store_memory
self.resources = resources
self.redis_max_memory = redis_max_memory
self.redis_port = redis_port
self.redis_shard_ports = redis_shard_ports
self.object_manager_port = object_manager_port
self.node_manager_port = node_manager_port
self.node_ip_address = node_ip_address
self.local_mode = local_mode
self.driver_mode = driver_mode
self.redirect_worker_output = redirect_worker_output
self.redirect_output = redirect_output
self.num_redis_shards = num_redis_shards
self.redis_max_clients = redis_max_clients
self.redis_password = redis_password
self.plasma_directory = plasma_directory
self.worker_path = worker_path
self.huge_pages = huge_pages
self.include_webui = include_webui
self.webui_host = webui_host
self.plasma_store_socket_name = plasma_store_socket_name
self.raylet_socket_name = raylet_socket_name
self.temp_dir = temp_dir
self.include_log_monitor = include_log_monitor
self.autoscaling_config = autoscaling_config
self.include_java = include_java
self.java_worker_options = java_worker_options
self.load_code_from_local = load_code_from_local
self.use_pickle = use_pickle
self._internal_config = _internal_config
self._check_usage()
def update(self, **kwargs):
"""Update the settings according to the keyword arguments.
Args:
kwargs: The keyword arguments to set corresponding fields.
"""
for arg in kwargs:
if hasattr(self, arg):
setattr(self, arg, kwargs[arg])
else:
raise ValueError("Invalid RayParams parameter in"
" update: %s" % arg)
self._check_usage()
def update_if_absent(self, **kwargs):
"""Update the settings when the target fields are None.
Args:
kwargs: The keyword arguments to set corresponding fields.
"""
for arg in kwargs:
if hasattr(self, arg):
if getattr(self, arg) is None:
setattr(self, arg, kwargs[arg])
else:
raise ValueError("Invalid RayParams parameter in"
" update_if_absent: %s" % arg)
self._check_usage()
def _check_usage(self):
if self.resources is not None:
assert "CPU" not in self.resources, (
"'CPU' should not be included in the resource dictionary. Use "
"num_cpus instead.")
assert "GPU" not in self.resources, (
"'GPU' should not be included in the resource dictionary. Use "
"num_gpus instead.")
if self.redirect_worker_output is not None:
raise DeprecationWarning(
"The redirect_worker_output argument is deprecated. To "
"control logging to the driver, use the 'log_to_driver' "
"argument to 'ray.init()'")
if self.redirect_output is not None:
raise DeprecationWarning(
"The redirect_output argument is deprecated.")
if self.use_pickle:
assert (version.parse(
np.__version__) >= version.parse("1.16.0")), (
"numpy >= 1.16.0 required for use_pickle=True support. "
"You can use ray.init(use_pickle=False) for older numpy "
"versions, but this may be removed in future versions.")
|
{
"content_hash": "ee8dbe5a8eecf85846993a2c50ef058e",
"timestamp": "",
"source": "github",
"line_count": 217,
"max_line_length": 79,
"avg_line_length": 46.93087557603687,
"alnum_prop": 0.605164964650432,
"repo_name": "stephanie-wang/ray",
"id": "f4750e71dc2b3904bebc8f368af735040aaa54c8",
"size": "10184",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/ray/parameter.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "29882"
},
{
"name": "C++",
"bytes": "2149909"
},
{
"name": "CSS",
"bytes": "8025"
},
{
"name": "Dockerfile",
"bytes": "5499"
},
{
"name": "Go",
"bytes": "28481"
},
{
"name": "HTML",
"bytes": "30435"
},
{
"name": "Java",
"bytes": "738348"
},
{
"name": "JavaScript",
"bytes": "444"
},
{
"name": "Jupyter Notebook",
"bytes": "1615"
},
{
"name": "Makefile",
"bytes": "1965"
},
{
"name": "Python",
"bytes": "4058862"
},
{
"name": "Shell",
"bytes": "88736"
},
{
"name": "Starlark",
"bytes": "121207"
},
{
"name": "TypeScript",
"bytes": "64161"
}
],
"symlink_target": ""
}
|
"""Support for SCSGate covers."""
import logging
from scsgate.tasks import (
HaltRollerShutterTask,
LowerRollerShutterTask,
RaiseRollerShutterTask,
)
import voluptuous as vol
from homeassistant.components.cover import PLATFORM_SCHEMA, CoverEntity
from homeassistant.const import CONF_DEVICES, CONF_NAME
import homeassistant.helpers.config_validation as cv
from . import CONF_SCS_ID, DOMAIN, SCSGATE_SCHEMA
_LOGGER = logging.getLogger(__name__)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{vol.Required(CONF_DEVICES): cv.schema_with_slug_keys(SCSGATE_SCHEMA)}
)
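# Illustrative configuration.yaml snippet (editorial sketch; the device slug, name
# and scs_id values below are placeholders, not taken from the original source):
#
#     cover:
#       - platform: scsgate
#         devices:
#           living_room_shutter:
#             name: Living room shutter
#             scs_id: "31"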
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the SCSGate cover."""
devices = config.get(CONF_DEVICES)
covers = []
logger = logging.getLogger(__name__)
scsgate = hass.data[DOMAIN]
if devices:
for _, entity_info in devices.items():
if entity_info[CONF_SCS_ID] in scsgate.devices:
continue
name = entity_info[CONF_NAME]
scs_id = entity_info[CONF_SCS_ID]
logger.info("Adding %s scsgate.cover", name)
cover = SCSGateCover(
name=name, scs_id=scs_id, logger=logger, scsgate=scsgate
)
scsgate.add_device(cover)
covers.append(cover)
add_entities(covers)
class SCSGateCover(CoverEntity):
"""Representation of SCSGate cover."""
def __init__(self, scs_id, name, logger, scsgate):
"""Initialize the cover."""
self._scs_id = scs_id
self._name = name
self._logger = logger
self._scsgate = scsgate
@property
def scs_id(self):
"""Return the SCSGate ID."""
return self._scs_id
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def name(self):
"""Return the name of the cover."""
return self._name
@property
def is_closed(self):
"""Return if the cover is closed."""
return None
def open_cover(self, **kwargs):
"""Move the cover."""
self._scsgate.append_task(RaiseRollerShutterTask(target=self._scs_id))
def close_cover(self, **kwargs):
"""Move the cover down."""
self._scsgate.append_task(LowerRollerShutterTask(target=self._scs_id))
def stop_cover(self, **kwargs):
"""Stop the cover."""
self._scsgate.append_task(HaltRollerShutterTask(target=self._scs_id))
def process_event(self, message):
"""Handle a SCSGate message related with this cover."""
self._logger.debug("Cover %s, got message %s", self._scs_id, message.toggled)
|
{
"content_hash": "196084079b674deab96db872d8c8da73",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 85,
"avg_line_length": 28.27659574468085,
"alnum_prop": 0.627915726109857,
"repo_name": "mKeRix/home-assistant",
"id": "0c7d057316cad64c5d7af889752d004e7b8393f5",
"size": "2658",
"binary": false,
"copies": "6",
"ref": "refs/heads/dev",
"path": "homeassistant/components/scsgate/cover.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1466026"
},
{
"name": "Python",
"bytes": "4770710"
},
{
"name": "Ruby",
"bytes": "379"
},
{
"name": "Shell",
"bytes": "12407"
}
],
"symlink_target": ""
}
|
"""
Definitions for GraphStore abstract base class and type descriptors for labels and vertex/edge IDs.
"""
from typing import NewType, Hashable, Any, Optional, Iterator, NamedTuple, Union
__all__ = [
'GraphStore',
'VertexID',
'EdgeID',
'DirectedEdgeID',
'UndirectedEdgeID',
'Label',
]
VertexID = NewType('VertexID', Union[int, str, bytes])
Label = NewType('Label', Hashable)
class EdgeID:
"""
Common base class for DirectedEdgeID and UndirectedEdgeID.
"""
@property
def vertices(self) -> Iterator[VertexID]:
"""An iterator, guaranteed to yield exactly 2 vertices, in the same order every time."""
raise NotImplementedError()
@property
def is_directed(self) -> bool:
"""Whether or not the edge is directed."""
raise NotImplementedError()
def __iter__(self) -> Iterator[VertexID]:
raise NotImplementedError()
_DirectedEdgeID = NamedTuple('DirectedEdgeId', [('source', VertexID), ('sink', VertexID)])
class DirectedEdgeID(_DirectedEdgeID, EdgeID):
"""
Edge ID signifiers for directed edges.
"""
def __new__(cls, source: VertexID, sink: VertexID, *args, **kwargs):
return _DirectedEdgeID.__new__(cls, source, sink, *args, **kwargs)
@property
def vertices(self) -> Iterator[VertexID]:
"""An iterator, guaranteed to yield exactly 2 vertices, in the same order every time."""
yield from self
@property
def is_directed(self) -> bool:
"""Whether or not the edge is directed."""
return True
def __iter__(self) -> Iterator[VertexID]:
return super().__iter__()
class UndirectedEdgeID(frozenset, EdgeID):
"""
Edge ID signifiers for undirected edges.
"""
    def __init__(self, vid1: VertexID, vid2: VertexID):
        # The frozenset contents are fixed by __new__; forwarding the vertex IDs to
        # object.__init__ here raises a TypeError, so accept and discard them.
        super().__init__()
def __new__(cls, vid1: VertexID, vid2: VertexID, *args, **kwargs):
return frozenset.__new__(cls, (vid1, vid2), *args, **kwargs)
@property
def is_directed(self) -> bool:
"""Whether or not the edge is directed."""
return False
@property
def vertices(self) -> Iterator[VertexID]:
"""An iterator, guaranteed to yield exactly 2 vertices, in the same order every time."""
if len(self) == 1:
for v in self:
yield v
yield v
else:
try:
yield from sorted(self)
except TypeError:
yield from sorted(self, key=repr)
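# Usage sketch (editorial note, not part of the original module): both edge ID types
# are plain value objects built from two vertex IDs.
#
#     e1 = DirectedEdgeID(VertexID('a'), VertexID('b'))
#     assert e1.is_directed and list(e1.vertices) == ['a', 'b']
#     e2 = UndirectedEdgeID(VertexID('a'), VertexID('b'))
#     assert not e2.is_directed and e2 == UndirectedEdgeID(VertexID('b'), VertexID('a'))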
class GraphStore:
"""
The abstract interface for graph stores. All graph stores must support this interface in order to be accessed via
the first-class object interface (the Graph, Vertex, and Edge classes).
"""
@property
def is_open(self) -> bool:
"""A Boolean value indicating whether the graph store is open. When a graph store is closed, it cannot be
accessed."""
return True # By default, always open
def close(self) -> None:
"""Perform a proper shutdown of the graph store, ensuring that if the graph store is persistent, it will be
in a consistent on-disk state."""
pass # By default, a no-op
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
return False
def count_vertices(self) -> int:
"""Return the total number of vertices in the graph."""
raise NotImplementedError()
def count_edges(self) -> int:
"""Return the total number of edges in the graph."""
raise NotImplementedError()
def iter_vertices(self) -> Iterator[VertexID]:
"""Return an iterator over the IDs of every vertex in the graph."""
raise NotImplementedError()
def iter_edges(self) -> Iterator[EdgeID]:
"""Return an iterator over the IDs of every edge in the graph."""
raise NotImplementedError()
def has_inbound(self, sink: VertexID) -> bool:
"""Return a Boolean value indicating whether the given vertex has at least one inbound edge."""
raise NotImplementedError()
def has_outbound(self, source: VertexID) -> bool:
"""Return a Boolean value indicating whether the given vertex has at least one outbound edge."""
raise NotImplementedError()
def has_undirected(self, vid: VertexID) -> bool:
"""Return a Boolean value indicating whether the given vertex has at least one undirected edge."""
raise NotImplementedError()
def iter_inbound(self, sink: VertexID) -> Iterator[DirectedEdgeID]:
"""Return an iterator over the IDs of every inbound directed edge to this vertex."""
raise NotImplementedError()
def iter_outbound(self, source: VertexID) -> Iterator[DirectedEdgeID]:
"""Return an iterator over the IDs of every outbound directed edge from this vertex."""
raise NotImplementedError()
def iter_undirected(self, vid: VertexID) -> Iterator[UndirectedEdgeID]:
"""Return an iterator over the IDs of every undirected edge connected to this vertex."""
raise NotImplementedError()
def count_inbound(self, sink: VertexID) -> int:
"""Return the number of inbound directed edges to this vertex."""
raise NotImplementedError()
def count_outbound(self, source: VertexID) -> int:
"""Return the number of outbound directed edges from this vertex."""
raise NotImplementedError()
def count_undirected(self, vid: VertexID) -> int:
"""Return the number of undirected edges connected to this vertex."""
raise NotImplementedError()
def has_vertex(self, vid: VertexID) -> bool:
"""Return whether the given ID has a vertex associated with it in the graph."""
raise NotImplementedError()
def has_edge(self, eid: EdgeID) -> bool:
"""Return whether the given ID has an edge associated with it in the graph."""
raise NotImplementedError()
def add_vertex(self, vid: VertexID) -> None:
"""
Add a vertex to the graph associated with this ID. If a vertex with the given ID already exists, do nothing.
"""
raise NotImplementedError()
def add_edge(self, eid: EdgeID) -> None:
"""
Add an edge to the graph associated with this ID. If an edge with the given ID already exists, do nothing. If
either the source or sink vertex of the edge does not exist, add it first.
"""
raise NotImplementedError()
def discard_vertex(self, vid: VertexID) -> bool:
"""
Remove the vertex associated with this ID from the graph. If such a vertex does not exist, do nothing. Any
incident edges to the vertex are also removed. Return a Boolean indicating whether the vertex was present to
be removed.
"""
raise NotImplementedError()
def discard_edge(self, eid: EdgeID, ignore: Optional[VertexID] = None) -> bool:
"""
Remove the edge associated with this ID from the graph. If such an edge does not exist, do nothing. The source
and sink vertex are not removed. Return a Boolean indicating whether the edge was present to be removed.
"""
raise NotImplementedError()
def add_vertex_label(self, vid: VertexID, label: Label) -> None:
"""Add a label to the vertex. If the vertex already has the label, do nothing."""
raise NotImplementedError()
def has_vertex_label(self, vid: VertexID, label: Label) -> bool:
"""Return a Boolean indicating whether the vertex has the label."""
raise NotImplementedError()
def discard_vertex_label(self, vid: VertexID, label: Label) -> bool:
"""
Remove the label from the vertex. If the vertex does not have the label, do nothing. Return a Boolean indicating
whether or not a label was removed.
"""
raise NotImplementedError()
def iter_vertex_labels(self, vid: VertexID) -> Iterator[Label]:
"""Return an iterator over the labels for the vertex."""
raise NotImplementedError()
def count_vertex_labels(self, vid: VertexID) -> int:
"""Return the number of labels the vertex has."""
raise NotImplementedError()
def add_edge_label(self, eid: EdgeID, label: Label) -> None:
"""Add a label to the edge. If the edge already has the label, do nothing."""
raise NotImplementedError()
def has_edge_label(self, eid: EdgeID, label: Label) -> bool:
"""Return a Boolean indicating whether or not the edge has the label."""
raise NotImplementedError()
def discard_edge_label(self, eid: EdgeID, label: Label) -> bool:
"""
Remove the label from the edge. If the edge does not have the label, do nothing. Return a Boolean indicating
whether or not a label was removed.
"""
raise NotImplementedError()
def iter_edge_labels(self, eid: EdgeID) -> Iterator[Label]:
"""Return an iterator over the labels for the edge."""
raise NotImplementedError()
def count_edge_labels(self, eid: EdgeID) -> int:
"""Return the number of labels the edge has."""
raise NotImplementedError()
def get_vertex_data(self, vid: VertexID, key: Hashable) -> Any:
"""Return the value stored in the vertex for this key."""
raise NotImplementedError()
def set_vertex_data(self, vid: VertexID, key: Hashable, value: Any) -> None:
"""Store a value in the vertex for this key."""
raise NotImplementedError()
def has_vertex_data(self, vid: VertexID, key: Hashable) -> bool:
"""Return a Boolean indicating whether a value is stored in the vertex for this key."""
raise NotImplementedError()
def discard_vertex_data(self, vid: VertexID, key: Hashable) -> bool:
"""
Remove the value stored in the vertex under this key. If no value is stored for the key, do nothing. Return
a Boolean indicating whether a key/value pair was removed from the vertex.
"""
raise NotImplementedError()
def iter_vertex_data_keys(self, vid: VertexID) -> Iterator[Hashable]:
"""Return an iterator over the keys for which data is stored in the vertex."""
raise NotImplementedError()
def count_vertex_data_keys(self, vid: VertexID) -> int:
"""Return the number of key/value pairs stored in the vertex."""
raise NotImplementedError()
def get_edge_data(self, eid: EdgeID, key: Hashable) -> Any:
"""Return the value stored in the edge for this key."""
raise NotImplementedError()
def set_edge_data(self, eid: EdgeID, key: Hashable, value: Any) -> None:
"""Store a value in the edge for this key."""
raise NotImplementedError()
def has_edge_data(self, eid: EdgeID, key: Hashable) -> bool:
"""Return a Boolean indicating whether a value is stored in the edge for this key."""
raise NotImplementedError()
def discard_edge_data(self, eid: EdgeID, key: Hashable) -> bool:
"""
Remove the value stored in the edge under this key. If no value is stored for the key, do nothing. Return
a Boolean indicating whether a key/value pair was removed from the edge.
"""
raise NotImplementedError()
def iter_edge_data_keys(self, eid: EdgeID) -> Iterator[Hashable]:
"""Return an iterator over the keys for which data is stored in the edge."""
raise NotImplementedError()
def count_edge_data_keys(self, eid: EdgeID) -> int:
"""Return the number of key/value pairs stored in the edge."""
raise NotImplementedError()
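# Editorial sketch (not part of the original package): a concrete store must override
# every method above; the fragment below only illustrates the vertex half of the
# contract with an in-memory dict, to show the intended shape of an implementation.
#
#     class DictGraphStore(GraphStore):
#         def __init__(self):
#             self._vertices = {}  # VertexID -> {'labels': set(), 'data': {}}
#
#         def count_vertices(self) -> int:
#             return len(self._vertices)
#
#         def has_vertex(self, vid: VertexID) -> bool:
#             return vid in self._vertices
#
#         def add_vertex(self, vid: VertexID) -> None:
#             # "If a vertex with the given ID already exists, do nothing."
#             self._vertices.setdefault(vid, {'labels': set(), 'data': {}})
#
#         def iter_vertices(self) -> Iterator[VertexID]:
#             return iter(self._vertices)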
|
{
"content_hash": "dc4539a3c5959a6c13d4bf9258d825ad",
"timestamp": "",
"source": "github",
"line_count": 307,
"max_line_length": 120,
"avg_line_length": 38.08794788273616,
"alnum_prop": 0.643376379030189,
"repo_name": "hosford42/vert",
"id": "c14c701bad3bb09619442b579caf67e239730ea1",
"size": "11799",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vert/stores/base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "107803"
}
],
"symlink_target": ""
}
|
"""Main entry point for distributed next-gen sequencing pipelines.
Handles running the full pipeline based on instructions
"""
import abc
from collections import defaultdict
import copy
import os
import sys
import resource
import tempfile
import yaml
from bcbio import log, heterogeneity, structural, utils
from bcbio.distributed import prun
from bcbio.distributed.transaction import tx_tmpdir
from bcbio.log import logger
from bcbio.ngsalign import alignprep
from bcbio.pipeline import (archive, config_utils, disambiguate, region,
run_info, qcsummary, rnaseq)
from bcbio.provenance import profile, system
from bcbio.variation import coverage, ensemble, genotype, population, validate, joint
def run_main(workdir, config_file=None, fc_dir=None, run_info_yaml=None,
parallel=None, workflow=None):
"""Run variant analysis, handling command line options.
"""
workdir = utils.safe_makedir(os.path.abspath(workdir))
os.chdir(workdir)
config, config_file = config_utils.load_system_config(config_file, workdir)
if config.get("log_dir", None) is None:
config["log_dir"] = os.path.join(workdir, "log")
if parallel["type"] in ["local", "clusterk"]:
_setup_resources()
_run_toplevel(config, config_file, workdir, parallel,
fc_dir, run_info_yaml)
elif parallel["type"] == "ipython":
assert parallel["scheduler"] is not None, "IPython parallel requires a specified scheduler (-s)"
if parallel["scheduler"] != "sge":
assert parallel["queue"] is not None, "IPython parallel requires a specified queue (-q)"
elif not parallel["queue"]:
parallel["queue"] = ""
_run_toplevel(config, config_file, workdir, parallel,
fc_dir, run_info_yaml)
else:
raise ValueError("Unexpected type of parallel run: %s" % parallel["type"])
def _setup_resources():
"""Attempt to increase resource limits up to hard limits.
    This lets us avoid running out of file handles and processes by raising the
    soft limits up to the hard limits where the system allows it.
"""
target_procs = 10240
cur_proc, max_proc = resource.getrlimit(resource.RLIMIT_NPROC)
target_proc = min(max_proc, target_procs) if max_proc > 0 else target_procs
resource.setrlimit(resource.RLIMIT_NPROC, (max(cur_proc, target_proc), max_proc))
cur_hdls, max_hdls = resource.getrlimit(resource.RLIMIT_NOFILE)
target_hdls = min(max_hdls, target_procs) if max_hdls > 0 else target_procs
resource.setrlimit(resource.RLIMIT_NOFILE, (max(cur_hdls, target_hdls), max_hdls))
def _run_toplevel(config, config_file, work_dir, parallel,
fc_dir=None, run_info_yaml=None):
"""
Run toplevel analysis, processing a set of input files.
config_file -- Main YAML configuration file with system parameters
fc_dir -- Directory of fastq files to process
run_info_yaml -- YAML configuration file specifying inputs to process
"""
parallel = log.create_base_logger(config, parallel)
log.setup_local_logging(config, parallel)
dirs = run_info.setup_directories(work_dir, fc_dir, config, config_file)
config_file = os.path.join(dirs["config"], os.path.basename(config_file))
pipelines, config = _pair_samples_with_pipelines(run_info_yaml, config)
system.write_info(dirs, parallel, config)
with tx_tmpdir(config) as tmpdir:
tempfile.tempdir = tmpdir
for pipeline, samples in pipelines.items():
for xs in pipeline.run(config, run_info_yaml, parallel, dirs, samples):
pass
# ## Generic pipeline framework
def _wres(parallel, progs, fresources=None, ensure_mem=None):
"""Add resource information to the parallel environment on required programs and files.
Enables spinning up required machines and operating in non-shared filesystem
environments.
progs -- Third party tools used in processing
fresources -- Required file-based resources needed. These will be transferred on non-shared
filesystems.
ensure_mem -- Dictionary of required minimum memory for programs used. Ensures
enough memory gets allocated on low-core machines.
"""
parallel = copy.deepcopy(parallel)
parallel["progs"] = progs
if fresources:
parallel["fresources"] = fresources
if ensure_mem:
parallel["ensure_mem"] = ensure_mem
return parallel
class AbstractPipeline:
"""
Implement this class to participate in the Pipeline abstraction.
name: the analysis name in the run_info.yaml file:
design:
- analysis: name
run: the steps run to perform the analyses
"""
__metaclass__ = abc.ABCMeta
@abc.abstractproperty
def name(self):
return
@abc.abstractmethod
def run(self, config, run_info_yaml, parallel, dirs, samples):
return
class Variant2Pipeline(AbstractPipeline):
"""Streamlined variant calling pipeline for large files.
This is less generalized but faster in standard cases.
The goal is to replace the base variant calling approach.
"""
name = "variant2"
@classmethod
def run(self, config, run_info_yaml, parallel, dirs, samples):
## Alignment and preparation requiring the entire input file (multicore cluster)
with prun.start(_wres(parallel, ["aligner", "samtools", "sambamba"],
(["reference", "fasta"], ["reference", "aligner"], ["files"])),
samples, config, dirs, "multicore",
multiplier=alignprep.parallel_multiplier(samples)) as run_parallel:
with profile.report("organize samples", dirs):
samples = run_parallel("organize_samples", [[dirs, config, run_info_yaml,
[x[0]["description"] for x in samples]]])
with profile.report("alignment preparation", dirs):
samples = run_parallel("prep_align_inputs", samples)
samples = run_parallel("disambiguate_split", [samples])
with profile.report("alignment", dirs):
samples = run_parallel("process_alignment", samples)
samples = disambiguate.resolve(samples, run_parallel)
samples = alignprep.merge_split_alignments(samples, run_parallel)
with profile.report("callable regions", dirs):
samples = run_parallel("prep_samples", [samples])
samples = run_parallel("postprocess_alignment", samples)
samples = run_parallel("combine_sample_regions", [samples])
samples = region.clean_sample_data(samples)
with profile.report("coverage", dirs):
samples = coverage.summarize_samples(samples, run_parallel)
with profile.report("structural variation initial", dirs):
samples = structural.run(samples, run_parallel, initial_only=True)
## Variant calling on sub-regions of the input file (full cluster)
with prun.start(_wres(parallel, ["gatk", "picard", "variantcaller"]),
samples, config, dirs, "full",
multiplier=region.get_max_counts(samples), max_multicore=1) as run_parallel:
with profile.report("alignment post-processing", dirs):
samples = region.parallel_prep_region(samples, run_parallel)
with profile.report("variant calling", dirs):
samples = genotype.parallel_variantcall_region(samples, run_parallel)
## Finalize variants, BAMs and population databases (per-sample multicore cluster)
with prun.start(_wres(parallel, ["gatk", "gatk-vqsr", "snpeff", "bcbio_variation",
"gemini", "samtools", "fastqc", "bamtools",
"bcbio-variation-recall", "qsignature"]),
samples, config, dirs, "multicore2") as run_parallel:
with profile.report("joint squaring off/backfilling", dirs):
samples = joint.square_off(samples, run_parallel)
with profile.report("variant post-processing", dirs):
samples = run_parallel("postprocess_variants", samples)
samples = run_parallel("split_variants_by_sample", samples)
with profile.report("prepped BAM merging", dirs):
samples = region.delayed_bamprep_merge(samples, run_parallel)
with profile.report("validation", dirs):
samples = run_parallel("compare_to_rm", samples)
samples = genotype.combine_multiple_callers(samples)
with profile.report("ensemble calling", dirs):
samples = ensemble.combine_calls_parallel(samples, run_parallel)
with profile.report("validation summary", dirs):
samples = validate.summarize_grading(samples)
with profile.report("structural variation final", dirs):
samples = structural.run(samples, run_parallel)
with profile.report("heterogeneity", dirs):
samples = heterogeneity.run(samples, run_parallel)
with profile.report("population database", dirs):
samples = population.prep_db_parallel(samples, run_parallel)
with profile.report("quality control", dirs):
samples = qcsummary.generate_parallel(samples, run_parallel)
with prun.start(_wres(parallel, ["gatk"], ensure_mem = {"gatk": 8}),
samples, config, dirs, "coverage") as run_parallel:
with profile.report("report", dirs):
samples = qcsummary.report_summary(samples, run_parallel)
with profile.report("archive", dirs):
samples = archive.compress(samples, run_parallel)
with profile.report("upload", dirs):
samples = run_parallel("upload_samples", samples)
for sample in samples:
run_parallel("upload_samples_project", [sample])
logger.info("Timing: finished")
return samples
def _debug_samples(i, samples):
print "---", i, len(samples)
for sample in (x[0] for x in samples):
print " ", sample["description"], sample.get("region"), \
utils.get_in(sample, ("config", "algorithm", "variantcaller")), \
utils.get_in(sample, ("config", "algorithm", "jointcaller")), \
utils.get_in(sample, ("metadata", "batch")), \
[x.get("variantcaller") for x in sample.get("variants", [])], \
sample.get("work_bam"), \
sample.get("vrn_file")
class SNPCallingPipeline(Variant2Pipeline):
"""Back compatible: old name for variant analysis.
"""
name = "SNP calling"
class VariantPipeline(Variant2Pipeline):
"""Back compatibility; old name
"""
name = "variant"
class StandardPipeline(AbstractPipeline):
"""Minimal pipeline with alignment and QC.
"""
name = "Standard"
@classmethod
def run(self, config, run_info_yaml, parallel, dirs, samples):
## Alignment and preparation requiring the entire input file (multicore cluster)
with prun.start(_wres(parallel, ["aligner", "samtools", "sambamba"]),
samples, config, dirs, "multicore") as run_parallel:
with profile.report("organize samples", dirs):
samples = run_parallel("organize_samples", [[dirs, config, run_info_yaml,
[x[0]["description"] for x in samples]]])
with profile.report("alignment", dirs):
samples = run_parallel("process_alignment", samples)
with profile.report("callable regions", dirs):
samples = run_parallel("prep_samples", [samples])
samples = run_parallel("postprocess_alignment", samples)
samples = run_parallel("combine_sample_regions", [samples])
samples = region.clean_sample_data(samples)
## Quality control
with prun.start(_wres(parallel, ["fastqc", "bamtools", "qsignature", "kraken", "gatk"], ensure_mem={"gatk" : 2}),
samples, config, dirs, "multicore2") as run_parallel:
with profile.report("quality control", dirs):
samples = qcsummary.generate_parallel(samples, run_parallel)
with profile.report("report", dirs):
samples = qcsummary.report_summary(samples, run_parallel)
with profile.report("upload", dirs):
samples = run_parallel("upload_samples", samples)
for sample in samples:
run_parallel("upload_samples_project", [sample])
logger.info("Timing: finished")
return samples
class MinimalPipeline(StandardPipeline):
name = "Minimal"
class SailfishPipeline(AbstractPipeline):
name = "sailfish"
@classmethod
def run(self, config, run_info_yaml, parallel, dirs, samples):
with prun.start(_wres(parallel, ["picard", "cutadapt"]),
samples, config, dirs, "trimming") as run_parallel:
with profile.report("organize samples", dirs):
samples = run_parallel("organize_samples", [[dirs, config, run_info_yaml,
[x[0]["description"] for x in samples]]])
with profile.report("adapter trimming", dirs):
samples = run_parallel("prepare_sample", samples)
samples = run_parallel("trim_sample", samples)
with prun.start(_wres(parallel, ["sailfish"]), samples, config, dirs,
"sailfish") as run_parallel:
with profile.report("sailfish", dirs):
samples = run_parallel("run_sailfish", samples)
with profile.report("upload", dirs):
samples = run_parallel("upload_samples", samples)
for sample in samples:
run_parallel("upload_samples_project", [sample])
return samples
class RnaseqPipeline(AbstractPipeline):
name = "RNA-seq"
@classmethod
def run(self, config, run_info_yaml, parallel, dirs, samples):
with prun.start(_wres(parallel, ["picard", "cutadapt"]),
samples, config, dirs, "trimming") as run_parallel:
with profile.report("organize samples", dirs):
samples = run_parallel("organize_samples", [[dirs, config, run_info_yaml,
[x[0]["description"] for x in samples]]])
with profile.report("adapter trimming", dirs):
samples = run_parallel("prepare_sample", samples)
samples = run_parallel("trim_sample", samples)
with prun.start(_wres(parallel, ["aligner", "picard"],
ensure_mem={"tophat": 8, "tophat2": 8, "star": 2}),
samples, config, dirs, "alignment",
multiplier=alignprep.parallel_multiplier(samples)) as run_parallel:
with profile.report("alignment", dirs):
samples = run_parallel("disambiguate_split", [samples])
samples = run_parallel("process_alignment", samples)
with prun.start(_wres(parallel, ["samtools", "cufflinks"]),
samples, config, dirs, "rnaseqcount") as run_parallel:
with profile.report("disambiguation", dirs):
samples = disambiguate.resolve(samples, run_parallel)
with profile.report("transcript assembly", dirs):
samples = rnaseq.assemble_transcripts(run_parallel, samples)
with profile.report("estimate expression (threaded)", dirs):
samples = rnaseq.quantitate_expression_parallel(samples, run_parallel)
with prun.start(_wres(parallel, ["dexseq", "express"]), samples, config,
dirs, "rnaseqcount-singlethread", max_multicore=1) as run_parallel:
with profile.report("estimate expression (single threaded)", dirs):
samples = rnaseq.quantitate_expression_noparallel(samples, run_parallel)
samples = rnaseq.combine_files(samples)
with prun.start(_wres(parallel, ["gatk"]), samples, config,
dirs, "rnaseq-variation") as run_parallel:
with profile.report("RNA-seq variant calling", dirs):
samples = rnaseq.rnaseq_variant_calling(samples, run_parallel)
with prun.start(_wres(parallel, ["picard", "fastqc", "qualimap", "kraken", "gatk"],
ensure_mem={"qualimap": 4}),
samples, config, dirs, "qc") as run_parallel:
with profile.report("quality control", dirs):
samples = qcsummary.generate_parallel(samples, run_parallel)
with profile.report("upload", dirs):
samples = run_parallel("upload_samples", samples)
for sample in samples:
run_parallel("upload_samples_project", [sample])
logger.info("Timing: finished")
return samples
class smallRnaseqPipeline(AbstractPipeline):
name = "smallRNA-seq"
@classmethod
def run(self, config, run_info_yaml, parallel, dirs, samples):
with prun.start(_wres(parallel, ["picard", "cutadapt", "miraligner"]),
samples, config, dirs, "trimming") as run_parallel:
with profile.report("organize samples", dirs):
samples = run_parallel("organize_samples", [[dirs, config, run_info_yaml,
[x[0]["description"] for x in samples]]])
with profile.report("adapter trimming", dirs):
samples = run_parallel("prepare_sample", samples)
samples = run_parallel("trim_srna_sample", samples)
samples = run_parallel("seqbuster", samples)
with prun.start(_wres(parallel, ["aligner", "picard"],
ensure_mem={"bowtie": 8, "bowtie2": 8, "star": 2}),
[samples[0]], config, dirs, "alignment") as run_parallel:
with profile.report("prepare", dirs):
samples = run_parallel("seqcluster_prepare", [samples])
with profile.report("alignment", dirs):
samples = run_parallel("srna_alignment", [samples])
with prun.start(_wres(parallel, ["seqcluster"],
ensure_mem={"seqcluster": 8}),
[samples[0]], config, dirs, "cluster") as run_parallel:
with profile.report("cluster", dirs):
samples = run_parallel("seqcluster_cluster", [samples])
with prun.start(_wres(parallel, ["picard", "fastqc"]),
samples, config, dirs, "qc") as run_parallel:
with profile.report("quality control", dirs):
samples = qcsummary.generate_parallel(samples, run_parallel)
with profile.report("upload", dirs):
samples = run_parallel("upload_samples", samples)
for sample in samples:
run_parallel("upload_samples_project", [sample])
return samples
class ChipseqPipeline(AbstractPipeline):
name = "chip-seq"
@classmethod
def run(self, config, run_info_yaml, parallel, dirs, samples):
with prun.start(_wres(parallel, ["aligner", "picard"]),
samples, config, dirs, "multicore",
multiplier=alignprep.parallel_multiplier(samples)) as run_parallel:
with profile.report("organize samples", dirs):
samples = run_parallel("organize_samples", [[dirs, config, run_info_yaml,
[x[0]["description"] for x in samples]]])
samples = run_parallel("prepare_sample", samples)
samples = run_parallel("trim_sample", samples)
samples = run_parallel("disambiguate_split", [samples])
samples = run_parallel("process_alignment", samples)
with prun.start(_wres(parallel, ["picard", "fastqc"]),
samples, config, dirs, "persample") as run_parallel:
with profile.report("disambiguation", dirs):
samples = disambiguate.resolve(samples, run_parallel)
samples = run_parallel("clean_chipseq_alignment", samples)
samples = qcsummary.generate_parallel(samples, run_parallel)
with profile.report("upload", dirs):
samples = run_parallel("upload_samples", samples)
for sample in samples:
run_parallel("upload_samples_project", [sample])
return samples
def _get_pipeline(item):
from bcbio.log import logger
SUPPORTED_PIPELINES = {x.name.lower(): x for x in
utils.itersubclasses(AbstractPipeline)}
analysis_type = item.get("analysis", "").lower()
if analysis_type not in SUPPORTED_PIPELINES:
logger.error("Cannot determine which type of analysis to run, "
"set in the run_info under details.")
sys.exit(1)
else:
return SUPPORTED_PIPELINES[analysis_type]
def _pair_samples_with_pipelines(run_info_yaml, config):
"""Map samples defined in input file to pipelines to run.
"""
with open(run_info_yaml) as in_handle:
samples = yaml.safe_load(in_handle)
if isinstance(samples, dict):
resources = samples.pop("resources", {})
samples = samples["details"]
else:
resources = {}
ready_samples = []
for sample in samples:
if "files" in sample:
del sample["files"]
# add any resources to this item to recalculate global configuration
usample = copy.deepcopy(sample)
usample.pop("algorithm", None)
if "resources" not in usample:
usample["resources"] = {}
        for prog, pkvs in resources.items():
if prog not in usample["resources"]:
usample["resources"][prog] = {}
            for key, val in pkvs.items():
usample["resources"][prog][key] = val
config = config_utils.update_w_custom(config, usample)
sample["resources"] = {}
ready_samples.append(sample)
paired = [(x, _get_pipeline(x)) for x in ready_samples]
d = defaultdict(list)
for x in paired:
d[x[1]].append([x[0]])
return d, config
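# Editorial sketch (not from the original source): given a run_info.yaml such as
#
#     details:
#       - analysis: variant2
#         description: Sample1
#       - analysis: RNA-seq
#         description: Sample2
#
# _pair_samples_with_pipelines returns a dict keyed by pipeline class, e.g.
# {Variant2Pipeline: [[sample1]], RnaseqPipeline: [[sample2]]}, plus the updated
# config; _run_toplevel then calls each pipeline class's run() once with its samples.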
|
{
"content_hash": "020c81b01a50c5b38e6275cc09e97e62",
"timestamp": "",
"source": "github",
"line_count": 448,
"max_line_length": 121,
"avg_line_length": 50.75,
"alnum_prop": 0.6011171710063336,
"repo_name": "fw1121/bcbio-nextgen",
"id": "d17084d2c71ce30414bfd433371e1c5f322e97a6",
"size": "22736",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bcbio/pipeline/main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1452773"
},
{
"name": "Ruby",
"bytes": "624"
},
{
"name": "Shell",
"bytes": "11523"
}
],
"symlink_target": ""
}
|
"""Tests for pix2pix."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from nets import pix2pix
class GeneratorTest(tf.test.TestCase):
def _reduced_default_blocks(self):
"""Returns the default blocks, scaled down to make test run faster."""
return [pix2pix.Block(b.num_filters // 32, b.decoder_keep_prob)
for b in pix2pix._default_generator_blocks()]
def test_output_size_nn_upsample_conv(self):
batch_size = 2
height, width = 256, 256
num_outputs = 4
images = tf.ones((batch_size, height, width, 3))
with tf.contrib.framework.arg_scope(pix2pix.pix2pix_arg_scope()):
logits, _ = pix2pix.pix2pix_generator(
images, num_outputs, blocks=self._reduced_default_blocks(),
upsample_method='nn_upsample_conv')
with self.test_session() as session:
session.run(tf.global_variables_initializer())
np_outputs = session.run(logits)
self.assertListEqual([batch_size, height, width, num_outputs],
list(np_outputs.shape))
def test_output_size_conv2d_transpose(self):
batch_size = 2
height, width = 256, 256
num_outputs = 4
images = tf.ones((batch_size, height, width, 3))
with tf.contrib.framework.arg_scope(pix2pix.pix2pix_arg_scope()):
logits, _ = pix2pix.pix2pix_generator(
images, num_outputs, blocks=self._reduced_default_blocks(),
upsample_method='conv2d_transpose')
with self.test_session() as session:
session.run(tf.global_variables_initializer())
np_outputs = session.run(logits)
self.assertListEqual([batch_size, height, width, num_outputs],
list(np_outputs.shape))
def test_block_number_dictates_number_of_layers(self):
batch_size = 2
height, width = 256, 256
num_outputs = 4
images = tf.ones((batch_size, height, width, 3))
blocks = [
pix2pix.Block(64, 0.5),
pix2pix.Block(128, 0),
]
with tf.contrib.framework.arg_scope(pix2pix.pix2pix_arg_scope()):
_, end_points = pix2pix.pix2pix_generator(
images, num_outputs, blocks)
num_encoder_layers = 0
num_decoder_layers = 0
for end_point in end_points:
if end_point.startswith('encoder'):
num_encoder_layers += 1
elif end_point.startswith('decoder'):
num_decoder_layers += 1
self.assertEqual(num_encoder_layers, len(blocks))
self.assertEqual(num_decoder_layers, len(blocks))
class DiscriminatorTest(tf.test.TestCase):
def _layer_output_size(self, input_size, kernel_size=4, stride=2, pad=2):
return (input_size + pad * 2 - kernel_size) // stride + 1
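  # Worked example (editorial note, not from the original test): with the defaults
  # kernel_size=4 and pad=2, an input of 256 shrinks as 256 -> 129 -> 65 -> 33 over
  # the three stride-2 layers, then grows to 34 -> 35 over the two stride-1 layers,
  # so test_four_layers expects logits of shape [batch_size, 35, 35, 1].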
def test_four_layers(self):
batch_size = 2
input_size = 256
output_size = self._layer_output_size(input_size)
output_size = self._layer_output_size(output_size)
output_size = self._layer_output_size(output_size)
output_size = self._layer_output_size(output_size, stride=1)
output_size = self._layer_output_size(output_size, stride=1)
images = tf.ones((batch_size, input_size, input_size, 3))
with tf.contrib.framework.arg_scope(pix2pix.pix2pix_arg_scope()):
logits, end_points = pix2pix.pix2pix_discriminator(
images, num_filters=[64, 128, 256, 512])
self.assertListEqual([batch_size, output_size, output_size, 1],
logits.shape.as_list())
self.assertListEqual([batch_size, output_size, output_size, 1],
end_points['predictions'].shape.as_list())
def test_four_layers_no_padding(self):
batch_size = 2
input_size = 256
output_size = self._layer_output_size(input_size, pad=0)
output_size = self._layer_output_size(output_size, pad=0)
output_size = self._layer_output_size(output_size, pad=0)
output_size = self._layer_output_size(output_size, stride=1, pad=0)
output_size = self._layer_output_size(output_size, stride=1, pad=0)
images = tf.ones((batch_size, input_size, input_size, 3))
with tf.contrib.framework.arg_scope(pix2pix.pix2pix_arg_scope()):
logits, end_points = pix2pix.pix2pix_discriminator(
images, num_filters=[64, 128, 256, 512], padding=0)
self.assertListEqual([batch_size, output_size, output_size, 1],
logits.shape.as_list())
self.assertListEqual([batch_size, output_size, output_size, 1],
end_points['predictions'].shape.as_list())
  def test_four_layers_wrong_padding(self):
batch_size = 2
input_size = 256
images = tf.ones((batch_size, input_size, input_size, 3))
with tf.contrib.framework.arg_scope(pix2pix.pix2pix_arg_scope()):
with self.assertRaises(TypeError):
pix2pix.pix2pix_discriminator(
images, num_filters=[64, 128, 256, 512], padding=1.5)
def test_four_layers_negative_padding(self):
batch_size = 2
input_size = 256
images = tf.ones((batch_size, input_size, input_size, 3))
with tf.contrib.framework.arg_scope(pix2pix.pix2pix_arg_scope()):
with self.assertRaises(ValueError):
pix2pix.pix2pix_discriminator(
images, num_filters=[64, 128, 256, 512], padding=-1)
if __name__ == '__main__':
tf.test.main()
|
{
"content_hash": "f2eb7abb1e4e28a303801f5d76e7dbab",
"timestamp": "",
"source": "github",
"line_count": 142,
"max_line_length": 75,
"avg_line_length": 37.190140845070424,
"alnum_prop": 0.6502556334027646,
"repo_name": "cshallue/models",
"id": "ab5acb5c1979d7b9ab21d5a164055e8ebfc3232b",
"size": "5965",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "research/slim/nets/pix2pix_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "1523636"
},
{
"name": "Dockerfile",
"bytes": "9821"
},
{
"name": "GLSL",
"bytes": "976"
},
{
"name": "HTML",
"bytes": "147010"
},
{
"name": "JavaScript",
"bytes": "33208"
},
{
"name": "Jupyter Notebook",
"bytes": "2829707"
},
{
"name": "Makefile",
"bytes": "4933"
},
{
"name": "Python",
"bytes": "13149300"
},
{
"name": "Shell",
"bytes": "146035"
}
],
"symlink_target": ""
}
|
"""
Forms and validation code for user registration.
Note that all of these forms assume Django's bundled default ``User``
model; since it's not possible for a form to anticipate in advance the
needs of custom user models, you will need to write your own forms if
you're using a custom model.
"""
try:
from django.contrib.auth import get_user_model
except ImportError:
from django.contrib.auth.models import User
def get_user_model():
return User
from django import forms
from django.utils.translation import ugettext_lazy as _
class RegistrationForm(forms.Form):
"""
Form for registering a new user account.
Validates that the requested username is not already in use, and
requires the password to be entered twice to catch typos.
Subclasses should feel free to add any additional validation they
need, but should avoid defining a ``save()`` method -- the actual
saving of collected user data is delegated to the active
registration backend.
"""
required_css_class = 'required'
username = forms.RegexField(regex=r'^[\w.@+-]+$',
max_length=30,
label=_("Username"),
error_messages={'invalid': _("This value may contain only letters, numbers and @/./+/-/_ characters.")})
email = forms.EmailField(label=_("E-mail"))
password1 = forms.CharField(widget=forms.PasswordInput,
label=_("Password"))
password2 = forms.CharField(widget=forms.PasswordInput,
label=_("Password (again)"))
def clean_username(self):
"""
        Validate that the requested username is not already in use.
"""
User = get_user_model()
existing = User.objects.filter(username__iexact=self.cleaned_data['username'])
if existing.exists():
raise forms.ValidationError(_("A user with that username already exists."))
else:
return self.cleaned_data['username']
def clean(self):
"""
        Verify that the values entered into the two password fields
match. Note that an error here will end up in
``non_field_errors()`` because it doesn't apply to a single
field.
"""
if 'password1' in self.cleaned_data and 'password2' in self.cleaned_data:
if self.cleaned_data['password1'] != self.cleaned_data['password2']:
raise forms.ValidationError(_("The two password fields didn't match."))
return self.cleaned_data
class RegistrationFormTermsOfService(RegistrationForm):
"""
Subclass of ``RegistrationForm`` which adds a required checkbox
for agreeing to a site's Terms of Service.
"""
tos = forms.BooleanField(widget=forms.CheckboxInput,
label=_(u'I have read and agree to the Terms of Service'),
error_messages={'required': _("You must agree to the terms to register")})
class RegistrationFormUniqueEmail(RegistrationForm):
"""
Subclass of ``RegistrationForm`` which enforces uniqueness of
email addresses.
"""
def clean_email(self):
"""
Validate that the supplied email address is unique for the
site.
"""
User = get_user_model()
if User.objects.filter(email__iexact=self.cleaned_data['email']):
raise forms.ValidationError(_("This email address is already in use. Please supply a different email address."))
return self.cleaned_data['email']
class RegistrationFormNoFreeEmail(RegistrationForm):
"""
Subclass of ``RegistrationForm`` which disallows registration with
email addresses from popular free webmail services; moderately
useful for preventing automated spam registrations.
To change the list of banned domains, subclass this form and
override the attribute ``bad_domains``.
"""
bad_domains = ['aim.com', 'aol.com', 'email.com', 'gmail.com',
'googlemail.com', 'hotmail.com', 'hushmail.com',
'msn.com', 'mail.ru', 'mailinator.com', 'live.com',
'yahoo.com']
def clean_email(self):
"""
Check the supplied email address against a list of known free
webmail domains.
"""
email_domain = self.cleaned_data['email'].split('@')[1]
if email_domain in self.bad_domains:
raise forms.ValidationError(_("Registration using free email addresses is prohibited. Please supply a different email address."))
return self.cleaned_data['email']
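# Illustrative usage (editorial sketch, not part of the original module): the
# variants above can be combined via multiple inheritance, e.g.
#
#     class MyRegistrationForm(RegistrationFormUniqueEmail,
#                              RegistrationFormTermsOfService):
#         """Require a unique email address and agreement to the ToS."""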
|
{
"content_hash": "c68290325c168bd5ef66100cb4020438",
"timestamp": "",
"source": "github",
"line_count": 129,
"max_line_length": 141,
"avg_line_length": 36.13178294573643,
"alnum_prop": 0.6337695773439176,
"repo_name": "gone/django-registration",
"id": "72850268764bc1ea365a2e325f052c744ea5397b",
"size": "4661",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "registration/forms.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "77025"
},
{
"name": "Shell",
"bytes": "2985"
}
],
"symlink_target": ""
}
|
""" Finetuning the library models for sequence classification on GLUE."""
# You can also adapt this script to your own text classification task. Pointers for this are left as comments.
import json
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import numpy as np
import tensorflow as tf
from datasets import load_dataset
import evaluate
import transformers
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorWithPadding,
DefaultDataCollator,
HfArgumentParser,
PretrainedConfig,
PushToHubCallback,
TFAutoModelForSequenceClassification,
TFTrainingArguments,
create_optimizer,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
from transformers.utils import check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.25.0.dev0")
task_to_keys = {
"cola": ("sentence", None),
"mnli": ("premise", "hypothesis"),
"mrpc": ("sentence1", "sentence2"),
"qnli": ("question", "sentence"),
"qqp": ("question1", "question2"),
"rte": ("sentence1", "sentence2"),
"sst2": ("sentence", None),
"stsb": ("sentence1", "sentence2"),
"wnli": ("sentence1", "sentence2"),
}
logger = logging.getLogger(__name__)
# region Command-line arguments
@dataclass
class DataTrainingArguments:
"""
    Arguments pertaining to what data we are going to input to our model for training and eval.
Using `HfArgumentParser` we can turn this class
into argparse arguments to be able to specify them on
the command line.
"""
task_name: str = field(
metadata={"help": "The name of the task to train on: " + ", ".join(task_to_keys.keys())},
)
predict_file: str = field(
metadata={"help": "A file containing user-supplied examples to make predictions for"},
default=None,
)
max_seq_length: int = field(
default=128,
metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
},
)
overwrite_cache: bool = field(
default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
)
pad_to_max_length: bool = field(
default=False,
metadata={
"help": (
"Whether to pad all samples to `max_seq_length`. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch."
)
},
)
max_train_samples: Optional[int] = field(
default=None,
metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
},
)
max_eval_samples: Optional[int] = field(
default=None,
metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
},
)
max_predict_samples: Optional[int] = field(
default=None,
metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of prediction examples to this "
"value if set."
)
},
)
def __post_init__(self):
self.task_name = self.task_name.lower()
if self.task_name not in task_to_keys.keys():
raise ValueError("Unknown task, you should pick one in " + ",".join(task_to_keys.keys()))
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
"""
model_name_or_path: str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
)
config_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
)
tokenizer_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
)
cache_dir: Optional[str] = field(
default=None,
metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
)
use_fast_tokenizer: bool = field(
default=True,
metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
)
model_revision: str = field(
default="main",
metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
)
use_auth_token: bool = field(
default=False,
metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
},
)
# endregion
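# Example invocation (editorial sketch): the flag names mirror the dataclass fields
# above plus the standard TFTrainingArguments flags; the checkpoint and output path
# below are placeholders.
#
#     python run_glue.py \
#         --model_name_or_path bert-base-cased \
#         --task_name mrpc \
#         --do_train --do_eval \
#         --max_seq_length 128 \
#         --output_dir /tmp/tf_glue_mrpc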
def main():
# region Argument parsing
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_glue", model_args, data_args, framework="tensorflow")
if not (training_args.do_train or training_args.do_eval or training_args.do_predict):
exit("Must specify at least one of --do_train, --do_eval or --do_predict!")
# endregion
# region Checkpoints
checkpoint = None
if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
checkpoint = get_last_checkpoint(training_args.output_dir)
if checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome."
)
elif checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"Checkpoint detected, resuming training at {checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
)
# endregion
# region Logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
handlers=[logging.StreamHandler(sys.stdout)],
)
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info(f"Training/evaluation parameters {training_args}")
# endregion
# region Dataset and labels
# Set seed before initializing model.
set_seed(training_args.seed)
    # Downloading and loading a dataset from the hub. In distributed training, the load_dataset function guarantees
# that only one local process can concurrently download the dataset.
datasets = load_dataset(
"glue",
data_args.task_name,
cache_dir=model_args.cache_dir,
use_auth_token=True if model_args.use_auth_token else None,
)
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
is_regression = data_args.task_name == "stsb"
if not is_regression:
label_list = datasets["train"].features["label"].names
num_labels = len(label_list)
else:
num_labels = 1
if data_args.predict_file is not None:
logger.info("Preparing user-supplied file for predictions...")
data_files = {"data": data_args.predict_file}
for key in data_files.keys():
logger.info(f"Loading a local file for {key}: {data_files[key]}")
if data_args.predict_file.endswith(".csv"):
# Loading a dataset from local csv files
user_dataset = load_dataset("csv", data_files=data_files, cache_dir=model_args.cache_dir)
else:
# Loading a dataset from local json files
user_dataset = load_dataset("json", data_files=data_files, cache_dir=model_args.cache_dir)
needed_keys = task_to_keys[data_args.task_name]
for key in needed_keys:
assert key in user_dataset["data"].features, f"Your supplied predict_file is missing the {key} key!"
datasets["user_data"] = user_dataset["data"]
# endregion
# region Load model config and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
config = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path,
num_labels=num_labels,
finetuning_task=data_args.task_name,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
use_fast=model_args.use_fast_tokenizer,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
# endregion
# region Dataset preprocessing
sentence1_key, sentence2_key = task_to_keys[data_args.task_name]
# Padding strategy
if data_args.pad_to_max_length:
padding = "max_length"
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
padding = False
# Some models have set the order of the labels to use, so let's make sure we do use it.
label_to_id = None
if config.label2id != PretrainedConfig(num_labels=num_labels).label2id and not is_regression:
# Some have all caps in their config, some don't.
label_name_to_id = {k.lower(): v for k, v in config.label2id.items()}
if list(sorted(label_name_to_id.keys())) == list(sorted(label_list)):
label_to_id = {i: int(label_name_to_id[label_list[i]]) for i in range(num_labels)}
else:
logger.warning(
"Your model seems to have been trained with labels, but they don't match the dataset: ",
f"model labels: {list(sorted(label_name_to_id.keys()))}, dataset labels: {list(sorted(label_list))}."
"\nIgnoring the model labels as a result.",
)
label_to_id = {label: i for i, label in enumerate(label_list)}
if label_to_id is not None:
config.label2id = label_to_id
config.id2label = {id: label for label, id in config.label2id.items()}
elif data_args.task_name is not None and not is_regression:
config.label2id = {l: i for i, l in enumerate(label_list)}
config.id2label = {id: label for label, id in config.label2id.items()}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
)
max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
def preprocess_function(examples):
# Tokenize the texts
args = (
(examples[sentence1_key],) if sentence2_key is None else (examples[sentence1_key], examples[sentence2_key])
)
result = tokenizer(*args, padding=padding, max_length=max_seq_length, truncation=True)
return result
datasets = datasets.map(preprocess_function, batched=True, load_from_cache_file=not data_args.overwrite_cache)
if data_args.pad_to_max_length:
data_collator = DefaultDataCollator(return_tensors="tf")
else:
data_collator = DataCollatorWithPadding(tokenizer, return_tensors="tf")
# endregion
# region Metric function
metric = evaluate.load("glue", data_args.task_name)
def compute_metrics(preds, label_ids):
preds = preds["logits"]
preds = np.squeeze(preds) if is_regression else np.argmax(preds, axis=1)
result = metric.compute(predictions=preds, references=label_ids)
if len(result) > 1:
result["combined_score"] = np.mean(list(result.values())).item()
return result
# endregion
with training_args.strategy.scope():
# region Load pretrained model
if checkpoint is None:
model_path = model_args.model_name_or_path
else:
model_path = checkpoint
model = TFAutoModelForSequenceClassification.from_pretrained(
model_path,
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
# endregion
# region Convert data to a tf.data.Dataset
dataset_options = tf.data.Options()
dataset_options.experimental_distribute.auto_shard_policy = tf.data.experimental.AutoShardPolicy.OFF
num_replicas = training_args.strategy.num_replicas_in_sync
tf_data = dict()
max_samples = {
"train": data_args.max_train_samples,
"validation": data_args.max_eval_samples,
"validation_matched": data_args.max_eval_samples,
"validation_mismatched": data_args.max_eval_samples,
"test": data_args.max_predict_samples,
"test_matched": data_args.max_predict_samples,
"test_mismatched": data_args.max_predict_samples,
"user_data": None,
}
for key in datasets.keys():
if key == "train" or key.startswith("validation"):
assert "label" in datasets[key].features, f"Missing labels from {key} data!"
if key == "train":
shuffle = True
batch_size = training_args.per_device_train_batch_size * num_replicas
else:
shuffle = False
batch_size = training_args.per_device_eval_batch_size * num_replicas
samples_limit = max_samples[key]
dataset = datasets[key]
if samples_limit is not None:
dataset = dataset.select(range(samples_limit))
# model.prepare_tf_dataset() wraps a Hugging Face dataset in a tf.data.Dataset which is ready to use in
# training. This is the recommended way to use a Hugging Face dataset when training with Keras. You can also
# use the lower-level dataset.to_tf_dataset() method, but you will have to specify things like column names
# yourself if you use this method, whereas they are automatically inferred from the model input names when
# using model.prepare_tf_dataset()
# For more info see the docs:
# https://huggingface.co/docs/transformers/main/en/main_classes/model#transformers.TFPreTrainedModel.prepare_tf_dataset
# https://huggingface.co/docs/datasets/main/en/package_reference/main_classes#datasets.Dataset.to_tf_dataset
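            # For illustration only (not executed): the lower-level alternative would look
            # roughly like the commented call below; the exact column handling can differ by
            # model and task, so treat this as a sketch rather than a drop-in replacement.
            # data = dataset.to_tf_dataset(
            #     columns=[c for c in dataset.column_names if c != "label"],
            #     label_cols="label",
            #     shuffle=shuffle,
            #     batch_size=batch_size,
            #     collate_fn=data_collator,
            # )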
data = model.prepare_tf_dataset(
dataset,
shuffle=shuffle,
batch_size=batch_size,
collate_fn=data_collator,
tokenizer=tokenizer,
)
data = data.with_options(dataset_options)
tf_data[key] = data
# endregion
# region Optimizer, loss and compilation
if training_args.do_train:
num_train_steps = len(tf_data["train"]) * training_args.num_train_epochs
if training_args.warmup_steps > 0:
num_warmup_steps = training_args.warmup_steps
elif training_args.warmup_ratio > 0:
num_warmup_steps = int(num_train_steps * training_args.warmup_ratio)
else:
num_warmup_steps = 0
optimizer, schedule = create_optimizer(
init_lr=training_args.learning_rate,
num_train_steps=num_train_steps,
num_warmup_steps=num_warmup_steps,
adam_beta1=training_args.adam_beta1,
adam_beta2=training_args.adam_beta2,
adam_epsilon=training_args.adam_epsilon,
weight_decay_rate=training_args.weight_decay,
adam_global_clipnorm=training_args.max_grad_norm,
)
else:
optimizer = "adam" # Just write anything because we won't be using it
if is_regression:
metrics = []
else:
metrics = ["accuracy"]
model.compile(optimizer=optimizer, metrics=metrics, jit_compile=training_args.xla)
# endregion
# region Preparing push_to_hub and model card
push_to_hub_model_id = training_args.push_to_hub_model_id
model_name = model_args.model_name_or_path.split("/")[-1]
if not push_to_hub_model_id:
push_to_hub_model_id = f"{model_name}-finetuned-glue"
model_card_kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-classification"}
model_card_kwargs["task_name"] = data_args.task_name
if training_args.push_to_hub:
callbacks = [
PushToHubCallback(
output_dir=training_args.output_dir,
model_id=push_to_hub_model_id,
organization=training_args.push_to_hub_organization,
token=training_args.push_to_hub_token,
tokenizer=tokenizer,
**model_card_kwargs,
)
]
else:
callbacks = []
# endregion
# region Training and validation
if training_args.do_train:
if training_args.do_eval and not data_args.task_name == "mnli":
# Do both evaluation and training in the Keras fit loop, unless the task is MNLI
# because MNLI has two validation sets
validation_data = tf_data["validation"]
else:
validation_data = None
model.fit(
tf_data["train"],
validation_data=validation_data,
epochs=int(training_args.num_train_epochs),
callbacks=callbacks,
)
# endregion
# region Evaluation
if training_args.do_eval:
# We normally do validation as part of the Keras fit loop, but we run it independently
# if there was no fit() step (because we didn't train the model) or if the task is MNLI,
# because MNLI has a separate validation-mismatched validation set
# In this example, we compute advanced metrics only at the end of training, and only compute
# loss and accuracy on the validation set each epoch, but
# if you'd like to compute metrics every epoch that are too complex to be written as
# standard Keras metrics, you can use our KerasMetricCallback. See
# https://huggingface.co/docs/transformers/main/en/main_classes/keras_callbacks
logger.info("*** Evaluate ***")
# Loop to handle MNLI double evaluation (matched, mis-matched)
if data_args.task_name == "mnli":
tasks = ["mnli", "mnli-mm"]
tf_datasets = [tf_data["validation_matched"], tf_data["validation_mismatched"]]
raw_datasets = [datasets["validation_matched"], datasets["validation_mismatched"]]
else:
tasks = [data_args.task_name]
tf_datasets = [tf_data["validation"]]
raw_datasets = [datasets["validation"]]
for raw_dataset, tf_dataset, task in zip(raw_datasets, tf_datasets, tasks):
eval_predictions = model.predict(tf_dataset)
eval_metrics = compute_metrics(eval_predictions, raw_dataset["label"])
print(f"Evaluation metrics ({task}):")
print(eval_metrics)
if training_args.output_dir is not None:
output_eval_file = os.path.join(training_args.output_dir, "all_results.json")
with open(output_eval_file, "w") as writer:
writer.write(json.dumps(eval_metrics))
# endregion
# region Prediction
if training_args.do_predict or data_args.predict_file:
logger.info("*** Predict ***")
# Loop to handle MNLI double evaluation (matched, mis-matched)
tasks = []
tf_datasets = []
raw_datasets = []
if training_args.do_predict:
if data_args.task_name == "mnli":
tasks.extend(["mnli", "mnli-mm"])
tf_datasets.extend([tf_data["test_matched"], tf_data["test_mismatched"]])
raw_datasets.extend([datasets["test_matched"], datasets["test_mismatched"]])
else:
tasks.append(data_args.task_name)
tf_datasets.append(tf_data["test"])
raw_datasets.append(datasets["test"])
if data_args.predict_file:
tasks.append("user_data")
tf_datasets.append(tf_data["user_data"])
raw_datasets.append(datasets["user_data"])
for raw_dataset, tf_dataset, task in zip(raw_datasets, tf_datasets, tasks):
test_predictions = model.predict(tf_dataset)
if "label" in raw_dataset:
test_metrics = compute_metrics(test_predictions, raw_dataset["label"])
print(f"Test metrics ({task}):")
print(test_metrics)
if is_regression:
predictions_to_write = np.squeeze(test_predictions["logits"])
else:
predictions_to_write = np.argmax(test_predictions["logits"], axis=1)
output_predict_file = os.path.join(training_args.output_dir, f"predict_results_{task}.txt")
with open(output_predict_file, "w") as writer:
logger.info(f"***** Writing prediction results for {task} *****")
writer.write("index\tprediction\n")
for index, item in enumerate(predictions_to_write):
if is_regression:
writer.write(f"{index}\t{item:3.3f}\n")
else:
item = model.config.id2label[item]
writer.write(f"{index}\t{item}\n")
# endregion
if training_args.output_dir is not None and not training_args.push_to_hub:
# If we're not pushing to hub, at least save a local copy when we're done
model.save_pretrained(training_args.output_dir)
if __name__ == "__main__":
main()
|
{
"content_hash": "300a112b2bd6f1ccb0f3aecdf9be97ba",
"timestamp": "",
"source": "github",
"line_count": 571,
"max_line_length": 131,
"avg_line_length": 42.8353765323993,
"alnum_prop": 0.6108589885113864,
"repo_name": "huggingface/transformers",
"id": "1a373ef364cda41e196ff63cc29d67249b807e0b",
"size": "25107",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "examples/tensorflow/text-classification/run_glue.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "6021"
},
{
"name": "C++",
"bytes": "12959"
},
{
"name": "Cuda",
"bytes": "175419"
},
{
"name": "Dockerfile",
"bytes": "18218"
},
{
"name": "Jsonnet",
"bytes": "937"
},
{
"name": "Makefile",
"bytes": "3430"
},
{
"name": "Python",
"bytes": "35742012"
},
{
"name": "Shell",
"bytes": "30374"
}
],
"symlink_target": ""
}
|
import os
import sys
import time
import fuse
import errno
import logging
import argparse
from relfs.sftp_utils import SFTPFileReader
# ------------------------------------------------------------------------------
class RoSFuseItem(object):
# --------------------------------------------------------------------------
def __init__(self):
pass
# --------------------------------------------------------------------------
def find_item(self, split_path):
if not split_path or split_path == ["."]:
return self
# ------------------------------------------------------------------------------
class RoSFuseFile(object):
# --------------------------------------------------------------------------
def __init__(self, file_reader, split_path):
self._file_reader = file_reader
self._path = os.path.join(*split_path)
self._file_reader.open_file(self._path)
# --------------------------------------------------------------------------
def __del__(self):
try:
self._file_reader.close_file(self._path)
except AttributeError:
pass
# --------------------------------------------------------------------------
def mode(self):
return 0o100440
# --------------------------------------------------------------------------
def size(self):
return self._file_reader.file_size(self._path)
# --------------------------------------------------------------------------
def read(self, length, offset):
return self._file_reader.read_file(self._path, length, offset)
# ------------------------------------------------------------------------------
class RoSFuseRoot(RoSFuseItem):
# --------------------------------------------------------------------------
def __init__(self, options):
RoSFuseItem.__init__(self)
self._file_reader = SFTPFileReader(options)
# --------------------------------------------------------------------------
def find_item(self, split_path):
if not split_path or split_path == ["."]:
return self
try:
return RoSFuseFile(self._file_reader, split_path)
except IOError:
pass
# --------------------------------------------------------------------------
def mode(self):
return 0o40440
# --------------------------------------------------------------------------
def size(self):
return 0
# --------------------------------------------------------------------------
def read(self, length, offset):
return str()
# ------------------------------------------------------------------------------
class RoSFuse(fuse.Operations):
# --------------------------------------------------------------------------
def __init__(self, options):
self._options = options
self._mount_time = time.time()
self._root = RoSFuseRoot(options)
self._open_files = dict()
# --------------------------------------------------------------------------
def _update(self):
pass
# --------------------------------------------------------------------------
def find_entry(self, path):
        split_path = [part for part in path.lstrip(os.sep).split(os.sep) if part]
item = self._root.find_item(split_path)
if item is not None:
return item
raise fuse.FuseOSError(errno.ENOENT)
# --------------------------------------------------------------------------
def access(self, path, mode):
self._update()
if mode & os.R_OK:
return 0
raise fuse.FuseOSError(errno.EACCES)
# --------------------------------------------------------------------------
def chmod(self, path, mode):
self._update()
raise fuse.FuseOSError(errno.EROFS)
# --------------------------------------------------------------------------
def chown(self, path, uid, gid):
self._update()
raise fuse.FuseOSError(errno.EROFS)
# --------------------------------------------------------------------------
def getattr(self, path, fh=None):
self._update()
item = self.find_entry(path)
return {
"st_size": item.size(),
"st_mode": item.mode(),
"st_uid": os.getuid(),
"st_gid": os.getgid(),
"st_nlink": 1,
"st_atime": time.time(),
"st_mtime": time.time(),
"st_ctime": self._mount_time
}
# --------------------------------------------------------------------------
def readdir(self, path, fh):
self._update()
yield "."
yield ".."
# --------------------------------------------------------------------------
def readlink(self, path):
self._update()
raise fuse.FuseOSError(errno.EINVAL)
# --------------------------------------------------------------------------
def mknod(self, path, mode, dev):
self._update()
raise fuse.FuseOSError(errno.EROFS)
# --------------------------------------------------------------------------
def rmdir(self, path):
self._update()
raise fuse.FuseOSError(errno.EROFS)
# --------------------------------------------------------------------------
def mkdir(self, path, mode):
self._update()
raise fuse.FuseOSError(errno.EROFS)
# --------------------------------------------------------------------------
def statfs(self, path):
self._update()
return {
'f_bsize': 32,
'f_frsize': 32,
'f_bavail': 0,
'f_favail': 0,
'f_bfree': 0,
'f_ffree': 0,
'f_blocks': 0,
'f_files': 0,
'f_flag': 4096,
'f_namemax': 255}
# --------------------------------------------------------------------------
def unlink(self, path):
self._update()
raise fuse.FuseOSError(errno.EROFS)
# --------------------------------------------------------------------------
def symlink(self, name, target):
self._update()
raise fuse.FuseOSError(errno.EROFS)
# --------------------------------------------------------------------------
def rename(self, old, new):
self._update()
raise fuse.FuseOSError(errno.EROFS)
# --------------------------------------------------------------------------
def link(self, target, name):
self._update()
raise fuse.FuseOSError(errno.EROFS)
# --------------------------------------------------------------------------
def utimens(self, path, times=None):
self._update()
return time.time()
# --------------------------------------------------------------------------
    def _get_free_fd(self):
        # Return the next unused descriptor number; the lowest one handed out is 3.
        try:
            return max(self._open_files) + 1
        except ValueError:
            return 3
# --------------------------------------------------------------------------
def open(self, path, flags):
self._update()
item = self.find_entry(path)
fd = self._get_free_fd()
self._open_files[fd] = item
return fd
# --------------------------------------------------------------------------
def create(self, path, mode, fi=None):
self._update()
raise fuse.FuseOSError(errno.EROFS)
# --------------------------------------------------------------------------
def read(self, path, length, offset, fh):
self._update()
try:
return self._open_files[fh].read(length, offset)
except KeyError:
raise fuse.FuseOSError(errno.EBADF)
# --------------------------------------------------------------------------
def write(self, path, buf, offset, fh):
self._update()
raise fuse.FuseOSError(errno.EROFS)
# --------------------------------------------------------------------------
def truncate(self, path, length, fh=None):
self._update()
raise fuse.FuseOSError(errno.EROFS)
# --------------------------------------------------------------------------
def release(self, path, fh):
self._update()
del self._open_files[fh]
# --------------------------------------------------------------------------
def flush(self, path, fh):
self._update()
# --------------------------------------------------------------------------
def fsync(self, path, fdatasync, fh):
self._update()
# ------------------------------------------------------------------------------
class RoSFuseArgParser(argparse.ArgumentParser):
# --------------------------------------------------------------------------
@staticmethod
def _mountable_directory(arg):
if not os.path.isdir(arg):
msg = "'%s' is not a directory path" % (arg)
raise argparse.ArgumentTypeError(msg)
return os.path.realpath(arg)
# --------------------------------------------------------------------------
def __init__(self):
argparse.ArgumentParser.__init__(
self,
prog="rosfusetp",
description="""
Read-only FUSE file-system driver,
            which allows reading known files over SFTP
"""
)
self.add_argument(
"--mount-point", "-m",
dest="mount_point",
type=self._mountable_directory,
default=None,
action="store",
help="""Specifies the file-system mount-point path."""
)
self.add_argument(
"--host", "-H",
dest="hostname",
type=str,
default=None,
action="store",
help="""Specifies the remote host; can be SSH host config entry."""
)
self.add_argument(
"--allow-agent", "-A",
dest="allow_agent",
default=False,
action="store_true",
help="""Use the SSH key agent."""
)
self.add_argument(
"--password-only", "-P",
dest="password_only",
default=True, # TODO: False
action="store_true",
help="""Try only password authentication."""
)
self.add_argument(
"--prefix", "-p",
dest="remote_prefix",
type=str,
default=None,
action="store",
help="""The remote host directory prefix"""
)
# ------------------------------------------------------------------------------
def main():
logging.basicConfig()
options = RoSFuseArgParser().parse_args()
fuse.FUSE(
RoSFuse(options),
options.mount_point,
nothreads=True,
foreground=True
)
# ------------------------------------------------------------------------------
if __name__ == '__main__':
main()
# ------------------------------------------------------------------------------
|
{
"content_hash": "517f6b8b2d26cbc53fbb223e996be756",
"timestamp": "",
"source": "github",
"line_count": 316,
"max_line_length": 80,
"avg_line_length": 35.06645569620253,
"alnum_prop": 0.34238787113076435,
"repo_name": "matus-chochlik/various",
"id": "71856fb55d1e6c329f661aec1754d5ffe2dbc70e",
"size": "11197",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "relfs/rosfuse.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "44686"
},
{
"name": "C++",
"bytes": "76296"
},
{
"name": "Makefile",
"bytes": "8751"
},
{
"name": "Python",
"bytes": "295676"
},
{
"name": "QML",
"bytes": "19387"
},
{
"name": "QMake",
"bytes": "3981"
},
{
"name": "Roff",
"bytes": "13504"
},
{
"name": "Shell",
"bytes": "88324"
},
{
"name": "TeX",
"bytes": "199528"
},
{
"name": "Vim script",
"bytes": "1417"
},
{
"name": "XSLT",
"bytes": "2225"
}
],
"symlink_target": ""
}
|
import asyncio
import random
import threading
from time import sleep
import warnings
import dask
from dask import delayed
import pytest
from distributed import (
worker_client,
Client,
as_completed,
get_worker,
wait,
get_client,
)
from distributed.metrics import time
from distributed.utils_test import double, gen_cluster, inc
from distributed.utils_test import client, cluster_fixture, loop # noqa: F401
@gen_cluster(client=True)
async def test_submit_from_worker(c, s, a, b):
def func(x):
with worker_client() as c:
x = c.submit(inc, x)
y = c.submit(double, x)
result = x.result() + y.result()
return result
x, y = c.map(func, [10, 20])
xx, yy = await c._gather([x, y])
assert xx == 10 + 1 + (10 + 1) * 2
assert yy == 20 + 1 + (20 + 1) * 2
assert len(s.transition_log) > 10
assert len([id for id in s.wants_what if id.lower().startswith("client")]) == 1
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 2)
async def test_scatter_from_worker(c, s, a, b):
def func():
with worker_client() as c:
futures = c.scatter([1, 2, 3, 4, 5])
assert isinstance(futures, (list, tuple))
assert len(futures) == 5
x = dict(get_worker().data)
y = {f.key: i for f, i in zip(futures, [1, 2, 3, 4, 5])}
assert x == y
total = c.submit(sum, futures)
return total.result()
future = c.submit(func)
result = await future
assert result == sum([1, 2, 3, 4, 5])
def func():
with worker_client() as c:
correct = True
for data in [[1, 2], (1, 2), {1, 2}]:
futures = c.scatter(data)
correct &= type(futures) == type(data)
o = object()
futures = c.scatter({"x": o})
correct &= get_worker().data["x"] is o
return correct
future = c.submit(func)
result = await future
assert result is True
start = time()
while not all(v == 1 for v in s.nthreads.values()):
await asyncio.sleep(0.1)
assert time() < start + 5
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 2)
async def test_scatter_singleton(c, s, a, b):
np = pytest.importorskip("numpy")
def func():
with worker_client() as c:
x = np.ones(5)
future = c.scatter(x)
assert future.type == np.ndarray
await c.submit(func)
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 2)
async def test_gather_multi_machine(c, s, a, b):
a_address = a.address
b_address = b.address
assert a_address != b_address
def func():
with worker_client() as ee:
x = ee.submit(inc, 1, workers=a_address)
y = ee.submit(inc, 2, workers=b_address)
xx, yy = ee.gather([x, y])
return xx, yy
future = c.submit(func)
result = await future
assert result == (2, 3)
@gen_cluster(client=True)
async def test_same_loop(c, s, a, b):
def f():
with worker_client() as lc:
return lc.loop is get_worker().loop
future = c.submit(f)
result = await future
assert result
def test_sync(client):
def mysum():
result = 0
sub_tasks = [delayed(double)(i) for i in range(100)]
with worker_client() as lc:
futures = lc.compute(sub_tasks)
for f in as_completed(futures):
result += f.result()
return result
assert delayed(mysum)().compute() == 9900
@gen_cluster(client=True)
async def test_async(c, s, a, b):
def mysum():
result = 0
sub_tasks = [delayed(double)(i) for i in range(100)]
with worker_client() as lc:
futures = lc.compute(sub_tasks)
for f in as_completed(futures):
result += f.result()
return result
future = c.compute(delayed(mysum)())
await future
start = time()
while len(a.data) + len(b.data) > 1:
await asyncio.sleep(0.1)
assert time() < start + 3
@gen_cluster(client=True, nthreads=[("127.0.0.1", 3)])
async def test_separate_thread_false(c, s, a):
a.count = 0
def f(i):
with worker_client(separate_thread=False) as client:
get_worker().count += 1
assert get_worker().count <= 3
sleep(random.random() / 40)
assert get_worker().count <= 3
get_worker().count -= 1
return i
futures = c.map(f, range(20))
results = await c._gather(futures)
assert list(results) == list(range(20))
@gen_cluster(client=True)
async def test_client_executor(c, s, a, b):
def mysum():
with worker_client() as c:
with c.get_executor() as e:
return sum(e.map(double, range(30)))
future = c.submit(mysum)
result = await future
assert result == 30 * 29
def test_dont_override_default_get(loop):
import dask.bag as db
def f(x):
with worker_client() as c:
return True
b = db.from_sequence([1, 2])
b2 = b.map(f)
with Client(
loop=loop, processes=False, set_as_default=True, dashboard_address=None
) as c:
assert dask.base.get_scheduler() == c.get
for i in range(2):
b2.compute()
assert dask.base.get_scheduler() == c.get
@gen_cluster(client=True)
async def test_local_client_warning(c, s, a, b):
from distributed import local_client
def func(x):
with warnings.catch_warnings(record=True) as record:
with local_client() as c:
x = c.submit(inc, x)
result = x.result()
assert any("worker_client" in str(r.message) for r in record)
return result
future = c.submit(func, 10)
result = await future
assert result == 11
@gen_cluster(client=True)
async def test_closing_worker_doesnt_close_client(c, s, a, b):
def func(x):
get_client()
return
await wait(c.map(func, range(10)))
await a.close()
assert c.status == "running"
def test_timeout(client):
def func():
with worker_client(timeout=0) as wc:
print("hello")
future = client.submit(func)
with pytest.raises(EnvironmentError):
result = future.result()
def test_secede_without_stealing_issue_1262():
"""
Tests that seceding works with the Stealing extension disabled
https://github.com/dask/distributed/issues/1262
"""
# turn off all extensions
extensions = []
# run the loop as an inner function so all workers are closed
# and exceptions can be examined
@gen_cluster(client=True, scheduler_kwargs={"extensions": extensions})
async def secede_test(c, s, a, b):
def func(x):
with worker_client() as wc:
y = wc.submit(lambda: 1 + x)
return wc.gather(y)
f = await c.gather(c.submit(func, 1))
return c, s, a, b, f
c, s, a, b, f = secede_test()
assert f == 2
# ensure no workers had errors
assert all([f.exception() is None for f in s._worker_coroutines])
@gen_cluster(client=True)
async def test_compute_within_worker_client(c, s, a, b):
@dask.delayed
def f():
with worker_client():
return dask.delayed(lambda x: x)(1).compute()
result = await c.compute(f())
assert result == 1
@gen_cluster(client=True)
async def test_worker_client_rejoins(c, s, a, b):
def f():
with worker_client():
pass
return threading.current_thread() in get_worker().executor._threads
result = await c.submit(f)
assert result
@gen_cluster()
async def test_submit_different_names(s, a, b):
# https://github.com/dask/distributed/issues/2058
da = pytest.importorskip("dask.array")
c = await Client(
"localhost:" + s.address.split(":")[-1], loop=s.loop, asynchronous=True
)
try:
X = c.persist(da.random.uniform(size=(100, 10), chunks=50))
await wait(X)
fut = await c.submit(lambda x: x.sum().compute(), X)
assert fut > 0
finally:
await c.close()
|
{
"content_hash": "a58b276707d72608851e30658cddb614",
"timestamp": "",
"source": "github",
"line_count": 317,
"max_line_length": 83,
"avg_line_length": 25.92429022082019,
"alnum_prop": 0.5715502555366269,
"repo_name": "blaze/distributed",
"id": "09ae20e8f203153d5ac89210ca8857288813dce3",
"size": "8218",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "distributed/tests/test_worker_client.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "236"
},
{
"name": "Python",
"bytes": "511624"
},
{
"name": "Shell",
"bytes": "1120"
}
],
"symlink_target": ""
}
|
from Orange.widgets import widget, gui
from Orange.widgets.settings import Setting
class OWWidgetName(widget.OWWidget):
name = "Widget Name"
id = "orange.widgets.widget_category.widget_name"
description = ""
icon = "icons/Unknown.svg"
priority = 10
category = ""
keywords = ["list", "of", "keywords"]
outputs = [("Name", type)]
inputs = [("Name", type, "handler")]
want_main_area = False
foo = Setting(True)
def __init__(self):
super().__init__()
# controls
gui.rubber(self.controlArea)
def handler(self, obj):
pass
|
{
"content_hash": "7b58af77717a435c2a9b5d9776618f52",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 53,
"avg_line_length": 23.307692307692307,
"alnum_prop": 0.6006600660066007,
"repo_name": "qPCR4vir/orange3",
"id": "d035825951f00920018ba074ad6c46fc39a7ec03",
"size": "606",
"binary": false,
"copies": "16",
"ref": "refs/heads/master",
"path": "Orange/widgets/widgetTemplate.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "20412"
},
{
"name": "C++",
"bytes": "1992"
},
{
"name": "GLSL",
"bytes": "75"
},
{
"name": "HTML",
"bytes": "3503"
},
{
"name": "JavaScript",
"bytes": "12007"
},
{
"name": "Jupyter Notebook",
"bytes": "6662"
},
{
"name": "NSIS",
"bytes": "20281"
},
{
"name": "Python",
"bytes": "4205054"
},
{
"name": "Shell",
"bytes": "48335"
}
],
"symlink_target": ""
}
|
from .copy_source import CopySource
class SapCloudForCustomerSource(CopySource):
"""A copy activity source for SAP Cloud for Customer source.
:param additional_properties: Unmatched properties from the message are
     deserialized to this collection
:type additional_properties: dict[str, object]
:param source_retry_count: Source retry count. Type: integer (or
Expression with resultType integer).
:type source_retry_count: object
:param source_retry_wait: Source retry wait. Type: string (or Expression
with resultType string), pattern:
((\\d+)\\.)?(\\d\\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
:type source_retry_wait: object
:param type: Constant filled by server.
:type type: str
:param query: SAP Cloud for Customer OData query. For example, "$top=1".
Type: string (or Expression with resultType string).
:type query: object
"""
_validation = {
'type': {'required': True},
}
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'source_retry_count': {'key': 'sourceRetryCount', 'type': 'object'},
'source_retry_wait': {'key': 'sourceRetryWait', 'type': 'object'},
'type': {'key': 'type', 'type': 'str'},
'query': {'key': 'query', 'type': 'object'},
}
def __init__(self, additional_properties=None, source_retry_count=None, source_retry_wait=None, query=None):
super(SapCloudForCustomerSource, self).__init__(additional_properties=additional_properties, source_retry_count=source_retry_count, source_retry_wait=source_retry_wait)
self.query = query
self.type = 'SapCloudForCustomerSource'
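# --- Illustrative usage sketch (not part of the original SDK file) ---
# The query and retry settings below are arbitrary example values; such an
# object would be used as the `source` of a copy activity.
def _example_sap_c4c_source():
    """Build a sample SapCloudForCustomerSource (illustration only)."""
    return SapCloudForCustomerSource(
        query="$top=10",
        source_retry_count=3,
        source_retry_wait="00:00:30",
    )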
|
{
"content_hash": "e8249646c9da923dddb5ff036f2075b5",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 176,
"avg_line_length": 43.12820512820513,
"alnum_prop": 0.6504161712247325,
"repo_name": "lmazuel/azure-sdk-for-python",
"id": "771684a97c5669b5f57dcedaa1b231168ba6c0f6",
"size": "2156",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "azure-mgmt-datafactory/azure/mgmt/datafactory/models/sap_cloud_for_customer_source.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "42572767"
}
],
"symlink_target": ""
}
|
"""
Fab commands for ffs
"""
from fabric.api import task, hosts, local, lcd, cd, run
from fabric import operations
deadpan = 'happenup@deadpansincerity.com'
@task
def test():
"""
Run our unittests
"""
local('python -m pytest test')
@task
def make_docs():
"""
Rebuild the documentation
"""
with lcd('doc/'):
local('make html')
@task
@hosts(deadpan)
def upload_docs():
"""
Build, compress, upload and extract the latest docs
"""
with lcd('doc/build/html'):
local('rm -rf letterdocs.tar.gz')
local('tar zcvf letterdocs.tar.gz *')
operations.put('letterdocs.tar.gz', '/home/happenup/webapps/letterdocs/letterdocs.tar.gz')
with cd('/home/happenup/webapps/letterdocs/'):
run('tar zxvf letterdocs.tar.gz')
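# Typical invocation (assuming Fabric 1.x is installed):
#   fab test make_docs upload_docs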
|
{
"content_hash": "96d1291f461f75889482e0386248377e",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 98,
"avg_line_length": 22.13888888888889,
"alnum_prop": 0.6273525721455459,
"repo_name": "davidmiller/letter",
"id": "d4bfb0c2c2be2c35bc930fc2113c6753263323a2",
"size": "797",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fabfile.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "13"
},
{
"name": "Python",
"bytes": "34047"
},
{
"name": "Ruby",
"bytes": "916"
}
],
"symlink_target": ""
}
|
from model import *
from gym_torcs import TorcsEnv
from ReplayBuffer import ReplayBuffer
from utils import *
from collections import deque
import numpy as np
import tensorflow as tf
def discrete_reinforce():
# initialization
tf.python.control_flow_ops = tf
env = TorcsEnv(vision = True,throttle = False,gear_change = False)
buff = ReplayBuffer(100000)
history = deque()
writer = tf.summary.FileWriter('tmp/discrete')
# env. initialization
ob = env.reset()
esp = 1.
lr = 0.0001
gamma = 0.99
tau = 0.01
batch_size = 32
max_epoch = 10000
max_steps = 10000
num_output = 5
# get model
actor = low_guide_v1(lr,num_output)
target_actor = low_guide_v1(lr,num_output)
transfer_weights(actor,target_actor)
# summary ops and phs
reward_ph = tf.placeholder(tf.float32)
loss_ph = tf.placeholder(tf.float32)
q_ph = tf.placeholder(tf.float32)
#target_q = tf.placeholder(tf.float32,[batch_size,num_output])
reward_summary = tf.summary.scalar('reward',reward_ph)
loss_summary = tf.summary.scalar('loss',loss_ph)
q_summary = tf.summary.scalar('estimated_q',q_ph)
# gradient inspection
grads = tf.gradients(actor.output,actor.trainable_weights)
grad_summary = [tf.summary.histogram('bp_grad-%d' % i,g) for (i,g) in enumerate(grads)]
grad_summary = tf.summary.merge(grad_summary)
for epoch in range(max_epoch):
history = deque()
[history.append(get_low_states(ob)) for i in range(4)]
total_reward = 0
total_loss = 0
for step in range(max_steps):
st = get_states(ob,history,False) # interpret and prepare the states,*** st is a stacked states***
act = actor.predict(st.reshape((1,) + st.shape)) # ask for action
estimated_max_q = np.max(act)
# input processing and step
act = get_inferred_steering(act,num_output) # convert the discrete decision to continuous
act,is_org = discrete_esp_process(esp,act,num_output)
esp -= 1./10000
ob,reward,done,_ = env.step([act]) # execute and observe
# post step proessing
total_reward += reward
st1 = get_states(ob,history,False)
# post observation
buff.add(st,act,reward,st1,done,step) # add experience
# training
if step < batch_size: continue
experiences = buff.getBatchMixed(batch_size)
X,y,a_t = preprocess_batch(experiences,actor,gamma,batch_size,target_actor)
y = prepare_label(X,a_t,y,actor,num_output)
loss = actor.train_on_batch(X,y)
total_loss += loss
update_network(actor,target_actor,tau)
# logging and stats
print 'Epoch: %d, Step: %d, Act: %f, Loss: %f,AI: %s' % (epoch,step,act,loss,str(is_org))
writer.add_summary(K.get_session().run(loss_summary,feed_dict = {loss_ph:loss}))
writer.add_summary(K.get_session().run(q_summary,feed_dict = {q_ph:estimated_max_q}))
writer.add_summary(K.get_session().run(grad_summary,feed_dict = {actor.input: X}))
# termination condition
if done:
ob = env.reset(epoch % 3 == 1)
break
print '************ Epoch %d : Reward %f ************' % (epoch,total_reward)
# post epoch stuff
if epoch % 10 == 0:
actor.save_weights('low_guide_v1_weights.h5')
# epoch summaries
writer.add_summary(K.get_session().run(reward_summary,feed_dict = {reward_ph:total_reward}))
if __name__ == '__main__':
discrete_reinforce()
''
|
{
"content_hash": "3b6283f39479440b45869ace6f126a18",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 101,
"avg_line_length": 31.048076923076923,
"alnum_prop": 0.6847321152059461,
"repo_name": "travistang/late_fyt",
"id": "f3c80228673eded1f65527315dfcf5348d0c613d",
"size": "3229",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "discrete.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "358950"
},
{
"name": "C",
"bytes": "1088525"
},
{
"name": "C++",
"bytes": "3701643"
},
{
"name": "CSS",
"bytes": "2093"
},
{
"name": "HTML",
"bytes": "727213"
},
{
"name": "Java",
"bytes": "870718"
},
{
"name": "JavaScript",
"bytes": "360"
},
{
"name": "M4",
"bytes": "5404"
},
{
"name": "Makefile",
"bytes": "360885"
},
{
"name": "NSIS",
"bytes": "120996"
},
{
"name": "Objective-C",
"bytes": "24134"
},
{
"name": "Python",
"bytes": "110376"
},
{
"name": "Roff",
"bytes": "6972"
},
{
"name": "Shell",
"bytes": "110228"
},
{
"name": "TeX",
"bytes": "156855"
},
{
"name": "XSLT",
"bytes": "18454"
}
],
"symlink_target": ""
}
|
"""
This script is used to generate all the possible reactions involving a given
set of reactants, including pressure-dependent effects if desired. This is
effectively the first step in the RMG rate-based mechanism generation algorithm.
The input file is a subset of that used with regular RMG jobs.
"""
import os.path
import argparse
import logging
from rmgpy.rmg.main import initializeLog, RMG
from rmgpy.chemkin import ChemkinWriter
from rmgpy.rmg.output import OutputHTMLWriter
def parseCommandLineArguments():
"""
Parse the command-line arguments being passed to RMG Py. This uses the
:mod:`argparse` module, which ensures that the command-line arguments are
sensible, parses them, and returns them.
"""
parser = argparse.ArgumentParser(description=
"""
Reaction Mechanism Generator (RMG) is an automatic chemical reaction
mechanism generator that constructs kinetic models composed of
elementary chemical reaction steps using a general understanding of
how molecules react.
""")
parser.add_argument('file', metavar='FILE', type=str, nargs=1,
help='a file describing the job to execute')
# Options for controlling the amount of information printed to the console
# By default a moderate level of information is printed; you can either
# ask for less (quiet), more (verbose), or much more (debug)
group = parser.add_mutually_exclusive_group()
group.add_argument('-q', '--quiet', action='store_true', help='only print warnings and errors')
group.add_argument('-v', '--verbose', action='store_true', help='print more verbose output')
group.add_argument('-d', '--debug', action='store_true', help='print debug information')
# Add options for controlling what directories files are written to
parser.add_argument('-o', '--output-directory', type=str, nargs=1, default='',
metavar='DIR', help='use DIR as output directory')
parser.add_argument('-s', '--scratch-directory', type=str, nargs=1, default='',
metavar='DIR', help='use DIR as scratch directory')
parser.add_argument('-l', '--library-directory', type=str, nargs=1, default='',
metavar='DIR', help='use DIR as library directory')
args = parser.parse_args()
args.walltime = '0'
args.restart = False
return args
def main():
"""
Driver function that parses command line arguments and passes them to the execute function.
"""
# Parse the command-line arguments (requires the argparse module)
args = parseCommandLineArguments()
# For output and scratch directories, if they are empty strings, set them
# to match the input file location
inputFile = args.file[0]
inputDirectory = os.path.abspath(os.path.dirname(inputFile))
if args.output_directory == '':
args.output_directory = inputDirectory
if args.scratch_directory == '':
args.scratch_directory = inputDirectory
# Initialize the logging system (resets the RMG.log file)
level = logging.INFO
if args.debug: level = 0
elif args.verbose: level = logging.DEBUG
elif args.quiet: level = logging.WARNING
kwargs = {
'scratch_directory': args.scratch_directory,
'restart': args.restart,
'walltime': args.walltime,
'log': level,
}
initializeLog(level, os.path.join(args.output_directory,'RMG.log'))
rmg = RMG(inputFile=inputFile, outputDirectory=args.output_directory)
# Add output listeners:
rmg.attach(ChemkinWriter(args.output_directory))
rmg.attach(OutputHTMLWriter(args.output_directory))
execute(rmg, **kwargs)
def execute(rmg, **kwargs):
"""
Generates all the possible reactions involving a given
set of reactants, including pressure-dependent effects if desired. This is
effectively the first step in the RMG rate-based mechanism generation algorithm.
The input file is a subset of that used with regular RMG jobs.
Returns an RMG object.
"""
import numpy
rmg.initialize(**kwargs)
rmg.reactionModel.enlarge(reactEdge=True,
unimolecularReact=rmg.unimolecularReact,
bimolecularReact=rmg.bimolecularReact)
# Show all core and edge species and reactions in the output
rmg.reactionModel.outputSpeciesList.extend(rmg.reactionModel.edge.species)
rmg.reactionModel.outputReactionList.extend(rmg.reactionModel.edge.reactions)
rmg.saveEverything()
rmg.finish()
return rmg
|
{
"content_hash": "58496598685486556dda476d762bf24e",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 99,
"avg_line_length": 36.739837398373986,
"alnum_prop": 0.6999336136313343,
"repo_name": "nickvandewiele/RMG-Py",
"id": "e9b703eb2fe6e38494e39bf03ae731be962e401d",
"size": "5965",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "rmgpy/tools/generate_reactions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "461"
},
{
"name": "Jupyter Notebook",
"bytes": "23683"
},
{
"name": "Makefile",
"bytes": "5832"
},
{
"name": "Python",
"bytes": "3423274"
},
{
"name": "Shell",
"bytes": "2733"
}
],
"symlink_target": ""
}
|
from sympy import Symbol, Dummy, sympify, latex, diff, sqrt as sym_sqrt
from sympy.utilities.lambdify import lambdify
import numpy as np
from importlib import import_module
from errorpro.units import parse_unit, convert_to_unit
from errorpro.dimensions.simplifiers import dim_simplify
from errorpro.dimensions.solvers import subs_symbols
from errorpro import pytex
def parse_expr(term, data, evaluate=None):
""" parses string to expression containing quantities
Args:
- term: string of mathematical term (can also be a sympy expression)
- data: dict of defined quantities
- evaluate: specifies if automatic simplifications should be done (passed on to sympify)
"""
try:
term=sympify(term, locals=data, evaluate=evaluate)
except(SyntaxError):
raise SyntaxError("error parsing term '%s'" % term)
for q in term.free_symbols:
if not isinstance(q,Quantity):
raise ValueError("Symbol '%s' is not defined." % q.name)
return term
def get_dimension(expr):
""" finds out physical dimension of a term containing quantities
Args:
- expr: Expr object possibly containing Quantity objects
Returns: Dimension object
"""
dim = expr
for var in expr.free_symbols:
if var.dim is None:
raise RuntimeError ("quantity '%s' doesn't have a dimension, yet." % var.name)
dim = subs_symbols(dim,{var.name:var.dim})
return dim_simplify(dim)
def adjust_to_unit (quant, unit=None):
""" calculates value and error of quantity according to specified unit
Args:
- quant: Quantity object to use
- unit: Unit object to adjust values to. If not specified, prefer_unit
of quantity will be used. If prefer_unit is None, Unit will be found
automatically.
Returns: tuple of adjusted value, error and unit
"""
if unit is None:
unit = quant.prefer_unit
factor, unit = convert_to_unit(quant.dim, unit)
factor = np.float_(factor)
value = None if quant.value is None else quant.value / factor
error = None if quant.error is None else quant.error / factor
return (value, error, unit)
def get_value(expr):
""" calculates number value of an expression possibly containing quantities
"""
calcFunction=lambdify(expr.free_symbols, expr, modules="numpy")
depValues=[]
for var in expr.free_symbols:
if var.value is None:
raise RuntimeError ("quantity '%s' doesn't have a value, yet." % var.name)
depValues.append(var.value)
return calcFunction(*depValues)
def get_error(expr):
""" calculates error of an expression possibly containing quantities
Returns: tuple of error and error formula (as Expr object)
"""
integrand = 0
error_formula = 0
for varToDiff in expr.free_symbols:
if varToDiff.error is not None:
differential = diff(expr,varToDiff)
error_formula += ( Symbol(varToDiff.name+"_err",positive=True) * differential )**2
diffFunction = lambdify(differential.free_symbols,differential, modules="numpy")
diffValues = []
for var in differential.free_symbols:
diffValues.append(var.value)
integrand += ( varToDiff.error*diffFunction(*diffValues) )**2
if isinstance(integrand,np.ndarray):
if (integrand==0).all():
return (None,None)
elif integrand == 0:
return (None,None)
return (np.sqrt(integrand),sym_sqrt (error_formula))
class Quantity(Symbol):
""" class for physical quantities storing name, value, error and physical dimension
"""
quantity_count = 0
dummy_count = 1
def __new__(cls, name=None, longname=None):
if name is None or name == "":
name = "NoName_"+str(Quantity.dummy_count)
Quantity.dummy_count += 1
self = Dummy.__new__(cls, name)
else:
self = Symbol.__new__(cls, name)
self.count = Quantity.quantity_count
Quantity.quantity_count += 1
self.abbrev = name
self.name = name
self.longname = longname
self.value = None
self.value_formula = None
self.error = None
self.error_formula = None
self.prefer_unit = None
self.dim = None
return self
def _repr_html_(self):
return qtable(self)
# TODO implementing this method screws up dependent quantities
#def __getitem__(self, sliced):
# slicedValue = None
# slicederror = None
# if self.value is not None:
# slicedValue = self.value[sliced]
# if self.error is not None:
# slicederror = self.error[sliced]
# q = Quantity()
# q.value = slicedValue
# q.error = slicederror
# q.prefer_unit = self.prefer_unit
# q.dim = self.dim
# return q
def qtable(*quantities, html=True, maxcols=5):
""" Represent quantites in a table.
Args:
quantities: List of quantity objects.
html: If True, output will be formatted to be displayable html.
Else, LaTeX and html code is returned in a tuple.
maxcols:
Maximum number of columns. Table will be split.
Returns:
String of html code (html=True) or tuple (LaTeX table, html table).
"""
if len(quantities) == 0:
return 'No quantities selected.'
cols = []
if html:
if not maxcols:
maxcols = len(quantities)
def chunks(l):
for i in range(0, len(quantities), maxcols):
yield l[i:i+maxcols]
html = []
ltx = []
for chunk in chunks(quantities):
l, h = qtable(*chunk, html=False, maxcols=None)
html.append(h)
ltx.append(l)
htmlb, htmlc = pytex.hide_div('Data', ''.join(html), hide = False)
ltxb, ltxc = pytex.hide_div('LaTeX', ''.join(ltx))
res = 'Displaying: %s<div width=20px/>%s%s<hr/>%s<br>%s' % (
', '.join('$%s$' % latex(q) for q in quantities),
htmlb, ltxb, htmlc, ltxc)
return res
for quant in quantities:
assert isinstance(quant, Quantity)
value, error, unit = adjust_to_unit(quant)
header = quant.longname + ' ' if quant.longname else ''
header += '$%s \\; \\mathrm{\\left[%s\\right]}$' % (
latex(quant), latex(unit))
column = [header]
if error is None:
if isinstance(value, np.ndarray):
column.extend(pytex.align_num_list(value))
else:
column.append(pytex.align_num(value))
else:
if isinstance(value, np.ndarray):
column.extend(pytex.format_valerr_list(value,error))
else:
column.append(pytex.format_valerr(value,error))
cols.append(column)
return (pytex.table_latex(cols), pytex.table_html(cols))
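# --- Illustrative usage sketch (not part of the original module) ---
# The quantity values, errors and the expression below are made-up example data,
# shown only to demonstrate how parse_expr, get_value and get_error fit together.
if __name__ == "__main__":
	a = Quantity("a", longname="example length")
	b = Quantity("b", longname="example width")
	a.value, a.error = np.float_(2.0), np.float_(0.1)
	b.value, b.error = np.float_(3.0), np.float_(0.2)
	area = parse_expr("a*b", {"a": a, "b": b})
	print(get_value(area))      # 6.0
	print(get_error(area)[0])   # sqrt((0.1*3)**2 + (0.2*2)**2) = 0.5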
|
{
"content_hash": "787974023370cc6981baaaef208b32ae",
"timestamp": "",
"source": "github",
"line_count": 222,
"max_line_length": 94,
"avg_line_length": 31.373873873873872,
"alnum_prop": 0.6113424264178033,
"repo_name": "benti/Error-Pypagation",
"id": "e3c3dcf8d3fe7d22c4b18ac2261f537021f8c311",
"size": "6965",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "errorpro/quantities.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "94811"
},
{
"name": "Python",
"bytes": "143212"
}
],
"symlink_target": ""
}
|
from __future__ import division
import av
from .common import MethodLogger, TestCase, fate_suite
from .test_encode import assert_rgb_rotate, write_rgb_rotate
try:
from cStringIO import StringIO
except ImportError:
from io import BytesIO as StringIO
class NonSeekableBuffer:
def __init__(self, data):
self.data = data
def read(self, n):
data = self.data[0:n]
self.data = self.data[n:]
return data
class TestPythonIO(TestCase):
def test_reading(self):
with open(fate_suite('mpeg2/mpeg2_field_encoding.ts'), 'rb') as fh:
wrapped = MethodLogger(fh)
container = av.open(wrapped)
self.assertEqual(container.format.name, 'mpegts')
self.assertEqual(container.format.long_name, "MPEG-TS (MPEG-2 Transport Stream)")
self.assertEqual(len(container.streams), 1)
self.assertEqual(container.size, 800000)
self.assertEqual(container.metadata, {})
# Make sure it did actually call "read".
reads = wrapped._filter('read')
self.assertTrue(reads)
def test_reading_no_seek(self):
with open(fate_suite('mpeg2/mpeg2_field_encoding.ts'), 'rb') as fh:
data = fh.read()
buf = NonSeekableBuffer(data)
wrapped = MethodLogger(buf)
container = av.open(wrapped)
self.assertEqual(container.format.name, 'mpegts')
self.assertEqual(container.format.long_name, "MPEG-TS (MPEG-2 Transport Stream)")
self.assertEqual(len(container.streams), 1)
self.assertEqual(container.metadata, {})
# Make sure it did actually call "read".
reads = wrapped._filter('read')
self.assertTrue(reads)
def test_basic_errors(self):
self.assertRaises(Exception, av.open, None)
self.assertRaises(Exception, av.open, None, 'w')
def test_writing(self):
path = self.sandboxed('writing.mov')
with open(path, 'wb') as fh:
wrapped = MethodLogger(fh)
output = av.open(wrapped, 'w', 'mov')
write_rgb_rotate(output)
output.close()
fh.close()
# Make sure it did actually write.
writes = wrapped._filter('write')
self.assertTrue(writes)
# Standard assertions.
assert_rgb_rotate(self, av.open(path))
def test_buffer_read_write(self):
buffer_ = StringIO()
wrapped = MethodLogger(buffer_)
write_rgb_rotate(av.open(wrapped, 'w', 'mp4'))
# Make sure it did actually write.
writes = wrapped._filter('write')
self.assertTrue(writes)
self.assertTrue(buffer_.tell())
# Standard assertions.
buffer_.seek(0)
assert_rgb_rotate(self, av.open(buffer_))
|
{
"content_hash": "838fe20e94570c350508cfb058b8e23e",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 93,
"avg_line_length": 28.836734693877553,
"alnum_prop": 0.6047416843595188,
"repo_name": "mikeboers/PyAV",
"id": "f622bdb65f56ebbdfba3c4e96f4c4af73aa05c2e",
"size": "2826",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/test_python_io.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "1819"
},
{
"name": "Python",
"bytes": "517404"
},
{
"name": "Shell",
"bytes": "7128"
}
],
"symlink_target": ""
}
|
"""Switch platform for Sensibo integration."""
from __future__ import annotations
from collections.abc import Callable, Mapping
from dataclasses import dataclass
from typing import Any
from pysensibo.model import SensiboDevice
from homeassistant.components.switch import (
SwitchDeviceClass,
SwitchEntity,
SwitchEntityDescription,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from .const import DOMAIN
from .coordinator import SensiboDataUpdateCoordinator
from .entity import SensiboDeviceBaseEntity, async_handle_api_call
PARALLEL_UPDATES = 0
@dataclass
class DeviceBaseEntityDescriptionMixin:
"""Mixin for required Sensibo Device description keys."""
value_fn: Callable[[SensiboDevice], bool | None]
extra_fn: Callable[[SensiboDevice], dict[str, str | bool | None]] | None
command_on: str
command_off: str
data_key: str
@dataclass
class SensiboDeviceSwitchEntityDescription(
SwitchEntityDescription, DeviceBaseEntityDescriptionMixin
):
"""Describes Sensibo Switch entity."""
DEVICE_SWITCH_TYPES: tuple[SensiboDeviceSwitchEntityDescription, ...] = (
SensiboDeviceSwitchEntityDescription(
key="timer_on_switch",
device_class=SwitchDeviceClass.SWITCH,
name="Timer",
icon="mdi:timer",
value_fn=lambda data: data.timer_on,
extra_fn=lambda data: {"id": data.timer_id, "turn_on": data.timer_state_on},
command_on="async_turn_on_timer",
command_off="async_turn_off_timer",
data_key="timer_on",
),
SensiboDeviceSwitchEntityDescription(
key="climate_react_switch",
device_class=SwitchDeviceClass.SWITCH,
name="Climate React",
icon="mdi:wizard-hat",
value_fn=lambda data: data.smart_on,
extra_fn=lambda data: {"type": data.smart_type},
command_on="async_turn_on_off_smart",
command_off="async_turn_on_off_smart",
data_key="smart_on",
),
)
PURE_SWITCH_TYPES: tuple[SensiboDeviceSwitchEntityDescription, ...] = (
SensiboDeviceSwitchEntityDescription(
key="pure_boost_switch",
device_class=SwitchDeviceClass.SWITCH,
name="Pure Boost",
value_fn=lambda data: data.pure_boost_enabled,
extra_fn=None,
command_on="async_turn_on_off_pure_boost",
command_off="async_turn_on_off_pure_boost",
data_key="pure_boost_enabled",
),
)
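# Maps a Sensibo model name to its switch descriptions; models without an entry
# fall back to DEVICE_SWITCH_TYPES in async_setup_entry below.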
DESCRIPTION_BY_MODELS = {"pure": PURE_SWITCH_TYPES}
async def async_setup_entry(
hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback
) -> None:
"""Set up Sensibo Switch platform."""
coordinator: SensiboDataUpdateCoordinator = hass.data[DOMAIN][entry.entry_id]
async_add_entities(
SensiboDeviceSwitch(coordinator, device_id, description)
for device_id, device_data in coordinator.data.parsed.items()
for description in DESCRIPTION_BY_MODELS.get(
device_data.model, DEVICE_SWITCH_TYPES
)
)
class SensiboDeviceSwitch(SensiboDeviceBaseEntity, SwitchEntity):
"""Representation of a Sensibo Device Switch."""
entity_description: SensiboDeviceSwitchEntityDescription
def __init__(
self,
coordinator: SensiboDataUpdateCoordinator,
device_id: str,
entity_description: SensiboDeviceSwitchEntityDescription,
) -> None:
"""Initiate Sensibo Device Switch."""
super().__init__(
coordinator,
device_id,
)
self.entity_description = entity_description
self._attr_unique_id = f"{device_id}-{entity_description.key}"
@property
def is_on(self) -> bool | None:
"""Return True if entity is on."""
return self.entity_description.value_fn(self.device_data)
async def async_turn_on(self, **kwargs: Any) -> None:
"""Turn the entity on."""
func = getattr(SensiboDeviceSwitch, self.entity_description.command_on)
await func(
self,
key=self.entity_description.data_key,
value=True,
)
async def async_turn_off(self, **kwargs: Any) -> None:
"""Turn the entity off."""
func = getattr(SensiboDeviceSwitch, self.entity_description.command_off)
await func(
self,
key=self.entity_description.data_key,
value=True,
)
@property
def extra_state_attributes(self) -> Mapping[str, Any] | None:
"""Return additional attributes."""
if self.entity_description.extra_fn:
return self.entity_description.extra_fn(self.device_data)
return None
@async_handle_api_call
async def async_turn_on_timer(self, key: str, value: Any) -> bool:
"""Make service call to api for setting timer."""
new_state = bool(self.device_data.ac_states["on"] is False)
data = {
"minutesFromNow": 60,
"acState": {**self.device_data.ac_states, "on": new_state},
}
result = await self._client.async_set_timer(self._device_id, data)
return bool(result.get("status") == "success")
@async_handle_api_call
async def async_turn_off_timer(self, key: str, value: Any) -> bool:
"""Make service call to api for deleting timer."""
result = await self._client.async_del_timer(self._device_id)
return bool(result.get("status") == "success")
@async_handle_api_call
async def async_turn_on_off_pure_boost(self, key: str, value: Any) -> bool:
"""Make service call to api for setting Pure Boost."""
new_state = bool(self.device_data.pure_boost_enabled is False)
data: dict[str, Any] = {"enabled": new_state}
if self.device_data.pure_measure_integration is None:
data["sensitivity"] = "N"
data["measurementsIntegration"] = True
data["acIntegration"] = False
data["geoIntegration"] = False
data["primeIntegration"] = False
result = await self._client.async_set_pureboost(self._device_id, data)
return bool(result.get("status") == "success")
@async_handle_api_call
async def async_turn_on_off_smart(self, key: str, value: Any) -> bool:
"""Make service call to api for setting Climate React."""
if self.device_data.smart_type is None:
raise HomeAssistantError(
"Use Sensibo Enable Climate React Service once to enable switch or the Sensibo app"
)
new_state = bool(self.device_data.smart_on is False)
data: dict[str, Any] = {"enabled": new_state}
result = await self._client.async_enable_climate_react(self._device_id, data)
return bool(result.get("status") == "success")
|
{
"content_hash": "b948b87705e16d40d5c321dd51fbb50c",
"timestamp": "",
"source": "github",
"line_count": 192,
"max_line_length": 99,
"avg_line_length": 35.989583333333336,
"alnum_prop": 0.6542691751085383,
"repo_name": "w1ll1am23/home-assistant",
"id": "f57d72e5fb356f5e692d364af21306da6aaf2b28",
"size": "6910",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "homeassistant/components/sensibo/switch.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2963"
},
{
"name": "PLSQL",
"bytes": "840"
},
{
"name": "Python",
"bytes": "52277012"
},
{
"name": "Shell",
"bytes": "6252"
}
],
"symlink_target": ""
}
|
""" A secret, a symmetric cryptographic key. """
from abc import ABCMeta, abstractmethod
class Secret(metaclass=ABCMeta):
""" A secret, a symmetric cryptographic key. """
@property
@abstractmethod
def algorithm(self): # -> str
""" Return the name of the algorithm, for example, "aes". """
|
{
"content_hash": "ecff503a7e99b89d105ef04f605fb630",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 69,
"avg_line_length": 26.666666666666668,
"alnum_prop": 0.65,
"repo_name": "jddixon/xlattice_py",
"id": "e22502102b3ca525b689dbdc4e990569f3aaa325",
"size": "354",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/xlattice/secret.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "6648"
},
{
"name": "Python",
"bytes": "42824"
},
{
"name": "Shell",
"bytes": "4408"
}
],
"symlink_target": ""
}
|
"""
ASGI entrypoint file for default channel layer.
Points to the channel layer configured as "default" so you can point
ASGI applications at "multichat.asgi:channel_layer" as their channel layer.
"""
import os
from channels.asgi import get_channel_layer
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "multichat.settings")
channel_layer = get_channel_layer()
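# A minimal serving sketch (assuming the channels 1.x stack with Daphne
# installed; the commands are illustrative):
#
#     daphne multichat.asgi:channel_layer --port 8000
#     python manage.py runworker
#
# The interface server and the worker(s) then communicate over this layer.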
|
{
"content_hash": "9637fe0c4fcca9ab2ba28576080b6c0e",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 75,
"avg_line_length": 33,
"alnum_prop": 0.7851239669421488,
"repo_name": "sarthak-srivastava/chat",
"id": "5732f3090d2761471016b52e13aa07880da12949",
"size": "363",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "chat_app/multichat/asgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4793"
},
{
"name": "HTML",
"bytes": "10644"
},
{
"name": "JavaScript",
"bytes": "12317"
},
{
"name": "Python",
"bytes": "19028"
}
],
"symlink_target": ""
}
|
from rally.common import log as logging
from rally import consts
from rally.plugins.openstack.scenarios.sahara import utils
from rally.task.scenarios import base
from rally.task import validation
LOG = logging.getLogger(__name__)
class SaharaJob(utils.SaharaScenario):
"""Benchmark scenarios for Sahara jobs."""
@validation.required_services(consts.Service.SAHARA)
@validation.required_contexts("users", "sahara_image", "sahara_edp",
"sahara_cluster")
@base.scenario(context={"cleanup": ["sahara"]})
def create_launch_job(self, job_type, configs, job_idx=0):
"""Create and execute a Sahara EDP Job.
        This scenario creates a Job entity and launches an execution on a
Cluster.
:param job_type: type of the Data Processing Job
:param configs: config dict that will be passed to a Job Execution
:param job_idx: index of a job in a sequence. This index will be
used to create different atomic actions for each job
in a sequence
"""
mains = self.context["tenant"]["sahara_mains"]
libs = self.context["tenant"]["sahara_libs"]
name = self._generate_random_name(prefix="job_")
job = self.clients("sahara").jobs.create(name=name,
type=job_type,
description="",
mains=mains,
libs=libs)
cluster_id = self.context["tenant"]["sahara_cluster"]
if job_type.lower() == "java":
input_id = None
output_id = None
else:
input_id = self.context["tenant"]["sahara_input"]
output_id = self._create_output_ds().id
self._run_job_execution(job_id=job.id,
cluster_id=cluster_id,
input_id=input_id,
output_id=output_id,
configs=configs,
job_idx=job_idx)
@validation.required_services(consts.Service.SAHARA)
@validation.required_contexts("users", "sahara_image", "sahara_edp",
"sahara_cluster")
@base.scenario(context={"cleanup": ["sahara"]})
def create_launch_job_sequence(self, jobs):
"""Create and execute a sequence of the Sahara EDP Jobs.
        This scenario creates a Job entity and launches an execution on a
Cluster for every job object provided.
:param jobs: list of jobs that should be executed in one context
"""
for idx, job in enumerate(jobs):
LOG.debug("Launching Job. Sequence #%d" % idx)
self.create_launch_job(job["job_type"], job["configs"], idx)
@validation.required_services(consts.Service.SAHARA)
@validation.required_contexts("users", "sahara_image", "sahara_edp",
"sahara_cluster")
@base.scenario(context={"cleanup": ["sahara"]})
def create_launch_job_sequence_with_scaling(self, jobs, deltas):
"""Create and execute Sahara EDP Jobs on a scaling Cluster.
        This scenario creates a Job entity and launches an execution on a
Cluster for every job object provided. The Cluster is scaled according
to the deltas values and the sequence is launched again.
:param jobs: list of jobs that should be executed in one context
:param deltas: list of integers which will be used to add or
remove worker nodes from the cluster
"""
cluster_id = self.context["tenant"]["sahara_cluster"]
# Executing the sequence before the first scaling
self.create_launch_job_sequence(jobs)
for delta in deltas:
# The Cluster is fetched every time so that its node groups have
# correct 'count' values.
cluster = self.clients("sahara").clusters.get(cluster_id)
LOG.debug("Scaling cluster %s with delta %d" %
(cluster.name, delta))
if delta == 0:
# Zero scaling makes no sense.
continue
elif delta > 0:
self._scale_cluster_up(cluster, delta)
elif delta < 0:
self._scale_cluster_down(cluster, delta)
LOG.debug("Starting Job sequence")
self.create_launch_job_sequence(jobs)
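# A hedged sketch of a rally task entry that could drive the scaling scenario
# above (scenario name follows rally's "<ClassName>.<method>" convention; the
# argument values are illustrative only):
#
#   {
#       "SaharaJob.create_launch_job_sequence_with_scaling": [{
#           "args": {
#               "jobs": [{"job_type": "Pig", "configs": {}}],
#               "deltas": [1, -1]
#           }
#       }]
#   }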
|
{
"content_hash": "d55b18b0e9a58af5918733053b13bd2e",
"timestamp": "",
"source": "github",
"line_count": 110,
"max_line_length": 78,
"avg_line_length": 41.36363636363637,
"alnum_prop": 0.5703296703296703,
"repo_name": "shdowofdeath/rally",
"id": "1840688555c9aef1034369849bd94f835d21c2e9",
"size": "5180",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "rally/plugins/openstack/scenarios/sahara/jobs.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "46737"
},
{
"name": "Python",
"bytes": "2421750"
},
{
"name": "Shell",
"bytes": "36795"
}
],
"symlink_target": ""
}
|
import yaml
import json
import datetime
from twisted.trial import unittest
from twisted.internet import defer, reactor
from rhumba import service, client
from rhumba.backends import zk
from .plugin import sleep
test_config = {
'noexpire': True,
'backend': 'rhumba.backends.zk',
'queues': [{
'id': 0, 'max_jobs': 1,
'name': 'testqueue',
'plugin': 'rhumba.tests.plugin'
}],
}
class Test(unittest.TestCase):
@defer.inlineCallbacks
def setUp(self):
self.service = service.RhumbaService(yaml.dump(test_config))
#backend = zookeeper.Backend(self.service.config)
#self.service.client = backend
yield self.service.startBackend()
yield self.service.client.cleanNodes('/rhumba/q')
yield self.service.client.cleanNodes('/rhumba/dq')
yield self.service.client.cleanNodes('/rhumba/crons')
yield self.service.client.cleanNodes('/rhumba/croner')
yield self.service.client.setupPaths()
self.service.setupQueues()
@defer.inlineCallbacks
def test_call(self):
queue = self.service.queues['testqueue']
uuid1 = yield self.service.client.queue('testqueue', 'test', {'count': 1, 'delay': 2})
yield sleep(0.1)
yield queue.queueRun()
item = yield self.service.client.getResult('testqueue', uuid1)
self.assertEquals(item['result'], None)
@defer.inlineCallbacks
def test_status(self):
yield self.service.setStatus('test123')
st = yield self.service.getStatus()
self.assertEquals(st, 'test123')
@defer.inlineCallbacks
def test_fanout(self):
queue = self.service.queues['testqueue']
suid = self.service.uuid
uuid1 = yield self.service.client.queue('testqueue', 'test', {'count': 1, 'delay': 1},
uids=[suid, 'justsomefakeuuiddoesntmatter'])
yield queue.queueFan()
result = yield self.service.client.getResult('testqueue', uuid1, suid)
self.assertEquals(result['result'], None)
self.service.uuid = 'justsomefakeuuiddoesntmatter'
yield queue.queueFan()
result = yield self.service.client.getResult('testqueue', uuid1,
self.service.uuid)
self.assertEquals(result['result'], None)
@defer.inlineCallbacks
def _get_messages(self):
try:
nodes = yield self.service.client.client.get_children('/rhumba/q/testqueue')
except:
nodes = []
queue = []
for node in nodes:
item = yield self.service.client.client.get('/rhumba/q/testqueue/%s' % node)
queue.append(json.loads(item[0])['message'])
defer.returnValue(queue)
@defer.inlineCallbacks
def test_cron(self):
queue = self.service.queues['testqueue']
yield self.service.checkCrons(datetime.datetime(2015, 3, 3, 5, 3, 0))
item = yield self.service.client._get_key('/crons/testqueue/call_everysecond')
self.assertEquals(type(float(item)), float)
queue = yield self._get_messages()
self.assertIn('everysecond', queue)
@defer.inlineCallbacks
def test_stats(self):
queue = self.service.queues['testqueue']
yield self.service.heartbeat()
yield queue.queueRun()
stats = yield self.service.client.getQueueMessageStats('testqueue')
servers = yield self.service.client.getClusterServers()
self.assertIn(self.service.hostname, servers)
s = yield self.service.client.clusterQueues()
self.assertEquals(s['testqueue'][0]['host'], self.service.hostname)
s = yield self.service.client.clusterStatus()
self.assertIn('testqueue', s['queues'].keys())
self.assertIn(self.service.hostname, s['workers'].keys())
|
{
"content_hash": "fb5a19dd11281e089c3ca2ae0cd2bcf4",
"timestamp": "",
"source": "github",
"line_count": 135,
"max_line_length": 94,
"avg_line_length": 28.288888888888888,
"alnum_prop": 0.6394344069128044,
"repo_name": "calston/rhumba",
"id": "34afffe59fd149fe54aad1c774df005b2ec0bac0",
"size": "3819",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rhumba/tests/test_zk.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "76197"
}
],
"symlink_target": ""
}
|
from flask import Flask
app = Flask(__name__)
app.config.from_object(__name__)
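# from_object(__name__) copies this module's UPPERCASE attributes into
# app.config; e.g. (illustrative) defining DEBUG = True above would show up
# as app.config["DEBUG"].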
|
{
"content_hash": "39a63200f3a9588cb741d8ac51b2c88c",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 32,
"avg_line_length": 20,
"alnum_prop": 0.6875,
"repo_name": "wallarelvo/SmallCartography",
"id": "3893c01fd3fda6bb83b28b06d50f248aa7add8b0",
"size": "81",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "carto/reducer/config.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "6921"
},
{
"name": "Python",
"bytes": "18482"
},
{
"name": "Shell",
"bytes": "383"
}
],
"symlink_target": ""
}
|
import operator
if not hasattr(operator, "div"):
operator.div = operator.truediv
opnames = {
operator.add : "+",
operator.sub : "-",
operator.mul : "*",
operator.div : "/",
operator.floordiv : "//",
operator.mod : "%",
operator.pow : "**",
operator.xor : "^",
operator.lshift : "<<",
operator.rshift : ">>",
operator.and_ : "and",
operator.or_ : "or",
operator.not_ : "not",
operator.neg : "-",
operator.pos : "+",
operator.contains : "in",
operator.gt : ">",
operator.ge : ">=",
operator.lt : "<",
operator.le : "<=",
operator.eq : "==",
operator.ne : "!=",
}
class ExprMixin(object):
__slots__ = ()
def __add__(self, other):
return BinExpr(operator.add, self, other)
def __sub__(self, other):
return BinExpr(operator.sub, self, other)
def __mul__(self, other):
return BinExpr(operator.mul, self, other)
def __floordiv__(self, other):
return BinExpr(operator.floordiv, self, other)
def __truediv__(self, other):
return BinExpr(operator.div, self, other)
__div__ = __floordiv__
def __mod__(self, other):
return BinExpr(operator.mod, self, other)
def __pow__(self, other):
return BinExpr(operator.pow, self, other)
def __xor__(self, other):
return BinExpr(operator.xor, self, other)
def __rshift__(self, other):
return BinExpr(operator.rshift, self, other)
    def __lshift__(self, other):
        return BinExpr(operator.lshift, self, other)
def __and__(self, other):
return BinExpr(operator.and_, self, other)
def __or__(self, other):
return BinExpr(operator.or_, self, other)
def __radd__(self, other):
return BinExpr(operator.add, other, self)
def __rsub__(self, other):
return BinExpr(operator.sub, other, self)
def __rmul__(self, other):
return BinExpr(operator.mul, other, self)
def __rfloordiv__(self, other):
return BinExpr(operator.floordiv, other, self)
def __rtruediv__(self, other):
return BinExpr(operator.div, other, self)
__rdiv__ = __rfloordiv__
def __rmod__(self, other):
return BinExpr(operator.mod, other, self)
def __rpow__(self, other):
return BinExpr(operator.pow, other, self)
def __rxor__(self, other):
return BinExpr(operator.xor, other, self)
def __rrshift__(self, other):
return BinExpr(operator.rshift, other, self)
    def __rlshift__(self, other):
        return BinExpr(operator.lshift, other, self)
def __rand__(self, other):
return BinExpr(operator.and_, other, self)
def __ror__(self, other):
return BinExpr(operator.or_, other, self)
def __neg__(self):
return UniExpr(operator.neg, self)
def __pos__(self):
return UniExpr(operator.pos, self)
def __invert__(self):
return UniExpr(operator.not_, self)
__inv__ = __invert__
def __contains__(self, other):
return BinExpr(operator.contains, self, other)
def __gt__(self, other):
return BinExpr(operator.gt, self, other)
def __ge__(self, other):
return BinExpr(operator.ge, self, other)
def __lt__(self, other):
return BinExpr(operator.lt, self, other)
def __le__(self, other):
return BinExpr(operator.le, self, other)
def __eq__(self, other):
return BinExpr(operator.eq, self, other)
def __ne__(self, other):
return BinExpr(operator.ne, self, other)
class UniExpr(ExprMixin):
__slots__ = ["op", "operand"]
def __init__(self, op, operand):
self.op = op
self.operand = operand
def __repr__(self):
return "%s %r" % (opnames[self.op], self.operand)
def __call__(self, context):
operand = self.operand(context) if callable(self.operand) else self.operand
return self.op(operand)
class BinExpr(ExprMixin):
__slots__ = ["op", "lhs", "rhs"]
def __init__(self, op, lhs, rhs):
self.op = op
self.lhs = lhs
self.rhs = rhs
def __repr__(self):
return "(%r %s %r)" % (self.lhs, opnames[self.op], self.rhs)
def __call__(self, context):
lhs = self.lhs(context) if callable(self.lhs) else self.lhs
rhs = self.rhs(context) if callable(self.rhs) else self.rhs
return self.op(lhs, rhs)
class Path(ExprMixin):
__slots__ = ["__name", "__parent"]
def __init__(self, name, parent = None):
self.__name = name
self.__parent = parent
def __repr__(self):
if self.__parent is None:
return self.__name
return "%r.%s" % (self.__parent, self.__name)
def __call__(self, context):
if self.__parent is None:
return context
context2 = self.__parent(context)
return context2[self.__name]
def __getattr__(self, name):
return Path(name, self)
# `this` is the singleton root path: build expressions by accessing attributes
# of it and combining them with operators, then call the result with a context.
this = Path("this")
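# Quick illustrative check (not part of the library): expressions built from
# `this` are plain callables that evaluate against a context mapping.
if __name__ == "__main__":
    area_check = (this.width * this.height) > 100
    print(area_check)                              # ((this.width * this.height) > 100)
    print(area_check({"width": 20, "height": 6}))  # True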
|
{
"content_hash": "c98f61f52f38cf481b5a8b45146833d6",
"timestamp": "",
"source": "github",
"line_count": 157,
"max_line_length": 83,
"avg_line_length": 31.828025477707005,
"alnum_prop": 0.5717430458274965,
"repo_name": "gkonstantyno/construct",
"id": "b62efb964b063608675b6dafa3df0b47ae305d5b",
"size": "4997",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "construct/lib/expr.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "337547"
},
{
"name": "Shell",
"bytes": "239"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, unicode_literals
import environ
env = environ.Env(DEBUG=(bool, False),) # set default values and casting
ROOT_DIR = environ.Path(__file__) - 3 # (/a/b/myfile.py - 3 = /)
APPS_DIR = ROOT_DIR.path('housing_reviews')
DEBUG = env('DEBUG')
TEMPLATE_DEBUG = DEBUG
# Raises ImproperlyConfigured exception if DATABASE_URL not in os.environ
DATABASES = {"default": env.db()}
MEDIA_ROOT = str(ROOT_DIR('uploads'))
MEDIA_URL = ''
STATIC_ROOT = str(ROOT_DIR('staticfiles'))
STATIC_URL = '/static/'
SECRET_KEY = env('SECRET_KEY')
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
ADMINS = (
('St Andrews Housing Reviews Team', 'hello@standrews-housing-reviews.com'),
)
MANAGERS = ADMINS
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'Europe/London'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-uk'
SITE_ID = 2 if DEBUG else 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = False
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
str(APPS_DIR.path('static')),
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'djangosecure.middleware.SecurityMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'housing_reviews.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'housing_reviews.wsgi.application'
TEMPLATE_DIRS = (
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.core.context_processors.debug',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.core.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
# allauth specific context processors
# 'allauth.account.context_processors.account',
# 'allauth.socialaccount.context_processors.socialaccount',
)
DJANGO_APPS = (
'admin_tools',
'admin_tools.theming',
'admin_tools.menu',
'admin_tools.dashboard',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
'django.contrib.admindocs',
)
THIRD_PARTY_APPS = (
'gunicorn',
'django_extensions',
'model_utils',
'djangosecure',
'crispy_forms',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.facebook',
# 'allauth.socialaccount.providers.google',
'djangoratings',
'django_activeurl',
'analytical',
)
LOCAL_APPS = (
'housing_reviews',
'agencies',
'reviews',
)
INSTALLED_APPS = LOCAL_APPS + DJANGO_APPS + THIRD_PARTY_APPS
# Heroku #
# Honor the 'X-Forwarded-Proto' header for request.is_secure()
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# END Heroku #
ALLOWED_HOSTS = ['*']
SECURE_FRAME_DENY = True
SECURE_CONTENT_TYPE_NOSNIFF = True
SECURE_BROWSER_XSS_FILTER = True
SESSION_COOKIE_SECURE = False
SESSION_COOKIE_HTTPONLY = True
# Crispy forms #
CRISPY_TEMPLATE_PACK = 'bootstrap'
def include_config(filename):
filename = str(ROOT_DIR('housing_reviews', 'settings', filename))
with open(filename) as f:
code = compile(f.read(), filename, 'exec')
exec code
include_config('logging.py')
include_config('email.py')
include_config('auth.py')
include_config('analytics.py')
include_config('mailchimp.py')
|
{
"content_hash": "20b355476b07c70ff8e33639468fbea1",
"timestamp": "",
"source": "github",
"line_count": 179,
"max_line_length": 79,
"avg_line_length": 28.692737430167597,
"alnum_prop": 0.7202102803738317,
"repo_name": "borfast/housing-reviews",
"id": "78796b1d9456a6c84212579b49df3bf2f813ab87",
"size": "5136",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "housing_reviews/settings/base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "75698"
},
{
"name": "HTML",
"bytes": "36333"
},
{
"name": "JavaScript",
"bytes": "1"
},
{
"name": "Python",
"bytes": "27775"
}
],
"symlink_target": ""
}
|
"""
Remove caveats to permissions
Revision ID: 84262e097c26
Revises: f345394c444f
Create Date: 2022-04-05 18:35:57.325801
"""
from alembic import op
revision = "84262e097c26"
down_revision = "f345394c444f"
def upgrade():
op.alter_column("macaroons", "caveats", new_column_name="permissions_caveat")
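    # Roughly equivalent to the SQL (illustrative, PostgreSQL syntax):
    #   ALTER TABLE macaroons RENAME COLUMN caveats TO permissions_caveat;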
def downgrade():
op.alter_column("macaroons", "permissions_caveat", new_column_name="caveats")
|
{
"content_hash": "00ad00790e9da70d232718365b223738",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 81,
"avg_line_length": 20.45,
"alnum_prop": 0.7334963325183375,
"repo_name": "pypa/warehouse",
"id": "9bee3f5df6fae8474a99c1948a8b996a9ad8b4a1",
"size": "949",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "warehouse/migrations/versions/84262e097c26_rename_caveats_to_permissions.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "676"
},
{
"name": "Dockerfile",
"bytes": "6745"
},
{
"name": "HCL",
"bytes": "42"
},
{
"name": "HTML",
"bytes": "663799"
},
{
"name": "JavaScript",
"bytes": "128585"
},
{
"name": "Makefile",
"bytes": "5068"
},
{
"name": "Mako",
"bytes": "2040"
},
{
"name": "Procfile",
"bytes": "527"
},
{
"name": "Python",
"bytes": "3315335"
},
{
"name": "SCSS",
"bytes": "205844"
},
{
"name": "Shell",
"bytes": "9424"
},
{
"name": "YARA",
"bytes": "9079"
}
],
"symlink_target": ""
}
|
import unittest
import sys
# automake build dir
sys.path.insert(0, '..')
sys.path.insert(0, '../.libs')
# cmake build dir
sys.path.insert(0, '../../../build/bindings/python')
from pywsman import *
class TestSequenceFunctions(unittest.TestCase):
def test_enum_release(self):
# set_debug(1) # enable to print logging to stderr
client = Client( "http://wsman:secret@localhost:5985/wsman" )
client.transport().set_auth_method(BASIC_AUTH_STR) # Windows winrm needs this
options = ClientOptions()
# uri = "http://schemas.microsoft.com/wbem/wsman/1/wmi/root/cimv2/Win32_OperatingSystem"
uri = "http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ComputerSystem"
# options.set_dump_request() # enable to print SOAP request to stdout
doc = client.enumerate( options, None, uri)
root = doc.root()
assert root is not None
context = root.find(XML_NS_ENUMERATION, "EnumerationContext" )
doc = client.release( options, uri, context.__str__())
assert doc is not None
root = doc.root()
assert root is not None
resp = root.find(XML_NS_ADDRESSING, "Action" )
self.assertEquals( resp.__str__(), XML_NS_ENUMERATION + '/' + WSENUM_RELEASE_RESP )
print "Action %s" % resp
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "7ae1ba92d95f51485c514a54d347afea",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 89,
"avg_line_length": 32.68421052631579,
"alnum_prop": 0.6940418679549114,
"repo_name": "steakknife/openwsman",
"id": "fee16051de987f44ad9b35654d38633d75fc6ece",
"size": "1242",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "bindings/python/tests/release.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
"""
WSGI config for sciweather project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "sciweather.settings")
application = get_wsgi_application()
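# Typically served by a WSGI server, e.g. (illustrative command):
#
#     gunicorn sciweather.wsgi:application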
|
{
"content_hash": "fdb42d9a626ee614224129785003c9ed",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 78,
"avg_line_length": 24.875,
"alnum_prop": 0.7738693467336684,
"repo_name": "wasit7/tutorials",
"id": "91a24a5fe21cd3f3ae7dc70f8c35943cd64c1a52",
"size": "398",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "arduino_python/04_sciweather/sciweather/sciweather/wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Arduino",
"bytes": "161779"
},
{
"name": "Batchfile",
"bytes": "1953"
},
{
"name": "C",
"bytes": "580699"
},
{
"name": "C++",
"bytes": "500977"
},
{
"name": "CMake",
"bytes": "14548"
},
{
"name": "CSS",
"bytes": "12348"
},
{
"name": "Cuda",
"bytes": "16475"
},
{
"name": "Elixir",
"bytes": "391"
},
{
"name": "HTML",
"bytes": "81272"
},
{
"name": "JavaScript",
"bytes": "389"
},
{
"name": "Jupyter Notebook",
"bytes": "1175781"
},
{
"name": "Makefile",
"bytes": "8294"
},
{
"name": "PowerShell",
"bytes": "8175"
},
{
"name": "Processing",
"bytes": "10267"
},
{
"name": "Python",
"bytes": "193149"
},
{
"name": "Shell",
"bytes": "559"
},
{
"name": "XSLT",
"bytes": "2042"
}
],
"symlink_target": ""
}
|
from vispy import scene, io
from vispy.testing import (requires_application, TestingCanvas,
run_tests_if_main)
from vispy.testing.image_tester import assert_image_approved
@requires_application()
def test_perspective_render():
with TestingCanvas(size=(300, 200)) as canvas:
grid = canvas.central_widget.add_grid()
grid.padding = 20
imdata = io.load_crate().astype('float32') / 255
views = []
images = []
for i, imethod in enumerate(['impostor', 'subdivide']):
for j, vmethod in enumerate(['fragment', 'viewport', 'fbo']):
v = grid.add_view(row=i, col=j, border_color='white')
v.camera = 'turntable'
v.camera.fov = 50
v.camera.distance = 30
v.clip_method = vmethod
views.append(v)
image = scene.visuals.Image(imdata, method=imethod,
grid=(4, 4))
image.transform = scene.STTransform(translate=(-12.8, -12.8),
scale=(0.1, 0.1))
v.add(image)
images.append(image)
image = canvas.render()
print("ViewBox shapes")
for v in views:
print(v.node_transform(canvas.canvas_cs).map(v.rect))
canvas.close()
# Allow many pixels to differ by a small amount--texture sampling and
# exact triangle position will differ across platforms. However a
# change in perspective or in the widget borders should trigger a
# failure.
assert_image_approved(image, 'scene/cameras/perspective_test.png',
'perspective test 1: 6 identical views with '
'correct perspective',
px_threshold=20,
px_count=60,
max_px_diff=200)
run_tests_if_main()
|
{
"content_hash": "6d0f9e6649107b03325fc99f95631871",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 77,
"avg_line_length": 39.11538461538461,
"alnum_prop": 0.5147492625368731,
"repo_name": "hronoses/vispy",
"id": "6fb47980507d6305fced2bf5f6201175995379ef",
"size": "2359",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vispy/scene/cameras/tests/test_perspective.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "143081"
},
{
"name": "GLSL",
"bytes": "171513"
},
{
"name": "JavaScript",
"bytes": "5007"
},
{
"name": "Makefile",
"bytes": "1593"
},
{
"name": "PowerShell",
"bytes": "4151"
},
{
"name": "Python",
"bytes": "2858273"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('ctdata', '0031_dataacademyabstractevent'),
]
operations = [
migrations.CreateModel(
name='DataAcademyLiveEvent',
fields=[
('dataacademyabstractevent_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='ctdata.DataAcademyAbstractEvent')),
],
options={
'abstract': False,
},
bases=('ctdata.dataacademyabstractevent',),
),
migrations.CreateModel(
name='DataAcademyWebEvent',
fields=[
('dataacademyabstractevent_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='ctdata.DataAcademyAbstractEvent')),
],
options={
'abstract': False,
},
bases=('ctdata.dataacademyabstractevent',),
),
]
|
{
"content_hash": "5220e6f53d697ea5b1acf740d33371c4",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 226,
"avg_line_length": 35.64705882352941,
"alnum_prop": 0.608085808580858,
"repo_name": "CT-Data-Collaborative/ctdata-wagtail-cms",
"id": "9288e601b6565a740051563770b9b39b36d4a02f",
"size": "1284",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ctdata/migrations/0032_dataacademyliveevent_dataacademywebevent.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "60702"
},
{
"name": "HTML",
"bytes": "103319"
},
{
"name": "JavaScript",
"bytes": "673447"
},
{
"name": "Nginx",
"bytes": "1201"
},
{
"name": "Python",
"bytes": "423981"
},
{
"name": "Shell",
"bytes": "8068"
}
],
"symlink_target": ""
}
|